content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
#module load intel/18.0 intelmpi/18.0 R/3.6.3; R
### makes rabbit input
# Builds the RABBIT phasing input for a single chromosome/scaffold from a
# Daphnia GDS genotype file plus a superclone table.
# Command-line arguments:
#   args[1] chr.i   - chromosome/scaffold name to extract
#   args[2] maxcM   - genetic map length (cM) used to space the markers
#   args[3] f1s.set - which set of F1 offspring clones to include (see below)
args = commandArgs(trailingOnly=TRUE)
chr.i <- as.character(args[1])
maxcM <- as.numeric(args[2])
f1s.set <- as.character(args[3])
#chr.i <- "Scaffold_1863_HRSCAF_2081"; maxcM=10; f1s.set <- "all_AxC"
### libraries
library(data.table)
library(SeqArray)
library(foreach)
### set wd
setwd("/project/berglandlab/Karen/MappingDec2019/WithPulicaria/June2020")
### load SuperClone
# Superclone assignments; columns used below: clone, SC, AxCF1Hybrid,
# OneLiterPheno and medrd (median read depth).
sc <- fread("Superclones201617182019withObtusaandPulicaria_kingcorr_20200623_wmedrd.txt")
### which F1s?
#f1s <- fread("/scratch/aob2x/daphnia_hwe_sims/DaphniaPulex20162017Sequencing/AlanAnalysis/rQTL/F1s_to_use.onlyPheno.delim")
#f1s <- fread("/scratch/aob2x/daphnia_hwe_sims/DaphniaPulex20162017Sequencing/AlanAnalysis/rQTL/F1s_to_use.allF1s.delim")
#f1s <- fread("/scratch/aob2x/daphnia_hwe_sims/DaphniaPulex20162017Sequencing/AlanAnalysis/rQTL/F1s_to_use.all_AxC_F1s.delim")
# Choose the offspring set named on the command line.
# NOTE(review): there is no else branch, so an unrecognised f1s.set leaves
# f1s undefined and the script fails at the data.table() call below.
if(f1s.set=="onlyPheno_AxC") {
f1s <- sc[AxCF1Hybrid==1][OneLiterPheno==1]$clone
} else if (f1s.set=="wildF1s_AxC"){
f1s <- sc[AxCF1Hybrid==1][OneLiterPheno==0]$clone
} else if(f1s.set=="all_AxC") {
f1s <- sc[AxCF1Hybrid==1]$clone
} else if(f1s.set=="all_CxC") {
f1s <- sc[OneLiterPheno==1][AxCF1Hybrid==0][SC=="selfedC"]$clone
}
# One-column table of offspring clone ids.
f1s <- data.table(cloneid=f1s)
### open GDS
# allow.duplicate=TRUE permits several concurrent jobs to open the same file.
genofile <- seqOpen("/project/berglandlab/Karen/MappingDec2019/WithPulicaria/June2020/MapJune2020_ann.seq.gds", allow.duplicate=TRUE)
### load in filter file
# Pre-computed list of SNPs to keep.
snpFilter <- fread("snpsvarpulexpresentinhalf_table_20200623")
### make snp.dt
# One row per variant in the GDS: location, variant id and allele count.
snp.dt <- data.table(chr=seqGetData(genofile, "chromosome"),
pos=seqGetData(genofile, "position"),
id=seqGetData(genofile, "variant.id"),
numAlleles=seqNumAllele(genofile),
key="chr")
setkey(snpFilter, chr, pos)
setkey(snp.dt, chr, pos)
# Inner join on (chr, pos): keep only variants that pass the SNP filter.
snp.dt <- merge(snpFilter, snp.dt)
#snp.dt.ag <- snp.dt[,.N,chr]
#write.table(snp.dt.ag, file="/scratch/aob2x/daphnia_hwe_sims/Rabbit_phase/chrs.csv", quote=F, row.names=T, col.names=F, sep=",")
### make large input file
### uses dosage information
# Restrict the GDS to the two parents (the highest-median-read-depth A and C
# clones), the chosen F1 offspring, and biallelic SNPs on the target chromosome.
seqSetFilter(genofile,
sample.id=c(sc[SC=="A"][which.max(medrd)]$clone,
sc[SC=="C"][which.max(medrd)]$clone,
f1s$cloneid),
variant.id=snp.dt[J(chr.i)][numAlleles==2]$id)
# Dosage matrix: rows = variants, columns = samples (0/1/2 copies of the alt allele).
genomat <- as.data.table(t(seqGetData(genofile, "$dosage")))
setnames(genomat, seqGetData(genofile, "sample.id"))
# Rename the two parental columns to "A" and "C".
setnames(genomat, sc[SC=="A"][which.max(medrd)]$clone, "A")
setnames(genomat, sc[SC=="C"][which.max(medrd)]$clone, "C")
genomat[,id:=seqGetData(genofile, "variant.id")]
# Drop sites where both parents carry the same homozygous dosage
# (0/0 or 2/2), which are uninformative.
genomat <- genomat[(A!=0 & C!=2) | (A!=2 & C!=0)]
### most informative
# Keep only sites with at least one heterozygous parent; this supersedes the
# looser filter above.
genomat <- genomat[(A==1 & C==0) | (A==1 & C==2) | (A==0 & C==1) | (A==2 & C==1) | (A==1 & C==1)]
setkey(genomat, id)
#genomat <- genomat[J(sample(genomat$id, 5000))]
genomat <- genomat[order(id)]
### random sample
#genomat <- genomat[sample(c(1:dim(genomat)[1]), 5000)]
#genomat <- genomat[order(id)]
#table(genomat$A, genomat$C)
# Parent rows in RABBIT coding: dosage 0/1/2 -> genotypes "11"/"12"/"22".
# Each output row is c(parent_name, codes for every retained site).
parents <- foreach(ind.i=c("A", "C"), .combine="rbind")%do%{
tmp <- t(as.matrix(genomat[,ind.i, with=F]))
tmp[tmp=="2"] <- "22"
tmp[tmp=="1"] <- "12"
tmp[tmp=="0"] <- "11"
cbind(matrix(ind.i, ncol=1), tmp)
}
# Offspring rows: dosage 0 -> "1N", 1 -> "12", 2 -> "2N", missing -> "NN"
# ("N" = unknown allele). .errorhandling="remove" silently skips any F1
# whose column is absent from genomat.
offspring <- foreach(ind.i=f1s$cloneid, .combine="rbind", .errorhandling="remove")%do%{
tmp <- t(as.matrix(genomat[,ind.i, with=F]))
tmp[tmp=="2"] <- "2N"
#tmp[tmp=="1"] <- sample(c("1N","2N"), dim(tmp)[1], replace=T)
tmp[tmp=="1"] <- "12"
tmp[tmp=="0"] <- "1N"
tmp[is.na(tmp)] <- "NN"
cbind(matrix(ind.i, ncol=1), tmp)
}
### RABBIT header: three rows (marker id, chromosome index, genetic position).
marker <- matrix(c("marker", seqGetData(genofile, "variant.id")), nrow=1)
#chr <- matrix(c("chromosome", rep(NA, dim(genomat)[1])), nrow=1)
#pos <- matrix(c("pos(cM)", rep(NA, dim(genomat)[1])), nrow=1)
chr <- matrix(c("chromosome", rep(as.numeric(as.factor(chr.i)), dim(marker)[2]-1)), nrow=1)
# Markers are spaced evenly from 0 to maxcM. The row label must be "pos(cM)";
# it previously read "chromosome", duplicating the label of the row above
# (compare the commented-out template two lines up).
pos <- matrix(c("pos(cM)", seq(from=0, to=maxcM, length.out=dim(marker)[2]-1)), nrow=1)
header <- do.call("rbind", list(marker, chr, pos))
out <- do.call("rbind", list(header, parents, offspring))
### write the genotype file: a "#founders" line first, then the matrix as csv
out.fn <- paste0("/scratch/aob2x/daphnia_hwe_sims/Rabbit_phase_", maxcM, "cm/", chr.i, "/", chr.i, ".all.in")
writeLines( paste0("#founders,", 2),
con=out.fn
)
# Avoid scientific notation when the cM positions are written below.
options(scipen=999)
write.table(out,
file=out.fn,
quote=FALSE,
row.names=FALSE,
col.names=FALSE,
sep=",",
na="NA",
append=TRUE)
### make ped file
# RABBIT pedigree: founders 1 (A) and 2 (C); generation-1 member 3 is their
# cross. Every F1 clone is then listed as a sample of pedigree member 3 with
# funnel code "1-2".
ped.fn <- paste0("/scratch/aob2x/daphnia_hwe_sims/Rabbit_phase_", maxcM, "cm/", chr.i, "/", chr.i, ".ped")
writeLines( "Pedigree-Information,DesignPedigree\nGeneration,MemberID,Female=1/Male=2/Hermaphrodite=0,MotherID,FatherID\n0,1,1,0,0\n0,2,2,0,0\n1,3,0,1,2\nPedigree-Information,SampleInfor\nProgenyLine,MemberID,Funnelcode",
con=ped.fn
)
# Append one line per F1 sample: cloneid, member id 3, funnel code.
f1s[,id:=3]
f1s[,fc:="1-2"]
write.table(f1s,
file=ped.fn,
quote=FALSE,
row.names=FALSE,
col.names=FALSE,
sep=",",
na="NA",
append=TRUE)
| /AlanAnalysis/rQTL/rabbit.format_input.maxRD.dosage.R | no_license | kbkubow/DaphniaPulex20162017Sequencing | R | false | false | 5,543 | r | #module load intel/18.0 intelmpi/18.0 R/3.6.3; R
### makes rabbit input
args = commandArgs(trailingOnly=TRUE)
chr.i <- as.character(args[1])
maxcM <- as.numeric(args[2])
f1s.set <- as.character(args[3])
#chr.i <- "Scaffold_1863_HRSCAF_2081"; maxcM=10; f1s.set <- "all_AxC"
### libraries
library(data.table)
library(SeqArray)
library(foreach)
### set wd
setwd("/project/berglandlab/Karen/MappingDec2019/WithPulicaria/June2020")
### load SuperClone
sc <- fread("Superclones201617182019withObtusaandPulicaria_kingcorr_20200623_wmedrd.txt")
### which F1s?
#f1s <- fread("/scratch/aob2x/daphnia_hwe_sims/DaphniaPulex20162017Sequencing/AlanAnalysis/rQTL/F1s_to_use.onlyPheno.delim")
#f1s <- fread("/scratch/aob2x/daphnia_hwe_sims/DaphniaPulex20162017Sequencing/AlanAnalysis/rQTL/F1s_to_use.allF1s.delim")
#f1s <- fread("/scratch/aob2x/daphnia_hwe_sims/DaphniaPulex20162017Sequencing/AlanAnalysis/rQTL/F1s_to_use.all_AxC_F1s.delim")
if(f1s.set=="onlyPheno_AxC") {
f1s <- sc[AxCF1Hybrid==1][OneLiterPheno==1]$clone
} else if (f1s.set=="wildF1s_AxC"){
f1s <- sc[AxCF1Hybrid==1][OneLiterPheno==0]$clone
} else if(f1s.set=="all_AxC") {
f1s <- sc[AxCF1Hybrid==1]$clone
} else if(f1s.set=="all_CxC") {
f1s <- sc[OneLiterPheno==1][AxCF1Hybrid==0][SC=="selfedC"]$clone
}
f1s <- data.table(cloneid=f1s)
### open GDS
genofile <- seqOpen("/project/berglandlab/Karen/MappingDec2019/WithPulicaria/June2020/MapJune2020_ann.seq.gds", allow.duplicate=TRUE)
### load in filter file
snpFilter <- fread("snpsvarpulexpresentinhalf_table_20200623")
### make snp.dt
snp.dt <- data.table(chr=seqGetData(genofile, "chromosome"),
pos=seqGetData(genofile, "position"),
id=seqGetData(genofile, "variant.id"),
numAlleles=seqNumAllele(genofile),
key="chr")
setkey(snpFilter, chr, pos)
setkey(snp.dt, chr, pos)
snp.dt <- merge(snpFilter, snp.dt)
#snp.dt.ag <- snp.dt[,.N,chr]
#write.table(snp.dt.ag, file="/scratch/aob2x/daphnia_hwe_sims/Rabbit_phase/chrs.csv", quote=F, row.names=T, col.names=F, sep=",")
### make large input file
### uses dosage information
seqSetFilter(genofile,
sample.id=c(sc[SC=="A"][which.max(medrd)]$clone,
sc[SC=="C"][which.max(medrd)]$clone,
f1s$cloneid),
variant.id=snp.dt[J(chr.i)][numAlleles==2]$id)
genomat <- as.data.table(t(seqGetData(genofile, "$dosage")))
setnames(genomat, seqGetData(genofile, "sample.id"))
setnames(genomat, sc[SC=="A"][which.max(medrd)]$clone, "A")
setnames(genomat, sc[SC=="C"][which.max(medrd)]$clone, "C")
genomat[,id:=seqGetData(genofile, "variant.id")]
genomat <- genomat[(A!=0 & C!=2) | (A!=2 & C!=0)]
### most informative
genomat <- genomat[(A==1 & C==0) | (A==1 & C==2) | (A==0 & C==1) | (A==2 & C==1) | (A==1 & C==1)]
setkey(genomat, id)
#genomat <- genomat[J(sample(genomat$id, 5000))]
genomat <- genomat[order(id)]
### random sample
#genomat <- genomat[sample(c(1:dim(genomat)[1]), 5000)]
#genomat <- genomat[order(id)]
#table(genomat$A, genomat$C)
parents <- foreach(ind.i=c("A", "C"), .combine="rbind")%do%{
tmp <- t(as.matrix(genomat[,ind.i, with=F]))
tmp[tmp=="2"] <- "22"
tmp[tmp=="1"] <- "12"
tmp[tmp=="0"] <- "11"
cbind(matrix(ind.i, ncol=1), tmp)
}
offspring <- foreach(ind.i=f1s$cloneid, .combine="rbind", .errorhandling="remove")%do%{
tmp <- t(as.matrix(genomat[,ind.i, with=F]))
tmp[tmp=="2"] <- "2N"
#tmp[tmp=="1"] <- sample(c("1N","2N"), dim(tmp)[1], replace=T)
tmp[tmp=="1"] <- "12"
tmp[tmp=="0"] <- "1N"
tmp[is.na(tmp)] <- "NN"
cbind(matrix(ind.i, ncol=1), tmp)
}
### RABBIT header: three rows (marker id, chromosome index, genetic position).
marker <- matrix(c("marker", seqGetData(genofile, "variant.id")), nrow=1)
#chr <- matrix(c("chromosome", rep(NA, dim(genomat)[1])), nrow=1)
#pos <- matrix(c("pos(cM)", rep(NA, dim(genomat)[1])), nrow=1)
chr <- matrix(c("chromosome", rep(as.numeric(as.factor(chr.i)), dim(marker)[2]-1)), nrow=1)
# Markers are spaced evenly from 0 to maxcM. The row label must be "pos(cM)";
# it previously read "chromosome", duplicating the label of the row above
# (compare the commented-out template two lines up).
pos <- matrix(c("pos(cM)", seq(from=0, to=maxcM, length.out=dim(marker)[2]-1)), nrow=1)
header <- do.call("rbind", list(marker, chr, pos))
out <- do.call("rbind", list(header, parents, offspring))
### write the genotype file: a "#founders" line first, then the matrix as csv
out.fn <- paste0("/scratch/aob2x/daphnia_hwe_sims/Rabbit_phase_", maxcM, "cm/", chr.i, "/", chr.i, ".all.in")
writeLines( paste0("#founders,", 2),
con=out.fn
)
# Avoid scientific notation when the cM positions are written below.
options(scipen=999)
write.table(out,
file=out.fn,
quote=FALSE,
row.names=FALSE,
col.names=FALSE,
sep=",",
na="NA",
append=TRUE)
### make ped file
ped.fn <- paste("/scratch/aob2x/daphnia_hwe_sims/Rabbit_phase_", maxcM, "cm/", chr.i, "/", chr.i, ".ped", sep="")
writeLines( "Pedigree-Information,DesignPedigree\nGeneration,MemberID,Female=1/Male=2/Hermaphrodite=0,MotherID,FatherID\n0,1,1,0,0\n0,2,2,0,0\n1,3,0,1,2\nPedigree-Information,SampleInfor\nProgenyLine,MemberID,Funnelcode",
con=ped.fn
)
f1s[,id:=3]
f1s[,fc:="1-2"]
write.table(f1s,
file=ped.fn,
quote=FALSE,
row.names=FALSE,
col.names=FALSE,
sep=",",
na="NA",
append=TRUE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/app.R
\name{getUiFunctions}
\alias{getUiFunctions}
\title{Matches user interface (UI) functions from a given loader}
\usage{
getUiFunctions(ns, loader, ..., priority = NULL)
}
\arguments{
\item{ns}{Shiny function to create namespaced IDs}
\item{loader}{Character: loader to run the functions}
\item{...}{Extra arguments to pass to the user interface (UI) functions}
\item{priority}{Character: name of functions to prioritise by the given
order; for instance, c("data", "analyses") would load "data", then "analyses"
then remaining functions}
}
\value{
List of functions related to the given loader
}
\description{
Matches user interface (UI) functions from a given loader
}
| /man/getUiFunctions.Rd | no_license | mgandal/psichomics | R | false | true | 755 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/app.R
\name{getUiFunctions}
\alias{getUiFunctions}
\title{Matches user interface (UI) functions from a given loader}
\usage{
getUiFunctions(ns, loader, ..., priority = NULL)
}
\arguments{
\item{ns}{Shiny function to create namespaced IDs}
\item{loader}{Character: loader to run the functions}
\item{...}{Extra arguments to pass to the user interface (UI) functions}
\item{priority}{Character: name of functions to prioritise by the given
order; for instance, c("data", "analyses") would load "data", then "analyses"
then remaining functions}
}
\value{
List of functions related to the given loader
}
\description{
Matches user interface (UI) functions from a given loader
}
|
#' Triangle plot
#'
#' @description Function which plots a triangle based on its apexes.
#'
#' Arguments are taken in (x, y) pairs, one pair per apex, matching the
#' signature below: (x1, y1, x2, y2, x3, y3).
#'
#' @param x1 Value of the first point on the X axis.
#' @param y1 Value of the first point on the Y axis.
#' @param x2 Value of the second point on the X axis.
#' @param y2 Value of the second point on the Y axis.
#' @param x3 Value of the third point on the X axis.
#' @param y3 Value of the third point on the Y axis.
#'
#' @return Plot
#' @export
#'
#' @examples
#' fo_plot_tri(0, 0, 1, 0, 0, 1)
fo_plot_tri <- function(x1, y1, x2, y2, x3, y3){
x <- c(x1, x2, x3)
y <- c(y1, y2, y3)
# Draw the three apexes, then connect each pair with a segment.
graphics::plot(x, y)
graphics::lines(c(x[1], x[2]), c(y[1], y[2]))
graphics::lines(c(x[2], x[3]), c(y[2], y[3]))
graphics::lines(c(x[1], x[3]), c(y[1], y[3]))
}
| /R/fo_plottri.R | permissive | spacea/projekt.2019.pacocha | R | false | false | 754 | r | #' Triangle plot
#'
#' @description Function which plots a triangle based on its apexes.
#'
#' @param x1 Value of the first point on the X axis.
#' @param x2 Value of the second point on the X axis.
#' @param x3 Value of the third point on the X axis.
#' @param y1 Value of the first point on the Y axis.
#' @param y2 Value of the second point on the Y axis.
#' @param y3 Value of the third point on the Y axis.
#'
#' @return Plot
#' @export
#'
#' @examples
#' fo_plot_tri(0, 0, 1, 0, 0, 1)
fo_plot_tri <- function(x1, y1, x2, y2, x3, y3){
  # Collect the apex coordinates, scatter them, then join every pair of
  # apexes with a line segment (edges 1-2, 2-3 and 1-3).
  xs <- c(x1, x2, x3)
  ys <- c(y1, y2, y3)
  graphics::plot(xs, ys)
  for (edge in list(c(1, 2), c(2, 3), c(1, 3))) {
    graphics::lines(xs[edge], ys[edge])
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/patch_uint8.R
\name{sequence}
\alias{sequence}
\alias{:}
\alias{:.default}
\alias{:.uint8}
\title{Sequence}
\usage{
":"(from, to)
\method{:}{default}(from, to)
\method{:}{uint8}(from, to)
}
\arguments{
\item{from}{from}
\item{to}{to}
}
\value{
sequence
}
\description{
Only dispatches on first argument.
}
| /man/sequence.Rd | permissive | coolbutuseless/uint8 | R | false | true | 387 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/patch_uint8.R
\name{sequence}
\alias{sequence}
\alias{:}
\alias{:.default}
\alias{:.uint8}
\title{Sequence}
\usage{
":"(from, to)
\method{:}{default}(from, to)
\method{:}{uint8}(from, to)
}
\arguments{
\item{from}{from}
\item{to}{to}
}
\value{
sequence
}
\description{
Only dispatches on first argument.
}
|
# Prepare the FiveThirtyEight NBA Elo data for inclusion as a package data set.
library(tidyverse)
library(stringr)
library(lubridate)
library(janitor)
library(usethis)
# Each game appears twice in the raw file, once per team, flagged by `_iscopy`
# (0 = primary row, 1 = copy). Sorting by game_id then `_iscopy` puts the two
# rows of a game next to each other, so lead() pulls the opponent's values
# onto the primary row before the copies are filtered out.
nba_all_elo <- read_csv("data-raw/nba-elo/nbaallelo.csv")%>%
arrange(game_id, `_iscopy`)%>%
mutate_if(is.character,as.factor)%>%
mutate(opp_win_equiv = lead(win_equiv),
opp_seasongame = lead(seasongame, 1),
date_game = mdy(date_game),
is_playoffs = as.logical(is_playoffs),
notes = as.character(notes))%>%
filter(`_iscopy` == 0)%>%
select(-c(`_iscopy`, game_location))
# Save to data/nba_all_elo.rda for use by the package.
usethis::use_data(nba_all_elo, overwrite = TRUE)
| /data-raw/process_data_set_jessica.R | permissive | mariumtapal/fivethirtyeight | R | false | false | 558 | r | library(tidyverse)
library(stringr)
library(lubridate)
library(janitor)
library(usethis)
nba_all_elo <- read_csv("data-raw/nba-elo/nbaallelo.csv")%>%
arrange(game_id, `_iscopy`)%>%
mutate_if(is.character,as.factor)%>%
mutate(opp_win_equiv = lead(win_equiv),
opp_seasongame = lead(seasongame, 1),
date_game = mdy(date_game),
is_playoffs = as.logical(is_playoffs),
notes = as.character(notes))%>%
filter(`_iscopy` == 0)%>%
select(-c(`_iscopy`, game_location))
usethis::use_data(nba_all_elo, overwrite = TRUE)
|
# Exploratory analysis of the online-shoppers-intention data set: tabulates
# purchase outcomes, visit timing, page counts and visitor types.
library(dplyr)
library(ggplot2)
library(scales)
library(forcats)
data<-read.csv("online_shoppers_intention.csv")
print(data)
is.data.frame(data)
print(data$Region)
print(data$Revenue)
#most shoppers did not purchase items while visiting this site, as shown here
# NOTE(review): length(which(cond)) can be written sum(cond, na.rm = TRUE).
countTrueRev<-length(which(data$Revenue=="TRUE"))
countTrueRev
countFalseRev<-length(which(data$Revenue=="FALSE"))
countFalseRev
#most shoppers visited the site on the weekdays, not the weekends, as shown here
countWeekend<-length(which(data$Weekend=="TRUE"))
countWeekend
countNoWeekend<-length(which(data$Weekend=="FALSE"))
countNoWeekend
#most shoppers did not look at an administrative page, as shown here
countAdmin0<-length(which(data$Administrative=="0"))
countAdmin0
countAdmin1<-length(which(data$Administrative=="1"))
countAdmin1
countAdmin2<-length(which(data$Administrative=="2"))
countAdmin2
countAdmin3<-length(which(data$Administrative=="3"))
countAdmin3
#most shoppers (10,869) checked administrative pages for less than 60 seconds
#less than 1500 shoppers checked administrative pages for more than 60 seconds
#almost 6000 shoppers did not check administrative pages
# NOTE(review): comparing a numeric column against the string "60" coerces
# the numbers to character and compares lexicographically (e.g. "100" < "60"),
# which is almost certainly not intended — compare against numeric 60 instead.
countAdDur<-length(which(data$Administrative_Duration<"60"))
countAdDur
countAdDur0<-length(which(data$Administrative_Duration=="0"))
countAdDur0
countAdDur1<-length(which(data$Administrative_Duration>"60"))
countAdDur1
#the max amount of pages relating to products looked at is 705, the min is 0
max(data$ProductRelated, na.rm = TRUE)
min(data$ProductRelated, na.rm = TRUE)
#counting the amount of pages viewed
countProductPages0<-length(which(data$ProductRelated<="5"))
countProductPages0
# NOTE(review): which() takes a single condition; the second expression below
# is passed to which()'s arr.ind argument and does NOT restrict the count, so
# these are one-sided counts. The two conditions should be combined with &.
# The quoted thresholds also trigger the character-comparison problem above.
countProductPages1<-length(which(data$ProductRelated>="5", data$ProductRelated<"20"))
countProductPages1
countProductPages2<-length(which(data$ProductRelated>="20", data$ProductRelated<"100"))
countProductPages2
countProductPages3<-length(which(data$ProductRelated>="100", data$ProductRelated<"300"))
countProductPages3
countProductPages4<-length(which(data$ProductRelated>="300", data$ProductRelated<"1000"))
countProductPages4
sum(data$ProductRelated, na.rm = TRUE)
#sum of total amount of visitors is 12,330, as shown here
# NOTE(review): countVisNew/countVisOther/countVisRet are defined further down
# this script, so this line only works after that later section has been run.
# sum(a+b+c) is also redundant — the + already produces the total.
v<-sum(countVisNew+countVisOther+countVisRet)
v
#4,780 visitors were from Region 1
countRegion1<-length(which(data$Region=="1"))
countRegion1
#VISITOR TYPE
#There are 10,551 returning visitors
#There are 1,694 new visitors
countVisNew<-length(which(data$VisitorType=="New_Visitor"))
countVisNew
countVisRet<-length(which(data$VisitorType=="Returning_Visitor"))
countVisRet
countVisOther<-length(which(data$VisitorType=="Other"))
countVisOther
#Types of TRAFFIC: Direct (shopper typed URL into browser),
#referral (shopper clicked on a link from another website),
#search (shopper came from search engine), campaign (shopper came from campaign), and there are more different branches of said traffic
#The less pages customers looked at, the more likely they were to buy a product. Most who viewed product related pages purchased a product
#this graph shows relationship with revenue
# NOTE(review): with data= supplied, aesthetics should reference bare column
# names (aes(ProductRelated, VisitorType, ...)) rather than data$... vectors.
ggplot(data = data, aes(x=data$ProductRelated, y=data$VisitorType,
colour=data$VisitorType,
size=data$Revenue))+
geom_point()
#this graph shows relationship between visitors and product related pages with no revenue factored in
qplot(data$VisitorType,data$ProductRelated,xlab = 'Visitor Type',
ylab = 'Number of Product Related Pages Viewed',
main='Visitor Type VS Product Pages')
#Quick plot: Administrative Pages vs Visitors
#Returning visitors visited a greater amount of administrative pages to view/edit their account
qplot(data$VisitorType,data$Administrative,xlab = 'Visitor Type',
ylab = 'Admin Pages',
main='Visitor Type VS Admin Pages')
#Graph shows how long shoppers took viewing/editing their account information
#Most visitors took 15 minutes or less to view/edit information
qplot(data$VisitorType,data$Administrative_Duration,xlab = 'Visitor Type',
ylab = 'Seconds (time)',
main='Administrative Pages Duration')
#Bounce rate of visitors/traffic type (where the visitors came from)
qplot(data$BounceRates,data$TrafficType,
xlab = 'Bounce Rate',
ylab = 'Traffic Type',
main='Bounce Rates of Visitors & Traffic Type', colour=data$VisitorType)
| /Analysis of Online Shoppers.R | no_license | asiapm/Analysis-of-Online-Visitors | R | false | false | 4,513 | r | library(dplyr)
library(ggplot2)
library(scales)
library(forcats)
data<-read.csv("online_shoppers_intention.csv")
print(data)
is.data.frame(data)
print(data$Region)
print(data$Revenue)
#most shoppers did not purchase items while visiting this site, as shown here
countTrueRev<-length(which(data$Revenue=="TRUE"))
countTrueRev
countFalseRev<-length(which(data$Revenue=="FALSE"))
countFalseRev
#most shoppers visited the site on the weekdays, not the weekends, as shown here
countWeekend<-length(which(data$Weekend=="TRUE"))
countWeekend
countNoWeekend<-length(which(data$Weekend=="FALSE"))
countNoWeekend
#most shoppers did not look at an administrative page, as shown here
countAdmin0<-length(which(data$Administrative=="0"))
countAdmin0
countAdmin1<-length(which(data$Administrative=="1"))
countAdmin1
countAdmin2<-length(which(data$Administrative=="2"))
countAdmin2
countAdmin3<-length(which(data$Administrative=="3"))
countAdmin3
#most shoppers (10,869) checked administrative pages for less than 60 seconds
#less than 1500 shoppers checked administrative pages for more than 60 seconds
#almost 6000 shoppers did not check administrative pages
countAdDur<-length(which(data$Administrative_Duration<"60"))
countAdDur
countAdDur0<-length(which(data$Administrative_Duration=="0"))
countAdDur0
countAdDur1<-length(which(data$Administrative_Duration>"60"))
countAdDur1
#the max amount of pages relating to products looked at is 705, the min is 0
max(data$ProductRelated, na.rm = TRUE)
min(data$ProductRelated, na.rm = TRUE)
#counting the amount of pages viewed
countProductPages0<-length(which(data$ProductRelated<="5"))
countProductPages0
countProductPages1<-length(which(data$ProductRelated>="5", data$ProductRelated<"20"))
countProductPages1
countProductPages2<-length(which(data$ProductRelated>="20", data$ProductRelated<"100"))
countProductPages2
countProductPages3<-length(which(data$ProductRelated>="100", data$ProductRelated<"300"))
countProductPages3
countProductPages4<-length(which(data$ProductRelated>="300", data$ProductRelated<"1000"))
countProductPages4
sum(data$ProductRelated, na.rm = TRUE)
#sum of total amount of visitors is 12,330, as shown here
v<-sum(countVisNew+countVisOther+countVisRet)
v
#4,780 visitors were from Region 1
countRegion1<-length(which(data$Region=="1"))
countRegion1
#VISITOR TYPE
#There are 10,551 returning visitors
#There are 1,694 new visitors
countVisNew<-length(which(data$VisitorType=="New_Visitor"))
countVisNew
countVisRet<-length(which(data$VisitorType=="Returning_Visitor"))
countVisRet
countVisOther<-length(which(data$VisitorType=="Other"))
countVisOther
#Types of TRAFFIC: Direct (shopper typed URL into browser),
#referral (shopper clicked on a link from another website),
#search (shopper came from search engine), campaign (shopper came from campaign), and there are more different branches of said traffic
#The less pages customers looked at, the more likely they were to buy a product. Most who viewed product related pages purchased a product
#this graph shows relationship with revenue
ggplot(data = data, aes(x=data$ProductRelated, y=data$VisitorType,
colour=data$VisitorType,
size=data$Revenue))+
geom_point()
#this graph shows relationship between visitors and product related pages with no revenue factored in
qplot(data$VisitorType,data$ProductRelated,xlab = 'Visitor Type',
ylab = 'Number of Product Related Pages Viewed',
main='Visitor Type VS Product Pages')
#Quick plot: Administrative Pages vs Visitors
#Returning visitors visited a greater amount of administrative pages to view/edit their account
qplot(data$VisitorType,data$Administrative,xlab = 'Visitor Type',
ylab = 'Admin Pages',
main='Visitor Type VS Admin Pages')
#Graph shows how long shoppers took viewing/editing their account information
#Most visitors took 15 minutes or less to view/edit information
qplot(data$VisitorType,data$Administrative_Duration,xlab = 'Visitor Type',
ylab = 'Seconds (time)',
main='Administrative Pages Duration')
#Bounce rate of visitors/traffic type (where the visitors came from)
qplot(data$BounceRates,data$TrafficType,
xlab = 'Bounce Rate',
ylab = 'Traffic Type',
main='Bounce Rates of Visitors & Traffic Type', colour=data$VisitorType)
|
# Draw vertical error bars on an existing plot.
#
# x, y    coordinates of the bar centres
# upper   distance from y to the top of each bar
# lower   distance from y to the bottom (defaults to upper: symmetric bars)
# color   colour passed through to arrows()
# length  cap width in inches. This parameter shadows base::length, but calls
#         such as length(x) still resolve to the base function because R only
#         considers function bindings when evaluating a call.
# ...     further arguments forwarded to arrows()
error.bar <- function(x, y, upper, lower=upper, color,length=0.06,...){
  # Scalar validity check: use short-circuiting || rather than elementwise |.
  if(length(x) != length(y) || length(y) != length(lower) || length(lower) != length(upper))
    stop("vectors must be same length")
  # angle=90 with code=3 renders flat caps at both ends of each bar.
  arrows(x,y+upper, x, y-lower, col=color,angle=90, code=3, length=length, ...)
}
# Two-page figure written to test.pdf:
#   page 1 - proportion vs distance-to-TSD, with error bars
#   page 2 - normalized DNase read density vs distance-to-TSD
pdf("test.pdf")
par(mar=c(6,4,4,2), cex=1.2)
# test.sum: column 1 = distance bin (bp), 2 = proportion, 3 = error term.
dist <- read.table("test.sum")
# Values are plotted in reverse order so distance decreases left to right.
plot(rev(dist[,2]), type='b', pch= 1,lwd = 2 , col="aquamarine3", xaxt='n', frame.plot = FALSE, ylim=c(0,100), ylab="Proportion", xlab="")
#lines(rev(sim[,2]), type='b',pch= 20, cex=0.2,lwd = 2 , col="dim gray")
error.bar(1:length(dist[,2]), rev(dist[,2]), rev(dist[,3]), rev(dist[,3]), 'dim gray')
# Blank tick labels; bin labels (in kb) are drawn manually with text().
axis(1,seq(1:length(dist[,1])),line=0, labels=rep("",length(dist[,1])))
# NOTE(review): rep(-0.04,7) hard-codes 7 bins; breaks if test.sum changes size.
text(seq(1:length(dist[,1])),rep(-0.04,7), cex=1, offset=2,labels=rev(dist[,1]/1000),srt=0,xpd=TRUE)
mtext("Distance to TSD (kb)", side=1, cex=1.2, at=6, line=3)
legend('topright', bty='n', border='NA', lty= c(1,2), pch = c(1,20), cex=1 , lwd = 2 ,col=c("aquamarine3", "dim gray"), c("Unique", "Control"))
# test_all.sum: column 1 = position (bp), 3 = read count; keep 500-2500 bp.
dist <- read.table("test_all.sum")
dist <- subset(dist, V1<=2500 & V1>=500)
plot(rev(dist[,3]/1000000), type='l', pch= 1,lwd = 2 , col="aquamarine3", xaxt='n', frame.plot = FALSE, ylim=c(5000/1000000,15000/1000000), ylab="Normalized DNase reads", xlab="")
#lines(rev(sim[,2]), type='b',pch= 20, cex=0.2,lwd = 2 , col="dim gray")
#error.bar(1:length(dist[,2]), rev(dist[,2]), rev(dist[,3]), rev(dist[,3]), 'dim gray')
# X axis is relabelled -1..1 kb around the TSD in 0.2 kb steps.
axis(1,seq(0, 2000, by=200),line=0, labels=rep("",11))
text(seq(0, 2000, by=200), rep(0.004, 11), cex=1, offset=2,labels=seq(-1, 1, by=0.2), srt=0,xpd=TRUE)
mtext("Distance to TSD (kb)", side=1, cex=1.2, at=1000, line=3)
legend('topright', bty='n', border='NA', lty= c(1,2), pch = c(1,20), cex=1 , lwd = 2 ,col=c("aquamarine3", "dim gray"), c("Unique", "Control"))
dev.off()
| /bin/Compare_DHS_density/bin/test.R | no_license | wangpanqiao/Transposition | R | false | false | 1,842 | r |
# Draw vertical error bars on an existing plot.
#
# x, y    coordinates of the bar centres
# upper   distance from y to the top of each bar
# lower   distance from y to the bottom (defaults to upper: symmetric bars)
# color   colour passed through to arrows()
# length  cap width in inches. This parameter shadows base::length, but calls
#         such as length(x) still resolve to the base function because R only
#         considers function bindings when evaluating a call.
# ...     further arguments forwarded to arrows()
error.bar <- function(x, y, upper, lower=upper, color,length=0.06,...){
  # Scalar validity check: use short-circuiting || rather than elementwise |.
  if(length(x) != length(y) || length(y) != length(lower) || length(lower) != length(upper))
    stop("vectors must be same length")
  # angle=90 with code=3 renders flat caps at both ends of each bar.
  arrows(x,y+upper, x, y-lower, col=color,angle=90, code=3, length=length, ...)
}
pdf("test.pdf")
par(mar=c(6,4,4,2), cex=1.2)
dist <- read.table("test.sum")
plot(rev(dist[,2]), type='b', pch= 1,lwd = 2 , col="aquamarine3", xaxt='n', frame.plot = FALSE, ylim=c(0,100), ylab="Proportion", xlab="")
#lines(rev(sim[,2]), type='b',pch= 20, cex=0.2,lwd = 2 , col="dim gray")
error.bar(1:length(dist[,2]), rev(dist[,2]), rev(dist[,3]), rev(dist[,3]), 'dim gray')
axis(1,seq(1:length(dist[,1])),line=0, labels=rep("",length(dist[,1])))
text(seq(1:length(dist[,1])),rep(-0.04,7), cex=1, offset=2,labels=rev(dist[,1]/1000),srt=0,xpd=TRUE)
mtext("Distance to TSD (kb)", side=1, cex=1.2, at=6, line=3)
legend('topright', bty='n', border='NA', lty= c(1,2), pch = c(1,20), cex=1 , lwd = 2 ,col=c("aquamarine3", "dim gray"), c("Unique", "Control"))
dist <- read.table("test_all.sum")
dist <- subset(dist, V1<=2500 & V1>=500)
plot(rev(dist[,3]/1000000), type='l', pch= 1,lwd = 2 , col="aquamarine3", xaxt='n', frame.plot = FALSE, ylim=c(5000/1000000,15000/1000000), ylab="Normalized DNase reads", xlab="")
#lines(rev(sim[,2]), type='b',pch= 20, cex=0.2,lwd = 2 , col="dim gray")
#error.bar(1:length(dist[,2]), rev(dist[,2]), rev(dist[,3]), rev(dist[,3]), 'dim gray')
axis(1,seq(0, 2000, by=200),line=0, labels=rep("",11))
text(seq(0, 2000, by=200), rep(0.004, 11), cex=1, offset=2,labels=seq(-1, 1, by=0.2), srt=0,xpd=TRUE)
mtext("Distance to TSD (kb)", side=1, cex=1.2, at=1000, line=3)
legend('topright', bty='n', border='NA', lty= c(1,2), pch = c(1,20), cex=1 , lwd = 2 ,col=c("aquamarine3", "dim gray"), c("Unique", "Control"))
dev.off()
|
# Plot 1: histogram of Global Active Power over 2007-02-01 / 2007-02-02.
# Load all the data ("?" marks missing values in this file).
# na.strings is spelled out rather than relying on partial matching of `na =`,
# and header uses TRUE rather than the reassignable shorthand T.
data <- read.csv("./household_power_consumption.txt", header = TRUE,
                 sep = ";", na.strings = "?")
data$Date <- as.Date(data$Date, "%d/%m/%Y")
# Get subset, data from the only 2 days we need
ds <- subset(data, (Date >= "2007-02-01" & Date <= "2007-02-02"))
# Make plot
hist(ds$Global_active_power, main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)", ylab = "Frequency",
     col = "red")
# Write to png file
dev.copy(png, file = "plot1.png", height = 480, width = 480)
dev.off()
| /plot1.R | no_license | michael-lee/ExData_Plotting1 | R | false | false | 533 | r | # Load all the data
# Read the full data set; "?" marks missing values. na.strings is spelled out
# rather than relying on partial matching of `na =`, and header uses TRUE
# rather than the reassignable shorthand T.
data <- read.csv("./household_power_consumption.txt", header = TRUE,
                 sep = ";", na.strings = "?")
data$Date <- as.Date(data$Date, "%d/%m/%Y")
# Keep only the two days of interest
ds <- subset(data, (Date >= "2007-02-01" & Date <= "2007-02-02"))
# Make plot
hist(ds$Global_active_power, main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)", ylab = "Frequency",
     col = "red")
# Write to png file
dev.copy(png, file = "plot1.png", height = 480, width = 480)
dev.off()
|
#setwd("~/Projects/DFS/")
#setwd("~/Documents/PrincetonFall16/fantasyfootball/DFS/")
####### DESCRIPTION #########
# In this file we clean various csv files in the optimizationCode folder for ease of use in other scripts.
# Each section loops over the weekly csv files, strips the " Sr."/" Jr."
# suffix from the player-name column, and writes the table back in place.
# Changes from the original: seq_len() instead of 1:n (safe when zero weeks
# have elapsed), get() instead of eval(parse(text=...)), fixed = TRUE so the
# suffixes are matched literally rather than as regular expressions (where
# "." matches any character), and TRUE/FALSE spelled out.
####### REMOVE FIRST AND LAST NAME FROM (ORIGINAL) DFN OFFENSE CSV #########
# Completed NFL weeks since the 2016-09-11 season opener.
week.latest <- ceiling((as.numeric(Sys.Date()) - as.numeric(as.Date("2016-09-11")))/7 + 1) - 1
for (i in seq_len(week.latest)) {
  # load files
  name <- paste0("dfn_offense_week", i)
  assign(name, read.csv(file = paste0('optimizationCode/data_warehouse/dailyfantasynerd/dfn_offense_week', i, '.csv'), stringsAsFactors = FALSE))
  # remove Sr. and Jr.
  temp <- get(name)
  temp$Player.Name <- sub(' Sr.', '', temp$Player.Name, fixed = TRUE)
  temp$Player.Name <- sub(' Jr.', '', temp$Player.Name, fixed = TRUE)
  # write to file
  write.csv(temp, file = paste0("optimizationCode/data_warehouse/dailyfantasynerd/dfn_offense_week", i, ".csv"), row.names = FALSE)
}
####### REMOVE FIRST AND LAST NAME FROM (UPDATED) DFN OFFENSE CSV #########
week.latest <- ceiling((as.numeric(Sys.Date()) - as.numeric(as.Date("2016-09-11")))/7 + 1) - 1
for (i in seq_len(week.latest)) {
  # load files
  name <- paste0("dfn_offense_updated_week", i)
  assign(name, read.csv(file = paste0('optimizationCode/data_warehouse/dailyfantasynerd/updates/dfn_offense_week', i, '.csv'), stringsAsFactors = FALSE))
  # remove Sr. and Jr.
  temp <- get(name)
  temp$Player.Name <- sub(' Sr.', '', temp$Player.Name, fixed = TRUE)
  temp$Player.Name <- sub(' Jr.', '', temp$Player.Name, fixed = TRUE)
  # write to file
  write.csv(temp, file = paste0("optimizationCode/data_warehouse/dailyfantasynerd/updates/dfn_offense_week", i, ".csv"), row.names = FALSE)
}
####### REMOVE FIRST AND LAST NAME FROM CLEANED INPUT OFFENSE CSV #########
week.latest <- ceiling((as.numeric(Sys.Date()) - as.numeric(as.Date("2016-09-11")))/7 + 1) - 1
for (i in seq_len(week.latest)) {
  # load files
  name <- paste0("cleaned_input_offense_wk", i)
  assign(name, read.csv(file = paste0('optimizationCode/data_warehouse/2016_cleaned_input/wk', i, '/offensive_players.csv'), stringsAsFactors = FALSE))
  # remove Sr. and Jr.
  temp <- get(name)
  temp$Name <- sub(' Sr.', '', temp$Name, fixed = TRUE)
  temp$Name <- sub(' Jr.', '', temp$Name, fixed = TRUE)
  # write to file
  write.csv(temp, file = paste0("optimizationCode/data_warehouse/2016_cleaned_input/wk", i, "/offensive_players.csv"), row.names = FALSE)
}
####### REMOVE FIRST AND LAST NAME FROM CLEANED INPUT OFFENSE CSV #########
week.latest <- ceiling((as.numeric(Sys.Date()) - as.numeric(as.Date("2016-09-11")))/7 + 1) - 1
for (i in seq_len(week.latest)) {
  # load files
  name <- paste0("cleaned_input_offense_wk", i)
  assign(name, read.csv(file = paste0('optimizationCode/data_warehouse/2016_cleaned_input/wk', i, '/includes_thu-mon/offensive_players.csv'), stringsAsFactors = FALSE))
  # remove Sr. and Jr.
  temp <- get(name)
  temp$Name <- sub(' Sr.', '', temp$Name, fixed = TRUE)
  temp$Name <- sub(' Jr.', '', temp$Name, fixed = TRUE)
  # remove players with I, II, III, IV, V, etc suffixes
  # write to file
  write.csv(temp, file = paste0("optimizationCode/data_warehouse/2016_cleaned_input/wk", i, "/includes_thu-mon/offensive_players.csv"), row.names = FALSE)
}
| /NFL_old/optimizationCode/cleaning_data/dataCleaning.R | no_license | OliverCGreenwald/dfs | R | false | false | 3,237 | r | #setwd("~/Projects/DFS/")
#setwd("~/Documents/PrincetonFall16/fantasyfootball/DFS/")
####### DESCRIPTION #########
# In this file we clean various csv files in the optimizationCode folder for ease of use in other scripts.
####### REMOVE FIRST AND LAST NAME FROM (ORIGINAL) DFN OFFENSE CSV #########
# Number of completed NFL weeks since the 2016-09-11 season opener (7-day weeks).
week.latest <- ceiling((as.numeric(Sys.Date()) - as.numeric(as.Date("2016-09-11")))/7 + 1) - 1
# seq_len() instead of 1:week.latest so the loop is skipped when no week has completed
for (i in seq_len(week.latest)) {
  # load file; keep the per-week global variable for backward compatibility,
  # but hold the raw data in `temp` directly instead of eval(parse(...))
  name <- paste0("dfn_offense_week", i)
  temp <- read.csv(file = paste0('optimizationCode/data_warehouse/dailyfantasynerd/dfn_offense_week', i, '.csv'), stringsAsFactors = F)
  assign(name, temp)
  # remove " Sr." and " Jr." suffixes; fixed = TRUE makes the "." a literal
  # period instead of a regex wildcard that would match any character
  temp$Player.Name <- sub(' Sr.', '', temp$Player.Name, fixed = TRUE)
  temp$Player.Name <- sub(' Jr.', '', temp$Player.Name, fixed = TRUE)
  # write the cleaned table back over the original file
  write.csv(temp, file = paste0("optimizationCode/data_warehouse/dailyfantasynerd/dfn_offense_week", i, ".csv"), row.names = F)
}
####### REMOVE FIRST AND LAST NAME FROM (UPDATED) DFN OFFENSE CSV #########
# Same cleaning as above, applied to the updated projections in updates/.
week.latest <- ceiling((as.numeric(Sys.Date()) - as.numeric(as.Date("2016-09-11")))/7 + 1) - 1
for (i in seq_len(week.latest)) {
  # load file; keep the per-week global variable for backward compatibility,
  # but hold the raw data in `temp` directly instead of eval(parse(...))
  name <- paste0("dfn_offense_updated_week", i)
  temp <- read.csv(file = paste0('optimizationCode/data_warehouse/dailyfantasynerd/updates/dfn_offense_week', i, '.csv'), stringsAsFactors = F)
  assign(name, temp)
  # remove " Sr." and " Jr." suffixes; fixed = TRUE makes the "." a literal
  # period instead of a regex wildcard that would match any character
  temp$Player.Name <- sub(' Sr.', '', temp$Player.Name, fixed = TRUE)
  temp$Player.Name <- sub(' Jr.', '', temp$Player.Name, fixed = TRUE)
  # write the cleaned table back over the original file
  write.csv(temp, file = paste0("optimizationCode/data_warehouse/dailyfantasynerd/updates/dfn_offense_week", i, ".csv"), row.names = F)
}
####### REMOVE FIRST AND LAST NAME FROM CLEANED INPUT OFFENSE CSV #########
# Number of completed NFL weeks since the 2016-09-11 season opener (7-day weeks).
week.latest <- ceiling((as.numeric(Sys.Date()) - as.numeric(as.Date("2016-09-11")))/7 + 1) - 1
# seq_len() instead of 1:week.latest so the loop is skipped when no week has completed
for (i in seq_len(week.latest)) {
  # load file; keep the per-week global variable for backward compatibility,
  # but hold the raw data in `temp` directly instead of eval(parse(...))
  name <- paste0("cleaned_input_offense_wk", i)
  temp <- read.csv(file = paste0('optimizationCode/data_warehouse/2016_cleaned_input/wk', i, '/offensive_players.csv'), stringsAsFactors = F)
  assign(name, temp)
  # remove " Sr." and " Jr." suffixes; fixed = TRUE makes the "." a literal
  # period instead of a regex wildcard that would match any character
  temp$Name <- sub(' Sr.', '', temp$Name, fixed = TRUE)
  temp$Name <- sub(' Jr.', '', temp$Name, fixed = TRUE)
  # write the cleaned table back over the original file
  write.csv(temp, file = paste0("optimizationCode/data_warehouse/2016_cleaned_input/wk", i, "/offensive_players.csv"), row.names = F)
}
####### REMOVE FIRST AND LAST NAME FROM CLEANED INPUT OFFENSE CSV #########
# Same cleaning as above, applied to the "includes_thu-mon" variant of the files.
week.latest <- ceiling((as.numeric(Sys.Date()) - as.numeric(as.Date("2016-09-11")))/7 + 1) - 1
for (i in seq_len(week.latest)) {
  # load file; keep the per-week global variable for backward compatibility,
  # but hold the raw data in `temp` directly instead of eval(parse(...))
  name <- paste0("cleaned_input_offense_wk", i)
  temp <- read.csv(file = paste0('optimizationCode/data_warehouse/2016_cleaned_input/wk', i, '/includes_thu-mon/offensive_players.csv'), stringsAsFactors = F)
  assign(name, temp)
  # remove " Sr." and " Jr." suffixes; fixed = TRUE makes the "." a literal
  # period instead of a regex wildcard that would match any character
  temp$Name <- sub(' Sr.', '', temp$Name, fixed = TRUE)
  temp$Name <- sub(' Jr.', '', temp$Name, fixed = TRUE)
  # TODO: remove players with I, II, III, IV, V, etc suffixes
  # write the cleaned table back over the original file
  write.csv(temp, file = paste0("optimizationCode/data_warehouse/2016_cleaned_input/wk", i, "/includes_thu-mon/offensive_players.csv"), row.names = F)
}
|
### Train a small MLP (RSNNS) on synthetic 2-D "checkerboard" data and
### visualise its decision surface. Interactive: pauses between plots.
### NOTE(review): results depend on the exact sequence of RNG calls; the
### set.seed() line is commented out, so every run differs.
library(RSNNS)
library(NeuralNetTools)
n=1000
#set.seed(1)
# more complex example with "islands", A and B scattered around alternative nodes on a grid
sig=0.2
# grid-node centres on the integer lattice {0..3} x {0..3}, jittered by
# Gaussian noise with sd = sig
x0=sample(seq(0,3,by=1),2*n, replace=T)
y0=sample(seq(0,3,by=1),2*n, replace=T)
x=x0+rnorm(2*n)*sig
y=y0+rnorm(2*n)*sig
# class label: 1 when x0+y0 is even (checkerboard colouring), else 0
t=ifelse(2*floor((x0+y0)/2)==(x0+y0),1,0)
trai=data.frame(x,y,t)
colnames(trai)=c("x","y","tr")
# mix: shuffle the rows before the train/test split
nc=ncol(trai)
dt=trai[sample(1:nrow(trai),length(1:nrow(trai))),1:nc]
# note: 1:nc-1 parses as (1:nc)-1 = 0:(nc-1); the 0 index is silently
# dropped, so this selects columns 1..(nc-1), i.e. the two predictors
Values <- dt[,1:nc-1]
Targets <- dt[,nc]
# test/validation: hold out 15% of the rows
trts <- splitForTrainingAndTest(Values, Targets, ratio=0.15)
dt=trts
str(dt)
#model <- mlp(trai[1:2],trai$tr,size=10,outputActFunc = "Act_Logistic",learnFuncParams = c(0.1), maxit = 100)
#http://www.ra.cs.uni-tuebingen.de/SNNS/UserManual/node52.html
# single hidden layer of 10 units, logistic output, backprop with momentum
model <- mlp(dt$inputsTrain,dt$targetsTrain,size=10,
inputsTest = dt$inputsTest, targetsTest = dt$targetsTest,
outputActFunc = "Act_Logistic",
# learnFunc = "Std_Backpropagation",learnFuncParams = c(0.1),
learnFunc = "BackpropMomentum",learnFuncParams = c(0.1,0.1),
# learnFunc = "SCG", learnFuncParams = c(0.1),
maxit = 2000)
summary(model)
readline(prompt="Press [enter] to continue")
# network topology diagram (NeuralNetTools)
plotnet(model)
readline(prompt="Press [enter] to continue")
# training/test error per iteration
plotIterativeError(model)
readline(prompt="Press [enter] to continue")
# ROC curve of the fitted values on the training set
plotROC(model$fitted.values,Targets)
readline(prompt="Press [enter] to continue")
# evaluate the trained network on a regular grid for the decision surface
xn=seq(-1,4,by=0.1)
yn=seq(-1,4,by=0.1)
out=outer(xn,yn,function(x,y){predict(model,data.frame(x,y))})
xp=dt$inputsTest[,1]
yp=dt$inputsTest[,2]
# masks for the two true classes in the test set
sa=dt$targetsTest==0
sb=dt$targetsTest==1
xa=xp[sa]
ya=yp[sa]
xb=xp[sb]
yb=yp[sb]
# decision surface with test points overlaid (circles = class 0, squares = class 1)
filled.contour(xn,yn,out,color=heat.colors,plot.axes={axis(1);axis(2);grid();lines(xa,ya,type="p");lines(xb,yb,type="p",pch=0)})
readline(prompt="Press [enter] to continue")
# threshold network output at 0.5, split by true class:
# A/B = true class 0 predicted as 0/1; Ab/Bb = true class 1 predicted as 0/1
A=model$fittedTestValues[sa]<0.5
B=model$fittedTestValues[sa]>0.5
Ab=model$fittedTestValues[sb]<0.5
Bb=model$fittedTestValues[sb]>0.5
# test points coloured by correctness (red = misclassified)
plot(xa[A],ya[A],xlim=range(xp),ylim=range(yp),xlab="",ylab="")
lines(xa[B],ya[B],col="red",type="p")
lines(xb[Ab],yb[Ab],pch=0,col="red",type="p")
lines(xb[Bb],yb[Bb],pch=0,type="p")
#efficiency matrix
tA=sum(sa)
tB=sum(sb)
# row for true class 0: fractions predicted as class 0 / class 1
cat(sum(A)/tA,sum(B)/tA,"\n")
cat(sum(Ab)/tB,sum(Bb)/tB,"\n") | /Statistical Programming/Neural-Networks/.ipynb_checkpoints/redes_parrilla-checkpoint.R | no_license | LauraTrujilloT/unican | R | false | false | 2,325 | r | library(RSNNS)
### Train a small MLP (RSNNS) on synthetic 2-D "checkerboard" data and
### visualise its decision surface. Interactive: pauses between plots.
### NOTE(review): results depend on the exact sequence of RNG calls; the
### set.seed() line is commented out, so every run differs.
library(NeuralNetTools)
n=1000
#set.seed(1)
# more complex example with "islands", A and B scattered around alternative nodes on a grid
sig=0.2
# grid-node centres on the integer lattice {0..3} x {0..3}, jittered by
# Gaussian noise with sd = sig
x0=sample(seq(0,3,by=1),2*n, replace=T)
y0=sample(seq(0,3,by=1),2*n, replace=T)
x=x0+rnorm(2*n)*sig
y=y0+rnorm(2*n)*sig
# class label: 1 when x0+y0 is even (checkerboard colouring), else 0
t=ifelse(2*floor((x0+y0)/2)==(x0+y0),1,0)
trai=data.frame(x,y,t)
colnames(trai)=c("x","y","tr")
# mix: shuffle the rows before the train/test split
nc=ncol(trai)
dt=trai[sample(1:nrow(trai),length(1:nrow(trai))),1:nc]
# note: 1:nc-1 parses as (1:nc)-1 = 0:(nc-1); the 0 index is silently
# dropped, so this selects columns 1..(nc-1), i.e. the two predictors
Values <- dt[,1:nc-1]
Targets <- dt[,nc]
# test/validation: hold out 15% of the rows
trts <- splitForTrainingAndTest(Values, Targets, ratio=0.15)
dt=trts
str(dt)
#model <- mlp(trai[1:2],trai$tr,size=10,outputActFunc = "Act_Logistic",learnFuncParams = c(0.1), maxit = 100)
#http://www.ra.cs.uni-tuebingen.de/SNNS/UserManual/node52.html
# single hidden layer of 10 units, logistic output, backprop with momentum
model <- mlp(dt$inputsTrain,dt$targetsTrain,size=10,
inputsTest = dt$inputsTest, targetsTest = dt$targetsTest,
outputActFunc = "Act_Logistic",
# learnFunc = "Std_Backpropagation",learnFuncParams = c(0.1),
learnFunc = "BackpropMomentum",learnFuncParams = c(0.1,0.1),
# learnFunc = "SCG", learnFuncParams = c(0.1),
maxit = 2000)
summary(model)
readline(prompt="Press [enter] to continue")
# network topology diagram (NeuralNetTools)
plotnet(model)
readline(prompt="Press [enter] to continue")
# training/test error per iteration
plotIterativeError(model)
readline(prompt="Press [enter] to continue")
# ROC curve of the fitted values on the training set
plotROC(model$fitted.values,Targets)
readline(prompt="Press [enter] to continue")
# evaluate the trained network on a regular grid for the decision surface
xn=seq(-1,4,by=0.1)
yn=seq(-1,4,by=0.1)
out=outer(xn,yn,function(x,y){predict(model,data.frame(x,y))})
xp=dt$inputsTest[,1]
yp=dt$inputsTest[,2]
# masks for the two true classes in the test set
sa=dt$targetsTest==0
sb=dt$targetsTest==1
xa=xp[sa]
ya=yp[sa]
xb=xp[sb]
yb=yp[sb]
# decision surface with test points overlaid (circles = class 0, squares = class 1)
filled.contour(xn,yn,out,color=heat.colors,plot.axes={axis(1);axis(2);grid();lines(xa,ya,type="p");lines(xb,yb,type="p",pch=0)})
readline(prompt="Press [enter] to continue")
# threshold network output at 0.5, split by true class:
# A/B = true class 0 predicted as 0/1; Ab/Bb = true class 1 predicted as 0/1
A=model$fittedTestValues[sa]<0.5
B=model$fittedTestValues[sa]>0.5
Ab=model$fittedTestValues[sb]<0.5
Bb=model$fittedTestValues[sb]>0.5
# test points coloured by correctness (red = misclassified)
plot(xa[A],ya[A],xlim=range(xp),ylim=range(yp),xlab="",ylab="")
lines(xa[B],ya[B],col="red",type="p")
lines(xb[Ab],yb[Ab],pch=0,col="red",type="p")
lines(xb[Bb],yb[Bb],pch=0,type="p")
#efficiency matrix
tA=sum(sa)
tB=sum(sb)
# row for true class 0: fractions predicted as class 0 / class 1
cat(sum(A)/tA,sum(B)/tA,"\n")
cat(sum(Ab)/tB,sum(Bb)/tB,"\n") |
library('hydrotools')
library('zoo')
library('knitr') # needed for kable()
basepath='/var/www/R';
source("/var/www/R/config.R")
source("https://raw.githubusercontent.com/HARPgroup/hydro-tools/master/R/fac_utils.R")
################################################################################################
################################################################################################
# get all upstream Rsegs
# rivseg = 'OR4_8271_8120'
#
# # Get all segs above rivseg of interest
# # Read data that requires file download
# download_read <- function(url, filetype, zip) {
# localpath <- tempdir()
# filename <- basename(url)
# filepath <- paste(localpath,"\\", filename, sep="")
#
# download.file(url, filepath)
#
# if(zip==TRUE){
# folder <- unzip(filepath, exdir=localpath)
# filepath <- grep(".*.csv.*", folder, value=TRUE)
# }
# if(filetype=="csv"){
# df <- read.csv(file=filepath, header=TRUE, sep=",")
# }
# if(filetype=="shp"){
# layer <- gsub("\\.zip", "", filename)
# df <- read_sf(dsn=localpath, layer=layer)
# }
# if(filetype!="csv" & filetype!="shp"){
# message(paste("Error in download_read(): filetype must be 'csv' or 'shp'"))
# }
# return(df)
# }
#
# #----From VAhydro----
# segs <- list()
# segs$all <- download_read(url=paste(site,"/vahydro_riversegs_export",sep=""), filetype="csv", zip=FALSE)
# segs$all$riverseg <- str_replace(segs$all$hydrocode, 'vahydrosw_wshed_', '') #prerequisite for fn_extract_basin()
# upstream_segs <- fn_extract_basin(segs$all, rivseg)
# print(upstream_segs$riverseg)
# print(upstream_segs$hydroid)
################################################################################################
################################################################################################
# this will show the runs going on
# ps ax|grep run_
# clear run
# php fn_clearRun.php 252117 401
# SML Impoundment:
# max(SMLdat$trigger1)
##############################################################################SML_om_id <- 252119
# runid <- 401
# --- SML impoundment element, run 400 ---------------------------------------
# om_get_rundata()/om_quantile_table() come from hydrotools; `omsite` is
# assumed to be defined by /var/www/R/config.R -- verify.
runid <- 400
SML_om_id <- 252119
SMLdat <- om_get_rundata(SML_om_id, runid, site = omsite)
SMLdat_df <- data.frame(SMLdat)
# interactive: list the available output columns
sort(colnames(SMLdat_df))
# quantile summary of impoundment / Leesville / drought-trigger variables (2 decimals)
SML_imp <- om_quantile_table(SMLdat_df, metrics = c("impoundment_demand","impoundment_demand_met_mgd",'impoundment_lake_elev',"impoundment_Storage","impoundment_use_remain_mg","impoundment_days_remaining","impoundment_Qin","impoundment_Qout",
"Leesville_Lake_demand","Leesville_Lake_demand_met_mgd","Leesville_Lake_lake_elev","Leesville_Lake_Storage","Leesville_Lake_use_remain_mg","Leesville_Lake_days_remaining","Leesville_Lake_Qin","Leesville_Lake_Qout","Leesville_Lake_release","Leesville_Lake_refill_full_mgd",
"wd_mgd","pump_lees","refill_lees","Qin","Qout","release_sml","sml_use_remain_mg",
"trigger1","trigger2","trigger3","trigger3_tbl","trigger_level","Qbrook","Rbrook","Tbrook","lees_min",
"sml_elev"
),rdigits = 2)
kable(SML_imp,'markdown')
# days ordered by lake elevation, with the corresponding trigger states
# NOTE(review): sqldf is not attached in this script; presumably loaded by
# hydrotools or config.R -- confirm
test <- sqldf("SELECT year, month, day, sml_elev, trigger1, trigger2, trigger3, trigger3_tbl, trigger_level
FROM SMLdat_df
ORDER BY sml_elev
")
################################################################################################
# LOAD MODEL IDs:
rseg_om_id <- 252117 # Smith Mountain and Leesville Dams
fac_om_id <- 351208 # SML SERVICE AREA:Smith Mountain and Leesville Dams
runid <- 401
################################################################################################
# --- facility and river-segment output, run 401 -----------------------------
facdat <- om_get_rundata(fac_om_id, runid, site = omsite)
rsegdat <- om_get_rundata(rseg_om_id, runid, site = omsite)
# model period start/end dates (the zoo index is POSIX seconds)
mstart <- zoo::as.Date(as.POSIXct(min(index(rsegdat)),origin="1970-01-01"))
mend <- zoo::as.Date(as.POSIXct(max(index(rsegdat)),origin="1970-01-01"))
facdat_df <- data.frame(facdat)
rsegdat_df <- data.frame(rsegdat)
# interactive inspection of available columns and two series of interest
sort(colnames(rsegdat_df))
sort(colnames(facdat_df))
facdat_df$impoundment_use_remain_mg
facdat_df$lake_elev
#-------------------------------------------------------------------------
# quantile summary of facility demand / permit / storage metrics (3 decimals)
SML <- om_quantile_table(facdat_df, metrics = c("historic_monthly_pct","vwp_max_mgy","vwp_max_mgd",
"vwp_base_mgd","wd_mgd","unmet_demand_mgd",
"impoundment_use_remain_mg","lake_elev"),
rdigits = 3)
kable(SML,'markdown')
################################################################################################
################################################################################################ | /R/permitting/smith_mountain_lake/SmithMountainLake_WTP.R | no_license | HARPgroup/vahydro | R | false | false | 4,864 | r | library('hydrotools')
library('zoo')
library('knitr') # needed for kable()
basepath='/var/www/R';
source("/var/www/R/config.R")
source("https://raw.githubusercontent.com/HARPgroup/hydro-tools/master/R/fac_utils.R")
################################################################################################
################################################################################################
# get all upstream Rsegs
# rivseg = 'OR4_8271_8120'
#
# # Get all segs above rivseg of interest
# # Read data that requires file download
# download_read <- function(url, filetype, zip) {
# localpath <- tempdir()
# filename <- basename(url)
# filepath <- paste(localpath,"\\", filename, sep="")
#
# download.file(url, filepath)
#
# if(zip==TRUE){
# folder <- unzip(filepath, exdir=localpath)
# filepath <- grep(".*.csv.*", folder, value=TRUE)
# }
# if(filetype=="csv"){
# df <- read.csv(file=filepath, header=TRUE, sep=",")
# }
# if(filetype=="shp"){
# layer <- gsub("\\.zip", "", filename)
# df <- read_sf(dsn=localpath, layer=layer)
# }
# if(filetype!="csv" & filetype!="shp"){
# message(paste("Error in download_read(): filetype must be 'csv' or 'shp'"))
# }
# return(df)
# }
#
# #----From VAhydro----
# segs <- list()
# segs$all <- download_read(url=paste(site,"/vahydro_riversegs_export",sep=""), filetype="csv", zip=FALSE)
# segs$all$riverseg <- str_replace(segs$all$hydrocode, 'vahydrosw_wshed_', '') #prerequisite for fn_extract_basin()
# upstream_segs <- fn_extract_basin(segs$all, rivseg)
# print(upstream_segs$riverseg)
# print(upstream_segs$hydroid)
################################################################################################
################################################################################################
# this will show the runs going on
# ps ax|grep run_
# clear run
# php fn_clearRun.php 252117 401
# SML Impoundment:
# max(SMLdat$trigger1)
##############################################################################SML_om_id <- 252119
# runid <- 401
# --- SML impoundment element, run 400 ---------------------------------------
# om_get_rundata()/om_quantile_table() come from hydrotools; `omsite` is
# assumed to be defined by /var/www/R/config.R -- verify.
runid <- 400
SML_om_id <- 252119
SMLdat <- om_get_rundata(SML_om_id, runid, site = omsite)
SMLdat_df <- data.frame(SMLdat)
# interactive: list the available output columns
sort(colnames(SMLdat_df))
# quantile summary of impoundment / Leesville / drought-trigger variables (2 decimals)
SML_imp <- om_quantile_table(SMLdat_df, metrics = c("impoundment_demand","impoundment_demand_met_mgd",'impoundment_lake_elev',"impoundment_Storage","impoundment_use_remain_mg","impoundment_days_remaining","impoundment_Qin","impoundment_Qout",
"Leesville_Lake_demand","Leesville_Lake_demand_met_mgd","Leesville_Lake_lake_elev","Leesville_Lake_Storage","Leesville_Lake_use_remain_mg","Leesville_Lake_days_remaining","Leesville_Lake_Qin","Leesville_Lake_Qout","Leesville_Lake_release","Leesville_Lake_refill_full_mgd",
"wd_mgd","pump_lees","refill_lees","Qin","Qout","release_sml","sml_use_remain_mg",
"trigger1","trigger2","trigger3","trigger3_tbl","trigger_level","Qbrook","Rbrook","Tbrook","lees_min",
"sml_elev"
),rdigits = 2)
kable(SML_imp,'markdown')
# days ordered by lake elevation, with the corresponding trigger states
# NOTE(review): sqldf is not attached in this script; presumably loaded by
# hydrotools or config.R -- confirm
test <- sqldf("SELECT year, month, day, sml_elev, trigger1, trigger2, trigger3, trigger3_tbl, trigger_level
FROM SMLdat_df
ORDER BY sml_elev
")
################################################################################################
# LOAD MODEL IDs:
rseg_om_id <- 252117 # Smith Mountain and Leesville Dams
fac_om_id <- 351208 # SML SERVICE AREA:Smith Mountain and Leesville Dams
runid <- 401
################################################################################################
# --- facility and river-segment output, run 401 -----------------------------
facdat <- om_get_rundata(fac_om_id, runid, site = omsite)
rsegdat <- om_get_rundata(rseg_om_id, runid, site = omsite)
# model period start/end dates (the zoo index is POSIX seconds)
mstart <- zoo::as.Date(as.POSIXct(min(index(rsegdat)),origin="1970-01-01"))
mend <- zoo::as.Date(as.POSIXct(max(index(rsegdat)),origin="1970-01-01"))
facdat_df <- data.frame(facdat)
rsegdat_df <- data.frame(rsegdat)
# interactive inspection of available columns and two series of interest
sort(colnames(rsegdat_df))
sort(colnames(facdat_df))
facdat_df$impoundment_use_remain_mg
facdat_df$lake_elev
#-------------------------------------------------------------------------
# quantile summary of facility demand / permit / storage metrics (3 decimals)
SML <- om_quantile_table(facdat_df, metrics = c("historic_monthly_pct","vwp_max_mgy","vwp_max_mgd",
"vwp_base_mgd","wd_mgd","unmet_demand_mgd",
"impoundment_use_remain_mg","lake_elev"),
rdigits = 3)
kable(SML,'markdown')
################################################################################################
################################################################################################ |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geneFoldChange.R
\name{geneFoldChange}
\alias{geneFoldChange}
\title{Preferential Expression in target}
\usage{
geneFoldChange(
stats,
gene,
maskvector = NULL,
mask = NULL,
tgt.thresh,
cntrst.thresh = NULL,
checkOrientation = TRUE
)
}
\arguments{
\item{stats}{1-D vector or MINC filename whose values will define the target and contrast regions. It is recommended that you provide a 1D vector instead of a filename, though the code will work regardless. If you are providing a filename, make sure it is in MINC orientation (X=Left-to-Right, Y=Posterior-to-Anterior, Z=Inferior-to-Superior). This is usually the case with files produced with MINC Tools.}
\item{gene}{1-D vector (same length as that of statsvector) or filename of gene expression data (raw format). It is recommended that you provide a 1D vector instead of a filename, though the code will work regardless. If you are providing a filename, make sure it is in ABI orientation X=Anterior-to-Posterior, Y=Superior-to-Inferior, and Z=Left-to-Right. This is usually the case when you download Allen Data.}
\item{maskvector}{mask for both stats and gene, for which elements to analyze. If filenames are provided for stats and gene, mask must be in MINC orientation. Vector Elements are either TRUE or FALSE and vector length must be the same as length of statsvector. DEFAULT: all elements are TRUE, ie. all elements in statsvector and gene are analyzed.}
\item{tgt.thresh}{threshold for target regions (statsvector>tgt.thresh form the target region)}
\item{cntrst.thresh}{threshold for contrast regions (statsvector<=crst.thresh form the contrast region). DEFAULT: contrast includes all the elements in the mask}
\item{checkOrientation}{should I check orientation. You should only turn this off if you know for sure that stats, gene, and mask are in the same orientation (either all ABI or all MINC).}
}
\value{
Fold-Change between mean expression in the target and contrast regions
}
\description{
This function will mask a statistic vector into a target and contrast region (default, contrast is the whole vector). Then, the mean gene expression energy in both target and contrast region is calculated, and its quotient (preferential gene expression in target vs contrast) is returned.
}
\examples{
# I included several files in this package for this example:
# The raw gene expression energy files for Nrp1 (http://api.brain-map.org/grid_data/download/74272479)
nrp1Filename=system.file('extdata/Nrp1_P56_coronal_74272479_200um.zip',package="ABIgeneRMINC")
# Allen Brain Institute annotations (http://download.alleninstitute.org/informatics-archive/current-release/mouse_annotation/P56_Mouse_gridAnnotation.zip)
annotFilename=system.file('extdata/P56_Mouse_gridAnnotation.zip',package="ABIgeneRMINC")
# T-statistics mincfiles comparing neuroanatomical volumes of mice raised in enriched environments vs standard lab cages.
statFilename=system.file('extdata/enrichment_stats.mnc',package="ABIgeneRMINC")
#
# The following example calculates the foldchange between Nrp1 expression
# in significantly larger neuroanatomy (t-statistics > 2) verses the whole brain.
# Read gene file and rotate it to MINC orientation
nrp1expr=allenVectorTOmincVector(read.raw.gene(nrp1Filename))
# Read stats file
stats=RMINC::mincGetVolume(statFilename)
# Read annotations, rotate it to MINC orientation, and binarize to make a brain maskvector
annotations=allenVectorTOmincVector(read.raw.gene(annotFilename))
maskvector=annotations>0 ; attributes(maskvector) = attributes(annotations)
# Foldchange of Nrp1 expression in target (regions with stats>2) vs contrast (whole brain)
geneFoldChange(stats,nrp1expr,maskvector,tgt.thresh=2)
# 2.072129
# Foldchange of Nrp1 expression in target (regions with stats>2) vs contrast (regions with stats<=0.5 )
geneFoldChange(stats,nrp1expr,maskvector,tgt.thresh=2,cntrst.thresh=0.5)
# 2.415599
#
# Foldchange of Nrp1 expression in target (regions with stats>2) vs contrast (whole brain)
# using strings with filenames instead of 1-D vectors
geneFoldChange(statFilename,nrp1Filename,mask = annotFilename,tgt.thresh=2)
# 2.072129
}
| /man/geneFoldChange.Rd | no_license | DJFernandes/ABIgeneRMINC | R | false | true | 4,223 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geneFoldChange.R
\name{geneFoldChange}
\alias{geneFoldChange}
\title{Preferential Expression in target}
\usage{
geneFoldChange(
stats,
gene,
maskvector = NULL,
mask = NULL,
tgt.thresh,
cntrst.thresh = NULL,
checkOrientation = TRUE
)
}
\arguments{
\item{stats}{1-D vector or MINC filename whose values will define the target and contrast regions. It is recommended that you provide a 1D vector instead of a filename, though the code will work regardless. If you are providing a filename, make sure it is in MINC orientation (X=Left-to-Right, Y=Posterior-to-Anterior, Z=Inferior-to-Superior). This is usually the case with files produced with MINC Tools.}
\item{gene}{1-D vector (same length as that of statsvector) or filename of gene expression data (raw format). It is recommended that you provide a 1D vector instead of a filename, though the code will work regardless. If you are providing a filename, make sure it is in ABI orientation X=Anterior-to-Posterior, Y=Superior-to-Inferior, and Z=Left-to-Right. This is usually the case when you download Allen Data.}
\item{maskvector}{mask for both stats and gene, for which elements to analyze. If filenames are provided for stats and gene, mask must be in MINC orientation. Vector Elements are either TRUE or FALSE and vector length must be the same as length of statsvector. DEFAULT: all elements are TRUE, ie. all elements in statsvector and gene are analyzed.}
\item{tgt.thresh}{threshold for target regions (statsvector>tgt.thresh form the target region)}
\item{cntrst.thresh}{threshold for contrast regions (statsvector<=crst.thresh form the contrast region). DEFAULT: contrast includes all the elements in the mask}
\item{checkOrientation}{should I check orientation. You should only turn this off if you know for sure that stats, gene, and mask are in the same orientation (either all ABI or all MINC).}
}
\value{
Fold-Change between mean expression in the target and contrast regions
}
\description{
This function will mask a statistic vector into a target and contrast region (default, contrast is the whole vector). Then, the mean gene expression energy in both target and contrast region is calculated, and its quotient (preferential gene expression in target vs contrast) is returned.
}
\examples{
# I included several files in this package for this example:
# The raw gene expression energy files for Nrp1 (http://api.brain-map.org/grid_data/download/74272479)
nrp1Filename=system.file('extdata/Nrp1_P56_coronal_74272479_200um.zip',package="ABIgeneRMINC")
# Allen Brain Institute annotations (http://download.alleninstitute.org/informatics-archive/current-release/mouse_annotation/P56_Mouse_gridAnnotation.zip)
annotFilename=system.file('extdata/P56_Mouse_gridAnnotation.zip',package="ABIgeneRMINC")
# T-statistics mincfiles comparing neuroanatomical volumes of mice raised in enriched environments vs standard lab cages.
statFilename=system.file('extdata/enrichment_stats.mnc',package="ABIgeneRMINC")
#
# The following example calculates the foldchange between Nrp1 expression
# in significantly larger neuroanatomy (t-statistics > 2) verses the whole brain.
# Read gene file and rotate it to MINC orientation
nrp1expr=allenVectorTOmincVector(read.raw.gene(nrp1Filename))
# Read stats file
stats=RMINC::mincGetVolume(statFilename)
# Read annotations, rotate it to MINC orientation, and binarize to make a brain maskvector
annotations=allenVectorTOmincVector(read.raw.gene(annotFilename))
maskvector=annotations>0 ; attributes(maskvector) = attributes(annotations)
# Foldchange of Nrp1 expression in target (regions with stats>2) vs contrast (whole brain)
geneFoldChange(stats,nrp1expr,maskvector,tgt.thresh=2)
# 2.072129
# Foldchange of Nrp1 expression in target (regions with stats>2) vs contrast (regions with stats<=0.5 )
geneFoldChange(stats,nrp1expr,maskvector,tgt.thresh=2,cntrst.thresh=0.5)
# 2.415599
#
# Foldchange of Nrp1 expression in target (regions with stats>2) vs contrast (whole brain)
# using strings with filenames instead of 1-D vectors
geneFoldChange(statFilename,nrp1Filename,mask = annotFilename,tgt.thresh=2)
# 2.072129
}
|
### PK exercise 49: zero-order IV infusion with endogenous synthesis.
### Loads the observed concentration-time data and defines the dosing record.
setwd("D:/Rt/Gab/")
require(wnl)
# observed data: TIME and DV (concentration); rows flagged "Missing" are dropped
dPK49 = read.csv("PK49.csv", as.is=TRUE)
colnames(dPK49) = c("TIME", "DV") # standardise column names
dPK49 = dPK49[dPK49[,"DV"] != "Missing",] # drop missing observations
dPK49[,"DV"] = as.numeric(dPK49[,"DV"]) ; dPK49
# dosing: amount 400 infused over 19 minutes (TAU in hours)
AMT = 400
TAU = 19 / 60 # 19 min
infRate = 400/19*60 # 1263.158, 1261.83 in WinNonlin
# One-compartment IV-infusion model with endogenous (zero-order) synthesis.
# THETA = c(V, CL, Syn): volume of distribution, clearance, synthesis rate.
# Reads dPK49 (observation times), TAU (infusion duration) and infRate
# (infusion rate) from the calling environment; returns predicted
# concentrations with during-infusion points first, post-infusion points last.
fPK49 = function(THETA)
{
vol <- THETA[1]
cl <- THETA[2]
syn <- THETA[3]
obs.times <- dPK49[,"TIME"]
during <- obs.times[obs.times <= TAU] # only one point at TIME=0
after <- obs.times[obs.times > TAU]
# endogenous baseline syn/cl plus the rising infusion term ...
conc.during <- syn/cl + infRate/cl*(1 - exp(-cl/vol*during))
# ... which decays mono-exponentially once the infusion stops at TAU
conc.after <- syn/cl + infRate/cl*(1 - exp(-cl/vol*TAU))*exp(-cl/vol*(after - TAU))
c(conc.during, conc.after)
}
# sanity check: predictions at previously published parameter estimates
fPK49(c(3.59259, 0.14204, 16.2272))
# nonlinear regression fit of V, CL and Syn (wnl::nlr) from initial estimates IE
nlr(fPK49, dPK49, pNames=c("V", "CL", "Syn"), IE=c(3.5, 0.12, 12))
# Figure 49.1, p 699
# overlay observed points and the fitted curve; `e` appears to be the fit
# environment created by nlr (holds PE / nTheta) -- TODO confirm
plot(dPK49[,"TIME"], dPK49[,"DV"], xlim=c(0, 160), ylim=c(80, 240), xlab="Time (h)", ylab="Concentration", pch=19)
lines(dPK49[,"TIME"], fPK49(e$PE[1:e$nTheta]))
###
| /R-old/PK49.R | no_license | pipetcpt/study-pkpd | R | false | false | 930 | r | setwd("D:/Rt/Gab/")
### PK exercise 49: zero-order IV infusion with endogenous synthesis.
require(wnl)
# observed data: TIME and DV (concentration); rows flagged "Missing" are dropped
dPK49 = read.csv("PK49.csv", as.is=TRUE)
colnames(dPK49) = c("TIME", "DV") # standardise column names
dPK49 = dPK49[dPK49[,"DV"] != "Missing",] # drop missing observations
dPK49[,"DV"] = as.numeric(dPK49[,"DV"]) ; dPK49
# dosing: amount 400 infused over 19 minutes (TAU in hours)
AMT = 400
TAU = 19 / 60 # 19 min
infRate = 400/19*60 # 1263.158, 1261.83 in WinNonlin
# Structural model: one-compartment IV infusion plus endogenous (zero-order)
# synthesis. THETA = c(V, CL, Syn). Uses dPK49/TAU/infRate from the global
# environment; returns during-infusion predictions first, post-infusion last.
fPK49 = function(THETA)
{
V = THETA[1]
CL = THETA[2]
Syn = THETA[3]
T1 = dPK49[,"TIME"][dPK49[,"TIME"] <= TAU] # only one point at TIME=0
T2 = dPK49[,"TIME"][dPK49[,"TIME"] > TAU]
# baseline Syn/CL plus rising infusion term; decays at rate CL/V after TAU
y1 = Syn/CL + infRate/CL*(1 - exp(-CL/V*T1))
y2 = Syn/CL + infRate/CL*(1 - exp(-CL/V*TAU))*exp(-CL/V*(T2 - TAU))
return(c(y1, y2))
}
# sanity check: predictions at previously published parameter estimates
fPK49(c(3.59259, 0.14204, 16.2272))
# nonlinear regression fit of V, CL and Syn from initial estimates IE
nlr(fPK49, dPK49, pNames=c("V", "CL", "Syn"), IE=c(3.5, 0.12, 12))
# Figure 49.1, p 699
# overlay observed points and the fitted curve; `e` appears to be the fit
# environment created by nlr (holds PE / nTheta) -- TODO confirm
plot(dPK49[,"TIME"], dPK49[,"DV"], xlim=c(0, 160), ylim=c(80, 240), xlab="Time (h)", ylab="Concentration", pch=19)
lines(dPK49[,"TIME"], fPK49(e$PE[1:e$nTheta]))
###
|
#### Script Description Header ####
# File Name: SDG&E JARP Interval Meter Data Cleaning.R
# File Location: "~/Desktop/SDG&E JARP/Wholesale Price Data/Clean Wholesale Price Data"
# Project: San Diego JARP Supplementary Testimony
# Description: Cleans wholeale energy price data for use in SDG&E JARP modeling.
#### User Inputs ####
Data_Year <- 2019
Time_Interval_Minutes <- 15
#### Load Packages ####
library(tidyverse)
library(lubridate)
# Disable Scientific Notation
options(scipen = 999)
# Set Working Directories
setwd("~/Desktop/SDG&E JARP Supplementary Testimony Workpapers/Wholesale Price Data/Clean Wholesale Price Data")
Code_WD <- getwd()
setwd("../")
setwd(file.path("Raw Wholesale Price Data", Data_Year))
Data_WD <- getwd()
#### Load and Clean CAISO RT5M Wholesale Price Data ####
# Data is for SDG&E DLAP, Real-Time 5-Minute Market
# Source: http://oasis.caiso.com/ Interval Locational Marginal Prices (LMP) DLAP_SDGE-APND
# Match only files ENDING in .csv: the previous pattern ".csv" treated the dot
# as a regex wildcard and also matched names merely containing "csv".
CAISO_RT5M_Files <- list.files(pattern = "\\.csv$")
# Read every monthly OASIS export, keep only the LMP price rows, and combine
# once at the end: rbind-ing inside a loop re-copies the accumulated frame on
# every iteration (quadratic time).
Raw_CAISO_RT5M_List <- lapply(CAISO_RT5M_Files, function(CAISO_RT5M_File) {
  read.csv(CAISO_RT5M_File) %>%
    filter(XML_DATA_ITEM == "LMP_PRC") %>%
    select(INTERVALSTARTTIME_GMT, VALUE)
})
# seed with a zero-row frame so an empty file list still yields a valid frame
Raw_CAISO_RT5M_Joined <- data.frame(INTERVALSTARTTIME_GMT = character(), VALUE = numeric(), stringsAsFactors = F)
Raw_CAISO_RT5M_Joined <- do.call(rbind, c(list(Raw_CAISO_RT5M_Joined), Raw_CAISO_RT5M_List))
rm(Raw_CAISO_RT5M_List, CAISO_RT5M_Files)
# Parse the OASIS GMT interval-start timestamps (first 16 chars, "T" -> space),
# convert to local Pacific time, and rescale prices from $/MWh to $/kWh.
Clean_SDGE_RT5M_LMP <- Raw_CAISO_RT5M_Joined %>%
mutate(Date_Time = as.POSIXct(gsub("T", " ", substr(INTERVALSTARTTIME_GMT, 1, 16)), tz = "UTC")) %>%
mutate(Date_Time = with_tz(Date_Time, tz = "America/Los_Angeles")) %>%
mutate(LMP_RT5M = VALUE/1000) %>% # Convert from $/MWh to $/kWh
select(Date_Time, LMP_RT5M) %>%
arrange(Date_Time)
rm(Raw_CAISO_RT5M_Joined)
#### Convert to New Time Resolution ####
# Aggregate the 5-minute series up to Time_Interval_Minutes by averaging all
# 5-minute prices whose start time falls inside each coarser interval.
if(Time_Interval_Minutes != 5){
Start_Time_Aggregated <- Clean_SDGE_RT5M_LMP$Date_Time[1]
End_Time_Aggregated <- Clean_SDGE_RT5M_LMP$Date_Time[nrow(Clean_SDGE_RT5M_LMP)]
Time_Vector_Aggregated <- data.frame(Date_Time_Aggregated = seq.POSIXt(Start_Time_Aggregated, End_Time_Aggregated, by = paste(Time_Interval_Minutes, "min")))
# findInterval() maps each 5-minute timestamp to the index of its enclosing
# coarse interval; that index is then replaced by the interval's start time
Clean_SDGE_RT5M_LMP <- Clean_SDGE_RT5M_LMP %>%
mutate(Date_Time = findInterval(Date_Time, Time_Vector_Aggregated$Date_Time_Aggregated)) %>%
mutate(Date_Time = Time_Vector_Aggregated$Date_Time_Aggregated[Date_Time]) %>%
group_by(Date_Time) %>%
summarize(LMP_RT5M = mean(LMP_RT5M, na.rm = T)) %>%
ungroup()
rm(Start_Time_Aggregated, End_Time_Aggregated, Time_Vector_Aggregated)
}
#### Save Final Dataset ####
# return to the clean-data directory and persist the result as .rds
setwd(Code_WD)
saveRDS(Clean_SDGE_RT5M_LMP, paste0("Clean_SDGE_RT5M_LMP_",Data_Year,"_",Time_Interval_Minutes, "_min.rds"))
write.csv(Clean_SDGE_RT5M_LMP, paste0("Clean_SDGE_RT5M_LMP_",Data_Year,"_",Time_Interval_Minutes, "_min.csv"), row.names = F) | /Wholesale Price Data/Clean Wholesale Price Data/SDGE DLAP RTP Cleaning.R | no_license | RyanCMann/SDGE_JARP_Supplementary_Testimony_Workpapers | R | false | false | 2,902 | r | #### Script Description Header ####
# File Name: SDG&E JARP Interval Meter Data Cleaning.R
# File Location: "~/Desktop/SDG&E JARP/Wholesale Price Data/Clean Wholesale Price Data"
# Project: San Diego JARP Supplementary Testimony
# Description: Cleans wholeale energy price data for use in SDG&E JARP modeling.
#### User Inputs ####
Data_Year <- 2019
Time_Interval_Minutes <- 15
#### Load Packages ####
library(tidyverse)
library(lubridate)
# Disable Scientific Notation
options(scipen = 999)
# Set Working Directories
setwd("~/Desktop/SDG&E JARP Supplementary Testimony Workpapers/Wholesale Price Data/Clean Wholesale Price Data")
Code_WD <- getwd()
setwd("../")
setwd(file.path("Raw Wholesale Price Data", Data_Year))
Data_WD <- getwd()
#### Load and Clean CAISO RT5M Wholesale Price Data ####
# Data is for SDG&E DLAP, Real-Time 5-Minute Market
# Source: http://oasis.caiso.com/ Interval Locational Marginal Prices (LMP) DLAP_SDGE-APND
# Match only files ENDING in .csv: the previous pattern ".csv" treated the dot
# as a regex wildcard and also matched names merely containing "csv".
CAISO_RT5M_Files <- list.files(pattern = "\\.csv$")
# Read every monthly OASIS export, keep only the LMP price rows, and combine
# once at the end: rbind-ing inside a loop re-copies the accumulated frame on
# every iteration (quadratic time).
Raw_CAISO_RT5M_List <- lapply(CAISO_RT5M_Files, function(CAISO_RT5M_File) {
  read.csv(CAISO_RT5M_File) %>%
    filter(XML_DATA_ITEM == "LMP_PRC") %>%
    select(INTERVALSTARTTIME_GMT, VALUE)
})
# seed with a zero-row frame so an empty file list still yields a valid frame
Raw_CAISO_RT5M_Joined <- data.frame(INTERVALSTARTTIME_GMT = character(), VALUE = numeric(), stringsAsFactors = F)
Raw_CAISO_RT5M_Joined <- do.call(rbind, c(list(Raw_CAISO_RT5M_Joined), Raw_CAISO_RT5M_List))
rm(Raw_CAISO_RT5M_List, CAISO_RT5M_Files)
# Parse the OASIS GMT interval-start timestamps (first 16 chars, "T" -> space),
# convert to local Pacific time, and rescale prices from $/MWh to $/kWh.
Clean_SDGE_RT5M_LMP <- Raw_CAISO_RT5M_Joined %>%
mutate(Date_Time = as.POSIXct(gsub("T", " ", substr(INTERVALSTARTTIME_GMT, 1, 16)), tz = "UTC")) %>%
mutate(Date_Time = with_tz(Date_Time, tz = "America/Los_Angeles")) %>%
mutate(LMP_RT5M = VALUE/1000) %>% # Convert from $/MWh to $/kWh
select(Date_Time, LMP_RT5M) %>%
arrange(Date_Time)
rm(Raw_CAISO_RT5M_Joined)
#### Convert to New Time Resolution ####
# Aggregate the 5-minute series up to Time_Interval_Minutes by averaging all
# 5-minute prices whose start time falls inside each coarser interval.
if(Time_Interval_Minutes != 5){
Start_Time_Aggregated <- Clean_SDGE_RT5M_LMP$Date_Time[1]
End_Time_Aggregated <- Clean_SDGE_RT5M_LMP$Date_Time[nrow(Clean_SDGE_RT5M_LMP)]
Time_Vector_Aggregated <- data.frame(Date_Time_Aggregated = seq.POSIXt(Start_Time_Aggregated, End_Time_Aggregated, by = paste(Time_Interval_Minutes, "min")))
# findInterval() maps each 5-minute timestamp to the index of its enclosing
# coarse interval; that index is then replaced by the interval's start time
Clean_SDGE_RT5M_LMP <- Clean_SDGE_RT5M_LMP %>%
mutate(Date_Time = findInterval(Date_Time, Time_Vector_Aggregated$Date_Time_Aggregated)) %>%
mutate(Date_Time = Time_Vector_Aggregated$Date_Time_Aggregated[Date_Time]) %>%
group_by(Date_Time) %>%
summarize(LMP_RT5M = mean(LMP_RT5M, na.rm = T)) %>%
ungroup()
rm(Start_Time_Aggregated, End_Time_Aggregated, Time_Vector_Aggregated)
}
#### Save Final Dataset ####
# return to the clean-data directory and persist the result as .rds
setwd(Code_WD)
saveRDS(Clean_SDGE_RT5M_LMP, paste0("Clean_SDGE_RT5M_LMP_",Data_Year,"_",Time_Interval_Minutes, "_min.rds"))
write.csv(Clean_SDGE_RT5M_LMP, paste0("Clean_SDGE_RT5M_LMP_",Data_Year,"_",Time_Interval_Minutes, "_min.csv"), row.names = F) |
#' Remove channels in wave files
#'
#' \code{rm_channels} removes channels from wave files
#' @usage rm_channels(files = NULL, channels, path = NULL, parallel = 1, pb = TRUE)
#' @param files Character vector indicating the files that will be analyzed. Optional. If not provided,
#' then all wave files in the working directory (or path) will be processed.
#' @param channels Numeric vector indicating the index (or channel number) for the channels that will be kept (left = 1, right = 2; 3 to inf for multichannel sound files).
#' @param path Character string containing the directory path where the sound files are located.
#' If \code{NULL} (default) then the current working directory is used.
#' @param parallel Numeric. Controls whether parallel computing is applied.
#' It specifies the number of cores to be used. Default is 1 (i.e. no parallel computing).
#' @param pb Logical argument to control progress bar and messages. Default is \code{TRUE}.
#' @return Sound files containing only the selected channels, saved in the new
#' folder "converted_sound_files". The original sound files are not modified.
#' @export
#' @name rm_channels
#' @details The function removes channels from wave files. It works on regular and
#' multichannel wave files. Converted files are saved in a new directory ("converted_sound_files")
#' and original files are not modified.
#' @seealso \code{\link{fixwavs}}, \code{\link{rm_sil}}
#' @examples{
#' # save sound file examples
#' data("Phae.long1")
#' Phae.long1.2 <- stereo(Phae.long1, Phae.long1)
#'
#' writeWave(Phae.long1.2, file.path(tempdir(), "Phae.long1.2.wav"))
#'
#' rm_channels(channels = 1, path = tempdir())
#'
#' # check this folder
#' tempdir()
#' }
#'
#' @references {
#' Araya-Salas, M., & Smith-Vidaurre, G. (2017). warbleR: An R package to streamline analysis of animal acoustic signals. Methods in Ecology and Evolution, 8(2), 184-191.
#' }
#' @author Marcelo Araya-Salas (\email{marceloa27@@gmail.com})
#last modification on Jul-30-2018 (MAS)
rm_channels <- function(files = NULL, channels, path = NULL, parallel = 1, pb = TRUE){
  #### set arguments from options
  # Arguments of *this* function; the original queried formalArgs(checkwavs),
  # a copy-paste bug that meant global warbleR options could never be matched
  # against rm_channels' own arguments.
  argms <- methods::formalArgs(rm_channels)

  # get warbleR options (an empty list when none have been set)
  opt.argms <- if (!is.null(getOption("warbleR"))) getOption("warbleR") else list()

  # rename path for sound files
  names(opt.argms)[names(opt.argms) == "wav.path"] <- "path"

  # keep only non-NULL options that correspond to arguments of this function
  opt.argms <- opt.argms[!sapply(opt.argms, is.null) & names(opt.argms) %in% argms]

  # arguments explicitly set in the call take precedence over global options
  call.argms <- as.list(base::match.call())[-1]
  opt.argms <- opt.argms[!names(opt.argms) %in% names(call.argms)]

  # assign the remaining options as local variables
  if (length(opt.argms) > 0)
    for (q in seq_along(opt.argms))
      assign(names(opt.argms)[q], opt.argms[[q]])

  # check path to working directory
  if (is.null(path)) path <- getwd() else if (!dir.exists(path)) stop("'path' provided does not exist")

  # list candidate wave files
  fls <- list.files(path = path, pattern = "\\.wav$", ignore.case = TRUE)

  # stop if files are not in working directory
  if (length(fls) == 0) stop("no .wav files in working directory")

  # subset based on file list provided (files)
  if (!is.null(files)) fls <- fls[fls %in% files]
  if (length(fls) == 0) stop(".wav files are not in working directory")

  # output directory; showWarnings = FALSE so re-running does not warn when it exists
  dir.create(file.path(path, "converted_sound_files"), showWarnings = FALSE)

  # Read one file, keep the requested channels and save the result.
  # Returns 0 on success, 1 when the file has fewer channels than max(channels).
  mcwv_FUN <- function(x, channels) {
    wv <- tuneR::readWave(file.path(path, x), toWaveMC = TRUE)

    if (nchannel(wv) >= max(channels)) {
      wv <- wv[, channels]

      # mono/stereo results can be stored as a regular Wave object
      if (nchannel(wv) <= 2) wv <- Wave(wv)

      writeWave(object = wv, filename = file.path(path, "converted_sound_files", x), extensible = FALSE)
      a <- 0
    } else a <- 1

    return(a)
  }

  pbapply::pboptions(type = ifelse(as.logical(pb), "timer", "none"))

  # set clusters for windows OS (&& since both operands are scalars);
  # stop the cluster on exit so worker processes are not leaked
  if (Sys.info()[1] == "Windows" && parallel > 1) {
    cl <- parallel::makePSOCKcluster(getOption("cl.cores", parallel))
    on.exit(parallel::stopCluster(cl), add = TRUE)
  } else cl <- parallel

  # run loop apply function
  out <- pbapply::pbsapply(X = fls, cl = cl, FUN = function(x) mcwv_FUN(x, channels))

  # report files skipped because they had fewer channels than max(channels)
  # (original message string was missing its closing parenthesis)
  if (sum(out) > 0)
    write(file = "", x = paste(sum(out), "file(s) not processed (# channels < max(channels))"))
}
| /R/rm_channels.R | no_license | edwbaker/warbleR | R | false | false | 4,500 | r | #' Remove channels in wave files
#'
#' \code{rm_channels} remove channels in wave files
#' @usage rm_channels(files = NULL, channels, path = NULL, parallel = 1, pb = TRUE)
#' @param files Character vector indicating the files that will be analyzed. If not provided. Optional.
#' then all wave files in the working directory (or path) will be processed.
#' @param channels Numeric vector indicating the index (or channel number) for the channels that will be kept (left = 1, right = 2; 3 to inf for multichannel sound files).
#' @param path Character string containing the directory path where the sound files are located.
#' If \code{NULL} (default) then the current working directory is used.
#' @param parallel Numeric. Controls whether parallel computing is applied.
#' It specifies the number of cores to be used. Default is 1 (i.e. no parallel computing).
#' @param pb Logical argument to control progress bar and messages. Default is \code{TRUE}.
#' @return Sound files that have been converted are saved in the new
#' folder "converted_sound_files". If `img = TRUE` then spectrogram images highlighting the silence segments
#' that were removed are also saved.
#' @export
#' @name rm_channels
#' @details The function removes channels from wave files. It works on regular and
#' multichannel wave files. Converted files are saved in a new directory ("converted_sound_files")
#' and original files are not modified.
#' @seealso \code{\link{fixwavs}}, \code{\link{rm_sil}},
#' @examples{
#' # save sound file examples
#' data("Phae.long1")
#' Phae.long1.2 <- stereo(Phae.long1, Phae.long1)
#'
#' writeWave(Phae.long1.2, file.path(tempdir(), "Phae.long1.2.wav"))
#'
#' rm_channels(channels = 1, path = tempdir())
#'
#' #check this floder
#' tempdir()
#' }
#'
#' @references {
#' Araya-Salas, M., & Smith-Vidaurre, G. (2017). warbleR: An R package to streamline analysis of animal acoustic signals. Methods in Ecology and Evolution, 8(2), 184-191.
#' }
#' @author Marcelo Araya-Salas (\email{marceloa27@@gmail.com})
#last modification on Jul-30-2018 (MAS)
rm_channels <- function(files = NULL, channels, path = NULL, parallel = 1, pb = TRUE){
#### set arguments from options
# get function arguments
argms <- methods::formalArgs(checkwavs)
# get warbleR options
opt.argms <- if(!is.null(getOption("warbleR"))) getOption("warbleR") else SILLYNAME <- 0
# rename path for sound files
names(opt.argms)[names(opt.argms) == "wav.path"] <- "path"
# remove options not as default in call and not in function arguments
opt.argms <- opt.argms[!sapply(opt.argms, is.null) & names(opt.argms) %in% argms]
# get arguments set in the call
call.argms <- as.list(base::match.call())[-1]
# remove arguments in options that are in call
opt.argms <- opt.argms[!names(opt.argms) %in% names(call.argms)]
# set options left
if (length(opt.argms) > 0)
for (q in 1:length(opt.argms))
assign(names(opt.argms)[q], opt.argms[[q]])
#check path to working directory
if (is.null(path)) path <- getwd() else if (!dir.exists(path)) stop("'path' provided does not exist")
#read files
fls <- list.files(path = path, pattern = "\\.wav$", ignore.case = TRUE)
#stop if files are not in working directory
if (length(fls) == 0) stop("no .wav files in working directory")
#subet based on file list provided (flist)
if (!is.null(files)) fls <- fls[fls %in% files]
if (length(fls) == 0) stop(".wav files are not in working directory")
dir.create(file.path(path, "converted_sound_files"))
mcwv_FUN <- function(x, channels) {
wv <- tuneR::readWave(file.path(path, x), toWaveMC = TRUE)
if (nchannel(wv) >= max(channels))
{
wv <- wv[ , channels]
if (nchannel(wv) <= 2) wv <- Wave(wv)
writeWave(object = wv, filename = file.path(path, "converted_sound_files", x), extensible = FALSE)
a <- 0
} else a <- 1
return(a)
}
pbapply::pboptions(type = ifelse(as.logical(pb), "timer", "none"))
# set clusters for windows OS
if (Sys.info()[1] == "Windows" & parallel > 1)
cl <- parallel::makePSOCKcluster(getOption("cl.cores", parallel)) else cl <- parallel
# run loop apply function
out <- pbapply::pbsapply(X = fls, cl = cl, FUN = function(x)
{
mcwv_FUN(x, channels)
})
if (sum(out) > 0)
write(file = "", x = paste(sum(out), "file(s) not processed (# channels < max(channels)"))
}
|
\name{USMoney}
\alias{USMoney}
\title{USMoney}
\description{
Money, output and price deflator time series data, 1950--1983.
}
\usage{data("USMoney")}
\format{
A quarterly multiple time series from 1950 to 1983 with 3 variables.
\describe{
\item{gnp}{nominal GNP.}
\item{m1}{M1 measure of money stock.}
\item{deflator}{implicit price deflator for GNP.}
}
}
\source{
Online complements to Greene (2003), Table F20.2.
\url{http://pages.stern.nyu.edu/~wgreene/Text/tables/tablelist5.htm}
}
\references{
Greene, W.H. (2003). \emph{Econometric Analysis}, 5th edition. Upper Saddle River, NJ: Prentice Hall.
}
\seealso{\code{\link{Greene2003}}}
\examples{
data("USMoney")
plot(USMoney)
}
\keyword{datasets}
| /man/USMoney.Rd | no_license | arubhardwaj/AER | R | false | false | 756 | rd | \name{USMoney}
\alias{USMoney}
\title{USMoney}
\description{
Money, output and price deflator time series data, 1950--1983.
}
\usage{data("USMoney")}
\format{
A quarterly multiple time series from 1950 to 1983 with 3 variables.
\describe{
\item{gnp}{nominal GNP.}
\item{m1}{M1 measure of money stock.}
\item{deflator}{implicit price deflator for GNP.}
}
}
\source{
Online complements to Greene (2003), Table F20.2.
\url{http://pages.stern.nyu.edu/~wgreene/Text/tables/tablelist5.htm}
}
\references{
Greene, W.H. (2003). \emph{Econometric Analysis}, 5th edition. Upper Saddle River, NJ: Prentice Hall.
}
\seealso{\code{\link{Greene2003}}}
\examples{
data("USMoney")
plot(USMoney)
}
\keyword{datasets}
|
library(gamlss)
library(dplyr)
library(tidyr)
library(Lahman)
library(ggplot2)
theme_set(theme_bw())
# Grab career batting average of non-pitchers
# (allow players that have pitched <= 3 games, like Ty Cobb)
pitchers <- Pitching %>%
group_by(playerID) %>%
summarize(gamesPitched = sum(G)) %>%
filter(gamesPitched > 3)
# in this setup, we're keeping some extra information for later in the post:
# a "bats" column and a "year" column
career <- Batting %>%
filter(AB > 0) %>%
anti_join(pitchers, by = "playerID") %>%
group_by(playerID) %>%
summarize(H = sum(H), AB = sum(AB), year = mean(yearID)) %>%
mutate(average = H / AB)
# Add player names
career <- Master %>%
tbl_df() %>%
dplyr::select(playerID, nameFirst, nameLast, bats) %>%
unite(name, nameFirst, nameLast, sep = " ") %>%
inner_join(career, by = "playerID")
library(gamlss)
fit <- gamlss(cbind(H, AB - H) ~ log(AB),
data = dplyr::select(career, -bats),
family = BB(mu.link = "identity"))
career_eb <- career %>%
mutate(mu = fitted(fit, "mu"),
sigma = fitted(fit, "sigma"),
alpha0 = mu / sigma,
beta0 = (1 - mu) / sigma,
alpha1 = alpha0 + H,
beta1 = beta0 + AB - H,
estimate = alpha1 / (alpha1 + beta1))
career %>%
count(bats)
career2 <- career %>%
filter(!is.na(bats)) %>%
mutate(bats = relevel(bats, "R"))
fit2 <- gamlss(cbind(H, AB - H) ~ log(AB) + bats,
data = career2,
family = BB(mu.link = "identity"))
library(broom)
tidy(fit2)
sigma <- fitted(fit2, "sigma")[1]
crossing(bats = c("L", "R"),
AB = c(1, 10, 100, 1000, 10000)) %>%
augment(fit2, newdata = .) %>%
rename(mu = .fitted) %>%
crossing(x = seq(.1, .36, .0005)) %>%
mutate(alpha = mu / sigma,
beta = (1 - mu) / sigma,
density = dbeta(x, alpha, beta)) %>%
ggplot(aes(x, density, color = factor(AB), lty = bats)) +
geom_line() +
labs(x = "Batting average",
y = "Prior density",
color = "AB",
lty = "Batting hand")
crossing(bats = c("L", "R"),
AB = c(10, 100, 1000, 10000)) %>%
augment(fit2, newdata = .) %>%
mutate(H = .3 * AB,
alpha0 = .fitted / sigma,
beta0 = (1 - .fitted) / sigma,
alpha1 = alpha0 + H,
beta1 = beta0 + AB - H,
estimate = alpha1 / (alpha1 + beta1),
conf.low = qbeta(.025, alpha1, beta1),
conf.high = qbeta(.975, alpha1, beta1),
record = paste(H, AB, sep = " / ")) %>%
ggplot(aes(estimate, record, color = bats)) +
geom_point() +
geom_errorbarh(aes(xmin = conf.low, xmax = conf.high)) +
labs(x = "Estimate w/ 95% credible interval",
y = "Batting record",
color = "Batting hand")
#over time
career2 %>%
mutate(decade = factor(round(year - 5, -1))) %>%
filter(AB >= 500) %>%
ggplot(aes(decade, average)) +
geom_boxplot() +
theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
ylab("Batting average")
library(splines)
fit3 <- gamlss(cbind(H, AB - H) ~ 0 + ns(year, df = 5) + bats + log(AB),
data = career2,
family = BB(mu.link = "identity"))
plot_gamlss_fit <- function(f) {
  # Plot the prior beta distribution implied by a fitted gamlss model across
  # years, for left- and right-handed batters, at a fixed AB = 1000.
  career2 %>%
    dplyr::select(year, bats) %>%
    distinct() %>%
    filter(bats != "B") %>%
    mutate(AB = 1000) %>%
    augment(f, newdata = .) %>%
    rename(mu = .fitted) %>%
    # BUG FIX: sigma must come from the model being plotted (`f`), not from
    # the globally-defined fit3 -- otherwise plot_gamlss_fit(fit4) would mix
    # fit4's mu with fit3's sigma.
    mutate(sigma = fitted(f, "sigma")[1],
           alpha0 = mu / sigma,
           beta0 = (1 - mu) / sigma,
           conf_low = qbeta(.025, alpha0, beta0),
           conf_high = qbeta(.975, alpha0, beta0)) %>%
    ggplot(aes(year, mu, color = bats, group = bats)) +
    geom_line() +
    geom_ribbon(aes(ymin = conf_low, ymax = conf_high), linetype = 2, alpha = .1) +
    labs(x = "Year",
         y = "Prior distribution (median + 95% quantiles)",
         color = "Batting hand")
}
plot_gamlss_fit(fit3)
#account for effect of handedness changing over time by adding interaction term
fit4 <- gamlss(cbind(H, AB - H) ~ 0 + ns(year, 5) * bats + log(AB),
data = career2,
family = BB(mu.link = "identity"))
plot_gamlss_fit(fit4)
Pitching %>%
dplyr::select(playerID, yearID, GS) %>%
distinct() %>%
inner_join(dplyr::select(Master, playerID, throws)) %>%
count(yearID, throws, wt = GS) %>%
filter(!is.na(throws)) %>%
mutate(percent = n / sum(n)) %>%
filter(throws == "L") %>%
ggplot(aes(yearID, percent)) +
geom_line() +
geom_smooth() +
scale_y_continuous(labels = scales::percent_format()) +
xlab("Year") +
ylab("% of games with left-handed pitcher")
players <- crossing(year = c(1915, 1965, 2015),
bats = c("L", "R"),
H = 30,
AB = 100)
players_posterior <- players %>%
mutate(mu = predict(fit4, what = "mu", newdata = players),
sigma = predict(fit4, what = "sigma", newdata = players, type = "response"),
alpha0 = mu / sigma,
beta0 = (1 - mu) / sigma,
alpha1 = alpha0 + H,
beta1 = beta0 + AB - H)
players_posterior %>%
crossing(x = seq(.15, .3, .001)) %>%
mutate(density = dbeta(x, alpha1, beta1)) %>%
ggplot(aes(x, density, color = bats)) +
geom_line() +
facet_wrap(~ year) +
xlab("Batting average") +
ylab("Posterior density") +
ggtitle("Posterior distributions for batters with 30 / 100")
#whats next mixture models
fit_bb_mle <- function(x, n) {
  # Fit a beta-binomial distribution by maximum likelihood to successes `x`
  # out of `n` trials. Returns a one-row data frame with columns alpha, beta.
  ll <- function(alpha, beta) {
    # negative log-likelihood minimized by mle()
    -sum(VGAM::dbetabinom.ab(x, n, alpha, beta, log = TRUE))
  }
  m <- stats4::mle(ll, start = list(alpha = 30, beta = 100), method = "L-BFGS-B",
                   lower = c(0.0001, .1))
  ab <- stats4::coef(m)
  # base data.frame instead of the long-deprecated tibble::data_frame();
  # callers access $alpha / $beta, which is unchanged
  data.frame(alpha = ab[1], beta = ab[2], row.names = NULL)
}
batting_w_pitchers <- Batting %>%
filter(AB >= 50, lgID == "NL", yearID > 1985) %>%
group_by(playerID) %>%
summarize(H = sum(H), AB = sum(AB), year = mean(yearID)) %>%
mutate(average = H / AB,
isPitcher = ifelse(playerID %in% pitchers$playerID, "Pitcher", "Non-Pitcher"),
isPitcher = relevel(factor(isPitcher), "Pitcher"))
fit <- fit_bb_mle(batting_w_pitchers$H, batting_w_pitchers$AB)
batting_w_pitchers %>%
ggplot(aes(average, fill = isPitcher)) +
geom_histogram(aes(y = ..density..)) +
stat_function(fun = function(x) dbeta(x, fit$alpha, fit$beta), lty = 2) +
labs(fill = "") | /Scripts/drob_heirarchical_bayes.R | no_license | rajkorde/RTestCode | R | false | false | 6,317 | r | library(gamlss)
library(dplyr)
library(tidyr)
library(Lahman)
library(ggplot2)
theme_set(theme_bw())
# Grab career batting average of non-pitchers
# (allow players that have pitched <= 3 games, like Ty Cobb)
pitchers <- Pitching %>%
group_by(playerID) %>%
summarize(gamesPitched = sum(G)) %>%
filter(gamesPitched > 3)
# in this setup, we're keeping some extra information for later in the post:
# a "bats" column and a "year" column
career <- Batting %>%
filter(AB > 0) %>%
anti_join(pitchers, by = "playerID") %>%
group_by(playerID) %>%
summarize(H = sum(H), AB = sum(AB), year = mean(yearID)) %>%
mutate(average = H / AB)
# Add player names
career <- Master %>%
tbl_df() %>%
dplyr::select(playerID, nameFirst, nameLast, bats) %>%
unite(name, nameFirst, nameLast, sep = " ") %>%
inner_join(career, by = "playerID")
library(gamlss)
fit <- gamlss(cbind(H, AB - H) ~ log(AB),
data = dplyr::select(career, -bats),
family = BB(mu.link = "identity"))
career_eb <- career %>%
mutate(mu = fitted(fit, "mu"),
sigma = fitted(fit, "sigma"),
alpha0 = mu / sigma,
beta0 = (1 - mu) / sigma,
alpha1 = alpha0 + H,
beta1 = beta0 + AB - H,
estimate = alpha1 / (alpha1 + beta1))
career %>%
count(bats)
career2 <- career %>%
filter(!is.na(bats)) %>%
mutate(bats = relevel(bats, "R"))
fit2 <- gamlss(cbind(H, AB - H) ~ log(AB) + bats,
data = career2,
family = BB(mu.link = "identity"))
library(broom)
tidy(fit2)
sigma <- fitted(fit2, "sigma")[1]
crossing(bats = c("L", "R"),
AB = c(1, 10, 100, 1000, 10000)) %>%
augment(fit2, newdata = .) %>%
rename(mu = .fitted) %>%
crossing(x = seq(.1, .36, .0005)) %>%
mutate(alpha = mu / sigma,
beta = (1 - mu) / sigma,
density = dbeta(x, alpha, beta)) %>%
ggplot(aes(x, density, color = factor(AB), lty = bats)) +
geom_line() +
labs(x = "Batting average",
y = "Prior density",
color = "AB",
lty = "Batting hand")
crossing(bats = c("L", "R"),
AB = c(10, 100, 1000, 10000)) %>%
augment(fit2, newdata = .) %>%
mutate(H = .3 * AB,
alpha0 = .fitted / sigma,
beta0 = (1 - .fitted) / sigma,
alpha1 = alpha0 + H,
beta1 = beta0 + AB - H,
estimate = alpha1 / (alpha1 + beta1),
conf.low = qbeta(.025, alpha1, beta1),
conf.high = qbeta(.975, alpha1, beta1),
record = paste(H, AB, sep = " / ")) %>%
ggplot(aes(estimate, record, color = bats)) +
geom_point() +
geom_errorbarh(aes(xmin = conf.low, xmax = conf.high)) +
labs(x = "Estimate w/ 95% credible interval",
y = "Batting record",
color = "Batting hand")
#over time
career2 %>%
mutate(decade = factor(round(year - 5, -1))) %>%
filter(AB >= 500) %>%
ggplot(aes(decade, average)) +
geom_boxplot() +
theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
ylab("Batting average")
library(splines)
fit3 <- gamlss(cbind(H, AB - H) ~ 0 + ns(year, df = 5) + bats + log(AB),
data = career2,
family = BB(mu.link = "identity"))
plot_gamlss_fit <- function(f) {
career2 %>%
dplyr::select(year, bats) %>%
distinct() %>%
filter(bats != "B") %>%
mutate(AB = 1000) %>%
augment(f, newdata = .) %>%
rename(mu = .fitted) %>%
mutate(sigma = fitted(fit3, "sigma")[1],
alpha0 = mu / sigma,
beta0 = (1 - mu) / sigma,
conf_low = qbeta(.025, alpha0, beta0),
conf_high = qbeta(.975, alpha0, beta0)) %>%
ggplot(aes(year, mu, color = bats, group = bats)) +
geom_line() +
geom_ribbon(aes(ymin = conf_low, ymax = conf_high), linetype = 2, alpha = .1) +
labs(x = "Year",
y = "Prior distribution (median + 95% quantiles)",
color = "Batting hand")
}
plot_gamlss_fit(fit3)
#account for effect of handedness changing over time by adding interaction term
fit4 <- gamlss(cbind(H, AB - H) ~ 0 + ns(year, 5) * bats + log(AB),
data = career2,
family = BB(mu.link = "identity"))
plot_gamlss_fit(fit4)
Pitching %>%
dplyr::select(playerID, yearID, GS) %>%
distinct() %>%
inner_join(dplyr::select(Master, playerID, throws)) %>%
count(yearID, throws, wt = GS) %>%
filter(!is.na(throws)) %>%
mutate(percent = n / sum(n)) %>%
filter(throws == "L") %>%
ggplot(aes(yearID, percent)) +
geom_line() +
geom_smooth() +
scale_y_continuous(labels = scales::percent_format()) +
xlab("Year") +
ylab("% of games with left-handed pitcher")
players <- crossing(year = c(1915, 1965, 2015),
bats = c("L", "R"),
H = 30,
AB = 100)
players_posterior <- players %>%
mutate(mu = predict(fit4, what = "mu", newdata = players),
sigma = predict(fit4, what = "sigma", newdata = players, type = "response"),
alpha0 = mu / sigma,
beta0 = (1 - mu) / sigma,
alpha1 = alpha0 + H,
beta1 = beta0 + AB - H)
players_posterior %>%
crossing(x = seq(.15, .3, .001)) %>%
mutate(density = dbeta(x, alpha1, beta1)) %>%
ggplot(aes(x, density, color = bats)) +
geom_line() +
facet_wrap(~ year) +
xlab("Batting average") +
ylab("Posterior density") +
ggtitle("Posterior distributions for batters with 30 / 100")
#whats next mixture models
fit_bb_mle <- function(x, n) {
ll <- function(alpha, beta) {
-sum(VGAM::dbetabinom.ab(x, n, alpha, beta, log = TRUE))
}
m <- stats4::mle(ll, start = list(alpha = 30, beta = 100), method = "L-BFGS-B",
lower = c(0.0001, .1))
ab <- stats4::coef(m)
data_frame(alpha = ab[1], beta = ab[2])
}
batting_w_pitchers <- Batting %>%
filter(AB >= 50, lgID == "NL", yearID > 1985) %>%
group_by(playerID) %>%
summarize(H = sum(H), AB = sum(AB), year = mean(yearID)) %>%
mutate(average = H / AB,
isPitcher = ifelse(playerID %in% pitchers$playerID, "Pitcher", "Non-Pitcher"),
isPitcher = relevel(factor(isPitcher), "Pitcher"))
fit <- fit_bb_mle(batting_w_pitchers$H, batting_w_pitchers$AB)
batting_w_pitchers %>%
ggplot(aes(average, fill = isPitcher)) +
geom_histogram(aes(y = ..density..)) +
stat_function(fun = function(x) dbeta(x, fit$alpha, fit$beta), lty = 2) +
labs(fill = "") |
#' Habitat selectivity through quotient analysis
#'
#' \code{quo_an} executes the quotient approach defined in Overholtz et
#' al. 2011. Proportion of fish incidence in a water quality bin is divided by
#' the proportion of stations within that bin. Values greater than 1 indicate
#' preference.
#'
#' This approach was outlined in:
#'
#' Van Der Lingen, CD, L Hutchings, D Merkle, JJ van der Westhuizen, and J
#' Nelson. 2001. Comparative spawning habitats of anchovy (\emph{Engraulis
#' capensis}) and sardine (\emph{Sardinops sagax}) in the southern Benguela
#' upwelling ecosystem. 185-209. Spatial processes and management of marine
#' populations. Alaska Sea Grant College Program, AK-SG-01-02, Fairbanks.
#'
#' It was defined, as it appears here, in:
#'
#' Overholtz, W.J., Hare, J.A. and Keith, C.M., 2011. Impacts of interannual
#' environmental forcing and climate change on the distribution of Atlantic
#' mackerel on the US Northeast continental shelf. Marine and Coastal
#' Fisheries, 3(1), pp.219-232.
#'
#' The proportion of sites in each water quality bin (\emph{pSe}) is
#' determined by dividing the number of stations within the bin by the total
#' number of stations. The porportion of stations with fish (\emph{M}) in
#' each bin (\emph{pMe}) is determined by dividing the number of stations in the
#' bin with fish by the total number of stations with fish. The quotient value
#' for the water quality bin (\emph{Qe}) is calculated as \emph{pMe/pSe}.
#'
#' Values greater than 1 \dQuote{indicate a greater number of positive (fish)
#' stations than expected based on sampling effort}. Confidence intervals are
#' calculated using \code{boot.ci} from the \code{boot} package. The \code{type}
#' input to \code{boot.ci} is "\code{perc}"; all other inputs to \code{boot} and
#' \code{boot.ci} are the function defaults.
#'
#' @param wq Numeric. Water quality data at each station.
#' @param det Numeric. Number of detections at each station.
#' @param bin_width Numeric. Size of water quality bins. Default is 1.
#' @param pres_abs Logical. Should the data be reduced to presence/absence?
#' Default is false.
#' @param R Numeric. Number of bootstrap replicates. Default is 999.
#' @return Output is a data frame with bin labels, number of detections or
#' number sites with detections (depending on \code{pres_abs}
#' input) within each bin, number of sites within each bin, and
#' pMe, pSe, and Qe as defined above. Confidence intervals at the
#' 0.025 and 0.975 percentiles are also provided.
#' @export
quo_an <- function(wq, det, bin_width = 1, pres_abs = F, R = 999){
# Create breaks spanning the observed water quality range; add one extra
# bin when the maximum value falls beyond the last regular break.
minval <- min(wq, na.rm = T)
maxval <- max(wq, na.rm = T)
brks <- seq(minval, maxval, bin_width)
brks <- if(maxval > max(brks)) c(brks, max(brks) + bin_width) else brks
# Create grouping bins (left-open intervals; NOTE(review): cut() with the
# default include.lowest = FALSE excludes the minimum value itself -- confirm
# this is intended).
bins <- cut(wq, brks)
# Aggregate detections by environmental bins.
agg.func <- function(x){
if(pres_abs == T){
# Only count number over 0 if presence/absence
x <- x > 0
}
as.data.frame(xtabs(x ~ bins), responseName = 'det')
}
# Bootstrap statistic: the pMe vector (proportion of detections per bin).
# `bins` stays fixed; only the detection counts are resampled by index.
boot_func <- function(x, index){
x <- x[index]
boot_det <- agg.func(x)
# Bootstrapped pMe
as.vector(boot_det$det)/sum(boot_det$det)
}
strap <- boot::boot(det, boot_func, R)
# Percentile confidence interval (0.025, 0.975) of pMe for each bin;
# positions 4:5 of boot.ci(...)$percent hold the lower/upper limits.
ci <- matrix(nrow = length(strap$t0), ncol = 2)
for(i in 1:length(strap$t0)){
ci[i,] <- boot::boot.ci(strap, type = 'perc', index = i)$percent[4:5]
}
fish <- agg.func(det)
station <- as.data.frame(table(bins), responseName = 'wq')
# Merge data and correctly order bins.
q_an <- merge(fish, station)
# Quotient analysis: Qe = pMe / pSe; CI limits are the bootstrapped pMe
# bounds divided by pSe, mirroring the point estimate's construction.
q_an$pme <- q_an$det / sum(q_an$det)
q_an$pse <- q_an$wq / sum(q_an$wq)
q_an$qe <- q_an$pme / q_an$pse
q_an$ci.025 <- ci[, 1] / q_an$pse
q_an$ci.975 <- ci[, 2] / q_an$pse
names(q_an) <- c('bin', 'detections', 'wq.var', 'pMe', 'pSe',
'Qe', 'CI_0.025', 'CI_0.975')
q_an
}
| /R/quo_an.R | no_license | mhpob/TelemetryR | R | false | false | 4,012 | r | #' Habitat selectivity through quotient analysis
#'
#' \code{quo_an} executes the quotient approach defined in Overholtz et
#' al. 2011. Proportion of fish incidence in a water quality bin is divided by
#' the proportion of stations within that bin. Values greater than 1 indicate
#' preference.
#'
#' This approach was outlined in:
#'
#' Van Der Lingen, CD, L Hutchings, D Merkle, JJ van der Westhuizen, and J
#' Nelson. 2001. Comparative spawning habitats of anchovy (\emph{Engraulis
#' capensis}) and sardine (\emph{Sardinops sagax}) in the southern Benguela
#' upwelling ecosystem. 185-209. Spatial processes and management of marine
#' populations. Alaska Sea Grant College Program, AK-SG-01-02, Fairbanks.
#'
#' It was defined, as it appears here, in:
#'
#' Overholtz, W.J., Hare, J.A. and Keith, C.M., 2011. Impacts of interannual
#' environmental forcing and climate change on the distribution of Atlantic
#' mackerel on the US Northeast continental shelf. Marine and Coastal
#' Fisheries, 3(1), pp.219-232.
#'
#' The proportion of sites in each water quality bin (\emph{pSe}) is
#' determined by dividing the number of stations within the bin by the total
#' number of stations. The porportion of stations with fish (\emph{M}) in
#' each bin (\emph{pMe}) is determined by dividing the number of stations in the
#' bin with fish by the total number of stations with fish. The quotient value
#' for the water quality bin (\emph{Qe}) is calculated as \emph{pMe/pSe}.
#'
#' Values greater than 1 \dQuote{indicate a greater number of positive (fish)
#' stations than expected based on sampling effort}. Confidence intervals are
#' calculated using \code{boot.ci} from the \code{boot} package. The \code{type}
#' input to \code{boot.ci} is "\code{perc}"; all other inputs to \code{boot} and
#' \code{boot.ci} are the function defaults.
#'
#' @param wq Numeric. Water quality data at each station.
#' @param det Numeric. Number of detections at each station.
#' @param bin_width Numeric. Size of water quality bins. Default is 1.
#' @param pres_abs Logical. Should the data be reduced to presence/absence?
#' Default is false.
#' @param R Numeric. Number of bootstrap replicates. Default is 999.
#' @return Output is a data frame with bin labels, number of detections or
#' number sites with detections (depending on \code{pres_abs}
#' input) within each bin, number of sites within each bin, and
#' pMe, pSe, and Qe as defined above. Confidence intervals at the
#' 0.025 and 0.975 percentiles are also provided.
#' @export
quo_an <- function(wq, det, bin_width = 1, pres_abs = F, R = 999){
# Create breaks.
minval <- min(wq, na.rm = T)
maxval <- max(wq, na.rm = T)
brks <- seq(minval, maxval, bin_width)
brks <- if(maxval > max(brks)) c(brks, max(brks) + bin_width) else brks
# Create grouping bins.
bins <- cut(wq, brks)
# Aggregate by environmental bins.
agg.func <- function(x){
if(pres_abs == T){
# Only count number over 0 if presence/absence
x <- x > 0
}
as.data.frame(xtabs(x ~ bins), responseName = 'det')
}
boot_func <- function(x, index){
x <- x[index]
boot_det <- agg.func(x)
# Bootstrapped pMe
as.vector(boot_det$det)/sum(boot_det$det)
}
strap <- boot::boot(det, boot_func, R)
#Confidence Interval
ci <- matrix(nrow = length(strap$t0), ncol = 2)
for(i in 1:length(strap$t0)){
ci[i,] <- boot::boot.ci(strap, type = 'perc', index = i)$percent[4:5]
}
fish <- agg.func(det)
station <- as.data.frame(table(bins), responseName = 'wq')
# Merge data and correctly order bins.
q_an <- merge(fish, station)
# Quotient analysis
q_an$pme <- q_an$det / sum(q_an$det)
q_an$pse <- q_an$wq / sum(q_an$wq)
q_an$qe <- q_an$pme / q_an$pse
q_an$ci.025 <- ci[, 1] / q_an$pse
q_an$ci.975 <- ci[, 2] / q_an$pse
names(q_an) <- c('bin', 'detections', 'wq.var', 'pMe', 'pSe',
'Qe', 'CI_0.025', 'CI_0.975')
q_an
}
|
library(glmnet)

# Training data: column 1 is the response, columns 4+ are the predictors.
# header = TRUE spelled out; the original relied on partial argument
# matching ("head = T"), which is fragile and uses the reassignable T.
mydata = read.table("./TrainingSet/Correlation/upper_aerodigestive_tract.csv", header = TRUE, sep = ",")
x = as.matrix(mydata[, 4:ncol(mydata)])
y = as.matrix(mydata[, 1])

# Reproducible 10-fold cross-validated elastic net (alpha = 0.45, MSE loss,
# predictors not standardized).
set.seed(123)
glm = cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.45, family = "gaussian", standardize = FALSE)

# Append the fitted coefficient path to the model log file.
sink('./Model/EN/Correlation/upper_aerodigestive_tract/upper_aerodigestive_tract_054.txt', append = TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/Correlation/upper_aerodigestive_tract/upper_aerodigestive_tract_054.R | no_license | leon1003/QSMART | R | false | false | 418 | r | library(glmnet)
mydata = read.table("./TrainingSet/Correlation/upper_aerodigestive_tract.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.45,family="gaussian",standardize=FALSE)
sink('./Model/EN/Correlation/upper_aerodigestive_tract/upper_aerodigestive_tract_054.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_comments_from_lines.R
\name{get_comments_from_lines}
\alias{get_comments_from_lines}
\title{Get comments at the beginning of a vector of strings}
\usage{
get_comments_from_lines(lines)
}
\arguments{
\item{lines}{Vector of strings, typically obtained from using \code{scan} with \code{sep='\n'}.}
}
\value{
Vector of strings with the commented lines.
}
\description{
Get comments at the beginning of a vector of strings
}
| /man/get_comments_from_lines.Rd | no_license | Theopapa/rtoolz | R | false | true | 504 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_comments_from_lines.R
\name{get_comments_from_lines}
\alias{get_comments_from_lines}
\title{Get comments at the beginning of a vector of strings}
\usage{
get_comments_from_lines(lines)
}
\arguments{
\item{lines}{Vector of strings, typically obtained from using \code{scan} with \code{sep='\n'}.}
}
\value{
Vector of strings with the commented lines.
}
\description{
Get comments at the beginning of a vector of strings
}
|
#' @importFrom pryr standardise_call
#' @importFrom Deriv Simplify
#' @importFrom digest digest
#' @importFrom DescTools StrAlign
#' @importFrom utils capture.output ls.str
#' @importFrom stringr str_match str_match_all str_extract_all
#' @import methods
#' @import BiocGenerics
#' @useDynLib gpuMagic, .registration = TRUE, .fixes = 'C_'
# Trigger garbage collection when the package is detached so finalizers on
# GPU-backed objects can run.
.onDetach <- function(libpath) {
gc()
}
# Release all device resources and unload the compiled code when the
# package namespace is unloaded.
.onUnload <- function(libpath) {
.gpuResourcesManager$deleteEnv()
library.dynam.unload("gpuMagic", libpath)
}
# Select the first device at load time when at least one OpenCL platform is
# available (getPlatformNum() == 0 means no platform was found).
.onLoad <- function(libname, pkgname) {
if(getPlatformNum()!=0){
setDevice(1)
}
}
# Package-wide debug flag.
DEBUG = TRUE
| /R/pkgSetting.R | no_license | Jiefei-Wang/gpuMagic | R | false | false | 615 | r | #' @importFrom pryr standardise_call
#' @importFrom Deriv Simplify
#' @importFrom digest digest
#' @importFrom DescTools StrAlign
#' @importFrom utils capture.output ls.str
#' @importFrom stringr str_match str_match_all str_extract_all
#' @import methods
#' @import BiocGenerics
#' @useDynLib gpuMagic, .registration = TRUE, .fixes = 'C_'
.onDetach <- function(libpath) {
gc()
}
.onUnload <- function(libpath) {
.gpuResourcesManager$deleteEnv()
library.dynam.unload("gpuMagic", libpath)
}
.onLoad <- function(libname, pkgname) {
if(getPlatformNum()!=0){
setDevice(1)
}
}
DEBUG = TRUE
|
## argument decreasing = TRUE inverts the direction of the order. Numbers from biggest to smallest and
## characters from Z to A. This is helpful when we consider rank#1 the biggest city.
## as.character () will return the vector with the name of the city. If we just return orderdata[rank,1]
## we get a factor instead.
find_city_rank <- function(data, column, rank) {
  # Order rows by the requested column, largest value first (rank 1 = biggest;
  # characters sort Z to A), then return the name in column 1 as a plain
  # character string (guards against the column being stored as a factor).
  sorted_idx <- order(data[[column]], decreasing = TRUE)
  as.character(data[sorted_idx[rank], 1])
}
## characters from Z to A. This is helpful when we consider rank#1 the biggest city.
## as.character () will return the vector with the name of the city. If we just return orderdata[rank,1]
## we get a factor instead.
find_city_rank <- function(data,column,rank){
orderdata <- data[order(decreasing = TRUE,data[,column]),]
return(as.character(orderdata[rank,1]))
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dmst.test.R
\name{dmst.test}
\alias{dmst.test}
\title{Dynamic Minimum Spanning Tree spatial scan test}
\usage{
dmst.test(
coords,
cases,
pop,
w,
ex = sum(cases)/sum(pop) * pop,
nsim = 499,
alpha = 0.1,
ubpop = 0.5,
ubd = 1,
longlat = FALSE,
cl = NULL
)
}
\arguments{
\item{coords}{An \eqn{n \times 2} matrix of centroid
coordinates for the regions.}
\item{cases}{The number of cases observed in each region.}
\item{pop}{The population size associated with each
region.}
\item{w}{A binary spatial adjacency matrix for the
regions.}
\item{ex}{The expected number of cases for each region.
The default is calculated under the constant risk
hypothesis.}
\item{nsim}{The number of simulations from which to
compute the p-value.}
\item{alpha}{The significance level to determine whether
a cluster is significant. Default is 0.10.}
\item{ubpop}{The upper bound of the proportion of the
total population to consider for a cluster.}
\item{ubd}{A proportion in (0, 1]. The distance of
potential clusters must be no more than \code{ubd * m},
where \code{m} is the maximum intercentroid distance
between all coordinates.}
\item{longlat}{The default is \code{FALSE}, which
specifies that Euclidean distance should be used. If
\code{longlat} is \code{TRUE}, then the great circle
distance is used to calculate the intercentroid
distance.}
\item{cl}{
A cluster object created by \code{\link{makeCluster}},
or an integer to indicate number of child-processes
(integer values are ignored on Windows) for parallel evaluations
(see Details on performance).
}
}
\value{
Returns a \code{smerc_cluster} object.
}
\description{
\code{dmst.test} implements the dynamic Minimum Spanning
Tree scan test of Assuncao et al. (2006). Starting with a
single region as a current zone, new candidate zones are
constructed by combining the current zone with the
connected region that maximizes the resulting likelihood
ratio test statistic. This procedure is repeated until
the population or distance upper bounds are reached. The
same procedure is repeated for each region. The clusters
returned are non-overlapping, ordered from most
significant to least significant. The first cluster is
the most likely to be a cluster. If no significant
clusters are found, then the most likely cluster is
returned (along with a warning).
}
\details{
The maximum intercentroid distance can be found by
executing the command:
\code{sp::spDists(as.matrix(coords), longlat = longlat)},
based on the specified values of \code{coords} and
\code{longlat}.
}
\examples{
data(nydf)
data(nyw)
coords = with(nydf, cbind(longitude, latitude))
out = dmst.test(coords = coords, cases = floor(nydf$cases),
pop = nydf$pop, w = nyw,
alpha = 0.12, longlat = TRUE,
nsim = 2, ubpop = 0.05, ubd = 0.1)
data(nypoly)
library(sp)
plot(nypoly, col = color.clusters(out))
}
\references{
Assuncao, R.M., Costa, M.A., Tavares, A. and
Neto, S.J.F. (2006). Fast detection of arbitrarily
shaped disease clusters, Statistics in Medicine, 25,
723-742. <doi:10.1002/sim.2411>
}
\seealso{
\code{\link{print.smerc_cluster}},
\code{\link{summary.smerc_cluster}},
\code{\link{plot.smerc_cluster}},
\code{\link{scan.stat}}, \code{\link{scan.test}}
}
\author{
Joshua French
}
| /smerc/man/dmst.test.Rd | no_license | akhikolla/InformationHouse | R | false | true | 3,474 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dmst.test.R
\name{dmst.test}
\alias{dmst.test}
\title{Dynamic Minimum Spanning Tree spatial scan test}
\usage{
dmst.test(
coords,
cases,
pop,
w,
ex = sum(cases)/sum(pop) * pop,
nsim = 499,
alpha = 0.1,
ubpop = 0.5,
ubd = 1,
longlat = FALSE,
cl = NULL
)
}
\arguments{
\item{coords}{An \eqn{n \times 2} matrix of centroid
coordinates for the regions.}
\item{cases}{The number of cases observed in each region.}
\item{pop}{The population size associated with each
region.}
\item{w}{A binary spatial adjacency matrix for the
regions.}
\item{ex}{The expected number of cases for each region.
The default is calculated under the constant risk
hypothesis.}
\item{nsim}{The number of simulations from which to
compute the p-value.}
\item{alpha}{The significance level to determine whether
a cluster is significant. Default is 0.10.}
\item{ubpop}{The upperbound of the proportion of the
total population to consider for a cluster.}
\item{ubd}{A proportion in (0, 1]. The distance of
potential clusters must be no more than \code{ubd * m},
where \code{m} is the maximum intercentroid distance
between all coordinates.}
\item{longlat}{The default is \code{FALSE}, which
specifies that Euclidean distance should be used. If
\code{longlat} is \code{TRUE}, then the great circle
distance is used to calculate the intercentroid
distance.}
\item{cl}{
A cluster object created by \code{\link{makeCluster}},
or an integer to indicate number of child-processes
(integer values are ignored on Windows) for parallel evaluations
(see Details on performance).
}
}
\value{
Returns a \code{smerc_cluster} object.
}
\description{
\code{dmst.test} implements the dynamic Minimum Spanning
Tree scan test of Assuncao et al. (2006). Starting with a
single region as a current zone, new candidate zones are
constructed by combining the current zone with the
connected region that maximizes the resulting likelihood
ratio test statistic. This procedure is repeated until
the population or distance upper bounds are reached. The
same procedure is repeated for each region. The clusters
returned are non-overlapping, ordered from most
significant to least significant. The first cluster is
the most likely to be a cluster. If no significant
clusters are found, then the most likely cluster is
returned (along with a warning).
}
\details{
The maximum intercentroid distance can be found by
executing the command:
\code{sp::spDists(as.matrix(coords), longlat = longlat)},
based on the specified values of \code{coords} and
\code{longlat}.
}
\examples{
data(nydf)
data(nyw)
coords = with(nydf, cbind(longitude, latitude))
out = dmst.test(coords = coords, cases = floor(nydf$cases),
pop = nydf$pop, w = nyw,
alpha = 0.12, longlat = TRUE,
nsim = 2, ubpop = 0.05, ubd = 0.1)
data(nypoly)
library(sp)
plot(nypoly, col = color.clusters(out))
}
\references{
Assuncao, R.M., Costa, M.A., Tavares, A. and
Neto, S.J.F. (2006). Fast detection of arbitrarily
shaped disease clusters, Statistics in Medicine, 25,
723-742. <doi:10.1002/sim.2411>
}
\seealso{
\code{\link{print.smerc_cluster}},
\code{\link{summary.smerc_cluster}},
\code{\link{plot.smerc_cluster}},
\code{\link{scan.stat}}, \code{\link{scan.test}}
}
\author{
Joshua French
}
|
#' Get Finnish Population grid in two different resolutions for years 2010-2019
#
#' Thin wrapper around Finnish population grid data provided by
#' [Statistics Finland](https://www.stat.fi/org/avoindata/paikkatietoaineistot/vaestoruutuaineisto_1km_en.html).
#'
#'
#' @param year A numeric for year of the population grid. Years available 2010-2019.
#' @param resolution 1 (1km x 1km) or 5 (5km x 5km)
#'
#' @return sf object
#'
#' @author Markus Kainu <markus.kainu@@kela.fi>, Joona Lehtomäki <joona.lehtomaki@@iki.fi>
#'
#' @export
#'
#' @examples
#' \dontrun{
#' f <- get_population_grid(year=2017)
#' plot(f)
#' }
#'
#' @rdname get_population_grid
#' @export
get_population_grid <- function(year = 2017, resolution = 5){
# Fetch the Finnish population grid for `year` at `resolution` (km) from the
# Statistics Finland WFS service and return it as an sf object.
# Check if you have access to http://geo.stat.fi/geoserver/wfs
if (!check_api_access()){
# NOTE(review): on failure this only emits a message and implicitly returns
# invisible(NULL); callers get NULL rather than an error -- confirm intent.
message("You have no access to http://geo.stat.fi/geoserver/wfs. 
Please check your connection, firewall settings and/or review your proxy settings")
} else {
# Standard and compulsory query parameters
base_queries <- list("service" = "WFS", "version" = wfs_providers$Tilastokeskus$version)
# WFS layer name, e.g. "<prefix><year>_<resolution>km", assembled from the
# provider configuration plus the function arguments.
layer <- paste0(wfs_providers$Tilastokeskus$layer_typename$get_population_grid, year, "_", resolution, "km")
# Note that there should be at least one parameter: request type.
queries <- append(base_queries, list(request = "getFeature", typename = layer))
api_obj <- wfs_api(base_url= wfs_providers$Tilastokeskus$URL, queries = queries)
# Convert the raw WFS response into an sf object.
sf_obj <- to_sf(api_obj)
# If the data retrieved has no CRS defined, use ETRS89 / TM35FIN
# (epsg:3067)
if (is.na(sf::st_crs(sf_obj))) {
warning("Coercing CRS to epsg:3067 (ETRS89 / TM35FIN)", call. = FALSE)
sf::st_crs(sf_obj) <- 3067
}
# Surface the data license to the user on every successful fetch.
message("Data is licensed under: ", wfs_providers$Tilastokeskus$license)
return(sf_obj)
}
}
| /R/get_population_grid.R | no_license | phineeeeeeeeeeeeeeeees/geofi | R | false | false | 1,827 | r | #' Get Finnish Population grid in two different resolutions for years 2010-2019
#
#' Thin wrapper around Finnish population grid data provided by
#' [Statistics Finland](https://www.stat.fi/org/avoindata/paikkatietoaineistot/vaestoruutuaineisto_1km_en.html).
#'
#'
#' @param year A numeric for year of the population grid. Years available 2010-2019.
#' @param resolution 1 (1km x 1km) or 5 (5km x 5km)
#'
#' @return sf object
#'
#' @author Markus Kainu <markus.kainu@@kela.fi>, Joona Lehtomäki <joona.lehtomaki@@iki.fi>
#'
#' @export
#'
#' @examples
#' \dontrun{
#' f <- get_population_grid(year=2017)
#' plot(f)
#' }
#'
#' @rdname get_population_grid
#' @export
get_population_grid <- function(year = 2017, resolution = 5){
# Check if you have access to http://geo.stat.fi/geoserver/wfs
if (!check_api_access()){
message("You have no access to http://geo.stat.fi/geoserver/wfs.
Please check your connection, firewall settings and/or review your proxy settings")
} else {
# Standard and compulsory query parameters
base_queries <- list("service" = "WFS", "version" = wfs_providers$Tilastokeskus$version)
layer <- paste0(wfs_providers$Tilastokeskus$layer_typename$get_population_grid, year, "_", resolution, "km")
# Note that there should be at least one parameter: request type.
queries <- append(base_queries, list(request = "getFeature", typename = layer))
api_obj <- wfs_api(base_url= wfs_providers$Tilastokeskus$URL, queries = queries)
sf_obj <- to_sf(api_obj)
# If the data retrieved has no CRS defined, use ETRS89 / TM35FIN
# (epsg:3067)
if (is.na(sf::st_crs(sf_obj))) {
warning("Coercing CRS to epsg:3067 (ETRS89 / TM35FIN)", call. = FALSE)
sf::st_crs(sf_obj) <- 3067
}
message("Data is licensed under: ", wfs_providers$Tilastokeskus$license)
return(sf_obj)
}
}
|
#' Nonparametric Rank Tests for Independence
#'
#' This function performs a nonparametric test of ranking data based
#' on the correlation. This function can be applied to the ranking data
#' with missing ranks and tie ranks.
#'
#' @param X1 a vector, using NA to stand for the missing ranks
#' @param X2 the same as X1
#' @param method whether the test is based on Spearman correlation or Kendall
#' correlation
#' @return a list of the test statistics
#' @export
#' @author Li Qinglong <liqinglong0830@@163.com>
#' @examples
#' Arith = c(14, 18, 23, 26, 27, 30, 40, NA, NA)
#' Lang = c(28, 14, 46, NA, 53, NA, 54, 50, NA)
#' independence.test(Arith, Lang, method = "spearman")
#' independence.test(Arith, Lang, method = "kendall")
#' @references Rank Correlation Methods for Missing Data, Mayer Alvo and Paul
#' Cablio \cr
#' Nonparametric Rank Tests for Independence in Opinion Surveys, Philip L.H. Yu,
#' K.F. Lam, and Mayer Alvo
independence.test <- function(X1, X2, method = c("spearman", "kendall"))
{
# Test for independence extended to incomplete and tied rankings
X1 = as.vector(X1)
X2 = as.vector(X2)
t = length(X1)
if (t != length(X2))
{
stop("The lengths of X1 and X2 are not equal!")
}
k1 = sum(!is.na(X1))
k2 = sum(!is.na(X2))
if (k1 == 0 || k2 == 0)
{
stop("No observations in X1 or X2!")
}
# Make sure k1 <= k2
if (k1 > k2)
# Swap X1 and X2
{
temp = X1
X1 = X2
X2 = temp
temp = k1
k1 = k2
k2 = temp
}
rank1 = rank(X1, ties.method = "average", na.last = "keep")
rank2 = rank(X2, ties.method = "average", na.last = "keep")
# Using right invariant property
o = which(!is.na(rank1)) # Label of obejects in ranking 1
o_star = rank2[o]
o_star[is.na(o_star)] = (k2 + 1) / 2
o_mean = mean(o_star)
ind = !(is.na(X1 + X2))
k_star = sum(ind)
method = match.arg(method)
if (method == "spearman")
{
As_star = (t + 1)^2 / ((k1 + 1) * (k2 + 1)) *
sum((rank1[ind] - (k1 + 1) / 2) * (rank2[ind] - (k2 + 1) / 2))
# Under H1
varAs_star = ((t + 1) ^ 2 / ((k1 + 1) * (k2 + 1)))^2 * (1 / (k1 - 1)) *
sum((o_star - o_mean)^2) * sum((rank1[o] - (k1 + 1) / 2)^2)
# Under H2
K1 = k1 * (k1 - 1) / (k1 + 1)
K2 = k2 * (k2 - 1) / (k2 + 1)
# varAs_star = (t + 1)^4 / (144 * (t - 1)) * K1 * K2
Cs = t * (t^2 - 1) / 12
# Genaralized Spearman distance
ds_star = Cs - As_star
if (k1 %% 2) # if k1 is odd
{
rs = (k1^2 - 1) * (3 * k2 - k1)
}
else # if k1 is even
{
rs = k1 * (k1 * (3 * k2 - k1) - 2)
}
rs = rs * (t + 1) ^ 2 / (24 * (k1 + 1) * (k2 + 1))
# ms = Cs - rs
# Ms = Cs + rs
# Correlation computation is only right when there is no tied rankings
# Type a correlation
Alpha_a = As_star / rs
# Type b correlation
Alpha_b = 12 * As_star / ((t + 1)^2 * sqrt(K1 * K2))
# Standarlized test statistic
z_stat = As_star / sqrt(varAs_star)
# Double tail test
p_value = 2 * (1 - pnorm(abs(z_stat)))
res = list( Similarity_Measure = As_star,
# Spearman_Correlation = Alpha_a,
Test_Statistic = z_stat,
P_value = p_value,
Distance = ds_star
)
}
else if (method == "kendall")
{
a1 = matrix(rep(0, t^2), ncol = t)
a2 = matrix(rep(0, t^2), ncol = t)
for (i in 1:(t-1))
{
for (j in (i+1):t)
{
a1[i, j] = sum(c(sign(rank1[i] - rank1[j]) * !is.na(rank1[i]) * !is.na(rank1[j]),
is.na(rank1[j]) * !is.na(rank1[i]) * (1 - 2 * rank1[i] / (k1 + 1)),
is.na(rank1[i]) * !is.na(rank1[j]) * (2 * rank1[j] / (k1 + 1) - 1)),
na.rm = TRUE)
a2[i, j] = sum(c(sign(rank2[i] - rank2[j]) * !is.na(rank2[i]) * !is.na(rank2[j]),
is.na(rank2[j]) * !is.na(rank2[i]) * (1 - 2 * rank2[i] / (k2 + 1)),
is.na(rank2[i]) * !is.na(rank2[j]) * (2 * rank2[j] / (k2 + 1) - 1)),
na.rm = TRUE)
}
}
Ak_star = sum(a1 * a2)
# Under H1 (16 / t^2) * varAs_star
varAk_star = (16 / t^2) * ((t + 1) ^ 2 / ((k1 + 1) * (k2 + 1)))^2 *
(1 / (k1 - 1)) * sum((o_star - o_mean)^2) * sum((rank1[o] - (k1 + 1) / 2)^2)
# Under H2
K1 = k1 * (k1 - 1) / (k1 + 1)
K2 = k2 * (k2 - 1) / (k2 + 1)
# K1 = k1 * (k1 - 1) / (k1 + 1) * (1 - sum(g1^3 - g1) / (k1^3 - k1))
# K2 = k2 * (k2 - 1) / (k2 + 1) * (1 - sum(g2^3 - g2) / (k2^3 - k2))
# varAk_star = K1 * K2 / (9 * t * (t - 1)) *
# ((2 * t + k1 + 3) * (2 * t + k2 + 3) / 2 + (t^2 - k1 - 2) * (t^2 - k2 - 2) / (t - 2))
Ck = t * (t - 1) / 2
# Genaralized Spearman distance
dk_star = Ck - Ak_star
if (k1 %% 2) # if k1 is odd
{
rk = (k1^2 - 1) * (t * (3 * k2 - k1) + k2 * (k1 + 3))
}
else # if k1 is even
{
rk = k1 * (3 * k1 * k2 * (t + 1) - (k1^2 + 2) * (t - k2) - 3 * (k2 + 1))
}
rk = rk / (6 * (k1 + 1) * (k2 + 1))
# ms = Cs - rs
# Ms = Cs + rs
# Correlation computation is only right when there is no tied rankings
# Type a correlation
Alpha_a = Ak_star / rk
# Type b correlation
Alpha_b = 6 * Ak_star / sqrt((2 * t + k1 + 3) * (2 * t + k2 + 3) * K1 * K2)
# Standarlized test statistic
z_stat = Ak_star / sqrt(varAk_star)
# Double tail test
p_value = 2 * (1 - pnorm(abs(z_stat)))
res = list( Similarity_Measure = Ak_star,
# Kendall_Correlation = Alpha_a,
Test_Statistic = z_stat,
P_value = p_value,
Distance = dk_star
)
}
return(res)
} | /R/independence.test.R | no_license | zfy1989lee/StatMethRank | R | false | false | 5,269 | r | #' Nonparametric Rank Tests for Independence
#'
#' This function performs a nonparametric test of ranking data based
#' on the correlation. This function can be applied to the ranking data
#' with missing ranks and tie ranks.
#'
#' @param X1 a vector, using NA to stand for the missing ranks
#' @param X2 the same as X1
#' @param method whether the test is based on Spearman correlation or Kendall
#' correlation
#' @return a list of the test statistics
#' @export
#' @author Li Qinglong <liqinglong0830@@163.com>
#' @examples
#' Arith = c(14, 18, 23, 26, 27, 30, 40, NA, NA)
#' Lang = c(28, 14, 46, NA, 53, NA, 54, 50, NA)
#' independence.test(Arith, Lang, method = "spearman")
#' independence.test(Arith, Lang, method = "kendall")
#' @references Rank Correlation Methods for Missing Data, Mayer Alvo and Paul
#' Cablio \cr
#' Nonparametric Rank Tests for Independence in Opinion Surveys, Philip L.H. Yu,
#' K.F. Lam, and Mayer Alvo
independence.test <- function(X1, X2, method = c("spearman", "kendall"))
{
# Test for independence extended to incomplete and tied rankings.
# Implementation notes:
# - t is the total number of items; k1, k2 count the observed (non-NA)
#   ranks, with inputs swapped below so that k1 <= k2.
# - Missing ranks in the second ranking at positions observed in the first
#   are imputed with the mid-rank (k2 + 1) / 2.
# - NOTE(review): k_star (number of jointly observed items) is computed but
#   never used afterwards -- confirm whether it should enter the statistic.
# - NOTE(review): both variance formulas divide by (k1 - 1), so k1 == 1
#   yields a non-finite variance; single-observation inputs are unguarded.
X1 = as.vector(X1)
X2 = as.vector(X2)
t = length(X1)
if (t != length(X2))
{
stop("The lengths of X1 and X2 are not equal!")
}
k1 = sum(!is.na(X1))
k2 = sum(!is.na(X2))
if (k1 == 0 || k2 == 0)
{
stop("No observations in X1 or X2!")
}
# Make sure k1 <= k2
if (k1 > k2)
# Swap X1 and X2
{
temp = X1
X1 = X2
X2 = temp
temp = k1
k1 = k2
k2 = temp
}
# Mid-ranks for ties; NAs keep their positions so indices line up below.
rank1 = rank(X1, ties.method = "average", na.last = "keep")
rank2 = rank(X2, ties.method = "average", na.last = "keep")
# Using right invariant property
o = which(!is.na(rank1)) # Labels of objects observed in ranking 1
# Ranks in ranking 2 of the objects observed in ranking 1, with missing
# values imputed by the mid-rank of ranking 2.
o_star = rank2[o]
o_star[is.na(o_star)] = (k2 + 1) / 2
o_mean = mean(o_star)
# Indicator of items observed in BOTH rankings (NA propagates through +).
ind = !(is.na(X1 + X2))
k_star = sum(ind)
method = match.arg(method)
if (method == "spearman")
{
# Spearman-type similarity measure over jointly observed items, with
# ranks centred at their respective mid-ranks and rescaled by (t+1)^2.
As_star = (t + 1)^2 / ((k1 + 1) * (k2 + 1)) *
sum((rank1[ind] - (k1 + 1) / 2) * (rank2[ind] - (k2 + 1) / 2))
# Under H1
varAs_star = ((t + 1) ^ 2 / ((k1 + 1) * (k2 + 1)))^2 * (1 / (k1 - 1)) *
sum((o_star - o_mean)^2) * sum((rank1[o] - (k1 + 1) / 2)^2)
# Under H2
K1 = k1 * (k1 - 1) / (k1 + 1)
K2 = k2 * (k2 - 1) / (k2 + 1)
# varAs_star = (t + 1)^4 / (144 * (t - 1)) * K1 * K2
# Maximum attainable similarity for complete rankings of t items.
Cs = t * (t^2 - 1) / 12
# Generalized Spearman distance
ds_star = Cs - As_star
# Normalizing constant rs; its closed form depends on the parity of k1.
if (k1 %% 2) # if k1 is odd
{
rs = (k1^2 - 1) * (3 * k2 - k1)
}
else # if k1 is even
{
rs = k1 * (k1 * (3 * k2 - k1) - 2)
}
rs = rs * (t + 1) ^ 2 / (24 * (k1 + 1) * (k2 + 1))
# ms = Cs - rs
# Ms = Cs + rs
# Correlation computation is only right when there are no tied rankings
# Type a correlation
Alpha_a = As_star / rs
# Type b correlation
Alpha_b = 12 * As_star / ((t + 1)^2 * sqrt(K1 * K2))
# Standardized test statistic
z_stat = As_star / sqrt(varAs_star)
# Two-tailed p-value under the asymptotic normal approximation.
p_value = 2 * (1 - pnorm(abs(z_stat)))
res = list( Similarity_Measure = As_star,
# Spearman_Correlation = Alpha_a,
Test_Statistic = z_stat,
P_value = p_value,
Distance = ds_star
)
}
else if (method == "kendall")
{
# Pairwise concordance scores for each ranking; a[i, j] is the expected
# sign of the (i, j) comparison, with partial credit when one of the two
# items is unranked (expected sign given a uniformly random position).
a1 = matrix(rep(0, t^2), ncol = t)
a2 = matrix(rep(0, t^2), ncol = t)
for (i in 1:(t-1))
{
for (j in (i+1):t)
{
a1[i, j] = sum(c(sign(rank1[i] - rank1[j]) * !is.na(rank1[i]) * !is.na(rank1[j]),
is.na(rank1[j]) * !is.na(rank1[i]) * (1 - 2 * rank1[i] / (k1 + 1)),
is.na(rank1[i]) * !is.na(rank1[j]) * (2 * rank1[j] / (k1 + 1) - 1)),
na.rm = TRUE)
a2[i, j] = sum(c(sign(rank2[i] - rank2[j]) * !is.na(rank2[i]) * !is.na(rank2[j]),
is.na(rank2[j]) * !is.na(rank2[i]) * (1 - 2 * rank2[i] / (k2 + 1)),
is.na(rank2[i]) * !is.na(rank2[j]) * (2 * rank2[j] / (k2 + 1) - 1)),
na.rm = TRUE)
}
}
# Kendall-type similarity: sum of products of pairwise scores.
Ak_star = sum(a1 * a2)
# Under H1: equals (16 / t^2) * varAs_star
varAk_star = (16 / t^2) * ((t + 1) ^ 2 / ((k1 + 1) * (k2 + 1)))^2 *
(1 / (k1 - 1)) * sum((o_star - o_mean)^2) * sum((rank1[o] - (k1 + 1) / 2)^2)
# Under H2
K1 = k1 * (k1 - 1) / (k1 + 1)
K2 = k2 * (k2 - 1) / (k2 + 1)
# K1 = k1 * (k1 - 1) / (k1 + 1) * (1 - sum(g1^3 - g1) / (k1^3 - k1))
# K2 = k2 * (k2 - 1) / (k2 + 1) * (1 - sum(g2^3 - g2) / (k2^3 - k2))
# varAk_star = K1 * K2 / (9 * t * (t - 1)) *
# ((2 * t + k1 + 3) * (2 * t + k2 + 3) / 2 + (t^2 - k1 - 2) * (t^2 - k2 - 2) / (t - 2))
# Maximum attainable Kendall similarity (number of item pairs).
Ck = t * (t - 1) / 2
# Generalized Kendall distance (Ck minus the similarity measure)
dk_star = Ck - Ak_star
# Normalizing constant rk; closed form depends on the parity of k1.
if (k1 %% 2) # if k1 is odd
{
rk = (k1^2 - 1) * (t * (3 * k2 - k1) + k2 * (k1 + 3))
}
else # if k1 is even
{
rk = k1 * (3 * k1 * k2 * (t + 1) - (k1^2 + 2) * (t - k2) - 3 * (k2 + 1))
}
rk = rk / (6 * (k1 + 1) * (k2 + 1))
# ms = Cs - rs
# Ms = Cs + rs
# Correlation computation is only right when there are no tied rankings
# Type a correlation
Alpha_a = Ak_star / rk
# Type b correlation
Alpha_b = 6 * Ak_star / sqrt((2 * t + k1 + 3) * (2 * t + k2 + 3) * K1 * K2)
# Standardized test statistic
z_stat = Ak_star / sqrt(varAk_star)
# Two-tailed p-value under the asymptotic normal approximation.
p_value = 2 * (1 - pnorm(abs(z_stat)))
res = list( Similarity_Measure = Ak_star,
# Kendall_Correlation = Alpha_a,
Test_Statistic = z_stat,
P_value = p_value,
Distance = dk_star
)
}
return(res)
}
## ----setup, include=FALSE-----------------------------------------------------
# Global knitr chunk options for the rendered document: echo all code.
knitr::opts_chunk$set(echo = TRUE)
## ---- echo=FALSE--------------------------------------------------------------
# Master switch: the downstream chunks in this purled script are guarded by
# eval=doAll, so the download/normalisation pipeline below stays disabled
# unless this is flipped to TRUE.
doAll=FALSE
## ---- eval=doAll, results="hide"----------------------------------------------
# pacman::p_load(Biobase, GEOquery, limma, tidyr, AnnotationDbi, BiocGenerics, hgug4112a.db)
## ---- eval=doAll, results="hide"----------------------------------------------
# wd = getwd()
# system("mkdir dirData")
# dirData = paste0(wd,"/dirData/")
# setwd(dirData)
# GEOquery::getGEOSuppFiles("GSE50467")
# setwd("GSE50467")
# system("tar xvf GSE50467_RAW.tar")
# system("gzip -d *.gz")
# x=limma::read.maimages(dir(".","txt"),"agilent",green.only=TRUE,other.columns="gIsWellAboveBG")
# GSE50467raw = x
# save(GSE50467raw, file=paste0(dirData,"GSE50467raw.rda"))
# setwd(wd)
## ---- eval=doAll, message=FALSE-----------------------------------------------
# load(paste0(dirData,"GSE50467raw.rda"))
# annot = AnnotationDbi::select(hgug4112a.db, keys=GSE50467raw$genes[,"ProbeName"],
# column = c("ENTREZID","ENSEMBL"), keytype="PROBEID")
# annot = annot[!is.na(annot[,"ENTREZID"]),]
# uniq_probe = match(unique(annot[,1]),annot[,1])
# annot1 = annot[uniq_probe,]
# uniq_entrez = match(unique(annot1[,2]), annot1[,2])
# annot2 = annot1[uniq_entrez,]
# annot2 = na.omit(annot2)
# GSE50467raw = GSE50467raw[match(annot2[,1],GSE50467raw$genes[,"ProbeName"]),]
# rownames(GSE50467raw) = annot2[,1]
# save(GSE50467raw, file=paste0(dirData,"GSE50467raw.rda"))
## ---- warning = FALSE, eval=doAll---------------------------------------------
# load(paste0(dirData,"GSE50467raw.rda"))
# n.arrays = c(1:ncol(GSE50467raw))
# invisible(lapply(n.arrays,function(x) {plotMA(GSE50467raw, array=x)}))
# plotDensities(GSE50467raw,log=TRUE,legend=FALSE, main="Histograma de las muestras sin normalizar")
# boxplot(GSE50467raw$E, xlab="Muestras", main="Boxplot de las muestras sin normalizar", xaxt="n")
## ---- eval=doAll, results="hide"----------------------------------------------
# GSE50467=backgroundCorrect(GSE50467raw, method="normexp")
# GSE50467=normalizeBetweenArrays(GSE50467, method="quantile")
# save(GSE50467, file=paste0(dirData,"GSE50467.rda"))
## ---- eval=doAll--------------------------------------------------------------
# load(paste0(dirData,"GSE50467.rda"))
# setwd(dirData)
# system("wget https://www.ebi.ac.uk/arrayexpress/files/E-GEOD-50467/E-GEOD-50467.sdrf.txt")
# fenodata = read.csv("E-GEOD-50467.sdrf.txt",sep="\t",header=TRUE)
# setwd(wd)
# fenodata = fenodata[order(fenodata$Source.Name),]
# pd = new("AnnotatedDataFrame", data = fenodata)
# rownames(pd) = fenodata$Source.Name
## ----echo=FALSE, eval=doAll---------------------------------------------------
# rownames(pd)
## ---- eval=doAll--------------------------------------------------------------
# cols = c()
# colnames(GSE50467$E) = as.vector(lapply(colnames(GSE50467$E), function(x){c(cols, paste0(substr(x, 1, 10), " 1"))}))
## ---- eval=doAll--------------------------------------------------------------
# setwd(dirData)
# system("wget https://www.ebi.ac.uk/arrayexpress/files/E-GEOD-50467/E-GEOD-50467.idf.txt")
# experimentdata = read.csv("E-GEOD-50467.idf.txt",header = FALSE, sep="\t")
# setwd(wd)
# experimentdata2 = t(tidyr::unite(experimentdata, "data", 2:8, sep=" "))
# exp.names = experimentdata2[1,]
# exp.list = as.list(experimentdata2[-1,])
# names(exp.list) = exp.names
# MIAME = MIAME(name=exp.list$`Publication Author List`, lab = exp.list$`Person Address`, contact = exp.list$`Person Email`, title = exp.list$`Investigation Title`, abstract=exp.list$`Experiment Description`, url = paste0("http://dx.doi.org/",substr(exp.list$`Publication DOI`, 1,22)), pubMedIds = substr(exp.list$`Pubmed ID`, 1, 8), other = list(ExtraInfo = 'MIAME created from list with experimental data.'))
## ---- eval=doAll--------------------------------------------------------------
# MIAME
## ---- eval=doAll--------------------------------------------------------------
# rownames(annot2) = annot2[,1]
# fD = new("AnnotatedDataFrame", data = annot2)
## ---- eval=doAll--------------------------------------------------------------
# Exp.set = new("ExpressionSet",exprs=GSE50467$E,phenoData=pd,experimentData = MIAME, featureData=fD, annotation = "hgug4112a.db")
# save(Exp.set, file=paste0(wd,"/Eset50467.rda"))
## ---- warning=FALSE, eval=doAll-----------------------------------------------
# n.arrays = c(1:ncol(GSE50467))
# invisible(lapply(n.arrays,function(x) {plotMA.EList(GSE50467, array=x)}))
# plotDensities(GSE50467,log=TRUE,legend=FALSE, main="Histograma de las muestras normalizadas")
# boxplot(GSE50467$E, xlab="Muestras", main="Boxplot de las muestras normalizadas", xaxt="n")
## ---- eval=doAll, message=FALSE-----------------------------------------------
# final_annot = AnnotationDbi::select(hgug4112a.db, keys=fData(Exp.set)$PROBEID,
# column=c("ENTREZID","ENSEMBL", "SYMBOL"), keytype="PROBEID")
# fData(Exp.set) = final_annot[match(featureNames(Exp.set),final_annot$PROBEID),]
# head(fData(Exp.set))
# save(Exp.set, file=paste0(wd,"/Eset50467.rda"))
| /basic_diff.expression/enriquepresa/inst/doc/tarea1.R | no_license | eprdz/pipelines_git | R | false | false | 5,249 | r | ## ----setup, include=FALSE-----------------------------------------------------
knitr::opts_chunk$set(echo = TRUE)
## ---- echo=FALSE--------------------------------------------------------------
doAll=FALSE
## ---- eval=doAll, results="hide"----------------------------------------------
# pacman::p_load(Biobase, GEOquery, limma, tidyr, AnnotationDbi, BiocGenerics, hgug4112a.db)
## ---- eval=doAll, results="hide"----------------------------------------------
# wd = getwd()
# system("mkdir dirData")
# dirData = paste0(wd,"/dirData/")
# setwd(dirData)
# GEOquery::getGEOSuppFiles("GSE50467")
# setwd("GSE50467")
# system("tar xvf GSE50467_RAW.tar")
# system("gzip -d *.gz")
# x=limma::read.maimages(dir(".","txt"),"agilent",green.only=TRUE,other.columns="gIsWellAboveBG")
# GSE50467raw = x
# save(GSE50467raw, file=paste0(dirData,"GSE50467raw.rda"))
# setwd(wd)
## ---- eval=doAll, message=FALSE-----------------------------------------------
# load(paste0(dirData,"GSE50467raw.rda"))
# annot = AnnotationDbi::select(hgug4112a.db, keys=GSE50467raw$genes[,"ProbeName"],
# column = c("ENTREZID","ENSEMBL"), keytype="PROBEID")
# annot = annot[!is.na(annot[,"ENTREZID"]),]
# uniq_probe = match(unique(annot[,1]),annot[,1])
# annot1 = annot[uniq_probe,]
# uniq_entrez = match(unique(annot1[,2]), annot1[,2])
# annot2 = annot1[uniq_entrez,]
# annot2 = na.omit(annot2)
# GSE50467raw = GSE50467raw[match(annot2[,1],GSE50467raw$genes[,"ProbeName"]),]
# rownames(GSE50467raw) = annot2[,1]
# save(GSE50467raw, file=paste0(dirData,"GSE50467raw.rda"))
## ---- warning = FALSE, eval=doAll---------------------------------------------
# load(paste0(dirData,"GSE50467raw.rda"))
# n.arrays = c(1:ncol(GSE50467raw))
# invisible(lapply(n.arrays,function(x) {plotMA(GSE50467raw, array=x)}))
# plotDensities(GSE50467raw,log=TRUE,legend=FALSE, main="Histograma de las muestras sin normalizar")
# boxplot(GSE50467raw$E, xlab="Muestras", main="Boxplot de las muestras sin normalizar", xaxt="n")
## ---- eval=doAll, results="hide"----------------------------------------------
# GSE50467=backgroundCorrect(GSE50467raw, method="normexp")
# GSE50467=normalizeBetweenArrays(GSE50467, method="quantile")
# save(GSE50467, file=paste0(dirData,"GSE50467.rda"))
## ---- eval=doAll--------------------------------------------------------------
# load(paste0(dirData,"GSE50467.rda"))
# setwd(dirData)
# system("wget https://www.ebi.ac.uk/arrayexpress/files/E-GEOD-50467/E-GEOD-50467.sdrf.txt")
# fenodata = read.csv("E-GEOD-50467.sdrf.txt",sep="\t",header=TRUE)
# setwd(wd)
# fenodata = fenodata[order(fenodata$Source.Name),]
# pd = new("AnnotatedDataFrame", data = fenodata)
# rownames(pd) = fenodata$Source.Name
## ----echo=FALSE, eval=doAll---------------------------------------------------
# rownames(pd)
## ---- eval=doAll--------------------------------------------------------------
# cols = c()
# colnames(GSE50467$E) = as.vector(lapply(colnames(GSE50467$E), function(x){c(cols, paste0(substr(x, 1, 10), " 1"))}))
## ---- eval=doAll--------------------------------------------------------------
# setwd(dirData)
# system("wget https://www.ebi.ac.uk/arrayexpress/files/E-GEOD-50467/E-GEOD-50467.idf.txt")
# experimentdata = read.csv("E-GEOD-50467.idf.txt",header = FALSE, sep="\t")
# setwd(wd)
# experimentdata2 = t(tidyr::unite(experimentdata, "data", 2:8, sep=" "))
# exp.names = experimentdata2[1,]
# exp.list = as.list(experimentdata2[-1,])
# names(exp.list) = exp.names
# MIAME = MIAME(name=exp.list$`Publication Author List`, lab = exp.list$`Person Address`, contact = exp.list$`Person Email`, title = exp.list$`Investigation Title`, abstract=exp.list$`Experiment Description`, url = paste0("http://dx.doi.org/",substr(exp.list$`Publication DOI`, 1,22)), pubMedIds = substr(exp.list$`Pubmed ID`, 1, 8), other = list(ExtraInfo = 'MIAME created from list with experimental data.'))
## ---- eval=doAll--------------------------------------------------------------
# MIAME
## ---- eval=doAll--------------------------------------------------------------
# rownames(annot2) = annot2[,1]
# fD = new("AnnotatedDataFrame", data = annot2)
## ---- eval=doAll--------------------------------------------------------------
# Exp.set = new("ExpressionSet",exprs=GSE50467$E,phenoData=pd,experimentData = MIAME, featureData=fD, annotation = "hgug4112a.db")
# save(Exp.set, file=paste0(wd,"/Eset50467.rda"))
## ---- warning=FALSE, eval=doAll-----------------------------------------------
# n.arrays = c(1:ncol(GSE50467))
# invisible(lapply(n.arrays,function(x) {plotMA.EList(GSE50467, array=x)}))
# plotDensities(GSE50467,log=TRUE,legend=FALSE, main="Histograma de las muestras normalizadas")
# boxplot(GSE50467$E, xlab="Muestras", main="Boxplot de las muestras normalizadas", xaxt="n")
## ---- eval=doAll, message=FALSE-----------------------------------------------
# final_annot = AnnotationDbi::select(hgug4112a.db, keys=fData(Exp.set)$PROBEID,
# column=c("ENTREZID","ENSEMBL", "SYMBOL"), keytype="PROBEID")
# fData(Exp.set) = final_annot[match(featureNames(Exp.set),final_annot$PROBEID),]
# head(fData(Exp.set))
# save(Exp.set, file=paste0(wd,"/Eset50467.rda"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{stCheckSTcovars}
\alias{stCheckSTcovars}
\title{Check an Array/List of Spatio-Temporal Covariates}
\usage{
stCheckSTcovars(ST, ID.unique = character(0), date.unique = integer(0))
}
\arguments{
\item{ST}{A 3D-\code{array} containing the ST-covariates, or a
\code{list} of \code{array}:s, the list elements have to be of matching
sizes and have the same \code{rownames} and \code{colnames}; list elements
are stacked to form a 3D-array.}
\item{ID.unique}{vector with unique IDs that HAVE to be present in the
ST-covariates, typically the observation locations and unobserved
locations for predictions}
\item{date.unique}{vector with unique dates/times that HAVE to be
present in the ST-covariates, typically the observation time-points.}
}
\value{
Updated \code{ST} array
}
\description{
Checks that array/list of spatio-temporal covariates is valid, making sure
that at least all locations specified in \code{ID.unique} exist. The function will
attempt to extract location IDs from \code{colnames(ST)} and
observation dates from \code{rownames(ST)} (using
\code{\link{convertCharToDate}}).
}
\examples{
##load data
data(mesa.model)
##check covariates
tmp <- stCheckSTcovars( mesa.model$ST.all, mesa.model$locations$ID )
str(tmp)
##require non-existant site
try( stCheckSTcovars( mesa.model$ST.all, "Bad.Site" ) )
##require non-existant site
try( stCheckSTcovars( mesa.model$ST.all, date.unique=1 ) )
}
\seealso{
Other object checking utilities: \code{\link{stCheckClass}},
\code{\link{stCheckCovars}}, \code{\link{stCheckFields}},
\code{\link{stCheckObs}}
}
\author{
Johan Lindstrom
}
| /man/stCheckSTcovars.Rd | no_license | victoriaknutson/SpatioTemporal | R | false | true | 1,712 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{stCheckSTcovars}
\alias{stCheckSTcovars}
\title{Check an Array/List of Spatio-Temporal Covariates}
\usage{
stCheckSTcovars(ST, ID.unique = character(0), date.unique = integer(0))
}
\arguments{
\item{ST}{A 3D-\code{array} containing the ST-covariates, or a
\code{list} of \code{array}:s, the list elements have to be of matching
sizes and have the same \code{rownames} and \code{colnames}; list elements
are stacked to form a 3D-array.}
\item{ID.unique}{vector with unique IDs that HAVE to be present in the
ST-covariates, typically the observation locations and the unobserved
locations at which predictions are required}
\item{date.unique}{vector with unique dates/times that HAVE to be
present in the ST-covariates, typically the observation time-points.}
}
\value{
Updated \code{ST} array
}
\description{
Checks that array/list of spatio-temporal covariates is valid, making sure
that at least all locations specified in \code{ID.unique} exist. The function
will attempt to extract location IDs from \code{colnames(ST)} and
observation dates from \code{rownames(ST)} (using
\code{\link{convertCharToDate}}).
}
\examples{
##load data
data(mesa.model)
##check covariates
tmp <- stCheckSTcovars( mesa.model$ST.all, mesa.model$locations$ID )
str(tmp)
##require non-existent site
try( stCheckSTcovars( mesa.model$ST.all, "Bad.Site" ) )
##require non-existent date
try( stCheckSTcovars( mesa.model$ST.all, date.unique=1 ) )
}
\seealso{
Other object checking utilities: \code{\link{stCheckClass}},
\code{\link{stCheckCovars}}, \code{\link{stCheckFields}},
\code{\link{stCheckObs}}
}
\author{
Johan Lindstrom
}
|
## Example usage of the (internal) multivariate Student-t sampler bfp:::rmvt.
library(bfp)

## Samples from the multivariate Cauchy distribution:
bfp:::rmvt(20)

## With these parameters the covariance of the distribution exists:
cov_mat <- matrix(c(1, 0.5, 0.5, 1), nrow = 2)
dof <- 10

## Theoretical covariance of the t distribution:
cov_mat * dof / (dof - 2)

## The empirical covariance of a large sample should be close to it:
cov(bfp:::rmvt(n = 100000, sigma = cov_mat, df = dof))
| /data/genthat_extracted_code/bfp/examples/rmvt.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 428 | r | library(bfp)
### Name: rmvt
### Title: Multivariate Student Random Deviates
### Aliases: rmvt
### Keywords: distribution internal
### ** Examples

## Samples from the multivariate Cauchy distribution:
bfp:::rmvt(20)

## With these parameters the covariance of the distribution exists:
cov_mat <- matrix(c(1, 0.5, 0.5, 1), nrow = 2)
dof <- 10

## Theoretical covariance of the t distribution:
cov_mat * dof / (dof - 2)

## The empirical covariance of a large sample should be close to it:
cov(bfp:::rmvt(n = 100000, sigma = cov_mat, df = dof))
|
check_latest_version <- function(DEBUG = FALSE) {
  # Look up the latest GUIDE version advertised on the author's website,
  # caching the result in a dated temp file so the site is queried at most
  # once per day.
  #
  # Args:
  #   DEBUG: if TRUE, emit a message saying where the version came from.
  # Returns:
  #   The version string scraped from the page heading.
  fname <- paste0(".tmp_GUIDEversion_", format(Sys.time(), "%Y-%m-%d"))
  # Check if the website has already been visited today
  if (!file.exists(fname)) {
    # No cache for today: download and parse the GUIDE homepage.
    html <- getURL("http://www.stat.wisc.edu/~loh/guide.html", followlocation = TRUE)
    doc <- htmlParse(html, asText = TRUE)
    plain.text <- xpathSApply(doc, "//div[@id='content']/h1[@align='center']/strong", xmlValue)
    # The heading contains "(version X.Y)"; extract the parenthesised text
    # and strip the "version " prefix.
    ver <- gsub("version ", "",
                regmatches(
                  paste(plain.text),
                  gregexpr("(?<=\\().*?(?=\\))",
                           paste(plain.text),
                           perl = TRUE))[[1]])
    cat(ver, file = fname, append = FALSE)
    # BUG FIX: the original built the debug string with paste0() and then
    # discarded it, so nothing was ever printed; message() actually emits it.
    if (DEBUG) message("[DEBUG] Get version from website: ", ver, " (save in ", fname, ")")
  } else {
    # Cache hit: remove stale cache files from previous days, then read
    # today's cached version. list.files() is computed once, not twice.
    cache_files <- list.files(".", pattern = ".tmp_GUIDEversion_", all.files = TRUE)
    stale <- cache_files[cache_files != fname]
    if (length(stale) != 0) file.remove(stale)
    ver <- readLines(fname, n = 1)
    if (DEBUG) message("[DEBUG] Get version from saved file (", fname, "): ", ver)
  }
  return(ver)
}
| /R/check_latest_version.R | no_license | jhilaire/guidr | R | false | false | 1,230 | r | check_latest_version <- function(DEBUG=FALSE) {
fname = paste0(".tmp_GUIDEversion_", format(Sys.time(), "%Y-%m-%d"))
# Check if website has already been visited today
if (!file.exists(fname)) {
# download html
html <- getURL("http://www.stat.wisc.edu/~loh/guide.html", followlocation = TRUE)
# parse html
doc = htmlParse(html, asText=TRUE)
plain.text <- xpathSApply(doc, "//div[@id='content']/h1[@align='center']/strong", xmlValue)
# Get version number
ver <- gsub("version ", "",
regmatches(
paste(plain.text),
gregexpr("(?<=\\().*?(?=\\))",
paste(plain.text),
perl=T))[[1]])
cat(ver, file = fname, append = FALSE)
if (DEBUG) paste0("[DEBUG] Get version from website: ", ver, " (save in ", fname, ")")
} else {
other_files <- list.files(".", pattern=".tmp_GUIDEversion_", all.files = TRUE)[which(list.files(".", pattern=".tmp_GUIDEversion_", all.files = TRUE) != fname)]
if (length(other_files) != 0) file.remove(other_files)
ver <- readLines(fname,n=1)
if (DEBUG) paste0("[DEBUG] Get version from saved file (", fname,"): ", ver)
}
return(ver)
}
|
## File Name: mice_imputation_weighted_norm_prepare.R
## File Version: 0.112
mice_imputation_weighted_norm_prepare <- function(x, ry, y, imputationWeights,
        interactions, quadratics, pls.facs, pls.impMethod, ... )
{
    # Shared set-up step for weighted normal imputation.
    #
    # Arguments (following mice's imputation-method conventions):
    #   x                 - predictor matrix; an intercept column is prepended here
    #   ry                - logical vector, TRUE where y is observed
    #   y                 - variable being imputed
    #   imputationWeights - case weights; NULL means equal weights
    #   interactions, quadratics, pls.facs, pls.impMethod - options forwarded
    #                       to mice_imputation_pls_helper()
    #   ...               - further arguments forwarded to the PLS helper
    #
    # Returns a list with: yimp (imputed values from the PLS helper),
    # pls.facs, yobs/xobs (observed responses and design rows),
    # weights.obs (normalised observed-case weights) and x (design matrix
    # including the intercept column).

    # prepend an intercept column to the design matrix
    x <- cbind(1, as.matrix(x))
    xobs <- x[ry,]
    yobs <- y[ry]
    # default: equal weights for all cases
    if ( is.null( imputationWeights ) ){
        imputationWeights <- rep(1, length(y) )
    }
    weights.obs <- imputationWeights[ ry ]
    # standardize all weights to a mean of one over the observed cases
    weights.obs <- length(weights.obs) * weights.obs / sum( weights.obs )
    # PLS interactions and quadratics: fetch mice's internal imputation state
    # from the caller's caller frame (parent.frame(n=2)); this is sensitive
    # to call depth, so the call structure here must not be wrapped further.
    res <- mice_imputation_get_states(pos=parent.frame(n=2) )
    newstate <- res$newstate
    vname <- res$vname
    # delegate interaction/quadratic expansion and optional PLS imputation
    plsout <- mice_imputation_pls_helper( newstate=newstate, vname=vname,
                pls.impMethod=pls.impMethod,
                x=x[,-1], y=y, ry=ry, imputationWeights=imputationWeights,
                interactions=interactions, quadratics=quadratics, pls.facs=pls.facs,
                ... )
    pls.facs <- plsout$pls.facs
    yimp <- plsout$yimp
    #---- output
    res <- list( yimp=yimp, pls.facs=pls.facs, yobs=yobs, xobs=xobs,
            weights.obs=weights.obs, x=x)
    return(res)
}
| /R/mice_imputation_weighted_norm_prepare.R | no_license | cran/miceadds | R | false | false | 1,252 | r | ## File Name: mice_imputation_weighted_norm_prepare.R
## File Version: 0.112
mice_imputation_weighted_norm_prepare <- function(x, ry, y, imputationWeights,
        interactions, quadratics, pls.facs, pls.impMethod, ... )
{
    # Shared set-up step for weighted normal imputation.
    # x: predictor matrix (intercept column prepended here); ry: TRUE where y
    # is observed; imputationWeights: case weights (NULL means equal weights);
    # remaining arguments are forwarded to mice_imputation_pls_helper().
    # Returns a list with yimp, pls.facs, yobs, xobs, weights.obs and x.

    # prepend an intercept column to the design matrix
    x <- cbind(1, as.matrix(x))
    xobs <- x[ry,]
    yobs <- y[ry]
    # default: equal weights for all cases
    if ( is.null( imputationWeights ) ){
        imputationWeights <- rep(1, length(y) )
    }
    weights.obs <- imputationWeights[ ry ]
    # standardize all weights to a mean of one over the observed cases
    weights.obs <- length(weights.obs) * weights.obs / sum( weights.obs )
    # PLS interactions and quadratics: fetch mice's internal imputation state
    # from the caller's caller frame; sensitive to call depth, do not wrap.
    res <- mice_imputation_get_states(pos=parent.frame(n=2) )
    newstate <- res$newstate
    vname <- res$vname
    plsout <- mice_imputation_pls_helper( newstate=newstate, vname=vname,
                pls.impMethod=pls.impMethod,
                x=x[,-1], y=y, ry=ry, imputationWeights=imputationWeights,
                interactions=interactions, quadratics=quadratics, pls.facs=pls.facs,
                ... )
    pls.facs <- plsout$pls.facs
    yimp <- plsout$yimp
    #---- output
    res <- list( yimp=yimp, pls.facs=pls.facs, yobs=yobs, xobs=xobs,
            weights.obs=weights.obs, x=x)
    return(res)
}
|
# Auto-generated fuzz/valgrind regression input for meteor:::ET0_Makkink.
# The exact (partly extreme and subnormal) numeric values reproduce the
# recorded test case, so they must not be reformatted or "cleaned up".
testlist <- list(Rs = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), relh = -1.72131968218895e+83, temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161))
# Invoke the function under test with the recorded argument list.
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) | /meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615847371-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 785 | r | testlist <- list(Rs = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ), relh = -1.72131968218895e+83, temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) |
##### The movielens project
##### Create EdX set, validation set. This first part is taken from HarvardX EdX website.
# Install (if missing) and attach the packages used throughout this script.
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
# BUG FIX: the repository URL was misspelled "cral.us.r-project.org", which
# would make the data.table installation fail on machines without the package.
if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org")
# MovieLens 10M dataset:
# https://grouplens.org/datasets/movielens/10m/
# http://files.grouplens.org/datasets/movielens/ml-10m.zip
# Download the zipped 10M dataset to a temporary file.
dl <- tempfile()
download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl)
# ratings.dat uses "::" as a field separator; convert to tabs for fread.
ratings <- fread(text = gsub("::", "\t", readLines(unzip(dl, "ml-10M100K/ratings.dat"))),
                 col.names = c("userId", "movieId", "rating", "timestamp"))
movies <- str_split_fixed(readLines(unzip(dl, "ml-10M100K/movies.dat")), "\\::", 3)
colnames(movies) <- c("movieId", "title", "genres")
# NOTE(review): the levels() trick assumes character columns became factors
# (pre-R-4.0 stringsAsFactors default) — confirm under R >= 4.0.
movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(levels(movieId))[movieId],
                                           title = as.character(title),
                                           genres = as.character(genres))
movielens <- left_join(ratings, movies, by = "movieId")
# Validation set will be 10% of MovieLens data.
# sample.kind = "Rounding" pins the sampler so the split is reproducible.
set.seed(1, sample.kind = "Rounding")
test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE)
edx <- movielens[-test_index,]
temp <- movielens[test_index,]
# Make sure userId and movieId in validation set are also in edx set
validation <- temp %>%
  semi_join(edx, by = "movieId") %>%
  semi_join(edx, by = "userId")
# Add rows removed from validation set back into edx set
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)
# Free intermediate objects.
rm(dl, ratings, movies, test_index, temp, movielens, removed)
## ----Creating edx_work and edx_cv------------------------------------------------------------------------
# Further split edx into a working/training set (90%) and an internal
# cross-validation set (10%) used for model tuning; the `validation` set
# stays untouched until the final assessment.
test_index2 <- createDataPartition(y = edx$rating, times = 1, p = 0.1, list = FALSE)
edx_work <- edx[-test_index2,]
temp <- edx[test_index2,]
edx_cv <- temp %>%
  semi_join(edx_work, by = "movieId") %>%
  semi_join(edx_work, by = "userId")
# To ensure all movies and users present in edx_cv are also present in edx_work.
removed <- anti_join(temp, edx_cv)
edx_work <- rbind(edx_work, removed)
rm(test_index2, temp)
## Exploratory analysis ----
# Which movie has the highest number of ratings? (top 10 by rating count)
edx %>% group_by(title) %>% summarise(count = n()) %>% arrange(desc(count)) %>%
  head(., 10) %>% knitr::kable()
# Frequency of each rating value, most common first.
edx %>% group_by(rating) %>% summarise(count = n()) %>% arrange(desc(count)) %>%
  knitr::kable()
# Line plot of the frequency of each rating.
edx %>%
  group_by(rating) %>%
  summarize(count = n()) %>%
  ggplot(aes(x = rating, y = count)) +
  ggtitle("Line plot of total ratings counts") +
  geom_line()
# Here I define the RMSE function that I will use in some of my assessments of predictive models below.
RMSE <- function(true_ratings, predicted_ratings) {
  # Root-mean-square error between observed and predicted ratings.
  squared_errors <- (true_ratings - predicted_ratings)^2
  sqrt(mean(squared_errors))
}
########################################################
##### Modelling section: process the edx_work / edx_cv sets created above.
# Attach the libraries used for modelling (tidyverse already attaches dplyr,
# so the explicit library(dplyr) is redundant but harmless).
library(dslabs)
library(tidyverse)
library(dplyr)
library(caret)
##### Baseline model: predict every rating with the overall mean of edx_work.
# NOTE(review): data_frame() is deprecated in current tibble; consider tibble().
mu_hat <- as.data.frame(mean(edx_work$rating))
rownames(mu_hat) <- c("mu_hat")
mu_hat %>% knitr::kable()
# RMSE of the constant prediction on the edx_cv hold-out.
naive_rmse <- RMSE(edx_cv$rating, mu_hat$`mean(edx_work$rating)`)
# Start the running results table with this baseline.
rmse_results <- data_frame(method = "Just the average", RMSE = naive_rmse)
### "Just the average" yields an RMSE of about 1.06; easy to improve on.
### Movie effect: per-movie mean deviation b_i from the global mean mu.
mu <- mean(edx_work$rating)
movie_avgs <- edx_work %>%
  group_by(movieId) %>%
  summarise(b_i = mean(rating - mu))
head(movie_avgs, 10) %>% knitr::kable()
# Visualising the frequency distribution of movie residuals.
movie_avgs %>% qplot(b_i, geom ="histogram", bins = 10, data = ., color = I("black"),
                     main = "Frequency distribution of movie residuals")
## ----Algorithm 2-----------------------------------------------------------------------------------------
# Predict each edx_cv rating as mu + b_i for the corresponding movie.
predicted_ratings_2 <- mu + edx_cv %>%
  left_join(movie_avgs, by='movieId') %>%
  .$b_i
model_2_rmse <- RMSE(predicted_ratings_2, edx_cv$rating)
rmse_results <- bind_rows(rmse_results,
                          data_frame(method = "Movie Effect Model",
                                     RMSE = model_2_rmse))
## ----Algorithm 3-----------------------------------------------------------------------------------------
### User effect on top of the movie effect (cumulative model mu + b_i + b_u).
# BUG FIX: the original computed b_u = mean(rating - mu), ignoring the joined
# movie effect b_i even though movie_avgs was left-joined for exactly that
# purpose. Since the prediction below is mu + b_i + b_u, the movie effect was
# being double-counted. The user residual must remove both mu and b_i
# (this also matches the lambda-tuning loop later in the script, which uses
# sum(rating - b_i - mu)).
user_avgs <- edx_work %>%
  left_join(movie_avgs, by='movieId') %>%
  group_by(userId) %>%
  summarise(b_u = mean(rating - mu - b_i))
# Predict each edx_cv rating as mu + movie effect + user effect.
predicted_ratings_3 <- edx_cv %>%
  left_join(movie_avgs, by = "movieId") %>%
  left_join(user_avgs, by = "userId") %>%
  mutate(pred = mu + b_i + b_u) %>%
  .$pred
model_3_rmse <- RMSE(predicted_ratings_3, edx_cv$rating)
rmse_results <- bind_rows(rmse_results,
                          data_frame(method = "User and Movie Effect Model",
                                     RMSE = model_3_rmse))
# Below is an effort to apply regularisation across the dataset: shrinking
# estimates for movies that have only a few ratings relative to many.
### Looking at the top ten and bottom ten movies in edx_work.
movie_titles <- edx_work %>%
  select(movieId, title) %>%
  distinct()
# Top 10 movies by raw movie effect b_i.
movie_avgs %>% left_join(movie_titles, by="movieId") %>%
  arrange(desc(b_i)) %>%
  select(title, b_i) %>%
  slice(1:10) %>%
  knitr::kable()
# Bottom 10 movies by raw movie effect b_i.
movie_avgs %>% left_join(movie_titles, by="movieId") %>%
  arrange(b_i) %>%
  select(title, b_i) %>%
  slice(1:10) %>%
  knitr::kable()
### Top 10 movie residual values plus the number of ratings for each movie.
# Many of the movies with the largest residuals have very few ratings.
edx_work %>% count(movieId) %>%
  left_join(movie_avgs)%>%
  left_join(movie_titles, by = "movieId") %>%
  arrange(desc(b_i)) %>%
  select(title, b_i, n) %>%
  slice(1:10) %>%
  knitr::kable()
### Application of the regularisation term: b_i shrunk by penalty lambda.
## ----Lambda of 3
lambda <- 3
mu <- mean(edx_work$rating)
movie_reg_avgs <- edx_work %>%
  group_by(movieId) %>%
  summarise(b_i = sum(rating - mu)/(n() + lambda), n_i = n())
head(movie_reg_avgs, 10) %>% knitr::kable()
# Visualising original vs regularised b_i, point size ~ sqrt(rating count).
data_frame(original = movie_avgs$b_i,
           regularized = movie_reg_avgs$b_i,
           n = movie_reg_avgs$n_i) %>%
  ggplot(aes(original, regularized, size=sqrt(n))) +
  geom_point(shape=1, alpha=0.5)
### Top 10 using the regularised movie estimates: more sensible titles now
# appear, with understandably smaller b_i values.
edx_work %>% count(movieId) %>%
  left_join(movie_reg_avgs)%>%
  left_join(movie_titles, by = "movieId") %>%
  arrange(desc(b_i)) %>%
  select(title, b_i, n) %>%
  slice(1:10) %>%
  knitr::kable()
## ----Algorithm 4-----------------------------------------------------------------------------------------
# Prediction using the regularised movie effect only (mu + b_i).
predicted_ratings_4 <- edx_cv %>%
  left_join(movie_reg_avgs, by = 'movieId') %>%
  mutate(pred = mu + b_i) %>%
  .$pred
model_4_rmse <- RMSE(predicted_ratings_4, edx_cv$rating)
rmse_results <- bind_rows(rmse_results,
                          data_frame(method = "Regularised movie effect",
                                     RMSE = model_4_rmse))
### Regularised user effects, analogous to the movie effects above.
# NOTE(review): b_u here is computed from rating - mu without subtracting the
# movie effect b_i, yet Algorithm 6 below combines it with b_i in
# mu + b_i + b_u — the movie effect is partly double-counted. The tuning loop
# later in the script subtracts b_i; confirm which was intended.
user_reg_avgs <- edx_work %>%
  group_by(userId) %>%
  summarise(b_u = sum(rating - mu)/(n() + lambda), n_u = n())
# Visualising original vs regularised user effects.
data_frame(original = user_avgs$b_u,
           regularized = user_reg_avgs$b_u,
           n = user_reg_avgs$n_u) %>%
  ggplot(aes(original, regularized, size=sqrt(n))) +
  geom_point(shape=1, alpha=0.5)
# Apply regularised user effect to the prediction calculation.
## ----Algorithm 5-----------------------------------------------------------------------------------------
# User effect only (mu + b_u).
predicted_ratings_5 <- edx_cv %>%
  left_join(user_reg_avgs, by = "userId") %>%
  mutate(pred = mu + b_u) %>%
  .$pred
model_5_rmse <- RMSE(predicted_ratings_5, edx_cv$rating)
rmse_results <- bind_rows(rmse_results,
                          data_frame(method = "Regularised user effect",
                                     RMSE = model_5_rmse))
## ----Algorithm 6-----------------------------------------------------------------------------------------
# Combined regularised movie and user effects (mu + b_i + b_u).
predicted_ratings_6 <- edx_cv %>%
  left_join(movie_reg_avgs, by = 'movieId') %>%
  left_join(user_reg_avgs, by = "userId") %>%
  mutate(pred = mu + b_i + b_u) %>%
  .$pred
model_6_rmse <- RMSE(predicted_ratings_6, edx_cv$rating)
rmse_results <- bind_rows(rmse_results,
                          data_frame(method = "Reg movie and user effect",
                                     RMSE = model_6_rmse))
# This yields an RMSE of about 0.8844, already a solid improvement.
# Lambda is a tuning parameter: choose it by cross validation on edx_cv.
lambda <- seq(0, 10, 0.25)
## --------------------------------------------------------------------------------------------------------
# For each candidate lambda, refit regularised movie and user effects on
# edx_work (user effect computed after removing b_i) and score on edx_cv.
set.seed(1, sample.kind = "Rounding")
rmses <- sapply(lambda, function(l){
  mu <- mean(edx_work$rating)
  b_i <- edx_work %>%
    group_by(movieId) %>%
    summarise(b_i = sum(rating - mu)/(n()+l))
  b_u <- edx_work %>%
    left_join(b_i, by = "movieId") %>%
    group_by(userId) %>%
    summarise(b_u = sum(rating - b_i - mu)/(n()+l))
  predicted_ratings<- edx_cv %>%
    left_join(b_i, by = "movieId") %>%
    left_join(b_u, by = "userId") %>%
    mutate(pred = mu + b_i + b_u) %>%
    .$pred
  return(RMSE(predicted_ratings, edx_cv$rating))
})
# RMSE as a function of the penalty; the minimum picks the tuned lambda.
plot(lambda, rmses, main = "Tuning Lambda")
## ----echo=F----------------------------------------------------------------------------------------------
print("The optimal Lambda")
lambda[which.min(rmses)]
### Applying this value to the formal prediction.
## ----Algorithm 7-----------------------------------------------------------------------------------------
b_i <- edx_work %>%
group_by(movieId) %>%
summarise(b_i = sum(rating - mu)/(n()+4.5))
b_u <- edx_work %>%
left_join(b_i, by = "movieId") %>%
group_by(userId) %>%
summarise(b_u = sum(rating - b_i - mu)/(n()+4.5))
predicted_ratings_7 <- edx_cv %>%
left_join(b_i, by = "movieId") %>%
left_join(b_u, by = "userId") %>%
mutate(pred = mu + b_i + b_u) %>%
.$pred
model_7_rmse <- RMSE(predicted_ratings_7, edx_cv$rating)
rmse_results <- bind_rows(rmse_results,
data_frame(method = "Reg mov & usr with 4.5 l",
RMSE = model_7_rmse))
# Some tidying up.
rm(edx, predicted_ratings_2, predicted_ratings_3, predicted_ratings_4, predicted_ratings_5, predicted_ratings_6, predicted_ratings_7)
## --------------------------------------------------------------------------------------------------------
### Principal component analysis.
# Apply PCA to a dense subset of edx_work: only the most rated movies
# (>= 1000 ratings) and the most prolific users (>= 500 ratings).
edx_small <- edx_work %>%
  group_by(movieId) %>%
  filter(n() >= 1000) %>% ungroup() %>%
  group_by(userId) %>%
  filter(n() >= 500) %>% ungroup()
### Build a user-by-movie matrix of ratings.
y <- edx_small %>%
  select(userId, movieId, rating) %>%
  spread(movieId, rating) %>%
  as.matrix()
y[1:10, 1:5]
rm(edx_small, edx_work)
# Move column 1 (userId) into the rownames, then drop it.
rownames(y) <- y[,1]
y <- y[,-1]
# Keep a title-labelled copy for readable plot labels later.
tmp <- y
colnames(tmp) <- with(movie_titles, title[match(colnames(tmp), movieId)])
y[1:10, 1:5]
## ----Preparation for PCA---------------------------------------------------------------------------------
### Centre rows and columns, fill remaining NAs with 0, re-centre rows.
y <- sweep(y, 1, rowMeans(y, na.rm = TRUE))
y <- sweep(y, 2, colMeans(y, na.rm = TRUE))
y[is.na(y)] <- 0
y <- sweep(y, 1, rowMeans(y))
## ----PCA-------------------------------------------------------------------------------------------------
# NOTE(review): prefer TRUE/FALSE over T/F literals here.
pca <- prcomp(y, center = F, scale. = F, retx = T)
# Visualising the per-component standard deviations.
plot(pca$sdev, main = "PCA standard deviation")
## --------------------------------------------------------------------------------------------------------
# Cumulative share of variance explained by the leading components.
var_explained <- cumsum(pca$sdev^2/sum(pca$sdev^2))
plot(var_explained, main = "PCA variance by PC index")
# Most variation is explained by ~500 components; applying all of them is
# impractical with the method below, so only a few PCs are used at a time.
# PC1 vs PC2 scatter to get a feel for relationships between movies.
library(ggrepel)
pcs <- data.frame(pca$rotation, name = colnames(tmp))
pcs %>% ggplot(aes(PC1, PC2)) + geom_point() +
  geom_text_repel(aes(PC1, PC2, label=name),
                  data = filter(pcs,
                                PC1 < -0.1 | PC1 > 0.1 | PC2 < -0.075 | PC2 > 0.1))
## ----echo=F----------------------------------------------------------------------------------------------
# Movies at the two extremes of PC1.
pcs %>% select(name, PC1) %>% arrange(PC1) %>% slice(1:10) %>%
  knitr::kable()
## ----echo=F----------------------------------------------------------------------------------------------
pcs %>% select(name, PC1) %>% arrange(desc(PC1)) %>% slice(1:10) %>%
  knitr::kable()
rm(tmp)
##### Expand pca$x and pca$rotation to match the dimensions of the edx_cv
# matrix (unique userId x unique movieId); the expansions are sparse
# (zero-filled) matrices.
### Actual edx_cv ratings as a user-by-movie matrix.
## ----edx_cv to matrix------------------------------------------------------------------------------------
val_ratings.m <- edx_cv %>%
  select(userId, movieId, rating) %>%
  spread(movieId, rating) %>%
  as.matrix()
# Move userId into the rownames and label columns with movie titles.
rownames(val_ratings.m) <- val_ratings.m[,1]
val_ratings.m <- val_ratings.m[,-1]
colnames(val_ratings.m) <- with(movie_titles, title[match(colnames(val_ratings.m), movieId)])
val_ratings.m[1:10, 5:7]
### Reference lists of all userId / movieId values present in edx_cv.
## --------------------------------------------------------------------------------------------------------
unique_usr_val <- as.matrix(unique(edx_cv$userId))
colnames(unique_usr_val) <- c("userId")
## --------------------------------------------------------------------------------------------------------
unique_mov_val <- as.matrix(unique(edx_cv$movieId))
colnames(unique_mov_val) <- c("movieId")
# "User effect" sparse matrix: pca$x rows aligned to edx_cv users, NAs -> 0.
# NOTE(review): merge() may order rows by userId as a character key while
# spread() orders numerically — verify row alignment against y3/val_ratings.m.
pca_x <- pca$x %>% as.data.frame() %>%
  tibble::rownames_to_column(., "userId") %>%
  merge(unique_usr_val, ., by = "userId", all = TRUE)
pca_x <- as.matrix(pca_x[,-1])
pca_x[is.na(pca_x)] <- 0
# "Principal component" sparse matrix: pca$rotation rows aligned to movies.
pca_rotation <- pca$rotation %>% as.data.frame() %>%
  tibble::rownames_to_column(., "movieId") %>%
  merge(unique_mov_val, ., by = "movieId", all = TRUE)
pca_rotation <- as.matrix(pca_rotation[,-1])
pca_rotation[is.na(pca_rotation)] <- 0
#### Baseline predictions (Algorithm 7: mu + b_i + b_u) for every edx_cv row,
# to be refined with the PCA components below.
predictions <- edx_cv %>%
  left_join(b_i, by = "movieId") %>%
  left_join(b_u, by = "userId") %>%
  mutate(pred = mu + b_i + b_u)
## ----predictions to matrix-------------------------------------------------------------------------------
y3 <- predictions %>%
  select(userId, movieId, pred) %>%
  spread(movieId, pred) %>%
  as.matrix()
rownames(y3) <- y3[,1]
y3 <- y3[,-1]
# Clean up.
rm(predictions)
# Vectors p ("user effects", columns of pca_x) and q ("principal
# components", columns of pca_rotation) each contribute a rank-1 update
# p_k %*% q_k to the prediction matrix.
# IMPROVEMENT: summing the first ten rank-1 outer products is exactly the
# matrix product of the first ten columns, so the twenty p/q temporaries
# and the long explicit sum are replaced by one equivalent multiplication.
#### Prediction algorithm: regularised baseline plus PC1 to PC10.
n_pc <- 10
new_pred_10 <- y3 + pca_x[, seq_len(n_pc)] %*% t(pca_rotation[, seq_len(n_pc)])
# RMSE against the actual edx_cv ratings (NAs mark unrated user/movie pairs).
rmse_PC1_to_10 <- sqrt(mean((val_ratings.m - new_pred_10)^2, na.rm = TRUE))
rmse_results <- bind_rows(rmse_results,
                          data_frame(method = "Reg plus PC1 to PC10",
                                     RMSE = rmse_PC1_to_10))
# Summary table of all RMSE results for prediction algorithms used to date.
rmse_results %>% knitr::kable()
### FINAL Prediction Algorithm
# The final model is the last algorithm (Reg plus PC1 to PC10), a culmination
# of all preceding algorithms, now assessed against the untouched `validation`
# set. Because the PCA section relies on matrix operations whose dimensions
# must match exactly, the same sparse-matrix expansions are rebuilt against
# the validation set's users and movies.
# First: the validation ratings as a user-by-movie matrix.
## ----Validation to matrix--------------------------------------------------------------------------------
validation.m <- validation %>%
  select(userId, movieId, rating) %>%
  spread(movieId, rating) %>%
  as.matrix()
rownames(validation.m) <- validation.m[,1]
validation.m <- validation.m[,-1]
colnames(validation.m) <- with(movie_titles, title[match(colnames(validation.m), movieId)])
## --------------------------------------------------------------------------------------------------------
# Reference lists of users and movies present in the validation set.
unique_usr_val.f <- as.matrix(unique(validation$userId))
colnames(unique_usr_val.f) <- c("userId")
## --------------------------------------------------------------------------------------------------------
unique_mov_val.f <- as.matrix(unique(validation$movieId))
colnames(unique_mov_val.f) <- c("movieId")
## ----pca$x sparse matrix---------------------------------------------------------------------------------
# pca$x rows aligned to validation users; users unseen by the PCA get zeros.
pca_x.f <- pca$x %>% as.data.frame() %>%
  tibble::rownames_to_column(., "userId") %>%
  merge(unique_usr_val.f, ., by = "userId", all = TRUE)
pca_x.f <- as.matrix(pca_x.f[,-1])
pca_x.f[is.na(pca_x.f)] <- 0
## ----pca$rotation sparse matrix--------------------------------------------------------------------------
# pca$rotation rows aligned to validation movies, zero-filled likewise.
pca_rotation.f <- pca$rotation %>% as.data.frame() %>%
  tibble::rownames_to_column(., "movieId") %>%
  merge(unique_mov_val.f, ., by = "movieId", all = TRUE)
pca_rotation.f <- as.matrix(pca_rotation.f[,-1])
pca_rotation.f[is.na(pca_rotation.f)] <- 0
## ----algorithm 7 predictions using validation------------------------------------------------------------
# Regularised baseline predictions (mu + b_i + b_u) for every validation row.
predictions.f <- validation %>%
  left_join(b_i, by = "movieId") %>%
  left_join(b_u, by = "userId") %>%
  mutate(pred = mu + b_i + b_u)
## ----predictions.f to matrix-----------------------------------------------------------------------------
y3.f <- predictions.f %>%
  select(userId, movieId, pred) %>%
  spread(movieId, pred) %>%
  as.matrix()
rownames(y3.f) <- y3.f[,1]
y3.f <- y3.f[,-1]
## ----Final algorithm-------------------------------------------------------------------------------------
# Same rank-10 reconstruction as before, now with the validation-aligned
# sparse matrices.
# IMPROVEMENT: the twenty p/q temporaries and the explicit ten-term sum of
# rank-1 products p_k %*% q_k are replaced by the equivalent single matrix
# product of the first ten columns.
n_pc_final <- 10
new_pred_10.f <- y3.f + pca_x.f[, seq_len(n_pc_final)] %*% t(pca_rotation.f[, seq_len(n_pc_final)])
# Reporting RMSE value of the final prediction algorithm on the validation set.
rmse_final <- sqrt(mean((validation.m - new_pred_10.f)^2, na.rm = TRUE))
print("Final Algorithm RMSE")
rmse_final
| /200328_Capstone_sub.R | no_license | jwschmidberger/MovieLens_capstone | R | false | false | 22,239 | r | ##### The movielens project
##### Create EdX set, validation set. This first part is taken from HarvardX EdX website.
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(data.table)) install.packages("data.table", repos = "http://cral.us.r-project.org")
# ==== Data acquisition and train/validation partitioning ====
# Downloads the MovieLens 10M archive, parses ratings and movies, joins them,
# and carves out a 10% `validation` set plus a 10% `edx_cv` tuning set.
# Statement order matters throughout: later steps consume and then rm() the
# intermediates created here.
# MovieLens 10M dataset:
# https://grouplens.org/datasets/movielens/10m/
# http://files.grouplens.org/datasets/movielens/ml-10m.zip
dl <- tempfile()
download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl)
ratings <- fread(text = gsub("::", "\t", readLines(unzip(dl, "ml-10M100K/ratings.dat"))),
col.names = c("userId", "movieId", "rating", "timestamp"))
movies <- str_split_fixed(readLines(unzip(dl, "ml-10M100K/movies.dat")), "\\::", 3)
colnames(movies) <- c("movieId", "title", "genres")
# NOTE(review): `levels(movieId)` assumes movieId is a factor, which only
# holds under pre-R-4.0 stringsAsFactors defaults — confirm on R >= 4.0.
movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(levels(movieId))[movieId],
title = as.character(title),
genres = as.character(genres))
movielens <- left_join(ratings, movies, by = "movieId")
# Validation set will be 10% of MovieLens data
# sample.kind = "Rounding" reproduces pre-3.6 RNG behaviour; requires R >= 3.6.
set.seed(1, sample.kind = "Rounding")
test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE)
edx <- movielens[-test_index,]
temp <- movielens[test_index,]
# Make sure userId and movieId in validation set are also in edx set
validation <- temp %>%
semi_join(edx, by = "movieId") %>%
semi_join(edx, by = "userId")
# Add rows removed from validation set back into edx set
# anti_join with no `by` matches on all shared columns (prints a message).
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)
rm(dl, ratings, movies, test_index, temp, movielens, removed)
## ----Creating edx_work and edx_cv------------------------------------------------------------------------
# Second split: edx_work (training) / edx_cv (internal cross-validation set),
# mirroring the edx/validation construction above.
test_index2 <- createDataPartition(y = edx$rating, times = 1, p = 0.1, list = FALSE)
edx_work <- edx[-test_index2,]
temp <- edx[test_index2,]
edx_cv <- temp %>%
semi_join(edx_work, by = "movieId") %>%
semi_join(edx_work, by = "userId")
# To ensure all movies and users present in edx_cv are also present in edx_work.
removed <- anti_join(temp, edx_cv)
edx_work <- rbind(edx_work, removed)
rm(test_index2, temp)
# ==== Exploratory summaries (tables and a plot; no objects kept) ====
# Which movie has the highest number of ratings?
edx %>% group_by(title) %>% summarise(count = n()) %>% arrange(desc(count)) %>%
head(., 10) %>% knitr::kable()
# What is the ranking order of movie ratings.
edx %>% group_by(rating) %>% summarise(count = n()) %>% arrange(desc(count)) %>%
knitr::kable()
# Lineplot of the frequency of each rating.
edx %>%
group_by(rating) %>%
summarize(count = n()) %>%
ggplot(aes(x = rating, y = count)) +
ggtitle("Line plot of total ratings counts") +
geom_line()
# Root-mean-square error between observed and predicted ratings.
# Used to score every prediction algorithm below. NA values propagate
# (mean() without na.rm), matching the behaviour callers rely on.
RMSE <- function(true_ratings, predicted_ratings){
  squared_errors <- (true_ratings - predicted_ratings)^2
  sqrt(mean(squared_errors))
}
########################################################
##### Now picking up on Section 6 of the Machine Learning module of the course, I will start to process
# the edx and validation datasets generated by the instructions given for this project.
# The following libraries will be relevant to my work here.
library(dslabs)
library(tidyverse)
library(dplyr)
library(caret)
##### Most basic model where I assume all ratings are equal to the over all mean of all ratings in edx.
mu_hat <- as.data.frame(mean(edx_work$rating))
rownames(mu_hat) <- c("mu_hat")
mu_hat %>% knitr::kable()
# The single column of mu_hat keeps its auto-generated name, hence the
# backtick access below.
naive_rmse <- RMSE(edx_cv$rating, mu_hat$`mean(edx_work$rating)`)
# NOTE(review): data_frame() is deprecated in tibble; tibble() is the
# drop-in replacement (also used repeatedly below).
rmse_results <- data_frame(method = "Just the average", RMSE = naive_rmse)
### "Just the average yields an RMSE of 1.06. Not bad but it should be easy to improve on this.
### Trying to factor in movie bias.
mu <- mean(edx_work$rating)
# Per-movie residual b_i = mean deviation of that movie's ratings from mu.
movie_avgs <- edx_work %>%
group_by(movieId) %>%
summarise(b_i = mean(rating - mu))
head(movie_avgs, 10) %>% knitr::kable()
# Visualising the frequency distribution of movie residuals.
movie_avgs %>% qplot(b_i, geom ="histogram", bins = 10, data = ., color = I("black"),
main = "Frequency distribution of movie residuals")
## ----Algorithm 2-----------------------------------------------------------------------------------------
# Calculating the new predictions given the b_i values for each row in edx_cv dataset and adding it to
# mu.
# %>% binds tighter than +, so this is mu + (edx_cv %>% ... %>% .$b_i).
predicted_ratings_2 <- mu + edx_cv %>%
left_join(movie_avgs, by='movieId') %>%
.$b_i
model_2_rmse <- RMSE(predicted_ratings_2, edx_cv$rating)
rmse_results <- bind_rows(rmse_results,
data_frame(method = "Movie Effect Model",
RMSE = model_2_rmse))
## ----Algorithm 3-----------------------------------------------------------------------------------------
### Now I am looking at the influence of the user in context of the movie effect (i.e. cumulative effect).
# Per-user residual b_u after removing mu and the movie effect b_i.
user_avgs <- edx_work %>%
left_join(movie_avgs, by='movieId') %>%
group_by(userId) %>%
summarise(b_u = mean(rating - mu))
predicted_ratings_3 <- edx_cv %>%
left_join(movie_avgs, by = "movieId") %>%
left_join(user_avgs, by = "userId") %>%
mutate(pred = mu + b_i + b_u) %>%
.$pred
model_3_rmse <- RMSE(predicted_ratings_3, edx_cv$rating)
rmse_results <- bind_rows(rmse_results,
data_frame(method = "User and Movie Effect Model",
RMSE = model_3_rmse))
# Below is an effor to apply regularisation across the dataset. Modifying ratings that have only a few ratings
# vs many.
### Looking at top ten movies, and bottom 10 movies in edx.
# movieId -> title lookup used by all ranking tables below.
movie_titles <- edx_work %>%
select(movieId, title) %>%
distinct()
# Top 10
movie_avgs %>% left_join(movie_titles, by="movieId") %>%
arrange(desc(b_i)) %>%
select(title, b_i) %>%
slice(1:10) %>%
knitr::kable()
# Bottom 10
movie_avgs %>% left_join(movie_titles, by="movieId") %>%
arrange(b_i) %>%
select(title, b_i) %>%
slice(1:10) %>%
knitr::kable()
### Now looking at the top 10 movie residual values in edx plus the number of ratings for each movie.
# It is clear the many of the movies with largest residuals have very few ratings.
edx_work %>% count(movieId) %>%
left_join(movie_avgs)%>%
left_join(movie_titles, by = "movieId") %>%
arrange(desc(b_i)) %>%
select(title, b_i, n) %>%
slice(1:10) %>%
knitr::kable()
### Application of the regularisation term.
## ----Lambda of 3
# Shrink b_i toward 0 by dividing by (n + lambda): movies with few ratings
# are pulled hardest toward the global mean.
lambda <- 3
mu <- mean(edx_work$rating)
movie_reg_avgs <- edx_work %>%
group_by(movieId) %>%
summarise(b_i = sum(rating - mu)/(n() + lambda), n_i = n())
head(movie_reg_avgs, 10) %>% knitr::kable()
# Visualising the difference in b_i values from the original movie_avgs vs the modified movie_reg_avgs.
data_frame(original = movie_avgs$b_i,
regularized = movie_reg_avgs$b_i,
n = movie_reg_avgs$n_i) %>%
ggplot(aes(original, regularized, size=sqrt(n))) +
geom_point(shape=1, alpha=0.5)
### Lets look at top 10 using these regularised movie estimates. Now we are seeing more sensible movies
# start to appear, with understandably lower b_i values.
edx_work %>% count(movieId) %>%
left_join(movie_reg_avgs)%>%
left_join(movie_titles, by = "movieId") %>%
arrange(desc(b_i)) %>%
select(title, b_i, n) %>%
slice(1:10) %>%
knitr::kable()
## ----Algorithm 4-----------------------------------------------------------------------------------------
# Perform prediction on regularised movie b_i.
predicted_ratings_4 <- edx_cv %>%
left_join(movie_reg_avgs, by = 'movieId') %>%
mutate(pred = mu + b_i) %>%
.$pred
model_4_rmse <- RMSE(predicted_ratings_4, edx_cv$rating)
rmse_results <- bind_rows(rmse_results,
data_frame(method = "Regularised movie effect",
RMSE = model_4_rmse))
### Now I need to apply the same calculation for regularised user effects.
# NOTE(review): unlike user_avgs above, this b_u is computed against mu only
# (the movie effect is not subtracted first) — confirm intended.
user_reg_avgs <- edx_work %>%
group_by(userId) %>%
summarise(b_u = sum(rating - mu)/(n() + lambda), n_u = n())
# Visualising this.
data_frame(original = user_avgs$b_u,
regularized = user_reg_avgs$b_u,
n = user_reg_avgs$n_u) %>%
ggplot(aes(original, regularized, size=sqrt(n))) +
geom_point(shape=1, alpha=0.5)
# Apply regularised user b to prediction calculation.
## ----Algorithm 5-----------------------------------------------------------------------------------------
predicted_ratings_5 <- edx_cv %>%
left_join(user_reg_avgs, by = "userId") %>%
mutate(pred = mu + b_u) %>%
.$pred
model_5_rmse <- RMSE(predicted_ratings_5, edx_cv$rating)
rmse_results <- bind_rows(rmse_results,
data_frame(method = "Regularised user effect",
RMSE = model_5_rmse))
## ----Algorithm 6-----------------------------------------------------------------------------------------
# Apply regularised user b to whole prediction calculation, including reg movie effect.
predicted_ratings_6 <- edx_cv %>%
left_join(movie_reg_avgs, by = 'movieId') %>%
left_join(user_reg_avgs, by = "userId") %>%
mutate(pred = mu + b_i + b_u) %>%
.$pred
model_6_rmse <- RMSE(predicted_ratings_6, edx_cv$rating)
rmse_results <- bind_rows(rmse_results,
data_frame(method = "Reg movie and user effect",
RMSE = model_6_rmse))
# This yields an RMSE of 0.8844 which is already pretty good.
# Note that lambda is a parameter. We can use cross validation to tune it.
# Sweep lambda over [0, 10] and score each value on edx_cv.
# This reassigns `lambda` from the scalar 3 above to the candidate grid.
lambda <- seq(0, 10, 0.25)
## --------------------------------------------------------------------------------------------------------
# Running the refinement of best lambda value.
# The computation below is deterministic; set.seed is kept for reproducibility
# of the surrounding workflow.
set.seed(1, sample.kind = "Rounding")
rmses <- sapply(lambda, function(l){
mu <- mean(edx_work$rating)
b_i <- edx_work %>%
group_by(movieId) %>%
summarise(b_i = sum(rating - mu)/(n()+l))
# User effect is fitted on residuals after removing the movie effect here.
b_u <- edx_work %>%
left_join(b_i, by = "movieId") %>%
group_by(userId) %>%
summarise(b_u = sum(rating - b_i - mu)/(n()+l))
predicted_ratings<- edx_cv %>%
left_join(b_i, by = "movieId") %>%
left_join(b_u, by = "userId") %>%
mutate(pred = mu + b_i + b_u) %>%
.$pred
return(RMSE(predicted_ratings, edx_cv$rating))
})
plot(lambda, rmses, main = "Tuning Lambda")
## ----echo=F----------------------------------------------------------------------------------------------
print("The optimal Lambda")
lambda[which.min(rmses)]
### Applying the tuned penalty to the formal prediction.
## ----Algorithm 7-----------------------------------------------------------------------------------------
# Use the lambda selected by the cross-validation sweep above instead of the
# previously hard-coded 4.5, so the model stays optimal if the data, seed or
# grid ever change. (On the original run this evaluates to 4.5.)
best_lambda <- lambda[which.min(rmses)]
# Regularised movie effects at the tuned penalty.
b_i <- edx_work %>%
  group_by(movieId) %>%
  summarise(b_i = sum(rating - mu)/(n() + best_lambda))
# Regularised user effects, conditional on the movie effects.
b_u <- edx_work %>%
  left_join(b_i, by = "movieId") %>%
  group_by(userId) %>%
  summarise(b_u = sum(rating - b_i - mu)/(n() + best_lambda))
# Score on the internal cross-validation set.
predicted_ratings_7 <- edx_cv %>%
  left_join(b_i, by = "movieId") %>%
  left_join(b_u, by = "userId") %>%
  mutate(pred = mu + b_i + b_u) %>%
  .$pred
model_7_rmse <- RMSE(predicted_ratings_7, edx_cv$rating)
rmse_results <- bind_rows(rmse_results,
  data_frame(method = paste0("Reg mov & usr with ", best_lambda, " l"),
             RMSE = model_7_rmse))
# Some tidying up.
rm(edx, predicted_ratings_2, predicted_ratings_3, predicted_ratings_4, predicted_ratings_5, predicted_ratings_6, predicted_ratings_7)
## --------------------------------------------------------------------------------------------------------
### Principal componenent analysis.
# I will apply a pca to a small subset of EDX.
# Focusing in on the most rated movies (>1000 ratings), and most prolific users (>500 ratings).
edx_small <- edx_work %>%
group_by(movieId) %>%
filter(n() >= 1000) %>% ungroup() %>%
group_by(userId) %>%
filter(n() >= 500) %>% ungroup()
### Now I need to make a matrix out of userId's and movieId's showing corresponding ratings.
# Users as rows, movies as columns; unrated cells become NA.
y <- edx_small %>%
select(userId, movieId, rating) %>%
spread(movieId, rating) %>%
as.matrix()
y[1:10, 1:5]
# edx_work is no longer needed after this point in the script.
rm(edx_small, edx_work)
# Making column 1 the rownames. Then remove column 1.
rownames(y) <- y[,1]
y <- y[,-1]
# tmp keeps a title-labelled copy of y; used later to name the PCs.
tmp <- y
colnames(tmp) <- with(movie_titles, title[match(colnames(tmp), movieId)])
y[1:10, 1:5]
## ----Preparation for PCA---------------------------------------------------------------------------------
### Preparing the matrix for PCA.
# Remove row (user) and column (movie) means, zero-fill the missing cells,
# then re-centre rows so each user averages zero.
y <- sweep(y, 1, rowMeans(y, na.rm = TRUE))
y <- sweep(y, 2, colMeans(y, na.rm = TRUE))
y[is.na(y)] <- 0
y <- sweep(y, 1, rowMeans(y))
## ----PCA-------------------------------------------------------------------------------------------------
# center/scale disabled because the matrix was centred manually above.
pca <- prcomp(y, center = F, scale. = F, retx = T)
# Visualising the PCA.
plot(pca$sdev, main = "PCA standard deviation")
## --------------------------------------------------------------------------------------------------------
# Cumulative proportion of variance explained per PC.
var_explained <- cumsum(pca$sdev^2/sum(pca$sdev^2))
plot(var_explained, main = "PCA variance by PC index")
# Certainly most of the variation is explained by 500 principal components. It is impractical for me to
# apply all of these using the method I implement below. I will begin by applying a few PC's at a time.
# Some data visualisation. Looking at the plot of PC1 vs PC2 to get a feel for the sort of relationships
# that exist between movies.
library(ggrepel)
pcs <- data.frame(pca$rotation, name = colnames(tmp))
pcs %>% ggplot(aes(PC1, PC2)) + geom_point() +
geom_text_repel(aes(PC1, PC2, label=name),
data = filter(pcs,
PC1 < -0.1 | PC1 > 0.1 | PC2 < -0.075 | PC2 > 0.1))
## ----echo=F----------------------------------------------------------------------------------------------
pcs %>% select(name, PC1) %>% arrange(PC1) %>% slice(1:10) %>%
knitr::kable()
## ----echo=F----------------------------------------------------------------------------------------------
pcs %>% select(name, PC1) %>% arrange(desc(PC1)) %>% slice(1:10) %>%
knitr::kable()
rm(tmp)
##### I need to take the x and rotation matrices from pca and expand them to match the dimensions of
# the edx_cv matrix. That is unique userId (68052) by unique movieId (9728).
# These expanded matrices will be sparse matrices.
### Now I need the actual ratings from the edx_cv data set in a 68052 by 9728 matrix.
## ----edx_cv to matrix------------------------------------------------------------------------------------
val_ratings.m <- edx_cv %>%
select(userId, movieId, rating) %>%
spread(movieId, rating) %>%
as.matrix()
# Fixing the rownames of edx_cv matrix and removing first column that features userId
rownames(val_ratings.m) <- val_ratings.m[,1]
val_ratings.m <- val_ratings.m[,-1]
colnames(val_ratings.m) <- with(movie_titles, title[match(colnames(val_ratings.m), movieId)])
val_ratings.m[1:10, 5:7]
### I need the lists of all userId's and movieId's in the edx_cv to use as a reference in making my
# expanded pca sparse matrices.
## --------------------------------------------------------------------------------------------------------
unique_usr_val <- as.matrix(unique(edx_cv$userId))
colnames(unique_usr_val) <- c("userId")
## --------------------------------------------------------------------------------------------------------
unique_mov_val <- as.matrix(unique(edx_cv$movieId))
colnames(unique_mov_val) <- c("movieId")
# Creating the "User effect" sparse matrix.
# Users absent from the PCA subset get all-zero score rows via the NA fill.
# NOTE(review): merge() orders its result by the `by` column, while the
# prediction matrices built with spread() order rows/columns separately —
# verify the row/column alignment between pca_x/pca_rotation and y3 before
# trusting the elementwise addition done later.
pca_x <- pca$x %>% as.data.frame() %>%
tibble::rownames_to_column(., "userId") %>%
merge(unique_usr_val, ., by = "userId", all = TRUE)
pca_x <- as.matrix(pca_x[,-1])
pca_x[is.na(pca_x)] <- 0
# Creating the "Principal component" sparse matrix.
pca_rotation <- pca$rotation %>% as.data.frame() %>%
tibble::rownames_to_column(., "movieId") %>%
merge(unique_mov_val, ., by = "movieId", all = TRUE)
pca_rotation <- as.matrix(pca_rotation[,-1])
pca_rotation[is.na(pca_rotation)] <- 0
#### Now to build a large matrix of predictions from where I left off after "Reg mov and usr with 4.5".
# This will have the dimensions that are consistent with unique userId/movidId values from edx_cv.
predictions <- edx_cv %>%
left_join(b_i, by = "movieId") %>%
left_join(b_u, by = "userId") %>%
mutate(pred = mu + b_i + b_u)
## ----predictions to matrix-------------------------------------------------------------------------------
y3 <- predictions %>%
select(userId, movieId, pred) %>%
spread(movieId, pred) %>%
as.matrix()
rownames(y3) <- y3[,1]
y3 <- y3[,-1]
# Clean up.
rm(predictions)
# Latent-factor correction from the leading principal components.
# The original code materialised ten p_k (user-score column, 68052 x 1) and
# q_k (movie-loading row, 1 x 9728) vectors and summed the ten rank-1 outer
# products p_k %*% q_k. By the definition of matrix multiplication, that sum
# equals a single product of the first n_pcs columns of pca_x with the
# transpose of the first n_pcs columns of pca_rotation, so compute it in one
# step. n_pcs is a parameter rather than a hard-coded 10.
n_pcs <- 10
#### Prediction algorithm #9: Reg plus PC1 to PC10
new_pred_10 <- y3 + pca_x[, seq_len(n_pcs), drop = FALSE] %*% t(pca_rotation[, seq_len(n_pcs), drop = FALSE])
# RMSE computed cell-wise over the matrices; na.rm drops user/movie cells
# that have no rating in edx_cv.
rmse_PC1_to_10 <- sqrt(mean((val_ratings.m - new_pred_10)^2, na.rm = TRUE))
rmse_results <- bind_rows(rmse_results,
data_frame(method = "Reg plus PC1 to PC10",
RMSE = rmse_PC1_to_10))
# Summary table of all RMSE results for prediction algorithms used to date.
rmse_results %>% knitr::kable()
### FINAL Prediction Algorithm
# As a final model I am taking that last algorithm (Reg plus PC1 to PC10) which is a
# culmination of all preceding algorithms. A final assessment for the algorithm is to
# test it against the validation testing dataset that has been kept aside during the
# course of this work.
# However as the last PCA section is based on matrix operations that must match dimensions
# perfectly, some modifications are necessary to test it against the validation dataset.
# First the validation dataset has to be put into matrix form to make it compatible with
# the PCA work.
## ----Validation to matrix--------------------------------------------------------------------------------
validation.m <- validation %>%
select(userId, movieId, rating) %>%
spread(movieId, rating) %>%
as.matrix()
rownames(validation.m) <- validation.m[,1]
validation.m <- validation.m[,-1]
colnames(validation.m) <- with(movie_titles, title[match(colnames(validation.m), movieId)])
## --------------------------------------------------------------------------------------------------------
# Reference lists of every user/movie in the held-out validation set.
unique_usr_val.f <- as.matrix(unique(validation$userId))
colnames(unique_usr_val.f) <- c("userId")
## --------------------------------------------------------------------------------------------------------
unique_mov_val.f <- as.matrix(unique(validation$movieId))
colnames(unique_mov_val.f) <- c("movieId")
## ----pca$x sparse matrix---------------------------------------------------------------------------------
# Same sparse-expansion construction as for edx_cv, now against validation.
pca_x.f <- pca$x %>% as.data.frame() %>%
tibble::rownames_to_column(., "userId") %>%
merge(unique_usr_val.f, ., by = "userId", all = TRUE)
pca_x.f <- as.matrix(pca_x.f[,-1])
pca_x.f[is.na(pca_x.f)] <- 0
## ----pca$rotation sparse matrix--------------------------------------------------------------------------
pca_rotation.f <- pca$rotation %>% as.data.frame() %>%
tibble::rownames_to_column(., "movieId") %>%
merge(unique_mov_val.f, ., by = "movieId", all = TRUE)
pca_rotation.f <- as.matrix(pca_rotation.f[,-1])
pca_rotation.f[is.na(pca_rotation.f)] <- 0
## ----algorithm 7 predictions using validation------------------------------------------------------------
predictions.f <- validation %>%
left_join(b_i, by = "movieId") %>%
left_join(b_u, by = "userId") %>%
mutate(pred = mu + b_i + b_u)
## ----predictions.f to matrix-----------------------------------------------------------------------------
y3.f <- predictions.f %>%
select(userId, movieId, pred) %>%
spread(movieId, pred) %>%
as.matrix()
rownames(y3.f) <- y3.f[,1]
y3.f <- y3.f[,-1]
## ----Final algorithm-------------------------------------------------------------------------------------
# Same latent-factor correction as for edx_cv, applied to the validation
# matrices. As before, the sum of ten rank-1 outer products p_k %*% q_k is
# exactly one matrix product of the first n_pcs_final columns, so the twenty
# p/q intermediate objects are unnecessary.
n_pcs_final <- 10
new_pred_10.f <- y3.f + pca_x.f[, seq_len(n_pcs_final), drop = FALSE] %*% t(pca_rotation.f[, seq_len(n_pcs_final), drop = FALSE])
# Reporting RMSE value of final prediction algorithm
# Cell-wise RMSE over the matrices; na.rm skips user/movie pairs with no
# rating in the validation set.
rmse_final <- sqrt(mean((validation.m - new_pred_10.f)^2, na.rm = TRUE))
print("Final Algorithm RMSE")
rmse_final
|
# ==== Speleothem IADV analysis: data loading and region assignment ====
#### 1) Model relationships between spel. d18O stdev and confounding factors
#### 2) Apply correction to spel d18O s.d.
#### 3) Construct and plot uncorrected and corrected spel d18O s.d. (IADV) trends
setwd("speleothem-IADV/")
library(RMySQL)
library(dplyr)
library(ggplot2)
library(grid)
library(gridExtra)
## Load and filter spel data
# Connect to SISAL SQL database
# NOTE(review): credentials are hard-coded (root / empty password); fine for a
# local reproduction database, but consider environment variables otherwise.
mydb <- dbConnect(MySQL(), user = "root", password = "", dbname = "sisalv2",
host = "localhost")
# Select tropical/sub tropical sites and Holocene data
Raw_Data <- dbGetQuery(mydb, "SELECT * FROM site JOIN entity USING (site_id) JOIN sample USING (entity_id) JOIN original_chronology USING (sample_id) JOIN d18O USING (sample_id)
WHERE (latitude BETWEEN -35 AND 45) AND (interp_age <= 12000);")
Raw_Data <- Raw_Data %>% filter(entity_status != "superseded")
# Filter to monsoon entities
# Nested ifelse assigns each site to a monsoon region by lat/lon box; rows
# matching no box become "other" and are dropped below. An NA latitude or
# longitude would propagate NA through the chain.
Raw_Data$region <- with(Raw_Data, ifelse(latitude >= 15 & latitude <= 35 & longitude >= 75 & longitude <= 98, "ISM",
ifelse(latitude >= 20 & latitude <= 45 & longitude >= 100 & longitude <= 125, "EAM",
ifelse(latitude >= -10 & latitude <= 0 & longitude >= -80 & longitude <= -70 | latitude >= -30 & latitude <= -10 & longitude >= -60 & longitude <= -30, "SW-SAM",
ifelse(latitude >= -30 & latitude <= 5 & longitude >= 80 & longitude <= 170, "IAM",
ifelse(latitude >= -30 & latitude <= 0 & longitude >= 0 & longitude <= 50, "SAfM",
ifelse(latitude >= 0 & latitude <= 35 & longitude >= -110 & longitude <= -50, "CAM",
ifelse(latitude >= -10 & latitude <= 0 & longitude >= -70 & longitude <= -30, "NE-SAM", "other"))))))))
Raw_Data <- Raw_Data %>% filter(region != "other")
# filter to records at least 100 yrs long and at temporal res of at least 20yrs
source("spel_IADV/coverage_sampled_vs_gap.R") #loads function that gives entity length, excluding gaps and hiatuses
# Entity coverage (years of record, excluding gaps and hiatuses) for every
# entity. Built with lapply() + bind_rows() instead of growing a data.frame
# with rbind() inside a loop, which copies the accumulator on every iteration.
dat_length <- bind_rows(lapply(unique(Raw_Data$entity_id), function(ent) {
  data.frame(entity_id = ent,
             length = get_ent_coverage(entity_id = ent, age_start = -50, age_end = 12000)[, 2])
}))
dat_length <- dat_length %>% filter(length >= 100)
source("spel_IADV/entity_sampling_mean_res.R") #loads function that gives average temporal res of entity
# Mean temporal resolution per remaining entity, attached to its length row.
length_df <- bind_rows(lapply(seq_len(nrow(dat_length)), function(i) {
  data.frame(dat_length[i, ],
             mean_res = get_ent_sampling(entity_id = dat_length$entity_id[i],
                                         age_start = -50, age_end = 12000)$sampling_mean)
}))
length_df <- length_df %>% filter(mean_res <= 20)
# Calculate d18O stdev for a running window
# 100-yr windows stepped by 50 yrs (half-overlapping) across the Holocene.
bin_size <- 100
bin_hw <- bin_size/2
centres <- seq((0-bin_hw),(12000-bin_hw),bin_hw)
dat_ls <- list()
for (j in 1:length(length_df$entity_id)){ #each entity
ent <- length_df$entity_id[j]
dat <- Raw_Data %>% filter(entity_id == ent)
df_out <- data.frame()
for (i in centres){
datsub <- dat %>% filter(interp_age >= i-bin_hw & interp_age <= i+bin_hw) #subset to bin
if (nrow(datsub) <= 1){ next } #skip if one or no samples in this bin, can't calculate s.d.
sub_sd <- sd(datsub$d18O_measurement) #calc stdev for bin
n <- nrow(datsub) # calc nsamples per window
mean_d18O <- mean(datsub$d18O_measurement) #calc mean d18O per window
#extract measurement uncertainty for bin
if (length(unique(datsub$d18O_precision)) == 1){
meas_uncert <- unique(datsub$d18O_precision)
} else {
meas_uncert <- mean(datsub$d18O_precision)
}
# NOTE(review): `== T` compares against the reassignable T; `== TRUE` (or just
# the bare condition) would be safer.
if (all(is.na(datsub$depth_sample)) == T){ # if no sample depth data exists
#print(paste(ent, i))
#output
df_sub <- data.frame(entity_id = ent, region = unique(dat$region), win_start = i-bin_hw, win_end = i+bin_hw, gradient = NA,
stdev = sub_sd, nsamples = n, mean_d18O = mean_d18O, meas_uncert = meas_uncert)
} else { #if sample depth data exists, calculate growth rate
sub_reg <- lm(datsub$depth_sample ~ datsub$interp_age)
growth_rate <- sub_reg$coefficients[2]
# NOTE(review): if an entity mixes depth_ref values, unique() returns a
# length-2 vector and this if() errors on R >= 4.2 — confirm depth_ref is
# constant per entity in SISAL.
if (unique(dat$depth_ref == "from base")){ growth_rate <- growth_rate*-1 } #if depth is from base rather than from top, flip growth rate
#output
df_sub <- data.frame(entity_id = ent, region = unique(dat$region), win_start = i-bin_hw, win_end = i+bin_hw, gradient = growth_rate,
stdev = sub_sd, nsamples = n, mean_d18O = mean_d18O, meas_uncert = meas_uncert)
}
rownames(df_sub) <- NULL
df_out <- rbind(df_out, df_sub)
}
dat_ls[[j]] <- df_out # data output
}
dat_df <- bind_rows(dat_ls)
# Filter to entities with >50% windows with stdev above uncert
x <- dat_df %>%
group_by(entity_id) %>%
summarise(all_n = n(),
below_n = length(stdev[meas_uncert > stdev])) %>%
mutate(prop = below_n/all_n) %>%
filter(prop < 0.5) # only 1 entity > 0.5
dat_df <- dat_df %>% filter(entity_id %in% x$entity_id)
# remove windows where stdev < measurement uncertainty
dat_df <- dat_df %>% filter(stdev > meas_uncert)
# add site metadata
df_sitedat <- left_join(dat_df, unique(Raw_Data[,c("site_id","site_name","entity_id","latitude","longitude")]), by = "entity_id")
#output sites for map (fig 1)
sites <- df_sitedat %>% group_by(site_id, latitude, longitude) %>% summarise(n())
write.csv(sites[,-4], "spel_IADV/sites_stdev_analysis.csv", row.names = F)
## colinearity of predictor variables?
cor(df_sitedat$gradient, df_sitedat$nsamples, use = "complete.obs")
cor(df_sitedat$gradient, df_sitedat$mean_d18O, use = "complete.obs")
cor(df_sitedat$nsamples, df_sitedat$mean_d18O, use = "complete.obs")
#### Fit multiple linear regression model
# log(stdev) ~ log(growth rate) + mean d18O, fitted per hemisphere.
# NOTE(review): log(gradient) is NaN/-Inf for non-positive growth rates; lm
# drops those rows — confirm that is the intended treatment.
## NH mlr
NH_dat <- df_sitedat %>% filter(latitude > 0)
#NH_mlr <- lm(log(stdev) ~ log(nsamples) + mean_d18O, data = NH_dat, na.action = "na.exclude") ##nsamples instead of growth rate - in supplement
NH_mlr <- lm(log(stdev) ~ log(gradient) + mean_d18O, data = NH_dat, na.action = "na.exclude")
NH_mlr_summ <- summary(NH_mlr)
## SH mlr
SH_dat <- df_sitedat %>% filter(latitude < 0)
#SH_mlr <- lm(log(stdev) ~ log(nsamples) + mean_d18O, data = SH_dat, na.action = "na.exclude") ##nsamples instead of growth rate - in supplement
SH_mlr <- lm(log(stdev) ~ log(gradient) + mean_d18O, data = SH_dat, na.action = "na.exclude")
SH_mlr_summ <- summary(SH_mlr)
#### MLR residual plots:
## get residuals
# Partial residuals per predictor; "_res" suffix distinguishes them from the
# raw columns after cbind below.
# NH
partial.res <- residuals(NH_mlr, "partial")
colnames(partial.res) <- paste(colnames(partial.res), "_res", sep = "")
NH_site_metdat <- df_sitedat %>% filter(latitude > 0)
resid_dat_NH <- cbind(NH_site_metdat, partial.res)
# SH
partial.res <- residuals(SH_mlr, "partial")
colnames(partial.res) <- paste(colnames(partial.res), "_res", sep = "")
SH_site_metdat <- df_sitedat %>% filter(latitude < 0)
resid_dat_SH <- cbind(SH_site_metdat, partial.res)
## plots
# Each panel: partial residual vs predictor, with the OLS trend line drawn
# via geom_abline from a freshly fitted lm.
#growth rate NH
P1 <- ggplot(data = resid_dat_NH, aes(x = log(gradient), y = `log(gradient)_res`)) + geom_point(size = 0.2, col = "#AAAAAA") +
geom_abline(slope = lm(`log(gradient)_res` ~ log(gradient), data = resid_dat_NH)$coefficients[2], intercept = lm(`log(gradient)_res` ~ log(gradient), data = resid_dat_NH)$coefficients[1], col = "#464646") +
theme_bw() +
#theme(text = element_text(size = 12)) +
xlab("") + ylab("")
# alternative plot for MLR with n-samples - in supplement
#P1 <- ggplot(data = resid_dat_NH, aes(x = log(nsamples), y = `log(nsamples)_res`)) + geom_point(size = 0.2, col = "#AAAAAA") +
# geom_abline(slope = lm(`log(nsamples)_res` ~ log(nsamples), data = resid_dat_NH)$coefficients[2], intercept = lm(`log(nsamples)_res` ~ log(nsamples), data = resid_dat_NH)$coefficients[1], col = "#464646") +
# theme_bw() +
# #theme(text = element_text(size = 12)) +
# xlab("") + ylab("")
#mean d18O NH
P2 <- ggplot(data = resid_dat_NH, aes(x = mean_d18O, y = mean_d18O_res)) + geom_point(size = 0.2, col = "#AAAAAA") +
geom_abline(slope = lm(resid_dat_NH$mean_d18O_res ~ resid_dat_NH$mean_d18O)$coefficients[2], intercept = lm(mean_d18O_res ~ mean_d18O, data = resid_dat_NH)$coefficients[1], col = "#464646") +
theme_bw() +
#theme(text = element_text(size = 12)) +
xlab("") + ylab("")
#growth rate SH
P3 <- ggplot(data = resid_dat_SH, aes(x = log(gradient), y = `log(gradient)_res`)) + geom_point(size = 0.2, col = "#AAAAAA") +
geom_abline(slope = lm(`log(gradient)_res` ~ log(gradient), data = resid_dat_SH)$coefficients[2], intercept = lm(`log(gradient)_res` ~ log(gradient), data = resid_dat_SH)$coefficients[1], col = "#464646") +
theme_bw() +
#theme(text = element_text(size = 12)) #+
#xlab("log(growth rate) (mm/year)") + ylab("")
xlab("") + ylab("")
# alternative plot for MLR with n-samples - in supplement
#P3 <- ggplot(data = resid_dat_SH, aes(x = log(nsamples), y = `log(nsamples)_res`)) + geom_point(size = 0.2, col = "#AAAAAA") +
# geom_abline(slope = lm(`log(nsamples)_res` ~ log(nsamples), data = resid_dat_SH)$coefficients[2], intercept = lm(`log(nsamples)_res` ~ log(nsamples), data = resid_dat_SH)$coefficients[1], col = "#464646") +
# theme_bw() +
# #theme(text = element_text(size = 12)) #+
# #xlab("log(growth rate) (mm/year)") + ylab("")
# xlab("") + ylab("")
#mean d18O SH
P4 <- ggplot(data = resid_dat_SH, aes(x = mean_d18O, y = mean_d18O_res)) + geom_point(size = 0.2, col = "#AAAAAA") +
geom_abline(slope = lm(resid_dat_SH$mean_d18O_res ~ resid_dat_SH$mean_d18O)$coefficients[2], intercept = lm(mean_d18O_res ~ mean_d18O, data = resid_dat_SH)$coefficients[1], col = "#464646") +
theme_bw() +
#theme(text = element_text(size = 12)) #+
#xlab(expression(paste("mean ", delta^{18}, "O (\u2030)"))) + ylab("")
xlab("") + ylab("")
# 2x2 panel figure written to PDF (sizes are cm converted to inches).
pdf("MLR_fig2.pdf", width = 12/2.54, height = 8/2.54)
grid.arrange(P1,P2,P3,P4,
widths = c(1,1),
layout_matrix = rbind(c(1,2),
c(3,4)),
left = textGrob(expression(paste("f(", delta^{18}, "O s.d.)")), rot = 90))
dev.off()
#### apply correction
# predict stdev from model: reconstruct exp(fitted) from the hemisphere MLR
# coefficients (intercept + log(gradient) + mean_d18O terms).
NH_dat2 <- NH_dat %>% mutate(predicted_stdev = exp(NH_mlr_summ$coefficients["(Intercept)","Estimate"] +
#(NH_mlr_summ$coefficients["log(nsamples)","Estimate"]*(log(nsamples))) +
(NH_mlr_summ$coefficients["log(gradient)","Estimate"]*(log(gradient))) +
(NH_mlr_summ$coefficients["mean_d18O","Estimate"]*mean_d18O)))
SH_dat2 <- SH_dat %>% mutate(predicted_stdev = exp(SH_mlr_summ$coefficients["(Intercept)","Estimate"] +
#(SH_mlr_summ$coefficients["log(nsamples)","Estimate"]*(log(nsamples))) +
(SH_mlr_summ$coefficients["log(gradient)","Estimate"]*(log(gradient))) +
(SH_mlr_summ$coefficients["mean_d18O","Estimate"]*mean_d18O)))
## combine NH and SH data
dat_all <- rbind(NH_dat2, SH_dat2)
## apply correction: corrected IADV = observed s.d. minus the confound-driven prediction
dat_all <- dat_all %>% mutate(corrected_stdev = stdev - predicted_stdev)
# regional composite: median and interquartile range per region and window
comp <- dat_all %>% group_by(region, win_end) %>% summarise(med = median(corrected_stdev, na.rm = T), Q1 = quantile(corrected_stdev, na.rm = T)[2], Q3 = quantile(corrected_stdev, na.rm = T)[4])
comp$age <- comp$win_end - bin_hw
write.csv(comp, "spel_IADV/spel_sd.csv", row.names = F) #output
# plot
comp <- comp %>% filter(! region %in% c("NE-SAM","SAfM","CAM","IAM"));
comp$grp <- "corrected"
# combine corrected and uncorrected stdev
comp_raw <- df_sitedat %>%
filter(entity_id %in% unique(na.omit(dat_all)$entity_id)) %>%
group_by(region, win_end) %>%
summarise(med = median(stdev, na.rm = T), Q1 = quantile(stdev, na.rm = T)[2], Q3 = quantile(stdev, na.rm = T)[4]) %>%
filter(! region %in% c("NE-SAM","SAfM","CAM","IAM"))
comp_raw$grp = "uncorrected"
# BUGFIX: comp carries the extra `age` column that comp_raw lacks, so
# rbind() errors on mismatched columns; bind_rows() fills the missing
# column with NA and `age` is not used downstream.
raw_corr_comp <- bind_rows(comp, comp_raw)
raw_corr_comp[which(raw_corr_comp$region == "SW-SAM"),"region"] <- "SAM"
#variable order
raw_corr_comp$grp <- factor(raw_corr_comp$grp, c("uncorrected","corrected"))
# Convert window end from years to kyr BP for plotting.
raw_corr_comp$win_end <- raw_corr_comp$win_end/1000
png("raw_v_corrected_fig3.png", width = 13, height = 8.5, units = "cm", res = 96)
ggplot() +
geom_ribbon(data = raw_corr_comp, aes(x = win_end, ymin = Q1, ymax = Q3),col = NA, fill = "#AAAAAA") +
geom_line(data = raw_corr_comp, aes(x = win_end, y = med), col = "#464646") +
scale_x_continuous(breaks = seq(0,12,3)) +
scale_y_continuous(breaks = seq(0,1.25,0.25)) +
ylab(expression(paste(delta^{18}, "O s.d. (\u2030)"))) + xlab("Kyrs BP") +
facet_grid(grp ~ region, scales = "free_y") +
theme_bw() +
theme(legend.position = "none",
strip.background = element_rect(fill = NA))
dev.off()
## linear regression through composites (table 2)
# Slope, standard error and p-value of median IADV vs time over 0-6 kyr BP.
raw_corr_comp %>%
filter(win_end <= 6 & win_end >= 0) %>%
group_by(region) %>%
summarise(gradient = summary(lm(med ~ win_end))$coefficients[2,1],
st_err = summary(lm(med ~ win_end))$coefficients[2,2],
P_val = summary(lm(med ~ win_end))$coefficients[2,4])
| /spel_IADV/MLR_spel_d18Osd.R | no_license | SarahParker44/speleothem-IADV | R | false | false | 13,509 | r | #### 1) Model relationships between spel. d18O stdev and confounding factors
# NOTE(review): from here the speleothem script above is repeated verbatim —
# the dataset dump contains the same file twice (its first header comment is
# fused into the metadata row preceding this line).
#### 2) Apply correction to spel d18O s.d.
#### 3) Construct and plot uncorrected and corrected spel d18O s.d. (IADV) trends
setwd("speleothem-IADV/")
library(RMySQL)
library(dplyr)
library(ggplot2)
library(grid)
library(gridExtra)
## Load and filter spel data
# Connect to SISAL SQL database
mydb <- dbConnect(MySQL(), user = "root", password = "", dbname = "sisalv2",
host = "localhost")
# Select tropical/sub tropical sites and Holocene data
Raw_Data <- dbGetQuery(mydb, "SELECT * FROM site JOIN entity USING (site_id) JOIN sample USING (entity_id) JOIN original_chronology USING (sample_id) JOIN d18O USING (sample_id)
WHERE (latitude BETWEEN -35 AND 45) AND (interp_age <= 12000);")
Raw_Data <- Raw_Data %>% filter(entity_status != "superseded")
# Filter to monsoon entities
Raw_Data$region <- with(Raw_Data, ifelse(latitude >= 15 & latitude <= 35 & longitude >= 75 & longitude <= 98, "ISM",
ifelse(latitude >= 20 & latitude <= 45 & longitude >= 100 & longitude <= 125, "EAM",
ifelse(latitude >= -10 & latitude <= 0 & longitude >= -80 & longitude <= -70 | latitude >= -30 & latitude <= -10 & longitude >= -60 & longitude <= -30, "SW-SAM",
ifelse(latitude >= -30 & latitude <= 5 & longitude >= 80 & longitude <= 170, "IAM",
ifelse(latitude >= -30 & latitude <= 0 & longitude >= 0 & longitude <= 50, "SAfM",
ifelse(latitude >= 0 & latitude <= 35 & longitude >= -110 & longitude <= -50, "CAM",
ifelse(latitude >= -10 & latitude <= 0 & longitude >= -70 & longitude <= -30, "NE-SAM", "other"))))))))
Raw_Data <- Raw_Data %>% filter(region != "other")
# filter to records at least 100 yrs long and at temporal res of at least 20yrs
# (duplicate of the loops earlier in this dump; same rbind-growth pattern)
source("spel_IADV/coverage_sampled_vs_gap.R") #loads function that gives entity length, excluding gaps and hiatuses
dat_length <- data.frame()
for (i in unique(Raw_Data$entity_id)){
length <- get_ent_coverage(entity_id = i, age_start = -50, age_end = 12000)[,2]
sub_df <- data.frame(entity_id = i, length = length)
dat_length <- rbind(dat_length, sub_df)
}
dat_length <- dat_length %>% filter(length >= 100)
length_df <- data.frame()
source("spel_IADV/entity_sampling_mean_res.R") #loads function that gives average temporal res of entity
for (i in 1:nrow(dat_length)){
datlen <- get_ent_sampling(entity_id = dat_length$entity_id[i], age_start = -50, age_end = 12000)$sampling_mean
dat_len <- data.frame(c(dat_length[i,], datlen))
colnames(dat_len)[3] <- c("mean_res")
length_df <- rbind(length_df, dat_len)
}
length_df <- length_df %>% filter(mean_res <= 20)
# Calculate d18O stdev for a running window
# (duplicate of the windowed-s.d. loop earlier in this dump)
bin_size <- 100
bin_hw <- bin_size/2
centres <- seq((0-bin_hw),(12000-bin_hw),bin_hw)
dat_ls <- list()
for (j in 1:length(length_df$entity_id)){ #each entity
ent <- length_df$entity_id[j]
dat <- Raw_Data %>% filter(entity_id == ent)
df_out <- data.frame()
for (i in centres){
datsub <- dat %>% filter(interp_age >= i-bin_hw & interp_age <= i+bin_hw) #subset to bin
if (nrow(datsub) <= 1){ next } #skip if one or no samples in this bin, can't calculate s.d.
sub_sd <- sd(datsub$d18O_measurement) #calc stdev for bin
n <- nrow(datsub) # calc nsamples per window
mean_d18O <- mean(datsub$d18O_measurement) #calc mean d18O per window
#extract measurement uncertainty for bin
if (length(unique(datsub$d18O_precision)) == 1){
meas_uncert <- unique(datsub$d18O_precision)
} else {
meas_uncert <- mean(datsub$d18O_precision)
}
if (all(is.na(datsub$depth_sample)) == T){ # if no sample depth data exists
#print(paste(ent, i))
#output
df_sub <- data.frame(entity_id = ent, region = unique(dat$region), win_start = i-bin_hw, win_end = i+bin_hw, gradient = NA,
stdev = sub_sd, nsamples = n, mean_d18O = mean_d18O, meas_uncert = meas_uncert)
} else { #if sample depth data exists, calculate growth rate
sub_reg <- lm(datsub$depth_sample ~ datsub$interp_age)
growth_rate <- sub_reg$coefficients[2]
if (unique(dat$depth_ref == "from base")){ growth_rate <- growth_rate*-1 } #if depth is from base rather than from top, flip growth rate
#output
df_sub <- data.frame(entity_id = ent, region = unique(dat$region), win_start = i-bin_hw, win_end = i+bin_hw, gradient = growth_rate,
stdev = sub_sd, nsamples = n, mean_d18O = mean_d18O, meas_uncert = meas_uncert)
}
rownames(df_sub) <- NULL
df_out <- rbind(df_out, df_sub)
}
dat_ls[[j]] <- df_out # data output
}
dat_df <- bind_rows(dat_ls)
# Filter to entities with >50% windows with stdev above uncert
# (i.e. drop entities whose variability signal is mostly below measurement noise)
x <- dat_df %>%
group_by(entity_id) %>%
summarise(all_n = n(),
below_n = length(stdev[meas_uncert > stdev])) %>%
mutate(prop = below_n/all_n) %>%
filter(prop < 0.5) # only 1 entity > 0.5
dat_df <- dat_df %>% filter(entity_id %in% x$entity_id)
# remove windows where stdev < measurement uncertainty
dat_df <- dat_df %>% filter(stdev > meas_uncert)
# add site metadata (joined on entity_id only; site columns come from Raw_Data)
df_sitedat <- left_join(dat_df, unique(Raw_Data[,c("site_id","site_name","entity_id","latitude","longitude")]), by = "entity_id")
#output sites for map (fig 1); column 4 is the n() count, dropped on write
sites <- df_sitedat %>% group_by(site_id, latitude, longitude) %>% summarise(n())
write.csv(sites[,-4], "spel_IADV/sites_stdev_analysis.csv", row.names = F)
## colinearity of predictor variables? (pairwise Pearson correlations, printed)
cor(df_sitedat$gradient, df_sitedat$nsamples, use = "complete.obs")
cor(df_sitedat$gradient, df_sitedat$mean_d18O, use = "complete.obs")
cor(df_sitedat$nsamples, df_sitedat$mean_d18O, use = "complete.obs")
#### Fit multiple linear regression model
## NH mlr: log(s.d.) ~ log(growth rate) + mean d18O, Northern Hemisphere sites
# NOTE(review): log(gradient) is NaN for non-positive growth rates; those rows
# drop out via na.exclude -- confirm this is intended.
NH_dat <- df_sitedat %>% filter(latitude > 0)
#NH_mlr <- lm(log(stdev) ~ log(nsamples) + mean_d18O, data = NH_dat, na.action = "na.exclude") ##nsamples instead of growth rate - in supplement
NH_mlr <- lm(log(stdev) ~ log(gradient) + mean_d18O, data = NH_dat, na.action = "na.exclude")
NH_mlr_summ <- summary(NH_mlr)
## SH mlr: same model for Southern Hemisphere sites
SH_dat <- df_sitedat %>% filter(latitude < 0)
#SH_mlr <- lm(log(stdev) ~ log(nsamples) + mean_d18O, data = SH_dat, na.action = "na.exclude") ##nsamples instead of growth rate - in supplement
SH_mlr <- lm(log(stdev) ~ log(gradient) + mean_d18O, data = SH_dat, na.action = "na.exclude")
SH_mlr_summ <- summary(SH_mlr)
#### MLR residual plots:
## get residuals
# NH partial residuals; na.exclude pads residuals to nrow(NH_dat), so cbind
# with the metadata rows lines up. Columns suffixed "_res" to avoid clashes.
partial.res <- residuals(NH_mlr, "partial")
colnames(partial.res) <- paste(colnames(partial.res), "_res", sep = "")
NH_site_metdat <- df_sitedat %>% filter(latitude > 0)
resid_dat_NH <- cbind(NH_site_metdat, partial.res)
# SH partial residuals, same construction
partial.res <- residuals(SH_mlr, "partial")
colnames(partial.res) <- paste(colnames(partial.res), "_res", sep = "")
SH_site_metdat <- df_sitedat %>% filter(latitude < 0)
resid_dat_SH <- cbind(SH_site_metdat, partial.res)
## plots: partial-residual (component + residual) plots for each predictor;
## the trend line is a simple lm of the partial residual on the predictor
#growth rate NH
P1 <- ggplot(data = resid_dat_NH, aes(x = log(gradient), y = `log(gradient)_res`)) + geom_point(size = 0.2, col = "#AAAAAA") +
geom_abline(slope = lm(`log(gradient)_res` ~ log(gradient), data = resid_dat_NH)$coefficients[2], intercept = lm(`log(gradient)_res` ~ log(gradient), data = resid_dat_NH)$coefficients[1], col = "#464646") +
theme_bw() +
#theme(text = element_text(size = 12)) +
xlab("") + ylab("")
# alternative plot for MLR with n-samples - in supplement
#P1 <- ggplot(data = resid_dat_NH, aes(x = log(nsamples), y = `log(nsamples)_res`)) + geom_point(size = 0.2, col = "#AAAAAA") +
# geom_abline(slope = lm(`log(nsamples)_res` ~ log(nsamples), data = resid_dat_NH)$coefficients[2], intercept = lm(`log(nsamples)_res` ~ log(nsamples), data = resid_dat_NH)$coefficients[1], col = "#464646") +
# theme_bw() +
# #theme(text = element_text(size = 12)) +
# xlab("") + ylab("")
#mean d18O NH
P2 <- ggplot(data = resid_dat_NH, aes(x = mean_d18O, y = mean_d18O_res)) + geom_point(size = 0.2, col = "#AAAAAA") +
geom_abline(slope = lm(resid_dat_NH$mean_d18O_res ~ resid_dat_NH$mean_d18O)$coefficients[2], intercept = lm(mean_d18O_res ~ mean_d18O, data = resid_dat_NH)$coefficients[1], col = "#464646") +
theme_bw() +
#theme(text = element_text(size = 12)) +
xlab("") + ylab("")
#growth rate SH
P3 <- ggplot(data = resid_dat_SH, aes(x = log(gradient), y = `log(gradient)_res`)) + geom_point(size = 0.2, col = "#AAAAAA") +
geom_abline(slope = lm(`log(gradient)_res` ~ log(gradient), data = resid_dat_SH)$coefficients[2], intercept = lm(`log(gradient)_res` ~ log(gradient), data = resid_dat_SH)$coefficients[1], col = "#464646") +
theme_bw() +
#theme(text = element_text(size = 12)) #+
#xlab("log(growth rate) (mm/year)") + ylab("")
xlab("") + ylab("")
# alternative plot for MLR with n-samples - in supplement
#P3 <- ggplot(data = resid_dat_SH, aes(x = log(nsamples), y = `log(nsamples)_res`)) + geom_point(size = 0.2, col = "#AAAAAA") +
# geom_abline(slope = lm(`log(nsamples)_res` ~ log(nsamples), data = resid_dat_SH)$coefficients[2], intercept = lm(`log(nsamples)_res` ~ log(nsamples), data = resid_dat_SH)$coefficients[1], col = "#464646") +
# theme_bw() +
# #theme(text = element_text(size = 12)) #+
# #xlab("log(growth rate) (mm/year)") + ylab("")
# xlab("") + ylab("")
#mean d18O SH
P4 <- ggplot(data = resid_dat_SH, aes(x = mean_d18O, y = mean_d18O_res)) + geom_point(size = 0.2, col = "#AAAAAA") +
geom_abline(slope = lm(resid_dat_SH$mean_d18O_res ~ resid_dat_SH$mean_d18O)$coefficients[2], intercept = lm(mean_d18O_res ~ mean_d18O, data = resid_dat_SH)$coefficients[1], col = "#464646") +
theme_bw() +
#theme(text = element_text(size = 12)) #+
#xlab(expression(paste("mean ", delta^{18}, "O (\u2030)"))) + ylab("")
xlab("") + ylab("")
# Figure 2: 2x2 panel (NH top row, SH bottom row), sized in cm via inches/2.54
pdf("MLR_fig2.pdf", width = 12/2.54, height = 8/2.54)
grid.arrange(P1,P2,P3,P4,
widths = c(1,1),
layout_matrix = rbind(c(1,2),
c(3,4)),
left = textGrob(expression(paste("f(", delta^{18}, "O s.d.)")), rot = 90))
dev.off()
#### apply correction
# predict stdev from model: reconstruct the fitted value on the original
# (unlogged) scale from the regression coefficients
NH_dat2 <- NH_dat %>% mutate(predicted_stdev = exp(NH_mlr_summ$coefficients["(Intercept)","Estimate"] +
#(NH_mlr_summ$coefficients["log(nsamples)","Estimate"]*(log(nsamples))) +
(NH_mlr_summ$coefficients["log(gradient)","Estimate"]*(log(gradient))) +
(NH_mlr_summ$coefficients["mean_d18O","Estimate"]*mean_d18O)))
SH_dat2 <- SH_dat %>% mutate(predicted_stdev = exp(SH_mlr_summ$coefficients["(Intercept)","Estimate"] +
#(SH_mlr_summ$coefficients["log(nsamples)","Estimate"]*(log(nsamples))) +
(SH_mlr_summ$coefficients["log(gradient)","Estimate"]*(log(gradient))) +
(SH_mlr_summ$coefficients["mean_d18O","Estimate"]*mean_d18O)))
## combine NH and SH data
dat_all <- rbind(NH_dat2, SH_dat2)
## apply correction: corrected s.d. = observed minus model-predicted
dat_all <- dat_all %>% mutate(corrected_stdev = stdev - predicted_stdev)
# regional composite: median and interquartile range per region & window
# (quantile()[2] = 25th percentile, quantile()[4] = 75th)
comp <- dat_all %>% group_by(region, win_end) %>% summarise(med = median(corrected_stdev, na.rm = T), Q1 = quantile(corrected_stdev, na.rm = T)[2], Q3 = quantile(corrected_stdev, na.rm = T)[4])
comp$age <- comp$win_end - bin_hw
write.csv(comp, "spel_IADV/spel_sd.csv", row.names = F) #output
# plot
comp <- comp %>% filter(! region %in% c("NE-SAM","SAfM","CAM","IAM"));
comp$grp <- "corrected"
# combine corrected and uncorrected stdev
comp_raw <- df_sitedat %>%
filter(entity_id %in% unique(na.omit(dat_all)$entity_id)) %>%
group_by(region, win_end) %>%
summarise(med = median(stdev, na.rm = T), Q1 = quantile(stdev, na.rm = T)[2], Q3 = quantile(stdev, na.rm = T)[4]) %>%
filter(! region %in% c("NE-SAM","SAfM","CAM","IAM"))
comp_raw$grp = "uncorrected"
# NOTE(review): comp carries an extra `age` column that comp_raw lacks; base
# rbind() errors on mismatched data.frame columns -- confirm this runs, or use
# bind_rows() which would fill the missing column with NA.
raw_corr_comp <- rbind(comp, comp_raw)
raw_corr_comp[which(raw_corr_comp$region == "SW-SAM"),"region"] <- "SAM"
#variable order: show uncorrected row of facets above corrected
raw_corr_comp$grp <- factor(raw_corr_comp$grp, c("uncorrected","corrected"))
raw_corr_comp$win_end <- raw_corr_comp$win_end/1000
# Figure 3: median + IQR ribbon per region, uncorrected vs corrected facets
png("raw_v_corrected_fig3.png", width = 13, height = 8.5, units = "cm", res = 96)
ggplot() +
geom_ribbon(data = raw_corr_comp, aes(x = win_end, ymin = Q1, ymax = Q3),col = NA, fill = "#AAAAAA") +
geom_line(data = raw_corr_comp, aes(x = win_end, y = med), col = "#464646") +
scale_x_continuous(breaks = seq(0,12,3)) +
scale_y_continuous(breaks = seq(0,1.25,0.25)) +
ylab(expression(paste(delta^{18}, "O s.d. (\u2030)"))) + xlab("Kyrs BP") +
facet_grid(grp ~ region, scales = "free_y") +
theme_bw() +
theme(legend.position = "none",
strip.background = element_rect(fill = NA))
dev.off()
## linear regression through composites (table 2): trend of the median over
## the last 6 kyr, with standard error and p-value of the slope
raw_corr_comp %>%
filter(win_end <= 6 & win_end >= 0) %>%
group_by(region) %>%
summarise(gradient = summary(lm(med ~ win_end))$coefficients[2,1],
st_err = summary(lm(med ~ win_end))$coefficients[2,2],
P_val = summary(lm(med ~ win_end))$coefficients[2,4])
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Checkr.R
\name{evaluate}
\alias{evaluate}
\title{Evaluate a Checkr object}
\usage{
evaluate(chckr, x, level = -1)
}
\arguments{
\item{chckr}{Checkr object}
\item{x}{object to check}
\item{level}{deparse level to determine original name of x}
}
\value{
\code{NA_character_} when the check passes, otherwise a \code{character} error message
}
\description{
Evaluate executes the provided isvalid() function of a given Checkr object.
If it returns TRUE, NA is returned; otherwise a string generated from
message().
}
| /man/evaluate.Rd | permissive | kkmann/checkr | R | false | true | 534 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Checkr.R
\name{evaluate}
\alias{evaluate}
\title{Evaluate a Checkr object}
\usage{
evaluate(chckr, x, level = -1)
}
\arguments{
\item{chckr}{Checkr object}
\item{x}{object to check}
\item{level}{deparse level to determine original name of x}
}
\value{
\code{NA_character_} when the check passes, otherwise a \code{character} error message
}
\description{
Evaluate executes the provided isvalid() function of a given Checkr object.
If it returns TRUE, NA is returned; otherwise a string generated from
message().
}
|
# One-off setup (kept for reference):
# install_github("HARPGroup/hydro-tools", force=TRUE)
library(hydrotools)
library(zoo)

# basepath is assigned before config.R is sourced (presumably config.R uses
# it -- TODO confirm); config.R supplies site, omsite and the REST credentials
# referenced below.
basepath <- "/var/www/R"
source("/var/www/R/config.R")

# Set up our data source and authenticate against the REST endpoint
ds <- RomDataSource$new(site, rest_uname = rest_uname)
ds$get_token(rest_pw)

# Element ids for the Lake Manassas model components
felid <- 347380
ielid <- 231301
wselid <- 231299

# Pull run 401 output for each element
lmdatf4 <- om_get_rundata(felid, 401, site = omsite)
lmdati4 <- om_get_rundata(ielid, 401, site = omsite)
lmdat4 <- om_get_rundata(wselid, 401, site = omsite)
| /R/permitting/lake_manassas/lake_manassas.R | no_license | HARPgroup/vahydro | R | false | false | 456 | r | # install_github("HARPGroup/hydro-tools", force=TRUE)
library("hydrotools")
library('zoo')
basepath='/var/www/R';
source("/var/www/R/config.R")
# Set up our data source
ds <- RomDataSource$new(site, rest_uname = rest_uname)
ds$get_token(rest_pw)
felid <- 347380
ielid <- 231301
wselid <- 231299
lmdatf4 <- om_get_rundata(felid, 401, site=omsite)
lmdati4 <- om_get_rundata(ielid, 401, site=omsite)
lmdat4 <- om_get_rundata(wselid, 401, site=omsite)
|
#' @title Draw axis labels for 3d persp plots
#' @description Draws rotated axis labels for a 3d plot generated by
#'   graphics::persp. Supply empty labels to persp itself, i.e.
#'   persp(xlab = "", ylab = "", zlab = "", ...), then call this function.
#' @param xlab String - X axis annotation. Defaults to "x". "" suppresses it.
#' @param ylab String - Y axis annotation. Defaults to "y". "" suppresses it.
#' @param zlab String - Z axis annotation. Defaults to "z". "" suppresses it.
#' @param cex double - character magnification. Defaults to 2.
#' @param pmat matrix of type double - A 4x4 projection matrix returned by calling graphics::persp
#' @param dist vector of type double - A three-dimensional vector containing the distances in (x,y,z)-direction between
#' axis and annotation. Defaults to c(1.4,1.3,1.4).
#' @param x vector of type double - The x range. It suffices, to supply c(min(x),max(x))
#' @param y vector of type double - The y range. It suffices, to supply c(min(y),max(y))
#' @param z vector of type double - The z range. It suffices, to supply c(min(z),max(z))
#' @details Each label is placed at the projected midpoint of its axis and
#'   rotated to match the projected axis direction. The graphics parameters
#'   this function modifies (srt, xpd) are restored on exit, even on error.
#' @return Invisibly returns NULL; called for its side effect of drawing text.
#' @author Philipp van Wickevoort Crommelin
#' @examples
#' x = seq(from = -10, to = 10, length.out = 30)
#' y = x
#' f = function(x, y) {
#'   r = sqrt(x^2 + y^2)
#'   10 * sin(r)/r
#' }
#' z = outer(x, y, f)
#' z[is.na(z)] = 1
#' p1 = persp(x = x, y = y, z = z, theta = 30, phi = 30, expand = 0.5,
#'            col = "lightblue", ticktype = "detailed",
#'            xlab = "", ylab = "", zlab = "", r = 10)
#' myBayes::axLabels3d(x = x, y = y, z = c(min(z), max(z)), pmat = p1,
#'                     xlab = "x label", ylab = "y label", zlab = "z label")
#' @export
axLabels3d <- function(xlab = "x", ylab = "y", zlab = "z", cex = 2, pmat,
                       dist = c(1.4, 1.3, 1.4),
                       x, y, z) {
  # Save only the graphics parameters we touch and restore them even if
  # drawing fails part-way through.
  opar <- par(c("srt", "xpd"))
  on.exit(par(opar), add = TRUE)
  # Labels sit outside the plot region, so clipping must be disabled for
  # every label. BUG FIX: the original set xpd = NA only inside the xlab
  # branch, so y/z labels could be clipped whenever xlab == "".
  par(xpd = NA)

  # Project a 3d point c(x, y, z) onto the 2d plotting plane.
  project2d <- function(v) trans3d(x = v[1], y = v[2], z = v[3], pmat = pmat)

  # Draw one rotated label: project the axis end points, derive the screen
  # angle of the axis, and print the label at the projected midpoint.
  draw_label <- function(bot3d, mid3d, top3d, label, extra_rot = 0) {
    bot2d <- project2d(bot3d)
    mid2d <- project2d(mid3d)
    top2d <- project2d(top3d)
    angle <- 180 * atan((top2d$y - bot2d$y)/(top2d$x - bot2d$x))/pi + extra_rot
    par(srt = angle) # rotate text by angle
    text(x = mid2d$x, y = mid2d$y, labels = label, cex = cex)
  }

  xmin <- min(x); xmax <- max(x); xrng <- xmax - xmin
  ymin <- min(y); ymax <- max(y); yrng <- ymax - ymin
  zmin <- min(z); zmax <- max(z)

  # x axis label, offset from the axis in y-direction by dist[1]
  if (xlab != "") {
    draw_label(c(xmin, ymax - yrng*dist[1], zmin),
               c(mean(range(x)), ymax - yrng*dist[1], zmin),
               c(xmax, ymax - yrng*dist[1], zmin),
               xlab)
  }
  # y axis label, offset from the axis in x-direction by dist[2]
  if (ylab != "") {
    draw_label(c(xmin + xrng*dist[2], ymin, zmin),
               c(xmin + xrng*dist[2], mean(range(y)), zmin),
               c(xmin + xrng*dist[2], ymax, zmin),
               ylab)
  }
  # z axis label; the extra 180 degrees keeps the vertical text upright
  if (zlab != "") {
    xoff <- (xmax - xrng*dist[3])/sqrt(2)
    yoff <- (ymax - yrng*dist[3])/sqrt(2)
    draw_label(c(xoff, yoff, zmin),
               c(xoff, yoff, mean(range(z))),
               c(xoff, yoff, zmax),
               zlab,
               extra_rot = 180)
  }
  invisible(NULL)
}
| /R/labels3d.R | no_license | PhilippVWC/myBayes | R | false | false | 4,546 | r | #' @title Draw axis labels for 3d persp plots
#' @description This function draws axis labels for a 3d plot generated by graphics::persp
#' @param xlab String - X axis annotation. Defaults to "x".
#' @param ylab String - Y axis annotation. Defaults to "y".
#' @param zlab String - Z axis annotation. Defaults to "z".
#' @param cex double - character magnification. Defaults to 2.
#' @param pmat matrix of type double - A 4x4 matrix returned by calling graphics::persp
#' @param dist vector of type double - A three-dimensional vector containing the distances in (x,y,z)-direction between
#' axis and annotation. Defaults to c(1.4,1.3,1.4).
#' @param x vector of type double - The x range. It suffices, to supply c(min(x),max(x))
#' @param y vector of type double - The y range. It suffices, to supply c(min(y),max(y))
#' @param z vector of type double - The z range. It suffices, to supply c(min(z),max(z))
#' @details This function is especially designed for persp plots, since these lack manipulation options for
#' individual axis annotation texts. Please supply empty axis labels to persp, i.e. persp(xlab = "",ylab = "",zlab="",...).
#' @return No return value
#' @author Philipp van Wickevoort Crommelin
#' @examples
#' x = seq(from = -10,
#' to = 10,
#' length.out = 30)
#' y = x
#' f = function(x,y) {
#' r =sqrt(x^2+y^2)
#' 10 * sin(r)/r
#' }
#' z = outer(x, y, f)
#' z[is.na(z)] = 1
#'
#' par(mfrow=c(2, 1))
#'
#' p1 = persp(x = x,
#' y = y,
#' z = z,
#' theta = 30,
#' phi = 30,
#' expand = 0.5,
#' col = "lightblue",
#' ticktype="detailed",
#' xlab="",
#' ylab="",
#' zlab="",
#' r = 10)
#' myBayes::axLabels3d(x = x,
#' y = y,
#' z = c(min(z),max(z)),
#' pmat = p1,
#' xlab = "x label",
#' ylab = "y label",
#' zlab = "z label")
#'
#' p1 = persp(x = x,
#' y = y,
#' z = z,
#' theta = 30,
#' phi = 30,
#' expand = 0.5,
#' col = "lightblue",
#' ticktype="detailed",
#' xlab="",
#' ylab="",
#' zlab="",
#' r = 10)
#' myBayes::axLabels3d(x = x,
#' y = y,
#' z = c(min(z),max(z)),
#' pmat = p1,
#' dist = c(1.6,1.5,1.6),
#' xlab = "x label",
#' zlab = "z label")
#'
#' @export
axLabels3d = function(xlab = "x", ylab = "y", zlab = "z", cex = 2, pmat,
dist = c(1.4,1.3,1.4),
x,y,z){
gpar = par()
project2d = function(v) trans3d(x = v[1],
y = v[2],
z = v[3],
pmat = pmat)
xmin=min(x)
xmax=max(x)
ymin=min(y)
ymax=max(y)
zmin=min(z)
zmax=max(z)
xrng = xmax-xmin
yrng = ymax-ymin
zrng = zmax-zmin
#x axis
if(xlab!=""){
bot3d = c(xmin,ymax-yrng*dist[1],zmin)
top3d = c(xmax,ymax-yrng*dist[1],zmin)
mid3d = c(mean(range(x)),ymax-yrng*dist[1],zmin)
bot2d = project2d(bot3d)
mid2d = project2d(mid3d)
top2d = project2d(top3d)
angle = 180 * atan((top2d$y-bot2d$y)/(top2d$x-bot2d$x))/pi
par(xpd = NA, #enable drawing out of plotting area
srt = angle) #rotate text by angle
text(x = mid2d$x,
y = mid2d$y,
labels = xlab,
cex = cex)
}
#y axis
if(ylab!=""){
bot3d = c(xmin+xrng*dist[2],ymin,zmin)
mid3d = c(xmin+xrng*dist[2],mean(range(y)),zmin)
top3d = c(xmin+xrng*dist[2],ymax,zmin)
bot2d = project2d(bot3d)
mid2d = project2d(mid3d)
top2d = project2d(top3d)
angle = 180 * atan((top2d$y-bot2d$y)/(top2d$x-bot2d$x))/pi
par(srt = angle) #rotate text by angle
text(x = mid2d$x,
y = mid2d$y,
labels = ylab,
cex = cex)
}
#z axis
if(zlab!=""){
bot3d = c((xmax-xrng*dist[3])/sqrt(2),(ymax-yrng*dist[3])/sqrt(2),zmin)
mid3d = c((xmax-xrng*dist[3])/sqrt(2),(ymax-yrng*dist[3])/sqrt(2),mean(range(z)))
top3d = c((xmax-xrng*dist[3])/sqrt(2),(ymax-yrng*dist[3])/sqrt(2),zmax)
bot2d = project2d(bot3d)
mid2d = project2d(mid3d)
top2d = project2d(top3d)
angle = 180 * atan((top2d$y-bot2d$y)/(top2d$x-bot2d$x))/pi + 180
par(srt = angle) #rotate text by angle
text(x = mid2d$x,
y = mid2d$y,
labels = zlab,
cex = cex)
}
#reset par
par(srt = gpar$srt,
xpd = gpar$xpd)
}
|
library(shiny)
library(DT)
library(leaflet)
# UI: a year selector and a (shrunk-font) data table in the sidebar, and a
# leaflet map filling most of the main panel.
# NOTE(review): pageWithSidebar() is a legacy shiny layout; consider
# fluidPage() + sidebarLayout() -- confirm the targeted shiny version first.
ui <- pageWithSidebar(
headerPanel("Welsh Rail Dataset"),
sidebarPanel( width=4, selectInput("Year", "Please Select Year:", choices=c("2017-2018", "2016-2017")), div(DT::dataTableOutput("mytable"), style = "font-size:60%")),
mainPanel(leafletOutput("myleaflet",width="90%",height="700px")
)
)
|
# NOTE: one-off install kept from the original; normally this would not run
# on every execution of the script.
install.packages("AppliedPredictiveModeling")
library(AppliedPredictiveModeling)
library(caret)
# Quiz: 50/50 partition of the Alzheimer data into training/testing sets.
data(AlzheimerDisease)
adData <- data.frame(diagnosis,predictors)
# list = FALSE returns an index matrix directly. Spelled out because TRUE/FALSE
# are reserved words while T/F are ordinary (reassignable) variables.
train <- createDataPartition(diagnosis, p = .5, list = FALSE)
training <- adData[train,]
testing <- adData[-train,]
#answer is list = F, and both dfs in adData assignment
# Make a plot of the outcome (CompressiveStrength) versus the index of the samples.
# Color by each of the variables in the data set (you may find the cut2() function in
# the Hmisc package useful for turning continuous covariates into factors).
# What do you notice in these plots?
data(concrete)
set.seed(1000)
inTrain = createDataPartition(mixtures$CompressiveStrength, p = 3/4)[[1]]
training = mixtures[ inTrain,]
testing = mixtures[-inTrain,]
# BUG FIX: nrows() does not exist in base R (nrow() is used correctly later in
# this script); seq_len(nrow(...)) is the safe row-index sequence.
training$index <- seq_len(nrow(training))
qplot(index, CompressiveStrength, data = training)
# One plot per covariate (the original repeated the Age and FlyAsh plots twice)
qplot(index, CompressiveStrength, color = Age, data = training)
qplot(index, CompressiveStrength, color = FlyAsh, data = training)
qplot(index, CompressiveStrength, color = Water, data = training)
# BUG FIX: column is spelled "Superplasticizer" (as used below), not
# "Superplasticizier".
qplot(index, CompressiveStrength, color = Superplasticizer, data = training)
#a slight pattern emerges, CS goes down w/ index, no var explains it
#why log transform won't work for Superplasticizer
qplot(Superplasticizer, data = training)
qplot(log(Superplasticizer), data = training)
qplot(log(Superplasticizer+1), data = training) #same skewness
# Find all the predictor variables in the training set that begin with IL.
# Perform principal components on these variables with the preProcess() function
# from the caret package. Calculate the number of principal components needed
# to capture 80% of the variance. How many are there?
library(caret)
library(AppliedPredictiveModeling)
set.seed(3433)
data(AlzheimerDisease)
adData = data.frame(diagnosis,predictors)
inTrain = createDataPartition(adData$diagnosis, p = 3/4)[[1]]
training = adData[ inTrain,]
testing = adData[-inTrain,]
#7 pc account for 80% of the variance
library(dplyr)
il <- select(training,diagnosis,starts_with("IL"))
# NOTE(review): `il` still contains the factor column `diagnosis`; this
# assumes preProcess() skips non-numeric columns -- confirm.
preProcess(il, method = "pca", thresh = 0.8)
#compare fit with pca and without pca
library(caret)
library(AppliedPredictiveModeling)
library(dplyr)
set.seed(3433)
data(AlzheimerDisease)
adData = data.frame(diagnosis,predictors)
inTrain = createDataPartition(adData$diagnosis, p = 3/4)[[1]]
training = adData[ inTrain,]
testing = adData[-inTrain,]
il <- select(training,diagnosis,starts_with("IL"))
iltest <- select(testing, diagnosis, starts_with("IL"))
#build and test a glm model on the principal components
ilPCA <- preProcess(il, method ="pca", thresh = .8)
trainPC <- predict(ilPCA, il) #project the TRAINING IL predictors onto the PCs
modelFit <- train(diagnosis ~ ., method = "glm", data = trainPC)
testPC <- predict(ilPCA,iltest) #project the TEST set using the PCA fitted on training
confusionMatrix(testing$diagnosis,predict(modelFit,testPC))
#baseline: glm on the raw IL predictors (no PCA) for comparison
modelFit <- train(diagnosis ~ ., method = "glm", data = il)
confusionMatrix(testing$diagnosis, predict(modelFit,testing))
| /quizzesAndOther/quiz1.R | no_license | michaelrahija/PracticalMachineLearning | R | false | false | 3,133 | r | install.packages("AppliedPredictiveModeling")
library(AppliedPredictiveModeling)
library(caret)
data(AlzheimerDisease)
adData <- data.frame(diagnosis,predictors)
train <- createDataPartition(diagnosis, p = .5, list = F)
training <- adData[train,]
testing <- adData[-train,]
#answer is list = F, and both dfs in adData assignment
# Make a plot of the outcome (CompressiveStrength) versus the index of the samples.
# Color by each of the variables in the data set (you may find the cut2() function in
# the Hmisc package useful for turning continuous covariates into factors).
# What do you notice in these plots?
data(concrete)
set.seed(1000)
inTrain = createDataPartition(mixtures$CompressiveStrength, p = 3/4)[[1]]
training = mixtures[ inTrain,]
testing = mixtures[-inTrain,]
training$index <- 1:nrows(training)
qplot(index, CompressiveStrength, data = training)
qplot(index, CompressiveStrength, color = Age,data = training)
qplot(index, CompressiveStrength, color = FlyAsh,data = training)
qplot(index, CompressiveStrength, color = Age,data = training)
qplot(index, CompressiveStrength, color = FlyAsh,data = training)
qplot(index, CompressiveStrength, color = Water,data = training)
qplot(index, CompressiveStrength, color = Superplasticizier,data = training)
#a slight pattern emerges, CS goes down w/ index, no var explains it
#why log transform won't work for superplaticizier
qplot(Superplasticizer, data = training)
qplot(log(Superplasticizer), data = training)
qplot(log(Superplasticizer+1), data = training) #same skeness
# Find all the predictor variables in the training set that begin with IL.
# Perform principal components on these variables with the preProcess() function
# from the caret package. Calculate the number of principal components needed
# to capture 80% of the variance. How many are there?
library(caret)
library(AppliedPredictiveModeling)
set.seed(3433)
data(AlzheimerDisease)
adData = data.frame(diagnosis,predictors)
inTrain = createDataPartition(adData$diagnosis, p = 3/4)[[1]]
training = adData[ inTrain,]
testing = adData[-inTrain,]
#7 pc account for 80% of the variance
library(dplyr)
il <- select(training,diagnosis,starts_with("IL"))
preProcess(il, method = "pca", thresh = 0.8)
#compare fit with pca and without pca
library(caret)
library(AppliedPredictiveModeling)
library(dplyr)
set.seed(3433)
data(AlzheimerDisease)
adData = data.frame(diagnosis,predictors)
inTrain = createDataPartition(adData$diagnosis, p = 3/4)[[1]]
training = adData[ inTrain,]
testing = adData[-inTrain,]
il <- select(training,diagnosis,starts_with("IL"))
iltest <- select(testing, diagnosis, starts_with("IL"))
#build and test model using PCS
ilPCA <- preProcess(il, method ="pca", thresh = .8)
trainPC <- predict(ilPCA, il) #update test dataset using PCA
modelFit <- train(diagnosis ~ ., method = "glm", data = trainPC)
testPC <- predict(ilPCA,iltest) #update training set using parameters of PCA from traing
confusionMatrix(testing$diagnosis,predict(modelFit,testPC))
modelFit <- train(diagnosis ~ ., method = "glm", data = il)
confusionMatrix(testing$diagnosis, predict(modelFit,testing))
|
# NOTE(review): require() returns FALSE instead of erroring when a package is
# missing; library() would fail fast here -- confirm before changing.
require(xgboost)
require(jsonlite)
source('../generate_models_params.R')
context("Models from previous versions of XGBoost can be loaded")
# Shared constants for the generated models (kForests, kRounds, kClasses),
# provided by generate_models_params.R
metadata <- model_generator_metadata()
# Assert model-parameter invariants shared by every generated model:
# 4 features and the gbtree booster.
run_model_param_check <- function (config) {
expect_equal(config$learner$learner_model_param$num_feature, '4')
expect_equal(config$learner$learner_train_param$booster, 'gbtree')
}
# Count the individual trees in a booster by matching the "booster[<i>]"
# section headers in its text dump.
#
# @param booster an object accepted by xgb.dump()
# @return the number of trees (0 if no headers are found)
get_num_tree <- function (booster) {
  dump <- xgb.dump(booster)
  m <- regexec('booster\\[[0-9]+\\]', dump, perl = TRUE)
  m <- regmatches(dump, m)
  # sum(lengths(.)) is the idiomatic form and, unlike Reduce('+', ...),
  # returns 0 instead of NULL when the input is empty.
  num_tree <- sum(lengths(m))
  return (num_tree)
}
# Check a loaded booster against the expectations for its model family
# (name is one of 'cls', 'logit', 'ltr', 'reg'): tree count derived from
# the generator metadata, plus objective/base_score/num_class parameters.
run_booster_check <- function (booster, name) {
# If given a handle, we need to call xgb.Booster.complete() prior to using xgb.config().
if (inherits(booster, "xgb.Booster") && xgboost:::is.null.handle(booster$handle)) {
booster <- xgb.Booster.complete(booster)
}
config <- jsonlite::fromJSON(xgb.config(booster))
run_model_param_check(config)
if (name == 'cls') {
# multiclass softmax: one forest per class per round
expect_equal(get_num_tree(booster), metadata$kForests * metadata$kRounds * metadata$kClasses)
expect_equal(as.numeric(config$learner$learner_model_param$base_score), 0.5)
expect_equal(config$learner$learner_train_param$objective, 'multi:softmax')
expect_equal(as.numeric(config$learner$learner_model_param$num_class), metadata$kClasses)
} else if (name == 'logit') {
# binary logistic regression
expect_equal(get_num_tree(booster), metadata$kForests * metadata$kRounds)
expect_equal(as.numeric(config$learner$learner_model_param$num_class), 0)
expect_equal(config$learner$learner_train_param$objective, 'binary:logistic')
} else if (name == 'ltr') {
# learning-to-rank
expect_equal(get_num_tree(booster), metadata$kForests * metadata$kRounds)
expect_equal(config$learner$learner_train_param$objective, 'rank:ndcg')
} else {
# anything else must be the regression model
expect_equal(name, 'reg')
expect_equal(get_num_tree(booster), metadata$kForests * metadata$kRounds)
expect_equal(as.numeric(config$learner$learner_model_param$base_score), 0.5)
expect_equal(config$learner$learner_train_param$objective, 'reg:squarederror')
}
}
test_that("Models from previous versions of XGBoost can be loaded", {
# Fetch the archive of models saved by older XGBoost versions from S3
bucket <- 'xgboost-ci-jenkins-artifacts'
region <- 'us-west-2'
file_name <- 'xgboost_r_model_compatibility_test.zip'
zipfile <- file.path(getwd(), file_name)
model_dir <- file.path(getwd(), 'models')
download.file(paste('https://', bucket, '.s3-', region, '.amazonaws.com/', file_name, sep = ''),
destfile = zipfile, mode = 'wb')
unzip(zipfile, overwrite = TRUE)
# A minimal 1x4 input matching the 4-feature models, used to smoke-test predict()
pred_data <- xgb.DMatrix(matrix(c(0, 0, 0, 0), nrow = 1, ncol = 4))
lapply(list.files(model_dir), function (x) {
model_file <- file.path(model_dir, x)
# File names look like "xgboost-<version>.<name>.<ext>"; capture version and name
m <- regexec("xgboost-([0-9\\.]+)\\.([a-z]+)\\.[a-z]+", model_file, perl = TRUE)
m <- regmatches(model_file, m)[[1]]
model_xgb_ver <- m[2]  # NOTE(review): captured but currently unused
name <- m[3]
# .rds files hold serialized R objects; everything else goes through xgb.load()
if (endsWith(model_file, '.rds')) {
booster <- readRDS(model_file)
} else {
booster <- xgb.load(model_file)
}
predict(booster, newdata = pred_data)
run_booster_check(booster, name)
})
# All real assertions happen inside run_booster_check(); this keeps testthat
# from reporting the test as empty.
expect_true(TRUE)
})
| /R-package/tests/testthat/test_model_compatibility.R | permissive | foocares/xgboost | R | false | false | 3,095 | r | require(xgboost)
require(jsonlite)
source('../generate_models_params.R')
context("Models from previous versions of XGBoost can be loaded")
metadata <- model_generator_metadata()
run_model_param_check <- function (config) {
expect_equal(config$learner$learner_model_param$num_feature, '4')
expect_equal(config$learner$learner_train_param$booster, 'gbtree')
}
get_num_tree <- function (booster) {
dump <- xgb.dump(booster)
m <- regexec('booster\\[[0-9]+\\]', dump, perl = TRUE)
m <- regmatches(dump, m)
num_tree <- Reduce('+', lapply(m, length))
return (num_tree)
}
run_booster_check <- function (booster, name) {
# If given a handle, we need to call xgb.Booster.complete() prior to using xgb.config().
if (inherits(booster, "xgb.Booster") && xgboost:::is.null.handle(booster$handle)) {
booster <- xgb.Booster.complete(booster)
}
config <- jsonlite::fromJSON(xgb.config(booster))
run_model_param_check(config)
if (name == 'cls') {
expect_equal(get_num_tree(booster), metadata$kForests * metadata$kRounds * metadata$kClasses)
expect_equal(as.numeric(config$learner$learner_model_param$base_score), 0.5)
expect_equal(config$learner$learner_train_param$objective, 'multi:softmax')
expect_equal(as.numeric(config$learner$learner_model_param$num_class), metadata$kClasses)
} else if (name == 'logit') {
expect_equal(get_num_tree(booster), metadata$kForests * metadata$kRounds)
expect_equal(as.numeric(config$learner$learner_model_param$num_class), 0)
expect_equal(config$learner$learner_train_param$objective, 'binary:logistic')
} else if (name == 'ltr') {
expect_equal(get_num_tree(booster), metadata$kForests * metadata$kRounds)
expect_equal(config$learner$learner_train_param$objective, 'rank:ndcg')
} else {
expect_equal(name, 'reg')
expect_equal(get_num_tree(booster), metadata$kForests * metadata$kRounds)
expect_equal(as.numeric(config$learner$learner_model_param$base_score), 0.5)
expect_equal(config$learner$learner_train_param$objective, 'reg:squarederror')
}
}
test_that("Models from previous versions of XGBoost can be loaded", {
bucket <- 'xgboost-ci-jenkins-artifacts'
region <- 'us-west-2'
file_name <- 'xgboost_r_model_compatibility_test.zip'
zipfile <- file.path(getwd(), file_name)
model_dir <- file.path(getwd(), 'models')
download.file(paste('https://', bucket, '.s3-', region, '.amazonaws.com/', file_name, sep = ''),
destfile = zipfile, mode = 'wb')
unzip(zipfile, overwrite = TRUE)
pred_data <- xgb.DMatrix(matrix(c(0, 0, 0, 0), nrow = 1, ncol = 4))
lapply(list.files(model_dir), function (x) {
model_file <- file.path(model_dir, x)
m <- regexec("xgboost-([0-9\\.]+)\\.([a-z]+)\\.[a-z]+", model_file, perl = TRUE)
m <- regmatches(model_file, m)[[1]]
model_xgb_ver <- m[2]
name <- m[3]
if (endsWith(model_file, '.rds')) {
booster <- readRDS(model_file)
} else {
booster <- xgb.load(model_file)
}
predict(booster, newdata = pred_data)
run_booster_check(booster, name)
})
expect_true(TRUE)
})
|
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://www.rstudio.com/shiny/
#
library(shiny)
library(GARPFRM)
shinyServer(function(input, output) {
# Option specification, rebuilt only when the action button is pressed;
# isolate() prevents the individual inputs from triggering recomputation.
spec <- reactive({
input$goButton
isolate({
optionSpec(style=input$style,
type=input$type,
S0=input$S0,
K=input$K,
maturity=input$maturity,
r=input$rfr,
volatility=input$vol,
q=input$q)
})
})
# Option value under the selected pricing method (N = number of steps)
sol <- reactive({
input$goButton
isolate({
#optimize.portfolio(R=data(), portfolio=portf(),
# optimize_method=input$optimize_method,
# search_size=input$search_size, trace=TRUE)
optionValue(option=spec(), method=input$method, N=input$N)
})
})
# One reactive per Greek, all following the same goButton/isolate pattern.
# NOTE(review): the five reactives below differ only in the greek name; a
# loop with local() could remove the duplication -- confirm before refactoring.
delta <- reactive({
input$goButton
isolate({
computeGreeks(spec(), greek="delta")
})
})
theta <- reactive({
input$goButton
isolate({
computeGreeks(spec(), greek="theta")
})
})
gamma <- reactive({
input$goButton
isolate({
computeGreeks(spec(), greek="gamma")
})
})
rho <- reactive({
input$goButton
isolate({
computeGreeks(spec(), greek="rho")
})
})
vega <- reactive({
input$goButton
isolate({
computeGreeks(spec(), greek="vega")
})
})
# Text outputs: the computed value and each Greek
output$value <- renderPrint({
sol()
})
output$delta <- renderPrint({
delta()
})
output$theta <- renderPrint({
theta()
})
output$gamma <- renderPrint({
gamma()
})
output$rho <- renderPrint({
rho()
})
output$vega <- renderPrint({
vega()
})
# Plot outputs: each Greek over the user-selected underlying price range
output$plotDelta <- renderPlot({
computeGreeks(option = spec(),
greek = "delta",
prices = seq(input$price_range[1],input$price_range[2], 0.5),
plot = TRUE)
})
output$plotTheta <- renderPlot({
computeGreeks(option = spec(),
greek = "theta",
prices = seq(input$price_range[1],input$price_range[2], 0.5),
plot = TRUE)
})
output$plotGamma <- renderPlot({
computeGreeks(option = spec(),
greek = "gamma",
prices = seq(input$price_range[1],input$price_range[2], 0.5),
plot = TRUE)
})
output$plotRho <- renderPlot({
computeGreeks(option = spec(),
greek = "rho",
prices = seq(input$price_range[1],input$price_range[2], 0.5),
plot = TRUE)
})
output$plotVega <- renderPlot({
computeGreeks(option = spec(),
greek = "vega",
prices = seq(input$price_range[1],input$price_range[2], 0.5),
plot = TRUE)
})
})
| /ShinyApps/OptionAnalysis/server.R | no_license | agamat/GARPFRM_shiny | R | false | false | 2,901 | r |
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://www.rstudio.com/shiny/
#
library(shiny)
library(GARPFRM)
shinyServer(function(input, output) {
spec <- reactive({
input$goButton
isolate({
optionSpec(style=input$style,
type=input$type,
S0=input$S0,
K=input$K,
maturity=input$maturity,
r=input$rfr,
volatility=input$vol,
q=input$q)
})
})
sol <- reactive({
input$goButton
isolate({
#optimize.portfolio(R=data(), portfolio=portf(),
# optimize_method=input$optimize_method,
# search_size=input$search_size, trace=TRUE)
optionValue(option=spec(), method=input$method, N=input$N)
})
})
delta <- reactive({
input$goButton
isolate({
computeGreeks(spec(), greek="delta")
})
})
theta <- reactive({
input$goButton
isolate({
computeGreeks(spec(), greek="theta")
})
})
gamma <- reactive({
input$goButton
isolate({
computeGreeks(spec(), greek="gamma")
})
})
rho <- reactive({
input$goButton
isolate({
computeGreeks(spec(), greek="rho")
})
})
vega <- reactive({
input$goButton
isolate({
computeGreeks(spec(), greek="vega")
})
})
output$value <- renderPrint({
sol()
})
output$delta <- renderPrint({
delta()
})
output$theta <- renderPrint({
theta()
})
output$gamma <- renderPrint({
gamma()
})
output$rho <- renderPrint({
rho()
})
output$vega <- renderPrint({
vega()
})
output$plotDelta <- renderPlot({
computeGreeks(option = spec(),
greek = "delta",
prices = seq(input$price_range[1],input$price_range[2], 0.5),
plot = TRUE)
})
output$plotTheta <- renderPlot({
computeGreeks(option = spec(),
greek = "theta",
prices = seq(input$price_range[1],input$price_range[2], 0.5),
plot = TRUE)
})
output$plotGamma <- renderPlot({
computeGreeks(option = spec(),
greek = "gamma",
prices = seq(input$price_range[1],input$price_range[2], 0.5),
plot = TRUE)
})
output$plotRho <- renderPlot({
computeGreeks(option = spec(),
greek = "rho",
prices = seq(input$price_range[1],input$price_range[2], 0.5),
plot = TRUE)
})
output$plotVega <- renderPlot({
computeGreeks(option = spec(),
greek = "vega",
prices = seq(input$price_range[1],input$price_range[2], 0.5),
plot = TRUE)
})
})
|
# Fit pupil-diameter models, then test per-ROI whether dF/F differs while a
# trial is in progress.
dat <- read.csv("C:/Users/viviani/Desktop/test.csv")

# Pupil diameter vs. peri-trial period, with an exposure-count interaction.
model_peritrial <- lm(pupil_diameter ~ as.factor(peritrial_factor) + number_of_trials_seen:as.factor(peritrial_factor),
                      data = dat)
# Pupil diameter vs. within-trial period.
model_intertrial <- lm(pupil_diameter ~ as.factor(trial_factor),
                       data = dat)

# -999 in trial_factor encodes "no trial in progress".
dat$trial_is_happening <- dat$trial_factor != (-999)

rois <- unique(dat$ROI_ID)
n <- length(rois)
pvals <- numeric(n)
coeffs <- numeric(n)
# seq_len() is safe when n == 0 (1:n would iterate over c(1, 0)).
for (i in seq_len(n)) {
  roi <- rois[[i]]
  cat(sprintf("%d of %d \r", i, n))
  # renamed from `subset`, which shadowed base::subset
  roi_data <- dat[dat$ROI_ID == roi, ]
  model_trial <- lm(dF_on_F ~ trial_is_happening,
                    data = roi_data)
  pvals[i] <- anova(model_trial)$`Pr(>F)`[1]  # the p-value
  coeffs[i] <- model_trial$coefficients[2]
}
# Correct for multiple comparisons across ROIs.
pvals <- p.adjust(pvals, method = "fdr")
| /RScripts/pupils_vs_peritrial.R | no_license | vivian-imbriotis/AccDataTools | R | false | false | 789 | r | dat <- read.csv("C:/Users/viviani/Desktop/test.csv")
model_peritrial <- lm(pupil_diameter ~ as.factor(peritrial_factor) + number_of_trials_seen:as.factor(peritrial_factor),
data = dat)
model_intertrial <-lm(pupil_diameter ~ as.factor(trial_factor),
data = dat)
dat$trial_is_happening <- dat$trial_factor!=(-999)
rois <- unique(dat$ROI_ID)
n <- length(rois)
pvals = numeric(n)
coeffs = numeric(n)
for (i in 1:n){
roi = rois[[i]]
cat(sprintf("%d of %d \r",i,n))
subset = dat[dat$ROI_ID == roi,]
model_trial <- lm(dF_on_F~trial_is_happening,
data= subset)
pvals[i] <- anova(model_trial)$`Pr(>F)`[1] #The pvalue
coeffs[i]<- model_trial$coefficients[2]
}
pvals <- p.adjust(pvals, method = 'fdr')
|
## makeCacheMatrix: wrap a matrix in a list of accessor closures that cache
## its inverse in the enclosing environment.
##   set(y)      replace the matrix and invalidate the cached inverse
##   get()       return the matrix
##   setinv(inv) store a computed inverse in the cache
##   getinv()    return the cached inverse (NULL if not computed yet)
## The returned list is the interface consumed by cacheSolve().
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y
    m <<- NULL  # matrix changed, so any cached inverse is stale
  }
  get <- function() x
  # fix: parameter was named `solve`, shadowing base::solve; renamed to `inv`
  # (callers invoke setinv positionally, so this is backward compatible)
  setinv <- function(inv) m <<- inv
  getinv <- function() m
  list(set = set, get = get,
       setinv = setinv,
       getinv = getinv)
}
## cacheSolve: return the inverse of the matrix wrapped by `x` (a list made by
## makeCacheMatrix). A cached inverse is returned when available; otherwise the
## inverse is computed with solve(), stored in the cache, and returned.
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  m <- x$getinv()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  message("No cache data, will calculate")
  data <- x$get()
  m <- solve(data, ...)  # fix: `...` was accepted but never forwarded
  x$setinv(m)
  m
}
# Example of how to run this:
#   step 1: create a matrix
#   step 2: run makeCacheMatrix and save the result in a variable
#   step 3: call cacheSolve with that variable as the argument
# fix: variables were named `c` and `solve`, shadowing base::c and base::solve
# (cacheSolve itself calls solve() internally, so the latter is dangerous)
mat <- rbind(c(1, -1/4), c(-1/4, 1))
cached_mat <- makeCacheMatrix(mat)
mat_inverse <- cacheSolve(cached_mat)
mat_inverse
| /cachematrix.R | no_license | Adjeiinfo/ProgrammingAssignment2 | R | false | false | 1,470 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
#set the environment variables to cache the inverse
#get will return the matrix
#setinv is the function that calculate the inverse
#getinv return the inverse
#the list of function will be used in the calling function
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y){
x <<- y
m <<- NULL
}
get <- function() x
setinv <- function(solve) m <<- solve
getinv <- function() m
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## Write a short comment describing this function
#First look in parent environment for the inverse
#Return it if it is available
#Calculate the it if not available and set the value in cache
cacheSolve <- function(x,...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinv()
if(!is.null(m))
{
message("getting cached data")
return(m)
}
message("No cache data, will calculate")
data <- x$get()
m <- solve(data)
x$setinv(m)
m
}
#example of how to run this
#step1: create the matrix m
#Step2:run the makeCacheMatrix and save the result in a variable
#Step3: call the cacheSolve with the named variable as argument
c=rbind(c(1, -1/4), c(-1/4, 1))
mkmatrix <-makeCacheMatrix(c)
solve <-cacheSolve(mkmatrix)
solve
|
# With upload confirmation disabled in the OpenML config, checkUserConfirmation()
# must not prompt the user and must simply return TRUE for the "flow" type.
# with_reset_config() restores the original configuration afterwards.
test_that("checkUserConfirmation", {
  with_reset_config({
    setOMLConfig(confirm.upload = FALSE)
    expect_true(checkUserConfirmation("flow"))
  })
})
| /tests/testthat/test_local_checkUserConfirmation.R | permissive | openml/openml-r | R | false | false | 155 | r | test_that("checkUserConfirmation", {
with_reset_config({
setOMLConfig(confirm.upload = FALSE)
expect_true(checkUserConfirmation("flow"))
})
})
|
#!/usr/bin/Rscript
# Map numeric values onto a palette by binning the range [limits] into
# length(pal) equal intervals; values outside are clamped to the end bins.
map2color <- function(x, pal, limits = NULL) {
  if (is.null(limits)) {
    limits <- range(x)
  }
  breaks <- seq(limits[1], limits[2], length.out = length(pal) + 1)
  pal[findInterval(x, breaks, all.inside = TRUE)]
}
# Write a small key/value parameter file describing a splat simulation.
#
#   output:      path of the file to write
#   genes:       number of genes
#   pseudotime:  pseudotime length, repeated once per branch
#   topology:    matrix with one (from, to) branch pair per row
#   random_seed: seed recorded for reproducibility
#   modules:     number of gene modules (default 0)
save_splat_parameters <- function(output, genes, pseudotime, topology, random_seed, modules = 0) {
  destination <- file(output)
  # fix: close the connection even if writeLines() fails
  on.exit(close(destination), add = TRUE)

  n_branches <- nrow(topology)
  # topology rendered as "[[f1, t1], [f2, t2], ...]"
  branch_strs <- vapply(seq_len(n_branches),
                        function(i) paste0("[", topology[i, 1], ", ", topology[i, 2], "]"),
                        character(1))
  top_string <- paste("topology:",
                      paste0("[", paste(branch_strs, collapse = ", "), "]"))
  # one pseudotime entry per branch; a tree with n_branches rows has
  # n_branches + 1 branches in total
  pt_string <- paste0("pseudotimes: [",
                      paste(rep(pseudotime, n_branches + 1), collapse = ", "),
                      "]")
  param_text <- c(paste("Genes:", genes),
                  pt_string,
                  top_string,
                  paste("#modules:", modules),
                  paste("random seed:", random_seed))
  writeLines(param_text, destination)
}
# Generate a random binary lineage-tree topology with `branch_points`
# bifurcations. Returns a vector of parent ids: element i is the branch that
# branch i originates from (0 for the root, -1 would mark an unassigned slot).
gen_random_topology <- function(branch_points) {
  total_branches <- 2 * branch_points + 1
  res <- rep(-1, total_branches)
  res[1] <- 0  # branch 1 is the root
  available_branchpoints <- c(1)
  available_destinations <- 2:total_branches
  # seq_len() fixes the branch_points == 0 case (1:0 would iterate twice)
  for (i in seq_len(branch_points)) {
    # select new branch point; index-based sampling avoids base::sample's
    # scalar surprise (sample(5, 1) draws from 1:5, not from {5})
    new_branch_point <- available_branchpoints[
      sample.int(length(available_branchpoints), 1)]
    # select two distinct destination branches
    goes_to <- available_destinations[
      sample.int(length(available_destinations), 2)]
    # map from-to
    res[goes_to] <- new_branch_point
    # the two new branches become candidate branch points; the used one is spent
    available_branchpoints <- append(available_branchpoints, goes_to)
    available_branchpoints <-
      available_branchpoints[available_branchpoints != new_branch_point]
    # the chosen destinations are no longer available
    available_destinations <-
      available_destinations[!(available_destinations %in% goes_to)]
  }
  return(res)
}
# Topologically order the branches so every branch is processed after the
# branch it originates from. `path.from[i]` is the parent of branch i
# (0 denotes the virtual root).
get_path_order <- function(path.from) {
  # Pair every branch with its parent: (parent, branch).
  path.pairs <- lapply(seq_along(path.from), function(idx) c(path.from[idx], idx))
  # Repeatedly take the first pending pair; a branch can be finalised once its
  # parent is already in `done`. Otherwise requeue it at the back.
  done <- 0
  while (length(path.pairs) > 0) {
    current <- path.pairs[[1]]
    path.pairs <- path.pairs[-1]
    if (current[1] %in% done) {
      done <- c(done, current[2])
    } else {
      path.pairs <- c(path.pairs, list(current))
    }
  }
  # Drop the virtual root before returning the processing order.
  done[-1]
}
# Depth of branch `n`: number of steps from n back to the root (branch 0)
# following the parent vector `path.from`. Iterative form of the original
# recursion; identical results for the same inputs.
trace_back <- function(path.from, n) {
  depth <- 0
  while (n != 0) {
    n <- path.from[n]
    depth <- depth + 1
  }
  depth
}
# Build a (total_branches - 1) x 2 matrix with one (parent, child) row per
# non-root branch of the lineage tree; rows are emitted in parent order.
# -1 marks rows that were never filled.
branch_connectivity <- function(lineage_tree) {
  total_branches <- length(lineage_tree)
  pairings <- matrix(-1, ncol = 2, nrow = total_branches - 1)
  row_idx <- 0
  for (parent in seq_len(total_branches)) {
    children <- which(lineage_tree == parent)
    for (child in children) {
      row_idx <- row_idx + 1
      pairings[row_idx, ] <- c(parent, child)
    }
  }
  pairings
}
| /scripts/splat_funcs.R | no_license | soedinglab/merlot-scripts | R | false | false | 3,559 | r | #!/usr/bin/Rscript
map2color<-function(x,pal,limits=NULL){
if(is.null(limits)) limits=range(x)
pal[findInterval(x,seq(limits[1],limits[2],length.out=length(pal)+1), all.inside=TRUE)]
}
save_splat_parameters <- function(output, genes, pseudotime, topology, random_seed, modules = 0) {
destination <- file(output)
gene_string <- paste("Genes:", genes, sep = " ")
top <- "["
for (i in 1:dim(topology)[1]) {
branch <- topology[i, ]
top <- paste(top, "[", branch[1], ", ", branch[2], "]", sep = "")
if (i < dim(topology)[1]) {
top <- paste(top, ", ", sep = "")
}
}
top <- paste(top, "]", sep = "")
top_string <- paste("topology:", top, sep = " ")
pt_string <- "pseudotimes: ["
for (i in 1:(dim(topology)[1] + 1)) {
pt_string <- paste(pt_string, pseudotime, sep = "")
if (i < (dim(topology)[1] + 1)) {
pt_string <- paste(pt_string, ", ", sep = "")
}
}
pt_string <- paste(pt_string, "]", sep = "")
mod_string <- paste("#modules:", modules, sep = " ")
seed_string <- paste("random seed:", random_seed, sep = " ")
param_text <- c(gene_string, pt_string, top_string, mod_string, seed_string)
writeLines(param_text, destination)
close(destination)
}
gen_random_topology <- function(branch_points) {
total_branches <- 2 * branch_points + 1
res <- rep(-1, total_branches)
res[1] <- 0
available_branchpoints <- c(1)
available_destinations <- 2:total_branches
for (i in 1:branch_points) {
# select new branch point
new_branch_point <- sample(available_branchpoints, 1)
# select destinations
goes_to <- sample(available_destinations, 2, replace = FALSE)
# map from-to
res[goes_to] <- new_branch_point
# update available branchpoints
available_branchpoints <- append(available_branchpoints, goes_to)
without_new_branchpoint <- (available_branchpoints != new_branch_point)
available_branchpoints <- available_branchpoints[without_new_branchpoint]
# update available destinations
without_new_destinations <- (!(available_destinations %in% goes_to))
available_destinations <- available_destinations[without_new_destinations]
}
return(res)
}
get_path_order <- function(path.from) {
# Transform the vector into a list of (from, to) pairs
path.pairs <- list()
for (idx in seq_along(path.from)) {
path.pairs[[idx]] <- c(path.from[idx], idx)
}
# Determine the processing order
# If a path is in the "done" vector any path originating here can be
# completed
done <- 0
while (length(path.pairs) > 0) {
path.pair <- path.pairs[[1]]
path.pairs <- path.pairs[-1]
from <- path.pair[1]
to <- path.pair[2]
if (from %in% done) {
done <- c(done, to)
} else {
path.pairs <- c(path.pairs, list(path.pair))
}
}
# Remove the origin from the vector
done <- done[-1]
return(done)
}
trace_back <- function(path.from, n) {
if (n == 0) {
return(0)
} else {
return(trace_back(path.from, path.from[n]) + 1)
}
}
branch_connectivity <- function(lineage_tree) {
total_branches <- length(lineage_tree)
pairings <- matrix(-1, ncol=2, nrow=total_branches-1)
branches <- data.frame(from=lineage_tree, to=1:total_branches)
counter <- 0
for (b in 1:total_branches) {
connections <- which(branches$from == b)
if (length(connections) > 0) {
for (i in seq_along(connections)) {
pairings[counter + i, ] <- c(b, connections[i])
}
counter <- counter + length(connections)
}
}
pairings
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/validationUtil.R
\name{getMae}
\alias{getMae}
\title{Mae}
\usage{
getMae(obs, prd, MARGIN)
}
\description{
Get mean absolute error
}
\author{
Daniel San-Mart\'in \email{daniel@predictia.es}
}
\keyword{internal}
| /man/getMae.Rd | no_license | jamiepg3/R_VALUE | R | false | false | 298 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/validationUtil.R
\name{getMae}
\alias{getMae}
\title{Mae}
\usage{
getMae(obs, prd, MARGIN)
}
\description{
Get mean absolute error
}
\author{
Daniel San-Mart\'in \email{daniel@predictia.es}
}
\keyword{internal}
|
## File Name: cdm_pem_acceleration.R
## File Version: 0.132

## PEM (parameter extrapolation) acceleration step for an EM-type algorithm.
## Every second iteration the three most recent parameter vectors P0, P1, P2
## are extrapolated to a candidate Pnew; the extrapolation step is grown while
## each candidate improves the likelihood, and the last improving candidate is
## kept.
##
## Arguments:
##   iter                   current EM iteration number
##   pem_parameter_index    index structure mapping the parameter list to/from
##                          a flat numeric vector
##   pem_parameter_sequence list holding the stored parameter vectors P0, P1, P2
##   pem_pars               names of the parameters subject to acceleration
##   PEM_itermax            last iteration at which acceleration may run
##   parmlist               named list of current model parameters
##   ll_fct                 likelihood function; must return a list with an
##                          element `ll`
##   ll_args                argument list for ll_fct, updated with candidate
##                          parameters before each evaluation
##   deviance.history       optional deviance trace used to switch PEM off when
##                          the deviance oscillates
##
## Value: list(ll, pem_parameter_sequence, PEM, res_ll_fct, pem_update)
cdm_pem_acceleration <- function( iter, pem_parameter_index, pem_parameter_sequence,
        pem_pars, PEM_itermax, parmlist, ll_fct, ll_args, deviance.history=NULL )
{
    res0 <- ll <- NULL
    PEM <- TRUE
    #-- transform into a vector
    pem_parm <- cdm_pem_collect_parameters( parmlist=parmlist,
                    pem_parameter_index=pem_parameter_index )
    #-- collect parameters in initial iterations
    pem_parameter_sequence <- cdm_pem_parameter_sequence_initial_iterations(
                                    pem_parm=pem_parm,
                                    pem_parameter_sequence=pem_parameter_sequence,
                                    iter=iter )
    pem_update <- FALSE
    # acceleration is attempted only in even iterations before PEM_itermax
    if ( ( iter %% 2==0 ) & ( iter > 0 ) & ( iter < PEM_itermax ) ){
        pem_update <- TRUE
        pem_parameter_sequence$P2 <- pem_parm
        #** baseline likelihood
        ll_args <- cdm_pem_include_ll_args( ll_args=ll_args,
                        pem_parm=pem_parm, pem_pars=pem_pars,
                        pem_parameter_index=pem_parameter_index )
        res0 <- res <- do.call( what=ll_fct, args=ll_args )
        ll0 <- ll <- res$ll
        P0 <- pem_parameter_sequence$P0
        P1 <- pem_parameter_sequence$P1
        P2 <- pem_parameter_sequence$P2
        iterate <- TRUE
        ii <- 0
        #--- begin PEM iterations
        # grow the extrapolation step while the candidate improves the
        # likelihood; stop at the first non-improving (or NA) candidate and
        # keep the previous one (res0 / ll0)
        while (iterate){
            ll_args0 <- ll_args
            res0 <- res
            ll0 <- ll
            tt <- cdm_pem_algorithm_compute_t( i=ii )
            Pnew <- cdm_pem_algorithm_compute_Pnew( tt=tt, P0=P0, P1=P1, P2=P2 )
            ll_args <- cdm_pem_include_ll_args( ll_args=ll_args, pem_parm=Pnew,
                            pem_pars=pem_pars, pem_parameter_index=pem_parameter_index )
            res <- do.call( what=ll_fct, args=ll_args )
            ll <- res$ll
            if ( is.na(ll) ){
                ll <- -Inf  # treat a failed evaluation as "no improvement"
            }
            if ( ll < ll0 ){
                iterate <- FALSE
            }
            ii <- ii + 1
        }
        #--- end PEM iterations
        ll <- res0$ll
        # shift the stored parameter window: (P0, P1) <- (P1, P2)
        pem_parameter_sequence$P0 <- P1
        pem_parameter_sequence$P1 <- P2
    }
    if (iter > PEM_itermax){
        PEM <- FALSE
    }
    if ( ! is.null( deviance.history) ){
        # disable acceleration when the deviance trace oscillates: more than
        # 35% of the last NL0 first differences are negative
        diff_history <- diff( deviance.history[ 1:iter ] )
        NL0 <- 15
        NL <- min( NL0, iter ) # number of lags
        if ( iter > NL0 ){
            diff2 <- diff_history[ seq( iter - 1, iter - NL, -1 ) ]
            PEM <- ! ( sum( ( diff2 < 0 ) ) > ( .35 * NL0 ) )
        }
    }
    #--- output
    res <- list(ll=ll, pem_parameter_sequence=pem_parameter_sequence, PEM=PEM,
                res_ll_fct=res0, pem_update=pem_update )
    return(res)
}
| /R/cdm_pem_acceleration.R | no_license | cran/CDM | R | false | false | 2,896 | r | ## File Name: cdm_pem_acceleration.R
## File Version: 0.132
cdm_pem_acceleration <- function( iter, pem_parameter_index, pem_parameter_sequence,
pem_pars, PEM_itermax, parmlist, ll_fct, ll_args, deviance.history=NULL )
{
res0 <- ll <- NULL
PEM <- TRUE
#-- transform into a vector
pem_parm <- cdm_pem_collect_parameters( parmlist=parmlist,
pem_parameter_index=pem_parameter_index )
#-- collect parameters in initial iterations
pem_parameter_sequence <- cdm_pem_parameter_sequence_initial_iterations(
pem_parm=pem_parm,
pem_parameter_sequence=pem_parameter_sequence,
iter=iter )
pem_update <- FALSE
if ( ( iter %% 2==0 ) & ( iter > 0 ) & ( iter < PEM_itermax ) ){
pem_update <- TRUE
pem_parameter_sequence$P2 <- pem_parm
#** baseline likelihood
ll_args <- cdm_pem_include_ll_args( ll_args=ll_args,
pem_parm=pem_parm, pem_pars=pem_pars,
pem_parameter_index=pem_parameter_index )
res0 <- res <- do.call( what=ll_fct, args=ll_args )
ll0 <- ll <- res$ll
P0 <- pem_parameter_sequence$P0
P1 <- pem_parameter_sequence$P1
P2 <- pem_parameter_sequence$P2
iterate <- TRUE
ii <- 0
#--- begin PEM iterations
while (iterate){
ll_args0 <- ll_args
res0 <- res
ll0 <- ll
tt <- cdm_pem_algorithm_compute_t( i=ii )
Pnew <- cdm_pem_algorithm_compute_Pnew( tt=tt, P0=P0, P1=P1, P2=P2 )
ll_args <- cdm_pem_include_ll_args( ll_args=ll_args, pem_parm=Pnew,
pem_pars=pem_pars, pem_parameter_index=pem_parameter_index )
res <- do.call( what=ll_fct, args=ll_args )
ll <- res$ll
if ( is.na(ll) ){
ll <- -Inf
}
if ( ll < ll0 ){
iterate <- FALSE
}
ii <- ii + 1
}
#--- end PEM iterations
ll <- res0$ll
pem_parameter_sequence$P0 <- P1
pem_parameter_sequence$P1 <- P2
}
if (iter > PEM_itermax){
PEM <- FALSE
}
if ( ! is.null( deviance.history) ){
diff_history <- diff( deviance.history[ 1:iter ] )
NL0 <- 15
NL <- min( NL0, iter ) # number of lags
if ( iter > NL0 ){
diff2 <- diff_history[ seq( iter - 1, iter - NL, -1 ) ]
PEM <- ! ( sum( ( diff2 < 0 ) ) > ( .35 * NL0 ) )
}
}
#--- output
res <- list(ll=ll, pem_parameter_sequence=pem_parameter_sequence, PEM=PEM,
res_ll_fct=res0, pem_update=pem_update )
return(res)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{plot_top_currencies}
\alias{plot_top_currencies}
\title{Plot The Price of the Largest Market Cap Cryptocurrencies}
\usage{
plot_top_currencies(currency = "USD", k = 5, bar_color = "grey")
}
\arguments{
\item{currency}{currency code (default is 'USD')}
\item{k}{the number of top cryptocurrencies to plot (default is 5)}
\item{bar_color}{a valid color name or hexadecimal color code (default is 'grey')}
}
\value{
A ggplot object of the top cryptocurrencies, ranked by market capitalization
}
\description{
Plot The Price of the Largest Market Cap Cryptocurrencies
}
\examples{
plot_top_currencies('EUR')
plot_top_currencies('GBP')
}
| /man/plot_top_currencies.Rd | no_license | trafficonese/coinmarketcap_v2 | R | false | true | 715 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{plot_top_currencies}
\alias{plot_top_currencies}
\title{Plot The Price of the Largest Market Cap Cryptocurrencies}
\usage{
plot_top_currencies(currency = "USD", k = 5, bar_color = "grey")
}
\arguments{
\item{currency}{currency code (default is 'USD')}
\item{k}{the number of top cryptocurrencies to plot (default is 5)}
\item{bar_color}{a valid color name or hexadecimal color code (default is 'grey')}
}
\value{
A ggplot of top Cryptocurrencies based on their rank (Market Cap)
}
\description{
Plot The Price of the Largest Market Cap Cryptocurrencies
}
\examples{
plot_top_currencies('EUR')
plot_top_currencies('GBP')
}
|
#' A Shiny app to plot negative binomial distributions interactively
#'
#' Mu parameter of a negative binomial distribution UI
#'
#' @param id An ID to namespace the module
#' @return A \code{shiny::textInput} control for entering the mu parameter,
#'   namespaced under \code{id}; the initial value is "1.0".
muVarUI <- function(id) {
  ## Namespace the input id so multiple module instances can coexist.
  ## fix: visible label typo "Mu Paramater" -> "Mu Parameter"
  shiny::textInput(shiny::NS(id, "mu"), "Mu Parameter", value = "1.0")
}
#' Mu parameter of a negative binomial distribution Server
#'
#' @param id An ID to namespace the module
#' @return A reactive expression yielding the current value of the module's
#'   "mu" input.
muVarServer <- function(id) {
  shiny::moduleServer(id, function(input, output, session) {
    # expose the raw input value as a reactive for the caller
    shiny::reactive(input$mu)
  })
}
| /nbinomPlot/R/muVar.R | permissive | zettsu-t/cPlusPlusFriend | R | false | false | 545 | r | #' A Shiny app to plot negative binomial distributions interactively
#' Mu parameter of a negative binomial distribution UI
#'
#' @param id An ID to namespace the module
muVarUI <- function(id) {
## Have a unique id.
shiny::textInput(shiny::NS(id, "mu"), "Mu Paramater", value = "1.0")
}
#' Mu parameter of a negative binomial distribution Server
#'
#' @param id An ID to namespace the module
muVarServer <- function(id) {
shiny::moduleServer(id, function(input, output, session) {
shiny::reactive(input$mu)
})
}
|
# Tune hyperparameters with CMA-ES (package `cmaes`). The optimizer is run on
# the box [getLower(par.set), getUpper(par.set)] with vectorized evaluation of
# candidate points; results are written to `opt.path`.
tuneCMAES = function(learner, task, resampling, measures, par.set, control, opt.path, show.info, resample.fun) {
  requirePackages("cmaes", why = "tune_cmaes", default.method = "load")
  low = getLower(par.set)
  upp = getUpper(par.set)

  # starting point: user-provided, otherwise sampled from the param set
  # (untransformed scale)
  start = control$start
  if (is.null(start))
    start = sampleValue(par.set, start, trafo = FALSE)
  start = convertStartToNumeric(start, par.set)
  # set sigma to 1/4 per dim, defaults in cmaes are crap for this, last time I looked
  # and vectorized evals for speed and parallel, then insert user controls
  # FIXME: there is a bug in cmaes that I reported MULTIPLE times now
  # while the docs say we can set sigma to a vector, there is a stopifnot in code which does not allow it
  sigma = median(upp - low) / 2
  ctrl.cmaes = list(vectorized = TRUE, sigma = sigma)
  ctrl.cmaes = insert(ctrl.cmaes, control$extra.args)

  # check whether the budget parameter is used correctly;
  # this check is only performed, if the budget is defined, but neither start, lambda nor maxit were defined
  N = length(start)
  budget = control$budget
  # either use user choice or lambda default, now lambda is set
  if (is.null(ctrl.cmaes$lambda))
    ctrl.cmaes$lambda = 4 + floor(3 * log(N))
  # if we have budget, calc maxit, otherwise use user setting or CMAES default
  # (scalar condition, so use if/else rather than ifelse)
  maxit = if (is.null(budget)) {
    if (is.null(ctrl.cmaes$maxit)) 100 * N^2 else ctrl.cmaes$maxit
  } else {
    floor(budget / ctrl.cmaes$lambda)
  }
  if (!is.null(budget) && budget < ctrl.cmaes$lambda)
    stopf("Budget = %i cannot be less than lambda = %i!", budget, ctrl.cmaes$lambda)  # fix: was invalid "%$i"
  if (!is.null(ctrl.cmaes$maxit) && ctrl.cmaes$maxit != maxit)
    stopf("Provided setting of maxit = %i does not work with provided budget = %s, lambda = %i",
      ctrl.cmaes$maxit, ifelse(is.null(budget), "NULL", budget), ctrl.cmaes$lambda)
  ctrl.cmaes$maxit = maxit

  cmaes::cma_es(par = start, fn = tunerFitnFunVectorized, lower = low, upper = upp, control = ctrl.cmaes,
    learner = learner, task = task, resampling = resampling, measures = measures,
    par.set = par.set, ctrl = control, opt.path = opt.path, show.info = show.info,
    convertx = convertXVectorizedMatrixCols, remove.nas = FALSE, resample.fun = resample.fun)
  makeTuneResultFromOptPath(learner, par.set, measures, control, opt.path)
}
| /R/tuneCMAES.R | no_license | cauldnz/mlr | R | false | false | 2,305 | r | tuneCMAES = function(learner, task, resampling, measures, par.set, control, opt.path, show.info, resample.fun) {
requirePackages("cmaes", why = "tune_cmaes", default.method = "load")
low = getLower(par.set)
upp = getUpper(par.set)
start = control$start
if (is.null(start))
start = sampleValue(par.set, start, trafo = FALSE)
start = convertStartToNumeric(start, par.set)
# set sigma to 1/4 per dim, defaults in cmaes are crap for this, last time I looked
# and vectorized evals for speed and parallel, then insert user controls
# FIXME: there is a bug in cmaes that I reported MULTIPLE times now
# while the docs say we can set sigma to a vector, there is a stopifnot in code which does not allow it
sigma = median(upp - low) / 2
ctrl.cmaes = list(vectorized = TRUE, sigma = sigma)
ctrl.cmaes = insert(ctrl.cmaes, control$extra.args)
# check whether the budget parameter is used correctly;
# this check is only performed, if the budget is defined, but neither start, lambda nor maxit were defined
N = length(start)
budget = control$budget
# either use user choice or lambda default, now lambda is set
if (is.null(ctrl.cmaes$lambda))
ctrl.cmaes$lambda = 4 + floor(3 * log(N))
# if we have budget, calc maxit, otherwise use CMAES default, now maxit is set
maxit = if (is.null(budget))
ifelse(is.null(ctrl.cmaes$maxit), 100 * N^2, ctrl.cmaes$maxit)
else
floor(budget / ctrl.cmaes$lambda)
if (!is.null(budget) && budget < ctrl.cmaes$lambda)
stopf("Budget = %$i cannot be less than lambda = %i!", budget, ctrl.cmaes$lambda)
if (!is.null(ctrl.cmaes$maxit) && ctrl.cmaes$maxit != maxit)
stopf("Provided setting of maxit = %i does not work with provided budget = %s, lambda = %i",
ctrl.cmaes$maxit, ifelse(is.null(budget), "NULL", budget), ctrl.cmaes$lambda)
ctrl.cmaes$maxit = maxit
cmaes::cma_es(par = start, fn = tunerFitnFunVectorized, lower = low, upper = upp, control = ctrl.cmaes,
learner = learner, task = task, resampling = resampling, measures = measures,
par.set = par.set, ctrl = control, opt.path = opt.path, show.info = show.info,
convertx = convertXVectorizedMatrixCols, remove.nas = FALSE, resample.fun = resample.fun)
makeTuneResultFromOptPath(learner, par.set, measures, control, opt.path)
}
|
#checking for correlations between strontium isotope ratios and concentrations of various elements
# NOTE(review): the first read.csv (Windows path) is immediately overwritten by
# the second (Linux path); on a machine without the Windows path the script
# stops at the first line — keep only the line that applies to your machine.
x <- read.csv("C:\\Users\\Mike\\Desktop\\Grad\\Projects\\strontium_isoscape\\Sr_covariates.csv",
stringsAsFactors=FALSE)
x <- read.csv("/home/mike/Desktop/grad/Projects/strontium_isoscape/Sr_covariates.csv",
stringsAsFactors=FALSE)
#replacing all "<0.0000x" with "0.0000x"
# columns 17+ hold element concentrations; detection-limit values are stored
# as strings like "<0.00001", which makes those columns character
ischar <- rep(NA, length(17:ncol(x)))
for(i in 17:(ncol(x))){
ischar[i-16] <- is.character(x[1,i])
}
# strip the leading "<" so the values can be coerced to numeric below
x2 <- x
for(i in 1:nrow(x)){
for(j in 17:ncol(x)){
if(ischar[j-16] & substr(x[i,j], 1, 1) == '<'){
x2[i,j] <- substr(x[i,j], 2, nchar(x[i,j]))
}
}
}
#more fixer-uppering
# column 15 is the Sr 87/86 isotope ratio; convert concentration columns to numeric
colnames(x2)[15] <- 'sr87_86'
x2[,17:ncol(x2)] <- apply(x2[,17:ncol(x2)], 2, as.numeric)
# drop rows without an isotope ratio
x3 <- x2[-which(is.na(x2$sr87_86)),]
#take mean of duplicate samples
x4 <- aggregate(x3[,15:ncol(x3)], by=list(x3$Sample.Number), FUN=mean)
#normalizing by calcium
# column 10 (after aggregation) is Ca itself, so it is excluded from the ratio
non_Ca_cols <- c(4:9,11:ncol(x4))
x4[,non_Ca_cols] <- x4[,non_Ca_cols]/x4$Ca
#correlations
# correlations of each (Ca-normalized) element with the isotope ratio
as.matrix(round(cor(x4[,c(2,4:ncol(x4))])[,1], 3))
# scatterplots + simple regressions for a hand-picked set of columns
# NOTE(review): the column indices below are position-dependent — re-check
# them if the input file's column layout changes
par(mfrow=c(4,3))
for(i in c(8,9,10,11,14,16,19,23,24,25,27,29)){
mod <- lm(x4$sr87_86 ~ x4[,i])
if(i==10){
# column 10 is Ca (not Ca-normalized), so label it plainly
plot(x4[,i], x4$sr87_86, xlab='Ca', pch=20, ylab='Sr 86/86',
main=paste('Adj. R^2 =', round(summary(mod)$adj.r.squared, 3)))
} else {
plot(x4[,i], x4$sr87_86, xlab=paste(colnames(x4[i]), '/Ca', sep=''), pch=20, ylab='Sr 86/86',
main=paste('Adj. R^2 =', round(summary(mod)$adj.r.squared, 3)))
}
abline(mod)
}
#export
write.csv(x4, "/home/mike/Desktop/grad/Projects/strontium_isoscape/Sr_covariates_clean.csv")
| /old_notes_and_data/Sr_covariates.R | no_license | vlahm/mekong_strontium_isoscape | R | false | false | 1,685 | r | #checking for correlations between strontium isotope ratios and concentrations of various elements
## Sr_covariates: checks for correlations between strontium isotope ratios
## (87Sr/86Sr) and elemental concentrations in the sampled water data.
## NOTE(review): the two read.csv calls below are Windows/Linux variants of
## the same file; the second overwrites the first. Comment out whichever
## does not apply on your machine.
x <- read.csv("C:\\Users\\Mike\\Desktop\\Grad\\Projects\\strontium_isoscape\\Sr_covariates.csv",
stringsAsFactors=FALSE)
x <- read.csv("/home/mike/Desktop/grad/Projects/strontium_isoscape/Sr_covariates.csv",
stringsAsFactors=FALSE)
#replacing all "<0.0000x" with "0.0000x"
# Record which of the concentration columns (17 onward) were read as text,
# i.e. contain below-detection-limit entries prefixed with "<".
ischar <- rep(NA, length(17:ncol(x)))
for(i in 17:(ncol(x))){
ischar[i-16] <- is.character(x[1,i])
}
x2 <- x
# Strip the leading "<" so the value can be coerced to numeric below.
for(i in 1:nrow(x)){
for(j in 17:ncol(x)){
if(ischar[j-16] & substr(x[i,j], 1, 1) == '<'){
x2[i,j] <- substr(x[i,j], 2, nchar(x[i,j]))
}
}
}
#more fixer-uppering
colnames(x2)[15] <- 'sr87_86'
# Coerce the cleaned concentration columns to numeric.
x2[,17:ncol(x2)] <- apply(x2[,17:ncol(x2)], 2, as.numeric)
# Drop rows with no isotope-ratio measurement.
x3 <- x2[-which(is.na(x2$sr87_86)),]
#take mean of duplicate samples
x4 <- aggregate(x3[,15:ncol(x3)], by=list(x3$Sample.Number), FUN=mean)
#normalizing by calcium
# Every concentration column except Ca itself (column 10) becomes a ratio to Ca.
non_Ca_cols <- c(4:9,11:ncol(x4))
x4[,non_Ca_cols] <- x4[,non_Ca_cols]/x4$Ca
#correlations
# Correlations of sr87_86 (first aggregated column, i.e. column 2) with each covariate.
as.matrix(round(cor(x4[,c(2,4:ncol(x4))])[,1], 3))
# Scatterplot + least-squares fit for a selected subset of covariates.
# Column 10 is Ca itself, so its axis gets a plain 'Ca' label instead of '<elem>/Ca'.
# NOTE(review): ylab reads 'Sr 86/86' — presumably intended as 'Sr 87/86'; confirm.
par(mfrow=c(4,3))
for(i in c(8,9,10,11,14,16,19,23,24,25,27,29)){
mod <- lm(x4$sr87_86 ~ x4[,i])
if(i==10){
plot(x4[,i], x4$sr87_86, xlab='Ca', pch=20, ylab='Sr 86/86',
main=paste('Adj. R^2 =', round(summary(mod)$adj.r.squared, 3)))
} else {
plot(x4[,i], x4$sr87_86, xlab=paste(colnames(x4[i]), '/Ca', sep=''), pch=20, ylab='Sr 86/86',
main=paste('Adj. R^2 =', round(summary(mod)$adj.r.squared, 3)))
}
abline(mod)
}
#export
write.csv(x4, "/home/mike/Desktop/grad/Projects/strontium_isoscape/Sr_covariates_clean.csv")
|
#' Available linters
#'
#' @name linters
#' @title linters
#' @param source_file returned by \code{\link{get_source_expressions}}
#' @param length the length cutoff to use for the given linter.
NULL
# Build a named list from `...`, deriving each element's name from the
# unevaluated argument expression: everything from the first "(" onward is
# stripped, so e.g. `foo_linter(80)` gets the name "foo_linter".
# NULL elements are dropped. Relies on the rex package helpers
# `re_substitutes()` / `rex()`.
named_list <- function(...) {
# names come from the *expressions*, not the evaluated values
nms <- re_substitutes(as.character(eval(substitute(alist(...)))),
rex("(", anything), "")
vals <- list(...)
names(vals) <- nms
# keep only non-NULL entries
vals[!vapply(vals, is.null, logical(1))]
}
#' Modify the list of default linters
#'
#' @param ... named arguments of linters to change. If the named linter already
#' exists it is replaced by the new linter, if it does not exist it is added.
#' If the value is \code{NULL} the linter is removed.
#' @param default default linters to change
#' @export
#' @examples
#' # change the default line length cutoff
#' with_defaults(line_length_linter = line_length_linter(120))
#'
#' # you can also omit the argument name if you are just using different
#' # arguments.
#' with_defaults(line_length_linter(120))
#'
#' # enforce camelCase rather than snake_case
#' with_defaults(camel_case_linter = NULL,
#' snake_case_linter)
with_defaults <- function(..., default = default_linters) {
vals <- list(...)
nms <- names2(vals)
# Arguments passed without a name get named after their own expression,
# with any call parentheses stripped (e.g. `line_length_linter(120)`
# becomes "line_length_linter").
missing <- nms == ""
if (any(missing)) {
nms[missing] <- re_substitutes(as.character(eval(substitute(alist(...)[missing]))),
rex("(", anything), "")
}
# Replace/add linters by name, then drop entries explicitly set to NULL.
default[nms] <- vals
res <- default[!vapply(default, is.null, logical(1))]
# Tag every linter with class "lintr_function" (see str.lintr_function).
# NOTE(review): the function's value is the (invisible) value of this
# `res[] <- ...` assignment, i.e. the RHS list; an explicit trailing
# `res` would make the return clearer.
res[] <- lapply(res, function(x) {
prev_class <- class(x)
class(x) <- c(prev_class, "lintr_function")
x
})
}
# this is just to make the auto documentation cleaner
# this is just to make the auto documentation cleaner
# str() method for "lintr_function": a linter's body is noise in str()
# output, so emit a bare newline instead of the default function dump.
str.lintr_function <- function(x, ...) {
  writeLines("")
}
#' Default linters to use
#' @export
# The linter set applied when the user supplies none. Built through
# with_defaults() so every entry is tagged with class "lintr_function";
# parameterized linters are instantiated here with their default cutoffs
# (line length 80, object-name length 30).
default_linters <- with_defaults(default = list(),
assignment_linter,
single_quotes_linter,
absolute_paths_linter,
no_tab_linter,
line_length_linter(80),
commas_after_linter,
commas_before_linter,
infix_spaces_linter,
spaces_left_parentheses_linter,
spaces_inside_linter,
open_curly_linter(),
closed_curly_linter(),
camel_case_linter,
multiple_dots_linter,
object_length_linter(30),
object_usage_linter,
trailing_whitespace_linter,
trailing_blank_lines_linter,
commented_code_linter,
# trailing NULL keeps the comma layout tidy; with_defaults() drops NULLs
NULL
)
#' Default lintr settings
#' @seealso \code{\link{read_settings}}, \code{\link{default_linters}}
default_settings <- NULL
settings <- NULL
# Package load hook: registers the default `lintr.linter_file` option
# (only when the user has not already set it) and initializes the mutable
# package-level `settings` environment from `default_settings`.
.onLoad <- function(libname, pkgname) { # nolint
op <- options()
op.lintr <- list(
lintr.linter_file = ".lintr"
)
# only set options the user has not configured already
toset <- !(names(op.lintr) %in% names(op))
if (any(toset)) options(op.lintr[toset])
default_settings <<- list(
linters = default_linters,
# regexes marking lines / regions excluded from linting
exclude = rex::rex("#", any_spaces, "nolint"),
exclude_start = rex::rex("#", any_spaces, "nolint start"),
exclude_end = rex::rex("#", any_spaces, "nolint end"),
exclusions = list(),
cache_directory = "~/.R/lintr_cache", # nolint
# NOTE(review): presumably a lightly obfuscated access token, decoded
# at load time by the package-internal rot() helper with shift
# 54 - 13 = 41 — confirm against the rot() definition.
comment_token = rot(
paste0(
"0n12nn72507",
"r6273qnnp34",
"43qno7q42n1",
"n71nn28")
, 54 - 13),
# environment-variable overrides for CI/commenting behavior
comment_bot = logical_env("LINTR_COMMENT_BOT") %||% TRUE,
error_on_lint = logical_env("LINTR_ERROR_ON_LINT") %||% FALSE
)
# settings is an environment so it can be mutated by read_settings()
settings <<- list2env(default_settings, parent = emptyenv())
invisible()
}
| /R/zzz.R | no_license | rmsharp/lintr | R | false | false | 3,282 | r | #' Available linters
#'
#' @name linters
#' @title linters
#' @param source_file returned by \code{\link{get_source_expressions}}
#' @param length the length cutoff to use for the given linter.
NULL
named_list <- function(...) {
nms <- re_substitutes(as.character(eval(substitute(alist(...)))),
rex("(", anything), "")
vals <- list(...)
names(vals) <- nms
vals[!vapply(vals, is.null, logical(1))]
}
#' Modify the list of default linters
#'
#' @param ... named arguments of linters to change. If the named linter already
#' exists it is replaced by the new linter, if it does not exist it is added.
#' If the value is \code{NULL} the linter is removed.
#' @param default default linters to change
#' @export
#' @examples
#' # change the default line length cutoff
#' with_defaults(line_length_linter = line_length_linter(120))
#'
#' # you can also omit the argument name if you are just using different
#' # arguments.
#' with_defaults(line_length_linter(120))
#'
#' # enforce camelCase rather than snake_case
#' with_defaults(camel_case_linter = NULL,
#' snake_case_linter)
with_defaults <- function(..., default = default_linters) {
vals <- list(...)
nms <- names2(vals)
missing <- nms == ""
if (any(missing)) {
nms[missing] <- re_substitutes(as.character(eval(substitute(alist(...)[missing]))),
rex("(", anything), "")
}
default[nms] <- vals
res <- default[!vapply(default, is.null, logical(1))]
res[] <- lapply(res, function(x) {
prev_class <- class(x)
class(x) <- c(prev_class, "lintr_function")
x
})
}
# this is just to make the auto documentation cleaner
str.lintr_function <- function(x, ...) {
cat("\n")
}
#' Default linters to use
#' @export
default_linters <- with_defaults(default = list(),
assignment_linter,
single_quotes_linter,
absolute_paths_linter,
no_tab_linter,
line_length_linter(80),
commas_after_linter,
commas_before_linter,
infix_spaces_linter,
spaces_left_parentheses_linter,
spaces_inside_linter,
open_curly_linter(),
closed_curly_linter(),
camel_case_linter,
multiple_dots_linter,
object_length_linter(30),
object_usage_linter,
trailing_whitespace_linter,
trailing_blank_lines_linter,
commented_code_linter,
NULL
)
#' Default lintr settings
#' @seealso \code{\link{read_settings}}, \code{\link{default_linters}}
default_settings <- NULL
settings <- NULL
.onLoad <- function(libname, pkgname) { # nolint
op <- options()
op.lintr <- list(
lintr.linter_file = ".lintr"
)
toset <- !(names(op.lintr) %in% names(op))
if (any(toset)) options(op.lintr[toset])
default_settings <<- list(
linters = default_linters,
exclude = rex::rex("#", any_spaces, "nolint"),
exclude_start = rex::rex("#", any_spaces, "nolint start"),
exclude_end = rex::rex("#", any_spaces, "nolint end"),
exclusions = list(),
cache_directory = "~/.R/lintr_cache", # nolint
comment_token = rot(
paste0(
"0n12nn72507",
"r6273qnnp34",
"43qno7q42n1",
"n71nn28")
, 54 - 13),
comment_bot = logical_env("LINTR_COMMENT_BOT") %||% TRUE,
error_on_lint = logical_env("LINTR_ERROR_ON_LINT") %||% FALSE
)
settings <<- list2env(default_settings, parent = emptyenv())
invisible()
}
|
## plot4.R — Exploratory Data Analysis course project.
## Reads the UCI "Individual household electric power consumption" data and
## draws a 2x2 panel of time-series plots for 2007-02-01 and 2007-02-02,
## saved as plot4.png.
file <- read.table("household_power_consumption.txt", header = TRUE, sep= ";", na.strings = c("?",""))
# Parse the date, then combine date + time into a single POSIXlt timestamp
# (stored back into the Time column).
file$Date <- as.Date(file$Date, format = "%d/%m/%Y")
file$timetemp <- paste(file$Date, file$Time)
file$Time <- strptime(file$timetemp, format = "%Y-%m-%d %H:%M:%S")
# Keep only the two target days: [2007-02-01, 2007-02-03)
fileset <- file[file$Time >= as.POSIXlt("2007-02-01") & file$Time < as.POSIXlt("2007-02-03"),]
# 2x2 panel layout with tightened margins
par(mar=c(4,4,2,1),mfrow = c(2,2))
# Panel 1: global active power over time (empty plot, then line overlay)
plot(fileset$Time,fileset$Global_active_power, type ="n",xlab="", ylab = "Global Active Power (kilowatts)")
lines(fileset$Time,fileset$Global_active_power)
# Panel 2: voltage
plot(fileset$Time,fileset$Voltage, type ="n", xlab="datetime", ylab = "Voltage")
lines(fileset$Time,fileset$Voltage)
# Panel 3: the three sub-metering series, with legend
plot(fileset$Time,fileset$Sub_metering_1, type ="n",xlab="", ylab = "Energy sub metering")
lines(fileset$Time, fileset$Sub_metering_1, col = "black")
lines(fileset$Time, fileset$Sub_metering_2, col = "red")
lines(fileset$Time, fileset$Sub_metering_3, col = "blue")
legend("topright", lty= c(1,1), col= c("black","red","blue"), legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
# Panel 4: global reactive power
plot(fileset$Time,fileset$Global_reactive_power, type ="n", xlab="datetime", ylab = "Global_reactive_power")
lines(fileset$Time,fileset$Global_reactive_power)
# Copy the on-screen device to a PNG (default 480x480) and close it.
# NOTE(review): drawing directly into png() is usually preferable to
# dev.copy(), which can change sizing/fonts relative to the screen device.
dev.copy(png,file="plot4.png")
dev.off()
file$Date <- as.Date(file$Date, format = "%d/%m/%Y")
file$timetemp <- paste(file$Date, file$Time)
file$Time <- strptime(file$timetemp, format = "%Y-%m-%d %H:%M:%S")
fileset <- file[file$Time >= as.POSIXlt("2007-02-01") & file$Time < as.POSIXlt("2007-02-03"),]
par(mar=c(4,4,2,1),mfrow = c(2,2))
plot(fileset$Time,fileset$Global_active_power, type ="n",xlab="", ylab = "Global Active Power (kilowatts)")
lines(fileset$Time,fileset$Global_active_power)
plot(fileset$Time,fileset$Voltage, type ="n", xlab="datetime", ylab = "Voltage")
lines(fileset$Time,fileset$Voltage)
plot(fileset$Time,fileset$Sub_metering_1, type ="n",xlab="", ylab = "Energy sub metering")
lines(fileset$Time, fileset$Sub_metering_1, col = "black")
lines(fileset$Time, fileset$Sub_metering_2, col = "red")
lines(fileset$Time, fileset$Sub_metering_3, col = "blue")
legend("topright", lty= c(1,1), col= c("black","red","blue"), legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
plot(fileset$Time,fileset$Global_reactive_power, type ="n", xlab="datetime", ylab = "Global_reactive_power")
lines(fileset$Time,fileset$Global_reactive_power)
dev.copy(png,file="plot4.png")
dev.off() |
## Credit-card default prediction: EDA + logistic regression on the
## UCI "default of credit card clients" data set.
## ---- One-time package installation (consider commenting out) ----
install.packages("DataExplorer")
install.packages("tidyverse")
install.packages("mice")
install.packages("caTools")
## ---- Libraries ----
library(knitr)
library(tidyverse)
library(ggplot2)
library(mice)
library(lattice)
library(reshape2)
library(DataExplorer)
library(dplyr)
library(e1071)
library(caTools)
library(caret)
## ---- Load and inspect ----
data= read.csv('give path to your dataset folder')
head(data)
View(data)
str(data)
summary(data)
# total count of missing values across the data frame
sum(is.na(data))
colnames(data)
# response variable: default.payment.next.month
defaultpayment<- data$default.payment.next.month
introduce(data)
count(data, vars = EDUCATION)
# or we can use following to get the count
table(data$EDUCATION)
summary(data$EDUCATION)
summary(data$MARRIAGE)
table(data$MARRIAGE)
## ---- Recode undocumented category levels ----
# EDUCATION levels 0/5/6 are collapsed into level 4
data$EDUCATION[data$EDUCATION==0] <- 4
data$EDUCATION[data$EDUCATION==5] <- 4
data$EDUCATION[data$EDUCATION==6] <- 4
# MARRIAGE level 0 is collapsed into level 3
data$MARRIAGE[data$MARRIAGE==0] <- 3
## ---- Correlation and distribution plots ----
plot_correlation(na.omit(data),maxcat=5L)
plot_correlation(data)
# NOTE(review): attach() is discouraged (it can silently mask objects);
# prefer data$PAY_0 etc. or with(data, ...)
attach(data)
skewness(EDUCATION)
skewness(PAY_0)
skewness(PAY_2)
skewness(PAY_3)
skewness(PAY_4)
plot_histogram(data)
## ---- Feature selection and scaling ----
# drop the raw bill-amount columns plus ID and AGE
new_data<- select(data,-one_of('BILL_AMT1','BILL_AMT2','BILL_AMT3','BILL_AMT4','BILL_AMT5',
'BILL_AMT6','ID','AGE'))
head(new_data)
dim(new_data)
colnames(new_data)
# standardize the 16 predictor columns (column 17 is the response)
new_data[,1:16]<-scale(new_data[,1:16])
#splitting data: method 1
# NOTE(review): no set.seed() call, so this split is not reproducible
d1=sort(sample(nrow(new_data),nrow(new_data)*.7))
#splitting data: method 2
d=sample.split(new_data$default.payment.next.month,SplitRatio = 0.7)
dim(d)
# NOTE(review): `t` shadows base::t and this method-2 split is unused below
t=new_data[d,]
View(t)
dim(t)
## ---- Train/test split (method-1 indices) ----
train_data=new_data[d1,]
View(train_data)
dim(train_data)
test_data=new_data[-d1,]
dim(test_data)
View(test_data)
## ---- Logistic regression ----
log.model <- glm(default.payment.next.month ~.,data=train_data,family=binomial(link = "logit"))
summary(log.model)
anova(log.model,test="Chisq")
View(new_data)
## ---- Prediction and evaluation ----
prediction= predict(log.model,test_data,type = "response")
head(prediction,10)
# threshold the predicted probabilities at 0.5
prediction1= ifelse(prediction>0.5,1,0)
head(prediction1,10)
table(test_data$default.payment.next.month)
table(prediction1)
actual=new_data
# confusion matrix: predicted vs. observed (column 17 = response)
accuracy=table(prediction1,test_data[,17])
accurate=sum(diag(accuracy))/sum(accuracy)# accuracy of the model is 81.71%
| /creditcard.R | no_license | MAHIMA018/Credit_card_default_prediction | R | false | false | 2,146 | r | install.packages("DataExplorer")
install.packages("tidyverse")
install.packages("mice")
install.packages("caTools")
library(knitr)
library(tidyverse)
library(ggplot2)
library(mice)
library(lattice)
library(reshape2)
library(DataExplorer)
library(dplyr)
library(e1071)
library(caTools)
library(caret)
data= read.csv('give path to your dataset folder')
head(data)
View(data)
str(data)
summary(data)
sum(is.na(data))
colnames(data)
defaultpayment<- data$default.payment.next.month
introduce(data)
count(data, vars = EDUCATION)
# or we can use following to get the count
table(data$EDUCATION)
summary(data$EDUCATION)
summary(data$MARRIAGE)
table(data$MARRIAGE)
data$EDUCATION[data$EDUCATION==0] <- 4
data$EDUCATION[data$EDUCATION==5] <- 4
data$EDUCATION[data$EDUCATION==6] <- 4
data$MARRIAGE[data$MARRIAGE==0] <- 3
plot_correlation(na.omit(data),maxcat=5L)
plot_correlation(data)
attach(data)
skewness(EDUCATION)
skewness(PAY_0)
skewness(PAY_2)
skewness(PAY_3)
skewness(PAY_4)
plot_histogram(data)
new_data<- select(data,-one_of('BILL_AMT1','BILL_AMT2','BILL_AMT3','BILL_AMT4','BILL_AMT5',
'BILL_AMT6','ID','AGE'))
head(new_data)
dim(new_data)
colnames(new_data)
new_data[,1:16]<-scale(new_data[,1:16])
#splitting data: method 1
d1=sort(sample(nrow(new_data),nrow(new_data)*.7))
#splitting data: method 2
d=sample.split(new_data$default.payment.next.month,SplitRatio = 0.7)
dim(d)
t=new_data[d,]
View(t)
dim(t)
train_data=new_data[d1,]
View(train_data)
dim(train_data)
test_data=new_data[-d1,]
dim(test_data)
View(test_data)
log.model <- glm(default.payment.next.month ~.,data=train_data,family=binomial(link = "logit"))
summary(log.model)
anova(log.model,test="Chisq")
View(new_data)
prediction= predict(log.model,test_data,type = "response")
head(prediction,10)
prediction1= ifelse(prediction>0.5,1,0)
head(prediction1,10)
table(test_data$default.payment.next.month)
table(prediction1)
actual=new_data
accuracy=table(prediction1,test_data[,17])
accurate=sum(diag(accuracy))/sum(accuracy)# accuracy of the model is 81.71%
|
\name{sim}
\Rdversion{1.1}
\alias{sim}
\title{
Generate synthetic observations
}
\description{
Generates \code{n} synthetic observations (a simulated realization) from the specified ARMA model.
}
\usage{
sim(a, n = 100)
}
\arguments{
\item{a}{ARMA model}
\item{n}{Number of synthetic observations required}
}
\details{
The ARMA model is a list with the following components.
\cr
\tabular{ll}{
\code{phi}\tab
Vector of AR coefficients (index number equals coefficient subscript)
\cr
\code{theta}\tab
Vector of MA coefficients (index number equals coefficient subscript)
\cr
\code{sigma2}\tab
White noise variance
}}
\value{
Returns a vector of \code{n} synthetic observations.
}
\examples{
a = specify(ar=c(0,0,.99))
x = sim(a,60)
plotc(x)
}
| /man/sim.Rd | no_license | cran/itsmr | R | false | false | 681 | rd | \name{sim}
\Rdversion{1.1}
\alias{sim}
\title{
Generate synthetic observations
}
\description{
Generate synthetic observations
}
\usage{
sim(a, n = 100)
}
\arguments{
\item{a}{ARMA model}
\item{n}{Number of synthetic observations required}
}
\details{
The ARMA model is a list with the following components.
\cr
\tabular{ll}{
\code{phi}\tab
Vector of AR coefficients (index number equals coefficient subscript)
\cr
\code{theta}\tab
Vector of MA coefficients (index number equals coefficient subscript)
\cr
\code{sigma2}\tab
White noise variance
}}
\value{
Returns a vector of \code{n} synthetic observations.
}
\examples{
a = specify(ar=c(0,0,.99))
x = sim(a,60)
plotc(x)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shared.R
\name{generate_stdzied_design_matrix}
\alias{generate_stdzied_design_matrix}
\title{Generates a design matrix with standardized predictors.}
\usage{
generate_stdzied_design_matrix(n = 50, p = 1, covariate_gen = rnorm, ...)
}
\arguments{
\item{n}{Number of rows in the design matrix}
\item{p}{Number of columns in the design matrix}
\item{covariate_gen}{The function to use to draw the covariate realizations (assumed to be iid).
This defaults to \code{rnorm} for $N(0,1)$ draws.}
\item{...}{Optional arguments to be passed to the \code{covariate_dist} function.}
}
\value{
The design matrix
}
\description{
This function is useful for debugging.
}
\author{
Adam Kapelner
}
| /GreedyExperimentalDesign/man/generate_stdzied_design_matrix.Rd | no_license | kapelner/GreedyExperimentalDesign | R | false | true | 763 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shared.R
\name{generate_stdzied_design_matrix}
\alias{generate_stdzied_design_matrix}
\title{Generates a design matrix with standardized predictors.}
\usage{
generate_stdzied_design_matrix(n = 50, p = 1, covariate_gen = rnorm, ...)
}
\arguments{
\item{n}{Number of rows in the design matrix}
\item{p}{Number of columns in the design matrix}
\item{covariate_gen}{The function to use to draw the covariate realizations (assumed to be iid).
This defaults to \code{rnorm} for $N(0,1)$ draws.}
\item{...}{Optional arguments to be passed to the \code{covariate_dist} function.}
}
\value{
THe design matrix
}
\description{
This function is useful for debugging.
}
\author{
Adam Kapelner
}
|
################################
## This code is for plot1.png ##
################################
## The master csv has too many rows to load whole, so this script reads only
## the 2880 rows covering 1/2/2007 and 2/2/2007 via skip/nrows in read.csv.

## Read only the relevant rows from the master csv.
## FIX: na.strings was the malformed ""?"" — missing values in this data set
## are encoded as the literal string "?".
mdat <- read.csv("./data/household_power_consumption.csv",
                 sep = ";",
                 header = FALSE,
                 blank.lines.skip = TRUE,
                 nrows = 2880,
                 skip = 66637,
                 na.strings = "?",
                 colClasses = c("character",
                                "character",
                                "numeric",
                                "numeric",
                                "numeric",
                                "numeric",
                                "numeric",
                                "numeric",
                                "numeric"))

## Dummy read of one row to pick up the column header from the master csv
colnames <- read.csv("./data/household_power_consumption.csv",
                     sep = ";",
                     header = TRUE,
                     blank.lines.skip = TRUE,
                     nrows = 1)

## Copy the header's column names onto mdat
names(mdat) <- names(colnames)

## Open the png device (480x480 px, white background)
png(filename = "plot1.png",
    width = 480,
    height = 480,
    units = "px",
    bg = "white")

## Plot histogram of global active power
hist(mdat$Global_active_power,
     col = "red",
     xlab = "Global Active Power (kilowatts)",
     main = "Global Active Power")

## Close device
dev.off()
| /plot1.R | no_license | sumikrsh/ExData_Plotting1 | R | false | false | 1,663 | r | ################################
## This code is for plot1.png ##
################################
## Since the master csv file has too many rows, this code extracts data for dates
## 1/2/2007 and 2/2/2007 by skipping all other rows. read.csv function is used.
## Read only relevant rows from master csv to get mdat
mdat <- read.csv("./data/household_power_consumption.csv",
sep=";",
header=FALSE,
blank.lines.skip=TRUE,
nrows=2880,
skip=66637,
na.strings=""?"",
colClasses=c("character",
"character",
"numeric",
"numeric",
"numeric",
"numeric",
"numeric",
"numeric",
"numeric"))
## Dummy read of one row to get file header from master csv file
colnames <- read.csv("./data/household_power_consumption.csv",
sep=";",
header=TRUE,
blank.lines.skip = TRUE,
nrows=1)
## Copy file header to mdat
names(mdat) <- names(colnames)
## Base plotting code to construct plot1.png
## Open png device with suitable pixels and background
png(filename = "plot1.png",
width=480,
height=480,
units="px",
bg="white")
## Plot histogram
hist(mdat$Global_active_power,
col="red",
xlab="Global Active Power (kilowatts)",
main="Global Active Power")
## Close device
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PATHOGEN-PfSI-Utilities.R
\name{util_PfSISlice}
\alias{util_PfSISlice}
\title{PfSI: Empty Occupancy Vector}
\usage{
util_PfSISlice(time)
}
\arguments{
\item{time}{current time slice}
}
\value{
list
}
\description{
Make an empty occupancy vector at a single time slice for the \code{PfSI} module. This is a helper function called by \code{\link{util_PfSIHistory}}
}
\examples{
util_PfSISlice(time = 0)
}
| /man/util_PfSISlice.Rd | no_license | smitdave/MASH | R | false | true | 481 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PATHOGEN-PfSI-Utilities.R
\name{util_PfSISlice}
\alias{util_PfSISlice}
\title{PfSI: Empty Occupancy Vector}
\usage{
util_PfSISlice(time)
}
\arguments{
\item{time}{current time slice}
}
\value{
list
}
\description{
Make an empty occupancy vector at a single time slice for the \code{PfSI} module. This is a helper function called by \code{\link{util_PfSIHistory}}
}
\examples{
util_PfSISlice(time = 0)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fat.r
\name{plot.fat}
\alias{plot.fat}
\title{Display results from the funnel plot asymmetry test}
\usage{
\method{plot}{fat}(
x,
ref,
confint = TRUE,
confint.level = 0.1,
confint.col = "skyblue",
confint.alpha = 0.5,
confint.density = NULL,
xlab = "Effect size",
add.pval = TRUE,
...
)
}
\arguments{
\item{x}{An object of class \code{fat}}
\item{ref}{A numeric value indicating the fixed or random effects summary estimate. If no value is provided
then it will be retrieved from a fixed effects meta-analysis (if possible).}
\item{confint}{A logical indicator. If \code{TRUE}, a confidence interval will be displayed for the estimated
regression model (based on a Student-T distribution)}
\item{confint.level}{Significance level for constructing the confidence interval.}
\item{confint.col}{The color for filling the confidence interval. Choose \code{NA} to leave polygons unfilled.
If \code{confint.density} is specified with a positive value this gives the color of the shading lines.}
\item{confint.alpha}{A numeric value between 0 and 1 indicating the opacity for the confidence region.}
\item{confint.density}{The density of shading lines, in lines per inch. The default value of \code{NULL} means
that no shading lines are drawn. A zero value of density means no shading nor filling whereas negative values
and \code{NA} suppress shading (and so allow color filling).}
\item{xlab}{A title for the x axis}
\item{add.pval}{Logical to indicate whether a P-value should be added to the plot}
\item{...}{Additional arguments.}
}
\description{
Generates a funnel plot for a fitted \code{fat} object.
}
\examples{
data(Fibrinogen)
b <- log(Fibrinogen$HR)
b.se <- ((log(Fibrinogen$HR.975) - log(Fibrinogen$HR.025))/(2*qnorm(0.975)))
n.total <- Fibrinogen$N.total
# A very simple funnel plot
plot(fat(b=b, b.se=b.se), xlab = "Log hazard ratio")
# Plot the funnel for an alternative test
plot(fat(b=b, b.se=b.se, n.total=n.total, method="M-FIV"), xlab = "Log hazard ratio")
}
\author{
Thomas Debray <thomas.debray@gmail.com>
Frantisek Bartos <f.bartos96@gmail.com>
}
| /man/plot.fat.Rd | no_license | cran/metamisc | R | false | true | 2,246 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fat.r
\name{plot.fat}
\alias{plot.fat}
\title{Display results from the funnel plot asymmetry test}
\usage{
\method{plot}{fat}(
x,
ref,
confint = TRUE,
confint.level = 0.1,
confint.col = "skyblue",
confint.alpha = 0.5,
confint.density = NULL,
xlab = "Effect size",
add.pval = TRUE,
...
)
}
\arguments{
\item{x}{An object of class \code{fat}}
\item{ref}{A numeric value indicating the fixed or random effects summary estimate. If no value is provided
then it will be retrieved from a fixed effects meta-analysis (if possible).}
\item{confint}{A logical indicator. If \code{TRUE}, a confidence interval will be displayed for the estimated
regression model (based on a Student-T distribution)}
\item{confint.level}{Significance level for constructing the confidence interval.}
\item{confint.col}{The color for filling the confidence interval. Choose \code{NA} to leave polygons unfilled.
If \code{confint.density} is specified with a positive value this gives the color of the shading lines.}
\item{confint.alpha}{A numeric value between 0 and 1 indicating the opacity for the confidence region.}
\item{confint.density}{The density of shading lines, in lines per inch. The default value of \code{NULL} means
that no shading lines are drawn. A zero value of density means no shading nor filling whereas negative values
and \code{NA} suppress shading (and so allow color filling).}
\item{xlab}{A title for the x axis}
\item{add.pval}{Logical to indicate whether a P-value should be added to the plot}
\item{...}{Additional arguments.}
}
\description{
Generates a funnel plot for a fitted \code{fat} object.
}
\examples{
data(Fibrinogen)
b <- log(Fibrinogen$HR)
b.se <- ((log(Fibrinogen$HR.975) - log(Fibrinogen$HR.025))/(2*qnorm(0.975)))
n.total <- Fibrinogen$N.total
# A very simple funnel plot
plot(fat(b=b, b.se=b.se), xlab = "Log hazard ratio")
# Plot the funnel for an alternative test
plot(fat(b=b, b.se=b.se, n.total=n.total, method="M-FIV"), xlab = "Log hazard ratio")
}
\author{
Thomas Debray <thomas.debray@gmail.com>
Frantisek Bartos <f.bartos96@gmail.com>
}
|
# Problem 1: clean the cereal data set and explore predictors of `rating`.
library(ggplot2);
library(ggcorrplot);

## ---- Load and inspect ----
data = read.csv("cereal.csv");
summary(data);
mean(data$calories);

## ---- Clean: drop rows with negative sentinel values ----
# carbo / potass / sugars use negative values as missing-value sentinels
data <- data[data$carbo >= 0, ]
data <- data[data$potass >= 0, ]
data <- data[data$sugars >= 0, ]

## ---- Persist the cleaned data and reload it ----
saveRDS(data, file = "clean_cereal.rds")
data = readRDS(file = "clean_cereal.rds");
summary(data);

# Columns:
# [1] "name" "mfr" "type" "calories" "protein" "fat"
# [7] "sodium" "fiber" "carbo" "sugars" "potass" "vitamins"
# [13] "shelf" "weight" "cups" "rating"

## ---- Correlation overview ----
require(corrgram)
corrgram(data, order=TRUE, lower.panel=panel.shade, upper.panel=panel.pie,
text.panel=panel.txt, main="Cereal data")

## ---- Scatterplots of each predictor vs. rating ----
# One x11() window per predictor with a least-squares fit; this loop
# replaces 14 copy-pasted plot blocks (behavior unchanged).
predictors <- c("mfr", "type", "calories", "protein", "fat", "sodium",
                "fiber", "carbo", "sugars", "potass", "vitamins",
                "shelf", "weight", "cups")
for (v in predictors) {
  x11();
  # print() is required: ggplot objects do not auto-print inside a loop
  print(
    ggplot(data = data, aes(x = .data[[v]], y = rating)) +
      geom_point(color = 'blue') +
      geom_smooth(method = "lm", se = FALSE) +
      xlab(v)  # keep the bare column name as the axis label
  )
}
| /HW_1/Test/Final_HW/Problem1_Cleaning.r | no_license | jeetendragan/statistical-data-mining-ub | R | false | false | 2,510 | r | library(ggplot2);
library(ggcorrplot);
data = read.csv("cereal.csv");
summary(data);
mean(data$calories);
# drop rows with negative values
data <- data[data$carbo >= 0, ]
data <- data[data$potass >= 0, ]
data <- data[data$sugars >= 0, ]
# save data to
saveRDS(data, file = "clean_cereal.rds")
data = readRDS(file = "clean_cereal.rds");
summary(data);
# [1] "name" "mfr" "type" "calories" "protein" "fat"
# [7] "sodium" "fiber" "carbo" "sugars" "potass" "vitamins"#
# [13] "shelf" "weight" "cups" "rating"
require(corrgram)
corrgram(data, order=TRUE, lower.panel=panel.shade, upper.panel=panel.pie,
text.panel=panel.txt, main="Cereal data")
x11();
ggplot(data = data, aes(x = mfr, y = rating)) +
geom_point(color='blue') +
geom_smooth(method = "lm", se = FALSE)
x11();
ggplot(data = data, aes(x = type, y = rating)) +
geom_point(color='blue') +
geom_smooth(method = "lm", se = FALSE)
x11();
ggplot(data = data, aes(x = calories, y = rating)) +
geom_point(color='blue') +
geom_smooth(method = "lm", se = FALSE)
x11();
ggplot(data = data, aes(x = protein, y = rating)) +
geom_point(color='blue') +
geom_smooth(method = "lm", se = FALSE)
x11();
ggplot(data = data, aes(x = fat, y = rating)) +
geom_point(color='blue') +
geom_smooth(method = "lm", se = FALSE)
x11();
ggplot(data = data, aes(x = sodium, y = rating)) +
geom_point(color='blue') +
geom_smooth(method = "lm", se = FALSE)
x11();
ggplot(data = data, aes(x = fiber, y = rating)) +
geom_point(color='blue') +
geom_smooth(method = "lm", se = FALSE)
x11();
ggplot(data = data, aes(x = carbo, y = rating)) +
geom_point(color='blue') +
geom_smooth(method = "lm", se = FALSE)
x11();
ggplot(data = data, aes(x = sugars, y = rating)) +
geom_point(color='blue') +
geom_smooth(method = "lm", se = FALSE)
x11();
ggplot(data = data, aes(x = potass, y = rating)) +
geom_point(color='blue') +
geom_smooth(method = "lm", se = FALSE)
x11();
ggplot(data = data, aes(x = vitamins, y = rating)) +
geom_point(color='blue') +
geom_smooth(method = "lm", se = FALSE)
x11();
ggplot(data = data, aes(x = shelf, y = rating)) +
geom_point(color='blue') +
geom_smooth(method = "lm", se = FALSE)
x11();
ggplot(data = data, aes(x = weight, y = rating)) +
geom_point(color='blue') +
geom_smooth(method = "lm", se = FALSE)
x11();
ggplot(data = data, aes(x = cups, y = rating)) +
geom_point(color='blue') +
geom_smooth(method = "lm", se = FALSE)
|
# Here, assuming we only deal with daily, non-comparable SVI
# Builds Google Trends (SVI) download URLs for each search term — one URL
# per calendar quarter of 2004-2016 plus one all-time URL per term — then
# fetches each URL through a scripted lynx session.
search_terms <- c('euromicron', 'TC Unterhaltungstechnik', 'SGL Carbon', 'zooplus', 'TUI', 'Borussia Dortmund', 'EUCA', 'TCU', 'SGL', 'ZO1', 'TUI1', 'BVB')
frequency <- 'daily'
comparable <- TRUE
country <- NA
region <- NA
year <- NA
years <- c(2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016)
months <- c(1, 4, 7, 10)  # quarter start months
length <- 3  # NOTE(review): shadows base::length — rename (e.g. n_months) when safe
url <- vector()
counter <- 1
# current year/month, hoisted out of the loops (loop-invariant)
current_year <- as.numeric(format(Sys.time(), "%Y"))
current_month <- as.numeric(format(Sys.time(), "%m"))
# Quarterly URLs; skip quarters that have not started yet.
for (search_term in search_terms) {
  for (year in years) {
    for (month in months) {
      if (year == current_year && month > current_month) {
        next  # this stops us from creating URLs for dates that don't exist
      }
      url[counter] <- URL_GT(keyword = search_term, year = year, month = month)
      counter <- counter + 1
    }
  }
}
# One all-time URL per search term.
for (search_term in search_terms) {
  url[counter] <- URL_GT(search_term)
  counter <- counter + 1
}
# Download each URL through lynx (slow: one scripted session per URL).
for (i in seq_along(url)) {
  lynx_commands <- lynx_script(url[i])  # create the lynx script
  write.table(lynx_commands, '/root/gt_download', row.names = FALSE, col.names = FALSE, quote = FALSE)  # save it
  system("lynx -cmd_script=/root/gt_download www.google.com")  # execute it (takes a while, be patient)
}
| /Google trends batch service.R | no_license | anovelli20/Google-Trends | R | false | false | 1,263 | r | # Here, assuming we only deal with daily, non-comparable SVI
search_terms = c('euromicron', 'TC Unterhaltungstechnik', 'SGL Carbon', 'zooplus', 'TUI', 'Borussia Dortmund', 'EUCA', 'TCU', 'SGL', 'ZO1', 'TUI1', 'BVB')
frequency = 'daily'
comparable = TRUE
country = NA
region = NA
year = NA
years = c(2004,2005,2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016)
months = c(1,4,7,10)
length = 3
url=vector()
counter = 1
for(search_term in search_terms){
for(year in years){
for(month in months){
if(year == as.numeric(substr(as.character(Sys.time()), 1, 4)) & month > as.numeric(substr(as.character(Sys.time()), 6, 7))){
next() # This stops us from creating URLs for dates that don't exist.
}
url[counter]=URL_GT(keyword=search_term, year=year, month=month)
counter = counter + 1
}
}
}
for(search_term in search_terms){
url[counter]=URL_GT(search_term)
counter = counter + 1
}
for(i in 1:length(url)){
lynx_commands <- lynx_script(url[i]) # Create the lynx script
write.table(lynx_commands, '/root/gt_download', row.names=F, col.names=F, quote=F) # Save the lynx script
system("lynx -cmd_script=/root/gt_download www.google.com") # Execute the lynx script (takes a while, be patient)
}
|
# Rolling 12-step sums over a matrix of values (presumably monthly — TODO
# confirm with callers).
#
# The matrix is flattened row-wise into one long series v; for every
# position t after the first 12 values, sum(v[(t-12):(t-1)]) is computed.
# Results are returned as an (nrow(x) - 1) x 12 matrix filled row by row,
# matching the layout of the input minus its first row.
#
# Requires ncol(x) == 12: the original implementation silently assumed
# this — its loop started at ncol(x) + 1 while the window was hard-coded
# to 12, which only agree when there are 12 columns. At least 2 rows are
# needed to produce any output.
#
# Implementation note: O(n) via cumulative sums instead of the original
# O(n * 12) explicit double loop; results are identical for valid input.
last.12.sum <- function(x){
  stopifnot(ncol(x) == 12, nrow(x) >= 2)
  v <- as.vector(t(x))
  n <- length(v)
  # sum(v[(t-12):(t-1)]) == csum[t-1] - csum[t-13], with csum[0] taken as 0
  csum <- cumsum(v)
  roll <- csum[12:(n - 1)] - c(0L, csum[1:(n - 13)])
  # byrow = TRUE reproduces the original row-major fill order
  matrix(roll, nrow = nrow(x) - 1, ncol = 12, byrow = TRUE)
}
| /data-analysis/2-timeseries/src/last-12-sum.R | no_license | cameronbracken/classy | R | false | false | 257 | r | last.12.sum <- function(x){
v <- as.vector(t(x))
n <- length(v)
mat <- matrix(NA,nrow(x)-1,ncol(x))
i <- j <- 1
for(t in (ncol(x)+1):n){
if(j > ncol(mat)){
j <- 1
i <- i + 1
}
mat[i,j] <- sum(v[(t-12):(t-1)])
j <- j + 1
}
return(mat)
}
|
##Programming Assignment 2 -- Peer Assessments
## The purpose of these R functions are to cache the time consuming process
## of matrix inversion. This prevents re-computation each time this function is called.
## The first function makeCacheMAtrix creates a matrix, containing a list of functions
##including setting the matrix values (set), getting these values (get) if in cache,
##computing the inverse (setSolve) using the solve function and getting the inverse
##(getSolve) if in cache. setSolve and getSolve are used by the the second function,
## Construct a cache-aware "matrix" wrapper.
##
## Returns a list of four closures sharing the matrix `x` and its cached
## inverse:
##   set(y)      -- store a new matrix and invalidate the cached inverse
##   get()       -- return the stored matrix
##   setSolve(m) -- store a computed inverse
##   getSolve()  -- return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # new matrix: any previous inverse is stale
  }
  get <- function() x
  setSolve <- function(solve) {
    cached_inverse <<- solve
  }
  getSolve <- function() cached_inverse
  list(set = set,
       get = get,
       setSolve = setSolve,
       getSolve = getSolve)
}
## This purpose of this R function, cacheSolve, is to either call the matrix inversion
## function, or to retrieve it in cache if the matrix inversion has already been computed.
## This function calls back to functions created in makeCacheMatrix.
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## The inverse is computed with solve() on first use and stored inside the
## wrapper; subsequent calls return the cached copy (announced with a
## "getting cached data" message). Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  inv <- x$getSolve()
  if (is.null(inv)) {
    # cache miss: compute the inverse, then store it for future calls
    inv <- solve(x$get(), ...)
    x$setSolve(inv)
  } else {
    message("getting cached data")
  }
  inv
}
##Programming Assignment 2 -- Peer Assessments
## The purpose of these R functions are to cache the time consuming process
## of matrix inversion. This prevents re-computation each time this function is called.
## The first function makeCacheMAtrix creates a matrix, containing a list of functions
##including setting the matrix values (set), getting these values (get) if in cache,
##computing the inverse (setSolve) using the solve function and getting the inverse
##(getSolve) if in cache. setSolve and getSolve are used by the the second function,
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
set <- function(y) {
x <<- y
m <<- NULL
}
get <- function() x
setSolve <- function(solve) m <<- solve
getSolve <- function() m
list(set = set, get = get,
setSolve = setSolve,
getSolve = getSolve)
}
## This purpose of this R function, cacheSolve, is to either call the matrix inversion
## function, or to retrieve it in cache if the matrix inversion has already been computed.
## This function calls back to functions created in makeCacheMatrix.
cacheSolve <- function(x, ...) {
m <- x$getSolve()
if(!is.null(m)) {
message("getting cached data")
return(m)
}
data <- x$get()
m <- solve(data, ...)
x$setSolve(m)
m
} |
#library(RPostgreSQL)
#drv<-dbDriver("PostgreSQL")
#con<-dbConnect(drv,dbname="cacopsylla",host="localhost",port=5432,user="pillo",password="")
#summary(con)
#dbListTables(con)
#dbDisconnect(con)
#dbUnloadDriver(drv)
# ... <-dbGetQuery(con,"select * FROM .... ORDER BY id") | /r-code/00_settings.R | no_license | Marwe/Appleproliferation | R | false | false | 276 | r | #library(RPostgreSQL)
#drv<-dbDriver("PostgreSQL")
#con<-dbConnect(drv,dbname="cacopsylla",host="localhost",port=5432,user="pillo",password="")
#summary(con)
#dbListTables(con)
#dbDisconnect(con)
#dbUnloadDriver(drv)
# ... <-dbGetQuery(con,"select * FROM .... ORDER BY id") |
# Verify color_conv() conversions: every supported input format for pure
# red (named colour, hex, hex+alpha, 3-digit hex, rgb/rgba vectors) maps
# to the expected value in every supported output format, both when the
# input format is guessed and when it is given explicitly.
test_that("defaults", {
# each entry encodes pure red in a different input format
from_col <- list(
col = "red",
hexa = "#ff0000ff",
hex = "#FF0000",
hex3 = "#F00",
rgb = c(255,0,0),
rgba = c(255,0,0,255)
)
# expected representation of red in each output format
to_col <- list(
hexa = "#ff0000ff",
hex = "#ff0000",
rgb = c(255,0,0),
rgba = c(255,0,0,255),
hsv = c(h=0,s=1,v=1)
)
from_fmt = c("col", "hex", "hexa", "hex3", "rgb", "rgba")
to_fmt = c("hexa", "hex", "rgba", "rgb", "hsv")
# guess from
for (from in from_fmt) {
for (to in to_fmt) {
# use quasi-labelling !! to get informative error messages in loops
expect_equal(color_conv(!!from_col[[from]], 1, "guess", !!to), !!to_col[[to]])
}
}
# text rgba and alphas > 1
expect_equal(color_conv("rgba(1,2,3,4)", 1, "guess", "hexa"), "#01020304")
expect_equal(color_conv("rgb(1,2,3)", 5, "guess", "hexa"), "#01020305")
expect_equal(color_conv("rgb(1,2,3)", 1.01, "guess", "hexa"), "#01020301")
# specify from
for (from in from_fmt) {
for (to in to_fmt) {
expect_equal(color_conv(!!from_col[[from]], 1, !!from, !!to), !!to_col[[to]])
}
}
})
# col2lab ----
# Check col2lab() against reference CIELAB values computed with the
# Bruce Lindbloom online calculator (see URL below).
test_that("col2lab", {
# http://www.brucelindbloom.com/index.html?ColorCheckerCalcHelp.html
# values checked with ref white = D65,
# RGB Model = sRGB, dom lambda = 611.4 nm (default)
# color name
red <- col2lab("red") |> unlist()
lindbloom_red = c(L = 53.24, a = 80.09, b = 67.20)
expect_equal(lindbloom_red, red, tolerance = 0.05)
# hex color
green <- col2lab("#00FF00") |> unlist()
lindbloom_green = c(L = 87.73, a = -86.18, b = 83.18)
expect_equal(lindbloom_green, green, tolerance = 0.05)
})
# lab2rgb ----
# Round-trip test: colour -> CIELAB (col2lab) -> RGB (lab2rgb) must match
# the direct colour -> RGB conversion from color_conv().
# Covers an achromatic colour (white), an sRGB gamut corner (pure red),
# and an R named colour (dodgerblue). The original body repeated the same
# four statements verbatim for each colour; a loop removes the duplication.
test_that("lab2rgb", {
  for (col in c('#FFFFFF', '#FF0000', 'dodgerblue')) {
    lab <- col2lab(col)
    rgb <- lab2rgb(lab)
    comp <- color_conv(col, to = 'rgb')
    names(comp) <- c('red', 'green', 'blue')
    # label makes a failure report which colour broke the round trip
    expect_equal(rgb, comp, label = paste0("lab2rgb(col2lab('", col, "'))"))
  }
})
# none ----
# The special value "none" (no colour) must pass through unchanged.
test_that("none", {
expect_equal(color_conv("none"), "none")
})
| /tests/testthat/test-color_conv.R | permissive | debruine/webmorphR | R | false | false | 2,309 | r | test_that("defaults", {
from_col <- list(
col = "red",
hexa = "#ff0000ff",
hex = "#FF0000",
hex3 = "#F00",
rgb = c(255,0,0),
rgba = c(255,0,0,255)
)
to_col <- list(
hexa = "#ff0000ff",
hex = "#ff0000",
rgb = c(255,0,0),
rgba = c(255,0,0,255),
hsv = c(h=0,s=1,v=1)
)
from_fmt = c("col", "hex", "hexa", "hex3", "rgb", "rgba")
to_fmt = c("hexa", "hex", "rgba", "rgb", "hsv")
# guess from
for (from in from_fmt) {
for (to in to_fmt) {
# use quasi-labelling !! to get informative error messages in loops
expect_equal(color_conv(!!from_col[[from]], 1, "guess", !!to), !!to_col[[to]])
}
}
# text rgba and alphas > 1
expect_equal(color_conv("rgba(1,2,3,4)", 1, "guess", "hexa"), "#01020304")
expect_equal(color_conv("rgb(1,2,3)", 5, "guess", "hexa"), "#01020305")
expect_equal(color_conv("rgb(1,2,3)", 1.01, "guess", "hexa"), "#01020301")
# specify from
for (from in from_fmt) {
for (to in to_fmt) {
expect_equal(color_conv(!!from_col[[from]], 1, !!from, !!to), !!to_col[[to]])
}
}
})
# col2lab ----
# Check col2lab() against reference CIELAB values computed with the
# Bruce Lindbloom online calculator (see URL below).
test_that("col2lab", {
# http://www.brucelindbloom.com/index.html?ColorCheckerCalcHelp.html
# values checked with ref white = D65,
# RGB Model = sRGB, dom lambda = 611.4 nm (default)
# color name
red <- col2lab("red") |> unlist()
lindbloom_red = c(L = 53.24, a = 80.09, b = 67.20)
expect_equal(lindbloom_red, red, tolerance = 0.05)
# hex color
green <- col2lab("#00FF00") |> unlist()
lindbloom_green = c(L = 87.73, a = -86.18, b = 83.18)
expect_equal(lindbloom_green, green, tolerance = 0.05)
})
# lab2rgb ----
# Round-trip test: colour -> CIELAB (col2lab) -> RGB (lab2rgb) must match
# the direct colour -> RGB conversion from color_conv().
# Covers an achromatic colour (white), an sRGB gamut corner (pure red),
# and an R named colour (dodgerblue). The original body repeated the same
# four statements verbatim for each colour; a loop removes the duplication.
test_that("lab2rgb", {
  for (col in c('#FFFFFF', '#FF0000', 'dodgerblue')) {
    lab <- col2lab(col)
    rgb <- lab2rgb(lab)
    comp <- color_conv(col, to = 'rgb')
    names(comp) <- c('red', 'green', 'blue')
    # label makes a failure report which colour broke the round trip
    expect_equal(rgb, comp, label = paste0("lab2rgb(col2lab('", col, "'))"))
  }
})
# none ----
# The special value "none" (no colour) must pass through unchanged.
test_that("none", {
expect_equal(color_conv("none"), "none")
})
|
## DATRAS in R
# getAphia or getSpecies: export csv of unique species codes in alphabetical order
# then manually send to lookup website, get response csv
# putAphia or putSpecies: upload response csv then vlookup input codes and results against original dbase
# table layout: convert raw datras file to 3 sheets format.
# Assuming raw format is in the weird 3 sheets in the first place
# and that's not just excel
# use Site/StationNo/Year as index & put as 1st column
Sum of HL no @ length for all length classes selected.
Actually needs to be sum of (#@L * L/W conversion = total weight per length class)
Can use CatCatchWeight? in grams. "catch weight in grams per category, or weight per haul per hour for CPUE data" (which is it?)
CatCatchWeight is the sum of weights comprised of the TotalNo of fish of the same sex.
Irrespective of Length:
So for each sex, total weight per site =
(average CatCatchWeight)/(average TotalNo) == M
+
(average CatCatchWeight)/(average TotalNo) == F
+
(average CatCatchWeight)/(average TotalNo) == (other sex?)
For LngtClass between X & Y: same calculations as above. Problem: blonde ray reported max weight for 1 fish: 9800 (g?). Min: 5. Those two together in a trawl = 9805 (g?). Average: 4902.5
Better to do as numbers?
# also, instead of average weight per CatCatchVal, use proportion of length of Cat, e.g.
# individual weights = individual's CatCatchWgt * (individuals LngClass / totalLngClass in that CatCatchWgt) e.g.
Sex # CatCatchWgt LngClass
M 2 10 1000
M 2 10 100
# 1st one: 10 * (1000/1100) = 9.09
# 2nd one: 10 * (100/1100) = 0.909
# See Doug Beare's Datras R package to see what's in there already | /Gbm.auto_extras/datras.R | permissive | SimonDedman/gbm.auto | R | false | false | 1,739 | r | ## DATRAS in R
# getAphia or getSpecies: export csv of unique species codes in alphabetical order
# then manually send to lookup website, get response csv
# putAphia or putSpecies: upload response csv then vlookup input codes and results against original dbase
# table layout: convert raw datras file to 3 sheets format.
# Assuming raw format is in the weird 3 sheets in the first place
# and that's not just excel
# use Site/StationNo/Year as index & put as 1st column
Sum of HL no @ length for all length classes selected.
Actually needs to be sum of (#@L * L/W conversion = total weight per length class)
Can use CatCatchWeight? in grams. "catch weight in grams per category, or weight per haul per hour for CPUE data" (which is it?)
CatCatchWeight is the sum of weights comprised of the TotalNo of fish of the same sex.
Irrespective of Length:
So for each sex, total weight per site =
(average CatCatchWeight)/(average TotalNo) == M
+
(average CatCatchWeight)/(average TotalNo) == F
+
(average CatCatchWeight)/(average TotalNo) == (other sex?)
For LngtClass between X & Y: same calculations as above. Problem: blonde ray reported max weight for 1 fish: 9800 (g?). Min: 5. Those two together in a trawl = 9805 (g?). Average: 4902.5
Better to do as numbers?
# also, instead of average weight per CatCatchVal, use proportion of length of Cat, e.g.
# individual weights = individual's CatCatchWgt * (individuals LngClass / totalLngClass in that CatCatchWgt) e.g.
Sex # CatCatchWgt LngClass
M 2 10 1000
M 2 10 100
# 1st one: 10 * (1000/1100) = 9.09
# 2nd one: 10 * (100/1100) = 0.909
# See Doug Beare's Datras R package to see what's in there already |
# NOTE(review): rm(list = ls()) wipes the caller's entire workspace; fine
# for a standalone batch script, but discouraged in shared/sourced code.
rm(list = ls(all = TRUE))
# load data
load('fakedata.RData')
fd = xtraj
# load necessary functions
source('dtq_with_grad.R')
# Objective and gradient callback for nloptr: negative log-likelihood of
# the data under the DTQ transition densities computed by cdt(), plus its
# gradient with respect to each parameter.
#
# c0: parameter vector.
# Depends on globals myh, myk, mybigm (grid settings) and fd (data),
# which this script defines before nloptr() is called.
# Returns list(objective, gradient) in the format nloptr expects.
objgradfun <- function(c0) {
  probmat <- cdt(c0, h = myh, k = myk, bigm = mybigm, littlet = 1, data = fd)
  mylik <- probmat$lik
  # clamp tiny negative densities (numerical noise) to zero before log()
  mylik[mylik < 0] <- 0
  objective <- -sum(log(mylik))
  # gradient of the negative log-likelihood for each parameter
  # NOTE(review): the denominator uses the unclamped probmat$lik, matching
  # the original implementation -- confirm this is intended.
  gradient <- vapply(
    seq_along(c0),
    function(i) -sum(probmat$grad[[i]] / probmat$lik),
    numeric(1)
  )
  return(list("objective" = objective, "gradient" = gradient))
}
# Create the DTQ grid parameters; k and M are tied to the step size h.
myh <- 0.01
myk <- myh^1
mybigm <- ceiling(pi/(myk^1.5))
# Initial parameter guess (the fake data was generated with c(1, 4, 0.5)).
theta <- c(1, 2, 0.5)
library('nloptr')
# L-BFGS with analytic gradients from objgradfun; box constraints on theta.
res <- nloptr(x0 = theta, eval_f = objgradfun, lb = c(0.1, 0, 0.1), ub = c(4, 4, 4), opts = list("algorithm"="NLOPT_LD_LBFGS", "print_level"=3, "check_derivatives" = FALSE, "xtol_abs"=1e-3))
# NOTE(review): nsamp is computed but not saved or used below.
nsamp <- nrow(fd) - 1
# BUG FIX: the original line read paste('solution_',myh,'_k1'.RData',sep='')
# -- a stray quote made it a syntax error; build the name with paste0.
fname <- paste0('solution_', myh, '_k1.RData')
save(res, file = fname)
| /oldRcodes/usingdeltas/results.gopher/optimtest1.R | no_license | hbhat4000/sdeinference | R | false | false | 994 | r | rm(list = ls(all = TRUE))
# load data
load('fakedata.RData')
fd = xtraj
# load necessary functions
source('dtq_with_grad.R')
# Objective and gradient callback for nloptr: negative log-likelihood of
# the data under the DTQ transition densities from cdt(), plus gradient.
# Depends on globals myh, myk, mybigm (grid settings) and fd (data).
objgradfun <- function(c0)
{
probmat = cdt(c0, h = myh, k = myk, bigm = mybigm, littlet = 1, data = fd)
mylik = probmat$lik
# clamp negative densities (numerical noise) to zero so log() is defined
mylik[mylik < 0] = 0
objective = -sum(log(mylik))
nc0 = length(c0)
gradient = numeric(length=nc0)
# gradient of the negative log-likelihood for each parameter
# NOTE(review): denominator uses the unclamped probmat$lik -- confirm intended
for (i in c(1:nc0))
gradient[i] = -sum(probmat$grad[[i]] / probmat$lik)
return(list("objective"=objective,"gradient"=gradient))
}
# Create the DTQ grid parameters; k and M are tied to the step size h.
myh <- 0.01
myk <- myh^1
mybigm <- ceiling(pi/(myk^1.5))
# Initial parameter guess (the fake data was generated with c(1, 4, 0.5)).
theta <- c(1, 2, 0.5)
library('nloptr')
# L-BFGS with analytic gradients from objgradfun; box constraints on theta.
res <- nloptr(x0 = theta, eval_f = objgradfun, lb = c(0.1, 0, 0.1), ub = c(4, 4, 4), opts = list("algorithm"="NLOPT_LD_LBFGS", "print_level"=3, "check_derivatives" = FALSE, "xtol_abs"=1e-3))
# NOTE(review): nsamp is computed but not saved or used below.
nsamp <- nrow(fd) - 1
# BUG FIX: the original line read paste('solution_',myh,'_k1'.RData',sep='')
# -- a stray quote made it a syntax error; build the name with paste0.
fname <- paste0('solution_', myh, '_k1.RData')
save(res, file = fname)
|
library(ape)
testtree <- read.tree("11987_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="11987_0_unrooted.txt") | /codeml_files/newick_trees_processed_and_cleaned/11987_0/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 137 | r | library(ape)
# Read the Newick gene tree, remove its root, and write the unrooted tree
# back out (unrooted trees are required by downstream codeml/PAML runs --
# presumably, given the repo layout; confirm with the pipeline docs).
testtree <- read.tree("11987_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="11987_0_unrooted.txt")
#----------------------------------------------------------------------------------------------------------------------------------#
# Script by : Lucien Fitzpatrick
# Project: Phenology forecasting app
# Purpose: This script serves to run through our daily workflow
# Inputs:
# Outputs: Downloads the new weather, updates the forecasts, and launches the app online
# Notes: This just sources the other scripts in this directory
#-----------------------------------------------------------------------------------------------------------------------------------#
# library(shiny)
# library(ggplot2)
# library(plotly)
# library(stringr)
# library(shinyWidgets)
# library(dplyr)
# library(gridExtra)
# Daily pipeline: download weather, reorganise data for the app, deploy.
print(paste0("************** Executing Phenology Forecast workflow for ", Sys.Date(), " **************"))
#Downloading the new weather data
source("LIVE-1_M1_Meterology_download.R")
#Uploading the new data to the shiny app's folder
# tictoc wraps the data-organisation step to report how long it took
tictoc::tic()
source("LIVE-2_data_org.R")
tictoc::toc()
#Launching the app to the internet
source("LIVE-3_run_shiny.R")
| /scripts/Morton_Bloom_Forecast/LIVE-00_Full_workflow.R | no_license | MortonArb-ForestEcology/Phenology_Forecasting | R | false | false | 1,058 | r | #----------------------------------------------------------------------------------------------------------------------------------#
# Script by : Lucien Fitzpatrick
# Project: Phenology forecasting app
# Purpose: This script serves to run throught our daily workflow
# Inputs:
# Outputs: Downloads the new weather, updates the forecasts, and launches the app online
# Notes: This just sources the other scripts in this directory
#-----------------------------------------------------------------------------------------------------------------------------------#
# library(shiny)
# library(ggplot2)
# library(plotly)
# library(stringr)
# library(shinyWidgets)
# library(dplyr)
# library(gridExtra)
print(paste0("************** Executing Phenology Forecast workflow for ", Sys.Date(), " **************"))
#Downloading the new weather data
source("LIVE-1_M1_Meterology_download.R")
#Uploading the new data to the shiny app's folder
tictoc::tic()
source("LIVE-2_data_org.R")
tictoc::toc()
#Launching the app to the internet
source("LIVE-3_run_shiny.R")
|
##### 06_KAO_Exploring_pvalue_histograms.R ######
## Using the function also found in X6_KAO_creating_pvalues_table.R
## Looking at the pvalue distributions when removing different confounders.
## Essentially this is exploring the effect of confounders in the statistical
## test.
library(DBI)
library(RSQLite)
##### Function to generate a likelyhood ratio based p-value #####
# Likelihood-ratio-style comparison of two nested linear models fit to the
# rows of `data` belonging to one biomolecule.
#
# biomolecule_id: id used to subset data$biomolecule_id.
# formula_null / formula_test: nested lm formulas; formula_test adds the
#   fixed parameter of interest.
# return: 'pvalue' (default) gives the anova Pr(>F); anything else gives
#   the F ratio. Both are NA if the anova comparison itself errors.
compare_lr <- function(biomolecule_id, formula_null, formula_test, data, return = 'pvalue'){
  subset_data <- data[data$biomolecule_id == biomolecule_id, ]
  fit_null <- lm(formula_null, data = subset_data)
  fit_test <- lm(formula_test, data = subset_data)
  # anova() can fail (e.g. degenerate fits); map that to NA results
  comparison <- tryCatch(anova(fit_null, fit_test), error = function(e) NULL)
  if (!is.null(comparison)) {
    lrt_lratio <- comparison$F[2]
    lrt_pvalue <- comparison$`Pr(>F)`[2]
  } else {
    lrt_lratio <- NA
    lrt_pvalue <- NA
  }
  if (return == 'pvalue') {
    lrt_pvalue
  } else {
    lrt_lratio
  }
}
###### Applying this function across all features ########
#### Establish a connection to the DB #####
con <- dbConnect(RSQLite::SQLite(), dbname = "P:/All_20200428_COVID_plasma_multiomics/SQLite Database/Covid-19 Study DB.sqlite")
#### Pull data from DB ####
dbListTables(con)
#####
df_metabolites<- dbGetQuery(con, "SELECT deidentified_patient_metadata.sample_id, normalized_abundance, metabolomics_measurements.biomolecule_id, COVID, Age_less_than_90, Gender, ICU_1
FROM metabolomics_measurements
INNER JOIN metabolomics_runs ON metabolomics_runs.replicate_id = metabolomics_measurements.replicate_id
INNER JOIN rawfiles ON rawfiles.rawfile_id = metabolomics_runs.rawfile_id
INNER JOIN deidentified_patient_metadata ON deidentified_patient_metadata.sample_id = rawfiles.sample_id
INNER JOIN biomolecules on biomolecules.biomolecule_id = metabolomics_measurements.biomolecule_id
WHERE rawfiles.keep = 1
AND biomolecules.keep = '1'
")
df_lipids<- dbGetQuery(con, "SELECT deidentified_patient_metadata.sample_id, normalized_abundance, lipidomics_measurements.biomolecule_id, COVID, Age_less_than_90, Gender, ICU_1
FROM lipidomics_measurements
INNER JOIN lipidomics_runs ON lipidomics_runs.replicate_id = lipidomics_measurements.replicate_id
INNER JOIN rawfiles ON rawfiles.rawfile_id = lipidomics_runs.rawfile_id
INNER JOIN deidentified_patient_metadata ON deidentified_patient_metadata.sample_id = rawfiles.sample_id
INNER JOIN biomolecules on biomolecules.biomolecule_id = lipidomics_measurements.biomolecule_id
WHERE rawfiles.keep = 1
AND biomolecules.keep = '1'
")
df_proteins<- dbGetQuery(con, "SELECT deidentified_patient_metadata.sample_id, normalized_abundance, proteomics_measurements.biomolecule_id, COVID, Age_less_than_90, Gender, ICU_1
FROM proteomics_measurements
INNER JOIN proteomics_runs ON proteomics_runs.replicate_id = proteomics_measurements.replicate_id
INNER JOIN rawfiles ON rawfiles.rawfile_id = proteomics_runs.rawfile_id
INNER JOIN deidentified_patient_metadata ON deidentified_patient_metadata.sample_id = rawfiles.sample_id
INNER JOIN biomolecules on biomolecules.biomolecule_id = proteomics_measurements.biomolecule_id
WHERE rawfiles.keep = 1
AND biomolecules.keep = '1'
")
dbDisconnect(con)
#### Creating dataframe to hold pvalues #######
# stack all three omics layers into one long table
df <- rbind(df_metabolites,df_lipids, df_proteins)
###### Applying this function across all features ########
# For each covariate, compare nested models with/without that covariate
# per biomolecule and inspect the p-value distribution.
# NOTE(review): apply() coerces the data.frame rows to a character matrix;
# as.numeric(x[1]) recovers the biomolecule_id. This works because the id
# is the first column, but iterating over the id column directly (e.g.
# vapply) would be less fragile.
df_pvalues_gender <- data.frame(biomolecule_id = unique(df$biomolecule_id), test = "LR_test", comparison = "GENDER", confounders = "COVID;ICU_1;Age_less_than_90")
df_pvalues_gender$p_value <- apply(df_pvalues_gender, 1, function(x)
compare_lr(as.numeric(x[1]), formula_null = normalized_abundance ~ COVID + ICU_1 + Age_less_than_90,
formula_test = normalized_abundance ~ COVID + ICU_1 + Gender + Age_less_than_90,
data = df, return = 'pvalue'))
hist(df_pvalues_gender$p_value, breaks = 100, main = 'Histogram of pvalues +/- gender')
####### Pvalues w/ age
df_pvalues_age <- data.frame(biomolecule_id = unique(df$biomolecule_id), test = "LR_test", comparison = "Age", confounders = "COVID;ICU_1;Age_less_than_90")
df_pvalues_age$p_value <- apply(df_pvalues_age, 1, function(x)
compare_lr(as.numeric(x[1]), formula_null = normalized_abundance ~ COVID + ICU_1 + Gender,
formula_test = normalized_abundance ~ COVID + ICU_1 + Gender + Age_less_than_90,
data = df, return = 'pvalue'))
hist(df_pvalues_age$p_value, breaks = 100, main = 'Histogram of pvalues +/- age')
####### Pvalues w/ ICU status
df_pvalues_icu <- data.frame(biomolecule_id = unique(df$biomolecule_id), test = "LR_test", comparison = "ICU", confounders = "COVID;Gender;Age_less_than_90")
df_pvalues_icu$p_value <- apply(df_pvalues_icu, 1, function(x)
compare_lr(as.numeric(x[1]), formula_null = normalized_abundance ~ COVID + Gender + Age_less_than_90,
formula_test = normalized_abundance ~ COVID + ICU_1 + Gender + Age_less_than_90,
data = df, return = 'pvalue'))
hist(df_pvalues_icu$p_value, breaks = 100, main = 'Histogram of pvalues +/- ICU')
####### Pvalues w/ COVID status
df_pvalues <- data.frame(biomolecule_id = unique(df$biomolecule_id), test = "LR_test", comparison = "COVID_vs_NONCOVID", confounders = "ICU_1;Gender;Age_less_than_90")
df_pvalues$p_value <- apply(df_pvalues, 1, function(x)
compare_lr(as.numeric(x[1]), formula_null = normalized_abundance ~ ICU_1 + Gender + Age_less_than_90,
formula_test = normalized_abundance ~ COVID + ICU_1 + Gender + Age_less_than_90,
data = df, return = 'pvalue'))
hist(df_pvalues$p_value, breaks = 100, main = 'Histogram of pvalues +/- COVID')
# cross-tabulate significance overlap between the COVID and ICU tests
table(df_pvalues$p_value < 0.05, df_pvalues_icu$p_value < 0.05)
## FALSE TRUE
## FALSE 4465 1543
## TRUE 1436 485
####### Pvalues w/ COVID ICU interaction
df_pvalues_interaction <- data.frame(biomolecule_id = unique(df$biomolecule_id), test = "LR_test", comparison = "COVID ICU interaction", confounders = "ICU_1;Gender;Age_less_than_90")
df_pvalues_interaction$p_value <- apply(df_pvalues_interaction, 1, function(x)
compare_lr(as.numeric(x[1]), formula_null = normalized_abundance ~ COVID + ICU_1 + Gender + Age_less_than_90,
formula_test = normalized_abundance ~ COVID * ICU_1 + Gender + Age_less_than_90,
data = df, return = 'pvalue'))
hist(df_pvalues_interaction$p_value, breaks = 100, main = 'Histogram of pvalues +/- COVID ICU interaction')
| /eda/KAO/06_KAO_Exploring_pvalue_histograms.R | permissive | jsgro/COVID-19_Multi-Omics | R | false | false | 6,774 | r | ##### 06_KAO_Exploring_pvalue_histograms.R ######
## Using the function also found in X6_KAO_creating_pvalues_table.R
## Looking at the pvalue distributions when removing different confounders.
## Essentially this is exploring the effect of confounders in the statistical
## test.
library(DBI)
library(RSQLite)
##### Function to generate a likelyhood ratio based p-value #####
# Likelihood-ratio-style comparison of two nested linear models fit to the
# rows of `data` belonging to one biomolecule.
#
# biomolecule_id: id used to subset data$biomolecule_id.
# formula_null / formula_test: nested lm formulas; formula_test adds the
#   fixed parameter of interest.
# return: 'pvalue' (default) gives the anova Pr(>F); anything else gives
#   the F ratio. Both are NA if the anova comparison itself errors.
# NOTE(review): errors raised by lm() itself (e.g. an empty subset) are
# not caught and will propagate.
compare_lr <- function(biomolecule_id, formula_null, formula_test, data, return = 'pvalue'){
# comparing likelihood ratios for models +/- fixed parameter of interest
lm_formula_null <- lm(formula_null, data = data[data$biomolecule_id == biomolecule_id, ])
lm_formula_test <- lm(formula_test, data = data[data$biomolecule_id == biomolecule_id, ])
lrt <- tryCatch(anova(lm_formula_null, lm_formula_test), error = function(e) NULL)
if (is.null(lrt)){
lrt_lratio <- NA
lrt_pvalue <- NA
} else{
lrt_lratio <- lrt$F[2]
lrt_pvalue <- lrt$`Pr(>F)`[2]
}
if(return == 'pvalue') lrt_pvalue else lrt_lratio
}
###### Applying this function across all features ########
#### Establish a connection to the DB #####
con <- dbConnect(RSQLite::SQLite(), dbname = "P:/All_20200428_COVID_plasma_multiomics/SQLite Database/Covid-19 Study DB.sqlite")
#### Pull data from DB ####
dbListTables(con)
#####
df_metabolites<- dbGetQuery(con, "SELECT deidentified_patient_metadata.sample_id, normalized_abundance, metabolomics_measurements.biomolecule_id, COVID, Age_less_than_90, Gender, ICU_1
FROM metabolomics_measurements
INNER JOIN metabolomics_runs ON metabolomics_runs.replicate_id = metabolomics_measurements.replicate_id
INNER JOIN rawfiles ON rawfiles.rawfile_id = metabolomics_runs.rawfile_id
INNER JOIN deidentified_patient_metadata ON deidentified_patient_metadata.sample_id = rawfiles.sample_id
INNER JOIN biomolecules on biomolecules.biomolecule_id = metabolomics_measurements.biomolecule_id
WHERE rawfiles.keep = 1
AND biomolecules.keep = '1'
")
df_lipids<- dbGetQuery(con, "SELECT deidentified_patient_metadata.sample_id, normalized_abundance, lipidomics_measurements.biomolecule_id, COVID, Age_less_than_90, Gender, ICU_1
FROM lipidomics_measurements
INNER JOIN lipidomics_runs ON lipidomics_runs.replicate_id = lipidomics_measurements.replicate_id
INNER JOIN rawfiles ON rawfiles.rawfile_id = lipidomics_runs.rawfile_id
INNER JOIN deidentified_patient_metadata ON deidentified_patient_metadata.sample_id = rawfiles.sample_id
INNER JOIN biomolecules on biomolecules.biomolecule_id = lipidomics_measurements.biomolecule_id
WHERE rawfiles.keep = 1
AND biomolecules.keep = '1'
")
df_proteins<- dbGetQuery(con, "SELECT deidentified_patient_metadata.sample_id, normalized_abundance, proteomics_measurements.biomolecule_id, COVID, Age_less_than_90, Gender, ICU_1
FROM proteomics_measurements
INNER JOIN proteomics_runs ON proteomics_runs.replicate_id = proteomics_measurements.replicate_id
INNER JOIN rawfiles ON rawfiles.rawfile_id = proteomics_runs.rawfile_id
INNER JOIN deidentified_patient_metadata ON deidentified_patient_metadata.sample_id = rawfiles.sample_id
INNER JOIN biomolecules on biomolecules.biomolecule_id = proteomics_measurements.biomolecule_id
WHERE rawfiles.keep = 1
AND biomolecules.keep = '1'
")
dbDisconnect(con)
#### Creating dataframe to hold pvalues #######
df <- rbind(df_metabolites,df_lipids, df_proteins)
###### Applying this function across all features ########
df_pvalues_gender <- data.frame(biomolecule_id = unique(df$biomolecule_id), test = "LR_test", comparison = "GENDER", confounders = "COVID;ICU_1;Age_less_than_90")
df_pvalues_gender$p_value <- apply(df_pvalues_gender, 1, function(x)
compare_lr(as.numeric(x[1]), formula_null = normalized_abundance ~ COVID + ICU_1 + Age_less_than_90,
formula_test = normalized_abundance ~ COVID + ICU_1 + Gender + Age_less_than_90,
data = df, return = 'pvalue'))
hist(df_pvalues_gender$p_value, breaks = 100, main = 'Histogram of pvalues +/- gender')
####### Pvalues w/ age
df_pvalues_age <- data.frame(biomolecule_id = unique(df$biomolecule_id), test = "LR_test", comparison = "Age", confounders = "COVID;ICU_1;Age_less_than_90")
df_pvalues_age$p_value <- apply(df_pvalues_age, 1, function(x)
compare_lr(as.numeric(x[1]), formula_null = normalized_abundance ~ COVID + ICU_1 + Gender,
formula_test = normalized_abundance ~ COVID + ICU_1 + Gender + Age_less_than_90,
data = df, return = 'pvalue'))
hist(df_pvalues_age$p_value, breaks = 100, main = 'Histogram of pvalues +/- age')
####### Pvalues w/ ICU status
df_pvalues_icu <- data.frame(biomolecule_id = unique(df$biomolecule_id), test = "LR_test", comparison = "ICU", confounders = "COVID;Gender;Age_less_than_90")
df_pvalues_icu$p_value <- apply(df_pvalues_icu, 1, function(x)
compare_lr(as.numeric(x[1]), formula_null = normalized_abundance ~ COVID + Gender + Age_less_than_90,
formula_test = normalized_abundance ~ COVID + ICU_1 + Gender + Age_less_than_90,
data = df, return = 'pvalue'))
hist(df_pvalues_icu$p_value, breaks = 100, main = 'Histogram of pvalues +/- ICU')
####### Pvalues w/ COVID status
df_pvalues <- data.frame(biomolecule_id = unique(df$biomolecule_id), test = "LR_test", comparison = "COVID_vs_NONCOVID", confounders = "ICU_1;Gender;Age_less_than_90")
df_pvalues$p_value <- apply(df_pvalues, 1, function(x)
compare_lr(as.numeric(x[1]), formula_null = normalized_abundance ~ ICU_1 + Gender + Age_less_than_90,
formula_test = normalized_abundance ~ COVID + ICU_1 + Gender + Age_less_than_90,
data = df, return = 'pvalue'))
hist(df_pvalues$p_value, breaks = 100, main = 'Histogram of pvalues +/- COVID')
table(df_pvalues$p_value < 0.05, df_pvalues_icu$p_value < 0.05)
## FALSE TRUE
## FALSE 4465 1543
## TRUE 1436 485
####### Pvalues w/ COVID ICU interaction
df_pvalues_interaction <- data.frame(biomolecule_id = unique(df$biomolecule_id), test = "LR_test", comparison = "COVID ICU interaction", confounders = "ICU_1;Gender;Age_less_than_90")
df_pvalues_interaction$p_value <- apply(df_pvalues_interaction, 1, function(x)
compare_lr(as.numeric(x[1]), formula_null = normalized_abundance ~ COVID + ICU_1 + Gender + Age_less_than_90,
formula_test = normalized_abundance ~ COVID * ICU_1 + Gender + Age_less_than_90,
data = df, return = 'pvalue'))
hist(df_pvalues_interaction$p_value, breaks = 100, main = 'Histogram of pvalues +/- COVID ICU interaction')
|
# Compare the combined (parallel) survival results against the sequential
# reference run and quantify the per-column error.
# (Polish identifiers kept as-is: "polaczone" = combined, "bledy" = errors,
# "bledy_kazdy_wiersz" = per-row errors.)
df_polaczone <- readRDS("output//output_polaczone//survival.rds")
df_seq <- readRDS("output//output_seq//ramka_seq.rds")
library(dplyr)
# --------- n_risk error --------------- #
# Check how much the combined result differs from the sequential one.
# Build a frame with one error value per input row -- columns mirror the
# input sets -- as opposed to the `bledy` frame below, which holds the
# percentage error summed per whole column (e.g. "n_risk differs by 1%
# from the sequential run").
bledy_kazdy_wiersz <- select(df_polaczone,time)
bledy_kazdy_wiersz$n_risk_errors <- c(abs(df_polaczone$n_risk - df_seq$n_risk))
# sum both columns and take the ratio of total error to the sequential total
proporcja_bledu_do_wyniku_seq <- sum(bledy_kazdy_wiersz$n_risk_errors) / sum(df_seq$n_risk)
procent_blad_n_risk <- paste(round((proporcja_bledu_do_wyniku_seq * 100),2),"%") # convert to percent
# results frame: column name plus the % by which the combined column
# differs from the sequential one
bledy <- data.frame(c("n_risk"),c(procent_blad_n_risk))
colnames(bledy) <- c("nazwa_polaczonej_kolumny","procent_bledu_wzgledem_seq")
# --------- end n_risk error --------------- #
# --------- n_event error --------------- #
# check whether the columns are identical
czy_event_identycznie <- identical(df_polaczone[['n_event']],df_seq[['n_event']]) # returns TRUE
if(as.logical(czy_event_identycznie) == T) {
# record that the combined column is identical ("identyczna")
bledy[nrow(bledy) + 1,] = c("n_event","identyczna")
} else {
# record that an error occurred ("wystapil blad")
bledy[nrow(bledy) + 1,] = c("n_event","wystapil blad")
}
# --------- end n_event error --------------- #
# --------- survival error, na.rm variant --------------- #
# not computed with this method, so skipped for now
# compute the difference
# bledy_kazdy_wiersz$survival_na_rm_err <- abs(df_polaczone$survival_na_rm - df_seq$survival)
# sum both columns and take their ratio
# proporcja_bledu_do_wyniku_seq <- sum(bledy_kazdy_wiersz$survival_na_rm_err) / sum(df_seq$survival)
# procent_blad_survival <- paste(round((proporcja_bledu_do_wyniku_seq * 100),2),"%")
# bledy[nrow(bledy) + 1,] = c("n_survival_srednia_bez_na",procent_blad_survival)
# --------- end survival na.rm error --------------- #
# --------- survival, lower and upper errors --------------- #
bledy_kazdy_wiersz$survival_err <- abs(df_polaczone$survival - df_seq$survival)
bledy_kazdy_wiersz$lower_err <- abs(df_polaczone$lower - df_seq$lower)
bledy_kazdy_wiersz$upper_err <- abs(df_polaczone$upper - df_seq$upper)
# sum both columns and take the ratio of total error to the sequential total
proporcja_bledu_do_wyniku_seq_dwa <- sum(bledy_kazdy_wiersz$survival_err) / sum(df_seq$survival)
procent_blad_survival_dwa <- paste(round((proporcja_bledu_do_wyniku_seq_dwa * 100),2),"%")
bledy[nrow(bledy) + 1,] <- c("survival",procent_blad_survival_dwa)
# same ratio for the lower confidence bound
proporcja_bledu_do_wyniku_seq_dwa <- sum(bledy_kazdy_wiersz$lower_err) / sum(df_seq$lower)
procent_blad_lower <- paste(round((proporcja_bledu_do_wyniku_seq_dwa * 100),2),"%")
bledy[nrow(bledy) + 1,] <- c("n_lower",procent_blad_lower)
# same ratio for the upper confidence bound
proporcja_bledu_do_wyniku_seq_dwa <- sum(bledy_kazdy_wiersz$upper_err) / sum(df_seq$upper)
procent_blad_upper <- paste(round((proporcja_bledu_do_wyniku_seq_dwa * 100),2),"%")
bledy[nrow(bledy) + 1,] <- c("n_upper",procent_blad_upper)
saveRDS(bledy, "output//output_polaczone//bledy.rds")
# --------- end survival/lower/upper errors --------------- #
| /liczenie-bledow.R | no_license | Sebastian99330/survival-analysis-parallel-programming | R | false | false | 3,933 | r | df_polaczone <- readRDS("output//output_polaczone//survival.rds")
df_seq <- readRDS("output//output_seq//ramka_seq.rds")
library(dplyr)
# --------- liczenie bledu n_risk --------------- #
# patrze, na ile sie rozni zlaczony wynik od sekwencyjnego.
# Tworze nowa ramke na wyniki -
# beda w niej kolumny odpowiadajace kolumnom w zbiorach wejsciowych
# i wiersze beda reprezentowaly blad kazdego wiersza
# miedzy poskladanym wynikiem df_polaczone, a sekwencyjnym df_seq.
# Nazwa "bledy kazdy wiersz" zeby odroznic od ramki, ktora bedzie miala bledy,
# ale procentowo juz posumowane dla calej kolumny np. "kolumna n_risk rozni sie 1% od sekwencyjnego
# tutaj mamy roznice w kazdym wierszu kolumny a nie calosciowo dla kolumny
bledy_kazdy_wiersz <- select(df_polaczone,time)
bledy_kazdy_wiersz$n_risk_errors <- c(abs(df_polaczone$n_risk - df_seq$n_risk))
# sumujemy wartosci w obu kolumnach i patrzymy jaki jest stounek miedzy nimi
proporcja_bledu_do_wyniku_seq <- sum(bledy_kazdy_wiersz$n_risk_errors) / sum(df_seq$n_risk)
procent_blad_n_risk <- paste(round((proporcja_bledu_do_wyniku_seq * 100),2),"%") #bierzemy procent
# tworze ramke na wyniki bledu
# bedzie miala nazwe kolumny oraz wartosc o ile % rozni sie zlozona kolumna od sekwencyjnej
bledy <- data.frame(c("n_risk"),c(procent_blad_n_risk))
colnames(bledy) <- c("nazwa_polaczonej_kolumny","procent_bledu_wzgledem_seq")
# --------- Koniec liczenie bledu n_risk --------------- #
# --------- liczenie bledu n_event --------------- #
# badam czy kolumny sa identyczne
czy_event_identycznie <- identical(df_polaczone[['n_event']],df_seq[['n_event']]) # zwraca TRUE
if(as.logical(czy_event_identycznie) == T) {
# wpis o tym ze polaczona kolumna jest identyczna
bledy[nrow(bledy) + 1,] = c("n_event","identyczna")
} else {
bledy[nrow(bledy) + 1,] = c("n_event","wystapil blad")
}
# --------- koniec liczenie bledu n_event --------------- #
# --------- blad survival na rm --------------- #
# nie liczymy ta metoda wiec tego nie liczymy poki co
# sprawdzamy roznice
# bledy_kazdy_wiersz$survival_na_rm_err <- abs(df_polaczone$survival_na_rm - df_seq$survival)
# sumujemy wartosci w obu kolumnach i patrzymy jaki jest stounek miedzy nimi
# proporcja_bledu_do_wyniku_seq <- sum(bledy_kazdy_wiersz$survival_na_rm_err) / sum(df_seq$survival)
# procent_blad_survival <- paste(round((proporcja_bledu_do_wyniku_seq * 100),2),"%")
# bledy[nrow(bledy) + 1,] = c("n_survival_srednia_bez_na",procent_blad_survival)
# --------- koniec blad survival na rm --------------- #
# --------- blad survival na to next wiersz, lower, upper --------------- #
bledy_kazdy_wiersz$survival_err <- abs(df_polaczone$survival - df_seq$survival)
bledy_kazdy_wiersz$lower_err <- abs(df_polaczone$lower - df_seq$lower)
bledy_kazdy_wiersz$upper_err <- abs(df_polaczone$upper - df_seq$upper)
# sumujemy wartosci w obu kolumnach i patrzymy jaki jest stounek miedzy nimi
proporcja_bledu_do_wyniku_seq_dwa <- sum(bledy_kazdy_wiersz$survival_err) / sum(df_seq$survival)
procent_blad_survival_dwa <- paste(round((proporcja_bledu_do_wyniku_seq_dwa * 100),2),"%")
bledy[nrow(bledy) + 1,] <- c("survival",procent_blad_survival_dwa)
# sumujemy wartosci w obu kolumnach i patrzymy jaki jest stounek miedzy nimi
proporcja_bledu_do_wyniku_seq_dwa <- sum(bledy_kazdy_wiersz$lower_err) / sum(df_seq$lower)
procent_blad_lower <- paste(round((proporcja_bledu_do_wyniku_seq_dwa * 100),2),"%")
bledy[nrow(bledy) + 1,] <- c("n_lower",procent_blad_lower)
# sumujemy wartosci w obu kolumnach i patrzymy jaki jest stounek miedzy nimi
proporcja_bledu_do_wyniku_seq_dwa <- sum(bledy_kazdy_wiersz$upper_err) / sum(df_seq$upper)
procent_blad_upper <- paste(round((proporcja_bledu_do_wyniku_seq_dwa * 100),2),"%")
bledy[nrow(bledy) + 1,] <- c("n_upper",procent_blad_upper)
saveRDS(bledy, "output//output_polaczone//bledy.rds")
# --------- koniec blad survival na to next wiersz, lower, upper --------------- #
|
# Fit a conditional-inference random forest (party::cforest) on the points
# falling inside one randomly placed support-set box, and return the fitted
# forest with its variable-importance ranking.
#
# whichrandombox: row index of the box polygon to use.
# spatialdataset: spatial points with a `presence` column plus predictors.
# predictor_stack: raster stack of predictor layers.
# sizename: label used to locate the saved polygon RDS file.
# Depends on globals SPECIES and ntree being defined by the caller.
# Returns list(fitted cforest, importance data.frame, ordered variable names).
cforest.support.set<-function(whichrandombox,
                              spatialdataset,
                              predictor_stack,
                              sizename,
                              ...){
  # polygons were saved earlier as list(polys.p, polys.df) keyed by species/size
  polys <- readRDS(file = paste(SPECIES,
                                sizename,
                                "intermediatefile",
                                "polys",
                                sep = "_"))
  # NOTE(review): polys.p is computed but never used in this function.
  polys.p <- unlist(polys[[1]])
  polys.df <- unlist(polys[[2]])
  # subset the points to those inside the chosen box
  spatial.support.set<-spatialdataset[polys.df[whichrandombox,],]
  # NOTE(review): sample.size.good is computed but neither used nor returned
  # here -- confirm whether the ensemble step expects it in the return list.
  sample.size.good<-ifelse(length(spatial.support.set$presence)>25 &
                             length(unique(spatial.support.set$presence))>1,
                           1, #if both conditions met for sample size and both 0/1s are present
                           0) #if not, do not use (0 weight in ensemble step)
  #need to have the minimum data requirement in here too.
  support.set.data <- as.data.frame(spatial.support.set)
  support.set.data$Longitude <- NULL
  support.set.data$Latitude <- NULL
  #These two columns should be taken out because not predicting on them.
  # NOTE(review): support.set (cropped raster) is also unused in this function.
  support.set <- crop(predictor_stack,
                      extent(polys.df[whichrandombox,]))
  # NOTE(review): library() inside a function is unusual; prefer loading
  # party at the top level or using party:: prefixes.
  library(party)
  #Then variable importance in cforest (Strobl et al. papers on bias).
  my_cforest_control <- cforest_control(teststat = "quad",
                                        testtype = "Univ",
                                        mincriterion = 0, #max depth
                                        ntree = ntree,
                                        mtry = floor(sqrt(ncol(spatialdataset)))-1,
                                        replace = FALSE)
  cforest_importance_tree <- cforest(presence ~ .,
                                     data = support.set.data,
                                     controls = my_cforest_control)
  imp.cforest <- as.data.frame(varimp(cforest_importance_tree))
  ordered.varnames.cforest <- rownames(imp.cforest)[order(imp.cforest, decreasing=TRUE)]
  results.cforest <- list(cforest_importance_tree,
                          imp.cforest,
                          ordered.varnames.cforest)
  return(results.cforest)
}
| /older_files/z_source_ensemble_function_cforest_variable_importance_estimation.R | no_license | baeolophus/ou-grassland-bird-survey | R | false | false | 2,251 | r | cforest.support.set<-function(whichrandombox,
spatialdataset,
predictor_stack,
sizename,
...){
polys <- readRDS(file = paste(SPECIES,
sizename,
"intermediatefile",
"polys",
sep = "_"))
polys.p <- unlist(polys[[1]])
polys.df <- unlist(polys[[2]])
spatial.support.set<-spatialdataset[polys.df[whichrandombox,],]
sample.size.good<-ifelse(length(spatial.support.set$presence)>25 &
length(unique(spatial.support.set$presence))>1,
1, #if both conditions met for sample size and both 0/1s are present
0) #if not, do not use (0 weight in ensemble step)
#need to have the minimum data requirement in here too.
support.set.data <- as.data.frame(spatial.support.set)
support.set.data$Longitude <- NULL
support.set.data$Latitude <- NULL
#These two columns should be taken out because not predicting on them.
support.set <- crop(predictor_stack,
extent(polys.df[whichrandombox,]))
library(party)
#Then variable importance in cforest (Strobl et al. papers on bias).
my_cforest_control <- cforest_control(teststat = "quad",
testtype = "Univ",
mincriterion = 0, #max depth
ntree = ntree,
mtry = floor(sqrt(ncol(spatialdataset)))-1,
replace = FALSE)
cforest_importance_tree <- cforest(presence ~ .,
data = support.set.data,
controls = my_cforest_control)
imp.cforest <- as.data.frame(varimp(cforest_importance_tree))
ordered.varnames.cforest <- rownames(imp.cforest)[order(imp.cforest, decreasing=TRUE)]
results.cforest <- list(cforest_importance_tree,
imp.cforest,
ordered.varnames.cforest)
return(results.cforest)
}
|
# Potential Questions to Answer:
# 1. Create snacks that the customers can buy and randomize who buys which snack
# 2. Pretend you own multiple theaters and run two simulations to represent each theater and plot the results
# 3. Create conditional statements for movies that may be PG-13 and children are not allowed to watch
# Cost for adults and children
ticket_cost <- 100
ticket_cost_child <- 50
movies <- c('Moana', 'Hangover', 'Swordsman', 'Titanik', 'Friend') # List 5 of your favorite movies
screens <-3 # How many screens does the theater have? (assume 1 per movie)
seats <- 100 # How many seats does each theater hold
week_days <- rep(0, 7) # Store totals for each day
# tot_rev_screen = rep(0,3) #Store totals for each screen
# iterate through the week
for (d in 1:7) {
# Keep track of total revenue for the day
total_day_revenue = 0
# iterate through the amount of screens on a particular day
for (s in 1:screens) {
# Calculate how many adults and children are watching the movie
visitors_adults <- sample(seats, 1)
visitors_children <- sample((seats - visitors_adults),1)
# Calculate the revenue for adults and children
revenue_adult = visitors_adults * ticket_cost
revenue_children= visitors_children * ticket_cost_child
# Calculate revenue, and add to running total for the day
total_screen_revenue= revenue_adult + revenue_children
}
# Save total to the corresponding day
total_day_revenue[d] = total_day_revenue + total_screen_revenue
week_days[d]=total_day_revenue[d]
}
week_days
# Make a barchart showing total revenue per day
x= 1:7 # Assign x axis
ds = c("Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat")
barplot(week_days, names.arg = c("Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat")) # we tried to show the x axis
# Make any other chart
plot(week_days)
# Which day had the highest revenue?
max(week_days) | /CH_5_R project/R Project group 5.R | no_license | khalidme94/R | R | false | false | 2,143 | r | # Potential Questions to Answer:
# 1. Create snacks that the customers can buy and randomize who buys which snack
# 2. Pretend you own multiple theaters and run two simulations to represent each theater and plot the results
# 3. Create conditional statements for movies that may be PG-13 and children are not allowed to watch
# Cost for adults and children
ticket_cost <- 100
ticket_cost_child <- 50
movies <- c('Moana', 'Hangover', 'Swordsman', 'Titanik', 'Friend') # List 5 of your favorite movies
screens <-3 # How many screens does the theater have? (assume 1 per movie)
seats <- 100 # How many seats does each theater hold
week_days <- rep(0, 7) # Store totals for each day
# tot_rev_screen = rep(0,3) #Store totals for each screen
# iterate through the week
for (d in 1:7) {
# Keep track of total revenue for the day
total_day_revenue = 0
# iterate through the amount of screens on a particular day
for (s in 1:screens) {
# Calculate how many adults and children are watching the movie
visitors_adults <- sample(seats, 1)
visitors_children <- sample((seats - visitors_adults),1)
# Calculate the revenue for adults and children
revenue_adult = visitors_adults * ticket_cost
revenue_children= visitors_children * ticket_cost_child
# Calculate revenue, and add to running total for the day
total_screen_revenue= revenue_adult + revenue_children
}
# Save total to the corresponding day
total_day_revenue[d] = total_day_revenue + total_screen_revenue
week_days[d]=total_day_revenue[d]
}
week_days
# Make a barchart showing total revenue per day
x= 1:7 # Assign x axis
ds = c("Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat")
barplot(week_days, names.arg = c("Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat")) # we tried to show the x axis
# Make any other chart
plot(week_days)
# Which day had the highest revenue?
max(week_days) |
# Tidyverse teaching script: demonstrates the main dplyr verbs, grouped
# summaries, forcats factor handling and ggplot2 on the Estonian PIAAC
# survey data. Intended to be run interactively, line by line.
library(plyr)
library(tidyverse)
#tidyverse
#we start with couple of main verbs: first one is select()
# NOTE(review): the next two lines are pseudocode -- `somedataset` is not
# defined, so they error if executed; they only illustrate the pattern.
somedataset %>%
select(firstvariable, starts_with("whatever")) -> newdataset
#lets read in the data
piaac <- read.csv("http://www.ut.ee/~iseppo/piaacest.csv")
#a quick overview of the data:
names(piaac)
summary(piaac)
head(piaac)
View(piaac)
#and select a subset of variables from it, writing
#the result into a new data object piaacsmall
piaacsmall <- piaac %>%
select(seqid, age, starts_with("pv"))
#this is actually equivalent to this:
piaacsmall <- select(piaac, seqid, age, starts_with("pv"))
names(piaacsmall)
#filter(): keep only the rows that satisfy a condition
onlymales <- piaac %>%
filter(gender=="Male")
summary(onlymales)
# Drop rows with missing education, study area or health.
piaac <- piaac %>%
filter(!is.na(edlevel3), !is.na(studyarea), !is.na(health))
goodhealth <- piaac %>%
filter(health %in% c("Excellent", "Very good"))
#group_by() and summarize(): one summary row per group
average_numeracy <- piaac %>%
group_by(gender,studyarea)%>%
summarize(avnumeracy=mean(pvnum1, na.rm=T))
averagewage <- piaac %>%
group_by(edlevel3, gender)%>%
summarize(avwage = mean(earnmth, na.rm = T))
averagewage
# Several summaries at once, sorted by the mean wage.
averagewage <- piaac %>%
group_by(edlevel3, gender)%>%
summarize(avwage = mean(earnmth, na.rm = T),
pc25 = quantile(earnmth, probs = 0.25, na.rm=T),
pc75 = quantile(earnmth, probs = 0.75, na.rm=T))%>%
arrange(avwage)
averagewage
#mutate(): add a column; here the wage relative to the gender mean
piaac <- piaac %>%
group_by(gender)%>%
mutate(relativewage = earnmth / mean(earnmth, na.rm=T))
head(piaac$relativewage)
View(piaac)
piaac
# plyr also exports summarize/rename; the dplyr:: prefix disambiguates.
library(dplyr)
piaac %>%
dplyr::group_by(gender, studyarea)%>%
dplyr::summarize(avnumeracy = mean(pvnum1, na.rm=T))
#rename(): new_name = old_name
piaac <- piaac %>%
rename(PersonId = seqid)
names(piaac)
# install.packages("Rmisc")
library(Rmisc)
# Confidence intervals of numeracy per education level.
averages <- piaac %>%
dplyr::rename(Literacy = pvlit1,
Numeracy = pvnum1,
"Problem solving skills" = pvpsl1,
Income = earnmth,
Education = edlevel3,
Health = health) %>%
filter(!is.na(Health), !is.na(Education), !is.na(Numeracy)) %>%
group_by(Education) %>%
summarise(lower = Rmisc::CI(Numeracy)[1],
mean = Rmisc::CI(Numeracy)[2],
upper = Rmisc::CI(Numeracy)[3])
# Rename the analysis columns on the full data set as well.
piaac <- piaac %>%
rename(Literacy = pvlit1,
Numeracy = pvnum1,
"Problem solving skills"=pvpsl1,
Income = earnmth,
Education = edlevel3,
Health = health)
names(piaac)
# NOTE(review): this repeats the rename on columns that were just renamed,
# so re-running it will likely error; kept as part of the lecture flow.
piaac <- piaac %>%
dplyr::rename(Literacy = pvlit1,
Numeracy = pvnum1,
"Problem solving skills" = pvpsl1,
Income = earnmth,
Education = edlevel3,
Health = health) %>%
filter(!is.na(Health),
!is.na(Education),
!is.na(Numeracy))
library(Rmisc)
CI(piaac$Literacy)
# NOTE(review): the two CI summaries index Rmisc::CI() inconsistently
# ([1] called "lower" above, [3] called "lower" here) -- verify against the
# Rmisc documentation which element is which.
averages <- piaac %>%
group_by(Education)%>%
summarize(lower=CI(Numeracy)[3],
mean=mean(Numeracy),
upper=CI(Numeracy)[1])
averages
names(piaac)
unique(piaac$empl_status)
# Indicator variable: TRUE/FALSE, then converted to 1/0 for plotting.
piaac$employed <- piaac$empl_status == "Employed"
View(piaac)
piaac$employed <- as.numeric(piaac$employed)
# Scatterplots with smoothers, coloured and faceted by group.
ggplot(data=piaac, aes(x=Literacy, y=Income))+
geom_point()+
geom_smooth(aes(color=gender))+
facet_wrap(~Education)
ggplot(data=piaac, aes(x=Numeracy, y=Literacy))+
geom_point()+
geom_smooth(aes(color=Education))
ggplot(data=piaac, aes(x=Numeracy, y=employed))+
geom_point()+
geom_smooth(aes(color=Education))
ggplot(data=piaac, aes(x=age, y=employed))+
geom_point()+
geom_smooth(aes(color=gender))+
facet_grid(~Education)
levels(piaac$Education)
# Factor handling with forcats: reorder, recode and sort factor levels.
library(forcats)
piaac$Education <- fct_relevel(piaac$Education,
"Low", "Medium", "High")
piaac$Education <- fct_recode(piaac$Education,
"Basic"="Low",
"Highschool"="Medium")
ggplot(averages, aes(x=mean, y=Education))+
geom_point(shape=21, size=3, fill="white")+
theme_minimal()
averages$Education <- fct_reorder(averages$Education,
averages$mean)
averages <- averages %>%
mutate(fct_reorder(Education, mean))
# attach()/detach() demo: makes columns visible by bare name (generally
# discouraged outside interactive teaching).
attach(averages)
Education
detach(averages)
Education
| /scr/rcourseday5.R | permissive | nreigl/R.TTU_2018 | R | false | false | 4,199 | r |
library(plyr)
library(tidyverse)
#tidyverse
#we start with couple of main verbs: firstone is select()
somedataset %>%
select(firstvariable, starts_with("whatever")) -> newdataset
#lets read in the data
piaac <- read.csv("http://www.ut.ee/~iseppo/piaacest.csv")
#a quick overview of the data:
names(piaac)
summary(piaac)
head(piaac)
View(piaac)
#and select a subset of variables from it, writing
#the result into a new data object piaacsmall
piaacsmall <- piaac %>%
select(seqid, age, starts_with("pv"))
#this is actually equivalent to this:
piaacsmall <- select(piaac, seqid, age, starts_with("pv"))
names(piaacsmall)
#filter():
onlymales <- piaac %>%
filter(gender=="Male")
summary(onlymales)
piaac <- piaac %>%
filter(!is.na(edlevel3), !is.na(studyarea), !is.na(health))
goodhealth <- piaac %>%
filter(health %in% c("Excellent", "Very good"))
#group_by() and summarize()
average_numeracy <- piaac %>%
group_by(gender,studyarea)%>%
summarize(avnumeracy=mean(pvnum1, na.rm=T))
averagewage <- piaac %>%
group_by(edlevel3, gender)%>%
summarize(avwage = mean(earnmth, na.rm = T))
averagewage
averagewage <- piaac %>%
group_by(edlevel3, gender)%>%
summarize(avwage = mean(earnmth, na.rm = T),
pc25 = quantile(earnmth, probs = 0.25, na.rm=T),
pc75 = quantile(earnmth, probs = 0.75, na.rm=T))%>%
arrange(avwage)
averagewage
#mutate()
piaac <- piaac %>%
group_by(gender)%>%
mutate(relativewage = earnmth / mean(earnmth, na.rm=T))
head(piaac$relativewage)
View(piaac)
piaac
library(dplyr)
piaac %>%
dplyr::group_by(gender, studyarea)%>%
dplyr::summarize(avnumeracy = mean(pvnum1, na.rm=T))
#rename()
piaac <- piaac %>%
rename(PersonId = seqid)
names(piaac)
# install.packages("Rmisc")
library(Rmisc)
averages <- piaac %>%
dplyr::rename(Literacy = pvlit1,
Numeracy = pvnum1,
"Problem solving skills" = pvpsl1,
Income = earnmth,
Education = edlevel3,
Health = health) %>%
filter(!is.na(Health), !is.na(Education), !is.na(Numeracy)) %>%
group_by(Education) %>%
summarise(lower = Rmisc::CI(Numeracy)[1],
mean = Rmisc::CI(Numeracy)[2],
upper = Rmisc::CI(Numeracy)[3])
piaac <- piaac %>%
rename(Literacy = pvlit1,
Numeracy = pvnum1,
"Problem solving skills"=pvpsl1,
Income = earnmth,
Education = edlevel3,
Health = health)
names(piaac)
piaac <- piaac %>%
dplyr::rename(Literacy = pvlit1,
Numeracy = pvnum1,
"Problem solving skills" = pvpsl1,
Income = earnmth,
Education = edlevel3,
Health = health) %>%
filter(!is.na(Health),
!is.na(Education),
!is.na(Numeracy))
library(Rmisc)
CI(piaac$Literacy)
averages <- piaac %>%
group_by(Education)%>%
summarize(lower=CI(Numeracy)[3],
mean=mean(Numeracy),
upper=CI(Numeracy)[1])
averages
names(piaac)
unique(piaac$empl_status)
piaac$employed <- piaac$empl_status == "Employed"
View(piaac)
piaac$employed <- as.numeric(piaac$employed)
ggplot(data=piaac, aes(x=Literacy, y=Income))+
geom_point()+
geom_smooth(aes(color=gender))+
facet_wrap(~Education)
ggplot(data=piaac, aes(x=Numeracy, y=Literacy))+
geom_point()+
geom_smooth(aes(color=Education))
ggplot(data=piaac, aes(x=Numeracy, y=employed))+
geom_point()+
geom_smooth(aes(color=Education))
ggplot(data=piaac, aes(x=age, y=employed))+
geom_point()+
geom_smooth(aes(color=gender))+
facet_grid(~Education)
levels(piaac$Education)
library(forcats)
piaac$Education <- fct_relevel(piaac$Education,
"Low", "Medium", "High")
piaac$Education <- fct_recode(piaac$Education,
"Basic"="Low",
"Highschool"="Medium")
ggplot(averages, aes(x=mean, y=Education))+
geom_point(shape=21, size=3, fill="white")+
theme_minimal()
averages$Education <- fct_reorder(averages$Education,
averages$mean)
averages <- averages %>%
mutate(fct_reorder(Education, mean))
attach(averages)
Education
detach(averages)
Education
|
\name{eulerian-package}
\alias{eulerian-package}
\docType{package}
\title{
eulerian: A package to handle eulerian paths from graphs
}
\description{
An Eulerian path is a path in a graph that visits every edge exactly once. This package provides methods for constructing and checking Eulerian paths and cycles.
}
\keyword{ package }
\keyword{ graph }
\keyword{ euler }
\keyword{ eulerian }
\examples{
require(graph)
require(eulerian)
g <- new("graphNEL", nodes=LETTERS[1:4], edgemode="directed")
g <- addEdge(graph=g, from=LETTERS[1:4], to=LETTERS[c(2:4,1)])
if(hasEulerianCycle(g)){
ecycle <- eulerian(g)
writeLines(paste(ecycle, collapse=" -> "))
}
}
| /eulerian/man/eulerian-package.Rd | no_license | alorchhota/eulerian | R | false | false | 645 | rd | \name{eulerian-package}
\alias{eulerian-package}
\docType{package}
\title{
eulerian: A package to handle eulerian paths from graphs
}
\description{
An eulerian path is a path in a graph which visits every edge exactly once. This package provides methods to handle eulerian paths or cycles.
}
\keyword{ package }
\keyword{ graph }
\keyword{ euler }
\keyword{ eulerian }
\examples{
require(graph)
require(eulerian)
g <- new("graphNEL", nodes=LETTERS[1:4], edgemode="directed")
g <- addEdge(graph=g, from=LETTERS[1:4], to=LETTERS[c(2:4,1)])
if(hasEulerianCycle(g)){
ecycle <- eulerian(g)
writeLines(paste(ecycle, collapse=" -> "))
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/freedom_functions_1.R
\name{n.pooled}
\alias{n.pooled}
\title{Sample size for pooled testing for freedom}
\usage{
n.pooled(sep, k, pstar, pse, psp = 1)
}
\arguments{
\item{sep}{desired population sensitivity (scalar or vector)}
\item{k}{pool size (constant across pools) (scalar or vector of same length as sep)}
\item{pstar}{design prevalence (scalar or vector of same length as sep)}
\item{pse}{pool-level sensitivity (scalar or vector of same length as sep)}
\item{psp}{pool-level specificity (scalar or vector of same length as sep)}
}
\value{
vector of sample sizes
}
\description{
Calculates sample size to achieve desired
population-level sensitivity, assuming pooled sampling
and allowing for imperfect sensitivity and specificity of the pooled test
}
\examples{
# examples for n.pooled
n.pooled(0.95, 5, 0.01, 1, 1)
n.pooled(0.95, 10, 0.1, 0.9, 1)
n.pooled(0.95, c(2, 5, 10, 20), 0.1, c(0.99, 0.98, 0.97, 0.95), 1)
}
\keyword{methods}
| /man/n.pooled.Rd | no_license | mariabnd/RSurveillance | R | false | true | 1,031 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/freedom_functions_1.R
\name{n.pooled}
\alias{n.pooled}
\title{Sample size for pooled testing for freedom}
\usage{
n.pooled(sep, k, pstar, pse, psp = 1)
}
\arguments{
\item{sep}{desired population sensitivity (scalar or vector)}
\item{k}{pool size (constant across pools) (scalar or vector of same length as sep)}
\item{pstar}{design prevalence (scalar or vector of same length as sep)}
\item{pse}{pool-level sensitivity (scalar or vector of same length as sep)}
\item{psp}{pool-level specificity (scalar or vector of same length as sep)}
}
\value{
vector of sample sizes
}
\description{
Calculates sample size to achieve desired
population-level sensitivity, assuming pooled sampling
and allowing for imperfect sensitivity and specificity of the pooled test
}
\examples{
# examples for n.pooled
n.pooled(0.95, 5, 0.01, 1, 1)
n.pooled(0.95, 10, 0.1, 0.9, 1)
n.pooled(0.95, c(2, 5, 10, 20), 0.1, c(0.99, 0.98, 0.97, 0.95), 1)
}
\keyword{methods}
|
library(grid)
library(Gmisc)
grid.newpage()
### build the boxes
fishCom <- boxGrob("\n\n Fish community change \n through time\n\n", x=0.7, y=0.75, box_gp = gpar(fill = "#abdda4"))
socEc <- boxGrob("\nSocio-economic \nfactors\n", x=0.85, y=0.25,box_gp = gpar(fill = "#d53e4f"))
climCh <- boxGrob("\nEnvironmental factors\n including temperature\n", x=0.2, y=0.85, box_gp = gpar(fill = "#fdae61"))
fish <- boxGrob("\n Fishing practices \n", x=0.15, y=0.5, box_gp = gpar(fill = "#3288bd"))
fishMan <- boxGrob("\nFisheries\n management policies\n", x=0.3, y=0.15, box_gp = gpar(fill = "#5e4fa2"))
### get arrows
connectGrob(fishCom, socEc, "vertical", lty_gp = gpar(lwd=1, col="black", fill="black"))
connectGrob(fishCom, fishMan, "vertical", lty_gp = gpar(lwd=2, col="black", fill="black"))
connectGrob(fishCom, fish, "horizontal", lty_gp = gpar(lwd=2, col="black", fill="black"))
connectGrob(fishMan, fishCom, "vertical")
connectGrob(fish, fishCom, "horizontal")
connectGrob(climCh, fish, "vertical")
connectGrob(climCh, fishCom, "horizontal")
connectGrob(socEc, fish, "horizontal", lty_gp = gpar(lwd=2, col="black", fill="black"))
connectGrob(fish, socEc, "horizontal")
connectGrob(fishMan, socEc, "horizontal")
connectGrob(fishMan, fish, "-")
connectGrob(fish, fishMan, "vertical", lty_gp = gpar(lwd=2, col="black", fill="black"))
### show the plot
fishCom
socEc
climCh
fishMan
fish | /users/faye/flowChart.R | no_license | maadd/bioTIME | R | false | false | 1,397 | r | library(grid)
library(Gmisc)
grid.newpage()
### build the boxes
fishCom <- boxGrob("\n\n Fish community change \n through time\n\n", x=0.7, y=0.75, box_gp = gpar(fill = "#abdda4"))
socEc <- boxGrob("\nSocio-economic \nfactors\n", x=0.85, y=0.25,box_gp = gpar(fill = "#d53e4f"))
climCh <- boxGrob("\nEnvironmental factors\n including temperature\n", x=0.2, y=0.85, box_gp = gpar(fill = "#fdae61"))
fish <- boxGrob("\n Fishing practices \n", x=0.15, y=0.5, box_gp = gpar(fill = "#3288bd"))
fishMan <- boxGrob("\nFisheries\n management policies\n", x=0.3, y=0.15, box_gp = gpar(fill = "#5e4fa2"))
### get arrows
connectGrob(fishCom, socEc, "vertical", lty_gp = gpar(lwd=1, col="black", fill="black"))
connectGrob(fishCom, fishMan, "vertical", lty_gp = gpar(lwd=2, col="black", fill="black"))
connectGrob(fishCom, fish, "horizontal", lty_gp = gpar(lwd=2, col="black", fill="black"))
connectGrob(fishMan, fishCom, "vertical")
connectGrob(fish, fishCom, "horizontal")
connectGrob(climCh, fish, "vertical")
connectGrob(climCh, fishCom, "horizontal")
connectGrob(socEc, fish, "horizontal", lty_gp = gpar(lwd=2, col="black", fill="black"))
connectGrob(fish, socEc, "horizontal")
connectGrob(fishMan, socEc, "horizontal")
connectGrob(fishMan, fish, "-")
connectGrob(fish, fishMan, "vertical", lty_gp = gpar(lwd=2, col="black", fill="black"))
### show the plot
fishCom
socEc
climCh
fishMan
fish |
context("downstream")
test_that("taxizedb::downstream matches taxize::downstream", {
expect_equal(
taxizedb::downstream('Arabidopsis', db='ncbi', downto='species'),
taxize::downstream('Arabidopsis', db='ncbi', downto='species')
)
})
| /tests/testthat/test-downstream.R | permissive | arendsee/taxizedb | R | false | false | 246 | r | context("downstream")
test_that("taxizedb::downstream matches taxize::downstream", {
expect_equal(
taxizedb::downstream('Arabidopsis', db='ncbi', downto='species'),
taxize::downstream('Arabidopsis', db='ncbi', downto='species')
)
})
|
## Get dataset
data <- read.csv("household_power_consumption.txt",
header=T,
sep=';',
na.strings="?",
nrows=2075259,
check.names=F,
stringsAsFactors=F,
comment.char="",
quote='\"')
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
## data subset
dataSub <- subset(data,
subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
## Remove from memory
rm(data)
## Convert date
datetime <- paste(as.Date(dataSub$Date), dataSub$Time)
dataSub$Datetime <- as.POSIXct(datetime)
## Plot3
with(dataSub, {
plot(Sub_metering_1~Datetime,
type="l",
ylab="Global Active Power (kilowatts)",
xlab="")
lines(Sub_metering_2~Datetime,col='Red')
lines(Sub_metering_3~Datetime,col='Blue')
})
##I am legend
legend("topright",
col=c("black", "red", "blue"),
lty=1,
lwd=2,
legend=c("Sub_metering_1",
"Sub_metering_2",
"Sub_metering_3"))
## Save image
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off() | /Coursera/ExData_Plotting/plot3.R | permissive | mrvtoney/Projects | R | false | false | 1,153 | r | ## Get dataset
data <- read.csv("household_power_consumption.txt",
header=T,
sep=';',
na.strings="?",
nrows=2075259,
check.names=F,
stringsAsFactors=F,
comment.char="",
quote='\"')
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
## data subset
dataSub <- subset(data,
subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
## Remove from memory
rm(data)
## Convert date
datetime <- paste(as.Date(dataSub$Date), dataSub$Time)
dataSub$Datetime <- as.POSIXct(datetime)
## Plot3
with(dataSub, {
plot(Sub_metering_1~Datetime,
type="l",
ylab="Global Active Power (kilowatts)",
xlab="")
lines(Sub_metering_2~Datetime,col='Red')
lines(Sub_metering_3~Datetime,col='Blue')
})
##I am legend
legend("topright",
col=c("black", "red", "blue"),
lty=1,
lwd=2,
legend=c("Sub_metering_1",
"Sub_metering_2",
"Sub_metering_3"))
## Save image
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off() |
library(rio)
library(timeSeries)
library(rlist)
library(foreign)
library(ggplot2)
library(dplyr)
library(ggbiplot)
library(gridExtra)
library(GA)
library(ggalt)
library(pracma)
library(tidyverse)
library(sp)
library(random)
library(SearchTrees)
library(Metrics)
library(TSdist)
library(nonlinearTseries)
library(moments) #skewness, kurtosis
library(tseries) #Nonlinearity test
library(Chaos01) #Chaoticness measure
library(ForeCA) #Spectral entropy
library(arfima) # Self-similarity
library(tsfeatures) #Many features
library(ismev) #Extreme dist shape parameter
library(anomalousACM) #Anomalous features
library(umap)
library(ggplot2)
library(ggfortify)
# Loading raw data: one CSV of sampled time series per sampling frequency
# ('X' column holds the row names).
# NOTE(review): later code indexes raw_data by series index as COLUMNS
# (raw_data[inds]); confirm each series is stored as a column.
yearly = read.csv('sample/Yearly_sample.csv', row.names = 'X')
quarterly = read.csv('sample/Quarterly_sample.csv', row.names = 'X')
monthly = read.csv('sample/Monthly_sample.csv', row.names = 'X')
weekly = read.csv('sample/Weekly_sample.csv', row.names = 'X')
daily = read.csv('sample/Daily_sample.csv', row.names = 'X')
hourly = read.csv('sample/Hourly_sample.csv', row.names = 'X')
# Choosing dataset and setting parameters
raw_data = monthly      # frequency cluster to generate new series for
row_length = 156        # length of each series in the chosen dataset
sample_size = 300       # number of target points / series to generate
n_neighbors = 10        # neighbours used as the GA's initial population
frequency = 12          # seasonal period for ts() and feature extraction
# Loading features and parameters
norm_features = read.csv('train_features/train_normed.csv', row.names = 'X')
centered_features = read.csv('train_features/train_centered.csv', row.names = 'X')
means = read.csv('train_features/means.csv', row.names = 'X')
norm_features$nperiods = NULL
centered_features$nperiods = NULL
# write.csv(norm_features, 'train_normed.csv')
# Separating labels from the feature matrices
labels = norm_features$label
norm_features$label = NULL
centered_features$label = NULL
# PCA on the normalised features (already scaled, hence scale. = F).
pca = prcomp(norm_features, center = T, scale. = F)
# Splitting PCA data. Need to be careful with indeces.
# NOTE(review): the slice boundaries hard-code the row counts of each
# frequency block in train_normed.csv -- re-check if that file changes.
pca_yearly = data.frame(pca$x) %>% slice(1: 1999)
pca_quarterly = data.frame(pca$x) %>% slice(2000: 3999)
pca_monthly = data.frame(pca$x) %>% slice(4000: 5999)
pca_weekly = data.frame(pca$x) %>% slice(6000: 6285)
pca_daily = data.frame(pca$x) %>% slice(6286: 8285)
pca_hourly = data.frame(pca$x) %>% slice(8286:8530)
# Choosing cluster
pca_data = pca_monthly
# Keep only the first two principal components for the 2-D workflow.
pca_data = data.frame(pca_data$PC1, pca_data$PC2)
names(pca_data) = c('PC1', 'PC2')
# Setting bounds for grid generation depending on the cluster parameters
# (bounding box padded by 0.25 on every side).
pc1_upper = max(pca_data$PC1) + 0.25
pc1_lower = min(pca_data$PC1) - 0.25
pc2_upper = max(pca_data$PC2) + 0.25
pc2_lower = min(pca_data$PC2) - 0.25
# Build a rectangular grid of candidate points in PC space.
#
# Args:
#   x_seq: numeric vector of PC1 coordinates.
#   y_seq: numeric vector of PC2 coordinates.
# Returns a data frame with columns PC_1 and PC_2 containing every (x, y)
# combination; PC_1 varies slowest (all y values for x_seq[1] come first),
# matching the original loop's ordering.
#
# BUG FIX: the original wrote into index a*(i-1)+j with
# a = max(length(x_seq), length(y_seq)), which left NA gaps whenever
# length(x_seq) > length(y_seq). expand.grid enumerates all pairs directly
# (and avoids growing vectors inside a double loop).
make_grid = function(x_seq, y_seq){
  combos <- expand.grid(PC_2 = y_seq, PC_1 = x_seq)  # PC_2 varies fastest
  data.frame(PC_1 = combos$PC_1, PC_2 = combos$PC_2)
}
# Defining parameters for grid generation. 0.015 is a step for each axis,
# i.e. the lattice resolution over the padded cluster bounding box.
x_seq = seq(pc1_lower, pc1_upper, 0.015)
y_seq = seq(pc2_lower, pc2_upper, 0.015)
# Convex-hull polygon around a cluster of points in PC space.
#
# Args:
#   data: data frame with numeric columns PC1 and PC2.
# Returns the hull vertices (in chull() order) as a data frame with
# columns PC_1 and PC_2, suitable for sp::point.in.polygon.
#
# Improvements over the original: base indexing instead of a dplyr
# pipeline evaluated twice per column, and no shadowing of the input by
# reassigning `data`. Output is identical.
get_polygon = function(data){
  hull_idx <- chull(data$PC1, data$PC2)
  data.frame(PC_1 = data$PC1[hull_idx],
             PC_2 = data$PC2[hull_idx])
}
# Filtering the generated grid to leave just points inside polygon.
# NOTE(review): make_grid()/get_polygon() are each recomputed several
# times below; the results could be cached in local variables.
get_polygon(pca_data)
is_in_polygon = point.in.polygon(make_grid(x_seq, y_seq)$PC_1, make_grid(x_seq, y_seq)$PC_2, get_polygon(pca_data)$PC_1, get_polygon(pca_data)$PC_2)
# Keep code 1 only (per sp docs 1 = strictly inside; boundary points get
# other codes -- confirm that excluding them is intended).
in_polygon_indeces = which(is_in_polygon == 1, arr.ind = TRUE)
in_polygon_grid = make_grid(x_seq, y_seq) %>% slice(in_polygon_indeces)
plot(in_polygon_grid)
# Plotting inside-polygon grid: cluster in green, candidate lattice in
# translucent black, with an encircling hull outline.
g_prob = ggplot() +
geom_point(data = pca_data, aes(x = PC1, y= PC2), col = 'green') +
geom_point(data = in_polygon_grid, aes(x = PC_1, y = PC_2), alpha = 0.1)+
labs(title='Time series clusters') + labs(x = "PC1", y = NULL) +
geom_encircle(data = pca_data,aes(x = PC1, y= PC2) , expand = 0, s_shape = 1)
plot(g_prob)
# Searching nearest neighbours for every point inside in-polygon grid
# (k = 1: the single closest real series in PC space, via a kd-tree).
A <- SpatialPoints(cbind(x=pca_data$PC1, y=pca_data$PC2))
B <- SpatialPoints(cbind(x=in_polygon_grid$PC_1, y=in_polygon_grid$PC_2))
tree <- createTree(coordinates(A))
inds_t <- knnLookup(tree, newdat=coordinates(B), k=1)
inds_t
# Filtering the in-polygon grid: keep only points whose nearest real
# series is farther than min_dist, i.e. genuinely "empty" regions.
min_dist = 0.02
# NOTE(review): `indeces` grows inside the loop and seq(nrow(...)) would
# misbehave on an empty grid -- acceptable here, but worth hardening.
indeces = c()
for (i in seq(nrow(in_polygon_grid))){
if(EuclideanDistance(as.vector(t(in_polygon_grid[i,])), as.vector(t(pca_data[inds_t[i],]))) > min_dist){
indeces = c(indeces, i)
}
}
# Choosing final target points: random subsample of size sample_size.
targets = in_polygon_grid %>% slice(indeces)
targets = targets %>% slice(sample(seq(1, nrow(targets)), sample_size))
# Plotting targets (red) over the original cluster (green).
g_prob = ggplot() +
geom_point(data = pca_data, aes(x = PC1, y= PC2), col = 'green') +
geom_point(data = targets, aes(x = PC_1, y = PC_2),col = 'blue')+
labs(title='Time series clusters') + labs(x = "PC1", y = NULL)+
geom_encircle(data = pca_data,aes(x = PC1, y= PC2) , expand = 0, s_shape = 1)+
geom_point(data = targets, aes(x = PC_1, y = PC_2),col = 'red')
# geom_point(data = pca_year %>% slice(inds[1,]), aes(x = PC1, y = PC2), col = 'black')+
# geom_point(data = pca_year %>% slice(inds[2,]), aes(x = PC1, y = PC2), col = 'black')+
# geom_point(data = pca_year %>% slice(inds[3,]), aes(x = PC1, y = PC2), col = 'black')
plot(g_prob)
# Searching nearest neighbours for each target point: the k = n_neighbors
# closest real series seed the GA's initial population later on.
set.seed(2)
A <- SpatialPoints(cbind(x=pca_data$PC1, y=pca_data$PC2))
B <- SpatialPoints(cbind(x=targets$PC_1, y=targets$PC_2))
tree <- createTree(coordinates(A))
inds_p <- knnLookup(tree, newdat=coordinates(B), k=n_neighbors)
# Plotting examples of targets and neighbours: first three targets (red)
# and their neighbour sets (black) over the cluster (green).
g_prob = ggplot() +
geom_point(data = pca_data, aes(x = PC1, y= PC2), col = 'green') +
# geom_point(data = targets, aes(x = PC_1, y = PC_2),col = 'blue')+
labs(title='Time series clusters') + labs(x = "PC1", y = NULL) +
geom_encircle(data = pca_data,aes(x = PC1, y= PC2) , expand = 0, s_shape = 1) +
geom_point(data = targets %>% slice(1:3), aes(x = PC_1, y = PC_2),col = 'red')+
geom_point(data = pca_data %>% slice(inds_p[1,]), aes(x = PC1, y = PC2), col = 'black')+
geom_point(data = pca_data %>% slice(inds_p[2,]), aes(x = PC1, y = PC2), col = 'black')+
geom_point(data = pca_data %>% slice(inds_p[3,]), aes(x = PC1, y = PC2), col = 'black')
plot(g_prob)
# Generation part
# Defining projection weights: PCA loadings of the first two components,
# used to project a 23-dimensional feature vector into PC space.
pc_1_weights = as.vector(t(pca$rotation[,1]))
pc_2_weights = as.vector(t(pca$rotation[,2]))
# Defining empty matrices for writing results
results_pca = matrix(ncol=2, nrow = sample_size)             # PC1/PC2 per result
results_raw = matrix(ncol = sample_size, nrow = row_length)  # one series per column
results_features = matrix(ncol = 23, nrow = sample_size)     # feature vector per result
# Loading params: per-feature mean/max/min used for min-max normalisation
# and centring; row order must match the feature order computed below.
params = read.csv('train_features/means.csv', row.names = 'X')
names(params) = c('mean', 'max', 'min')
# Defining function for features calculation. After calculation, features
# are min-max normalised and centred with `params`.
# NOTE(review): relies on the globals `frequency` and `params`; some
# extractors (e.g. gev.fit) may fail on degenerate series -- no error
# handling here, the GA loop would abort.
features = function(tser){
tser = ts(tser, frequency = frequency)
acf = acf_features(tser)
pacf = pacf_features(tser)
measures = tsmeasures(tser, width = frequency)
stl_f = stl_features(tser)
# 23 features: autocorrelation/partial autocorrelation terms, entropy,
# lumpiness/stability, crossings, Hurst, KPSS, nonlinearity, moments,
# chaos measure, anomaly measures, KL shift, GEV shape, STL terms.
features = c(acf[1], acf[3], acf[6], pacf[1], entropy(tser), lumpiness(tser),
stability(tser), crossing_points(tser), hurst(tser), unitroot_kpss(tser),
nonlinearity(tser), kurtosis(tser), skewness(tser), testChaos01(tser),
measures[4], measures[7], max_kl_shift(tser, width = frequency)[1],gev.fit(tser, show = F)$mle[3],
stl_f[9], stl_f[3], stl_f[4], stl_f[5], stl_f[6])
# Min-max normalise with training min/max, then centre with training means.
normed_fvec = (as.vector(t(features)) - as.vector(t(params$min))) / (as.vector(t(params$max)) - as.vector(t(params$min)))
centered_fvec = normed_fvec - as.vector(t(params$mean))
return(centered_fvec)
}
# Fitness helper (minimised): Euclidean distance in PC space between the
# projection of a candidate series' features and the current `target`
# point. Reads `target`, `pc_1_weights`, `pc_2_weights` and `features`
# from the enclosing environment.
f = function(predicted){
  candidate_features <- features(predicted)
  projection <- c(candidate_features %*% pc_1_weights,
                  candidate_features %*% pc_2_weights)
  EuclideanDistance(target, projection)
}
# Generation loop: for every target point, run a real-valued GA that
# evolves a series whose feature projection lands on the target.
for (i in seq(sample_size)){
# Choosing target and its initial population (the target's neighbours).
target = as.vector(t(targets[i,]))
init_pop = t(raw_data[inds_p[i,]])
# Defining fitness function (maximize): ga() maximises, f is a distance.
fitness = function(x) -f(x)
suggestedSol <- init_pop
GA1 <- ga(type = "real-valued",
fitness = fitness,
lower = rep(-5, row_length), upper = rep(5, row_length),
suggestions = suggestedSol,
popSize = n_neighbors, maxiter = 100,
pcrossover = 0.8, pmutation = 0.2, parallel = 2, maxFitness = -0.001)
# Writing results.
# NOTE(review): GA1@solution can contain several equally-fit rows; the
# code below assumes exactly one solution -- confirm.
feat_sol = features(ts(t(GA1@solution), frequency = frequency))
pc_predicted = c(feat_sol %*% pc_1_weights, feat_sol %*% pc_2_weights)
results_pca[i, ] = as.vector(pc_predicted)
results_raw[, i] = as.vector(t(GA1@solution))
results_features[i, ] = feat_sol
print(i)  # progress indicator
}
# Plot optimization process example (fitness trajectory of the last run)
plot(GA1)
# Generated data: assemble the GA outputs into data frames.
gen_data_raw = data.frame(results_raw)        # one generated series per column
gen_data_pca = data.frame(results_pca)        # PC-space projection per series
gen_data_feat = data.frame(results_features)  # centred feature vectors
names(gen_data_pca) = c('PC_1', 'PC_2')
# Keep only series whose projection actually landed inside the cluster's
# convex hull.
plot(get_polygon(pca_data))
is_in_poly_gen = point.in.polygon(gen_data_pca$PC_1, gen_data_pca$PC_2,
                                  get_polygon(pca_data)$PC_1,
                                  get_polygon(pca_data)$PC_2)
in_polygon_gen_pca_inds = which(is_in_poly_gen == 1, arr.ind = TRUE)
in_polygon_gen_raw = gen_data_raw[in_polygon_gen_pca_inds]  # column subset
in_polygon_gen_pca = gen_data_pca %>% slice(in_polygon_gen_pca_inds)
in_polygon_gen_feat = gen_data_feat %>% slice(in_polygon_gen_pca_inds)
# Saving the results.
# BUG FIX: write.csv() ignores `sep`, `dec` and `col.names` (they are
# fixed by the CSV convention) and emitted warnings for each call; the
# ignored arguments are dropped. Output files are unchanged.
write.csv(in_polygon_gen_raw, file = 'Generated_monthly.csv', row.names = TRUE)
write.csv(in_polygon_gen_pca, file = 'Generated_monthly_pca.csv', row.names = TRUE)
write.csv(in_polygon_gen_feat, file = 'Generated_centered_features_monthly.csv', row.names = TRUE)
# Plotting last generated series and its neighbours
a = autoplot(ts(init_pop[1,]))
b = autoplot(ts(init_pop[2,]))
c = autoplot(ts(init_pop[3,]))
d = autoplot(ts(init_pop[4,]))
e = autoplot(ts(t(GA1@solution)))
grid.arrange(a, b, c, d, e, nrow = 5)
# `GA1` is an S4 'ga' object, not a ggplot: `GA1 + labs(...)` errors at
# runtime, so the convergence plot is drawn without the ggplot title layer.
plot(GA1)
# Plotting old data plus new
g_prob = ggplot() +
geom_point(data = pca_data, aes(x = PC1, y= PC2), col = 'green') +
# geom_point(data = targets, aes(x = PC_1, y = PC_2),col = 'blue')+
labs(title='Time series clusters') + labs(x = "PC1", y = NULL) +
geom_encircle(data = pca_data,aes(x = PC1, y= PC2) , expand = 0, s_shape = 1) +
geom_point(data = targets, aes(x = PC_1, y = PC_2),col = 'red')+
geom_point(data = gen_data_pca, aes(x = PC_1, y = PC_2), col = 'black')
plot(g_prob)
g_prob = ggplot() +
geom_point(data = pca_data, aes(x = PC1, y= PC2), col = 'green') +
# geom_point(data = targets, aes(x = PC_1, y = PC_2),col = 'blue')+
labs(title='Time series clusters') + labs(x = "PC1", y = NULL) +
geom_encircle(data = pca_data,aes(x = PC1, y= PC2) , expand = 0, s_shape = 1) +
geom_point(data = targets %>% slice(1:3), aes(x = PC_1, y = PC_2),col = 'red')+
geom_point(data = in_polygon_gen_pca %>% slice(1), aes(x = PC_1, y = PC_2), col = 'black')+
geom_point(data = in_polygon_gen_pca %>% slice(2), aes(x = PC_1, y = PC_2), col = 'black')+
geom_point(data = in_polygon_gen_pca %>% slice(3), aes(x = PC_1, y = PC_2), col = 'black')
plot(g_prob)
# Plotting old data plus new data for each cluster
pca_gen_yearly = read.csv('generated/pca/Generated_yearly_pca.csv', row.names = 'X')
pca_gen_quarterly = read.csv('generated/pca/Generated_quarterly_pca.csv', row.names = 'X')
pca_gen_monthly = read.csv('generated/pca/Generated_monthly_pca.csv', row.names = 'X')
pca_gen_weekly = read.csv('generated/pca/Generated_weekly_pca.csv', row.names = 'X')
pca_gen_daily = read.csv('generated/pca/Generated_daily_pca.csv', row.names = 'X')
gen_yearly_plot = ggplot() +
geom_point(data = pca_yearly, aes(x = PC1, y= PC2), col = 'green') +
labs(title='Generated yearly series') + labs(x = NULL, y = 'PC2') +
geom_encircle(data = pca_yearly,aes(x = PC1, y= PC2) , expand = 0, s_shape = 1) +
geom_point(data = pca_gen_yearly, aes(x = PC_1, y = PC_2), col = 'purple')
plot(gen_yearly_plot)
gen_quarterly_plot = ggplot() +
geom_point(data = pca_quarterly, aes(x = PC1, y= PC2), col = 'green') +
# Title fixed: this is the quarterly panel (was mislabelled 'montly').
labs(title='Generated quarterly series') + labs(x = NULL, y = NULL) +
geom_encircle(data = pca_quarterly,aes(x = PC1, y= PC2) , expand = 0, s_shape = 1) +
geom_point(data = pca_gen_quarterly, aes(x = PC_1, y = PC_2), col = 'purple')
plot(gen_quarterly_plot)
gen_monthly_plot = ggplot() +
geom_point(data = pca_monthly, aes(x = PC1, y= PC2), col = 'green') +
labs(title='Generated monthly series') + labs(x = NULL, y = 'PC2') +
geom_encircle(data = pca_monthly,aes(x = PC1, y= PC2) , expand = 0, s_shape = 1) +
geom_point(data = pca_gen_monthly, aes(x = PC_1, y = PC_2), col = 'purple')
plot(gen_monthly_plot)
gen_weekly_plot = ggplot() +
geom_point(data = pca_weekly, aes(x = PC1, y= PC2), col = 'green') +
labs(title='Generated weekly series') + labs(x = NULL, y = NULL) +
geom_encircle(data = pca_weekly,aes(x = PC1, y= PC2) , expand = 0, s_shape = 1) +
geom_point(data = pca_gen_weekly, aes(x = PC_1, y = PC_2), col = 'purple')
plot(gen_weekly_plot)
gen_daily_plot = ggplot() +
geom_point(data = pca_daily, aes(x = PC1, y= PC2), col = 'green') +
labs(title='Generated daily series') + labs(x = "PC1", y = 'PC2') +
geom_encircle(data = pca_daily,aes(x = PC1, y= PC2) , expand = 0, s_shape = 1) +
geom_point(data = pca_gen_daily, aes(x = PC_1, y = PC_2), col = 'purple') +
ylim(-0.15, 0.25)
plot(gen_daily_plot)
pca_gen = rbind(pca_gen_yearly, pca_gen_quarterly, pca_gen_monthly, pca_gen_weekly, pca_gen_daily)
gen_all_plot = ggplot() +
# pca$x is a matrix; ggplot2 requires a data frame, so wrap it.
geom_point(data = data.frame(pca$x), aes(x = PC1, y= PC2), col = 'green') +
labs(title='All generated series') + labs(x = "PC1", y = NULL) +
geom_point(data = pca_gen, aes(x = PC_1, y = PC_2), col = 'purple')
plot(gen_all_plot)
grid.arrange(gen_yearly_plot, gen_quarterly_plot, gen_daily_plot, gen_all_plot, nrow = 2)
| /code and data/New_series_generation.R | no_license | Pyatachokk/course_work_3 | R | false | false | 14,785 | r | library(rio)
library(timeSeries)
library(rlist)
library(foreign)
library(ggplot2)
library(dplyr)
library(ggbiplot)
library(gridExtra)
library(GA)
library(ggalt)
library(pracma)
library(tidyverse)
library(sp)
library(random)
library(SearchTrees)
library(Metrics)
library(TSdist)
library(nonlinearTseries)
library(moments) #skewness, kurtosis
library(tseries) #Nonlinearity test
library(Chaos01) #Chaoticness measure
library(ForeCA) #Spectral entropy
library(arfima) # Self-similarity
library(tsfeatures) #Many features
library(ismev) #Extreme dist shape parameter
library(anomalousACM) #Anomalous features
library(umap)
library(ggplot2)
library(ggfortify)
# Loading raw data
# Each CSV holds one frequency sample of time series; rows are series.
yearly = read.csv('sample/Yearly_sample.csv', row.names = 'X')
quarterly = read.csv('sample/Quarterly_sample.csv', row.names = 'X')
monthly = read.csv('sample/Monthly_sample.csv', row.names = 'X')
weekly = read.csv('sample/Weekly_sample.csv', row.names = 'X')
daily = read.csv('sample/Daily_sample.csv', row.names = 'X')
hourly = read.csv('sample/Hourly_sample.csv', row.names = 'X')
# Choosing dataset and setting parameters
raw_data = monthly
row_length = 156   # length of each monthly series = GA search dimension
sample_size = 300  # number of new series to generate
n_neighbors = 10   # nearest neighbours seeding the GA's initial population
frequency = 12     # seasonal period for ts() / feature extraction
# Loading features and parameters
norm_features = read.csv('train_features/train_normed.csv', row.names = 'X')
centered_features = read.csv('train_features/train_centered.csv', row.names = 'X')
means = read.csv('train_features/means.csv', row.names = 'X')
# nperiods is constant within a frequency class, so it carries no signal.
norm_features$nperiods = NULL
centered_features$nperiods = NULL
# write.csv(norm_features, 'train_normed.csv')
# Separating labels
labels = norm_features$label
norm_features$label = NULL
centered_features$label = NULL
# PCA on the normalised features; scores in pca$x, loadings in pca$rotation.
pca = prcomp(norm_features, center = T, scale. = F)
# Splitting PCA data. Need to be careful with indeces.
# Row blocks follow the order the training features were stacked:
# yearly, quarterly, monthly, weekly, daily, hourly.
pca_yearly = data.frame(pca$x) %>% slice(1: 1999)
pca_quarterly = data.frame(pca$x) %>% slice(2000: 3999)
pca_monthly = data.frame(pca$x) %>% slice(4000: 5999)
pca_weekly = data.frame(pca$x) %>% slice(6000: 6285)
pca_daily = data.frame(pca$x) %>% slice(6286: 8285)
pca_hourly = data.frame(pca$x) %>% slice(8286:8530)
# Choosing cluster
# pca_data keeps only the first two PCs for the frequency class in use.
pca_data = pca_monthly
pca_data = data.frame(pca_data$PC1, pca_data$PC2)
names(pca_data) = c('PC1', 'PC2')
# Setting bounds for grid generation depending on the cluster parameters
# Pad the bounding box by 0.25 per side so the grid overhangs the hull.
pc1_upper = max(pca_data$PC1) + 0.25
pc1_lower = min(pca_data$PC1) - 0.25
pc2_upper = max(pca_data$PC2) + 0.25
pc2_lower = min(pca_data$PC2) - 0.25
# Defining function for grid generation
# Build a rectangular grid (all combinations of x_seq and y_seq) as a
# two-column data frame of PC coordinates, x-major (PC_2 cycles fastest).
#
# @param x_seq numeric vector of PC1 grid coordinates
# @param y_seq numeric vector of PC2 grid coordinates
# @return data.frame with columns PC_1, PC_2 and length(x_seq)*length(y_seq) rows
#
# Bug fixed: the original nested-loop version indexed with
# a = max(length(x_seq), length(y_seq)), which left NA "holes" in the
# result whenever x_seq was longer than y_seq; rep() yields the complete
# gap-free grid in the same order.
make_grid = function(x_seq, y_seq){
  PC_1 <- rep(x_seq, each = length(y_seq))   # x varies slowest
  PC_2 <- rep(y_seq, times = length(x_seq))  # y cycles within each x
  data.frame(PC_1, PC_2)
}
# Defining parameters for grid generation. 0.015 is a step for each axis.
# Candidate PC1/PC2 coordinates spanning the padded bounding box.
x_seq = seq(pc1_lower, pc1_upper, 0.015)
y_seq = seq(pc2_lower, pc2_upper, 0.015)
# Generating of the polygon around cluster
get_polygon = function(data){
poly = chull(data$PC1, data$PC2)
data = data.frame((data %>% slice(poly))$PC1, (data %>% slice(poly))$PC2)
names(data)= c('PC_1', 'PC_2')
return(data)
}
# Filtering the generated grid to leave just points inside polygon
get_polygon(pca_data)
# point.in.polygon() (package sp) returns 1 for strictly-interior points.
is_in_polygon = point.in.polygon(make_grid(x_seq, y_seq)$PC_1, make_grid(x_seq, y_seq)$PC_2, get_polygon(pca_data)$PC_1, get_polygon(pca_data)$PC_2)
in_polygon_indeces = which(is_in_polygon == 1, arr.ind = TRUE)
in_polygon_grid = make_grid(x_seq, y_seq) %>% slice(in_polygon_indeces)
plot(in_polygon_grid)
# Plotting inside-polygon grid
g_prob = ggplot() +
geom_point(data = pca_data, aes(x = PC1, y= PC2), col = 'green') +
geom_point(data = in_polygon_grid, aes(x = PC_1, y = PC_2), alpha = 0.1)+
labs(title='Time series clusters') + labs(x = "PC1", y = NULL) +
geom_encircle(data = pca_data,aes(x = PC1, y= PC2) , expand = 0, s_shape = 1)
plot(g_prob)
# Searching nearest neighbours for every point inside in-polygon grid
A <- SpatialPoints(cbind(x=pca_data$PC1, y=pca_data$PC2))
B <- SpatialPoints(cbind(x=in_polygon_grid$PC_1, y=in_polygon_grid$PC_2))
tree <- createTree(coordinates(A))
# For each grid point: index of its single nearest existing series (k = 1).
inds_t <- knnLookup(tree, newdat=coordinates(B), k=1)
inds_t
# FIltering in-polygon grid to leave just points, which neighbours not very close
# Keep only grid points farther than min_dist from any existing series, so
# the generator targets genuinely empty regions of the PCA plane.
min_dist = 0.02
indeces = c()
for (i in seq(nrow(in_polygon_grid))){
if(EuclideanDistance(as.vector(t(in_polygon_grid[i,])), as.vector(t(pca_data[inds_t[i],]))) > min_dist){
indeces = c(indeces, i)
}
}
# Choosing final target points
# Randomly keep sample_size of the surviving grid points as GA targets.
targets = in_polygon_grid %>% slice(indeces)
targets = targets %>% slice(sample(seq(1, nrow(targets)), sample_size))
# Plotting targets
# Targets are drawn twice (blue then red) so the red layer sits on top.
g_prob = ggplot() +
geom_point(data = pca_data, aes(x = PC1, y= PC2), col = 'green') +
geom_point(data = targets, aes(x = PC_1, y = PC_2),col = 'blue')+
labs(title='Time series clusters') + labs(x = "PC1", y = NULL)+
geom_encircle(data = pca_data,aes(x = PC1, y= PC2) , expand = 0, s_shape = 1)+
geom_point(data = targets, aes(x = PC_1, y = PC_2),col = 'red')
# geom_point(data = pca_year %>% slice(inds[1,]), aes(x = PC1, y = PC2), col = 'black')+
# geom_point(data = pca_year %>% slice(inds[2,]), aes(x = PC1, y = PC2), col = 'black')+
# geom_point(data = pca_year %>% slice(inds[3,]), aes(x = PC1, y = PC2), col = 'black')
plot(g_prob)
# Searching nearest neighbours for each target point
set.seed(2)
A <- SpatialPoints(cbind(x=pca_data$PC1, y=pca_data$PC2))
B <- SpatialPoints(cbind(x=targets$PC_1, y=targets$PC_2))
tree <- createTree(coordinates(A))
# k = n_neighbors existing series per target; these seed each GA run.
inds_p <- knnLookup(tree, newdat=coordinates(B), k=n_neighbors)
# Plotting examples of targets and neighbours
g_prob = ggplot() +
geom_point(data = pca_data, aes(x = PC1, y= PC2), col = 'green') +
# geom_point(data = targets, aes(x = PC_1, y = PC_2),col = 'blue')+
labs(title='Time series clusters') + labs(x = "PC1", y = NULL) +
geom_encircle(data = pca_data,aes(x = PC1, y= PC2) , expand = 0, s_shape = 1) +
geom_point(data = targets %>% slice(1:3), aes(x = PC_1, y = PC_2),col = 'red')+
geom_point(data = pca_data %>% slice(inds_p[1,]), aes(x = PC1, y = PC2), col = 'black')+
geom_point(data = pca_data %>% slice(inds_p[2,]), aes(x = PC1, y = PC2), col = 'black')+
geom_point(data = pca_data %>% slice(inds_p[3,]), aes(x = PC1, y = PC2), col = 'black')
plot(g_prob)
# Generation part
# Defining projection weights
# Loadings of the first two PCs: a feature vector is projected via feat %*% w.
pc_1_weights = as.vector(t(pca$rotation[,1]))
pc_2_weights = as.vector(t(pca$rotation[,2]))
# Defining emty matrices for writng results
results_pca = matrix(ncol=2, nrow = sample_size)
results_raw = matrix(ncol = sample_size, nrow = row_length)
results_features = matrix(ncol = 23, nrow = sample_size)  # 23 features/series
# Loading params
# Per-feature normalisation constants (mean/max/min) from the training set.
params = read.csv('train_features/means.csv', row.names = 'X')
names(params) = c('mean', 'max', 'min')
# Defining function for features calculation. After calculation features are normed and centered with params
# Computes 23 time-series descriptors (ACF/PACF terms, entropy, lumpiness,
# stability, crossing points, Hurst, KPSS, nonlinearity, kurtosis, skewness,
# 0-1 chaos test, anomalous-ACM measures, KL shift, GEV shape, STL
# components), then min-max normalises and mean-centres them with the
# training-set constants in the global `params` so the result is comparable
# with the centred training features that fitted the PCA.
# NOTE(review): feature order is assumed to match the row order of
# `params` — confirm against the training-feature pipeline.
features = function(tser){
tser = ts(tser, frequency = frequency)
acf = acf_features(tser)
pacf = pacf_features(tser)
measures = tsmeasures(tser, width = frequency)
stl_f = stl_features(tser)
features = c(acf[1], acf[3], acf[6], pacf[1], entropy(tser), lumpiness(tser),
stability(tser), crossing_points(tser), hurst(tser), unitroot_kpss(tser),
nonlinearity(tser), kurtosis(tser), skewness(tser), testChaos01(tser),
measures[4], measures[7], max_kl_shift(tser, width = frequency)[1],gev.fit(tser, show = F)$mle[3],
stl_f[9], stl_f[3], stl_f[4], stl_f[5], stl_f[6])
# Min-max scale, then subtract the training means (same feature order).
normed_fvec = (as.vector(t(features)) - as.vector(t(params$min))) / (as.vector(t(params$max)) - as.vector(t(params$min)))
centered_fvec = normed_fvec - as.vector(t(params$mean))
return(centered_fvec)
}
# Fitness to minimize: distance between the current `target` grid point and
# the candidate's projection onto PC1/PC2. Uses globals: features(),
# pc_1_weights, pc_2_weights, target, EuclideanDistance().
f <- function(predicted) {
  candidate_features <- features(predicted)
  candidate_pca <- c(
    candidate_features %*% pc_1_weights,
    candidate_features %*% pc_2_weights
  )
  EuclideanDistance(target, candidate_pca)
}
# Generation loop
# One GA run per target point: the initial population is the target's
# k nearest raw series; the GA evolves a series whose PCA projection
# approaches the target.
for (i in seq(sample_size)){
# Choosing target and his initial population
target = as.vector(t(targets[i,]))
init_pop = t(raw_data[inds_p[i,]])
# Defining fitness function (maximize)
# ga() maximizes, so the distance objective f() is negated.
fitness = function(x) -f(x)
suggestedSol <- init_pop
# Early stop once within 0.001 of the target (maxFitness = -0.001).
GA1 <- ga(type = "real-valued",
fitness = fitness,
lower = rep(-5, row_length), upper = rep(5, row_length),
suggestions = suggestedSol,
popSize = n_neighbors, maxiter = 100,
pcrossover = 0.8, pmutation = 0.2, parallel = 2, maxFitness = -0.001)
# Writing results
# Best individual: feature vector, PCA projection, and raw series (column).
feat_sol = features(ts(t(GA1@solution), frequency = frequency))
pc_predicted = c(feat_sol %*% pc_1_weights, feat_sol %*% pc_2_weights)
results_pca[i, ] = as.vector(pc_predicted)
results_raw[, i] = as.vector(t(GA1@solution))
results_features[i, ] = feat_sol
print(i)
}
# Plot optimization process example
plot(GA1)
# Generated data
gen_data_raw = data.frame(results_raw)        # columns = generated series
gen_data_pca = data.frame(results_pca)        # rows = 2-D PCA projections
gen_data_feat = data.frame(results_features)  # rows = centred feature vectors
names(gen_data_pca) = c('PC_1', 'PC_2')
# Sorting points to leave just inside polygon
plot(get_polygon(pca_data))
is_in_poly_gen = point.in.polygon(gen_data_pca$PC_1, gen_data_pca$PC_2, get_polygon(pca_data)$PC_1, get_polygon(pca_data)$PC_2)
in_polygon_gen_pca_inds = which(is_in_poly_gen == 1, arr.ind = TRUE)
in_polygon_gen_raw = gen_data_raw[in_polygon_gen_pca_inds]
in_polygon_gen_pca = gen_data_pca %>% slice(in_polygon_gen_pca_inds)
in_polygon_gen_feat = gen_data_feat %>% slice(in_polygon_gen_pca_inds)
# Saving the results
# write.csv() fixes sep/dec/col.names itself and merely warns when they are
# passed, so the redundant arguments have been dropped.
write.csv(in_polygon_gen_raw, file = 'Generated_monthly.csv', row.names = TRUE)
write.csv(in_polygon_gen_pca, file = 'Generated_monthly_pca.csv', row.names = TRUE)
write.csv(in_polygon_gen_feat, file = 'Generated_centered_features_monthly.csv', row.names = TRUE)
# Plotting last generated series and its neighbours
a = autoplot(ts(init_pop[1,]))
b = autoplot(ts(init_pop[2,]))
c = autoplot(ts(init_pop[3,]))
d = autoplot(ts(init_pop[4,]))
e = autoplot(ts(t(GA1@solution)))
grid.arrange(a, b, c, d, e, nrow = 5)
# `GA1` is an S4 'ga' object, not a ggplot: `GA1 + labs(...)` errors at
# runtime, so the convergence plot is drawn without the ggplot title layer.
plot(GA1)
# Plotting old data plus new
g_prob = ggplot() +
geom_point(data = pca_data, aes(x = PC1, y= PC2), col = 'green') +
# geom_point(data = targets, aes(x = PC_1, y = PC_2),col = 'blue')+
labs(title='Time series clusters') + labs(x = "PC1", y = NULL) +
geom_encircle(data = pca_data,aes(x = PC1, y= PC2) , expand = 0, s_shape = 1) +
geom_point(data = targets, aes(x = PC_1, y = PC_2),col = 'red')+
geom_point(data = gen_data_pca, aes(x = PC_1, y = PC_2), col = 'black')
plot(g_prob)
g_prob = ggplot() +
geom_point(data = pca_data, aes(x = PC1, y= PC2), col = 'green') +
# geom_point(data = targets, aes(x = PC_1, y = PC_2),col = 'blue')+
labs(title='Time series clusters') + labs(x = "PC1", y = NULL) +
geom_encircle(data = pca_data,aes(x = PC1, y= PC2) , expand = 0, s_shape = 1) +
geom_point(data = targets %>% slice(1:3), aes(x = PC_1, y = PC_2),col = 'red')+
geom_point(data = in_polygon_gen_pca %>% slice(1), aes(x = PC_1, y = PC_2), col = 'black')+
geom_point(data = in_polygon_gen_pca %>% slice(2), aes(x = PC_1, y = PC_2), col = 'black')+
geom_point(data = in_polygon_gen_pca %>% slice(3), aes(x = PC_1, y = PC_2), col = 'black')
plot(g_prob)
# Plotting old data plus new data for each cluster
pca_gen_yearly = read.csv('generated/pca/Generated_yearly_pca.csv', row.names = 'X')
pca_gen_quarterly = read.csv('generated/pca/Generated_quarterly_pca.csv', row.names = 'X')
pca_gen_monthly = read.csv('generated/pca/Generated_monthly_pca.csv', row.names = 'X')
pca_gen_weekly = read.csv('generated/pca/Generated_weekly_pca.csv', row.names = 'X')
pca_gen_daily = read.csv('generated/pca/Generated_daily_pca.csv', row.names = 'X')
gen_yearly_plot = ggplot() +
geom_point(data = pca_yearly, aes(x = PC1, y= PC2), col = 'green') +
labs(title='Generated yearly series') + labs(x = NULL, y = 'PC2') +
geom_encircle(data = pca_yearly,aes(x = PC1, y= PC2) , expand = 0, s_shape = 1) +
geom_point(data = pca_gen_yearly, aes(x = PC_1, y = PC_2), col = 'purple')
plot(gen_yearly_plot)
gen_quarterly_plot = ggplot() +
geom_point(data = pca_quarterly, aes(x = PC1, y= PC2), col = 'green') +
# Title fixed: this is the quarterly panel (was mislabelled 'montly').
labs(title='Generated quarterly series') + labs(x = NULL, y = NULL) +
geom_encircle(data = pca_quarterly,aes(x = PC1, y= PC2) , expand = 0, s_shape = 1) +
geom_point(data = pca_gen_quarterly, aes(x = PC_1, y = PC_2), col = 'purple')
plot(gen_quarterly_plot)
gen_monthly_plot = ggplot() +
geom_point(data = pca_monthly, aes(x = PC1, y= PC2), col = 'green') +
labs(title='Generated monthly series') + labs(x = NULL, y = 'PC2') +
geom_encircle(data = pca_monthly,aes(x = PC1, y= PC2) , expand = 0, s_shape = 1) +
geom_point(data = pca_gen_monthly, aes(x = PC_1, y = PC_2), col = 'purple')
plot(gen_monthly_plot)
gen_weekly_plot = ggplot() +
geom_point(data = pca_weekly, aes(x = PC1, y= PC2), col = 'green') +
labs(title='Generated weekly series') + labs(x = NULL, y = NULL) +
geom_encircle(data = pca_weekly,aes(x = PC1, y= PC2) , expand = 0, s_shape = 1) +
geom_point(data = pca_gen_weekly, aes(x = PC_1, y = PC_2), col = 'purple')
plot(gen_weekly_plot)
gen_daily_plot = ggplot() +
geom_point(data = pca_daily, aes(x = PC1, y= PC2), col = 'green') +
labs(title='Generated daily series') + labs(x = "PC1", y = 'PC2') +
geom_encircle(data = pca_daily,aes(x = PC1, y= PC2) , expand = 0, s_shape = 1) +
geom_point(data = pca_gen_daily, aes(x = PC_1, y = PC_2), col = 'purple') +
ylim(-0.15, 0.25)
plot(gen_daily_plot)
pca_gen = rbind(pca_gen_yearly, pca_gen_quarterly, pca_gen_monthly, pca_gen_weekly, pca_gen_daily)
gen_all_plot = ggplot() +
# pca$x is a matrix; ggplot2 requires a data frame, so wrap it.
geom_point(data = data.frame(pca$x), aes(x = PC1, y= PC2), col = 'green') +
labs(title='All generated series') + labs(x = "PC1", y = NULL) +
geom_point(data = pca_gen, aes(x = PC_1, y = PC_2), col = 'purple')
plot(gen_all_plot)
grid.arrange(gen_yearly_plot, gen_quarterly_plot, gen_daily_plot, gen_all_plot, nrow = 2)
|
library(raster)
# Package to handle raster-formatted spatial data
library(rasterVis)
# The rasterVis package complements the raster package, providing a set of methods for enhanced visualization and interaction
# Defines visualisation methods with 'levelplot'
library(dismo)
# Dismo has the SDM analyses for maxent and support vector machines used by R
library(rgeos)
# To define circles with a radius around the subsampled points
# geos is a geometry engine, need to install package to access these capabilities (such as defining circumfrances)
library(rJava)
library(rgdal)
# Provides access to projection/transformation operations from a different library
# Coordinate referancing system**
library(sp)
# Coordinate referancing system
library(ncdf4)
# Opens access to read and write on netCDF files
library(kernlab)
# Required for support vector machines
# installed and running BUT UNSURE of function
library(grDevices)
# For colouring maps
library(colorRamps)
#Allows easy construction of color palettes
#Loading data for project now
#Ensure WD is in correct place WILL BE IN NEW PLACE FOR EACH SPECIES
# NOTE(review): setwd() inside a script hurts reproducibility; prefer
# project-relative paths when refactoring.
setwd("~/Documents/UoY/Dissertation/Pout")
locs = read.csv("Pout_Severn_UTM.csv", header=T, sep = ",")
#loading severn files
#had to add the file location for R to access the severn files, is this right?
# Environmental predictor layers for the Severn estuary (GeoTIFF rasters).
dry_always<-raster("Severn_unaltered Pout/always_dry_masked.tif")
tidal_range<-raster("Severn_unaltered Pout/tidal_range_masked.tif")
subtidal<-raster("Severn_unaltered Pout/subtidal_masked.tif")
min_elev<-raster("Severn_unaltered Pout/min_elev_masked.tif")
max_velocity<-raster("Severn_unaltered Pout/max_vel_masked.tif")
max_elev<-raster("Severn_unaltered Pout/max_elev_masked.tif")
mask_2<-raster("Severn_unaltered Pout/mask2.tif")
intertidal<-raster("Severn_unaltered Pout/intertidal_masked.tif")
depth<-raster("Severn_unaltered Pout/bathy_masked.tif")
avg_velocity<-raster("Severn_unaltered Pout/av_vel_masked.tif")
#ALL raster data is uploaded here
# Bathymetry doubles as the sea mask: cells NA in `depth` are land.
mask<-depth
#DO NOT HAVE 'distance_to_coast' comparison in our data set as in MaxEnt Code
#DO NOT HAVE 'lat and lon' tifs as in MaxEnt Code
# Extract depth values to table of species co-ordinates
locs_ext=extract(depth, locs[,c("X","Y")])
#this has created a VALUE of depth for each single point as dictated by x&y coordinates from species data
#now each species seen has a depth based on its coordinates in the depth raster file we are given!!
# Build a data frame of species occurrence data and depth data
locs = data.frame(locs, locs_ext)
# added locs_ext to the final column in locs file so now coordinates for species can be coupled with their depth in teh same file
# Remove points with NA values for depth, i.e. on land
locs = subset(locs, !is.na(locs_ext))
e = extent(depth)
#subset extracted all values and rows with 'na' from the locs_ext column
# WHAT DOES EXTENT DO?!
# without using the 'mask' technique above will this still remove all 'land' data above?
#what is "e"?? - is it simply giving the 'extent' of the data set in a min and max of x and y?
# extent() returns the raster's bounding box (xmin/xmax/ymin/ymax); `e` is
# used below to build the 1 km subsampling grid and to bound the
# background-point draw.
# Create sequences of X and Y values to define a grid
# this a 1x1 km grid
xgrid = seq(e@xmin,e@xmax,1000)
ygrid = seq(e@ymin,e@ymax,1000)
#"seq()" works by 'from', 'to', 'by incremental step'
#generated a sequence from xmin value to xmax value in "e" that increase by 1000
# Identify occurrence points within each grid cell, then draw one at random
# Spatial thinning: keep at most one occurrence per 1 km cell to reduce
# the sampling bias of clustered survey effort.
subs = c()
for(i in 1:(length(xgrid)-1)) {
for(j in 1:(length(ygrid)-1)) {
gridsq = subset(locs, Y > ygrid[j] & Y < ygrid[j+1] & X > xgrid[i] & X < xgrid[i+1])
if(dim(gridsq)[1]>0) {
subs = rbind(subs, gridsq[sample(1:dim(gridsq)[1],1 ), ])
}
}
}
dim(locs);dim(subs) # Confirm that you have a smaller dataset than you started with (1st number)
#for is an argument that will loop a desired action on a given value in a vector
#length will get value the legth of vectors and factors in a defined object
##this a loop going through x values (every 1000m) and at each new x square, looping through all the y's related to that x (and so on for all the x values)
#gridsq is a complex way of saying the square is greater than the start of one x/y value and less than the next one after it
#rbind & cbind combine/create a matrix by rows (rbind) or columns (cbind) of the two seperate vector sets
# Assign correct co-ordinate reference system to subset
coordinates <- cbind(subs$X, subs$Y)
# PROJ string fixed: every token needs a leading '+'; the original
# 'ellps=WGS84' (no '+') was a malformed parameter.
subs_df <- SpatialPointsDataFrame(coordinates, subs, proj4string=CRS("+proj=utm +zone=30 +ellps=WGS84"))
#cbind of subs$X and subs$Y created a new data set/matrix called coordinates that only has coordinate data in it!
# we create 20,000 random "background points". There are other ways to do this, but start with this.
#NOTE
psa <- randomPoints(mask, 20000, ext=e)
#need to make sure all is up-to-date: previous error due to 'dismo' not being updated
# Stack raster layers into one variable
#NOTE WITHOUT INTERTIDAL LAYER
env_uk<-stack(depth,max_elev,min_elev,avg_velocity,dry_always,subtidal,tidal_range)
# Pull environmental data for the sumbsampled-presence points from the raster stack
presence_uk= extract(env_uk, subs_df[,c("X","Y")])
#Warning messages: transforming SpatialPoints to the CRS of the Raster?
# Pull environmental data for the pseudo-absence points from the raster stack
pseudo_uk = extract(env_uk, psa)
# Build some useful dataframes, with two columns of coordinates followed by the environmental variables. For the presence points:
presence_uk = data.frame(X=subs_df$X, Y=subs_df$Y, presence_uk)
# (The extract() above returned a bare matrix of predictor values; this
# step prepends the coordinates and converts to a data frame.)
# Convert psa from atomic vector matrix to data.frame
psapoints=data.frame(psa)
# Bind co-ordinates
coordinates <- cbind(psapoints$x, psapoints$y)
# Create spatial data frame of pseudo absences
# PROJ string fixed: '+ellps=WGS84' (the original lacked the leading '+').
psadf <- SpatialPointsDataFrame(coordinates, psapoints, proj4string=CRS("+proj=utm +zone=30 +ellps=WGS84"))
# Build dataframe, with two columns of coordinates followed by the 5 environmental variables. For the pseudo-absences:
psadfx = psadf@coords
colnames(psadfx) = c("X","Y")
pseudo_uk = data.frame(cbind(psadfx,pseudo_uk))
# Vector of group assignments splitting the subsampled presence points data fram with environmental data into 5 groups
group_p = kfold(presence_uk, 5)
#kfold partitions a data set k times (in this case 5 times) for model testing purposes
# Repeat above step for pseudo-absence points
group_a = kfold(pseudo_uk, 5)
# create output required for the loop
# Pre-allocate 5-slot lists. The original `list(5)` created a length-1
# list containing the number 5, not five empty slots.
evaluations = vector("list", 5)
models = vector("list", 5)
# where it says maxent - you may need to swap this for other functions if you're exploring different models
# Note that some model may need different inputs etc. Read the docs to figure this out.
# This is our k-fold test. You will want to spend a bit of time making predictions on each of the 5 sub-models
# created here to check you can make decent predictions even with missing data
for (test in 1:5) {
# Then we use test and the kfold groupings to divide the presence and absence points:
train_p = presence_uk[group_p!=test, c("X","Y")]
train_a = pseudo_uk[group_a!=test, c("X","Y")]
test_p = presence_uk[group_p==test, c("X","Y")]
test_a = pseudo_uk[group_a==test, c("X","Y")]
# Now, estimate a maxent model using the "training" points and the environmental data. This may take a few moments to run:
# [[ ]] stores the S4 model/evaluation objects as list elements; the
# original single-bracket assignment produced the "implicit list embedding
# of S4 objects is deprecated" warning noted by the author.
models[[test]] = maxent(env_uk, p=train_p, a=train_a)
# To validate the model, we use the appropriately named function.
evaluations[[test]] = evaluate(test_p, test_a, models[[test]], env_uk)
}
# print out the AUC for the k-fold tests
# ideally should be > 0.75 for all
cat("K-FOLD AUC: ")
for (test in 1:5) {
cat(paste0(evaluations[[test]]@auc,","))
}
#IF ONE WANTED to visualise the 1st model NEED TO FINISH WITH THE OTHER 4 KFOLDS
## pred <- predict(models[[1]], env_uk)
## plot(pred)
#-could visualise all k-fold models to show they are the same (strong statistical result)
#BUT DONT NEED TO
# Assess Spatial Sorting Bias (SSB)
# NOTE: these reuse train_p/test_p etc. from the LAST k-fold iteration only.
pres_train_me <- train_p
pres_test_me <- test_p
back_train_me <- train_a
back_test_me <- test_a
sb <- ssb(pres_test_me, back_test_me, pres_train_me)
# Ratio near 1 means little spatial sorting bias; much lower values mean
# test presences sit closer to training presences than background does.
sb[,1] / sb[,2]
#creates a model of spacial biasing to compare to given preditions
# Adjust for SSB if present via distance based point-wise sampling
i <- pwdSample(pres_test_me, back_test_me, pres_train_me, n=1, tr=0.1)
pres_test_pwd_me <- pres_test_me[!is.na(i[,1]), ]
back_test_pwd_me <- back_test_me[na.omit(as.vector(i)), ]
sb2 <- ssb(pres_test_pwd_me, back_test_pwd_me, pres_train_me)
sb2[1]/ sb2[2]
#creates full model without any K fold statistics etc
pres_points = presence_uk[c("X","Y")]
abs_points = pseudo_uk[c("X","Y")]
# create full maxent with all points
model <- maxent(env_uk, p=pres_points, a=abs_points)
#turn model into prediction that can be plotted into a raster
pred_PredFull <- predict(model, env_uk)
#to see model and obtain jpeg
plot(pred_PredFull)
#Gives AUC for full model (pred_PredFull)
# Note: evaluating on the training points themselves — an optimistic AUC.
evaluate_full <- evaluate(presence_uk[c("X","Y")], pseudo_uk[c("X","Y")], model, env_uk)
#see what AUC is by typing it in
evaluate_full
#see what the specific sensitivity values is of species
#use value givenas a base level or higher that one would expect to see species (compare to evaluate_full)
# spec_sens: the threshold maximising sensitivity + specificity.
message(threshold(evaluate_full)$spec_sens)
#check response curves to see if they change the FULL MODEL
response(model)
#creates a raster file in the WD
#will be useful when putting a file into qgis!
writeRaster(pred_PredFull, filename="pred3_me.tif", options="INTERLEAVE=BAND", overwrite=TRUE)
| /3rd Code Pout -Inter-Max_vel.R | no_license | laxmack21/Severn-Estuary-SDMS | R | false | false | 9,643 | r | library(raster)
# Package to handle raster-formatted spatial data
library(rasterVis)
# The rasterVis package complements the raster package, providing a set of methods for enhanced visualization and interaction
# Defines visualisation methods with 'levelplot'
library(dismo)
# Dismo has the SDM analyses for maxent and support vector machines used by R
library(rgeos)
# To define circles with a radius around the subsampled points
# geos is a geometry engine, need to install package to access these capabilities (such as defining circumfrances)
library(rJava)
library(rgdal)
# Provides access to projection/transformation operations from a different library
# Coordinate referancing system**
library(sp)
# Coordinate referancing system
library(ncdf4)
# Opens access to read and write on netCDF files
library(kernlab)
# Required for support vector machines
# installed and running BUT UNSURE of function
library(grDevices)
# For colouring maps
library(colorRamps)
#Allows easy construction of color palettes
#Loading data for project now
#Ensure WD is in correct place WILL BE IN NEW PLACE FOR EACH SPECIES
# NOTE(review): prefer project-relative paths over setwd() when refactoring.
setwd("~/Documents/UoY/Dissertation/Pout")
locs = read.csv("Pout_Severn_UTM.csv", header=T, sep = ",")
#loading severn files
#had to add the file location for R to access the severn files, is this right?
# Environmental predictor rasters for the Severn estuary.
dry_always<-raster("Severn_unaltered Pout/always_dry_masked.tif")
tidal_range<-raster("Severn_unaltered Pout/tidal_range_masked.tif")
subtidal<-raster("Severn_unaltered Pout/subtidal_masked.tif")
min_elev<-raster("Severn_unaltered Pout/min_elev_masked.tif")
max_velocity<-raster("Severn_unaltered Pout/max_vel_masked.tif")
max_elev<-raster("Severn_unaltered Pout/max_elev_masked.tif")
mask_2<-raster("Severn_unaltered Pout/mask2.tif")
intertidal<-raster("Severn_unaltered Pout/intertidal_masked.tif")
depth<-raster("Severn_unaltered Pout/bathy_masked.tif")
avg_velocity<-raster("Severn_unaltered Pout/av_vel_masked.tif")
#ALL raster data is uploaded here
# Bathymetry doubles as the sea mask (NA cells in `depth` are land).
mask<-depth
#DO NOT HAVE 'distance_to_coast' comparison in our data set as in MaxEnt Code
#DO NOT HAVE 'lat and lon' tifs as in MaxEnt Code
# Extract depth values to table of species co-ordinates
locs_ext=extract(depth, locs[,c("X","Y")])
#this has created a VALUE of depth for each single point as dictated by x&y coordinates from species data
#now each species seen has a depth based on its coordinates in the depth raster file we are given!!
# Build a data frame of species occurrence data and depth data
locs = data.frame(locs, locs_ext)
# added locs_ext to the final column in locs file so now coordinates for species can be coupled with their depth in teh same file
# Remove points with NA values for depth, i.e. on land
locs = subset(locs, !is.na(locs_ext))
e = extent(depth)
#subset extracted all values and rows with 'na' from the locs_ext column
# WHAT DOES EXTENT DO?!
# without using the 'mask' technique above will this still remove all 'land' data above?
#what is "e"?? - is it simply giving the 'extent' of the data set in a min and max of x and y?
# extent() gives the raster's bounding box; `e` bounds the 1 km grid and
# the background-point draw below.
# Create sequences of X and Y values to define a grid
# this a 1x1 km grid
xgrid = seq(e@xmin,e@xmax,1000)
ygrid = seq(e@ymin,e@ymax,1000)
#"seq()" works by 'from', 'to', 'by incremental step'
#generated a sequence from xmin value to xmax value in "e" that increase by 1000
# Identify occurrence points within each grid cell, then draw one at random
# Spatial thinning: at most one occurrence per 1 km cell.
subs = c()
for(i in 1:(length(xgrid)-1)) {
for(j in 1:(length(ygrid)-1)) {
gridsq = subset(locs, Y > ygrid[j] & Y < ygrid[j+1] & X > xgrid[i] & X < xgrid[i+1])
if(dim(gridsq)[1]>0) {
subs = rbind(subs, gridsq[sample(1:dim(gridsq)[1],1 ), ])
}
}
}
dim(locs);dim(subs) # Confirm that you have a smaller dataset than you started with (1st number)
#for is an argument that will loop a desired action on a given value in a vector
#length will get value the legth of vectors and factors in a defined object
##this a loop going through x values (every 1000m) and at each new x square, looping through all the y's related to that x (and so on for all the x values)
#gridsq is a complex way of saying the square is greater than the start of one x/y value and less than the next one after it
#rbind & cbind combine/create a matrix by rows (rbind) or columns (cbind) of the two seperate vector sets
# Assign correct co-ordinate reference system to subset
coordinates <- cbind(subs$X, subs$Y)
subs_df <- SpatialPointsDataFrame(coordinates, subs, proj4string=CRS("+proj=utm +zone=30 ellps=WGS84"))
#cbind of subs$X and subs$Y created a new data set/matrix called coordinates that only has coordinate data in it!
# Create 20,000 random "background" (pseudo-absence) points within the raster
# extent. There are other ways to do this, but start with this.
# NOTE(review): `mask` must have been created earlier in the script.
psa <- randomPoints(mask, 20000, ext=e)
# (A previous error here was due to the 'dismo' package not being up to date.)
# Stack the raster layers into one multi-layer variable.
# NOTE: the intertidal layer is deliberately excluded.
env_uk<-stack(depth,max_elev,min_elev,avg_velocity,dry_always,subtidal,tidal_range)
# Pull environmental data for the subsampled presence points from the stack.
# extract() returns a MATRIX of environmental values, one row per point
# (a warning about transforming SpatialPoints to the raster CRS may appear).
presence_uk= extract(env_uk, subs_df[,c("X","Y")])
# Pull environmental data for the pseudo-absence points from the raster stack
pseudo_uk = extract(env_uk, psa)
# Unlike the extract() above, this step prepends the X/Y coordinate columns,
# so each row becomes: X, Y, environmental variables.
presence_uk = data.frame(X=subs_df$X, Y=subs_df$Y, presence_uk)
# Convert psa from an atomic matrix to a data.frame
psapoints=data.frame(psa)
# Bind co-ordinates of the pseudo-absence points
coordinates <- cbind(psapoints$x, psapoints$y)
# Create a spatial data frame of pseudo-absences in the same UTM zone 30 CRS
psadf <- SpatialPointsDataFrame(coordinates, psapoints, proj4string=CRS("+proj=utm +zone=30 ellps=WGS84"))
# Build the pseudo-absence dataframe mirroring presence_uk: two coordinate
# columns (renamed to X/Y) followed by the environmental variables.
psadfx = psadf@coords
colnames(psadfx) = c("X","Y")
pseudo_uk = data.frame(cbind(psadfx,pseudo_uk))
# kfold() assigns each subsampled presence point (with its environmental data)
# to one of 5 groups for cross-validation.
group_p = kfold(presence_uk, 5)
# Same 5-fold assignment for the pseudo-absence points
group_a = kfold(pseudo_uk, 5)
# Preallocate output containers for the 5 cross-validation folds.
# FIX: the original `list(5)` creates a length-1 list whose only element is the
# number 5 -- not a list with 5 empty slots. vector("list", 5) is the correct
# preallocation and avoids relying on list-growing during assignment.
evaluations <- vector("list", 5)
models <- vector("list", 5)
# where it says maxent - you may need to swap this for other functions if
# you're exploring different models. Note that some models may need different
# inputs etc. Read the docs to figure this out.
# This is our k-fold test. You will want to spend a bit of time making
# predictions on each of the 5 sub-models created here to check you can make
# decent predictions even with missing data.
for (test in 1:5) {
  # Use the kfold groupings to divide the presence and absence points into
  # training (the 4 folds != test) and testing (the fold == test) sets.
  train_p <- presence_uk[group_p != test, c("X", "Y")]
  train_a <- pseudo_uk[group_a != test, c("X", "Y")]
  test_p <- presence_uk[group_p == test, c("X", "Y")]
  test_a <- pseudo_uk[group_a == test, c("X", "Y")]
  # Fit a maxent model on the training points and the environmental stack;
  # this may take a few moments to run.
  # FIX: use [[ ]] for list element assignment. The original `models[test] <-`
  # used single-bracket assignment of a model object, which triggers the
  # deprecated "implicit list embedding" warning (noted in the original
  # comments) and may fail in future versions of R.
  models[[test]] <- maxent(env_uk, p = train_p, a = train_a)
  # Validate the held-out fold against the fitted model
  evaluations[[test]] <- evaluate(test_p, test_a, models[[test]], env_uk)
}
# Print the AUC of each of the 5 k-fold evaluations as a comma-separated list;
# ideally every fold should score > 0.75.
cat("K-FOLD AUC: ")
for (test in 1:5) {
cat(paste0(evaluations[[test]]@auc,","))
}
# Optional: visualise an individual fold's model, e.g. the first one:
## pred <- predict(models[[1]], env_uk)
## plot(pred)
# Plotting all k-fold models and seeing near-identical maps would indicate the
# fit is stable across folds; not required before building the full model below.
# Assess Spatial Sorting Bias (SSB) between testing and training points.
# NOTE(review): train_p/test_p etc. still hold the LAST fold (test == 5) left
# over from the loop above, so this SSB check is done on that split only.
pres_train_me <- train_p
pres_test_me <- test_p
back_train_me <- train_a
back_test_me <- test_a
sb <- ssb(pres_test_me, back_test_me, pres_train_me)
# Ratio of presence-to-presence vs absence-to-presence distances; values much
# smaller than 1 suggest spatial sorting bias that can inflate AUC.
sb[,1] / sb[,2]
# Adjust for SSB if present via distance-based point-wise sampling
i <- pwdSample(pres_test_me, back_test_me, pres_train_me, n=1, tr=0.1)
# Keep only the matched test presences and their paired background points
pres_test_pwd_me <- pres_test_me[!is.na(i[,1]), ]
back_test_pwd_me <- back_test_me[na.omit(as.vector(i)), ]
# Re-compute SSB on the distance-matched subsample; should be closer to 1
sb2 <- ssb(pres_test_pwd_me, back_test_pwd_me, pres_train_me)
sb2[1]/ sb2[2]
# Final model: fit maxent on ALL presence and pseudo-absence points,
# without any k-fold hold-out.
pres_points = presence_uk[c("X","Y")]
abs_points = pseudo_uk[c("X","Y")]
# create full maxent with all points
model <- maxent(env_uk, p=pres_points, a=abs_points)
# Turn the model into a prediction raster over the environmental stack
pred_PredFull <- predict(model, env_uk)
# Visualise the full-model prediction (and e.g. save a jpeg from the device)
plot(pred_PredFull)
# AUC for the full model (evaluated on the same points it was trained on)
evaluate_full <- evaluate(presence_uk[c("X","Y")], pseudo_uk[c("X","Y")], model, env_uk)
# Print the evaluation summary (includes the AUC)
evaluate_full
# Threshold that maximises sensitivity + specificity: suitability values at or
# above this can be treated as the base level where presence is expected
# (compare against evaluate_full).
message(threshold(evaluate_full)$spec_sens)
# Response curves of each environmental variable in the FULL model
response(model)
# Write the prediction raster to the working directory
# (useful for loading the result into QGIS).
writeRaster(pred_PredFull, filename="pred3_me.tif", options="INTERLEAVE=BAND", overwrite=TRUE)
|
# Tests for copying local data frames into Spark via sdf_copy_to()
context("copy data")
# Shared Spark connection reused by all tests in this file
sc <- testthat_spark_connection()
test_that("sdf_copy_to works for default serializer", {
  # FIX: dplyr::as_data_frame() is deprecated; base as.data.frame() produces
  # the same 2x5 numeric table for this test.
  df <- matrix(0, ncol = 5, nrow = 2) %>% as.data.frame()
  df_tbl <- sdf_copy_to(sc, df, overwrite = TRUE)
  # A 2-row local data frame must yield a 2-row Spark DataFrame
  expect_equal(sdf_nrow(df_tbl), 2)
})
test_that("sdf_copy_to works for scala serializer", {
  skip_livy()  # not run on Livy connections
  # FIX: dplyr::as_data_frame() is deprecated; base as.data.frame() produces
  # the same 2x5 numeric table for this test.
  df <- matrix(0, ncol = 5, nrow = 2) %>% as.data.frame()
  df_tbl <- sdf_copy_to(sc, df, overwrite = TRUE, serializer = "csv_file_scala")
  expect_equal(sdf_nrow(df_tbl), 2)
})
test_that("sdf_copy_to works for csv serializer", {
  skip_livy()  # not run on Livy connections
  # FIX: dplyr::as_data_frame() is deprecated; base as.data.frame() produces
  # the same 2x5 numeric table for this test.
  df <- matrix(0, ncol = 5, nrow = 2) %>% as.data.frame()
  df_tbl <- sdf_copy_to(sc, df, overwrite = TRUE, serializer = "csv_file")
  expect_equal(sdf_nrow(df_tbl), 2)
})
# Regression test for #1386: deriving a table name from a MULTILINE inline
# expression must not warn. The data.frame() call is deliberately kept inline
# and spanning several lines -- do not refactor it into a variable, or the
# test no longer exercises the multiline-deparse code path.
# expect_warning(expr, NA) asserts that NO warning is raised.
test_that("spark_table_name() doesn't warn for multiline expression (#1386)", {
expect_warning(
spark_table_name(data.frame(foo = c(1, 2, 3),
bar = c(2, 1, 3),
foobar = c("a", "b", "c"))
),
NA
)
})
test_that("sdf_copy_to supports list of callbacks", {
  # FIX: dplyr::as_data_frame() is deprecated; base as.data.frame() produces
  # the same 2x5 numeric table for this test.
  df <- matrix(0, ncol = 5, nrow = 2) %>% as.data.frame()
  # Two callbacks, each yielding a 2-row data frame -> 4 rows total
  df_tbl <- sdf_copy_to(sc, list(~df, ~df), overwrite = TRUE)
  expect_equal(sdf_nrow(df_tbl), 4)
})
| /tests/testthat/test-copy-to.R | permissive | AgrawalAmey/sparklyr | R | false | false | 1,298 | r | context("copy data")
# Shared Spark connection reused by all tests in this file
sc <- testthat_spark_connection()
test_that("sdf_copy_to works for default serializer", {
  # FIX: dplyr::as_data_frame() is deprecated; base as.data.frame() produces
  # the same 2x5 numeric table for these tests.
  df <- matrix(0, ncol = 5, nrow = 2) %>% as.data.frame()
  df_tbl <- sdf_copy_to(sc, df, overwrite = TRUE)
  expect_equal(sdf_nrow(df_tbl), 2)
})
test_that("sdf_copy_to works for scala serializer", {
  skip_livy()  # not run on Livy connections
  df <- matrix(0, ncol = 5, nrow = 2) %>% as.data.frame()
  df_tbl <- sdf_copy_to(sc, df, overwrite = TRUE, serializer = "csv_file_scala")
  expect_equal(sdf_nrow(df_tbl), 2)
})
test_that("sdf_copy_to works for csv serializer", {
  skip_livy()  # not run on Livy connections
  df <- matrix(0, ncol = 5, nrow = 2) %>% as.data.frame()
  df_tbl <- sdf_copy_to(sc, df, overwrite = TRUE, serializer = "csv_file")
  expect_equal(sdf_nrow(df_tbl), 2)
})
# Regression test for #1386: the data.frame() call must stay inline and span
# multiple lines so the multiline-deparse code path is exercised.
# expect_warning(expr, NA) asserts that NO warning is raised.
test_that("spark_table_name() doesn't warn for multiline expression (#1386)", {
  expect_warning(
    spark_table_name(data.frame(foo = c(1, 2, 3),
                                bar = c(2, 1, 3),
                                foobar = c("a", "b", "c"))
    ),
    NA
  )
})
test_that("sdf_copy_to supports list of callbacks", {
  df <- matrix(0, ncol = 5, nrow = 2) %>% as.data.frame()
  # Two callbacks, each yielding a 2-row data frame -> 4 rows total
  df_tbl <- sdf_copy_to(sc, list(~df, ~df), overwrite = TRUE)
  expect_equal(sdf_nrow(df_tbl), 4)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write_q2manifest.R
\name{write_q2manifest}
\alias{write_q2manifest}
\title{Generates a read manifest for importing sequencing data into qiime2}
\usage{
write_q2manifest(outfile, directory, extension, paired, Fwd, Rev)
}
\arguments{
\item{outfile}{filename for output (default: manifest_[timestamp].txt)}
\item{directory}{directory containing reads}
\item{extension}{file extension (default: .fastq.gz)}
\item{paired}{are reads in paired format? TRUE/FALSE (default=FALSE)}
\item{Fwd}{string used to denote a forward read (default= _R1)}
\item{Rev}{string used to denote a reverse read (default= _R2)}
}
\description{
Scans a directory for sequencing data files with a matching extension (default: .fastq.gz) and then generates a QIIME 2-compliant read manifest.
}
\examples{
\dontrun{write_q2manifest("q2manifest.txt","/yourdirhere/reads/", extension=".fastq.gz", paired=TRUE)}
}
| /man/write_q2manifest.Rd | permissive | ropolomx/qiime2R | R | false | true | 941 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write_q2manifest.R
\name{write_q2manifest}
\alias{write_q2manifest}
\title{Generates a read manifest for importing sequencing data into qiime2}
\usage{
write_q2manifest(outfile, directory, extension, paired, Fwd, Rev)
}
\arguments{
\item{outfile}{filename for output (default: manifest_[timestamp].txt)}
\item{directory}{directory containing reads}
\item{extension}{file extension (default: .fastq.gz)}
\item{paired}{are reads in paired format? TRUE/FALSE (default=FALSE)}
\item{Fwd}{string used to denote a forward read (default= _R1)}
\item{Rev}{string used to denote a reverse read (default= _R2)}
}
\description{
Scans a directory for sequencing data files with a matching extension (default: .fastq.gz) and then generates a QIIME 2-compliant read manifest.
}
\examples{
\dontrun{write_q2manifest("q2manifest.txt","/yourdirhere/reads/", extension=".fastq.gz", paired=TRUE)}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fit_r_light.R
\name{fit_r_light2}
\alias{fit_r_light2}
\title{Fit models to estimate light respiration (\eqn{R_\mathrm{d}})}
\usage{
fit_r_light2(
.data,
.model = "default",
.method = "ls",
Q_lower = NA,
Q_upper = NA,
Q_levels = NULL,
C_upper = NA,
quiet = FALSE,
brm_options = NULL
)
}
\arguments{
\item{.data}{A data frame containing plant ecophysiological data. See \code{\link[=required_variables]{required_variables()}} for the variables required for each model.}
\item{.model}{A character string of model name to use. See \code{\link[=get_all_models]{get_all_models()}}.}
\item{.method}{A character string of the statistical method to use: 'ls' for least-squares and 'brms' for Bayesian model using \code{\link[brms:brm]{brms::brm()}}. Default is 'ls'.}
\item{Q_lower}{Lower light intensity limit for estimating Rd using \code{kok_1956} and \code{yin_etal_2011} models.}
\item{Q_upper}{Upper light intensity limit for estimating Rd using \code{kok_1956} and \code{yin_etal_2011} models}
\item{Q_levels}{A numeric vector of light intensity levels (\eqn{\mu}mol / mol) for estimating \eqn{R_\mathrm{d}} from the linear region of the A-C curve using the \code{walker_ort_2015} model.}
\item{C_upper}{Upper C (\eqn{\mu}mol / mol) limit for estimating \eqn{R_\mathrm{d}} from the linear region of the A-C curve using the \code{walker_ort_2015} model.}
\item{quiet}{Flag. Should messages be suppressed? Default is FALSE.}
\item{brm_options}{A list of options passed to \code{\link[brms:brm]{brms::brm()}} if \code{.method = "brms"}. Default is NULL.}
}
\value{
\itemize{
\item If \code{.method = 'ls'}: an \code{\link[stats:nls]{stats::nls()}} or \code{\link[stats:lm]{stats::lm()}} object.
\item If \code{.method = 'brms'}: a \code{\link[brms:brmsfit-class]{brms::brmsfit()}} object.
}
}
\description{
We recommend using \code{\link[=fit_photosynthesis]{fit_photosynthesis()}} with argument \code{.photo_fun = "r_light"} rather than calling this function directly.
}
\note{
Confusingly, \eqn{R_\mathrm{d}} typically denotes respiration in the light, but you might see \eqn{R_\mathrm{day}} or \eqn{R_\mathrm{light}}.
\strong{Models}
\emph{Kok (1956)}
The \code{kok_1956} model estimates light respiration using the Kok method
(Kok, 1956). The Kok method involves looking for a breakpoint in the
light response of net CO2 assimilation at very low light intensities
and extrapolating from data above the breakpoint to estimate light
respiration as the y-intercept. The estimated Rd value should be negative,
denoting an efflux of CO2.
\emph{Yin et al. (2011)}
The \code{yin_etal_2011} model estimates light respiration according
to the Yin \emph{et al.} (2009, 2011) modifications of the Kok
method. The modification uses fluorescence data to get a
better estimate of light respiration. Rd values should be negative here to
denote an efflux of CO2.
\emph{Walker & Ort (2015)}
The \code{walker_ort_2015} model estimates light respiration and
\eqn{\Gamma*} according to Walker & Ort (2015) using a slope-
intercept regression method to find the intercept of multiple
A-C curves run at multiple light intensities. The method estimates
\eqn{\Gamma*} and \eqn{R_\mathrm{d}}. If estimated \eqn{R_\mathrm{d}} is
positive this could indicate issues (i.e. leaks) in the gas exchange
measurements. \eqn{\Gamma*} is in units of umol / mol and \eqn{R_\mathrm{d}}
is in units of \eqn{\mu}mol m\eqn{^{-2}} s\eqn{^{-1}} of respiratory flux.
If using \eqn{C_\mathrm{i}}, the estimated value is technically \eqn{C_\mathrm{i}}*.
You need to use \eqn{C_\mathrm{c}} to get \eqn{\Gamma*} Also note, however,
that the convention in the field is to completely ignore this note.
}
\examples{
\donttest{
# Walker & Ort (2015) model
library(broom)
library(dplyr)
library(photosynthesis)
acq_data = system.file("extdata", "A_Ci_Q_data_1.csv", package = "photosynthesis") |>
read.csv()
fit = fit_photosynthesis(
.data = acq_data,
.photo_fun = "r_light",
.model = "walker_ort_2015",
.vars = list(.A = A, .Q = Qin, .C = Ci),
C_upper = 300,
# Irradiance levels used in experiment
Q_levels = c(1500, 750, 375, 125, 100, 75, 50, 25),
)
# The 'fit' object inherits class 'lm' and many methods can be used
## Model summary:
summary(fit)
## Estimated parameters:
coef(fit)
## 95\% confidence intervals:
## n.b. these confidence intervals are not correct because the regression is fit
## sequentially. It ignores the underlying data and uncertainty in estimates of
## slopes and intercepts with each A-C curve. Use '.method = "brms"' to properly
## calculate uncertainty.
confint(fit)
## Tidy summary table using 'broom::tidy()'
tidy(fit, conf.int = TRUE, conf.level = 0.95)
## Calculate residual sum-of-squares
sum(resid(fit)^2)
# Yin et al. (2011) model
fit = fit_photosynthesis(
.data = acq_data,
.photo_fun = "r_light",
.model = "yin_etal_2011",
.vars = list(.A = A, .phiPSII = PhiPS2, .Q = Qin),
Q_lower = 20,
Q_upper = 250
)
# The 'fit' object inherits class 'lm' and many methods can be used
## Model summary:
summary(fit)
## Estimated parameters:
coef(fit)
## 95\% confidence intervals:
confint(fit)
## Tidy summary table using 'broom::tidy()'
tidy(fit, conf.int = TRUE, conf.level = 0.95)
## Calculate residual sum-of-squares
sum(resid(fit)^2)
# Kok (1956) model
fit = fit_photosynthesis(
.data = acq_data,
.photo_fun = "r_light",
.model = "kok_1956",
.vars = list(.A = A, .Q = Qin),
Q_lower = 20,
Q_upper = 150
)
# The 'fit' object inherits class 'lm' and many methods can be used
## Model summary:
summary(fit)
## Estimated parameters:
coef(fit)
## 95\% confidence intervals:
confint(fit)
## Tidy summary table using 'broom::tidy()'
tidy(fit, conf.int = TRUE, conf.level = 0.95)
## Calculate residual sum-of-squares
sum(resid(fit)^2)
}
}
\references{
Kok B. 1956. On the inhibition of photosynthesis by intense light.
Biochimica et Biophysica Acta 21: 234–244
Walker BJ, Ort DR. 2015. Improved method for measuring the apparent
CO2 photocompensation point resolves the impact of multiple internal
conductances to CO2 to net gas exchange. Plant Cell Environ 38:2462-
2474
Yin X, Struik PC, Romero P, Harbinson J, Evers JB, van der Putten
PEL, Vos J. 2009. Using combined measurements of gas exchange and
chlorophyll fluorescence to estimate parameters of a biochemical C3
photosynthesis model: a critical appraisal and a new integrated
approach applied to leaves in a wheat (Triticum aestivum) canopy.
Plant Cell Environ 32:448-464
Yin X, Sun Z, Struik PC, Gu J. 2011. Evaluating a new method to
estimate the rate of leaf respiration in the light by analysis of
combined gas exchange and chlorophyll fluorescence measurements.
Journal of Experimental Botany 62: 3489–3499
}
| /man/fit_r_light2.Rd | permissive | cran/photosynthesis | R | false | true | 6,823 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fit_r_light.R
\name{fit_r_light2}
\alias{fit_r_light2}
\title{Fit models to estimate light respiration (\eqn{R_\mathrm{d}})}
\usage{
fit_r_light2(
.data,
.model = "default",
.method = "ls",
Q_lower = NA,
Q_upper = NA,
Q_levels = NULL,
C_upper = NA,
quiet = FALSE,
brm_options = NULL
)
}
\arguments{
\item{.data}{A data frame containing plant ecophysiological data. See \code{\link[=required_variables]{required_variables()}} for the variables required for each model.}
\item{.model}{A character string of model name to use. See \code{\link[=get_all_models]{get_all_models()}}.}
\item{.method}{A character string of the statistical method to use: 'ls' for least-squares and 'brms' for Bayesian model using \code{\link[brms:brm]{brms::brm()}}. Default is 'ls'.}
\item{Q_lower}{Lower light intensity limit for estimating Rd using \code{kok_1956} and \code{yin_etal_2011} models.}
\item{Q_upper}{Upper light intensity limit for estimating Rd using \code{kok_1956} and \code{yin_etal_2011} models}
\item{Q_levels}{A numeric vector of light intensity levels (\eqn{\mu}mol / mol) for estimating \eqn{R_\mathrm{d}} from the linear region of the A-C curve using the \code{walker_ort_2015} model.}
\item{C_upper}{Upper C (\eqn{\mu}mol / mol) limit for estimating \eqn{R_\mathrm{d}} from the linear region of the A-C curve using the \code{walker_ort_2015} model.}
\item{quiet}{Flag. Should messages be suppressed? Default is FALSE.}
\item{brm_options}{A list of options passed to \code{\link[brms:brm]{brms::brm()}} if \code{.method = "brms"}. Default is NULL.}
}
\value{
\itemize{
\item If \code{.method = 'ls'}: an \code{\link[stats:nls]{stats::nls()}} or \code{\link[stats:lm]{stats::lm()}} object.
\item If \code{.method = 'brms'}: a \code{\link[brms:brmsfit-class]{brms::brmsfit()}} object.
}
}
\description{
We recommend using \code{\link[=fit_photosynthesis]{fit_photosynthesis()}} with argument \code{.photo_fun = "r_light"} rather than calling this function directly.
}
\note{
Confusingly, \eqn{R_\mathrm{d}} typically denotes respiration in the light, but you might see \eqn{R_\mathrm{day}} or \eqn{R_\mathrm{light}}.
\strong{Models}
\emph{Kok (1956)}
The \code{kok_1956} model estimates light respiration using the Kok method
(Kok, 1956). The Kok method involves looking for a breakpoint in the
light response of net CO2 assimilation at very low light intensities
and extrapolating from data above the breakpoint to estimate light
respiration as the y-intercept. The estimated Rd value should be negative,
denoting an efflux of CO2.
\emph{Yin et al. (2011)}
The \code{yin_etal_2011} model estimates light respiration according
to the Yin \emph{et al.} (2009, 2011) modifications of the Kok
method. The modification uses fluorescence data to get a
better estimate of light respiration. Rd values should be negative here to
denote an efflux of CO2.
\emph{Walker & Ort (2015)}
The \code{walker_ort_2015} model estimates light respiration and
\eqn{\Gamma*} according to Walker & Ort (2015) using a slope-
intercept regression method to find the intercept of multiple
A-C curves run at multiple light intensities. The method estimates
\eqn{\Gamma*} and \eqn{R_\mathrm{d}}. If estimated \eqn{R_\mathrm{d}} is
positive this could indicate issues (i.e. leaks) in the gas exchange
measurements. \eqn{\Gamma*} is in units of umol / mol and \eqn{R_\mathrm{d}}
is in units of \eqn{\mu}mol m\eqn{^{-2}} s\eqn{^{-1}} of respiratory flux.
If using \eqn{C_\mathrm{i}}, the estimated value is technically \eqn{C_\mathrm{i}}*.
You need to use \eqn{C_\mathrm{c}} to get \eqn{\Gamma*} Also note, however,
that the convention in the field is to completely ignore this note.
}
\examples{
\donttest{
# Walker & Ort (2015) model
library(broom)
library(dplyr)
library(photosynthesis)
acq_data = system.file("extdata", "A_Ci_Q_data_1.csv", package = "photosynthesis") |>
read.csv()
fit = fit_photosynthesis(
.data = acq_data,
.photo_fun = "r_light",
.model = "walker_ort_2015",
.vars = list(.A = A, .Q = Qin, .C = Ci),
C_upper = 300,
# Irradiance levels used in experiment
Q_levels = c(1500, 750, 375, 125, 100, 75, 50, 25),
)
# The 'fit' object inherits class 'lm' and many methods can be used
## Model summary:
summary(fit)
## Estimated parameters:
coef(fit)
## 95\% confidence intervals:
## n.b. these confidence intervals are not correct because the regression is fit
## sequentially. It ignores the underlying data and uncertainty in estimates of
## slopes and intercepts with each A-C curve. Use '.method = "brms"' to properly
## calculate uncertainty.
confint(fit)
## Tidy summary table using 'broom::tidy()'
tidy(fit, conf.int = TRUE, conf.level = 0.95)
## Calculate residual sum-of-squares
sum(resid(fit)^2)
# Yin et al. (2011) model
fit = fit_photosynthesis(
.data = acq_data,
.photo_fun = "r_light",
.model = "yin_etal_2011",
.vars = list(.A = A, .phiPSII = PhiPS2, .Q = Qin),
Q_lower = 20,
Q_upper = 250
)
# The 'fit' object inherits class 'lm' and many methods can be used
## Model summary:
summary(fit)
## Estimated parameters:
coef(fit)
## 95\% confidence intervals:
confint(fit)
## Tidy summary table using 'broom::tidy()'
tidy(fit, conf.int = TRUE, conf.level = 0.95)
## Calculate residual sum-of-squares
sum(resid(fit)^2)
# Kok (1956) model
fit = fit_photosynthesis(
.data = acq_data,
.photo_fun = "r_light",
.model = "kok_1956",
.vars = list(.A = A, .Q = Qin),
Q_lower = 20,
Q_upper = 150
)
# The 'fit' object inherits class 'lm' and many methods can be used
## Model summary:
summary(fit)
## Estimated parameters:
coef(fit)
## 95\% confidence intervals:
confint(fit)
## Tidy summary table using 'broom::tidy()'
tidy(fit, conf.int = TRUE, conf.level = 0.95)
## Calculate residual sum-of-squares
sum(resid(fit)^2)
}
}
\references{
Kok B. 1956. On the inhibition of photosynthesis by intense light.
Biochimica et Biophysica Acta 21: 234–244
Walker BJ, Ort DR. 2015. Improved method for measuring the apparent
CO2 photocompensation point resolves the impact of multiple internal
conductances to CO2 to net gas exchange. Plant Cell Environ 38:2462-
2474
Yin X, Struik PC, Romero P, Harbinson J, Evers JB, van der Putten
PEL, Vos J. 2009. Using combined measurements of gas exchange and
chlorophyll fluorescence to estimate parameters of a biochemical C3
photosynthesis model: a critical appraisal and a new integrated
approach applied to leaves in a wheat (Triticum aestivum) canopy.
Plant Cell Environ 32:448-464
Yin X, Sun Z, Struik PC, Gu J. 2011. Evaluating a new method to
estimate the rate of leaf respiration in the light by analysis of
combined gas exchange and chlorophyll fluorescence measurements.
Journal of Experimental Botany 62: 3489–3499
}
|
################################
#### Contour plots of the Kent distribution on the sphere
#### Tsagris Michail 06/2014
#### mtsagris@yahoo.gr
################################
kent.contour <- function(k, b) {
  ## Filled contour plot of the Kent (Fisher-Bingham) density on the sphere,
  ## displayed on the projected disc of radius sqrt(2).
  ## k is the concentration parameter
  ## b is the ovalness parameter
  ## b must be less than k/2
  ## FIX: enforce the documented precondition instead of silently plotting
  ## a density that is not Kent-shaped.
  if ( b >= k/2 )  stop("the ovalness parameter 'b' must be less than k/2")
  gam <- c(0, k, 0)
  lam <- c(0, -b, b)
  ## log-normalising constant of the Fisher-Bingham density
  con <- Directional::fb.saddle(gam, lam)[3]
  rho <- sqrt(2)
  x <- seq(-rho, rho, by = 0.01)
  n <- length(x)
  mat1 <- matrix(rep(x^2, n), ncol = n)   ## each column equals x^2
  mat2 <- t(mat1)                         ## each row equals x^2
  z <- sqrt( mat1 + mat2 )                ## distance from the disc centre
  ind <- ( z^2 < rho^2 )   ## TRUE inside the disc, i.e. x^2 + y^2 < rho^2
  ind[ !ind ] <- NA        ## points outside the disc are masked out
  theta <- 2 * asin(0.5 * z)   ## planar radius mapped back to an angle on the sphere
  xa <- k * cos(theta) + b * (mat1 - mat2) - con   ## log-density over the grid
  mat <- exp(xa) * ind
  ## filled.contour() hides every line drawn in the default foreground colour,
  ## so the foreground is disabled and the contour lines below set
  ## col = "black" explicitly. See:
  ## https://stackoverflow.com/questions/8068366/removing-lines-within-filled-contour-legend
  ## FIX: restore the caller's graphical parameters on exit; the original
  ## left fg = NA set globally after the call.
  oldpar <- par(fg = NA)
  on.exit( par(oldpar), add = TRUE )
  ## Filled contour plot in base R
  filled.contour(x, x, mat,
                 ## many levels -> smoother colour interpolation
                 nlevels = 200,
                 color.palette = colorRampPalette( c("blue", "cyan",
                                                     "yellow", "red") ),
                 plot.axes = {
                   ## overlay explicit contour lines (col = "black" because
                   ## the foreground colour is disabled above)
                   contour(x, x, mat, col = "black", nlevels = 10,
                           labcex = 0.8,   ## size of the contour labels
                           lwd = 1.5,      ## width of the contour lines
                           add = TRUE) },
                 ## legend tick lines
                 key.axes = { axis(4, col = "black", cex.lab = 1.2) },
                 xlab = "Latitude",
                 ylab = "Longitude",
                 cex.lab = 1.2)
}
| /R/kent.contour.R | no_license | cran/Directional | R | false | false | 2,564 | r | ################################
#### Contour plots of the Kent distribution on the sphere
#### Tsagris Michail 06/2014
#### mtsagris@yahoo.gr
################################
kent.contour <- function(k, b) {
  ## Contour plot of the Kent density on the sphere, drawn on the projected
  ## disc of radius sqrt(2).
  ## k: concentration parameter; b: ovalness parameter (must be < k/2).
  gamma.par <- c(0, k, 0)
  lambda.par <- c(0, -b, b)
  ## log-normalising constant via the Fisher-Bingham saddlepoint approximation
  logcon <- Directional::fb.saddle(gamma.par, lambda.par)[3]
  rmax <- sqrt(2)
  grd <- seq(-rmax, rmax, by = 0.01)
  m <- length(grd)
  x2 <- matrix(rep(grd^2, m), ncol = m)
  y2 <- t(x2)
  rad <- sqrt(x2 + y2)
  inside <- (rad^2 < rmax^2)   ## TRUE where x^2 + y^2 < rmax^2
  inside[!inside] <- NA        ## mask everything outside the disc
  ang <- 2 * asin(0.5 * rad)
  logdens <- k * cos(ang) + b * (x2 - y2) - logcon
  dens <- exp(logdens) * inside
  ## filled.contour() erases lines drawn in the default foreground colour, so
  ## the foreground is switched off and every line we DO want is drawn with an
  ## explicit col = "black". For details see:
  ## https://stackoverflow.com/questions/8068366/removing-lines-within-filled-contour-legend
  par(fg = NA)
  filled.contour(grd, grd, dens,
                 nlevels = 200,   ## many levels -> smooth colour gradient
                 color.palette = colorRampPalette(c("blue", "cyan",
                                                    "yellow", "red")),
                 plot.axes = {
                   ## overlay explicit contour lines
                   contour(grd, grd, dens, col = "black", nlevels = 10,
                           labcex = 0.8, lwd = 1.5, add = TRUE) },
                 ## legend tick lines
                 key.axes = { axis(4, col = "black", cex.lab = 1.2) },
                 xlab = "Latitude",
                 ylab = "Longitude",
                 cex.lab = 1.2)
}
|
# Read the raw survey answers and prepare the satisfaction workbooks.
# Choose the data directory (paths are resolved relative to the current dir).
library(xlsx)
# NOTE(review): setwd("./") is a no-op; kept as-is.
setwd("./")
pathActual <- getwd()
pathDatos <- file.path(pathActual,"Data1")
# Input workbook holding the raw survey answers
F_XlsDataEncuestas<- file.path(pathDatos,"respuestas Calidad 2016-2.xlsx")
# Output workbook that will receive the processed satisfaction sheets
F_XlsOutDatosEncuestas<- file.path(pathDatos,"SatisfacciónRtasFundamentos.xlsx")
# DMU (decision-making unit) id, used as a suffix in the output sheet names
dmu<-1
# Column of `data` holding the survey/respondent identifier
numColIdEncuesta<-4
data <- read.xlsx(F_XlsDataEncuestas, "Data")
# Recode the Likert-scale answers (Spanish labels) to a 1-5 numeric scale:
# "Nunca o casi nunca"/"Casi nunca" -> 1, "Ocasionalmente" -> 2,
# "A veces" -> 3, "Frecuentemente" -> 4,
# "Siempre o casi siempre"/"Casi siempre" -> 5, "No aplica" -> NA.
# NOTE: apply() returns a CHARACTER matrix; values are converted with
# as.numeric() later, when each output sheet is assembled.
data <- apply(data, 2, function(x) {x[x == "Nunca o casi nunca"] <- 1; x})
data <- apply(data, 2, function(x) {x[x == "Casi nunca"] <- 1; x})
data <- apply(data, 2, function(x) {x[x == "Ocasionalmente"] <- 2; x})
data <- apply(data, 2, function(x) {x[x == "A veces"] <- 3; x})
data <- apply(data, 2, function(x) {x[x == "Frecuentemente"] <- 4; x})
data <- apply(data, 2, function(x) {x[x == "Siempre o casi siempre"] <- 5; x})
data <- apply(data, 2, function(x) {x[x == "Casi siempre"] <- 5; x})
data <- apply(data, 2, function(x) {x[x == "No aplica"] <- NA; x})
data<- as.matrix(data)
# Helper used to impute NA entries: returns the most frequent value
# (statistical mode) of x; on ties, the value that appears first in x wins.
Mode <- function(x) {
  vals <- unique(x)
  counts <- tabulate(match(x, vals))
  vals[which.max(counts)]
}
# Usage for imputation: s[is.na(s)] = Mode(s)
# Alternative mode: names(sort(-table(mydata[,12])))[1]
#File 1: general information on the criteria and their sub-criteria
###########
datosCriterios <- read.xlsx(F_XlsDataEncuestas, "infoCriterios")
filas<- subset(datosCriterios,ID!=0) # ID = 0 is the global-satisfaction row
filas$Criterio <- NULL # delete that column
filas$Col <- NULL # delete that column
names(filas)<-c("CRITERIOS", "subcriterios")
filas<-lapply(filas, function(x)as.numeric(x))
# First write has no append = TRUE: it creates/overwrites the output workbook
write.xlsx(filas,file=F_XlsOutDatosEncuestas,sheetName="Subcriterios",row.names = F)
#File 2: parameters to be used in the LP model (sheet "Datos<dmu>")
###########
param <- read.xlsx(F_XlsDataEncuestas, "infoParametros")
# Number of surveyed clients = number of rows in the answers matrix
numClientes <- length(data[, numColIdEncuesta])
# Number of criteria = rows of infoCriterios excluding the global row (ID = 0)
numCriterios <- nrow(subset(datosCriterios, ID != 0))
# Assemble the name/value parameter table expected by the LP model
df <- NULL
df <- rbind(df, data.frame(Parametros = "clientes", valor = numClientes))
df <- rbind(df, data.frame(Parametros = "criterios", valor = numCriterios))
df <- rbind(df, data.frame(Parametros = "alfa", valor = param[1, 2]))
df <- rbind(df, data.frame(Parametros = "alfaI", valor = param[2, 2]))
df <- rbind(df, data.frame(Parametros = "alfaIJ", valor = param[3, 2]))
df <- rbind(df, data.frame(Parametros = "thr", valor = param[4, 2]))
df <- rbind(df, data.frame(Parametros = "thrCriterio", valor = param[5, 2]))
df <- rbind(df, data.frame(Parametros = "thrSubcriterio", valor = param[6, 2]))
df <- rbind(df, data.frame(Parametros = "e", valor = param[7, 2]))
# FIX: removed two leftover copy-paste lines that re-labelled `filas` here;
# they had no effect on this sheet (only `df` is written) and `filas` is
# reassigned before its next use.
write.xlsx(df, file = F_XlsOutDatosEncuestas, sheetName = paste0("Datos", dmu), append = T, row.names = F)
#File 3: global satisfaction per client (sheet "SatisfaccionGlobal<dmu>")
###########
fila<- subset(datosCriterios,ID==0) # ID = 0 must be the global row
col<- as.numeric(fila[4]) # column of `data` holding the global answer
s<-data[,col]
s[is.na(s)] = Mode(s) # impute NA with the mode
x<- data[,numColIdEncuesta] # column 4 must hold the survey identifier
z<- as.data.frame(cbind(x,s)) # pair each client with their global satisfaction
names(z)<-c("CLIENTES","satGlobal")
z<-lapply(z, function(x)as.numeric(x))
write.xlsx(z,file=F_XlsOutDatosEncuestas,sheetName=paste0("SatisfaccionGlobal",dmu),append = T,row.names = F)
#File 4: per-criterion satisfaction (sheet "SatisfaccionCriterios<dmu>")
###########
# For every criterion i, look up its answer column in infoCriterios, impute
# NA answers with the mode, and stack (client, criterion, score) rows.
per_criterion <- lapply(seq_len(numCriterios), function(i) {
  info_i <- subset(datosCriterios, ID == i)
  scores <- data[, as.numeric(info_i[4])]
  scores[is.na(scores)] <- Mode(scores)
  cbind(data[, numColIdEncuesta], i, scores)
})
z <- as.data.frame(do.call(rbind, per_criterion))
names(z) <- c("CLIENTES", "CRITERIOS", "satCriterios")
z <- lapply(z, function(v) as.numeric(v))
write.xlsx(z, file = F_XlsOutDatosEncuestas, sheetName = paste0("SatisfaccionCriterios", dmu), append = T, row.names = F)
#File 5/5: per-subcriterion satisfaction (sheet "SatisfaccionSubcriterios<dmu>")
###########
datosSubcriterios <- read.xlsx(F_XlsDataEncuestas, "infoSubcriterios")
z<-vector()
# For every criterion i and each of its sub-criteria j, pull the answer column
# (3rd column of infoSubcriterios), impute NA with the mode, and stack
# (client, criterion, subcriterion, score) rows.
for(i in 1:numCriterios) {
filas<- subset(datosSubcriterios,Criterio==i)
numFilas<- nrow(filas)
for(j in 1:numFilas) {
col<- as.numeric( filas[j,3])
x<- data[,numColIdEncuesta] # column 4 must hold the survey identifier
s<-data[,col]
s[is.na(s)] = Mode(s) # impute NA with the mode
z<-rbind(z,cbind(x,i,j,s))
}
}
z<- as.data.frame(z)
names(z)<-c("CLIENTES","CRITERIOS","SUBCRITERIOS","satSubcriterios")
z<-lapply(z, function(x)as.numeric(x))
write.xlsx(z,file=F_XlsOutDatosEncuestas,sheetName=paste0("SatisfaccionSubcriterios",dmu),append = T,row.names = F)
| /servidor/RMusa/ProcesarEncuestas.R | no_license | pacho950609/MUSA-IIND | R | false | false | 4,650 | r | #Leer
# Choose the data directory (paths are resolved relative to the current dir)
library(xlsx)
# NOTE(review): setwd("./") is a no-op; kept as-is.
setwd("./")
pathActual <- getwd()
pathDatos <- file.path(pathActual,"Data1")
# Input workbook (raw survey answers) and output workbook (processed sheets)
F_XlsDataEncuestas<- file.path(pathDatos,"respuestas Calidad 2016-2.xlsx")
F_XlsOutDatosEncuestas<- file.path(pathDatos,"SatisfacciónRtasFundamentos.xlsx")
# DMU id (output sheet-name suffix) and survey-identifier column of `data`
dmu<-1
numColIdEncuesta<-4
data <- read.xlsx(F_XlsDataEncuestas, "Data")
# Recode the Likert-scale answers (Spanish labels) to 1-5; "No aplica" -> NA.
# NOTE: apply() returns a CHARACTER matrix; numeric conversion happens later.
data <- apply(data, 2, function(x) {x[x == "Nunca o casi nunca"] <- 1; x})
data <- apply(data, 2, function(x) {x[x == "Casi nunca"] <- 1; x})
data <- apply(data, 2, function(x) {x[x == "Ocasionalmente"] <- 2; x})
data <- apply(data, 2, function(x) {x[x == "A veces"] <- 3; x})
data <- apply(data, 2, function(x) {x[x == "Frecuentemente"] <- 4; x})
data <- apply(data, 2, function(x) {x[x == "Siempre o casi siempre"] <- 5; x})
data <- apply(data, 2, function(x) {x[x == "Casi siempre"] <- 5; x})
data <- apply(data, 2, function(x) {x[x == "No aplica"] <- NA; x})
data<- as.matrix(data)
#para imputar NA
# Statistical mode of `x`: the most frequent value (ties resolved to the
# value seen first).  Used to impute NA survey answers with the most common
# response for that question.
Mode <- function(x) {
  distinct_vals <- unique(x)
  freq <- tabulate(match(x, distinct_vals))
  distinct_vals[which.max(freq)]
}
# s[is.na(s)] = Mode(s) # para "imputar"
# moda alternativa : names(sort(-table(mydata[,12])))[1]
#Archivo 1: general information about the criteria and their sub-criteria
###########
datosCriterios <- read.xlsx(F_XlsDataEncuestas, "infoCriterios")
filas<- subset(datosCriterios,ID!=0) # ID == 0 is the global row; keep only the real criteria
filas$Criterio <- NULL # delete that column
filas$Col <- NULL # delete that column
names(filas)<-c("CRITERIOS", "subcriterios")
filas<-lapply(filas, function(x)as.numeric(x))  # coerce columns to numeric for the output sheet
write.xlsx(filas,file=F_XlsOutDatosEncuestas,sheetName="Subcriterios",row.names = F)
#Archivo 2: parameters used by the LP model
###########
# Read the parameter sheet and assemble a two-column (name, value) table that
# combines counts derived from the data with the sheet's settings.
param <- read.xlsx(F_XlsDataEncuestas, "infoParametros")
numClientes<- length( data[,numColIdEncuesta] )
numCriterios<- nrow(subset(datosCriterios,ID!=0))
df <- NULL;
df <- rbind(df,data.frame(Parametros="clientes",valor=numClientes) )
df <- rbind(df,data.frame(Parametros="criterios",valor=numCriterios) )
df <- rbind(df,data.frame(Parametros="alfa",valor=param[1,2]) )
df <- rbind(df,data.frame(Parametros="alfaI",valor=param[2,2]) )
df <- rbind(df,data.frame(Parametros="alfaIJ",valor=param[3,2]) )
df <- rbind(df,data.frame(Parametros="thr",valor=param[4,2]) )
df <- rbind(df,data.frame(Parametros="thrCriterio",valor=param[5,2]) )
df <- rbind(df,data.frame(Parametros="thrSubcriterio",valor=param[6,2]) )
df <- rbind(df,data.frame(Parametros="e",valor=param[7,2]) )
# FIX: removed two leftover lines that renamed and re-coerced `filas` here.
# They were copy-pasted from "Archivo 1"; `filas` is not read again before it
# is reassigned in "Archivo 5", and the write below outputs `df`, so the
# lines had no effect on any output.
write.xlsx(df,file=F_XlsOutDatosEncuestas,sheetName=paste0("Datos",dmu),append = T,row.names = F)
#Archivo 3: overall (global) satisfaction per client
###########
fila<- subset(datosCriterios,ID==0) # the row with ID == 0 describes the global score
col<- as.numeric(fila[4])  # fourth field holds the survey-sheet column of the global score
s<-data[,col]
s[is.na(s)] = Mode(s) # impute missing answers with the modal answer
x<- data[,numColIdEncuesta] # survey identifier column (column four by convention)
z<- as.data.frame(cbind(x,s)) # pair each client id with its global satisfaction
names(z)<-c("CLIENTES","satGlobal")
z<-lapply(z, function(x)as.numeric(x))
write.xlsx(z,file=F_XlsOutDatosEncuestas,sheetName=paste0("SatisfaccionGlobal",dmu),append = T,row.names = F)
#Archivo 4: satisfaction per criterion
###########
z<-vector()
# One row per (client, criterion) with the imputed answer for that criterion.
for(i in 1:numCriterios){
fila<- subset(datosCriterios,ID==i) # row describing criterion i
col<- as.numeric(fila[4])
s<-data[,col]
s[is.na(s)] = Mode(s) # impute missing answers with the modal answer
x<- data[,numColIdEncuesta] # survey identifier column (column four by convention)
z<-rbind(z,cbind(x,i,s))
}
z<- as.data.frame(z)
names(z)<-c("CLIENTES","CRITERIOS","satCriterios")
z<-lapply(z, function(x)as.numeric(x))
write.xlsx(z,file=F_XlsOutDatosEncuestas,sheetName=paste0("SatisfaccionCriterios",dmu),append = T,row.names = F)
#Archivo 5/5: satisfaction scores for the sub-criteria
###########
datosSubcriterios <- read.xlsx(F_XlsDataEncuestas, "infoSubcriterios")
z<-vector()
# Build one row per (client, criterion, sub-criterion) with the imputed answer.
for(i in 1:numCriterios) {
filas<- subset(datosSubcriterios,Criterio==i)
numFilas<- nrow(filas)
for(j in 1:numFilas) {
col<- as.numeric( filas[j,3])  # third field holds the survey-sheet column for this sub-criterion
x<- data[,numColIdEncuesta] # survey identifier column (column four by convention)
s<-data[,col]
s[is.na(s)] = Mode(s) # impute missing answers with the modal answer
z<-rbind(z,cbind(x,i,j,s))
}
}
z<- as.data.frame(z)
names(z)<-c("CLIENTES","CRITERIOS","SUBCRITERIOS","satSubcriterios")
z<-lapply(z, function(x)as.numeric(x))
write.xlsx(z,file=F_XlsOutDatosEncuestas,sheetName=paste0("SatisfaccionSubcriterios",dmu),append = T,row.names = F)
|
# Scratch block of per-dataset input paths (UCI grid-search histories).
# Guarded by `if (FALSE)` so nothing here runs automatically; execute the
# desired hist_select/path_result pair by hand before sourcing the rest.
if (FALSE){
#hist_1 = history
hist_select = "/Users/oli/Dropbox/__ZHAW/__Projekte_Post_ZHAH/shared_Oliver_Beate/mlt/UCI_Datasets/naval-propulsion-plant/2020-03-0732.201_grid_search/history_120000.Rdata"
path_result = "/Users/oli/Dropbox/__ZHAW/__Projekte_Post_ZHAH/shared_Oliver_Beate/mlt/UCI_Datasets/naval-propulsion-plant/2020-03-0732.201_grid_search/"
hist_select = "/Users/oli/Dropbox/__ZHAW/__Projekte_Post_ZHAH/shared_Oliver_Beate/mlt/UCI_Datasets/bostonHousing/2020-03-0629.624_grid_search/history_8000.Rdata"
path_result = "/Users/oli/Dropbox/__ZHAW/__Projekte_Post_ZHAH/shared_Oliver_Beate/mlt/UCI_Datasets/bostonHousing/2020-03-0629.624_grid_search/"
hist_select = "/Users/oli/Dropbox/__ZHAW/__Projekte_Post_ZHAH/shared_Oliver_Beate/mlt/UCI_Datasets/kin8nm/2020-03-0733.527_grid_search/history_30000.Rdata"
path_result = "/Users/oli/Dropbox/__ZHAW/__Projekte_Post_ZHAH/shared_Oliver_Beate/mlt/UCI_Datasets/kin8nm/2020-03-0733.527_grid_search/"
hist_select = "/Users/oli/Dropbox/__ZHAW/__Projekte_Post_ZHAH/shared_Oliver_Beate/mlt/UCI_Datasets/energy/2020-03-0630.52_grid_search/history_18000.Rdata"
path_result = "/Users/oli/Dropbox/__ZHAW/__Projekte_Post_ZHAH/shared_Oliver_Beate/mlt/UCI_Datasets/energy/2020-03-0630.52_grid_search/"
hist_select = "/Users/oli/Dropbox/__ZHAW/__Projekte_Post_ZHAH/shared_Oliver_Beate/mlt/UCI_Datasets/concrete/2020-03-0638.611_grid_search/history_9000.Rdata"
path_result = "/Users/oli/Dropbox/__ZHAW/__Projekte_Post_ZHAH/shared_Oliver_Beate/mlt/UCI_Datasets/concrete/2020-03-0638.611_grid_search/"
hist_select = "/Users/oli/Dropbox/__ZHAW/__Projekte_Post_ZHAH/shared_Oliver_Beate/mlt/UCI_Datasets/concrete/2020-03-0627.489_grid_search/history_9000.Rdata"
path_result = "/Users/oli/Dropbox/__ZHAW/__Projekte_Post_ZHAH/shared_Oliver_Beate/mlt/UCI_Datasets/concrete/2020-03-0627.489_grid_search/"
hist_select = "/Users/oli/Dropbox/__ZHAW/__Projekte_Post_ZHAH/shared_Oliver_Beate/mlt/UCI_Datasets/protein-tertiary-structure/2020-03-0534.314_grid_search/history_12000.Rdata"
path_result = "/Users/oli/Dropbox/__ZHAW/__Projekte_Post_ZHAH/shared_Oliver_Beate/mlt/UCI_Datasets/protein-tertiary-structure/2020-03-0534.314_grid_search/"
hist_select = "/Users/oli/Dropbox/__ZHAW/__Projekte_Post_ZHAH/shared_Oliver_Beate/mlt/UCI_Datasets/yacht/2020-03-0635.059_grid_search/history_7000.Rdata"
path_result = "/Users/oli/Dropbox/__ZHAW/__Projekte_Post_ZHAH/shared_Oliver_Beate/mlt/UCI_Datasets/yacht/2020-03-0635.059_grid_search/"
#hist_select = "c:/Users/sick/dl Dropbox/beate sick/IDP_Projekte/DL_Projekte/shared_Oliver_Beate/mlt/UCI_Datasets/kin8nm/2020-03-0533.019_grid_search/history_200.Rdata"
#path_result = "c:/Users/sick/dl Dropbox/beate sick/IDP_Projekte/DL_Projekte/shared_Oliver_Beate/mlt/UCI_Datasets/kin8nm/2020-03-0533.019_grid_search/"
}
# NOTE(review): load() fails unless hist_select was assigned by hand above.
load(hist_select)
library(ggplot2)
history = hist_grid  # the .Rdata file is expected to provide `hist_grid`
str(history)
summary(history)
library(tidyr)
# Reshape the wide nll columns (nll_train ... nll_test) into long format:
# one row per observation with columns `sample` (which nll) and `loss`.
h = gather(history, 'sample', 'loss', nll_train:nll_test)
h$sample = as.factor(h$sample) # test or validation/test
summary(h)
str(h)
xtabs(~spatz+x_scale+regularization, data=h)  # observation counts per hyper-parameter cell
gridplot = list()
# Build one loss-vs-step plot per (x_scale, spatz, regularization) cell,
# faceted by sample (train/test), and collect the plots in `gridplot`.
# NOTE(review): the loops assume x_scale/spatz/regularization are factors;
# if a column is not a factor, levels() is NULL and 1:length(...) becomes
# 1:0, iterating over c(1, 0) -- confirm the column types in hist_grid.
for(i in 1:length(levels(h$x_scale)) ){
for( j in 1:length(levels(h$spatz))){
for( k in 1:length(levels(h$regularization))){
# i=1
# j=2
# k=3
h1 = h
x_scale_pick = levels(h1$x_scale)[i]
h1= h1[h1$x_scale==x_scale_pick,]
spatz_pick = levels(h1$spatz)[j]
h1= h1[h1$spatz==spatz_pick,]
regularization_pick = levels(h1$regularization)[k]
h1= h1[h1$regularization==regularization_pick,]
main = h1$method[1]
sub = paste("--x_scale_", x_scale_pick,
"--spatz_", spatz_pick ,
"--regularization_", regularization_pick, sep="" )
# One line per fold; horizontal reference line at loss 2.48.
p = ggplot(data=h1, aes(x = step, y = loss, color=fold)) +
geom_line(lwd=1.5) +
geom_hline(yintercept=2.48) +
#ylim(0,1) +
ggtitle(main, subtitle=sub) +
facet_wrap(. ~ sample, ncol=2) +
theme_bw() +
theme(plot.title = element_text(size=28),
plot.subtitle = element_text(size=26),
axis.text = element_text( size = 20 ),
axis.text.x = element_text( size = 24 ),
axis.title = element_text( size = 26, face = "bold" ),
strip.text.x = element_text(size = 26, face = "bold"),
strip.text.y= element_text(size = 26, face = "bold"))
# ggsave(paste0(path_result, "loss_",main,sub,".png"),
# plot = last_plot(), width = 18, height = 14)
gridplot = c(gridplot, list(p))
}
}
}
# Save one stacked figure per x_scale level, combining the plots built above.
for(i in levels(h$x_scale)){
idx = sapply(gridplot, function(x) x$data$x_scale[1])  # x_scale of each stored plot
# NOTE(review): if x_scale is a factor, sapply() may simplify these length-1
# factors to their integer codes, so comparing against the level name `i`
# could match nothing -- verify the column type before relying on this.
idx = which(idx==i)
p=cowplot::plot_grid(plotlist = gridplot[idx], ncol=1)
ggsave(paste0(path_result, "loss--x_scale_", i, ".png"),
plot = p, width = 30, height = 8*6, limitsize = F)
}
library(dplyr)
# Summarise test-set loss at the final step for the spatz==0, x_scale==TRUE slice.
max_step = max(h$step)
hmax = filter(h, step == max_step, sample=='nll_test', spatz == 0, x_scale==TRUE)
hmax %>% group_by(regularization)  # NOTE(review): result is only printed, not assigned
by(hmax$loss, hmax$regularization, mean)
by(hmax$loss, hmax$regularization, sd)
# # get x_scale value for each plot
# idx = sapply(gridplot, function(x) x$data$x_scale[1])
# # reorder plots such that x_scale=FALSE is in the left column and x_scale=TRUE in the right.
# id = c(1,7,2,8,3,9,4,10,5,11,6,12)
# p=cowplot::plot_grid(plotlist = gridplot[idx], ncol=2)
# ggsave(paste0(path_result, "loss.png"),
# plot = p, width = 18*2, height = 14*6, limitsize = F)
# library(dplyr)
# #dd = hh %>% filter(step == 12000) %>% filter(sample == 'nll_test')
# ddd = history %>% filter(step == 71000)
# mean(ddd$nll_test)
# sd(ddd$nll_test) / sqrt(5)
| /mlt/eval_grid_model.R | no_license | tensorchiefs/dl_playr | R | false | false | 5,766 | r | if (FALSE){
#hist_1 = history
hist_select = "/Users/oli/Dropbox/__ZHAW/__Projekte_Post_ZHAH/shared_Oliver_Beate/mlt/UCI_Datasets/naval-propulsion-plant/2020-03-0732.201_grid_search/history_120000.Rdata"
path_result = "/Users/oli/Dropbox/__ZHAW/__Projekte_Post_ZHAH/shared_Oliver_Beate/mlt/UCI_Datasets/naval-propulsion-plant/2020-03-0732.201_grid_search/"
hist_select = "/Users/oli/Dropbox/__ZHAW/__Projekte_Post_ZHAH/shared_Oliver_Beate/mlt/UCI_Datasets/bostonHousing/2020-03-0629.624_grid_search/history_8000.Rdata"
path_result = "/Users/oli/Dropbox/__ZHAW/__Projekte_Post_ZHAH/shared_Oliver_Beate/mlt/UCI_Datasets/bostonHousing/2020-03-0629.624_grid_search/"
hist_select = "/Users/oli/Dropbox/__ZHAW/__Projekte_Post_ZHAH/shared_Oliver_Beate/mlt/UCI_Datasets/kin8nm/2020-03-0733.527_grid_search/history_30000.Rdata"
path_result = "/Users/oli/Dropbox/__ZHAW/__Projekte_Post_ZHAH/shared_Oliver_Beate/mlt/UCI_Datasets/kin8nm/2020-03-0733.527_grid_search/"
hist_select = "/Users/oli/Dropbox/__ZHAW/__Projekte_Post_ZHAH/shared_Oliver_Beate/mlt/UCI_Datasets/energy/2020-03-0630.52_grid_search/history_18000.Rdata"
path_result = "/Users/oli/Dropbox/__ZHAW/__Projekte_Post_ZHAH/shared_Oliver_Beate/mlt/UCI_Datasets/energy/2020-03-0630.52_grid_search/"
hist_select = "/Users/oli/Dropbox/__ZHAW/__Projekte_Post_ZHAH/shared_Oliver_Beate/mlt/UCI_Datasets/concrete/2020-03-0638.611_grid_search/history_9000.Rdata"
path_result = "/Users/oli/Dropbox/__ZHAW/__Projekte_Post_ZHAH/shared_Oliver_Beate/mlt/UCI_Datasets/concrete/2020-03-0638.611_grid_search/"
hist_select = "/Users/oli/Dropbox/__ZHAW/__Projekte_Post_ZHAH/shared_Oliver_Beate/mlt/UCI_Datasets/concrete/2020-03-0627.489_grid_search/history_9000.Rdata"
path_result = "/Users/oli/Dropbox/__ZHAW/__Projekte_Post_ZHAH/shared_Oliver_Beate/mlt/UCI_Datasets/concrete/2020-03-0627.489_grid_search/"
hist_select = "/Users/oli/Dropbox/__ZHAW/__Projekte_Post_ZHAH/shared_Oliver_Beate/mlt/UCI_Datasets/protein-tertiary-structure/2020-03-0534.314_grid_search/history_12000.Rdata"
path_result = "/Users/oli/Dropbox/__ZHAW/__Projekte_Post_ZHAH/shared_Oliver_Beate/mlt/UCI_Datasets/protein-tertiary-structure/2020-03-0534.314_grid_search/"
hist_select = "/Users/oli/Dropbox/__ZHAW/__Projekte_Post_ZHAH/shared_Oliver_Beate/mlt/UCI_Datasets/yacht/2020-03-0635.059_grid_search/history_7000.Rdata"
path_result = "/Users/oli/Dropbox/__ZHAW/__Projekte_Post_ZHAH/shared_Oliver_Beate/mlt/UCI_Datasets/yacht/2020-03-0635.059_grid_search/"
#hist_select = "c:/Users/sick/dl Dropbox/beate sick/IDP_Projekte/DL_Projekte/shared_Oliver_Beate/mlt/UCI_Datasets/kin8nm/2020-03-0533.019_grid_search/history_200.Rdata"
#path_result = "c:/Users/sick/dl Dropbox/beate sick/IDP_Projekte/DL_Projekte/shared_Oliver_Beate/mlt/UCI_Datasets/kin8nm/2020-03-0533.019_grid_search/"
}
load(hist_select)
library(ggplot2)
history = hist_grid
str(history)
summary(history)
library(tidyr)
h = gather(history, 'sample', 'loss', nll_train:nll_test)
h$sample = as.factor(h$sample) # test or validation/test
summary(h)
str(h)
xtabs(~spatz+x_scale+regularization, data=h)
gridplot = list()
# Build one loss-vs-step plot per (x_scale, spatz, regularization) cell,
# faceted by sample (train/test), and collect the plots in `gridplot`.
# NOTE(review): the loops assume x_scale/spatz/regularization are factors;
# if a column is not a factor, levels() is NULL and 1:length(...) becomes
# 1:0, iterating over c(1, 0) -- confirm the column types in hist_grid.
for(i in 1:length(levels(h$x_scale)) ){
for( j in 1:length(levels(h$spatz))){
for( k in 1:length(levels(h$regularization))){
# i=1
# j=2
# k=3
h1 = h
x_scale_pick = levels(h1$x_scale)[i]
h1= h1[h1$x_scale==x_scale_pick,]
spatz_pick = levels(h1$spatz)[j]
h1= h1[h1$spatz==spatz_pick,]
regularization_pick = levels(h1$regularization)[k]
h1= h1[h1$regularization==regularization_pick,]
main = h1$method[1]
sub = paste("--x_scale_", x_scale_pick,
"--spatz_", spatz_pick ,
"--regularization_", regularization_pick, sep="" )
# One line per fold; horizontal reference line at loss 2.48.
p = ggplot(data=h1, aes(x = step, y = loss, color=fold)) +
geom_line(lwd=1.5) +
geom_hline(yintercept=2.48) +
#ylim(0,1) +
ggtitle(main, subtitle=sub) +
facet_wrap(. ~ sample, ncol=2) +
theme_bw() +
theme(plot.title = element_text(size=28),
plot.subtitle = element_text(size=26),
axis.text = element_text( size = 20 ),
axis.text.x = element_text( size = 24 ),
axis.title = element_text( size = 26, face = "bold" ),
strip.text.x = element_text(size = 26, face = "bold"),
strip.text.y= element_text(size = 26, face = "bold"))
# ggsave(paste0(path_result, "loss_",main,sub,".png"),
# plot = last_plot(), width = 18, height = 14)
gridplot = c(gridplot, list(p))
}
}
}
# Save one stacked figure per x_scale level, combining the plots built above.
for(i in levels(h$x_scale)){
idx = sapply(gridplot, function(x) x$data$x_scale[1])  # x_scale of each stored plot
# NOTE(review): if x_scale is a factor, sapply() may simplify these length-1
# factors to their integer codes, so comparing against the level name `i`
# could match nothing -- verify the column type before relying on this.
idx = which(idx==i)
p=cowplot::plot_grid(plotlist = gridplot[idx], ncol=1)
ggsave(paste0(path_result, "loss--x_scale_", i, ".png"),
plot = p, width = 30, height = 8*6, limitsize = F)
}
library(dplyr)
# Summarise test-set loss at the final step for the spatz==0, x_scale==TRUE slice.
max_step = max(h$step)
hmax = filter(h, step == max_step, sample=='nll_test', spatz == 0, x_scale==TRUE)
hmax %>% group_by(regularization)  # NOTE(review): result is only printed, not assigned
by(hmax$loss, hmax$regularization, mean)
by(hmax$loss, hmax$regularization, sd)
# # get x_scale value for each plot
# idx = sapply(gridplot, function(x) x$data$x_scale[1])
# # reorder plots such that x_scale=FALSE is in the left column and x_scale=TRUE in the right.
# id = c(1,7,2,8,3,9,4,10,5,11,6,12)
# p=cowplot::plot_grid(plotlist = gridplot[idx], ncol=2)
# ggsave(paste0(path_result, "loss.png"),
# plot = p, width = 18*2, height = 14*6, limitsize = F)
# library(dplyr)
# #dd = hh %>% filter(step == 12000) %>% filter(sample == 'nll_test')
# ddd = history %>% filter(step == 71000)
# mean(ddd$nll_test)
# sd(ddd$nll_test) / sqrt(5)
|
options(digits=10)
#**********************************************************************
#prep data
# NOTE(review): `metadata` must already exist in the workspace (it is loaded
# elsewhere); this script does not read it itself.
metadata_map=metadata
#format coordinates (stored scaled by 1e6; divide to get decimal degrees)
metadata_map$Lat=metadata_map$Lat/1000000
metadata_map$Lon=metadata_map$Lon/1000000
# Recolour logger categories for plotting
metadata_map$color[metadata_map$color=="darkgrey"]="black"
metadata_map$color[metadata_map$color=="lightblue"]="red"
#*********************************************************************
#map with leaflet
#generate map: one circle per logger, coloured by type, with legend and scale
library(leaflet)
leaflet()%>%
addTiles()%>%
addCircles(data=metadata_map,
color = metadata_map$color,
opacity = 1)%>%
addLegend(position="topright",
labels = c("Water surface Logger", "Sealed surface Logger", "Water Logger", "Vegetated surface Logger"),
colors=unique(metadata_map$color),
title="Logger types")%>%
addScaleBar(position="bottomleft")%>%
addMiniMap()
# NOTE(review): north.arrow() comes from GISTools, which is only loaded
# further below, and it draws on a base-graphics device, not on the leaflet
# widget above -- run top-to-bottom this line errors and cannot affect the map.
north.arrow(xb=52.55, yb=7.7, lab="NORTH", len=1, cex.lab=0.5)
#*********************************************************************
#map with rworldmap and ggplot
library(ggplot2) # ggplot() fortify()
library(dplyr) # %>% select() filter() bind_rows()
library(rgdal) # readOGR() spTransform()
library(raster) # intersect()
library(ggsn) # north2() scalebar()
library(rworldmap) # getMap()
library(GISTools)
world=getMap(resolution="high")
plot(world,
main="World",
ylim=c(49, 52),
xlim=c(6,8))
# NOTE(review): setwd() buried mid-script; everything below depends on this
# machine-specific path.
setwd("F:/satellite_data_Muenster/MODIS_neu")
MS_shape=readOGR("stadtgebiet.shp")
crs(MS_shape) #get crs
#transform coordinates to lat lon
MS_shape=spTransform(x = MS_shape, CRSobj = "+proj=longlat +datum=WGS84")
MS_shape_plot=fortify(MS_shape)  # polygons -> plain data frame for ggplot
world_DE=world[world@data$ADMIN %in% "Germany",]
par(plot.new=T)
# City outline with the logger locations coloured by type
ggplot()+
#geom_polygon(data=world_DE,
#aes(x=long, y=lat), fill=NA, color="black")+
geom_polygon(data=MS_shape_plot,
aes(x=long, y=lat), color="black", fill="white")+
geom_point(data=metadata_map, aes(x=Lon, y=Lat), color=metadata_map$color)+
coord_quickmap()+
#xlim(6,9)+
#ylim(51,52)+
theme_classic()+
xlab("Longitude")+
ylab("Latitude")
# NOTE(review): map.scale() (GISTools) draws on the current base-graphics
# plot, not on the grid-based ggplot above -- confirm this is intended.
map.scale(len=10, ndivs=5, xc=7.6, yc=51.85)
#try with tmap
library(tmap)
data("World")
tmap_mode("view")
# NOTE(review): no drawing layer (tm_polygons/tm_borders) is added here;
# confirm this chain renders anything useful.
tm_shape(World)+
tm_shape(shp=MS_shape,is.master = T)
| /mapping_rworldmap.r | no_license | DanaLooschelders/Urban_Heat_Island_Muenster | R | false | false | 2,281 | r | options(digits=10)
#**********************************************************************
#prep data
metadata_map=metadata
#format coordinates
metadata_map$Lat=metadata_map$Lat/1000000
metadata_map$Lon=metadata_map$Lon/1000000
metadata_map$color[metadata_map$color=="darkgrey"]="black"
metadata_map$color[metadata_map$color=="lightblue"]="red"
#*********************************************************************
#map with leaflet
#generate map
library(leaflet)
leaflet()%>%
addTiles()%>%
addCircles(data=metadata_map,
color = metadata_map$color,
opacity = 1)%>%
addLegend(position="topright",
labels = c("Water surface Logger", "Sealed surface Logger", "Water Logger", "Vegetated surface Logger"),
colors=unique(metadata_map$color),
title="Logger types")%>%
addScaleBar(position="bottomleft")%>%
addMiniMap()
north.arrow(xb=52.55, yb=7.7, lab="NORTH", len=1, cex.lab=0.5)
#*********************************************************************
#map with rworldmap and ggplot
library(ggplot2) # ggplot() fortify()
library(dplyr) # %>% select() filter() bind_rows()
library(rgdal) # readOGR() spTransform()
library(raster) # intersect()
library(ggsn) # north2() scalebar()
library(rworldmap) # getMap()
library(GISTools)
world=getMap(resolution="high")
plot(world,
main="World",
ylim=c(49, 52),
xlim=c(6,8))
setwd("F:/satellite_data_Muenster/MODIS_neu")
MS_shape=readOGR("stadtgebiet.shp")
crs(MS_shape) #get crs
#transform coordinates to lat lon
MS_shape=spTransform(x = MS_shape, CRSobj = "+proj=longlat +datum=WGS84")
MS_shape_plot=fortify(MS_shape)
world_DE=world[world@data$ADMIN %in% "Germany",]
par(plot.new=T)
ggplot()+
#geom_polygon(data=world_DE,
#aes(x=long, y=lat), fill=NA, color="black")+
geom_polygon(data=MS_shape_plot,
aes(x=long, y=lat), color="black", fill="white")+
geom_point(data=metadata_map, aes(x=Lon, y=Lat), color=metadata_map$color)+
coord_quickmap()+
#xlim(6,9)+
#ylim(51,52)+
theme_classic()+
xlab("Longitude")+
ylab("Latitude")
map.scale(len=10, ndivs=5, xc=7.6, yc=51.85)
#try with tmap
library(tmap)
data("World")
tmap_mode("view")
tm_shape(World)+
tm_shape(shp=MS_shape,is.master = T)
|
#############################
# rmst1 (one-arm) -- hidden
#############################
rmst1=function(time, status, tau, alpha=0.05){
#-- Restricted mean survival time (RMST) and restricted mean time lost (RMTL)
#-- for a single arm, truncated at tau, with (1-alpha) confidence intervals.
#-- time   -- follow-up times
#-- status -- event indicator
#-- tau    -- truncation time
#-- alpha  -- gives (1-alpha) confidence interval
# Kaplan-Meier fit for the single arm
ft= survfit(Surv(time, status)~1)
idx=ft$time<=tau
# event/censoring times up to tau, with tau appended as the final endpoint
wk.time=sort(c(ft$time[idx],tau))
wk.surv=ft$surv[idx]
wk.n.risk =ft$n.risk[idx]
wk.n.event=ft$n.event[idx]
# RMST = area under the KM step function from 0 to tau
# (survival is 1 before the first observed time)
time.diff <- diff(c(0, wk.time))
areas <- time.diff * c(1, wk.surv)
rmst = sum(areas)
rmst
# Greenwood-type variance increments d/(n*(n-d)), zero when n == d
wk.var <- ifelse((wk.n.risk-wk.n.event)==0, 0,
wk.n.event /(wk.n.risk *(wk.n.risk - wk.n.event)))
wk.var =c(wk.var,0)
# Variance of the RMST: sum over times of (area beyond t)^2 * variance term
rmst.var = sum( cumsum(rev(areas[-1]))^2 * rev(wk.var)[-1])
rmst.se = sqrt(rmst.var)
#--- check ---
# print(ft, rmean=tau)
#--- output ---
# Row 1: RMST; row 2: RMTL = tau - RMST (same SE, CI limits mirrored)
out=matrix(0,2,4)
out[1,]=c(rmst, rmst.se, rmst-qnorm(1-alpha/2)*rmst.se, rmst+qnorm(1-alpha/2)*rmst.se)
out[2,]=c(tau-out[1,1], rmst.se, tau-out[1,4], tau-out[1,3])
rownames(out)=c("RMST","RMTL")
colnames(out)=c("Est.", "se", paste("lower .",round((1-alpha)*100, digits=0), sep=""), paste("upper .",round((1-alpha)*100, digits=0), sep=""))
Z=list()
Z$result=out
Z$rmst = out[1,]
Z$rmtl = out[2,]
Z$tau=tau
Z$rmst.var = rmst.var
Z$fit=ft
class(Z)="rmst1"
return(Z)
}
| /R/rmst1.R | no_license | cran/survRM2 | R | false | false | 1,305 | r | #############################
# rmst1 (one-arm) -- hidden
#############################
rmst1=function(time, status, tau, alpha=0.05){
#-- Restricted mean survival time (RMST) and restricted mean time lost (RMTL)
#-- for a single arm, truncated at tau, with (1-alpha) confidence intervals.
#-- time   -- follow-up times
#-- status -- event indicator
#-- tau    -- truncation time
#-- alpha  -- gives (1-alpha) confidence interval
# Kaplan-Meier fit for the single arm
ft= survfit(Surv(time, status)~1)
idx=ft$time<=tau
# event/censoring times up to tau, with tau appended as the final endpoint
wk.time=sort(c(ft$time[idx],tau))
wk.surv=ft$surv[idx]
wk.n.risk =ft$n.risk[idx]
wk.n.event=ft$n.event[idx]
# RMST = area under the KM step function from 0 to tau
# (survival is 1 before the first observed time)
time.diff <- diff(c(0, wk.time))
areas <- time.diff * c(1, wk.surv)
rmst = sum(areas)
rmst
# Greenwood-type variance increments d/(n*(n-d)), zero when n == d
wk.var <- ifelse((wk.n.risk-wk.n.event)==0, 0,
wk.n.event /(wk.n.risk *(wk.n.risk - wk.n.event)))
wk.var =c(wk.var,0)
# Variance of the RMST: sum over times of (area beyond t)^2 * variance term
rmst.var = sum( cumsum(rev(areas[-1]))^2 * rev(wk.var)[-1])
rmst.se = sqrt(rmst.var)
#--- check ---
# print(ft, rmean=tau)
#--- output ---
# Row 1: RMST; row 2: RMTL = tau - RMST (same SE, CI limits mirrored)
out=matrix(0,2,4)
out[1,]=c(rmst, rmst.se, rmst-qnorm(1-alpha/2)*rmst.se, rmst+qnorm(1-alpha/2)*rmst.se)
out[2,]=c(tau-out[1,1], rmst.se, tau-out[1,4], tau-out[1,3])
rownames(out)=c("RMST","RMTL")
colnames(out)=c("Est.", "se", paste("lower .",round((1-alpha)*100, digits=0), sep=""), paste("upper .",round((1-alpha)*100, digits=0), sep=""))
Z=list()
Z$result=out
Z$rmst = out[1,]
Z$rmtl = out[2,]
Z$tau=tau
Z$rmst.var = rmst.var
Z$fit=ft
class(Z)="rmst1"
return(Z)
}
|
#007 fuzzy agrep matching with the bullet points as well
#this is all just bullets now, need to add the fuzzy agrep matching!
# Load the Home Depot Kaggle inputs and attach descriptions, bullet
# attributes and brand names to both train and test.
setwd("C:/Users/tjvan/Documents/Kaggle/HomeDepot_early_2016/homedepotgit")
#setwd("~/Analytics/Kaggle/HomeDepot/homedepotgit")
library(readr)
library(dplyr)
cat("Reading data\n")
train <- read_csv('../input/train.csv')
test <- read_csv('../input/test.csv')
desc <- read_csv('../input/product_descriptions.csv')
att <- read_csv('../input/attributes.csv')
# Brand-name rows of the attributes table, renamed for the merge below
brandnames <- filter(att, name == "MFG Brand Name")
colnames(brandnames) <- c("product_uid", "name", "brandname")
# Keep only the "Bullet*" attributes and concatenate them per product
att <- mutate(att, "IsBullet"=grepl("Bullet", name))
att <- filter(att, IsBullet == TRUE)
att.bullets <- att %>% group_by(product_uid) %>% summarise(bulletvalues=paste(value, collapse=" "))
rm(att)
cat("Merge description with train and test data \n")
train <- merge(train,desc, by.x = "product_uid", by.y = "product_uid", all.x = TRUE, all.y = FALSE)
test <- merge(test,desc, by.x = "product_uid", by.y = "product_uid", all.x = TRUE, all.y = FALSE)
cat("Merge bullet points with train and test data \n")
train <- merge(train, att.bullets, by.x = "product_uid", by.y = "product_uid", all.x = TRUE, all.y = FALSE)
test <- merge(test, att.bullets, by.x = "product_uid", by.y = "product_uid", all.x = TRUE, all.y = FALSE)
cat("Merge the brand name with train and test data \n")
train <- merge(train,brandnames, by.x = "product_uid", by.y = "product_uid", all.x = TRUE, all.y = FALSE); train$name = NULL
test <- merge(test,brandnames, by.x = "product_uid", by.y = "product_uid", all.x = TRUE, all.y = FALSE); test$name = NULL
#put back in order (just in case)
train <- arrange(train, id)
test <- arrange(test, id)
t <- Sys.time()
# Count how many of the search-term words appear (exactly and fuzzily) in the
# product title, description, bullet attributes and brand name.
#
# Args:
#   words   - search term (single string; words separated by spaces)
#   title   - product title
#   desc    - product description
#   bullets - concatenated "Bullet" attribute values (may be NA after the merge)
#   brand   - MFG brand name (may be NA after the merge)
#
# Returns a numeric vector of length 9:
#   c(n_title, nwords, n_desc, n_bullets, n_brand,
#     an_title, an_desc, count_desc, count_bullet)
# where n_*     = number of search words with an exact whole-word match,
#       an_*    = number of search words with a fuzzy (agrepl) match,
#       count_* = total whole-word occurrences summed over all search words.
# NOTE(review): if `bullets` or `brand` is NA, grepl() returns NA and the
# corresponding n_* counter propagates NA (same as the original behaviour).
# FIX: removed leftover debug print() calls (they ran once per word for every
# row of the mapply over train/test) and the duplicate gregexpr() call per
# field; scalar if-conditions now use && with the NA test first.
word_match <- function(words, title, desc, bullets, brand) {
  n_title <- 0
  n_desc <- 0
  n_bullets <- 0
  n_brand <- 0
  count_desc <- 0
  count_bullet <- 0
  an_title <- 0
  an_desc <- 0
  words <- gsub("[ ]{2,}", " ", words)  # collapse runs of spaces to one
  words <- unlist(strsplit(words, " "))
  nwords <- length(words)
  for (i in seq_along(words)) {
    # whole-word pattern: the word bounded by start/end of string or a space
    pattern <- paste("(^| )", words[i], "($| )", sep = "")
    n_title <- n_title + grepl(pattern, title, perl = TRUE, ignore.case = TRUE)
    n_desc <- n_desc + grepl(pattern, desc, perl = TRUE, ignore.case = TRUE)
    n_bullets <- n_bullets + grepl(pattern, bullets, perl = TRUE, ignore.case = TRUE)
    n_brand <- n_brand + grepl(pattern, brand, perl = TRUE, ignore.case = TRUE)
    an_title <- an_title + agrepl(words[i], title, ignore.case = TRUE, max.distance = 0.13)
    an_desc <- an_desc + agrepl(words[i], desc, ignore.case = TRUE, max.distance = 0.13)
    # frequency of the exact word in the description
    # (gregexpr reports -1 as the first match position when nothing matched)
    desc_matches <- gregexpr(pattern, desc, perl = TRUE, ignore.case = TRUE)[[1]]
    if (!is.na(desc_matches[[1]]) && desc_matches[[1]] > 0) {
      count_desc <- count_desc + length(desc_matches)
    }
    # frequency of the exact word in the bullet attributes
    bullet_matches <- gregexpr(pattern, bullets, perl = TRUE, ignore.case = TRUE)[[1]]
    if (!is.na(bullet_matches[[1]]) && bullet_matches[[1]] > 0) {
      count_bullet <- count_bullet + length(bullet_matches)
    }
  }
  return(c(n_title, nwords, n_desc, n_bullets, n_brand,
           an_title, an_desc, count_desc, count_bullet))
}
#put back in order (just in case)
train <- arrange(train, id)
test <- arrange(test, id)
cat("Get number of words and word matching title in train\n")
# Apply word_match() row-wise; t() turns the 9 x nrow result into nrow x 9
train_words <- as.data.frame(t(mapply(word_match,train$search_term,train$product_title,train$product_description,train$bulletvalues,train$brandname)))
train$nmatch_title <- train_words[,1]
train$nwords <- train_words[,2]
train$nmatch_desc <- train_words[,3]
train$nmatch_bullets <- train_words[,4]
train$nmatch_brand <- train_words[,5]
#add fuzzy's here
train$fuzzy_match_title <- train_words[,6]
train$fuzzy_match_desc <- train_words[,7]
train$countmatch_desc <- train_words[,8]
train$countmatch_bullet <- train_words[,9]
train <- arrange(train, id)
# Quick exploratory views of the new features against relevance
plot(train$nmatch_brand, train$relevance)
max(train$countmatch_desc)
max(train$nmatch_desc)
plot(train$countmatch_desc, train$relevance)
plot(train$countmatch_bullet, train$relevance)
hist(train$countmatch_desc)
hist(train$relevance)
#look at the super high relevancy records with low match results:
#first set up the differences
#THIS led me to want to do a fuzzy match
# Express the match counts as percentages of the number of search words.
trainpercents <- mutate(train, "perc_title"=(nmatch_title / nwords) * 100,
                        "perc_desc"=(nmatch_desc / nwords) * 100,
                        "perc_bullet"=(nmatch_bullets / nwords) * 100,
                        "perc_brand"=(nmatch_brand / nwords) * 100)
# Highly relevant rows where no field matched at all -- the motivation for
# fuzzy matching.  BUG FIX: the original filtered `train.percents`, which
# does not exist (the object created above is `trainpercents`), so this
# line stopped the script with "object 'train.percents' not found".
train.highrel <- filter(trainpercents, relevance == 3, perc_title == 0, perc_desc == 0,
                        perc_bullet == 0, perc_brand == 0)
summary(train$countmatch_desc)
summary(train$countmatch_bullet)
#are all description and bullet values the same?
#train.question <- mutate(train, "bulletdescdiff"=nmatch_desc - nmatch_bullets)
#train.question <- filter(train.question, bulletdescdiff > 0 | bulletdescdiff < 0)
#train.question <- mutate(train.question, "diffsqrd"=bulletdescdiff^2)
#train.question <- arrange(train.question, desc(diffsqrd)) #biggest difference in desc / bullets
cat("Get number of words and word matching title in test\n")
# Same feature construction for the test set
test_words <- as.data.frame(t(mapply(word_match,test$search_term,test$product_title,test$product_description,test$bulletvalues,test$brandname)))
test$nmatch_title <- test_words[,1]
test$nwords <- test_words[,2]
test$nmatch_desc <- test_words[,3]
test$nmatch_bullets <- test_words[,4]
test$nmatch_brand <- test_words[,5]
test$fuzzy_match_title <- test_words[,6]
test$fuzzy_match_desc <- test_words[,7]
test$countmatch_desc <- test_words[,8]
test$countmatch_bullet <- test_words[,9]
test <- arrange(test, id)
rm(train_words,test_words)
#max(test$count_desc)
cat("A simple linear model on number of words and number of words that match\n")
glm_model <- glm(relevance~nmatch_bullets+nmatch_title+nmatch_desc+nwords+countmatch_desc+nmatch_brand+countmatch_bullet,data=train)
summary(glm_model)
# NOTE(review): glm2/lm2 depend on `trainpercents` built in the section above.
glm2 <- glm(data = trainpercents, relevance~perc_desc+perc_title+perc_bullet+perc_brand)
summary(glm2)
lm2 <- lm(data = trainpercents, relevance~perc_desc+perc_title+perc_bullet+perc_brand)
summary(lm2)
#normal LM?
#cat("A simple linear model on number of words and number of words that match\n")
#lm_model <- lm(relevance~nmatch_bullets+nmatch_title+nmatch_desc+nwords,data=train)
#summary(lm_model)
#test_relevance <- predict(glm_model,test)
#test_relevance <- ifelse(test_relevance>3,3,test_relevance)
#test_relevance <- ifelse(test_relevance<1,1,test_relevance)
#test$test_relevance <- test_relevance
#submission_bullets001 <- data.frame(id=test$id,relevance=test$test_relevance)
#write_csv(submission_bullets001,"../output/submission_bullets001.csv")
#print(Sys.time()-t)
#head(test)
# Persist the engineered features for downstream modelling
write_csv(train, "../traintest/HD1_baseline_trainWithBulletsAndDescCounts.csv")
write_csv(test, "../traintest/HD1_baseline_testWithBulletsAndDescCounts.csv")
| /007-fuzzy agrep matching and bullets.R | no_license | chuhoting/KDS_HomeDepot | R | false | false | 7,604 | r |
#007 fuzzy agrep matching with the bullet points as well
#this is all just bullets now, need to add the fuzzy agrep matching!
# Load the Home Depot Kaggle inputs and attach descriptions, bullet
# attributes and brand names to both train and test.
setwd("C:/Users/tjvan/Documents/Kaggle/HomeDepot_early_2016/homedepotgit")
#setwd("~/Analytics/Kaggle/HomeDepot/homedepotgit")
library(readr)
library(dplyr)
cat("Reading data\n")
train <- read_csv('../input/train.csv')
test <- read_csv('../input/test.csv')
desc <- read_csv('../input/product_descriptions.csv')
att <- read_csv('../input/attributes.csv')
# Brand-name rows of the attributes table, renamed for the merge below
brandnames <- filter(att, name == "MFG Brand Name")
colnames(brandnames) <- c("product_uid", "name", "brandname")
# Keep only the "Bullet*" attributes and concatenate them per product
att <- mutate(att, "IsBullet"=grepl("Bullet", name))
att <- filter(att, IsBullet == TRUE)
att.bullets <- att %>% group_by(product_uid) %>% summarise(bulletvalues=paste(value, collapse=" "))
rm(att)
cat("Merge description with train and test data \n")
train <- merge(train,desc, by.x = "product_uid", by.y = "product_uid", all.x = TRUE, all.y = FALSE)
test <- merge(test,desc, by.x = "product_uid", by.y = "product_uid", all.x = TRUE, all.y = FALSE)
cat("Merge bullet points with train and test data \n")
train <- merge(train, att.bullets, by.x = "product_uid", by.y = "product_uid", all.x = TRUE, all.y = FALSE)
test <- merge(test, att.bullets, by.x = "product_uid", by.y = "product_uid", all.x = TRUE, all.y = FALSE)
cat("Merge the brand name with train and test data \n")
train <- merge(train,brandnames, by.x = "product_uid", by.y = "product_uid", all.x = TRUE, all.y = FALSE); train$name = NULL
test <- merge(test,brandnames, by.x = "product_uid", by.y = "product_uid", all.x = TRUE, all.y = FALSE); test$name = NULL
#put back in order (just in case)
train <- arrange(train, id)
test <- arrange(test, id)
t <- Sys.time()
# Count how many of the search-term words appear (exactly and fuzzily) in the
# product title, description, bullet attributes and brand name.
#
# Args:
#   words   - search term (single string; words separated by spaces)
#   title   - product title
#   desc    - product description
#   bullets - concatenated "Bullet" attribute values (may be NA after the merge)
#   brand   - MFG brand name (may be NA after the merge)
#
# Returns a numeric vector of length 9:
#   c(n_title, nwords, n_desc, n_bullets, n_brand,
#     an_title, an_desc, count_desc, count_bullet)
# where n_*     = number of search words with an exact whole-word match,
#       an_*    = number of search words with a fuzzy (agrepl) match,
#       count_* = total whole-word occurrences summed over all search words.
# NOTE(review): if `bullets` or `brand` is NA, grepl() returns NA and the
# corresponding n_* counter propagates NA (same as the original behaviour).
# FIX: removed leftover debug print() calls (they ran once per word for every
# row of the mapply over train/test) and the duplicate gregexpr() call per
# field; scalar if-conditions now use && with the NA test first.
word_match <- function(words, title, desc, bullets, brand) {
  n_title <- 0
  n_desc <- 0
  n_bullets <- 0
  n_brand <- 0
  count_desc <- 0
  count_bullet <- 0
  an_title <- 0
  an_desc <- 0
  words <- gsub("[ ]{2,}", " ", words)  # collapse runs of spaces to one
  words <- unlist(strsplit(words, " "))
  nwords <- length(words)
  for (i in seq_along(words)) {
    # whole-word pattern: the word bounded by start/end of string or a space
    pattern <- paste("(^| )", words[i], "($| )", sep = "")
    n_title <- n_title + grepl(pattern, title, perl = TRUE, ignore.case = TRUE)
    n_desc <- n_desc + grepl(pattern, desc, perl = TRUE, ignore.case = TRUE)
    n_bullets <- n_bullets + grepl(pattern, bullets, perl = TRUE, ignore.case = TRUE)
    n_brand <- n_brand + grepl(pattern, brand, perl = TRUE, ignore.case = TRUE)
    an_title <- an_title + agrepl(words[i], title, ignore.case = TRUE, max.distance = 0.13)
    an_desc <- an_desc + agrepl(words[i], desc, ignore.case = TRUE, max.distance = 0.13)
    # frequency of the exact word in the description
    # (gregexpr reports -1 as the first match position when nothing matched)
    desc_matches <- gregexpr(pattern, desc, perl = TRUE, ignore.case = TRUE)[[1]]
    if (!is.na(desc_matches[[1]]) && desc_matches[[1]] > 0) {
      count_desc <- count_desc + length(desc_matches)
    }
    # frequency of the exact word in the bullet attributes
    bullet_matches <- gregexpr(pattern, bullets, perl = TRUE, ignore.case = TRUE)[[1]]
    if (!is.na(bullet_matches[[1]]) && bullet_matches[[1]] > 0) {
      count_bullet <- count_bullet + length(bullet_matches)
    }
  }
  return(c(n_title, nwords, n_desc, n_bullets, n_brand,
           an_title, an_desc, count_desc, count_bullet))
}
#put back in order (just in case)
train <- arrange(train, id)
test <- arrange(test, id)
cat("Get number of words and word matching title in train\n")
# Build the 9 word_match features for every training row; mapply returns a
# 9 x nrow matrix, so transpose to get one row per observation
train_words <- as.data.frame(t(mapply(word_match,train$search_term,train$product_title,train$product_description,train$bulletvalues,train$brandname)))
# Unpack the feature matrix into named train columns (order matches the
# return vector of word_match)
train$nmatch_title <- train_words[,1]
train$nwords <- train_words[,2]
train$nmatch_desc <- train_words[,3]
train$nmatch_bullets <- train_words[,4]
train$nmatch_brand <- train_words[,5]
#add fuzzy's here
train$fuzzy_match_title <- train_words[,6]
train$fuzzy_match_desc <- train_words[,7]
train$countmatch_desc <- train_words[,8]
train$countmatch_bullet <- train_words[,9]
train <- arrange(train, id)
# Exploratory plots: how do the new match features relate to relevance?
plot(train$nmatch_brand, train$relevance)
max(train$countmatch_desc)
max(train$nmatch_desc)
plot(train$countmatch_desc, train$relevance)
plot(train$countmatch_bullet, train$relevance)
hist(train$countmatch_desc)
hist(train$relevance)
#look at the super high relevancy records with low match results:
#first set up the differences
#THIS led me to want to do a fuzzy match
# Percentage of query words matched in each field, relative to query length
trainpercents <- mutate(train, "perc_title"=(nmatch_title / nwords) * 100,
"perc_desc"=(nmatch_desc / nwords) * 100,
"perc_bullet"=(nmatch_bullets / nwords) * 100,
"perc_brand"=(nmatch_brand / nwords) * 100)
# High-relevance rows with zero matches anywhere (motivation for the fuzzy
# matching above).
# Fix: the data frame created above is `trainpercents`; the original
# referenced an undefined object `train.percents`, so this line errored.
train.highrel <- filter(trainpercents, relevance == 3, perc_title == 0, perc_desc == 0,
perc_bullet == 0, perc_brand == 0)
# Distribution checks on the new occurrence-count features
summary(train$countmatch_desc)
summary(train$countmatch_bullet)
#are all description and bullet values the same?
#train.question <- mutate(train, "bulletdescdiff"=nmatch_desc - nmatch_bullets)
#train.question <- filter(train.question, bulletdescdiff > 0 | bulletdescdiff < 0)
#train.question <- mutate(train.question, "diffsqrd"=bulletdescdiff^2)
#train.question <- arrange(train.question, desc(diffsqrd)) #biggest difference in desc / bullets
cat("Get number of words and word matching title in test\n")
# Same 9 word_match features for the test set (see train block above)
test_words <- as.data.frame(t(mapply(word_match,test$search_term,test$product_title,test$product_description,test$bulletvalues,test$brandname)))
test$nmatch_title <- test_words[,1]
test$nwords <- test_words[,2]
test$nmatch_desc <- test_words[,3]
test$nmatch_bullets <- test_words[,4]
test$nmatch_brand <- test_words[,5]
test$fuzzy_match_title <- test_words[,6]
test$fuzzy_match_desc <- test_words[,7]
test$countmatch_desc <- test_words[,8]
test$countmatch_bullet <- test_words[,9]
test <- arrange(test, id)
# Free the intermediate feature matrices
rm(train_words,test_words)
#max(test$count_desc)
cat("A simple linear model on number of words and number of words that match\n")
# Baseline linear model of relevance on the raw match counts
glm_model <- glm(relevance~nmatch_bullets+nmatch_title+nmatch_desc+nwords+countmatch_desc+nmatch_brand+countmatch_bullet,data=train)
summary(glm_model)
# Alternative models on the percentage features (glm with gaussian default
# and lm are equivalent here; both fit for comparison of summaries)
glm2 <- glm(data = trainpercents, relevance~perc_desc+perc_title+perc_bullet+perc_brand)
summary(glm2)
lm2 <- lm(data = trainpercents, relevance~perc_desc+perc_title+perc_bullet+perc_brand)
summary(lm2)
#normal LM?
#cat("A simple linear model on number of words and number of words that match\n")
#lm_model <- lm(relevance~nmatch_bullets+nmatch_title+nmatch_desc+nwords,data=train)
#summary(lm_model)
#test_relevance <- predict(glm_model,test)
#test_relevance <- ifelse(test_relevance>3,3,test_relevance)
#test_relevance <- ifelse(test_relevance<1,1,test_relevance)
#test$test_relevance <- test_relevance
#submission_bullets001 <- data.frame(id=test$id,relevance=test$test_relevance)
#write_csv(submission_bullets001,"../output/submission_bullets001.csv")
#print(Sys.time()-t)
#head(test)
# Persist the enriched train/test sets for downstream modeling
write_csv(train, "../traintest/HD1_baseline_trainWithBulletsAndDescCounts.csv")
write_csv(test, "../traintest/HD1_baseline_testWithBulletsAndDescCounts.csv")
|
# Maximum-likelihood fit of the BiChrom wood/herb chromosome-number model,
# starting Nelder-Mead from a uniform random parameter sample (sample 328).
source( "masternegloglikeeps1.R" )  # defines negloglikelihood.wh
source("eudicottree.R" )            # defines angiosperm.tree
library( "expm" )                   # matrix exponential used by the likelihood
source( "Qmatrixwoodherb2.R" )      # transition-rate matrix builder
source("Pruning2.R")                # pruning-algorithm tree likelihood
bichrom.dataset<-read.table( "eudicotvals.txt",header=FALSE,sep=",",stringsAsFactors=FALSE)
last.state=50  # maximum chromosome number considered
uniform.samples<-read.csv("sample328.csv",header=FALSE)
a<- as.numeric(t(uniform.samples))  # starting parameter vector
# uniform root-state prior over 2*(last.state+1) states (wood/herb x count)
p.0<-rep(1,2*(last.state+1))/(2*(last.state+1))
# 10 back-transformed parameter estimates + 1 neg-log-likelihood value.
# Fix: the original preallocated length 9 but wrote indices 1:11, so the
# error branch produced a shorter vector (9 NAs) than the success branch.
results<-rep(0,11)
mle<-try(optim(par=a,fn=negloglikelihood.wh, method= "Nelder-Mead", bichrom.phy=angiosperm.tree, bichrom.data=bichrom.dataset,max.chromosome=last.state,pi.0=p.0),silent=TRUE)
print(mle)
# inherits() is the robust way to test for a try() failure
if(inherits(mle, "try-error")){results<-rep(NA,11)}else{
results[1:10]<-exp(mle$par)  # parameters were optimized on the log scale
results[11]<-mle$value}
write.table(results,file="results328.csv",sep=",")
| /Full model optimizations/explorelikeuni328.R | no_license | roszenil/Bichromdryad | R | false | false | 745 | r | source( "masternegloglikeeps1.R" )
# (Duplicate copy of the BiChrom MLE script; first source() line lives on
# the preceding dataset-metadata row.)
source("eudicottree.R" )            # defines angiosperm.tree
library( "expm" )                   # matrix exponential used by the likelihood
source( "Qmatrixwoodherb2.R" )      # transition-rate matrix builder
source("Pruning2.R")                # pruning-algorithm tree likelihood
bichrom.dataset<-read.table( "eudicotvals.txt",header=FALSE,sep=",",stringsAsFactors=FALSE)
last.state=50  # maximum chromosome number considered
uniform.samples<-read.csv("sample328.csv",header=FALSE)
a<- as.numeric(t(uniform.samples))  # starting parameter vector
# uniform root-state prior over 2*(last.state+1) states (wood/herb x count)
p.0<-rep(1,2*(last.state+1))/(2*(last.state+1))
# 10 back-transformed parameter estimates + 1 neg-log-likelihood value.
# Fix: the original preallocated length 9 but wrote indices 1:11, so the
# error branch produced a shorter vector (9 NAs) than the success branch.
results<-rep(0,11)
mle<-try(optim(par=a,fn=negloglikelihood.wh, method= "Nelder-Mead", bichrom.phy=angiosperm.tree, bichrom.data=bichrom.dataset,max.chromosome=last.state,pi.0=p.0),silent=TRUE)
print(mle)
# inherits() is the robust way to test for a try() failure
if(inherits(mle, "try-error")){results<-rep(NA,11)}else{
results[1:10]<-exp(mle$par)  # parameters were optimized on the log scale
results[11]<-mle$value}
write.table(results,file="results328.csv",sep=",")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vnsgm.R
\name{vnsgm.ordered}
\alias{vnsgm.ordered}
\title{Vertex Nomination via Seeded Graph Matching}
\usage{
vnsgm.ordered(x, s, g1, g2, h, ell, R, gamma, maxiter = 20, pad = 0,
verbose = FALSE, plotF = FALSE)
}
\arguments{
\item{x}{vector of indices for vertices of interest (voi) in \eqn{G_1}}
\item{s}{the number of seeds.}
\item{g1}{\eqn{G_1} in \code{igraph} object where voi is known}
\item{g2}{\eqn{G_2} in \code{igraph}}
\item{h}{\eqn{h}-hop for distance from voi to other vertices to create
\eqn{h}-hop induced subgraph of \eqn{G_1}}
\item{ell}{\eqn{ell}-hop for distance from seeds to other vertices
to create \eqn{ell}-hop induced subgraph of \eqn{G_1}}
\item{R}{number of restarts for \code{multiStart}}
\item{gamma}{to be used with \code{multiStart}, max tolerance
for alpha, how far away from the barycenter user is willing to go for
the initialization of \code{sgm} on any given iteration}
\item{maxiter}{the number of maxiters for the Frank-Wolfe algorithm.}
\item{pad}{a scalar value for padding for sgm (cases where two graphs
have different number of vertices); defaulted to 0}
\item{verbose}{logical verbose outputs}
\item{plotF}{boolean to plot the probability matrix}
}
\value{
\code{VOI} Vertex of Interest
\code{seeds} \code{s} seeds
\code{Sx1} \eqn{S_x} the seeds within an h-path, i.e. in the
h-neighborhood, of VOI x in \eqn{G_1}
\code{Sx2} \eqn{S'_x} the corresponding seeds of \eqn{S_x} in \eqn{G_2}
\code{candidates_for_matching} labels for the candidates for
matching VOIs in \eqn{G_2}
\code{G1_vertices} vertices within ell-neighborhood of S_x in \eqn{G_1}
(vertices used in vertex nomination for VOIs)
\code{G2_vertices} vertices within ell-neighborhood of S'_x in \eqn{G_2}
(including candidates x' for matching VOIs x in \eqn{G_1})
\code{P} matrix \eqn{P(i,j)} is the proportion of times that vertex \eqn{j} in
the induced subgraph of \eqn{G_2} was mapped to vertex \eqn{i} in the induced subgraph of \eqn{G_1}.
Then the \eqn{i-th} and \eqn{j-th} elements of the labels vector tells you which vertices these actually were
in \eqn{G_1} and \eqn{G_2}, respectively.
}
\description{
Finds the seeds in an \eqn{h}-hop induced nbd of \eqn{G_1} around the
VOI, x, that is, finds the induced subgraph generated by vertices that are
within a path of length \eqn{h} of the VOI, and then finds an \eqn{ell}-hop
induced nbd of \eqn{G_1} around the seeds within the \eqn{h}-hop nbd of
x, and an \eqn{ell}-hop induced nbd of \eqn{G_2} around the corresponding
seeds. Then, matches these induced subgraphs via \code{multiStart}.
Assume first \eqn{s} vertices in two graphs are matched seeds.
}
\references{
Patsolic, Heather G.; Park, Youngser; Lyzinski, Vince; Priebe, Carey E. (2017).
Vertex Nomination Via Local Neighborhood Matching
Online: \url{https://arxiv.org/abs/1705.00674}
Fishkind, D. E., Adali, S., Priebe, C. E. (2012). Seeded Graph Matching
Online: \url{http://arxiv.org/abs/1209.0367}
}
\author{
Youngser Park <youngser@jhu.edu>, Kemeng Zhang <kzhang@jhu.edu>
}
| /man/vnsgm.ordered.Rd | no_license | dPys/graphstats | R | false | true | 3,093 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vnsgm.R
\name{vnsgm.ordered}
\alias{vnsgm.ordered}
\title{Vertex Nomination via Seeded Graph Matching}
\usage{
vnsgm.ordered(x, s, g1, g2, h, ell, R, gamma, maxiter = 20, pad = 0,
verbose = FALSE, plotF = FALSE)
}
\arguments{
\item{x}{vector of indices for vertices of interest (voi) in \eqn{G_1}}
\item{s}{the number of seeds.}
\item{g1}{\eqn{G_1} in \code{igraph} object where voi is known}
\item{g2}{\eqn{G_2} in \code{igraph}}
\item{h}{\eqn{h}-hop for distance from voi to other vertices to create
\eqn{h}-hop induced subgraph of \eqn{G_1}}
\item{ell}{\eqn{ell}-hop for distance from seeds to other vertices
to create \eqn{ell}-hop induced subgraph of \eqn{G_1}}
\item{R}{number of restarts for \code{multiStart}}
\item{gamma}{to be used with \code{multiStart}, max tolerance
for alpha, how far away from the barycenter user is willing to go for
the initialization of \code{sgm} on any given iteration}
\item{maxiter}{the number of maxiters for the Frank-Wolfe algorithm.}
\item{pad}{a scalar value for padding for sgm (cases where two graphs
have different number of vertices); defaulted to 0}
\item{verbose}{logical verbose outputs}
\item{plotF}{boolean to plot the probability matrix}
}
\value{
\code{VOI} Vertex of Interest
\code{seeds} \code{s} seeds
\code{Sx1} \eqn{S_x} the seeds within an h-path, i.e. in the
h-neighborhood, of VOI x in \eqn{G_1}
\code{Sx2} \eqn{S'_x} the corresponding seeds of \eqn{S_x} in \eqn{G_2}
\code{candidates_for_matching} labels for the candidates for
matching VOIs in \eqn{G_2}
\code{G1_vertices} vertices within ell-neighborhood of S_x in \eqn{G_1}
(vertices used in vertex nomination for VOIs)
\code{G2_vertices} vertices within ell-neighborhood of S'_x in \eqn{G_2}
(including candidates x' for matching VOIs x in \eqn{G_1})
\code{P} matrix \eqn{P(i,j)} is the proportion of times that vertex \eqn{j} in
the induced subgraph of \eqn{G_2} was mapped to vertex \eqn{i} in the induced subgraph of \eqn{G_1}.
Then the \eqn{i-th} and \eqn{j-th} elements of the labels vector tells you which vertices these actually were
in \eqn{G_1} and \eqn{G_2}, respectively.
}
\description{
Finds the seeds in an \eqn{h}-hop induced nbd of \eqn{G_1} around the
VOI, x, that is, finds the induced subgraph generated by vertices that are
within a path of length \eqn{h} of the VOI, and then finds an \eqn{ell}-hop
induced nbd of \eqn{G_1} around the seeds within the \eqn{h}-hop nbd of
x, and an \eqn{ell}-hop induced nbd of \eqn{G_2} around the corresponding
seeds. Then, matches these induced subgraphs via \code{multiStart}.
Assume first \eqn{s} vertices in two graphs are matched seeds.
}
\references{
Patsolic, Heather G.; Park, Youngser; Lyzinski, Vince; Priebe, Carey E. (2017).
Vertex Nomination Via Local Neighborhood Matching
Online: \url{https://arxiv.org/abs/1705.00674}
Fishkind, D. E., Adali, S., Priebe, C. E. (2012). Seeded Graph Matching
Online: \url{http://arxiv.org/abs/1209.0367}
}
\author{
Youngser Park <youngser@jhu.edu>, Kemeng Zhang <kzhang@jhu.edu>
}
|
knitr::opts_chunk$set(echo = TRUE)
################################
# Create edx set, validation set
################################
# Note: this process could take a couple of minutes
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org")
# MovieLens 10M dataset:
# https://grouplens.org/datasets/movielens/10m/
# http://files.grouplens.org/datasets/movielens/ml-10m.zip
dl <- tempfile()
download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl)
ratings <- fread(text = gsub("::", "\t", readLines(unzip(dl, "ml-10M100K/ratings.dat"))),
col.names = c("userId", "movieId", "rating", "timestamp"))
movies <- str_split_fixed(readLines(unzip(dl, "ml-10M100K/movies.dat")), "\\::", 3)
colnames(movies) <- c("movieId", "title", "genres")
movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(levels(movieId))[movieId],
title = as.character(title),
genres = as.character(genres))
movielens <- left_join(ratings, movies, by = "movieId")
# Validation set will be 10% of MovieLens data
set.seed(1, sample.kind="Rounding")
# if using R 3.5 or earlier, use `set.seed(1)` instead
test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE)
edx <- movielens[-test_index,]
temp <- movielens[test_index,]
# Make sure userId and movieId in validation set are also in edx set
validation <- temp %>%
semi_join(edx, by = "movieId") %>%
semi_join(edx, by = "userId")
# Add rows removed from validation set back into edx set
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)
rm(dl, ratings, movies, test_index, temp, movielens, removed)
library(tidyverse)
library(caret)
library(data.table)
if(!require(lubridate)) install.packages("lubridate",
repos = "http://cran.us.r-project.org")
library(lubridate)
head(edx)
nrow(edx)
ncol(edx)
names(edx)
summary(edx) # Gives general statistical summary of the data
# We will omit any rows with na as inputs
na.omit(edx)
na.omit(validation)
# Let's clean the data a little bit. We will change the column timestamp to dates using the lubridate package, to see the exact date when the movies were rated
library(lubridate)
edx <- edx %>% mutate(dates = as_datetime(timestamp)) %>% select(-timestamp)
# Fix: use the same column name ("dates") as edx; the original created a
# column named "date" here, leaving the two data sets with inconsistent
# schemas for the same derived quantity.
validation <- validation %>% mutate(dates = as_datetime(timestamp)) %>% select(-timestamp)
head(validation)
# Create a new column, years
edx <- edx %>% mutate(years = as.numeric(str_sub(title,-5,-2)))
# extract years the movie came out and create a new column called years
results <- tibble()
#create a tibble which will organize all the RMSE's for different models
if(!require(tidyverse)) install.packages("ggplot2", repos = "http://cran.us.r-project.org")
library(ggplot2)
# Let's take a closer look at the edx data and see if there is any bias within the data
edx %>%
dplyr::count(movieId) %>%
ggplot(aes(n)) +
geom_histogram(bins = 50, color = "red") +
ggtitle("Movies")
# We can see that some movies are rated more than others
edx %>%
dplyr::count(userId) %>%
ggplot(aes(n)) +
geom_histogram(bins = 50, color = "blue") +
ggtitle("Users")
# Some users rate the movies more often than others
# Let's also take a look at the frequency/distribution of the ratings
ratings <- as.vector(edx$rating)
ratings <- factor(ratings)
qplot(ratings) +
ggtitle("Ratings Frequencies")
# There is a high tendency that people often give out 3,4 as ratings
# We can also analyze the relationship between the variable time and other predictors
# We can take a look at the relationship between years the movies were released and the mean ratings of each year
edx %>% group_by(years) %>%
summarize(mean_rating = mean(rating)) %>%
ggplot(aes(years, mean_rating)) +
geom_smooth()
# Used Loess method to smooth by default
# We can see a generally see the that the average ratings for more recent movies are lower and movies that were in the mid-early 1900's have higher mean ratings
# Create a function called 'RMSE'
# Root-mean-squared error between observed and predicted ratings.
RMSE <- function(actual, predicted) {
  squared_errors <- (actual - predicted)^2
  sqrt(mean(squared_errors))
}
# Clean the data for the validation set
validation <- validation %>% mutate(years = as.numeric(str_sub(title,-5,-2)))
# Splitting edx into test and training set
set.seed(1996)
test_index2 <- createDataPartition(edx$rating, times = 1, p = 0.1, list = FALSE)
temporary_test <- edx %>% slice(test_index2)
train <- edx %>% slice(-test_index2)
# Making sure testset and train set have same movieIds and userIds
test <- temporary_test %>% semi_join(train, by = "movieId") %>%
semi_join(train, by = "userId")
# Putting the removed rows back into the training set
removed <- anti_join(temporary_test, test)
train <- rbind(train, removed)
test_ratings <- test$rating # ratings in test set
set.seed(2020)
random <- sample(c(0, 0.5, 1, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0), length(test_ratings), replace = TRUE)
RMSE(test_ratings,random)
# RMSE turns out 2.156021 which is terrible considering that the prediction could differ by up to two stars!
# Will test it on the validation set
random <- sample(c(0, 0.5, 1, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0), length(validation$rating), replace = TRUE)
rmse_random <- RMSE(validation$rating, random)
results <- bind_rows(results, data_frame(method="Random Model", RMSE = rmse_random))
mu <- mean(train$rating)
RMSE(test_ratings, mu)
# RMSE has now decreased to 1.59691 which is much better, but more improvement could be made to this model
# Let's test it on our validation set to obtain our RMSE
mean_rmse <- RMSE(validation$rating, mu)
results <- bind_rows(results, data_frame(method= "Mean Model", RMSE = mean_rmse))
permovie_averages1 <- train %>% group_by(movieId) %>%
summarize(bi = mean(rating - mu))
model1_prediction <- test %>% left_join(permovie_averages1, by='movieId') %>%
mutate(prediction = mu + bi)
RMSE(test_ratings, model1_prediction$prediction)
# This was the RMSE for the test set, we will now obtain RMSE for the validation set
model1_prediction_valid <- validation %>% left_join(permovie_averages1, by='movieId') %>%
mutate(prediction = mu + bi)
model1_rmse <- RMSE(validation$rating, model1_prediction_valid$prediction)
results <- bind_rows(results, data_frame(method= "Movie Specific Effect Model", RMSE = model1_rmse))
permovie_averages2 <- train %>%
left_join(permovie_averages1, by='movieId') %>%
group_by(userId) %>%
summarize(bu = mean(rating - mu - bi))
model2_prediction <- test %>% left_join(permovie_averages2, 'userId') %>%
left_join(permovie_averages1, by='movieId') %>%
mutate(prediction = mu + bi + bu)
RMSE(test_ratings, model2_prediction$prediction)
# We did a better job at estimating the rating by incorporating the user specific effect to our model
# Let's give it a try on our validation set
model2_prediction_valid <- validation %>% left_join(permovie_averages1, by='movieId') %>%
left_join(permovie_averages2, by = 'userId') %>%
mutate(prediction = mu + bi + bu)
model2_rmse <- RMSE(validation$rating, model2_prediction_valid$prediction)
results <- bind_rows(results, data_frame(method= "Movie + userId Specific Effect Model", RMSE = model2_rmse))
permovie_averages5 <- train %>%
left_join(permovie_averages1, by='movieId') %>%
left_join(permovie_averages2, by = 'userId') %>%
group_by(years) %>%
summarize(by = mean(rating - mu - bi - bu))
model5_prediction <- test %>% left_join(permovie_averages2, 'userId') %>%
left_join(permovie_averages1, 'movieId') %>%
left_join(permovie_averages5, 'years') %>%
mutate(prediction = mu + bi + bu + by)
RMSE(test_ratings, model5_prediction$prediction)
# By incorporating the year effect we were able to make an improvement on our model
# Let's show that using our validation set
model5_prediction_valid <- validation %>% left_join(permovie_averages1, by='movieId') %>%
left_join(permovie_averages2, by = 'userId') %>%
left_join(permovie_averages5, 'years') %>%
mutate(prediction = mu + bi + bu + by)
model5_rmse <- RMSE(validation$rating, model5_prediction_valid$prediction)
results <- bind_rows(results, data_frame(method= "Movie + userId + year Specific Effect Model", RMSE = model5_rmse))
lambdas <- seq(0,7,0.25)
# Here the lambdas are tuning parameters and we will find the best lambda through cross validation method
# For each lambda, bi & bu is calculated and ratings are predicted & tested against the testset
# Cross validation code requires some time to run
# Compute the held-out-test RMSE of the regularized movie + user + year
# effects model for a given penalty `lambda`.
# Relies on globals defined above: train, test, test_ratings, RMSE().
list_RMSE <- function(lambda){
mu <- mean(train$rating)
permovie_averages3 <- train %>%
group_by(movieId) %>%
summarize(bi = sum(rating - mu)/(n() + lambda)) # movie specific effect regularized
permovie_averages4 <- train %>%
left_join(permovie_averages3, by='movieId') %>%
group_by(userId) %>%
summarize(bu = sum(rating - mu - bi)/(n() + lambda)) # userId specific effect regularized
permovie_averages6 <- train %>%
left_join(permovie_averages3, by='movieId') %>%
left_join(permovie_averages4, by ='userId') %>%
group_by(years) %>%
summarize(by = sum(rating - mu - bi - bu)/(n() + lambda)) # year specific effect regularized
# predict ratings on the held-out test set and score them
test_prediction <- test %>% left_join(permovie_averages3, by = 'movieId') %>%
left_join(permovie_averages4, by ='userId') %>%
left_join(permovie_averages6, by ='years') %>%
mutate(prediction = mu + bi + bu + by)
RMSE(test_ratings, test_prediction$prediction)
}
RMSEs <- sapply(lambdas, list_RMSE)
plot(lambdas, RMSEs)
lambdas[which.min(RMSEs)]
# Lambda which minimized RMSE the most (optimal RMSE) against the test data was 4.5
# Now that we have our model lambda, we will test it out on our validation set
mu <- mean(train$rating)
permovie_averages3 <- train %>%
group_by(movieId) %>%
summarize(bi = sum(rating - mu)/(n() + 4.5)) # movie specific effect regularized
permovie_averages4 <- train %>%
left_join(permovie_averages3, by='movieId') %>%
group_by(userId) %>%
summarize(bu = sum(rating - mu - bi)/(n() + 4.5)) # userId specific effect regualarized
permovie_averages6 <- train %>%
left_join(permovie_averages3, by='movieId') %>%
left_join(permovie_averages4, by ='userId') %>%
group_by(years) %>%
summarize(by = sum(rating - mu - bi - bu)/(n() + 4.5)) # year specific effect regualarized
validation_prediction <- validation %>% left_join(permovie_averages3, by = 'movieId') %>%
left_join(permovie_averages4, by ='userId') %>%
left_join(permovie_averages6, by ='years') %>%
mutate(prediction = mu + bi + bu + by)
final <- RMSE(validation$rating, validation_prediction$prediction)
results <- bind_rows(results, data_frame(method= "Regularization on Movie + userId Specific Effect Model", RMSE = final))
results %>% knitr::kable() #final results | /MovieLens.R | no_license | swchoi834/homework-0 | R | false | false | 11,078 | r | knitr::opts_chunk$set(echo = TRUE)
################################
# Create edx set, validation set
################################
# Note: this process could take a couple of minutes
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org")
# MovieLens 10M dataset:
# https://grouplens.org/datasets/movielens/10m/
# http://files.grouplens.org/datasets/movielens/ml-10m.zip
dl <- tempfile()
download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl)
ratings <- fread(text = gsub("::", "\t", readLines(unzip(dl, "ml-10M100K/ratings.dat"))),
col.names = c("userId", "movieId", "rating", "timestamp"))
movies <- str_split_fixed(readLines(unzip(dl, "ml-10M100K/movies.dat")), "\\::", 3)
colnames(movies) <- c("movieId", "title", "genres")
movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(levels(movieId))[movieId],
title = as.character(title),
genres = as.character(genres))
movielens <- left_join(ratings, movies, by = "movieId")
# Validation set will be 10% of MovieLens data
set.seed(1, sample.kind="Rounding")
# if using R 3.5 or earlier, use `set.seed(1)` instead
test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE)
edx <- movielens[-test_index,]
temp <- movielens[test_index,]
# Make sure userId and movieId in validation set are also in edx set
validation <- temp %>%
semi_join(edx, by = "movieId") %>%
semi_join(edx, by = "userId")
# Add rows removed from validation set back into edx set
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)
rm(dl, ratings, movies, test_index, temp, movielens, removed)
library(tidyverse)
library(caret)
library(data.table)
if(!require(lubridate)) install.packages("lubridate",
repos = "http://cran.us.r-project.org")
library(lubridate)
head(edx)
nrow(edx)
ncol(edx)
names(edx)
summary(edx) # Gives general statistical summary of the data
# We will omit any rows with na as inputs
na.omit(edx)
na.omit(validation)
# Let's clean the data a little bit. We will change the column timestamp to dates using the lubridate package, to see the exact date when the movies were rated
library(lubridate)
edx <- edx %>% mutate(dates = as_datetime(timestamp)) %>% select(-timestamp)
validation <- validation %>% mutate(date = as_datetime(timestamp)) %>% select(-timestamp)
head(validation)
# Create a new column, years
edx <- edx %>% mutate(years = as.numeric(str_sub(title,-5,-2)))
# extract years the movie came out and create a new column called years
results <- tibble()
#create a tibble which will organize all the RMSE's for different models
if(!require(tidyverse)) install.packages("ggplot2", repos = "http://cran.us.r-project.org")
library(ggplot2)
# Let's take a closer look at the edx data and see if there is any bias within the data
edx %>%
dplyr::count(movieId) %>%
ggplot(aes(n)) +
geom_histogram(bins = 50, color = "red") +
ggtitle("Movies")
# We can see that some movies are rated more than others
edx %>%
dplyr::count(userId) %>%
ggplot(aes(n)) +
geom_histogram(bins = 50, color = "blue") +
ggtitle("Users")
# Some users rate the movies more often than others
# Let's also take a look at the frequency/distribution of the ratings
ratings <- as.vector(edx$rating)
ratings <- factor(ratings)
qplot(ratings) +
ggtitle("Ratings Frequencies")
# There is a high tendency that people often give out 3,4 as ratings
# We can also analyze the relationship between the variable time and other predictors
# We can take a look at the relationship between years the movies were released and the mean ratings of each year
edx %>% group_by(years) %>%
summarize(mean_rating = mean(rating)) %>%
ggplot(aes(years, mean_rating)) +
geom_smooth()
# Used Loess method to smooth by default
# We can see a generally see the that the average ratings for more recent movies are lower and movies that were in the mid-early 1900's have higher mean ratings
# Create a function called 'RMSE'
# Root-mean-squared error between observed and predicted ratings.
RMSE <- function(actual, predicted) {
  squared_errors <- (actual - predicted)^2
  sqrt(mean(squared_errors))
}
# Clean the data for the validation set
validation <- validation %>% mutate(years = as.numeric(str_sub(title,-5,-2)))
# Splitting edx into test and training set
set.seed(1996)
test_index2 <- createDataPartition(edx$rating, times = 1, p = 0.1, list = FALSE)
temporary_test <- edx %>% slice(test_index2)
train <- edx %>% slice(-test_index2)
# Making sure testset and train set have same movieIds and userIds
test <- temporary_test %>% semi_join(train, by = "movieId") %>%
semi_join(train, by = "userId")
# Putting the removed rows back into the training set
removed <- anti_join(temporary_test, test)
train <- rbind(train, removed)
test_ratings <- test$rating # ratings in test set
set.seed(2020)
random <- sample(c(0, 0.5, 1, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0), length(test_ratings), replace = TRUE)
RMSE(test_ratings,random)
# RMSE turns out 2.156021 which is terrible considering that the prediction could differ by up to two stars!
# Will test it on the validation set
random <- sample(c(0, 0.5, 1, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0), length(validation$rating), replace = TRUE)
rmse_random <- RMSE(validation$rating, random)
results <- bind_rows(results, data_frame(method="Random Model", RMSE = rmse_random))
mu <- mean(train$rating)
RMSE(test_ratings, mu)
# RMSE has now decreased to 1.59691 which is much better, but more improvement could be made to this model
# Let's test it on our validation set to obtain our RMSE
mean_rmse <- RMSE(validation$rating, mu)
results <- bind_rows(results, data_frame(method= "Mean Model", RMSE = mean_rmse))
permovie_averages1 <- train %>% group_by(movieId) %>%
summarize(bi = mean(rating - mu))
model1_prediction <- test %>% left_join(permovie_averages1, by='movieId') %>%
mutate(prediction = mu + bi)
RMSE(test_ratings, model1_prediction$prediction)
# This was the RMSE for the test set, we will now obtain RMSE for the validation set
model1_prediction_valid <- validation %>% left_join(permovie_averages1, by='movieId') %>%
mutate(prediction = mu + bi)
model1_rmse <- RMSE(validation$rating, model1_prediction_valid$prediction)
results <- bind_rows(results, data_frame(method= "Movie Specific Effect Model", RMSE = model1_rmse))
permovie_averages2 <- train %>%
left_join(permovie_averages1, by='movieId') %>%
group_by(userId) %>%
summarize(bu = mean(rating - mu - bi))
model2_prediction <- test %>% left_join(permovie_averages2, 'userId') %>%
left_join(permovie_averages1, by='movieId') %>%
mutate(prediction = mu + bi + bu)
RMSE(test_ratings, model2_prediction$prediction)
# We did a better job at estimating the rating by incorporating the user specific effect to our model
# Let's give it a try on our validation set
model2_prediction_valid <- validation %>% left_join(permovie_averages1, by='movieId') %>%
left_join(permovie_averages2, by = 'userId') %>%
mutate(prediction = mu + bi + bu)
model2_rmse <- RMSE(validation$rating, model2_prediction_valid$prediction)
results <- bind_rows(results, data_frame(method= "Movie + userId Specific Effect Model", RMSE = model2_rmse))
permovie_averages5 <- train %>%
left_join(permovie_averages1, by='movieId') %>%
left_join(permovie_averages2, by = 'userId') %>%
group_by(years) %>%
summarize(by = mean(rating - mu - bi - bu))
model5_prediction <- test %>% left_join(permovie_averages2, 'userId') %>%
left_join(permovie_averages1, 'movieId') %>%
left_join(permovie_averages5, 'years') %>%
mutate(prediction = mu + bi + bu + by)
RMSE(test_ratings, model5_prediction$prediction)
# By incorporating the year effect we were able to make an improvement on our model
# Let's show that using our validation set
model5_prediction_valid <- validation %>% left_join(permovie_averages1, by='movieId') %>%
left_join(permovie_averages2, by = 'userId') %>%
left_join(permovie_averages5, 'years') %>%
mutate(prediction = mu + bi + bu + by)
model5_rmse <- RMSE(validation$rating, model5_prediction_valid$prediction)
results <- bind_rows(results, data_frame(method= "Movie + userId + year Specific Effect Model", RMSE = model5_rmse))
lambdas <- seq(0,7,0.25)
# Here the lambdas are tuning parameters and we will find the best lambda through cross validation method
# For each lambda, bi & bu is calculated and ratings are predicted & tested against the testset
# Cross validation code requires some time to run
list_RMSE <- function(lambda){
mu <- mean(train$rating)
permovie_averages3 <- train %>%
group_by(movieId) %>%
summarize(bi = sum(rating - mu)/(n() + lambda)) # movie specific effect regularized
permovie_averages4 <- train %>%
left_join(permovie_averages3, by='movieId') %>%
group_by(userId) %>%
summarize(bu = sum(rating - mu - bi)/(n() + lambda)) # userId specific effect regualarized
permovie_averages6 <- train %>%
left_join(permovie_averages3, by='movieId') %>%
left_join(permovie_averages4, by ='userId') %>%
group_by(years) %>%
summarize(by = sum(rating - mu - bi - bu)/(n() + lambda)) # year specific effect regualarized
# predict
test_prediction <- test %>% left_join(permovie_averages3, by = 'movieId') %>%
left_join(permovie_averages4, by ='userId') %>%
left_join(permovie_averages6, by ='years') %>%
mutate(prediction = mu + bi + bu + by)
RMSE(test_ratings, test_prediction$prediction)
}
RMSEs <- sapply(lambdas, list_RMSE)
plot(lambdas, RMSEs)
lambdas[which.min(RMSEs)]
# Lambda which minimized RMSE the most (optimal RMSE) against the test data was 4.5
# Now that we have our model lambda, we will test it out on our validation set
mu <- mean(train$rating)
permovie_averages3 <- train %>%
group_by(movieId) %>%
summarize(bi = sum(rating - mu)/(n() + 4.5)) # movie specific effect regularized
permovie_averages4 <- train %>%
left_join(permovie_averages3, by='movieId') %>%
group_by(userId) %>%
summarize(bu = sum(rating - mu - bi)/(n() + 4.5)) # userId specific effect regualarized
permovie_averages6 <- train %>%
left_join(permovie_averages3, by='movieId') %>%
left_join(permovie_averages4, by ='userId') %>%
group_by(years) %>%
summarize(by = sum(rating - mu - bi - bu)/(n() + 4.5)) # year specific effect regualarized
validation_prediction <- validation %>% left_join(permovie_averages3, by = 'movieId') %>%
left_join(permovie_averages4, by ='userId') %>%
left_join(permovie_averages6, by ='years') %>%
mutate(prediction = mu + bi + bu + by)
final <- RMSE(validation$rating, validation_prediction$prediction)
results <- bind_rows(results, data_frame(method= "Regularization on Movie + userId Specific Effect Model", RMSE = final))
results %>% knitr::kable() #final results |
# this is used by the distanceFunction_processData function below
distanceFunction <-
function(event1,
points1,
event2,
points2,
dmax,
tmax,
var_count,
infra_count,
spatial_dist_fun=distHaversine,
spatialORsocioORinfra = 'all')
{
# event will have the socioeconomic variables only [population, event_startdate, event_enddate, category, var1, var2, var3 ... ]
# points will have lon lat in coordinates form
# equal weights will be assigned to each variable
# the socioeconomic variables should already be normalized, this function will only calculate the distance
# the population density should already be normalized
# print(weight_socioeconomic)
if (length(event1) != length(event2)){
stop("event vectors are not of the same length.")
}
d_spatial <- spatial_dist_fun(points1[1,], points2[1,])
d_spatial <- round(d_spatial/1000 , 2)
dspatialnormalized <- ifelse((d_spatial > dmax), 1, round(d_spatial/dmax,4))
# print(paste("dspatialnormalized",dspatialnormalized))
startDate_1 <- as.Date(event1[2], format="%Y-%m-%d")
startDate_2 <- as.Date(event2[2], format="%Y-%m-%d")
# print(paste("startDate_1:", startDate_1, "startDate_2:", startDate_2))
main <- ifelse(startDate_1 < startDate_2, 'event1',
ifelse(startDate_1 > startDate_2, 'event2',
ifelse(as.numeric(event1[1]) < as.numeric(event2[1]),'event2','event1')))
# print(paste("event1[1]",event1[1], "event2[1]", event2[1], ifelse(as.numeric(event1[1]) < as.numeric(event2[1]),'event2','event1')))
# print(main)
if (main == 'event1') {
mainEvent <- event1
secondEvent <- event2
} else {
secondEvent <- event1
mainEvent <- event2
}
# print(paste("mainEvent:",mainEvent[1], "secondEvent:", secondEvent[1]))
startDate_1 <- as.Date(mainEvent[2], format="%Y-%m-%d")
endDate_1 <- as.Date(mainEvent[3], format="%Y-%m-%d")
startDate_2 <- as.Date(secondEvent[2], format="%Y-%m-%d")
pop <- round(as.numeric(mainEvent[1])/(as.numeric(mainEvent[1]) + as.numeric(secondEvent[1])),4)
# temporal directional distance
dtemporal_directional <- as.vector(ifelse(startDate_1 == endDate_1, startDate_2 - endDate_1, (startDate_2 - endDate_1)/(endDate_1 - startDate_1)))
if (dtemporal_directional < 0) { #if less than 0 means overlap so make it 0
dtemporal_directional <- 0
} else {
dtemporal_directional <- dtemporal_directional
}
# normalized the above distance, if distance greater than threshold, then 1 (max)
dtemporalnormalized <- ifelse((dtemporal_directional > tmax), 1, round(dtemporal_directional/tmax,4))
# print(paste("dtemporalnormalized",dtemporalnormalized))
#print(paste("dtemporal_directional: ",dtemporal_directional," dspatialnormalized: ",dspatialnormalized))
#weights based on the population density of the mainEvent
wtemporal = pop # used to be popdensity
wspatial = 1- pop # used to be popdensity
# print(paste("wtemporal",wtemporal))
# print(paste("wspatial",wspatial))
d_spatiotemporal <- dtemporalnormalized * wtemporal + dspatialnormalized * wspatial
# print(d_spatiotemporal)
d_socioeconomic <- 0
# print(mainEvent[5:length(mainEvent)])
if (var_count > 0){
var1 <- mainEvent[5:(4+var_count)]
var2 <- secondEvent[5:(4+var_count)]
ln <- length(var1)
d_socioeconomic_vector <- NA
for(i in 1:ln){
d_socioeconomic_vector[i] <- abs(as.numeric(var1[i]) - as.numeric(var2[i]))
d_socioeconomic <- d_socioeconomic + ( d_socioeconomic_vector[i] * round(1/ln,4) )
}
}
d_infrastructure_proximity <- 0
d_infrastructure_density <- 0
d_infrastructure <- 0
# print(mainEvent[5:length(mainEvent)])
if (infra_count > 0){
infra1_proximity <- mainEvent[(5+var_count):(4+var_count+infra_count)]
infra2_proximity <- secondEvent[(5+var_count):(4+var_count+infra_count)]
infra1_density <- mainEvent[(5+var_count+infra_count):(4+var_count+infra_count+infra_count)]
infra2_density <- secondEvent[(5+var_count+infra_count):(4+var_count+infra_count+infra_count)]
ln <- length(infra1_proximity)
d_infrastructure_proximity_vector <- NA
d_infrastructure_density_vector <- NA
for(i in 1:ln){
d_infrastructure_proximity_vector[i] <- abs(as.numeric(infra1_proximity[i]) - as.numeric(infra2_proximity[i]))
d_infrastructure_density_vector[i] <- abs(as.numeric(infra1_density[i]) - as.numeric(infra2_density[i]))
d_infrastructure_proximity <- d_infrastructure_proximity + ( d_infrastructure_proximity_vector[i] * round(1/ln,4) )
d_infrastructure_density <- d_infrastructure_density + ( d_infrastructure_density_vector[i] * round(1/ln,4) )
}
d_infrastructure <- (0.5 * d_infrastructure_proximity) + (0.5 * d_infrastructure_density)
}
d_final <- NA
d_final["spatiotemporal"] <- d_spatiotemporal
d_final["socioeconomic"] <- d_socioeconomic
d_final["infrastructural"] <- d_infrastructure
d_final["spatial_km"] <- d_spatial
d_final["spatial_normalized"] <- dspatialnormalized
d_final["temporal_days"] <- dtemporal_directional
d_final["temporal_normalized"] <- dtemporalnormalized
d_final["wtemporal"] <- wtemporal
d_final["wspatial"] <- wspatial
return(d_final)
}
# Builds the pairwise distance matrices for a whole event dataset and returns
# the combined matrix selected by spatialORsocioORinfra.
#
# m3: data.frame of events with columns lon, lat, p (population),
#     event_start, event_end, eventCategory; its LAST
#     (var_count + 2 * infra_count) columns must hold, in order, the
#     socioeconomic variables, the infrastructure proximity variables and the
#     infrastructure density variables.
# dmax, tmax: spatial (km) and temporal (days) normalisation caps, forwarded
#     to distanceFunction.
# fun: point-to-point spatial distance function (returns metres), e.g.
#     distHaversine from the geosphere package.
# weight_spatiotemporal, weight_socioeconomic: component weights; must sum to
#     at most 1.  The remainder is the infrastructure weight.
# spatialORsocioORinfra: which component(s) form the returned matrix.
#
# Side effects: writes every component matrix plus the final matrix to CSV
# files in the working directory.
# Returns: an n x n symmetric numeric matrix.
distanceFunction_processData <-
  function(m3,
           dmax = 10000.00,
           tmax = 30,
           var_count = 0,
           infra_count = 0,
           fun = distHaversine,
           weight_spatiotemporal = 0.33,
           weight_socioeconomic = 0.33,
           spatialORsocioORinfra = 'all') {
    if ((weight_spatiotemporal + weight_socioeconomic) > 1) {
      stop(paste("weights are not correct, weight_spatiotemporal: ",
                 weight_spatiotemporal, ", weight_socioeconomic: ",
                 weight_socioeconomic, ".", sep = ""))
    }
    # Whatever weight remains is assigned to the infrastructure component.
    # Computed up front so every branch at the bottom can use it (it used to
    # be defined only inside the 'all' branch, which made the
    # 'spatiotemporal_infrastructure' branch fail with an undefined variable).
    weight_infrastructure <- 1 - (weight_spatiotemporal + weight_socioeconomic)

    # Flattens row rowNum of `events` into the character vector layout that
    # distanceFunction expects: [population, start date, end date, category,
    # socio vars, proximity vars, density vars].
    prepareArguments <- function(events, rowNum, var_count, infra_count) {
      # Index of the last column that is not a socio/infrastructure variable.
      cols_not_variables <- ncol(events) - (var_count + (2 * infra_count))
      event_arg <- NA
      event_arg[1] <- events$p[rowNum]
      event_arg[2] <- as.character(events$event_start[rowNum], format = "%Y-%m-%d")
      event_arg[3] <- as.character(events$event_end[rowNum], format = "%Y-%m-%d")
      event_arg[4] <- events$eventCategory[rowNum]
      # The socio, proximity and density columns are contiguous, so one loop
      # appends them in order.  seq_len() also fixes the old
      # `1:number_of_variables` bug, which mis-indexed columns whenever
      # var_count == 0 but infra_count > 0 (1:0 iterates over c(1, 0)).
      for (k in seq_len(var_count + 2 * infra_count)) {
        event_arg[4 + k] <- as.vector(events[rowNum, (cols_not_variables + k)])
      }
      return(event_arg)
    }

    events <- m3
    n <- nrow(m3)
    # One lower-triangular accumulator per distance component.
    distanceMatrix_spatial_km <- matrix(0, ncol = n, nrow = n)
    distanceMatrix_temporal <- matrix(0, ncol = n, nrow = n)
    distanceMatrix_spatial_weight <- matrix(0, ncol = n, nrow = n)
    distanceMatrix_temporal_weight <- matrix(0, ncol = n, nrow = n)
    distanceMatrix_spatiotemporal <- matrix(0, ncol = n, nrow = n)
    distanceMatrix_socioeconomic <- matrix(0, ncol = n, nrow = n)
    distanceMatrix_infrastructure <- matrix(0, ncol = n, nrow = n)
    distanceMatrix_spatial_normalized <- matrix(0, ncol = n, nrow = n)
    distanceMatrix_temporal_normalized <- matrix(0, ncol = n, nrow = n)
    if (n > 1) {  # guard: 2:n would wrongly iterate c(2, 1) when n == 1
      for (i in 2:n) {
        for (j in 1:(i - 1)) {
          points1 <- as.matrix(m3[i, c("lon", "lat")])
          points2 <- as.matrix(m3[j, c("lon", "lat")])
          # BUG FIX: var_count/infra_count were not forwarded before, so
          # prepareArguments errored on its missing arguments, and `fun` was
          # silently ignored instead of being passed to distanceFunction.
          event1 <- prepareArguments(events, i, var_count, infra_count)
          event2 <- prepareArguments(events, j, var_count, infra_count)
          distance_event1_event2 <- distanceFunction(event1, points1, event2, points2,
                                                     dmax, tmax, var_count, infra_count,
                                                     spatial_dist_fun = fun)
          distanceMatrix_spatiotemporal[i, j] <- distance_event1_event2["spatiotemporal"]
          distanceMatrix_socioeconomic[i, j] <- distance_event1_event2["socioeconomic"]
          distanceMatrix_infrastructure[i, j] <- distance_event1_event2["infrastructural"]
          distanceMatrix_spatial_km[i, j] <- distance_event1_event2["spatial_km"]
          distanceMatrix_spatial_normalized[i, j] <- distance_event1_event2["spatial_normalized"]
          distanceMatrix_temporal[i, j] <- distance_event1_event2["temporal_days"]
          distanceMatrix_temporal_normalized[i, j] <- distance_event1_event2["temporal_normalized"]
          distanceMatrix_spatial_weight[i, j] <- distance_event1_event2["wspatial"]
          distanceMatrix_temporal_weight[i, j] <- distance_event1_event2["wtemporal"]
        }
      }
    }
    # Mirror the lower triangle so every matrix is symmetric.
    distanceMatrix_spatiotemporal <- distanceMatrix_spatiotemporal + t(distanceMatrix_spatiotemporal)
    distanceMatrix_socioeconomic <- distanceMatrix_socioeconomic + t(distanceMatrix_socioeconomic)
    distanceMatrix_infrastructure <- distanceMatrix_infrastructure + t(distanceMatrix_infrastructure)
    distanceMatrix_spatial_km <- distanceMatrix_spatial_km + t(distanceMatrix_spatial_km)
    distanceMatrix_spatial_normalized <- distanceMatrix_spatial_normalized + t(distanceMatrix_spatial_normalized)
    distanceMatrix_temporal <- distanceMatrix_temporal + t(distanceMatrix_temporal)
    distanceMatrix_temporal_normalized <- distanceMatrix_temporal_normalized + t(distanceMatrix_temporal_normalized)
    distanceMatrix_spatial_weight <- distanceMatrix_spatial_weight + t(distanceMatrix_spatial_weight)
    distanceMatrix_temporal_weight <- distanceMatrix_temporal_weight + t(distanceMatrix_temporal_weight)
    # Persist every component for later inspection.
    write.csv(distanceMatrix_spatiotemporal, "distanceMatrix_spatiotemporal.csv")
    write.csv(distanceMatrix_socioeconomic, "distanceMatrix_socioeconomic.csv")
    write.csv(distanceMatrix_infrastructure, "distanceMatrix_infrastructure.csv")
    write.csv(distanceMatrix_spatial_km, "distanceMatrix_spatial_km.csv")
    write.csv(distanceMatrix_spatial_normalized, "distanceMatrix_spatial_normalized.csv")
    write.csv(distanceMatrix_temporal, "distanceMatrix_temporal.csv")
    write.csv(distanceMatrix_temporal_normalized, "distanceMatrix_temporal_normalized.csv")
    write.csv(distanceMatrix_spatial_weight, "distanceMatrix_spatial_weight.csv")
    write.csv(distanceMatrix_temporal_weight, "distanceMatrix_temporal_weight.csv")
    # Combine the requested components into the final matrix.
    if (spatialORsocioORinfra == 'spatiotemporal') {
      distanceMatrix_final <- distanceMatrix_spatiotemporal
    } else if (spatialORsocioORinfra == 'socioeconomic') {
      distanceMatrix_final <- distanceMatrix_socioeconomic
    } else if (spatialORsocioORinfra == 'infrastructure') {
      distanceMatrix_final <- distanceMatrix_infrastructure
    } else if (spatialORsocioORinfra == 'spatiotemporal_infrastructure') {
      distanceMatrix_final <- (distanceMatrix_spatiotemporal * weight_spatiotemporal) +
        (weight_infrastructure * distanceMatrix_infrastructure)
    } else if (spatialORsocioORinfra == 'spatiotemporal_socioeconomic') {
      distanceMatrix_final <- (distanceMatrix_spatiotemporal * weight_spatiotemporal) +
        (weight_socioeconomic * distanceMatrix_socioeconomic)
    } else if (spatialORsocioORinfra == 'all') {
      distanceMatrix_final <- (distanceMatrix_spatiotemporal * weight_spatiotemporal) +
        (weight_socioeconomic * distanceMatrix_socioeconomic) +
        (weight_infrastructure * distanceMatrix_infrastructure)
    } else {
      stop("weights and spatialORsocioORinfra do not make sense.")
    }
    write.csv(distanceMatrix_final, "distanceMatrix_final.csv")
    return(distanceMatrix_final)
  }
| /documentation/scripts/distanceFunction.R | no_license | UNLSurge/distanceFunctionR | R | false | false | 12,980 | r | # this is used by the distanceFunction_processData function below
# Computes the pairwise spatiotemporal / socioeconomic / infrastructural
# distances between two events.
#
# event1, event2: character vectors laid out as
#   [population, start date ("%Y-%m-%d"), end date, category,
#    var_count socioeconomic values, infra_count proximity values,
#    infra_count density values].  Both must have the same length and the
#    socio/infrastructure values are assumed to be pre-normalized.
# points1, points2: one-row matrices of (lon, lat) coordinates.
# dmax: spatial normalisation cap in km; tmax: temporal cap in days.
# spatial_dist_fun: function returning the distance in metres between the two
#    points (defaults to geosphere's distHaversine).
# spatialORsocioORinfra: kept for interface compatibility; not used here.
#
# Returns a named numeric vector with components "spatiotemporal",
# "socioeconomic", "infrastructural", "spatial_km", "spatial_normalized",
# "temporal_days", "temporal_normalized", "wtemporal", "wspatial".
distanceFunction <-
  function(event1,
           points1,
           event2,
           points2,
           dmax,
           tmax,
           var_count,
           infra_count,
           spatial_dist_fun = distHaversine,
           spatialORsocioORinfra = 'all') {
    if (length(event1) != length(event2)) {
      stop("event vectors are not of the same length.")
    }
    # Spatial distance: metres -> km, then normalised and capped at dmax.
    d_spatial <- round(spatial_dist_fun(points1[1, ], points2[1, ]) / 1000, 2)
    dspatialnormalized <- ifelse(d_spatial > dmax, 1, round(d_spatial / dmax, 4))
    startDate_1 <- as.Date(event1[2], format = "%Y-%m-%d")
    startDate_2 <- as.Date(event2[2], format = "%Y-%m-%d")
    # The "main" event is the one that starts earlier; a tie is broken by the
    # smaller population losing (event2 becomes main when event1's population
    # is strictly smaller).
    main <- ifelse(startDate_1 < startDate_2, 'event1',
                   ifelse(startDate_1 > startDate_2, 'event2',
                          ifelse(as.numeric(event1[1]) < as.numeric(event2[1]), 'event2', 'event1')))
    if (main == 'event1') {
      mainEvent <- event1
      secondEvent <- event2
    } else {
      mainEvent <- event2
      secondEvent <- event1
    }
    startDate_1 <- as.Date(mainEvent[2], format = "%Y-%m-%d")
    endDate_1 <- as.Date(mainEvent[3], format = "%Y-%m-%d")
    startDate_2 <- as.Date(secondEvent[2], format = "%Y-%m-%d")
    # Share of the combined population belonging to the main event.
    pop <- round(as.numeric(mainEvent[1]) / (as.numeric(mainEvent[1]) + as.numeric(secondEvent[1])), 4)
    # Temporal directional distance: gap (days) between the main event's end
    # and the second event's start, scaled by the main event's duration when
    # that duration is non-zero.
    dtemporal_directional <- as.vector(ifelse(startDate_1 == endDate_1,
                                              startDate_2 - endDate_1,
                                              (startDate_2 - endDate_1) / (endDate_1 - startDate_1)))
    # A negative gap means the events overlap, which counts as zero distance
    # (replaces the old if/else whose else-branch was a no-op).
    dtemporal_directional <- max(dtemporal_directional, 0)
    dtemporalnormalized <- ifelse(dtemporal_directional > tmax, 1, round(dtemporal_directional / tmax, 4))
    # Weights derived from the main event's population share.
    wtemporal <- pop
    wspatial <- 1 - pop
    d_spatiotemporal <- dtemporalnormalized * wtemporal + dspatialnormalized * wspatial
    # Socioeconomic distance: equally weighted sum of absolute differences.
    d_socioeconomic <- 0
    if (var_count > 0) {
      var1 <- as.numeric(mainEvent[5:(4 + var_count)])
      var2 <- as.numeric(secondEvent[5:(4 + var_count)])
      d_socioeconomic <- sum(abs(var1 - var2) * round(1 / var_count, 4))
    }
    # Infrastructure distance: mean of the (equally weighted) proximity and
    # density components.
    d_infrastructure <- 0
    if (infra_count > 0) {
      infra1_proximity <- as.numeric(mainEvent[(5 + var_count):(4 + var_count + infra_count)])
      infra2_proximity <- as.numeric(secondEvent[(5 + var_count):(4 + var_count + infra_count)])
      infra1_density <- as.numeric(mainEvent[(5 + var_count + infra_count):(4 + var_count + 2 * infra_count)])
      infra2_density <- as.numeric(secondEvent[(5 + var_count + infra_count):(4 + var_count + 2 * infra_count)])
      w <- round(1 / infra_count, 4)
      d_infrastructure_proximity <- sum(abs(infra1_proximity - infra2_proximity) * w)
      d_infrastructure_density <- sum(abs(infra1_density - infra2_density) * w)
      d_infrastructure <- (0.5 * d_infrastructure_proximity) + (0.5 * d_infrastructure_density)
    }
    # BUG FIX: the result used to be built on top of a bare NA, which left a
    # stray unnamed NA as the first element of the returned vector.  All
    # callers index by name, so dropping it is safe.
    c(spatiotemporal = d_spatiotemporal,
      socioeconomic = d_socioeconomic,
      infrastructural = d_infrastructure,
      spatial_km = d_spatial,
      spatial_normalized = dspatialnormalized,
      temporal_days = dtemporal_directional,
      temporal_normalized = dtemporalnormalized,
      wtemporal = wtemporal,
      wspatial = wspatial)
  }
# Builds the pairwise distance matrices for a whole event dataset and returns
# the combined matrix selected by spatialORsocioORinfra.
#
# m3: data.frame of events with columns lon, lat, p (population),
#     event_start, event_end, eventCategory; its LAST
#     (var_count + 2 * infra_count) columns must hold, in order, the
#     socioeconomic variables, the infrastructure proximity variables and the
#     infrastructure density variables.
# dmax, tmax: spatial (km) and temporal (days) normalisation caps, forwarded
#     to distanceFunction.
# fun: point-to-point spatial distance function (returns metres), e.g.
#     distHaversine from the geosphere package.
# weight_spatiotemporal, weight_socioeconomic: component weights; must sum to
#     at most 1.  The remainder is the infrastructure weight.
# spatialORsocioORinfra: which component(s) form the returned matrix.
#
# Side effects: writes every component matrix plus the final matrix to CSV
# files in the working directory.
# Returns: an n x n symmetric numeric matrix.
distanceFunction_processData <-
  function(m3,
           dmax = 10000.00,
           tmax = 30,
           var_count = 0,
           infra_count = 0,
           fun = distHaversine,
           weight_spatiotemporal = 0.33,
           weight_socioeconomic = 0.33,
           spatialORsocioORinfra = 'all') {
    if ((weight_spatiotemporal + weight_socioeconomic) > 1) {
      stop(paste("weights are not correct, weight_spatiotemporal: ",
                 weight_spatiotemporal, ", weight_socioeconomic: ",
                 weight_socioeconomic, ".", sep = ""))
    }
    # Whatever weight remains is assigned to the infrastructure component.
    # Computed up front so every branch at the bottom can use it (it used to
    # be defined only inside the 'all' branch, which made the
    # 'spatiotemporal_infrastructure' branch fail with an undefined variable).
    weight_infrastructure <- 1 - (weight_spatiotemporal + weight_socioeconomic)

    # Flattens row rowNum of `events` into the character vector layout that
    # distanceFunction expects: [population, start date, end date, category,
    # socio vars, proximity vars, density vars].
    prepareArguments <- function(events, rowNum, var_count, infra_count) {
      # Index of the last column that is not a socio/infrastructure variable.
      cols_not_variables <- ncol(events) - (var_count + (2 * infra_count))
      event_arg <- NA
      event_arg[1] <- events$p[rowNum]
      event_arg[2] <- as.character(events$event_start[rowNum], format = "%Y-%m-%d")
      event_arg[3] <- as.character(events$event_end[rowNum], format = "%Y-%m-%d")
      event_arg[4] <- events$eventCategory[rowNum]
      # The socio, proximity and density columns are contiguous, so one loop
      # appends them in order.  seq_len() also fixes the old
      # `1:number_of_variables` bug, which mis-indexed columns whenever
      # var_count == 0 but infra_count > 0 (1:0 iterates over c(1, 0)).
      for (k in seq_len(var_count + 2 * infra_count)) {
        event_arg[4 + k] <- as.vector(events[rowNum, (cols_not_variables + k)])
      }
      return(event_arg)
    }

    events <- m3
    n <- nrow(m3)
    # One lower-triangular accumulator per distance component.
    distanceMatrix_spatial_km <- matrix(0, ncol = n, nrow = n)
    distanceMatrix_temporal <- matrix(0, ncol = n, nrow = n)
    distanceMatrix_spatial_weight <- matrix(0, ncol = n, nrow = n)
    distanceMatrix_temporal_weight <- matrix(0, ncol = n, nrow = n)
    distanceMatrix_spatiotemporal <- matrix(0, ncol = n, nrow = n)
    distanceMatrix_socioeconomic <- matrix(0, ncol = n, nrow = n)
    distanceMatrix_infrastructure <- matrix(0, ncol = n, nrow = n)
    distanceMatrix_spatial_normalized <- matrix(0, ncol = n, nrow = n)
    distanceMatrix_temporal_normalized <- matrix(0, ncol = n, nrow = n)
    if (n > 1) {  # guard: 2:n would wrongly iterate c(2, 1) when n == 1
      for (i in 2:n) {
        for (j in 1:(i - 1)) {
          points1 <- as.matrix(m3[i, c("lon", "lat")])
          points2 <- as.matrix(m3[j, c("lon", "lat")])
          # BUG FIX: var_count/infra_count were not forwarded before, so
          # prepareArguments errored on its missing arguments, and `fun` was
          # silently ignored instead of being passed to distanceFunction.
          event1 <- prepareArguments(events, i, var_count, infra_count)
          event2 <- prepareArguments(events, j, var_count, infra_count)
          distance_event1_event2 <- distanceFunction(event1, points1, event2, points2,
                                                     dmax, tmax, var_count, infra_count,
                                                     spatial_dist_fun = fun)
          distanceMatrix_spatiotemporal[i, j] <- distance_event1_event2["spatiotemporal"]
          distanceMatrix_socioeconomic[i, j] <- distance_event1_event2["socioeconomic"]
          distanceMatrix_infrastructure[i, j] <- distance_event1_event2["infrastructural"]
          distanceMatrix_spatial_km[i, j] <- distance_event1_event2["spatial_km"]
          distanceMatrix_spatial_normalized[i, j] <- distance_event1_event2["spatial_normalized"]
          distanceMatrix_temporal[i, j] <- distance_event1_event2["temporal_days"]
          distanceMatrix_temporal_normalized[i, j] <- distance_event1_event2["temporal_normalized"]
          distanceMatrix_spatial_weight[i, j] <- distance_event1_event2["wspatial"]
          distanceMatrix_temporal_weight[i, j] <- distance_event1_event2["wtemporal"]
        }
      }
    }
    # Mirror the lower triangle so every matrix is symmetric.
    distanceMatrix_spatiotemporal <- distanceMatrix_spatiotemporal + t(distanceMatrix_spatiotemporal)
    distanceMatrix_socioeconomic <- distanceMatrix_socioeconomic + t(distanceMatrix_socioeconomic)
    distanceMatrix_infrastructure <- distanceMatrix_infrastructure + t(distanceMatrix_infrastructure)
    distanceMatrix_spatial_km <- distanceMatrix_spatial_km + t(distanceMatrix_spatial_km)
    distanceMatrix_spatial_normalized <- distanceMatrix_spatial_normalized + t(distanceMatrix_spatial_normalized)
    distanceMatrix_temporal <- distanceMatrix_temporal + t(distanceMatrix_temporal)
    distanceMatrix_temporal_normalized <- distanceMatrix_temporal_normalized + t(distanceMatrix_temporal_normalized)
    distanceMatrix_spatial_weight <- distanceMatrix_spatial_weight + t(distanceMatrix_spatial_weight)
    distanceMatrix_temporal_weight <- distanceMatrix_temporal_weight + t(distanceMatrix_temporal_weight)
    # Persist every component for later inspection.
    write.csv(distanceMatrix_spatiotemporal, "distanceMatrix_spatiotemporal.csv")
    write.csv(distanceMatrix_socioeconomic, "distanceMatrix_socioeconomic.csv")
    write.csv(distanceMatrix_infrastructure, "distanceMatrix_infrastructure.csv")
    write.csv(distanceMatrix_spatial_km, "distanceMatrix_spatial_km.csv")
    write.csv(distanceMatrix_spatial_normalized, "distanceMatrix_spatial_normalized.csv")
    write.csv(distanceMatrix_temporal, "distanceMatrix_temporal.csv")
    write.csv(distanceMatrix_temporal_normalized, "distanceMatrix_temporal_normalized.csv")
    write.csv(distanceMatrix_spatial_weight, "distanceMatrix_spatial_weight.csv")
    write.csv(distanceMatrix_temporal_weight, "distanceMatrix_temporal_weight.csv")
    # Combine the requested components into the final matrix.
    if (spatialORsocioORinfra == 'spatiotemporal') {
      distanceMatrix_final <- distanceMatrix_spatiotemporal
    } else if (spatialORsocioORinfra == 'socioeconomic') {
      distanceMatrix_final <- distanceMatrix_socioeconomic
    } else if (spatialORsocioORinfra == 'infrastructure') {
      distanceMatrix_final <- distanceMatrix_infrastructure
    } else if (spatialORsocioORinfra == 'spatiotemporal_infrastructure') {
      distanceMatrix_final <- (distanceMatrix_spatiotemporal * weight_spatiotemporal) +
        (weight_infrastructure * distanceMatrix_infrastructure)
    } else if (spatialORsocioORinfra == 'spatiotemporal_socioeconomic') {
      distanceMatrix_final <- (distanceMatrix_spatiotemporal * weight_spatiotemporal) +
        (weight_socioeconomic * distanceMatrix_socioeconomic)
    } else if (spatialORsocioORinfra == 'all') {
      distanceMatrix_final <- (distanceMatrix_spatiotemporal * weight_spatiotemporal) +
        (weight_socioeconomic * distanceMatrix_socioeconomic) +
        (weight_infrastructure * distanceMatrix_infrastructure)
    } else {
      stop("weights and spatialORsocioORinfra do not make sense.")
    }
    write.csv(distanceMatrix_final, "distanceMatrix_final.csv")
    return(distanceMatrix_final)
  }
|
library("pt")

########################
# These routines test drawing decision trees.
########################

# One decision node with three gambles (four, three and two equally likely
# outcomes respectively).
choice_ids <- c(1, 1, 1, 1, 1, 1, 1, 1, 1)
gamble_ids <- c(1, 1, 1, 1, 2, 2, 2, 3, 3)
outcome_ids <- c(1, 2, 3, 4, 1, 2, 3, 1, 2)
objective_consequences <- c(10, 14, 21, 2, 40, 45, 30, 100, 200)
probability_strings <-
  c("1/4", "1/4", "1/4", "1/4", "1/3", "1/3", "1/3", "1/2", "1/2")
my_choices <- Choices(choice_ids, gamble_ids, outcome_ids,
  objective_consequences, probability_strings)
my_choices

drawChoices(my_choices,
  decision_square_x=0.2, decision_square_edge_length=0.05,
  circle_radius=0.025, y_split_gap=0.1, x_split_offset=0.03,
  probability_text_digits=3, y_probability_text_offset=0.015,
  y_value_text_offset=0.005, x_value_text_offset=0.025,
  probability_text_font_colour="red", probability_text_font_size=11,
  objective_consequence_text_font_colour="blue",
  objective_consequence_text_font_size=11, label=c("A","B","C"),
  label_font_colour=c("orange","magenta","green"), label_font_size=c(11,11,11),
  label_positions=list(c(0.26,0.7),c(0.26,0.45),c(0.26,0.1)))

########################
# These routines test drawing the simplex.
########################

# Style arguments shared by every drawSimplex call below (previously these
# ~25 named arguments were copy-pasted into each of the four calls).
simplex_base_args <- list(
  line_dot_density=100,
  draw_ev_flag=TRUE, ev_colour="black",
  draw_pt_flag=TRUE, alpha=0.61, beta=0.724, pt_colour="red",
  draw_utility_flag=TRUE, eu_colour="purple",
  start_points=list(c(0.1,0.9),c(0.2,0.8),c(0.3,0.7),c(0.4,0.6),c(0.5,0.5),c(0.6,0.4),c(0.7,0.3),c(0.8,0.2),c(0.9,0.1)))

# Annotation arguments shared by the fully labelled simplex drawings.
simplex_label_args <- list(
  labels=c("A","B","C","D","increasing preference"),
  label_positions=list(c(0.05,0.02),c(0.07,0.12),c(0.92,0.02),c(0.95,0.10),c(0.7,0.7)),
  label_colours=c("red","green","blue","orange","red"),
  label_font_sizes=c(12,12,12,12,16),
  label_font_faces=c("plain","plain","plain","plain","bold"),
  label_rotations=c(0,0,0,0,-45),
  circle_radii=c(0.005,0.005,0.005,0.005),
  circle_outline_colours=c("black","black","black","black"),
  circle_fill_colours=c("red","green","blue","orange"),
  circle_positions=list(c(0,0),c(0.01,0.1),c(0.89,0),c(0.9,0.1)),
  lines=list(c(0,0,0.01,0.1),c(0.89,0,0.9,0.1),c(0.01,0.1,0.9,0.1),c(0,0,0.89,0)),
  line_widths=c(1, 1, 1, 1),
  line_styles=c("dashed", "dashed", "dashed", "dashed"),
  line_colours=c("red","red","red","red"),
  arrows=list(c(0.8,0.5,0.5,0.8)),
  arrow_widths=c(2),
  arrow_styles=c("solid"),
  arrow_colours=c("red"))

# Draws a fully annotated simplex for outcomes (x1, x2, x3) under a power
# utility with loss-aversion parameter lambda.
draw_labelled_simplex <- function(x1, x2, x3, lambda) {
  u <- Utility(fun="power", par=c(alpha=0.88, beta=0.88, lambda=lambda))
  do.call(drawSimplex,
    c(list(x1=x1, x2=x2, x3=x3, utility=u),
      simplex_base_args, simplex_label_args))
}

draw_labelled_simplex(0, 100, 200, lambda=2.25)
# draw the simplex for the Allais common consequence paradox
draw_labelled_simplex(0, 1000000, 5000000, lambda=1)
# draw the simplex using the following outcomes (x1=0, x2=1, x3=2)
draw_labelled_simplex(0, 1, 2, lambda=1)

# draw a minimal version of the above, omitting extra labelling
my_utility <- Utility(fun="power",
  par=c(alpha=0.88, beta=0.88, lambda=1))
do.call(drawSimplex,
  c(list(x1=0, x2=1, x3=2, utility=my_utility), simplex_base_args))
| /tests/Test_Decision_Tree.R | no_license | torkar/pt | R | false | false | 5,751 | r | library("pt")
########################
# These routines test drawing decision trees.
########################

# One decision node with three gambles (four, three and two equally likely
# outcomes respectively).
choice_ids <- c(1, 1, 1, 1, 1, 1, 1, 1, 1)
gamble_ids <- c(1, 1, 1, 1, 2, 2, 2, 3, 3)
outcome_ids <- c(1, 2, 3, 4, 1, 2, 3, 1, 2)
objective_consequences <- c(10, 14, 21, 2, 40, 45, 30, 100, 200)
probability_strings <-
  c("1/4", "1/4", "1/4", "1/4", "1/3", "1/3", "1/3", "1/2", "1/2")
my_choices <- Choices(choice_ids, gamble_ids, outcome_ids,
  objective_consequences, probability_strings)
my_choices

drawChoices(my_choices,
  decision_square_x=0.2, decision_square_edge_length=0.05,
  circle_radius=0.025, y_split_gap=0.1, x_split_offset=0.03,
  probability_text_digits=3, y_probability_text_offset=0.015,
  y_value_text_offset=0.005, x_value_text_offset=0.025,
  probability_text_font_colour="red", probability_text_font_size=11,
  objective_consequence_text_font_colour="blue",
  objective_consequence_text_font_size=11, label=c("A","B","C"),
  label_font_colour=c("orange","magenta","green"), label_font_size=c(11,11,11),
  label_positions=list(c(0.26,0.7),c(0.26,0.45),c(0.26,0.1)))

########################
# These routines test drawing the simplex.
########################

# Style arguments shared by every drawSimplex call below (previously these
# ~25 named arguments were copy-pasted into each of the four calls).
simplex_base_args <- list(
  line_dot_density=100,
  draw_ev_flag=TRUE, ev_colour="black",
  draw_pt_flag=TRUE, alpha=0.61, beta=0.724, pt_colour="red",
  draw_utility_flag=TRUE, eu_colour="purple",
  start_points=list(c(0.1,0.9),c(0.2,0.8),c(0.3,0.7),c(0.4,0.6),c(0.5,0.5),c(0.6,0.4),c(0.7,0.3),c(0.8,0.2),c(0.9,0.1)))

# Annotation arguments shared by the fully labelled simplex drawings.
simplex_label_args <- list(
  labels=c("A","B","C","D","increasing preference"),
  label_positions=list(c(0.05,0.02),c(0.07,0.12),c(0.92,0.02),c(0.95,0.10),c(0.7,0.7)),
  label_colours=c("red","green","blue","orange","red"),
  label_font_sizes=c(12,12,12,12,16),
  label_font_faces=c("plain","plain","plain","plain","bold"),
  label_rotations=c(0,0,0,0,-45),
  circle_radii=c(0.005,0.005,0.005,0.005),
  circle_outline_colours=c("black","black","black","black"),
  circle_fill_colours=c("red","green","blue","orange"),
  circle_positions=list(c(0,0),c(0.01,0.1),c(0.89,0),c(0.9,0.1)),
  lines=list(c(0,0,0.01,0.1),c(0.89,0,0.9,0.1),c(0.01,0.1,0.9,0.1),c(0,0,0.89,0)),
  line_widths=c(1, 1, 1, 1),
  line_styles=c("dashed", "dashed", "dashed", "dashed"),
  line_colours=c("red","red","red","red"),
  arrows=list(c(0.8,0.5,0.5,0.8)),
  arrow_widths=c(2),
  arrow_styles=c("solid"),
  arrow_colours=c("red"))

# Draws a fully annotated simplex for outcomes (x1, x2, x3) under a power
# utility with loss-aversion parameter lambda.
draw_labelled_simplex <- function(x1, x2, x3, lambda) {
  u <- Utility(fun="power", par=c(alpha=0.88, beta=0.88, lambda=lambda))
  do.call(drawSimplex,
    c(list(x1=x1, x2=x2, x3=x3, utility=u),
      simplex_base_args, simplex_label_args))
}

draw_labelled_simplex(0, 100, 200, lambda=2.25)
# draw the simplex for the Allais common consequence paradox
draw_labelled_simplex(0, 1000000, 5000000, lambda=1)
# draw the simplex using the following outcomes (x1=0, x2=1, x3=2)
draw_labelled_simplex(0, 1, 2, lambda=1)

# draw a minimal version of the above, omitting extra labelling
my_utility <- Utility(fun="power",
  par=c(alpha=0.88, beta=0.88, lambda=1))
do.call(drawSimplex,
  c(list(x1=0, x2=1, x3=2, utility=my_utility), simplex_base_args))
|
context('summary_stats')

# Every non-numeric input must be rejected with the same message.
test_that('summary_stats returns the appropriate error', {
  non_numeric_inputs <- list('mtcars$mpg', as.factor(mtcars$mpg))
  for (bad_input in non_numeric_inputs) {
    expect_error(summary_stats(bad_input), 'data must be numeric')
  }
})
| /tests/testthat/test-summary-stats.R | no_license | anandkumar90/descriptr | R | false | false | 235 | r | context('summary_stats')
# Every non-numeric input must be rejected with the same message.
test_that('summary_stats returns the appropriate error', {
  for (bad_input in list('mtcars$mpg', as.factor(mtcars$mpg))) {
    expect_error(summary_stats(bad_input), 'data must be numeric')
  }
})
|
# DB Connection ----
# Opens the shared database connection (db_connect is defined elsewhere in
# the app).
con <- db_connect()

# Pull each data table from SQL ----
# Helper: read an entire table from the Shiny.dbo schema.  Replaces six
# copy-pasted sql_query/dbGetQuery pairs.
fetch_table <- function(table_name) {
  dbGetQuery(con, paste0('Select * from Shiny.dbo.', table_name))
}
df_topics <- fetch_table('topics')
df_reviews <- fetch_table('reviews')
df_homeworks <- fetch_table('homeworks')
df_students <- fetch_table('students')
df_reviewGrades <- fetch_table('reviewGrades')
df_homeworkGrades <- fetch_table('homeworkGrades')

# Allows the grade tables to change and be updated reactively.
# NOTE(review): this binding shadows shiny::reactive().  The reactive({...})
# calls below still work because R skips non-function bindings when resolving
# a call target, but the name is kept only because other app files may read
# reactive$... directly; consider renaming it (e.g. to rv) across the app.
reactive <- reactiveValues(df_reviewGrades = df_reviewGrades, df_homeworkGrades = df_homeworkGrades)

# Database Methods
# Get Homework Grades -- joins homework grades with students and homeworks
# and adds a "first last" display-name column.
getHomeworkGrades <- reactive({
  merge(reactive$df_homeworkGrades, df_students) %>% merge(df_homeworks) %>%
    mutate(firstLast = paste(first_name, last_name))
})

# Get Review Grades -- joins review grades with students and reviews and adds
# a "first last" display-name column.
getReviewGrades <- reactive({
  merge(reactive$df_reviewGrades, df_students) %>% merge(df_reviews) %>%
    mutate(firstLast = paste(first_name, last_name))
})
| /R/Student Dashboard/dataintake.R | no_license | anahayne/mastery-grade-dashboard | R | false | false | 1,379 | r | # DB Connnection ----
# Opens the database connection shared by all queries below.
# NOTE(review): db_connect() is a project helper; the connection is never
# closed in this file -- confirm it is managed/closed elsewhere in the app.
con <- db_connect()
# Pull each data table from SQL ----
# Each block overwrites `sql_query` and keeps the result in its own data.frame.
#Topics
sql_query <- 'Select * from Shiny.dbo.topics'
df_topics <- dbGetQuery(con, sql_query)
#Reviews
sql_query <- 'Select * from Shiny.dbo.reviews'
df_reviews <- dbGetQuery(con, sql_query)
#Homeworks
sql_query <- 'Select * from Shiny.dbo.homeworks'
df_homeworks <- dbGetQuery(con, sql_query)
#Students
sql_query <- 'Select * from Shiny.dbo.students'
df_students <- dbGetQuery(con, sql_query)
#Review Grades
sql_query <- 'Select * from Shiny.dbo.reviewGrades'
df_reviewGrades <- dbGetQuery(con, sql_query)
#Homework Grades
sql_query <- 'Select * from Shiny.dbo.homeworkGrades'
df_homeworkGrades <- dbGetQuery(con, sql_query)
# Allows values to change and be updated
# NOTE(review): the name `reactive` shadows shiny::reactive(). The calls below
# still resolve shiny's function (R skips non-function bindings when a name is
# used in call position), but renaming this object to e.g. `rv` would be safer.
# Left unchanged because other files may reference `reactive$...`.
reactive <- reactiveValues(df_reviewGrades = df_reviewGrades, df_homeworkGrades = df_homeworkGrades)
# Database Methods
# Get Homework Grades-- pulls grades for each student and aggregates all assignments
# Joins homework grades with student and homework metadata, then adds a
# display-name column (`firstLast`).
getHomeworkGrades <- reactive({
merge(reactive$df_homeworkGrades, df_students) %>% merge(df_homeworks) %>%
mutate(firstLast = paste(first_name, last_name))
})
# Get Review Grades -- pulls reviews for each student and aggregates all assignments
# Same join pattern as above, using review grades and review metadata.
getReviewGrades <- reactive({
merge(reactive$df_reviewGrades, df_students) %>% merge(df_reviews) %>%
mutate(firstLast = paste(first_name, last_name))
})
|
#' @title Old Node Networks
#'
#' @description Calls old functions to create data.frames describing all the
#'   observation nodes in various versions of DABOM, including how they are
#'   related to one another. Kept for backwards compatibility checks.
#'
#' @author Kevin See
#'
#' @import dplyr stringr
#' @export
#' @return A named list of node-network data.frames, one element per site
#'   configuration.
#' @examples \dontrun{writeOldNetworks()}
writeOldNetworks <- function() {
  # Each element is produced by the legacy site-specific writer function.
  list("LowerGranite" = writeLGRNodeNetwork(),
       "PriestRapids" = writePRDNodeNetwork(),
       "Prosser" = writePRONodeNetwork(),
       "Tumwater" = writeTUMNodeNetwork(),
       "Tumwater_noUWE" = writeTUMNodeNetwork_noUWE())
}
| /R/writeOldNetworks.R | permissive | KevinSee/PITcleanr | R | false | false | 631 | r | #' @title Old Node Networks
#'
#' @description Calls old functions to create data.frames describing all the
#'   observation nodes in various versions of DABOM, including how they are
#'   related to one another. Kept for backwards compatibility checks.
#'
#' @author Kevin See
#'
#' @import dplyr stringr
#' @export
#' @return A named list of node-network data.frames, one element per site
#'   configuration.
#' @examples \dontrun{writeOldNetworks()}
writeOldNetworks <- function() {
  # Each element is produced by the legacy site-specific writer function.
  list("LowerGranite" = writeLGRNodeNetwork(),
       "PriestRapids" = writePRDNodeNetwork(),
       "Prosser" = writePRONodeNetwork(),
       "Tumwater" = writeTUMNodeNetwork(),
       "Tumwater_noUWE" = writeTUMNodeNetwork_noUWE())
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllClasses.R, R/categories.R
\docType{class}
\name{Categories-class}
\alias{Categories-class}
\alias{Categories}
\alias{ids}
\alias{ids<-}
\alias{values}
\alias{values<-}
\alias{Categories}
\alias{Category-class}
\alias{Category}
\alias{[,Categories,ANY,ANY-method}
\alias{[,Categories,numeric,ANY-method}
\alias{[<-,Categories,ANY,ANY,ANY-method}
\alias{names,Categories-method}
\alias{values,Categories-method}
\alias{ids,Categories-method}
\alias{names<-,Categories-method}
\alias{values<-,Categories-method}
\alias{ids<-,Categories-method}
\title{Categories in CategoricalVariables}
\usage{
Categories(..., data = NULL)
Category(..., data = NULL)
\S4method{[}{Categories,ANY,ANY}(x, i, j, ..., drop = TRUE)
\S4method{[}{Categories,numeric,ANY}(x, i, j, ..., drop = TRUE)
\S4method{[}{Categories,ANY,ANY,ANY}(x, i, j, ...) <- value
\S4method{names}{Categories}(x)
\S4method{values}{Categories}(x)
\S4method{ids}{Categories}(x)
\S4method{names}{Categories}(x) <- value
\S4method{values}{Categories}(x) <- value
\S4method{ids}{Categories}(x) <- value
}
\arguments{
\item{...}{additional arguments to [, ignored}
\item{data}{For the constructor functions \code{Category} and
\code{Categories}, you can either pass in attributes via \code{...} or you
can create the objects with a fully defined \code{list} representation of
the objects via the \code{data} argument. See the examples.}
\item{x}{For the attribute getters and setters, an object of class
Category or Categories}
\item{i}{For the [ methods, just as with list extract methods}
\item{j}{Invalid argument to [, but in the generic's signature}
\item{drop}{Invalid argument to [, but in the generic's signature}
\item{value}{For [<-, the replacement Category to insert}
}
\description{
CategoricalVariables, as well as the array types composed from
Categoricals, contain Categories. Categories are a subclass of list that
contains only Category objects. Category objects themselves subclass list
and contain the following fields: "name", "id", "numeric_value", "missing",
and optionally "selected".
}
\examples{
cat.a <- Category(name="First", id=1, numeric_value=1, missing=FALSE)
cat.b <- Category(data=list(name="First", id=1, numeric_value=1, missing=FALSE))
identical(cat.a, cat.b)
cat.c <- Category(name="Second", id=2)
cats.1 <- Categories(cat.a, cat.c)
cats.2 <- Categories(data=list(cat.a, cat.c))
identical(cats.1, cats.2)
}
| /man/Categories.Rd | no_license | persephonet/rcrunch | R | false | true | 2,487 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllClasses.R, R/categories.R
\docType{class}
\name{Categories-class}
\alias{Categories-class}
\alias{Categories}
\alias{ids}
\alias{ids<-}
\alias{values}
\alias{values<-}
\alias{Categories}
\alias{Category-class}
\alias{Category}
\alias{[,Categories,ANY,ANY-method}
\alias{[,Categories,numeric,ANY-method}
\alias{[<-,Categories,ANY,ANY,ANY-method}
\alias{names,Categories-method}
\alias{values,Categories-method}
\alias{ids,Categories-method}
\alias{names<-,Categories-method}
\alias{values<-,Categories-method}
\alias{ids<-,Categories-method}
\title{Categories in CategoricalVariables}
\usage{
Categories(..., data = NULL)
Category(..., data = NULL)
\S4method{[}{Categories,ANY,ANY}(x, i, j, ..., drop = TRUE)
\S4method{[}{Categories,numeric,ANY}(x, i, j, ..., drop = TRUE)
\S4method{[}{Categories,ANY,ANY,ANY}(x, i, j, ...) <- value
\S4method{names}{Categories}(x)
\S4method{values}{Categories}(x)
\S4method{ids}{Categories}(x)
\S4method{names}{Categories}(x) <- value
\S4method{values}{Categories}(x) <- value
\S4method{ids}{Categories}(x) <- value
}
\arguments{
\item{...}{additional arguments to [, ignored}
\item{data}{For the constructor functions \code{Category} and
\code{Categories}, you can either pass in attributes via \code{...} or you
can create the objects with a fully defined \code{list} representation of
the objects via the \code{data} argument. See the examples.}
\item{x}{For the attribute getters and setters, an object of class
Category or Categories}
\item{i}{For the [ methods, just as with list extract methods}
\item{j}{Invalid argument to [, but in the generic's signature}
\item{drop}{Invalid argument to [, but in the generic's signature}
\item{value}{For [<-, the replacement Category to insert}
}
\description{
CategoricalVariables, as well as the array types composed from
Categoricals, contain Categories. Categories are a subclass of list that
contains only Category objects. Category objects themselves subclass list
and contain the following fields: "name", "id", "numeric_value", "missing",
and optionally "selected".
}
\examples{
cat.a <- Category(name="First", id=1, numeric_value=1, missing=FALSE)
cat.b <- Category(data=list(name="First", id=1, numeric_value=1, missing=FALSE))
identical(cat.a, cat.b)
cat.c <- Category(name="Second", id=2)
cats.1 <- Categories(cat.a, cat.c)
cats.2 <- Categories(data=list(cat.a, cat.c))
identical(cats.1, cats.2)
}
|
library(pkgnet)
## Declare paths explicitly as currently required by pkgnet
## NOTE(review): `pkg_path` is assigned but never used below -- confirm whether
## a pkgnet call was meant to receive it, or drop the line.
pkg_path <- system.file(package = "EpiNow2")
report_path <- file.path("inst/pkg-structure", "EpiNow2_report.html")
## Generate pkg report
## Writes pkgnet's HTML package-structure report for EpiNow2 to `report_path`.
report <- CreatePackageReport("EpiNow2",
report_path = report_path)
| /inst/pkg-structure/generate-pkgstructure-report.R | permissive | pearsonca/EpiNow2 | R | false | false | 316 | r | library(pkgnet)
## Declare paths explicitly as currently required by pkgnet
## NOTE(review): `pkg_path` is assigned but never used below -- confirm whether
## a pkgnet call was meant to receive it, or drop the line.
pkg_path <- system.file(package = "EpiNow2")
report_path <- file.path("inst/pkg-structure", "EpiNow2_report.html")
## Generate pkg report
## Writes pkgnet's HTML package-structure report for EpiNow2 to `report_path`.
report <- CreatePackageReport("EpiNow2",
report_path = report_path)
|
# Convert a 25x25 transition probability matrix (TPM) into the 85x85
# run-tracking TPM.
#
# The 25 states appear to be 8 base states x 3 sub-groups (blocks 1:8, 9:16,
# 17:24) plus one absorbing state (25) -- consistent with a baseball Markov
# chain, but confirm against the calling code. The output is assembled from
# three 28x28 within-block matrices (A1-A3), two adjacent-block matrices
# (B1, B2), one skip-block matrix (C), and an absorbing final row/column.
#
# Inputs:  tpm.mat - 25x25 TPM.
# Returns: 85x85 run-tracking TPM.
tpm.convert <- function(tpm.mat){
  # Build the 28x28 within-block matrix from the 8x8 sub-matrix whose
  # rows/cols start after `off` (off = 0, 8, 16 for blocks A1, A2, A3).
  make_A_block <- function(off) {
    idx <- off + 1:8
    A00 <- tpm.mat[idx, idx]
    A00[, 1] <- 0; A00[-1, 2:4] <- 0; A00[-c(2:4), 5:7] <- 0; A00[-c(5:7), 8] <- 0
    A01 <- tpm.mat[idx, idx]
    A01[-1, 1] <- 0; A01[-c(2:4), 2:4] <- 0; A01[-c(5:7), 5:7] <- 0; A01[-8, 8] <- 0
    A02 <- tpm.mat[idx, off + 1:7]
    A02[-c(2:4), 1] <- 0; A02[-c(5:7), 2:4] <- 0; A02[-8, 5:7] <- 0
    A03 <- tpm.mat[idx, off + 1:4]
    A03[-c(5:7), 1] <- 0; A03[1:7, 2:4] <- 0; A03[8, -c(2:4)] <- 0
    A04 <- matrix(c(rep(0, 7), tpm.mat[off + 8, off + 1]), 8, 1)
    rbind(cbind(A00, A01, A02, A03, A04),
          cbind(diag(8), matrix(0, 8, 20)),
          cbind(diag(7), matrix(0, 7, 21)),
          cbind(diag(4), matrix(0, 4, 24)),
          c(1, rep(0, 27)))
  }
  # Build the 28x28 adjacent-block matrix: rows start after `roff`, columns
  # after `coff` (B1: roff=0, coff=8; B2: roff=8, coff=16).
  make_B_block <- function(roff, coff) {
    ridx <- roff + 1:8
    cidx <- coff + 1:8
    B00 <- tpm.mat[ridx, cidx]
    B00[2:8, 1] <- 0; B00[5:7, 2:4] <- 0; B00[8, 1:7] <- 0
    B01 <- tpm.mat[ridx, cidx]
    B01[-c(2:4), 1] <- 0; B01[-c(5:7), 2:4] <- 0; B01[c(5:7), 5:7] <- 0; B01[8, -c(1:7)] <- 0
    B02 <- tpm.mat[ridx, coff + 1:7]
    B02[-c(5:7), 1] <- 0; B02[1:7, 2:7] <- 0; B02[8, -c(2:4)] <- 0
    B03 <- matrix(c(rep(0, 7), tpm.mat[roff + 8, coff + 1], rep(0, 24)), 8, 4)
    rbind(cbind(B00, B01, B02, B03, matrix(0, 8, 1)), matrix(0, 20, 28))
  }
  # The three within-block and two adjacent-block matrices only differ by
  # their row/column offsets into tpm.mat.
  tpm.A1 <- make_A_block(0)
  tpm.A2 <- make_A_block(8)
  tpm.A3 <- make_A_block(16)
  tpm.B1 <- make_B_block(0, 8)
  tpm.B2 <- make_B_block(8, 16)
  # Matrix C: skip-block transitions (block 1 -> block 3); occurs only once,
  # so it is built inline.
  C00 <- tpm.mat[1:8, 17:24]
  C00[c(6, 7), 1] <- 0; C00[8, c(3, 4)] <- 0
  C01 <- tpm.mat[1:8, 17:24]
  C01[1:7, 2:8] <- 0; C01[-c(6, 7), 1] <- 0; C01[8, -c(3, 4)] <- 0
  tpm.C <- rbind(cbind(C00, C01, matrix(0, 8, 12)), matrix(0, 20, 28))
  # Assemble the full block-upper-triangular 85x85 matrix; column 85 carries
  # transitions into the absorbing state (original column 25) and row 85 is
  # absorbing.
  rbind(cbind(tpm.A1, tpm.B1, tpm.C, c(tpm.mat[1:8, 25], rep(0, 20))),
        cbind(matrix(0, 28, 28), tpm.A2, tpm.B2, c(tpm.mat[9:16, 25], rep(0, 20))),
        cbind(matrix(0, 28, 56), tpm.A3, c(tpm.mat[17:24, 25], rep(0, 20))),
        c(rep(0, 84), 1))
}
| /R_scripts/1_TPM_Functions/tpm.convert.R | no_license | basiliszag/mcbaseball | R | false | false | 3,419 | r | #Convert 25x25 TPM to 85x85 Run Tracking TPM
# Convert a 25x25 transition probability matrix (TPM) into the 85x85
# run-tracking TPM.
#
# The 25 states appear to be 8 base states x 3 sub-groups (blocks 1:8, 9:16,
# 17:24) plus one absorbing state (25) -- consistent with a baseball Markov
# chain, but confirm against the calling code. The output is assembled from
# three 28x28 within-block matrices (A1-A3), two adjacent-block matrices
# (B1, B2), one skip-block matrix (C), and an absorbing final row/column.
#
# Inputs:  tpm.mat - 25x25 TPM.
# Returns: 85x85 run-tracking TPM.
tpm.convert <- function(tpm.mat){
  # Build the 28x28 within-block matrix from the 8x8 sub-matrix whose
  # rows/cols start after `off` (off = 0, 8, 16 for blocks A1, A2, A3).
  make_A_block <- function(off) {
    idx <- off + 1:8
    A00 <- tpm.mat[idx, idx]
    A00[, 1] <- 0; A00[-1, 2:4] <- 0; A00[-c(2:4), 5:7] <- 0; A00[-c(5:7), 8] <- 0
    A01 <- tpm.mat[idx, idx]
    A01[-1, 1] <- 0; A01[-c(2:4), 2:4] <- 0; A01[-c(5:7), 5:7] <- 0; A01[-8, 8] <- 0
    A02 <- tpm.mat[idx, off + 1:7]
    A02[-c(2:4), 1] <- 0; A02[-c(5:7), 2:4] <- 0; A02[-8, 5:7] <- 0
    A03 <- tpm.mat[idx, off + 1:4]
    A03[-c(5:7), 1] <- 0; A03[1:7, 2:4] <- 0; A03[8, -c(2:4)] <- 0
    A04 <- matrix(c(rep(0, 7), tpm.mat[off + 8, off + 1]), 8, 1)
    rbind(cbind(A00, A01, A02, A03, A04),
          cbind(diag(8), matrix(0, 8, 20)),
          cbind(diag(7), matrix(0, 7, 21)),
          cbind(diag(4), matrix(0, 4, 24)),
          c(1, rep(0, 27)))
  }
  # Build the 28x28 adjacent-block matrix: rows start after `roff`, columns
  # after `coff` (B1: roff=0, coff=8; B2: roff=8, coff=16).
  make_B_block <- function(roff, coff) {
    ridx <- roff + 1:8
    cidx <- coff + 1:8
    B00 <- tpm.mat[ridx, cidx]
    B00[2:8, 1] <- 0; B00[5:7, 2:4] <- 0; B00[8, 1:7] <- 0
    B01 <- tpm.mat[ridx, cidx]
    B01[-c(2:4), 1] <- 0; B01[-c(5:7), 2:4] <- 0; B01[c(5:7), 5:7] <- 0; B01[8, -c(1:7)] <- 0
    B02 <- tpm.mat[ridx, coff + 1:7]
    B02[-c(5:7), 1] <- 0; B02[1:7, 2:7] <- 0; B02[8, -c(2:4)] <- 0
    B03 <- matrix(c(rep(0, 7), tpm.mat[roff + 8, coff + 1], rep(0, 24)), 8, 4)
    rbind(cbind(B00, B01, B02, B03, matrix(0, 8, 1)), matrix(0, 20, 28))
  }
  # The three within-block and two adjacent-block matrices only differ by
  # their row/column offsets into tpm.mat.
  tpm.A1 <- make_A_block(0)
  tpm.A2 <- make_A_block(8)
  tpm.A3 <- make_A_block(16)
  tpm.B1 <- make_B_block(0, 8)
  tpm.B2 <- make_B_block(8, 16)
  # Matrix C: skip-block transitions (block 1 -> block 3); occurs only once,
  # so it is built inline.
  C00 <- tpm.mat[1:8, 17:24]
  C00[c(6, 7), 1] <- 0; C00[8, c(3, 4)] <- 0
  C01 <- tpm.mat[1:8, 17:24]
  C01[1:7, 2:8] <- 0; C01[-c(6, 7), 1] <- 0; C01[8, -c(3, 4)] <- 0
  tpm.C <- rbind(cbind(C00, C01, matrix(0, 8, 12)), matrix(0, 20, 28))
  # Assemble the full block-upper-triangular 85x85 matrix; column 85 carries
  # transitions into the absorbing state (original column 25) and row 85 is
  # absorbing.
  rbind(cbind(tpm.A1, tpm.B1, tpm.C, c(tpm.mat[1:8, 25], rep(0, 20))),
        cbind(matrix(0, 28, 28), tpm.A2, tpm.B2, c(tpm.mat[9:16, 25], rep(0, 20))),
        cbind(matrix(0, 28, 56), tpm.A3, c(tpm.mat[17:24, 25], rep(0, 20))),
        c(rep(0, 84), 1))
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.