content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
#' Taxa occurrence
#'
#' Find the taxa that occur in a given state of Brazil.
#'
#' @param states a character vector with one or more state abbreviations.
#'   See the notes section for the list of valid abbreviations.
#' @param type type of matching to be used. \code{any} will return the taxa that
#' occur in any of the passed \code{states}. \code{only} matches taxa that
#' occur only in all provided (no more, no less) \code{states} and \code{all} matches taxa that
#' occur at least in all \code{states} passed. See examples.
#' @param taxa optional character vector to match against the states
#' @export
#' @note List of abbreviations: \url{http://en.wikipedia.org/wiki/States_of_Brazil}
#' @return a data frame
#' @examples
#' \dontrun{
#' occ.any <- occurrence(c("SP", "BA", "MG"), type = "any")
#' occ.only <- occurrence(c("SP", "BA", "MG"), type = "only")
#' occ.all <- occurrence(c("SP", "BA", "MG"), type = "all")
#' occ.taxa <- occurrence(c("SP", "BA", "MG"), type = "all", taxa = lower.taxa("Myrcia"))
#'
#' head(occ.any)
#' head(occ.only)
#' head(occ.all)
#' head(occ.taxa)
#' }
# Filter the `distribution` table by state occurrence and join the matches
# with `all.taxa`, returning id / family / search.str / occurrence columns.
# Returns NA when nothing matches. See the roxygen block above for details.
occurrence <- function(states, type = c("any", "only", "all"), taxa = NULL) {
type <- match.arg(type)
# Normalise the requested states: trim, upper-case and sort them so the
# patterns below line up with distribution$occurrence (presumably a sorted
# ";"-separated list of state codes — confirm against the data).
states <- sort(sapply(trim(states), toupper))
#res <- lapply(occurrences, match, states)
if (type == "any") {
#res <- lapply(res, function(x) any(!is.na(x)))
# Taxon occurs in at least one of the requested states.
res <- subset(distribution, grepl(paste(states, collapse = "|"), occurrence))
}
if (type == "only") {
# Exact match: occurrence must equal the sorted ";"-joined state list.
res <- subset(distribution, grepl(paste("^", paste(states, collapse = ";"), "$", sep = ""), occurrence))
}
if (type == "all") {
# All requested states must appear (in sorted order); others are allowed.
res <- subset(distribution, grepl(paste(states, collapse = ".*"), occurrence))
}
# res <- distribution[unlist(res), ]
if (nrow(res) == 0) {
# Historical contract: return NA rather than an empty data frame.
return(NA)
}
if (is.null(taxa)) {
merge(all.taxa[, c("id", "family", "search.str")], res[, c("id", "occurrence")], by = "id")
} else {
# Restrict the join to the caller-supplied taxon names.
merge(all.taxa[all.taxa$search.str %in% taxa, c("id", "family", "search.str")], res[, c("id", "occurrence")], by = "id")
}
} | /flora/R/occurrence.R | no_license | ingted/R-Examples | R | false | false | 2,032 | r | #' Taxa occurrence
#'
#' Find the taxa that occur in a given state of Brazil.
#'
#' @param states a character vector with one or more state abbreviations.
#'   See the notes section for the list of valid abbreviations.
#' @param type type of matching to be used. \code{any} will return the taxa that
#' occur in any of the passed \code{states}. \code{only} matches taxa that
#' occur only in all provided (no more, no less) \code{states} and \code{all} matches taxa that
#' occur at least in all \code{states} passed. See examples.
#' @param taxa optional character vector to match against the states
#' @export
#' @note List of abbreviations: \url{http://en.wikipedia.org/wiki/States_of_Brazil}
#' @return a data frame
#' @examples
#' \dontrun{
#' occ.any <- occurrence(c("SP", "BA", "MG"), type = "any")
#' occ.only <- occurrence(c("SP", "BA", "MG"), type = "only")
#' occ.all <- occurrence(c("SP", "BA", "MG"), type = "all")
#' occ.taxa <- occurrence(c("SP", "BA", "MG"), type = "all", taxa = lower.taxa("Myrcia"))
#'
#' head(occ.any)
#' head(occ.only)
#' head(occ.all)
#' head(occ.taxa)
#' }
# Filter the `distribution` table by state occurrence and join the matches
# with `all.taxa`, returning id / family / search.str / occurrence columns.
# Returns NA when nothing matches. See the roxygen block above for details.
occurrence <- function(states, type = c("any", "only", "all"), taxa = NULL) {
  type <- match.arg(type)
  # Normalise input: trim whitespace, upper-case and sort, so the patterns
  # below line up with distribution$occurrence (presumably a sorted
  # ";"-separated list of state codes — confirm against the data).
  # toupper() is vectorized, so no sapply() is needed.
  states <- sort(toupper(trim(states)))
  if (type == "any") {
    # Taxon occurs in at least one of the requested states.
    res <- subset(distribution, grepl(paste(states, collapse = "|"), occurrence))
  } else if (type == "only") {
    # Exact match: occurrence must equal the sorted ";"-joined state list.
    res <- subset(distribution, grepl(paste("^", paste(states, collapse = ";"), "$", sep = ""), occurrence))
  } else {
    # type == "all": every requested state must appear (in sorted order);
    # additional states are allowed.
    res <- subset(distribution, grepl(paste(states, collapse = ".*"), occurrence))
  }
  if (nrow(res) == 0) {
    # Historical contract: return NA rather than an empty data frame.
    return(NA)
  }
  if (is.null(taxa)) {
    merge(all.taxa[, c("id", "family", "search.str")], res[, c("id", "occurrence")], by = "id")
  } else {
    # Restrict the join to the caller-supplied taxon names.
    merge(all.taxa[all.taxa$search.str %in% taxa, c("id", "family", "search.str")], res[, c("id", "occurrence")], by = "id")
  }
}
# Clean and validate the dirty iris data set.
# Interactive script: prompts for dirty_iris.csv, then for rules2.txt.
x <- read.csv(file.choose())  # select dirty_iris.csv

# Coerce every measurement column (all but column 5, the species) to numeric;
# special values that cannot be parsed become NA (with coercion warnings).
x[, -5] <- lapply(x[, -5], function(y) as.numeric(as.character(y)))

# Total number of complete observations (rows without any NA).
n_complete <- sum(complete.cases(x))
cat("Number of complete observations : ", n_complete, "\n")

# Percentage of complete observations. (The original printed the wrong
# label here — it said "Number" for what is a percentage.)
cat("Percentage of complete observations : ", n_complete / nrow(x) * 100, "\n\n")

x <- na.omit(x)  # delete records with NAs

# Check the remaining records against the edit rules.
library(editrules)
edit2 <- editfile(file.choose())  # select rules2.txt
sm <- violatedEdits(edit2, x)
summary(sm)
plot(sm)

# Inspect outliers in sepal length of the built-in iris data.
boxplot(iris$Sepal.Length)
boxplot.stats(iris$Sepal.Length)
| /q2.R | no_license | 97Abhinav97/DM | R | false | false | 602 | r | x = read.csv(file.choose()) #select dirty_iris.csv
#replace special values with NA
x[,-5] = lapply(x[,-5], function(y) as.numeric(as.character(y)))
#total number of complete observations
c = sum(complete.cases(x))
cat("Number of complete observations : ", c, "\n")
#percentage of complete observations
cat("Number of complete observations : ", c/(dim(x)[1])*100, "\n\n")
x = na.omit(x) #delete records with NAs
library(editrules)
edit2 <- editfile(file.choose()) #select rules2.txt
sm <- violatedEdits(edit2,x)
summary(sm)
plot(sm)
boxplot(iris$Sepal.Length)
boxplot.stats(iris$Sepal.Length)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/project-element.R
\name{add_project}
\alias{add_project}
\title{Add project element}
\usage{
add_project(
parent_element,
project_title,
award_information,
project_personnel
)
}
\arguments{
\item{parent_element}{A list in which the project node should be nested in.}
\item{project_title}{The title of the project that the funding is awarded to.}
\item{award_information}{A list that includes the required funding information
for an EML document. This list must include the award title and the funderName.
This list can be created by calling the \code{add_funding} function
on the funding information or by manually inputting the required information.
If the list is written manually it must be formatted as follows.
\code{award_infomation = list(funderName = "Name", title = "Award Title")}
Additional information about the funding may be added to the list. See the
\code{\link{add_funding}} documentation for more information.}
\item{project_personnel}{A list that includes the required information on project
personnel for an EML document. It must include the first name, last name,
organization, and personnel role for this project. This list can be created
by calling the \code{add_personnel} function on the project personnel or by manually inputting the required
information. If the list is written manually it must be formatted as follows.
\code{project_personnel = list(individualName = list(givenName = "First Name", surName = "Last Name"),
role = "Position", organization = "Organization")}
Additional information about the project personnel may be added to the list. See the
\code{\link{add_personnel}} documentation for more information.}
}
\value{
This function returns the parent element with a new project node containing
all project information required for an EML document.
}
\description{
This function creates a project node within the parent element that
contains all the required elements for the project section of an EML document.
This function can be used in combination with \code{add_personnel}
and \code{add_funding}. \code{add_personnel} can be used to generate the \code{project_personnel}
input and \code{add_funding} can be used to generate the \code{award_information} input.
}
\examples{
add_project(parent_element = list(),
project_title = "my project title",
award_information = add_funding(funder_name = "Bank",
funder_identifier = "Funder 1",
award_number = "000",
award_title = "Money up for grabs",
award_url = "awardforme.com"),
project_personnel = add_personnel(parent_element = list(),
first_name = "Smithy",
last_name = "Smith",
email = "myemail@mail.gov",
role = "Manager",
organization = "US GOV"))
add_project(parent_element = list(),
project_title = "my project title",
award_information = list(funderName = "Bank",
funderIdentifier = "Funder 1",
awardNumber = "000",
title = "Money up for grabs",
awardUrl = "awardforme.com"),
project_personnel = list(individualName = list(givenName = "Smithy",
surName = "Smith"),
electronicMailAddress = "myemail@mail.gov",
role = "Manager",
organizationName = "US GOV"))
}
| /man/add_project.Rd | permissive | ErinCain/EDIutils | R | false | true | 3,998 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/project-element.R
\name{add_project}
\alias{add_project}
\title{Add project element}
\usage{
add_project(
parent_element,
project_title,
award_information,
project_personnel
)
}
\arguments{
\item{parent_element}{A list in which the project node should be nested in.}
\item{project_title}{The title of the project that the funding is awarded to.}
\item{award_information}{A list that includes the required funding information
for an EML document.This list must include the award title and the funderName.
This list can be created by calling the \code{add_funding} function
on the funding information or by manually inputting the required information.
If the list is written manually it must be formatted as follows.
\code{award_infomation = list(funderName = "Name", title = "Award Title")}
Additional information about the funding may be added to the list. See the
\code{\link{add_funding}} documentation for more information.}
\item{project_personnel}{A list that includes the required information on project
personnel for an EML document. It must include the first name, last name,
organization, and personnel role for this project. This list can be created
by calling the \code{add_personnel} function on the project personnel or by manually inputting the required
information. If the list is written manually it must be formatted as follows.
\code{project_personnel = list(individualName = list(givenName = "First Name", surName = "Last Name"),
role = "Position", organization = "Organization")}
Additional information about the project personnel may be added to the list. See the
\code{\link{add_personnel}} documentation for more information.}
}
\value{
This function returns the parent element with a new project node containing
all project information required for an EML document.
}
\description{
This function creates a project node within the parent element that
contains all the required elements for the project section of an EML document.
This function can be used in combination with \code{add_personnel}
and \code{add_funding}. \code{add_personnel} can be used to generate the \code{project_personnel}
input and \code{add_funding} can be used to generate the \code{award_information} input.
}
\examples{
add_project(parent_element = list(),
project_title = "my project title",
award_information = add_funding(funder_name = "Bank",
funder_identifier = "Funder 1",
award_number = "000",
award_title = "Money up for grabs",
award_url = "awardforme.com"),
project_personnel = add_personnel(parent_element = list(),
first_name = "Smithy",
last_name = "Smith",
email = "myemail@mail.gov",
role = "Manager",
organization = "US GOV"))
add_project(parent_element = list(),
project_title = "my project title",
award_information = list(funderName = "Bank",
funderIdentifier = "Funder 1",
awardNumber = "000",
title = "Money up for grabs",
awardUrl = "awardforme.com"),
project_personnel = list(individualName = list(givenName = "Smithy",
surName = "Smith"),
electronicMailAddress = "myemail@mail.gov",
role = "Manager",
organizationName = "US GOV"))
}
|
#Group: Mathew Witek, Michael Gleyzer, and Harishkartik Kumaran Pillai
# Load the NOAA+GISS series and keep a matrix copy of it.
# NOTE(review): hard-coded path to the user's Desktop — adjust if the CSV
# lives elsewhere.
NOAA<-read.csv("~/Desktop/NOAA+GISS.csv")
NOAA.mat<-as.matrix(NOAA)
# Fit a cubic smoothing spline to (X, Y) and summarise its residuals.
#
# Args:
#   X: numeric vector of predictor values.
#   Y: numeric vector of responses, the same length as X.
#
# Returns a list with:
#   raw.resid - residuals of Y around the smooth, evaluated at each X
#   sd.resid  - residual standard deviation, dividing by n minus the
#               spline's effective degrees of freedom
#   smooth    - the fitted smooth evaluated at each X
#
# Note: the original also computed studentised residuals (stud.resid) but
# never used or returned them; that dead computation has been removed.
my.smooth.for.boot <- function(X, Y){
  fit <- smooth.spline(X, Y)
  # Evaluate the fitted curve at the original X values by linearly
  # interpolating the spline's knot grid.
  fitted.vals <- approx(fit$x, fit$y, X)$y
  resid.vals <- Y - fitted.vals
  # Residual SD using the spline's effective df as the model complexity.
  sd.resid <- sqrt(sum(resid.vals^2) / (length(Y) - fit$df))
  list(raw.resid = resid.vals, sd.resid = sd.resid, smooth = fitted.vals)
}
# Pointwise bootstrap confidence band for a smoothing-spline fit.
#
# Resamples the residuals of the initial smooth `nboot` times, refits the
# smoother to each perturbed response, and uses the empirical distribution
# of (bootstrap smooth - base smooth) at each X to build a basic-bootstrap
# confidence band.
#
# Args:
#   X, Y       : numeric vectors of equal length.
#   nboot      : number of bootstrap replicates.
#   confidence : coverage level of the band (e.g. 0.95).
#
# Side effects: draws a scatter plot of (X, Y) with the band (red), the
# base smooth (black), and a stiff df = 2 reference smooth (green).
#
# Changes vs. the original: the replicate matrix is preallocated instead of
# grown with rbind() inside the loop; the unused base.sd and the duplicate,
# immediately-overwritten n1 assignment were removed. The resampling order
# and all numerical results are unchanged.
my.boot.smooth <- function(X, Y, nboot = 1000, confidence = 0.95) {
  par(mfrow = c(1, 1))
  base.fit <- my.smooth.for.boot(X, Y)
  base.smooth <- base.fit$smooth
  base.resid <- base.fit$raw.resid
  n.points <- length(base.smooth)
  # One bootstrap replicate per row: deviation of the refitted smooth from
  # the base smooth at every X value.
  smooth.dist <- matrix(0, nrow = nboot, ncol = n.points)
  for (b in seq_len(nboot)) {
    # Resample residuals with replacement and rebuild a bootstrap response.
    boot.resid <- sample(base.resid, length(base.resid), replace = TRUE)
    boot.fit <- my.smooth.for.boot(X, base.smooth + boot.resid)
    smooth.dist[b, ] <- boot.fit$smooth - base.smooth
  }
  alpha <- 1 - confidence
  LB <- numeric(n.points)
  UB <- numeric(n.points)
  for (i in seq_len(n.points)) {
    # Empirical CDF of the bootstrap deviations at this X value.
    devs <- sort(smooth.dist[, i])
    probs <- seq_along(devs) / length(devs)
    # Interpolated alpha/2 and 1 - alpha/2 quantiles of the deviations.
    qs <- approx(probs, devs, c(alpha / 2, 1 - alpha / 2))$y
    # Basic bootstrap interval: reflect the quantiles around the estimate.
    LB[i] <- base.smooth[i] - qs[2]
    UB[i] <- base.smooth[i] - qs[1]
  }
  # Size the plotting region so the band, the smooth and the data all fit.
  plot(rep(X, 4), c(LB, base.smooth, UB, Y), xlab = "X", ylab = "Y", type = "n")
  points(X, Y)
  o1 <- order(X)
  lines(X[o1], LB[o1], col = 2)
  lines(X[o1], UB[o1], col = 2)
  lines(X[o1], base.smooth[o1], col = 1)
  lines(smooth.spline(X, Y, df = 2), col = 3)
}
# Exercise the smoother on the NOAA series (the returned list is
# auto-printed at top level), then draw a 95% bootstrap confidence band
# for column 2 of NOAA as a function of column 3.
my.smooth.for.boot(NOAA[[3]] , NOAA[[2]])
my.boot.smooth(NOAA[[3]] , NOAA[[2]],nboot =1000 , confidence= 0.95)
| /Project 1/assignment1.r | no_license | mwitek1997/R-Projects | R | false | false | 3,937 | r | #Group: Mathew Witek, Michael Gleyzer, and Harishkartik Kumaran Pillai
NOAA<-read.csv("~/Desktop/NOAA+GISS.csv")
NOAA.mat<-as.matrix(NOAA)
#Implementation of bootstrap as applied to linear regression.
my.smooth.for.boot<-function(X, Y){
#Fits a linear function for vector of input values x and output values y
smsp.strcv<-smooth.spline(X,Y)
#calculates residuals by subtracting the y coordinates of the linear interpolation from the y coordinates
smspcv.resid<-(Y-approx(smsp.strcv$x,smsp.strcv$y,X)$y)
#calculates the standard deviation of the residuals
sd.resid<-sqrt(sum(smspcv.resid^2)/(length(Y)-smsp.strcv$df))
#this line standarizes the residuals
stud.resid<-smspcv.resid/sd.resid
#store the y coordiantes of the linear interpolation in the variable my.smooth
my.smooth<-approx(smsp.strcv$x,smsp.strcv$y,X)$y
#creates a list which stores vectors of the residuals, s.d. and y interpolation values
list(raw.resid=smspcv.resid,sd.resid=sd.resid,smooth=my.smooth)
}
my.boot.smooth<-function(X,Y,nboot=1000,confidence=0.95){
#Creates a matrix of 1 row by 1 column
par(mfrow=c(1,1))
#Stores the return value of the function my.smooth.for.boot in the variable str0.
str0<-my.smooth.for.boot(X,Y)
#Initializes smooth.dist as a null object
smooth.dist<-NULL
#stores y coordinates of linear interpolation in the varibale base.smooth
base.smooth<-str0$smooth
#store the standard deviation of the residuals in the variable base.sd
base.sd<-str0$sd.resid
#store the residuals in the variable base.resid
base.resid<-str0$raw.resid
#Stores length of base.smooth vector in n1
n1<-length(base.smooth)
#loops 1000 times
for(i in 1:nboot){
#This line stores the results of the sample with replacement from the vector holding the residuals in the bres variable.
bres<-sample(base.resid,length(base.resid),replace=T)
#add to the vector containing the y values from the linear interpolation
Yboot.dat<-((base.smooth+bres))
#print out vector
#print(boot.dat)
#store the result of my.smooth.for.boot in the variable bstr0
bstr0<-my.smooth.for.boot(X,Yboot.dat)
#store the y values of the Linear Interpolation in boot.smooth
boot.smooth<-bstr0$smooth
#create data frame holding the differences between the bootstrapped itnerpolated y values and the interpolated y values from the intial data
#Data frame to have 1000 columns
smooth.dist<-rbind(smooth.dist,boot.smooth-base.smooth)
}
#this line assigns the length of the data frame smooht.dist's first row
n1<-length(smooth.dist[1,])
#calculates alpha = 0.05
alpha<-1-confidence
#Initializes LB as a null object
LB<-NULL
#Initialize UB as a null object
UB<-NULL
#this for loop iterates through all columns of smooth.dist object/data
for(i in 1:n1){
#sorts all the distances in a given column of smooth.dist in increasing order
s1<-sort(smooth.dist[,i])
#finds the length of s1 and stores it in the n2 variable
n2<-length(s1)
#assign v1 the vector (1/n2, 2/n2 ... 1)
v1<-c(1:n2)/n2
#Assigns bvec the results of linearly interpolating v1 and s1 in the bounds of alpha = (0.025, 0.975) and Creates a list where each entry contains two y coordinates(for LB and UB).
bvec<-approx(v1,s1,c(alpha/2,1-alpha/2))$y
LB<-c(LB,base.smooth[i]-bvec[2])
UB<-c(UB,base.smooth[i]-bvec[1])
}
#plots lower bond, smooth fit, and upper bound
plot(rep(X,4),c(LB,base.smooth,UB,Y),xlab="X",ylab="Y",type="n")
#plots the points of X,Y
points(X,Y)
o1<-order(X)
lines(X[o1],LB[o1],col=2)
lines(X[o1],UB[o1],col=2)
smooth <- smooth.spline(X,Y,df = 2)
#lines(smooth.spline(X,Y),col = 1)
lines(X[o1],base.smooth[o1],col=1)
lines(smooth,col=3)
}
my.smooth.for.boot(NOAA[[3]] , NOAA[[2]])
my.boot.smooth(NOAA[[3]] , NOAA[[2]],nboot =1000 , confidence= 0.95)
|
# Data prepare for sc16
# Builds the server / failure / IO data sets consumed by the SC16 analysis.
rm(list = ls())  # NOTE(review): wipes the whole workspace; run in a fresh session
source('head.R')   # presumably defines project paths (dir_dataSource, dir_data) and helpers — confirm
library(ggplot2)
#@@@ CONFIGURE @@@#
# Expected to provide the objects used below: cmdb, data.flist, mean_io,
# disk_ip — TODO confirm contents of the .Rda.
load(file.path(dir_dataSource,'load_ftr_attrid.Rda'))
source('sc16F1Func.R')  # presumably defines virt_disk() used by dataPrepare() — confirm
#test
# Example window used while testing dataPrepare() interactively.
lowerTime <- as.POSIXct('2013-07-01')
upperTime <- as.POSIXct('2013-09-01')
saveName <- 'dataPrepareAFR1307_1308.Rda'
# Prepare failure / inventory / IO data sets for one observation window and
# save them to file.path(dir_data, saveName).
#
# Args:
#   lowerTime, upperTime : POSIXct bounds of the observation window.
#   saveName             : name of the .Rda file written under dir_data.
#   flSource             : '0' keeps all failure records when building virtDC;
#                          any other value keeps only records whose `group`
#                          field matches it (e.g. 'uwork', 'helper').
#
# NOTE(review): depends on globals provided elsewhere in this script
# (data.flist, cmdb, mean_io, disk_ip) and on project helpers
# (mchAttr, factorX, virt_disk) — confirm they are in scope before calling.
dataPrepare <- function(lowerTime,upperTime,saveName,flSource = '0'){
# S1. Failure record prepare: failures inside the window whose server is
# known to the cmdb inventory.
data.f <- subset(data.flist, f_time < upperTime & f_time > lowerTime)
data.f <- subset(data.f,ip %in% cmdb$ip & svr_id %in% cmdb$svr_asset_id)
# Server age at failure time, in years (days since use_time / 365).
data.f$failShiptime <- as.numeric(difftime(data.f$f_time,data.f$use_time,tz = 'UTC',units = 'days'))/365
data.f$fsTime <- floor(data.f$failShiptime)
# Bin the age into [0,0.5], (0.5,1], (1,2], ..., (6,7] and keep only the
# lower bound of each interval as the label.
data.f$fsTimeN <- cut(data.f$failShiptime,c(0,1/2,1:7),include.lowest = T)
data.f$fsTimeN <- gsub('^\\[|^\\(|,.*$','',data.f$fsTimeN)
# S2. Compute online time for cmdb. This rebinds cmdb locally; the global
# inventory is not modified.
cmdb <- subset(cmdb,use_time <= upperTime)
cmdb$shiptimeToLeft <- as.numeric(difftime(lowerTime,cmdb$use_time,tz = 'UTC',units = 'days'))/365
cmdb$shiptimeToRight <- as.numeric(difftime(upperTime,cmdb$use_time,tz = 'UTC',units = 'days'))/365
cmdb$shTime <- floor(cmdb$shiptimeToRight)
cmdb$shTimeN <- cut(cmdb$shiptimeToRight,c(0,1/2,1:7),include.lowest = T)
cmdb$shTimeN <- gsub('^\\[|^\\(|,.*$','',cmdb$shTimeN)
# S3. Label dev_class_id for each server with a coarse device class.
cmdb$dClass <- ''
class_C <- 'C1'
class_B <- c('B5','B6','B1')
class_TS <- c('TS1','TS3','TS4','TS5','TS6')
cmdb$dClass[cmdb$dev_class_id %in% class_C] <- 'C'
cmdb$dClass[cmdb$dev_class_id %in% class_B] <- 'B'
cmdb$dClass[cmdb$dev_class_id %in% class_TS] <- 'TS'
# S4. Label server with disk attributes (count, model, capacity) taken from
# disk_ip via the project helper mchAttr.
cmdb <- mchAttr(cmdb,disk_ip,'svr_asset_id','svr_id',
c('numDisk','numModel','numMain','mainModel','capacity'))
colcmdb <- c('svr_asset_id','ip','dev_class_id','bs1','use_time','shiptimeToLeft','dClass',
'shiptimeToRight','shTime','numDisk','numModel','numMain','mainModel','capacity')
# Keep only C/TS class servers that have IO statistics, were online during
# the window, and have known disk information.
cmdbio <- subset(cmdb,svr_asset_id %in% mean_io$svrid &
dev_class_id %in% c(class_C,class_TS) &
shiptimeToRight > 0 &
!is.na(numDisk),colcmdb)
# Disk models of interest (presumably drive model numbers — confirm).
modelNeed <- c('ST3500514NS','ST31000524NS','ST32000645NS',
'ST500NM0011','ST1000NM0011','ST2000NM0011')
# add tag for disk: 'A' = fewer than 6 disks, 'B' = 6 or more; combined
# with the main disk model below.
cmdbio$tagDisk <- 'A'
cmdbio$tagDisk[cmdbio$numDisk >= 6] <- 'B'
cmdbio <- factorX(subset(cmdbio,mainModel %in% modelNeed))
cmdbio$tagDisk <- paste(cmdbio$tagDisk,cmdbio$mainModel,sep='-')
# filter capacity for C and revise capacity for TS
# C: keep only capacities 250/500/1000 (units as stored — confirm).
cmdbio <- subset(cmdbio,!is.na(capacity) & (dClass != 'C' | capacity %in% c(500,250,1000)))
# TS: collapse capacities into two buckets (<=18000 -> 12000, >18000 ->
# 24000) and split the TS class accordingly below.
cmdbio$capacityMerge <- cmdbio$capacity
cmdbio$capacityMerge[cmdbio$capacityMerge <= 18000] <- 12000
cmdbio$capacityMerge[cmdbio$capacityMerge > 18000] <- 24000
cmdbio$dClass[cmdbio$dClass == 'TS' & cmdbio$capacityMerge == 12000] <- 'TS1T'
cmdbio$dClass[cmdbio$dClass == 'TS' & cmdbio$capacityMerge == 24000] <- 'TS2T'
# S5. Add some attributes to the derived tables.
# CMDB (factorX presumably re-levels factor columns — confirm).
tmp.cmdb <- factorX(cmdbio)
# failure record: failures on the retained servers, annotated with the
# server attributes computed above.
tmp.f <- subset(data.f,svr_id %in% tmp.cmdb$svr_asset_id)
tmp.f$ip <- factor(tmp.f$ip)
tmp.f$svr_id <- factor(tmp.f$svr_id)
tmp.f <- mchAttr(tmp.f,tmp.cmdb,
'svr_id','svr_asset_id',
c('capacity','use_time','shiptimeToLeft',
'shiptimeToRight','shTime','shTimeN','dClass','tagDisk'))
tmp.f <- factorX(tmp.f)
# IO statistic for the retained servers, annotated likewise.
mean_io <- subset(mean_io,svrid %in% factor(cmdbio$svr_asset_id))
tmp.io <- mean_io
tmp.io <- mchAttr(tmp.io,cmdbio,'svrid','svr_asset_id',
c('dev_class_id','dClass','use_time','shiptimeToLeft',
'shiptimeToRight','shTime','shTimeN','ip','shiptimeToRight','tagDisk'))
tmp.io <- factorX(tmp.io)
# disk information, labelled with the device class of its server.
tmp.disk <- disk_ip
tmp.disk$dClass <- cmdbio$dClass[match(tmp.disk$ip,cmdbio$ip)]
tmp.disk <- factorX(tmp.disk)
# S5.virtDC: build the virtual disk-class summary; optionally restrict the
# failure records to one source via the flSource pattern.
if (flSource == '0'){
virtDC <- virt_disk(tmp.f,tmp.cmdb,upperTime)
}else if(flSource != '0'){
virtDC <- virt_disk(factorX(subset(tmp.f,grepl(flSource,group))),tmp.cmdb,upperTime)
}
# S5. Save all derived objects for the downstream analysis scripts.
save(tmp.cmdb,tmp.f,tmp.io,tmp.disk,cmdb,data.f,virtDC,file = file.path(dir_data,saveName))
# list(tmp.cmdb,tmp.f,tmp.io,tmp.disk,cmdb,data.f)
}
# All Data: full 2010-2015 window, all failure-record sources.
dataPrepare(as.POSIXct('2010-01-01'),as.POSIXct('2015-01-01'),'dataPrepareAFR10-15.Rda')
# Two data record source: same window, failure records restricted to one
# source each ('uwork' / 'helper', matched against the `group` field).
dataPrepare(as.POSIXct('2010-01-01'),as.POSIXct('2015-01-01'),'dataPrepareAFR10-15_uwork.Rda','uwork')
dataPrepare(as.POSIXct('2010-01-01'),as.POSIXct('2015-01-01'),'dataPrepareAFR10-15_helper.Rda','helper')
# # Full Year
# dataPrepare(as.POSIXct('2013-01-01'),as.POSIXct('2013-10-01'),'dataPrepareAFR13.Rda')
# dataPrepare(as.POSIXct('2014-01-01'),as.POSIXct('2015-01-01'),'dataPrepareAFR14.Rda')
#
# # Half Year
# dataPrepare(as.POSIXct('2013-01-01'),as.POSIXct('2013-07-01'),'dataPrepareAFR13A.Rda')
# dataPrepare(as.POSIXct('2013-07-01'),as.POSIXct('2014-01-01'),'dataPrepareAFR13B.Rda')
# dataPrepare(as.POSIXct('2014-01-01'),as.POSIXct('2014-07-01'),'dataPrepareAFR14A.Rda')
# dataPrepare(as.POSIXct('2014-07-01'),as.POSIXct('2015-01-01'),'dataPrepareAFR14B.Rda')
#
# # two month with io and smart
# dataPrepare(as.POSIXct('2014-06-01'),as.POSIXct('2014-08-01'),'dataPrepareAFR1406_1407.Rda')
#
# # two month
# dataPrepare(as.POSIXct('2013-01-01'),as.POSIXct('2013-03-01'),'dataPrepareAFR1301_1302.Rda')
# dataPrepare(as.POSIXct('2013-03-01'),as.POSIXct('2013-05-01'),'dataPrepareAFR1303_1304.Rda')
# dataPrepare(as.POSIXct('2013-05-01'),as.POSIXct('2013-07-01'),'dataPrepareAFR1305_1306.Rda')
# dataPrepare(as.POSIXct('2013-07-01'),as.POSIXct('2013-09-01'),'dataPrepareAFR1307_1308.Rda')
# dataPrepare(as.POSIXct('2013-09-01'),as.POSIXct('2013-11-01'),'dataPrepareAFR1309_1310.Rda')
#
# dataPrepare(as.POSIXct('2014-01-01'),as.POSIXct('2014-03-01'),'dataPrepareAFR1401_1402.Rda')
# dataPrepare(as.POSIXct('2014-03-01'),as.POSIXct('2014-05-01'),'dataPrepareAFR1403_1404.Rda')
# dataPrepare(as.POSIXct('2014-05-01'),as.POSIXct('2014-07-01'),'dataPrepareAFR1405_1406.Rda')
# dataPrepare(as.POSIXct('2014-07-01'),as.POSIXct('2014-09-01'),'dataPrepareAFR1407_1408.Rda')
# dataPrepare(as.POSIXct('2014-09-01'),as.POSIXct('2014-11-01'),'dataPrepareAFR1409_1410.Rda')
# dataPrepare(as.POSIXct('2014-11-01'),as.POSIXct('2015-01-01'),'dataPrepareAFR1411_1412.Rda')
#
| /IO_statistic/dataPrepareAFR.R | no_license | yiyusheng/attrid | R | false | false | 6,530 | r | # Data prepare for sc16
# Environment setup for the AFR (annualized failure rate) data-preparation
# script.  NOTE(review): rm(list = ls()) wipes the global environment when
# sourced; kept as-is since the script is meant to run standalone.
rm(list = ls())
source('head.R')      # project helpers -- presumably defines dir_dataSource, dir_data, mchAttr(), factorX(); TODO confirm
library(ggplot2)
#@@@ CONFIGURE @@@#
# Loads the prepared feature/attribute tables; presumably provides cmdb,
# data.flist, mean_io and disk_ip used below -- verify the .Rda contents.
load(file.path(dir_dataSource,'load_ftr_attrid.Rda'))
source('sc16F1Func.R')  # presumably defines virt_disk() -- TODO confirm
# Ad-hoc values for stepping through dataPrepare() interactively.
lowerTime <- as.POSIXct('2013-07-01')
upperTime <- as.POSIXct('2013-09-01')
saveName <- 'dataPrepareAFR1307_1308.Rda'
# Build and save the per-window data sets used for AFR analysis.
#
# Filters the failure list, CMDB server table, IO statistics and disk
# table down to the observation window (lowerTime, upperTime), derives
# server-age variables, and saves the results to
# file.path(dir_data, saveName).
#
# Args:
#   lowerTime, upperTime: POSIXct bounds of the observation window.
#   saveName: name of the .Rda file written under dir_data.
#   flSource: '0' (default) uses all failure records for virt_disk();
#             any other value keeps only records whose `group` column
#             matches it via grepl().
#
# Uses globals loaded/sourced at the top of the script: data.flist,
# cmdb, mean_io, disk_ip, plus the helpers mchAttr(), factorX() and
# virt_disk() (assumed to come from head.R / sc16F1Func.R -- confirm).
dataPrepare <- function(lowerTime,upperTime,saveName,flSource = '0'){
  # S1. Failure record prepare: keep failures inside the window whose
  # ip and server id both appear in the CMDB.
  data.f <- subset(data.flist, f_time < upperTime & f_time > lowerTime)
  data.f <- subset(data.f,ip %in% cmdb$ip & svr_id %in% cmdb$svr_asset_id)
  # Server age at failure, in years (days since deployment / 365).
  data.f$failShiptime <- as.numeric(difftime(data.f$f_time,data.f$use_time,tz = 'UTC',units = 'days'))/365
  data.f$fsTime <- floor(data.f$failShiptime)
  # Bin ages into [0,0.5], (0.5,1], (1,2], ..., (6,7], then strip the
  # interval punctuation so only the lower-bound string remains.
  data.f$fsTimeN <- cut(data.f$failShiptime,c(0,1/2,1:7),include.lowest = T)
  data.f$fsTimeN <- gsub('^\\[|^\\(|,.*$','',data.f$fsTimeN)
  # S2. Compute online time for cmdb: server ages (years) at the window
  # edges; keep only servers deployed before the window end.
  cmdb <- subset(cmdb,use_time <= upperTime)
  cmdb$shiptimeToLeft <- as.numeric(difftime(lowerTime,cmdb$use_time,tz = 'UTC',units = 'days'))/365
  cmdb$shiptimeToRight <- as.numeric(difftime(upperTime,cmdb$use_time,tz = 'UTC',units = 'days'))/365
  cmdb$shTime <- floor(cmdb$shiptimeToRight)
  cmdb$shTimeN <- cut(cmdb$shiptimeToRight,c(0,1/2,1:7),include.lowest = T)
  cmdb$shTimeN <- gsub('^\\[|^\\(|,.*$','',cmdb$shTimeN)
  # S3. Map dev_class_id onto a coarse device class ('C', 'B', 'TS';
  # unmatched ids keep the empty string).
  cmdb$dClass <- ''
  class_C <- 'C1'
  class_B <- c('B5','B6','B1')
  class_TS <- c('TS1','TS3','TS4','TS5','TS6')
  cmdb$dClass[cmdb$dev_class_id %in% class_C] <- 'C'
  cmdb$dClass[cmdb$dev_class_id %in% class_B] <- 'B'
  cmdb$dClass[cmdb$dev_class_id %in% class_TS] <- 'TS'
  # S4. Attach disk attributes from disk_ip (mchAttr presumably copies
  # the named columns by matching svr_asset_id to svr_id -- confirm
  # against its definition).
  cmdb <- mchAttr(cmdb,disk_ip,'svr_asset_id','svr_id',
                  c('numDisk','numModel','numMain','mainModel','capacity'))
  colcmdb <- c('svr_asset_id','ip','dev_class_id','bs1','use_time','shiptimeToLeft','dClass',
               'shiptimeToRight','shTime','numDisk','numModel','numMain','mainModel','capacity')
  # Keep only C/TS servers that have IO statistics, a positive online
  # time and a known disk count.
  cmdbio <- subset(cmdb,svr_asset_id %in% mean_io$svrid &
                     dev_class_id %in% c(class_C,class_TS) &
                     shiptimeToRight > 0 &
                     !is.na(numDisk),colcmdb)
  # Disk models included in the study.
  modelNeed <- c('ST3500514NS','ST31000524NS','ST32000645NS',
                 'ST500NM0011','ST1000NM0011','ST2000NM0011')
  # Tag each server by disk count ('A' for < 6 disks, 'B' for >= 6)
  # combined with its main disk model, e.g. "B-ST31000524NS".
  cmdbio$tagDisk <- 'A'
  cmdbio$tagDisk[cmdbio$numDisk >= 6] <- 'B'
  cmdbio <- factorX(subset(cmdbio,mainModel %in% modelNeed))
  cmdbio$tagDisk <- paste(cmdbio$tagDisk,cmdbio$mainModel,sep='-')
  # Filter capacity for C servers (must be 250/500/1000) and collapse
  # TS capacities to two tiers (<= 18000 -> 12000, > 18000 -> 24000)
  # used to split TS into TS1T / TS2T classes.
  cmdbio <- subset(cmdbio,!is.na(capacity) & (dClass != 'C' | capacity %in% c(500,250,1000)))
  cmdbio$capacityMerge <- cmdbio$capacity
  cmdbio$capacityMerge[cmdbio$capacityMerge <= 18000] <- 12000
  cmdbio$capacityMerge[cmdbio$capacityMerge > 18000] <- 24000
  cmdbio$dClass[cmdbio$dClass == 'TS' & cmdbio$capacityMerge == 12000] <- 'TS1T'
  cmdbio$dClass[cmdbio$dClass == 'TS' & cmdbio$capacityMerge == 24000] <- 'TS2T'
  # S5. Assemble the output tables.
  # CMDB subset (factorX presumably refreshes factor levels -- confirm).
  tmp.cmdb <- factorX(cmdbio)
  # Failure records restricted to the retained servers, annotated with
  # the server attributes derived above.
  tmp.f <- subset(data.f,svr_id %in% tmp.cmdb$svr_asset_id)
  tmp.f$ip <- factor(tmp.f$ip)
  tmp.f$svr_id <- factor(tmp.f$svr_id)
  tmp.f <- mchAttr(tmp.f,tmp.cmdb,
                   'svr_id','svr_asset_id',
                   c('capacity','use_time','shiptimeToLeft',
                     'shiptimeToRight','shTime','shTimeN','dClass','tagDisk'))
  tmp.f <- factorX(tmp.f)
  # IO statistics for the retained servers, annotated the same way.
  mean_io <- subset(mean_io,svrid %in% factor(cmdbio$svr_asset_id))
  tmp.io <- mean_io
  tmp.io <- mchAttr(tmp.io,cmdbio,'svrid','svr_asset_id',
                    c('dev_class_id','dClass','use_time','shiptimeToLeft',
                      'shiptimeToRight','shTime','shTimeN','ip','shiptimeToRight','tagDisk'))
  tmp.io <- factorX(tmp.io)
  # Disk table annotated with the device class of its server (by ip).
  tmp.disk <- disk_ip
  tmp.disk$dClass <- cmdbio$dClass[match(tmp.disk$ip,cmdbio$ip)]
  tmp.disk <- factorX(tmp.disk)
  # S6. Virtual disk-level table; optionally restricted to one
  # failure-record source via flSource (matched against `group`).
  if (flSource == '0'){
    virtDC <- virt_disk(tmp.f,tmp.cmdb,upperTime)
  }else if(flSource != '0'){
    virtDC <- virt_disk(factorX(subset(tmp.f,grepl(flSource,group))),tmp.cmdb,upperTime)
  }
  # S7. Persist everything for the downstream analysis scripts.
  save(tmp.cmdb,tmp.f,tmp.io,tmp.disk,cmdb,data.f,virtDC,file = file.path(dir_data,saveName))
  # list(tmp.cmdb,tmp.f,tmp.io,tmp.disk,cmdb,data.f)
}
# All Data: full 2010--2014 span, all failure-record sources combined.
dataPrepare(as.POSIXct('2010-01-01'),as.POSIXct('2015-01-01'),'dataPrepareAFR10-15.Rda')
# Same span, split by failure-record source: 'uwork' / 'helper' are
# matched (grepl) against the `group` column of the failure list.
dataPrepare(as.POSIXct('2010-01-01'),as.POSIXct('2015-01-01'),'dataPrepareAFR10-15_uwork.Rda','uwork')
dataPrepare(as.POSIXct('2010-01-01'),as.POSIXct('2015-01-01'),'dataPrepareAFR10-15_helper.Rda','helper')
# # Full Year
# dataPrepare(as.POSIXct('2013-01-01'),as.POSIXct('2013-10-01'),'dataPrepareAFR13.Rda')
# dataPrepare(as.POSIXct('2014-01-01'),as.POSIXct('2015-01-01'),'dataPrepareAFR14.Rda')
#
# # Half Year
# dataPrepare(as.POSIXct('2013-01-01'),as.POSIXct('2013-07-01'),'dataPrepareAFR13A.Rda')
# dataPrepare(as.POSIXct('2013-07-01'),as.POSIXct('2014-01-01'),'dataPrepareAFR13B.Rda')
# dataPrepare(as.POSIXct('2014-01-01'),as.POSIXct('2014-07-01'),'dataPrepareAFR14A.Rda')
# dataPrepare(as.POSIXct('2014-07-01'),as.POSIXct('2015-01-01'),'dataPrepareAFR14B.Rda')
#
# # two month with io and smart
# dataPrepare(as.POSIXct('2014-06-01'),as.POSIXct('2014-08-01'),'dataPrepareAFR1406_1407.Rda')
#
# # two month
# dataPrepare(as.POSIXct('2013-01-01'),as.POSIXct('2013-03-01'),'dataPrepareAFR1301_1302.Rda')
# dataPrepare(as.POSIXct('2013-03-01'),as.POSIXct('2013-05-01'),'dataPrepareAFR1303_1304.Rda')
# dataPrepare(as.POSIXct('2013-05-01'),as.POSIXct('2013-07-01'),'dataPrepareAFR1305_1306.Rda')
# dataPrepare(as.POSIXct('2013-07-01'),as.POSIXct('2013-09-01'),'dataPrepareAFR1307_1308.Rda')
# dataPrepare(as.POSIXct('2013-09-01'),as.POSIXct('2013-11-01'),'dataPrepareAFR1309_1310.Rda')
#
# dataPrepare(as.POSIXct('2014-01-01'),as.POSIXct('2014-03-01'),'dataPrepareAFR1401_1402.Rda')
# dataPrepare(as.POSIXct('2014-03-01'),as.POSIXct('2014-05-01'),'dataPrepareAFR1403_1404.Rda')
# dataPrepare(as.POSIXct('2014-05-01'),as.POSIXct('2014-07-01'),'dataPrepareAFR1405_1406.Rda')
# dataPrepare(as.POSIXct('2014-07-01'),as.POSIXct('2014-09-01'),'dataPrepareAFR1407_1408.Rda')
# dataPrepare(as.POSIXct('2014-09-01'),as.POSIXct('2014-11-01'),'dataPrepareAFR1409_1410.Rda')
# dataPrepare(as.POSIXct('2014-11-01'),as.POSIXct('2015-01-01'),'dataPrepareAFR1411_1412.Rda')
#
|
##### Spatial data procedures can throw up more problems than with a dataframe.
##### There are a lot of good resources out there:
# http://www.maths.lancs.ac.uk/~rowlings/Teaching/UseR2012/cheatsheet.html
##### Here are some bits and bobs that have come in handy.

# Conditional mean replacement of missing values in a spatial points
# data frame: fill NA exposure values with the mean exposure of the
# matching condition group (mean taken over non-missing rows).
#   spatial   = SpatialPointsDataFrame (sp package -- assumed)
#   exp       = exposure variable
#   condition = conditional (0/1) variable
spatial@data$exp[is.na(spatial@data$exp) & spatial@data$condition==1]<-mean(spatial@data$exp[spatial@data$condition==1], na.rm=T)
spatial@data$exp[is.na(spatial@data$exp) & spatial@data$condition==0]<-mean(spatial@data$exp[spatial@data$condition==0], na.rm=T)

## Splitting up a large shapefile into bitesize pieces.
## Example: data zones split by local authority, one .rds per authority.
path="C:/mcherrie/"
geo<-readOGR(path, layer="DZ_2011_EoR_Scotland")
# NOTE(review): `list` shadows base::list within this script session.
list<-unique(geo@data$CouncilA_2)
for (i in list){
  geo2<-subset(geo, CouncilA_2==i)
  saveRDS(geo2, paste0(path,i, ".rds"))
}

## A very frustrating but usual occurrence: readOGR (rgdal -- assumed)
## fails when the data source path has a trailing slash.
## this does not work
geo<-readOGR("C:/pathpathpath/", layer="OutputArea2011_EoR")
## this works
geo<-readOGR("C:/pathpathpath", layer="OutputArea2011_EoR")
| /spatial.R | no_license | markocherrie/Helpful_Code | R | false | false | 1,189 | r | ##### Spatial data procedures can throw up more problems than with a dataframe
##### There's a lot of good resouces out there:
# http://www.maths.lancs.ac.uk/~rowlings/Teaching/UseR2012/cheatsheet.html
##### Here are some bit and bobs that have come in handy
# Conditional mean replacement of missing values in spatialpointsdataframe
# spatial= spatialpointsdataframe
# exp= exposure variable
# condition= conditional variable
spatial@data$exp[is.na(spatial@data$exp) & spatial@data$condition==1]<-mean(spatial@data$exp[spatial@data$condition==1], na.rm=T)
spatial@data$exp[is.na(spatial@data$exp) & spatial@data$condition==0]<-mean(spatial@data$exp[spatial@data$condition==0], na.rm=T)
## Splitting up a large shapefile into bitesize pieces
## Example datazones by local authority
path="C:/mcherrie/"
geo<-readOGR(path, layer="DZ_2011_EoR_Scotland")
list<-unique(geo@data$CouncilA_2)
for (i in list){
geo2<-subset(geo, CouncilA_2==i)
saveRDS(geo2, paste0(path,i, ".rds"))
}
## A very frustrating but usual occurence
## this does not work
geo<-readOGR("C:/pathpathpath/", layer="OutputArea2011_EoR")
## this works
geo<-readOGR("C:/pathpathpath", layer="OutputArea2011_EoR")
|
# Extracted example for HDoutliers::dataTrans (generated from the
# package .Rd file), run against a mixed-type survey data set.
library(HDoutliers)
### Name: dataTrans
### Title: Data Transformation for Leland Wilkinson's _hdoutliers_
###   Algorithm
### Aliases: dataTrans
### Keywords: cluster

### ** Examples

# The `tea` survey data ships with FactoMineR.
require(FactoMineR)
data(tea)
head(tea)
# Drop the first column and transform the remainder for hdoutliers.
dataTrans(tea[,-1])
| /data/genthat_extracted_code/HDoutliers/examples/dataTrans.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 255 | r | library(HDoutliers)
### Name: dataTrans
### Title: Data Transformation for Leland Wilkinson's _hdoutliers_
### Algorithm
### Aliases: dataTrans
### Keywords: cluster
### ** Examples
require(FactoMineR)
data(tea)
head(tea)
dataTrans(tea[,-1])
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ons.R
\name{ons_available_datasets}
\alias{ons_available_datasets}
\title{Available Datasets}
\usage{
ons_available_datasets()
}
\value{
list of available datasets
}
\description{
Available Datasets
}
\examples{
\dontrun{
ons_available_datasets()
}
}
\author{
Neale Swinnerton \href{mailto:neale@mastodonc.com}{neale@mastodonc.com}
}
| /man/ons_available_datasets.Rd | permissive | tomjemmett/monstR | R | false | true | 412 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ons.R
\name{ons_available_datasets}
\alias{ons_available_datasets}
\title{Available Datasets}
\usage{
ons_available_datasets()
}
\value{
list of available datasets
}
\description{
Available Datasets
}
\examples{
\dontrun{
ons_available_datasets()
}
}
\author{
Neale Swinnerton \href{mailto:neale@mastodonc.com}{neale@mastodonc.com}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/logging.R
\name{strip}
\alias{strip}
\title{Strip trailing newline characters from text}
\usage{
strip(text)
}
\arguments{
\item{text}{text to strip}
}
\description{
Strip trailing newline characters from text
}
| /wsim.io/man/strip.Rd | permissive | isciences/wsim | R | false | true | 290 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/logging.R
\name{strip}
\alias{strip}
\title{Strip trailing newline characters from text}
\usage{
strip(text)
}
\arguments{
\item{text}{text to strip}
}
\description{
Strip trailing newline characters from text
}
|
# This compares the variance of the size factor estimates when pooling is
# random, compared to when pooling is performed using the ring arrangement.
# NOTE(review): uses scran::: internals (.generateSphere,
# .create_linear_system), so this may break across scran versions.
require(scran)

set.seed(100)  # fixed seed so the 10 replicates are reproducible
collected.order <- collected.random <- list()
for (it in 1:10) {
  # Simulate an ngenes x ncells negative-binomial count matrix with
  # gene-specific means scaled by a per-cell true factor in [0.1, 1].
  ngenes <- 10000L
  ncells <- 200L
  true.means <- rgamma(ngenes, 2, 2)
  dispersions <- 0.1
  all.facs <- runif(ncells, 0.1, 1)
  effective.means <- outer(true.means, all.facs, "*")
  counts <- matrix(rnbinom(ngenes*ncells, mu=effective.means, size=1/dispersions), ncol=ncells)
  # Library-size-normalized expression, restricted to genes with a
  # non-zero average across cells.
  lib.sizes <- colSums(counts)
  exprs <- t(t(counts)/lib.sizes)
  use.ave.cell <- rowMeans(exprs)
  keep <- use.ave.cell>0
  use.ave.cell <- use.ave.cell[keep]
  exprs <- exprs[keep,,drop=FALSE]
  # Ring arrangement: .generateSphere is given the library sizes
  # (presumably ordering cells by size into the ring -- see scran
  # source).  Pools of 20 cells define a linear system whose solution,
  # rescaled by library size, estimates the size factors.
  size <- 20L
  sphere <- scran:::.generateSphere(lib.sizes)
  out <- scran:::.create_linear_system(exprs, sphere=sphere, pool.sizes=size, ave.cell=use.ave.cell)
  design <- as.matrix(out$design)
  output <- out$output
  est <- solve(qr(design), output) * lib.sizes
  # Trying with the opposite case, where everyone is mixed together:
  # a random cell ordering, duplicated so pools can wrap around.
  sphere <- sample(ncells)
  sphere <- as.integer(c(sphere, sphere))
  out2 <- scran:::.create_linear_system(exprs, sphere=sphere, pool.sizes=size, ave.cell=use.ave.cell)
  design2 <- as.matrix(out2$design)
  output2 <- out2$output
  est2 <- solve(qr(design2), output2) * lib.sizes
  # Accuracy metric: MAD of log(estimate / true factor); lower = better.
  collected.order[[it]] <- mad(log(est/all.facs))
  collected.random[[it]] <- mad(log(est2/all.facs))
  cat("Ordered:", collected.order[[it]], "\n")
  cat("Random:", collected.random[[it]], "\n")
  cat("\n")
}

# Summaries across replicates: mean MAD and its standard error.
mean(unlist(collected.order))
mean(unlist(collected.random))
sd(unlist(collected.order))/sqrt(length(collected.order))
sd(unlist(collected.random))/sqrt(length(collected.random))

sessionInfo()
| /simulations/poolsim.R | no_license | MarioniLab/Deconvolution2016 | R | false | false | 1,693 | r | # This compares the variance of the size factor estimates when pooling is random,
# compared to when pooling is performed using the ring arrangement.
require(scran)
set.seed(100)
collected.order <- collected.random <- list()
for (it in 1:10) {
ngenes <- 10000L
ncells <- 200L
true.means <- rgamma(ngenes, 2, 2)
dispersions <- 0.1
all.facs <- runif(ncells, 0.1, 1)
effective.means <- outer(true.means, all.facs, "*")
counts <- matrix(rnbinom(ngenes*ncells, mu=effective.means, size=1/dispersions), ncol=ncells)
lib.sizes <- colSums(counts)
exprs <- t(t(counts)/lib.sizes)
use.ave.cell <- rowMeans(exprs)
keep <- use.ave.cell>0
use.ave.cell <- use.ave.cell[keep]
exprs <- exprs[keep,,drop=FALSE]
size <- 20L
sphere <- scran:::.generateSphere(lib.sizes)
out <- scran:::.create_linear_system(exprs, sphere=sphere, pool.sizes=size, ave.cell=use.ave.cell)
design <- as.matrix(out$design)
output <- out$output
est <- solve(qr(design), output) * lib.sizes
# Trying with the opposite case, where everyone is mixed together.
sphere <- sample(ncells)
sphere <- as.integer(c(sphere, sphere))
out2 <- scran:::.create_linear_system(exprs, sphere=sphere, pool.sizes=size, ave.cell=use.ave.cell)
design2 <- as.matrix(out2$design)
output2 <- out2$output
est2 <- solve(qr(design2), output2) * lib.sizes
collected.order[[it]] <- mad(log(est/all.facs))
collected.random[[it]] <- mad(log(est2/all.facs))
cat("Ordered:", collected.order[[it]], "\n")
cat("Random:", collected.random[[it]], "\n")
cat("\n")
}
mean(unlist(collected.order))
mean(unlist(collected.random))
sd(unlist(collected.order))/sqrt(length(collected.order))
sd(unlist(collected.random))/sqrt(length(collected.random))
sessionInfo()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/build_run_modify.R
\docType{package}
\name{umx}
\alias{umx}
\alias{umx-package}
\title{Functions for Structural Equation Modeling in OpenMx}
\description{
\code{umx} allows you to more easily build, run, modify, and report structural models, building on the OpenMx package.
All core functions are organized into families, so they are easier to find (see "families" below under \strong{See Also})
All the functions have full-featured and well commented examples, some even have \emph{figures}, so use the help, even if you think it won't help :-)
Have a look, for example at \code{\link[=umxRAM]{umxRAM()}}
Check out NEWS about new features at \code{news(package = "umx")}
}
\details{
Introductory working examples are below. You can run all demos with demo(umx)
When I have a vignette, it will be: vignette("umx", package = "umx")
There is a helpful blog at \url{https://tbates.github.io}
If you want the bleeding-edge version:
devtools::install_github("tbates/umx")
}
\examples{
require("umx")
data(demoOneFactor)
myData = mxData(cov(demoOneFactor), type = "cov", numObs = nrow(demoOneFactor))
latents = c("G")
manifests = names(demoOneFactor)
m1 <- umxRAM("One Factor", data = myData,
umxPath(latents, to = manifests),
umxPath(var = manifests),
umxPath(var = latents , fixedAt=1)
)
# umx added informative labels, created starting values,
# Ran your model (if autoRun is on), and displayed a brief summary
# including a comparison if you modified a model...!
# Let's get some journal-ready fit information
umxSummary(m1)
umxSummary(m1, show = "std") #also display parameter estimates
# You can get the coefficients of an MxModel with coef(), just like for lm etc.
coef(m1)
# But with more control using parameters
parameters(m1, thresh="above", b=.3, pat = "G_to.*", digits = 3)
# ==================
# = Model updating =
# ==================
# Can we set the loading of X5 on G to zero?
m2 = umxModify(m1, "G_to_x1", name = "no_effect_of_g_on_X5", comparison = TRUE)
umxCompare(m1, m2)
# Note: umxSetParameters can do this with some additional flexibility
# ========================
# = Confidence intervals =
# ========================
# umxSummary() will show these, but you can also use the confint() function
confint(m1) # OpenMx's SE-based confidence intervals
umxConfint(m1, parm = 'all', run = TRUE) # likelihood-based CIs
# And make a Figure in dot (.gv) format!
plot(m1, std = TRUE)
# If you just want the .dot code returned set file = NA
plot(m1, std = TRUE, file = NA)
}
\references{
\itemize{
\item \url{https://www.github.com/tbates/umx}
}
}
\seealso{
Other Teaching and testing Functions: \code{\link{tmx_genotypic_effect}},
\code{\link{tmx_is.identified}}
Other Core Modeling Functions: \code{\link{plot.MxLISRELModel}},
\code{\link{plot.MxModel}}, \code{\link{umxAlgebra}},
\code{\link{umxMatrix}}, \code{\link{umxModify}},
\code{\link{umxPath}}, \code{\link{umxRAM}},
\code{\link{umxRun}}, \code{\link{umxSummary}},
\code{\link{umxSuperModel}}
Other Reporting Functions: \code{\link{FishersMethod}},
\code{\link{loadings.MxModel}},
\code{\link{tmx_is.identified}}, \code{\link{tmx_show}},
\code{\link{umxAPA}}, \code{\link{umxEval}},
\code{\link{umxFactorScores}},
\code{\link{umxGetParameters}},
\code{\link{umxParameters}}, \code{\link{umxReduce}},
\code{\link{umxSummary}}, \code{\link{umxWeightedAIC}},
\code{\link{umx_APA_pval}}, \code{\link{umx_aggregate}},
\code{\link{umx_names}}, \code{\link{umx_print}},
\code{\link{umx_time}}, \code{\link{xmu_get_CI}},
\code{\link{xmu_show_fit_or_comparison}}
Other Modify or Compare Models: \code{\link{umxAdd1}},
\code{\link{umxDrop1}}, \code{\link{umxEquate}},
\code{\link{umxFixAll}}, \code{\link{umxMI}},
\code{\link{umxModify}}, \code{\link{umxSetParameters}},
\code{\link{umxUnexplainedCausalNexus}}
Other Plotting functions: \code{\link{plot.MxLISRELModel}},
\code{\link{plot.MxModel}}, \code{\link{umxPlotACEcov}},
\code{\link{umxPlotACEv}}, \code{\link{umxPlotACE}},
\code{\link{umxPlotCP}}, \code{\link{umxPlotGxEbiv}},
\code{\link{umxPlotGxE}}, \code{\link{umxPlotIP}},
\code{\link{umxPlotSexLim}}, \code{\link{umxPlotSimplex}}
Other Super-easy helpers: \code{\link{umxEFA}},
\code{\link{umxLav2RAM}}, \code{\link{umxTwoStage}}
Other Twin Modeling Functions: \code{\link{power.ACE.test}},
\code{\link{umxACE_cov_fixed}}, \code{\link{umxACEcov}},
\code{\link{umxACEv}}, \code{\link{umxACE}},
\code{\link{umxCP}}, \code{\link{umxGxE_window}},
\code{\link{umxGxEbiv}}, \code{\link{umxGxE}},
\code{\link{umxIPold}}, \code{\link{umxIP}},
\code{\link{umxSexLim}}, \code{\link{umxSimplex}},
\code{\link{umxSummaryACEcov}},
\code{\link{umxSummaryACEv}},
\code{\link{umxSummaryACE}}, \code{\link{umxSummaryCP}},
\code{\link{umxSummaryGxEbiv}},
\code{\link{umxSummaryGxE}}, \code{\link{umxSummaryIP}},
\code{\link{umxSummarySexLim}},
\code{\link{umxSummarySimplex}},
\code{\link{xmu_twin_check}}
Other Twin Reporting Functions: \code{\link{umxPlotCP}},
\code{\link{umxReduceACE}}, \code{\link{umxReduceGxE}},
\code{\link{umxReduce}},
\code{\link{umxSummarizeTwinData}}
Other Twin Data functions: \code{\link{umx_long2wide}},
\code{\link{umx_make_TwinData}},
\code{\link{umx_residualize}},
\code{\link{umx_scale_wide_twin_data}},
\code{\link{umx_wide2long}}
Other Get and set: \code{\link{umx_default_option}},
\code{\link{umx_get_checkpoint}},
\code{\link{umx_get_options}},
\code{\link{umx_set_auto_plot}},
\code{\link{umx_set_auto_run}},
\code{\link{umx_set_checkpoint}},
\code{\link{umx_set_condensed_slots}},
\code{\link{umx_set_cores}},
\code{\link{umx_set_data_variance_check}},
\code{\link{umx_set_optimization_options}},
\code{\link{umx_set_optimizer}},
\code{\link{umx_set_plot_file_suffix}},
\code{\link{umx_set_plot_format}},
\code{\link{umx_set_separator}},
\code{\link{umx_set_silent}},
\code{\link{umx_set_table_format}}
Other Check or test: \code{\link{umx_check_names}},
\code{\link{umx_is_class}},
\code{\link{umx_is_endogenous}},
\code{\link{umx_is_exogenous}},
\code{\link{umx_is_numeric}},
\code{\link{xmu_twin_check}}
Other Data Functions: \code{\link{umxCovData}},
\code{\link{umxDescribeDataWLS}},
\code{\link{umxHetCor}},
\code{\link{umxPadAndPruneForDefVars}},
\code{\link{umx_as_numeric}}, \code{\link{umx_cov2raw}},
\code{\link{umx_lower2full}},
\code{\link{umx_make_MR_data}},
\code{\link{umx_make_TwinData}},
\code{\link{umx_make_bin_cont_pair_data}},
\code{\link{umx_make_fake_data}},
\code{\link{umx_polychoric}},
\code{\link{umx_polypairwise}},
\code{\link{umx_polytriowise}},
\code{\link{umx_read_lower}}, \code{\link{umx_rename}},
\code{\link{umx_reorder}},
\code{\link{umx_select_valid}}, \code{\link{umx_stack}},
\code{\link{umx_swap_a_block}}
Other File Functions: \code{\link{dl_from_dropbox}},
\code{\link{umx_make_sql_from_excel}},
\code{\link{umx_move_file}}, \code{\link{umx_open}},
\code{\link{umx_rename_file}},
\code{\link{umx_write_to_clipboard}}
Other String Functions: \code{\link{umx_explode_twin_names}},
\code{\link{umx_explode}}, \code{\link{umx_grep}},
\code{\link{umx_names}}, \code{\link{umx_object_as_str}},
\code{\link{umx_paste_names}}, \code{\link{umx_rot}},
\code{\link{umx_trim}},
\code{\link{umx_write_to_clipboard}}
Other Miscellaneous Stats Helpers: \code{\link{oddsratio}},
\code{\link{reliability}}, \code{\link{umxCov2cor}},
\code{\link{umxHetCor}}, \code{\link{umx_apply}},
\code{\link{umx_cor}}, \code{\link{umx_fun_mean_sd}},
\code{\link{umx_means}}, \code{\link{umx_r_test}},
\code{\link{umx_round}}, \code{\link{umx_var}}
Other Miscellaneous Utility Functions: \code{\link{install.OpenMx}},
\code{\link{qm}}, \code{\link{umxBrownie}},
\code{\link{umxFactor}}, \code{\link{umxVersion}},
\code{\link{umx_array_shift}},
\code{\link{umx_cell_is_on}},
\code{\link{umx_cont_2_quantiles}},
\code{\link{umx_find_object}}, \code{\link{umx_make}},
\code{\link{umx_msg}}, \code{\link{umx_open_CRAN_page}},
\code{\link{umx_pad}}, \code{\link{umx_pb_note}},
\code{\link{umx_print}}, \code{\link{umx_scale}},
\code{\link{umx_score_scale}},
\code{\link{xmu_check_variance}}
Other datasets: \code{\link{Fischbein_wt}},
\code{\link{GFF}}, \code{\link{iqdat}},
\code{\link{us_skinfold_data}}
Other Advanced Model Building Functions: \code{\link{umxJiggle}},
\code{\link{umxLabel}}, \code{\link{umxLatent}},
\code{\link{umxRAM2Ordinal}},
\code{\link{umxThresholdMatrix}},
\code{\link{umxValues}},
\code{\link{umx_fix_first_loadings}},
\code{\link{umx_fix_latents}},
\code{\link{umx_get_bracket_addresses}},
\code{\link{umx_standardize}},
\code{\link{umx_string_to_algebra}}
Other zAdvanced Helpers: \code{\link{umx_merge_CIs}},
\code{\link{umx_standardize_ACEcov}},
\code{\link{umx_standardize_ACEv}},
\code{\link{umx_standardize_ACE}},
\code{\link{umx_standardize_CP}},
\code{\link{umx_standardize_IP}},
\code{\link{umx_standardize_SexLim}},
\code{\link{umx_standardize_Simplex}},
\code{\link{umx_stash_CIs}}
Other xmu internal not for end user: \code{\link{umxModel}},
\code{\link{xmuHasSquareBrackets}},
\code{\link{xmuLabel_MATRIX_Model}},
\code{\link{xmuLabel_Matrix}},
\code{\link{xmuLabel_RAM_Model}}, \code{\link{xmuMI}},
\code{\link{xmuMakeDeviationThresholdsMatrices}},
\code{\link{xmuMakeOneHeadedPathsFromPathList}},
\code{\link{xmuMakeTwoHeadedPathsFromPathList}},
\code{\link{xmuMaxLevels}}, \code{\link{xmuMinLevels}},
\code{\link{xmuPropagateLabels}},
\code{\link{xmu_assemble_twin_supermodel}},
\code{\link{xmu_check_levels_identical}},
\code{\link{xmu_clean_label}},
\code{\link{xmu_dot_make_paths}},
\code{\link{xmu_dot_make_residuals}},
\code{\link{xmu_dot_maker}},
\code{\link{xmu_dot_move_ranks}},
\code{\link{xmu_dot_rank_str}},
\code{\link{xmu_lavaan_process_group}},
\code{\link{xmu_make_mxData}},
\code{\link{xmu_make_top_twin}},
\code{\link{xmu_model_needs_means}},
\code{\link{xmu_name_from_lavaan_str}},
\code{\link{xmu_safe_run_summary}},
\code{\link{xmu_set_sep_from_suffix}},
\code{\link{xmu_simplex_corner}},
\code{\link{xmu_start_value_list}},
\code{\link{xmu_starts}}
}
\concept{Advanced Model Building Functions}
\concept{Check or test}
\concept{Core Modeling Functions}
\concept{Data Functions}
\concept{File Functions}
\concept{Get and set}
\concept{Miscellaneous Stats Helpers}
\concept{Miscellaneous Utility Functions}
\concept{Modify or Compare Models}
\concept{Plotting functions}
\concept{Reporting Functions}
\concept{String Functions}
\concept{Super-easy helpers}
\concept{Teaching and testing Functions}
\concept{Twin Data functions}
\concept{Twin Modeling Functions}
\concept{Twin Reporting Functions}
\concept{datasets}
\concept{xmu internal not for end user}
\concept{zAdvanced Helpers}
| /man/umx.Rd | no_license | qingwending/umx | R | false | true | 10,984 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/build_run_modify.R
\docType{package}
\name{umx}
\alias{umx}
\alias{umx-package}
\title{Functions for Structural Equation Modeling in OpenMx}
\description{
\code{umx} allows you to more easily build, run, modify, and report structural models, building on the OpenMx package.
All core functions are organized into families, so they are easier to find (see "families" below under \strong{See Also})
All the functions have full-featured and well commented examples, some even have \emph{figures}, so use the help, even if you think it won't help :-)
Have a look, for example at \code{\link[=umxRAM]{umxRAM()}}
Check out NEWS about new features at \code{news(package = "umx")}
}
\details{
Introductory working examples are below. You can run all demos with demo(umx)
When I have a vignette, it will be: vignette("umx", package = "umx")
There is a helpful blog at \url{https://tbates.github.io}
If you want the bleeding-edge version:
devtools::install_github("tbates/umx")
}
\examples{
require("umx")
data(demoOneFactor)
myData = mxData(cov(demoOneFactor), type = "cov", numObs = nrow(demoOneFactor))
latents = c("G")
manifests = names(demoOneFactor)
m1 <- umxRAM("One Factor", data = myData,
umxPath(latents, to = manifests),
umxPath(var = manifests),
umxPath(var = latents , fixedAt=1)
)
# umx added informative labels, created starting values,
# Ran your model (if autoRun is on), and displayed a brief summary
# including a comparison if you modified a model...!
# Let's get some journal-ready fit information
umxSummary(m1)
umxSummary(m1, show = "std") #also display parameter estimates
# You can get the coefficients of an MxModel with coef(), just like for lm etc.
coef(m1)
# But with more control using parameters
parameters(m1, thresh="above", b=.3, pat = "G_to.*", digits = 3)
# ==================
# = Model updating =
# ==================
# Can we set the loading of X5 on G to zero?
m2 = umxModify(m1, "G_to_x1", name = "no_effect_of_g_on_X5", comparison = TRUE)
umxCompare(m1, m2)
# Note: umxSetParameters can do this with some additional flexibility
# ========================
# = Confidence intervals =
# ========================
# umxSummary() will show these, but you can also use the confint() function
confint(m1) # OpenMx's SE-based confidence intervals
umxConfint(m1, parm = 'all', run = TRUE) # likelihood-based CIs
# And make a Figure in dot (.gv) format!
plot(m1, std = TRUE)
# If you just want the .dot code returned set file = NA
plot(m1, std = TRUE, file = NA)
}
\references{
\itemize{
\item \url{https://www.github.com/tbates/umx}
}
}
\seealso{
Other Teaching and testing Functions: \code{\link{tmx_genotypic_effect}},
\code{\link{tmx_is.identified}}
Other Core Modeling Functions: \code{\link{plot.MxLISRELModel}},
\code{\link{plot.MxModel}}, \code{\link{umxAlgebra}},
\code{\link{umxMatrix}}, \code{\link{umxModify}},
\code{\link{umxPath}}, \code{\link{umxRAM}},
\code{\link{umxRun}}, \code{\link{umxSummary}},
\code{\link{umxSuperModel}}
Other Reporting Functions: \code{\link{FishersMethod}},
\code{\link{loadings.MxModel}},
\code{\link{tmx_is.identified}}, \code{\link{tmx_show}},
\code{\link{umxAPA}}, \code{\link{umxEval}},
\code{\link{umxFactorScores}},
\code{\link{umxGetParameters}},
\code{\link{umxParameters}}, \code{\link{umxReduce}},
\code{\link{umxSummary}}, \code{\link{umxWeightedAIC}},
\code{\link{umx_APA_pval}}, \code{\link{umx_aggregate}},
\code{\link{umx_names}}, \code{\link{umx_print}},
\code{\link{umx_time}}, \code{\link{xmu_get_CI}},
\code{\link{xmu_show_fit_or_comparison}}
Other Modify or Compare Models: \code{\link{umxAdd1}},
\code{\link{umxDrop1}}, \code{\link{umxEquate}},
\code{\link{umxFixAll}}, \code{\link{umxMI}},
\code{\link{umxModify}}, \code{\link{umxSetParameters}},
\code{\link{umxUnexplainedCausalNexus}}
Other Plotting functions: \code{\link{plot.MxLISRELModel}},
\code{\link{plot.MxModel}}, \code{\link{umxPlotACEcov}},
\code{\link{umxPlotACEv}}, \code{\link{umxPlotACE}},
\code{\link{umxPlotCP}}, \code{\link{umxPlotGxEbiv}},
\code{\link{umxPlotGxE}}, \code{\link{umxPlotIP}},
\code{\link{umxPlotSexLim}}, \code{\link{umxPlotSimplex}}
Other Super-easy helpers: \code{\link{umxEFA}},
\code{\link{umxLav2RAM}}, \code{\link{umxTwoStage}}
Other Twin Modeling Functions: \code{\link{power.ACE.test}},
\code{\link{umxACE_cov_fixed}}, \code{\link{umxACEcov}},
\code{\link{umxACEv}}, \code{\link{umxACE}},
\code{\link{umxCP}}, \code{\link{umxGxE_window}},
\code{\link{umxGxEbiv}}, \code{\link{umxGxE}},
\code{\link{umxIPold}}, \code{\link{umxIP}},
\code{\link{umxSexLim}}, \code{\link{umxSimplex}},
\code{\link{umxSummaryACEcov}},
\code{\link{umxSummaryACEv}},
\code{\link{umxSummaryACE}}, \code{\link{umxSummaryCP}},
\code{\link{umxSummaryGxEbiv}},
\code{\link{umxSummaryGxE}}, \code{\link{umxSummaryIP}},
\code{\link{umxSummarySexLim}},
\code{\link{umxSummarySimplex}},
\code{\link{xmu_twin_check}}
Other Twin Reporting Functions: \code{\link{umxPlotCP}},
\code{\link{umxReduceACE}}, \code{\link{umxReduceGxE}},
\code{\link{umxReduce}},
\code{\link{umxSummarizeTwinData}}
Other Twin Data functions: \code{\link{umx_long2wide}},
\code{\link{umx_make_TwinData}},
\code{\link{umx_residualize}},
\code{\link{umx_scale_wide_twin_data}},
\code{\link{umx_wide2long}}
Other Get and set: \code{\link{umx_default_option}},
\code{\link{umx_get_checkpoint}},
\code{\link{umx_get_options}},
\code{\link{umx_set_auto_plot}},
\code{\link{umx_set_auto_run}},
\code{\link{umx_set_checkpoint}},
\code{\link{umx_set_condensed_slots}},
\code{\link{umx_set_cores}},
\code{\link{umx_set_data_variance_check}},
\code{\link{umx_set_optimization_options}},
\code{\link{umx_set_optimizer}},
\code{\link{umx_set_plot_file_suffix}},
\code{\link{umx_set_plot_format}},
\code{\link{umx_set_separator}},
\code{\link{umx_set_silent}},
\code{\link{umx_set_table_format}}
Other Check or test: \code{\link{umx_check_names}},
\code{\link{umx_is_class}},
\code{\link{umx_is_endogenous}},
\code{\link{umx_is_exogenous}},
\code{\link{umx_is_numeric}},
\code{\link{xmu_twin_check}}
Other Data Functions: \code{\link{umxCovData}},
\code{\link{umxDescribeDataWLS}},
\code{\link{umxHetCor}},
\code{\link{umxPadAndPruneForDefVars}},
\code{\link{umx_as_numeric}}, \code{\link{umx_cov2raw}},
\code{\link{umx_lower2full}},
\code{\link{umx_make_MR_data}},
\code{\link{umx_make_TwinData}},
\code{\link{umx_make_bin_cont_pair_data}},
\code{\link{umx_make_fake_data}},
\code{\link{umx_polychoric}},
\code{\link{umx_polypairwise}},
\code{\link{umx_polytriowise}},
\code{\link{umx_read_lower}}, \code{\link{umx_rename}},
\code{\link{umx_reorder}},
\code{\link{umx_select_valid}}, \code{\link{umx_stack}},
\code{\link{umx_swap_a_block}}
Other File Functions: \code{\link{dl_from_dropbox}},
\code{\link{umx_make_sql_from_excel}},
\code{\link{umx_move_file}}, \code{\link{umx_open}},
\code{\link{umx_rename_file}},
\code{\link{umx_write_to_clipboard}}
Other String Functions: \code{\link{umx_explode_twin_names}},
\code{\link{umx_explode}}, \code{\link{umx_grep}},
\code{\link{umx_names}}, \code{\link{umx_object_as_str}},
\code{\link{umx_paste_names}}, \code{\link{umx_rot}},
\code{\link{umx_trim}},
\code{\link{umx_write_to_clipboard}}
Other Miscellaneous Stats Helpers: \code{\link{oddsratio}},
\code{\link{reliability}}, \code{\link{umxCov2cor}},
\code{\link{umxHetCor}}, \code{\link{umx_apply}},
\code{\link{umx_cor}}, \code{\link{umx_fun_mean_sd}},
\code{\link{umx_means}}, \code{\link{umx_r_test}},
\code{\link{umx_round}}, \code{\link{umx_var}}
Other Miscellaneous Utility Functions: \code{\link{install.OpenMx}},
\code{\link{qm}}, \code{\link{umxBrownie}},
\code{\link{umxFactor}}, \code{\link{umxVersion}},
\code{\link{umx_array_shift}},
\code{\link{umx_cell_is_on}},
\code{\link{umx_cont_2_quantiles}},
\code{\link{umx_find_object}}, \code{\link{umx_make}},
\code{\link{umx_msg}}, \code{\link{umx_open_CRAN_page}},
\code{\link{umx_pad}}, \code{\link{umx_pb_note}},
\code{\link{umx_print}}, \code{\link{umx_scale}},
\code{\link{umx_score_scale}},
\code{\link{xmu_check_variance}}
Other datasets: \code{\link{Fischbein_wt}},
\code{\link{GFF}}, \code{\link{iqdat}},
\code{\link{us_skinfold_data}}
Other Advanced Model Building Functions: \code{\link{umxJiggle}},
\code{\link{umxLabel}}, \code{\link{umxLatent}},
\code{\link{umxRAM2Ordinal}},
\code{\link{umxThresholdMatrix}},
\code{\link{umxValues}},
\code{\link{umx_fix_first_loadings}},
\code{\link{umx_fix_latents}},
\code{\link{umx_get_bracket_addresses}},
\code{\link{umx_standardize}},
\code{\link{umx_string_to_algebra}}
Other zAdvanced Helpers: \code{\link{umx_merge_CIs}},
\code{\link{umx_standardize_ACEcov}},
\code{\link{umx_standardize_ACEv}},
\code{\link{umx_standardize_ACE}},
\code{\link{umx_standardize_CP}},
\code{\link{umx_standardize_IP}},
\code{\link{umx_standardize_SexLim}},
\code{\link{umx_standardize_Simplex}},
\code{\link{umx_stash_CIs}}
Other xmu internal not for end user: \code{\link{umxModel}},
\code{\link{xmuHasSquareBrackets}},
\code{\link{xmuLabel_MATRIX_Model}},
\code{\link{xmuLabel_Matrix}},
\code{\link{xmuLabel_RAM_Model}}, \code{\link{xmuMI}},
\code{\link{xmuMakeDeviationThresholdsMatrices}},
\code{\link{xmuMakeOneHeadedPathsFromPathList}},
\code{\link{xmuMakeTwoHeadedPathsFromPathList}},
\code{\link{xmuMaxLevels}}, \code{\link{xmuMinLevels}},
\code{\link{xmuPropagateLabels}},
\code{\link{xmu_assemble_twin_supermodel}},
\code{\link{xmu_check_levels_identical}},
\code{\link{xmu_clean_label}},
\code{\link{xmu_dot_make_paths}},
\code{\link{xmu_dot_make_residuals}},
\code{\link{xmu_dot_maker}},
\code{\link{xmu_dot_move_ranks}},
\code{\link{xmu_dot_rank_str}},
\code{\link{xmu_lavaan_process_group}},
\code{\link{xmu_make_mxData}},
\code{\link{xmu_make_top_twin}},
\code{\link{xmu_model_needs_means}},
\code{\link{xmu_name_from_lavaan_str}},
\code{\link{xmu_safe_run_summary}},
\code{\link{xmu_set_sep_from_suffix}},
\code{\link{xmu_simplex_corner}},
\code{\link{xmu_start_value_list}},
\code{\link{xmu_starts}}
}
\concept{Advanced Model Building Functions}
\concept{Check or test}
\concept{Core Modeling Functions}
\concept{Data Functions}
\concept{File Functions}
\concept{Get and set}
\concept{Miscellaneous Stats Helpers}
\concept{Miscellaneous Utility Functions}
\concept{Modify or Compare Models}
\concept{Plotting functions}
\concept{Reporting Functions}
\concept{String Functions}
\concept{Super-easy helpers}
\concept{Teaching and testing Functions}
\concept{Twin Data functions}
\concept{Twin Modeling Functions}
\concept{Twin Reporting Functions}
\concept{datasets}
\concept{xmu internal not for end user}
\concept{zAdvanced Helpers}
|
#!/usr/bin/env Rscript
# Plot stereographically projected points together with fitted circles.
# Usage: plotresults.R <points-file> <circles-file>
#   args[1]: whitespace table of points; its last row is the centre.
#   args[2]: whitespace table of circle data (columns used below).
require(plotrix)  # provides draw.circle()
x11()             # open an interactive plotting device
args = commandArgs(trailingOnly = TRUE)
# Circle constant: one full turn in radians (2 * pi, truncated).
tau <- 6.283185

# Stereographic projection of geographic coordinates onto the plane
# tangent to a spherical Earth at the given centre point.
#
#   lat, lon   : point latitude/longitude in degrees (vectorised)
#   clat, clon : centre latitude/longitude in degrees
#
# Returns a data.frame with columns x and y: the projected planar
# coordinates in metres.
stereo <- function(lat, lon, clat, clon) {
  # Convert every angle from degrees to radians.
  p  <- lat * tau / 360
  l  <- lon * tau / 360
  p0 <- clat * tau / 360
  l0 <- clon * tau / 360
  earth.radius <- 6371009  # mean Earth radius, metres
  # Projection scale factor at this point.
  k <- 2 * earth.radius /
    (1 + sin(p0) * sin(p) + cos(p0) * cos(p) * cos(l - l0))
  data.frame(
    x = k * cos(p) * sin(l - l0),
    y = k * (cos(p0) * sin(p) - sin(p0) * cos(p) * cos(l - l0))
  )
}
# Read the input points; the final row is used as the projection centre.
pointdata = read.table(args[1])
center = tail(pointdata, 1)
# Project all points relative to the centre row (V1/V2 are passed as
# lat/lon -- assumed from stereo()'s signature; TODO confirm file format).
stereodata = stereo(pointdata$V1, pointdata$V2, center$V1, center$V2)
# Circle table: V3/V4 are plotted as circle centres with radii V5;
# V1/V2 are a second set of plotted coordinates (semantics assumed --
# confirm against the producer of args[2]).
circledata = read.table(args[2])
#xmin = min(c(circledata$V1, circledata$V3, stereodata[[1]]))
#xmax = max(c(circledata$V1, circledata$V3, stereodata[[1]]))
#ymin = min(c(circledata$V2, circledata$V4, stereodata[[2]]))
#ymax = max(c(circledata$V2, circledata$V4, stereodata[[2]]))
# Axis limits covering both the projected points and the V3/V4 centres.
xmin = min(c(circledata$V3, stereodata[[1]]))
xmax = max(c(circledata$V3, stereodata[[1]]))
ymin = min(c(circledata$V4, stereodata[[2]]))
ymax = max(c(circledata$V4, stereodata[[2]]))
xlow = xmin
xhig = xmax
ylow = ymin
yhig = ymax
# Overlay three scatter layers on shared axes (asp=1 keeps circles
# round): red = projected points, green = V1/V2, blue = V3/V4.
plot(stereodata[[1]], stereodata[[2]], xlim=c(xlow, xhig), ylim=c(ylow, yhig), pch=19, cex=0.5, col="red", asp=1)
par(new=T)
plot(circledata$V1, circledata$V2, xlim=c(xlow, xhig), ylim=c(ylow, yhig), pch=19, cex=0.4, col="green", asp=1)
par(new=T)
plot(circledata$V3, circledata$V4, xlim=c(xlow, xhig), ylim=c(ylow, yhig), pch=19, cex=0.4, col="blue", asp=1)
par(new=T)
# Draw each circle (radius V5) around its V3/V4 centre.
# NOTE(review): 1:length(...) yields c(1, 0) on an empty table;
# seq_along() would be safer.
for (i in 1:length(circledata$V5)) {
  draw.circle(circledata$V3[i], circledata$V4[i], circledata$V5[i])
}
# Print summaries of both inputs for quick inspection.
summary(stereodata)
summary(circledata)
| /cgal/spherical/optimal/naive/plotresults.R | no_license | michaelore/cpre492-algorithm | R | false | false | 1,621 | r | #!/usr/bin/env Rscript
require(plotrix)
x11()
args = commandArgs(trailingOnly = TRUE)
tau = 6.283185
stereo = function(lat, lon, clat, clon) {
phi = lat*tau/360
lam = lon*tau/360
phi1 = clat*tau/360
lam0 = clon*tau/360
r = 6371009
k = 2*r/(1 + sin(phi1)*sin(phi)+cos(phi1)*cos(phi)*cos(lam-lam0))
x = k*cos(phi)*sin(lam-lam0)
y = k*(cos(phi1)*sin(phi)-sin(phi1)*cos(phi)*cos(lam-lam0))
return(data.frame(x, y))
}
pointdata = read.table(args[1])
center = tail(pointdata, 1)
stereodata = stereo(pointdata$V1, pointdata$V2, center$V1, center$V2)
circledata = read.table(args[2])
#xmin = min(c(circledata$V1, circledata$V3, stereodata[[1]]))
#xmax = max(c(circledata$V1, circledata$V3, stereodata[[1]]))
#ymin = min(c(circledata$V2, circledata$V4, stereodata[[2]]))
#ymax = max(c(circledata$V2, circledata$V4, stereodata[[2]]))
xmin = min(c(circledata$V3, stereodata[[1]]))
xmax = max(c(circledata$V3, stereodata[[1]]))
ymin = min(c(circledata$V4, stereodata[[2]]))
ymax = max(c(circledata$V4, stereodata[[2]]))
xlow = xmin
xhig = xmax
ylow = ymin
yhig = ymax
plot(stereodata[[1]], stereodata[[2]], xlim=c(xlow, xhig), ylim=c(ylow, yhig), pch=19, cex=0.5, col="red", asp=1)
par(new=T)
plot(circledata$V1, circledata$V2, xlim=c(xlow, xhig), ylim=c(ylow, yhig), pch=19, cex=0.4, col="green", asp=1)
par(new=T)
plot(circledata$V3, circledata$V4, xlim=c(xlow, xhig), ylim=c(ylow, yhig), pch=19, cex=0.4, col="blue", asp=1)
par(new=T)
for (i in 1:length(circledata$V5)) {
draw.circle(circledata$V3[i], circledata$V4[i], circledata$V5[i])
}
summary(stereodata)
summary(circledata)
|
#' Detect a random-effects bar term in a model formula
#'
#' Returns \code{TRUE} when \code{term} is a call whose operator is the
#' single (\code{|}) or double (\code{||}) bar used to declare random
#' effects components, and \code{FALSE} otherwise.
#'
#' @keywords random effects models
#' @param term language object (formula component) to be processed
#' @examples
#' #Internal function only
#' #no examples
isBar <- function(term) {
  if (!is.call(term)) {
    return(FALSE)
  }
  op <- term[[1]]
  (op == as.name("|")) || (op == as.name("||"))
}
#'
#' @keywords random effects models
#' @param term formula to be processed
#' @examples
#' #Internal function only
#' #no examples
#' #
#' #
# Return TRUE when `term` is a call headed by the random-effects bar
# operator `|` or `||`, FALSE otherwise.
isBar <- function(term) {
  if (!is.call(term)) {
    return(FALSE)
  }
  op <- term[[1]]
  (op == as.name("|")) || (op == as.name("||"))
}
#!/usr/bin/Rscript
##########################################################################################
##
## LOH_MakePlots.R
##
## Plot raw data for LOH visualisation.
##
## Usage: LOH_MakePlots.R <sample_name> <Human|Mouse> <repository_dir>
## All plotting helpers (DefineChromSizes, ProcessCountData,
## plotGlobalRatioProfile, plotChromosomalRatioProfile) are defined in
## all_GeneratePlots.R, sourced from the repository below.
##
##########################################################################################
args = commandArgs(TRUE)
name=args[1] #used for naming in- and output files
species=args[2]
repository_dir=args[3] #location of repository
source(paste(repository_dir,"/all_GeneratePlots.R",sep=""))
setwd(paste(name,"/results/LOH",sep=""))
system(paste("mkdir -p ",name,"_Chromosomes",sep=""))
chrom.sizes = DefineChromSizes(species)
# Number of chromosomes to plot per species; NOTE(review): humans have 22
# autosomes -- confirm that stopping at 21 is intentional.
if (species=="Human")
{
chromosomes=21
} else if (species=="Mouse")
{
chromosomes=19
}
# --- Tumour LOH: genome-wide plot, per-chromosome plots, merged PDF ---
data=paste(name,".VariantsForLOH.txt",sep="")
LOHDat = ProcessCountData(data,chrom.sizes,"LOH")
plotGlobalRatioProfile(cn=LOHDat[[1]],ChromBorders=LOHDat[[2]],cnSeg="",samplename=name,method="LOH",toolname="LOH",normalization="",y_axis="LOH",Transparency=70, Cex=0.3,outformat="pdf")
for (i in 1:chromosomes)
{
plotChromosomalRatioProfile(cn=LOHDat[[4]],chrom.sizes,cnSeg="",samplename=name,chromosome=i,method="LOH",toolname="LOH",SliceStart="",SliceStop="",Transparency=70, Cex=0.7, outformat="pdf")
}
# The Chr? / Chr?? globs list single-digit chromosomes before two-digit ones.
system(paste("pdfunite ",name,"_Chromosomes/",name,".Chr?.LOH.LOH.pdf ",name,"_Chromosomes/",name,".Chr??.LOH.LOH.pdf ",name,".Chromosomes.LOH.LOH.pdf",sep=""))
# --- Same plots for the unfiltered ("raw") LOH values from the same file ---
LOHDat = ProcessCountData(data,chrom.sizes,"LOH_raw")
plotGlobalRatioProfile(cn=LOHDat[[1]],ChromBorders=LOHDat[[2]],cnSeg="",samplename=name,method="LOH_raw",toolname="LOH_raw",normalization="",y_axis="LOH_raw",Transparency=70, Cex=0.3,outformat="pdf")
for (i in 1:chromosomes)
{
plotChromosomalRatioProfile(cn=LOHDat[[4]],chrom.sizes,cnSeg="",samplename=name,chromosome=i,method="LOH_raw",toolname="LOH_raw",SliceStart="",SliceStop="",Transparency=70, Cex=0.7, outformat="pdf")
}
system(paste("pdfunite ",name,"_Chromosomes/",name,".Chr?.LOH_raw.LOH_raw.pdf ",name,"_Chromosomes/",name,".Chr??.LOH_raw.LOH_raw.pdf ",name,".Chromosomes.LOH_raw.LOH_raw.pdf",sep=""))
# --- Germline variants, plotted from a separate input file ---
data=paste(name,".VariantsForLOHGermline.txt",sep="")
LOH_GermlineDat = ProcessCountData(data,chrom.sizes,"LOH_Germline")
plotGlobalRatioProfile(cn=LOH_GermlineDat[[1]],ChromBorders=LOH_GermlineDat[[2]],cnSeg="",samplename=name,method="LOH_Germline",toolname="LOH_Germline",normalization="",y_axis="LOH_Germline",Transparency=70, Cex=0.3,outformat="pdf")
for (i in 1:chromosomes)
{
plotChromosomalRatioProfile(cn=LOH_GermlineDat[[4]],chrom.sizes,cnSeg="",samplename=name,chromosome=i,method="LOH_Germline",toolname="LOH_Germline",SliceStart="",SliceStop="",Transparency=70, Cex=0.7, outformat="pdf")
}
system(paste("pdfunite ",name,"_Chromosomes/",name,".Chr?.LOH_Germline.LOH_Germline.pdf ",name,"_Chromosomes/",name,".Chr??.LOH_Germline.LOH_Germline.pdf ",name,".Chromosomes.LOH_Germline.LOH_Germline.pdf",sep=""))
##########################################################################################
##
## LOH_MakePlots.R
##
## Plot raw data for LOH visualisation.
##
##########################################################################################
args = commandArgs(TRUE)
name=args[1] #used for naming in- and output files
species=args[2]
repository_dir=args[3] #location of repository
source(paste(repository_dir,"/all_GeneratePlots.R",sep=""))
setwd(paste(name,"/results/LOH",sep=""))
system(paste("mkdir -p ",name,"_Chromosomes",sep=""))
chrom.sizes = DefineChromSizes(species)
if (species=="Human")
{
chromosomes=21
} else if (species=="Mouse")
{
chromosomes=19
}
data=paste(name,".VariantsForLOH.txt",sep="")
LOHDat = ProcessCountData(data,chrom.sizes,"LOH")
plotGlobalRatioProfile(cn=LOHDat[[1]],ChromBorders=LOHDat[[2]],cnSeg="",samplename=name,method="LOH",toolname="LOH",normalization="",y_axis="LOH",Transparency=70, Cex=0.3,outformat="pdf")
for (i in 1:chromosomes)
{
plotChromosomalRatioProfile(cn=LOHDat[[4]],chrom.sizes,cnSeg="",samplename=name,chromosome=i,method="LOH",toolname="LOH",SliceStart="",SliceStop="",Transparency=70, Cex=0.7, outformat="pdf")
}
system(paste("pdfunite ",name,"_Chromosomes/",name,".Chr?.LOH.LOH.pdf ",name,"_Chromosomes/",name,".Chr??.LOH.LOH.pdf ",name,".Chromosomes.LOH.LOH.pdf",sep=""))
LOHDat = ProcessCountData(data,chrom.sizes,"LOH_raw")
plotGlobalRatioProfile(cn=LOHDat[[1]],ChromBorders=LOHDat[[2]],cnSeg="",samplename=name,method="LOH_raw",toolname="LOH_raw",normalization="",y_axis="LOH_raw",Transparency=70, Cex=0.3,outformat="pdf")
for (i in 1:chromosomes)
{
plotChromosomalRatioProfile(cn=LOHDat[[4]],chrom.sizes,cnSeg="",samplename=name,chromosome=i,method="LOH_raw",toolname="LOH_raw",SliceStart="",SliceStop="",Transparency=70, Cex=0.7, outformat="pdf")
}
system(paste("pdfunite ",name,"_Chromosomes/",name,".Chr?.LOH_raw.LOH_raw.pdf ",name,"_Chromosomes/",name,".Chr??.LOH_raw.LOH_raw.pdf ",name,".Chromosomes.LOH_raw.LOH_raw.pdf",sep=""))
data=paste(name,".VariantsForLOHGermline.txt",sep="")
LOH_GermlineDat = ProcessCountData(data,chrom.sizes,"LOH_Germline")
plotGlobalRatioProfile(cn=LOH_GermlineDat[[1]],ChromBorders=LOH_GermlineDat[[2]],cnSeg="",samplename=name,method="LOH_Germline",toolname="LOH_Germline",normalization="",y_axis="LOH_Germline",Transparency=70, Cex=0.3,outformat="pdf")
for (i in 1:chromosomes)
{
plotChromosomalRatioProfile(cn=LOH_GermlineDat[[4]],chrom.sizes,cnSeg="",samplename=name,chromosome=i,method="LOH_Germline",toolname="LOH_Germline",SliceStart="",SliceStop="",Transparency=70, Cex=0.7, outformat="pdf")
}
system(paste("pdfunite ",name,"_Chromosomes/",name,".Chr?.LOH_Germline.LOH_Germline.pdf ",name,"_Chromosomes/",name,".Chr??.LOH_Germline.LOH_Germline.pdf ",name,".Chromosomes.LOH_Germline.LOH_Germline.pdf",sep="")) |
##########################################################################
##### Terrie Klinger's Kasitsna Bay Data #####
##### Percent cover Invertebrate Data Cleaning Script #####
##### by Rachael E. Blake #####
##### 1 May 2017 #####
##########################################################################
library(plyr) ; library(readxl) ; library(tidyverse) ; library(reshape2) ; library(stringr)
# read in excel file
# this function creates a list of data frames, one for each excel sheet
# Read every worksheet of an Excel workbook into a named list of data
# frames (one element per sheet), skipping the first two header rows of
# each sheet.
read_excel_allsheets <- function(filename) {
  sheet_names <- readxl::excel_sheets(filename)
  sheet_list <- lapply(sheet_names, function(s) {
    readxl::read_excel(filename, sheet = s, skip = 2)
  })
  names(sheet_list) <- sheet_names
  sheet_list
}
# Read the workbook; the path is relative to the current working directory.
X_sheets_n <- read_excel_allsheets("std_percent_cvr_by_year_to_2015_for_RB.xlsx")
# remove the "notes" sheet
# NOTE(review): sheet 3 is dropped by position -- fragile if the workbook
# ever gains or reorders sheets; dropping by name would be safer.
X_sheets <- X_sheets_n[c(1,2,4:18)]
# make each data frame long instead of wide
# (t() goes through a matrix, so all columns come back as character or
# factor; fix_data() below restores usable names and types)
X_long <- lapply(X_sheets, function(x) as.data.frame(t(x)))
# Clean one transposed sheet: promote the first row to column names,
# standardise awkward column names, coerce everything to character, and
# replace NA with 0 (missing cells represent true zeros in this dataset).
fix_data <- function(df) {
# make column names
names(df) <- as.character(unlist(df[1,]))
df <- df[-1,]
df1 <- df %>%
# row names to column
tibble::rownames_to_column(var="standard_code") %>%
# remove spaces/symbols from column names (originals kept in backticks)
dplyr::rename(abbr_code=`abbr code`, FUCUS_TOTAL=`FUCUS%TOTAL`,
FUCUS_SPORELINGS=`FUCUS SPORELINGS%`, Ulva_Ent=`Ulva/Ent`,
Pterosiphonia_poly=`Pterosiphonia/poly`, Clad_sericia=`Clad sericia`,
Masto_pap=`Masto pap`, Barnacle_spat=`Barnacle spat`,
Palmaria_callophylloides=`Palmaria callophylloides`,
Crustose_coralline=`Crustose coralline`,
erect_coralline=`erect coralline`
) %>%
# make everything character
dplyr::mutate_if(is.factor, as.character)
# replace NAs with 0, because Terrie says missing values represent 0s, NOT missing data
df1[is.na(df1)] <- 0
# return
return(df1)
}
# apply fix_data function to list of data frames
X_clean <- lapply(X_long, function(x) fix_data(x))
# put all data frames into one giant one
PerCov_clean <- do.call("rbind", X_clean)
# make column for Year using data frame name
# (each element is named after its source sheet; sheet names presumably
# encode the year -- confirm against the workbook)
PerCov_clean$Year <- rep(names(X_clean), sapply(X_clean, nrow))
# make columns for block and treatment
# NOTE(review): positions are counted from the end of standard_code --
# assumes a fixed-width code format; verify against the raw codes.
PerCov_clean$Block <- str_sub(PerCov_clean$standard_code, -9,-8)
PerCov_clean$Treatment <- str_sub(PerCov_clean$standard_code, -4,-3)
# make columns numeric
# NOTE(review): hard-coded column range 4:28 breaks silently if the
# column layout ever changes.
PerCov_clean[,c(4:28)] <- lapply(PerCov_clean[,c(4:28)], function(x) as.numeric(x))
head(PerCov_clean)
| /data_cleaning_scripts/Data_Cleaning_K_Bay_Percent_Cover.r | no_license | reblake/Klinger_Kasitsna_Bay | R | false | false | 3,082 | r | ##########################################################################
##### Terrie Klinger's Kasitsna Bay Data #####
##### Percent cover Invertebrate Data Cleaning Script #####
##### by Rachael E. Blake #####
##### 1 May 2017 #####
##########################################################################
library(plyr) ; library(readxl) ; library(tidyverse) ; library(reshape2) ; library(stringr)
# read in excel file
# this function creates a list of data frames, one for each excel sheet
read_excel_allsheets <- function(filename) {
sheets <- readxl::excel_sheets(filename)
x <- lapply(sheets, function(X) readxl::read_excel(filename, sheet=X, skip=2))
names(x) <- sheets
x
}
X_sheets_n <- read_excel_allsheets("std_percent_cvr_by_year_to_2015_for_RB.xlsx")
# remove the "notes" sheet
X_sheets <- X_sheets_n[c(1,2,4:18)]
# make each data frame long instead of wide
X_long <- lapply(X_sheets, function(x) as.data.frame(t(x)))
fix_data <- function(df) {
# make column names
names(df) <- as.character(unlist(df[1,]))
df <- df[-1,]
df1 <- df %>%
# row names to column
tibble::rownames_to_column(var="standard_code") %>%
# remove spaces from column names
dplyr::rename(abbr_code=`abbr code`, FUCUS_TOTAL=`FUCUS%TOTAL`,
FUCUS_SPORELINGS=`FUCUS SPORELINGS%`, Ulva_Ent=`Ulva/Ent`,
Pterosiphonia_poly=`Pterosiphonia/poly`, Clad_sericia=`Clad sericia`,
Masto_pap=`Masto pap`, Barnacle_spat=`Barnacle spat`,
Palmaria_callophylloides=`Palmaria callophylloides`,
Crustose_coralline=`Crustose coralline`,
erect_coralline=`erect coralline`
) %>%
# make everything character
dplyr::mutate_if(is.factor, as.character)
# replace NAs with 0, because Terrie says missing values represent 0s, NOT missing data
df1[is.na(df1)] <- 0
# return
return(df1)
}
# apply fix_data function to list of data frames
X_clean <- lapply(X_long, function(x) fix_data(x))
# put all data frames into one giant one
PerCov_clean <- do.call("rbind", X_clean)
# make column for Year using data frame name
PerCov_clean$Year <- rep(names(X_clean), sapply(X_clean, nrow))
# make columns for block and treatment
PerCov_clean$Block <- str_sub(PerCov_clean$standard_code, -9,-8)
PerCov_clean$Treatment <- str_sub(PerCov_clean$standard_code, -4,-3)
# make columns numeric
PerCov_clean[,c(4:28)] <- lapply(PerCov_clean[,c(4:28)], function(x) as.numeric(x))
head(PerCov_clean)
|
# Load the full household power-consumption file; '?' marks missing values.
powerdata <- read.csv("household_power_consumption.txt", header = T, sep = ';', na.strings = "?")
powerdata$Date <- as.Date(powerdata$Date, format = "%d/%m/%Y")
#powerdata$Datatime <- strptime(paste(powerdata$Date, powerdata$Time), "%d/%m/%Y %H:%M%:%S")
# Keep only 2007-02-01 and 2007-02-02 (comparison coerces strings to Date).
selectdata <- subset(powerdata, subset = (Date >="2007-02-01" & Date <="2007-02-02"))
#selectdata$Datatime <- strptime(paste(selectdata$Date, selectdata$Time), "%d/%m/%Y %H:%M%:%S")
# Build a POSIXct timestamp from the Date and Time columns.
DT <- paste(as.Date(selectdata$Date), selectdata$Time)
selectdata$DT <- as.POSIXct(DT)
# Render the global-active-power time series to plot2.png (480x480 px).
png(filename = "./plot2.png", width = 480, height = 480, units = "px")
plot(selectdata$DT, selectdata$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
| /Plot2.R | no_license | RayMick/ExData_Plotting1 | R | false | false | 737 | r | powerdata <- read.csv("household_power_consumption.txt", header = T, sep = ';', na.strings = "?")
powerdata$Date <- as.Date(powerdata$Date, format = "%d/%m/%Y")
#powerdata$Datatime <- strptime(paste(powerdata$Date, powerdata$Time), "%d/%m/%Y %H:%M%:%S")
# Keep only 2007-02-01 and 2007-02-02 (comparison coerces strings to Date).
selectdata <- subset(powerdata, subset = (Date >="2007-02-01" & Date <="2007-02-02"))
#selectdata$Datatime <- strptime(paste(selectdata$Date, selectdata$Time), "%d/%m/%Y %H:%M%:%S")
# Build a POSIXct timestamp from the Date and Time columns.
DT <- paste(as.Date(selectdata$Date), selectdata$Time)
selectdata$DT <- as.POSIXct(DT)
# Render the global-active-power time series to plot2.png (480x480 px).
png(filename = "./plot2.png", width = 480, height = 480, units = "px")
plot(selectdata$DT, selectdata$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
|
\alias{gtkLabelGetSelectable}
\name{gtkLabelGetSelectable}
\title{gtkLabelGetSelectable}
\description{Gets the value set by \code{\link{gtkLabelSetSelectable}}.}
\usage{gtkLabelGetSelectable(object)}
\arguments{\item{\code{object}}{[\code{\link{GtkLabel}}] a \code{\link{GtkLabel}}}}
\value{[logical] \code{TRUE} if the user can copy text from the label}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /man/gtkLabelGetSelectable.Rd | no_license | cran/RGtk2.10 | R | false | false | 428 | rd | \alias{gtkLabelGetSelectable}
\name{gtkLabelGetSelectable}
\title{gtkLabelGetSelectable}
\description{Gets the value set by \code{\link{gtkLabelSetSelectable}}.}
\usage{gtkLabelGetSelectable(object)}
\arguments{\item{\code{object}}{[\code{\link{GtkLabel}}] a \code{\link{GtkLabel}}}}
\value{[logical] \code{TRUE} if the user can copy text from the label}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
/Scripts/02_simulando_datos_estimadores.r | no_license | Jess1Vel/Curso_de_Estadistica_Inferencial_con_R | R | false | false | 3,082 | r | ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OfficialJoke-package.R
\docType{package}
\name{OfficialJoke-package}
\alias{OfficialJoke-package}
\alias{OfficialJokeR}
\title{R wrapper for An API of Official Joke}
\value{
List with all parameters of the joke from official joke APIs.
}
\description{
This package provides access to the \href{https://github.com/15Dkatz/official_joke_api}{official_joke_api}
API from R. Final project for MDS 2019 Fall.
}
\examples{
get_joke()
get_random_joke()
get_joke(type = "general",choice="ten",return_type="dataframe")
}
\author{
Ximing Zhang
}
| /man/OfficialJoke-package.Rd | no_license | zhangxm96/OfficialJokeR | R | false | true | 615 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OfficialJoke-package.R
\docType{package}
\name{OfficialJoke-package}
\alias{OfficialJoke-package}
\alias{OfficialJokeR}
\title{R wrapper for An API of Official Joke}
\value{
List with all parameters of the joke from official joke APIs.
}
\description{
This package provides access to the \href{https://github.com/15Dkatz/official_joke_api}{official_joke_api}
API from R. Final Project For MDS 2019 Fall.
}
\examples{
get_joke()
get_random_joke()
get_joke(type = "general",choice="ten",return_type="dataframe")
}
\author{
Ximing Zhang
}
|
############## Setting-up Transfer Learning Script
# Builds a TF 1.x graph for VGG16, restores pretrained weights, and
# classifies a downloaded test image (see the sections below).
require(tensorflow)
np<-import("numpy")  # reticulate handle to numpy, used by load_weights()
# Import slim from the contrib library of tensorflow (TF 1.x-only API)
slim = tf$contrib$slim
# Reset tensorflow Graph
tf$reset_default_graph()
# Resizing the images
# input.img accepts a batch of RGB images of any spatial size; they are
# resized to the 224x224 input that VGG16 expects.
input.img = tf$placeholder(tf$float32, shape(NULL, NULL, NULL, 3))
scaled.img = tf$image$resize_images(input.img, shape(224,224))
# Define VGG16 network
library(magrittr)
# Build the full VGG16 graph (13 conv layers in 5 blocks, each followed
# by 2x2 max-pooling, then three fully-connected layers expressed as
# convolutions) on top of `input.image`.  The variable scopes must match
# the names stored in vgg_16.ckpt so that Saver$restore() below can map
# checkpoint weights onto these layers.  Returns the squeezed
# 1000-way logit tensor.
VGG16.model<-function(slim, input.image){
vgg16.network = slim$conv2d(input.image, 64, shape(3,3), scope='vgg_16/conv1/conv1_1') %>%
slim$conv2d(64, shape(3,3), scope='vgg_16/conv1/conv1_2') %>%
slim$max_pool2d( shape(2, 2), scope='vgg_16/pool1') %>%
slim$conv2d(128, shape(3,3), scope='vgg_16/conv2/conv2_1') %>%
slim$conv2d(128, shape(3,3), scope='vgg_16/conv2/conv2_2') %>%
slim$max_pool2d( shape(2, 2), scope='vgg_16/pool2') %>%
slim$conv2d(256, shape(3,3), scope='vgg_16/conv3/conv3_1') %>%
slim$conv2d(256, shape(3,3), scope='vgg_16/conv3/conv3_2') %>%
slim$conv2d(256, shape(3,3), scope='vgg_16/conv3/conv3_3') %>%
slim$max_pool2d(shape(2, 2), scope='vgg_16/pool3') %>%
slim$conv2d(512, shape(3,3), scope='vgg_16/conv4/conv4_1') %>%
slim$conv2d(512, shape(3,3), scope='vgg_16/conv4/conv4_2') %>%
slim$conv2d(512, shape(3,3), scope='vgg_16/conv4/conv4_3') %>%
slim$max_pool2d(shape(2, 2), scope='vgg_16/pool4') %>%
slim$conv2d(512, shape(3,3), scope='vgg_16/conv5/conv5_1') %>%
slim$conv2d(512, shape(3,3), scope='vgg_16/conv5/conv5_2') %>%
slim$conv2d(512, shape(3,3), scope='vgg_16/conv5/conv5_3') %>%
slim$max_pool2d(shape(2, 2), scope='vgg_16/pool5') %>%
slim$conv2d(4096, shape(7, 7), padding='VALID', scope='vgg_16/fc6') %>%
slim$conv2d(4096, shape(1, 1), scope='vgg_16/fc7') %>%
slim$conv2d(1000, shape(1, 1), scope='vgg_16/fc8') %>%
tf$squeeze(shape(1, 2), name='vgg_16/fc8/squeezed')
return(vgg16.network)
}
vgg16.network<-VGG16.model(slim, input.image = scaled.img)
# Restore the weights
# A Saver with no var_list covers every variable created by the slim
# layers above; restore() fills them from the checkpoint file.
restorer = tf$train$Saver()
sess = tf$Session()
restorer$restore(sess, 'vgg_16.ckpt')
### Load initial layer
WEIGHTS_PATH<-'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
# NOTE(review): load_weights() ignores its `sess` argument and only
# returns the value of np$load() (the last expression); it never assigns
# anything into the graph and is not called anywhere in this script.
load_weights<-function(sess){
weights_dict = np$load(WEIGHTS_PATH, encoding = 'bytes')
}
# Evaluating using VGG16 network: download a test JPEG, restore its pixel
# values to the 0-255 range, and run it through the restored network.
require(jpeg)
testImgURL<-"http://farm4.static.flickr.com/3155/2591264041_273abea408.jpg"
img.test<-tempfile()
download.file(testImgURL,img.test, mode="wb")
read.image <- readJPEG(img.test)  # H x W x 3 array scaled to [0, 1]
file.remove(img.test) # cleanup
## Evaluate
size = dim(read.image)
# Add a leading batch dimension and undo readJPEG's 1/255 scaling.
imgs = array(255*read.image, dim = c(1, size[1], size[2], size[3]))
# Fix: feed the placeholder `input.img` defined above -- the original code
# used the key `images`, which names no tensor in this graph and would
# fail at run time.
VGG16_eval = sess$run(vgg16.network, dict(input.img = imgs))
probs = exp(VGG16_eval)/sum(exp(VGG16_eval)) # 672: 'mountain bike, all-terrain bike, off-roader',
| /Chapter 10/src/Chapter10_1_VGG16Model.R | permissive | PacktPublishing/R-Deep-Learning-Cookbook | R | false | false | 2,897 | r | ############## Setting-up Transfer Learning Script
require(tensorflow)
np<-import("numpy")
# Import slim from contrib libraty of tensorflow
slim = tf$contrib$slim
# Reset tensorflow Graph
tf$reset_default_graph()
# Resizing the images
input.img = tf$placeholder(tf$float32, shape(NULL, NULL, NULL, 3))
scaled.img = tf$image$resize_images(input.img, shape(224,224))
# Define VGG16 network
library(magrittr)
VGG16.model<-function(slim, input.image){
vgg16.network = slim$conv2d(input.image, 64, shape(3,3), scope='vgg_16/conv1/conv1_1') %>%
slim$conv2d(64, shape(3,3), scope='vgg_16/conv1/conv1_2') %>%
slim$max_pool2d( shape(2, 2), scope='vgg_16/pool1') %>%
slim$conv2d(128, shape(3,3), scope='vgg_16/conv2/conv2_1') %>%
slim$conv2d(128, shape(3,3), scope='vgg_16/conv2/conv2_2') %>%
slim$max_pool2d( shape(2, 2), scope='vgg_16/pool2') %>%
slim$conv2d(256, shape(3,3), scope='vgg_16/conv3/conv3_1') %>%
slim$conv2d(256, shape(3,3), scope='vgg_16/conv3/conv3_2') %>%
slim$conv2d(256, shape(3,3), scope='vgg_16/conv3/conv3_3') %>%
slim$max_pool2d(shape(2, 2), scope='vgg_16/pool3') %>%
slim$conv2d(512, shape(3,3), scope='vgg_16/conv4/conv4_1') %>%
slim$conv2d(512, shape(3,3), scope='vgg_16/conv4/conv4_2') %>%
slim$conv2d(512, shape(3,3), scope='vgg_16/conv4/conv4_3') %>%
slim$max_pool2d(shape(2, 2), scope='vgg_16/pool4') %>%
slim$conv2d(512, shape(3,3), scope='vgg_16/conv5/conv5_1') %>%
slim$conv2d(512, shape(3,3), scope='vgg_16/conv5/conv5_2') %>%
slim$conv2d(512, shape(3,3), scope='vgg_16/conv5/conv5_3') %>%
slim$max_pool2d(shape(2, 2), scope='vgg_16/pool5') %>%
slim$conv2d(4096, shape(7, 7), padding='VALID', scope='vgg_16/fc6') %>%
slim$conv2d(4096, shape(1, 1), scope='vgg_16/fc7') %>%
slim$conv2d(1000, shape(1, 1), scope='vgg_16/fc8') %>%
tf$squeeze(shape(1, 2), name='vgg_16/fc8/squeezed')
return(vgg16.network)
}
vgg16.network<-VGG16.model(slim, input.image = scaled.img)
# Restore the weights
restorer = tf$train$Saver()
sess = tf$Session()
restorer$restore(sess, 'vgg_16.ckpt')
### Load initial layer
WEIGHTS_PATH<-'vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
load_weights<-function(sess){
weights_dict = np$load(WEIGHTS_PATH, encoding = 'bytes')
}
# Evaluating using VGG16 network
require(jpeg)
testImgURL<-"http://farm4.static.flickr.com/3155/2591264041_273abea408.jpg"
img.test<-tempfile()
download.file(testImgURL,img.test, mode="wb")
read.image <- readJPEG(img.test)
file.remove(img.test) # cleanup
## Evaluate
size = dim(read.image)
imgs = array(255*read.image, dim = c(1, size[1], size[2], size[3]))
VGG16_eval = sess$run(vgg16.network, dict(images = imgs))
probs = exp(VGG16_eval)/sum(exp(VGG16_eval)) # 672: 'mountain bike, all-terrain bike, off-roader',
|
# Draw n observations from Normal(mu, sigma) and return a one-row tibble
# containing the sample mean (mu_hat) and sample sd (sigma_hat).
sim_mean_sd = function(n, mu = 2, sigma = 3) {
  draws = rnorm(n, mean = mu, sd = sigma)
  tibble(x = draws) %>%
    summarize(
      mu_hat = mean(x),
      sigma_hat = sd(x)
    )
}
sim_data = tibble(
x = rnorm(n, mean = mu, sd = sigma),
)
sim_data %>%
summarize(
mu_hat = mean(x),
sigma_hat = sd(x)
)
} |
# NOTE(review): rm(list=ls()) wipes the calling session and the absolute
# setwd() path is machine-specific -- both are anti-patterns in shared code.
rm(list=ls(all=T))
##set working directory
setwd("C:/Users/parul/Desktop/Data Science/PROJECT/project1")
##load libraries
#loading multiple packages at once
# NOTE(review): "ggplot2" and "e1071" appear twice in this vector, and
# require() returns FALSE instead of erroring when a package is missing.
x = c("ggplot2", "corrgram", "DMwR", "caret", "randomForest",'imbalance', "unbalanced", "C50", "dummies",
"e1071", "Information","MASS", "rpart", "gbm", "ROSE", 'sampling', 'class','e1071','Metrics',
'DataCombine', 'gplots','inTrees','GGally','purrr','ROCR','tidyr','ggplot2','pROC')
#install.packages(x)
lapply(x, require, character.only = TRUE)
rm(x)
## Read the data
train = read.csv("Train_data.csv", header = T, na.strings = c(" ", "", "NA"),stringsAsFactors = FALSE)
test = read.csv("Test_data.csv", header = T, na.strings = c(" ", "", "NA"),stringsAsFactors = FALSE)
# Tag rows so the combined frame can be split back after preprocessing.
train$isTrain=TRUE
test$isTrain=FALSE
##combine train and test data to preprocess data before feeding it to ML algorithms
data1=rbind(train,test)
##**************************DATA EXPLORATION******************************
dim(data1)
str(data1)
# Convert the categorical predictors and the target to factors.
data1$international.plan=as.factor(data1$international.plan)
data1$voice.mail.plan=as.factor(data1$voice.mail.plan)
data1$area.code=as.factor(data1$area.code)
data1$Churn=as.factor(data1$Churn)
data1$state=as.factor(data1$state)
#***************************MISSING VALUE ANALYSIS********************************************
#create dataframe with per-column missing-value counts
missing_val = data.frame(apply(data1,2,function(x){sum(is.na(x))}))
#convert row names into columns
missing_val$Columns = row.names(missing_val)
row.names(missing_val) = NULL
#Rename the variable containing missing counts
names(missing_val)[1] = "Missing_percentage"
#calculate missing percentage
missing_val$Missing_percentage = (missing_val$Missing_percentage/nrow(data1)) * 100
missing_val = missing_val[,c(2,1)]
##NO MISSING DATA##
#********************************DATA VISUALIZATION*************************
print("proportion of Churn in each class: 1: negative class, 2: positive class")
prop.table(table(data1$Churn))
#1. target variable: Churn
ggplot(data1,aes(factor(Churn))) +geom_bar(fill = "coral",alpha = 0.7)+labs(y="count",x="Churn") + theme_classic()+ggtitle("Customer Churn")
#2.#effect of area code on churn
ggplot(data1, aes(area.code, Churn)) + geom_bar(stat = "identity", aes(fill = factor(Churn)))
#3.#effect of state on churn
ggplot(data1, aes(state, Churn)) + geom_bar(stat = "identity", aes(fill = factor(Churn)))
#4.#effect of voice mail plan on churn
ggplot(data1, aes(voice.mail.plan, Churn)) + geom_bar(stat = "identity", aes(fill = factor(Churn)))
#5.#effect of international plan on churn
ggplot(data1, aes(international.plan, Churn)) + geom_bar(stat = "identity", aes(fill = factor(Churn)))
#6.#effect of number of service calls on churn
ggplot(data1, aes(number.customer.service.calls, Churn)) + geom_bar(stat = "identity", aes(fill = factor(Churn)))
##convert factor strings to numeric factor##
##Data Manipulation; convert string categories into factor numeric
# NOTE(review): inherits(data1[,i], "factor") is the robust idiom here;
# class()== comparison works only because these columns have one class.
for(i in 1:ncol(data1)){
if(class(data1[,i]) == 'factor'){
data1[,i] = factor(data1[,i], labels=(1:length(levels(factor(data1[,i])))))
}
}
#**************************************FEATURE SELECTION***********************************
# ## Find correlated independent variables
numeric_index = sapply(data1,is.numeric) #selecting only numeric
numeric_data = data1[,numeric_index]
cnames=colnames(numeric_data)
#visual plot of correlation matrix
ggcorr(data1[cnames],label=TRUE,label_alpha = TRUE)
cormatrix=cor(data1[cnames])
# Zero out the upper triangle (and diagonal) so each pair is checked once.
cormatrix[!lower.tri(cormatrix)]=0
#abc.new <- data[,!apply(cormatrix,2,function(x) any(abs(x) > 0.95))]
# Collect columns involved in any |correlation| > 0.95 pair.
cor_var=c()
for(i in cnames){
for(j in cnames){
if(abs(cormatrix[j,i])>0.95){
cor_var=append(cor_var,j)
}}}
#remove correlated variables from data
data1=data1[, !colnames(data1) %in% cor_var]
##chi-square test
# NOTE(review): cat_var is dead code (never used below) and contains the
# misspelling "internatiional.plan"; the loop relies on factor_index instead.
cat_var=list("state","area.code","internatiional.plan","voice.mail.plan")
factor_index = sapply(data1,is.factor)
factor_data = data1[,factor_index]
for (i in 1:dim(factor_data)[2])
{
print(names(factor_data)[i])
print(chisq.test(table(factor_data$Churn,factor_data[,i])))
}
#drop the categorical variable for which p-value> 0.05
#Null hypo, H0: predictor and target variable are independent
#Reject H0 when p-value <0.05 (alpha value), hence select (drop) those variables for which p-value<0.05
#Drop phone number as it is an irrelevant variable for churn prediction
drop_var=c("phone.number","area.code")
data1=data1[, !colnames(data1) %in% drop_var]
#drop 'state' as it has too many levels
data1=subset(data1,select=-c(state))
# Snapshot of the preprocessed data, reloaded before each resampling method.
datacopy=data1
data1=datacopy
##divide data into train and test sets and perform Resampling
#load original data
data1=datacopy
#1. Random Over Sampling
#applied only on train data
library(ROSE)
data1=datacopy
train=subset(data1,isTrain==TRUE)
test=subset(data1,isTrain==FALSE)
table(train$Churn)
train_over=ovun.sample(Churn~. , data=train, method = "over" , N=2850*2)$data
table(train_over$Churn)
#combine to generate complete data
data1=rbind(train_over,test)
#2. Random under Sampling
#applied on whole data
data1=datacopy
table(data1$Churn)
data1=ovun.sample(Churn~. , data=data1, method = "under" , N=707*2)$data
table(data1$Churn)
# 3. Combining under and over sampling
#applied on train data
data1=datacopy
train=subset(data1,isTrain==TRUE)
test=subset(data1,isTrain==FALSE)
table(train$Churn)
train_both=ovun.sample(Churn~. , data=train, method = "over" , p=0.5)$data
data1=rbind(train_both,test)
# 4. Generate synthetic data using SMOTE oversampling
library(unbalanced)
data1=datacopy
train=subset(data1,isTrain==TRUE)
test=subset(data1,isTrain==FALSE)
table(train$Churn)
train_smote=ubBalance(X=train[,!colnames(train)=="Churn"],Y=train$Churn, positive=2, type = "ubSMOTE", verbose=TRUE)
train_smote_balanced=cbind(train_smote$X,train_smote$Y)
colnames(train_smote_balanced)[which(names(train_smote_balanced) == "train_smote$Y")] <- "Churn"
train_smote_balanced$isTrain=TRUE
table(train_smote_balanced$Churn)
data1=rbind(train_smote_balanced,test)
#or use SmoteClassif
#5. Under sampling using TOMEK links
#applied on whole data
data1=datacopy
table(data1$Churn)
#data_tomek=ubBalance(X=data1[,!colnames(data1)=="Churn"], Y=data1$Churn, positive = 2, type="ubTomek", verbose = TRUE)
library(UBL)
tomek=TomekClassif(Churn~., data1, dist = "HEOM", rem = "maj")
class(tomek)
tomek1=as.data.frame(tomek[[1]])
data1=tomek1
table(data1$Churn)
#************************check numeric variable normality******************
#a.account.length
hist(data1$account.length)
#b.number.vmail.messages
hist(data1$number.vmail.messages)
#c.total.day.minutes
hist(data1$total.day.minutes)
#d.total.day.calls
hist(data1$total.day.calls)
#e.total.eve.minutes
hist(data1$total.eve.minutes)
#f.total.eve.calls
hist(data1$total.eve.calls)
#g.total.night.minutes
hist(data1$total.night.minutes)
#h.total.night.calls
hist(data1$total.night.calls)
#i.total.intl.minutes
hist(data1$total.intl.minutes)
#j.total.intl.calls
hist(data1$total.intl.calls)
#k.number.customer.service.calls
hist(data1$number.customer.service.calls)
##################### OR VIEW HISTOGRAMS IN SINGLE PANE#############
data1 %>%
keep(is.numeric) %>%
gather() %>%
ggplot(aes(value)) +
facet_wrap(~ key, scales = "free") +
geom_histogram()
#***********************FEATURE SCALING***********************
#apply normalization on data
numeric_index = sapply(data1,is.numeric) #selecting only numeric
numeric_data = data1[,numeric_index]
cnames=colnames(numeric_data)
for(i in cnames){
print(i)
data1[,i] = (data1[,i] - min(data1[,i]))/
(max(data1[,i] - min(data1[,i])))
}
##Apply Classification algorithms
# Print FNR, accuracy, sensitivity, specificity and precision (all in %)
# from a caret confusionMatrix object `cm`, assuming a 2x2 table in which
# class "2" (churn) is the positive class in row/column 2.
errorfunction <- function(cm){
  counts <- cm$table
  true_neg  <- counts[1, 1]
  false_neg <- counts[1, 2]
  false_pos <- counts[2, 1]
  true_pos  <- counts[2, 2]
  fnr_pct  <- 100 * false_neg / (false_neg + true_pos)
  acc_pct  <- 100 * (true_pos + true_neg) / sum(counts)
  sens_pct <- 100 * true_pos / (true_pos + false_neg)
  spec_pct <- 100 * true_neg / (true_neg + false_pos)
  prec_pct <- 100 * true_pos / (true_pos + false_pos)
  cat(sprintf("FALSE NEGATIVE RATE :%.2f %%\nACCURACY :%.2f %%\nSENSTIVITY :%.2f %%\nSPECIFICITY :%.2f %%\nPRECISION :%.2f %%",
              fnr_pct, acc_pct, sens_pct, spec_pct, prec_pct))
}
train=subset(data1,isTrain==TRUE)
train=subset(train,select=-(isTrain))
test=subset(data1,isTrain==FALSE)
test=subset(test,select=-(isTrain))
#1.DECISION TREE CLASSIFIER
#Develop Model on training data
# C5.0 with 100 boosting trials; rules = TRUE extracts a rule set instead of a tree
DT_model = C5.0(Churn ~., train, trials = 100, rules = TRUE)
#Summary of DT model
summary(DT_model)
#write rules into disk
write(capture.output(summary(DT_model)), "DT_Rules.txt")
#Lets predict for test cases (target column excluded from the predictors)
DT_Predictions = predict(DT_model, test[,!colnames(test)=="Churn"], type = "class")
##Evaluate the performance of classification model
# rows = predictions, cols = actuals -- the layout errorfunction() expects
ConfMatrix_DT = table(predictions=DT_Predictions,actual=test$Churn)
cm1=confusionMatrix(ConfMatrix_DT, positive='2')  # class "2" = churn (per factor recoding)
print("DECISION TREE ERROR METRICS")
errorfunction(cm1)
roc.curve(test$Churn,DT_Predictions)  # ROSE::roc.curve -> ROC plot + AUC
#2.RANDOM FOREST CLASSIFIER
RF_model = randomForest(Churn ~ ., train, importance = TRUE, ntree = 500)
#Extract rules fromn random forest
#transform rf object to an inTrees' format
treeList = RF2List(RF_model)
# #Extract rules
# NOTE(review): `train` is indexed by colnames(test); this only works while
# both frames share an identical column layout -- colnames(train) is safer.
exec = extractRules(treeList, train[,!colnames(test)=="Churn"]) # R-executable conditions
# #Visualize some rules
exec[1:2,]
# #Make rules more readable:
readableRules = presentRules(exec, colnames(train))
readableRules[1:2,]
#Predict test data using random forest model
RF_Predictions = predict(RF_model, test[,!colnames(test)=="Churn"])
##Evaluate the performance of classification model
# rows = predictions, cols = actuals -- the layout errorfunction() expects
ConfMatrix_RF = table(predictions=RF_Predictions,actual=test$Churn)
cm2=confusionMatrix(ConfMatrix_RF, positive='2')
print("RANDOM FOREST ERROR METRICS")
errorfunction(cm2)
#ROC-AUC
roc.curve(test$Churn,RF_Predictions)
#3.Logistic Regression
# Binomial GLM on the balanced training data; models P(Churn = "2").
logit_model = glm(Churn ~ ., data = train, family = "binomial")
#summary of the model (coefficients, deviance)
summary(logit_model)
#predict using logistic regression; type = "response" returns probabilities
logit_Predictions = predict(logit_model, newdata = test, type = "response")
#convert probabilities to class labels; the 0.3 cut-off (vs the default 0.5)
#trades precision for recall on the positive (churn) class
logit_Predictions = ifelse(logit_Predictions > 0.3, 2, 1)
##Evaluate the performance of classification model
# BUGFIX: the confusion matrix was stored in ConfMatrix_RF (copy-paste from
# the random forest section), silently clobbering that object; renamed.
ConfMatrix_LOGIT = table(predictions=logit_Predictions,actual=test$Churn)
cm3=confusionMatrix(ConfMatrix_LOGIT, positive='2')
print("LOGISTIC REGRESSION ERROR METRICS")
errorfunction(cm3)
#ROC-AUC
roc.curve(test$Churn,logit_Predictions)
#4. k-nearest neighbors Classifier
library(class)
# k-NN needs numeric predictors on a common scale; the features were
# min-max normalized above. Number of neighbours:
k=13
# BUGFIX: index the train set by its own column names (the original used
# colnames(test) for both frames, which only works while their column
# order is identical).
KNN_Predictions = knn(train[,!colnames(train)=="Churn"],
                      test[,!colnames(test)=="Churn"],
                      train$Churn, k = k)
#Confusion matrix (rows = predictions, cols = actuals)
Conf_matrix = table(KNN_Predictions, test$Churn)
cm4=confusionMatrix(Conf_matrix, positive='2')
# BUGFIX: wrap in print() so the header also appears when the script is
# source()d -- a bare top-level sprintf() only auto-prints interactively.
print(sprintf("KNN classifier ERROR METRICS for k= %d",k))
errorfunction(cm4)
roc.curve(test$Churn,KNN_Predictions)
#5. Naive Bayes
#Develop model
NB_model = naiveBayes(Churn ~ ., data = train)
#predict on test cases #raw
# type = 'class' returns hard labels (type = 'raw' would give posteriors)
NB_Predictions = predict(NB_model, test[,!colnames(test)=="Churn"], type = 'class')
#Look at confusion matrix (rows = predictions, cols = actuals)
Conf_matrix = table(predicted = NB_Predictions, actual = test$Churn)
cm5=confusionMatrix(Conf_matrix, positive='2')
print("NAIVE BAYES ERROR METRICS")
errorfunction(cm5)
roc.curve(test$Churn,NB_Predictions)
| /R_code.R | no_license | parulsahi/Churn_reduction | R | false | false | 11,608 | r | rm(list=ls(all=T))
##set working directory
# NOTE(review): machine-specific absolute path; anyone else must edit this
# line (a project-relative path would be more portable).
setwd("C:/Users/parul/Desktop/Data Science/PROJECT/project1")
##load libraries
#loading multiple packages at once
# ('e1071' and 'ggplot2' appear twice in the vector -- harmless but redundant)
x = c("ggplot2", "corrgram", "DMwR", "caret", "randomForest",'imbalance', "unbalanced", "C50", "dummies",
"e1071", "Information","MASS", "rpart", "gbm", "ROSE", 'sampling', 'class','e1071','Metrics',
'DataCombine', 'gplots','inTrees','GGally','purrr','ROCR','tidyr','ggplot2','pROC')
#install.packages(x)
# NOTE(review): require() returns FALSE instead of stopping when a package
# is missing, so a failed load is easy to miss; library() would fail fast.
lapply(x, require, character.only = TRUE)
rm(x)
## Read the data
# blank / whitespace-only strings are treated as NA; keep strings as characters
train = read.csv("Train_data.csv", header = T, na.strings = c(" ", "", "NA"),stringsAsFactors = FALSE)
test = read.csv("Test_data.csv", header = T, na.strings = c(" ", "", "NA"),stringsAsFactors = FALSE)
# tag each row's origin so the split can be reconstructed after the shared
# preprocessing below
train$isTrain=TRUE
test$isTrain=FALSE
##combine train and test data to preprocess data before feeding it to ML algorithms
data1=rbind(train,test)
##**************************DATA EXPLORATION******************************
dim(data1)
str(data1)
# declare the categorical columns as factors (needed by the chi-square test
# and the classifiers below)
data1$international.plan=as.factor(data1$international.plan)
data1$voice.mail.plan=as.factor(data1$voice.mail.plan)
data1$area.code=as.factor(data1$area.code)
data1$Churn=as.factor(data1$Churn)
data1$state=as.factor(data1$state)
#***************************MISSING VALUE ANALYSIS********************************************
#create dataframe with missing percentage
# count NAs per column
missing_val = data.frame(apply(data1,2,function(x){sum(is.na(x))}))
#convert row names into columns
missing_val$Columns = row.names(missing_val)
row.names(missing_val) = NULL
#Rename the variable conating missing values
names(missing_val)[1] = "Missing_percentage"
#calculate missing percentage
missing_val$Missing_percentage = (missing_val$Missing_percentage/nrow(data1)) * 100
missing_val = missing_val[,c(2,1)]  # put the column name first
##NO MISSING DATA##
#********************************DATA VISUALIZATION*************************
print("proportion of Churn in each class: 1: negative class, 2: positive class")
prop.table(table(data1$Churn))  # class balance (motivates the resampling section below)
#1. target variable: Churn
ggplot(data1,aes(factor(Churn))) +geom_bar(fill = "coral",alpha = 0.7)+labs(y="count",x="Churn") + theme_classic()+ggtitle("Customer Churn")
#2.#effect of area code on churn
# NOTE(review): geom_bar(stat = "identity") with a factor on the y axis draws
# bar heights from the factor's integer codes; a stacked count plot
# (e.g. geom_bar(position = "fill")) would be the conventional choice -- verify intent.
ggplot(data1, aes(area.code, Churn)) + geom_bar(stat = "identity", aes(fill = factor(Churn)))
#3.#effect of state on churn
ggplot(data1, aes(state, Churn)) + geom_bar(stat = "identity", aes(fill = factor(Churn)))
#4.#effect of voice mail plan on churn
ggplot(data1, aes(voice.mail.plan, Churn)) + geom_bar(stat = "identity", aes(fill = factor(Churn)))
#5.#effect of international plan on churn
ggplot(data1, aes(international.plan, Churn)) + geom_bar(stat = "identity", aes(fill = factor(Churn)))
#6.#effect of number of service calls on churn
ggplot(data1, aes(number.customer.service.calls, Churn)) + geom_bar(stat = "identity", aes(fill = factor(Churn)))
##convert factor strings to numeric factor##
##Data Manupulation; convert string categories into factor numeric
# relabel every factor column with integer labels 1..nlevels (alphabetical
# level order); e.g. Churn becomes "1"/"2" with "2" the positive class
for(i in 1:ncol(data1)){
if(class(data1[,i]) == 'factor'){
data1[,i] = factor(data1[,i], labels=(1:length(levels(factor(data1[,i])))))
}
}
#**************************************FEATURE SELECTION***********************************
# ## Find correlated independent variables
numeric_index = sapply(data1,is.numeric) #selecting only numeric
numeric_data = data1[,numeric_index]
cnames=colnames(numeric_data)
#visual plot of correlation matrix
ggcorr(data1[cnames],label=TRUE,label_alpha = TRUE)
cormatrix=cor(data1[cnames])
# zero the diagonal and upper triangle so each pair is inspected only once
cormatrix[!lower.tri(cormatrix)]=0
#abc.new <- data[,!apply(cormatrix,2,function(x) any(abs(x) > 0.95))]
# collect the row variable of every pair with |r| > 0.95 -- one member of
# each highly collinear pair gets dropped below
cor_var=c()
for(i in cnames){
for(j in cnames){
if(abs(cormatrix[j,i])>0.95){
cor_var=append(cor_var,j)
}}}
#remove correlated variables from data
data1=data1[, !colnames(data1) %in% cor_var]
##chi-square test of independence between each factor column and Churn
# NOTE(review): cat_var is never used below (and "internatiional.plan" is
# misspelled); the loop iterates over all factor columns instead.
cat_var=list("state","area.code","internatiional.plan","voice.mail.plan")
factor_index = sapply(data1,is.factor)
factor_data = data1[,factor_index]
for (i in 1:dim(factor_data)[2])
{
print(names(factor_data)[i])
print(chisq.test(table(factor_data$Churn,factor_data[,i])))
}
#drop the categorical variable for which p-value> 0.05
#Null hypo, H0: predictor and target variable are independent
#When p >= 0.05 we fail to reject H0 -- the variable looks independent of
#Churn and is dropped (area.code below); variables with p < 0.05 are kept.
#Drop phone number as it is an irrelevant variable for churn prediction
drop_var=c("phone.number","area.code")
data1=data1[, !colnames(data1) %in% drop_var]
#drop 'state' as it has too many levels
data1=subset(data1,select=-c(state))
# pristine snapshot: each resampling strategy below restarts from datacopy
datacopy=data1
data1=datacopy  # (no-op immediately after the snapshot)
#******************SOLVING TARGET CLASS IMBALANCE PROBLEM*******************************
##divide data into train and test sets and perform Resampling
# Five alternative balancing strategies follow. Each restarts from the
# untouched snapshot (datacopy) and overwrites data1, so only the LAST
# strategy executed (Tomek-link undersampling, further below) actually
# feeds the models.
#load original data
data1=datacopy
#1. Random Over Sampling
#applied only on train data (the test set keeps its natural distribution)
library(ROSE)
data1=datacopy
train=subset(data1,isTrain==TRUE)
test=subset(data1,isTrain==FALSE)
table(train$Churn)
# N = 2850*2 is the target total sample size (presumably 2x the majority
# class count -- verify against table(train$Churn) above)
train_over=ovun.sample(Churn~. , data=train, method = "over" , N=2850*2)$data
table(train_over$Churn)
#combine to generate complete data
data1=rbind(train_over,test)
#2. Random under Sampling
#applied on whole data
data1=datacopy
table(data1$Churn)
# N = 707*2 (presumably 2x the minority class count -- verify)
data1=ovun.sample(Churn~. , data=data1, method = "under" , N=707*2)$data
table(data1$Churn)
# 3. Combining under and over sampling
#applied on train data
data1=datacopy
train=subset(data1,isTrain==TRUE)
test=subset(data1,isTrain==FALSE)
table(train$Churn)
# NOTE(review): method = "over" despite the "combining" label above;
# method = "both" was probably intended -- confirm.
train_both=ovun.sample(Churn~. , data=train, method = "over" , p=0.5)$data
data1=rbind(train_both,test)
# 4. Generate synthetic data using SMOTE oversampling
library(unbalanced)
data1=datacopy
train=subset(data1,isTrain==TRUE)
test=subset(data1,isTrain==FALSE)
table(train$Churn)
# ubBalance returns list(X = predictors, Y = target); recombine and restore
# the Churn column name before stacking with the test rows
train_smote=ubBalance(X=train[,!colnames(train)=="Churn"],Y=train$Churn, positive=2, type = "ubSMOTE", verbose=TRUE)
train_smote_balanced=cbind(train_smote$X,train_smote$Y)
colnames(train_smote_balanced)[which(names(train_smote_balanced) == "train_smote$Y")] <- "Churn"
train_smote_balanced$isTrain=TRUE
table(train_smote_balanced$Churn)
data1=rbind(train_smote_balanced,test)
#or use SmoteClassif
#5. Under sampling using TOMEK links
#applied on whole data
data1=datacopy
table(data1$Churn)
# Strategy 5 (the one actually in effect): Tomek-link undersampling via UBL.
# Majority-class members of Tomek links are removed, cleaning the class boundary.
#data_tomek=ubBalance(X=data1[,!colnames(data1)=="Churn"], Y=data1$Churn, positive = 2, type="ubTomek", verbose = TRUE)
library(UBL)
# HEOM distance copes with the mix of numeric and factor columns;
# rem = "maj" removes only the majority-class member of each link.
tomek=TomekClassif(Churn~., data1, dist = "HEOM", rem = "maj")
class(tomek)  # TomekClassif returns a list; element [[1]] is the cleaned data frame
tomek1=as.data.frame(tomek[[1]])
data1=tomek1  # overwrite data1: this sampling scheme feeds everything below
table(data1$Churn)  # inspect the resulting class balance
#************************check numeric variable normality******************
# Visual normality check: one base-graphics histogram per numeric predictor.
#a.account.length
hist(data1$account.length)
#b.number.vmail.messages
hist(data1$number.vmail.messages)
#c.total.day.minutes
hist(data1$total.day.minutes)
#d.total.day.calls
hist(data1$total.day.calls)
#e.total.eve.minutes
hist(data1$total.eve.minutes)
#f.total.eve.calls
hist(data1$total.eve.calls)
#g.total.night.minutes
hist(data1$total.night.minutes)
#h.total.night.calls
hist(data1$total.night.calls)
#i.total.intl.minutes
hist(data1$total.intl.minutes)
#j.total.intl.calls
hist(data1$total.intl.calls)
#k.number.customer.service.calls
hist(data1$number.customer.service.calls)
##################### OR VIEW HISTOGRAMS IN SINGLE PANE#############
# Same information as one faceted ggplot: keep() selects the numeric columns,
# gather() stacks them into key/value pairs (NOTE(review): gather() is
# superseded by tidyr::pivot_longer()), one facet per variable.
data1 %>%
keep(is.numeric) %>%
gather() %>%
ggplot(aes(value)) +
facet_wrap(~ key, scales = "free") +
geom_histogram()
#***********************FEATURE SCALING***********************
# Min-max normalize every numeric column of data1 into [0, 1].
# NOTE: the original denominator was written max(x - min(x)); subtracting a
# scalar shifts the maximum, so that is numerically identical to
# max(x) - min(x) -- the explicit form below says what is meant and avoids
# recomputing min() twice per column. A constant column would yield a zero
# denominator (NaN); none is expected after the feature-selection step.
numeric_index = sapply(data1, is.numeric)  # logical mask of numeric columns
numeric_data = data1[, numeric_index]
cnames = colnames(numeric_data)
for (i in cnames) {
  print(i)  # progress: which column is being scaled
  rng_min = min(data1[, i])
  rng_max = max(data1[, i])
  data1[, i] = (data1[, i] - rng_min) / (rng_max - rng_min)
}
## Apply classification algorithms
# Print classification error metrics derived from a confusion matrix.
#
# cm: an object carrying a 2x2 confusion matrix in cm$table, laid out as
#     rows = predictions, cols = actuals, with the positive class ("2",
#     churners) in row/column 2 -- e.g. the result of caret::confusionMatrix().
# Prints (in %): false negative rate, accuracy, sensitivity, specificity
# and precision. Called for its console output.
errorfunction <- function(cm) {
  tab <- cm$table
  true_neg  <- tab[1, 1]
  false_neg <- tab[1, 2]
  false_pos <- tab[2, 1]
  true_pos  <- tab[2, 2]
  total <- true_neg + false_neg + false_pos + true_pos
  metrics <- c(
    false_neg * 100 / (false_neg + true_pos),  # false negative rate
    (true_pos + true_neg) * 100 / total,       # accuracy
    true_pos * 100 / (true_pos + false_neg),   # sensitivity / recall
    true_neg * 100 / (true_neg + false_pos),   # specificity
    true_pos * 100 / (true_pos + false_pos)    # precision
  )
  # (the "SENSTIVITY" spelling is kept verbatim -- downstream logs may match it)
  cat(sprintf("FALSE NEGATIVE RATE :%.2f %%\nACCURACY :%.2f %%\nSENSTIVITY :%.2f %%\nSPECIFICITY :%.2f %%\nPRECISION :%.2f %%",
              metrics[1], metrics[2], metrics[3], metrics[4], metrics[5]))
}
# Rebuild the train/test partition using the isTrain flag added when the raw
# files were read, then drop the helper column so it cannot leak into the
# models as a predictor.
train=subset(data1,isTrain==TRUE)
train=subset(train,select=-(isTrain))
test=subset(data1,isTrain==FALSE)
test=subset(test,select=-(isTrain))
#1.DECISION TREE CLASSIFIER
#Develop Model on training data
# C5.0 with 100 boosting trials; rules = TRUE extracts a rule set instead of a tree
DT_model = C5.0(Churn ~., train, trials = 100, rules = TRUE)
#Summary of DT model
summary(DT_model)
#write rules into disk
write(capture.output(summary(DT_model)), "DT_Rules.txt")
#Lets predict for test cases (target column excluded from the predictors)
DT_Predictions = predict(DT_model, test[,!colnames(test)=="Churn"], type = "class")
##Evaluate the performance of classification model
# rows = predictions, cols = actuals -- the layout errorfunction() expects
ConfMatrix_DT = table(predictions=DT_Predictions,actual=test$Churn)
cm1=confusionMatrix(ConfMatrix_DT, positive='2')  # class "2" = churn (per factor recoding)
print("DECISION TREE ERROR METRICS")
errorfunction(cm1)
roc.curve(test$Churn,DT_Predictions)  # ROSE::roc.curve -> ROC plot + AUC
#2.RANDOM FOREST CLASSIFIER
RF_model = randomForest(Churn ~ ., train, importance = TRUE, ntree = 500)
#Extract rules fromn random forest
#transform rf object to an inTrees' format
treeList = RF2List(RF_model)
# #Extract rules
# NOTE(review): `train` is indexed by colnames(test); this only works while
# both frames share an identical column layout -- colnames(train) is safer.
exec = extractRules(treeList, train[,!colnames(test)=="Churn"]) # R-executable conditions
# #Visualize some rules
exec[1:2,]
# #Make rules more readable:
readableRules = presentRules(exec, colnames(train))
readableRules[1:2,]
#Predict test data using random forest model
RF_Predictions = predict(RF_model, test[,!colnames(test)=="Churn"])
##Evaluate the performance of classification model
# rows = predictions, cols = actuals -- the layout errorfunction() expects
ConfMatrix_RF = table(predictions=RF_Predictions,actual=test$Churn)
cm2=confusionMatrix(ConfMatrix_RF, positive='2')
print("RANDOM FOREST ERROR METRICS")
errorfunction(cm2)
#ROC-AUC
roc.curve(test$Churn,RF_Predictions)
#3.Logistic Regression
# Binomial GLM on the balanced training data; models P(Churn = "2").
logit_model = glm(Churn ~ ., data = train, family = "binomial")
#summary of the model (coefficients, deviance)
summary(logit_model)
#predict using logistic regression; type = "response" returns probabilities
logit_Predictions = predict(logit_model, newdata = test, type = "response")
#convert probabilities to class labels; the 0.3 cut-off (vs the default 0.5)
#trades precision for recall on the positive (churn) class
logit_Predictions = ifelse(logit_Predictions > 0.3, 2, 1)
##Evaluate the performance of classification model
# BUGFIX: the confusion matrix was stored in ConfMatrix_RF (copy-paste from
# the random forest section), silently clobbering that object; renamed.
ConfMatrix_LOGIT = table(predictions=logit_Predictions,actual=test$Churn)
cm3=confusionMatrix(ConfMatrix_LOGIT, positive='2')
print("LOGISTIC REGRESSION ERROR METRICS")
errorfunction(cm3)
#ROC-AUC
roc.curve(test$Churn,logit_Predictions)
#4. k-nearest neighbors Classifier
library(class)
# k-NN needs numeric predictors on a common scale; the features were
# min-max normalized above. Number of neighbours:
k=13
# BUGFIX: index the train set by its own column names (the original used
# colnames(test) for both frames, which only works while their column
# order is identical).
KNN_Predictions = knn(train[,!colnames(train)=="Churn"],
                      test[,!colnames(test)=="Churn"],
                      train$Churn, k = k)
#Confusion matrix (rows = predictions, cols = actuals)
Conf_matrix = table(KNN_Predictions, test$Churn)
cm4=confusionMatrix(Conf_matrix, positive='2')
# BUGFIX: wrap in print() so the header also appears when the script is
# source()d -- a bare top-level sprintf() only auto-prints interactively.
print(sprintf("KNN classifier ERROR METRICS for k= %d",k))
errorfunction(cm4)
roc.curve(test$Churn,KNN_Predictions)
#5. Naive Bayes
#Develop model
NB_model = naiveBayes(Churn ~ ., data = train)
#predict on test cases #raw
# type = 'class' returns hard labels (type = 'raw' would give posteriors)
NB_Predictions = predict(NB_model, test[,!colnames(test)=="Churn"], type = 'class')
#Look at confusion matrix (rows = predictions, cols = actuals)
Conf_matrix = table(predicted = NB_Predictions, actual = test$Churn)
cm5=confusionMatrix(Conf_matrix, positive='2')
print("NAIVE BAYES ERROR METRICS")
errorfunction(cm5)
roc.curve(test$Churn,NB_Predictions)
|
PlotPatch <- function(distances, area, node.area, width, file.name) {
## plots the 2d distribution of individuals, the nodes, and the corridors
# distances is the output from PropaguleDistances2D
#   (column 2 = population id 1..4, columns 3:4 = x/y coordinates)
# area is the area for the entire metapopulation
# node.area is the area for each of the nodes (i.e., reserves)
# width is the corridor half-width (each corridor spans its axis +/- width)
# file.name is the prefix to give the pdf a distinct filename
# Side effect: writes "<file.name> corridors.pdf" to the working directory.
# Returns an 8-row matrix of corner coordinates: rows square1..square4 are
# the nodes, square5..square8 the corridors; each row is (x1,y1,...,x4,y4).
# seperate pops
pop1 <- distances[distances[, 2] == 1, 1:4]
pop2 <- distances[distances[, 2] == 2, 1:4]
pop3 <- distances[distances[, 2] == 3, 1:4]
pop4 <- distances[distances[, 2] == 4, 1:4]
# add nonsense points if population has gone extinct
# (off-plot placeholder coordinates keep the matrix indexing below valid)
if(length(pop1) < 4) { pop1 <- matrix(-4, 4, 4)}
if(length(pop2) < 4) { pop2 <- matrix(-4, 4, 4)}
if(length(pop3) < 4) { pop3 <- matrix(-4, 4, 4)}
if(length(pop4) < 4) { pop4 <- matrix(-4, 4, 4)}
# reformat if only a single individual
# (a single row was dropped to a length-4 vector by [; restore a 1x4 matrix)
if(length(pop1) == 4) { pop1 <- as.matrix(t(pop1))}
if(length(pop2) == 4) { pop2 <- as.matrix(t(pop2))}
if(length(pop3) == 4) { pop3 <- as.matrix(t(pop3))}
if(length(pop4) == 4) { pop4 <- as.matrix(t(pop4))}
# plot organisms
# NOTE(review): paste() (not paste0) puts a space inside the file name
pdf(paste(file.name,"corridors.pdf"))
plot(pop1[, 3], pop1[, 4], xlab = "Distance (km)", ylab = "Distance (km)", xlim = c(min(distances[, 3])-1, max(distances[, 3]+1)), ylim = c(min(distances[, 4]-1), max(distances[, 4]+1)))
points(pop2[, 3],pop2[, 4], col = "blue")
# NOTE(review): the doubled comma passes an empty (missing) `type` argument;
# harmless -- the default is used
points(pop3[, 3],pop3[, 4],, col = "green")
points(pop4[, 3],pop4[, 4], col = "red")
# plot node boundaries (reserves)
x <- sqrt(area)
# NOTE(review): this first `nodes` value is dead code -- immediately overwritten
nodes <- c(c(x, x),c(x*2, x),c(x*2, x*2),c(x, x*2)) # find center of each deme
nodes <- c(c(0, 0),c(x, 0),c(x, x),c(0, x)) # node locations: bottom left, bottom right, top right, top left
node.length <- sqrt(node.area)/2   # half the side length of a square node
xs <- nodes[seq(1, length(nodes), 2)]
ys <- nodes[seq(2, length(nodes), 2)]
x1 <- xs-node.length
x2 <- xs+node.length
y1 <- ys-node.length
y2 <- ys+node.length
point1 <- cbind(x1, y1) # bottom left, right, top right, left
point2 <- cbind(x2, y1)
point3 <- cbind(x2, y2)
point4 <- cbind(x1, y2)
# draw the four sides of every node square
segments(point1[, 1], point1[, 2], point2[, 1], point2[, 2], col="purple", lwd = 3)
segments(point3[, 1], point3[, 2], point2[, 1], point2[, 2], col="purple", lwd = 3)
segments(point3[, 1], point3[, 2], point4[, 1], point4[, 2], col="purple", lwd = 3)
segments(point1[, 1], point1[, 2], point4[, 1], point4[, 2], col="purple", lwd = 3)
# corner coordinates per node; point2[,1] == point3[,1] (same right edge),
# so using point2 for the third vertex's x still yields the top-right corner
square1 <- c(point1[1, 1], point1[1, 2], point2[1, 1], point2[1, 2], point2[1, 1], point3[1, 2], point4[1, 1], point4[1, 2])
square2 <- c(point1[2, 1], point1[2, 2], point2[2, 1], point2[2, 2], point2[2, 1], point3[2, 2], point4[2, 1], point4[2, 2])
square3 <- c(point1[3, 1], point1[3, 2], point2[3, 1], point2[3, 2], point2[3, 1], point3[3, 2], point4[3, 1], point4[3, 2])
square4 <- c(point1[4, 1], point1[4, 2], point2[4, 1], point2[4, 2], point2[4, 1], point3[4, 2], point4[4, 1], point4[4, 2])
# plot corridors
# bottom corridor
#width <- 0.5
xleft1 <- point2[1, 1]
# NOTE(review): [4, 2] is a y-coordinate of node 4; it equals the intended
# x value (left edge of node 2) only because the layout is square -- confirm
# before changing the geometry.
xright1 <- point2[4, 2]
ytop1 <- ys[1]+width
ybottom1 <- ys[1]-width
rect(xleft1, ybottom1, xright1, ytop1, border="purple", lwd=2)
square5 <- c(xleft1, ybottom1, xright1, ybottom1, xright1, ytop1, xleft1, ytop1)
# top corridor
xleft1 <- point2[1, 1]
xright1 <- point2[4, 2]
ytop1 <- ys[3]+width
ybottom1 <- ys[3]-width
rect(xleft1, ybottom1, xright1, ytop1, border="purple", lwd=2)
square6 <- c(xleft1, ybottom1, xright1, ybottom1, xright1, ytop1, xleft1, ytop1)
# left corridor
# NOTE(review): xs[2] is the right-hand column of nodes, so the "left"/"right"
# labels here appear swapped (the geometry is symmetric, so the plot is fine).
xleft1 <- xs[2]-width
xright1 <- xs[2]+width
ytop1 <- point2[3, 2]
ybottom1 <- point3[1, 2]
rect(xleft1, ybottom1, xright1, ytop1, border="purple", lwd=2)
square7 <- c(xleft1, ybottom1, xright1, ybottom1, xright1, ytop1, xleft1, ytop1)
# right corridor
xleft1 <- xs[1]-width
xright1 <- xs[1]+width
ytop1 <- point2[3, 2]
ybottom1 <- point3[1, 2]
rect(xleft1, ybottom1, xright1, ytop1, border="purple", lwd=2)
square8 <- c(xleft1, ybottom1, xright1, ybottom1, xright1, ytop1, xleft1, ytop1)
dev.off()
coords <- rbind(square1, square2, square3, square4, square5, square6, square7, square8)
return(coords)
}
| /source/PlotPatch.R | no_license | RedpathsRepos/Corridors | R | false | false | 4,229 | r | PlotPatch <- function(distances, area, node.area, width, file.name) {
## plots the 2d distribution of individuals, the nodes, and the corridors
# distances is the output from PropaguleDistances2D
# area is the area for the entire metapopulation
# node.area is the area for each of the nodes (i.e., reserves)
# width is the width of the corridors
# file.name is the prefix to give the pdf a distinct filename
# seperate pops
pop1 <- distances[distances[, 2] == 1, 1:4]
pop2 <- distances[distances[, 2] == 2, 1:4]
pop3 <- distances[distances[, 2] == 3, 1:4]
pop4 <- distances[distances[, 2] == 4, 1:4]
# add nonsense points if population has gone extinct
if(length(pop1) < 4) { pop1 <- matrix(-4, 4, 4)}
if(length(pop2) < 4) { pop2 <- matrix(-4, 4, 4)}
if(length(pop3) < 4) { pop3 <- matrix(-4, 4, 4)}
if(length(pop4) < 4) { pop4 <- matrix(-4, 4, 4)}
# reformat if only a single individual
if(length(pop1) == 4) { pop1 <- as.matrix(t(pop1))}
if(length(pop2) == 4) { pop2 <- as.matrix(t(pop2))}
if(length(pop3) == 4) { pop3 <- as.matrix(t(pop3))}
if(length(pop4) == 4) { pop4 <- as.matrix(t(pop4))}
# plot organisms
pdf(paste(file.name,"corridors.pdf"))
plot(pop1[, 3], pop1[, 4], xlab = "Distance (km)", ylab = "Distance (km)", xlim = c(min(distances[, 3])-1, max(distances[, 3]+1)), ylim = c(min(distances[, 4]-1), max(distances[, 4]+1)))
points(pop2[, 3],pop2[, 4], col = "blue")
points(pop3[, 3],pop3[, 4],, col = "green")
points(pop4[, 3],pop4[, 4], col = "red")
# plot node boundaries (reserves)
x <- sqrt(area)
nodes <- c(c(x, x),c(x*2, x),c(x*2, x*2),c(x, x*2)) # find center of each deme
nodes <- c(c(0, 0),c(x, 0),c(x, x),c(0, x)) # node locations: bottom left, bottom right, top right, top left
node.length <- sqrt(node.area)/2
xs <- nodes[seq(1, length(nodes), 2)]
ys <- nodes[seq(2, length(nodes), 2)]
x1 <- xs-node.length
x2 <- xs+node.length
y1 <- ys-node.length
y2 <- ys+node.length
point1 <- cbind(x1, y1) # bottom left, right, top right, left
point2 <- cbind(x2, y1)
point3 <- cbind(x2, y2)
point4 <- cbind(x1, y2)
segments(point1[, 1], point1[, 2], point2[, 1], point2[, 2], col="purple", lwd = 3)
segments(point3[, 1], point3[, 2], point2[, 1], point2[, 2], col="purple", lwd = 3)
segments(point3[, 1], point3[, 2], point4[, 1], point4[, 2], col="purple", lwd = 3)
segments(point1[, 1], point1[, 2], point4[, 1], point4[, 2], col="purple", lwd = 3)
square1 <- c(point1[1, 1], point1[1, 2], point2[1, 1], point2[1, 2], point2[1, 1], point3[1, 2], point4[1, 1], point4[1, 2])
square2 <- c(point1[2, 1], point1[2, 2], point2[2, 1], point2[2, 2], point2[2, 1], point3[2, 2], point4[2, 1], point4[2, 2])
square3 <- c(point1[3, 1], point1[3, 2], point2[3, 1], point2[3, 2], point2[3, 1], point3[3, 2], point4[3, 1], point4[3, 2])
square4 <- c(point1[4, 1], point1[4, 2], point2[4, 1], point2[4, 2], point2[4, 1], point3[4, 2], point4[4, 1], point4[4, 2])
# plot corridors
# bottom corridor
#width <- 0.5
xleft1 <- point2[1, 1]
xright1 <- point2[4, 2]
ytop1 <- ys[1]+width
ybottom1 <- ys[1]-width
rect(xleft1, ybottom1, xright1, ytop1, border="purple", lwd=2)
square5 <- c(xleft1, ybottom1, xright1, ybottom1, xright1, ytop1, xleft1, ytop1)
# top corridor
xleft1 <- point2[1, 1]
xright1 <- point2[4, 2]
ytop1 <- ys[3]+width
ybottom1 <- ys[3]-width
rect(xleft1, ybottom1, xright1, ytop1, border="purple", lwd=2)
square6 <- c(xleft1, ybottom1, xright1, ybottom1, xright1, ytop1, xleft1, ytop1)
# left corridor
xleft1 <- xs[2]-width
xright1 <- xs[2]+width
ytop1 <- point2[3, 2]
ybottom1 <- point3[1, 2]
rect(xleft1, ybottom1, xright1, ytop1, border="purple", lwd=2)
square7 <- c(xleft1, ybottom1, xright1, ybottom1, xright1, ytop1, xleft1, ytop1)
# right corridor
xleft1 <- xs[1]-width
xright1 <- xs[1]+width
ytop1 <- point2[3, 2]
ybottom1 <- point3[1, 2]
rect(xleft1, ybottom1, xright1, ytop1, border="purple", lwd=2)
square8 <- c(xleft1, ybottom1, xright1, ybottom1, xright1, ytop1, xleft1, ytop1)
dev.off()
coords <- rbind(square1, square2, square3, square4, square5, square6, square7, square8)
return(coords)
}
|
# dplyr practice exercises on ggplot2's built-in mpg and midwest data sets.
library(dplyr)
# Problem 1: explore the mpg data set
str(ggplot2::mpg)
mpg <- as.data.frame(ggplot2::mpg)
# 1-1
dim(mpg)
# 1-2
head(mpg, 10)
# 1-3
tail(mpg, 10)
# 1-4
View(mpg)
# 1-5
summary(mpg)
# 1-6
str(mpg)
# Problem 2: rename columns
# 2-1
mpg <- mpg %>% rename(city=cty, highway=hwy)
# 2-2
head(mpg,6)
# Problem 3: derived variables on midwest
# 3-1
midwest <- as.data.frame(ggplot2::midwest)
str(midwest)
summary(midwest)
# 3-2
midwest <- midwest %>% rename(total=poptotal,
asian=popasian)
# 3-3: Asian population share per county (percent)
midwest <- midwest %>% mutate(asian_percnt = asian/total*100)
# 3-4: classify counties relative to the overall mean share
mean_asian_percnt <- mean(midwest$asian_percnt)
midwest <- midwest %>% mutate( asian_size = ifelse(asian_percnt > mean_asian_percnt, 'large', 'small') )
# Problem 4: filter + summarise (reload mpg with the original column names)
mpg <- as.data.frame(ggplot2::mpg)
# 4-1: compare mean highway mileage by engine displacement
undr5_mean_hwy <- mpg %>%
filter(displ<=4) %>%
summarise(mean(hwy)) %>%
as.numeric()
over4_mean_hwy <- mpg %>%
filter(displ>=5) %>%
summarise(mean(hwy)) %>%
as.numeric()
undr5_mean_hwy; over4_mean_hwy # therefore cars with displacement <= 4 have the higher mean hwy
# 4-2: compare mean city mileage between two manufacturers
audi_mean_cty <- mpg %>%
filter(manufacturer=='audi') %>%
summarise(mean(cty)) %>%
as.numeric()
toyota_mean_cty <- mpg %>%
filter(manufacturer=='toyota') %>%
summarise(mean(cty)) %>%
as.numeric()
audi_mean_cty; toyota_mean_cty # therefore toyota has the higher mean cty
# 4-3: mean hwy over three manufacturers
# NOTE(review): picking manufacturers by position in unique() is fragile --
# it depends on row order; naming them explicitly would be safer.
tmp_vec <- unique(mpg$manufacturer)[c(2, 4, 5)]
mpg %>%
filter(manufacturer %in% tmp_vec) %>%
summarise(mean_hwy = mean(hwy))
# Problem 5: column selection
mpg <- as.data.frame(ggplot2::mpg)
# 5-1
mpg_new <- mpg %>% select(class, cty)
mpg_new %>% head
# 5-2: mean cty by class
mpg_new %>%
filter(class=='suv') %>%
summarise(mean_cty_suv = mean(cty))
mpg_new %>%
filter(class=='compact') %>%
summarise(mean_cty_compact = mean(cty))
# therefore the compact class has the higher mean cty
# Problem 6-1: audi's top 5 cars by highway mileage
mpg %>%
filter(manufacturer=='audi') %>%
arrange(desc(hwy)) %>%
head(5)
| /R_training/실습제출/김사무엘/19년11월/dplyr_lab2.R | no_license | BaeYS-marketing/R | R | false | false | 1,888 | r | library(dplyr)
# dplyr practice exercises on ggplot2's built-in mpg and midwest data sets.
# Problem 1: explore the mpg data set
str(ggplot2::mpg)
mpg <- as.data.frame(ggplot2::mpg)
# 1-1
dim(mpg)
# 1-2
head(mpg, 10)
# 1-3
tail(mpg, 10)
# 1-4
View(mpg)
# 1-5
summary(mpg)
# 1-6
str(mpg)
# Problem 2: rename columns
# 2-1
mpg <- mpg %>% rename(city=cty, highway=hwy)
# 2-2
head(mpg,6)
# Problem 3: derived variables on midwest
# 3-1
midwest <- as.data.frame(ggplot2::midwest)
str(midwest)
summary(midwest)
# 3-2
midwest <- midwest %>% rename(total=poptotal,
asian=popasian)
# 3-3: Asian population share per county (percent)
midwest <- midwest %>% mutate(asian_percnt = asian/total*100)
# 3-4: classify counties relative to the overall mean share
mean_asian_percnt <- mean(midwest$asian_percnt)
midwest <- midwest %>% mutate( asian_size = ifelse(asian_percnt > mean_asian_percnt, 'large', 'small') )
# Problem 4: filter + summarise (reload mpg with the original column names)
mpg <- as.data.frame(ggplot2::mpg)
# 4-1: compare mean highway mileage by engine displacement
undr5_mean_hwy <- mpg %>%
filter(displ<=4) %>%
summarise(mean(hwy)) %>%
as.numeric()
over4_mean_hwy <- mpg %>%
filter(displ>=5) %>%
summarise(mean(hwy)) %>%
as.numeric()
undr5_mean_hwy; over4_mean_hwy # therefore cars with displacement <= 4 have the higher mean hwy
# 4-2: compare mean city mileage between two manufacturers
audi_mean_cty <- mpg %>%
filter(manufacturer=='audi') %>%
summarise(mean(cty)) %>%
as.numeric()
toyota_mean_cty <- mpg %>%
filter(manufacturer=='toyota') %>%
summarise(mean(cty)) %>%
as.numeric()
audi_mean_cty; toyota_mean_cty # therefore toyota has the higher mean cty
# 4-3: mean hwy over three manufacturers
# NOTE(review): picking manufacturers by position in unique() is fragile --
# it depends on row order; naming them explicitly would be safer.
tmp_vec <- unique(mpg$manufacturer)[c(2, 4, 5)]
mpg %>%
filter(manufacturer %in% tmp_vec) %>%
summarise(mean_hwy = mean(hwy))
# Problem 5: column selection
mpg <- as.data.frame(ggplot2::mpg)
# 5-1
mpg_new <- mpg %>% select(class, cty)
mpg_new %>% head
# 5-2: mean cty by class
mpg_new %>%
filter(class=='suv') %>%
summarise(mean_cty_suv = mean(cty))
mpg_new %>%
filter(class=='compact') %>%
summarise(mean_cty_compact = mean(cty))
# therefore the compact class has the higher mean cty
# Problem 6-1: audi's top 5 cars by highway mileage
mpg %>%
filter(manufacturer=='audi') %>%
arrange(desc(hwy)) %>%
head(5)
|
# Plot an nnet::nnet model as a network diagram (a "neural interpretation
# diagram" when nid = TRUE).
#
# mod.in   : fitted nnet object; uses mod.in$n (layer sizes: input, hidden,
#            output), mod.in$wts (flat weight vector) and mod.in$call (to
#            recover variable names from either the x/y or formula interface).
# nid      : TRUE -> line width scales with |weight| (max rel.rsc) and colour
#            encodes sign (pos.col / neg.col); FALSE -> plain lines.
# all.out  : TRUE, or the name of one response variable to restrict which
#            hidden->output connections are drawn.
# all.in   : TRUE, or the name of one input variable to restrict which
#            input->hidden connections are drawn.
# wts.only : if TRUE, return the named list of weight vectors and do not plot.
# rel.rsc, circle.cex, node.labs, line.stag, cex.val, alpha.val, circle.col :
#            graphical tuning parameters only.
# Called for its plotting side effect unless wts.only = TRUE.
plot.nnet<-function(mod.in,nid=T,all.out=T,all.in=T,wts.only=F,rel.rsc=5,circle.cex=5,node.labs=T,
line.stag=NULL,cex.val=1,alpha.val=1,circle.col='lightgrey',pos.col='black',neg.col='grey',...){
# NOTE(review): require()/library(scales) are invoked several times below;
# one library(scales) at the top would suffice (and library() fails fast).
require(scales)
#gets weights for neural network, output is list
#if rescaled argument is true, weights are returned but rescaled based on abs value
# One list element per hidden node ("hidden i": bias + input weights) and
# per output node ("out i": bias + hidden weights), sliced out of the flat
# mod.in$wts vector.
nnet.vals<-function(mod.in,nid,rel.rsc){
library(scales)
layers<-mod.in$n
wts<-mod.in$wts
if(nid) wts<-rescale(abs(wts),c(1,rel.rsc))
indices<-matrix(seq(1,layers[1]*layers[2]+layers[2]),ncol=layers[2])
out.ls<-list()
for(i in 1:ncol(indices)){
out.ls[[paste('hidden',i)]]<-wts[indices[,i]]
}
if(layers[3]==1) out.ls[['out 1']]<-wts[(max(indices)+1):length(wts)]
else{
out.indices<-matrix(seq(max(indices)+1,length(wts)),ncol=layers[3])
for(i in 1:ncol(out.indices)){
out.ls[[paste('out',i)]]<-wts[out.indices[,i]]
}
}
out.ls
}
wts<-nnet.vals(mod.in,nid=F)
if(wts.only) return(wts)
#par(mar=numeric(4),oma=numeric(4),family='serif')
library(scales)
struct<-mod.in$n   # c(#input, #hidden, #output) nodes
x.range<-c(0,100)
y.range<-c(0,100)
#these are all proportions from 0-1
if(is.null(line.stag)) line.stag<-0.011*circle.cex/2
layer.x<-seq(0.17,0.9,length=3)   # x position of the input/hidden/output layers
bias.x<-c(mean(layer.x[1:2]),mean(layer.x[2:3]))   # bias nodes sit between layers
bias.y<-0.95
in.col<-bord.col<-circle.col
circle.cex<-circle.cex   # NOTE(review): self-assignment, no effect (dead code)
#get variable names from nnet object
# handles both the x/y matrix interface and the formula interface
if(is.null(mod.in$call$formula)){
x.names<-colnames(eval(mod.in$call$x))
y.names<-colnames(eval(mod.in$call$y))
}
else{
forms<-eval(mod.in$call$formula)
dat.names<-model.frame(forms,data=eval(mod.in$call$data))
y.names<-as.character(forms)[2]
x.names<-names(dat.names)[!names(dat.names) %in% y.names]
}
#initiate plot (empty canvas; everything is drawn via points/segments below)
plot(x.range,y.range,type='n',axes=F,ylab='',xlab='',...)
#function for getting y locations for input, hidden, output layers
#input is integer value from 'struct'
# nodes are centred vertically, spaced relative to the largest layer
get.ys<-function(lyr){
spacing<-diff(c(0*diff(y.range),0.9*diff(y.range)))/max(struct)
seq(0.5*(diff(y.range)+spacing*(lyr-1)),0.5*(diff(y.range)-spacing*(lyr-1)),
length=lyr)
}
#function for plotting nodes
#'layer' specifies which layer, integer from 'struct'
#'x.loc' indicates x location for layer, integer from 'layer.x'
#'layer.name' is string indicating text to put in node
layer.points<-function(layer,x.loc,layer.name,cex=cex.val){
x<-rep(x.loc*diff(x.range),layer)
y<-get.ys(layer)
points(x,y,pch=21,cex=circle.cex,col=in.col,bg=bord.col)
if(node.labs) text(x,y,paste(layer.name,1:layer,sep=''),cex=cex.val)
if(layer.name=='I' & node.labs){
text(x-line.stag*diff(x.range),y,x.names,pos=2,cex=cex.val)
}
if(layer.name=='O' & node.labs)
text(x+line.stag*diff(x.range),y,y.names,pos=4,cex=cex.val)
}
#function for plotting bias points
#'bias.x' is vector of values for x locations
#'bias.y' is vector for y location
#'layer.name' is string indicating text to put in node
bias.points<-function(bias.x,bias.y,layer.name,cex,...){
for(val in 1:length(bias.x)){
points(
diff(x.range)*bias.x[val],
bias.y*diff(y.range),
pch=21,col=in.col,bg=bord.col,cex=circle.cex
)
if(node.labs)
text(
diff(x.range)*bias.x[val],
bias.y*diff(y.range),
paste(layer.name,val,sep=''),
cex=cex.val
)
}
}
#function creates lines colored by direction and width as proportion of magnitude
#use 'all.in' argument if you want to plot connection lines for only a single input node
layer.lines<-function(mod.in,h.layer,layer1=1,layer2=2,out.layer=F,nid,rel.rsc,all.in,pos.col,
neg.col,...){
x0<-rep(layer.x[layer1]*diff(x.range)+line.stag*diff(x.range),struct[layer1])
x1<-rep(layer.x[layer2]*diff(x.range)-line.stag*diff(x.range),struct[layer1])
if(out.layer==T){
y0<-get.ys(struct[layer1])
y1<-rep(get.ys(struct[layer2])[h.layer],struct[layer1])
src.str<-paste('out',h.layer)
wts<-nnet.vals(mod.in,nid=F,rel.rsc)
wts<-wts[grep(src.str,names(wts))][[1]][-1]   # [-1] drops the bias weight
wts.rs<-nnet.vals(mod.in,nid=T,rel.rsc)
wts.rs<-wts.rs[grep(src.str,names(wts.rs))][[1]][-1]
cols<-rep(pos.col,struct[layer1])
cols[wts<0]<-neg.col
if(nid) segments(x0,y0,x1,y1,col=cols,lwd=wts.rs)
else segments(x0,y0,x1,y1)
}
else{
if(is.logical(all.in)) all.in<-h.layer
else all.in<-which(x.names==all.in)
y0<-rep(get.ys(struct[layer1])[all.in],struct[2])
y1<-get.ys(struct[layer2])
src.str<-'hidden'
wts<-nnet.vals(mod.in,nid=F,rel.rsc)
wts<-unlist(lapply(wts[grep(src.str,names(wts))],function(x) x[all.in+1]))
wts.rs<-nnet.vals(mod.in,nid=T,rel.rsc)
wts.rs<-unlist(lapply(wts.rs[grep(src.str,names(wts.rs))],function(x) x[all.in+1]))
cols<-rep(pos.col,struct[layer2])
cols[wts<0]<-neg.col
if(nid) segments(x0,y0,x1,y1,col=cols,lwd=wts.rs)
else segments(x0,y0,x1,y1)
}
}
# draws bias->hidden (val==1) and bias->output (val==2) connections
bias.lines<-function(bias.x,mod.in,nid,rel.rsc,all.out,pos.col,neg.col,...){
if(is.logical(all.out)) all.out<-1:struct[3]
else all.out<-which(y.names==all.out)
for(val in 1:length(bias.x)){
wts<-nnet.vals(mod.in,nid=F,rel.rsc)
wts.rs<-nnet.vals(mod.in,nid=T,rel.rsc)
if(val==1){
wts<-wts[grep('out',names(wts),invert=T)]
wts.rs<-wts.rs[grep('out',names(wts.rs),invert=T)]
}
if(val==2){
wts<-wts[grep('out',names(wts))]
wts.rs<-wts.rs[grep('out',names(wts.rs))]
}
cols<-rep(pos.col,length(wts))
cols[unlist(lapply(wts,function(x) x[1]))<0]<-neg.col   # x[1] = bias weight
wts.rs<-unlist(lapply(wts.rs,function(x) x[1]))
if(nid==F){
wts.rs<-rep(1,struct[val+1])
cols<-rep('black',struct[val+1])
}
if(val==1){
segments(
rep(diff(x.range)*bias.x[val]+diff(x.range)*line.stag,struct[val+1]),
rep(bias.y*diff(y.range),struct[val+1]),
rep(diff(x.range)*layer.x[val+1]-diff(x.range)*line.stag,struct[val+1]),
get.ys(struct[val+1]),
lwd=wts.rs,
col=cols
)
}
if(val==2){
segments(
rep(diff(x.range)*bias.x[val]+diff(x.range)*line.stag,struct[val+1]),
rep(bias.y*diff(y.range),struct[val+1]),
rep(diff(x.range)*layer.x[val+1]-diff(x.range)*line.stag,struct[val+1]),
get.ys(struct[val+1])[all.out],
lwd=wts.rs[all.out],
col=cols[all.out]
)
}
}
}
#use functions to plot connections between layers
#bias lines
bias.lines(bias.x,mod.in,nid=nid,rel.rsc=rel.rsc,all.out=all.out,pos.col=alpha(pos.col,alpha.val),
neg.col=alpha(neg.col,alpha.val))
#layer lines, makes use of arguments to plot all or for individual layers
#starts with input-hidden
#uses 'all.in' argument to plot connection lines for all input nodes or a single node
if(is.logical(all.in)){
mapply(
function(x) layer.lines(mod.in,x,layer1=1,layer2=2,nid=nid,rel.rsc=rel.rsc,all.in=all.in,
pos.col=alpha(pos.col,alpha.val),neg.col=alpha(neg.col,alpha.val)),
1:struct[1]
)
}
else{
node.in<-which(x.names==all.in)
layer.lines(mod.in,node.in,layer1=1,layer2=2,nid=nid,rel.rsc=rel.rsc,all.in=all.in,
pos.col=alpha(pos.col,alpha.val),neg.col=alpha(neg.col,alpha.val))
}
#lines for hidden-output
#uses 'all.out' argument to plot connection lines for all output nodes or a single node
if(is.logical(all.out))
mapply(
function(x) layer.lines(mod.in,x,layer1=2,layer2=3,out.layer=T,nid=nid,rel.rsc=rel.rsc,
all.in=all.in,pos.col=alpha(pos.col,alpha.val),neg.col=alpha(neg.col,alpha.val)),
1:struct[3]
)
else{
all.out<-which(y.names==all.out)
# NOTE(review): unlike every other branch, this call does not alpha()-blend
# pos.col/neg.col -- possibly an oversight; confirm before changing.
layer.lines(mod.in,all.out,layer1=2,layer2=3,out.layer=T,nid=nid,rel.rsc=rel.rsc,
pos.col=pos.col,neg.col=neg.col)
}
#use functions to plot nodes (drawn last so the circles sit on top of the lines)
layer.points(struct[1],layer.x[1],'I')
layer.points(struct[2],layer.x[2],'H')
layer.points(struct[3],layer.x[3],'O')
bias.points(bias.x,bias.y,'B')
}
| /lec07/plot.nnet.R | permissive | tgbarross/MLClass | R | false | false | 8,223 | r |
# Plot a single-hidden-layer 'nnet' model as a network diagram.
#
# When nid = TRUE a neural interpretation diagram is drawn: connection line
# width is the absolute weight rescaled to [1, rel.rsc] and line colour
# indicates the weight sign (pos.col / neg.col).  When nid = FALSE all
# connections are plain black lines of width 1.
#
# Arguments:
#   mod.in     model object returned by nnet (matrix or formula interface)
#   nid        logical, draw a neural interpretation diagram
#   all.out    TRUE for all output nodes, or the name of one response variable
#   all.in     TRUE for all input nodes, or the name of one predictor
#   wts.only   if TRUE, return the named weight list instead of plotting
#   rel.rsc    upper bound for rescaled line widths
#   circle.cex point expansion factor for the node circles
#   node.labs  logical, draw node/variable labels
#   line.stag  gap between node edge and line endpoints (fraction of x range)
#   cex.val    label text size
#   alpha.val  alpha transparency applied to connection colours
#   circle.col fill/border colour for node circles
#   pos.col    line colour for positive weights
#   neg.col    line colour for negative weights
#   ...        further arguments passed to plot()
plot.nnet <- function(mod.in, nid=T, all.out=T, all.in=T, wts.only=F, rel.rsc=5,
  circle.cex=5, node.labs=T, line.stag=NULL, cex.val=1, alpha.val=1,
  circle.col='lightgrey', pos.col='black', neg.col='grey', ...){
  # FIX: 'library' stops with a clear error when 'scales' is missing; the
  # original 'require' would only warn and then fail later inside rescale().
  # The redundant library(scales) calls that followed have been removed.
  library(scales)
  # Extract the weights from the nnet object as a named list:
  # 'hidden i' = weights into hidden node i, 'out i' = weights into output i,
  # with the bias weight first in each element.  If nid = TRUE the absolute
  # weights are rescaled to [1, rel.rsc] for use as line widths.
  nnet.vals <- function(mod.in, nid, rel.rsc){
    layers <- mod.in$n
    wts <- mod.in$wts
    if(nid) wts <- rescale(abs(wts), c(1, rel.rsc))
    indices <- matrix(seq(1, layers[1]*layers[2]+layers[2]), ncol=layers[2])
    out.ls <- list()
    for(i in 1:ncol(indices)){
      out.ls[[paste('hidden', i)]] <- wts[indices[, i]]
    }
    if(layers[3]==1) out.ls[['out 1']] <- wts[(max(indices)+1):length(wts)]
    else{
      out.indices <- matrix(seq(max(indices)+1, length(wts)), ncol=layers[3])
      for(i in 1:ncol(out.indices)){
        out.ls[[paste('out', i)]] <- wts[out.indices[, i]]
      }
    }
    out.ls
  }
  # FIX: rel.rsc is now passed explicitly; the original omitted it here and
  # only lazy evaluation (nid = F never touches rel.rsc) made that harmless.
  wts <- nnet.vals(mod.in, nid=F, rel.rsc)
  if(wts.only) return(wts)
  #par(mar=numeric(4),oma=numeric(4),family='serif')
  struct <- mod.in$n        # c(n inputs, n hidden, n outputs)
  x.range <- c(0, 100)
  y.range <- c(0, 100)
  # these are all proportions from 0-1
  if(is.null(line.stag)) line.stag <- 0.011*circle.cex/2
  layer.x <- seq(0.17, 0.9, length=3)
  bias.x <- c(mean(layer.x[1:2]), mean(layer.x[2:3]))
  bias.y <- 0.95
  in.col <- bord.col <- circle.col
  # (removed the no-op 'circle.cex <- circle.cex' self-assignment)
  # get variable names from the nnet object, handling both the x/y-matrix
  # interface and the formula interface
  if(is.null(mod.in$call$formula)){
    x.names <- colnames(eval(mod.in$call$x))
    y.names <- colnames(eval(mod.in$call$y))
  }
  else{
    forms <- eval(mod.in$call$formula)
    dat.names <- model.frame(forms, data=eval(mod.in$call$data))
    y.names <- as.character(forms)[2]
    x.names <- names(dat.names)[!names(dat.names) %in% y.names]
  }
  # initiate an empty plot; nodes and lines are drawn onto it below
  plot(x.range, y.range, type='n', axes=F, ylab='', xlab='', ...)
  # y locations for the nodes of one layer; input is an integer from 'struct'
  get.ys <- function(lyr){
    spacing <- diff(c(0*diff(y.range), 0.9*diff(y.range)))/max(struct)
    seq(0.5*(diff(y.range)+spacing*(lyr-1)), 0.5*(diff(y.range)-spacing*(lyr-1)),
      length=lyr)
  }
  # draw the nodes of one layer and, optionally, their labels
  # 'layer' is the node count, 'x.loc' the x position, 'layer.name' the label
  layer.points <- function(layer, x.loc, layer.name, cex=cex.val){
    x <- rep(x.loc*diff(x.range), layer)
    y <- get.ys(layer)
    points(x, y, pch=21, cex=circle.cex, col=in.col, bg=bord.col)
    if(node.labs) text(x, y, paste(layer.name, 1:layer, sep=''), cex=cex.val)
    if(layer.name=='I' & node.labs){
      text(x-line.stag*diff(x.range), y, x.names, pos=2, cex=cex.val)
    }
    if(layer.name=='O' & node.labs)
      text(x+line.stag*diff(x.range), y, y.names, pos=4, cex=cex.val)
  }
  # draw the two bias nodes at y = bias.y
  bias.points <- function(bias.x, bias.y, layer.name, cex, ...){
    for(val in 1:length(bias.x)){
      points(
        diff(x.range)*bias.x[val],
        bias.y*diff(y.range),
        pch=21, col=in.col, bg=bord.col, cex=circle.cex
      )
      if(node.labs)
        text(
          diff(x.range)*bias.x[val],
          bias.y*diff(y.range),
          paste(layer.name, val, sep=''),
          cex=cex.val
        )
    }
  }
  # connection lines between two layers, coloured by sign and (if nid) with
  # width proportional to weight magnitude; 'all.in' restricts input-hidden
  # lines to a single named input node
  layer.lines <- function(mod.in, h.layer, layer1=1, layer2=2, out.layer=F, nid,
    rel.rsc, all.in, pos.col, neg.col, ...){
    x0 <- rep(layer.x[layer1]*diff(x.range)+line.stag*diff(x.range), struct[layer1])
    x1 <- rep(layer.x[layer2]*diff(x.range)-line.stag*diff(x.range), struct[layer1])
    if(out.layer==T){
      y0 <- get.ys(struct[layer1])
      y1 <- rep(get.ys(struct[layer2])[h.layer], struct[layer1])
      src.str <- paste('out', h.layer)
      wts <- nnet.vals(mod.in, nid=F, rel.rsc)
      wts <- wts[grep(src.str, names(wts))][[1]][-1]     # drop the bias weight
      wts.rs <- nnet.vals(mod.in, nid=T, rel.rsc)
      wts.rs <- wts.rs[grep(src.str, names(wts.rs))][[1]][-1]
      cols <- rep(pos.col, struct[layer1])
      cols[wts<0] <- neg.col
      if(nid) segments(x0, y0, x1, y1, col=cols, lwd=wts.rs)
      else segments(x0, y0, x1, y1)
    }
    else{
      if(is.logical(all.in)) all.in <- h.layer
      else all.in <- which(x.names==all.in)
      y0 <- rep(get.ys(struct[layer1])[all.in], struct[2])
      y1 <- get.ys(struct[layer2])
      src.str <- 'hidden'
      wts <- nnet.vals(mod.in, nid=F, rel.rsc)
      wts <- unlist(lapply(wts[grep(src.str, names(wts))], function(x) x[all.in+1]))
      wts.rs <- nnet.vals(mod.in, nid=T, rel.rsc)
      wts.rs <- unlist(lapply(wts.rs[grep(src.str, names(wts.rs))], function(x) x[all.in+1]))
      cols <- rep(pos.col, struct[layer2])
      cols[wts<0] <- neg.col
      if(nid) segments(x0, y0, x1, y1, col=cols, lwd=wts.rs)
      else segments(x0, y0, x1, y1)
    }
  }
  # lines from the two bias nodes to the hidden (val = 1) and output (val = 2)
  # layers; sign/width conventions as in layer.lines
  bias.lines <- function(bias.x, mod.in, nid, rel.rsc, all.out, pos.col, neg.col, ...){
    if(is.logical(all.out)) all.out <- 1:struct[3]
    else all.out <- which(y.names==all.out)
    for(val in 1:length(bias.x)){
      wts <- nnet.vals(mod.in, nid=F, rel.rsc)
      wts.rs <- nnet.vals(mod.in, nid=T, rel.rsc)
      if(val==1){
        wts <- wts[grep('out', names(wts), invert=T)]
        wts.rs <- wts.rs[grep('out', names(wts.rs), invert=T)]
      }
      if(val==2){
        wts <- wts[grep('out', names(wts))]
        wts.rs <- wts.rs[grep('out', names(wts.rs))]
      }
      # first element of each weight vector is the bias weight
      cols <- rep(pos.col, length(wts))
      cols[unlist(lapply(wts, function(x) x[1]))<0] <- neg.col
      wts.rs <- unlist(lapply(wts.rs, function(x) x[1]))
      if(nid==F){
        wts.rs <- rep(1, struct[val+1])
        cols <- rep('black', struct[val+1])
      }
      if(val==1){
        segments(
          rep(diff(x.range)*bias.x[val]+diff(x.range)*line.stag, struct[val+1]),
          rep(bias.y*diff(y.range), struct[val+1]),
          rep(diff(x.range)*layer.x[val+1]-diff(x.range)*line.stag, struct[val+1]),
          get.ys(struct[val+1]),
          lwd=wts.rs,
          col=cols
        )
      }
      if(val==2){
        segments(
          rep(diff(x.range)*bias.x[val]+diff(x.range)*line.stag, struct[val+1]),
          rep(bias.y*diff(y.range), struct[val+1]),
          rep(diff(x.range)*layer.x[val+1]-diff(x.range)*line.stag, struct[val+1]),
          get.ys(struct[val+1])[all.out],
          lwd=wts.rs[all.out],
          col=cols[all.out]
        )
      }
    }
  }
  # use functions to plot connections between layers
  # bias lines
  bias.lines(bias.x, mod.in, nid=nid, rel.rsc=rel.rsc, all.out=all.out,
    pos.col=alpha(pos.col, alpha.val), neg.col=alpha(neg.col, alpha.val))
  # input-hidden lines; 'all.in' selects all input nodes or a single one
  if(is.logical(all.in)){
    mapply(
      function(x) layer.lines(mod.in, x, layer1=1, layer2=2, nid=nid, rel.rsc=rel.rsc,
        all.in=all.in, pos.col=alpha(pos.col, alpha.val), neg.col=alpha(neg.col, alpha.val)),
      1:struct[1]
    )
  }
  else{
    node.in <- which(x.names==all.in)
    layer.lines(mod.in, node.in, layer1=1, layer2=2, nid=nid, rel.rsc=rel.rsc,
      all.in=all.in, pos.col=alpha(pos.col, alpha.val), neg.col=alpha(neg.col, alpha.val))
  }
  # hidden-output lines; 'all.out' selects all output nodes or a single one
  if(is.logical(all.out))
    mapply(
      function(x) layer.lines(mod.in, x, layer1=2, layer2=3, out.layer=T, nid=nid,
        rel.rsc=rel.rsc, all.in=all.in, pos.col=alpha(pos.col, alpha.val),
        neg.col=alpha(neg.col, alpha.val)),
      1:struct[3]
    )
  else{
    all.out <- which(y.names==all.out)
    layer.lines(mod.in, all.out, layer1=2, layer2=3, out.layer=T, nid=nid, rel.rsc=rel.rsc,
      pos.col=pos.col, neg.col=neg.col)
  }
  # draw the nodes last so the circles sit on top of the connection lines
  layer.points(struct[1], layer.x[1], 'I')
  layer.points(struct[2], layer.x[2], 'H')
  layer.points(struct[3], layer.x[3], 'O')
  bias.points(bias.x, bias.y, 'B')
}
|
\encoding{utf8}
\name{plot.HOF}
\alias{plot.HOF}
\alias{plot.HOF.list}
\title{Plot Hierarchical Logistic Regression Models}
\description{Plot single or multiple HOF models with or without model parameters.}
\usage{
\method{plot}{HOF}(x, marginal = c('bar', 'rug', 'hist', 'points', 'n'), boxp = TRUE,
las.h = 1, yl, main, model, test = 'AICc', modeltypes, onlybest = TRUE, penal, para =
FALSE, gam.se = FALSE, color, newdata = NULL, lwd=1, leg = TRUE, add=FALSE, xlabel, ...)
\method{plot}{HOF.list}(x, plottype = c("layout", "lattice", "all") , xlabel = NULL,
test = 'AICc', modeltypes, border.top = 0.1, color, yl, leg = FALSE, ...)
}
\arguments{
\item{x}{an object from \code{HOF(spec, \dots)}.}
\item{marginal}{type of marginal representation for occurrences/absences.}
\item{boxp}{plotting of horizontal boxplots}
\item{las.h}{orientation of axes labels (0 = vertical, 1 = horizontal)}
\item{yl}{range of y axis, useful for rare species. Must be given as fraction of M (between 0 and 1).}
\item{main}{optional plot title}
\item{model}{specific HOF model used, if not selected automatically.}
\item{test}{test for model selection. Alternatives are \code{"AICc"} (default), \code{"F"},
\code{"Chisq"}, \code{"AIC"}, \code{"BIC"} and \code{"Dev"iance}. }
\item{modeltypes}{vector of suggested model types}
\item{onlybest}{plot only the best model according to chosen Information criterion. If set to FALSE all calculated models will be plotted, but the best model with a thicker line.}
\item{penal}{penalty term for model types, default is the number of model parameter}
\item{para}{should model parameters (optima, raw.mean, niche,..) be plotted.}
\item{gam.se}{plotting of two times standard error of predict.gam as confidence interval}
\item{color}{model line color, vector of length seven}
\item{newdata}{curves are plotted for original x-values. Otherwise you have to provide a vector with new gradient values.}
\item{leg}{legend for model type (and parameters)}
\item{lwd}{line width of model curve(s)}
\item{plottype}{plottype, see details}
\item{add}{add to existing plot}
\item{xlabel}{x axis label}
\item{border.top}{height of top border for legend}
\item{\dots}{further arguments passed to or from other methods.}
}
\details{
Plottype \code{layout} will give a normal plot for a single species, or if the HOF object contains several species,
the graphics display will be divided by \code{\link{autolayout}}. Multiple species can also be plotted by a \code{'lattice'}
xyplot and plotted with plot.HOF for every species. The third option (plottype='all') plots all selected species
on the same graph which might be useful to evaluate e.g. the species within one vegetation plot, see examples.
A \code{rug} adds a rug representation (1-d plot) of the data to the plot. A rug plot is a compact way of
illustrating the marginal distributions of x. Positions of the data points along x and y are denoted by tick marks,
reminiscent of the tassels on a rug. Rug marks are overlaid onto the axis.
A \code{marginal='bar'} plot will display the original response values. For binary data this will be identical to rug.
}
\seealso{\code{\link{HOF}} }
\references{
de la Cruz Rot M (2005) Improving the Presentation of Results of Logistic Regression with R.
Bulletin of the Ecological Society of America 86: 41-48
}
\examples{
data(acre)
sel <- c('MATRREC', 'RUMEACT', 'SILENOC', 'APHAARV', 'MYOSARV', 'DESUSOP', 'ARTE#VU')
mo <- HOF(acre[match(sel, names(acre))], acre.env$PH_KCL, M=1, bootstrap=NULL)
par(mar=c(2,2,1,.1))
plot(mo, para=TRUE)
# An example for plottype='all' to show species responses for the species within
# the most acidic and the most calcareous vegetation plot.
\dontrun{
allSpeciesFromAnAcidicPlot <- acre['57',] > 0
mods.acidic <- HOF(acre[,allSpeciesFromAnAcidicPlot],acre.env$PH_KCL,M=1,bootstrap=NULL)
allSpeciesFromAnCalcareousPlot <- acre['87',] > 0
mods.calc <- HOF(acre[,allSpeciesFromAnCalcareousPlot],acre.env$PH_KCL,M=1,bootstrap=NULL)
autolayout(2)
plot(mods.acidic, plottype='all', main='Plot with low pH')
abline(v=acre.env$PH_KCL[acre.env$RELEVE_NR == '57'])
names(mods.acidic)
plot(mods.calc, plottype='all', main='Plot with high pH')
abline(v=acre.env$PH_KCL[acre.env$RELEVE_NR == '87'])
names(mods.calc)
}
}
\author{ Florian Jansen }
\keyword{ model }
| /man/plot.HOF.Rd | no_license | shahar710/eHOF | R | false | false | 4,399 | rd | \encoding{utf8}
\name{plot.HOF}
\alias{plot.HOF}
\alias{plot.HOF.list}
\title{Plot Hierarchical Logistic Regression Models}
\description{Plot single or multiple HOF models with or without model parameters.}
\usage{
\method{plot}{HOF}(x, marginal = c('bar', 'rug', 'hist', 'points', 'n'), boxp = TRUE,
las.h = 1, yl, main, model, test = 'AICc', modeltypes, onlybest = TRUE, penal, para =
FALSE, gam.se = FALSE, color, newdata = NULL, lwd=1, leg = TRUE, add=FALSE, xlabel, ...)
\method{plot}{HOF.list}(x, plottype = c("layout", "lattice", "all") , xlabel = NULL,
test = 'AICc', modeltypes, border.top = 0.1, color, yl, leg = FALSE, ...)
}
\arguments{
\item{x}{an object from \code{HOF(spec, \dots)}.}
\item{marginal}{type of marginal representation for occurrences/absences.}
\item{boxp}{plotting of horizontal boxplots}
\item{las.h}{orientation of axes labels (0 = vertical, 1 = horizontal)}
\item{yl}{range of y axis, useful for rare species. Must be given as fraction of M (between 0 and 1).}
\item{main}{optional plot title}
\item{model}{specific HOF model used, if not selected automatically.}
\item{test}{test for model selection. Alternatives are \code{"AICc"} (default), \code{"F"},
\code{"Chisq"}, \code{"AIC"}, \code{"BIC"} and \code{"Dev"iance}. }
\item{modeltypes}{vector of suggested model types}
\item{onlybest}{plot only the best model according to chosen Information criterion. If set to FALSE all calculated models will be plotted, but the best model with a thicker line.}
\item{penal}{penalty term for model types, default is the number of model parameter}
\item{para}{should model parameters (optima, raw.mean, niche,..) be plotted.}
\item{gam.se}{plotting of two times standard error of predict.gam as confidence interval}
\item{color}{model line color, vector of length seven}
\item{newdata}{curves are plotted for original x-values. Otherwise you have to provide a vector with new gradient values.}
\item{leg}{legend for model type (and parameters)}
\item{lwd}{line width of model curve(s)}
\item{plottype}{plottype, see details}
\item{add}{add to existing plot}
\item{xlabel}{x axis label}
\item{border.top}{height of top border for legend}
\item{\dots}{further arguments passed to or from other methods.}
}
\details{
Plottype \code{layout} will give a normal plot for a single species, or if the HOF object contains several species,
the graphics display will be divided by \code{\link{autolayout}}. Multiple species can also be plotted by a \code{'lattice'}
xyplot and plotted with plot.HOF for every species. The third option (plottype='all') plots all selected species
on the same graph which might be useful to evaluate e.g. the species within one vegetation plot, see examples.
A \code{rug} adds a rug representation (1-d plot) of the data to the plot. A rug plot is a compact way of
illustrating the marginal distributions of x. Positions of the data points along x and y are denoted by tick marks,
reminiscent of the tassels on a rug. Rug marks are overlaid onto the axis.
A \code{marginal='bar'} plot will display the original response values. For binary data this will be identical to rug.
}
\seealso{\code{\link{HOF}} }
\references{
de la Cruz Rot M (2005) Improving the Presentation of Results of Logistic Regression with R.
Bulletin of the Ecological Society of America 86: 41-48
}
\examples{
data(acre)
sel <- c('MATRREC', 'RUMEACT', 'SILENOC', 'APHAARV', 'MYOSARV', 'DESUSOP', 'ARTE#VU')
mo <- HOF(acre[match(sel, names(acre))], acre.env$PH_KCL, M=1, bootstrap=NULL)
par(mar=c(2,2,1,.1))
plot(mo, para=TRUE)
# An example for plottype='all' to show species responses for the species within
# the most acidic and the most calcareous vegetation plot.
\dontrun{
allSpeciesFromAnAcidicPlot <- acre['57',] > 0
mods.acidic <- HOF(acre[,allSpeciesFromAnAcidicPlot],acre.env$PH_KCL,M=1,bootstrap=NULL)
allSpeciesFromAnCalcareousPlot <- acre['87',] > 0
mods.calc <- HOF(acre[,allSpeciesFromAnCalcareousPlot],acre.env$PH_KCL,M=1,bootstrap=NULL)
autolayout(2)
plot(mods.acidic, plottype='all', main='Plot with low pH')
abline(v=acre.env$PH_KCL[acre.env$RELEVE_NR == '57'])
names(mods.acidic)
plot(mods.calc, plottype='all', main='Plot with high pH')
abline(v=acre.env$PH_KCL[acre.env$RELEVE_NR == '87'])
names(mods.calc)
}
}
\author{ Florian Jansen }
\keyword{ model }
|
#
# Chapter 03 - EDA.R -- Based on material from Chapter 3 of Larose and Larose, 2015
# Uses the tidyverse rather than plain R.
#
# 12/22/2018 - Jeff Smith
#
# Exploratory analysis of a telecom customer-churn dataset: univariate
# distributions, churn vs. categorical and numeric predictors, a derived
# "Talkers" flag, and predictor-correlation checks.
library(tidyverse)
churn <- read_csv("../data/churn.txt")
# Explore the data
summary(churn)
# Sample histogram -- account length
ggplot(data = churn) +
  geom_histogram(mapping = aes(x = AccountLength))
# DayMins
ggplot(data = churn) +
  geom_histogram(mapping = aes(x = DayMins))
# DayCalls
ggplot(data = churn) +
  geom_histogram(mapping = aes(x = DayCalls))
# DayCharge
ggplot(data = churn) +
  geom_histogram(mapping = aes(x = DayCharge))
# Churners
# Churn bar chart
ggplot(data = churn) +
  geom_bar(mapping = aes(x=Churn)) +
  coord_flip()
# Summaries with percentages
churn %>%
  group_by(Churn) %>%
  summarize(n = n()) %>%
  mutate(freq = n / sum(n))
# With some other variable means
churn %>%
  group_by(Churn) %>%
  summarize(n = n(), AvgDayMins = mean(DayMins), AvgNightMins = mean(NightMins), AvgCustServ = mean(CustServCalls))
# International Plan
# International plan bar chart
ggplot(data = churn) +
  geom_bar(mapping = aes(x=IntPlan)) +
  coord_flip()
# Summary
churn %>%
  group_by(IntPlan) %>%
  summarise(n = n()) %>%
  mutate(freq = n / sum(n))
# Churn with Int'l plan
ggplot(data = churn) +
  geom_bar(mapping = aes(x=Churn, fill=IntPlan)) +
  coord_flip()
# normalize
ggplot(data = churn) +
  geom_bar(mapping = aes(x=Churn, fill=IntPlan), position='fill') +
  coord_flip()
# flip the variables
ggplot(data = churn) +
  geom_bar(mapping = aes(x=IntPlan, fill=Churn)) +
  coord_flip()
# normalize
ggplot(data = churn) +
  geom_bar(mapping = aes(x=IntPlan, fill=Churn), position='fill') +
  coord_flip()
# Contingency tables
# group and summarize
churn %>%
  group_by(IntPlan, Churn) %>%
  summarize(n = n())
# Spread by IntPlan value
churn %>%
  group_by(IntPlan, Churn) %>%
  summarize(n = n()) %>%
  spread(key = IntPlan, value = n)
# Same data, but spread by Churn
churn %>%
  group_by(Churn, IntPlan) %>%
  summarize(n = n()) %>%
  spread(key = Churn, value = n)
# Churn vs. Voice Mail Plan
ggplot(data = churn) +
  geom_bar(mapping = aes(x=VMailPlan, fill=Churn)) +
  coord_flip()
# normalized
ggplot(data = churn) +
  geom_bar(mapping = aes(x=VMailPlan, fill=Churn), position='fill') +
  coord_flip()
# contingency table
churn %>%
  group_by(Churn, VMailPlan) %>%
  summarise(n = n()) %>%
  spread(key = VMailPlan, value = n)
# Numerical Variables
# Customer Service Calls
ggplot(data = churn) +
  geom_bar(mapping = aes(x=CustServCalls, fill=Churn))
ggplot(data = churn) +
  geom_bar(mapping = aes(x=CustServCalls, fill=Churn), position='fill')
# numbers
churn %>%
  group_by(CustServCalls) %>%
  summarize(n = n())
# inspect the customers with the most (9) service calls
filter(churn, CustServCalls == 9)[,c('AccountLength', 'DayMins', 'Churn')]
# Day Minutes
ggplot(data = churn) +
  geom_histogram(mapping = aes(x=DayMins, fill=Churn))
ggplot(data = churn) +
  geom_histogram(mapping = aes(x=DayMins, fill=Churn), position='fill')
#
# Scatter plot of Evening minutes vs. Day minutes
# Seems like a clear transition line
ggplot(data=churn) +
  geom_point(aes(x=EveMins, y=DayMins,color=Churn))
# Guess a line:
# y = 400 - .6x is the book value; 385 is used below as the eyeballed fit.
ggplot(data=churn) +
  geom_point(aes(x=EveMins, y=DayMins,color=Churn)) +
  geom_abline(intercept=385, slope=-0.6)
# Add a flag variable to indicate the bad side of the line
churn$Talkers <- 0
index <- churn$DayMins > 385 - .6*churn$EveMins
churn$Talkers[index] <- 1
# Create two datasets
talkers <- filter(churn, Talkers == 1)
nontalkers <- filter(churn, Talkers == 0)
# Compare the talkers and nontalkers
# scatter
ggplot(data=talkers) +
  geom_point(aes(x=EveMins, y=DayMins,color=Churn))
ggplot(data=nontalkers) +
  geom_point(aes(x=EveMins, y=DayMins,color=Churn))
# Churn bar chart for talkers and then nontalkers
ggplot(data = talkers) +
  geom_bar(mapping = aes(x=Churn)) +
  coord_flip()
ggplot(data = nontalkers) +
  geom_bar(mapping = aes(x=Churn)) +
  coord_flip()
# Summaries with percentages
talkers %>%
  group_by(Churn) %>%
  summarize(n = n()) %>%
  mutate(freq = n / sum(n))
nontalkers %>%
  group_by(Churn) %>%
  summarize(n = n()) %>%
  mutate(freq = n / sum(n))
#
# Cust service calls vs Day Mins
# Seems like a couple of clumps of churners (upper left, lower right)
ggplot(data=churn) +
  geom_point(aes(x=DayMins, y=CustServCalls,color=Churn))
# Filter out the upper left clump
clump1 <- churn %>%
  filter(CustServCalls>4, DayMins<200)
ggplot(data=clump1) +
  geom_point(aes(x=EveMins, y=DayMins,color=Churn))
ggplot(data=clump1) +
  geom_point(aes(x=DayMins, y=CustServCalls,color=Churn))
# Partition the dataset
churners <- filter(churn, Churn == 'True.')
notchurners <- filter(churn, Churn == 'False.')
# Correlation among predictors
pairs(~churn$DayMins+churn$DayCalls+churn$DayCharge)
# Will discuss the details of this method soon.
fit <- lm(churn$DayCharge~churn$DayMins)
summary(fit)
pairs(~churn$NightMins+churn$NightCalls+churn$NightCharge)
# an add-in library for ggplot2
library(GGally)
ggpairs(select(churn, c('NightMins', 'NightCharge', 'DayMins', 'DayCharge')))
ggpairs(select(churn, c('Churn', 'NightMins', 'DayMins')))
# Big plots -- look at Zoomed version
ggpairs(data= select(churn, c('NightMins','NightCharge', 'DayMins','DayCharge', 'IntlMins', 'IntPlan', 'Churn')))
# Add color based on Churn
ggpairs(data= select(churn, c('NightMins','NightCharge', 'DayMins','DayCharge', 'IntlMins', 'IntPlan', 'Churn')),
  mapping=ggplot2::aes(colour = Churn))
| /R/Chapter 03 - EDA.R | no_license | ausim/DataAnalyticsForOperations | R | false | false | 5,532 | r | #
# Chapter 03 - EDA.R -- Based on material from Chapter 3 of Larose and Larose, 2015
# Uses the tidyverse rather than plain R.
#
# 12/22/2018 - Jeff Smith
#
library(tidyverse)
churn <- read_csv("../data/churn.txt")
# Explore the data
summary(churn)
# Sample histogram -- account length
ggplot(data = churn) +
geom_histogram(mapping = aes(x = AccountLength))
# DayMins
ggplot(data = churn) +
geom_histogram(mapping = aes(x = DayMins))
# DayCalls
ggplot(data = churn) +
geom_histogram(mapping = aes(x = DayCalls))
# DayCharge
ggplot(data = churn) +
geom_histogram(mapping = aes(x = DayCharge))
# Churners
# Churn bar chart
ggplot(data = churn) +
geom_bar(mapping = aes(x=Churn)) +
coord_flip()
# Summaries with percentages
churn %>%
group_by(Churn) %>%
summarize(n = n()) %>%
mutate(freq = n / sum(n))
# With some other variable means
churn %>%
group_by(Churn) %>%
summarize(n = n(), AvgDayMins = mean(DayMins), AvgNightMins = mean(NightMins), AvgCustServ = mean(CustServCalls))
# International Plan
# International plan bar chart
ggplot(data = churn) +
geom_bar(mapping = aes(x=IntPlan)) +
coord_flip()
# Summary
churn %>%
group_by(IntPlan) %>%
summarise(n = n()) %>%
mutate(freq = n / sum(n))
# Churn with Int'l plan
ggplot(data = churn) +
geom_bar(mapping = aes(x=Churn, fill=IntPlan)) +
coord_flip()
# normalize
ggplot(data = churn) +
geom_bar(mapping = aes(x=Churn, fill=IntPlan), position='fill') +
coord_flip()
# flip the variables
ggplot(data = churn) +
geom_bar(mapping = aes(x=IntPlan, fill=Churn)) +
coord_flip()
# normalize
ggplot(data = churn) +
geom_bar(mapping = aes(x=IntPlan, fill=Churn), position='fill') +
coord_flip()
# Contingency tables
# group and summarize
churn %>%
group_by(IntPlan, Churn) %>%
summarize(n = n())
# Spread by IntPlan value
churn %>%
group_by(IntPlan, Churn) %>%
summarize(n = n()) %>%
spread(key = IntPlan, value = n)
# Same data, but spead by Churn
churn %>%
group_by(Churn, IntPlan) %>%
summarize(n = n()) %>%
spread(key = Churn, value = n)
# Churn vs. Voice Mail Plan
ggplot(data = churn) +
geom_bar(mapping = aes(x=VMailPlan, fill=Churn)) +
coord_flip()
# normalized
ggplot(data = churn) +
geom_bar(mapping = aes(x=VMailPlan, fill=Churn), position='fill') +
coord_flip()
# contingency table
churn %>%
group_by(Churn, VMailPlan) %>%
summarise(n = n()) %>%
spread(key = VMailPlan, value = n)
# Numerical Variables
# Customer Service Calls
ggplot(data = churn) +
geom_bar(mapping = aes(x=CustServCalls, fill=Churn))
ggplot(data = churn) +
geom_bar(mapping = aes(x=CustServCalls, fill=Churn), position='fill')
# numbers
churn %>%
group_by(CustServCalls) %>%
summarize(n = n())
filter(churn, CustServCalls == 9)[,c('AccountLength', 'DayMins', 'Churn')]
# Day Minutes
ggplot(data = churn) +
geom_histogram(mapping = aes(x=DayMins, fill=Churn))
ggplot(data = churn) +
geom_histogram(mapping = aes(x=DayMins, fill=Churn), position='fill')
#
# Scatter plot of Evening minutes vs. Day minutes
# Seems like a clear transition line
ggplot(data=churn) +
geom_point(aes(x=EveMins, y=DayMins,color=Churn))
# Guess a line:
# y = 400 - .6x is the book value.
ggplot(data=churn) +
geom_point(aes(x=EveMins, y=DayMins,color=Churn)) +
geom_abline(intercept=385, slope=-0.6)
# Add a flag variable to indicate the bad side of the line
churn$Talkers <- 0
index <- churn$DayMins > 385 - .6*churn$EveMins
churn$Talkers[index] <- 1
# Create two datasets
talkers <- filter(churn, Talkers == 1)
nontalkers <- filter(churn, Talkers == 0)
# Compare the talkers and nontalkers
# scatter
ggplot(data=talkers) +
geom_point(aes(x=EveMins, y=DayMins,color=Churn))
ggplot(data=nontalkers) +
geom_point(aes(x=EveMins, y=DayMins,color=Churn))
# Churn bar chart for talkers and then nontalkers
ggplot(data = talkers) +
geom_bar(mapping = aes(x=Churn)) +
coord_flip()
ggplot(data = nontalkers) +
geom_bar(mapping = aes(x=Churn)) +
coord_flip()
# Summaries with percentages
talkers %>%
group_by(Churn) %>%
summarize(n = n()) %>%
mutate(freq = n / sum(n))
nontalkers %>%
group_by(Churn) %>%
summarize(n = n()) %>%
mutate(freq = n / sum(n))
#
# Cust service calls vs Day Mins
# Seems like a couple of clumps of churners (upper left, lower right)
ggplot(data=churn) +
geom_point(aes(x=DayMins, y=CustServCalls,color=Churn))
# Filter out the upper left clump
clump1 <- churn %>%
filter(CustServCalls>4, DayMins<200)
ggplot(data=clump1) +
geom_point(aes(x=EveMins, y=DayMins,color=Churn))
ggplot(data=clump1) +
geom_point(aes(x=DayMins, y=CustServCalls,color=Churn))
# Partition the dataset
churners <- filter(churn, Churn == 'True.')
notchurners <- filter(churn, Churn == 'False.')
# Correlation among predictors
pairs(~churn$DayMins+churn$DayCalls+churn$DayCharge)
# Will discuss the details of this method soon.
fit <- lm(churn$DayCharge~churn$DayMins)
summary(fit)
pairs(~churn$NightMins+churn$NightCalls+churn$NightCharge)
# an add-in library for ggplot2
library(GGally)
ggpairs(select(churn, c('NightMins', 'NightCharge', 'DayMins', 'DayCharge')))
ggpairs(select(churn, c('Churn', 'NightMins', 'DayMins')))
# Big plots -- look at Zoomed version
ggpairs(data= select(churn, c('NightMins','NightCharge', 'DayMins','DayCharge', 'IntlMins', 'IntPlan', 'Churn')))
# Add color based on Churn
ggpairs(data= select(churn, c('NightMins','NightCharge', 'DayMins','DayCharge', 'IntlMins', 'IntPlan', 'Churn')),
mapping=ggplot2::aes(colour = Churn))
|
with(a34d63c2a53c1499a8dad04db3bfe74e6, {ROOT <- 'D:/SEMOSS/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/80bb2a25-ac5d-47d0-abfc-b3f3811f0936';rm(list=ls())}); | /80bb2a25-ac5d-47d0-abfc-b3f3811f0936/R/Temp/aj3uvje2MouZF.R | no_license | ayanmanna8/test | R | false | false | 201 | r | with(a34d63c2a53c1499a8dad04db3bfe74e6, {ROOT <- 'D:/SEMOSS/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/80bb2a25-ac5d-47d0-abfc-b3f3811f0936';rm(list=ls())}); |
# Plot 3: energy sub-metering for 2007-02-01 and 2007-02-02, one line per
# sub-meter, written to plot3.png (480x480).  dev.off() follows this block.
File <- "household_power_consumption.txt"
Data <- read.table(File, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
# BUG FIX: the subset was assigned to 'SubSet' but referenced as 'subSet'
# below; R is case-sensitive, so the original script stopped with
# "object 'subSet' not found".  The name is now consistent.
subSet <- Data[Data$Date %in% c("1/2/2007","2/2/2007") ,]
# combine date and time into POSIXlt timestamps for the x axis
datetime <- strptime(paste(subSet$Date, subSet$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(subSet$Global_active_power)  # not used by this plot
subMetering1 <- as.numeric(subSet$Sub_metering_1)
subMetering2 <- as.numeric(subSet$Sub_metering_2)
subMetering3 <- as.numeric(subSet$Sub_metering_3)
png("plot3.png", width=480, height=480)
plot(datetime, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(datetime, subMetering2, type="l", col="red")
lines(datetime, subMetering3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
dev.off() | /Plot3.R | no_license | spsuryaprakash/ExData_Plotting1 | R | false | false | 842 | r | File <- "household_power_consumption.txt"
Data <- read.table(File, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
SubSet <- Data[Data$Date %in% c("1/2/2007","2/2/2007") ,]
datetime <- strptime(paste(subSet$Date, subSet$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(subSet$Global_active_power)
subMetering1 <- as.numeric(subSet$Sub_metering_1)
subMetering2 <- as.numeric(subSet$Sub_metering_2)
subMetering3 <- as.numeric(subSet$Sub_metering_3)
png("plot3.png", width=480, height=480)
plot(datetime, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(datetime, subMetering2, type="l", col="red")
lines(datetime, subMetering3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
dev.off() |
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define server logic required to draw a histogram
# Server logic for the body-fat calculator Shiny app.
shinyServer(function(input, output) {
  output$text1 <- renderText({
    "Input your measurements below to receive a body fat index based on average values."
  })
  output$text2 <- renderText({
    "Use a tape measure to determine your waist, wrist, hip and forearm circumference."
  })
  # Becomes truthy once the user has pressed the 'calculate' button at least once.
  v = reactiveValues(doCalculate = FALSE)
  observeEvent(input$calculate, {
    # 0 will be coerced to FALSE
    # 1+ will be coerced to TRUE
    v$doCalculate <- input$calculate
  })
  output$text3 <- renderText({
    if (v$doCalculate == FALSE) return("Please enter your body data and click the button")
    "Your body fat percentage is:"
  })
  output$bodyfat <- renderText({
    if (v$doCalculate == FALSE) return()
    # BUG FIX: the forearm/wrist terms previously sat on a following line that
    # began with '+'; R parses a line starting with '+' as a new expression,
    # so those two terms were silently dropped from the estimate.  Keeping the
    # operators at the end of each line makes the formula one expression.
    # NOTE(review): 'input$writst' looks like a typo for 'wrist', but it must
    # match the inputId declared in ui.R, so it is left unchanged -- confirm.
    bodyfat = 7.776 - 0.1263 * input$height_cm + 0.05329 * input$age -
      0.37239 * input$neck + 0.72955 * input$abdomen +
      0.27822 * input$forearm - 1.6408 * input$writst
    paste(bodyfat)
  })
  output$text4 <- renderText({
    if (v$doCalculate == FALSE) return()
    "Body Fat Percentage Categorie:"
  })
  # Static reference table of body-fat classifications.
  output$static <- renderTable({
    if (v$doCalculate == FALSE) return()
    data.frame("Classification" = c("Essential Fat", "Athletes", "Fitness", "Acceptable", "Obese"),
      "Women" = c("10-12%", "14-20%", "21-24%", "25-31%", "32% +"),
      "Men" = c("2-4%", "6-13%", "14-17%", "18-25%", "25% +"))
  })
  output$text5 <- renderText({
    if (v$doCalculate == FALSE) return()
    "Please check out what category you belong to and do more exercise if you are overweight"
  })
  output$text6 <- renderText({
    "The author and maintainer of this app is Chenhao Fang. Please contact throught cfang45@wisc.edu if you encountered any bugs."
  })
  output$text7 <- renderText({
    "For source code of this app, please check https://github.com/USTCLink/STAT628-Module-2."
  })
})
| /app/BodyFatCalculater/server.R | permissive | USTCLink/STAT628-Module-2 | R | false | false | 2,436 | r | #
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define server logic required to draw a histogram
# Server logic for the body-fat calculator Shiny app.
shinyServer(function(input, output) {
  output$text1 <- renderText({
    "Input your measurements below to receive a body fat index based on average values."
  })
  output$text2 <- renderText({
    "Use a tape measure to determine your waist, wrist, hip and forearm circumference."
  })
  # Becomes truthy once the user has pressed the 'calculate' button at least once.
  v = reactiveValues(doCalculate = FALSE)
  observeEvent(input$calculate, {
    # 0 will be coerced to FALSE
    # 1+ will be coerced to TRUE
    v$doCalculate <- input$calculate
  })
  output$text3 <- renderText({
    if (v$doCalculate == FALSE) return("Please enter your body data and click the button")
    "Your body fat percentage is:"
  })
  output$bodyfat <- renderText({
    if (v$doCalculate == FALSE) return()
    # BUG FIX: the forearm/wrist terms previously sat on a following line that
    # began with '+'; R parses a line starting with '+' as a new expression,
    # so those two terms were silently dropped from the estimate.  Keeping the
    # operators at the end of each line makes the formula one expression.
    # NOTE(review): 'input$writst' looks like a typo for 'wrist', but it must
    # match the inputId declared in ui.R, so it is left unchanged -- confirm.
    bodyfat = 7.776 - 0.1263 * input$height_cm + 0.05329 * input$age -
      0.37239 * input$neck + 0.72955 * input$abdomen +
      0.27822 * input$forearm - 1.6408 * input$writst
    paste(bodyfat)
  })
  output$text4 <- renderText({
    if (v$doCalculate == FALSE) return()
    "Body Fat Percentage Categorie:"
  })
  # Static reference table of body-fat classifications.
  output$static <- renderTable({
    if (v$doCalculate == FALSE) return()
    data.frame("Classification" = c("Essential Fat", "Athletes", "Fitness", "Acceptable", "Obese"),
      "Women" = c("10-12%", "14-20%", "21-24%", "25-31%", "32% +"),
      "Men" = c("2-4%", "6-13%", "14-17%", "18-25%", "25% +"))
  })
  output$text5 <- renderText({
    if (v$doCalculate == FALSE) return()
    "Please check out what category you belong to and do more exercise if you are overweight"
  })
  output$text6 <- renderText({
    "The author and maintainer of this app is Chenhao Fang. Please contact throught cfang45@wisc.edu if you encountered any bugs."
  })
  output$text7 <- renderText({
    "For source code of this app, please check https://github.com/USTCLink/STAT628-Module-2."
  })
})
|
################################################################################*
# Dataset 244, Channel Islands, CA benthos
#
# Data and metadata can be found here: http://esapubs.org/archive/ecol/E094/245
# Formatted by Sara Snell and Allen Hurlbert
# Note that this is the Benthic Density data, which includes more species than
# the Benthic Cover data within this dataset.
#-------------------------------------------------------------------------------*
# ---- SET-UP ----
#===============================================================================*
# This script is best viewed in RStudio. I like to reduced the size of my window
# to roughly the width of the section lines (as above). Additionally, ensure
# that your global options are set to soft-wrap by selecting:
# Tools/Global Options .../Code Editing/Soft-wrap R source files
# Load libraries:
library(stringr)
library(plyr)
library(ggplot2)
library(grid)
library(gridExtra)
library(MASS)
# Source the functions file (provides dataFormattingTableFieldUpdate and
# the other core-transient helpers used throughout this script):
getwd()
source('scripts/R-scripts/core-transient_functions.R')

# Get data. First specify the dataset number ('datasetID') you are working with.
#####
datasetID = 244

# Build the raw-data paths from datasetID so the ID is defined in exactly one
# place. (The original hard-coded "244" in the paths and wrapped a single
# string in paste(..., sep = ''), which is a no-op.)
list.files(paste('data/raw_datasets/dataset_', datasetID, sep = ''))
dataset = read.csv(paste('data/raw_datasets/dataset_', datasetID, '.csv', sep = ''))
dataFormattingTable = read.csv('data_formatting_table.csv')

# Record the name of the original raw data file in the data formatting table.
dataFormattingTable[,'Raw_datafile_name'] =
  dataFormattingTableFieldUpdate(datasetID, 'Raw_datafile_name',
                                 #--! PROVIDE INFO !--#
                                 'Benthic Density Data.csv')
########################################################
# ANALYSIS CRITERIA #
########################################################
# These thresholds are consumed later by richnessYearSubsetFun() and
# subsetDataFun(); changing them changes which site-years survive analysis.
# Min number of time samples required
minNTime = 6
# Min number of species required
minSpRich = 10
# Ultimately, the largest number of spatial and
# temporal subsamples will be chosen to characterize
# an assemblage such that at least this fraction
# of site-years will be represented.
topFractionSites = 0.5
#######################################################
#-------------------------------------------------------------------------------*
# ---- EXPLORE THE DATASET ----
#===============================================================================*
# Here, you are predominantly interested in getting to know the dataset, and determine what the fields represent and which fields are relevant.
# View field names:
names(dataset)
# View how many records and fields:
dim(dataset)
# View the structure of the dataset:
# View first 6 rows of the dataset:
head(dataset)
# Here, we can see that there are some fields that we won't use. Let's remove them, note that I've given a new name here "dataset1", this is to ensure that we don't have to go back to square 1 if we've miscoded anything.
# If all fields will be used, then set unusedFields = 9999.
names(dataset)
#####
unusedFieldNames = c('Replicates', 'AreaPerReplicate', 'DensitySE')
unusedFields = which(names(dataset) %in% unusedFieldNames)
# NOTE(review): if none of unusedFieldNames were present, which() would return
# integer(0) and dataset[,-integer(0)] would select ZERO columns, silently
# emptying the data frame. Safe here only because all three fields exist.
dataset1 = dataset[,-unusedFields]
# You also might want to change the names of the identified species field [to 'species'] and/or the identified site field [to 'site']. Just make sure you make specific comments on what the field name was before you made the change, as seen above.
# Explore, if everything looks okay, you're ready to move forward. If not, retrace your steps to look for and fix errors.
head(dataset1, 10)
# I've found it helpful to explore more than just the first 6 data points given with just a head(), so I used head(dataset#, 10) or even 20 to 50 to get a better snapshot of what the data looks like. Do this periodically throughout the formatting process
# !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE DATA WERE MODIFIED!
#!DATA FORMATTING TABLE UPDATE!
# Are the ONLY site identifiers the latitude and longitude of the observation or
# sample? (I.e., there are no site names or site IDs or other designations) Y/N
dataFormattingTable[,'LatLong_sites'] =
dataFormattingTableFieldUpdate(datasetID, 'LatLong_sites', # Fill value in below
#####
'N')
#-------------------------------------------------------------------------------*
# ---- FORMAT TIME DATA ----
#===============================================================================*
# Here, we need to extract the sampling dates.
# What is the name of the field that has information on sampling date?
# If date info is in separate columns (e.g., 'day', 'month', and 'year' cols),
# then write these field names as a vector from largest to smallest temporal grain.
#####
dateFieldName = c('Date')
# If necessary, paste together date info from multiple columns into single field.
# BUG FIX: the loop previously indexed `dataset` instead of `dataset1`. That is
# inert for this dataset (only one date field, so the branch never runs), but
# it is wrong whenever multiple date columns are supplied, because the fields
# removed above only exist in `dataset`, not `dataset1`, and the two frames
# are no longer guaranteed to align column-wise.
if (length(dateFieldName) > 1) {
  newDateField = dataset1[, dateFieldName[1]]
  for (i in dateFieldName[2:length(dateFieldName)]) {
    newDateField = paste(newDateField, dataset1[, i], sep = "-")
  }
  dataset1$date = newDateField
  datefield = 'date'
} else {
  # Single date column: just remember its name for the conversion step below.
  datefield = dateFieldName
}
# What is the format in which date data is recorded? For example, if it is
# recorded as 5/30/94, then this would be '%m/%d/%y', while 1994-5-30 would
# be '%Y-%m-%d'. Type "?strptime" for other examples of date formatting.
#####
# NOTE(review): '%b' (abbreviated month name) parsing is locale-dependent;
# this assumes the month abbreviations in the raw file match the R session's
# locale (presumably English) -- confirm when running elsewhere.
dateformat = '%d-%b-%Y'
# If date is only listed in years:
# dateformat = '%Y'
# If the date is just a year, then make sure it is of class numeric
# and not a factor. Otherwise change to a true date object.
if (dateformat == '%Y' | dateformat == '%y') {
date = as.numeric(as.character(dataset1[, datefield]))
} else {
date = as.POSIXct(strptime(dataset1[, datefield], dateformat))
}
# A check on the structure lets you know that date field is now a date object:
class(date)
# Give a double-check, if everything looks okay replace the column:
head(dataset1[, datefield])
head(date)
dataset2 = dataset1
# Delete the old date field
dataset2 = dataset2[, -which(names(dataset2) %in% dateFieldName)]
# Assign the new date values in a field called 'date'
dataset2$date = date
# Check the results:
head(dataset2)
# !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE DATE DATA WERE MODIFIED!
#!DATA FORMATTING TABLE UPDATE!
# Notes_timeFormat. Provide a thorough description of any modifications that were made to the time field.
dataFormattingTable[,'Notes_timeFormat'] =
dataFormattingTableFieldUpdate(datasetID, 'Notes_timeFormat', # Fill value in below
#####
'Temporal data provided as sampling dates')
# subannualTgrain. After exploring the time data, was this dataset sampled at a sub-annual temporal grain? Y/N
dataFormattingTable[,'subannualTgrain'] =
dataFormattingTableFieldUpdate(datasetID, 'subannualTgrain', # Fill value in below
#####
'Y')
#-------------------------------------------------------------------------------*
# ---- EXPLORE AND FORMAT SITE DATA ----
#===============================================================================*
# From the previous head command, we can see that sites are broken up into (potentially) 5 fields. Find the metadata link in the data formatting table and use that link to determine how sites are characterized.
# -- If sampling is nested (e.g., site, block, plot, quad as in this study), use each of the identifying fields and separate each field with an underscore. For nested samples be sure the order of concatenated columns goes from coarser to finer scales (e.g. "km_m_cm")
# -- If sites are listed as lats and longs, use the finest available grain and separate lat and long fields with an underscore.
# -- If the site definition is clear, make a new site column as necessary.
# -- If the dataset is for just a single site, and there is no site column, then add one.
# In this dataset we have 10 quadrats per station, distributed along
# a 50 m transect.
# Here, we will concatenate all of the potential fields that describe the site
# in hierarchical order from largest to smallest grain. Based on the dataset,
# fill in the fields that specify nested spatial grains below.
#####
site_grain_names = c("Site")
# We will now create the site field with these codes concatenated if there
# are multiple grain fields. Otherwise, site will just be the single grain field.
num_grains = length(site_grain_names)
site = dataset2[, site_grain_names[1]]
if (num_grains > 1) {
for (i in 2:num_grains) {
site = paste(site, dataset2[, site_grain_names[i]], sep = "_")
}
}
# What is the spatial grain of the finest sampling scale? For example, this might be
# a 0.25 m2 quadrat, or a 5 m transect, or a 50 ml water sample.
# Here: 60 (m2) -- the coarsest band-transect area also used to rescale the
# density counts in the count-formatting section below.
dataFormattingTable[,'Raw_spatial_grain'] =
dataFormattingTableFieldUpdate(datasetID, 'Raw_spatial_grain',
#--! PROVIDE INFO !--#
60)
dataFormattingTable[,'Raw_spatial_grain_unit'] =
dataFormattingTableFieldUpdate(datasetID, 'Raw_spatial_grain_unit',
#--! PROVIDE INFO !--#
'm2')
# BEFORE YOU CONTINUE. We need to make sure that there are at least minNTime years of sampling for sites at the coarsest possible spatial grain.
siteCoarse = dataset2[, site_grain_names[1]]
# Extract the sampling year: 'date' is already a numeric year when dateformat
# was year-only; otherwise it is a POSIXct and the year is formatted out.
if (dateformat == '%Y' | dateformat == '%y') {
dateYear = dataset2$date
} else {
dateYear = format(dataset2$date, '%Y')
}
datasetYearTest = data.frame(siteCoarse, dateYear)
# Print the number of distinct sampling years per coarse site for inspection.
ddply(datasetYearTest, .(siteCoarse), summarise,
lengthYears = length(unique(dateYear)))
# If the dataset has less than minNTime years per site, do not continue processing.
# Do some quality control by comparing the site fields in the dataset with the new vector of sites:
head(site)
# Check how evenly represented all of the sites are in the dataset. If this is the
# type of dataset where every site was sampled on a regular schedule, then you
# expect to see similar values here across sites. Sites that only show up a small
# percent of the time may reflect typos.
data.frame(table(site))
# Note that several swaths occur with much less frequency than others.
# Mike Kenner, one of the data authors says via email:
# "I don't have the data at my fingertips but the explanation actually lies in
# the fact that site 6 was lost to sand inundation around 1982 or 83. When it was
# recovered those two swaths were changed. 22 L was established to make up for
# the loss if 45 R and 39 was switched from R to L (or the reverse, can't recall
# which we sample now). Anyway, that's the story with the odd swath count history
# there."
# All looks correct, so replace the site column in the dataset (as a factor) and remove the unnecessary fields, starting by copying dataset2 into a new dataset3:
dataset3 = dataset2
dataset3$site = factor(site)
# Remove any hierarchical site related fields that are no longer needed, IF NECESSARY.
#####dataset3 = dataset3[,-c(1:2)]
# Check the new dataset (are the columns as they should be?):
head(dataset3)
# !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE SITE DATA WERE MODIFIED!
# !DATA FORMATTING TABLE UPDATE!
# Raw_siteUnit. How a site is coded (i.e. if the field was concatenated such as this one, it was coded as "site_block_plot_quad"). Alternatively, if the site were concatenated from latitude and longitude fields, the encoding would be "lat_long".
dataFormattingTable[,'Raw_siteUnit'] =
dataFormattingTableFieldUpdate(datasetID, 'Raw_siteUnit', # Fill value below in quotes
#####
'Site')
# spatial_scale_variable. Is a site potentially nested (e.g., plot within a quad or decimal lat longs that could be scaled up)? Y/N
dataFormattingTable[,'spatial_scale_variable'] =
dataFormattingTableFieldUpdate(datasetID, 'spatial_scale_variable',
#####
'N') # Fill value here in quotes
# Notes_siteFormat. Use this field to THOROUGHLY describe any changes made to the site field during formatting.
dataFormattingTable[,'Notes_siteFormat'] =
dataFormattingTableFieldUpdate(datasetID, 'Notes_siteFormat', # Fill value below in quotes
#####
'Site field converted to factor, otherwise unchanged.')
#-------------------------------------------------------------------------------*
# ---- EXPLORE AND FORMAT COUNT DATA ----
#===============================================================================*
# Next, we need to explore the count records. For filling out the data formatting table, we need to change the name of the field which represents counts, densities, percent cover, etc to "count". Then we will clean up unnecessary values.
names(dataset3)
summary(dataset3)
# Fill in the original field name here
#####
countfield = 'DensityMean'
# Rename that field to the standard name 'count' expected downstream:
names(dataset3)[which(names(dataset3) == countfield)] = 'count'
# Raw values are densities per m2 aggregating across multiple sampling methods of
# different spatial scales. We here multiply the density x 60 to reflect the
# number of individuals expected over the coarsest of the sampling scales, 60 m2.
dataset3$count = dataset3$count * 60
# Now we will remove zero counts and NA's:
summary(dataset3)
# Can usually tell if there are any zeros or NAs from that summary(). If there aren't any showing, still run these functions or continue with the update of dataset# so that you are consistent with this template.
# Subset to records > 0 (if applicable):
dataset4 = subset(dataset3, count > 0)
summary(dataset4)
# Check that removing 0's did not completely remove any sampling events in
# which nothing was observed: compare the number of unique site-dates before
# (dataset3) and after (dataset4) dropping zeros.
# If no sampling events were lost, keep the smaller dataset4 (saves time in
# subsequent analyses). If events were lost, revert to dataset3 so those
# all-zero events are retained.
numEventsd3 = nrow(unique(dataset3[, c('site', 'date')]))
numEventsd4 = nrow(unique(dataset4[, c('site', 'date')]))
# Simplified from an if/else whose else branch was a no-op (dataset4 = dataset4).
if (numEventsd3 > numEventsd4) {
  dataset4 = dataset3
}
# Remove NA's:
dataset5 = na.omit(dataset4)
# How does it look?
head(dataset5)
# !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE COUNT DATA WERE MODIFIED!
#!DATA FORMATTING TABLE UPDATE!
# Possible values for countFormat field are density, cover, presence and count.
dataFormattingTable[,'countFormat'] =
  dataFormattingTableFieldUpdate(datasetID, 'countFormat', # Fill value below in quotes
                                 #####
                                 'density')
dataFormattingTable[,'Notes_countFormat'] =
  dataFormattingTableFieldUpdate(datasetID, 'Notes_countFormat', # Fill value below in quotes
                                 #####
                                 "Raw data are density per m2 based on multiple sampling methods of different spatial scales. We here multiply the density x 60 to reflect the number of individuals expected over the coarsest of the sampling scales, 60 m2 (band transects).")
#-------------------------------------------------------------------------------*
# ---- EXPLORE AND FORMAT SPECIES DATA ----
#===============================================================================*
# Here, your primary goal is to ensure that all of your species are valid. To do so, you need to look at the list of unique species very carefully. Avoid being too liberal in interpretation, if you notice an entry that MIGHT be a problem, but you can't say with certainty, create an issue on GitHub.
# First, what is the field name in which species or taxonomic data are stored?
# It will get converted to 'species'
#####
speciesField = 'Species'
dataset5$species = dataset5[, speciesField]
dataset5 = dataset5[, -which(names(dataset5) == speciesField)]
# Look at the individual species present and how frequently they occur: This way you can more easily scan the species names (listed alphabetically) and identify potential misspellings, extra characters or blank space, or other issues.
data.frame(table(dataset5$species))
# If there are entries that only specify the genus while there are others that specify the species in addition to that same genus, they need to be regrouped in order to avoid ambiguity. For example, if there are entries of 'Cygnus', 'Cygnus_columbianus', and 'Cygnus_cygnus', 'Cygnus' could refer to either species, but the observer could not identify it. This causes ambiguity in the data, and must be fixed by either 1. deleting the genus-only entry altogether, or 2. renaming the genus-species entries to just the genus-only entry.
# This decision can be fairly subjective, but generally if less than 25% of the entries are genus-only, then they can be deleted (using bad_sp). If more than 25% of the entries for that genus are only specified to the genus, then the genus-species entries should be renamed to be genus-only (using typo_name).
table(dataset5$species)
# If species names are coded (not scientific names) go back to study's metadata to learn what species should and shouldn't be in the data.
# Species information is available in Table4B_benthic_density_variables.csv from
# http://esapubs.org/archive/ecol/E094/245/metadata.php
#####
# Excluding spiny lobster and fishes from "benthic community", so that it
# includes algae, sponges, corals, gastropods, sea stars and urchins.
# Also excluding species where only presences come towards the end of the time series b/c:
# "Some species have been added to the monitoring protocols during the 30+ years
# of monitoring. Thus the absence of these species from the data early in
# monitoring cannot be taken as evidence of absence. For this reason, instead of
# a 0 or blank, the code "NA" is entered into the dataset as the density for
# species in years they were not counted."
#foo = ddply(dataset, .(Year, Species), summarize, mean = mean(DensityMean, na.rm = T))
#foo = foo[order(foo$Species, foo$Year),]
#View(foo)
bad_sp = c('8001', # spiny lobster
'14025', # goby
'14026', # goby
'14027', # kelpfish
'2015', # Dictyoneuropsis reticulata/Agarum fimbriatum
'2015.5', # Dictyoneuropsis reticulata/Agarum fimbriatum
'2016', # Sargassum horneri, invasive
'2016.5', # Sargassum horneri, invasive
'9012', # Haliotis assimilis, only a single record from 2011
'9014', # Tegula regina, NA prior to 2006
'11009') # Centrostephanus coronatus, NA prior to 1996
# NOTE(review): the species column's class (numeric vs factor) depends on how
# read.csv parsed the codes; the %in% and == comparisons below rely on R's
# implicit coercion to character matching these quoted codes -- confirm.
dataset6 = dataset5[!dataset5$species %in% bad_sp,]
# It may be useful to count the number of times each name occurs, as misspellings or typos will likely
# only show up one time.
table(dataset6$species)
# If you find any potential typos, try to confirm that the "misspelling" isn't actually a valid name.
# If not, then list the typos in typo_name, and the correct spellings in good_name,
# and then replace them using the for loop below:
#####
# NOTE(review): 2015.5 and 2016.5 were already removed via bad_sp above, so
# those two typo_name entries are no-ops by this point; only the 2002.5 ->
# 2002 (small -> adult Macrocystis pyrifera) lumping has any effect.
typo_name = c(2002.5, #small Macrocystis pyrifera)
2015.5, #Dictyoneuropsis reticulata/Agarum fimbriatum, juvenile
2016.5) #Sargassum horneri, juvenile (less than 50cm in height and no recepticles)
#####
good_name = c(2002, #combined with large M. pyrifera)
2015, #combined with large Dictyoneuropsis reticulata/Agarum fimbriatum
2016) #combined with large Sargassum horneri
if (length(typo_name) > 0) {
for (n in 1:length(typo_name)) {
dataset6$species[dataset6$species == typo_name[n]] = good_name[n]
}
}
# Reset the factor levels:
dataset6$species = factor(dataset6$species)
# Let's look at how the removal of bad species altered the length of the dataset:
nrow(dataset5)
nrow(dataset6)
# Look at the head of the dataset to ensure everything is correct:
head(dataset6)
# !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE SPECIES DATA WERE MODIFIED!
#!DATA FORMATTING TABLE UPDATE!
# Column M. Notes_spFormat. Provide a THOROUGH description of any changes made
# to the species field, including why any species were removed.
dataFormattingTable[,'Notes_spFormat'] =
dataFormattingTableFieldUpdate(datasetID, 'Notes_spFormat', # Fill value below in quotes
#####
'Codes reflecting different size classes of the same species were lumped; several species that were probably not targeted originally for sampling were removed (i.e. present only for the end of the time series).')
#-------------------------------------------------------------------------------*
# ---- MAKE DATA FRAME OF COUNT BY SITES, SPECIES, AND YEAR ----
#===============================================================================*
# Now we will make the final formatted dataset, add a datasetID field, check
# for errors, and remove records that can't be used for our purposes.
# First, let's add the datasetID:
dataset6$datasetID = datasetID
# Now make the compiled dataframe: one summed count per
# datasetID-site-date-species combination.
dataset7 = ddply(dataset6, .(datasetID, site, date, species),
                 summarize, count = sum(count))
# Explore the data frame:
dim(dataset7)
head(dataset7, 15)
summary(dataset7)
# !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE DATA WERE MODIFIED!
#-------------------------------------------------------------------------------*
# ---- UPDATE THE DATA FORMATTING TABLE AND WRITE OUTPUT DATA FRAMES ----
#===============================================================================*
# Update the data formatting table (this may take a moment to process). Note that the inputs for this are 'datasetID', the datasetID and the dataset form that you consider to be fully formatted.
dataFormattingTable = dataFormattingTableUpdate(datasetID, dataset7)
# Take a final look at the dataset:
head(dataset7)
summary(dataset7)
# If everything looks okay we're ready to write the formatted data frame.
# Tidied: paste0() replaces the equivalent paste(..., sep = ""), and the
# reassignable shorthand F is spelled out as FALSE; the output is unchanged.
write.csv(dataset7,
          paste0("data/formatted_datasets/dataset_", datasetID, ".csv"),
          row.names = FALSE)
# !GIT-ADD-COMMIT-PUSH THE FORMATTED DATASET IN THE DATA FILE, THEN GIT-ADD-COMMIT-PUSH THE UPDATED DATA FOLDER!
# As we've now successfully created the formatted dataset, we will now update the format flag field.
dataFormattingTable[,'format_flag'] =
  dataFormattingTableFieldUpdate(datasetID, 'format_flag', # Fill value below
                                 #####
                                 1)
# Flag codes are as follows:
# 0 = not currently worked on
# 1 = formatting complete
# 2 = formatting in process
# 3 = formatting halted, issue
# 4 = data unavailable
# 5 = data insufficient for generating occupancy data
# !GIT-ADD-COMMIT-PUSH THE DATA FORMATTING TABLE!
###################################################################################*
# ---- END DATA FORMATTING. START PROPOCC AND DATA SUMMARY ----
###################################################################################*
# We have now formatted the dataset to the finest possible spatial and temporal grain, removed bad species, and added the dataset ID. It's now time to make some scale decisions and determine the proportional occupancies.
# Load additional required libraries and dataset:
# NOTE(review): dplyr is loaded here AFTER plyr (loaded at the top), so dplyr
# masks plyr's summarise/summarize from this point on; any later top-level
# ddply(..., summarise) call would need plyr::summarise explicitly -- confirm
# the sourced helper functions namespace their own calls.
library(dplyr)
library(tidyr)
# Read in formatted dataset if skipping above formatting code (lines 1-450).
#dataset7 = read.csv(paste("data/formatted_datasets/dataset_",
# datasetID, ".csv", sep =''))
# Have a look at the dimensions of the dataset and number of sites:
dim(dataset7)
length(unique(dataset7$site))
length(unique(dataset7$date))
head(dataset7)
# Get the data formatting table row for just this dataset:
dataDescription = dataFormattingTable[dataFormattingTable$dataset_ID == datasetID,]
# or read it in from the saved data_formatting_table.csv if skipping lines 1-450.
#dataDescription = subset(read.csv("data_formatting_table.csv"),
# dataset_ID == datasetID)
# Check relevant table values:
dataDescription$LatLong_sites
dataDescription$spatial_scale_variable
dataDescription$Raw_siteUnit
dataDescription$subannualTgrain
# Before proceeding, we need to make decisions about the spatial and temporal grains at
# which we will conduct our analyses. Except in unusual circumstances, the temporal
# grain will almost always be 'year', but the spatial grain that best represents the
# scale of a "community" will vary based on the sampling design and the taxonomic
# group. Justify your spatial scale below with a comment.
#####
tGrain = 'year'
# Refresh your memory about the spatial grain names if this is NOT a lat-long-only
# based dataset. Set sGrain = to the hierarchical scale for analysis.
# HOWEVER, if the sites are purely defined by lat-longs, then sGrain should equal
# a numerical value specifying the block size in degrees latitude for analysis.
site_grain_names
#####
sGrain = 'site'
# This is a reasonable choice of spatial grain because ...
# ...a 1m2 quadrat is probably too small given the size of some of these
# organisms. A 50 m transect characterized by 10 quadrats seems more appropriate,
# while aggregating all 7 Stations which are many km apart would be inappropriate.
# The function "richnessYearSubsetFun" below will subset the data to sites with an
# adequate number of years of sampling and species richness. If there are no
# adequate years, the function will return a custom error message and you can
# try resetting sGrain above to something coarser. Keep trying until this
# runs without an error. If a particular sGrain value led to an error in this
# function, you can make a note of that in the spatial grain justification comment
# above. If this function fails for ALL spatial grains, then this dataset will
# not be suitable for analysis and you can STOP HERE.
richnessYearsTest = richnessYearSubsetFun(dataset7, spatialGrain = sGrain,
temporalGrain = tGrain,
minNTime = minNTime,
minSpRich = minSpRich,
dataDescription)
head(richnessYearsTest)
dim(richnessYearsTest) ; dim(dataset7)
#Number of unique sites meeting criteria
goodSites = unique(richnessYearsTest$analysisSite)
length(goodSites)
# Now subset dataset7 to just those goodSites as defined. This is tricky though
# because assuming sGrain is not the finest resolution, we need to match full
# site names that BEGIN with the string in goodSites.
# The reason to do this is that sites which don't meet the criteria (e.g. not
# enough years of data) may also have low sampling intensity that constrains
# the subsampling level of the well sampled sites.
uniqueSites = unique(dataset7$site)
# Pad every name with a trailing "_" so a coarse-grain name also matches itself
# exactly. startsWith() anchors the match at the start of the name -- the
# original grepl() call matched anywhere in the string and treated site names
# as regular expressions, both of which could pull in wrong sites. Building the
# result with lapply()/unlist() avoids growing a vector with c() in a loop.
paddedSites = paste0(as.character(uniqueSites), "_")
fullGoodSites = unlist(lapply(goodSites, function(s) {
  as.character(uniqueSites[startsWith(paddedSites, paste0(s, "_"))])
}))
dataset8 = subset(dataset7, site %in% fullGoodSites)
# Once we've settled on spatial and temporal grains that pass our test above,
# we then need to 1) figure out what levels of spatial and temporal subsampling
# we should use to characterize that analysis grain, and 2) subset the
# formatted dataset down to that standardized level of subsampling.
# For example, if some sites had 20 spatial subsamples (e.g. quads) per year while
# others had only 16, or 10, we would identify the level of subsampling that
# at least 'topFractionSites' of sites met (with a default of 50%). We would
# discard "poorly subsampled" sites (based on this criterion) from further analysis.
# For the "well-sampled" sites, the function below randomly samples the
# appropriate number of subsamples for each year or site,
# and bases the characterization of the community in that site-year based on
# the aggregate of those standardized subsamples.
dataSubset = subsetDataFun(dataset8,
datasetID,
spatialGrain = sGrain,
temporalGrain = tGrain,
minNTime = minNTime, minSpRich = minSpRich,
proportionalThreshold = topFractionSites,
dataDescription)
subsettedData = dataSubset$data
# Write the standardized (subsampled) dataset for this datasetID.
write.csv(subsettedData, paste("data/standardized_datasets/dataset_", datasetID, ".csv", sep = ""), row.names = F)
# Take a look at the propOcc (proportional occupancy of each species per site).
# Note: propOccFun() is recomputed on each of the next three lines.
head(propOccFun(subsettedData))
hist(propOccFun(subsettedData)$propOcc)
mean(propOccFun(subsettedData)$propOcc)
# Take a look at the site summary frame:
siteSummaryFun(subsettedData)
# If everything looks good, write the files:
writePropOccSiteSummary(subsettedData)
# Save the spatial and temporal subsampling values to the data formatting table:
# NOTE(review): dataSubset$w and dataSubset$z are presumably the chosen spatial
# and temporal subsample counts -- verify against subsetDataFun's definition.
dataFormattingTable[,'Spatial_subsamples'] =
dataFormattingTableFieldUpdate(datasetID, 'Spatial_subsamples', dataSubset$w)
dataFormattingTable[,'Temporal_subsamples'] =
dataFormattingTableFieldUpdate(datasetID, 'Temporal_subsamples', dataSubset$z)
# Update Data Formatting Table with summary stats of the formatted,
# properly subsetted dataset
dataFormattingTable = dataFormattingTableUpdateFinished(datasetID, subsettedData)
# And write the final data formatting table:
write.csv(dataFormattingTable, 'data_formatting_table.csv', row.names = F)
# Remove all objects except for functions from the environment.
# NOTE(review): this wipes ALL non-function globals in the session, not just
# objects created by this script -- intentional in this workflow, but beware
# when running interactively alongside other work.
rm(list = setdiff(ls(), lsf.str()))
| /scripts/R-scripts/data_cleaning_scripts/dwork_244.R | no_license | hurlbertlab/core-transient | R | false | false | 29,618 | r | ################################################################################*
# Dataset 244, Channel Islands, CA benthos
#
# Data and metadata can be found here: http://esapubs.org/archive/ecol/E094/245
# Formatted by Sara Snell and Allen Hurlbert
# Note that this is the Benthic Density data, which includes more species than
# the Benthic Cover data within this dataset.
#-------------------------------------------------------------------------------*
# ---- SET-UP ----
#===============================================================================*
# This script is best viewed in RStudio. I like to reduced the size of my window
# to roughly the width of the section lines (as above). Additionally, ensure
# that your global options are set to soft-wrap by selecting:
# Tools/Global Options .../Code Editing/Soft-wrap R source files
# Load libraries:
library(stringr)
library(plyr)
library(ggplot2)
library(grid)
library(gridExtra)
library(MASS)
# Source the shared project functions (dataFormattingTableFieldUpdate, etc.).
# getwd() is echoed so you can confirm you are at the repository root, which
# the relative paths below assume.
getwd()
source('scripts/R-scripts/core-transient_functions.R')
# Get data. First specify the dataset number ('datasetID') you are working with.
#####
datasetID = 244
# Build all raw-data paths from datasetID rather than hard-coding "244", so the
# ID only needs changing in one place when this template is reused. paste0()
# replaces the original no-op paste(..., sep = '').
list.files(paste0('data/raw_datasets/dataset_', datasetID))
dataset = read.csv(paste0('data/raw_datasets/dataset_', datasetID, '.csv'))
dataFormattingTable = read.csv('data_formatting_table.csv')
# Record the name of the raw source file in the data formatting table.
dataFormattingTable[,'Raw_datafile_name'] =
  dataFormattingTableFieldUpdate(datasetID, 'Raw_datafile_name',
                                 #--! PROVIDE INFO !--#
                                 'Benthic Density Data.csv')
########################################################
# ANALYSIS CRITERIA #
########################################################
# Min number of time samples required
minNTime = 6
# Min number of species required
minSpRich = 10
# Ultimately, the largest number of spatial and
# temporal subsamples will be chosen to characterize
# an assemblage such that at least this fraction
# of site-years will be represented.
topFractionSites = 0.5
#######################################################
#-------------------------------------------------------------------------------*
# ---- EXPLORE THE DATASET ----
#===============================================================================*
# Here, you are predominantly interested in getting to know the dataset, and determine what the fields represent and which fields are relevant.
# View field names:
names(dataset)
# View how many records and fields:
dim(dataset)
# View the structure of the dataset:
# View first 6 rows of the dataset:
head(dataset)
# Here, we can see that there are some fields that we won't use. Let's remove them, note that I've given a new name here "dataset1", this is to ensure that we don't have to go back to square 1 if we've miscoded anything.
# If all fields will be used, then set unusedFields = 9999.
names(dataset)
#####
unusedFieldNames = c('Replicates', 'AreaPerReplicate', 'DensitySE')
unusedFields = which(names(dataset) %in% unusedFieldNames)
# NOTE(review): if none of the names match, unusedFields is integer(0) and
# dataset[, -integer(0)] drops EVERY column -- fine here because all three
# fields exist, but beware when reusing this template.
dataset1 = dataset[,-unusedFields]
# You also might want to change the names of the identified species field [to 'species'] and/or the identified site field [to 'site']. Just make sure you make specific comments on what the field name was before you made the change, as seen above.
# Explore, if everything looks okay, you're ready to move forward. If not, retrace your steps to look for and fix errors.
head(dataset1, 10)
# I've found it helpful to explore more than just the first 6 data points given with just a head(), so I used head(dataset#, 10) or even 20 to 50 to get a better snapshot of what the data looks like. Do this periodically throughout the formatting process
# !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE DATA WERE MODIFIED!
#!DATA FORMATTING TABLE UPDATE!
# Are the ONLY site identifiers the latitude and longitude of the observation or
# sample? (I.e., there are no site names or site IDs or other designations) Y/N
dataFormattingTable[,'LatLong_sites'] =
dataFormattingTableFieldUpdate(datasetID, 'LatLong_sites', # Fill value in below
#####
'N')
#-------------------------------------------------------------------------------*
# ---- FORMAT TIME DATA ----
#===============================================================================*
# Here, we need to extract the sampling dates.
# What is the name of the field that has information on sampling date?
# If date info is in separate columns (e.g., 'day', 'month', and 'year' cols),
# then write these field names as a vector from largest to smallest temporal grain.
#####
dateFieldName = c('Date')
# If necessary, paste together date info from multiple columns into single field
if (length(dateFieldName) > 1) {
  # BUG FIX: the original loop read the extra columns from `dataset` (the raw,
  # uncleaned frame) instead of `dataset1`. Rows happen to align at this point,
  # but any earlier row filtering would silently misalign the dates. A single
  # do.call(paste, ...) call concatenates all date columns at once, producing
  # the same "a-b-c" strings as the original accumulating loop.
  dataset1$date = do.call(paste, c(dataset1[, dateFieldName, drop = FALSE], sep = "-"))
  datefield = 'date'
} else {
  datefield = dateFieldName
}
# What is the format in which date data is recorded? For example, if it is
# recorded as 5/30/94, then this would be '%m/%d/%y', while 1994-5-30 would
# be '%Y-%m-%d'. Type "?strptime" for other examples of date formatting.
#####
dateformat = '%d-%b-%Y'
# If date is only listed in years:
# dateformat = '%Y'
# If the date is just a year, then make sure it is of class numeric
# and not a factor. Otherwise change to a true date object.
# %in% replaces the scalar comparison `dateformat == '%Y' | dateformat == '%y'`;
# it is clearer and uses a single set membership test.
if (dateformat %in% c('%Y', '%y')) {
  date = as.numeric(as.character(dataset1[, datefield]))
} else {
  # NOTE(review): '%b' parses abbreviated month names and is locale-dependent;
  # this assumes an English locale -- confirm before running elsewhere.
  date = as.POSIXct(strptime(dataset1[, datefield], dateformat))
}
# A check on the structure lets you know that date field is now a date object:
class(date)
# Give a double-check, if everything looks okay replace the column:
head(dataset1[, datefield])
head(date)
dataset2 = dataset1
# Delete the old date field. Logical indexing replaces the original
# `-which(...)` form, which would drop EVERY column if no name matched.
dataset2 = dataset2[, !(names(dataset2) %in% dateFieldName), drop = FALSE]
# Assign the new date values in a field called 'date'
dataset2$date = date
# Check the results:
head(dataset2)
# !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE DATE DATA WERE MODIFIED!
#!DATA FORMATTING TABLE UPDATE!
# Notes_timeFormat. Provide a thorough description of any modifications that were made to the time field.
dataFormattingTable[,'Notes_timeFormat'] =
dataFormattingTableFieldUpdate(datasetID, 'Notes_timeFormat', # Fill value in below
#####
'Temporal data provided as sampling dates')
# subannualTgrain. After exploring the time data, was this dataset sampled at a sub-annual temporal grain? Y/N
dataFormattingTable[,'subannualTgrain'] =
dataFormattingTableFieldUpdate(datasetID, 'subannualTgrain', # Fill value in below
#####
'Y')
#-------------------------------------------------------------------------------*
# ---- EXPLORE AND FORMAT SITE DATA ----
#===============================================================================*
# From the previous head command, we can see that sites are broken up into (potentially) 5 fields. Find the metadata link in the data formatting table and use that link to determine how sites are characterized.
# -- If sampling is nested (e.g., site, block, plot, quad as in this study), use each of the identifying fields and separate each field with an underscore. For nested samples be sure the order of concatenated columns goes from coarser to finer scales (e.g. "km_m_cm")
# -- If sites are listed as lats and longs, use the finest available grain and separate lat and long fields with an underscore.
# -- If the site definition is clear, make a new site column as necessary.
# -- If the dataset is for just a single site, and there is no site column, then add one.
# In this dataset we have 10 quadrats per station, distributed along
# a 50 m transect.
# Here, we will concatenate all of the potential fields that describe the site
# in hierarchical order from largest to smallest grain. Based on the dataset,
# fill in the fields that specify nested spatial grains below.
#####
site_grain_names = c("Site")
# We will now create the site field with these codes concatenated if there
# are multiple grain fields. Otherwise, site will just be the single grain field.
# (With a single grain field, as here, the loop below is skipped entirely.)
num_grains = length(site_grain_names)
site = dataset2[, site_grain_names[1]]
if (num_grains > 1) {
for (i in 2:num_grains) {
site = paste(site, dataset2[, site_grain_names[i]], sep = "_")
}
}
# What is the spatial grain of the finest sampling scale? For example, this might be
# a 0.25 m2 quadrat, or a 5 m transect, or a 50 ml water sample.
dataFormattingTable[,'Raw_spatial_grain'] =
dataFormattingTableFieldUpdate(datasetID, 'Raw_spatial_grain',
#--! PROVIDE INFO !--#
60)
dataFormattingTable[,'Raw_spatial_grain_unit'] =
dataFormattingTableFieldUpdate(datasetID, 'Raw_spatial_grain_unit',
#--! PROVIDE INFO !--#
'm2')
# BEFORE YOU CONTINUE. We need to make sure that there are at least minNTime
# years of sampling for sites at the coarsest possible spatial grain.
siteCoarse = dataset2[, site_grain_names[1]]
# Extract the sampling year: dates stored as plain years are already usable,
# otherwise pull the 4-digit year out of the date object. %in% replaces the
# original scalar `|` comparison.
if (dateformat %in% c('%Y', '%y')) {
  dateYear = dataset2$date
} else {
  dateYear = format(dataset2$date, '%Y')
}
datasetYearTest = data.frame(siteCoarse, dateYear)
# Number of distinct sampling years per coarse site; each should be >= minNTime.
ddply(datasetYearTest, .(siteCoarse), summarise,
      lengthYears = length(unique(dateYear)))
# If the dataset has less than minNTime years per site, do not continue processing.
# Do some quality control by comparing the site fields in the dataset with the new vector of sites:
head(site)
# Check how evenly represented all of the sites are in the dataset. If this is the
# type of dataset where every site was sampled on a regular schedule, then you
# expect to see similar values here across sites. Sites that only show up a small
# percent of the time may reflect typos.
data.frame(table(site))
# Note that several swaths occur with much less frequency than others.
# Mike Kenner, one of the data authors says via email:
# "I don't have the data at my fingertips but the explanation actually lies in
# the fact that site 6 was lost to sand inundation around 1982 or 83. When it was
# recovered those two swaths were changed. 22 L was established to make up for
# the loss if 45 R and 39 was switched from R to L (or the reverse, can't recall
# which we sample now). Anyway, that's the story with the odd swath count history
# there."
# All looks correct, so replace the site column in the dataset (as a factor) and remove the unnecessary fields, start by renaming the dataset to dataset3:
dataset3 = dataset2
dataset3$site = factor(site)
# Remove any hierarchical site related fields that are no longer needed, IF NECESSARY.
# (Not needed here -- 'Site' is the only grain field and it was overwritten above.)
#####dataset3 = dataset3[,-c(1:2)]
# Check the new dataset (are the columns as they should be?):
head(dataset3)
# !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE SITE DATA WERE MODIFIED!
# !DATA FORMATTING TABLE UPDATE!
# Raw_siteUnit. How a site is coded (i.e. if the field was concatenated such as this one, it was coded as "site_block_plot_quad"). Alternatively, if the site were concatenated from latitude and longitude fields, the encoding would be "lat_long".
dataFormattingTable[,'Raw_siteUnit'] =
dataFormattingTableFieldUpdate(datasetID, 'Raw_siteUnit', # Fill value below in quotes
#####
'Site')
# spatial_scale_variable. Is a site potentially nested (e.g., plot within a quad or decimal lat longs that could be scaled up)? Y/N
dataFormattingTable[,'spatial_scale_variable'] =
dataFormattingTableFieldUpdate(datasetID, 'spatial_scale_variable',
#####
'N') # Fill value here in quotes
# Notes_siteFormat. Use this field to THOROUGHLY describe any changes made to the site field during formatting.
dataFormattingTable[,'Notes_siteFormat'] =
dataFormattingTableFieldUpdate(datasetID, 'Notes_siteFormat', # Fill value below in quotes
#####
'Site field converted to factor, otherwise unchanged.')
#-------------------------------------------------------------------------------*
# ---- EXPLORE AND FORMAT COUNT DATA ----
#===============================================================================*
# Next, we need to explore the count records. For filling out the data formatting table, we need to change the name of the field which represents counts, densities, percent cover, etc to "count". Then we will clean up unnecessary values.
names(dataset3)
summary(dataset3)
# Fill in the original field name here
#####
countfield = 'DensityMean'
# Renaming it
names(dataset3)[which(names(dataset3) == countfield)] = 'count'
# Raw values are densities per m2 aggregating across multiple sampling methods of
# different spatial scales. We here multiply the density x 60 to reflect the
# number of individuals expected over the coarsest of the sampling scales, 60 m2.
dataset3$count = dataset3$count * 60
# Now we will remove zero counts and NA's:
summary(dataset3)
# Can usually tell if there are any zeros or NAs from that summary(). If there aren't any showing, still run these functions or continue with the update of dataset# so that you are consistent with this template.
# Subset to records > 0 (if applicable):
dataset4 = subset(dataset3, count > 0)
summary(dataset4)
# Check to make sure that by removing 0's you haven't completely removed
# any sampling events in which nothing was observed: compare the number of
# unique site-date combinations before (dataset3) and after (dataset4)
# dropping zero counts.
# If no sampling events were lost, keep the smaller dataset4, which saves
# time in subsequent analyses. If events were lost, revert to dataset3
# (i.e. keep the 0's).
numEventsd3 = nrow(unique(dataset3[, c('site', 'date')]))
numEventsd4 = nrow(unique(dataset4[, c('site', 'date')]))
# The original 'else' branch reassigned dataset4 to itself, a no-op; a
# one-sided if expresses the intent directly.
if (numEventsd3 > numEventsd4) {
  dataset4 = dataset3
}
# Remove NA's:
dataset5 = na.omit(dataset4)
# How does it look?
head(dataset5)
# !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE COUNT DATA WERE MODIFIED!
#!DATA FORMATTING TABLE UPDATE!
# Possible values for countFormat field are density, cover, presence and count.
dataFormattingTable[,'countFormat'] =
dataFormattingTableFieldUpdate(datasetID, 'countFormat', # Fill value below in quotes
#####
'density')
dataFormattingTable[,'Notes_countFormat'] =
dataFormattingTableFieldUpdate(datasetID, 'Notes_countFormat', # Fill value below in quotes
#####
"Raw data are density per m2 based on multiple sampling methods of different spatial scales. We here multiply the density x 60 to reflect the number of individuals expected over the coarsest of the sampling scales, 60 m2 (band transects).")
#-------------------------------------------------------------------------------*
# ---- EXPLORE AND FORMAT SPECIES DATA ----
#===============================================================================*
# Here, your primary goal is to ensure that all of your species are valid. To do so, you need to look at the list of unique species very carefully. Avoid being too liberal in interpretation, if you notice an entry that MIGHT be a problem, but you can't say with certainty, create an issue on GitHub.
# First, what is the field name in which species or taxonomic data are stored?
# It will get converted to 'species'
#####
speciesField = 'Species'
dataset5$species = dataset5[, speciesField]
# NOTE(review): -which(...) would drop all columns if speciesField did not
# match; safe here because 'Species' is known to exist.
dataset5 = dataset5[, -which(names(dataset5) == speciesField)]
# Look at the individual species present and how frequently they occur: This way you can more easily scan the species names (listed alphabetically) and identify potential misspellings, extra characters or blank space, or other issues.
data.frame(table(dataset5$species))
# If there are entries that only specify the genus while there are others that specify the species in addition to that same genus, they need to be regrouped in order to avoid ambiguity. For example, if there are entries of 'Cygnus', 'Cygnus_columbianus', and 'Cygnus_cygnus', 'Cygnus' could refer to either species, but the observer could not identify it. This causes ambiguity in the data, and must be fixed by either 1. deleting the genus-only entry altogether, or 2. renaming the genus-species entries to just the genus-only entry.
# This decision can be fairly subjective, but generally if less than 25% of the entries are genus-only, then they can be deleted (using bad_sp). If more than 25% of the entries for that genus are only specified to the genus, then the genus-species entries should be renamed to be genus-only (using typo_name).
table(dataset5$species)
# If species names are coded (not scientific names) go back to study's metadata to learn what species should and shouldn't be in the data.
# Species information is available in Table4B_benthic_density_variables.csv from
# http://esapubs.org/archive/ecol/E094/245/metadata.php
#####
# Excluding spiny lobster and fishes from "benthic community", so that it
# includes algae, sponges, corals, gastropods, sea stars and urchins.
# Also excluding species where only presences come towards the end of the time series b/c:
# "Some species have been added to the monitoring protocols during the 30+ years
# of monitoring. Thus the absence of these species from the data early in
# monitoring cannot be taken as evidence of absence. For this reason, instead of
# a 0 or blank, the code "NA" is entered into the dataset as the density for
# species in years they were not counted."
#foo = ddply(dataset, .(Year, Species), summarize, mean = mean(DensityMean, na.rm = T))
#foo = foo[order(foo$Species, foo$Year),]
#View(foo)
# Species codes (character strings matched against the species factor) to drop:
bad_sp = c('8001', # spiny lobster
'14025', # goby
'14026', # goby
'14027', # kelpfish
'2015', # Dictyoneuropsis reticulata/Agarum fimbriatum
'2015.5', # Dictyoneuropsis reticulata/Agarum fimbriatum
'2016', # Sargassum horneri, invasive
'2016.5', # Sargassum horneri, invasive
'9012', # Haliotis assimilis, only a single record from 2011
'9014', # Tegula regina, NA prior to 2006
'11009') # Centrostephanus coronatus, NA prior to 1996
dataset6 = dataset5[!dataset5$species %in% bad_sp,]
# It may be useful to count the number of times each name occurs, as misspellings or typos will likely
# only show up one time.
table(dataset6$species)
# If you find any potential typos, confirm that the "misspelling" isn't actually
# a valid name. If not, list the typos in typo_name and the corrected codes in
# good_name, then replace them with the loop below.
# NOTE(review): codes 2015.5 and 2016.5 were already removed via bad_sp above,
# so only the 2002.5 -> 2002 replacement has any effect here.
#####
typo_name = c(2002.5, #small Macrocystis pyrifera)
              2015.5, #Dictyoneuropsis reticulata/Agarum fimbriatum, juvenile
              2016.5) #Sargassum horneri, juvenile (less than 50cm in height and no recepticles)
#####
good_name = c(2002, #combined with large M. pyrifera)
              2015, #combined with large Dictyoneuropsis reticulata/Agarum fimbriatum
              2016) #combined with large Sargassum horneri
# seq_along() is empty when typo_name is empty, so no length() guard is needed
# and the 1:length() zero-length trap is avoided.
for (n in seq_along(typo_name)) {
  dataset6$species[dataset6$species == typo_name[n]] = good_name[n]
}
# Reset the factor levels:
dataset6$species = factor(dataset6$species)
# Let's look at how the removal of bad species has altered the length of the dataset:
nrow(dataset5)
nrow(dataset6)
# Look at the head of the dataset to ensure everything is correct:
head(dataset6)
# !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE SPECIES DATA WERE MODIFIED!
#!DATA FORMATTING TABLE UPDATE!
# Column M. Notes_spFormat. Provide a THOROUGH description of any changes made
# to the species field, including why any species were removed.
dataFormattingTable[,'Notes_spFormat'] =
dataFormattingTableFieldUpdate(datasetID, 'Notes_spFormat', # Fill value below in quotes
#####
'Codes reflecting different size classes of the same species were lumped; several species that were probably not targeted originally for sampling were removed (i.e. present only for the end of the time series).')
#-------------------------------------------------------------------------------*
# ---- MAKE DATA FRAME OF COUNT BY SITES, SPECIES, AND YEAR ----
#===============================================================================*
# Now we will make the final formatted dataset, add a datasetID field, check for errors, and remove records that can't be used for our purposes.
# First, let's add the datasetID:
dataset6$datasetID = datasetID
# Now make the compiled dataframe: sum counts within each
# datasetID-site-date-species combination.
dataset7 = ddply(dataset6,.(datasetID, site, date, species),
summarize, count = sum(count))
# Explore the data frame:
dim(dataset7)
head(dataset7, 15)
summary(dataset7)
# !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE DATA WERE MODIFIED!
#-------------------------------------------------------------------------------*
# ---- UPDATE THE DATA FORMATTING TABLE AND WRITE OUTPUT DATA FRAMES ----
#===============================================================================*
# Update the data formatting table (this may take a moment to process). Note that the inputs for this are 'datasetID', the datasetID and the dataset form that you consider to be fully formatted.
dataFormattingTable = dataFormattingTableUpdate(datasetID, dataset7)
# Take a final look at the dataset:
head(dataset7)
summary (dataset7)
# If everything looks okay we're ready to write the formatted data frame:
write.csv(dataset7, paste("data/formatted_datasets/dataset_", datasetID, ".csv", sep = ""), row.names = F)
# !GIT-ADD-COMMIT-PUSH THE FORMATTED DATASET IN THE DATA FILE, THEN GIT-ADD-COMMIT-PUSH THE UPDATED DATA FOLDER!
# As we've now successfully created the formatted dataset, we will now update the format flag field.
dataFormattingTable[,'format_flag'] =
dataFormattingTableFieldUpdate(datasetID, 'format_flag', # Fill value below
#####
1)
# Flag codes are as follows:
# 0 = not currently worked on
# 1 = formatting complete
# 2 = formatting in process
# 3 = formatting halted, issue
# 4 = data unavailable
# 5 = data insufficient for generating occupancy data
# !GIT-ADD-COMMIT-PUSH THE DATA FORMATTING TABLE!
###################################################################################*
# ---- END DATA FORMATTING. START PROPOCC AND DATA SUMMARY ----
###################################################################################*
# We have now formatted the dataset to the finest possible spatial and temporal grain, removed bad species, and added the dataset ID. It's now time to make some scale decisions and determine the proportional occupancies.
# Load additional required libraries and dataset:
library(dplyr)
library(tidyr)
# Read in formatted dataset if skipping above formatting code (lines 1-450).
#dataset7 = read.csv(paste("data/formatted_datasets/dataset_",
# datasetID, ".csv", sep =''))
# Have a look at the dimensions of the dataset and number of sites:
dim(dataset7)
length(unique(dataset7$site))
length(unique(dataset7$date))
head(dataset7)
# Get the data formatting table for that dataset:
dataDescription = dataFormattingTable[dataFormattingTable$dataset_ID == datasetID,]
# or read it in from the saved data_formatting_table.csv if skipping lines 1-450.
#dataDescription = subset(read.csv("data_formatting_table.csv"),
# dataset_ID == datasetID)
# Check relevant table values:
dataDescription$LatLong_sites
dataDescription$spatial_scale_variable
dataDescription$Raw_siteUnit
dataDescription$subannualTgrain
# Before proceeding, we need to make decisions about the spatial and temporal grains at
# which we will conduct our analyses. Except in unusual circumstances, the temporal
# grain will almost always be 'year', but the spatial grain that best represents the
# scale of a "community" will vary based on the sampling design and the taxonomic
# group. Justify your spatial scale below with a comment.
#####
tGrain = 'year'
# Refresh your memory about the spatial grain names if this is NOT a lat-long-only
# based dataset. Set sGrain = to the hierarchical scale for analysis.
# HOWEVER, if the sites are purely defined by lat-longs, then sGrain should equal
# a numerical value specifying the block size in degrees latitude for analysis.
site_grain_names
#####
sGrain = 'site'
# This is a reasonable choice of spatial grain because ...
# ...a 1m2 quadrat is probably too small given the size of some of these
# organisms. A 50 m transect characterized by 10 quadrats seems more appropriate,
# while aggregating all 7 Stations which are many km apart would be inappropriate.
# The function "richnessYearSubsetFun" below will subset the data to sites with an
# adequate number of years of sampling and species richness. If there are no
# adequate years, the function will return a custom error message and you can
# try resetting sGrain above to something coarser. Keep trying until this
# runs without an error. If a particular sGrain value led to an error in this
# function, you can make a note of that in the spatial grain justification comment
# above. If this function fails for ALL spatial grains, then this dataset will
# not be suitable for analysis and you can STOP HERE.
richnessYearsTest = richnessYearSubsetFun(dataset7, spatialGrain = sGrain,
temporalGrain = tGrain,
minNTime = minNTime,
minSpRich = minSpRich,
dataDescription)
# Inspect the subset: it should have no more rows than dataset7.
head(richnessYearsTest)
dim(richnessYearsTest) ; dim(dataset7)
# Number of unique sites meeting the minNTime / minSpRich criteria:
goodSites = unique(richnessYearsTest$analysisSite)
length(goodSites)
# Now subset dataset7 to just those goodSites as defined. This is tricky though
# because assuming Sgrain is not the finest resolution, we will need to use
# grep to match site names that begin with the string in goodSites.
# The reason to do this is that sites which don't meet the criteria (e.g. not
# enough years of data) may also have low sampling intensity that constrains
# the subsampling level of the well sampled sites.
uniqueSites = unique(dataset7$site)
fullGoodSites = c()
for (s in goodSites) {
tmp = as.character(uniqueSites[grepl(paste(s, "_", sep = ""), paste(uniqueSites, "_", sep = ""))])
fullGoodSites = c(fullGoodSites, tmp)
}
dataset8 = subset(dataset7, site %in% fullGoodSites)
# Once we've settled on spatial and temporal grains that pass our test above,
# we then need to 1) figure out what levels of spatial and temporal subsampling
# we should use to characterize that analysis grain, and 2) subset the
# formatted dataset down to that standardized level of subsampling.
# For example, if some sites had 20 spatial subsamples (e.g. quads) per year while
# others had only 16, or 10, we would identify the level of subsampling that
# at least 'topFractionSites' of sites met (with a default of 50%). We would
# discard "poorly subsampled" sites (based on this criterion) from further analysis.
# For the "well-sampled" sites, the function below randomly samples the
# appropriate number of subsamples for each year or site,
# and bases the characterization of the community in that site-year based on
# the aggregate of those standardized subsamples.
dataSubset = subsetDataFun(dataset8,
datasetID,
spatialGrain = sGrain,
temporalGrain = tGrain,
minNTime = minNTime, minSpRich = minSpRich,
proportionalThreshold = topFractionSites,
dataDescription)
subsettedData = dataSubset$data
write.csv(subsettedData, paste("data/standardized_datasets/dataset_", datasetID, ".csv", sep = ""), row.names = F)
# Take a look at the propOcc:
head(propOccFun(subsettedData))
hist(propOccFun(subsettedData)$propOcc)
mean(propOccFun(subsettedData)$propOcc)
# Take a look at the site summary frame:
siteSummaryFun(subsettedData)
# If everything looks good, write the files:
writePropOccSiteSummary(subsettedData)
# Save the spatial and temporal subsampling values to the data formatting table:
dataFormattingTable[,'Spatial_subsamples'] =
dataFormattingTableFieldUpdate(datasetID, 'Spatial_subsamples', dataSubset$w)
dataFormattingTable[,'Temporal_subsamples'] =
dataFormattingTableFieldUpdate(datasetID, 'Temporal_subsamples', dataSubset$z)
# Update Data Formatting Table with summary stats of the formatted,
# properly subsetted dataset
dataFormattingTable = dataFormattingTableUpdateFinished(datasetID, subsettedData)
# And write the final data formatting table:
write.csv(dataFormattingTable, 'data_formatting_table.csv', row.names = F)
# Remove all objects except for functions from the environment:
rm(list = setdiff(ls(), lsf.str()))
|
# Plot3: sub-metering time series for 2007-02-01 and 2007-02-02 from the
# UCI "Individual household electric power consumption" dataset.
#
# Side effects: reads 'household_power_consumption.txt' from the working
# directory and writes 'plot3.png' (480x480 px).

# Force English weekday abbreviations on the x-axis regardless of the
# system locale. NOTE(review): "English" is a Windows locale name; on
# Linux/macOS "en_US.UTF-8" would be needed -- confirm target platform.
Sys.setlocale("LC_TIME", "English")

# Read the raw data; missing values are encoded as '?' in this dataset.
file <- 'household_power_consumption.txt'
df <- read.csv(file, header = TRUE, sep = ';', na.strings = '?')

# Keep only the two days of interest (Date values are d/m/Y strings).
dates <- c("1/2/2007", "2/2/2007")
df2 <- df[df$Date %in% dates, ]

# Combine Date and Time into a single date-time column. Wrapping the
# POSIXlt from strptime() in as.POSIXct() is the fix here: POSIXlt is a
# list-like type that does not store reliably as a data-frame column,
# while POSIXct does (and plots identically).
df2$DateTime <- as.POSIXct(strptime(paste(df2$Date, df2$Time, sep = ' '),
                                    format = '%d/%m/%Y %H:%M:%S'))

# Open the PNG device, draw the three sub-metering series, then close it.
png("plot3.png", width = 480, height = 480, units = "px")

# Empty frame first (type = "n"), then one lines() call per series.
plot(df2$DateTime, df2$Sub_metering_1, type = "n",
     xlab = "",
     ylab = "Energy sub metering")
lines(df2$DateTime, df2$Sub_metering_1, col = "black")
lines(df2$DateTime, df2$Sub_metering_2, col = "red")
lines(df2$DateTime, df2$Sub_metering_3, col = "blue")
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1,
       lwd = 2.5,
       col = c("black", "red", "blue"))

# Save to file
dev.off()
| /plot3.R | no_license | pndsc/ExData_Plotting1 | R | false | false | 1,102 | r | # If you run a different locale you need to do this to get
# correct abbreviations for weekdays on the x-axis
Sys.setlocale("LC_TIME", "English")
# Read the file to a data frame
file <- 'household_power_consumption.txt'
df <- read.csv(file, header=TRUE, sep =';', na.strings = '?')
# Load subset of the interesting dates into a smaller data frame
# using dates as factors
dates <- c("1/2/2007", "2/2/2007")
df2 <- df[df$Date %in% dates,]
# Convert Date and Time to POSIXlt time type
df2$DateTime <- strptime(paste(df2$Date, df2$Time, sep=' '), format='%d/%m/%Y %H:%M:%S')
# open png device
png("plot3.png", width=480, height=480, units="px")
# Create basic plot
plot(df2$DateTime, df2$Sub_metering_1, type="n",
xlab="",
ylab="Energy sub metering")
lines(df2$DateTime, df2$Sub_metering_1, col="black")
lines(df2$DateTime, df2$Sub_metering_2, col="red")
lines(df2$DateTime, df2$Sub_metering_3, col="blue")
legend("topright",
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
lty=1,
lwd=2.5,
col=c("black", "red", "blue"))
# Save to file
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cosinor2.R
\docType{data}
\name{PA_extraverts}
\alias{PA_extraverts}
\title{Self-reported positive affect of extraverts}
\format{A data frame with 24 rows and 6 variables:
\describe{
\item{X1, X2, X3, X4, X5, X6}{Responses of subjects at 6 measurement points (hours).}
}}
\source{
Mutak, A., Pavlović, M. & Zibar, K. (2017, May). \emph{Postoje li razlike između introverata i ekstraverata u cirkadijurnim ritmovima raspoloženja?} [\emph{Are There Differences Between Introverts and Extraverts in Circadian Mood Rhythms?}]. Study presented at the 3rd \emph{Regionalni susret studenata psihologije - SPIRI} [\emph{Regional Meeting of Psychology Students - SPIRI}] conference, Rijeka, Croatia.
}
\usage{
PA_extraverts
}
\description{
A dataset containing the responses of 24 subjects on the Positive Affect scale of the shortened version of the PANAS questionnaire (Watson, Clark & Tellegen, 1988) in January 2017.
}
\details{
Measurements were taken at 10 AM, 12 PM, 2 PM, 4 PM, 6 PM and 8 PM \eqn{\pm} 30 minutes in the period of January 16 - 22, 2017. The data contained in this dataset has been averaged for each hour across 7 days of measurement.
}
\references{
Watson, D., Clark, L. A. & Tellegen, A. (1988). Development and Validation of Brief Measures of Positive and Negative Affect: The PANAS Scales. \emph{Journal of Personality and Social Psychology}, \emph{54(6)}, 1063-1070.
}
\keyword{datasets}
| /man/PA_extraverts.Rd | no_license | cran/cosinor2 | R | false | true | 1,518 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cosinor2.R
\docType{data}
\name{PA_extraverts}
\alias{PA_extraverts}
\title{Self-reported positive affect of extraverts}
\format{A data frame with 24 rows and 6 variables:
\describe{
\item{X1, X2, X3, X4, X5, X6}{Responses of subjects at 6 measurement points (hours).}
}}
\source{
Mutak, A., Pavlović, M. & Zibar, K. (2017, May). \emph{Postoje li razlike između introverata i ekstraverata u cirkadijurnim ritmovima raspoloženja?} [\emph{Are There Differences Between Introverts and Extraverts in Circadian Mood Rhythms?}]. Study presented at the 3rd \emph{Regionalni susret studenata psihologije - SPIRI} [\emph{Regional Meeting of Psychology Students - SPIRI}] conference, Rijeka, Croatia.
}
\usage{
PA_extraverts
}
\description{
A dataset containing the responses of 24 subjects on the Positive Affect scale of the shortened version of the PANAS questionnaire (Watson, Clark & Tellegen, 1988) in January 2017.
}
\details{
Measurements were taken at 10 AM, 12 PM, 2 PM, 4 PM, 6 PM and 8 PM \eqn{\pm} 30 minutes in the period of January 16 - 22, 2017. The data contained in this dataset has been averaged for each hour across 7 days of measurement.
}
\references{
Watson, D., Clark, L. A. & Tellegen, A. (1988). Development and Validation of Brief Measures of Positive and Negative Affect: The PANAS Scales. \emph{Journal of Personality and Social Psychology}, \emph{54(6)}, 1063-1070.
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_range.R
\name{get_range}
\alias{get_range}
\title{Select Bromeliaceae Species Ranges by Taxonomy and Traits}
\usage{
get_range(
scientific = NULL,
canonical = NULL,
genus = NULL,
subfamily = NULL,
life_form = NULL,
assessment = NULL,
range_size = NULL,
type = "polygon"
)
}
\arguments{
\item{scientific}{a character vector of full scientific names including authorities, of the species of interest}
\item{canonical}{a character vector of canonical names, of the species of interest.}
\item{genus}{a character vector of genera names to select.}
\item{subfamily}{a character vector of subfamily names to select.}
\item{life_form}{a character vector of life forms to select.}
\item{assessment}{a character vector of conservation assessment to select.}
\item{range_size}{a vector of two numericals with the minimum and maximum range size (in square kilometres), to select.}
\item{type}{a character defining the output format, see details}
}
\value{
Depending on the \dQuote{type} argument. If \dQuote{binary} a presence/absence raster based on the modelled habitat suitability,
at 100x100 km resolution in Behrmann projection,
if \dQuote{suitability} the habitat suitability as predicted by an ensemble model in Behrmann projection, and
if \dQuote{polygon} a simple feature object in lat/lon projection.
.
}
\description{
Get the geographic range for all species selected via the arguments.
The type of range estimate depends on the \dQuote{type} argument.
}
\details{
Modelled ranges are available for 542 species,
range polygons for 2395 species. For species with model distribution, the range polygons are based on the models, otherwise
on a convex hull around available occurrence records, or a 50 km buffer for species with less than 3 occurrence records available.
See Zizka et al 2019 for methods.
}
\examples{
get_range(scientific = "Aechmea mexicana Baker")
get_range(scientific = "Aechmea mexicana Baker", type = "binary")
get_range(scientific = "Aechmea mexicana Baker", type = "suitability")
get_range(canonical = "Aechmea mexicana")
get_range(genus = "Aechmea")
get_range(genus = "Aechmea", type = "binary")
get_range(genus = c("Aechmea", "Zizkaea"))
get_range(subfamily = "Pitcairnioideae")
get_range(life_form = "epiphyte")
get_range(assessment = c("CR", "VU"))
get_range(range_size = c(1000, 10000))
}
| /man/get_range.Rd | permissive | kjrom-sol/bromeliad | R | false | true | 2,416 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_range.R
\name{get_range}
\alias{get_range}
\title{Select Bromeliaceae Species Ranges by Taxonomy and Traits}
\usage{
get_range(
scientific = NULL,
canonical = NULL,
genus = NULL,
subfamily = NULL,
life_form = NULL,
assessment = NULL,
range_size = NULL,
type = "polygon"
)
}
\arguments{
\item{scientific}{a character vector of full scientific names including authorities, of the species of interest}
\item{canonical}{a character vector of canonical names, of the species of interest.}
\item{genus}{a character vector of genera names to select.}
\item{subfamily}{a character vector of subfamily names to select.}
\item{life_form}{a character vector of life forms to select.}
\item{assessment}{a character vector of conservation assessment to select.}
\item{range_size}{a vector of two numericals with the minimum and maximum range size (in square kilometres), to select.}
\item{type}{a cahracter defining the output format, see details}
}
\value{
Depending on the \dQuote{type} argument. If \dQuote{binary} a presence/absence raster based on the modelled habitat suitability,
at 100x100 km resolution in Behrmann projection,
if \dQuote{suitability} the habitat suitability as predicted by an ensemble model in Behrmann projection, and
if {polygon} a simple feature object in lat/lon projection.
.
}
\description{
Get the geographic range for all species selected via the arguments.
The type of range estimate depends on the \dQuote{type} argument.
}
\details{
Modelled ranges are available for 542 species,
range polygons for 2395 species. For species with model distribution, the range polygons are based on the models, otherwise
on a convex hull around available occurrence records, or a 50 km buffer for species with less than 3 occurrence records available.
See Zizka et al 2019 for methods.
}
\examples{
get_range(scientific = "Aechmea mexicana Baker")
get_range(scientific = "Aechmea mexicana Baker", type = "binary")
get_range(scientific = "Aechmea mexicana Baker", type = "suitability")
get_range(canonical = "Aechmea mexicana")
get_range(genus = "Aechmea")
get_range(genus = "Aechmea", type = "binary")
get_range(genus = c("Aechmea", "Zizkaea"))
get_range(subfamily = "Pitcairnioideae")
get_range(life_form = "epiphyte")
get_range(assessment = c("CR", "VU"))
get_range(range_size = c(1000, 10000))
}
|
# -------------------------------------
#
# temp calibration
#
# -------------------------------------
# Exploratory script: calibrates a BPR (Bureau of Public Roads) volume-delay
# function for a travel-demand network, derives hourly link speeds, and
# experiments with SOM clustering of links.
# NOTE(review): interactive/exploratory code -- it relies on top-level
# break() calls to stop source()-ing early, recomputes and overwrites
# variables, and uses hard-coded absolute paths. Code left byte-identical;
# comments only.
rm(list=ls())  # NOTE(review): clears the workspace -- acceptable interactively, avoid in reusable code
library(vein)
library(sf)
library(sp)
library(ggplot2)
library(data.table)
library(kohonen)
library("viridis")
library(units)
library(dplyr)
setwd("L:/# DIRUR #/ASMEQ/bosistas/joaobazzo/master-thesis-repo1/")
# import data: link geometries (reprojected to WGS84, EPSG:4326) and the
# hourly trip-frequency profile
trips <- readr::read_rds("dados/Percursos/estendido/intersection-new_centroids_cap_adj.rds") %>%
  sf::st_transform(4326)
pc <- data.table::fread("dados/Pesquisa_OD_IPPUC/arquivos_saida/csv/estendida/fluxo_horario_deslocamentos.csv")
# hourly share of daily trips (relative frequencies, sum to 1); note that
# pc becomes a plain numeric vector after this line
pc <- pc[,rel := freq/sum(freq)][,rel]
# ---
# adjustment factor for the BPR function, derived from screen-line counts
counts <- sf::read_sf("dados/Pesquisa_OD_IPPUC/D536_015_BR/screen_line.shp") %>%
  data.table::data.table()
#counts[,razao := N_Autos_/Volume_Dia][,.(razao)] %>% as.vector() %>% summary()
#counts[,.(N_Autos_,Volume_Dia)] %>% summary()
# observed daily volume = cars + taxis + vans
counts <- counts[,V_dia := N_Autos_ + N_Taxis_ + N_Vans_][,.(V_dia,Volume_Dia)]
# per-screen-line ratio of modelled to counted volume; its 55th percentile
# is kept as a single scalar correction factor
adj <- (1 / counts[,V_dia/Volume_Dia])
adj <- quantile(adj,0.55) %>% as.numeric()
# --
#
# net
#
# --
# rebuild the network as an sp SpatialLinesDataFrame ("net"), carrying the
# original attributes plus link length in km
data <- trips %>% data.table::as.data.table()
data$lkm <- trips %>%
  sf::st_length() %>%
  units::set_units("m") %>%
  units::set_units("km")
net <- lapply(1:nrow(data), function(i){ # nrow(data)
  # i = 1
  # convert the i-th sf geometry into an sp Lines object (ID = row index)
  aux <- trips$geometry[[i]] %>%
    sf::st_coordinates() %>%
    as.data.frame() %>%
    dplyr::select("X","Y") %>%
    list() %>%
    sp::Line() %>%
    sp::Lines(ID=i)
  return(aux)
}) %>% sp::SpatialLines() %>%
  sp::SpatialLinesDataFrame(data=data)
(net@data$trips/10^6) %>% sum()  # total trips in millions, printed for inspection
# --
# speed [53817 x 24] ####
# --
FATOR = 1.2  # NOTE(review): ad-hoc scaling factor on trip volumes -- origin not documented
# hourly link volumes: (scaled daily trips) x (hourly share) -> links x 24
pcm <- as.matrix((FATOR * adj) * net@data$trips) %*% t( as.matrix(pc) )
net@data$trips <- net@data$trips * FATOR
# pcm[1,] %>% sum()
# net@data$trips[1] * adj
# speed index
# BPR volume-delay: speed = free-flow speed / (1 + alpha * (V/C)^beta)
vel <- as.data.frame(matrix(0,nrow = nrow(trips),ncol = ncol(pcm)))  # NOTE(review): dead -- immediately overwritten below
alpha <- 0.15; beta <- 4
vel <- lapply(1:nrow(trips), function(i){ # nrow(trips)
  # i = 1
  net@data$speed[i]/(1 + alpha*(pcm[i,]/net@data$cap[i])^beta)
}) %>% simplify2array() %>% t()
# --
# save
# --
break()  # NOTE(review): errors at top level; used deliberately to stop source() here
readr::write_rds(vel,"simulacoes/estendida/traffic_input/speed_ADD20CI_A37_PL_C_AUT.rds")
readr::write_rds(net,"simulacoes/estendida/traffic_input/net_ADD20CI_A37_PL_C_AUT.rds")
readr::write_rds(pc,"simulacoes/estendida/traffic_input/pc1.rds")
# --
# cluster analysis (self-organising map over normalised link attributes)
# --
trips1 <- as.data.table(trips)[,trip_cap := (trips * max(pc)) / cap][,.(tto, trip_cap,speed_km_h)]
# normalise each attribute so it sums to 1
trips1 <- trips1[,tto := tto/sum(tto)]
trips1 <- trips1[,trip_cap := trip_cap/sum(trip_cap)]
trips1 <- trips1[,speed_km_h := speed_km_h/sum(speed_km_h)]
training <- sample(nrow(trips1), 1000)  # NOTE(review): no set.seed -- sample is not reproducible
Xtraining <- scale(trips1[training, ])
somnet <- som(Xtraining, kohonen::somgrid(2, 2, "rectangular"))  # 2x2 SOM -> 4 link groups
# map every link to its best-matching SOM unit, using the training scaling.
# NOTE(review): map() must resolve to kohonen::map here, not purrr::map --
# attach order matters if purrr/tidyverse is ever loaded.
output <- map(somnet,
              scale(trips1, # trips1
                    center=attr(Xtraining, "scaled:center"),
                    scale=attr(Xtraining, "scaled:scale")))
#trips2 <- trips #trips[-training,]
trips$group <- output$unit.classif
# --
# net to sldf
# --
# data <- trips %>% data.table::as.data.table()
# data$lkm <- trips %>% sf::st_length() %>% units::set_units("m")
# net <- lapply(1:nrow(data), function(i){
#   aux <- trips$geometry[[i]] %>%
#     sf::st_coordinates() %>%
#     as.data.frame() %>%
#     dplyr::select("X","Y") %>%
#     list() %>%
#     sp::Line() %>%
#     sp::Lines(ID=i)
#   return(aux)
# }) %>% sp::SpatialLines() %>% sp::SpatialLinesDataFrame(data=data)
# --
#
# parameters
# trips2 <- as.data.table(trips)
# par(mfrow=c(2,3))
# visual check of excess travel time (c1) against volume/capacity (c2)
temp_trips <- as.data.table(trips)
c1 <- temp_trips$tto - 1
c2 <- (temp_trips$trips * 1.46 * max(pc)) / temp_trips$cap
df1 <- data.table::data.table(c1 = c1, c2 = c2,group = temp_trips$group)
ggplot()+
  geom_point(data = df1,aes(x=c1,y=c2,color=group))+
  scale_color_viridis()
break()
# fit the BPR parameters (alfa, beta) per SOM group by nonlinear least squares
for(i in unique(trips$group)){
  temp_trips <- as.data.table(trips)[group == i,]
  c1 <- temp_trips$tto - 1
  c2 <- (temp_trips$trips * max(pc)) / temp_trips$cap
  # plot(c1,c2,main = paste0("group ",i),xlim=c(0,2),ylim=c(0,3))
  # }
  ds <- data.table::data.table(c1,c2)
  nlc <- nls.control(maxiter = 1000)
  m <- nls(c1 ~ I(alfa * (c2)^beta), data = ds, control = nlc,
           start = list(alfa = 1.35, beta = 5),
           trace = F)
  # m <- nls(c2 ~ I((c1/alfa)^(1/beta)), data = ds,
  #          start = list(alfa = 0.15, beta = 2),
  #          trace = F)
  sm <- summary(m)
  alfa <- sm$parameters[[1]]
  beta <- sm$parameters[[2]]
  message(paste("alfa =",alfa))
  message(paste("beta =",beta))
  message(i)
}
trips2  # NOTE(review): trips2 is only defined in commented-out code above -- this line errors if run
# pooled (all-groups) fit of the BPR parameters
c1 <- trips$tto - 1
c2 <- (trips$trips * max(pc[,rel])) / trips$cap
ds <- data.table::data.table(c1,c2)
# function
#ds <- ds[1:100,]
m <- nls(c1 ~ I(alfa * (c2)^beta), data = ds,
         start = list(alfa = 0.15, beta = 5),
         trace = T)
summary(m)
dim(pcm)
# ---
# Profile Traffic Hour [24 x 1] ####
# ---
# NOTE(review): redefines rel as share of the PEAK hour (freq/max) and uses
# data.table syntax on pc, which was reduced to a numeric vector earlier --
# this section appears to assume pc was reloaded as a data.table.
pc[,rel := freq/max(freq)][,rel]
pc[,rel]
# --
# speed [79680 x 24] ####
# --
pcm <- as.matrix(net@data$ldv) %*%
  t( pc[,rel] %>% as.matrix() )
# speed index
# free-flow speed class bin edges (shifted by 1 for %between% inclusivity)
vec <- c(0,30,50,70,110,130) + 1
spind <- list()
i=1
# indices of links falling into each speed class
for(i in 1:5){
  spind[[i]] <- which(setDT(trips)[,speed] %between% c(vec[i],vec[i+1]))
}
# parameters
i=3
c2 <- lapply(spind[[i]],function(i){pcm[i,]}) %>% unlist()
c1 <- net@data$cap[rep(spind[[i]],each=24)]
x <- c2/c1
y <- rep(0.15,length(c1)) + rnorm(length(c1),0,1)  # synthetic noisy response to exercise the fit
ds <- data.frame(x = x,y = y)
# function
#ds <- ds[1:100,]
m <- nls(y ~ I(alfa * x^beta), data = ds,
         start = list(alfa = 0.25, beta = 5),
         trace = T)
summary(m)
dim(pcm)
# recompute BPR speeds with the fixed alpha/beta (same pattern as above)
vel <- as.data.frame(matrix(0,nrow = nrow(trips),ncol = ncol(pcm)))
alpha <- 0.15; beta <- 4
vel <- lapply(1:nrow(trips), function(i){ # nrow(trips)
  net@data$speed[i]/(1+alpha*(pcm[i,]/net@data$cap[i])^beta)
}) %>% simplify2array() %>% t()
# toy example: fit a known cubic with noise, to sanity-check nls() usage
set.seed(1485)
len <- 24
x <- runif(len)
y <- x^3 + rnorm(len, 0, 0.06)
ds <- data.frame(x = x, y = y)
str(ds)
plot(y ~ x, main = "Known cubic, with noise")
s <- seq(0, 1, length = 100)
lines(s, s^3, lty = 2, col = "green")
m <- nls(y ~ I(x^power + b), data = ds, start = list(power = 1, b= 0),trace = T)
m
summary(m)
| /modelo_transporte/09_bpr_par_calibration.R | no_license | Joaobazzo/Master-thesis-scripts | R | false | false | 6,310 | r | # -------------------------------------
#
# temp calibration
#
# -------------------------------------
rm(list=ls())
library(vein)
library(sf)
library(sp)
library(ggplot2)
library(data.table)
library(kohonen)
library("viridis")
library(units)
library(dplyr)
setwd("L:/# DIRUR #/ASMEQ/bosistas/joaobazzo/master-thesis-repo1/")
# importa dados
trips <- readr::read_rds("dados/Percursos/estendido/intersection-new_centroids_cap_adj.rds") %>%
sf::st_transform(4326)
pc <- data.table::fread("dados/Pesquisa_OD_IPPUC/arquivos_saida/csv/estendida/fluxo_horario_deslocamentos.csv")
pc <- pc[,rel := freq/sum(freq)][,rel]
# ---
# ajuste para funcao BPR
counts <- sf::read_sf("dados/Pesquisa_OD_IPPUC/D536_015_BR/screen_line.shp") %>%
data.table::data.table()
#counts[,razao := N_Autos_/Volume_Dia][,.(razao)] %>% as.vector() %>% summary()
#counts[,.(N_Autos_,Volume_Dia)] %>% summary()
counts <- counts[,V_dia := N_Autos_ + N_Taxis_ + N_Vans_][,.(V_dia,Volume_Dia)]
adj <- (1 / counts[,V_dia/Volume_Dia])
adj <- quantile(adj,0.55) %>% as.numeric()
# --
#
# net
#
# --
data <- trips %>% data.table::as.data.table()
data$lkm <- trips %>%
sf::st_length() %>%
units::set_units("m") %>%
units::set_units("km")
net <- lapply(1:nrow(data), function(i){ # nrow(data)
# i = 1
aux <- trips$geometry[[i]] %>%
sf::st_coordinates() %>%
as.data.frame() %>%
dplyr::select("X","Y") %>%
list() %>%
sp::Line() %>%
sp::Lines(ID=i)
return(aux)
}) %>% sp::SpatialLines() %>%
sp::SpatialLinesDataFrame(data=data)
(net@data$trips/10^6) %>% sum()
# --
# speed [53817 x 24] ####
# --
FATOR = 1.2
pcm <- as.matrix((FATOR * adj) * net@data$trips) %*% t( as.matrix(pc) )
net@data$trips <- net@data$trips * FATOR
# pcm[1,] %>% sum()
# net@data$trips[1] * adj
# speed index
vel <- as.data.frame(matrix(0,nrow = nrow(trips),ncol = ncol(pcm)))
alpha <- 0.15; beta <- 4
vel <- lapply(1:nrow(trips), function(i){ # nrow(trips)
# i = 1
net@data$speed[i]/(1 + alpha*(pcm[i,]/net@data$cap[i])^beta)
}) %>% simplify2array() %>% t()
# --
# save
# --
break()
readr::write_rds(vel,"simulacoes/estendida/traffic_input/speed_ADD20CI_A37_PL_C_AUT.rds")
readr::write_rds(net,"simulacoes/estendida/traffic_input/net_ADD20CI_A37_PL_C_AUT.rds")
readr::write_rds(pc,"simulacoes/estendida/traffic_input/pc1.rds")
# --
# analise de agrupamento
# --
trips1 <- as.data.table(trips)[,trip_cap := (trips * max(pc)) / cap][,.(tto, trip_cap,speed_km_h)]
trips1 <- trips1[,tto := tto/sum(tto)]
trips1 <- trips1[,trip_cap := trip_cap/sum(trip_cap)]
trips1 <- trips1[,speed_km_h := speed_km_h/sum(speed_km_h)]
training <- sample(nrow(trips1), 1000)
Xtraining <- scale(trips1[training, ])
somnet <- som(Xtraining, kohonen::somgrid(2, 2, "rectangular"))
output <- map(somnet,
scale(trips1, # trips1
center=attr(Xtraining, "scaled:center"),
scale=attr(Xtraining, "scaled:scale")))
#trips2 <- trips #trips[-training,]
trips$group <- output$unit.classif
# --
# net to sldf
# --
# data <- trips %>% data.table::as.data.table()
# data$lkm <- trips %>% sf::st_length() %>% units::set_units("m")
# net <- lapply(1:nrow(data), function(i){
# aux <- trips$geometry[[i]] %>%
# sf::st_coordinates() %>%
# as.data.frame() %>%
# dplyr::select("X","Y") %>%
# list() %>%
# sp::Line() %>%
# sp::Lines(ID=i)
# return(aux)
# }) %>% sp::SpatialLines() %>% sp::SpatialLinesDataFrame(data=data)
# --
#
# parameters
# trips2 <- as.data.table(trips)
# par(mfrow=c(2,3))
temp_trips <- as.data.table(trips)
c1 <- temp_trips$tto - 1
c2 <- (temp_trips$trips * 1.46 * max(pc)) / temp_trips$cap
df1 <- data.table::data.table(c1 = c1, c2 = c2,group = temp_trips$group)
ggplot()+
geom_point(data = df1,aes(x=c1,y=c2,color=group))+
scale_color_viridis()
break()
for(i in unique(trips$group)){
temp_trips <- as.data.table(trips)[group == i,]
c1 <- temp_trips$tto - 1
c2 <- (temp_trips$trips * max(pc)) / temp_trips$cap
# plot(c1,c2,main = paste0("group ",i),xlim=c(0,2),ylim=c(0,3))
# }
ds <- data.table::data.table(c1,c2)
nlc <- nls.control(maxiter = 1000)
m <- nls(c1 ~ I(alfa * (c2)^beta), data = ds, control = nlc,
start = list(alfa = 1.35, beta = 5),
trace = F)
# m <- nls(c2 ~ I((c1/alfa)^(1/beta)), data = ds,
# start = list(alfa = 0.15, beta = 2),
# trace = F)
sm <- summary(m)
alfa <- sm$parameters[[1]]
beta <- sm$parameters[[2]]
message(paste("alfa =",alfa))
message(paste("beta =",beta))
message(i)
}
trips2
c1 <- trips$tto - 1
c2 <- (trips$trips * max(pc[,rel])) / trips$cap
ds <- data.table::data.table(c1,c2)
# function
#ds <- ds[1:100,]
m <- nls(c1 ~ I(alfa * (c2)^beta), data = ds,
start = list(alfa = 0.15, beta = 5),
trace = T)
summary(m)
dim(pcm)
# ---
# Profile Traffic Hour [24 x 1] ####
# ---
pc[,rel := freq/max(freq)][,rel]
pc[,rel]
# --
# speed [79680 x 24] ####
# --
pcm <- as.matrix(net@data$ldv) %*%
t( pc[,rel] %>% as.matrix() )
# speed index
vec <- c(0,30,50,70,110,130) + 1
spind <- list()
i=1
for(i in 1:5){
spind[[i]] <- which(setDT(trips)[,speed] %between% c(vec[i],vec[i+1]))
}
# parameters
i=3
c2 <- lapply(spind[[i]],function(i){pcm[i,]}) %>% unlist()
c1 <- net@data$cap[rep(spind[[i]],each=24)]
x <- c2/c1
y <- rep(0.15,length(c1)) + rnorm(length(c1),0,1)
ds <- data.frame(x = x,y = y)
# function
#ds <- ds[1:100,]
m <- nls(y ~ I(alfa * x^beta), data = ds,
start = list(alfa = 0.25, beta = 5),
trace = T)
summary(m)
dim(pcm)
vel <- as.data.frame(matrix(0,nrow = nrow(trips),ncol = ncol(pcm)))
alpha <- 0.15; beta <- 4
vel <- lapply(1:nrow(trips), function(i){ # nrow(trips)
net@data$speed[i]/(1+alpha*(pcm[i,]/net@data$cap[i])^beta)
}) %>% simplify2array() %>% t()
set.seed(1485)
len <- 24
x <- runif(len)
y <- x^3 + rnorm(len, 0, 0.06)
ds <- data.frame(x = x, y = y)
str(ds)
plot(y ~ x, main = "Known cubic, with noise")
s <- seq(0, 1, length = 100)
lines(s, s^3, lty = 2, col = "green")
m <- nls(y ~ I(x^power + b), data = ds, start = list(power = 1, b= 0),trace = T)
m
summary(m)
|
# Vectors
a <- c(1,2,5.3,6,-2,4)
b <- c("one","two","three")
c <- c(TRUE,TRUE,TRUE,FALSE,TRUE,FALSE)
d <- c(1,"a",TRUE)
class(a)
class(b)
class(c)
class(d)
# Matrix
# generates 5 x 4 numeric matrix
mat1<-matrix(1:20, nrow=5,ncol=4)
mat1
mat2<-matrix(1:17, nrow=5,ncol=4,byrow=TRUE)
mat2
# Arguments
?matrix
# Lists
a<-c("a","b","c")
b <- 1:100
mylist<-list(c1=a,c2=b,c3=mat1)
mylist
mylist$c1
# Access values in an element using [[]]
mylist[[1]]
mylist[1]
mylist[1][1] # Does not yield desired result
mylist[[1]][1] # Correct way of writing code
# Data frames
d <- c(1,2,3,4)
e <- c("red", "white", "red", NA)
f <- c(TRUE,TRUE,TRUE,FALSE)
mydata <- data.frame(d,e,f)
mydata
names(mydata)
rownames(mydata)
names(mydata) <- c("ID","Color","Passed") # Rename variables
names(mydata)
mydata | /3_data_structures.r | no_license | shraban020/r-analytics | R | false | false | 850 | r | # Vectors
a <- c(1,2,5.3,6,-2,4)
b <- c("one","two","three")
c <- c(TRUE,TRUE,TRUE,FALSE,TRUE,FALSE)
d <- c(1,"a",TRUE)
class(a)
class(b)
class(c)
class(d)
# Matrix
# generates 5 x 4 numeric matrix
mat1<-matrix(1:20, nrow=5,ncol=4)
mat1
mat2<-matrix(1:17, nrow=5,ncol=4,byrow=TRUE)
mat2
# Arguments
?matrix
# Lists
a<-c("a","b","c")
b <- 1:100
mylist<-list(c1=a,c2=b,c3=mat1)
mylist
mylist$c1
# Access values in an element using [[]]
mylist[[1]]
mylist[1]
mylist[1][1] # Does not yield desired result
mylist[[1]][1] # Correct way of writing code
# Data frames
d <- c(1,2,3,4)
e <- c("red", "white", "red", NA)
f <- c(TRUE,TRUE,TRUE,FALSE)
mydata <- data.frame(d,e,f)
mydata
names(mydata)
rownames(mydata)
names(mydata) <- c("ID","Color","Passed") # Rename variables
names(mydata)
mydata |
#' Selection of variables from ESM/EMA study on smoking lapse.
#'
#' A dataset containing three psychological variables related to smoking lapse.
#' Obtained from:
#' Bolman, C., Verboon, P., Jacobs, N., Thewissen, V., Boonen, V., & Soons, K. (2018).
#' Predicting smoking lapses in the first week of quitting: an ecological momentary assessment study.
#' Journal of Addiction Medicine, 12 (1), 65-71.
#'
#'
#' @format A data frame with 2935 rows and 6 variables:
#' \describe{
#'   \item{subjnr}{subject number (N = 49)}
#'   \item{beepnr}{beep number: 1-10}
#'   \item{daynr}{day number: 1-7}
#'   \item{intention}{intention to quit smoking}
#'   \item{stress}{perceived stress}
#'   \item{positiveAffect}{perceived positive affect}
#'   ...
#' }
#' @source Bolman et al. (2018), Journal of Addiction Medicine, 12(1), 65-71.
#'   NOTE(review): the original source field ended in a bare, argument-less
#'   'url' markup tag, which breaks Rd generation; add the article URL or
#'   DOI here if available.
"smokedat"
| /cyclicESM/R/smokedat.R | no_license | PeterVerboon/Cyclic-models | R | false | false | 775 | r | #' Selection of variables from ESM/EMA study on smoking lapse.
#'
#' A dataset containing three psychological variables related to smoking lapse.
#' Obtained from:
#' Bolman, C., Verboon, P., Jacobs, N., Thewissen, V., Boonen, V., & Soons, K. (2018).
#' Predicting smoking lapses in the first week of quitting: an ecological momentary assessment study.
#' Journal of Addiction Medicine, 12 (1), 65-71.
#'
#'
#' @format A data frame with 2935 rows and 6 variables:
#' \describe{
#' \item{subjnr}{subject number (N = 49)}
#' \item{beepnr}{beep number: 1-10}
#' \item{daynr}{day number: 1-7}
#' \item{intention}{intention to quit smoking}
#' \item{stress}{perceived stress}
#' \item{positiveAffect}{perceived positiveAffect}
#' ...
#' }
#' @source \url
"smokedat"
|
context("scipy sparse matrix")
library(Matrix)
check_matrix_conversion <- function(r_matrix, python_matrix) {
# check that the conversion to python works
expect_true(all(py_to_r(python_matrix$toarray()) == as.matrix(r_matrix)))
# check that the conversion to r works
expect_true(all(py_to_r(python_matrix) == r_matrix))
# check that S3 methods work
expect_equal(dim(python_matrix), dim(r_matrix))
expect_equal(length(python_matrix), length(r_matrix))
}
# After serializing a Python object handle to disk and reloading it, the
# external pointer is NULL; the S3 methods should degrade gracefully:
# the R class survives, dim() is NULL, and length() is 0.
test_that("Conversion to scipy sparse matrix S3 methods behave with null pointers", {
  skip_on_cran()
  skip_if_no_scipy()
  # Random N x N sparse matrix; i and j are each permutations of 1:N,
  # so all row indices are distinct.
  N <- 1000
  x <- sparseMatrix(
    i = sample(N, N),
    j = sample(N, N),
    x = runif(N),
    dims = c(N, N))
  result <- r_to_py(x)
  # Round-trip the handle through an .rds file to force a null pointer.
  temp_file <- file.path(tempdir(), "sparse_matrix.rds")
  saveRDS(result, temp_file)
  result <- readRDS(temp_file)
  # check that S3 methods behave with null pointers
  expect_true(is(result, "scipy.sparse.csc.csc_matrix"))
  expect_true(is.null(dim(result)))
  expect_true(length(result) == 0L)
  # NOTE(review): not reached if an expectation above errors; consider
  # on.exit()/withr::defer() for guaranteed cleanup.
  file.remove(temp_file)
})
# dgCMatrix (column-compressed) <-> scipy CSC: the canonical conversion pair.
test_that("Conversion between Matrix::dgCMatrix and Scipy sparse CSC matrix works", {
  skip_on_cran()
  skip_if_no_scipy()
  N <- 1000
  # sparseMatrix() returns a dgCMatrix by default.
  x <- sparseMatrix(
    i = sample(N, N),
    j = sample(N, N),
    x = runif(N),
    dims = c(N, N))
  result <- r_to_py(x)
  # check that we are testing the right classes
  expect_true(is(result, "scipy.sparse.csc.csc_matrix"))
  expect_true(is(py_to_r(result), "dgCMatrix"))
  check_matrix_conversion(x, result)
})
# dgRMatrix (row-compressed) <-> scipy CSR.
test_that("Conversion between Matrix::dgRMatrix and Scipy sparse CSR matrix works", {
  skip_on_cran()
  skip_if_no_scipy()
  N <- 1000
  x <- sparseMatrix(
    i = sample(N, N),
    j = sample(N, N),
    x = runif(N),
    dims = c(N, N))
  # Coerce the default dgCMatrix to its row-compressed form.
  x <- as(x, "RsparseMatrix")
  result <- r_to_py(x)
  # check that we are testing the right classes
  expect_true(is(result, "scipy.sparse.csr.csr_matrix"))
  expect_true(is(py_to_r(result), "dgRMatrix"))
  check_matrix_conversion(x, result)
})
# dgTMatrix (triplet form) <-> scipy COO.
test_that("Conversion between Matrix::dgTMatrix and Scipy sparse COO matrix works", {
  skip_on_cran()
  skip_if_no_scipy()
  N <- 1000
  x <- sparseMatrix(
    i = sample(N, N),
    j = sample(N, N),
    x = runif(N),
    dims = c(N, N))
  # Coerce the default dgCMatrix to triplet (i, j, x) form.
  x <- as(x, "TsparseMatrix")
  result <- r_to_py(x)
  # check that we are testing the right classes
  expect_true(is(result, "scipy.sparse.coo.coo_matrix"))
  expect_true(is(py_to_r(result), "dgTMatrix"))
  check_matrix_conversion(x, result)
})
# A scipy format with no dedicated converter (LIL) should still round-trip:
# the conversion back to R falls back to the dgCMatrix path.
test_that("Conversion between Scipy sparse matrices without specific conversion functions works", {
  skip_on_cran()
  skip_if_no_scipy()
  N <- 1000
  x <- sparseMatrix(
    i = sample(N, N),
    j = sample(N, N),
    x = runif(N),
    dims = c(N, N))
  # Convert to CSC on the Python side, then reformat as LIL.
  result <- r_to_py(x)$tolil()
  # check that we are testing the right classes
  expect_true(is(result, "scipy.sparse.lil.lil_matrix"))
  expect_true(is(py_to_r(result), "dgCMatrix"))
  check_matrix_conversion(x, result)
})
# An R sparse class with no dedicated converter (symmetric dsCMatrix)
# should still convert: it goes to Python via the CSC path.
test_that("Conversion between R sparse matrices without specific conversion functions works", {
  skip_on_cran()
  skip_if_no_scipy()
  N <- 1000
  x <- sparseMatrix(
    i = sample(N, N),
    j = sample(N, N),
    x = runif(N),
    dims = c(N, N))
  # symmetrize: x + t(x) is symmetric, then coerce to the symmetric class.
  x <- x + t(x)
  x <- as(x, "symmetricMatrix")
  result <- r_to_py(x)
  # check that we are testing the right classes
  expect_true(is(x, "dsCMatrix"))
  expect_true(is(result, "scipy.sparse.csc.csc_matrix"))
  check_matrix_conversion(x, result)
})
| /tests/testthat/test-python-scipy-sparse-matrix.R | permissive | nadiahalidi/reticulate | R | false | false | 3,511 | r | context("scipy sparse matrix")
library(Matrix)
check_matrix_conversion <- function(r_matrix, python_matrix) {
# check that the conversion to python works
expect_true(all(py_to_r(python_matrix$toarray()) == as.matrix(r_matrix)))
# check that the conversion to r works
expect_true(all(py_to_r(python_matrix) == r_matrix))
# check that S3 methods work
expect_equal(dim(python_matrix), dim(r_matrix))
expect_equal(length(python_matrix), length(r_matrix))
}
test_that("Conversion to scipy sparse matrix S3 methods behave with null pointers", {
skip_on_cran()
skip_if_no_scipy()
N <- 1000
x <- sparseMatrix(
i = sample(N, N),
j = sample(N, N),
x = runif(N),
dims = c(N, N))
result <- r_to_py(x)
temp_file <- file.path(tempdir(), "sparse_matrix.rds")
saveRDS(result, temp_file)
result <- readRDS(temp_file)
# check that S3 methods behave with null pointers
expect_true(is(result, "scipy.sparse.csc.csc_matrix"))
expect_true(is.null(dim(result)))
expect_true(length(result) == 0L)
file.remove(temp_file)
})
test_that("Conversion between Matrix::dgCMatrix and Scipy sparse CSC matrix works", {
skip_on_cran()
skip_if_no_scipy()
N <- 1000
x <- sparseMatrix(
i = sample(N, N),
j = sample(N, N),
x = runif(N),
dims = c(N, N))
result <- r_to_py(x)
# check that we are testing the right classes
expect_true(is(result, "scipy.sparse.csc.csc_matrix"))
expect_true(is(py_to_r(result), "dgCMatrix"))
check_matrix_conversion(x, result)
})
test_that("Conversion between Matrix::dgRMatrix and Scipy sparse CSR matrix works", {
skip_on_cran()
skip_if_no_scipy()
N <- 1000
x <- sparseMatrix(
i = sample(N, N),
j = sample(N, N),
x = runif(N),
dims = c(N, N))
x <- as(x, "RsparseMatrix")
result <- r_to_py(x)
# check that we are testing the right classes
expect_true(is(result, "scipy.sparse.csr.csr_matrix"))
expect_true(is(py_to_r(result), "dgRMatrix"))
check_matrix_conversion(x, result)
})
test_that("Conversion between Matrix::dgTMatrix and Scipy sparse COO matrix works", {
skip_on_cran()
skip_if_no_scipy()
N <- 1000
x <- sparseMatrix(
i = sample(N, N),
j = sample(N, N),
x = runif(N),
dims = c(N, N))
x <- as(x, "TsparseMatrix")
result <- r_to_py(x)
# check that we are testing the right classes
expect_true(is(result, "scipy.sparse.coo.coo_matrix"))
expect_true(is(py_to_r(result), "dgTMatrix"))
check_matrix_conversion(x, result)
})
test_that("Conversion between Scipy sparse matrices without specific conversion functions works", {
skip_on_cran()
skip_if_no_scipy()
N <- 1000
x <- sparseMatrix(
i = sample(N, N),
j = sample(N, N),
x = runif(N),
dims = c(N, N))
result <- r_to_py(x)$tolil()
# check that we are testing the right classes
expect_true(is(result, "scipy.sparse.lil.lil_matrix"))
expect_true(is(py_to_r(result), "dgCMatrix"))
check_matrix_conversion(x, result)
})
test_that("Conversion between R sparse matrices without specific conversion functions works", {
skip_on_cran()
skip_if_no_scipy()
N <- 1000
x <- sparseMatrix(
i = sample(N, N),
j = sample(N, N),
x = runif(N),
dims = c(N, N))
# symmetrize
x <- x + t(x)
x <- as(x, "symmetricMatrix")
result <- r_to_py(x)
# check that we are testing the right classes
expect_true(is(x, "dsCMatrix"))
expect_true(is(result, "scipy.sparse.csc.csc_matrix"))
check_matrix_conversion(x, result)
})
|
# K-Nearest-Neighbours classification of NBA playoff wins.
# Reads the post-1973 team dataset, keeps five numeric features plus the
# Playoff.Win label, min-max normalises the features, then classifies the
# last 30 rows using the first 802 rows as training data (k = 29 ~ sqrt(832)).

# Read data from CSV and store in a data frame.
prc <- read.csv("post73_updated.csv", stringsAsFactors = FALSE)
str(prc) # check if structured
# Remove inessential columns - only keep features, classifier column.
KeepCols <- c("total.points", "assists", "shot.attempts.allowed", "free.throws.made",
  "made.shots.allowed", "Playoff.Win")
prc <- prc[, KeepCols]
prc
# See classifier column data.
table(prc$Playoff.Win)
prc$Playoff.Win <- factor(prc$Playoff.Win, levels = c("1", "0"), labels = c("Won Playoff", "Lost Playoff"))
table(prc$Playoff.Win)
# r=832, k=29
# Min-max normalise a numeric vector to [0, 1].
normalize <- function(x) {
  (x - min(x)) / (max(x) - min(x))
}
prc_n <- as.data.frame(lapply(prc[1:5], normalize))
# BUG FIX: the original inspected prc_n$shots.attemptec, a misspelled column
# that does not exist (so summary() quietly summarised NULL); the intended
# feature is shot.attempts.allowed.
summary(prc_n$shot.attempts.allowed)
# Setting training and testing data.
prc_train <- prc_n[1:802, ]
prc_test <- prc_n[803:832, ]
# Including classifier labels (column 6 = Playoff.Win).
prc_train_labels <- prc[1:802, 6]
prc_test_labels <- prc[803:832, 6]
# Install and use package 'class' for knn.
# install.packages("class")
library(class)
# Apply knn using k=29 and store in prc_test_pred.
prc_test_pred <- knn(train = prc_train, test = prc_test, cl = prc_train_labels, k = 29)
# Check if values in prc_test_pred match prc_test_labels.
# install.packages("gmodels")
library(gmodels)
CrossTable(x = prc_test_labels, y = prc_test_pred, prop.chisq = FALSE)
| /K-Nearest Neighbors/KNN wiith Feature Selection/73-05_Dataset/73-05_PlayoffWins.R | no_license | aayushagarwal7/Predicting_Good_NBA_Teams | R | false | false | 1,348 | r | #Read data from CSV and store in Data Frame
# K-Nearest-Neighbours classification of NBA playoff wins (duplicate copy of
# the script above; kept in sync).
prc <- read.csv("post73_updated.csv", stringsAsFactors = FALSE)
str(prc) # check if structured
# Remove inessential columns - only keep features, classifier column.
KeepCols <- c("total.points", "assists", "shot.attempts.allowed", "free.throws.made",
  "made.shots.allowed", "Playoff.Win")
prc <- prc[, KeepCols]
prc
# See classifier column data.
table(prc$Playoff.Win)
prc$Playoff.Win <- factor(prc$Playoff.Win, levels = c("1", "0"), labels = c("Won Playoff", "Lost Playoff"))
table(prc$Playoff.Win)
# r=832, k=29
# Min-max normalise a numeric vector to [0, 1].
normalize <- function(x) {
  (x - min(x)) / (max(x) - min(x))
}
prc_n <- as.data.frame(lapply(prc[1:5], normalize))
# BUG FIX: prc_n$shots.attemptec does not exist (typo); the intended feature
# is shot.attempts.allowed.
summary(prc_n$shot.attempts.allowed)
# Setting training and testing data.
prc_train <- prc_n[1:802, ]
prc_test <- prc_n[803:832, ]
# Including classifier labels (column 6 = Playoff.Win).
prc_train_labels <- prc[1:802, 6]
prc_test_labels <- prc[803:832, 6]
# Install and use package 'class' for knn.
# install.packages("class")
library(class)
# Apply knn using k=29 and store in prc_test_pred.
prc_test_pred <- knn(train = prc_train, test = prc_test, cl = prc_train_labels, k = 29)
# Check if values in prc_test_pred match prc_test_labels.
# install.packages("gmodels")
library(gmodels)
CrossTable(x = prc_test_labels, y = prc_test_pred, prop.chisq = FALSE)
|
#' Plot incidence, serial interval and R(t) panels from an EpiEstim fit.
#'
#' @param estimate_R_obj Object returned by EpiEstim::estimate_R().
#' @param agregar_importados Show imported cases in the incidence panel?
#' @return The arranged grob produced by gridExtra::grid.arrange().
plot_It_SI_Rt <- function(estimate_R_obj, agregar_importados = FALSE) {
  # Build the three individual panels, then stack them in a single column.
  panels <- list(
    plot(estimate_R_obj, "incid", add_imported_cases = agregar_importados),
    plot(estimate_R_obj, "SI"),
    plot(estimate_R_obj, "R")
  )
  gridExtra::grid.arrange(panels[[1]], panels[[2]], panels[[3]], ncol = 1)
}
| /funciones/plot_It_SI_Rt.R | permissive | Anahurtado1978/taller-curso-PUJ-Covid | R | false | false | 289 | r | plot_It_SI_Rt <- function(estimate_R_obj, agregar_importados = FALSE) {
p_I <- plot(estimate_R_obj, "incid", add_imported_cases = agregar_importados)
p_SI <- plot(estimate_R_obj, "SI")
p_Ri <- plot(estimate_R_obj, "R")
return(gridExtra::grid.arrange(p_I, p_SI, p_Ri, ncol = 1))
}
|
# test_that code for the overlap package
# library(testthat)
# library(overlap)
# test_file("./overlap/inst/tests/test-all.R")
# library() instead of require(): require() only warns and returns FALSE when
# the package is missing, which would let the tests fail later with a
# confusing "object not found" error instead of failing fast here.
library(overlap) # otherwise can't find simulatedData
context("Built-in data sets")
test_that("built-in data sets are unchanged", {
data(simulatedData)
expect_that(round(mean(tigerTrue), 6), equals(0.157957))
expect_that(round(mean(pigTrue), 6), equals(0.157913))
expect_that(round(mean(tigerObs), 6), equals(3.248677))
expect_that(round(mean(pigObs), 6), equals(3.328342))
data(kerinci)
expect_that(dim(kerinci), equals(c(1098, 3)))
expect_that(names(kerinci), equals(c("Zone", "Sps", "Time")))
expect_that(sum(kerinci$Time), equals(540.68))
expect_that(sum(kerinci$Zone), equals(2950))
expect_that(nlevels(kerinci$Sps), equals(8))
expect_that(summary(kerinci$Sps),
is_equivalent_to(c(28, 86, 104, 273, 200, 25, 181, 201)))
expect_that(levels(kerinci$Sps),
equals(c("boar", "clouded", "golden", "macaque", "muntjac",
"sambar", "tapir", "tiger")))
data(simCalls)
expect_that(dim(simCalls), equals(c(100, 2)))
expect_that(names(simCalls), equals(c("time", "dates")))
expect_that(round(sum(simCalls$time), 4), equals(210.7662))
expect_true(is.character(simCalls$dates))
} )
context("Main computation functions")
test_that("overlapTrue gives correct answer", {
data(simulatedData)
expect_that(overlapTrue(tigerTrue, pigTrue), equals(0.2910917, tolerance = 1e-6))
expect_that(overlapTrue(cbind(tigerTrue, pigTrue)), equals(0.2910917, tolerance = 1e-6))
})
test_that("densityFit gives correct answer", {
data(simulatedData)
expect_that(densityFit(tigerObs, c(0, pi/2, pi, 3*pi/2, 2*pi), 30),
equals(c(0.02440435, 0.44522913, 0.02179983, 0.50513539, 0.02440435), tolerance = 1e-7))
expect_that(densityFit(pigObs, c(0, pi/2, pi, 3*pi/2, 2*pi), 10),
equals(c(7.877244e-06, 4.522317e-02, 4.622752e-01, 1.216268e-01, 7.877244e-06),
tolerance = 1e-7))
})
test_that("getBandWidth gives correct answer", {
data(simulatedData)
# expect_that(getBandWidth(tigerObs), equals(29.90645, tolerance = 1e-7))
expect_that(getBandWidth(tigerObs), equals(29.90650, tolerance = 1e-5))
# expect_that(getBandWidth(pigObs), equals(10.42065, tolerance = 1e-7))
expect_that(getBandWidth(pigObs), equals(10.42076, tolerance = 1e-5))
})
test_that("overlapEst gives correct answer", {
data(simulatedData)
expect_that(round(overlapEst(tigerObs, pigObs), 5),
is_equivalent_to(c(0.29086, 0.26920, 0.22750)))
expect_that(
round(overlapEst(tigerObs, pigObs, adjust=c(1.2, 1.5, 1)), 5),
is_equivalent_to(c(0.31507, 0.28849, 0.23750)))
expect_that(
round(overlapEst(tigerObs, pigObs, adjust=c(NA, 1, NA)), 6),
is_equivalent_to(c(NA_real_, 0.269201, NA_real_)))
expect_that(
round(overlapEst(tigerObs, pigObs, type="Dhat4"), 6),
is_equivalent_to(0.269201))
})
test_that("sunTime gives correct answer", {
data(simCalls)
Dates <- as.POSIXct(simCalls$dates, tz="GMT")
coords <- matrix(c(-3, 56), nrow=1)
Coords <- sp::SpatialPoints(coords, proj4string=sp::CRS("+proj=longlat +datum=WGS84"))
st <- sunTime(simCalls$time, Dates, Coords)
expect_that(round(sum(st), 4), equals(207.0542))
})
# The sample() algorithm changed in R 3.6.0; the seeded expectations below
# are only valid with the new sampler.
stopifnot(getRversion() >= '3.6.0')
context("Bootstrap functions")
test_that("bootstrap smooth=TRUE gives correct answer", {
  data(simulatedData)
  set.seed(123)
  boots <- bootstrap(tigerObs, pigObs, nb=99)
  expect_that(round(mean(boots), 6),
    is_equivalent_to(0.345504))
  # set.seed(123) # parallel not reproducible
  # bootpar <- bootstrap(tigerObs, pigObs, nb=99, cores=2)
  # expect_that(round(mean(bootpar), 6),
  # is_equivalent_to(0.304968))
  set.seed(123)
  boots <- bootstrap(tigerObs, pigObs, nb=99, type="Dhat4")
  expect_that(round(mean(boots), 6),
    is_equivalent_to(0.33061))
})
test_that("bootstrap smooth=FALSE gives correct answer", {
  data(simulatedData)
  set.seed(123)
  boots <- bootstrap(tigerObs, pigObs, nb=99, smooth=FALSE)
  expect_that(round(mean(boots), 6),
    is_equivalent_to(0.28488))
  # set.seed(123) # parallel not reproducible
  # bootpar <- bootstrap(tigerObs, pigObs, nb=99, cores=2)
  # expect_that(round(mean(bootpar), 6),
  # is_equivalent_to(0.304968))
  set.seed(123)
  boots <- bootstrap(tigerObs, pigObs, nb=99, smooth=FALSE, type="Dhat4")
  expect_that(round(mean(boots), 6),
    is_equivalent_to(0.26333))
})
test_that("resample smooth=TRUE gives correct answer", {
  data(simulatedData)
  set.seed(123)
  tigSim <- resample(tigerObs, 5, TRUE)
  expect_that(round(colMeans(tigSim), 6),
    equals(c(3.088229, 3.459810, 3.103107, 3.149954, 3.055276)))
  pigSim <- resample(pigObs, 5, TRUE)
  expect_that(round(colMeans(pigSim), 6),
    equals(c(3.184782, 3.193389, 3.180786, 3.316040, 3.317885)))
  boots <- bootEst(tigSim, pigSim)
  expect_that(round(colMeans(boots), 6),
    is_equivalent_to(c(0.342983, 0.326681, 0.310500)))
  # The multi-core run must reproduce the serial result exactly.
  bootpar <- bootEst(tigSim, pigSim, cores=2)
  expect_that(round(colMeans(bootpar), 6),
    is_equivalent_to(c(0.342983, 0.326681, 0.310500)))
  boots <- bootEst(tigSim, pigSim, adjust=c(NA, 1, NA))
  expect_that(round(colMeans(boots), 6),
    is_equivalent_to(c(NA_real_, 0.326681, NA_real_)))
  boots <- bootEst(tigSim, pigSim, type="Dhat4")
  expect_that(round(mean(boots), 6),
    is_equivalent_to(0.326681))
})
test_that("resample smooth=FALSE gives correct answer", {
  data(simulatedData)
  set.seed(123)
  tigSim <- resample(tigerObs, 5, FALSE)
  expect_that(round(colMeans(tigSim), 6),
    equals(c(3.305859, 3.110860, 3.184909, 3.271987, 3.262150)))
  pigSim <- resample(pigObs, 5, FALSE)
  expect_that(round(colMeans(pigSim), 6),
    equals(c(3.347331, 3.524023, 3.279544, 3.265070, 3.374756)))
  boots <- bootEst(tigSim, pigSim)
  expect_that(round(colMeans(boots), 6),
    is_equivalent_to(c(0.281553, 0.260792, 0.207000)))
  bootpar <- bootEst(tigSim, pigSim, cores=2)
  expect_that(round(colMeans(bootpar), 6),
    is_equivalent_to(c(0.281553, 0.260792, 0.207000)))
  boots <- bootEst(tigSim, pigSim, adjust=c(NA, 1, NA))
  expect_that(round(colMeans(boots), 6),
    is_equivalent_to(c(NA_real_, 0.260792, NA_real_)))
  boots <- bootEst(tigSim, pigSim, type="Dhat4")
  expect_that(round(mean(boots), 6),
    is_equivalent_to(0.260792))
})
context("Confidence intervals")
test_that("bootCI gives same results as boot.ci for common CIs", {
require(boot)
set.seed(123)
dat <- runif(20)
mean.b <- function(d,p,...) mean(d[p])
bootout <- boot(dat, mean.b, 999)
t0 <- bootout$t0
bt <- as.vector(bootout$t)
expect_that(t0, equals(mean(dat)))
expect_that(bootCI(t0, bt)['norm',],
is_equivalent_to(boot.ci(bootout, 0.95, "norm")$norm[2:3]))
expect_that(bootCI(t0, bt)['basic',],
is_equivalent_to(boot.ci(bootout, 0.95, "basic")$basic[4:5]))
expect_that(bootCI(t0, bt)['perc',],
is_equivalent_to(boot.ci(bootout, 0.95, "perc")$perc[4:5]))
expect_that(bootCI(t0, bt, 0.8)['norm',],
is_equivalent_to(boot.ci(bootout, 0.8, "norm")$norm[2:3]))
expect_that(bootCI(t0, bt, 0.8)['basic',],
is_equivalent_to(boot.ci(bootout, 0.8, "basic")$basic[4:5]))
expect_that(bootCI(t0, bt, 0.8)['perc',],
is_equivalent_to(boot.ci(bootout, 0.8, "perc")$perc[4:5]))
expect_that(bootCIlogit(t0, bt)['norm',],
is_equivalent_to(boot.ci(bootout, 0.95, "norm", h=qlogis, hinv=plogis)$norm[2:3]))
expect_that(bootCIlogit(t0, bt)['basic',],
is_equivalent_to(boot.ci(bootout, 0.95, "basic", h=qlogis, hinv=plogis)$basic[4:5]))
} )
test_that("bootCI gives correct results", {
set.seed(123)
dat <- runif(20)
t0 <- sd(dat)
bootmat <- matrix(sample(dat, 20*999, replace=TRUE), 20, 999)
bt <- apply(bootmat, 2, sd)
expect_that(round(bootCI(t0, bt)['norm',], 6),
is_equivalent_to(c(0.257335, 0.389638)))
expect_that(round(bootCI(t0, bt)['perc',], 6),
is_equivalent_to(c(0.229293, 0.364734 )))
expect_that(round(bootCI(t0, bt)['basic',], 6),
is_equivalent_to(c(0.262208, 0.397649 )))
expect_that(round(bootCI(t0, bt)['norm0',], 6),
is_equivalent_to(c(0.247319, 0.379623 )))
expect_that(round(bootCI(t0, bt)['basic0',], 6),
is_equivalent_to(c(0.239309, 0.374750)))
} )
test_that("bootCIlogit gives correct results", {
set.seed(123)
dat <- runif(20)
t0 <- sd(dat)
bootmat <- matrix(sample(dat, 20*999, replace=TRUE), 20, 999)
bt <- apply(bootmat, 2, sd)
expect_that(round(bootCIlogit(t0, bt)['norm',], 6),
is_equivalent_to(c(0.258635, 0.398876)))
expect_that(round(bootCIlogit(t0, bt)['perc',], 6),
is_equivalent_to(c(0.229293, 0.364734)))
expect_that(round(bootCIlogit(t0, bt)['basic',], 6),
is_equivalent_to(c(0.266392, 0.412031)))
expect_that(round(bootCIlogit(t0, bt)['norm0',], 6),
is_equivalent_to(c(0.248729, 0.386398)))
expect_that(round(bootCIlogit(t0, bt)['basic0',], 6),
is_equivalent_to(c(0.238671, 0.376942)))
} )
context("Output from plotting functions")
test_that("densityPlot gives correct output", {
data(simulatedData)
foo <- densityPlot(pigObs)
expect_that(class(foo), equals("data.frame"))
expect_that(names(foo), equals(c("x", "y")))
expect_that(nrow(foo), equals(128))
wanted <- foo$x > 0 & foo$x < 24
expect_that(round(mean(foo$y[wanted]) * 24, 4), equals( 0.9961))
foo <- densityPlot(tigerObs, xscale = NA, xcenter = "m", n.grid=1024)
expect_that(class(foo), equals("data.frame"))
expect_that(names(foo), equals(c("x", "y")))
expect_that(nrow(foo), equals(1024))
wanted <- foo$x > -pi & foo$x < pi
expect_that(round(mean(foo$y[wanted]) * 2 * pi, 4), equals( 1.0004))
expect_error(densityPlot(factor(LETTERS)), "The times of observations must be in a numeric vector.")
expect_error(densityPlot(trees), "The times of observations must be in a numeric vector.")
expect_error(densityPlot(read.csv), "The times of observations must be in a numeric vector.")
expect_error(densityPlot(numeric(0)), "You have 0 different observations")
expect_error(densityPlot(2), "You have 1 different observations")
expect_error(densityPlot(rep(2, 5)), "You have 1 different observations")
expect_error(densityPlot(c(1,2,3,NA)), "Your data have missing values.")
expect_error(densityPlot(c(1,2,3,-2)), "You have times")
expect_error(densityPlot(c(1,2,3,10)), "You have times")
})
test_that("overlapPlot gives correct output", {
data(simulatedData)
foo <- overlapPlot(pigObs, tigerObs)
expect_that(class(foo), equals("data.frame"))
expect_that(names(foo), equals(c("x", "densityA", "densityB")))
expect_that(nrow(foo), equals(128))
wanted <- foo$x > 0 & foo$x < 24
expect_that(round(mean(foo$densityA[wanted]) * 24, 4), equals( 1.0079))
expect_that(round(mean(foo$densityB[wanted]) * 24, 4), equals( 1.0067))
foo <- overlapPlot(pigObs, tigerObs, xscale = NA, xcenter = "m", n.grid=1024)
expect_that(class(foo), equals("data.frame"))
expect_that(names(foo), equals(c("x", "densityA", "densityB")))
expect_that(nrow(foo), equals(1024))
wanted <- foo$x > -pi & foo$x < pi
expect_that(round(mean(foo$densityA[wanted]) * 2 * pi, 4), equals(0.9981))
expect_that(round(mean(foo$densityB[wanted]) * 2 * pi, 4), equals(1.0008))
expect_error(overlapPlot(pigObs, factor(LETTERS)), "The times of observations must be in a numeric vector.")
expect_error(overlapPlot(trees, pigObs), "The times of observations must be in a numeric vector.")
expect_error(overlapPlot(tigerObs, read.csv), "The times of observations must be in a numeric vector.")
expect_error(overlapPlot(numeric(0), tigerObs), "You have 0 different observations")
expect_error(overlapPlot(2, tigerObs), "You have 1 different observations")
expect_error(overlapPlot(rep(2, 5), pigObs), "You have 1 different observations")
expect_error(overlapPlot(pigObs, c(1,2,3,NA)), "Your data have missing values.")
expect_error(overlapPlot(c(1,2,3,-2), pigObs), "You have times")
expect_error(overlapPlot(c(1,2,3,10), tigerObs), "You have times")
})
graphics.off()
| /inst/tests/testthat/test-all.R | no_license | cran/overlap | R | false | false | 12,253 | r |
# test_that code for the overlap package
# library(testthat)
# library(overlap)
# test_file("./overlap/inst/tests/test-all.R")
# library() instead of require(): require() only warns and returns FALSE when
# the package is missing, which would let the tests fail later with a
# confusing "object not found" error instead of failing fast here.
library(overlap) # otherwise can't find simulatedData
context("Built-in data sets")
test_that("built-in data sets are unchanged", {
data(simulatedData)
expect_that(round(mean(tigerTrue), 6), equals(0.157957))
expect_that(round(mean(pigTrue), 6), equals(0.157913))
expect_that(round(mean(tigerObs), 6), equals(3.248677))
expect_that(round(mean(pigObs), 6), equals(3.328342))
data(kerinci)
expect_that(dim(kerinci), equals(c(1098, 3)))
expect_that(names(kerinci), equals(c("Zone", "Sps", "Time")))
expect_that(sum(kerinci$Time), equals(540.68))
expect_that(sum(kerinci$Zone), equals(2950))
expect_that(nlevels(kerinci$Sps), equals(8))
expect_that(summary(kerinci$Sps),
is_equivalent_to(c(28, 86, 104, 273, 200, 25, 181, 201)))
expect_that(levels(kerinci$Sps),
equals(c("boar", "clouded", "golden", "macaque", "muntjac",
"sambar", "tapir", "tiger")))
data(simCalls)
expect_that(dim(simCalls), equals(c(100, 2)))
expect_that(names(simCalls), equals(c("time", "dates")))
expect_that(round(sum(simCalls$time), 4), equals(210.7662))
expect_true(is.character(simCalls$dates))
} )
context("Main computation functions")
test_that("overlapTrue gives correct answer", {
data(simulatedData)
expect_that(overlapTrue(tigerTrue, pigTrue), equals(0.2910917, tolerance = 1e-6))
expect_that(overlapTrue(cbind(tigerTrue, pigTrue)), equals(0.2910917, tolerance = 1e-6))
})
test_that("densityFit gives correct answer", {
data(simulatedData)
expect_that(densityFit(tigerObs, c(0, pi/2, pi, 3*pi/2, 2*pi), 30),
equals(c(0.02440435, 0.44522913, 0.02179983, 0.50513539, 0.02440435), tolerance = 1e-7))
expect_that(densityFit(pigObs, c(0, pi/2, pi, 3*pi/2, 2*pi), 10),
equals(c(7.877244e-06, 4.522317e-02, 4.622752e-01, 1.216268e-01, 7.877244e-06),
tolerance = 1e-7))
})
test_that("getBandWidth gives correct answer", {
data(simulatedData)
# expect_that(getBandWidth(tigerObs), equals(29.90645, tolerance = 1e-7))
expect_that(getBandWidth(tigerObs), equals(29.90650, tolerance = 1e-5))
# expect_that(getBandWidth(pigObs), equals(10.42065, tolerance = 1e-7))
expect_that(getBandWidth(pigObs), equals(10.42076, tolerance = 1e-5))
})
test_that("overlapEst gives correct answer", {
data(simulatedData)
expect_that(round(overlapEst(tigerObs, pigObs), 5),
is_equivalent_to(c(0.29086, 0.26920, 0.22750)))
expect_that(
round(overlapEst(tigerObs, pigObs, adjust=c(1.2, 1.5, 1)), 5),
is_equivalent_to(c(0.31507, 0.28849, 0.23750)))
expect_that(
round(overlapEst(tigerObs, pigObs, adjust=c(NA, 1, NA)), 6),
is_equivalent_to(c(NA_real_, 0.269201, NA_real_)))
expect_that(
round(overlapEst(tigerObs, pigObs, type="Dhat4"), 6),
is_equivalent_to(0.269201))
})
test_that("sunTime gives correct answer", {
data(simCalls)
Dates <- as.POSIXct(simCalls$dates, tz="GMT")
coords <- matrix(c(-3, 56), nrow=1)
Coords <- sp::SpatialPoints(coords, proj4string=sp::CRS("+proj=longlat +datum=WGS84"))
st <- sunTime(simCalls$time, Dates, Coords)
expect_that(round(sum(st), 4), equals(207.0542))
})
# The sample() algorithm changed in R 3.6.0; the seeded expectations below
# are only valid with the new sampler.
stopifnot(getRversion() >= '3.6.0')
context("Bootstrap functions")
test_that("bootstrap smooth=TRUE gives correct answer", {
  data(simulatedData)
  set.seed(123)
  boots <- bootstrap(tigerObs, pigObs, nb=99)
  expect_that(round(mean(boots), 6),
    is_equivalent_to(0.345504))
  # set.seed(123) # parallel not reproducible
  # bootpar <- bootstrap(tigerObs, pigObs, nb=99, cores=2)
  # expect_that(round(mean(bootpar), 6),
  # is_equivalent_to(0.304968))
  set.seed(123)
  boots <- bootstrap(tigerObs, pigObs, nb=99, type="Dhat4")
  expect_that(round(mean(boots), 6),
    is_equivalent_to(0.33061))
})
test_that("bootstrap smooth=FALSE gives correct answer", {
  data(simulatedData)
  set.seed(123)
  boots <- bootstrap(tigerObs, pigObs, nb=99, smooth=FALSE)
  expect_that(round(mean(boots), 6),
    is_equivalent_to(0.28488))
  # set.seed(123) # parallel not reproducible
  # bootpar <- bootstrap(tigerObs, pigObs, nb=99, cores=2)
  # expect_that(round(mean(bootpar), 6),
  # is_equivalent_to(0.304968))
  set.seed(123)
  boots <- bootstrap(tigerObs, pigObs, nb=99, smooth=FALSE, type="Dhat4")
  expect_that(round(mean(boots), 6),
    is_equivalent_to(0.26333))
})
test_that("resample smooth=TRUE gives correct answer", {
  data(simulatedData)
  set.seed(123)
  tigSim <- resample(tigerObs, 5, TRUE)
  expect_that(round(colMeans(tigSim), 6),
    equals(c(3.088229, 3.459810, 3.103107, 3.149954, 3.055276)))
  pigSim <- resample(pigObs, 5, TRUE)
  expect_that(round(colMeans(pigSim), 6),
    equals(c(3.184782, 3.193389, 3.180786, 3.316040, 3.317885)))
  boots <- bootEst(tigSim, pigSim)
  expect_that(round(colMeans(boots), 6),
    is_equivalent_to(c(0.342983, 0.326681, 0.310500)))
  # The multi-core run must reproduce the serial result exactly.
  bootpar <- bootEst(tigSim, pigSim, cores=2)
  expect_that(round(colMeans(bootpar), 6),
    is_equivalent_to(c(0.342983, 0.326681, 0.310500)))
  boots <- bootEst(tigSim, pigSim, adjust=c(NA, 1, NA))
  expect_that(round(colMeans(boots), 6),
    is_equivalent_to(c(NA_real_, 0.326681, NA_real_)))
  boots <- bootEst(tigSim, pigSim, type="Dhat4")
  expect_that(round(mean(boots), 6),
    is_equivalent_to(0.326681))
})
test_that("resample smooth=FALSE gives correct answer", {
  data(simulatedData)
  set.seed(123)
  tigSim <- resample(tigerObs, 5, FALSE)
  expect_that(round(colMeans(tigSim), 6),
    equals(c(3.305859, 3.110860, 3.184909, 3.271987, 3.262150)))
  pigSim <- resample(pigObs, 5, FALSE)
  expect_that(round(colMeans(pigSim), 6),
    equals(c(3.347331, 3.524023, 3.279544, 3.265070, 3.374756)))
  boots <- bootEst(tigSim, pigSim)
  expect_that(round(colMeans(boots), 6),
    is_equivalent_to(c(0.281553, 0.260792, 0.207000)))
  bootpar <- bootEst(tigSim, pigSim, cores=2)
  expect_that(round(colMeans(bootpar), 6),
    is_equivalent_to(c(0.281553, 0.260792, 0.207000)))
  boots <- bootEst(tigSim, pigSim, adjust=c(NA, 1, NA))
  expect_that(round(colMeans(boots), 6),
    is_equivalent_to(c(NA_real_, 0.260792, NA_real_)))
  boots <- bootEst(tigSim, pigSim, type="Dhat4")
  expect_that(round(mean(boots), 6),
    is_equivalent_to(0.260792))
})
context("Confidence intervals")
test_that("bootCI gives same results as boot.ci for common CIs", {
require(boot)
set.seed(123)
dat <- runif(20)
mean.b <- function(d,p,...) mean(d[p])
bootout <- boot(dat, mean.b, 999)
t0 <- bootout$t0
bt <- as.vector(bootout$t)
expect_that(t0, equals(mean(dat)))
expect_that(bootCI(t0, bt)['norm',],
is_equivalent_to(boot.ci(bootout, 0.95, "norm")$norm[2:3]))
expect_that(bootCI(t0, bt)['basic',],
is_equivalent_to(boot.ci(bootout, 0.95, "basic")$basic[4:5]))
expect_that(bootCI(t0, bt)['perc',],
is_equivalent_to(boot.ci(bootout, 0.95, "perc")$perc[4:5]))
expect_that(bootCI(t0, bt, 0.8)['norm',],
is_equivalent_to(boot.ci(bootout, 0.8, "norm")$norm[2:3]))
expect_that(bootCI(t0, bt, 0.8)['basic',],
is_equivalent_to(boot.ci(bootout, 0.8, "basic")$basic[4:5]))
expect_that(bootCI(t0, bt, 0.8)['perc',],
is_equivalent_to(boot.ci(bootout, 0.8, "perc")$perc[4:5]))
expect_that(bootCIlogit(t0, bt)['norm',],
is_equivalent_to(boot.ci(bootout, 0.95, "norm", h=qlogis, hinv=plogis)$norm[2:3]))
expect_that(bootCIlogit(t0, bt)['basic',],
is_equivalent_to(boot.ci(bootout, 0.95, "basic", h=qlogis, hinv=plogis)$basic[4:5]))
} )
test_that("bootCI gives correct results", {
set.seed(123)
dat <- runif(20)
t0 <- sd(dat)
bootmat <- matrix(sample(dat, 20*999, replace=TRUE), 20, 999)
bt <- apply(bootmat, 2, sd)
expect_that(round(bootCI(t0, bt)['norm',], 6),
is_equivalent_to(c(0.257335, 0.389638)))
expect_that(round(bootCI(t0, bt)['perc',], 6),
is_equivalent_to(c(0.229293, 0.364734 )))
expect_that(round(bootCI(t0, bt)['basic',], 6),
is_equivalent_to(c(0.262208, 0.397649 )))
expect_that(round(bootCI(t0, bt)['norm0',], 6),
is_equivalent_to(c(0.247319, 0.379623 )))
expect_that(round(bootCI(t0, bt)['basic0',], 6),
is_equivalent_to(c(0.239309, 0.374750)))
} )
test_that("bootCIlogit gives correct results", {
set.seed(123)
dat <- runif(20)
t0 <- sd(dat)
bootmat <- matrix(sample(dat, 20*999, replace=TRUE), 20, 999)
bt <- apply(bootmat, 2, sd)
expect_that(round(bootCIlogit(t0, bt)['norm',], 6),
is_equivalent_to(c(0.258635, 0.398876)))
expect_that(round(bootCIlogit(t0, bt)['perc',], 6),
is_equivalent_to(c(0.229293, 0.364734)))
expect_that(round(bootCIlogit(t0, bt)['basic',], 6),
is_equivalent_to(c(0.266392, 0.412031)))
expect_that(round(bootCIlogit(t0, bt)['norm0',], 6),
is_equivalent_to(c(0.248729, 0.386398)))
expect_that(round(bootCIlogit(t0, bt)['basic0',], 6),
is_equivalent_to(c(0.238671, 0.376942)))
} )
context("Output from plotting functions")
test_that("densityPlot gives correct output", {
data(simulatedData)
foo <- densityPlot(pigObs)
expect_that(class(foo), equals("data.frame"))
expect_that(names(foo), equals(c("x", "y")))
expect_that(nrow(foo), equals(128))
wanted <- foo$x > 0 & foo$x < 24
expect_that(round(mean(foo$y[wanted]) * 24, 4), equals( 0.9961))
foo <- densityPlot(tigerObs, xscale = NA, xcenter = "m", n.grid=1024)
expect_that(class(foo), equals("data.frame"))
expect_that(names(foo), equals(c("x", "y")))
expect_that(nrow(foo), equals(1024))
wanted <- foo$x > -pi & foo$x < pi
expect_that(round(mean(foo$y[wanted]) * 2 * pi, 4), equals( 1.0004))
expect_error(densityPlot(factor(LETTERS)), "The times of observations must be in a numeric vector.")
expect_error(densityPlot(trees), "The times of observations must be in a numeric vector.")
expect_error(densityPlot(read.csv), "The times of observations must be in a numeric vector.")
expect_error(densityPlot(numeric(0)), "You have 0 different observations")
expect_error(densityPlot(2), "You have 1 different observations")
expect_error(densityPlot(rep(2, 5)), "You have 1 different observations")
expect_error(densityPlot(c(1,2,3,NA)), "Your data have missing values.")
expect_error(densityPlot(c(1,2,3,-2)), "You have times")
expect_error(densityPlot(c(1,2,3,10)), "You have times")
})
test_that("overlapPlot gives correct output", {
data(simulatedData)
foo <- overlapPlot(pigObs, tigerObs)
expect_that(class(foo), equals("data.frame"))
expect_that(names(foo), equals(c("x", "densityA", "densityB")))
expect_that(nrow(foo), equals(128))
wanted <- foo$x > 0 & foo$x < 24
expect_that(round(mean(foo$densityA[wanted]) * 24, 4), equals( 1.0079))
expect_that(round(mean(foo$densityB[wanted]) * 24, 4), equals( 1.0067))
foo <- overlapPlot(pigObs, tigerObs, xscale = NA, xcenter = "m", n.grid=1024)
expect_that(class(foo), equals("data.frame"))
expect_that(names(foo), equals(c("x", "densityA", "densityB")))
expect_that(nrow(foo), equals(1024))
wanted <- foo$x > -pi & foo$x < pi
expect_that(round(mean(foo$densityA[wanted]) * 2 * pi, 4), equals(0.9981))
expect_that(round(mean(foo$densityB[wanted]) * 2 * pi, 4), equals(1.0008))
expect_error(overlapPlot(pigObs, factor(LETTERS)), "The times of observations must be in a numeric vector.")
expect_error(overlapPlot(trees, pigObs), "The times of observations must be in a numeric vector.")
expect_error(overlapPlot(tigerObs, read.csv), "The times of observations must be in a numeric vector.")
expect_error(overlapPlot(numeric(0), tigerObs), "You have 0 different observations")
expect_error(overlapPlot(2, tigerObs), "You have 1 different observations")
expect_error(overlapPlot(rep(2, 5), pigObs), "You have 1 different observations")
expect_error(overlapPlot(pigObs, c(1,2,3,NA)), "Your data have missing values.")
expect_error(overlapPlot(c(1,2,3,-2), pigObs), "You have times")
expect_error(overlapPlot(c(1,2,3,10), tigerObs), "You have times")
})
graphics.off()
|
#TODO make encode windows compatible, will be a pain, need to remove perl dependencies, maybe best as an extension to markdown package rather than here.
#' knit a Rmd file and wrap it in bootstrap styles
#'
#' This function includes the knitrBootstrap html headers to wrap the knitr
#' output in bootstrap styled html.
#'
#' @param infile Rmd input file to knit
#' @param boot_style the bootstrap style to use, if NULL uses the default, if
#'   TRUE a menu is shown with the available styles.
#' @param code_style the highlight.js code style to use, if NULL uses the default, if
#'   TRUE a menu is shown with the available styles.
#' @param chooser if "boot", adds a bootstrap style chooser to the html, if
#'   "code" adds the bootstrap code chooser.
#' @param markdown_options markdown extensions to enable; passed through to
#'   \code{knit2html} (and from there to \code{markdownToHTML}).
#' @param graphics what graphics to use for the menus, only applicable if
#'   code_style or boot_style are true.
#' @param ... additional arguments which are passed to knit2html
#' @export
#' @examples
#' writeLines(c("# hello markdown", '```{r hello-random, echo=TRUE}', 'rnorm(5)', '```'), 'test.Rmd')
#' knit_bootstrap('test.Rmd', boot_style='Amelia', code_style='Dark', chooser=c('boot','code'))
#' if(interactive()) browseURL('test.html')
knit_bootstrap <-
  function(infile, boot_style=NULL, code_style=NULL, chooser=NULL,
    markdown_options=c('mathjax', 'base64_images', 'use_xhtml'),
    graphics = getOption("menu.graphics"), ...){

  header <- create_header(boot_style, code_style, chooser, graphics)

  # library() rather than require(): require() merely warns and returns FALSE
  # when a package is missing, deferring the failure to the knit2html() call
  # below with a far less helpful error.
  library(markdown)
  library(knitr)

  knit2html(
    infile,
    header=header,
    stylesheet='',
    options=markdown_options,
    ...
  )
}
# Template URLs. link_pattern anchors the <link rel="stylesheet"> tags in the
# bundled html template; the two default_* URLs are the hrefs the template
# ships with and are what create_header() substitutes.
# BUG FIX: the original source had the two default URLs swapped -- the
# "bootstrap" default pointed at a highlight.js stylesheet and the "code"
# default at the bootstrap stylesheet, so create_header() rewrote the wrong
# <link> tags.
style_url="http://netdna.bootstrapcdn.com/bootswatch/2.3.1/$style/bootstrap.min.css"
link_pattern='<link rel="stylesheet".*href="'
default_boot_style='http://netdna.bootstrapcdn.com/twitter-bootstrap/2.3.0/css/bootstrap-combined.min.css'
default_code_style='http://yandex.st/highlightjs/7.3/styles/vs.min.css'
# Resolve a style choice to a stylesheet URL.
#
# style: NULL for the default, TRUE to pick interactively from a menu, or the
#   name of an entry in style_type.
# style_type: named character vector mapping style names to stylesheet URLs;
#   the first element is the default.
# title: menu title shown when style is TRUE.
# graphics: forwarded to menu(); see ?menu.
#
# Returns a length-one (named) character vector with the chosen URL. Unknown
# style names silently fall back to the default (first) entry.
get_style <- function(style, style_type, title, graphics = getOption("menu.graphics")){
  if (!is.null(style) && style %in% names(style_type)) {
    style_type[style]
  } else if (isTRUE(style)) {
    # isTRUE() instead of `style == TRUE`: the `==` comparison coerces, so the
    # string "TRUE" would have accidentally triggered the interactive menu.
    style_type[menu(names(style_type), graphics, title)]
  } else {
    style_type[1]
  }
}
# Build the full html header used by knit_bootstrap().
#
# Resolves the bootstrap and code styles, rewrites the stylesheet links in
# the bundled template accordingly, appends the requested chooser snippets,
# writes the result under the package tmp/ directory and returns the path of
# the written file.
#
# boot_style/code_style: passed to get_style(); NULL means default, TRUE
#   prompts interactively.
# chooser: any of 'boot' and/or 'code'; selects which toggle snippets to add.
# graphics: forwarded to menu() for interactive style selection.
create_header <-
  function(boot_style=NULL, code_style=NULL, chooser=c('boot', 'code'),
    graphics = getOption("menu.graphics")){

  boot_style <- get_style(boot_style, boot_styles, 'Bootstrap Style', graphics)
  code_style <- get_style(code_style, code_styles, 'Code Block Style', graphics)

  package_root <- system.file(package='knitrBootstrap')
  header <- paste(package_root, 'templates/knitr_bootstrap.html', sep='/')
  header_lines <- file_lines(header)

  # Point the template's stylesheet <link> tags at the selected styles.
  # NOTE(review): the default_* URLs are used verbatim as regex patterns
  # (dots unescaped); fine in practice since they only match themselves here.
  header_lines <-
    gsub(paste('(', link_pattern, ')(', default_boot_style, ')', sep=''),
      paste('\\1', boot_style, '"', sep=''), header_lines)
  header_lines <-
    gsub(paste('(', link_pattern, ')(', default_code_style, ')', sep=''),
      paste('\\1', code_style, '"', sep=''), header_lines)

  chooser <- match.arg(chooser, several.ok=TRUE)

  # Collect the chooser snippets to append. BUG FIX: the original reassigned
  # `filenames` with a second `if` that had no else branch, so requesting
  # only the 'boot' chooser discarded the boot snippet again (filenames
  # ended up NULL).
  filenames <- character(0)
  if ('boot' %in% chooser) {
    filenames <- c(filenames,
      paste(package_root, 'templates/knitr_bootstrap_style_toggle.html', sep='/'))
  }
  if ('code' %in% chooser) {
    filenames <- c(filenames,
      paste(package_root, 'templates/knitr_bootstrap_code_style_toggle.html', sep='/'))
  }

  # NOTE(review): writing into the installed package directory is fragile
  # (may be read-only); consider tempfile() instead.
  outfile <- paste(package_root, 'tmp/knitr_bootstrap_full.html', sep='/')
  cat(paste(header_lines, append_files(filenames, outfile), sep='\n'), '\n', file=outfile)
  outfile
}
append_files <- function(files, output){
  # Concatenate the contents of `files` into one newline-separated string.
  # `output` is accepted for backwards compatibility but is not used here;
  # the caller decides where the combined text is written.
  if (length(files) == 0) return("")
  # vapply instead of mapply: guarantees a character(1) result per file.
  paste(vapply(files, file_lines, character(1)), collapse='\n')
}
file_lines <- function(file){
  # Read `file` and return its contents as a single newline-joined string.
  # Fails fast when the path does not exist.
  stopifnot(file.exists(file))
  contents <- readLines(file)
  paste(contents, collapse = '\n')
}
# Named vector mapping bootstrap/bootswatch theme names to their CDN
# stylesheet URLs.  Consumed by get_style(); the first entry ('Default')
# is the fallback used when no style is requested.
boot_styles = c(
'Default'='http://netdna.bootstrapcdn.com/twitter-bootstrap/2.3.0/css/bootstrap-combined.min.css',
'Amelia'='http://netdna.bootstrapcdn.com/bootswatch/2.3.1/amelia/bootstrap.min.css',
'Cerulean'='http://netdna.bootstrapcdn.com/bootswatch/2.3.1/cerulean/bootstrap.min.css',
'Cosmo'='http://netdna.bootstrapcdn.com/bootswatch/2.3.1/cosmo/bootstrap.min.css',
'Cyborg'='http://netdna.bootstrapcdn.com/bootswatch/2.3.1/cyborg/bootstrap.min.css',
'Journal'='http://netdna.bootstrapcdn.com/bootswatch/2.3.1/journal/bootstrap.min.css',
'Readable'='http://netdna.bootstrapcdn.com/bootswatch/2.3.1/readable/bootstrap.min.css',
'Simplex'='http://netdna.bootstrapcdn.com/bootswatch/2.3.1/simplex/bootstrap.min.css',
'Slate'='http://netdna.bootstrapcdn.com/bootswatch/2.3.1/slate/bootstrap.min.css',
'Spacelab'='http://netdna.bootstrapcdn.com/bootswatch/2.3.1/spacelab/bootstrap.min.css',
'Spruce'='http://netdna.bootstrapcdn.com/bootswatch/2.3.1/spruce/bootstrap.min.css',
'Superhero'='http://netdna.bootstrapcdn.com/bootswatch/2.3.1/superhero/bootstrap.min.css',
'United'='http://netdna.bootstrapcdn.com/bootswatch/2.3.1/united/bootstrap.min.css'
)
# Named vector mapping highlight.js code-block theme names to their CDN
# stylesheet URLs.  Consumed by get_style(); the first entry ('Default')
# is the fallback used when no style is requested.
code_styles = c(
'Default'='http://yandex.st/highlightjs/7.3/styles/default.min.css',
'Dark'='http://yandex.st/highlightjs/7.3/styles/dark.min.css',
'FAR'='http://yandex.st/highlightjs/7.3/styles/far.min.css',
'IDEA'='http://yandex.st/highlightjs/7.3/styles/idea.min.css',
'Sunburst'='http://yandex.st/highlightjs/7.3/styles/sunburst.min.css',
'Zenburn'='http://yandex.st/highlightjs/7.3/styles/zenburn.min.css',
'Visual Studio'='http://yandex.st/highlightjs/7.3/styles/vs.min.css',
'Ascetic'='http://yandex.st/highlightjs/7.3/styles/ascetic.min.css',
'Magula'='http://yandex.st/highlightjs/7.3/styles/magula.min.css',
'GitHub'='http://yandex.st/highlightjs/7.3/styles/github.min.css',
'Google Code'='http://yandex.st/highlightjs/7.3/styles/googlecode.min.css',
'Brown Paper'='http://yandex.st/highlightjs/7.3/styles/brown_paper.min.css',
'School Book'='http://yandex.st/highlightjs/7.3/styles/school_book.min.css',
'IR Black'='http://yandex.st/highlightjs/7.3/styles/ir_black.min.css',
'Solarized - Dark'='http://yandex.st/highlightjs/7.3/styles/solarized_dark.min.css',
'Solarized - Light'='http://yandex.st/highlightjs/7.3/styles/solarized_light.min.css',
'Arta'='http://yandex.st/highlightjs/7.3/styles/arta.min.css',
'Monokai'='http://yandex.st/highlightjs/7.3/styles/monokai.min.css',
'XCode'='http://yandex.st/highlightjs/7.3/styles/xcode.min.css',
'Pojoaque'='http://yandex.st/highlightjs/7.3/styles/pojoaque.min.css',
'Rainbow'='http://yandex.st/highlightjs/7.3/styles/rainbow.min.css',
'Tomorrow'='http://yandex.st/highlightjs/7.3/styles/tomorrow.min.css',
'Tomorrow Night'='http://yandex.st/highlightjs/7.3/styles/tomorrow-night.min.css',
'Tomorrow Night Bright'='http://yandex.st/highlightjs/7.3/styles/tomorrow-night-bright.min.css',
'Tomorrow Night Blue'='http://yandex.st/highlightjs/7.3/styles/tomorrow-night-blue.min.css',
'Tomorrow Night Eighties'='http://yandex.st/highlightjs/7.3/styles/tomorrow-night-eighties.min.css'
)
| /R/knit_bootstrap.R | permissive | fandres70/knitrBootstrap | R | false | false | 6,976 | r | #TODO make encode windows compatible, will be a pain, need to remove perl dependencies, maybe best as an extension to markdown package rather than here.
#' knit a Rmd file and wrap it in bootstrap styles
#'
#' This function includes the knitrBootstrap html headers to wrap the knitr
#' output in bootstrap styled html.
#'
#' @param infile Rmd input file to knit
#' @param boot_style the bootstrap style to use, if NULL uses the default, if
#' TRUE a menu is shown with the available styles.
#' @param code_style the highlight.js code style to use, if NULL uses the default, if
#' TRUE a menu is shown with the available styles.
#' @param chooser if "boot", adds a bootstrap style chooser to the html, if
#' "code" adds the bootstrap code chooser.
#' @param graphics what graphics to use for the menus, only applicable if
#' code_style or boot_style are true.
#' @param ... additional arguments which are passed to knit2html
#' @export
#' @examples
#' writeLines(c("# hello markdown", '```{r hello-random, echo=TRUE}', 'rnorm(5)', '```'), 'test.Rmd')
#' knit_bootstrap('test.Rmd', boot_style='Amelia', code_style='Dark', chooser=c('boot','code'))
#' if(interactive()) browseURL('test.html')
knit_bootstrap <-
  function(infile, boot_style=NULL, code_style=NULL, chooser=NULL,
           markdown_options=c('mathjax', 'base64_images', 'use_xhtml'),
           graphics = getOption("menu.graphics"), ...){

  # Build the HTML header with the requested bootstrap/code styles and,
  # optionally, the interactive style-chooser snippets injected.
  header = create_header(boot_style, code_style, chooser, graphics)

  # Use library() rather than require(): require() merely returns FALSE when
  # a package is missing, which would surface later as a confusing error at
  # the knit2html() call instead of failing fast here.
  library(markdown)
  library(knitr)

  # An empty stylesheet is passed because all styling is carried by `header`.
  knit2html(
    infile,
    header=header,
    stylesheet='',
    options=markdown_options,
    ...
  )
}
# Template used to build bootswatch style URLs ($style is substituted).
style_url="http://netdna.bootstrapcdn.com/bootswatch/2.3.1/$style/bootstrap.min.css"
# Regex prefix matching a stylesheet <link> tag in the HTML template.
link_pattern='<link rel="stylesheet".*href="'
# Default stylesheet URLs as they appear in templates/knitr_bootstrap.html;
# create_header() rewrites these links to the user's chosen styles.
# NOTE: the original code had these two values swapped (the bootstrap default
# pointed at a highlight.js stylesheet and vice versa), so the gsub() calls in
# create_header() could never match the intended <link> tags.
default_boot_style='http://netdna.bootstrapcdn.com/twitter-bootstrap/2.3.0/css/bootstrap-combined.min.css'
default_code_style='http://yandex.st/highlightjs/7.3/styles/vs.min.css'
get_style <- function(style, style_type, title, graphics = getOption("menu.graphics")){
  # Resolve a style selection to a single named URL from `style_type`.
  #
  # style      - NULL (use the default, i.e. the first entry), TRUE (prompt
  #              the user with a menu), or the name of an entry in style_type.
  # style_type - named character vector mapping style names to stylesheet URLs.
  # title      - title shown above the interactive menu.
  # graphics   - forwarded to menu(); only evaluated when style is TRUE.
  #
  # Returns a length-one named character vector (one entry of style_type).
  if (!is.null(style) && is.character(style) && style %in% names(style_type)) {
    # Explicit, known style name.
    return(style_type[style])
  }
  if (isTRUE(style)) {
    # isTRUE() replaces the original `style == TRUE`, which coerced character
    # input and treated the string "TRUE" as a request for the menu.
    return(style_type[menu(names(style_type), graphics, title)])
  }
  # Anything else (NULL, FALSE, unknown name) falls back to the default.
  style_type[1]
}
create_header <-
  function(boot_style=NULL, code_style=NULL, chooser=c('boot', 'code'),
           graphics = getOption("menu.graphics")){
  # Assemble the HTML header injected into the knitted document: the base
  # template with its stylesheet links rewritten to the chosen styles, plus
  # the optional style-chooser snippets.  Returns the path of the written file.

  boot_style=get_style(boot_style, boot_styles, 'Bootstrap Style', graphics)
  code_style=get_style(code_style, code_styles, 'Code Block Style', graphics)

  package_root = system.file(package='knitrBootstrap')
  header = paste(package_root, 'templates/knitr_bootstrap.html', sep='/')
  header_lines = file_lines(header)

  # Rewrite the bootstrap stylesheet link.
  # NOTE(review): the replacement appends a closing quote but the pattern does
  # not consume the template's original one; confirm against the template that
  # this does not produce a doubled quote.
  header_lines =
    gsub(paste('(', link_pattern, ')(', default_boot_style, ')', sep=''),
      paste('\\1', boot_style, '"', sep=''), header_lines)

  # Rewrite the code (highlight.js) stylesheet link.
  header_lines =
    gsub(paste('(', link_pattern, ')(', default_code_style, ')', sep=''),
      paste('\\1', code_style, '"', sep=''), header_lines)

  chooser = match.arg(chooser, several.ok=TRUE)

  # Collect the chooser snippets to append.  The original code assigned
  # `filenames = if('code' %in% chooser){...}` with no else branch, which
  # silently reset filenames to NULL whenever 'code' was not requested --
  # including the default path, since match.arg(NULL) returns 'boot'.
  filenames = character(0)
  if ('boot' %in% chooser) {
    filenames = c(filenames, paste(package_root, 'templates/knitr_bootstrap_style_toggle.html', sep='/'))
  }
  if ('code' %in% chooser) {
    filenames = c(filenames, paste(package_root, 'templates/knitr_bootstrap_code_style_toggle.html', sep='/'))
  }

  outfile = paste(package_root, 'tmp/knitr_bootstrap_full.html', sep='/')
  cat(paste(header_lines, append_files(filenames, outfile), sep='\n'), '\n', file=outfile)
  outfile
}
append_files <- function(files, output){
paste(mapply(file_lines, files), collapse='\n')
}
file_lines <- function(file){
stopifnot(file.exists(file))
paste(readLines(file), collapse='\n')
}
boot_styles = c(
'Default'='http://netdna.bootstrapcdn.com/twitter-bootstrap/2.3.0/css/bootstrap-combined.min.css',
'Amelia'='http://netdna.bootstrapcdn.com/bootswatch/2.3.1/amelia/bootstrap.min.css',
'Cerulean'='http://netdna.bootstrapcdn.com/bootswatch/2.3.1/cerulean/bootstrap.min.css',
'Cosmo'='http://netdna.bootstrapcdn.com/bootswatch/2.3.1/cosmo/bootstrap.min.css',
'Cyborg'='http://netdna.bootstrapcdn.com/bootswatch/2.3.1/cyborg/bootstrap.min.css',
'Journal'='http://netdna.bootstrapcdn.com/bootswatch/2.3.1/journal/bootstrap.min.css',
'Readable'='http://netdna.bootstrapcdn.com/bootswatch/2.3.1/readable/bootstrap.min.css',
'Simplex'='http://netdna.bootstrapcdn.com/bootswatch/2.3.1/simplex/bootstrap.min.css',
'Slate'='http://netdna.bootstrapcdn.com/bootswatch/2.3.1/slate/bootstrap.min.css',
'Spacelab'='http://netdna.bootstrapcdn.com/bootswatch/2.3.1/spacelab/bootstrap.min.css',
'Spruce'='http://netdna.bootstrapcdn.com/bootswatch/2.3.1/spruce/bootstrap.min.css',
'Superhero'='http://netdna.bootstrapcdn.com/bootswatch/2.3.1/superhero/bootstrap.min.css',
'United'='http://netdna.bootstrapcdn.com/bootswatch/2.3.1/united/bootstrap.min.css'
)
code_styles = c(
'Default'='http://yandex.st/highlightjs/7.3/styles/default.min.css',
'Dark'='http://yandex.st/highlightjs/7.3/styles/dark.min.css',
'FAR'='http://yandex.st/highlightjs/7.3/styles/far.min.css',
'IDEA'='http://yandex.st/highlightjs/7.3/styles/idea.min.css',
'Sunburst'='http://yandex.st/highlightjs/7.3/styles/sunburst.min.css',
'Zenburn'='http://yandex.st/highlightjs/7.3/styles/zenburn.min.css',
'Visual Studio'='http://yandex.st/highlightjs/7.3/styles/vs.min.css',
'Ascetic'='http://yandex.st/highlightjs/7.3/styles/ascetic.min.css',
'Magula'='http://yandex.st/highlightjs/7.3/styles/magula.min.css',
'GitHub'='http://yandex.st/highlightjs/7.3/styles/github.min.css',
'Google Code'='http://yandex.st/highlightjs/7.3/styles/googlecode.min.css',
'Brown Paper'='http://yandex.st/highlightjs/7.3/styles/brown_paper.min.css',
'School Book'='http://yandex.st/highlightjs/7.3/styles/school_book.min.css',
'IR Black'='http://yandex.st/highlightjs/7.3/styles/ir_black.min.css',
'Solarized - Dark'='http://yandex.st/highlightjs/7.3/styles/solarized_dark.min.css',
'Solarized - Light'='http://yandex.st/highlightjs/7.3/styles/solarized_light.min.css',
'Arta'='http://yandex.st/highlightjs/7.3/styles/arta.min.css',
'Monokai'='http://yandex.st/highlightjs/7.3/styles/monokai.min.css',
'XCode'='http://yandex.st/highlightjs/7.3/styles/xcode.min.css',
'Pojoaque'='http://yandex.st/highlightjs/7.3/styles/pojoaque.min.css',
'Rainbow'='http://yandex.st/highlightjs/7.3/styles/rainbow.min.css',
'Tomorrow'='http://yandex.st/highlightjs/7.3/styles/tomorrow.min.css',
'Tomorrow Night'='http://yandex.st/highlightjs/7.3/styles/tomorrow-night.min.css',
'Tomorrow Night Bright'='http://yandex.st/highlightjs/7.3/styles/tomorrow-night-bright.min.css',
'Tomorrow Night Blue'='http://yandex.st/highlightjs/7.3/styles/tomorrow-night-blue.min.css',
'Tomorrow Night Eighties'='http://yandex.st/highlightjs/7.3/styles/tomorrow-night-eighties.min.css'
)
|
# Standard error of the difference between two independent sample proportions
# (p1 from a sample of n1, p2 from a sample of n2).
p1 <- 0.257; n1 <- 1412
p2 <- 0.307; n2 <- 1213
SE <- sqrt(p1 * (1 - p1) / n1 + p2 * (1 - p2) / n2)
SE
| /inst/snippets/Example6.20.R | no_license | klaassenj/Lock5withR | R | false | false | 61 | r | SE <- sqrt(0.257*(1-0.257)/1412 + 0.307*(1-0.307)/1213); SE
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DL.R
\name{dl}
\alias{dl}
\title{Day length calculation}
\usage{
dl(latitude, DOY, model = "CBM", Tmax = NULL, Tmin = NULL, p = 0.5)
}
\arguments{
\item{latitude}{geographical coordinates in decimal degrees. It should be negative
for southern hemisphere}
\item{model}{character Type of model:"CBM" (Schoolfield, 1982),"BGC" (Running & Coughlan, 1988),
"CERES" (Ritchie, 1991) or "FAO56"}
\item{Tmax}{Numeric. Maximum air Temperature in degree Celsius}
\item{Tmin}{Numeric. Minimum air Temperature in degree Celsius}
\item{p}{numeric. CBM model parameter}
}
\value{
DL
}
\description{
Day length calculation
}
\examples{
DOY="2001-8-1"
latitude=0
model="CBM"
Tmax=31
Tmin=26
mod=dl(latitude,DOY,model,Tmax,Tmin=Tmin)
}
\author{
George Owusu
}
\references{
\itemize{
\item{}{Schoolfield R. (1982). Expressing daylength as a function of latitude and Julian date.}
\item{}{Running S. W. and Coughlan J. C. (1988). A general model of forest ecosystem processes
for regional applications I. Hydrologic balance, canopy gas exchange and primary production
processes. Ecological Modelling, 42(2), 125-154 http://dx.doi.org/10.1016/0304-3800(88)90112-3.}
\item{}{Ritchie J. T. (1991). Wheat Phasic Development. Modeling Plant and Soil Systems.
Agronomy Monograph, 31.}
\item{}{Allen R. G., Pereira L. S., Raes D. and Smith M. (1998). Crop evapotranspiration:
Guidelines for computing crop water requirements. FAO Irrigation and Drainage Paper, 56, 300.}
}
}
| /man/dl.Rd | no_license | gowusu/sebkc | R | false | true | 1,530 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DL.R
\name{dl}
\alias{dl}
\title{Day length calculation}
\usage{
dl(latitude, DOY, model = "CBM", Tmax = NULL, Tmin = NULL, p = 0.5)
}
\arguments{
\item{latitude}{geographical coordinates in decimal degrees. It should be negative
for southern hemisphere}
\item{model}{character Type of model:"CBM" (Schoolfield, 1982),"BGC" (Running & Coughlan, 1988),
"CERES" (Ritchie, 1991) or "FAO56"}
\item{Tmax}{Numeric. Maximum air Temperature in degree Celsius}
\item{Tmin}{Numeric. Minimum air Temperature in degree Celsius}
\item{p}{numeric. CMB parameter}
}
\value{
DL
}
\description{
Day length calculation
}
\examples{
DOY="2001-8-1"
latitude=0
model="CBM"
Tmax=31
Tmin=26
mod=dl(latitude,DOY,model,Tmax,Tmin=Tmin)
}
\author{
George Owusu
}
\references{
\itemize{
\item{}{Schoolfield R. (1982). Expressing daylength as a function of latitude and Julian date.}
\item{}{Running S. W. and Coughlan J. C. (1988). A general model of forest ecosystem processes
for regional applications I. Hydrologic balance, canopy gas exchange and primary production
processes. Ecological Modelling, 42(2), 125-154 http://dx.doi.org/10.1016/0304-3800(88)90112-3.}
\item{}{Ritchie J. T. (1991). Wheat Phasic Development. Modeling Plant and Soil Systems.
Agronomy Monograph, 31.}
\item{}{Allen R. G., Pereira L. S., Raes D. and Smith M. (1998). Crop evapotranspiration:
Guidelines for computing crop water requirements. FAO Irrigation and Drainage Paper, 56, 300.}
}
}
|
testlist <- list(phi = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x = c(1.36656528938164e-311, -1.65791256519293e+82, 1.29418168595419e-228, -1.8535350260597e+293, 8.08855267383463e-84, -4.03929894096111e-178, 6.04817943207006e-103, -1.66738461804717e-220, -8.8217241872956e-21, -7.84828807007467e-146, -7.48864562038427e+21, -1.00905374512e-187, 5.22970923741951e-218, 2.77992264324548e-197, -5.29147138128251e+140, -1.71332436886848e-93, -1.52261021137076e-52, 2.0627472502345e-21, 1.07149136185465e+184, 4.41748962512848e+47, -4.05885894997926e-142))
result <- do.call(dcurver:::ddc,testlist)
str(result) | /dcurver/inst/testfiles/ddc/AFL_ddc/ddc_valgrind_files/1609867372-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 830 | r | testlist <- list(phi = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), x = c(1.36656528938164e-311, -1.65791256519293e+82, 1.29418168595419e-228, -1.8535350260597e+293, 8.08855267383463e-84, -4.03929894096111e-178, 6.04817943207006e-103, -1.66738461804717e-220, -8.8217241872956e-21, -7.84828807007467e-146, -7.48864562038427e+21, -1.00905374512e-187, 5.22970923741951e-218, 2.77992264324548e-197, -5.29147138128251e+140, -1.71332436886848e-93, -1.52261021137076e-52, 2.0627472502345e-21, 1.07149136185465e+184, 4.41748962512848e+47, -4.05885894997926e-142))
result <- do.call(dcurver:::ddc,testlist)
str(result) |
#Load the functions and the packages
library(disparity)
source('~/STD/Analysis/disparity.R')
source('~/STD/Analysis/disparity_fun.R')
source('~/STD/Analysis/time.disparity.R')
###################
#Reading the files
###################
#Selecting the file
chain_name='Slater2013'
data_path='../../Data/'
file_matrix='../../Data/2013-Slater-MEE-matrix-morpho.nex'
file_tree='../../Data/2013-Slater-MEE-TEM.tre'
intervals=as.numeric(strsplit(c(noquote('170.300,168.300,166.100,163.500,157.300,152.100,145.000,139.800,132.900,129.400,125.000,113.000,100.500,93.900,89.800,86.300,83.600,72.100,66.000,61.600,59.200,56.000,47.800,41.300,38.000,33.900,28.100,23.030,23.030,20.440,15.970,13.820,11.620,7.246,5.333,0.000')), split=',')[[1]])
slices=as.numeric(strsplit(c(noquote('170,165,160,155,150,145,140,135,130,125,120,115,110,105,100,95,90,85,80,75,70,65,60,55,50,45,40,35,30,25,20,15,10,5,0')), split=',')[[1]])
FADLAD='../../Data/Slater2013_FADLAD.csv'
#matrix
Nexus_data<-ReadMorphNexus(file_matrix)
Nexus_matrix<-Nexus_data$matrix
#tree
Tree_data<-read.nexus(file_tree)
#FAD/LAD
FADLAD<-read.csv(FADLAD, row.names=1)
######################################
#Cleaning the matrices and the trees
######################################
#Remove species with only missing data before hand
if(any(apply(is.na(Nexus_matrix), 1, all))) {
Nexus_matrix<-Nexus_matrix[-c(which(apply(is.na(Nexus_matrix), 1, all))),]
}
#Cleaning the tree and the table
#making the saving folder
tree<-clean.tree(Tree_data, Nexus_matrix)
table<-clean.table(Nexus_matrix, Tree_data)
Nexus_data$matrix<-table
#Forcing the tree to be binary
tree<-bin.tree(tree)
#Adding node labels to the tree
tree<-lapply.root(tree, max(tree.age(tree)$age))
#load the distance matrix
load(paste(data_path, chain_name, '/', chain_name, '_distance-nodes95.Rda', sep='')) #dist_nodes95
trimmed_max_data_nodes95<-TrimMorphDistMatrix(dist_nodes95$gower.dist.matrix)
tree_nodes95<-drop.tip(tree, trimmed_max_data_nodes95$removed.taxa) ; tree_nodes95$root.time<-max(tree.age(tree_nodes95)[,1])
trimmed_max_data_nodes95$dist.matrix<-trimmed_max_data_nodes95$dist.matrix[c(tree_nodes95$tip.label, tree_nodes95$node.label),c(tree_nodes95$tip.label, tree_nodes95$node.label)]
#pco
pco_data_nodes95<-cmdscale(trimmed_max_data_nodes95$dist.matrix, k=nrow(trimmed_max_data_nodes95$dist.matrix) - 2, add=T)$points
#slices
pco_slices_nodes95_acc<-slice.pco(pco_data_nodes95, tree_nodes95, slices, method='acctran', FAD_LAD=FADLAD, verbose=TRUE, diversity=TRUE)
slices_nodes95_div<-pco_slices_nodes95_acc[[2]] ; pco_slices_nodes95_acc<-pco_slices_nodes95_acc[[1]]
#Disparity
disp_sli_nodes95_acc<-time.disparity(pco_slices_nodes95_acc, verbose=TRUE, rarefaction=TRUE, save.all=TRUE)
save(disp_sli_nodes95_acc, file=paste(data_path, chain_name, '/',chain_name,'-disp_sli_nodes95_acc.Rda', sep=''))
#Observed disparity
disp_sli_nodes95_acc_obs<-time.disparity(pco_slices_nodes95_acc, method='centroid', bootstraps=0, verbose=TRUE, rarefaction=TRUE, save.all=TRUE, centroid.type='full')
save(disp_sli_nodes95_acc_obs,file=paste(data_path, chain_name, '/',chain_name,'-disp_sli_nodes95_acc_obs.Rda', sep=''))
#Observed disparity (BS)
disp_sli_nodes95_acc_obs_BS<-time.disparity(pco_slices_nodes95_acc, method='centroid', bootstraps=1000, verbose=TRUE, rarefaction=TRUE, save.all=TRUE, centroid.type='full')
save(disp_sli_nodes95_acc_obs_BS,file=paste(data_path, chain_name, '/',chain_name,'-disp_sli_nodes95_acc_obs_BS.Rda', sep=''))
| /Analysis/Disparity_calculations/Slater2013_disparity_nodes95_sli_acc.R | no_license | yassato/SpatioTemporal_Disparity | R | false | false | 3,486 | r |
#Load the functions and the packages
library(disparity)
source('~/STD/Analysis/disparity.R')
source('~/STD/Analysis/disparity_fun.R')
source('~/STD/Analysis/time.disparity.R')
###################
#Reading the files
###################
#Selecting the file
chain_name='Slater2013'
data_path='../../Data/'
file_matrix='../../Data/2013-Slater-MEE-matrix-morpho.nex'
file_tree='../../Data/2013-Slater-MEE-TEM.tre'
intervals=as.numeric(strsplit(c(noquote('170.300,168.300,166.100,163.500,157.300,152.100,145.000,139.800,132.900,129.400,125.000,113.000,100.500,93.900,89.800,86.300,83.600,72.100,66.000,61.600,59.200,56.000,47.800,41.300,38.000,33.900,28.100,23.030,23.030,20.440,15.970,13.820,11.620,7.246,5.333,0.000')), split=',')[[1]])
slices=as.numeric(strsplit(c(noquote('170,165,160,155,150,145,140,135,130,125,120,115,110,105,100,95,90,85,80,75,70,65,60,55,50,45,40,35,30,25,20,15,10,5,0')), split=',')[[1]])
FADLAD='../../Data/Slater2013_FADLAD.csv'
#matrix
Nexus_data<-ReadMorphNexus(file_matrix)
Nexus_matrix<-Nexus_data$matrix
#tree
Tree_data<-read.nexus(file_tree)
#FAD/LAD
FADLAD<-read.csv(FADLAD, row.names=1)
######################################
#Cleaning the matrices and the trees
######################################
#Remove species with only missing data before hand
if(any(apply(is.na(Nexus_matrix), 1, all))) {
Nexus_matrix<-Nexus_matrix[-c(which(apply(is.na(Nexus_matrix), 1, all))),]
}
#Cleaning the tree and the table
#making the saving folder
tree<-clean.tree(Tree_data, Nexus_matrix)
table<-clean.table(Nexus_matrix, Tree_data)
Nexus_data$matrix<-table
#Forcing the tree to be binary
tree<-bin.tree(tree)
#Adding node labels to the tree
tree<-lapply.root(tree, max(tree.age(tree)$age))
#load the distance matrix
load(paste(data_path, chain_name, '/', chain_name, '_distance-nodes95.Rda', sep='')) #dist_nodes95
trimmed_max_data_nodes95<-TrimMorphDistMatrix(dist_nodes95$gower.dist.matrix)
tree_nodes95<-drop.tip(tree, trimmed_max_data_nodes95$removed.taxa) ; tree_nodes95$root.time<-max(tree.age(tree_nodes95)[,1])
trimmed_max_data_nodes95$dist.matrix<-trimmed_max_data_nodes95$dist.matrix[c(tree_nodes95$tip.label, tree_nodes95$node.label),c(tree_nodes95$tip.label, tree_nodes95$node.label)]
#pco
pco_data_nodes95<-cmdscale(trimmed_max_data_nodes95$dist.matrix, k=nrow(trimmed_max_data_nodes95$dist.matrix) - 2, add=T)$points
#slices
pco_slices_nodes95_acc<-slice.pco(pco_data_nodes95, tree_nodes95, slices, method='acctran', FAD_LAD=FADLAD, verbose=TRUE, diversity=TRUE)
slices_nodes95_div<-pco_slices_nodes95_acc[[2]] ; pco_slices_nodes95_acc<-pco_slices_nodes95_acc[[1]]
#Disparity
disp_sli_nodes95_acc<-time.disparity(pco_slices_nodes95_acc, verbose=TRUE, rarefaction=TRUE, save.all=TRUE)
save(disp_sli_nodes95_acc, file=paste(data_path, chain_name, '/',chain_name,'-disp_sli_nodes95_acc.Rda', sep=''))
#Observed disparity
disp_sli_nodes95_acc_obs<-time.disparity(pco_slices_nodes95_acc, method='centroid', bootstraps=0, verbose=TRUE, rarefaction=TRUE, save.all=TRUE, centroid.type='full')
save(disp_sli_nodes95_acc_obs,file=paste(data_path, chain_name, '/',chain_name,'-disp_sli_nodes95_acc_obs.Rda', sep=''))
#Observed disparity (BS)
disp_sli_nodes95_acc_obs_BS<-time.disparity(pco_slices_nodes95_acc, method='centroid', bootstraps=1000, verbose=TRUE, rarefaction=TRUE, save.all=TRUE, centroid.type='full')
save(disp_sli_nodes95_acc_obs_BS,file=paste(data_path, chain_name, '/',chain_name,'-disp_sli_nodes95_acc_obs_BS.Rda', sep=''))
|
## Name: Elizabeth Lee
## Date: 6/6/16
## Function: functions to export INLA results as data files and diagnostic figures -- specific to county scale
## Filenames: reference_data/USstate_shapefiles/gz_2010_us_040_00_500k
## Data Source: shapefile from US Census 2010 - https://www.census.gov/geo/maps-data/data/cbf/cbf_state.html
## Notes:
##
## useful commands:
## install.packages("pkg", dependencies=TRUE, lib="/usr/local/lib/R/site-library") # in sudo R
## update.packages(lib.loc = "/usr/local/lib/R/site-library")
require(RColorBrewer); require(ggplot2); require(maps); require(scales); require(classInt); require(data.table)
#### functions for diagnostic plots ################################
plot_countyChoro <- function(exportPath, pltDat, pltVarTxt, code, zeroes){
  # Draw a US county choropleth of the column named `pltVarTxt` in `pltDat`
  # and export it to file.
  #
  # exportPath - output image path (forwarded to ggsave).
  # pltDat     - data frame with a 'fips' column plus the column to plot.
  # pltVarTxt  - name (string) of the column to map.
  # code       - 'tier' for a binned map (jenks natural breaks),
  #              'gradient' for a continuous fill.
  # zeroes     - tier maps only: if TRUE, zero gets its own bin/colour.
  print(match.call())
  countyMap <- map_data("county")
  data(county.fips)
  # plot formatting
  h <- 5; w <- 8; dp <- 300
  # Merge the county.fips lookup into the county polygons so the map can be
  # keyed by 5-digit, zero-padded FIPS code (stored in 'region' for geom_map).
  polynameSplit <- tstrsplit(county.fips$polyname, ",")
  ctyMap <- tbl_df(county.fips) %>%
    mutate(fips = substr.Right(paste0("0", fips), 5)) %>%
    mutate(region = polynameSplit[[1]]) %>%
    mutate(subregion = polynameSplit[[2]]) %>%
    full_join(countyMap, by = c("region", "subregion")) %>%
    filter(!is.na(polyname) & !is.na(long)) %>%
    rename(state = region, county = subregion) %>%
    rename(region = fips) %>%
    select(-polyname)
  # tier choropleth
  if (code == 'tier'){
    # process data for tiers
    # 7/21/16: natural breaks w/ classIntervals
    pltDat <- pltDat %>% rename_(pltVar = pltVarTxt)
    # create natural break intervals with jenks algorithm
    intervals <- classIntervals(pltDat$pltVar[!is.na(pltDat$pltVar)], n = 5, style = "jenks")
    if (zeroes){
      # 0s have their own color: duplicate 0 in the break list so that
      # .bincode() creates a dedicated zero-width first bin.
      if (0 %in% intervals$brks){
        breakList <- intervals$brks
      } else {
        breakList <- c(0, intervals$brks)
      }
      breaks <- sort(c(0, breakList))
    } else{
      breaks <- c(intervals$brks)
    }
    breaksRound <- round(breaks, 1)
    breakLabels <- matrix(1:(length(breaksRound)-1))
    for (i in seq_along(breakLabels)){
      # create legend labels of the form "(lo-hi]"
      breakLabels[i] <- paste0("(",as.character(breaksRound[i]), "-", as.character(breaksRound[i+1]), "]")}
    # reverse order of break labels so zeros are green and larger values are red
    breakLabels <- rev(breakLabels)
    pltDat2 <- pltDat %>%
      mutate(pltVarBin = factor(.bincode(pltVar, breaks, right = TRUE, include.lowest = TRUE))) %>%
      mutate(pltVarBin = factor(pltVarBin, levels = rev(levels(pltVarBin))))
    choro <- ggplot() +
      geom_map(data = ctyMap, map = ctyMap, aes(x = long, y = lat, map_id = region)) +
      geom_map(data = pltDat2, map = ctyMap, aes(fill = pltVarBin, map_id = fips), color = "grey25", size = 0.15) +
      scale_fill_brewer(name = pltVarTxt, palette = "RdYlGn", label = breakLabels, na.value = "grey60") +
      expand_limits(x = ctyMap$long, y = ctyMap$lat) +
      theme_minimal() +
      theme(text = element_text(size = 18), axis.ticks = element_blank(), axis.text = element_blank(), axis.title = element_blank(), panel.grid = element_blank(), legend.position = "bottom")
  }
  # gradient choropleth
  else if (code == 'gradient'){
    # data for gradient has minimal processing
    pltDat <- pltDat %>% rename_(pltVar = pltVarTxt)
    choro <- ggplot() +
      geom_map(data = ctyMap, map = ctyMap, aes(x = long, y = lat, map_id=region)) +
      geom_map(data = pltDat, map = ctyMap, aes(fill = pltVar, map_id = fips), color = "grey25", size = 0.15) +
      scale_fill_continuous(name = pltVarTxt, low = "#f0fff0", high = "#006400") +
      expand_limits(x = ctyMap$long, y = ctyMap$lat) +
      theme_minimal() +
      theme(text = element_text(size = 18), axis.ticks = element_blank(), axis.text = element_blank(), axis.title = element_blank(), panel.grid = element_blank(), legend.position = "bottom")
  }
  else {
    # Guard: the original function silently fell through here, and ggsave()
    # then failed with "object 'choro' not found".
    stop("code must be 'tier' or 'gradient', got: ", code)
  }
  ggsave(exportPath, choro, height = h, width = w, dpi = dp)
}
################################
| /programs/source_export_inlaData_cty.R | no_license | eclee25/flu-SDI-scales | R | false | false | 4,103 | r |
## Name: Elizabeth Lee
## Date: 6/6/16
## Function: functions to export INLA results as data files and diagnostic figures -- specific to county scale
## Filenames: reference_data/USstate_shapefiles/gz_2010_us_040_00_500k
## Data Source: shapefile from US Census 2010 - https://www.census.gov/geo/maps-data/data/cbf/cbf_state.html
## Notes:
##
## useful commands:
## install.packages("pkg", dependencies=TRUE, lib="/usr/local/lib/R/site-library") # in sudo R
## update.packages(lib.loc = "/usr/local/lib/R/site-library")
require(RColorBrewer); require(ggplot2); require(maps); require(scales); require(classInt); require(data.table)
#### functions for diagnostic plots ################################
plot_countyChoro <- function(exportPath, pltDat, pltVarTxt, code, zeroes){
# draw state choropleth with tiers or gradient colors and export to file
print(match.call())
countyMap <- map_data("county")
data(county.fips)
# plot formatting
h <- 5; w <- 8; dp <- 300
# merge county data
polynameSplit <- tstrsplit(county.fips$polyname, ",")
ctyMap <- tbl_df(county.fips) %>%
mutate(fips = substr.Right(paste0("0", fips), 5)) %>%
mutate(region = polynameSplit[[1]]) %>%
mutate(subregion = polynameSplit[[2]]) %>%
full_join(countyMap, by = c("region", "subregion")) %>%
filter(!is.na(polyname) & !is.na(long)) %>%
rename(state = region, county = subregion) %>%
rename(region = fips) %>%
select(-polyname)
# tier choropleth
if (code == 'tier'){
# process data for tiers
# 7/21/16: natural breaks w/ classIntervals
pltDat <- pltDat %>% rename_(pltVar = pltVarTxt)
# create natural break intervals with jenks algorithm
intervals <- classIntervals(pltDat$pltVar[!is.na(pltDat$pltVar)], n = 5, style = "jenks")
if (zeroes){
# 0s have their own color
if (0 %in% intervals$brks){
breakList <- intervals$brks
} else {
breakList <- c(0, intervals$brks)
}
breaks <- sort(c(0, breakList))
} else{
breaks <- c(intervals$brks)
}
breaksRound <- round(breaks, 1)
breakLabels <- matrix(1:(length(breaksRound)-1))
for (i in 1:length(breakLabels)){
# create legend labels
breakLabels[i] <- paste0("(",as.character(breaksRound[i]), "-", as.character(breaksRound[i+1]), "]")}
# reverse order of break labels so zeros are green and larger values are red
breakLabels <- rev(breakLabels)
pltDat2 <- pltDat %>%
mutate(pltVarBin = factor(.bincode(pltVar, breaks, right = TRUE, include.lowest = TRUE))) %>%
mutate(pltVarBin = factor(pltVarBin, levels = rev(levels(pltVarBin))))
choro <- ggplot() +
geom_map(data = ctyMap, map = ctyMap, aes(x = long, y = lat, map_id = region)) +
geom_map(data = pltDat2, map = ctyMap, aes(fill = pltVarBin, map_id = fips), color = "grey25", size = 0.15) +
scale_fill_brewer(name = pltVarTxt, palette = "RdYlGn", label = breakLabels, na.value = "grey60") +
expand_limits(x = ctyMap$long, y = ctyMap$lat) +
theme_minimal() +
theme(text = element_text(size = 18), axis.ticks = element_blank(), axis.text = element_blank(), axis.title = element_blank(), panel.grid = element_blank(), legend.position = "bottom")
}
# gradient choropleth
else if (code == 'gradient'){
# data for gradient has minimal processing
pltDat <- pltDat %>% rename_(pltVar = pltVarTxt)
choro <- ggplot() +
geom_map(data = ctyMap, map = ctyMap, aes(x = long, y = lat, map_id=region)) +
geom_map(data = pltDat, map = ctyMap, aes(fill = pltVar, map_id = fips), color = "grey25", size = 0.15) +
scale_fill_continuous(name = pltVarTxt, low = "#f0fff0", high = "#006400") +
expand_limits(x = ctyMap$long, y = ctyMap$lat) +
theme_minimal() +
theme(text = element_text(size = 18), axis.ticks = element_blank(), axis.text = element_blank(), axis.title = element_blank(), panel.grid = element_blank(), legend.position = "bottom")
}
ggsave(exportPath, choro, height = h, width = w, dpi = dp)
}
################################
|
test_that("downloadTCGA() function works properly", {
  # Helper: download clinical data for one cancer type/date and check the
  # dimensions of the parsed clinical data frame.
  expect_dimsSize_equal <- function(parameters, dimsSize){
    # Use a dedicated subdirectory instead of tempdir() itself: the session
    # temp directory must never be removed, and the original
    # `unlink(tmp)` was both dangerous (tmp == tempdir()) and a no-op on
    # directories without recursive = TRUE.
    tmp <- file.path(tempdir(), "downloadTCGA_test")
    dir.create(tmp, showWarnings = FALSE, recursive = TRUE)
    on.exit(unlink(tmp, recursive = TRUE), add = TRUE)
    downloadTCGA( cancerTypes = parameters[[1]],
                  destDir = tmp,
                  date = parameters[[2]])
    # Locate the downloaded "Clinical" folder ...
    list.files(tmp) %>%
      grep("Clinical", x = ., value = TRUE) %>%
      file.path(tmp, .) -> folder
    # ... then read the merged clinical file inside it.
    folder %>%
      list.files() %>%
      grep("clin.merged", x = ., value=TRUE) %>%
      file.path(folder, .) %>%
      readTCGA(path = ., "clinical") -> clinical_data
    expect_equal( dim(clinical_data), dimsSize )
  }
  expect_dimsSize_equal( list( "ACC", "2015-06-01" ), c(92, 1115) )
})
expect_dimsSize_equal <- function(parameters, dimsSize){
tmp <- tempdir()
downloadTCGA( cancerTypes = parameters[[1]],
destDir = tmp,
date = parameters[[2]])
list.files(tmp) %>%
grep("Clinical", x = ., value = TRUE) %>%
file.path(tmp, .) -> folder
folder %>%
list.files() %>%
grep("clin.merged", x = ., value=TRUE) %>%
file.path(folder, .) %>%
readTCGA(path = ., "clinical") -> clinical_data
expect_equal( dim(clinical_data), dimsSize )
unlink(tmp)
}
expect_dimsSize_equal( list( "ACC", "2015-06-01" ), c(92, 1115) )
}) |
library(feisr)
### Name: feistest
### Title: Artificial Regression Test
### Aliases: feistest
### ** Examples
# Fit a fixed-effects individual-slopes model of log wages on marriage and
# enrolment status, with person-specific year slopes, using the 'mwp' panel.
data("mwp", package = "feisr")
feis.mod <- feis(lnw ~ marry + enrol | year,
data = mwp, id = "id", robust = TRUE)
# Run the artificial regression specification test (all variants) and print it.
ht <- feistest(feis.mod, robust = TRUE, type = "all")
summary(ht)
| /data/genthat_extracted_code/feisr/examples/feistest.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 314 | r | library(feisr)
### Name: feistest
### Title: Artificial Regression Test
### Aliases: feistest
### ** Examples
data("mwp", package = "feisr")
feis.mod <- feis(lnw ~ marry + enrol | year,
data = mwp, id = "id", robust = TRUE)
ht <- feistest(feis.mod, robust = TRUE, type = "all")
summary(ht)
|
################################################################
################################################################
##
## Funzioni per leggere i meteo
##
################################################################
################################################################
################################################################
## Read.folder reads every file in a folder
read.folder <- function (dpath,dateformat='NULL',checkdata=FALSE) {
  # Read every station file in the folder `dpath` and merge them by date.
  #
  # dpath      - path of the folder containing one file per station.
  # dateformat - date format forwarded to read.station(); the string 'NULL'
  #              is the "unset" sentinel (kept for backwards compatibility)
  #              and triggers an error.
  # checkdata  - if TRUE, print the growing data frame dimensions.
  #
  # Returns a single data frame keyed by the 'Date' column, built by a full
  # outer merge of all station files.
  if ( dateformat == 'NULL' ) {
    stop("\nDateformat is not set: read.folder(..., dateformat=)")
  }
  # List the station files.
  files <- list.files(dpath)
  nfiles <- length(files)
  if (nfiles == 0) {
    stop("No files found in folder: ", dpath)
  }
  # Read the first file to initialise the data frame.
  dataframe <- read.station(files[1],dpath,dateformat=dateformat,checkdata=checkdata)
  if (checkdata) {
    print('Dim dataframe in read.folder')
    print(dim(dataframe))
  }
  # Merge in the remaining files.  seq_len(nfiles)[-1] is empty when there is
  # a single file; the original `2:nfiles` would have produced c(2, 1) and
  # attempted to read a non-existent second file.
  for (ifile in seq_len(nfiles)[-1]) {
    r <- read.station(files[ifile],dpath,dateformat=dateformat,checkdata=checkdata)
    dataframe <- merge(dataframe,r,by="Date",all=TRUE,sort=TRUE)
    if (checkdata) {
      print('Dim dataframe in read.folder')
      print(dim(dataframe))
    }
  }
  return(dataframe)
}
################################################################
## Read.folder.img legge tutti i file in una cartella
## I file sono organizzati per anni
## Restituisce una lista
#' Read all LEAP .img maps under dpath (one sub-folder per year).
#'
#' File names are expected to look like "<prefix>_<year>_<month>_<decad>.img";
#' year/month/decad are parsed from the name.
#'
#' @param dpath Root directory; each sub-folder is scanned for .img files.
#' @param checkdata Unused; kept for interface compatibility.
#' @return A list with coordinate vectors lon/lat (taken from the first map
#'   read) plus parallel lists y, m, d, p holding year, month, decad and the
#'   data matrix of each map. NULL if no files were found.
read.folder.img <- function (dpath, checkdata = FALSE) {
  folders <- list.files(dpath)
  count <- 1
  r.list <- NULL
  # seq_along is empty-safe; the original 1:nfolders / 1:nfiles broke on
  # empty directories (1:0 iterates c(1, 0)).
  for (ifolder in seq_along(folders)) {
    ddpath <- paste0(dpath, folders[ifolder], "/")
    files <- list.files(ddpath)
    print(paste0(length(files), " files in folder: ", folders[ifolder]))
    for (ifile in seq_along(files)) {
      # Parse "<prefix>_<y>_<m>_<d>.img" into year / month / decad
      parts <- strsplit(as.character(strsplit(files[ifile], split = ".img")), split = "_")[[1]]
      y <- parts[2]
      m <- parts[3]
      d <- parts[4]
      r <- read.img.leap(files[ifile], ddpath, coord = T)
      if (count == 1) {
        # First map read: record the coordinate axes once
        r.list <- list(lon = r$x, lat = r$y, y = list(), m = list(), d = list(), p = list())
      }
      # The original duplicated these four assignments in both branches of
      # an if/else; they are identical, so do them once.
      r.list$y[[count]] <- y
      r.list$m[[count]] <- m
      r.list$d[[count]] <- d
      r.list$p[[count]] <- r$p
      count <- count + 1
    }
  }
  return(r.list)
}
################################################################
## Read.folder.img legge tutti i file in una cartella
## I file sono organizzati per anni
## Restituisce una lista di GridSpatialData
#' Read all LEAP .img maps under dpath as spatial grid objects.
#'
#' Like read.folder.img, but stores the readGDAL() result (a spatial grid
#' object) instead of a plain matrix.
#'
#' @param dpath Root directory; each sub-folder is scanned for .img files.
#' @param checkdata Unused; kept for interface compatibility.
#' @return A list of parallel lists y, m, d, p holding year, month, decad
#'   and the spatial object of each map.
read.folder.img.spatial <- function (dpath, checkdata = FALSE) {
  folders <- list.files(dpath)
  count <- 1
  r.list <- list(y = list(), m = list(), d = list(), p = list())
  # seq_along is empty-safe (the original 1:n pattern was not)
  for (ifolder in seq_along(folders)) {
    ddpath <- paste0(dpath, folders[ifolder], "/")
    files <- list.files(ddpath)
    print(paste0(length(files), " files in folder: ", folders[ifolder]))
    for (ifile in seq_along(files)) {
      # Parse "<prefix>_<y>_<m>_<d>.img" into year / month / decad
      parts <- strsplit(as.character(strsplit(files[ifile], split = ".img")), split = "_")[[1]]
      # PERF: the original also called read.img.leap() here and immediately
      # overwrote the result with readGDAL(); the dead call is removed.
      filename <- paste0(ddpath, files[ifile])
      r <- readGDAL(filename, silent = TRUE)
      r.list$y[[count]] <- parts[2]
      r.list$m[[count]] <- parts[3]
      r.list$d[[count]] <- parts[4]
      r.list$p[[count]] <- r
      count <- count + 1
    }
  }
  return(r.list)
}
################################################################
## Read.station legge i singoli file di stazione
#' Read a single station file (.xls or .csv) into a two-column data frame.
#'
#' @param filename File name "<station>.<ext>"; the station name (column
#'   label of the returned series) is the part before the extension.
#' @param dpath Directory containing the file (concatenated as-is, so it
#'   should end with a path separator).
#' @param dateformat Either "%Y%m%d" (month/day columns, one column per
#'   year) or "%d.%B.%Y" (day.month rows, one column per year); mandatory.
#' @param checkdata If TRUE, print column classes and dimensions while
#'   reading, for debugging.
#' @return Data frame with columns "Date" and <station name>; negative
#'   values are treated as missing (NA) and rows with invalid dates dropped.
read.station <- function (filename,dpath,dateformat='NULL',checkdata=FALSE) {
print(paste0('Reading: ',filename))
if ( dateformat == 'NULL' ) {
stop("\nDateformat is not set: read.folder(..., dateformat=)\n
Available dateformats:\n
%Y%m%d\n
%d.%B.%Y")
}
filepath <- paste0(dpath,filename)
# Split "<station>.<ext>" into extension and station name
fileformat <- unlist(strsplit(filename,'.',fixed=TRUE))[2]
namestation <- unlist(strsplit(filename,'.',fixed=TRUE))[1]
#Read the station file
#On Windows, the perl based routines (read.xls, xls2sep, xls2csv, xls2tab,
#xls2tsv, sheetNames, sheetCount) will work with ActiveState perl but not with
#Rtools perl.
#See http://cran.r-project.org/web/packages/gdata/INSTALL
# http://cran.at.r-project.org/doc/manuals/R-admin.html#The-Windows-toolset
if ( fileformat == 'xls' ) { dta <- read.xls(filepath) }
if ( fileformat == 'csv' ) { dta <- read.table(filepath,header=TRUE,sep=';') }
#Check class
if (checkdata) {
cclass<-'Check Class: '
for (i in 1:dim(dta)[2]) {
cclass<-paste(cclass,as.character(class(dta[[i]])))
}
print(cclass)
print(dim(dta))
}
# Layout: month/day in the first two columns, years spread across columns
# named "X<year>"; melt to long form then rebuild a Date per row.
if (dateformat == "%Y%m%d") {
dta <- melt(dta,c(1,2))
names(dta) <- c('month','day','year','Rain')
#
#Convert the dates
dta$year <- as.integer(sub('X','',dta$year))
# Build a yyyymmdd integer and parse it as a date-time
Date <- as.POSIXct(as.character(10000*dta$year+100*dta$month+dta$day),"",dateformat)
dta <- cbind.data.frame(Date,dta$Rain)
}
# Layout: "day.month" labels in the first column, years across columns
if (dateformat == "%d.%B.%Y") {
dta <- melt(dta,1)
names(dta) <- c('monthday','year','Rain')
#
#Convert the dates
dta$year <- as.integer(sub('X','',dta$year))
# Date <- as.POSIXct(paste0(as.character(dta$monthday),'.',as.character(dta$year)),"",dateformat)
Date <- as.Date(paste0(as.character(dta$monthday),'.',as.character(dta$year)),dateformat)
dta <- cbind.data.frame(Date,dta$Rain)
}
if (checkdata) {
print('Dim dta dopo converte le date')
print(dim(dta))
}
names(dta) <- c("Date",namestation)
# Negative values are a missing-data sentinel: flag them as NA
dta[namestation][dta[namestation]<0] <- NA
# }
# Drop rows whose date failed to parse (e.g. Feb 30 from the wide layout)
dta <- dta[!is.na(dta$Date),]
if (checkdata) {
print('Dim dta Dopo rimuovi na')
print(dim(dta))
}
return(dta)
}
################################################################
## Read.img.leap legge le singole mappe LEAP
#' Read a single LEAP .img raster map via GDAL.
#'
#' @param name File name of the map.
#' @param dpath Directory containing the file (concatenated as-is, so it
#'   should end with a path separator).
#' @param coord If TRUE, also compute cell-centre coordinate axes from the
#'   GDAL georeferencing info and return them alongside the data.
#' @return A list with matrix "p" and, when coord = TRUE, axis vectors
#'   "x" and "y".
read.img.leap<-function(name,dpath,coord=FALSE){
filename <- paste0(dpath,name)
x <- readGDAL(filename,silent=TRUE)
info <- GDALinfo(filename,silent=TRUE)
if (coord) {
# Cell-centre coordinates: lower-left origin plus half a cell of
# resolution in each direction
offs <- info[c("ll.x", "ll.y")]
scl <- info[c("res.x", "res.y")]
dimn <- info[c("columns", "rows")]
xs <- seq(offs[1], by = scl[1], length = dimn[1]) + scl[1]/2
ys <- seq(offs[2], by = scl[2], length = dimn[2]) + scl[2]/2
}
# Reshape band 1 into a matrix; info[2]/info[1] are assumed to be the
# raster's rows/columns counts -- TODO confirm against GDALinfo layout.
gg<-x$band1
g2<-matrix(gg,info[2],info[1])
# Reverse the column order (presumably to flip the raster's row origin so
# the y axis increases northward) -- verify against the coordinate vectors.
g2=g2[,rev(seq_len(ncol(g2)))]
#Return data with coordinates
if (coord) {
ret.data<-list(xs,ys,g2)
names(ret.data)<-c("x","y","p")
return (ret.data)
} else {
#Return data without coordinates
ret.data<-list(g2)
names(ret.data)<-c("p")
return (ret.data)
}
}
################################################################
## Read.station legge i singoli file di stazione
#' Read a Campbell-datalogger export (comma-separated, header row; must
#' contain "year" and "doy" columns) and prepend month/day columns.
#'
#' @param filename File name of the datalogger export.
#' @param dpath Directory containing the file.
#' @param dateformat Unused; kept for interface symmetry with read.station.
#' @param checkdata Unused; kept for interface symmetry with read.station.
#' @return The input table with "month" and "day" columns prepended and the
#'   -9999.00 sentinel values replaced by NaN.
read.station.campbell <- function (filename,dpath,dateformat='NULL',checkdata=FALSE) {
#Read data in the format of the Campbell datalogger
#Adds directly information on the day and month
#Flag NaN
filepath <- paste0(dpath,filename)
dta <- read.table(filepath,header=TRUE,sep=',')
# Day-of-year origin: Dec 31 of the previous year, so doy = 1 maps to Jan 1
origin <- as.Date(paste0(dta$year, "-01-01"),tz = "UTC") - days(1)
#Compute date
dta_date <- as.Date(dta$doy, origin = origin, tz = "UTC")
ret.data <- cbind.data.frame(month=month(dta_date),day=day(dta_date), dta)
# Replace the logger's -9999.00 missing-value sentinel with NaN
#Flag NaNs
ret.data[which(ret.data==-9999.00,arr.ind=T)] <- NaN
return(ret.data)
}
} | /readtools.R | no_license | sandrocalmanti/med-gold | R | false | false | 7,984 | r | ################################################################
################################################################
##
## Funzioni per leggere i meteo
##
################################################################
################################################################
################################################################
## Read.folder legge tutti i file in una cartella
#' Read every station file in a folder and merge them by date.
#'
#' @param dpath Directory containing the station files.
#' @param dateformat Date format string forwarded to read.station
#'   (e.g. "%Y%m%d" or "%d.%B.%Y"); must be supplied explicitly.
#' @param checkdata If TRUE, print diagnostic dimensions while reading.
#' @return A data frame with a "Date" column plus one column per station.
read.folder <- function (dpath, dateformat = 'NULL', checkdata = FALSE) {
  if (dateformat == 'NULL') {
    stop("\nDateformat is not set: read.folder(..., dateformat=)")
  }
  # List the files in the folder
  files <- list.files(dpath)
  nfiles <- length(files)
  if (nfiles == 0) {
    stop("No files found in: ", dpath)
  }
  # Read the first file to initialise the merged data frame
  dataframe <- read.station(files[1], dpath, dateformat = dateformat, checkdata = checkdata)
  if (checkdata) {
    print('Dim dataframe in read.folder')
    print(dim(dataframe))
  }
  # BUGFIX: the original 'for (ifile in 2:nfiles)' iterated over c(2, 1)
  # when nfiles == 1, re-reading the first file and indexing files[NA].
  # seq_len(nfiles - 1) + 1 is empty in that case.
  for (ifile in seq_len(nfiles - 1) + 1) {
    r <- read.station(files[ifile], dpath, dateformat = dateformat, checkdata = checkdata)
    # Full outer join on the date so stations with gaps still line up
    dataframe <- merge(dataframe, r, by = "Date", all = TRUE, sort = TRUE)
    if (checkdata) {
      print('Dim dataframe in read.folder')
      print(dim(dataframe))
    }
  }
  return(dataframe)
}
################################################################
## Read.folder.img legge tutti i file in una cartella
## I file sono organizzati per anni
## Restituisce una lista
#' Read all LEAP .img maps under dpath (one sub-folder per year).
#'
#' File names are expected to look like "<prefix>_<year>_<month>_<decad>.img";
#' year/month/decad are parsed from the name.
#'
#' @param dpath Root directory; each sub-folder is scanned for .img files.
#' @param checkdata Unused; kept for interface compatibility.
#' @return A list with coordinate vectors lon/lat (taken from the first map
#'   read) plus parallel lists y, m, d, p holding year, month, decad and the
#'   data matrix of each map. NULL if no files were found.
read.folder.img <- function (dpath, checkdata = FALSE) {
  folders <- list.files(dpath)
  count <- 1
  r.list <- NULL
  # seq_along is empty-safe; the original 1:nfolders / 1:nfiles broke on
  # empty directories (1:0 iterates c(1, 0)).
  for (ifolder in seq_along(folders)) {
    ddpath <- paste0(dpath, folders[ifolder], "/")
    files <- list.files(ddpath)
    print(paste0(length(files), " files in folder: ", folders[ifolder]))
    for (ifile in seq_along(files)) {
      # Parse "<prefix>_<y>_<m>_<d>.img" into year / month / decad
      parts <- strsplit(as.character(strsplit(files[ifile], split = ".img")), split = "_")[[1]]
      y <- parts[2]
      m <- parts[3]
      d <- parts[4]
      r <- read.img.leap(files[ifile], ddpath, coord = T)
      if (count == 1) {
        # First map read: record the coordinate axes once
        r.list <- list(lon = r$x, lat = r$y, y = list(), m = list(), d = list(), p = list())
      }
      # The original duplicated these four assignments in both branches of
      # an if/else; they are identical, so do them once.
      r.list$y[[count]] <- y
      r.list$m[[count]] <- m
      r.list$d[[count]] <- d
      r.list$p[[count]] <- r$p
      count <- count + 1
    }
  }
  return(r.list)
}
################################################################
## Read.folder.img legge tutti i file in una cartella
## I file sono organizzati per anni
## Restituisce una lista di GridSpatialData
#' Read all LEAP .img maps under dpath as spatial grid objects.
#'
#' Like read.folder.img, but stores the readGDAL() result (a spatial grid
#' object) instead of a plain matrix.
#'
#' @param dpath Root directory; each sub-folder is scanned for .img files.
#' @param checkdata Unused; kept for interface compatibility.
#' @return A list of parallel lists y, m, d, p holding year, month, decad
#'   and the spatial object of each map.
read.folder.img.spatial <- function (dpath, checkdata = FALSE) {
  folders <- list.files(dpath)
  count <- 1
  r.list <- list(y = list(), m = list(), d = list(), p = list())
  # seq_along is empty-safe (the original 1:n pattern was not)
  for (ifolder in seq_along(folders)) {
    ddpath <- paste0(dpath, folders[ifolder], "/")
    files <- list.files(ddpath)
    print(paste0(length(files), " files in folder: ", folders[ifolder]))
    for (ifile in seq_along(files)) {
      # Parse "<prefix>_<y>_<m>_<d>.img" into year / month / decad
      parts <- strsplit(as.character(strsplit(files[ifile], split = ".img")), split = "_")[[1]]
      # PERF: the original also called read.img.leap() here and immediately
      # overwrote the result with readGDAL(); the dead call is removed.
      filename <- paste0(ddpath, files[ifile])
      r <- readGDAL(filename, silent = TRUE)
      r.list$y[[count]] <- parts[2]
      r.list$m[[count]] <- parts[3]
      r.list$d[[count]] <- parts[4]
      r.list$p[[count]] <- r
      count <- count + 1
    }
  }
  return(r.list)
}
################################################################
## Read.station legge i singoli file di stazione
#' Read a single station file (.xls or .csv) into a two-column data frame.
#'
#' @param filename File name "<station>.<ext>"; the station name (column
#'   label of the returned series) is the part before the extension.
#' @param dpath Directory containing the file (concatenated as-is, so it
#'   should end with a path separator).
#' @param dateformat Either "%Y%m%d" (month/day columns, one column per
#'   year) or "%d.%B.%Y" (day.month rows, one column per year); mandatory.
#' @param checkdata If TRUE, print column classes and dimensions while
#'   reading, for debugging.
#' @return Data frame with columns "Date" and <station name>; negative
#'   values are treated as missing (NA) and rows with invalid dates dropped.
read.station <- function (filename,dpath,dateformat='NULL',checkdata=FALSE) {
print(paste0('Reading: ',filename))
if ( dateformat == 'NULL' ) {
stop("\nDateformat is not set: read.folder(..., dateformat=)\n
Available dateformats:\n
%Y%m%d\n
%d.%B.%Y")
}
filepath <- paste0(dpath,filename)
# Split "<station>.<ext>" into extension and station name
fileformat <- unlist(strsplit(filename,'.',fixed=TRUE))[2]
namestation <- unlist(strsplit(filename,'.',fixed=TRUE))[1]
#Read the station file
#On Windows, the perl based routines (read.xls, xls2sep, xls2csv, xls2tab,
#xls2tsv, sheetNames, sheetCount) will work with ActiveState perl but not with
#Rtools perl.
#See http://cran.r-project.org/web/packages/gdata/INSTALL
# http://cran.at.r-project.org/doc/manuals/R-admin.html#The-Windows-toolset
if ( fileformat == 'xls' ) { dta <- read.xls(filepath) }
if ( fileformat == 'csv' ) { dta <- read.table(filepath,header=TRUE,sep=';') }
#Check class
if (checkdata) {
cclass<-'Check Class: '
for (i in 1:dim(dta)[2]) {
cclass<-paste(cclass,as.character(class(dta[[i]])))
}
print(cclass)
print(dim(dta))
}
# Layout: month/day in the first two columns, years spread across columns
# named "X<year>"; melt to long form then rebuild a Date per row.
if (dateformat == "%Y%m%d") {
dta <- melt(dta,c(1,2))
names(dta) <- c('month','day','year','Rain')
#
#Convert the dates
dta$year <- as.integer(sub('X','',dta$year))
# Build a yyyymmdd integer and parse it as a date-time
Date <- as.POSIXct(as.character(10000*dta$year+100*dta$month+dta$day),"",dateformat)
dta <- cbind.data.frame(Date,dta$Rain)
}
# Layout: "day.month" labels in the first column, years across columns
if (dateformat == "%d.%B.%Y") {
dta <- melt(dta,1)
names(dta) <- c('monthday','year','Rain')
#
#Convert the dates
dta$year <- as.integer(sub('X','',dta$year))
# Date <- as.POSIXct(paste0(as.character(dta$monthday),'.',as.character(dta$year)),"",dateformat)
Date <- as.Date(paste0(as.character(dta$monthday),'.',as.character(dta$year)),dateformat)
dta <- cbind.data.frame(Date,dta$Rain)
}
if (checkdata) {
print('Dim dta dopo converte le date')
print(dim(dta))
}
names(dta) <- c("Date",namestation)
# Negative values are a missing-data sentinel: flag them as NA
dta[namestation][dta[namestation]<0] <- NA
# }
# Drop rows whose date failed to parse (e.g. Feb 30 from the wide layout)
dta <- dta[!is.na(dta$Date),]
if (checkdata) {
print('Dim dta Dopo rimuovi na')
print(dim(dta))
}
return(dta)
}
################################################################
## Read.img.leap legge le singole mappe LEAP
#' Read a single LEAP .img raster map via GDAL.
#'
#' @param name File name of the map.
#' @param dpath Directory containing the file (concatenated as-is, so it
#'   should end with a path separator).
#' @param coord If TRUE, also compute cell-centre coordinate axes from the
#'   GDAL georeferencing info and return them alongside the data.
#' @return A list with matrix "p" and, when coord = TRUE, axis vectors
#'   "x" and "y".
read.img.leap<-function(name,dpath,coord=FALSE){
filename <- paste0(dpath,name)
x <- readGDAL(filename,silent=TRUE)
info <- GDALinfo(filename,silent=TRUE)
if (coord) {
# Cell-centre coordinates: lower-left origin plus half a cell of
# resolution in each direction
offs <- info[c("ll.x", "ll.y")]
scl <- info[c("res.x", "res.y")]
dimn <- info[c("columns", "rows")]
xs <- seq(offs[1], by = scl[1], length = dimn[1]) + scl[1]/2
ys <- seq(offs[2], by = scl[2], length = dimn[2]) + scl[2]/2
}
# Reshape band 1 into a matrix; info[2]/info[1] are assumed to be the
# raster's rows/columns counts -- TODO confirm against GDALinfo layout.
gg<-x$band1
g2<-matrix(gg,info[2],info[1])
# Reverse the column order (presumably to flip the raster's row origin so
# the y axis increases northward) -- verify against the coordinate vectors.
g2=g2[,rev(seq_len(ncol(g2)))]
#Return data with coordinates
if (coord) {
ret.data<-list(xs,ys,g2)
names(ret.data)<-c("x","y","p")
return (ret.data)
} else {
#Return data without coordinates
ret.data<-list(g2)
names(ret.data)<-c("p")
return (ret.data)
}
}
################################################################
## Read.station legge i singoli file di stazione
#' Read a Campbell-datalogger export (comma-separated, header row; must
#' contain "year" and "doy" columns) and prepend month/day columns.
#'
#' @param filename File name of the datalogger export.
#' @param dpath Directory containing the file.
#' @param dateformat Unused; kept for interface symmetry with read.station.
#' @param checkdata Unused; kept for interface symmetry with read.station.
#' @return The input table with "month" and "day" columns prepended and the
#'   -9999.00 sentinel values replaced by NaN.
read.station.campbell <- function (filename,dpath,dateformat='NULL',checkdata=FALSE) {
#Read data in the format of the Campbell datalogger
#Adds directly information on the day and month
#Flag NaN
filepath <- paste0(dpath,filename)
dta <- read.table(filepath,header=TRUE,sep=',')
# Day-of-year origin: Dec 31 of the previous year, so doy = 1 maps to Jan 1
origin <- as.Date(paste0(dta$year, "-01-01"),tz = "UTC") - days(1)
#Compute date
dta_date <- as.Date(dta$doy, origin = origin, tz = "UTC")
ret.data <- cbind.data.frame(month=month(dta_date),day=day(dta_date), dta)
# Replace the logger's -9999.00 missing-value sentinel with NaN
#Flag NaNs
ret.data[which(ret.data==-9999.00,arr.ind=T)] <- NaN
return(ret.data)
}
} |
require(data.table); require(lubridate); require(caret); require(sqldf); require(xgboost); require(xlsx); require(dplyr); require(readr); require(doParallel); require(bit64)
#rm(list = ls())
Spanish2English <- fread("D:\\kaggle\\SANTANDER\\DATA\\Spanish2English.csv", data.table = F)
train_raw <- fread("D:\\kaggle\\SANTANDER\\DATA\\train.csv", data.table = F)
extra <- cbind.data.frame( ID = train_raw$ID,
var3 = train_raw$var3,
var15 = train_raw$var15,
var38 = train_raw$var38,
TARGET = train_raw$TARGET )
train_raw <- train_raw[, !(names(train_raw) %in% names(extra))]
names(train_raw) <- Spanish2English$English
train_raw <- cbind(train_raw, extra)
test_raw <- fread("D:\\kaggle\\SANTANDER\\DATA\\test.csv", data.table = F)
extra <- cbind.data.frame( ID = test_raw$ID,
var3 = test_raw$var3,
var15 = test_raw$var15,
var38 = test_raw$var38 )
test_raw <- test_raw[, !(names(test_raw) %in% names(extra))]
names(test_raw) <- Spanish2English$English
test_raw <- cbind(test_raw, extra)
response <- train_raw$TARGET
train_raw$TARGET <- NULL
train_raw$ID <- NULL
test_raw <- fread("D:\\kaggle\\SANTANDER\\DATA\\test.csv", data.table = F)
id <- test_raw$ID
test_raw$ID <- NULL
tmp <- rbind(train_raw, test_raw)
# # categorical and discrete are grouped into a single group
#
# categorical_vars <- c()
#
# remove_vars <- c("PropertyField6", "GeographicField10A")
#
#
# tmp <- tmp[, !(names(tmp) %in% remove_vars)]
#
#
# tmp$Original_Quote_Date <- as.Date(tmp$Original_Quote_Date)
#
# tmp$month <- as.integer(format(tmp$Original_Quote_Date, "%m"))
#
# tmp$year <- as.integer(format(tmp$Original_Quote_Date, "%y"))
#
# tmp$day <- weekdays(as.Date(tmp$Original_Quote_Date))
#
# tmp$week <- week((as.Date(tmp$Original_Quote_Date)))
#
# tmp$date <- (((tmp$year * 52 ) + tmp$week) %% 4)
#
#########################################################################################
# Identify low-cardinality (< 30 distinct values) columns -- categorical
# candidates. vapply replaces the original grow-and-filter loop over a list
# of counts; the result is the same character vector of column names.
a <- vapply(tmp, function(x) length(unique(x)), integer(1))
len_unique <- names(a)[a < 30]
tmp_unique <- tmp[, len_unique]

# Impute missing values with -1, then count -1s per row as a feature
# (NOTE: this also counts columns that legitimately contained -1 already,
# matching the original behaviour).
tmp[is.na(tmp)] <- -1
row_NA <- apply(tmp, 1, function(x) sum(x == -1))
tmp$row_NA <- row_NA
# Separate out the character columns. Vectorised replacement for the
# original loop, which grew a vector seeded with numeric zeros and
# re-filtered the sentinel on every iteration.
char <- names(tmp)[vapply(tmp, function(x) inherits(x, "character"), logical(1))]
# convert char columns to factors to dummify them
tmp_char <- tmp[, char]
# rm(tmp_unique)
for(f in names(tmp_char)){
levels <- unique(tmp_char[, f])
tmp_char[,f] <- factor(tmp_char[,f], levels = levels)
}
dummies <- dummyVars( ~., data = tmp_char)
tmp_char <- predict(dummies, newdata = tmp_char)
tmp_char <- data.frame(tmp_char)
rm(dummies)
gc()
for (f in names(tmp)) {
if (class(tmp[[f]])=="character") {
levels <- unique(tmp[[f]])
tmp[[f]] <- as.integer(factor(tmp[[f]], levels=levels))
}
}
#################################################################################################
high_card <- c("PersonalField16", "PersonalField17", "PersonalField14", "PersonalField18", "PersonalField19" )
tmp_high_card <- tmp[, high_card]
str(tmp_high_card, list.len = 999)
cat("assuming text variables are categorical & replacing them with numeric ids\n")
for (f in names(tmp_high_card)) {
if (class(tmp_high_card[[f]])=="character") {
levels <- unique(c(tmp[[f]]))
tmp_high_card[[f]] <- as.integer(factor(tmp_high_card[[f]], levels=levels))
}
}
str(tmp_high_card, list.len = 999)
# converting to factors
len = length(names(tmp_high_card))
for (i in 1:len) {
print(paste0( i / (len) *100, "%"))
tmp_high_card[ , i] <- as.factor(tmp_high_card[ , i])
}
# counts ;
tmp_factors <- tmp_high_card
# 2 way count
nms <- combn(names(tmp_factors), 2)
dim(nms)
nms_df <- data.frame(nms)
len = length(names(nms_df))
for (i in 1:len) {
nms_df[, i] <- as.character(nms_df[, i])
}
tmp_count <- data.frame(id = 1:dim(tmp)[1])
for(i in 1:dim(nms_df)[2]){
#new df
print(paste0(((i / dim(nms_df)[2]) * 100), "%"))
tmp_count[, paste(names(nms_df)[i], "_two", sep="")] <- my.f2cnt(th2 = tmp_high_card,
vn1 = nms_df[1,i],
vn2 = nms_df[2,i] )
}
#3 way count
nms <- combn(names(tmp_factors), 3)
dim(nms)
nms_df <- data.frame(nms); #nms_df <- nms_df[ c(1:3), c(1:100)]
len = length(names(nms_df))
for (i in 1:len) {
print(paste0(((i / len) * 100), "%"))
nms_df[, i] <- as.character(nms_df[, i])
}
for(i in 1:dim(nms_df)[2]){
#new df
print(paste0(((i / dim(nms_df)[2]) * 100), "%"))
tmp_count[, paste(names(nms_df)[i], "_three", sep="")] <- my.f3cnt(th2 = tmp_high_card,
vn1 = nms_df[1,i],
vn2 = nms_df[2,i],
vn3 = nms_df[3,i])
}
#one way count
len = length(names(tmp_factors))
for(i in 1:len){
print(paste0(((i / len) * 100), "%") )
tmp_factors$x <- tmp_factors[, i]
sum1 <- sqldf("select x, count(1) as cnt
from tmp_factors group by 1 ")
tmp1 <- sqldf("select cnt from tmp_factors a left join sum1 b on a.x=b.x")
tmp_count[, paste(names(tmp_factors)[i], "_cnt", sep="")] <- tmp1$cnt
}
##################################################################################################
tmp_cont <- tmp[, continous_vars]
tmp_cont$Original_Quote_Date <- NULL
tmp_pre <- preProcess(tmp_cont, method = ("BoxCox"))
tmp_cont_new <- predict(tmp_pre, tmp_cont)
###################################################################################################
tmp <- tmp[, !(names(tmp) %in% c(continous_vars))]
tmp_new <- cbind(tmp, tmp_char, tmp_cont_new)
rm(test_raw); rm(train_raw); rm(tmp_char)
#############################################################################################
# add interaction terms
imp <- read_csv("D:\\kaggle\\HOMESITE\\FEATURE_IMP\\12062015_1.csv")
top_50 <- imp$Feature[1:5]
tmp_int <- tmp[, top_50]
for (f in top_50) {
if (class(tmp_int[[f]])=="character") {
levels <- unique(tmp_int[[f]])
tmp_int[[f]] <- as.integer(factor(tmp_int[[f]], levels=levels))
}
}
gc()
rm(imp);
#############################################################################################
# plus interaction
# Pairwise "plus" interaction features over the ORIGINAL columns only.
# BUGFIX: the original ran j up to ncol(tmp_int) + 1 (one past the last
# column) and re-read ncol() while the frame grew inside the loop, creating
# interactions of interactions with NA-derived names. Snapshot the column
# set first so each unordered pair is combined exactly once.
base_cols <- colnames(tmp_int)
p <- length(base_cols)
for (i in seq_len(p - 1)) {
  for (j in (i + 1):p) {
    var.new <- paste0(base_cols[i], '_plus_', base_cols[j])
    tmp_int[, var.new] <- tmp_int[, base_cols[i]] + tmp_int[, base_cols[j]]
  }
}
gc()
tmp_new <- cbind(tmp_new, tmp_int)
rm(tmp_int)
gc()
############################################################################################
# create - interaction features
# add interaction terms
imp <- read_csv("D:\\kaggle\\HOMESITE\\FEATURE_IMP\\12062015_1.csv")
top_50 <- imp$Feature[1:5]
tmp_int <- tmp[, top_50]
for (f in top_50) {
if (class(tmp_int[[f]])=="character") {
levels <- unique(tmp_int[[f]])
tmp_int[[f]] <- as.integer(factor(tmp_int[[f]], levels=levels))
}
}
gc()
rm(imp);
# Pairwise "minus" interaction features over the ORIGINAL columns only.
# BUGFIX: same off-by-one (j up to ncol + 1) and growing-frame re-read of
# ncol() as the other interaction loops; snapshot the columns first.
base_cols <- colnames(tmp_int)
p <- length(base_cols)
for (i in seq_len(p - 1)) {
  for (j in (i + 1):p) {
    var.new <- paste0(base_cols[i], '_minus_', base_cols[j])
    tmp_int[, var.new] <- tmp_int[, base_cols[i]] - tmp_int[, base_cols[j]]
  }
}
gc()
tmp_new <- cbind(tmp_new, tmp_int)
rm(tmp_int)
gc()
#############################################################################################
# create * interaction features
# add interaction terms
imp <- read_csv("D:\\kaggle\\HOMESITE\\FEATURE_IMP\\12062015_1.csv")
top_50 <- imp$Feature[1:5]
tmp_int <- tmp[, top_50]
for (f in top_50) {
if (class(tmp_int[[f]])=="character") {
levels <- unique(tmp_int[[f]])
tmp_int[[f]] <- as.integer(factor(tmp_int[[f]], levels=levels))
}
}
gc()
rm(imp);
# Pairwise "mult" interaction features over the ORIGINAL columns only.
# BUGFIX: same off-by-one (j up to ncol + 1) and growing-frame re-read of
# ncol() as the other interaction loops; snapshot the columns first.
base_cols <- colnames(tmp_int)
p <- length(base_cols)
for (i in seq_len(p - 1)) {
  for (j in (i + 1):p) {
    var.new <- paste0(base_cols[i], '_mult_', base_cols[j])
    tmp_int[, var.new] <- tmp_int[, base_cols[i]] * tmp_int[, base_cols[j]]
  }
}
tmp_new <- cbind(tmp_new, tmp_int)
rm(tmp_int)
gc()
#############################################################################################
# create ^ interaction features
# not using division interaction features - NA's
imp <- read_csv("D:\\kaggle\\HOMESITE\\FEATURE_IMP\\12062015_1.csv")
top_50 <- imp$Feature[1:5]
tmp_int <- tmp[, top_50]
for (f in top_50) {
if (class(tmp_int[[f]])=="character") {
levels <- unique(tmp_int[[f]])
tmp_int[[f]] <- as.integer(factor(tmp_int[[f]], levels=levels))
}
}
gc()
rm(imp);
# Squared product ("order") interaction features over the ORIGINAL columns
# only. BUGFIX: same off-by-one (j up to ncol + 1) and growing-frame
# re-read of ncol() as the other interaction loops; snapshot first.
base_cols <- colnames(tmp_int)
p <- length(base_cols)
for (i in seq_len(p - 1)) {
  for (j in (i + 1):p) {
    var.new <- paste0(base_cols[i], '_order_', base_cols[j])
    tmp_int[, var.new] <- (tmp_int[, base_cols[i]] * tmp_int[, base_cols[j]]) ^ 2
  }
}
#############################################################################################
# NA terms test
a <- lapply(tmp_int, function(x) sum(is.na(x)))
len_unique <- rep(0, ncol(tmp_int))
for(i in 1:length(a))
{
if(a[[i]] != 0) {
len_unique[i] <- (names(a[i]))
}
}
len_unique <- len_unique[len_unique != 0]
tmp_new <- cbind(tmp_new, tmp_int)
rm(tmp_int)
gc()
##################################################################################################
# create 3^ interaction features
# not using division interaction features - NA's
imp <- read_csv("D:\\kaggle\\HOMESITE\\FEATURE_IMP\\12062015_1.csv")
top_50 <- imp$Feature[1:5]
tmp_int <- tmp[, top_50]
for (f in top_50) {
if (class(tmp_int[[f]])=="character") {
levels <- unique(tmp_int[[f]])
tmp_int[[f]] <- as.integer(factor(tmp_int[[f]], levels=levels))
}
}
gc()
rm(imp);
for (i in 1:ncol(tmp_int)) {
for (j in (i + 1) : (ncol(tmp_int) + 1)) {
var.x <- colnames(tmp_int)[i]
var.y <- colnames(tmp_int)[j]
var.new <- paste0(var.x, '_orderss_', var.y)
tmp_int[, paste0(var.new)] <- (tmp_int[, i] * tmp_int[, j]) ^ 3
}
}
tmp_new <- cbind(tmp_new, tmp_int)
rm(tmp_int)
gc()
##################################################################################################
# getting NA's with the below code
# create 4^ interaction features
# not using division interaction features - NA's
imp <- read_csv("D:\\kaggle\\HOMESITE\\FEATURE_IMP\\12062015_1.csv")
top_50 <- imp$Feature[1:5]
tmp_int <- tmp[, top_50]
for (f in top_50) {
if (class(tmp_int[[f]])=="character") {
levels <- unique(tmp_int[[f]])
tmp_int[[f]] <- as.integer(factor(tmp_int[[f]], levels=levels))
}
}
gc()
rm(imp);
for (i in 1:ncol(tmp_int)) {
for (j in (i + 1) : (ncol(tmp_int) + 1)) {
var.x <- colnames(tmp_int)[i]
var.y <- colnames(tmp_int)[j]
var.new <- paste0(var.x, 'four_orderss_', var.y)
tmp_int[, paste0(var.new)] <- (tmp_int[, i] * tmp_int[, j]) ^ 4
}
}
a <- lapply(tmp_int, function(x) sum(is.na(x)))
len_unique <- rep(0, ncol(tmp_int))
for(i in 1:length(a))
{
if(a[[i]] != 0) {
len_unique[i] <- (names(a[i]))
}
}
len_unique <- len_unique[len_unique != 0]
tmp_new <- cbind(tmp_new, tmp_int)
rm(tmp_int)
gc()
##############################################################################################
tmp_new <- tmp_new[, !(names(tmp_new) %in% top_50)]
imp <- read_csv("D:\\kaggle\\HOMESITE\\FEATURE_IMP\\12062015_1.csv")
top_50 <- imp$Feature[1:5]
tmp_int <- tmp[, top_50]
for (f in top_50) {
if (class(tmp_int[[f]])=="character") {
levels <- unique(tmp_int[[f]])
tmp_int[[f]] <- as.integer(factor(tmp_int[[f]], levels=levels))
}
}
tmp_new <- cbind(tmp_new, tmp_int)
rm(tmp_int); rm(tmp)
##################################################################################
rm(tmp); rm(test_raw); rm(train_raw); rm(tmp_char); rm(tmp_int); rm(imp)
train <- tmp_new[c(1:260753), ]
test <- tmp_new[c(260754:434589), ]
rm(tmp_new)
gc()
#train[is.na(train)] <- -1
#test[is.na(test)] <- -1
write_csv(train, "D:\\kaggle\\HOMESITE\\Data\\New_folder\\train_01262016.csv")
write_csv(test, "D:\\kaggle\\HOMESITE\\Data\\New_folder\\test_01262016.csv")
###################################################################################################
feature.names <- names(train)
h<-sample(nrow(train),2000)
dval<-xgb.DMatrix(data=data.matrix(train[h,]),label=response[h])
dtrain<-xgb.DMatrix(data=data.matrix(train[-h,]),label=response[-h])
#dtrain<-xgb.DMatrix(data=data.matrix(train),label=response, )
watchlist<-list(val=dval,train=dtrain)
param <- list( objective = "binary:logistic",
booster = "gbtree",
eval_metric = "auc",
eta = 0.023, # 0.06, #0.01,
max_depth = 6, #changed from default of 8
subsample = 0.83, # 0.7
colsample_bytree = 0.77, # 0.7
num_parallel_tree = 2
)
start <- Sys.time()
require(doParallel)
cl <- makeCluster(2); registerDoParallel(cl)
set.seed(12*25*15)
#cv <- xgb.cv(params = param, data = dtrain,
# nrounds = 1800,
# nfold = 4,
# showsd = T,
# maximize = F)
clf <- xgb.train( params = param,
data = dtrain,
nrounds = 3000,
verbose = 1, #1
#early.stop.round = 150,
watchlist = watchlist,
maximize = T,
nthread = 2)
xgb.save(clf, "D:\\kaggle\\HOMESITE\\models\\12252015_1.R")
rm(submission)
pred <- predict(clf, data.matrix(test[,feature.names]), ntreelimit = 2000)
submission <- data.frame(QuoteNumber = id, QuoteConversion_Flag = pred)
write_csv(submission, "D:\\kaggle\\HOMESITE\\submission\\12072015\\12252015_2.csv")
time_taken <- Sys.time() - start
| /SANTANDER/Version_Control/laptop/01032016.R | no_license | dearkafka/kaggle_2 | R | false | false | 15,717 | r |
require(data.table); require(lubridate); require(caret); require(sqldf); require(xgboost); require(xlsx); require(dplyr); require(readr); require(doParallel); require(bit64)
#rm(list = ls())
Spanish2English <- fread("D:\\kaggle\\SANTANDER\\DATA\\Spanish2English.csv", data.table = F)
train_raw <- fread("D:\\kaggle\\SANTANDER\\DATA\\train.csv", data.table = F)
extra <- cbind.data.frame( ID = train_raw$ID,
var3 = train_raw$var3,
var15 = train_raw$var15,
var38 = train_raw$var38,
TARGET = train_raw$TARGET )
train_raw <- train_raw[, !(names(train_raw) %in% names(extra))]
names(train_raw) <- Spanish2English$English
train_raw <- cbind(train_raw, extra)
test_raw <- fread("D:\\kaggle\\SANTANDER\\DATA\\test.csv", data.table = F)
extra <- cbind.data.frame( ID = test_raw$ID,
var3 = test_raw$var3,
var15 = test_raw$var15,
var38 = test_raw$var38 )
test_raw <- test_raw[, !(names(test_raw) %in% names(extra))]
names(test_raw) <- Spanish2English$English
test_raw <- cbind(test_raw, extra)
response <- train_raw$TARGET
train_raw$TARGET <- NULL
train_raw$ID <- NULL
test_raw <- fread("D:\\kaggle\\SANTANDER\\DATA\\test.csv", data.table = F)
id <- test_raw$ID
test_raw$ID <- NULL
tmp <- rbind(train_raw, test_raw)
# # categorical and discrete are grouped into a single group
#
# categorical_vars <- c()
#
# remove_vars <- c("PropertyField6", "GeographicField10A")
#
#
# tmp <- tmp[, !(names(tmp) %in% remove_vars)]
#
#
# tmp$Original_Quote_Date <- as.Date(tmp$Original_Quote_Date)
#
# tmp$month <- as.integer(format(tmp$Original_Quote_Date, "%m"))
#
# tmp$year <- as.integer(format(tmp$Original_Quote_Date, "%y"))
#
# tmp$day <- weekdays(as.Date(tmp$Original_Quote_Date))
#
# tmp$week <- week((as.Date(tmp$Original_Quote_Date)))
#
# tmp$date <- (((tmp$year * 52 ) + tmp$week) %% 4)
#
#########################################################################################
# Identify low-cardinality (< 30 distinct values) columns -- categorical
# candidates. vapply replaces the original grow-and-filter loop over a list
# of counts; the result is the same character vector of column names.
a <- vapply(tmp, function(x) length(unique(x)), integer(1))
len_unique <- names(a)[a < 30]
tmp_unique <- tmp[, len_unique]

# Impute missing values with -1, then count -1s per row as a feature
# (NOTE: this also counts columns that legitimately contained -1 already,
# matching the original behaviour).
tmp[is.na(tmp)] <- -1
row_NA <- apply(tmp, 1, function(x) sum(x == -1))
tmp$row_NA <- row_NA
# Separate out the character columns. Vectorised replacement for the
# original loop, which grew a vector seeded with numeric zeros and
# re-filtered the sentinel on every iteration.
char <- names(tmp)[vapply(tmp, function(x) inherits(x, "character"), logical(1))]
# convert char columns to factors to dummify them
tmp_char <- tmp[, char]
# rm(tmp_unique)
for(f in names(tmp_char)){
levels <- unique(tmp_char[, f])
tmp_char[,f] <- factor(tmp_char[,f], levels = levels)
}
dummies <- dummyVars( ~., data = tmp_char)
tmp_char <- predict(dummies, newdata = tmp_char)
tmp_char <- data.frame(tmp_char)
rm(dummies)
gc()
for (f in names(tmp)) {
if (class(tmp[[f]])=="character") {
levels <- unique(tmp[[f]])
tmp[[f]] <- as.integer(factor(tmp[[f]], levels=levels))
}
}
#################################################################################################
# --- High-cardinality categorical features -> engineered count features -------
# Selected high-cardinality columns of `tmp` (the stacked train+test frame).
high_card <- c("PersonalField16", "PersonalField17", "PersonalField14", "PersonalField18", "PersonalField19" )
tmp_high_card <- tmp[, high_card]
str(tmp_high_card, list.len = 999)
cat("assuming text variables are categorical & replacing them with numeric ids\n")
# Encode any remaining character columns as integer ids.
# NOTE(review): levels are taken from `tmp[[f]]` rather than `tmp_high_card[[f]]`;
# the columns were copied from `tmp` above, so the result should be identical --
# confirm. (If `tmp` was already integer-encoded earlier, this loop is a no-op.)
for (f in names(tmp_high_card)) {
if (class(tmp_high_card[[f]])=="character") {
levels <- unique(c(tmp[[f]]))
tmp_high_card[[f]] <- as.integer(factor(tmp_high_card[[f]], levels=levels))
}
}
str(tmp_high_card, list.len = 999)
# converting to factors
len = length(names(tmp_high_card))
for (i in 1:len) {
print(paste0( i / (len) *100, "%"))
tmp_high_card[ , i] <- as.factor(tmp_high_card[ , i])
}
# counts ;
tmp_factors <- tmp_high_card
# 2 way count
# All unordered pairs of the high-cardinality columns; `my.f2cnt()` (defined
# elsewhere in the project) returns, per row, the frequency of that row's
# value combination over the whole data set.
nms <- combn(names(tmp_factors), 2)
dim(nms)
nms_df <- data.frame(nms)
len = length(names(nms_df))
for (i in 1:len) {
nms_df[, i] <- as.character(nms_df[, i])
}
# Accumulator for the engineered count features (one row per row of `tmp`).
tmp_count <- data.frame(id = 1:dim(tmp)[1])
for(i in 1:dim(nms_df)[2]){
#new df
print(paste0(((i / dim(nms_df)[2]) * 100), "%"))
tmp_count[, paste(names(nms_df)[i], "_two", sep="")] <- my.f2cnt(th2 = tmp_high_card,
vn1 = nms_df[1,i],
vn2 = nms_df[2,i] )
}
#3 way count
# Same idea over all column triples, via `my.f3cnt()` (defined elsewhere).
nms <- combn(names(tmp_factors), 3)
dim(nms)
nms_df <- data.frame(nms); #nms_df <- nms_df[ c(1:3), c(1:100)]
len = length(names(nms_df))
for (i in 1:len) {
print(paste0(((i / len) * 100), "%"))
nms_df[, i] <- as.character(nms_df[, i])
}
for(i in 1:dim(nms_df)[2]){
#new df
print(paste0(((i / dim(nms_df)[2]) * 100), "%"))
tmp_count[, paste(names(nms_df)[i], "_three", sep="")] <- my.f3cnt(th2 = tmp_high_card,
vn1 = nms_df[1,i],
vn2 = nms_df[2,i],
vn3 = nms_df[3,i])
}
#one way count
# Per-column value frequencies via sqldf: `sum1` tallies each value's count,
# and the left join maps every row's value back to that count.
len = length(names(tmp_factors))
for(i in 1:len){
print(paste0(((i / len) * 100), "%") )
tmp_factors$x <- tmp_factors[, i]
sum1 <- sqldf("select x, count(1) as cnt
from tmp_factors group by 1 ")
tmp1 <- sqldf("select cnt from tmp_factors a left join sum1 b on a.x=b.x")
tmp_count[, paste(names(tmp_factors)[i], "_cnt", sep="")] <- tmp1$cnt
}
##################################################################################################
# --- Continuous features: Box-Cox transform via caret::preProcess --------------
tmp_cont <- tmp[, continous_vars]
tmp_cont$Original_Quote_Date <- NULL
tmp_pre <- preProcess(tmp_cont, method = ("BoxCox"))
tmp_cont_new <- predict(tmp_pre, tmp_cont)
###################################################################################################
# Replace raw continuous columns with the transformed versions.
# NOTE(review): `tmp_count` built above is never combined into `tmp_new`
# anywhere in this script -- confirm whether it was meant to be cbind-ed too.
tmp <- tmp[, !(names(tmp) %in% c(continous_vars))]
tmp_new <- cbind(tmp, tmp_char, tmp_cont_new)
rm(test_raw); rm(train_raw); rm(tmp_char)
#############################################################################################
# add interaction terms
# --- Pairwise SUM interactions among the top-5 most important features ---------
# Feature importances come from a prior xgboost run saved to disk.
# (The variable is named `top_50` but only the top 5 features are kept.)
imp <- read_csv("D:\\kaggle\\HOMESITE\\FEATURE_IMP\\12062015_1.csv")
top_50 <- imp$Feature[1:5]
tmp_int <- tmp[, top_50]
for (f in top_50) {
if (class(tmp_int[[f]])=="character") {
levels <- unique(tmp_int[[f]])
tmp_int[[f]] <- as.integer(factor(tmp_int[[f]], levels=levels))
}
}
gc()
rm(imp);
#############################################################################################
# plus interaction
# NOTE(review): the inner bound `(ncol(tmp_int) + 1)` is re-evaluated while new
# interaction columns are appended inside the loop, so later iterations also
# combine with previously created interaction columns. Confirm this cascading
# behavior is intended rather than an off-by-one.
for (i in 1:ncol(tmp_int)) {
for (j in (i + 1) : (ncol(tmp_int) + 1)) {
# a = i; b= j
var.x <- colnames(tmp_int)[i]
var.y <- colnames(tmp_int)[j]
var.new <- paste0(var.x, '_plus_', var.y)
tmp_int[ , paste0(var.new)] <- tmp_int[, i] + tmp_int[, j]
}
}
gc()
tmp_new <- cbind(tmp_new, tmp_int)
rm(tmp_int)
gc()
############################################################################################
# create - interaction features
# add interaction terms
# --- Pairwise DIFFERENCE interactions (same recipe as the sums above) ----------
imp <- read_csv("D:\\kaggle\\HOMESITE\\FEATURE_IMP\\12062015_1.csv")
top_50 <- imp$Feature[1:5]
tmp_int <- tmp[, top_50]
for (f in top_50) {
if (class(tmp_int[[f]])=="character") {
levels <- unique(tmp_int[[f]])
tmp_int[[f]] <- as.integer(factor(tmp_int[[f]], levels=levels))
}
}
gc()
rm(imp);
for (i in 1:ncol(tmp_int)) {
for (j in (i + 1) : (ncol(tmp_int) + 1)) {
var.x <- colnames(tmp_int)[i]
var.y <- colnames(tmp_int)[j]
var.new <- paste0(var.x, '_minus_', var.y)
tmp_int[ , paste0(var.new)] <- tmp_int[, i] - tmp_int[, j]
}
}
gc()
tmp_new <- cbind(tmp_new, tmp_int)
rm(tmp_int)
gc()
#############################################################################################
# create * interaction features
# add interaction terms
# --- Pairwise PRODUCT interactions ---------------------------------------------
imp <- read_csv("D:\\kaggle\\HOMESITE\\FEATURE_IMP\\12062015_1.csv")
top_50 <- imp$Feature[1:5]
tmp_int <- tmp[, top_50]
for (f in top_50) {
if (class(tmp_int[[f]])=="character") {
levels <- unique(tmp_int[[f]])
tmp_int[[f]] <- as.integer(factor(tmp_int[[f]], levels=levels))
}
}
gc()
rm(imp);
for (i in 1:ncol(tmp_int)) {
for (j in (i + 1) : (ncol(tmp_int) + 1)) {
var.x <- colnames(tmp_int)[i]
var.y <- colnames(tmp_int)[j]
var.new <- paste0(var.x, '_mult_', var.y)
tmp_int[ , paste0(var.new)] <- tmp_int[, i] * tmp_int[, j]
}
}
tmp_new <- cbind(tmp_new, tmp_int)
rm(tmp_int)
gc()
#############################################################################################
# create ^ interaction features
# not using division interaction features - NA's
# --- Squared-product interactions: (x*y)^2 --------------------------------------
imp <- read_csv("D:\\kaggle\\HOMESITE\\FEATURE_IMP\\12062015_1.csv")
top_50 <- imp$Feature[1:5]
tmp_int <- tmp[, top_50]
for (f in top_50) {
if (class(tmp_int[[f]])=="character") {
levels <- unique(tmp_int[[f]])
tmp_int[[f]] <- as.integer(factor(tmp_int[[f]], levels=levels))
}
}
gc()
rm(imp);
for (i in 1:ncol(tmp_int)) {
for (j in (i + 1) : (ncol(tmp_int) + 1)) {
var.x <- colnames(tmp_int)[i]
var.y <- colnames(tmp_int)[j]
var.new <- paste0(var.x, '_order_', var.y)
tmp_int[, paste0(var.new)] <- (tmp_int[, i] * tmp_int[, j]) ^ 2
}
}
#############################################################################################
# NA terms test
# Record the names of generated columns that contain any NA values.
# NOTE(review): `len_unique` starts as integer 0s and gets character names
# assigned into it, coercing the whole vector to character on first hit; the
# final filter compares against 0 (coerced to "0"), which works only because
# "0" is not a column name -- confirm.
a <- lapply(tmp_int, function(x) sum(is.na(x)))
len_unique <- rep(0, ncol(tmp_int))
for(i in 1:length(a))
{
if(a[[i]] != 0) {
len_unique[i] <- (names(a[i]))
}
}
len_unique <- len_unique[len_unique != 0]
tmp_new <- cbind(tmp_new, tmp_int)
rm(tmp_int)
gc()
##################################################################################################
# create 3^ interaction features
# not using division interaction features - NA's
# --- Cubed-product interactions: (x*y)^3 ----------------------------------------
imp <- read_csv("D:\\kaggle\\HOMESITE\\FEATURE_IMP\\12062015_1.csv")
top_50 <- imp$Feature[1:5]
tmp_int <- tmp[, top_50]
for (f in top_50) {
if (class(tmp_int[[f]])=="character") {
levels <- unique(tmp_int[[f]])
tmp_int[[f]] <- as.integer(factor(tmp_int[[f]], levels=levels))
}
}
gc()
rm(imp);
for (i in 1:ncol(tmp_int)) {
for (j in (i + 1) : (ncol(tmp_int) + 1)) {
var.x <- colnames(tmp_int)[i]
var.y <- colnames(tmp_int)[j]
var.new <- paste0(var.x, '_orderss_', var.y)
tmp_int[, paste0(var.new)] <- (tmp_int[, i] * tmp_int[, j]) ^ 3
}
}
tmp_new <- cbind(tmp_new, tmp_int)
rm(tmp_int)
gc()
##################################################################################################
# getting NA's with the below code
# create 4^ interaction features
# not using division interaction features - NA's
# --- Fourth-power interactions: (x*y)^4; NA columns identified below ------------
# NOTE(review): the integer product `tmp_int[, i] * tmp_int[, j]` can overflow
# to NA in R before the power is applied -- presumably the source of the NA's
# mentioned above; confirm.
imp <- read_csv("D:\\kaggle\\HOMESITE\\FEATURE_IMP\\12062015_1.csv")
top_50 <- imp$Feature[1:5]
tmp_int <- tmp[, top_50]
for (f in top_50) {
if (class(tmp_int[[f]])=="character") {
levels <- unique(tmp_int[[f]])
tmp_int[[f]] <- as.integer(factor(tmp_int[[f]], levels=levels))
}
}
gc()
rm(imp);
for (i in 1:ncol(tmp_int)) {
for (j in (i + 1) : (ncol(tmp_int) + 1)) {
var.x <- colnames(tmp_int)[i]
var.y <- colnames(tmp_int)[j]
var.new <- paste0(var.x, 'four_orderss_', var.y)
tmp_int[, paste0(var.new)] <- (tmp_int[, i] * tmp_int[, j]) ^ 4
}
}
# Same NA scan as after the squared interactions above.
a <- lapply(tmp_int, function(x) sum(is.na(x)))
len_unique <- rep(0, ncol(tmp_int))
for(i in 1:length(a))
{
if(a[[i]] != 0) {
len_unique[i] <- (names(a[i]))
}
}
len_unique <- len_unique[len_unique != 0]
tmp_new <- cbind(tmp_new, tmp_int)
rm(tmp_int)
gc()
##############################################################################################
# --- Final feature assembly, train/test split, and xgboost model ----------------
# Drop the raw top-5 columns from the feature matrix, then re-add their
# integer-encoded versions.
tmp_new <- tmp_new[, !(names(tmp_new) %in% top_50)]
imp <- read_csv("D:\\kaggle\\HOMESITE\\FEATURE_IMP\\12062015_1.csv")
top_50 <- imp$Feature[1:5]
tmp_int <- tmp[, top_50]
for (f in top_50) {
if (class(tmp_int[[f]])=="character") {
levels <- unique(tmp_int[[f]])
tmp_int[[f]] <- as.integer(factor(tmp_int[[f]], levels=levels))
}
}
tmp_new <- cbind(tmp_new, tmp_int)
rm(tmp_int); rm(tmp)
##################################################################################
# NOTE(review): most of these objects were already removed earlier (including
# `tmp` on the previous line); the repeated rm() calls will emit
# "object not found" warnings but are otherwise harmless.
rm(tmp); rm(test_raw); rm(train_raw); rm(tmp_char); rm(tmp_int); rm(imp)
# Split the stacked frame back apart: rows 1:260753 are presumably the train
# set and the remainder the test set -- confirm against where they were stacked.
train <- tmp_new[c(1:260753), ]
test <- tmp_new[c(260754:434589), ]
rm(tmp_new)
gc()
#train[is.na(train)] <- -1
#test[is.na(test)] <- -1
write_csv(train, "D:\\kaggle\\HOMESITE\\Data\\New_folder\\train_01262016.csv")
write_csv(test, "D:\\kaggle\\HOMESITE\\Data\\New_folder\\test_01262016.csv")
###################################################################################################
# --- xgboost training ------------------------------------------------------------
feature.names <- names(train)
# Hold out 2000 random rows as a validation set for the watchlist.
h<-sample(nrow(train),2000)
dval<-xgb.DMatrix(data=data.matrix(train[h,]),label=response[h])
dtrain<-xgb.DMatrix(data=data.matrix(train[-h,]),label=response[-h])
#dtrain<-xgb.DMatrix(data=data.matrix(train),label=response, )
watchlist<-list(val=dval,train=dtrain)
param <- list( objective = "binary:logistic",
booster = "gbtree",
eval_metric = "auc",
eta = 0.023, # 0.06, #0.01,
max_depth = 6, #changed from default of 8
subsample = 0.83, # 0.7
colsample_bytree = 0.77, # 0.7
num_parallel_tree = 2
)
start <- Sys.time()
require(doParallel)
cl <- makeCluster(2); registerDoParallel(cl)
set.seed(12*25*15)
#cv <- xgb.cv(params = param, data = dtrain,
# nrounds = 1800,
# nfold = 4,
# showsd = T,
# maximize = F)
clf <- xgb.train( params = param,
data = dtrain,
nrounds = 3000,
verbose = 1, #1
#early.stop.round = 150,
watchlist = watchlist,
maximize = T,
nthread = 2)
# NOTE(review): xgb.save writes a binary xgboost model; the ".R" extension is
# misleading but harmless.
xgb.save(clf, "D:\\kaggle\\HOMESITE\\models\\12252015_1.R")
rm(submission)
# Score the test set and write the submission file.
pred <- predict(clf, data.matrix(test[,feature.names]), ntreelimit = 2000)
submission <- data.frame(QuoteNumber = id, QuoteConversion_Flag = pred)
write_csv(submission, "D:\\kaggle\\HOMESITE\\submission\\12072015\\12252015_2.csv")
time_taken <- Sys.time() - start
|
#' Smooth a CIFTI
#'
#' Smooth CIFTI data. This uses the \code{-cifti-smoothing} command
#' from Connectome Workbench.
#'
#' If the CIFTI is a ".dlabel" file (intent 3007), then it will be converted
#' to a ".dscalar" file because the values will no longer be integer indices.
#' Unless the label values were ordinal, this is probably not desired so a
#' warning will be printed.
#'
#' The input can also be a \code{"xifti"} object.
#'
#' Surfaces are required for each hemisphere in the CIFTI. If they are not provided,
#' the inflated surfaces included in \code{"ciftiTools"} will be used.
#' @inheritSection Connectome_Workbench_Description Connectome Workbench Requirement
#'
#' @param x The \code{"xifti"} object or CIFTI file to smooth.
#' @param cifti_target_fname The file name to save the smoothed CIFTI. If
#' \code{NULL}, will be set to a file in a temporary directory.
#' @param surface_sigma The sigma for the gaussian surface smoothing kernel, in mm
#' @param volume_sigma The sigma for the gaussian volume smoothing kernel, in mm
#' @param surfL_fname,surfR_fname (Required if the
#' corresponding cortex is present) Surface GIFTI files for the left and right
#' cortical surface
#' @param cerebellum_fname (Optional) Surface GIFTI file for the
#' cerebellar surface
#' @param subcortical_zeroes_as_NA,cortical_zeroes_as_NA Should zero-values in
#' the subcortical volume or cortex be treated as NA? Default: \code{FALSE}.
#' @param subcortical_merged Smooth across subcortical structure boundaries?
#' Default: \code{FALSE}.
#' @inheritParams wb_path_Param
#'
#' @return The \code{cifti_target_fname}, invisibly
#'
#' @export
#'
smooth_cifti <- function(
  x, cifti_target_fname=NULL,
  surface_sigma, volume_sigma,
  surfL_fname=NULL, surfR_fname=NULL, cerebellum_fname=NULL,
  subcortical_zeroes_as_NA=FALSE, cortical_zeroes_as_NA=FALSE,
  subcortical_merged=FALSE,
  wb_path=NULL){

  input_is_xifti <- is.xifti(x, messages=FALSE)

  # Setup ----------------------------------------------------------------------
  # Get the metadata and set `cifti_target_fname` if NULL.
  # Also write out the CIFTI and get the surfaces present if `x` is a "xifti".
  if (input_is_xifti) {
    # Get metadata.
    cifti_info <- x$meta
    ## Get intent; fall back to dscalar if it is missing or unsupported.
    x_intent <- x$meta$cifti$intent
    if (!is.null(x_intent) && (x_intent %in% supported_intents()$value)) {
      x_extn <- supported_intents()$extension[supported_intents()$value == x_intent]
    } else {
      warning("The CIFTI intent was unknown, so smoothing as a dscalar.")
      x_extn <- "dscalar.nii"
    }
    ## Get brainstructures; if not recorded, infer them from the data slots.
    brainstructures <- x$meta$cifti$brainstructures
    if (is.null(brainstructures)) {
      brainstructures <- vector("character", 0)
      if (!is.null(x$data$cortex_left)) { brainstructures <- c(brainstructures, "left") }
      if (!is.null(x$data$cortex_right)) { brainstructures <- c(brainstructures, "right") }
      if (!is.null(x$data$subcort)) { brainstructures <- c(brainstructures, "subcortical") }
    }
    # Write out the CIFTI: the Workbench command operates on files.
    cifti_original_fname <- file.path(tempdir(), paste0("to_smooth.", x_extn))
    write_cifti(x, cifti_original_fname, verbose=FALSE)
    # Set the target CIFTI file name if null.
    if (is.null(cifti_target_fname)) {
      cifti_target_fname <- gsub(
        "to_smooth.", "smoothed.",
        cifti_original_fname, fixed=TRUE
      )
    }
    # Write out any surfaces carried by the "xifti", unless file names were
    # explicitly provided.
    if (is.null(surfL_fname) && !is.null(x$surf$cortex_left)) {
      surfL_fname <- file.path(tempdir(), "left.surf.gii")
      write_surf_gifti(x$surf$cortex_left, surfL_fname, hemisphere="left")
    }
    if (is.null(surfR_fname) && !is.null(x$surf$cortex_right)) {
      surfR_fname <- file.path(tempdir(), "right.surf.gii")
      write_surf_gifti(x$surf$cortex_right, surfR_fname, hemisphere="right")
    }
  } else {
    cifti_original_fname <- x
    stopifnot(file.exists(cifti_original_fname))
    # Get metadata.
    cifti_info <- info_cifti(cifti_original_fname)
    ## Get brainstructures.
    brainstructures <- cifti_info$cifti$brainstructures
    # Set the target CIFTI file name if null.
    if (is.null(cifti_target_fname)) {
      cifti_target_fname <- file.path(tempdir(), basename(cifti_original_fname))
    }
  }

  # If the input is a .dlabel file, the target should be .dscalar not .dlabel,
  # because smoothed label indices are no longer integers.
  fix_dlabel <- FALSE
  if (!is.null(cifti_info$cifti$intent)) {
    if (cifti_info$cifti$intent == 3007) {
      warning(paste(
        "Smoothing a label file will convert the labels to their numeric",
        "indices. Coercing `cifti_target_fname` to a \".dscalar\" file.\n"
      ))
      fix_dlabel <- TRUE
      cifti_target_fname <- gsub(
        "dlabel.nii", "dscalar.nii",
        cifti_target_fname, fixed=TRUE
      )
    }
  }

  # Begin the Connectome Workbench command.
  cmd <- paste(
    "-cifti-smoothing",
    sys_path(cifti_original_fname),
    surface_sigma,
    volume_sigma,
    "COLUMN",
    sys_path(cifti_target_fname)
  )

  # Add default surface(s) where missing ---------------------------------------
  # If cortex data is present but its surface geometry is missing, use the
  # surface included with `ciftiTools`, resampled to the data resolution.
  if ("left" %in% brainstructures && is.null(surfL_fname)) {
    ciftiTools_warn(paste(
      "No left surface provided to `smooth_cifti`,",
      "so using the surface included in `ciftiTools`."
    ))
    if (!is.xifti(x, messages=FALSE)) { x <- read_cifti(x, brainstructures=brainstructures) }
    ## Infer the resolution. Try in this order: `resamp_res`, medial wall
    ## mask length, data length.
    if (!is.null(x$meta$cifti$resamp_res)) {
      x_res <- x$meta$cifti$resamp_res
    } else if (!is.null(x$meta$cortex$medial_wall_mask$left)) {
      x_res <- length(x$meta$cortex$medial_wall_mask$left)
    } else {
      if (!is.null(x$data$cortex_left) && !is.null(x$data$cortex_right)) {
        if (nrow(x$data$cortex_left) != nrow(x$data$cortex_right)) {
          stop(paste(
            "The cortex resolution needs to be known to resample the cortex surface",
            "for use in smoothing. But, there was no resampling resolution",
            "or left medial wall mask in the `xifti`. Furthermore, the number of",
            "data vertices differed between the left and right cortices, meaning",
            "the cortex resolution cannot be inferred in any way."
          ))
        }
      }
      warning(paste(
        "No resampling resolution or left medial wall mask in the `xifti`.",
        "Using the number of left cortex vertices. This may cause an error if",
        "medial wall values were masked out."
      ))
      x_res <- nrow(x$data$cortex_left)
    }
    surfL_fname <- file.path(tempdir(), "left.surf.gii")
    surfL_fname <- resample_gifti(
      demo_files()$surf["left"],
      surfL_fname, hemisphere="left", file_type="surface", resamp_res=x_res
    )
  }
  if ("right" %in% brainstructures && is.null(surfR_fname)) {
    ciftiTools_warn(paste(
      "No right surface provided to `smooth_cifti`,",
      "so using the surface included in `ciftiTools`."
    ))
    if (!is.xifti(x, messages=FALSE)) { x <- read_cifti(x, brainstructures=brainstructures) }
    ## Infer the resolution. Try in this order: `resamp_res`, medial wall
    ## mask length, data length.
    if (!is.null(x$meta$cifti$resamp_res)) {
      x_res <- x$meta$cifti$resamp_res
    } else if (!is.null(x$meta$cortex$medial_wall_mask$right)) {
      x_res <- length(x$meta$cortex$medial_wall_mask$right)
    } else {
      # [Fixed] Compare the LEFT cortex against the RIGHT cortex, mirroring
      # the left-hemisphere branch above. Previously this compared the right
      # cortex to itself, so a vertex-count mismatch could never be detected.
      if (!is.null(x$data$cortex_left) && !is.null(x$data$cortex_right)) {
        if (nrow(x$data$cortex_left) != nrow(x$data$cortex_right)) {
          stop(paste(
            "The cortex resolution needs to be known to resample the cortex surface",
            "for use in smoothing. But, there was no resampling resolution",
            "or right medial wall mask in the `xifti`. Furthermore, the number of",
            "data vertices differed between the left and right cortices, meaning",
            "the cortex resolution cannot be inferred in any way."
          ))
        }
      }
      warning(paste(
        "No resampling resolution or right medial wall mask in the `xifti`.",
        "Using the number of right cortex vertices. This may cause an error if",
        "medial wall values were masked out."
      ))
      x_res <- nrow(x$data$cortex_right)
    }
    surfR_fname <- file.path(tempdir(), "right.surf.gii")
    surfR_fname <- resample_gifti(
      demo_files()$surf["right"],
      surfR_fname, hemisphere="right", file_type="surface", resamp_res=x_res
    )
  }

  # Build and run command ------------------------------------------------------
  if (!is.null(surfL_fname)) { cmd <- paste(cmd, "-left-surface", sys_path(surfL_fname)) }
  if (!is.null(surfR_fname)) { cmd <- paste(cmd, "-right-surface", sys_path(surfR_fname)) }
  if (!is.null(cerebellum_fname)) { cmd <- paste(cmd, "-cerebellum-surface", sys_path(cerebellum_fname)) }
  if (subcortical_zeroes_as_NA) { cmd <- paste(cmd, "-fix-zeros-volume") }
  if (cortical_zeroes_as_NA) { cmd <- paste(cmd, "-fix-zeros-surface") }
  if (subcortical_merged) { cmd <- paste(cmd, "-merged-volume") }
  run_wb_cmd(cmd, wb_path)

  # Fix .dlabel output ---------------------------------------------------------
  # Convert the (now non-integer) label map to a scalar map, preserving the
  # original column names.
  if (fix_dlabel) {
    old_target_fname <- cifti_target_fname
    cifti_target_fname <- gsub("dlabel", "dscalar", old_target_fname)
    names_fname <- tempfile()
    cat(names(cifti_info$cifti$labels), file = names_fname, sep = "\n")
    run_wb_cmd(
      paste(
        "-cifti-change-mapping", old_target_fname,
        "ROW", cifti_target_fname,
        "-scalar", "-name-file", names_fname
      ),
      wb_path
    )
  }

  # Return results -------------------------------------------------------------
  if (input_is_xifti) {
    return(read_xifti(cifti_target_fname, brainstructures=brainstructures))
  } else {
    return(invisible(cifti_target_fname))
  }
}
#' @rdname smooth_cifti
#' @export
smoothCIfTI <- function(
  x, cifti_target_fname,
  surface_sigma, volume_sigma,
  surfL_fname=NULL, surfR_fname=NULL, cerebellum_fname=NULL,
  subcortical_zeroes_as_NA=FALSE, cortical_zeroes_as_NA=FALSE,
  subcortical_merged=FALSE,
  wb_path=NULL){
  # Alias of `smooth_cifti`: every argument is forwarded unchanged.
  smooth_cifti(
    x = x,
    cifti_target_fname = cifti_target_fname,
    surface_sigma = surface_sigma,
    volume_sigma = volume_sigma,
    surfL_fname = surfL_fname,
    surfR_fname = surfR_fname,
    cerebellum_fname = cerebellum_fname,
    subcortical_zeroes_as_NA = subcortical_zeroes_as_NA,
    cortical_zeroes_as_NA = cortical_zeroes_as_NA,
    subcortical_merged = subcortical_merged,
    wb_path = wb_path
  )
}
#' @rdname smooth_cifti
#' @export
smoothcii <- function(
x, cifti_target_fname,
surface_sigma, volume_sigma,
surfL_fname=NULL, surfR_fname=NULL, cerebellum_fname=NULL,
subcortical_zeroes_as_NA=FALSE, cortical_zeroes_as_NA=FALSE,
subcortical_merged=FALSE,
wb_path=NULL){
# Abbreviated alias of `smooth_cifti`; all arguments are forwarded unchanged.
smooth_cifti(
x=x, cifti_target_fname=cifti_target_fname,
surface_sigma=surface_sigma, volume_sigma=volume_sigma,
surfL_fname=surfL_fname, surfR_fname=surfR_fname, cerebellum_fname=cerebellum_fname,
subcortical_zeroes_as_NA=subcortical_zeroes_as_NA, cortical_zeroes_as_NA=cortical_zeroes_as_NA,
subcortical_merged=subcortical_merged,
wb_path=wb_path
)
} | /R/smooth_cifti.R | no_license | yoaman/r-cran-ciftiTools | R | false | false | 11,572 | r | #' Smooth a CIFTI
#'
#' Smooth CIFTI data. This uses the \code{-cifti-smoothing} command
#' from Connectome Workbench.
#'
#' If the CIFTI is a ".dlabel" file (intent 3007), then it will be converted
#' to a ".dscalar" file because the values will no longer be integer indices.
#' Unless the label values were ordinal, this is probably not desired so a
#' warning will be printed.
#'
#' The input can also be a \code{"xifti"} object.
#'
#' Surfaces are required for each hemisphere in the CIFTI. If they are not provided,
#' the inflated surfaces included in \code{"ciftiTools"} will be used.
#' @inheritSection Connectome_Workbench_Description Connectome Workbench Requirement
#'
#' @param x The \code{"xifti"} object or CIFTI file to smooth.
#' @param cifti_target_fname The file name to save the smoothed CIFTI. If
#' \code{NULL}, will be set to a file in a temporary directory.
#' @param surface_sigma The sigma for the gaussian surface smoothing kernel, in mm
#' @param volume_sigma The sigma for the gaussian volume smoothing kernel, in mm
#' @param surfL_fname,surfR_fname (Required if the
#' corresponding cortex is present) Surface GIFTI files for the left and right
#' cortical surface
#' @param cerebellum_fname (Optional) Surface GIFTI file for the
#' cerebellar surface
#' @param subcortical_zeroes_as_NA,cortical_zeroes_as_NA Should zero-values in
#' the subcortical volume or cortex be treated as NA? Default: \code{FALSE}.
#' @param subcortical_merged Smooth across subcortical structure boundaries?
#' Default: \code{FALSE}.
#' @inheritParams wb_path_Param
#'
#' @return The \code{cifti_target_fname}, invisibly
#'
#' @export
#'
smooth_cifti <- function(
  x, cifti_target_fname=NULL,
  surface_sigma, volume_sigma,
  surfL_fname=NULL, surfR_fname=NULL, cerebellum_fname=NULL,
  subcortical_zeroes_as_NA=FALSE, cortical_zeroes_as_NA=FALSE,
  subcortical_merged=FALSE,
  wb_path=NULL){

  input_is_xifti <- is.xifti(x, messages=FALSE)

  # Setup ----------------------------------------------------------------------
  # Get the metadata and set `cifti_target_fname` if NULL.
  # Also write out the CIFTI and get the surfaces present if `x` is a "xifti".
  if (input_is_xifti) {
    # Get metadata.
    cifti_info <- x$meta
    ## Get intent; fall back to dscalar if it is missing or unsupported.
    x_intent <- x$meta$cifti$intent
    if (!is.null(x_intent) && (x_intent %in% supported_intents()$value)) {
      x_extn <- supported_intents()$extension[supported_intents()$value == x_intent]
    } else {
      warning("The CIFTI intent was unknown, so smoothing as a dscalar.")
      x_extn <- "dscalar.nii"
    }
    ## Get brainstructures; if not recorded, infer them from the data slots.
    brainstructures <- x$meta$cifti$brainstructures
    if (is.null(brainstructures)) {
      brainstructures <- vector("character", 0)
      if (!is.null(x$data$cortex_left)) { brainstructures <- c(brainstructures, "left") }
      if (!is.null(x$data$cortex_right)) { brainstructures <- c(brainstructures, "right") }
      if (!is.null(x$data$subcort)) { brainstructures <- c(brainstructures, "subcortical") }
    }
    # Write out the CIFTI: the Workbench command operates on files.
    cifti_original_fname <- file.path(tempdir(), paste0("to_smooth.", x_extn))
    write_cifti(x, cifti_original_fname, verbose=FALSE)
    # Set the target CIFTI file name if null.
    if (is.null(cifti_target_fname)) {
      cifti_target_fname <- gsub(
        "to_smooth.", "smoothed.",
        cifti_original_fname, fixed=TRUE
      )
    }
    # Write out any surfaces carried by the "xifti", unless file names were
    # explicitly provided.
    if (is.null(surfL_fname) && !is.null(x$surf$cortex_left)) {
      surfL_fname <- file.path(tempdir(), "left.surf.gii")
      write_surf_gifti(x$surf$cortex_left, surfL_fname, hemisphere="left")
    }
    if (is.null(surfR_fname) && !is.null(x$surf$cortex_right)) {
      surfR_fname <- file.path(tempdir(), "right.surf.gii")
      write_surf_gifti(x$surf$cortex_right, surfR_fname, hemisphere="right")
    }
  } else {
    cifti_original_fname <- x
    stopifnot(file.exists(cifti_original_fname))
    # Get metadata.
    cifti_info <- info_cifti(cifti_original_fname)
    ## Get brainstructures.
    brainstructures <- cifti_info$cifti$brainstructures
    # Set the target CIFTI file name if null.
    if (is.null(cifti_target_fname)) {
      cifti_target_fname <- file.path(tempdir(), basename(cifti_original_fname))
    }
  }

  # If the input is a .dlabel file, the target should be .dscalar not .dlabel,
  # because smoothed label indices are no longer integers.
  fix_dlabel <- FALSE
  if (!is.null(cifti_info$cifti$intent)) {
    if (cifti_info$cifti$intent == 3007) {
      warning(paste(
        "Smoothing a label file will convert the labels to their numeric",
        "indices. Coercing `cifti_target_fname` to a \".dscalar\" file.\n"
      ))
      fix_dlabel <- TRUE
      cifti_target_fname <- gsub(
        "dlabel.nii", "dscalar.nii",
        cifti_target_fname, fixed=TRUE
      )
    }
  }

  # Begin the Connectome Workbench command.
  cmd <- paste(
    "-cifti-smoothing",
    sys_path(cifti_original_fname),
    surface_sigma,
    volume_sigma,
    "COLUMN",
    sys_path(cifti_target_fname)
  )

  # Add default surface(s) where missing ---------------------------------------
  # If cortex data is present but its surface geometry is missing, use the
  # surface included with `ciftiTools`, resampled to the data resolution.
  if ("left" %in% brainstructures && is.null(surfL_fname)) {
    ciftiTools_warn(paste(
      "No left surface provided to `smooth_cifti`,",
      "so using the surface included in `ciftiTools`."
    ))
    if (!is.xifti(x, messages=FALSE)) { x <- read_cifti(x, brainstructures=brainstructures) }
    ## Infer the resolution. Try in this order: `resamp_res`, medial wall
    ## mask length, data length.
    if (!is.null(x$meta$cifti$resamp_res)) {
      x_res <- x$meta$cifti$resamp_res
    } else if (!is.null(x$meta$cortex$medial_wall_mask$left)) {
      x_res <- length(x$meta$cortex$medial_wall_mask$left)
    } else {
      if (!is.null(x$data$cortex_left) && !is.null(x$data$cortex_right)) {
        if (nrow(x$data$cortex_left) != nrow(x$data$cortex_right)) {
          stop(paste(
            "The cortex resolution needs to be known to resample the cortex surface",
            "for use in smoothing. But, there was no resampling resolution",
            "or left medial wall mask in the `xifti`. Furthermore, the number of",
            "data vertices differed between the left and right cortices, meaning",
            "the cortex resolution cannot be inferred in any way."
          ))
        }
      }
      warning(paste(
        "No resampling resolution or left medial wall mask in the `xifti`.",
        "Using the number of left cortex vertices. This may cause an error if",
        "medial wall values were masked out."
      ))
      x_res <- nrow(x$data$cortex_left)
    }
    surfL_fname <- file.path(tempdir(), "left.surf.gii")
    surfL_fname <- resample_gifti(
      demo_files()$surf["left"],
      surfL_fname, hemisphere="left", file_type="surface", resamp_res=x_res
    )
  }
  if ("right" %in% brainstructures && is.null(surfR_fname)) {
    ciftiTools_warn(paste(
      "No right surface provided to `smooth_cifti`,",
      "so using the surface included in `ciftiTools`."
    ))
    if (!is.xifti(x, messages=FALSE)) { x <- read_cifti(x, brainstructures=brainstructures) }
    ## Infer the resolution. Try in this order: `resamp_res`, medial wall
    ## mask length, data length.
    if (!is.null(x$meta$cifti$resamp_res)) {
      x_res <- x$meta$cifti$resamp_res
    } else if (!is.null(x$meta$cortex$medial_wall_mask$right)) {
      x_res <- length(x$meta$cortex$medial_wall_mask$right)
    } else {
      # [Fixed] Compare the LEFT cortex against the RIGHT cortex, mirroring
      # the left-hemisphere branch above. Previously this compared the right
      # cortex to itself, so a vertex-count mismatch could never be detected.
      if (!is.null(x$data$cortex_left) && !is.null(x$data$cortex_right)) {
        if (nrow(x$data$cortex_left) != nrow(x$data$cortex_right)) {
          stop(paste(
            "The cortex resolution needs to be known to resample the cortex surface",
            "for use in smoothing. But, there was no resampling resolution",
            "or right medial wall mask in the `xifti`. Furthermore, the number of",
            "data vertices differed between the left and right cortices, meaning",
            "the cortex resolution cannot be inferred in any way."
          ))
        }
      }
      warning(paste(
        "No resampling resolution or right medial wall mask in the `xifti`.",
        "Using the number of right cortex vertices. This may cause an error if",
        "medial wall values were masked out."
      ))
      x_res <- nrow(x$data$cortex_right)
    }
    surfR_fname <- file.path(tempdir(), "right.surf.gii")
    surfR_fname <- resample_gifti(
      demo_files()$surf["right"],
      surfR_fname, hemisphere="right", file_type="surface", resamp_res=x_res
    )
  }

  # Build and run command ------------------------------------------------------
  if (!is.null(surfL_fname)) { cmd <- paste(cmd, "-left-surface", sys_path(surfL_fname)) }
  if (!is.null(surfR_fname)) { cmd <- paste(cmd, "-right-surface", sys_path(surfR_fname)) }
  if (!is.null(cerebellum_fname)) { cmd <- paste(cmd, "-cerebellum-surface", sys_path(cerebellum_fname)) }
  if (subcortical_zeroes_as_NA) { cmd <- paste(cmd, "-fix-zeros-volume") }
  if (cortical_zeroes_as_NA) { cmd <- paste(cmd, "-fix-zeros-surface") }
  if (subcortical_merged) { cmd <- paste(cmd, "-merged-volume") }
  run_wb_cmd(cmd, wb_path)

  # Fix .dlabel output ---------------------------------------------------------
  # Convert the (now non-integer) label map to a scalar map, preserving the
  # original column names.
  if (fix_dlabel) {
    old_target_fname <- cifti_target_fname
    cifti_target_fname <- gsub("dlabel", "dscalar", old_target_fname)
    names_fname <- tempfile()
    cat(names(cifti_info$cifti$labels), file = names_fname, sep = "\n")
    run_wb_cmd(
      paste(
        "-cifti-change-mapping", old_target_fname,
        "ROW", cifti_target_fname,
        "-scalar", "-name-file", names_fname
      ),
      wb_path
    )
  }

  # Return results -------------------------------------------------------------
  if (input_is_xifti) {
    return(read_xifti(cifti_target_fname, brainstructures=brainstructures))
  } else {
    return(invisible(cifti_target_fname))
  }
}
#' @rdname smooth_cifti
#' @export
smoothCIfTI <- function(
  x, cifti_target_fname,
  surface_sigma, volume_sigma,
  surfL_fname=NULL, surfR_fname=NULL, cerebellum_fname=NULL,
  subcortical_zeroes_as_NA=FALSE, cortical_zeroes_as_NA=FALSE,
  subcortical_merged=FALSE,
  wb_path=NULL){
  # Alias of `smooth_cifti`: every argument is forwarded unchanged.
  smooth_cifti(
    x = x,
    cifti_target_fname = cifti_target_fname,
    surface_sigma = surface_sigma,
    volume_sigma = volume_sigma,
    surfL_fname = surfL_fname,
    surfR_fname = surfR_fname,
    cerebellum_fname = cerebellum_fname,
    subcortical_zeroes_as_NA = subcortical_zeroes_as_NA,
    cortical_zeroes_as_NA = cortical_zeroes_as_NA,
    subcortical_merged = subcortical_merged,
    wb_path = wb_path
  )
}
#' @rdname smooth_cifti
#' @export
smoothcii <- function(
x, cifti_target_fname,
surface_sigma, volume_sigma,
surfL_fname=NULL, surfR_fname=NULL, cerebellum_fname=NULL,
subcortical_zeroes_as_NA=FALSE, cortical_zeroes_as_NA=FALSE,
subcortical_merged=FALSE,
wb_path=NULL){
# Abbreviated alias of `smooth_cifti`; all arguments are forwarded unchanged.
smooth_cifti(
x=x, cifti_target_fname=cifti_target_fname,
surface_sigma=surface_sigma, volume_sigma=volume_sigma,
surfL_fname=surfL_fname, surfR_fname=surfR_fname, cerebellum_fname=cerebellum_fname,
subcortical_zeroes_as_NA=subcortical_zeroes_as_NA, cortical_zeroes_as_NA=cortical_zeroes_as_NA,
subcortical_merged=subcortical_merged,
wb_path=wb_path
)
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dc.acs.employment-package.R
\docType{package}
\name{dc.acs.employment-package}
\alias{dc.acs.employment}
\alias{dc.acs.employment-package}
\title{dc.acs.employment: Social Data Commons: Education & Training: Employment Rate}
\description{
Allows user to easily get employment rate data from the SDAD Data Commons. Data can be provided directly or in a file.
}
\author{
\strong{Maintainer}: Hanna Charankevich \email{hc2cc@virginia.edu}
Authors:
\itemize{
\item Social and Decision Analytics Division (SDAD), Biocomplexity Institute, University of Virginia
}
}
\keyword{internal}
| /man/dc.acs.employment-package.Rd | permissive | uva-bi-sdad/dc.acs.employment | R | false | true | 661 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dc.acs.employment-package.R
\docType{package}
\name{dc.acs.employment-package}
\alias{dc.acs.employment}
\alias{dc.acs.employment-package}
\title{dc.acs.employment: Social Data Commons: Education & Training: Employment Rate}
\description{
Allows user to easily get employment rate data from the SDAD Data Commons. Data can be provided directly or in a file.
}
\author{
\strong{Maintainer}: Hanna Charankevich \email{hc2cc@virginia.edu}
Authors:
\itemize{
\item Social and Decision Analytics Division (SDAD), Biocomplexity Institute, University of Virginia
}
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/old_functions.R
\name{get_domain_rating}
\alias{get_domain_rating}
\title{Get Domain Rating}
\usage{
get_domain_rating(url, api_key)
}
\arguments{
\item{url}{URL we want the rank for}
\item{api_key}{API key (also called token)}
}
\value{
A single number
}
\description{
Deprecated: will be removed soon.
}
\examples{
\dontrun{
get_domain_rating('google.com', ahrefs_key)
}
}
| /man/get_domain_rating.Rd | no_license | mhairi/ahrefs | R | false | true | 455 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/old_functions.R
\name{get_domain_rating}
\alias{get_domain_rating}
\title{Get Domain Rating}
\usage{
get_domain_rating(url, api_key)
}
\arguments{
\item{url}{URL we want the rank for}
\item{api_key}{API key (also called token)}
}
\value{
A single number
}
\description{
Depreciated: will be removed soon
}
\examples{
\dontrun{
get_domain_rating('google.com', ahrefs_key)
}
}
|
#' Convert a tibble to a plain data frame
#'
#' Wraps `as.data.frame()`, guaranteeing that `stringsAsFactors` is `FALSE`
#' so character columns are never silently converted to factors.
#' @param tbl A tibble.
#' @noRd
asdf <- function(tbl) {
  as.data.frame(tbl, stringsAsFactors = FALSE)
}
#' The default attribute theme
#' @noRd
#' @importFrom dplyr tribble
attr_theme_default <- function() {
  # NOTE: row order matters -- the directional themes (`lr`, `tb`, `rl`, `bt`,
  # `kk`) drop this table's first row (the `layout` attribute) and prepend
  # their own graph-level attributes.
  theme_tbl <- dplyr::tribble(
    ~attr,         ~value,       ~attr_type,
    "layout",      "neato",      "graph",
    "outputorder", "edgesfirst", "graph",
    "bgcolor",     "white",      "graph",
    "fontname",    "Helvetica",  "node",
    "fontsize",    "10",         "node",
    "shape",       "circle",     "node",
    "fixedsize",   "true",       "node",
    "width",       "0.5",        "node",
    "style",       "filled",     "node",
    "fillcolor",   "aliceblue",  "node",
    "color",       "gray70",     "node",
    "fontcolor",   "gray50",     "node",
    "fontname",    "Helvetica",  "edge",
    "fontsize",    "8",          "edge",
    "len",         "1.5",        "edge",
    "color",       "gray80",     "edge",
    "arrowsize",   "0.5",        "edge"
  )
  asdf(theme_tbl)
}
#' The lr attribute theme
#' @noRd
#' @importFrom dplyr bind_rows tribble
attr_theme_lr <- function() {
  # Graph-level attributes for a left-to-right `dot` layout; all remaining
  # node/edge attributes are inherited from the default theme (whose first
  # row, `layout`, is dropped and replaced here).
  graph_attrs <- asdf(
    dplyr::tribble(
      ~attr,     ~value, ~attr_type,
      "layout",  "dot",  "graph",
      "rankdir", "LR",   "graph"
    )
  )
  dplyr::bind_rows(graph_attrs, attr_theme_default()[-1, ])
}
#' The tb attribute theme
#' @noRd
#' @importFrom dplyr bind_rows tribble
attr_theme_tb <- function() {
  # Top-to-bottom "dot" layout; reuse the default theme minus its "layout" row.
  graph_attrs <- asdf(
    dplyr::tribble(
      ~attr, ~value, ~attr_type,
      "layout", "dot", "graph",
      "rankdir", "TB", "graph"
    )
  )
  dplyr::bind_rows(graph_attrs, attr_theme_default()[-1, ])
}
#' The rl attribute theme
#' @noRd
#' @importFrom dplyr bind_rows tribble
attr_theme_rl <- function() {
  # Right-to-left "dot" layout; reuse the default theme minus its "layout" row.
  graph_attrs <- asdf(
    dplyr::tribble(
      ~attr, ~value, ~attr_type,
      "layout", "dot", "graph",
      "rankdir", "RL", "graph"
    )
  )
  dplyr::bind_rows(graph_attrs, attr_theme_default()[-1, ])
}
#' The bt attribute theme
#' @noRd
#' @importFrom dplyr bind_rows tribble
attr_theme_bt <- function() {
  # Bottom-to-top "dot" layout; reuse the default theme minus its "layout" row.
  graph_attrs <- asdf(
    dplyr::tribble(
      ~attr, ~value, ~attr_type,
      "layout", "dot", "graph",
      "rankdir", "BT", "graph"
    )
  )
  dplyr::bind_rows(graph_attrs, attr_theme_default()[-1, ])
}
#' The fdp attribute theme
#' @noRd
attr_theme_fdp <- function() {
  # Same structure as the default theme, but using the "fdp" layout engine
  # with smaller fonts and nodes. Built as a base data frame (character
  # columns, no factors), matching what tribble() + as.data.frame() produced.
  attr_names <- c(
    "layout", "outputorder", "bgcolor",
    "fontname", "fontsize", "shape", "fixedsize", "width",
    "style", "fillcolor", "color", "fontcolor",
    "fontname", "fontsize", "len", "color", "arrowsize"
  )
  attr_values <- c(
    "fdp", "edgesfirst", "white",
    "Helvetica", "5", "circle", "true", "0.12",
    "filled", "aliceblue", "gray70", "gray70",
    "Helvetica", "5", "1.5", "gray80", "0.5"
  )
  data.frame(
    attr = attr_names,
    value = attr_values,
    attr_type = c(rep("graph", 3), rep("node", 9), rep("edge", 5)),
    stringsAsFactors = FALSE
  )
}
#' The kk attribute theme
#' @noRd
#' @importFrom dplyr bind_rows tribble
attr_theme_kk <- function() {
  # Kamada-Kawai: "neato" layout with mode "KK"; reuse the default theme
  # minus its "layout" row.
  graph_attrs <- asdf(
    dplyr::tribble(
      ~attr, ~value, ~attr_type,
      "layout", "neato", "graph",
      "mode", "KK", "graph"
    )
  )
  dplyr::bind_rows(graph_attrs, attr_theme_default()[-1, ])
}
| /R/attr_themes.R | permissive | tvc-amaffei/DiagrammeR | R | false | false | 4,750 | r | #' Transform a tibble to a data frame
#'
#' Simply uses \code{as.data.frame()} with an input tibble and ensures that
#' \code{stringsAsFactors} is \code{FALSE}.
#' @param tbl A tibble.
#' @noRd
asdf <- function(tbl) {
  # Explicit call instead of a pipe; stringsAsFactors = FALSE keeps
  # character columns from being converted to factors.
  as.data.frame(tbl, stringsAsFactors = FALSE)
}
#' The default attribute theme
#' @noRd
attr_theme_default <- function() {
  # Graph-, node-, and edge-level Graphviz attributes for the default look.
  # Built directly as a base data frame (character columns, no factors),
  # matching what tribble() + as.data.frame() produced.
  attr_names <- c(
    "layout", "outputorder", "bgcolor",
    "fontname", "fontsize", "shape", "fixedsize", "width",
    "style", "fillcolor", "color", "fontcolor",
    "fontname", "fontsize", "len", "color", "arrowsize"
  )
  attr_values <- c(
    "neato", "edgesfirst", "white",
    "Helvetica", "10", "circle", "true", "0.5",
    "filled", "aliceblue", "gray70", "gray50",
    "Helvetica", "8", "1.5", "gray80", "0.5"
  )
  data.frame(
    attr = attr_names,
    value = attr_values,
    attr_type = c(rep("graph", 3), rep("node", 9), rep("edge", 5)),
    stringsAsFactors = FALSE
  )
}
#' The lr attribute theme
#' @noRd
#' @importFrom dplyr bind_rows tribble
attr_theme_lr <- function() {
  # Left-to-right "dot" layout; reuse the default theme minus its "layout" row.
  graph_attrs <- asdf(
    dplyr::tribble(
      ~attr, ~value, ~attr_type,
      "layout", "dot", "graph",
      "rankdir", "LR", "graph"
    )
  )
  dplyr::bind_rows(graph_attrs, attr_theme_default()[-1, ])
}
#' The tb attribute theme
#' @noRd
#' @importFrom dplyr bind_rows tribble
attr_theme_tb <- function() {
  # Top-to-bottom "dot" layout; reuse the default theme minus its "layout" row.
  graph_attrs <- asdf(
    dplyr::tribble(
      ~attr, ~value, ~attr_type,
      "layout", "dot", "graph",
      "rankdir", "TB", "graph"
    )
  )
  dplyr::bind_rows(graph_attrs, attr_theme_default()[-1, ])
}
#' The rl attribute theme
#' @noRd
#' @importFrom dplyr bind_rows tribble
attr_theme_rl <- function() {
  # Right-to-left "dot" layout; reuse the default theme minus its "layout" row.
  graph_attrs <- asdf(
    dplyr::tribble(
      ~attr, ~value, ~attr_type,
      "layout", "dot", "graph",
      "rankdir", "RL", "graph"
    )
  )
  dplyr::bind_rows(graph_attrs, attr_theme_default()[-1, ])
}
#' The bt attribute theme
#' @noRd
#' @importFrom dplyr bind_rows tribble
attr_theme_bt <- function() {
  # Bottom-to-top "dot" layout; reuse the default theme minus its "layout" row.
  graph_attrs <- asdf(
    dplyr::tribble(
      ~attr, ~value, ~attr_type,
      "layout", "dot", "graph",
      "rankdir", "BT", "graph"
    )
  )
  dplyr::bind_rows(graph_attrs, attr_theme_default()[-1, ])
}
#' The fdp attribute theme
#' @noRd
attr_theme_fdp <- function() {
  # Same structure as the default theme, but using the "fdp" layout engine
  # with smaller fonts and nodes. Built as a base data frame (character
  # columns, no factors), matching what tribble() + as.data.frame() produced.
  attr_names <- c(
    "layout", "outputorder", "bgcolor",
    "fontname", "fontsize", "shape", "fixedsize", "width",
    "style", "fillcolor", "color", "fontcolor",
    "fontname", "fontsize", "len", "color", "arrowsize"
  )
  attr_values <- c(
    "fdp", "edgesfirst", "white",
    "Helvetica", "5", "circle", "true", "0.12",
    "filled", "aliceblue", "gray70", "gray70",
    "Helvetica", "5", "1.5", "gray80", "0.5"
  )
  data.frame(
    attr = attr_names,
    value = attr_values,
    attr_type = c(rep("graph", 3), rep("node", 9), rep("edge", 5)),
    stringsAsFactors = FALSE
  )
}
#' The kk attribute theme
#' @noRd
#' @importFrom dplyr bind_rows tribble
attr_theme_kk <- function() {
  # Kamada-Kawai: "neato" layout with mode "KK"; reuse the default theme
  # minus its "layout" row.
  graph_attrs <- asdf(
    dplyr::tribble(
      ~attr, ~value, ~attr_type,
      "layout", "neato", "graph",
      "mode", "KK", "graph"
    )
  )
  dplyr::bind_rows(graph_attrs, attr_theme_default()[-1, ])
}
|
\name{MEDIPS.selectSignificants}
\alias{MEDIPS.selectSignificants}
\title{
Selects candidate ROIs that show significant differential methylation between two MEDIPS SETs.
}
\description{
Based on the results matrix returned from the MEDIPS.diffMethyl function,
the function selects candidate ROIs that show significant differential methylation between the CONTROL.SET and the TREAT.SET in consideration of the background data included in the INPUT.SET.
Filtering for significant frames proceeds in the following order:
ROIs that do not contain any data either in the CONTROL.SET nor in the TREAT.SET are neglected first;
ROIs associated to p-values > p.value are neglected;
ROIs with a CONTROL/TREATMENT ratio < up (or > down, respectively) are neglected;
From the INPUT mean rpm distribution, a mean rpm threshold was defined by the quant parameter and
all ROIs that have a mean rpm value within the CONTROL.SET (or TREAT.SET, respectively) smaller than the estimated background rpm threshold are discarded;
The last filter is again based on the INPUT data. While the latter filter estimates a minimum rpm signal for the CONTROL.SET (or TREAT.SET, respectively) from the total background distribution,
we now define that the rpm value from the CONTROL SET (or TREAT.SET, respectively) of a ROI exceeds the local background data of the INPUT.SET by the parameter up.
This is, because MeDIP-Seq background data varies along the chromosomes due to varying DNA availability.
}
\usage{
MEDIPS.selectSignificants(frames = NULL, input = T, control = T, up = 1.333333, down = 0.75, p.value = 0.01,quant = 0.9)
}
\arguments{
\item{frames}{
specifies the results table derived from the MEDIPS.diffMethyl
}
\item{input}{
default=T; Setting the parameter to TRUE requires that the results table includes a column for summarized rpm values of an INPUT SET.
In case, there is no INPUT data available, the input parameter has to be set to a rpm value that will be used as threshold during the subsequent analysis.
How to estimate such a threshold without background data is not yet solved by MEDIPS.
}
\item{control}{
can be either TRUE or FALSE;
MEDIPS allows for selecting frames that are higher methylated in the CONTROL SET compared to the TREAT SET and vice versa but both approaches have to be performed in two independent runs.
By setting control=T, MEDIPS selects genomic regions, where the CONTROL SET is higher methylated.
By setting control=F, MEDIPS selects genomic regions, where the TREAT SET is higher methylated.
}
\item{up}{
default=1.333333; defines the lower threshold for the ratio CONTROL/TREAT as well as for the lower ratio for CONTROL/INPUT (if control=T) or TREATMENT/INPUT (if control=F), respectively.
}
\item{down}{
default=0.75; defines the upper threshold for the ratio: CONTROL/TREATMENT (only if control=F).
}
\item{p.value}{
default=0.01; defines the threshold for the p-values.
One of the p-values derived from the wilcox.test or t.test function has to be <= p.value.
}
\item{quant}{
default=0.9; from the distribution of all summarized INPUT rpm values,
MEDIPS calculates the rpm value that represents the quant quantile of the whole INPUT distribution.}
}
\value{
\item{chr}{the chromosome of the ROI}
\item{start}{the start position of the ROI}
\item{stop}{the stop position of the ROI}
\item{length}{the number of genomic bins included in the ROI}
\item{coupling}{the mean coupling factor of the ROI}
\item{input}{the mean reads per million value of the INPUT MEDIPS SET at input (if provided)}
\item{rpm_A}{the mean reads per million value for the MEDIPS SET at data1}
\item{rpm_B}{the mean reads per million value for the MEDIPS SET at data2}
\item{rms_A}{the mean relative methylation score for the MEDIPS SET at data1}
\item{rms_B}{the mean relative methylation score for the MEDIPS SET at data2}
\item{ams_A}{the mean absolute methylation score for the MEDIPS SET at data1.
The ams scores are derived by dividing the mean rms value of the ROI by the mean coupling factor of the ROI before the log2 and interval transformations are performed.}
\item{ams_B}{the mean absolute methylation score for the MEDIPS SET at data2.
The ams scores are derived by dividing the mean rms value of the ROI by the mean coupling factor of the ROI before the log2 and interval transformations are performed.}
\item{var_A}{the variance of the rpm or rms values (please see the parameter select) of the MEDIPS SET at data1}
\item{var_B}{the variance of the rpm or rms values (please see the parameter select) of the MEDIPS SET at data2}
\item{var_co_A}{the variance coefficient of the rpm or rms values (please see the parameter select) of the MEDIPS SET at data1}
\item{var_co_B}{the variance coefficient of the rpm or rms values (please see the parameter select) of the MEDIPS SET at data2}
\item{ratio}{rpm_A/rpm_B or rms_A/rms_B, respectively (please see the parameter select)}
\item{pvalue.wilcox}{the p.value returned by R's wilcox.test function for comparing the rpm values (or rms values, respectively; please see the parameter select)
of the MEDIPS SET at data1 and of the MEDIPS SET at data2}
\item{pvalue.ttest}{the p.value returned by R's t.test function for comparing the rpm values (or rms values, respectively; please see the parameter select)
of the MEDIPS SET at data1 and of the MEDIPS SET at data2}
}
\author{
Lukas Chavez
}
\examples{
library(BSgenome.Hsapiens.UCSC.hg19)
file=system.file("extdata", "MeDIP_hESCs_chr22.txt", package="MEDIPS")
CONTROL.SET = MEDIPS.readAlignedSequences(BSgenome="BSgenome.Hsapiens.UCSC.hg19", file=file)
CONTROL.SET = MEDIPS.genomeVector(data = CONTROL.SET, bin_size = 50, extend = 400)
CONTROL.SET = MEDIPS.getPositions(data = CONTROL.SET, pattern = "CG")
CONTROL.SET = MEDIPS.couplingVector(data = CONTROL.SET, fragmentLength = 700, func = "count")
CONTROL.SET = MEDIPS.calibrationCurve(data = CONTROL.SET)
CONTROL.SET = MEDIPS.normalize(data = CONTROL.SET)
file=system.file("extdata", "MeDIP_DE_chr22.txt", package="MEDIPS")
TREAT.SET = MEDIPS.readAlignedSequences(BSgenome = "BSgenome.Hsapiens.UCSC.hg19", file = file)
TREAT.SET = MEDIPS.genomeVector(data = TREAT.SET, bin_size = 50, extend = 400)
TREAT.SET = MEDIPS.getPositions(data = TREAT.SET, pattern = "CG")
TREAT.SET = MEDIPS.couplingVector(data = TREAT.SET, fragmentLength = 700, func = "count")
TREAT.SET = MEDIPS.calibrationCurve(data = TREAT.SET)
TREAT.SET = MEDIPS.normalize(data = TREAT.SET)
file=system.file("extdata", "Input_StemCells_chr22.txt", package="MEDIPS")
INPUT.SET = MEDIPS.readAlignedSequences(BSgenome = "BSgenome.Hsapiens.UCSC.hg19", file = file)
INPUT.SET = MEDIPS.genomeVector(data = INPUT.SET, bin_size = 50, extend = 400)
diff.methyl = MEDIPS.methylProfiling(data1 = CONTROL.SET, data2= TREAT.SET, input=INPUT.SET, chr="chr22", frame_size=1000, select=1)
diff.methyl.sig=MEDIPS.selectSignificants(diff.methyl)
}
| /man/MEDIPS.selectSignificants.Rd | no_license | HUNNNGRY/MEDIPS1.0.0 | R | false | false | 6,883 | rd | \name{MEDIPS.selectSignificants}
\alias{MEDIPS.selectSignificants}
\title{
Selects candidate ROIs that show significant differential methylation between two MEDIPS SETs.
}
\description{
Based on the results matrix returned from the MEDIPS.diffMethyl function,
the function selects candidate ROIs that show significant differential methylation between the CONTROL.SET and the TREAT.SET in consideration of the background data included in the INPUT.SET.
Filtering for significant frames proceeds in the following order:
ROIs that do not contain any data either in the CONTROL.SET nor in the TREAT.SET are neglected first;
ROIs associated to p-values > p.value are neglected;
ROIs with a CONTROL/TREATMENT ratio < up (or > down, respectively) are neglected;
From the INPUT mean rpm distribution, a mean rpm threshold was defined by the quant parameter and
all ROIs that have a mean rpm value within the CONTROL.SET (or TREAT.SET, respectively) smaller than the estimated background rpm threshold are discarded;
The last filter is again based on the INPUT data. While the latter filter estimates a minimum rpm signal for the CONTROL.SET (or TREAT.SET, respectively) from the total background distribution,
we now define that the rpm value from the CONTROL SET (or TREAT.SET, respectively) of a ROI exceeds the local background data of the INPUT.SET by the parameter up.
This is, because MeDIP-Seq background data varies along the chromosomes due to varying DNA availability.
}
\usage{
MEDIPS.selectSignificants(frames = NULL, input = T, control = T, up = 1.333333, down = 0.75, p.value = 0.01,quant = 0.9)
}
\arguments{
\item{frames}{
specifies the results table derived from the MEDIPS.diffMethyl
}
\item{input}{
default=T; Setting the parameter to TRUE requires that the results table includes a column for summarized rpm values of an INPUT SET.
In case, there is no INPUT data available, the input parameter has to be set to a rpm value that will be used as threshold during the subsequent analysis.
How to estimate such a threshold without background data is not yet solved by MEDIPS.
}
\item{control}{
can be either TRUE or FALSE;
MEDIPS allows for selecting frames that are higher methylated in the CONTROL SET compared to the TREAT SET and vice versa but both approaches have to be performed in two independent runs.
By setting control=T, MEDIPS selects genomic regions, where the CONTROL SET is higher methylated.
By setting control=F, MEDIPS selects genomic regions, where the TREAT SET is higher methylated.
}
\item{up}{
default=1.333333; defines the lower threshold for the ratio CONTROL/TREAT as well as for the lower ratio for CONTROL/INPUT (if control=T) or TREATMENT/INPUT (if control=F), respectively.
}
\item{down}{
default=0.75; defines the upper threshold for the ratio: CONTROL/TREATMENT (only if control=F).
}
\item{p.value}{
default=0.01; defines the threshold for the p-values.
One of the p-values derived from the wilcox.test or t.test function has to be <= p.value.
}
\item{quant}{
default=0.9; from the distribution of all summarized INPUT rpm values,
MEDIPS calculates the rpm value that represents the quant quantile of the whole INPUT distribution.}
}
\value{
\item{chr}{the chromosome of the ROI}
\item{start}{the start position of the ROI}
\item{stop}{the stop position of the ROI}
\item{length}{the number of genomic bins included in the ROI}
\item{coupling}{the mean coupling factor of the ROI}
\item{input}{the mean reads per million value of the INPUT MEDIPS SET at input (if provided)}
\item{rpm_A}{the mean reads per million value for the MEDIPS SET at data1}
\item{rpm_B}{the mean reads per million value for the MEDIPS SET at data2}
\item{rms_A}{the mean relative methylation score for the MEDIPS SET at data1}
\item{rms_B}{the mean relative methylation score for the MEDIPS SET at data2}
\item{ams_A}{the mean absolute methylation score for the MEDIPS SET at data1.
The ams scores are derived by dividing the mean rms value of the ROI by the mean coupling factor of the ROI before the log2 and interval transformations are performed.}
\item{ams_B}{the mean absolute methylation score for the MEDIPS SET at data2.
The ams scores are derived by dividing the mean rms value of the ROI by the mean coupling factor of the ROI before the log2 and interval transformations are performed.}
\item{var_A}{the variance of the rpm or rms values (please see the parameter select) of the MEDIPS SET at data1}
\item{var_B}{the variance of the rpm or rms values (please see the parameter select) of the MEDIPS SET at data2}
\item{var_co_A}{the variance coefficient of the rpm or rms values (please see the parameter select) of the MEDIPS SET at data1}
\item{var_co_B}{the variance coefficient of the rpm or rms values (please see the parameter select) of the MEDIPS SET at data2}
\item{ratio}{rpm_A/rpm_B or rms_A/rms_B, respectively (please see the parameter select)}
\item{pvalue.wilcox}{the p.value returned by R's wilcox.test function for comparing the rpm values (or rms values, respectively; please see the parameter select)
of the MEDIPS SET at data1 and of the MEDIPS SET at data2}
\item{pvalue.ttest}{the p.value returned by R's t.test function for comparing the rpm values (or rms values, respectively; please see the parameter select)
of the MEDIPS SET at data1 and of the MEDIPS SET at data2}
}
\author{
Lukas Chavez
}
\examples{
library(BSgenome.Hsapiens.UCSC.hg19)
file=system.file("extdata", "MeDIP_hESCs_chr22.txt", package="MEDIPS")
CONTROL.SET = MEDIPS.readAlignedSequences(BSgenome="BSgenome.Hsapiens.UCSC.hg19", file=file)
CONTROL.SET = MEDIPS.genomeVector(data = CONTROL.SET, bin_size = 50, extend = 400)
CONTROL.SET = MEDIPS.getPositions(data = CONTROL.SET, pattern = "CG")
CONTROL.SET = MEDIPS.couplingVector(data = CONTROL.SET, fragmentLength = 700, func = "count")
CONTROL.SET = MEDIPS.calibrationCurve(data = CONTROL.SET)
CONTROL.SET = MEDIPS.normalize(data = CONTROL.SET)
file=system.file("extdata", "MeDIP_DE_chr22.txt", package="MEDIPS")
TREAT.SET = MEDIPS.readAlignedSequences(BSgenome = "BSgenome.Hsapiens.UCSC.hg19", file = file)
TREAT.SET = MEDIPS.genomeVector(data = TREAT.SET, bin_size = 50, extend = 400)
TREAT.SET = MEDIPS.getPositions(data = TREAT.SET, pattern = "CG")
TREAT.SET = MEDIPS.couplingVector(data = TREAT.SET, fragmentLength = 700, func = "count")
TREAT.SET = MEDIPS.calibrationCurve(data = TREAT.SET)
TREAT.SET = MEDIPS.normalize(data = TREAT.SET)
file=system.file("extdata", "Input_StemCells_chr22.txt", package="MEDIPS")
INPUT.SET = MEDIPS.readAlignedSequences(BSgenome = "BSgenome.Hsapiens.UCSC.hg19", file = file)
INPUT.SET = MEDIPS.genomeVector(data = INPUT.SET, bin_size = 50, extend = 400)
diff.methyl = MEDIPS.methylProfiling(data1 = CONTROL.SET, data2= TREAT.SET, input=INPUT.SET, chr="chr22", frame_size=1000, select=1)
diff.methyl.sig=MEDIPS.selectSignificants(diff.methyl)
}
|
# Exploratory Data Analysis, plot 1: histogram of Global Active Power for
# 2007-02-01 and 2007-02-02.
# NOTE(review): nrows = 70000 assumes the two target dates fall within the
# first 70,000 rows of the file -- confirm against the full data set.
Data1 <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                    na.strings = "?", nrows = 70000)
# Keep only the two days of interest (dates are stored as d/m/yyyy strings).
Data2 <- subset(Data1, Date == "1/2/2007" | Date == "2/2/2007")
png(filename = "plot1.png", width = 480, height = 480)
# Call hist() on the column directly: the original wrapped it in with()
# while still writing Data2$..., so the with() added nothing.
hist(Data2$Global_active_power, col = "red",
     xlab = "Global Active Power (kilowatts)", main = "Global Active Power")
dev.off() | /plot1.R | no_license | prithviml/ExData_Plotting1 | R | false | false | 341 | r | Data1<-read.table("household_power_consumption.txt", header = TRUE, sep=";",na.strings="?",nrows=70000)
# Keep only the two days of interest (dates are stored as d/m/yyyy strings).
Data2 <- subset(Data1, Date == "1/2/2007" | Date == "2/2/2007")
png(filename = "plot1.png", width = 480, height = 480)
# Call hist() on the column directly: the original wrapped it in with()
# while still writing Data2$..., so the with() added nothing.
hist(Data2$Global_active_power, col = "red",
     xlab = "Global Active Power (kilowatts)", main = "Global Active Power")
dev.off()
# clustering, logistic regression and PCA
# Builds a 110 x 3 data frame of AD and non-AD individuals with the chosen
# dimensions: age, gender, allele ratio (log sum).
# NOTE(review): rm(list=ls()) wipes the entire workspace -- acceptable for a
# stand-alone script, dangerous if this file is sourced interactively.
rm(list=ls())
# Per-individual tables for AD cases and non-AD controls (local file paths).
AD_info = read.table('/home/macrina/Documents/BioinfCoure/project/ROSMAP_Mito/ADindRat.txt',header=TRUE)
NAD_info = read.table('/home/macrina/Documents/BioinfCoure/project/ROSMAP_Mito/NADindRat.txt',header=TRUE)
numAD = nrow(AD_info)
numNAD = nrow(NAD_info)
# combine AD_info and NAD_info: AD rows first, controls after (order matters
# for the class labels built below).
dataIP = rbind(AD_info,NAD_info)
# Keep columns 2, 3 and 9 -- presumably age, gender and allelic ratio, per
# the dimnames assigned next; confirm against the input files.
dataIPTrunc = subset(dataIP,select=c(2,3,9))
dimnames(dataIPTrunc) = list(c(1:110),c('age','gender','allelic_ratio'))
dataIPTrunc = apply(dataIPTrunc,c(1,2),as.numeric)
# Standardize age and allelic ratio (z-scores); gender is kept as-is.
mydata1 = scale(dataIPTrunc[,1])
mydata2 = scale(dataIPTrunc[,3])
mydata = cbind(mydata1,dataIPTrunc[,2],mydata2)
mydata <-data.frame(mydata)
#mydata$X2 = as.factor(mydata$X2)
# Class labels: 1 = AD, 0 = non-AD, aligned with the rbind() order above.
indices = c(rep(1,numAD),rep(0,numNAD))
# plotting data: 3D scatter of the three predictors, colored by class.
#install.packages("scatterplot3d")
library(scatterplot3d)
colnames(mydata) = c('age','gender','allele_ratio')
# data frame used only for the plot (adds the class label as `group`)
DF <- data.frame(mydata$age,mydata$gender,mydata$allele_ratio,group = indices)
# create the plot: green = AD (first numAD points), blue = non-AD
s3d <- with(DF, scatterplot3d(mydata$age, mydata$gender, mydata$allele_ratio, color = c(rep('green',numAD),rep('blue',numNAD)), pch = 19))
#legend
#legend(s3d$(mydata$age, mydata$gender, mydata$allele_ratiomydata$age, mydata$gender, mydata$allele_ratio.convert(0.5, 0.7, 0.5), pch = 19, yjust=0,legend = levels(DF$group), col = seq_along(levels(DF$group)))
#mydata = cbind(mydata,indices)
# logistic regression fitted on the full data set (no train/test split)
fit.logit <- glm(indices~mydata$age+mydata$gender+mydata$allele_ratio,family="binomial")
fit.logit$coefficients
# probit regression: same predictors, probit link
fit.probit <- glm(indices~mydata$age+mydata$gender+mydata$allele_ratio,family=binomial(link="probit"))
fit.probit$coefficients
# Train/test split: 70% train, 30% test (77 / 33 of the 110 rows).
numTrain = ceiling(0.7*110)
numTest = 110 - numTrain
# randomly generate numTrain row indices for training (without replacement)
indTrain = sample(1:110,numTrain,replace=FALSE)
mydataTrain = mydata[indTrain,]
mydataTrain1 = mydataTrain
indTest = setdiff(1:110, indTrain)
#rownames(mydataTest) = c(1:numTest)
# logistic regression fitted on the training rows only.
# NOTE(review): the formula references mydataTrain$... directly instead of
# column names with a data= argument, so predict() below only works because
# the name `mydataTrain` is later rebound to the test set -- fragile; verify
# carefully before refactoring.
fit.logit2 <- glm(indices[indTrain]~mydataTrain$age+mydataTrain$gender+mydataTrain$allele_ratio,family="binomial")
fit.logit2$coefficients
# probit regression on the same training rows
fit.probit2 <- glm(indices[indTrain]~mydataTrain$age+mydataTrain$gender+mydataTrain$allele_ratio,family=binomial(link="probit"))
fit.probit2$coefficients
summary(fit.logit2)
summary(fit.probit2)
# testing: the test data is deliberately named mydataTrain for convenience,
# so predict() re-evaluates the formula's mydataTrain$... terms on the test
# rows (see NOTE above).
mydataTrain = mydata[indTest,]
predicted <- predict(fit.logit2,mydataTrain,type='response')
# rescaling to [0,1] (disabled)
#predicted = (predicted - min(predicted))/(max(predicted) - min(predicted))
# Classify with a 0.5 threshold on the predicted probability of AD.
indxNAD = names(which(predicted<=0.5))
indxAD = names(which(predicted>0.5))
tp = 0
fp = 0
tn = 0
fn = 0
# actual outcomes for the rows predicted AD / non-AD (indices holds 1 = AD)
actual_AD = indices[as.numeric(rownames(mydataTrain[indxAD,]))]
actual_NAD = indices[as.numeric(rownames(mydataTrain[indxNAD,]))]
# Confusion-matrix counts; "positive" means a predicted-AD row.
tp = sum(actual_AD)
tn = length(actual_NAD) - sum(actual_NAD)
fp = length(actual_AD) - sum(actual_AD)
fn = sum(actual_NAD)
# Standard binary-classification metrics on the held-out test set.
f1 = (2*tp)/(2*tp + fp + fn)
accuracy = (tp + tn)/numTest
error_rate = (1-accuracy)
tpr = tp/(tp+fn)
fpr = fp/(fp+tn)
precision = tp/(tp+fp)
print(accuracy)
#plotting logistic
#too many dimensions; dont' plot
#don't think there's need for clustering; simple plotting will do for this application
# #clustering
# # Determine number of clusters
# wss <- (nrow(mydata)-1)*sum(apply(mydata,2,var))
# for (i in 2:15) {
# wss[i] <- sum(kmeans(mydata,centers=i)$withinss)
# }
# plot(1:15, wss, type="b", xlab="Number of Clusters",ylab="Within groups sum of squares")
#
# #cluster; with 2 clusters
# fit <- kmeans(mydata, 2) # 2 cluster solution
# # get cluster means
# aggregate(mydata,by=list(fit$cluster),FUN=mean)
# # append cluster assignment
# mydata <- data.frame(mydata, fit$cluster)
#
| /ml_test.R | no_license | macrinalobo/mitochondrial-mutations | R | false | false | 3,954 | r | #clustering, logistic regression and PCA
#building the data frame
#chosen dimensions - age, gender, allele ratio (log sum)
#110 rows, 3 columns
rm(list=ls())
AD_info = read.table('/home/macrina/Documents/BioinfCoure/project/ROSMAP_Mito/ADindRat.txt',header=TRUE)
NAD_info = read.table('/home/macrina/Documents/BioinfCoure/project/ROSMAP_Mito/NADindRat.txt',header=TRUE)
numAD = nrow(AD_info)
numNAD = nrow(NAD_info)
#combine AD_info and NAD_info
dataIP = rbind(AD_info,NAD_info)
dataIPTrunc = subset(dataIP,select=c(2,3,9))
dimnames(dataIPTrunc) = list(c(1:110),c('age','gender','allelic_ratio'))
dataIPTrunc = apply(dataIPTrunc,c(1,2),as.numeric)
mydata1 = scale(dataIPTrunc[,1])
mydata2 = scale(dataIPTrunc[,3])
mydata = cbind(mydata1,dataIPTrunc[,2],mydata2)
mydata <-data.frame(mydata)
#mydata$X2 = as.factor(mydata$X2)
indices = c(rep(1,numAD),rep(0,numNAD))
#plotting data
#install.packages("scatterplot3d")
library(scatterplot3d)
colnames(mydata) = c('age','gender','allele_ratio')
# data
DF <- data.frame(mydata$age,mydata$gender,mydata$allele_ratio,group = indices)
# create the plot
s3d <- with(DF, scatterplot3d(mydata$age, mydata$gender, mydata$allele_ratio, color = c(rep('green',numAD),rep('blue',numNAD)), pch = 19))
#legend
#legend(s3d$(mydata$age, mydata$gender, mydata$allele_ratiomydata$age, mydata$gender, mydata$allele_ratio.convert(0.5, 0.7, 0.5), pch = 19, yjust=0,legend = levels(DF$group), col = seq_along(levels(DF$group)))
#mydata = cbind(mydata,indices)
#logistic regression
fit.logit <- glm(indices~mydata$age+mydata$gender+mydata$allele_ratio,family="binomial")
fit.logit$coefficients
#probit regression
fit.probit <- glm(indices~mydata$age+mydata$gender+mydata$allele_ratio,family=binomial(link="probit"))
fit.probit$coefficients
#with dividing data into train and test sets (70-30)
numTrain = ceiling(0.7*110)
numTest = 110 - numTrain
#randomly generate nuTrain indices for training
indTrain = sample(1:110,numTrain,replace=FALSE)
mydataTrain = mydata[indTrain,]
mydataTrain1 = mydataTrain
indTest = setdiff(1:110, indTrain)
#rownames(mydataTest) = c(1:numTest)
#logisti cregression
fit.logit2 <- glm(indices[indTrain]~mydataTrain$age+mydataTrain$gender+mydataTrain$allele_ratio,family="binomial")
fit.logit2$coefficients
#probit regression
fit.probit2 <- glm(indices[indTrain]~mydataTrain$age+mydataTrain$gender+mydataTrain$allele_ratio,family=binomial(link="probit"))
fit.probit2$coefficients
summary(fit.logit2)
summary(fit.probit2)
#testing
#testing data is named mydataTrain for convienece to predict function
mydataTrain = mydata[indTest,]
predicted <- predict(fit.logit2,mydataTrain,type='response')
#rescaling to [0.1]
#predicted = (predicted - min(predicted))/(max(predicted) - min(predicted))
indxNAD = names(which(predicted<=0.5))
indxAD = names(which(predicted>0.5))
tp = 0
fp = 0
tn = 0
fn = 0
#actual OP for the predcition of AD
actual_AD = indices[as.numeric(rownames(mydataTrain[indxAD,]))]
actual_NAD = indices[as.numeric(rownames(mydataTrain[indxNAD,]))]
tp = sum(actual_AD)
tn = length(actual_NAD) - sum(actual_NAD)
fp = length(actual_AD) - sum(actual_AD)
fn = sum(actual_NAD)
f1 = (2*tp)/(2*tp + fp + fn)
accuracy = (tp + tn)/numTest
error_rate = (1-accuracy)
tpr = tp/(tp+fn)
fpr = fp/(fp+tn)
precision = tp/(tp+fp)
print(accuracy)
#plotting logistic
#too many dimensions; dont' plot
#don't think there's need for clustering; simple plotting will do for this application
# #clustering
# # Determine number of clusters
# wss <- (nrow(mydata)-1)*sum(apply(mydata,2,var))
# for (i in 2:15) {
# wss[i] <- sum(kmeans(mydata,centers=i)$withinss)
# }
# plot(1:15, wss, type="b", xlab="Number of Clusters",ylab="Within groups sum of squares")
#
# #cluster; with 2 clusters
# fit <- kmeans(mydata, 2) # 2 cluster solution
# # get cluster means
# aggregate(mydata,by=list(fit$cluster),FUN=mean)
# # append cluster assignment
# mydata <- data.frame(mydata, fit$cluster)
#
|
# Ecobee -> VeSync bridge: refresh the Ecobee OAuth tokens, read the remote
# sensor temperatures, and switch a VeSync outlet on or off depending on
# time of day and room temperature. Intended to run unattended (e.g. cron).
print(paste0(Sys.time(), ": Script Started"))

library(lubridate, quietly = TRUE)
library(httr)
library(jsonlite)

print(paste0(Sys.time(), ": Libraries Loaded. Refreshing Ecobee Creds"))

# Stored credentials: access_token, refresh_token, client_id.
creds <- read.csv("/home/jacobrozran/ecobee/ecobee.config")
refresh <- paste0("https://api.ecobee.com/token?grant_type=refresh_token&code=",
                  creds$refresh_token[1], "&client_id=", creds$client_id[1])
ref <- POST(refresh)

if (!grepl("access_token", as.character(ref))) {
    print(paste0(Sys.time(), ": Auth has broken - login and fix it"))
    system(paste0("/bin/bash /home/jacobrozran/ecobee/send_notification.sh >>",
                  " /home/jacobrozran/ecobee/send_notification.log"))
    # The original used `break` here, which is an error at top level
    # ("no loop for break/next"); quit() terminates the script cleanly
    # with a non-zero exit status instead.
    quit(save = "no", status = 1)
}

# Pull the new tokens out of the OAuth response and persist them for next run.
# NOTE(review): the \\w+ patterns assume tokens are purely alphanumeric or
# underscore -- confirm Ecobee never issues tokens containing '-' or '.'.
at <- gsub("(^.*access_token\": \")(\\w+)(\".*$)", "\\2", as.character(ref))
rt <- gsub("(^.*refresh_token\": \")(\\w+)(\".*$)", "\\2", as.character(ref))
creds <- data.frame(access_token = at,
                    refresh_token = rt,
                    client_id = creds$client_id[1])
write.csv(creds, "/home/jacobrozran/ecobee/ecobee.config", row.names = FALSE)

print(paste0(Sys.time(), ": Refreshed Ecobee Creds. Getting temps."))

# Query the thermostat API (including sensor data) via curl, caching the JSON.
therm <- paste0("curl -s -H 'Content-Type: text/json' -H 'Authorization: Bearer ",
                creds$access_token[1], "' 'https://api.ecobee.com/1/thermostat?",
                "format=json&body=\\{\"selection\":\\{\"selectionType\":\"regi",
                "stered\",\"selectionMatch\":\"\",\"includeSensors\":true\\}",
                "\\}' > /home/jacobrozran/ecobee/response.json")
system(therm)
response <- read_json("/home/jacobrozran/ecobee/response.json")

print(paste0(Sys.time(), ": Got Temps. Formatting Data."))

# One row per remote sensor: name, temperature (API reports tenths of a
# degree, hence / 10), and the thermostat's UTC/local timestamps.
# NOTE(review): hard-coded 1:3 assumes the first three sensors are the ones
# of interest -- revisit if sensors are added or reordered.
info <- data.frame()
for (sensor in 1:3) {
    name <- response$thermostatList[[1]]$remoteSensors[[sensor]]$name
    temp <- as.numeric(response$thermostatList[[1]]$remoteSensors[[sensor]]$capability[[1]]$value) / 10
    tmp <- data.frame(name = name,
                      temp = temp)
    tmp$time_utc <- response$thermostatList[[1]]$utcTime
    tmp$time_local <- response$thermostatList[[1]]$thermostatTime
    info <- rbind(info, tmp)
}

# Sensor names look like "Ellie's Room"; reduce to a lowercase key and keep
# only the room this outlet controls.
info$name <- tolower(gsub("'s Room", "", info$name))
info <- info[info$name == "ellie", ]

# Current local time as a fractional hour (e.g. 13.5 == 1:30 pm), plus the
# time windows during which cooler setpoints apply. Plain comparisons
# replace the original scalar ifelse(cond, TRUE, FALSE).
current_time <- hour(Sys.time()) + (minute(Sys.time()) / 60)
is_morning_nap <- current_time >= 11.5 & current_time <= 13
is_afternoon_nap <- current_time >= 15 & current_time <= 18.5
is_sleeptime <- current_time >= 22 | current_time <= 10

# AC on when: sleeping/morning nap and >= 68F, awake and >= 72F, or always
# during the afternoon nap window (same logic as the original expression).
info$action <- ifelse(((is_morning_nap | is_sleeptime) & info$temp >= 68) |
                          (!(is_morning_nap | is_afternoon_nap | is_sleeptime) &
                               info$temp >= 72) |
                          is_afternoon_nap,
                      "on", "off")

print(paste0(Sys.time(), ": Formatted Data. Telling VeSync what to do."))
print(paste0(Sys.time(), ": here is the current status of each room and the state it should be in:"))
print(info)

# Drive the VeSync outlet(s). seq_len() (rather than 1:nrow) does nothing
# when no sensor matched above, whereas 1:0 would loop twice.
for (python in seq_len(nrow(info))) {
    py_cmd <- paste0("python3 /home/jacobrozran/ecobee/vesync.py ",
                     info$name[python], " ", info$action[python],
                     " >> /home/jacobrozran/ecobee/vesync.log 2>&1")
    system(py_cmd)
}

print(paste0(Sys.time(), ": Updated the rooms as needed. Script Complete"))
| /ecobee.R | no_license | jrozra200/ecobee_vesync_connect_AC | R | false | false | 3,520 | r | print(paste0(Sys.time(), ": Script Started"))
library(lubridate, quietly = TRUE)
library(httr)
library(jsonlite)

print(paste0(Sys.time(), ": Libraries Loaded. Refreshing Ecobee Creds"))

# Stored credentials: access_token, refresh_token, client_id.
creds <- read.csv("/home/jacobrozran/ecobee/ecobee.config")
refresh <- paste0("https://api.ecobee.com/token?grant_type=refresh_token&code=",
                  creds$refresh_token[1], "&client_id=", creds$client_id[1])
ref <- POST(refresh)

if (!grepl("access_token", as.character(ref))) {
    print(paste0(Sys.time(), ": Auth has broken - login and fix it"))
    system(paste0("/bin/bash /home/jacobrozran/ecobee/send_notification.sh >>",
                  " /home/jacobrozran/ecobee/send_notification.log"))
    # The original used `break` here, which is an error at top level
    # ("no loop for break/next"); quit() terminates the script cleanly
    # with a non-zero exit status instead.
    quit(save = "no", status = 1)
}

# Pull the new tokens out of the OAuth response and persist them for next run.
# NOTE(review): the \\w+ patterns assume tokens are purely alphanumeric or
# underscore -- confirm Ecobee never issues tokens containing '-' or '.'.
at <- gsub("(^.*access_token\": \")(\\w+)(\".*$)", "\\2", as.character(ref))
rt <- gsub("(^.*refresh_token\": \")(\\w+)(\".*$)", "\\2", as.character(ref))
creds <- data.frame(access_token = at,
                    refresh_token = rt,
                    client_id = creds$client_id[1])
write.csv(creds, "/home/jacobrozran/ecobee/ecobee.config", row.names = FALSE)

print(paste0(Sys.time(), ": Refreshed Ecobee Creds. Getting temps."))

# Query the thermostat API (including sensor data) via curl, caching the JSON.
therm <- paste0("curl -s -H 'Content-Type: text/json' -H 'Authorization: Bearer ",
                creds$access_token[1], "' 'https://api.ecobee.com/1/thermostat?",
                "format=json&body=\\{\"selection\":\\{\"selectionType\":\"regi",
                "stered\",\"selectionMatch\":\"\",\"includeSensors\":true\\}",
                "\\}' > /home/jacobrozran/ecobee/response.json")
system(therm)
response <- read_json("/home/jacobrozran/ecobee/response.json")

print(paste0(Sys.time(), ": Got Temps. Formatting Data."))

# One row per remote sensor: name, temperature (API reports tenths of a
# degree, hence / 10), and the thermostat's UTC/local timestamps.
# NOTE(review): hard-coded 1:3 assumes the first three sensors are the ones
# of interest -- revisit if sensors are added or reordered.
info <- data.frame()
for (sensor in 1:3) {
    name <- response$thermostatList[[1]]$remoteSensors[[sensor]]$name
    temp <- as.numeric(response$thermostatList[[1]]$remoteSensors[[sensor]]$capability[[1]]$value) / 10
    tmp <- data.frame(name = name,
                      temp = temp)
    tmp$time_utc <- response$thermostatList[[1]]$utcTime
    tmp$time_local <- response$thermostatList[[1]]$thermostatTime
    info <- rbind(info, tmp)
}

# Sensor names look like "Ellie's Room"; reduce to a lowercase key and keep
# only the room this outlet controls.
info$name <- tolower(gsub("'s Room", "", info$name))
info <- info[info$name == "ellie", ]

# Current local time as a fractional hour (e.g. 13.5 == 1:30 pm), plus the
# time windows during which cooler setpoints apply. Plain comparisons
# replace the original scalar ifelse(cond, TRUE, FALSE).
current_time <- hour(Sys.time()) + (minute(Sys.time()) / 60)
is_morning_nap <- current_time >= 11.5 & current_time <= 13
is_afternoon_nap <- current_time >= 15 & current_time <= 18.5
is_sleeptime <- current_time >= 22 | current_time <= 10

# AC on when: sleeping/morning nap and >= 68F, awake and >= 72F, or always
# during the afternoon nap window (same logic as the original expression).
info$action <- ifelse(((is_morning_nap | is_sleeptime) & info$temp >= 68) |
                          (!(is_morning_nap | is_afternoon_nap | is_sleeptime) &
                               info$temp >= 72) |
                          is_afternoon_nap,
                      "on", "off")

print(paste0(Sys.time(), ": Formatted Data. Telling VeSync what to do."))
print(paste0(Sys.time(), ": here is the current status of each room and the state it should be in:"))
print(info)

# Drive the VeSync outlet(s). seq_len() (rather than 1:nrow) does nothing
# when no sensor matched above, whereas 1:0 would loop twice.
for (python in seq_len(nrow(info))) {
    py_cmd <- paste0("python3 /home/jacobrozran/ecobee/vesync.py ",
                     info$name[python], " ", info$action[python],
                     " >> /home/jacobrozran/ecobee/vesync.log 2>&1")
    system(py_cmd)
}

print(paste0(Sys.time(), ": Updated the rooms as needed. Script Complete"))
|
library(Seurat)
library(ggplot2)
library(matrixStats)
library(gridExtra)
library(RColorBrewer)
library(ggsci)
library(gplots)
library(ComplexHeatmap)
library(circlize)
library(matrixStats)
# Command-line arguments (not referenced below -- kept for compatibility).
args = commandArgs(trailingOnly=TRUE)
# Disease-condition palette (overwritten by the cell-type palette below).
colorer <- c("Normal_pouch"="dodgerblue2","Pouchitis"="red3", "UC_colon"="forestgreen", "UC_inflamed"="darkorange1")
colors_clusters = c(pal_d3("category10")(10), pal_d3("category20b")(20), pal_igv("default")(51))
#
# Load the myeloid-cell Seurat object, fix the display order of the minor
# populations, and use them as the active identity classes.
#
s_obj <- readRDS("OBJECTS/Myeloid_cells/seurat_obj.rds")
s_obj@meta.data$MinorPopulations2 <- factor(s_obj@meta.data$MinorPopulations2,
                                            levels = c("mono_mac1", "mono_mac2", "mono_mac3", "mast_cells",
                                                       "DC", "pdcs"))
Idents(s_obj) = s_obj@meta.data$MinorPopulations2
# Cell-type palette (replaces the condition palette defined above).
colorer <- c("mast_cells"="navy",
             "pdcs"="pink", "DC"="darkorange1",
             "mono_mac1"="red3", "mono_mac2"="purple", "mono_mac3"="forestgreen")
# UMAP feature plots of TREM1 and CXCL10; cells are drawn in shuffled order
# (sample of the column names) so no group systematically overplots another.
genes = c("TREM1", "CXCL10")
gradient_colors = c("gray85", "red2")
plot_umap =
    FeaturePlot(s_obj, features=genes, reduction = "umap", cells = sample(colnames(s_obj)), cols = gradient_colors, ncol = 2, min.cutoff=0)
ggsave("FIGURES/SF3/FigureSF3_TREM1.pdf", plot = plot_umap, width = 10, height = 5, units = "in")
ggsave("FIGURES/SF3/FigureSF3_TREM1.png", plot = plot_umap, width = 10, height = 5, units = "in")
#
# and heatmap of top genes
#
#
#
#
# Per-cluster marker table (Wilcoxon test output) and cluster labels.
marker_list = read.table("OBJECTS/Myeloid_cells/clusters-MinorPopulations2-clust6/markers-global/markers.clust6.wilcox.all.csv", T, ',')
clusts = levels(s_obj@meta.data$MinorPopulations2)
# Placeholder first row (gene = NA) is stripped again after rbind() below.
good_genes <- data.frame(gene=NA, ct = NA)
# NOTE(review): only i = 1 (the first cluster level) is processed here; this
# looks like an unrolled/abandoned loop over clusts -- confirm that limiting
# the marker selection to the first cluster is intentional.
i=1
type = clusts[i]
# Markers of cluster i, sorted by fold change then adjusted p-value, keeping
# only strong markers (avg_logFC > 1.2).
gener = subset(marker_list, cluster == clusts[i])
gener <- gener[order(gener$avg_logFC, gener$p_val_adj, decreasing=T),]
gener = subset(gener, avg_logFC>1.2)
genes = as.character(gener$gene)
genes2 = data.frame(gene=genes, ct = clusts[i])
print(length(genes))
good_genes <- rbind(good_genes, genes2)
#good_genes <- c(good_genes, "CD8A", "CD4", "CD3D", "CD79A", "FOS", "FOXP3", "IL17A")
# Drop the NA placeholder row, deduplicate, and keep only genes present in
# the Seurat object.
good_genes <- good_genes[-1,]
good_good <- unique(good_genes$gene)
good_good <- intersect(good_good, rownames(s_obj))
print(good_good)
good_genes2 <- subset(good_genes, good_genes$gene %in% good_good)
good_genes2 <- subset(good_genes2, !is.na(gene))
# Order the retained genes by their cluster's factor level.
good_genes2$ct <- factor(good_genes2$ct, levels = levels(s_obj@meta.data$MinorPopulations2))
good_genes2 <- good_genes2[order(good_genes2$ct),]
# Export the marker rows backing the figure as a supplementary table.
figSF3_tab = subset(marker_list, gene %in% good_good)
write.table(figSF3_tab, "FIGURES/SF3/FigureSF3_Top_markers.txt", sep='\t', row.names=F, quote=F)
#
# Scaled (integrated-assay) expression matrix restricted to the selected
# genes. NOTE(review): data.frame() mangles cell-name characters (":"/"-"
# become "."); the commented-out code below appears to have worked around
# exactly that — confirm column names still align with `cluster2` order.
counts <- data.frame(s_obj@assays$integrated@scale.data)[good_genes2$gene,]
#monomac1 = gsub(":", "\\.", rownames(subset(s_obj@meta.data, MinorPopulations2 == "mono_mac1")))
#monomac1 = gsub("-", "\\.", monomac1)
#monomac2 = gsub(":", "\\.", rownames(subset(s_obj@meta.data, MinorPopulations2 == "mono_mac2")))
#monomac2 = gsub("-", "\\.", monomac2)
#monomac3 = gsub(":", "\\.", rownames(subset(s_obj@meta.data, MinorPopulations2 == "mono_mac3")))
#monomac3 = gsub("-", "\\.", monomac3)
#monomac=c(monomac1, monomac2, monomac3)
#counts = counts[,monomac]
#rownames(s_obj@meta.data) <- make.names(colnames(s_obj))
#clusterofchoice = "MinorPopulations2"
# One cluster label per cell, in the object's metadata row order.
cluster2 = s_obj@meta.data$MinorPopulations2
#cluster2 = factor(cluster2, levels = c("mono_mac1", "mono_mac2", "mono_mac3"))
#scounts <- aggregate(tcounts, by=list(cluster2), 'median')
#rownames(scounts) <- scounts[,1]
#scounts <- scounts[,-1]
#scounts[is.na(scounts)] <- 0
#counts=t(scale(t(counts)))
# Column annotation bar: one color per minor population (same palette as
# the `colorer` vector defined earlier in this script).
ha <- columnAnnotation(df = data.frame(Cluster=cluster2),
col=list(Cluster = c("mast_cells"="navy",
"pdcs"="pink", "DC"='darkorange1',
"mono_mac1"="red3", "mono_mac2"="purple", "mono_mac3"="forestgreen")))
# Render the marker heatmap to PDF. TRUE/FALSE replace the reassignable
# aliases T/F used by the original, and the redundant paste0() wrapper
# around a single constant string is dropped; output is unchanged.
namer <- "FIGURES/SF3/FigureSF3_heatmap.pdf"
pdf(namer, height = 8, width = 8)
Heatmap(data.matrix(counts), show_row_names = TRUE, show_column_names = FALSE,
top_annotation = ha,
heatmap_legend_param = list(title = "Log2 Expression\nLevel"),
cluster_rows = TRUE, cluster_columns = TRUE, #row_names_side = 'left',
row_names_gp = gpar(fontsize=10),
row_title_gp = gpar(fontsize = 10),
row_names_max_width = unit(10,'cm'),
use_raster = TRUE,
# keep column slices in factor order; cluster cells only within each slice
cluster_column_slices = FALSE,
column_split = cluster2,
#split = cluster2,
#left_annotation = ha,
col = colorRamp2(c(-2,0,2), c("blue", "white", "red")))
dev.off()
| /SCRIPTS/FigureSF3.R | no_license | jcooperdevlin/Pouch | R | false | false | 4,594 | r | library(Seurat)
library(ggplot2)
library(matrixStats)
library(gridExtra)
library(RColorBrewer)
library(ggsci)
library(gplots)
library(ComplexHeatmap)
library(circlize)
library(matrixStats)
args = commandArgs(trailingOnly=TRUE)
colorer <- c("Normal_pouch"="dodgerblue2","Pouchitis"="red3", "UC_colon"="forestgreen", "UC_inflamed"="darkorange1")
colors_clusters = c(pal_d3("category10")(10), pal_d3("category20b")(20), pal_igv("default")(51))
#
#
#
s_obj <- readRDS("OBJECTS/Myeloid_cells/seurat_obj.rds")
s_obj@meta.data$MinorPopulations2 <- factor(s_obj@meta.data$MinorPopulations2,
levels = c("mono_mac1", "mono_mac2", "mono_mac3", "mast_cells",
"DC", "pdcs"))
Idents(s_obj) = s_obj@meta.data$MinorPopulations2
colorer <- c("mast_cells"="navy",
"pdcs"="pink", "DC"="darkorange1",
"mono_mac1"="red3", "mono_mac2"="purple", "mono_mac3"="forestgreen")
genes = c("TREM1", "CXCL10")
gradient_colors = c("gray85", "red2")
plot_umap =
FeaturePlot(s_obj, features=genes, reduction = "umap", cells = sample(colnames(s_obj)), cols = gradient_colors, ncol = 2, min.cutoff=0)
ggsave("FIGURES/SF3/FigureSF3_TREM1.pdf", plot = plot_umap, width = 10, height = 5, units = "in")
ggsave("FIGURES/SF3/FigureSF3_TREM1.png", plot = plot_umap, width = 10, height = 5, units = "in")
#
# and heatmap of top genes
#
#
#
#
marker_list = read.table("OBJECTS/Myeloid_cells/clusters-MinorPopulations2-clust6/markers-global/markers.clust6.wilcox.all.csv", T, ',')
clusts = levels(s_obj@meta.data$MinorPopulations2)
good_genes <- data.frame(gene=NA, ct = NA)
i=1
type = clusts[i]
gener = subset(marker_list, cluster == clusts[i])
gener <- gener[order(gener$avg_logFC, gener$p_val_adj, decreasing=T),]
gener = subset(gener, avg_logFC>1.2)
genes = as.character(gener$gene)
genes2 = data.frame(gene=genes, ct = clusts[i])
print(length(genes))
good_genes <- rbind(good_genes, genes2)
#good_genes <- c(good_genes, "CD8A", "CD4", "CD3D", "CD79A", "FOS", "FOXP3", "IL17A")
good_genes <- good_genes[-1,]
good_good <- unique(good_genes$gene)
good_good <- intersect(good_good, rownames(s_obj))
print(good_good)
good_genes2 <- subset(good_genes, good_genes$gene %in% good_good)
good_genes2 <- subset(good_genes2, !is.na(gene))
good_genes2$ct <- factor(good_genes2$ct, levels = levels(s_obj@meta.data$MinorPopulations2))
good_genes2 <- good_genes2[order(good_genes2$ct),]
#tab2write
figSF3_tab = subset(marker_list, gene %in% good_good)
write.table(figSF3_tab, "FIGURES/SF3/FigureSF3_Top_markers.txt", sep='\t', row.names=F, quote=F)
#
counts <- data.frame(s_obj@assays$integrated@scale.data)[good_genes2$gene,]
#monomac1 = gsub(":", "\\.", rownames(subset(s_obj@meta.data, MinorPopulations2 == "mono_mac1")))
#monomac1 = gsub("-", "\\.", monomac1)
#monomac2 = gsub(":", "\\.", rownames(subset(s_obj@meta.data, MinorPopulations2 == "mono_mac2")))
#monomac2 = gsub("-", "\\.", monomac2)
#monomac3 = gsub(":", "\\.", rownames(subset(s_obj@meta.data, MinorPopulations2 == "mono_mac3")))
#monomac3 = gsub("-", "\\.", monomac3)
#monomac=c(monomac1, monomac2, monomac3)
#counts = counts[,monomac]
#rownames(s_obj@meta.data) <- make.names(colnames(s_obj))
#clusterofchoice = "MinorPopulations2"
cluster2 = s_obj@meta.data$MinorPopulations2
#cluster2 = factor(cluster2, levels = c("mono_mac1", "mono_mac2", "mono_mac3"))
#scounts <- aggregate(tcounts, by=list(cluster2), 'median')
#rownames(scounts) <- scounts[,1]
#scounts <- scounts[,-1]
#scounts[is.na(scounts)] <- 0
#counts=t(scale(t(counts)))
ha <- columnAnnotation(df = data.frame(Cluster=cluster2),
col=list(Cluster = c("mast_cells"="navy",
"pdcs"="pink", "DC"='darkorange1',
"mono_mac1"="red3", "mono_mac2"="purple", "mono_mac3"="forestgreen")))
namer <- paste0("FIGURES/SF3/FigureSF3_heatmap.pdf")
pdf(namer, height = 8, width = 8)
Heatmap(data.matrix(counts), show_row_names = T, show_column_names = F,
top_annotation = ha,
heatmap_legend_param = list(title = "Log2 Expression\nLevel"),
cluster_rows = T, cluster_columns = T, #row_names_side = 'left',
row_names_gp = gpar(fontsize=10),
row_title_gp = gpar(fontsize = 10),
row_names_max_width = unit(10,'cm'),
use_raster = T,
cluster_column_slices=F,
column_split = cluster2,
#split = cluster2,
#left_annotation = ha,
col = colorRamp2(c(-2,0,2), c("blue", "white", "red")))
dev.off()
|
############ LIBRARIES #####
library(dplyr)
library(ggplot2)
library(lubridate)
library(caret)
library(e1071)
library(gbm)
library(data.table)
library(tictoc)
# read_csv() below is provided by readr, which the original script never
# loaded; without it the three read_csv() calls fail with
# "could not find function".
library(readr)
# Load the competition data (test pairs, historical sales, item metadata).
test <- read_csv("C:/Users/admin/Desktop/data science/project/predicfeturesaIes/test.csv")
sales_train <- read_csv("C:/Users/admin/Desktop/data science/project/predicfeturesaIes/sales_train.csv")
items <- read_csv("C:/Users/admin/Desktop/data science/project/predicfeturesaIes/items.csv")
# Quick structural checks: dimensions, missingness, summaries.
dim(test)
dim(sales_train)
dim(items)
sum(is.na(items))
sum(is.na(test))
sum(is.na(sales_train))
summary(sales_train)
summary(test)
summary(items)
glimpse(sales_train)
glimpse(items)
glimpse(test)
# Attach each item's category to the sales records (left join on item_id).
sales_data = merge(sales_train, items[,c("item_id", "item_category_id")], by = "item_id", all.x = T)
# Dates arrive as "dd.mm.yyyy" strings.
sales_data$date = as.Date(sales_data$date, "%d.%m.%Y")
View(sales_data)
dim(sales_data)
# Full linear model on every remaining column.
reg1<-lm(item_cnt_day~.,data=sales_data)
summary(reg1)
# NOTE(review): this assignment masks the stats::predict generic with a
# plain vector, and the newdata supplies only two of reg1's predictors —
# predict() on a model fitted with `~ .` needs every regressor present;
# verify this call actually succeeds on the real data.
predict<-predict(reg1,test[,c("shop_id","item_id")])
# Residual diagnostics for the full model.
reg1$residuals
sum(reg1$residuals)
mean(reg1$residuals)
sqrt(sum(reg1$residuals^2)/nrow(sales_data)) #RMSE
sqrt(mean(reg1$residuals^2))
confint(reg1,level=0.95)
predict(reg1,interval="predict")
# Simpler model restricted to the two columns available in the test set.
linear_model = lm(formula = item_cnt_day ~ shop_id + item_id,
data = sales_data)
linear_model
summary(linear_model)
result = predict(linear_model, test[,c("shop_id","item_id")])
# Kaggle-style submission: one predicted monthly count per test ID.
submission = data.frame(ID = test$ID,
item_cnt_month = result)
head(submission)
write.csv(submission, "submission1.csv", row.names = F)
# GBM Model
library(tictoc)
# tic()/toc() bracket the training call to report wall-clock time.
tic("Time Taken to Run GBM Model ")
# Gradient-boosted regression on the same two predictors; 80% of the rows
# feed training, the remainder drives gbm's internal validation curve.
gbm_model = gbm(item_cnt_day ~ shop_id + item_id,
data = sales_data,
shrinkage = 0.01,
distribution = "gaussian",
n.trees = 1000,
interaction.depth = 5,
bag.fraction = 0.5,
train.fraction = 0.8,
# cv.folds = 5,
n.cores = -1,
verbose = T)
toc()
summary(gbm_model)
# Predict with the full 1000-tree ensemble on the test (shop, item) pairs.
result2 = predict(gbm_model,newdata = test[c("shop_id","item_id")], n.trees = 1000)
summary(result2)
str(result2)
sub2 = data.frame(ID = test$ID,
item_cnt_month = result2)
# Bar chart of item counts per shop, on a dark theme.
# NOTE(review): `items_in_shop` is never created in this script — this
# plot will error unless it exists in the session from elsewhere; also
# geom_histogram(stat = "identity") is effectively geom_bar/geom_col.
ggplot(data = items_in_shop,
mapping = aes(x = reorder(shop_id,item_id),
y = item_id,
fill = factor(shop_id)))+
geom_histogram(stat = "identity", color = "yellow") +
xlab(" Shop ID")+ ylab(" Items in shop")+
ggtitle("Most Items in Shops") +
coord_flip()+
theme(
# get rid of panel grids
panel.grid.major = element_blank(),
panel.grid.minor = element_line(colour = "gray",linetype = "dotted"),
# Change plot and panel background
plot.background=element_rect(fill = "black"),
panel.background = element_rect(fill = 'black'),
# Change legend
# legend.position = c(0.6, 0.07),
# legend.direction = "horizontal",
legend.background = element_rect(fill = "black", color = NA),
legend.key = element_rect(color = "gray", fill = "black"),
legend.title = element_text(color = "white"),
legend.text = element_text(color = "white"),
# align title to top center, top left is by default.
plot.title = element_text(color = "white", hjust = 0.5, face = "bold"),
# axis ticks to bold black
axis.text=element_text(colour = "yellow",face = "bold"),
axis.title.x = element_text(color = "white"),
axis.title.y = element_text(color = "white")
)
# Write the GBM submission.
write.csv(sub2, "sub2.csv", row.names = F)
# NOTE(review): install.packages() at the end of a script (and after both
# packages were already library()'d above) runs an install on every
# execution — consider removing or running these once interactively.
install.packages("tictoc", type = "source")
install.packages("gbm")
| /sales_predict.R | no_license | PUNAM-CODE/Sales_predict | R | false | false | 3,736 | r | ############ LIBRARIES #####
library(dplyr)
library(ggplot2)
library(lubridate)
library(caret)
library(e1071)
library(gbm)
library(data.table)
library(tictoc)
test <- read_csv("C:/Users/admin/Desktop/data science/project/predicfeturesaIes/test.csv")
sales_train <- read_csv("C:/Users/admin/Desktop/data science/project/predicfeturesaIes/sales_train.csv")
items <- read_csv("C:/Users/admin/Desktop/data science/project/predicfeturesaIes/items.csv")
dim(test)
dim(sales_train)
dim(items)
sum(is.na(items))
sum(is.na(test))
sum(is.na(sales_train))
summary(sales_train)
summary(test)
summary(items)
glimpse(sales_train)
glimpse(items)
glimpse(test)
sales_data = merge(sales_train, items[,c("item_id", "item_category_id")], by = "item_id", all.x = T)
sales_data$date = as.Date(sales_data$date, "%d.%m.%Y")
View(sales_data)
dim(sales_data)
reg1<-lm(item_cnt_day~.,data=sales_data)
summary(reg1)
predict<-predict(reg1,test[,c("shop_id","item_id")])
reg1$residuals
sum(reg1$residuals)
mean(reg1$residuals)
sqrt(sum(reg1$residuals^2)/nrow(sales_data)) #RMSE
sqrt(mean(reg1$residuals^2))
confint(reg1,level=0.95)
predict(reg1,interval="predict")
linear_model = lm(formula = item_cnt_day ~ shop_id + item_id,
data = sales_data)
linear_model
summary(linear_model)
result = predict(linear_model, test[,c("shop_id","item_id")])
submission = data.frame(ID = test$ID,
item_cnt_month = result)
head(submission)
write.csv(submission, "submission1.csv", row.names = F)
# GBM Model
library(tictoc)
tic("Time Taken to Run GBM Model ")
gbm_model = gbm(item_cnt_day ~ shop_id + item_id,
data = sales_data,
shrinkage = 0.01,
distribution = "gaussian",
n.trees = 1000,
interaction.depth = 5,
bag.fraction = 0.5,
train.fraction = 0.8,
# cv.folds = 5,
n.cores = -1,
verbose = T)
toc()
summary(gbm_model)
result2 = predict(gbm_model,newdata = test[c("shop_id","item_id")], n.trees = 1000)
summary(result2)
str(result2)
sub2 = data.frame(ID = test$ID,
item_cnt_month = result2)
ggplot(data = items_in_shop,
mapping = aes(x = reorder(shop_id,item_id),
y = item_id,
fill = factor(shop_id)))+
geom_histogram(stat = "identity", color = "yellow") +
xlab(" Shop ID")+ ylab(" Items in shop")+
ggtitle("Most Items in Shops") +
coord_flip()+
theme(
# get rid of panel grids
panel.grid.major = element_blank(),
panel.grid.minor = element_line(colour = "gray",linetype = "dotted"),
# Change plot and panel background
plot.background=element_rect(fill = "black"),
panel.background = element_rect(fill = 'black'),
# Change legend
# legend.position = c(0.6, 0.07),
# legend.direction = "horizontal",
legend.background = element_rect(fill = "black", color = NA),
legend.key = element_rect(color = "gray", fill = "black"),
legend.title = element_text(color = "white"),
legend.text = element_text(color = "white"),
# align title to top center, top ledt is by default.
plot.title = element_text(color = "white", hjust = 0.5, face = "bold"),
# axis ticks to bold black
axis.text=element_text(colour = "yellow",face = "bold"),
axis.title.x = element_text(color = "white"),
axis.title.y = element_text(color = "white")
)
write.csv(sub2, "sub2.csv", row.names = F)
install.packages("tictoc", type = "source")
install.packages("gbm")
|
# NOTE(review): rm(list=ls()) and setwd() in a script are discouraged —
# they clear the caller's workspace and change global state; prefer
# project-relative paths. Kept as-is to preserve behavior.
rm(list=ls())
#setwd('C:/Users/Industrial Stat Lab/Desktop')
#setwd('C:/Users/lswsi/OneDrive/바탕 화면/대학교/20-여름방학/공모전/2020빅콘테스트 문제데이터(데이터분석분야-챔피언리그)/01_제공데이터')
#setwd('C:/Users/lswsi/Desktop/2020빅콘테스트 문제데이터(데이터분석분야-챔피언리그)/01_제공데이터')
setwd('C:\\Users\\62190\\Documents\\BigContest\\datas')
# Modeling, text-mining, and plotting dependencies for the whole script.
library(readxl)
library(randomForest)
library(ggplot2)
library(GGally)
library(caret)
library(e1071)
library(gbm)
library(dplyr)
library(xgboost)
library(tidytext)
library(tm)
library(text2vec)
library(wordcloud)
library(SnowballC)
library(stringr)
library(data.table)
library(mltools)
library(FactoMineR)
library(factoextra)
library(lightgbm)
library(Matrix)
# install.packages('randomForest')
# install.packages('GGally')
# install.packages('e1071')
# install.packages('caret')
# install.packages('gbm')
# install.packages('xgboost')
# install.packages('tidytext')
# install.packages('text2vec')
# install.packages('tm')
# install.packages('wordcloud')
# install.packages('SnowballC')
# install.packages('stringr')
# install.packages('data.table')
# install.packages('mltools')
# install.packages('FactoMineR')
# install.packages('factoextra')
# install.packages('tidyverse')
# install.packages('mlr')
# install.packages('Metrics')
# install.packages('Matrix')
# write down at the terminal tab,
# previously install
# 1. CMake (https://cmake.org/download/)
#
# 2. git (https://git-scm.com/download/win)
#
# 3. Rtools (https://cran.r-project.org/bin/windows/Rtools)
#
# ( 설치 과정중에, 환경변수를 추가하는 옵션 체크 해줄것)
#
# 4. Visual Studio (https://www.visualstudio.com/thank-you-downloading-visual-studio/?sku=Community&rel=15)
#
# (설치 후, 재부팅 필수)
# These are SHELL commands (see the note above: "write down at the terminal
# tab"); left uncommented they are R syntax errors and prevent this file
# from being parsed/sourced at all, so they are commented out here.
# git clone --recursive https://github.com/microsoft/LightGBM
# cd LightGBM
# Rscript build_r.R
# --- LightGBM installation alternatives (one-time setup) -------------------
# NOTE(review): these install calls run on every source() of this script;
# consider commenting them out after the first successful install.
# Option A: install the CRAN-style source tarball from the GitHub release.
PKG_URL <- "https://github.com/microsoft/LightGBM/releases/download/v3.0.0rc1/lightgbm_3.0.0-1-r-cran.tar.gz"
remotes::install_url(PKG_URL)
# Option B: download the prebuilt Windows binary and install it locally.
PKG_URL <- "https://github.com/microsoft/LightGBM/releases/download/v3.0.0rc1/lightgbm-3.0.0-1-r40-windows.zip"
local_file <- paste0("lightgbm.", tools::file_ext(PKG_URL))
download.file(
url = PKG_URL
, destfile = local_file
)
install.packages(
pkgs = local_file
, type = "binary"
, repos = NULL
)
# install.packages('devtools')
# library(devtools)
#
# Option C: install via devtools from GitHub.
devtools::install_github("Laurae2/lgbdl")
#
options(devtools.install.args = "--no-multiarch")
devtools::install_github("Microsoft/LightGBM", subdir = "R-package")
#
# running code to verify successful installation of package
library(lightgbm)
# Smoke test on the bundled agaricus data: build a Dataset and run 5-fold CV.
data(agaricus.train, package='lightgbm')
train <- agaricus.train
train %>% head
dtrain <- lgb.Dataset(train$data, label=train$label)
dtrain
params <- list(objective="regression", metric="l2")
model <- lgb.cv(params, dtrain, 10, nfold=5, min_data=1, learning_rate=1, early_stopping_rounds=10)
model
#custom MAPE function for xgboost use feval
# Custom evaluation metric for xgboost, passed via `feval`/`eval_metric`.
# Returns list(metric, value) as xgboost's custom-metric protocol requires.
# NOTE(review): the denominator is the PREDICTION, not the actual label,
# so this is not the textbook MAPE — it matches the ad-hoc scoring used
# later in this script; confirm before reusing elsewhere.
MAPE <- function(preds, dtrain) {
  actual <- as.numeric(getinfo(dtrain, 'label'))
  pred <- as.numeric(preds)
  pct_err <- abs((actual - pred) / pred)
  list(metric = 'mape', value = 100 * mean(pct_err))
}
# dataset reading
# Load the prepared dataset (file name is Korean for "my own data") and
# coerce to a character matrix so columns can be pulled by index.
d1 <- read.csv('나만의데이터.csv')
str(d1)
d1 <- as.matrix(d1)
d1%>%head
d1 %>% dim
d1 <- na.omit(d1)
# Pull individual columns by position. NOTE(review): the column-index ->
# meaning mapping below is inferred from the variable names only; confirm
# against the CSV header.
day <- (d1[,2])
day <- as.factor(day)
day
date <- d1[,1]
month <- as.factor(d1[,3])
time <- as.factor(d1[,4])
con_time <- as.numeric(d1[,5])
exposure <- as.numeric(d1[,9])
brand <- as.factor(d1[,10])
code_name <- as.factor(d1[,11])
merch_name <- tolower(d1[,12])
# Recode Korean category labels to numeric codes (as characters, since
# `category` is a character vector): clothing=1, underwear=2, kitchen=3,
# agro/fishery=4, beauty=5, appliances=6, household=7, health=8,
# miscellaneous=9, furniture=10, bedding=11.
category <- (d1[,13])
category %>% unique
category[category=='의류'] <- 1
category[category=='속옷'] <- 2
category[category=='주방'] <- 3
category[category=='농수축'] <- 4
category[category=='이미용'] <- 5
category[category=='가전'] <- 6
category[category=='생활용품'] <- 7
category[category=='건강기능'] <- 8
category[category=='잡화'] <- 9
category[category=='가구'] <- 10
category[category=='침구'] <- 11
category <- factor(category)
category
price <- as.numeric(d1[,20])
total_revenue <- as.numeric(d1[,23])
# Replace exact zeros in `seemean` with a small positive floor value
# (the preceding min() call is exploratory; its result is discarded).
seemean <- as.numeric(d1[,21])
min(seemean[seemean!=0])
seemean[seemean==0] <- 0.00006
precipitation <- as.numeric(d1[,15])
mean_temp <- as.numeric(d1[,14])
cold_sc <- as.numeric(d1[,16])
flu_sc <- as.numeric(d1[,17])
pneumonia_sc <- as.numeric(d1[,18])
coronavirus_sc <- as.numeric(d1[,19])
# Assemble the working data frame from the extracted columns.
data0 <- data.frame(day,date,month,time,con_time,exposure,brand,code_name,merch_name,category,price,
total_revenue,seemean,precipitation,mean_temp,cold_sc,flu_sc,pneumonia_sc,coronavirus_sc)
sum(is.na(data0))
#Giving seq to data
# Sort so rows of the same brand/product are adjacent, then number each
# consecutive run of identical (date, code_name, day) rows 1, 2, 3, ...
data0 <- arrange(data0,code_name)
data0 <- arrange(data0,brand)
View(data0)
sell_sequence <- rep(NA, length(data0$code_name))
sell_sequence[1] <- 1
# NOTE(review): at the final iteration i == n this reads index i+1, which
# is out of range (yields NA), and ifelse() is used here as scalar control
# flow — a plain if/else would be clearer; verify the NA count printed
# below is as expected.
for(i in 1:length(data0$code_name)){
ifelse((data0$date[i]==data0$date[i+1]& data0$code_name[i]==data0$code_name[i+1]
& data0$day[i]==data0$day[i+1])
,sell_sequence[i+1] <- sell_sequence[i]+1, sell_sequence[i+1] <- 1 )
}
sell_sequence
sum(is.na(sell_sequence))
# Fold positions 7-12 back onto 1-6 before converting to an ordered factor.
sell_sequence[sell_sequence==7] <- 1
sell_sequence[sell_sequence==8] <- 2
sell_sequence[sell_sequence==9] <- 3
sell_sequence[sell_sequence==10] <- 4
sell_sequence[sell_sequence==11] <- 5
sell_sequence[sell_sequence==12] <- 6
sell_sequence <- factor(sell_sequence,order=T,levels=c(1,2,3,4,5,6))
data00 <- data.frame(data0,sell_sequence)
head(data00)
str(data00)
# Split rows by broadcast-slot position to inspect revenue per position.
data_seq1 <- data00[data00$sell_sequence==1,]
data_seq2 <- data00[data00$sell_sequence==2,]
data_seq3 <- data00[data00$sell_sequence==3,]
data_seq4 <- data00[data00$sell_sequence==4,]
data_seq5 <- data00[data00$sell_sequence==5,]
data_seq6 <- data00[data00$sell_sequence==6,]
# Per-position revenue mean and variance (one column per position).
data_seq_mean <- data.frame(mean(data_seq1$total_revenue),mean(data_seq2$total_revenue),mean(data_seq3$total_revenue),
mean(data_seq4$total_revenue),mean(data_seq5$total_revenue),mean(data_seq6$total_revenue))
data_seq_var <- data.frame(var(data_seq1$total_revenue),var(data_seq2$total_revenue),var(data_seq3$total_revenue),
var(data_seq4$total_revenue),var(data_seq5$total_revenue),var(data_seq6$total_revenue))
order(data_seq_mean)
data_seq_mean[order(data_seq_mean)]
# Encode each row's position by its mean-revenue rank. NOTE(review): the
# position->rank mapping in this if-chain is hard-coded from the order()
# output above; it silently goes stale if the data change — verify it
# matches the printed ordering.
rank_seq_mean <- rep(0,length(data00$total_revenue))
for(i in 1:length(data00$total_revenue)){
if(data00$sell_sequence[i]==1){rank_seq_mean[i] <- 1}
else(if(data00$sell_sequence[i]==5){rank_seq_mean[i] <- 2}
else(if(data00$sell_sequence[i]==2){rank_seq_mean[i] <- 3}
else(if(data00$sell_sequence[i]==4){rank_seq_mean[i] <- 4}
else(if(data00$sell_sequence[i]==6){rank_seq_mean[i] <- 5}
else(if(data00$sell_sequence[i]==3){rank_seq_mean[i] <- 6})))))
}
unique(rank_seq_mean)
str(rank_seq_mean)
order(data_seq_var)
data_seq_var[order(data_seq_var)]
# Same idea, ranked by revenue variance (same hard-coding caveat applies).
rank_seq_var <- rep(0,length(data00$total_revenue))
for(i in 1:length(data00$total_revenue)){
if(data00$sell_sequence[i]==1){rank_seq_var[i] <- 1}
else(if(data00$sell_sequence[i]==5){rank_seq_var[i] <- 2}
else(if(data00$sell_sequence[i]==2){rank_seq_var[i] <- 3}
else(if(data00$sell_sequence[i]==6){rank_seq_var[i] <- 4}
else(if(data00$sell_sequence[i]==3){rank_seq_var[i] <- 5}
else(if(data00$sell_sequence[i]==4){rank_seq_var[i] <- 6})))))
}
unique(rank_seq_var)
str(rank_seq_var)
#giving rank to brand name
# Build a term-frequency table over the brand-name column of the raw
# spreadsheet, then use term rank as a crude brand score.
data_merch_name <- read_xlsx('seungwonrawdata.xlsx')
data_merch_name %>% head
data_merch_name <- as.matrix(data_merch_name)
brand_name <- data_merch_name[,10]
brand_name %>% head
# Lowercase and strip punctuation before building the term-document matrix.
corpus_top_name <- Corpus(VectorSource(brand_name),
readerControl=list(language='kor'))
corpus_top_name <- tm_map(corpus_top_name,content_transformer(tolower))
corpus_top_name <- tm_map(corpus_top_name,removePunctuation)
text_top_name <- TermDocumentMatrix(corpus_top_name)
dtm_top_name <- as.matrix(text_top_name)
# Terms sorted by ascending frequency, so a LARGER index = more frequent.
dtm_sum_top_merch_name <- sort(rowSums(dtm_top_name),decreasing=F)
dtm_df_top_merch_name <- data.frame(word=names(dtm_sum_top_merch_name),
freq=dtm_sum_top_merch_name)
dtm_df_top_merch_name %>% head(10)
#wordcloud(words=dtm_df_top_merch_name$word, freq=dtm_df_top_merch_name$freq,
#min.freq=100,max.words=100,random.order = F,rot.per=0.15,
#colors=brewer.pal(5,'Dark2'))
top_brand_name <- rownames(dtm_df_top_merch_name)
# Default rank 1; each matching term overwrites with its index, so a
# product matching several terms keeps the LAST (most frequent) match.
# (The '삼성' grep line is exploratory — '삼성' is "Samsung".)
rank_brand <- rep(1, length(data00$merch_name))
rank_brand[grep('삼성',data00$merch_name)]
for(i in 1:length(top_brand_name)){
rank_brand[grep(top_brand_name[i],data00$merch_name)] <- i
}
length(unique(rank_brand))
data00 <- data.frame(data00,rank_seq_mean,rank_seq_var,rank_brand)
#data00$rank_brand <- factor(data00$rank_brand,order=T)
data00$rank_brand <- as.numeric(data00$rank_brand)
# NOTE(review): data00 has no top_temp / bottom_temp columns anywhere in
# this script — this subtraction yields NULL - NULL and the assignment
# should fail; confirm which columns temp_diff was meant to use.
data00$temp_diff <- data00$top_temp-data00$bottom_temp
#XG boost
#
#
# Fix the RNG for the train/test split further below.
set.seed(123)
#data00 <- data00[data00$total_revenue!=50000,]
head(data00)
# Keep the response (total_revenue) plus the modeling features.
# NOTE(review): temp_diff comes from the suspect top_temp/bottom_temp
# subtraction above — confirm this column actually exists at this point.
new_data00 <- select(data00,total_revenue,day, month, time, con_time, category, price,
#seemean, seevar, mean_temp, top_temp, bottom_temp, rank_seq_var
precipitation, temp_diff, mean_temp,
sell_sequence, rank_seq_mean,rank_brand)
head(new_data00)
str(new_data00)
# ggplot(data=new_data00, aes(x=precipitation, y=total_revenue))+
# geom_point(size=2)
#
# unique(new_data00$precipitation)#0, 9.4, 28.9, 56.5
#
# ggplot(data=new_data00[new_data00$precipitation>=9.4,], aes(x=precipitation, y=total_revenue))+
# geom_point(size=2)
#
# ggplot(data=new_data00, aes(x=mean_temp, y=total_revenue))+
# geom_point(size=2)
#
# ggplot(data=new_data00, aes(x=top_temp, y=total_revenue))+
# geom_point(size=2)
#
# ggplot(data=new_data00, aes(x=bottom_temp, y=total_revenue))+
# geom_point(size=2)
#
# ggplot(data=new_data00, aes(x=temp_diff, y=total_revenue))+
# geom_point(size=2)
# Convert the ordered factor sell_sequence to its integer level codes
# (1..6) for the boosted models; rank_seq_mean is already numeric, so
# as.numeric() is a no-op there.
new_data00$sell_sequence <- as.numeric(new_data00$sell_sequence)
new_data00$rank_seq_mean <- as.numeric(new_data00$rank_seq_mean)
#new_data00$rank_seq_var <- as.numeric(new_data00$rank_seq_var)
# category_precipitation <- rep(NA, length(new_data00$precipitation))
# for( i in 1: length(new_data00$precipitation)){
# if(new_data00$precipitation[i]>=0&new_data00$precipitation[i]<9.4){
# category_precipitation[i] <- 4
# }else(if(new_data00$precipitation[i]>=9.4&new_data00$precipitation[i]<28.9){
# category_precipitation[i] <- 3
# }else(if(new_data00$precipitation[i]>=28.9&new_data00$precipitation[i]<56.5){
# category_precipitation[i] <- 2
# }else(if(new_data00$precipitation[i]>=56.5){
# category_precipitation[i] <- 1
# })))
# }
# sum(is.na(category_precipitation))
# c_precipitation <- as.numeric(category_precipitation)
# c_precipitation
#
# new_data00 %>% dim
# new_data00 <- data.frame(new_data00, c_precipitation)
# new_data00 %>% head
# new_data00 %>% head
# View(new_data00 %>% filter(total_revenue>=100000000))
# new_data00 %>% filter(total_revenue>=100000000) %>% select(month) %>% unlist() %>% as.numeric() %>% hist()
# new_data00 %>% filter(total_revenue>=100000000) %>% select(category) %>% unlist() %>% as.numeric() %>% hist()
# new_data00 %>% filter(total_revenue>=100000000) %>% select(time) %>% unlist() %>% as.numeric() %>% hist()
# new_data00 %>% filter(total_revenue>=100000000) %>% select(price) %>% unlist() %>% as.numeric() %>% hist()
# new_data00 %>% filter(total_revenue>=100000000) %>% select(sell_sequence) %>% unlist() %>% as.numeric() %>% hist()
# new_data00 %>% filter(total_revenue>=100000000) %>% select(rank_brand) %>% unlist() %>% as.numeric() %>% hist()
# new_data00 %>% filter(total_revenue>=100000000) %>% select(rank_seq_mean) %>% unlist() %>% as.numeric() %>% hist()
#
#
#
#
#
# new_data00 %>% filter(total_revenue==50000) %>% dim()
# plot(sort(new_data00$total_revenue[new_data00$total_revenue>90000000],decreasing=F))
# length(new_data00$total_revenue[new_data00$total_revenue>90000000])
# new_data00$total_revenue[order(new_data00$total_revenue,decreasing=T)]
# new_data00$total_revenue %>% quantile(c(0.996,0.997,0.998,0.999))
#new_data001 <- new_data00[new_data00$category==1,]
#new_data001 %>% head
# ggplot(data=new_data00, aes(x=rank_brand,y=total_revenue))+
# geom_point(size=2)+
# geom_smooth(method='lm')
#
# ggplot(data=new_data00, aes(x=seemean,y=total_revenue))+
# geom_point(size=2)+
# geom_smooth(method='lm')
#
# ggplot(data=new_data00, aes(x=seemax,y=total_revenue))+
# geom_point(size=2)+
# geom_smooth(method='lm')
# ggplot(data=new_data00, aes(x=seevar,y=total_revenue))+
# geom_point(size=2)+
# geom_smooth(method='lm')
#FAMD for data (PCA approach both for categorical and numerical)
# res.famd <- FAMD(new_data00,ncp=10,graph=F)
# eig.val <- get_eigenvalue(res.famd)
# eig.val
#
# vari <- get_famd_var(res.famd)
# vari$contrib
#
# fviz_contrib(res.famd,'var',repel=T,col.var='contrib',axes=1)
#XG boost
# 75/25 train/test split of the feature table. seq_len(nrow(...)) is the
# safe form of 1:nrow(...) (correct when the frame is empty) and FALSE
# replaces the reassignable alias F; the sampled indices are identical.
index <- sample(seq_len(nrow(new_data00)), size = round(0.75 * nrow(new_data00)), replace = FALSE)
trs1 <- new_data00[index,]
trs1 %>% head
tts1 <- new_data00[-index,]
#
# trs_labels1 <- as.numeric(trs1$category_50000)-1
# str(trs_labels1)
# tts_labels1 <- as.numeric(tts1$category_50000)-1
# new_trs1 <- model.matrix(~.,trs1[-1])
# head(new_trs1)
# new_tts1 <- model.matrix(~.,tts1[-1])
# head(new_tts1)
#
#
# xg_train1 <- xgb.DMatrix(data=new_trs1,label=trs_labels1)
# xg_test1 <- xgb.DMatrix(data=new_tts1,label=tts_labels1)
# str(xg_train1)
#
#
# def_param1 <- list(booster='gbtree',objective='binary:logistic',eta=0.3,gamma=0,max_depth=6,
# min_child_weight=1,subsample=1,colsample_bytree=1)
#
# xgbcv1 <- xgb.cv(params=def_param1,data=xg_train1,nrounds=100,nfold=5,
# showsd=T,stratified = T,print_every_n = 5,
# early_stopping_rounds = 20,maximize = F)
#
# min(xgbcv1$test.error.mean)
#
# xgb1 <- xgb.train(params=def_param1,data=xg_train1,nrounds=65,
# watchlist = list(val=xg_test1,train=xg_train1),
# print_every_n =10,early_stopping_rounds = 20,
# maximize=F,eval_matrix="error")
#
# xgbpred1 <- predict(xgb1,xg_test1)
# xgbpred1 <- ifelse(xgbpred1>0.5,1,0)
#
# xgbpred1
# tts_labels1
# confusionMatrix(as.factor(xgbpred1),as.factor(tts_labels1))
#
# imp_mat1 <- xgb.importance(feature_names = colnames(new_trs1),model=xgb1)
# imp_mat1
# xgb.plot.importance(importance_matrix = imp_mat1)
#iteration 1
# Labels are the raw revenue values; features are one-hot/model matrices
# built from everything except the first (response) column.
trs_labels1 <- (trs1$total_revenue)
str(trs_labels1)
tts_labels1 <- (tts1$total_revenue)
new_trs1 <- model.matrix(~.,trs1[-1])
head(new_trs1) %>% dim
new_tts1 <- model.matrix(~.,tts1[-1])
head(new_tts1)
xg_train1 <- xgb.DMatrix(data=new_trs1,label=trs_labels1)
xg_test1 <- xgb.DMatrix(data=new_tts1,label=tts_labels1)
def_param1 <- list(booster='gbtree',objective='reg:squarederror',eta=0.1,gamma=0,max_depth=6,
min_child_weight=1,subsample=1,colsample_bytree=1)
# 5-fold CV scored with the custom MAPE metric defined earlier.
# NOTE(review): early_stopping_rounds = 1 stops at the FIRST non-improving
# round, which is very aggressive for eta = 0.1 — confirm this is meant.
xgbcv1 <- xgb.cv(params=def_param1,data=xg_train1,nrounds=5000,nfold=5,
showsd=T,stratified = T,print_every_n = 1,
early_stopping_rounds = 1,maximize = F,eval_metric=MAPE)
which.min(xgbcv1$evaluation_log$test_mape_mean)
# Final fit. NOTE(review): nrounds = 57 is hard-coded rather than taken
# from the which.min() above, so it goes stale if the CV result changes.
xgb1 <- xgb.train(params=def_param1,data=xg_train1,nrounds=57,
watchlist = list(train=xg_train1,test=xg_test1),
print_every_n = 1,early_stopping_rounds = 20,
maximize= F, eval_metric= 'mae')
which.min(xgb1$evaluation_log$test_mae)
# Hold-out predictions and side-by-side comparison table.
xgbpred1 <- predict(xgb1,xg_test1)
xgbpred1
chk_xgb1 <- data.frame(original=tts1$total_revenue,prediction=(xgbpred1))
#chk_xgb1[chk_xgb1$prediction<0,]
# chk <- data.frame(original=tts1,prediction=xgbpred1)
# chk[chk$prediction<0,] %>% head
# chk[chk$prediction<0,] %>% dim
# sum(chk[chk$prediction<0,]$original.sell_sequence==1)
# chk[chk$prediction<0,] %>% filter(original.sell_sequence==1)
# min(chk_xgb1$prediction[chk_xgb1$prediction>=0])
# mean(chk_xgb1$prediction[chk_xgb1$prediction>=0])
# median(chk_xgb1$prediction[chk_xgb1$prediction>=0])
#
#chk_xgb1$prediction[chk_xgb1$prediction<0] <- 2500000
#chk_xgb1$prediction[chk_xgb1$prediction<0] <- (-1)*(chk_xgb1$prediction[chk_xgb1$prediction<0])
# (chk_xgb1$prediction[chk_xgb1$prediction<0])%>% max
chk_xgb1
# Hold-out "MAPE" (denominator = prediction, same convention as MAPE()).
sum(abs((chk_xgb1$prediction-chk_xgb1$original)/(chk_xgb1$prediction)))*100/length(chk_xgb1$prediction)
# Feature-importance table and plot.
imp_mat1 <- xgb.importance(feature_names = colnames(new_trs1),model=xgb1)
imp_mat1
xgb.plot.importance(importance_matrix = imp_mat1)
# find_non_zero <- function(trs1,tts1){
# x <- vector(mode='list',length=1000)
# y <- vector(mode='list',length=1000)
# w <- vector(mode='list',length=1000)
# z <- rep(NA,1000)
# x[[1]] <- tts1
# new_trs1 <- model.matrix(~.,trs1[-1])
# xg_train1 <- xgb.DMatrix(data=new_trs1,label=trs1$total_revenue)
# def_param1 <- list(booster='gbtree',objective='reg:squarederror',eta=0.1,gamma=0,max_depth=8,
# min_child_weight=1,subsample=1,colsample_bytree=1)
#
# new_tts1 <- model.matrix(~.,x[[1]][-1])
# xg_test1 <- xgb.DMatrix(data=new_tts1,label=x[[1]]$total_revenue)
# y[[1]] <- xgb.cv(params=def_param1,data=xg_train1,nrounds=2000,nfold=5,
# showsd=T,stratified = T,print_every_n = 50,
# early_stopping_rounds = 20,maximize = F)
#
# xgb1 <- xgb.train(params=def_param1,data=xg_train1,nrounds=which.min(y[[1]]$evaluation_log$test_rmse_mean),
# watchlist = list(val=xg_test1,train=xg_train1),
# print_every_n = 50,early_stopping_rounds = 20,
# maximize=F,eval_matrix="error")
#
# xgbpred1 <- predict(xgb1,xg_test1)
# w[[1]] <- data.frame(original=x[[1]]$total_revenue,prediction=xgbpred1)
#
# z[1] <- sum(abs((w[[1]]$prediction-w[[1]]$original)/(w[[1]]$prediction)))*100/length(w[[1]]$prediction)
#
# #start of new iteration
# for(i in 1:1000){
# if(length(w[[i]]$prediction[w[[i]]$prediction<0])!=0){
# print('not yet')
# xgb_train2 <- data.frame(x[[1]],prediction=w[[1]]$prediction)
#
# x[[i+1]] <- xgb_train2[xgb_train2$prediction<0,][,-13]
#
# tts_labels2 <- x[[i+1]]$total_revenue
# new_tts2 <- model.matrix(~.,x[[i+1]][-1])
#
# xg_test2 <- xgb.DMatrix(data=new_tts2,label=tts_labels2)
#
# y[[i+1]] <- xgb.cv(params=def_param1,data=xg_train1,nrounds=2000,nfold=5,
# showsd=T,stratified = T,print_every_n = 150,
# early_stopping_rounds = 20,maximize = F)
#
# xgb2 <- xgb.train(params=def_param1,data=xg_train1,nrounds=which.min(y[[i+1]]$evaluation_log$test_rmse_mean),
# watchlist = list(val=xg_test2,train=xg_train1),
# print_every_n = 150,early_stopping_rounds = 20,
# maximize=F,eval_matrix="error")
#
# xgbpred2 <- predict(xgb2,xg_test2)
#
# w[[i+1]] <- data.frame(original=x[[i+1]]$total_revenue,prediction=xgbpred2)
# w[[1]][w[[1]]$prediction<0,] <- w[[i+1]]
#
# z[i+1] <- sum(abs((w[[1]]$prediction-w[[1]]$original)/(w[[1]]$prediction)))*100/length(w[[1]]$prediction)
# }
# else(if(length(w[[i]]$prediction[w[[i]]$prediction<0])==0){
# break})
# }
# return(list(prediction=z,chk=w[[1]]))
#
# }
#
# a <- find_non_zero(trs1,tts1)
# a
#
# sum(a$chk<0)
# min(a$prediction)
#hyperparameter tuning
# Create empty lists
# lowest_error_list = list()
# parameters_list = list()
# Create 10,000 rows with random hyperparameters
# set.seed(123)
# for (iter in 1:10000){
# param <- list(booster = "gbtree",
# objective = "reg:squarederror",
# max_depth = sample(3:10, 1),
# eta = runif(1, 0.01, 0.3),
# subsample = runif(1, 0.5, 0.8),
# colsample_bytree = runif(1, 0.5, 0.9),
# min_child_weight = sample(0:10, 1)
# )
# parameters <- as.data.frame(param)
# parameters_list[[iter]] <- parameters
# }
#
# parameters_list
# # Create object that contains all randomly created hyperparameters
# parameters_df = do.call(rbind, parameters_list)
# nrow(parameters_df[1,])
# parameters_df %>% head
# # x<-list(c(1,2,3),c(4,5,6))
# # x
# # lapply(x,sum)
# # lapply(x,rbind)
# # do.call(sum,x)
# # do.call(rbind,x)
#
# # Use randomly created parameters to create 10,000 XGBoost-models
# for (row in 1:nrow(parameters_df)) {
# set.seed(123)
# best_iteration <- matrix(NA,nrow = 10, ncol=2)
# for(j in 1:10){
# xgbcv1 <- xgb.cv(data=xg_train1,nrounds=5000,nfold=5,
# max_depth = parameters_df$max_depth[row],
# eta = parameters_df$eta[row],
# subsample = parameters_df$subsample[row],
# colsample_bytree = parameters_df$colsample_bytree[row],
# min_child_weight = parameters_df$min_child_weight[row],
# showsd=T,stratified = T,print_every_n = 1,
# early_stopping_rounds = 1,maximize = F,eval_metric=MAPE)
# best_iteration[j,] <- c(which.min(xgbcv1$evaluation_log$test_mape_mean),min(xgbcv1$evaluation_log$test_mape_mean))
# }
#
# xgb1 <- xgb.train(data=xg_train1,
# booster = "gbtree",
# objective = "reg:squarederror",
# max_depth = parameters_df$max_depth[row],
# eta = parameters_df$eta[row],
# subsample = parameters_df$subsample[row],
# colsample_bytree = parameters_df$colsample_bytree[row],
# min_child_weight = parameters_df$min_child_weight[row],
# nrounds= best_iteration[which.min(best_iteration[,2])],
# eval_metric = "mae",
# early_stopping_rounds= 20,
# print_every_n = 150,
# watchlist = list(train=xg_test1,test=xg_train1))
# xgbpred <- predict(xgb1,xg_test1)
# chk_xgb <- data.frame(original=tts1$total_revenue,prediction=xgbpred)
# lowest_error <- sum(abs((chk_xgb$prediction-chk_xgb$original)/(chk_xgb$prediction)))*100/length(chk_xgb$prediction)
# lowest_error_list[row] <- lowest_error
# }
#
# # Create object that contains all accuracy's
# lowest_error_df <- do.call(rbind, lowest_error_list)
# lowest_error_df
#
# # Bind columns of accuracy values and random hyperparameter values
# randomsearch <- cbind(lowest_error_df, parameters_df)
# randomsearch
# light gbm
# Custom MAPE evaluation metric for lightgbm (passed as `eval` to lgb.cv).
# Returns the list shape lightgbm expects: name / value / higher_better.
# NOTE(review): the denominator is the prediction, not the observed label --
# conventional MAPE divides by the actual value; confirm this is intentional.
MAPE2 <- function(preds, dtrain) {
  y_hat <- as.numeric(preds)
  y_obs <- as.numeric(getinfo(dtrain, 'label'))
  mape_value <- 100 * sum(abs((y_obs - y_hat) / y_hat)) / length(y_hat)
  list(name = 'mape', value = mape_value, higher_better = FALSE)
}
# ---- LightGBM: sparse design matrices, CV, fit, and evaluation ----
# Quick interactive inspection of the design matrix built in the xgboost section.
new_trs1 %>% head
new_trs1 %>% str
as.data.frame(new_trs1)
# Sparse one-hot design matrices for train/test; response is total_revenue.
lg_trainm <- sparse.model.matrix(total_revenue~., data=trs1)
lg_train_label <- trs1$total_revenue
lg_testm <- sparse.model.matrix(total_revenue~., data=tts1)
lg_test_label <- tts1$total_revenue
# lgb.Dataset wraps the (densified) feature matrix together with the labels.
lg_train <- lgb.Dataset(data=as.matrix(lg_trainm),label=lg_train_label)
lg_train
lg_test <- lgb.Dataset(data=as.matrix(lg_testm),label=lg_test_label)
lg_test
getinfo(lg_train,'label')
# Baseline GBDT parameters: unconstrained depth, 70% feature/row subsampling.
def_param2 <- list(boosting ='gbdt',objective='regression', num_leaves= 31, max_depth= -1,
feature_fraction=0.7, bagging_fraction=0.7,
bagging_freq=5, learning_rate=0.1, num_threads=2)
# 5-fold CV with the custom MAPE metric to select the iteration count.
# NOTE(review): `showsd=5` -- showsd is a logical flag; confirm `5` is intended.
lgbcv1 <- lgb.cv(params=def_param2,data=lg_train, nrounds=5000,
early_stopping_rounds = 20, eval_freq = 150,
nfold=5, showsd=5, stratified = T,verbose=1,eval=MAPE2)
lgbcv1$best_iter
# Refit on the full training set at the CV-selected number of rounds.
lgb1 <- lgb.train(params=def_param2,objective='regression',data=lg_train,
nrounds=lgbcv1$best_iter,
eval_freq=150)
# NOTE(review): prediction uses `new_tts1` (the dense model.matrix built for
# the xgboost section) rather than `lg_testm` created above -- verify that the
# two matrices share the same column layout.
lgbpred1 <- predict(lgb1,new_tts1)
lgbpred1
# Actual vs predicted revenue on the hold-out set.
chk_lgb1 <- data.frame(original=tts1$total_revenue,prediction=(lgbpred1))
chk_lgb1
# Rows where the model predicts negative revenue (impossible values).
chk_lgb1[chk_lgb1$prediction<0,]
chk_lgb1
# Hold-out MAPE (denominator is the prediction, consistent with MAPE2).
sum(abs((chk_lgb1$prediction-chk_lgb1$original)/(chk_lgb1$prediction)))*100/length(chk_lgb1$prediction)
# Gain-share feature importance for the fitted booster.
imp_mat2 <- lgb.importance(model=lgb1,percentage=T)
imp_mat2
lgb.plot.importance(imp_mat2,measure="Gain", top_n=60)
| /나데정.R | permissive | HolaTeo/BigContest | R | false | false | 24,997 | r | rm(list=ls())
#setwd('C:/Users/Industrial Stat Lab/Desktop')
#setwd('C:/Users/lswsi/OneDrive/바탕 화면/대학교/20-여름방학/공모전/2020빅콘테스트 문제데이터(데이터분석분야-챔피언리그)/01_제공데이터')
#setwd('C:/Users/lswsi/Desktop/2020빅콘테스트 문제데이터(데이터분석분야-챔피언리그)/01_제공데이터')
setwd('C:\\Users\\62190\\Documents\\BigContest\\datas')
library(readxl)
library(randomForest)
library(ggplot2)
library(GGally)
library(caret)
library(e1071)
library(gbm)
library(dplyr)
library(xgboost)
library(tidytext)
library(tm)
library(text2vec)
library(wordcloud)
library(SnowballC)
library(stringr)
library(data.table)
library(mltools)
library(FactoMineR)
library(factoextra)
library(lightgbm)
library(Matrix)
# install.packages('randomForest')
# install.packages('GGally')
# install.packages('e1071')
# install.packages('caret')
# install.packages('gbm')
# install.packages('xgboost')
# install.packages('tidytext')
# install.packages('text2vec')
# install.packages('tm')
# install.packages('wordcloud')
# install.packages('SnowballC')
# install.packages('stringr')
# install.packages('data.table')
# install.packages('mltools')
# install.packages('FactoMineR')
# install.packages('factoextra')
# install.packages('tidyverse')
# install.packages('mlr')
# install.packages('Metrics')
# install.packages('Matrix')
# write down at the terminal tab,
# previously install
# 1. CMake (https://cmake.org/download/)
#
# 2. git (https://git-scm.com/download/win)
#
# 3. Rtools (https://cran.r-project.org/bin/windows/Rtools)
#
# ( 설치 과정중에, 환경변수를 추가하는 옵션 체크 해줄것)
#
# 4. Visual Studio (https://www.visualstudio.com/thank-you-downloading-visual-studio/?sku=Community&rel=15)
#
# (설치 후, 재부팅 필수)
# NOTE(review): the three lines below are shell commands that were pasted
# directly into this R script; left uncommented they are a parse error the
# moment the file is sourced. Run them in a terminal instead (see the
# installation notes in the comments above).
# git clone --recursive https://github.com/microsoft/LightGBM
# cd LightGBM
# Rscript build_r.R
PKG_URL <- "https://github.com/microsoft/LightGBM/releases/download/v3.0.0rc1/lightgbm_3.0.0-1-r-cran.tar.gz"
remotes::install_url(PKG_URL)
PKG_URL <- "https://github.com/microsoft/LightGBM/releases/download/v3.0.0rc1/lightgbm-3.0.0-1-r40-windows.zip"
local_file <- paste0("lightgbm.", tools::file_ext(PKG_URL))
download.file(
url = PKG_URL
, destfile = local_file
)
install.packages(
pkgs = local_file
, type = "binary"
, repos = NULL
)
# install.packages('devtools')
# library(devtools)
#
devtools::install_github("Laurae2/lgbdl")
#
options(devtools.install.args = "--no-multiarch")
devtools::install_github("Microsoft/LightGBM", subdir = "R-package")
#
# running code to verify successful installation of package
library(lightgbm)
data(agaricus.train, package='lightgbm')
train <- agaricus.train
train %>% head
dtrain <- lgb.Dataset(train$data, label=train$label)
dtrain
params <- list(objective="regression", metric="l2")
model <- lgb.cv(params, dtrain, 10, nfold=5, min_data=1, learning_rate=1, early_stopping_rounds=10)
model
#custom MAPE function for xgboost use feval
# Custom MAPE feval for xgboost (used via `eval_metric=MAPE` in xgb.cv).
# Returns the list shape xgboost expects: metric name plus its value.
# NOTE(review): divides by the prediction rather than the label; standard
# MAPE uses the actual value in the denominator -- confirm intent.
MAPE <- function(preds, dtrain) {
  actual <- as.numeric(getinfo(dtrain, 'label'))
  fitted <- as.numeric(preds)
  ape_total <- sum(abs((actual - fitted) / fitted)) * 100
  list(metric = 'mape', value = ape_total / length(fitted))
}
# dataset reading
d1 <- read.csv('나만의데이터.csv')
str(d1)
d1 <- as.matrix(d1)
d1%>%head
d1 %>% dim
d1 <- na.omit(d1)
day <- (d1[,2])
day <- as.factor(day)
day
date <- d1[,1]
month <- as.factor(d1[,3])
time <- as.factor(d1[,4])
con_time <- as.numeric(d1[,5])
exposure <- as.numeric(d1[,9])
brand <- as.factor(d1[,10])
code_name <- as.factor(d1[,11])
merch_name <- tolower(d1[,12])
category <- (d1[,13])
category %>% unique
category[category=='의류'] <- 1
category[category=='속옷'] <- 2
category[category=='주방'] <- 3
category[category=='농수축'] <- 4
category[category=='이미용'] <- 5
category[category=='가전'] <- 6
category[category=='생활용품'] <- 7
category[category=='건강기능'] <- 8
category[category=='잡화'] <- 9
category[category=='가구'] <- 10
category[category=='침구'] <- 11
category <- factor(category)
category
price <- as.numeric(d1[,20])
total_revenue <- as.numeric(d1[,23])
seemean <- as.numeric(d1[,21])
min(seemean[seemean!=0])
seemean[seemean==0] <- 0.00006
precipitation <- as.numeric(d1[,15])
mean_temp <- as.numeric(d1[,14])
cold_sc <- as.numeric(d1[,16])
flu_sc <- as.numeric(d1[,17])
pneumonia_sc <- as.numeric(d1[,18])
coronavirus_sc <- as.numeric(d1[,19])
data0 <- data.frame(day,date,month,time,con_time,exposure,brand,code_name,merch_name,category,price,
total_revenue,seemean,precipitation,mean_temp,cold_sc,flu_sc,pneumonia_sc,coronavirus_sc)
sum(is.na(data0))
#Giving seq to data
data0 <- arrange(data0,code_name)
data0 <- arrange(data0,brand)
View(data0)
# ---- Within-day broadcast slot counter ----
# sell_sequence[i] numbers consecutive rows (data0 is sorted by brand and
# code_name above) that share the same date / code_name / day.
sell_sequence <- rep(NA, length(data0$code_name))
sell_sequence[1] <- 1
for(i in 1:length(data0$code_name)){
# NOTE(review): ifelse() is used here purely for its assignment side effects,
# and the body reads index i+1, which is out of range on the final iteration
# (the comparison is then NA, so neither branch is evaluated). A plain
# if/else bounded by i < n would be clearer -- verify before refactoring.
ifelse((data0$date[i]==data0$date[i+1]& data0$code_name[i]==data0$code_name[i+1]
& data0$day[i]==data0$day[i+1])
,sell_sequence[i+1] <- sell_sequence[i]+1, sell_sequence[i+1] <- 1 )
}
sell_sequence
sum(is.na(sell_sequence))
# Fold slot counts above 6 back into 1..6 (7-12 map onto 1-6).
sell_sequence[sell_sequence==7] <- 1
sell_sequence[sell_sequence==8] <- 2
sell_sequence[sell_sequence==9] <- 3
sell_sequence[sell_sequence==10] <- 4
sell_sequence[sell_sequence==11] <- 5
sell_sequence[sell_sequence==12] <- 6
# Ordered factor with levels 1..6, attached to the working data frame.
sell_sequence <- factor(sell_sequence,order=T,levels=c(1,2,3,4,5,6))
data00 <- data.frame(data0,sell_sequence)
head(data00)
str(data00)
data_seq1 <- data00[data00$sell_sequence==1,]
data_seq2 <- data00[data00$sell_sequence==2,]
data_seq3 <- data00[data00$sell_sequence==3,]
data_seq4 <- data00[data00$sell_sequence==4,]
data_seq5 <- data00[data00$sell_sequence==5,]
data_seq6 <- data00[data00$sell_sequence==6,]
data_seq_mean <- data.frame(mean(data_seq1$total_revenue),mean(data_seq2$total_revenue),mean(data_seq3$total_revenue),
mean(data_seq4$total_revenue),mean(data_seq5$total_revenue),mean(data_seq6$total_revenue))
data_seq_var <- data.frame(var(data_seq1$total_revenue),var(data_seq2$total_revenue),var(data_seq3$total_revenue),
var(data_seq4$total_revenue),var(data_seq5$total_revenue),var(data_seq6$total_revenue))
order(data_seq_mean)
data_seq_mean[order(data_seq_mean)]
rank_seq_mean <- rep(0,length(data00$total_revenue))
for(i in 1:length(data00$total_revenue)){
if(data00$sell_sequence[i]==1){rank_seq_mean[i] <- 1}
else(if(data00$sell_sequence[i]==5){rank_seq_mean[i] <- 2}
else(if(data00$sell_sequence[i]==2){rank_seq_mean[i] <- 3}
else(if(data00$sell_sequence[i]==4){rank_seq_mean[i] <- 4}
else(if(data00$sell_sequence[i]==6){rank_seq_mean[i] <- 5}
else(if(data00$sell_sequence[i]==3){rank_seq_mean[i] <- 6})))))
}
unique(rank_seq_mean)
str(rank_seq_mean)
order(data_seq_var)
data_seq_var[order(data_seq_var)]
rank_seq_var <- rep(0,length(data00$total_revenue))
for(i in 1:length(data00$total_revenue)){
if(data00$sell_sequence[i]==1){rank_seq_var[i] <- 1}
else(if(data00$sell_sequence[i]==5){rank_seq_var[i] <- 2}
else(if(data00$sell_sequence[i]==2){rank_seq_var[i] <- 3}
else(if(data00$sell_sequence[i]==6){rank_seq_var[i] <- 4}
else(if(data00$sell_sequence[i]==3){rank_seq_var[i] <- 5}
else(if(data00$sell_sequence[i]==4){rank_seq_var[i] <- 6})))))
}
unique(rank_seq_var)
str(rank_seq_var)
#giving rank to brand name
data_merch_name <- read_xlsx('seungwonrawdata.xlsx')
data_merch_name %>% head
data_merch_name <- as.matrix(data_merch_name)
brand_name <- data_merch_name[,10]
brand_name %>% head
corpus_top_name <- Corpus(VectorSource(brand_name),
readerControl=list(language='kor'))
corpus_top_name <- tm_map(corpus_top_name,content_transformer(tolower))
corpus_top_name <- tm_map(corpus_top_name,removePunctuation)
text_top_name <- TermDocumentMatrix(corpus_top_name)
dtm_top_name <- as.matrix(text_top_name)
dtm_sum_top_merch_name <- sort(rowSums(dtm_top_name),decreasing=F)
dtm_df_top_merch_name <- data.frame(word=names(dtm_sum_top_merch_name),
freq=dtm_sum_top_merch_name)
dtm_df_top_merch_name %>% head(10)
#wordcloud(words=dtm_df_top_merch_name$word, freq=dtm_df_top_merch_name$freq,
#min.freq=100,max.words=100,random.order = F,rot.per=0.15,
#colors=brewer.pal(5,'Dark2'))
top_brand_name <- rownames(dtm_df_top_merch_name)
rank_brand <- rep(1, length(data00$merch_name))
rank_brand[grep('삼성',data00$merch_name)]
for(i in 1:length(top_brand_name)){
rank_brand[grep(top_brand_name[i],data00$merch_name)] <- i
}
length(unique(rank_brand))
data00 <- data.frame(data00,rank_seq_mean,rank_seq_var,rank_brand)
#data00$rank_brand <- factor(data00$rank_brand,order=T)
data00$rank_brand <- as.numeric(data00$rank_brand)
data00$temp_diff <- data00$top_temp-data00$bottom_temp
#XG boost
#
#
set.seed(123)
#data00 <- data00[data00$total_revenue!=50000,]
head(data00)
new_data00 <- select(data00,total_revenue,day, month, time, con_time, category, price,
#seemean, seevar, mean_temp, top_temp, bottom_temp, rank_seq_var
precipitation, temp_diff, mean_temp,
sell_sequence, rank_seq_mean,rank_brand)
head(new_data00)
str(new_data00)
# ggplot(data=new_data00, aes(x=precipitation, y=total_revenue))+
# geom_point(size=2)
#
# unique(new_data00$precipitation)#0, 9.4, 28.9, 56.5
#
# ggplot(data=new_data00[new_data00$precipitation>=9.4,], aes(x=precipitation, y=total_revenue))+
# geom_point(size=2)
#
# ggplot(data=new_data00, aes(x=mean_temp, y=total_revenue))+
# geom_point(size=2)
#
# ggplot(data=new_data00, aes(x=top_temp, y=total_revenue))+
# geom_point(size=2)
#
# ggplot(data=new_data00, aes(x=bottom_temp, y=total_revenue))+
# geom_point(size=2)
#
# ggplot(data=new_data00, aes(x=temp_diff, y=total_revenue))+
# geom_point(size=2)
new_data00$sell_sequence <- as.numeric(new_data00$sell_sequence)
new_data00$rank_seq_mean <- as.numeric(new_data00$rank_seq_mean)
#new_data00$rank_seq_var <- as.numeric(new_data00$rank_seq_var)
# category_precipitation <- rep(NA, length(new_data00$precipitation))
# for( i in 1: length(new_data00$precipitation)){
# if(new_data00$precipitation[i]>=0&new_data00$precipitation[i]<9.4){
# category_precipitation[i] <- 4
# }else(if(new_data00$precipitation[i]>=9.4&new_data00$precipitation[i]<28.9){
# category_precipitation[i] <- 3
# }else(if(new_data00$precipitation[i]>=28.9&new_data00$precipitation[i]<56.5){
# category_precipitation[i] <- 2
# }else(if(new_data00$precipitation[i]>=56.5){
# category_precipitation[i] <- 1
# })))
# }
# sum(is.na(category_precipitation))
# c_precipitation <- as.numeric(category_precipitation)
# c_precipitation
#
# new_data00 %>% dim
# new_data00 <- data.frame(new_data00, c_precipitation)
# new_data00 %>% head
# new_data00 %>% head
# View(new_data00 %>% filter(total_revenue>=100000000))
# new_data00 %>% filter(total_revenue>=100000000) %>% select(month) %>% unlist() %>% as.numeric() %>% hist()
# new_data00 %>% filter(total_revenue>=100000000) %>% select(category) %>% unlist() %>% as.numeric() %>% hist()
# new_data00 %>% filter(total_revenue>=100000000) %>% select(time) %>% unlist() %>% as.numeric() %>% hist()
# new_data00 %>% filter(total_revenue>=100000000) %>% select(price) %>% unlist() %>% as.numeric() %>% hist()
# new_data00 %>% filter(total_revenue>=100000000) %>% select(sell_sequence) %>% unlist() %>% as.numeric() %>% hist()
# new_data00 %>% filter(total_revenue>=100000000) %>% select(rank_brand) %>% unlist() %>% as.numeric() %>% hist()
# new_data00 %>% filter(total_revenue>=100000000) %>% select(rank_seq_mean) %>% unlist() %>% as.numeric() %>% hist()
#
#
#
#
#
# new_data00 %>% filter(total_revenue==50000) %>% dim()
# plot(sort(new_data00$total_revenue[new_data00$total_revenue>90000000],decreasing=F))
# length(new_data00$total_revenue[new_data00$total_revenue>90000000])
# new_data00$total_revenue[order(new_data00$total_revenue,decreasing=T)]
# new_data00$total_revenue %>% quantile(c(0.996,0.997,0.998,0.999))
#new_data001 <- new_data00[new_data00$category==1,]
#new_data001 %>% head
# ggplot(data=new_data00, aes(x=rank_brand,y=total_revenue))+
# geom_point(size=2)+
# geom_smooth(method='lm')
#
# ggplot(data=new_data00, aes(x=seemean,y=total_revenue))+
# geom_point(size=2)+
# geom_smooth(method='lm')
#
# ggplot(data=new_data00, aes(x=seemax,y=total_revenue))+
# geom_point(size=2)+
# geom_smooth(method='lm')
# ggplot(data=new_data00, aes(x=seevar,y=total_revenue))+
# geom_point(size=2)+
# geom_smooth(method='lm')
#FAMD for data (PCA approach both for categorical and numerical)
# res.famd <- FAMD(new_data00,ncp=10,graph=F)
# eig.val <- get_eigenvalue(res.famd)
# eig.val
#
# vari <- get_famd_var(res.famd)
# vari$contrib
#
# fviz_contrib(res.famd,'var',repel=T,col.var='contrib',axes=1)
#XG boost
index <- sample(1:nrow(new_data00),size=round(0.75*nrow(new_data00)),replace=F)
trs1 <- new_data00[index,]
trs1 %>% head
tts1 <- new_data00[-index,]
#
# trs_labels1 <- as.numeric(trs1$category_50000)-1
# str(trs_labels1)
# tts_labels1 <- as.numeric(tts1$category_50000)-1
# new_trs1 <- model.matrix(~.,trs1[-1])
# head(new_trs1)
# new_tts1 <- model.matrix(~.,tts1[-1])
# head(new_tts1)
#
#
# xg_train1 <- xgb.DMatrix(data=new_trs1,label=trs_labels1)
# xg_test1 <- xgb.DMatrix(data=new_tts1,label=tts_labels1)
# str(xg_train1)
#
#
# def_param1 <- list(booster='gbtree',objective='binary:logistic',eta=0.3,gamma=0,max_depth=6,
# min_child_weight=1,subsample=1,colsample_bytree=1)
#
# xgbcv1 <- xgb.cv(params=def_param1,data=xg_train1,nrounds=100,nfold=5,
# showsd=T,stratified = T,print_every_n = 5,
# early_stopping_rounds = 20,maximize = F)
#
# min(xgbcv1$test.error.mean)
#
# xgb1 <- xgb.train(params=def_param1,data=xg_train1,nrounds=65,
# watchlist = list(val=xg_test1,train=xg_train1),
# print_every_n =10,early_stopping_rounds = 20,
# maximize=F,eval_matrix="error")
#
# xgbpred1 <- predict(xgb1,xg_test1)
# xgbpred1 <- ifelse(xgbpred1>0.5,1,0)
#
# xgbpred1
# tts_labels1
# confusionMatrix(as.factor(xgbpred1),as.factor(tts_labels1))
#
# imp_mat1 <- xgb.importance(feature_names = colnames(new_trs1),model=xgb1)
# imp_mat1
# xgb.plot.importance(importance_matrix = imp_mat1)
#iteration 1
# ---- XGBoost: design matrices, CV, and final fit ----
# Response vectors for the train/test splits.
trs_labels1 <- (trs1$total_revenue)
str(trs_labels1)
tts_labels1 <- (tts1$total_revenue)
# Dense one-hot design matrices; column 1 (total_revenue) is dropped via [-1].
new_trs1 <- model.matrix(~.,trs1[-1])
head(new_trs1) %>% dim
new_tts1 <- model.matrix(~.,tts1[-1])
head(new_tts1)
xg_train1 <- xgb.DMatrix(data=new_trs1,label=trs_labels1)
xg_test1 <- xgb.DMatrix(data=new_tts1,label=tts_labels1)
# Tree-booster parameters with a reduced learning rate (eta=0.1).
def_param1 <- list(booster='gbtree',objective='reg:squarederror',eta=0.1,gamma=0,max_depth=6,
min_child_weight=1,subsample=1,colsample_bytree=1)
# 5-fold CV with the custom MAPE feval.
# NOTE(review): early_stopping_rounds = 1 stops after a single non-improving
# round -- very aggressive; confirm this is intended.
xgbcv1 <- xgb.cv(params=def_param1,data=xg_train1,nrounds=5000,nfold=5,
showsd=T,stratified = T,print_every_n = 1,
early_stopping_rounds = 1,maximize = F,eval_metric=MAPE)
which.min(xgbcv1$evaluation_log$test_mape_mean)
# NOTE(review): nrounds is hard-coded to 57 rather than using the
# which.min(...) result computed directly above -- keep them in sync.
xgb1 <- xgb.train(params=def_param1,data=xg_train1,nrounds=57,
watchlist = list(train=xg_train1,test=xg_test1),
print_every_n = 1,early_stopping_rounds = 20,
maximize= F, eval_metric= 'mae')
which.min(xgb1$evaluation_log$test_mae)
# Hold-out predictions and actual-vs-predicted comparison frame.
xgbpred1 <- predict(xgb1,xg_test1)
xgbpred1
chk_xgb1 <- data.frame(original=tts1$total_revenue,prediction=(xgbpred1))
#chk_xgb1[chk_xgb1$prediction<0,]
# chk <- data.frame(original=tts1,prediction=xgbpred1)
# chk[chk$prediction<0,] %>% head
# chk[chk$prediction<0,] %>% dim
# sum(chk[chk$prediction<0,]$original.sell_sequence==1)
# chk[chk$prediction<0,] %>% filter(original.sell_sequence==1)
# min(chk_xgb1$prediction[chk_xgb1$prediction>=0])
# mean(chk_xgb1$prediction[chk_xgb1$prediction>=0])
# median(chk_xgb1$prediction[chk_xgb1$prediction>=0])
#
#chk_xgb1$prediction[chk_xgb1$prediction<0] <- 2500000
#chk_xgb1$prediction[chk_xgb1$prediction<0] <- (-1)*(chk_xgb1$prediction[chk_xgb1$prediction<0])
# (chk_xgb1$prediction[chk_xgb1$prediction<0])%>% max
chk_xgb1
# Hold-out MAPE (denominator is the prediction, consistent with MAPE).
sum(abs((chk_xgb1$prediction-chk_xgb1$original)/(chk_xgb1$prediction)))*100/length(chk_xgb1$prediction)
# Gain-based feature importance for the fitted booster.
imp_mat1 <- xgb.importance(feature_names = colnames(new_trs1),model=xgb1)
imp_mat1
xgb.plot.importance(importance_matrix = imp_mat1)
# find_non_zero <- function(trs1,tts1){
# x <- vector(mode='list',length=1000)
# y <- vector(mode='list',length=1000)
# w <- vector(mode='list',length=1000)
# z <- rep(NA,1000)
# x[[1]] <- tts1
# new_trs1 <- model.matrix(~.,trs1[-1])
# xg_train1 <- xgb.DMatrix(data=new_trs1,label=trs1$total_revenue)
# def_param1 <- list(booster='gbtree',objective='reg:squarederror',eta=0.1,gamma=0,max_depth=8,
# min_child_weight=1,subsample=1,colsample_bytree=1)
#
# new_tts1 <- model.matrix(~.,x[[1]][-1])
# xg_test1 <- xgb.DMatrix(data=new_tts1,label=x[[1]]$total_revenue)
# y[[1]] <- xgb.cv(params=def_param1,data=xg_train1,nrounds=2000,nfold=5,
# showsd=T,stratified = T,print_every_n = 50,
# early_stopping_rounds = 20,maximize = F)
#
# xgb1 <- xgb.train(params=def_param1,data=xg_train1,nrounds=which.min(y[[1]]$evaluation_log$test_rmse_mean),
# watchlist = list(val=xg_test1,train=xg_train1),
# print_every_n = 50,early_stopping_rounds = 20,
# maximize=F,eval_matrix="error")
#
# xgbpred1 <- predict(xgb1,xg_test1)
# w[[1]] <- data.frame(original=x[[1]]$total_revenue,prediction=xgbpred1)
#
# z[1] <- sum(abs((w[[1]]$prediction-w[[1]]$original)/(w[[1]]$prediction)))*100/length(w[[1]]$prediction)
#
# #start of new iteration
# for(i in 1:1000){
# if(length(w[[i]]$prediction[w[[i]]$prediction<0])!=0){
# print('not yet')
# xgb_train2 <- data.frame(x[[1]],prediction=w[[1]]$prediction)
#
# x[[i+1]] <- xgb_train2[xgb_train2$prediction<0,][,-13]
#
# tts_labels2 <- x[[i+1]]$total_revenue
# new_tts2 <- model.matrix(~.,x[[i+1]][-1])
#
# xg_test2 <- xgb.DMatrix(data=new_tts2,label=tts_labels2)
#
# y[[i+1]] <- xgb.cv(params=def_param1,data=xg_train1,nrounds=2000,nfold=5,
# showsd=T,stratified = T,print_every_n = 150,
# early_stopping_rounds = 20,maximize = F)
#
# xgb2 <- xgb.train(params=def_param1,data=xg_train1,nrounds=which.min(y[[i+1]]$evaluation_log$test_rmse_mean),
# watchlist = list(val=xg_test2,train=xg_train1),
# print_every_n = 150,early_stopping_rounds = 20,
# maximize=F,eval_matrix="error")
#
# xgbpred2 <- predict(xgb2,xg_test2)
#
# w[[i+1]] <- data.frame(original=x[[i+1]]$total_revenue,prediction=xgbpred2)
# w[[1]][w[[1]]$prediction<0,] <- w[[i+1]]
#
# z[i+1] <- sum(abs((w[[1]]$prediction-w[[1]]$original)/(w[[1]]$prediction)))*100/length(w[[1]]$prediction)
# }
# else(if(length(w[[i]]$prediction[w[[i]]$prediction<0])==0){
# break})
# }
# return(list(prediction=z,chk=w[[1]]))
#
# }
#
# a <- find_non_zero(trs1,tts1)
# a
#
# sum(a$chk<0)
# min(a$prediction)
#hyperparameter tuning
# Create empty lists
# lowest_error_list = list()
# parameters_list = list()
# Create 10,000 rows with random hyperparameters
# set.seed(123)
# for (iter in 1:10000){
# param <- list(booster = "gbtree",
# objective = "reg:squarederror",
# max_depth = sample(3:10, 1),
# eta = runif(1, 0.01, 0.3),
# subsample = runif(1, 0.5, 0.8),
# colsample_bytree = runif(1, 0.5, 0.9),
# min_child_weight = sample(0:10, 1)
# )
# parameters <- as.data.frame(param)
# parameters_list[[iter]] <- parameters
# }
#
# parameters_list
# # Create object that contains all randomly created hyperparameters
# parameters_df = do.call(rbind, parameters_list)
# nrow(parameters_df[1,])
# parameters_df %>% head
# # x<-list(c(1,2,3),c(4,5,6))
# # x
# # lapply(x,sum)
# # lapply(x,rbind)
# # do.call(sum,x)
# # do.call(rbind,x)
#
# # Use randomly created parameters to create 10,000 XGBoost-models
# for (row in 1:nrow(parameters_df)) {
# set.seed(123)
# best_iteration <- matrix(NA,nrow = 10, ncol=2)
# for(j in 1:10){
# xgbcv1 <- xgb.cv(data=xg_train1,nrounds=5000,nfold=5,
# max_depth = parameters_df$max_depth[row],
# eta = parameters_df$eta[row],
# subsample = parameters_df$subsample[row],
# colsample_bytree = parameters_df$colsample_bytree[row],
# min_child_weight = parameters_df$min_child_weight[row],
# showsd=T,stratified = T,print_every_n = 1,
# early_stopping_rounds = 1,maximize = F,eval_metric=MAPE)
# best_iteration[j,] <- c(which.min(xgbcv1$evaluation_log$test_mape_mean),min(xgbcv1$evaluation_log$test_mape_mean))
# }
#
# xgb1 <- xgb.train(data=xg_train1,
# booster = "gbtree",
# objective = "reg:squarederror",
# max_depth = parameters_df$max_depth[row],
# eta = parameters_df$eta[row],
# subsample = parameters_df$subsample[row],
# colsample_bytree = parameters_df$colsample_bytree[row],
# min_child_weight = parameters_df$min_child_weight[row],
# nrounds= best_iteration[which.min(best_iteration[,2])],
# eval_metric = "mae",
# early_stopping_rounds= 20,
# print_every_n = 150,
# watchlist = list(train=xg_test1,test=xg_train1))
# xgbpred <- predict(xgb1,xg_test1)
# chk_xgb <- data.frame(original=tts1$total_revenue,prediction=xgbpred)
# lowest_error <- sum(abs((chk_xgb$prediction-chk_xgb$original)/(chk_xgb$prediction)))*100/length(chk_xgb$prediction)
# lowest_error_list[row] <- lowest_error
# }
#
# # Create object that contains all accuracy's
# lowest_error_df <- do.call(rbind, lowest_error_list)
# lowest_error_df
#
# # Bind columns of accuracy values and random hyperparameter values
# randomsearch <- cbind(lowest_error_df, parameters_df)
# randomsearch
# light gbm
# lightgbm custom evaluation: mean absolute percentage error.
# NOTE(review): denominator is the prediction, not the observed label --
# standard MAPE divides by the actual; confirm this is deliberate.
MAPE2 <- function(preds, dtrain) {
  obs <- as.numeric(getinfo(dtrain, 'label'))
  pred <- as.numeric(preds)
  err_sum <- sum(abs((obs - pred) / pred)) * 100
  return(list(name = 'mape', value = err_sum / length(pred), higher_better = FALSE))
}
# ---- LightGBM: design matrices, CV, fit, and evaluation ----
new_trs1 %>% head
new_trs1 %>% str
as.data.frame(new_trs1)
# Sparse one-hot design matrices; response is total_revenue.
lg_trainm <- sparse.model.matrix(total_revenue~., data=trs1)
lg_train_label <- trs1$total_revenue
lg_testm <- sparse.model.matrix(total_revenue~., data=tts1)
lg_test_label <- tts1$total_revenue
# lgb.Dataset wraps the (densified) feature matrix plus the label vector.
lg_train <- lgb.Dataset(data=as.matrix(lg_trainm),label=lg_train_label)
lg_train
lg_test <- lgb.Dataset(data=as.matrix(lg_testm),label=lg_test_label)
lg_test
getinfo(lg_train,'label')
# Baseline GBDT parameters: unconstrained depth, 70% feature/row subsampling.
def_param2 <- list(boosting ='gbdt',objective='regression', num_leaves= 31, max_depth= -1,
feature_fraction=0.7, bagging_fraction=0.7,
bagging_freq=5, learning_rate=0.1, num_threads=2)
# 5-fold CV with the custom MAPE metric.
# NOTE(review): `showsd=5` -- showsd is a logical flag; confirm `5` is intended.
lgbcv1 <- lgb.cv(params=def_param2,data=lg_train, nrounds=5000,
early_stopping_rounds = 20, eval_freq = 150,
nfold=5, showsd=5, stratified = T,verbose=1,eval=MAPE2)
lgbcv1$best_iter
# Refit at the CV-selected number of rounds.
lgb1 <- lgb.train(params=def_param2,objective='regression',data=lg_train,
nrounds=lgbcv1$best_iter,
eval_freq=150)
# NOTE(review): predicts on `new_tts1` (dense xgboost matrix) instead of the
# `lg_testm` built above -- verify the column layouts match.
lgbpred1 <- predict(lgb1,new_tts1)
lgbpred1
# Actual vs predicted revenue on the hold-out set.
chk_lgb1 <- data.frame(original=tts1$total_revenue,prediction=(lgbpred1))
chk_lgb1
chk_lgb1[chk_lgb1$prediction<0,]
chk_lgb1
# Hold-out MAPE (denominator is the prediction).
sum(abs((chk_lgb1$prediction-chk_lgb1$original)/(chk_lgb1$prediction)))*100/length(chk_lgb1$prediction)
# Gain-share feature importance for the fitted booster.
imp_mat2 <- lgb.importance(model=lgb1,percentage=T)
imp_mat2
lgb.plot.importance(imp_mat2,measure="Gain", top_n=60)
|
# Attach phewas code descriptions (from the package `pheinfo` table) to a set
# of phenotype codes supplied as a vector or as a column of a data frame.
#   data                - character/factor vector of codes, or a data frame
#                         containing a phenotype column.
#   keep.unmatched.rows - if TRUE, input rows with no pheinfo match are kept.
#   for.plots           - if TRUE, pheinfo's columns are renamed to
#                         phenotype/description before merging.
# Returns the merged data frame.
addPhewasDescription <- function(data, keep.unmatched.rows=F,for.plots=F) {
# Promote a bare vector of codes to a one-column data frame.
# NOTE(review): `class(data) %in% ...` misbehaves for objects whose class
# attribute has length > 1 (e.g. tibbles); is.character()/is.factor() would
# be safer.
if(class(data) %in% c("character", "factor")) {data=data.frame(phenotype=data,stringsAsFactors=F)}
names=names(data)
# Use the first column whose name mentions 'pheno' or 'phewas' as the codes.
first_match=grep("pheno|phewas",names,ignore.case=T)[1]
if(is.na(first_match)) {
warning("Name matching 'pheno' or 'phewas' not found, using the first column")
name=names[1]
} else {
name=names[first_match]
}
# Codes must be character; factors are coerced, anything else is an error.
if(class(data[,name])!="character") {
if(class(data[,name])=="factor") {
warning("Factor phenotype input mapped to characters")
data[,name]=as.character(data[,name])
} else {
stop("Non-character or non-factor phenotypes passed in, so an accurate phewas code mapping is not possible.")
}
}
# `pheinfo` is the phewas code lookup table shipped with the package.
pd=pheinfo
if(for.plots) {
names(pd)=c("phenotype","description")
}
# all.y controls whether unmatched input rows survive the merge.
data=merge(pd,data,by.x=names(pd)[1],by.y=name,all.y=keep.unmatched.rows)
data
} | /R/addPhewasDescription.R | no_license | shameer/PheWAS | R | false | false | 911 | r | addPhewasDescription <- function(data, keep.unmatched.rows=F,for.plots=F) {
if(class(data) %in% c("character", "factor")) {data=data.frame(phenotype=data,stringsAsFactors=F)}
names=names(data)
first_match=grep("pheno|phewas",names,ignore.case=T)[1]
if(is.na(first_match)) {
warning("Name matching 'pheno' or 'phewas' not found, using the first column")
name=names[1]
} else {
name=names[first_match]
}
if(class(data[,name])!="character") {
if(class(data[,name])=="factor") {
warning("Factor phenotype input mapped to characters")
data[,name]=as.character(data[,name])
} else {
stop("Non-character or non-factor phenotypes passed in, so an accurate phewas code mapping is not possible.")
}
}
pd=pheinfo
if(for.plots) {
names(pd)=c("phenotype","description")
}
data=merge(pd,data,by.x=names(pd)[1],by.y=name,all.y=keep.unmatched.rows)
data
} |
\name{[.data.frame.lab}
\Rdversion{1.1}
\alias{[.data.frame.lab}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Selection of a labelled data.frame
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
\method{[}{data.frame.lab}(x, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
%% ~~Describe \code{x} here~~
}
\item{\dots}{
%% ~~Describe \code{\dots} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (x, ...)
{
lab <- labs(x)
ret <- get("[.data.frame")(x, ...)
if (inherits(ret, "data.frame"))
labs(ret) <- lab
ret
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /etc/manbk/data.frame.lab.Rd | no_license | gmonette/spida15 | R | false | false | 1,476 | rd | \name{[.data.frame.lab}
\Rdversion{1.1}
\alias{[.data.frame.lab}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Selection of a labelled data.frame
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
\method{[}{data.frame.lab}(x, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
%% ~~Describe \code{x} here~~
}
\item{\dots}{
%% ~~Describe \code{\dots} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (x, ...)
{
lab <- labs(x)
ret <- get("[.data.frame")(x, ...)
if (inherits(ret, "data.frame"))
labs(ret) <- lab
ret
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
#' Download source file from web
#'
#' Pull a natality data file from the CDC FTP site and extract it locally.
#'
#' @usage download_natality(type, year = 2013)
#' @param type Either "ps" or "us"
#' @param year The year of the data you want to pull
#' @export
#'
#' @details Downloads the zip archive for the requested year/type into the
#'   working directory, extracts it into a local "zips" directory, and
#'   returns the paths to the extracted files.
#'
#' @return Character vector of paths to the unzipped data files.
#'
#' @examples
#' \dontrun{
#' download_natality('ps')
#' }
download_natality <- function(type, year = 2013) {
  stopifnot(type %in% c('us', 'ps'))
  # Create dir for data files (no warning if it already exists).
  dir <- "zips"
  dir.create(dir, showWarnings = FALSE)
  # Location of the files on the CDC FTP server.
  url <- 'ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/Datasets/DVS/natality/Nat'
  # Create full path, e.g. ".../Nat2013ps.zip".
  url <- paste0(url, year, type, '.zip')
  # Take the base of the file name at this location.
  file <- basename(url)
  # Download the file from the internet.
  download.file(url, file)
  # Extract zipped contents to directory.
  unzip(file, exdir = dir)
  # The unzipped files matching the requested type and year.
  fileList <- grep(type, list.files(dir), ignore.case = TRUE, value = TRUE)
  fileList <- grep(year, fileList, value = TRUE)
  # Full location to the files.
  file.path(dir, fileList)
}
| /R/download_natality.R | no_license | darrkj/Natality | R | false | false | 1,129 | r |
#' Download source file from web
#'
#' Pull the file from the cdc website
#'
#' @usage download_natality(type)
#' @param type Either ps or us
#' @param year The year of the data you want to pull
#' @export
#'
#' @details Leads to having the file locally
#'
#' @examples
#' download_natality('ps')
download_natality <- function(type, year = 2013) {
stopifnot(type %in% c('us', 'ps'))
# Create dir for data files.
dir <- "zips"
dir.create(dir, showWarnings = FALSE)
temp <- tempfile()
# Location of the files.
url <- 'ftp://ftp.cdc.gov/pub/Health_Statistics/NCHS/Datasets/DVS/natality/Nat'
# Creat full path
url <- paste0(url, year, type, '.zip')
# Take the base of the file name at this loaction.
file <- basename(url)
# Download the file from the internet.
download.file(url, file)
# Extract zipped contents to directory.
unzip(file, exdir = dir)
# The list of unzipped files.
fileList <- grep(type, list.files(dir), ignore.case = TRUE, value = TRUE)
fileList <- grep(year, fileList, value = TRUE)
# Full location to the files.
paste(dir, fileList, sep = "/")
}
|
# Monte Carlo simulation of two-sample t-statistics under the null
# hypothesis, comparing their distribution to normal and t references.
set.seed(1)
library(rafalib)
dat <- read.csv("mice_pheno.csv")
controlpopulation <- read.csv("femaleControlsPopulation.csv")
controlpopulation <- unlist(controlpopulation)

# Draw two samples of size n from the control population (null is true by
# construction) and return the two-sample t-statistic.
ttestgenerator <- function(n) {
  cases <- sample(controlpopulation, n)
  controls <- sample(controlpopulation, n)
  tstat <- (mean(cases) - mean(controls)) /
    sqrt(var(cases) / n + var(controls) / n)
  return(tstat)
}

# With n = 10 the t-statistics look approximately normal.
ttests <- replicate(1000, ttestgenerator(10))
hist(ttests)
qqnorm(ttests)
abline(0, 1)

# With n = 3 the normal approximation breaks down in the tails.
ttests <- replicate(1000, ttestgenerator(3))
qqnorm(ttests)
abline(0, 1)

# Compare against the t distribution with 2n - 2 degrees of freedom.
# Fix: seq(0.999) evaluated to the single value 1; we need a grid of
# 1000 probabilities in (0, 1).
ps <- (seq(0, 999) + 0.5) / 1000
qqplot(qt(ps, df = 2 * 3 - 2), ttests, xlim = c(-6, 6), ylim = c(-6, 6))
abline(0, 1)

# QQ plot of the population itself. Fix: qnorm() computes quantiles and
# draws nothing, leaving qqline() with no plot to annotate; qqnorm() is
# the plotting function intended here.
qqnorm(controlpopulation)
qqline(controlpopulation)

controls <- rnorm(5000, mean = 24, sd = 3.5)

# Parametric version: sample directly from a normal population instead of
# resampling the observed control population.
ttestgenerator <- function(n, mean = 24, sd = 3.5) {
  cases <- rnorm(n, mean, sd)
  controls <- rnorm(n, mean, sd)
  tstat <- (mean(cases) - mean(controls)) /
    sqrt(var(cases) / n + var(controls) / n)
  return(tstat)
}
ttests <- replicate(1000, ttestgenerator(3))
# Fix: qnorm(ttest) referenced the undefined object `ttest` (runtime error)
# and used the quantile function instead of the QQ plot.
qqnorm(ttests)
abline(0, 1)
| /Monte Carlo Simulation.R | no_license | Gabo226/R | R | false | false | 1,077 | r | set.seed(1)
library(rafalib)
dat <- read.csv("mice_pheno.csv")
controlpopulation <- read.csv("femaleControlsPopulation.csv")
controlpopulation <- unlist(controlpopulation)
ttestgenerator <- function(n){
cases <- sample(controlpopulation,n)
controls <- sample(controlpopulation,n)
tstat <- (mean(cases) - mean(controls))/
sqrt(var(cases)/n + var(controls) / n)
return(tstat)
}
ttests <- replicate(1000, ttestgenerator(10))
hist(ttests)
qqnorm(ttests)
abline(0,1)
ttests <- replicate(1000,ttestgenerator(3))
qqnorm(ttests)
abline(0,1)
ps <- (seq(0.999)+0.5)/1000
qqplot(qt(ps,df=2*3-2),ttests,xlim=c(-6,6),ylim=c(-6,6))
abline(0,1)
qnorm(controlpopulation)
qqline(controlpopulation)
controls <- rnorm(5000,mean = 24, sd= 3.5)
ttestgenerator <- function(n,mean = 24, sd=3.5){
cases <- rnorm(n,mean,sd)
controls <- rnorm(n,mean,sd)
tstat <- (mean(cases)-mean(controls))/
sqrt(var(cases)/n + var(controls)/n )
return(tstat)
}
ttests <- replicate(1000,ttestgenerator(3))
qnorm(ttest)
abline(0,1)
|
\name{dependency_clauses}
\alias{dependency_clauses}
\title{Creates the `Depends:` clause by concatenating individual packages and adding their compare clauses.}
\usage{
dependency_clauses(dependencies)
}
\arguments{
\item{dependencies}{a data.frame with dependency package,
compare, and version set.}
}
\description{
Creates the `Depends:` clause by concatenating individual
packages and adding their compare clauses.
}
| /man/dependency_clauses.Rd | no_license | tor5/rbundler | R | false | false | 432 | rd | \name{dependency_clauses}
\alias{dependency_clauses}
\title{Creates the `Depends:` clause by concatenating individual packages and adding their compare clauses.}
\usage{
dependency_clauses(dependencies)
}
\arguments{
\item{dependencies}{a data.frame with dependency package,
compare, and version set.}
}
\description{
Creates the `Depends:` clause by concatenating individual
packages and adding their compare clauses.
}
|
# PCA pipeline for liver-mask image features: reshape one-row-per-feature
# data to one-row-per-patient, z-score the columns, and run prcomp().
# NOTE(review): setwd()/install.packages() inside a script are side effects
# on the user's machine — consider removing for reuse.
setwd('/Users/lorenzgahn/Documents/CaamSeniorDesign')
install.packages("xlsx")
library("xlsx")
#read raw data, call it 'dat'
dat = read.xlsx('Image Features Liver Masks 20ttp cases anonymized.xlsx',sheetIndex = 1)
#filter to only LabelID=1
dat_Label1 = subset(dat,dat$LabelID==1)
#Make separate data fram with columns that are constant across all rows for a patient
dat_constant = unique(dat_Label1[,c('Pt.ID','Count','Volume','ExtentX','ExtentY','ExtentZ')])
#Keep the first six columns only.
# NOTE(review): this selects columns by position 1-6; confirm these are the
# per-feature (non-constant) columns the comment below the original intended.
dat_Label1 = dat_Label1[,c(1,2,3,4,5,6)]
#reshape data that isn't constant across all rows for a patient
#(wide format: one row per patient, one column per FeatureID)
dat_reshape = reshape(dat_Label1,idvar="Pt.ID",timevar = "FeatureID",direction="wide")
#join two data frames together on patient ID
dat_rawclean = merge(dat_reshape,dat_constant,by.x = "Pt.ID",by.y = "Pt.ID")
#vector with names of columns
cols = colnames(dat_rawclean)
#specify that all columns except Pt.ID will be scaled
scalevars = setdiff(cols,"Pt.ID")
#Scale columns (z-scores)
dat_scaled = data.frame(sapply(dat_rawclean[,scalevars],scale),Pt.ID=dat_rawclean[,"Pt.ID"])
#Move Patient ID column to first
# NOTE(review): grep uses "Pt.ID" as a regex, where "." matches any char;
# exact string would need fixed = TRUE.
PtID_idx = grep("Pt.ID",colnames(dat_scaled))
dat_scaled = dat_scaled[,c(PtID_idx, (1:ncol(dat_scaled))[-PtID_idx])]
#remove columns that are entirely full of Na's
dat_scaled = dat_scaled[,colSums(is.na(dat_scaled)) != nrow(dat_scaled)]
#Show NA count for each column
na_count = sapply(dat_scaled,function(y) sum(length(which(is.na(y)))))
na_count=data.frame(na_count)
#replace na's with 0
dat_scaled[is.na(dat_scaled)] = 0
#run pca on all feature columns (excluding Pt.ID in column 1)
# NOTE(review): 2:402 hard-codes 401 feature columns; breaks if the column
# count changes after the NA-column drop above.
pca = prcomp(dat_scaled[,2:402])
#summary of pca results
summary(pca)
rot = pca$rotation
rot = data.frame(rot)
# NOTE(review): pcaCharts() is not defined in this script and not provided
# by the loaded packages — verify where it comes from.
pcaCharts(pca)
install.packages("factoextra")
library("factoextra")
#Plot of importance of each PC
fviz_screeplot(pca,ncp=20)
#Plot with top contributing variables.
# NOTE(review): axes=2 selects PC2 although the comment above says PC1;
# also confirm the function name (newer factoextra exposes fviz_contrib()).
fviz_pca_contrib(pca,choice = "var",axes=2,xlab="variable",top=10)
install.packages("xlsx")
library("xlsx")
#read raw data, call it 'dat'
dat = read.xlsx('Image Features Liver Masks 20ttp cases anonymized.xlsx',sheetIndex = 1)
#filter to only LabelID=1
dat_Label1 = subset(dat,dat$LabelID==1)
#Make separate data fram with columns that are constant across all rows for a patient
dat_constant = unique(dat_Label1[,c('Pt.ID','Count','Volume','ExtentX','ExtentY','ExtentZ')])
#Subset to only the columns that aren't in dat_constant above
dat_Label1 = dat_Label1[,c(1,2,3,4,5,6)]
#reshape data that isn't constant across all rows for a patient
dat_reshape = reshape(dat_Label1,idvar="Pt.ID",timevar = "FeatureID",direction="wide")
#join two data frames together on patient ID
dat_rawclean = merge(dat_reshape,dat_constant,by.x = "Pt.ID",by.y = "Pt.ID")
#vector with names of columns
cols = colnames(dat_rawclean)
#specify that all columns except Pt.ID will be scaled
scalevars = setdiff(cols,"Pt.ID")
#Scale columns (z-scores)
dat_scaled = data.frame(sapply(dat_rawclean[,scalevars],scale),Pt.ID=dat_rawclean[,"Pt.ID"])
#Move Patient ID column to first
PtID_idx = grep("Pt.ID",colnames(dat_scaled))
dat_scaled = dat_scaled[,c(PtID_idx, (1:ncol(dat_scaled))[-PtID_idx])]
#remove columns that are entirely full of Na's
dat_scaled = dat_scaled[,colSums(is.na(dat_scaled)) != nrow(dat_scaled)]
#Show NA count for each column
na_count = sapply(dat_scaled,function(y) sum(length(which(is.na(y)))))
na_count=data.frame(na_count)
#replace na's with 0
dat_scaled[is.na(dat_scaled)] = 0
#run pca
pca = prcomp(dat_scaled[,2:402])
#summary of pca results
summary(pca)
rot = pca$rotation
rot = data.frame(rot)
pcaCharts(pca)
install.packages("factoextra")
library("factoextra")
#Plot of importance of each PC
fviz_screeplot(pca,ncp=20)
#Plot with top contributing variable to PC1
fviz_pca_contrib(pca,choice = "var",axes=2,xlab="variable",top=10) |
#' @name SDMXOrganisationSchemes
#' @rdname SDMXOrganisationSchemes
#' @aliases SDMXOrganisationSchemes,SDMXOrganisationSchemes-method
#'
#' @usage
#' SDMXOrganisationSchemes(xmlObj, namespaces)
#'
#' @param xmlObj object of class "XMLInternalDocument derived from XML package
#' @param namespaces object of class "data.frame" given the list of namespace URIs
#' @return an object of class "OrganisationSchemes"
#'
#' @seealso \link{readSDMX}
#'
SDMXOrganisationSchemes <- function(xmlObj, namespaces) {
  # Build the generic SDMX part first, then attach the organisation
  # schemes extracted from the same XML document.
  sdmxBase <- SDMX(xmlObj, namespaces)
  schemes <- organisationSchemes.SDMXOrganisationSchemes(xmlObj, namespaces)
  new("SDMXOrganisationSchemes", sdmxBase, organisationSchemes = schemes)
}
#get list of SDMXOrganisationScheme (SDMXAgencyScheme)
#================================================
# Extracts AgencyScheme nodes from the XML document and wraps each in an
# SDMXAgencyScheme object. Returns an empty list for SDMX versions other
# than 2.1 (only 2.1 is handled below).
organisationSchemes.SDMXOrganisationSchemes <- function(xmlObj, namespaces){
  agSchemes <- list()
  sdmxVersion <- version.SDMXSchema(xmlObj, namespaces)
  VERSION.21 <- sdmxVersion == "2.1"
  # The message-level namespace prefix differs for registry-interface
  # envelopes ("registry") vs regular messages ("message").
  messageNsString <- "message"
  if(isRegistryInterfaceEnvelope(xmlObj, FALSE)) messageNsString <- "registry"
  messageNs <- findNamespace(namespaces, messageNsString)
  strNs <- findNamespace(namespaces, "structure")
  #agencyScheme
  if(VERSION.21){
    # XPath lookup with explicit namespace bindings for the message and
    # structure namespaces resolved above.
    agXML <- getNodeSet(xmlObj,"//mes:Structures/str:OrganisationSchemes/str:AgencyScheme",
                        namespaces = c(mes = as.character(messageNs), str = as.character(strNs)))
    agSchemes <- lapply(agXML, SDMXAgencyScheme, namespaces)
  }
  return(agSchemes)
}
#methods

# Flatten an SDMXOrganisationSchemes object into a data.frame with one row
# per organisation scheme (id/agency/version/uri/urn plus status fields).
as.data.frame.SDMXOrganisationSchemes <- function(x, ...){
  # Convert a single scheme S4 object to a one-row data.frame.
  scheme_row <- function(scheme){
    data.frame(
      id = slot(scheme, "id"),
      agencyID = slot(scheme, "agencyID"),
      version = slot(scheme, "version"),
      uri = slot(scheme, "uri"),
      urn = slot(scheme, "urn"),
      isExternalReference = slot(scheme, "isExternalReference"),
      isFinal = slot(scheme, "isFinal"),
      validFrom = slot(scheme, "validFrom"),
      validTo = slot(scheme, "validTo"),
      stringsAsFactors = FALSE
    )
  }
  rows <- lapply(x@organisationSchemes, scheme_row)
  out <- do.call("rbind.fill", rows)
  return(encodeSDMXOutput(out))
}

# Register the S4 coercion so as(x, "data.frame") dispatches to the S3
# method above.
setAs("SDMXOrganisationSchemes", "data.frame",
      function(from) as.data.frame.SDMXOrganisationSchemes(from))
#' @rdname SDMXOrganisationSchemes
#' @aliases SDMXOrganisationSchemes,SDMXOrganisationSchemes-method
#'
#' @usage
#' SDMXOrganisationSchemes(xmlObj, namespaces)
#'
#' @param xmlObj object of class "XMLInternalDocument derived from XML package
#' @param namespaces object of class "data.frame" given the list of namespace URIs
#' @return an object of class "OrganisationSchemes"
#'
#' @seealso \link{readSDMX}
#'
SDMXOrganisationSchemes <- function(xmlObj, namespaces){
new("SDMXOrganisationSchemes",
SDMX(xmlObj, namespaces),
organisationSchemes = organisationSchemes.SDMXOrganisationSchemes(xmlObj, namespaces)
)
}
#get list of SDMXOrganisationScheme (SDMXAgencyScheme)
#================================================
organisationSchemes.SDMXOrganisationSchemes <- function(xmlObj, namespaces){
agSchemes <- list()
sdmxVersion <- version.SDMXSchema(xmlObj, namespaces)
VERSION.21 <- sdmxVersion == "2.1"
messageNsString <- "message"
if(isRegistryInterfaceEnvelope(xmlObj, FALSE)) messageNsString <- "registry"
messageNs <- findNamespace(namespaces, messageNsString)
strNs <- findNamespace(namespaces, "structure")
#agencyScheme
if(VERSION.21){
agXML <- getNodeSet(xmlObj,"//mes:Structures/str:OrganisationSchemes/str:AgencyScheme",
namespaces = c(mes = as.character(messageNs), str = as.character(strNs)))
agSchemes <- lapply(agXML, SDMXAgencyScheme, namespaces)
}
return(agSchemes)
}
#methods
as.data.frame.SDMXOrganisationSchemes <- function(x, ...){
out <- do.call("rbind.fill",
lapply(x@organisationSchemes,
function(as){
#TODO implement as.data.frame
asf <- data.frame(
id = slot(as, "id"),
agencyID = slot(as, "agencyID"),
version = slot(as, "version"),
uri = slot(as, "uri"),
urn = slot(as, "urn"),
isExternalReference = slot(as, "isExternalReference"),
isFinal = slot(as, "isFinal"),
validFrom = slot(as, "validFrom"),
validTo = slot(as, "validTo"),
stringsAsFactors = FALSE
)
return(asf)
})
)
return(encodeSDMXOutput(out))
}
setAs("SDMXOrganisationSchemes", "data.frame",
function(from) as.data.frame.SDMXOrganisationSchemes(from)) |
# -------------------------------------------------------------
# -------Method to anonymize data for analysis + sharing-------
# -------------------------------------------------------------
# define function to generate random alphanumeric strings
# Each code looks like "ABCDE1234F": five random letters, a zero-padded
# number in 1-9999, and one trailing letter. Duplicates are re-drawn so the
# returned codes are guaranteed unique — a collision would silently merge
# two subjects in the anonymized output.
getRandomString <- function(n) {
  make_codes <- function(k) {
    a <- do.call(paste0, replicate(5, sample(LETTERS, k, TRUE), FALSE))
    paste0(a, sprintf("%04d", sample(9999, k, TRUE)), sample(LETTERS, k, TRUE))
  }
  codes <- make_codes(n)
  # Collisions are astronomically unlikely but cheap to rule out entirely.
  while (anyDuplicated(codes) > 0) {
    dup <- duplicated(codes)
    codes[dup] <- make_codes(sum(dup))
  }
  codes
}
# set file location and name
filepath <- './'
filename <- 'appended_data.csv'
# set wd to filepath so output writes there
# NOTE(review): setwd() in a script changes global state; consider writing
# with an explicit path instead.
setwd(filepath)
# read in data
data <- read.csv(paste(filepath, filename, sep = ''), header = TRUE)
# get unique sona ids (or whatever)
sonaids <- unique(data$sonaid)
# create random alphanumeric identifiers of same length
ss_codes <- getRandomString(length(sonaids))
# insert random sscodes: vectorized lookup replaces the original
# row-by-row loop (identical result, O(n) instead of per-row assignment)
data$sonaid <- ss_codes[match(data$sonaid, sonaids)]
# rename column
names(data)[names(data) == "sonaid"] <- "ss_code"
# delete identifying information from file
#anonymized <- data[,!names(data) %in% c("sonaid")]
# save (row.names = FALSE so no index column is added)
write.csv(data, file = "appended_data_anonymized.csv", row.names = FALSE)
| /anonymize.R | no_license | bcwralph/useful-R-functions | R | false | false | 1,225 | r | # -------------------------------------------------------------
# -------Method to anonymize data for analysis + sharing-------
# -------------------------------------------------------------
# define function to generate random alphanumeric strings
getRandomString <- function(n) {
a <- do.call(paste0, replicate(5, sample(LETTERS, n, TRUE), FALSE))
paste0(a, sprintf("%04d", sample(9999, n, TRUE)), sample(LETTERS, n, TRUE))
}
# set file location and name
filepath <- './'
filename <- 'appended_data.csv'
# set wd to filepath so output writes there
setwd(filepath)
# read in data
data <- read.csv(paste(filepath,filename,sep=''),header=TRUE)
# get unique sona ids (or whatever)
sonaids <- unique(data$sonaid)
# create random alphanumeric identifiers of same length
ss_codes <- getRandomString(length(sonaids))
# insert random sscodes
for (row in 1:nrow(data)){
data$sonaid[row] <- ss_codes[match(data$sonaid[row],sonaids)]
}
# rename column
names(data)[names(data) == "sonaid"] <- "ss_code"
# delete identifying information from file
#anonymized <- data[,!names(data) %in% c("sonaid")]
# save
write.csv(data, file = "appended_data_anonymized.csv",row.names=FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/all_shapefiles.R
\name{all_shapefiles}
\alias{all_shapefiles}
\title{Return path to all shapefiles}
\usage{
all_shapefiles(check_dl = FALSE, dataset = c("nhdh", "hydrolakes",
"nhdplusv2"), feature_type = c("waterbody", "flowline"))
}
\arguments{
\item{check_dl}{If TRUE, checks to ensure all files for that dataset have been downloaded.
This check takes some time (~30 seconds) to check all files (and much longer to dowload if necessary).}
\item{dataset}{name of dataset to use for matching.}
\item{feature_type}{name of feature layer to match. The hydrolakes dataset does not include a flowline layer.}
}
\description{
Returns list of paths to all locally cached shapefiles for a specific dataset
for use in custom processing.
If \code{check_dl == TRUE}, all shapefiles for the specified dataset
are downloaded to your local machine (skipping those that have been
previously downloaded). This is a great way to pre-cache all
shapefiles for a specific dataset. The files can be loaded into R and iterated
over for custom mapping or processing of entire U.S. National or
Global datasets.
}
| /man/all_shapefiles.Rd | no_license | cran/hydrolinks | R | false | true | 1,199 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/all_shapefiles.R
\name{all_shapefiles}
\alias{all_shapefiles}
\title{Return path to all shapefiles}
\usage{
all_shapefiles(check_dl = FALSE, dataset = c("nhdh", "hydrolakes",
"nhdplusv2"), feature_type = c("waterbody", "flowline"))
}
\arguments{
\item{check_dl}{If TRUE, checks to ensure all files for that dataset have been downloaded.
This check takes some time (~30 seconds) to check all files (and much longer to dowload if necessary).}
\item{dataset}{name of dataset to use for matching.}
\item{feature_type}{name of feature layer to match. The hydrolakes dataset does not include a flowline layer.}
}
\description{
Returns list of paths to all locally cached shapefiles for a specific dataset
for use in custom processing.
If \code{check_dl == TRUE}, all shapefiles for the specified dataset
are downloaded to your local machine (skipping those that have been
previously downloaded). This is a great way to pre-cache all
shapefiles for a specific dataset. The files can be loaded into R and iterated
over for custom mapping or processing of entire U.S. National or
Global datasets.
}
|
\alias{gtkRecentChooserMenuGetShowNumbers}
\name{gtkRecentChooserMenuGetShowNumbers}
\title{gtkRecentChooserMenuGetShowNumbers}
\description{Returns the value set by \code{\link{gtkRecentChooserMenuSetShowNumbers}}.}
\usage{gtkRecentChooserMenuGetShowNumbers(object)}
\arguments{\item{\verb{object}}{a \code{\link{GtkRecentChooserMenu}}}}
\details{Since 2.10}
\value{[logical] \code{TRUE} if numbers should be shown.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /RGtk2/man/gtkRecentChooserMenuGetShowNumbers.Rd | no_license | lawremi/RGtk2 | R | false | false | 489 | rd | \alias{gtkRecentChooserMenuGetShowNumbers}
\name{gtkRecentChooserMenuGetShowNumbers}
\title{gtkRecentChooserMenuGetShowNumbers}
\description{Returns the value set by \code{\link{gtkRecentChooserMenuSetShowNumbers}}.}
\usage{gtkRecentChooserMenuGetShowNumbers(object)}
\arguments{\item{\verb{object}}{a \code{\link{GtkRecentChooserMenu}}}}
\details{Since 2.10}
\value{[logical] \code{TRUE} if numbers should be shown.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sc_hc.R
\name{SC_hc}
\alias{SC_hc}
\title{SC_hc hierarchical clustering of single cells}
\usage{
SC_hc(dataFile, baseName, cuttree_k = 4)
}
\arguments{
\item{dataFile}{a tab delimited txt file of expression data, columns are cells, rows are genes.}
\item{baseName}{prefix name of resulting files}
\item{cuttree_k}{number of clusters to be generated by cutting the dendrogram.}
}
\description{
SC_hc hierarchical clustering of single cells
}
| /man/SC_hc.Rd | no_license | dynverse/Mpath | R | false | true | 519 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sc_hc.R
\name{SC_hc}
\alias{SC_hc}
\title{SC_hc hierachical clustering of single cells}
\usage{
SC_hc(dataFile, baseName, cuttree_k = 4)
}
\arguments{
\item{dataFile}{a tab delimited txt file of expression data, columns are cells, rows are genes.}
\item{baseName}{prefix name of resulting files}
\item{cuttree_k}{number of clusters to be generated by cutting the dendrogram.}
}
\description{
SC_hc hierachical clustering of single cells
}
|
library(testthat)
library(hadcol)
test_check("hadcol")
| /tests/testthat.R | no_license | hadley/hadcol | R | false | false | 56 | r | library(testthat)
library(hadcol)
test_check("hadcol")
|
# we expect the variable data (a data.table) to exist and contain all the observations
# at the end of the script, the tibble `asfr` will contain the age specific fertility rate (for all the women) by census
printf <- function(...) cat(sprintf(...))
# Select the columns needed for the ASFR computation and restrict to one
# census year; `data` is expected to be a data.table (list() column
# selection and setkey below).
year <- 1973
data_asfr <- data[, list(YEAR, SERIAL, SEX, AGE, RELATE, MOMLOC, STEPMOM, PERNUM, CHBORN, CHSURV)]
data_asfr <- data_asfr[YEAR==year]
data_asfr$motherAgeAtBirth <-0;
# Assuming we are operating on one YEAR at a time
setkey(data_asfr, SERIAL)
data_asfr$personId <- 1:nrow(data_asfr)
#Age specific graph
women <- subset(data_asfr, YEAR==year & SEX==2 & AGE>=15 & AGE<=19)
totWomen <- women %>%
  summarise(num_women = (sum(SEX, na.rm=T))/2) #denominator: total women in age group
totWomen
#the idea here is to be sure that women between the age group are having new borns in that period,that is that the children is between 0 - 4 years. To do so I think I can substract the age og the children from the age of the mother, and counted as a birth just if is between 0 and 4.
#for each serial=ss, I need to substract the age(AGE) of the children (RELATE==3) from the age of the mother (RELATE==2). This will give me a variable of age at birth.
# Age bins: [15,20), [20,25), ... up to 55 -> 8 bins.
lowestAge = 15;
binSize = 5;
highestAge = 55;
numBins = (highestAge - lowestAge) / binSize;
motherArray = array(0, dim = numBins)
# for each woman select all people from her household
#lastSerial <- max(data_asfr$SERIAL)
#for (serial in seq(1000, lastSerial, by = 1000))
serials = unique(data_asfr$SERIAL)
numSerials = length(serials)
# NOTE(review): 1:numSerials iterates c(1, 0) when numSerials == 0;
# seq_len(numSerials) would be safe.
for (ser in 1:numSerials)
{
  serial = serials[ser];
  #household <- data_asfr[SERIAL == serial]
  # Keyed data.table lookup on SERIAL (fast path via setkey above).
  household <- data_asfr[list(serial)]
  # Single-person households cannot contain a mother-child pair.
  if(nrow(household) == 1)
  {
    next;
  }
  # NOTE(review): mothersInHouse is allocated but never used.
  mothersInHouse = array(0, dim = nrow(household))
  # household=data_asfr[SELIAL==104000]
  # for each potential child, retrieve the age and store in the motherAgeAtBirth column the value motherAge-age
  for (p in 1:nrow(household))
  {
    person <- household[p];
    # MOMLOC != 0 means the mother lives in this household; STEPMOM == 0
    # excludes step-mothers so only biological births are counted.
    if(person$MOMLOC != 0 & person$STEPMOM == 0)
    {
      # this person is a child
      mother <- subset(household, PERNUM == person$MOMLOC);
      if(nrow(mother) != 1)
      { # the mother is not here, maybe because we are using a subset of the whole database?
        next;
      }
      motherAge = mother$AGE;
      childAge <- household[[p, "AGE"]]
      id <- household[p]$personId
      motherAgeAtBirth <- motherAge - childAge
      # Write back into the full table by personId; per-row replacement on a
      # data.table copies, hence the slowness noted below (data.table's
      # `:=` in-place assignment would avoid the copy).
      data_asfr[id]$motherAgeAtBirth <- motherAgeAtBirth # very sloooow
      #bin <- as.integer((motherAgeAtBirth - lowestAge) / binSize + 1)
      #motherAgeBin <- as.integer((motherAge - lowestAge) / binSize + 1)
      #if(motherAgeBin == bin && bin >= 1 && bin <= numBins){
      #  motherArray[bin] = motherArray[bin] + 1;
      #}
    }
  }
  # Progress report every 1000 households.
  if(ser %% 1000 == 0)
    printf("Elapsed %d, remaining %d, completed: %.2f%%\n", ser, (numSerials - ser), ser/numSerials * 100)
}
# (removed stray token "sersfokjopohuftrd": an undefined name that aborts
# the script with "object not found" when sourced; if a deliberate stop
# marker was intended, use stop("review before writing output") instead)
# Year of birth derived from census year and reported age.
data_asfr$YearBirth <- data_asfr$YEAR - data_asfr$AGE
write.table(data_asfr, file="data_asfr_73.csv")
motherArray
#1973
# Numerator: 12696 39870 33027 23754 17500 7517 1972 521
#denominator :
#1985
#Numerator: 16193 54976 45427 26163 14715 5358 1420 424
# denominator:
#1993
#Numerator: 18820 55068 49173 32539 17231 5951 1489 818
#denominator
#2005
#numerator: 29037 60726 47762 33517 21146 8937 2011 966
# ASFR = births to mothers in the age group (numerator) divided by women in
# the age group (denominator), per census year; counts taken from the
# tallies listed in the comments above.
asfr_1973 <- tibble(
  age = seq(15, 50, by=5),
  # Fix: first numerator was 2696 (dropped leading digit); the counts
  # recorded above give 12696 for the 15-19 group in 1973.
  numerator = c(12696, 39870, 33027, 23754, 17500, 7517, 1972, 521),
  denominator = c(122424, 94887, 70229, 56805, 53946, 43568, 35856, 36977),
  asfr = numerator/denominator,
  year= "1973"
)
asfr_1985 <- tibble(
  age = seq(15, 50, by=5),
  numerator = c(16193, 54976, 45427, 26163, 14715, 5358, 1420, 424),
  denominator = c( 159038, 150702, 122574, 93139, 81244, 58596, 52723, 58178 ),
  asfr = numerator/denominator,
  year= "1985"
)
asfr_1993 <- tibble(
  age = seq(15, 50, by=5),
  numerator = c(18820, 55068, 49173, 32539, 17231, 5951, 1489, 818),
  denominator = c(164526, 161059, 152653, 136086, 113659, 85746, 66033, 68512),
  asfr = numerator/denominator,
  year= "1993"
)
asfr_2005 <- tibble(
  age = seq(15, 50, by=5),
  numerator = c(29037, 60726, 47762, 33517, 21146, 8937, 2011, 966),
  denominator = c(186191, 167066, 153211, 139201, 137878, 126329, 107327, 103550),
  asfr = numerator/denominator,
  year= "2005"
)
# Stack the four census years into one long table for plotting.
asfr <- bind_rows(asfr_1973, asfr_1985, asfr_1993, asfr_2005)
# Typeset the plot in Latin Modern Roman via extrafont, then draw the ASFR
# curves by age group, one line per census year.
library(extrafont)
# Install **TTF** Latin Modern Roman fonts from www.fontsquirrel.com/fonts/latin-modern-roman
# Import the newly installed LModern fonts, change the pattern according to the
# filename of the lmodern ttf files in your fonts folder
# NOTE(review): pattern is a regex, not a glob — "lmodern*" means "lmoder"
# followed by zero or more "n"; plain "lmodern" is likely what was meant.
font_import(pattern = "lmodern*")
# NOTE(review): device = "win" registers fonts for Windows only.
loadfonts(device = "win")
par(family = "LM Roman 10")
ggplot(asfr, aes(x=age, y=asfr, group = year)) + geom_point(aes(shape=year)) + geom_line(aes(linetype= year))+ scale_x_continuous(breaks=seq(15, 50, by=5), labels= c("15-20", "20-25", "25-30", "30-35", "35-40", "40-45", "45-50", "50-55")) + xlab("Age") + ylab("ASFR") + theme_bw() + theme(legend.position="bottom", legend.box = "horizontal", legend.title=element_blank(),
                                                                                                                                                                                                                                                            panel.border = element_blank(),panel.background = element_blank())
| /asfr.R | no_license | jje90/qq-colombia | R | false | false | 5,231 | r | # we expect the variable data (a data.table) to exist and contain all the observations
# at the end of the script, the tibble `asfr` will contain the age specific fertility rate (for all the women) by census
printf <- function(...) cat(sprintf(...))
year <- 1973
data_asfr <- data[, list(YEAR, SERIAL, SEX, AGE, RELATE, MOMLOC, STEPMOM, PERNUM, CHBORN, CHSURV)]
data_asfr <- data_asfr[YEAR==year]
data_asfr$motherAgeAtBirth <-0;
# Assuming we are operating on one YEAR at a time
setkey(data_asfr, SERIAL)
data_asfr$personId <- 1:nrow(data_asfr)
#Age specific graph
women <- subset(data_asfr, YEAR==year & SEX==2 & AGE>=15 & AGE<=19)
totWomen <- women %>%
summarise(num_women = (sum(SEX, na.rm=T))/2) #denominator: total women in age group
totWomen
#the idea here is to be sure that women between the age group are having new borns in that period,that is that the children is between 0 - 4 years. To do so I think I can substract the age og the children from the age of the mother, and counted as a birth just if is between 0 and 4.
#for each serial=ss, I need to substract the age(AGE) of the children (RELATE==3) from the age of the mother (RELATE==2). This will give me a variable of age at birth.
lowestAge = 15;
binSize = 5;
highestAge = 55;
numBins = (highestAge - lowestAge) / binSize;
motherArray = array(0, dim = numBins)
# for each woman select all people from her household
#lastSerial <- max(data_asfr$SERIAL)
#for (serial in seq(1000, lastSerial, by = 1000))
serials = unique(data_asfr$SERIAL)
numSerials = length(serials)
for (ser in 1:numSerials)
{
serial = serials[ser];
#household <- data_asfr[SERIAL == serial]
household <- data_asfr[list(serial)]
if(nrow(household) == 1)
{
next;
}
mothersInHouse = array(0, dim = nrow(household))
# household=data_asfr[SERIAL==104000]
# for each potential child, retrieve the age and store in the motherAgeAtBirth column the value motherAge-age
for (p in 1:nrow(household))
{
person <- household[p];
if(person$MOMLOC != 0 & person$STEPMOM == 0)
{
# this person is a child
mother <- subset(household, PERNUM == person$MOMLOC);
if(nrow(mother) != 1)
{ # the mother is not here, maybe because we are using a subset of the whole database?
next;
}
motherAge = mother$AGE;
childAge <- household[[p, "AGE"]]
id <- household[p]$personId
motherAgeAtBirth <- motherAge - childAge
data_asfr[id]$motherAgeAtBirth <- motherAgeAtBirth # very sloooow
#bin <- as.integer((motherAgeAtBirth - lowestAge) / binSize + 1)
#motherAgeBin <- as.integer((motherAge - lowestAge) / binSize + 1)
#if(motherAgeBin == bin && bin >= 1 && bin <= numBins){
# motherArray[bin] = motherArray[bin] + 1;
#}
}
}
if(ser %% 1000 == 0)
printf("Elapsed %d, remaining %d, completed: %.2f%%\n", ser, (numSerials - ser), ser/numSerials * 100)
}
sersfokjopohuftrd
data_asfr$YearBirth <- data_asfr$YEAR - data_asfr$AGE
write.table(data_asfr, file="data_asfr_73.csv")
motherArray
#1973
# Numerator: 12696 39870 33027 23754 17500 7517 1972 521
#denominator :
#1985
#Numerator: 16193 54976 45427 26163 14715 5358 1420 424
# denominator:
#1993
#Numerator: 18820 55068 49173 32539 17231 5951 1489 818
#denominator
#2005
#numerator: 29037 60726 47762 33517 21146 8937 2011 966
# Build one census year's age-specific fertility rate (ASFR) table.
#
# year        character label for the census year (kept as character so it can
#             be used directly as a ggplot group/linetype aesthetic)
# numerator   births by 5-year age group of the mother (ages 15-50)
# denominator women at risk in the same age groups
#
# Returns a tibble with columns age, numerator, denominator, asfr, year.
make_asfr <- function(year, numerator, denominator) {
    tibble(
        age = seq(15, 50, by = 5),
        numerator = numerator,
        denominator = denominator,
        asfr = numerator / denominator,
        year = year
    )
}
# First 1973 numerator corrected from 2696 to 12696 to match the counts
# recorded in the notes above (a leading digit had been dropped).
asfr_1973 <- make_asfr("1973",
                       c(12696, 39870, 33027, 23754, 17500, 7517, 1972, 521),
                       c(122424, 94887, 70229, 56805, 53946, 43568, 35856, 36977))
asfr_1985 <- make_asfr("1985",
                       c(16193, 54976, 45427, 26163, 14715, 5358, 1420, 424),
                       c(159038, 150702, 122574, 93139, 81244, 58596, 52723, 58178))
asfr_1993 <- make_asfr("1993",
                       c(18820, 55068, 49173, 32539, 17231, 5951, 1489, 818),
                       c(164526, 161059, 152653, 136086, 113659, 85746, 66033, 68512))
asfr_2005 <- make_asfr("2005",
                       c(29037, 60726, 47762, 33517, 21146, 8937, 2011, 966),
                       c(186191, 167066, 153211, 139201, 137878, 126329, 107327, 103550))
# Stack the four years into one long table for plotting.
asfr <- bind_rows(asfr_1973, asfr_1985, asfr_1993, asfr_2005)
library(extrafont)
# Install **TTF** Latin Modern Roman fonts from www.fontsquirrel.com/fonts/latin-modern-roman
# Import the newly installed LModern fonts, change the pattern according to the
# filename of the lmodern ttf files in your fonts folder
font_import(pattern = "lmodern*")
# Register the imported fonts with the Windows graphics device.
# NOTE(review): device = "win" is Windows-only -- use "pdf"/"postscript" on
# other platforms.
loadfonts(device = "win")
# NOTE(review): par() sets the font family for base graphics only; the ggplot
# call below is unaffected unless its theme sets a family -- confirm this line
# is actually needed.
par(family = "LM Roman 10")
# ASFR by 5-year age group: one point shape and line type per census year,
# black-and-white theme, legend at the bottom.  The bare expression is
# rendered by top-level autoprint when run interactively.
ggplot(asfr, aes(x=age, y=asfr, group = year)) + geom_point(aes(shape=year)) + geom_line(aes(linetype= year))+ scale_x_continuous(breaks=seq(15, 50, by=5), labels= c("15-20", "20-25", "25-30", "30-35", "35-40", "40-45", "45-50", "50-55")) + xlab("Age") + ylab("ASFR") + theme_bw() + theme(legend.position="bottom", legend.box = "horizontal", legend.title=element_blank(),
panel.border = element_blank(),panel.background = element_blank())
|
#' @title covidSidoInf: Corona19 City Current status of Korea
#' @description Corona19 City Current status of Korea obtained from http://openapi.data.go.kr/
#' @format A data frame with 5143 rows and 16 variables:
#' \describe{
#'   \item{\code{X}}{integer row index (appears to be an auto-generated column from the data export)}
#' \item{\code{createDt}}{character Date and time of registration}
#' \item{\code{deathCnt}}{integer the number of deaths}
#' \item{\code{defCnt}}{integer number of confirmed infections}
#' \item{\code{gubun}}{character name of city or do(prefecture) in Korean}
#' \item{\code{gubunCn}}{character name of city or do(prefecture) in Chinese characters}
#' \item{\code{gubunEn}}{character name of city or do(prefecture) in English}
#' \item{\code{incDec}}{integer Number of increases and decreases compared to the previous day}
#'   \item{\code{isolClearCnt}}{integer number of people released from isolation}
#' \item{\code{isolIngCnt}}{integer Number of isolated people}
#' \item{\code{localOccCnt}}{integer number of local occurrences}
#' \item{\code{overFlowCnt}}{integer Number of inflow from abroad}
#' \item{\code{qurRate}}{character incidence per 100,000 people}
#' \item{\code{seq}}{integer Post number (infection status unique value)}
#'   \item{\code{stdDay}}{character reference (status) date of the record}
#'   \item{\code{updateDt}}{character date and time of the last update}
#'}
#' @details DETAILS
#' data from Korea Ministry of Health and Welfare
#' Corona19 City Current status inquiry service
#' http://openapi.data.go.kr/openapi/service/rest/Covid19/getCovid19SidoInfStateJson
#' You can find detailed description for this data from
#' https://www.data.go.kr/tcs/dss/selectApiDataDetailView.do?publicDataPk=15043378
#' check : defCnt, isolClearCnt
"covidSidoInf"
| /R/data_covidSidoInf.R | permissive | jykang00/overcomeCovidKor | R | false | false | 1,734 | r | #' @title covidSidoInf: Corona19 City Current status of Korea
#' @description Corona19 City Current status of Korea obtained from http://openapi.data.go.kr/
#' @format A data frame with 5143 rows and 16 variables:
#' \describe{
#' \item{\code{X}}{integer COLUMN_DESCRIPTION}
#' \item{\code{createDt}}{character Date and time of registration}
#' \item{\code{deathCnt}}{integer the number of deaths}
#' \item{\code{defCnt}}{integer number of confirmed infections}
#' \item{\code{gubun}}{character name of city or do(prefecture) in Korean}
#' \item{\code{gubunCn}}{character name of city or do(prefecture) in Chinese characters}
#' \item{\code{gubunEn}}{character name of city or do(prefecture) in English}
#' \item{\code{incDec}}{integer Number of increases and decreases compared to the previous day}
#' \item{\code{isolClearCnt}}{integer Unisolated Number}
#' \item{\code{isolIngCnt}}{integer Number of isolated people}
#' \item{\code{localOccCnt}}{integer number of local occurrences}
#' \item{\code{overFlowCnt}}{integer Number of inflow from abroad}
#' \item{\code{qurRate}}{character incidence per 100,000 people}
#' \item{\code{seq}}{integer Post number (infection status unique value)}
#' \item{\code{stdDay}}{character state date}
#' \item{\code{updateDt}}{character Update Date and Time Minute}
#'}
#' @details DETAILS
#' data from Korea Ministry of Health and Welfare
#' Corona19 City Current status inquiry service
#' http://openapi.data.go.kr/openapi/service/rest/Covid19/getCovid19SidoInfStateJson
#' You can find detailed description for this data from
#' https://www.data.go.kr/tcs/dss/selectApiDataDetailView.do?publicDataPk=15043378
#' check : defCnt, isolClearCnt
"covidSidoInf"
|
## Read a Tucson (decadal) format ring-width (.rwl) file and return the
## measurements as a data.frame of class c("rwl", "data.frame"): one column
## per series, calendar years as row names, values converted to mm.
##
## Arguments:
##   fname      path to the .rwl file
##   header     NULL (auto-detect a 3-line header), TRUE or FALSE
##   long       TRUE for "long" files (7-char series IDs, 5-digit years)
##   encoding   encoding used when reading 'fname'
##   edge.zeros if TRUE keep zero measurements at series edges, otherwise
##              convert zeros to NA as well
`read.tucson` <- function(fname, header = NULL, long = FALSE,
                          encoding = getOption("encoding"),
                          edge.zeros = TRUE)
{
    ## Checks that the input is good. The input variables are vectors
    ## ('series', 'decade.yr') or matrices ('x') containing most of
    ## the data acquired from the input file 'fname'.
    ## Returns TRUE when the parsed rows look like a valid Tucson file,
    ## FALSE (with a warning) otherwise.
    input.ok <- function(series, decade.yr, x) {
        if (length(series) == 0) {
            return(FALSE)
        }
        ## Number of values allowed per row depends on first year modulo 10
        n.per.row <-
            apply(x, 1,
                  function(x) {
                      notna <- which(!is.na(x))
                      n.notna <- length(notna)
                      if (n.notna == 0) {
                          0
                      } else {
                          notna[n.notna]
                      }
                  })
        full.per.row <- 10 - decade.yr %% 10
        ## One extra column per row is allowed:
        ## a. enough space will be allocated (max.year is larger than
        ##    last year of any series)
        ## b. the extra col may contain a stop marker (non-standard location)
        idx.bad <- which(n.per.row > full.per.row + 1)
        n.bad <- length(idx.bad)
        if (n.bad > 0) {
            warn.fmt <-
                ngettext(n.bad,
                         "%d row has too many values (ID, decade %s)",
                         "%d rows have too many values (IDs, decades %s)",
                         domain="R-dplR")
            if (n.bad > 5) {
                ## Report only a sample of five offending rows
                idx.bad <- sample(idx.bad, 5)
                ids.decades <- paste(paste(series[idx.bad], decade.yr[idx.bad],
                                           sep=", ", collapse="; "),
                                     "...", sep="; ")
            } else {
                ids.decades <- paste(series[idx.bad], decade.yr[idx.bad],
                                     sep="-", collapse=", ")
            }
            warning(sprintf(warn.fmt, n.bad, ids.decades), domain=NA)
            return(FALSE)
        }
        series.ids <- unique(series)
        nseries <- length(series.ids)
        series.index <- match(series, series.ids)
        ## Mark the chronologically last row of each series: only those rows
        ## are allowed to be partially filled.
        last.row.of.series <- logical(length(series))
        for (i in seq_len(nseries)) {
            idx.these <- which(series.index == i)
            last.row.of.series[idx.these[which.max(decade.yr[idx.these])]] <-
                TRUE
        }
        flag.bad2 <- n.per.row < full.per.row
        if (!all(last.row.of.series) && all(flag.bad2[!last.row.of.series])) {
            warning("all rows (last rows excluded) have too few values")
            return(FALSE)
        }
        min.year <- min(decade.yr)
        max.year <- ((max(decade.yr)+10) %/% 10) * 10
        if (max.year > as.numeric(format(Sys.Date(), "%Y")) + 100) {
            ## Must do something to stop R from trying to build huge
            ## data structures if the maximum year is not detected
            ## correctly. Not too strict (allow about 100 years past
            ## today).
            warning("file format problems (or data from the future)")
            return(FALSE)
        }
        ## Look for duplicate IDs -- a common problem with Tucson RWL files.
        ## Count how many rows claim each (year, series) cell; a count > 1
        ## means two rows of the same series ID overlap in time.
        span <- max.year - min.year + 1
        val.count <- matrix(0, span, nseries)
        for (i in seq_along(series)) {
            this.col <- series.index[i]
            these.rows <- seq(from = decade.yr[i] - min.year + 1, by = 1,
                              length.out = n.per.row[i])
            val.count[these.rows, this.col] <-
                val.count[these.rows, this.col] + 1
        }
        extra.vals <- which(val.count > 1, arr.ind=TRUE)
        n.extra <- nrow(extra.vals)
        ## Simple error message to make it clear to the user that there are
        ## duplicate series IDs.
        if (n.extra > 0) {
            warn.fmt <- gettext("Duplicated series ID detected: %s",domain="R-dplR")
            ids.dup <- paste(unique(series.ids[extra.vals[, 2]]),
                             sep = ", ",collapse = "; ")
            warning(sprintf(warn.fmt, ids.dup), domain=NA)
            return(FALSE)
        }
        else {
            return(TRUE)
        }
    } # end input.ok func
    ## Read data file into memory
    con <- file(fname, encoding = encoding)
    on.exit(close(con))
    goodLines <- readLines(con)
    close(con)
    on.exit()
    ## Strip empty lines (caused by CR CR LF endings etc.)
    goodLines <- goodLines[nzchar(goodLines)]
    ## Remove comment lines (print them?)
    foo <- regexpr("#", goodLines, fixed=TRUE)
    commentFlag <- foo >= 1 & foo <= 78
    goodLines <- goodLines[!commentFlag]
    ## Temporary file for 'goodLines'. Reading from this file is
    ## faster than making a textConnection to 'goodLines'.
    tf <- tempfile()
    check.tempdir()
    tfcon <- file(tf, encoding="UTF-8")
    on.exit(close(tfcon))
    on.exit(unlink(tf), add=TRUE)
    writeLines(goodLines, tf)
    ## New connection for reading from the temp file
    close(tfcon)
    tfcon <- file(tf, encoding="UTF-8")
    if (is.null(header)) {
        ## Try to determine if the file has a header. This is failable.
        ## 3 lines in file
        hdr1 <- readLines(tfcon, n=1)
        if (length(hdr1) == 0) {
            stop("file is empty")
        }
        if (nchar(hdr1) < 12) {
            stop("first line in rwl file ends before col 12")
        }
        is.head <- FALSE
        ## Columns 9-12 of a data line hold the decade year; anything
        ## non-integral there suggests a header.
        yrcheck <- suppressWarnings(as.numeric(substr(hdr1, 9, 12)))
        if (is.null(yrcheck) || length(yrcheck) != 1 || is.na(yrcheck) ||
            yrcheck < -1e04 || yrcheck > 1e04 ||
            round(yrcheck) != yrcheck) {
            is.head <- TRUE
        }
        if (!is.head) {
            ## Check the up-to-10 fixed-width measurement fields for
            ## non-integral or alphabetic content.
            datacheck <- substring(hdr1,
                                   seq(from=13, by=6, length=10),
                                   seq(from=18, by=6, length=10))
            datacheck <- sub("^[[:blank:]]+", "", datacheck)
            idx.good <- which(nzchar(datacheck))
            n.good <- length(idx.good)
            if (n.good == 0) {
                is.head <- TRUE
            } else {
                datacheck <- datacheck[seq_len(idx.good[n.good])]
                if (any(grepl("[[:alpha:]]", datacheck))) {
                    is.head <- TRUE
                } else {
                    datacheck <- suppressWarnings(as.numeric(datacheck))
                    if (is.null(datacheck) ||
                        any(!is.na(datacheck) &
                            round(datacheck) != datacheck)) {
                        is.head <- TRUE
                    }
                }
            }
        }
        if (is.head) {
            ## Double-check: a whitespace-separated data line (non-standard)
            ## may have looked like a header above.  str_trim() is from the
            ## stringr package (a dplR dependency).
            hdr1.split <- strsplit(str_trim(hdr1, side="both"),
                                   split="[[:space:]]+")[[1]]
            n.parts <- length(hdr1.split)
            if (n.parts >= 3 && n.parts <= 13) {
                hdr1.split <- hdr1.split[2:n.parts]
                if (!any(grepl("[[:alpha:]]", hdr1.split))) {
                    yrdatacheck <- suppressWarnings(as.numeric(hdr1.split))
                    if (!(is.null(yrdatacheck) ||
                          any(!is.na(yrdatacheck) &
                              round(yrdatacheck) != yrdatacheck))) {
                        is.head <- FALSE
                    }
                }
            }
        }
        if (is.head) {
            cat(gettext("There appears to be a header in the rwl file\n",
                        domain="R-dplR"))
        } else {
            cat(gettext("There does not appear to be a header in the rwl file\n",
                        domain="R-dplR"))
        }
    } else if (!is.logical(header) || length(header) != 1 || is.na(header)) {
        stop("'header' must be NULL, TRUE or FALSE")
    } else {
        is.head <- header
    }
    skip.lines <- if (is.head) 3 else 0
    data1 <- readLines(tfcon, n=skip.lines + 1)
    if (length(data1) < skip.lines + 1) {
        stop("file has no data")
    }
    on.exit(unlink(tf))
    ## Test for presence of tabs
    if (!grepl("\t", data1[length(data1)])) {
        ## Using a connection instead of a file name in read.fwf and
        ## read.table allows the function to support different encodings.
        if (isTRUE(long)) {
            ## Reading 11 years per decade allows nonstandard use of stop
            ## marker at the end of a line that already has 10
            ## measurements. Such files exist in ITRDB.
            fields <- c(7, 5, rep(6, 11))
        } else {
            fields <- c(8, 4, rep(6, 11))
        }
        ## First, try fixed width columns as in Tucson "standard"
        dat <-
            tryCatch(read.fwf(tfcon, widths=fields, skip=skip.lines,
                              comment.char="", strip.white=TRUE,
                              blank.lines.skip=FALSE,
                              colClasses=c("character", rep("integer", 11),
                                           "character")),
                     error = function(...) {
                         ## If predefined column classes fail
                         ## (e.g. missing values marked with "."), convert
                         ## types manually
                         tfcon <- file(tf, encoding="UTF-8")
                         tmp <-
                             read.fwf(tfcon, widths=fields, skip=skip.lines,
                                      strip.white=TRUE, blank.lines.skip=FALSE,
                                      colClasses="character", comment.char="")
                         for (idx in 2:12) {
                             asnum <- as.numeric(tmp[[idx]])
                             if (!identical(round(asnum), asnum)) {
                                 stop("non-integral numbers found")
                             }
                             tmp[[idx]] <- as.integer(asnum)
                         }
                         tmp
                     })
        dat <- dat[!is.na(dat[[2]]), , drop=FALSE] # requires non-NA year
        series <- dat[[1]]
        decade.yr <- dat[[2]]
        ## Keep a copy of the fixed-width parse so the fallback below can
        ## compare against / reuse it.
        series.fixed <- series
        decade.fixed <- decade.yr
        x <- as.matrix(dat[3:12])
        ## Convert values <= 0 or < 0 (not -9999) to NA
        if (isTRUE(edge.zeros)) {
            x[x < 0 & x != -9999] <- NA
        } else {
            x[x <= 0 & x != -9999] <- NA
        }
        x.fixed <- x
        fixed.ok <- input.ok(series, decade.yr, x)
    } else {
        warning("tabs used, assuming non-standard, tab-delimited file")
        fixed.ok <- FALSE
    }
    ## If that fails, try columns separated by white space (non-standard)
    if (!fixed.ok) {
        warning("fixed width failed, trying to reread with variable width columns")
        tfcon <- file(tf, encoding="UTF-8")
        ## Number of columns is decided by length(col.names)
        dat <-
            tryCatch(read.table(tfcon, skip=skip.lines, blank.lines.skip=FALSE,
                                comment.char="", col.names=letters[1:13],
                                colClasses=c("character", rep("integer", 11),
                                             "character"), fill=TRUE, quote=""),
                     error = function(...) {
                         ## In case predefined column classes fail
                         tfcon <- file(tf, encoding="UTF-8")
                         tmp <- read.table(tfcon, skip=skip.lines,
                                           blank.lines.skip=FALSE, quote="",
                                           comment.char="", fill=TRUE,
                                           col.names=letters[1:13],
                                           colClasses="character")
                         tmp[[1]] <- as.character(tmp[[1]])
                         for (idx in 2:12) {
                             asnum <- as.numeric(tmp[[idx]])
                             if (!identical(round(asnum), asnum)) {
                                 stop("non-integral numbers found")
                             }
                             tmp[[idx]] <- as.integer(asnum)
                         }
                         tmp
                     })
        dat <- dat[!is.na(dat[[2]]), , drop=FALSE] # requires non-NA year
        series <- dat[[1]]
        decade.yr <- dat[[2]]
        x <- as.matrix(dat[3:12])
        if (isTRUE(edge.zeros)) {
            x[x < 0 & x != -9999] <- NA
        } else {
            x[x <= 0 & x != -9999] <- NA
        }
        if (!input.ok(series, decade.yr, x)) {
            ## Last resort: fixed-width IDs and years combined with
            ## variable-width data, but only if the two parses differ.
            if (exists("series.fixed", inherits=FALSE) &&
                exists("decade.fixed", inherits=FALSE) &&
                exists("x.fixed", inherits=FALSE) &&
                (any(is.na(x) != is.na(x.fixed)) ||
                 any(x != x.fixed, na.rm=TRUE))) {
                series <- series.fixed
                decade.yr <- decade.fixed
                warning("trying fixed width names, years, variable width data")
                if (!input.ok(series, decade.yr, x)) {
                    stop("failed to read rwl file")
                }
            } else {
                stop("failed to read rwl file")
            }
        }
    }
    series.ids <- unique(series)
    nseries <- length(series.ids)
    ## At this time match does not support long vectors in the second
    ## argument and always returns integers, but let's check the
    ## result anyway.
    series.index <- tryCatch(as.integer(match(series, series.ids)),
                             warning = conditionMessage,
                             error = conditionMessage)
    if (!is.integer(series.index)) {
        stop(gettextf("series.index must be integer: %s",
                      paste(as.character(series.index), collapse = ", "),
                      domain = "R-dplR"))
    }
    extra.col <- dat[[13]]
    ## Compiled helper assembles the year-by-series matrix and reports, per
    ## series, the precision reciprocal deduced from the stop marker
    ## (100 for 0.01 mm / marker 999, 1000 for 0.001 mm / marker -9999).
    res <- .Call(dplR.readloop, series.index, decade.yr, x)
    rw.mat <- res[[1]]
    min.year <- res[[2]]
    prec.rproc <- res[[3]]
    span <- nrow(rw.mat)
    if (span == 0) {
        rw.df <- as.data.frame(rw.mat)
        names(rw.df) <- as.character(series.ids)
        return(rw.df)
    }
    max.year <- min.year + (span - 1)
    rownames(rw.mat) <- min.year:max.year
    ## The operations in the loop depend on the precision of each series.
    ## It's not exactly clear whether the Tucson format allows mixed
    ## precisions in the same file, but we can support that in any case.
    prec.unknown <- logical(nseries)
    for (i in seq_len(nseries)) {
        if (!(prec.rproc[i] %in% c(100, 1000))) {
            ## Precision undetected: look for a stop marker in the extra
            ## (13th) column of the series' last row (non-standard location).
            these.rows <- which(series.index == i)
            these.decades <- decade.yr[these.rows]
            has.stop <- which(extra.col[these.rows] %in% c("999", "-9999"))
            if (length(has.stop) == 1 &&
                which.max(these.decades) == has.stop) {
                warning(gettextf("bad location of stop marker in series %s",
                                 series.ids[i], domain="R-dplR"),
                        domain=NA)
                if (extra.col[these.rows[has.stop]] == "999") {
                    prec.rproc[i] <- 100
                } else {
                    prec.rproc[i] <- 1000
                }
            }
        }
        this.prec.rproc <- prec.rproc[i]
        if (this.prec.rproc == 100) {
            ## Convert stop marker (and any other) 999 to NA (precision 0.01)
            rw.mat[rw.mat[, i] == 999, i] <- NA
        } else if (this.prec.rproc == 1000) {
            ## Ditto, -9999 to NA (precision 0.001)
            rw.mat[rw.mat[, i] == -9999, i] <- NA
        } else {
            prec.unknown[i] <- TRUE
        }
        ## Convert to mm
        rw.mat[, i] <- rw.mat[, i] / this.prec.rproc
    }
    if (all(prec.unknown)) {
        stop("precision unknown in all series")
    }
    ## Accommodate mid-series upper and lower case differences: If a
    ## series doesn't end with a stop marker, see if the series ID of
    ## the next row in the file matches when case differences are
    ## ignored.
    if (any(prec.unknown)) {
        upper.ids <- toupper(series.ids)
        new.united <- TRUE
        ## series.united[i] is the column that series i has been merged into
        ## (initially itself).  Iterate until no more merges happen.
        series.united <- seq_len(ncol(rw.mat))
        while (new.united) {
            new.united <- FALSE
            for (this.series in which(prec.unknown)) {
                these.rows <- which(series.index == this.series)
                last.row <- these.rows[length(these.rows)]
                next.series <- series.united[series.index[last.row + 1]]
                if (last.row == length(series) ||
                    upper.ids[this.series] != upper.ids[next.series]) {
                    new.united <- FALSE
                    break
                }
                last.decade <- decade.yr[last.row]
                next.decade <- decade.yr[last.row + 1]
                if (!prec.unknown[next.series] &&
                    next.decade > last.decade &&
                    next.decade <= last.decade + 10) {
                    ## Merge only if the two columns never both have data in
                    ## the same year.
                    val.count <- numeric(span)
                    this.col <- rw.mat[, this.series]
                    next.col <- rw.mat[, next.series]
                    flag.this <- !is.na(this.col) & this.col != 0
                    val.count[flag.this] <- 1
                    flag.next <- !is.na(next.col) & next.col != 0
                    val.count[flag.next] <- val.count[flag.next] + 1
                    if (any(val.count > 1)) {
                        new.united <- FALSE
                        break
                    }
                    ## Scale this fragment with the precision of the series
                    ## it is being merged into.
                    this.prec.rproc <- prec.rproc[next.series]
                    if (this.prec.rproc == 100) {
                        this.col[this.col == 999] <- NA
                    } else if (this.prec.rproc == 1000) {
                        this.col[this.col == -9999] <- NA
                    }
                    this.col <- this.col / this.prec.rproc
                    rw.mat[flag.this, next.series] <- this.col[flag.this]
                    series.united[this.series] <- next.series
                    new.united <- TRUE
                    prec.unknown[this.series] <- FALSE
                    warning(gettextf("combining series %s and %s",
                                     series.ids[this.series],
                                     series.ids[next.series],
                                     domain="R-dplR"), domain=NA)
                }
            }
        }
        prec.unknown <- which(prec.unknown)
        n.unknown <- length(prec.unknown)
        if (n.unknown > 0) {
            stop(sprintf(ngettext(n.unknown,
                                  "precision unknown in series %s",
                                  "precision unknown in series %s",
                                  domain="R-dplR"),
                         paste0(series.ids[prec.unknown], collapse=", ")),
                 domain=NA)
        } else {
            ## Drop the columns that were merged into another series.
            to.keep <- which(series.united == seq_len(ncol(rw.mat)))
            rw.mat <- rw.mat[, to.keep, drop=FALSE]
            nseries <- length(to.keep)
            series.ids <- series.ids[to.keep]
            prec.rproc <- prec.rproc[to.keep]
        }
    }
    ## Print a summary table: index, ID, first/last year, precision.
    the.range <-
        as.matrix(apply(rw.mat, 2, yr.range, yr.vec=min.year:max.year))
    series.min <- the.range[1, ]
    series.max <- the.range[2, ]
    series.min.char <- format(series.min, scientific=FALSE, trim=TRUE)
    series.max.char <- format(series.max, scientific=FALSE, trim=TRUE)
    seq.series.char <- format(seq_len(nseries), scientific=FALSE, trim=TRUE)
    cat(sprintf(ngettext(nseries,
                         "There is %d series\n",
                         "There are %d series\n",
                         domain="R-dplR"),
                nseries))
    cat(paste0(format(seq.series.char, width=5), "\t",
               format(series.ids, width=8), "\t",
               format(series.min.char, width=5, justify="right"), "\t",
               format(series.max.char, width=5, justify="right"), "\t",
               format(1/prec.rproc, scientific=FALSE,drop0trailing=TRUE),"\n"),
        sep="")
    ## trim the front and back of the output to remove blank rows
    good.series <- !is.na(series.min)
    if (!any(good.series)) {
        stop("file has no good data")
    }
    incl.rows <- seq.int(min(series.min[good.series])-min.year+1,
                         max(series.max[good.series])-min.year+1)
    ## trim
    rw.mat <- rw.mat[incl.rows, , drop=FALSE]
    ## Fix internal NAs. These are coded as 0 in the DPL programs
    fix.internal.na <- function(x) {
        na.flag <- is.na(x)
        good.idx <- which(!na.flag)
        y <- x
        if (length(good.idx) >= 2) {
            min.good <- min(good.idx)
            max.good <- max(good.idx)
            fix.flag <- na.flag & c(rep(FALSE, min.good),
                                    rep(TRUE, max.good-min.good-1),
                                    rep(FALSE, length(x)-max.good+1))
            y[fix.flag] <- 0
        }
        y
    }
    rw.df <- as.data.frame(apply(rw.mat, 2, fix.internal.na))
    names(rw.df) <- as.character(series.ids)
    class(rw.df) <- c("rwl", "data.frame")
    rw.df
}
| /R/read.tucson.R | no_license | AndyBunn/dplR | R | false | false | 19,576 | r | `read.tucson` <- function(fname, header = NULL, long = FALSE,
encoding = getOption("encoding"),
edge.zeros = TRUE)
{
## Checks that the input is good. The input variables are vectors
## ('series', 'decade.yr') or matrices ('x') containing most of
## the data acquired from the input file 'fname'.
input.ok <- function(series, decade.yr, x) {
if (length(series) == 0) {
return(FALSE)
}
## Number of values allowed per row depends on first year modulo 10
n.per.row <-
apply(x, 1,
function(x) {
notna <- which(!is.na(x))
n.notna <- length(notna)
if (n.notna == 0) {
0
} else {
notna[n.notna]
}
})
full.per.row <- 10 - decade.yr %% 10
## One extra column per row is allowed:
## a. enough space will be allocated (max.year is larger than
## last year of any series)
## b. the extra col may contain a stop marker (non-standard location)
idx.bad <- which(n.per.row > full.per.row + 1)
n.bad <- length(idx.bad)
if (n.bad > 0) {
warn.fmt <-
ngettext(n.bad,
"%d row has too many values (ID, decade %s)",
"%d rows have too many values (IDs, decades %s)",
domain="R-dplR")
if (n.bad > 5) {
idx.bad <- sample(idx.bad, 5)
ids.decades <- paste(paste(series[idx.bad], decade.yr[idx.bad],
sep=", ", collapse="; "),
"...", sep="; ")
} else {
ids.decades <- paste(series[idx.bad], decade.yr[idx.bad],
sep="-", collapse=", ")
}
warning(sprintf(warn.fmt, n.bad, ids.decades), domain=NA)
return(FALSE)
}
series.ids <- unique(series)
nseries <- length(series.ids)
series.index <- match(series, series.ids)
last.row.of.series <- logical(length(series))
for (i in seq_len(nseries)) {
idx.these <- which(series.index == i)
last.row.of.series[idx.these[which.max(decade.yr[idx.these])]] <-
TRUE
}
flag.bad2 <- n.per.row < full.per.row
if (!all(last.row.of.series) && all(flag.bad2[!last.row.of.series])) {
warning("all rows (last rows excluded) have too few values")
return(FALSE)
}
min.year <- min(decade.yr)
max.year <- ((max(decade.yr)+10) %/% 10) * 10
if (max.year > as.numeric(format(Sys.Date(), "%Y")) + 100) {
## Must do something to stop R from trying to build huge
## data structures if the maximum year is not detected
## correctly. Not too strict (allow about 100 years past
## today).
warning("file format problems (or data from the future)")
return(FALSE)
}
# look for duplicate IDs -- common problem with Tucson RWL files
span <- max.year - min.year + 1
val.count <- matrix(0, span, nseries)
for (i in seq_along(series)) {
this.col <- series.index[i]
these.rows <- seq(from = decade.yr[i] - min.year + 1, by = 1,
length.out = n.per.row[i])
val.count[these.rows, this.col] <-
val.count[these.rows, this.col] + 1
}
extra.vals <- which(val.count > 1, arr.ind=TRUE)
n.extra <- nrow(extra.vals)
print(n.extra)
# if (n.extra > 0) {
# warn.fmt <-
# ngettext(n.bad,
# "Duplicated series ID detected with overlap in %d pair of ID, year: %s",
# "Duplicated series ID detected with overlaps in %d pairs of ID, year: %s",
# domain="R-dplR")
# if (n.extra > 5) {
# extra.vals <- extra.vals[sample(n.extra, 5), ]
# ids.years <- paste(paste(series.ids[extra.vals[, 2]],
# min.year - 1 + extra.vals[, 1],
# sep=", ", collapse="; "),
# "...", sep="; ")
# } else {
# ids.years <- paste(series.ids[extra.vals[, 2]],
# min.year - 1 + extra.vals[, 1],
# sep=", ", collapse="; ")
# }
# warning(sprintf(warn.fmt, n.extra, ids.years), domain=NA)
# FALSE
# }
####################
# simplifying error message to user to make it clearer that there are
# duplicate IDs
if (n.extra > 0) {
warn.fmt <- gettext("Duplicated series ID detected: %s",domain="R-dplR")
ids.dup <- paste(unique(series.ids[extra.vals[, 2]]),
sep = ", ",collapse = "; ")
warning(sprintf(warn.fmt, ids.dup), domain=NA)
return(FALSE)
}
else {
return(TRUE)
}
} # end input.ok func
## Read data file into memory
con <- file(fname, encoding = encoding)
on.exit(close(con))
goodLines <- readLines(con)
close(con)
on.exit()
## Strip empty lines (caused by CR CR LF endings etc.)
goodLines <- goodLines[nzchar(goodLines)]
## Remove comment lines (print them?)
foo <- regexpr("#", goodLines, fixed=TRUE)
commentFlag <- foo >= 1 & foo <= 78
goodLines <- goodLines[!commentFlag]
## Temporary file for 'goodLines'. Reading from this file is
## faster than making a textConnection to 'goodLines'.
tf <- tempfile()
check.tempdir()
tfcon <- file(tf, encoding="UTF-8")
on.exit(close(tfcon))
on.exit(unlink(tf), add=TRUE)
writeLines(goodLines, tf)
## New connection for reading from the temp file
close(tfcon)
tfcon <- file(tf, encoding="UTF-8")
if (is.null(header)) {
## Try to determine if the file has a header. This is failable.
## 3 lines in file
hdr1 <- readLines(tfcon, n=1)
if (length(hdr1) == 0) {
stop("file is empty")
}
if (nchar(hdr1) < 12) {
stop("first line in rwl file ends before col 12")
}
is.head <- FALSE
yrcheck <- suppressWarnings(as.numeric(substr(hdr1, 9, 12)))
if (is.null(yrcheck) || length(yrcheck) != 1 || is.na(yrcheck) ||
yrcheck < -1e04 || yrcheck > 1e04 ||
round(yrcheck) != yrcheck) {
is.head <- TRUE
}
if (!is.head) {
datacheck <- substring(hdr1,
seq(from=13, by=6, length=10),
seq(from=18, by=6, length=10))
datacheck <- sub("^[[:blank:]]+", "", datacheck)
idx.good <- which(nzchar(datacheck))
n.good <- length(idx.good)
if (n.good == 0) {
is.head <- TRUE
} else {
datacheck <- datacheck[seq_len(idx.good[n.good])]
if (any(grepl("[[:alpha:]]", datacheck))) {
is.head <- TRUE
} else {
datacheck <- suppressWarnings(as.numeric(datacheck))
if (is.null(datacheck) ||
any(!is.na(datacheck) &
round(datacheck) != datacheck)) {
is.head <- TRUE
}
}
}
}
if (is.head) {
hdr1.split <- strsplit(str_trim(hdr1, side="both"),
split="[[:space:]]+")[[1]]
n.parts <- length(hdr1.split)
if (n.parts >= 3 && n.parts <= 13) {
hdr1.split <- hdr1.split[2:n.parts]
if (!any(grepl("[[:alpha:]]", hdr1.split))) {
yrdatacheck <- suppressWarnings(as.numeric(hdr1.split))
if (!(is.null(yrdatacheck) ||
any(!is.na(yrdatacheck) &
round(yrdatacheck) != yrdatacheck))) {
is.head <- FALSE
}
}
}
}
if (is.head) {
cat(gettext("There appears to be a header in the rwl file\n",
domain="R-dplR"))
} else {
cat(gettext("There does not appear to be a header in the rwl file\n",
domain="R-dplR"))
}
} else if (!is.logical(header) || length(header) != 1 || is.na(header)) {
stop("'header' must be NULL, TRUE or FALSE")
} else {
is.head <- header
}
skip.lines <- if (is.head) 3 else 0
data1 <- readLines(tfcon, n=skip.lines + 1)
if (length(data1) < skip.lines + 1) {
stop("file has no data")
}
on.exit(unlink(tf))
## Test for presence of tabs
if (!grepl("\t", data1[length(data1)])) {
## Using a connection instead of a file name in read.fwf and
## read.table allows the function to support different encodings.
if (isTRUE(long)) {
## Reading 11 years per decade allows nonstandard use of stop
## marker at the end of a line that already has 10
## measurements. Such files exist in ITRDB.
fields <- c(7, 5, rep(6, 11))
} else {
fields <- c(8, 4, rep(6, 11))
}
## First, try fixed width columns as in Tucson "standard"
dat <-
tryCatch(read.fwf(tfcon, widths=fields, skip=skip.lines,
comment.char="", strip.white=TRUE,
blank.lines.skip=FALSE,
colClasses=c("character", rep("integer", 11),
"character")),
error = function(...) {
## If predefined column classes fail
## (e.g. missing values marked with "."), convert
## types manually
tfcon <- file(tf, encoding="UTF-8")
tmp <-
read.fwf(tfcon, widths=fields, skip=skip.lines,
strip.white=TRUE, blank.lines.skip=FALSE,
colClasses="character", comment.char="")
for (idx in 2:12) {
asnum <- as.numeric(tmp[[idx]])
if (!identical(round(asnum), asnum)) {
stop("non-integral numbers found")
}
tmp[[idx]] <- as.integer(asnum)
}
tmp
})
dat <- dat[!is.na(dat[[2]]), , drop=FALSE] # requires non-NA year
series <- dat[[1]]
decade.yr <- dat[[2]]
series.fixed <- series
decade.fixed <- decade.yr
x <- as.matrix(dat[3:12])
## Convert values <= 0 or < 0 (not -9999) to NA
if (isTRUE(edge.zeros)) {
x[x < 0 & x != -9999] <- NA
} else {
x[x <= 0 & x != -9999] <- NA
}
x.fixed <- x
fixed.ok <- input.ok(series, decade.yr, x)
} else {
warning("tabs used, assuming non-standard, tab-delimited file")
fixed.ok <- FALSE
}
## If that fails, try columns separated by white space (non-standard)
if (!fixed.ok) {
warning("fixed width failed, trying to reread with variable width columns")
tfcon <- file(tf, encoding="UTF-8")
## Number of columns is decided by length(col.names)
dat <-
tryCatch(read.table(tfcon, skip=skip.lines, blank.lines.skip=FALSE,
comment.char="", col.names=letters[1:13],
colClasses=c("character", rep("integer", 11),
"character"), fill=TRUE, quote=""),
error = function(...) {
## In case predefined column classes fail
tfcon <- file(tf, encoding="UTF-8")
tmp <- read.table(tfcon, skip=skip.lines,
blank.lines.skip=FALSE, quote="",
comment.char="", fill=TRUE,
col.names=letters[1:13],
colClasses="character")
tmp[[1]] <- as.character(tmp[[1]])
for (idx in 2:12) {
asnum <- as.numeric(tmp[[idx]])
if (!identical(round(asnum), asnum)) {
stop("non-integral numbers found")
}
tmp[[idx]] <- as.integer(asnum)
}
tmp
})
dat <- dat[!is.na(dat[[2]]), , drop=FALSE] # requires non-NA year
series <- dat[[1]]
decade.yr <- dat[[2]]
x <- as.matrix(dat[3:12])
if (isTRUE(edge.zeros)) {
x[x < 0 & x != -9999] <- NA
} else {
x[x <= 0 & x != -9999] <- NA
}
if (!input.ok(series, decade.yr, x)) {
if (exists("series.fixed", inherits=FALSE) &&
exists("decade.fixed", inherits=FALSE) &&
exists("x.fixed", inherits=FALSE) &&
(any(is.na(x) != is.na(x.fixed)) ||
any(x != x.fixed, na.rm=TRUE))) {
series <- series.fixed
decade.yr <- decade.fixed
warning("trying fixed width names, years, variable width data")
if (!input.ok(series, decade.yr, x)) {
stop("failed to read rwl file")
}
} else {
stop("failed to read rwl file")
}
}
}
series.ids <- unique(series)
nseries <- length(series.ids)
## At this time match does not support long vectors in the second
## argument and always returns integers, but let's check the
## result anyway.
series.index <- tryCatch(as.integer(match(series, series.ids)),
warning = conditionMessage,
error = conditionMessage)
if (!is.integer(series.index)) {
stop(gettextf("series.index must be integer: %s",
paste(as.character(series.index), collapse = ", "),
domain = "R-dplR"))
}
extra.col <- dat[[13]]
res <- .Call(dplR.readloop, series.index, decade.yr, x)
rw.mat <- res[[1]]
min.year <- res[[2]]
prec.rproc <- res[[3]]
span <- nrow(rw.mat)
if (span == 0) {
rw.df <- as.data.frame(rw.mat)
names(rw.df) <- as.character(series.ids)
return(rw.df)
}
max.year <- min.year + (span - 1)
rownames(rw.mat) <- min.year:max.year
## The operations in the loop depend on the precision of each series.
## It's not exactly clear whether the Tucson format allows mixed
## precisions in the same file, but we can support that in any case.
prec.unknown <- logical(nseries)
for (i in seq_len(nseries)) {
if (!(prec.rproc[i] %in% c(100, 1000))) {
these.rows <- which(series.index == i)
these.decades <- decade.yr[these.rows]
has.stop <- which(extra.col[these.rows] %in% c("999", "-9999"))
if (length(has.stop) == 1 &&
which.max(these.decades) == has.stop) {
warning(gettextf("bad location of stop marker in series %s",
series.ids[i], domain="R-dplR"),
domain=NA)
if (extra.col[these.rows[has.stop]] == "999") {
prec.rproc[i] <- 100
} else {
prec.rproc[i] <- 1000
}
}
}
this.prec.rproc <- prec.rproc[i]
if (this.prec.rproc == 100) {
## Convert stop marker (and any other) 999 to NA (precision 0.01)
rw.mat[rw.mat[, i] == 999, i] <- NA
} else if (this.prec.rproc == 1000) {
## Ditto, -9999 to NA (precision 0.001)
rw.mat[rw.mat[, i] == -9999, i] <- NA
} else {
prec.unknown[i] <- TRUE
}
## Convert to mm
rw.mat[, i] <- rw.mat[, i] / this.prec.rproc
}
if (all(prec.unknown)) {
stop("precision unknown in all series")
}
## Accommodate mid-series upper and lower case differences: If a
## series doesn't end with a stop marker, see if the series ID of
## the next row in the file matches when case differences are
## ignored.
if (any(prec.unknown)) {
upper.ids <- toupper(series.ids)
new.united <- TRUE
series.united <- 1:ncol(rw.mat)
while (new.united) {
new.united <- FALSE
for (this.series in which(prec.unknown)) {
these.rows <- which(series.index == this.series)
last.row <- these.rows[length(these.rows)]
next.series <- series.united[series.index[last.row + 1]]
if (last.row == length(series) ||
upper.ids[this.series] != upper.ids[next.series]) {
new.united <- FALSE
break
}
last.decade <- decade.yr[last.row]
next.decade <- decade.yr[last.row + 1]
if (!prec.unknown[next.series] &&
next.decade > last.decade &&
next.decade <= last.decade + 10) {
val.count <- numeric(span)
this.col <- rw.mat[, this.series]
next.col <- rw.mat[, next.series]
flag.this <- !is.na(this.col) & this.col != 0
val.count[flag.this] <- 1
flag.next <- !is.na(next.col) & next.col != 0
val.count[flag.next] <- val.count[flag.next] + 1
if (any(val.count > 1)) {
new.united <- FALSE
break
}
this.prec.rproc <- prec.rproc[next.series]
if (this.prec.rproc == 100) {
this.col[this.col == 999] <- NA
} else if (this.prec.rproc == 1000) {
this.col[this.col == -9999] <- NA
}
this.col <- this.col / this.prec.rproc
rw.mat[flag.this, next.series] <- this.col[flag.this]
series.united[this.series] <- next.series
new.united <- TRUE
prec.unknown[this.series] <- FALSE
warning(gettextf("combining series %s and %s",
series.ids[this.series],
series.ids[next.series],
domain="R-dplR"), domain=NA)
}
}
}
prec.unknown <- which(prec.unknown)
n.unknown <- length(prec.unknown)
if (n.unknown > 0) {
stop(sprintf(ngettext(n.unknown,
"precision unknown in series %s",
"precision unknown in series %s",
domain="R-dplR"),
paste0(series.ids[prec.unknown], collapse=", ")),
domain=NA)
} else {
to.keep <- which(series.united == 1:ncol(rw.mat))
rw.mat <- rw.mat[, to.keep, drop=FALSE]
nseries <- length(to.keep)
series.ids <- series.ids[to.keep]
prec.rproc <- prec.rproc[to.keep]
}
}
the.range <-
as.matrix(apply(rw.mat, 2, yr.range, yr.vec=min.year:max.year))
series.min <- the.range[1, ]
series.max <- the.range[2, ]
series.min.char <- format(series.min, scientific=FALSE, trim=TRUE)
series.max.char <- format(series.max, scientific=FALSE, trim=TRUE)
seq.series.char <- format(seq_len(nseries), scientific=FALSE, trim=TRUE)
cat(sprintf(ngettext(nseries,
"There is %d series\n",
"There are %d series\n",
domain="R-dplR"),
nseries))
cat(paste0(format(seq.series.char, width=5), "\t",
format(series.ids, width=8), "\t",
format(series.min.char, width=5, justify="right"), "\t",
format(series.max.char, width=5, justify="right"), "\t",
format(1/prec.rproc, scientific=FALSE,drop0trailing=TRUE),"\n"),
sep="")
## trim the front and back of the output to remove blank rows
good.series <- !is.na(series.min)
if (!any(good.series)) {
stop("file has no good data")
}
incl.rows <- seq.int(min(series.min[good.series])-min.year+1,
max(series.max[good.series])-min.year+1)
## trim
rw.mat <- rw.mat[incl.rows, , drop=FALSE]
## Fix internal NAs. These are coded as 0 in the DPL programs
## Replace internal NAs -- those lying strictly between the first and the
## last non-NA observation -- with 0, following the DPL convention of
## coding missing rings as zero.  Leading and trailing NAs are preserved.
fix.internal.na <- function(x) {
    missing <- is.na(x)
    present <- which(!missing)
    out <- x
    if (length(present) >= 2) {
        first.good <- present[1]
        last.good <- present[length(present)]
        idx <- seq_along(x)
        inside <- idx > first.good & idx < last.good
        out[missing & inside] <- 0
    }
    out
}
rw.df <- as.data.frame(apply(rw.mat, 2, fix.internal.na))
names(rw.df) <- as.character(series.ids)
class(rw.df) <- c("rwl", "data.frame")
rw.df
}
|
# Neural-network regression on the "50 Startups" dataset: predict Profit
# from R&D spend, administration cost, marketing spend and state.
# One-time package installation -- comment out after the first run.
install.packages("neuralnet")
install.packages("nnet")
library(readr)
library(neuralnet)    # neuralnet(): feed-forward networks trained by backprop
library(nnet)
library(DataExplorer)
library(plyr)         # revalue() is used below to recode State
library(ggplot2)
library(psych)        # pairs.panels()
# Importing Dataset (absolute local path -- adjust for your machine)
Startups <- read.csv("C:\\Users\\91755\\Desktop\\Assignment\\11 - Neural Network\\50_Startups.csv")
attach(Startups)      # NOTE(review): attach() masks names; columns are also accessed via Startups$ below
head(Startups)
# EDA and Statistical Analysis
sum(is.na(Startups))  # total count of missing values in the data frame
str(Startups)
class(Startups)
table(Startups$State)
# Recode the categorical State column to numeric codes 0/1/2 so it can be
# fed to the network (neuralnet requires numeric inputs).
Startups$State <- as.numeric(revalue(Startups$State, c("California"="0", "Florida"="1", "New York"="2")))
summary(Startups)
# Graphical Representation
table(Startups)
pairs(Startups)       # scatterplot matrix of all columns
plot(State, Profit)   # uses the attached (pre-recode) columns from attach() above
plot(Administration, Profit)
pairs.panels(Startups)  # correlations, histograms and scatterplots in one panel
ggplot(Startups, aes(x=R.D.Spend, y=Profit))+geom_point()
# Min-max normalisation: linearly rescale a numeric vector onto [0, 1].
# The smallest value maps to 0 and the largest to 1.
normal <- function(x) {
  rng <- range(x)
  (x - rng[1]) / (rng[2] - rng[1])
}
# Apply min-max normalisation column-wise; neuralnet converges much better
# when all inputs share the [0, 1] scale.
Startups_nor <- as.data.frame(lapply(Startups, FUN=normal))
head(Startups_nor)
summary(Startups$Profit)
# Data Splitting: ~75% train / ~25% test by random assignment
set.seed(123)
split <- sample(2, nrow(Startups_nor), replace = T, prob = c(0.75, 0.25))
Startnorm_train <- Startups_nor[split==1,]
Startnorm_test <- Startups_nor[split==2,]
head(Startnorm_train)
# Model Building: Model 1 uses neuralnet's default architecture
# (a single hidden neuron).
set.seed(333)
Model_1 <- neuralnet(Profit~., data = Startnorm_train)
summary(Model_1)
plot(Model_1, rep = "best")
# Evaluation: predict on the four predictor columns of the test set
set.seed(123)
Model1_result <- compute(Model_1, Startnorm_test[,1:4])
Model1_result
pred_1 <- Model1_result$net.result
cor(pred_1, Startnorm_test$Profit) # correlation of prediction vs actual = 96.02%
# Since the prediction of profit is in normalised form, we need to
# denormalise the predicted values to compare on the original scale.
startup_min <- min(Startups$Profit)
startup_max <- max(Startups$Profit)
# Undo min-max scaling: map values from [0, 1] back onto [min, max].
denormalize <- function(x, min, max) {
  min + x * (max - min)
}
# Map the normalised predictions back to the original Profit scale
Profit_pred <- denormalize(pred_1, startup_min, startup_max)
# Side-by-side comparison of predicted vs actual profit
data.frame(head(Profit_pred), head(Startups$Profit))
# Model 2: one hidden layer with 2 neurons
set.seed(1234)
Model_2 <- neuralnet(Profit~., data = Startnorm_train, hidden = 2)
str(Model_2)
plot(Model_2, rep = "best")
# Evaluation
set.seed(333)
Model2_result <- compute(Model_2, Startnorm_test[,1:4])
Model2_result
pred_2 <- Model2_result$net.result
cor(pred_2, Startnorm_test$Profit) # Accuracy = 96.88%
# Model 3: one hidden layer with 6 neurons
set.seed(2222)
Model_3 <- neuralnet(Proft~., data = Startnorm_train, hidden = 6)
str(Model_3)
plot(Model_3, rep = "best")
# Evaluation
set.seed(4444)
Model3_result <- compute(Model_3, Startnorm_test[1:4])  # [1:4] on a data.frame selects the first 4 columns, same as [,1:4] above
Model3_result
pred_3 <- Model3_result$net.result
cor(pred_3, Startnorm_test$Profit) # Accuracy = 96.26%
# Accuracy improves slightly as the hidden layer grows
#Accuracy is increase by increasing the hidden layers | /Startups_neuralnetworks.R | no_license | aksaannamathew/Neural-Networks | R | false | false | 2,687 | r | install.packages("neuralnet")
install.packages("nnet")
library(readr)
library(neuralnet)
library(nnet)
library(DataExplorer)
library(plyr)
library(ggplot2)
library(psych)
#Importing Dataset
Startups <- read.csv("C:\\Users\\91755\\Desktop\\Assignment\\11 - Neural Network\\50_Startups.csv")
attach(Startups)
head(Startups)
#EDA and Statistical Analysis
sum(is.na(Startups))
str(Startups)
class(Startups)
table(Startups$State)
Startups$State <- as.numeric(revalue(Startups$State, c("California"="0", "Florida"="1", "New York"="2")))
summary(Startups)
#Graphical Representation
table(Startups)
pairs(Startups)
plot(State, Profit)
plot(Administration, Profit)
pairs.panels(Startups)
ggplot(Startups, aes(x=R.D.Spend, y=Profit))+geom_point()
#Normalization
# Min-max normalisation: linearly rescale a numeric vector onto [0, 1].
# The smallest value maps to 0 and the largest to 1.
normal <- function(x) {
  rng <- range(x)
  (x - rng[1]) / (rng[2] - rng[1])
}
Startups_nor <- as.data.frame(lapply(Startups, FUN=normal))
head(Startups_nor)
summary(Startups$Profit)
#Data Splitting
set.seed(123)
split <- sample(2, nrow(Startups_nor), replace = T, prob = c(0.75, 0.25))
Startnorm_train <- Startups_nor[split==1,]
Startnorm_test <- Startups_nor[split==2,]
head(Startnorm_train)
#Model Building
set.seed(333)
Model_1 <- neuralnet(Profit~., data = Startnorm_train)
summary(Model_1)
plot(Model_1, rep = "best")
#Evaluation
set.seed(123)
Model1_result <- compute(Model_1, Startnorm_test[,1:4])
Model1_result
pred_1 <- Model1_result$net.result
cor(pred_1, Startnorm_test$Profit) #Accuracy = 96.02%
#Since the prediction on profit is in the normalised form.
#To compare, need to denormalise the predicted profit value
startup_min <- min(Startups$Profit)
startup_max <- max(Startups$Profit)
# Undo min-max scaling: map values from [0, 1] back onto [min, max].
denormalize <- function(x, min, max) {
  min + x * (max - min)
}
Profit_pred <- denormalize(pred_1, startup_min, startup_max)
data.frame(head(Profit_pred), head(Startups$Profit))
#Model Building Using Two Hidden Layers
set.seed(1234)
Model_2 <- neuralnet(Profit~., data = Startnorm_train, hidden = 2)
str(Model_2)
plot(Model_2, rep = "best")
#Evaluation
set.seed(333)
Model2_result <- compute(Model_2, Startnorm_test[,1:4])
Model2_result
pred_2 <- Model2_result$net.result
cor(pred_2, Startnorm_test$Profit) #Accuracy = 96.88%
#Model Buiding Using Five Hidden Layers
set.seed(2222)
Model_3 <- neuralnet(Profit~., data = Startnorm_train, hidden = 6)
str(Model_3)
plot(Model_3, rep = "best")
#Evaluation
set.seed(4444)
Model3_result <- compute(Model_3, Startnorm_test[1:4])
Model3_result
pred_3 <- Model3_result$net.result
cor(pred_3, Startnorm_test$Profit) #Accuarcy=96.26%
#Accuracy is increase by increasing the hidden layers |
#install.packages(c("leaflet","billboarder","randgeo", "ggiraph" ,"tidyverse","TTR","pals", "shiny","dplyr", "htmltools", "highcharter", "rgdal", "raster", "tigris", "shinythemes", "raster", "ggpolt2", "gganimate", "transfromr", "sp", "shinyWidgets","ggiraph", "randgeo", "tidyverse" ))
library(leaflet)
library(shiny)
library(htmltools)
library(ggplot2)
library(gganimate)
library(transformr)
library(sp)
library(rgdal)
library(raster)
library(shinythemes)
library(raster)
library(pals)
library(tigris)
library(shinyWidgets)
library(highcharter)
library(dplyr)
library(billboarder)
require(htmltools)
require(html)
require(shiny)
require(leaflet)
require(htmltools)
require(ggplot2)
library(highcharter)
library(billboarder)
library(lubridate)
library(tidyverse)
library(ggiraph)
library(randgeo)
#****************** MAP DATA ******************************
# Read the Africa shapefile (layer "Africa" in the working directory)
mapafrica<- readOGR(".", "Africa")
projeto_2015r<- read.csv("DataSetProject2015.csv", sep = ",", header = TRUE)
# Left-join country polygons with the 2015 indicators on country name
projeto_2015<- geo_join(mapafrica, projeto_2015r, "COUNTRY", "Entity", how="left")
# Countries without a GDP value are coded 0 so the map palette can bin them
projeto_2015$GPD[ which( is.na(projeto_2015$GPD))] = 0
#****************** LOLLIPOP DATA ******************************
# NOTE(review): `data_set` is never created in this script -- it must already
# exist in the workspace before sourcing; confirm where it is loaded.
# Keep selected columns and drop rows 1275-1404 (apparent trailer rows).
lollipop_data<- data_set[- c(1275:1404),c(1,2,3,7,6,10)]
lollipop_data$MalariaDeaths= as.double(as.character(lollipop_data$MalariaDeaths))
lollipop_data$HIVDeaths= as.double(as.character(lollipop_data$HIVDeaths))
lollipop_data$MalariaDeaths= round(lollipop_data$MalariaDeaths,2)
lollipop_data$HIVDeaths= round(lollipop_data$HIVDeaths,2)
# Per-row min/max of the two death rates -- the lollipop segment endpoints
lollipop_data=transform(lollipop_data,minimo =pmin(HIVDeaths, MalariaDeaths))
lollipop_data=transform(lollipop_data, maximo= pmax(HIVDeaths, MalariaDeaths))
#****************** BARPLOT DATA ******************************
data_barplot<-as.data.frame(data_set)
data_barplot=data_barplot[, c(1,3,4,10)]
data_barplot=data_barplot[- c(1275:1404),]
#****************** TIME SERIES DATA ******************************
timeseries_data=read.csv('DataSetMGD.csv')
timeseries_data$val=round(timeseries_data$val,2)
# Coerce the year column to Date so ggplot gets a proper time axis
timeseries_data$year=as.character((timeseries_data$year))
timeseries_data$year=as.Date((timeseries_data$year), "%Y")
### UI THEMING HELPERS ######
# Clickable title that links to a video
titulo <- tags$a(href = 'https://www.youtube.com/watch?v=L7m61Em4A5k',
'Evolution of diseases in Africa',style = "font-family: 'verdana', cursive;font-weight: 1000; line-height: 1.1;color: #262626;")
# CSS overriding the slider (ion.rangeSlider) colours to orange
css_codes <- tags$style(type = "text/css",".irs-bar {background: #ff9900; border-top: 1px #ff9900 ; border-bottom: 1px #ff9900;}
.irs-bar-edge {background: #ff9900; border: 1px #ff9900; width: 20px;}
.irs-line {border: 1px #ff9900;}
.irs-from, .irs-to, .irs-single {background: #ff9900}
.irs-grid-text {color: #ff9900; font-weight: bold;}
.label-default {background: #ff9900;}
}
")
# CSS for active/inactive tab headers
css_panels <- tags$style(HTML(".tabbable > .nav > li[class=active] > a {background-color: #ff9900; color:white;}"),
HTML(".tabbable > .nav > li[class=desactive] > a {background-color: #ffa31a ; color:#ffa31a}"))
# CSS for the sidebar panel background
css_slider_back <- tags$head(tags$style(HTML('
#sidebar {
background-color: #ffebcc;
border: 1px #ffebcc;
}')))
# Shiny UI: orange-themed page with four tabs -- Home (context + leaflet
# map), GDP bar chart, HIV-vs-Malaria lollipop chart, and the malaria
# deaths time series.  Uses the CSS helpers defined above.
ui <- fluidPage( theme=shinytheme("united"),css_codes, css_panels, css_slider_back,
setBackgroundColor("#ffebcc"),
titlePanel(h1(titulo)),
tabsetPanel(
# Tab 1: landing page with project context and the choropleth map
tabPanel("Home",
sidebarLayout(
sidebarPanel(id="sidebar",
h4(div(HTML('<P align="center", style= "position:relative;top5px;color: gray15"><b>Context</b></p>'))),
p(div(HTML("<p align='justify';style='color:gray10; font-size:15px;' > Africa as one of the largest continents worldwide is characterized by the disparity of values on global statistics.</p>"))),
p(div(HTML("<p align='justify';style='color:gray10; font-size:15px;' > By the year 2015 the Gross Domestic Product (GDP) of the African Countries was set on 5,7%, being the lowest in the world.</p>"))),
p(div(HTML("<p align='justify';style='color:gray10; font-size:15px;' > Comparing with other factors, such as health data, a direct relationship is observed Africa has the highest number of preventable diseases such as Malaria and HIV. </p>"))),
p(div(HTML("<p align='justify';style='color:gray10; font-size:15px;' > </p>"))),
br(),
br(),
h5(div(HTML('<P align="left", style= "position:relative;top3px;color: gray15"><b>Presentation @ NOVA IMS</b></p>'))),
p(div(HTML("<p align='justify';style='color:gray10; font-size:15px;' > António Macedo (m20181271) </p>"))),
p(div(HTML("<p align='justify';style='color:gray10; font-size:15px;' > Filipe Lopes (m20180937)</p>"))),
p(div(HTML("<p align='justify';style='color:gray10; font-size:15px;' > Helena Vilela (m20180361)</p>")))
),
mainPanel(leafletOutput("map", height = 500, width = 800)))
),
# Tab 2: GDP per country bar chart; the checkbox group drives the
# region-highlight proxy in the server.
tabPanel('GDP by Country',
sidebarPanel(
sliderInput("barplot_year",
"Select Year Range",
min=1990,
max=2015,
value= format(1990,big.mark = " ")),
checkboxGroupInput(
inputId = "focus",
label = "Region",
choices = c("Northern Africa" , "Middle Africa", "Western Africa","Southern Africa", "Eastern Africa"),
inline = TRUE
),
h4(div(HTML('<P align="center", style= "position:relative;top5px;color: gray15"><b>Context</b></p>'))),
p(div(HTML("<p align='justify';style='color:gray10; font-size:15px;' > Despite owning the richest natural resources, the African continent continuous to be the poorest. Between 1990 to 2015, Africa was a stage to multiple civil wars, dictators and tyranians governments, climate catastrophes which were among the causes to increase the distance between wealth and poverty.</p>"))),
p(div(HTML("<p align='justify';style='color:gray10; font-size:15px;' > As seen on the plot, through the years the major income region is the Southern Africa region, contradicted by the poorest regions being the Northern, Middle and Western Africa.</p>")))
),
mainPanel(h4(HTML('<p align = "center"; style="color:coral"><b>Gross Domestic Product (GDP) per capita</b></p>')),
billboarderOutput("barplot", width = "100%", height = "450px"))
),
# Tab 3: interactive lollipop chart comparing HIV and Malaria deaths
# for one year and one region.
tabPanel("HIV and Malaria",
sidebarPanel(
sliderInput("lolli_year",
"Select Year Range:",
min=(1990),
max=(2015),
value= 1990
),
selectInput("lolli_region",
"Select Region:",
choices = list("Northern Africa" ="Northern Africa",
"Middle Africa"= "Middle Africa",
"Western Africa"="Western Africa",
"Southern Africa"="Southern Africa",
"Eastern Africa"="Eastern Africa"),
selected = "Western Africa"),
h4(div(HTML('<P align="center", style= "position:relative;top5px;color: gray15"><b>Context</b></p>'))),
p(div(HTML("<p align='justify';style='color:gray10; font-size:15px;' > HIV and Malaria have been on top of the biggest causes of death in the last 30 years in the African continent.</p>"))),
p(div(HTML("<p align='justify';style='color:gray10; font-size:15px;' > Over the period of 1990 to 2015, both HIV and Malaria deaths rose steadily peaking between 2004 and 2006, entering in a decreasing trend until 2015.</p>")))
),
mainPanel(h4(HTML('<p align = "center"; style="color:coral"><b>Comparation between HIV and Malaria</b></p>')),
ggiraphOutput("Lolli"))
),
# Tab 4: malaria deaths time series by WHO region (multi-select)
tabPanel('Malaria Deaths',
sidebarPanel(
checkboxGroupInput("timeseries_location",
"Select Region:",
choices = list("African Region" ="African Region",
"Eastern Mediterranean Region"= "Eastern Mediterranean Region",
"European Region"="European Region",
"Region of the Americas"="Region of the Americas",
"South-East Asia Region"="South-East Asia Region",
"Western Pacific Region"="Western Pacific Region",
"WHO region"="WHO region"),
selected = c("WHO region","European Region", "African Region","Eastern Mediterranean Region", "Eastern Mediterranean Region","Region of the Americas", "South-East Asia Region" ,"Western Pacific Region" )),
h4(div(HTML('<P align="center", style= "position:relative;top5px;color: gray15"><b>Context</b></p>'))),
p(div(HTML("<p align='justify';style='color:gray10; font-size:15px;' > Deaths by Malaria saw a clear rise-peak-fall trend, increasing from around 670,000 deaths in 1990; peaking at around 930,000 in 2004; and then declining (although at varying rates) to around 620,000 in 2017 (Roser & Ritchie, 2017).</p>"))),
p(div(HTML("<p align='justify';style='color:gray10; font-size:15px;' > More than 90% of the estimated 300–500 million malaria cases that occur worldwide every year are in Africa. (WHO, 2014).</p>")))
),
mainPanel(h4(HTML('<p align = "center"; style="color:coral"><b>Global Malaria Deaths</b></p>')),
plotOutput("my_MGD"))
)
)
)
# Shiny server for the "Evolution of diseases in Africa" app.
# Renders four outputs from the globals built above (projeto_2015,
# lollipop_data, data_barplot, timeseries_data):
#   map     -- leaflet choropleth of 2015 GDP per capita
#   Lolli   -- interactive lollipop chart of HIV vs Malaria death rates
#   barplot -- billboarder bar chart of GDP per country
#   my_MGD  -- ggplot time series of malaria deaths by WHO region
server <- function(input, output) {

  ## ------------------------------------------------------------- MAP ----
  output$map <- renderLeaflet({
    # Hover tooltip: country name + GDP per capita with "." as thousands mark
    mytext <- paste("<strong>", "Country:", "</strong>", projeto_2015$COUNTRY, "<br/>",
                    "<strong>", "GDP per capita: ", "</strong>",
                    format(as.numeric(projeto_2015$GPD), nsmall = 0, big.mark = "."),
                    "$", "<br/>") %>%
      lapply(htmltools::HTML)
    mybins <- c(0, 500, 1000, 2000, 3000, 5000, 10000, 50000)
    mypalette <- colorBin(palette = "Oranges", domain = projeto_2015$GPD,
                          na.color = "transparent", bins = mybins)
    leaflet(projeto_2015) %>%
      setView(lng = 8.032837,
              lat = 8.997194,
              zoom = 3.47) %>%
      addProviderTiles(providers$CartoDB.PositronNoLabels) %>%
      addPolygons(
        fillColor = ~mypalette(GPD),
        stroke = TRUE,
        fillOpacity = 0.9,
        color = "white", weight = 0.3,
        highlightOptions = highlightOptions(color = '#800000', weight = 4,
                                            bringToFront = TRUE, opacity = 1),
        label = mytext,
        labelOptions = labelOptions(
          style = list("font-weight" = "normal", padding = "3px 8px"),
          textsize = "13px", direction = "auto")
      ) %>%
      ## BUG FIX: the joined data only has a "GPD" column (see geo_join
      ## above); the previous "~projeto_2015$GDP" referenced a non-existent
      ## column and fed NULL values to the legend.
      addLegend(pal = mypalette,
                values = ~projeto_2015$GPD,
                title = 'GDP per Capita',
                position = 'bottomright',
                labels = c("No value", "1$ - 1000$", "1000$ - 2000$",
                           "2000$ - 3000$", "3000$ - 5000$",
                           "5000$ - 10000$", "10000$ - 20000$"))
  })

  ## -------------------------------------------------------- LOLLIPOP ----
  observe({
    # Subset to the selected year/region; order countries by the larger of
    # the two death rates so the flipped chart reads bottom-up.
    by_duration <-
      lollipop_data[(lollipop_data$Year == input$lolli_year) &
                      (lollipop_data$Region == input$lolli_region), ] %>%
      arrange(maximo) %>%
      mutate(Entity = factor(Entity, levels = Entity)) %>%
      na.omit()   # (removed a stray, never-evaluated extra argument)
    output$Lolli <- renderggiraph({
      lolli <- ggplot(by_duration, aes(x = Entity, y = maximo)) +
        geom_segment(aes(x = Entity, xend = Entity, y = minimo, yend = maximo),
                     color = "grey", size = 1) +
        geom_point_interactive(aes(x = Entity, y = minimo, tooltip = minimo,
                                   color = 'chocolate1'), size = 2.7) +
        geom_point_interactive(aes(x = Entity, y = maximo, tooltip = maximo,
                                   color = 'firebrick2'), size = 2.7) +
        scale_x_discrete() +
        scale_color_manual(name = NULL, labels = c("Malaria", "HIV"),
                           values = c("chocolate1", "firebrick2")) +
        theme_light() +
        theme(legend.position = c(0.85, 0.2),
              plot.title = element_text(hjust = 0.5),
              legend.title = element_text("Legend"),
              panel.background = element_rect(fill = "transparent"),
              plot.background = element_rect(fill = "transparent", color = NA),
              panel.grid.major = element_blank(),
              panel.grid.minor = element_blank(),
              legend.background = element_rect(fill = "transparent"),
              legend.box.background = element_rect(fill = "transparent"),
              axis.title.x = element_text(colour = "gray15", face = "bold"),
              axis.title.y = element_text(colour = "gray15", face = "bold"),
              axis.text.x = element_text(colour = "gray15"),
              axis.text.y = element_text(colour = "gray15"),
              legend.key = element_rect(fill = "transparent"),
              legend.text = element_text(colour = "gray15", face = "bold"),
              panel.border = element_blank(),
              axis.line.x = element_line(colour = "gray30", size = 0.6),
              axis.line.y = element_line(colour = "gray30", size = 0.6)) +
        xlab("Countries") +
        ylab("Death Rate per 100 mil persons") +
        coord_flip()
      girafe(ggobj = lolli)
    })
  })

  ## --------------------------------------------------- GDP BAR CHART ----
  observe({
    bar_arrange <- data_barplot[(data_barplot$Year == input$barplot_year), ] %>%
      group_by(Region) %>%
      arrange(desc(GPD)) %>%
      mutate(Entity = factor(Entity, levels = Entity)) %>%
      na.omit()   # (removed a stray, never-evaluated extra argument)
    output$barplot <- renderBillboarder({
      billboarder() %>%
        bb_barchart(data = bar_arrange,
                    mapping = bbaes(x = Entity, y = GPD, group = Region),
                    rotated = TRUE, color = "#ff9900") %>%
        bb_bar(width = list(ratio = 3)) %>%
        bb_y_axis(tick = list(format = suffix("$"), fit = TRUE),
                  label = list(text = "GDP", position = "outer-top")) %>%
        bb_x_axis(tick = list(
          values = c(" ", ""),
          outer = FALSE)) %>%
        bb_color(palette = c("#331400", "#662900", "#993d00",
                             "#cc5200", "#ff751a")) %>%
        bb_legend(show = FALSE)
    })
  })

  # Highlight (focus) the regions ticked in the "Region" checkbox group.
  observeEvent(input$focus, {
    billboarderProxy("barplot") %>%
      bb_proxy_focus(input$focus)
  }, ignoreNULL = FALSE)

  ## --------------------------------------- MALARIA DEATHS TIME SERIES ----
  observe({
    ## BUG FIX: input$timeseries_location comes from a checkboxGroupInput and
    ## is a character *vector*; "==" recycled it element-wise and silently
    ## dropped rows whenever several regions were selected.  %in% performs
    ## the intended set-membership test.
    by_timeseries <-
      timeseries_data[timeseries_data$location %in% input$timeseries_location, ]
    output$my_MGD <- renderPlot({
      ggplot(by_timeseries, aes(x = year, y = val, group = location)) +
        geom_line(aes(x = year, y = val, color = location), size = 1.3) +
        scale_color_manual(values = c("#331400", "#662900", "#993d00",
                                      "#cc5200", "#ff751a", "#ffa366",
                                      "#ffffff")) +
        theme_light() +
        theme(legend.position = "right",
              plot.title = element_text(hjust = 0.5),
              legend.title = element_blank(),
              panel.background = element_rect(fill = "transparent"),
              plot.background = element_rect(fill = "#ffebcc", color = NA),
              panel.grid.major = element_blank(),
              panel.grid.minor = element_blank(),
              legend.background = element_rect(fill = "transparent"),
              legend.box.background = element_rect(fill = "transparent"),
              axis.title.x = element_text(colour = "gray15", face = "bold"),
              axis.title.y = element_text(colour = "gray15", face = "bold"),
              axis.text.x = element_text(colour = "gray15"),
              axis.text.y = element_text(colour = "gray15"),
              legend.key = element_rect(fill = "transparent"),
              legend.text = element_text(colour = "gray15", face = "bold"),
              panel.border = element_blank(),
              axis.line.x = element_line(colour = "gray30", size = 0.6),
              axis.line.y = element_line(colour = "gray30", size = 0.6)) +
        xlab("Year") +
        ylab("Number of Malaria Deaths")
    })
  })
}
# Launch the application with the UI and server defined above
shinyApp(ui, server)
| /EvolutionDiseasesAfrica.R | no_license | macedo-2311/Data-Visualization-Project | R | false | false | 20,232 | r | #install.packages(c("leaflet","billboarder","randgeo", "ggiraph" ,"tidyverse","TTR","pals", "shiny","dplyr", "htmltools", "highcharter", "rgdal", "raster", "tigris", "shinythemes", "raster", "ggpolt2", "gganimate", "transfromr", "sp", "shinyWidgets","ggiraph", "randgeo", "tidyverse" ))
library(leaflet)
library(shiny)
library(htmltools)
library(ggplot2)
library(gganimate)
library(transformr)
library(sp)
library(rgdal)
library(raster)
library(shinythemes)
library(raster)
library(pals)
library(tigris)
library(shinyWidgets)
library(highcharter)
library(dplyr)
library(billboarder)
require(htmltools)
require(html)
require(shiny)
require(leaflet)
require(htmltools)
require(ggplot2)
library(highcharter)
library(billboarder)
library(lubridate)
library(tidyverse)
library(ggiraph)
library(randgeo)
#******************MAPA******************************
# Read Africa Data Set
mapafrica<- readOGR(".", "Africa")
projeto_2015r<- read.csv("DataSetProject2015.csv", sep = ",", header = TRUE)
projeto_2015<- geo_join(mapafrica, projeto_2015r, "COUNTRY", "Entity", how="left")
projeto_2015$GPD[ which( is.na(projeto_2015$GPD))] = 0
#******************LOLIPOP******************************
lollipop_data<- data_set[- c(1275:1404),c(1,2,3,7,6,10)]
lollipop_data$MalariaDeaths= as.double(as.character(lollipop_data$MalariaDeaths))
lollipop_data$HIVDeaths= as.double(as.character(lollipop_data$HIVDeaths))
lollipop_data$MalariaDeaths= round(lollipop_data$MalariaDeaths,2)
lollipop_data$HIVDeaths= round(lollipop_data$HIVDeaths,2)
lollipop_data=transform(lollipop_data,minimo =pmin(HIVDeaths, MalariaDeaths))
lollipop_data=transform(lollipop_data, maximo= pmax(HIVDeaths, MalariaDeaths))
#******************BARPLOT******************************
data_barplot<-as.data.frame(data_set)
data_barplot=data_barplot[, c(1,3,4,10)]
data_barplot=data_barplot[- c(1275:1404),]
#******************TIME SERIES******************************
timeseries_data=read.csv('DataSetMGD.csv')
timeseries_data$val=round(timeseries_data$val,2)
timeseries_data$year=as.character((timeseries_data$year))
timeseries_data$year=as.Date((timeseries_data$year), "%Y")
###PERSONALSAR######
titulo <- tags$a(href = 'https://www.youtube.com/watch?v=L7m61Em4A5k',
'Evolution of diseases in Africa',style = "font-family: 'verdana', cursive;font-weight: 1000; line-height: 1.1;color: #262626;")
css_codes <- tags$style(type = "text/css",".irs-bar {background: #ff9900; border-top: 1px #ff9900 ; border-bottom: 1px #ff9900;}
.irs-bar-edge {background: #ff9900; border: 1px #ff9900; width: 20px;}
.irs-line {border: 1px #ff9900;}
.irs-from, .irs-to, .irs-single {background: #ff9900}
.irs-grid-text {color: #ff9900; font-weight: bold;}
.label-default {background: #ff9900;}
}
")
css_panels <- tags$style(HTML(".tabbable > .nav > li[class=active] > a {background-color: #ff9900; color:white;}"),
HTML(".tabbable > .nav > li[class=desactive] > a {background-color: #ffa31a ; color:#ffa31a}"))
css_slider_back <- tags$head(tags$style(HTML('
#sidebar {
background-color: #ffebcc;
border: 1px #ffebcc;
}')))
### UI ######
ui <- fluidPage( theme=shinytheme("united"),css_codes, css_panels, css_slider_back,
setBackgroundColor("#ffebcc"),
titlePanel(h1(titulo)),
tabsetPanel(
tabPanel("Home",
sidebarLayout(
sidebarPanel(id="sidebar",
h4(div(HTML('<P align="center", style= "position:relative;top5px;color: gray15"><b>Context</b></p>'))),
p(div(HTML("<p align='justify';style='color:gray10; font-size:15px;' > Africa as one of the largest continents worldwide is characterized by the disparity of values on global statistics.</p>"))),
p(div(HTML("<p align='justify';style='color:gray10; font-size:15px;' > By the year 2015 the Gross Domestic Product (GDP) of the African Countries was set on 5,7%, being the lowest in the world.</p>"))),
p(div(HTML("<p align='justify';style='color:gray10; font-size:15px;' > Comparing with other factors, such as health data, a direct relationship is observed Africa has the highest number of preventable diseases such as Malaria and HIV. </p>"))),
p(div(HTML("<p align='justify';style='color:gray10; font-size:15px;' > </p>"))),
br(),
br(),
h5(div(HTML('<P align="left", style= "position:relative;top3px;color: gray15"><b>Presentation @ NOVA IMS</b></p>'))),
p(div(HTML("<p align='justify';style='color:gray10; font-size:15px;' > António Macedo (m20181271) </p>"))),
p(div(HTML("<p align='justify';style='color:gray10; font-size:15px;' > Filipe Lopes (m20180937)</p>"))),
p(div(HTML("<p align='justify';style='color:gray10; font-size:15px;' > Helena Vilela (m20180361)</p>")))
),
mainPanel(leafletOutput("map", height = 500, width = 800)))
),
tabPanel('GDP by Country',
sidebarPanel(
sliderInput("barplot_year",
"Select Year Range",
min=1990,
max=2015,
value= format(1990,big.mark = " ")),
checkboxGroupInput(
inputId = "focus",
label = "Region",
choices = c("Northern Africa" , "Middle Africa", "Western Africa","Southern Africa", "Eastern Africa"),
inline = TRUE
),
h4(div(HTML('<P align="center", style= "position:relative;top5px;color: gray15"><b>Context</b></p>'))),
p(div(HTML("<p align='justify';style='color:gray10; font-size:15px;' > Despite owning the richest natural resources, the African continent continuous to be the poorest. Between 1990 to 2015, Africa was a stage to multiple civil wars, dictators and tyranians governments, climate catastrophes which were among the causes to increase the distance between wealth and poverty.</p>"))),
p(div(HTML("<p align='justify';style='color:gray10; font-size:15px;' > As seen on the plot, through the years the major income region is the Southern Africa region, contradicted by the poorest regions being the Northern, Middle and Western Africa.</p>")))
),
mainPanel(h4(HTML('<p align = "center"; style="color:coral"><b>Gross Domestic Product (GDP) per capita</b></p>')),
billboarderOutput("barplot", width = "100%", height = "450px"))
),
tabPanel("HIV and Malaria",
sidebarPanel(
sliderInput("lolli_year",
"Select Year Range:",
min=(1990),
max=(2015),
value= 1990
),
selectInput("lolli_region",
"Select Region:",
choices = list("Northern Africa" ="Northern Africa",
"Middle Africa"= "Middle Africa",
"Western Africa"="Western Africa",
"Southern Africa"="Southern Africa",
"Eastern Africa"="Eastern Africa"),
selected = "Western Africa"),
h4(div(HTML('<P align="center", style= "position:relative;top5px;color: gray15"><b>Context</b></p>'))),
p(div(HTML("<p align='justify';style='color:gray10; font-size:15px;' > HIV and Malaria have been on top of the biggest causes of death in the last 30 years in the African continent.</p>"))),
p(div(HTML("<p align='justify';style='color:gray10; font-size:15px;' > Over the period of 1990 to 2015, both HIV and Malaria deaths rose steadily peaking between 2004 and 2006, entering in a decreasing trend until 2015.</p>")))
),
mainPanel(h4(HTML('<p align = "center"; style="color:coral"><b>Comparation between HIV and Malaria</b></p>')),
ggiraphOutput("Lolli"))
),
tabPanel('Malaria Deaths',
sidebarPanel(
checkboxGroupInput("timeseries_location",
"Select Region:",
choices = list("African Region" ="African Region",
"Eastern Mediterranean Region"= "Eastern Mediterranean Region",
"European Region"="European Region",
"Region of the Americas"="Region of the Americas",
"South-East Asia Region"="South-East Asia Region",
"Western Pacific Region"="Western Pacific Region",
"WHO region"="WHO region"),
selected = c("WHO region","European Region", "African Region","Eastern Mediterranean Region", "Eastern Mediterranean Region","Region of the Americas", "South-East Asia Region" ,"Western Pacific Region" )),
h4(div(HTML('<P align="center", style= "position:relative;top5px;color: gray15"><b>Context</b></p>'))),
p(div(HTML("<p align='justify';style='color:gray10; font-size:15px;' > Deaths by Malaria saw a clear rise-peak-fall trend, increasing from around 670,000 deaths in 1990; peaking at around 930,000 in 2004; and then declining (although at varying rates) to around 620,000 in 2017 (Roser & Ritchie, 2017).</p>"))),
p(div(HTML("<p align='justify';style='color:gray10; font-size:15px;' > More than 90% of the estimated 300–500 million malaria cases that occur worldwide every year are in Africa. (WHO, 2014).</p>")))
),
mainPanel(h4(HTML('<p align = "center"; style="color:coral"><b>Global Malaria Deaths</b></p>')),
plotOutput("my_MGD"))
)
)
)
server <- function(input, output) {

  #################################################################################################
  ###################################### MAP ######################################################
  #################################################################################################
  # Choropleth of 2015 GDP per capita. NOTE: the GDP column in projeto_2015 is
  # (mis)spelled 'GPD' throughout the data set.
  output$map <- renderLeaflet({
    # Hover tooltip: country name and formatted GDP per capita.
    mytext <- paste("<strong>", "Country:", "</strong>", projeto_2015$COUNTRY, "<br/>",
                    "<strong>", "GDP per capita: ", "</strong>",
                    format(as.numeric(projeto_2015$GPD), nsmall = 0, big.mark = "."),
                    "$", "<br/>") %>%
      lapply(htmltools::HTML)
    mybins <- c(0, 500, 1000, 2000, 3000, 5000, 10000, 50000)
    mypalette <- colorBin(palette = "Oranges", domain = projeto_2015$GPD,
                          na.color = "transparent", bins = mybins)
    leaflet(projeto_2015) %>%
      setView(lng = 8.032837,
              lat = 8.997194,
              zoom = 3.47) %>%
      addProviderTiles(providers$CartoDB.PositronNoLabels) %>%
      addPolygons(
        fillColor = ~mypalette(GPD),
        stroke = TRUE,
        fillOpacity = 0.9,
        color = "white", weight = 0.3,
        highlightOptions = highlightOptions(color = '#800000', weight = 4,
                                            bringToFront = TRUE, opacity = 1),
        label = mytext,
        labelOptions = labelOptions(style = list("font-weight" = "normal",
                                                 padding = "3px 8px"),
                                    textsize = "13px", direction = "auto")
      ) %>%
      # BUG FIX: was values = ~projeto_2015$GDP -- the column is named 'GPD',
      # so the legend received NULL values.
      # NOTE(review): addLegend() ignores 'labels' when 'pal'/'values' are
      # supplied, and these labels do not match the mybins breaks anyway;
      # use a labFormat function if custom labels are really needed.
      addLegend(pal = mypalette,
                values = ~projeto_2015$GPD,
                title = 'GDP per Capita',
                position = 'bottomright',
                labels = c("No value", "1$ - 1000$", "1000$ - 2000$", "2000$ - 3000$", "3000$ - 5000$", "5000$ - 10000$", "10000$ - 20000$"))
  })

  #################################################################################################
  ###################################### LOLLIPOP #################################################
  #################################################################################################
  # Interactive lollipop chart comparing the 'minimo' and 'maximo' death rates
  # per country for the selected year and African region.
  # NOTE(review): the minimo -> Malaria / maximo -> HIV mapping is inferred
  # from the legend labels below -- confirm against the data-preparation code.
  observe({
    by_duration <-
      lollipop_data[(lollipop_data$Year == input$lolli_year) &
                      (lollipop_data$Region == input$lolli_region), ] %>%
      arrange(maximo) %>%
      mutate(Entity = factor(Entity, levels = Entity)) %>%
      # BUG FIX: was na.omit(by_duration$Entity); 'by_duration' is not defined
      # yet inside its own pipeline, and na.omit.data.frame() silently ignored
      # the extra argument anyway.
      na.omit()
    output$Lolli <- renderggiraph({
      lolli <- ggplot(by_duration, aes(x = Entity, y = maximo)) +
        geom_segment(aes(x = Entity, xend = Entity, y = minimo, yend = maximo),
                     color = "grey", size = 1) +
        geom_point_interactive(aes(x = Entity, y = minimo, tooltip = minimo,
                                   color = 'chocolate1'), size = 2.7) +
        geom_point_interactive(aes(x = Entity, y = maximo, tooltip = maximo,
                                   color = 'firebrick2'), size = 2.7) +
        scale_x_discrete() +
        scale_color_manual(name = NULL, labels = c("Malaria", "HIV"),
                           values = c("chocolate1", "firebrick2")) +
        theme_light() +
        # NOTE(review): element_text("Legend") sets the font *family*, not the
        # title text -- probably intended labs(color = "Legend"); kept as-is.
        theme(legend.position = c(0.85, 0.2),
              plot.title = element_text(hjust = 0.5),
              legend.title = element_text("Legend"),
              panel.background = element_rect(fill = "transparent"),
              plot.background = element_rect(fill = "transparent", color = NA),
              panel.grid.major = element_blank(),
              panel.grid.minor = element_blank(),
              legend.background = element_rect(fill = "transparent"),
              legend.box.background = element_rect(fill = "transparent"),
              axis.title.x = element_text(colour = "gray15", face = "bold"),
              axis.title.y = element_text(colour = "gray15", face = "bold"),
              axis.text.x = element_text(colour = "gray15"),
              axis.text.y = element_text(colour = "gray15"),
              legend.key = element_rect(fill = "transparent"),
              legend.text = element_text(colour = "gray15", face = "bold"),
              panel.border = element_blank(),
              axis.line.x = element_line(colour = "gray30", size = 0.6),
              axis.line.y = element_line(colour = "gray30", size = 0.6)) +
        xlab("Countries") +
        ylab("Death Rate per 100 mil persons") +
        coord_flip()
      girafe(ggobj = lolli)
    })
  })

  #################################################################################################
  ###################################### GDP BAR CHART ############################################
  #################################################################################################
  # Horizontal bar chart of GDP per capita by country for the selected year,
  # grouped/colored by region.
  observe({
    bar_arrange <- data_barplot[(data_barplot$Year == input$barplot_year), ] %>%
      group_by(Region) %>%
      arrange(desc(GPD)) %>%
      mutate(Entity = factor(Entity, levels = Entity)) %>%
      # BUG FIX: was na.omit(by_duration$Entity) -- a copy/paste leftover from
      # the lollipop pipeline; the argument was silently ignored.
      na.omit()
    output$barplot <- renderBillboarder({
      billboarder() %>%
        bb_barchart(data = bar_arrange,
                    mapping = bbaes(x = Entity, y = GPD, group = Region),
                    rotated = TRUE, color = "#ff9900") %>%
        bb_bar(width = list(ratio = 3)) %>%
        bb_y_axis(tick = list(format = suffix("$"), fit = TRUE),
                  label = list(text = "GDP", position = "outer-top")) %>%
        bb_x_axis(tick = list(
          values = c(" ", ""),
          outer = FALSE)) %>%
        bb_color(palette = c("#331400", "#662900", "#993d00", "#cc5200", "#ff751a")) %>%
        bb_legend(show = FALSE)
    })
  })

  # Highlight the region chosen in input$focus without redrawing the chart.
  observeEvent(input$focus,
               {
                 billboarderProxy("barplot") %>%
                   bb_proxy_focus(input$focus)
               }, ignoreNULL = FALSE)

  #################################################################################################
  ###################################### MALARIA TIME SERIES ######################################
  #################################################################################################
  # Line chart of malaria deaths over time for the WHO regions ticked in the
  # sidebar checkbox group.
  observe({
    # BUG FIX: input$timeseries_location is a vector (checkboxGroupInput), so
    # '==' recycled the comparison and silently dropped rows; '%in%' keeps
    # every selected region.
    by_timeseries <-
      timeseries_data[timeseries_data$location %in% input$timeseries_location, ]
    output$my_MGD <- renderPlot({
      ggplot(by_timeseries, aes(x = year, y = val, group = location)) +
        geom_line(aes(x = year, y = val, color = location), size = 1.3) +
        scale_color_manual(values = c("#331400", "#662900", "#993d00", "#cc5200", "#ff751a", "#ffa366", "#ffffff")) +
        theme_light() +
        theme(legend.position = "right",
              plot.title = element_text(hjust = 0.5),
              legend.title = element_blank(),
              panel.background = element_rect(fill = "transparent"),
              plot.background = element_rect(fill = "#ffebcc", color = NA),
              panel.grid.major = element_blank(),
              panel.grid.minor = element_blank(),
              legend.background = element_rect(fill = "transparent"),
              legend.box.background = element_rect(fill = "transparent"),
              axis.title.x = element_text(colour = "gray15", face = "bold"),
              axis.title.y = element_text(colour = "gray15", face = "bold"),
              axis.text.x = element_text(colour = "gray15"),
              axis.text.y = element_text(colour = "gray15"),
              legend.key = element_rect(fill = "transparent"),
              legend.text = element_text(colour = "gray15", face = "bold"),
              panel.border = element_blank(),
              axis.line.x = element_line(colour = "gray30", size = 0.6),
              axis.line.y = element_line(colour = "gray30", size = 0.6)) +
        xlab("Year") +
        ylab("Number of Malaria Deaths")
    })
  })
}
shinyApp(ui, server)
|
# Iteratively Reweighted Least Squares implementation of Logistic Regression
# This simple implementation assumes there are no missing values,
# and all y values are either 0 or 1.
# Below, the variable 'p' is often referred to as 'mu' (mean of the link function).
# Also, 'yhat' is often referred to as 'eta' (link function).
# Fit a logistic regression by Iteratively Reweighted Least Squares (IRLS).
#
# Assumes the response contains only 0/1 values and no data are missing.
# In GLM terminology, 'mu' is the fitted mean and 'eta' the linear predictor.
#
# Args:
#   formula:   model formula, e.g. y ~ x1 + x2.
#   dataset:   data frame holding the variables in 'formula'.
#   tolerance: stop when the deviance changes by less than this amount.
#
# Returns: list(coef = coefficient estimates,
#               stderr = standard errors, rescaled by the Pearson dispersion).
logistic_regression <- function(formula, dataset, tolerance = 1.0e-6) {
  frame <- model.frame(formula, dataset)
  X <- model.matrix(formula, data = dataset)   # kept named X: names coef()
  y <- model.response(frame, "numeric")        # response must be 0 or 1
  # Binomial deviance of fitted probabilities p against the observed y.
  deviance_of <- function(p) {
    2 * sum(y * log(1 / p) + (1 - y) * log(1 / (1 - p)))
  }
  mu <- ifelse(y == 0, 0.25, 0.75)  # mild starting values away from 0/1
  eta <- log(mu / (1 - mu))
  dev_prev <- 0
  dev_curr <- deviance_of(mu)
  model <- NULL
  # IRLS: solve a weighted least-squares problem on the working response
  # until the deviance stabilises.
  repeat {
    if (abs(dev_curr - dev_prev) <= tolerance) {
      break
    }
    w <- mu * (1 - mu)                 # binomial variance function
    ynew <- eta + (y - mu) / w         # working response
    model <- lm(ynew ~ X - 1, weights = w)
    eta <- model$fit
    mu <- 1 / (1 + exp(-eta))
    dev_prev <- dev_curr
    dev_curr <- deviance_of(mu)
  }
  # Rescale lm()'s standard errors by the Pearson dispersion estimate so they
  # match the binomial model rather than the Gaussian working model.
  rss <- sum(residuals(model, type = "pearson")^2)
  dispersion <- rss / model$df.residual
  list(coef = coef(model), stderr = sqrt(diag(vcov(model))) / sqrt(dispersion))
}
# Example run: fit the IRLS logistic regression on iris, predicting whether a
# flower is NOT versicolor (versicolor -> 0, setosa/virginica -> 1).
demo <- function() {
  data(iris)
  binary_iris <- iris
  # unclass(Species) == 2 selects the second factor level (versicolor).
  binary_iris$Species <- ifelse(unclass(binary_iris$Species) == 2, 0, 1)
  logistic_regression(Species ~ ., binary_iris)
}
| /final_exam/final_exam_questions/attractiveness/IRLS_logistic_regression.R | no_license | niulongjia/CS249 | R | false | false | 1,465 | r | # Iteratively Reweighted Least Squares implementation of Logistic Regression
# This simple implementation assumes there are no missing values,
# and all y values are either 0 or 1.
# Below, the variable 'p' is often referred to as 'mu' (mean of the link function).
# Also, 'yhat' is often referred to as 'eta' (link function).
# Fit a logistic regression by Iteratively Reweighted Least Squares.
# Assumes no missing values and a strictly 0/1 response.
#   formula:   model formula, e.g. y ~ x1 + x2
#   dataset:   data frame holding the variables in 'formula'
#   tolerance: convergence threshold on the change in deviance
# Returns list(coef, stderr) where stderr is rescaled by the Pearson
# dispersion so it matches the binomial model, not the Gaussian working model.
logistic_regression = function( formula, dataset, tolerance=1.0e-6 ) {
initial.model = model.frame( formula, dataset )
X = model.matrix( formula, data = dataset )
y = model.response( initial.model, "numeric" ) # y values should be 0 and 1
p = ifelse( y==0, 0.25, 0.75 ) # initial values; all y values are 0 or 1
yhat = log(p/(1-p))
# Binomial deviance; iterate until its change drops below 'tolerance'.
prev_deviance = 0
deviance = 2*sum( y*log(1/p) + (1-y)*log(1/(1-p)) )
while (abs(deviance - prev_deviance) > tolerance) {
w = p * (1-p) # binomial variance function = IRLS weights
ynew = yhat + (y-p)/w # working response
model = lm( ynew ~ X - 1, weights = w ) # weighted least squares
yhat = model$fit
p = 1/(1 + exp(-yhat))
prev_deviance = deviance
deviance = 2 * sum( y*log(1/p) + (1-y)*log(1/(1-p)) )
}
rss = sum( residuals( model, type="pearson")^2 ) # weighted RSS
dispersion = rss / model$df.residual
return(list( coef = coef(model), stderr = sqrt( diag(vcov(model)) ) / sqrt(dispersion) ))
}
# Example run: binary classification on iris (versicolor = 0, others = 1).
demo = function() {
data(iris)
# unclass(Species) == 2 corresponds to the second factor level (versicolor).
zero_one_iris = transform( iris, Species = ifelse( unclass(Species)==2, 0, 1 ) )
logistic_regression( Species ~ ., zero_one_iris )
}
|
#This is the analysis file. The functions used in this file are cointained in synthetic_control_functions.R
#There are two model variants:
# *_full - Full synthetic control model with all covariates (excluding user-specified covariates).
# *_time - Trend adjustment using the specified variable (e.g., non-respiratory hospitalization or population size) as the denominator.
#############################
# #
# System Preparations #
# #
#############################
# Load the project-specific synthetic-control helper functions.
source('_scripts/paper_6/paper_6_uti_inpatient/paper_6_uti_inpatient_synthetic_control_functions.R', local = TRUE)
#############################
packages <-
  c(
    'parallel',
    'splines',
    'lubridate',
    'loo',
    'RcppRoll',
    'pomp',
    'lme4',
    'BoomSpikeSlab',
    'ggplot2',
    'reshape',
    'dummies'
  )
# packageHandler() (defined in the sourced helper file) installs/updates the
# packages as requested before they are attached below.
packageHandler(packages, update_packages, install_packages)
sapply(packages,
       library,
       quietly = TRUE,
       character.only = TRUE)
# BayesLogit is no longer on CRAN: install from GitHub on Windows, or from an
# archived macOS binary otherwise.
if (!("BayesLogit" %in% rownames(installed.packages()))) {
  if (.Platform$OS.type == "windows") {
    # NOTE(review): install_github() requires devtools/remotes to be attached;
    # neither is in 'packages' above -- confirm it is loaded elsewhere.
    install_github("jwindle/BayesLogit")
  } else {
    url_BayesLogit <-
      "https://github.com/weinbergerlab/synthetic-control-poisson/blob/master/packages/BayesLogit_0.6_mac.tgz?raw=true"
    pkgFile_BayesLogit <- "BayesLogit.tar.gz"
    download.file(url = url_BayesLogit, destfile = pkgFile_BayesLogit)
    # BUG FIX: install the downloaded tarball, not the URL. With repos = NULL,
    # install.packages() expects a local file path, so passing url_BayesLogit
    # failed (and url_BayesLogit was never even defined on the Windows branch,
    # where these lines previously also ran). This now mirrors the pogit
    # branch below.
    install.packages(pkgs = pkgFile_BayesLogit,
                     type = "source",
                     repos = NULL)
  }
}
# pogit was archived on CRAN: install from the CRAN archive, plus its
# 'logistf' dependency.
if (!("pogit" %in% rownames(installed.packages()))) {
  url_pogit <-
    "https://cran.r-project.org/src/contrib/Archive/pogit/pogit_1.1.0.tar.gz"
  pkgFile_pogit <- "pogit_1.1.0.tar.gz"
  download.file(url = url_pogit, destfile = pkgFile_pogit)
  install.packages(pkgs = pkgFile_pogit,
                   type = "source",
                   repos = NULL)
  install.packages('logistf')
}
library(pogit)
# Number of available cores, used for parallel processing to speed up the
# analysis.
n_cores <- detectCores()
set.seed(1)
###################################################
# #
# Directory setup and initialization of constants #
# #
###################################################
# Create the output folder (recursive; silently a no-op if it already exists).
dir.create(output_directory,
recursive = TRUE,
showWarnings = FALSE)
# Strata (e.g. age groups) present in the input data; drop any the user asked
# to exclude via the optional 'exclude_group' variable.
groups <-
as.character(unique(unlist(prelog_data[, group_name], use.names = FALSE)))
if (exists('exclude_group')) {
groups <- groups[!(groups %in% exclude_group)]
}
###############################################
# #
# Data and covariate preparation for analysis #
# #
###############################################
#Make sure we are in right format
prelog_data[, date_name] <-
as.Date(as.character(prelog_data[, date_name]),
tryFormats = c("%m/%d/%Y", '%Y-%m-%d'))
prelog_data[, date_name] <- formatDate(prelog_data[, date_name])
prelog_data <-
setNames(
lapply(
groups,
FUN = splitGroup,
ungrouped_data = prelog_data,
group_name = group_name,
date_name = date_name,
start_date = start_date,
end_date = end_date,
no_filter = c(group_name, date_name, outcome_name, denom_name)
),
groups
)
#if (exists('exclude_group')) {prelog_data <- prelog_data[!(names(prelog_data) %in% exclude_group)]}
#Log-transform all variables, adding 0.5 to counts of 0.
ds <-
setNames(lapply(
prelog_data,
FUN = logTransform,
no_log = c(group_name, date_name, outcome_name)
), groups)
time_points <- unique(ds[[1]][, date_name])
#Monthly dummies
if(n_seasons==4) {
dt <- quarter(as.Date(time_points))
}
if (n_seasons == 12) {
dt <- month(as.Date(time_points))
}
if (n_seasons == 3) {
dt.m <- month(as.Date(time_points))
dt <- dt.m
dt[dt.m %in% c(1, 2, 3, 4)] <- 1
dt[dt.m %in% c(5, 6, 7, 8)] <- 2
dt[dt.m %in% c(9, 10, 11, 12)] <- 3
}
season.dummies <- dummy(dt)
season.dummies <- as.data.frame(season.dummies)
names(season.dummies) <- paste0('s', 1:n_seasons)
season.dummies <- season.dummies[, -n_seasons]
ds <- lapply(ds, function(ds) {
if (!(denom_name %in% colnames(ds))) {
ds[denom_name] <- 0
}
return(ds)
})
# Checks for each age_group whether any control columns remain after above transformation
sparse_groups <- sapply(ds, function(ds) {
return(ncol(ds[!(
colnames(ds) %in% c(
date_name,
group_name,
denom_name,
outcome_name,
exclude_covar
)
)]) == 0)
})
# removes age_group without control columns
ds <- ds[!sparse_groups]
groups <- groups[!sparse_groups]
#Process and standardize the covariates. For the Brazil data, adjust for 2008 coding change.
covars_full <- setNames(lapply(ds, makeCovars), groups)
covars_full <-
lapply(
covars_full,
FUN = function(covars) {
covars[,!(colnames(covars) %in% exclude_covar), drop = FALSE]
}
)
covars_time <-
setNames(lapply(
covars_full,
FUN = function(covars) {
as.data.frame(list(cbind(
season.dummies, time_index = 1:nrow(covars)
)))
}
), groups)
covars_null <-
setNames(lapply(
covars_full,
FUN = function(covars) {
as.data.frame(list(cbind(season.dummies)))
}
), groups)
#Standardize the outcome variable and save the original mean and SD for later analysis.
outcome <-
sapply(
ds,
FUN = function(data) {
data[, outcome_name]
}
)
outcome_plot = outcome
offset <- sapply(
ds,
FUN = function(data)
exp(data[, denom_name])
) # offset term on original scale; 1 column per age group
################################
#set up for STL+PCA
################################
##SECTION 1: CREATING SMOOTHED VERSIONS OF CONTROL TIME SERIES AND APPENDING THEM ONTO ORIGINAL DATAFRAME OF CONTROLS
#EXTRACT LONG TERM TREND WITH DIFFERENT LEVELS OF SMOOTHNESS USING STL
# Set a list of parameters for STL
stl.covars <- mapply(smooth_func, ds.list = ds, covar.list = covars_full, SIMPLIFY=FALSE)
post.start.index <- which(time_points == post_period[1])
stl.data.setup <- mapply(stl_data_fun, covars = stl.covars, ds.sub = ds, SIMPLIFY = FALSE) #list of lists that has covariates per regression per strata
##SECTION 2: run first stage models
n_cores <- detectCores()-1
glm.results<- vector("list", length=length(stl.data.setup)) #combine models into a list
cl1 <- makeCluster(n_cores)
clusterEvalQ(cl1, {library(lme4, quietly = TRUE)})
clusterExport(cl1, c('stl.data.setup', 'glm.fun', 'time_points', 'n_seasons','post.start.index'), environment())
for(i in 1:length(stl.data.setup)){
glm.results[[i]]<-parLapply(cl=cl1 , stl.data.setup[[i]], fun=glm.fun )
}
stopCluster(cl1)
######################
#Combine the outcome, covariates, and time point information.
data_full <-
setNames(lapply(groups, makeTimeSeries, outcome = outcome, covars = covars_full),
groups)
data_time <-
setNames(
lapply(
groups,
makeTimeSeries,
outcome = outcome,
covars = covars_time,
trend = TRUE
),
groups
)
data_pca <-
mapply(
FUN = pca_top_var,
glm.results.in = glm.results,
covars = stl.covars,
ds.in = ds,
SIMPLIFY = FALSE
)
names(data_pca) <- groups
#Null model where we only include seasonal terms but no covariates
data_null <-
setNames(
lapply(
groups,
makeTimeSeries,
outcome = outcome,
covars = covars_null,
trend = FALSE
),
groups
)
#Time trend model but without a denominator
data_time_no_offset <-
setNames(
lapply(
groups,
makeTimeSeries,
outcome = outcome,
covars = covars_time,
trend = FALSE
),
groups
)
###############################
# #
# Main analysis #
# #
###############################
#Start Cluster for CausalImpact (the main analysis function).
cl <- makeCluster(n_cores)
clusterEvalQ(cl, {
library(pogit, quietly = TRUE)
library(lubridate, quietly = TRUE)
})
clusterExport(
cl,
c(
'doCausalImpact',
'intervention_date',
'time_points',
'n_seasons',
'crossval'
),
environment()
)
impact_full <-
setNames(
parLapply(
cl,
data_full,
doCausalImpact,
intervention_date = intervention_date,
var.select.on = TRUE,
time_points = time_points
),
groups
)
impact_time <-
setNames(
parLapply(
cl,
data_time,
doCausalImpact,
intervention_date = intervention_date,
var.select.on = FALSE,
time_points = time_points,
trend = TRUE
),
groups
)
impact_time_no_offset <-
setNames(
parLapply(
cl,
data_time_no_offset,
doCausalImpact,
intervention_date = intervention_date,
var.select.on = FALSE,
time_points = time_points,
trend = FALSE
),
groups
)
impact_pca <-
setNames(
parLapply(
cl,
data_pca,
doCausalImpact,
intervention_date = intervention_date,
var.select.on = FALSE,
time_points = time_points
),
groups
)
stopCluster(cl)
####################################################
####################################################
#CROSS VALIDATION
####################################################
if (crossval) {
# Creates List of lists:
# 1 entry for each stratum; within this, there are CV datasets for each year left out,
# and within this, there are 2 lists, one with full dataset, and one with the CV dataset
cv.data_full <- lapply(data_full, makeCV)
cv.data_time <- lapply(data_time, makeCV)
cv.data_time_no_offset <- lapply(data_time_no_offset, makeCV)
cv.data_pca <- lapply(data_pca, makeCV)
#zoo_data<-cv.data_time[[1]][[2]]
#Run the models on each of these datasets
# Start the clock!--takes ~45 minutes
ptm <- proc.time()
cl <- makeCluster(n_cores)
clusterEvalQ(cl, {
library(pogit, quietly = TRUE)
library(lubridate, quietly = TRUE)
})
clusterExport(
cl,
c(
'doCausalImpact',
'intervention_date',
'time_points',
'n_seasons',
'crossval'
),
environment()
)
cv_impact_full <-
setNames(parLapply(cl, cv.data_full, function(x)
lapply(
x,
doCausalImpact,
crossval = TRUE,
intervention_date = intervention_date,
var.select.on = TRUE,
time_points = time_points
)), groups)
cv_impact_time_no_offset <-
setNames(parLapply(cl, cv.data_time_no_offset, function(x)
lapply(
x,
doCausalImpact,
crossval = TRUE,
trend = FALSE,
intervention_date = intervention_date,
var.select.on = FALSE,
time_points = time_points
)),
groups)
cv_impact_time <-
setNames(parLapply(cl, cv.data_time, function(x)
lapply(
x,
doCausalImpact,
crossval = TRUE,
trend = TRUE,
intervention_date = intervention_date,
var.select.on = FALSE,
time_points = time_points
)), groups)
cv_impact_pca <-
setNames(parLapply(cl, cv.data_pca, function(x)
lapply(
x,
doCausalImpact,
crossval = TRUE,
intervention_date = intervention_date,
var.select.on = FALSE,
time_points = time_points
)), groups)
stopCluster(cl)
# Stop the clock
proc.time() - ptm
#Calculate pointwise log likelihood for cross-val prediction sample vs observed
#These are N_iter*N_obs*N_cross_val array
ll.cv.full <- lapply(
cv_impact_full, function(x)
lapply(x, crossval.log.lik)
)
ll.cv.full2 <- lapply(ll.cv.full, reshape.arr)
#
ll.cv.time_no_offset <- lapply(
cv_impact_time_no_offset, function(x)
lapply(x, crossval.log.lik)
)
ll.cv.time_no_offset2 <- lapply(ll.cv.time_no_offset, reshape.arr)
#
ll.cv.time <- lapply(
cv_impact_time, function(x)
lapply(x, crossval.log.lik)
)
ll.cv.time2 <- lapply(ll.cv.time, reshape.arr)
#
ll.cv.pca <- lapply(
cv_impact_pca, function(x)
lapply(x, crossval.log.lik)
)
ll.cv.pca2 <- lapply(ll.cv.pca, reshape.arr)
#Create list that has model result for each stratum
ll.compare <- vector("list", length(ll.cv.pca2)) # with length = number of age_groups
stacking_weights.all <- matrix(
NA,
nrow = length(ll.cv.pca2), # number of matrixes in ll.compare, essentially number of age_groups
ncol = 4 # number of models tested (SC, ITS, ITS without offset and STL+PCA)
)
for (i in 1:length(ll.compare)) { # essentially, for each age_group in age_groups
ll.compare[[i]] <-
cbind(ll.cv.full2[[i]],
ll.cv.time_no_offset2[[i]],
ll.cv.time2[[i]],
ll.cv.pca2[[i]])
#will get NAs if one of covariates is constant in fitting period (ie pandemic flu dummy)...shoud=ld fix this above
keep <- complete.cases(ll.compare[[i]])
ll.compare[[i]] <- ll.compare[[i]][keep, ]
#occasionally if there is a very poor fit, likelihood is very very small, which leads to underflow issue and log(0)...
#... delete these rows to avoid this as a dirty solution. Better would be to fix underflow
row.min <- apply(exp(ll.compare[[i]]), 1, min)
ll.compare[[i]] <- ll.compare[[i]][!(row.min == 0), ]
#if(min(exp(ll.compare[[i]]))>0){
stacking_weights.all[i, ] <- stacking_weights(ll.compare[[i]])
#}
}
stacking_weights.all <- as.data.frame(round(stacking_weights.all, 3))
names(stacking_weights.all) <-
c('Synthetic Controls',
'Time trend',
'Time trend (no offset)',
'STL+PCA')
stacking_weights.all <- cbind.data.frame(groups, stacking_weights.all)
stacking_weights.all.m <- melt(stacking_weights.all, id.vars = 'groups')
# stacking_weights.all.m<-stacking_weights.all.m[order(stacking_weights.all.m$groups),]
stacked.ests <-
mapply(
FUN = stack.mean,
group = groups,
impact_full = impact_full,
impact_time = impact_time,
impact_time_no_offset = impact_time_no_offset,
impact_pca = impact_pca,
SIMPLIFY = FALSE
)
#plot.stacked.ests <- lapply(stacked.ests, plot.stack.est)
quantiles_stack <-
setNames(lapply(
groups,
FUN = function(group) {
rrPredQuantiles(
impact = stacked.ests[[group]],
denom_data = ds[[group]][, denom_name],
eval_period = eval_period,
post_period = post_period
)
}
), groups)
pred_quantiles_stack <- sapply(quantiles_stack, getPred, simplify = 'array')
rr_roll_stack <-
sapply(
quantiles_stack,
FUN = function(quantiles_stack) {
quantiles_stack$roll_rr
},
simplify = 'array'
)
rr_mean_stack <- round(t(sapply(quantiles_stack, getRR)), 2)
rr_mean_stack_intervals <-
data.frame(
'Stacking Estimate (95% CI)' = makeInterval(rr_mean_stack[, 2], rr_mean_stack[, 3], rr_mean_stack[, 1]),
check.names = FALSE,
row.names = groups
)
cumsum_prevented_stack <-
sapply(groups,
FUN = cumsum_func,
quantiles = quantiles_stack,
simplify = 'array')
ann_pred_quantiles_stack <- sapply(quantiles_stack, getAnnPred, simplify = FALSE)
#Preds: Compare observed and expected
pred.cv.full <-
lapply(cv_impact_full, function(x)
sapply(x, pred.cv, simplify = 'array'))
pred.cv.pca <-
lapply(cv_impact_pca, function(x)
sapply(x, pred.cv, simplify = 'array'))
# # par(mfrow=c(3,2))
# plot.grp = 9
# for (i in 1:6) {
# matplot(
# pred.cv.full[[plot.grp]][, c(2:4), i],
# type = 'l',
# ylab = 'Count',
# col = '#1b9e77',
# lty = c(2, 1, 2),
# bty = 'l',
# ylim = range(pred.cv.full[[plot.grp]][, c(1), i]) * c(0.8, 1.2)
# )
# points(pred.cv.full[[plot.grp]][, c(1), i], pch = 16)
# title("Synthetic controls: Cross validation")
# matplot(
# pred.cv.pca[[plot.grp]][, c(2:4), i],
# type = 'l',
# ylab = 'Count',
# col = '#d95f02',
# lty = c(2, 1, 2),
# bty = 'l',
# ylim = range(pred.cv.full[[plot.grp]][, c(1), i]) * c(0.8, 1.2)
# )
# points(pred.cv.pca[[plot.grp]][, c(1), i], pch = 16)
# title("STL+PCA: Cross validation")
# }
save.stack.est <-
list(
pred_quantiles_stack,
rr_roll_stack,
rr_mean_stack,
rr_mean_stack_intervals,
cumsum_prevented_stack
)
names(save.stack.est) <-
c(
'pred_quantiles_stack',
'rr_roll_stack',
'rr_mean_stack',
'rr_mean_stack_intervals',
'cumsum_prevented_stack'
)
saveRDS(save.stack.est, file = paste0(output_directory, country, "Stack estimates.rds"))
#Pointwise RR and uncertainty for second stage meta analysis
log_rr_quantiles_stack <-
sapply(
quantiles_stack,
FUN = function(quantiles) {
quantiles$log_rr_full_t_quantiles
},
simplify = 'array'
)
dimnames(log_rr_quantiles_stack)[[1]] <- time_points
log_rr_full_t_samples.stack.prec <-
sapply(
quantiles_stack,
FUN = function(quantiles) {
quantiles$log_rr_full_t_samples.prec.post
},
simplify = 'array'
)
#log_rr_sd.stack <- sapply(quantiles_stack, FUN = function(quantiles) {quantiles$log_rr_full_t_sd}, simplify = 'array')
saveRDS(
log_rr_quantiles_stack,
file = paste0(output_directory, country, "_log_rr_quantiles_stack.rds")
)
saveRDS(
log_rr_full_t_samples.stack.prec,
file = paste0(
output_directory,
country,
"_log_rr_full_t_samples.stack.prec.rds"
)
)
}
##########################################################################
##########################################################################
#Save the inclusion probabilities from each of the models.
inclusion_prob_full <- setNames(lapply(impact_full, inclusionProb), groups)
inclusion_prob_time <- setNames(lapply(impact_time, inclusionProb), groups)
#All model results combined
quantiles_full <-
setNames(lapply(
groups,
FUN = function(group) {
rrPredQuantiles(
impact = impact_full[[group]],
denom_data = ds[[group]][, denom_name],
eval_period = eval_period,
post_period = post_period
)
}
), groups)
quantiles_time <-
setNames(lapply(
groups,
FUN = function(group) {
rrPredQuantiles(
impact = impact_time[[group]],
denom_data = ds[[group]][, denom_name],
eval_period = eval_period,
post_period = post_period
)
}
), groups)
quantiles_time_no_offset <-
setNames(lapply(
groups,
FUN = function(group) {
rrPredQuantiles(
impact = impact_time_no_offset[[group]],
denom_data = ds[[group]][, denom_name],
eval_period = eval_period,
post_period = post_period
)
}
), groups)
quantiles_pca <-
setNames(lapply(
groups,
FUN = function(group) {
rrPredQuantiles(
impact = impact_pca[[group]],
denom_data = ds[[group]][, denom_name],
eval_period = eval_period,
post_period = post_period
)
}
), groups)
#Model predicitons
pred_quantiles_full <- sapply(quantiles_full, getPred, simplify = 'array')
pred_quantiles_time <- sapply(quantiles_time, getPred, simplify = 'array')
pred_quantiles_time_no_offset <- sapply(quantiles_time_no_offset, getPred, simplify = 'array')
pred_quantiles_pca <- sapply(quantiles_pca, getPred, simplify = 'array')
#Predictions, aggregated by year
ann_pred_quantiles_full <- sapply(quantiles_full, getAnnPred, simplify = FALSE)
ann_pred_quantiles_time <- sapply(quantiles_time, getAnnPred, simplify = FALSE)
ann_pred_quantiles_time_no_offset <- sapply(quantiles_time_no_offset, getAnnPred, simplify = FALSE)
ann_pred_quantiles_pca <- sapply(quantiles_pca, getAnnPred, simplify = FALSE)
#Pointwise RR and uncertainty for second stage meta analysis
log_rr_quantiles <-
sapply(
quantiles_full,
FUN = function(quantiles) {
quantiles$log_rr_full_t_quantiles
},
simplify = 'array'
)
dimnames(log_rr_quantiles)[[1]] <- time_points
log_rr_sd <-
sapply(
quantiles_full,
FUN = function(quantiles) {
quantiles$log_rr_full_t_sd
},
simplify = 'array'
)
log_rr_full_t_samples.prec <-
sapply(
quantiles_full,
FUN = function(quantiles) {
quantiles$log_rr_full_t_samples.prec
},
simplify = 'array'
)
saveRDS(log_rr_quantiles, file = paste0(output_directory, country, "_log_rr_quantiles.rds"))
saveRDS(log_rr_sd, file = paste0(output_directory, country, "_log_rr_sd.rds"))
saveRDS(log_rr_full_t_samples.prec, file = paste0(output_directory, country, "_log_rr_full_t_samples.prec.rds"))
#Rolling rate ratios
rr_roll_full <-
sapply(
quantiles_full,
FUN = function(quantiles_full) {
quantiles_full$roll_rr
},
simplify = 'array'
)
rr_roll_time <-
sapply(
quantiles_time,
FUN = function(quantiles_time) {
quantiles_time$roll_rr
},
simplify = 'array'
)
rr_roll_time_no_offset <-
sapply(
quantiles_time_no_offset,
FUN = function(quantiles_time) {
quantiles_time$roll_rr
},
simplify = 'array'
)
rr_roll_pca <-
sapply(
quantiles_pca,
FUN = function(quantiles_pca) {
quantiles_pca$roll_rr
},
simplify = 'array'
)
#Rate ratios for evaluation period.
rr_mean_full <- t(sapply(quantiles_full, getRR))
rr_mean_time <- t(sapply(quantiles_time, getRR))
rr_mean_time_no_offset <- t(sapply(quantiles_time_no_offset, getRR))
rr_mean_pca <- t(sapply(quantiles_pca, getRR))
rr_mean_full_intervals <-
data.frame(
'SC Estimate (95% CI)' = makeInterval(rr_mean_full[, 2], rr_mean_full[, 3], rr_mean_full[, 1]),
check.names = FALSE,
row.names = groups
)
rr_mean_time_intervals <-
data.frame(
'Time trend Estimate (95% CI)' = makeInterval(rr_mean_time[, 2], rr_mean_time[, 3], rr_mean_time[, 1]),
check.names = FALSE,
row.names = groups
)
rr_mean_time_no_offset_intervals <-
data.frame(
'Time trend (no offset) Estimate (95% CI)' = makeInterval(
rr_mean_time_no_offset[, 2],
rr_mean_time_no_offset[, 3],
rr_mean_time_no_offset[, 1]
),
check.names = FALSE,
row.names = groups
)
rr_mean_pca_intervals <-
data.frame(
'STL+PCA Estimate (95% CI)' = makeInterval(rr_mean_pca[, 2], rr_mean_pca[, 3], rr_mean_pca[, 1]),
check.names = FALSE,
row.names = groups
)
colnames(rr_mean_time) <- paste('Time_trend', colnames(rr_mean_time))
#Combine RRs into 1 file for plotting
rr_mean_combo <-
as.data.frame(rbind(
cbind(
rep(1, nrow(rr_mean_full)),
groups,
seq(
from = 1,
by = 1,
length.out = nrow(rr_mean_full)
),
rr_mean_full
),
cbind(
rep(2, nrow(rr_mean_time)),
groups,
seq(
from = 1,
by = 1,
length.out = nrow(rr_mean_full)
),
rr_mean_time
),
cbind(
rep(3, nrow(rr_mean_time_no_offset)),
groups,
seq(
from = 1,
by = 1,
length.out = nrow(rr_mean_full)
),
rr_mean_time_no_offset
),
cbind(
rep(4, nrow(rr_mean_pca)),
groups,
seq(
from = 1,
by = 1,
length.out = nrow(rr_mean_full)
),
rr_mean_pca
)
))
# Post-process the combined rate-ratio table for plotting: name the columns,
# attach per-model point weights, restore numeric types, dodge the x positions
# so models do not overlap, and relabel/reorder the model factor.
names(rr_mean_combo) <- c('Model', 'groups', 'group.index', 'lcl', 'mean.rr', 'ucl')
# Point weights: stacking weights when cross-validation ran, otherwise a
# constant weight of 1 for every estimate.
if (crossval) {
  point.weights2 <- stacking_weights.all.m
} else {
  point.weights2 <- data.frame(value = rep(1, nrow(rr_mean_combo)))
}
rr_mean_combo$point.weights <- point.weights2$value
# rbind() of character matrices above left every column as character/factor;
# convert the numeric columns back (via as.character to be factor-safe).
for (num.col in c('group.index', 'mean.rr', 'lcl', 'ucl')) {
  rr_mean_combo[[num.col]] <- as.numeric(as.character(rr_mean_combo[[num.col]]))
}
# Horizontally dodge models 2-4 so estimates for the same stratum are visible
# side by side on the plot.
model.dodge <- c('2' = 0.15, '3' = 0.3, '4' = 0.45)
for (model.code in names(model.dodge)) {
  shift.rows <- rr_mean_combo$Model == model.code
  rr_mean_combo$group.index[shift.rows] <-
    rr_mean_combo$group.index[shift.rows] + model.dodge[[model.code]]
}
# Replace the numeric model codes (1-4, assigned in the rbind above) with
# readable labels.
model.labels <- c(
  '1' = "Synthetic Controls",
  '2' = "Time trend",
  '3' = "Time trend (No offset)",
  '4' = "STL+PCA"
)
rr_mean_combo$Model <- unname(model.labels[as.character(rr_mean_combo$Model)])
cbPalette <- c("#1b9e77", "#d95f02", "#7570b3", "#e7298a")
rr_mean_combo$est.index <- as.factor(seq_len(nrow(rr_mean_combo)))
# Fix the model order for the plot axis/legend. Naming the levels explicitly
# is more robust than indexing into the alphabetical level order
# (levels(...)[c(2, 3, 4, 1)]), which silently breaks if a label changes.
rr_mean_combo$Model <- factor(
  rr_mean_combo$Model,
  levels = c("Synthetic Controls", "Time trend", "Time trend (No offset)", "STL+PCA")
)
# Cumulative cases prevented (per-stratum arrays) for each model variant.
cumsum_prevented <-
  sapply(groups,
         FUN = cumsum_func,
         quantiles = quantiles_full,
         simplify = 'array')
cumsum_prevented_pca <-
  sapply(groups,
         FUN = cumsum_func,
         quantiles = quantiles_pca,
         simplify = 'array')
cumsum_prevented_time <-
  sapply(groups,
         FUN = cumsum_func,
         quantiles = quantiles_time,
         simplify = 'array')
################################
# #
# Sensitivity Analyses #
# #
################################
# Pred Sensitivity Analysis--tests effect of changing prior on Ncovars from 3 to 2 to 10
# cl <- makeCluster(n_cores)
# clusterEvalQ(cl, {
# library(CausalImpact, quietly = TRUE)
# library(lubridate, quietly = TRUE)
# library(RcppRoll, quietly = TRUE)
# })
# clusterExport(
# cl,
# c(
# 'doCausalImpact',
# 'predSensitivityAnalysis',
# 'inclusionProb',
# 'rrPredQuantiles',
# 'getPred',
# 'getRR',
# 'groups',
# 'ds',
# 'data_full',
# 'denom_name',
# 'outcome_mean',
# 'outcome_sd',
# 'intervention_date',
# 'eval_period',
# 'post_period',
# 'time_points',
# 'n_seasons'
# ),
# environment()
# )
#
# sensitivity_analysis_pred_2 <-
# setNames(as.data.frame(t(
# parSapply(
# cl,
# groups,
# predSensitivityAnalysis,
# ds = ds,
# zoo_data = data_full,
# denom_name = denom_name,
# outcome_mean = outcome_mean,
# outcome_sd = outcome_sd,
# intervention_date = intervention_date,
# eval_period = eval_period,
# post_period = post_period,
# time_points = time_points,
# n_seasons = n_seasons,
# n_pred = 2
# )
# )), c('Lower CI', 'Point Estimate', 'Upper CI'))
#
# sensitivity_analysis_pred_10 <-
# setNames(as.data.frame(t(
# parSapply(
# cl,
# groups,
# predSensitivityAnalysis,
# ds = ds,
# zoo_data = data_full,
# denom_name = denom_name,
# outcome_mean = outcome_mean,
# outcome_sd = outcome_sd,
# intervention_date = intervention_date,
# eval_period = eval_period,
# post_period = post_period,
# time_points = time_points,
# n_seasons = n_seasons,
# n_pred = 10
# )
# )), c('Lower CI', 'Point Estimate', 'Upper CI'))
#
# stopCluster(cl)
#
# sensitivity_analysis_pred_2_intervals <-
# data.frame(
# 'Estimate (95% CI)' = makeInterval(
# sensitivity_analysis_pred_2[, 2],
# sensitivity_analysis_pred_2[, 3],
# sensitivity_analysis_pred_2[, 1]
# ),
# row.names = groups,
# check.names = FALSE
# )
#
# sensitivity_analysis_pred_10_intervals <-
# data.frame(
# 'Estimate (95% CI)' = makeInterval(
# sensitivity_analysis_pred_10[, 2],
# sensitivity_analysis_pred_10[, 3],
# sensitivity_analysis_pred_10[, 1]
# ),
# row.names = groups,
# check.names = FALSE
# )
# Weight sensitivity analysis: for each stratum, drop the top-weighted control
# series one at a time and re-run the synthetic-control fit, to check whether
# the estimate hinges on any single control variable.
# (Closing brace of this if() is on the following line of the file.)
if(sensitivity){
  # Flag strata with too few controls to support the analysis
  # (<= 3 covariates beyond the n_seasons - 1 seasonal dummies).
  bad_sensitivity_groups <- # sapply over each age_group, check if number of columns is equal or less than 3, later exclude those groups
    sapply(covars_full, function (covar) {
      ncol(covar) <= n_seasons-1+3
    })
  # Restrict every input to the strata that passed the check above.
  sensitivity_covars_full <- covars_full[!bad_sensitivity_groups]
  sensitivity_ds <- ds[!bad_sensitivity_groups]
  sensitivity_impact_full <- impact_full[!bad_sensitivity_groups]
  sensitivity_groups <- groups[!bad_sensitivity_groups]
  #Weight Sensitivity Analysis - top weighted variables are excluded and analysis is re-run.
  if (length(sensitivity_groups)!=0) {
    # Parallel re-fits: one weightSensitivityAnalysis() call per stratum.
    cl <- makeCluster(n_cores)
    clusterEvalQ(cl, {
      library(pogit, quietly = TRUE)
      library(lubridate, quietly = TRUE)
      library(RcppRoll, quietly = TRUE)
    })
    # Ship the helper functions and shared parameters to the workers.
    clusterExport(
      cl,
      c(
        'sensitivity_ds',
        'doCausalImpact',
        'year_def',
        'weightSensitivityAnalysis',
        'rrPredQuantiles',
        'sensitivity_groups',
        'intervention_date',
        'outcome',
        'time_points',
        'n_seasons',
        'eval_period',
        'post_period',
        'crossval'
      ),
      environment()
    )
    sensitivity_analysis_full <-
      setNames(
        parLapply(
          cl,
          sensitivity_groups,
          weightSensitivityAnalysis,
          covars = sensitivity_covars_full,
          ds = sensitivity_ds,
          impact = sensitivity_impact_full,
          time_points = time_points,
          intervention_date = intervention_date,
          n_seasons = n_seasons,
          outcome = outcome,
          eval_period = eval_period,
          post_period = post_period
        ),
        sensitivity_groups
      )
    stopCluster(cl)
    # Extract model predictions for every sensitivity re-fit
    # (one list of prediction sets per stratum).
    sensitivity_pred_quantiles <-
      lapply(
        sensitivity_analysis_full,
        FUN = function(sensitivity_analysis) {
          pred_list <-
            vector(mode = 'list', length = length(sensitivity_analysis))
          for (sensitivity_index in 1:length(sensitivity_analysis)) {
            pred_list[[sensitivity_index]] <-
              getPred(sensitivity_analysis[[sensitivity_index]])
          }
          return(pred_list)
        }
      )
    #Table of rate ratios for each sensitivity analysis level
    sensitivity_table <-
      t(
        sapply(
          sensitivity_groups,
          sensitivityTable,
          sensitivity_analysis = sensitivity_analysis_full,
          original_rr = rr_mean_full
        )
      )
    # Formatted "estimate (95% CI)" strings plus, for each of the three dropped
    # controls, its name, inclusion probability, and re-fit estimate.
    # NOTE(review): CI columns are pulled by hard-coded position
    # (2,3,1 / 7,8,6 / 12,13,11 / 17,18,16); this assumes the column layout
    # produced by sensitivityTable() -- confirm if that helper changes.
    sensitivity_table_intervals <-
      data.frame(
        'Estimate (95% CI)' = makeInterval(sensitivity_table[, 2], sensitivity_table[, 3], sensitivity_table[, 1]),
        'Top Control 1' = sensitivity_table[, 'Top Control 1'],
        'Inclusion Probability of Control 1' = sensitivity_table[, 'Inclusion Probability of Control 1'],
        'Control 1 Estimate (95% CI)' = makeInterval(sensitivity_table[, 7], sensitivity_table[, 8], sensitivity_table[, 6]),
        'Top Control 2' = sensitivity_table[, 'Top Control 2'],
        'Inclusion Probability of Control 2' = sensitivity_table[, 'Inclusion Probability of Control 2'],
        'Control 2 Estimate (95% CI)' = makeInterval(sensitivity_table[, 12], sensitivity_table[, 13], sensitivity_table[, 11]),
        'Top Control 3' = sensitivity_table[, 'Top Control 3'],
        'Inclusion Probability of Control 3' = sensitivity_table[, 'Inclusion Probability of Control 3'],
        'Control 3 Estimate (95% CI)' = makeInterval(sensitivity_table[, 17], sensitivity_table[, 18], sensitivity_table[, 16]),
        check.names = FALSE
      )
    # Side-by-side comparison with the time-trend (ITS) estimates for the
    # same retained strata.
    rr_table <- cbind.data.frame(round(rr_mean_time[!bad_sensitivity_groups,], 2), sensitivity_table)
    rr_table_intervals <- cbind('ITS Estimate (95% CI)' = rr_mean_time_intervals[!bad_sensitivity_groups,], sensitivity_table_intervals)
  } else {
    # No stratum had enough controls for a sensitivity analysis.
    sensitivity_table_intervals <- NA
  }
} | /_scripts/paper_6/paper_6_uti_inpatient/paper_6_uti_inpatient_synthetic_control_analysis.R | permissive | eliaseythorsson/phd_thesis | R | false | false | 30,385 | r | #This is the analysis file. The functions used in this file are cointained in synthetic_control_functions.R
#There are two model variants:
# *_full - Full synthetic control model with all covariates (excluding user-specified covariates).
# *_time - Trend adjustment using the specified variable (e.g., non-respiratory hospitalization or population size) as the denominator.
#############################
# #
# System Preparations #
# #
#############################
source('_scripts/paper_6/paper_6_uti_inpatient/paper_6_uti_inpatient_synthetic_control_functions.R', local = TRUE)
#############################
packages <-
c(
'parallel',
'splines',
'lubridate',
'loo',
'RcppRoll',
'pomp',
'lme4',
'BoomSpikeSlab',
'ggplot2',
'reshape',
'dummies'
)
packageHandler(packages, update_packages, install_packages)
sapply(packages,
library,
quietly = TRUE,
character.only = TRUE)
#Detect if BayesLogit/pogit packages are installed; if not, install from archives (no longer on CRAN).
if ("BayesLogit" %in% rownames(installed.packages()) == FALSE) {
  if (.Platform$OS.type == "windows") {
    #url_BayesLogit<- "https://mran.microsoft.com/snapshot/2017-02-04/src/contrib/BayesLogit_0.6.tar.gz"
    # NOTE(review): install_github() comes from devtools/remotes, which is not
    # loaded above -- confirm it is attached before running on Windows.
    install_github("jwindle/BayesLogit")
  } else{
    url_BayesLogit <-
      "https://github.com/weinbergerlab/synthetic-control-poisson/blob/master/packages/BayesLogit_0.6_mac.tgz?raw=true"
    # Download/install only on this branch: url_BayesLogit is undefined on
    # Windows (its URL is commented out), and the Windows install is already
    # handled by install_github() above.
    pkgFile_BayesLogit <- "BayesLogit.tar.gz"
    download.file(url = url_BayesLogit, destfile = pkgFile_BayesLogit)
    # Install from the file just downloaded instead of re-fetching the URL.
    install.packages(pkgs = pkgFile_BayesLogit, type = "source", repos = NULL)
  }
}
if ("pogit" %in% rownames(installed.packages()) == FALSE) {
  url_pogit <-
    "https://cran.r-project.org/src/contrib/Archive/pogit/pogit_1.1.0.tar.gz"
  pkgFile_pogit <- "pogit_1.1.0.tar.gz"
  download.file(url = url_pogit, destfile = pkgFile_pogit)
  install.packages(pkgs = pkgFile_pogit,
                   type = "source",
                   repos = NULL)
  # logistf is a dependency required by pogit.
  install.packages('logistf')
}
library(pogit)
#Detects number of available cores on computers. Used for parallel processing to speed up analysis.
n_cores <- detectCores()
# Fix the RNG seed so the stochastic model fits below are reproducible.
set.seed(1)
###################################################
#                                                 #
# Directory setup and initialization of constants #
#                                                 #
###################################################
# output_directory is assumed to be defined by the calling/parameter script
# sourced above -- TODO confirm.
dir.create(output_directory,
           recursive = TRUE,
           showWarnings = FALSE)
# One stratum per unique value of the grouping column (e.g. age group).
groups <-
  as.character(unique(unlist(prelog_data[, group_name], use.names = FALSE)))
# Optionally drop user-specified strata from the analysis.
if (exists('exclude_group')) {
  groups <- groups[!(groups %in% exclude_group)]
}
###############################################
# #
# Data and covariate preparation for analysis #
# #
###############################################
#Make sure we are in right format
prelog_data[, date_name] <-
as.Date(as.character(prelog_data[, date_name]),
tryFormats = c("%m/%d/%Y", '%Y-%m-%d'))
prelog_data[, date_name] <- formatDate(prelog_data[, date_name])
prelog_data <-
setNames(
lapply(
groups,
FUN = splitGroup,
ungrouped_data = prelog_data,
group_name = group_name,
date_name = date_name,
start_date = start_date,
end_date = end_date,
no_filter = c(group_name, date_name, outcome_name, denom_name)
),
groups
)
#if (exists('exclude_group')) {prelog_data <- prelog_data[!(names(prelog_data) %in% exclude_group)]}
#Log-transform all variables, adding 0.5 to counts of 0.
ds <-
setNames(lapply(
prelog_data,
FUN = logTransform,
no_log = c(group_name, date_name, outcome_name)
), groups)
time_points <- unique(ds[[1]][, date_name])
#Monthly dummies
if(n_seasons==4) {
dt <- quarter(as.Date(time_points))
}
if (n_seasons == 12) {
dt <- month(as.Date(time_points))
}
if (n_seasons == 3) {
dt.m <- month(as.Date(time_points))
dt <- dt.m
dt[dt.m %in% c(1, 2, 3, 4)] <- 1
dt[dt.m %in% c(5, 6, 7, 8)] <- 2
dt[dt.m %in% c(9, 10, 11, 12)] <- 3
}
season.dummies <- dummy(dt)
season.dummies <- as.data.frame(season.dummies)
names(season.dummies) <- paste0('s', 1:n_seasons)
season.dummies <- season.dummies[, -n_seasons]
ds <- lapply(ds, function(ds) {
if (!(denom_name %in% colnames(ds))) {
ds[denom_name] <- 0
}
return(ds)
})
# Identify strata that would be left with zero candidate control series once
# the id/date, outcome, denominator, and user-excluded columns are set aside.
sparse_groups <- sapply(ds, function(stratum_data) {
  reserved_cols <- c(
    date_name,
    group_name,
    denom_name,
    outcome_name,
    exclude_covar
  )
  candidate_controls <- stratum_data[!(colnames(stratum_data) %in% reserved_cols)]
  ncol(candidate_controls) == 0
})
# Drop strata without usable controls from both the data and the group list.
ds <- ds[!sparse_groups]
groups <- groups[!sparse_groups]
#Process and standardize the covariates. For the Brazil data, adjust for 2008 coding change.
covars_full <- setNames(lapply(ds, makeCovars), groups)
covars_full <-
lapply(
covars_full,
FUN = function(covars) {
covars[,!(colnames(covars) %in% exclude_covar), drop = FALSE]
}
)
covars_time <-
setNames(lapply(
covars_full,
FUN = function(covars) {
as.data.frame(list(cbind(
season.dummies, time_index = 1:nrow(covars)
)))
}
), groups)
covars_null <-
setNames(lapply(
covars_full,
FUN = function(covars) {
as.data.frame(list(cbind(season.dummies)))
}
), groups)
#Standardize the outcome variable and save the original mean and SD for later analysis.
outcome <-
sapply(
ds,
FUN = function(data) {
data[, outcome_name]
}
)
outcome_plot = outcome
offset <- sapply(
ds,
FUN = function(data)
exp(data[, denom_name])
) # offset term on original scale; 1 column per age group
################################
#set up for STL+PCA
################################
##SECTION 1: CREATING SMOOTHED VERSIONS OF CONTROL TIME SERIES AND APPENDING THEM ONTO ORIGINAL DATAFRAME OF CONTROLS
#EXTRACT LONG TERM TREND WITH DIFFERENT LEVELS OF SMOOTHNESS USING STL
# Set a list of parameters for STL
stl.covars <- mapply(smooth_func, ds.list = ds, covar.list = covars_full, SIMPLIFY=FALSE)
post.start.index <- which(time_points == post_period[1])
stl.data.setup <- mapply(stl_data_fun, covars = stl.covars, ds.sub = ds, SIMPLIFY = FALSE) #list of lists that has covariates per regression per strata
##SECTION 2: run first stage models
# First-stage model fits for the STL+PCA variant, run in parallel: one worker
# pool, then one parLapply() call per stratum (each stratum holds a list of
# per-regression data sets built by stl_data_fun above).
n_cores <- detectCores() - 1
# Preallocate one result slot per stratum.
glm.results <- vector("list", length = length(stl.data.setup))
cl1 <- makeCluster(n_cores)
clusterEvalQ(cl1, {library(lme4, quietly = TRUE)})
clusterExport(cl1, c('stl.data.setup', 'glm.fun', 'time_points', 'n_seasons','post.start.index'), environment())
# seq_along() is safe for an empty list, where 1:length() would yield c(1, 0).
for (i in seq_along(stl.data.setup)) {
  glm.results[[i]] <- parLapply(cl = cl1, stl.data.setup[[i]], fun = glm.fun)
}
stopCluster(cl1)
######################
#Combine the outcome, covariates, and time point information.
data_full <-
setNames(lapply(groups, makeTimeSeries, outcome = outcome, covars = covars_full),
groups)
data_time <-
setNames(
lapply(
groups,
makeTimeSeries,
outcome = outcome,
covars = covars_time,
trend = TRUE
),
groups
)
data_pca <-
mapply(
FUN = pca_top_var,
glm.results.in = glm.results,
covars = stl.covars,
ds.in = ds,
SIMPLIFY = FALSE
)
names(data_pca) <- groups
#Null model where we only include seasonal terms but no covariates
data_null <-
setNames(
lapply(
groups,
makeTimeSeries,
outcome = outcome,
covars = covars_null,
trend = FALSE
),
groups
)
#Time trend model but without a denominator
data_time_no_offset <-
setNames(
lapply(
groups,
makeTimeSeries,
outcome = outcome,
covars = covars_time,
trend = FALSE
),
groups
)
###############################
# #
# Main analysis #
# #
###############################
#Start Cluster for CausalImpact (the main analysis function).
cl <- makeCluster(n_cores)
clusterEvalQ(cl, {
library(pogit, quietly = TRUE)
library(lubridate, quietly = TRUE)
})
clusterExport(
cl,
c(
'doCausalImpact',
'intervention_date',
'time_points',
'n_seasons',
'crossval'
),
environment()
)
impact_full <-
setNames(
parLapply(
cl,
data_full,
doCausalImpact,
intervention_date = intervention_date,
var.select.on = TRUE,
time_points = time_points
),
groups
)
impact_time <-
setNames(
parLapply(
cl,
data_time,
doCausalImpact,
intervention_date = intervention_date,
var.select.on = FALSE,
time_points = time_points,
trend = TRUE
),
groups
)
impact_time_no_offset <-
setNames(
parLapply(
cl,
data_time_no_offset,
doCausalImpact,
intervention_date = intervention_date,
var.select.on = FALSE,
time_points = time_points,
trend = FALSE
),
groups
)
impact_pca <-
setNames(
parLapply(
cl,
data_pca,
doCausalImpact,
intervention_date = intervention_date,
var.select.on = FALSE,
time_points = time_points
),
groups
)
stopCluster(cl)
####################################################
####################################################
#CROSS VALIDATION
####################################################
if (crossval) {
# Creates List of lists:
# 1 entry for each stratum; within this, there are CV datasets for each year left out,
# and within this, there are 2 lists, one with full dataset, and one with the CV dataset
cv.data_full <- lapply(data_full, makeCV)
cv.data_time <- lapply(data_time, makeCV)
cv.data_time_no_offset <- lapply(data_time_no_offset, makeCV)
cv.data_pca <- lapply(data_pca, makeCV)
#zoo_data<-cv.data_time[[1]][[2]]
#Run the models on each of these datasets
# Start the clock!--takes ~45 minutes
ptm <- proc.time()
cl <- makeCluster(n_cores)
clusterEvalQ(cl, {
library(pogit, quietly = TRUE)
library(lubridate, quietly = TRUE)
})
clusterExport(
cl,
c(
'doCausalImpact',
'intervention_date',
'time_points',
'n_seasons',
'crossval'
),
environment()
)
cv_impact_full <-
setNames(parLapply(cl, cv.data_full, function(x)
lapply(
x,
doCausalImpact,
crossval = TRUE,
intervention_date = intervention_date,
var.select.on = TRUE,
time_points = time_points
)), groups)
cv_impact_time_no_offset <-
setNames(parLapply(cl, cv.data_time_no_offset, function(x)
lapply(
x,
doCausalImpact,
crossval = TRUE,
trend = FALSE,
intervention_date = intervention_date,
var.select.on = FALSE,
time_points = time_points
)),
groups)
cv_impact_time <-
setNames(parLapply(cl, cv.data_time, function(x)
lapply(
x,
doCausalImpact,
crossval = TRUE,
trend = TRUE,
intervention_date = intervention_date,
var.select.on = FALSE,
time_points = time_points
)), groups)
cv_impact_pca <-
setNames(parLapply(cl, cv.data_pca, function(x)
lapply(
x,
doCausalImpact,
crossval = TRUE,
intervention_date = intervention_date,
var.select.on = FALSE,
time_points = time_points
)), groups)
stopCluster(cl)
# Stop the clock
proc.time() - ptm
#Calculate pointwise log likelihood for cross-val prediction sample vs observed
#These are N_iter*N_obs*N_cross_val array
ll.cv.full <- lapply(
cv_impact_full, function(x)
lapply(x, crossval.log.lik)
)
ll.cv.full2 <- lapply(ll.cv.full, reshape.arr)
#
ll.cv.time_no_offset <- lapply(
cv_impact_time_no_offset, function(x)
lapply(x, crossval.log.lik)
)
ll.cv.time_no_offset2 <- lapply(ll.cv.time_no_offset, reshape.arr)
#
ll.cv.time <- lapply(
cv_impact_time, function(x)
lapply(x, crossval.log.lik)
)
ll.cv.time2 <- lapply(ll.cv.time, reshape.arr)
#
ll.cv.pca <- lapply(
cv_impact_pca, function(x)
lapply(x, crossval.log.lik)
)
ll.cv.pca2 <- lapply(ll.cv.pca, reshape.arr)
#Create list that has model result for each stratum
ll.compare <- vector("list", length(ll.cv.pca2)) # with length = number of age_groups
stacking_weights.all <- matrix(
NA,
nrow = length(ll.cv.pca2), # number of matrixes in ll.compare, essentially number of age_groups
ncol = 4 # number of models tested (SC, ITS, ITS without offset and STL+PCA)
)
for (i in 1:length(ll.compare)) { # essentially, for each age_group in age_groups
ll.compare[[i]] <-
cbind(ll.cv.full2[[i]],
ll.cv.time_no_offset2[[i]],
ll.cv.time2[[i]],
ll.cv.pca2[[i]])
#will get NAs if one of covariates is constant in fitting period (ie pandemic flu dummy)...shoud=ld fix this above
keep <- complete.cases(ll.compare[[i]])
ll.compare[[i]] <- ll.compare[[i]][keep, ]
#occasionally if there is a very poor fit, likelihood is very very small, which leads to underflow issue and log(0)...
#... delete these rows to avoid this as a dirty solution. Better would be to fix underflow
row.min <- apply(exp(ll.compare[[i]]), 1, min)
ll.compare[[i]] <- ll.compare[[i]][!(row.min == 0), ]
#if(min(exp(ll.compare[[i]]))>0){
stacking_weights.all[i, ] <- stacking_weights(ll.compare[[i]])
#}
}
stacking_weights.all <- as.data.frame(round(stacking_weights.all, 3))
names(stacking_weights.all) <-
c('Synthetic Controls',
'Time trend',
'Time trend (no offset)',
'STL+PCA')
stacking_weights.all <- cbind.data.frame(groups, stacking_weights.all)
stacking_weights.all.m <- melt(stacking_weights.all, id.vars = 'groups')
# stacking_weights.all.m<-stacking_weights.all.m[order(stacking_weights.all.m$groups),]
stacked.ests <-
mapply(
FUN = stack.mean,
group = groups,
impact_full = impact_full,
impact_time = impact_time,
impact_time_no_offset = impact_time_no_offset,
impact_pca = impact_pca,
SIMPLIFY = FALSE
)
#plot.stacked.ests <- lapply(stacked.ests, plot.stack.est)
quantiles_stack <-
setNames(lapply(
groups,
FUN = function(group) {
rrPredQuantiles(
impact = stacked.ests[[group]],
denom_data = ds[[group]][, denom_name],
eval_period = eval_period,
post_period = post_period
)
}
), groups)
pred_quantiles_stack <- sapply(quantiles_stack, getPred, simplify = 'array')
rr_roll_stack <-
sapply(
quantiles_stack,
FUN = function(quantiles_stack) {
quantiles_stack$roll_rr
},
simplify = 'array'
)
rr_mean_stack <- round(t(sapply(quantiles_stack, getRR)), 2)
rr_mean_stack_intervals <-
data.frame(
'Stacking Estimate (95% CI)' = makeInterval(rr_mean_stack[, 2], rr_mean_stack[, 3], rr_mean_stack[, 1]),
check.names = FALSE,
row.names = groups
)
cumsum_prevented_stack <-
sapply(groups,
FUN = cumsum_func,
quantiles = quantiles_stack,
simplify = 'array')
ann_pred_quantiles_stack <- sapply(quantiles_stack, getAnnPred, simplify = FALSE)
#Preds: Compare observed and expected
pred.cv.full <-
lapply(cv_impact_full, function(x)
sapply(x, pred.cv, simplify = 'array'))
pred.cv.pca <-
lapply(cv_impact_pca, function(x)
sapply(x, pred.cv, simplify = 'array'))
# # par(mfrow=c(3,2))
# plot.grp = 9
# for (i in 1:6) {
# matplot(
# pred.cv.full[[plot.grp]][, c(2:4), i],
# type = 'l',
# ylab = 'Count',
# col = '#1b9e77',
# lty = c(2, 1, 2),
# bty = 'l',
# ylim = range(pred.cv.full[[plot.grp]][, c(1), i]) * c(0.8, 1.2)
# )
# points(pred.cv.full[[plot.grp]][, c(1), i], pch = 16)
# title("Synthetic controls: Cross validation")
# matplot(
# pred.cv.pca[[plot.grp]][, c(2:4), i],
# type = 'l',
# ylab = 'Count',
# col = '#d95f02',
# lty = c(2, 1, 2),
# bty = 'l',
# ylim = range(pred.cv.full[[plot.grp]][, c(1), i]) * c(0.8, 1.2)
# )
# points(pred.cv.pca[[plot.grp]][, c(1), i], pch = 16)
# title("STL+PCA: Cross validation")
# }
save.stack.est <-
list(
pred_quantiles_stack,
rr_roll_stack,
rr_mean_stack,
rr_mean_stack_intervals,
cumsum_prevented_stack
)
names(save.stack.est) <-
c(
'pred_quantiles_stack',
'rr_roll_stack',
'rr_mean_stack',
'rr_mean_stack_intervals',
'cumsum_prevented_stack'
)
saveRDS(save.stack.est, file = paste0(output_directory, country, "Stack estimates.rds"))
#Pointwise RR and uncertainty for second stage meta analysis
log_rr_quantiles_stack <-
sapply(
quantiles_stack,
FUN = function(quantiles) {
quantiles$log_rr_full_t_quantiles
},
simplify = 'array'
)
dimnames(log_rr_quantiles_stack)[[1]] <- time_points
log_rr_full_t_samples.stack.prec <-
sapply(
quantiles_stack,
FUN = function(quantiles) {
quantiles$log_rr_full_t_samples.prec.post
},
simplify = 'array'
)
#log_rr_sd.stack <- sapply(quantiles_stack, FUN = function(quantiles) {quantiles$log_rr_full_t_sd}, simplify = 'array')
saveRDS(
log_rr_quantiles_stack,
file = paste0(output_directory, country, "_log_rr_quantiles_stack.rds")
)
saveRDS(
log_rr_full_t_samples.stack.prec,
file = paste0(
output_directory,
country,
"_log_rr_full_t_samples.stack.prec.rds"
)
)
}
##########################################################################
##########################################################################
#Save the inclusion probabilities from each of the models.
inclusion_prob_full <- setNames(lapply(impact_full, inclusionProb), groups)
inclusion_prob_time <- setNames(lapply(impact_time, inclusionProb), groups)
#All model results combined
quantiles_full <-
setNames(lapply(
groups,
FUN = function(group) {
rrPredQuantiles(
impact = impact_full[[group]],
denom_data = ds[[group]][, denom_name],
eval_period = eval_period,
post_period = post_period
)
}
), groups)
quantiles_time <-
setNames(lapply(
groups,
FUN = function(group) {
rrPredQuantiles(
impact = impact_time[[group]],
denom_data = ds[[group]][, denom_name],
eval_period = eval_period,
post_period = post_period
)
}
), groups)
quantiles_time_no_offset <-
setNames(lapply(
groups,
FUN = function(group) {
rrPredQuantiles(
impact = impact_time_no_offset[[group]],
denom_data = ds[[group]][, denom_name],
eval_period = eval_period,
post_period = post_period
)
}
), groups)
quantiles_pca <-
setNames(lapply(
groups,
FUN = function(group) {
rrPredQuantiles(
impact = impact_pca[[group]],
denom_data = ds[[group]][, denom_name],
eval_period = eval_period,
post_period = post_period
)
}
), groups)
#Model predicitons
pred_quantiles_full <- sapply(quantiles_full, getPred, simplify = 'array')
pred_quantiles_time <- sapply(quantiles_time, getPred, simplify = 'array')
pred_quantiles_time_no_offset <- sapply(quantiles_time_no_offset, getPred, simplify = 'array')
pred_quantiles_pca <- sapply(quantiles_pca, getPred, simplify = 'array')
#Predictions, aggregated by year
ann_pred_quantiles_full <- sapply(quantiles_full, getAnnPred, simplify = FALSE)
ann_pred_quantiles_time <- sapply(quantiles_time, getAnnPred, simplify = FALSE)
ann_pred_quantiles_time_no_offset <- sapply(quantiles_time_no_offset, getAnnPred, simplify = FALSE)
ann_pred_quantiles_pca <- sapply(quantiles_pca, getAnnPred, simplify = FALSE)
#Pointwise RR and uncertainty for second stage meta analysis
log_rr_quantiles <-
sapply(
quantiles_full,
FUN = function(quantiles) {
quantiles$log_rr_full_t_quantiles
},
simplify = 'array'
)
dimnames(log_rr_quantiles)[[1]] <- time_points
log_rr_sd <-
sapply(
quantiles_full,
FUN = function(quantiles) {
quantiles$log_rr_full_t_sd
},
simplify = 'array'
)
log_rr_full_t_samples.prec <-
sapply(
quantiles_full,
FUN = function(quantiles) {
quantiles$log_rr_full_t_samples.prec
},
simplify = 'array'
)
saveRDS(log_rr_quantiles, file = paste0(output_directory, country, "_log_rr_quantiles.rds"))
saveRDS(log_rr_sd, file = paste0(output_directory, country, "_log_rr_sd.rds"))
saveRDS(log_rr_full_t_samples.prec, file = paste0(output_directory, country, "_log_rr_full_t_samples.prec.rds"))
#Rolling rate ratios
rr_roll_full <-
sapply(
quantiles_full,
FUN = function(quantiles_full) {
quantiles_full$roll_rr
},
simplify = 'array'
)
rr_roll_time <-
sapply(
quantiles_time,
FUN = function(quantiles_time) {
quantiles_time$roll_rr
},
simplify = 'array'
)
rr_roll_time_no_offset <-
sapply(
quantiles_time_no_offset,
FUN = function(quantiles_time) {
quantiles_time$roll_rr
},
simplify = 'array'
)
rr_roll_pca <-
sapply(
quantiles_pca,
FUN = function(quantiles_pca) {
quantiles_pca$roll_rr
},
simplify = 'array'
)
#Rate ratios for evaluation period.
rr_mean_full <- t(sapply(quantiles_full, getRR))
rr_mean_time <- t(sapply(quantiles_time, getRR))
rr_mean_time_no_offset <- t(sapply(quantiles_time_no_offset, getRR))
rr_mean_pca <- t(sapply(quantiles_pca, getRR))
rr_mean_full_intervals <-
data.frame(
'SC Estimate (95% CI)' = makeInterval(rr_mean_full[, 2], rr_mean_full[, 3], rr_mean_full[, 1]),
check.names = FALSE,
row.names = groups
)
rr_mean_time_intervals <-
data.frame(
'Time trend Estimate (95% CI)' = makeInterval(rr_mean_time[, 2], rr_mean_time[, 3], rr_mean_time[, 1]),
check.names = FALSE,
row.names = groups
)
rr_mean_time_no_offset_intervals <-
data.frame(
'Time trend (no offset) Estimate (95% CI)' = makeInterval(
rr_mean_time_no_offset[, 2],
rr_mean_time_no_offset[, 3],
rr_mean_time_no_offset[, 1]
),
check.names = FALSE,
row.names = groups
)
rr_mean_pca_intervals <-
data.frame(
'STL+PCA Estimate (95% CI)' = makeInterval(rr_mean_pca[, 2], rr_mean_pca[, 3], rr_mean_pca[, 1]),
check.names = FALSE,
row.names = groups
)
colnames(rr_mean_time) <- paste('Time_trend', colnames(rr_mean_time))
#Combine RRs into 1 file for plotting
rr_mean_combo <-
as.data.frame(rbind(
cbind(
rep(1, nrow(rr_mean_full)),
groups,
seq(
from = 1,
by = 1,
length.out = nrow(rr_mean_full)
),
rr_mean_full
),
cbind(
rep(2, nrow(rr_mean_time)),
groups,
seq(
from = 1,
by = 1,
length.out = nrow(rr_mean_full)
),
rr_mean_time
),
cbind(
rep(3, nrow(rr_mean_time_no_offset)),
groups,
seq(
from = 1,
by = 1,
length.out = nrow(rr_mean_full)
),
rr_mean_time_no_offset
),
cbind(
rep(4, nrow(rr_mean_pca)),
groups,
seq(
from = 1,
by = 1,
length.out = nrow(rr_mean_full)
),
rr_mean_pca
)
))
# Post-process the combined rate-ratio table for plotting: name the columns,
# attach per-model point weights, restore numeric types, dodge the x positions
# so models do not overlap, and relabel/reorder the model factor.
names(rr_mean_combo) <- c('Model', 'groups', 'group.index', 'lcl', 'mean.rr', 'ucl')
# Point weights: stacking weights when cross-validation ran, otherwise a
# constant weight of 1 for every estimate.
if (crossval) {
  point.weights2 <- stacking_weights.all.m
} else {
  point.weights2 <- data.frame(value = rep(1, nrow(rr_mean_combo)))
}
rr_mean_combo$point.weights <- point.weights2$value
# rbind() of character matrices above left every column as character/factor;
# convert the numeric columns back (via as.character to be factor-safe).
for (num.col in c('group.index', 'mean.rr', 'lcl', 'ucl')) {
  rr_mean_combo[[num.col]] <- as.numeric(as.character(rr_mean_combo[[num.col]]))
}
# Horizontally dodge models 2-4 so estimates for the same stratum are visible
# side by side on the plot.
model.dodge <- c('2' = 0.15, '3' = 0.3, '4' = 0.45)
for (model.code in names(model.dodge)) {
  shift.rows <- rr_mean_combo$Model == model.code
  rr_mean_combo$group.index[shift.rows] <-
    rr_mean_combo$group.index[shift.rows] + model.dodge[[model.code]]
}
# Replace the numeric model codes (1-4, assigned in the rbind above) with
# readable labels.
model.labels <- c(
  '1' = "Synthetic Controls",
  '2' = "Time trend",
  '3' = "Time trend (No offset)",
  '4' = "STL+PCA"
)
rr_mean_combo$Model <- unname(model.labels[as.character(rr_mean_combo$Model)])
cbPalette <- c("#1b9e77", "#d95f02", "#7570b3", "#e7298a")
rr_mean_combo$est.index <- as.factor(seq_len(nrow(rr_mean_combo)))
# Fix the model order for the plot axis/legend. Naming the levels explicitly
# is more robust than indexing into the alphabetical level order
# (levels(...)[c(2, 3, 4, 1)]), which silently breaks if a label changes.
rr_mean_combo$Model <- factor(
  rr_mean_combo$Model,
  levels = c("Synthetic Controls", "Time trend", "Time trend (No offset)", "STL+PCA")
)
# Cumulative cases prevented (per-stratum arrays) for each model variant.
cumsum_prevented <-
  sapply(groups,
         FUN = cumsum_func,
         quantiles = quantiles_full,
         simplify = 'array')
cumsum_prevented_pca <-
  sapply(groups,
         FUN = cumsum_func,
         quantiles = quantiles_pca,
         simplify = 'array')
cumsum_prevented_time <-
  sapply(groups,
         FUN = cumsum_func,
         quantiles = quantiles_time,
         simplify = 'array')
################################
# #
# Sensitivity Analyses #
# #
################################
# Pred Sensitivity Analysis--tests effect of changing prior on Ncovars from 3 to 2 to 10
# cl <- makeCluster(n_cores)
# clusterEvalQ(cl, {
# library(CausalImpact, quietly = TRUE)
# library(lubridate, quietly = TRUE)
# library(RcppRoll, quietly = TRUE)
# })
# clusterExport(
# cl,
# c(
# 'doCausalImpact',
# 'predSensitivityAnalysis',
# 'inclusionProb',
# 'rrPredQuantiles',
# 'getPred',
# 'getRR',
# 'groups',
# 'ds',
# 'data_full',
# 'denom_name',
# 'outcome_mean',
# 'outcome_sd',
# 'intervention_date',
# 'eval_period',
# 'post_period',
# 'time_points',
# 'n_seasons'
# ),
# environment()
# )
#
# sensitivity_analysis_pred_2 <-
# setNames(as.data.frame(t(
# parSapply(
# cl,
# groups,
# predSensitivityAnalysis,
# ds = ds,
# zoo_data = data_full,
# denom_name = denom_name,
# outcome_mean = outcome_mean,
# outcome_sd = outcome_sd,
# intervention_date = intervention_date,
# eval_period = eval_period,
# post_period = post_period,
# time_points = time_points,
# n_seasons = n_seasons,
# n_pred = 2
# )
# )), c('Lower CI', 'Point Estimate', 'Upper CI'))
#
# sensitivity_analysis_pred_10 <-
# setNames(as.data.frame(t(
# parSapply(
# cl,
# groups,
# predSensitivityAnalysis,
# ds = ds,
# zoo_data = data_full,
# denom_name = denom_name,
# outcome_mean = outcome_mean,
# outcome_sd = outcome_sd,
# intervention_date = intervention_date,
# eval_period = eval_period,
# post_period = post_period,
# time_points = time_points,
# n_seasons = n_seasons,
# n_pred = 10
# )
# )), c('Lower CI', 'Point Estimate', 'Upper CI'))
#
# stopCluster(cl)
#
# sensitivity_analysis_pred_2_intervals <-
# data.frame(
# 'Estimate (95% CI)' = makeInterval(
# sensitivity_analysis_pred_2[, 2],
# sensitivity_analysis_pred_2[, 3],
# sensitivity_analysis_pred_2[, 1]
# ),
# row.names = groups,
# check.names = FALSE
# )
#
# sensitivity_analysis_pred_10_intervals <-
# data.frame(
# 'Estimate (95% CI)' = makeInterval(
# sensitivity_analysis_pred_10[, 2],
# sensitivity_analysis_pred_10[, 3],
# sensitivity_analysis_pred_10[, 1]
# ),
# row.names = groups,
# check.names = FALSE
# )
# Weight sensitivity analysis: for each stratum, drop the top-weighted control
# series one at a time and re-run the synthetic-control fit, to check whether
# the estimate hinges on any single control variable.
# (Closing brace of this if() is on the following line of the file.)
if(sensitivity){
  # Flag strata with too few controls to support the analysis
  # (<= 3 covariates beyond the n_seasons - 1 seasonal dummies).
  bad_sensitivity_groups <- # sapply over each age_group, check if number of columns is equal or less than 3, later exclude those groups
    sapply(covars_full, function (covar) {
      ncol(covar) <= n_seasons-1+3
    })
  # Restrict every input to the strata that passed the check above.
  sensitivity_covars_full <- covars_full[!bad_sensitivity_groups]
  sensitivity_ds <- ds[!bad_sensitivity_groups]
  sensitivity_impact_full <- impact_full[!bad_sensitivity_groups]
  sensitivity_groups <- groups[!bad_sensitivity_groups]
  #Weight Sensitivity Analysis - top weighted variables are excluded and analysis is re-run.
  if (length(sensitivity_groups)!=0) {
    # Parallel re-fits: one weightSensitivityAnalysis() call per stratum.
    cl <- makeCluster(n_cores)
    clusterEvalQ(cl, {
      library(pogit, quietly = TRUE)
      library(lubridate, quietly = TRUE)
      library(RcppRoll, quietly = TRUE)
    })
    # Ship the helper functions and shared parameters to the workers.
    clusterExport(
      cl,
      c(
        'sensitivity_ds',
        'doCausalImpact',
        'year_def',
        'weightSensitivityAnalysis',
        'rrPredQuantiles',
        'sensitivity_groups',
        'intervention_date',
        'outcome',
        'time_points',
        'n_seasons',
        'eval_period',
        'post_period',
        'crossval'
      ),
      environment()
    )
    sensitivity_analysis_full <-
      setNames(
        parLapply(
          cl,
          sensitivity_groups,
          weightSensitivityAnalysis,
          covars = sensitivity_covars_full,
          ds = sensitivity_ds,
          impact = sensitivity_impact_full,
          time_points = time_points,
          intervention_date = intervention_date,
          n_seasons = n_seasons,
          outcome = outcome,
          eval_period = eval_period,
          post_period = post_period
        ),
        sensitivity_groups
      )
    stopCluster(cl)
    # Extract model predictions for every sensitivity re-fit
    # (one list of prediction sets per stratum).
    sensitivity_pred_quantiles <-
      lapply(
        sensitivity_analysis_full,
        FUN = function(sensitivity_analysis) {
          pred_list <-
            vector(mode = 'list', length = length(sensitivity_analysis))
          for (sensitivity_index in 1:length(sensitivity_analysis)) {
            pred_list[[sensitivity_index]] <-
              getPred(sensitivity_analysis[[sensitivity_index]])
          }
          return(pred_list)
        }
      )
    #Table of rate ratios for each sensitivity analysis level
    sensitivity_table <-
      t(
        sapply(
          sensitivity_groups,
          sensitivityTable,
          sensitivity_analysis = sensitivity_analysis_full,
          original_rr = rr_mean_full
        )
      )
    # Formatted "estimate (95% CI)" strings plus, for each of the three dropped
    # controls, its name, inclusion probability, and re-fit estimate.
    # NOTE(review): CI columns are pulled by hard-coded position
    # (2,3,1 / 7,8,6 / 12,13,11 / 17,18,16); this assumes the column layout
    # produced by sensitivityTable() -- confirm if that helper changes.
    sensitivity_table_intervals <-
      data.frame(
        'Estimate (95% CI)' = makeInterval(sensitivity_table[, 2], sensitivity_table[, 3], sensitivity_table[, 1]),
        'Top Control 1' = sensitivity_table[, 'Top Control 1'],
        'Inclusion Probability of Control 1' = sensitivity_table[, 'Inclusion Probability of Control 1'],
        'Control 1 Estimate (95% CI)' = makeInterval(sensitivity_table[, 7], sensitivity_table[, 8], sensitivity_table[, 6]),
        'Top Control 2' = sensitivity_table[, 'Top Control 2'],
        'Inclusion Probability of Control 2' = sensitivity_table[, 'Inclusion Probability of Control 2'],
        'Control 2 Estimate (95% CI)' = makeInterval(sensitivity_table[, 12], sensitivity_table[, 13], sensitivity_table[, 11]),
        'Top Control 3' = sensitivity_table[, 'Top Control 3'],
        'Inclusion Probability of Control 3' = sensitivity_table[, 'Inclusion Probability of Control 3'],
        'Control 3 Estimate (95% CI)' = makeInterval(sensitivity_table[, 17], sensitivity_table[, 18], sensitivity_table[, 16]),
        check.names = FALSE
      )
    # Side-by-side comparison with the time-trend (ITS) estimates for the
    # same retained strata.
    rr_table <- cbind.data.frame(round(rr_mean_time[!bad_sensitivity_groups,], 2), sensitivity_table)
    rr_table_intervals <- cbind('ITS Estimate (95% CI)' = rr_mean_time_intervals[!bad_sensitivity_groups,], sensitivity_table_intervals)
  } else {
    # No stratum had enough controls for a sensitivity analysis.
    sensitivity_table_intervals <- NA
  }
} |
# set working directory to source file
library(tidyverse)
library(gridExtra)
source("C:/Users/jflun/Dropbox/Dissertation/Tuning Research/Grid Search/Plot Scripts/get_data.R")

# Grid-search results for GBM on the binary Lichen data.
lig <- datagridClassGBM("C:/Users/jflun/Dropbox/Dissertation/Tuning Research/Grid Search/Grid Data/GBM/Binary/Lichen Small",
                        dataset = "Lichen")

# Heat map of one grid-search metric over interaction.depth x n.trees,
# faceted by minimum node size.
#   dat      data frame of grid-search results for one shrinkage level
#   fill_var name of the column to map to fill ("Error", "ErrUCL" or "Time")
#   title    plot title
#   ylab     y-axis label
#   best     optional data frame of the best parameter combinations,
#            overlaid as orange points
plot_grid_metric <- function(dat, fill_var, title, ylab = "n.trees", best = NULL) {
  g <- ggplot(dat, aes(x = IntDepth, y = NumTrees)) +
    geom_tile(aes(fill = .data[[fill_var]]))
  if (!is.null(best)) {
    g <- g + geom_point(data = best, aes(IntDepth, NumTrees),
                        size = 2, color = "orange1")
  }
  g +
    theme_bw() +
    labs(x = "interaction.depth", y = ylab) +
    ggtitle(title) +
    facet_wrap(~MinNode, ncol = 4)
}

#------------------------------------------------------------------------------
#
# Graphs for all data
#
#------------------------------------------------------------------------------
alldat <- lig$datAll

# One plot per metric (Error, ErrUCL, Time) and shrinkage level 10^(-i).
# Titles are generated from i, which fixes the copy-paste bug in the original
# script where the shrinkage = 10^(-3) section was labelled 10^(-2).
g_err <- g_ucl <- g_time <- vector("list", 3)
for (i in 1:3) {
  li <- alldat[alldat$Shrinkage == -i & alldat$cat == "Lichen", ]
  shrink_lab <- sprintf("Shrinkage = 10^(-%d)", i)
  g_err[[i]] <- plot_grid_metric(li, "Error",
                                 paste0("All Errors for Lichen Data, ", shrink_lab),
                                 ylab = "Number of Trees")
  g_ucl[[i]] <- plot_grid_metric(li, "ErrUCL",
                                 paste0("All Error UCLs for Lichen Data, ", shrink_lab))
  g_time[[i]] <- plot_grid_metric(li, "Time",
                                  paste0("All Times for Lichen Data, ", shrink_lab))
}

#------------------------------------------------------------------------------
# Graphs for best 20 percent
#------------------------------------------------------------------------------
dat20A <- lig$dat20A
dat20LCL <- lig$dat20LCL
dat20Time <- lig$dat20Time
best20A <- lig$top20acc
best20LCL <- lig$top20LCL
best20Time <- lig$top20Time

# Same layout as above, with the top parameter combinations overlaid.
b_err <- b_ucl <- b_time <- vector("list", 3)
for (i in 1:3) {
  shrink_lab <- sprintf("Shrinkage = 10^(-%d)", i)
  b_err[[i]] <- plot_grid_metric(dat20A[dat20A$Shrinkage == -i, ], "Error",
                                 paste0("Best 20% Errors for Lichen Data, ", shrink_lab),
                                 best = best20A[best20A$Shrinkage == -i, ])
  b_ucl[[i]] <- plot_grid_metric(dat20LCL[dat20LCL$Shrinkage == -i, ], "ErrUCL",
                                 paste0("Best 20% UCLs for Lichen Data, ", shrink_lab),
                                 best = best20LCL[best20LCL$Shrinkage == -i, ])
  b_time[[i]] <- plot_grid_metric(dat20Time[dat20Time$Shrinkage == -i, ], "Time",
                                  paste0("Best 20% Times for Lichen Data, ", shrink_lab),
                                  best = best20Time[best20Time$Shrinkage == -i, ])
}

# One page per metric: all-data plots, then best-20% plots, each stacking the
# three shrinkage levels (same page order as the original script).
pdf("../Grid Search Plots/GBM/Binary/GBM_Binary_Small_Lichen.pdf", height = 9, width = 6.5)
do.call(grid.arrange, c(g_err,  ncol = 1))
do.call(grid.arrange, c(b_err,  ncol = 1))
do.call(grid.arrange, c(g_ucl,  ncol = 1))
do.call(grid.arrange, c(b_ucl,  ncol = 1))
do.call(grid.arrange, c(g_time, ncol = 1))
do.call(grid.arrange, c(b_time, ncol = 1))
dev.off()
| /Tuning Research/Examples of Grid Search Code/GBM/Binary/GBM_binary_plots_small_Lichen.R | no_license | jillbo1000/Dissertation-code | R | false | false | 8,495 | r | # set working directory to source file
library(tidyverse)
library(gridExtra)
source("C:/Users/jflun/Dropbox/Dissertation/Tuning Research/Grid Search/Plot Scripts/get_data.R")
lig <- datagridClassGBM("C:/Users/jflun/Dropbox/Dissertation/Tuning Research/Grid Search/Grid Data/GBM/Binary/Lichen Small",
dataset = "Lichen")
#------------------------------------------------------------------------------
#
# Graphs for all data
#
#------------------------------------------------------------------------------
alldat <- lig$datAll
alldat1 <- alldat[alldat$Shrinkage == -1, ]
alldat2 <- alldat[alldat$Shrinkage == -2, ]
alldat3 <- alldat[alldat$Shrinkage == -3, ]
# Lichen Errors for shrinkage = 10^(-1)
li1.1 <- alldat1[alldat1$cat == "Lichen", ]
g1.1 <- ggplot(li1.1, aes(x = IntDepth, y = NumTrees)) +
geom_tile(aes(fill = Error)) +
theme_bw() +
labs(x = "interaction.depth", y = "Number of Trees") +
ggtitle("All Errors for Lichen Data, Shrinkage = 10^(-1)") +
facet_wrap(~MinNode, ncol = 4)
g2.1 <- ggplot(li1.1, aes(x = IntDepth, y = NumTrees)) +
geom_tile(aes(fill = ErrUCL)) +
theme_bw() +
labs(x = "interaction.depth", y = "n.trees") +
ggtitle("All Error UCLs for Lichen Data, Shrinkage = 10^(-1)") +
facet_wrap(~MinNode, ncol = 4)
g3.1 <- ggplot(li1.1, aes(x = IntDepth, y = NumTrees)) +
geom_tile(aes(fill = Time)) +
theme_bw() +
labs(x = "interaction.depth", y = "n.trees") +
ggtitle("All Times for Lichen Data, Shrinkage = 10^(-1)") +
facet_wrap(~MinNode, ncol = 4)
# Lichen Errors for shrinkage = 10^(-2)
li1.2 <- alldat2[alldat2$cat == "Lichen", ]
g1.2 <- ggplot(li1.2, aes(x = IntDepth, y = NumTrees)) +
geom_tile(aes(fill = Error)) +
theme_bw() +
labs(x = "interaction.depth", y = "Number of Trees") +
ggtitle("All Errors for Lichen Data, Shrinkage = 10^(-2)") +
facet_wrap(~MinNode, ncol = 4)
g2.2 <- ggplot(li1.2, aes(x = IntDepth, y = NumTrees)) +
geom_tile(aes(fill = ErrUCL)) +
theme_bw() +
labs(x = "interaction.depth", y = "n.trees") +
ggtitle("All Error UCLs for Lichen Data, Shrinkage = 10^(-2)") +
facet_wrap(~MinNode, ncol = 4)
g3.2 <- ggplot(li1.2, aes(x = IntDepth, y = NumTrees)) +
geom_tile(aes(fill = Time)) +
theme_bw() +
labs(x = "interaction.depth", y = "n.trees") +
ggtitle("All Times for Lichen Data, Shrinkage = 10^(-2)") +
facet_wrap(~MinNode, ncol = 4)
# Lichen Errors for shrinkage = 10^(-3)
li1.3 <- alldat3[alldat3$cat == "Lichen", ]
g1.3 <- ggplot(li1.3, aes(x = IntDepth, y = NumTrees)) +
geom_tile(aes(fill = Error)) +
theme_bw() +
labs(x = "interaction.depth", y = "Number of Trees") +
ggtitle("All Errors for Lichen Data, Shrinkage = 10^(-3)") +
facet_wrap(~MinNode, ncol = 4)
g2.3 <- ggplot(li1.3, aes(x = IntDepth, y = NumTrees)) +
geom_tile(aes(fill = ErrUCL)) +
theme_bw() +
labs(x = "interaction.depth", y = "n.trees") +
ggtitle("All Error UCLs for Lichen Data, Shrinkage = 10^(-3)") +
facet_wrap(~MinNode, ncol = 4)
g3.3 <- ggplot(li1.3, aes(x = IntDepth, y = NumTrees)) +
geom_tile(aes(fill = Time)) +
theme_bw() +
labs(x = "interaction.depth", y = "n.trees") +
ggtitle("All Times for Lichen Data, Shrinkage = 10^(-3)") +
facet_wrap(~MinNode, ncol = 4)
#------------------------------------------------------------------------------
# Graphs for best 20 percent
#------------------------------------------------------------------------------
dat20A <- lig$dat20A
dat20LCL <- lig$dat20LCL
dat20Time <- lig$dat20Time
best20A <- lig$top20acc
best20LCL <- lig$top20LCL
best20Time <- lig$top20Time
dat20A.1 <- dat20A[dat20A$Shrinkage == -1, ]
dat20A.2 <- dat20A[dat20A$Shrinkage == -2, ]
dat20A.3 <- dat20A[dat20A$Shrinkage == -3, ]
dat20LCL.1 <- dat20LCL[dat20LCL$Shrinkage == -1, ]
dat20LCL.2 <- dat20LCL[dat20LCL$Shrinkage == -2, ]
dat20LCL.3 <- dat20LCL[dat20LCL$Shrinkage == -3, ]
dat20Time.1 <- dat20Time[dat20Time$Shrinkage == -1, ]
dat20Time.2 <- dat20Time[dat20Time$Shrinkage == -2, ]
dat20Time.3 <- dat20Time[dat20Time$Shrinkage == -3, ]
best20A.1 <- best20A[best20A$Shrinkage == -1, ]
best20A.2 <- best20A[best20A$Shrinkage == -2, ]
best20A.3 <- best20A[best20A$Shrinkage == -3, ]
best20LCL.1 <- best20LCL[best20LCL$Shrinkage == -1, ]
best20LCL.2 <- best20LCL[best20LCL$Shrinkage == -2, ]
best20LCL.3 <- best20LCL[best20LCL$Shrinkage == -3, ]
best20Time.1 <- best20Time[best20Time$Shrinkage == -1, ]
best20Time.2 <- best20Time[best20Time$Shrinkage == -2, ]
best20Time.3 <- best20Time[best20Time$Shrinkage == -3, ]
# Plots for shrinkage 10^(-1)
g4.1 <- ggplot(dat20A.1, aes(x = IntDepth, y = NumTrees)) +
geom_tile(aes(fill = Error)) +
geom_point(data = best20A.1, aes(IntDepth, NumTrees),
size = 2, color = "orange1") +
theme_bw() +
labs(x = "interaction.depth", y = "n.trees") +
ggtitle("Best 20% Errors for Lichen Data, Shrinkage = 10^(-1)") +
facet_wrap(~MinNode, ncol = 4)
g5.1 <- ggplot(dat20LCL.1, aes(x = IntDepth, y = NumTrees)) +
geom_tile(aes(fill = ErrUCL)) +
geom_point(data = best20LCL.1, aes(IntDepth, NumTrees),
size = 2, color = "orange1") +
theme_bw() +
labs(x = "interaction.depth", y = "n.trees") +
ggtitle("Best 20% UCLs for Lichen Data, Shrinkage = 10^(-1)") +
facet_wrap(~MinNode, ncol = 4)
g6.1 <- ggplot(dat20Time.1, aes(x = IntDepth, y = NumTrees)) +
geom_tile(aes(fill = Time)) +
geom_point(data = best20Time.1, aes(IntDepth, NumTrees),
size = 2, color = "orange1") +
theme_bw() +
labs(x = "interaction.depth", y = "n.trees") +
ggtitle("Best 20% Times for Lichen Data, Shrinkage = 10^(-1)") +
facet_wrap(~MinNode, ncol = 4)
# Plots for shrinkage 10^(-2)
g4.2 <- ggplot(dat20A.2, aes(x = IntDepth, y = NumTrees)) +
geom_tile(aes(fill = Error)) +
geom_point(data = best20A.2, aes(IntDepth, NumTrees),
size = 2, color = "orange1") +
theme_bw() +
labs(x = "interaction.depth", y = "n.trees") +
ggtitle("Best 20% Errors for Lichen Data, Shrinkage = 10^(-2)") +
facet_wrap(~MinNode, ncol = 4)
g5.2 <- ggplot(dat20LCL.2, aes(x = IntDepth, y = NumTrees)) +
geom_tile(aes(fill = ErrUCL)) +
geom_point(data = best20LCL.2, aes(IntDepth, NumTrees),
size = 2, color = "orange1") +
theme_bw() +
labs(x = "interaction.depth", y = "n.trees") +
ggtitle("Best 20% UCLs for Lichen Data, Shrinkage = 10^(-2)") +
facet_wrap(~MinNode, ncol = 4)
g6.2 <- ggplot(dat20Time.2, aes(x = IntDepth, y = NumTrees)) +
geom_tile(aes(fill = Time)) +
geom_point(data = best20Time.2, aes(IntDepth, NumTrees),
size = 2, color = "orange1") +
theme_bw() +
labs(x = "interaction.depth", y = "n.trees") +
ggtitle("Best 20% Times for Lichen Data, Shrinkage = 10^(-2)") +
facet_wrap(~MinNode, ncol = 4)
# Plots for shrinkage 10^(-3)
g4.3 <- ggplot(dat20A.3, aes(x = IntDepth, y = NumTrees)) +
geom_tile(aes(fill = Error)) +
geom_point(data = best20A.3, aes(IntDepth, NumTrees),
size = 2, color = "orange1") +
theme_bw() +
labs(x = "interaction.depth", y = "n.trees") +
ggtitle("Best 20% Errors for Lichen Data, Shrinkage = 10^(-3)") +
facet_wrap(~MinNode, ncol = 4)
g5.3 <- ggplot(dat20LCL.3, aes(x = IntDepth, y = NumTrees)) +
geom_tile(aes(fill = ErrUCL)) +
geom_point(data = best20LCL.3, aes(IntDepth, NumTrees),
size = 2, color = "orange1") +
theme_bw() +
labs(x = "interaction.depth", y = "n.trees") +
ggtitle("Best 20% UCLs for Lichen Data, Shrinkage = 10^(-3)") +
facet_wrap(~MinNode, ncol = 4)
g6.3 <- ggplot(dat20Time.3, aes(x = IntDepth, y = NumTrees)) +
geom_tile(aes(fill = Time)) +
geom_point(data = best20Time.3, aes(IntDepth, NumTrees),
size = 2, color = "orange1") +
theme_bw() +
labs(x = "interaction.depth", y = "n.trees") +
ggtitle("Best 20% Times for Lichen Data, Shrinkage = 10^(-3)") +
facet_wrap(~MinNode, ncol = 4)
pdf("../Grid Search Plots/GBM/Binary/GBM_Binary_Small_Lichen.pdf", height = 9, width = 6.5)
grid.arrange(g1.1, g1.2, g1.3, ncol = 1)
grid.arrange(g4.1, g4.2, g4.3, ncol = 1)
grid.arrange(g2.1, g2.2, g2.3, ncol = 1)
grid.arrange(g5.1, g5.2, g5.3, ncol = 1)
grid.arrange(g3.1, g3.2, g3.3, ncol = 1)
grid.arrange(g6.1, g6.2, g6.3, ncol = 1)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/delete_one_group.R
\name{delete_MNAR_one_group}
\alias{delete_MNAR_one_group}
\title{Create MNAR values by deleting values in one of two groups}
\usage{
delete_MNAR_one_group(
ds,
p,
cols_mis,
cutoff_fun = median,
prop = 0.5,
use_lpSolve = TRUE,
ordered_as_unordered = FALSE,
n_mis_stochastic = FALSE,
...,
miss_cols,
stochastic
)
}
\arguments{
\item{ds}{A data frame or matrix in which missing values will be created.}
\item{p}{A numeric vector with length one or equal to length \code{cols_mis};
the probability that a value is missing.}
\item{cols_mis}{A vector of column names or indices of columns in which
missing values will be created.}
\item{cutoff_fun}{Function that calculates the cutoff values in the
\code{cols_ctrl}.}
\item{prop}{Numeric of length one; (minimum) proportion of rows in group 1
(only used for unordered factors).}
\item{use_lpSolve}{Logical; should lpSolve be used for the determination of
groups, if \code{cols_ctrl[i]} is an unordered factor.}
\item{ordered_as_unordered}{Logical; should ordered factors be treated as
unordered factors.}
\item{n_mis_stochastic}{Logical, should the number of missing values be
stochastic? If \code{n_mis_stochastic = TRUE}, the number of missing values
for a column with missing values \code{cols_mis[i]} is a random variable
with expected value \code{nrow(ds) * p[i]}. If \code{n_mis_stochastic =
FALSE}, the number of missing values will be deterministic. Normally, the
number of missing values for a column with missing values
\code{cols_mis[i]} is \code{round(nrow(ds) * p[i])}. Possible deviations
from this value, if any exists, are documented in Details.}
\item{...}{Further arguments passed to \code{cutoff_fun}.}
\item{miss_cols}{Deprecated, use \code{cols_mis} instead.}
\item{stochastic}{Deprecated, use \code{n_mis_stochastic} instead.}
}
\value{
An object of the same class as \code{ds} with missing values.
}
\description{
Create missing not at random (MNAR) values by deleting values in one of two
groups in a data frame or a matrix
}
\details{
The functions \code{delete_MNAR_one_group} and \code{\link{delete_MAR_one_group}} are sisters. The only difference between these two functions is the column that controls the generation of missing values. In \code{\link{delete_MAR_one_group}} a separate column \code{cols_ctrl[i]} controls the generation of missing values in \code{cols_mis[i]}. In contrast, in \code{delete_MNAR_one_group} the generation of missing values in \code{cols_mis[i]} is controlled by \code{cols_mis[i]} itself. All other aspects are identical for both functions. Therefore, further details can be found in \code{\link{delete_MAR_one_group}}.
}
\examples{
ds <- data.frame(X = 1:20, Y = 101:120)
delete_MNAR_one_group(ds, 0.2, "X")
}
\references{
Santos, M. S., Pereira, R. C., Costa, A. F., Soares, J. P.,
Santos, J., & Abreu, P. H. (2019). Generating Synthetic Missing Data: A
Review by Missing Mechanism. \emph{IEEE Access}, 7, 11651-11667
}
\seealso{
\code{\link{delete_MAR_one_group}}
Other functions to create MNAR:
\code{\link{delete_MNAR_1_to_x}()},
\code{\link{delete_MNAR_censoring}()},
\code{\link{delete_MNAR_rank}()}
}
\concept{functions to create MNAR}
| /man/delete_MNAR_one_group.Rd | no_license | cran/missMethods | R | false | true | 3,369 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/delete_one_group.R
\name{delete_MNAR_one_group}
\alias{delete_MNAR_one_group}
\title{Create MNAR values by deleting values in one of two groups}
\usage{
delete_MNAR_one_group(
ds,
p,
cols_mis,
cutoff_fun = median,
prop = 0.5,
use_lpSolve = TRUE,
ordered_as_unordered = FALSE,
n_mis_stochastic = FALSE,
...,
miss_cols,
stochastic
)
}
\arguments{
\item{ds}{A data frame or matrix in which missing values will be created.}
\item{p}{A numeric vector with length one or equal to length \code{cols_mis};
the probability that a value is missing.}
\item{cols_mis}{A vector of column names or indices of columns in which
missing values will be created.}
\item{cutoff_fun}{Function that calculates the cutoff values in the
\code{cols_ctrl}.}
\item{prop}{Numeric of length one; (minimum) proportion of rows in group 1
(only used for unordered factors).}
\item{use_lpSolve}{Logical; should lpSolve be used for the determination of
groups, if \code{cols_ctrl[i]} is an unordered factor.}
\item{ordered_as_unordered}{Logical; should ordered factors be treated as
unordered factors.}
\item{n_mis_stochastic}{Logical, should the number of missing values be
stochastic? If \code{n_mis_stochastic = TRUE}, the number of missing values
for a column with missing values \code{cols_mis[i]} is a random variable
with expected value \code{nrow(ds) * p[i]}. If \code{n_mis_stochastic =
FALSE}, the number of missing values will be deterministic. Normally, the
number of missing values for a column with missing values
\code{cols_mis[i]} is \code{round(nrow(ds) * p[i])}. Possible deviations
from this value, if any exists, are documented in Details.}
\item{...}{Further arguments passed to \code{cutoff_fun}.}
\item{miss_cols}{Deprecated, use \code{cols_mis} instead.}
\item{stochastic}{Deprecated, use \code{n_mis_stochastic} instead.}
}
\value{
An object of the same class as \code{ds} with missing values.
}
\description{
Create missing not at random (MNAR) values by deleting values in one of two
groups in a data frame or a matrix
}
\details{
The functions \code{delete_MNAR_one_group} and \code{\link{delete_MAR_one_group}} are sisters. The only difference between these two functions is the column that controls the generation of missing values. In \code{\link{delete_MAR_one_group}} a separate column \code{cols_ctrl[i]} controls the generation of missing values in \code{cols_mis[i]}. In contrast, in \code{delete_MNAR_one_group} the generation of missing values in \code{cols_mis[i]} is controlled by \code{cols_mis[i]} itself. All other aspects are identical for both functions. Therefore, further details can be found in \code{\link{delete_MAR_one_group}}.
}
\examples{
ds <- data.frame(X = 1:20, Y = 101:120)
delete_MNAR_one_group(ds, 0.2, "X")
}
\references{
Santos, M. S., Pereira, R. C., Costa, A. F., Soares, J. P.,
Santos, J., & Abreu, P. H. (2019). Generating Synthetic Missing Data: A
Review by Missing Mechanism. \emph{IEEE Access}, 7, 11651-11667
}
\seealso{
\code{\link{delete_MAR_one_group}}
Other functions to create MNAR:
\code{\link{delete_MNAR_1_to_x}()},
\code{\link{delete_MNAR_censoring}()},
\code{\link{delete_MNAR_rank}()}
}
\concept{functions to create MNAR}
|
# Desc: A script exploring how to make, manipulate and save maps made using
# coordinates specified directly in code.
# NOTE: the original began with rm(list = ls()); wiping the workspace from
# inside a script can destroy the caller's objects, so it has been removed —
# run the script in a fresh session instead.
# library() stops with an error if a package is missing, unlike require(),
# which only returns FALSE and lets the script fail later.
library(raster)
library(sf)
library(viridis)
library(units)

# Population density (people per km^2) for the regions drawn below.
pop_dens <- data.frame(
  n_km2 = c(260, 67, 151, 4500, 133),
  country = c('England', 'Scotland', 'Wales', 'London', 'Northern Ireland')
)
print(pop_dens)
##############################################
##########MAKING VECTORS FROM COORDINATES#####
##############################################
# Create coordinate for each country
# - this is a list of sets of coordinates forming the edge of the polygon
# - note that they have to _close_ (have the same coordinate at either end)
scotland <- rbind(c(-5, 58.6), c(-3, 58.6), c(-4, 57.6), c(-1.5, 57.6), c(-2, 55.8), c(-3, 55), c(-5, 55), c(-6, 56), c(-5, 58.6))
england <- rbind(c(-2,55.8),c(0.5, 52.8), c(1.6, 52.8), c(0.7, 50.7), c(-5.7,50), c(-2.7, 51.5), c(-3, 53.4),c(-3, 55), c(-2,55.8))
wales <- rbind(c(-2.5, 51.3), c(-5.3,51.8), c(-4.5, 53.4), c(-2.8, 53.4), c(-2.5, 51.3))
ireland <- rbind(c(-10,51.5), c(-10, 54.2), c(-7.5, 55.3), c(-5.9, 55.3), c(-5.9, 52.2), c(-10,51.5))
# Convert these coordinate into feature geometries
# - these are simple coordinates sets with no projection information.
scotland <- st_polygon(list(scotland))
england <- st_polygon(list(england))
wales <- st_polygon(list(wales))
ireland <- st_polygon(list(ireland))
# Combine geometries into a simple feature column
uk_eire <- st_sfc(wales, england, scotland, ireland, crs = 4326)
plot(uk_eire, asp = 1)
uk_eire_capitals <- data.frame(long = c(-.1, -3.2, -3.2, -6, -6.25), lat = c(51.5, 51.5, 55.8, 54.6, 53.3), name = c('London', 'Cardiff', 'Edinburgh', 'Belfast', 'Dublin'))
uk_eire_capitals <- st_as_sf(uk_eire_capitals, coords = c('long', 'lat'), crs = 4326)
########################################################
##########VECTOR GEOMETRY OPERATIONS#####################
########################################################
st_pauls <- st_point(x = c(-0.098056, 51.513611))
london <- st_buffer(st_pauls, 0.25)
england_no_london <- st_difference(england, london)
# Count the points and show the number of rings within the polygon features
lengths(scotland)
lengths(england_no_london)
wales <- st_difference(wales, england)
# A rough area that includes Northern Ireland and surrounding sea.
# - note the alternative way of providing the coordinates.
ni_area <- st_polygon(list(cbind(x=c(-8.1, -6, -5, -6, -8.1), y=c(54.4, 56, 55, 54, 54.4))))
northern_ireland <- st_intersection(ireland, ni_area)
eire <- st_difference(ireland, ni_area)
# Combine the final geometries
uk_eire <- st_sfc(wales, england_no_london, scotland, london, northern_ireland, eire, crs = 4326)
########################################################
##############FEATURES AND GEOMETRIES###################
########################################################
# make the UK into a single feature
uk_country <- st_union(uk_eire[-6])
# Compare six polygon features with one multipolygon feature
print(uk_eire)
print(uk_country)
# Plot them
par(mfrow = c(1, 2), mar = c(3, 3, 1, 1))
plot(uk_eire, asp = 1, col = rainbow(6))
plot(st_geometry(uk_eire_capitals), add = T)
plot(uk_country, asp = 1, col = 'lightblue')
########################################################
############VECTOR DATA AND ATTRIBUTES##################
########################################################
uk_eire <- st_sf(name = c('Wales', 'England', 'Scotland', 'London', 'Northern Ireland', 'Eire'), geometry = uk_eire)
plot(uk_eire, asp = 1)
uk_eire$capital <- c('London', 'Edinburgh', ' Cardiff', NA, 'Belfast', 'Dublin')
uk_eire <- merge(uk_eire, pop_dens, by.x = 'name', by.y = 'country', all.x = T)
print(uk_eire)
##prevents crash?
dev.off()
########################################################
#################SPATIAL ATTRIBUTES#####################
########################################################
uk_eire_centroids <- st_centroid(uk_eire)
uk_eire$area <- st_area(uk_eire) ## causing crashes?!
# The length of a polygon is the perimeter length
# - note that this includes the length of internal holes.
uk_eire$length <- st_length(uk_eire)
# look at the result
print(uk_eire)
# You can change units in a neat way
uk_eire$area <- set_units(uk_eire$area, 'km^2')
uk_eire$length <- set_units(uk_eire$length, 'km')
# And which won't let you make silly error like turning a length into weight
#uk_eire$area <- set_units(uk_eire$area, 'kg')
#Or you can simply convert the 'units' version to simple numbers
uk_eire$length <- as.numeric(uk_eire$length) # drops the 'units' class, leaving a plain numeric vector
print(uk_eire)
st_distance(uk_eire)
st_distance(uk_eire_centroids)
########################################################
#############PLOTTING sf OBJECTS########################
########################################################
plot(uk_eire['n_km2'], asp = 1, logz = T) #task: to log the scale, use logz or log data beforehand
########################################################
#############REPROJECTING VECTOR DATA###################
########################################################
# British National Grid (EPSG:27700)
uk_eire_BNG <- st_transform(uk_eire, 27700)
# The bounding box of the data shows the change in units
st_bbox(uk_eire)
st_bbox(uk_eire_BNG)
# UTM50N (EPSG:32650)
uk_eire_UTM50N <- st_transform(uk_eire, 32650)
# plot the results
par(mfrow = c(1, 3), mar = c(3, 3, 1, 1))
plot(st_geometry(uk_eire), asp = 1, axes = T, main = 'WGS 84')
plot(st_geometry(uk_eire), asp = 1, axes = T, main = 'OSGB 1936 / BNG')
plot(st_geometry(uk_eire_UTM50N), axes = T, main = 'UTM 50N')
########################################################
###############Proj4 STRINGS############################
########################################################
# Set up some points separated by 1 degree latitude and longitude from St. Pauls
st_pauls <- st_sfc(st_pauls, crs = 4326)
one_deg_west_pt <- st_sfc(st_pauls - c(1, 0), crs = 4326) # near Goring
one_deg_north_pt <- st_sfc(st_pauls + c(0, 1), crs = 4326) # near Peterborough
# Calculate the distance between St pauls and each point
st_distance(st_pauls, one_deg_west_pt)
st_distance(st_pauls, one_deg_north_pt)
st_distance(st_transform(st_pauls, 27700), st_transform(one_deg_west_pt, 27700))
####IMPROVE LONDON CIRCLE###
## task -make london buffer 25km
londonBNG <- st_buffer(st_transform(st_pauls,27700), 25000)
# In one line, transform england to BNG and cut out London
england_no_london_BNG <- st_difference(st_transform(st_sfc(england, crs = 4326), 27700), londonBNG)
# project the other features and combine everything together
others_BNG <- st_transform(st_sfc(eire, northern_ireland, scotland, wales, crs = 4326), 27700)
corrected <- c(others_BNG, londonBNG, england_no_london_BNG)
# plot that
graphics.off()
par(mar = c(3, 3, 1, 1))
plot(corrected, main = "25km radius London", axes = T)
########################################################
#############RASTERS####################################
#######Creating a raster################################
########################################################
# create an empty raster object covering UK and Eire
uk_raster_WGS84 <- raster(xmn = -11, xmx = 2, ymn = 49.5, ymx = 59, res = .5, crs = "+init=EPSG:4326")
hasValues(uk_raster_WGS84)
## add data to raster
values(uk_raster_WGS84) <- seq(length(uk_raster_WGS84))
plot(uk_raster_WGS84)
plot(st_geometry(uk_eire), add = T, border = 'black', lwd = 2, col = "#FFFFFF44")
#############CHANGING RASTER RESOLUTION############
# define a simple 4x4 square raster
# (values are laid out row by row; with fact = 2 below, each 2x2 block of
# cells is collapsed into a single cell of the coarser raster)
m <- matrix(c(1, 1, 3, 3,
1, 2, 4, 3,
5, 5, 7, 8,
6, 6, 7, 7), ncol = 4, byrow = T)
square <- raster(m)
#########AGGREGATING RASTERS############
# aggregate() coarsens a raster: `fact` gives the block size and `fun` how
# the values within each block are combined.
# average values
square_agg_mean <- aggregate(square, fact = 2, fun = mean)
values(square_agg_mean)
# Maximum values
square_agg_max <- aggregate(square, fact = 2, fun = max)
values(square_agg_max)
# modal values for categories (most frequent value in each block)
square_agg_modal <- aggregate(square, fact = 2, fun = modal)
values(square_agg_modal)
###############DISAGGREGATING RASTERS#################
# copy parents
square_disagg <- disaggregate(square, fact =2)
# Interpolate
square_disagg_interp <- disaggregate(square, fact = 2, method = 'bilinear')
################REPROJECTING A RASTER###################
# make two simple `sfc` objects containing points in the lower left and top right of the two grids
uk_pts_WGS84 <- st_sfc(st_point(c(-11, 49.5)), st_point(c(2, 59)), crs = 4326)
uk_pts_BNG <- st_sfc(st_point(c(-2e5, 0)), st_point(c(7e5, 1e6)), crs = 27700)
# use st_make_grid to quickly create a polygon grid with the right cellsize
uk_grid_WGS84 <- st_make_grid(uk_pts_WGS84, cellsize = 0.5)
uk_grid_BNG <- st_make_grid(uk_pts_BNG, cellsize = 1e5)
# Reproject BNG grid into WGS4
uk_grid_BNG_as_WGS84 <- st_transform(uk_grid_BNG, 4326)
# Plot the features
plot(uk_grid_WGS84, asp = 1, border = 'grey', xlim = c(-13,4))
plot(st_geometry(uk_eire), add = T, border = 'darkgreen', lwd = 2)
plot(uk_grid_BNG_as_WGS84, border = 'red', add = T)
# Create the target raster
uk_raster_BNG <- raster(xmn=-200000, xmx=700000, ymn=0, ymx=1000000, res=100000, crs='+init=EPSG:27700')
#uk_raster_BNG_interp <- projectRaster(uk_raster_WGS84, uk_raster_BNG, method='bilinear')
#uk_raster_BNG_ngb <- projectRaster(uk_raster_WGS84, uk_raster_BNG, method='ngb')
#par(mfrow=c(1,3), mar=c(1,1,2,1))
#plot(uk_raster_BNG_interp, main='Interpolated', axes=FALSE, legend=FALSE)
#plot(uk_raster_BNG_ngb, main='Nearest Neighbour',axes=FALSE, legend=FALSE)
############VECTOR TO RASTER################
# Create the target raster
uk_20km <- raster(xmn=-200000, xmx=650000, ymn=0, ymx=1000000, res=20000, crs='+init=EPSG:27700')
# Rasterising polygons
uk_eire_poly_20km <- rasterize(as(uk_eire_BNG, 'Spatial'), uk_20km, field = 'name')
#Rasterising lines
uk_eire_BNG_line <- st_cast(uk_eire_BNG, 'LINESTRING')
st_agr(uk_eire_BNG) <- 'constant'
# Rasterising lines
uk_eire_BNG_line <- st_cast(uk_eire_BNG, 'LINESTRING')
uk_eire_line_20km <- rasterize(as(uk_eire_BNG_line, 'Spatial'), uk_20km, field = 'name')
# Rasterizing points
# - This isn't quite as neat. You need to take two steps in the cast and need to convert
# the name factor to numeric.
uk_eire_BNG_point <- st_cast(st_cast(uk_eire_BNG, 'MULTIPOINT'), 'POINT')
uk_eire_BNG_point$name <- as.numeric(uk_eire_BNG_point$name)
uk_eire_point_20km <- rasterize(as(uk_eire_BNG_point, 'Spatial'), uk_20km, field = 'name')
# Plotting those different outcomes
par(mfrow = c(1, 3), mar = c(1, 1, 1, 1))
plot(uk_eire_poly_20km, col = viridis(6, alpha = .5), legend = F, axes =F)
plot(st_geometry(uk_eire_BNG), add=TRUE, border='grey')
plot(uk_eire_line_20km, col=viridis(6, alpha=0.5), legend=FALSE, axes=FALSE)
plot(st_geometry(uk_eire_BNG), add=TRUE, border='grey')
plot(uk_eire_point_20km, col=viridis(6, alpha=0.5), legend=FALSE, axes=FALSE)
plot(st_geometry(uk_eire_BNG), add=TRUE, border='grey')
#############Raster to vector#################
# rasterToPolygons returns a polygon for each cell and return a Spatial object
poly_from_rast <- rasterToPolygons(uk_eire_poly_20km)
poly_from_rast <- as(poly_from_rast, 'sf')
# but can be set to dissolve the boundaries between cells with identical values
poly_from_rast_dissolve <- rasterToPolygons(uk_eire_poly_20km, dissolve =T)
poly_from_rast_dissolve <- as(poly_from_rast_dissolve, 'sf')
# rasterToPoints returns a matrix of coordinates and values
points_from_rast <- rasterToPoints(uk_eire_poly_20km)
points_from_rast <- st_as_sf(data.frame(points_from_rast), coords = c('x','y'))
# Plot the outputs - using key.pos=NULL to suppress the key and
# reset=FALSE to avoid plot.sf altering the par() options
par(mfrow=c(1,3), mar=c(1,1,1,1))
plot(poly_from_rast['layer'], key.pos = NULL, reset = FALSE)
plot(poly_from_rast_dissolve, key.pos = NULL, reset = FALSE)
plot(points_from_rast, key.pos = NULL, reset = FALSE)
##################LOADING RASTER DATA##############
# Read in Southern Ocean example data
so_data <- read.csv('../data/Southern_Ocean.csv', header=TRUE)
head(so_data)
# Convert the data frame to an sf object
so_data <- st_as_sf(so_data, coords=c('long', 'lat'), crs=4326)
head(so_data)
##############SAVING VECTOR DATA####################
st_write(uk_eire, '../data/uk_eire_WGS84.shp')
## Writing layer `uk_eire_WGS84' to data source `data/uk_eire_WGS84.shp' using driver `ESRI Shapefile'
## Writing 6 features with 5 fields and geometry type Polygon.
st_write(uk_eire_BNG, '../data/uk_eire_BNG.shp')
## Writing layer `uk_eire_BNG' to data source `data/uk_eire_BNG.shp' using driver `ESRI Shapefile'
## Writing 6 features with 5 fields and geometry type Polygon.
st_write(uk_eire, '../data/uk_eire_WGS84.geojson')
## Writing layer `uk_eire_WGS84' to data source `data/uk_eire_WGS84.geojson' using driver `GeoJSON'
## Writing 6 features with 5 fields and geometry type Polygon.
st_write(uk_eire, '../data/uk_eire_WGS84.gpkg')
## Updating layer `uk_eire_WGS84' to data source `data/uk_eire_WGS84.gpkg' using driver `GPKG'
## Writing 6 features with 5 fields and geometry type Polygon.
st_write(uk_eire, '../data/uk_eire_WGS84.json', driver='GeoJSON')
## Writing layer `uk_eire_WGS84' to data source `data/uk_eire_WGS84.json' using driver `GeoJSON'
## Writing 6 features with 5 fields and geometry type Polygon.
| /Week5/Code/GIS_Rpub.R | no_license | Don-Burns/CMEECourseWork | R | false | false | 13,416 | r | # Desc: A script exploring how to make, manipulate and save maps made using coordinate specified directly in code
rm(list = ls())
require(raster)
require(sf)
require(viridis)
require(units)
pop_dens <- data.frame(n_km2 = c(260, 67, 151, 4500, 133), country = c('England', 'Scotland', 'Wales', 'London', 'Northern Ireland'))
print(pop_dens)
##############################################
##########MAKING VECTORS FROM COORDINATES#####
##############################################
# Create coordinate for each country
# - this is a list of sets of coordinates forming the edge of the polygon
# - note that they have to _close_ (have the same coordinate at either end)
scotland <- rbind(c(-5, 58.6), c(-3, 58.6), c(-4, 57.6), c(-1.5, 57.6), c(-2, 55.8), c(-3, 55), c(-5, 55), c(-6, 56), c(-5, 58.6))
england <- rbind(c(-2,55.8),c(0.5, 52.8), c(1.6, 52.8), c(0.7, 50.7), c(-5.7,50), c(-2.7, 51.5), c(-3, 53.4),c(-3, 55), c(-2,55.8))
wales <- rbind(c(-2.5, 51.3), c(-5.3,51.8), c(-4.5, 53.4), c(-2.8, 53.4), c(-2.5, 51.3))
ireland <- rbind(c(-10,51.5), c(-10, 54.2), c(-7.5, 55.3), c(-5.9, 55.3), c(-5.9, 52.2), c(-10,51.5))
# Convert these coordinate into feature geometries
# - these are simple coordinates sets with no projection information.
scotland <- st_polygon(list(scotland))
england <- st_polygon(list(england))
wales <- st_polygon(list(wales))
ireland <- st_polygon(list(ireland))
# Combine geometries into a simple feature column
uk_eire <- st_sfc(wales, england, scotland, ireland, crs = 4326)
plot(uk_eire, asp = 1)
uk_eire_capitals <- data.frame(long = c(-.1, -3.2, -3.2, -6, -6.25), lat = c(51.5, 51.5, 55.8, 54.6, 53.3), name = c('London', 'Cardiff', 'Edinburgh', 'Belfast', 'Dublin'))
uk_eire_capitals <- st_as_sf(uk_eire_capitals, coords = c('long', 'lat'), crs = 4326)
########################################################
##########VECTOR GEOMETRY OPERATIONS###################
########################################################
st_pauls <- st_point(x = c(-0.098056, 51.513611))
london <- st_buffer(st_pauls, 0.25)
england_no_london <- st_difference(england, london)
# Count the points and show the number of rings within the polygon features
lengths(scotland)
lengths(england_no_london)
wales <- st_difference(wales, england)
# A rough that includes Northern Ireland and surrounding sea.
# - note the alternative way of providing the coordinates.
ni_area <- st_polygon(list(cbind(x=c(-8.1, -6, -5, -6, -8.1), y=c(54.4, 56, 55, 54, 54.4))))
northern_ireland <- st_intersection(ireland, ni_area)
eire <- st_difference(ireland, ni_area)
# Combine the final geometries
uk_eire <- st_sfc(wales, england_no_london, scotland, london, northern_ireland, eire, crs = 4326)
########################################################
##############FEATURES AND GEOMETRIES###################
########################################################
# make the UK into a single feature
uk_country <- st_union(uk_eire[-6])
# Compare six polygon features with one multipolygon feature
print(uk_eire)
print(uk_country)
# Plot them
par(mfrow = c(1, 2), mar = c(3, 3, 1, 1))
plot(uk_eire, asp = 1, col = rainbow(6))
plot(st_geometry(uk_eire_capitals), add = T)
plot(uk_country, asp = 1, col = 'lightblue')
########################################################
############VECTOR DATA AND ATTRIBUTES##################
########################################################
uk_eire <- st_sf(name = c('Wales', 'England', 'Scotland', 'London', 'Northern Ireland', 'Eire'), geometry = uk_eire)
plot(uk_eire, asp = 1)
uk_eire$capital <- c('London', 'Edinburgh', ' Cardiff', NA, 'Belfast', 'Dublin')
uk_eire <- merge(uk_eire, pop_dens, by.x = 'name', by.y = 'country', all.x = T)
print(uk_eire)
##prevents crash?
dev.off()
########################################################
#################SPATIAL ATTRIBUTES#####################
########################################################
uk_eire_centroids <- st_centroid(uk_eire)
uk_eire$area <- st_area(uk_eire) ## causing crashes?!
# The length of a polygon is the perimeter length
# - note that this includes the length of internal holes.
uk_eire$length <- st_length(uk_eire)
# look at the result
print(uk_eire)
# You can change units in a neat way
uk_eire$area <- set_units(uk_eire$area, 'km^2')
uk_eire$length <- set_units(uk_eire$length, 'km')
# And which won't let you make silly error like turning a length into weight
#uk_eire$area <- set_units(uk_eire$area, 'kg')
#Or you can simply convert the 'units' version to simple numbers
uk_eire$length <- as.numeric(uk_eire$length) # will be a string by default
print(uk_eire)
st_distance(uk_eire)
st_distance(uk_eire_centroids)
########################################################
#############PLOTTING sf OBJECTS########################
########################################################
plot(uk_eire['n_km2'], asp = 1, logz = T) #task: to log the scale, use logz or log data beforehand
########################################################
#############REPROJECTING VECTOR DATA###################
########################################################
# British National Grid (EPSG:27700)
uk_eire_BNG <- st_transform(uk_eire, 27700)
# The bounding box of the data shows the change in units
st_bbox(uk_eire)
st_bbox(uk_eire_BNG)
# UTM50N (EPSG:32650)
uk_eire_UTM50N <- st_transform(uk_eire, 32650)
# plot the results
par(mfrow = c(1, 3), mar = c(3, 3, 1, 1))
plot(st_geometry(uk_eire), asp = 1, axes = T, main = 'WGS 84')
plot(st_geometry(uk_eire), asp = 1, axes = T, main = 'OSGB 1936 / BNG')
plot(st_geometry(uk_eire_UTM50N), axes = T, main = 'UTM 50N')
########################################################
###############Proj4 STRINGS############################
########################################################
# Set up some points separated by 1 degree latitude and longitude from St. Pauls
st_pauls <- st_sfc(st_pauls, crs = 4326)
one_deg_west_pt <- st_sfc(st_pauls - c(1, 0), crs = 4326) # near Goring
one_deg_north_pt <- st_sfc(st_pauls + c(0, 1), crs = 4326) # near Peterborough
# Calculate the distance between St pauls and each point
st_distance(st_pauls, one_deg_west_pt)
st_distance(st_pauls, one_deg_north_pt)
st_distance(st_transform(st_pauls, 27700), st_transform(one_deg_west_pt, 27700))
####IMPROVE LONDON CIRCLE###
## task -make london buffer 25km
londonBNG <- st_buffer(st_transform(st_pauls,27700), 25000)
# In one line, transform england to BNG and cut out London
england_no_london_BNG <- st_difference(st_transform(st_sfc(england, crs = 4326), 27700), londonBNG)
# project the other features and combine everything together
others_BNG <- st_transform(st_sfc(eire, northern_ireland, scotland, wales, crs = 4326), 27700)
corrected <- c(others_BNG, londonBNG, england_no_london_BNG)
# plot that
graphics.off()
par(mar = c(3, 3, 1, 1))
plot(corrected, main = "25km radius London", axes = T)
########################################################
#############RASTERS####################################
#######Creating a raster################################
########################################################
# create an empty raster object covering UK and Eire
uk_raster_WGS84 <- raster(xmn = -11, xmx = 2, ymn = 49.5, ymx = 59, res = .5, crs = "+init=EPSG:4326")
hasValues(uk_raster_WGS84)
## add data to raster
values(uk_raster_WGS84) <- seq(length(uk_raster_WGS84))
plot(uk_raster_WGS84)
plot(st_geometry(uk_eire), add = T, border = 'black', lwd = 2, col = "#FFFFFF44")
#############CHANGING RASTER RESOLUTION############
# define a simple 4x4 square raster
m <- matrix(c(1, 1, 3, 3,
1, 2, 4, 3,
5, 5, 7, 8,
6, 6, 7, 7), ncol = 4, byrow = T)
square <- raster(m)
#########AGGREGATING RASTERS############
# average values
square_agg_mean <- aggregate(square, fact = 2, fun = mean)
values(square_agg_mean)
# Maximum values
square_agg_max <- aggregate(square, fact = 2, fun = max)
values(square_agg_max)
# modal values for categories
square_agg_modal <- aggregate(square, fact = 2, fun = modal)
values(square_agg_modal)
###############DISAGGREGATING RASTERS#################
# copy parents
square_disagg <- disaggregate(square, fact =2)
# Interpolate
square_disagg_interp <- disaggregate(square, fact = 2, method = 'bilinear')
################REPROJECTING A RASTER###################
# make two simple `sfc` objects containing points in the lower left and top right of the two grids
uk_pts_WGS84 <- st_sfc(st_point(c(-11, 49.5)), st_point(c(2, 59)), crs = 4326)
uk_pts_BNG <- st_sfc(st_point(c(-2e5, 0)), st_point(c(7e5, 1e6)), crs = 27700)
# use st_make_grid to quickly create a polygon grid with the right cellsize
uk_grid_WGS84 <- st_make_grid(uk_pts_WGS84, cellsize = 0.5)
uk_grid_BNG <- st_make_grid(uk_pts_BNG, cellsize = 1e5)
# Reproject the BNG grid into WGS84
uk_grid_BNG_as_WGS84 <- st_transform(uk_grid_BNG, 4326)
# Plot the features
plot(uk_grid_WGS84, asp = 1, border = 'grey', xlim = c(-13,4))
plot(st_geometry(uk_eire), add = T, border = 'darkgreen', lwd = 2)
plot(uk_grid_BNG_as_WGS84, border = 'red', add = T)
# Create the target raster
uk_raster_BNG <- raster(xmn=-200000, xmx=700000, ymn=0, ymx=1000000, res=100000, crs='+init=EPSG:27700')
#uk_raster_BNG_interp <- projectRaster(uk_raster_WGS84, uk_raster_BNG, method='bilinear')
#uk_raster_BNG_ngb <- projectRaster(uk_raster_WGS84, uk_raster_BNG, method='ngb')
#par(mfrow=c(1,3), mar=c(1,1,2,1))
#plot(uk_raster_BNG_interp, main='Interpolated', axes=FALSE, legend=FALSE)
#plot(uk_raster_BNG_ngb, main='Nearest Neighbour',axes=FALSE, legend=FALSE)
############VECTOR TO RASTER################
# Create the target raster
uk_20km <- raster(xmn=-200000, xmx=650000, ymn=0, ymx=1000000, res=20000, crs='+init=EPSG:27700')
# Rasterising polygons
uk_eire_poly_20km <- rasterize(as(uk_eire_BNG, 'Spatial'), uk_20km, field = 'name')
#Rasterising lines
uk_eire_BNG_line <- st_cast(uk_eire_BNG, 'LINESTRING')
st_agr(uk_eire_BNG) <- 'constant'
# Rasterising lines
uk_eire_BNG_line <- st_cast(uk_eire_BNG, 'LINESTRING')
uk_eire_line_20km <- rasterize(as(uk_eire_BNG_line, 'Spatial'), uk_20km, field = 'name')
# Rasterizing points
# - This isn't quite as neat. You need to take two steps in the cast and need to convert
# the name factor to numeric.
uk_eire_BNG_point <- st_cast(st_cast(uk_eire_BNG, 'MULTIPOINT'), 'POINT')
uk_eire_BNG_point$name <- as.numeric(uk_eire_BNG_point$name)
uk_eire_point_20km <- rasterize(as(uk_eire_BNG_point, 'Spatial'), uk_20km, field = 'name')
# Plotting those different outcomes
par(mfrow = c(1, 3), mar = c(1, 1, 1, 1))
plot(uk_eire_poly_20km, col = viridis(6, alpha = .5), legend = F, axes =F)
plot(st_geometry(uk_eire_BNG), add=TRUE, border='grey')
plot(uk_eire_line_20km, col=viridis(6, alpha=0.5), legend=FALSE, axes=FALSE)
plot(st_geometry(uk_eire_BNG), add=TRUE, border='grey')
plot(uk_eire_point_20km, col=viridis(6, alpha=0.5), legend=FALSE, axes=FALSE)
plot(st_geometry(uk_eire_BNG), add=TRUE, border='grey')
#############Raster to vector#################
# rasterToPolygons returns a polygon for each cell and return a Spatial object
poly_from_rast <- rasterToPolygons(uk_eire_poly_20km)
poly_from_rast <- as(poly_from_rast, 'sf')
# but can be set to dissolve the boundaries between cells with identical values
poly_from_rast_dissolve <- rasterToPolygons(uk_eire_poly_20km, dissolve =T)
poly_from_rast_dissolve <- as(poly_from_rast_dissolve, 'sf')
# rasterToPoints returns a matrix of coordinates and values
points_from_rast <- rasterToPoints(uk_eire_poly_20km)
points_from_rast <- st_as_sf(data.frame(points_from_rast), coords = c('x','y'))
# Plot the outputs - using key.pos=NULL to suppress the key and
# reset=FALSE to avoid plot.sf altering the par() options
par(mfrow=c(1,3), mar=c(1,1,1,1))
plot(poly_from_rast['layer'], key.pos = NULL, reset = FALSE)
plot(poly_from_rast_dissolve, key.pos = NULL, reset = FALSE)
plot(points_from_rast, key.pos = NULL, reset = FALSE)
##################LOADING RASTER DATA##############
# Read in Southern Ocean example data
so_data <- read.csv('../data/Southern_Ocean.csv', header=TRUE)
head(so_data)
# Convert the data frame to an sf object
so_data <- st_as_sf(so_data, coords=c('long', 'lat'), crs=4326)
head(so_data)
##############SAVING VECTOR DATA####################
st_write(uk_eire, '../data/uk_eire_WGS84.shp')
## Writing layer `uk_eire_WGS84' to data source `data/uk_eire_WGS84.shp' using driver `ESRI Shapefile'
## Writing 6 features with 5 fields and geometry type Polygon.
st_write(uk_eire_BNG, '../data/uk_eire_BNG.shp')
## Writing layer `uk_eire_BNG' to data source `data/uk_eire_BNG.shp' using driver `ESRI Shapefile'
## Writing 6 features with 5 fields and geometry type Polygon.
st_write(uk_eire, '../data/uk_eire_WGS84.geojson')
## Writing layer `uk_eire_WGS84' to data source `data/uk_eire_WGS84.geojson' using driver `GeoJSON'
## Writing 6 features with 5 fields and geometry type Polygon.
st_write(uk_eire, '../data/uk_eire_WGS84.gpkg')
## Updating layer `uk_eire_WGS84' to data source `data/uk_eire_WGS84.gpkg' using driver `GPKG'
## Writing 6 features with 5 fields and geometry type Polygon.
st_write(uk_eire, '../data/uk_eire_WGS84.json', driver='GeoJSON')
## Writing layer `uk_eire_WGS84' to data source `data/uk_eire_WGS84.json' using driver `GeoJSON'
## Writing 6 features with 5 fields and geometry type Polygon.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocText.R
\name{preprocText}
\alias{preprocText}
\title{preprocText}
\usage{
preprocText(text, convert_text, tolower, soundex,
usps_address, convert_text_to)
}
\arguments{
\item{text}{A vector of text data to convert.}
\item{convert_text}{Whether to convert text to the desired encoding, where
the encoding is specified in the 'convert_text_to' argument. Default is
TRUE}
\item{tolower}{Whether to normalize the text to be all lowercase. Default is
TRUE.}
\item{soundex}{Whether to convert the field to the Census's soundex encoding.
Default is FALSE.}
\item{usps_address}{Whether to use USPS address standardization rules to clean address fields.
Default is FALSE.}
\item{convert_text_to}{Which encoding to use when converting text. Default is 'Latin-ASCII'.
Full list of encodings in the \code{stri_trans_list()} function in the \code{stringi} package.}
}
\value{
\code{preprocText()} returns the preprocessed vector of text.
}
\description{
Preprocess text data such as names and addresses.
}
\author{
Ben Fifield <benfifield@gmail.com>
}
| /man/preprocText.Rd | no_license | rbagd/fastLink | R | false | true | 1,130 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocText.R
\name{preprocText}
\alias{preprocText}
\title{preprocText}
\usage{
preprocText(text, convert_text, tolower, soundex,
usps_address, convert_text_to)
}
\arguments{
\item{text}{A vector of text data to convert.}
\item{convert_text}{Whether to convert text to the desired encoding, where
the encoding is specified in the 'convert_text_to' argument. Default is
TRUE}
\item{tolower}{Whether to normalize the text to be all lowercase. Default is
TRUE.}
\item{soundex}{Whether to convert the field to the Census's soundex encoding.
Default is FALSE.}
\item{usps_address}{Whether to use USPS address standardization rules to clean address fields.
Default is FALSE.}
\item{convert_text_to}{Which encoding to use when converting text. Default is 'Latin-ASCII'.
Full list of encodings in the \code{stri_trans_list()} function in the \code{stringi} package.}
}
\value{
\code{preprocText()} returns the preprocessed vector of text.
}
\description{
Preprocess text data such as names and addresses.
}
\author{
Ben Fifield <benfifield@gmail.com>
}
|
shapley_mfoc <-
function(n=NA,a=NA,d=NA,K=NA){
  # Shapley value allocation for the MFOC (minimum fixed order cost)
  # inventory game.
  #
  # Args:
  #   n: number of agents.
  #   a: fixed ordering cost (scalar); required.
  #   d: numeric vector of per-agent demands; required.
  #   K: numeric vector of per-agent capacities; required.
  #
  # Returns:
  #   Numeric vector with the Shapley value of each agent (agents ordered
  #   by increasing d/K ratio, matching the ordering used by mfoc()), or
  #   NULL, after an explanatory message, when a, d or K are missing.
  if (is.na(a) || all(is.na(d)) || all(is.na(K))) {
    cat("Values for a, d and K are necessary. Please, check them.", sep="\n")
  } else {
    cat("Shapley-Value", sep="\n")
    # Sort agents by increasing d/K ratio before evaluating the game.
    dk <- order(d/K)
    d <- d[dk]; K <- K[dk]
    # Characteristic-function values of the nested coalition chain from the
    # non-cooperative mfoc() evaluation.
    cind <- as.vector(mfoc(n, a, d, K, cooperation=0))
    # Incremental Shapley formula:
    #   phi_1 = c_1/n;  phi_i = phi_1 + sum_{j=2}^{i} (c_j - c_{j-1})/(n-j+1)
    # i.e. a cumulative sum of the marginal increments, computed here in
    # vectorized form instead of the original O(n^2) nested loops.
    shapley <- cumsum(c(cind[1]/n, diff(cind)/((n - 1):1)))
    return(shapley)
  }
}
| /R/shapley_mfoc.R | no_license | cran/Inventorymodel | R | false | false | 552 | r | shapley_mfoc <-
function(n=NA,a=NA,d=NA,K=NA){
# Shapley value of the MFOC (fixed order cost) inventory game.
# n: number of agents; a: fixed order cost; d: demands; K: capacities.
# Returns the per-agent Shapley allocation (agents sorted by d/K), or
# prints a message and returns NULL when a, d or K are all missing.
if (is.na(a)==T|sum(is.na(d)==T)==length(d)|sum(is.na(K)==T)==length(K)){
cat("Values for a, d and K are necessary. Please, check them.", sep="\n")
} else {
cat("Shapley-Value", sep="\n")
# Sort agents by increasing d/K ratio before evaluating the game.
dk<-order(d/K)
d<-d[dk];K<-K[dk]
# Characteristic-function values from the non-cooperative mfoc() chain.
cind<-as.vector(mfoc(n,a,d,K,cooperation=0))
# Incremental Shapley formula:
#   phi_1 = c_1/n;  phi_i = phi_1 + sum_{j=2}^{i} (c_j - c_{j-1})/(n-j+1)
shapley<-c();shapley[1]<-cind[1]/n
for (i in 2:n){
aux<-0
for (j in 2:i){aux<-aux+(cind[j]-cind[j-1])/(n-j+1)}
shapley[i]<-shapley[1]+aux
}
return(shapley)
}
}
|
###############################################################################
###############################################################################
###############################################################################
# spusť v R 64 bit!!!
## CPU instalace knihovny "keras" ---------------------------------------------
install.packages("keras")
## GPU instalace knihovny "keras" ---------------------------------------------
devtools::install_github("rstudio/keras")
## inicializace knihovny "keras" ----------------------------------------------
library(keras)
## instalace IDE Anaconda -----------------------------------------------------
#### pro Windows lze z https://www.anaconda.com/download/#windows
install_keras(
conda = "C:/Users/student/Anaconda3/Scripts/conda.exe"
)
## ----------------------------------------------------------------------------
###############################################################################
###############################################################################
###############################################################################
library(keras)
mnist <- dataset_mnist()
x_train <- mnist$train$x
y_train <- mnist$train$y
x_test <- mnist$test$x
y_test <- mnist$test$y
# reshape
x_train <- array_reshape(x_train, c(nrow(x_train), 784))
x_test <- array_reshape(x_test, c(nrow(x_test), 784))
# rescale
x_train <- x_train / 255
x_test <- x_test / 255
y_train <- to_categorical(y_train, 10)
y_test <- to_categorical(y_test, 10)
model <- keras_model_sequential()
model %>%
layer_dense(units = 256, activation = 'relu', input_shape = c(784)) %>%
layer_dropout(rate = 0.4) %>%
layer_dense(units = 128, activation = 'relu') %>%
layer_dropout(rate = 0.3) %>%
layer_dense(units = 10, activation = 'softmax')
summary(model)
model %>% compile(
loss = 'categorical_crossentropy',
optimizer = optimizer_rmsprop(),
metrics = c('accuracy')
)
history <- model %>% fit(
x_train, y_train,
epochs = 30, batch_size = 128,
validation_split = 0.2
)
plot(history)
model %>% evaluate(x_test, y_test)
model %>% predict_classes(x_test)
## ----------------------------------------------------------------------------
###############################################################################
###############################################################################
###############################################################################
| /_script_keras_.R | no_license | LStepanek/17VSADR_Skriptovani_a_analyza_dat_v_jazyce_R | R | false | false | 2,582 | r | ###############################################################################
###############################################################################
###############################################################################
# spusť v R 64 bit!!!
## CPU instalace knihovny "keras" ---------------------------------------------
install.packages("keras")
## GPU instalace knihovny "keras" ---------------------------------------------
devtools::install_github("rstudio/keras")
## inicializace knihovny "keras" ----------------------------------------------
library(keras)
## instalace IDE Anaconda -----------------------------------------------------
#### pro Windows lze z https://www.anaconda.com/download/#windows
install_keras(
conda = "C:/Users/student/Anaconda3/Scripts/conda.exe"
)
## ----------------------------------------------------------------------------
###############################################################################
###############################################################################
###############################################################################
library(keras)
mnist <- dataset_mnist()
x_train <- mnist$train$x
y_train <- mnist$train$y
x_test <- mnist$test$x
y_test <- mnist$test$y
# reshape
x_train <- array_reshape(x_train, c(nrow(x_train), 784))
x_test <- array_reshape(x_test, c(nrow(x_test), 784))
# rescale
x_train <- x_train / 255
x_test <- x_test / 255
y_train <- to_categorical(y_train, 10)
y_test <- to_categorical(y_test, 10)
model <- keras_model_sequential()
model %>%
layer_dense(units = 256, activation = 'relu', input_shape = c(784)) %>%
layer_dropout(rate = 0.4) %>%
layer_dense(units = 128, activation = 'relu') %>%
layer_dropout(rate = 0.3) %>%
layer_dense(units = 10, activation = 'softmax')
summary(model)
model %>% compile(
loss = 'categorical_crossentropy',
optimizer = optimizer_rmsprop(),
metrics = c('accuracy')
)
history <- model %>% fit(
x_train, y_train,
epochs = 30, batch_size = 128,
validation_split = 0.2
)
plot(history)
model %>% evaluate(x_test, y_test)
model %>% predict_classes(x_test)
## ----------------------------------------------------------------------------
###############################################################################
###############################################################################
###############################################################################
|
##================================================================================
## This file is part of the evoper package - EvoPER
##
## (C)2016 Antonio Prestes Garcia <@>
## For license terms see DESCRIPTION and/or LICENSE
##
## $Id$
##================================================================================
#' @title compare.algorithms1
#'
#' @description Compare the number of function evalutions and convergence for the
#' following optimization algorithms, ("saa","pso","acor","ees1").
#'
#' @param F The function to be tested
#' @param seeds The random seeds which will be used for testing algorithms
#'
#' @examples \dontrun{
#' rm(list=ls())
#' d.cigar4<- compare.algorithms1(f0.cigar4)
#' d.schaffer4<- compare.algorithms1(f0.schaffer4)
#' d.griewank4<- compare.algorithms1(f0.griewank4)
#' d.bohachevsky4<- compare.algorithms1(f0.bohachevsky4)
#' d.rosenbrock4<- compare.algorithms1(f0.rosenbrock4)
#' }
#'
#' @export
compare.algorithms1<- function(F, seeds= c(27, 2718282, 36190727, 3141593, -91190721, -140743, 1321)) {
  # Run every (algorithm, seed) combination on the objective function F and
  # collect, per run, the algorithm name, the seed, the solver statistics
  # (f$stats()) and the best solution found (v$getBest()).
  #
  # F:     objective function of four parameters, each bounded to [-100, 100].
  # seeds: random seeds used to replicate each algorithm run.
  #
  # Returns a data.frame with one row per (algorithm, seed) pair.
  algorithms <- c("saa", "pso", "acor", "ees1")
  # Preallocate the per-run result list instead of growing a matrix with
  # rbind() inside the loop (avoids the quadratic-copy anti-pattern).
  rows <- vector("list", length(algorithms) * length(seeds))
  k <- 0
  for (algorithm in algorithms) {
    for (seed in seeds) {
      # Fix the RNG so every algorithm sees the same random stream per seed.
      set.seed(seed)
      f <- PlainFunction$new(F)
      f$setTolerance(10^-1)
      f$Parameter(name="x1", min=-100, max=100)
      f$Parameter(name="x2", min=-100, max=100)
      f$Parameter(name="x3", min=-100, max=100)
      f$Parameter(name="x4", min=-100, max=100)
      v <- extremize(algorithm, f)
      k <- k + 1
      rows[[k]] <- cbind(algorithm, seed, f$stats(), v$getBest())
    }
  }
  # Binding all rows at once is equivalent to the original incremental
  # rbind(), without the repeated copies.
  as.data.frame(do.call(rbind, rows))
}
#' @title summarize.comp1
#'
#' @description Provides as summary with averged values of experimental setup
#'
#' @param mydata The data frame generated with 'compare.algorithms1'
#'
#' @return The summarized data
#'
#' @import plyr
#' @export
summarize.comp1<- function(mydata) {
# Average, per algorithm, the number of function evaluations, the
# convergence rate and the best fitness over all seeds produced by
# compare.algorithms1(). Kept verbatim: plyr::ddply with the NSE
# 'summarize' helper inside with() is sensitive to rewriting.
with(mydata,ddply(mydata, .(algorithm), summarize, evals=mean(total_evals), convergence=mean(converged), fitness=mean(fitness)))
}
#' @title show.comp1
#'
#' @description Generates a barplot comparing the number of evalutions for
#' algorithms ("saa","pso","acor","ees1").
#'
#' @param mydata The data generated with 'summarize.comp1'
#' @param what The name of variable to plot on 'y' axis
#' @param title the plot title
#'
#' @examples \dontrun{
#' p.a<- show.comp1(d.cigar4,"evals","(a) Cigar function")
#' p.b<- show.comp1(d.schaffer4,"evals","(b) Schafer function")
#' p.c<- show.comp1(d.griewank4,"evals","(c) Griewank function")
#' p.d<- show.comp1(d.bohachevsky4,"evals","(d) Bohachevsky function")
#' }
#'
#' @importFrom ggplot2 geom_bar
#' @export
show.comp1<- function(mydata, what, title=NULL) {
  # Barplot of column `what` (e.g. "evals") by algorithm. Bars for
  # algorithms whose mean convergence is below 0.6 are drawn in gray,
  # the rest in steelblue; the title is shown as a facet strip.
  #
  # mydata: data frame produced by summarize.comp1().
  # what:   name (string) of the column mapped to the y axis.
  # title:  facet strip label; NULL (the default) yields an empty strip.
  #
  # Returns the ggplot object.
  #
  # Robustness fix: sprintf("%s", NULL) returns character(0), which cannot
  # be assigned to a data-frame column, so the NULL default used to error.
  # Map NULL to "" explicitly instead.
  mydata$title <- if (is.null(title)) "" else sprintf("%s", title)
  p <- ggplot(data = mydata, with(mydata, aes_string(x = "algorithm", y = what)))
  p <- p + geom_bar(stat = "identity",
                    fill = ifelse(mydata$convergence < 0.6, "gray", "steelblue"))
  p <- p + facet_grid(. ~ title)
  p
}
| /R/experiments.R | permissive | antonio-pgarcia/evoper | R | false | false | 2,949 | r | ##================================================================================
## This file is part of the evoper package - EvoPER
##
## (C)2016 Antonio Prestes Garcia <@>
## For license terms see DESCRIPTION and/or LICENSE
##
## $Id$
##================================================================================
#' @title compare.algorithms1
#'
#' @description Compare the number of function evalutions and convergence for the
#' following optimization algorithms, ("saa","pso","acor","ees1").
#'
#' @param F The function to be tested
#' @param seeds The random seeds which will be used for testing algorithms
#'
#' @examples \dontrun{
#' rm(list=ls())
#' d.cigar4<- compare.algorithms1(f0.cigar4)
#' d.schaffer4<- compare.algorithms1(f0.schaffer4)
#' d.griewank4<- compare.algorithms1(f0.griewank4)
#' d.bohachevsky4<- compare.algorithms1(f0.bohachevsky4)
#' d.rosenbrock4<- compare.algorithms1(f0.rosenbrock4)
#' }
#'
#' @export
compare.algorithms1<- function(F, seeds= c(27, 2718282, 36190727, 3141593, -91190721, -140743, 1321)) {
# Run every (algorithm, seed) combination on objective function F and
# collect per run the algorithm name, seed, solver statistics and best
# solution. Returns a data.frame with one row per (algorithm, seed) pair.
algorithms<- c("saa","pso","acor","ees1")
mydata<- c()
for(algorithm in algorithms) {
for(seed in seeds) {
# Fix the RNG so each algorithm sees the same random stream per seed.
set.seed(seed)
f<- PlainFunction$new(F)
f$setTolerance(10^-1)
# Four parameters, each bounded to [-100, 100].
f$Parameter(name="x1",min=-100,max=100)
f$Parameter(name="x2",min=-100,max=100)
f$Parameter(name="x3",min=-100,max=100)
f$Parameter(name="x4",min=-100,max=100)
v<- extremize(algorithm, f)
# One result row: identification columns plus stats and best solution.
myrow<- cbind(algorithm, seed, f$stats(), v$getBest())
mydata<- rbind(mydata, myrow)
}
}
as.data.frame(mydata)
}
#' @title summarize.comp1
#'
#' @description Provides as summary with averged values of experimental setup
#'
#' @param mydata The data frame generated with 'compare.algorithms1'
#'
#' @return The summarized data
#'
#' @import plyr
#' @export
summarize.comp1<- function(mydata) {
# Average, per algorithm, the evaluation count, convergence rate and
# best fitness over all seeds from compare.algorithms1(). Kept verbatim:
# plyr::ddply's NSE 'summarize' inside with() is sensitive to rewriting.
with(mydata,ddply(mydata, .(algorithm), summarize, evals=mean(total_evals), convergence=mean(converged), fitness=mean(fitness)))
}
#' @title show.comp1
#'
#' @description Generates a barplot comparing the number of evalutions for
#' algorithms ("saa","pso","acor","ees1").
#'
#' @param mydata The data generated with 'summarize.comp1'
#' @param what The name of variable to plot on 'y' axis
#' @param title the plot title
#'
#' @examples \dontrun{
#' p.a<- show.comp1(d.cigar4,"evals","(a) Cigar function")
#' p.b<- show.comp1(d.schaffer4,"evals","(b) Schafer function")
#' p.c<- show.comp1(d.griewank4,"evals","(c) Griewank function")
#' p.d<- show.comp1(d.bohachevsky4,"evals","(d) Bohachevsky function")
#' }
#'
#' @importFrom ggplot2 geom_bar
#' @export
show.comp1<- function(mydata, what, title=NULL) {
mydata$title<- sprintf("%s", title)
p<- ggplot(data= mydata, with( mydata, aes_string(x="algorithm", y=what)) )
p<- p + geom_bar(stat="identity", fill=ifelse(mydata$convergence < 0.6,"gray", "steelblue"))
p<- p + facet_grid(. ~ title)
p
}
|
# Data-Analytics-with-R-Excel-Tableau_Session1 Assignment1
#2.Recycling of elements in a vector
#3.Example of Recycling of elements in a vector
# R recycles the shorter vector: c(2,10) is repeated to match length 4.
a <- c(10,2,23,4)+c(2,10)
print(a)
# Length 7 is not a multiple of 2, so recycling emits a warning here.
b<- c(1,2,3,4,5,6,7) + c(1,3)
print(b)
# Length 6 is a multiple of 2: clean recycling, no warning.
x <- c(1,2,3,4,5,6)+c(2,10)
print(x)
# Same partial-recycling warning case as `b` above.
y<- c(1,2,3,4,5,6,7) + c(10,30)
print(y) | /assignment1_1.R | no_license | munmun55/Data-Analytics-with-R-Excel-Tableau_Session1Assignment1 | R | false | false | 314 | r | # Data-Analytics-with-R-Excel-Tableau_Session1 Assignment1
#2.Recycling of elements in a vector
#3.Example of Recycling of elements in a vector
a <- c(10,2,23,4)+c(2,10)
print(a)
b<- c(1,2,3,4,5,6,7) + c(1,3)
print(b)
x <- c(1,2,3,4,5,6)+c(2,10)
print(x)
y<- c(1,2,3,4,5,6,7) + c(10,30)
print(y) |
context("SnowFor_Redis")
test_that("redis_base", {
  # Identity worker: sleeps briefly and echoes its input, so the parallel
  # result can be compared against the original sequence.
  worker <- function(x) {
    Sys.sleep(0.5)
    print(x)
    x
  }
  conn <- ErInit()
  res <- snowFor(1:10, worker, cores = 2, er = conn)
  expect_equal(unlist(res), 1:10)
})
| /tests/testthat/test_SnowFor_redis.R | no_license | itsaquestion/SnowFor | R | false | false | 219 | r | context("SnowFor_Redis")
test_that("redis_base", {
er = ErInit()
go_fun = function(x){
Sys.sleep(0.5)
print(x)
x
}
a = snowFor(1:10, go_fun,cores = 2,er=er)
expect_equal(unlist(a), 1:10)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MBDETES-Calibration.R
\name{MBDETES_LeaveLaden}
\alias{MBDETES_LeaveLaden}
\title{MBDETES: Probability of Leaving, post prandially (laden mosquito)}
\usage{
MBDETES_LeaveLaden()
}
\description{
MBDETES: Probability of Leaving, post prandially (laden mosquito)
}
| /MASH-dev/SeanWu/MBITES/man/MBDETES_LeaveLaden.Rd | no_license | aucarter/MASH-Main | R | false | true | 340 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MBDETES-Calibration.R
\name{MBDETES_LeaveLaden}
\alias{MBDETES_LeaveLaden}
\title{MBDETES: Probability of Leaving, post prandially (laden mosquito)}
\usage{
MBDETES_LeaveLaden()
}
\description{
MBDETES: Probability of Leaving, post prandially (laden mosquito)
}
|
#' Find the shortest distance
#' @param graph A data frame with columns \code{v1} (edge start node),
#'   \code{v2} (edge end node) and \code{w} (edge weight). Nodes are
#'   assumed to be labelled 1..n and every node must appear in \code{v1}.
#' @param init_node the initial node.
#' @return A numeric vector with the shortest distance from
#'   \code{init_node} to each node of the graph.
#' @examples
#' dijkstra(wiki_graph,1)
#' dijkstra(wiki_graph,3)
dijkstra <- function(graph, init_node) {
  # Validate the graph structure BEFORE extracting columns, so a bad
  # 'graph' argument fails with a clear message instead of a cryptic
  # subscript error.
  stopifnot(is.data.frame(graph))
  stopifnot(length(graph) == 3)
  stopifnot(names(graph) == c("v1", "v2", "w"))
  stopifnot(!anyNA(graph))
  v1 <- graph[, 1]  # start node of each edge
  v2 <- graph[, 2]  # end node of each edge
  w  <- graph[, 3]  # weight of each edge
  stopifnot(is.numeric(v1), is.numeric(v2), is.numeric(w))
  nodes <- unique(v1)  # all nodes in the graph
  stopifnot(is.numeric(init_node), length(init_node) == 1)
  stopifnot(init_node %in% nodes)
  # distance[i] is the best known distance from init_node to node i;
  # nodes are indexed by their numeric label (assumed 1..n).
  distance <- rep(Inf, length(nodes))
  distance[which(init_node == nodes)] <- 0
  current_node <- init_node
  while (length(nodes) != 0) {
    # Relax every edge leaving the current node.
    edge_idx <- which(current_node == v1)
    neighbours <- v2[edge_idx]
    distance[neighbours] <- pmin(distance[neighbours],
                                 distance[current_node] + w[edge_idx])
    # Mark the current node as visited.
    nodes <- nodes[nodes != current_node]
    if (length(nodes) > 0) {
      # Next node: the unvisited node with the smallest tentative
      # distance (which.min returns the first minimum, matching the
      # original manual scan).
      current_node <- nodes[which.min(distance[nodes])]
    }
  }
  distance
}
| /Lab3/R/Dijkstra.R | permissive | KarDeMumman/Lab3 | R | false | false | 1,879 | r | #' Find the shortest distance
#' @param graph A dataframe.
#' @param init_node the initial node.
#' @return The shortest distance to each node starting from the initial node.
#' @examples
#' dijkstra(wiki_graph,1)
#' dijkstra(wiki_graph,3)
dijkstra<- function (graph, init_node){
v1<-graph[,1] #The start of the edge
v2<-graph[,2] #The end of the edge
w <-graph[,3] #The weight of the edge
nodes<-unique(v1) #The nodes in the graph
stopifnot(init_node %in% nodes)
stopifnot(is.numeric(init_node))
stopifnot(is.data.frame(graph))
stopifnot(length(graph) == 3)
stopifnot(names(graph) == c("v1", "v2", "w"))
stopifnot(is.numeric(v1))
stopifnot(is.numeric(v2))
stopifnot(is.numeric(w))
stopifnot(NA %in% names(graph)==FALSE)
distance<-rep(Inf,length(nodes)) #Vector of the distance for each node, will be updated after every step of the algorithm
distance[which (init_node==nodes)]<-0 #Set the distance of the initial node to 0
current_node <- init_node
while (length(nodes) != 0) {
neighbournodes<-v2[which (current_node==v1)]# The neighbouring nodes of the current node
neighbourweights<-w[which (current_node==v1)]#The weights of the edges connecting the current node to the neighbouring nodes.
neighbourdistance<-distance[neighbournodes]
alt_distance<-distance[current_node] + neighbourweights #Alternative distance
distance[neighbournodes]<-pmin(neighbourdistance,alt_distance)#Set the distance of a node to the minimum
nodes <- nodes[nodes != current_node]#Remove the checked node
#Now I want to choose the node with minium distance as the current node
current_node <- nodes[1]
min_dist_node <- distance[current_node]
for (node in nodes) {
if(distance[node] < min_dist_node)
{
min_dist_node <- distance[node]
current_node <- node
}
}
}
return(distance)
}
|
testlist <- list(id = NULL, id = NULL, booklet_id = c(8168473L, 2127314835L, 171177770L, -13379799L, -1815221204L, 601253144L, -804651186L, 2094281728L, 860713787L, -971707632L, -1475044502L, 870040598L, -1182814578L, -1415711445L, 1901326755L, -1882837573L, 1340545259L, 1156041943L, 823641812L, -1106109928L, -1048157941L), person_id = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(dexterMST:::is_person_booklet_sorted,testlist)
str(result) | /dexterMST/inst/testfiles/is_person_booklet_sorted/AFL_is_person_booklet_sorted/is_person_booklet_sorted_valgrind_files/1615940778-test.R | no_license | akhikolla/updatedatatype-list1 | R | false | false | 824 | r | testlist <- list(id = NULL, id = NULL, booklet_id = c(8168473L, 2127314835L, 171177770L, -13379799L, -1815221204L, 601253144L, -804651186L, 2094281728L, 860713787L, -971707632L, -1475044502L, 870040598L, -1182814578L, -1415711445L, 1901326755L, -1882837573L, 1340545259L, 1156041943L, 823641812L, -1106109928L, -1048157941L), person_id = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(dexterMST:::is_person_booklet_sorted,testlist)
str(result) |
## Generic entry point for permutation tests; dispatches on class(x).
permutest <- function(x, ...)
UseMethod("permutest")
## Default method: no permutation test is defined for arbitrary objects,
## so fail explicitly instead of guessing.
permutest.default <- function(x, ...)
stop("No default permutation test defined")
## Permutation test for constrained ordination results ("cca", "rda",
## "capscale", "dbrda" objects): computes pseudo-F statistics for
## permuted data and returns a "permutest.cca" object.
##
## x            a fitted constrained ordination result
## permutations a permute::how() definition, a number of permutations,
##              or a pre-built matrix of row indices
## model        what is permuted: raw data ("direct") or residuals of
##              the conditions ("reduced") / of the full model ("full")
## by           NULL for an overall test, or "onedf"/"terms" for
##              sequential one-d.f. / per-term tests
## first        test only the first constrained eigenvalue
## strata       restrict permutations within levels of this factor
## parallel     a number of cores or an existing cluster object
`permutest.cca` <-
function (x, permutations = how(nperm=99),
model = c("reduced", "direct", "full"), by = NULL, first = FALSE,
strata = NULL, parallel = getOption("mc.cores") , ...)
{
## do something sensible with insensible input (no constraints)
if (is.null(x$CCA)) {
sol <- list(call = match.call(), testcall = x$call, model = NA,
F.0 = NA, F.perm = NA, chi = c(0, x$CA$tot.chi),
num = 0, den = x$CA$tot.chi,
df = c(0, nrow(x$CA$u) - max(x$pCCA$rank,0) - 1),
nperm = 0, method = x$method, first = FALSE,
Random.seed = NA)
class(sol) <- "permutest.cca"
return(sol)
}
## compatible arguments?
if (!is.null(by)) {
if (first)
stop("'by' cannot be used with option 'first=TRUE'")
by <- match.arg(by, c("onedf", "terms"))
if (by == "terms" && is.null(x$terminfo))
stop("by='terms' needs a model fitted with a formula")
}
model <- match.arg(model)
## special cases
isCCA <- !inherits(x, "rda") # weighting (not used below)
isPartial <- !is.null(x$pCCA) # handle conditions
isDB <- inherits(x, c("dbrda")) # only dbrda is distance-based
## C function to get the statistics in one loop. NOTE: getF() reads
## E, Q, QZ, effects, q, r, Chi.tot and 'by' from this enclosing frame;
## all of them are assigned below before getF() is first called.
getF <- function(indx, ...)
{
if (!is.matrix(indx))
indx <- matrix(indx, nrow=1)
out <- .Call(do_getF, indx, E, Q, QZ, effects, first, isPartial, isDB)
p <- length(effects)
if (!isPartial && !first)
out[,p+1] <- Chi.tot - rowSums(out[,seq_len(p), drop=FALSE])
if (p > 1) {
if (by == "terms")
out[, seq_len(p)] <- sweep(out[, seq_len(p), drop = FALSE],
2, q, "/")
out <- cbind(out, sweep(out[,seq_len(p), drop=FALSE], 1,
out[,p+1]/r, "/"))
}
else
out <- cbind(out, (out[,1]/q)/(out[,2]/r))
out
}
## end getF
## QR decomposition
Q <- x$CCA$QR
if (isPartial) {
QZ <- x$pCCA$QR
} else {
QZ <- NULL
}
## statistics: overall tests
if (first) {
Chi.z <- x$CCA$eig[1]
q <- 1
}
else {
Chi.z <- x$CCA$tot.chi
names(Chi.z) <- "Model"
q <- x$CCA$qrank
}
## effects
if (!is.null(by)) {
partXbar <- ordiYbar(x, "partial")
if (by == "onedf") {
effects <- seq_len(q)
termlabs <-
if (isPartial)
colnames(Q$qr)[effects + x$pCCA$rank]
else
colnames(Q$qr)[effects]
} else { # by = "terms"
ass <- x$terminfo$assign
## ass was introduced in vegan_2.5-0
if (is.null(ass))
stop("update() old ordination result object")
pivot <- Q$pivot
if (isPartial)
pivot <- pivot[pivot > x$pCCA$rank] - x$pCCA$rank
ass <- ass[pivot[seq_len(x$CCA$qrank)]]
effects <- cumsum(rle(ass)$length)
termlabs <- labels(terms(x$terminfo))
if (isPartial)
termlabs <- termlabs[termlabs %in% labels(terms(x))]
termlabs <-termlabs[unique(ass)]
}
q <- diff(c(0, effects)) # d.o.f.
if (isPartial)
effects <- effects + x$pCCA$rank
F.0 <- numeric(length(effects))
for (k in seq_along(effects)) {
fv <- qr.fitted(Q, partXbar, k = effects[k])
F.0[k] <- if (isDB) sum(diag(fv)) else sum(fv^2)
}
}
else {
effects <- 0
termlabs <- "Model"
}
## Set up
Chi.xz <- x$CA$tot.chi
names(Chi.xz) <- "Residual"
r <- nobs(x) - Q$rank - 1
if (model == "full")
Chi.tot <- Chi.xz
else Chi.tot <- Chi.z + Chi.xz
if (is.null(by))
F.0 <- (Chi.z/q)/(Chi.xz/r)
else {
Chi.z <- numeric(length(effects))
for (k in seq_along(effects)) {
fv <- qr.fitted(Q, partXbar, k = effects[k])
Chi.z[k] <- if (isDB) sum(diag(fv)) else sum(fv^2)
}
Chi.z <- diff(c(0, F.0))
F.0 <- Chi.z/q * r/Chi.xz
}
## permutation data
E <- switch(model,
"direct" = ordiYbar(x, "initial"),
"reduced" = ordiYbar(x, "partial"),
"full" = ordiYbar(x, "CA"))
## vegan < 2.5-0 cannot use direct model in partial dbRDA
if (is.null(E) && isDB && isPartial)
stop("'direct' model cannot be used in old partial-dbrda: update ordination")
## Save dimensions
N <- nrow(E)
permutations <- getPermuteMatrix(permutations, N, strata = strata)
nperm <- nrow(permutations)
## Parallel processing (similar as in oecosimu)
if (is.null(parallel))
parallel <- 1
hasClus <- inherits(parallel, "cluster")
if (hasClus || parallel > 1) {
if(.Platform$OS.type == "unix" && !hasClus) {
tmp <- do.call(rbind,
mclapply(1:nperm,
function(i) getF(permutations[i,]),
mc.cores = parallel))
} else {
## if hasClus, do not set up and stop a temporary cluster
if (!hasClus) {
parallel <- makeCluster(parallel)
}
tmp <- parRapply(parallel, permutations, function(i) getF(i))
tmp <- matrix(tmp, ncol=3, byrow=TRUE)
if (!hasClus)
stopCluster(parallel)
}
} else {
tmp <- getF(permutations)
}
## split the getF() result into numerator, denominator and F columns
if ((p <- length(effects)) > 1) {
num <- tmp[,seq_len(p)]
den <- tmp[,p+1]
F.perm <- tmp[, seq_len(p) + p + 1]
} else {
num <- tmp[,1]
den <- tmp[,2]
F.perm <- tmp[,3, drop=FALSE]
}
## assemble the result object
Call <- match.call()
Call[[1]] <- as.name("permutest")
sol <- list(call = Call, testcall = x$call, model = model,
F.0 = F.0, F.perm = F.perm, chi = c(Chi.z, Chi.xz),
num = num, den = den, df = c(q, r), nperm = nperm,
method = x$method, first = first, termlabels = termlabs)
sol$Random.seed <- attr(permutations, "seed")
sol$control <- attr(permutations, "control")
if (!missing(strata)) {
sol$strata <- deparse(substitute(strata))
sol$stratum.values <- strata
}
class(sol) <- "permutest.cca"
sol
}
| /R/permutest.cca.R | no_license | nemochina2008/vegan | R | false | false | 6,700 | r | permutest <- function(x, ...)
UseMethod("permutest")
permutest.default <- function(x, ...)
stop("No default permutation test defined")
`permutest.cca` <-
function (x, permutations = how(nperm=99),
model = c("reduced", "direct", "full"), by = NULL, first = FALSE,
strata = NULL, parallel = getOption("mc.cores") , ...)
{
## do something sensible with insensible input (no constraints)
if (is.null(x$CCA)) {
sol <- list(call = match.call(), testcall = x$call, model = NA,
F.0 = NA, F.perm = NA, chi = c(0, x$CA$tot.chi),
num = 0, den = x$CA$tot.chi,
df = c(0, nrow(x$CA$u) - max(x$pCCA$rank,0) - 1),
nperm = 0, method = x$method, first = FALSE,
Random.seed = NA)
class(sol) <- "permutest.cca"
return(sol)
}
## compatible arguments?
if (!is.null(by)) {
if (first)
stop("'by' cannot be used with option 'first=TRUE'")
by <- match.arg(by, c("onedf", "terms"))
if (by == "terms" && is.null(x$terminfo))
stop("by='terms' needs a model fitted with a formula")
}
model <- match.arg(model)
## special cases
isCCA <- !inherits(x, "rda") # weighting
isPartial <- !is.null(x$pCCA) # handle conditions
isDB <- inherits(x, c("dbrda")) # only dbrda is distance-based
## C function to get the statististics in one loop
getF <- function(indx, ...)
{
if (!is.matrix(indx))
indx <- matrix(indx, nrow=1)
out <- .Call(do_getF, indx, E, Q, QZ, effects, first, isPartial, isDB)
p <- length(effects)
if (!isPartial && !first)
out[,p+1] <- Chi.tot - rowSums(out[,seq_len(p), drop=FALSE])
if (p > 1) {
if (by == "terms")
out[, seq_len(p)] <- sweep(out[, seq_len(p), drop = FALSE],
2, q, "/")
out <- cbind(out, sweep(out[,seq_len(p), drop=FALSE], 1,
out[,p+1]/r, "/"))
}
else
out <- cbind(out, (out[,1]/q)/(out[,2]/r))
out
}
## end getF
## QR decomposition
Q <- x$CCA$QR
if (isPartial) {
QZ <- x$pCCA$QR
} else {
QZ <- NULL
}
## statistics: overall tests
if (first) {
Chi.z <- x$CCA$eig[1]
q <- 1
}
else {
Chi.z <- x$CCA$tot.chi
names(Chi.z) <- "Model"
q <- x$CCA$qrank
}
## effects
if (!is.null(by)) {
partXbar <- ordiYbar(x, "partial")
if (by == "onedf") {
effects <- seq_len(q)
termlabs <-
if (isPartial)
colnames(Q$qr)[effects + x$pCCA$rank]
else
colnames(Q$qr)[effects]
} else { # by = "terms"
ass <- x$terminfo$assign
## ass was introduced in vegan_2.5-0
if (is.null(ass))
stop("update() old ordination result object")
pivot <- Q$pivot
if (isPartial)
pivot <- pivot[pivot > x$pCCA$rank] - x$pCCA$rank
ass <- ass[pivot[seq_len(x$CCA$qrank)]]
effects <- cumsum(rle(ass)$length)
termlabs <- labels(terms(x$terminfo))
if (isPartial)
termlabs <- termlabs[termlabs %in% labels(terms(x))]
termlabs <-termlabs[unique(ass)]
}
q <- diff(c(0, effects)) # d.o.f.
if (isPartial)
effects <- effects + x$pCCA$rank
F.0 <- numeric(length(effects))
for (k in seq_along(effects)) {
fv <- qr.fitted(Q, partXbar, k = effects[k])
F.0[k] <- if (isDB) sum(diag(fv)) else sum(fv^2)
}
}
else {
effects <- 0
termlabs <- "Model"
}
## Set up
Chi.xz <- x$CA$tot.chi
names(Chi.xz) <- "Residual"
r <- nobs(x) - Q$rank - 1
if (model == "full")
Chi.tot <- Chi.xz
else Chi.tot <- Chi.z + Chi.xz
if (is.null(by))
F.0 <- (Chi.z/q)/(Chi.xz/r)
else {
Chi.z <- numeric(length(effects))
for (k in seq_along(effects)) {
fv <- qr.fitted(Q, partXbar, k = effects[k])
Chi.z[k] <- if (isDB) sum(diag(fv)) else sum(fv^2)
}
Chi.z <- diff(c(0, F.0))
F.0 <- Chi.z/q * r/Chi.xz
}
## permutation data
E <- switch(model,
"direct" = ordiYbar(x, "initial"),
"reduced" = ordiYbar(x, "partial"),
"full" = ordiYbar(x, "CA"))
## vegan < 2.5-0 cannot use direct model in partial dbRDA
if (is.null(E) && isDB && isPartial)
stop("'direct' model cannot be used in old partial-dbrda: update ordination")
## Save dimensions
N <- nrow(E)
permutations <- getPermuteMatrix(permutations, N, strata = strata)
nperm <- nrow(permutations)
## Parallel processing (similar as in oecosimu)
if (is.null(parallel))
parallel <- 1
hasClus <- inherits(parallel, "cluster")
if (hasClus || parallel > 1) {
if(.Platform$OS.type == "unix" && !hasClus) {
tmp <- do.call(rbind,
mclapply(1:nperm,
function(i) getF(permutations[i,]),
mc.cores = parallel))
} else {
## if hasClus, do not set up and stop a temporary cluster
if (!hasClus) {
parallel <- makeCluster(parallel)
}
tmp <- parRapply(parallel, permutations, function(i) getF(i))
tmp <- matrix(tmp, ncol=3, byrow=TRUE)
if (!hasClus)
stopCluster(parallel)
}
} else {
tmp <- getF(permutations)
}
if ((p <- length(effects)) > 1) {
num <- tmp[,seq_len(p)]
den <- tmp[,p+1]
F.perm <- tmp[, seq_len(p) + p + 1]
} else {
num <- tmp[,1]
den <- tmp[,2]
F.perm <- tmp[,3, drop=FALSE]
}
Call <- match.call()
Call[[1]] <- as.name("permutest")
sol <- list(call = Call, testcall = x$call, model = model,
F.0 = F.0, F.perm = F.perm, chi = c(Chi.z, Chi.xz),
num = num, den = den, df = c(q, r), nperm = nperm,
method = x$method, first = first, termlabels = termlabs)
sol$Random.seed <- attr(permutations, "seed")
sol$control <- attr(permutations, "control")
if (!missing(strata)) {
sol$strata <- deparse(substitute(strata))
sol$stratum.values <- strata
}
class(sol) <- "permutest.cca"
sol
}
|
#' Decorrelation stretch
#'
#' Applies a decorrelation stretch to a multi-band raster image: NA cells
#' are filled with each band's mean, the bands are rotated into
#' principal-component space, each component is rank-transformed and
#' centred, and the result is rotated back and rescaled to 0-255 before
#' being written to disk as a GeoTIFF.
#'
#' @param pathr Path to the input raster file.
#' @param outdir Directory the stretched raster is written to
#'   (default: the current directory).
#' @return The value returned by raster::writeRaster(); the function is
#'   called for its side effect of writing "<fname>.tif" to 'outdir'.
#' @export
#' @examples
#' @importFrom magrittr "%>%"
#'
# Decorrelation Stretching raster images in R
# based on https://gist.github.com/fickse/82faf625242f6843249774f1545d7958
decorrelation_stretch <- function(pathr, outdir="."){
#load raster
r <- raster::brick(pathr)#[[c(13,55,134)]]
#get plot name
# NOTE(review): the output name is taken from fixed character positions
# (8 characters ending 4 before the end of the path), so it assumes a
# fixed-length file name plus a 3-letter extension -- confirm for new
# inputs.
fname <- substring(pathr, nchar(pathr)-12+1, nchar(pathr)-4)
# r must be a >= 3 band raster
# determine eigenspace: fill NA cells per band, then PCA on all pixels
means_per_layer <- lapply(1:dim(r)[3], function(x) fill_gaps(r[[x]])) #
r <- do.call(raster::brick, means_per_layer)
pc <- princomp(r[])
# get inverse rotation matrix
R0 <- solve(pc$loadings)
# 'stretch' values in pc space, then transform back to RGB space
fun <- function(x){(x-min(x))/(max(x)-min(x))*255}
# rank-transform each component via its ecdf, then centre it
scp <- apply(predict(pc), 2, function(x) scale(ecdf(x)(x), scale = FALSE))
scpt <- scp %*% R0
r[] <- apply(scpt, 2, fun)
raster::writeRaster(r, filename = paste(outdir, "/", fname, ".tif", sep=""),
datatype = 'INT2U',
overwrite=TRUE)
}
# example
# b <- brick(system.file("external/rlogo.grd", package="raster"))
# dc <- decorrelation_stretch(b)
# plotRGB(dc)
# Replace NA cells of a raster layer with the layer's mean value.
# Fixes a namespace inconsistency: the original called cellStats() and
# values() without the raster:: prefix, which fails when the raster
# package is imported but not attached (the rest of this file uses
# raster:: explicitly).
fill_gaps <- function(r) {
  mean_r <- raster::cellStats(r, 'mean', na.rm = TRUE)
  vals <- raster::values(r)
  vals[is.na(vals)] <- mean_r
  raster::values(r) <- vals
  return(r)
}
| /R/decorrelation_stretch.R | no_license | weecology/TreeSegmentation | R | false | false | 1,333 | r | #' Decorrelation stretch
#'
#' @return
#' @export
#' @examples
#' @importFrom magrittr "%>%"
#'
# Decorrelation Stretching raster images in R
# based on https://gist.github.com/fickse/82faf625242f6843249774f1545d7958
decorrelation_stretch <- function(pathr, outdir="."){
#load raster
r <- raster::brick(pathr)#[[c(13,55,134)]]
#get plot name
fname <- substring(pathr, nchar(pathr)-12+1, nchar(pathr)-4)
# r must be a >= 3 band raster
# determine eigenspace
means_per_layer <- lapply(1:dim(r)[3], function(x) fill_gaps(r[[x]])) #
r <- do.call(raster::brick, means_per_layer)
pc <- princomp(r[])
# get inverse rotation matrix
R0 <- solve(pc$loadings)
# 'stretch' values in pc space, then transform back to RGB space
fun <- function(x){(x-min(x))/(max(x)-min(x))*255}
scp <- apply(predict(pc), 2, function(x) scale(ecdf(x)(x), scale = FALSE))
scpt <- scp %*% R0
r[] <- apply(scpt, 2, fun)
raster::writeRaster(r, filename = paste(outdir, "/", fname, ".tif", sep=""),
datatype = 'INT2U',
overwrite=TRUE)
}
# example
# b <- brick(system.file("external/rlogo.grd", package="raster"))
# dc <- decorrelation_stretch(b)
# plotRGB(dc)
fill_gaps <- function(r){
mean_r <- cellStats(r, 'mean', na.rm=TRUE)
values(r)[is.na(values(r))] = mean_r
return(r)
}
|
### ----------------------------------------------------------- ###
# -------------------- Combine Summary Tables --------------------#
### ----------------------------------------------------------- ###
# Jonathan Jupke
# 06.06.19
# Paper: Should ecologists prefer model- over algorithm-based multivariate methods?
# Combine summary tables from all methods into one homogenized table
## -- OVERVIEW -- ##
# 01.Setup
# 02.Combine Tables
# 03.Modify Table
# 04.Save to File
## -------------- ##
# 01. Setup -------------------------------------------------------------------
pacman::p_load(data.table, dplyr, magrittr)
# other required packages: fs, here, stringr, tidyr, readr
# set wd
setwd(here::here("result_data/05_collected_results/"))
# 02. Combine Tables -------------------------------------------------------------
# Collect the names of all "*_results" files in the folder, dropping any
# "old" versions, and read them with fread()
result_files =
fs::dir_ls() %>%
as.character %>%
.[stringr::str_detect(. ,"_results")]
result_files = result_files[!(stringr::str_detect(result_files, "old"))]
all_tables <- lapply(result_files, fread)
# Assign each list element to own object so I can modify them
# NOTE(review): this positional mapping relies on fs::dir_ls() returning
# the files in alphabetical order (cca < cqo < dbrda < mvglm) -- revisit
# if result files are added or renamed.
cca <- all_tables[[1]]
cqo <- all_tables[[2]]
dbrda <- all_tables[[3]]
mvglm <- all_tables[[4]]
# Row bind all tables
all = rbind(cca,cqo,dbrda, mvglm)
# 03. Modify Table --------------------------------------------------------
# For the Response combination LB env1 and env2 have to be reversed;
# "Placeholder" is a temporary label so the two swaps do not collide.
all$variable[with(all, which(response == "LB" &
variable == "env1"))] <- "Placeholder"
all$variable[with(all, which(response == "LB" &
variable == "env2"))] <- "env1"
all$variable[with(all, which(response == "LB" &
variable == "Placeholder"))] <- "env2"
# Split Response column: first character -> response1, remainder ->
# response2; the original response column is kept (remove = FALSE)
all <-
tidyr::separate(
all,
col = response,
into = c("response1", "response2"),
sep = 1,
remove = F
)
# 04. Save to File ---------------------------------------------------------------
save.path = "all_results.csv"
# NOTE(review): newer readr versions deprecate 'path' in favour of
# 'file' -- confirm against the installed readr version.
readr::write_csv(x = all, path = save.path)
# -------------------------------------------------------------------- # | /r_scripts/03_analyse_results/combine_summary_tables.R | no_license | JonJup/Should-ecologists-prefer-model-over-distance-based-multivariate-methods | R | false | false | 2,261 | r | ### ----------------------------------------------------------- ###
# -------------------- Combine Summary Tables --------------------#
### ----------------------------------------------------------- ###
# Jonathan Jupke
# 06.06.19
# Paper: Should ecologists prefer model- over algorithm-based multivariate methods?
# Combine summary tables from all methods into one homogenized table
## -- OVERVIEW -- ##
# 01.Setup
# 02.Build Table
# 03.Work on Table
# 04.Save to File
## -------------- ##
# 01. Setup -------------------------------------------------------------------
pacman::p_load(data.table, dplyr, magrittr)
# other required packages: fs, here, stringr, tidyr, readr
# set wd
setwd(here::here("result_data/05_collected_results/"))
# 02. Combine Tables -------------------------------------------------------------
# Read all tables from with lapply
result_files =
fs::dir_ls() %>%
as.character %>%
.[stringr::str_detect(. ,"_results")]
result_files = result_files[!(stringr::str_detect(result_files, "old"))]
all_tables <- lapply(result_files, fread)
# Assign each list element to own object so I can modify them
cca <- all_tables[[1]]
cqo <- all_tables[[2]]
dbrda <- all_tables[[3]]
mvglm <- all_tables[[4]]
# Row bind all tables
all = rbind(cca,cqo,dbrda, mvglm)
# 03. Modify Table --------------------------------------------------------
# For the Response combination LB env1 and env2 have to be reversed
all$variable[with(all, which(response == "LB" &
variable == "env1"))] <- "Placeholder"
all$variable[with(all, which(response == "LB" &
variable == "env2"))] <- "env1"
all$variable[with(all, which(response == "LB" &
variable == "Placeholder"))] <- "env2"
# Split Response column
all <-
tidyr::separate(
all,
col = response,
into = c("response1", "response2"),
sep = 1,
remove = F
)
# 04. Save to File ---------------------------------------------------------------
save.path = "all_results.csv"
readr::write_csv(x = all, path = save.path)
# -------------------------------------------------------------------- # |
\name{plot.cv.biglasso}
\alias{plot.cv.biglasso}
\title{Plots the cross-validation curve from a "cv.biglasso" object}
\description{
Plot the cross-validation curve from a \code{\link{cv.biglasso}} object,
along with standard error bars.
}
\usage{
\method{plot}{cv.biglasso}(x, log.l = TRUE, type = c("cve", "rsq", "scale", "snr", "pred", "all"),
selected = TRUE, vertical.line = TRUE, col = "red", ...)
}
\arguments{
\item{x}{A \code{"cv.biglasso"} object.}
\item{log.l}{Should horizontal axis be on the log scale? Default is
TRUE.}
\item{type}{What to plot on the vertical axis. \code{cve} plots the
cross-validation error (deviance); \code{rsq} plots an estimate of
the fraction of the deviance explained by the model (R-squared);
\code{snr} plots an estimate of the signal-to-noise ratio;
\code{scale} plots, for \code{family="gaussian"}, an estimate of the
scale parameter (standard deviation); \code{pred} plots, for
\code{family="binomial"}, the estimated prediction error; \code{all}
produces all of the above.}
\item{selected}{If \code{TRUE} (the default), places an axis on top of
the plot denoting the number of variables in the model (i.e., that
have a nonzero regression coefficient) at that value of
\code{lambda}.}
\item{vertical.line}{If \code{TRUE} (the default), draws a vertical
line at the value where cross-validaton error is minimized.}
\item{col}{Controls the color of the dots (CV estimates).}
\item{\dots}{Other graphical parameters to \code{plot}}
}
\details{
  Error bars representing approximate 68\% confidence intervals are
  plotted along with the estimates at each value of \code{lambda}. For
  \code{rsq} and \code{snr}, these confidence intervals are quite crude,
  especially near zero.}
\author{
Yaohui Zeng and Patrick Breheny
Maintainer: Yaohui Zeng <yaohui-zeng@uiowa.edu>
}
\seealso{\code{\link{biglasso}}, \code{\link{cv.biglasso}}}
\examples{
## See examples in "cv.biglasso"
}
\keyword{models}
\keyword{regression}
| /man/plot.cv.biglasso.Rd | no_license | BenJamesbabala/biglasso | R | false | false | 2,035 | rd | \name{plot.cv.biglasso}
\alias{plot.cv.biglasso}
\title{Plots the cross-validation curve from a "cv.biglasso" object}
\description{
Plot the cross-validation curve from a \code{\link{cv.biglasso}} object,
along with standard error bars.
}
\usage{
\method{plot}{cv.biglasso}(x, log.l = TRUE, type = c("cve", "rsq", "scale", "snr", "pred", "all"),
selected = TRUE, vertical.line = TRUE, col = "red", ...)
}
\arguments{
\item{x}{A \code{"cv.biglasso"} object.}
\item{log.l}{Should horizontal axis be on the log scale? Default is
TRUE.}
\item{type}{What to plot on the vertical axis. \code{cve} plots the
cross-validation error (deviance); \code{rsq} plots an estimate of
the fraction of the deviance explained by the model (R-squared);
\code{snr} plots an estimate of the signal-to-noise ratio;
\code{scale} plots, for \code{family="gaussian"}, an estimate of the
scale parameter (standard deviation); \code{pred} plots, for
\code{family="binomial"}, the estimated prediction error; \code{all}
produces all of the above.}
\item{selected}{If \code{TRUE} (the default), places an axis on top of
the plot denoting the number of variables in the model (i.e., that
have a nonzero regression coefficient) at that value of
\code{lambda}.}
\item{vertical.line}{If \code{TRUE} (the default), draws a vertical
line at the value where cross-validaton error is minimized.}
\item{col}{Controls the color of the dots (CV estimates).}
\item{\dots}{Other graphical parameters to \code{plot}}
}
\details{
  Error bars representing approximate 68\% confidence intervals are
  plotted along with the estimates at each value of \code{lambda}. For
  \code{rsq} and \code{snr}, these confidence intervals are quite crude,
  especially near zero.}
\author{
Yaohui Zeng and Patrick Breheny
Maintainer: Yaohui Zeng <yaohui-zeng@uiowa.edu>
}
\seealso{\code{\link{biglasso}}, \code{\link{cv.biglasso}}}
\examples{
## See examples in "cv.biglasso"
}
\keyword{models}
\keyword{regression}
|
# Dash example app: caches a (possibly filtered) copy of the gapminder
# data in a dccStore ("memory-output") and renders it both as a data
# table and as a per-country line graph.
library(dash)
library(dashCoreComponents)
library(dashHtmlComponents)
library(dashTable)
app <- Dash$new()
#You can download the dataset at
#https://raw.githubusercontent.com/plotly/datasets/master/gapminderDataFiveYear.csv
#and put the csv in your assets folder!
# NOTE(review): the argument is spelled 'stringsAsFactor' (missing final
# 's'); it only works through R's partial argument matching -- consider
# spelling it out as 'stringsAsFactors'.
df <- read.csv(
file = "datasets/gapminderDataFiveYear.csv",
stringsAsFactor=FALSE,
check.names=FALSE
)
countries = as.list(unique(df$country))
# Layout: a country multi-dropdown and a field dropdown feed the store;
# the graph and table below are driven by the stored data.
app$layout(htmlDiv(list(
dccStore(id='memory-output'),
dccDropdown(id='memory-countries', options=lapply(countries, function(x){list('value' = x, 'label' = x)}),
multi=TRUE,
value=list('Canada', 'United States')),
dccDropdown(id='memory-field', options=list(
list('value'= 'lifeExp', 'label'= 'Life expectancy'),
list('value'= 'gdpPercap', 'label'= 'GDP per capita')
), value='lifeExp'),
htmlDiv(list(
dccGraph(id='memory-graph'),
dashDataTable(
id='memory-table',
columns= lapply(colnames(df), function(x){list('name' = x, 'id' = x)})
)
))
)))
# Callback 1: filter df to the selected countries and cache the rows in
# the dccStore; with no selection, the full data set is stored.
app$callback(
output = list(id="memory-output", property = 'data'),
params = list(input(id = "memory-countries", property = 'value')),
function(countries_selected){
if(length(countries_selected) < 1){
return(df_to_list(df))
}
filtered = df[which(df$country %in% countries_selected), ]
return(df_to_list(filtered))
})
# Callback 2: mirror the stored rows straight into the data table.
app$callback(
output = list(id="memory-table", property = 'data'),
params = list(input(id = "memory-output", property = 'data')),
function(data){
if(is.null(data) == TRUE){
return()
}
return(data)
})
# Callback 3: rebuild a data frame from the stored list-of-rows and draw
# one lines+markers trace per country for the chosen field.
app$callback(
output = list(id="memory-graph", property = 'figure'),
params = list(input(id = "memory-output", property = 'data'),
input(id = "memory-field", property = 'value')),
function(data, field){
# NOTE(review): this reconstruction assumes every stored row unlists to
# exactly the six columns named below, in this order -- verify against
# the output of df_to_list().
data = data.frame(matrix(unlist(data), nrow=length(data), byrow=T))
colnames(data)[1:ncol(data)] = c('country', 'year','pop','continent','lifeExp', 'gdpPercap')
if(is.null(data) == TRUE){
return()
}
aggregation = list()
data <- split(data, f = data$country)
for (row in 1:length(data)) {
aggregation[[row]] <- list(
x = unlist(data[[row]][[field]]),
y = unlist(data[[row]]['year']),
text = data[[row]]['country'],
mode = 'lines+markers',
name = as.character(unique(data[[row]]['country'])$country)
)
}
return(list(
'data' = aggregation))
})
app$run_server()
| /dash_docs/chapters/dash_core_components/Store/examples/sharecallbacks.R | permissive | plotly/dash-docs | R | false | false | 2,509 | r | library(dash)
library(dashCoreComponents)
library(dashHtmlComponents)
library(dashTable)
app <- Dash$new()
#You can download the dataset at
#https://raw.githubusercontent.com/plotly/datasets/master/gapminderDataFiveYear.csv
#and put the csv in your assets folder!
df <- read.csv(
file = "datasets/gapminderDataFiveYear.csv",
stringsAsFactor=FALSE,
check.names=FALSE
)
countries = as.list(unique(df$country))
app$layout(htmlDiv(list(
dccStore(id='memory-output'),
dccDropdown(id='memory-countries', options=lapply(countries, function(x){list('value' = x, 'label' = x)}),
multi=TRUE,
value=list('Canada', 'United States')),
dccDropdown(id='memory-field', options=list(
list('value'= 'lifeExp', 'label'= 'Life expectancy'),
list('value'= 'gdpPercap', 'label'= 'GDP per capita')
), value='lifeExp'),
htmlDiv(list(
dccGraph(id='memory-graph'),
dashDataTable(
id='memory-table',
columns= lapply(colnames(df), function(x){list('name' = x, 'id' = x)})
)
))
)))
app$callback(
output = list(id="memory-output", property = 'data'),
params = list(input(id = "memory-countries", property = 'value')),
function(countries_selected){
if(length(countries_selected) < 1){
return(df_to_list(df))
}
filtered = df[which(df$country %in% countries_selected), ]
return(df_to_list(filtered))
})
app$callback(
output = list(id="memory-table", property = 'data'),
params = list(input(id = "memory-output", property = 'data')),
function(data){
if(is.null(data) == TRUE){
return()
}
return(data)
})
app$callback(
output = list(id="memory-graph", property = 'figure'),
params = list(input(id = "memory-output", property = 'data'),
input(id = "memory-field", property = 'value')),
function(data, field){
data = data.frame(matrix(unlist(data), nrow=length(data), byrow=T))
colnames(data)[1:ncol(data)] = c('country', 'year','pop','continent','lifeExp', 'gdpPercap')
if(is.null(data) == TRUE){
return()
}
aggregation = list()
data <- split(data, f = data$country)
for (row in 1:length(data)) {
aggregation[[row]] <- list(
x = unlist(data[[row]][[field]]),
y = unlist(data[[row]]['year']),
text = data[[row]]['country'],
mode = 'lines+markers',
name = as.character(unique(data[[row]]['country'])$country)
)
}
return(list(
'data' = aggregation))
})
app$run_server()
|
# Question 1:
# Read the raw crime data into a data frame, treating empty strings as NA
london_crime <- read.csv("london-crime-data.csv", na = "")
# structure of the DF
str(london_crime)
# Date has a particular requirement as it should contain day, month, year.
# Build a "dd/mm/yyyy" string by prepending day "01" to the existing
# month and year columns (still character here; converted to Date in
# Question 3).
london_crime$Date <- paste("01", london_crime$month, london_crime$year, sep='/')
# Structure of the DF with Date field
str(london_crime)
# Question 2:
# Display the variable names of the DF
names(london_crime)
# Rename the variables (by position) to the names required by the brief
names(london_crime) [2] <- "Borough"
names(london_crime) [3] <- "MajorCategory"
names(london_crime) [4] <- "SubCategory"
names(london_crime) [5] <- "Value"
names(london_crime) [8] <- "CrimeDate"
# Display the updated variable names of the DF
names(london_crime)
str(london_crime)
# Keep only the variables needed for the remaining questions
london_crime <- london_crime[c("Borough", "MajorCategory", "SubCategory",
"Value", "CrimeDate")]
# structure of the DF with updated variable names
str(london_crime)
# Question 3:
# Convert the "dd/mm/yyyy" strings to Date objects
london_crime$CrimeDate <- as.Date(london_crime$CrimeDate, "%d/%m/%Y")
# Structure of the DF
str(london_crime)
# Question 4:
# Save the current graphics settings.
# NOTE(review): display_settings is never restored within this section --
# presumably par(display_settings) follows later in the file; confirm.
display_settings <- par(no.readonly = TRUE)
# convert to a factor first, so plot() draws a bar chart
london_crime$Borough <- factor(london_crime$Borough)
# Plot the Borough variable field using the plot() function.
# NOTE(review): plot(factor) shows the number of ROWS per borough, not
# the sum of 'Value' -- verify this matches the intended "level of crime".
plot(london_crime$Borough)
# you can plot the summary() of the data
summary(london_crime$Borough)
# Re-plot with a title and labelled X and Y axes
plot(london_crime$Borough, main="Crime Rate", xlab="Borough Names", ylab="Rate Count")
# Answer
# The "Croydon" has the highest level of crime.
# The "City of London" has the lowest level of crime.
# Question 5:
# convert to a factor first
london_crime$MajorCategory <- factor(london_crime$MajorCategory)
# Showing the summary of the data
summary(london_crime$MajorCategory)
# using pie() function plot the MajorCategory
x <- c(9082, 17727, 10313, 2140, 6737, 8025, 917, 33759, 27347)
labels <- c("Burglary", "Criminal Damange", "Drugs", "Fraud or Forgery",
"Other Notifiable Offences", "Robbery", "Sexual Offences",
"Theft and Handling ", "Violence Against the Person")
pie(x, labels)
# Answer
# The "Theft and Handling" has the highest level of crimes
# The "Sexual Offences" has the lowest level of crime
# Question 6:
# Creating a new variable called Region and store
# within it the correct region for each BOROUGH
london_crime$Region[london_crime$Borough == "Barking and Dagenham"|london_crime$Borough =="Bexley"|
london_crime$Borough == "Greenwich"|london_crime$Borough =="Havering"|
london_crime$Borough == "Kingston upon Thames"|london_crime$Borough =="Newham"|
london_crime$Borough == "Redbridge"|london_crime$Borough =="Wandsworth"] <- "East"
london_crime$Region[london_crime$Borough == "Barnet"|london_crime$Borough =="Camden"|
london_crime$Borough == "Enfield"|london_crime$Borough =="Hackney"|
london_crime$Borough == "Haringey"] <- "North"
london_crime$Region[london_crime$Borough == "Bromley"|london_crime$Borough =="Croydon"|
london_crime$Borough == "Merton"|london_crime$Borough =="Sutton"] <- "South"
london_crime$Region[london_crime$Borough == "Islington"|london_crime$Borough =="Kensington and Chelsea"|
london_crime$Borough == "Lambeth"|london_crime$Borough =="Lewisham"|
london_crime$Borough == "Southwark"|london_crime$Borough =="Tower Hamlets"|
london_crime$Borough == "Waltham Forest"|london_crime$Borough =="Westminster"] <- "Central"
london_crime$Region[london_crime$Borough == "Brent"|london_crime$Borough =="Ealing"|
london_crime$Borough == "Hammersmith and Fulham"|london_crime$Borough =="Harrow"|
london_crime$Borough == "Hillingdon"|london_crime$Borough =="Hounslow"|
london_crime$Borough == "Richmond upon Thames"] <- "West"
# Displaying the DF with new REGION field
london_crime
# structure of the DF
str(london_crime)
# Checking the missig DATA for REGIION
missing_data <- london_crime[!complete.cases(london_crime$Region),]
no_missing_data <- na.omit(missing_data)
no_missing_data
# Analysing that by VIM
library(VIM)
missing_values <- aggr(london_crime, prop = FALSE, numbers = TRUE)
# Showing the summary for if any values missing
summary(missing_values)
# Question 7:
# converting to a factor first
london_crime$Region <- factor(london_crime$Region)
# Plot the Region variable field using the plot() function
plot(london_crime$Region)
# # Labelling the X and Y axis
plot(london_crime$Region, main="Crimes by Region", xlab="Region Names", ylab="Crimes Investigated")
summary(london_crime$Region)
# Question 8:
# Extracting the subset
london_crime_subset <- subset(london_crime, Region == "Central" | Region == "South")
london_crime_subset
# Question 9:
# Plotting the summary function
summary(london_crime)
# Question 10:
# Saving the modified DF with the new name
write.csv(london_crime, file = "london-crime-modified.csv")
# Finally Uploaded all the script in the GIT_HUB along with the CSV
| /DS_Assessment_2.R | no_license | nikhilpatadelyit/London | R | false | false | 5,331 | r | # Question 1:
#Reading the data to the DF
london_crime <- read.csv("london-crime-data.csv", na = "")
# structure of the DF
str(london_crime)
# Date has a particular requirement as it should contain day, month, year
# adding the day and Date field element
london_crime$Date <- paste("01", london_crime$month, london_crime$year, sep='/')
# Structure of the DF with Date field
str(london_crime)
# Question 2:
# Display the variable names of the Df
names(london_crime)
# Modifying the variable names from the DF to a new names as required
names(london_crime) [2] <- "Borough"
names(london_crime) [3] <- "MajorCategory"
names(london_crime) [4] <- "SubCategory"
names(london_crime) [5] <- "Value"
names(london_crime) [8] <- "CrimeDate"
# Display the updated variable names of the Df
names(london_crime)
str(london_crime)
# Only displays the required variables for further executing
london_crime <- london_crime[c("Borough", "MajorCategory", "SubCategory",
"Value", "CrimeDate")]
# structure of the DF with updated variable names
str(london_crime)
# Question 3:
# Change the date variable to a Date
london_crime$CrimeDate <- as.Date(london_crime$CrimeDate, "%d/%m/%Y")
# Structure of the DF
str(london_crime)
# Question 4:
display_settings <- par(no.readonly = TRUE)
# convert to a factor first
london_crime$Borough <- factor(london_crime$Borough)
# Plot the Borough variable field using the plot() function
plot(london_crime$Borough)
# you can plot the summary() of the data
summary(london_crime$Borough)
# Labelling the X and Y axis
plot(london_crime$Borough, main="Crime Rate", xlab="Borough Names", ylab="Rate Count")
# Answer
# The "Croydon" has the highest level of crime.
# The "City of London" has the lowest level of crime.
# Question 5:
# convert to a factor first
london_crime$MajorCategory <- factor(london_crime$MajorCategory)
# Showing the summary of the data
summary(london_crime$MajorCategory)
# using pie() function plot the MajorCategory
x <- c(9082, 17727, 10313, 2140, 6737, 8025, 917, 33759, 27347)
labels <- c("Burglary", "Criminal Damange", "Drugs", "Fraud or Forgery",
"Other Notifiable Offences", "Robbery", "Sexual Offences",
"Theft and Handling ", "Violence Against the Person")
pie(x, labels)
# Answer
# The "Theft and Handling" has the highest level of crimes
# The "Sexual Offences" has the lowest level of crime
# Question 6:
# Creating a new variable called Region and store
# within it the correct region for each BOROUGH
london_crime$Region[london_crime$Borough == "Barking and Dagenham"|london_crime$Borough =="Bexley"|
london_crime$Borough == "Greenwich"|london_crime$Borough =="Havering"|
london_crime$Borough == "Kingston upon Thames"|london_crime$Borough =="Newham"|
london_crime$Borough == "Redbridge"|london_crime$Borough =="Wandsworth"] <- "East"
london_crime$Region[london_crime$Borough == "Barnet"|london_crime$Borough =="Camden"|
london_crime$Borough == "Enfield"|london_crime$Borough =="Hackney"|
london_crime$Borough == "Haringey"] <- "North"
london_crime$Region[london_crime$Borough == "Bromley"|london_crime$Borough =="Croydon"|
london_crime$Borough == "Merton"|london_crime$Borough =="Sutton"] <- "South"
london_crime$Region[london_crime$Borough == "Islington"|london_crime$Borough =="Kensington and Chelsea"|
london_crime$Borough == "Lambeth"|london_crime$Borough =="Lewisham"|
london_crime$Borough == "Southwark"|london_crime$Borough =="Tower Hamlets"|
london_crime$Borough == "Waltham Forest"|london_crime$Borough =="Westminster"] <- "Central"
london_crime$Region[london_crime$Borough == "Brent"|london_crime$Borough =="Ealing"|
london_crime$Borough == "Hammersmith and Fulham"|london_crime$Borough =="Harrow"|
london_crime$Borough == "Hillingdon"|london_crime$Borough =="Hounslow"|
london_crime$Borough == "Richmond upon Thames"] <- "West"
# Displaying the DF with new REGION field
london_crime
# structure of the DF
str(london_crime)
# Checking the missig DATA for REGIION
missing_data <- london_crime[!complete.cases(london_crime$Region),]
no_missing_data <- na.omit(missing_data)
no_missing_data
# Analysing that by VIM
library(VIM)
missing_values <- aggr(london_crime, prop = FALSE, numbers = TRUE)
# Showing the summary for if any values missing
summary(missing_values)
# Question 7:
# converting to a factor first
london_crime$Region <- factor(london_crime$Region)
# Plot the Region variable field using the plot() function
plot(london_crime$Region)
# # Labelling the X and Y axis
plot(london_crime$Region, main="Crimes by Region", xlab="Region Names", ylab="Crimes Investigated")
summary(london_crime$Region)
# Question 8:
# Extracting the subset
london_crime_subset <- subset(london_crime, Region == "Central" | Region == "South")
london_crime_subset
# Question 9:
# Plotting the summary function
summary(london_crime)
# Question 10:
# Saving the modified DF with the new name
write.csv(london_crime, file = "london-crime-modified.csv")
# Finally Uploaded all the script in the GIT_HUB along with the CSV
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/databasemigrationservice_operations.R
\name{databasemigrationservice_modify_endpoint}
\alias{databasemigrationservice_modify_endpoint}
\title{Modifies the specified endpoint}
\usage{
databasemigrationservice_modify_endpoint(EndpointArn,
EndpointIdentifier, EndpointType, EngineName, Username, Password,
ServerName, Port, DatabaseName, ExtraConnectionAttributes,
CertificateArn, SslMode, ServiceAccessRoleArn, ExternalTableDefinition,
DynamoDbSettings, S3Settings, DmsTransferSettings, MongoDbSettings,
KinesisSettings, KafkaSettings, ElasticsearchSettings, NeptuneSettings,
RedshiftSettings, PostgreSQLSettings, MySQLSettings, OracleSettings,
SybaseSettings, MicrosoftSQLServerSettings, IBMDb2Settings,
DocDbSettings)
}
\arguments{
\item{EndpointArn}{[required] The Amazon Resource Name (ARN) string that uniquely identifies the
endpoint.}
\item{EndpointIdentifier}{The database endpoint identifier. Identifiers must begin with a letter
and must contain only ASCII letters, digits, and hyphens. They can't end
with a hyphen or contain two consecutive hyphens.}
\item{EndpointType}{The type of endpoint. Valid values are \code{source} and \code{target}.}
\item{EngineName}{The type of engine for the endpoint. Valid values, depending on the
EndpointType, include \code{"mysql"}, \code{"oracle"}, \code{"postgres"}, \code{"mariadb"},
\code{"aurora"}, \code{"aurora-postgresql"}, \code{"redshift"}, \code{"s3"}, \code{"db2"},
\code{"azuredb"}, \code{"sybase"}, \code{"dynamodb"}, \code{"mongodb"}, \code{"kinesis"},
\code{"kafka"}, \code{"elasticsearch"}, \code{"documentdb"}, \code{"sqlserver"}, and
\code{"neptune"}.}
\item{Username}{The user name to be used to login to the endpoint database.}
\item{Password}{The password to be used to login to the endpoint database.}
\item{ServerName}{The name of the server where the endpoint database resides.}
\item{Port}{The port used by the endpoint database.}
\item{DatabaseName}{The name of the endpoint database.}
\item{ExtraConnectionAttributes}{Additional attributes associated with the connection. To reset this
parameter, pass the empty string ("") as an argument.}
\item{CertificateArn}{The Amazon Resource Name (ARN) of the certificate used for SSL
connection.}
\item{SslMode}{The SSL mode used to connect to the endpoint. The default value is
\code{none}.}
\item{ServiceAccessRoleArn}{The Amazon Resource Name (ARN) for the service access role you want to
use to modify the endpoint.}
\item{ExternalTableDefinition}{The external table definition.}
\item{DynamoDbSettings}{Settings in JSON format for the target Amazon DynamoDB endpoint. For
information about other available settings, see \href{https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.DynamoDB.html}{Using Object Mapping to Migrate Data to DynamoDB}
in the \emph{AWS Database Migration Service User Guide.}}
\item{S3Settings}{Settings in JSON format for the target Amazon S3 endpoint. For more
information about the available settings, see \href{https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring}{Extra Connection Attributes When Using Amazon S3 as a Target for AWS DMS}
in the \emph{AWS Database Migration Service User Guide.}}
\item{DmsTransferSettings}{The settings in JSON format for the DMS transfer type of source
endpoint.
Attributes include the following:
\itemize{
\item serviceAccessRoleArn - The AWS Identity and Access Management (IAM)
role that has permission to access the Amazon S3 bucket.
\item BucketName - The name of the S3 bucket to use.
\item compressionType - An optional parameter to use GZIP to compress the
target files. Either set this parameter to NONE (the default) or
don't use it to leave the files uncompressed.
}
Shorthand syntax for these settings is as follows:
\verb{ServiceAccessRoleArn=string ,BucketName=string,CompressionType=string}
JSON syntax for these settings is as follows:
\verb{\{ "ServiceAccessRoleArn": "string", "BucketName": "string", "CompressionType": "none"|"gzip" \} }}
\item{MongoDbSettings}{Settings in JSON format for the source MongoDB endpoint. For more
information about the available settings, see the configuration
properties section in \href{https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.MongoDB.html}{Using MongoDB as a Source for AWS Database Migration Service}
in the \emph{AWS Database Migration Service User Guide.}}
\item{KinesisSettings}{Settings in JSON format for the target endpoint for Amazon Kinesis Data
Streams. For more information about the available settings, see \href{https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kinesis.html}{Using Amazon Kinesis Data Streams as a Target for AWS Database Migration Service}
in the \emph{AWS Database Migration Service User Guide.}}
\item{KafkaSettings}{Settings in JSON format for the target Apache Kafka endpoint. For more
information about the available settings, see \href{https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kafka.html}{Using Apache Kafka as a Target for AWS Database Migration Service}
in the \emph{AWS Database Migration Service User Guide.}}
\item{ElasticsearchSettings}{Settings in JSON format for the target Elasticsearch endpoint. For more
information about the available settings, see \href{https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Elasticsearch.html#CHAP_Target.Elasticsearch.Configuration}{Extra Connection Attributes When Using Elasticsearch as a Target for AWS DMS}
in the \emph{AWS Database Migration Service User Guide.}}
\item{NeptuneSettings}{Settings in JSON format for the target Amazon Neptune endpoint. For more
information about the available settings, see \href{https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Neptune.html#CHAP_Target.Neptune.EndpointSettings}{Specifying Endpoint Settings for Amazon Neptune as a Target}
in the \emph{AWS Database Migration Service User Guide.}}
\item{RedshiftSettings}{}
\item{PostgreSQLSettings}{Settings in JSON format for the source and target PostgreSQL endpoint.
For information about other available settings, see Extra connection
attributes when using PostgreSQL as a source for AWS DMS and Extra
connection attributes when using PostgreSQL as a target for AWS DMS in
the \emph{AWS Database Migration Service User Guide.}}
\item{MySQLSettings}{Settings in JSON format for the source and target MySQL endpoint. For
information about other available settings, see Extra connection
attributes when using MySQL as a source for AWS DMS and Extra connection
attributes when using a MySQL-compatible database as a target for AWS
DMS in the \emph{AWS Database Migration Service User Guide.}}
\item{OracleSettings}{Settings in JSON format for the source and target Oracle endpoint. For
information about other available settings, see Extra connection
attributes when using Oracle as a source for AWS DMS and Extra
connection attributes when using Oracle as a target for AWS DMS in the
\emph{AWS Database Migration Service User Guide.}}
\item{SybaseSettings}{Settings in JSON format for the source and target SAP ASE endpoint. For
information about other available settings, see Extra connection
attributes when using SAP ASE as a source for AWS DMS and Extra
connection attributes when using SAP ASE as a target for AWS DMS in the
\emph{AWS Database Migration Service User Guide.}}
\item{MicrosoftSQLServerSettings}{Settings in JSON format for the source and target Microsoft SQL Server
endpoint. For information about other available settings, see Extra
connection attributes when using SQL Server as a source for AWS DMS and
Extra connection attributes when using SQL Server as a target for AWS
DMS in the \emph{AWS Database Migration Service User Guide.}}
\item{IBMDb2Settings}{Settings in JSON format for the source IBM Db2 LUW endpoint. For
information about other available settings, see Extra connection
attributes when using Db2 LUW as a source for AWS DMS in the \emph{AWS
Database Migration Service User Guide.}}
\item{DocDbSettings}{Settings in JSON format for the source DocumentDB endpoint. For more
information about the available settings, see the configuration
properties section in \href{https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.DocumentDB.html}{Using DocumentDB as a Source for AWS Database Migration Service}
in the \emph{AWS Database Migration Service User Guide.}}
}
\value{
A list with the following syntax:\preformatted{list(
Endpoint = list(
EndpointIdentifier = "string",
EndpointType = "source"|"target",
EngineName = "string",
EngineDisplayName = "string",
Username = "string",
ServerName = "string",
Port = 123,
DatabaseName = "string",
ExtraConnectionAttributes = "string",
Status = "string",
KmsKeyId = "string",
EndpointArn = "string",
CertificateArn = "string",
SslMode = "none"|"require"|"verify-ca"|"verify-full",
ServiceAccessRoleArn = "string",
ExternalTableDefinition = "string",
ExternalId = "string",
DynamoDbSettings = list(
ServiceAccessRoleArn = "string"
),
S3Settings = list(
ServiceAccessRoleArn = "string",
ExternalTableDefinition = "string",
CsvRowDelimiter = "string",
CsvDelimiter = "string",
BucketFolder = "string",
BucketName = "string",
CompressionType = "none"|"gzip",
EncryptionMode = "sse-s3"|"sse-kms",
ServerSideEncryptionKmsKeyId = "string",
DataFormat = "csv"|"parquet",
EncodingType = "plain"|"plain-dictionary"|"rle-dictionary",
DictPageSizeLimit = 123,
RowGroupLength = 123,
DataPageSize = 123,
ParquetVersion = "parquet-1-0"|"parquet-2-0",
EnableStatistics = TRUE|FALSE,
IncludeOpForFullLoad = TRUE|FALSE,
CdcInsertsOnly = TRUE|FALSE,
TimestampColumnName = "string",
ParquetTimestampInMillisecond = TRUE|FALSE,
CdcInsertsAndUpdates = TRUE|FALSE,
DatePartitionEnabled = TRUE|FALSE,
DatePartitionSequence = "YYYYMMDD"|"YYYYMMDDHH"|"YYYYMM"|"MMYYYYDD"|"DDMMYYYY",
DatePartitionDelimiter = "SLASH"|"UNDERSCORE"|"DASH"|"NONE",
UseCsvNoSupValue = TRUE|FALSE,
CsvNoSupValue = "string",
PreserveTransactions = TRUE|FALSE,
CdcPath = "string"
),
DmsTransferSettings = list(
ServiceAccessRoleArn = "string",
BucketName = "string"
),
MongoDbSettings = list(
Username = "string",
Password = "string",
ServerName = "string",
Port = 123,
DatabaseName = "string",
AuthType = "no"|"password",
AuthMechanism = "default"|"mongodb_cr"|"scram_sha_1",
NestingLevel = "none"|"one",
ExtractDocId = "string",
DocsToInvestigate = "string",
AuthSource = "string",
KmsKeyId = "string",
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string"
),
KinesisSettings = list(
StreamArn = "string",
MessageFormat = "json"|"json-unformatted",
ServiceAccessRoleArn = "string",
IncludeTransactionDetails = TRUE|FALSE,
IncludePartitionValue = TRUE|FALSE,
PartitionIncludeSchemaTable = TRUE|FALSE,
IncludeTableAlterOperations = TRUE|FALSE,
IncludeControlDetails = TRUE|FALSE,
IncludeNullAndEmpty = TRUE|FALSE
),
KafkaSettings = list(
Broker = "string",
Topic = "string",
MessageFormat = "json"|"json-unformatted",
IncludeTransactionDetails = TRUE|FALSE,
IncludePartitionValue = TRUE|FALSE,
PartitionIncludeSchemaTable = TRUE|FALSE,
IncludeTableAlterOperations = TRUE|FALSE,
IncludeControlDetails = TRUE|FALSE,
MessageMaxBytes = 123,
IncludeNullAndEmpty = TRUE|FALSE
),
ElasticsearchSettings = list(
ServiceAccessRoleArn = "string",
EndpointUri = "string",
FullLoadErrorPercentage = 123,
ErrorRetryDuration = 123
),
NeptuneSettings = list(
ServiceAccessRoleArn = "string",
S3BucketName = "string",
S3BucketFolder = "string",
ErrorRetryDuration = 123,
MaxFileSize = 123,
MaxRetryCount = 123,
IamAuthEnabled = TRUE|FALSE
),
RedshiftSettings = list(
AcceptAnyDate = TRUE|FALSE,
AfterConnectScript = "string",
BucketFolder = "string",
BucketName = "string",
CaseSensitiveNames = TRUE|FALSE,
CompUpdate = TRUE|FALSE,
ConnectionTimeout = 123,
DatabaseName = "string",
DateFormat = "string",
EmptyAsNull = TRUE|FALSE,
EncryptionMode = "sse-s3"|"sse-kms",
ExplicitIds = TRUE|FALSE,
FileTransferUploadStreams = 123,
LoadTimeout = 123,
MaxFileSize = 123,
Password = "string",
Port = 123,
RemoveQuotes = TRUE|FALSE,
ReplaceInvalidChars = "string",
ReplaceChars = "string",
ServerName = "string",
ServiceAccessRoleArn = "string",
ServerSideEncryptionKmsKeyId = "string",
TimeFormat = "string",
TrimBlanks = TRUE|FALSE,
TruncateColumns = TRUE|FALSE,
Username = "string",
WriteBufferSize = 123,
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string"
),
PostgreSQLSettings = list(
AfterConnectScript = "string",
CaptureDdls = TRUE|FALSE,
MaxFileSize = 123,
DatabaseName = "string",
DdlArtifactsSchema = "string",
ExecuteTimeout = 123,
FailTasksOnLobTruncation = TRUE|FALSE,
Password = "string",
Port = 123,
ServerName = "string",
Username = "string",
SlotName = "string",
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string"
),
MySQLSettings = list(
AfterConnectScript = "string",
DatabaseName = "string",
EventsPollInterval = 123,
TargetDbType = "specific-database"|"multiple-databases",
MaxFileSize = 123,
ParallelLoadThreads = 123,
Password = "string",
Port = 123,
ServerName = "string",
ServerTimezone = "string",
Username = "string",
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string"
),
OracleSettings = list(
AddSupplementalLogging = TRUE|FALSE,
ArchivedLogDestId = 123,
AdditionalArchivedLogDestId = 123,
AllowSelectNestedTables = TRUE|FALSE,
ParallelAsmReadThreads = 123,
ReadAheadBlocks = 123,
AccessAlternateDirectly = TRUE|FALSE,
UseAlternateFolderForOnline = TRUE|FALSE,
OraclePathPrefix = "string",
UsePathPrefix = "string",
ReplacePathPrefix = TRUE|FALSE,
EnableHomogenousTablespace = TRUE|FALSE,
DirectPathNoLog = TRUE|FALSE,
ArchivedLogsOnly = TRUE|FALSE,
AsmPassword = "string",
AsmServer = "string",
AsmUser = "string",
CharLengthSemantics = "default"|"char"|"byte",
DatabaseName = "string",
DirectPathParallelLoad = TRUE|FALSE,
FailTasksOnLobTruncation = TRUE|FALSE,
NumberDatatypeScale = 123,
Password = "string",
Port = 123,
ReadTableSpaceName = TRUE|FALSE,
RetryInterval = 123,
SecurityDbEncryption = "string",
SecurityDbEncryptionName = "string",
ServerName = "string",
Username = "string",
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string",
SecretsManagerOracleAsmAccessRoleArn = "string",
SecretsManagerOracleAsmSecretId = "string"
),
SybaseSettings = list(
DatabaseName = "string",
Password = "string",
Port = 123,
ServerName = "string",
Username = "string",
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string"
),
MicrosoftSQLServerSettings = list(
Port = 123,
BcpPacketSize = 123,
DatabaseName = "string",
ControlTablesFileGroup = "string",
Password = "string",
ReadBackupOnly = TRUE|FALSE,
SafeguardPolicy = "rely-on-sql-server-replication-agent"|"exclusive-automatic-truncation"|"shared-automatic-truncation",
ServerName = "string",
Username = "string",
UseBcpFullLoad = TRUE|FALSE,
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string"
),
IBMDb2Settings = list(
DatabaseName = "string",
Password = "string",
Port = 123,
ServerName = "string",
SetDataCaptureChanges = TRUE|FALSE,
CurrentLsn = "string",
MaxKBytesPerRead = 123,
Username = "string",
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string"
),
DocDbSettings = list(
Username = "string",
Password = "string",
ServerName = "string",
Port = 123,
DatabaseName = "string",
NestingLevel = "none"|"one",
ExtractDocId = TRUE|FALSE,
DocsToInvestigate = 123,
KmsKeyId = "string",
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string"
)
)
)
}
}
\description{
Modifies the specified endpoint.
}
\section{Request syntax}{
\preformatted{svc$modify_endpoint(
EndpointArn = "string",
EndpointIdentifier = "string",
EndpointType = "source"|"target",
EngineName = "string",
Username = "string",
Password = "string",
ServerName = "string",
Port = 123,
DatabaseName = "string",
ExtraConnectionAttributes = "string",
CertificateArn = "string",
SslMode = "none"|"require"|"verify-ca"|"verify-full",
ServiceAccessRoleArn = "string",
ExternalTableDefinition = "string",
DynamoDbSettings = list(
ServiceAccessRoleArn = "string"
),
S3Settings = list(
ServiceAccessRoleArn = "string",
ExternalTableDefinition = "string",
CsvRowDelimiter = "string",
CsvDelimiter = "string",
BucketFolder = "string",
BucketName = "string",
CompressionType = "none"|"gzip",
EncryptionMode = "sse-s3"|"sse-kms",
ServerSideEncryptionKmsKeyId = "string",
DataFormat = "csv"|"parquet",
EncodingType = "plain"|"plain-dictionary"|"rle-dictionary",
DictPageSizeLimit = 123,
RowGroupLength = 123,
DataPageSize = 123,
ParquetVersion = "parquet-1-0"|"parquet-2-0",
EnableStatistics = TRUE|FALSE,
IncludeOpForFullLoad = TRUE|FALSE,
CdcInsertsOnly = TRUE|FALSE,
TimestampColumnName = "string",
ParquetTimestampInMillisecond = TRUE|FALSE,
CdcInsertsAndUpdates = TRUE|FALSE,
DatePartitionEnabled = TRUE|FALSE,
DatePartitionSequence = "YYYYMMDD"|"YYYYMMDDHH"|"YYYYMM"|"MMYYYYDD"|"DDMMYYYY",
DatePartitionDelimiter = "SLASH"|"UNDERSCORE"|"DASH"|"NONE",
UseCsvNoSupValue = TRUE|FALSE,
CsvNoSupValue = "string",
PreserveTransactions = TRUE|FALSE,
CdcPath = "string"
),
DmsTransferSettings = list(
ServiceAccessRoleArn = "string",
BucketName = "string"
),
MongoDbSettings = list(
Username = "string",
Password = "string",
ServerName = "string",
Port = 123,
DatabaseName = "string",
AuthType = "no"|"password",
AuthMechanism = "default"|"mongodb_cr"|"scram_sha_1",
NestingLevel = "none"|"one",
ExtractDocId = "string",
DocsToInvestigate = "string",
AuthSource = "string",
KmsKeyId = "string",
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string"
),
KinesisSettings = list(
StreamArn = "string",
MessageFormat = "json"|"json-unformatted",
ServiceAccessRoleArn = "string",
IncludeTransactionDetails = TRUE|FALSE,
IncludePartitionValue = TRUE|FALSE,
PartitionIncludeSchemaTable = TRUE|FALSE,
IncludeTableAlterOperations = TRUE|FALSE,
IncludeControlDetails = TRUE|FALSE,
IncludeNullAndEmpty = TRUE|FALSE
),
KafkaSettings = list(
Broker = "string",
Topic = "string",
MessageFormat = "json"|"json-unformatted",
IncludeTransactionDetails = TRUE|FALSE,
IncludePartitionValue = TRUE|FALSE,
PartitionIncludeSchemaTable = TRUE|FALSE,
IncludeTableAlterOperations = TRUE|FALSE,
IncludeControlDetails = TRUE|FALSE,
MessageMaxBytes = 123,
IncludeNullAndEmpty = TRUE|FALSE
),
ElasticsearchSettings = list(
ServiceAccessRoleArn = "string",
EndpointUri = "string",
FullLoadErrorPercentage = 123,
ErrorRetryDuration = 123
),
NeptuneSettings = list(
ServiceAccessRoleArn = "string",
S3BucketName = "string",
S3BucketFolder = "string",
ErrorRetryDuration = 123,
MaxFileSize = 123,
MaxRetryCount = 123,
IamAuthEnabled = TRUE|FALSE
),
RedshiftSettings = list(
AcceptAnyDate = TRUE|FALSE,
AfterConnectScript = "string",
BucketFolder = "string",
BucketName = "string",
CaseSensitiveNames = TRUE|FALSE,
CompUpdate = TRUE|FALSE,
ConnectionTimeout = 123,
DatabaseName = "string",
DateFormat = "string",
EmptyAsNull = TRUE|FALSE,
EncryptionMode = "sse-s3"|"sse-kms",
ExplicitIds = TRUE|FALSE,
FileTransferUploadStreams = 123,
LoadTimeout = 123,
MaxFileSize = 123,
Password = "string",
Port = 123,
RemoveQuotes = TRUE|FALSE,
ReplaceInvalidChars = "string",
ReplaceChars = "string",
ServerName = "string",
ServiceAccessRoleArn = "string",
ServerSideEncryptionKmsKeyId = "string",
TimeFormat = "string",
TrimBlanks = TRUE|FALSE,
TruncateColumns = TRUE|FALSE,
Username = "string",
WriteBufferSize = 123,
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string"
),
PostgreSQLSettings = list(
AfterConnectScript = "string",
CaptureDdls = TRUE|FALSE,
MaxFileSize = 123,
DatabaseName = "string",
DdlArtifactsSchema = "string",
ExecuteTimeout = 123,
FailTasksOnLobTruncation = TRUE|FALSE,
Password = "string",
Port = 123,
ServerName = "string",
Username = "string",
SlotName = "string",
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string"
),
MySQLSettings = list(
AfterConnectScript = "string",
DatabaseName = "string",
EventsPollInterval = 123,
TargetDbType = "specific-database"|"multiple-databases",
MaxFileSize = 123,
ParallelLoadThreads = 123,
Password = "string",
Port = 123,
ServerName = "string",
ServerTimezone = "string",
Username = "string",
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string"
),
OracleSettings = list(
AddSupplementalLogging = TRUE|FALSE,
ArchivedLogDestId = 123,
AdditionalArchivedLogDestId = 123,
AllowSelectNestedTables = TRUE|FALSE,
ParallelAsmReadThreads = 123,
ReadAheadBlocks = 123,
AccessAlternateDirectly = TRUE|FALSE,
UseAlternateFolderForOnline = TRUE|FALSE,
OraclePathPrefix = "string",
UsePathPrefix = "string",
ReplacePathPrefix = TRUE|FALSE,
EnableHomogenousTablespace = TRUE|FALSE,
DirectPathNoLog = TRUE|FALSE,
ArchivedLogsOnly = TRUE|FALSE,
AsmPassword = "string",
AsmServer = "string",
AsmUser = "string",
CharLengthSemantics = "default"|"char"|"byte",
DatabaseName = "string",
DirectPathParallelLoad = TRUE|FALSE,
FailTasksOnLobTruncation = TRUE|FALSE,
NumberDatatypeScale = 123,
Password = "string",
Port = 123,
ReadTableSpaceName = TRUE|FALSE,
RetryInterval = 123,
SecurityDbEncryption = "string",
SecurityDbEncryptionName = "string",
ServerName = "string",
Username = "string",
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string",
SecretsManagerOracleAsmAccessRoleArn = "string",
SecretsManagerOracleAsmSecretId = "string"
),
SybaseSettings = list(
DatabaseName = "string",
Password = "string",
Port = 123,
ServerName = "string",
Username = "string",
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string"
),
MicrosoftSQLServerSettings = list(
Port = 123,
BcpPacketSize = 123,
DatabaseName = "string",
ControlTablesFileGroup = "string",
Password = "string",
ReadBackupOnly = TRUE|FALSE,
SafeguardPolicy = "rely-on-sql-server-replication-agent"|"exclusive-automatic-truncation"|"shared-automatic-truncation",
ServerName = "string",
Username = "string",
UseBcpFullLoad = TRUE|FALSE,
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string"
),
IBMDb2Settings = list(
DatabaseName = "string",
Password = "string",
Port = 123,
ServerName = "string",
SetDataCaptureChanges = TRUE|FALSE,
CurrentLsn = "string",
MaxKBytesPerRead = 123,
Username = "string",
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string"
),
DocDbSettings = list(
Username = "string",
Password = "string",
ServerName = "string",
Port = 123,
DatabaseName = "string",
NestingLevel = "none"|"one",
ExtractDocId = TRUE|FALSE,
DocsToInvestigate = 123,
KmsKeyId = "string",
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string"
)
)
}
}
\examples{
\dontrun{
# Modifies the specified endpoint.
svc$modify_endpoint(
CertificateArn = "",
DatabaseName = "",
EndpointArn = "",
EndpointIdentifier = "",
EndpointType = "source",
EngineName = "",
ExtraConnectionAttributes = "",
Password = "",
Port = 123L,
ServerName = "",
SslMode = "require",
Username = ""
)
}
}
\keyword{internal}
| /cran/paws.migration/man/databasemigrationservice_modify_endpoint.Rd | permissive | paws-r/paws | R | false | true | 25,402 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/databasemigrationservice_operations.R
\name{databasemigrationservice_modify_endpoint}
\alias{databasemigrationservice_modify_endpoint}
\title{Modifies the specified endpoint}
\usage{
databasemigrationservice_modify_endpoint(EndpointArn,
EndpointIdentifier, EndpointType, EngineName, Username, Password,
ServerName, Port, DatabaseName, ExtraConnectionAttributes,
CertificateArn, SslMode, ServiceAccessRoleArn, ExternalTableDefinition,
DynamoDbSettings, S3Settings, DmsTransferSettings, MongoDbSettings,
KinesisSettings, KafkaSettings, ElasticsearchSettings, NeptuneSettings,
RedshiftSettings, PostgreSQLSettings, MySQLSettings, OracleSettings,
SybaseSettings, MicrosoftSQLServerSettings, IBMDb2Settings,
DocDbSettings)
}
\arguments{
\item{EndpointArn}{[required] The Amazon Resource Name (ARN) string that uniquely identifies the
endpoint.}
\item{EndpointIdentifier}{The database endpoint identifier. Identifiers must begin with a letter
and must contain only ASCII letters, digits, and hyphens. They can't end
with a hyphen or contain two consecutive hyphens.}
\item{EndpointType}{The type of endpoint. Valid values are \code{source} and \code{target}.}
\item{EngineName}{The type of engine for the endpoint. Valid values, depending on the
EndpointType, include \code{"mysql"}, \code{"oracle"}, \code{"postgres"}, \code{"mariadb"},
\code{"aurora"}, \code{"aurora-postgresql"}, \code{"redshift"}, \code{"s3"}, \code{"db2"},
\code{"azuredb"}, \code{"sybase"}, \code{"dynamodb"}, \code{"mongodb"}, \code{"kinesis"},
\code{"kafka"}, \code{"elasticsearch"}, \code{"documentdb"}, \code{"sqlserver"}, and
\code{"neptune"}.}
\item{Username}{The user name to be used to login to the endpoint database.}
\item{Password}{The password to be used to login to the endpoint database.}
\item{ServerName}{The name of the server where the endpoint database resides.}
\item{Port}{The port used by the endpoint database.}
\item{DatabaseName}{The name of the endpoint database.}
\item{ExtraConnectionAttributes}{Additional attributes associated with the connection. To reset this
parameter, pass the empty string ("") as an argument.}
\item{CertificateArn}{The Amazon Resource Name (ARN) of the certificate used for SSL
connection.}
\item{SslMode}{The SSL mode used to connect to the endpoint. The default value is
\code{none}.}
\item{ServiceAccessRoleArn}{The Amazon Resource Name (ARN) for the service access role you want to
use to modify the endpoint.}
\item{ExternalTableDefinition}{The external table definition.}
\item{DynamoDbSettings}{Settings in JSON format for the target Amazon DynamoDB endpoint. For
information about other available settings, see \href{https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.DynamoDB.html}{Using Object Mapping to Migrate Data to DynamoDB}
in the \emph{AWS Database Migration Service User Guide.}}
\item{S3Settings}{Settings in JSON format for the target Amazon S3 endpoint. For more
information about the available settings, see \href{https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.S3.html#CHAP_Target.S3.Configuring}{Extra Connection Attributes When Using Amazon S3 as a Target for AWS DMS}
in the \emph{AWS Database Migration Service User Guide.}}
\item{DmsTransferSettings}{The settings in JSON format for the DMS transfer type of source
endpoint.
Attributes include the following:
\itemize{
\item serviceAccessRoleArn - The AWS Identity and Access Management (IAM)
role that has permission to access the Amazon S3 bucket.
\item BucketName - The name of the S3 bucket to use.
\item compressionType - An optional parameter to use GZIP to compress the
target files. Either set this parameter to NONE (the default) or
don't use it to leave the files uncompressed.
}
Shorthand syntax for these settings is as follows:
\verb{ServiceAccessRoleArn=string ,BucketName=string,CompressionType=string}
JSON syntax for these settings is as follows:
\verb{\{ "ServiceAccessRoleArn": "string", "BucketName": "string", "CompressionType": "none"|"gzip" \} }}
\item{MongoDbSettings}{Settings in JSON format for the source MongoDB endpoint. For more
information about the available settings, see the configuration
properties section in \href{https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.MongoDB.html}{Using MongoDB as a Source for AWS Database Migration Service}
in the \emph{AWS Database Migration Service User Guide.}}
\item{KinesisSettings}{Settings in JSON format for the target endpoint for Amazon Kinesis Data
Streams. For more information about the available settings, see \href{https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kinesis.html}{Using Amazon Kinesis Data Streams as a Target for AWS Database Migration Service}
in the \emph{AWS Database Migration Service User Guide.}}
\item{KafkaSettings}{Settings in JSON format for the target Apache Kafka endpoint. For more
information about the available settings, see \href{https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Kafka.html}{Using Apache Kafka as a Target for AWS Database Migration Service}
in the \emph{AWS Database Migration Service User Guide.}}
\item{ElasticsearchSettings}{Settings in JSON format for the target Elasticsearch endpoint. For more
information about the available settings, see \href{https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Elasticsearch.html#CHAP_Target.Elasticsearch.Configuration}{Extra Connection Attributes When Using Elasticsearch as a Target for AWS DMS}
in the \emph{AWS Database Migration Service User Guide.}}
\item{NeptuneSettings}{Settings in JSON format for the target Amazon Neptune endpoint. For more
information about the available settings, see \href{https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Target.Neptune.html#CHAP_Target.Neptune.EndpointSettings}{Specifying Endpoint Settings for Amazon Neptune as a Target}
in the \emph{AWS Database Migration Service User Guide.}}
\item{RedshiftSettings}{Settings in JSON format for the Amazon Redshift endpoint. This
structure provides information that defines an Amazon Redshift
endpoint.}
\item{PostgreSQLSettings}{Settings in JSON format for the source and target PostgreSQL endpoint.
For information about other available settings, see Extra connection
attributes when using PostgreSQL as a source for AWS DMS and Extra
connection attributes when using PostgreSQL as a target for AWS DMS in
the \emph{AWS Database Migration Service User Guide.}}
\item{MySQLSettings}{Settings in JSON format for the source and target MySQL endpoint. For
information about other available settings, see Extra connection
attributes when using MySQL as a source for AWS DMS and Extra connection
attributes when using a MySQL-compatible database as a target for AWS
DMS in the \emph{AWS Database Migration Service User Guide.}}
\item{OracleSettings}{Settings in JSON format for the source and target Oracle endpoint. For
information about other available settings, see Extra connection
attributes when using Oracle as a source for AWS DMS and Extra
connection attributes when using Oracle as a target for AWS DMS in the
\emph{AWS Database Migration Service User Guide.}}
\item{SybaseSettings}{Settings in JSON format for the source and target SAP ASE endpoint. For
information about other available settings, see Extra connection
attributes when using SAP ASE as a source for AWS DMS and Extra
connection attributes when using SAP ASE as a target for AWS DMS in the
\emph{AWS Database Migration Service User Guide.}}
\item{MicrosoftSQLServerSettings}{Settings in JSON format for the source and target Microsoft SQL Server
endpoint. For information about other available settings, see Extra
connection attributes when using SQL Server as a source for AWS DMS and
Extra connection attributes when using SQL Server as a target for AWS
DMS in the \emph{AWS Database Migration Service User Guide.}}
\item{IBMDb2Settings}{Settings in JSON format for the source IBM Db2 LUW endpoint. For
information about other available settings, see Extra connection
attributes when using Db2 LUW as a source for AWS DMS in the \emph{AWS
Database Migration Service User Guide.}}
\item{DocDbSettings}{Settings in JSON format for the source DocumentDB endpoint. For more
information about the available settings, see the configuration
properties section in \href{https://docs.aws.amazon.com/dms/latest/userguide/CHAP_Source.DocumentDB.html}{Using DocumentDB as a Source for AWS Database Migration Service}
in the \emph{AWS Database Migration Service User Guide.}}
}
\value{
A list with the following syntax:\preformatted{list(
Endpoint = list(
EndpointIdentifier = "string",
EndpointType = "source"|"target",
EngineName = "string",
EngineDisplayName = "string",
Username = "string",
ServerName = "string",
Port = 123,
DatabaseName = "string",
ExtraConnectionAttributes = "string",
Status = "string",
KmsKeyId = "string",
EndpointArn = "string",
CertificateArn = "string",
SslMode = "none"|"require"|"verify-ca"|"verify-full",
ServiceAccessRoleArn = "string",
ExternalTableDefinition = "string",
ExternalId = "string",
DynamoDbSettings = list(
ServiceAccessRoleArn = "string"
),
S3Settings = list(
ServiceAccessRoleArn = "string",
ExternalTableDefinition = "string",
CsvRowDelimiter = "string",
CsvDelimiter = "string",
BucketFolder = "string",
BucketName = "string",
CompressionType = "none"|"gzip",
EncryptionMode = "sse-s3"|"sse-kms",
ServerSideEncryptionKmsKeyId = "string",
DataFormat = "csv"|"parquet",
EncodingType = "plain"|"plain-dictionary"|"rle-dictionary",
DictPageSizeLimit = 123,
RowGroupLength = 123,
DataPageSize = 123,
ParquetVersion = "parquet-1-0"|"parquet-2-0",
EnableStatistics = TRUE|FALSE,
IncludeOpForFullLoad = TRUE|FALSE,
CdcInsertsOnly = TRUE|FALSE,
TimestampColumnName = "string",
ParquetTimestampInMillisecond = TRUE|FALSE,
CdcInsertsAndUpdates = TRUE|FALSE,
DatePartitionEnabled = TRUE|FALSE,
DatePartitionSequence = "YYYYMMDD"|"YYYYMMDDHH"|"YYYYMM"|"MMYYYYDD"|"DDMMYYYY",
DatePartitionDelimiter = "SLASH"|"UNDERSCORE"|"DASH"|"NONE",
UseCsvNoSupValue = TRUE|FALSE,
CsvNoSupValue = "string",
PreserveTransactions = TRUE|FALSE,
CdcPath = "string"
),
DmsTransferSettings = list(
ServiceAccessRoleArn = "string",
BucketName = "string"
),
MongoDbSettings = list(
Username = "string",
Password = "string",
ServerName = "string",
Port = 123,
DatabaseName = "string",
AuthType = "no"|"password",
AuthMechanism = "default"|"mongodb_cr"|"scram_sha_1",
NestingLevel = "none"|"one",
ExtractDocId = "string",
DocsToInvestigate = "string",
AuthSource = "string",
KmsKeyId = "string",
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string"
),
KinesisSettings = list(
StreamArn = "string",
MessageFormat = "json"|"json-unformatted",
ServiceAccessRoleArn = "string",
IncludeTransactionDetails = TRUE|FALSE,
IncludePartitionValue = TRUE|FALSE,
PartitionIncludeSchemaTable = TRUE|FALSE,
IncludeTableAlterOperations = TRUE|FALSE,
IncludeControlDetails = TRUE|FALSE,
IncludeNullAndEmpty = TRUE|FALSE
),
KafkaSettings = list(
Broker = "string",
Topic = "string",
MessageFormat = "json"|"json-unformatted",
IncludeTransactionDetails = TRUE|FALSE,
IncludePartitionValue = TRUE|FALSE,
PartitionIncludeSchemaTable = TRUE|FALSE,
IncludeTableAlterOperations = TRUE|FALSE,
IncludeControlDetails = TRUE|FALSE,
MessageMaxBytes = 123,
IncludeNullAndEmpty = TRUE|FALSE
),
ElasticsearchSettings = list(
ServiceAccessRoleArn = "string",
EndpointUri = "string",
FullLoadErrorPercentage = 123,
ErrorRetryDuration = 123
),
NeptuneSettings = list(
ServiceAccessRoleArn = "string",
S3BucketName = "string",
S3BucketFolder = "string",
ErrorRetryDuration = 123,
MaxFileSize = 123,
MaxRetryCount = 123,
IamAuthEnabled = TRUE|FALSE
),
RedshiftSettings = list(
AcceptAnyDate = TRUE|FALSE,
AfterConnectScript = "string",
BucketFolder = "string",
BucketName = "string",
CaseSensitiveNames = TRUE|FALSE,
CompUpdate = TRUE|FALSE,
ConnectionTimeout = 123,
DatabaseName = "string",
DateFormat = "string",
EmptyAsNull = TRUE|FALSE,
EncryptionMode = "sse-s3"|"sse-kms",
ExplicitIds = TRUE|FALSE,
FileTransferUploadStreams = 123,
LoadTimeout = 123,
MaxFileSize = 123,
Password = "string",
Port = 123,
RemoveQuotes = TRUE|FALSE,
ReplaceInvalidChars = "string",
ReplaceChars = "string",
ServerName = "string",
ServiceAccessRoleArn = "string",
ServerSideEncryptionKmsKeyId = "string",
TimeFormat = "string",
TrimBlanks = TRUE|FALSE,
TruncateColumns = TRUE|FALSE,
Username = "string",
WriteBufferSize = 123,
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string"
),
PostgreSQLSettings = list(
AfterConnectScript = "string",
CaptureDdls = TRUE|FALSE,
MaxFileSize = 123,
DatabaseName = "string",
DdlArtifactsSchema = "string",
ExecuteTimeout = 123,
FailTasksOnLobTruncation = TRUE|FALSE,
Password = "string",
Port = 123,
ServerName = "string",
Username = "string",
SlotName = "string",
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string"
),
MySQLSettings = list(
AfterConnectScript = "string",
DatabaseName = "string",
EventsPollInterval = 123,
TargetDbType = "specific-database"|"multiple-databases",
MaxFileSize = 123,
ParallelLoadThreads = 123,
Password = "string",
Port = 123,
ServerName = "string",
ServerTimezone = "string",
Username = "string",
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string"
),
OracleSettings = list(
AddSupplementalLogging = TRUE|FALSE,
ArchivedLogDestId = 123,
AdditionalArchivedLogDestId = 123,
AllowSelectNestedTables = TRUE|FALSE,
ParallelAsmReadThreads = 123,
ReadAheadBlocks = 123,
AccessAlternateDirectly = TRUE|FALSE,
UseAlternateFolderForOnline = TRUE|FALSE,
OraclePathPrefix = "string",
UsePathPrefix = "string",
ReplacePathPrefix = TRUE|FALSE,
EnableHomogenousTablespace = TRUE|FALSE,
DirectPathNoLog = TRUE|FALSE,
ArchivedLogsOnly = TRUE|FALSE,
AsmPassword = "string",
AsmServer = "string",
AsmUser = "string",
CharLengthSemantics = "default"|"char"|"byte",
DatabaseName = "string",
DirectPathParallelLoad = TRUE|FALSE,
FailTasksOnLobTruncation = TRUE|FALSE,
NumberDatatypeScale = 123,
Password = "string",
Port = 123,
ReadTableSpaceName = TRUE|FALSE,
RetryInterval = 123,
SecurityDbEncryption = "string",
SecurityDbEncryptionName = "string",
ServerName = "string",
Username = "string",
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string",
SecretsManagerOracleAsmAccessRoleArn = "string",
SecretsManagerOracleAsmSecretId = "string"
),
SybaseSettings = list(
DatabaseName = "string",
Password = "string",
Port = 123,
ServerName = "string",
Username = "string",
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string"
),
MicrosoftSQLServerSettings = list(
Port = 123,
BcpPacketSize = 123,
DatabaseName = "string",
ControlTablesFileGroup = "string",
Password = "string",
ReadBackupOnly = TRUE|FALSE,
SafeguardPolicy = "rely-on-sql-server-replication-agent"|"exclusive-automatic-truncation"|"shared-automatic-truncation",
ServerName = "string",
Username = "string",
UseBcpFullLoad = TRUE|FALSE,
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string"
),
IBMDb2Settings = list(
DatabaseName = "string",
Password = "string",
Port = 123,
ServerName = "string",
SetDataCaptureChanges = TRUE|FALSE,
CurrentLsn = "string",
MaxKBytesPerRead = 123,
Username = "string",
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string"
),
DocDbSettings = list(
Username = "string",
Password = "string",
ServerName = "string",
Port = 123,
DatabaseName = "string",
NestingLevel = "none"|"one",
ExtractDocId = TRUE|FALSE,
DocsToInvestigate = 123,
KmsKeyId = "string",
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string"
)
)
)
}
}
\description{
Modifies the specified endpoint.
}
\section{Request syntax}{
\preformatted{svc$modify_endpoint(
EndpointArn = "string",
EndpointIdentifier = "string",
EndpointType = "source"|"target",
EngineName = "string",
Username = "string",
Password = "string",
ServerName = "string",
Port = 123,
DatabaseName = "string",
ExtraConnectionAttributes = "string",
CertificateArn = "string",
SslMode = "none"|"require"|"verify-ca"|"verify-full",
ServiceAccessRoleArn = "string",
ExternalTableDefinition = "string",
DynamoDbSettings = list(
ServiceAccessRoleArn = "string"
),
S3Settings = list(
ServiceAccessRoleArn = "string",
ExternalTableDefinition = "string",
CsvRowDelimiter = "string",
CsvDelimiter = "string",
BucketFolder = "string",
BucketName = "string",
CompressionType = "none"|"gzip",
EncryptionMode = "sse-s3"|"sse-kms",
ServerSideEncryptionKmsKeyId = "string",
DataFormat = "csv"|"parquet",
EncodingType = "plain"|"plain-dictionary"|"rle-dictionary",
DictPageSizeLimit = 123,
RowGroupLength = 123,
DataPageSize = 123,
ParquetVersion = "parquet-1-0"|"parquet-2-0",
EnableStatistics = TRUE|FALSE,
IncludeOpForFullLoad = TRUE|FALSE,
CdcInsertsOnly = TRUE|FALSE,
TimestampColumnName = "string",
ParquetTimestampInMillisecond = TRUE|FALSE,
CdcInsertsAndUpdates = TRUE|FALSE,
DatePartitionEnabled = TRUE|FALSE,
DatePartitionSequence = "YYYYMMDD"|"YYYYMMDDHH"|"YYYYMM"|"MMYYYYDD"|"DDMMYYYY",
DatePartitionDelimiter = "SLASH"|"UNDERSCORE"|"DASH"|"NONE",
UseCsvNoSupValue = TRUE|FALSE,
CsvNoSupValue = "string",
PreserveTransactions = TRUE|FALSE,
CdcPath = "string"
),
DmsTransferSettings = list(
ServiceAccessRoleArn = "string",
BucketName = "string"
),
MongoDbSettings = list(
Username = "string",
Password = "string",
ServerName = "string",
Port = 123,
DatabaseName = "string",
AuthType = "no"|"password",
AuthMechanism = "default"|"mongodb_cr"|"scram_sha_1",
NestingLevel = "none"|"one",
ExtractDocId = "string",
DocsToInvestigate = "string",
AuthSource = "string",
KmsKeyId = "string",
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string"
),
KinesisSettings = list(
StreamArn = "string",
MessageFormat = "json"|"json-unformatted",
ServiceAccessRoleArn = "string",
IncludeTransactionDetails = TRUE|FALSE,
IncludePartitionValue = TRUE|FALSE,
PartitionIncludeSchemaTable = TRUE|FALSE,
IncludeTableAlterOperations = TRUE|FALSE,
IncludeControlDetails = TRUE|FALSE,
IncludeNullAndEmpty = TRUE|FALSE
),
KafkaSettings = list(
Broker = "string",
Topic = "string",
MessageFormat = "json"|"json-unformatted",
IncludeTransactionDetails = TRUE|FALSE,
IncludePartitionValue = TRUE|FALSE,
PartitionIncludeSchemaTable = TRUE|FALSE,
IncludeTableAlterOperations = TRUE|FALSE,
IncludeControlDetails = TRUE|FALSE,
MessageMaxBytes = 123,
IncludeNullAndEmpty = TRUE|FALSE
),
ElasticsearchSettings = list(
ServiceAccessRoleArn = "string",
EndpointUri = "string",
FullLoadErrorPercentage = 123,
ErrorRetryDuration = 123
),
NeptuneSettings = list(
ServiceAccessRoleArn = "string",
S3BucketName = "string",
S3BucketFolder = "string",
ErrorRetryDuration = 123,
MaxFileSize = 123,
MaxRetryCount = 123,
IamAuthEnabled = TRUE|FALSE
),
RedshiftSettings = list(
AcceptAnyDate = TRUE|FALSE,
AfterConnectScript = "string",
BucketFolder = "string",
BucketName = "string",
CaseSensitiveNames = TRUE|FALSE,
CompUpdate = TRUE|FALSE,
ConnectionTimeout = 123,
DatabaseName = "string",
DateFormat = "string",
EmptyAsNull = TRUE|FALSE,
EncryptionMode = "sse-s3"|"sse-kms",
ExplicitIds = TRUE|FALSE,
FileTransferUploadStreams = 123,
LoadTimeout = 123,
MaxFileSize = 123,
Password = "string",
Port = 123,
RemoveQuotes = TRUE|FALSE,
ReplaceInvalidChars = "string",
ReplaceChars = "string",
ServerName = "string",
ServiceAccessRoleArn = "string",
ServerSideEncryptionKmsKeyId = "string",
TimeFormat = "string",
TrimBlanks = TRUE|FALSE,
TruncateColumns = TRUE|FALSE,
Username = "string",
WriteBufferSize = 123,
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string"
),
PostgreSQLSettings = list(
AfterConnectScript = "string",
CaptureDdls = TRUE|FALSE,
MaxFileSize = 123,
DatabaseName = "string",
DdlArtifactsSchema = "string",
ExecuteTimeout = 123,
FailTasksOnLobTruncation = TRUE|FALSE,
Password = "string",
Port = 123,
ServerName = "string",
Username = "string",
SlotName = "string",
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string"
),
MySQLSettings = list(
AfterConnectScript = "string",
DatabaseName = "string",
EventsPollInterval = 123,
TargetDbType = "specific-database"|"multiple-databases",
MaxFileSize = 123,
ParallelLoadThreads = 123,
Password = "string",
Port = 123,
ServerName = "string",
ServerTimezone = "string",
Username = "string",
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string"
),
OracleSettings = list(
AddSupplementalLogging = TRUE|FALSE,
ArchivedLogDestId = 123,
AdditionalArchivedLogDestId = 123,
AllowSelectNestedTables = TRUE|FALSE,
ParallelAsmReadThreads = 123,
ReadAheadBlocks = 123,
AccessAlternateDirectly = TRUE|FALSE,
UseAlternateFolderForOnline = TRUE|FALSE,
OraclePathPrefix = "string",
UsePathPrefix = "string",
ReplacePathPrefix = TRUE|FALSE,
EnableHomogenousTablespace = TRUE|FALSE,
DirectPathNoLog = TRUE|FALSE,
ArchivedLogsOnly = TRUE|FALSE,
AsmPassword = "string",
AsmServer = "string",
AsmUser = "string",
CharLengthSemantics = "default"|"char"|"byte",
DatabaseName = "string",
DirectPathParallelLoad = TRUE|FALSE,
FailTasksOnLobTruncation = TRUE|FALSE,
NumberDatatypeScale = 123,
Password = "string",
Port = 123,
ReadTableSpaceName = TRUE|FALSE,
RetryInterval = 123,
SecurityDbEncryption = "string",
SecurityDbEncryptionName = "string",
ServerName = "string",
Username = "string",
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string",
SecretsManagerOracleAsmAccessRoleArn = "string",
SecretsManagerOracleAsmSecretId = "string"
),
SybaseSettings = list(
DatabaseName = "string",
Password = "string",
Port = 123,
ServerName = "string",
Username = "string",
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string"
),
MicrosoftSQLServerSettings = list(
Port = 123,
BcpPacketSize = 123,
DatabaseName = "string",
ControlTablesFileGroup = "string",
Password = "string",
ReadBackupOnly = TRUE|FALSE,
SafeguardPolicy = "rely-on-sql-server-replication-agent"|"exclusive-automatic-truncation"|"shared-automatic-truncation",
ServerName = "string",
Username = "string",
UseBcpFullLoad = TRUE|FALSE,
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string"
),
IBMDb2Settings = list(
DatabaseName = "string",
Password = "string",
Port = 123,
ServerName = "string",
SetDataCaptureChanges = TRUE|FALSE,
CurrentLsn = "string",
MaxKBytesPerRead = 123,
Username = "string",
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string"
),
DocDbSettings = list(
Username = "string",
Password = "string",
ServerName = "string",
Port = 123,
DatabaseName = "string",
NestingLevel = "none"|"one",
ExtractDocId = TRUE|FALSE,
DocsToInvestigate = 123,
KmsKeyId = "string",
SecretsManagerAccessRoleArn = "string",
SecretsManagerSecretId = "string"
)
)
}
}
\examples{
\dontrun{
# Modifies the specified endpoint.
svc$modify_endpoint(
CertificateArn = "",
DatabaseName = "",
EndpointArn = "",
EndpointIdentifier = "",
EndpointType = "source",
EngineName = "",
ExtraConnectionAttributes = "",
Password = "",
Port = 123L,
ServerName = "",
SslMode = "require",
Username = ""
)
}
}
\keyword{internal}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.