content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
# Load the data --------------------------------------------------------------
cocktails <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-05-26/cocktails.csv')
# Load the packages -----------------------------------------------------------
pacman::p_load(dplyr, lubridate, tidyverse, ggplot2)
# Prepare the data ------------------------------------------------------------
# Merge the two spellings of the non-alcoholic category into one.
cocktails$alcoholic[cocktails$alcoholic == "Non Alcoholic"] <- "Non alcoholic"

monthtile2 <- cocktails %>%
  mutate(
    # Refer to columns directly (not cocktails$...) so the expressions act on
    # the piped data rather than the untouched global copy.
    year   = format(as.Date(date_modified, format = "%Y-%m-%d %H:%M:%S"), "%Y"),
    monthA = format(as.Date(date_modified, format = "%Y-%m-%d %H:%M:%S"), "%m"),
    # Wrap here: wrapping inside the plot would change the category order.
    category = str_wrap(category, 6),
    # Map "01".."12" to month names via base R's month.name constant; this
    # replaces a 12-branch recode() that ended in a fragile trailing comma.
    month = month.name[as.integer(monthA)]
  ) %>%
  group_by(month, category) %>%
  summarise(total = n()) %>%
  na.omit()

# Order months top-to-bottom on the y axis. March, May and June are absent
# from the data; this is annotated on the final plot below.
monthtile2$month <- fct_relevel(monthtile2$month,
                                c("December",
                                  "November",
                                  "October",
                                  "September",
                                  "August",
                                  "July",
                                  "April",
                                  "February",
                                  "January"))

# geom_tile --------------------------------------------------------------------
# Heatmap of drink counts per category and month. The fill legend is hidden
# via legend.position = "none" (the old `guides(fill = NULL)` was a no-op).
tile2heatmap2 <- monthtile2 %>%
  ggplot(aes(x = category, y = month)) +
  geom_tile(aes(fill = total), color = "#2b2b2b") +
  geom_text(aes(label = total), color = "#22292F") +
  scale_fill_gradient(low = "#ffef9a", high = "#e13d3d") +
  scale_x_discrete(position = "top") +
  labs(x = "", y = "",
       title = "Cocktail consumption analysis",
       subtitle = "What drink is consumed the most in every month?",
       caption = "Source:Tidy Tuesday\nVisualization: JuanmaMN (Twitter @Juanma_MN)") +
  theme(
    plot.title = element_text(margin = margin(b = 8),
                              color = "#22222b", face = "bold", size = 14,
                              hjust = 0.5,
                              family = "Arial"),
    plot.subtitle = element_text(margin = margin(t = 10, b = 25),
                                 color = "#22222b", size = 9, family = "Arial",
                                 hjust = 0.5),
    plot.caption = element_text(margin = margin(t = 20),
                                color = "#22222b", size = 9, family = "Arial",
                                hjust = 0.95),
    axis.title.x = element_text(margin = margin(t = 10),
                                color = "#22222b"),
    axis.title.y = element_text(margin = margin(r = 15),
                                color = "#22222b"),
    legend.position = "none",
    axis.text.x = element_text(color = "#22222b"),
    axis.text.y = element_text(color = "#22222b"),
    panel.background = element_blank(),
    panel.grid.major = element_blank(),
    panel.grid.major.y = element_blank(),
    panel.grid.minor = element_blank(),
    plot.background = element_rect(fill = "#f7f7f7"),
    plot.margin = unit(c(1, 2, 2, 1), "cm"),
    axis.ticks = element_blank()
  )

# Annotate the months that have no data rather than leaving blank rows
# unexplained.
tile2heatmap2 +
  annotate(geom = "text",
           x = 0.5, y = -1,
           label = "No data for March, May, June\n", hjust = "left",
           color = "#808080",
           size = 2.5)
|
/2020/May/TidyTuesday 26-5-2020.R
|
no_license
|
JuanmaMN/TidyTuesday
|
R
| false
| false
| 3,956
|
r
|
# Load the data --------------------------------------------------------------
cocktails <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2020/2020-05-26/cocktails.csv')
# Load the packages -----------------------------------------------------------
pacman::p_load(dplyr, lubridate, tidyverse, ggplot2)
# Prepare the data ------------------------------------------------------------
# Merge the two spellings of the non-alcoholic category into one.
cocktails$alcoholic[cocktails$alcoholic == "Non Alcoholic"] <- "Non alcoholic"

monthtile2 <- cocktails %>%
  mutate(
    # Refer to columns directly (not cocktails$...) so the expressions act on
    # the piped data rather than the untouched global copy.
    year   = format(as.Date(date_modified, format = "%Y-%m-%d %H:%M:%S"), "%Y"),
    monthA = format(as.Date(date_modified, format = "%Y-%m-%d %H:%M:%S"), "%m"),
    # Wrap here: wrapping inside the plot would change the category order.
    category = str_wrap(category, 6),
    # Map "01".."12" to month names via base R's month.name constant; this
    # replaces a 12-branch recode() that ended in a fragile trailing comma.
    month = month.name[as.integer(monthA)]
  ) %>%
  group_by(month, category) %>%
  summarise(total = n()) %>%
  na.omit()

# Order months top-to-bottom on the y axis. March, May and June are absent
# from the data; this is annotated on the final plot below.
monthtile2$month <- fct_relevel(monthtile2$month,
                                c("December",
                                  "November",
                                  "October",
                                  "September",
                                  "August",
                                  "July",
                                  "April",
                                  "February",
                                  "January"))

# geom_tile --------------------------------------------------------------------
# Heatmap of drink counts per category and month. The fill legend is hidden
# via legend.position = "none" (the old `guides(fill = NULL)` was a no-op).
tile2heatmap2 <- monthtile2 %>%
  ggplot(aes(x = category, y = month)) +
  geom_tile(aes(fill = total), color = "#2b2b2b") +
  geom_text(aes(label = total), color = "#22292F") +
  scale_fill_gradient(low = "#ffef9a", high = "#e13d3d") +
  scale_x_discrete(position = "top") +
  labs(x = "", y = "",
       title = "Cocktail consumption analysis",
       subtitle = "What drink is consumed the most in every month?",
       caption = "Source:Tidy Tuesday\nVisualization: JuanmaMN (Twitter @Juanma_MN)") +
  theme(
    plot.title = element_text(margin = margin(b = 8),
                              color = "#22222b", face = "bold", size = 14,
                              hjust = 0.5,
                              family = "Arial"),
    plot.subtitle = element_text(margin = margin(t = 10, b = 25),
                                 color = "#22222b", size = 9, family = "Arial",
                                 hjust = 0.5),
    plot.caption = element_text(margin = margin(t = 20),
                                color = "#22222b", size = 9, family = "Arial",
                                hjust = 0.95),
    axis.title.x = element_text(margin = margin(t = 10),
                                color = "#22222b"),
    axis.title.y = element_text(margin = margin(r = 15),
                                color = "#22222b"),
    legend.position = "none",
    axis.text.x = element_text(color = "#22222b"),
    axis.text.y = element_text(color = "#22222b"),
    panel.background = element_blank(),
    panel.grid.major = element_blank(),
    panel.grid.major.y = element_blank(),
    panel.grid.minor = element_blank(),
    plot.background = element_rect(fill = "#f7f7f7"),
    plot.margin = unit(c(1, 2, 2, 1), "cm"),
    axis.ticks = element_blank()
  )

# Annotate the months that have no data rather than leaving blank rows
# unexplained.
tile2heatmap2 +
  annotate(geom = "text",
           x = 0.5, y = -1,
           label = "No data for March, May, June\n", hjust = "left",
           color = "#808080",
           size = 2.5)
|
# Read a JSON file into a Spark DataFrame via SparkR and print its schema.
# NOTE(review): sparkR.init()/sparkRSQL.init()/jsonFile() are the Spark 1.x
# entry points (deprecated in Spark 2.x in favour of sparkR.session() and
# read.json()); kept as-is since the target cluster version is not visible here.
#install.packages("yaml", repos="http://cran.rstudio.com/")
library("yaml")
# Cluster settings come from config.yaml; assumes a `spark:` section with
# `home` and `server` keys -- TODO confirm against the actual config file.
config = yaml.load_file("config.yaml")
spark_home <- config$spark$home
# The SparkR R package ships inside the Spark distribution under R/lib.
spark_r_location <- paste0(spark_home,"/R/lib")
spark_server <- config$spark$server
library("SparkR", lib.loc = spark_r_location)
# Connect to the cluster and create a SQL context on top of the Spark context.
sc <- sparkR.init(master = spark_server, appName = "SparkR_Wordcount",
sparkHome = spark_home)
sqlContext <- sparkRSQL.init(sc)
# Load the sample JSON data (path is relative to the working directory).
path <- file.path("sparkR/people.json")
peopleDF <- jsonFile(sqlContext, path)
# Show the inferred schema and the first rows.
printSchema(peopleDF)
head(peopleDF)
|
/scripts/WordCount.R
|
permissive
|
luohuazju/sillycat-predict
|
R
| false
| false
| 548
|
r
|
# Read a JSON file into a Spark DataFrame via SparkR and print its schema.
# NOTE(review): sparkR.init()/sparkRSQL.init()/jsonFile() are the Spark 1.x
# entry points (deprecated in Spark 2.x in favour of sparkR.session() and
# read.json()); kept as-is since the target cluster version is not visible here.
#install.packages("yaml", repos="http://cran.rstudio.com/")
library("yaml")
# Cluster settings come from config.yaml; assumes a `spark:` section with
# `home` and `server` keys -- TODO confirm against the actual config file.
config = yaml.load_file("config.yaml")
spark_home <- config$spark$home
# The SparkR R package ships inside the Spark distribution under R/lib.
spark_r_location <- paste0(spark_home,"/R/lib")
spark_server <- config$spark$server
library("SparkR", lib.loc = spark_r_location)
# Connect to the cluster and create a SQL context on top of the Spark context.
sc <- sparkR.init(master = spark_server, appName = "SparkR_Wordcount",
sparkHome = spark_home)
sqlContext <- sparkRSQL.init(sc)
# Load the sample JSON data (path is relative to the working directory).
path <- file.path("sparkR/people.json")
peopleDF <- jsonFile(sqlContext, path)
# Show the inferred schema and the first rows.
printSchema(peopleDF)
head(peopleDF)
|
# S3 generic for "design": UseMethod dispatches to a design.<class> method
# based on the class of the first argument, `covariates`.  The concrete
# methods are defined elsewhere in the package.
design <-
function(covariates,indep,chip.id,random) {
UseMethod("design")}
|
/R/design.R
|
no_license
|
cran/CpGassoc
|
R
| false
| false
| 78
|
r
|
# S3 generic for "design": UseMethod dispatches to a design.<class> method
# based on the class of the first argument, `covariates`.  The concrete
# methods are defined elsewhere in the package.
design <-
function(covariates,indep,chip.id,random) {
UseMethod("design")}
|
# One-sample t-test: is the mean of `Minutes` different from 144?
# `internetmobiletime` is expected to be loaded in the calling environment.
internetmobiletime
# Extract the column explicitly instead of attach()-ing the data frame:
# attach() pollutes the search path and can silently mask variables.
Minutes <- internetmobiletime$Minutes
names(internetmobiletime)
Mu <- 144                       # hypothesised mean under H0
stDev <- sd(Minutes)
n <- length(Minutes)
xBar <- mean(Minutes)
xBar
# Manual t statistic for H0: mean(Minutes) == Mu
tStat <- (xBar - Mu) / (stDev / sqrt(n))
tStat
# Two-sided p-value; abs() makes the tail probability correct whether
# tStat is positive or negative (the original assumed a positive statistic).
pvalue <- 2 * pt(abs(tStat), df = n - 1, lower.tail = FALSE)
pvalue
# Area is 23% which is greater than the reject-zone area, i.e. 5%:
# do not reject the null hypothesis.
# "p low, null must go"
# BUG FIX: the original called t.test(Minutes, Mu = 144).  `Mu` does not
# match the `mu` formal (argument matching is case-sensitive), so it fell
# into `...` and the test silently ran against the default mu = 0.
t.test(Minutes, mu = Mu)
t.test(Minutes, mu = Mu, alternative = "two.sided", conf.level = 0.95)
t.test(Minutes, mu = Mu, alternative = "two.sided", conf.level = 0.99)
|
/SMDM-t-Test.R
|
no_license
|
panupind/SMDM
|
R
| false
| false
| 561
|
r
|
# One-sample t-test: is the mean of `Minutes` different from 144?
# `internetmobiletime` is expected to be loaded in the calling environment.
internetmobiletime
# Extract the column explicitly instead of attach()-ing the data frame:
# attach() pollutes the search path and can silently mask variables.
Minutes <- internetmobiletime$Minutes
names(internetmobiletime)
Mu <- 144                       # hypothesised mean under H0
stDev <- sd(Minutes)
n <- length(Minutes)
xBar <- mean(Minutes)
xBar
# Manual t statistic for H0: mean(Minutes) == Mu
tStat <- (xBar - Mu) / (stDev / sqrt(n))
tStat
# Two-sided p-value; abs() makes the tail probability correct whether
# tStat is positive or negative (the original assumed a positive statistic).
pvalue <- 2 * pt(abs(tStat), df = n - 1, lower.tail = FALSE)
pvalue
# Area is 23% which is greater than the reject-zone area, i.e. 5%:
# do not reject the null hypothesis.
# "p low, null must go"
# BUG FIX: the original called t.test(Minutes, Mu = 144).  `Mu` does not
# match the `mu` formal (argument matching is case-sensitive), so it fell
# into `...` and the test silently ran against the default mu = 0.
t.test(Minutes, mu = Mu)
t.test(Minutes, mu = Mu, alternative = "two.sided", conf.level = 0.95)
t.test(Minutes, mu = Mu, alternative = "two.sided", conf.level = 0.99)
|
#GENERAL FUNCTION: #importbalsh.f, teamname.f
#source functions
#netcash.v.f, addloan.f, bgn2.td.df.f
# NOTE(review): relies on maincode.dir, meta.dir, fulltran.dir, yy and
# bgn1.td.df being defined by the calling script -- confirm upstream.
# setwd() changes global state for everything that follows.
setwd(maincode.dir); source("70_01_BGN2Functions.R")
#ENVIRONMENT OUTPUT:
#balsh_y.df, teamname12, netcash.v
#bgn2.td.df
###############################################################
#MAIN INPUT: bgn1.td.df
###############################################################
#Import year y balance sheet
balsh_y.df <- importbalsh.f(meta.dir, fulltran.dir, yy)
#find netcash for each team
teamname12 <- teamname.f()
netcash.v <- netcash.v.f(teamname12, bgn1.td.df, balsh_y.df)
# Build bgn2.td.df; presumably adds a 2-period loan at 12% interest for
# teams below a 2e6 minimum balance -- verify in 70_01_BGN2Functions.R.
bgn2.td.df <- bgn2.td.df.f(netcash.v, teamname12, bgn1.td.df, loanlength = 2, minbalance = 2e6, interest = 0.12, yy)
|
/_MainCode/70_BGNstloan.R
|
no_license
|
junyitt/HM2016
|
R
| false
| false
| 739
|
r
|
#GENERAL FUNCTION: #importbalsh.f, teamname.f
#source functions
#netcash.v.f, addloan.f, bgn2.td.df.f
# NOTE(review): relies on maincode.dir, meta.dir, fulltran.dir, yy and
# bgn1.td.df being defined by the calling script -- confirm upstream.
# setwd() changes global state for everything that follows.
setwd(maincode.dir); source("70_01_BGN2Functions.R")
#ENVIRONMENT OUTPUT:
#balsh_y.df, teamname12, netcash.v
#bgn2.td.df
###############################################################
#MAIN INPUT: bgn1.td.df
###############################################################
#Import year y balance sheet
balsh_y.df <- importbalsh.f(meta.dir, fulltran.dir, yy)
#find netcash for each team
teamname12 <- teamname.f()
netcash.v <- netcash.v.f(teamname12, bgn1.td.df, balsh_y.df)
# Build bgn2.td.df; presumably adds a 2-period loan at 12% interest for
# teams below a 2e6 minimum balance -- verify in 70_01_BGN2Functions.R.
bgn2.td.df <- bgn2.td.df.f(netcash.v, teamname12, bgn1.td.df, loanlength = 2, minbalance = 2e6, interest = 0.12, yy)
|
# plot1.R -- histogram of Global Active Power for 2007-02-01 and 2007-02-02.
# Expects household_power_consumption.txt in the working directory;
# read.csv2 handles the semicolon separator (and comma decimal mark).
plotDF1 <- read.csv2("household_power_consumption.txt", stringsAsFactors = FALSE)
# Keep only the two required dates: February 1st and 2nd, 2007.
plotDF2 <- subset(plotDF1, plotDF1$Date == "1/2/2007" | plotDF1$Date == "2/2/2007")
# Combine Date and Time into one timestamp.  strptime() returns POSIXlt,
# which stores badly inside a data frame, so convert to POSIXct.
plotDF2$datetime <- as.POSIXct(strptime(paste(plotDF2$Date, plotDF2$Time),
                                        format = "%d/%m/%Y %H:%M:%S"))
plotDF2$Date <- NULL
plotDF2$Time <- NULL
# Move datetime to the front, then convert the remaining character columns
# to numeric (non-numeric entries such as "?" become NA).
# vapply instead of sapply pins the result type to a logical vector.
plotDF2 <- plotDF2[, c(8, 1:7)]
index <- vapply(plotDF2, is.character, logical(1))
plotDF2[index] <- lapply(plotDF2[index], as.numeric)
# Plot1 is made. Width and height are each 480 pixels by default.
png(file = "plot1.png")
hist(plotDF2$Global_active_power, col = "red", main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)", bty = "n")
# Turn the png device off so the file is flushed to disk.
dev.off()
|
/plot1.R
|
no_license
|
DavidCline/ExData_Plotting1
|
R
| false
| false
| 1,037
|
r
|
# plot1.R -- histogram of Global Active Power for 2007-02-01 and 2007-02-02.
# Expects household_power_consumption.txt in the working directory;
# read.csv2 handles the semicolon separator (and comma decimal mark).
plotDF1 <- read.csv2("household_power_consumption.txt", stringsAsFactors = FALSE)
# Keep only the two required dates: February 1st and 2nd, 2007.
plotDF2 <- subset(plotDF1, plotDF1$Date == "1/2/2007" | plotDF1$Date == "2/2/2007")
# Combine Date and Time into one timestamp.  strptime() returns POSIXlt,
# which stores badly inside a data frame, so convert to POSIXct.
plotDF2$datetime <- as.POSIXct(strptime(paste(plotDF2$Date, plotDF2$Time),
                                        format = "%d/%m/%Y %H:%M:%S"))
plotDF2$Date <- NULL
plotDF2$Time <- NULL
# Move datetime to the front, then convert the remaining character columns
# to numeric (non-numeric entries such as "?" become NA).
# vapply instead of sapply pins the result type to a logical vector.
plotDF2 <- plotDF2[, c(8, 1:7)]
index <- vapply(plotDF2, is.character, logical(1))
plotDF2[index] <- lapply(plotDF2[index], as.numeric)
# Plot1 is made. Width and height are each 480 pixels by default.
png(file = "plot1.png")
hist(plotDF2$Global_active_power, col = "red", main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)", bty = "n")
# Turn the png device off so the file is flushed to disk.
dev.off()
|
#' Plot additional diagnostics for a MOGA variable-selection archive.
#'
#' Draws the archive set (number of variables vs. MSE) and highlights either
#' every member within `epsilonBand` of the per-size minimum MSE
#' (method "MSE"), or the `kBest` lowest-MSE members per model size
#' (method "kBest").
#'
#' @param mogavs list with components archiveSet (matrix), obj1ArchiveSet
#'   (number of variables per member) and obj2ArchiveSet (MSE per member).
#' @param epsilonBand tolerance above the per-size minimum MSE ("MSE" method).
#' @param kBest number of best members highlighted per size ("kBest" method).
#' @param method "MSE" or "kBest" (case-insensitive).
#' @return NULL invisibly; called for its plotting side effect.
createAdditionalPlots <-
function(mogavs, epsilonBand=0, kBest=1, method=c("MSE","kBest")){
  # BUG FIX: the original did tolower(method) on the full default vector
  # c("MSE","kBest"), so `if (method == "mse")` compared a length-2 vector --
  # an error on R >= 4.2 whenever `method` was left at its default.  Taking
  # the first element keeps case-insensitive single-string calls working.
  method <- tolower(method[1])
  archiveSet <- mogavs$archiveSet
  obj1ArchiveSet <- mogavs$obj1ArchiveSet
  obj2ArchiveSet <- mogavs$obj2ArchiveSet
  N <- ncol(mogavs$archiveSet)
  if (method == "mse" && missing(epsilonBand)) {
    warning("Arg epsilonBand not supplied, defaulting to zero", call. = FALSE)
    epsilonBand <- 0
  }
  if (method == "kbest" && missing(kBest)) {
    warning("Arg kBest not supplied, defaulting to one")
    kBest <- 1
  }
  sizeArchive <- nrow(archiveSet)
  epsMembers <- list()
  epsMembersTemp <- list()
  minMSE <- list(NA)
  for (i in 1:N) {
    # All archive members using exactly i variables; stop at the first
    # model size that has no members.
    iMembers <- which(obj1ArchiveSet == i)
    if (length(iMembers) == 0) {
      break
    }
    # Minimum MSE among members with i variables.
    minMSE[[i]] <- min(obj2ArchiveSet[iMembers])
    # Tag the members with i variables whose MSE lies within epsilonBand
    # of that minimum.
    if (length((minMSE[[i]])) > 0) {
      epsMembersTemp[[i]] <- obj2ArchiveSet[iMembers] - minMSE[[i]] <= epsilonBand
      epsMembers[[i]] <- iMembers[epsMembersTemp[[i]]]
    } else {
      epsMembersTemp[[i]] <- ""
      epsMembers[[i]] <- ""
    }
  }
  if (method == "mse") {
    # Collect the (size, MSE) pairs of all epsilon-band members and overlay
    # them in blue on the full archive.
    temp <- data.frame()
    for (i in 1:N) {
      if (length(epsMembers[[i]]) > 0) {
        temp <- rbind(temp, cbind(obj1ArchiveSet[epsMembers[[i]]], obj2ArchiveSet[epsMembers[[i]]]))
      }
    }
    plot(obj1ArchiveSet, obj2ArchiveSet, col = "red", pch = 8)
    points(temp, col = "blue", pch = 8)
    legend("topright", c("Entire Archive Set", paste("Members in Epsilon (=", epsilonBand, ") band", sep = "")), col = c("red", "blue"), pch = c(8, 8))
    rm(temp)
  }
  if (method == "kbest") {
    # New figure: the k best models for each number of variables.
    plot(obj1ArchiveSet, obj2ArchiveSet, col = "red", pch = 8)
    orderedMSE <- list()
    kBestMembersTemp <- list()
    kBestMembers <- list()
    for (i in 1:N) {
      # Members with i variables, ordered by increasing MSE.
      iMembers <- which(obj1ArchiveSet == i)
      orderedMSE[[i]] <- sort(obj2ArchiveSet[iMembers])
      I <- order(obj2ArchiveSet[iMembers])
      if (length(orderedMSE[[i]]) > 0) {
        kBestMembersTemp[[i]] <- iMembers[I]
        s <- min(length(kBestMembersTemp[[i]]), kBest)
        kBestMembers[[i]] <- kBestMembersTemp[[i]][1:s]
      }
      else {
        # Kept as the literal string "NA" for compatibility with the
        # original: character indexing of an unnamed numeric vector yields
        # NA, so the loop below plots nothing for this size.
        kBestMembers[[i]] <- "NA"
      }
    }
    for (i in 1:N) {
      if (length(kBestMembers[[i]]) > 0) {
        # Plot the kBest members with i variables.
        points(obj1ArchiveSet[kBestMembers[[i]]], obj2ArchiveSet[kBestMembers[[i]]], col = "blue", pch = 8)
        legend("topright", c("Entire Archive Set", paste("kBest(=", kBest, ") members for each level of variables", sep = "")), col = c("red", "blue"), pch = c(8, 8))
      }
    }
  }
}
|
/mogavs/R/createAdditionalPlots.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 2,962
|
r
|
#' Plot additional diagnostics for a MOGA variable-selection archive.
#'
#' Draws the archive set (number of variables vs. MSE) and highlights either
#' every member within `epsilonBand` of the per-size minimum MSE
#' (method "MSE"), or the `kBest` lowest-MSE members per model size
#' (method "kBest").
#'
#' @param mogavs list with components archiveSet (matrix), obj1ArchiveSet
#'   (number of variables per member) and obj2ArchiveSet (MSE per member).
#' @param epsilonBand tolerance above the per-size minimum MSE ("MSE" method).
#' @param kBest number of best members highlighted per size ("kBest" method).
#' @param method "MSE" or "kBest" (case-insensitive).
#' @return NULL invisibly; called for its plotting side effect.
createAdditionalPlots <-
function(mogavs, epsilonBand=0, kBest=1, method=c("MSE","kBest")){
  # BUG FIX: the original did tolower(method) on the full default vector
  # c("MSE","kBest"), so `if (method == "mse")` compared a length-2 vector --
  # an error on R >= 4.2 whenever `method` was left at its default.  Taking
  # the first element keeps case-insensitive single-string calls working.
  method <- tolower(method[1])
  archiveSet <- mogavs$archiveSet
  obj1ArchiveSet <- mogavs$obj1ArchiveSet
  obj2ArchiveSet <- mogavs$obj2ArchiveSet
  N <- ncol(mogavs$archiveSet)
  if (method == "mse" && missing(epsilonBand)) {
    warning("Arg epsilonBand not supplied, defaulting to zero", call. = FALSE)
    epsilonBand <- 0
  }
  if (method == "kbest" && missing(kBest)) {
    warning("Arg kBest not supplied, defaulting to one")
    kBest <- 1
  }
  sizeArchive <- nrow(archiveSet)
  epsMembers <- list()
  epsMembersTemp <- list()
  minMSE <- list(NA)
  for (i in 1:N) {
    # All archive members using exactly i variables; stop at the first
    # model size that has no members.
    iMembers <- which(obj1ArchiveSet == i)
    if (length(iMembers) == 0) {
      break
    }
    # Minimum MSE among members with i variables.
    minMSE[[i]] <- min(obj2ArchiveSet[iMembers])
    # Tag the members with i variables whose MSE lies within epsilonBand
    # of that minimum.
    if (length((minMSE[[i]])) > 0) {
      epsMembersTemp[[i]] <- obj2ArchiveSet[iMembers] - minMSE[[i]] <= epsilonBand
      epsMembers[[i]] <- iMembers[epsMembersTemp[[i]]]
    } else {
      epsMembersTemp[[i]] <- ""
      epsMembers[[i]] <- ""
    }
  }
  if (method == "mse") {
    # Collect the (size, MSE) pairs of all epsilon-band members and overlay
    # them in blue on the full archive.
    temp <- data.frame()
    for (i in 1:N) {
      if (length(epsMembers[[i]]) > 0) {
        temp <- rbind(temp, cbind(obj1ArchiveSet[epsMembers[[i]]], obj2ArchiveSet[epsMembers[[i]]]))
      }
    }
    plot(obj1ArchiveSet, obj2ArchiveSet, col = "red", pch = 8)
    points(temp, col = "blue", pch = 8)
    legend("topright", c("Entire Archive Set", paste("Members in Epsilon (=", epsilonBand, ") band", sep = "")), col = c("red", "blue"), pch = c(8, 8))
    rm(temp)
  }
  if (method == "kbest") {
    # New figure: the k best models for each number of variables.
    plot(obj1ArchiveSet, obj2ArchiveSet, col = "red", pch = 8)
    orderedMSE <- list()
    kBestMembersTemp <- list()
    kBestMembers <- list()
    for (i in 1:N) {
      # Members with i variables, ordered by increasing MSE.
      iMembers <- which(obj1ArchiveSet == i)
      orderedMSE[[i]] <- sort(obj2ArchiveSet[iMembers])
      I <- order(obj2ArchiveSet[iMembers])
      if (length(orderedMSE[[i]]) > 0) {
        kBestMembersTemp[[i]] <- iMembers[I]
        s <- min(length(kBestMembersTemp[[i]]), kBest)
        kBestMembers[[i]] <- kBestMembersTemp[[i]][1:s]
      }
      else {
        # Kept as the literal string "NA" for compatibility with the
        # original: character indexing of an unnamed numeric vector yields
        # NA, so the loop below plots nothing for this size.
        kBestMembers[[i]] <- "NA"
      }
    }
    for (i in 1:N) {
      if (length(kBestMembers[[i]]) > 0) {
        # Plot the kBest members with i variables.
        points(obj1ArchiveSet[kBestMembers[[i]]], obj2ArchiveSet[kBestMembers[[i]]], col = "blue", pch = 8)
        legend("topright", c("Entire Archive Set", paste("kBest(=", kBest, ") members for each level of variables", sep = "")), col = c("red", "blue"), pch = c(8, 8))
      }
    }
  }
}
|
\name{chx.artefacts}
\alias{chx.artefacts}
\title{Artefacts visible on codon enrichment, due to cycloheximide or other drug}
\usage{
chx.artefacts(XP.conditions, XP.names, pathout, algo="unbiased")
}
\arguments{
\item{XP.conditions}{Vector of experimental conditions for each sample}
\item{XP.names}{Vector of names for each sample}
\item{pathout}{Address where output files will be written}
\item{algo}{Algorithm used, either "unbiased" (default) or "Hussmann"}
}
\value{
This function returns a list containing the following :
\describe{
\item{plot}{Address of png plot file}
\item{value}{Standard deviation of enrichment for each codon}
\item{color}{Color white/orange/red corresponding to good/warning/poor level of quality}
\item{recommendation}{Description and recommendation based on value}
}
}
\description{
\code{chx.artefacts} takes a list of samples and their conditions as input and
visualizes enrichment around AUG +/- 90 codons, where possible artefacts due
to drugs used in the experiment should be visible.
}
\details{
This function plots enrichment of codons around AUG and 2 further plots dedicated to
Arginine codons or to Lys-aaa codon.
}
\examples{
\donttest{
# Sequenced reads aligned to mRNA (and containing no rRNA, depleted previously),
# in bam format
readsBAM.1.1 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond1-Rep1.bam",sep="")
readsBAM.1.2 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond1-Rep2.bam",sep="")
readsBAM.1.3 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond1-Rep3.bam",sep="")
readsBAM.2.1 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond2-Rep1.bam",sep="")
readsBAM.2.2 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond2-Rep2.bam",sep="")
readsBAM.2.3 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond2-Rep3.bam",sep="")
list.bam <- list(readsBAM.1.1, readsBAM.1.2, readsBAM.1.3,
readsBAM.2.1, readsBAM.2.2, readsBAM.2.3)
#
## Experimental conditions, in text and as indicators :
# 0 for control
# 1 for a condition, treatment, case, etc...
# 2, 3, etc. for further conditions
XP.conditions <- c("cond1","cond1","cond1","cond2", "cond2","cond2")
XP.conditions.i <- c( 1,1,1,2,2,2)
XP.names <- c("C1.R1", "C1.R2", "C1.R3",
"C2.R1", "C2.R2", "C2.R3")
#
## Reference annotation for mRNAs' CDS.
#
refCDS <- paste(system.file(package="RiboVIEW", mustWork = TRUE), "/extdata/synth.tsv", sep="")
# Note : CDS annotation can be obtained from a GTF file,
# using gtf2table(my-gtf-file, outfile = my-cds-file)
# (for example GTF file as provided by Ensembl.org work well with gtf2table)
#
## Reference sequences for mRNAs.
#
refFASTA <- paste(system.file(package="RiboVIEW", mustWork = TRUE), "/extdata/synth.fasta", sep="")
#
## Work and output folder.
#
pathout <- paste(tempdir(),"/", sep="")
## !! This is a temporary directory, which will be erased when you leave R !!
## For your own analyses you would probably prefer to point to a permanent repository :
# pathout <- /home/me/address-to-my-output-repository/ # Define address,
# #including a final slash.
# system(paste('mkdir',pathout)) # Create folder at said address.
# setwd(pathout) # Go to this directory. This is useful if you want to
# #save additional tables or figures.
#
## A-site coverage periodicity by length
#
periodicity(list.bam, refCDS, refFASTA, pathout, XP.names, versionStrip = FALSE)
#
## Select footprint length with sufficient periodicity
#
attach(listminmax <- select.FPlen(list.bam, pathout, XP.names))
#
## Codon occupancy, codon enrichment.
#
enrichmentNoccupancy(list.bam, refCDS, refFASTA, mini, maxi, XP.names,
pathout, versionStrip = FALSE)
#
## Potential artefacts due to Cycloheximide or other drugs
#
chx.artefacts.res <- chx.artefacts(XP.conditions, XP.names, pathout)
chx.artefacts.res
}
\dontshow{
# Sequenced reads aligned to mRNA (and containing no rRNA, depleted previously),
# in bam format
readsBAM.1.1 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond1-Rep1.bam",sep="")
readsBAM.1.2 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond1-Rep2.bam",sep="")
readsBAM.1.3 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond1-Rep3.bam",sep="")
readsBAM.2.1 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond2-Rep1.bam",sep="")
readsBAM.2.2 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond2-Rep2.bam",sep="")
readsBAM.2.3 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond2-Rep3.bam",sep="")
list.bam <- list(readsBAM.1.1, readsBAM.1.2, readsBAM.1.3,
readsBAM.2.1, readsBAM.2.2, readsBAM.2.3)
#
## Experimental conditions, in text and as indicators :
# 0 for control
# 1 for a condition, treatment, case, etc...
# 2, 3, etc. for further conditions
XP.conditions <- c("cond1","cond1","cond1","cond2", "cond2","cond2")
XP.conditions.i <- c( 1,1,1,2,2,2)
XP.names <- c("C1.R1", "C1.R2", "C1.R3",
"C2.R1", "C2.R2", "C2.R3")
#
## Reference annotation for mRNAs' CDS.
#
refCDS <- paste(system.file(package="RiboVIEW", mustWork = TRUE), "/extdata/synth.tsv", sep="")
# Note : CDS annotation can be obtained from a GTF file,
# using gtf2table(my-gtf-file, outfile = my-cds-file)
# (for example GTF file as provided by Ensembl.org work well with gtf2table)
#
## Reference sequences for mRNAs.
#
refFASTA <- paste(system.file(package="RiboVIEW", mustWork = TRUE), "/extdata/synth.fasta", sep="")
#
## Work and output folder.
#
pathout <- paste(tempdir(),"/", sep="")
## !! This is a temporary directory, which will be erased when you leave R !!
## For your own analyses you would probably prefer to point to a permanent repository :
# pathout <- /home/me/address-to-my-output-repository/ # Define address,
# #including a final slash.
# system(paste('mkdir',pathout)) # Create folder at said address.
# setwd(pathout) # Go to this directory. This is useful if you want to
# #save additional tables or figures.
#
## A-site coverage periodicity by length
#
suppressMessages(periodicity(list.bam, refCDS, refFASTA, pathout, XP.names,
versionStrip = FALSE,
python.messages=FALSE))
#
## Select footprint length with sufficient periodicity
#
attach(listminmax <- select.FPlen(list.bam, pathout, XP.names))
#
## Codon occupancy, codon enrichment.
#
enrichmentNoccupancy(list.bam, refCDS, refFASTA, mini, maxi, XP.names,
pathout, versionStrip = FALSE,
r.messages=FALSE,
python.messages=FALSE)
#
## Potential artefacts due to Cycloheximide or other drugs
#
chx.artefacts.res <- chx.artefacts(XP.conditions, XP.names, pathout)
}
}
|
/man/chx.artefacts.Rd
|
no_license
|
carinelegrand/RiboVIEW
|
R
| false
| false
| 7,987
|
rd
|
\name{chx.artefacts}
\alias{chx.artefacts}
\title{Artefacts visible on codon enrichment, due to cycloheximide or other drug}
\usage{
chx.artefacts(XP.conditions, XP.names, pathout, algo="unbiased")
}
\arguments{
\item{XP.conditions}{Vector of experimental conditions for each sample}
\item{XP.names}{Vector of names for each sample}
\item{pathout}{Address where output files will be written}
\item{algo}{Algorithm used, either "unbiased" (default) or "Hussmann"}
}
\value{
This function returns a list containing the following :
\describe{
\item{plot}{Address of png plot file}
\item{value}{Standard deviation of enrichment for each codon}
\item{color}{Color white/orange/red corresponding to good/warning/poor level of quality}
\item{recommendation}{Description and recommendation based on value}
}
}
\description{
\code{chx.artefacts} takes a list of samples and their conditions as input and
visualizes enrichment around AUG +/- 90 codons, where possible artefacts due
to drugs used in the experiment should be visible.
}
\details{
This function plots enrichment of codons around AUG and 2 further plots dedicated to
Arginine codons or to Lys-aaa codon.
}
\examples{
\donttest{
# Sequenced reads aligned to mRNA (and containing no rRNA, depleted previously),
# in bam format
readsBAM.1.1 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond1-Rep1.bam",sep="")
readsBAM.1.2 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond1-Rep2.bam",sep="")
readsBAM.1.3 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond1-Rep3.bam",sep="")
readsBAM.2.1 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond2-Rep1.bam",sep="")
readsBAM.2.2 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond2-Rep2.bam",sep="")
readsBAM.2.3 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond2-Rep3.bam",sep="")
list.bam <- list(readsBAM.1.1, readsBAM.1.2, readsBAM.1.3,
readsBAM.2.1, readsBAM.2.2, readsBAM.2.3)
#
## Experimental conditions, in text and as indicators :
# 0 for control
# 1 for a condition, treatment, case, etc...
# 2, 3, etc. for further conditions
XP.conditions <- c("cond1","cond1","cond1","cond2", "cond2","cond2")
XP.conditions.i <- c( 1,1,1,2,2,2)
XP.names <- c("C1.R1", "C1.R2", "C1.R3",
"C2.R1", "C2.R2", "C2.R3")
#
## Reference annotation for mRNAs' CDS.
#
refCDS <- paste(system.file(package="RiboVIEW", mustWork = TRUE), "/extdata/synth.tsv", sep="")
# Note : CDS annotation can be obtained from a GTF file,
# using gtf2table(my-gtf-file, outfile = my-cds-file)
# (for example GTF file as provided by Ensembl.org work well with gtf2table)
#
## Reference sequences for mRNAs.
#
refFASTA <- paste(system.file(package="RiboVIEW", mustWork = TRUE), "/extdata/synth.fasta", sep="")
#
## Work and output folder.
#
pathout <- paste(tempdir(),"/", sep="")
## !! This is a temporary directory, which will be erased when you leave R !!
## For your own analyses you would probably prefer to point to a permanent repository :
# pathout <- /home/me/address-to-my-output-repository/ # Define address,
# #including a final slash.
# system(paste('mkdir',pathout)) # Create folder at said address.
# setwd(pathout) # Go to this directory. This is useful if you want to
# #save additional tables or figures.
#
## A-site coverage periodicity by length
#
periodicity(list.bam, refCDS, refFASTA, pathout, XP.names, versionStrip = FALSE)
#
## Select footprint length with sufficient periodicity
#
attach(listminmax <- select.FPlen(list.bam, pathout, XP.names))
#
## Codon occupancy, codon enrichment.
#
enrichmentNoccupancy(list.bam, refCDS, refFASTA, mini, maxi, XP.names,
pathout, versionStrip = FALSE)
#
## Potential artefacts due to Cycloheximide or other drugs
#
chx.artefacts.res <- chx.artefacts(XP.conditions, XP.names, pathout)
chx.artefacts.res
}
\dontshow{
# Sequenced reads aligned to mRNA (and containing no rRNA, depleted previously),
# in bam format
readsBAM.1.1 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond1-Rep1.bam",sep="")
readsBAM.1.2 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond1-Rep2.bam",sep="")
readsBAM.1.3 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond1-Rep3.bam",sep="")
readsBAM.2.1 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond2-Rep1.bam",sep="")
readsBAM.2.2 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond2-Rep2.bam",sep="")
readsBAM.2.3 <- paste(system.file(package="RiboVIEW", mustWork = TRUE),
"/extdata/Cond2-Rep3.bam",sep="")
list.bam <- list(readsBAM.1.1, readsBAM.1.2, readsBAM.1.3,
readsBAM.2.1, readsBAM.2.2, readsBAM.2.3)
#
## Experimental conditions, in text and as indicators :
# 0 for control
# 1 for a condition, treatment, case, etc...
# 2, 3, etc. for further conditions
XP.conditions <- c("cond1","cond1","cond1","cond2", "cond2","cond2")
XP.conditions.i <- c( 1,1,1,2,2,2)
XP.names <- c("C1.R1", "C1.R2", "C1.R3",
"C2.R1", "C2.R2", "C2.R3")
#
## Reference annotation for mRNAs' CDS.
#
refCDS <- paste(system.file(package="RiboVIEW", mustWork = TRUE), "/extdata/synth.tsv", sep="")
# Note : CDS annotation can be obtained from a GTF file,
# using gtf2table(my-gtf-file, outfile = my-cds-file)
# (for example GTF file as provided by Ensembl.org work well with gtf2table)
#
## Reference sequences for mRNAs.
#
refFASTA <- paste(system.file(package="RiboVIEW", mustWork = TRUE), "/extdata/synth.fasta", sep="")
#
## Work and output folder.
#
pathout <- paste(tempdir(),"/", sep="")
## !! This is a temporary directory, which will be erased when you leave R !!
## For your own analyses you would probably prefer to point to a permanent repository :
# pathout <- /home/me/address-to-my-output-repository/ # Define address,
# #including a final slash.
# system(paste('mkdir',pathout)) # Create folder at said address.
# setwd(pathout) # Go to this directory. This is useful if you want to
# #save additional tables or figures.
#
## A-site coverage periodicity by length
#
suppressMessages(periodicity(list.bam, refCDS, refFASTA, pathout, XP.names,
versionStrip = FALSE,
python.messages=FALSE))
#
## Select footprint length with sufficient periodicity
#
attach(listminmax <- select.FPlen(list.bam, pathout, XP.names))
#
## Codon occupancy, codon enrichment.
#
enrichmentNoccupancy(list.bam, refCDS, refFASTA, mini, maxi, XP.names,
pathout, versionStrip = FALSE,
r.messages=FALSE,
python.messages=FALSE)
#
## Potential artefacts due to Cycloheximide or other drugs
#
chx.artefacts.res <- chx.artefacts(XP.conditions, XP.names, pathout)
}
}
|
# Move every file from `old_dir` into `new_dir`.
#
# BUG FIX: the original built source/target paths with
# paste0(dir, list.files(dir)), which produces wrong paths (e.g. "dira.txt")
# unless the directory string happens to end in a separator.  file.path()
# inserts the separator, so both "dir" and "dir/" work.
#
# @param old_dir Directory whose files are moved.
# @param new_dir Existing destination directory.
# @return Logical vector from file.rename(), one element per file,
#   TRUE where the move succeeded (returned invisibly).
move_files <- function(old_dir, new_dir){
  f_rel <- list.files(old_dir)
  f_abs <- file.path(old_dir, f_rel)
  new_f <- file.path(new_dir, f_rel)
  invisible(file.rename(f_abs, new_f))
}
|
/R_Models/games/game_1/utils/moveFiles.R
|
no_license
|
jandraor/HealthSim
|
R
| false
| false
| 181
|
r
|
# Move every file from `old_dir` into `new_dir`.
#
# BUG FIX: the original built source/target paths with
# paste0(dir, list.files(dir)), which produces wrong paths (e.g. "dira.txt")
# unless the directory string happens to end in a separator.  file.path()
# inserts the separator, so both "dir" and "dir/" work.
#
# @param old_dir Directory whose files are moved.
# @param new_dir Existing destination directory.
# @return Logical vector from file.rename(), one element per file,
#   TRUE where the move succeeded (returned invisibly).
move_files <- function(old_dir, new_dir){
  f_rel <- list.files(old_dir)
  f_abs <- file.path(old_dir, f_rel)
  new_f <- file.path(new_dir, f_rel)
  invisible(file.rename(f_abs, new_f))
}
|
##### MCMC code for sampling gaussian correlation parameters in a gaussian process
# NOTE(review): depends on log_posterior() (and in the example, cor.matrix()),
# which are defined elsewhere in the package -- this file is not standalone.
#' Univariate, Uniform step length MCMC function, meant to be used by gpMCMC function
#'
#' @param nmcmc number of MCMC samples to be generated before thinning and burning
#' @param burn number of mcmc samples to burn
#' @param thin keep one of every 'thin' samples
#' @param x covariates
#' @param y response
#' @param reg only option currently is "constant"
#' @param step step length for mcmc
#' @param priortheta only option currently is "Exp"
#'
#' @return returns a list containing mcmc.ma (samples) and accept (acceptance rates)
#' @export
#'
#' @examples
#'
#'nsamp <- 100
#' burn <- 200
#' thin <- 10
#'
#' n <- 10
#' x1 <- seq(-5,10,length.out = n)
#' x2 <- seq(0,15,length.out = n)
#' x <- expand.grid(x1,x2)
#' d2 <- c(0.01,0.2,0,0) #here we set the theta parameters to be 0.01 and 0.2.
#' # These are the modes of the distribution that we will sample from using MCMC
#' cor.par <- data.frame(matrix(data = d2,nrow = dim(x)[2],ncol = 2))
#' names(cor.par) <- c("Theta.y","Alpha.y")
#'
#' R <- cor.matrix(x,cor.par) # obtain covariance matrix
#' L <- chol(R)
#' z <- as.matrix(rnorm(n^2))
#' y <- L%*%z
#'
#' gp <- bceMCMC(1000,10,10,x,y,reg = "constant",step =0.1, priortheta = "Exp")
#' mean(gp$mcmc.ma[,2]) #these means should be similar to the theta parameters set above
#' mean(gp$mcmc.ma[,1])
bceMCMC<-function(nmcmc,burn,thin,x,y,reg,step, priortheta){
# Correlation-parameter templates: column 1 = Theta.y (updated each proposal),
# column 2 = Alpha.y (held at 0).  cor.par holds the proposal, cor.par2 the
# current state, when evaluating the posterior ratio below.
ddd <- c(rep(1,dim(x)[2]),rep(0,dim(x)[2]))
cor.par <- data.frame(matrix(data = ddd,nrow = dim(x)[2],ncol = 2))
names(cor.par) <- c("Theta.y","Alpha.y")
cor.par2 <- cor.par
p<-ncol(x)
j=0
#final mcmc trials
mcmc.ma<-matrix(nrow=(nmcmc-burn)/thin,ncol=ncol(x))
#we use the following vector to count acceptances
# NOTE(review): only columns (2+k) for k in 1..p are ever incremented;
# the first two columns stay 0.
accept<-matrix(c(rep(0,p+2)),nrow=1,ncol=p+2,byrow=T)
#initial guesses
phi<-c(rep(0.1,p))
#phi<-cor.par[,1]
#step length
phi.w<-c(rep(step,p))
if(reg=="constant"){
for(i in 1:nmcmc){
# One Metropolis update per correlation parameter k.
for(k in 1:p){
phi.cond<-phi
d <- 0
# Uniform random-walk proposal on the log scale; the loop re-draws until
# the back-transformed value is positive (exp(.) > 0 is effectively always
# true, so this normally runs once).
while(d == 0){
phi.cond[k]<-log(phi[k])+(runif(1)-0.5)*phi.w[k]
if(exp(phi.cond[k])>0){
d <- 1
}
}
phi.cond[k] <- exp(phi.cond[k])
if(phi.cond[k]>0){
phi_cond=phi.cond
phi_or=phi
#com.phi<-log.post1.constant(x,y,phi_cond, priortheta)$logpost-log.post1.constant(x,y,phi_or, priortheta)$logpost
# Log posterior ratio of proposal vs current state, with a constant
# (intercept-only) regression term.  log_posterior is defined elsewhere.
cor.par[,1] <- phi_cond
cor.par2[,1] <- phi_or
com.phi <- log_posterior(x,y,as.matrix(rep(1,dim(x)[1])),cor.par, prior = "Exp") - log_posterior(x,y,as.matrix(rep(1,dim(x)[1])),cor.par2, prior = "Exp")
# Metropolis accept/reject on the log scale.
u<-runif(1)
if(log(u)<com.phi){
phi<-phi.cond
accept[1,(2+k)]=accept[1,(2+k)]+1
}
}
}
# Record the state after burn-in, keeping every thin-th draw.
if(i>burn&&((i-burn)%%thin==0)){
j=j+1
mcmc.ma[j,]=phi
}
#if(i>burn&&((i-burn)%%thin==0)){
#j=j+1
#res[j,]=pred1.constant(x,y,xtest1,mcmc.ma[i,3:(p+2)], priortheta, priorsigma)$res
#v.term2[j,]=pred1.constant(x,y,xtest1,mcmc.ma[i,3:(p+2)], priortheta, priorsigma)$v.term2
#}
# Progress report every 10% of iterations.
if ((i%%(0.1*nmcmc))==0){
print(c(paste("reg=", reg), paste("priortheta=", priortheta) ,i/nmcmc))
#print(c('prior=1',i/nmcmc))
}
}
#mcmc.ma<-mcmc.ma[,-(1:2)]
m<-list(mcmc.ma=mcmc.ma, accept = accept)
return(m)
}
}
|
/R/MCMC.R
|
no_license
|
galotalp/gpMCMC
|
R
| false
| false
| 3,228
|
r
|
##### MCMC code for sampling gaussian correlation parameters in a gaussian process
#' Univariate, Uniform step length MCMC function, meant to be used by gpMCMC function
#'
#' @param nmcmc number of MCMC samples to be generated before thinning and burning
#' @param burn number of mcmc samples to burn
#' @param thin keep one of every 'thin' samples
#' @param x covariates
#' @param y response
#' @param reg only option currently is "constant"
#' @param step step length for mcmc
#' @param priortheta only option currently is "Exp"
#'
#' @return returns a list containing mcmc.ma (samples) and accept (acceptance rates)
#' @export
#'
#' @examples
#'
#' nsamp <- 100
#' burn <- 200
#' thin <- 10
#'
#' n <- 10
#' x1 <- seq(-5,10,length.out = n)
#' x2 <- seq(0,15,length.out = n)
#' x <- expand.grid(x1,x2)
#' d2 <- c(0.01,0.2,0,0) #here we set the theta parameters to be 0.01 and 0.2.
#' # These are the modes of the distribution that we will sample from using MCMC
#' cor.par <- data.frame(matrix(data = d2,nrow = dim(x)[2],ncol = 2))
#' names(cor.par) <- c("Theta.y","Alpha.y")
#'
#' R <- cor.matrix(x,cor.par) # obtain covariance matrix
#' L <- chol(R)
#' z <- as.matrix(rnorm(n^2))
#' y <- L%*%z
#'
#' gp <- bceMCMC(1000,10,10,x,y,reg = "constant",step =0.1, priortheta = "Exp")
#' mean(gp$mcmc.ma[,2]) #these means should be similar to the theta parameters set above
#' mean(gp$mcmc.ma[,1])
bceMCMC<-function(nmcmc,burn,thin,x,y,reg,step, priortheta){
# Set up the correlation-parameter data frames handed to log_posterior():
# column "Theta.y" holds the correlation parameters being sampled,
# column "Alpha.y" is fixed at 0 here.
ddd <- c(rep(1,dim(x)[2]),rep(0,dim(x)[2]))
cor.par <- data.frame(matrix(data = ddd,nrow = dim(x)[2],ncol = 2))
names(cor.par) <- c("Theta.y","Alpha.y")
cor.par2 <- cor.par
p<-ncol(x)
# j indexes the next row of mcmc.ma to fill (post burn-in, thinned draws).
j=0
#final mcmc trials
mcmc.ma<-matrix(nrow=(nmcmc-burn)/thin,ncol=ncol(x))
#we use the following vector to count acceptances
# (only columns 3..p+2 are used: one counter per correlation parameter)
accept<-matrix(c(rep(0,p+2)),nrow=1,ncol=p+2,byrow=T)
#initial guesses
phi<-c(rep(0.1,p))
#phi<-cor.par[,1]
#step length
phi.w<-c(rep(step,p))
# "constant" regression is the only supported mode; any other `reg` value
# silently returns NULL — NOTE(review): consider raising an explicit error.
if(reg=="constant"){
for(i in 1:nmcmc){
# One univariate Metropolis update per parameter k, proposing on the
# log scale with a uniform step of half-width phi.w[k]/2.
for(k in 1:p){
phi.cond<-phi
d <- 0
# Redraw until the proposal does not underflow to 0 on the raw scale.
while(d == 0){
phi.cond[k]<-log(phi[k])+(runif(1)-0.5)*phi.w[k]
if(exp(phi.cond[k])>0){
d <- 1
}
}
phi.cond[k] <- exp(phi.cond[k])
if(phi.cond[k]>0){
phi_cond=phi.cond
phi_or=phi
#com.phi<-log.post1.constant(x,y,phi_cond, priortheta)$logpost-log.post1.constant(x,y,phi_or, priortheta)$logpost
# Log acceptance ratio: posterior at the proposal minus posterior at
# the current state (constant-mean design of ones, "Exp" prior).
cor.par[,1] <- phi_cond
cor.par2[,1] <- phi_or
com.phi <- log_posterior(x,y,as.matrix(rep(1,dim(x)[1])),cor.par, prior = "Exp") - log_posterior(x,y,as.matrix(rep(1,dim(x)[1])),cor.par2, prior = "Exp")
u<-runif(1)
# Metropolis accept/reject step.
if(log(u)<com.phi){
phi<-phi.cond
accept[1,(2+k)]=accept[1,(2+k)]+1
}
}
}
# Record the current state after burn-in, keeping every `thin`-th draw.
if(i>burn&&((i-burn)%%thin==0)){
j=j+1
mcmc.ma[j,]=phi
}
#if(i>burn&&((i-burn)%%thin==0)){
#j=j+1
#res[j,]=pred1.constant(x,y,xtest1,mcmc.ma[i,3:(p+2)], priortheta, priorsigma)$res
#v.term2[j,]=pred1.constant(x,y,xtest1,mcmc.ma[i,3:(p+2)], priortheta, priorsigma)$v.term2
#}
# Progress report every 10% of iterations.
if ((i%%(0.1*nmcmc))==0){
print(c(paste("reg=", reg), paste("priortheta=", priortheta) ,i/nmcmc))
#print(c('prior=1',i/nmcmc))
}
}
#mcmc.ma<-mcmc.ma[,-(1:2)]
m<-list(mcmc.ma=mcmc.ma, accept = accept)
return(m)
}
}
|
# Kaggle Titanic survival walkthrough: exploratory aggregates, a decision
# tree, title/family feature engineering, then random forest and a
# conditional inference forest, ending with a submission CSV.
train <- read.csv("~/Documents/Kaggle/TitanicSurivivalPrediction/train.csv")
test <- read.csv("~/Documents/Kaggle/TitanicSurivivalPrediction/test.csv")
test$Survived <- rep(0,418)
# Flag passengers under 18 as children.
train$Child <- 0
train$Child[train$Age < 18] <- 1
# Survival counts, group sizes, and survival rates by child status/sex/class.
aggregate(Survived ~ Child+Sex, data=train,FUN=sum)
aggregate(Survived ~ Child+Sex+Pclass, data=train,FUN=length)
aggregate(Survived ~ Child+Sex, data=train,FUN=function(x) {sum(x)/length(x)})
library(rpart)
library(rattle)
library(rpart.plot)
library(RColorBrewer)
# Fully grown tree (minsplit=2, cp=0): deliberately overfit demonstration.
fit <- rpart(Survived ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked, data=train,
method="class", control = rpart.control(minsplit=2,cp=0))
fancyRpartPlot(fit)
# Combine train and test so engineered features are consistent across both.
test$Survived <- NA
combi <- rbind(train,test)
combi$Name <- as.character(combi$Name)
strsplit(combi$Name[1],split='[,.]')[[1]][2]
# Extract the honorific title from each name and collapse rare variants.
combi$Title <- sapply(combi$Name, FUN = function(x) {strsplit(x,split='[,.]')[[1]][2]})
combi$Title <- sub(' ','',combi$Title)
combi$Title[combi$Title %in% c('Mme','Mlle')] <- 'Mlle'
combi$Title[combi$Title %in% c('Capt','Don','Major','Sir')] <- 'Sir'
combi$Title[combi$Title %in% c('Dona','Lady','the Countess','Jonkheer')] <- 'Lady'
combi$Title <- factor(combi$Title)
combi$FamilySize <- combi$SibSp + combi$Parch +1
# Want to find last names... so that we can create familyID
combi$Surname <- sapply(combi$Name, FUN = function(x) {strsplit(x,split='[,.]')[[1]][1]})
combi$FamilyID <- paste(as.character(combi$FamilySize),combi$Surname,sep='')
combi$FamilyID[combi$FamilySize <= 2] <- 'Small'
# There are FamilyID's that only have count of 1 or 2.. which means that
# their family members might have had different last names
famIDs <- data.frame(table(combi$FamilyID))
famIDs <- famIDs[famIDs$Freq <= 2,]
# Now we clean these IDs out from FamilyID
combi$FamilyID[combi$FamilyID %in% famIDs$Var1] <- 'Small'
combi$FamilyID <- factor(combi$FamilyID)
# Now we can split the combined dataset with newly engineered features
train <- combi[1:891,]
test <- combi[892:1309,]
fit <- rpart(Survived ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked +
Title + FamilySize + FamilyID,
data=train,
method="class")
# NOTE(review): this Prediction is overwritten below without being used.
Prediction <- predict(fit,test,type='class')
# Replace missing Age values using a decision tree!
Agefit <- rpart(Age ~ Pclass+Sex+SibSp+Parch+Fare+Embarked+Title+FamilySize,
data=combi[!is.na(combi$Age),], method='anova')
combi$Age[is.na(combi$Age)] <- predict(Agefit,combi[is.na(combi$Age),])
# Fill the two blank Embarked values and the single missing Fare.
combi$Embarked[c(which(combi$Embarked==''))] = 'S'
combi$Embarked <- factor(combi$Embarked)
combi$Fare[which(is.na(combi$Fare))] <- median(combi$Fare, na.rm=TRUE)
# reduce the number of levels of the FamilyID factor to less than 32
# because RandomForest only takes that many levels
combi$FamilyID2 <- as.character(combi$FamilyID)
combi$FamilyID2[combi$FamilySize <= 3] <- 'Small'
combi$FamilyID2 <- factor(combi$FamilyID2)
library(randomForest)
# to ensure the same randomness everytime
set.seed(415)
# NOTE(review): train/test were sliced from combi BEFORE the Age/Embarked/
# Fare imputation and FamilyID2 creation above, so they do not carry those
# fixes; the split (train <- combi[1:891,]; test <- combi[892:1309,]) likely
# needs re-running here for the forests to see the imputed data — confirm.
fit <- randomForest(as.factor(Survived) ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked +
Title + FamilySize + FamilyID2,
data=train, importance=TRUE, ntree=2000)
varImpPlot(fit)
# this library is a forest of conditional inference trees
library(party)
Prediction <- predict(fit,test)
fit <- cforest(as.factor(Survived) ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked +
Title + FamilySize + FamilyID,
data=train, controls=cforest_unbiased(ntree=2000, mtry=3))
Prediction2 <- predict(fit,test,OOB=TRUE, type='response')
# Count disagreements between the random forest and cforest predictions.
length(Prediction2[Prediction != Prediction2])
submit<- data.frame(PassengerId = test$PassengerId, Survived=Prediction2)
write.csv(submit,file="CForest.csv",row.names=FALSE)
|
/TitanicSurivivalPrediction/Titanic_in_R.R
|
no_license
|
stevenydc/Kaggle
|
R
| false
| false
| 3,776
|
r
|
# Kaggle Titanic survival walkthrough: exploratory aggregates, a decision
# tree, title/family feature engineering, then random forest and a
# conditional inference forest, ending with a submission CSV.
train <- read.csv("~/Documents/Kaggle/TitanicSurivivalPrediction/train.csv")
test <- read.csv("~/Documents/Kaggle/TitanicSurivivalPrediction/test.csv")
test$Survived <- rep(0,418)
# Flag passengers under 18 as children.
train$Child <- 0
train$Child[train$Age < 18] <- 1
# Survival counts, group sizes, and survival rates by child status/sex/class.
aggregate(Survived ~ Child+Sex, data=train,FUN=sum)
aggregate(Survived ~ Child+Sex+Pclass, data=train,FUN=length)
aggregate(Survived ~ Child+Sex, data=train,FUN=function(x) {sum(x)/length(x)})
library(rpart)
library(rattle)
library(rpart.plot)
library(RColorBrewer)
# Fully grown tree (minsplit=2, cp=0): deliberately overfit demonstration.
fit <- rpart(Survived ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked, data=train,
method="class", control = rpart.control(minsplit=2,cp=0))
fancyRpartPlot(fit)
# Combine train and test so engineered features are consistent across both.
test$Survived <- NA
combi <- rbind(train,test)
combi$Name <- as.character(combi$Name)
strsplit(combi$Name[1],split='[,.]')[[1]][2]
# Extract the honorific title from each name and collapse rare variants.
combi$Title <- sapply(combi$Name, FUN = function(x) {strsplit(x,split='[,.]')[[1]][2]})
combi$Title <- sub(' ','',combi$Title)
combi$Title[combi$Title %in% c('Mme','Mlle')] <- 'Mlle'
combi$Title[combi$Title %in% c('Capt','Don','Major','Sir')] <- 'Sir'
combi$Title[combi$Title %in% c('Dona','Lady','the Countess','Jonkheer')] <- 'Lady'
combi$Title <- factor(combi$Title)
combi$FamilySize <- combi$SibSp + combi$Parch +1
# Want to find last names... so that we can create familyID
combi$Surname <- sapply(combi$Name, FUN = function(x) {strsplit(x,split='[,.]')[[1]][1]})
combi$FamilyID <- paste(as.character(combi$FamilySize),combi$Surname,sep='')
combi$FamilyID[combi$FamilySize <= 2] <- 'Small'
# There are FamilyID's that only have count of 1 or 2.. which means that
# their family members might have had different last names
famIDs <- data.frame(table(combi$FamilyID))
famIDs <- famIDs[famIDs$Freq <= 2,]
# Now we clean these IDs out from FamilyID
combi$FamilyID[combi$FamilyID %in% famIDs$Var1] <- 'Small'
combi$FamilyID <- factor(combi$FamilyID)
# Now we can split the combined dataset with newly engineered features
train <- combi[1:891,]
test <- combi[892:1309,]
fit <- rpart(Survived ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked +
Title + FamilySize + FamilyID,
data=train,
method="class")
# NOTE(review): this Prediction is overwritten below without being used.
Prediction <- predict(fit,test,type='class')
# Replace missing Age values using a decision tree!
Agefit <- rpart(Age ~ Pclass+Sex+SibSp+Parch+Fare+Embarked+Title+FamilySize,
data=combi[!is.na(combi$Age),], method='anova')
combi$Age[is.na(combi$Age)] <- predict(Agefit,combi[is.na(combi$Age),])
# Fill the two blank Embarked values and the single missing Fare.
combi$Embarked[c(which(combi$Embarked==''))] = 'S'
combi$Embarked <- factor(combi$Embarked)
combi$Fare[which(is.na(combi$Fare))] <- median(combi$Fare, na.rm=TRUE)
# reduce the number of levels of the FamilyID factor to less than 32
# because RandomForest only takes that many levels
combi$FamilyID2 <- as.character(combi$FamilyID)
combi$FamilyID2[combi$FamilySize <= 3] <- 'Small'
combi$FamilyID2 <- factor(combi$FamilyID2)
library(randomForest)
# to ensure the same randomness everytime
set.seed(415)
# NOTE(review): train/test were sliced from combi BEFORE the Age/Embarked/
# Fare imputation and FamilyID2 creation above, so they do not carry those
# fixes; the split (train <- combi[1:891,]; test <- combi[892:1309,]) likely
# needs re-running here for the forests to see the imputed data — confirm.
fit <- randomForest(as.factor(Survived) ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked +
Title + FamilySize + FamilyID2,
data=train, importance=TRUE, ntree=2000)
varImpPlot(fit)
# this library is a forest of conditional inference trees
library(party)
Prediction <- predict(fit,test)
fit <- cforest(as.factor(Survived) ~ Pclass + Sex + Age + SibSp + Parch + Fare + Embarked +
Title + FamilySize + FamilyID,
data=train, controls=cforest_unbiased(ntree=2000, mtry=3))
Prediction2 <- predict(fit,test,OOB=TRUE, type='response')
# Count disagreements between the random forest and cforest predictions.
length(Prediction2[Prediction != Prediction2])
submit<- data.frame(PassengerId = test$PassengerId, Survived=Prediction2)
write.csv(submit,file="CForest.csv",row.names=FALSE)
|
# Unit tests for dplyr's colwise verbs: summarise_all/_at/_if,
# mutate_all/_at/_if, transmute_*, and the deprecated _each() family.
context("colwise mutate/summarise")
test_that("funs found in current environment", {
f <- function(x) 1
df <- data.frame(x = c(2:10, 1000))
out <- summarise_all(df, funs(f, mean, median))
expect_equal(out, data.frame(f = 1, mean = 105.4, median = 6.5))
})
test_that("can use character vectors", {
df <- data.frame(x = 1:3)
expect_equal(summarise_all(df, "mean"), summarise_all(df, funs(mean)))
expect_equal(mutate_all(df, list(mean = "mean")), mutate_all(df, funs(mean = mean)))
})
test_that("can use bare functions", {
df <- data.frame(x = 1:3)
expect_equal(summarise_all(df, mean), summarise_all(df, funs(mean)))
expect_equal(mutate_all(df, mean), mutate_all(df, funs(mean)))
})
# Output-naming rules: names are only prefixed/suffixed when needed to
# keep them unique across columns and functions.
test_that("default names are smallest unique set", {
df <- data.frame(x = 1:3, y = 1:3)
expect_named(summarise_at(df, vars(x:y), funs(mean)), c("x", "y"))
expect_named(summarise_at(df, vars(x), funs(mean, sd)), c("mean", "sd"))
expect_named(summarise_at(df, vars(x:y), funs(mean, sd)), c("x_mean", "y_mean", "x_sd", "y_sd"))
expect_named(summarise_at(df, vars(x:y), funs(base::mean, stats::sd)), c("x_base::mean", "y_base::mean", "x_stats::sd", "y_stats::sd"))
})
test_that("named arguments force complete named", {
df <- data.frame(x = 1:3, y = 1:3)
expect_named(summarise_at(df, vars(x:y), funs(mean = mean)), c("x_mean", "y_mean"))
expect_named(summarise_at(df, vars(x = x), funs(mean, sd)), c("x_mean", "x_sd"))
})
# Helper: assert the columns of `tbl` have the expected classes, encoded
# as one initial letter per column (e.g. "nnccf" = numeric, numeric,
# character, character, factor).
expect_classes <- function(tbl, expected) {
classes <- unname(map_chr(tbl, class))
classes <- paste0(substring(classes, 0, 1), collapse = "")
expect_equal(classes, expected)
}
test_that("can select colwise", {
columns <- iris %>% mutate_at(vars(starts_with("Petal")), as.character)
expect_classes(columns, "nnccf")
numeric <- iris %>% mutate_at(c(1, 3), as.character)
expect_classes(numeric, "cncnf")
character <- iris %>% mutate_at("Species", as.character)
expect_classes(character, "nnnnc")
})
test_that("can probe colwise", {
predicate <- iris %>% mutate_if(is.factor, as.character)
expect_classes(predicate, "nnnnc")
logical <- iris %>% mutate_if(c(TRUE, FALSE, TRUE, TRUE, FALSE), as.character)
expect_classes(logical, "cnccf")
})
test_that("non syntactic colnames work", {
df <- data_frame(`x 1` = 1:3)
expect_identical(summarise_at(df, "x 1", sum)[[1]], 6L)
expect_identical(summarise_if(df, is.numeric, sum)[[1]], 6L)
expect_identical(summarise_all(df, sum)[[1]], 6L)
expect_identical(mutate_all(df, `*`, 2)[[1]], (1:3) * 2)
})
test_that("empty selection does not select everything (#2009, #1989)", {
expect_equal(mtcars, mutate_if(mtcars, is.factor, as.character))
})
test_that("error is thrown with improper additional arguments", {
# error messages by base R, not checked
expect_error(mutate_all(mtcars, round, 0, 0))
expect_error(mutate_all(mtcars, mean, na.rm = TRUE, na.rm = TRUE))
})
test_that("predicate can be quoted", {
expected <- mutate_if(mtcars, is_integerish, mean)
expect_identical(mutate_if(mtcars, "is_integerish", mean), expected)
expect_identical(mutate_if(mtcars, ~is_integerish(.x), mean), expected)
})
test_that("transmute verbs do not retain original variables", {
expect_named(transmute_all(data_frame(x = 1:3, y = 1:3), funs(mean, sd)), c("x_mean", "y_mean", "x_sd", "y_sd"))
expect_named(transmute_if(data_frame(x = 1:3, y = 1:3), is_integer, funs(mean, sd)), c("x_mean", "y_mean", "x_sd", "y_sd"))
expect_named(transmute_at(data_frame(x = 1:3, y = 1:3), vars(x:y), funs(mean, sd)), c("x_mean", "y_mean", "x_sd", "y_sd"))
})
test_that("can rename with vars() (#2594)", {
expect_equal(mutate_at(tibble(x = 1:3), vars(y = x), mean), tibble(x = 1:3, y = c(2, 2, 2)))
})
test_that("selection works with grouped data frames (#2624)", {
gdf <- group_by(iris, Species)
expect_identical(mutate_if(gdf, is.factor, as.character), gdf)
})
test_that("at selection works even if not all ops are named (#2634)", {
df <- tibble(x = 1, y = 2)
expect_identical(mutate_at(df, vars(z = x, y), funs(. + 1)), tibble(x = 1, y = 3, z = 2))
})
test_that("can use a purrr-style lambda", {
expect_identical(summarise_at(mtcars, vars(1:2), ~mean(.x)), summarise(mtcars, mpg = mean(mpg), cyl = mean(cyl)))
})
# Deprecated ---------------------------------------------------------
test_that("_each() and _all() families agree", {
df <- data.frame(x = 1:3, y = 1:3)
expect_equal(summarise_each(df, funs(mean)), summarise_all(df, mean))
expect_equal(summarise_each(df, funs(mean), x:y), summarise_at(df, vars(x:y), mean))
expect_equal(summarise_each(df, funs(mean), z = y), summarise_at(df, vars(z = y), mean))
expect_equal(mutate_each(df, funs(mean)), mutate_all(df, mean))
expect_equal(mutate_each(df, funs(mean), x:y), mutate_at(df, vars(x:y), mean))
expect_equal(mutate_each(df, funs(mean), z = y), mutate_at(df, vars(z = y), mean))
})
# NOTE(review): the calls below carry no expectations; they only exercise
# the deprecation-message paths without checking their content.
test_that("specific directions are given for _all() and _at() versions", {
summarise_each(mtcars, funs(mean))
summarise_each(mtcars, funs(mean), cyl)
mutate_each(mtcars, funs(mean))
mutate_each(mtcars, funs(mean), cyl)
})
|
/tests/testthat/test-colwise-mutate.R
|
permissive
|
datacamp/dplyr
|
R
| false
| false
| 5,122
|
r
|
# Unit tests for dplyr's colwise verbs: summarise_all/_at/_if,
# mutate_all/_at/_if, transmute_*, and the deprecated _each() family.
context("colwise mutate/summarise")
test_that("funs found in current environment", {
f <- function(x) 1
df <- data.frame(x = c(2:10, 1000))
out <- summarise_all(df, funs(f, mean, median))
expect_equal(out, data.frame(f = 1, mean = 105.4, median = 6.5))
})
test_that("can use character vectors", {
df <- data.frame(x = 1:3)
expect_equal(summarise_all(df, "mean"), summarise_all(df, funs(mean)))
expect_equal(mutate_all(df, list(mean = "mean")), mutate_all(df, funs(mean = mean)))
})
test_that("can use bare functions", {
df <- data.frame(x = 1:3)
expect_equal(summarise_all(df, mean), summarise_all(df, funs(mean)))
expect_equal(mutate_all(df, mean), mutate_all(df, funs(mean)))
})
# Output-naming rules: names are only prefixed/suffixed when needed to
# keep them unique across columns and functions.
test_that("default names are smallest unique set", {
df <- data.frame(x = 1:3, y = 1:3)
expect_named(summarise_at(df, vars(x:y), funs(mean)), c("x", "y"))
expect_named(summarise_at(df, vars(x), funs(mean, sd)), c("mean", "sd"))
expect_named(summarise_at(df, vars(x:y), funs(mean, sd)), c("x_mean", "y_mean", "x_sd", "y_sd"))
expect_named(summarise_at(df, vars(x:y), funs(base::mean, stats::sd)), c("x_base::mean", "y_base::mean", "x_stats::sd", "y_stats::sd"))
})
test_that("named arguments force complete named", {
df <- data.frame(x = 1:3, y = 1:3)
expect_named(summarise_at(df, vars(x:y), funs(mean = mean)), c("x_mean", "y_mean"))
expect_named(summarise_at(df, vars(x = x), funs(mean, sd)), c("x_mean", "x_sd"))
})
# Helper: assert the columns of `tbl` have the expected classes, encoded
# as one initial letter per column (e.g. "nnccf" = numeric, numeric,
# character, character, factor).
expect_classes <- function(tbl, expected) {
classes <- unname(map_chr(tbl, class))
classes <- paste0(substring(classes, 0, 1), collapse = "")
expect_equal(classes, expected)
}
test_that("can select colwise", {
columns <- iris %>% mutate_at(vars(starts_with("Petal")), as.character)
expect_classes(columns, "nnccf")
numeric <- iris %>% mutate_at(c(1, 3), as.character)
expect_classes(numeric, "cncnf")
character <- iris %>% mutate_at("Species", as.character)
expect_classes(character, "nnnnc")
})
test_that("can probe colwise", {
predicate <- iris %>% mutate_if(is.factor, as.character)
expect_classes(predicate, "nnnnc")
logical <- iris %>% mutate_if(c(TRUE, FALSE, TRUE, TRUE, FALSE), as.character)
expect_classes(logical, "cnccf")
})
test_that("non syntactic colnames work", {
df <- data_frame(`x 1` = 1:3)
expect_identical(summarise_at(df, "x 1", sum)[[1]], 6L)
expect_identical(summarise_if(df, is.numeric, sum)[[1]], 6L)
expect_identical(summarise_all(df, sum)[[1]], 6L)
expect_identical(mutate_all(df, `*`, 2)[[1]], (1:3) * 2)
})
test_that("empty selection does not select everything (#2009, #1989)", {
expect_equal(mtcars, mutate_if(mtcars, is.factor, as.character))
})
test_that("error is thrown with improper additional arguments", {
# error messages by base R, not checked
expect_error(mutate_all(mtcars, round, 0, 0))
expect_error(mutate_all(mtcars, mean, na.rm = TRUE, na.rm = TRUE))
})
test_that("predicate can be quoted", {
expected <- mutate_if(mtcars, is_integerish, mean)
expect_identical(mutate_if(mtcars, "is_integerish", mean), expected)
expect_identical(mutate_if(mtcars, ~is_integerish(.x), mean), expected)
})
test_that("transmute verbs do not retain original variables", {
expect_named(transmute_all(data_frame(x = 1:3, y = 1:3), funs(mean, sd)), c("x_mean", "y_mean", "x_sd", "y_sd"))
expect_named(transmute_if(data_frame(x = 1:3, y = 1:3), is_integer, funs(mean, sd)), c("x_mean", "y_mean", "x_sd", "y_sd"))
expect_named(transmute_at(data_frame(x = 1:3, y = 1:3), vars(x:y), funs(mean, sd)), c("x_mean", "y_mean", "x_sd", "y_sd"))
})
test_that("can rename with vars() (#2594)", {
expect_equal(mutate_at(tibble(x = 1:3), vars(y = x), mean), tibble(x = 1:3, y = c(2, 2, 2)))
})
test_that("selection works with grouped data frames (#2624)", {
gdf <- group_by(iris, Species)
expect_identical(mutate_if(gdf, is.factor, as.character), gdf)
})
test_that("at selection works even if not all ops are named (#2634)", {
df <- tibble(x = 1, y = 2)
expect_identical(mutate_at(df, vars(z = x, y), funs(. + 1)), tibble(x = 1, y = 3, z = 2))
})
test_that("can use a purrr-style lambda", {
expect_identical(summarise_at(mtcars, vars(1:2), ~mean(.x)), summarise(mtcars, mpg = mean(mpg), cyl = mean(cyl)))
})
# Deprecated ---------------------------------------------------------
test_that("_each() and _all() families agree", {
df <- data.frame(x = 1:3, y = 1:3)
expect_equal(summarise_each(df, funs(mean)), summarise_all(df, mean))
expect_equal(summarise_each(df, funs(mean), x:y), summarise_at(df, vars(x:y), mean))
expect_equal(summarise_each(df, funs(mean), z = y), summarise_at(df, vars(z = y), mean))
expect_equal(mutate_each(df, funs(mean)), mutate_all(df, mean))
expect_equal(mutate_each(df, funs(mean), x:y), mutate_at(df, vars(x:y), mean))
expect_equal(mutate_each(df, funs(mean), z = y), mutate_at(df, vars(z = y), mean))
})
# NOTE(review): the calls below carry no expectations; they only exercise
# the deprecation-message paths without checking their content.
test_that("specific directions are given for _all() and _at() versions", {
summarise_each(mtcars, funs(mean))
summarise_each(mtcars, funs(mean), cyl)
mutate_each(mtcars, funs(mean))
mutate_each(mtcars, funs(mean), cyl)
})
|
# Entry point for R CMD check: run the sunits package's testthat suite.
library(testthat)
library(sunits)
test_check("sunits")
|
/tests/testthat.R
|
permissive
|
jameelalsalam/sunits
|
R
| false
| false
| 56
|
r
|
# Entry point for R CMD check: run the sunits package's testthat suite.
library(testthat)
library(sunits)
test_check("sunits")
|
## Getting full dataset (~2.07M rows; "?" marks missing values)
data_full <- read.csv("./Project Assignment 1/household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data_full$Date <- as.Date(data_full$Date, format="%d/%m/%Y")
## Subsetting the data to the two days of interest, then freeing the full table
data <- subset(data_full, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_full)
## Converting the Date + Time columns into a single POSIXct timestamp
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
## Plot 4: 2x2 panel of power / voltage / sub-metering time series
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))
with(data, {
plot(Global_active_power~Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
plot(Voltage~Datetime, type="l",
ylab="Voltage (volt)", xlab="")
# Fixed axis label: this panel shows the energy sub-metering series,
# not global active power (copy-paste error in the original).
plot(Sub_metering_1~Datetime, type="l",
ylab="Energy sub metering", xlab="")
lines(Sub_metering_2~Datetime,col='Red')
lines(Sub_metering_3~Datetime,col='Blue')
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n",
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Fixed typo in the axis label: "Rective" -> "Reactive".
plot(Global_reactive_power~Datetime, type="l",
ylab="Global Reactive Power (kilowatts)",xlab="")
})
## Saving to file (480x480 PNG, per assignment requirements)
dev.copy(png, file="plot4.png", height=480, width=480)
dev.off()
|
/plot4.R
|
no_license
|
irichgreen/Exploratory_Data_Analysis_PRJ1
|
R
| false
| false
| 1,332
|
r
|
## Getting full dataset (~2.07M rows; "?" marks missing values)
data_full <- read.csv("./Project Assignment 1/household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data_full$Date <- as.Date(data_full$Date, format="%d/%m/%Y")
## Subsetting the data to the two days of interest, then freeing the full table
data <- subset(data_full, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_full)
## Converting the Date + Time columns into a single POSIXct timestamp
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
## Plot 4: 2x2 panel of power / voltage / sub-metering time series
par(mfrow=c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))
with(data, {
plot(Global_active_power~Datetime, type="l",
ylab="Global Active Power (kilowatts)", xlab="")
plot(Voltage~Datetime, type="l",
ylab="Voltage (volt)", xlab="")
# Fixed axis label: this panel shows the energy sub-metering series,
# not global active power (copy-paste error in the original).
plot(Sub_metering_1~Datetime, type="l",
ylab="Energy sub metering", xlab="")
lines(Sub_metering_2~Datetime,col='Red')
lines(Sub_metering_3~Datetime,col='Blue')
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n",
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Fixed typo in the axis label: "Rective" -> "Reactive".
plot(Global_reactive_power~Datetime, type="l",
ylab="Global Reactive Power (kilowatts)",xlab="")
})
## Saving to file (480x480 PNG, per assignment requirements)
dev.copy(png, file="plot4.png", height=480, width=480)
dev.off()
|
#Wastewater Surveillance Data Hub Uploader
#Written by Shelley Peterson, Wastewater Surveillance, National Microbiology Laboratory, Public Health Agency of Canada
#Version: 2021-09-24
# Reads a raw lab Excel workbook (chosen interactively) and reshapes it
# into the two-tab "Sample"/"Measurement" workbook format used by the hub.
library(tidyverse)
library(dplyr)
library(readxl)
library(writexl)
library(zoo)
library(lubridate)
# Prompt for the raw workbook; guess_max widened so column types are
# inferred from up to 10,000 rows instead of the default.
file_rawdata <- file.choose()
data.df <- read_excel(file_rawdata, guess_max = 10000)
##-----------------------------------------------------SAMPLE TAB
Sample <- select(data.df, Sample_ID, Location, Date_sampled, Collection_start, Collection_end)
Sample <- rename(Sample,
"sampleID" = Sample_ID,
"siteID" = Location,
"dateTime" = Date_sampled,
"dateTimeStart" = Collection_start,
"dateTimeEnd" = Collection_end)
#Constants that are the same for each sample
Sample$type <- "rawWW"
Sample$collection <- "cp"
Sample$sizeL <- "0.5"
Sample$fieldSampleTempC <- "4.0"
Sample$shippedOnIce <- "yes"
Sample$notes <- NA
Sample <- relocate(Sample, c(type, collection), .after = siteID)
# Composite samples (those with a collection start) must not also carry
# a single grab-sample dateTime.
Sample <- Sample %>% mutate(dateTime = replace(dateTime, !is.na(dateTimeStart), NA))
##-----------------------------------------------------MEASUREMENT TAB
###Liquid concentrates data
# NOTE(review): gather() is superseded by pivot_longer(); kept as-is
# because pivot_longer() orders the long rows differently.
MeasurementC <- select(data.df, Sample_ID, InstrumentC, Reported_by, C_qPCR_date, Date_reported, C_N1_1_cp, C_N1_2_cp, C_N2_1_cp, C_N2_2_cp, C_PMMV_1_cp, C_PMMV_2_cp)
MeasurementC <- gather(MeasurementC, key = type, value = value, c(C_N1_1_cp, C_N1_2_cp, C_N2_1_cp, C_N2_2_cp, C_PMMV_1_cp, C_PMMV_2_cp))
MeasurementC <- filter(MeasurementC, !is.na(value))
#Constants that are the same for each Concentrates sample
MeasurementC$assayMethodID <- "NML_Conc"
MeasurementC$fractionAnalyzed <- "Liquid"
# Replicate number (1 or 2) parsed from the source column name.
MeasurementC$index <- MeasurementC$type
MeasurementC$index <- sub(".*_1_cp*","1", MeasurementC$index)
MeasurementC$index <- sub(".*_2_cp*","2", MeasurementC$index)
# Map raw column names onto the hub's gene-target codes.
MeasurementC$type <- recode(MeasurementC$type, "C_N1_1_cp" = "covN1", "C_N1_2_cp" = "covN1", "C_N2_1_cp" = "covN2", "C_N2_2_cp" = "covN2",
"C_PMMV_1_cp" = "nPMMoV", "C_PMMV_2_cp" = "nPMMoV")
# CF / ESV constants for the liquid-concentrate workflow.
MeasurementC$CF <- 30
MeasurementC$ESV <- 0.75
MeasurementC <- rename(MeasurementC, "analysisDate" = C_qPCR_date,
"instrumentID" = InstrumentC)
###Solids data (same reshaping as above, solids workflow constants)
MeasurementS<- select(data.df, Sample_ID, InstrumentS, S_qPCR_date, Reported_by, Date_reported, S_N1_1_cp, S_N1_2_cp, S_N2_1_cp, S_N2_2_cp, S_PMMV_1_cp, S_PMMV_2_cp)
MeasurementS <- gather(MeasurementS, key = type, value = value, c(S_N1_1_cp, S_N1_2_cp, S_N2_1_cp, S_N2_2_cp, S_PMMV_1_cp, S_PMMV_2_cp))
MeasurementS <- filter(MeasurementS, !is.na(value))
#Constants that are the same for each Solids sample
MeasurementS$assayMethodID <- "NML_Conc"
MeasurementS$fractionAnalyzed <- "Solid"
MeasurementS$index <- MeasurementS$type
MeasurementS$index <- sub(".*_1_cp*","1", MeasurementS$index)
MeasurementS$index <- sub(".*_2_cp*","2", MeasurementS$index)
MeasurementS$type <- recode(MeasurementS$type, "S_N1_1_cp" = "covN1", "S_N1_2_cp" = "covN1", "S_N2_1_cp" = "covN2", "S_N2_2_cp" = "covN2",
"S_PMMV_1_cp" = "nPMMoV", "S_PMMV_2_cp" = "nPMMoV")
MeasurementS$CF <- 60
MeasurementS$ESV <- 1.5
MeasurementS <- rename(MeasurementS, "analysisDate" = S_qPCR_date,
"instrumentID" = InstrumentS)
#Combine Concentrates and Solids Data
Measurement <- rbind(MeasurementC, MeasurementS)
#Constants in the Measurement tab that are the same for both Concentrates and Solids
Measurement$labID <- "NML_MangatCh"
Measurement$typeOther <- NA
Measurement$unit <- "gcMl"
Measurement$unitOther <- NA
Measurement$qualityFlag <- NA
Measurement$notes <- NA
#Reorganize, rename, and reformat to match Ontario Data Template
Measurement <- rename(Measurement,
"sampleID" = Sample_ID,
"reportDate" = Date_reported,
"ReporterID" = Reported_by)
Measurement$analysisDate <- as.Date(Measurement$analysisDate)
Measurement$reportDate <- as.Date(Measurement$reportDate)
Measurement <- relocate(Measurement, c(labID, assayMethodID, instrumentID, ReporterID), .after = sampleID)
Measurement <- relocate(Measurement, fractionAnalyzed, .after = reportDate)
Measurement <- relocate(Measurement, c(typeOther, unit, unitOther, index), .after = type)
Measurement <- relocate(Measurement, qualityFlag, .after = value)
Measurement <- arrange(Measurement, sampleID)
# Move "ND" / "UQ" markers into qualityFlag and strip them from value;
# value stays character, with "" where only a marker was present.
Measurement$qualityFlag[Measurement$value == "ND"] <- "ND"
Measurement$value <- gsub("ND", "", Measurement$value)
Measurement$qualityFlag[Measurement$value == "UQ"] <- "UQ"
Measurement$value <- gsub("UQ", "", Measurement$value)
#============================================= Now put all tabs together into a Excel single workbook
# Fixed: paste() with its default sep put a stray space in the filename;
# paste0() yields e.g. "2021-09-24_national_export.xlsx".
write_xlsx(list("Sample" = Sample,
"Measurement" = Measurement),
paste0(format(Sys.time(), "%Y-%m-%d"), "_national_export.xlsx"))
# Fixed: the original string contained "\ ", an invalid escape sequence
# that R rejects at parse time.
cat("\n\nData Uploader is ready! :)\n\n")
|
/ODM Data Converter.R
|
no_license
|
ShelleyPeterson/ODM-Converter
|
R
| false
| false
| 5,124
|
r
|
#Wastewater Surveillance Data Hub Uploader
#Written by Shelley Peterson, Wastewater Surveillance, National Microbiology Laboratory, Public Health Agency of Canada
#Version: 2021-09-24
# Reads a raw lab Excel workbook (chosen interactively) and reshapes it
# into the two-tab "Sample"/"Measurement" workbook format used by the hub.
library(tidyverse)
library(dplyr)
library(readxl)
library(writexl)
library(zoo)
library(lubridate)
# Prompt for the raw workbook; guess_max widened so column types are
# inferred from up to 10,000 rows instead of the default.
file_rawdata <- file.choose()
data.df <- read_excel(file_rawdata, guess_max = 10000)
##-----------------------------------------------------SAMPLE TAB
Sample <- select(data.df, Sample_ID, Location, Date_sampled, Collection_start, Collection_end)
Sample <- rename(Sample,
"sampleID" = Sample_ID,
"siteID" = Location,
"dateTime" = Date_sampled,
"dateTimeStart" = Collection_start,
"dateTimeEnd" = Collection_end)
#Constants that are the same for each sample
Sample$type <- "rawWW"
Sample$collection <- "cp"
Sample$sizeL <- "0.5"
Sample$fieldSampleTempC <- "4.0"
Sample$shippedOnIce <- "yes"
Sample$notes <- NA
Sample <- relocate(Sample, c(type, collection), .after = siteID)
# Composite samples (those with a collection start) must not also carry
# a single grab-sample dateTime.
Sample <- Sample %>% mutate(dateTime = replace(dateTime, !is.na(dateTimeStart), NA))
##-----------------------------------------------------MEASUREMENT TAB
###Liquid concentrates data
# NOTE(review): gather() is superseded by pivot_longer(); kept as-is
# because pivot_longer() orders the long rows differently.
MeasurementC <- select(data.df, Sample_ID, InstrumentC, Reported_by, C_qPCR_date, Date_reported, C_N1_1_cp, C_N1_2_cp, C_N2_1_cp, C_N2_2_cp, C_PMMV_1_cp, C_PMMV_2_cp)
MeasurementC <- gather(MeasurementC, key = type, value = value, c(C_N1_1_cp, C_N1_2_cp, C_N2_1_cp, C_N2_2_cp, C_PMMV_1_cp, C_PMMV_2_cp))
MeasurementC <- filter(MeasurementC, !is.na(value))
#Constants that are the same for each Concentrates sample
MeasurementC$assayMethodID <- "NML_Conc"
MeasurementC$fractionAnalyzed <- "Liquid"
# Replicate number (1 or 2) parsed from the source column name.
MeasurementC$index <- MeasurementC$type
MeasurementC$index <- sub(".*_1_cp*","1", MeasurementC$index)
MeasurementC$index <- sub(".*_2_cp*","2", MeasurementC$index)
# Map raw column names onto the hub's gene-target codes.
MeasurementC$type <- recode(MeasurementC$type, "C_N1_1_cp" = "covN1", "C_N1_2_cp" = "covN1", "C_N2_1_cp" = "covN2", "C_N2_2_cp" = "covN2",
"C_PMMV_1_cp" = "nPMMoV", "C_PMMV_2_cp" = "nPMMoV")
# CF / ESV constants for the liquid-concentrate workflow.
MeasurementC$CF <- 30
MeasurementC$ESV <- 0.75
MeasurementC <- rename(MeasurementC, "analysisDate" = C_qPCR_date,
"instrumentID" = InstrumentC)
###Solids data (same reshaping as above, solids workflow constants)
MeasurementS<- select(data.df, Sample_ID, InstrumentS, S_qPCR_date, Reported_by, Date_reported, S_N1_1_cp, S_N1_2_cp, S_N2_1_cp, S_N2_2_cp, S_PMMV_1_cp, S_PMMV_2_cp)
MeasurementS <- gather(MeasurementS, key = type, value = value, c(S_N1_1_cp, S_N1_2_cp, S_N2_1_cp, S_N2_2_cp, S_PMMV_1_cp, S_PMMV_2_cp))
MeasurementS <- filter(MeasurementS, !is.na(value))
#Constants that are the same for each Solids sample
MeasurementS$assayMethodID <- "NML_Conc"
MeasurementS$fractionAnalyzed <- "Solid"
MeasurementS$index <- MeasurementS$type
MeasurementS$index <- sub(".*_1_cp*","1", MeasurementS$index)
MeasurementS$index <- sub(".*_2_cp*","2", MeasurementS$index)
MeasurementS$type <- recode(MeasurementS$type, "S_N1_1_cp" = "covN1", "S_N1_2_cp" = "covN1", "S_N2_1_cp" = "covN2", "S_N2_2_cp" = "covN2",
"S_PMMV_1_cp" = "nPMMoV", "S_PMMV_2_cp" = "nPMMoV")
MeasurementS$CF <- 60
MeasurementS$ESV <- 1.5
MeasurementS <- rename(MeasurementS, "analysisDate" = S_qPCR_date,
"instrumentID" = InstrumentS)
#Combine Concentrates and Solids Data
Measurement <- rbind(MeasurementC, MeasurementS)
#Constants in the Measurement tab that are the same for both Concentrates and Solids
Measurement$labID <- "NML_MangatCh"
Measurement$typeOther <- NA
Measurement$unit <- "gcMl"
Measurement$unitOther <- NA
Measurement$qualityFlag <- NA
Measurement$notes <- NA
#Reorganize, rename, and reformat to match Ontario Data Template
Measurement <- rename(Measurement,
"sampleID" = Sample_ID,
"reportDate" = Date_reported,
"ReporterID" = Reported_by)
Measurement$analysisDate <- as.Date(Measurement$analysisDate)
Measurement$reportDate <- as.Date(Measurement$reportDate)
Measurement <- relocate(Measurement, c(labID, assayMethodID, instrumentID, ReporterID), .after = sampleID)
Measurement <- relocate(Measurement, fractionAnalyzed, .after = reportDate)
Measurement <- relocate(Measurement, c(typeOther, unit, unitOther, index), .after = type)
Measurement <- relocate(Measurement, qualityFlag, .after = value)
Measurement <- arrange(Measurement, sampleID)
# Move "ND" / "UQ" markers into qualityFlag and strip them from value;
# value stays character, with "" where only a marker was present.
Measurement$qualityFlag[Measurement$value == "ND"] <- "ND"
Measurement$value <- gsub("ND", "", Measurement$value)
Measurement$qualityFlag[Measurement$value == "UQ"] <- "UQ"
Measurement$value <- gsub("UQ", "", Measurement$value)
#============================================= Now put all tabs together into a Excel single workbook
write_xlsx(list("Sample" = Sample,
"Measurement" = Measurement),
paste(format(Sys.time(), "%Y-%m-%d"), "_national_export.xlsx"))
cat("\n\ Data Uploader is ready! :)\n\n")
|
# Fuzzer-generated regression input for the internal C++ routine
# IntervalSurgeon:::rcpp_pile(). Each argument is assembled separately here;
# the values are kept byte-for-byte identical to the generated test case.
interval_ends <- c(-1125300777L, 765849512L, -1760774663L, 791623263L, 1358782356L,
                   -128659642L, -14914341L, 1092032927L, 1837701012L, 1632068659L)
point_vec <- c(1758370433L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L)
interval_starts <- c(16777216L, 0L, 738263040L, 682962941L, 1612840977L, 150997320L,
                     747898999L, -1195392662L, 2024571419L, 808515032L, 1373469119L,
                     -282236997L, -207881465L, -237801926L, -168118689L, -1090227888L,
                     235129118L, 949454105L, 1651285440L, -1119277667L, -1328604284L)
testlist <- list(ends = interval_ends, pts = point_vec, starts = interval_starts,
                 members = NULL, total_members = 0L)
result <- do.call(IntervalSurgeon:::rcpp_pile, testlist)
str(result)
|
/IntervalSurgeon/inst/testfiles/rcpp_pile/AFL_rcpp_pile/rcpp_pile_valgrind_files/1609860717-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 729
|
r
|
# Fuzzer-generated regression input for the internal C++ routine
# IntervalSurgeon:::rcpp_pile(). Each argument is assembled separately here;
# the values are kept byte-for-byte identical to the generated test case.
interval_ends <- c(-1125300777L, 765849512L, -1760774663L, 791623263L, 1358782356L,
                   -128659642L, -14914341L, 1092032927L, 1837701012L, 1632068659L)
point_vec <- c(1758370433L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L)
interval_starts <- c(16777216L, 0L, 738263040L, 682962941L, 1612840977L, 150997320L,
                     747898999L, -1195392662L, 2024571419L, 808515032L, 1373469119L,
                     -282236997L, -207881465L, -237801926L, -168118689L, -1090227888L,
                     235129118L, 949454105L, 1651285440L, -1119277667L, -1328604284L)
testlist <- list(ends = interval_ends, pts = point_vec, starts = interval_starts,
                 members = NULL, total_members = 0L)
result <- do.call(IntervalSurgeon:::rcpp_pile, testlist)
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tomtom.R
\name{plotMotifMatches}
\alias{plotMotifMatches}
\title{plotMotifMatches}
\usage{
plotMotifMatches(x, fill = "p_value", color = "transparent")
}
\arguments{
\item{x}{a MotifCompareResult object.}
\item{fill}{the statistic to plot. One of p_value, e_value, q_value
(default: p_value).}
\item{color}{color used for the tile border (default: transparent).}
}
\description{
Plot a matrix with rows and columns representing motifs in the query and
target database, and the fill color representing one of the three statistics
(p_value, e_value, q_value) measuring the significance of the similarity
between the motifs.
}
\details{
The original data only contains the matching motif pairs. When using
a tile color different from transparent, only those tiles present in
the original data are drawn. To get around this problem, the missing cells are
created and assigned NA values. For this, expand.grid and apply are used, which
may slow down the method for large matrices (when comparing large motif libraries).
}
\examples{
NULL
}
|
/man/plotMotifMatches.Rd
|
permissive
|
ddiez/motiftools
|
R
| false
| true
| 1,129
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tomtom.R
\name{plotMotifMatches}
\alias{plotMotifMatches}
\title{plotMotifMatches}
\usage{
plotMotifMatches(x, fill = "p_value", color = "transparent")
}
\arguments{
\item{x}{a MotifCompareResult object.}
\item{fill}{the statistic to plot. One of p_value, e_value, q_value
(default: p_value).}
\item{color}{color used for the tile border (default: transparent).}
}
\description{
Plot a matrix with rows and columns representing motifs in the query and
target database, and the fill color representing one of the three statistics
(p_value, e_value, q_value) measuring the significance of the similarity
between the motifs.
}
\details{
The original data only contains the matching motif pairs. When using
a tile color different from transparent, only those tiles present in
the original data are drawn. To get around this problem, the missing cells are
created and assigned NA values. For this, expand.grid and apply are used, which
may slow down the method for large matrices (when comparing large motif libraries).
}
\examples{
NULL
}
|
# Extracted example code for pivotaltrackR's story functions (create, read,
# update, and delete Pivotal Tracker stories). The "Not run" block below is
# not executed because it requires live API credentials.
library(pivotaltrackR)
### Name: story
### Title: Create, read, update, and delete a story
### Aliases: story getStory createStory editStory deleteStory
### ** Examples
## Not run:
##D new_bug <- createStory(
##D name="Flux capacitor hangs at 0.9 gigawatts",
##D description="Please investigate and fix.",
##D story_type="bug"
##D )
##D new_bug <- editStory(new_bug, current_state="started")
##D deleteStory(new_bug)
## End(Not run)
|
/data/genthat_extracted_code/pivotaltrackR/examples/story.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 453
|
r
|
# Extracted example code for pivotaltrackR's story functions (create, read,
# update, and delete Pivotal Tracker stories). The "Not run" block below is
# not executed because it requires live API credentials.
library(pivotaltrackR)
### Name: story
### Title: Create, read, update, and delete a story
### Aliases: story getStory createStory editStory deleteStory
### ** Examples
## Not run:
##D new_bug <- createStory(
##D name="Flux capacitor hangs at 0.9 gigawatts",
##D description="Please investigate and fix.",
##D story_type="bug"
##D )
##D new_bug <- editStory(new_bug, current_state="started")
##D deleteStory(new_bug)
## End(Not run)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run_app.R
\name{run_app}
\alias{run_app}
\title{Run app}
\usage{
run_app()
}
\value{
Web application served
}
\description{
Run the Shiny portfolio web application
}
|
/man/run_app.Rd
|
permissive
|
databrew/portfoliodash
|
R
| false
| true
| 244
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run_app.R
\name{run_app}
\alias{run_app}
\title{Run app}
\usage{
run_app()
}
\value{
Web application served
}
\description{
Run the Shiny portfolio web application
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/name_sets.R
\name{nameset}
\alias{nameset}
\alias{nameset_get}
\alias{nameset_taxonomy}
\title{Perform actions with name sets}
\usage{
nameset_get(uuid, options = NULL, ...)
nameset_taxonomy(uuid, options = NULL, ...)
}
\arguments{
\item{uuid}{the UUID of a set of taxonomic names}
\item{options}{(character) One or more of citationStart, html,
namebankID, root, string, type, uid, uri, and/or votes}
\item{...}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
}
\value{
a named list
}
\description{
Perform actions with name sets
}
\details{
\code{nameset_get()} retrieves information on a set of taxonomic
names. \code{nameset_taxonomy()} collects taxonomic data for a set of
taxonomic names.
}
\section{\code{options} parameter}{
Same as those for \code{name_*()} functions
}
\examples{
\dontrun{
# Retrieves information on a set of taxonomic names.
id <- "8d9a9ea3-95cc-414d-1000-4b683ce04be2"
nameset_get(uuid = id)
nameset_get(uuid = id, options=c('names','string'))
# Collects taxonomic data for a name.
nameset_taxonomy(uuid = "8d9a9ea3-95cc-414d-1000-4b683ce04be2",
options = "string")
nameset_taxonomy(uuid = "8d9a9ea3-95cc-414d-1000-4b683ce04be2",
supertaxa="immediate", options=c("string","namebankID"))
}
}
|
/man/nameset.Rd
|
permissive
|
GabsPalomo/rphylopic
|
R
| false
| true
| 1,327
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/name_sets.R
\name{nameset}
\alias{nameset}
\alias{nameset_get}
\alias{nameset_taxonomy}
\title{Perform actions with name sets}
\usage{
nameset_get(uuid, options = NULL, ...)
nameset_taxonomy(uuid, options = NULL, ...)
}
\arguments{
\item{uuid}{the UUID of a set of taxonomic names}
\item{options}{(character) One or more of citationStart, html,
namebankID, root, string, type, uid, uri, and/or votes}
\item{...}{curl options passed on to \link[crul:HttpClient]{crul::HttpClient}}
}
\value{
a named list
}
\description{
Perform actions with name sets
}
\details{
\code{nameset_get()} retrieves information on a set of taxonomic
names. \code{nameset_taxonomy()} collects taxonomic data for a set of
taxonomic names.
}
\section{\code{options} parameter}{
Same as those for \code{name_*()} functions
}
\examples{
\dontrun{
# Retrieves information on a set of taxonomic names.
id <- "8d9a9ea3-95cc-414d-1000-4b683ce04be2"
nameset_get(uuid = id)
nameset_get(uuid = id, options=c('names','string'))
# Collects taxonomic data for a name.
nameset_taxonomy(uuid = "8d9a9ea3-95cc-414d-1000-4b683ce04be2",
options = "string")
nameset_taxonomy(uuid = "8d9a9ea3-95cc-414d-1000-4b683ce04be2",
supertaxa="immediate", options=c("string","namebankID"))
}
}
|
# MAKE PLOTS OF DMS AND DMSP VS. PHYTO COUNTS AND ADDITIONAL Z VARIABLES
library(RColorBrewer)
library(dplyr)
# Load data ---------------------------------------------------------------
genpath <- '~/Desktop/GreenEdge/GCMS/'
pdir <- 'plots_pigments_vs_DMS_zColorScale/'
prof.all <- read.csv(file = paste0(genpath, 'GE2016.profiles.ALL.OK.csv'), header = TRUE)
surf.all <- read.csv(file = paste0(genpath, 'GE2016.casts.ALLSURF.csv'), header = TRUE)
# Exporting image?
exportimg <- TRUE
opath <- "~/Desktop/GreenEdge/MS_DMS_GE16_Elementa/"
# -------------------------------------------------------------------------
# Use the consensus DMS estimate and drop the other DMS candidate columns
prof.all$dms <- prof.all$dms_consens_cf68
toinclude <- names(prof.all)[grep("dms", names(prof.all), invert = TRUE)]
toinclude <- c(toinclude, "dms", "dmspt")
prof.all <- prof.all[, toinclude]
# Keep only rows with at least one of DMS / DMSPt and a valid depth
prof.all <- prof.all[(!is.na(prof.all$dms) | !is.na(prof.all$dmspt)) & !is.na(prof.all$depth), ]
# Manual correction of DMS and DMSPt in the stn 519 surface record
prof.all[prof.all$stn == 519 & prof.all$depth == 0.7, c("dms", "dmspt")] <- c(11.42, 79.9)
# Add MIZ classification from sea-ice concentration (SIC) columns
# (SICm2d/SICm1d/SICday: presumably SIC two days before, one day before, and
# on the sampling day -- confirm against the casts file documentation)
icecrit1 <- 0.15  # below this max SIC -> open water
icecrit2 <- 0.70  # above this min SIC -> consolidated ice
ICE <- surf.all[, c("SICm2d", "SICm1d", "SICday")]
icemin <- apply(ICE, 1, min, na.rm = TRUE)   # Min-max SIC criterion
icemax <- apply(ICE, 1, max, na.rm = TRUE)   # Min-max SIC criterion
icemean <- apply(ICE, 1, mean, na.rm = TRUE) # Mean concentration criterion (bug fix: was min())
surf.all$sic_class <- NA
surf.all$sic_class[icemax < icecrit1] <- "OW"
surf.all$sic_class[icemin > icecrit2] <- "ICE"
surf.all$sic_class[icemax >= icecrit1 & icemin <= icecrit2] <- "MIZ"
# Merge with profiles to get clustering coefficient and SIC classification.
# NOTE(review): merge() normally expects 'suffixes' of length 2; the "" value
# produces the duplicate "NA"-named columns stripped below -- confirm intended.
pplot <- merge(x = prof.all, y = surf.all, all.x = TRUE, all.y = FALSE, by = 'stn', suffixes = "")
# Remove duplicated columns with NA in their names
pplot <- pplot[, grep("NA", names(pplot), invert = TRUE)]
# Remove duplicated rows and rows without DMSPt
dd <- duplicated(pplot[, c("dmspt", "dms", "cast", "depth")]) | is.na(pplot$dmspt)
pplot <- pplot[!dd, ]
# Hide near-surface data from stations <= 400
pplot[pplot$stn <= 400 & pplot$depth < 5, ] <- NA
# Change units of N2 from s-2 to h-1 (take sqrt: frequency, then per hour)
pplot$N2 <- sqrt(pplot$N2) * 3600
# Select surface (0.7 m) data and the variables of interest
pplot <- pplot[pplot$depth == 0.7, c("stn", "depth", "dms", "Phaeo", "tchla", "cpsmooth1", "wsp24", "wsc24", "fdmsW97c24")]
# Derived variables: DMS normalized to phaeopigments, and a ventilation rate
# constant (sea-air DMS flux divided by DMS concentration)
pplot$dms2phaeo <- pplot$dms / pplot$Phaeo
pplot$kvent <- pplot$fdmsW97c24 / pplot$dms
# Print wind speed vs DMS correlation
print(cor.test(pplot$wsp24, pplot$dms, method = "spearman"))
# -------------------------------
# Correlate wind speed and ventilation with the DMS residuals after removing a
# linear total-chlorophyll-a effect
ff <- lsfit(pplot$tchla, pplot$dms, wt = NULL, intercept = TRUE, tolerance = 1e-07)
print(cor.test(pplot$wsp24, ff$residuals, method = "spearman"))
print(cor.test(pplot$kvent, ff$residuals, method = "spearman"))
/MS_DMS_GE16_Elementa/surf_dmsPhaeo_windSpeed.R
|
no_license
|
mgali/GreenEdge
|
R
| false
| false
| 3,193
|
r
|
# MAKE PLOTS OF DMS AND DMSP VS. PHYTO COUNTS AND ADDITIONAL Z VARIABLES
library(RColorBrewer)
library(dplyr)
# Load data ---------------------------------------------------------------
genpath <- '~/Desktop/GreenEdge/GCMS/'
pdir <- 'plots_pigments_vs_DMS_zColorScale/'
prof.all <- read.csv(file = paste0(genpath, 'GE2016.profiles.ALL.OK.csv'), header = TRUE)
surf.all <- read.csv(file = paste0(genpath, 'GE2016.casts.ALLSURF.csv'), header = TRUE)
# Exporting image?
exportimg <- TRUE
opath <- "~/Desktop/GreenEdge/MS_DMS_GE16_Elementa/"
# -------------------------------------------------------------------------
# Use the consensus DMS estimate and drop the other DMS candidate columns
prof.all$dms <- prof.all$dms_consens_cf68
toinclude <- names(prof.all)[grep("dms", names(prof.all), invert = TRUE)]
toinclude <- c(toinclude, "dms", "dmspt")
prof.all <- prof.all[, toinclude]
# Keep only rows with at least one of DMS / DMSPt and a valid depth
prof.all <- prof.all[(!is.na(prof.all$dms) | !is.na(prof.all$dmspt)) & !is.na(prof.all$depth), ]
# Manual correction of DMS and DMSPt in the stn 519 surface record
prof.all[prof.all$stn == 519 & prof.all$depth == 0.7, c("dms", "dmspt")] <- c(11.42, 79.9)
# Add MIZ classification from sea-ice concentration (SIC) columns
# (SICm2d/SICm1d/SICday: presumably SIC two days before, one day before, and
# on the sampling day -- confirm against the casts file documentation)
icecrit1 <- 0.15  # below this max SIC -> open water
icecrit2 <- 0.70  # above this min SIC -> consolidated ice
ICE <- surf.all[, c("SICm2d", "SICm1d", "SICday")]
icemin <- apply(ICE, 1, min, na.rm = TRUE)   # Min-max SIC criterion
icemax <- apply(ICE, 1, max, na.rm = TRUE)   # Min-max SIC criterion
icemean <- apply(ICE, 1, mean, na.rm = TRUE) # Mean concentration criterion (bug fix: was min())
surf.all$sic_class <- NA
surf.all$sic_class[icemax < icecrit1] <- "OW"
surf.all$sic_class[icemin > icecrit2] <- "ICE"
surf.all$sic_class[icemax >= icecrit1 & icemin <= icecrit2] <- "MIZ"
# Merge with profiles to get clustering coefficient and SIC classification.
# NOTE(review): merge() normally expects 'suffixes' of length 2; the "" value
# produces the duplicate "NA"-named columns stripped below -- confirm intended.
pplot <- merge(x = prof.all, y = surf.all, all.x = TRUE, all.y = FALSE, by = 'stn', suffixes = "")
# Remove duplicated columns with NA in their names
pplot <- pplot[, grep("NA", names(pplot), invert = TRUE)]
# Remove duplicated rows and rows without DMSPt
dd <- duplicated(pplot[, c("dmspt", "dms", "cast", "depth")]) | is.na(pplot$dmspt)
pplot <- pplot[!dd, ]
# Hide near-surface data from stations <= 400
pplot[pplot$stn <= 400 & pplot$depth < 5, ] <- NA
# Change units of N2 from s-2 to h-1 (take sqrt: frequency, then per hour)
pplot$N2 <- sqrt(pplot$N2) * 3600
# Select surface (0.7 m) data and the variables of interest
pplot <- pplot[pplot$depth == 0.7, c("stn", "depth", "dms", "Phaeo", "tchla", "cpsmooth1", "wsp24", "wsc24", "fdmsW97c24")]
# Derived variables: DMS normalized to phaeopigments, and a ventilation rate
# constant (sea-air DMS flux divided by DMS concentration)
pplot$dms2phaeo <- pplot$dms / pplot$Phaeo
pplot$kvent <- pplot$fdmsW97c24 / pplot$dms
# Print wind speed vs DMS correlation
print(cor.test(pplot$wsp24, pplot$dms, method = "spearman"))
# -------------------------------
# Correlate wind speed and ventilation with the DMS residuals after removing a
# linear total-chlorophyll-a effect
ff <- lsfit(pplot$tchla, pplot$dms, wt = NULL, intercept = TRUE, tolerance = 1e-07)
print(cor.test(pplot$wsp24, ff$residuals, method = "spearman"))
print(cor.test(pplot$kvent, ff$residuals, method = "spearman"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/threshold.R
\name{gamma_CI}
\alias{gamma_CI}
\title{Confidence set for estimated threshold location following Hansen (1999, JOE)}
\usage{
gamma_CI(results, alpha = 0.05)
}
\arguments{
\item{results}{List of results from bootstrap_F1}
\item{alpha}{Confidence level. Defaults to 0.05}
}
\value{
Vector of values for gamma from \code{results$gamma_seq} that do not
give a significantly different sum of squared residuals than \code{results$gamma_hat}
based on a likelihood ratio test with level alpha
}
\description{
Confidence set for estimated threshold location following Hansen (1999, JOE)
}
\examples{
test_data <- dgp_threshold()
results <- bootstrap_F1(test_data$X, test_data$Y)
gamma_CI(results)
}
|
/man/gamma_CI.Rd
|
no_license
|
fditraglia/forcedMigration
|
R
| false
| true
| 782
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/threshold.R
\name{gamma_CI}
\alias{gamma_CI}
\title{Confidence set for estimated threshold location following Hansen (1999, JOE)}
\usage{
gamma_CI(results, alpha = 0.05)
}
\arguments{
\item{results}{List of results from bootstrap_F1}
\item{alpha}{Confidence level. Defaults to 0.05}
}
\value{
Vector of values for gamma from \code{results$gamma_seq} that do not
give a significantly different sum of squared residuals than \code{results$gamma_hat}
based on a likelihood ratio test with level alpha
}
\description{
Confidence set for estimated threshold location following Hansen (1999, JOE)
}
\examples{
test_data <- dgp_threshold()
results <- bootstrap_F1(test_data$X, test_data$Y)
gamma_CI(results)
}
|
\name{a}
\alias{a}
\docType{data}
\title{
a vector
}
\description{
a vector
}
\usage{data("a")}
\format{
The format is:
int [1:12] 1 2 3 4 5 6 7 8 9 10 ...
}
\examples{
data(a)
## maybe str(a) ; plot(a) ...
}
\keyword{datasets}
|
/man/a.Rd
|
no_license
|
onesand1/TERCP
|
R
| false
| false
| 252
|
rd
|
\name{a}
\alias{a}
\docType{data}
\title{
a vector
}
\description{
a vector
}
\usage{data("a")}
\format{
The format is:
int [1:12] 1 2 3 4 5 6 7 8 9 10 ...
}
\examples{
data(a)
## maybe str(a) ; plot(a) ...
}
\keyword{datasets}
|
# Plot 3: total PM2.5 emissions in Baltimore City, MD for 1999-2008, broken
# down by emission source type. Reads the EPA NEI data files and writes
# plot3.png to the working directory.
library(tidyverse)
SCC <- readRDS("Source_Classification_Code.rds")
NEI <- readRDS("summarySCC_PM25.rds")
# Keep only Baltimore City records (fips code 24510)
NEI <- NEI %>%
filter(fips == '24510')
# Plot 3
png(filename='plot3.png')
ggplot(NEI,aes(factor(year),Emissions,fill=type)) +
geom_bar(stat="identity") +
facet_grid(.~type,scales = "free",space="free") +
labs(x="year",
y=expression("Total PM"[2.5]*" Emission (Tons)"),
title=expression("PM"[2.5]*" Emissions,
Baltimore City 1999-2008 by Source Type"))
dev.off()
|
/Project2/plot3.R
|
no_license
|
nselvak/Exploratory_Data_Analysis
|
R
| false
| false
| 594
|
r
|
# Plot 3: total PM2.5 emissions in Baltimore City, MD for 1999-2008, broken
# down by emission source type. Reads the EPA NEI data files and writes
# plot3.png to the working directory.
library(tidyverse)
SCC <- readRDS("Source_Classification_Code.rds")
NEI <- readRDS("summarySCC_PM25.rds")
# Keep only Baltimore City records (fips code 24510)
NEI <- NEI %>%
filter(fips == '24510')
# Plot 3
png(filename='plot3.png')
ggplot(NEI,aes(factor(year),Emissions,fill=type)) +
geom_bar(stat="identity") +
facet_grid(.~type,scales = "free",space="free") +
labs(x="year",
y=expression("Total PM"[2.5]*" Emission (Tons)"),
title=expression("PM"[2.5]*" Emissions,
Baltimore City 1999-2008 by Source Type"))
dev.off()
|
#' Calculate permutation p-values and plot group differences
#'
#' For a given (global- or vertex-level) graph measure, determine the
#' permutation p-value and create a plot showing group differences, either
#' across densities or regions. You may specify the \eqn{\alpha}-level; a red
#' asterisk is added if \eqn{p < \alpha} and a blue asterisk is added if
#' \eqn{\alpha < p < 0.1} (i.e. a "trend"). You may also choose whether you want
#' a one- or two-sided test.
#'
#' @param g1 List of igraph graph objects for group 1
#' @param g2 List of igraph graph objects for group 2
#' @param perm.dt Data table with the permutation results
#' @param measure Character string for the graph measure of interest
#' @param level Character string, either 'graph' or 'vertex'
#' @param auc Logical indicating whether the data refer to area-under-the-curve
#' (across all densities) (default: FALSE)
#' @param alternative Character string, whether to do a two- or one-sided test
#' (default: 'two.sided')
#' @param alpha Significance level (default: 0.05)
#' @param groups Character vector of group names (default: NULL)
#' @param ylabel Character string for y-axis label (default: NULL)
#' @export
#'
#' @return A list with three elements:
#' \item{dt}{A data table with p-values for each density/region}
#' \item{p1}{A \code{\link[ggplot2]{ggplot}} plotting object}
#' \item{p2}{A \code{\link[ggplot2]{ggplot}} plotting object}
#'
#' @seealso \code{\link{permute.group}}
#' @author Christopher G. Watson, \email{cgwatson@@bu.edu}
#' @examples
#' \dontrun{
#' perms.mod.sig <- plot_perm_diffs(g[[1]], g[[2]], perms.all, 'mod', level='graph',
#'                                  'less', groups, ylabel='Modularity')
#' perms.mod.btwn <- plot_perm_diffs(g[[1]], g[[2]], perms.btwn, 'btwn.cent',
#'                                   level='vertex')
#' }
plot_perm_diffs <- function(g1, g2, perm.dt, measure,
                            level=c('graph', 'vertex'), auc=FALSE,
                            alternative=c('two.sided', 'less', 'greater'),
                            alpha=0.05, groups=NULL, ylabel=NULL) {
  # Bind NSE column names to NULL to silence R CMD check notes about
  # data.table/ggplot2 non-standard evaluation.
  p <- perm.diff <- obs.diff <- sig <- trend <- yloc <- obs <- Group <- mean.diff <-
    ci.low <- ci.high <- region <- reg.num <- NULL
  if (!isTRUE(auc)) {
    # Keep only the graphs whose density matches a density present in the
    # permutation table (rounded to 2 decimals to absorb float noise).
    densities.perm <- perm.dt[, unique(density)]
    densities.g <- which(round(sapply(g1, graph_attr, 'density'), 2) %in% round(densities.perm, 2))
    g1 <- g1[densities.g]
    g2 <- g2[densities.g]
    N <- perm.dt[, .N, by=density]$N # Handles diff num. of perm's across densities
  } else {
    densities.g <- sapply(g1, graph_attr, 'density')
    N <- nrow(perm.dt)
  }
  if (is.null(groups)) groups <- c('Group 1', 'Group 2')
  alt <- match.arg(alternative)
  level <- match.arg(level)
  # Graph-level permutations
  #-------------------------------------
  if (level == 'graph') {
    if (!measure %in% names(perm.dt)) {
      stop(sprintf('Measure %s is not valid!', deparse(substitute(measure))))
    }
    # Observed per-group values of the graph-level measure
    if (is_igraph(g1)) {
      meas.obs1 <- graph_attr(g1, measure)
      meas.obs2 <- graph_attr(g2, measure)
    } else {
      meas.obs1 <- sapply(g1, graph_attr, measure)
      meas.obs2 <- sapply(g2, graph_attr, measure)
    }
    perm.dt <- perm.dt[, list(density, perm.diff=get(measure))]
    perm.dt$obs.diff <- rep(meas.obs1 - meas.obs2, times=N)
    # Permutation p-value per density using the +1 correction; 'ci' holds the
    # quantile probabilities used further below for the null CI band.
    if (alt == 'two.sided') {
      perm.dt[, p := (sum(abs(perm.diff) >= abs(unique(obs.diff))) + 1) / (.N + 1),
              by=density]
      ci <- c(alpha / 2, 1 - (alpha / 2))
    } else if (alt == 'less') {
      perm.dt[, p := (sum(perm.diff <= unique(obs.diff)) + 1) / (.N + 1), by=density]
      ci <- c(alpha, NULL)
    } else if (alt == 'greater') {
      perm.dt[, p := (sum(perm.diff >= unique(obs.diff)) + 1) / (.N + 1), by=density]
      ci <- c(NULL, 1 - alpha)
    }
    # One row per group x density, with raw and FDR-adjusted p-values plus
    # significance ('sig') and trend markers used as plot annotations.
    result.dt <- data.table(Group=rep(groups, each=length(densities.perm)),
                            density=densities.perm,
                            N=rep(N, length(groups)),
                            obs=c(meas.obs1, meas.obs2),
                            perm.diff=rep(perm.dt[, mean(perm.diff), by=density]$V1, length(groups)),
                            p=perm.dt[, unique(p), by=density]$V1,
                            sig='', trend='')
    result.dt[, p.fdr := p.adjust(p, 'fdr')]
    result.dt[p < alpha, sig := '*']
    result.dt[p >= alpha & p < 0.1, trend := '*']
    # y position for the asterisks: just below the smallest observed value
    result.dt[, yloc := round(min(obs) - 0.05 * diff(range(obs)), 3)]
    # Plot 1: observed measure vs. density per group, with red (significant)
    # and blue (trend) asterisks along the bottom
    sigplot <- ggplot(data=result.dt, aes(x=density, y=obs, col=Group)) +
      geom_line() +
      geom_text(aes(y=yloc, label=sig), col='red', size=8) +
      geom_text(aes(y=yloc, label=trend), col='blue', size=8) +
      theme(legend.position='none')
    if (!is.null(ylabel)) {
      sigplot <- sigplot + ylab(ylabel)
    } else {
      sigplot <- sigplot + ylab(measure)
    }
    # Null-distribution summaries: mean and the CI band taken from the sorted
    # permutation differences at the probabilities in 'ci'
    perm.dt[, mean.diff := mean(perm.diff), by=density]
    perm.dt[, c('ci.low', 'ci.high') := as.list(sort(perm.diff)[.N * ci]), by=density]
    # Plot 2: observed group difference vs. density against the null band
    sigplot2 <- ggplot(result.dt[, list(obs.diff=-diff(obs)), by=density],
                       aes(x=as.factor(density))) +
      geom_line(data=perm.dt[, list(ci.low=unique(ci.low)), by=density],
                aes(x=density, y=ci.low), lty=2) +
      geom_line(data=perm.dt[, list(ci.high=unique(ci.high)), by=density],
                aes(x=density, y=ci.high), lty=2) +
      geom_line(aes(x=density, y=obs.diff), col='red') +
      geom_point(aes(x=density, y=obs.diff), col='red', size=3) +
      geom_line(data=perm.dt, aes(x=density, y=mean.diff), lty=2) +
      #geom_boxplot(data=perm.dt, aes(x=as.factor(density), y=perm.diff),
      #             fill='cyan3', outlier.size=0) +
      #scale_x_discrete(breaks=seq(densities.perm[1],
      #                            densities.perm[length(densities.perm)], by=0.05)) +
      labs(x='Density',
           y=sprintf('Permutation difference (%s - %s)', groups[1], groups[2]),
           plot.title=ylabel)
  # Vertex-level permutations
  #-------------------------------------
  } else if (level == 'vertex') {
    # Observed per-vertex values of the measure
    if (is_igraph(g1)) {
      meas.obs1 <- vertex_attr(g1, measure)
      meas.obs2 <- vertex_attr(g2, measure)
    } else {
      meas.obs1 <- sapply(g1, vertex_attr, measure)
      meas.obs2 <- sapply(g2, vertex_attr, measure)
    }
    if (isTRUE(auc)) {
      # Collapse across densities: one AUC difference per vertex
      perm.dt$density <- 1
      meas.obs <- list(t(meas.obs1), t(meas.obs2))
      obs.diff <- sapply(seq_len(ncol(perm.dt)-1), function(y)
                         auc_diff(densities.g, cbind(meas.obs[[1]][, y], meas.obs[[2]][, y])))
    } else {
      obs.diff <- as.vector(meas.obs1 - meas.obs2)
    }
    # Reshape to long format: one row per permutation x density x region
    perm.dt[, N := .N, by=density]
    perm.m <- melt(perm.dt, id.vars=c('density', 'N'), variable.name='region',
                   value.name='perm.diff')
    setkey(perm.m, density, region)
    perm.m$obs.diff <- rep(obs.diff,
                           times=rep(N, each=ncol(perm.dt)-2))
    # Permutation p-value per density/region with the +1 correction
    if (alt == 'two.sided') {
      perm.m[, p := (sum(abs(perm.diff) >= abs(unique(obs.diff))) + 1) / (.N + 1),
             by=list(density, region)]
    } else if (alt == 'less') {
      perm.m[, p := (sum(perm.diff <= unique(obs.diff)) + 1) / (.N + 1),
             by=list(density, region)]
    } else if (alt == 'greater') {
      perm.m[, p := (sum(perm.diff >= unique(obs.diff)) + 1) / (.N + 1),
             by=list(density, region)]
    }
    # FDR adjustment across regions, within each density
    p.fdr <- perm.m[, list(p=unique(p)), by=list(region, density)][, p.adjust(p, 'fdr'), by=density]$V1
    perm.m$p.fdr <- rep(p.fdr, times=rep(N, each=ncol(perm.dt)-2))
    # Keep only significant regions for plotting
    perm.m.sig <- perm.m[p < alpha]
    perm.m.sig[, reg.num := rep(seq_len(length(unique(region))), each=unique(N)),
               by=density]
    # Boxplots of the null distribution per significant region, with the
    # observed difference drawn as a red horizontal segment, faceted by density
    sigplot <- ggplot(perm.m.sig, aes(x=region, y=perm.diff)) +
      geom_boxplot(fill='cyan3', outlier.size=0) +
      geom_segment(data=perm.m.sig[, list(obs.diff=unique(obs.diff)), by=list(density, reg.num)],
                   aes(x=reg.num-0.25, xend=reg.num+0.25, y=obs.diff, yend=obs.diff), col='red') +
      facet_wrap(~ density, scales='free')
    sigplot <- sigplot +
      ylab(sprintf('Permutation difference (%s - %s)', groups[1], groups[2]))
    result.dt <- perm.m
    sigplot2 <- NULL  # no second plot is produced at the vertex level
  }
  return(list(dt=result.dt, p1=sigplot, p2=sigplot2))
}
|
/R/plot_perm_diffs.R
|
no_license
|
nagyistge/brainGraph
|
R
| false
| false
| 8,215
|
r
|
#' Calculate permutation p-values and plot group differences
#'
#' For a given (global- or vertex-level) graph measure, determine the
#' permutation p-value and create a plot showing group differences, either
#' across densities or regions. You may specify the \eqn{\alpha}-level; a red
#' asterisk is added if \eqn{p < \alpha} and a blue asterisk is added if
#' \eqn{\alpha < p < 0.1} (i.e. a "trend"). You may also choose whether you want
#' a one- or two-sided test.
#'
#' @param g1 List of igraph graph objects for group 1
#' @param g2 List of igraph graph objects for group 2
#' @param perm.dt Data table with the permutation results
#' @param measure Character string for the graph measure of interest
#' @param level Character string, either 'graph' or 'vertex'
#' @param auc Logical indicating whether the data refer to area-under-the-curve
#' (across all densities) (default: FALSE)
#' @param alternative Character string, whether to do a two- or one-sided test
#' (default: 'two.sided')
#' @param alpha Significance level (default: 0.05)
#' @param groups Character vector of group names (default: NULL)
#' @param ylabel Character string for y-axis label (default: NULL)
#' @export
#'
#' @return A list with three elements:
#' \item{dt}{A data table with p-values for each density/region}
#' \item{p1}{A \code{\link[ggplot2]{ggplot}} plotting object}
#' \item{p2}{A \code{\link[ggplot2]{ggplot}} plotting object}
#'
#' @seealso \code{\link{permute.group}}
#' @author Christopher G. Watson, \email{cgwatson@@bu.edu}
#' @examples
#' \dontrun{
#' perms.mod.sig <- plot_perm_diffs(g[[1]], g[[2]], perms.all, 'mod', level='graph',
#'                                  'less', groups, ylabel='Modularity')
#' perms.mod.btwn <- plot_perm_diffs(g[[1]], g[[2]], perms.btwn, 'btwn.cent',
#'                                   level='vertex')
#' }
plot_perm_diffs <- function(g1, g2, perm.dt, measure,
level=c('graph', 'vertex'), auc=FALSE,
alternative=c('two.sided', 'less', 'greater'),
alpha=0.05, groups=NULL, ylabel=NULL) {
p <- perm.diff <- obs.diff <- sig <- trend <- yloc <- obs <- Group <- mean.diff <-
ci.low <- ci.high <- region <- reg.num <- NULL
if (!isTRUE(auc)) {
densities.perm <- perm.dt[, unique(density)]
densities.g <- which(round(sapply(g1, graph_attr, 'density'), 2) %in% round(densities.perm, 2))
g1 <- g1[densities.g]
g2 <- g2[densities.g]
N <- perm.dt[, .N, by=density]$N # Handles diff num. of perm's across densities
} else {
densities.g <- sapply(g1, graph_attr, 'density')
N <- nrow(perm.dt)
}
if (is.null(groups)) groups <- c('Group 1', 'Group 2')
alt <- match.arg(alternative)
level <- match.arg(level)
# Graph-level permutations
#-------------------------------------
if (level == 'graph') {
if (!measure %in% names(perm.dt)) {
stop(sprintf('Measure %s is not valid!', deparse(substitute(measure))))
}
if (is_igraph(g1)) {
meas.obs1 <- graph_attr(g1, measure)
meas.obs2 <- graph_attr(g2, measure)
} else {
meas.obs1 <- sapply(g1, graph_attr, measure)
meas.obs2 <- sapply(g2, graph_attr, measure)
}
perm.dt <- perm.dt[, list(density, perm.diff=get(measure))]
perm.dt$obs.diff <- rep(meas.obs1 - meas.obs2, times=N)
if (alt == 'two.sided') {
perm.dt[, p := (sum(abs(perm.diff) >= abs(unique(obs.diff))) + 1) / (.N + 1),
by=density]
ci <- c(alpha / 2, 1 - (alpha / 2))
} else if (alt == 'less') {
perm.dt[, p := (sum(perm.diff <= unique(obs.diff)) + 1) / (.N + 1), by=density]
ci <- c(alpha, NULL)
} else if (alt == 'greater') {
perm.dt[, p := (sum(perm.diff >= unique(obs.diff)) + 1) / (.N + 1), by=density]
ci <- c(NULL, 1 - alpha)
}
result.dt <- data.table(Group=rep(groups, each=length(densities.perm)),
density=densities.perm,
N=rep(N, length(groups)),
obs=c(meas.obs1, meas.obs2),
perm.diff=rep(perm.dt[, mean(perm.diff), by=density]$V1, length(groups)),
p=perm.dt[, unique(p), by=density]$V1,
sig='', trend='')
result.dt[, p.fdr := p.adjust(p, 'fdr')]
result.dt[p < alpha, sig := '*']
result.dt[p >= alpha & p < 0.1, trend := '*']
result.dt[, yloc := round(min(obs) - 0.05 * diff(range(obs)), 3)]
sigplot <- ggplot(data=result.dt, aes(x=density, y=obs, col=Group)) +
geom_line() +
geom_text(aes(y=yloc, label=sig), col='red', size=8) +
geom_text(aes(y=yloc, label=trend), col='blue', size=8) +
theme(legend.position='none')
if (!is.null(ylabel)) {
sigplot <- sigplot + ylab(ylabel)
} else {
sigplot <- sigplot + ylab(measure)
}
perm.dt[, mean.diff := mean(perm.diff), by=density]
perm.dt[, c('ci.low', 'ci.high') := as.list(sort(perm.diff)[.N * ci]), by=density]
sigplot2 <- ggplot(result.dt[, list(obs.diff=-diff(obs)), by=density],
aes(x=as.factor(density))) +
geom_line(data=perm.dt[, list(ci.low=unique(ci.low)), by=density],
aes(x=density, y=ci.low), lty=2) +
geom_line(data=perm.dt[, list(ci.high=unique(ci.high)), by=density],
aes(x=density, y=ci.high), lty=2) +
geom_line(aes(x=density, y=obs.diff), col='red') +
geom_point(aes(x=density, y=obs.diff), col='red', size=3) +
geom_line(data=perm.dt, aes(x=density, y=mean.diff), lty=2) +
#geom_boxplot(data=perm.dt, aes(x=as.factor(density), y=perm.diff),
# fill='cyan3', outlier.size=0) +
#scale_x_discrete(breaks=seq(densities.perm[1],
# densities.perm[length(densities.perm)], by=0.05)) +
labs(x='Density',
y=sprintf('Permutation difference (%s - %s)', groups[1], groups[2]),
plot.title=ylabel)
# Vertex-level permutations
#-------------------------------------
} else if (level == 'vertex') {
if (is_igraph(g1)) {
meas.obs1 <- vertex_attr(g1, measure)
meas.obs2 <- vertex_attr(g2, measure)
} else {
meas.obs1 <- sapply(g1, vertex_attr, measure)
meas.obs2 <- sapply(g2, vertex_attr, measure)
}
if (isTRUE(auc)) {
perm.dt$density <- 1
meas.obs <- list(t(meas.obs1), t(meas.obs2))
obs.diff <- sapply(seq_len(ncol(perm.dt)-1), function(y)
auc_diff(densities.g, cbind(meas.obs[[1]][, y], meas.obs[[2]][, y])))
} else {
obs.diff <- as.vector(meas.obs1 - meas.obs2)
}
perm.dt[, N := .N, by=density]
perm.m <- melt(perm.dt, id.vars=c('density', 'N'), variable.name='region',
value.name='perm.diff')
setkey(perm.m, density, region)
perm.m$obs.diff <- rep(obs.diff,
times=rep(N, each=ncol(perm.dt)-2))
if (alt == 'two.sided') {
perm.m[, p := (sum(abs(perm.diff) >= abs(unique(obs.diff))) + 1) / (.N + 1),
by=list(density, region)]
} else if (alt == 'less') {
perm.m[, p := (sum(perm.diff <= unique(obs.diff)) + 1) / (.N + 1),
by=list(density, region)]
} else if (alt == 'greater') {
perm.m[, p := (sum(perm.diff >= unique(obs.diff)) + 1) / (.N + 1),
by=list(density, region)]
}
p.fdr <- perm.m[, list(p=unique(p)), by=list(region, density)][, p.adjust(p, 'fdr'), by=density]$V1
perm.m$p.fdr <- rep(p.fdr, times=rep(N, each=ncol(perm.dt)-2))
perm.m.sig <- perm.m[p < alpha]
perm.m.sig[, reg.num := rep(seq_len(length(unique(region))), each=unique(N)),
by=density]
sigplot <- ggplot(perm.m.sig, aes(x=region, y=perm.diff)) +
geom_boxplot(fill='cyan3', outlier.size=0) +
geom_segment(data=perm.m.sig[, list(obs.diff=unique(obs.diff)), by=list(density, reg.num)],
aes(x=reg.num-0.25, xend=reg.num+0.25, y=obs.diff, yend=obs.diff), col='red') +
facet_wrap(~ density, scales='free')
sigplot <- sigplot +
ylab(sprintf('Permutation difference (%s - %s)', groups[1], groups[2]))
result.dt <- perm.m
sigplot2 <- NULL
}
return(list(dt=result.dt, p1=sigplot, p2=sigplot2))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{fars_map_state}
\alias{fars_map_state}
\title{fars_map_state}
\usage{
fars_map_state(state.num, year)
}
\arguments{
\item{state.num}{number of a state in the US}
\item{year}{a given year}
}
\value{
a plot of accidents' locations of a state
}
\description{
Plots the locations of accidents in the given state for the given year.
}
\examples{
x <- fars_map_state(26, "2015")
# x <- fars_map_state(26, "201")  # error because of invalid year
}
|
/man/fars_map_state.Rd
|
no_license
|
alaeddine-abbagh/Test_R
|
R
| false
| true
| 532
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{fars_map_state}
\alias{fars_map_state}
\title{fars_map_state}
\usage{
fars_map_state(state.num, year)
}
\arguments{
\item{state.num}{number of a state in the US}
\item{year}{a given year}
}
\value{
a plot of accidents' locations of a state
}
\description{
Plots the locations of accidents in the given state for the given year.
}
\examples{
x <- fars_map_state(26, "2015")
# x <- fars_map_state(26, "201")  # error because of invalid year
}
|
# Match company names between the locations table and the S&P names table
# (case-insensitively), attach tickers, and rewrite the locations table.
# Usage: Rscript matchTickerLocationCompanyNames.R <db-file>
library(RSQLite)

args <- commandArgs(trailingOnly = TRUE)
DB <- args[1]
con <- dbConnect(SQLite(), DB)

pos <- dbReadTable(con, "company_locations")
sp <- dbReadTable(con, "company_name")

# Companies present in both tables, ignoring case
w <- tolower(pos$company) %in% tolower(sp$sandp_comp)
mtch <- match(tolower(pos$company)[w], tolower(sp$sandp_comp))
z <- data.frame(orig = sp$sandp_comp[mtch], loc = pos$company[w],
                ticker = sp$ticker[mtch], stringsAsFactors = FALSE)
table(tolower(z$orig) == tolower(z$loc))
# 4 that have lower/upper case differences

# Now put these tickers into Fortune500Locations.csv
tmp <- read.csv("Fortune500Locations.csv", stringsAsFactors = FALSE)
i <- match(z$loc, tmp$company)
tmp$ticker <- ""
tmp$ticker[i] <- z$ticker
tmp <- tmp[tmp$ticker != "", ]

# Drop the old locations table.
# dbExecute (not dbSendQuery) runs the statement to completion and frees the
# result set, avoiding an open-result warning on disconnect.
dbExecute(con, "DROP TABLE company_locations")
# Write the new locations table.
dbWriteTable(con, name = "company_locations", tmp)
dbDisconnect(con)
|
/matchTickerLocationCompanyNames.R
|
no_license
|
clarkfitzg/SQLworkshop
|
R
| false
| false
| 916
|
r
|
# Match company names between the locations table and the S&P names table
# (case-insensitively), attach tickers, and rewrite the locations table.
# Usage: Rscript matchTickerLocationCompanyNames.R <db-file>
library(RSQLite)

args <- commandArgs(trailingOnly = TRUE)
DB <- args[1]
con <- dbConnect(SQLite(), DB)

pos <- dbReadTable(con, "company_locations")
sp <- dbReadTable(con, "company_name")

# Companies present in both tables, ignoring case
w <- tolower(pos$company) %in% tolower(sp$sandp_comp)
mtch <- match(tolower(pos$company)[w], tolower(sp$sandp_comp))
z <- data.frame(orig = sp$sandp_comp[mtch], loc = pos$company[w],
                ticker = sp$ticker[mtch], stringsAsFactors = FALSE)
table(tolower(z$orig) == tolower(z$loc))
# 4 that have lower/upper case differences

# Now put these tickers into Fortune500Locations.csv
tmp <- read.csv("Fortune500Locations.csv", stringsAsFactors = FALSE)
i <- match(z$loc, tmp$company)
tmp$ticker <- ""
tmp$ticker[i] <- z$ticker
tmp <- tmp[tmp$ticker != "", ]

# Drop the old locations table.
# dbExecute (not dbSendQuery) runs the statement to completion and frees the
# result set, avoiding an open-result warning on disconnect.
dbExecute(con, "DROP TABLE company_locations")
# Write the new locations table.
dbWriteTable(con, name = "company_locations", tmp)
dbDisconnect(con)
|
#' Conflict Traffic Volume of i-Ramp at Five-way Roundabout
#'
#' Conflict traffic volume (pcph) of approach i at a 5-way roundabout:
#' the sum of the four ramp-to-ramp flows and the U-turn traffic.
#' It follows <Table 11-7> in KHCM(2013) p.493, 505.
#' @param v_i1 Traffic from ramp i to other ramp 1(pcph)
#' @param v_i2 Traffic from ramp i to other ramp 2(pcph)
#' @param v_i3 Traffic from ramp i to other ramp 3(pcph)
#' @param v_i4 Traffic from ramp i to other ramp 4(pcph)
#' @param v_u Sum of U-turn traffic excluding U-turn traffic on ramp i(pcph)
#' @return Conflicting traffic volume (pcph).
#' @keywords conflict traffic volume five-way roundabout
#' @seealso \code{\link{V_c_NB_rab}}, \code{\link{V_c_i_3rab}}
#' @export
#' @examples
#' V_c_i_5rab(v_i1 = 132, v_i2 = 494, v_i3 = 90, v_i4 = 343, v_u = 100)
V_c_i_5rab <- function(v_i1 = NULL, v_i2 = NULL, v_i3 = NULL, v_i4 = NULL, v_u = NULL){
  vols <- c(v_i1, v_i2, v_i3, v_i4, v_u)
  # c() silently drops NULLs, so the length check also catches missing
  # arguments (previously a NULL produced "argument is of length zero").
  # Signal invalid input with stop() instead of returning an error string,
  # so the function is type-stable (always numeric on success).
  if (length(vols) != 5 || any(is.na(vols)) || any(vols < 0)) {
    stop('[v_i1], [v_i2], [v_i3], [v_i4], [v_u] must be positive(pcph). Please check that.')
  }
  sum(vols)
}
|
/R/V_c_i_5rab.R
|
no_license
|
regenesis90/KHCMinR
|
R
| false
| false
| 1,054
|
r
|
#' Conflict Traffic Volume of i-Ramp at Five-way Roundabout
#'
#' Conflict traffic volume (pcph) of approach i at a 5-way roundabout:
#' the sum of the four ramp-to-ramp flows and the U-turn traffic.
#' It follows <Table 11-7> in KHCM(2013) p.493, 505.
#' @param v_i1 Traffic from ramp i to other ramp 1(pcph)
#' @param v_i2 Traffic from ramp i to other ramp 2(pcph)
#' @param v_i3 Traffic from ramp i to other ramp 3(pcph)
#' @param v_i4 Traffic from ramp i to other ramp 4(pcph)
#' @param v_u Sum of U-turn traffic excluding U-turn traffic on ramp i(pcph)
#' @return Conflicting traffic volume (pcph).
#' @keywords conflict traffic volume five-way roundabout
#' @seealso \code{\link{V_c_NB_rab}}, \code{\link{V_c_i_3rab}}
#' @export
#' @examples
#' V_c_i_5rab(v_i1 = 132, v_i2 = 494, v_i3 = 90, v_i4 = 343, v_u = 100)
V_c_i_5rab <- function(v_i1 = NULL, v_i2 = NULL, v_i3 = NULL, v_i4 = NULL, v_u = NULL){
  vols <- c(v_i1, v_i2, v_i3, v_i4, v_u)
  # c() silently drops NULLs, so the length check also catches missing
  # arguments (previously a NULL produced "argument is of length zero").
  # Signal invalid input with stop() instead of returning an error string,
  # so the function is type-stable (always numeric on success).
  if (length(vols) != 5 || any(is.na(vols)) || any(vols < 0)) {
    stop('[v_i1], [v_i2], [v_i3], [v_i4], [v_u] must be positive(pcph). Please check that.')
  }
  sum(vols)
}
|
##' acfana function adapted from libassp
##'
##' Analysis of short-term autocorrelation function of
##' the signals in <listOFFiles>.
##' Analysis results will be written to a file with the
##' base name of the input file and extension '.acf'.
##' Default output is in SSFF binary format (track 'acf').
##' @title acfana
##' @param listOfFiles vector of file paths to be processed by function
##' @param optLogFilePath path to option log file
##' @param beginTime = <time>: set begin of analysis interval to <time> seconds (default: 0 = beginning of file)
##' @param centerTime = <time>: set single-frame analysis with the analysis window centred at <time> seconds;
##' overrules BeginTime, EndTime and WindowShift options
##' @param endTime = <time>: set end of analysis interval to <time> seconds (default: 0 = end of file)
##' @param windowShift = <dur>: set analysis window shift to <dur> ms (default: 5.0)
##' @param windowSize = <dur>: set analysis window size to <dur> ms; overrules EffectiveLength parameter
##' @param effectiveLength make window size effective rather than exact
##' @param window = <type>: set analysis window function to <type> (default: BLACKMAN)
##' @param analysisOrder = <num>: set analysis order to <num> (default: 0 = sample rate in kHz + 3)
##' @param energyNormalization calculate energy-normalized autocorrelation
##' @param lengthNormalization calculate length-normalized autocorrelation
##' @param toFile write results to file (default extension is .acf)
##' @param explicitExt set if you wish to override the default extension
##' @param outputDirectory directory in which output files are stored. Defaults to NULL, i.e.
##' the directory of the input files
##' @param forceToLog is set by the global package variable useWrasspLogger. This is set
##' to FALSE by default and should be set to TRUE if logging is desired.
##' @param verbose display infos & show progress bar
##' @return nrOfProcessedFiles or if only one file to process return AsspDataObj of that file
##' @author Raphael Winkelmann
##' @author Lasse Bombien
##' @useDynLib wrassp, .registration = TRUE
##' @examples
##' # get path to audio file
##' path2wav <- list.files(system.file("extdata", package = "wrassp"),
##'                        pattern = glob2rx("*.wav"),
##'                        full.names = TRUE)[1]
##'
##' # calculate short-term autocorrelation
##' res <- acfana(path2wav, toFile=FALSE)
##'
##' # plot short-term autocorrelation values
##' matplot(seq(0,numRecs.AsspDataObj(res) - 1) / rate.AsspDataObj(res) +
##'           attr(res, 'startTime'),
##'         res$acf,
##'         type='l',
##'         xlab='time (s)',
##'         ylab='short-term autocorrelation values')
##'
##' @export
'acfana' <- function(listOfFiles = NULL, optLogFilePath = NULL,
                     beginTime = 0.0, centerTime = FALSE,
                     endTime = 0.0, windowShift = 5.0,
                     windowSize = 20.0, effectiveLength = TRUE,
                     window = "BLACKMAN", analysisOrder = 0,
                     energyNormalization = FALSE, lengthNormalization = FALSE,
                     toFile = TRUE, explicitExt = NULL, outputDirectory = NULL,
                     forceToLog = useWrasspLogger, verbose = TRUE){
  ###########################
  # a few parameter checks and expand paths
  if (is.null(listOfFiles)) {
    stop(paste("listOfFiles is NULL! It has to be a string or vector of file",
               "paths (min length = 1) pointing to valid file(s) to perform",
               "the given analysis function."))
  }
  if (is.null(optLogFilePath) && forceToLog){
    stop("optLogFilePath is NULL! -> not logging!")
  }else{
    if(forceToLog){
      optLogFilePath = path.expand(optLogFilePath)
    }
  }
  if(!isAsspWindowType(window)){
    stop("WindowFunction of type '", window,"' is not supported!")
  }
  if (!is.null(outputDirectory)) {
    outputDirectory = normalizePath(path.expand(outputDirectory))
    finfo <- file.info(outputDirectory)
    if (is.na(finfo$isdir))
      if (!dir.create(outputDirectory, recursive=TRUE))
        stop('Unable to create output directory.')
    else if (!finfo$isdir)
      stop(paste(outputDirectory, 'exists but is not a directory.'))
  }
  ###########################
  # Pre-process file list
  listOfFiles <- prepareFiles(listOfFiles)
  ###########################
  # perform analysis
  # scalar condition: use short-circuiting || rather than elementwise |
  if(length(listOfFiles) == 1 || !verbose){
    pb <- NULL
  }else{
    if(toFile==FALSE){
      stop("length(listOfFiles) is > 1 and toFile=FALSE! toFile=FALSE only permitted for single files.")
    }
    cat('\n  INFO: applying acfana to', length(listOfFiles), 'files\n')
    pb <- utils::txtProgressBar(min = 0, max = length(listOfFiles), style = 3)
  }
  externalRes = invisible(.External("performAssp", listOfFiles,
                                    fname = "acfana", beginTime = beginTime,
                                    centerTime = centerTime, endTime = endTime,
                                    windowShift = windowShift, windowSize = windowSize,
                                    effectiveLength = effectiveLength, window = window,
                                    analysisOrder = as.integer(analysisOrder), energyNormalization = energyNormalization,
                                    lengthNormalization = lengthNormalization, toFile = toFile,
                                    explicitExt = explicitExt, progressBar = pb,
                                    outputDirectory = outputDirectory, PACKAGE = "wrassp"))
  ############################
  # write options to options log file
  if (forceToLog){
    optionsGivenAsArgs = as.list(match.call(expand.dots = TRUE))
    wrassp.logger(optionsGivenAsArgs[[1]], optionsGivenAsArgs[-1],
                  optLogFilePath, listOfFiles)
  }
  #############################
  # close the progress bar if one was opened, then always return the result.
  # (Previously the multi-file branch returned the value of close(pb) instead
  # of externalRes, contradicting the documented return value.)
  if(!is.null(pb)){
    close(pb)
  }
  return(externalRes)
}
|
/R/acfana.R
|
no_license
|
IPS-LMU/wrassp
|
R
| false
| false
| 6,019
|
r
|
##' acfana function adapted from libassp
##'
##' Analysis of short-term autocorrelation function of
##' the signals in <listOFFiles>.
##' Analysis results will be written to a file with the
##' base name of the input file and extension '.acf'.
##' Default output is in SSFF binary format (track 'acf').
##' @title acfana
##' @param listOfFiles vector of file paths to be processed by function
##' @param optLogFilePath path to option log file
##' @param beginTime = <time>: set begin of analysis interval to <time> seconds (default: 0 = beginning of file)
##' @param centerTime = <time>: set single-frame analysis with the analysis window centred at <time> seconds;
##' overrules BeginTime, EndTime and WindowShift options
##' @param endTime = <time>: set end of analysis interval to <time> seconds (default: 0 = end of file)
##' @param windowShift = <dur>: set analysis window shift to <dur> ms (default: 5.0)
##' @param windowSize = <dur>: set analysis window size to <dur> ms; overrules EffectiveLength parameter
##' @param effectiveLength make window size effective rather than exact
##' @param window = <type>: set analysis window function to <type> (default: BLACKMAN)
##' @param analysisOrder = <num>: set analysis order to <num> (default: 0 = sample rate in kHz + 3)
##' @param energyNormalization calculate energy-normalized autocorrelation
##' @param lengthNormalization calculate length-normalized autocorrelation
##' @param toFile write results to file (default extension is .acf)
##' @param explicitExt set if you wish to override the default extension
##' @param outputDirectory directory in which output files are stored. Defaults to NULL, i.e.
##' the directory of the input files
##' @param forceToLog is set by the global package variable useWrasspLogger. This is set
##' to FALSE by default and should be set to TRUE is logging is desired.
##' @param verbose display infos & show progress bar
##' @return nrOfProcessedFiles or if only one file to process return AsspDataObj of that file
##' @author Raphael Winkelmann
##' @author Lasse Bombien
##' @useDynLib wrassp, .registration = TRUE
##' @examples
##' # get path to audio file
##' path2wav <- list.files(system.file("extdata", package = "wrassp"),
##' pattern = glob2rx("*.wav"),
##' full.names = TRUE)[1]
##'
##' # calculate short-term autocorrelation
##' res <- acfana(path2wav, toFile=FALSE)
##'
##' # plot short-term autocorrelation values
##' matplot(seq(0,numRecs.AsspDataObj(res) - 1) / rate.AsspDataObj(res) +
##' attr(res, 'startTime'),
##' res$acf,
##' type='l',
##' xlab='time (s)',
##' ylab='short-term autocorrelation values')
##'
##' @export
'acfana' <- function(listOfFiles = NULL, optLogFilePath = NULL,
beginTime = 0.0, centerTime = FALSE,
endTime = 0.0, windowShift = 5.0,
windowSize = 20.0, effectiveLength = TRUE,
window = "BLACKMAN", analysisOrder = 0,
energyNormalization = FALSE, lengthNormalization = FALSE,
toFile = TRUE, explicitExt = NULL, outputDirectory = NULL,
forceToLog = useWrasspLogger, verbose = TRUE){
###########################
# a few parameter checks and expand paths
if (is.null(listOfFiles)) {
stop(paste("listOfFiles is NULL! It has to be a string or vector of file",
"paths (min length = 1) pointing to valid file(s) to perform",
"the given analysis function."))
}
if (is.null(optLogFilePath) && forceToLog){
stop("optLogFilePath is NULL! -> not logging!")
}else{
if(forceToLog){
optLogFilePath = path.expand(optLogFilePath)
}
}
if(!isAsspWindowType(window)){
stop("WindowFunction of type '", window,"' is not supported!")
}
if (!is.null(outputDirectory)) {
outputDirectory = normalizePath(path.expand(outputDirectory))
finfo <- file.info(outputDirectory)
if (is.na(finfo$isdir))
if (!dir.create(outputDirectory, recursive=TRUE))
stop('Unable to create output directory.')
else if (!finfo$isdir)
stop(paste(outputDirectory, 'exists but is not a directory.'))
}
###########################
# Pre-process file list
listOfFiles <- prepareFiles(listOfFiles)
###########################
# perform analysis
if(length(listOfFiles)==1 | !verbose){
pb <- NULL
}else{
if(toFile==FALSE){
stop("length(listOfFiles) is > 1 and toFile=FALSE! toFile=FALSE only permitted for single files.")
}
cat('\n INFO: applying acfana to', length(listOfFiles), 'files\n')
pb <- utils::txtProgressBar(min = 0, max = length(listOfFiles), style = 3)
}
externalRes = invisible(.External("performAssp", listOfFiles,
fname = "acfana", beginTime = beginTime,
centerTime = centerTime, endTime = endTime,
windowShift = windowShift, windowSize = windowSize,
effectiveLength = effectiveLength, window = window,
analysisOrder = as.integer(analysisOrder), energyNormalization = energyNormalization,
lengthNormalization = lengthNormalization, toFile = toFile,
explicitExt = explicitExt, progressBar = pb,
outputDirectory = outputDirectory, PACKAGE = "wrassp"))
############################
# write options to options log file
if (forceToLog){
optionsGivenAsArgs = as.list(match.call(expand.dots = TRUE))
wrassp.logger(optionsGivenAsArgs[[1]], optionsGivenAsArgs[-1],
optLogFilePath, listOfFiles)
}
#############################
# return dataObj if length only one file
if(!is.null(pb)){
close(pb)
}else{
return(externalRes)
}
}
|
#' Plot overview over the experiment within flies.
#'
#' Plot overview over the experiment within flies. Creates a \code{_overview.pdf} per fly in a given experiment containing one line plot including
#' all single tracking events and the fly's mean and median response time-course.
#'
#' @param object a \code{data.frame} as produced by \code{read.esm}.
#' @return one \code{_overview.pdf} per fly analyzed (called for its side
#'   effect of writing PDF files).
#' @export
plot_trace_all_1to7 <- function(object){
  # Name the trace columns by their time stamps (-9.95 s .. 9.95 s, 0.1 s steps)
  colnames(object) <- c("fly.nr", "odour", "pulse", seq(-9.95, 9.95, 0.1))
  # Keep the id columns plus the -0.95 s .. 6.95 s window (columns 94:173)
  # and drop any row containing an NA
  data.short <- data.frame(cbind(object[, 1:3], object[, 94:173]))
  data.clean <- data.short[!apply(is.na(data.short), 1, any), ]
  animals <- levels(as.factor(data.clean$fly.nr))
  for (i in seq_along(animals)) {
    animalx <- subset(data.clean, fly.nr == animals[i])
    animalx$odour <- factor(animalx$odour)
    # paste0 avoids the space that paste() inserted before "_overview.pdf"
    pdf(file = paste0(animals[i], "_overview.pdf"), paper = "a4",
        height = 11.69, width = 8.27)
    par(mfrow = c(3, 3))
    odors <- levels(as.factor(animalx$odour))
    for (j in seq_along(odors)) {
      odorx <- subset(animalx, odour == odors[j])
      odorx$odour <- factor(odorx$odour)
      # median/mean time-course across all tracking events for this odour
      mediodor <- apply(odorx[, 4:83], 2, median, na.rm = TRUE)
      meanodor <- apply(odorx[, 4:83], 2, mean, na.rm = TRUE)
      ylim <- c(-1, 1)
      x <- seq(-0.95, 6.95, 0.1)
      plot(x, -mediodor, type = "n", xlim = c(-1, 7), ylim = ylim,
           main = paste(animalx[1, 1], odorx$odour[1]),
           xlab = "time(s)", ylab = "mean speed (cm/s)", bty = "n")
      # individual tracking events in gray
      for (l in seq_len(dim(odorx)[1])) {
        lines(x, -odorx[l, 4:83], col = "gray")
      }
      lines(x, -mediodor, col = "red")    # median response
      lines(x, -meanodor, col = "green")  # mean response
      lines(x = c(0, 0), y = ylim)        # marker at t = 0
      legend("topright", legend = paste("N =", dim(odorx)[1]), bty = "n")
    }
    dev.off()
  }
}
|
/R/plot_trace_all_1to7.R
|
permissive
|
michathoma/flywalkr
|
R
| false
| false
| 1,927
|
r
|
#' Plot overview over the experiment within flies.
#'
#' Plot overview over the experiment within flies. Creates a \code{_overview.pdf} per fly in a given experiment containing one line plot including
#' all single tracking events and the fly's mean and median response time-course.
#'
#' @param object a \code{data.frame} as produced by \code{read.esm}.
#' @return one \code{_overview.pdf} per fly analyzed (called for its side
#'   effect of writing PDF files).
#' @export
plot_trace_all_1to7 <- function(object){
  # Name the trace columns by their time stamps (-9.95 s .. 9.95 s, 0.1 s steps)
  colnames(object) <- c("fly.nr", "odour", "pulse", seq(-9.95, 9.95, 0.1))
  # Keep the id columns plus the -0.95 s .. 6.95 s window (columns 94:173)
  # and drop any row containing an NA
  data.short <- data.frame(cbind(object[, 1:3], object[, 94:173]))
  data.clean <- data.short[!apply(is.na(data.short), 1, any), ]
  animals <- levels(as.factor(data.clean$fly.nr))
  for (i in seq_along(animals)) {
    animalx <- subset(data.clean, fly.nr == animals[i])
    animalx$odour <- factor(animalx$odour)
    # paste0 avoids the space that paste() inserted before "_overview.pdf"
    pdf(file = paste0(animals[i], "_overview.pdf"), paper = "a4",
        height = 11.69, width = 8.27)
    par(mfrow = c(3, 3))
    odors <- levels(as.factor(animalx$odour))
    for (j in seq_along(odors)) {
      odorx <- subset(animalx, odour == odors[j])
      odorx$odour <- factor(odorx$odour)
      # median/mean time-course across all tracking events for this odour
      mediodor <- apply(odorx[, 4:83], 2, median, na.rm = TRUE)
      meanodor <- apply(odorx[, 4:83], 2, mean, na.rm = TRUE)
      ylim <- c(-1, 1)
      x <- seq(-0.95, 6.95, 0.1)
      plot(x, -mediodor, type = "n", xlim = c(-1, 7), ylim = ylim,
           main = paste(animalx[1, 1], odorx$odour[1]),
           xlab = "time(s)", ylab = "mean speed (cm/s)", bty = "n")
      # individual tracking events in gray
      for (l in seq_len(dim(odorx)[1])) {
        lines(x, -odorx[l, 4:83], col = "gray")
      }
      lines(x, -mediodor, col = "red")    # median response
      lines(x, -meanodor, col = "green")  # mean response
      lines(x = c(0, 0), y = ylim)        # marker at t = 0
      legend("topright", legend = paste("N =", dim(odorx)[1]), bty = "n")
    }
    dev.off()
  }
}
|
/Scripts/Model_building.R
|
no_license
|
48u51r0/evaluacion-presidente
|
R
| false
| false
| 1,633
|
r
| ||
#url = "http://www.vacavillehonda.com/search/new/tp/"
#doc = htmlParse(url)
#baselink = substr(url, 1, gregexpr("/",url)[[1]][3]-1)
#url = "http://www.harr.com/search/new/tp/"
#url = "http://www.herbconnollychevrolet.com/search/new/tp/"
#url = "http://www.bmwps.com/search/new-bmw/tp-mk8/"
#grab the linklist
#small function to get page links
# Build the full list of result-page URLs for a dealership search page.
# Strategy: find the "last page" link (one containing a "p:<n>" marker) and
# substitute every page number from 1 to n into it.
getLinklist.2 <- function(url) {
  slash_pos <- gregexpr("/", url)[[1]]
  baselink <- substr(url, 1, slash_pos[3] - 1)       # scheme + host
  pattern <- substr(url, slash_pos[3], nchar(url))   # search path of the URL
  txt <- getURLContent(url, useragent = "R")
  doc <- htmlParse(txt, asText = TRUE)
  href <- unique(getHTMLLinks(doc))
  # links sharing the search path
  same_path <- href[grep(pattern, href)]
  # among those, the ones carrying a "p:" page marker
  paged <- grep("p:", gsub(pattern, "", same_path))
  # the last such link and its page number
  lastpage <- paste0(baselink, same_path[paged[length(paged)]])
  lastpagenumber <- as.numeric(gsub(".*p:([0-9]+).*", "\\1", lastpage))
  sapply(1:lastpagenumber,
         function(page) gsub("p:([0-9]+)", paste0("p:", page), lastpage))
}
#scrapping data
# Pull the "content" attribute from an HTML <meta> node.
getdatacontent.2 <- function(node) {
  node_attrs <- xmlAttrs(node)
  node_attrs["content"]
}
# Scrape VIN and manufacturer for each vehicle listed on one result page.
# Returns NULL when the page exposes no serialNumber metadata.
scrapeInfo.2 <- function(url)
{
  txt <- getURLContent(url, useragent = "R")
  doc <- htmlParse(txt, asText = TRUE)
  vin.nodes <- getNodeSet(doc, "//meta[@itemprop='serialNumber']")
  vins <- unique(sapply(vin.nodes, getdatacontent.2))
  if (length(vins) == 0) {
    return(NULL)
  }
  make.nodes <- getNodeSet(doc, "//meta[@itemprop='manufacturer']")
  make <- sapply(make.nodes, getdatacontent.2)
  # model, trim and year are not exposed on this page type: placeholders only
  df <- data.frame(vins, make, model = "NA", trim = "NA", as.numeric(NA),
                   stringsAsFactors = FALSE)
  colnames(df) <- c("VIN", "Make", "Model", "Trim", "Year")
  #print(url)
  df
}
#scrape car information from all the pages
# Scrape car information from all result pages of a search URL.
# do.call(rbind, ...) binds every page's data frame in a single pass,
# replacing the quadratic pairwise Reduce(rbind, ...); rbind drops the
# NULLs that scrapeInfo.2 returns for empty pages, so the result is the same.
alldata.2 <- function(url) {
  links <- getLinklist.2(url)
  pages <- lapply(links, scrapeInfo.2)
  do.call(rbind, pages)
}
#cc = alldata.2(url)
|
/Case Studies/Case study-2.R
|
no_license
|
jpzhangvincent/Dealership-Scraping
|
R
| false
| false
| 1,964
|
r
|
#url = "http://www.vacavillehonda.com/search/new/tp/"
#doc = htmlParse(url)
#baselink = substr(url, 1, gregexpr("/",url)[[1]][3]-1)
#url = "http://www.harr.com/search/new/tp/"
#url = "http://www.herbconnollychevrolet.com/search/new/tp/"
#url = "http://www.bmwps.com/search/new-bmw/tp-mk8/"
#grab the linklist
#small function to get page links
# Build the full list of result-page URLs for a dealership search page.
# Strategy: find the "last page" link (one containing a "p:<n>" marker) and
# substitute every page number from 1 to n into it.
getLinklist.2 <- function(url) {
  slash_pos <- gregexpr("/", url)[[1]]
  baselink <- substr(url, 1, slash_pos[3] - 1)       # scheme + host
  pattern <- substr(url, slash_pos[3], nchar(url))   # search path of the URL
  txt <- getURLContent(url, useragent = "R")
  doc <- htmlParse(txt, asText = TRUE)
  href <- unique(getHTMLLinks(doc))
  # links sharing the search path
  same_path <- href[grep(pattern, href)]
  # among those, the ones carrying a "p:" page marker
  paged <- grep("p:", gsub(pattern, "", same_path))
  # the last such link and its page number
  lastpage <- paste0(baselink, same_path[paged[length(paged)]])
  lastpagenumber <- as.numeric(gsub(".*p:([0-9]+).*", "\\1", lastpage))
  sapply(1:lastpagenumber,
         function(page) gsub("p:([0-9]+)", paste0("p:", page), lastpage))
}
#scrapping data
# Pull the "content" attribute from an HTML <meta> node.
getdatacontent.2 <- function(node) {
  node_attrs <- xmlAttrs(node)
  node_attrs["content"]
}
# Scrape VIN and manufacturer for each vehicle listed on one result page.
# Returns NULL when the page exposes no serialNumber metadata.
scrapeInfo.2 <- function(url)
{
  txt <- getURLContent(url, useragent = "R")
  doc <- htmlParse(txt, asText = TRUE)
  vin.nodes <- getNodeSet(doc, "//meta[@itemprop='serialNumber']")
  vins <- unique(sapply(vin.nodes, getdatacontent.2))
  if (length(vins) == 0) {
    return(NULL)
  }
  make.nodes <- getNodeSet(doc, "//meta[@itemprop='manufacturer']")
  make <- sapply(make.nodes, getdatacontent.2)
  # model, trim and year are not exposed on this page type: placeholders only
  df <- data.frame(vins, make, model = "NA", trim = "NA", as.numeric(NA),
                   stringsAsFactors = FALSE)
  colnames(df) <- c("VIN", "Make", "Model", "Trim", "Year")
  #print(url)
  df
}
#scrape car information from all the pages
# Scrape car information from all result pages of a search URL.
# do.call(rbind, ...) binds every page's data frame in a single pass,
# replacing the quadratic pairwise Reduce(rbind, ...); rbind drops the
# NULLs that scrapeInfo.2 returns for empty pages, so the result is the same.
alldata.2 <- function(url) {
  links <- getLinklist.2(url)
  pages <- lapply(links, scrapeInfo.2)
  do.call(rbind, pages)
}
#cc = alldata.2(url)
|
library(tidyverse)
### Bradford
## READ and FORMAT the raw ASCII data from Synergy H1
####### a function to read in all the biotek files, gather the data and reformat the time columns
#' Read one Synergy H1 Bradford export, reshape it to one row per well with
#' one column per wavelength, and compute the 595/450 absorbance ratio.
#'
#' @param file path to the tab-separated Biotek export file.
#' @return a tibble with columns well, `450`, `595` and ratio_abs.
read_and_gather <- function(file) {
  print(file)
  raw_in <- read_lines(file)
  # the 16 plate rows start two lines below the "Results" marker line
  two_rows_before_data_row <- grep(raw_in, pattern = "Results")
  n_rows_to_read <- 16
  data_in <- read_tsv(file, col_names = FALSE, skip = (two_rows_before_data_row + 1),
                      n_max = n_rows_to_read, locale = locale(encoding = 'windows-1250'))
  names(data_in) <- c("row", as.character(seq(1, 12, 1)), "nM")
  # each plate row (A-H) appears twice: once per measurement wavelength
  data_in <- data_in %>%
    mutate("row" = rep(LETTERS[1:8], each = 2))
  data_gathered <- data_in %>%
    # pivot_longer/pivot_wider replace the superseded gather()/spread()
    pivot_longer(cols = -c(nM, row), names_to = "col", values_to = "abs") %>%
    filter(!is.na(abs)) %>%
    mutate("well" = str_c(row, col, sep = "")) %>%
    select(-row, -col) %>%
    pivot_wider(names_from = nM, values_from = abs) %>%
    mutate("ratio_abs" = as.double(`595`) / as.double(`450`))
  return(data_gathered)
}
# Input plate reading and its well-index file
file <- "GTPase_assay/Bradfords/20190228_Bradford2.txt"
index <- read_tsv("GTPase_assay/Bradfords/20190228_Bradford_index2.txt")
# Wells with a known concentration form the calibration set; the rest are samples
calibration_index <- index %>% filter(!is.na(conc))
data_index <- index %>% filter( is.na(conc) )
data <- read_and_gather(file)
# Quick visual sanity check of the two absorbance channels
data %>%
ggplot(aes(x = `450`, y = `595`)) + geom_point()
# Join measured ratios onto the calibration wells
calibration <- calibration_index %>%
inner_join(., data, by = "well") %>%
mutate("conc" = as.double(conc)) %>%
arrange(conc) %>%
mutate("row" = str_sub(well, 1, 1), "column" = str_sub(well, 2))
# Sample labels belonging to the low- and high-concentration calibration series
low_conc <- c("A", "B", "C", "D", "E", "F", "G", "buffer")
high_conc <- c("a", "b", "c", "d", "e", "f", "g", "g_half", "h", "i", "buff")
### enzyme
calibration_low <- calibration %>% filter(sample %in% low_conc)
# Linear calibration: concentration as a function of the 595/450 ratio
linear_fit <- lm(conc ~ ratio_abs, data = calibration_low)
cal_slope <- linear_fit$coefficients[2]
m_var <- (coef(summary(linear_fit))[2, 2])^2 # slope variance
cal_intercept <- linear_fit$coefficients[1]
b_var <- (coef(summary(linear_fit))[1, 2])^2 # intercept variance
# Visual check of the low-concentration calibration fit
calibration_low %>%
ggplot(aes(x = ratio_abs, y = conc, color = row)) +
geom_point() + geom_abline(slope = cal_slope, intercept = cal_intercept)
# Per-sample mean ratio, with measurement + fit variance propagated into the
# concentration error. The 43.25 divisor converts ug/ml to uM — presumably
# the GAP molecular weight in kDa; TODO confirm against the protein sequence.
gap_data <- data_index %>%
inner_join(., data, by = "well") %>%
group_by(sample) %>%
summarize("mean" = mean(ratio_abs), "x_var" = var(ratio_abs)) %>%
mutate( "ug_ml_conc_error" = sqrt( (m_var * x_var + cal_slope^2 * x_var + mean^2 * m_var) + b_var ) ) %>%
mutate("conc_ug_ml" = (cal_slope * mean + cal_intercept)) %>%
mutate("percent_error" = ug_ml_conc_error / conc_ug_ml * 100) %>%
mutate("conc_uM" = conc_ug_ml / 43.25, "um_error" = percent_error/100 * conc_uM)
#write_tsv(calibration, path = "GTPase_assay/Bradfords/20190214_Brad_cali.txt")
#write_tsv(gap_data, path = "GTPase_assay/Bradfords/20190214_Brad_data.txt")
### Gsp1
# Same procedure on the high-concentration series; note cal_slope, m_var,
# cal_intercept and b_var are reassigned here and shadow the enzyme fit.
calibration_high <- calibration %>% filter(sample %in% high_conc)
linear_fit <- lm(conc ~ ratio_abs, data = calibration_high)
cal_slope <- linear_fit$coefficients[2]
m_var <- (coef(summary(linear_fit))[2, 2])^2 # slope variance
cal_intercept <- linear_fit$coefficients[1]
b_var <- (coef(summary(linear_fit))[1, 2])^2 # intercept variance
# Visual check of the high-concentration calibration fit
calibration_high %>%
ggplot(aes(x = ratio_abs, y = conc, color = row)) +
geom_point() + geom_abline(slope = cal_slope, intercept = cal_intercept)
# Per-sample concentrations for Gsp1. The 24.8 divisor is presumably the
# Gsp1 molecular weight in kDa (ug/ml -> uM) — TODO confirm.
ran_data <- data_index %>%
inner_join(., data, by = "well") %>%
group_by(sample) %>%
summarize("mean" = mean(ratio_abs), "x_var" = var(ratio_abs)) %>%
mutate( "ug_ml_conc_error" = sqrt( (m_var * x_var + cal_slope^2 * x_var + mean^2 * m_var) + b_var ) ) %>%
mutate("conc_ug_ml" = (cal_slope * mean + cal_intercept)) %>%
mutate("percent_error" = ug_ml_conc_error / conc_ug_ml * 100) %>%
mutate("conc_uM" = conc_ug_ml / 24.8, "um_error" = percent_error/100 * conc_uM)
|
/GTPase_assay/Bradfords/Bradford.R
|
permissive
|
tinaperica/GEF_exchange_FRET
|
R
| false
| false
| 3,846
|
r
|
library(tidyverse)
### Bradford
## READ and FORMAT the raw ASCII data from Synergy H1
####### a function to read in all the biotek files, gather the data and reformat the time columns
read_and_gather <- function(file) {
print(file)
raw_in <- read_lines(file)
two_rows_before_data_row <- grep(raw_in, pattern = "Results")
n_rows_to_read <- 16
data_in <- read_tsv(file, col_names = F, skip = (two_rows_before_data_row + 1),
n_max = n_rows_to_read, locale = locale(encoding = 'windows-1250'))
names(data_in) <- c("row", as.character(seq(1, 12, 1)), "nM")
data_in <- data_in %>%
mutate("row" = c('A', 'A', 'B', 'B', 'C', 'C', 'D', 'D', 'E', 'E', 'F', 'F', 'G', 'G', 'H', 'H'))
data_gathered <- data_in %>%
gather(., key = col, value = abs, -nM, -row) %>%
filter( ! is.na(abs)) %>%
mutate("well" = str_c(row, col, sep = "")) %>%
select(-row, -col) %>%
spread(key = nM, value = abs) %>%
mutate("ratio_abs" = as.double(`595`)/as.double(`450`))
return(data_gathered)
}
# ---------------------------------------------------------------------------
# Bradford assay analysis (2019-02-28 plate): convert A595/A450 absorbance
# ratios into protein concentrations via linear calibration curves, one for
# the low-concentration standards ("enzyme") and one for the high-range
# standards ("Gsp1").
# ---------------------------------------------------------------------------
# Raw plate-reader export and the well index (well -> sample annotation).
file <- "GTPase_assay/Bradfords/20190228_Bradford2.txt"
index <- read_tsv("GTPase_assay/Bradfords/20190228_Bradford_index2.txt")
# Wells with a known `conc` are calibration standards; the rest are samples.
calibration_index <- index %>% filter(!is.na(conc))
data_index <- index %>% filter( is.na(conc) )
data <- read_and_gather(file)
# Quick QC scatter of the two raw absorbance channels.
data %>%
ggplot(aes(x = `450`, y = `595`)) + geom_point()
# Join standards to their measured ratios; derive plate row/column from well id.
calibration <- calibration_index %>%
inner_join(., data, by = "well") %>%
mutate("conc" = as.double(conc)) %>%
arrange(conc) %>%
mutate("row" = str_sub(well, 1, 1), "column" = str_sub(well, 2))
# Sample-name sets splitting the standards into the low- and high-range curves.
low_conc <- c("A", "B", "C", "D", "E", "F", "G", "buffer")
high_conc <- c("a", "b", "c", "d", "e", "f", "g", "g_half", "h", "i", "buff")
### enzyme
# Inverse calibration: regress the known concentration on the measured ratio,
# so samples can be converted directly via conc = slope * ratio + intercept.
calibration_low <- calibration %>% filter(sample %in% low_conc)
linear_fit <- lm(conc ~ ratio_abs, data = calibration_low)
cal_slope <- linear_fit$coefficients[2]
m_var <- (coef(summary(linear_fit))[2, 2])^2 # slope variance
cal_intercept <- linear_fit$coefficients[1]
b_var <- (coef(summary(linear_fit))[1, 2])^2 # intercept variance
# Visual check of the fit against the low-range standards.
calibration_low %>%
ggplot(aes(x = ratio_abs, y = conc, color = row)) +
geom_point() + geom_abline(slope = cal_slope, intercept = cal_intercept)
# Apply the low-range curve to the unknown samples: per-sample mean ratio and
# replicate variance, then propagated uncertainty and ug/ml -> uM conversion.
# NOTE(review): the error term mixes slope/intercept variances with the
# replicate variance (including an m_var * x_var cross term); confirm the
# propagation formula against the intended derivation.
# 43.25 is presumably the molar mass in kDa of this construct
# (ug/ml / kDa = uM) -- TODO confirm.
gap_data <- data_index %>%
inner_join(., data, by = "well") %>%
group_by(sample) %>%
summarize("mean" = mean(ratio_abs), "x_var" = var(ratio_abs)) %>%
mutate( "ug_ml_conc_error" = sqrt( (m_var * x_var + cal_slope^2 * x_var + mean^2 * m_var) + b_var ) ) %>%
mutate("conc_ug_ml" = (cal_slope * mean + cal_intercept)) %>%
mutate("percent_error" = ug_ml_conc_error / conc_ug_ml * 100) %>%
mutate("conc_uM" = conc_ug_ml / 43.25, "um_error" = percent_error/100 * conc_uM)
#write_tsv(calibration, path = "GTPase_assay/Bradfords/20190214_Brad_cali.txt")
#write_tsv(gap_data, path = "GTPase_assay/Bradfords/20190214_Brad_data.txt")
### Gsp1
# Same procedure for the high-range standards; note that linear_fit,
# cal_slope, m_var, cal_intercept and b_var are deliberately reassigned here,
# so the ran_data pipeline below uses the high-range curve.
calibration_high <- calibration %>% filter(sample %in% high_conc)
linear_fit <- lm(conc ~ ratio_abs, data = calibration_high)
cal_slope <- linear_fit$coefficients[2]
m_var <- (coef(summary(linear_fit))[2, 2])^2 # slope variance
cal_intercept <- linear_fit$coefficients[1]
b_var <- (coef(summary(linear_fit))[1, 2])^2 # intercept variance
# Visual check of the fit against the high-range standards.
calibration_high %>%
ggplot(aes(x = ratio_abs, y = conc, color = row)) +
geom_point() + geom_abline(slope = cal_slope, intercept = cal_intercept)
# Apply the high-range curve to the unknown samples.
# 24.8 is presumably the molar mass in kDa of Gsp1/Ran -- TODO confirm.
ran_data <- data_index %>%
inner_join(., data, by = "well") %>%
group_by(sample) %>%
summarize("mean" = mean(ratio_abs), "x_var" = var(ratio_abs)) %>%
mutate( "ug_ml_conc_error" = sqrt( (m_var * x_var + cal_slope^2 * x_var + mean^2 * m_var) + b_var ) ) %>%
mutate("conc_ug_ml" = (cal_slope * mean + cal_intercept)) %>%
mutate("percent_error" = ug_ml_conc_error / conc_ug_ml * 100) %>%
mutate("conc_uM" = conc_ug_ml / 24.8, "um_error" = percent_error/100 * conc_uM)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/image_analysis.R
\name{wbt_median_filter}
\alias{wbt_median_filter}
\title{Median filter}
\usage{
wbt_median_filter(
input,
output,
filterx = 11,
filtery = 11,
sig_digits = 2,
wd = NULL,
verbose_mode = FALSE
)
}
\arguments{
\item{input}{Input raster file.}
\item{output}{Output raster file.}
\item{filterx}{Size of the filter kernel in the x-direction.}
\item{filtery}{Size of the filter kernel in the y-direction.}
\item{sig_digits}{Number of significant digits.}
\item{wd}{Changes the working directory.}
\item{verbose_mode}{Sets verbose mode. If verbose mode is False, tools will not print output messages.}
}
\value{
Returns the tool text outputs.
}
\description{
Performs a median filter on an input image.
}
|
/man/wbt_median_filter.Rd
|
permissive
|
gitWayneZhang/whiteboxR
|
R
| false
| true
| 812
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/image_analysis.R
\name{wbt_median_filter}
\alias{wbt_median_filter}
\title{Median filter}
\usage{
wbt_median_filter(
input,
output,
filterx = 11,
filtery = 11,
sig_digits = 2,
wd = NULL,
verbose_mode = FALSE
)
}
\arguments{
\item{input}{Input raster file.}
\item{output}{Output raster file.}
\item{filterx}{Size of the filter kernel in the x-direction.}
\item{filtery}{Size of the filter kernel in the y-direction.}
\item{sig_digits}{Number of significant digits.}
\item{wd}{Changes the working directory.}
\item{verbose_mode}{Sets verbose mode. If verbose mode is False, tools will not print output messages.}
}
\value{
Returns the tool text outputs.
}
\description{
Performs a median filter on an input image.
}
|
# Extracted example script for clinDR::predict.emaxsim.
# The "## Not run:" / "##D" block below is the full documented example and is
# NOT executed; the "## Don't show:" block is the short version that actually
# runs (e.g. during automated checks). The marker comments are meaningful to
# example extraction tooling -- do not remove them.
library(clinDR)
### Name: predict.emaxsim
### Title: Mean response and SE for specified doses for each replicate data
### set in an emaxsim object
### Aliases: predict.emaxsim
### Keywords: nonlinear
### ** Examples
## Not run:
##D ## random number seed changed by this example
##D nsim<-50
##D idmax<-5
##D doselev<-c(0,5,25,50,100)
##D n<-c(78,81,81,81,77)
##D
##D ### population parameters for simulation
##D e0<-2.465375
##D ed50<-67.481113
##D emax<-15.127726
##D sdy<-7.967897
##D pop.parm<-c(log(ed50),emax,e0)
##D meanlev<-emaxfun(doselev,pop.parm)
##D
##D ###FixedMean is specialized constructor function for emaxsim
##D gen.parm<-FixedMean(n,doselev,meanlev,sdy)
##D D1 <- emaxsim(nsim,gen.parm)
##D
##D predout<-predict(D1,c(75,150))
## End(Not run)
## Don't show:
## random number seed changed by this example
# Small simulation (3 replicates) so this executed version runs quickly.
nsim<-3
doselev<-c(0,5,25,50,100)
n<-c(78,81,81,81,77)
### population parameters for simulation
e0<-2.465375
ed50<-67.481113
emax<-15.127726
sdy<-7.967897
pop.parm<-c(log(ed50),emax,e0)
meanlev<-emaxfun(doselev,pop.parm)
###FixedMean is specialized constructor function for emaxsim
gen.parm<-FixedMean(n,doselev,meanlev,sdy)
# nproc=1 keeps the executed example single-process.
D1 <- emaxsim(nsim,gen.parm,nproc=1)
# Predicted mean response and SE at doses 75 and 150 for each replicate.
predout<-predict(D1,c(75,150))
## End(Don't show)
|
/data/genthat_extracted_code/clinDR/examples/predict.emaxsim.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 1,272
|
r
|
# Extracted example script for clinDR::predict.emaxsim.
# The "## Not run:" / "##D" block below is the full documented example and is
# NOT executed; the "## Don't show:" block is the short version that actually
# runs (e.g. during automated checks). The marker comments are meaningful to
# example extraction tooling -- do not remove them.
library(clinDR)
### Name: predict.emaxsim
### Title: Mean response and SE for specified doses for each replicate data
### set in an emaxsim object
### Aliases: predict.emaxsim
### Keywords: nonlinear
### ** Examples
## Not run:
##D ## random number seed changed by this example
##D nsim<-50
##D idmax<-5
##D doselev<-c(0,5,25,50,100)
##D n<-c(78,81,81,81,77)
##D
##D ### population parameters for simulation
##D e0<-2.465375
##D ed50<-67.481113
##D emax<-15.127726
##D sdy<-7.967897
##D pop.parm<-c(log(ed50),emax,e0)
##D meanlev<-emaxfun(doselev,pop.parm)
##D
##D ###FixedMean is specialized constructor function for emaxsim
##D gen.parm<-FixedMean(n,doselev,meanlev,sdy)
##D D1 <- emaxsim(nsim,gen.parm)
##D
##D predout<-predict(D1,c(75,150))
## End(Not run)
## Don't show:
## random number seed changed by this example
# Small simulation (3 replicates) so this executed version runs quickly.
nsim<-3
doselev<-c(0,5,25,50,100)
n<-c(78,81,81,81,77)
### population parameters for simulation
e0<-2.465375
ed50<-67.481113
emax<-15.127726
sdy<-7.967897
pop.parm<-c(log(ed50),emax,e0)
meanlev<-emaxfun(doselev,pop.parm)
###FixedMean is specialized constructor function for emaxsim
gen.parm<-FixedMean(n,doselev,meanlev,sdy)
# nproc=1 keeps the executed example single-process.
D1 <- emaxsim(nsim,gen.parm,nproc=1)
# Predicted mean response and SE at doses 75 and 150 for each replicate.
predout<-predict(D1,c(75,150))
## End(Don't show)
|
#' Format method for Vega-Lite chart objects
#'
#' Displays the structure of an \code{altair.vegalite.v2.api.TopLevelMixin}
#' chart object using \code{utils::str()}.
#'
#' @param x A Vega-Lite \code{TopLevelMixin} chart object.
#' @param ... Additional arguments (currently ignored).
#'
#' @return The result of \code{utils::str(x)}; called primarily for its side
#'   effect of printing the object structure.
#'
#' NOTE(review): format() methods conventionally return a character vector;
#' this method prints via str() instead -- confirm this is intentional before
#' changing it, since callers may rely on the printed output.
#' @export
format.altair.vegalite.v2.api.TopLevelMixin <- function(x, ...){
  utils::str(x)
}
|
/R/format.R
|
permissive
|
vegawidget/altair
|
R
| false
| false
| 94
|
r
|
#' Format method for Vega-Lite chart objects
#'
#' Displays the structure of an \code{altair.vegalite.v2.api.TopLevelMixin}
#' chart object using \code{utils::str()}.
#'
#' @param x A Vega-Lite \code{TopLevelMixin} chart object.
#' @param ... Additional arguments (currently ignored).
#'
#' @return The result of \code{utils::str(x)}; called primarily for its side
#'   effect of printing the object structure.
#'
#' NOTE(review): format() methods conventionally return a character vector;
#' this method prints via str() instead -- confirm this is intentional before
#' changing it, since callers may rely on the printed output.
#' @export
format.altair.vegalite.v2.api.TopLevelMixin <- function(x, ...){
  utils::str(x)
}
|
# Plot 2: Global Active Power over 2007-02-01 and 2007-02-02, saved to plot2.png.
# Read the full household power dataset; "?" marks missing values in the raw file.
household_power_consumption <- read.csv("D:/Courses/data/household_power_consumption.txt", sep=";", na.strings="?")
# Build a combined date-time column from the separate Date and Time fields.
household_power_consumption<-cbind(paste(household_power_consumption$Date,household_power_consumption$Time), household_power_consumption)
names(household_power_consumption)[1]<-"DateTime"
household_power_consumption$Date<-as.Date(household_power_consumption$Date,"%d/%m/%Y")
household_power_consumption$DateTime<-strptime(household_power_consumption$DateTime, "%d/%m/%Y %H:%M:%S")
# Keep only the two days of interest.
data<-household_power_consumption[c(household_power_consumption$Date==as.Date("2007/02/01")|household_power_consumption$Date==as.Date("2007/02/02")),]
# BUG FIX: the PNG device must be opened *before* plotting. The original code
# drew the plot on the default device, then opened png() and immediately
# closed it, saving an empty plot2.png.
png(file="plot2.png", bg="transparent")
plot(data$DateTime, data$Global_active_power, type="l", ylab= "Global Active Power (kilowatts)", xlab="")
dev.off()
|
/plot2.R
|
no_license
|
Lee20110102/ExData_Plotting1
|
R
| false
| false
| 803
|
r
|
# Plot 2: Global Active Power over 2007-02-01 and 2007-02-02, saved to plot2.png.
# Read the full household power dataset; "?" marks missing values in the raw file.
household_power_consumption <- read.csv("D:/Courses/data/household_power_consumption.txt", sep=";", na.strings="?")
# Build a combined date-time column from the separate Date and Time fields.
household_power_consumption<-cbind(paste(household_power_consumption$Date,household_power_consumption$Time), household_power_consumption)
names(household_power_consumption)[1]<-"DateTime"
household_power_consumption$Date<-as.Date(household_power_consumption$Date,"%d/%m/%Y")
household_power_consumption$DateTime<-strptime(household_power_consumption$DateTime, "%d/%m/%Y %H:%M:%S")
# Keep only the two days of interest.
data<-household_power_consumption[c(household_power_consumption$Date==as.Date("2007/02/01")|household_power_consumption$Date==as.Date("2007/02/02")),]
# BUG FIX: the PNG device must be opened *before* plotting. The original code
# drew the plot on the default device, then opened png() and immediately
# closed it, saving an empty plot2.png.
png(file="plot2.png", bg="transparent")
plot(data$DateTime, data$Global_active_power, type="l", ylab= "Global Active Power (kilowatts)", xlab="")
dev.off()
|
/analysis.R
|
no_license
|
remiroazocar/population_adjustment_simstudy
|
R
| false
| false
| 34,629
|
r
| ||
library('knitr')
# BUG FIX: knitr does not export a render() function, so knitr::render()
# fails with "'render' is not an exported object from 'namespace:knitr'".
# Use knit2html() (as the original commented-out line suggested) to knit
# Help.Rmd to HTML.
knitr::knit2html('Help.Rmd')
|
/lib/R/knitHelp.R
|
no_license
|
Chebuu/bigrex
|
R
| false
| false
| 74
|
r
|
library('knitr')
# BUG FIX: knitr does not export a render() function, so knitr::render()
# fails with "'render' is not an exported object from 'namespace:knitr'".
# Use knit2html() (as the original commented-out line suggested) to knit
# Help.Rmd to HTML.
knitr::knit2html('Help.Rmd')
|
#' The \code{LR.cont.boot} function performs a Wald test for a SNP effect.
#' It refits the logistic model with the bootstrap response in place of the
#' original outcome, appends the SNP term, and returns the Wald-test p-value
#' of the last fitted coefficient. It is an internal function used by the
#' \link{bootstrap.snp} function.
#' @export
#' @title Wald test for an adjusted model.
#' @name LR.cont.boot
#' @param x numeric vector corresponding to the new response variable (from parametric bootstrap).
#' @param formula an object of class "formula" : a symbolic description of the model to be fitted
#' without the interaction term.
#' @param data a data frame containing the variables in the model.
#' @return p-value of the Wald test for a SNP effect.
#' @author Benoit Liquet \email{benoit.liquet@@isped.u-bordeaux2.fr}\cr
#' Therese Truong \email{therese.truong@@inserm.fr}
LR.cont.boot <- function(x,formula,data){
  # Attach the bootstrap response to the model frame under the name "yboot".
  boot_frame <- data.frame(data, yboot = x)
  # Refit with yboot as the outcome and the SNP term appended to the
  # covariates (the SNP variable `x` is resolved from the formula's
  # environment, as in the original model-building convention).
  fit <- glm(formula = update(formula, yboot ~ . + x),
             data = boot_frame,
             family = binomial(link = "logit"))
  coef_table <- coef(summary(fit))
  # Wald p-value of the last coefficient row (the SNP term).
  coef_table[nrow(coef_table), 4]
}
|
/R/LR.cont.boot.R
|
no_license
|
cran/PIGE
|
R
| false
| false
| 1,005
|
r
|
#' The \code{LR.cont.boot} function performs a Wald test for a SNP effect.
#' It refits the logistic model with the bootstrap response in place of the
#' original outcome, appends the SNP term, and returns the Wald-test p-value
#' of the last fitted coefficient. It is an internal function used by the
#' \link{bootstrap.snp} function.
#' @export
#' @title Wald test for an adjusted model.
#' @name LR.cont.boot
#' @param x numeric vector corresponding to the new response variable (from parametric bootstrap).
#' @param formula an object of class "formula" : a symbolic description of the model to be fitted
#' without the interaction term.
#' @param data a data frame containing the variables in the model.
#' @return p-value of the Wald test for a SNP effect.
#' @author Benoit Liquet \email{benoit.liquet@@isped.u-bordeaux2.fr}\cr
#' Therese Truong \email{therese.truong@@inserm.fr}
LR.cont.boot <- function(x,formula,data){
  # Attach the bootstrap response to the model frame under the name "yboot".
  boot_frame <- data.frame(data, yboot = x)
  # Refit with yboot as the outcome and the SNP term appended to the
  # covariates (the SNP variable `x` is resolved from the formula's
  # environment, as in the original model-building convention).
  fit <- glm(formula = update(formula, yboot ~ . + x),
             data = boot_frame,
             family = binomial(link = "logit"))
  coef_table <- coef(summary(fit))
  # Wald p-value of the last coefficient row (the SNP term).
  coef_table[nrow(coef_table), 4]
}
|
# Plot 3: energy sub-metering traces for 2007-02-01 and 2007-02-02,
# written to plot3.png.
# Read the raw export and keep only the two target days (dates are d/m/Y strings).
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", dec = ".", stringsAsFactors = FALSE)
sdata <- data[data$Date %in% c("1/2/2007", "2/2/2007"), ]
# Coerce the three sub-metering channels to numeric and build a POSIX timestamp
# from the separate Date and Time fields.
sub_meters <- lapply(sdata[c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")], as.numeric)
datetime <- strptime(paste(sdata$Date, sdata$Time, sep = " "), "%d/%m/%Y %H:%M:%S")
# Draw the three traces into a 480x480 PNG device (opened before plotting).
png("plot3.png", width = 480, height = 480)
plot(datetime, sub_meters[[1]], type = "l", xlab = "", ylab = "Energy sub metering")
lines(datetime, sub_meters[[2]], type = "l", col = "red")
lines(datetime, sub_meters[[3]], type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1, lwd = 2.5, col = c("black", "red", "blue"))
dev.off()
|
/plot3.R
|
no_license
|
Gu-Go/ExData_Plotting1
|
R
| false
| false
| 940
|
r
|
# Plot 3: energy sub-metering traces for 2007-02-01 and 2007-02-02,
# written to plot3.png.
# Read the raw export and keep only the two target days (dates are d/m/Y strings).
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", dec = ".", stringsAsFactors = FALSE)
sdata <- data[data$Date %in% c("1/2/2007", "2/2/2007"), ]
# Coerce the three sub-metering channels to numeric and build a POSIX timestamp
# from the separate Date and Time fields.
sub_meters <- lapply(sdata[c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")], as.numeric)
datetime <- strptime(paste(sdata$Date, sdata$Time, sep = " "), "%d/%m/%Y %H:%M:%S")
# Draw the three traces into a 480x480 PNG device (opened before plotting).
png("plot3.png", width = 480, height = 480)
plot(datetime, sub_meters[[1]], type = "l", xlab = "", ylab = "Energy sub metering")
lines(datetime, sub_meters[[2]], type = "l", col = "red")
lines(datetime, sub_meters[[3]], type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1, lwd = 2.5, col = c("black", "red", "blue"))
dev.off()
|
###################################################################################### Read Expression suite file
"readCT"
#' Read files from Expression Suite software output
#' @description Read files from Expression Suite software output.
#' @param file Name of the file to read including location.
#' @param skip Number of lines to skip before reading. Default is 14.
#' @param ... Additional arguments passed on to the underlying reader
#'   (\code{read.csv} for .txt/.csv files, \code{gdata::read.xls} for .xls
#'   files), e.g. \code{header}.
#'
#' @return Data frame. Need to develop in Expression class set.
#'
#' @details The input file can be in txt, csv or xls format.
#' @author Mohammad Tanvir Ahamed (mashranga@yahoo.com)
#'
#' @examples
#' sfile <- system.file("extdata", "Ahlam-biopsies_20170920_131310.txt", package = "AsaLab")
#' mdata <- readCT(file = sfile, skip = 14, header = FALSE)
#'
#' @import tools utils
#'
#'
#' @export
readCT <- function(file, skip = 14, ...)
{
  # Dispatch on the file extension; only txt, xls and csv are supported.
  # BUG FIX: the documented `...` arguments (e.g. header, as used in the
  # examples) were previously accepted but silently dropped; they are now
  # forwarded to the underlying reader.
  ext <- file_ext(file)
  if (ext == "txt") {
    # Tab-separated Expression Suite text export.
    targets <- read.csv(file = file, stringsAsFactors = FALSE, skip = skip, sep = "\t", ...)
  } else if (ext == "xls") {
    targets <- gdata::read.xls(xls = file, sheet = 1, skip = skip, ...)
  } else if (ext == "csv") {
    targets <- read.csv(file = file, stringsAsFactors = FALSE, skip = skip, ...)
  } else {
    stop("Input file should be in .txt, .xls or .csv format", call. = FALSE)
  }
  targets
}
###################################################################################### Filter expression suite file based on CT value condition
"filterCT"
#' Filter CT value on different condition.
#' @description Filter CT value on different condition. See details.
#' @param file Object output of \link[AsaLab]{readCT}.
#' @param sample Sample column name. Default "Sample.Name"
#' @param target Target column name. Default "Target.Name"
#' @param ct CT column name. Default "Ct"
#' @param ctlimit Limit to keep CT value. Default min= 15 and max = 40.
#' @param undet Value to replace undetermine. Default is NULL. Input value should be numeric.
#' @param ampscore Amplification score to reject the sample. Default 0.6
#' @param phenotype group classification for sample name.
#' @param ctd Absulute difference between ct values to accept. Default is 2. See user documentation for details.
#' @param omit If "TRUE" , sample with TRUE value in omit column will delete. Default is NULL.
#' @param out Name of output file to save the result in CSV formate. Default name "filterCT".
#'
#' @details Details of criteria to delete CT values.
#' For phenotype, The 1st column should be sample name and the column name will be same as the column name of sample in input file.
#' And the second column will be the classification for each sample.The unclassified sample will show NA. If the phonotype
#' dataframe is supplied, in the output dataframe a phenotype colun will show otherwise not.
#'
#' @return This function will return a dataframe of filtered CT values. Also this result will save in CSV formate in current working
#' directory and default name of the file is filterCT.csv
#'
#' @import psych
#'
#' @examples
#' ###### Example 1
#' # Read expression data
#' sfile <- "D:\\R Working\\Workflow\\Data\\Asa\\celiac3_20161222_123248_Results_Export.txt"
#' mdata <- readCT(file = sfile)
#' sname <- unlist(lapply(strsplit(mdata$Sample.Name," "),"[",1))
#' mdata$Sample.Name <- sname
#' # Read phynotype data
#' phen <- read.csv (file="D:\\R Working\\Workflow\\Data\\Asa\\role-multiple.txt", sep="\t")
#' # Filter CT valued data and add phynotype
#' res <- filterCT (file = mdata,omit = "True",phenotype = phen)
#'
#'
#' ###### Example 2
#' # Read expression data
#' sfile <- "D:\\R Working\\Workflow\\Data\\simon_20161102_134801_Results_Export.txt"
#' mdata <- readCT(file = sfile)
#' # Read phynotype data
#' phen <- read.csv (file="D:\\R Working\\Workflow\\Data\\pheno.txt",sep="\t")
#' # Filter CT valued data and add phynotype
#' res <- filterCT (file = mdata,omit = "True",phenotype = phen)
#'
#'
#' ###### Example 3
#' # Read expression data
#' sfile <- "D:\\R Working\\Workflow\\Data\\CT.txt"
#' mdata <- readCT(file = sfile, skip = 0, header = FALSE)
#' # Read phynotype data
#' phen <- read.csv (file="D:\\R Working\\Workflow\\Data\\phenotypes.txt",sep="\t")
#' # Filter CT valued data and add phynotype
#' res <- filterCT (file = mdata,sample = "Sample", target = "Detector", phenotype = phen)
#'
#'
#' ####### Example
#' # Read expression data
#' sfile <- system.file("extdata", "Ahlam-biopsies_20170920_131310.txt", package = "AsaLab")
#' mdata <- readCT(file = sfile, skip = 14, header = FALSE)
#'
#' # Read phynotype data
#' pfile <- system.file("extdata", "Ahlam-biopsies_20170920_131310_pheno.txt", package = "AsaLab")
#' phen <- read.csv (file = pfile, sep="\t")
#'
#' # Filter CT valued data and add phynotype
#' res <- filterCT (file = mdata,sample = "Sample.Name", target = "Target.Name", phenotype = phen)
#'
#' @export
filterCT <- function(file, sample = "Sample.Name",
target = "Target.Name", ct = "Ct",
ctlimit = c(15,40), ampscore = 0.6,
undet = NULL, phenotype = NULL,ctd = 2,
omit = NULL, out = "filterCT")
{
# Overview: clean raw Ct values, reconcile technical replicates per
# (sample, target) group using the `ctd` tolerance, average the surviving
# replicates, reshape to one row per sample with one column per target,
# optionally merge phenotype labels, and write the result to <out>.csv.
#
# Delete sample Omit = TRUE
# NOTE(review): if no row matches `omit`, `-which(...)` is `-integer(0)`,
# which drops EVERY row; guard with length(which(...)) > 0 if that can occur.
if(length(omit) > 0 ) file <- file[-which(file$Omit==omit),]
if(length(omit) ==0 ) file <- file
# Replace all "Undetermined" CT values, either with the user-supplied
# numeric `undet` or with NA.
if(length(undet) > 0)
{
if(is.numeric(undet)==FALSE) stop(" The value to replace undetermind should be numeric", call. = TRUE)
file[,ct][file[,ct]=="Undetermined"]<- undet
} else
{ file[,ct][file[,ct]=="Undetermined"]<-NA}
# Replace all CT value with NA outside of limit (ctlimit)
# (the class<- coercion turns any remaining non-numeric strings into NA).
class(file[,ct])<- "numeric"
file[,ct][which( file[,ct] < ctlimit[1] | file[,ct] > ctlimit[2])] <- NA
# Delete sample with amplification score < threshold
#file <- file[which(file$Amp.Score >= ampscore),]
# NOTE(review): the Amp.Score filter above is commented out, so the
# `ampscore` argument currently has no effect.
# Split data by sample name and target name (nested split -> one data frame
# per (sample, target) replicate group after unlist below).
file <- split(file, file[,sample])
file <- lapply(file, function(samplename) { res <- split(samplename, samplename[,target])})
# Delete the Target name whose all CT == NA
file1 <- unlist(file, recursive = FALSE)
file2 <- lapply(file1, function(sam) {res<- all(is.na(sam[,ct]))})
file <- file1[which(as.vector(unlist(file2))==FALSE)]
# Delete sample based on CT value.
# myfun_1 reconciles replicates within one (sample, target) group: depending
# on how many non-missing Ct values exist, it imputes missing values with the
# group mean, drops outlying replicates whose pairwise differences exceed
# `ctd`, or drops the whole group (returns NULL) when replicates disagree.
myfun_1 <- function(sam)
{
sam <- sam[order(sam[,ct]),]
a <-as.numeric(sam[,ct])
# If any vector has missing value
if ( any(is.na(a))==TRUE )
{
# n = 1 non missing
if (length(a[!is.na(a)]) == 1)
{
# Missing value is replace by the single value
sam[,ct][which(is.na(a))]<- mean(a,na.rm = TRUE)
}
# n = 2 non missing
if (length(a[!is.na(a)]) == 2)
{
# Consequtive difference (d) is > 2, delete the sample or if d <= 2, replace missing value by mean
a1 <- a[!is.na(a)]
if(abs(as.numeric(dist(a1))) > ctd) sam <-NULL
else
{
sam[,ct][which(is.na(a))]<- mean(a,na.rm = TRUE)
}
}
# n = 3 non missing
if (length(a[!is.na(a)]) == 3)
{
# Difference between all consecutive sample is greater than 2, delete it
a1 <- a[!is.na(a)]
d <- abs(diff(a1))
if(all(d > ctd)) sam <- NULL
else
{
# d2 > 2d1 or d1 > 2d2: drop the replicate on the far side of the gap.
if(d[2] > 2*d[1])
{
sam <- sam[-which(sam[,ct]==a1[3]),]
sam[,ct][which(is.na(sam[,ct]))] <- mean(sam[,ct],na.rm = TRUE)
}
if(d[1] > 2*d[2])
{
sam <- sam[-which(sam[,ct]==a1[1]),]
sam[,ct][which(is.na(sam[,ct]))] <- mean(sam[,ct],na.rm = TRUE)
}
}
}
# n >= 4 non missing
if (length(a[!is.na(a)]) >= 4)
{
# Delete those sample , Ct > median value
# NOTE(review): `-which(...)` drops all rows when nothing exceeds the
# median by `ctd`; also note the no-missing branch below uses `<` with
# positive indexing for the same rule -- confirm both are intended.
sam <- sam[-which(abs(a-median(a)) > ctd),]
sam[,ct][which(is.na(sam[,ct]))] <- mean(sam[,ct],na.rm = TRUE)
}
}
# No missing value
if ( any(is.na(a))==FALSE )
{
# n = 2
if(length(a)==2)
{
# Keep the pair only if the difference is <= ctd.
if(abs(as.numeric(dist(a))) > ctd) sam <-NULL
}
# n = 3
if(length(a) == 3)
{
# Difference between all consecutive sample is greater than 2, delete it
d <- abs(diff(a))
if(all(d > ctd)) sam <- NULL
else
{
# d2 > 2d1 or d1 > 2d2: drop the replicate on the far side of the gap.
if(d[2] > 2*d[1]) sam <- sam[-3,]
if(d[1] > 2*d[2]) sam <- sam[-1,]
}
}
# n >= 4
if(length(a) >= 4)
{
# Take median and keep replicates within `ctd` of it.
sam <- sam[which(abs(a-median(a)) < ctd),]
}
}
sam
}
file <- lapply(file, myfun_1)
# mufun_2 collapses each surviving replicate group to one row:
# (sample, target, mean Ct rounded to 3 decimals).
mufun_2 <- function(i)
{
res <- data.frame(t(c(unique(i[,sample]), unique(i[,target]), round(mean(as.numeric(i[,ct])),3 ))))
if (length(res) >= 2 ) names(res) <- c(sample, target , "Ct.Mean" )
else res <- NULL
res
}
file <- lapply(file,mufun_2)
file <- do.call(rbind, file)
###### Reshape data: wide format, one row per sample, one column per target.
file <- reshape(file, v.names = "Ct.Mean", idvar = sample, timevar = target, direction = "wide")
# Strip the "Ct.Mean." prefix that reshape() adds to the target columns.
nam <- c(sample, unlist(lapply(strsplit(names(file)[-1],"Ct.Mean."),"[[",2)))
names(file) <- nam
samname <- file[,sample]
file[,sample] <- NULL
file <- apply(file,2, function(i) as.numeric(as.character(i)) )
rownames(file) <- samname
file1<- file
###### Add phenotype data [Only if phenotype data supplied]
# Expected phenotype layout: column 1 = sample name (same name as `sample`),
# column 2 = group label; merged by sample name against the row names.
if (length(phenotype) > 0 )
{
if(ncol(phenotype) > 2 ) stop(" Phoneotype file should contain 2 column. Check the file.", call. = FALSE)
file1 <- data.frame(file1)
file1 <- merge(phenotype,file1, by.x= colnames(phenotype)[1], by.y = "row.names")
file1
}
if (length(phenotype) == 0 )
{
# No phenotype: promote row names to an explicit `sample` column.
sample <- rownames(file1)
file1 <- cbind(sample,file1)
file1 <- data.frame(file1)
rownames(file1) <- NULL
}
# Persist the filtered table and return it.
write.csv(x = file1, file = paste0(out,".csv"), row.names = FALSE)
file1
}
###################################################################################### Analyse CT values
"analyzeCT"
#' Analyze CT value on different condition.
#' @description Analyze CT values on defferent condition
#'
#' @param file data in Matrix. Output of \link[AsaLab]{filterCT} function.
#' @param skip Number of column to skip form 1st column in input file. Default is 2.
#' @param hkgene House keeping gene.
#' @param control Name of phynotype that will assign as control. Default is "CONTROL".
#' @param del Percentage of missing value for which any gene or sample will be excuded form analysis. Default is 0.7 (70 percentage).
#' @param missCT If missing value will replace with 40 or otheres. Default is NULL.
#' @param gylim Define Upper and lower limit for the boxplot. Default is c(-30,30).
#'
#' @return This function will return a list of deleted genes, deleted sample, delta CT values based on supplied housekeeping genes
#' and delta CT values based average of supplied housekeeping genes.
#' CT values will be saved in a csv file after deleteing the gene and sample with preferred percentage of missing values. Also save results
#' of the boxplot of delta CT values and p-values for t-test based on both individual and average of
#' housekeeping genes. Boxplot and p values will save in pdf formate and delta CT values based on both individual and average
#' housekeeping genes will save in CSV formate in current working directory.
#' @import impute
#' @import psych
#' @import lattice
#' @import graphics
#' @import grDevices
#' @import stats
#'
#' @examples
#' ####### Example 1
#' ## Read expression data #Sample : 384
#' sfile <- "D:\\R Working\\Workflow\\Data\\Asa\\celiac3_20161222_123248_Results_Export.txt"
#' mdata <- readCT(file = sfile)
#' sname <- unlist(lapply(strsplit(mdata$Sample.Name," "),"[",1))
#' mdata$Sample.Name <- sname
#' ## Read phynotype data #Sample : 446
#' phen <- read.csv (file="D:\\R Working\\Workflow\\Data\\Asa\\role-multiple.txt", sep="\t")
#' ## Filter CT valued data and add phynotype #Sample : 330
#' res <- filterCT (file = mdata,omit = "True",phenotype = phen)
#' ## Analyze CT values
#' hkg <- c("YWHAZ_047","GUSB_627","HPRT1_909")
#' CTres <- analyzeCT (file = res, skip=2, hkgene = hkg,control="CONTROL",del = 0.7, missCT = NULL)
#'
#'
#'
#' ###### Example 2
#' ## Read expression data #Sample : 80
#' sfile <- "D:\\R Working\\Workflow\\Data\\simon_20161102_134801_Results_Export.txt"
#' mdata <- readCT(file = sfile)
#' ## Read phynotype data #Sample : 80
#' phen <- read.csv (file="D:\\R Working\\Workflow\\Data\\pheno.txt",sep="\t")
#' ## Filter CT valued data and add phynotype #Sample : 80
#' res <- filterCT (file = mdata,omit = "True",phenotype = phen)
#' ## Analyze CT values
#' hkg <- c("YWHAZ","GUSB","HSPB1")
#' CTres <- analyzeCT (file = res, skip=2, hkgene = hkg,control="control",del = 0.7, missCT = 40)
#'
#'
#'
#' ###### Example 3
#' ## Read expression data #Sample : 55
#' sfile <- "D:\\R Working\\Workflow\\Data\\CT.txt"
#' mdata <- readCT(file = sfile, skip = 0, header = FALSE)
#' ## Read phynotype data #Sample : 114
#' phen <- read.csv (file="D:\\R Working\\Workflow\\Data\\phenotypes.txt",sep="\t")
#' ## Filter CT valued data and add phynotype #Sample : 54
#' res <- filterCT (file = mdata,sample = "Sample", target = "Detector", phenotype = phen)
#' hkg <- c("ZMYM2","GUSB_627","HFE2")
#' CTres <- analyzeCT (file = res, skip=2, hkgene = hkg,control="0",del = 0.7, missCT = 40)
#'
#'
#' ###### Example 4
#' ## Read expression data #Sample : 135
#' sfile <- "Z:\\TANVIR\\George\\Nasal Polys-George_20160808_150714_Results_Export (2).txt"
#' mdata <- readCT(file = sfile, skip = 0, header = FALSE)
#' ## Read phynotype data #Sample : 72
#' phen <- read.csv (file="Z:\\TANVIR\\George\\phenotypes.txt" ,sep="\t")
#' ## Filter CT valued data and add phynotype #Sample : 71
#' res <- filterCT (file = mdata,sample = "Sample.Name", target = "Target.Name", phenotype = phen)
#' hkg <- c("HPRT1","YWHAZ")
#' CTres <- analyzeCT (file = res, skip=2, hkgene = hkg,control="0",del = 0.1, missCT = 40)
#'
#'
#' ###### Example 5
#' ## Read expression data #Sample : 135
#' sfile <- "Z:\\TANVIR\\George\\control sample\\CD for analysis.txt"
#' mdata <- readCT(file = sfile, skip = 0, header = FALSE)
#' ## Read phynotype data #Sample : 72
#' phen <- read.csv (file="Z:\\TANVIR\\George\\control sample\\phenotypes.txt" ,sep="\t")
#' ## Filter CT valued data and add phynotype #Sample : 71
#' res <- filterCT(file=mdata,sample="Sample.Name",
#' target="Target.Name",undet=40,ctd=1,phenotype=phen)
#' hkge <- c("HPRT1.909","YWHAZ.047")
#' CTres <- analyzeCT (file = res, skip=2, hkgene = hkge,control="0",del = 0.8, missCT = NULL)
#'
#'
#' ##### Example
#' #' # Read expression data
#' sfile <- system.file("extdata", "Ahlam-biopsies_20170920_131310.txt", package = "AsaLab")
#' mdata <- readCT(file = sfile, skip = 14, header = FALSE)
#'
#' # Read phynotype data
#' pfile <- system.file("extdata", "Ahlam-biopsies_20170920_131310_pheno.txt", package = "AsaLab")
#' phen <- read.csv (file = pfile, sep="\t")
#'
#' # Filter CT valued data and add phynotype
#' res <- filterCT (file = mdata,sample = "Sample.Name", target = "Target.Name", phenotype = phen)
#'
#' hkge <- c("DUSP1","HPRT1","YWHAZ")
#' CTres <- analyzeCT (file = res, skip=2, hkgene = hkge,control="CONTROL",del = 0.9, missCT = NULL)
#'
#'
#'
#' @export
analyzeCT <- function(file, skip = 2, hkgene ,control="CONTROL", del = 0.7, missCT = NULL,gylim = c(-30,30))
{
if (class(file[,1])=="integer"){ class(file[,1])<-"character" }
if (class(file[,2])=="integer"){ class(file[,2])<-"character" }
###### Check house keepimng gene
if(any(hkgene%in%colnames(file)==FALSE)) {stop("\nError : House keeping gene not in data. Check house keeping gene name.\n","- Availabel gene name:\n",paste0(colnames(file)[-c(1:2)],sep = "; "),"\n- Supplied Housekeeping gene:\n",paste0(hkgene,sep = "; "),call. = TRUE)}
###### Separate CT values and sample information
sample <- file [,c(1:skip)]
ct <- data.frame(as.matrix(file [,-c(1:skip)]))
###### Delete gene with 70% missing value and Missing value imputation for house keeping gene
# Delete Gene for missing value
if (length(which(colMeans(is.na(file)) > del)) > 0 )
{
file <- ct[, -which(colMeans(is.na(ct)) > del) ]
#sample <- sample[, -which(colMeans(is.na(ct)) > del) ]
delGene <- colnames(ct)[which(colMeans(is.na(ct)) > del)]
message("Deleted Genes names for ",del*100,"% of missing values : \n", paste0(delGene, sep = "; "))
} else
{
file <- ct
sample <- sample
delGene <- NA
message("Deleted Genes names for ",del*100,"% of missing values : \n", delGene)
}
# Delete sample for missing value
if (length(which(rowMeans(is.na(file)) > del)) > 0 )
{
file1 <- file[-which(rowMeans(is.na(file)) > del), ]
delSample <- unique(as.vector(sample[,1][which(rowMeans(is.na(file)) > del)])) # Deleted sample
sample1 <- sample[-which(rowMeans(is.na(file)) > del),]
file <- file1
sample <- sample1
message("\nDeleted sample names for ",del*100,"% of missing values : \n", paste0(delSample, sep = "; "))
} else
{
file <- file
sample <- sample
delSample <- NA
message("\nDeleted sample names for ",del*100,"% of missing values : \n", NA)
}
# Impute missing value
#require(impute)
#file <- suppressWarnings(t(impute.knn(t(file))$data))
# Replace missing value with 40 or not
if(length(missCT) == 0)
{
file <- file
} else
{
file[is.na(file)]<- missCT
file <- file
}
###### Save file
write.csv(x = cbind(sample,file), file = "filterCT_1.csv", row.names = FALSE)
###### House keeping gene : Delta ct valuses
# Availabel house keeping gene
hkg <- as.list(hkgene[hkgene%in%colnames(file)])
hkg_ge <- file[,match(hkg, colnames(file))]
hkg_file <- data.frame(file[which(rowMeans(is.na(hkg_ge)) <= (length(hkg)-1)/length(hkg)), ]) ## Filter missing values
hkg_sa <- data.frame(sample[which(rowMeans(is.na(hkg_ge)) <= (length(hkg)-1)/length(hkg)), ]) ## Filter missing values
hkg_ge <- data.frame(hkg_ge[which(rowMeans(is.na(hkg_ge)) <= (length(hkg)-1)/length(hkg)), ]) ## Filter missing values
#write.csv(x = cbind(sample,file[,match(hkg, colnames(file))]), file = "hkg.csv", row.names = FALSE)
write.csv(x = cbind(hkg_sa,hkg_ge), file = "hkg.csv", row.names = FALSE)
# Average CT values form house keeping gene
#avgHKG <- rowMeans(file[,match(hkg, colnames(file))], na.rm = TRUE)
#avgCT <- round(file-avgHKG,3)
#avgCT <- data.frame(cbind(sample,avgCT))
#write.csv(x = avgCT, file = "avg_deltaCT.csv", row.names = FALSE)
avgHKG <- rowMeans(t(impute.knn(t(hkg_ge))$data), na.rm= TRUE)
#avgCT <- round(hkg_file-avgHKG,3)
avgCT <- round(avgHKG-hkg_file,3)
avgCT <- data.frame(cbind(hkg_sa,avgCT))
write.csv(x = avgCT, file = "avg_deltaCT.csv", row.names = FALSE)
# Form individual house keeping gene
file <- lapply(hkg,function(i)
{
res0 <- file[,!(colnames(file) == i)]
res1 <- file[,i]
#res1 <- file[,i,drop=FALSE]
res2 <- round(res1-res0,3)
res3 <- data.frame(cbind(sample,res2))
write.csv(x = res3, file = paste0(i,"_deltaCT",".csv"), row.names = FALSE)
res3
}
)
file$avCT <- avgCT
names(file) <- c(hkg, "avg_deltaCT")
delCT <- file
###### Split data by phinotype
file <- lapply(file, function(i) split(i, i[,2]))
###### Reshape data
#library(reshape)
file <- suppressMessages (lapply(file, reshape::melt))
file <- lapply(file, function(i)
{
res <- split(i, i[,3])
res
})
#print(file[[1]][[1]])
###### Ploting Gene
res <- suppressWarnings( lapply(seq_along(file), function(i)
{
## Name of refgene
nam <- names(file[i])
pdf(paste0(nam,".pdf"))
n <- length(file[[i]])
for(j in 1:n )
{
v <- file[[i]][[j]]
# Change levels to put CONTROL group at first
#print(paste0(i, " and ", j))
v[,2] <- factor(v[,2], levels= c(control,setdiff(unique(v[,2]),control)))
v <- v[order(v[,2]), ]
# Scalling data based on control group
v1 <- split(v,v[,2])
cn <- which(names(v1)==control)
mn <- mean(as.vector(v1[[cn]][,4]),na.rm = TRUE)
v2 <- v
v2[,4] <- v2[,4]-mn
v<-v2
# Group mean
po <- split(v,v[,2])
gmn <- lapply(po, function(i)
{
k <- mean(as.vector(i[,4]),na.rm = TRUE)
})
gmn <- unlist(gmn)
####### Plot bar plot and p-value
op <- par()
par(mfrow = c(1, 2))
# P-value
g<- v
colnames(g)[2]<- "CD"
kp<- combn(as.character(unique(g$CD)), 2,
FUN = function(y)
{
#print(y)
gk <- droplevels(g[g$CD %in% y ,])
if(any(rowSums(table(gk$CD,gk$value))<=1))
{
res <- list(0)
res$p.value <- NA
res
} else{
res<- t.test(value ~ CD, g[g$CD %in% y ,])
res
}
}
, simplify = FALSE)
pp<- unlist(lapply(kp, function(i) i$p.value))
combs <- combn(as.character(unique(g$CD)), 2)
N <- pp
inputs <- as.character(unique(g$CD))
out2 <- N
class(out2) <- "dist"
attr(out2, "Labels") <- as.character(inputs)
attr(out2, "Size") <- length(inputs)
attr(out2, "Diag") <- attr(out2, "Upper") <- FALSE
out2 <- round(as.matrix(out2),3)
out2[upper.tri(out2,diag = TRUE)] <- "-"
if (ncol(out2) > 2)
{
out2 <- out2[,colSums(is.na(out2)) < nrow(out2)]
out2 <- out2[rowSums(is.na(out2)) < ncol(out2),]
}
#require(lattice)
myPanel <- function(x, y, z, ...) {
panel.levelplot(x,y,z,...)
panel.text(x, y, out2[cbind(x,y)],cex = 0.3) ## use handy matrix indexing
}
p1<- suppressWarnings( levelplot(out2,panel =myPanel, scales=list(x=list(rot=90)),
xlab ="", ylab ="", main = "p-value",sub = "NA = Either 1 or 0 observation\n[No t-test has performed] ",
at = seq(0,0.05,by = 0.001),
col.regions = c(heat.colors(51))))
plot(1,axes=FALSE, col = "white", xlab = "", ylab="")
print(p1, position = c(0, 0, 0.5, 1), more = TRUE)
# Bar plot
if(length(names(table(v[,2]))) ==2 )
{
#boxplot(v[which(v[,2]==names(table(v[,2]))[1]),][,4], xlim = c(0.5, 2+0.5),ylim = range (v[,4],na.rm = TRUE),col="white",
# boxfill=rgb(1, 1, 1, alpha=1), border=rgb(1, 1, 1, alpha=1), ylab = "Fold",
# main = paste0("Ref gene: ", nam," \nGene: ", names(file[[i]][j] ))) #invisible boxes
boxplot(v[which(v[,2]==names(table(v[,2]))[1]),][,4], xlim = c(0.5, 2+0.5),ylim = c(gylim[1],gylim[2]),col="white",
boxfill=rgb(1, 1, 1, alpha=1), border=rgb(1, 1, 1, alpha=1), ylab = "Fold",
main = paste0("Ref gene: ", nam," \nGene: ", names(file[[i]][j] ))) #invisible boxes
boxplot(v[which(v[,2]==names(table(v[,2]))[1]),][,4], xaxt = "n", add = TRUE,at = 1) #shift these left by -0.15
points(rep(1, length(v[which(v[,2]==names(table(v[,2]))[1]),][,4] )),v[which(v[,2]==names(table(v[,2]))[1]),][,4],type = "p",cex=0.5, col = "orange")
points(1,gmn[1],col="blue",lwd = 2)
boxplot(v[which(v[,2]==names(table(v[,2]))[2]),][,4], xaxt = "n", add = TRUE,at = 2) #shift these left by -0.15
points(rep(2,length(v[which(v[,2]==names(table(v[,2]))[2]),][,4]) ),v[which(v[,2]==names(table(v[,2]))[2]),][,4],type = "p",cex=0.5, col = "orange")
points(2,gmn[2],col="blue",lwd = 2)
axis(1, at=c(1,2), labels = names(table(v[,2])) )
} else
{
#suppressWarnings(plot(v[,2],v[,4], main = paste0("Ref gene: ", nam," \nGene: ", names(file[[i]][j] )), ylab="Fold", xlab=NA,las = 2, cex.axis = 0.7,
# ylim = c( ifelse(is.infinite(min(v[,4],na.rm= TRUE))==TRUE,-10,min(v[,4],na.rm= TRUE)) ,
# ifelse(is.infinite(max(v[,4],na.rm= TRUE))==TRUE,10,max(v[,4],na.rm= TRUE))) ))
suppressWarnings(plot(v[,2],v[,4], main = paste0("Ref gene: ", nam," \nGene: ", names(file[[i]][j] )), ylab="Fold", xlab=NA,las = 2, cex.axis = 0.7,
ylim = c(gylim[1],gylim[2]) ))
points(v[,2],v[,4],type = "p",cex=0.5, col = "orange")
points(gmn, col = "blue", lwd = 2)
}
suppressWarnings (par(op))
}
dev.off()
}))
res <- list (del_gene = delGene,del_sample = delSample, delta_CT = delCT)
}
|
/R/ct.R
|
no_license
|
mashranga/GWAS-analysis
|
R
| false
| false
| 25,018
|
r
|
###################################################################################### Read Expression suite file
"readCT"
#' Read files from Expression Suite software output
#' @description Read files from Expression Suite software output.
#' @param file Name of the file to read, including location.
#' @param skip Number of lines to skip before reading. Default is 14.
#' @param ... Additional parameters forwarded to the underlying reader:
#'   \code{read.csv} for txt/csv input, \code{gdata::read.xls} for xls input
#'   (e.g. \code{header = FALSE}, as used in the examples).
#'
#' @return Data frame. Need to develop in Expression class set.
#'
#' @details The input file can be in txt, csv or xls format.
#' @author Mohammad Tanvir Ahamed (mashranga@yahoo.com)
#'
#' @examples
#' sfile <- system.file("extdata", "Ahlam-biopsies_20170920_131310.txt", package = "AsaLab")
#' mdata <- readCT(file = sfile, skip = 14, header = FALSE)
#'
#' @import tools utils
#'
#'
#' @export
readCT <- function(file, skip = 14, ...)
{
  ### Dispatch on the file extension; anything else is rejected up front.
  ext <- tools::file_ext(file)
  if (!(ext %in% c("txt", "xls", "csv"))) {
    stop("Input file should be in .txt, .xls or .csv format", call. = FALSE)
  }
  ### BUG FIX: '...' was previously accepted but never forwarded to the reader,
  ### so arguments such as 'header = FALSE' (used in the examples) were
  ### silently ignored. They are now passed through.
  targets <- switch(ext,
    txt = read.csv(file = file, stringsAsFactors = FALSE, skip = skip, sep = "\t", ...),
    xls = gdata::read.xls(xls = file, sheet = 1, skip = skip, ...),
    csv = read.csv(file = file, stringsAsFactors = FALSE, skip = skip, ...)
  )
  targets
}
###################################################################################### Filter expression suite file based on CT value condition
"filterCT"
#' Filter CT value on different condition.
#' @description Filter CT value on different condition. See details.
#' @param file Object output of \link[AsaLab]{readCT}.
#' @param sample Sample column name. Default "Sample.Name"
#' @param target Target column name. Default "Target.Name"
#' @param ct CT column name. Default "Ct"
#' @param ctlimit Limit to keep CT value. Default min= 15 and max = 40.
#' @param undet Value to replace undetermine. Default is NULL. Input value should be numeric.
#' @param ampscore Amplification score to reject the sample. Default 0.6
#' @param phenotype group classification for sample name.
#' @param ctd Absolute difference between ct values to accept. Default is 2. See user documentation for details.
#' @param omit If "TRUE" , sample with TRUE value in omit column will delete. Default is NULL.
#' @param out Name of output file to save the result in CSV formate. Default name "filterCT".
#'
#' @details Details of criteria to delete CT values.
#' For phenotype, the first column should be the sample name, and its column name must match the sample column name of the input file.
#' The second column gives the classification for each sample; unclassified samples will show NA. If the phenotype
#' data frame is supplied, the output data frame will contain a phenotype column; otherwise it will not.
#'
#' @return This function will return a dataframe of filtered CT values. Also this result will save in CSV formate in current working
#' directory and default name of the file is filterCT.csv
#'
#' @import psych
#'
#' @examples
#' ###### Example 1
#' # Read expression data
#' sfile <- "D:\\R Working\\Workflow\\Data\\Asa\\celiac3_20161222_123248_Results_Export.txt"
#' mdata <- readCT(file = sfile)
#' sname <- unlist(lapply(strsplit(mdata$Sample.Name," "),"[",1))
#' mdata$Sample.Name <- sname
#' # Read phynotype data
#' phen <- read.csv (file="D:\\R Working\\Workflow\\Data\\Asa\\role-multiple.txt", sep="\t")
#' # Filter CT valued data and add phynotype
#' res <- filterCT (file = mdata,omit = "True",phenotype = phen)
#'
#'
#' ###### Example 2
#' # Read expression data
#' sfile <- "D:\\R Working\\Workflow\\Data\\simon_20161102_134801_Results_Export.txt"
#' mdata <- readCT(file = sfile)
#' # Read phynotype data
#' phen <- read.csv (file="D:\\R Working\\Workflow\\Data\\pheno.txt",sep="\t")
#' # Filter CT valued data and add phynotype
#' res <- filterCT (file = mdata,omit = "True",phenotype = phen)
#'
#'
#' ###### Example 3
#' # Read expression data
#' sfile <- "D:\\R Working\\Workflow\\Data\\CT.txt"
#' mdata <- readCT(file = sfile, skip = 0, header = FALSE)
#' # Read phynotype data
#' phen <- read.csv (file="D:\\R Working\\Workflow\\Data\\phenotypes.txt",sep="\t")
#' # Filter CT valued data and add phynotype
#' res <- filterCT (file = mdata,sample = "Sample", target = "Detector", phenotype = phen)
#'
#'
#' ####### Example
#' # Read expression data
#' sfile <- system.file("extdata", "Ahlam-biopsies_20170920_131310.txt", package = "AsaLab")
#' mdata <- readCT(file = sfile, skip = 14, header = FALSE)
#'
#' # Read phynotype data
#' pfile <- system.file("extdata", "Ahlam-biopsies_20170920_131310_pheno.txt", package = "AsaLab")
#' phen <- read.csv (file = pfile, sep="\t")
#'
#' # Filter CT valued data and add phynotype
#' res <- filterCT (file = mdata,sample = "Sample.Name", target = "Target.Name", phenotype = phen)
#'
#' @export
filterCT <- function(file, sample = "Sample.Name",
                     target = "Target.Name", ct = "Ct",
                     ctlimit = c(15, 40), ampscore = 0.6,
                     undet = NULL, phenotype = NULL, ctd = 2,
                     omit = NULL, out = "filterCT")
{
  ## --- Drop samples flagged for omission -------------------------------------
  ## BUG FIX: the original used file[-which(file$Omit == omit), ], which removes
  ## EVERY row when no row matches, because x[-integer(0), ] selects zero rows.
  if (length(omit) > 0) {
    omit_idx <- which(file$Omit == omit)
    if (length(omit_idx) > 0) file <- file[-omit_idx, , drop = FALSE]
  }
  ## --- Normalise the CT column ----------------------------------------------
  ## Replace "Undetermined" with the user-supplied value, or with NA by default.
  if (length(undet) > 0)
  {
    if (is.numeric(undet) == FALSE) stop(" The value to replace undetermind should be numeric", call. = TRUE)
    file[, ct][file[, ct] == "Undetermined"] <- undet
  } else
  { file[, ct][file[, ct] == "Undetermined"] <- NA }
  ## CT values outside [ctlimit[1], ctlimit[2]] are treated as missing.
  class(file[, ct]) <- "numeric"
  file[, ct][which(file[, ct] < ctlimit[1] | file[, ct] > ctlimit[2])] <- NA
  ## Amplification-score filter intentionally disabled (kept from original):
  #file <- file[which(file$Amp.Score >= ampscore),]
  ## --- Split into per-sample / per-target replicate groups -------------------
  file <- split(file, file[, sample])
  file <- lapply(file, function(samplename) { res <- split(samplename, samplename[, target]) })
  ## Drop replicate groups whose CT values are all NA.
  file1 <- unlist(file, recursive = FALSE)
  file2 <- lapply(file1, function(sam) { res <- all(is.na(sam[, ct])) })
  file <- file1[which(as.vector(unlist(file2)) == FALSE)]
  ## --- Replicate-level quality control ---------------------------------------
  ## Returns the cleaned replicate group, or NULL when the replicates disagree
  ## too much (differences between CT values larger than 'ctd').
  myfun_1 <- function(sam)
  {
    sam <- sam[order(sam[, ct]), ]
    a <- as.numeric(sam[, ct])
    # If the replicate group contains missing values:
    if (any(is.na(a)) == TRUE)
    {
      # n = 1 non-missing: replicate the single observed value.
      if (length(a[!is.na(a)]) == 1)
      {
        sam[, ct][which(is.na(a))] <- mean(a, na.rm = TRUE)
      }
      # n = 2 non-missing: accept only if the two values agree within 'ctd',
      # then fill the missing value with their mean.
      if (length(a[!is.na(a)]) == 2)
      {
        a1 <- a[!is.na(a)]
        if (abs(as.numeric(dist(a1))) > ctd) sam <- NULL
        else
        {
          sam[, ct][which(is.na(a))] <- mean(a, na.rm = TRUE)
        }
      }
      # n = 3 non-missing: reject if all consecutive gaps exceed 'ctd';
      # otherwise drop the obvious outlier (one gap more than twice the other).
      if (length(a[!is.na(a)]) == 3)
      {
        a1 <- a[!is.na(a)]
        d <- abs(diff(a1))
        if (all(d > ctd)) sam <- NULL
        else
        {
          if (d[2] > 2 * d[1])
          {
            sam <- sam[-which(sam[, ct] == a1[3]), ]
            sam[, ct][which(is.na(sam[, ct]))] <- mean(sam[, ct], na.rm = TRUE)
          }
          if (d[1] > 2 * d[2])
          {
            sam <- sam[-which(sam[, ct] == a1[1]), ]
            sam[, ct][which(is.na(sam[, ct]))] <- mean(sam[, ct], na.rm = TRUE)
          }
        }
      }
      # n >= 4 non-missing: drop values far from the median.
      # BUG FIX: median() needs na.rm = TRUE here ('a' contains NA in this
      # branch, so median(a) was NA and the filter never worked), and the
      # -which() subset must be guarded against an empty match, which would
      # otherwise delete every row of the group.
      if (length(a[!is.na(a)]) >= 4)
      {
        far_idx <- which(abs(a - median(a, na.rm = TRUE)) > ctd)
        if (length(far_idx) > 0) sam <- sam[-far_idx, , drop = FALSE]
        sam[, ct][which(is.na(sam[, ct]))] <- mean(sam[, ct], na.rm = TRUE)
      }
    }
    # If the replicate group has no missing values:
    if (any(is.na(a)) == FALSE)
    {
      # n = 2: both replicates must agree within 'ctd'.
      if (length(a) == 2)
      {
        if (abs(as.numeric(dist(a))) > ctd) sam <- NULL
      }
      # n = 3: same outlier logic as the missing-value case above.
      if (length(a) == 3)
      {
        d <- abs(diff(a))
        if (all(d > ctd)) sam <- NULL
        else
        {
          if (d[2] > 2 * d[1]) sam <- sam[-3, ]
          if (d[1] > 2 * d[2]) sam <- sam[-1, ]
        }
      }
      # n >= 4: keep only values close to the median (strict '<', as original).
      if (length(a) >= 4)
      {
        sam <- sam[which(abs(a - median(a)) < ctd), ]
      }
    }
    sam
  }
  file <- lapply(file, myfun_1)
  ## --- Collapse each replicate group to its mean CT --------------------------
  mufun_2 <- function(i)
  {
    # BUG FIX: groups rejected by myfun_1 are NULL (or may be empty); the
    # original indexed into them and crashed with "incorrect number of
    # dimensions". Skip them instead (rbind below drops NULLs).
    if (is.null(i) || nrow(i) == 0) return(NULL)
    res <- data.frame(t(c(unique(i[, sample]), unique(i[, target]), round(mean(as.numeric(i[, ct])), 3))))
    if (length(res) >= 2) names(res) <- c(sample, target, "Ct.Mean")
    else res <- NULL
    res
  }
  file <- lapply(file, mufun_2)
  file <- do.call(rbind, file)
  ###### Reshape to one row per sample, one column per target gene
  file <- reshape(file, v.names = "Ct.Mean", idvar = sample, timevar = target, direction = "wide")
  nam <- c(sample, unlist(lapply(strsplit(names(file)[-1], "Ct.Mean."), "[[", 2)))
  names(file) <- nam
  samname <- file[, sample]
  file[, sample] <- NULL
  # NOTE(review): apply() collapses to a plain vector when only one sample
  # survives, which would break the rownames assignment below -- confirm
  # single-sample inputs are out of scope.
  file <- apply(file, 2, function(i) as.numeric(as.character(i)))
  rownames(file) <- samname
  file1 <- file
  ###### Add phenotype data [Only if phenotype data supplied]
  if (length(phenotype) > 0)
  {
    if (ncol(phenotype) > 2) stop(" Phoneotype file should contain 2 column. Check the file.", call. = FALSE)
    file1 <- data.frame(file1)
    file1 <- merge(phenotype, file1, by.x = colnames(phenotype)[1], by.y = "row.names")
    file1
  }
  if (length(phenotype) == 0)
  {
    # 'sample' is deliberately re-bound here so the output column is literally
    # named "sample".
    sample <- rownames(file1)
    file1 <- cbind(sample, file1)
    file1 <- data.frame(file1)
    rownames(file1) <- NULL
  }
  write.csv(x = file1, file = paste0(out, ".csv"), row.names = FALSE)
  file1
}
###################################################################################### Analyse CT values
"analyzeCT"
#' Analyze CT value on different condition.
#' @description Analyze CT values under different conditions.
#'
#' @param file data in Matrix. Output of \link[AsaLab]{filterCT} function.
#' @param skip Number of column to skip form 1st column in input file. Default is 2.
#' @param hkgene House keeping gene.
#' @param control Name of phynotype that will assign as control. Default is "CONTROL".
#' @param del Percentage of missing values for which any gene or sample will be excluded from analysis. Default is 0.7 (70 percent).
#' @param missCT If missing value will replace with 40 or otheres. Default is NULL.
#' @param gylim Define Upper and lower limit for the boxplot. Default is c(-30,30).
#'
#' @return This function will return a list of deleted genes, deleted sample, delta CT values based on supplied housekeeping genes
#' and delta CT values based average of supplied housekeeping genes.
#' CT values will be saved in a csv file after deleteing the gene and sample with preferred percentage of missing values. Also save results
#' of the boxplot of delta CT values and p-values for t-test based on both individual and average of
#' housekeeping genes. Boxplot and p values will save in pdf formate and delta CT values based on both individual and average
#' housekeeping genes will save in CSV formate in current working directory.
#' @import impute
#' @import psych
#' @import lattice
#' @import graphics
#' @import grDevices
#' @import stats
#'
#' @examples
#' ####### Example 1
#' ## Read expression data #Sample : 384
#' sfile <- "D:\\R Working\\Workflow\\Data\\Asa\\celiac3_20161222_123248_Results_Export.txt"
#' mdata <- readCT(file = sfile)
#' sname <- unlist(lapply(strsplit(mdata$Sample.Name," "),"[",1))
#' mdata$Sample.Name <- sname
#' ## Read phynotype data #Sample : 446
#' phen <- read.csv (file="D:\\R Working\\Workflow\\Data\\Asa\\role-multiple.txt", sep="\t")
#' ## Filter CT valued data and add phynotype #Sample : 330
#' res <- filterCT (file = mdata,omit = "True",phenotype = phen)
#' ## Analyze CT values
#' hkg <- c("YWHAZ_047","GUSB_627","HPRT1_909")
#' CTres <- analyzeCT (file = res, skip=2, hkgene = hkg,control="CONTROL",del = 0.7, missCT = NULL)
#'
#'
#'
#' ###### Example 2
#' ## Read expression data #Sample : 80
#' sfile <- "D:\\R Working\\Workflow\\Data\\simon_20161102_134801_Results_Export.txt"
#' mdata <- readCT(file = sfile)
#' ## Read phynotype data #Sample : 80
#' phen <- read.csv (file="D:\\R Working\\Workflow\\Data\\pheno.txt",sep="\t")
#' ## Filter CT valued data and add phynotype #Sample : 80
#' res <- filterCT (file = mdata,omit = "True",phenotype = phen)
#' ## Analyze CT values
#' hkg <- c("YWHAZ","GUSB","HSPB1")
#' CTres <- analyzeCT (file = res, skip=2, hkgene = hkg,control="control",del = 0.7, missCT = 40)
#'
#'
#'
#' ###### Example 3
#' ## Read expression data #Sample : 55
#' sfile <- "D:\\R Working\\Workflow\\Data\\CT.txt"
#' mdata <- readCT(file = sfile, skip = 0, header = FALSE)
#' ## Read phynotype data #Sample : 114
#' phen <- read.csv (file="D:\\R Working\\Workflow\\Data\\phenotypes.txt",sep="\t")
#' ## Filter CT valued data and add phynotype #Sample : 54
#' res <- filterCT (file = mdata,sample = "Sample", target = "Detector", phenotype = phen)
#' hkg <- c("ZMYM2","GUSB_627","HFE2")
#' CTres <- analyzeCT (file = res, skip=2, hkgene = hkg,control="0",del = 0.7, missCT = 40)
#'
#'
#' ###### Example 4
#' ## Read expression data #Sample : 135
#' sfile <- "Z:\\TANVIR\\George\\Nasal Polys-George_20160808_150714_Results_Export (2).txt"
#' mdata <- readCT(file = sfile, skip = 0, header = FALSE)
#' ## Read phynotype data #Sample : 72
#' phen <- read.csv (file="Z:\\TANVIR\\George\\phenotypes.txt" ,sep="\t")
#' ## Filter CT valued data and add phynotype #Sample : 71
#' res <- filterCT (file = mdata,sample = "Sample.Name", target = "Target.Name", phenotype = phen)
#' hkg <- c("HPRT1","YWHAZ")
#' CTres <- analyzeCT (file = res, skip=2, hkgene = hkg,control="0",del = 0.1, missCT = 40)
#'
#'
#' ###### Example 5
#' ## Read expression data #Sample : 135
#' sfile <- "Z:\\TANVIR\\George\\control sample\\CD for analysis.txt"
#' mdata <- readCT(file = sfile, skip = 0, header = FALSE)
#' ## Read phynotype data #Sample : 72
#' phen <- read.csv (file="Z:\\TANVIR\\George\\control sample\\phenotypes.txt" ,sep="\t")
#' ## Filter CT valued data and add phynotype #Sample : 71
#' res <- filterCT(file=mdata,sample="Sample.Name",
#' target="Target.Name",undet=40,ctd=1,phenotype=phen)
#' hkge <- c("HPRT1.909","YWHAZ.047")
#' CTres <- analyzeCT (file = res, skip=2, hkgene = hkge,control="0",del = 0.8, missCT = NULL)
#'
#'
#' ##### Example
#' #' # Read expression data
#' sfile <- system.file("extdata", "Ahlam-biopsies_20170920_131310.txt", package = "AsaLab")
#' mdata <- readCT(file = sfile, skip = 14, header = FALSE)
#'
#' # Read phynotype data
#' pfile <- system.file("extdata", "Ahlam-biopsies_20170920_131310_pheno.txt", package = "AsaLab")
#' phen <- read.csv (file = pfile, sep="\t")
#'
#' # Filter CT valued data and add phynotype
#' res <- filterCT (file = mdata,sample = "Sample.Name", target = "Target.Name", phenotype = phen)
#'
#' hkge <- c("DUSP1","HPRT1","YWHAZ")
#' CTres <- analyzeCT (file = res, skip=2, hkgene = hkge,control="CONTROL",del = 0.9, missCT = NULL)
#'
#'
#'
#' @export
analyzeCT <- function(file, skip = 2, hkgene ,control="CONTROL", del = 0.7, missCT = NULL,gylim = c(-30,30))
{
# Coerce integer ID columns (sample name / phenotype) to character so that
# split()/factor() downstream treat them as labels rather than numbers.
if (class(file[,1])=="integer"){ class(file[,1])<-"character" }
if (class(file[,2])=="integer"){ class(file[,2])<-"character" }
###### Check housekeeping genes: every supplied name must be a column of 'file'
if(any(hkgene%in%colnames(file)==FALSE)) {stop("\nError : House keeping gene not in data. Check house keeping gene name.\n","- Availabel gene name:\n",paste0(colnames(file)[-c(1:2)],sep = "; "),"\n- Supplied Housekeeping gene:\n",paste0(hkgene,sep = "; "),call. = TRUE)}
###### Separate CT values (columns after 'skip') from sample information
sample <- file [,c(1:skip)]
ct <- data.frame(as.matrix(file [,-c(1:skip)]))
###### Delete genes/samples whose fraction of missing values exceeds 'del'
# Delete genes (columns) with too many missing values.
# NOTE(review): the condition below inspects 'file' (which still contains the
# sample columns) while the actual filtering uses 'ct' -- the two can disagree;
# confirm this is intended.
if (length(which(colMeans(is.na(file)) > del)) > 0 )
{
file <- ct[, -which(colMeans(is.na(ct)) > del) ]
#sample <- sample[, -which(colMeans(is.na(ct)) > del) ]
delGene <- colnames(ct)[which(colMeans(is.na(ct)) > del)]
message("Deleted Genes names for ",del*100,"% of missing values : \n", paste0(delGene, sep = "; "))
} else
{
file <- ct
sample <- sample
delGene <- NA
message("Deleted Genes names for ",del*100,"% of missing values : \n", delGene)
}
# Delete samples (rows) with too many missing values; keep 'sample' aligned.
if (length(which(rowMeans(is.na(file)) > del)) > 0 )
{
file1 <- file[-which(rowMeans(is.na(file)) > del), ]
delSample <- unique(as.vector(sample[,1][which(rowMeans(is.na(file)) > del)])) # Deleted sample
sample1 <- sample[-which(rowMeans(is.na(file)) > del),]
file <- file1
sample <- sample1
message("\nDeleted sample names for ",del*100,"% of missing values : \n", paste0(delSample, sep = "; "))
} else
{
file <- file
sample <- sample
delSample <- NA
message("\nDeleted sample names for ",del*100,"% of missing values : \n", NA)
}
# Missing-value imputation (disabled; kept for reference)
#require(impute)
#file <- suppressWarnings(t(impute.knn(t(file))$data))
# Optionally replace remaining missing CT values with a fixed value (e.g. 40).
if(length(missCT) == 0)
{
file <- file
} else
{
file[is.na(file)]<- missCT
file <- file
}
###### Save the filtered CT matrix alongside the sample information
write.csv(x = cbind(sample,file), file = "filterCT_1.csv", row.names = FALSE)
###### Housekeeping genes : delta CT values
# Available housekeeping genes (those that survived the missing-value filter).
hkg <- as.list(hkgene[hkgene%in%colnames(file)])
hkg_ge <- file[,match(hkg, colnames(file))]
# Keep only rows where at least one housekeeping gene is observed.
hkg_file <- data.frame(file[which(rowMeans(is.na(hkg_ge)) <= (length(hkg)-1)/length(hkg)), ]) ## Filter missing values
hkg_sa <- data.frame(sample[which(rowMeans(is.na(hkg_ge)) <= (length(hkg)-1)/length(hkg)), ]) ## Filter missing values
hkg_ge <- data.frame(hkg_ge[which(rowMeans(is.na(hkg_ge)) <= (length(hkg)-1)/length(hkg)), ]) ## Filter missing values
#write.csv(x = cbind(sample,file[,match(hkg, colnames(file))]), file = "hkg.csv", row.names = FALSE)
write.csv(x = cbind(hkg_sa,hkg_ge), file = "hkg.csv", row.names = FALSE)
# Average CT value across the housekeeping genes (after knn imputation of the
# housekeeping block; impute.knn comes from the Bioconductor 'impute' package).
#avgHKG <- rowMeans(file[,match(hkg, colnames(file))], na.rm = TRUE)
#avgCT <- round(file-avgHKG,3)
#avgCT <- data.frame(cbind(sample,avgCT))
#write.csv(x = avgCT, file = "avg_deltaCT.csv", row.names = FALSE)
avgHKG <- rowMeans(t(impute.knn(t(hkg_ge))$data), na.rm= TRUE)
#avgCT <- round(hkg_file-avgHKG,3)
# NOTE(review): the sign convention here is (reference - gene), i.e. the
# negative of the commented-out line above -- confirm intended.
avgCT <- round(avgHKG-hkg_file,3)
avgCT <- data.frame(cbind(hkg_sa,avgCT))
write.csv(x = avgCT, file = "avg_deltaCT.csv", row.names = FALSE)
# Delta CT from each individual housekeeping gene; one CSV per reference gene.
file <- lapply(hkg,function(i)
{
res0 <- file[,!(colnames(file) == i)]
res1 <- file[,i]
#res1 <- file[,i,drop=FALSE]
res2 <- round(res1-res0,3)
res3 <- data.frame(cbind(sample,res2))
write.csv(x = res3, file = paste0(i,"_deltaCT",".csv"), row.names = FALSE)
res3
}
)
file$avCT <- avgCT
names(file) <- c(hkg, "avg_deltaCT")
delCT <- file
###### Split data by phenotype (column 2 of each delta-CT frame)
file <- lapply(file, function(i) split(i, i[,2]))
###### Reshape data to long format (reshape::melt, from the 'reshape' package)
#library(reshape)
file <- suppressMessages (lapply(file, reshape::melt))
# Re-split the long data by target gene (column 3 after melting).
file <- lapply(file, function(i)
{
res <- split(i, i[,3])
res
})
#print(file[[1]][[1]])
###### Plotting: one PDF per reference gene, one page per target gene
res <- suppressWarnings( lapply(seq_along(file), function(i)
{
## Name of refgene
nam <- names(file[i])
pdf(paste0(nam,".pdf"))
n <- length(file[[i]])
for(j in 1:n )
{
v <- file[[i]][[j]]
# Change levels to put CONTROL group at first
#print(paste0(i, " and ", j))
v[,2] <- factor(v[,2], levels= c(control,setdiff(unique(v[,2]),control)))
v <- v[order(v[,2]), ]
# Scaling data based on control group (centre on the control-group mean).
v1 <- split(v,v[,2])
cn <- which(names(v1)==control)
mn <- mean(as.vector(v1[[cn]][,4]),na.rm = TRUE)
v2 <- v
v2[,4] <- v2[,4]-mn
v<-v2
# Group mean per phenotype, drawn as a blue marker on the plots below.
po <- split(v,v[,2])
gmn <- lapply(po, function(i)
{
k <- mean(as.vector(i[,4]),na.rm = TRUE)
})
gmn <- unlist(gmn)
####### Plot bar plot and p-value, side by side on one page
op <- par()
par(mfrow = c(1, 2))
# Pairwise t-tests between all phenotype groups; NA when a group has <= 1
# observation (no t-test possible).
g<- v
colnames(g)[2]<- "CD"
kp<- combn(as.character(unique(g$CD)), 2,
FUN = function(y)
{
#print(y)
gk <- droplevels(g[g$CD %in% y ,])
if(any(rowSums(table(gk$CD,gk$value))<=1))
{
res <- list(0)
res$p.value <- NA
res
} else{
res<- t.test(value ~ CD, g[g$CD %in% y ,])
res
}
}
, simplify = FALSE)
pp<- unlist(lapply(kp, function(i) i$p.value))
combs <- combn(as.character(unique(g$CD)), 2)
# Pack the pairwise p-values into a labelled "dist" object so as.matrix()
# lays them out as a lower-triangular group-by-group matrix.
N <- pp
inputs <- as.character(unique(g$CD))
out2 <- N
class(out2) <- "dist"
attr(out2, "Labels") <- as.character(inputs)
attr(out2, "Size") <- length(inputs)
attr(out2, "Diag") <- attr(out2, "Upper") <- FALSE
out2 <- round(as.matrix(out2),3)
out2[upper.tri(out2,diag = TRUE)] <- "-"
# Drop all-NA rows/columns (groups with no computable p-value at all).
if (ncol(out2) > 2)
{
out2 <- out2[,colSums(is.na(out2)) < nrow(out2)]
out2 <- out2[rowSums(is.na(out2)) < ncol(out2),]
}
#require(lattice)
# Heatmap panel that overlays the numeric p-value in each cell.
myPanel <- function(x, y, z, ...) {
panel.levelplot(x,y,z,...)
panel.text(x, y, out2[cbind(x,y)],cex = 0.3) ## use handy matrix indexing
}
p1<- suppressWarnings( levelplot(out2,panel =myPanel, scales=list(x=list(rot=90)),
xlab ="", ylab ="", main = "p-value",sub = "NA = Either 1 or 0 observation\n[No t-test has performed] ",
at = seq(0,0.05,by = 0.001),
col.regions = c(heat.colors(51))))
plot(1,axes=FALSE, col = "white", xlab = "", ylab="")
print(p1, position = c(0, 0, 0.5, 1), more = TRUE)
# Box/strip plot of the scaled delta-CT values ("Fold") per phenotype group;
# the two-group case is drawn manually so both boxes share one axis.
if(length(names(table(v[,2]))) ==2 )
{
#boxplot(v[which(v[,2]==names(table(v[,2]))[1]),][,4], xlim = c(0.5, 2+0.5),ylim = range (v[,4],na.rm = TRUE),col="white",
#        boxfill=rgb(1, 1, 1, alpha=1), border=rgb(1, 1, 1, alpha=1), ylab = "Fold",
#        main = paste0("Ref gene: ", nam," \nGene: ", names(file[[i]][j] ))) #invisible boxes
boxplot(v[which(v[,2]==names(table(v[,2]))[1]),][,4], xlim = c(0.5, 2+0.5),ylim = c(gylim[1],gylim[2]),col="white",
boxfill=rgb(1, 1, 1, alpha=1), border=rgb(1, 1, 1, alpha=1), ylab = "Fold",
main = paste0("Ref gene: ", nam," \nGene: ", names(file[[i]][j] ))) #invisible boxes
boxplot(v[which(v[,2]==names(table(v[,2]))[1]),][,4], xaxt = "n", add = TRUE,at = 1) #shift these left by -0.15
points(rep(1, length(v[which(v[,2]==names(table(v[,2]))[1]),][,4] )),v[which(v[,2]==names(table(v[,2]))[1]),][,4],type = "p",cex=0.5, col = "orange")
points(1,gmn[1],col="blue",lwd = 2)
boxplot(v[which(v[,2]==names(table(v[,2]))[2]),][,4], xaxt = "n", add = TRUE,at = 2) #shift these left by -0.15
points(rep(2,length(v[which(v[,2]==names(table(v[,2]))[2]),][,4]) ),v[which(v[,2]==names(table(v[,2]))[2]),][,4],type = "p",cex=0.5, col = "orange")
points(2,gmn[2],col="blue",lwd = 2)
axis(1, at=c(1,2), labels = names(table(v[,2])) )
} else
{
#suppressWarnings(plot(v[,2],v[,4], main = paste0("Ref gene: ", nam," \nGene: ", names(file[[i]][j] )), ylab="Fold", xlab=NA,las = 2, cex.axis = 0.7,
#     ylim = c( ifelse(is.infinite(min(v[,4],na.rm= TRUE))==TRUE,-10,min(v[,4],na.rm= TRUE)) ,
#               ifelse(is.infinite(max(v[,4],na.rm= TRUE))==TRUE,10,max(v[,4],na.rm= TRUE))) ))
suppressWarnings(plot(v[,2],v[,4], main = paste0("Ref gene: ", nam," \nGene: ", names(file[[i]][j] )), ylab="Fold", xlab=NA,las = 2, cex.axis = 0.7,
ylim = c(gylim[1],gylim[2]) ))
points(v[,2],v[,4],type = "p",cex=0.5, col = "orange")
points(gmn, col = "blue", lwd = 2)
}
suppressWarnings (par(op))
}
dev.off()
}))
# Return deleted genes/samples plus the per-reference-gene delta-CT frames.
res <- list (del_gene = delGene,del_sample = delSample, delta_CT = delCT)
}
|
#Random Rectangles Shiny App
#
# Created By Jason Preszler, 2018
#
# License: GPLv3
#
#
library(shiny)
library(dplyr)
library(ggplot2)
library(stringr)
library(markdown)
# NOTE(review): the 'googlesheets' package is deprecated upstream; migration to
# 'googlesheets4' will eventually be required -- confirm before next term.
library(googlesheets)
#######
#UPDATE EACH TERM?
#######
term<- "Spring 2019"
#get data
# Pipe-delimited apartment table; the code below uses columns apartNumber,
# Xcoord, Ycoord and area.
bugtownDF <- read.csv("data.org", sep="|", header=TRUE, strip.white = TRUE)
#clean
# Drop malformed/blank rows that have no apartment number.
bugtownDF <- filter(bugtownDF, !is.na(apartNumber))
# Authenticate with a pre-generated token and open the submission sheet by key.
gs_auth(token="shiny_token.rds")
subSheet <- gs_key("1Q_DsqB1roB2OJyAhlqaEMOJ6Qa_lYKZa3QUNn2BHUno")
ui <- fluidPage(
# Application title
titlePanel("Random Rectangles"),
# Sidebar with a slider input for number of bins
# NOTE(review): selectInput's 'selected' matches a choice VALUE; the values here
# are "-1","5","10","15", so selected = "All" matches nothing and Shiny falls
# back to the first choice ("All" = -1) -- same net effect, but confirm.
sidebarLayout(
sidebarPanel(
selectInput(inputId="apartmentSample",
label="Number of Apartments to sample:",
choices=c("All"=-1,"5"=5,"10"=10, "15"=15),selected = "All"),
h4("Area Information"),
dataTableOutput("sampArea")
),
# Show a plot of the generated distribution
mainPanel(
tabsetPanel(type="tabs",
tabPanel("Instructions",includeMarkdown("instructions.md")),
tabPanel("BugTown Apartments",h3("BugTown Apartments"),
plotOutput("apartPlot", height = "700px")),
tabPanel("area submission",
textInput("method2", label = h3("Apts. For Method 2:"), value = "Enter Apt. numbers sep. by commas..."),
hr(),
textInput("method3", label = h3("Apts. For Method 3:"), value = "Enter Apt. numbers sep. by commas..."),
#tags$head(tags$script(src = "message-handler.js")),
hr(),
actionButton("submit", "Submit", class = "btn-primary"),
hr(),
h3("Data Submitted:"),
fluidRow(column(4, verbatimTextOutput("value2"))),
fluidRow(column(4, verbatimTextOutput("value3")))
)
) #,
#h4("Area Information"),
# dataTableOutput("sampArea")
)
)
)
# Define server logic required to draw a histogram
server <- function(input, output) {
# Reactive subset of apartments: the whole table when "All" (-1) is chosen,
# otherwise the rows for a random sample of apartment numbers.
reactBTsamp<- reactive({
req(input$apartmentSample)
#get sample
apts<-unique(bugtownDF$apartNumber)
# NOTE(review): input$apartmentSample is a character string; the '== -1' test
# and sample()'s 'size' argument both rely on implicit coercion -- consider
# as.numeric() if this misbehaves.
if(input$apartmentSample==-1){
btSample<- bugtownDF
}
else{
btSample <- bugtownDF[bugtownDF$apartNumber %in% sample(apts, input$apartmentSample),]
}
})
# Draw the sampled apartments as labelled white rectangles.
output$apartPlot <- renderPlot({
#plot sampled appartments
ggplot(reactBTsamp(), aes(x=Xcoord, y=Ycoord))+geom_tile(col="black", fill="white", size=1)+geom_text(aes(label=apartNumber))+theme_void()
})
#using eventReactive instead of observeEvent to only return
#submitted data for each press of submit
# Appends the two method entries plus the current term to the Google sheet,
# then echoes the submitted values back to the UI.
vals <- eventReactive(
input$submit,{
gs_add_row(subSheet, input = c(input$method2,input$method3,term))
data.frame(method2=input$method2, method3=input$method3)
}
)
output$value2 <- renderPrint({ vals()$method2})
output$value3 <- renderPrint({ vals()$method3})
# Mean area per sampled apartment, shown in the sidebar table.
output$sampArea <- renderDataTable({
reactBTsamp() %>% dplyr::select(apartNumber, area) %>% transmute(Apartment=apartNumber, area=area) %>% group_by(Apartment) %>% summarise(Area=mean(area))
}, options = list(searching=FALSE))
#to only allow submit with valid input, needs work
# observe({
# check if all mandatory fields have a value
# mandatoryFilled <-
# vapply(c(input$area2,input$area3),
# function(x) {
# str_detect(x, "[:digit:]+,[:digit:]+,[:digit:]+,[:digit:]+,[:digit:]+")
# },
# logical(1))
# mandatoryFilled <- all(mandatoryFilled)
# enable/disable the submit button
# shinyjs::toggleState(id = "submit", condition = mandatoryFilled)
# })
}
# Run the application
shinyApp(ui = ui, server = server)
|
/Random_Rectangles/app.R
|
no_license
|
jpreszler/CofI-Shiny
|
R
| false
| false
| 3,960
|
r
|
#Random Rectangles Shiny App
#
# Created By Jason Preszler, 2018
#
# License: GPLv3
#
#
library(shiny)
library(dplyr)
library(ggplot2)
library(stringr)
library(markdown)
# NOTE(review): the 'googlesheets' package is deprecated upstream; migration to
# 'googlesheets4' will eventually be required -- confirm before next term.
library(googlesheets)
#######
#UPDATE EACH TERM?
#######
term<- "Spring 2019"
#get data
# Pipe-delimited apartment table; the code below uses columns apartNumber,
# Xcoord, Ycoord and area.
bugtownDF <- read.csv("data.org", sep="|", header=TRUE, strip.white = TRUE)
#clean
# Drop malformed/blank rows that have no apartment number.
bugtownDF <- filter(bugtownDF, !is.na(apartNumber))
# Authenticate with a pre-generated token and open the submission sheet by key.
gs_auth(token="shiny_token.rds")
subSheet <- gs_key("1Q_DsqB1roB2OJyAhlqaEMOJ6Qa_lYKZa3QUNn2BHUno")
ui <- fluidPage(
# Application title
titlePanel("Random Rectangles"),
# Sidebar with a slider input for number of bins
# NOTE(review): selectInput's 'selected' matches a choice VALUE; the values here
# are "-1","5","10","15", so selected = "All" matches nothing and Shiny falls
# back to the first choice ("All" = -1) -- same net effect, but confirm.
sidebarLayout(
sidebarPanel(
selectInput(inputId="apartmentSample",
label="Number of Apartments to sample:",
choices=c("All"=-1,"5"=5,"10"=10, "15"=15),selected = "All"),
h4("Area Information"),
dataTableOutput("sampArea")
),
# Show a plot of the generated distribution
mainPanel(
tabsetPanel(type="tabs",
tabPanel("Instructions",includeMarkdown("instructions.md")),
tabPanel("BugTown Apartments",h3("BugTown Apartments"),
plotOutput("apartPlot", height = "700px")),
tabPanel("area submission",
textInput("method2", label = h3("Apts. For Method 2:"), value = "Enter Apt. numbers sep. by commas..."),
hr(),
textInput("method3", label = h3("Apts. For Method 3:"), value = "Enter Apt. numbers sep. by commas..."),
#tags$head(tags$script(src = "message-handler.js")),
hr(),
actionButton("submit", "Submit", class = "btn-primary"),
hr(),
h3("Data Submitted:"),
fluidRow(column(4, verbatimTextOutput("value2"))),
fluidRow(column(4, verbatimTextOutput("value3")))
)
) #,
#h4("Area Information"),
# dataTableOutput("sampArea")
)
)
)
# Define server logic required to draw a histogram
server <- function(input, output) {
# Reactive subset of apartments: the whole table when "All" (-1) is chosen,
# otherwise the rows for a random sample of apartment numbers.
reactBTsamp<- reactive({
req(input$apartmentSample)
#get sample
apts<-unique(bugtownDF$apartNumber)
# NOTE(review): input$apartmentSample is a character string; the '== -1' test
# and sample()'s 'size' argument both rely on implicit coercion -- consider
# as.numeric() if this misbehaves.
if(input$apartmentSample==-1){
btSample<- bugtownDF
}
else{
btSample <- bugtownDF[bugtownDF$apartNumber %in% sample(apts, input$apartmentSample),]
}
})
# Draw the sampled apartments as labelled white rectangles.
output$apartPlot <- renderPlot({
#plot sampled appartments
ggplot(reactBTsamp(), aes(x=Xcoord, y=Ycoord))+geom_tile(col="black", fill="white", size=1)+geom_text(aes(label=apartNumber))+theme_void()
})
#using eventReactive instead of observeEvent to only return
#submitted data for each press of submit
# Appends the two method entries plus the current term to the Google sheet,
# then echoes the submitted values back to the UI.
vals <- eventReactive(
input$submit,{
gs_add_row(subSheet, input = c(input$method2,input$method3,term))
data.frame(method2=input$method2, method3=input$method3)
}
)
output$value2 <- renderPrint({ vals()$method2})
output$value3 <- renderPrint({ vals()$method3})
# Mean area per sampled apartment, shown in the sidebar table.
output$sampArea <- renderDataTable({
reactBTsamp() %>% dplyr::select(apartNumber, area) %>% transmute(Apartment=apartNumber, area=area) %>% group_by(Apartment) %>% summarise(Area=mean(area))
}, options = list(searching=FALSE))
#to only allow submit with valid input, needs work
# observe({
# check if all mandatory fields have a value
# mandatoryFilled <-
# vapply(c(input$area2,input$area3),
# function(x) {
# str_detect(x, "[:digit:]+,[:digit:]+,[:digit:]+,[:digit:]+,[:digit:]+")
# },
# logical(1))
# mandatoryFilled <- all(mandatoryFilled)
# enable/disable the submit button
# shinyjs::toggleState(id = "submit", condition = mandatoryFilled)
# })
}
# Run the application
shinyApp(ui = ui, server = server)
|
\name{as.scidb}
\alias{as.scidb}
\title{
Upload an R matrix or data.frame to a SciDB array.
}
\description{
Cast an R matrix or data.frame to a SciDB array, returning a reference scidb object.
}
\usage{
as.scidb(X, name = ifelse(exists(as.character(match.call()[2])), as.character(match.call()[2]), tmpnam("array")), rowChunkSize = 1000L, colChunkSize = 1000L, start = c(0L,0L), gc=FALSE, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{X}{ A matrix of double-precision floating point values or a data.frame. }
\item{name}{ The name of the SciDB array to create, defaulting to the R variable name if available. }
\item{rowChunkSize}{Maximum SciDB chunk size for the 1st array dimension.}
\item{colChunkSize}{Maximum SciDB chunk size for the 2nd array dimension (ignored for vectors and data.frames). }
\item{start}{Starting dimension numeric index value or values.}
\item{gc}{Set to TRUE to remove SciDB array when R object is garbage collected or R exits. FALSE means SciDB array persists.}
\item{\dots}{ additional arguments to pass to \code{df2scidb} (see \code{\link{df2scidb}}).}
}
\details{
Used with a matrix or vector argument,
the \code{as.scidb} function creates a single-attribute SciDB array named
\code{name} and copies the data from \code{X} into it, returning a \code{scidb}
object reference to the new array. The SciDB array will be 1-D if \code{X} is a
vector, and 2-D if \code{X} is a matrix.
If \code{X} is a data.frame, then \code{as.scidb} creates a one-dimensional
multi-attribute SciDB array, with SciDB attributes representing each column
of the data.frame. The functions \code{as.scidb} and \code{df2scidb} are equivalent
in this use case.
The SciDB array row and column chunk sizes are set to the minimum of the number
of rows and columns of \code{X} and the specified \code{rowChunkSize} and
\code{colChunkSize} arguments, respectively. The column chunk size argument is
ignored if the \code{X} is a vector.
This function supports double-precision, integer (32-bit), logical, and single-character
array attribute types.
}
\value{A \code{scidb} reference object. }
\author{
B. W. Lewis <blewis@paradigm4.com>
}
\seealso{
\code{\link{scidb}}
\code{\link{df2scidb}}
}
\examples{
\dontrun{
X <- matrix(runif(20),5)
A <- as.scidb(X)
as.scidb(iris)
scidblist()
print(A)
}
}
|
/man/as.scidb.Rd
|
no_license
|
benmarwick/SciDBR
|
R
| false
| false
| 2,345
|
rd
|
\name{as.scidb}
\alias{as.scidb}
\title{
Upload an R matrix or data.frame to a SciDB array.
}
\description{
Cast an R matrix or data.frame to a SciDB array, returning a reference scidb object.
}
\usage{
as.scidb(X, name = ifelse(exists(as.character(match.call()[2])), as.character(match.call()[2]), tmpnam("array")), rowChunkSize = 1000L, colChunkSize = 1000L, start = c(0L,0L), gc=FALSE, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{X}{ A matrix of double-precision floating point values or a data.frame. }
\item{name}{ The name of the SciDB array to create, defaulting to the R variable name if available. }
\item{rowChunkSize}{Maximum SciDB chunk size for the 1st array dimension.}
\item{colChunkSize}{Maximum SciDB chunk size for the 2nd array dimension (ignored for vectors and data.frames). }
\item{start}{Starting dimension numeric index value or values.}
\item{gc}{Set to TRUE to remove SciDB array when R object is garbage collected or R exits. FALSE means SciDB array persists.}
\item{\dots}{ additional arguments to pass to \code{df2scidb} (see \code{\link{df2scidb}}).}
}
\details{
Used with a matrix or vector argument,
the \code{as.scidb} function creates a single-attribute SciDB array named
\code{name} and copies the data from \code{X} into it, returning a \code{scidb}
object reference to the new array. The SciDB array will be 1-D if \code{X} is a
vector, and 2-D if \code{X} is a matrix.
If \code{X} is a data.frame, then \code{as.scidb} creates a one-dimensional
multi-attribute SciDB array, with SciDB attributes representing each column
of the data.frame. The functions \code{as.scidb} and \code{df2scidb} are equivalent
in this use case.
The SciDB array row and column chunk sizes are set to the minimum of the number
of rows and columns of \code{X} and the specified \code{rowChunkSize} and
\code{colChunkSize} arguments, respectively. The column chunk size argument is
ignored if the \code{X} is a vector.
This function supports double-precision, integer (32-bit), logical, and single-character
array attribute types.
}
\value{A \code{scidb} reference object. }
\author{
B. W. Lewis <blewis@paradigm4.com>
}
\seealso{
\code{\link{scidb}}
\code{\link{df2scidb}}
}
\examples{
\dontrun{
X <- matrix(runif(20),5)
A <- as.scidb(X)
as.scidb(iris)
scidblist()
print(A)
}
}
|
library(FNN)
library(dplyr)
# Lookup table mapping weighting-method names to the integer codes that
# WeightPositives() accepts.
CON_POS_WEIGHT_METHOD <- list(KNN=1,
SIMILARITY_SCORE=2)
#
# Elements in the output weight vector are in the same order as all the positives
# in the input data
#
# Compute a weight for every positive observation (y == 1).
#   y                   - response vector (1 = positive); rows align with X.
#   X                   - numeric feature matrix used for the KNN search.
#   posPatientIDsInData - IDs of the positive patients, in data order.
#   posWeightMethod     - a CON_POS_WEIGHT_METHOD code (KNN or SIMILARITY_SCORE).
#   similarityScoreFile - CSV of (PATIENT_ID, weight); SIMILARITY_SCORE only.
#   resultDir           - output directory for the posWeights.csv audit file.
# Returns the numeric weight vector (same order as posPatientIDsInData) and,
# as a side effect, writes the (PATIENT_ID, weight) table to resultDir.
WeightPositives <- function(y, X, posPatientIDsInData,
posWeightMethod, similarityScoreFile="",
resultDir)
{
if (posWeightMethod == CON_POS_WEIGHT_METHOD$KNN)
{
k <- 5
# k nearest neighbours of each positive point, searched over ALL rows of X.
kNNIDs <- (get.knnx(data=X, query=X[y==1,], k=k, algo="kd_tree"))$nn.index
kNNIDs <- kNNIDs[, 2:ncol(kNNIDs)] # drop 1st column: each query is its own nearest neighbour (self)
weights <- rep(0, nrow(kNNIDs))
for (iPos in 1:length(weights))
{
neighbours <- y[kNNIDs[iPos, ]]
# Fraction of positive neighbours.
# NOTE(review): only k-1 neighbours remain after dropping self but the
# denominator is k — confirm this is intended.
weights[iPos] <- sum(neighbours==1) / k
}
# normalise the weights so that max(weights) = 1
if (max(weights) == 0)
stop(paste("Error! Weights for positive data cannot be computed from KNN ",
"because none of the positive patients has any positive neighbours."))
weights <- weights / max(weights)
# NOTE(review): cbind() coerces `weights` to character when the IDs are
# character; the `== 0` comparison at the bottom then misbehaves — verify
# the type of posPatientIDsInData.
weightDF <- as.data.frame(cbind(posPatientIDsInData, weights))
colnames(weightDF) <- c("PATIENT_ID", "weight")
} else if (posWeightMethod == CON_POS_WEIGHT_METHOD$SIMILARITY_SCORE)
{
if (is.null(posPatientIDsInData))
{
stop(paste("Error! In order to use SIMILARITY_SCORE for WeightPositives, ",
"the input data must have the column of 'PATIENT_ID'."))
}
print("Warning! This is just a temporary solution for SIMILARITY_SCORE. ")
print("A formal solution will need to merge positive patients via PATIENT_ID.")
weights <- read.csv(similarityScoreFile, header=T, check.names=F, sep=",")
# make sure that the order is the same as the positives in the input data
posPatientIDsInData <- matrix(posPatientIDsInData, ncol=1)
colnames(posPatientIDsInData) <- "PATIENT_ID"
posPatientIDsInData <- as.data.frame(posPatientIDsInData)
colnames(weights) <- c("PATIENT_ID", "weight")
weights <- as.data.frame(weights)
# Implicit join on PATIENT_ID (dplyr prints a "Joining, by" message).
weightDF <- left_join(posPatientIDsInData, weights)
} else
stop("Error! Invalid posWeightMethod value!")
# save for investigation
# make sure there's no 0 weights, for the ease of identifying weights for
# positive patients among all patients
weightDF$weight[weightDF$weight==0] <- 1e-6
# NOTE(review): resultDir must end with a path separator because paste()
# concatenates directly; file.path() would be safer.
write.table(weightDF, sep=",", row.names=F,
file=paste(resultDir, "posWeights.csv", sep=""))
return (weightDF$weight)
}
|
/functions/WeightPositives.R
|
no_license
|
jzhao0802/easy_positive
|
R
| false
| false
| 2,500
|
r
|
library(FNN)
library(dplyr)
# Lookup table mapping weighting-method names to the integer codes that
# WeightPositives() accepts.
CON_POS_WEIGHT_METHOD <- list(KNN=1,
SIMILARITY_SCORE=2)
#
# Elements in the output weight vector are in the same order as all the positives
# in the input data
#
# Compute a weight for every positive observation (y == 1).
#   y                   - response vector (1 = positive); rows align with X.
#   X                   - numeric feature matrix used for the KNN search.
#   posPatientIDsInData - IDs of the positive patients, in data order.
#   posWeightMethod     - a CON_POS_WEIGHT_METHOD code (KNN or SIMILARITY_SCORE).
#   similarityScoreFile - CSV of (PATIENT_ID, weight); SIMILARITY_SCORE only.
#   resultDir           - output directory for the posWeights.csv audit file.
# Returns the numeric weight vector (same order as posPatientIDsInData) and,
# as a side effect, writes the (PATIENT_ID, weight) table to resultDir.
WeightPositives <- function(y, X, posPatientIDsInData,
posWeightMethod, similarityScoreFile="",
resultDir)
{
if (posWeightMethod == CON_POS_WEIGHT_METHOD$KNN)
{
k <- 5
# k nearest neighbours of each positive point, searched over ALL rows of X.
kNNIDs <- (get.knnx(data=X, query=X[y==1,], k=k, algo="kd_tree"))$nn.index
kNNIDs <- kNNIDs[, 2:ncol(kNNIDs)] # drop 1st column: each query is its own nearest neighbour (self)
weights <- rep(0, nrow(kNNIDs))
for (iPos in 1:length(weights))
{
neighbours <- y[kNNIDs[iPos, ]]
# Fraction of positive neighbours.
# NOTE(review): only k-1 neighbours remain after dropping self but the
# denominator is k — confirm this is intended.
weights[iPos] <- sum(neighbours==1) / k
}
# normalise the weights so that max(weights) = 1
if (max(weights) == 0)
stop(paste("Error! Weights for positive data cannot be computed from KNN ",
"because none of the positive patients has any positive neighbours."))
weights <- weights / max(weights)
# NOTE(review): cbind() coerces `weights` to character when the IDs are
# character; the `== 0` comparison at the bottom then misbehaves — verify
# the type of posPatientIDsInData.
weightDF <- as.data.frame(cbind(posPatientIDsInData, weights))
colnames(weightDF) <- c("PATIENT_ID", "weight")
} else if (posWeightMethod == CON_POS_WEIGHT_METHOD$SIMILARITY_SCORE)
{
if (is.null(posPatientIDsInData))
{
stop(paste("Error! In order to use SIMILARITY_SCORE for WeightPositives, ",
"the input data must have the column of 'PATIENT_ID'."))
}
print("Warning! This is just a temporary solution for SIMILARITY_SCORE. ")
print("A formal solution will need to merge positive patients via PATIENT_ID.")
weights <- read.csv(similarityScoreFile, header=T, check.names=F, sep=",")
# make sure that the order is the same as the positives in the input data
posPatientIDsInData <- matrix(posPatientIDsInData, ncol=1)
colnames(posPatientIDsInData) <- "PATIENT_ID"
posPatientIDsInData <- as.data.frame(posPatientIDsInData)
colnames(weights) <- c("PATIENT_ID", "weight")
weights <- as.data.frame(weights)
# Implicit join on PATIENT_ID (dplyr prints a "Joining, by" message).
weightDF <- left_join(posPatientIDsInData, weights)
} else
stop("Error! Invalid posWeightMethod value!")
# save for investigation
# make sure there's no 0 weights, for the ease of identifying weights for
# positive patients among all patients
weightDF$weight[weightDF$weight==0] <- 1e-6
# NOTE(review): resultDir must end with a path separator because paste()
# concatenates directly; file.path() would be safer.
write.table(weightDF, sep=",", row.names=F,
file=paste(resultDir, "posWeights.csv", sep=""))
return (weightDF$weight)
}
|
#Read in data
# NOTE(review): the raw file encodes missing values as "?", so the as.numeric()
# coercions below emit NA-with-warning for those rows; read.csv(..., na.strings = "?")
# would be cleaner — confirm against the data file.
data <- read.csv('household_power_consumption.txt', stringsAsFactors = FALSE, sep=";", strip.white = TRUE)
#Create timestamp used for graphing (Date + Time combined into POSIXlt)
data$Datetime <- strptime(paste(data$Date, data$Time), format="%d/%m/%Y %H:%M:%S")
#Convert to numeric values (columns arrive as character)
data$Global_active_power <- as.numeric(data$Global_active_power)
data$Sub_metering_1 <- as.numeric(data$Sub_metering_1)
data$Sub_metering_2 <- as.numeric(data$Sub_metering_2)
data$Sub_metering_3 <- as.numeric(data$Sub_metering_3)
#Take sample of data for specific date range (1-2 Feb 2007, matched as raw strings)
graphData <- data[data$Date=="1/2/2007" | data$Date=="2/2/2007",]
#Output and draw graph: empty frame first, then the line layer
png(filename="plot2.png", width=480, height=480, units="px") #comment out to write graph to screen
plot(graphData$Datetime, graphData$Global_active_power, type = "n", ylab="Global Active Power (kilowatts)", xlab="")
lines(graphData$Datetime, graphData$Global_active_power)
dev.off() #comment out to write graph to screen
|
/plot2.R
|
no_license
|
mathero/ExData_Plotting1
|
R
| false
| false
| 963
|
r
|
#Read in data
# NOTE(review): the raw file encodes missing values as "?", so the as.numeric()
# coercions below emit NA-with-warning for those rows; read.csv(..., na.strings = "?")
# would be cleaner — confirm against the data file.
data <- read.csv('household_power_consumption.txt', stringsAsFactors = FALSE, sep=";", strip.white = TRUE)
#Create timestamp used for graphing (Date + Time combined into POSIXlt)
data$Datetime <- strptime(paste(data$Date, data$Time), format="%d/%m/%Y %H:%M:%S")
#Convert to numeric values (columns arrive as character)
data$Global_active_power <- as.numeric(data$Global_active_power)
data$Sub_metering_1 <- as.numeric(data$Sub_metering_1)
data$Sub_metering_2 <- as.numeric(data$Sub_metering_2)
data$Sub_metering_3 <- as.numeric(data$Sub_metering_3)
#Take sample of data for specific date range (1-2 Feb 2007, matched as raw strings)
graphData <- data[data$Date=="1/2/2007" | data$Date=="2/2/2007",]
#Output and draw graph: empty frame first, then the line layer
png(filename="plot2.png", width=480, height=480, units="px") #comment out to write graph to screen
plot(graphData$Datetime, graphData$Global_active_power, type = "n", ylab="Global Active Power (kilowatts)", xlab="")
lines(graphData$Datetime, graphData$Global_active_power)
dev.off() #comment out to write graph to screen
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compute.R
\name{wait_for_provisioning_completion}
\alias{wait_for_provisioning_completion}
\title{Wait for a cluster to finish provisioning}
\usage{
wait_for_provisioning_completion(cluster, show_output = FALSE)
}
\arguments{
\item{cluster}{The \code{AmlCompute} or \code{AksCompute} object.}
\item{show_output}{If \code{TRUE}, more verbose output will be provided.}
}
\description{
Wait for a cluster to finish provisioning. Typically invoked after a
\code{create_aml_compute()} or \code{create_aks_compute()} call.
}
\seealso{
\code{create_aml_compute()}, \code{create_aks_compute()}
}
|
/man/wait_for_provisioning_completion.Rd
|
permissive
|
kdahiya/azureml-sdk-for-r
|
R
| false
| true
| 667
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compute.R
\name{wait_for_provisioning_completion}
\alias{wait_for_provisioning_completion}
\title{Wait for a cluster to finish provisioning}
\usage{
wait_for_provisioning_completion(cluster, show_output = FALSE)
}
\arguments{
\item{cluster}{The \code{AmlCompute} or \code{AksCompute} object.}
\item{show_output}{If \code{TRUE}, more verbose output will be provided.}
}
\description{
Wait for a cluster to finish provisioning. Typically invoked after a
\code{create_aml_compute()} or \code{create_aks_compute()} call.
}
\seealso{
\code{create_aml_compute()}, \code{create_aks_compute()}
}
|
## Purl'ed example code from the TGST package vignette. Every chunk was
## extracted with eval=FALSE, so this file documents usage only and executes
## nothing when sourced.
## ----eval=FALSE---------------------------------------------------------------
# d = Simdata
# Z = d$Z # True Disease Status
# S = d$S # Risk Score
# phi = 0.1 #10% of patients taking viral load test
# TGST( Z, S, phi, method="nonpar")
## ----eval=FALSE---------------------------------------------------------------
# d = Simdata
# Z = d$Z # True Disease Status
# S = d$S # Risk Score
# Check.exp.tilt( Z, S)
## ----eval=FALSE---------------------------------------------------------------
# d = Simdata
# Z = d$Z # True Disease Status
# S = d$S # Risk Score
# phi = 0.1 #10% of patients taking viral load test
# # NOTE(review): TVLT() does not appear in any other chunk — likely a typo
# # for TGST(); `lambda` must also be defined before calling CV.TGST().
# Obj = TVLT(Z, S, phi, method="nonpar")
# CV.TGST(Obj, lambda, K=10)
## ----eval=FALSE---------------------------------------------------------------
# d = Simdata
# Z = d$Z # True Disease Status
# S = d$S # Risk Score
# phi = 0.1 #10% of patients taking viral load test
# lambda = 0.5
# Obj = TGST(Z, S, phi, method="nonpar")
# OptimalRule(Obj, lambda)
## ----eval=FALSE---------------------------------------------------------------
# d = Simdata
# Z = d$Z # True Disease Status
# S = d$S # Risk Score
# phi = 0.1 #10% of patients taking viral load test
# lambda = 0.5
# Obj = TGST(Z, S, phi, method="nonpar")
# ROCAnalysis(Obj, plot=TRUE)
## ----eval=FALSE---------------------------------------------------------------
# d = Simdata
# Z = d$Z # True Disease Status
# S = d$S # Risk Score
# phi = 0.1 #10\% of patients taking viral load test
# nonpar.rules( Z, S, phi)
## ----eval=FALSE---------------------------------------------------------------
# d = Simdata
# Z = d$Z # True Disease Status
# S = d$S # Risk Score
# phi = 0.1 #10\% of patients taking viral load test
# rules = nonpar.rules( Z, S, phi)
# nonpar.fnr.fpr(Z,S,rules[1,1],rules[1,2])
## ----eval=FALSE---------------------------------------------------------------
# d = Simdata
# Z = d$Z # True Disease Status
# S = d$S # Risk Score
# phi = 0.1 #10\% of patients taking viral load test
# rules = nonpar.rules( Z, S, phi)
# semipar.fnr.fpr(Z,S,rules[1,1],rules[1,2])
## ----eval=FALSE---------------------------------------------------------------
# d = Simdata
# Z = d$Z # True Disease Status
# S = d$S # Risk Score
# phi = 0.1 #10% of patients taking viral load test
# rules = nonpar.rules( Z, S, phi)
# cal.AUC(Z,S,rules[,1],rules[,2])
## ----eval=FALSE---------------------------------------------------------------
# data(Simdata)
# summary(Simdata)
# plot(Simdata)
|
/inst/doc/TGST-vignette.R
|
no_license
|
cran/TGST
|
R
| false
| false
| 2,524
|
r
|
## Purl'ed example code from the TGST package vignette. Every chunk was
## extracted with eval=FALSE, so this file documents usage only and executes
## nothing when sourced.
## ----eval=FALSE---------------------------------------------------------------
# d = Simdata
# Z = d$Z # True Disease Status
# S = d$S # Risk Score
# phi = 0.1 #10% of patients taking viral load test
# TGST( Z, S, phi, method="nonpar")
## ----eval=FALSE---------------------------------------------------------------
# d = Simdata
# Z = d$Z # True Disease Status
# S = d$S # Risk Score
# Check.exp.tilt( Z, S)
## ----eval=FALSE---------------------------------------------------------------
# d = Simdata
# Z = d$Z # True Disease Status
# S = d$S # Risk Score
# phi = 0.1 #10% of patients taking viral load test
# # NOTE(review): TVLT() does not appear in any other chunk — likely a typo
# # for TGST(); `lambda` must also be defined before calling CV.TGST().
# Obj = TVLT(Z, S, phi, method="nonpar")
# CV.TGST(Obj, lambda, K=10)
## ----eval=FALSE---------------------------------------------------------------
# d = Simdata
# Z = d$Z # True Disease Status
# S = d$S # Risk Score
# phi = 0.1 #10% of patients taking viral load test
# lambda = 0.5
# Obj = TGST(Z, S, phi, method="nonpar")
# OptimalRule(Obj, lambda)
## ----eval=FALSE---------------------------------------------------------------
# d = Simdata
# Z = d$Z # True Disease Status
# S = d$S # Risk Score
# phi = 0.1 #10% of patients taking viral load test
# lambda = 0.5
# Obj = TGST(Z, S, phi, method="nonpar")
# ROCAnalysis(Obj, plot=TRUE)
## ----eval=FALSE---------------------------------------------------------------
# d = Simdata
# Z = d$Z # True Disease Status
# S = d$S # Risk Score
# phi = 0.1 #10\% of patients taking viral load test
# nonpar.rules( Z, S, phi)
## ----eval=FALSE---------------------------------------------------------------
# d = Simdata
# Z = d$Z # True Disease Status
# S = d$S # Risk Score
# phi = 0.1 #10\% of patients taking viral load test
# rules = nonpar.rules( Z, S, phi)
# nonpar.fnr.fpr(Z,S,rules[1,1],rules[1,2])
## ----eval=FALSE---------------------------------------------------------------
# d = Simdata
# Z = d$Z # True Disease Status
# S = d$S # Risk Score
# phi = 0.1 #10\% of patients taking viral load test
# rules = nonpar.rules( Z, S, phi)
# semipar.fnr.fpr(Z,S,rules[1,1],rules[1,2])
## ----eval=FALSE---------------------------------------------------------------
# d = Simdata
# Z = d$Z # True Disease Status
# S = d$S # Risk Score
# phi = 0.1 #10% of patients taking viral load test
# rules = nonpar.rules( Z, S, phi)
# cal.AUC(Z,S,rules[,1],rules[,2])
## ----eval=FALSE---------------------------------------------------------------
# data(Simdata)
# summary(Simdata)
# plot(Simdata)
|
library(glmnet)
# Elastic-net (alpha = 0.1) gaussian regression of the first column on the
# features in columns 4..ncol, tuned by 10-fold cross-validation on mean
# absolute error; the fitted path summary is appended to the model log file.
# (Fixes: partial-matched `head=T` -> `header = TRUE`; `T` -> `TRUE`;
# renamed `glm`, which shadowed stats::glm, to `cv_fit`.)
mydata <- read.table("./TrainingSet/Correlation/haematopoietic.csv",
                     header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])
set.seed(123) # reproducible CV fold assignment
cv_fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.1,
                    family = "gaussian", standardize = TRUE)
# Append the coefficient-path summary to the log, restoring stdout afterwards.
sink('./Model/EN/Correlation/haematopoietic/haematopoietic_027.txt', append = TRUE)
print(cv_fit$glmnet.fit)
sink()
|
/Model/EN/Correlation/haematopoietic/haematopoietic_027.R
|
no_license
|
leon1003/QSMART
|
R
| false
| false
| 383
|
r
|
library(glmnet)
# Elastic-net (alpha = 0.1) gaussian regression of the first column on the
# features in columns 4..ncol, tuned by 10-fold CV on mean absolute error.
# NOTE(review): `head=T` relies on partial argument matching (header) and the
# reassignable T; `glm` shadows stats::glm — consider renaming.
mydata = read.table("./TrainingSet/Correlation/haematopoietic.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123) # reproducible CV fold assignment
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.1,family="gaussian",standardize=TRUE)
# Append the coefficient-path summary to the model log, then restore stdout.
sink('./Model/EN/Correlation/haematopoietic/haematopoietic_027.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
#
# Haskel_T1.R, 13 Mar 17
# Data from:
# Estimating {UK} investment in intangible assets and Intellectual Property Rights
# Peter Goodridge and Jonathan Haskel and Gavin Wallis
#
# Example from:
# Empirical Software Engineering using R
# Derek M. Jones
# ESEUR_config.r defines ESEUR_dir (data root) used below.
source("ESEUR_config.r")
pal_col=rainbow(5)
# read.csv transparently decompresses the .xz file.
econ=read.csv(paste0(ESEUR_dir, "economics/Haskel_T1.csv.xz"), as.is=TRUE)
# Base line plot of intangible investment, with further series layered on top.
plot(econ$Year, econ$All.intangibles, type="l", col=pal_col[1],
ylim=c(0, 140),
xlab="Year", ylab="£ Billion\n")
lines(econ$Year, econ$All.tangibles, col=pal_col[2])
lines(econ$Year, econ$Economic.Competencies, col=pal_col[3])
lines(econ$Year, econ$Innovative.Property, col=pal_col[4])
# Software = own-account plus purchased software investment.
lines(econ$Year, econ$Own.account.Software+econ$Purchased.Software, col=pal_col[5])
legend(x="topleft", legend=c("Economic competencies", "Innovative property", "Software"), bty="n", fill=pal_col[3:5], cex=1.2)
# Direct labels for the two series not covered by the legend.
text(2005, 115, "Intangibles", cex=1.2, pos=4)
text(2005, 102, "Tangibles", cex=1.2, pos=4)
|
/economics/Haskel_T1.R
|
no_license
|
alanponce/ESEUR-code-data
|
R
| false
| false
| 976
|
r
|
#
# Haskel_T1.R, 13 Mar 17
# Data from:
# Estimating {UK} investment in intangible assets and Intellectual Property Rights
# Peter Goodridge and Jonathan Haskel and Gavin Wallis
#
# Example from:
# Empirical Software Engineering using R
# Derek M. Jones
# ESEUR_config.r defines ESEUR_dir (data root) used below.
source("ESEUR_config.r")
pal_col=rainbow(5)
# read.csv transparently decompresses the .xz file.
econ=read.csv(paste0(ESEUR_dir, "economics/Haskel_T1.csv.xz"), as.is=TRUE)
# Base line plot of intangible investment, with further series layered on top.
plot(econ$Year, econ$All.intangibles, type="l", col=pal_col[1],
ylim=c(0, 140),
xlab="Year", ylab="£ Billion\n")
lines(econ$Year, econ$All.tangibles, col=pal_col[2])
lines(econ$Year, econ$Economic.Competencies, col=pal_col[3])
lines(econ$Year, econ$Innovative.Property, col=pal_col[4])
# Software = own-account plus purchased software investment.
lines(econ$Year, econ$Own.account.Software+econ$Purchased.Software, col=pal_col[5])
legend(x="topleft", legend=c("Economic competencies", "Innovative property", "Software"), bty="n", fill=pal_col[3:5], cex=1.2)
# Direct labels for the two series not covered by the legend.
text(2005, 115, "Intangibles", cex=1.2, pos=4)
text(2005, 102, "Tangibles", cex=1.2, pos=4)
|
## renders all Rmd in the current folder
library(tidyverse)
library(filesstrings)
# Folder of the currently open script (RStudio only).
own_dir <- dirname(rstudioapi::getSourceEditorContext()$path)
rmd_files <- list.files(path = own_dir, pattern = "\\.Rmd",
full.names = TRUE) %>%
as.list()
rmd_files
# NOTE(review): map() over a single element inside a for loop is redundant —
# purrr::walk(rmd_files, rmarkdown::render) does the same in one call.
for(i in seq_along(rmd_files)){
purrr::map(rmd_files[[i]], rmarkdown::render)
}
## remove the Rmd files (exercises only) that contain the
## answers to the exercises and puts them in the "/answers folder
## put the /answers folder in gitignore
## TODO: write a function that puts them back in a lab, on the basis
## of a lab name
#library(tidyverse)
#library(filesstrings)
own_dir <- dirname(rstudioapi::getSourceEditorContext()$path)
rmd_files <- list.files(path = own_dir, pattern = "\\.Rmd",
full.names = TRUE)
rmd_files_df <- rmd_files %>%
enframe(name = NULL)
rmd_files_df <- rmd_files_df %>%
mutate(file_name = basename(value))
rmd_files_df
# NOTE(review): in this regex the dots match ANY character, so the pattern is
# "any char, _exercise_, any char"; "_exercise_" (literal) is probably meant.
ind <- str_detect(string = rmd_files_df$file_name,
pattern = "._exercise_.")
exercises <- rmd_files_df[ind, "value"] %>%
mutate(file_name = basename(value))
exercises
destination <- here::here("ANSWERS")
# NOTE(review): indices [2:3] are hard-coded to the files present at the time —
# this silently breaks when the number of exercise files changes.
rmd_copied_to <- file.path(destination, exercises$file_name[2:3]) %>%
enframe(name = NULL)
## save rmd new locations
write_csv(rmd_copied_to, path = file.path(own_dir, "rmd_copied_to.csv"))
# NOTE(review): map() iterates over the data frame's COLUMNS, so file.move is
# called once with the full paths and once with the bare file names (the second
# call cannot resolve the files) — map over exercises$value instead.
map(exercises, file.move, destinations = destination)
|
/labs/1_unit_intro_to_r_bioconductor/render_and_remove_answers.R
|
no_license
|
uashogeschoolutrecht/ABDS_2019
|
R
| false
| false
| 1,444
|
r
|
## renders all Rmd in the current folder
library(tidyverse)
library(filesstrings)
# Folder of the currently open script (RStudio only).
own_dir <- dirname(rstudioapi::getSourceEditorContext()$path)
rmd_files <- list.files(path = own_dir, pattern = "\\.Rmd",
full.names = TRUE) %>%
as.list()
rmd_files
# NOTE(review): map() over a single element inside a for loop is redundant —
# purrr::walk(rmd_files, rmarkdown::render) does the same in one call.
for(i in seq_along(rmd_files)){
purrr::map(rmd_files[[i]], rmarkdown::render)
}
## remove the Rmd files (exercises only) that contain the
## answers to the exercises and puts them in the "/answers folder
## put the /answers folder in gitignore
## TODO: write a function that puts them back in a lab, on the basis
## of a lab name
#library(tidyverse)
#library(filesstrings)
own_dir <- dirname(rstudioapi::getSourceEditorContext()$path)
rmd_files <- list.files(path = own_dir, pattern = "\\.Rmd",
full.names = TRUE)
rmd_files_df <- rmd_files %>%
enframe(name = NULL)
rmd_files_df <- rmd_files_df %>%
mutate(file_name = basename(value))
rmd_files_df
# NOTE(review): in this regex the dots match ANY character, so the pattern is
# "any char, _exercise_, any char"; "_exercise_" (literal) is probably meant.
ind <- str_detect(string = rmd_files_df$file_name,
pattern = "._exercise_.")
exercises <- rmd_files_df[ind, "value"] %>%
mutate(file_name = basename(value))
exercises
destination <- here::here("ANSWERS")
# NOTE(review): indices [2:3] are hard-coded to the files present at the time —
# this silently breaks when the number of exercise files changes.
rmd_copied_to <- file.path(destination, exercises$file_name[2:3]) %>%
enframe(name = NULL)
## save rmd new locations
write_csv(rmd_copied_to, path = file.path(own_dir, "rmd_copied_to.csv"))
# NOTE(review): map() iterates over the data frame's COLUMNS, so file.move is
# called once with the full paths and once with the bare file names (the second
# call cannot resolve the files) — map over exercises$value instead.
map(exercises, file.move, destinations = destination)
|
fitSphere <- function(mat){
  # Algebraic least-squares sphere fit, adapted from the Python approach at
  # http://jekel.me/2015/Least-Squares-Sphere-Fit/
  #
  # mat: numeric matrix of 3D points, one point per row (columns x, y, z).
  # Returns an object of class 'sphere': list(C = centre (x, y, z), R = radius),
  # or NULL when the normal equations are singular.
  #
  # Model: |p - c|^2 = r^2  rearranges to  2 p.c + (r^2 - |c|^2) = |p|^2,
  # which is linear in the unknowns (cx, cy, cz, r^2 - |c|^2).
  design <- cbind(mat * 2, rep(1, nrow(mat)))
  rhs <- matrix(0, nrow(mat), 1)
  rhs[, 1] <- (mat[, 1] * mat[, 1]) + (mat[, 2] * mat[, 2]) + (mat[, 3] * mat[, 3])
  # Invert the normal matrix; a singular system yields NULL for the whole fit.
  normal_inverse <- tryCatch(
    solve(t(design) %*% design),
    warning = function(cond) NULL,
    error = function(cond) NULL
  )
  if (is.null(normal_inverse)) {
    return(NULL)
  }
  solution <- normal_inverse %*% (t(design) %*% rhs)
  # Recover the radius: r^2 = cx^2 + cy^2 + cz^2 + (r^2 - |c|^2).
  centre <- solution[1:3]
  radius <- sqrt(sum(centre * centre) + solution[4])
  fit <- list(C = centre, R = radius)
  class(fit) <- "sphere"
  fit
}
|
/R/fitSphere.R
|
no_license
|
aaronolsen/linkR
|
R
| false
| false
| 727
|
r
|
fitSphere <- function(mat){
# Source (python): http://jekel.me/2015/Least-Squares-Sphere-Fit/
# Algebraic least-squares sphere fit.
# mat: numeric matrix of 3D points, one per row (columns x, y, z).
# Returns list(C = centre, R = radius) of class 'sphere', or NULL when the
# normal equations are singular.
# Make the A matrix: columns (2x, 2y, 2z, 1); the 4th unknown is r^2 - |c|^2.
A <- cbind(mat*2, rep(1, nrow(mat)))
# Make the f matrix: squared norm |p|^2 of every point.
f <- matrix(0, nrow(mat), 1)
f[, 1] = (mat[, 1]*mat[, 1]) + (mat[, 2]*mat[, 2]) + (mat[, 3]*mat[, 3])
# Solve equations: invert the normal matrix; NULL signals a singular system.
a_si <- tryCatch({
solve(t(A) %*% A)
}, warning = function(cond) {
return(NULL)
}, error = function(cond) {
return(NULL)
})
if(is.null(a_si)) return(NULL)
ab <- t(A) %*% f
cen <- a_si %*% ab
# Solve for radius: r^2 = cx^2 + cy^2 + cz^2 + (r^2 - |c|^2).
rad2 = (cen[1]*cen[1])+(cen[2]*cen[2])+(cen[3]*cen[3])+cen[4]
radius = sqrt(rad2)
rlist <- list(
'C'=cen[1:3],
'R'=radius
)
class(rlist) <- 'sphere'
rlist
}
|
#' Brute Force Algorithm for the Knapsack problem
#'
#' Solves the 0/1 knapsack problem by enumerating every subset of items,
#' so the running time is O(2^n) in the number of items.
#'
#'@param x data frame with two columns \code{w}(weight) and \code{v}(value) of items
#'
#'@param W maximum weight the knapsack can hold
#'
#'@return theoretical maximum \code{$value} (knapsack value) composed of \code{$elements} (which items)
#'
#'@export
# x <- data frame of knapsack
# W <- knapsack size
brute_force_knapsack <- function(x, W){
  if(!is.data.frame(x)){stop("Not correct object")}
  if(!is.numeric(W) || W < 0){stop("Not correct W")}
  item_ids <- as.integer(row.names(x))
  # Best solution found so far; value 0 / no items when nothing fits.
  best_value <- 0
  best_elements <- integer(0)
  # Enumerate every subset size from 1 to n. (The original implementation
  # (a) picked the *first* single item with w <= W rather than the best one,
  # (b) returned early as soon as one subset size failed to improve, missing
  # better larger subsets, and (c) let the loop overwrite the correct
  # all-items answer when sum(w) <= W.)
  for (i in seq_len(nrow(x))) {
    subsets <- combn(item_ids, i)
    subset_weights <- combn(x$w, i, sum)
    subset_values <- combn(x$v, i, sum)
    feasible <- which(subset_weights <= W)
    if (length(feasible) == 0) {
      next
    }
    # Best-valued feasible subset of this size.
    winner <- feasible[which.max(subset_values[feasible])]
    if (subset_values[winner] > best_value) {
      best_value <- subset_values[winner]
      best_elements <- subsets[, winner]
    }
  }
  list(value = best_value, elements = best_elements)
}
|
/lab6/lab6/R/brute_force_knapsack.R
|
no_license
|
Raikao/R_programming
|
R
| false
| false
| 1,606
|
r
|
#' Brute Force Algorithm for the Knapsack problem
#'
#' Solves the 0/1 knapsack problem by enumerating every subset of items,
#' so the running time is O(2^n) in the number of items.
#'
#'@param x data frame with two columns \code{w}(weight) and \code{v}(value) of items
#'
#'@param W maximum weight the knapsack can hold
#'
#'@return theoretical maximum \code{$value} (knapsack value) composed of \code{$elements} (which items)
#'
#'@export
# x <- data frame of knapsack
# W <- knapsack size
brute_force_knapsack <- function(x, W){
  if(!is.data.frame(x)){stop("Not correct object")}
  if(!is.numeric(W) || W < 0){stop("Not correct W")}
  item_ids <- as.integer(row.names(x))
  # Best solution found so far; value 0 / no items when nothing fits.
  best_value <- 0
  best_elements <- integer(0)
  # Enumerate every subset size from 1 to n. (The original implementation
  # (a) picked the *first* single item with w <= W rather than the best one,
  # (b) returned early as soon as one subset size failed to improve, missing
  # better larger subsets, and (c) let the loop overwrite the correct
  # all-items answer when sum(w) <= W.)
  for (i in seq_len(nrow(x))) {
    subsets <- combn(item_ids, i)
    subset_weights <- combn(x$w, i, sum)
    subset_values <- combn(x$v, i, sum)
    feasible <- which(subset_weights <= W)
    if (length(feasible) == 0) {
      next
    }
    # Best-valued feasible subset of this size.
    winner <- feasible[which.max(subset_values[feasible])]
    if (subset_values[winner] > best_value) {
      best_value <- subset_values[winner]
      best_elements <- subsets[, winner]
    }
  }
  list(value = best_value, elements = best_elements)
}
|
#
# Map-x (c) unepgrid 2017-present
#
#
# Map div
#
# Container <div> the client-side map library attaches to; its id comes from
# the app-wide `config` list (config[["map"]][["id"]]).
# NOTE(review): depends on a global `config` and on shiny's tagList/tags being
# available at source time.
ui_map_section <- tagList(
tags$div(
id=config[["map"]][["id"]]
)
)
|
/src/ui/map.R
|
no_license
|
cuulee/map-x-mgl
|
R
| false
| false
| 136
|
r
|
#
# Map-x (c) unepgrid 2017-present
#
#
# Map div
#
# Container <div> the client-side map library attaches to; its id comes from
# the app-wide `config` list (config[["map"]][["id"]]).
# NOTE(review): depends on a global `config` and on shiny's tagList/tags being
# available at source time.
ui_map_section <- tagList(
tags$div(
id=config[["map"]][["id"]]
)
)
|
library(ggplot2)
library(dplyr)
library(here)
## locate and read in csv file
fhplus_data <- read.csv(
here::here(
"processed-data",
"08_harmony_BayesSpace",
"fasthplus_results.csv"
),
sep = "\t",
# data is tab delimited
)
head(fhplus_data)
# remove redundant lines (repeated header rows embedded in the data)
fhplus_data <- fhplus_data[fhplus_data$k != "k", ]
# convert k to class integer so it's ordered in the plot
fhplus_data$k <- as.integer(fhplus_data$k)
fhplus_data$fasthplus <- as.numeric(fhplus_data$fasthplus)
fhplus_data$t_value <- as.integer(fhplus_data$t_value)
dim(fhplus_data)
# [1] 99 5
27 * 2 * 2 ## 27 k values * whole/targeted * GM/all spots
# [1] 108
# NOTE(review): dim() above reports 99 rows, not the expected 108 — some
# (k, type, spots_set) combinations are missing from the input file.
type_list <- c("wholegenome", "targeted")
spots_set_list <- c("grey_matter", "all_spots")
# Sanity check: distribution of t_value within each type/spots_set stratum.
with(fhplus_data, tapply(t_value, paste0(type, "_", spots_set), summary))
# $targeted_all_spots
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 1408 1905 1905 1885 1905 1905
#
# $targeted_grey_matter
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 1680 1713 1713 1712 1714 1714
#
# $wholegenome_all_spots
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 1905 1905 1905 1905 1905 1905
#
# $wholegenome_grey_matter
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 1664 1667 1668 1667 1668 1668
## plot output directory
# NOTE(review): dir_plots is defined but the pdf() calls below rebuild the
# same path by hand — reuse dir_plots to keep them in sync.
dir_plots <-
here::here("plots", "08_harmony_BayesSpace", "fasthplus")
# dir.create(dir_plots)
## create line plots: one PDF of (1 - fasthplus) vs k per type/spots_set pair
for (t in type_list) {
for (s in spots_set_list) {
pdf(
file = here::here(
"plots",
"08_harmony_BayesSpace",
"fasthplus",
paste0(
"fasthplus_results_",
t, "_", s,
".pdf"
)
),
width = 8
)
df_subset <- subset(fhplus_data, type == t & spots_set == s)
df_subset <- na.omit(df_subset) # some fasthplus values were NA
plot <- ggplot(df_subset, aes(
x = k,
y = 1 - fasthplus,
group = 1
)) +
geom_line() +
geom_point()
print(plot)
dev.off()
}
}
## Reproducibility information
print("Reproducibility information:")
Sys.time()
proc.time()
options(width = 120)
# NOTE(review): session_info() needs sessioninfo (or devtools) which is not
# loaded above — this line errors as written.
session_info()
|
/code/08_harmony_BayesSpace/fasthplus_line_plots .R
|
no_license
|
LieberInstitute/Visium_SPG_AD
|
R
| false
| false
| 2,366
|
r
|
library(ggplot2)
library(dplyr)
library(here)
## locate and read in csv file
fhplus_data <- read.csv(
here::here(
"processed-data",
"08_harmony_BayesSpace",
"fasthplus_results.csv"
),
sep = "\t",
# data is tab delimited
)
head(fhplus_data)
# remove redundant lines (repeated header rows embedded in the data)
fhplus_data <- fhplus_data[fhplus_data$k != "k", ]
# convert k to class integer so it's ordered in the plot
fhplus_data$k <- as.integer(fhplus_data$k)
fhplus_data$fasthplus <- as.numeric(fhplus_data$fasthplus)
fhplus_data$t_value <- as.integer(fhplus_data$t_value)
dim(fhplus_data)
# [1] 99 5
27 * 2 * 2 ## 27 k values * whole/targeted * GM/all spots
# [1] 108
# NOTE(review): dim() above reports 99 rows, not the expected 108 — some
# (k, type, spots_set) combinations are missing from the input file.
type_list <- c("wholegenome", "targeted")
spots_set_list <- c("grey_matter", "all_spots")
# Sanity check: distribution of t_value within each type/spots_set stratum.
with(fhplus_data, tapply(t_value, paste0(type, "_", spots_set), summary))
# $targeted_all_spots
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 1408 1905 1905 1885 1905 1905
#
# $targeted_grey_matter
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 1680 1713 1713 1712 1714 1714
#
# $wholegenome_all_spots
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 1905 1905 1905 1905 1905 1905
#
# $wholegenome_grey_matter
# Min. 1st Qu. Median Mean 3rd Qu. Max.
# 1664 1667 1668 1667 1668 1668
## plot output directory
# NOTE(review): dir_plots is defined but the pdf() calls below rebuild the
# same path by hand — reuse dir_plots to keep them in sync.
dir_plots <-
here::here("plots", "08_harmony_BayesSpace", "fasthplus")
# dir.create(dir_plots)
## create line plots: one PDF of (1 - fasthplus) vs k per type/spots_set pair
for (t in type_list) {
for (s in spots_set_list) {
pdf(
file = here::here(
"plots",
"08_harmony_BayesSpace",
"fasthplus",
paste0(
"fasthplus_results_",
t, "_", s,
".pdf"
)
),
width = 8
)
df_subset <- subset(fhplus_data, type == t & spots_set == s)
df_subset <- na.omit(df_subset) # some fasthplus values were NA
plot <- ggplot(df_subset, aes(
x = k,
y = 1 - fasthplus,
group = 1
)) +
geom_line() +
geom_point()
print(plot)
dev.off()
}
}
## Reproducibility information
print("Reproducibility information:")
Sys.time()
proc.time()
options(width = 120)
# NOTE(review): session_info() needs sessioninfo (or devtools) which is not
# loaded above — this line errors as written.
session_info()
|
# Mid-term and final exam scores for two students (A and B) across five
# subjects: KOR, ENG, MAT, SOC, SCI.
a <- c(95,75,85,90,77,97,67,99,88,82)
b <- c(100,65,70,70,80,60,88,99,75,90)
# byrow = TRUE: first five values form the mid-term row, last five the final row.
scoreA <- matrix(a, nrow = 2, byrow = TRUE)
scoreB <- matrix(b, nrow = 2, byrow = TRUE)
rownames(scoreA) <- c("MID", "FINAL")
colnames(scoreA) <- c("KOR", "ENG", "MAT", "SOC", "SCI")
rownames(scoreB) <- c("MID", "FINAL")
colnames(scoreB) <- c("KOR", "ENG", "MAT", "SOC", "SCI")
scoreA
scoreB
# Total final-exam score for each student.
A <- sum(scoreA["FINAL",])
B <- sum(scoreB["FINAL",])
A
B
# Which student scored higher on the final. A and B are scalars, so a plain
# if/else is the right tool (the original used vectorised ifelse()).
ans <- if (A > B) "A" else "B"
# Subject in which each student obtained their single highest mark:
# which() gives the linear index of the maximum, col() maps it to its column.
colnames(scoreA)[col(scoreA)[which(scoreA==max(scoreA))]]
colnames(scoreB)[col(scoreB)[which(scoreB==max(scoreB))]]
|
/R/실습_7주차.R
|
no_license
|
huskycat1202/2-2
|
R
| false
| false
| 572
|
r
|
# Mid-term and final exam scores for two students (A and B) across five
# subjects: KOR, ENG, MAT, SOC, SCI.
a <- c(95,75,85,90,77,97,67,99,88,82)
b <- c(100,65,70,70,80,60,88,99,75,90)
# byrow = TRUE: first five values form the mid-term row, last five the final row.
scoreA <- matrix(a, nrow = 2, byrow = TRUE)
scoreB <- matrix(b, nrow = 2, byrow = TRUE)
rownames(scoreA) <- c("MID", "FINAL")
colnames(scoreA) <- c("KOR", "ENG", "MAT", "SOC", "SCI")
rownames(scoreB) <- c("MID", "FINAL")
colnames(scoreB) <- c("KOR", "ENG", "MAT", "SOC", "SCI")
scoreA
scoreB
# Total final-exam score for each student.
A <- sum(scoreA["FINAL",])
B <- sum(scoreB["FINAL",])
A
B
# Which student scored higher on the final.
# NOTE(review): A and B are scalars, so plain if/else would be more idiomatic
# than vectorised ifelse().
ans <- ifelse(A>B, "A", "B")
# Subject of each student's single highest mark: which() gives the linear
# index of the maximum, col() maps it to its column name.
colnames(scoreA)[col(scoreA)[which(scoreA==max(scoreA))]]
colnames(scoreB)[col(scoreB)[which(scoreB==max(scoreB))]]
|
#' Define the header fields of a sales order
#'
#' S4 class extending \code{bill} with the document-level fields of a sales
#' order.
#'
#' @slot FDate character. Document (bill) date, formatted "YYYY-MM-DD".
#'
#' @return No return value; called for the side effect of registering the class.
#' @export
#' @include class_bill.R
#' @examples # no examples provided
setClass('salesOrderHead',slots=c(FDate='character'),contains = 'bill',
prototype = prototype(FInterId=1L,FNumber='000001',FName='sample_record_1',FDate='2018-01-01'));
|
/R/class_salesOrderHead.R
|
no_license
|
takewiki/tsdm
|
R
| false
| false
| 349
|
r
|
#' Define the header fields of a sales order
#'
#' S4 class extending \code{bill} with the document-level fields of a sales
#' order.
#'
#' @slot FDate character. Document (bill) date, formatted "YYYY-MM-DD".
#'
#' @return No return value; called for the side effect of registering the class.
#' @export
#' @include class_bill.R
#' @examples # no examples provided
setClass('salesOrderHead',slots=c(FDate='character'),contains = 'bill',
prototype = prototype(FInterId=1L,FNumber='000001',FName='sample_record_1',FDate='2018-01-01'));
|
# Objective function handed to an optimiser: returns the NEGATIVE
# log-likelihood of an NHPP model evaluated at parameter vector `thetain`.
# All shared state (iteration counter, data, model definition) is exchanged
# through the `.frame0` environment, which callers must have populated.
# (Fix: removed a leftover `if (iter.count < 4) browser()` debug call that
# dropped into the interactive debugger on the first three iterations.)
f.loglikeNHPPvec <-
function (thetain)
{
    # Bump and persist the optimiser iteration counter.
    iter.count <- get(envir = .frame0, "iter.count") + 1
    assign(envir = .frame0, inherits = TRUE,"iter.count", iter.count )
    data.rdu <- get(envir = .frame0, "data.ld")
    debug1<- get(envir = .frame0, "debug1")
    model <- get(envir = .frame0, "model")
    form <- model$form
    f.origparam <- model$f.origparam
    # Map the optimiser's parameterisation back to the model's original one.
    theta.origparam <- f.origparam(thetain, model)
    the.log.like <- loglikeNHPPvec(data.rdu, theta.origparam,
        form)
    # Progress trace: iteration, log-likelihood, first two parameters.
    cat(iter.count, format(the.log.like), "theta1=", format(theta.origparam[1]),
        format(theta.origparam[2]), "\n")
    # Negate because optimisers minimise.
    return(-the.log.like)
}
|
/R/f.loglikeNHPPvec.R
|
no_license
|
Ammar-K/SMRD
|
R
| false
| false
| 692
|
r
|
f.loglikeNHPPvec <-
function (thetain)
{
    # Negative log-likelihood wrapper for a nonhomogeneous Poisson process
    # (NHPP) model, in the form expected by a minimizer. State is exchanged
    # with the caller through the environment '.frame0'.
    #
    # Args:
    #   thetain: parameter vector on the optimizer's (transformed) scale.
    # Returns: minus the log-likelihood evaluated at 'thetain'.
    iter.count <- get(envir = .frame0, "iter.count") + 1
    assign(envir = .frame0, inherits = TRUE,"iter.count", iter.count )
    data.rdu <- get(envir = .frame0, "data.ld")
    debug1<- get(envir = .frame0, "debug1")
    model <- get(envir = .frame0, "model")
    form <- model$form
    f.origparam <- model$f.origparam
    # Map optimizer-scale parameters back to the model's original scale.
    theta.origparam <- f.origparam(thetain, model)
    # NOTE(review): browser() fires unconditionally on the first three
    # iterations, which stalls non-interactive runs; the 'debug1' flag
    # fetched above is never consulted. Presumably it was meant to gate
    # this -- confirm.
    if (iter.count < 4)
        browser()
    the.log.like <- loglikeNHPPvec(data.rdu, theta.origparam,
        form)
    # Progress trace: iteration, log-likelihood, first two parameters.
    cat(iter.count, format(the.log.like), "theta1=", format(theta.origparam[1]),
        format(theta.origparam[2]), "\n")
    return(-the.log.like)
}
|
# Library -----------------------------------------------------------------
library(dplyr)
library(tidyr)
library(ggplot2)
library(ggmap)
# FIX: st_read() and st_coordinates() below come from the sf package,
# which was never attached -- without it the script fails at the
# shapefile read.
library(sf)
# About -------------------------------------------------------------------
# Exploring .shp files and spatial analysis in R.
# Layering ridership data from Obama and Trump inauguration days with
# the geo-coordinates from the .shp file
# Load Data ---------------------------------------------------------------
# Data from https://data.world/transportation/wmata-ridership-data
obama <- read.csv("data/inaugurations/2009-Inauguration-Records-Raw.csv")
# NOTE(review): 'trump' is read but never used below -- kept for parity
# with the 2009 data; confirm whether it was meant to be analyzed too.
trump <- read.csv("data/inaugurations/2017-Inauguration-Records-Raw.csv")
stations <- st_read("data/stations/stations.shp")
# Variable Description
# AM Peak: Station Opening to 9:30 AM.
# Mid-day: 9:30 AM – 3:00 PM.
# PM Peak: 3:00 PM- 7:00 PM.
# Evening: 7:00 PM to Station Closing.
# The “entry” number is what is used for official ridership stats.
# Cleaning ----------------------------------------------------------------
# Prep the time-of-day columns for conversion to numeric type
# (strip thousands separators, e.g. "1,234" -> "1234").
times <- colnames(obama)[4:7]
obama[times] <- apply(obama[times], 2, function(x){gsub(",","",x)})
# Clean: keep only official "Entry" counts and make the counts numeric.
obama <- obama %>%
  select(!c(equivalent_date_last_yr, sum)) %>%
  filter(type == "Entry") %>%
  mutate_at(times, as.numeric)
# Prep name columns for match: normalize separators before fuzzy matching.
obama$stations_name <- gsub("/|-", " ", obama$stations_name)
stations$name <- gsub("/|-", " ", stations$name)
# Because names don't match exactly, use fuzzy matching and store the comparisons.
matches <- NULL
for (i in seq_len(nrow(obama))){
  index <- agrep(obama$stations_name[i], stations$name, ignore.case = TRUE, max.distance = .25)
  if (length(index) > 0){
    match <- data.frame(obama = obama$stations_name[i], stations = stations$name[index])
  }
  if (length(index) == 0){
    match <- data.frame(obama = obama$stations_name[i], stations = "NO MATCH")
  }
  matches <- rbind(matches, match)
}
# Find where agrep returned multiple matches for the same pattern
ambiguousMatches <- matches %>%
  group_by(obama) %>%
  tally() %>%
  filter(n > 1)
# Find the ambiguous matches in the matches df.
ambiguousAudit <- NULL
for (i in seq_len(nrow(ambiguousMatches))){
  ambiguousMatch <- matches[which(matches$obama == ambiguousMatches$obama[i]), ]
  ambiguousAudit <- rbind(ambiguousAudit, ambiguousMatch)
}
ambiguousAudit
# Manually review and update matches using the row numbers found in ambiguousAudit
matches <- matches[-c(2, 25, 29, 32, 34, 35, 49, 57, 69, 84, 97),]
matches
# Find all the missing matches.
missingMatches <- matches %>%
  group_by(obama) %>%
  filter(stations == "NO MATCH")
missingMatches
# Manual repair of the one recoverable missing match.
matches$stations[79] <- stations$name[9]
# Drop stations that don't have matches
matches <- matches %>%
  filter(stations != "NO MATCH")
matches
# Rename back to original, for ease of joins.
colnames(matches)[2] <- "name"
colnames(matches)[1] <- "stations_name"
# Join the data
stations <- inner_join(matches, stations, by = "name")
obama <- inner_join(matches, obama, by = "stations_name")
fullset <- inner_join(stations, obama)
# Drop unnecessary columns
fullset <- fullset %>%
  select(-c("type", "name", "marker.sym"))
head(fullset)
# Mapping -----------------------------------------------------------------
# Add Lat/Long to the dataset as discrete X/Y columns.
fullset <- cbind(fullset,
                 st_coordinates(fullset$geometry))
# Get sums for each line at each timepoint.
fullset %>%
  drop_na() %>%
  group_by(line) %>%
  summarise_at(times, mean)
# Plot ridership by time of day.
ggplot(data = fullset,
       aes(x = X, y = Y, color = line, size = am_peak)) +
  geom_point()
ggplot(data = fullset,
       aes(x = X, y = Y, color = line, size = midday)) +
  geom_point()
ggplot(data = fullset,
       aes(x = X, y = Y, color = line, size = pm_peak)) +
  geom_point()
# NOTE(review): 'env' is presumably the evening-ridership column (the
# variable description above calls it "Evening") -- confirm the column
# name against the cleaned data.
ggplot(data = fullset,
       aes(x = X, y = Y, color = line, size = env)) +
  geom_point()
|
/inauguration.R
|
permissive
|
mfalling/Intro-to-Spatial-Analysis
|
R
| false
| false
| 3,968
|
r
|
# Library -----------------------------------------------------------------
library(dplyr)
library(tidyr)
library(ggplot2)
library(ggmap)
# NOTE(review): st_read()/st_coordinates() below are from the sf package,
# which is never attached here -- the script would fail at the shapefile
# read unless sf is loaded elsewhere; confirm.
# About -------------------------------------------------------------------
# Exploring .shp files and spatial analysis in R.
# Layering ridership data from Obama and Trump inauguration days with
# the geo-coordinates from the .shp file
# Load Data ---------------------------------------------------------------
# Data from https://data.world/transportation/wmata-ridership-data
obama <- read.csv("data/inaugurations/2009-Inauguration-Records-Raw.csv")
# NOTE(review): 'trump' is read but never used below.
trump <- read.csv("data/inaugurations/2017-Inauguration-Records-Raw.csv")
stations <- st_read("data/stations/stations.shp")
# Variable Description
# AM Peak: Station Opening to 9:30 AM.
# Mid-day: 9:30 AM – 3:00 PM.
# PM Peak: 3:00 PM- 7:00 PM.
# Evening: 7:00 PM to Station Closing.
# The “entry” number is what is used for official ridership stats.
# Cleaning ----------------------------------------------------------------
# Prep the time-of-day columns for conversion to numeric type
# (strip thousands separators before as.numeric).
times <- colnames(obama)[4:7]
obama[times] <- apply(obama[times], 2, function(x){gsub(",","",x)})
# Clean: keep only official "Entry" counts and make the counts numeric.
obama <- obama %>%
select(!c(equivalent_date_last_yr, sum)) %>%
filter(type == "Entry") %>%
mutate_at(times, as.numeric)
# Prep name columns for match: normalize separators before fuzzy matching.
obama$stations_name <- gsub("/|-", " ", obama$stations_name)
stations$name <- gsub("/|-", " ", stations$name)
# Because names don't match exactly, use fuzzy matching and store the comparisons.
matches <- NULL
for (i in 1:nrow(obama)){
index <- agrep(obama$stations_name[i], stations$name, ignore.case = TRUE, max.distance = .25)
if (length(index) > 0){
match <- data.frame(obama = obama$stations_name[i], stations = stations$name[index])
}
if (length(index) == 0){
match <- data.frame(obama = obama$stations_name[i], stations = "NO MATCH")
}
matches <- rbind(matches, match)
}
# Find where agrep returned multiple matches for the same pattern
ambiguousMatches <- matches %>%
group_by(obama) %>%
tally() %>%
filter(n > 1)
# Find the ambiguous matches in the matches df.
ambiguousAudit <- NULL
for (i in 1:nrow(ambiguousMatches)){
ambiguousMatch <- matches[which(matches$obama == ambiguousMatches$obama[i]), ]
ambiguousAudit <- rbind(ambiguousAudit, ambiguousMatch)
}
ambiguousAudit
# Manually review and update matches using the row numbers found in ambiguousAudit
matches <- matches[-c(2, 25, 29, 32, 34, 35, 49, 57, 69, 84, 97),]
matches
# Find all the missing matches.
missingMatches <- matches %>%
group_by(obama) %>%
filter(stations == "NO MATCH")
missingMatches
# Manual repair of the one recoverable missing match.
matches$stations[79] <- stations$name[9]
# Drop stations that don't have matches
matches <- matches %>%
filter(stations != "NO MATCH")
matches
# Rename back to original, for ease of joins.
colnames(matches)[2] <- "name"
colnames(matches)[1] <- "stations_name"
# Join the data
stations <- inner_join(matches, stations, by = "name")
obama <- inner_join(matches, obama, by = "stations_name")
fullset <- inner_join(stations, obama)
# Drop unnecessary columns
fullset <- fullset %>%
select(-c("type", "name", "marker.sym"))
head(fullset)
# Mapping -----------------------------------------------------------------
# Add Lat/Long to the dataset as discrete X/Y columns.
fullset <- cbind(fullset,
st_coordinates(fullset$geometry))
# Get sums for each line at each timepoint.
fullset %>%
drop_na() %>%
group_by(line) %>%
summarise_at(times, mean)
# Plot ridership by time of day.
ggplot(data = fullset,
aes(x = X, y = Y, color = line, size = am_peak)) +
geom_point()
ggplot(data = fullset,
aes(x = X, y = Y, color = line, size = midday)) +
geom_point()
ggplot(data = fullset,
aes(x = X, y = Y, color = line, size = pm_peak)) +
geom_point()
# NOTE(review): 'env' is presumably the evening column ("Evening" in the
# variable description above) -- confirm the column name.
ggplot(data = fullset,
aes(x = X, y = Y, color = line, size = env)) +
geom_point()
|
########################################################
# 1 Merges the training and the test sets to create one data set.
# NOTE(review): presumably the UCI HAR dataset layout -- confirm paths.
test<-read.table("test/X_test.txt")
train<-read.table("./train/X_train.txt")
data1<-rbind(train,test)
#create activity joint column
# Activity code for every observation, stacked in the same train/test order.
test_activity<-read.table("test/y_test.txt")
train_activity<-read.table("train/y_train.txt")
activity_col<-rbind(train_activity,test_activity)
#add activity_col to data1
data2<-cbind(data1,activity_col)
#create joint tester column
# Subject (test performer) id for every observation, same order.
test_performer<-read.table("test/subject_test.txt")
train_performer<-read.table("train/subject_train.txt")
performer_col<-rbind(train_performer,test_performer)
#add performer_col to data2 (original comment said activity_col/data1)
data3<-cbind(data2,performer_col)
########################################################################
# 2 Extracts only the measurements on the mean and standard deviation for each measurement
#copy and paste manually relevant variables from "features.txt" into a new
#txt file, called "relevant_var.txt"
#then extract the column with the var number
relevant_var<-read.table("relevant_var.txt", sep=" ")
# Column indices of the mean/std features, taken from the first column.
relevant_var_index<-relevant_var[,1]
# Also keep the last two appended columns: 562 = activity, 563 = performer
# id (assumes the raw data has 561 feature columns -- confirm).
a<-c(562,563)
relevant_var_index2<-c(relevant_var_index,a)
#extract relevant columns from data3
data4<-data3[,relevant_var_index2]
##########################################################################
# 3 Uses descriptive activity names to name the activities in the data set
# Map the numeric activity codes (1-6) in column 67 to descriptive names.
labels<-read.table("activity_labels.txt", sep=" ")
# Keep an untouched copy (numeric activity codes) -- referenced in step 5.
data5<-data4
# FIX: vectorized recode. The original row-by-row while/'next' loop
# re-tested every matched row a second time (the 'next' skipped the
# increment) and was O(rows) slower; this lookup produces the identical
# character column. Name order follows activity_labels.txt (1 = WALKING,
# ..., 6 = LAYING).
activity_names<-c("WALKING","WALKING_UPSTAIRS","WALKING_DOWNSTAIRS",
                  "SITTING","STANDING","LAYING")
data4[,67]<-activity_names[data4[,67]]
#bring lat 2 column in front
#data4<-data4[,c(68:67,1:(ncol(data4)-2))]
##########################################################################
# 4 Rename columns: feature names from relevant_var, plus the two
# appended identifier columns ('cn' is reused later for 'res').
cn <- c(as.character(relevant_var[, 2]), "activity", "tester_id")
colnames(data4) <- cn
#####################################################################
# 5 Create new dataset with average values for each activity for each test performer
# For each of the 30 subjects and 6 activities, average the 66 selected
# feature columns and append one row per (subject, activity) pair.
u<-1
res<-data.frame(matrix(0,ncol=68, nrow=0))
colnames(res)<-cn
while(u<=30){
    # All rows for subject u (column 68 = tester_id).
    tmpdf<-subset(data4,data4[,68]==u)
    v<-1
    while(v<= 6){
        # BUG FIX: the activity filter must test tmpdf itself. The original
        # condition used data5 (full data set, numeric codes, different
        # length), so tmpdf1 was useless -- and the means below were taken
        # from tmpdf, pooling ALL activities, which made every activity row
        # for a subject identical.
        tmpdf1<-subset(tmpdf,tmpdf[,67]==labels[v,2])
        #tmpdf1<-tmpdf1[complete.cases(tmpdf1),]
        tmp_res<-apply(tmpdf1[,1:66],2,mean)
        tmp_res<-c(tmp_res,labels[v,2],u)
        res<-rbind(res,tmp_res)
        v<-v+1
    }
    u<-u+1
    colnames(res)<-cn
}
#order results by tester and activity
res<-res[with(res, order(tester_id,activity)), ]
##########################################################################
#rename activity column with relevant name
# NOTE(review): step 5 already put labels[v,2] into res[,67], so matching
# it against the numeric codes 1-6 below only has an effect if that value
# was silently coerced to a number (e.g. a factor's integer code when
# read.table returned factors). In that case the hard-coded mapping
# (1 -> WALKING, ...) follows activity_labels.txt order, NOT the factor's
# alphabetical level order -- verify the produced labels.
labels<-read.table("activity_labels.txt", sep=" ")
count<-nrow(res)
i<-1
# NOTE(review): each branch ends in 'next' without incrementing i, so a
# matched row is re-tested once (its new string no longer equals any code)
# before i advances -- works, but does roughly twice the iterations.
while(i<=count){
if(res[i,67]==1){
res[i,67]<-"WALKING"
next
}
if(res[i,67]==2){
res[i,67]<-"WALKING_UPSTAIRS"
next
}
if(res[i,67]==3){
res[i,67]<-"WALKING_DOWNSTAIRS"
next
}
if(res[i,67]==4){
res[i,67]<-"SITTING"
next
}
if(res[i,67]==5){
res[i,67]<-"STANDING"
next
}
if(res[i,67]==6){
res[i,67]<-"LAYING"
next
}
i<-i+1
}
#bring last 2 columns in front (tester_id, activity)
res<-res[,c(68:67,1:(ncol(res)-2))]
# export result to txt file
write.table(res,"tidyDataSet.txt", sep=" ")
|
/run_analysis.R
|
no_license
|
daymos/gacd_project
|
R
| false
| false
| 4,571
|
r
|
########################################################
# 1 Merges the training and the test sets to create one data set.
# NOTE(review): presumably the UCI HAR dataset layout -- confirm paths.
test<-read.table("test/X_test.txt")
train<-read.table("./train/X_train.txt")
data1<-rbind(train,test)
#create activity joint column
test_activity<-read.table("test/y_test.txt")
train_activity<-read.table("train/y_train.txt")
activity_col<-rbind(train_activity,test_activity)
#add activity_col to data1
data2<-cbind(data1,activity_col)
#create joint tester column
test_performer<-read.table("test/subject_test.txt")
train_performer<-read.table("train/subject_train.txt")
performer_col<-rbind(train_performer,test_performer)
#add performer_col to data2 (original comment said activity_col/data1)
data3<-cbind(data2,performer_col)
########################################################################
# 2 Extracts only the measurements on the mean and standard deviation for each measurement
#copy and paste manually relevant variables from "features.txt" into a new
#txt file, called "relevant_var.txt"
#then extract the column with the var number
relevant_var<-read.table("relevant_var.txt", sep=" ")
# Keep mean/std feature indices plus the two appended columns
# (562 = activity, 563 = performer id; assumes 561 raw features -- confirm).
relevant_var_index<-relevant_var[,1]
a<-c(562,563)
relevant_var_index2<-c(relevant_var_index,a)
#extract relevant columns from data3
data4<-data3[,relevant_var_index2]
##########################################################################
# 3 Uses descriptive activity names to name the activities in the data set
#rename activities column with relevant name
labels<-read.table("activity_labels.txt", sep=" ")
count<-nrow(data4)
# Untouched copy with numeric activity codes, referenced in step 5 below.
data5<-data4
i<-1
# NOTE(review): each branch ends in 'next' without incrementing i, so a
# matched row is re-tested once before i advances -- works (the new string
# no longer equals any code) but does roughly twice the iterations.
while(i<=count){
if(data4[i,67]==1){
data4[i,67]<-"WALKING"
next
}
if(data4[i,67]==2){
data4[i,67]<-"WALKING_UPSTAIRS"
next
}
if(data4[i,67]==3){
data4[i,67]<-"WALKING_DOWNSTAIRS"
next
}
if(data4[i,67]==4){
data4[i,67]<-"SITTING"
next
}
if(data4[i,67]==5){
data4[i,67]<-"STANDING"
next
}
if(data4[i,67]==6){
data4[i,67]<-"LAYING"
next
}
i<-i+1
}
#bring lat 2 column in front
#data4<-data4[,c(68:67,1:(ncol(data4)-2))]
##########################################################################
# 4 rename columns
tmp<-as.character(relevant_var[,2])
tmp1<-c("activity","tester_id")
cn<-c(tmp,tmp1)
colnames(data4)<-cn
#####################################################################
# 5 Create new dataset with average values for each activity for each test performer
u<-1
res<-data.frame(matrix(0,ncol=68, nrow=0))
colnames(res)<-cn
while(u<=30){
tmpdf<-subset(data4,data4[,68]==u)
v<-1
while(v<= 6){
# NOTE(review): suspected bug -- this condition is evaluated against
# data5 (full data, numeric codes, different length), not tmpdf, and
# tmpdf1 is never used: the mean below is taken over tmpdf (all
# activities pooled), so every activity row for a subject is identical.
tmpdf1<-subset(tmpdf,data5[,67]==labels[v,2])
#tmpdf1<-tmpdf1[complete.cases(tmpdf1),]
tmp_res<-apply(tmpdf[,1:66],2,mean)
tmp_res<-c(tmp_res,labels[v,2],u)
res<-rbind(res,tmp_res)
v<-v+1
}
u<-u+1
colnames(res)<-cn
}
#order results by tester and activity
res<-res[with(res, order(tester_id,activity)), ]
##########################################################################
#rename activity column with relevant name
# NOTE(review): res[,67] was filled with labels[v,2] in step 5, so the
# numeric comparisons below only match if that value was coerced to a
# number (e.g. a factor's integer code) -- and then the hard-coded
# mapping follows activity_labels.txt order, not factor level order.
labels<-read.table("activity_labels.txt", sep=" ")
count<-nrow(res)
i<-1
while(i<=count){
if(res[i,67]==1){
res[i,67]<-"WALKING"
next
}
if(res[i,67]==2){
res[i,67]<-"WALKING_UPSTAIRS"
next
}
if(res[i,67]==3){
res[i,67]<-"WALKING_DOWNSTAIRS"
next
}
if(res[i,67]==4){
res[i,67]<-"SITTING"
next
}
if(res[i,67]==5){
res[i,67]<-"STANDING"
next
}
if(res[i,67]==6){
res[i,67]<-"LAYING"
next
}
i<-i+1
}
#bring last 2 columns in front (tester_id, activity)
res<-res[,c(68:67,1:(ncol(res)-2))]
# export result to txt file
write.table(res,"tidyDataSet.txt", sep=" ")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/superclasses.R
\name{superClass}
\alias{superClass}
\alias{superClass.somRes}
\alias{print.somSC}
\alias{summary.somSC}
\alias{plot.somSC}
\alias{projectIGraph.somSC}
\title{Create super-clusters from SOM results}
\usage{
superClass(sommap, method, members, k, h, ...)
\method{print}{somSC}(x, ...)
\method{summary}{somSC}(object, ...)
\method{plot}{somSC}(x, type = c("dendrogram", "grid", "hitmap", "lines",
"barplot", "boxplot", "mds", "color", "poly.dist", "pie", "graph",
"dendro3d", "radar", "projgraph"), plot.var = TRUE,
plot.legend = FALSE, add.type = FALSE, print.title = FALSE,
the.titles = paste("Cluster", 1:prod(x$som$parameters$the.grid$dim)),
...)
\method{projectIGraph}{somSC}(object, init.graph, ...)
}
\arguments{
\item{sommap}{A \code{somRes} object.}
\item{method}{Argument passed to the \code{\link{hclust}} function.}
\item{members}{Argument passed to the \code{\link{hclust}} function.}
\item{k}{Argument passed to the \code{\link{cutree}} function (number of
super-clusters to cut the dendrogram).}
\item{h}{Argument passed to the \code{\link{cutree}} function (height where
to cut the dendrogram).}
\item{\dots}{Used for \code{plot.somSC}: further arguments passed either to
the function \code{\link{plot}} (case \code{type="dendro"}) or to
\code{\link{plot.myGrid}} (case \code{type="grid"}) or to
\code{\link{plot.somRes}} (all other cases).}
\item{x}{A \code{somSC} object.}
\item{object}{A \code{somSC} object.}
\item{type}{The type of plot to draw. Default value is \code{"dendrogram"},
to plot the dendrogram of the clustering. Case \code{"grid"} plots the grid
in color according to the super clustering. Case \code{"projgraph"} uses an
\link[igraph]{igraph} object passed to the argument \code{variable} and plots
the projected graph as defined by the function \code{projectIGraph.somSC}.
All other cases are those available in the function \code{\link{plot.somRes}}
and superimpose the super-clusters over these plots.}
\item{plot.var}{A boolean indicating whether a graph showing the evolution of
the explained variance should be plotted. This argument is only used when
\code{type="dendrogram"}, its default value is \code{TRUE}.}
\item{plot.legend}{A boolean indicating whether a legend should be added to
the plot. This argument is only used when \code{type} is either \code{"grid"}
or \code{"hitmap"} or \code{"mds"}. Its default value is \code{FALSE}.}
\item{add.type}{A boolean, which default value is \code{FALSE}, indicating
whether you are giving an additional variable to the argument \code{variable}
or not. If you do, the function \code{\link{plot.somRes}} will be called with
the argument \code{what} set to \code{"add"}.}
\item{print.title}{Whether the cluster titles must be printed in center of
the grid or not for \code{type="grid"}. Default to \code{FALSE} (titles not
displayed).}
\item{the.titles}{If \code{print.title = TRUE}, values of the title to
display for \code{type="grid"}. Default to "Cluster " followed by the cluster
number.}
\item{init.graph}{An \link[igraph]{igraph} object which is projected
according to the super-clusters. The number of vertices of \code{init.graph}
must be equal to the number of rows in the original dataset processed by the
SOM (case \code{"korresp"} is not handled by this function). In the projected
graph, the vertices are positionned at the center of gravity of the
super-clusters (more details in the section \strong{Details} below).}
}
\value{
The \code{superClass} function returns an object of class
\code{somSC} which is a list of the following elements: \itemize{
\item{cluster}{The super clustering of the prototypes (only if either
\code{k} or \code{h} are given by user).}
\item{tree}{An \code{\link{hclust}} object.}
\item{som}{The \code{somRes} object given as argument (see
\code{\link{trainSOM}} for details).}
}
The \code{projectIGraph.somSC} function returns an object of class
\code{\link{igraph}} with the following attributes: \itemize{
\item the graph attribute \code{layout} which provides the layout of the
projected graph according to the center of gravity of the super-clusters
positionned on the SOM grid;
\item the vertex attributes \code{name} and \code{size} which, respectively
are the vertex number on the grid and the number of vertexes included in
the corresponding cluster;
\item the edge attribute \code{weight} which gives the number of edges (or
the sum of the weights) between the vertexes of the two corresponding
clusters.
}
}
\description{
Aggregate the resulting clustering of the SOM algorithm into
super-clusters.
}
\details{
The \code{superClass} function can be used in 2 ways:
\itemize{
\item to choose the number of super clusters via an \code{\link{hclust}}
object: then, both arguments \code{k} and \code{h} are not filled.
\item to cut the clustering into super clusters: then, either argument
\code{k} or argument \code{h} must be filled. See \code{\link{cutree}} for
details on these arguments.
}
The squared distance between prototypes is passed to the algorithm.
\code{summary} on a \code{superClass} object produces a complete summary of
the results that displays the number of clusters and super-clusters, the
clustering itself and performs ANOVA analyses. For \code{type="numeric"} the
ANOVA is performed for each input variable and tests the difference of this
variable across the super-clusters of the map. For \code{type="relational"}
a dissimilarity ANOVA is performed (see Anderson, 2001), except that in the
present version, a crude estimate of the p-value is used, which is based on
the Fisher distribution and not on a permutation test.
On plots, the different super classes are identified in the following ways:
\itemize{
\item either with different color, when \code{type} is set among:
\code{"grid"} (*, #), \code{"hitmap"} (*, #), \code{"lines"} (*, #),
\code{"barplot"} (*, #), \code{"boxplot"}, \code{"mds"} (*, #),
\code{"dendro3d"} (*, #), \code{"graph"} (*, #)
\item or with title, when \code{type} is set among: \code{"color"} (*),
\code{"poly.dist"} (*, #), \code{"pie"} (#), \code{"radar"} (#)
}
In the list above, the charts available for a \code{korresp} SOM are marked
with a * whereas those available for a \code{relational} SOM are marked with
a #.
\code{projectIGraph.somSC} produces a projected graph from the
\link[igraph]{igraph} object passed to the argument \code{variable} as
described in (Olteanu and Villa-Vialaneix, 2015). The attributes of this
graph are the same as the ones obtained from the SOM map itself in the
function \code{\link{projectIGraph.somRes}}. \code{plot.somSC} used with
\code{type="projgraph"} calculates this graph and represents it by
positionning the super-vertexes at the center of gravity of the
super-clusters. This feature can be combined with \code{pie.graph=TRUE} to
super-impose the information from an external factor related to the
individuals in the original dataset (or, equivalently, to the vertexes of the
graph).
}
\examples{
set.seed(11051729)
my.som <- trainSOM(x.data=iris[,1:4])
# choose the number of super-clusters
sc <- superClass(my.som)
plot(sc)
# cut the clustering
sc <- superClass(my.som, k=4)
summary(sc)
plot(sc)
plot(sc, type="hitmap", plot.legend=TRUE)
}
\references{
Anderson M.J. (2001). A new method for non-parametric multivariate analysis
of variance. \emph{Austral Ecology}, \strong{26}, 32-46.
Olteanu M., Villa-Vialaneix N. (2015) Using SOMbrero for clustering and
visualizing graphs. \emph{Journal de la Societe Francaise de Statistique},
\strong{156}, 95-119.
}
\seealso{
\code{\link{hclust}}, \code{\link{cutree}}, \code{\link{trainSOM}},
\code{\link{plot.somRes}}
}
\author{
Madalina Olteanu \email{madalina.olteanu@univ-paris1.fr}\cr
Nathalie Vialaneix \email{nathalie.vialaneix@inrae.fr}
}
|
/man/superClass.Rd
|
no_license
|
vkresch/SOMbrero
|
R
| false
| true
| 7,959
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/superclasses.R
\name{superClass}
\alias{superClass}
\alias{superClass.somRes}
\alias{print.somSC}
\alias{summary.somSC}
\alias{plot.somSC}
\alias{projectIGraph.somSC}
\title{Create super-clusters from SOM results}
\usage{
superClass(sommap, method, members, k, h, ...)
\method{print}{somSC}(x, ...)
\method{summary}{somSC}(object, ...)
\method{plot}{somSC}(x, type = c("dendrogram", "grid", "hitmap", "lines",
"barplot", "boxplot", "mds", "color", "poly.dist", "pie", "graph",
"dendro3d", "radar", "projgraph"), plot.var = TRUE,
plot.legend = FALSE, add.type = FALSE, print.title = FALSE,
the.titles = paste("Cluster", 1:prod(x$som$parameters$the.grid$dim)),
...)
\method{projectIGraph}{somSC}(object, init.graph, ...)
}
\arguments{
\item{sommap}{A \code{somRes} object.}
\item{method}{Argument passed to the \code{\link{hclust}} function.}
\item{members}{Argument passed to the \code{\link{hclust}} function.}
\item{k}{Argument passed to the \code{\link{cutree}} function (number of
super-clusters to cut the dendrogram).}
\item{h}{Argument passed to the \code{\link{cutree}} function (height where
to cut the dendrogram).}
\item{\dots}{Used for \code{plot.somSC}: further arguments passed either to
the function \code{\link{plot}} (case \code{type="dendro"}) or to
\code{\link{plot.myGrid}} (case \code{type="grid"}) or to
\code{\link{plot.somRes}} (all other cases).}
\item{x}{A \code{somSC} object.}
\item{object}{A \code{somSC} object.}
\item{type}{The type of plot to draw. Default value is \code{"dendrogram"},
to plot the dendrogram of the clustering. Case \code{"grid"} plots the grid
in color according to the super clustering. Case \code{"projgraph"} uses an
\link[igraph]{igraph} object passed to the argument \code{variable} and plots
the projected graph as defined by the function \code{projectIGraph.somSC}.
All other cases are those available in the function \code{\link{plot.somRes}}
and superimpose the super-clusters over these plots.}
\item{plot.var}{A boolean indicating whether a graph showing the evolution of
the explained variance should be plotted. This argument is only used when
\code{type="dendrogram"}, its default value is \code{TRUE}.}
\item{plot.legend}{A boolean indicating whether a legend should be added to
the plot. This argument is only used when \code{type} is either \code{"grid"}
or \code{"hitmap"} or \code{"mds"}. Its default value is \code{FALSE}.}
\item{add.type}{A boolean, which default value is \code{FALSE}, indicating
whether you are giving an additional variable to the argument \code{variable}
or not. If you do, the function \code{\link{plot.somRes}} will be called with
the argument \code{what} set to \code{"add"}.}
\item{print.title}{Whether the cluster titles must be printed in center of
the grid or not for \code{type="grid"}. Default to \code{FALSE} (titles not
displayed).}
\item{the.titles}{If \code{print.title = TRUE}, values of the title to
display for \code{type="grid"}. Default to "Cluster " followed by the cluster
number.}
\item{init.graph}{An \link[igraph]{igraph} object which is projected
according to the super-clusters. The number of vertices of \code{init.graph}
must be equal to the number of rows in the original dataset processed by the
SOM (case \code{"korresp"} is not handled by this function). In the projected
graph, the vertices are positionned at the center of gravity of the
super-clusters (more details in the section \strong{Details} below).}
}
\value{
The \code{superClass} function returns an object of class
\code{somSC} which is a list of the following elements: \itemize{
\item{cluster}{The super clustering of the prototypes (only if either
\code{k} or \code{h} are given by user).}
\item{tree}{An \code{\link{hclust}} object.}
\item{som}{The \code{somRes} object given as argument (see
\code{\link{trainSOM}} for details).}
}
The \code{projectIGraph.somSC} function returns an object of class
\code{\link{igraph}} with the following attributes: \itemize{
\item the graph attribute \code{layout} which provides the layout of the
projected graph according to the center of gravity of the super-clusters
positionned on the SOM grid;
\item the vertex attributes \code{name} and \code{size} which, respectively
are the vertex number on the grid and the number of vertexes included in
the corresponding cluster;
\item the edge attribute \code{weight} which gives the number of edges (or
the sum of the weights) between the vertexes of the two corresponding
clusters.
}
}
\description{
Aggregate the resulting clustering of the SOM algorithm into
super-clusters.
}
\details{
The \code{superClass} function can be used in 2 ways:
\itemize{
\item to choose the number of super clusters via an \code{\link{hclust}}
object: then, both arguments \code{k} and \code{h} are not filled.
\item to cut the clustering into super clusters: then, either argument
\code{k} or argument \code{h} must be filled. See \code{\link{cutree}} for
details on these arguments.
}
The squared distance between prototypes is passed to the algorithm.
\code{summary} on a \code{superClass} object produces a complete summary of
the results that displays the number of clusters and super-clusters, the
clustering itself and performs ANOVA analyses. For \code{type="numeric"} the
ANOVA is performed for each input variable and tests the difference of this
variable across the super-clusters of the map. For \code{type="relational"}
a dissimilarity ANOVA is performed (see Anderson, 2001), except that in the
present version, a crude estimate of the p-value is used, which is based on
the Fisher distribution and not on a permutation test.
On plots, the different super classes are identified in the following ways:
\itemize{
\item either with different color, when \code{type} is set among:
\code{"grid"} (*, #), \code{"hitmap"} (*, #), \code{"lines"} (*, #),
\code{"barplot"} (*, #), \code{"boxplot"}, \code{"mds"} (*, #),
\code{"dendro3d"} (*, #), \code{"graph"} (*, #)
\item or with title, when \code{type} is set among: \code{"color"} (*),
\code{"poly.dist"} (*, #), \code{"pie"} (#), \code{"radar"} (#)
}
In the list above, the charts available for a \code{korresp} SOM are marked
with a * whereas those available for a \code{relational} SOM are marked with
a #.
\code{projectIGraph.somSC} produces a projected graph from the
\link[igraph]{igraph} object passed to the argument \code{variable} as
described in (Olteanu and Villa-Vialaneix, 2015). The attributes of this
graph are the same than the ones obtained from the SOM map itself in the
function \code{\link{projectIGraph.somRes}}. \code{plot.somSC} used with
\code{type="projgraph"} calculates this graph and represents it by
positioning the super-vertexes at the center of gravity of the
super-clusters. This feature can be combined with \code{pie.graph=TRUE} to
super-impose the information from an external factor related to the
individuals in the original dataset (or, equivalently, to the vertexes of the
graph).
}
\examples{
set.seed(11051729)
my.som <- trainSOM(x.data=iris[,1:4])
# choose the number of super-clusters
sc <- superClass(my.som)
plot(sc)
# cut the clustering
sc <- superClass(my.som, k=4)
summary(sc)
plot(sc)
plot(sc, type="hitmap", plot.legend=TRUE)
}
\references{
Anderson M.J. (2001). A new method for non-parametric multivariate analysis
of variance. \emph{Austral Ecology}, \strong{26}, 32-46.
Olteanu M., Villa-Vialaneix N. (2015) Using SOMbrero for clustering and
visualizing graphs. \emph{Journal de la Societe Francaise de Statistique},
\strong{156}, 95-119.
}
\seealso{
\code{\link{hclust}}, \code{\link{cutree}}, \code{\link{trainSOM}},
\code{\link{plot.somRes}}
}
\author{
Madalina Olteanu \email{madalina.olteanu@univ-paris1.fr}\cr
Nathalie Vialaneix \email{nathalie.vialaneix@inrae.fr}
}
|
#' @importFrom utils getFromNamespace
launch_yaml_addin <- function() {
  # Fail fast if the suggested UI packages are not installed.
  stop_if_not_installed(c("miniUI", "shinyBS"))
  # The add-in's Shiny app ships inside the installed ymlthis package.
  yaml_app <- shiny::shinyAppDir(
    system.file("addin", "new_yaml", package = "ymlthis")
  )
  gadget_viewer <- shiny::dialogViewer("New YAML", height = 700)
  shiny::runGadget(yaml_app, viewer = gadget_viewer)
}
|
/R/addins.R
|
permissive
|
r-lib/ymlthis
|
R
| false
| false
| 323
|
r
|
#' @importFrom utils getFromNamespace
# Launch the "New YAML" RStudio add-in as a modal dialog gadget.
launch_yaml_addin <- function() {
  # Fail fast if the suggested UI packages are not installed.
  stop_if_not_installed(c("miniUI", "shinyBS"))
  # The add-in's Shiny app ships inside the installed ymlthis package.
  addin_dir <- system.file("addin", "new_yaml", package = "ymlthis")
  app <- shiny::shinyAppDir(addin_dir)
  shiny::runGadget(
    app,
    viewer = shiny::dialogViewer("New YAML", height = 700)
  )
}
|
## Caching the Inverse of a Matrix:
## Matrix inversion is usually a costly computation, so these two functions
## wrap a matrix in an object that caches its inverse once computed.
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
  ## @x: a square invertible matrix
  ## return: a list of four accessor functions sharing cached state:
  ##   1. set(y)          replace the stored matrix and drop the cached inverse
  ##   2. get()           return the stored matrix
  ##   3. setinv(inverse) store a computed inverse in the cache
  ##   4. getinv()        return the cached inverse (NULL if not yet computed)
  ## The returned list is the expected input to cacheSolve().
  inv <- NULL
  set <- function(y) {
    # `<<-` assigns in the enclosing environment, so the state owned by
    # this makeCacheMatrix() call is updated, not a function-local copy.
    x <<- y
    inv <<- NULL  # invalidate the stale cache whenever the matrix changes
  }
  get <- function() x
  setinv <- function(inverse) inv <<- inverse
  getinv <- function() inv
  list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## This function computes the inverse of the special "matrix" created by
## makeCacheMatrix above. If the inverse has already been calculated (and the
## matrix has not changed), it is retrieved from the cache instead.
cacheSolve <- function(x, ...) {
  ## @x: output of makeCacheMatrix()
  ## @...: further arguments passed on to solve()
  ## return: inverse of the original matrix input to makeCacheMatrix()
  inv <- x$getinv()
  # If the inverse has already been calculated, return the cached copy
  # and skip the computation entirely.
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv)
  }
  # Otherwise compute the inverse and store it in the cache for next time.
  mat_data <- x$get()
  inv <- solve(mat_data, ...)
  x$setinv(inv)
  inv
}
|
/cachematrix.R
|
no_license
|
Lydia-Yeh/ProgrammingAssignment2
|
R
| false
| false
| 2,109
|
r
|
## Caching the Inverse of a Matrix:
## Matrix inversion can be expensive, so rather than recomputing an inverse
## repeatedly, these functions build an object that remembers it.
## makeCacheMatrix() constructs that cache-aware "matrix" object.
makeCacheMatrix <- function(x = matrix()) {
  # `x` should be a square invertible matrix. The closure keeps `cached_inv`
  # alongside `x`; the four accessors below share that state via `<<-`.
  # The returned list is the expected input to cacheSolve().
  cached_inv <- NULL
  list(
    # Replace the stored matrix and invalidate any cached inverse.
    set = function(y) {
      x <<- y
      cached_inv <<- NULL
    },
    # Return the stored matrix.
    get = function() x,
    # Store a computed inverse in the cache.
    setinv = function(inverse) cached_inv <<- inverse,
    # Return the cached inverse, or NULL if none has been stored yet.
    getinv = function() cached_inv
  )
}
## cacheSolve() returns the inverse of the cache-aware "matrix" produced by
## makeCacheMatrix(), reusing a previously computed inverse when one exists.
cacheSolve <- function(x, ...) {
  # `x`: list returned by makeCacheMatrix(); `...` is forwarded to solve().
  cached <- x$getinv()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result.
    cached <- solve(x$get(), ...)
    x$setinv(cached)
  } else {
    # Cache hit: announce it and reuse the stored inverse.
    message("getting cached data")
  }
  cached
}
|
#' eSpark Colors
#'
#' Look up one or more colors from eSpark's branding guide and return their
#' hex codes.
#'
#' @param colors A character vector of color names (case-insensitive); each
#'   must be one of "blue", "lightblue", "green", "orange", "purple", "red",
#'   "yellow", "gray".
#' @return An unnamed character vector of hex color strings, one per input.
#' @keywords blue, green, orange, purple, red, yellow, gray
#' @export
#' @examples
#' eSparkColor("blue")
eSparkColor <- function(colors) {
  colors <- tolower(colors)
  espark_colors <- list(
    "blue"      = rgb(0, 102, 204, maxColorValue = 255),
    "lightblue" = rgb(26, 137, 185, maxColorValue = 255),
    "green"     = rgb(128, 209, 65, maxColorValue = 255),
    "orange"    = rgb(255, 153, 0, maxColorValue = 255),
    "purple"    = rgb(160, 90, 200, maxColorValue = 255),
    "red"       = rgb(255, 80, 80, maxColorValue = 255),
    "yellow"    = rgb(255, 204, 51, maxColorValue = 255),
    "gray"      = rgb(205, 205, 205, maxColorValue = 255)
  )
  # Fail loudly on unknown names instead of silently returning NULL entries.
  unknown <- setdiff(colors, names(espark_colors))
  if (length(unknown) > 0) {
    stop("Unknown eSpark color(s): ", paste(unknown, collapse = ", "),
         call. = FALSE)
  }
  # vapply guarantees one character(1) per color (sapply could return a list).
  vapply(colors, function(col) espark_colors[[col]], character(1),
         USE.NAMES = FALSE)
}
|
/R/espark_colors.R
|
no_license
|
eSpark/esparkcolors
|
R
| false
| false
| 818
|
r
|
#' eSpark Colors
#'
#' Translate eSpark branding-guide color names into hex codes.
#' @param colors Character vector of color names (case-insensitive).
#' @keywords blue, green, orange, purple, red, yellow, gray
#' @export
#' @examples
#' eSparkColor("blue")
eSparkColor <- function(colors) {
  # Branding-guide palette, keyed by lower-case color name.
  brand_palette <- list(
    "blue" = rgb(0, 102, 204, maxColorValue = 255),
    "lightblue" = rgb(26, 137, 185, maxColorValue = 255),
    "green" = rgb(128, 209, 65, maxColorValue = 255),
    "orange" = rgb(255, 153, 0, maxColorValue = 255),
    "purple" = rgb(160, 90, 200, maxColorValue = 255),
    "red" = rgb(255, 80, 80, maxColorValue = 255),
    "yellow" = rgb(255, 204, 51, maxColorValue = 255),
    "gray" = rgb(205, 205, 205, maxColorValue = 255)
  )
  requested <- tolower(colors)
  # Strip the names sapply attaches so callers get a bare vector.
  unname(sapply(requested, function(nm) brand_palette[[as.character(nm)]]))
}
|
library(readr)
library(tidyverse)
library(stringr)
library(lubridate)
###############################
#### 1. LOADING IN DATAFRAMES ####
PrisonCovid <- read_csv("Data/UCLA_Historical-Data_CA.csv")
CACovid <- read_csv("Data/CA_COVID_Daily.csv")
CDCR <- read_csv("Data/CDCR/CDCR_Weekly_Cleaned.csv")
CDCR_Names <- data.frame(Name = unique(CDCR$Name))
Covid_Names <- data.frame(Name = unique(PrisonCovid$Name))
Rates_Demo <- read_csv("Data/rates_demo_mod.csv")
######################
#### 2. Prison COVID ####
PrisonCovid <- PrisonCovid %>% mutate(Name = str_to_upper(Name),
County = str_to_title(County),
Date = as.Date(Date, "%Y-%m-%d"))
# add month and year column for merging
PrisonCovid <- PrisonCovid %>% mutate(Year = as.numeric(str_extract(as.character(Date), '[:digit:]{4}(?=-)')),
Month =as.numeric(str_extract(as.character(Date), '(?<=(2020|2021)-)[:digit:]{2}')))
# filter to only include prisons
PrisonCovid <- PrisonCovid %>% filter(Jurisdiction == "state")
# select only variables of interest
PrisonCovid <- PrisonCovid %>% select(Jurisdiction, Name, County, Date, Year, Month,
Residents.Confirmed, Staff.Confirmed,
Residents.Deaths, Staff.Deaths,
Residents.Population, Staff.Population,
Latitude, Longitude, County.FIPS, Age,
Gender, Capacity, BJS.ID
)
####################
#### 3. Rates_Demo ####
Rates_Demo <- Rates_Demo %>% mutate(Date = mdy(Date),
Week = mdy(Week))
Rates_Demo <- Rates_Demo %>% select(Name, Date, Week, Gender,
female_prison_adm_rate, male_prison_adm_rate,
white_prison_adm_rate)
###############
#### 3. CDCR ####
# split institution name
CDCR <- CDCR %>% mutate(Name = str_extract(Institution, ".*(?= \\()"),
Date = mdy(Date))
# adding month and year columns
CDCR <- CDCR %>% mutate(Year = as.numeric(str_extract(as.character(Date), '[:digit:]{4}(?=-)')),
Month =as.numeric(str_extract(as.character(Date), '(?<=(2020|2021)-)[:digit:]{2}')))
# adding jurisdiction column
CDCR <- CDCR %>% mutate(Jurisdiction = "state")
# uppercase name for future merging
CDCR <- CDCR %>% mutate(Name = str_to_upper(Name))
# fix name formatting on some prisons
CDCR$Name[CDCR$Name == "CALIFORNIA HEALTH CARE FACILITY - STOCKTON"] <-
"CALIFORNIA HEALTH CARE FACILITY"
CDCR$Name[CDCR$Name == "CALIFORNIA STATE PRISON, CORCORAN"] <-
"CALIFORNIA STATE PRISON CORCORAN"
CDCR$Name[CDCR$Name == "CALIFORNIA STATE PRISON, LOS ANGELES COUNTY"] <-
"CALIFORNIA STATE PRISON LOS ANGELES"
CDCR$Name[CDCR$Name == "CALIFORNIA STATE PRISON, SACRAMENTO"] <-
"CALIFORNIA STATE PRISON SACRAMENTO"
CDCR$Name[CDCR$Name == "CALIFORNIA STATE PRISON, SOLANO"] <-
"CALIFORNIA STATE PRISON SOLANO"
CDCR$Name[CDCR$Name == "CENTRAL CALIFORNIA WOMEN'S FACILITY"] <-
"CENTRAL CALIFORNIA WOMENS FACILITY"
CDCR$Name[CDCR$Name == "CALIFORNIA SUBSTANCE ABUSE TREATMENT FACILITY"] <-
"SUBSTANCE ABUSE TREATMENT FACILITY"
CDCR$Name[CDCR$Name == "SAN QUENTIN STATE PRISON"] <-
"CALIFORNIA STATE PRISON SAN QUENTIN"
## Create Week variable for merging with PrisonCovid
# create variables that hold the unique weeks / days of each dataset
PrisonCovid_dates <- ymd(unique(PrisonCovid$Date))
CDCR_weeks <- (unique(CDCR$Date))
# now create "Week" variable that assigns the appropriate week
# to each unique date in the time series data
time_series_weeks <- data.frame(cbind("Date"=as.character(PrisonCovid_dates),
"Week"=character(length(PrisonCovid_dates))))
# Map every daily observation date onto the most recent CDCR reporting week
# (the latest week start that is <= the date).
# NOTE(review): if a date precedes every CDCR week, `difference >= 0` is all
# FALSE and min() over an empty set returns Inf with a warning, leaving no
# match — confirm the COVID series always starts after the first CDCR week.
for(i in 1:length(PrisonCovid_dates)){
    # calculate difference between date and vector of weeks
    difference <- PrisonCovid_dates[i] - CDCR_weeks
    # find the minimum positive value of the differences (this the week the date belongs to)
    # however, if difference is 0, then the week is equal to the date
    min_pos_diff <- min(difference[difference>=0])
    # now add appropriate week to time_series_weeks variable
    time_series_weeks[i,2] <- as.character(CDCR_weeks[which(difference == min_pos_diff)])
}
time_series_weeks <- time_series_weeks[order(time_series_weeks$Date),]
# merge back into PrisonCovid
PrisonCovid$Date <- as.character(PrisonCovid$Date)
# now we can merge the weeks into PrisonCovid:
PrisonCovid <- merge(PrisonCovid, time_series_weeks, by="Date")
# fix date name for merging
CDCR <- CDCR %>% rename(Week = Date)
## deciding which facilities need gender updates
cdcr_genders <- CDCR %>% group_by(Name, Gender) %>% summarise(mean(Percent_Occupied))
cdcr_genders[duplicated(cdcr_genders$Name),]
prison_genders <- PrisonCovid %>% group_by(Name, Gender) %>% summarise(mean(Residents.Population))
prison_genders %>% filter(Name %in% cdcr_genders[duplicated(cdcr_genders$Name),]$Name)
# only need to mix FOLOM and CA Medical
## clean FULSOM and CA Medical to include males and females
# subset of CDCR data containing rows for Folsom State Prison and combine data for males + females
CDCR_folsom <- CDCR %>%
filter(Name == "FOLSOM STATE PRISON") %>%
group_by(Week) %>%
summarize(Total_Population = sum(Total_Population),
Design_Capacity = sum(Design_Capacity),
Staffed_Capacity = sum(Staffed_Capacity),
Year = mean(Year),
Month = mean(Month)) %>%
mutate(Percent_Occupied = Total_Population / Design_Capacity * 100,
Gender = "Mixed",
Name = "FOLSOM STATE PRISON",
Institution = "FOLSOM STATE PRISON",
Jurisdiction = "state") %>%
select(Week, Institution, Total_Population, Design_Capacity,
Percent_Occupied, Staffed_Capacity, Gender, Name, Year, Month,
Jurisdiction)
CDCR_CAmedical <- CDCR %>%
filter(Name == "CALIFORNIA MEDICAL FACILITY") %>%
group_by(Week) %>%
summarize(Total_Population = sum(Total_Population),
Design_Capacity = sum(Design_Capacity),
Staffed_Capacity = sum(Staffed_Capacity),
Year = mean(Year),
Month = mean(Month)) %>%
mutate(Percent_Occupied = Total_Population / Design_Capacity * 100,
Gender = "Mixed",
Name = "CALIFORNIA MEDICAL FACILITY",
Institution = "CALIFORNIA MEDICAL FACILITY",
Jurisdiction = "state") %>%
select(Week, Institution, Total_Population, Design_Capacity,
Percent_Occupied, Staffed_Capacity, Gender, Name, Year, Month,
Jurisdiction)
# replace old Folsom State Prison data with this new combined data in CDCR data
CDCR <- CDCR %>% filter(Name != "FOLSOM STATE PRISON" & Name != "CALIFORNIA MEDICAL FACILITY")
CDCR <- rbind(CDCR, CDCR_folsom, CDCR_CAmedical)
##################################################
#### 4. PrisonCovid = PrisonCovid Merged w/ CDCR ####
# convert to character for merging
CDCR$Week <- as.character(CDCR$Week)
## merge 2 options: inner and left join (I will model with the inner join)
PrisonCovid.inner <- PrisonCovid %>% inner_join(CDCR, by = c("Name", "Gender", "Week", "Year"))
PrisonCovid.left <- PrisonCovid %>% left_join(CDCR, by = c("Name","Gender", "Week", "Year"))
## check to make sure NO double names
test <- PrisonCovid.inner[which(PrisonCovid.inner$Month.x != PrisonCovid.inner$Month.y),]
test <- PrisonCovid.inner[which(PrisonCovid.inner$Gender.x != PrisonCovid.inner$Gender.y),]
# should return empty data frame
# it's okay that month.x != month.y cuz some weeks include multiple months
## merge in race and gender data
# briefly change dates back to character
Rates_Demo <- Rates_Demo %>% mutate(Date = as.character(Date), Week = as.character(Week))
PrisonCovid.inner <- PrisonCovid.inner %>% left_join(Rates_Demo, by = c("Name", "Date", "Week", "Gender"))
PrisonCovid.inner <- PrisonCovid.inner %>% mutate(Date = ymd(Date), Week = ymd(Week))
## fill in missing values with most recent value
PrisonCovid.inner <- PrisonCovid.inner %>% arrange(Date)%>% group_by(Name) %>%
mutate(Residents.Confirmed.F = Residents.Confirmed,
male_prison_adm_rate.F = male_prison_adm_rate,
female_prison_adm_rate.F = female_prison_adm_rate,
white_prison_adm_rate.F = white_prison_adm_rate) %>% fill(Residents.Confirmed.F,
male_prison_adm_rate.F,
female_prison_adm_rate.F,
white_prison_adm_rate.F)
## Derive the per-day change series from a cumulative count vector.
CalculateChange <- function(vec) {
  # Subtract each element's predecessor, treating the value before the
  # first element as 0. head(vec, -1) is empty for length-0/1 input, so
  # the edge cases fall out naturally.
  vec - c(0, head(vec, -1))
}
PrisonCovid.inner <- PrisonCovid.inner %>% mutate(Residents.Confirmed_DailyChange = CalculateChange(Residents.Confirmed.F))
## save data
write.csv(PrisonCovid.inner, "Data/PrisonCovid_innerjoin.csv")
|
/Data_Cleaning.R
|
no_license
|
EmilyJAllen/COVID-19_Prisons
|
R
| false
| false
| 9,185
|
r
|
library(readr)
library(tidyverse)
library(stringr)
library(lubridate)
###############################
#### 1. LOADING IN DATAFRAMES ####
PrisonCovid <- read_csv("Data/UCLA_Historical-Data_CA.csv")
CACovid <- read_csv("Data/CA_COVID_Daily.csv")
CDCR <- read_csv("Data/CDCR/CDCR_Weekly_Cleaned.csv")
CDCR_Names <- data.frame(Name = unique(CDCR$Name))
Covid_Names <- data.frame(Name = unique(PrisonCovid$Name))
Rates_Demo <- read_csv("Data/rates_demo_mod.csv")
######################
#### 2. Prison COVID ####
PrisonCovid <- PrisonCovid %>% mutate(Name = str_to_upper(Name),
County = str_to_title(County),
Date = as.Date(Date, "%Y-%m-%d"))
# add month and year column for merging
PrisonCovid <- PrisonCovid %>% mutate(Year = as.numeric(str_extract(as.character(Date), '[:digit:]{4}(?=-)')),
Month =as.numeric(str_extract(as.character(Date), '(?<=(2020|2021)-)[:digit:]{2}')))
# filter to only include prisons
PrisonCovid <- PrisonCovid %>% filter(Jurisdiction == "state")
# select only variables of interest
PrisonCovid <- PrisonCovid %>% select(Jurisdiction, Name, County, Date, Year, Month,
Residents.Confirmed, Staff.Confirmed,
Residents.Deaths, Staff.Deaths,
Residents.Population, Staff.Population,
Latitude, Longitude, County.FIPS, Age,
Gender, Capacity, BJS.ID
)
####################
#### 3. Rates_Demo ####
Rates_Demo <- Rates_Demo %>% mutate(Date = mdy(Date),
Week = mdy(Week))
Rates_Demo <- Rates_Demo %>% select(Name, Date, Week, Gender,
female_prison_adm_rate, male_prison_adm_rate,
white_prison_adm_rate)
###############
#### 3. CDCR ####
# split institution name
CDCR <- CDCR %>% mutate(Name = str_extract(Institution, ".*(?= \\()"),
Date = mdy(Date))
# adding month and year columns
CDCR <- CDCR %>% mutate(Year = as.numeric(str_extract(as.character(Date), '[:digit:]{4}(?=-)')),
Month =as.numeric(str_extract(as.character(Date), '(?<=(2020|2021)-)[:digit:]{2}')))
# adding jurisdiction column
CDCR <- CDCR %>% mutate(Jurisdiction = "state")
# uppercase name for future merging
CDCR <- CDCR %>% mutate(Name = str_to_upper(Name))
# fix name formatting on some prisons
CDCR$Name[CDCR$Name == "CALIFORNIA HEALTH CARE FACILITY - STOCKTON"] <-
"CALIFORNIA HEALTH CARE FACILITY"
CDCR$Name[CDCR$Name == "CALIFORNIA STATE PRISON, CORCORAN"] <-
"CALIFORNIA STATE PRISON CORCORAN"
CDCR$Name[CDCR$Name == "CALIFORNIA STATE PRISON, LOS ANGELES COUNTY"] <-
"CALIFORNIA STATE PRISON LOS ANGELES"
CDCR$Name[CDCR$Name == "CALIFORNIA STATE PRISON, SACRAMENTO"] <-
"CALIFORNIA STATE PRISON SACRAMENTO"
CDCR$Name[CDCR$Name == "CALIFORNIA STATE PRISON, SOLANO"] <-
"CALIFORNIA STATE PRISON SOLANO"
CDCR$Name[CDCR$Name == "CENTRAL CALIFORNIA WOMEN'S FACILITY"] <-
"CENTRAL CALIFORNIA WOMENS FACILITY"
CDCR$Name[CDCR$Name == "CALIFORNIA SUBSTANCE ABUSE TREATMENT FACILITY"] <-
"SUBSTANCE ABUSE TREATMENT FACILITY"
CDCR$Name[CDCR$Name == "SAN QUENTIN STATE PRISON"] <-
"CALIFORNIA STATE PRISON SAN QUENTIN"
## Create Week variable for merging with PrisonCovid
# create variables that hold the unique weeks / days of each dataset
PrisonCovid_dates <- ymd(unique(PrisonCovid$Date))
CDCR_weeks <- (unique(CDCR$Date))
# now create "Week" variable that assigns the appropriate week
# to each unique date in the time series data
time_series_weeks <- data.frame(cbind("Date"=as.character(PrisonCovid_dates),
"Week"=character(length(PrisonCovid_dates))))
for(i in 1:length(PrisonCovid_dates)){
# calculate difference between date and vector of weeks
difference <- PrisonCovid_dates[i] - CDCR_weeks
# find the minimum positive value of the differences (this the week the date belongs to)
# however, if difference is 0, then the week is equal to the date
min_pos_diff <- min(difference[difference>=0])
# now add appropriate week to time_series_weeks variable
time_series_weeks[i,2] <- as.character(CDCR_weeks[which(difference == min_pos_diff)])
}
time_series_weeks <- time_series_weeks[order(time_series_weeks$Date),]
# merge back into PrisonCovid
PrisonCovid$Date <- as.character(PrisonCovid$Date)
# now we can merge the weeks into PrisonCovid:
PrisonCovid <- merge(PrisonCovid, time_series_weeks, by="Date")
# fix date name for merging
CDCR <- CDCR %>% rename(Week = Date)
## deciding which facilities need gender updates
cdcr_genders <- CDCR %>% group_by(Name, Gender) %>% summarise(mean(Percent_Occupied))
cdcr_genders[duplicated(cdcr_genders$Name),]
prison_genders <- PrisonCovid %>% group_by(Name, Gender) %>% summarise(mean(Residents.Population))
prison_genders %>% filter(Name %in% cdcr_genders[duplicated(cdcr_genders$Name),]$Name)
# only need to mix FOLOM and CA Medical
## clean FULSOM and CA Medical to include males and females
# subset of CDCR data containing rows for Folsom State Prison and combine data for males + females
CDCR_folsom <- CDCR %>%
filter(Name == "FOLSOM STATE PRISON") %>%
group_by(Week) %>%
summarize(Total_Population = sum(Total_Population),
Design_Capacity = sum(Design_Capacity),
Staffed_Capacity = sum(Staffed_Capacity),
Year = mean(Year),
Month = mean(Month)) %>%
mutate(Percent_Occupied = Total_Population / Design_Capacity * 100,
Gender = "Mixed",
Name = "FOLSOM STATE PRISON",
Institution = "FOLSOM STATE PRISON",
Jurisdiction = "state") %>%
select(Week, Institution, Total_Population, Design_Capacity,
Percent_Occupied, Staffed_Capacity, Gender, Name, Year, Month,
Jurisdiction)
CDCR_CAmedical <- CDCR %>%
filter(Name == "CALIFORNIA MEDICAL FACILITY") %>%
group_by(Week) %>%
summarize(Total_Population = sum(Total_Population),
Design_Capacity = sum(Design_Capacity),
Staffed_Capacity = sum(Staffed_Capacity),
Year = mean(Year),
Month = mean(Month)) %>%
mutate(Percent_Occupied = Total_Population / Design_Capacity * 100,
Gender = "Mixed",
Name = "CALIFORNIA MEDICAL FACILITY",
Institution = "CALIFORNIA MEDICAL FACILITY",
Jurisdiction = "state") %>%
select(Week, Institution, Total_Population, Design_Capacity,
Percent_Occupied, Staffed_Capacity, Gender, Name, Year, Month,
Jurisdiction)
# replace old Folsom State Prison data with this new combined data in CDCR data
CDCR <- CDCR %>% filter(Name != "FOLSOM STATE PRISON" & Name != "CALIFORNIA MEDICAL FACILITY")
CDCR <- rbind(CDCR, CDCR_folsom, CDCR_CAmedical)
##################################################
#### 4. PrisonCovid = PrisonCovid Merged w/ CDCR ####
# convert to character for merging
CDCR$Week <- as.character(CDCR$Week)
## merge 2 options: inner and left join (I will model with the inner join)
PrisonCovid.inner <- PrisonCovid %>% inner_join(CDCR, by = c("Name", "Gender", "Week", "Year"))
PrisonCovid.left <- PrisonCovid %>% left_join(CDCR, by = c("Name","Gender", "Week", "Year"))
## check to make sure NO double names
test <- PrisonCovid.inner[which(PrisonCovid.inner$Month.x != PrisonCovid.inner$Month.y),]
test <- PrisonCovid.inner[which(PrisonCovid.inner$Gender.x != PrisonCovid.inner$Gender.y),]
# should return empty data frame
# it's okay that month.x != month.y cuz some weeks include multiple months
## merge in race and gender data
# briefly change dates back to character
Rates_Demo <- Rates_Demo %>% mutate(Date = as.character(Date), Week = as.character(Week))
PrisonCovid.inner <- PrisonCovid.inner %>% left_join(Rates_Demo, by = c("Name", "Date", "Week", "Gender"))
PrisonCovid.inner <- PrisonCovid.inner %>% mutate(Date = ymd(Date), Week = ymd(Week))
## fill in missing values with most recent value
PrisonCovid.inner <- PrisonCovid.inner %>% arrange(Date)%>% group_by(Name) %>%
mutate(Residents.Confirmed.F = Residents.Confirmed,
male_prison_adm_rate.F = male_prison_adm_rate,
female_prison_adm_rate.F = female_prison_adm_rate,
white_prison_adm_rate.F = white_prison_adm_rate) %>% fill(Residents.Confirmed.F,
male_prison_adm_rate.F,
female_prison_adm_rate.F,
white_prison_adm_rate.F)
## add daily change variable from cumulative
# Returns, for a cumulative series, each element minus its predecessor
# (with 0 assumed before the first element).
CalculateChange <- function(vec){
  # Lag the vector by prepending 0 and dropping the last element, then
  # subtract elementwise: change[i] = vec[i] - vec[i-1].
  return(vec - c(0,vec[-(length(vec))]))
}
PrisonCovid.inner <- PrisonCovid.inner %>% mutate(Residents.Confirmed_DailyChange = CalculateChange(Residents.Confirmed.F))
## save data
write.csv(PrisonCovid.inner, "Data/PrisonCovid_innerjoin.csv")
|
# Return the monthly option expiration date for a given year/month,
# anchored to the third Friday of the month.
monthly_expiration <- function(year, month) {
  # Expirations are based on the third Friday of the month.
  third_fri <- third_friday(year, month)
  # prior to 2/20/2015, expirations were listed as a Saturday
  if (third_fri < lubridate::ymd(20150220)) {
    return(third_fri + 1)
  }
  # From 2/20/2015 onward the expiration is the third Friday itself,
  # unless that Friday is a holiday, in which case it moves back to the
  # previous business day.
  if (bizdays::is.bizday(third_fri)) {
    third_fri
  } else {
    bizdays::add.bizdays(third_fri, -1)
  }
}
|
/function/archive/monthly_expiration_MIGRATED.R
|
no_license
|
active-analytics/pqam_2018
|
R
| false
| false
| 644
|
r
|
# Compute the monthly option expiration date for a given year/month.
# Relies on third_friday() (defined elsewhere) plus lubridate and bizdays.
monthly_expiration <- function(year, month){
    # find third friday of month
    dt_third_friday <- third_friday(year, month)
    # prior to 2/20/2015, expirations were listed as a Saturday
    if(dt_third_friday < lubridate::ymd(20150220)) {
        dt_monthly_exp <- dt_third_friday + 1
    } else {
        dt_monthly_exp <- dt_third_friday
        # if third friday of month falls on a holiday,
        # then the expiration is the previous business day
        if (!bizdays::is.bizday(dt_third_friday)){
            dt_monthly_exp <-
                bizdays::add.bizdays(dt_third_friday, -1)
        }
    }
    # Return the computed expiration date (last expression, no explicit return).
    dt_monthly_exp
}
|
# Práctica Probabilidad y Estadística II
# Autores:
# - Marcos Chamorro Casillas
# - Arturo Peñas Mohedano
# - Álvaro Adanez Huecas
# - Ionel Constantin Trifan
# Cargamos la seed que necesitamos para la práctica
set.seed(2021)
# Cargamos la tabla con los datos
datos <- read.csv(file = "PYE2DataSet47.csv")
# Mostramos algunos valores de ella
head(datos)
## Parte 1.1: Identificación de Modelo y Muestreo
sleeptime <- sample(datos$sleeptime,200) # Muestras de tamaño 200
steps <- sample(datos$steps, 200) # Muestras de tamaño 200
# Antes que todo instalamos el paquete necesario para algunas representaciones
install.packages("e1071")
# Activar el paquete
library(e1071)
# Debemos hacer con las variables: sleeptime y steps
# Vamos a empezar con la variable SLEEPTIME
# -------------------------------------------------
# Calculamos las medias de sleeptime
summary(sleeptime)
# Histograma con los datos de sleeptime
hist(sleeptime, main = "sleeptime")
# Calculamos el skewness
skewness(sleeptime) # Del resultado podemos concluir que tiene un podo de desviación hacia la derecha
# Calculamos la kurtosis
kurtosis(sleeptime) # La kurtosis difiere bastante de 0 por lo cual la distribución de sleeptime no se parece a una normal
# Calculamos el boxplot de sleeptime
boxplot(sleeptime, main = "sleeptime")
# Para calcular la distribución fit debemos instalar el paquete MASS
install.packages("MASS")
# Lo activamos
library(MASS)
# Lo debemos usar para parecerse a las distribuciones: Normal, Exponencial y Gamma
# Normal:
fitdistr(sleeptime, c("normal"))
# La media y la desviación típica tendrían los valores:
# Media: 9.3860940
# Desv.típica: 4.8795351
# CONCLUSIÓN: Valores lejanos de 0 --> No se parece a una normal
# Gamma:
fitdistr(sleeptime, c("gamma"))
# shape rate
# 3.48100367 0.37086832
# (0.33276383) (0.03813724)
# Exponencial:
fitdistr(sleeptime, c("exponential"))
# El valor de lambda es: 0.106540591
# Calculamos la densidad de sleeptime
density(sleeptime)
# RESULTADOS:
# x y
# Min. :-3.552 Min. :3.442e-05
# 1st Qu.: 3.725 1st Qu.:6.696e-03
# Median :11.001 Median :2.651e-02
# Mean :11.001 Mean :3.432e-02
# 3rd Qu.:18.278 3rd Qu.:5.960e-02
# Max. :25.554 Max. :8.849e-02
# Hacemos el test de Kolmogorov
ks.test(sleeptime, pnorm, mean(sleeptime), sd(sleeptime))
# Lo que nos dice
# es que si p-value es menor que 0.05 va a representar la hipótesis de que los valores
# vienen de una distribución normal
# RESULTADOS:
# data: sleeptime
# D = 0.10616, p-value = 0.02204
# alternative hypothesis: two-sided
# ----------------------------------------
# Vamos a hacer lo mismo con la variable STEPS
#-------------------------------------------------
# Calculamos las medias de sleeptime
summary(steps)
# Histograma con los datos de sleeptime
hist(steps, main = "steps")
# Calculamos el skewness
skewness(steps) # Del resultado podemos concluir que tiene un podo de desviación hacia la izquierda
# Calculamos la kurtosis
kurtosis(steps) # La kurtosis difiere bastante de 0 por lo cual la distribución de steps no se parece a una normal
# Calculamos el boxplot de steps
boxplot(steps, main = "steps")
# Usamos la distribución fit para ver si se parece a las distribuciones: Normal, Exponencial y Gamma
# Normal:
fitdistr(steps, c("normal"))
# La media y la desviación típica tendrían los valores:
# Media: 11403.70607
# Desv.típica: 1381.44905
# CONCLUSIÓN: Valores lejanos de 0 --> No se parece a una normal
# Gamma:
fitdistr(steps, c("gamma"))
# Shape: 6.780212e+01
# Rate: 5.945865e-03
# Exponencial:
fitdistr(steps, c("exponential"))
# El valor de lambda es: 8.769079e-05
# Calculamos la densidad de steps
density(steps)
# RESULTADOS:
# x y
# Min. : 7334 Min. :1.007e-07
# 1st Qu.: 9255 1st Qu.:3.485e-05
# Median :11177 Median :1.028e-04
# Mean :11177 Mean :1.300e-04
# 3rd Qu.:13098 3rd Qu.:2.054e-04
# Max. :15019 Max. :4.097e-04
# Hacemos el test de Kolmogorov
ks.test(steps, pnorm, mean(steps), sd(steps))
# Lo que nos dice
# es que si p-value es menor que 0.05 va a representar la hipótesis de que los valores
# vienen de una distribución normal
# RESULTADOS:
# data: steps
# D = 0.14685, p-value = 0.0003587
# alternative hypothesis: two-sided
# ----------------------------------------
# ---------------------------------------
## Parte 1.2: Identificación de Modelo y Muestreo
# Para esto debemos crear lista de vectores de tamaños 30, 50 y 100 para guardar los valores del atributo Age
# de la tabra datos
EdadT30 <- vector(mode = "list", length = 30) # Para las 30 muestras
EdadT50 <- vector(mode = "list", length = 50) # Para las 50 muestras
EdadT100 <- vector(mode = "list", length = 100) # Para las 100 muestras
i <- 1 # Variable para recorrer los bucles while
# Ahora debemos escribir las listas de vectores con los valores de la tabla datos para Age
# Tamaño 30
while(i < 31)
{
EdadT30[[i]] <- sample(datos$Age, size = 200)
i <- i + 1
}
i <- 1
# Tamaño 50
while(i < 51)
{
EdadT50[[i]] <- sample(datos$Age, size = 200)
i <- i + 1
}
i <- 1
# Tamaño 100
while(i < 101)
{
EdadT100[[i]] <- sample(datos$Age, size = 200)
i <- i + 1
}
# Ya hemos hecho el muestreo de las edades con los tamaños de las listas
# Debemos calcular las medias muestrales de las 3 listas de vectores de Age
MediaAge30 <- vector(length = 30) # Vector de tamaño 30
MediaAge50 <- vector(length = 50) # Vector de tamaño 50
MediaAge100 <- vector(length = 100) # Vector de tamaño 100
i <- 1 # Para recorrer las listas y hacer las medias muestrales
# Lista de 30
while(i < 31)
{
MediaAge30[i] <- mean(EdadT30[[i]])
i <- i + 1
}
i <- 1
# Lista de 50
while(i < 51)
{
MediaAge50[i] <- mean(EdadT50[[i]])
i <- i + 1
}
i <- 1
# Lista de 100
while(i < 101)
{
MediaAge100[i] <- mean(EdadT100[[i]])
i <- i + 1
}
# Ya tenemos las medias muestrales de las distintas listas: 30, 50 y 100
# Para las de tamaño 30
hist(MediaAge30, main = "Lista de 30")
boxplot(MediaAge30, main = "Lista de 30")
fitdistr((MediaAge30), c("normal"))
# mean sd
# 29.07437323 0.22682088
# ( 0.04141164) ( 0.02928245)
#-------------------------------
# Para las de tamaño 50
hist(MediaAge50, main = "Lista de 50")
boxplot(MediaAge50, main = "Lista de 50")
fitdistr((MediaAge50), c("normal"))
# mean sd
# 29.04243771 0.24861621
# ( 0.03515964) ( 0.02486162)
# ------------------------------
# Para las de tamaño 100
hist(MediaAge100, main = "Lista de 100")
boxplot(MediaAge100, main = "Lista de 100")
fitdistr((MediaAge100), c("normal"))
# mean sd
# 29.01645978 0.20293871
# ( 0.02029387) ( 0.01434993)
# Ahora tenemos que hacer lo mismo pero para las varianza muestral
# Nos hacemos 3 listar para poder almacenar las varianzas
VarAge30 <- vector(length = 30) # Vector de tamaño 30
VarAge50 <- vector(length = 50) # Vector de tamaño 50
VarAge100 <- vector(length = 100) # Vector de tamaño 100
i <- 1 # Para recorrer las listas y hacer las varianzas muestrales
# Lista de 30
while(i < 31)
{
VarAge30[i] <- var(EdadT30[[i]])
i <- i + 1
}
i <- 1
# Lista de 50
while(i < 51)
{
VarAge50[i] <- var(EdadT50[[i]])
i <- i + 1
}
i <- 1
# Lista de 100
while(i < 101)
{
VarAge100[i] <- var(EdadT100[[i]])
i <- i + 1
}
# We now have the sample variances for the lists of 30, 50 and 100 samples.
# Size-30 list of sample variances
hist(VarAge30, main = "Lista de 30")
boxplot(VarAge30, main = "Lista de 30")
fitdistr((VarAge30), c("normal"))
# mean sd
# 10.7316176 0.9690967
# ( 0.1769320) ( 0.1251098)
# Size-50 list of sample variances
hist(VarAge50, main = "Lista de 50")
boxplot(VarAge50, main = "Lista de 50")
fitdistr((VarAge50), c("normal"))
# mean sd
# 10.63896821 0.87183533
# ( 0.12329614) ( 0.08718353)
# Size-100 list of sample variances
hist(VarAge100, main = "Lista de 100")
boxplot(VarAge100, main = "Lista de 100")
fitdistr((VarAge100), c("normal"))
# mean sd
# 10.63259757 0.95567296
# ( 0.09556730) ( 0.06757628)
# Next, the same exercise for the proportion of males (V) and females (M)
# in `datos`: draw lists of 30 / 50 / 100 samples, each a sample of 200
# values of the Sex column.
# lapply() over seq_len() replaces the original while-loops with manual
# counters; the sequence of sample() calls — and therefore the RNG stream —
# is exactly the same as in the original code.
SexT30 <- lapply(seq_len(30), function(k) sample(datos$Sex, size = 200))   # 30 samples
SexT50 <- lapply(seq_len(50), function(k) sample(datos$Sex, size = 200))   # 50 samples
SexT100 <- lapply(seq_len(100), function(k) sample(datos$Sex, size = 200)) # 100 samples
# Female/male ratio of each Sex sample: count of 'M' divided by count of 'V'.
# NOTE: despite the "Media" name these are ratios M/V, not proportions
# M/(M+V) — this matches the original computation exactly.
# vapply() replaces the manual while-loop counters.
ratio_mv <- function(s) sum(s == 'M') / sum(s == 'V')
MediaSex30 <- vapply(SexT30, ratio_mv, numeric(1))   # 30 ratios
MediaSex50 <- vapply(SexT50, ratio_mv, numeric(1))   # 50 ratios
MediaSex100 <- vapply(SexT100, ratio_mv, numeric(1)) # 100 ratios
# Size-30 list of M/V ratios
hist(MediaSex30, main = "Lista de 30")
boxplot(MediaSex30, main = "Lista de 30")
fitdistr((MediaSex30), c("normal"))
# mean sd
# 0.94638212 0.12578086
# (0.02296434) (0.01623824)
#-------------------------------
# Size-50 list (original comment said "30" — copy-paste slip)
hist(MediaSex50, main = "Lista de 50")
boxplot(MediaSex50, main = "Lista de 50")
fitdistr((MediaSex50), c("normal"))
# mean sd
# 0.99957220 0.14240297
# (0.02013882) (0.01424030)
# Size-100 list (original comment said "30" — copy-paste slip)
hist(MediaSex100, main = "Lista de 100")
boxplot(MediaSex100, main = "Lista de 100")
fitdistr((MediaSex100), c("normal"))
# mean sd
# 0.994629935 0.136969829
# (0.013696983) (0.009685229)
#---------------------------------------------------------
## PART 2: Classical estimation (point estimates, intervals)
# 2.1 Point estimation
# Split the data by sex; the two subsets are used for every test in this
# section.
# Male subset
datos_varones <- datos[datos$Sex == "V",]
# Female subset
datos_mujeres <- datos[datos$Sex == "M",]
# t.test and var.test
# Using sleeptime
# t.test checks H0: equality of means between the two groups:
t.test(x = datos_varones$sleeptime, y = datos_mujeres$sleeptime)
# Welch Two Sample t-test
# data: datos_varones$sleeptime and datos_mujeres$sleeptime
# t = -21.817, df = 9996.7, p-value < 2.2e-16
# alternative hypothesis: true difference in means is not equal to 0
# 95 percent confidence interval:
# -2.150314 -1.795765
# sample estimates:
# mean of x mean of y
# 8.120108 10.093147
# CONCLUSION: reject the null hypothesis (H0) --> p-value < 0.05
# var.test checks H0: equality of variances:
var.test(x = datos_varones$sleeptime, y = datos_mujeres$sleeptime)
# F test to compare two variances
# data: datos_varones$sleeptime and datos_mujeres$sleeptime
# F = 1.0014, num df = 5029, denom df = 4969, p-value = 0.9606
# alternative hypothesis: true ratio of variances is not equal to 1
# 95 percent confidence interval:
# 0.9473796 1.0584892
# sample estimates:
# ratio of variances
# 1.001399
# CONCLUSION: do not reject H0 (equal variances) --> p-value 0.9606 > 0.05
# (the original left this conclusion blank)
# Repeat the tests with random samples of size 200 from each group.
# seq_len(nrow(x)) replaces 1:nrow(x): the 1:n idiom silently produces
# c(1, 0) when n == 0, whereas seq_len(0) is empty. For n >= 1 both give
# the same index vector, so the sampling (and the RNG stream) is unchanged.
datos_varones200 <- datos_varones[sample(seq_len(nrow(datos_varones)), 200), ]
datos_mujeres200 <- datos_mujeres[sample(seq_len(nrow(datos_mujeres)), 200), ]
# H0: equal means, on the size-200 samples
t.test(x = datos_varones200$sleeptime, y = datos_mujeres200$sleeptime)
# Welch Two Sample t-test
# data: datos_varones200$sleeptime and datos_mujeres200$sleeptime
# t = -3.4079, df = 397.84, p-value = 0.0007216
# alternative hypothesis: true difference in means is not equal to 0
# 95 percent confidence interval:
# -2.539209 -0.681325
# sample estimates:
# mean of x mean of y
# 8.381405 9.991672
# CONCLUSION: reject H0 --> p-value < 0.05
# H0: equal variances, on the size-200 samples
var.test(x = datos_varones200$sleeptime, y = datos_mujeres200$sleeptime)
# F test to compare two variances
# data: datos_varones200$sleeptime and datos_mujeres200$sleeptime
# F = 0.88748, num df = 199, denom df = 199, p-value = 0.4006
# alternative hypothesis: true ratio of variances is not equal to 1
# 95 percent confidence interval:
# 0.6716366 1.1727015
# sample estimates:
# ratio of variances
# 0.8874848
# CONCLUSION: do not reject H0 (equal variances) --> p-value 0.4006 > 0.05
# Same tests using the steps variable
# H0: equal means (full subsets)
t.test(x = datos_varones$steps, y = datos_mujeres$steps)
# Welch Two Sample t-test
# data: datos_varones$steps and datos_mujeres$steps
# t = -104.03, df = 9991.2, p-value < 2.2e-16
# alternative hypothesis: true difference in means is not equal to 0
# 95 percent confidence interval:
# -2003.686 -1929.574
# sample estimates:
# mean of x mean of y
# 10440.60 12407.23
# CONCLUSION: reject H0 --> p-value < 0.05
# H0: equal variances (full subsets)
var.test(x = datos_varones$steps, y = datos_mujeres$steps)
# F test to compare two variances
# data: datos_varones$steps and datos_mujeres$steps
# F = 0.97234, num df = 5029, denom df = 4969, p-value = 0.3215
# alternative hypothesis: true ratio of variances is not equal to 1
# 95 percent confidence interval:
# 0.9198916 1.0277774
# sample estimates:
# ratio of variances
# 0.9723439
# CONCLUSION: do not reject H0 (equal variances) --> p-value 0.3215 > 0.05
# Same, on the size-200 samples
t.test(x = datos_varones200$steps, y = datos_mujeres200$steps)
# Welch Two Sample t-test
# data: datos_varones200$steps and datos_mujeres200$steps
# t = -21.394, df = 397.99, p-value < 2.2e-16
# alternative hypothesis: true difference in means is not equal to 0
# 95 percent confidence interval:
# -2138.265 -1778.359
# sample estimates:
# mean of x mean of y
# 10490.81 12449.12
# CONCLUSION: reject H0 --> p-value < 0.05
var.test(x = datos_varones200$steps, y = datos_mujeres200$steps)
# F test to compare two variances
# data: datos_varones200$steps and datos_mujeres200$steps
# F = 0.98883, num df = 199, denom df = 199, p-value = 0.9369
# alternative hypothesis: true ratio of variances is not equal to 1
# 95 percent confidence interval:
# 0.7483301 1.3066111
# sample estimates:
# ratio of variances
# 0.9888258
# CONCLUSION: do not reject H0 (equal variances) --> p-value 0.9369 > 0.05
# Maximum-likelihood fitting via EstimationTools::maxlogL().
# Guard the installation so the package is only downloaded when missing;
# the original unconditional install.packages() re-installs on every run.
if (!requireNamespace("EstimationTools", quietly = TRUE)) {
  install.packages("EstimationTools")
}
library(EstimationTools)
# First for the SLEEPTIME variable: ML fit of a normal, with sd estimated
# on the log scale (link = log_link) so the optimizer stays in sd > 0.
mujeres_MV_Sleep <- maxlogL(x = datos_mujeres$sleeptime, dist = "dnorm", link = list(over = "sd", fun = "log_link"))
# Show the results
summary(mujeres_MV_Sleep)
# _______________________________________________________________
# Optimization routine: nlminb
# Standard Error calculation: Hessian from optim
# _______________________________________________________________
# AIC BIC
# 29102.28 29115.3
# _______________________________________________________________
# Estimate Std. Error Z value Pr(>|z|)
# mean 10.09315 0.06411 157.4 <2e-16 ***
# sd 4.51973 0.04533 99.7 <2e-16 ***
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
# _______________________________________________________________
# Note: p-values valid under asymptotic normality of estimators
# ---
# Same ML fit for the male subset
varones_MV_Sleep <- maxlogL(x = datos_varones$sleeptime, dist = "dnorm", link = list(over = "sd", fun = "log_link"))
# Show the results
summary(varones_MV_Sleep)
# _______________________________________________________________
# Optimization routine: nlminb
# Standard Error calculation: Hessian from optim
# _______________________________________________________________
# AIC BIC
# 29460.61 29473.65
# _______________________________________________________________
# Estimate Std. Error Z value Pr(>|z|)
# mean 8.12011 0.06377 127.3 <2e-16 ***
# sd 4.52290 0.04509 100.3 <2e-16 ***
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
# _______________________________________________________________
# Note: p-values valid under asymptotic normality of estimators
# ---
# Now for the STEPS variable
mujeres_MV_Steps <- maxlogL(x = datos_mujeres$steps, dist = "dnorm", link = list(over = "sd", fun = "log_link"))
summary(mujeres_MV_Steps)
# _______________________________________________________________
# Optimization routine: nlminb
# Standard Error calculation: Hessian from optim
# _______________________________________________________________
# AIC BIC
# 82278.88 82291.9
# _______________________________________________________________
# Estimate Std. Error Z value Pr(>|z|)
# mean 12407.235 13.499 919.13 <2e-16 ***
# sd 951.664 9.546 99.69 <2e-16 ***
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
# _______________________________________________________________
# Note: p-values valid under asymptotic normality of estimators
# ---
varones_MV_Steps <- maxlogL(x = datos_varones$steps, dist = "dnorm", link = list(over = "sd", fun = "log_link"))
summary(varones_MV_Steps)
# _______________________________________________________________
# Optimization routine: nlminb
# Standard Error calculation: Hessian from optim
# _______________________________________________________________
# AIC BIC
# 83131.08 83144.13
# _______________________________________________________________
# Estimate Std. Error Z value Pr(>|z|)
# mean 10440.604 13.232 789.1 <2e-16 ***
# sd 938.414 9.356 100.3 <2e-16 ***
# ---
# Signif. codes: 0 ‘***’ 0.001 ‘**’ 0.01 ‘*’ 0.05 ‘.’ 0.1 ‘ ’ 1
# _______________________________________________________________
# Note: p-values valid under asymptotic normality of estimators
# ---
# Part 2.2: Interval estimation, one population.
# Guard the installation so rcompanion is only downloaded when missing;
# the original unconditional install.packages() re-installs on every run.
if (!requireNamespace("rcompanion", quietly = TRUE)) {
  install.packages("rcompanion")
}
library(rcompanion)
# Traditional (t-based) confidence intervals for the mean, per sex subset.
# MALES
# For SLEEPTIME
# 90% confidence
groupwiseMean(sleeptime ~ Sex, data = datos_varones, conf = 0.90, digits = 3)
# Sex n Mean Conf.level Trad.lower Trad.upper
# 1 V 5030 8.12 0.9 8.02 8.23
# 95% confidence
groupwiseMean(sleeptime ~ Sex, data = datos_varones, conf = 0.95, digits = 3)
# Sex n Mean Conf.level Trad.lower Trad.upper
# 1 V 5030 8.12 0.95 8 8.25
# 99% confidence
groupwiseMean(sleeptime ~ Sex, data = datos_varones, conf = 0.99, digits = 3)
# Sex n Mean Conf.level Trad.lower Trad.upper
# 1 V 5030 8.12 0.99 7.96 8.28
# FEMALES
# For SLEEPTIME
# 90% confidence
groupwiseMean(sleeptime ~ Sex, data = datos_mujeres, conf = 0.90, digits = 3)
# Sex n Mean Conf.level Trad.lower Trad.upper
# 1 M 4970 10.1 0.9 9.99 10.2
# 95% confidence
groupwiseMean(sleeptime ~ Sex, data = datos_mujeres, conf = 0.95, digits = 3)
# Sex n Mean Conf.level Trad.lower Trad.upper
# 1 M 4970 10.1 0.95 9.97 10.2
# 99% confidence
groupwiseMean(sleeptime ~ Sex, data = datos_mujeres, conf = 0.99, digits = 3)
# Sex n Mean Conf.level Trad.lower Trad.upper
# 1 M 4970 10.1 0.99 9.93 10.3
# Now on the size-200 samples
# MALES
# For SLEEPTIME
# 90% confidence
groupwiseMean(sleeptime ~ Sex, data = datos_varones200, conf = 0.90, digits = 3)
# Sex n Mean Conf.level Trad.lower Trad.upper
# 1 V 200 8.08 0.9 7.56 8.6
# 95% confidence
groupwiseMean(sleeptime ~ Sex, data = datos_varones200, conf = 0.95, digits = 3)
# Sex n Mean Conf.level Trad.lower Trad.upper
# 1 V 200 8.08 0.95 7.46 8.7
# 99% confidence
groupwiseMean(sleeptime ~ Sex, data = datos_varones200, conf = 0.99, digits = 3)
# Sex n Mean Conf.level Trad.lower Trad.upper
# 1 V 200 8.08 0.99 7.26 8.9
# FEMALES
# For SLEEPTIME
# 90% confidence
groupwiseMean(sleeptime ~ Sex, data = datos_mujeres200, conf = 0.90, digits = 3)
# Sex n Mean Conf.level Trad.lower Trad.upper
# 1 M 200 10.4 0.9 9.88 11
# 95% confidence
groupwiseMean(sleeptime ~ Sex, data = datos_mujeres200, conf = 0.95, digits = 3)
# Sex n Mean Conf.level Trad.lower Trad.upper
# 1 M 200 10.4 0.95 9.77 11.1
# 99% confidence
groupwiseMean(sleeptime ~ Sex, data = datos_mujeres200, conf = 0.99, digits = 3)
# Sex n Mean Conf.level Trad.lower Trad.upper
# 1 M 200 10.4 0.99 9.56 11.3
# Now for the STEPS variable
# MALES
# 90% confidence
groupwiseMean(steps ~ Sex, data = datos_varones, conf = 0.90, digits = 3)
# Sex n Mean Conf.level Trad.lower Trad.upper
# 1 V 5030 10400 0.9 10400 10500
# 95% confidence
groupwiseMean(steps ~ Sex, data = datos_varones, conf = 0.95, digits = 3)
# Sex n Mean Conf.level Trad.lower Trad.upper
# 1 V 5030 10400 0.95 10400 10500
# 99% confidence
groupwiseMean(steps ~ Sex, data = datos_varones, conf = 0.99, digits = 3)
# Sex n Mean Conf.level Trad.lower Trad.upper
# 1 V 5030 10400 0.99 10400 10500
# FEMALES
# 90% confidence
groupwiseMean(steps ~ Sex, data = datos_mujeres, conf = 0.90, digits = 3)
# Sex n Mean Conf.level Trad.lower Trad.upper
# 1 M 4970 12400 0.9 12400 12400
# 95% confidence
groupwiseMean(steps ~ Sex, data = datos_mujeres, conf = 0.95, digits = 3)
# Sex n Mean Conf.level Trad.lower Trad.upper
# 1 M 4970 12400 0.95 12400 12400
# 99% confidence
groupwiseMean(steps ~ Sex, data = datos_mujeres, conf = 0.99, digits = 3)
# Sex n Mean Conf.level Trad.lower Trad.upper
# 1 M 4970 12400 0.99 12400 12400
# Bootstrap confidence intervals for the mean, via the boot package.
# Load the package
library(boot)
# NOTE(review): R = 100 bootstrap replicates is quite low; boot.ci is
# usually run with R >= 1000. Kept at 100 to match the recorded output.
# Confidence intervals for the MALES
VaronesBoot = boot(datos_varones$sleeptime, function(x, i) mean(x[i]), R = 100)
VaronesBootS = boot(datos_varones$steps, function(x, i) mean(x[i]), R = 100)
# For the SLEEPTIME variable: mean of the bootstrap replicates
mean(VaronesBoot$t[,1])
# [1] 8.122187
# 90% CI
boot.ci(VaronesBoot, conf = 0.90, type = c("norm"))
# RESULTS:
# BOOTSTRAP CONFIDENCE INTERVAL CALCULATIONS
# Based on 100 bootstrap replicates
# CALL :
# boot.ci(boot.out = VaronesBoot, conf = 0.9, type = c("norm"))
# Intervals :
# Level Normal
# 90% ( 8.025, 8.211 )
# Calculations and Intervals on Original Scale
# 95% CI
boot.ci(VaronesBoot, conf = 0.95, type = c("norm"))
# BOOTSTRAP CONFIDENCE INTERVAL CALCULATIONS
# Based on 100 bootstrap replicates
# CALL :
# boot.ci(boot.out = VaronesBoot, conf = 0.95, type = c("norm"))
# Intervals :
# Level Normal
# 95% ( 8.007, 8.229 )
# Calculations and Intervals on Original Scale
# 99% CI
boot.ci(VaronesBoot, conf = 0.99, type = c("norm"))
# BOOTSTRAP CONFIDENCE INTERVAL CALCULATIONS
# Based on 100 bootstrap replicates
# CALL :
# boot.ci(boot.out = VaronesBoot, conf = 0.99, type = c("norm"))
# Intervals :
# Level Normal
# 99% ( 7.972, 8.264 )
# Calculations and Intervals on Original Scale
# For the STEPS variable
mean(VaronesBootS$t[,1])
# [1] 10439.89
# 90% CI
boot.ci(VaronesBootS, conf = 0.90, type = c("norm"))
# BOOTSTRAP CONFIDENCE INTERVAL CALCULATIONS
# Based on 100 bootstrap replicates
# CALL :
# boot.ci(boot.out = VaronesBootS, conf = 0.9, type = c("norm"))
# Intervals :
# Level Normal
# 90% (10420, 10463 )
# Calculations and Intervals on Original Scale
# 95% CI
boot.ci(VaronesBootS, conf = 0.95, type = c("norm"))
# BOOTSTRAP CONFIDENCE INTERVAL CALCULATIONS
# Based on 100 bootstrap replicates
# CALL :
# boot.ci(boot.out = VaronesBootS, conf = 0.95, type = c("norm"))
# Intervals :
# Level Normal
# 95% (10416, 10467 )
# Calculations and Intervals on Original Scale
# 99% CI
boot.ci(VaronesBootS, conf = 0.99, type = c("norm"))
# BOOTSTRAP CONFIDENCE INTERVAL CALCULATIONS
# Based on 100 bootstrap replicates
# CALL :
# boot.ci(boot.out = VaronesBootS, conf = 0.99, type = c("norm"))
#Intervals :
# Level Normal
# 99% (10408, 10475 )
# Calculations and Intervals on Original Scale
# Confidence intervals for the FEMALES
MujeresBoot = boot(datos_mujeres$sleeptime, function(x, i) mean(x[i]), R = 100)
MujeresBootS = boot(datos_mujeres$steps, function(x, i) mean(x[i]), R = 100)
# For the SLEEPTIME variable
mean(MujeresBoot$t[,1])
# [1] 10.10151
# 90% CI
boot.ci(MujeresBoot, conf = 0.90, type = c("norm"))
# BOOTSTRAP CONFIDENCE INTERVAL CALCULATIONS
# Based on 100 bootstrap replicates
# CALL :
# boot.ci(boot.out = MujeresBoot, conf = 0.9, type = c("norm"))
# Intervals :
# Level Normal
# 90% ( 9.98, 10.19 )
# Calculations and Intervals on Original Scale
# 95% CI
boot.ci(MujeresBoot, conf = 0.95, type = c("norm"))
# BOOTSTRAP CONFIDENCE INTERVAL CALCULATIONS
# Based on 100 bootstrap replicates
# CALL :
# boot.ci(boot.out = MujeresBoot, conf = 0.95, type = c("norm"))
# Intervals :
# Level Normal
# 95% ( 9.96, 10.21 )
# Calculations and Intervals on Original Scale
# 99% CI
boot.ci(MujeresBoot, conf = 0.99, type = c("norm"))
# BOOTSTRAP CONFIDENCE INTERVAL CALCULATIONS
# Based on 100 bootstrap replicates
# CALL :
# boot.ci(boot.out = MujeresBoot, conf = 0.99, type = c("norm"))
# Intervals :
# Level Normal
# 99% ( 9.92, 10.25 )
# Calculations and Intervals on Original Scale
# For the STEPS variable
mean(MujeresBootS$t[,1])
# [1] 12408.29
# 90% CI
boot.ci(MujeresBootS, conf = 0.90, type = c("norm"))
# BOOTSTRAP CONFIDENCE INTERVAL CALCULATIONS
# Based on 100 bootstrap replicates
# CALL :
# boot.ci(boot.out = MujeresBootS, conf = 0.9, type = c("norm"))
#Intervals :
# Level Normal
# 90% (12384, 12428 )
# Calculations and Intervals on Original Scale
# 95% CI
boot.ci(MujeresBootS, conf = 0.95, type = c("norm"))
# BOOTSTRAP CONFIDENCE INTERVAL CALCULATIONS
# Based on 100 bootstrap replicates
# CALL :
# boot.ci(boot.out = MujeresBootS, conf = 0.95, type = c("norm"))
# Intervals :
# Level Normal
# 95% (12380, 12433 )
# Calculations and Intervals on Original Scale
# 99% CI
boot.ci(MujeresBootS, conf = 0.99, type = c("norm"))
# BOOTSTRAP CONFIDENCE INTERVAL CALCULATIONS
# Based on 100 bootstrap replicates
# CALL :
# boot.ci(boot.out = MujeresBootS, conf = 0.99, type = c("norm"))
# Intervals :
# Level Normal
# 99% (12371, 12441 )
# Calculations and Intervals on Original Scale
# Confidence intervals for the VARIANCES (chi-square pivot).
# For a sample of size n with sample variance s2, the interval at level
# `conf` is:
#   ( (n-1)*s2 / qchisq(1 - a/2, n-1) , (n-1)*s2 / qchisq(a/2, n-1) )
# with a = 1 - conf. The original script copy-pasted this computation six
# times; it is factored into one helper.
#
# x    : numeric vector (the sample)
# conf : confidence level in (0, 1), e.g. 0.95
# Returns a named vector c(lower<pct>, variance, upper<pct>), matching the
# original script's printed format.
var_conf_int <- function(x, conf) {
  grados <- length(x) - 1 # degrees of freedom
  s2 <- var(x)
  alfa <- 1 - conf
  lower <- s2 * grados / qchisq(alfa / 2, grados, lower.tail = FALSE)
  upper <- s2 * grados / qchisq(1 - alfa / 2, grados, lower.tail = FALSE)
  pct <- round(conf * 100)
  setNames(c(lower, s2, upper),
           c(paste0("lower", pct), "variance", paste0("upper", pct)))
}
# SLEEPTIME, 90% CI
var_conf_int(datos$sleeptime, 0.90)
# lower90 variance upper90
# 20.92851 21.41773 21.92527
# SLEEPTIME, 95% CI
var_conf_int(datos$sleeptime, 0.95)
# lower95 variance upper95
# 20.83623 21.41773 22.02405
# SLEEPTIME, 99% CI
var_conf_int(datos$sleeptime, 0.99)
# lower99 variance upper99
# 20.65743 21.41773 22.21881
# STEPS, 90% CI
var_conf_int(datos$steps, 0.90)
# lower90 variance upper90
# 1817638 1860127 1904207
# STEPS, 95% CI
var_conf_int(datos$steps, 0.95)
# lower95 variance upper95
# 1809623 1860127 1912785
# STEPS, 99% CI
var_conf_int(datos$steps, 0.99)
# lower99 variance upper99
# 1794095 1860127 1929700
# Interval estimation, two populations
# The stated goal is confidence intervals for the difference of means and
# the ratio of variances of the two populations.
# NOTE(review): groupwiseMean(y ~ 1) below computes a ONE-sample interval
# per subset, not an interval for the difference of means — confirm this
# is what was intended.
library("rcompanion")
# MALES
# For SLEEPTIME
# 90% confidence
groupwiseMean(sleeptime ~ 1, data = datos_varones, conf = 0.90, digits = 3)
# .id n Mean Conf.level Trad.lower Trad.upper
#1 <NA> 5030 8.12 0.9 8.02 8.23
# 95% confidence
groupwiseMean(sleeptime ~ 1, data = datos_varones, conf = 0.95, digits = 3)
# .id n Mean Conf.level Trad.lower Trad.upper
# 1 <NA> 5030 8.12 0.95 8 8.25
# 99% confidence
groupwiseMean(sleeptime ~ 1, data = datos_varones, conf = 0.99, digits = 3)
# .id n Mean Conf.level Trad.lower Trad.upper
# 1 <NA> 5030 8.12 0.99 7.96 8.28
# For STEPS
# 90% confidence
groupwiseMean(steps ~ 1, data = datos_varones, conf = 0.90, digits = 3)
# .id n Mean Conf.level Trad.lower Trad.upper
# 1 <NA> 5030 10400 0.9 10400 10500
# 95% confidence
groupwiseMean(steps ~ 1, data = datos_varones, conf = 0.95, digits = 3)
# .id n Mean Conf.level Trad.lower Trad.upper
# 1 <NA> 5030 10400 0.95 10400 10500
# 99% confidence
groupwiseMean(steps ~ 1, data = datos_varones, conf = 0.99, digits = 3)
# .id n Mean Conf.level Trad.lower Trad.upper
# 1 <NA> 5030 10400 0.99 10400 10500
# FEMALES
# For SLEEPTIME
# 90% confidence
groupwiseMean(sleeptime ~ 1, data = datos_mujeres, conf = 0.90, digits = 3)
# .id n Mean Conf.level Trad.lower Trad.upper
# 1 <NA> 4970 10.1 0.9 9.99 10.2
# 95% confidence
groupwiseMean(sleeptime ~ 1, data = datos_mujeres, conf = 0.95, digits = 3)
#.id n Mean Conf.level Trad.lower Trad.upper
# 1 <NA> 4970 10.1 0.95 9.97 10.2
# 99% confidence
groupwiseMean(sleeptime ~ 1, data = datos_mujeres, conf = 0.99, digits = 3)
# .id n Mean Conf.level Trad.lower Trad.upper
# 1 <NA> 4970 10.1 0.99 9.93 10.3
# For STEPS
# 90% confidence
groupwiseMean(steps ~ 1, data = datos_mujeres, conf = 0.90, digits = 3)
# .id n Mean Conf.level Trad.lower Trad.upper
# 1 <NA> 4970 12400 0.9 12400 12400
# 95% confidence
groupwiseMean(steps ~ 1, data = datos_mujeres, conf = 0.95, digits = 3)
# .id n Mean Conf.level Trad.lower Trad.upper
# 1 <NA> 4970 12400 0.95 12400 12400
# 99% confidence
groupwiseMean(steps ~ 1, data = datos_mujeres, conf = 0.99, digits = 3)
# .id n Mean Conf.level Trad.lower Trad.upper
# 1 <NA> 4970 12400 0.99 12400 12400
|
/Practica Buena/Practica.R
|
no_license
|
Marchaca/Prob_yEst
|
R
| false
| false
| 35,709
|
r
|
# Probability and Statistics II — course project
# Authors:
# - Marcos Chamorro Casillas
# - Arturo Peñas Mohedano
# - Álvaro Adanez Huecas
# - Ionel Constantin Trifan
# Fix the random seed so every sampling step below is reproducible
set.seed(2021)
# Load the data set
datos <- read.csv(file = "PYE2DataSet47.csv")
# Show the first rows
head(datos)
## Part 1.1: Model identification and sampling
sleeptime <- sample(datos$sleeptime,200) # Sample of size 200
steps <- sample(datos$steps, 200) # Sample of size 200
# skewness() and kurtosis() come from e1071. Guard the installation so the
# package is only downloaded when missing; the original unconditional
# install.packages() re-installs on every run.
if (!requireNamespace("e1071", quietly = TRUE)) {
  install.packages("e1071")
}
library(e1071)
# We must analyse the variables sleeptime and steps.
# Start with the SLEEPTIME variable
# -------------------------------------------------
# Summary statistics of sleeptime
summary(sleeptime)
# Histogram of sleeptime
hist(sleeptime, main = "sleeptime")
# Skewness
skewness(sleeptime) # result suggests a slight right skew
# Kurtosis
kurtosis(sleeptime) # far from 0, so sleeptime does not look normal
# Boxplot of sleeptime
boxplot(sleeptime, main = "sleeptime")
# fitdistr() lives in the MASS package. Guard the installation so MASS is
# only downloaded when missing; the original unconditional
# install.packages() re-installs on every run.
if (!requireNamespace("MASS", quietly = TRUE)) {
  install.packages("MASS")
}
library(MASS)
# Fit candidate distributions: Normal, Exponential and Gamma.
# Normal:
fitdistr(sleeptime, c("normal"))
# Fitted parameters:
# Mean: 9.3860940
# Std. dev.: 4.8795351
# NOTE(review): the fitted mean/sd being far from 0 says nothing about
# normality by itself; the KS test below is the actual normality check.
# Gamma:
fitdistr(sleeptime, c("gamma"))
# shape rate
# 3.48100367 0.37086832
# (0.33276383) (0.03813724)
# Exponential:
fitdistr(sleeptime, c("exponential"))
# Estimated rate (lambda): 0.106540591
# Kernel density estimate of sleeptime
density(sleeptime)
# RESULTS:
# x y
# Min. :-3.552 Min. :3.442e-05
# 1st Qu.: 3.725 1st Qu.:6.696e-03
# Median :11.001 Median :2.651e-02
# Mean :11.001 Mean :3.432e-02
# 3rd Qu.:18.278 3rd Qu.:5.960e-02
# Max. :25.554 Max. :8.849e-02
# Kolmogorov-Smirnov test against a normal with the sample mean and sd
ks.test(sleeptime, pnorm, mean(sleeptime), sd(sleeptime))
# If the p-value is below 0.05 we REJECT the hypothesis that the values
# come from a normal distribution (the original comment had this
# interpretation backwards).
# RESULTS:
# data: sleeptime
# D = 0.10616, p-value = 0.02204
# alternative hypothesis: two-sided
# ----------------------------------------
# Now the same for the STEPS variable
#-------------------------------------------------
# Summary statistics of steps
summary(steps)
# Histogram of steps
hist(steps, main = "steps")
# Skewness
skewness(steps) # result suggests a slight left skew
# Kurtosis
kurtosis(steps) # far from 0, so steps does not look normal
# Boxplot of steps
boxplot(steps, main = "steps")
# Fit candidate distributions: Normal, Exponential and Gamma.
# Normal:
fitdistr(steps, c("normal"))
# Fitted parameters:
# Mean: 11403.70607
# Std. dev.: 1381.44905
# Gamma:
fitdistr(steps, c("gamma"))
# Shape: 6.780212e+01
# Rate: 5.945865e-03
# Exponential:
fitdistr(steps, c("exponential"))
# Estimated rate (lambda): 8.769079e-05
# Kernel density estimate of steps
density(steps)
# RESULTS:
# x y
# Min. : 7334 Min. :1.007e-07
# 1st Qu.: 9255 1st Qu.:3.485e-05
# Median :11177 Median :1.028e-04
# Mean :11177 Mean :1.300e-04
# 3rd Qu.:13098 3rd Qu.:2.054e-04
# Max. :15019 Max. :4.097e-04
# Kolmogorov-Smirnov test against a normal with the sample mean and sd
ks.test(steps, pnorm, mean(steps), sd(steps))
# If the p-value is below 0.05 we REJECT the hypothesis that the values
# come from a normal distribution.
# RESULTS:
# data: steps
# D = 0.14685, p-value = 0.0003587
# alternative hypothesis: two-sided
# ----------------------------------------
# ---------------------------------------
## Part 1.2: Model identification and sampling
# Draw lists of 30 / 50 / 100 samples of the Age column, each sample of
# size 200.
# lapply() over seq_len() replaces the original while-loops with manual
# counters; the sequence of sample() calls — and therefore the RNG stream —
# is exactly the same as in the original code.
EdadT30 <- lapply(seq_len(30), function(k) sample(datos$Age, size = 200))   # 30 samples
EdadT50 <- lapply(seq_len(50), function(k) sample(datos$Age, size = 200))   # 50 samples
EdadT100 <- lapply(seq_len(100), function(k) sample(datos$Age, size = 200)) # 100 samples
# The Age sampling is done; now compute the sample mean of each sample.
# vapply() replaces the original while-loops with manual counters: it is
# the idiomatic vectorized form, guarantees a numeric result with one
# entry per sample, and cannot leave a stray loop index behind.
MediaAge30 <- vapply(EdadT30, mean, numeric(1))   # 30 sample means
MediaAge50 <- vapply(EdadT50, mean, numeric(1))   # 50 sample means
MediaAge100 <- vapply(EdadT100, mean, numeric(1)) # 100 sample means
# We now have the sample means for the lists of sizes 30, 50 and 100.
# fitdistr() (package MASS) fits a normal distribution to each vector of
# sample means; the values recorded below were obtained with set.seed(2021).
# Size-30 list of sample means
hist(MediaAge30, main = "Lista de 30")
boxplot(MediaAge30, main = "Lista de 30")
fitdistr((MediaAge30), c("normal"))
# mean sd
# 29.07437323 0.22682088
# ( 0.04141164) ( 0.02928245)
#-------------------------------
# Size-50 list of sample means
hist(MediaAge50, main = "Lista de 50")
boxplot(MediaAge50, main = "Lista de 50")
fitdistr((MediaAge50), c("normal"))
# mean sd
# 29.04243771 0.24861621
# ( 0.03515964) ( 0.02486162)
# ------------------------------
# Size-100 list of sample means
hist(MediaAge100, main = "Lista de 100")
boxplot(MediaAge100, main = "Lista de 100")
fitdistr((MediaAge100), c("normal"))
# mean sd
# 29.01645978 0.20293871
# ( 0.02029387) ( 0.01434993)
# Now the same for the sample variances of the Age samples.
# vapply() over each list replaces the original while-loops with manual
# counters: it is the idiomatic vectorized form, guarantees a numeric
# result with one entry per sample, and cannot leave a stray loop index
# behind. Behavior is identical to the original loops.
VarAge30 <- vapply(EdadT30, var, numeric(1))   # 30 sample variances
VarAge50 <- vapply(EdadT50, var, numeric(1))   # 50 sample variances
VarAge100 <- vapply(EdadT100, var, numeric(1)) # 100 sample variances
# We now have the sample variances for the lists of 30, 50 and 100 samples.
# Size-30 list of sample variances
hist(VarAge30, main = "Lista de 30")
boxplot(VarAge30, main = "Lista de 30")
fitdistr((VarAge30), c("normal"))
# mean sd
# 10.7316176 0.9690967
# ( 0.1769320) ( 0.1251098)
# Size-50 list of sample variances
hist(VarAge50, main = "Lista de 50")
boxplot(VarAge50, main = "Lista de 50")
fitdistr((VarAge50), c("normal"))
# mean sd
# 10.63896821 0.87183533
# ( 0.12329614) ( 0.08718353)
# Size-100 list of sample variances
hist(VarAge100, main = "Lista de 100")
boxplot(VarAge100, main = "Lista de 100")
fitdistr((VarAge100), c("normal"))
# mean sd
# 10.63259757 0.95567296
# ( 0.09556730) ( 0.06757628)
# Next, the same exercise for the proportion of males (V) and females (M)
# in `datos`: draw lists of 30 / 50 / 100 samples, each a sample of 200
# values of the Sex column.
# lapply() over seq_len() replaces the original while-loops with manual
# counters; the sequence of sample() calls — and therefore the RNG stream —
# is exactly the same as in the original code.
SexT30 <- lapply(seq_len(30), function(k) sample(datos$Sex, size = 200))   # 30 samples
SexT50 <- lapply(seq_len(50), function(k) sample(datos$Sex, size = 200))   # 50 samples
SexT100 <- lapply(seq_len(100), function(k) sample(datos$Sex, size = 200)) # 100 samples
# Female/male ratio of each Sex sample: count of 'M' divided by count of 'V'.
# NOTE: despite the "Media" name these are ratios M/V, not proportions
# M/(M+V) — this matches the original computation exactly.
# vapply() replaces the manual while-loop counters.
ratio_mv <- function(s) sum(s == 'M') / sum(s == 'V')
MediaSex30 <- vapply(SexT30, ratio_mv, numeric(1))   # 30 ratios
MediaSex50 <- vapply(SexT50, ratio_mv, numeric(1))   # 50 ratios
MediaSex100 <- vapply(SexT100, ratio_mv, numeric(1)) # 100 ratios
# Distribution of the women/men ratio for each batch size, with a normal
# ML fit. (The original comments labelled all three sections "size 30";
# they actually cover 30, 50 and 100 replicates.)
# Recorded fits from one run:
#   30 replicates:  mean 0.9464 (SE 0.0230), sd 0.1258 (SE 0.0162)
#   50 replicates:  mean 0.9996 (SE 0.0201), sd 0.1424 (SE 0.0142)
#   100 replicates: mean 0.9946 (SE 0.0137), sd 0.1370 (SE 0.0097)
hist(MediaSex30, main = "Lista de 30")
boxplot(MediaSex30, main = "Lista de 30")
fitdistr(MediaSex30, "normal")

hist(MediaSex50, main = "Lista de 50")
boxplot(MediaSex50, main = "Lista de 50")
fitdistr(MediaSex50, "normal")

hist(MediaSex100, main = "Lista de 100")
boxplot(MediaSex100, main = "Lista de 100")
fitdistr(MediaSex100, "normal")
# ------------------------------------------------------------------------
## PART 2: Classical estimation (point estimates, intervals)
# 2.1 Point estimation: split the data by sex and compare `sleeptime`
# between the two groups.
datos_varones <- datos[datos$Sex == "V", ] # men
datos_mujeres <- datos[datos$Sex == "M", ] # women

# Welch two-sample t-test for equality of means (H0: equal means).
# Recorded run: t = -21.817, df = 9996.7, p < 2.2e-16;
# means 8.120 (men) vs 10.093 (women); 95% CI for the difference
# (-2.150, -1.796). CONCLUSION: reject H0 (p < 0.05), the means differ.
t.test(x = datos_varones$sleeptime, y = datos_mujeres$sleeptime)

# F-test for equality of variances (H0: variance ratio = 1).
# Recorded run: F = 1.0014, p = 0.9606; 95% CI (0.947, 1.058).
# CONCLUSION: do NOT reject H0 (p > 0.05); the variances can be
# considered equal.
var.test(x = datos_varones$sleeptime, y = datos_mujeres$sleeptime)
# Repeat both tests on random samples of 200 rows per group.
datos_varones200 <- datos_varones[sample(seq_len(nrow(datos_varones)), 200), ]
datos_mujeres200 <- datos_mujeres[sample(seq_len(nrow(datos_mujeres)), 200), ]

# Recorded run: t = -3.4079, df = 397.84, p = 0.00072; means 8.381 vs
# 9.992. CONCLUSION: reject H0 (p < 0.05), the means still differ.
t.test(x = datos_varones200$sleeptime, y = datos_mujeres200$sleeptime)

# Recorded run: F = 0.88748, p = 0.4006; 95% CI (0.672, 1.173).
# CONCLUSION: do not reject H0 (equal variances).
var.test(x = datos_varones200$sleeptime, y = datos_mujeres200$sleeptime)
# Same pair of tests for the `steps` variable.
# Full data, recorded run: t = -104.03, p < 2.2e-16; means 10440.60 (men)
# vs 12407.23 (women). CONCLUSION: reject H0, the means differ.
t.test(x = datos_varones$steps, y = datos_mujeres$steps)

# Recorded run: F = 0.97234, p = 0.3215; 95% CI (0.920, 1.028).
# CONCLUSION: do not reject H0 (equal variances).
var.test(x = datos_varones$steps, y = datos_mujeres$steps)

# Size-200 samples, recorded run: t = -21.394, p < 2.2e-16; means
# 10490.81 vs 12449.12. CONCLUSION: reject H0.
t.test(x = datos_varones200$steps, y = datos_mujeres200$steps)

# Recorded run: F = 0.98883, p = 0.9369; 95% CI (0.748, 1.307).
# CONCLUSION: do not reject H0 (equal variances).
var.test(x = datos_varones200$steps, y = datos_mujeres200$steps)
# Maximum-likelihood estimation via EstimationTools::maxlogL().
# Only install the package when it is missing, so re-running the script
# does not hit the network (and possibly fail) on every execution.
if (!requireNamespace("EstimationTools", quietly = TRUE)) {
  install.packages("EstimationTools")
}
library(EstimationTools)
# ML fit of a normal model for SLEEPTIME, one fit per sex. The log link
# on `sd` keeps the optimiser in the positive domain.
# Recorded run (women): mean 10.0932 (SE 0.0641), sd 4.5197 (SE 0.0453);
# AIC 29102.28, BIC 29115.30.
mujeres_MV_Sleep <- maxlogL(
  x = datos_mujeres$sleeptime,
  dist = "dnorm",
  link = list(over = "sd", fun = "log_link")
)
summary(mujeres_MV_Sleep)

# Recorded run (men): mean 8.1201 (SE 0.0638), sd 4.5229 (SE 0.0451);
# AIC 29460.61, BIC 29473.65.
varones_MV_Sleep <- maxlogL(
  x = datos_varones$sleeptime,
  dist = "dnorm",
  link = list(over = "sd", fun = "log_link")
)
summary(varones_MV_Sleep)
# ML normal fits for STEPS, one per sex.
# Recorded run (women): mean 12407.235 (SE 13.499), sd 951.664 (SE 9.546);
# AIC 82278.88, BIC 82291.90.
mujeres_MV_Steps <- maxlogL(
  x = datos_mujeres$steps,
  dist = "dnorm",
  link = list(over = "sd", fun = "log_link")
)
summary(mujeres_MV_Steps)

# Recorded run (men): mean 10440.604 (SE 13.232), sd 938.414 (SE 9.356);
# AIC 83131.08, BIC 83144.13.
varones_MV_Steps <- maxlogL(
  x = datos_varones$steps,
  dist = "dnorm",
  link = list(over = "sd", fun = "log_link")
)
summary(varones_MV_Steps)
# Part 2.2: interval estimation, one population.
# rcompanion provides groupwiseMean() for traditional confidence
# intervals. Guard the install so it only runs when the package is absent.
if (!requireNamespace("rcompanion", quietly = TRUE)) {
  install.packages("rcompanion")
}
library(rcompanion)
# Traditional CIs for the mean of SLEEPTIME by sex at the three usual
# confidence levels. Twelve near-identical calls (differing only in
# `conf` and data set) are collapsed into loops; results are printed
# explicitly because auto-printing does not happen inside `for`.
conf_levels <- c(0.90, 0.95, 0.99)

# Men, full data. Recorded: mean 8.12; 90% CI [8.02, 8.23],
# 95% CI [8.00, 8.25], 99% CI [7.96, 8.28].
for (cl in conf_levels) {
  print(groupwiseMean(sleeptime ~ Sex, data = datos_varones, conf = cl, digits = 3))
}

# Women, full data. Recorded: mean 10.1; 90% CI [9.99, 10.2],
# 95% CI [9.97, 10.2], 99% CI [9.93, 10.3].
for (cl in conf_levels) {
  print(groupwiseMean(sleeptime ~ Sex, data = datos_mujeres, conf = cl, digits = 3))
}

# Men, size-200 sample. Recorded: mean 8.08; 90% CI [7.56, 8.60],
# 95% CI [7.46, 8.70], 99% CI [7.26, 8.90].
for (cl in conf_levels) {
  print(groupwiseMean(sleeptime ~ Sex, data = datos_varones200, conf = cl, digits = 3))
}

# Women, size-200 sample. Recorded: mean 10.4; 90% CI [9.88, 11.0],
# 95% CI [9.77, 11.1], 99% CI [9.56, 11.3].
for (cl in conf_levels) {
  print(groupwiseMean(sleeptime ~ Sex, data = datos_mujeres200, conf = cl, digits = 3))
}
# Traditional CIs for the mean of STEPS by sex (full data). With roughly
# 5000 rows per group the intervals are very tight at all three levels:
# men ~[10400, 10500], women ~[12400, 12400] (3-digit rounding).
for (cl in c(0.90, 0.95, 0.99)) {
  print(groupwiseMean(steps ~ Sex, data = datos_varones, conf = cl, digits = 3))
}
for (cl in c(0.90, 0.95, 0.99)) {
  print(groupwiseMean(steps ~ Sex, data = datos_mujeres, conf = cl, digits = 3))
}
# Bootstrap CIs for the men's means (sleeptime and steps): R = 100
# resamples of the sample mean, normal-approximation intervals.
library(boot)
boot_mean <- function(x, idx) mean(x[idx]) # bootstrap statistic
VaronesBoot <- boot(datos_varones$sleeptime, boot_mean, R = 100)
VaronesBootS <- boot(datos_varones$steps, boot_mean, R = 100)

# SLEEPTIME: recorded bootstrap mean 8.122; intervals
#   90% (8.025, 8.211), 95% (8.007, 8.229), 99% (7.972, 8.264).
mean(VaronesBoot$t[, 1])
boot.ci(VaronesBoot, conf = 0.90, type = c("norm"))
boot.ci(VaronesBoot, conf = 0.95, type = c("norm"))
boot.ci(VaronesBoot, conf = 0.99, type = c("norm"))

# STEPS: recorded bootstrap mean 10439.89; intervals
#   90% (10420, 10463), 95% (10416, 10467), 99% (10408, 10475).
mean(VaronesBootS$t[, 1])
boot.ci(VaronesBootS, conf = 0.90, type = c("norm"))
boot.ci(VaronesBootS, conf = 0.95, type = c("norm"))
boot.ci(VaronesBootS, conf = 0.99, type = c("norm"))
# Bootstrap CIs for the women's means, same procedure as for the men.
boot_mean_f <- function(x, idx) mean(x[idx]) # bootstrap statistic
MujeresBoot <- boot(datos_mujeres$sleeptime, boot_mean_f, R = 100)
MujeresBootS <- boot(datos_mujeres$steps, boot_mean_f, R = 100)

# SLEEPTIME: recorded bootstrap mean 10.102; intervals
#   90% (9.98, 10.19), 95% (9.96, 10.21), 99% (9.92, 10.25).
mean(MujeresBoot$t[, 1])
boot.ci(MujeresBoot, conf = 0.90, type = c("norm"))
boot.ci(MujeresBoot, conf = 0.95, type = c("norm"))
boot.ci(MujeresBoot, conf = 0.99, type = c("norm"))

# STEPS: recorded bootstrap mean 12408.29; intervals
#   90% (12384, 12428), 95% (12380, 12433), 99% (12371, 12441).
mean(MujeresBootS$t[, 1])
boot.ci(MujeresBootS, conf = 0.90, type = c("norm"))
boot.ci(MujeresBootS, conf = 0.95, type = c("norm"))
boot.ci(MujeresBootS, conf = 0.99, type = c("norm"))
# Chi-square confidence intervals for a VARIANCE:
#   [ (n-1)s^2 / chi2_{alpha/2} , (n-1)s^2 / chi2_{1-alpha/2} ].
# One helper replaces six near-identical copies of the formula.
var_ci_bounds <- function(sample_var, df, conf) {
  # sample_var: sample variance s^2; df: degrees of freedom (n - 1);
  # conf: confidence level in (0, 1). Returns c(lower, upper).
  alpha <- 1 - conf
  c(lower = sample_var * df / qchisq(alpha / 2, df, lower.tail = FALSE),
    upper = sample_var * df / qchisq(1 - alpha / 2, df, lower.tail = FALSE))
}

# SLEEPTIME. Recorded: variance 21.4177; 90% [20.93, 21.93],
# 95% [20.84, 22.02], 99% [20.66, 22.22].
longitud <- length(datos$sleeptime) - 1
varSleeptime <- var(datos$sleeptime)
for (cl in c(0.90, 0.95, 0.99)) {
  b <- var_ci_bounds(varSleeptime, longitud, cl)
  print(c(lower = b[["lower"]], variance = varSleeptime, upper = b[["upper"]]))
}

# STEPS. Recorded: variance 1860127; 90% [1817638, 1904207],
# 95% [1809623, 1912785], 99% [1794095, 1929700].
longitudSt <- length(datos$steps) - 1
varSteps <- var(datos$steps)
for (cl in c(0.90, 0.95, 0.99)) {
  b <- var_ci_bounds(varSteps, longitudSt, cl)
  print(c(lower = b[["lower"]], variance = varSteps, upper = b[["upper"]]))
}
# Interval estimation, two populations: one-sample CIs (formula `~ 1`)
# computed separately for each sex, for both variables. The twelve
# copy-pasted calls are collapsed into loops over the confidence levels;
# printing is explicit because auto-printing does not fire inside `for`.
# Recorded values: men sleeptime mean 8.12 (95% CI [8.00, 8.25]),
# men steps mean ~10400; women sleeptime mean 10.1 (95% CI [9.97, 10.2]),
# women steps mean ~12400.
library("rcompanion")
# Men: sleeptime then steps.
for (cl in c(0.90, 0.95, 0.99)) {
  print(groupwiseMean(sleeptime ~ 1, data = datos_varones, conf = cl, digits = 3))
}
for (cl in c(0.90, 0.95, 0.99)) {
  print(groupwiseMean(steps ~ 1, data = datos_varones, conf = cl, digits = 3))
}
# Women: sleeptime then steps.
for (cl in c(0.90, 0.95, 0.99)) {
  print(groupwiseMean(sleeptime ~ 1, data = datos_mujeres, conf = cl, digits = 3))
}
for (cl in c(0.90, 0.95, 0.99)) {
  print(groupwiseMean(steps ~ 1, data = datos_mujeres, conf = cl, digits = 3))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/unzipDisMELSResults.R
\name{unzipDisMELSResults}
\alias{unzipDisMELSResults}
\title{Unzips and renames a series of DisMELS results or connectivity files}
\usage{
unzipDisMELSResults(YYYYs = NULL, MMs = NULL, DDs = NULL,
basename = "Results", dir.inp = getwd(), dir.out = getwd())
}
\arguments{
\item{YYYYs}{= years to extract (can be NULL)}
\item{MMs}{= months to extract (can be NULL)}
\item{DDs}{= days to extract (can be NULL)}
\item{basename}{= base name for zipped/unzipped files}
\item{dir.inp}{= folder containing the zip files}
\item{dir.out}{= folder where unzipped files will be written}
}
\description{
Function to unzip and rename a series of DisMELS results or connectivity files.
}
\details{
This function assumes zip files are named something like 'basenameYYYYMMDD.csv.zip' and
that the associated zipped csv file is 'basename.csv'. When the csv file is unzipped
in the folder specified by dir.out, it will be renamed 'basenameYYYYMMDD.csv'. Note
that at least one of YYYYs, MMs, and DDs must be a non-null vector.
}
|
/man/unzipDisMELSResults.Rd
|
permissive
|
wStockhausen/wtsDisMELSConn
|
R
| false
| true
| 1,119
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/unzipDisMELSResults.R
\name{unzipDisMELSResults}
\alias{unzipDisMELSResults}
\title{Unzips and renames a series of DisMELS results or connectivity files}
\usage{
unzipDisMELSResults(YYYYs = NULL, MMs = NULL, DDs = NULL,
basename = "Results", dir.inp = getwd(), dir.out = getwd())
}
\arguments{
\item{YYYYs}{= years to extract (can be NULL)}
\item{MMs}{= months to extract (can be NULL)}
\item{DDs}{= days to extract (can be NULL)}
\item{basename}{= base name for zipped/unzipped files}
\item{dir.inp}{= folder containing the zip files}
\item{dir.out}{= folder where unzipped files will be written}
}
\description{
Function to unzip and rename a series of DisMELS results or connectivity files.
}
\details{
This function assumes zip files are named something like 'basenameYYYYMMDD.csv.zip' and
that the associated zipped csv file is 'basename.csv'. When the csv file is unzipped
in the folder specified by dir.out, it will be renamed 'basenameYYYYMMDD.csv'. Note
that at least one of YYYYs, MMs, and DDs must be a non-null vector.
}
|
# Employee attrition analysis: load the raw data, inspect it, and derive
# a categorical "work happiness" index from three satisfaction scores.
library(caret)
orgData <- read.csv("D:/Imarticus_DSP/R Project/Attrition.csv")
# Quick structural inspection of the freshly loaded frame.
head(orgData)
summary(orgData)
str(orgData)
anyNA(orgData)
summary(orgData$Attrition)
##### work Happiness index calculation
# The three satisfaction columns are integer-coded; a score >= 3 is
# treated as "satisfied" below (assumed 1-4 scale — TODO confirm).
summary(as.factor(orgData$EnvironmentSatisfaction))
summary(as.factor(orgData$JobSatisfaction))
summary(as.factor(orgData$RelationshipSatisfaction))
# Classification rules, evaluated top to bottom (first match wins):
#   Very Happy:     job >= 3 AND environment >= 3 AND relationship >= 3
#   Happy:          job >= 3 AND at least one of the other two >= 3
#   Sad:            job <= 2 but at least one of the other two >= 3
#   Very Sad:       all three <= 2
#   Somewhat Happy: everything else (e.g. job >= 3 with both others <= 2)
orgData["Work_Happiness_Index"] <- ifelse((orgData$JobSatisfaction >= 3 & orgData$EnvironmentSatisfaction >=3 & orgData$RelationshipSatisfaction>=3),"Very Happy",
ifelse((orgData$JobSatisfaction >= 3 & (orgData$EnvironmentSatisfaction >=3 | orgData$RelationshipSatisfaction>=3)),"Happy",
ifelse((orgData$JobSatisfaction <=2 & (orgData$EnvironmentSatisfaction >=3 | orgData$RelationshipSatisfaction >=3)),"Sad",
ifelse((orgData$JobSatisfaction <=2 & orgData$EnvironmentSatisfaction <=2 & orgData$RelationshipSatisfaction<=2),"Very Sad","Somewhat Happy"))))
summary(as.factor(orgData$Work_Happiness_Index))
# Store the index as a factor for modelling.
orgData$Work_Happiness_Index <- as.factor(orgData$Work_Happiness_Index)
############## Domain Knowledge Variable Removal ###############
# Drop columns that carry no modelling information:
#  - EmployeeCount, Over18, StandardHours: a single level each
#  - EmployeeNumber: a unique row identifier
# Columns are now removed by NAME instead of by position
# (the original used orgData[-c(9,10,22,27)]), so the code survives
# any column reordering in the source CSV.
head(orgData[9]) #EmployeeCount
head(orgData[10]) #EmployeeNumber
head(orgData[22]) #Over18
head(orgData[27]) #StandardHours
drop_cols <- c("EmployeeCount", "EmployeeNumber", "Over18", "StandardHours")
orgData <- orgData[, !(names(orgData) %in% drop_cols)]
############# Splitting files
# Reproducible 70/30 train/test split via a fixed seed.
set.seed(300)
split <- sample(seq_len(nrow(orgData)), size = floor(0.70 * nrow(orgData)))
trainData <- orgData[split, ]
testData <- orgData[-split, ]
# Sanity check: class balance of Attrition in full/train/test sets.
summary(as.factor(orgData$Attrition))
summary(as.factor(trainData$Attrition))
summary(as.factor(testData$Attrition))
write.csv(trainData, file = 'D:/Imarticus_DSP/R Project/Code file/trainData_mani.csv')
write.csv(testData, file = 'D:/Imarticus_DSP/R Project/Code file/testData_mani.csv')
################## Code for checking the best Threshold for Logistic Regression Model
# Fit the full-predictor logistic model once; column 2 (Attrition) is
# excluded from the predictor frame.
lg_model1 <- glm(trainData$Attrition ~ ., family = binomial(link = 'logit'), data = trainData[-c(2)])
summary(lg_model1)
imp <- caret::varImp(lg_model1, scale=FALSE)
df <- data.frame(imp)
write.csv(df, file = 'D:/Imarticus_DSP/R Project/Code file/lm1_importance_mani.csv')
# Predicted probabilities do not depend on the threshold, so compute them
# ONCE instead of re-running predict() inside the loop 51 times (perf fix;
# results are identical).
log_prob1 <- predict(lg_model1, newdata = testData[-c(2)], type = "response")
thresh_range = seq(from = 0.25, to = 0.75, by=0.01)
lg_df <- data.frame(matrix(ncol = 10, nrow = 0))
colnames(lg_df) <- c("Threshold","Accuracy","Kappa","Sensitivity","Specificity","Pos_Pred_Value","Neg_Pred_Value","Total_Err","T1_Err","T2_Err")
# Sweep the decision threshold and record confusion-matrix metrics.
for (thresh in thresh_range){
  log_predict <- ifelse(log_prob1 > thresh,"Yes","No")
  conf <-table(testData$Attrition, log_predict, dnn = c("Actual", "Predicted"))
  print(conf)
  print(paste("====================Threshold =",thresh,"================================"))
  cm <- confusionMatrix(data = as.factor(log_predict), reference = testData$Attrition, positive="Yes")
  lg_df <- rbind(lg_df, data.frame(Threshold=thresh,Accuracy=cm$overall[1],Kappa=cm$overall[2],Sensitivity=cm$byClass[1],Specificity=cm$byClass[2],Pos_Pred_Value=cm$byClass[3],Neg_Pred_Value=cm$byClass[4],Total_Err=(cm$table[2]+cm$table[3]),T1_Err=cm$table[2],T2_Err=cm$table[3]))
}
write.csv(lg_df, file = 'D:/Imarticus_DSP/R Project/Code file/cm_log1_mani2.csv')
################## Code for checking the best Threshold for Logistic Regression Model With all important varialbles based on p value
# Reduced model keeping only the predictors found significant by p-value.
lg_model2 <- glm(trainData$Attrition ~ Age + BusinessTravel
                 + DistanceFromHome + EnvironmentSatisfaction
                 + Gender + JobInvolvement
                 + JobSatisfaction + MaritalStatus
                 + NumCompaniesWorked + OverTime
                 + RelationshipSatisfaction + TotalWorkingYears
                 + TrainingTimesLastYear + WorkLifeBalance + YearsAtCompany + YearsInCurrentRole
                 + YearsSinceLastPromotion + YearsWithCurrManager
                 , family = binomial(link = 'logit'), data = trainData[-c(2)])
summary(lg_model2)
# Hoist the loop-invariant predict() call out of the threshold sweep
# (it was previously recomputed for every threshold; same results).
log_prob2 <- predict(lg_model2, newdata = testData[-c(2)], type = "response")
thresh_range = seq(from = 0.25, to = 0.75, by=0.01)
lg_df <- data.frame(matrix(ncol = 10, nrow = 0))
colnames(lg_df) <- c("Threshold","Accuracy","Kappa","Sensitivity","Specificity","Pos_Pred_Value","Neg_Pred_Value","Total_Err","T1_Err","T2_Err")
for (thresh in thresh_range){
  log_predict <- ifelse(log_prob2 > thresh,"Yes","No")
  conf <-table(testData$Attrition, log_predict, dnn = c("Actual", "Predicted"))
  print(conf)
  print(paste("====================Threshold =",thresh,"================================"))
  cm <- confusionMatrix(data = as.factor(log_predict), reference = testData$Attrition, positive="Yes")
  lg_df <- rbind(lg_df, data.frame(Threshold=thresh,Accuracy=cm$overall[1],Kappa=cm$overall[2],Sensitivity=cm$byClass[1],Specificity=cm$byClass[2],Pos_Pred_Value=cm$byClass[3],Neg_Pred_Value=cm$byClass[4],Total_Err=(cm$table[2]+cm$table[3]),T1_Err=cm$table[2],T2_Err=cm$table[3]))
}
write.csv(lg_df, file = 'D:/Imarticus_DSP/R Project/Code file/cm_log2_mani.csv')
################## Code for checking the best Threshold for Logistic Regression Model With all important varialbles based on p value suing chisq test
# Model selected from the chi-square ANOVA of the full model.
lg_model4 <- glm(trainData$Attrition ~ Age + BusinessTravel + Department + DistanceFromHome + EnvironmentSatisfaction + JobInvolvement + JobLevel + JobSatisfaction + MaritalStatus + NumCompaniesWorked + OverTime + RelationshipSatisfaction + TrainingTimesLastYear + TotalWorkingYears + WorkLifeBalance + YearsAtCompany + YearsInCurrentRole + YearsSinceLastPromotion + YearsWithCurrManager + Work_Happiness_Index, family = binomial(link = 'logit'), data = trainData[-c(2)])
anova(lg_model4, test = 'Chisq')
# NOTE: the original initialised lg_df/colnames twice in a row; the
# duplicate has been removed. predict() is also hoisted out of the loop
# since it does not depend on the threshold.
log_prob4 <- predict(lg_model4, newdata = testData[-c(2)], type = "response")
thresh_range = seq(from = 0.25, to = 0.75, by=0.01)
lg_df <- data.frame(matrix(ncol = 10, nrow = 0))
colnames(lg_df) <- c("Threshold","Accuracy","Kappa","Sensitivity","Specificity","Pos_Pred_Value","Neg_Pred_Value","Total_Err","T1_Err","T2_Err")
for (thresh in thresh_range){
  log_predict <- ifelse(log_prob4 > thresh,"Yes","No")
  conf <-table(testData$Attrition, log_predict, dnn = c("Actual", "Predicted"))
  print(conf)
  print(paste("====================Threshold =",thresh,"================================"))
  cm <- confusionMatrix(data = as.factor(log_predict), reference = testData$Attrition, positive="Yes")
  lg_df <- rbind(lg_df, data.frame(Threshold=thresh,Accuracy=cm$overall[1],Kappa=cm$overall[2],Sensitivity=cm$byClass[1],Specificity=cm$byClass[2],Pos_Pred_Value=cm$byClass[3],Neg_Pred_Value=cm$byClass[4],Total_Err=(cm$table[2]+cm$table[3]),T1_Err=cm$table[2],T2_Err=cm$table[3]))
}
write.csv(lg_df, file = 'D:/Imarticus_DSP/R Project/Code file/cm_log4_mani.csv')
################## Code for checking the best Threshold (Tune length) for Decison Tree = Information Gain
library(rpart.plot)
library(caret)
# Sweep rpart tuneLength 1..35 with repeated 10-fold CV and record
# test-set confusion-matrix metrics for each setting.
dt_tunelng = seq(from = 1, to = 35, by=1)
dt_df <- data.frame(matrix(ncol = 10, nrow = 0))
# FIX: the header previously said "Threshold" although the rows are built
# with Tune_Length (as in the other tree sweeps below); the label was dead
# anyway because rbind() takes names from the appended data.frame.
colnames(dt_df) <- c("Tune_Length","Accuracy","Kappa","Sensitivity","Specificity","Pos_Pred_Value","Neg_Pred_Value","Total_Err","T1_Err","T2_Err")
dt_df
trctrl <- trainControl(method = "repeatedcv", number = 10, repeats = 3)
# The original single-line loop is reformatted; statements are unchanged.
for (len in dt_tunelng) {
  print(paste("====================Tune Length =",len,"================================"))
  set.seed(3333) # same seed per iteration -> identical CV folds
  dtree_model1 <- train(Attrition ~ ., data = trainData, method = "rpart",
                        parms = list(split = "information"),
                        trControl=trctrl, tuneLength = len)
  print(dtree_model1)
  dt_pred <- predict(dtree_model1, newdata = testData)
  cm <- confusionMatrix(data = as.factor(dt_pred), reference = testData$Attrition, positive="Yes")
  print(cm)
  dt_df <- rbind(dt_df, data.frame(Tune_Length=len,Accuracy=cm$overall[1],Kappa=cm$overall[2],Sensitivity=cm$byClass[1],Specificity=cm$byClass[2],Pos_Pred_Value=cm$byClass[3],Neg_Pred_Value=cm$byClass[4],Total_Err=(cm$table[2]+cm$table[3]),T1_Err=cm$table[2],T2_Err=cm$table[3]))
}
# Plot the final tree of the last fitted model.
prp(dtree_model1$finalModel, box.palette = "Reds", tweak = 1.2)
write.csv(dt_df, file = 'D:/Imarticus_DSP/R Project/Code file/cm_dt1_mani2.csv')
################## Code for checking the best Threshold (Tune length) for Decison Tree = Information Gain with all important Variables
# Same sweep as above, restricted to the hand-picked important predictors
# and a shorter tuneLength range (1..20).
dt_tunelng = seq(from = 1, to = 20, by = 1)
dt_df <- data.frame(matrix(ncol = 10, nrow = 0))
colnames(dt_df) <- c("Tune_Length", "Accuracy", "Kappa", "Sensitivity",
                     "Specificity", "Pos_Pred_Value", "Neg_Pred_Value",
                     "Total_Err", "T1_Err", "T2_Err")
dt_df
# Repeated 10-fold cross validation shared by every iteration.
trctrl <- trainControl(method = "repeatedcv", number = 10, repeats = 3)
for (len in dt_tunelng) {
  print(paste("====================Tune Length =", len, "================================"))
  set.seed(3333)
  dtree_model2 <- train(
    Attrition ~ Age + BusinessTravel + DailyRate + Department +
      DistanceFromHome + EnvironmentSatisfaction + JobLevel + JobRole +
      MaritalStatus + MonthlyIncome + OverTime + StockOptionLevel +
      TotalWorkingYears + YearsAtCompany + JobInvolvement + PercentSalaryHike,
    data = trainData, method = "rpart",
    parms = list(split = "information"),
    trControl = trctrl, tuneLength = len)
  print(dtree_model2)
  dt_pred <- predict(dtree_model2, newdata = testData)
  cm <- confusionMatrix(data = as.factor(dt_pred),
                        reference = testData$Attrition, positive = "Yes")
  print(cm)
  dt_df <- rbind(dt_df, data.frame(
    Tune_Length    = len,
    Accuracy       = cm$overall[1],
    Kappa          = cm$overall[2],
    Sensitivity    = cm$byClass[1],
    Specificity    = cm$byClass[2],
    Pos_Pred_Value = cm$byClass[3],
    Neg_Pred_Value = cm$byClass[4],
    Total_Err      = (cm$table[2] + cm$table[3]),
    T1_Err         = cm$table[2],
    T2_Err         = cm$table[3]))
}
prp(dtree_model2$finalModel, box.palette = "Reds", tweak = 1.2)
write.csv(dt_df, file = 'D:/Imarticus_DSP/R Project/Code file/cm_dt2_mani.csv')
################## Code for checking the best Threshold (Tune length) for Decison Tree = Gini Index
# Repeat the tuneLength sweep with the Gini split criterion on all predictors.
dt_tunelng = seq(from = 1, to = 35, by = 1)
dt_df <- data.frame(matrix(ncol = 10, nrow = 0))
colnames(dt_df) <- c("Tune_Length", "Accuracy", "Kappa", "Sensitivity",
                     "Specificity", "Pos_Pred_Value", "Neg_Pred_Value",
                     "Total_Err", "T1_Err", "T2_Err")
dt_df
trctrl <- trainControl(method = "repeatedcv", number = 10, repeats = 3)
for (len in dt_tunelng) {
  print(paste("====================Tune Length =", len, "================================"))
  set.seed(3333)
  dtree_fit_gini <- train(Attrition ~ ., data = trainData, method = "rpart",
                          parms = list(split = "gini"),
                          trControl = trctrl, tuneLength = len)
  print(dtree_fit_gini)
  dt_pred <- predict(dtree_fit_gini, newdata = testData)
  cm <- confusionMatrix(data = as.factor(dt_pred),
                        reference = testData$Attrition, positive = "Yes")
  print(cm)
  dt_df <- rbind(dt_df, data.frame(
    Tune_Length    = len,
    Accuracy       = cm$overall[1],
    Kappa          = cm$overall[2],
    Sensitivity    = cm$byClass[1],
    Specificity    = cm$byClass[2],
    Pos_Pred_Value = cm$byClass[3],
    Neg_Pred_Value = cm$byClass[4],
    Total_Err      = (cm$table[2] + cm$table[3]),
    T1_Err         = cm$table[2],
    T2_Err         = cm$table[3]))
}
write.csv(dt_df, file = 'D:/Imarticus_DSP/R Project/Code file/cm_gini_dt1_mani2.csv')
prp(dtree_fit_gini$finalModel, box.palette = "Reds", tweak = 1.2)
################## Code for checking the best Threshold (Tune length) for Decison Tree = Gini Index with all important Variables
# Gini-criterion sweep over the curated predictor list, tuneLength 1..20.
dt_tunelng = seq(from = 1, to = 20, by = 1)
dt_df <- data.frame(matrix(ncol = 10, nrow = 0))
colnames(dt_df) <- c("Tune_Length", "Accuracy", "Kappa", "Sensitivity",
                     "Specificity", "Pos_Pred_Value", "Neg_Pred_Value",
                     "Total_Err", "T1_Err", "T2_Err")
dt_df
trctrl <- trainControl(method = "repeatedcv", number = 10, repeats = 3)
for (len in dt_tunelng) {
  print(paste("====================Tune Length =", len, "================================"))
  set.seed(3333)
  dtree_fit_gini2 <- train(
    Attrition ~ MonthlyIncome + Age + TotalWorkingYears + YearsAtCompany +
      OverTime + JobLevel + StockOptionLevel + MaritalStatus +
      DistanceFromHome + BusinessTravel + EnvironmentSatisfaction + JobRole +
      PercentSalaryHike + Department + DailyRate + YearsSinceLastPromotion +
      TrainingTimesLastYear + WorkLifeBalance + YearsInCurrentRole,
    data = trainData, method = "rpart",
    parms = list(split = "gini"),
    trControl = trctrl, tuneLength = len)
  print(dtree_fit_gini2)
  dt_pred <- predict(dtree_fit_gini2, newdata = testData)
  cm <- confusionMatrix(data = as.factor(dt_pred),
                        reference = testData$Attrition, positive = "Yes")
  print(cm)
  dt_df <- rbind(dt_df, data.frame(
    Tune_Length    = len,
    Accuracy       = cm$overall[1],
    Kappa          = cm$overall[2],
    Sensitivity    = cm$byClass[1],
    Specificity    = cm$byClass[2],
    Pos_Pred_Value = cm$byClass[3],
    Neg_Pred_Value = cm$byClass[4],
    Total_Err      = (cm$table[2] + cm$table[3]),
    T1_Err         = cm$table[2],
    T2_Err         = cm$table[3]))
}
write.csv(dt_df, file = 'D:/Imarticus_DSP/R Project/Code file/cm_gini_dt2_mani.csv')
prp(dtree_fit_gini2$finalModel, box.palette = "Reds", tweak = 1.2)
################## Code for checking the best Threshold (ntree) for Random Forest
library(randomForest)
# Candidate forest sizes: fine steps of 5 up to 100, then coarse steps of 100
# up to 3000. FIX: both seq() calls produce 100, so the original grid fitted
# the 100-tree forest twice and wrote a duplicate row; unique() removes it.
ntree_val = unique(c(seq(from = 5, to = 100, by = 5),
                     seq(from = 100, to = 3000, by = 100)))
# Result accumulator: one row of confusion-matrix metrics per forest size.
rf_df <- data.frame(matrix(ncol = 10, nrow = 0))
colnames(rf_df) <- c("Num_Trees", "Accuracy", "Kappa", "Sensitivity",
                     "Specificity", "Pos_Pred_Value", "Neg_Pred_Value",
                     "Total_Err", "T1_Err", "T2_Err")
rf_df
for (num_tree in ntree_val) {
  print(paste("================== Tree Value =", num_tree, "===================="))
  set.seed(500)  # identical seed each pass so only ntree varies
  rf_model1 <- randomForest(
    trainData$Attrition ~ MonthlyIncome + Age + OverTime + DistanceFromHome +
      DailyRate + Work_Happiness_Index + TotalWorkingYears + MonthlyRate +
      HourlyRate + JobRole + YearsAtCompany + StockOptionLevel +
      EnvironmentSatisfaction + NumCompaniesWorked,
    data = trainData, importance = TRUE, ntree = num_tree)
  rf_pred <- predict(rf_model1, testData)
  rfcm <- confusionMatrix(data = as.factor(rf_pred),
                          reference = testData$Attrition, positive = "Yes")
  rf_df <- rbind(rf_df, data.frame(
    Num_Trees      = num_tree,
    Accuracy       = rfcm$overall[1],
    Kappa          = rfcm$overall[2],
    Sensitivity    = rfcm$byClass[1],
    Specificity    = rfcm$byClass[2],
    Pos_Pred_Value = rfcm$byClass[3],
    Neg_Pred_Value = rfcm$byClass[4],
    Total_Err      = (rfcm$table[2] + rfcm$table[3]),
    T1_Err         = rfcm$table[2],
    T2_Err         = rfcm$table[3]))
}
write.csv(rf_df, file = 'D:/Imarticus_DSP/R Project/Code file/cm_rf4_mani.csv')
################## Code for checking the best Threshold (cost/gama) for SVM
library("e1071")
set.seed(500)
# Result accumulator: one row of confusion-matrix metrics per (cost, gamma).
svm_mod_acc <- data.frame(matrix(ncol = 11, nrow = 0))
cols <- c("Cost","Gamma","Accuracy","Kappa","Sensitivity","Specificity","Pos_Pred_Value","Neg_Pred_Value","Total_Err","T1_Err","T2_Err")
colnames(svm_mod_acc) <- cols
#higher the cost better the classification
#lower gamma means tighter boundary
svm_cost <- c(seq(1, 9, 1), seq(10, 100, 10))
# FIX: gamma must be strictly positive for the radial kernel, so the grid now
# starts at 0.005 instead of the 0 produced by seq(0.000, ...); unique()
# drops values duplicated where adjacent seq() ranges meet (0.1 and 1).
svm_gm <- unique(c(seq(0.005, 0.1, 0.005), seq(0.1, 1.0, 0.1),
                   seq(1, 9, 1), seq(10, 100, 10)))
for (cst in svm_cost) {
  for (gm in svm_gm) {
    print(paste("========== Cost =",cst," Gamma=",gm," ============="))
    # Radial-basis SVM on the hand-picked predictor set.
    svm1 <- svm(Attrition ~ YearsAtCompany + OverTime + NumCompaniesWorked +
                  DistanceFromHome + JobInvolvement + WorkLifeBalance +
                  YearsSinceLastPromotion + MonthlyIncome + JobSatisfaction +
                  EnvironmentSatisfaction + TotalWorkingYears +
                  RelationshipSatisfaction + Work_Happiness_Index +
                  BusinessTravel + TrainingTimesLastYear + JobLevel +
                  Department + EducationField + Age + Gender + StockOptionLevel,
                data = trainData, kernel = "radial", cost = cst, gamma = gm)
    pred <- predict(svm1, testData)
    svm_cm <- confusionMatrix(data = as.factor(pred),
                              reference = testData$Attrition, positive = "Yes")
    svm_mod_acc <- rbind(svm_mod_acc, data.frame(
      Cost           = cst,
      Gamma          = gm,
      Accuracy       = svm_cm$overall[1],
      Kappa          = svm_cm$overall[2],
      Sensitivity    = svm_cm$byClass[1],
      Specificity    = svm_cm$byClass[2],
      Pos_Pred_Value = svm_cm$byClass[3],
      Neg_Pred_Value = svm_cm$byClass[4],
      Total_Err      = (svm_cm$table[2] + svm_cm$table[3]),
      T1_Err         = svm_cm$table[2],
      T2_Err         = svm_cm$table[3]))
  }
}
write.csv(svm_mod_acc, file = 'D:/Imarticus_DSP/R Project/Code file/cm_svm1_mani.csv')
|
/ATTRITION-Analysis_Code_ Model Finding best threshold.R
|
no_license
|
TJ5582/ATTRITION-Analysis
|
R
| false
| false
| 16,444
|
r
|
# Load the raw attrition data set and take a first look at its structure.
library(caret)
orgData <- read.csv("D:/Imarticus_DSP/R Project/Attrition.csv")
head(orgData)
summary(orgData)
str(orgData)
anyNA(orgData)                      # quick check for missing values anywhere
summary(orgData$Attrition)
##### work Happiness index calculation
# Inspect the spread of the three satisfaction ratings before combining them.
summary(as.factor(orgData$EnvironmentSatisfaction))
summary(as.factor(orgData$JobSatisfaction))
summary(as.factor(orgData$RelationshipSatisfaction))
# Derived feature: collapse the three satisfaction ratings into one ordinal
# label. NOTE(review): branch order is load-bearing -- each ifelse() only
# fires when the earlier conditions were FALSE, and any combination not
# matched by the four rules falls through to "Somewhat Happy".
orgData["Work_Happiness_Index"] <- ifelse((orgData$JobSatisfaction >= 3 & orgData$EnvironmentSatisfaction >=3 & orgData$RelationshipSatisfaction>=3),"Very Happy",
                                   ifelse((orgData$JobSatisfaction >= 3 & (orgData$EnvironmentSatisfaction >=3 | orgData$RelationshipSatisfaction>=3)),"Happy",
                                   ifelse((orgData$JobSatisfaction <=2 & (orgData$EnvironmentSatisfaction >=3 | orgData$RelationshipSatisfaction >=3)),"Sad",
                                   ifelse((orgData$JobSatisfaction <=2 & orgData$EnvironmentSatisfaction <=2 & orgData$RelationshipSatisfaction<=2),"Very Sad","Somewhat Happy"))))
summary(as.factor(orgData$Work_Happiness_Index))
# Store the derived label as a factor for use in the models below.
orgData$Work_Happiness_Index <- as.factor(orgData$Work_Happiness_Index)
############## Domain Knowledge Variable Removal ###############
# Removing columns with only 1 Level = "EmployeeCount","Over18", "StandardHours"
# Removing column "EmployeeNumber" as it has unqiue values
head(orgData[9]) #EmployeeCount
head(orgData[10]) #EmployeeNumber
head(orgData[22]) #Over18
head(orgData[27]) #StandardHours
# Drop by name rather than by positional index: positions 9/10/22/27 silently
# point at the wrong columns if the CSV layout ever changes.
orgData <- orgData[, !(names(orgData) %in%
                         c("EmployeeCount", "EmployeeNumber",
                           "Over18", "StandardHours"))]
############# Splitting files
# Reproducible 70/30 train/test split on row indices.
set.seed(300)
train_rows <- sample(seq_len(nrow(orgData)), size = floor(0.70 * nrow(orgData)))
trainData <- orgData[train_rows, ]
testData <- orgData[-train_rows, ]
# Sanity check: the class balance should look similar in both partitions.
summary(as.factor(orgData$Attrition))
summary(as.factor(trainData$Attrition))
summary(as.factor(testData$Attrition))
# Persist both partitions for later reuse/inspection.
write.csv(trainData, file = 'D:/Imarticus_DSP/R Project/Code file/trainData_mani.csv')
write.csv(testData, file = 'D:/Imarticus_DSP/R Project/Code file/testData_mani.csv')
################## Code for checking the best Threshold for Logistic Regression Model
# Full logistic model. Column 2 (the response) is excluded from `data` so
# that "~ ." does not place Attrition on the right-hand side as well.
lg_model1 <- glm(trainData$Attrition ~ ., family = binomial(link = 'logit'), data = trainData[-c(2)])
summary(lg_model1)

# Export caret's variable-importance table for later inspection.
imp <- caret::varImp(lg_model1, scale = FALSE)
df <- data.frame(imp)
write.csv(df, file = 'D:/Imarticus_DSP/R Project/Code file/lm1_importance_mani.csv')

# Sweep classification thresholds from 0.25 to 0.75 in steps of 0.01.
thresh_range = seq(from = 0.25, to = 0.75, by = 0.01)
lg_df <- data.frame(matrix(ncol = 10, nrow = 0))
colnames(lg_df) <- c("Threshold","Accuracy","Kappa","Sensitivity","Specificity","Pos_Pred_Value","Neg_Pred_Value","Total_Err","T1_Err","T2_Err")

# FIX(perf): the predicted probabilities do not depend on the threshold, so
# compute them once here instead of re-running predict() on every iteration.
log_prob <- predict(lg_model1, newdata = testData[-c(2)], type = "response")

for (thresh in thresh_range) {
  log_predict <- ifelse(log_prob > thresh, "Yes", "No")
  conf <- table(testData$Attrition, log_predict, dnn = c("Actual", "Predicted"))
  print(conf)
  print(paste("====================Threshold =",thresh,"================================"))
  cm <- confusionMatrix(data = as.factor(log_predict),
                        reference = testData$Attrition, positive = "Yes")
  lg_df <- rbind(lg_df, data.frame(
    Threshold      = thresh,
    Accuracy       = cm$overall[1],
    Kappa          = cm$overall[2],
    Sensitivity    = cm$byClass[1],
    Specificity    = cm$byClass[2],
    Pos_Pred_Value = cm$byClass[3],
    Neg_Pred_Value = cm$byClass[4],
    Total_Err      = (cm$table[2] + cm$table[3]),
    T1_Err         = cm$table[2],
    T2_Err         = cm$table[3]))
}
write.csv(lg_df, file = 'D:/Imarticus_DSP/R Project/Code file/cm_log1_mani2.csv')
################## Code for checking the best Threshold for Logistic Regression Model With all important varialbles based on p value
# Reduced logistic model keeping only the predictors that were significant
# (by p-value) in the full model above.
lg_model2 <- glm(trainData$Attrition ~ Age + BusinessTravel
                 + DistanceFromHome + EnvironmentSatisfaction
                 + Gender + JobInvolvement
                 + JobSatisfaction + MaritalStatus
                 + NumCompaniesWorked + OverTime
                 + RelationshipSatisfaction + TotalWorkingYears
                 + TrainingTimesLastYear + WorkLifeBalance + YearsAtCompany + YearsInCurrentRole
                 + YearsSinceLastPromotion + YearsWithCurrManager
                 , family = binomial(link = 'logit'), data = trainData[-c(2)])
summary(lg_model2)

# Same threshold sweep as for lg_model1.
thresh_range = seq(from = 0.25, to = 0.75, by = 0.01)
lg_df <- data.frame(matrix(ncol = 10, nrow = 0))
colnames(lg_df) <- c("Threshold","Accuracy","Kappa","Sensitivity","Specificity","Pos_Pred_Value","Neg_Pred_Value","Total_Err","T1_Err","T2_Err")

# FIX(perf): probabilities are threshold-independent; predict once, not per pass.
log_prob <- predict(lg_model2, newdata = testData[-c(2)], type = "response")

for (thresh in thresh_range) {
  log_predict <- ifelse(log_prob > thresh, "Yes", "No")
  conf <- table(testData$Attrition, log_predict, dnn = c("Actual", "Predicted"))
  print(conf)
  print(paste("====================Threshold =",thresh,"================================"))
  cm <- confusionMatrix(data = as.factor(log_predict),
                        reference = testData$Attrition, positive = "Yes")
  lg_df <- rbind(lg_df, data.frame(
    Threshold      = thresh,
    Accuracy       = cm$overall[1],
    Kappa          = cm$overall[2],
    Sensitivity    = cm$byClass[1],
    Specificity    = cm$byClass[2],
    Pos_Pred_Value = cm$byClass[3],
    Neg_Pred_Value = cm$byClass[4],
    Total_Err      = (cm$table[2] + cm$table[3]),
    T1_Err         = cm$table[2],
    T2_Err         = cm$table[3]))
}
write.csv(lg_df, file = 'D:/Imarticus_DSP/R Project/Code file/cm_log2_mani.csv')
################## Code for checking the best Threshold for Logistic Regression Model With all important varialbles based on p value suing chisq test
# Logistic model keeping predictors selected via the chi-squared ANOVA below.
lg_model4 <- glm(trainData$Attrition ~ Age + BusinessTravel + Department + DistanceFromHome + EnvironmentSatisfaction + JobInvolvement + JobLevel + JobSatisfaction + MaritalStatus + NumCompaniesWorked + OverTime + RelationshipSatisfaction + TrainingTimesLastYear + TotalWorkingYears + WorkLifeBalance + YearsAtCompany + YearsInCurrentRole + YearsSinceLastPromotion + YearsWithCurrManager + Work_Happiness_Index, family = binomial(link = 'logit'), data = trainData[-c(2)])
anova(lg_model4, test = 'Chisq')

# FIX: lg_df/colnames were initialized twice back-to-back in the original;
# a single initialization is sufficient.
thresh_range = seq(from = 0.25, to = 0.75, by = 0.01)
lg_df <- data.frame(matrix(ncol = 10, nrow = 0))
colnames(lg_df) <- c("Threshold","Accuracy","Kappa","Sensitivity","Specificity","Pos_Pred_Value","Neg_Pred_Value","Total_Err","T1_Err","T2_Err")

# FIX(perf): probabilities are threshold-independent; predict once, not per pass.
log_prob <- predict(lg_model4, newdata = testData[-c(2)], type = "response")

for (thresh in thresh_range) {
  log_predict <- ifelse(log_prob > thresh, "Yes", "No")
  conf <- table(testData$Attrition, log_predict, dnn = c("Actual", "Predicted"))
  print(conf)
  print(paste("====================Threshold =",thresh,"================================"))
  cm <- confusionMatrix(data = as.factor(log_predict),
                        reference = testData$Attrition, positive = "Yes")
  lg_df <- rbind(lg_df, data.frame(
    Threshold      = thresh,
    Accuracy       = cm$overall[1],
    Kappa          = cm$overall[2],
    Sensitivity    = cm$byClass[1],
    Specificity    = cm$byClass[2],
    Pos_Pred_Value = cm$byClass[3],
    Neg_Pred_Value = cm$byClass[4],
    Total_Err      = (cm$table[2] + cm$table[3]),
    T1_Err         = cm$table[2],
    T2_Err         = cm$table[3]))
}
write.csv(lg_df, file = 'D:/Imarticus_DSP/R Project/Code file/cm_log4_mani.csv')
################## Code for checking the best Threshold (Tune length) for Decison Tree = Information Gain
library(rpart.plot)
library(caret)

# Candidate tuneLength values for caret's rpart complexity-parameter search.
dt_tunelng = seq(from = 1, to = 35, by = 1)

# Result accumulator: one row of confusion-matrix metrics per tune length.
dt_df <- data.frame(matrix(ncol = 10, nrow = 0))
# FIX: the first column must be named "Tune_Length" (the original said
# "Threshold"), otherwise the rbind() inside the loop fails with
# "names do not match previous names" against the per-iteration data.frame.
colnames(dt_df) <- c("Tune_Length", "Accuracy", "Kappa", "Sensitivity",
                     "Specificity", "Pos_Pred_Value", "Neg_Pred_Value",
                     "Total_Err", "T1_Err", "T2_Err")
dt_df

# 10-fold cross validation repeated 3 times, reused for every candidate length.
trctrl <- trainControl(method = "repeatedcv", number = 10, repeats = 3)

for (len in dt_tunelng) {
  print(paste("====================Tune Length =", len, "================================"))
  set.seed(3333)  # identical seed each pass so only tuneLength varies
  dtree_model1 <- train(Attrition ~ ., data = trainData, method = "rpart",
                        parms = list(split = "information"),
                        trControl = trctrl, tuneLength = len)
  print(dtree_model1)
  dt_pred <- predict(dtree_model1, newdata = testData)
  cm <- confusionMatrix(data = as.factor(dt_pred),
                        reference = testData$Attrition, positive = "Yes")
  print(cm)
  dt_df <- rbind(dt_df, data.frame(
    Tune_Length    = len,
    Accuracy       = cm$overall[1],
    Kappa          = cm$overall[2],
    Sensitivity    = cm$byClass[1],
    Specificity    = cm$byClass[2],
    Pos_Pred_Value = cm$byClass[3],
    Neg_Pred_Value = cm$byClass[4],
    Total_Err      = (cm$table[2] + cm$table[3]),
    T1_Err         = cm$table[2],
    T2_Err         = cm$table[3]))
}

# Plot the last fitted tree and persist the metrics table.
prp(dtree_model1$finalModel, box.palette = "Reds", tweak = 1.2)
write.csv(dt_df, file = 'D:/Imarticus_DSP/R Project/Code file/cm_dt1_mani2.csv')
################## Code for checking the best Threshold (Tune length) for Decison Tree = Information Gain with all important Variables
# Same sweep as above, restricted to the hand-picked important predictors
# and a shorter tuneLength range (1..20).
dt_tunelng = seq(from = 1, to = 20, by = 1)
dt_df <- data.frame(matrix(ncol = 10, nrow = 0))
colnames(dt_df) <- c("Tune_Length", "Accuracy", "Kappa", "Sensitivity",
                     "Specificity", "Pos_Pred_Value", "Neg_Pred_Value",
                     "Total_Err", "T1_Err", "T2_Err")
dt_df
trctrl <- trainControl(method = "repeatedcv", number = 10, repeats = 3)
for (len in dt_tunelng) {
  print(paste("====================Tune Length =", len, "================================"))
  set.seed(3333)
  dtree_model2 <- train(
    Attrition ~ Age + BusinessTravel + DailyRate + Department +
      DistanceFromHome + EnvironmentSatisfaction + JobLevel + JobRole +
      MaritalStatus + MonthlyIncome + OverTime + StockOptionLevel +
      TotalWorkingYears + YearsAtCompany + JobInvolvement + PercentSalaryHike,
    data = trainData, method = "rpart",
    parms = list(split = "information"),
    trControl = trctrl, tuneLength = len)
  print(dtree_model2)
  dt_pred <- predict(dtree_model2, newdata = testData)
  cm <- confusionMatrix(data = as.factor(dt_pred),
                        reference = testData$Attrition, positive = "Yes")
  print(cm)
  dt_df <- rbind(dt_df, data.frame(
    Tune_Length    = len,
    Accuracy       = cm$overall[1],
    Kappa          = cm$overall[2],
    Sensitivity    = cm$byClass[1],
    Specificity    = cm$byClass[2],
    Pos_Pred_Value = cm$byClass[3],
    Neg_Pred_Value = cm$byClass[4],
    Total_Err      = (cm$table[2] + cm$table[3]),
    T1_Err         = cm$table[2],
    T2_Err         = cm$table[3]))
}
prp(dtree_model2$finalModel, box.palette = "Reds", tweak = 1.2)
write.csv(dt_df, file = 'D:/Imarticus_DSP/R Project/Code file/cm_dt2_mani.csv')
################## Code for checking the best Threshold (Tune length) for Decison Tree = Gini Index
# Repeat the tuneLength sweep with the Gini split criterion on all predictors.
dt_tunelng = seq(from = 1, to = 35, by = 1)
dt_df <- data.frame(matrix(ncol = 10, nrow = 0))
colnames(dt_df) <- c("Tune_Length", "Accuracy", "Kappa", "Sensitivity",
                     "Specificity", "Pos_Pred_Value", "Neg_Pred_Value",
                     "Total_Err", "T1_Err", "T2_Err")
dt_df
trctrl <- trainControl(method = "repeatedcv", number = 10, repeats = 3)
for (len in dt_tunelng) {
  print(paste("====================Tune Length =", len, "================================"))
  set.seed(3333)
  dtree_fit_gini <- train(Attrition ~ ., data = trainData, method = "rpart",
                          parms = list(split = "gini"),
                          trControl = trctrl, tuneLength = len)
  print(dtree_fit_gini)
  dt_pred <- predict(dtree_fit_gini, newdata = testData)
  cm <- confusionMatrix(data = as.factor(dt_pred),
                        reference = testData$Attrition, positive = "Yes")
  print(cm)
  dt_df <- rbind(dt_df, data.frame(
    Tune_Length    = len,
    Accuracy       = cm$overall[1],
    Kappa          = cm$overall[2],
    Sensitivity    = cm$byClass[1],
    Specificity    = cm$byClass[2],
    Pos_Pred_Value = cm$byClass[3],
    Neg_Pred_Value = cm$byClass[4],
    Total_Err      = (cm$table[2] + cm$table[3]),
    T1_Err         = cm$table[2],
    T2_Err         = cm$table[3]))
}
write.csv(dt_df, file = 'D:/Imarticus_DSP/R Project/Code file/cm_gini_dt1_mani2.csv')
prp(dtree_fit_gini$finalModel, box.palette = "Reds", tweak = 1.2)
################## Code for checking the best Threshold (Tune length) for Decison Tree = Gini Index with all important Variables
# Gini-criterion sweep over the curated predictor list, tuneLength 1..20.
dt_tunelng = seq(from = 1, to = 20, by = 1)
dt_df <- data.frame(matrix(ncol = 10, nrow = 0))
colnames(dt_df) <- c("Tune_Length", "Accuracy", "Kappa", "Sensitivity",
                     "Specificity", "Pos_Pred_Value", "Neg_Pred_Value",
                     "Total_Err", "T1_Err", "T2_Err")
dt_df
trctrl <- trainControl(method = "repeatedcv", number = 10, repeats = 3)
for (len in dt_tunelng) {
  print(paste("====================Tune Length =", len, "================================"))
  set.seed(3333)
  dtree_fit_gini2 <- train(
    Attrition ~ MonthlyIncome + Age + TotalWorkingYears + YearsAtCompany +
      OverTime + JobLevel + StockOptionLevel + MaritalStatus +
      DistanceFromHome + BusinessTravel + EnvironmentSatisfaction + JobRole +
      PercentSalaryHike + Department + DailyRate + YearsSinceLastPromotion +
      TrainingTimesLastYear + WorkLifeBalance + YearsInCurrentRole,
    data = trainData, method = "rpart",
    parms = list(split = "gini"),
    trControl = trctrl, tuneLength = len)
  print(dtree_fit_gini2)
  dt_pred <- predict(dtree_fit_gini2, newdata = testData)
  cm <- confusionMatrix(data = as.factor(dt_pred),
                        reference = testData$Attrition, positive = "Yes")
  print(cm)
  dt_df <- rbind(dt_df, data.frame(
    Tune_Length    = len,
    Accuracy       = cm$overall[1],
    Kappa          = cm$overall[2],
    Sensitivity    = cm$byClass[1],
    Specificity    = cm$byClass[2],
    Pos_Pred_Value = cm$byClass[3],
    Neg_Pred_Value = cm$byClass[4],
    Total_Err      = (cm$table[2] + cm$table[3]),
    T1_Err         = cm$table[2],
    T2_Err         = cm$table[3]))
}
write.csv(dt_df, file = 'D:/Imarticus_DSP/R Project/Code file/cm_gini_dt2_mani.csv')
prp(dtree_fit_gini2$finalModel, box.palette = "Reds", tweak = 1.2)
################## Code for checking the best Threshold (ntree) for Random Forest
library(randomForest)
# Candidate forest sizes: fine steps of 5 up to 100, then coarse steps of 100
# up to 3000. FIX: both seq() calls produce 100, so the original grid fitted
# the 100-tree forest twice and wrote a duplicate row; unique() removes it.
ntree_val = unique(c(seq(from = 5, to = 100, by = 5),
                     seq(from = 100, to = 3000, by = 100)))
# Result accumulator: one row of confusion-matrix metrics per forest size.
rf_df <- data.frame(matrix(ncol = 10, nrow = 0))
colnames(rf_df) <- c("Num_Trees", "Accuracy", "Kappa", "Sensitivity",
                     "Specificity", "Pos_Pred_Value", "Neg_Pred_Value",
                     "Total_Err", "T1_Err", "T2_Err")
rf_df
for (num_tree in ntree_val) {
  print(paste("================== Tree Value =", num_tree, "===================="))
  set.seed(500)  # identical seed each pass so only ntree varies
  rf_model1 <- randomForest(
    trainData$Attrition ~ MonthlyIncome + Age + OverTime + DistanceFromHome +
      DailyRate + Work_Happiness_Index + TotalWorkingYears + MonthlyRate +
      HourlyRate + JobRole + YearsAtCompany + StockOptionLevel +
      EnvironmentSatisfaction + NumCompaniesWorked,
    data = trainData, importance = TRUE, ntree = num_tree)
  rf_pred <- predict(rf_model1, testData)
  rfcm <- confusionMatrix(data = as.factor(rf_pred),
                          reference = testData$Attrition, positive = "Yes")
  rf_df <- rbind(rf_df, data.frame(
    Num_Trees      = num_tree,
    Accuracy       = rfcm$overall[1],
    Kappa          = rfcm$overall[2],
    Sensitivity    = rfcm$byClass[1],
    Specificity    = rfcm$byClass[2],
    Pos_Pred_Value = rfcm$byClass[3],
    Neg_Pred_Value = rfcm$byClass[4],
    Total_Err      = (rfcm$table[2] + rfcm$table[3]),
    T1_Err         = rfcm$table[2],
    T2_Err         = rfcm$table[3]))
}
write.csv(rf_df, file = 'D:/Imarticus_DSP/R Project/Code file/cm_rf4_mani.csv')
################## Code for checking the best Threshold (cost/gama) for SVM
library("e1071")
set.seed(500)
# Result accumulator: one row of confusion-matrix metrics per (cost, gamma).
svm_mod_acc <- data.frame(matrix(ncol = 11, nrow = 0))
cols <- c("Cost","Gamma","Accuracy","Kappa","Sensitivity","Specificity","Pos_Pred_Value","Neg_Pred_Value","Total_Err","T1_Err","T2_Err")
colnames(svm_mod_acc) <- cols
#higher the cost better the classification
#lower gamma means tighter boundary
svm_cost <- c(seq(1, 9, 1), seq(10, 100, 10))
# FIX: gamma must be strictly positive for the radial kernel, so the grid now
# starts at 0.005 instead of the 0 produced by seq(0.000, ...); unique()
# drops values duplicated where adjacent seq() ranges meet (0.1 and 1).
svm_gm <- unique(c(seq(0.005, 0.1, 0.005), seq(0.1, 1.0, 0.1),
                   seq(1, 9, 1), seq(10, 100, 10)))
for (cst in svm_cost) {
  for (gm in svm_gm) {
    print(paste("========== Cost =",cst," Gamma=",gm," ============="))
    # Radial-basis SVM on the hand-picked predictor set.
    svm1 <- svm(Attrition ~ YearsAtCompany + OverTime + NumCompaniesWorked +
                  DistanceFromHome + JobInvolvement + WorkLifeBalance +
                  YearsSinceLastPromotion + MonthlyIncome + JobSatisfaction +
                  EnvironmentSatisfaction + TotalWorkingYears +
                  RelationshipSatisfaction + Work_Happiness_Index +
                  BusinessTravel + TrainingTimesLastYear + JobLevel +
                  Department + EducationField + Age + Gender + StockOptionLevel,
                data = trainData, kernel = "radial", cost = cst, gamma = gm)
    pred <- predict(svm1, testData)
    svm_cm <- confusionMatrix(data = as.factor(pred),
                              reference = testData$Attrition, positive = "Yes")
    svm_mod_acc <- rbind(svm_mod_acc, data.frame(
      Cost           = cst,
      Gamma          = gm,
      Accuracy       = svm_cm$overall[1],
      Kappa          = svm_cm$overall[2],
      Sensitivity    = svm_cm$byClass[1],
      Specificity    = svm_cm$byClass[2],
      Pos_Pred_Value = svm_cm$byClass[3],
      Neg_Pred_Value = svm_cm$byClass[4],
      Total_Err      = (svm_cm$table[2] + svm_cm$table[3]),
      T1_Err         = svm_cm$table[2],
      T2_Err         = svm_cm$table[3]))
  }
}
write.csv(svm_mod_acc, file = 'D:/Imarticus_DSP/R Project/Code file/cm_svm1_mani.csv')
|
# Declare names used via non-standard evaluation elsewhere in the package so
# R CMD check does not emit "no visible binding for global variable" notes.
# FIX: "percent_pathogenic" and "N_virulent_isolates" were listed twice;
# each name only needs to be declared once.
utils::globalVariables(
  c(
    "susceptible.1",
    "percent_pathogenic",
    "N_virulent_isolates",
    "gene",
    "distribution",
    "N_samp",
    "i.N",
    "complexity",
    "frequency"
  )
)
|
/R/zzz.R
|
permissive
|
openplantpathology/hagis
|
R
| false
| false
| 253
|
r
|
# Declare names used via non-standard evaluation elsewhere in the package so
# R CMD check does not emit "no visible binding for global variable" notes.
# FIX: "percent_pathogenic" and "N_virulent_isolates" were listed twice;
# each name only needs to be declared once.
utils::globalVariables(
  c(
    "susceptible.1",
    "percent_pathogenic",
    "N_virulent_isolates",
    "gene",
    "distribution",
    "N_samp",
    "i.N",
    "complexity",
    "frequency"
  )
)
|
library(plotly)
library(ggplot2)
library(dplyr)
url <- "https://raw.githubusercontent.com/charleyferrari/CUNY_DATA608/master/lecture3/data/cleaned-cdc-mortality-1999-2010-2.csv"
mortal <- read.csv(url)
# Shiny server: renders a horizontal bar chart of state-level crude mortality
# rates for the cause of death (input$cause) and year (input$yr) selected in
# the UI. Reads the module-level `mortal` data frame loaded above.
function(input, output, session) {
  # get ggplot default colors for 2 color plot
  colors <- hcl(h = seq(15, 375, length = 3), l = 65, c = 100)[1:2]
  # Reactive slice of `mortal` for the selected cause/year, sorted by
  # descending crude rate. State becomes a factor in that order (fixing the
  # bar order) and Color highlights the first (highest-rate) state. When the
  # filter matches no rows, the if() has no else branch, so the reactive
  # yields NULL; the renderPlotly below relies on that.
  selectedData <- reactive({
    sliceMort <- mortal %>%
      filter(ICD.Chapter == input$cause, Year == input$yr)
    if (nrow(sliceMort) > 0) {
      sliceMort <- sliceMort %>%
        arrange(desc(Crude.Rate)) %>%
        mutate(State = factor(State, State),
               Color = factor(c(colors[1], rep(colors[2], length(State) - 1)),
                              levels = colors))
    }
  })
  output$plot1 <- renderPlotly({
    # NULL$State has length 0, so this branch also handles the no-data case.
    if (length(selectedData()$State) > 0) {
      # Pad the x-axis 10% past the largest rate; fall back to 1 so the
      # range is never c(0, 0) when every rate is zero.
      xmax <- 1.1 * max(selectedData()$Crude.Rate)
      xmax <- ifelse(xmax > 0, xmax, 1)
      p <- plot_ly(selectedData(), x = ~Crude.Rate, y = ~State,
                   type = "bar", orientation = "h",
                   marker = list(color = ~Color)) %>%
        layout(xaxis = list(title = "Crude Rate (Deaths per 100000)",
                            range = c(0, xmax)),
               yaxis = list(autorange = "reversed"))
      # NOTE(review): clearing elementId presumably avoids htmlwidgets'
      # duplicate-widget-id warning on re-render -- confirm.
      p$elementId <- NULL
      p
    } else {
      # No rows for this cause/year: show a blank panel with a message
      # instead of an empty chart.
      p <- ggplot() +
        coord_cartesian(xlim = c(0, 100), ylim = c(0, 100)) +
        annotate("text", label = "No Data Available", x = 50, y = 50) +
        theme(line = element_blank(),
              text = element_blank(),
              title = element_blank(),
              panel.background = element_blank())
      p <- ggplotly(p)
      p$elementId <- NULL
      p
    }
  })
}
|
/HW3/Question 1/server.R
|
no_license
|
juddanderman/DATA-608-Knowledge-and-Visual-Analytics-
|
R
| false
| false
| 1,701
|
r
|
library(plotly)
library(ggplot2)
library(dplyr)
url <- "https://raw.githubusercontent.com/charleyferrari/CUNY_DATA608/master/lecture3/data/cleaned-cdc-mortality-1999-2010-2.csv"
mortal <- read.csv(url)
# Shiny server: renders a horizontal bar chart of state-level crude mortality
# rates for the cause of death (input$cause) and year (input$yr) selected in
# the UI. Reads the module-level `mortal` data frame loaded above.
function(input, output, session) {
  # get ggplot default colors for 2 color plot
  colors <- hcl(h = seq(15, 375, length = 3), l = 65, c = 100)[1:2]
  # Reactive slice of `mortal` for the selected cause/year, sorted by
  # descending crude rate. State becomes a factor in that order (fixing the
  # bar order) and Color highlights the first (highest-rate) state. When the
  # filter matches no rows, the if() has no else branch, so the reactive
  # yields NULL; the renderPlotly below relies on that.
  selectedData <- reactive({
    sliceMort <- mortal %>%
      filter(ICD.Chapter == input$cause, Year == input$yr)
    if (nrow(sliceMort) > 0) {
      sliceMort <- sliceMort %>%
        arrange(desc(Crude.Rate)) %>%
        mutate(State = factor(State, State),
               Color = factor(c(colors[1], rep(colors[2], length(State) - 1)),
                              levels = colors))
    }
  })
  output$plot1 <- renderPlotly({
    # NULL$State has length 0, so this branch also handles the no-data case.
    if (length(selectedData()$State) > 0) {
      # Pad the x-axis 10% past the largest rate; fall back to 1 so the
      # range is never c(0, 0) when every rate is zero.
      xmax <- 1.1 * max(selectedData()$Crude.Rate)
      xmax <- ifelse(xmax > 0, xmax, 1)
      p <- plot_ly(selectedData(), x = ~Crude.Rate, y = ~State,
                   type = "bar", orientation = "h",
                   marker = list(color = ~Color)) %>%
        layout(xaxis = list(title = "Crude Rate (Deaths per 100000)",
                            range = c(0, xmax)),
               yaxis = list(autorange = "reversed"))
      # NOTE(review): clearing elementId presumably avoids htmlwidgets'
      # duplicate-widget-id warning on re-render -- confirm.
      p$elementId <- NULL
      p
    } else {
      # No rows for this cause/year: show a blank panel with a message
      # instead of an empty chart.
      p <- ggplot() +
        coord_cartesian(xlim = c(0, 100), ylim = c(0, 100)) +
        annotate("text", label = "No Data Available", x = 50, y = 50) +
        theme(line = element_blank(),
              text = element_blank(),
              title = element_blank(),
              panel.background = element_blank())
      p <- ggplotly(p)
      p$elementId <- NULL
      p
    }
  })
}
|
extract_fips <- function(geoid, sumlevel) {
  ## Pull the FIPS code out of a CHAS geoid string for a given summary level.
  ## Unrecognized sumlevels fall through and return the geoid unchanged.
  ## TODO Add all relevant sumlevels and substrings
  case_when(
    sumlevel == 140 ~ substr(geoid, 8, 20),
    sumlevel == 160 ~ substr(geoid, 8, 14),
    sumlevel == 80  ~ paste0(substr(geoid, 8, 12), substr(geoid, 23, 29)),
    TRUE            ~ geoid
  )
}
drop_cols <- function(chastable) {
  ## Drop bookkeeping columns from a CHAS table, keeping GEOID, year, name and
  ## the T*_est*/T*_moe* value columns. Which columns exist depends on the
  ## table's summary level (read from the first row).
  ## TODO Add other sumlevel types
  chastable$sumlevel <- as.numeric(chastable$sumlevel)
  sumlevel <- chastable$sumlevel[1]
  if (sumlevel == 140) {
    ret <- chastable %>% select(GEOID, year, everything(), -c(source, sumlevel, geoid, st, cnty, tract))
  } else if (sumlevel == 160) {
    ret <- chastable %>% select(GEOID, year, everything(), -c(source, sumlevel, geoid, st, place))
  } else if (sumlevel == 80) {
    ret <- chastable %>% select(GEOID, year, everything(), -c(source, sumlevel, geoid))
  } else {
    ## was break(), which is an error outside of a loop and aborts with a
    ## confusing "no loop for break/next" message; fail explicitly instead
    stop("drop_cols(): unsupported sumlevel: ", sumlevel, call. = FALSE)
  }
  ret
}
clean_chas <- function(csvpath, year, filter_statement) {
  ## Read a CHAS csv and tidy it: the wide T<table>_<est|moe><varnum> columns
  ## become one row per GEOID/year/table/varnum with est and moe columns,
  ## filtered by the caller-supplied expression string (parsed with rlang).
  force(year)
  out <- read_csv(csvpath) %>%
    mutate(year = year,
           GEOID = extract_fips(geoid, sumlevel)) %>%
    drop_cols(.) %>%
    pivot_longer(-c(GEOID, year, name),
                 names_to = c("table", "type", "varnum"),
                 names_pattern = "T(\\d+)_(est|moe)(\\d+)",
                 values_to = "value") %>%
    pivot_wider(names_from = "type", values_from = "value") %>%
    mutate(table = as.numeric(table),
           varnum = as.numeric(varnum)) %>%
    filter(eval(rlang::parse_expr(filter_statement)))
  out
}
## NOTE(review): the original prefixed this definition with a stray `data <-`,
## which bound a copy of the function to `data`; dropped as accidental.
clean_chas_zipped <- function(zip_path, table_path, year, filter_statement) {
  ## Read one CHAS table out of a zip archive and tidy it: the wide
  ## T*_est*/T*_moe* columns become one row per GEOID/year/table/varnum with
  ## est and moe columns, filtered by the caller-supplied expression string
  ## (parsed with rlang).
  force(year)
  raw_chastable <- read.table(unz(zip_path, table_path), header = T, quote = "\"", sep = ",", stringsAsFactors = F)
  ret <- raw_chastable %>%
    mutate(year = year,
           GEOID = extract_fips(geoid, sumlevel)) %>%
    drop_cols(.) %>%
    pivot_longer(-c(GEOID, year, name),
                 names_to = c("table", "type", "varnum"),
                 names_pattern = "T(\\d+)_(est|moe)(\\d+)",
                 values_to = "value") %>%
    pivot_wider(names_from = "type", values_from = "value") %>%
    mutate(table = as.numeric(table),
           varnum = as.numeric(varnum)) %>%
    filter(eval(rlang::parse_expr(filter_statement)))
  return(ret)
}
vintage_clean_chas <- function(csvpath, year, filter_statement) {
  ## Clean an older-vintage CHAS csv whose geoid carries the sumlevel in its
  ## first three characters. Reshapes T*_est*/T*_moe* columns into one row per
  ## GEOID/year/table/varnum (est + moe), applies the caller-supplied filter
  ## expression string, then post-processes by sumlevel (080 rows get
  ## re-aggregated estimates and MoEs).
  force(year)
  raw_chastable <- read_csv(csvpath)
  sumlevel <- as.numeric(substr(raw_chastable$geoid[1], 1, 3))
  ret <- raw_chastable %>%
    mutate(year = year,
           sumlevel = substr(geoid, 1, 3),
           name = NA_character_,
           GEOID = extract_fips(geoid, as.numeric(sumlevel))) %>%
    drop_cols(.) %>%
    pivot_longer(-c(GEOID, year, name),
                 names_to = c("table", "type", "varnum"),
                 names_pattern = "T(\\d+)_(est|moe)(\\d+)",
                 values_to = "value") %>%
    pivot_wider(names_from = "type", values_from = "value") %>%
    mutate(table = as.numeric(table),
           varnum = as.numeric(varnum)) %>%
    filter(eval(rlang::parse_expr(filter_statement)))
  if (sumlevel == 80) {
    ret <- ret %>%
      group_by(GEOID, year, name, table, varnum) %>%
      mutate(newest = sum(unlist(est), na.rm = T),
             newmoe = moe_sum(unlist(moe), estimate = newest)) %>% ungroup() %>%
      select(GEOID, year, name, table, varnum, est = newest, moe = newmoe) %>%
      mutate(moe = case_when(est == 0 ~ 22, ## Hard-code MoE to 22 for zero estimates. Needs more thought. Rationale is that MoEs are not coded right at 080 sumlevel
                             TRUE ~ moe)) ## However this doesn't seem to work...
  } else if (sumlevel == 160) {
    ret <- ret %>% select(GEOID, year, name, table, varnum, est, moe)
  } else {
    ## was break(), an error outside of a loop; fail with a clear message
    stop("vintage_clean_chas(): unsupported sumlevel: ", sumlevel, call. = FALSE)
  }
  return(ret)
}
vintage_clean_chas_zipped <- function(zip_path, table_path, year, filter_statement) {
  ## Zip-archive variant of vintage_clean_chas(): read one table out of the
  ## archive, reshape T*_est*/T*_moe* columns to one row per
  ## GEOID/year/table/varnum, filter by the caller-supplied expression string,
  ## then post-process by sumlevel (080 rows get re-aggregated est/MoE).
  force(year)
  ## stringsAsFactors made explicit for consistency with clean_chas_zipped();
  ## a factor geoid would break substr()-based FIPS extraction on R < 4.0
  raw_chastable <- read.table(unz(zip_path, table_path), header = T, quote = "\"", sep = ",",
                              stringsAsFactors = FALSE)
  sumlevel <- as.numeric(substr(raw_chastable$geoid[1], 1, 3))
  ret <- raw_chastable %>%
    mutate(year = year,
           sumlevel = substr(geoid, 1, 3),
           name = NA_character_,
           GEOID = extract_fips(geoid, as.numeric(sumlevel))) %>%
    drop_cols(.) %>%
    pivot_longer(-c(GEOID, year, name),
                 names_to = c("table", "type", "varnum"),
                 names_pattern = "T(\\d+)_(est|moe)(\\d+)",
                 values_to = "value") %>%
    pivot_wider(names_from = "type", values_from = "value") %>%
    mutate(table = as.numeric(table),
           varnum = as.numeric(varnum)) %>%
    filter(eval(rlang::parse_expr(filter_statement)))
  if (sumlevel == 80) {
    ret <- ret %>%
      group_by(GEOID, year, name, table, varnum) %>%
      mutate(newest = sum(unlist(est), na.rm = T),
             newmoe = moe_sum(unlist(moe), estimate = newest)) %>% ungroup() %>%
      select(GEOID, year, name, table, varnum, est = newest, moe = newmoe) %>%
      mutate(moe = case_when(est == 0 ~ 22, ## Hard-code MoE to 22 for zero estimates. Needs more thought. Rationale is that MoEs are not coded right at 080 sumlevel
                             TRUE ~ moe)) ## However this doesn't seem to work...
  } else if (sumlevel == 160) {
    ret <- ret %>% select(GEOID, year, name, table, varnum, est, moe)
  } else {
    ## was break(), an error outside of a loop; fail with a clear message
    stop("vintage_clean_chas_zipped(): unsupported sumlevel: ", sumlevel, call. = FALSE)
  }
  return(ret)
}
|
/scripts/helpful_snippets/chas_helpers.R
|
permissive
|
BPSTechServices/income-analysis
|
R
| false
| false
| 5,583
|
r
|
extract_fips <- function(geoid, sumlevel) {
  ## Pull the FIPS code out of a CHAS geoid string for a given summary level.
  ## Unrecognized sumlevels fall through and return the geoid unchanged.
  ## TODO Add all relevant sumlevels and substrings
  case_when(
    sumlevel == 140 ~ substr(geoid, 8, 20),
    sumlevel == 160 ~ substr(geoid, 8, 14),
    sumlevel == 80  ~ paste0(substr(geoid, 8, 12), substr(geoid, 23, 29)),
    TRUE            ~ geoid
  )
}
drop_cols <- function(chastable) {
  ## Drop bookkeeping columns from a CHAS table, keeping GEOID, year, name and
  ## the T*_est*/T*_moe* value columns. Which columns exist depends on the
  ## table's summary level (read from the first row).
  ## TODO Add other sumlevel types
  chastable$sumlevel <- as.numeric(chastable$sumlevel)
  sumlevel <- chastable$sumlevel[1]
  if (sumlevel == 140) {
    ret <- chastable %>% select(GEOID, year, everything(), -c(source, sumlevel, geoid, st, cnty, tract))
  } else if (sumlevel == 160) {
    ret <- chastable %>% select(GEOID, year, everything(), -c(source, sumlevel, geoid, st, place))
  } else if (sumlevel == 80) {
    ret <- chastable %>% select(GEOID, year, everything(), -c(source, sumlevel, geoid))
  } else {
    ## was break(), which is an error outside of a loop and aborts with a
    ## confusing "no loop for break/next" message; fail explicitly instead
    stop("drop_cols(): unsupported sumlevel: ", sumlevel, call. = FALSE)
  }
  ret
}
clean_chas <- function(csvpath, year, filter_statement) {
  ## Read a CHAS csv and tidy it: the wide T<table>_<est|moe><varnum> columns
  ## become one row per GEOID/year/table/varnum with est and moe columns,
  ## filtered by the caller-supplied expression string (parsed with rlang).
  force(year)
  out <- read_csv(csvpath) %>%
    mutate(year = year,
           GEOID = extract_fips(geoid, sumlevel)) %>%
    drop_cols(.) %>%
    pivot_longer(-c(GEOID, year, name),
                 names_to = c("table", "type", "varnum"),
                 names_pattern = "T(\\d+)_(est|moe)(\\d+)",
                 values_to = "value") %>%
    pivot_wider(names_from = "type", values_from = "value") %>%
    mutate(table = as.numeric(table),
           varnum = as.numeric(varnum)) %>%
    filter(eval(rlang::parse_expr(filter_statement)))
  out
}
## NOTE(review): the original prefixed this definition with a stray `data <-`,
## which bound a copy of the function to `data`; dropped as accidental.
clean_chas_zipped <- function(zip_path, table_path, year, filter_statement) {
  ## Read one CHAS table out of a zip archive and tidy it: the wide
  ## T*_est*/T*_moe* columns become one row per GEOID/year/table/varnum with
  ## est and moe columns, filtered by the caller-supplied expression string
  ## (parsed with rlang).
  force(year)
  raw_chastable <- read.table(unz(zip_path, table_path), header = T, quote = "\"", sep = ",", stringsAsFactors = F)
  ret <- raw_chastable %>%
    mutate(year = year,
           GEOID = extract_fips(geoid, sumlevel)) %>%
    drop_cols(.) %>%
    pivot_longer(-c(GEOID, year, name),
                 names_to = c("table", "type", "varnum"),
                 names_pattern = "T(\\d+)_(est|moe)(\\d+)",
                 values_to = "value") %>%
    pivot_wider(names_from = "type", values_from = "value") %>%
    mutate(table = as.numeric(table),
           varnum = as.numeric(varnum)) %>%
    filter(eval(rlang::parse_expr(filter_statement)))
  return(ret)
}
vintage_clean_chas <- function(csvpath, year, filter_statement) {
  ## Clean an older-vintage CHAS csv whose geoid carries the sumlevel in its
  ## first three characters. Reshapes T*_est*/T*_moe* columns into one row per
  ## GEOID/year/table/varnum (est + moe), applies the caller-supplied filter
  ## expression string, then post-processes by sumlevel (080 rows get
  ## re-aggregated estimates and MoEs).
  force(year)
  raw_chastable <- read_csv(csvpath)
  sumlevel <- as.numeric(substr(raw_chastable$geoid[1], 1, 3))
  ret <- raw_chastable %>%
    mutate(year = year,
           sumlevel = substr(geoid, 1, 3),
           name = NA_character_,
           GEOID = extract_fips(geoid, as.numeric(sumlevel))) %>%
    drop_cols(.) %>%
    pivot_longer(-c(GEOID, year, name),
                 names_to = c("table", "type", "varnum"),
                 names_pattern = "T(\\d+)_(est|moe)(\\d+)",
                 values_to = "value") %>%
    pivot_wider(names_from = "type", values_from = "value") %>%
    mutate(table = as.numeric(table),
           varnum = as.numeric(varnum)) %>%
    filter(eval(rlang::parse_expr(filter_statement)))
  if (sumlevel == 80) {
    ret <- ret %>%
      group_by(GEOID, year, name, table, varnum) %>%
      mutate(newest = sum(unlist(est), na.rm = T),
             newmoe = moe_sum(unlist(moe), estimate = newest)) %>% ungroup() %>%
      select(GEOID, year, name, table, varnum, est = newest, moe = newmoe) %>%
      mutate(moe = case_when(est == 0 ~ 22, ## Hard-code MoE to 22 for zero estimates. Needs more thought. Rationale is that MoEs are not coded right at 080 sumlevel
                             TRUE ~ moe)) ## However this doesn't seem to work...
  } else if (sumlevel == 160) {
    ret <- ret %>% select(GEOID, year, name, table, varnum, est, moe)
  } else {
    ## was break(), an error outside of a loop; fail with a clear message
    stop("vintage_clean_chas(): unsupported sumlevel: ", sumlevel, call. = FALSE)
  }
  return(ret)
}
vintage_clean_chas_zipped <- function(zip_path, table_path, year, filter_statement) {
  ## Zip-archive variant of vintage_clean_chas(): read one table out of the
  ## archive, reshape T*_est*/T*_moe* columns to one row per
  ## GEOID/year/table/varnum, filter by the caller-supplied expression string,
  ## then post-process by sumlevel (080 rows get re-aggregated est/MoE).
  force(year)
  ## stringsAsFactors made explicit for consistency with clean_chas_zipped();
  ## a factor geoid would break substr()-based FIPS extraction on R < 4.0
  raw_chastable <- read.table(unz(zip_path, table_path), header = T, quote = "\"", sep = ",",
                              stringsAsFactors = FALSE)
  sumlevel <- as.numeric(substr(raw_chastable$geoid[1], 1, 3))
  ret <- raw_chastable %>%
    mutate(year = year,
           sumlevel = substr(geoid, 1, 3),
           name = NA_character_,
           GEOID = extract_fips(geoid, as.numeric(sumlevel))) %>%
    drop_cols(.) %>%
    pivot_longer(-c(GEOID, year, name),
                 names_to = c("table", "type", "varnum"),
                 names_pattern = "T(\\d+)_(est|moe)(\\d+)",
                 values_to = "value") %>%
    pivot_wider(names_from = "type", values_from = "value") %>%
    mutate(table = as.numeric(table),
           varnum = as.numeric(varnum)) %>%
    filter(eval(rlang::parse_expr(filter_statement)))
  if (sumlevel == 80) {
    ret <- ret %>%
      group_by(GEOID, year, name, table, varnum) %>%
      mutate(newest = sum(unlist(est), na.rm = T),
             newmoe = moe_sum(unlist(moe), estimate = newest)) %>% ungroup() %>%
      select(GEOID, year, name, table, varnum, est = newest, moe = newmoe) %>%
      mutate(moe = case_when(est == 0 ~ 22, ## Hard-code MoE to 22 for zero estimates. Needs more thought. Rationale is that MoEs are not coded right at 080 sumlevel
                             TRUE ~ moe)) ## However this doesn't seem to work...
  } else if (sumlevel == 160) {
    ret <- ret %>% select(GEOID, year, name, table, varnum, est, moe)
  } else {
    ## was break(), an error outside of a loop; fail with a clear message
    stop("vintage_clean_chas_zipped(): unsupported sumlevel: ", sumlevel, call. = FALSE)
  }
  return(ret)
}
|
## This was a project that looked at the pregnancy rates between smokers and non-smokers using Maximum Likelihood Estimation of a
# hypergeometric distribution (negative binomial). I will also upload the pregnancies file for others to use.
setwd("<your file path>")  # placeholder: point this at the folder holding pregnancies.csv
data <- read.csv("pregnancies.csv", sep = ",", header = TRUE)  # raw data; not referenced below -- TODO confirm it is needed
library(stats4)  # provides mle()
# Cycles-to-conception counts (1..13 cycles) expanded to one entry per woman.
# Renamed from `rep`/`rep2`, which shadowed base::rep (the second rep() call
# only worked because R skips non-function bindings when resolving a call).
cycles_smoke <- rep(1:13, c(29, 16, 17, 4, 3, 9, 4, 5, 1, 1, 1, 3, 7))
idk <- mean(cycles_smoke) * length(cycles_smoke)        # total observed cycles, smokers (sum of cycles)
cycles_nonsmoke <- rep(1:13, c(198, 107, 55, 38, 18, 22, 7, 9, 5, 3, 6, 6, 12))
idk2 <- mean(cycles_nonsmoke) * length(cycles_nonsmoke) # total observed cycles, non-smokers
## Likelihood for the smoker group: per-cycle conception probability p,
## 100 conceptions out of `idk` total cycles (script-level global).
smoke_like <- function(p) {
  p^100 * (1 - p)^(idk - 100)
}
## Log-likelihood for the smoker group (log of smoke_like()).
smoke_log_like <- function(p) {
  100 * log(p) + (idk - 100) * log(1 - p)
}
## Negative log-likelihood for the smoker group, in the form mle() minimizes.
minus_smoke <- function(p) {
  -smoke_log_like(p)
}
## Likelihood for the non-smoker group: per-cycle conception probability p,
## 486 conceptions out of `idk2` total cycles (script-level global).
nonsmoke_like <- function(p) {
  p^486 * (1 - p)^(idk2 - 486)
}
## Log-likelihood for the non-smoker group (log of nonsmoke_like()).
nonsmoke_log_like <- function(p) {
  486 * log(p) + (idk2 - 486) * log(1 - p)
}
## Negative log-likelihood for the non-smoker group, for mle().
minus_nonsmoke <- function(p) {
  -nonsmoke_log_like(p)
}
# Fit p for each group by minimizing the negative log-likelihood.
(res <- summary(mle(minus_smoke, start = list(p=0.2))))
(res2 <- summary(mle(minus_nonsmoke, start = list(p=0.3))))
# Profile-likelihood ~95% intervals: keep p where the log relative
# likelihood exceeds -1.92 (half the chi-square 1-df critical value).
p <- seq(0.001, 0.99, length = 10000)
loglik <- smoke_log_like(p)
logrl <- loglik - max(loglik)
range(p[logrl > -1.92])
p2 <- seq(0.001, 0.99, length = 10000)
# fixed: originally used `p` here and below; results were right only
# because p2 happens to be identical to p
loglik2 <- nonsmoke_log_like(p2)
logrl2 <- loglik2 - max(loglik2)
range(p2[logrl2 > -1.92])
|
/optim.R
|
no_license
|
corey-puk/R
|
R
| false
| false
| 1,439
|
r
|
## This was a project that looked at the pregnancy rates between smokers and non-smokers using Maximum Likelihood Estimation of a
# hypergeometric distribution (negative binomial). I will also upload the pregnancies file for others to use.
setwd("<your file path>")  # placeholder: point this at the folder holding pregnancies.csv
data <- read.csv("pregnancies.csv", sep = ",", header = TRUE)  # raw data; not referenced below -- TODO confirm it is needed
library(stats4)  # provides mle()
# Cycles-to-conception counts (1..13 cycles) expanded to one entry per woman.
# Renamed from `rep`/`rep2`, which shadowed base::rep (the second rep() call
# only worked because R skips non-function bindings when resolving a call).
cycles_smoke <- rep(1:13, c(29, 16, 17, 4, 3, 9, 4, 5, 1, 1, 1, 3, 7))
idk <- mean(cycles_smoke) * length(cycles_smoke)        # total observed cycles, smokers (sum of cycles)
cycles_nonsmoke <- rep(1:13, c(198, 107, 55, 38, 18, 22, 7, 9, 5, 3, 6, 6, 12))
idk2 <- mean(cycles_nonsmoke) * length(cycles_nonsmoke) # total observed cycles, non-smokers
## Likelihood for the smoker group: per-cycle conception probability p,
## 100 conceptions out of `idk` total cycles (script-level global).
smoke_like <- function(p) {
  p^100 * (1 - p)^(idk - 100)
}
## Log-likelihood for the smoker group (log of smoke_like()).
smoke_log_like <- function(p) {
  100 * log(p) + (idk - 100) * log(1 - p)
}
## Negative log-likelihood for the smoker group, in the form mle() minimizes.
minus_smoke <- function(p) {
  -smoke_log_like(p)
}
## Likelihood for the non-smoker group: per-cycle conception probability p,
## 486 conceptions out of `idk2` total cycles (script-level global).
nonsmoke_like <- function(p) {
  p^486 * (1 - p)^(idk2 - 486)
}
## Log-likelihood for the non-smoker group (log of nonsmoke_like()).
nonsmoke_log_like <- function(p) {
  486 * log(p) + (idk2 - 486) * log(1 - p)
}
## Negative log-likelihood for the non-smoker group, for mle().
minus_nonsmoke <- function(p) {
  -nonsmoke_log_like(p)
}
# Fit p for each group by minimizing the negative log-likelihood.
(res <- summary(mle(minus_smoke, start = list(p=0.2))))
(res2 <- summary(mle(minus_nonsmoke, start = list(p=0.3))))
# Profile-likelihood ~95% intervals: keep p where the log relative
# likelihood exceeds -1.92 (half the chi-square 1-df critical value).
p <- seq(0.001, 0.99, length = 10000)
loglik <- smoke_log_like(p)
logrl <- loglik - max(loglik)
range(p[logrl > -1.92])
p2 <- seq(0.001, 0.99, length = 10000)
# fixed: originally used `p` here and below; results were right only
# because p2 happens to be identical to p
loglik2 <- nonsmoke_log_like(p2)
logrl2 <- loglik2 - max(loglik2)
range(p2[logrl2 > -1.92])
|
fig10x017<-function(){
#
# Draw a 2-d kernel density estimate of the joint distribution of wives'
# and husbands' heights (Galton data) as a filled color image with contour
# lines on top, then save the figure to EPS and PDF.
#
require(MASS)
#
# Requires textbook R data set Galton.RData
# which contains dataframe Galton
#
graphics.off()
# NOTE(review): windows() only exists on Windows; use x11()/quartz() elsewhere
windows(width=4.5,height=5.4,pointsize=12)
par(fin=c(4.45,5.35),pin=c(4.45,5.35),
    mai=c(0.85,0.85,0.25,0.25),xaxs="i",yaxs="i",las=1)
plot.new()
#
# Normal-reference bandwidths for each margin; +60 offsets the stored
# values (presumably height minus 60 inches -- TODO confirm against data)
bwmf<-c(bandwidth.nrd(Galton$Mother+60),
        bandwidth.nrd(Galton$Father+60))
#
# 500x500 kernel density estimate over the fixed height window
kdemf<-kde2d(Galton$Mother+60,Galton$Father+60,
             h=bwmf,n=500,lims=c(57,71,61,79))
#
# Filled density image using 6 terrain-color bands
image(kdemf,col=terrain.colors(6),
      breaks=(0:6)*0.005,axes=FALSE,
      xlab="Wife's Height (inch)",
      ylab="Husband's Height (inch)")
#
# Unlabeled contour lines overlaid on the filled image
contour(kdemf,xlab="Wife's Height (inch)",
        ylab="Husband's Height (inch)",axes=FALSE,
        nlevels=9,labcex=0.6,add=TRUE,drawlabels=FALSE)
#
axis(1,at=57.+0:13)
# y axis: tick every inch but label only the odd heights
axis(2,at=(61.+0:17),
     labels=c("61"," ","63"," ","65"," ","67"," ",
              "69"," ","71"," ","73"," ","75"," ","77"," "))
box("plot")
#
dev.copy2eps(file="fig10x017.eps",colormodel="cmyk")
dev.copy2pdf(file="fig10x017.pdf",colormodel="cmyk")
}
|
/graphicsforstatistics_2e_figures_scripts_r/Chapter 10/fig10x017.R
|
no_license
|
saqibarfeen/coding_time
|
R
| false
| false
| 1,005
|
r
|
fig10x017<-function(){
#
# Draw a 2-d kernel density estimate of the joint distribution of wives'
# and husbands' heights (Galton data) as a filled color image with contour
# lines on top, then save the figure to EPS and PDF.
#
require(MASS)
#
# Requires textbook R data set Galton.RData
# which contains dataframe Galton
#
graphics.off()
# NOTE(review): windows() only exists on Windows; use x11()/quartz() elsewhere
windows(width=4.5,height=5.4,pointsize=12)
par(fin=c(4.45,5.35),pin=c(4.45,5.35),
    mai=c(0.85,0.85,0.25,0.25),xaxs="i",yaxs="i",las=1)
plot.new()
#
# Normal-reference bandwidths for each margin; +60 offsets the stored
# values (presumably height minus 60 inches -- TODO confirm against data)
bwmf<-c(bandwidth.nrd(Galton$Mother+60),
        bandwidth.nrd(Galton$Father+60))
#
# 500x500 kernel density estimate over the fixed height window
kdemf<-kde2d(Galton$Mother+60,Galton$Father+60,
             h=bwmf,n=500,lims=c(57,71,61,79))
#
# Filled density image using 6 terrain-color bands
image(kdemf,col=terrain.colors(6),
      breaks=(0:6)*0.005,axes=FALSE,
      xlab="Wife's Height (inch)",
      ylab="Husband's Height (inch)")
#
# Unlabeled contour lines overlaid on the filled image
contour(kdemf,xlab="Wife's Height (inch)",
        ylab="Husband's Height (inch)",axes=FALSE,
        nlevels=9,labcex=0.6,add=TRUE,drawlabels=FALSE)
#
axis(1,at=57.+0:13)
# y axis: tick every inch but label only the odd heights
axis(2,at=(61.+0:17),
     labels=c("61"," ","63"," ","65"," ","67"," ",
              "69"," ","71"," ","73"," ","75"," ","77"," "))
box("plot")
#
dev.copy2eps(file="fig10x017.eps",colormodel="cmyk")
dev.copy2pdf(file="fig10x017.pdf",colormodel="cmyk")
}
|
#' Total variability of abilities given an age interval
#'
#' This function estimates the total variability of abilities across age bins
#' of a specified interval width between 0-36 months.
#' @param bin_width - (integer) Width of bin in months. Must be a divisor of
#'   36 (i.e. 36 %% bin_width == 0).
#' @param distribution_table (data.frame) The table used to interpolate mean
#'   and SD estimates of abilities. The data frame requires the following named
#'   columns: (1) months, (2) mu, & (3) sigma.
#' @param ... Additional arguments passed to simulate_abilities()
#' @return A dataframe with estimated values for the total variability of
#'   abilities in each age bin.
#' @export
#' @examples
#' get_vartot_abilities(bin_width = 3, distribution_table = reference)
get_vartot_abilities<-function(bin_width=3, distribution_table, ...){
  require(plyr)
  require(tidyverse)
  # bin_width must evenly divide the 0-36 month range; the original doc and
  # message said "multiple of 36", which contradicted this check
  if(36%%bin_width!=0){stop("bin_width must be a divisor of 36.")}
  month_vec = seq(0,36,by=bin_width)
  nn = length(month_vec)
  # One row per age bin: [month_min, month_max), with a small epsilon pulled
  # off the upper edge so adjacent bins do not overlap
  var_abilities_df = data.frame(month_min = month_vec[-nn]) %>%
    transform(month_max = month_min+bin_width-1E-4) %>%
    transform(bin = mapvalues(month_min, from = month_vec[-nn], to = seq(1,nn-1,by=1)), var_est= NA)
  for (i in seq_len(nrow(var_abilities_df))){
    # NOTE(review): month_max already had 1E-4 subtracted above, so the extra
    # -1E-4 here shrinks the bin twice -- confirm this is intended
    theta_vec_i = do.call(what = "simulate_abilities",
                          args = list(mo_min = var_abilities_df$month_min[i], mo_max = var_abilities_df$month_max[i]-1E-4,
                                      distribution_table = distribution_table, ... )
    )
    var_abilities_df$var_est[i] = var(theta_vec_i)
  }
  return(var_abilities_df)
}
|
/gsedadds/R/get_vartot_abilities.R
|
permissive
|
marcus-waldman/gsedadds
|
R
| false
| false
| 1,666
|
r
|
#' Total variability of abilities given an age interval
#'
#' This function estimates the total variability of abilities across age bins
#' of a specified interval width between 0-36 months.
#' @param bin_width - (integer) Width of bin in months. Must be a divisor of
#'   36 (i.e. 36 %% bin_width == 0).
#' @param distribution_table (data.frame) The table used to interpolate mean
#'   and SD estimates of abilities. The data frame requires the following named
#'   columns: (1) months, (2) mu, & (3) sigma.
#' @param ... Additional arguments passed to simulate_abilities()
#' @return A dataframe with estimated values for the total variability of
#'   abilities in each age bin.
#' @export
#' @examples
#' get_vartot_abilities(bin_width = 3, distribution_table = reference)
get_vartot_abilities<-function(bin_width=3, distribution_table, ...){
  require(plyr)
  require(tidyverse)
  # bin_width must evenly divide the 0-36 month range; the original doc and
  # message said "multiple of 36", which contradicted this check
  if(36%%bin_width!=0){stop("bin_width must be a divisor of 36.")}
  month_vec = seq(0,36,by=bin_width)
  nn = length(month_vec)
  # One row per age bin: [month_min, month_max), with a small epsilon pulled
  # off the upper edge so adjacent bins do not overlap
  var_abilities_df = data.frame(month_min = month_vec[-nn]) %>%
    transform(month_max = month_min+bin_width-1E-4) %>%
    transform(bin = mapvalues(month_min, from = month_vec[-nn], to = seq(1,nn-1,by=1)), var_est= NA)
  for (i in seq_len(nrow(var_abilities_df))){
    # NOTE(review): month_max already had 1E-4 subtracted above, so the extra
    # -1E-4 here shrinks the bin twice -- confirm this is intended
    theta_vec_i = do.call(what = "simulate_abilities",
                          args = list(mo_min = var_abilities_df$month_min[i], mo_max = var_abilities_df$month_max[i]-1E-4,
                                      distribution_table = distribution_table, ... )
    )
    var_abilities_df$var_est[i] = var(theta_vec_i)
  }
  return(var_abilities_df)
}
|
# Fetch a variable (or every variable) from the Concerto session state.
#
# name      - variable name to look up (ignored when all = TRUE)
# global    - if TRUE, read from the top-level globals instead of the
#             current flow frame
# all       - if TRUE, return the whole globals list instead of one entry
# posOffset - offset from the current flow index (e.g. -1 for the caller's
#             frame); used only for flow-local lookups
#
# `c.get` is a short alias bound to the same function.
# (was `=` assignment with reassignable T/F-style defaults; now `<-`/FALSE)
concerto.var.get <- c.get <- function(name, global = FALSE, all = FALSE, posOffset = 0) {
  if (global || concerto$flowIndex == 0) {
    # flowIndex == 0 means no flow frame exists yet, so fall back to globals
    if (all) {
      return(concerto$globals)
    }
    return(concerto$globals[[name]])
  }
  flowIndex <- concerto$flowIndex
  if (all) {
    return(concerto$flow[[flowIndex + posOffset]]$globals)
  }
  concerto$flow[[flowIndex + posOffset]]$globals[[name]]
}
|
/src/Concerto/TestBundle/Resources/R/concerto5/R/concerto.var.get.R
|
permissive
|
clabra/concerto-platform
|
R
| false
| false
| 420
|
r
|
# Fetch a variable (or every variable) from the Concerto session state.
#
# name      - variable name to look up (ignored when all = TRUE)
# global    - if TRUE, read from the top-level globals instead of the
#             current flow frame
# all       - if TRUE, return the whole globals list instead of one entry
# posOffset - offset from the current flow index (e.g. -1 for the caller's
#             frame); used only for flow-local lookups
#
# `c.get` is a short alias bound to the same function.
# (was `=` assignment with reassignable T/F-style defaults; now `<-`/FALSE)
concerto.var.get <- c.get <- function(name, global = FALSE, all = FALSE, posOffset = 0) {
  if (global || concerto$flowIndex == 0) {
    # flowIndex == 0 means no flow frame exists yet, so fall back to globals
    if (all) {
      return(concerto$globals)
    }
    return(concerto$globals[[name]])
  }
  flowIndex <- concerto$flowIndex
  if (all) {
    return(concerto$flow[[flowIndex + posOffset]]$globals)
  }
  concerto$flow[[flowIndex + posOffset]]$globals[[name]]
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reassortment_model.R
\name{reassort_popn}
\alias{reassort_popn}
\title{old implementation of reassortment}
\usage{
reassort_popn(virus_popn)
}
\arguments{
\item{virus_popn}{numeric vector of length 4, with the number of virions
in each of the 4 strains}
}
\value{
numeric vector of length 4, with the number of virions
in each of the 4 strains, after reassortment
}
\description{
old implementation of reassortment
}
|
/man/reassort_popn.Rd
|
no_license
|
ada-w-yan/reassortment
|
R
| false
| true
| 495
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reassortment_model.R
\name{reassort_popn}
\alias{reassort_popn}
\title{old implementation of reassortment}
\usage{
reassort_popn(virus_popn)
}
\arguments{
\item{virus_popn}{numeric vector of length 4, with the number of virions
in each of the 4 strains}
}
\value{
numeric vector of length 4, with the number of virions
in each of the 4 strains, after reassortment
}
\description{
old implementation of reassortment
}
|
# EXERCISE
# Rectangles
# To understand all the arguments for the themes, you'll modify an existing plot over the next series of exercises.
# Here you'll focus on the rectangles of the plotting object z that has already been created for you. If you type z in the console, you can check it out. The goal is to turn z into the plot in the viewer. Do this by following the instructions step by step.
# INSTRUCTIONS
# 100 XP
# INSTRUCTIONS
# 100 XP
# Plot 1: In the theme() function added to z, set the plot.background argument to element_rect(fill = myPink). myPink is already available in the workspace for you.
# Plot 2: Expand your code for Plot 1 by adding a border to the plot.background. Do this by adding 2 arguments to the element_rect() function in theme(): color and size. Set them to "black" and 3, respectively.
# Plot 3: we don't want the plot panels and legend to appear as they are in Plot 2. A shortcut is to remove all rectangles, as defined in the theme object no_panels, and then draw the one we want in the way we want. Copy your theme() layer from Plot 2 and add it to no_panels.
# Starting point
z
# Plot 1: Change the plot background fill to myPink
# (plot.background is the rectangle behind the entire plot; element_rect()
# styles rectangular theme elements)
z +
  theme(plot.background = element_rect(fill = myPink))
# Plot 2: Adjust the border to be a black line of size 3
z +
  # expanded from plot 1
  theme(plot.background = element_rect(fill = myPink, color = "black", size = 3))
# Theme to remove all rectangles
no_panels <- theme(rect = element_blank())
# Plot 3: Combine custom themes
# (later theme() layers override earlier ones, so the plot.background
# rectangle is re-added after no_panels blanks all rects)
z +
  no_panels +
  theme(plot.background = element_rect(fill = myPink, color = "black", size = 3))
|
/Course 12 - Data Visualization with ggplot2(part 2)/chapter_03_exercise_01.r
|
no_license
|
thiagolrpinho/DataScience4All
|
R
| false
| false
| 1,615
|
r
|
# EXERCISE
# Rectangles
# To understand all the arguments for the themes, you'll modify an existing plot over the next series of exercises.
# Here you'll focus on the rectangles of the plotting object z that has already been created for you. If you type z in the console, you can check it out. The goal is to turn z into the plot in the viewer. Do this by following the instructions step by step.
# INSTRUCTIONS
# 100 XP
# INSTRUCTIONS
# 100 XP
# Plot 1: In the theme() function added to z, set the plot.background argument to element_rect(fill = myPink). myPink is already available in the workspace for you.
# Plot 2: Expand your code for Plot 1 by adding a border to the plot.background. Do this by adding 2 arguments to the element_rect() function in theme(): color and size. Set them to "black" and 3, respectively.
# Plot 3: we don't want the plot panels and legend to appear as they are in Plot 2. A shortcut is to remove all rectangles, as defined in the theme object no_panels, and then draw the one we want in the way we want. Copy your theme() layer from Plot 2 and add it to no_panels.
# Starting point
z
# Plot 1: Change the plot background fill to myPink
# (plot.background is the rectangle behind the entire plot; element_rect()
# styles rectangular theme elements)
z +
  theme(plot.background = element_rect(fill = myPink))
# Plot 2: Adjust the border to be a black line of size 3
z +
  # expanded from plot 1
  theme(plot.background = element_rect(fill = myPink, color = "black", size = 3))
# Theme to remove all rectangles
no_panels <- theme(rect = element_blank())
# Plot 3: Combine custom themes
# (later theme() layers override earlier ones, so the plot.background
# rectangle is re-added after no_panels blanks all rects)
z +
  no_panels +
  theme(plot.background = element_rect(fill = myPink, color = "black", size = 3))
|
# Jake Yeung
# Date of Creation: 2019-03-26
# File: ~/projects/scchic/scripts/scripts_analysis/make_tables/create_bam_list_for_peak_analysis_build95.R
# Peak analysis for build 95: write, for each histone mark, the list of
# renamed bam cell names (one file per mark) consumed downstream.
rm(list=ls())
library(ggplot2)
library(ggrepel)
library(dplyr)
library(hash)
library(umap)
library(igraph)
# fwrite() below is data.table; it was never attached here and would fail
# unless one of the sourced files happened to load it
library(data.table)
source("scripts/Rfunctions/MaraDownstream.R")
source("scripts/Rfunctions/AuxLDA.R")
source("scripts/Rfunctions/Aux.R")
source("scripts/Rfunctions/PlotFunctions.R")
source("scripts/Rfunctions/GetMetaCellHash.R")
# outdir <- "~/data/scchic/tables/bamlist_for_merging_build95"
outdir <- "~/data/scchic/tables/bamlist_for_peak_analysis_build95"
dir.create(outdir)
# load("~/data/scchic/robjs/TFactivity_genelevels_objects.RData", v=T)
load("~/data/scchic/robjs/gene_levels_build95.Rdata", v=T)
jmarks.all <- list("H3K4me1" = "H3K4me1", "H3K4me3" = "H3K4me3", "H3K27me3" = "H3K27me3", "H3K9me3" = "H3K9me3")
# need new experihash
# change name to cell name
# write table summary for all
cellhash <- hash(rownames(barcodes), unlist(barcodes))
cellhash.bc <- hash(unlist(barcodes), paste("cell", rownames(barcodes), sep = ""))
dat.merge <- bind_rows(dat.umap.long.new.lst) %>% dplyr::select(cell, louvain, mark)
dat.merge$cellnew <- sapply(dat.merge$cell, MakeNewCellName.rev, experihash, cellhash)
dat.merge <- dat.merge %>% arrange(mark, louvain, cell)
# Write bam files for each mark -------------------------------------------
for (jmark in jmarks.all){
  dat.tmp <- subset(dat.merge, mark == jmark, select = c(cellnew))
  fwrite(dat.tmp, file = file.path(outdir, paste0("JY_", jmark, "_bamnames.out")), sep = "\t", col.names = FALSE)
}
|
/scripts/scripts_analysis/make_tables/create_bam_list_for_peak_analysis_build95.R
|
no_license
|
jakeyeung/scChIC-analysis
|
R
| false
| false
| 1,638
|
r
|
# Jake Yeung
# Date of Creation: 2019-03-26
# File: ~/projects/scchic/scripts/scripts_analysis/make_tables/create_bam_list_for_peak_analysis_build95.R
# Peak analysis for build 95: write, for each histone mark, the list of
# renamed bam cell names (one file per mark) consumed downstream.
rm(list=ls())
library(ggplot2)
library(ggrepel)
library(dplyr)
library(hash)
library(umap)
library(igraph)
# fwrite() below is data.table; it was never attached here and would fail
# unless one of the sourced files happened to load it
library(data.table)
source("scripts/Rfunctions/MaraDownstream.R")
source("scripts/Rfunctions/AuxLDA.R")
source("scripts/Rfunctions/Aux.R")
source("scripts/Rfunctions/PlotFunctions.R")
source("scripts/Rfunctions/GetMetaCellHash.R")
# outdir <- "~/data/scchic/tables/bamlist_for_merging_build95"
outdir <- "~/data/scchic/tables/bamlist_for_peak_analysis_build95"
dir.create(outdir)
# load("~/data/scchic/robjs/TFactivity_genelevels_objects.RData", v=T)
load("~/data/scchic/robjs/gene_levels_build95.Rdata", v=T)
jmarks.all <- list("H3K4me1" = "H3K4me1", "H3K4me3" = "H3K4me3", "H3K27me3" = "H3K27me3", "H3K9me3" = "H3K9me3")
# need new experihash
# change name to cell name
# write table summary for all
cellhash <- hash(rownames(barcodes), unlist(barcodes))
cellhash.bc <- hash(unlist(barcodes), paste("cell", rownames(barcodes), sep = ""))
dat.merge <- bind_rows(dat.umap.long.new.lst) %>% dplyr::select(cell, louvain, mark)
dat.merge$cellnew <- sapply(dat.merge$cell, MakeNewCellName.rev, experihash, cellhash)
dat.merge <- dat.merge %>% arrange(mark, louvain, cell)
# Write bam files for each mark -------------------------------------------
for (jmark in jmarks.all){
  dat.tmp <- subset(dat.merge, mark == jmark, select = c(cellnew))
  fwrite(dat.tmp, file = file.path(outdir, paste0("JY_", jmark, "_bamnames.out")), sep = "\t", col.names = FALSE)
}
|
context("mf trapezoidal")

## A trapezoid over (0, 1, 2, 3): degree ramps 0->1 on [0, 1], stays 1 on
## the [1, 2] plateau, and ramps 1->0 on [2, 3]. Check the breakpoints and
## the midpoints of both ramps, table-driven.
test_that("mf trapezoidal degrees", {
  mf <- new(mf_trapezoidal, 0, 1, 2, 3)
  cases <- list(c(0, 0), c(0.5, 0.5), c(1, 1), c(2, 1), c(2.5, 0.5), c(3, 0))
  for (case in cases) {
    expect_equal(mf$degree(case[1]), case[2])
  }
})
|
/fuzzedpackages/FisPro/tests/testthat/test_mf_trapezoidal.R
|
no_license
|
akhikolla/testpackages
|
R
| false
| false
| 321
|
r
|
context("mf trapezoidal")

## A trapezoid over (0, 1, 2, 3): degree ramps 0->1 on [0, 1], stays 1 on
## the [1, 2] plateau, and ramps 1->0 on [2, 3]. Check the breakpoints and
## the midpoints of both ramps, table-driven.
test_that("mf trapezoidal degrees", {
  mf <- new(mf_trapezoidal, 0, 1, 2, 3)
  cases <- list(c(0, 0), c(0.5, 0.5), c(1, 1), c(2, 1), c(2.5, 0.5), c(3, 0))
  for (case in cases) {
    expect_equal(mf$degree(case[1]), case[2])
  }
})
|
library(ape)
# Read tree 2702_0, strip its root, and write the unrooted copy alongside
# it (presumably for the codeml pipeline, given the directory name).
testtree <- read.tree("2702_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="2702_0_unrooted.txt")
|
/codeml_files/newick_trees_processed/2702_0/rinput.R
|
no_license
|
DaniBoo/cyanobacteria_project
|
R
| false
| false
| 135
|
r
|
library(ape)
# Read tree 2702_0, strip its root, and write the unrooted copy alongside
# it (presumably for the codeml pipeline, given the directory name).
testtree <- read.tree("2702_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="2702_0_unrooted.txt")
|
\name{exp2d.Z}
\alias{exp2d.Z}
\title{ Random Z-values for 2-d Exponential Data }
\description{ Evaluate the functional (mean) response for the 2-d
exponential data (truth) at the \code{X} inputs, and randomly
sample noisy \code{Z}--values having normal error with standard
deviation provided. }
\usage{exp2d.Z(X, sd=0.001)}
\arguments{
\item{X}{Must be a \code{matrix} or a \code{data.frame} with two columns
describing input locations}
\item{sd}{Standard deviation of iid normal noise added to the
responses}
}
\value{
Output is a \code{data.frame} with columns:
\item{Z}{Numeric vector describing the responses (with noise) at the
\code{X} input locations}
\item{Ztrue}{Numeric vector describing the true responses (without
noise) at the \code{X} input locations}
}
\details{
The response is evaluated as
\deqn{Z(X)=x_1 * \exp(x_1^2-x_2^2).}{Z(X) = X1 * exp(-X1^2-X2^2),}
thus creating the outputs \code{Z} and \code{Ztrue}.
Zero-mean normal noise with \code{sd=0.001} is added to the
responses \code{Z} and \code{ZZ}
}
\author{
Robert B. Gramacy, \email{rbg@vt.edu}, and
Matt Taddy, \email{mataddy@amazon.com}
}
\references{
Gramacy, R. B. (2020) \emph{Surrogates: Gaussian Process Modeling,
Design and Optimization for the Applied Sciences}. Boca Raton,
Florida: Chapman Hall/CRC.
\url{https://bobby.gramacy.com/surrogates/}
Gramacy, R. B. (2007). \emph{\pkg{tgp}: An \R Package for Bayesian
Nonstationary, Semiparametric Nonlinear Regression and Design by
Treed Gaussian Process Models.}
Journal of Statistical Software, \bold{19}(9).
\url{https://www.jstatsoft.org/v19/i09}
\doi{10.18637/jss.v019.i09}
Robert B. Gramacy, Matthew Taddy (2010). \emph{Categorical Inputs,
Sensitivity Analysis, Optimization and Importance Tempering with \pkg{tgp}
Version 2, an \R Package for Treed Gaussian Process Models.}
Journal of Statistical Software, \bold{33}(6), 1--48.
\url{https://www.jstatsoft.org/v33/i06/}
\doi{10.18637/jss.v033.i06}
Gramacy, R. B., Lee, H. K. H. (2008).
\emph{Bayesian treed Gaussian process models with an application
to computer modeling}. Journal of the American Statistical Association,
103(483), pp. 1119-1130. Also available as ArXiv article 0710.4536
\url{https://arxiv.org/abs/0710.4536}
\url{https://bobby.gramacy.com/r_packages/tgp/}
}
\seealso{\code{\link{exp2d}}, \code{\link{exp2d.rand}}}
\examples{
N <- 20
x <- seq(-2,6,length=N)
X <- expand.grid(x, x)
Zdata <- exp2d.Z(X)
persp(x,x,matrix(Zdata$Ztrue, nrow=N), theta=-30, phi=20,
main="Z true", xlab="x1", ylab="x2", zlab="Ztrue")
}
\keyword{datagen}
|
/man/exp2d.Z.Rd
|
no_license
|
cran/tgp
|
R
| false
| false
| 2,642
|
rd
|
\name{exp2d.Z}
\alias{exp2d.Z}
\title{ Random Z-values for 2-d Exponential Data }
\description{ Evaluate the functional (mean) response for the 2-d
exponential data (truth) at the \code{X} inputs, and randomly
sample noisy \code{Z}--values having normal error with standard
deviation provided. }
\usage{exp2d.Z(X, sd=0.001)}
\arguments{
\item{X}{Must be a \code{matrix} or a \code{data.frame} with two columns
describing input locations}
\item{sd}{Standard deviation of iid normal noise added to the
responses}
}
\value{
Output is a \code{data.frame} with columns:
\item{Z}{Numeric vector describing the responses (with noise) at the
\code{X} input locations}
\item{Ztrue}{Numeric vector describing the true responses (without
noise) at the \code{X} input locations}
}
\details{
The response is evaluated as
\deqn{Z(X)=x_1 * \exp(x_1^2-x_2^2).}{Z(X) = X1 * exp(-X1^2-X2^2),}
thus creating the outputs \code{Z} and \code{Ztrue}.
Zero-mean normal noise with \code{sd=0.001} is added to the
responses \code{Z} and \code{ZZ}
}
\author{
Robert B. Gramacy, \email{rbg@vt.edu}, and
Matt Taddy, \email{mataddy@amazon.com}
}
\references{
Gramacy, R. B. (2020) \emph{Surrogates: Gaussian Process Modeling,
Design and Optimization for the Applied Sciences}. Boca Raton,
Florida: Chapman Hall/CRC.
\url{https://bobby.gramacy.com/surrogates/}
Gramacy, R. B. (2007). \emph{\pkg{tgp}: An \R Package for Bayesian
Nonstationary, Semiparametric Nonlinear Regression and Design by
Treed Gaussian Process Models.}
Journal of Statistical Software, \bold{19}(9).
\url{https://www.jstatsoft.org/v19/i09}
\doi{10.18637/jss.v019.i09}
Robert B. Gramacy, Matthew Taddy (2010). \emph{Categorical Inputs,
Sensitivity Analysis, Optimization and Importance Tempering with \pkg{tgp}
Version 2, an \R Package for Treed Gaussian Process Models.}
Journal of Statistical Software, \bold{33}(6), 1--48.
\url{https://www.jstatsoft.org/v33/i06/}
\doi{10.18637/jss.v033.i06}
Gramacy, R. B., Lee, H. K. H. (2008).
\emph{Bayesian treed Gaussian process models with an application
to computer modeling}. Journal of the American Statistical Association,
103(483), pp. 1119-1130. Also available as ArXiv article 0710.4536
\url{https://arxiv.org/abs/0710.4536}
\url{https://bobby.gramacy.com/r_packages/tgp/}
}
\seealso{\code{\link{exp2d}}, \code{\link{exp2d.rand}}}
\examples{
N <- 20
x <- seq(-2,6,length=N)
X <- expand.grid(x, x)
Zdata <- exp2d.Z(X)
persp(x,x,matrix(Zdata$Ztrue, nrow=N), theta=-30, phi=20,
main="Z true", xlab="x1", ylab="x2", zlab="Ztrue")
}
\keyword{datagen}
|
# Tests for clusters_to_membership(): converts a list of clusters (vectors of
# element identifiers) into a membership vector whose names are the element
# identifiers and whose values are cluster labels.
context("Clusters to membership vector")
# Un-named clusters: labels default to the cluster's position in the list;
# `clust_ids` substitutes custom labels, `elem_ids` fixes the element order.
test_that("un-named list of integer vectors correctly transformed to membership vector", {
  clusters <- list(c(100L, 1L), c(2L))
  clust_ids <- c("A", "B")
  elem_ids <- c(1L, 2L, 100L)
  expect_equal(clusters_to_membership(clusters),
               c("1" = 1L, "100" = 1L, "2" = 2L))
  expect_equal(clusters_to_membership(clusters, elem_ids = elem_ids),
               c("1" = 1L, "2" = 2L, "100" = 1L))
  expect_equal(clusters_to_membership(clusters, clust_ids = clust_ids),
               c("1" = "A", "100" = "A", "2" = "B"))
  expect_equal(clusters_to_membership(clusters, elem_ids = elem_ids, clust_ids = clust_ids),
               c("1" = "A", "2" = "B", "100" = "A"))
})
# Same contract with character element identifiers.
test_that("un-named list of character vectors correctly transformed to membership vector", {
  clusters <- list(c("ELEM3", "ELEM1"), c("ELEM2"))
  clust_ids <- c("A", "B")
  elem_ids <- c("ELEM3", "ELEM2", "ELEM1")
  expect_equal(clusters_to_membership(clusters),
               c("ELEM1" = 1L, "ELEM2" = 2L, "ELEM3" = 1L))
  expect_equal(clusters_to_membership(clusters, elem_ids = elem_ids),
               c("ELEM3" = 1L, "ELEM2" = 2L, "ELEM1" = 1L))
  expect_equal(clusters_to_membership(clusters, clust_ids = clust_ids),
               c("ELEM1" = "A", "ELEM2" = "B", "ELEM3" = "A"))
  expect_equal(clusters_to_membership(clusters, elem_ids = elem_ids, clust_ids = clust_ids),
               c("ELEM3" = "A", "ELEM2" = "B", "ELEM1" = "A"))
})
# Named clusters: the list names act as cluster labels; passing an explicit
# `clust_ids` that matches those names leaves the result unchanged.
test_that("named list of integer vectors correctly transformed to membership vector", {
  clusters <- list("A" = c(100L, 1L), "B" = c(2L))
  clust_ids <- c("A", "B")
  elem_ids <- c(1L, 2L, 100L)
  expect_equal(clusters_to_membership(clusters),
               c("1" = "A", "100" = "A", "2" = "B"))
  expect_equal(clusters_to_membership(clusters, elem_ids = elem_ids),
               c("1" = "A", "2" = "B", "100" = "A"))
  expect_equal(clusters_to_membership(clusters, clust_ids = clust_ids),
               c("1" = "A", "100" = "A", "2" = "B"))
  expect_equal(clusters_to_membership(clusters, elem_ids = elem_ids, clust_ids = clust_ids),
               c("1" = "A", "2" = "B", "100" = "A"))
})
# Tests for membership_to_clusters(): the inverse transformation -- a
# membership vector becomes a named list with one element vector per cluster.
context("Membership vector to clusters")
# `elem_ids` relabels the elements (default is the positional index);
# `clust_ids` controls the order of the clusters in the output list.
test_that("un-named integer membership vector correctly transformed to list of vectors", {
  membership <- c(1L, 2L, 1L)
  clust_ids <- c(2L, 1L)
  elem_ids <- c(1L, 2L, 100L)
  expect_equal(membership_to_clusters(membership),
               list("1" = c(1L, 3L), "2" = 2L))
  expect_equal(membership_to_clusters(membership, elem_ids = elem_ids),
               list("1" = c(1L, 100L), "2" = 2L))
  expect_equal(membership_to_clusters(membership, clust_ids = clust_ids),
               list("2" = 2L, "1" = c(1L, 3L)))
  expect_equal(membership_to_clusters(membership, elem_ids = elem_ids, clust_ids = clust_ids),
               list("2" = 2L, "1" = c(1L, 100L)))
})
test_that("un-named character membership vector correctly transformed to list of vectors", {
  membership <- c("B", "A", "B")
  clust_ids <- c("B", "A")
  elem_ids <- c(1L, 2L, 100L)
  expect_equal(membership_to_clusters(membership),
               list("A" = 2L, "B" = c(1L, 3L)))
  expect_equal(membership_to_clusters(membership, elem_ids = elem_ids),
               list("A" = 2L, "B" = c(1L, 100L)))
  expect_equal(membership_to_clusters(membership, clust_ids = clust_ids),
               list("B" = c(1L, 3L), "A" = 2L))
  expect_equal(membership_to_clusters(membership, elem_ids = elem_ids, clust_ids = clust_ids),
               list("B" = c(1L, 100L), "A" = 2L))
})
# Named membership vector: element names are carried through as character
# unless `elem_ids` overrides them with typed identifiers.
test_that("named character membership vector correctly transformed to list of vectors", {
  membership <- c("1" = "B", "2" = "A", "100" = "B")
  clust_ids <- c("B", "A")
  elem_ids <- c(1L, 2L, 100L)
  expect_equal(membership_to_clusters(membership),
               list("A" = "2", "B" = c("1", "100")))
  expect_equal(membership_to_clusters(membership, elem_ids = elem_ids),
               list("A" = 2L, "B" = c(1L, 100L)))
  expect_equal(membership_to_clusters(membership, clust_ids = clust_ids),
               list("B" = c("1", "100"), "A" = "2"))
  expect_equal(membership_to_clusters(membership, elem_ids = elem_ids, clust_ids = clust_ids),
               list("B" = c(1L, 100L), "A" = 2L))
})
# Tests for pairs_to_membership(): a two-column matrix of linked element
# pairs is grouped into clusters over the universe given by `elem_ids`.
context("Pairs to membership vector")
test_that("integer matrix of pairs correctly transformed to membership vector", {
  pairs <- rbind(c(1L, 2L), c(1L, 3L), c(2L, 3L), c(4L, 5L))
  elem_ids <- seq_len(5)
  expect_equal(pairs_to_membership(pairs, elem_ids),
               c("1" = 1, "2" = 1, "3" = 1, "4" = 2, "5" = 2))
})
# With no pairs at all, every element becomes its own singleton cluster.
test_that("special case of no pairs handled correctly", {
  pairs <- matrix(0L, nrow = 0, ncol = 2)
  elem_ids <- seq_len(5)
  expect_equal(pairs_to_membership(pairs, elem_ids),
               c("1" = 1, "2" = 2, "3" = 3, "4" = 4, "5" = 5))
})
test_that("character matrix of pairs correctly transformed to membership vector", {
  pairs <- rbind(c("A", "B"), c("B", "C"), c("A", "C"), c("D", "E"))
  elem_ids <- LETTERS[1:5]
  expect_equal(pairs_to_membership(pairs, elem_ids),
               c("A" = 1, "B" = 1, "C" = 1, "D" = 2, "E" = 2))
})
# Invalid-input contract: NA inside `pairs` warns; NA in `elem_ids` or a
# `pairs` matrix without two columns is an error.
test_that("missing element identifiers in pairs produces a warning", {
  pairs <- rbind(c(NA, 2L), c(1L, 3L), c(2L, 3L))
  elem_ids <- seq_len(5)
  expect_warning(pairs_to_membership(pairs, elem_ids))
})
test_that("missing element identifiers in `elem_ids` results in error", {
  pairs <- rbind(c(1L, 2L), c(1L, 3L), c(2L, 3L))
  elem_ids <- c(1L, NA, 3L)
  expect_error(pairs_to_membership(pairs, elem_ids))
})
test_that("passing pairs with incorrect dimensions results in error", {
  pairs <- rbind(c(1L, 2L), c(1L, 3L), c(2L, 3L))
  elem_ids <- c(1L, 2L, 3L)
  expect_error(pairs_to_membership(pairs[,0], elem_ids))
})
# Tests for canonicalize_pairs(): identifiers are sorted within each pair,
# rows are sorted lexicographically, and duplicate pairs are dropped.
context("Canonicalize pairs")
test_that("rows are ordered lexicographically by first column then second column", {
  shuffled <- rbind(c(3, 4), c(1, 5), c(1, 2))
  expected <- rbind(c(1, 2), c(1, 5), c(3, 4))
  expect_equal(canonicalize_pairs(shuffled), expected)
})
test_that("identifiers are ordered lexicographically within each row", {
  reversed <- rbind(c(4, 3), c(1, 5), c(2, 1))
  expected <- rbind(c(1, 2), c(1, 5), c(3, 4))
  expect_equal(canonicalize_pairs(reversed), expected)
})
test_that("duplicate pairs are removed", {
  mirrored <- rbind(c(1, 2), c(2, 1))
  expect_equal(canonicalize_pairs(mirrored), rbind(c(1, 2)))
})
|
/fuzzedpackages/clevr/tests/testthat/test-transformations.R
|
no_license
|
akhikolla/testpackages
|
R
| false
| false
| 6,361
|
r
|
# Tests for clusters_to_membership(): converts a list of clusters (vectors of
# element identifiers) into a membership vector whose names are the element
# identifiers and whose values are cluster labels.
context("Clusters to membership vector")
# Un-named clusters: labels default to the cluster's position in the list;
# `clust_ids` substitutes custom labels, `elem_ids` fixes the element order.
test_that("un-named list of integer vectors correctly transformed to membership vector", {
  clusters <- list(c(100L, 1L), c(2L))
  clust_ids <- c("A", "B")
  elem_ids <- c(1L, 2L, 100L)
  expect_equal(clusters_to_membership(clusters),
               c("1" = 1L, "100" = 1L, "2" = 2L))
  expect_equal(clusters_to_membership(clusters, elem_ids = elem_ids),
               c("1" = 1L, "2" = 2L, "100" = 1L))
  expect_equal(clusters_to_membership(clusters, clust_ids = clust_ids),
               c("1" = "A", "100" = "A", "2" = "B"))
  expect_equal(clusters_to_membership(clusters, elem_ids = elem_ids, clust_ids = clust_ids),
               c("1" = "A", "2" = "B", "100" = "A"))
})
# Same contract with character element identifiers.
test_that("un-named list of character vectors correctly transformed to membership vector", {
  clusters <- list(c("ELEM3", "ELEM1"), c("ELEM2"))
  clust_ids <- c("A", "B")
  elem_ids <- c("ELEM3", "ELEM2", "ELEM1")
  expect_equal(clusters_to_membership(clusters),
               c("ELEM1" = 1L, "ELEM2" = 2L, "ELEM3" = 1L))
  expect_equal(clusters_to_membership(clusters, elem_ids = elem_ids),
               c("ELEM3" = 1L, "ELEM2" = 2L, "ELEM1" = 1L))
  expect_equal(clusters_to_membership(clusters, clust_ids = clust_ids),
               c("ELEM1" = "A", "ELEM2" = "B", "ELEM3" = "A"))
  expect_equal(clusters_to_membership(clusters, elem_ids = elem_ids, clust_ids = clust_ids),
               c("ELEM3" = "A", "ELEM2" = "B", "ELEM1" = "A"))
})
# Named clusters: the list names act as cluster labels; passing an explicit
# `clust_ids` that matches those names leaves the result unchanged.
test_that("named list of integer vectors correctly transformed to membership vector", {
  clusters <- list("A" = c(100L, 1L), "B" = c(2L))
  clust_ids <- c("A", "B")
  elem_ids <- c(1L, 2L, 100L)
  expect_equal(clusters_to_membership(clusters),
               c("1" = "A", "100" = "A", "2" = "B"))
  expect_equal(clusters_to_membership(clusters, elem_ids = elem_ids),
               c("1" = "A", "2" = "B", "100" = "A"))
  expect_equal(clusters_to_membership(clusters, clust_ids = clust_ids),
               c("1" = "A", "100" = "A", "2" = "B"))
  expect_equal(clusters_to_membership(clusters, elem_ids = elem_ids, clust_ids = clust_ids),
               c("1" = "A", "2" = "B", "100" = "A"))
})
# Tests for membership_to_clusters(): the inverse transformation -- a
# membership vector becomes a named list with one element vector per cluster.
context("Membership vector to clusters")
# `elem_ids` relabels the elements (default is the positional index);
# `clust_ids` controls the order of the clusters in the output list.
test_that("un-named integer membership vector correctly transformed to list of vectors", {
  membership <- c(1L, 2L, 1L)
  clust_ids <- c(2L, 1L)
  elem_ids <- c(1L, 2L, 100L)
  expect_equal(membership_to_clusters(membership),
               list("1" = c(1L, 3L), "2" = 2L))
  expect_equal(membership_to_clusters(membership, elem_ids = elem_ids),
               list("1" = c(1L, 100L), "2" = 2L))
  expect_equal(membership_to_clusters(membership, clust_ids = clust_ids),
               list("2" = 2L, "1" = c(1L, 3L)))
  expect_equal(membership_to_clusters(membership, elem_ids = elem_ids, clust_ids = clust_ids),
               list("2" = 2L, "1" = c(1L, 100L)))
})
test_that("un-named character membership vector correctly transformed to list of vectors", {
  membership <- c("B", "A", "B")
  clust_ids <- c("B", "A")
  elem_ids <- c(1L, 2L, 100L)
  expect_equal(membership_to_clusters(membership),
               list("A" = 2L, "B" = c(1L, 3L)))
  expect_equal(membership_to_clusters(membership, elem_ids = elem_ids),
               list("A" = 2L, "B" = c(1L, 100L)))
  expect_equal(membership_to_clusters(membership, clust_ids = clust_ids),
               list("B" = c(1L, 3L), "A" = 2L))
  expect_equal(membership_to_clusters(membership, elem_ids = elem_ids, clust_ids = clust_ids),
               list("B" = c(1L, 100L), "A" = 2L))
})
# Named membership vector: element names are carried through as character
# unless `elem_ids` overrides them with typed identifiers.
test_that("named character membership vector correctly transformed to list of vectors", {
  membership <- c("1" = "B", "2" = "A", "100" = "B")
  clust_ids <- c("B", "A")
  elem_ids <- c(1L, 2L, 100L)
  expect_equal(membership_to_clusters(membership),
               list("A" = "2", "B" = c("1", "100")))
  expect_equal(membership_to_clusters(membership, elem_ids = elem_ids),
               list("A" = 2L, "B" = c(1L, 100L)))
  expect_equal(membership_to_clusters(membership, clust_ids = clust_ids),
               list("B" = c("1", "100"), "A" = "2"))
  expect_equal(membership_to_clusters(membership, elem_ids = elem_ids, clust_ids = clust_ids),
               list("B" = c(1L, 100L), "A" = 2L))
})
# Tests for pairs_to_membership(): a two-column matrix of linked element
# pairs is grouped into clusters over the universe given by `elem_ids`.
context("Pairs to membership vector")
test_that("integer matrix of pairs correctly transformed to membership vector", {
  pairs <- rbind(c(1L, 2L), c(1L, 3L), c(2L, 3L), c(4L, 5L))
  elem_ids <- seq_len(5)
  expect_equal(pairs_to_membership(pairs, elem_ids),
               c("1" = 1, "2" = 1, "3" = 1, "4" = 2, "5" = 2))
})
# With no pairs at all, every element becomes its own singleton cluster.
test_that("special case of no pairs handled correctly", {
  pairs <- matrix(0L, nrow = 0, ncol = 2)
  elem_ids <- seq_len(5)
  expect_equal(pairs_to_membership(pairs, elem_ids),
               c("1" = 1, "2" = 2, "3" = 3, "4" = 4, "5" = 5))
})
test_that("character matrix of pairs correctly transformed to membership vector", {
  pairs <- rbind(c("A", "B"), c("B", "C"), c("A", "C"), c("D", "E"))
  elem_ids <- LETTERS[1:5]
  expect_equal(pairs_to_membership(pairs, elem_ids),
               c("A" = 1, "B" = 1, "C" = 1, "D" = 2, "E" = 2))
})
# Invalid-input contract: NA inside `pairs` warns; NA in `elem_ids` or a
# `pairs` matrix without two columns is an error.
test_that("missing element identifiers in pairs produces a warning", {
  pairs <- rbind(c(NA, 2L), c(1L, 3L), c(2L, 3L))
  elem_ids <- seq_len(5)
  expect_warning(pairs_to_membership(pairs, elem_ids))
})
test_that("missing element identifiers in `elem_ids` results in error", {
  pairs <- rbind(c(1L, 2L), c(1L, 3L), c(2L, 3L))
  elem_ids <- c(1L, NA, 3L)
  expect_error(pairs_to_membership(pairs, elem_ids))
})
test_that("passing pairs with incorrect dimensions results in error", {
  pairs <- rbind(c(1L, 2L), c(1L, 3L), c(2L, 3L))
  elem_ids <- c(1L, 2L, 3L)
  expect_error(pairs_to_membership(pairs[,0], elem_ids))
})
# Tests for canonicalize_pairs(): identifiers are sorted within each pair,
# rows are sorted lexicographically, and duplicate pairs are dropped.
context("Canonicalize pairs")
test_that("rows are ordered lexicographically by first column then second column", {
  shuffled <- rbind(c(3, 4), c(1, 5), c(1, 2))
  expected <- rbind(c(1, 2), c(1, 5), c(3, 4))
  expect_equal(canonicalize_pairs(shuffled), expected)
})
test_that("identifiers are ordered lexicographically within each row", {
  reversed <- rbind(c(4, 3), c(1, 5), c(2, 1))
  expected <- rbind(c(1, 2), c(1, 5), c(3, 4))
  expect_equal(canonicalize_pairs(reversed), expected)
})
test_that("duplicate pairs are removed", {
  mirrored <- rbind(c(1, 2), c(2, 1))
  expect_equal(canonicalize_pairs(mirrored), rbind(c(1, 2)))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/new.prop-excess.r
\name{cox}
\alias{cox}
\title{Identifies proportional excess terms of model}
\usage{
cox(x)
}
\arguments{
\item{x}{variable}
}
\description{
Specifies which of the regressors lead to proportional excess hazard
}
\author{
Thomas Scheike
}
\keyword{survival}
|
/man/cox.Rd
|
no_license
|
scheike/timereg
|
R
| false
| true
| 358
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/new.prop-excess.r
\name{cox}
\alias{cox}
\title{Identifies proportional excess terms of model}
\usage{
cox(x)
}
\arguments{
\item{x}{variable}
}
\description{
Specifies which of the regressors lead to proportional excess hazard
}
\author{
Thomas Scheike
}
\keyword{survival}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/JunctionTools.R
\name{junctionSummaryReport}
\alias{junctionSummaryReport}
\title{Plot the distribution of your splice junction types}
\usage{
junctionSummaryReport(junction_type, pdf = TRUE,
outfile = "Junction_summary_report.pdf")
}
\arguments{
\item{junction_type}{generated by the function: JunctionType}
\item{pdf}{boolean, should the result be printed in working directory as pdf
or be returned as a list.}
\item{outfile}{Name of the printed pdf}
}
\description{
Plot the distribution of your splice junction types
}
|
/man/junctionSummaryReport.Rd
|
no_license
|
mffrank/RNAseqToCustomFASTA
|
R
| false
| true
| 605
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/JunctionTools.R
\name{junctionSummaryReport}
\alias{junctionSummaryReport}
\title{Plot the distribution of your splice junction types}
\usage{
junctionSummaryReport(junction_type, pdf = TRUE,
outfile = "Junction_summary_report.pdf")
}
\arguments{
\item{junction_type}{generated by the function: JunctionType}
\item{pdf}{boolean, should the result be printed in working directory as pdf
or be returned as a list.}
\item{outfile}{Name of the printed pdf}
}
\description{
Plot the distribution of your splice junction types
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/open_image_rscript.R
\name{image_thresholding}
\alias{image_thresholding}
\title{image thresholding}
\usage{
image_thresholding(image, thresh)
}
\arguments{
\item{image}{matrix or 3-dimensional array where the third dimension is equal to 3}
\item{thresh}{the threshold parameter should be between 0 and 1 if the data is normalized or between 0-255 otherwise}
}
\value{
a matrix
}
\description{
image thresholding
}
\details{
This function applies thresholding to a matrix or to a 3-dimensional array where the third dimension is equal to 3.
}
\examples{
path = system.file("tmp_images", "1.png", package = "OpenImageR")
image = readImage(path)
filt = image_thresholding(image, thresh = 0.5)
}
\author{
Lampros Mouselimis
}
|
/man/image_thresholding.Rd
|
no_license
|
mlampros/OpenImageR
|
R
| false
| true
| 806
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/open_image_rscript.R
\name{image_thresholding}
\alias{image_thresholding}
\title{image thresholding}
\usage{
image_thresholding(image, thresh)
}
\arguments{
\item{image}{matrix or 3-dimensional array where the third dimension is equal to 3}
\item{thresh}{the threshold parameter should be between 0 and 1 if the data is normalized or between 0-255 otherwise}
}
\value{
a matrix
}
\description{
image thresholding
}
\details{
This function applies thresholding to a matrix or to a 3-dimensional array where the third dimension is equal to 3.
}
\examples{
path = system.file("tmp_images", "1.png", package = "OpenImageR")
image = readImage(path)
filt = image_thresholding(image, thresh = 0.5)
}
\author{
Lampros Mouselimis
}
|
#
## PCA PLOT FUNCTION for lake means (creates a PCA biplot that is nicer looking than the default biplot())
# pca is a summary of a pca object (i.e., summary(pca))
# xlab and ylab are character strings to be used for axis labels (e.g., "PC1 - XX.X%")
# Build a publication-style PCA biplot from a summarised ordination object.
# `pca` must carry $species (variable loadings) and $sites (site scores),
# each with PC1/PC2 columns; `xlab`/`ylab` are the axis label strings
# (e.g. "PC1 - XX.X%"). Returns a ggplot object.
plot.Lmean <- function(pca, xlab, ylab) {
  loadings <- pca$species
  scores <- pca$sites
  ggplot() +
    # Loading arrows from the origin for each variable.
    geom_segment(aes(x = 0, y = 0, xend = PC1, yend = PC2),
                 arrow = arrow(length = unit(0.15, "cm")),
                 data = loadings, color = "coral1", alpha = 0.5) +
    theme_classic() +
    coord_fixed() +
    # Variable labels, repelled to avoid overlap.
    geom_text_repel(data = loadings,
                    aes(x = PC1, y = PC2, label = row.names(loadings)),
                    box.padding = .1, point.padding = .01,
                    colour = "red4", fontface = "bold", size = 2.5,
                    show.legend = FALSE) +
    # Site (lake) labels plotted at their scores.
    geom_text(data = scores,
              aes(x = PC1, y = PC2, label = row.names(scores)),
              size = 3, colour = "steelblue4", show.legend = FALSE) +
    xlab(xlab) + ylab(ylab)
}
#
##########
# ECOLOGICAL DATA
##########
#
#
###### Depth profiles
# Load bathymetry profiles; rows 90-92 are dropped (assumed non-data trailer
# rows -- TODO confirm against depth_profiles.csv) and the first 7 columns kept.
depth<-read.csv("depth_profiles.csv")[-(90:92),c(1:7)]
# rename cols
colnames(depth)<-c("Lake","Depth.ft","Depth.m","image.area","acres","hectares","scale.fac")
depth$Lake<-droplevels(depth$Lake) #Drops lakes w/o depth data
# For each lake, fit a monotone (Hyman) spline to the depth:area profile and
# interpolate surface area at every 0.5 m depth increment down to the maximum
# depth (depths are stored as negative metres, hence by = -.5).
depth.by.lake<-by(depth,depth$Lake, FUN = function(x) unique(data.frame(
  area=splinefun(x=x$Depth.m,y=x$hectares, ties = mean,method="hyman")(
    c((seq(from =0,to= min(x$Depth.m),by=-.5)),min(x$Depth.m))),
  depth=c((seq(from =0,to= min(x$Depth.m),by=-.5)),min(x$Depth.m)),
  Lake=rep(x$Lake, length(seq(from =0,to= min(x$Depth.m),by=-.5))+1))))
depth.splines<-do.call(rbind,depth.by.lake)
# Littoral fraction per lake: proportion of surface area shallower than 3 m,
# estimated from the same monotone spline as 1 - area(-3 m) / max area.
litt.area<-c(by(depth,depth$Lake, FUN = function(x) litt.area=1-splinefun(
  x=x$Depth.m,y=x$hectares, ties = mean,method="hyman")(-3.0)/max(x$hectares)))
# Figure: area:depth relationships from the interpolated splines, with the
# 3 m littoral cutoff drawn as a dashed line, one facet per lake.
depth.map.splines<-ggplot(data = depth.splines, aes(x=area,y=depth))+
  geom_rect(xmin=0,xmax=Inf,ymax=0,ymin=-Inf,fill = "honeydew2")+
  labs(x="Area (ha)", y= "Depth (m)")+
  geom_ribbon(data=depth.splines, aes(ymin=depth,ymax=0), fill="skyblue1")+
  geom_line(aes(y=-3),linetype="dashed", color="skyblue3")+
  facet_wrap(~Lake)+theme_classic()
# Same figure built from the raw measured depths only (no interpolation).
depth.map.linear<-ggplot(data = depth, aes(x=hectares,y=Depth.m))+
  geom_rect(xmin=0,xmax=Inf,ymax=0,ymin=-Inf,fill = "honeydew2")+
  geom_ribbon(data=depth, aes(ymin=Depth.m,ymax=0), fill="skyblue1")+
  geom_line(aes(y=-3),linetype="dashed", color="skyblue3")+
  facet_wrap(~Lake)+theme_classic()
#
# Lake-level environmental and biological data (one row per lake).
eco.dat<-read.csv("AK_ENV_zoo_invert_2018-2019_v2.csv")
# Keep the 14 study lakes and the variables of interest; lake names become rownames.
eco.dat<-data.frame(eco.dat[1:14,c("Lake","LATITUDE","LONGITUDE","Region","Elevation..m.","area..ha.",
                                   "approx.max.z..m.","DOC..mg.L.","TP..ug.L.","TN..ug.L.","ChlA..ug.L.",
                                   "Sp.Cond..uS.cm.","pH","CALCIUM..mg.L.","Total.zoo.not.nauplii..invid.L.",
                                   "Total..Daphnia..indiv.L.","Total.macroinvert..m2","gamaridae",
                                   "chironomidae")],row.names = "Lake")
colnames(eco.dat)<-c("Lat.","Lon.","Region","Elev.","Area","Max_Depth","DOC","TP","TN",
                     "ChlA","Cond.","pH","Ca","T.Zoop","Daph.","T.macro",
                     "Gamm.","Chiro.")
# Attach the littoral-area fraction computed from the depth splines above.
eco.dat<-merge(eco.dat,litt.area,by ="row.names",all=T)%>%
  rename(Lake=Row.names,littoral=y)
rownames(eco.dat)<-eco.dat$Lake
# Scaled PCAs on: abiotic variables only; abiotic + prey variables; prey only.
(lake.pca<-rda(na.omit(eco.dat[,c("Area","Max_Depth","DOC","TP","TN",
                                  "ChlA","Cond.","pH","Ca")]),scale = T))%>%biplot()
(lake.pca.w.prey<-rda(na.omit(eco.dat[,c("Area","Max_Depth","DOC","TP","TN",
                                         "ChlA","Cond.","pH","Ca","Daph.",
                                         "Gamm.","littoral")]),scale = T))%>%biplot()
(lake.pca.trophic<-rda(na.omit(eco.dat[,c("Daph.","Gamm.","littoral")]),scale = T))%>%biplot()
# dimensionality and default biplots
biplot(lake.pca)
estimate.ED.eig(summary(lake.pca)$cont[["importance"]][2,])
biplot(lake.pca.w.prey)
estimate.ED.eig(summary(lake.pca.w.prey)$cont[["importance"]][2,])
biplot(lake.pca.trophic)
estimate.ED.eig(summary(lake.pca.trophic)$cont[["importance"]][2,])
# Publication-style biplots (axis percentages are transcribed from the PCA
# summaries above -- update the labels if the data change).
Env.pca<-plot.Lmean(summary(lake.pca),"PC1 - 50.3%","PC2 - 17.4%")
Env.pca_w.prey<-plot.Lmean(summary(lake.pca.w.prey), "PC1 - 46.2%","PC2 - 20.9%")
Env.pca_trophic<-plot.Lmean(summary(lake.pca.trophic),"PC1 - 54.6%","PC2 - 33.5%")
# BUG FIX: the stacked figure previously referenced `Env.pca_w.prey.cond`,
# which is never defined; the object created above is `Env.pca_w.prey`.
Env.pca/Env.pca_w.prey # 12.5 x 6 inches
# Export eigenvalues / singular values / proportions / loadings for each PCA.
(env.pca.vectors<-round(rbind("Eigenvalues"=lake.pca$CA$eig,"Singular Values"=sqrt(lake.pca$CA$eig),
                              "Proportion Explained"=lake.pca$CA$eig/sum(lake.pca$CA$eig),
                              lake.pca$CA$u,lake.pca$CA$v),3))%>%write.csv("env.pca.vectors.csv")
(env.pca.vectors_w.prey<-round(rbind("Eigenvalues"=lake.pca.w.prey$CA$eig,"Singular Values"=sqrt(lake.pca.w.prey$CA$eig),
                                     "Proportion Explained"=lake.pca.w.prey$CA$eig/sum(lake.pca.w.prey$CA$eig),
                                     lake.pca.w.prey$CA$u,lake.pca.w.prey$CA$v),3))%>%write.csv("env.pca.w.prey.vectors.csv")
(env.pca.vectors_trophic<-round(rbind("Eigenvalues"=lake.pca.trophic$CA$eig,"Singular Values"=sqrt(lake.pca.trophic$CA$eig),
                                      "Proportion Explained"=lake.pca.trophic$CA$eig/sum(lake.pca.trophic$CA$eig),
                                      lake.pca.trophic$CA$u,lake.pca.trophic$CA$v),3))%>%write.csv("env.pca.trophic.vectors.csv")
# Diet plot showing Daphnia and gammarid abundance w/ % littoral area as the color scale
benthlim.plot<-ggplot(data = eco.dat,aes(x=Gamm.,y=Daph., color=littoral*100)) + theme_classic()+
  labs(x="Gammarids",y="Daphnia",color="% littoral")+
  geom_point(size=2.5)+ scale_color_gradient(low="steelblue4",high = "seagreen2")+
  geom_text_repel(data = eco.dat,aes(label=Lake),color="black",box.padding=.5,point.padding = .2,size=2.5)
#
# MAP
library(rnaturalearth)
library(rnaturalearthdata)
library(ggrepel)
#map
# Country outlines at 1:50m scale as sf objects; Canada and Russia are kept
# so the inset map has geographic context around Alaska.
world <- ne_countries(scale=50,returnclass = 'sf')
usa <- subset(world, admin == "United States of America")
can <- subset(world, admin == "Canada")
rus <- subset(world, admin == "Russia")
usacanrus<-rbind(usa,can,rus)
# Interactive sanity check of the lake names in the environmental data.
eco.dat$Lake
#inset map of entire state of AK, with the local study extent outlined.
# NOTE(review): panel_border()/theme_cowplot() come from cowplot, which is not
# loaded in this chunk -- confirm library(cowplot) appears earlier in the file.
alaska<-ggplot(data = usacanrus) +
  geom_sf(fill = "honeydew2") +
  panel_border(color = "grey50")+ theme_grey()+
  geom_rect(xmin = -152.5, xmax = -147.5, ymin = 59, ymax = 62,
            fill = NA, colour = "black", size = .45)+
  coord_sf(xlim = c(-176, -130),
           ylim = c(53, 73), expand = FALSE, datum = NA)
# Kenai Peninsula local map (same extent as the rectangle drawn above).
alaskalocal <- ggplot(data = usa) + theme_cowplot(font_size = 9)+
  geom_sf(fill = "honeydew2") +coord_sf(xlim = c(-152.5, -147.5), ylim = c(59, 62), expand = F)
# combines inset and local Kenai map, and adds lake points and labels
AKlocal_inset<-alaskalocal + annotation_custom(
  grob = ggplotGrob(alaska),
  xmin = -149.4,
  xmax = -147.5,
  ymin = 57.9,
  ymax = 59.8+1.2) +
  geom_point(data=eco.dat,aes(x=Lon.,y=Lat.),size = 2, color = "skyblue4")+
  geom_text_repel(data = eco.dat,aes(x=Lon.,y=Lat.,label = Lake),
                  box.padding=.5,point.padding = .2,size = 3, fontface = "bold")
# Saves the most recently displayed plot to PDF.
ggsave("AK lake map.pdf", width = 5, height = 6, dpi = "screen")
#
#
# Groups of environmental traits
prey<-c("Daph.","Gamm.","littoral")
physical<-c("Area","Max_Depth","DOC")
chemical<-c("TP","TN","ChlA","Cond.","pH","Ca")
env<-c(prey,physical,chemical)
# Merge per-fish trait data with lake-level ecological variables and average
# every column within lakes.
#   traits     : data frame with "ID", "Lake" and trait columns
#   traitnames : trait columns that gate NA-based row removal
#   eco        : ecological data frame with lakes as row names
#   eco.var    : eco columns to carry along
# Returns a data frame of lake means with Lake as row names.
get.lake.means<-function(traits,traitnames,eco,eco.var){
# outer merge keeps fish from lakes lacking eco data
traits.eco=transform(merge(traits,eco[,eco.var],by.x="Lake",by.y = "row.names", all = T),row.names = ID)
# drops column 2 before averaging; na.omit(..., cols =) is the data.table
# method, so only `traitnames` columns trigger row removal -- assumes
# data.table is attached (TODO confirm)
lake.means=group_by(na.omit(as.data.table(traits.eco[,c(-(2))]),cols = traitnames),
Lake)%>%summarise_all(mean,na.rm=T)%>%as.data.frame()
rownames(lake.means)<-lake.means$Lake
lake.means
}
def.lake.means<-get.lake.means(Defense.df,def.trait.list,eco.dat,env)
swi.lake.means<-get.lake.means(Swimming.df,swi.trait.list,eco.dat,env)
tro.lake.means<-get.lake.means(Trophic.df,tro.trait.list,eco.dat,env)
#
eco.dat<-data.frame(eco.dat[,])# exclude Echo, b/c partial eco data
# POPULATION MEANS FOR TRAITS
adj.data.unscaled<-merge(Defense.df[c("ID","Lake",def.trait.list)],
merge(Swimming.df[c("ID",swi.trait.list)],merge(Trophic.df[,c("ID",tro.trait.list)],two.d.coords,
by.x = "ID",by.y = "row.names", all = T), by= "ID",all= T), by = "ID", all = T)
lake.means.adj.data<-group_by(na.omit(adj.data.unscaled[,c(-1,-(19:60))]),Lake)%>%summarise_all(mean,na.rm=T)%>%as.data.frame()# non-landmark traits only
lake.means.data.eco<-merge(lake.means.adj.data,eco.dat[,env],by.x = "Lake", by.y = "row.names")
rownames(lake.means.data.eco)<-lake.means.data.eco$Lake
LD.means<-group_by(merge(merge(LD.def.df,LD.swim.df[-2],by="FishID"),LD.tro.df[-2],by="FishID"),Lake)%>%
summarise_all(mean,na.rm=T)%>%as.data.frame()%>%merge(eco.dat[,env],by.x = "Lake", by.y = "row.names")
lake.means.4tile<-as.matrix(lake.means.data.eco[,-1])
colnames(lake.means.4tile)<-c("LP","DS1l","DS2l","PSl","LPm","SL",
"BD","CP","BC","GW","PW","GR","JL","SN","ED","HL",env)
trait.corr.tile<-rcorr(lake.means.4tile,type="spearman")[[1]][c("LP","DS1l","DS2l",
"PSl","LPm","SL","BD","CP","BC","GW","PW","GR","JL","SN","ED","HL"),env]%>%
corrplot(method ="color",addgrid.col=NA,sig.level=c(.001,.01,.05), insig = "label_sig",tl.col = "black",tl.cex = .5,pch="",
pch.cex = .75,p.mat = rcorr(lake.means.4tile,type="spearman")[[3]][c("LP","DS1l",
"DS2l","PSl","LPm","SL","BD","CP","BC","GW","PW","GR","JL","SN","ED","HL"),env])
# Correlation tile of LD axes vs environment. NOTE: p.mat must receive the
# p-value matrix -- rcorr(...)[[3]] -- not the correlation matrix ([[1]])
# that was passed originally; the sibling trait.corr.tile above correctly
# uses [[3]].
LD.corr.tile<-rcorr(as.matrix(LD.means[,-c(1:2)]),type="spearman")[[1]][
c("defense.LD1","swimming.LD1", "trophic.LD1","defense.LD2","swimming.LD2","trophic.LD2","defense.LD3","swimming.LD3","trophic.LD3"),env]%>%
corrplot(method ="color",addgrid.col=NA,sig.level=c(.001,.01,.05), tl.col = "black",tl.cex = .5,pch="",pch.cex = .75,
p.mat = rcorr(as.matrix(LD.means[,-c(1:2)]),type="spearman")[[3]][
c("defense.LD1","swimming.LD1", "trophic.LD1","defense.LD2","swimming.LD2","trophic.LD2","defense.LD3","swimming.LD3","trophic.LD3"),env])
# TRAIT X ENV PLS
# z-transforms trait and eco data
lake.means.data.eco_adj<-data.frame(row.names=(lake.means.data.eco)$Lake,
sapply((lake.means.data.eco[,-1]), function(x) (x-mean(na.omit(x)))/sd(na.omit(x))))
pls.vector.df <- function(pls) {
  # Collect the singular values, their squares, and both blocks' PLS loading
  # vectors into one exportable table, rounded to 4 decimals.
  sv <- pls$svd$d
  tab <- rbind("singular values" = sv,
               "SV sq"           = sv^2,
               pls$left.pls.vectors,
               pls$right.pls.vectors)
  round(data.frame(tab), 4)
}
# 2-B PLS for Defense, Swimming, and Trophic traits
(eco.def.pls<-two.b.pls(na.omit(lake.means.data.eco_adj)[,env],
na.omit(lake.means.data.eco_adj)[,def.trait.list]))%>%plot()
eco.def.pls.bestfit<-odregress(eco.def.pls$XScores[,1],eco.def.pls$YScores[,1])$coeff[c(2,1)]
(eco.swi.pls<-two.b.pls(na.omit(lake.means.data.eco_adj)[,env],
na.omit(lake.means.data.eco_adj)[,swi.trait.list]))%>%plot()
eco.swi.pls.bestfit<-odregress(eco.swi.pls$XScores[,1],eco.swi.pls$YScores[,1])$coeff[c(2,1)]
(eco.tro.pls<-two.b.pls(na.omit(lake.means.data.eco_adj)[,env],
na.omit(lake.means.data.eco_adj)[,tro.trait.list]))%>%plot()
eco.tro.pls.bestfit<-odregress(eco.tro.pls$XScores[,1],eco.tro.pls$YScores[,1])$coeff[c(2,1)]
# full (except shape)
(eco.full.pls<-two.b.pls(na.omit(lake.means.data.eco_adj)[,env],
na.omit(lake.means.data.eco_adj)[,c(def.trait.list,swi.trait.list,tro.trait.list)]))%>%plot()
eco.full.pls_bestfit<-odregress(eco.full.pls$XScores[,1],eco.full.pls$YScores[,1])$coeff[c(2,1)]
(eco.def.vectors<-round(pls.vector.df(eco.def.pls),3))%>%write.csv("eco-def_PLSvec.csv")
(eco.swi.vectors<-round(pls.vector.df(eco.swi.pls),3))%>%write.csv("eco-swi_PLSvec.csv")
(eco.tro.vectors<-round(pls.vector.df(eco.tro.pls),3))%>%write.csv("eco-tro_PLSvec.csv")
(eco.full.vectors<-round(pls.vector.df(eco.full.pls),3))%>%write.csv("eco-full_PLSvec.csv")
PhenoEco.stats<-data.frame(rbind(c(eco.def.pls$r.pls,eco.def.pls$P.value,eco.def.pls$Z),
c(eco.swi.pls$r.pls,eco.swi.pls$P.value,eco.swi.pls$Z),
c(eco.tro.pls$r.pls,eco.tro.pls$P.value,eco.tro.pls$Z),
c(eco.full.pls$r.pls,eco.full.pls$P.value,eco.full.pls$Z)),
row.names=c("Defense x Eco","Swimming x Eco","Trophic x Eco",
"Def-Swi-Tro x Eco"))
colnames(PhenoEco.stats)<-c("r-PLS","p","Z")
PhenoEco.stats[1:3,"adj.p"]<-p.adjust(PhenoEco.stats[1:3,"p"], method = "fdr")
PhenoEco.stats<-round(PhenoEco.stats,3)
write.csv(PhenoEco.stats,"Pheno-Eco_PLS-stats.csv")
# 2-B PLS for Defense, Swimming, and Trophic traits W/O feeding-related env. traits
(eco.physchem.def.pls<-two.b.pls(na.omit(lake.means.data.eco_adj[,env[-c(1:3)]]),
na.omit(lake.means.data.eco_adj[,c(env[-c(1:3)],def.trait.list)])[,def.trait.list]))%>%plot()
eco.physchem.def.pls.bestfit<-odregress(eco.physchem.def.pls$XScores[,1],eco.physchem.def.pls$YScores[,1])$coeff[c(2,1)]
(eco.physchem.swi.pls<-two.b.pls(na.omit(lake.means.data.eco_adj[,env[-c(1:3)]]),
na.omit(lake.means.data.eco_adj[,c(env[-c(1:3)],swi.trait.list)])[,swi.trait.list]))%>%plot()
eco.physchem.swi.pls.bestfit<-odregress(eco.physchem.swi.pls$XScores[,1],eco.physchem.swi.pls$YScores[,1])$coeff[c(2,1)]
(eco.physchem.tro.pls<-two.b.pls(na.omit(lake.means.data.eco_adj[,env[-c(1:3)]]),
na.omit(lake.means.data.eco_adj[,c(env[-c(1:3)],tro.trait.list)])[,tro.trait.list]))%>%plot()
eco.physchem.tro.pls.bestfit<-odregress(eco.physchem.tro.pls$XScores[,1],eco.physchem.tro.pls$YScores[,1])$coeff[c(2,1)]
# full (except shape)
(eco.physchem.full.pls<-two.b.pls(na.omit(lake.means.data.eco_adj[,env[-c(1:3)]]),
na.omit(lake.means.data.eco_adj[,c(env[-c(1:3)],
def.trait.list,swi.trait.list,tro.trait.list)])
[,c(def.trait.list,swi.trait.list,tro.trait.list)]))%>%plot()
eco.physchem.full.pls.bestfit<-odregress(eco.physchem.full.pls$XScores[,1],eco.physchem.full.pls$YScores[,1])$coeff[c(2,1)]
(eco.physchem.def.vectors<-round(pls.vector.df(eco.physchem.def.pls),3))%>%write.csv("eco.physchem-def_PLSvec.csv")
(eco.physchem.swi.vectors<-round(pls.vector.df(eco.physchem.swi.pls),3))%>%write.csv("eco.physchem-swi_PLSvec.csv")
(eco.physchem.tro.vectors<-round(pls.vector.df(eco.physchem.tro.pls),3))%>%write.csv("eco.physchem-tro_PLSvec.csv")
(eco.physchem.full.vectors<-round(pls.vector.df(eco.physchem.full.pls),3))%>%write.csv("eco.physchem-full_PLSvec.csv")
PhenoEco.physchem.stats<-data.frame(rbind(c(eco.physchem.def.pls$r.pls,eco.physchem.def.pls$P.value,eco.physchem.def.pls$Z),
c(eco.physchem.swi.pls$r.pls,eco.physchem.swi.pls$P.value,eco.physchem.swi.pls$Z),
c(eco.physchem.tro.pls$r.pls,eco.physchem.tro.pls$P.value,eco.physchem.tro.pls$Z),
c(eco.physchem.full.pls$r.pls,eco.physchem.full.pls$P.value,eco.physchem.full.pls$Z)),
row.names=c("Defense x Eco","Swimming x Eco","Trophic x Eco",
"Def-Swi-Tro x Eco"))
colnames(PhenoEco.physchem.stats)<-c("r-PLS","p","Z")
PhenoEco.physchem.stats[1:3,"adj.p"]<-p.adjust(PhenoEco.physchem.stats[1:3,"p"], method = "fdr")
PhenoEco.physchem.stats<-round(PhenoEco.physchem.stats,3)
write.csv(PhenoEco.physchem.stats,"Pheno-Eco.physchem_PLS-stats.csv")
####### THIS EXTRACTS ORTHOGONAL REGRESSION LINE!!!!
eco.pls.plot <- function(pls, bestfit, xlab, ylab) {
  # Scatter of the first-axis PLS scores (X block vs Y block) with the
  # orthogonal-regression line; `bestfit` = c(intercept, slope).
  scores <- data.frame(x = pls$XScores[, 1], y = pls$YScores[, 1])
  ggplot(data = scores, aes(x = x, y = y)) +
    theme_classic() +
    labs(x = xlab, y = ylab) +
    geom_abline(intercept = bestfit[1], slope = bestfit[2], color = "grey80") +
    geom_point(color = "seagreen3", size = 2) +
    geom_text_repel(aes(label = rownames(scores)), color = "black",
                    box.padding = .3, point.padding = .03, size = 2.5)
}
eco.def.pls.plot<-eco.pls.plot(eco.def.pls,eco.def.pls.bestfit,"L Block - Eco","R Block - Defense")
eco.swi.pls.plot<-eco.pls.plot(eco.swi.pls,eco.swi.pls.bestfit,"L Block - Eco","R Block - Swimming")
eco.tro.pls.plot<-eco.pls.plot(eco.tro.pls,eco.tro.pls.bestfit,"L Block - Eco","R Block - Trophic")
eco.full.pls.plot<-eco.pls.plot(eco.full.pls,eco.full.pls_bestfit,"L Block - Eco","R Block - Pheno")
# compound pls plot for full eco variables including littoral, daph., and gamm.
eco.def.pls.plot+eco.tro.pls.plot+eco.swi.pls.plot+eco.full.pls.plot
def.pls.eig<-ED((eco.def.pls$svd$d)^2)
swi.pls.eig<-ED((eco.swi.pls$svd$d)^2)
tro.pls.eig<-ED((eco.tro.pls$svd$d)^2)
full.pls.eig<-ED((eco.full.pls$svd$d)^2)
eco.physchem.def.pls.plot<-eco.pls.plot(eco.physchem.def.pls,eco.physchem.def.pls.bestfit,"L Block - Eco","R Block - Defense")
eco.physchem.swi.pls.plot<-eco.pls.plot(eco.physchem.swi.pls,eco.physchem.swi.pls.bestfit,"L Block - Eco","R Block - Swimming")
eco.physchem.tro.pls.plot<-eco.pls.plot(eco.physchem.tro.pls,eco.physchem.tro.pls.bestfit,"L Block - Eco","R Block - Trophic")
eco.physchem.full.pls.plot<-eco.pls.plot(eco.physchem.full.pls,eco.physchem.full.pls.bestfit,"L Block - Eco","R Block - Pheno")
eco.physchem.def.pls.plot+eco.physchem.tro.pls.plot+
eco.physchem.swi.pls.plot+eco.physchem.full.pls.plot
def.physchem.pls.eig<-ED((eco.physchem.def.pls$svd$d)^2)
swi.physchem.pls.eig<-ED((eco.physchem.swi.pls$svd$d)^2)
tro.physchem.pls.eig<-ED((eco.physchem.tro.pls$svd$d)^2)
full.physchem.pls.eig<-ED((eco.physchem.full.pls$svd$d)^2)
ecoblock.theta <- function(vec1, vec2) {
  # Acute angle between two vectors: vec.theta() gives a value in [0, 180];
  # fold anything past 90 back so the sign of vector direction is ignored.
  raw <- vec.theta(vec1, vec2)
  if (raw > 90) 180 - raw else raw
}
# Pairwise angles (deg) between the eco-block PLS1 loading vectors of the
# three trait suites, as a 2x2 lower-triangle table. The structurally empty
# upper cell is NA_real_ directly; the original coerced "" through
# as.numeric(), which forced the whole vector to character and emitted a
# coercion warning. Also dropped the stray trailing comma in matrix() and
# spelled out TRUE.
eco.pls.theta<-data.frame(matrix(c(ecoblock.theta(eco.def.pls$left.pls.vectors[,1],eco.swi.pls$left.pls.vectors[,1]),NA_real_,
ecoblock.theta(eco.def.pls$left.pls.vectors[,1],eco.tro.pls$left.pls.vectors[,1]),
ecoblock.theta(eco.swi.pls$left.pls.vectors[,1],eco.tro.pls$left.pls.vectors[,1])),nrow = 2,byrow = TRUE),
row.names = c("Swimming","Trophic"))
colnames(eco.pls.theta)<-c("Defense","Swimming")
eco.pls.theta<-round(eco.pls.theta,2)
write.csv(eco.pls.theta,"pw.pls.eco_AngleMatrix.csv")
ecoblock.theta(eco.full.pls$left.pls.vectors[,1],eco.def.pls$left.pls.vectors[,1])%>%round(2)
ecoblock.theta(eco.full.pls$left.pls.vectors[,1],eco.swi.pls$left.pls.vectors[,1])%>%round(2)
ecoblock.theta(eco.full.pls$left.pls.vectors[,1],eco.tro.pls$left.pls.vectors[,1])%>%round(2)
# Same pairwise-angle table for the physico-chemical-only PLS fits. Empty
# upper cell is NA_real_ directly (the original's as.numeric(c(..., "", ...))
# coerced through character with a warning); stray trailing comma removed,
# T spelled out as TRUE.
eco.physchem.pls.theta<-data.frame(matrix(c(ecoblock.theta(eco.physchem.def.pls$left.pls.vectors[,1],eco.physchem.swi.pls$left.pls.vectors[,1]),NA_real_,
ecoblock.theta(eco.physchem.def.pls$left.pls.vectors[,1],eco.physchem.tro.pls$left.pls.vectors[,1]),
ecoblock.theta(eco.physchem.swi.pls$left.pls.vectors[,1],eco.physchem.tro.pls$left.pls.vectors[,1])),nrow = 2,byrow = TRUE),
row.names = c("Swimming","Trophic"))
colnames(eco.physchem.pls.theta)<-c("Defense","Swimming")
eco.physchem.pls.theta<-round(eco.physchem.pls.theta,2)
write.csv(eco.physchem.pls.theta,"pw.pls.eco.physchem_AngleMatrix.csv")
ecoblock.theta(eco.physchem.full.pls$left.pls.vectors[,1],eco.physchem.def.pls$left.pls.vectors[,1])%>%round(2)
ecoblock.theta(eco.physchem.full.pls$left.pls.vectors[,1],eco.physchem.swi.pls$left.pls.vectors[,1])%>%round(2)
ecoblock.theta(eco.physchem.full.pls$left.pls.vectors[,1],eco.physchem.tro.pls$left.pls.vectors[,1])%>%round(2)
|
/AKdim-pt5_eco.R
|
no_license
|
sarahsanderson/HendryLab_CodeReview
|
R
| false
| false
| 20,249
|
r
|
#
## PCA PLOT FUNCTION for lake means (creates a pca biplot that is nicer looking than )
# pca is a summary of a pca object (i.e., summary(pca))
# xlab and ylab are character strings to be used for axis labels (e.g., "PC1 - XX.X%")
# PCA biplot for lake means (nicer-looking than the default biplot()).
#   pca        : summary() of an rda/pca object exposing $species (variable
#                loadings) and $sites (lake scores), each with PC1/PC2 columns
#   xlab, ylab : axis labels, e.g. "PC1 - XX.X%"
# NOTE(review): despite the dot in the name this is a plain function, not an
# S3 `plot` method for class "Lmean" -- consider renaming to plot_Lmean.
plot.Lmean<-function(pca,xlab,ylab){
ggplot() +
# variable loadings as arrows from the origin
geom_segment(aes(x=0, y=0, xend=PC1, yend=PC2), arrow=arrow(length = unit(0.15,"cm")),
data=pca$species, color="coral1", alpha=0.5) + theme_classic() + coord_fixed()+
# variable labels (repelled) and lake-score labels; F -> FALSE (F is reassignable)
geom_text_repel(data=pca$species, aes(x=PC1,y=PC2,label = row.names(pca$species)),box.padding=.1,point.padding = .01,
colour="red4",fontface="bold", size =2.5, show.legend = FALSE)+
geom_text(data=pca$sites, aes(x=PC1,y=PC2,label = row.names(pca$sites)), size = 3, colour="steelblue4", show.legend = FALSE)+
xlab(xlab)+ylab(ylab)
}
#
##########
# ECOLOGICAL DATA
##########
#
#
###### Depth profiles
# load depth profiles
# Bathymetry: rows 90-92 are dropped and only the first 7 columns kept.
depth<-read.csv("depth_profiles.csv")[-(90:92),c(1:7)]
# rename cols
colnames(depth)<-c("Lake","Depth.ft","Depth.m","image.area","acres","hectares","scale.fac")
# NOTE(review): under R >= 4.0 read.csv returns character columns, and
# droplevels() on a character vector fails -- confirm Lake is a factor here.
depth$Lake<-droplevels(depth$Lake) #Drops lakes w/o depth data
# the following fits each depth profile with a monotonic spline and interpolates
# area at each .5 m depth increment
# (depths are negative, so seq() steps by -0.5 down to the lake maximum;
# unique() collapses the duplicate row created when min depth is a multiple
# of 0.5)
depth.by.lake<-by(depth,depth$Lake, FUN = function(x) unique(data.frame(
area=splinefun(x=x$Depth.m,y=x$hectares, ties = mean,method="hyman")(
c((seq(from =0,to= min(x$Depth.m),by=-.5)),min(x$Depth.m))),
depth=c((seq(from =0,to= min(x$Depth.m),by=-.5)),min(x$Depth.m)),
Lake=rep(x$Lake, length(seq(from =0,to= min(x$Depth.m),by=-.5))+1))))
depth.splines<-do.call(rbind,depth.by.lake)
# Uses splines calculated above to estimate proportion of lake shallower than 3 meters
# NOTE(review): the inline `litt.area=` inside FUN is redundant (the last
# expression is returned anyway) -- consider removing it.
litt.area<-c(by(depth,depth$Lake, FUN = function(x) litt.area=1-splinefun(
x=x$Depth.m,y=x$hectares, ties = mean,method="hyman")(-3.0)/max(x$hectares)))
# Creates figure showing area:depth relationships using interpolated splines
depth.map.splines<-ggplot(data = depth.splines, aes(x=area,y=depth))+
geom_rect(xmin=0,xmax=Inf,ymax=0,ymin=-Inf,fill = "honeydew2")+
labs(x="Area (ha)", y= "Depth (m)")+
geom_ribbon(data=depth.splines, aes(ymin=depth,ymax=0), fill="skyblue1")+
geom_line(aes(y=-3),linetype="dashed", color="skyblue3")+
facet_wrap(~Lake)+theme_classic()
# Creates figure showing area:depth relationships using only measured depths
depth.map.linear<-ggplot(data = depth, aes(x=hectares,y=Depth.m))+
geom_rect(xmin=0,xmax=Inf,ymax=0,ymin=-Inf,fill = "honeydew2")+
geom_ribbon(data=depth, aes(ymin=Depth.m,ymax=0), fill="skyblue1")+
geom_line(aes(y=-3),linetype="dashed", color="skyblue3")+
facet_wrap(~Lake)+theme_classic()
#
#
#
#
#
# Lake-level ecological data: only the first 14 lakes and the variables of
# interest, with Lake as row names (19 columns selected; Lake becomes the
# row names, leaving 18 data columns to match the rename below).
eco.dat<-read.csv("AK_ENV_zoo_invert_2018-2019_v2.csv")
eco.dat<-data.frame(eco.dat[1:14,c("Lake","LATITUDE","LONGITUDE","Region","Elevation..m.","area..ha.",
"approx.max.z..m.","DOC..mg.L.","TP..ug.L.","TN..ug.L.","ChlA..ug.L.",
"Sp.Cond..uS.cm.","pH","CALCIUM..mg.L.","Total.zoo.not.nauplii..invid.L.",
"Total..Daphnia..indiv.L.","Total.macroinvert..m2","gamaridae",
"chironomidae")],row.names = "Lake")
colnames(eco.dat)<-c("Lat.","Lon.","Region","Elev.","Area","Max_Depth","DOC","TP","TN",
"ChlA","Cond.","pH","Ca","T.Zoop","Daph.","T.macro",
"Gamm.","Chiro.")
# attach the % littoral estimate computed from the depth splines; merging a
# named vector by row names yields columns Row.names and (presumably) y --
# renamed to Lake / littoral (TODO confirm the merge column name)
eco.dat<-merge(eco.dat,litt.area,by ="row.names",all=T)%>%
rename(Lake=Row.names,littoral=y)
rownames(eco.dat)<-eco.dat$Lake
# PCAs (vegan::rda with scale = T => correlation matrix): physico-chemical
# only, physico-chemical + prey, and prey-only ("trophic") variable sets.
(lake.pca<-rda(na.omit(eco.dat[,c("Area","Max_Depth","DOC","TP","TN",
"ChlA","Cond.","pH","Ca")]),scale = T))%>%biplot()
(lake.pca.w.prey<-rda(na.omit(eco.dat[,c("Area","Max_Depth","DOC","TP","TN",
"ChlA","Cond.","pH","Ca","Daph.",
"Gamm.","littoral")]),scale = T))%>%biplot()
(lake.pca.trophic<-rda(na.omit(eco.dat[,c("Daph.","Gamm.","littoral")]),scale = T))%>%biplot()
# dimensionality and default biplots
biplot(lake.pca)
estimate.ED.eig(summary(lake.pca)$cont[["importance"]][2,])
biplot(lake.pca.w.prey)
estimate.ED.eig(summary(lake.pca.w.prey)$cont[["importance"]][2,])
biplot(lake.pca.trophic)
estimate.ED.eig(summary(lake.pca.trophic)$cont[["importance"]][2,])
# PCA biplots of the lake environmental data (axis % labels are hard-coded;
# NOTE(review): re-check them against summary() output if the data change).
Env.pca<-plot.Lmean(summary(lake.pca),"PC1 - 50.3%","PC2 - 17.4%")
Env.pca_w.prey<-plot.Lmean(summary(lake.pca.w.prey), "PC1 - 46.2%","PC2 - 20.9%")
Env.pca_trophic<-plot.Lmean(summary(lake.pca.trophic),"PC1 - 54.6%","PC2 - 33.5%")
# Stack the two biplots (patchwork `/`). Was `Env.pca_w.prey.cond`, an object
# never created anywhere in the script -- fixed to the plot built above.
Env.pca/Env.pca_w.prey # 12.5 x 6 inches
(env.pca.vectors<-round(rbind("Eigenvalues"=lake.pca$CA$eig,"Singular Values"=sqrt(lake.pca$CA$eig),
"Proportion Explained"=lake.pca$CA$eig/sum(lake.pca$CA$eig),
lake.pca$CA$u,lake.pca$CA$v),3))%>%write.csv("env.pca.vectors.csv")
(env.pca.vectors_w.prey<-round(rbind("Eigenvalues"=lake.pca.w.prey$CA$eig,"Singular Values"=sqrt(lake.pca.w.prey$CA$eig),
"Proportion Explained"=lake.pca.w.prey$CA$eig/sum(lake.pca.w.prey$CA$eig),
lake.pca.w.prey$CA$u,lake.pca.w.prey$CA$v),3))%>%write.csv("env.pca.w.prey.vectors.csv")
(env.pca.vectors_trophic<-round(rbind("Eigenvalues"=lake.pca.trophic$CA$eig,"Singular Values"=sqrt(lake.pca.trophic$CA$eig),
"Proportion Explained"=lake.pca.trophic$CA$eig/sum(lake.pca.trophic$CA$eig),
lake.pca.trophic$CA$u,lake.pca.trophic$CA$v),3))%>%write.csv("env.pca.trophic.vectors.csv")
# Diet plot showing Daphnia and gammarid abundance w/ % littoral area as the color scale
benthlim.plot<-ggplot(data = eco.dat,aes(x=Gamm.,y=Daph., color=littoral*100)) + theme_classic()+
labs(x="Gammarids",y="Daphnia",color="% littoral")+
geom_point(size=2.5)+ scale_color_gradient(low="steelblue4",high = "seagreen2")+
geom_text_repel(data = eco.dat,aes(label=Lake),color="black",box.padding=.5,point.padding = .2,size=2.5)
#
# MAP
library(rnaturalearth)
library(rnaturalearthdata)
library(ggrepel)
#map
#loads map data
world <- ne_countries(scale=50,returnclass = 'sf')
usa <- subset(world, admin == "United States of America")
can <- subset(world, admin == "Canada")
rus <- subset(world, admin == "Russia")
usacanrus<-rbind(usa,can,rus)
eco.dat$Lake
#inset map of entire state of AK
alaska<-ggplot(data = usacanrus) +
geom_sf(fill = "honeydew2") +
panel_border(color = "grey50")+ theme_grey()+
geom_rect(xmin = -152.5, xmax = -147.5, ymin = 59, ymax = 62,
fill = NA, colour = "black", size = .45)+
coord_sf(xlim = c(-176, -130),
ylim = c(53, 73), expand = FALSE, datum = NA)
# Kenai Peninsula local map
alaskalocal <- ggplot(data = usa) + theme_cowplot(font_size = 9)+
geom_sf(fill = "honeydew2") +coord_sf(xlim = c(-152.5, -147.5), ylim = c(59, 62), expand = F)
# combines inset and local Kenai map, and adds points and labels
AKlocal_inset<-alaskalocal + annotation_custom(
grob = ggplotGrob(alaska),
xmin = -149.4,
xmax = -147.5,
ymin = 57.9,
ymax = 59.8+1.2) +
geom_point(data=eco.dat,aes(x=Lon.,y=Lat.),size = 2, color = "skyblue4")+
geom_text_repel(data = eco.dat,aes(x=Lon.,y=Lat.,label = Lake),
box.padding=.5,point.padding = .2,size = 3, fontface = "bold")
ggsave("AK lake map.pdf", width = 5, height = 6, dpi = "screen")
#
#
# Groups of environmental traits
# prey / physical / chemical partition the lake variables used throughout the
# PLS and correlation analyses below.
prey<-c("Daph.","Gamm.","littoral")
physical<-c("Area","Max_Depth","DOC")
chemical<-c("TP","TN","ChlA","Cond.","pH","Ca")
env<-c(prey,physical,chemical)
# Merge per-fish trait data with lake-level ecological variables and average
# every column within lakes.
#   traits     : data frame with "ID", "Lake" and trait columns
#   traitnames : trait columns that gate NA-based row removal
#   eco        : ecological data frame with lakes as row names
#   eco.var    : eco columns to carry along
# Returns a data frame of lake means with Lake as row names.
get.lake.means<-function(traits,traitnames,eco,eco.var){
# outer merge keeps fish from lakes lacking eco data
traits.eco=transform(merge(traits,eco[,eco.var],by.x="Lake",by.y = "row.names", all = T),row.names = ID)
# drops column 2 before averaging; na.omit(..., cols =) is the data.table
# method, so only `traitnames` columns trigger row removal -- assumes
# data.table is attached (TODO confirm)
lake.means=group_by(na.omit(as.data.table(traits.eco[,c(-(2))]),cols = traitnames),
Lake)%>%summarise_all(mean,na.rm=T)%>%as.data.frame()
rownames(lake.means)<-lake.means$Lake
lake.means
}
def.lake.means<-get.lake.means(Defense.df,def.trait.list,eco.dat,env)
swi.lake.means<-get.lake.means(Swimming.df,swi.trait.list,eco.dat,env)
tro.lake.means<-get.lake.means(Trophic.df,tro.trait.list,eco.dat,env)
#
eco.dat<-data.frame(eco.dat[,])# exclude Echo, b/c partial eco data
# POPULATION MEANS FOR TRAITS
adj.data.unscaled<-merge(Defense.df[c("ID","Lake",def.trait.list)],
merge(Swimming.df[c("ID",swi.trait.list)],merge(Trophic.df[,c("ID",tro.trait.list)],two.d.coords,
by.x = "ID",by.y = "row.names", all = T), by= "ID",all= T), by = "ID", all = T)
lake.means.adj.data<-group_by(na.omit(adj.data.unscaled[,c(-1,-(19:60))]),Lake)%>%summarise_all(mean,na.rm=T)%>%as.data.frame()# non-landmark traits only
lake.means.data.eco<-merge(lake.means.adj.data,eco.dat[,env],by.x = "Lake", by.y = "row.names")
rownames(lake.means.data.eco)<-lake.means.data.eco$Lake
LD.means<-group_by(merge(merge(LD.def.df,LD.swim.df[-2],by="FishID"),LD.tro.df[-2],by="FishID"),Lake)%>%
summarise_all(mean,na.rm=T)%>%as.data.frame()%>%merge(eco.dat[,env],by.x = "Lake", by.y = "row.names")
lake.means.4tile<-as.matrix(lake.means.data.eco[,-1])
colnames(lake.means.4tile)<-c("LP","DS1l","DS2l","PSl","LPm","SL",
"BD","CP","BC","GW","PW","GR","JL","SN","ED","HL",env)
trait.corr.tile<-rcorr(lake.means.4tile,type="spearman")[[1]][c("LP","DS1l","DS2l",
"PSl","LPm","SL","BD","CP","BC","GW","PW","GR","JL","SN","ED","HL"),env]%>%
corrplot(method ="color",addgrid.col=NA,sig.level=c(.001,.01,.05), insig = "label_sig",tl.col = "black",tl.cex = .5,pch="",
pch.cex = .75,p.mat = rcorr(lake.means.4tile,type="spearman")[[3]][c("LP","DS1l",
"DS2l","PSl","LPm","SL","BD","CP","BC","GW","PW","GR","JL","SN","ED","HL"),env])
# Correlation tile of LD axes vs environment. NOTE: p.mat must receive the
# p-value matrix -- rcorr(...)[[3]] -- not the correlation matrix ([[1]])
# that was passed originally; the sibling trait.corr.tile above correctly
# uses [[3]].
LD.corr.tile<-rcorr(as.matrix(LD.means[,-c(1:2)]),type="spearman")[[1]][
c("defense.LD1","swimming.LD1", "trophic.LD1","defense.LD2","swimming.LD2","trophic.LD2","defense.LD3","swimming.LD3","trophic.LD3"),env]%>%
corrplot(method ="color",addgrid.col=NA,sig.level=c(.001,.01,.05), tl.col = "black",tl.cex = .5,pch="",pch.cex = .75,
p.mat = rcorr(as.matrix(LD.means[,-c(1:2)]),type="spearman")[[3]][
c("defense.LD1","swimming.LD1", "trophic.LD1","defense.LD2","swimming.LD2","trophic.LD2","defense.LD3","swimming.LD3","trophic.LD3"),env])
# TRAIT X ENV PLS
# z-transforms trait and eco data
lake.means.data.eco_adj<-data.frame(row.names=(lake.means.data.eco)$Lake,
sapply((lake.means.data.eco[,-1]), function(x) (x-mean(na.omit(x)))/sd(na.omit(x))))
pls.vector.df <- function(pls) {
  # Collect the singular values, their squares, and both blocks' PLS loading
  # vectors into one exportable table, rounded to 4 decimals.
  sv <- pls$svd$d
  tab <- rbind("singular values" = sv,
               "SV sq"           = sv^2,
               pls$left.pls.vectors,
               pls$right.pls.vectors)
  round(data.frame(tab), 4)
}
# 2-B PLS for Defense, Swimming, and Trophic traits
(eco.def.pls<-two.b.pls(na.omit(lake.means.data.eco_adj)[,env],
na.omit(lake.means.data.eco_adj)[,def.trait.list]))%>%plot()
eco.def.pls.bestfit<-odregress(eco.def.pls$XScores[,1],eco.def.pls$YScores[,1])$coeff[c(2,1)]
(eco.swi.pls<-two.b.pls(na.omit(lake.means.data.eco_adj)[,env],
na.omit(lake.means.data.eco_adj)[,swi.trait.list]))%>%plot()
eco.swi.pls.bestfit<-odregress(eco.swi.pls$XScores[,1],eco.swi.pls$YScores[,1])$coeff[c(2,1)]
(eco.tro.pls<-two.b.pls(na.omit(lake.means.data.eco_adj)[,env],
na.omit(lake.means.data.eco_adj)[,tro.trait.list]))%>%plot()
eco.tro.pls.bestfit<-odregress(eco.tro.pls$XScores[,1],eco.tro.pls$YScores[,1])$coeff[c(2,1)]
# full (except shape)
(eco.full.pls<-two.b.pls(na.omit(lake.means.data.eco_adj)[,env],
na.omit(lake.means.data.eco_adj)[,c(def.trait.list,swi.trait.list,tro.trait.list)]))%>%plot()
eco.full.pls_bestfit<-odregress(eco.full.pls$XScores[,1],eco.full.pls$YScores[,1])$coeff[c(2,1)]
(eco.def.vectors<-round(pls.vector.df(eco.def.pls),3))%>%write.csv("eco-def_PLSvec.csv")
(eco.swi.vectors<-round(pls.vector.df(eco.swi.pls),3))%>%write.csv("eco-swi_PLSvec.csv")
(eco.tro.vectors<-round(pls.vector.df(eco.tro.pls),3))%>%write.csv("eco-tro_PLSvec.csv")
(eco.full.vectors<-round(pls.vector.df(eco.full.pls),3))%>%write.csv("eco-full_PLSvec.csv")
PhenoEco.stats<-data.frame(rbind(c(eco.def.pls$r.pls,eco.def.pls$P.value,eco.def.pls$Z),
c(eco.swi.pls$r.pls,eco.swi.pls$P.value,eco.swi.pls$Z),
c(eco.tro.pls$r.pls,eco.tro.pls$P.value,eco.tro.pls$Z),
c(eco.full.pls$r.pls,eco.full.pls$P.value,eco.full.pls$Z)),
row.names=c("Defense x Eco","Swimming x Eco","Trophic x Eco",
"Def-Swi-Tro x Eco"))
colnames(PhenoEco.stats)<-c("r-PLS","p","Z")
PhenoEco.stats[1:3,"adj.p"]<-p.adjust(PhenoEco.stats[1:3,"p"], method = "fdr")
PhenoEco.stats<-round(PhenoEco.stats,3)
write.csv(PhenoEco.stats,"Pheno-Eco_PLS-stats.csv")
# 2-B PLS for Defense, Swimming, and Trophic traits W/O feeding-related env. traits
(eco.physchem.def.pls<-two.b.pls(na.omit(lake.means.data.eco_adj[,env[-c(1:3)]]),
na.omit(lake.means.data.eco_adj[,c(env[-c(1:3)],def.trait.list)])[,def.trait.list]))%>%plot()
eco.physchem.def.pls.bestfit<-odregress(eco.physchem.def.pls$XScores[,1],eco.physchem.def.pls$YScores[,1])$coeff[c(2,1)]
(eco.physchem.swi.pls<-two.b.pls(na.omit(lake.means.data.eco_adj[,env[-c(1:3)]]),
na.omit(lake.means.data.eco_adj[,c(env[-c(1:3)],swi.trait.list)])[,swi.trait.list]))%>%plot()
eco.physchem.swi.pls.bestfit<-odregress(eco.physchem.swi.pls$XScores[,1],eco.physchem.swi.pls$YScores[,1])$coeff[c(2,1)]
(eco.physchem.tro.pls<-two.b.pls(na.omit(lake.means.data.eco_adj[,env[-c(1:3)]]),
na.omit(lake.means.data.eco_adj[,c(env[-c(1:3)],tro.trait.list)])[,tro.trait.list]))%>%plot()
eco.physchem.tro.pls.bestfit<-odregress(eco.physchem.tro.pls$XScores[,1],eco.physchem.tro.pls$YScores[,1])$coeff[c(2,1)]
# full (except shape)
(eco.physchem.full.pls<-two.b.pls(na.omit(lake.means.data.eco_adj[,env[-c(1:3)]]),
na.omit(lake.means.data.eco_adj[,c(env[-c(1:3)],
def.trait.list,swi.trait.list,tro.trait.list)])
[,c(def.trait.list,swi.trait.list,tro.trait.list)]))%>%plot()
eco.physchem.full.pls.bestfit<-odregress(eco.physchem.full.pls$XScores[,1],eco.physchem.full.pls$YScores[,1])$coeff[c(2,1)]
(eco.physchem.def.vectors<-round(pls.vector.df(eco.physchem.def.pls),3))%>%write.csv("eco.physchem-def_PLSvec.csv")
(eco.physchem.swi.vectors<-round(pls.vector.df(eco.physchem.swi.pls),3))%>%write.csv("eco.physchem-swi_PLSvec.csv")
(eco.physchem.tro.vectors<-round(pls.vector.df(eco.physchem.tro.pls),3))%>%write.csv("eco.physchem-tro_PLSvec.csv")
(eco.physchem.full.vectors<-round(pls.vector.df(eco.physchem.full.pls),3))%>%write.csv("eco.physchem-full_PLSvec.csv")
PhenoEco.physchem.stats<-data.frame(rbind(c(eco.physchem.def.pls$r.pls,eco.physchem.def.pls$P.value,eco.physchem.def.pls$Z),
c(eco.physchem.swi.pls$r.pls,eco.physchem.swi.pls$P.value,eco.physchem.swi.pls$Z),
c(eco.physchem.tro.pls$r.pls,eco.physchem.tro.pls$P.value,eco.physchem.tro.pls$Z),
c(eco.physchem.full.pls$r.pls,eco.physchem.full.pls$P.value,eco.physchem.full.pls$Z)),
row.names=c("Defense x Eco","Swimming x Eco","Trophic x Eco",
"Def-Swi-Tro x Eco"))
colnames(PhenoEco.physchem.stats)<-c("r-PLS","p","Z")
PhenoEco.physchem.stats[1:3,"adj.p"]<-p.adjust(PhenoEco.physchem.stats[1:3,"p"], method = "fdr")
PhenoEco.physchem.stats<-round(PhenoEco.physchem.stats,3)
write.csv(PhenoEco.physchem.stats,"Pheno-Eco.physchem_PLS-stats.csv")
####### THIS EXTRACTS ORTHOGONAL REGRESSION LINE!!!!
eco.pls.plot <- function(pls, bestfit, xlab, ylab) {
  # Scatter of the first-axis PLS scores (X block vs Y block) with the
  # orthogonal-regression line; `bestfit` = c(intercept, slope).
  scores <- data.frame(x = pls$XScores[, 1], y = pls$YScores[, 1])
  ggplot(data = scores, aes(x = x, y = y)) +
    theme_classic() +
    labs(x = xlab, y = ylab) +
    geom_abline(intercept = bestfit[1], slope = bestfit[2], color = "grey80") +
    geom_point(color = "seagreen3", size = 2) +
    geom_text_repel(aes(label = rownames(scores)), color = "black",
                    box.padding = .3, point.padding = .03, size = 2.5)
}
eco.def.pls.plot<-eco.pls.plot(eco.def.pls,eco.def.pls.bestfit,"L Block - Eco","R Block - Defense")
eco.swi.pls.plot<-eco.pls.plot(eco.swi.pls,eco.swi.pls.bestfit,"L Block - Eco","R Block - Swimming")
eco.tro.pls.plot<-eco.pls.plot(eco.tro.pls,eco.tro.pls.bestfit,"L Block - Eco","R Block - Trophic")
eco.full.pls.plot<-eco.pls.plot(eco.full.pls,eco.full.pls_bestfit,"L Block - Eco","R Block - Pheno")
# compound pls plot for full eco variables including littoral, daph., and gamm.
eco.def.pls.plot+eco.tro.pls.plot+eco.swi.pls.plot+eco.full.pls.plot
def.pls.eig<-ED((eco.def.pls$svd$d)^2)
swi.pls.eig<-ED((eco.swi.pls$svd$d)^2)
tro.pls.eig<-ED((eco.tro.pls$svd$d)^2)
full.pls.eig<-ED((eco.full.pls$svd$d)^2)
eco.physchem.def.pls.plot<-eco.pls.plot(eco.physchem.def.pls,eco.physchem.def.pls.bestfit,"L Block - Eco","R Block - Defense")
eco.physchem.swi.pls.plot<-eco.pls.plot(eco.physchem.swi.pls,eco.physchem.swi.pls.bestfit,"L Block - Eco","R Block - Swimming")
eco.physchem.tro.pls.plot<-eco.pls.plot(eco.physchem.tro.pls,eco.physchem.tro.pls.bestfit,"L Block - Eco","R Block - Trophic")
eco.physchem.full.pls.plot<-eco.pls.plot(eco.physchem.full.pls,eco.physchem.full.pls.bestfit,"L Block - Eco","R Block - Pheno")
eco.physchem.def.pls.plot+eco.physchem.tro.pls.plot+
eco.physchem.swi.pls.plot+eco.physchem.full.pls.plot
def.physchem.pls.eig<-ED((eco.physchem.def.pls$svd$d)^2)
swi.physchem.pls.eig<-ED((eco.physchem.swi.pls$svd$d)^2)
tro.physchem.pls.eig<-ED((eco.physchem.tro.pls$svd$d)^2)
full.physchem.pls.eig<-ED((eco.physchem.full.pls$svd$d)^2)
ecoblock.theta <- function(vec1, vec2) {
  # Acute angle between two vectors: vec.theta() gives a value in [0, 180];
  # fold anything past 90 back so the sign of vector direction is ignored.
  raw <- vec.theta(vec1, vec2)
  if (raw > 90) 180 - raw else raw
}
# Pairwise angles (deg) between the eco-block PLS1 loading vectors of the
# three trait suites, as a 2x2 lower-triangle table. The structurally empty
# upper cell is NA_real_ directly; the original coerced "" through
# as.numeric(), which forced the whole vector to character and emitted a
# coercion warning. Also dropped the stray trailing comma in matrix() and
# spelled out TRUE.
eco.pls.theta<-data.frame(matrix(c(ecoblock.theta(eco.def.pls$left.pls.vectors[,1],eco.swi.pls$left.pls.vectors[,1]),NA_real_,
ecoblock.theta(eco.def.pls$left.pls.vectors[,1],eco.tro.pls$left.pls.vectors[,1]),
ecoblock.theta(eco.swi.pls$left.pls.vectors[,1],eco.tro.pls$left.pls.vectors[,1])),nrow = 2,byrow = TRUE),
row.names = c("Swimming","Trophic"))
colnames(eco.pls.theta)<-c("Defense","Swimming")
eco.pls.theta<-round(eco.pls.theta,2)
write.csv(eco.pls.theta,"pw.pls.eco_AngleMatrix.csv")
ecoblock.theta(eco.full.pls$left.pls.vectors[,1],eco.def.pls$left.pls.vectors[,1])%>%round(2)
ecoblock.theta(eco.full.pls$left.pls.vectors[,1],eco.swi.pls$left.pls.vectors[,1])%>%round(2)
ecoblock.theta(eco.full.pls$left.pls.vectors[,1],eco.tro.pls$left.pls.vectors[,1])%>%round(2)
# Same pairwise-angle table for the physico-chemical-only PLS fits. Empty
# upper cell is NA_real_ directly (the original's as.numeric(c(..., "", ...))
# coerced through character with a warning); stray trailing comma removed,
# T spelled out as TRUE.
eco.physchem.pls.theta<-data.frame(matrix(c(ecoblock.theta(eco.physchem.def.pls$left.pls.vectors[,1],eco.physchem.swi.pls$left.pls.vectors[,1]),NA_real_,
ecoblock.theta(eco.physchem.def.pls$left.pls.vectors[,1],eco.physchem.tro.pls$left.pls.vectors[,1]),
ecoblock.theta(eco.physchem.swi.pls$left.pls.vectors[,1],eco.physchem.tro.pls$left.pls.vectors[,1])),nrow = 2,byrow = TRUE),
row.names = c("Swimming","Trophic"))
colnames(eco.physchem.pls.theta)<-c("Defense","Swimming")
eco.physchem.pls.theta<-round(eco.physchem.pls.theta,2)
write.csv(eco.physchem.pls.theta,"pw.pls.eco.physchem_AngleMatrix.csv")
ecoblock.theta(eco.physchem.full.pls$left.pls.vectors[,1],eco.physchem.def.pls$left.pls.vectors[,1])%>%round(2)
ecoblock.theta(eco.physchem.full.pls$left.pls.vectors[,1],eco.physchem.swi.pls$left.pls.vectors[,1])%>%round(2)
ecoblock.theta(eco.physchem.full.pls$left.pls.vectors[,1],eco.physchem.tro.pls$left.pls.vectors[,1])%>%round(2)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/is.singular.R, R/issingular.R
\name{is.singular}
\alias{is.singular}
\title{Check if a square matrix is computational singular}
\usage{
is.singular(X, tol = .Machine$double.eps * 1000)
is.singular(X, tol = .Machine$double.eps * 1000)
}
\description{
Check if a square matrix is computational singular
Check if a square matrix is computational singular
}
\note{
Created: Mon Jan 02 17:58:12 CET 2012;
Current: Mon Jan 02 17:58:37 CET 2012.
Created: Mon Jan 02 17:58:12 CET 2012;
Current: Mon Jan 02 17:58:37 CET 2012.
}
\author{
Feng Li, Department of Statistics, Stockholm University, Sweden.
Feng Li, Department of Statistics, Stockholm University, Sweden.
}
|
/man/is.singular.Rd
|
no_license
|
kl-lab/fformpp
|
R
| false
| true
| 754
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/is.singular.R, R/issingular.R
\name{is.singular}
\alias{is.singular}
\title{Check if a square matrix is computational singular}
\usage{
is.singular(X, tol = .Machine$double.eps * 1000)
is.singular(X, tol = .Machine$double.eps * 1000)
}
\description{
Check if a square matrix is computational singular
Check if a square matrix is computational singular
}
\note{
Created: Mon Jan 02 17:58:12 CET 2012;
Current: Mon Jan 02 17:58:37 CET 2012.
Created: Mon Jan 02 17:58:12 CET 2012;
Current: Mon Jan 02 17:58:37 CET 2012.
}
\author{
Feng Li, Department of Statistics, Stockholm University, Sweden.
Feng Li, Department of Statistics, Stockholm University, Sweden.
}
|
#install.packages('missForest', dependencies=TRUE, repos='http://cran.rstudio.com/')
# Loan-approval prediction: EDA, missing-value imputation with mice, light
# feature engineering, then four classifiers (logistic regression, kernel SVM,
# decision tree, random forest) evaluated via confusion tables.
library(caret)
library(tidyr)
library(ggplot2)   # FIX: ggplot() is used below but ggplot2 was never attached
tr <- read.csv("Loan_Data.csv", na.strings = c("", "NA"))
# Since R 4.0 read.csv() no longer converts strings to factors; glm/svm/
# randomForest below require factor inputs, so convert explicitly.
tr[] <- lapply(tr, function(x) if (is.character(x)) as.factor(x) else x)
head(tr, 10)
##########################EDA##############################
#Get count to check for over/under sampling
table(tr$Dependents)
#Get uniqueness of independent variables
length(unique(tr$LoanAmount))
#Column classes
str(tr)
#Mean, Median, Std. Dev. Q1, Q3
summary(tr)
#Types of outcomes
levels(tr$Loan_Status)
####################Data Preprocessing######################
library(plyr)
#Collapse the "3+" level so Dependents is an ordinary small factor
tr$Dependents <- revalue(tr$Dependents, c("3+" = "3"))
tr$Dependents <- as.factor(tr$Dependents)
#Missing Values graph -- FIX: drawn BEFORE imputation (the original drew it
#after mice, when there was no missing data left to display)
library(VIM)
mice_plot <- aggr(tr, numbers = T, sortVars = T,
                  labels=names(tr), cex.axis=.7,
                  gap = 2, ylab=c("Missing data","Pattern"))
#Imputing missing values (predictive CART imputation, m = 5 completed sets)
library(mice)
md.pattern(tr)
tempData <- mice(tr, method = "cart", m = 5)
tr <- complete(tempData, 2)   # keep the 2nd of the 5 imputed data sets
sapply(tr, function(x) sum(is.na(x)))   # sanity check: no NAs remain
densityplot(tempData)
stripplot(tempData, pch = 20, cex = 1.2)
####################Visualization###########################
#Histogram / scatter plots of the raw columns.
#FIX: the original dropped ApplicantIncome/CoapplicantIncome by position
#*before* plotting them, so hist(tr$ApplicantIncome) received NULL and errored.
hist(tr$LoanAmount)
hist(tr$ApplicantIncome)
plot(tr$ApplicantIncome)
plot(tr$LoanAmount)
plot(tr$CoapplicantIncome)
#Feature Engineering: log-transform the skewed loan amount, pool both incomes
tr$LoanAmount <- log(tr$LoanAmount)
tr$Income <- log(tr$ApplicantIncome+tr$CoapplicantIncome)
hist(tr$Income)
#Drop the raw income columns (folded into Income) and the identifier,
#by name rather than by fragile position
tr$ApplicantIncome <- NULL
tr$CoapplicantIncome <- NULL
tr$Loan_ID <- NULL
#Distribution Graph
ggplot(tr, aes(x = LoanAmount)) +
  geom_density(aes(fill = Education, alpha = 0.7))
#Distribution on categorical variable
ggplot(tr, aes(x = Property_Area)) +
  geom_bar(width=0.5, color="black", aes(fill = Loan_Status)) +
  xlab("Property Area") +   # FIX: was mislabelled "Gender"
  ylab("Frequency") +
  ggtitle("Loan Status by Property Area")
########Splitting data into training and testing############
set.seed(123)   # make the 70/30 partition reproducible
index <- createDataPartition(tr$Loan_Status, p = 0.7, list = F)
trainSet <- tr[index,]
testSet <- tr[-index,]
#Select predictors by name instead of hard-coded column 10
predictors <- names(tr) != "Loan_Status"
###############Algorithms############################
#Logistic Regression
fit <- glm(formula = Loan_Status ~ .,
           family = binomial,
           data = trainSet)
train_pred <- predict(fit, newdata = trainSet[, predictors], type = "response")
train_pred <- ifelse(train_pred > 0.5, 'Y', 'N')
cm <- table(trainSet$Loan_Status, train_pred)
test_pred <- predict(fit, newdata = testSet[, predictors], type = "response")
test_pred <- ifelse(test_pred > 0.5, 'Y', 'N')
cm <- table(testSet$Loan_Status, test_pred)
#Kernel SVM (type = "C-classification" belongs to svm(), not predict(); the
#original passed it to predict() where it was silently ignored)
library(e1071)
fit <- svm(Loan_Status ~ .,
           data = trainSet,
           kernel = "radial")
train_pred <- predict(fit, newdata = trainSet[predictors])
cm <- table(trainSet$Loan_Status, train_pred)
test_pred <- predict(fit, newdata = testSet[predictors])
cm <- table(testSet$Loan_Status, test_pred)
#Decision Tree
library(rpart)
fit <- rpart(Loan_Status ~ ., data = trainSet)
train_pred <- predict(fit, newdata = trainSet[predictors], type = "class")
cm <- table(trainSet$Loan_Status, train_pred)
test_pred <- predict(fit, newdata = testSet[predictors], type = "class")
cm <- table(testSet$Loan_Status, test_pred)
#Random Forest
library(randomForest)
fit <- randomForest(x = trainSet[predictors], y = trainSet$Loan_Status, ntree = 5)
train_pred <- predict(fit, newdata = trainSet[predictors])
cm <- table(trainSet$Loan_Status, train_pred)
test_pred <- predict(fit, newdata = testSet[predictors])
cm <- table(testSet$Loan_Status, test_pred)
|
/Classification/2. Loan Prediction/Loan Prediction.R
|
no_license
|
hubaditya/data_science_projects
|
R
| false
| false
| 3,413
|
r
|
#install.packages('missForest', dependencies=TRUE, repos='http://cran.rstudio.com/')
# Loan-approval prediction: EDA, missing-value imputation with mice, light
# feature engineering, then four classifiers (logistic regression, kernel SVM,
# decision tree, random forest) evaluated via confusion tables.
library(caret)
library(tidyr)
library(ggplot2)   # FIX: ggplot() is used below but ggplot2 was never attached
tr <- read.csv("Loan_Data.csv", na.strings = c("", "NA"))
# Since R 4.0 read.csv() no longer converts strings to factors; glm/svm/
# randomForest below require factor inputs, so convert explicitly.
tr[] <- lapply(tr, function(x) if (is.character(x)) as.factor(x) else x)
head(tr, 10)
##########################EDA##############################
#Get count to check for over/under sampling
table(tr$Dependents)
#Get uniqueness of independent variables
length(unique(tr$LoanAmount))
#Column classes
str(tr)
#Mean, Median, Std. Dev. Q1, Q3
summary(tr)
#Types of outcomes
levels(tr$Loan_Status)
####################Data Preprocessing######################
library(plyr)
#Collapse the "3+" level so Dependents is an ordinary small factor
tr$Dependents <- revalue(tr$Dependents, c("3+" = "3"))
tr$Dependents <- as.factor(tr$Dependents)
#Missing Values graph -- FIX: drawn BEFORE imputation (the original drew it
#after mice, when there was no missing data left to display)
library(VIM)
mice_plot <- aggr(tr, numbers = T, sortVars = T,
                  labels=names(tr), cex.axis=.7,
                  gap = 2, ylab=c("Missing data","Pattern"))
#Imputing missing values (predictive CART imputation, m = 5 completed sets)
library(mice)
md.pattern(tr)
tempData <- mice(tr, method = "cart", m = 5)
tr <- complete(tempData, 2)   # keep the 2nd of the 5 imputed data sets
sapply(tr, function(x) sum(is.na(x)))   # sanity check: no NAs remain
densityplot(tempData)
stripplot(tempData, pch = 20, cex = 1.2)
####################Visualization###########################
#Histogram / scatter plots of the raw columns.
#FIX: the original dropped ApplicantIncome/CoapplicantIncome by position
#*before* plotting them, so hist(tr$ApplicantIncome) received NULL and errored.
hist(tr$LoanAmount)
hist(tr$ApplicantIncome)
plot(tr$ApplicantIncome)
plot(tr$LoanAmount)
plot(tr$CoapplicantIncome)
#Feature Engineering: log-transform the skewed loan amount, pool both incomes
tr$LoanAmount <- log(tr$LoanAmount)
tr$Income <- log(tr$ApplicantIncome+tr$CoapplicantIncome)
hist(tr$Income)
#Drop the raw income columns (folded into Income) and the identifier,
#by name rather than by fragile position
tr$ApplicantIncome <- NULL
tr$CoapplicantIncome <- NULL
tr$Loan_ID <- NULL
#Distribution Graph
ggplot(tr, aes(x = LoanAmount)) +
  geom_density(aes(fill = Education, alpha = 0.7))
#Distribution on categorical variable
ggplot(tr, aes(x = Property_Area)) +
  geom_bar(width=0.5, color="black", aes(fill = Loan_Status)) +
  xlab("Property Area") +   # FIX: was mislabelled "Gender"
  ylab("Frequency") +
  ggtitle("Loan Status by Property Area")
########Splitting data into training and testing############
set.seed(123)   # make the 70/30 partition reproducible
index <- createDataPartition(tr$Loan_Status, p = 0.7, list = F)
trainSet <- tr[index,]
testSet <- tr[-index,]
#Select predictors by name instead of hard-coded column 10
predictors <- names(tr) != "Loan_Status"
###############Algorithms############################
#Logistic Regression
fit <- glm(formula = Loan_Status ~ .,
           family = binomial,
           data = trainSet)
train_pred <- predict(fit, newdata = trainSet[, predictors], type = "response")
train_pred <- ifelse(train_pred > 0.5, 'Y', 'N')
cm <- table(trainSet$Loan_Status, train_pred)
test_pred <- predict(fit, newdata = testSet[, predictors], type = "response")
test_pred <- ifelse(test_pred > 0.5, 'Y', 'N')
cm <- table(testSet$Loan_Status, test_pred)
#Kernel SVM (type = "C-classification" belongs to svm(), not predict(); the
#original passed it to predict() where it was silently ignored)
library(e1071)
fit <- svm(Loan_Status ~ .,
           data = trainSet,
           kernel = "radial")
train_pred <- predict(fit, newdata = trainSet[predictors])
cm <- table(trainSet$Loan_Status, train_pred)
test_pred <- predict(fit, newdata = testSet[predictors])
cm <- table(testSet$Loan_Status, test_pred)
#Decision Tree
library(rpart)
fit <- rpart(Loan_Status ~ ., data = trainSet)
train_pred <- predict(fit, newdata = trainSet[predictors], type = "class")
cm <- table(trainSet$Loan_Status, train_pred)
test_pred <- predict(fit, newdata = testSet[predictors], type = "class")
cm <- table(testSet$Loan_Status, test_pred)
#Random Forest
library(randomForest)
fit <- randomForest(x = trainSet[predictors], y = trainSet$Loan_Status, ntree = 5)
train_pred <- predict(fit, newdata = trainSet[predictors])
cm <- table(trainSet$Loan_Status, train_pred)
test_pred <- predict(fit, newdata = testSet[predictors])
cm <- table(testSet$Loan_Status, test_pred)
|
/modelli_finali/modello_univariato/modello_gerarchico/modello/hierarchical.R
|
no_license
|
araiari/OECD-Bias-in-students-evaluation
|
R
| false
| false
| 11,200
|
r
| ||
library(data.table)
library(lavaan)
library(semPlot)
library(magrittr)
library(dplyr)
library(car)
# Country-level data on gender representation in STEM vs development
# indicators, fitted below as a lavaan structural equation model.
dt=fread("/home/jose/Escritorio/Gender and STEM/Rdata.csv")
dt=dt[,-c("V1")]   # drop the exported row-index column
#dt=dt[!is.na(hy_f_STEM)]
#dt=setDT(aggregate(dt, list(dt$Country), mean,na.action=na.pass, na.rm=TRUE))
# Blank out covariates deemed unreliable before 2000, then keep only the
# post-2000 rows.  FIX: the original chain applied [TIME<2000,muslim:=NA]
# AFTER the [TIME>=2000] subset, where it could never match a row; it is now
# applied before the subset, consistent with the IQ and Nrrent blanking.
data=dt[TIME<2000,IQ:=NA][TIME<2000,Nrrent:=NA][TIME<2000,muslim:=NA][TIME>=2000]
#data=data[loggdppc<8]
#setnames(data,"female_eduedu","f_edu")
setnames(data,"female_engi","f_engi")
setnames(data,"female_ah","f_ah")
# GDP per capita excluding the natural-resource-rent share, on the log scale
data$loggdppc=log(data$gdppc*(100-data$Nrrent)/100)
# Convert female/male ratios r to female percentages: 100*r/(r+1)
data$hy_f_edu=100*data$female_over_male_edu/(data$female_over_male_edu+1)
data$hy_f_engi=100*data$female_over_male_engi/(data$female_over_male_engi+1)
#LOGS
data$mortality %<>% log()
# data$life_exp %<>% log()
# data$muslim %<>% logit(percents=TRUE)
# data$f_edu %<>% logit(percents=TRUE)
# data$f_STEM %<>% logit(percents=TRUE)
# data$f_engi %<>% logit(percents=TRUE)
# data$female_uni %<>% logit(percents=TRUE)
cols <- c("IQ","muslim","Nrrent","equality_index",
          "female_over_male","female_over_male_art","math_anxiety",
          "female_uni","loggdppc","mortality","life_exp","gdppc","f_ah","f_STEM")
#data[, (cols) := lapply(.SD, scale), .SDcols=cols]
#Factor
# Exploratory PCA of the development indicators; the SEM below instead
# defines 'develop' as a latent factor over the same four variables.
k=prcomp(~life_exp+mortality+loggdppc+IQ, data=data, center = TRUE, scale = TRUE, na.action = na.omit)
resul=scale(data[,.(life_exp,mortality,loggdppc,IQ)], k$center, k$scale) %*% k$rotation %>% as.data.frame()
l=resul["PC1"]
#data$develop=data$life_exp
model="
develop=~loggdppc+life_exp+mortality+IQ
muslim ~~ develop
equality_index~develop+muslim
hy_f_engi~ develop+ equality_index
hy_f_STEM~ develop+ equality_index
female_uni~ develop+equality_index
"
# female_over_male~ equality_index+develop+muslim
#female_over_male_edu~equality_index+develop+muslim
# model="
# loggdppc~~IQ
# loggdppc~Nrrent
# equality_index~loggdppc+muslim+IQ+Nrrent
# muslim~Nrrent+IQ+loggdppc
# IQ~0*Nrrent"
fit = sem(model,data=data,estimator="MLR",missing="FIML",fit.measures=TRUE)
summary(fit,standardized=TRUE)
semPaths(fit,curvePivot=TRUE,what = "std",nCharNodes = 0,
         edge.label.cex = 1,label.cex=5,residuals = FALSE,layout="spring",intercepts = FALSE)
#data$develop=as.data.frame(lavPredict(fit))
|
/Gender and STEM/Rmodel.R
|
no_license
|
jlricon/open_nintil
|
R
| false
| false
| 2,337
|
r
|
library(data.table)
library(lavaan)
library(semPlot)
library(magrittr)
library(dplyr)
library(car)
# Country-level data on gender representation in STEM vs development
# indicators, fitted below as a lavaan structural equation model.
dt=fread("/home/jose/Escritorio/Gender and STEM/Rdata.csv")
dt=dt[,-c("V1")]   # drop the exported row-index column
#dt=dt[!is.na(hy_f_STEM)]
#dt=setDT(aggregate(dt, list(dt$Country), mean,na.action=na.pass, na.rm=TRUE))
# Blank out covariates deemed unreliable before 2000, then keep only the
# post-2000 rows.  FIX: the original chain applied [TIME<2000,muslim:=NA]
# AFTER the [TIME>=2000] subset, where it could never match a row; it is now
# applied before the subset, consistent with the IQ and Nrrent blanking.
data=dt[TIME<2000,IQ:=NA][TIME<2000,Nrrent:=NA][TIME<2000,muslim:=NA][TIME>=2000]
#data=data[loggdppc<8]
#setnames(data,"female_eduedu","f_edu")
setnames(data,"female_engi","f_engi")
setnames(data,"female_ah","f_ah")
# GDP per capita excluding the natural-resource-rent share, on the log scale
data$loggdppc=log(data$gdppc*(100-data$Nrrent)/100)
# Convert female/male ratios r to female percentages: 100*r/(r+1)
data$hy_f_edu=100*data$female_over_male_edu/(data$female_over_male_edu+1)
data$hy_f_engi=100*data$female_over_male_engi/(data$female_over_male_engi+1)
#LOGS
data$mortality %<>% log()
# data$life_exp %<>% log()
# data$muslim %<>% logit(percents=TRUE)
# data$f_edu %<>% logit(percents=TRUE)
# data$f_STEM %<>% logit(percents=TRUE)
# data$f_engi %<>% logit(percents=TRUE)
# data$female_uni %<>% logit(percents=TRUE)
cols <- c("IQ","muslim","Nrrent","equality_index",
          "female_over_male","female_over_male_art","math_anxiety",
          "female_uni","loggdppc","mortality","life_exp","gdppc","f_ah","f_STEM")
#data[, (cols) := lapply(.SD, scale), .SDcols=cols]
#Factor
# Exploratory PCA of the development indicators; the SEM below instead
# defines 'develop' as a latent factor over the same four variables.
k=prcomp(~life_exp+mortality+loggdppc+IQ, data=data, center = TRUE, scale = TRUE, na.action = na.omit)
resul=scale(data[,.(life_exp,mortality,loggdppc,IQ)], k$center, k$scale) %*% k$rotation %>% as.data.frame()
l=resul["PC1"]
#data$develop=data$life_exp
model="
develop=~loggdppc+life_exp+mortality+IQ
muslim ~~ develop
equality_index~develop+muslim
hy_f_engi~ develop+ equality_index
hy_f_STEM~ develop+ equality_index
female_uni~ develop+equality_index
"
# female_over_male~ equality_index+develop+muslim
#female_over_male_edu~equality_index+develop+muslim
# model="
# loggdppc~~IQ
# loggdppc~Nrrent
# equality_index~loggdppc+muslim+IQ+Nrrent
# muslim~Nrrent+IQ+loggdppc
# IQ~0*Nrrent"
fit = sem(model,data=data,estimator="MLR",missing="FIML",fit.measures=TRUE)
summary(fit,standardized=TRUE)
semPaths(fit,curvePivot=TRUE,what = "std",nCharNodes = 0,
         edge.label.cex = 1,label.cex=5,residuals = FALSE,layout="spring",intercepts = FALSE)
#data$develop=as.data.frame(lavPredict(fit))
|
setMethod("show",
          "MRInput",
          function(object){
            # Display an MRInput object as a per-SNP table of association
            # estimates (beta) and standard errors for exposure and outcome.
            bx   <- object@betaX
            bxse <- object@betaXse
            by   <- object@betaY
            byse <- object@betaYse

            # The default placeholder "snp" is expanded to snp_1, snp_2, ...
            snp.names <- object@snps
            if ("snp" %in% snp.names) {
              snp.names <- paste("snp", 1:length(bx), sep = "_")
            }

            beta.frame <- data.frame(snp.names, bx, bxse, by, byse)
            names(beta.frame) <- c("SNP",
                                   paste0(object@exposure, ".beta"),
                                   paste0(object@exposure, ".se"),
                                   paste0(object@outcome, ".beta"),
                                   paste0(object@outcome, ".se"))
            print(beta.frame, digits = 3)
          })
#--------------------------------------------------------------------------------------------
setMethod("show",
          "MRMVInput",
          function(object){
            # Display a multivariable MRMVInput object as a per-SNP table of
            # exposure/outcome association estimates and standard errors.
            beta.frame <- data.frame(object@snps,
                                     object@betaX, object@betaXse,
                                     object@betaY, object@betaYse)
            names(beta.frame) <- c("SNP",
                                   paste0(object@exposure, ".beta"),
                                   paste0(object@exposure, ".se"),
                                   paste0(object@outcome, ".beta"),
                                   paste0(object@outcome, ".se"))
            print(beta.frame, digits = 3)
          })
#--------------------------------------------------------------------------------------------
setMethod("show",
          "WeightedMedian",
          function(object){
            # Pretty-print a WeightedMedian result: a one-row table with the
            # estimate, SE, (1-Alpha) confidence interval and p-value,
            # preceded by the method name and number of variants.
            Interval_type <- paste(100*(1-object@Alpha), "% CI", sep = "")
            Statistic <- c("Method", "Estimate", "Std Error", Interval_type, "", "p-value")
            # e.g. "Weighted median method" / "Simple median method"
            Median_type <- paste(simpleCap(object@Type), " median method", sep = "")
            Value <- c(Median_type,
                       decimals(object@Estimate,3),
                       decimals(object@StdError,3),
                       paste(decimals(object@CILower, 3), ",", sep = ""),
                       decimals(object@CIUpper,3),
                       decimals(object@Pvalue, 3))
            output.table <- data.frame(matrix(Value, nrow = 1))
            colnames(output.table) <- Statistic
            cat("\n",Median_type, "\n\n")
            cat("Number of Variants :", object@SNPs, "\n")
            cat("------------------------------------------------------------------\n")
            print(output.table, quote = F, row.names = FALSE, justify= "left")
            cat("------------------------------------------------------------------\n")
          }
)
#--------------------------------------------------------------------------------------------
setMethod("show",
          "IVW",
          function(object){
            # Pretty-print an inverse-variance weighted (IVW) result:
            # one-row estimate table plus model diagnostics (residual SE,
            # heterogeneity test) written directly to the console with cat().
            Interval_type <- paste(100*(1-object@Alpha), "% CI", sep = "")
            Statistic <- c("Method", "Estimate", "Std Error", Interval_type, "", "p-value")
            Value <- c("IVW", decimals(c(object@Estimate, object@StdError),3),
                       paste(decimals(object@CILower, 3), ",", sep = ""), decimals(c(object@CIUpper, object@Pvalue), 3))
            output.table <- data.frame(matrix(Value, nrow = 1))
            colnames(output.table) <- Statistic
            # Any NA in the correlation slot is taken to mean "no correlation
            # matrix supplied", i.e. variants treated as uncorrelated.
            correlation <- ifelse(sum(is.na(object@Correlation)) == 0,
                                  "correlated", "uncorrelated")
            penalized <- ifelse(object@Penalized == TRUE, "Weights of genetic variants with heterogeneous causal estimates have been penalized. ", "")
            robust <- ifelse(object@Robust == TRUE, "Robust regression used.", "")
            cat("\nInverse-variance weighted method\n")
            cat("(variants ", correlation, ", ", object@Model, "-effect model)\n\n" , sep = "")
            cat("Number of Variants :", object@SNPs, "\n")
            cat(penalized, robust, "\n", sep = "")
            cat("------------------------------------------------------------------\n")
            print(output.table, quote = F, row.names = FALSE, justify = "left")
            cat("------------------------------------------------------------------\n")
            cat("Residual standard error = ", decimals(object@RSE, 3), "\n")
            if(object@Model == "fixed") { cat("Residual standard error is set to 1 in calculation of confidence interval by fixed-effect assumption.\n") }
            if(object@RSE<1) { cat("Residual standard error is set to 1 in calculation of confidence interval when its estimate is less than 1.\n") }
            # NA heterogeneity statistic is used as the sentinel for
            # "not computed" (penalized weights or a single variant).
            if(is.na(object@Heter.Stat[1])) {
              cat("Heterogeneity is not calculated when weights are penalized, or when there is only one variant in the analysis.")
            } else {
              cat("Heterogeneity test statistic = ", decimals(object@Heter.Stat[1],4), " on ", object@SNPs -1,
                  " degrees of freedom, (p-value = ", decimals(object@Heter.Stat[2], 4),")\n", sep = "")
            }
          }
)
#--------------------------------------------------------------------------------------------
setMethod("show",
          "MaxLik",
          function(object){
            # Pretty-print a maximum-likelihood MR result: one-row estimate
            # table plus residual-SE and heterogeneity diagnostics.
            Interval_type <- paste(100*(1-object@Alpha), "% CI", sep = "")
            Statistic <- c("Method", "Estimate", "Std Error", Interval_type, "", "p-value")
            Value <- c("MaxLik", decimals(c(object@Estimate, object@StdError),3),
                       paste(decimals(object@CILower, 3), ",", sep = ""), decimals(c(object@CIUpper, object@Pvalue), 3))
            output.table <- data.frame(matrix(Value, nrow = 1))
            colnames(output.table) <- Statistic
            correlation <- ifelse(sum(is.na(object@Correlation)) == 0,
                                  "correlated", "uncorrelated")
            cat("\nMaximum-likelihood method\n")
            cat("(variants ", correlation, ", ", object@Model, "-effect model)\n\n" , sep = "")
            cat("Number of Variants :", object@SNPs, "\n")
            cat("------------------------------------------------------------------\n")
            print(output.table, quote = F, row.names = FALSE, justify = "left")
            cat("------------------------------------------------------------------\n")
            cat("Residual standard error = ", decimals(object@RSE, 3), "\n")
            if(object@Model == "fixed") { cat("Residual standard error is set to 1 in calculation of confidence interval by fixed-effect assumption.\n") }
            if(object@RSE<1) { cat("Residual standard error is set to 1 in calculation of confidence interval when its estimate is less than 1.\n") }
            # NOTE(review): unlike the IVW method (which tests is.na), a
            # near-zero statistic is used here as the "single variant"
            # sentinel -- confirm this is the intended convention.
            if(object@Heter.Stat[1] < 1e-16) {
              cat("Heterogeneity is not calculated when there is only one variant in the analysis.")
            } else {
              cat("Heterogeneity test statistic = ", decimals(object@Heter.Stat[1],4), " on ", object@SNPs -1,
                  " degrees of freedom, (p-value = ", decimals(object@Heter.Stat[2], 4),")\n", sep = "")
            }
          }
)
#--------------------------------------------------------------------------------------------
setMethod("show",
          "MRMBE",
          function(object){
            # Pretty-print a mode-based-estimate (Hartwig et al.) result:
            # one-row table plus the weighting/SE options used.
            Interval_type <- paste(100*(1-object@Alpha), "% CI", sep = "")
            Statistic <- c("Method", "Estimate", "Std Error", Interval_type, "", "p-value")
            Value <- c("MBE", decimals(c(object@Estimate, object@StdError),3),
                       paste(decimals(object@CILower, 3), ",", sep = ""), decimals(c(object@CIUpper, object@Pvalue), 3))
            output.table <- data.frame(matrix(Value, nrow = 1))
            colnames(output.table) <- Statistic
            # NOTE(review): 'nome' is only assigned for StdErr "simple" or
            # "delta"; any other value would leave it undefined and make the
            # cat() below error -- presumably the class validates StdErr.
            if(object@StdErr=="simple") { nome <- "[assuming NOME]" }
            if(object@StdErr=="delta") { nome <- "[not assuming NOME]" }
            cat("\nMode-based method of Hartwig et al\n")
            cat("(", object@Weighting, ", ", object@StdErr, " standard errors ", nome, ", bandwidth factor = ", object@Phi, ")\n\n" , sep = "")
            cat("Number of Variants :", object@SNPs, "\n")
            cat("------------------------------------------------------------------\n")
            print(output.table, quote = F, row.names = FALSE, justify = "left")
            cat("------------------------------------------------------------------\n")
          }
)
#--------------------------------------------------------------------------------------------
setMethod("show",
          "MRHetPen",
          function(object){
            # Pretty-print a heterogeneity-penalized result.  The CI is found
            # by grid search over [CIMin, CIMax]; results are only printed
            # when the searched interval fully contains the CI, otherwise the
            # user is told how to widen the grid.  The four guard conditions
            # below are mutually exclusive (CIMin cannot both be in CIRange
            # and below min(CIRange)), so the plain if's act like else-if's.
            if (object@CIMax%in%object@CIRange & object@CIMin%in%object@CIRange) {
              cat("Confidence interval range too narrow. Please decrease CIMin and increase CIMax and try again.") }
            else if (object@CIMax>max(object@CIRange) & object@CIMin%in%object@CIRange) {
              cat("Lower bound of confidence interval range too high. Please decrease CIMin and try again.") }
            if (object@CIMax%in%object@CIRange & object@CIMin<min(object@CIRange)) {
              cat("Upper bound of confidence interval range too low. Please increase CIMax and try again.") }
            if (object@CIMax>max(object@CIRange) & object@CIMin<min(object@CIRange)) {
              Interval_type <- paste(100*(1-object@Alpha), "% CI", sep = "")
              Statistic <- c("Method", "Estimate", Interval_type, "")
              # Number of decimal places implied by the grid step size
              dps = max(ceiling(-log10(object@CIStep)), 1)
              # A gap larger than one grid step means the CI is disconnected
              Ranges <- ifelse(sum(diff(object@CIRange)>1.01*object@CIStep)==0, "Single range", "Multiple ranges");
              if (Ranges == "Single range") {
                Value <- c("HetPen", decimals(object@Estimate, dps),
                           paste(decimals(min(object@CIRange), dps), ",", sep = ""), decimals(max(object@CIRange), dps))
                output.table <- data.frame(matrix(Value, nrow = 1))
                colnames(output.table) <- Statistic
                Ranges.text <- "Note: confidence interval is a single range of values.\n"
              }
              if (Ranges == "Multiple ranges") {
                Value <- c("HetPen", rep("", length(object@CILower)-1),
                           decimals(object@Estimate, dps), rep("", length(object@CILower)-1),
                           paste(decimals(object@CILower, dps), ",", sep = ""), decimals(object@CIUpper, dps))
                output.table <- data.frame(matrix(Value, nrow = length(object@CILower), byrow=FALSE))
                colnames(output.table) <- Statistic
                Ranges.text <- "Note: confidence interval contains multiple ranges of values.\n"
              }
              cat("\nHeterogeneity-penalized method\n")
              cat("(Prior probability of instrument validity = ", object@Prior, ")\n\n" , sep = "")
              cat("Number of Variants :", object@SNPs, "\n")
              cat("------------------------------------------------------------------\n")
              print(output.table, quote = F, row.names = FALSE, justify = "left")
              cat("------------------------------------------------------------------\n")
              cat(Ranges.text)
            }
          }
)
#--------------------------------------------------------------------------------------------
setMethod("show",
          "MRConMix",
          function(object){
            # Pretty-print a contamination-mixture result.  Same grid-search
            # CI logic as the MRHetPen show method: print only when the
            # searched interval [CIMin, CIMax] fully contains the CI; the
            # guard conditions are mutually exclusive.
            if (object@CIMax%in%object@CIRange & object@CIMin%in%object@CIRange) {
              cat("Confidence interval range too narrow. Please decrease CIMin and increase CIMax and try again.") }
            else if (object@CIMax>max(object@CIRange) & object@CIMin%in%object@CIRange) {
              cat("Lower bound of confidence interval range too high. Please decrease CIMin and try again.") }
            if (object@CIMax%in%object@CIRange & object@CIMin<min(object@CIRange)) {
              cat("Upper bound of confidence interval range too low. Please increase CIMax and try again.") }
            if (object@CIMax>max(object@CIRange) & object@CIMin<min(object@CIRange)) {
              Interval_type <- paste(100*(1-object@Alpha), "% CI", sep = "")
              Statistic <- c("Method", "Estimate", Interval_type, "")
              # Number of decimal places implied by the grid step size
              dps = max(ceiling(-log10(object@CIStep)), 1)
              # A gap larger than one grid step means the CI is disconnected
              Ranges <- ifelse(sum(diff(object@CIRange)>1.01*object@CIStep)==0, "Single range", "Multiple ranges");
              if (Ranges == "Single range") {
                Value <- c("ConMix", decimals(object@Estimate, dps),
                           paste(decimals(min(object@CIRange), dps), ",", sep = ""), decimals(max(object@CIRange), dps))
                output.table <- data.frame(matrix(Value, nrow = 1))
                colnames(output.table) <- Statistic
                Ranges.text <- "Note: confidence interval is a single range of values.\n"
              }
              if (Ranges == "Multiple ranges") {
                Value <- c("ConMix", rep("", length(object@CILower)-1),
                           decimals(object@Estimate, dps), rep("", length(object@CILower)-1),
                           paste(decimals(object@CILower, dps), ",", sep = ""), decimals(object@CIUpper, dps))
                output.table <- data.frame(matrix(Value, nrow = length(object@CILower), byrow=FALSE))
                colnames(output.table) <- Statistic
                Ranges.text <- "Note: confidence interval contains multiple ranges of values.\n"
              }
              cat("\nContamination mixture method\n")
              cat("(Standard deviation of invalid estimands = ", object@Psi, ")\n\n" , sep = "")
              cat("Number of Variants :", object@SNPs, "\n")
              cat("------------------------------------------------------------------\n")
              print(output.table, quote = F, row.names = FALSE, justify = "left")
              cat("------------------------------------------------------------------\n")
              cat(Ranges.text)
            }
          }
)
#--------------------------------------------------------------------------------------------
setMethod("show",
          "Egger",
          function(object){
            # Pretty-print an MR-Egger result: a TWO-row table (causal
            # estimate and pleiotropy intercept), then residual-SE,
            # heterogeneity and I^2_GX diagnostics.
            Interval_type <- paste(100*(1-object@Alpha), "% CI", sep = "")
            Statistic <- c("Method", "Estimate", "Std Error", Interval_type, "", "p-value")
            # Row 1: slope ("MR-Egger"); row 2: intercept ("(intercept)")
            Value <- c("MR-Egger", decimals(c(object@Estimate,
                                              object@StdError.Est), 3),
                       paste(decimals(object@CILower.Est, 3), ",", sep = ""),
                       decimals(c(object@CIUpper.Est,
                                  object@Pvalue.Est), 3),
                       "(intercept)", decimals(c(object@Intercept,
                                                 object@StdError.Int), 3),
                       paste(decimals(object@CILower.Int, 3), ",", sep = ""),
                       decimals(c(object@CIUpper.Int,
                                  object@Pvalue.Int), 3))
            output.table <- data.frame(matrix(Value, nrow = 2, byrow = T))
            colnames(output.table) <- Statistic
            correlation <- ifelse(sum(is.na(object@Correlation)) == 0, "correlated", "uncorrelated")
            penalized <- ifelse(object@Penalized == TRUE, "Weights of genetic variants with heterogeneous causal estimates have been penalized. ", "")
            robust <- ifelse(object@Robust == TRUE, "Robust model used.", "")
            cat("\nMR-Egger method\n")
            cat("(variants ", correlation, ", ", object@Model, "-effect model)\n\n" , sep = "")
            cat("Number of Variants = ", object@SNPs, "\n")
            cat(penalized, robust, "\n", sep = "")
            cat("------------------------------------------------------------------\n")
            print(output.table, quote = F, row.names = FALSE, justify= "left")
            cat("------------------------------------------------------------------\n")
            cat("Residual Standard Error : ", decimals(object@RSE, 3), "\n")
            if(object@RSE<1) { cat("Residual standard error is set to 1 in calculation of confidence interval when its estimate is less than 1.\n") }
            if(is.na(object@Heter.Stat[1])) {
              cat("Heterogeneity not calculated when weights are penalized.\n")
            } else {
              # Egger fits slope + intercept, hence SNPs - 2 df
              cat("Heterogeneity test statistic = ", decimals(object@Heter.Stat[1],4), " on ", object@SNPs - 2,
                  " degrees of freedom, (p-value = ", decimals(object@Heter.Stat[2], 4),")\n", sep = "")
            }
            if(!is.nan(object@I.sq)) {
              cat("I^2_GX statistic: ", decimals(object@I.sq*100, 1), "%\n", sep="") }
          }
)
#--------------------------------------------------------------------------------------------
setMethod("show",
          "MRAll",
          function(object){
            # Pretty-print the combined-methods summary stored in @Values
            # (a data frame; columns 2-6 are numeric and get 3 d.p.).
            df <- slot(object, "Values")
            df[,2:6] <- decimals(df[,2:6], 3)
            space <- rep("", 6)
            # When every method was run, insert blank separator rows between
            # the method families.  NOTE(review): this assumes @Values has
            # exactly 15 rows for Method == "all" -- verify against mr_allmethods.
            if(object@Method == "all"){
              df <- rbind(df[1:3,],
                          space,
                          df[4:7,],
                          space,
                          df[8:15,])
            } else {
              df <- df
            }
            print(df, justify = "left", row.names = FALSE)
          }
)
#--------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------
setMethod("show",
          "MVIVW",
          function(object){
            # Pretty-print a multivariable IVW result: one table row per
            # exposure, then residual-SE and heterogeneity diagnostics.
            Interval_type <- paste(100*(1-object@Alpha), "% CI", sep = "")
            Statistic <- c("Exposure", "Estimate", "Std Error", Interval_type, "", "p-value")
            Value <- cbind(object@Exposure, decimals(object@Estimate, 3), decimals(object@StdError,3),
                           paste(decimals(object@CILower, 3), ",", sep = ""), decimals(object@CIUpper,3),
                           decimals(object@Pvalue, 3))
            output.table <- data.frame(matrix(Value, nrow = length(object@Exposure)))
            colnames(output.table) <- Statistic
            correlation <- ifelse(sum(is.na(object@Correlation)) == 0,
                                  "correlated", "uncorrelated")
            cat("\nMultivariable inverse-variance weighted method\n")
            cat("(variants ", correlation, ", ", object@Model, "-effect model)\n\n" , sep = "")
            cat("Number of Variants :", object@SNPs, "\n")
            cat("------------------------------------------------------------------\n")
            print(output.table, quote = F, row.names = FALSE, justify = "left")
            cat("------------------------------------------------------------------\n")
            cat("Residual standard error = ", decimals(object@RSE, 3), "\n")
            if(object@Model == "fixed") { cat("Residual standard error is set to 1 in calculation of confidence interval by fixed-effect assumption.\n") }
            if(object@RSE<1) { cat("Residual standard error is set to 1 in calculation of confidence interval when its estimate is less than 1.\n") }
            if(is.na(object@Heter.Stat[1])) {
              cat("Heterogeneity is not calculated when weights are penalized, or when there is only one variant in the analysis.")
            } else {
              # One regression parameter per exposure, hence SNPs - #exposures df
              cat("Heterogeneity test statistic = ", decimals(object@Heter.Stat[1],4), " on ", object@SNPs-length(object@Exposure),
                  " degrees of freedom, (p-value = ", decimals(object@Heter.Stat[2], 4),")\n", sep = "")
            }
          }
)
#--------------------------------------------------------------------------------------------
setMethod("show",
          "MVEgger",
          function(object){
            # Pretty-print a multivariable MR-Egger result: one row per
            # exposure plus a final "(intercept)" row, then diagnostics.
            # Unlike the univariable Egger method, heterogeneity is printed
            # unconditionally here.
            Interval_type <- paste(100*(1-object@Alpha), "% CI", sep = "")
            Statistic <- c("Exposure", "Estimate", "Std Error", Interval_type, "", "p-value")
            Value <- cbind(c(object@Exposure, "(intercept)"), decimals(c(object@Estimate, object@Intercept), 3), decimals(c(object@StdError.Est, object@StdError.Int),3),
                           paste(decimals(c(object@CILower.Est, object@CILower.Int), 3), ",", sep = ""), decimals(c(object@CIUpper.Est, object@CIUpper.Int),3),
                           decimals(c(object@Pvalue.Est, object@Pvalue.Int), 3))
            output.table <- data.frame(matrix(Value, nrow = length(object@Exposure)+1))
            colnames(output.table) <- Statistic
            correlation <- ifelse(sum(is.na(object@Correlation)) == 0,
                                  "correlated", "uncorrelated")
            cat("\nMultivariable MR-Egger method\n")
            cat("(variants ", correlation, ", ", object@Model, "-effect model)\n\n" , sep = "")
            cat("Orientated to exposure :", object@Orientate, "\n")
            cat("Number of Variants :", object@SNPs, "\n")
            cat("------------------------------------------------------------------\n")
            print(output.table, quote = F, row.names = FALSE, justify = "left")
            cat("------------------------------------------------------------------\n")
            cat("Residual standard error = ", decimals(object@RSE, 3), "\n")
            if(object@RSE<1) { cat("Residual standard error is set to 1 in calculation of confidence interval when its estimate is less than 1.\n") }
            # #exposures slopes + 1 intercept are estimated
            cat("Heterogeneity test statistic = ", decimals(object@Heter.Stat[1],4), " on ", object@SNPs-length(object@Exposure)-1,
                " degrees of freedom, (p-value = ", decimals(object@Heter.Stat[2], 4),")\n", sep = "")
          }
)
|
/R/show-methods.R
|
no_license
|
Kat-Jump/MendelianRandomization
|
R
| false
| false
| 22,023
|
r
|
# Display an MRInput object as a per-SNP table of association estimates
# (beta) and standard errors for exposure and outcome; the placeholder
# snp name "snp" is expanded to snp_1, snp_2, ...
setMethod("show",
          "MRInput",
          function(object){
            exposure <- object@exposure
            outcome <- object@outcome
            snps <- object@snps
            Bx = object@betaX
            By = object@betaY
            Bxse = object@betaXse
            Byse = object@betaYse
            if("snp" %in% snps) {
              snps <- paste("snp", 1:length(Bx), sep = "_")
            } else {
              snps <- snps
            }
            betaDF <- data.frame(snps, Bx, Bxse, By, Byse)
            colnames(betaDF) <- c("SNP",
                                  paste(exposure, ".beta", sep = ""),
                                  paste(exposure, ".se", sep = ""),
                                  paste(outcome, ".beta", sep = ""),
                                  paste(outcome, ".se", sep = ""))
            print(betaDF, digits = 3)
          })
#--------------------------------------------------------------------------------------------
# Display a multivariable MRMVInput object as a per-SNP table of
# exposure/outcome association estimates and standard errors.
setMethod("show",
          "MRMVInput",
          function(object){
            exposure <- object@exposure
            outcome <- object@outcome
            snps <- object@snps
            Bx = object@betaX
            By = object@betaY
            Bxse = object@betaXse
            Byse = object@betaYse
            betaDF <- data.frame(snps, Bx, Bxse, By, Byse)
            colnames(betaDF) <- c("SNP",
                                  paste(exposure, ".beta", sep = ""),
                                  paste(exposure, ".se", sep = ""),
                                  paste(outcome, ".beta", sep = ""),
                                  paste(outcome, ".se", sep = ""))
            print(betaDF, digits = 3)
          })
#--------------------------------------------------------------------------------------------
# Pretty-print a WeightedMedian result: a one-row table with estimate, SE,
# (1-Alpha) confidence interval and p-value, preceded by the method name.
setMethod("show",
          "WeightedMedian",
          function(object){
            Interval_type <- paste(100*(1-object@Alpha), "% CI", sep = "")
            Statistic <- c("Method", "Estimate", "Std Error", Interval_type, "", "p-value")
            Median_type <- paste(simpleCap(object@Type), " median method", sep = "")
            Value <- c(Median_type,
                       decimals(object@Estimate,3),
                       decimals(object@StdError,3),
                       paste(decimals(object@CILower, 3), ",", sep = ""),
                       decimals(object@CIUpper,3),
                       decimals(object@Pvalue, 3))
            output.table <- data.frame(matrix(Value, nrow = 1))
            colnames(output.table) <- Statistic
            cat("\n",Median_type, "\n\n")
            cat("Number of Variants :", object@SNPs, "\n")
            cat("------------------------------------------------------------------\n")
            print(output.table, quote = F, row.names = FALSE, justify= "left")
            cat("------------------------------------------------------------------\n")
          }
)
#--------------------------------------------------------------------------------------------
# Pretty-print an inverse-variance weighted (IVW) result: one-row estimate
# table plus residual-SE and heterogeneity diagnostics written with cat().
setMethod("show",
          "IVW",
          function(object){
            Interval_type <- paste(100*(1-object@Alpha), "% CI", sep = "")
            Statistic <- c("Method", "Estimate", "Std Error", Interval_type, "", "p-value")
            Value <- c("IVW", decimals(c(object@Estimate, object@StdError),3),
                       paste(decimals(object@CILower, 3), ",", sep = ""), decimals(c(object@CIUpper, object@Pvalue), 3))
            output.table <- data.frame(matrix(Value, nrow = 1))
            colnames(output.table) <- Statistic
            # Any NA in the correlation slot means "no correlation matrix
            # supplied", i.e. variants treated as uncorrelated.
            correlation <- ifelse(sum(is.na(object@Correlation)) == 0,
                                  "correlated", "uncorrelated")
            penalized <- ifelse(object@Penalized == TRUE, "Weights of genetic variants with heterogeneous causal estimates have been penalized. ", "")
            robust <- ifelse(object@Robust == TRUE, "Robust regression used.", "")
            cat("\nInverse-variance weighted method\n")
            cat("(variants ", correlation, ", ", object@Model, "-effect model)\n\n" , sep = "")
            cat("Number of Variants :", object@SNPs, "\n")
            cat(penalized, robust, "\n", sep = "")
            cat("------------------------------------------------------------------\n")
            print(output.table, quote = F, row.names = FALSE, justify = "left")
            cat("------------------------------------------------------------------\n")
            cat("Residual standard error = ", decimals(object@RSE, 3), "\n")
            if(object@Model == "fixed") { cat("Residual standard error is set to 1 in calculation of confidence interval by fixed-effect assumption.\n") }
            if(object@RSE<1) { cat("Residual standard error is set to 1 in calculation of confidence interval when its estimate is less than 1.\n") }
            if(is.na(object@Heter.Stat[1])) {
              cat("Heterogeneity is not calculated when weights are penalized, or when there is only one variant in the analysis.")
            } else {
              cat("Heterogeneity test statistic = ", decimals(object@Heter.Stat[1],4), " on ", object@SNPs -1,
                  " degrees of freedom, (p-value = ", decimals(object@Heter.Stat[2], 4),")\n", sep = "")
            }
          }
)
#--------------------------------------------------------------------------------------------
setMethod("show",
"MaxLik",
function(object){
Interval_type <- paste(100*(1-object@Alpha), "% CI", sep = "")
Statistic <- c("Method", "Estimate", "Std Error", Interval_type, "", "p-value")
Value <- c("MaxLik", decimals(c(object@Estimate, object@StdError),3),
paste(decimals(object@CILower, 3), ",", sep = ""), decimals(c(object@CIUpper, object@Pvalue), 3))
output.table <- data.frame(matrix(Value, nrow = 1))
colnames(output.table) <- Statistic
correlation <- ifelse(sum(is.na(object@Correlation)) == 0,
"correlated", "uncorrelated")
cat("\nMaximum-likelihood method\n")
cat("(variants ", correlation, ", ", object@Model, "-effect model)\n\n" , sep = "")
cat("Number of Variants :", object@SNPs, "\n")
cat("------------------------------------------------------------------\n")
print(output.table, quote = F, row.names = FALSE, justify = "left")
cat("------------------------------------------------------------------\n")
cat("Residual standard error = ", decimals(object@RSE, 3), "\n")
if(object@Model == "fixed") { cat("Residual standard error is set to 1 in calculation of confidence interval by fixed-effect assumption.\n") }
if(object@RSE<1) { cat("Residual standard error is set to 1 in calculation of confidence interval when its estimate is less than 1.\n") }
if(object@Heter.Stat[1] < 1e-16) {
cat("Heterogeneity is not calculated when there is only one variant in the analysis.")
} else {
cat("Heterogeneity test statistic = ", decimals(object@Heter.Stat[1],4), " on ", object@SNPs -1,
" degrees of freedom, (p-value = ", decimals(object@Heter.Stat[2], 4),")\n", sep = "")
}
}
)
#--------------------------------------------------------------------------------------------
setMethod("show",
"MRMBE",
function(object){
Interval_type <- paste(100*(1-object@Alpha), "% CI", sep = "")
Statistic <- c("Method", "Estimate", "Std Error", Interval_type, "", "p-value")
Value <- c("MBE", decimals(c(object@Estimate, object@StdError),3),
paste(decimals(object@CILower, 3), ",", sep = ""), decimals(c(object@CIUpper, object@Pvalue), 3))
output.table <- data.frame(matrix(Value, nrow = 1))
colnames(output.table) <- Statistic
if(object@StdErr=="simple") { nome <- "[assuming NOME]" }
if(object@StdErr=="delta") { nome <- "[not assuming NOME]" }
cat("\nMode-based method of Hartwig et al\n")
cat("(", object@Weighting, ", ", object@StdErr, " standard errors ", nome, ", bandwidth factor = ", object@Phi, ")\n\n" , sep = "")
cat("Number of Variants :", object@SNPs, "\n")
cat("------------------------------------------------------------------\n")
print(output.table, quote = F, row.names = FALSE, justify = "left")
cat("------------------------------------------------------------------\n")
}
)
#--------------------------------------------------------------------------------------------
setMethod("show",
"MRHetPen",
function(object){
if (object@CIMax%in%object@CIRange & object@CIMin%in%object@CIRange) {
cat("Confidence interval range too narrow. Please decrease CIMin and increase CIMax and try again.") }
else if (object@CIMax>max(object@CIRange) & object@CIMin%in%object@CIRange) {
cat("Lower bound of confidence interval range too high. Please decrease CIMin and try again.") }
if (object@CIMax%in%object@CIRange & object@CIMin<min(object@CIRange)) {
cat("Upper bound of confidence interval range too low. Please increase CIMax and try again.") }
if (object@CIMax>max(object@CIRange) & object@CIMin<min(object@CIRange)) {
Interval_type <- paste(100*(1-object@Alpha), "% CI", sep = "")
Statistic <- c("Method", "Estimate", Interval_type, "")
dps = max(ceiling(-log10(object@CIStep)), 1)
Ranges <- ifelse(sum(diff(object@CIRange)>1.01*object@CIStep)==0, "Single range", "Multiple ranges");
if (Ranges == "Single range") {
Value <- c("HetPen", decimals(object@Estimate, dps),
paste(decimals(min(object@CIRange), dps), ",", sep = ""), decimals(max(object@CIRange), dps))
output.table <- data.frame(matrix(Value, nrow = 1))
colnames(output.table) <- Statistic
Ranges.text <- "Note: confidence interval is a single range of values.\n"
}
if (Ranges == "Multiple ranges") {
Value <- c("HetPen", rep("", length(object@CILower)-1),
decimals(object@Estimate, dps), rep("", length(object@CILower)-1),
paste(decimals(object@CILower, dps), ",", sep = ""), decimals(object@CIUpper, dps))
output.table <- data.frame(matrix(Value, nrow = length(object@CILower), byrow=FALSE))
colnames(output.table) <- Statistic
Ranges.text <- "Note: confidence interval contains multiple ranges of values.\n"
}
cat("\nHeterogeneity-penalized method\n")
cat("(Prior probability of instrument validity = ", object@Prior, ")\n\n" , sep = "")
cat("Number of Variants :", object@SNPs, "\n")
cat("------------------------------------------------------------------\n")
print(output.table, quote = F, row.names = FALSE, justify = "left")
cat("------------------------------------------------------------------\n")
cat(Ranges.text)
}
}
)
#--------------------------------------------------------------------------------------------
setMethod("show",
"MRConMix",
function(object){
if (object@CIMax%in%object@CIRange & object@CIMin%in%object@CIRange) {
cat("Confidence interval range too narrow. Please decrease CIMin and increase CIMax and try again.") }
else if (object@CIMax>max(object@CIRange) & object@CIMin%in%object@CIRange) {
cat("Lower bound of confidence interval range too high. Please decrease CIMin and try again.") }
if (object@CIMax%in%object@CIRange & object@CIMin<min(object@CIRange)) {
cat("Upper bound of confidence interval range too low. Please increase CIMax and try again.") }
if (object@CIMax>max(object@CIRange) & object@CIMin<min(object@CIRange)) {
Interval_type <- paste(100*(1-object@Alpha), "% CI", sep = "")
Statistic <- c("Method", "Estimate", Interval_type, "")
dps = max(ceiling(-log10(object@CIStep)), 1)
Ranges <- ifelse(sum(diff(object@CIRange)>1.01*object@CIStep)==0, "Single range", "Multiple ranges");
if (Ranges == "Single range") {
Value <- c("ConMix", decimals(object@Estimate, dps),
paste(decimals(min(object@CIRange), dps), ",", sep = ""), decimals(max(object@CIRange), dps))
output.table <- data.frame(matrix(Value, nrow = 1))
colnames(output.table) <- Statistic
Ranges.text <- "Note: confidence interval is a single range of values.\n"
}
if (Ranges == "Multiple ranges") {
Value <- c("ConMix", rep("", length(object@CILower)-1),
decimals(object@Estimate, dps), rep("", length(object@CILower)-1),
paste(decimals(object@CILower, dps), ",", sep = ""), decimals(object@CIUpper, dps))
output.table <- data.frame(matrix(Value, nrow = length(object@CILower), byrow=FALSE))
colnames(output.table) <- Statistic
Ranges.text <- "Note: confidence interval contains multiple ranges of values.\n"
}
cat("\nContamination mixture method\n")
cat("(Standard deviation of invalid estimands = ", object@Psi, ")\n\n" , sep = "")
cat("Number of Variants :", object@SNPs, "\n")
cat("------------------------------------------------------------------\n")
print(output.table, quote = F, row.names = FALSE, justify = "left")
cat("------------------------------------------------------------------\n")
cat(Ranges.text)
}
}
)
#--------------------------------------------------------------------------------------------
setMethod("show",
"Egger",
function(object){
Interval_type <- paste(100*(1-object@Alpha), "% CI", sep = "")
Statistic <- c("Method", "Estimate", "Std Error", Interval_type, "", "p-value")
Value <- c("MR-Egger", decimals(c(object@Estimate,
object@StdError.Est), 3),
paste(decimals(object@CILower.Est, 3), ",", sep = ""),
decimals(c(object@CIUpper.Est,
object@Pvalue.Est), 3),
"(intercept)", decimals(c(object@Intercept,
object@StdError.Int), 3),
paste(decimals(object@CILower.Int, 3), ",", sep = ""),
decimals(c(object@CIUpper.Int,
object@Pvalue.Int), 3))
output.table <- data.frame(matrix(Value, nrow = 2, byrow = T))
colnames(output.table) <- Statistic
correlation <- ifelse(sum(is.na(object@Correlation)) == 0, "correlated", "uncorrelated")
penalized <- ifelse(object@Penalized == TRUE, "Weights of genetic variants with heterogeneous causal estimates have been penalized. ", "")
robust <- ifelse(object@Robust == TRUE, "Robust model used.", "")
cat("\nMR-Egger method\n")
cat("(variants ", correlation, ", ", object@Model, "-effect model)\n\n" , sep = "")
cat("Number of Variants = ", object@SNPs, "\n")
cat(penalized, robust, "\n", sep = "")
cat("------------------------------------------------------------------\n")
print(output.table, quote = F, row.names = FALSE, justify= "left")
cat("------------------------------------------------------------------\n")
cat("Residual Standard Error : ", decimals(object@RSE, 3), "\n")
if(object@RSE<1) { cat("Residual standard error is set to 1 in calculation of confidence interval when its estimate is less than 1.\n") }
if(is.na(object@Heter.Stat[1])) {
cat("Heterogeneity not calculated when weights are penalized.\n")
} else {
cat("Heterogeneity test statistic = ", decimals(object@Heter.Stat[1],4), " on ", object@SNPs - 2,
" degrees of freedom, (p-value = ", decimals(object@Heter.Stat[2], 4),")\n", sep = "")
}
if(!is.nan(object@I.sq)) {
cat("I^2_GX statistic: ", decimals(object@I.sq*100, 1), "%\n", sep="") }
}
)
#--------------------------------------------------------------------------------------------
setMethod("show",
"MRAll",
function(object){
df <- slot(object, "Values")
df[,2:6] <- decimals(df[,2:6], 3)
space <- rep("", 6)
if(object@Method == "all"){
df <- rbind(df[1:3,],
space,
df[4:7,],
space,
df[8:15,])
} else {
df <- df
}
print(df, justify = "left", row.names = FALSE)
}
)
#--------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------
setMethod("show",
"MVIVW",
function(object){
Interval_type <- paste(100*(1-object@Alpha), "% CI", sep = "")
Statistic <- c("Exposure", "Estimate", "Std Error", Interval_type, "", "p-value")
Value <- cbind(object@Exposure, decimals(object@Estimate, 3), decimals(object@StdError,3),
paste(decimals(object@CILower, 3), ",", sep = ""), decimals(object@CIUpper,3),
decimals(object@Pvalue, 3))
output.table <- data.frame(matrix(Value, nrow = length(object@Exposure)))
colnames(output.table) <- Statistic
correlation <- ifelse(sum(is.na(object@Correlation)) == 0,
"correlated", "uncorrelated")
cat("\nMultivariable inverse-variance weighted method\n")
cat("(variants ", correlation, ", ", object@Model, "-effect model)\n\n" , sep = "")
cat("Number of Variants :", object@SNPs, "\n")
cat("------------------------------------------------------------------\n")
print(output.table, quote = F, row.names = FALSE, justify = "left")
cat("------------------------------------------------------------------\n")
cat("Residual standard error = ", decimals(object@RSE, 3), "\n")
if(object@Model == "fixed") { cat("Residual standard error is set to 1 in calculation of confidence interval by fixed-effect assumption.\n") }
if(object@RSE<1) { cat("Residual standard error is set to 1 in calculation of confidence interval when its estimate is less than 1.\n") }
if(is.na(object@Heter.Stat[1])) {
cat("Heterogeneity is not calculated when weights are penalized, or when there is only one variant in the analysis.")
} else {
cat("Heterogeneity test statistic = ", decimals(object@Heter.Stat[1],4), " on ", object@SNPs-length(object@Exposure),
" degrees of freedom, (p-value = ", decimals(object@Heter.Stat[2], 4),")\n", sep = "")
}
}
)
#--------------------------------------------------------------------------------------------
setMethod("show",
"MVEgger",
function(object){
Interval_type <- paste(100*(1-object@Alpha), "% CI", sep = "")
Statistic <- c("Exposure", "Estimate", "Std Error", Interval_type, "", "p-value")
Value <- cbind(c(object@Exposure, "(intercept)"), decimals(c(object@Estimate, object@Intercept), 3), decimals(c(object@StdError.Est, object@StdError.Int),3),
paste(decimals(c(object@CILower.Est, object@CILower.Int), 3), ",", sep = ""), decimals(c(object@CIUpper.Est, object@CIUpper.Int),3),
decimals(c(object@Pvalue.Est, object@Pvalue.Int), 3))
output.table <- data.frame(matrix(Value, nrow = length(object@Exposure)+1))
colnames(output.table) <- Statistic
correlation <- ifelse(sum(is.na(object@Correlation)) == 0,
"correlated", "uncorrelated")
cat("\nMultivariable MR-Egger method\n")
cat("(variants ", correlation, ", ", object@Model, "-effect model)\n\n" , sep = "")
cat("Orientated to exposure :", object@Orientate, "\n")
cat("Number of Variants :", object@SNPs, "\n")
cat("------------------------------------------------------------------\n")
print(output.table, quote = F, row.names = FALSE, justify = "left")
cat("------------------------------------------------------------------\n")
cat("Residual standard error = ", decimals(object@RSE, 3), "\n")
if(object@RSE<1) { cat("Residual standard error is set to 1 in calculation of confidence interval when its estimate is less than 1.\n") }
cat("Heterogeneity test statistic = ", decimals(object@Heter.Stat[1],4), " on ", object@SNPs-length(object@Exposure)-1,
" degrees of freedom, (p-value = ", decimals(object@Heter.Stat[2], 4),")\n", sep = "")
}
)
|
#loads data into tibble variable hpc
source('./LoadHouseholdPowerConsumptionData.R')
plot3 <- function(data = NULL)
{
  # Plot the three energy sub-metering series against time and save the
  # result to plot3.png (480x480 px) in the working directory.
  #
  # data: data frame containing a DateTime column plus Sub_metering_1/2/3;
  #       loaded via load_data() when not supplied.
  if (is.null(data)) {
    data <- load_data()
  }
  png(filename = "plot3.png",
      width = 480,
      height = 480,
      units = "px")
  # Guarantee the device is closed even if a plotting call errors out
  # (the original leaked an open device on error).
  on.exit(dev.off(), add = TRUE)
  with(data, plot(DateTime,
                  Sub_metering_1,
                  type = "l",
                  ylab = "Energy sub metering"))
  with(data, lines(DateTime,
                   Sub_metering_2,
                   type = "l",
                   col = "red"))
  with(data, lines(DateTime,
                   Sub_metering_3,
                   type = "l",
                   col = "blue"))
  # Select legend labels by column name rather than by position, which broke
  # whenever the layout of `data` changed. Column order is assumed to be
  # Sub_metering_1, _2, _3 so labels line up with the plotted colors.
  legendLabels <- grep("^Sub_metering_", names(data), value = TRUE)
  legend("topright", legend = legendLabels,
         col = c("black", "red", "blue"), lwd = 1)
}
|
/plot3.R
|
no_license
|
jb1t/ExData_Plotting1
|
R
| false
| false
| 890
|
r
|
#loads data into tibble variable hpc
source('./LoadHouseholdPowerConsumptionData.R')
plot3 <- function(data = NULL)
{
  # Draw the three energy sub-metering time series into plot3.png.
  # When no data frame is supplied, fetch it with load_data().
  if (is.null(data)) data <- load_data()
  # Route all drawing to a 480x480 pixel PNG file in the working directory.
  png(filename = "plot3.png", width = 480, height = 480, units = "px")
  with(data, plot(DateTime, Sub_metering_1,
                  type = "l", ylab = "Energy sub metering"))
  with(data, lines(DateTime, Sub_metering_2, type = "l", col = "red"))
  with(data, lines(DateTime, Sub_metering_3, type = "l", col = "blue"))
  # Legend labels are taken positionally: the three columns just before the
  # last one are assumed to be the sub-metering series.
  label_idx <- length(data) - 3:1
  legend("topright", legend = names(data)[label_idx],
         col = c("black", "red", "blue"), lwd = 1)
  dev.off()  # close the PNG device
}
|
#' HTCondor scheduler functions
#'
#' Derives from QSys to provide HTCondor-specific functions
#'
#' @keywords internal
#' @examples
#' \dontrun{
#' options(clustermq.scheduler="HTCONDOR")
#' library(clustermq)
#' library(foreach)
#' register_dopar_cmq(n_jobs=2)
#' foreach(i=1:3) %dopar% sqrt(i) # this will be executed as jobs
#' }
HTCONDOR = R6::R6Class("HTCONDOR",
    inherit = QSys,
    public = list(
        # Set up the scheduler using the HTCONDOR submission template
        # (overridable via the clustermq.template option).
        initialize = function(..., template=getOption("clustermq.template", "HTCONDOR")) {
            super$initialize(..., template=template)
        },
        # Fill the submission template and pipe it to `condor_submit`.
        # `success` is the shell exit status: 0 means the jobs were accepted;
        # anything else aborts after echoing the filled template for debugging.
        submit_jobs = function(...) {
            opts = private$fill_options(...)
            private$job_id = opts$job_name
            filled = private$fill_template(opts)
            success = system("condor_submit", input=filled, ignore.stdout=TRUE)
            if (success != 0) {
                print(filled)
                stop("Job submission failed with error code ", success)
            }
        },
        # Remove the submitted jobs via `condor_rm`, at most once.
        # Output is suppressed when all workers already finished (quiet=TRUE).
        # NOTE(review): `private$is_cleaned_up` appears to be inherited from
        # QSys -- confirm it is initialized there.
        finalize = function(quiet=self$workers_running == 0) {
            if (!private$is_cleaned_up) {
                system(paste("condor_rm ", private$job_id),
                       ignore.stdout=quiet, ignore.stderr=quiet, wait=FALSE)
                private$is_cleaned_up = TRUE
            }
        }
    ),
    private = list(
        job_id = NULL  # condor job name captured at submission time
    )
)
|
/R/qsys_htcondor.r
|
permissive
|
bomeara/clustermq
|
R
| false
| false
| 1,350
|
r
|
#' HTCondor scheduler functions
#'
#' Derives from QSys to provide HTCondor-specific functions
#'
#' @keywords internal
#' @examples
#' \dontrun{
#' options(clustermq.scheduler="HTCONDOR")
#' library(clustermq)
#' library(foreach)
#' register_dopar_cmq(n_jobs=2)
#' foreach(i=1:3) %dopar% sqrt(i) # this will be executed as jobs
#' }
HTCONDOR = R6::R6Class("HTCONDOR",
    inherit = QSys,
    public = list(
        # Set up the scheduler using the HTCONDOR submission template
        # (overridable via the clustermq.template option).
        initialize = function(..., template=getOption("clustermq.template", "HTCONDOR")) {
            super$initialize(..., template=template)
        },
        # Fill the submission template and pipe it to `condor_submit`.
        # `success` is the shell exit status: 0 means the jobs were accepted;
        # anything else aborts after echoing the filled template for debugging.
        submit_jobs = function(...) {
            opts = private$fill_options(...)
            private$job_id = opts$job_name
            filled = private$fill_template(opts)
            success = system("condor_submit", input=filled, ignore.stdout=TRUE)
            if (success != 0) {
                print(filled)
                stop("Job submission failed with error code ", success)
            }
        },
        # Remove the submitted jobs via `condor_rm`, at most once.
        # Output is suppressed when all workers already finished (quiet=TRUE).
        # NOTE(review): `private$is_cleaned_up` appears to be inherited from
        # QSys -- confirm it is initialized there.
        finalize = function(quiet=self$workers_running == 0) {
            if (!private$is_cleaned_up) {
                system(paste("condor_rm ", private$job_id),
                       ignore.stdout=quiet, ignore.stderr=quiet, wait=FALSE)
                private$is_cleaned_up = TRUE
            }
        }
    ),
    private = list(
        job_id = NULL  # condor job name captured at submission time
    )
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pipe.R
\name{\%$\%}
\alias{\%$\%}
\title{magrittr exposition pipe-operator}
\usage{
lhs \%$\% rhs
}
\arguments{
\item{lhs}{A list, environment, or a data.frame.}
\item{rhs}{An expression where the names in lhs are available.}
}
\description{
Expose the names in \code{lhs} to the \code{rhs} expression. This is useful when functions
do not have a built-in data argument.
}
\details{
Some functions, e.g. \code{lm} and \code{aggregate}, have a
data argument, which allows the direct use of names inside the data as part
of the call. This operator exposes the contents of the left-hand side object
to the expression on the right to give a similar benefit, see the examples.
}
\examples{
iris \%>\%
subset(Sepal.Length > mean(Sepal.Length)) \%$\%
cor(Sepal.Length, Sepal.Width)
data.frame(z = rnorm(100)) \%$\%
ts.plot(z)
}
\seealso{
\code{\link{\%>\%}}, \code{\link{\%<>\%}}, \code{\link{\%$\%}}
}
|
/man/exposition.Rd
|
no_license
|
BeatrizInGitHub/magrittr
|
R
| false
| true
| 987
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pipe.R
\name{\%$\%}
\alias{\%$\%}
\title{magrittr exposition pipe-operator}
\usage{
lhs \%$\% rhs
}
\arguments{
\item{lhs}{A list, environment, or a data.frame.}
\item{rhs}{An expression where the names in lhs are available.}
}
\description{
Expose the names in \code{lhs} to the \code{rhs} expression. This is useful when functions
do not have a built-in data argument.
}
\details{
Some functions, e.g. \code{lm} and \code{aggregate}, have a
data argument, which allows the direct use of names inside the data as part
of the call. This operator exposes the contents of the left-hand side object
to the expression on the right to give a similar benefit, see the examples.
}
\examples{
iris \%>\%
subset(Sepal.Length > mean(Sepal.Length)) \%$\%
cor(Sepal.Length, Sepal.Width)
data.frame(z = rnorm(100)) \%$\%
ts.plot(z)
}
\seealso{
\code{\link{\%>\%}}, \code{\link{\%<>\%}}, \code{\link{\%$\%}}
}
|
#' Parse an html table into a data frame.
#'
#' @section Assumptions:
#'
#' \code{html_table} currently makes a few assumptions:
#'
#' \itemize{
#'   \item Cells spanning multiple rows or columns are expanded by repeating
#'     their value into each spanned position (heavily merged tables may
#'     still need manual clean-up)
#'   \item Headers are in the first row
#' }
#' @param x A node, node set or document.
#' @param header Use first row as header? If \code{NA}, will use first row
#'   if it consists of \code{<th>} tags.
#' @param trim Remove leading and trailing whitespace within each cell?
#' @param fill If \code{TRUE}, automatically fill rows with fewer than
#'   the maximum number of columns with \code{NA}s.
#' @param dec The character used as decimal mark.
#' @export
#' @examples
#' tdist <- read_html("http://en.wikipedia.org/wiki/Student%27s_t-distribution")
#' tdist %>%
#'   html_node("table.infobox") %>%
#'   html_table(header = FALSE)
#'
#' births <- read_html("https://www.ssa.gov/oact/babynames/numberUSbirths.html")
#' html_table(html_nodes(births, "table")[[2]])
#'
#' # If the table is badly formed, and has different number of rows in
#' # each column use fill = TRUE. Here it's due to incorrect colspan
#' # specification.
#' skiing <- read_html("http://data.fis-ski.com/dynamic/results.html?sector=CC&raceid=22395")
#' skiing %>%
#'   html_table(fill = TRUE)
html_table <- function(x, header = NA, trim = TRUE, fill = FALSE, dec = ".") {
  UseMethod("html_table")
}
#' @export
html_table.xml_document <- function(x, header = NA, trim = TRUE, fill = FALSE,
                                    dec = ".") {
  # Locate every <table> element in the document and parse each in turn,
  # returning a list of data frames.
  found <- xml2::xml_find_all(x, ".//table")
  lapply(found, function(tbl) {
    html_table(tbl, header = header, trim = trim, fill = fill, dec = dec)
  })
}
#' @export
html_table.xml_nodeset <- function(x, header = NA, trim = TRUE, fill = FALSE,
                                   dec = ".") {
  # FIXME: guess useful names
  # Parse every node in the set; the result is a list of data frames.
  out <- vector("list", length(x))
  for (i in seq_along(x)) {
    out[[i]] <- html_table(x[[i]], header = header, trim = trim,
                           fill = fill, dec = dec)
  }
  out
}
#' @export
html_table.xml_node <- function(x, header = NA, trim = TRUE,
                                fill = FALSE, dec = ".") {
  stopifnot(html_name(x) == "table")
  # Collect rows and their cells; rowspan/colspan attributes default to "1"
  # when absent. (Spans are not an error: they are expanded further below.)
  rows <- html_nodes(x, "tr")
  n <- length(rows)
  cells <- lapply(rows, "html_nodes", xpath = ".//td|.//th")
  ncols <- lapply(cells, html_attr, "colspan", default = "1")
  ncols <- lapply(ncols, as.integer)
  nrows <- lapply(cells, html_attr, "rowspan", default = "1")
  nrows <- lapply(nrows, as.integer)
  # maxp is the widest row in effective (span-expanded) columns and fixes the
  # width of the output matrix.
  p <- unique(vapply(ncols, sum, integer(1)))
  maxp <- max(p)
  if (length(p) > 1 & maxp * n != sum(unlist(nrows)) &
      maxp * n != sum(unlist(ncols))) {
    # then malformed table is not parsable by smart filling solution
    if (!fill) { # fill must then be specified to allow filling with NAs
      stop("Table has inconsistent number of columns. ",
           "Do you want fill = TRUE?", call. = FALSE)
    }
  }
  values <- lapply(cells, html_text, trim = trim)
  # Cells that remain unassigned (short rows) stay NA.
  out <- matrix(NA_character_, nrow = n, ncol = maxp)
  # fill colspans right with repetition
  for (i in seq_len(n)) {
    row <- values[[i]]
    ncol <- ncols[[i]]
    col <- 1
    for (j in seq_len(length(ncol))) {
      out[i, col:(col+ncol[j]-1)] <- row[[j]]
      col <- col + ncol[j]
    }
  }
  # fill rowspans down with repetition
  for (i in seq_len(maxp)) {
    for (j in seq_len(n)) {
      rowspan <- nrows[[j]][i]; colspan <- ncols[[j]][i]
      if (!is.na(rowspan) & (rowspan > 1)) {
        if (!is.na(colspan) & (colspan > 1)) {
          # special case of colspan and rowspan in same cell
          nrows[[j]] <- c(utils::head(nrows[[j]], i),
                          rep(rowspan, colspan-1),
                          utils::tail(nrows[[j]], length(rowspan)-(i+1)))
          rowspan <- nrows[[j]][i]
        }
        # Copy the spanned value into each subsequent row, keeping row width.
        for (k in seq_len(rowspan - 1)) {
          l <- utils::head(out[j+k, ], i-1)
          r <- utils::tail(out[j+k, ], maxp-i+1)
          out[j + k, ] <- utils::head(c(l, out[j, i], r), maxp)
        }
      }
    }
  }
  # header == NA means: use first row as header iff it is all <th> cells.
  if (is.na(header)) {
    header <- all(html_name(cells[[1]]) == "th")
  }
  if (header) {
    col_names <- out[1, , drop = FALSE]
    out <- out[-1, , drop = FALSE]
  } else {
    col_names <- paste0("X", seq_len(ncol(out)))
  }
  # Convert matrix to list to data frame; type.convert infers column types.
  df <- lapply(seq_len(maxp), function(i) {
    utils::type.convert(out[, i], as.is = TRUE, dec = dec)
  })
  names(df) <- col_names
  class(df) <- "data.frame"
  attr(df, "row.names") <- .set_row_names(length(df[[1]]))
  if (length(unique(col_names)) < length(col_names)) {
    warning('At least two columns have the same name')
  }
  df
}
|
/R/table.R
|
no_license
|
jrnold/rvest
|
R
| false
| false
| 4,567
|
r
|
#' Parse an html table into a data frame.
#'
#' @section Assumptions:
#'
#' \code{html_table} currently makes a few assumptions:
#'
#' \itemize{
#'   \item Cells spanning multiple rows or columns are expanded by repeating
#'     their value into each spanned position (heavily merged tables may
#'     still need manual clean-up)
#'   \item Headers are in the first row
#' }
#' @param x A node, node set or document.
#' @param header Use first row as header? If \code{NA}, will use first row
#'   if it consists of \code{<th>} tags.
#' @param trim Remove leading and trailing whitespace within each cell?
#' @param fill If \code{TRUE}, automatically fill rows with fewer than
#'   the maximum number of columns with \code{NA}s.
#' @param dec The character used as decimal mark.
#' @export
#' @examples
#' tdist <- read_html("http://en.wikipedia.org/wiki/Student%27s_t-distribution")
#' tdist %>%
#'   html_node("table.infobox") %>%
#'   html_table(header = FALSE)
#'
#' births <- read_html("https://www.ssa.gov/oact/babynames/numberUSbirths.html")
#' html_table(html_nodes(births, "table")[[2]])
#'
#' # If the table is badly formed, and has different number of rows in
#' # each column use fill = TRUE. Here it's due to incorrect colspan
#' # specification.
#' skiing <- read_html("http://data.fis-ski.com/dynamic/results.html?sector=CC&raceid=22395")
#' skiing %>%
#'   html_table(fill = TRUE)
html_table <- function(x, header = NA, trim = TRUE, fill = FALSE, dec = ".") {
  UseMethod("html_table")
}
#' @export
html_table.xml_document <- function(x, header = NA, trim = TRUE, fill = FALSE,
                                    dec = ".") {
  # Locate every <table> element in the document and parse each in turn,
  # returning a list of data frames.
  found <- xml2::xml_find_all(x, ".//table")
  lapply(found, function(tbl) {
    html_table(tbl, header = header, trim = trim, fill = fill, dec = dec)
  })
}
#' @export
html_table.xml_nodeset <- function(x, header = NA, trim = TRUE, fill = FALSE,
                                   dec = ".") {
  # FIXME: guess useful names
  # Parse every node in the set; the result is a list of data frames.
  out <- vector("list", length(x))
  for (i in seq_along(x)) {
    out[[i]] <- html_table(x[[i]], header = header, trim = trim,
                           fill = fill, dec = dec)
  }
  out
}
#' @export
html_table.xml_node <- function(x, header = NA, trim = TRUE,
                                fill = FALSE, dec = ".") {
  stopifnot(html_name(x) == "table")
  # Collect rows and their cells; rowspan/colspan attributes default to "1"
  # when absent. (Spans are not an error: they are expanded further below.)
  rows <- html_nodes(x, "tr")
  n <- length(rows)
  cells <- lapply(rows, "html_nodes", xpath = ".//td|.//th")
  ncols <- lapply(cells, html_attr, "colspan", default = "1")
  ncols <- lapply(ncols, as.integer)
  nrows <- lapply(cells, html_attr, "rowspan", default = "1")
  nrows <- lapply(nrows, as.integer)
  # maxp is the widest row in effective (span-expanded) columns and fixes the
  # width of the output matrix.
  p <- unique(vapply(ncols, sum, integer(1)))
  maxp <- max(p)
  if (length(p) > 1 & maxp * n != sum(unlist(nrows)) &
      maxp * n != sum(unlist(ncols))) {
    # then malformed table is not parsable by smart filling solution
    if (!fill) { # fill must then be specified to allow filling with NAs
      stop("Table has inconsistent number of columns. ",
           "Do you want fill = TRUE?", call. = FALSE)
    }
  }
  values <- lapply(cells, html_text, trim = trim)
  # Cells that remain unassigned (short rows) stay NA.
  out <- matrix(NA_character_, nrow = n, ncol = maxp)
  # fill colspans right with repetition
  for (i in seq_len(n)) {
    row <- values[[i]]
    ncol <- ncols[[i]]
    col <- 1
    for (j in seq_len(length(ncol))) {
      out[i, col:(col+ncol[j]-1)] <- row[[j]]
      col <- col + ncol[j]
    }
  }
  # fill rowspans down with repetition
  for (i in seq_len(maxp)) {
    for (j in seq_len(n)) {
      rowspan <- nrows[[j]][i]; colspan <- ncols[[j]][i]
      if (!is.na(rowspan) & (rowspan > 1)) {
        if (!is.na(colspan) & (colspan > 1)) {
          # special case of colspan and rowspan in same cell
          nrows[[j]] <- c(utils::head(nrows[[j]], i),
                          rep(rowspan, colspan-1),
                          utils::tail(nrows[[j]], length(rowspan)-(i+1)))
          rowspan <- nrows[[j]][i]
        }
        # Copy the spanned value into each subsequent row, keeping row width.
        for (k in seq_len(rowspan - 1)) {
          l <- utils::head(out[j+k, ], i-1)
          r <- utils::tail(out[j+k, ], maxp-i+1)
          out[j + k, ] <- utils::head(c(l, out[j, i], r), maxp)
        }
      }
    }
  }
  # header == NA means: use first row as header iff it is all <th> cells.
  if (is.na(header)) {
    header <- all(html_name(cells[[1]]) == "th")
  }
  if (header) {
    col_names <- out[1, , drop = FALSE]
    out <- out[-1, , drop = FALSE]
  } else {
    col_names <- paste0("X", seq_len(ncol(out)))
  }
  # Convert matrix to list to data frame; type.convert infers column types.
  df <- lapply(seq_len(maxp), function(i) {
    utils::type.convert(out[, i], as.is = TRUE, dec = dec)
  })
  names(df) <- col_names
  class(df) <- "data.frame"
  attr(df, "row.names") <- .set_row_names(length(df[[1]]))
  if (length(unique(col_names)) < length(col_names)) {
    warning('At least two columns have the same name')
  }
  df
}
|
#' One-Way and Two-Way Histograms/Plots
#'
#' Prepares data and plots a dependent variable across one or two independent variables,
#' yielding a matrix of plots.
#'
#' @param fx Formula - grouping formula of form dv ~ iv or dv ~ iv1 + iv2
#' @param data Data Frame - The data for the plots
#' @param FUN Function - Plotting function compatible with parameters for hist.grouped
#' @param interval.size Numeric - Bin size
#' @param anchor.value Numeric - One of the breakpoints
#' @param width.consider Numeric - Bin sizes to consider when plotting histograms
#' @param right Logical - If TRUE, the histogram cells are right-closed (left open) intervals.
#' @param hist.correct.label Logical - Provide better placement for labels compared with the labeled cells/bins.
#' @param constant.x.axis Logical - If TRUE, ensure all plots have the same domains.
#' @param constant.y.axis Logical - If TRUE, ensure all plots have the same ranges.
#' @param ... Additional parameters - Additional parameters passed to FUN
#'
#' @return No return value
process.group.plot <- function(
  fx
  ,data = NULL
  ,FUN = hist.grouped
  ,interval.size = NA
  ,anchor.value = NA
  ,width.consider = lolcat.default.width.consider
  ,right = F
  ,hist.correct.label = isTRUE(all.equal(FUN,hist.grouped))
  #,call.dev.off = T
  ,constant.x.axis = T
  ,constant.y.axis = T
  ,...
) {
  # NOTE(review): constant.x.axis and constant.y.axis are accepted but never
  # read below -- shared xlim/ylim are always used. TODO confirm intent.
  # Save graphics state; it is restored at the end of the function.
  par.orig <- par(no.readonly = T)
  par(mar=c(2.5,2.5,1,1))
  # Split the formula: response (dv) and the first-order grouping terms (ivs).
  fx.terms<-terms(fx)
  response<-all.vars(fx)[attributes(fx.terms)$response]
  iv.names<-attributes(terms(fx))$term.labels[which(attributes(fx.terms)$order == 1)]
  cell.codes <- compute.group.cell.codes(fx =fx, data = data)
  # The overall frequency distribution fixes the common binning (anchor and
  # interval size) reused for every per-group panel.
  fd.overall <- frequency.dist.grouped(
    x = data[[response]]
    ,interval.size = interval.size
    ,anchor.value = anchor.value
    ,width.consider = width.consider
    ,right = right
  )
  anchor.value <- fd.overall$midpoint[1]
  interval.size <- fd.overall$min[2] - fd.overall$min[1]
  # Per-group frequency distributions, all built with the shared binning.
  all.fd <- lapply(split(data[[response]], cell.codes), FUN = function(x) {
    frequency.dist.grouped(
      x = x
      ,interval.size = interval.size
      ,anchor.value = anchor.value
      ,right = right
    )
  } )
  # Common axis limits so every panel is directly comparable.
  xlim <- c(min(fd.overall$midpoint)-interval.size, max(fd.overall$midpoint) + interval.size)
  ylim <- c(0, max(unlist(lapply(all.fd, FUN = function(y) { max(y$freq)}))) + 1)
  if (length(iv.names) == 0) {
    # No grouping variable: intentionally nothing to lay out.
  } else if (length(iv.names) == 1) {
    # One grouping variable: one panel per group, stacked in a single column.
    layout_matrix <- matrix(1:length(all.fd), ncol=1)
    layout(layout_matrix)
    response.split <- split(data[[response]], cell.codes)
    iv.split <- split(data[[iv.names[1]]], cell.codes)
    # Plot rxc ...
    # When hist.correct.label is set, tick positions are shifted back by half
    # a bin (presumably to align labels with hist.grouped's bars -- confirm).
    correction <- if (hist.correct.label) {
      .5*interval.size
    } else {
      0
    }
    at <- fd.overall$midpoint-correction
    labels <- fd.overall$midpoint
    for ( i in 1:length(response.split)) {
      x <- response.split[[i]]
      if (length(x) > 0) {
        FUN(x
            ,xlim=xlim
            ,ylim=ylim
            ,main=paste(response, " (",iv.names[1]," = ",iv.split[[i]][1],")", sep="")
            ,xlab=NULL
            ,ylab=NULL
            ,anchor.value=anchor.value
            ,interval.size=interval.size
            ,xaxt="n"
            ,right = right
            ,...
        )
        axis(1, at=at ,labels = labels )
      } else {
        # Empty cell: placeholder panel so the grid stays aligned.
        plot.new()
        text(.5,.5, "N/A")
      }
    }
  } else if (length(iv.names) == 2) {
    # Two grouping variables: iv1 indexes rows, iv2 indexes columns.
    row_var = iv.names[1]
    col_var = iv.names[2]
    unique_row = unique(as.character(data[[row_var]]))
    unique_col = unique(as.character(data[[col_var]]))
    #Compute layout matrix and widths/heights
    # Panel indices start after the 3 title cells plus one label cell per
    # row and per column.
    layout_mat <- matrix(1:(length(unique_row)*length(unique_col)) , nrow=length(unique_row), byrow=T) + 1 + 2 + length(unique_row) + length(unique_col)
    layout_mat <- cbind(1, layout_mat)
    layout_mat <- cbind(1, layout_mat)
    layout_mat <- rbind(1, layout_mat)
    layout_mat <- rbind(1, layout_mat)
    layout_mat[1,3:ncol(layout_mat)] <- 2 # Column Title Index
    layout_mat[3:nrow(layout_mat),1] <- 3 # Row Title Index
    layout_mat[2,3:ncol(layout_mat)] <- 3+1:(ncol(layout_mat)-2) # Column Titles Index
    layout_mat[3:nrow(layout_mat),2] <- layout_mat[2,ncol(layout_mat)]+1:(nrow(layout_mat)-2) # Column Titles Index
    # Title/label strips take 10% each; panels share the remaining 80%.
    widths <- c(.1,.1,rep(.8/(ncol(layout_mat)-2),ncol(layout_mat)-2))
    heights <- c(.1,.1,rep(.8/(nrow(layout_mat)-2),nrow(layout_mat)-2))
    layout(layout_mat, widths = widths, heights = heights)
    #Start with labels
    plot.new() #Blank -
    plot.new() #Column variable
    text(0.5,0.5,col_var,cex=2,font=2)
    plot.new() #Row variable
    text(0.5,0.5,row_var,cex=2,font=2,srt=90)
    for (i in unique_col) {
      plot.new()
      text(0.5,0.5,i,cex=1.5,font=2)
    }
    for (i in unique_row) {
      plot.new()
      text(0.5,0.5,i,cex=1.5,font=2, srt=90)
    }
    # Plot rxc ...
    # Same half-bin label correction as in the one-way branch.
    correction <- if (hist.correct.label) {
      .5*interval.size
    } else {
      0
    }
    at <- fd.overall$midpoint-correction
    labels <- fd.overall$midpoint
    #print(correction)
    #print(at)
    #print(labels)
    for (i in unique_row) {
      for (j in unique_col) {
        x <- data[[response]][which(data[[row_var]] == i & data[[col_var]] == j)]
        if (length(x) > 0) {
          FUN(x
              ,xlim=xlim
              ,ylim=ylim
              ,main=NULL
              ,xlab=NULL
              ,ylab=NULL
              ,anchor.value=anchor.value
              ,interval.size=interval.size
              ,xaxt="n"
              ,right = right)
          axis(1, at=at ,labels = labels )
        } else {
          # Empty cell: placeholder panel so the grid stays aligned.
          plot.new()
          text(.5,.5, "N/A")
        }
      }
    }
  } else {
    # More than two grouping variables is not implemented.
    stop("Not supported yet...")
  }
  # x<-1:10
  # par(mar=c(2.5,2.5,1,1))
  # layout(matrix(c(6,6,6,1,2,3,1,4,5),ncol=3),heights=c(1,3,3), widths = c(1,3,3))
  # plot.new()
  # text(0.5,0.5,"First title",cex=2,font=2)
  # hist.grouped(rnorm(50), main = NULL)
  # hist.grouped(rnorm(50), main = NULL)
  # hist.grouped(rnorm(50), main = NULL)
  # hist.grouped(rnorm(50), main = NULL)
  # plot.new()
  # text(0.5,0.5,"Third title",cex=2,font=2,srt=90)
  #if (call.dev.off) {
  #  dev.off()
  #}
  # Restore the caller's graphics parameters.
  par(par.orig)
}
|
/R/process.group.plot.R
|
permissive
|
burrm/lolcat
|
R
| false
| false
| 6,364
|
r
|
#' One-Way and Two-Way Histograms/Plots
#'
#' Prepares data and plots a dependent variable across one or two independent
#' variables, yielding a matrix of plots. All panels share a common bin
#' structure (anchor/interval) and common axis limits so distributions are
#' directly comparable. Graphics state is restored before returning.
#'
#' @param fx Formula - grouping formula of form dv ~ iv or dv ~ iv1 + iv2
#' @param data Data Frame - The data for the plots
#' @param FUN Function - Plotting function compatible with parameters for hist.grouped
#' @param interval.size Numeric - Bin size
#' @param anchor.value Numeric - One of the breakpoints
#' @param width.consider Numeric - Bin sizes to consider when plotting histograms
#' @param right Logical - If TRUE, the histogram cells are right-closed (left open) intervals.
#' @param hist.correct.label Logical - Provide better placement for labels compared with the labeled cells/bins.
#' @param constant.x.axis Logical - If TRUE, ensure all plots have the same domains.
#' @param constant.y.axis Logical - If TRUE, ensure all plots have the same ranges.
#' @param ... Additional parameters - Additional parameters passed to FUN
#'
#' @return No return value
process.group.plot <- function(
   fx
  ,data = NULL
  ,FUN = hist.grouped
  ,interval.size = NA
  ,anchor.value = NA
  ,width.consider = lolcat.default.width.consider
  ,right = F
  ,hist.correct.label = isTRUE(all.equal(FUN,hist.grouped))
  #,call.dev.off = T
  ,constant.x.axis = T
  ,constant.y.axis = T
  ,...
) {
  # Remember the caller's graphics settings; layout()/par() below are global
  # side effects and are restored at the end of the function.
  par.orig <- par(no.readonly = T)
  par(mar = c(2.5, 2.5, 1, 1))

  # Pull the response name and the first-order terms (ivs) out of the formula.
  fx.terms <- terms(fx)
  response <- all.vars(fx)[attributes(fx.terms)$response]
  iv.names <- attributes(terms(fx))$term.labels[which(attributes(fx.terms)$order == 1)]

  # One cell code per observation, identifying its iv-level combination.
  cell.codes <- compute.group.cell.codes(fx = fx, data = data)

  # The overall distribution establishes the common anchor and interval size
  # that every panel reuses.
  fd.overall <- frequency.dist.grouped(
    x = data[[response]]
    ,interval.size = interval.size
    ,anchor.value = anchor.value
    ,width.consider = width.consider
    ,right = right
  )
  anchor.value <- fd.overall$midpoint[1]
  interval.size <- fd.overall$min[2] - fd.overall$min[1]

  # Per-cell frequency distributions built on the common bins.
  all.fd <- lapply(split(data[[response]], cell.codes), FUN = function(x) {
    frequency.dist.grouped(
      x = x
      ,interval.size = interval.size
      ,anchor.value = anchor.value
      ,right = right
    )
  })

  # Shared axis limits across all panels.
  # NOTE(review): constant.x.axis / constant.y.axis are accepted but not yet
  # consulted -- limits are currently always shared.
  xlim <- c(min(fd.overall$midpoint) - interval.size, max(fd.overall$midpoint) + interval.size)
  ylim <- c(0, max(unlist(lapply(all.fd, FUN = function(y) { max(y$freq) }))) + 1)

  if (length(iv.names) == 0) {
    # No independent variable: intentionally a no-op.
  } else if (length(iv.names) == 1) {
    # One-way: stack one panel per level of the single iv.
    layout_matrix <- matrix(seq_along(all.fd), ncol = 1)
    layout(layout_matrix)
    response.split <- split(data[[response]], cell.codes)
    iv.split <- split(data[[iv.names[1]]], cell.codes)

    # When hist.correct.label is TRUE, shift tick positions by half an
    # interval so the midpoint labels line up with the drawn cells.
    correction <- if (hist.correct.label) {
      .5 * interval.size
    } else {
      0
    }
    at <- fd.overall$midpoint - correction
    labels <- fd.overall$midpoint

    for (i in seq_along(response.split)) {
      x <- response.split[[i]]
      if (length(x) > 0) {
        FUN(x
            ,xlim = xlim
            ,ylim = ylim
            ,main = paste(response, " (", iv.names[1], " = ", iv.split[[i]][1], ")", sep = "")
            ,xlab = NULL
            ,ylab = NULL
            ,anchor.value = anchor.value
            ,interval.size = interval.size
            ,xaxt = "n"
            ,right = right
            ,...
        )
        axis(1, at = at, labels = labels)
      } else {
        # Empty cell: placeholder panel.
        plot.new()
        text(.5, .5, "N/A")
      }
    }
  } else if (length(iv.names) == 2) {
    # Two-way: rows are levels of the first iv, columns levels of the second.
    row_var <- iv.names[1]
    col_var <- iv.names[2]
    unique_row <- unique(as.character(data[[row_var]]))
    unique_col <- unique(as.character(data[[col_var]]))

    # Panel indices start after the 3 title cells plus one label cell per
    # row level and per column level.
    layout_mat <- matrix(1:(length(unique_row) * length(unique_col)), nrow = length(unique_row), byrow = T) + 1 + 2 + length(unique_row) + length(unique_col)
    # Prepend two columns (row-variable title, row-level labels) and two
    # rows (column-variable title, column-level labels).
    layout_mat <- cbind(1, layout_mat)
    layout_mat <- cbind(1, layout_mat)
    layout_mat <- rbind(1, layout_mat)
    layout_mat <- rbind(1, layout_mat)
    layout_mat[1, 3:ncol(layout_mat)] <- 2                             # column variable title
    layout_mat[3:nrow(layout_mat), 1] <- 3                             # row variable title
    layout_mat[2, 3:ncol(layout_mat)] <- 3 + 1:(ncol(layout_mat) - 2)  # column level labels
    layout_mat[3:nrow(layout_mat), 2] <- layout_mat[2, ncol(layout_mat)] + 1:(nrow(layout_mat) - 2)  # row level labels
    widths <- c(.1, .1, rep(.8 / (ncol(layout_mat) - 2), ncol(layout_mat) - 2))
    heights <- c(.1, .1, rep(.8 / (nrow(layout_mat) - 2), nrow(layout_mat) - 2))
    layout(layout_mat, widths = widths, heights = heights)

    # Title and label cells, in the index order established above.
    plot.new()                                  # blank corner
    plot.new()                                  # column variable title
    text(0.5, 0.5, col_var, cex = 2, font = 2)
    plot.new()                                  # row variable title
    text(0.5, 0.5, row_var, cex = 2, font = 2, srt = 90)
    for (i in unique_col) {                     # column level labels
      plot.new()
      text(0.5, 0.5, i, cex = 1.5, font = 2)
    }
    for (i in unique_row) {                     # row level labels
      plot.new()
      text(0.5, 0.5, i, cex = 1.5, font = 2, srt = 90)
    }

    # Axis tick correction, identical to the one-way branch.
    correction <- if (hist.correct.label) {
      .5 * interval.size
    } else {
      0
    }
    at <- fd.overall$midpoint - correction
    labels <- fd.overall$midpoint

    for (i in unique_row) {
      for (j in unique_col) {
        x <- data[[response]][which(data[[row_var]] == i & data[[col_var]] == j)]
        if (length(x) > 0) {
          FUN(x
              ,xlim = xlim
              ,ylim = ylim
              ,main = NULL
              ,xlab = NULL
              ,ylab = NULL
              ,anchor.value = anchor.value
              ,interval.size = interval.size
              ,xaxt = "n"
              ,right = right
              ,...  # FIX: forward extra arguments to FUN; the one-way branch
                    # already did, but the two-way branch silently dropped them
          )
          axis(1, at = at, labels = labels)
        } else {
          # Empty cell: placeholder panel.
          plot.new()
          text(.5, .5, "N/A")
        }
      }
    }
  } else {
    # Three or more independent variables are not supported.
    stop("Not supported yet...")
  }
  par(par.orig)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pp_plot.R
\name{pp_plot}
\alias{pp_plot}
\title{Plot a policy portfolio}
\usage{
pp_plot(D, id = NULL, spacing = FALSE, subtitle = TRUE, caption = NULL)
}
\arguments{
\item{D}{Data frame in a tidy format with the following columns: "Country", "Sector", "Year", "Instrument", "Target" and "covered". "covered" is a binary indicator of whether the portfolio space is covered by policy intervention (1) or not (0). The remaining columns identify the case. Notice that "Year" is a numeric value, while the remaining 4 case identifiers are factors.}
\item{id}{A list with up to two elements, namely "Country", and "Year" indicating the specific identification characteristics of the portfolio(s) that must be processed.}
\item{spacing}{Logical value. When TRUE, some space is added between tiles to help distinguish adjacent spaces. Defaults to FALSE.}
\item{subtitle}{Logical value. When TRUE (the default), a subtitle with the measures of the portfolio is included.}
\item{caption}{A character vector with a caption indicating the source of the data. For the CONSENSUS dataset, please use "citation(PolicyPortfolios)" to properly cite its source.}
}
\value{
A tidy dataset containing the portfolio identifiers (Country, Sector and Year) plus the Measure identifiers (Measure and Measure.label) and the value of the portfolio characteristic.
}
\description{
Plot a policy portfolio.
}
\examples{
data(P.education)
pp_plot(P.education, id = list(Country = "Borduria", Year = 2025))
}
|
/man/pp_plot.Rd
|
no_license
|
xfim/PolicyPortfolios
|
R
| false
| true
| 1,559
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pp_plot.R
\name{pp_plot}
\alias{pp_plot}
\title{Plot a policy portfolio}
\usage{
pp_plot(D, id = NULL, spacing = FALSE, subtitle = TRUE, caption = NULL)
}
\arguments{
\item{D}{Data frame in a tidy format with the following columns: "Country", "Sector", "Year", "Instrument", "Target" and "covered". "covered" is a binary indicator of whether the portfolio space is covered by policy intervention (1) or not (0). The remaining columns identify the case. Notice that "Year" is a numeric value, while the remaining 4 case identifiers are factors.}
\item{id}{A list with up to two elements, namely "Country", and "Year" indicating the specific identification characteristics of the portfolio(s) that must be processed.}
\item{spacing}{Logical value. When TRUE, some space is added between tiles to help distinguish adjacent spaces. Defaults to FALSE.}
\item{subtitle}{Logical value. When TRUE (the default), a subtitle with the measures of the portfolio is included.}
\item{caption}{A character vector with a caption indicating the source of the data. For the CONSENSUS dataset, please use "citation(PolicyPortfolios)" to properly cite its source.}
}
\value{
A tidy dataset containing the portfolio identifiers (Country, Sector and Year) plus the Measure identifiers (Measure and Measure.label) and the value of the portfolio characteristic.
}
\description{
Plot a policy portfolio.
}
\examples{
data(P.education)
pp_plot(P.education, id = list(Country = "Borduria", Year = 2025))
}
|
# Seoul CBD traffic: average hourly traffic counts at four central sites.
library(tidyverse)
library(lubridate)
# One sheet per month. 일자 ("date") is coerced to character before parsing --
# presumably stored as a number like 20200101; confirm against the workbook.
jan <- readxl::read_xlsx("traffic/traffic.xlsx", sheet = 1) %>% mutate(Date = as_date(as.character(일자)), Day = weekdays(Date))
feb <- readxl::read_xlsx("traffic/traffic.xlsx", sheet = 2) %>% mutate(Date = as_date(as.character(일자)), Day = weekdays(Date))
mar <- readxl::read_xlsx("traffic/traffic.xlsx", sheet = 3) %>% mutate(Date = as_date(as.character(일자)), Day = weekdays(Date))
# Keep only the four CBD measurement sites (지점명 = site name).
# NOTE(review): feb and mar are read but never used below -- confirm intent.
cbd <- jan %>% filter(지점명 %in% c("율곡로(안국역)", "종로(동묘앞역)", "종로(종로3가역)", "을지로(을지로3가역)"))
# Per-site means of the hourly count columns, rounded to whole numbers.
# The dropped columns 1:2, 4:6, 31:32 are assumed to be identifier/derived
# fields rather than hourly counts -- TODO confirm against the sheet layout.
cbd %>%
select(-c(1:2,4:6,31:32)) %>%
group_by(지점명) %>%
na.omit() %>%
summarise_all(list(mean = mean), na.rm = T) %>%
mutate_if(is.numeric, round, 0) -> summary
# Melt to long form, strip the "_mean" suffix summarise_all appended, and
# zero-pad single-digit hour labels ("1시" -> "01시") so they sort correctly.
summary %>%
reshape2::melt(id = "지점명") %>%
mutate(variable = gsub("_mean", "", .$variable)) %>%
mutate(variable = case_when(variable == "0시" ~ "00시",
variable == "1시" ~ "01시",
variable == "2시" ~ "02시",
variable == "3시" ~ "03시",
variable == "4시" ~ "04시",
variable == "5시" ~ "05시",
variable == "6시" ~ "06시",
variable == "7시" ~ "07시",
variable == "8시" ~ "08시",
variable == "9시" ~ "09시",
TRUE ~ as.character(variable)
)) %>%
# Hourly traffic profile, one coloured line per site.
ggplot(aes(variable, value, group = 지점명, colour = 지점명)) +
geom_line(size = 2)+
theme_minimal() +
theme(axis.title.x=element_blank(),
axis.ticks.x=element_blank(),
axis.title.y=element_blank(),
axis.text.x=element_text(size = 13, angle = 30),
axis.text.y=element_text(size = 13),
strip.text.x = element_text(size = 13,
margin = margin(.1,0,.1,0, "cm")),
legend.position = "bottom",
legend.title=element_text(size=13),
legend.text=element_text(size=13)
) -> traffic_gg
# Save at print quality.
ggsave("result_traffic_trend.png", traffic_gg, width = 8, height = 6, dpi = 600)
|
/traffic.R
|
no_license
|
kidstoneszk/SeoultrafficABM
|
R
| false
| false
| 2,052
|
r
|
# Seoul CBD traffic: average hourly traffic counts at four central sites.
library(tidyverse)
library(lubridate)
# One sheet per month. 일자 ("date") is coerced to character before parsing --
# presumably stored as a number like 20200101; confirm against the workbook.
jan <- readxl::read_xlsx("traffic/traffic.xlsx", sheet = 1) %>% mutate(Date = as_date(as.character(일자)), Day = weekdays(Date))
feb <- readxl::read_xlsx("traffic/traffic.xlsx", sheet = 2) %>% mutate(Date = as_date(as.character(일자)), Day = weekdays(Date))
mar <- readxl::read_xlsx("traffic/traffic.xlsx", sheet = 3) %>% mutate(Date = as_date(as.character(일자)), Day = weekdays(Date))
# Keep only the four CBD measurement sites (지점명 = site name).
# NOTE(review): feb and mar are read but never used below -- confirm intent.
cbd <- jan %>% filter(지점명 %in% c("율곡로(안국역)", "종로(동묘앞역)", "종로(종로3가역)", "을지로(을지로3가역)"))
# Per-site means of the hourly count columns, rounded to whole numbers.
# The dropped columns 1:2, 4:6, 31:32 are assumed to be identifier/derived
# fields rather than hourly counts -- TODO confirm against the sheet layout.
cbd %>%
select(-c(1:2,4:6,31:32)) %>%
group_by(지점명) %>%
na.omit() %>%
summarise_all(list(mean = mean), na.rm = T) %>%
mutate_if(is.numeric, round, 0) -> summary
# Melt to long form, strip the "_mean" suffix summarise_all appended, and
# zero-pad single-digit hour labels ("1시" -> "01시") so they sort correctly.
summary %>%
reshape2::melt(id = "지점명") %>%
mutate(variable = gsub("_mean", "", .$variable)) %>%
mutate(variable = case_when(variable == "0시" ~ "00시",
variable == "1시" ~ "01시",
variable == "2시" ~ "02시",
variable == "3시" ~ "03시",
variable == "4시" ~ "04시",
variable == "5시" ~ "05시",
variable == "6시" ~ "06시",
variable == "7시" ~ "07시",
variable == "8시" ~ "08시",
variable == "9시" ~ "09시",
TRUE ~ as.character(variable)
)) %>%
# Hourly traffic profile, one coloured line per site.
ggplot(aes(variable, value, group = 지점명, colour = 지점명)) +
geom_line(size = 2)+
theme_minimal() +
theme(axis.title.x=element_blank(),
axis.ticks.x=element_blank(),
axis.title.y=element_blank(),
axis.text.x=element_text(size = 13, angle = 30),
axis.text.y=element_text(size = 13),
strip.text.x = element_text(size = 13,
margin = margin(.1,0,.1,0, "cm")),
legend.position = "bottom",
legend.title=element_text(size=13),
legend.text=element_text(size=13)
) -> traffic_gg
# Save at print quality.
ggsave("result_traffic_trend.png", traffic_gg, width = 8, height = 6, dpi = 600)
|
library(tidyverse)
library(sf)
library(tmap)
library(osmdata)
# Mark character columns of every data frame in an OSM query result as UTF-8.
#
# `list` is an osmdata query result: a list whose data-frame elements
# (e.g. osm_points, osm_polygons) carry their geometry in the LAST column
# (assumption inherited from the original -- TODO confirm for all results).
# Returns the same list with the encoding flag of all character columns,
# except that final geometry column, set to "UTF-8".
encode_osm <- function(list){
  # vapply keeps this stdlib-only and type-stable (was purrr::map_lgl).
  for (df in names(list)[vapply(list, is.data.frame, logical(1))]) {
    last <- length(list[[df]])
    # For all columns except the last column ("geometry")
    for (col in names(list[[df]])[-last]){
      # Only character vectors can carry an encoding mark; guarding here
      # avoids the error `Encoding<-` raises on non-character columns.
      if (is.character(list[[df]][[col]])) {
        Encoding(list[[df]][[col]]) <- "UTF-8"
      }
    }
  }
  return(list)
}
## Collapse an OSM result's polygon layer onto its point layer.
## Points already covered by a polygon are dropped in favour of that
## polygon's centroid; the remaining points and all centroids are stacked
## into a single data frame.
polygons_to_points <- function(osm){
  pts <- osm[["osm_points"]]
  polys <- osm[["osm_polygons"]]
  # A point is "uncovered" when its intersection hit-list is empty.
  uncovered <- pts[lengths(st_intersects(pts, polys)) == 0, ]
  centroids <- st_centroid(polys)
  bind_rows(uncovered, centroids)
}
## Map mean model residuals per district.
##
## model    - fitted model accepted by broom::augment_columns().
## data     - data frame the model was fitted on; must contain `name`
##            (district) and `price` columns.
## mode     - tmap mode, e.g. "plot" (static) or "view" (interactive).
## textsize - size of the per-district observation-count labels.
## Returns a tmap object: choropleth of mean residual per district, with
## the observation count printed on each district.
vis_residuals <- function(model, data, mode = "plot", textsize = 1){
  # Observation count per district. FIX: previously counted the global `df`
  # instead of the `data` argument, so the labels could disagree with the
  # data actually plotted (or fail if no `df` existed).
  districts_n <- data %>% count(name) %>% st_drop_geometry()
  tmap_mode(mode)
  # NOTE(review): `districts` (district geometries) is still taken from the
  # calling environment -- consider passing it as an argument.
  model %>%
    broom::augment_columns(data) %>%
    group_by(name) %>%
    summarise(error = mean(.resid), fitted = mean(.fitted), price = mean(price)) %>%
    inner_join(districts, by = "name") %>%
    inner_join(districts_n, by = "name") %>%
    st_as_sf() %>%
    tm_shape() +
    tm_fill(col = "error", breaks = c(-70, -50, -30, -10, 10, 30, 50, 70), popup.vars = c("error", "price", "fitted")) +
    tm_text(text = "n", size = textsize) +
    tm_borders()
}
|
/analysis/R/helper.R
|
no_license
|
tillschwoerer/airbnb
|
R
| false
| false
| 1,472
|
r
|
library(tidyverse)
library(sf)
library(tmap)
library(osmdata)
# Mark character columns of every data frame in an OSM query result as UTF-8.
#
# `list` is an osmdata query result: a list whose data-frame elements
# (e.g. osm_points, osm_polygons) carry their geometry in the LAST column
# (assumption inherited from the original -- TODO confirm for all results).
# Returns the same list with the encoding flag of all character columns,
# except that final geometry column, set to "UTF-8".
encode_osm <- function(list){
  # vapply keeps this stdlib-only and type-stable (was purrr::map_lgl).
  for (df in names(list)[vapply(list, is.data.frame, logical(1))]) {
    last <- length(list[[df]])
    # For all columns except the last column ("geometry")
    for (col in names(list[[df]])[-last]){
      # Only character vectors can carry an encoding mark; guarding here
      # avoids the error `Encoding<-` raises on non-character columns.
      if (is.character(list[[df]][[col]])) {
        Encoding(list[[df]][[col]]) <- "UTF-8"
      }
    }
  }
  return(list)
}
## Collapse an OSM result's polygon layer onto its point layer.
## Points already covered by a polygon are dropped in favour of that
## polygon's centroid; the remaining points and all centroids are stacked
## into a single data frame.
polygons_to_points <- function(osm){
  pts <- osm[["osm_points"]]
  polys <- osm[["osm_polygons"]]
  # A point is "uncovered" when its intersection hit-list is empty.
  uncovered <- pts[lengths(st_intersects(pts, polys)) == 0, ]
  centroids <- st_centroid(polys)
  bind_rows(uncovered, centroids)
}
## Map mean model residuals per district.
##
## model    - fitted model accepted by broom::augment_columns().
## data     - data frame the model was fitted on; must contain `name`
##            (district) and `price` columns.
## mode     - tmap mode, e.g. "plot" (static) or "view" (interactive).
## textsize - size of the per-district observation-count labels.
## Returns a tmap object: choropleth of mean residual per district, with
## the observation count printed on each district.
vis_residuals <- function(model, data, mode = "plot", textsize = 1){
  # Observation count per district. FIX: previously counted the global `df`
  # instead of the `data` argument, so the labels could disagree with the
  # data actually plotted (or fail if no `df` existed).
  districts_n <- data %>% count(name) %>% st_drop_geometry()
  tmap_mode(mode)
  # NOTE(review): `districts` (district geometries) is still taken from the
  # calling environment -- consider passing it as an argument.
  model %>%
    broom::augment_columns(data) %>%
    group_by(name) %>%
    summarise(error = mean(.resid), fitted = mean(.fitted), price = mean(price)) %>%
    inner_join(districts, by = "name") %>%
    inner_join(districts_n, by = "name") %>%
    st_as_sf() %>%
    tm_shape() +
    tm_fill(col = "error", breaks = c(-70, -50, -30, -10, 10, 30, 50, 70), popup.vars = c("error", "price", "fitted")) +
    tm_text(text = "n", size = textsize) +
    tm_borders()
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.