content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
# run_analysis.R
# Download the UCI HAR dataset (if absent), merge the training and test
# partitions, keep the mean/std measurements, relabel everything with
# descriptive names, and write a tidy summary averaged by subject/activity.
library(dplyr)
library(data.table)

# Fetch and unpack the raw data only when it is not already present.
file <- "Coursera_DS3_Final.zip"
if (!file.exists(file)) {
  fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  download.file(fileURL, file, method = "curl")
}
if (!file.exists("UCI HAR Dataset")) {
  unzip(file)
}

# Feature names and activity labels.
# FIX: the original read the misspelled path "UCI HAR Datas et/features.txt"
# before the archive was even downloaded, which always failed.
featnms <- read.table("UCI HAR Dataset/features.txt")
feat <- read.table("UCI HAR Dataset/features.txt", col.names = c("n", "functions"))
act <- read.table("UCI HAR Dataset/activity_labels.txt", col.names = c("code", "activity"))

# Test partition: subjects, measurements (X) and activity codes (y).
subj_tst <- read.table("UCI HAR Dataset/test/subject_test.txt", col.names = "subject")
x_tst <- read.table("UCI HAR Dataset/test/X_test.txt", col.names = feat$functions)
y_tst <- read.table("UCI HAR Dataset/test/y_test.txt", col.names = "code")
# Training partition.
subj_train <- read.table("UCI HAR Dataset/train/subject_train.txt", col.names = "subject")
x_train <- read.table("UCI HAR Dataset/train/X_train.txt", col.names = feat$functions)
y_train <- read.table("UCI HAR Dataset/train/y_train.txt", col.names = "code")

# Stack train on test, then combine subject, label and measurement columns.
X <- rbind(x_train, x_tst)
Y <- rbind(y_train, y_tst)
Subj <- rbind(subj_train, subj_tst)
Mrgd_Data <- cbind(Subj, Y, X)

# Keep only the subject/code columns plus mean and std measurements.
Tidy_Data <- Mrgd_Data %>% select(subject, code, contains("mean"), contains("std"))

# Replace numeric activity codes with their descriptive labels.
Tidy_Data$code <- act[Tidy_Data$code, 2]
names(Tidy_Data)[2] <- "activity"

# Expand abbreviated variable names into readable ones.
names(Tidy_Data) <- gsub("Acc", "Accelerometer", names(Tidy_Data))
names(Tidy_Data) <- gsub("Gyro", "Gyroscope", names(Tidy_Data))
names(Tidy_Data) <- gsub("BodyBody", "Body", names(Tidy_Data))
names(Tidy_Data) <- gsub("Mag", "Magnitude", names(Tidy_Data))
names(Tidy_Data) <- gsub("^t", "Time", names(Tidy_Data))
names(Tidy_Data) <- gsub("^f", "Frequency", names(Tidy_Data))
names(Tidy_Data) <- gsub("tBody", "TimeBody", names(Tidy_Data))
names(Tidy_Data) <- gsub("-mean()", "Mean", names(Tidy_Data), ignore.case = TRUE)
names(Tidy_Data) <- gsub("-std()", "STD", names(Tidy_Data), ignore.case = TRUE)
names(Tidy_Data) <- gsub("-freq()", "Frequency", names(Tidy_Data), ignore.case = TRUE)
names(Tidy_Data) <- gsub("angle", "Angle", names(Tidy_Data))
names(Tidy_Data) <- gsub("gravity", "Gravity", names(Tidy_Data))

# Average every measurement per subject/activity pair.
# FIX: funs() is deprecated in dplyr; passing the function directly is
# equivalent.
FinalData <- Tidy_Data %>%
  group_by(subject, activity) %>%
  summarise_all(mean)

# FIX: the original called write.table(FinalData, ...) BEFORE FinalData was
# created, which raised an "object not found" error.
write.table(FinalData, "FinalData.txt", row.name = FALSE)
str(FinalData)
FinalData
|
/run_analysis.R
|
no_license
|
sindi50/Rassignment3
|
R
| false
| false
| 2,355
|
r
|
# run_analysis.R
# Download the UCI HAR dataset (if absent), merge the training and test
# partitions, keep the mean/std measurements, relabel everything with
# descriptive names, and write a tidy summary averaged by subject/activity.
library(dplyr)
library(data.table)

# Fetch and unpack the raw data only when it is not already present.
file <- "Coursera_DS3_Final.zip"
if (!file.exists(file)) {
  fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  download.file(fileURL, file, method = "curl")
}
if (!file.exists("UCI HAR Dataset")) {
  unzip(file)
}

# Feature names and activity labels.
# FIX: the original read the misspelled path "UCI HAR Datas et/features.txt"
# before the archive was even downloaded, which always failed.
featnms <- read.table("UCI HAR Dataset/features.txt")
feat <- read.table("UCI HAR Dataset/features.txt", col.names = c("n", "functions"))
act <- read.table("UCI HAR Dataset/activity_labels.txt", col.names = c("code", "activity"))

# Test partition: subjects, measurements (X) and activity codes (y).
subj_tst <- read.table("UCI HAR Dataset/test/subject_test.txt", col.names = "subject")
x_tst <- read.table("UCI HAR Dataset/test/X_test.txt", col.names = feat$functions)
y_tst <- read.table("UCI HAR Dataset/test/y_test.txt", col.names = "code")
# Training partition.
subj_train <- read.table("UCI HAR Dataset/train/subject_train.txt", col.names = "subject")
x_train <- read.table("UCI HAR Dataset/train/X_train.txt", col.names = feat$functions)
y_train <- read.table("UCI HAR Dataset/train/y_train.txt", col.names = "code")

# Stack train on test, then combine subject, label and measurement columns.
X <- rbind(x_train, x_tst)
Y <- rbind(y_train, y_tst)
Subj <- rbind(subj_train, subj_tst)
Mrgd_Data <- cbind(Subj, Y, X)

# Keep only the subject/code columns plus mean and std measurements.
Tidy_Data <- Mrgd_Data %>% select(subject, code, contains("mean"), contains("std"))

# Replace numeric activity codes with their descriptive labels.
Tidy_Data$code <- act[Tidy_Data$code, 2]
names(Tidy_Data)[2] <- "activity"

# Expand abbreviated variable names into readable ones.
names(Tidy_Data) <- gsub("Acc", "Accelerometer", names(Tidy_Data))
names(Tidy_Data) <- gsub("Gyro", "Gyroscope", names(Tidy_Data))
names(Tidy_Data) <- gsub("BodyBody", "Body", names(Tidy_Data))
names(Tidy_Data) <- gsub("Mag", "Magnitude", names(Tidy_Data))
names(Tidy_Data) <- gsub("^t", "Time", names(Tidy_Data))
names(Tidy_Data) <- gsub("^f", "Frequency", names(Tidy_Data))
names(Tidy_Data) <- gsub("tBody", "TimeBody", names(Tidy_Data))
names(Tidy_Data) <- gsub("-mean()", "Mean", names(Tidy_Data), ignore.case = TRUE)
names(Tidy_Data) <- gsub("-std()", "STD", names(Tidy_Data), ignore.case = TRUE)
names(Tidy_Data) <- gsub("-freq()", "Frequency", names(Tidy_Data), ignore.case = TRUE)
names(Tidy_Data) <- gsub("angle", "Angle", names(Tidy_Data))
names(Tidy_Data) <- gsub("gravity", "Gravity", names(Tidy_Data))

# Average every measurement per subject/activity pair.
# FIX: funs() is deprecated in dplyr; passing the function directly is
# equivalent.
FinalData <- Tidy_Data %>%
  group_by(subject, activity) %>%
  summarise_all(mean)

# FIX: the original called write.table(FinalData, ...) BEFORE FinalData was
# created, which raised an "object not found" error.
write.table(FinalData, "FinalData.txt", row.name = FALSE)
str(FinalData)
FinalData
|
########################
# Dates and Times
# "POSIXt" is a virtual class that acts as a common language between
# POSIXct and POSIXlt.

# Today's date as a Date object.
Sys.Date()
# unclass() reveals the internal value: days since the 1970-01-01 epoch.
unclass(Sys.Date())
# A date before the epoch ...
d2 <- as.Date("1969-01-01")
# ... has a negative day count.
unclass(d2)

# Current time; printing shows year-month-day, clock time and timezone.
t1 <- Sys.time()
# Class is "POSIXct" "POSIXt".
class(t1)
# unclass() on a POSIXct yields seconds since the epoch.
unclass(t1)

# POSIXlt stores the same moment as a list of calendar components.
t2 <- as.POSIXlt(Sys.time())
class(t2)
# unclass() exposes the component list (sec, min, hour, mday, mon, ...).
unclass(t2)
str(unclass(t2))

# weekdays(), months() and quarters() work on any date or time object.
# FIX: d1 was used below without ever being defined, which raised an
# "object 'd1' not found" error; define it here.
d1 <- Sys.Date()
weekdays(d1)
months(d1)
quarters(d1)

# strptime() parses dates/times out of character strings given a format.
t3 <- "October 17, 1986 08:24"
t4 <- strptime(t3, "%B%d,%Y%H:%M")

# Date-time objects support comparison and subtraction.
Sys.time() > t1
Sys.time() - t1
# difftime() lets you pick the unit of the difference explicitly.
difftime(Sys.time(), t1, units = 'days')
# For more advanced manipulation see the lubridate package.
|
/someTips/HowToOperateDateAndTime.R
|
no_license
|
BingHongLi/HowToUseR
|
R
| false
| false
| 1,066
|
r
|
########################
# Dates and Times
# "POSIXt" is a virtual class that acts as a common language between
# POSIXct and POSIXlt.

# Today's date as a Date object.
Sys.Date()
# unclass() reveals the internal value: days since the 1970-01-01 epoch.
unclass(Sys.Date())
# A date before the epoch ...
d2 <- as.Date("1969-01-01")
# ... has a negative day count.
unclass(d2)

# Current time; printing shows year-month-day, clock time and timezone.
t1 <- Sys.time()
# Class is "POSIXct" "POSIXt".
class(t1)
# unclass() on a POSIXct yields seconds since the epoch.
unclass(t1)

# POSIXlt stores the same moment as a list of calendar components.
t2 <- as.POSIXlt(Sys.time())
class(t2)
# unclass() exposes the component list (sec, min, hour, mday, mon, ...).
unclass(t2)
str(unclass(t2))

# weekdays(), months() and quarters() work on any date or time object.
# FIX: d1 was used below without ever being defined, which raised an
# "object 'd1' not found" error; define it here.
d1 <- Sys.Date()
weekdays(d1)
months(d1)
quarters(d1)

# strptime() parses dates/times out of character strings given a format.
t3 <- "October 17, 1986 08:24"
t4 <- strptime(t3, "%B%d,%Y%H:%M")

# Date-time objects support comparison and subtraction.
Sys.time() > t1
Sys.time() - t1
# difftime() lets you pick the unit of the difference explicitly.
difftime(Sys.time(), t1, units = 'days')
# For more advanced manipulation see the lubridate package.
|
################################
##This script converts hg18 to hg19 build formats##
################################
#if you want see commands in output file
#options(echo=TRUE)
#args <- commandArgs(trailingOnly = TRUE)
#print(args)
#setwd(args)
################################
# Paths of the per-sample CNprobes.tab files, filled in by the driver
# loop at the bottom of this script.
file <- c()
setwd("~/Desktop/GSE11960") #set ~/Desktop/GSE11960 as working directory
# All directories under the working directory; list.dirs() returns the
# working directory itself (".") first, hence the driver loop starts at 2.
dirs <- list.dirs()
################################
##This function reads a CNprobe format file and converts it to a BED
##standard format file saved alongside the input.
##Input columns (after the one header line): ID, chromosome, position, log2.
##Output BED columns: chr<chromosome>, start, start+1, log2, ID.
cnprobeToBed <- function(file){
  # Skip the header line; nrow keeps read.table from reading past the data.
  lstall <- read.table(file, skip=1, sep="\t", fill=TRUE, nrow=length(count.fields(file))-1)
  # as.matrix() on a mixed-type data frame yields a character matrix.
  newmat <- as.matrix(lstall)
  # Vectorised column shuffle; replaces the original element-by-element
  # loop (which also printed every row index, flooding the console).
  combine <- cbind(
    paste("chr", newmat[, 2], sep=""),  # chromosome with UCSC "chr" prefix
    newmat[, 3],                        # start position
    as.numeric(newmat[, 3]) + 1,        # end = start + 1 (1-bp interval)
    newmat[, 4],                        # log2 ratio
    newmat[, 1]                         # probe ID
  )
  write.table(combine, file=gsub("CNprobes.tab", "CNprobes18.tab", file),
              quote=FALSE, row.names=FALSE, col.names=FALSE, sep="\t")
  print("written to CNprobes18 file")
}
###################################
##Runs the UCSC liftOver command-line utility on the BED file produced by
##cnprobeToBed(). Two new files appear in the same folder:
##CNprobes19.mapped.tab (lifted probes) and CNprobes19_u.tab (unmapped).
stepLiftOver <- function(file){
  # Derive the three file names liftOver needs from the CNprobe path.
  bed_in    <- gsub("CNprobes", "CNprobes18", file)
  bed_map   <- gsub("CNprobes.tab", "CNprobes19.mapped.tab", file)
  bed_unmap <- gsub("CNprobes.tab", "CNprobes19_u.tab", file)
  #Syntax: liftOver <inputfile> <location of chain file> <outputfile> <unmappedfile>
  #NB: the chain file location has to be hardcoded and correct chain file has to be pasted to ensure functioning of the this tool
  cmd_parts <- c("~/Desktop/bin/liftOver", bed_in,
                 "~/Desktop/hg18ToHg19.over.chain", bed_map, bed_unmap)
  system(paste(cmd_parts, collapse = " "))
  print("successful conversion")
}
#################################
##This function reads the liftOver-mapped file (BED format) and generates a
##CNprobe format file saved as CNprobes19.tab in the same folder.
##BED input columns: chr, start, end, log2, ID.
##Output columns: ID, chro, pos, log2 (with an embedded header row), where
##pos is the rounded midpoint of the 1-bp BED interval.
bedToCNprobe <- function(file){
  cnprobefile <- gsub("CNprobes.tab", "CNprobes19.mapped.tab", file)
  # FIX: liftOver's mapped output carries no header line, so nothing may be
  # skipped here (the original skip=1 silently dropped the first probe).
  newlist <- read.table(cnprobefile, sep="\t", fill=TRUE)
  print("read CNprobes 19 mapped file in BED format")
  listmat <- as.matrix(newlist)  # character matrix
  # One header row plus one row per probe.
  # FIX: the original loop also started at row 2, losing another probe.
  newcombine <- matrix(NA, ncol=4, nrow=nrow(newlist) + 1)
  newcombine[1, ] <- c("ID", "chro", "pos", "log2")
  for (i in seq_len(nrow(newlist))) {
    startpos <- as.numeric(listmat[i, 2])
    endpos   <- as.numeric(listmat[i, 3])
    # Midpoint of the interval, rounded to a whole base
    # (digits= spelled out; the original relied on partial matching of
    # "digit=").
    pos <- round(mean(c(startpos, endpos)), digits=0)
    newcombine[i + 1, ] <- c(listmat[i, 5],
                             gsub("chr", "", listmat[i, 1]),  # strip UCSC prefix
                             pos,
                             listmat[i, 4])
  }
  write.table(newcombine, file=gsub("CNprobes.tab", "CNprobes19.tab", file),
              quote=FALSE, row.names=FALSE, col.names=FALSE, sep="\t")
  print("CNprobes19 file written in CNprobes format")
}
##################################
##execute the functions for all files in the working directory.
# Skip dirs[1] (the working directory itself); each subdirectory is expected
# to contain a CNprobes.tab file.
# NOTE(review): 2:length(dirs) misbehaves when there are no subdirectories
# (it yields c(2, 1)) -- confirm the data layout guarantees at least one.
for(i in 2:length(dirs)){
file[i-1] <- paste(dirs[i], "CNprobes.tab", sep="/")
cnprobeToBed(file[i-1])
stepLiftOver(file[i-1])
bedToCNprobe(file[i-1])
}
|
/Prisni/buildConversion.r
|
permissive
|
ainijulia/progenetixR
|
R
| false
| false
| 3,691
|
r
|
################################
##This script converts hg18 to hg19 build formats##
################################
#if you want see commands in output file
#options(echo=TRUE)
#args <- commandArgs(trailingOnly = TRUE)
#print(args)
#setwd(args)
################################
# Paths of the per-sample CNprobes.tab files, filled in by the driver
# loop at the bottom of this script.
file <- c()
setwd("~/Desktop/GSE11960") #set ~/Desktop/GSE11960 as working directory
# All directories under the working directory; list.dirs() returns the
# working directory itself (".") first, hence the driver loop starts at 2.
dirs <- list.dirs()
################################
##This function reads a CNprobe format file and converts it to a BED
##standard format file saved alongside the input.
##Input columns (after the one header line): ID, chromosome, position, log2.
##Output BED columns: chr<chromosome>, start, start+1, log2, ID.
cnprobeToBed <- function(file){
  # Skip the header line; nrow keeps read.table from reading past the data.
  lstall <- read.table(file, skip=1, sep="\t", fill=TRUE, nrow=length(count.fields(file))-1)
  # as.matrix() on a mixed-type data frame yields a character matrix.
  newmat <- as.matrix(lstall)
  # Vectorised column shuffle; replaces the original element-by-element
  # loop (which also printed every row index, flooding the console).
  combine <- cbind(
    paste("chr", newmat[, 2], sep=""),  # chromosome with UCSC "chr" prefix
    newmat[, 3],                        # start position
    as.numeric(newmat[, 3]) + 1,        # end = start + 1 (1-bp interval)
    newmat[, 4],                        # log2 ratio
    newmat[, 1]                         # probe ID
  )
  write.table(combine, file=gsub("CNprobes.tab", "CNprobes18.tab", file),
              quote=FALSE, row.names=FALSE, col.names=FALSE, sep="\t")
  print("written to CNprobes18 file")
}
###################################
##Runs the UCSC liftOver command-line utility on the BED file produced by
##cnprobeToBed(). Two new files appear in the same folder:
##CNprobes19.mapped.tab (lifted probes) and CNprobes19_u.tab (unmapped).
stepLiftOver <- function(file){
  # Derive the three file names liftOver needs from the CNprobe path.
  bed_in    <- gsub("CNprobes", "CNprobes18", file)
  bed_map   <- gsub("CNprobes.tab", "CNprobes19.mapped.tab", file)
  bed_unmap <- gsub("CNprobes.tab", "CNprobes19_u.tab", file)
  #Syntax: liftOver <inputfile> <location of chain file> <outputfile> <unmappedfile>
  #NB: the chain file location has to be hardcoded and correct chain file has to be pasted to ensure functioning of the this tool
  cmd_parts <- c("~/Desktop/bin/liftOver", bed_in,
                 "~/Desktop/hg18ToHg19.over.chain", bed_map, bed_unmap)
  system(paste(cmd_parts, collapse = " "))
  print("successful conversion")
}
#################################
##This function reads the liftOver-mapped file (BED format) and generates a
##CNprobe format file saved as CNprobes19.tab in the same folder.
##BED input columns: chr, start, end, log2, ID.
##Output columns: ID, chro, pos, log2 (with an embedded header row), where
##pos is the rounded midpoint of the 1-bp BED interval.
bedToCNprobe <- function(file){
  cnprobefile <- gsub("CNprobes.tab", "CNprobes19.mapped.tab", file)
  # FIX: liftOver's mapped output carries no header line, so nothing may be
  # skipped here (the original skip=1 silently dropped the first probe).
  newlist <- read.table(cnprobefile, sep="\t", fill=TRUE)
  print("read CNprobes 19 mapped file in BED format")
  listmat <- as.matrix(newlist)  # character matrix
  # One header row plus one row per probe.
  # FIX: the original loop also started at row 2, losing another probe.
  newcombine <- matrix(NA, ncol=4, nrow=nrow(newlist) + 1)
  newcombine[1, ] <- c("ID", "chro", "pos", "log2")
  for (i in seq_len(nrow(newlist))) {
    startpos <- as.numeric(listmat[i, 2])
    endpos   <- as.numeric(listmat[i, 3])
    # Midpoint of the interval, rounded to a whole base
    # (digits= spelled out; the original relied on partial matching of
    # "digit=").
    pos <- round(mean(c(startpos, endpos)), digits=0)
    newcombine[i + 1, ] <- c(listmat[i, 5],
                             gsub("chr", "", listmat[i, 1]),  # strip UCSC prefix
                             pos,
                             listmat[i, 4])
  }
  write.table(newcombine, file=gsub("CNprobes.tab", "CNprobes19.tab", file),
              quote=FALSE, row.names=FALSE, col.names=FALSE, sep="\t")
  print("CNprobes19 file written in CNprobes format")
}
##################################
##execute the functions for all files in the working directory.
# Skip dirs[1] (the working directory itself); each subdirectory is expected
# to contain a CNprobes.tab file.
# NOTE(review): 2:length(dirs) misbehaves when there are no subdirectories
# (it yields c(2, 1)) -- confirm the data layout guarantees at least one.
for(i in 2:length(dirs)){
file[i-1] <- paste(dirs[i], "CNprobes.tab", sep="/")
cnprobeToBed(file[i-1])
stepLiftOver(file[i-1])
bedToCNprobe(file[i-1])
}
|
# emp_data: simple linear regression of employee churn-out rate on salary hike.
library(readr)
ed <- read_csv("C:/Users/Agnelo Christy/Desktop/Data Science!/Assignment 4 - Simple Linear Regression/emp_data.csv")
View(ed)

# Predictor (x) and response (y). The columns are always referenced
# explicitly through ed$, so the original attach(ed) was unnecessary
# namespace pollution and has been dropped.
x <- ed$Salary_hike
x
y <- ed$Churn_out_rate
y

# Check for missing values.
is.na(x)
is.na(y)
# There are NO N.A values

# Business moments
# Central tendencies
mean(x)
mean(y)
median(x)
median(y)
library(NCmisc)
Mode(x)
Mode(y)
# Standard deviation
sd(x)
sd(y)
# Variance
var(x)
var(y)

# "e1071" supplies skewness/kurtosis (3rd and 4th business moments).
# FIX: install only when missing rather than unconditionally on every run.
if (!requireNamespace("e1071", quietly = TRUE)) install.packages("e1071")
library(e1071)
# 3rd B.M
skewness(x) # Right skewed
skewness(y) # positive skewed, slightly right skewed
# 4th B.M
kurtosis(x) # Distribution is flat
kurtosis(y) # Distribution is flat

# Boxplots
boxplot(x, las = 1, main = "Salary Hike")
boxplot(y, las = 1, main = "Churnout rate")
# Zero outliers

if (!requireNamespace("ggcorrplot", quietly = TRUE)) install.packages("ggcorrplot")
library(ggcorrplot)
# FIX: round() with no digits argument truncated every coefficient to -1/0/1;
# keep two decimals. And ggcorrplot expects the correlation MATRIX, not the
# raw data frame the original passed.
corr <- round(cor(ed), 2)
ggcorrplot(corr) # Correlation plot

plot(x, y, xlab = "Salary hike", ylab = "Churn out rate", las = 1) # Scatter plot shows negative linearity
cor(x, y)
# r-value = -0.9117: a strong negative correlation (the original comment
# dropped the sign)

# Model building
model <- lm(y ~ x) # Linear regression of y on x
model
summary(model) # summary of all attributes

# Transformations in search of a better model
#1 log transform
x1 <- log(x)
model1 <- lm(y ~ x1)
summary(model1)
#2 square
x2 <- x^2
model2 <- lm(y ~ x2)
summary(model2)
#3 reciprocal
x3 <- 1/x
model3 <- lm(y ~ x3)
summary(model3)
#4 reciprocal square
x4 <- 1/(x^2)
model4 <- lm(y ~ x4)
summary(model4)

# Best fit model with p-values < 0.05 and R squared value of 0.8312
model
summary(model)

# Intervals
confint(model, level = 0.95) # 95% confidence intervals
predict(model, interval = "predict") # Prediction intervals for future responses
|
/New_emp_data.R
|
no_license
|
sandeepnjois/ds
|
R
| false
| false
| 2,025
|
r
|
# emp_data: simple linear regression of employee churn-out rate on salary hike.
library(readr)
ed <- read_csv("C:/Users/Agnelo Christy/Desktop/Data Science!/Assignment 4 - Simple Linear Regression/emp_data.csv")
View(ed)

# Predictor (x) and response (y). The columns are always referenced
# explicitly through ed$, so the original attach(ed) was unnecessary
# namespace pollution and has been dropped.
x <- ed$Salary_hike
x
y <- ed$Churn_out_rate
y

# Check for missing values.
is.na(x)
is.na(y)
# There are NO N.A values

# Business moments
# Central tendencies
mean(x)
mean(y)
median(x)
median(y)
library(NCmisc)
Mode(x)
Mode(y)
# Standard deviation
sd(x)
sd(y)
# Variance
var(x)
var(y)

# "e1071" supplies skewness/kurtosis (3rd and 4th business moments).
# FIX: install only when missing rather than unconditionally on every run.
if (!requireNamespace("e1071", quietly = TRUE)) install.packages("e1071")
library(e1071)
# 3rd B.M
skewness(x) # Right skewed
skewness(y) # positive skewed, slightly right skewed
# 4th B.M
kurtosis(x) # Distribution is flat
kurtosis(y) # Distribution is flat

# Boxplots
boxplot(x, las = 1, main = "Salary Hike")
boxplot(y, las = 1, main = "Churnout rate")
# Zero outliers

if (!requireNamespace("ggcorrplot", quietly = TRUE)) install.packages("ggcorrplot")
library(ggcorrplot)
# FIX: round() with no digits argument truncated every coefficient to -1/0/1;
# keep two decimals. And ggcorrplot expects the correlation MATRIX, not the
# raw data frame the original passed.
corr <- round(cor(ed), 2)
ggcorrplot(corr) # Correlation plot

plot(x, y, xlab = "Salary hike", ylab = "Churn out rate", las = 1) # Scatter plot shows negative linearity
cor(x, y)
# r-value = -0.9117: a strong negative correlation (the original comment
# dropped the sign)

# Model building
model <- lm(y ~ x) # Linear regression of y on x
model
summary(model) # summary of all attributes

# Transformations in search of a better model
#1 log transform
x1 <- log(x)
model1 <- lm(y ~ x1)
summary(model1)
#2 square
x2 <- x^2
model2 <- lm(y ~ x2)
summary(model2)
#3 reciprocal
x3 <- 1/x
model3 <- lm(y ~ x3)
summary(model3)
#4 reciprocal square
x4 <- 1/(x^2)
model4 <- lm(y ~ x4)
summary(model4)

# Best fit model with p-values < 0.05 and R squared value of 0.8312
model
summary(model)

# Intervals
confint(model, level = 0.95) # 95% confidence intervals
predict(model, interval = "predict") # Prediction intervals for future responses
|
library(tuneR)
# Register macOS's afplay binary as the external player used by tuneR's
# play(); adjust this path on other platforms.
setWavPlayer('/usr/bin/afplay')
## Global variables can go here
# Frequency in Hz of piano key number n under 12-tone equal temperament
# (key 49 = A4 = 440 Hz).
freqFromNote <- function(n) {
  440 * 2^((n - 49) / 12)
}
# Nearest piano key number for a frequency f in Hz (inverse of freqFromNote).
noteFromFreq <- function(f) {
  round(49 + 12 * log2(f / 440))
}
# Frequency produced by a length ratio l relative to a reference frequency
# fr (string-length model: doubling the length halves the pitch).
freqFromLength <- function(fr, l) {
  fr / l
}
# Length ratio that produces frequency f from reference frequency fr
# (inverse of freqFromLength).
lengthFromFreq <- function(f, fr) {
  fr / f
}
# One "second" of phase values: 44100 samples spanning 0..2*pi.
x <- seq(0, 2*pi, length = 44100)
# 440 cycles over the span -> a 440 Hz tone when played at 44.1 kHz,
# scaled close to the 16-bit amplitude limit.
channel <- round(32000 * sin(440 * x))
w1 <- Wave(left = channel)
# Linearly rising frequency multiplier for a 440 -> 880 Hz sweep.
# NOTE(review): with sin(f * x) the instantaneous frequency is f + f'*x and
# overshoots 880 Hz near the end -- confirm this chirp shape is intentional.
f <- seq(440, 880, length.out=length(x))
channel <- round(32000 * sin(f * x))
w2 <- Wave(left = channel)
# Note table; expects columns "name" and "freq" -- TODO confirm the
# notes.txt schema.
d <- read.table('notes.txt', stringsAsFactors=FALSE, header=TRUE)
# Tempo: beats per minute -> seconds per beat.
bpm <- 152
dt <- 60/bpm
beats <- 6.25
# Time axis for the glissando in 1 ms steps (note: shadows base::t()).
t <- seq(0, (beats)*dt, 0.001)
start <- 'Ds3'
end <- 'Ds7'
# Length ratios sweeping from the start note (ratio 1) to the end note.
l <- seq(1, lengthFromFreq(d$freq[d$name == end], d$freq[d$name == start]),
length.out=length(t))
fr <- d$freq[d$name==start]
# Frequency trajectory implied by the length sweep.
f <- freqFromLength(fr, l)
# Phase values covering the full duration at 44.1 kHz.
x <- seq(0, 2*pi, length = 44100*max(t))
# Resample the frequency curve onto the audio-rate grid.
ff <- approx(seq(0, 2*pi, length.out=length(t)), f, x)$y
channel <- round(32000 * sin(ff * x))
w3 <- Wave(left = channel)
|
/02.R
|
no_license
|
richardsc/glissandoApp
|
R
| false
| false
| 997
|
r
|
library(tuneR)
# Register macOS's afplay binary as the external player used by tuneR's
# play(); adjust this path on other platforms.
setWavPlayer('/usr/bin/afplay')
## Global variables can go here
# Frequency in Hz of piano key number n under 12-tone equal temperament
# (key 49 = A4 = 440 Hz).
freqFromNote <- function(n) {
  440 * 2^((n - 49) / 12)
}
# Nearest piano key number for a frequency f in Hz (inverse of freqFromNote).
noteFromFreq <- function(f) {
  round(49 + 12 * log2(f / 440))
}
# Frequency produced by a length ratio l relative to a reference frequency
# fr (string-length model: doubling the length halves the pitch).
freqFromLength <- function(fr, l) {
  fr / l
}
# Length ratio that produces frequency f from reference frequency fr
# (inverse of freqFromLength).
lengthFromFreq <- function(f, fr) {
  fr / f
}
# One "second" of phase values: 44100 samples spanning 0..2*pi.
x <- seq(0, 2*pi, length = 44100)
# 440 cycles over the span -> a 440 Hz tone when played at 44.1 kHz,
# scaled close to the 16-bit amplitude limit.
channel <- round(32000 * sin(440 * x))
w1 <- Wave(left = channel)
# Linearly rising frequency multiplier for a 440 -> 880 Hz sweep.
# NOTE(review): with sin(f * x) the instantaneous frequency is f + f'*x and
# overshoots 880 Hz near the end -- confirm this chirp shape is intentional.
f <- seq(440, 880, length.out=length(x))
channel <- round(32000 * sin(f * x))
w2 <- Wave(left = channel)
# Note table; expects columns "name" and "freq" -- TODO confirm the
# notes.txt schema.
d <- read.table('notes.txt', stringsAsFactors=FALSE, header=TRUE)
# Tempo: beats per minute -> seconds per beat.
bpm <- 152
dt <- 60/bpm
beats <- 6.25
# Time axis for the glissando in 1 ms steps (note: shadows base::t()).
t <- seq(0, (beats)*dt, 0.001)
start <- 'Ds3'
end <- 'Ds7'
# Length ratios sweeping from the start note (ratio 1) to the end note.
l <- seq(1, lengthFromFreq(d$freq[d$name == end], d$freq[d$name == start]),
length.out=length(t))
fr <- d$freq[d$name==start]
# Frequency trajectory implied by the length sweep.
f <- freqFromLength(fr, l)
# Phase values covering the full duration at 44.1 kHz.
x <- seq(0, 2*pi, length = 44100*max(t))
# Resample the frequency curve onto the audio-rate grid.
ff <- approx(seq(0, 2*pi, length.out=length(t)), f, x)$y
channel <- round(32000 * sin(ff * x))
w3 <- Wave(left = channel)
|
#------------------------------------#
# Mixed effects modeling of accuracy #
# Kevin Potter #
# Updated 06/01/2017 #
#------------------------------------#
# Clear workspace
# NOTE(review): rm(list = ls()) wipes the user's session; prefer running the
# script in a fresh R session instead.
rm(list = ls())
# Save current directory
orig_dir = getwd()
# Indicate whether to create figures
plotYes = T
# Indicate whether to save figures
savePlot = T
# Open a pdf graphics device in Plots/; it stays open until the dev.off()
# call at the very end of the script.
if ( savePlot & plotYes ) {
setwd('Plots')
pdf( 'Mixed_effects_accuracy.pdf' )
setwd(orig_dir)
}
# Indicate whether to carry out model-fitting
modelYes = T
# Indicate whether debugging messages should be printed
debugging = T
# Index
# Lookup - 01: Initial setup
# Lookup - 02: Plot effects on P(Correct)
# Lookup - 03: Mixed effects modeling
###
### Initial setup
###
# Lookup - 01
# Load in data
setwd( 'Data' )
# Provides the trimDat data frame used below -- TODO confirm contents.
load( 'FYP_JD.RData' )
setwd( orig_dir )
# For easy manipulation
d = trimDat
# Short column codes; presumably S = subject, Cnd = condition, SS = set
# size, LP = lag position, Ac = accuracy -- confirm against the data file.
colnames( d ) = c( 'S', 'Cnd', 'Tr', 'SS', 'CS',
'LS', 'PT', 'PC', 'PL', 'LP',
'LT', 'RT1', 'Ch', 'RT2', 'Cnf',
'AcL', 'Ac' )
# Load in useful packages
# Load in package for mixed effects modeling
# install.packages( 'lme4' )
library( lme4 )
# Define additional useful functions
# Presumably supplies blankPlot(), customAxes() and logistic() used below --
# confirm.
source( 'F0_Useful_functions.R' )
###
### Plot effects on P(Correct)
###
# Lookup - 02
# Descriptive figures: marginal (main-effect) means of accuracy by
# condition, set-size and lag, followed by per-condition interaction panels.
if ( plotYes ) {
if ( debugging ) print( "Descriptive figures (Main effects)" )
Cnd_labels = c( '1a: NL', '1b: NC', '1c: BN', '1d: BO' )
### Main effects ###
if (!savePlot) x11()
# 2x4 grid cells arranged so panels 1 and 2 sit on top and panel 3 is
# centered below (cells labeled 4 act as margins).
layout( rbind( c( 1, 1, 2, 2 ), c( 4, 3, 3, 4 ) ) )
if ( debugging ) print( "Figure 1 (Condition)" )
# Condition
# Mean accuracy per condition.
ef = aggregate( d$Ac, list( d$Cnd ), mean )
colnames( ef ) = c( 'Cnd', 'P' )
xl = c(.5, 4.5 ); yl = c(0,1)
blankPlot( xl, yl )
# Horizontal reference lines at .25/.50/.75.
segments( rep(.5,3), c(.25,.5,.75), rep(4.5,3),
c(.25,.5,.75), col = 'grey80', lwd = 2 )
customAxes( xl, yl, label = c( 'Condition', 'P(Correct)' ),
inc = c( 0, .25 ) )
axis( 1, 1:4, Cnd_labels, tick = F, line = -.5 )
points( 1:4, ef$P, pch = c(15,19,17,18), cex = 1.5 )
if ( debugging ) print( "Figure 2 (Set-size)" )
# Set-size
ef = aggregate( d$Ac, list( d$SS ), mean )
colnames( ef ) = c( 'SS', 'P' )
xl = c(.5, 4.5 ); yl = c(0,1)
blankPlot( xl, yl )
segments( rep(.5,3), c(.25,.5,.75), rep(4.5,3),
c(.25,.5,.75), col = 'grey80', lwd = 2 )
customAxes( xl, yl, label = c( 'Set-size', 'P(Correct)' ),
inc = c( 0, .25 ) )
axis( 1, 1:4, sort( unique( ef$SS ) ), tick = F, line = -.5 )
lines( 1:4, ef$P, type = 'b', pch = 19, cex = 1.5 )
# Lag
if ( debugging ) print( "Figure 3 (Lag)" )
ef = aggregate( d$Ac, list( d$LP ), mean )
colnames( ef ) = c( 'LP', 'P' )
xl = c(.5, 5.5 ); yl = c(0,1)
blankPlot( xl, yl )
segments( rep(.5,3), c(.25,.5,.75), rep(5.5,3),
c(.25,.5,.75), col = 'grey80', lwd = 2 )
customAxes( xl, yl, label = c( 'Lag', 'P(Correct)' ),
inc = c( 0, .25 ) )
# Lag axis labels are LP + 1.
axis( 1, 1:5, sort( unique( ef$LP ) )+1, tick = F, line = -.5 )
lines( 1:5, ef$P, type = 'b', pch = 19, cex = 1.5,
col = c(5,1:4) )
# Reset layout for figures
layout( cbind( 1 ) )
### Interactions ###
# Compute average P(Correct) over all possible conditions
ef = aggregate( d$Ac, list( d$SS, d$LP, d$Cnd ), mean )
colnames( ef ) = c( 'SS', 'LP', 'Cnd', 'P' )
ef = ef[ ef$LP != 0, ] # Remove lag 0 condition
if (!savePlot) x11();
# For 4 separate plotting panels, comment out
# the following line:
layout( matrix( 1:4, 2, 2, byrow = T ) )
if ( debugging ) print( "Descriptive figures (Interactions)" )
# One panel per condition: accuracy by lag, one line per set-size.
for ( i in 1:4 ) {
if ( debugging ) {
print( paste( "Figure", i ) )
}
xl = c(.5, 4.5 ); yl = c(0,1)
blankPlot( xl, yl )
customAxes( xl, yl, label = c( ' ', 'P(Correct)' ),
inc = c( 0, .25 ) )
axis( 1, 1:4, sort( unique( ef$LP ) ) + 1, tick = F, line = -.5 )
mtext( 'Lag', side = 1, line = 2 )
xyd = par( 'usr' )
# Only the third panel carries the legend.
if ( i == 3 )
legend( xyd[1] + (xyd[2]-xyd[1])*.05,
xyd[3] + (xyd[4]-xyd[3])*.6,
paste( 'Set Size:', sort( unique( ef$SS ) ) ),
fill = 1:5, bty = 'n' )
sel = ef$Cnd == i
inc = 1
for ( lp in sort( unique( ef$SS[sel] ) ) ) {
cur = sel & ef$SS == lp
x = ef$LP[cur]
y = ef$P[cur]
lines( x, y, type = 'b', pch = 19, col = inc )
inc = inc + 1
}
title( paste( "Experiment", Cnd_labels[i] ) )
}
# Reset layout
layout( cbind( 1 ) )
}
###
### Mixed effects modeling
###
# Lookup - 03
# Define function that plots the predicted versus observed
# effects
plot_fit = function( fit, dat, new = T ) {
  # Purpose:
  # Plots the observed effects on P(Correct) (filled points, solid lines)
  # against the model-predicted effects (open points, dashed lines) for
  # set-size by lag, averaged over subjects, for a single condition.
  # Arguments:
  # fit - A lme4 fit object
  # dat - The data that was fitted (expected to hold exactly one condition)
  # new - Logical; if true, a new plotting window is generated
  # Returns:
  # A figure showing the predicted and observed effects on P(Correct).

  # Average the fitted probabilities over subjects for each design cell.
  est = predict( fit )
  pred = aggregate( as.vector( est ), list( dat$SS,
                                            dat$LP, dat$Cnd ),
                    function(x) mean( logistic(x) ) )
  colnames( pred ) = c( 'SS', 'LP', 'Cnd', 'P' )

  # Compute average observed P(Correct) over all design cells.
  ef = aggregate( dat$Ac, list( dat$SS, dat$LP, dat$Cnd ), mean )
  colnames( ef ) = c( 'SS', 'LP', 'Cnd', 'P' )
  ef = ef[ ef$LP != 0, ] # Remove lag 0 condition
  # NOTE(review): 'cur' below indexes both ef and pred, which assumes dat
  # contains no LP == 0 rows so the two stay row-aligned -- confirm.

  # Extract condition
  cur_cnd = unique( ef$Cnd )
  if ( new ) x11();
  Cnd_labels = c( '1a: NL', '1b: NC', '1c: BN', '1d: BO' )
  xl = c(.5, 4.5 ); yl = c(0,1)
  blankPlot( xl, yl )
  customAxes( xl, yl, label = c( ' ', 'P(Correct)' ),
              inc = c( 0, .25 ) )
  axis( 1, 1:4, sort( unique( ef$LP ) ) + 1, tick = F, line = -.5 )
  mtext( 'Lag', side = 1, line = 2 )
  xyd = par( 'usr' )
  legend( 'top',
          paste( 'Set Size:', sort( unique( ef$SS ) ) ),
          fill = 1:4, bty = 'n', horiz = T )
  # With single-condition data this selects every row; kept for safety.
  sel = ef$Cnd == cur_cnd
  inc = 1
  if ( debugging ) {
    string = paste( "Model fit figure (", Cnd_labels[cur_cnd],
                    ")", sep = "" )
    print( string )
  }
  for ( lp in sort( unique( ef$SS ) ) ) {
    # Plot observed (filled points, solid line)
    cur = ef$SS == lp
    x = ef$LP[cur]
    y = ef$P[cur]
    lines( x, y, type = 'b', pch = 19, col = inc )
    # Plot predicted (open points, dashed line)
    xp = pred$LP[cur]
    yp = pred$P[cur]
    lines( xp, yp, type = 'b', pch = 21, lty = 2,
           col = inc, cex = 1.25 )
    inc = inc + 1
  }
  # FIX: the original titled the panel with Cnd_labels[i], using the GLOBAL
  # loop index 'i' left over from whatever loop last ran, so the label could
  # name the wrong condition; use the condition actually plotted.
  title( paste( "Experiment", Cnd_labels[cur_cnd] ) )
}
# Fit a sequence of nested logistic mixed-effects models (random intercept
# per subject) separately for each of the four conditions.
if ( modelYes ) {
cnd_iter = 1:4
for (j in cnd_iter) {
if ( debugging ) {
print( paste( "Condition", j ) )
}
# Pull out relevant data
# Keeps PT == 0 trials for condition j with lag positions 1-4.
dtbf = d[ d$PT == 0 & d$Cnd == j & d$LP %in% 1:4,
c('S','Ac','Cnd','SS','LP') ]
# Shift numeric representation of set-size
dtbf$SSn = dtbf$SS - 2
# Dummy-coded variables for SS
dtbf$SS3i = 0
dtbf$SS3i[ dtbf$SS == 3 ] = 1
dtbf$SS4i = 0
dtbf$SS4i[ dtbf$SS == 4 ] = 1
dtbf$SS5i = 0
dtbf$SS5i[ dtbf$SS == 5 ] = 1
# Create set-size slopes by lag position (Dummy coded)
dtbf$SSnxL1 = 0;
dtbf$SSnxL2 = 0; dtbf$SSnxL3 = 0;
dtbf$SSnxL4 = 0;
# Copy the set-size slope into the column matching each row's lag; the
# grep locates the SSnxL1 column and i-1 offsets to SSnxL2..SSnxL4.
for ( i in 1:4 ) {
sel = dtbf$LP == i
ind = grep( 'SSnxL1', colnames( dtbf ) )
dtbf[ sel, ind + i - 1 ] = dtbf$SSn[ sel ]
}
# Coefficients represent the change in the slope of the
# linear trend for set-size based on each type of lag
# relative to the no-lag condition
if (debugging) print( "Null model" )
# m0: intercept-only baseline.
m0 = glmer( Ac ~ 1 + (1|S),
family = binomial('logit'), data = dtbf )
if ( plotYes ) {
if (!savePlot) np = T else np = F
plot_fit( m0, dtbf, new = np )
mtext( 'Null Model (No fixed effects)', side = 3,
outer = T, line = -1 )
}
if (debugging) print( "Main effect of set size" )
# m1: linear set-size effect.
m1 = glmer( Ac ~ 1 + SSn + (1|S),
family = binomial('logit'), data = dtbf )
if ( plotYes ) {
if (!savePlot) np = T else np = F
plot_fit( m1, dtbf, new = np )
mtext( 'Main Effect of Set Size', side = 3,
outer = T, line = -1 )
}
if (debugging) print( "Main effect of lag" )
# m2: linear lag effect.
m2 = glmer( Ac ~ 1 + LP + (1|S),
family = binomial('logit'), data = dtbf )
if ( plotYes ) {
if (!savePlot) np = T else np = F
plot_fit( m2, dtbf, new = np )
mtext( 'Main Effect of Lag', side = 3,
outer = T, line = -1 )
}
if (debugging) print( "All main effects" )
# m3: both main effects.
m3 = glmer( Ac ~ 1 + SSn + LP + (1|S),
family = binomial('logit'), data = dtbf )
if ( plotYes ) {
if (!savePlot) np = T else np = F
plot_fit( m3, dtbf, new = np )
mtext( 'All Main Effects', side = 3,
outer = T, line = -1 )
}
# NOTE(review): this print (and the one for m5) is not wrapped in
# if (debugging) like its siblings above -- confirm that is intentional.
print( 'Interaction of set size and lag (1)' )
# m4: set-size slope varying by lag (linear set-size coding).
m4 = glmer( Ac ~ 1 + SSn + SSnxL2 + SSnxL3 + SSnxL4 + (1|S),
family = binomial('logit'), data = dtbf )
if ( plotYes ) {
if (!savePlot) np = T else np = F
plot_fit( m4, dtbf, new = np )
mtext( 'Interaction of Set Size and Lag', side = 3,
outer = T, line = -1 )
}
print( 'Interaction of set size and lag (2)' )
# m5: dummy-coded set-size plus the lag-specific slopes.
m5 = glmer( Ac ~ 1 + SS3i + SS4i + SS5i +
SSnxL2 + SSnxL3 + SSnxL4 + (1|S),
family = binomial('logit'), data = dtbf )
if ( plotYes ) {
if (!savePlot) np = T else np = F
plot_fit( m5, dtbf, new = np )
mtext( 'Interaction of Set Size and Lag (2)', side = 3,
outer = T, line = -1 )
}
# Obtain summary and significance of effects
# The if (0) guard disables this reporting block without deleting it.
if (0) { # Hack equivalent to block quote
results = list("Null" = m0,
"SS" = m1,
"Lag" = m2,
"SS+Lag" = m3,
"SS*Lag" = m4,
"SS*Lag(2)" = m5)
sink(paste0("model_results_", j, ".txt"))
print(lapply( results , summary ))
sink(NULL)
sink(paste0("model_comp_", j, ".txt"))
print(anova( m0, m1, m2, m3, m4, m5 ))
sink(NULL)
}
}
}
# Close the pdf device opened at the top of the script.
if ( savePlot ) dev.off()
|
/2017/FYP_JD/F2_Accuracy.R
|
no_license
|
rettopnivek/Analyses
|
R
| false
| false
| 10,333
|
r
|
#------------------------------------#
# Mixed effects modeling of accuracy #
# Kevin Potter #
# Updated 06/01/2017 #
#------------------------------------#
# Clear workspace
# NOTE(review): rm(list = ls()) wipes the user's session; prefer running the
# script in a fresh R session instead.
rm(list = ls())
# Save current directory
orig_dir = getwd()
# Indicate whether to create figures
plotYes = T
# Indicate whether to save figures
savePlot = T
# Open a pdf graphics device in Plots/; it stays open until the dev.off()
# call at the very end of the script.
if ( savePlot & plotYes ) {
setwd('Plots')
pdf( 'Mixed_effects_accuracy.pdf' )
setwd(orig_dir)
}
# Indicate whether to carry out model-fitting
modelYes = T
# Indicate whether debugging messages should be printed
debugging = T
# Index
# Lookup - 01: Initial setup
# Lookup - 02: Plot effects on P(Correct)
# Lookup - 03: Mixed effects modeling
###
### Initial setup
###
# Lookup - 01
# Load in data
setwd( 'Data' )
# Provides the trimDat data frame used below -- TODO confirm contents.
load( 'FYP_JD.RData' )
setwd( orig_dir )
# For easy manipulation
d = trimDat
# Short column codes; presumably S = subject, Cnd = condition, SS = set
# size, LP = lag position, Ac = accuracy -- confirm against the data file.
colnames( d ) = c( 'S', 'Cnd', 'Tr', 'SS', 'CS',
'LS', 'PT', 'PC', 'PL', 'LP',
'LT', 'RT1', 'Ch', 'RT2', 'Cnf',
'AcL', 'Ac' )
# Load in useful packages
# Load in package for mixed effects modeling
# install.packages( 'lme4' )
library( lme4 )
# Define additional useful functions
# Presumably supplies blankPlot(), customAxes() and logistic() used below --
# confirm.
source( 'F0_Useful_functions.R' )
###
### Plot effects on P(Correct)
###
# Lookup - 02
# Descriptive figures: marginal (main-effect) means of accuracy by
# condition, set-size and lag, followed by per-condition interaction panels.
if ( plotYes ) {
if ( debugging ) print( "Descriptive figures (Main effects)" )
Cnd_labels = c( '1a: NL', '1b: NC', '1c: BN', '1d: BO' )
### Main effects ###
if (!savePlot) x11()
# 2x4 grid cells arranged so panels 1 and 2 sit on top and panel 3 is
# centered below (cells labeled 4 act as margins).
layout( rbind( c( 1, 1, 2, 2 ), c( 4, 3, 3, 4 ) ) )
if ( debugging ) print( "Figure 1 (Condition)" )
# Condition
# Mean accuracy per condition.
ef = aggregate( d$Ac, list( d$Cnd ), mean )
colnames( ef ) = c( 'Cnd', 'P' )
xl = c(.5, 4.5 ); yl = c(0,1)
blankPlot( xl, yl )
# Horizontal reference lines at .25/.50/.75.
segments( rep(.5,3), c(.25,.5,.75), rep(4.5,3),
c(.25,.5,.75), col = 'grey80', lwd = 2 )
customAxes( xl, yl, label = c( 'Condition', 'P(Correct)' ),
inc = c( 0, .25 ) )
axis( 1, 1:4, Cnd_labels, tick = F, line = -.5 )
points( 1:4, ef$P, pch = c(15,19,17,18), cex = 1.5 )
if ( debugging ) print( "Figure 2 (Set-size)" )
# Set-size
ef = aggregate( d$Ac, list( d$SS ), mean )
colnames( ef ) = c( 'SS', 'P' )
xl = c(.5, 4.5 ); yl = c(0,1)
blankPlot( xl, yl )
segments( rep(.5,3), c(.25,.5,.75), rep(4.5,3),
c(.25,.5,.75), col = 'grey80', lwd = 2 )
customAxes( xl, yl, label = c( 'Set-size', 'P(Correct)' ),
inc = c( 0, .25 ) )
axis( 1, 1:4, sort( unique( ef$SS ) ), tick = F, line = -.5 )
lines( 1:4, ef$P, type = 'b', pch = 19, cex = 1.5 )
# Lag
if ( debugging ) print( "Figure 3 (Lag)" )
ef = aggregate( d$Ac, list( d$LP ), mean )
colnames( ef ) = c( 'LP', 'P' )
xl = c(.5, 5.5 ); yl = c(0,1)
blankPlot( xl, yl )
segments( rep(.5,3), c(.25,.5,.75), rep(5.5,3),
c(.25,.5,.75), col = 'grey80', lwd = 2 )
customAxes( xl, yl, label = c( 'Lag', 'P(Correct)' ),
inc = c( 0, .25 ) )
# Lag axis labels are LP + 1.
axis( 1, 1:5, sort( unique( ef$LP ) )+1, tick = F, line = -.5 )
lines( 1:5, ef$P, type = 'b', pch = 19, cex = 1.5,
col = c(5,1:4) )
# Reset layout for figures
layout( cbind( 1 ) )
### Interactions ###
# Compute average P(Correct) over all possible conditions
ef = aggregate( d$Ac, list( d$SS, d$LP, d$Cnd ), mean )
colnames( ef ) = c( 'SS', 'LP', 'Cnd', 'P' )
ef = ef[ ef$LP != 0, ] # Remove lag 0 condition
if (!savePlot) x11();
# For 4 separate plotting panels, comment out
# the following line:
layout( matrix( 1:4, 2, 2, byrow = T ) )
if ( debugging ) print( "Descriptive figures (Interactions)" )
# One panel per condition: accuracy by lag, one line per set-size.
for ( i in 1:4 ) {
if ( debugging ) {
print( paste( "Figure", i ) )
}
xl = c(.5, 4.5 ); yl = c(0,1)
blankPlot( xl, yl )
customAxes( xl, yl, label = c( ' ', 'P(Correct)' ),
inc = c( 0, .25 ) )
axis( 1, 1:4, sort( unique( ef$LP ) ) + 1, tick = F, line = -.5 )
mtext( 'Lag', side = 1, line = 2 )
xyd = par( 'usr' )
# Only the third panel carries the legend.
if ( i == 3 )
legend( xyd[1] + (xyd[2]-xyd[1])*.05,
xyd[3] + (xyd[4]-xyd[3])*.6,
paste( 'Set Size:', sort( unique( ef$SS ) ) ),
fill = 1:5, bty = 'n' )
sel = ef$Cnd == i
inc = 1
for ( lp in sort( unique( ef$SS[sel] ) ) ) {
cur = sel & ef$SS == lp
x = ef$LP[cur]
y = ef$P[cur]
lines( x, y, type = 'b', pch = 19, col = inc )
inc = inc + 1
}
title( paste( "Experiment", Cnd_labels[i] ) )
}
# Reset layout
layout( cbind( 1 ) )
}
###
### Mixed effects modeling
###
# Lookup - 03
# Define function that plots the predicted versus observed
# effects
plot_fit = function( fit, dat, new = TRUE ) {
  # Purpose:
  # A function that plots the observed and model-predicted effects on
  # P(Correct) over set-size and lag, averaged over subjects, for the
  # (single) condition present in 'dat'.
  # Arguments:
  # fit - A lme4 fit object
  # dat - The data that was fitted
  # new - Logical; if true, a new plotting window is generated
  # Returns:
  # A figure showing the predicted (open points, dashed lines) and
  # observed (filled points, solid lines) effects on P(Correct).
  # Notes:
  # Relies on the helpers 'blankPlot', 'customAxes', 'logistic' and the
  # flag 'debugging' being defined in the surrounding workspace.

  # Model predictions, mapped back to the probability scale and averaged
  # per set-size x lag x condition cell.
  est = predict( fit )
  pred = aggregate( as.vector( est ), list( dat$SS,
                                            dat$LP, dat$Cnd ),
                    function(x) mean( logistic(x) ) )
  colnames( pred ) = c( 'SS', 'LP', 'Cnd', 'P' )
  # Compute average P(Correct) over all possible conditions
  ef = aggregate( dat$Ac, list( dat$SS, dat$LP, dat$Cnd ), mean )
  colnames( ef ) = c( 'SS', 'LP', 'Cnd', 'P' )
  # NOTE(review): rows of 'ef' and 'pred' are assumed to align after this
  # filter; that holds when 'dat' contains no lag-0 trials (as in the
  # model-fitting code in this file) -- confirm before reusing elsewhere.
  ef = ef[ ef$LP != 0, ]
  # Extract condition
  cur_cnd = unique( ef$Cnd )
  if ( new ) x11();
  Cnd_labels = c( '1a: NL', '1b: NC', '1c: BN', '1d: BO' )
  xl = c(.5, 4.5 ); yl = c(0,1)
  blankPlot( xl, yl )
  customAxes( xl, yl, label = c( ' ', 'P(Correct)' ),
              inc = c( 0, .25 ) )
  axis( 1, 1:4, sort( unique( ef$LP ) ) + 1, tick = F, line = -.5 )
  mtext( 'Lag', side = 1, line = 2 )
  xyd = par( 'usr' )
  legend( 'top',
          paste( 'Set Size:', sort( unique( ef$SS ) ) ),
          fill = 1:4, bty = 'n', horiz = T )
  sel = ef$Cnd == cur_cnd
  inc = 1
  if ( debugging ) {
    string = paste( "Model fit figure (", Cnd_labels[cur_cnd],
                    ")", sep = "" )
    print( string )
  }
  for ( lp in sort( unique( ef$SS ) ) ) {
    # Plot observed (filled circles, solid line)
    cur = ef$SS == lp
    x = ef$LP[cur]
    y = ef$P[cur]
    lines( x, y, type = 'b', pch = 19, col = inc )
    # Plot predicted (open circles, dashed line)
    xp = pred$LP[cur]
    yp = pred$P[cur]
    lines( xp, yp, type = 'b', pch = 21, lty = 2,
           col = inc, cex = 1.25 )
    inc = inc + 1
  }
  # BUG FIX: the title previously indexed 'Cnd_labels' with 'i', which is
  # not defined inside this function (it leaked from a global loop); use
  # the condition extracted from the data instead.
  title( paste( "Experiment", Cnd_labels[cur_cnd] ) )
}
if ( modelYes ) {
  # Fit a ladder of nested mixed-effects logistic regressions
  # (lme4::glmer) separately for each of the 4 conditions, from an
  # intercept-only null model up to set-size x lag interactions, and
  # optionally plot the predicted vs. observed effects for each fit.
  cnd_iter = 1:4
  for (j in cnd_iter) {
    if ( debugging ) {
      print( paste( "Condition", j ) )
    }
    # Pull out relevant data (non-practice trials for condition j,
    # lag positions 1-4 only)
    dtbf = d[ d$PT == 0 & d$Cnd == j & d$LP %in% 1:4,
              c('S','Ac','Cnd','SS','LP') ]
    # Shift numeric representation of set-size
    dtbf$SSn = dtbf$SS - 2
    # Dummy-coded variables for SS
    dtbf$SS3i = 0
    dtbf$SS3i[ dtbf$SS == 3 ] = 1
    dtbf$SS4i = 0
    dtbf$SS4i[ dtbf$SS == 4 ] = 1
    dtbf$SS5i = 0
    dtbf$SS5i[ dtbf$SS == 5 ] = 1
    # Create set-size slopes by lag position (Dummy coded)
    dtbf$SSnxL1 = 0
    dtbf$SSnxL2 = 0
    dtbf$SSnxL3 = 0
    dtbf$SSnxL4 = 0
    # Fill SSnxL<i> with the set-size slope on trials with lag i
    # (columns are assumed contiguous starting at SSnxL1).
    for ( i in 1:4 ) {
      sel = dtbf$LP == i
      ind = grep( 'SSnxL1', colnames( dtbf ) )
      dtbf[ sel, ind + i - 1 ] = dtbf$SSn[ sel ]
    }
    # Coefficients represent the change in the slope of the
    # linear trend for set-size based on each type of lag
    # relative to the no-lag condition
    if (debugging) print( "Null model" )
    m0 = glmer( Ac ~ 1 + (1|S),
                family = binomial('logit'), data = dtbf )
    if ( plotYes ) {
      np = !savePlot  # open a new device unless plots are being saved
      plot_fit( m0, dtbf, new = np )
      mtext( 'Null Model (No fixed effects)', side = 3,
             outer = T, line = -1 )
    }
    if (debugging) print( "Main effect of set size" )
    m1 = glmer( Ac ~ 1 + SSn + (1|S),
                family = binomial('logit'), data = dtbf )
    if ( plotYes ) {
      np = !savePlot
      plot_fit( m1, dtbf, new = np )
      mtext( 'Main Effect of Set Size', side = 3,
             outer = T, line = -1 )
    }
    if (debugging) print( "Main effect of lag" )
    m2 = glmer( Ac ~ 1 + LP + (1|S),
                family = binomial('logit'), data = dtbf )
    if ( plotYes ) {
      np = !savePlot
      plot_fit( m2, dtbf, new = np )
      mtext( 'Main Effect of Lag', side = 3,
             outer = T, line = -1 )
    }
    if (debugging) print( "All main effects" )
    m3 = glmer( Ac ~ 1 + SSn + LP + (1|S),
                family = binomial('logit'), data = dtbf )
    if ( plotYes ) {
      np = !savePlot
      plot_fit( m3, dtbf, new = np )
      mtext( 'All Main Effects', side = 3,
             outer = T, line = -1 )
    }
    # FIX: these two progress messages were unconditionally printed;
    # guard them with 'debugging' for consistency with the others.
    if (debugging) print( 'Interaction of set size and lag (1)' )
    m4 = glmer( Ac ~ 1 + SSn + SSnxL2 + SSnxL3 + SSnxL4 + (1|S),
                family = binomial('logit'), data = dtbf )
    if ( plotYes ) {
      np = !savePlot
      plot_fit( m4, dtbf, new = np )
      mtext( 'Interaction of Set Size and Lag', side = 3,
             outer = T, line = -1 )
    }
    if (debugging) print( 'Interaction of set size and lag (2)' )
    m5 = glmer( Ac ~ 1 + SS3i + SS4i + SS5i +
                  SSnxL2 + SSnxL3 + SSnxL4 + (1|S),
                family = binomial('logit'), data = dtbf )
    if ( plotYes ) {
      np = !savePlot
      plot_fit( m5, dtbf, new = np )
      mtext( 'Interaction of Set Size and Lag (2)', side = 3,
             outer = T, line = -1 )
    }
    # Obtain summary and significance of effects
    if (0) { # Hack equivalent to block quote; flip to 1 to emit reports
      results = list("Null" = m0,
                     "SS" = m1,
                     "Lag" = m2,
                     "SS+Lag" = m3,
                     "SS*Lag" = m4,
                     "SS*Lag(2)" = m5)
      sink(paste0("model_results_", j, ".txt"))
      print(lapply( results , summary ))
      sink(NULL)
      sink(paste0("model_comp_", j, ".txt"))
      print(anova( m0, m1, m2, m3, m4, m5 ))
      sink(NULL)
    }
  }
}
if ( savePlot ) dev.off()
|
# S3 print method for 'Smean' (sample mean estimate) objects.
# Reports whether a finite population correction was applied (N == Inf
# means none), then the mean estimate, its standard error, and the
# confidence interval at the stored confidence level.
# Returns 'x' invisibly so the object can still be assigned or piped.
print.Smean <- function(x, ...) {
  fpc_prefix <- if (x$call$N == Inf) "Without " else "With "
  cat("\nSmean object: Sample mean estimate\n", sep = "")
  cat(fpc_prefix)
  cat("finite population correction: N=", x$call$N, "\n", sep = "")
  cat("\nMean estimate: ", round(x$mean, 4), "\n", sep = "")
  cat("Standard error: ", round(x$se, 4), "\n", sep = "")
  cat(100 * x$call$level, "% confidence interval: [", round(x$ci[1], 4),
      ",", round(x$ci[2], 4), "]\n\n", sep = "")
  invisible(x)
}
|
/R/print.Smean.R
|
no_license
|
davan690/samplingbook
|
R
| false
| false
| 458
|
r
|
# S3 print method for 'Smean' (sample mean estimate) objects.
# Prints whether a finite population correction was applied (N == Inf
# means no correction), the mean estimate, its standard error, and the
# confidence interval at the level stored in x$call$level.
# Returns 'x' invisibly so the object can be assigned or piped.
print.Smean <-
function(x,...) {
  cat("\nSmean object: Sample mean estimate\n", sep="")
  # N == Inf signals that no finite population correction was used.
  if(x$call$N == Inf) cat("Without ")
  else cat("With ")
  cat("finite population correction: N=",x$call$N,"\n", sep="")
  cat("\nMean estimate: ", round(x$mean,4), "\n", sep="")
  cat("Standard error: ", round(x$se,4), "\n", sep="")
  cat(100 * x$call$level,"% confidence interval: [",round(x$ci[1],4),",",round(x$ci[2],4),"]\n\n", sep="")
  invisible(x)
}
|
# Code reference: https://rhandbook.wordpress.com/tag/sentiment-analysis-using-r/
# This code requires Rstem and sentiment.
# RStem: http://cran.cnr.berkeley.edu/src/contrib/Archive/Rstem/
# sentiment: https://cran.r-project.org/src/contrib/Archive/sentiment/
# Ensure the required CRAN packages are installed before loading.
list.of.packages <- c("RMySQL", "ggplot2")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if (length(new.packages)) install.packages(new.packages)
library('Rstem')
library('sentiment')
library('RMySQL')
library('ggplot2')
# Connect to the MySQL database
# SECURITY NOTE(review): credentials are hard-coded (root, empty
# password); move them to environment variables or a config file.
mydb = dbConnect(MySQL(), user='root', password='', dbname='trump', host='127.0.0.1')
# Load 50000 tweets
rs = dbSendQuery(mydb, "select tweet from tweets LIMIT 0,50000")
data = fetch(rs, n=-1)
# classify emotion
class_emotion = classify_emotion(data$tweet, algorithm="bayes", prior=1.0)
# get emotion best fit (column 7 of the classifier output)
emotion = class_emotion[,7]
# substitute NA's by "unknown"
emotion[is.na(emotion)] = "unknown"
# classify polarity
class_polarity= classify_polarity(data$tweet, algorithm="bayes")
# get polarity best fit (column 4 of the classifier output)
polarity = class_polarity[,4]
# data frame with results
tweet_df = data.frame(text=data$tweet, emotion=emotion,
                      polarity=polarity, stringsAsFactors=FALSE)
# sort data frame: order the emotion factor levels by frequency
# BUG FIX: the original referenced 'sent_df', which is never defined in
# this script; the data frame built above is 'tweet_df'.
tweet_df = within(tweet_df,
                  emotion <- factor(emotion, levels=names(sort(table(emotion), decreasing=TRUE))))
# Lets generate some plot based on above data set. Plot tweet distribution based on emotions.
ggplot(tweet_df, aes(x=emotion)) +
  geom_bar(aes(y=..count.., fill=emotion))+xlab("Emotions Categories") + ylab("Tweet Count")+ggtitle("Sentiment Analysis of Tweets on Emotions")
|
/r/classify_emotion.r
|
no_license
|
mina-gaid/twitter-sentiment-analysis
|
R
| false
| false
| 1,682
|
r
|
# Code reference: https://rhandbook.wordpress.com/tag/sentiment-analysis-using-r/
# This code requires Rstem and sentiment.
# RStem: http://cran.cnr.berkeley.edu/src/contrib/Archive/Rstem/
# sentiment: https://cran.r-project.org/src/contrib/Archive/sentiment/
# Ensure the required CRAN packages are installed before loading.
list.of.packages <- c("RMySQL", "ggplot2")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if (length(new.packages)) install.packages(new.packages)
library('Rstem')
library('sentiment')
library('RMySQL')
library('ggplot2')
# Connect to the MySQL database
# SECURITY NOTE(review): credentials are hard-coded (root, empty
# password); move them to environment variables or a config file.
mydb = dbConnect(MySQL(), user='root', password='', dbname='trump', host='127.0.0.1')
# Load 50000 tweets
rs = dbSendQuery(mydb, "select tweet from tweets LIMIT 0,50000")
data = fetch(rs, n=-1)
# classify emotion
class_emotion = classify_emotion(data$tweet, algorithm="bayes", prior=1.0)
# get emotion best fit (column 7 of the classifier output)
emotion = class_emotion[,7]
# substitute NA's by "unknown"
emotion[is.na(emotion)] = "unknown"
# classify polarity
class_polarity= classify_polarity(data$tweet, algorithm="bayes")
# get polarity best fit (column 4 of the classifier output)
polarity = class_polarity[,4]
# data frame with results
tweet_df = data.frame(text=data$tweet, emotion=emotion,
                      polarity=polarity, stringsAsFactors=FALSE)
# sort data frame: order the emotion factor levels by frequency
# BUG FIX: the original referenced 'sent_df', which is never defined in
# this script; the data frame built above is 'tweet_df'.
tweet_df = within(tweet_df,
                  emotion <- factor(emotion, levels=names(sort(table(emotion), decreasing=TRUE))))
# Lets generate some plot based on above data set. Plot tweet distribution based on emotions.
ggplot(tweet_df, aes(x=emotion)) +
  geom_bar(aes(y=..count.., fill=emotion))+xlab("Emotions Categories") + ylab("Tweet Count")+ggtitle("Sentiment Analysis of Tweets on Emotions")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{phenotypical}
\alias{phenotypical}
\title{phenotypical}
\format{
An object of type \code{data.frame}
}
\usage{
phenotypical
}
\description{
phenotypical detailed description2
}
\keyword{datasets}
|
/man/phenotypical.Rd
|
no_license
|
gardiner/daphneg2
|
R
| false
| true
| 302
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{phenotypical}
\alias{phenotypical}
\title{phenotypical}
\format{
An object of type \code{data.frame}
}
\usage{
phenotypical
}
\description{
phenotypical detailed description2
}
\keyword{datasets}
|
###################################################################################
# #
# Filename : Fig12.R #
# Project : BiomJ article "A Utility Approach to Individualized #
# Optimal Dose Selection Using Biomarkers #
# Authors : Pin Li #
# Date : 1.13.2019 #
# Required R packages : tmvtnorm, stats, ggplot2 #
###################################################################################
rm(list = ls())
library(tmvtnorm)
library(stats)
library(ggplot2)
#define the invlogit function (numerically stable form)
invlogit <- function(x){
  # Equivalent to exp(x) / (exp(x) + 1), but avoids the Inf/Inf = NaN
  # overflow the original form produced for large positive x.
  return(1 / (1 + exp(-x)))
}
#sampling with sample size n and number of covariates p
my.sample <- function(n, p){
  # Simulate n subjects: p standard-normal covariates, a uniform dose
  # a in [-1, 1], and binary efficacy (y) / toxicity (r) outcomes whose
  # dose effect depends on the covariates.
  # Draw 2n candidates, then keep those whose covariate combination
  # keeps the dose-interaction term inside (-1, 1).
  x <- rmvnorm(n * 2, mean = rep(0, p))
  x <- x[abs(0.4 * rowSums(x[, 1:3]) - 0.8 * x[, 4]) < 1, ]
  # NOTE(review): if fewer than n candidates survive the filter,
  # x[1:n, ] pads with NA rows -- TODO confirm this cannot occur for
  # the sample sizes used here.
  x <- x[1:n, ]
  a <- runif(n, -1, 1)
  # Linear dose effects for efficacy (h_y) and toxicity (h_r); the
  # covariate term enters with opposite signs in the two outcomes.
  h_y <- (1 + 0.4 * rowSums(x[, 1:3]) - 0.8 * x[, 4]) * a
  h_r <- (1 - 0.4 * rowSums(x[, 1:3]) + 0.8 * x[, 4]) * a
  y <- rbinom(n, 1, invlogit(x[, 1] + h_y))
  r <- rbinom(n, 1, invlogit(-1.386 - x[, 1] + h_r))
  data.frame(y, r, a, x)
}
#get the matrix with interactions
# Builds the design matrix with all pairwise interactions, dropping the
# intercept and keeping the first 2*p + 1 terms: the p + 1 main effects
# plus the p interactions of the first column (dose 'a') with each
# covariate.
# FIX: 'p' was silently read from the global environment; it is now a
# parameter whose default, ncol(mat) - 1, equals the global value at
# every call site in this script (mat is always cbind(a, <p covariates>)).
design <- function(mat, p = ncol(mat) - 1){
  int_mat <- model.matrix( ~ . ^ 2, data = data.frame(mat))
  return(int_mat[, 2:(2 * p + 2)])
}
# Fix the RNG seed so the simulated cohort is reproducible.
set.seed(1)
n <- 200
p <- 5
case1 <- my.sample(n, p)
case1_val <- case1
#true coefficients
y_coef <- c(0, 1, 1, rep(0, p - 1), rep(0.4, 3), -0.8, rep(0, p - 4))
r_coef <- c(-1.386, 1, -1, rep(0, p - 1), rep(-0.4, 3), 0.8, rep(0, p - 4))
# Baseline outcome probabilities at the minimum dose a = -1, used as the
# reference point when evaluating the utility of other doses.
p_y_0 <- invlogit(cbind(1, design(cbind(a = -1, case1_val[, -c(1:3)]))) %*% y_coef)
p_r_0 <- invlogit(cbind(1, design(cbind(a = -1, case1_val[, -c(1:3)]))) %*% r_coef)
#define function to find optimal dose for the population at given lambda
my.opt <- function(lambda) {
  # For each subject, maximize the utility
  #   (efficacy gain) - lambda * (toxicity gain)
  # relative to the baseline dose a = -1, over doses in [-1, 1].
  # Uses the globals n, case1_val, y_coef, r_coef, p_y_0, p_r_0.
  D <- rep(NA, n)
  for (j in 1:n) {
    f <- function(a) invlogit(c(1, design(cbind(a, case1_val[, -c(1:3)]))[j, ]) %*% y_coef)-
      lambda * invlogit(c(1, design(cbind(a, case1_val[, -c(1:3)]))[j, ]) %*% r_coef) -
      p_y_0[j] + lambda * p_r_0[j]
    D[j] <- optimize(f, c(-1, 1), tol = 0.001, maximum = TRUE)$maximum
  }
  return(D)
}
# Inspect per-subject dose slopes for efficacy and toxicity (used to
# pick the illustrative subjects mentioned below).
head(as.matrix(case1_val[, -c(1:3)]) %*% y_coef[8:12] + y_coef[2])
head(as.matrix(case1_val[, -c(1:3)]) %*% r_coef[8:12] + r_coef[2])
# choose sub 4, 11, 15 for Fig 1
ind <- 11
#get the predicted outcome at given dose
a <- seq(-1, 1, 0.01)
y_1 <- rep(NA, 201)
r_1 <- rep(NA, 201)
for (ai in 1:201) {
  y_1[ai] <- invlogit(c(1, design(cbind(a[ai], case1_val[, -c(1, 2, 3)]))[ind, ]) %*% y_coef)
  r_1[ai] <- invlogit(c(1, design(cbind(a[ai], case1_val[, -c(1, 2, 3)]))[ind, ]) %*% r_coef)
}
# Fig 1 panel: utility and outcome changes vs. dose for subject 'ind'.
ggplot(data = data.frame(a, y_1, r_1), aes(x = a)) +
  geom_line(aes(y = 0.5 + (y_1 - y_1[1]) - (r_1 - r_1[1]), linetype = "a")) +
  geom_line(aes(y = y_1 - y_1[1], linetype = "b")) +
  geom_line(aes(y = r_1 - r_1[1], linetype = "c")) +
  scale_linetype_manual(values = c(1, 2, 3), name = "", labels = expression(Utility, delta[E], delta[T])) +
  xlab("dose") +
  scale_y_continuous(limits = c(0, 1), "Predicted outcome", sec.axis = sec_axis(~. - 0.5, name = "Utility")) +
  theme_classic() +
  theme(legend.position = "top")
#get the optimal dose at given lambda
lambda <- seq(0.1, 4, length = 100)
d_1 <- rep(NA, 100)
for (li in 1:100) {
  f <- function(a) invlogit(c(1, design(cbind(a, case1_val[, -c(1, 2, 3)]))[ind, ]) %*% y_coef) - lambda[li] * invlogit(c(1, design(cbind(a, case1_val[, -c(1, 2, 3)]))[ind, ]) %*% r_coef) - p_y_0[ind] + lambda[li] * p_r_0[ind]
  d_1[li] <- optimize(f, c(-1, 1), tol = 0.001, maximum = TRUE)$maximum
}
# Optimal dose for subject 'ind' as a function of the trade-off theta.
ggplot(data.frame(lambda, d_1), aes(y = d_1, x = lambda)) +
  ylim(-1, 1) +
  geom_line() +
  theme_classic() +
  xlab(expression(theta)) +
  ylab("Optimal dose")
#get the population average outcome at given lambda for Fig 2
y_d <- rep(NA, 100)
r_d <- rep(NA, 100)
for (li in 1:100) {
  D_d <- my.opt(lambda[li])
  r_d[li] <- mean(invlogit(cbind(1, design(cbind(a = D_d, case1_val[, -c(1, 2, 3)]))) %*% r_coef))
  y_d[li] <- mean(invlogit(cbind(1, design(cbind(a = D_d, case1_val[, -c(1, 2, 3)]))) %*% y_coef))
}
# Fig 2: population-average efficacy and toxicity under the optimal
# individualized dose, as the trade-off parameter theta varies.
ggplot(data = data.frame(lambda, y_d, r_d), aes(x = lambda)) +
  geom_line(aes(y = y_d, linetype = "a")) +
  geom_line(aes(y = r_d, linetype = "b")) +
  ylim(0, 1) +
  xlab(expression(theta)) +
  ylab("Average outcome") +
  theme_classic() +
  theme(legend.position = c(0.25, 0.85)) +
  scale_linetype_manual(values = c(2, 3), name = "",
                        labels = expression(E[x](Pr(paste(italic(E), "|", italic(d^{opt})),italic(x))),
                                            E[x](Pr(paste(italic(T), "|", italic(d^{opt})),italic(x)))))
# Efficacy/toxicity trade-off curve traced out by varying theta.
ggplot(data.frame(y_d, r_d), aes(y = y_d, x = r_d)) +
  geom_line() +
  theme_classic() +
  xlab("Average toxicity") +
  ylab("Average efficacy")
|
/Fig12.R
|
no_license
|
PinLi1018/Utility-ITR
|
R
| false
| false
| 5,263
|
r
|
###################################################################################
# #
# Filename : Fig12.R #
# Project : BiomJ article "A Utility Approach to Individualized #
# Optimal Dose Selection Using Biomarkers #
# Authors : Pin Li #
# Date : 1.13.2019 #
# Required R packages : tmvtnorm, stats, ggplot2 #
###################################################################################
rm(list = ls())
library(tmvtnorm)
library(stats)
library(ggplot2)
#define the invlogit function (numerically stable form)
invlogit <- function(x){
  # Equivalent to exp(x) / (exp(x) + 1), but avoids the Inf/Inf = NaN
  # overflow the original form produced for large positive x.
  return(1 / (1 + exp(-x)))
}
#sampling with sample size n and number of covariates p
my.sample <- function(n, p){
  # Simulate n subjects: p standard-normal covariates, a uniform dose
  # a in [-1, 1], and binary efficacy (y) / toxicity (r) outcomes whose
  # dose effect depends on the covariates.
  # Draw 2n candidates, then keep those whose covariate combination
  # keeps the dose-interaction term inside (-1, 1).
  x <- rmvnorm(n * 2, mean = rep(0, p))
  x <- x[abs(0.4 * rowSums(x[, 1:3]) - 0.8 * x[, 4]) < 1, ]
  # NOTE(review): if fewer than n candidates survive the filter,
  # x[1:n, ] pads with NA rows -- TODO confirm this cannot occur for
  # the sample sizes used here.
  x <- x[1:n, ]
  a <- runif(n, -1, 1)
  # Linear dose effects for efficacy (h_y) and toxicity (h_r); the
  # covariate term enters with opposite signs in the two outcomes.
  h_y <- (1 + 0.4 * rowSums(x[, 1:3]) - 0.8 * x[, 4]) * a
  h_r <- (1 - 0.4 * rowSums(x[, 1:3]) + 0.8 * x[, 4]) * a
  y <- rbinom(n, 1, invlogit(x[, 1] + h_y))
  r <- rbinom(n, 1, invlogit(-1.386 - x[, 1] + h_r))
  data.frame(y, r, a, x)
}
#get the matrix with interactions
# Builds the design matrix with all pairwise interactions, dropping the
# intercept and keeping the first 2*p + 1 terms: the p + 1 main effects
# plus the p interactions of the first column (dose 'a') with each
# covariate.
# FIX: 'p' was silently read from the global environment; it is now a
# parameter whose default, ncol(mat) - 1, equals the global value at
# every call site in this script (mat is always cbind(a, <p covariates>)).
design <- function(mat, p = ncol(mat) - 1){
  int_mat <- model.matrix( ~ . ^ 2, data = data.frame(mat))
  return(int_mat[, 2:(2 * p + 2)])
}
# Fix the RNG seed so the simulated cohort is reproducible.
set.seed(1)
n <- 200
p <- 5
case1 <- my.sample(n, p)
case1_val <- case1
#true coefficients
y_coef <- c(0, 1, 1, rep(0, p - 1), rep(0.4, 3), -0.8, rep(0, p - 4))
r_coef <- c(-1.386, 1, -1, rep(0, p - 1), rep(-0.4, 3), 0.8, rep(0, p - 4))
# Baseline outcome probabilities at the minimum dose a = -1, used as the
# reference point when evaluating the utility of other doses.
p_y_0 <- invlogit(cbind(1, design(cbind(a = -1, case1_val[, -c(1:3)]))) %*% y_coef)
p_r_0 <- invlogit(cbind(1, design(cbind(a = -1, case1_val[, -c(1:3)]))) %*% r_coef)
#define function to find optimal dose for the population at given lambda
my.opt <- function(lambda) {
  # For each subject, maximize the utility
  #   (efficacy gain) - lambda * (toxicity gain)
  # relative to the baseline dose a = -1, over doses in [-1, 1].
  # Uses the globals n, case1_val, y_coef, r_coef, p_y_0, p_r_0.
  D <- rep(NA, n)
  for (j in 1:n) {
    f <- function(a) invlogit(c(1, design(cbind(a, case1_val[, -c(1:3)]))[j, ]) %*% y_coef)-
      lambda * invlogit(c(1, design(cbind(a, case1_val[, -c(1:3)]))[j, ]) %*% r_coef) -
      p_y_0[j] + lambda * p_r_0[j]
    D[j] <- optimize(f, c(-1, 1), tol = 0.001, maximum = TRUE)$maximum
  }
  return(D)
}
# Inspect per-subject dose slopes for efficacy and toxicity (used to
# pick the illustrative subjects mentioned below).
head(as.matrix(case1_val[, -c(1:3)]) %*% y_coef[8:12] + y_coef[2])
head(as.matrix(case1_val[, -c(1:3)]) %*% r_coef[8:12] + r_coef[2])
# choose sub 4, 11, 15 for Fig 1
ind <- 11
#get the predicted outcome at given dose
a <- seq(-1, 1, 0.01)
y_1 <- rep(NA, 201)
r_1 <- rep(NA, 201)
for (ai in 1:201) {
  y_1[ai] <- invlogit(c(1, design(cbind(a[ai], case1_val[, -c(1, 2, 3)]))[ind, ]) %*% y_coef)
  r_1[ai] <- invlogit(c(1, design(cbind(a[ai], case1_val[, -c(1, 2, 3)]))[ind, ]) %*% r_coef)
}
# Fig 1 panel: utility and outcome changes vs. dose for subject 'ind'.
ggplot(data = data.frame(a, y_1, r_1), aes(x = a)) +
  geom_line(aes(y = 0.5 + (y_1 - y_1[1]) - (r_1 - r_1[1]), linetype = "a")) +
  geom_line(aes(y = y_1 - y_1[1], linetype = "b")) +
  geom_line(aes(y = r_1 - r_1[1], linetype = "c")) +
  scale_linetype_manual(values = c(1, 2, 3), name = "", labels = expression(Utility, delta[E], delta[T])) +
  xlab("dose") +
  scale_y_continuous(limits = c(0, 1), "Predicted outcome", sec.axis = sec_axis(~. - 0.5, name = "Utility")) +
  theme_classic() +
  theme(legend.position = "top")
#get the optimal dose at given lambda
lambda <- seq(0.1, 4, length = 100)
d_1 <- rep(NA, 100)
for (li in 1:100) {
  f <- function(a) invlogit(c(1, design(cbind(a, case1_val[, -c(1, 2, 3)]))[ind, ]) %*% y_coef) - lambda[li] * invlogit(c(1, design(cbind(a, case1_val[, -c(1, 2, 3)]))[ind, ]) %*% r_coef) - p_y_0[ind] + lambda[li] * p_r_0[ind]
  d_1[li] <- optimize(f, c(-1, 1), tol = 0.001, maximum = TRUE)$maximum
}
# Optimal dose for subject 'ind' as a function of the trade-off theta.
ggplot(data.frame(lambda, d_1), aes(y = d_1, x = lambda)) +
  ylim(-1, 1) +
  geom_line() +
  theme_classic() +
  xlab(expression(theta)) +
  ylab("Optimal dose")
#get the population average outcome at given lambda for Fig 2
y_d <- rep(NA, 100)
r_d <- rep(NA, 100)
for (li in 1:100) {
  D_d <- my.opt(lambda[li])
  r_d[li] <- mean(invlogit(cbind(1, design(cbind(a = D_d, case1_val[, -c(1, 2, 3)]))) %*% r_coef))
  y_d[li] <- mean(invlogit(cbind(1, design(cbind(a = D_d, case1_val[, -c(1, 2, 3)]))) %*% y_coef))
}
# Fig 2: population-average efficacy and toxicity under the optimal
# individualized dose, as the trade-off parameter theta varies.
ggplot(data = data.frame(lambda, y_d, r_d), aes(x = lambda)) +
  geom_line(aes(y = y_d, linetype = "a")) +
  geom_line(aes(y = r_d, linetype = "b")) +
  ylim(0, 1) +
  xlab(expression(theta)) +
  ylab("Average outcome") +
  theme_classic() +
  theme(legend.position = c(0.25, 0.85)) +
  scale_linetype_manual(values = c(2, 3), name = "",
                        labels = expression(E[x](Pr(paste(italic(E), "|", italic(d^{opt})),italic(x))),
                                            E[x](Pr(paste(italic(T), "|", italic(d^{opt})),italic(x)))))
# Efficacy/toxicity trade-off curve traced out by varying theta.
ggplot(data.frame(y_d, r_d), aes(y = y_d, x = r_d)) +
  geom_line() +
  theme_classic() +
  xlab("Average toxicity") +
  ylab("Average efficacy")
|
library("ape")
# disable scientific notation
options(scipen=999)
# set working directory
setwd("~/Dropbox/Metazoan_Partitions/Metazoan_Project/RAxML_single_genes/")
# creates a list of all tree files in the working directory
infiles <- dir(pattern='*bipartitions.OG*')
extract.lengths <- function(file){
  # Read a RAxML tree file with ape::read.tree, record all terminal
  # (tip) branch lengths, and flag tips whose branch is at least 5x the
  # mean tip length. Appends one row per tree to "OG_tip_lengths.csv"
  # and any flagged long branches to "OG_long_branch.csv".
  tr <- read.tree(file)
  # identifies terminal branches with TRUE
  terms <- tr$edge[, 2] <= Ntip(tr)
  terminal.edges <- tr$edge.length[terms]
  # reordering tip labels and assigning them to terminal branches
  names(terminal.edges) <- tr$tip.label[tr$edge[terms, 2]]
  # putting the data above in a table
  table <- data.frame(rbind(terminal.edges))
  #sorting columns by name
  sort_table <- table[,sort(names(table))]
  # get OG number from filename
  OG_no <- sub("RAxML_bipartitions.([A-Z]+[0-9]+).phy.no-undet.spurious-out-gappyout.fasta.aln.OUT", "\\1", perl=TRUE, x=file)
  # gets average length of tip branches
  avg_tip_length <- mean(terminal.edges)
  # selects tip branches that are 5 times or more longer than average length
  sel <- terminal.edges >= 5 * avg_tip_length
  # add OG number to each selection
  long_br <- cbind(OG_no,terminal.edges[sel])
  # add OG number to each row of tip length table
  og_sort_table <- cbind(OG_no,sort_table)
  # put the long branched selections in a table
  long_br_table <- data.frame(long_br)
  # write table with tip lengths
  # FIX: only emit the header when the output file does not exist yet;
  # the original appended the column names once per input tree.
  write.table(og_sort_table, file="OG_tip_lengths.csv", row.names=FALSE,
              col.names=!file.exists("OG_tip_lengths.csv"), append=TRUE, quote=FALSE)
  # write table with selected OGs
  write.table(long_br_table, file="OG_long_branch.csv", row.names=TRUE,
              col.names=FALSE, append=TRUE, quote=FALSE)
}
# loop over all files
lapply(infiles, extract.lengths)
|
/long_branches.R
|
no_license
|
alerougon/metazoan_phylogenomics
|
R
| false
| false
| 1,684
|
r
|
library("ape")
# disable scientific notation
options(scipen=999)
# set working directory
setwd("~/Dropbox/Metazoan_Partitions/Metazoan_Project/RAxML_single_genes/")
# creates a list of all tree files in the working directory
infiles <- dir(pattern='*bipartitions.OG*')
extract.lengths <- function(file){
  # Read a RAxML tree file with ape::read.tree, record all terminal
  # (tip) branch lengths, and flag tips whose branch is at least 5x the
  # mean tip length. Appends one row per tree to "OG_tip_lengths.csv"
  # and any flagged long branches to "OG_long_branch.csv".
  tr <- read.tree(file)
  # identifies terminal branches with TRUE
  terms <- tr$edge[, 2] <= Ntip(tr)
  terminal.edges <- tr$edge.length[terms]
  # reordering tip labels and assigning them to terminal branches
  names(terminal.edges) <- tr$tip.label[tr$edge[terms, 2]]
  # putting the data above in a table
  table <- data.frame(rbind(terminal.edges))
  #sorting columns by name
  sort_table <- table[,sort(names(table))]
  # get OG number from filename
  OG_no <- sub("RAxML_bipartitions.([A-Z]+[0-9]+).phy.no-undet.spurious-out-gappyout.fasta.aln.OUT", "\\1", perl=TRUE, x=file)
  # gets average length of tip branches
  avg_tip_length <- mean(terminal.edges)
  # selects tip branches that are 5 times or more longer than average length
  sel <- terminal.edges >= 5 * avg_tip_length
  # add OG number to each selection
  long_br <- cbind(OG_no,terminal.edges[sel])
  # add OG number to each row of tip length table
  og_sort_table <- cbind(OG_no,sort_table)
  # put the long branched selections in a table
  long_br_table <- data.frame(long_br)
  # write table with tip lengths
  # FIX: only emit the header when the output file does not exist yet;
  # the original appended the column names once per input tree.
  write.table(og_sort_table, file="OG_tip_lengths.csv", row.names=FALSE,
              col.names=!file.exists("OG_tip_lengths.csv"), append=TRUE, quote=FALSE)
  # write table with selected OGs
  write.table(long_br_table, file="OG_long_branch.csv", row.names=TRUE,
              col.names=FALSE, append=TRUE, quote=FALSE)
}
# loop over all files
lapply(infiles, extract.lengths)
|
# addLatent.R
# copyright 2015-2022, openreliability.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
addLatent<-function(DF, at, mttf, mttr=NULL, inspect=NULL, risk="mean",
	display_under=NULL, tag="", label="", name="",name2="", description="") {
  # Add a latent (periodically inspected) basic event to a fault tree.
  #
  # Arguments:
  #   DF       fault tree data frame under construction
  #   at       ID or tag of the parent node to attach to
  #   mttf     mean time to failure (required)
  #   mttr     mean time to repair; if > 0 it contributes a steady-state
  #            unavailability term pzero = mttr / (mttf + mttr)
  #   inspect  inspection interval: a value, or the name of an object
  #            holding the value (required)
  #   risk     "mean" for the fractional-downtime probability over the
  #            interval, "max" for the worst-case probability at the end
  #            of the interval
  #   display_under, tag, label, name, name2, description
  #            layout / identification fields passed through to the row
  # Returns: DF with the new event row appended.
  at <- tagconnect(DF, at)
  # Label and name conventions are mutually exclusive tree-wide.
  if(label!="") {
    if(any(DF$Name!="") || any(DF$Name2!="")) {
      stop("Cannot use label once name convention has been established.")
    }
  }
  if(any(DF$Label!="")) {
    if(name!="" || name2!="") {
      stop("Cannot use name convention once label has been established.")
    }
  }
  tp<-2
  etp<-0
  # The root row's P2 holds the mission time; a positive mission time
  # marks this event with EType 4.
  mt<-DF$P2[which(DF$ID==min(DF$ID))]
  if(mt>0) {
    etp<-4
  }
  info<-test.basic(DF, at, display_under, tag)
  thisID<-info[1]
  parent<-info[2]
  gp<-info[3]
  condition<-info[4]
  if(is.null(mttf)) {stop("latent component must have mttf")}
  if(is.null(inspect)) {stop("latent component must have inspection entry")}
  # 'inspect' may be the name of an object holding the interval; resolve
  # it by evaluating that object.
  if(is.character(inspect)) {
    # BUG FIX: the original tested exists("inspect"), which checks the
    # parameter itself and is therefore always TRUE; test whether the
    # *named* object exists so the error branch is reachable.
    if(exists(inspect)) {
      Tao<-eval((parse(text=inspect)))
    }else{
      stop("inspection interval object does not exist")
    }
  }else{
    Tao=inspect
  }
  ## pzero is no longer provided as an argument.
  ## pzero is calculated based on mttr, if it is provided
  pzero<-0
  if(length(mttr)>0 && mttr>0) {
    pzero=mttr/(mttf+mttr)
  }
  if(risk == "mean") {
    ## fractional downtime method
    pf<-1-1/((1/mttf)*Tao)*(1-exp(-(1/mttf)*Tao))
    pf<- 1-(1-pf)*(1-pzero)
  }else{
    if(risk == "max") {
      ## The maximum risk probability
      pf<-1-exp(-(1/mttf)*Tao)
    }else{
      stop("only 'mean' or 'max' accepted for risk argument")
    }
  }
  ## Now it is okay to set mttr to -1 for ftree entry
  if(is.null(mttr) || !mttr>0) { mttr<- (-1)}
  Dfrow<-data.frame(
    ID= thisID ,
    GParent= gp ,
    Tag= tag ,
    Type= tp ,
    CFR= 1/mttf ,
    PBF= pf ,
    CRT= mttr ,
    MOE= 0 ,
    Condition= condition,
    Cond_Code= 0,
    EType= etp,
    P1= pzero ,
    P2= Tao ,
    Collapse= 0 ,
    Label= label ,
    Name= name ,
    Name2= name2 ,
    CParent= at ,
    Level= DF$Level[parent]+1 ,
    Description= description ,
    UType= 0 ,
    UP1= 0 ,
    UP2= 0
  )
  DF<-rbind(DF, Dfrow)
  DF
}
|
/R/addLatent.R
|
no_license
|
jto888/FaultTree
|
R
| false
| false
| 2,760
|
r
|
# addLatent.R
# copyright 2015-2022, openreliability.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
addLatent<-function(DF, at, mttf, mttr=NULL, inspect=NULL, risk="mean",
	display_under=NULL, tag="", label="", name="",name2="", description="") {
  # Add a latent (periodically inspected) basic event to a fault tree.
  #
  # Arguments:
  #   DF       fault tree data frame under construction
  #   at       ID or tag of the parent node to attach to
  #   mttf     mean time to failure (required)
  #   mttr     mean time to repair; if > 0 it contributes a steady-state
  #            unavailability term pzero = mttr / (mttf + mttr)
  #   inspect  inspection interval: a value, or the name of an object
  #            holding the value (required)
  #   risk     "mean" for the fractional-downtime probability over the
  #            interval, "max" for the worst-case probability at the end
  #            of the interval
  #   display_under, tag, label, name, name2, description
  #            layout / identification fields passed through to the row
  # Returns: DF with the new event row appended.
  at <- tagconnect(DF, at)
  # Label and name conventions are mutually exclusive tree-wide.
  if(label!="") {
    if(any(DF$Name!="") || any(DF$Name2!="")) {
      stop("Cannot use label once name convention has been established.")
    }
  }
  if(any(DF$Label!="")) {
    if(name!="" || name2!="") {
      stop("Cannot use name convention once label has been established.")
    }
  }
  tp<-2
  etp<-0
  # The root row's P2 holds the mission time; a positive mission time
  # marks this event with EType 4.
  mt<-DF$P2[which(DF$ID==min(DF$ID))]
  if(mt>0) {
    etp<-4
  }
  info<-test.basic(DF, at, display_under, tag)
  thisID<-info[1]
  parent<-info[2]
  gp<-info[3]
  condition<-info[4]
  if(is.null(mttf)) {stop("latent component must have mttf")}
  if(is.null(inspect)) {stop("latent component must have inspection entry")}
  # 'inspect' may be the name of an object holding the interval; resolve
  # it by evaluating that object.
  if(is.character(inspect)) {
    # BUG FIX: the original tested exists("inspect"), which checks the
    # parameter itself and is therefore always TRUE; test whether the
    # *named* object exists so the error branch is reachable.
    if(exists(inspect)) {
      Tao<-eval((parse(text=inspect)))
    }else{
      stop("inspection interval object does not exist")
    }
  }else{
    Tao=inspect
  }
  ## pzero is no longer provided as an argument.
  ## pzero is calculated based on mttr, if it is provided
  pzero<-0
  if(length(mttr)>0 && mttr>0) {
    pzero=mttr/(mttf+mttr)
  }
  if(risk == "mean") {
    ## fractional downtime method
    pf<-1-1/((1/mttf)*Tao)*(1-exp(-(1/mttf)*Tao))
    pf<- 1-(1-pf)*(1-pzero)
  }else{
    if(risk == "max") {
      ## The maximum risk probability
      pf<-1-exp(-(1/mttf)*Tao)
    }else{
      stop("only 'mean' or 'max' accepted for risk argument")
    }
  }
  ## Now it is okay to set mttr to -1 for ftree entry
  if(is.null(mttr) || !mttr>0) { mttr<- (-1)}
  Dfrow<-data.frame(
    ID= thisID ,
    GParent= gp ,
    Tag= tag ,
    Type= tp ,
    CFR= 1/mttf ,
    PBF= pf ,
    CRT= mttr ,
    MOE= 0 ,
    Condition= condition,
    Cond_Code= 0,
    EType= etp,
    P1= pzero ,
    P2= Tao ,
    Collapse= 0 ,
    Label= label ,
    Name= name ,
    Name2= name2 ,
    CParent= at ,
    Level= DF$Level[parent]+1 ,
    Description= description ,
    UType= 0 ,
    UP1= 0 ,
    UP2= 0
  )
  DF<-rbind(DF, Dfrow)
  DF
}
|
#' LU
#'
#' Computes the LU factorization of a matrix or batches of matrices A. Returns a
#' tuple containing the LU factorization and pivots of A. Pivoting is done if pivot
#' is set to True.
#'
#' @param A (Tensor) the tensor to factor of size (*, m, n)(*,m,n)
#' @param pivot (bool, optional) – controls whether pivoting is done. Default: TRUE
#' @param get_infos (bool, optional) – if set to True, returns an info IntTensor. Default: FALSE
#' @param out (tuple, optional) – optional output tuple. If get_infos is True, then the elements
#' in the tuple are Tensor, IntTensor, and IntTensor. If get_infos is False, then the
#' elements in the tuple are Tensor, IntTensor. Default: NULL
#'
#' @examples
#'
#' A = torch_randn(c(2, 3, 3))
#' torch_lu(A)
#'
#' @export
torch_lu <- function(A, pivot=TRUE, get_infos=FALSE, out=NULL) {
  # If get_infos is True, then we don't need to check for errors and vice versa
  result <- torch__lu_with_info(A, pivot, get_infos)
  if (!is.null(out)) {
    if (!is.list(out))
      stop("argument 'out' must be a list of Tensors.")
    if (length(out) - as.integer(get_infos) != 2) {
      stop("expected tuple of ", 2 + as.integer(get_infos), " elements but got ",
           length(out))
    }
    # Copy each result tensor into the caller-supplied output tensors.
    # BUG FIX: seq_len(out) errors on a list argument; iterate over the
    # indices of 'out' with seq_along().
    for (i in seq_along(out)) {
      out[[i]] <- out[[i]]$resize_as_(result[[i]])$copy_(result[[i]])
    }
    return(out)
  }
  if (get_infos)
    return(result)
  else
    return(result[1:2])
}
# Thin R wrapper delegating directly to the generated binding
# '.torch_logical_not'; 'self' is forwarded unchanged.
torch_logical_not <- function(self) {
  .torch_logical_not(self)
}
#' @rdname torch_bartlett_window
torch_bartlett_window <- function(window_length, periodic=TRUE, dtype=NULL,
                                  layout=torch_strided(), device=NULL,
                                  requires_grad=FALSE) {
  # Bundle the tensor creation options, then delegate to the generated
  # binding for the Bartlett window.
  win_opts <- torch_tensor_options(
    dtype = dtype,
    layout = layout,
    device = device,
    requires_grad = requires_grad
  )
  .torch_bartlett_window(
    window_length = window_length,
    periodic = periodic,
    options = win_opts
  )
}
#' @rdname torch_blackman_window
torch_blackman_window <- function(window_length, periodic=TRUE, dtype=NULL,
                                  layout=torch_strided(), device=NULL,
                                  requires_grad=FALSE) {
  # Bundle the tensor creation options, then delegate to the generated
  # binding for the Blackman window.
  win_opts <- torch_tensor_options(
    dtype = dtype,
    layout = layout,
    device = device,
    requires_grad = requires_grad
  )
  .torch_blackman_window(
    window_length = window_length,
    periodic = periodic,
    options = win_opts
  )
}
#' @rdname torch_hamming_window
torch_hamming_window <- function(window_length, periodic=TRUE, alpha=0.54,
                                 beta=0.46, dtype=NULL, layout=torch_strided(),
                                 device=NULL, requires_grad=FALSE) {
  # Bundle the tensor creation options, then forward everything
  # (including the alpha/beta window coefficients) to the generated
  # binding for the Hamming window.
  win_opts <- torch_tensor_options(
    dtype = dtype,
    layout = layout,
    device = device,
    requires_grad = requires_grad
  )
  .torch_hamming_window(
    window_length = window_length,
    periodic = periodic,
    alpha = alpha,
    beta = beta,
    options = win_opts
  )
}
#' @rdname torch_hann_window
torch_hann_window <- function(window_length, periodic=TRUE, dtype=NULL,
                              layout=torch_strided(), device=NULL,
                              requires_grad=FALSE) {
  # FIX: validate the required argument before doing any work; the
  # original only checked for NULL after building the options object.
  if (is.null(window_length))
    value_error("argument 'window_length' must be int, not NULL")
  # Default to float when the caller supplies no dtype.
  if (is.null(dtype))
    dtype <- torch_float()
  opt <- torch_tensor_options(dtype = dtype, layout = layout, device = device,
                              requires_grad = requires_grad)
  .torch_hann_window(window_length = window_length, periodic = periodic,
                     options = opt)
}
#' @rdname torch_result_type
# Computes the promoted dtype of two operands by dispatching to one of
# four C++ bindings depending on whether each operand is a tensor or a
# plain R value (wrapped as a torch scalar).
torch_result_type <- function(tensor1, tensor2) {
  # tensor / tensor
  if (is_torch_tensor(tensor1) && is_torch_tensor(tensor2)) {
    o <- cpp_torch_namespace_result_type_other_Tensor_tensor_Tensor(
      tensor1$ptr,
      tensor2$ptr
    )
  # tensor / scalar
  } else if (is_torch_tensor(tensor1) && !is_torch_tensor(tensor2)) {
    o <- cpp_torch_namespace_result_type_other_Scalar_tensor_Tensor(
      tensor1$ptr,
      torch_scalar(tensor2)$ptr
    )
  # scalar / tensor
  } else if (!is_torch_tensor(tensor1) && is_torch_tensor(tensor2)) {
    o <- cpp_torch_namespace_result_type_scalar_Scalar_tensor_Tensor(
      torch_scalar(tensor1)$ptr,
      tensor2$ptr
    )
  # scalar / scalar
  } else if (!is_torch_tensor(tensor1) && !is_torch_tensor(tensor2)) {
    o <- cpp_torch_namespace_result_type_scalar1_Scalar_scalar2_Scalar(
      torch_scalar(tensor1)$ptr,
      torch_scalar(tensor2)$ptr
    )
  }
  # Wrap the returned pointer in an R dtype object.
  torch_dtype$new(ptr = o)
}
#' @rdname torch_sparse_coo_tensor
torch_sparse_coo_tensor <- function(indices, values, size=NULL, dtype=NULL,
                                    device=NULL, requires_grad=FALSE) {
  # Only forward `size` when supplied so the binding can infer it otherwise.
  tensor_options <- torch_tensor_options(dtype = dtype, device = device,
                                         requires_grad = requires_grad)
  if (is.null(size)) {
    .torch_sparse_coo_tensor(indices, values, options = tensor_options)
  } else {
    .torch_sparse_coo_tensor(indices, values, size = size,
                             options = tensor_options)
  }
}
#' @rdname torch_stft
torch_stft <- function(input, n_fft, hop_length=NULL, win_length=NULL,
                       window=NULL, center=TRUE, pad_mode='reflect',
                       normalized=FALSE, onesided=TRUE, return_complex = NULL) {
  # When `center` is TRUE the signal is padded by n_fft %/% 2 on both sides
  # so frames are centered on their timestamps.
  if (center) {
    signal_dim <- input$dim()
    # nnf_pad is applied on a view with at least 3 dims; prepend singleton
    # dimensions as needed.
    extended_shape <- c(
      rep(1, 3 - signal_dim),
      input$size()
    )
    pad <- as.integer(n_fft %/% 2)
    input <- nnf_pad(input = input$view(extended_shape), pad = c(pad, pad),
                     mode = pad_mode)
    # Drop the temporary singleton dimensions again.
    input <- input$view(utils::tail(input$shape, signal_dim))
  }
  # Default to a real-valued result when the caller does not ask otherwise.
  if (is.null(return_complex))
    return_complex <- FALSE
  .torch_stft(self = input, n_fft = n_fft, hop_length = hop_length,
              win_length = win_length, window = window,
              normalized = normalized, onesided = onesided,
              return_complex = return_complex)
}
#' @rdname torch_tensordot
torch_tensordot <- function(a, b, dims = 2) {
  # Resolve `dims` into explicit dimension vectors for each operand.
  if (is.list(dims)) {
    # A two-element list gives the dims of `a` and `b` directly.
    dims_a <- dims[[1]]
    dims_b <- dims[[2]]
  } else if (is_torch_tensor(dims) && dims$numel() > 1) {
    # A multi-element tensor: first entry for `a`, second for `b`.
    dims_a <- as_array(dims[1])
    dims_b <- as_array(dims[2])
  } else {
    if (is_torch_tensor(dims))
      dims <- dims$item()
    if (dims < 1)
      runtime_error("tensordot expects dims >= 1, but got {dims}")
    # Contract the last `dims` axes of `a` with the first `dims` axes of `b`.
    dims_a <- -rev(seq_len(dims))
    dims_b <- seq_len(dims)
  }
  .torch_tensordot(a, b, dims_a, dims_b)
}
#' @rdname torch_tril_indices
torch_tril_indices <- function(row, col, offset=0, dtype=torch_long(),
                               device='cpu', layout=torch_strided()) {
  # Collect tensor options and delegate to the generated binding.
  tensor_options <- torch_tensor_options(dtype = dtype, device = device,
                                         layout = layout)
  .torch_tril_indices(row, col, offset, options = tensor_options)
}
#' @rdname torch_triu_indices
torch_triu_indices <- function(row, col, offset=0, dtype=torch_long(),
                               device='cpu', layout=torch_strided()) {
  # Collect tensor options and delegate to the generated binding.
  tensor_options <- torch_tensor_options(dtype = dtype, device = device,
                                         layout = layout)
  .torch_triu_indices(row, col, offset, options = tensor_options)
}
#' @rdname torch_multilabel_margin_loss
torch_multilabel_margin_loss <- function(self, target, reduction = torch_reduction_mean()) {
  # `target` carries 1-based class indices on the R side; convert to the
  # 0-based indices the backend expects before delegating.
  .torch_multilabel_margin_loss(self, as_1_based_tensor(target), reduction)
}
#' @rdname torch_multi_margin_loss
torch_multi_margin_loss <- function(self, target, p = 1L, margin = 1L, weight = list(),
                                    reduction = torch_reduction_mean()) {
  # `target` carries 1-based class indices on the R side; convert to the
  # 0-based indices the backend expects before delegating.
  .torch_multi_margin_loss(self, as_1_based_tensor(target), p, margin, weight,
                           reduction)
}
#' @rdname torch_topk
torch_topk <- function(self, k, dim = -1L, largest = TRUE, sorted = TRUE) {
  # The backend returns 0-based indices in the second element; shift them
  # in place to R's 1-based convention before returning.
  result <- .torch_topk(self, k, dim, largest, sorted)
  result[[2]]$add_(1L)
  result
}
#' @rdname torch_narrow
torch_narrow <- function(self, dim, start, length) {
  # Convert the 1-based `start` to the 0-based index the backend expects,
  # rejecting 0 explicitly since R indexing starts at 1.
  start <- torch_scalar_tensor(start, dtype = torch_int64())
  if (start$item() == 0) {
    value_error("start indexing starts at 1")
  }
  .torch_narrow(self, dim, start - 1L, length)
}
#' @rdname torch_quantize_per_tensor
torch_quantize_per_tensor <- function(self, scale, zero_point, dtype) {
  # The generated binding uses plural argument names for the list variants,
  # so pick the right name for each argument based on its type.
  call_args <- list(dtype = dtype)
  if (is.list(self)) call_args$tensors <- self else call_args$self <- self
  if (is.list(scale)) call_args$scales <- scale else call_args$scale <- scale
  if (is.list(zero_point)) {
    call_args$zero_points <- zero_point
  } else {
    call_args$zero_point <- zero_point
  }
  do.call(.torch_quantize_per_tensor, call_args)
}
#' @rdname torch_upsample_nearest1d
torch_upsample_nearest1d <- function(input, self, output_size = NULL,
                                     scale_factors = NULL,
                                     scales = NULL) {
  # `self` is optional and only forwarded when the caller supplied it.
  call_args <- list(input = input, output_size = output_size,
                    scale_factors = scale_factors, scales = scales)
  if (!missing(self)) {
    call_args$self <- self
  }
  do.call(.torch_upsample_nearest1d, call_args)
}
#' @rdname torch_upsample_nearest2d
torch_upsample_nearest2d <- function(input, self, output_size = NULL,
                                     scale_factors = NULL,
                                     scales_h = NULL, scales_w = NULL) {
  # `self` is optional and only forwarded when the caller supplied it.
  call_args <- list(input = input, output_size = output_size,
                    scale_factors = scale_factors,
                    scales_h = scales_h, scales_w = scales_w)
  if (!missing(self)) {
    call_args$self <- self
  }
  do.call(.torch_upsample_nearest2d, call_args)
}
#' @rdname torch_upsample_nearest3d
torch_upsample_nearest3d <- function(input, self, output_size = NULL,
                                     scale_factors = NULL, scales_d = NULL,
                                     scales_h = NULL, scales_w = NULL) {
  # `self` is optional and only forwarded when the caller supplied it.
  call_args <- list(input = input, output_size = output_size,
                    scale_factors = scale_factors, scales_d = scales_d,
                    scales_h = scales_h, scales_w = scales_w)
  if (!missing(self)) {
    call_args$self <- self
  }
  do.call(.torch_upsample_nearest3d, call_args)
}
#' @rdname torch_upsample_nearest3d
torch_upsample_trilinear3d <- function(input, self, output_size = NULL, align_corners,
                                       scale_factors = NULL, scales_d = NULL, scales_h = NULL,
                                       scales_w = NULL) {
  # NOTE(review): the @rdname above groups this function with
  # torch_upsample_nearest3d — confirm sharing that doc page is intended.
  # `self` and `align_corners` are optional; forward them only when given.
  call_args <- list(input = input, output_size = output_size,
                    scale_factors = scale_factors, scales_d = scales_d,
                    scales_h = scales_h, scales_w = scales_w)
  if (!missing(self)) {
    call_args$self <- self
  }
  if (!missing(align_corners)) {
    call_args$align_corners <- align_corners
  }
  do.call(.torch_upsample_trilinear3d, call_args)
}
#' @rdname torch_atleast_1d
torch_atleast_1d <- function(self) {
  # A single tensor maps to `self`; a list of tensors maps to `tensors`.
  if (is_torch_tensor(self)) {
    .torch_atleast_1d(self = self)
  } else {
    .torch_atleast_1d(tensors = self)
  }
}
#' @rdname torch_atleast_2d
torch_atleast_2d <- function(self) {
  # A single tensor maps to `self`; a list of tensors maps to `tensors`.
  if (is_torch_tensor(self)) {
    .torch_atleast_2d(self = self)
  } else {
    .torch_atleast_2d(tensors = self)
  }
}
#' @rdname torch_atleast_3d
torch_atleast_3d <- function(self) {
  # A single tensor maps to `self`; a list of tensors maps to `tensors`.
  if (is_torch_tensor(self)) {
    .torch_atleast_3d(self = self)
  } else {
    .torch_atleast_3d(tensors = self)
  }
}
#' @rdname torch_dequantize
torch_dequantize <- function(tensor) {
  # A single tensor maps to `self`; a list of tensors maps to `tensors`.
  if (is_torch_tensor(tensor)) {
    .torch_dequantize(self = tensor)
  } else {
    .torch_dequantize(tensors = tensor)
  }
}
#' @rdname torch_kaiser_window
torch_kaiser_window <- function(window_length, periodic, beta, dtype = torch_float(),
                                layout = NULL, device = NULL, requires_grad = NULL) {
  # `beta` is only forwarded when the caller supplied it, so the binding's
  # own default applies otherwise.
  opts <- torch_tensor_options(dtype = dtype, layout = layout, device = device,
                               requires_grad = requires_grad)
  call_args <- list(window_length = window_length, periodic = periodic,
                    options = opts)
  if (!missing(beta)) {
    call_args$beta <- beta
  }
  do.call(.torch_kaiser_window, call_args)
}
#' @rdname torch_vander
torch_vander <- function(x, N = NULL, increasing = FALSE) {
  # Vandermonde matrix; N = NULL defers the column count to the backend
  # default.
  .torch_vander(x, N, increasing)
}
#' @rdname torch_movedim
torch_movedim <- function(self, source, destination) {
  # Translate 1-based R dimension indices to the backend's 0-based ones.
  .torch_movedim(self, as_1_based_dim(source), as_1_based_dim(destination))
}
#' @rdname torch_norm
torch_norm <- function(self, p = 2L, dim, keepdim = FALSE, dtype) {
  # Norm of a tensor. Dispatches to different C++ bindings depending on
  # whether `dim` was supplied and whether it is numeric or a dimname.
  if (missing(dtype)) {
    dtype <- self$dtype
  }
  p <- Scalar$new(p)
  # NOTE(review): after the assignment above, missing(dtype) is always FALSE,
  # so this condition reduces to missing(dim) — confirm that was the intent.
  if (missing(dim) && !missing(dtype)) {
    o <- cpp_torch_namespace_norm_self_Tensor_p_Scalar_dtype_ScalarType(
      self = self$ptr,
      p = p$ptr,
      dtype = dtype$ptr
    )
    return(Tensor$new(ptr = o))
  }
  # `dim` supplied as integers: reduce over those axes.
  if (is.numeric(unlist(dim))) {
    o <- cpp_torch_namespace_norm_self_Tensor_p_Scalar_dim_IntArrayRef_keepdim_bool_dtype_ScalarType(
      self = self$ptr, p = p$ptr, dim = unlist(dim), keepdim = keepdim, dtype = dtype$ptr
    )
  } else if (is.character(unlist(dim))){
    # `dim` supplied as dimension names.
    o <- cpp_torch_namespace_norm_self_Tensor_p_Scalar_dim_DimnameList_keepdim_bool_dtype_ScalarType(
      self = self$ptr, p = p$ptr, dim = DimnameList$new(unlist(dim))$ptr, keepdim = keepdim, dtype = dtype$ptr
    )
  }
  # NOTE(review): if `dim` is neither numeric nor character, `o` is undefined
  # and this fails with "object 'o' not found" — consider an explicit error.
  Tensor$new(ptr = o)
}
torch_one_hot <- function(self, num_classes = -1L) {
  # Shift 1-based R class indices to the 0-based indices the backend expects.
  .torch_one_hot(as_1_based_tensor(self), num_classes)
}
#' @rdname torch_split
torch_split <- function(self, split_size, dim = 1L) {
  # A vector of sizes dispatches to split_with_sizes; a scalar splits into
  # equally sized chunks.
  if (length(split_size) > 1) {
    torch_split_with_sizes(self, split_size, dim)
  } else {
    .torch_split(self, split_size, dim)
  }
}
#' @rdname torch_nonzero
torch_nonzero <- function(self, as_list = FALSE) {
  # Indices from the backend are 0-based; shift to R's 1-based convention.
  if (as_list) {
    lapply(torch_nonzero_numpy(self), function(idx) idx + 1L)
  } else {
    .torch_nonzero(self) + 1L
  }
}
#' Normal distributed
#'
#' @param mean (tensor or scalar double) Mean of the normal distribution.
#' If this is a [torch_tensor()] then the output has the same dim as `mean`
#' and it represents the per-element mean. If it's a scalar value, it's reused
#' for all elements.
#' @param std (tensor or scalar double) The standard deviation of the normal
#' distribution. If this is a [torch_tensor()] then the output has the same size as `std`
#' and it represents the per-element standard deviation. If it's a scalar value,
#' it's reused for all elements.
#' @param size (integers, optional) only used if both `mean` and `std` are scalars.
#' @param generator a random number generator created with [torch_generator()]. If `NULL`
#' a default generator is used.
#' @param ... Tensor option parameters like `dtype`, `layout`, and `device`.
#' Can only be used when `mean` and `std` are both scalar numerics.
#'
#' @rdname torch_normal
#'
#' @export
torch_normal <- function(mean, std, size = NULL, generator = NULL, ...) {
  # Standard normal defaults when the caller omits the distribution params.
  if (missing(mean))
    mean <- 0
  if (missing(std))
    std <- 1
  # `size` and `...` (tensor options) are only valid for the scalar/scalar case.
  if (!is.null(size)) {
    if (is_torch_tensor(mean) || is_torch_tensor(std))
      value_error("size is set, but one of mean or std is not a scalar value.")
  }
  if (length(list(...)) > 0) {
    if (is_torch_tensor(mean) || is_torch_tensor(std))
      value_error("options is set, but one of mean or std is not a scalar value.")
  }
  if (is.null(generator))
    generator <- .generator_null
  # Scalar mean and std require an explicit output size.
  if (!is_torch_tensor(mean) && !is_torch_tensor(std) && is.null(size))
    value_error("size is not set.")
  # Case 1: scalar mean, scalar std, explicit size.
  if (!is.null(size)) {
    if (is.list(size)) size <- unlist(size)
    options <- do.call(torch_tensor_options, list(...))
    return(Tensor$new(ptr = cpp_namespace_normal_double_double(
      mean = mean,
      std = std,
      size = size,
      generator = generator$ptr,
      options = options
    )))
  }
  # Case 2: per-element mean and std.
  if (is_torch_tensor(mean) && is_torch_tensor(std)) {
    return(Tensor$new(ptr = cpp_namespace_normal_tensor_tensor(
      mean = mean$ptr,
      std = std$ptr,
      generator = generator$ptr
    )))
  }
  # Case 3: per-element mean, scalar std.
  if (is_torch_tensor(mean)) {
    return(Tensor$new(ptr = cpp_namespace_normal_tensor_double(
      mean = mean$ptr,
      std = std,
      generator = generator$ptr
    )))
  }
  # Case 4: scalar mean, per-element std.
  if (is_torch_tensor(std)) {
    return(Tensor$new(ptr = cpp_namespace_normal_double_tensor(
      mean = mean,
      std = std$ptr,
      generator = generator$ptr
    )))
  }
  # All valid argument combinations are handled above.
  value_error("Unreachable state reached. Please report a bug on GitHub.")
}
#' @rdname torch_polygamma
torch_polygamma <- function(n, input) {
  # Work on a copy so the caller's tensor is left untouched by the
  # in-place polygamma_ call.
  out <- input$clone()
  out$polygamma_(n = n)
  out
}
#' @rdname torch_fft_fft
torch_fft_fft <- function(self, n = NULL, dim = -1L, norm = NULL) {
  # NULL norm falls back to the "backward" normalization mode.
  if (is.null(norm)) {
    norm <- "backward"
  }
  .torch_fft_fft(self = self, n = n, dim = dim, norm = norm)
}
#' @rdname torch_fft_ifft
torch_fft_ifft <- function(self, n = NULL, dim = -1L, norm = NULL) {
  # NULL norm falls back to the "backward" normalization mode.
  if (is.null(norm)) {
    norm <- "backward"
  }
  .torch_fft_ifft(self = self, n = n, dim = dim, norm = norm)
}
#' @rdname torch_fft_rfft
torch_fft_rfft <- function(self, n = NULL, dim = -1L, norm = NULL) {
  # NULL norm falls back to the "backward" normalization mode.
  if (is.null(norm)) {
    norm <- "backward"
  }
  .torch_fft_rfft(self = self, n = n, dim = dim, norm = norm)
}
#' @rdname torch_fft_irfft
torch_fft_irfft <- function(self, n = NULL, dim = -1L, norm = NULL) {
  # NULL norm falls back to the "backward" normalization mode.
  if (is.null(norm)) {
    norm <- "backward"
  }
  .torch_fft_irfft(self = self, n = n, dim = dim, norm = norm)
}
torch_broadcast_shapes <- function(...) {
  # Expand a zero scalar to each requested shape and let broadcast_tensors
  # compute the common result shape; gradients are disabled since this is
  # pure shape bookkeeping.
  shapes <- rlang::list2(...)
  with_no_grad({
    zero <- torch_scalar_tensor(0, device="cpu")
    expanded <- lapply(shapes, function(shape) zero$expand(shape))
    result <- torch_broadcast_tensors(expanded)[[1]]$shape
  })
  result
}
#'@rdname torch_multinomial
torch_multinomial <- function(self, num_samples, replacement = FALSE, generator = NULL) {
  # The backend returns 0-based indices; shift in place to R's 1-based
  # convention without recording the addition in the autograd graph.
  out <- .torch_multinomial(self, num_samples, replacement = replacement, generator = generator)
  with_no_grad({
    out$add_(torch_scalar(1L))
  })
  out
}
#' Index torch tensors
#'
#' Helper functions to index tensors.
#'
#' @param self (Tensor) Tensor that will be indexed.
#' @param indices (`List[Tensor]`) List of indices. Indices are torch tensors with
#' `torch_long()` dtype.
#'
#' @name torch_index
#' @export
NULL
#' In-place version of `torch_index_put`.
#' @name torch_index_put_
#' @inheritParams torch_index
#' @param values (Tensor) values that will replace the indexed locations. Used
#' for `torch_index_put` and `torch_index_put_`.
#' @param accumulate (bool) Whether instead of replacing the current values with `values`,
#' you want to add them.
#' @export
NULL
#' Modify values selected by `indices`.
#' @inheritParams torch_index_put_
#' @name torch_index_put
#' @export
NULL
|
/R/wrapers.R
|
permissive
|
snapbuy/torch
|
R
| false
| false
| 18,612
|
r
|
#' LU
#'
#' Computes the LU factorization of a matrix or batches of matrices `A`. Returns a
#' tuple containing the LU factorization and pivots of `A`. Pivoting is done if
#' `pivot` is set to `TRUE`.
#'
#' @param A (Tensor) the tensor to factor of size (*, m, n)
#' @param pivot (bool, optional) controls whether pivoting is done. Default: TRUE
#' @param get_infos (bool, optional) if set to TRUE, returns an info IntTensor. Default: FALSE
#' @param out (tuple, optional) optional output tuple. If get_infos is TRUE, then the elements
#'   in the tuple are Tensor, IntTensor, and IntTensor. If get_infos is FALSE, then the
#'   elements in the tuple are Tensor, IntTensor. Default: NULL
#'
#' @examples
#'
#' A = torch_randn(c(2, 3, 3))
#' torch_lu(A)
#'
#' @export
torch_lu <- function(A, pivot=TRUE, get_infos=FALSE, out=NULL) {
  # If get_infos is TRUE, then we don't need to check for errors and vice versa
  result <- torch__lu_with_info(A, pivot, get_infos)
  if (!is.null(out)) {
    if (!is.list(out))
      stop("argument 'out' must be a list of Tensors.")
    if (length(out) - as.integer(get_infos) != 2) {
      stop("expected tuple of ", 2 + as.integer(get_infos), " elements but got ",
           length(out))
    }
    # BUG FIX: the original used seq_len(out), but seq_len() requires a single
    # non-negative integer and errors on a list; seq_along() iterates the list.
    for (i in seq_along(out)) {
      out[[i]] <- out[[i]]$resize_as_(result[[i]])$copy_(result[[i]])
    }
    return(out)
  }
  if (get_infos)
    return(result)
  else
    return(result[1:2])
}
torch_logical_not <- function(self) {
  # Element-wise logical NOT; thin wrapper over the generated binding.
  .torch_logical_not(self)
}
#' @rdname torch_bartlett_window
torch_bartlett_window <- function(window_length, periodic=TRUE, dtype=NULL,
                                  layout=torch_strided(), device=NULL,
                                  requires_grad=FALSE) {
  # Bundle the tensor creation options and delegate to the generated binding.
  tensor_options <- torch_tensor_options(
    dtype = dtype, layout = layout, device = device,
    requires_grad = requires_grad
  )
  .torch_bartlett_window(window_length = window_length, periodic = periodic,
                         options = tensor_options)
}
#' @rdname torch_blackman_window
torch_blackman_window <- function(window_length, periodic=TRUE, dtype=NULL,
                                  layout=torch_strided(), device=NULL,
                                  requires_grad=FALSE) {
  # Bundle the tensor creation options and delegate to the generated binding.
  tensor_options <- torch_tensor_options(
    dtype = dtype, layout = layout, device = device,
    requires_grad = requires_grad
  )
  .torch_blackman_window(window_length = window_length, periodic = periodic,
                         options = tensor_options)
}
#' @rdname torch_hamming_window
torch_hamming_window <- function(window_length, periodic=TRUE, alpha=0.54,
                                 beta=0.46, dtype=NULL, layout=torch_strided(),
                                 device=NULL, requires_grad=FALSE) {
  # Bundle the tensor creation options, then forward the window coefficients
  # (alpha/beta default to the classic Hamming values) to the binding.
  tensor_options <- torch_tensor_options(
    dtype = dtype, layout = layout, device = device,
    requires_grad = requires_grad
  )
  .torch_hamming_window(window_length = window_length, periodic = periodic,
                        alpha = alpha, beta = beta, options = tensor_options)
}
#' @rdname torch_hann_window
torch_hann_window <- function(window_length, periodic=TRUE, dtype=NULL,
                              layout=torch_strided(), device=NULL,
                              requires_grad=FALSE) {
  # Hann windows default to float when no dtype is requested.
  if (is.null(dtype)) {
    dtype <- torch_float()
  }
  tensor_options <- torch_tensor_options(
    dtype = dtype, layout = layout, device = device,
    requires_grad = requires_grad
  )
  # Reject a NULL length explicitly so users get a clear message.
  if (is.null(window_length)) {
    value_error("argument 'window_length' must be int, not NULL")
  }
  .torch_hann_window(window_length = window_length, periodic = periodic,
                     options = tensor_options)
}
#' @rdname torch_result_type
torch_result_type <- function(tensor1, tensor2) {
  # Computes the promoted dtype of the two inputs. Each tensor/non-tensor
  # combination maps to a dedicated C++ binding; non-tensor inputs are
  # wrapped as torch scalars before dispatch.
  if (is_torch_tensor(tensor1) && is_torch_tensor(tensor2)) {
    # tensor, tensor
    o <- cpp_torch_namespace_result_type_other_Tensor_tensor_Tensor(
      tensor1$ptr,
      tensor2$ptr
    )
  } else if (is_torch_tensor(tensor1) && !is_torch_tensor(tensor2)) {
    # tensor, scalar
    o <- cpp_torch_namespace_result_type_other_Scalar_tensor_Tensor(
      tensor1$ptr,
      torch_scalar(tensor2)$ptr
    )
  } else if (!is_torch_tensor(tensor1) && is_torch_tensor(tensor2)) {
    # scalar, tensor
    o <- cpp_torch_namespace_result_type_scalar_Scalar_tensor_Tensor(
      torch_scalar(tensor1)$ptr,
      tensor2$ptr
    )
  } else if (!is_torch_tensor(tensor1) && !is_torch_tensor(tensor2)) {
    # scalar, scalar
    o <- cpp_torch_namespace_result_type_scalar1_Scalar_scalar2_Scalar(
      torch_scalar(tensor1)$ptr,
      torch_scalar(tensor2)$ptr
    )
  }
  # Wrap the returned pointer in an R dtype object.
  torch_dtype$new(ptr = o)
}
#' @rdname torch_sparse_coo_tensor
torch_sparse_coo_tensor <- function(indices, values, size=NULL, dtype=NULL,
                                    device=NULL, requires_grad=FALSE) {
  # Only forward `size` when supplied so the binding can infer it otherwise.
  tensor_options <- torch_tensor_options(dtype = dtype, device = device,
                                         requires_grad = requires_grad)
  if (is.null(size)) {
    .torch_sparse_coo_tensor(indices, values, options = tensor_options)
  } else {
    .torch_sparse_coo_tensor(indices, values, size = size,
                             options = tensor_options)
  }
}
#' @rdname torch_stft
torch_stft <- function(input, n_fft, hop_length=NULL, win_length=NULL,
                       window=NULL, center=TRUE, pad_mode='reflect',
                       normalized=FALSE, onesided=TRUE, return_complex = NULL) {
  # When `center` is TRUE the signal is padded by n_fft %/% 2 on both sides
  # so frames are centered on their timestamps.
  if (center) {
    signal_dim <- input$dim()
    # nnf_pad is applied on a view with at least 3 dims; prepend singleton
    # dimensions as needed.
    extended_shape <- c(
      rep(1, 3 - signal_dim),
      input$size()
    )
    pad <- as.integer(n_fft %/% 2)
    input <- nnf_pad(input = input$view(extended_shape), pad = c(pad, pad),
                     mode = pad_mode)
    # Drop the temporary singleton dimensions again.
    input <- input$view(utils::tail(input$shape, signal_dim))
  }
  # Default to a real-valued result when the caller does not ask otherwise.
  if (is.null(return_complex))
    return_complex <- FALSE
  .torch_stft(self = input, n_fft = n_fft, hop_length = hop_length,
              win_length = win_length, window = window,
              normalized = normalized, onesided = onesided,
              return_complex = return_complex)
}
#' @rdname torch_tensordot
torch_tensordot <- function(a, b, dims = 2) {
  # Resolve `dims` into explicit dimension vectors for each operand.
  if (is.list(dims)) {
    # A two-element list gives the dims of `a` and `b` directly.
    dims_a <- dims[[1]]
    dims_b <- dims[[2]]
  } else if (is_torch_tensor(dims) && dims$numel() > 1) {
    # A multi-element tensor: first entry for `a`, second for `b`.
    dims_a <- as_array(dims[1])
    dims_b <- as_array(dims[2])
  } else {
    if (is_torch_tensor(dims))
      dims <- dims$item()
    if (dims < 1)
      runtime_error("tensordot expects dims >= 1, but got {dims}")
    # Contract the last `dims` axes of `a` with the first `dims` axes of `b`.
    dims_a <- -rev(seq_len(dims))
    dims_b <- seq_len(dims)
  }
  .torch_tensordot(a, b, dims_a, dims_b)
}
#' @rdname torch_tril_indices
torch_tril_indices <- function(row, col, offset=0, dtype=torch_long(),
                               device='cpu', layout=torch_strided()) {
  # Collect tensor options and delegate to the generated binding.
  tensor_options <- torch_tensor_options(dtype = dtype, device = device,
                                         layout = layout)
  .torch_tril_indices(row, col, offset, options = tensor_options)
}
#' @rdname torch_triu_indices
torch_triu_indices <- function(row, col, offset=0, dtype=torch_long(),
                               device='cpu', layout=torch_strided()) {
  # Collect tensor options and delegate to the generated binding.
  tensor_options <- torch_tensor_options(dtype = dtype, device = device,
                                         layout = layout)
  .torch_triu_indices(row, col, offset, options = tensor_options)
}
#' @rdname torch_multilabel_margin_loss
torch_multilabel_margin_loss <- function(self, target, reduction = torch_reduction_mean()) {
  # `target` carries 1-based class indices on the R side; convert to the
  # 0-based indices the backend expects before delegating.
  .torch_multilabel_margin_loss(self, as_1_based_tensor(target), reduction)
}
#' @rdname torch_multi_margin_loss
torch_multi_margin_loss <- function(self, target, p = 1L, margin = 1L, weight = list(),
                                    reduction = torch_reduction_mean()) {
  # `target` carries 1-based class indices on the R side; convert to the
  # 0-based indices the backend expects before delegating.
  .torch_multi_margin_loss(self, as_1_based_tensor(target), p, margin, weight,
                           reduction)
}
#' @rdname torch_topk
torch_topk <- function(self, k, dim = -1L, largest = TRUE, sorted = TRUE) {
  # The backend returns 0-based indices in the second element; shift them
  # in place to R's 1-based convention before returning.
  result <- .torch_topk(self, k, dim, largest, sorted)
  result[[2]]$add_(1L)
  result
}
#' @rdname torch_narrow
torch_narrow <- function(self, dim, start, length) {
  # Convert the 1-based `start` to the 0-based index the backend expects,
  # rejecting 0 explicitly since R indexing starts at 1.
  start <- torch_scalar_tensor(start, dtype = torch_int64())
  if (start$item() == 0) {
    value_error("start indexing starts at 1")
  }
  .torch_narrow(self, dim, start - 1L, length)
}
#' @rdname torch_quantize_per_tensor
torch_quantize_per_tensor <- function(self, scale, zero_point, dtype) {
  # The generated binding uses plural argument names for the list variants,
  # so pick the right name for each argument based on its type.
  call_args <- list(dtype = dtype)
  if (is.list(self)) call_args$tensors <- self else call_args$self <- self
  if (is.list(scale)) call_args$scales <- scale else call_args$scale <- scale
  if (is.list(zero_point)) {
    call_args$zero_points <- zero_point
  } else {
    call_args$zero_point <- zero_point
  }
  do.call(.torch_quantize_per_tensor, call_args)
}
#' @rdname torch_upsample_nearest1d
torch_upsample_nearest1d <- function(input, self, output_size = NULL,
                                     scale_factors = NULL,
                                     scales = NULL) {
  # `self` is optional and only forwarded when the caller supplied it.
  call_args <- list(input = input, output_size = output_size,
                    scale_factors = scale_factors, scales = scales)
  if (!missing(self)) {
    call_args$self <- self
  }
  do.call(.torch_upsample_nearest1d, call_args)
}
#' @rdname torch_upsample_nearest2d
torch_upsample_nearest2d <- function(input, self, output_size = NULL,
                                     scale_factors = NULL,
                                     scales_h = NULL, scales_w = NULL) {
  # `self` is optional and only forwarded when the caller supplied it.
  call_args <- list(input = input, output_size = output_size,
                    scale_factors = scale_factors,
                    scales_h = scales_h, scales_w = scales_w)
  if (!missing(self)) {
    call_args$self <- self
  }
  do.call(.torch_upsample_nearest2d, call_args)
}
#' @rdname torch_upsample_nearest3d
torch_upsample_nearest3d <- function(input, self, output_size = NULL,
                                     scale_factors = NULL, scales_d = NULL,
                                     scales_h = NULL, scales_w = NULL) {
  # `self` is optional and only forwarded when the caller supplied it.
  call_args <- list(input = input, output_size = output_size,
                    scale_factors = scale_factors, scales_d = scales_d,
                    scales_h = scales_h, scales_w = scales_w)
  if (!missing(self)) {
    call_args$self <- self
  }
  do.call(.torch_upsample_nearest3d, call_args)
}
#' @rdname torch_upsample_nearest3d
torch_upsample_trilinear3d <- function(input, self, output_size = NULL, align_corners,
                                       scale_factors = NULL, scales_d = NULL, scales_h = NULL,
                                       scales_w = NULL) {
  # NOTE(review): the @rdname above groups this function with
  # torch_upsample_nearest3d — confirm sharing that doc page is intended.
  # `self` and `align_corners` are optional; forward them only when given.
  call_args <- list(input = input, output_size = output_size,
                    scale_factors = scale_factors, scales_d = scales_d,
                    scales_h = scales_h, scales_w = scales_w)
  if (!missing(self)) {
    call_args$self <- self
  }
  if (!missing(align_corners)) {
    call_args$align_corners <- align_corners
  }
  do.call(.torch_upsample_trilinear3d, call_args)
}
#' @rdname torch_atleast_1d
torch_atleast_1d <- function(self) {
  # A single tensor maps to `self`; a list of tensors maps to `tensors`.
  if (is_torch_tensor(self)) {
    .torch_atleast_1d(self = self)
  } else {
    .torch_atleast_1d(tensors = self)
  }
}
#' @rdname torch_atleast_2d
torch_atleast_2d <- function(self) {
  # A single tensor maps to `self`; a list of tensors maps to `tensors`.
  if (is_torch_tensor(self)) {
    .torch_atleast_2d(self = self)
  } else {
    .torch_atleast_2d(tensors = self)
  }
}
#' @rdname torch_atleast_3d
torch_atleast_3d <- function(self) {
  # A single tensor maps to `self`; a list of tensors maps to `tensors`.
  if (is_torch_tensor(self)) {
    .torch_atleast_3d(self = self)
  } else {
    .torch_atleast_3d(tensors = self)
  }
}
#' @rdname torch_dequantize
torch_dequantize <- function(tensor) {
  # A single tensor maps to `self`; a list of tensors maps to `tensors`.
  if (is_torch_tensor(tensor)) {
    .torch_dequantize(self = tensor)
  } else {
    .torch_dequantize(tensors = tensor)
  }
}
#' @rdname torch_kaiser_window
torch_kaiser_window <- function(window_length, periodic, beta, dtype = torch_float(),
                                layout = NULL, device = NULL, requires_grad = NULL) {
  # `beta` is only forwarded when the caller supplied it, so the binding's
  # own default applies otherwise.
  opts <- torch_tensor_options(dtype = dtype, layout = layout, device = device,
                               requires_grad = requires_grad)
  call_args <- list(window_length = window_length, periodic = periodic,
                    options = opts)
  if (!missing(beta)) {
    call_args$beta <- beta
  }
  do.call(.torch_kaiser_window, call_args)
}
#' @rdname torch_vander
torch_vander <- function(x, N = NULL, increasing = FALSE) {
  # Vandermonde matrix; N = NULL defers the column count to the backend
  # default.
  .torch_vander(x, N, increasing)
}
#' @rdname torch_movedim
torch_movedim <- function(self, source, destination) {
  # Translate 1-based R dimension indices to the backend's 0-based ones.
  .torch_movedim(self, as_1_based_dim(source), as_1_based_dim(destination))
}
#' @rdname torch_norm
torch_norm <- function(self, p = 2L, dim, keepdim = FALSE, dtype) {
  # Norm of a tensor. Dispatches to different C++ bindings depending on
  # whether `dim` was supplied and whether it is numeric or a dimname.
  if (missing(dtype)) {
    dtype <- self$dtype
  }
  p <- Scalar$new(p)
  # NOTE(review): after the assignment above, missing(dtype) is always FALSE,
  # so this condition reduces to missing(dim) — confirm that was the intent.
  if (missing(dim) && !missing(dtype)) {
    o <- cpp_torch_namespace_norm_self_Tensor_p_Scalar_dtype_ScalarType(
      self = self$ptr,
      p = p$ptr,
      dtype = dtype$ptr
    )
    return(Tensor$new(ptr = o))
  }
  # `dim` supplied as integers: reduce over those axes.
  if (is.numeric(unlist(dim))) {
    o <- cpp_torch_namespace_norm_self_Tensor_p_Scalar_dim_IntArrayRef_keepdim_bool_dtype_ScalarType(
      self = self$ptr, p = p$ptr, dim = unlist(dim), keepdim = keepdim, dtype = dtype$ptr
    )
  } else if (is.character(unlist(dim))){
    # `dim` supplied as dimension names.
    o <- cpp_torch_namespace_norm_self_Tensor_p_Scalar_dim_DimnameList_keepdim_bool_dtype_ScalarType(
      self = self$ptr, p = p$ptr, dim = DimnameList$new(unlist(dim))$ptr, keepdim = keepdim, dtype = dtype$ptr
    )
  }
  # NOTE(review): if `dim` is neither numeric nor character, `o` is undefined
  # and this fails with "object 'o' not found" — consider an explicit error.
  Tensor$new(ptr = o)
}
torch_one_hot <- function(self, num_classes = -1L) {
  # Shift 1-based R class indices to the 0-based indices the backend expects.
  .torch_one_hot(as_1_based_tensor(self), num_classes)
}
#' @rdname torch_split
torch_split <- function(self, split_size, dim = 1L) {
  # A vector of sizes dispatches to split_with_sizes; a scalar splits into
  # equally sized chunks.
  if (length(split_size) > 1) {
    torch_split_with_sizes(self, split_size, dim)
  } else {
    .torch_split(self, split_size, dim)
  }
}
#' @rdname torch_nonzero
torch_nonzero <- function(self, as_list = FALSE) {
  # Indices from the backend are 0-based; shift to R's 1-based convention.
  if (as_list) {
    lapply(torch_nonzero_numpy(self), function(idx) idx + 1L)
  } else {
    .torch_nonzero(self) + 1L
  }
}
#' Normal distributed
#'
#' @param mean (tensor or scalar double) Mean of the normal distribution.
#' If this is a [torch_tensor()] then the output has the same dim as `mean`
#' and it represents the per-element mean. If it's a scalar value, it's reused
#' for all elements.
#' @param std (tensor or scalar double) The standard deviation of the normal
#' distribution. If this is a [torch_tensor()] then the output has the same size as `std`
#' and it represents the per-element standard deviation. If it's a scalar value,
#' it's reused for all elements.
#' @param size (integers, optional) only used if both `mean` and `std` are scalars.
#' @param generator a random number generator created with [torch_generator()]. If `NULL`
#' a default generator is used.
#' @param ... Tensor option parameters like `dtype`, `layout`, and `device`.
#' Can only be used when `mean` and `std` are both scalar numerics.
#'
#' @rdname torch_normal
#'
#' @export
torch_normal <- function(mean, std, size = NULL, generator = NULL, ...) {
  # Standard normal defaults when the caller omits the distribution params.
  if (missing(mean))
    mean <- 0
  if (missing(std))
    std <- 1
  # `size` and `...` (tensor options) are only valid for the scalar/scalar case.
  if (!is.null(size)) {
    if (is_torch_tensor(mean) || is_torch_tensor(std))
      value_error("size is set, but one of mean or std is not a scalar value.")
  }
  if (length(list(...)) > 0) {
    if (is_torch_tensor(mean) || is_torch_tensor(std))
      value_error("options is set, but one of mean or std is not a scalar value.")
  }
  if (is.null(generator))
    generator <- .generator_null
  # Scalar mean and std require an explicit output size.
  if (!is_torch_tensor(mean) && !is_torch_tensor(std) && is.null(size))
    value_error("size is not set.")
  # Case 1: scalar mean, scalar std, explicit size.
  if (!is.null(size)) {
    if (is.list(size)) size <- unlist(size)
    options <- do.call(torch_tensor_options, list(...))
    return(Tensor$new(ptr = cpp_namespace_normal_double_double(
      mean = mean,
      std = std,
      size = size,
      generator = generator$ptr,
      options = options
    )))
  }
  # Case 2: per-element mean and std.
  if (is_torch_tensor(mean) && is_torch_tensor(std)) {
    return(Tensor$new(ptr = cpp_namespace_normal_tensor_tensor(
      mean = mean$ptr,
      std = std$ptr,
      generator = generator$ptr
    )))
  }
  # Case 3: per-element mean, scalar std.
  if (is_torch_tensor(mean)) {
    return(Tensor$new(ptr = cpp_namespace_normal_tensor_double(
      mean = mean$ptr,
      std = std,
      generator = generator$ptr
    )))
  }
  # Case 4: scalar mean, per-element std.
  if (is_torch_tensor(std)) {
    return(Tensor$new(ptr = cpp_namespace_normal_double_tensor(
      mean = mean,
      std = std$ptr,
      generator = generator$ptr
    )))
  }
  # All valid argument combinations are handled above.
  value_error("Unreachable state reached. Please report a bug on GitHub.")
}
#' @rdname torch_polygamma
torch_polygamma <- function(n, input) {
  # Work on a copy so the caller's tensor is left untouched by the
  # in-place polygamma_ call.
  out <- input$clone()
  out$polygamma_(n = n)
  out
}
#' @rdname torch_fft_fft
torch_fft_fft <- function(self, n = NULL, dim = -1L, norm = NULL) {
  # NULL norm falls back to the "backward" normalization mode.
  if (is.null(norm)) {
    norm <- "backward"
  }
  .torch_fft_fft(self = self, n = n, dim = dim, norm = norm)
}
#' @rdname torch_fft_ifft
torch_fft_ifft <- function(self, n = NULL, dim = -1L, norm = NULL) {
  # NULL norm falls back to the "backward" normalization mode.
  if (is.null(norm)) {
    norm <- "backward"
  }
  .torch_fft_ifft(self = self, n = n, dim = dim, norm = norm)
}
#' @rdname torch_fft_rfft
torch_fft_rfft <- function(self, n = NULL, dim = -1L, norm = NULL) {
  # NULL norm falls back to the "backward" normalization mode.
  if (is.null(norm)) {
    norm <- "backward"
  }
  .torch_fft_rfft(self = self, n = n, dim = dim, norm = norm)
}
#' @rdname torch_fft_irfft
torch_fft_irfft <- function(self, n = NULL, dim = -1L, norm = NULL) {
  # NULL norm falls back to the "backward" normalization mode.
  if (is.null(norm)) {
    norm <- "backward"
  }
  .torch_fft_irfft(self = self, n = n, dim = dim, norm = norm)
}
torch_broadcast_shapes <- function(...) {
  # Expand a zero scalar to each requested shape and let broadcast_tensors
  # compute the common result shape; gradients are disabled since this is
  # pure shape bookkeeping.
  shapes <- rlang::list2(...)
  with_no_grad({
    zero <- torch_scalar_tensor(0, device="cpu")
    expanded <- lapply(shapes, function(shape) zero$expand(shape))
    result <- torch_broadcast_tensors(expanded)[[1]]$shape
  })
  result
}
#'@rdname torch_multinomial
torch_multinomial <- function(self, num_samples, replacement = FALSE, generator = NULL) {
  # The backend returns 0-based indices; shift in place to R's 1-based
  # convention without recording the addition in the autograd graph.
  out <- .torch_multinomial(self, num_samples, replacement = replacement, generator = generator)
  with_no_grad({
    out$add_(torch_scalar(1L))
  })
  out
}
#' Index torch tensors
#'
#' Helper functions to index tensors.
#'
#' @param self (Tensor) Tensor that will be indexed.
#' @param indices (`List[Tensor]`) List of indices. Indices are torch tensors with
#' `torch_long()` dtype.
#'
#' @name torch_index
#' @export
NULL
#' In-place version of `torch_index_put`.
#' @name torch_index_put_
#' @inheritParams torch_index
#' @param values (Tensor) values that will replace the indexed locations. Used
#' for `torch_index_put` and `torch_index_put_`.
#' @param accumulate (bool) Whether instead of replacing the current values with `values`,
#' you want to add them.
#' @export
NULL
#' Modify values selected by `indices`.
#' @inheritParams torch_index_put_
#' @name torch_index_put
#' @export
NULL
|
library(TeachingSampling)
### Name: HT
### Title: The Horvitz-Thompson Estimator
### Aliases: HT
### Keywords: survey
### ** Examples
############
## Example 1
############
# Uses the Lucy data to draw a simple random sample without replacement
data(Lucy)
# NOTE(review): attach() masks the global environment; with() or $-access
# would be safer, but the example is kept as published.
attach(Lucy)
N <- dim(Lucy)[1]
n <- 400
sam <- sample(N,n)
# The vector of inclusion probabilities for each unit in the sample
pik <- rep(n/N,n)
# The information about the units in the sample is stored in an object called data
data <- Lucy[sam,]
attach(data)
names(data)
# The variables of interest are: Income, Employees and Taxes
# This information is stored in a data frame called estima
estima <- data.frame(Income, Employees, Taxes)
HT(estima, pik)
############
## Example 2
############
# Uses the Lucy data to draw a simple random sample with replacement
data(Lucy)
N <- dim(Lucy)[1]
m <- 400
sam <- sample(N,m,replace=TRUE)
# The vector of selection probabilities of units in the sample
pk <- rep(1/N,m)
# Computation of the inclusion probabilities
pik <- 1-(1-pk)^m
# The information about the units in the sample is stored in an object called data
data <- Lucy[sam,]
attach(data)
names(data)
# The variables of interest are: Income, Employees and Taxes
# This information is stored in a data frame called estima
estima <- data.frame(Income, Employees, Taxes)
HT(estima, pik)
############
## Example 3
############
# Without replacement sampling
# Vector U contains the labels of a population of size N=5
U <- c("Yves", "Ken", "Erik", "Sharon", "Leslie")
# Vectors y1 and y2 are the values of the variables of interest
y1<-c(32, 34, 46, 89, 35)
y2<-c(1,1,1,0,0)
y3<-cbind(y1,y2)
# The population size is N=5
N <- length(U)
# The sample size is n=2
n <- 2
# The sample membership matrix for fixed size without replacement sampling designs
Ind <- Ik(N,n)
# p is the probability of selection of every possible sample
p <- c(0.13, 0.2, 0.15, 0.1, 0.15, 0.04, 0.02, 0.06, 0.07, 0.08)
# Computation of the inclusion probabilities
inclusion <- Pik(p, Ind)
# Selection of a random sample
sam <- sample(5,2)
# The selected sample
U[sam]
# The inclusion probabilities for these two units
inclusion[sam]
# The values of the variables of interest for the units in the sample
y1[sam]
y2[sam]
y3[sam,]
# The Horvitz-Thompson estimator
HT(y1[sam],inclusion[sam])
HT(y2[sam],inclusion[sam])
HT(y3[sam,],inclusion[sam])
############
## Example 4
############
# Following Example 3... With replacement sampling
# The population size is N=5
N <- length(U)
# The sample size is m=2
m <- 2
# pk is the probability of selection of every single unit
pk <- c(0.9, 0.025, 0.025, 0.025, 0.025)
# Computation of the inclusion probabilities
pik <- 1-(1-pk)^m
# Selection of a random sample with replacement
sam <- sample(5,2, replace=TRUE, prob=pk)
# The selected sample
U[sam]
# The inclusion probabilities for these two units
# NOTE(review): `inclusion` here still holds the Example 3 values; `pik`
# computed above is likely what was intended.
inclusion[sam]
# The values of the variables of interest for the units in the sample
y1[sam]
y2[sam]
y3[sam,]
# The Horvitz-Thompson estimator
HT(y1[sam],inclusion[sam])
HT(y2[sam],inclusion[sam])
HT(y3[sam,],inclusion[sam])
####################################################################
## Example 5 HT is unbiased for without replacement sampling designs
## Fixed sample size
####################################################################
# Vector U contains the labels of a population of size N=5
U <- c("Yves", "Ken", "Erik", "Sharon", "Leslie")
# Vector y holds the values of the variable of interest
y<-c(32, 34, 46, 89, 35)
# The population size is N=5
N <- length(U)
# The sample size is n=2
n <- 2
# The sample membership matrix for fixed size without replacement sampling designs
Ind <- Ik(N,n)
Ind
# p is the probability of selection of every possible sample
p <- c(0.13, 0.2, 0.15, 0.1, 0.15, 0.04, 0.02, 0.06, 0.07, 0.08)
sum(p)
# Computation of the inclusion probabilities
inclusion <- Pik(p, Ind)
inclusion
sum(inclusion)
# The support with the values of the elements
Qy <-Support(N,n,ID=y)
Qy
# The HT estimates for every single sample in the support
HT1<- HT(y[Ind[1,]==1], inclusion[Ind[1,]==1])
HT2<- HT(y[Ind[2,]==1], inclusion[Ind[2,]==1])
HT3<- HT(y[Ind[3,]==1], inclusion[Ind[3,]==1])
HT4<- HT(y[Ind[4,]==1], inclusion[Ind[4,]==1])
HT5<- HT(y[Ind[5,]==1], inclusion[Ind[5,]==1])
HT6<- HT(y[Ind[6,]==1], inclusion[Ind[6,]==1])
HT7<- HT(y[Ind[7,]==1], inclusion[Ind[7,]==1])
HT8<- HT(y[Ind[8,]==1], inclusion[Ind[8,]==1])
HT9<- HT(y[Ind[9,]==1], inclusion[Ind[9,]==1])
HT10<- HT(y[Ind[10,]==1], inclusion[Ind[10,]==1])
# The HT estimates arranged in a vector
Est <- c(HT1, HT2, HT3, HT4, HT5, HT6, HT7, HT8, HT9, HT10)
Est
# The HT is actually design-unbiased: E[HT] equals the population total
data.frame(Ind, Est, p)
sum(Est*p)
sum(y)
####################################################################
## Example 6 HT is unbiased for without replacement sampling designs
## Random sample size
####################################################################
# Vector U contains the labels of a population of size N=5
U <- c("Yves", "Ken", "Erik", "Sharon", "Leslie")
# Vector y holds the values of the variable of interest
y <- c(32, 34, 46, 89, 35)
# The population size is N=5
N <- length(U)
# The sample membership matrix for random size without replacement sampling designs
Ind <- IkRS(N)
Ind
# p is the probability of selection of every possible sample
# (2^N = 32 samples, including the empty sample in the first row)
p <- c(0.59049, 0.06561, 0.06561, 0.06561, 0.06561, 0.06561, 0.00729, 0.00729,
0.00729, 0.00729, 0.00729, 0.00729, 0.00729, 0.00729, 0.00729, 0.00729, 0.00081,
0.00081, 0.00081, 0.00081, 0.00081, 0.00081, 0.00081, 0.00081, 0.00081, 0.00081,
0.00009, 0.00009, 0.00009, 0.00009, 0.00009, 0.00001)
sum(p)
# Computation of the inclusion probabilities
inclusion <- Pik(p, Ind)
inclusion
sum(inclusion)
# The support with the values of the elements
Qy <- SupportRS(N, ID = y)
Qy
# The HT estimate for every single sample in the support.
# (Replaces thirty-two copy-pasted HT1..HT32 assignments with one
# vectorised loop over the rows of the sample membership matrix.)
Est <- vapply(seq_len(nrow(Ind)), function(s) {
  in_sam <- Ind[s, ] == 1
  HT(y[in_sam], inclusion[in_sam])
}, numeric(1))
Est
# The HT estimator is actually design-unbiased: E[HT] = sum(y)
data.frame(Ind, Est, p)
sum(Est * p)
sum(y)
################################################################
## Example 7 HT is unbiased for with replacement sampling designs
################################################################
# Vector U contains the labels of a population of size N=5
U <- c("Yves", "Ken", "Erik", "Sharon", "Leslie")
# Vector y holds the values of the variable of interest
y <- c(32, 34, 46, 89, 35)
# The population size is N=5
N <- length(U)
# The sample size is m=2
m <- 2
# pk is the probability of selection of every single unit in one draw
pk <- c(0.35, 0.225, 0.175, 0.125, 0.125)
# p is the probability of selection of every possible sample
p <- p.WR(N, m, pk)
p
sum(p)
# The sample membership matrix for with replacement sampling designs
# (the original comment said "without replacement"; IkWR is with replacement)
Ind <- IkWR(N, m)
Ind
# The support with the values of the elements
Qy <- SupportWR(N, m, ID = y)
Qy
# Computation of the inclusion probabilities
pik <- 1 - (1 - pk)^m
pik
# The HT estimate for every single sample in the support.
# (Replaces fifteen copy-pasted HT1..HT15 assignments with one
# vectorised loop over the rows of the sample membership matrix.)
Est <- vapply(seq_len(nrow(Ind)), function(s) {
  in_sam <- Ind[s, ] == 1
  HT(y[in_sam], pik[in_sam])
}, numeric(1))
Est
# The HT estimator is actually design-unbiased: E[HT] = sum(y)
data.frame(Ind, Est, p)
sum(Est * p)
sum(y)
|
/data/genthat_extracted_code/TeachingSampling/examples/HT.rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 9,700
|
r
|
library(TeachingSampling)
### Name: HT
### Title: The Horvitz-Thompson Estimator
### Aliases: HT
### Keywords: survey
### ** Examples
############
## Example 1
############
# Uses the Lucy data to draw a simple random sample without replacement
data(Lucy)
N <- dim(Lucy)[1]
n <- 400
sam <- sample(N, n)
# The vector of inclusion probabilities for each unit in the sample
pik <- rep(n / N, n)
# The information about the units in the sample is stored in an object called data
data <- Lucy[sam, ]
names(data)
# The variables of interest are: Income, Employees and Taxes.
# Columns are taken from `data` directly instead of via attach(), which
# modifies the global search path and is a well-known R anti-pattern.
estima <- data.frame(Income = data$Income, Employees = data$Employees,
                     Taxes = data$Taxes)
HT(estima, pik)
############
## Example 2
############
# Uses the Lucy data to draw a simple random sample with replacement
data(Lucy)
N <- dim(Lucy)[1]
m <- 400
sam <- sample(N, m, replace = TRUE)
# The vector of selection probabilities of units in the sample
pk <- rep(1 / N, m)
# Computation of the inclusion probabilities under with-replacement sampling
pik <- 1 - (1 - pk)^m
# The information about the units in the sample is stored in an object called data
data <- Lucy[sam, ]
names(data)
# The variables of interest are: Income, Employees and Taxes
estima <- data.frame(Income = data$Income, Employees = data$Employees,
                     Taxes = data$Taxes)
HT(estima, pik)
############
## Example 3
############
# Without replacement sampling
# Vector U contains the labels of the N=5 population units
U <- c("Yves", "Ken", "Erik", "Sharon", "Leslie")
# Vector y1 and y2 are the values of the variables of interest
y1<-c(32, 34, 46, 89, 35)
y2<-c(1,1,1,0,0)
# y3 is a two-column matrix holding both variables of interest at once
y3<-cbind(y1,y2)
# The population size is N=5
N <- length(U)
# The sample size is n=2
n <- 2
# The sample membership matrix for fixed size without replacement sampling designs
Ind <- Ik(N,n)
# p is the probability of selection of every possible sample
# (one entry per sample in the support; entries sum to one)
p <- c(0.13, 0.2, 0.15, 0.1, 0.15, 0.04, 0.02, 0.06, 0.07, 0.08)
# Computation of the inclusion probabilities of each population unit
inclusion <- Pik(p, Ind)
# Selection of a random sample of 2 units out of 5
sam <- sample(5,2)
# The selected sample
U[sam]
# The inclusion probabilities for these two units
inclusion[sam]
# The values of the variables of interest for the units in the sample
y1[sam]
y2[sam]
y3[sam,]
# The Horvitz-Thompson estimator of the population total for each variable
# (NOTE: U, y1, y2, y3 and inclusion are reused by Example 4 below)
HT(y1[sam],inclusion[sam])
HT(y2[sam],inclusion[sam])
HT(y3[sam,],inclusion[sam])
############
## Example 4
############
# Following Example 3... With replacement sampling
# The population size is N=5
N <- length(U)
# The sample size is m=2
m <- 2
# pk is the probability of selection of every single unit in one draw
pk <- c(0.9, 0.025, 0.025, 0.025, 0.025)
# Computation of the inclusion probabilities under with-replacement sampling
pik <- 1 - (1 - pk)^m
# Selection of a random sample with replacement
sam <- sample(5, 2, replace = TRUE, prob = pk)
# The selected sample
U[sam]
# The inclusion probabilities for these two units.
# (Fixed: use `pik`, computed above for this with-replacement design;
# the original used `inclusion`, which belongs to the without-replacement
# design of Example 3 and left `pik` unused.)
pik[sam]
# The values of the variables of interest for the units in the sample
y1[sam]
y2[sam]
y3[sam,]
# The Horvitz-Thompson estimator of the population total for each variable
HT(y1[sam], pik[sam])
HT(y2[sam], pik[sam])
HT(y3[sam,], pik[sam])
####################################################################
## Example 5 HT is unbiased for without replacement sampling designs
## Fixed sample size
####################################################################
# Vector U contains the labels of a population of size N=5
U <- c("Yves", "Ken", "Erik", "Sharon", "Leslie")
# Vector y holds the values of the variable of interest
y <- c(32, 34, 46, 89, 35)
# The population size is N=5
N <- length(U)
# The sample size is n=2
n <- 2
# The sample membership matrix for fixed size without replacement sampling designs
Ind <- Ik(N, n)
Ind
# p is the probability of selection of every possible sample
p <- c(0.13, 0.2, 0.15, 0.1, 0.15, 0.04, 0.02, 0.06, 0.07, 0.08)
sum(p)
# Computation of the inclusion probabilities
inclusion <- Pik(p, Ind)
inclusion
sum(inclusion)
# The support with the values of the elements
Qy <- Support(N, n, ID = y)
Qy
# The HT estimate for every single sample in the support.
# (Replaces ten copy-pasted HT1..HT10 assignments with one vectorised
# loop over the rows of the sample membership matrix.)
Est <- vapply(seq_len(nrow(Ind)), function(s) {
  in_sam <- Ind[s, ] == 1
  HT(y[in_sam], inclusion[in_sam])
}, numeric(1))
Est
# The HT estimator is actually design-unbiased: E[HT] = sum(y)
data.frame(Ind, Est, p)
sum(Est * p)
sum(y)
####################################################################
## Example 6 HT is unbiased for without replacement sampling designs
## Random sample size
####################################################################
# Vector U contains the labels of a population of size N=5
U <- c("Yves", "Ken", "Erik", "Sharon", "Leslie")
# Vector y holds the values of the variable of interest
y <- c(32, 34, 46, 89, 35)
# The population size is N=5
N <- length(U)
# The sample membership matrix for random size without replacement sampling designs
Ind <- IkRS(N)
Ind
# p is the probability of selection of every possible sample
# (2^N = 32 samples, including the empty sample in the first row)
p <- c(0.59049, 0.06561, 0.06561, 0.06561, 0.06561, 0.06561, 0.00729, 0.00729,
0.00729, 0.00729, 0.00729, 0.00729, 0.00729, 0.00729, 0.00729, 0.00729, 0.00081,
0.00081, 0.00081, 0.00081, 0.00081, 0.00081, 0.00081, 0.00081, 0.00081, 0.00081,
0.00009, 0.00009, 0.00009, 0.00009, 0.00009, 0.00001)
sum(p)
# Computation of the inclusion probabilities
inclusion <- Pik(p, Ind)
inclusion
sum(inclusion)
# The support with the values of the elements
Qy <- SupportRS(N, ID = y)
Qy
# The HT estimate for every single sample in the support.
# (Replaces thirty-two copy-pasted HT1..HT32 assignments with one
# vectorised loop over the rows of the sample membership matrix.)
Est <- vapply(seq_len(nrow(Ind)), function(s) {
  in_sam <- Ind[s, ] == 1
  HT(y[in_sam], inclusion[in_sam])
}, numeric(1))
Est
# The HT estimator is actually design-unbiased: E[HT] = sum(y)
data.frame(Ind, Est, p)
sum(Est * p)
sum(y)
################################################################
## Example 7 HT is unbiased for with replacement sampling designs
################################################################
# Vector U contains the labels of a population of size N=5
U <- c("Yves", "Ken", "Erik", "Sharon", "Leslie")
# Vector y holds the values of the variable of interest
y <- c(32, 34, 46, 89, 35)
# The population size is N=5
N <- length(U)
# The sample size is m=2
m <- 2
# pk is the probability of selection of every single unit in one draw
pk <- c(0.35, 0.225, 0.175, 0.125, 0.125)
# p is the probability of selection of every possible sample
p <- p.WR(N, m, pk)
p
sum(p)
# The sample membership matrix for with replacement sampling designs
# (the original comment said "without replacement"; IkWR is with replacement)
Ind <- IkWR(N, m)
Ind
# The support with the values of the elements
Qy <- SupportWR(N, m, ID = y)
Qy
# Computation of the inclusion probabilities
pik <- 1 - (1 - pk)^m
pik
# The HT estimate for every single sample in the support.
# (Replaces fifteen copy-pasted HT1..HT15 assignments with one
# vectorised loop over the rows of the sample membership matrix.)
Est <- vapply(seq_len(nrow(Ind)), function(s) {
  in_sam <- Ind[s, ] == 1
  HT(y[in_sam], pik[in_sam])
}, numeric(1))
Est
# The HT estimator is actually design-unbiased: E[HT] = sum(y)
data.frame(Ind, Est, p)
sum(Est * p)
sum(y)
|
### Author : Markos Viggiato
### Date : August 24th, 2018
### Code developed using resources from 'http://www.rdatamining.com/docs/twitter-analysis-with-r', by Yanchang Zhao
### This code makes use of the 'tm' package to analyze the frequent terms from tweets
#################
# import of useful packages
library(twitteR)
library(ggplot2)
library(tm)
# main function to obtain the most frequent terms through text analysis
# Collects the 1000 most recent tweets of a user, cleans the text with the
# 'tm' package, and returns a ggplot bar chart of the terms occurring at
# least 20 times. Takes no arguments; assumes twitteR authentication has
# already been set up by the caller.
frequentTopics <- function() {
  # collect the 1000 most recent tweets from Donald Trump
  tweets <- userTimeline("realDonaldTrump", n = 1000)
  tweets.df <- twListToDF(tweets)
  # build a corpus with the text content from the tweets
  myCorpus <- Corpus(VectorSource(tweets.df$text))
  # normalise the encoding (replacing unconvertible bytes), then lower-case
  myCorpus <- tm_map(myCorpus, function(x) iconv(enc2utf8(x), sub = "byte"))
  myCorpus <- tm_map(myCorpus, content_transformer(tolower))
  # remove URLs
  removeURL <- function(x) gsub("http[^[:space:]]*", "", x)
  myCorpus <- tm_map(myCorpus, content_transformer(removeURL))
  # keep only English letters and spaces
  removeNumPunct <- function(x) gsub("[^[:alpha:][:space:]]*", "", x)
  myCorpus <- tm_map(myCorpus, content_transformer(removeNumPunct))
  # remove stopwords: keep "r" and "big", drop filler words and single digits
  myStopwords <- c(setdiff(stopwords('english'), c("r", "big")),
                   "use", "see", "used", "via", "amp",
                   "1", "2", "3", "4", "5", "6", "7", "8", "9", "0")
  myCorpus <- tm_map(myCorpus, removeWords, myStopwords)
  # remove extra whitespace
  myCorpus <- tm_map(myCorpus, stripWhitespace)
  # keep a copy of the unstemmed corpus for stem completion later
  myCorpusCopy <- myCorpus
  myCorpus <- tm_map(myCorpus, stemDocument)  # stem words
  # complete each stem back to a full word using the unstemmed dictionary
  stemCompletion2 <- function(x, dictionary) {
    x <- unlist(strsplit(as.character(x), " "))
    x <- x[x != ""]
    x <- stemCompletion(x, dictionary = dictionary)
    x <- paste(x, sep = "", collapse = " ")
    PlainTextDocument(stripWhitespace(x))
  }
  myCorpus <- lapply(myCorpus, stemCompletion2, dictionary = myCorpusCopy)
  myCorpus <- Corpus(VectorSource(myCorpus))
  # build the term-document matrix, keeping terms of any length.
  # (Removed dead code inherited from the tutorial this was adapted from:
  # an unused `wordFreq` helper and an `idx` lookup of "r"/"data"/"mining"
  # whose result was discarded.)
  tdm <- TermDocumentMatrix(myCorpus, control = list(wordLengths = c(1, Inf)))
  # inspect frequent words (terms appearing at least 20 times)
  (freq.terms <- findFreqTerms(tdm, lowfreq = 20))
  term.freq <- rowSums(as.matrix(tdm))
  term.freq <- subset(term.freq, term.freq >= 20)
  df <- data.frame(term = names(term.freq), freq = term.freq)
  # plot the most frequent words using ggplot
  ggplot(df, aes(x = term, y = freq)) + geom_bar(stat = "identity") +
    xlab("Terms") + ylab("Count") + coord_flip() +
    theme(axis.text = element_text(size = 7))
}
|
/topic_analysis/frequentTopics.R
|
no_license
|
markosviggiato/realTimeInfo
|
R
| false
| false
| 3,156
|
r
|
### Author : Markos Viggiato
### Date : August 24th, 2018
### Code developed using resources from 'http://www.rdatamining.com/docs/twitter-analysis-with-r', by Yanchang Zhao
### This code makes use of the 'tm' package to analyze the frequent terms from tweets
#################
# import of useful packages
library(twitteR)
library(ggplot2)
library(tm)
# main function to obtain the most frequent terms through text analysis
# Collects the 1000 most recent tweets of a user, cleans the text with the
# 'tm' package, and returns a ggplot bar chart of the terms occurring at
# least 20 times. Takes no arguments; assumes twitteR authentication has
# already been set up by the caller.
frequentTopics <- function() {
  # collect the 1000 most recent tweets from Donald Trump
  tweets <- userTimeline("realDonaldTrump", n = 1000)
  tweets.df <- twListToDF(tweets)
  # build a corpus with the text content from the tweets
  myCorpus <- Corpus(VectorSource(tweets.df$text))
  # normalise the encoding (replacing unconvertible bytes), then lower-case
  myCorpus <- tm_map(myCorpus, function(x) iconv(enc2utf8(x), sub = "byte"))
  myCorpus <- tm_map(myCorpus, content_transformer(tolower))
  # remove URLs
  removeURL <- function(x) gsub("http[^[:space:]]*", "", x)
  myCorpus <- tm_map(myCorpus, content_transformer(removeURL))
  # keep only English letters and spaces
  removeNumPunct <- function(x) gsub("[^[:alpha:][:space:]]*", "", x)
  myCorpus <- tm_map(myCorpus, content_transformer(removeNumPunct))
  # remove stopwords: keep "r" and "big", drop filler words and single digits
  myStopwords <- c(setdiff(stopwords('english'), c("r", "big")),
                   "use", "see", "used", "via", "amp",
                   "1", "2", "3", "4", "5", "6", "7", "8", "9", "0")
  myCorpus <- tm_map(myCorpus, removeWords, myStopwords)
  # remove extra whitespace
  myCorpus <- tm_map(myCorpus, stripWhitespace)
  # keep a copy of the unstemmed corpus for stem completion later
  myCorpusCopy <- myCorpus
  myCorpus <- tm_map(myCorpus, stemDocument)  # stem words
  # complete each stem back to a full word using the unstemmed dictionary
  stemCompletion2 <- function(x, dictionary) {
    x <- unlist(strsplit(as.character(x), " "))
    x <- x[x != ""]
    x <- stemCompletion(x, dictionary = dictionary)
    x <- paste(x, sep = "", collapse = " ")
    PlainTextDocument(stripWhitespace(x))
  }
  myCorpus <- lapply(myCorpus, stemCompletion2, dictionary = myCorpusCopy)
  myCorpus <- Corpus(VectorSource(myCorpus))
  # build the term-document matrix, keeping terms of any length.
  # (Removed dead code inherited from the tutorial this was adapted from:
  # an unused `wordFreq` helper and an `idx` lookup of "r"/"data"/"mining"
  # whose result was discarded.)
  tdm <- TermDocumentMatrix(myCorpus, control = list(wordLengths = c(1, Inf)))
  # inspect frequent words (terms appearing at least 20 times)
  (freq.terms <- findFreqTerms(tdm, lowfreq = 20))
  term.freq <- rowSums(as.matrix(tdm))
  term.freq <- subset(term.freq, term.freq >= 20)
  df <- data.frame(term = names(term.freq), freq = term.freq)
  # plot the most frequent words using ggplot
  ggplot(df, aes(x = term, y = freq)) + geom_bar(stat = "identity") +
    xlab("Terms") + ylab("Count") + coord_flip() +
    theme(axis.text = element_text(size = 7))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mediation1.R
\name{mediation1}
\alias{mediation1}
\title{Simple Mediation}
\usage{
mediation1(
y,
x,
m,
cvs = NULL,
df,
with_out = T,
nboot = 1000,
conf_level = 0.95
)
}
\arguments{
\item{y}{The dependent variable column name from your dataframe.}
\item{x}{The independent variable column name from your dataframe. This column
will be treated as X in mediation or moderation models, please see
diagrams online for examples.}
\item{m}{The mediator for your model, as this model only includes one mediator.}
\item{cvs}{The covariates you would like to include in the model.
Use a \code{c()} concatenated vector to use multiple covariates.}
\item{df}{The dataframe where the columns from the formula can be found.
Note that only the columns used in the analysis will be data screened.}
\item{with_out}{A logical value where you want to keep the outliers in
model \code{TRUE} or exclude them from the model \code{FALSE}.}
\item{nboot}{A numeric value indicating the number of bootstraps you would like to complete.}
\item{conf_level}{A numeric value indicating the confidence interval width for the bootstrapped confidence interval.}
}
\description{
This function runs a complete simple mediation analysis with one
mediator, similar to model 4 in PROCESS by A. Hayes (2013).
As part of the output, you will find data screening,
all three models used in the traditional Baron and
Kenny (1986) steps, total/direct/indirect effects, the z-score and p-value
for the Aroian Sobel test, and the bootstrapped confidence interval
for the indirect effect.
}
\examples{
mediation1(y = "cyl", x = "mpg", m = "disp",
cvs = c("drat", "gear"), df = mtcars)
}
\keyword{bootstrapping}
\keyword{data}
\keyword{mediation,}
\keyword{moderation,}
\keyword{regression,}
\keyword{screening,}
|
/man/mediation1.Rd
|
permissive
|
doomlab/MeMoBootR
|
R
| false
| true
| 1,877
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mediation1.R
\name{mediation1}
\alias{mediation1}
\title{Simple Mediation}
\usage{
mediation1(
y,
x,
m,
cvs = NULL,
df,
with_out = T,
nboot = 1000,
conf_level = 0.95
)
}
\arguments{
\item{y}{The dependent variable column name from your dataframe.}
\item{x}{The independent variable column name from your dataframe. This column
will be treated as X in mediation or moderation models, please see
diagrams online for examples.}
\item{m}{The mediator for your model, as this model only includes one mediator.}
\item{cvs}{The covariates you would like to include in the model.
Use a \code{c()} concatenated vector to use multiple covariates.}
\item{df}{The dataframe where the columns from the formula can be found.
Note that only the columns used in the analysis will be data screened.}
\item{with_out}{A logical value where you want to keep the outliers in
model \code{TRUE} or exclude them from the model \code{FALSE}.}
\item{nboot}{A numeric value indicating the number of bootstraps you would like to complete.}
\item{conf_level}{A numeric value indicating the confidence interval width for the bootstrapped confidence interval.}
}
\description{
This function runs a complete simple mediation analysis with one
mediator, similar to model 4 in PROCESS by A. Hayes (2013).
As part of the output, you will find data screening,
all three models used in the traditional Baron and
Kenny (1986) steps, total/direct/indirect effects, the z-score and p-value
for the Aroian Sobel test, and the bootstrapped confidence interval
for the indirect effect.
}
\examples{
mediation1(y = "cyl", x = "mpg", m = "disp",
cvs = c("drat", "gear"), df = mtcars)
}
\keyword{bootstrapping}
\keyword{data}
\keyword{mediation,}
\keyword{moderation,}
\keyword{regression,}
\keyword{screening,}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TapeR_FIT_LME.f.R
\name{TapeR_FIT_LME.f}
\alias{TapeR_FIT_LME.f}
\title{Fits a taper curve model to the specified diameter-height data}
\usage{
TapeR_FIT_LME.f(Id, x, y, knt_x, ord_x, knt_z, ord_z, IdKOVb = "pdSymm", ...)
}
\arguments{
\item{Id}{Vector of tree identifiers of same length as diameter and height
measurements.}
\item{x}{Numeric vector of height measurements (explanatory variables) along
the stem relative to the tree height.}
\item{y}{Numeric vector of diameter measurements (response) along the stem
(in centimeters).}
\item{knt_x}{Numeric vector of relative knot positions for fixed effects.}
\item{ord_x}{Numeric scalar. Order of fixed effects Spline (4=cubic).}
\item{knt_z}{Numeric vector of relative knot positions for random effects.}
\item{ord_z}{Numeric scalar. Order of random effects Spline (4=cubic).}
\item{IdKOVb}{Character string. Type of covariance matrix used by
\code{lme}. Only "pdSymm" makes sense. Rather reduce number of knots if
function does not converge.}
\item{...}{not currently used}
}
\value{
List of model properties
\itemize{
\item{fit.lme}{Summary of the fitted lme model.}
\item{par.lme}{List of model parameters (e.g., coefficients and
variance-covariance matrices) needed for volume estimation and other
functions in this package.}
Components of the \code{par.lme} list
\itemize{
\item{knt_x}{Relative positions of the fixed effects Spline knots along the stem.}
\item{pad_knt_x}{Padded version of knt_x, as used to define B-Spline design matrix.}
\item{ord_x}{Order of the spline.}
\item{knt_z}{Relative positions of the random effects Spline knots along
the stem.}
\item{pad_knt_z}{Padded version of knt_z, as used to define B-Spline design matrix.}
\item{ord_z}{Order of the spline.}
\item{b_fix}{Fixed-effects spline coefficients.}
\item{KOVb_fix}{Covariance of fixed-effects.}
\item{sig2_eps}{Residual variance.}
\item{dfRes}{Residual degrees of freedom.}
\item{KOVb_rnd}{Covariance of random effects.}
\item{theta}{Variance parameters in natural parametrization. See Details. }
\item{KOV_theta}{Approximate asymptotic covariance matrix of variance parameters.}
}
}
}
\description{
Fits a taper curve model with random effects on tree-level based
on B-Splines to the specified diameter-height data. Number and position of
nodes and order of B-Splines can be specified.
}
\details{
If too few trees are given, the linear mixed model (lme) will not
converge. See examples for a suggestion of node positions.
The variance parameters \code{theta} are stored in the natural parametrization
(Pinheiro and Bates (2004), p. 93). This means log for variances and logit for
covariances. \code{theta} is the vectorized triangle of the random effects
covariance matrix + the residual variance (lSigma). Given there are 2 inner
knots for random effects, the structure will be
c(sig^2_b1, sig_b1 sig_b2, sig_b1 sig_b3, sig_b1 sig_b4, sig^2_b2,...,sig^2_b4, lSigma)
}
\examples{
# load example data
data(DxHx.df)
# prepare the data (could be defined in the function directly)
Id = DxHx.df[,"Id"]
x = DxHx.df[,"Hx"]/DxHx.df[,"Ht"]#calculate relative heights
y = DxHx.df[,"Dx"]
# define the relative knot positions and order of splines
knt_x = c(0.0, 0.1, 0.75, 1.0); ord_x = 4 # B-Spline knots: fix effects; order (cubic = 4)
knt_z = c(0.0, 0.1 ,1.0); ord_z = 4 # B-Spline knots: rnd effects
# fit the model
taper.model <- TapeR_FIT_LME.f(Id, x, y, knt_x, ord_x, knt_z, ord_z,
IdKOVb = "pdSymm")
## save model parameters for documentation or dissemination
## parameters can be load()-ed and used to predict the taper
## or volume using one or several measured dbh
#spruce.taper.pars <- taper.model$par.lme
#save(spruce.taper.pars, file="spruce.taper.pars.rdata")
}
\references{
Kublin, E., Breidenbach, J., Kaendler, G. (2013) A flexible stem
taper and volume prediction method based on mixed-effects B-spline
regression, Eur J For Res, 132:983-997.
}
\seealso{
\code{\link{E_DHx_HmDm_HT.f}}, \code{\link{E_DHx_HmDm_HT_CIdHt.f}},
\code{\link{E_HDx_HmDm_HT.f}}, \code{\link{E_VOL_AB_HmDm_HT.f}}
}
\author{
Edgar Kublin
}
|
/man/TapeR_FIT_LME.f.Rd
|
no_license
|
jonibio/TapeR
|
R
| false
| true
| 4,267
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TapeR_FIT_LME.f.R
\name{TapeR_FIT_LME.f}
\alias{TapeR_FIT_LME.f}
\title{Fits a taper curve model to the specified diameter-height data}
\usage{
TapeR_FIT_LME.f(Id, x, y, knt_x, ord_x, knt_z, ord_z, IdKOVb = "pdSymm", ...)
}
\arguments{
\item{Id}{Vector of tree identifiers of same length as diameter and height
measurements.}
\item{x}{Numeric vector of height measurements (explanatory variables) along
the stem relative to the tree height.}
\item{y}{Numeric vector of diameter measurements (response) along the stem
(in centimeters).}
\item{knt_x}{Numeric vector of relative knot positions for fixed effects.}
\item{ord_x}{Numeric scalar. Order of fixed effects Spline (4=cubic).}
\item{knt_z}{Numeric vector of relative knot positions for random effects.}
\item{ord_z}{Numeric scalar. Order of random effects Spline (4=cubic).}
\item{IdKOVb}{Character string. Type of covariance matrix used by
\code{lme}. Only "pdSymm" makes sense. Rather reduce number of knots if
function does not converge.}
\item{...}{not currently used}
}
\value{
List of model properties
\itemize{
\item{fit.lme}{Summary of the fitted lme model.}
\item{par.lme}{List of model parameters (e.g., coefficients and
variance-covariance matrices) needed for volume estimation and other
functions in this package.}
Components of the \code{par.lme} list
\itemize{
\item{knt_x}{Relative positions of the fixed effects Spline knots along the stem.}
\item{pad_knt_x}{Padded version of knt_x, as used to define B-Spline design matrix.}
\item{ord_x}{Order of the spline.}
\item{knt_z}{Relative positions of the random effects Spline knots along
the stem.}
\item{pad_knt_z}{Padded version of knt_z, as used to define B-Spline design matrix.}
\item{ord_z}{Order of the spline.}
\item{b_fix}{Fixed-effects spline coefficients.}
\item{KOVb_fix}{Covariance of fixed-effects.}
\item{sig2_eps}{Residual variance.}
\item{dfRes}{Residual degrees of freedom.}
\item{KOVb_rnd}{Covariance of random effects.}
\item{theta}{Variance parameters in natural parametrization. See Details. }
\item{KOV_theta}{Approximate asymptotic covariance matrix of variance parameters.}
}
}
}
\description{
Fits a taper curve model with random effects on tree-level based
on B-Splines to the specified diameter-height data. Number and position of
nodes and order of B-Splines can be specified.
}
\details{
If too few trees are given, the linear mixed model (lme) will not
converge. See examples for a suggestion of node positions.
The variance parameters \code{theta} are stored in the natural parametrization
(Pinheiro and Bates (2004), p. 93). This means log for variances and logit for
covariances. \code{theta} is the vectorized triangle of the random effects
covariance matrix + the residual variance (lSigma). Given there are 2 inner
knots for random effects, the structure will be
c(sig^2_b1, sig_b1 sig_b2, sig_b1 sig_b3, sig_b1 sig_b4, sig^2_b2,...,sig^2_b4, lSigma)
}
\examples{
# load example data
data(DxHx.df)
# prepare the data (could be defined in the function directly)
Id = DxHx.df[,"Id"]
x = DxHx.df[,"Hx"]/DxHx.df[,"Ht"]#calculate relative heights
y = DxHx.df[,"Dx"]
# define the relative knot positions and order of splines
knt_x = c(0.0, 0.1, 0.75, 1.0); ord_x = 4 # B-Spline knots: fix effects; order (cubic = 4)
knt_z = c(0.0, 0.1 ,1.0); ord_z = 4 # B-Spline knots: rnd effects
# fit the model
taper.model <- TapeR_FIT_LME.f(Id, x, y, knt_x, ord_x, knt_z, ord_z,
IdKOVb = "pdSymm")
## save model parameters for documentation or dissemination
## parameters can be load()-ed and used to predict the taper
## or volume using one or several measured dbh
#spruce.taper.pars <- taper.model$par.lme
#save(spruce.taper.pars, file="spruce.taper.pars.rdata")
}
\references{
Kublin, E., Breidenbach, J., Kaendler, G. (2013) A flexible stem
taper and volume prediction method based on mixed-effects B-spline
regression, Eur J For Res, 132:983-997.
}
\seealso{
\code{\link{E_DHx_HmDm_HT.f}}, \code{\link{E_DHx_HmDm_HT_CIdHt.f}},
\code{\link{E_HDx_HmDm_HT.f}}, \code{\link{E_VOL_AB_HmDm_HT.f}}
}
\author{
Edgar Kublin
}
|
# Print method for "model.selection" objects (model selection tables).
# Behaviour:
#  - falls back to plain data.frame printing when the "terms" attribute is
#    missing or does not match the leading column names;
#  - optionally abbreviates term names and the factor levels of "varying"
#    columns, printing a legend of the abbreviations used;
#  - rounds each numeric column according to its entry in the
#    "column.types" attribute (signif to 4 digits where no rule applies);
#  - adds a per-model "random" column when random-effect terms differ
#    between models;
#  - prints the ranking criterion call, the random terms, and any stored
#    warnings.
#
# Arguments:
#   x            a "model.selection" object.
#   abbrev.names abbreviate term names and varying-column levels?
#   warnings     print warnings stored in attr(x, "warnings")? Default is
#                TRUE unless warnings are globally suppressed
#                (options(warn = -1)).
#   ...          passed to print.data.frame() in the fallback branch only.
#
# Returns the original object invisibly (standard print-method contract).
`print.model.selection` <-
function(x, abbrev.names = TRUE, warnings = getOption("warn") != -1L, ...) {
origx <- x
class(x) <- "data.frame"
xterms <- attr(origx, "terms") # TERMS
# Fallback: no usable "terms" attribute -> plain data.frame printing.
if(is.null(xterms) || !all(xterms %in% colnames(x)[seq_along(xterms)])) {
print.data.frame(x, ...)
} else {
if(abbrev.names) xterms <- abbreviateTerms(xterms, 6L, 3L, deflate = TRUE)
colnames(x)[seq_along(xterms)] <- xterms
# Show the global model call (if recorded); take random-effect terms from
# the global model when available, otherwise from the stored attribute.
globcl <- attr(origx, "global.call")
if(!is.null(globcl)) {
cat("Global model call: ")
print(globcl)
cat("---\n")
random.terms <- attr(getAllTerms(attr(origx, "global")), "random.terms")
if(!is.null(random.terms)) random.terms <- list(random.terms)
} else random.terms <- attr(origx, "random.terms")
# Rounding rules per column type; NA entries mean "signif() to 4 digits".
dig <- c(terms = NA, varying = NA, extra = NA, df = 0L, loglik = 3L, ic = 1L, delta = 2L,
weight = 3L)
column.types <- attr(origx, "column.types")
#stopifnot(names(dig) == levels(column.types)) ## DEBUG
decprint <- dig[column.types[colnames(x)]]
i <- vapply(x, is.numeric, FALSE) & is.na(decprint)
x[, i] <- signif(x[, i], 4L)
k <- which(!is.na(decprint))
for(i in k) x[, i] <- round(x[, i], digits = decprint[i])
# Abbreviate levels of "varying" columns, remembering the mapping so a
# legend can be printed below the table.
vLegend <- NULL
if(abbrev.names) {
vCols <- type2colname(column.types, "varying")
vCols <- vCols[(vCols %in% colnames(x)) & !(vCols %in% c("class"))]
if(!is.null(vCols) && length(vCols) != 0L) {
vlen <- nchar(vCols)
vLegend <- vector(length(vCols), mode = "list")
names(vLegend) <- vCols
x[, vCols] <- droplevels(x[, vCols, drop = FALSE])
for(i in vCols) {
if(!is.factor(x[, i])) next
lev <- levels(x[, i])
lev <- lev[!(lev %in% c("", "NULL"))]
shlev <- abbreviateTerms(lev, nchar(i), deflate = TRUE)
x[, i] <- factor(x[, i], levels = lev, labels = shlev)
# Record only the levels that actually changed.
if(any(j <- shlev != lev)) vLegend[[i]] <-
paste(shlev[j], "=", sQuote(lev[j]))
}
vLegend <- vLegend[!vapply(vLegend, is.null, TRUE)]
}
}
# Collapse each model's random terms into a short "+"-separated label; add
# a "random" column (just before "df") only when the models differ.
uqran <- unique(unlist(random.terms, use.names = FALSE))
abbran <- abbreviateTerms(gsub("1 | ", "", uqran, fixed = TRUE), 1L,
deflate = TRUE)
colran <- vapply(random.terms, function(s) paste(abbran[match(s, uqran)],
collapse = "+"), "")
if(addrandcol <- length(unique(colran)) > 1L) {
k <- which(colnames(x) == "df")[1L]
x <- cbind(x[, 1L:(k - 1L), drop = FALSE], random = colran,
x[, k:ncol(x), drop = FALSE], deparse.level = 0L)
}
# Print the table, dropping columns that are entirely NA.
cat("Model selection table \n")
if(nrow(x) == 0L) {
print.default(colnames(x), quote = FALSE)
cat("<0 rows>", "\n")
} else
print.default(as.matrix(x)[, !vapply(x, function(y) all(is.na(y)), FALSE),
drop = FALSE], na.print = "", quote = FALSE, right = TRUE)
indent <- " "
# Legend mapping abbreviated factor levels back to the originals.
if(abbrev.names && length(vLegend) != 0L) {
cat("Abbreviations:", sep = "\n")
lab <- format(paste0(indent, names(vLegend), ":"))
for(i in seq_along(vLegend)) {
cat(vLegend[[i]], sep = ", ", fill = TRUE, labels =
c(lab[i], rep(paste0(rep(" ", nchar(lab[i])),
collapse = ""), length(vLegend[[i]]) - 1L)))
}
}
# Footnotes: ranking criterion, random terms and stored warnings.
cat("Models ranked by", asChar(attr(attr(origx, 'rank'), "call")), "\n")
if(!is.null(random.terms)) {
if(addrandcol) {
cat("Random terms: \n")
cat(paste0(indent, format(abbran), ": ", uqran), sep = "\n")
} else {
cat("Random terms (all models): \n")
cat(paste(uqran), sep = ", ", fill = TRUE, labels = indent)
cat("\n")
}
}
if (warnings && !is.null(attr(origx, "warnings"))) {
cat("\n")
print.warnings(attr(origx, "warnings"))
}
}
invisible(origx)
}
|
/R/print.model.selection.R
|
no_license
|
cran/MuMIn
|
R
| false
| false
| 3,626
|
r
|
# Print method for "model.selection" objects: formats the selection table
# (abbreviated terms, type-aware rounding, optional per-model "random"
# column), then prints the ranking criterion, random terms and stored
# warnings. Falls back to print.data.frame() when the "terms" attribute is
# absent or inconsistent. Returns its input invisibly.
#
#   x            a "model.selection" object.
#   abbrev.names abbreviate term names and varying-column levels?
#   warnings     print attr(x, "warnings")? TRUE unless options(warn = -1).
#   ...          forwarded to print.data.frame() in the fallback branch.
`print.model.selection` <-
function(x, abbrev.names = TRUE, warnings = getOption("warn") != -1L, ...) {
origx <- x
class(x) <- "data.frame"
xterms <- attr(origx, "terms") # TERMS
# No usable "terms" attribute -> plain data.frame printing.
if(is.null(xterms) || !all(xterms %in% colnames(x)[seq_along(xterms)])) {
print.data.frame(x, ...)
} else {
if(abbrev.names) xterms <- abbreviateTerms(xterms, 6L, 3L, deflate = TRUE)
colnames(x)[seq_along(xterms)] <- xterms
# Global model call (if any); random terms from the global model or the
# stored attribute.
globcl <- attr(origx, "global.call")
if(!is.null(globcl)) {
cat("Global model call: ")
print(globcl)
cat("---\n")
random.terms <- attr(getAllTerms(attr(origx, "global")), "random.terms")
if(!is.null(random.terms)) random.terms <- list(random.terms)
} else random.terms <- attr(origx, "random.terms")
# Per-column-type rounding; NA means signif() to 4 digits.
dig <- c(terms = NA, varying = NA, extra = NA, df = 0L, loglik = 3L, ic = 1L, delta = 2L,
weight = 3L)
column.types <- attr(origx, "column.types")
#stopifnot(names(dig) == levels(column.types)) ## DEBUG
decprint <- dig[column.types[colnames(x)]]
i <- vapply(x, is.numeric, FALSE) & is.na(decprint)
x[, i] <- signif(x[, i], 4L)
k <- which(!is.na(decprint))
for(i in k) x[, i] <- round(x[, i], digits = decprint[i])
# Abbreviate "varying" factor levels; keep a legend of the changes.
vLegend <- NULL
if(abbrev.names) {
vCols <- type2colname(column.types, "varying")
vCols <- vCols[(vCols %in% colnames(x)) & !(vCols %in% c("class"))]
if(!is.null(vCols) && length(vCols) != 0L) {
vlen <- nchar(vCols)
vLegend <- vector(length(vCols), mode = "list")
names(vLegend) <- vCols
x[, vCols] <- droplevels(x[, vCols, drop = FALSE])
for(i in vCols) {
if(!is.factor(x[, i])) next
lev <- levels(x[, i])
lev <- lev[!(lev %in% c("", "NULL"))]
shlev <- abbreviateTerms(lev, nchar(i), deflate = TRUE)
x[, i] <- factor(x[, i], levels = lev, labels = shlev)
if(any(j <- shlev != lev)) vLegend[[i]] <-
paste(shlev[j], "=", sQuote(lev[j]))
}
vLegend <- vLegend[!vapply(vLegend, is.null, TRUE)]
}
}
# Short labels for random terms; add a "random" column only when models
# differ in their random terms.
uqran <- unique(unlist(random.terms, use.names = FALSE))
abbran <- abbreviateTerms(gsub("1 | ", "", uqran, fixed = TRUE), 1L,
deflate = TRUE)
colran <- vapply(random.terms, function(s) paste(abbran[match(s, uqran)],
collapse = "+"), "")
if(addrandcol <- length(unique(colran)) > 1L) {
k <- which(colnames(x) == "df")[1L]
x <- cbind(x[, 1L:(k - 1L), drop = FALSE], random = colran,
x[, k:ncol(x), drop = FALSE], deparse.level = 0L)
}
# Print the table; all-NA columns are dropped.
cat("Model selection table \n")
if(nrow(x) == 0L) {
print.default(colnames(x), quote = FALSE)
cat("<0 rows>", "\n")
} else
print.default(as.matrix(x)[, !vapply(x, function(y) all(is.na(y)), FALSE),
drop = FALSE], na.print = "", quote = FALSE, right = TRUE)
indent <- " "
# Abbreviation legend.
if(abbrev.names && length(vLegend) != 0L) {
cat("Abbreviations:", sep = "\n")
lab <- format(paste0(indent, names(vLegend), ":"))
for(i in seq_along(vLegend)) {
cat(vLegend[[i]], sep = ", ", fill = TRUE, labels =
c(lab[i], rep(paste0(rep(" ", nchar(lab[i])),
collapse = ""), length(vLegend[[i]]) - 1L)))
}
}
# Footnotes.
cat("Models ranked by", asChar(attr(attr(origx, 'rank'), "call")), "\n")
if(!is.null(random.terms)) {
if(addrandcol) {
cat("Random terms: \n")
cat(paste0(indent, format(abbran), ": ", uqran), sep = "\n")
} else {
cat("Random terms (all models): \n")
cat(paste(uqran), sep = ", ", fill = TRUE, labels = indent)
cat("\n")
}
}
if (warnings && !is.null(attr(origx, "warnings"))) {
cat("\n")
print.warnings(attr(origx, "warnings"))
}
}
invisible(origx)
}
|
# Divide adjacency matrix into chunks, scatter to cluster, gather results,
# and return mean of mutual links.
# From "The Art of R Programming", ch 16, loc 9829.
source("mutual_outlink.R")
# cluster:          a 'parallel' cluster object (see parallel::makeCluster).
# adjacency_matrix: square adjacency matrix; chunks are processed by mtl()
#                   (defined in mutual_outlink.R).
# Returns the total mutual-link count divided by the number of unique
# vertex pairs, n*(n-1)/2.
mutlinks <- function(cluster, adjacency_matrix)
{
  n <- nrow(adjacency_matrix) - 1
  nc <- length(cluster)
  # Silence the uneven-split warning from split(), and restore the caller's
  # previous 'warn' setting on exit (the original reset it to 0, clobbering
  # whatever the user had configured).
  old_opts <- options(warn = -1)
  on.exit(options(old_opts), add = TRUE)
  # NOTE(review): chunks are hard-coded instead of split(1:n, 1:nc); kept
  # as-is to preserve behaviour — see the author's comments below.
  ichunks <- c(1,2,3) # this makes the test pass, but who knows why? subscript out of bounds for c(1,4)
  #ichunks <- split(1:3, 1:nc) # experiment to suppress sub out of bounds
  #ichunks <- split(1:n, 1:nc) # subscript out of bounds, presumably for 4
  counts <- clusterApply(cluster, ichunks, mtl, adjacency_matrix)
  # Total the counts, divide by number of unique pairs in the matrix.
  do.call(sum, counts) / (n * (n - 1) / 2)
}
|
/parallel_mutual_outlink.R
|
no_license
|
sjdayday/net
|
R
| false
| false
| 839
|
r
|
# Divide adjacency matrix into chunks, scatter to cluster, gather results, and return mean of mutual links
# From R pgmming, ch 16, loc 9829
source("mutual_outlink.R")
# cluster:          a 'parallel' cluster object
# adjacency_matrix: square adjacency matrix; each chunk is processed by
#                   mtl() (defined in mutual_outlink.R)
# Returns total mutual-link count divided by n*(n-1)/2 unique pairs.
mutlinks <- function(cluster, adjacency_matrix)
{
n <- nrow(adjacency_matrix) - 1
nc <- length(cluster)
options(warn=-1) # don't bother us if split isn't even
# NOTE(review): chunks are hard-coded to 1:3 rather than split(1:n, 1:nc),
# so only part of the matrix is processed — confirm intended behaviour.
ichunks <- c(1,2,3) # this makes the test pass, but who knows why? subscript out of bounds for c(1,4)
#ichunks <- split(1:3, 1:nc) # experiment to suppress sub out of bounds
#ichunks <- split(1:n, 1:nc) # subscript out of bounds, presumably for 4
#print(ichunks)
#print(adjacency_matrix)
# NOTE(review): this resets 'warn' to the default 0 instead of restoring
# the caller's previous setting (better: on.exit(options(old))).
options(warn=0)
counts <- clusterApply(cluster,ichunks,mtl,adjacency_matrix)
do.call(sum,counts) / (n*(n-1) / 2) # total the counts, divide by number of unique pairs in the matrix, and return
}
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(ggplot2)
library(dplyr)
library(tidyr)
library(tidytext)
library(magick)
library(rdrop2)
library(shinyjs)

# UI: replace Shiny's bundled Bootstrap with Bootstrap 4 from a CDN, add a
# local stylesheet, and leave the page body to be rendered server-side into
# the "inc"/"others" htmlOutput slots.
ui <- fluidPage(useShinyjs(), tags$head(
  tagList(
    suppressDependencies("bootstrap"),
    tags$link(
      rel = "stylesheet",
      href = "https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0-beta.3/css/bootstrap.min.css",
      integrity = "sha384-Zug+QiDoJOrZ5t4lssLdxGhVrurbmBWopoEl+M6BdEfwnCJZtKxi1KgxUyJq13dy",
      crossorigin = "anonymous"
    ),
    tags$link(
      rel = "stylesheet",
      href = "blog.css"
    ),
    tags$script(
      src = "https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js",
      integrity = "sha384-ApNbgh9B+Y1QKtv3Rn7W3mgPxhU9K/ScQsAP7hUibX39j7fakFPskvXusvfa0b4Q",
      crossorigin = "anonymous"
    ),
    tags$script(
      src = "https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0-beta.3/js/bootstrap.min.js",
      integrity = "sha384-a5N7Y/aK3qNeh15eJKGWxsqtnX/wWdSZSKp+81YjTmS15nvnvxKHuzaWwXHDli+4",
      crossorigin = "anonymous"
    ),
    tags$meta(
      name = "viewport",
      content = "width=device-width, initial-scale=1.0"
    )
  )), htmlOutput("inc"),
  htmlOutput("others")
  #includeHTML(path = 'index.html')
)

# Server: reads the headline table from Dropbox once, then renders the page
# (header, section menu, jumbotron, one Bootstrap card per story, footer).
server <- function(input, output) {
  drop_auth(rdstoken = 'auth/token.rds')
  data <- drop_read_csv("stories/td.csv", stringsAsFactors = FALSE)
  # Build the full page for the given subset of stories.
  # BUG FIX: the original body read the captured full 'data' instead of the
  # 'dataset' argument, so the section filtering done by the observers
  # below had no visible effect.
  getPage <- function(dataset = data) {
    hd <- dataset$title
    refurls <- dataset$url
    content <- dataset$description
    section <- dataset$section
    pub_date <- format(as.Date(dataset$date, format = "%m/%d/%Y"), '%b %d')
    size <- length(hd)
    cardId <- vector("list", size)
    header <- includeHTML(path = 'html/header.html')
    # Section navigation links; only 'world' and 'india' currently have
    # observers attached below.
    menus <- tags$nav(class = 'nav d-flex justify-content-between',list(actionLink('world','World',class='p-2 text-muted'),actionLink('india','India',class='p-2 text-muted'),actionLink('technology','Technology',class='p-2 text-muted'),actionLink('top-news','Top News',class='p-2 text-muted'),actionLink('business','Business',class='p-2 text-muted'),actionLink('politics','Politics',class='p-2 text-muted'),actionLink('science','Science',class='p-2 text-muted'),actionLink('health','Health',class='p-2 text-muted'),actionLink('life-style','Life Style',class='p-2 text-muted'),actionLink('travel','Travel',class='p-2 text-muted')))
    jumbotron <- includeHTML(path = 'html/jumbotron.html')
    # One card per story: section badge, date, headline, teaser, link.
    cards <- lapply(seq_len(size), function(X) {
      details = list( tags$div(class = 'container d-flex justify-content-between p-0' ,tags$a(href = section[X], tags$strong(class = 'd-inline-block mb-2 text-success',section[X])),tags$div(class = 'mb-1 text-muted',pub_date[X]) ) , tags$h4(class='mb-1',hd[X]),tags$p(class = 'card-text mb-auto', style = 'font-size: 14px;',content[X]),tags$a(href=refurls[X],'Continue reading'))
      cardId[[X]] <- tags$div(class = 'col-md-6',tags$div(class = 'card flex-md-row mb-4 shadow-sm h-md-250',tags$div(class = 'card-body d-flex flex-column align-items-start p-3',details)))
      return(cardId[[X]])
    })
    footer <- includeHTML(path = 'html/footer.html')
    cardholder <- tags$div(cards, class = 'row mb-2')
    container <- tags$div(header, menus, jumbotron, cardholder, footer, class = 'container')
    return(container)
  }
  # Section filters: re-render the page with only the selected section.
  observeEvent(input$world, {
    data <- data[data$section == 'world', ]
    hide("inc")
    output$others <- output$inc <- renderUI({getPage(dataset = data)})
  })
  observeEvent(input$india, {
    data <- data[data$section == 'india', ]
    hide("inc")
    output$others <- output$inc <- renderUI({getPage(dataset = data)})
  })
  # Initial render with the full data set.
  output$inc <- renderUI({getPage(data)})
}
# Run the application
shinyApp(ui = ui, server = server)
|
/app.R
|
no_license
|
vm-natarajan/r-headlines
|
R
| false
| false
| 4,033
|
r
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny);
library(ggplot2);
library(dplyr);
library(tidyr);
library(tidytext);
library(magick);
library(rdrop2);
library(shinyjs);
# UI: swaps Shiny's bundled Bootstrap for Bootstrap 4 from a CDN, adds a
# local stylesheet, and renders the page body server-side into the
# "inc"/"others" htmlOutput slots.
ui <- fluidPage( useShinyjs(),tags$head(
tagList(
suppressDependencies("bootstrap"),
tags$link(
rel="stylesheet",
href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0-beta.3/css/bootstrap.min.css",
integrity="sha384-Zug+QiDoJOrZ5t4lssLdxGhVrurbmBWopoEl+M6BdEfwnCJZtKxi1KgxUyJq13dy",
crossorigin="anonymous"
),
tags$link(
rel="stylesheet",
href="blog.css"
),
tags$script(
src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js",
integrity="sha384-ApNbgh9B+Y1QKtv3Rn7W3mgPxhU9K/ScQsAP7hUibX39j7fakFPskvXusvfa0b4Q",
crossorigin="anonymous"
),
tags$script(
src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0-beta.3/js/bootstrap.min.js",
integrity="sha384-a5N7Y/aK3qNeh15eJKGWxsqtnX/wWdSZSKp+81YjTmS15nvnvxKHuzaWwXHDli+4",
crossorigin="anonymous"
),
tags$meta(
name="viewport",
content="width=device-width, initial-scale=1.0"
)
)),htmlOutput("inc")
,htmlOutput("others")
#includeHTML(path = 'index.html')
)
# Server: reads the headline table from Dropbox, then renders the page
# (header, section menu, jumbotron, one Bootstrap card per story, footer).
server <- function(input, output) {
drop_auth(rdstoken = 'auth/token.rds');
data <- drop_read_csv("stories/td.csv",stringsAsFactors = FALSE);
# Builds the page for the given subset of stories.
# NOTE(review): the body reads the captured 'data' rather than the
# 'dataset' argument, so the filtering done by the observers below appears
# to have no effect — confirm and fix.
getPage<-function(dataset = data) {
hd <- data$title;
refurls <- data$url;
content <- data$description;
section <- data$section;
pub_date <- format(as.Date(data$date,format = "%m/%d/%Y"),'%b %d');
size <- length(hd);
cardId <- vector("list",size)
header <- includeHTML(path = 'html/header.html');
# Section navigation; only 'world' and 'india' have observers below.
menus <- tags$nav(class = 'nav d-flex justify-content-between',list(actionLink('world','World',class='p-2 text-muted'),actionLink('india','India',class='p-2 text-muted'),actionLink('technology','Technology',class='p-2 text-muted'),actionLink('top-news','Top News',class='p-2 text-muted'),actionLink('business','Business',class='p-2 text-muted'),actionLink('politics','Politics',class='p-2 text-muted'),actionLink('science','Science',class='p-2 text-muted'),actionLink('health','Health',class='p-2 text-muted'),actionLink('life-style','Life Style',class='p-2 text-muted'),actionLink('travel','Travel',class='p-2 text-muted')))
jumbotron <- includeHTML(path = 'html/jumbotron.html');
# One card per story: section badge, date, headline, teaser, link.
cards <- lapply(c(1:size), function(X){
details = list( tags$div(class = 'container d-flex justify-content-between p-0' ,tags$a(href = section[X], tags$strong(class = 'd-inline-block mb-2 text-success',section[X])),tags$div(class = 'mb-1 text-muted',pub_date[X]) ) , tags$h4(class='mb-1',hd[X]),tags$p(class = 'card-text mb-auto', style = 'font-size: 14px;',content[X]),tags$a(href=refurls[X],'Continue reading'));
cardId[[X]] <- tags$div(class = 'col-md-6',tags$div(class = 'card flex-md-row mb-4 shadow-sm h-md-250',tags$div(class = 'card-body d-flex flex-column align-items-start p-3',details)))
return(cardId[[X]]);
})
footer <- includeHTML(path = 'html/footer.html');
cardholder <- tags$div(cards,class = 'row mb-2');
container <- tags$div(header,menus,jumbotron,cardholder,footer,class = 'container')
return(container);
}
# Section filters: re-render with only the selected section's stories.
observeEvent(input$world, {
data <- data[data$section == 'world',];
hide("inc");
output$others<- output$inc<-renderUI({getPage(dataset = data)})
})
observeEvent(input$india, {
data <- data[data$section == 'india',];
hide("inc");
output$others<- output$inc<-renderUI({getPage(dataset = data)})
})
# Initial render with the full data set.
output$inc<-renderUI({getPage(data)})
}
# Run the application
shinyApp(ui = ui, server = server)
|
######## R code to import parsed, tidy csv rates after Python parser #########

## Build FXRates table from the Python parser output.
## FIX: the original used colClasses = c('character','character','myValue');
## 'myValue' is not a class read.csv knows (no setAs() method is defined in
## this project), so the read would fail. RATE is a numeric exchange rate.
FXRates_ALL <- read.csv('FXRATES_clean.csv',
                        header = F,
                        sep = ",",
                        strip.white = T,
                        colClasses = c('character', 'character', 'numeric'),
                        col.names = c('CURRENCY',
                                      'DATE',
                                      'RATE'),
                        stringsAsFactors = F,
                        fill = T)

# Check dimensions
dim(FXRates_ALL)

## Coerce DATE from 'character' into the 'Date' class
FXRates_ALL$DATE <- as.Date(as.character(FXRates_ALL$DATE), format = '%Y-%m-%d')

## The next section is optional - it adds a corresponding dummy rate for
## USD, making cross-rate calculations easier.
## Define date ranges - these should correspond to your Quandl API call from step 1
start <- as.Date('2012-01-01')
now <- as.Date('2015-03-06')

## Length of the date range in days
rows.date <- as.integer(now - start)

## Create a data frame from the date-range sequence
alldates <- data.frame(seq(start, now, by = 1))
colnames(alldates) <- 'DATE'

## Add the dummy USD columns: a rate of 1 against itself on every date
alldates$RATE <- 1
alldates$CURRENCY <- 'USD'

## Merge USD dummy data frame with main FXRates data frame ('merge' from base package)
FXRates_ALL <- merge(FXRates_ALL,
                     alldates,
                     all = T)

# Recheck dimensions
dim(FXRates_ALL)
|
/3.Quandl_FXRate_Load_into_Dataframe.R
|
no_license
|
rjshanahan/Quandl_FXRates_Parser
|
R
| false
| false
| 1,525
|
r
|
######## R code to import parsed, tidy csv rates after Python parser#########
# load required packages
## build FXRates table from Python parser output
## NOTE(review): colClasses uses 'myValue', which is not a class read.csv
## knows by default — unless a setAs("character","myValue",...) method is
## defined elsewhere, this read will fail; 'numeric' was likely intended.
FXRates_ALL <- read.csv('FXRATES_clean.csv',
header=F,
sep=",",
strip.white=T,
colClasses=c('character', 'character', 'myValue'),
col.names=c('CURRENCY',
'DATE',
'RATE'),
stringsAsFactors=F,
fill=T)
# check dimensions
dim(FXRates_ALL)
## coerce date into 'date' class from 'chr'
FXRates_ALL$DATE <- as.Date(as.character(FXRates_ALL$DATE), format='%Y-%m-%d')
## the next section is optional - it adds a corresponding dummy rate for USD - making calculations easier
## add dummy USD rates
## define date ranges - these should correspond to your Quandl API call from step 1
start <- as.Date('2012-01-01')
now <- as.Date('2015-03-06')
## length of date ranges (in days)
rows.date <- as.integer(now - start)
## create data frame using the date-range sequence
alldates <- data.frame(seq((start), (now),by = 1))
## colnames
colnames(alldates) <- 'DATE'
## add additional columns: USD rate of 1 against itself on every date
alldates$RATE <- 1
alldates$CURRENCY <- 'USD'
## merge USD dummy data frame with main FXRates data frame ('merge' from base package)
FXRates_ALL <- merge(FXRates_ALL,
alldates,
all = T)
# recheck dimensions
dim(FXRates_ALL)
|
## Alex Shchepetkin
## Calculates and caches the inverse of the matrix
##
## Caches and retrieves the inverse of the matrix
## Creates a special "matrix" wrapper that can cache its inverse.
## Returns a list of accessor closures:
##   set(y)            replace the matrix and invalidate the cached inverse
##   get()             return the stored matrix
##   setinverse(inv)   store a computed inverse in the cache
##   getinverse()      return the cached inverse, or NULL if not computed
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  set <- function(y) {
    x <<- y
    # BUG FIX: the original assigned 'm <<- NULL' here, which never cleared
    # 'inv' (and leaked a global 'm'), so a stale inverse survived set().
    inv <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    inv <<- inverse
  }
  getinverse <- function() {
    inv
  }
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## Computes the inverse of the special "matrix" created by makeCacheMatrix,
## returning the cached inverse when one is available (with a message),
## otherwise computing it with solve(), caching it, and returning it.
## Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (!is.null(inv)) {
    message("Getting cached data")
    return(inv)
  }
  # Cache miss: compute, store, return.
  # (The original also called x$set(x$get()) here, which served no purpose
  # and, with a correct set(), would wipe the cache on every call.)
  inv <- solve(x$get(), ...)
  x$setinverse(inv)
  inv
}
|
/cachematrix.R
|
no_license
|
alexshchep/ProgrammingAssignment2
|
R
| false
| false
| 702
|
r
|
## Alex Shchepetkin
## Calculates and caches the inverse of the matrix
##
## Caches and retrieves the inverse of the matrix.
## Returns a list of accessor closures: set/get for the matrix,
## setinverse/getinverse for the cached inverse (NULL until computed).
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
# Replace the stored matrix.
# NOTE(review): this assigns 'm <<- NULL' but the cache variable is 'inv',
# so the cached inverse is never invalidated (and 'm' escapes to the
# enclosing environment) — likely should be 'inv <<- NULL'.
set <- function(y){
x <<-y
m <<- NULL
}
get <- function(){
x
}
# Store a computed inverse in the cache.
setinverse <- function(inverse){
inv <<- inverse
}
# Return the cached inverse, or NULL if none has been stored.
getinverse <- function(){
inv
}
list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## Calculates the inverse of the matrix wrapped by makeCacheMatrix,
## returning the cached value when available (with a message), otherwise
## computing it with solve(), caching it and returning it.
cacheSolve <- function(x, ...) {
inv <- x$getinverse()
if(!is.null(inv)){
message("Getting cached data")
return(inv)
}
# NOTE(review): this set(get()) round-trip serves no purpose; with a
# correctly implemented set() it would wipe the cache on every call.
x$set(x$get())
inv <- solve(x$get())
x$setinverse(inv)
return(inv)
}
|
# Benchmark three regression learners (random forest, neural net, kernel
# SVM) on three variants of the apartment-price data set using mlr, then
# cache the resampling results to disk.
library(mlr)
library(dplyr)
set.seed(1792)
# Apartment data; rows with missing values are dropped.
mieszkania <- na.omit(read.csv(file = "./data/mieszkania_dane.csv", encoding = "UTF-8"))
# tasks - each task is a different data set (full data, without the
# district column, without the max-floor column); target is price per m2
predict_price <- makeRegrTask(id = "cenaMieszkan", data = mieszkania, target = "cena_m2")
predict_price2 <- makeRegrTask(id = "cenaMieszkanBezDzielnicy",
data = select(mieszkania, -dzielnica),
target = "cena_m2")
predict_price3 <- makeRegrTask(id = "cenaMieszkanBezMaksPieter",
data = select(mieszkania, -pietro_maks),
target = "cena_m2")
all_learners <- listLearners()
learnerRF <- makeLearner("regr.randomForest")
learnerNN <- makeLearner("regr.nnet")
learnerSVM <- makeLearner("regr.ksvm")
# NOTE(review): looks up "regr.svm" while the learner above is "regr.ksvm",
# and the result is unused — confirm which was intended.
filter(all_learners, class == "regr.svm")
# Single holdout and cross-validation runs for the random forest.
bench_ho <- holdout(learnerRF, predict_price)
bench_cv <- crossval(learnerRF, predict_price)
getRRPredictionList(bench_cv)
# Full benchmark: 3 learners x 3 tasks.
bench_regr <- benchmark(learners = list(learnerRF,
learnerNN,
learnerSVM),
tasks = list(predict_price,
predict_price2,
predict_price3))
# see also batchmark - parallel computation
as.data.frame(bench_regr)
getBMRAggrPerformances(bench_regr)
getBMRPerformances(bench_regr)
plotBMRBoxplots(bench_regr)
# Cache the results so the expensive benchmark need not be re-run.
save(bench_regr, bench_cv, bench_ho, file = "./data/regression_cache.RData")
|
/analysis.R
|
no_license
|
JarekMendyk/MLR-and-STWOR
|
R
| false
| false
| 1,543
|
r
|
# Benchmark three regression learners (random forest, neural net, kernel
# SVM) on three variants of the apartment-price data using mlr; cache the
# results.
library(mlr)
library(dplyr)
set.seed(1792)
# Apartment data; rows with missing values dropped.
mieszkania <- na.omit(read.csv(file = "./data/mieszkania_dane.csv", encoding = "UTF-8"))
# tasks - each task is a different data set; target is price per m2
predict_price <- makeRegrTask(id = "cenaMieszkan", data = mieszkania, target = "cena_m2")
predict_price2 <- makeRegrTask(id = "cenaMieszkanBezDzielnicy",
data = select(mieszkania, -dzielnica),
target = "cena_m2")
predict_price3 <- makeRegrTask(id = "cenaMieszkanBezMaksPieter",
data = select(mieszkania, -pietro_maks),
target = "cena_m2")
all_learners <- listLearners()
learnerRF <- makeLearner("regr.randomForest")
learnerNN <- makeLearner("regr.nnet")
learnerSVM <- makeLearner("regr.ksvm")
# NOTE(review): searches "regr.svm" but the learner is "regr.ksvm"; unused.
filter(all_learners, class == "regr.svm")
# Holdout and cross-validation runs for the random forest.
bench_ho <- holdout(learnerRF, predict_price)
bench_cv <- crossval(learnerRF, predict_price)
getRRPredictionList(bench_cv)
# Full benchmark: 3 learners x 3 tasks.
bench_regr <- benchmark(learners = list(learnerRF,
learnerNN,
learnerSVM),
tasks = list(predict_price,
predict_price2,
predict_price3))
# see also batchmark - parallel computation
as.data.frame(bench_regr)
getBMRAggrPerformances(bench_regr)
getBMRPerformances(bench_regr)
plotBMRBoxplots(bench_regr)
# Cache results to disk.
save(bench_regr, bench_cv, bench_ho, file = "./data/regression_cache.RData")
|
# Beeswarm plot of per-sample APOBEC-mutagenesis enrichment by cancer type
# (written to TIFF), plus a per-cancer table of the fraction of samples
# classified as enriched (enrichment >= 2).
library(beeswarm)
library(data.table)
library(ggplot2)
# Per-sample enrichment values (tab-separated).
gordeninEnrichment <- read.csv("/Users/mar/BIO/PROJECTS/APOBEC/Project1_TranscriptionLevel/Results/sample_enrichment.txt", sep = '\t')
gordeninEnrichment <- data.table(gordeninEnrichment)
# Point border colour: red for enriched samples (>= 2), green otherwise.
gordeninEnrichment[, color := ifelse(APOBEC_ENRICHMENT>=2,
rgb(212,42,47,maxColorValue = 255),
rgb(51,159,52,maxColorValue = 255))]
# Point fill colour: lighter shades of the same red/green.
gordeninEnrichment[, fcolor := ifelse(APOBEC_ENRICHMENT>=2,
rgb(233,148,151,maxColorValue = 255),
rgb(152,206,153,maxColorValue = 255))]
# Write the beeswarm plot to a TIFF file.
tiff("/Users/mar/BIO/PROJECTS/APOBEC/Project1_TranscriptionLevel/paper/pics/enrichment_beeswarm.tiff")
beeswarm(APOBEC_ENRICHMENT ~ CANCER_TYPE, data=gordeninEnrichment, pch=21,
pwcol=gordeninEnrichment[,color], pwbg=gordeninEnrichment[,fcolor], spacing=0.55, cex=1.5,
xlab = "Cancer", ylab = "APOBEC-mutagenesis enrichment")
dev.off()
# Classify samples and compute per-cancer counts and fractions of enriched
# samples.
gordeninEnrichment[, grp := ifelse(APOBEC_ENRICHMENT>=2,
"enriched",
"notenriched")]
pctdata <- gordeninEnrichment[, .(cnt=.N), by=.(CANCER_TYPE,grp)]
pctdata[, total:=sum(cnt), by=CANCER_TYPE]
pctdata[, pct := round(cnt/total,2)]
|
/R/plot_bee_enrichment.R
|
no_license
|
mkazanov/molcompbio
|
R
| false
| false
| 1,332
|
r
|
# Beeswarm plot of APOBEC-mutagenesis enrichment by cancer type (TIFF
# output) and a per-cancer table of enriched-sample fractions.
library(beeswarm)
library(data.table)
library(ggplot2)
# Per-sample enrichment values (tab-separated).
gordeninEnrichment <- read.csv("/Users/mar/BIO/PROJECTS/APOBEC/Project1_TranscriptionLevel/Results/sample_enrichment.txt", sep = '\t')
gordeninEnrichment <- data.table(gordeninEnrichment)
# Border colour: red when enrichment >= 2, green otherwise.
gordeninEnrichment[, color := ifelse(APOBEC_ENRICHMENT>=2,
rgb(212,42,47,maxColorValue = 255),
rgb(51,159,52,maxColorValue = 255))]
# Fill colour: lighter shades of the same red/green.
gordeninEnrichment[, fcolor := ifelse(APOBEC_ENRICHMENT>=2,
rgb(233,148,151,maxColorValue = 255),
rgb(152,206,153,maxColorValue = 255))]
# Write the beeswarm plot to TIFF.
tiff("/Users/mar/BIO/PROJECTS/APOBEC/Project1_TranscriptionLevel/paper/pics/enrichment_beeswarm.tiff")
beeswarm(APOBEC_ENRICHMENT ~ CANCER_TYPE, data=gordeninEnrichment, pch=21,
pwcol=gordeninEnrichment[,color], pwbg=gordeninEnrichment[,fcolor], spacing=0.55, cex=1.5,
xlab = "Cancer", ylab = "APOBEC-mutagenesis enrichment")
dev.off()
# Per-cancer counts and fractions of enriched samples.
gordeninEnrichment[, grp := ifelse(APOBEC_ENRICHMENT>=2,
"enriched",
"notenriched")]
pctdata <- gordeninEnrichment[, .(cnt=.N), by=.(CANCER_TYPE,grp)]
pctdata[, total:=sum(cnt), by=CANCER_TYPE]
pctdata[, pct := round(cnt/total,2)]
|
# Download (if needed) and load the brasil.io COVID-19 per-city data set,
# keeping only cities in the requested states.
#
# directory: directory holding the cached download (default "data").
#            FIX: the original ignored this argument and hard-coded
#            "data/dados.gz"; paths are now built from it (the default
#            behaviour is unchanged).
# estados:   vector of 2-digit state IBGE-code prefixes to keep.
# update:    force a re-download even if a cached copy exists.
# Returns a data frame with renamed city-level columns (CD_GEOCODM,
# confirmed/death counts and rates, population estimate, date).
baixa_dados <- function(directory = "data", estados, update = F) {
  require(dplyr)
  arquivo <- file.path(directory, "dados.gz")
  if (file.exists(arquivo) == F | update == T) {
    download.file("https://data.brasil.io/dataset/covid19/caso_full.csv.gz",
                  destfile = arquivo)
  }
  # Extract the data. FIX: close the gz connection when done (the original
  # opened it and never closed it, leaking the connection).
  fn <- gzfile(arquivo, open = "r")
  on.exit(close(fn), add = TRUE)
  caso_full <- read.csv(fn)
  caso_full %>%
    # First two digits of the IBGE city code identify the state.
    filter(substr(city_ibge_code, start = 1, stop = 2) %in% estados) %>%
    # Rename the "last_available_*" columns to short names.
    mutate(CD_GEOCODM = city_ibge_code,
           confirmed = last_available_confirmed,
           confirmed_per_100k_inhabitants = last_available_confirmed_per_100k_inhabitants,
           deaths = last_available_deaths,
           death_rate = last_available_death_rate,
           date = last_available_date) %>%
    select(CD_GEOCODM, city, new_confirmed, confirmed, confirmed_per_100k_inhabitants, new_deaths,
           deaths, death_rate, estimated_population_2019, date) -> df_covid
  return(df_covid)
}
# Download the Brazilian municipality mesh (GeoJSON) and the municipality
# metadata from the public IBGE APIs and return them merged as a single
# 'sf' object keyed by municipality id.
# NOTE(review): requires network access; no error handling if either API
# call fails.
get_mapa <- function() {
require(geojsonR)
require(geojsonsf)
require(sf)
require(httr)
# using IBGE APIs to collect the data
# https://servicodados.ibge.gov.br/api/docs
aux <- httr::GET('https://servicodados.ibge.gov.br/api/v3/malhas/paises/BR?intrarregiao=municipio&formato=application/vnd.geo+json&qualidade=intermediaria')
mapa <- geojsonsf::geojson_sf(httr::content(aux,as = "text"))
dados <- jsonlite::fromJSON('https://servicodados.ibge.gov.br/api/v1/localidades/municipios')
# join the two sources on the municipality id
mapa$codarea <- as.numeric(mapa$codarea)
names(mapa) <- c("id","geometry")
mapa <- merge(mapa, dados)
return(mapa)
}
# Subset the municipality map to a named set of municipalities ('entorno')
# restricted to the given state abbreviations ('estados'). Municipality
# names are uppercased before matching, so 'entorno' entries must be
# uppercase. Defaults cover Manaus and its surroundings (AM/RO).
get_entorno <- function(mapa, entorno = c("MANAUS",
                                          "CAREIRO",
                                          "CAREIRO DA VÁRZEA",
                                          "AUTAZES",
                                          "MANAQUIRI",
                                          "BERURI",
                                          "BORBA",
                                          "TAPAUÁ",
                                          "MANICORÉ",
                                          "HUMAITÁ",
                                          "CANUTAMA",
                                          "LÁBREA",
                                          "PORTO VELHO"), estados = c("AM","RO")){
  # Normalise names so they compare against the uppercase 'entorno' list.
  mapa$nome <- toupper(mapa$nome)
  # Two independent row masks: by municipality name and by state (UF) code
  # taken from the nested microrregiao -> mesorregiao -> UF structure.
  keep_name <- mapa$nome %in% entorno
  keep_uf <- mapa$microrregiao$mesorregiao$UF$sigla %in% estados
  mapa[keep_name & keep_uf, ]
}
|
/901_auxiliar_functions.R
|
no_license
|
rodolfo-oliveira/covid-boletim
|
R
| false
| false
| 2,599
|
r
|
# Download (if needed) and load the brasil.io COVID-19 per-city data set,
# keeping only cities in the requested states (2-digit IBGE code prefixes).
# NOTE(review): the 'directory' argument is never used — the paths below
# are hard-coded to "data/dados.gz"; confirm intent.
# NOTE(review): the gzfile connection 'fn' is opened but never closed.
baixa_dados <- function(directory = "data", estados, update=F){
require(dplyr)
# Re-download only when no cached copy exists or update = TRUE.
if(file.exists("data/dados.gz")==F | update==T){
download.file("https://data.brasil.io/dataset/covid19/caso_full.csv.gz",
destfile = "data/dados.gz")
}
# extract the data
fn <- gzfile("data/dados.gz",open = "r",)
caso_full <- read.csv(fn)
# Keep requested states and rename the "last_available_*" columns.
caso_full %>%
filter(substr(city_ibge_code, start = 1, stop = 2) %in% estados) %>%
mutate(CD_GEOCODM = city_ibge_code,
new_confirmed = new_confirmed,
confirmed = last_available_confirmed,
confirmed_per_100k_inhabitants = last_available_confirmed_per_100k_inhabitants,
new_deaths = new_deaths,
deaths = last_available_deaths,
death_rate = last_available_death_rate,
date = last_available_date) %>%
select(CD_GEOCODM, city, new_confirmed,confirmed, confirmed_per_100k_inhabitants, new_deaths,
deaths, death_rate, estimated_population_2019, date) -> df_covid
return(df_covid)
}
# Download the Brazilian municipality mesh and municipality metadata from
# the public IBGE APIs; return them merged as one 'sf' object keyed by
# municipality id. Requires network access.
get_mapa <- function() {
require(geojsonR)
require(geojsonsf)
require(sf)
require(httr)
# using IBGE APIs to collect the data
# https://servicodados.ibge.gov.br/api/docs
aux <- httr::GET('https://servicodados.ibge.gov.br/api/v3/malhas/paises/BR?intrarregiao=municipio&formato=application/vnd.geo+json&qualidade=intermediaria')
mapa <- geojsonsf::geojson_sf(httr::content(aux,as = "text"))
dados <- jsonlite::fromJSON('https://servicodados.ibge.gov.br/api/v1/localidades/municipios')
# join the two sources on the municipality id
mapa$codarea <- as.numeric(mapa$codarea)
names(mapa) <- c("id","geometry")
mapa <- merge(mapa, dados)
return(mapa)
}
#' Subset a municipality map to Manaus and its surroundings.
#' `mapa` must have a `nome` column and the nested IBGE structure
#' `microrregiao$mesorregiao$UF$sigla` (as returned by get_mapa()).
#' Returns the matching rows with `nome` upper-cased.
get_entorno <- function(mapa, entorno = c("MANAUS",
"CAREIRO",
"CAREIRO DA VÁRZEA",
"AUTAZES",
"MANAQUIRI",
"BERURI",
"BORBA",
"TAPAUÁ",
"MANICORÉ",
"HUMAITÁ",
"CANUTAMA",
"LÁBREA",
"PORTO VELHO"), estados = c("AM","RO")){
# upper-case once so the comparison with `entorno` is case-insensitive
mapa$nome <- toupper(mapa$nome)
# keep rows matching both the municipality list and the state list
result <- mapa[mapa$nome %in% entorno & mapa$microrregiao$mesorregiao$UF$sigla %in% estados,]
return(result)
}
|
# File: 03_trimmomatic_array_job.R
# Auth: umar.niazi@kcl.as.uk
# DESC: create a parameter file and shell script to run array job on hpc
# Date: 15/08/2017

## set variables and source libraries
source('header.R')

## connect to mysql database to get sample information
library('RMySQL')
##### connect to mysql database to get samples
db = dbConnect(MySQL(), user='rstudio', password='12345', dbname='Projects', host='127.0.0.1')
dbListTables(db)

# g_did comes from header.R (project data id) -- echoed as a sanity check
g_did
# check how many files each sample has
q = paste0('select count(File.idSample) as files, Sample.idData, Sample.title, Sample.id as SampleID from File, Sample
where (Sample.idData = 14 and File.idSample = Sample.id) group by File.idSample')
dfQuery = dbGetQuery(db, q)
# remove white space from sample titles
dfQuery$title = gsub(" ", "", dfQuery$title, fixed = TRUE)
dfQuery

# for each sample id, get the corresponding files
cvQueries = paste0('select File.*, Sample.title from File, Sample
where (Sample.idData = 14 and Sample.id =', dfQuery$SampleID, ') and (File.idSample = Sample.id)')

# set SGE header variables
cvShell = '#!/bin/bash'
cvShell.2 = '#$ -S /bin/bash'
cvProcessors = '#$ -pe smp 1'
cvWorkingDir = '#$ -cwd'
cvJobName = '#$ -N trim-array'
cvStdout = '#$ -j y'
cvMemoryReserve = '#$ -l h_vmem=19G'
# set array job loop: one task per sample.
# BUG FIX: this was hard-coded to '#$ -t 1-8'; deriving it from the query
# keeps the script correct when the number of samples changes.
cvArrayJob = paste0('#$ -t 1-', nrow(dfQuery))
# using high memory queue with one slot and 19 Gigs of memory
# set the directory names for trimmomatic
cvInput = 'input/'
cvOutput = 'output/Trimmed/'
cvOutput.unpaired = paste0(cvOutput, 'Unpaired/')
cvTrimmomatic = '/opt/apps/bioinformatics/trimmomatic/0.36/trimmomatic-0.36.jar'
cvIlluminaAdap = '/users/k1625253/brc_scratch/Data/MetaData/trimmomatic_adapters.fa'

# create a parameter file: one line per sample holding the six paths
# (paired inputs, trimmed outputs, unpaired outputs) read by the array job
dir.create('AutoScripts')
oFile.param = file('AutoScripts/trimmomatic_param.txt', 'wt')
temp = sapply(cvQueries, function(x){
  # get the file names for this sample
  dfFiles = dbGetQuery(db, x)
  # check for null return
  if (nrow(dfFiles) == 0) return();
  # remove white space from title
  dfFiles$title = gsub(" ", "", dfFiles$title, fixed=TRUE)
  # split the file names into paired end 1 and 2, identified by R1 and R2 in the file name
  f = dfFiles$name
  d = grepl('_R1_', f)
  d = as.character(d)
  d[d == 'TRUE'] = 'R1'
  d[d == 'FALSE'] = 'R2'
  lf = split(f, d)
  # build the trimmomatic argument list for this sample
  in.r1 = paste0(cvInput, lf[[1]])
  in.r2 = paste0(cvInput, lf[[2]])
  out.r1 = paste0(cvOutput, 'trim_', lf[[1]])
  out.r2 = paste0(cvOutput, 'trim_', lf[[2]])
  out.r1.up = paste0(cvOutput.unpaired, 'up_', lf[[1]])
  out.r2.up = paste0(cvOutput.unpaired, 'up_', lf[[2]])
  p1 = paste(in.r1, in.r2, out.r1, out.r1.up, out.r2, out.r2.up, sep=' ')
  writeLines(p1, oFile.param)
})
close(oFile.param)

# write the shell script that the SGE array job will execute
oFile = file('AutoScripts/trimmomatic.sh', 'wt')
writeLines(c('# Autogenerated script from write_trimmomatic_script.R', paste('# date', date())), oFile)
writeLines(c('# make sure directory paths exist before running script'), oFile)
writeLines(c(cvShell, cvShell.2, cvProcessors, cvWorkingDir, cvJobName, cvStdout, cvMemoryReserve, cvArrayJob), oFile)
writeLines('\n\n', oFile)
# module load
writeLines(c('module load general/JRE/1.8.0_65', 'module load bioinformatics/trimmomatic/0.36'), oFile)
writeLines('\n\n', oFile)
## write array job lines: each task reads its own line of the parameter file
writeLines("# Parse parameter file to get variables.
number=$SGE_TASK_ID
paramfile=trimmomatic_param.txt
inr1=`sed -n ${number}p $paramfile | awk '{print $1}'`
inr2=`sed -n ${number}p $paramfile | awk '{print $2}'`
outr1=`sed -n ${number}p $paramfile | awk '{print $3}'`
outr1up=`sed -n ${number}p $paramfile | awk '{print $4}'`
outr2=`sed -n ${number}p $paramfile | awk '{print $5}'`
outr2up=`sed -n ${number}p $paramfile | awk '{print $6}'`
# 9. Run the program.", oFile)
p1 = paste('java -jar', cvTrimmomatic, 'PE -phred33', '$inr1 $inr2 $outr1 $outr1up $outr2 $outr2up', sep=' ')
p2 = paste0('ILLUMINACLIP:', cvIlluminaAdap, ':2:30:10:8:true LEADING:3 TRAILING:3 SLIDINGWINDOW:4:15 MINLEN:36')
com = paste(p1, p2, sep=' ')
writeLines(com, oFile)
writeLines('\n\n', oFile)
close(oFile)
dbDisconnect(db)
|
/S107/03_trimmomatic_array_job.R
|
permissive
|
uhkniazi/BRC_NeuralTube_Miho
|
R
| false
| false
| 4,065
|
r
|
# File: 03_trimmomatic_array_job.R
# Auth: umar.niazi@kcl.as.uk
# DESC: create a parameter file and shell script to run array job on hpc
# Date: 15/08/2017

## set variables and source libraries
source('header.R')

## connect to mysql database to get sample information
library('RMySQL')
##### connect to mysql database to get samples
db = dbConnect(MySQL(), user='rstudio', password='12345', dbname='Projects', host='127.0.0.1')
dbListTables(db)

# g_did comes from header.R (project data id) -- echoed as a sanity check
g_did
# check how many files each sample has
q = paste0('select count(File.idSample) as files, Sample.idData, Sample.title, Sample.id as SampleID from File, Sample
where (Sample.idData = 14 and File.idSample = Sample.id) group by File.idSample')
dfQuery = dbGetQuery(db, q)
# remove white space from sample titles
dfQuery$title = gsub(" ", "", dfQuery$title, fixed = TRUE)
dfQuery

# for each sample id, get the corresponding files
cvQueries = paste0('select File.*, Sample.title from File, Sample
where (Sample.idData = 14 and Sample.id =', dfQuery$SampleID, ') and (File.idSample = Sample.id)')

# set SGE header variables
cvShell = '#!/bin/bash'
cvShell.2 = '#$ -S /bin/bash'
cvProcessors = '#$ -pe smp 1'
cvWorkingDir = '#$ -cwd'
cvJobName = '#$ -N trim-array'
cvStdout = '#$ -j y'
cvMemoryReserve = '#$ -l h_vmem=19G'
# set array job loop: one task per sample.
# BUG FIX: this was hard-coded to '#$ -t 1-8'; deriving it from the query
# keeps the script correct when the number of samples changes.
cvArrayJob = paste0('#$ -t 1-', nrow(dfQuery))
# using high memory queue with one slot and 19 Gigs of memory
# set the directory names for trimmomatic
cvInput = 'input/'
cvOutput = 'output/Trimmed/'
cvOutput.unpaired = paste0(cvOutput, 'Unpaired/')
cvTrimmomatic = '/opt/apps/bioinformatics/trimmomatic/0.36/trimmomatic-0.36.jar'
cvIlluminaAdap = '/users/k1625253/brc_scratch/Data/MetaData/trimmomatic_adapters.fa'

# create a parameter file: one line per sample holding the six paths
# (paired inputs, trimmed outputs, unpaired outputs) read by the array job
dir.create('AutoScripts')
oFile.param = file('AutoScripts/trimmomatic_param.txt', 'wt')
temp = sapply(cvQueries, function(x){
  # get the file names for this sample
  dfFiles = dbGetQuery(db, x)
  # check for null return
  if (nrow(dfFiles) == 0) return();
  # remove white space from title
  dfFiles$title = gsub(" ", "", dfFiles$title, fixed=TRUE)
  # split the file names into paired end 1 and 2, identified by R1 and R2 in the file name
  f = dfFiles$name
  d = grepl('_R1_', f)
  d = as.character(d)
  d[d == 'TRUE'] = 'R1'
  d[d == 'FALSE'] = 'R2'
  lf = split(f, d)
  # build the trimmomatic argument list for this sample
  in.r1 = paste0(cvInput, lf[[1]])
  in.r2 = paste0(cvInput, lf[[2]])
  out.r1 = paste0(cvOutput, 'trim_', lf[[1]])
  out.r2 = paste0(cvOutput, 'trim_', lf[[2]])
  out.r1.up = paste0(cvOutput.unpaired, 'up_', lf[[1]])
  out.r2.up = paste0(cvOutput.unpaired, 'up_', lf[[2]])
  p1 = paste(in.r1, in.r2, out.r1, out.r1.up, out.r2, out.r2.up, sep=' ')
  writeLines(p1, oFile.param)
})
close(oFile.param)

# write the shell script that the SGE array job will execute
oFile = file('AutoScripts/trimmomatic.sh', 'wt')
writeLines(c('# Autogenerated script from write_trimmomatic_script.R', paste('# date', date())), oFile)
writeLines(c('# make sure directory paths exist before running script'), oFile)
writeLines(c(cvShell, cvShell.2, cvProcessors, cvWorkingDir, cvJobName, cvStdout, cvMemoryReserve, cvArrayJob), oFile)
writeLines('\n\n', oFile)
# module load
writeLines(c('module load general/JRE/1.8.0_65', 'module load bioinformatics/trimmomatic/0.36'), oFile)
writeLines('\n\n', oFile)
## write array job lines: each task reads its own line of the parameter file
writeLines("# Parse parameter file to get variables.
number=$SGE_TASK_ID
paramfile=trimmomatic_param.txt
inr1=`sed -n ${number}p $paramfile | awk '{print $1}'`
inr2=`sed -n ${number}p $paramfile | awk '{print $2}'`
outr1=`sed -n ${number}p $paramfile | awk '{print $3}'`
outr1up=`sed -n ${number}p $paramfile | awk '{print $4}'`
outr2=`sed -n ${number}p $paramfile | awk '{print $5}'`
outr2up=`sed -n ${number}p $paramfile | awk '{print $6}'`
# 9. Run the program.", oFile)
p1 = paste('java -jar', cvTrimmomatic, 'PE -phred33', '$inr1 $inr2 $outr1 $outr1up $outr2 $outr2up', sep=' ')
p2 = paste0('ILLUMINACLIP:', cvIlluminaAdap, ':2:30:10:8:true LEADING:3 TRAILING:3 SLIDINGWINDOW:4:15 MINLEN:36')
com = paste(p1, p2, sep=' ')
writeLines(com, oFile)
writeLines('\n\n', oFile)
close(oFile)
dbDisconnect(db)
|
# [1] load data
# Pull three worksheets from the triplicate workbook via RODBC and the
# posterior samples from the saved MCMC run.
com3 <- odbcConnectExcel2007("./dat/triplicate.xlsx")
dat <- sqlFetch(com3, "data")
fishdat <- sqlFetch(com3, "fishdata")
# BUG FIX: this fetch used `com`, which is undefined in this script; the
# open connection is `com3`.
ns_counts <- sqlFetch(com3, "Fish data: parasite counts")
odbcClose(com3)
load("./output/ms_mcmc.Rdata")
|
/analysis/src/3_load.R
|
no_license
|
mcolvin/ms_occ_model
|
R
| false
| false
| 261
|
r
|
# [1] load data
# Pull three worksheets from the triplicate workbook via RODBC and the
# posterior samples from the saved MCMC run.
com3 <- odbcConnectExcel2007("./dat/triplicate.xlsx")
dat <- sqlFetch(com3, "data")
fishdat <- sqlFetch(com3, "fishdata")
# BUG FIX: this fetch used `com`, which is undefined in this script; the
# open connection is `com3`.
ns_counts <- sqlFetch(com3, "Fish data: parasite counts")
odbcClose(com3)
load("./output/ms_mcmc.Rdata")
|
\name{ecospat.ESM.Projection}
\alias{ecospat.ESM.Projection}
\title{
Ensemble of Small Models: Projects Simple Bivariate Models Into New Space Or Time
}
\description{
This function projects simple bivariate models on new.env
}
\usage{
ecospat.ESM.Projection(ESM.modeling.output,
new.env,
name.env,
parallel,
cleanup)
}
\arguments{
\item{ESM.modeling.output}{
\code{list} object returned by \code{\link{ecospat.ESM.Modeling}}
}
\item{new.env}{
A set of explanatory variables onto which models will be projected. It could be a \code{data.frame}, a \code{matrix}, or a \code{rasterStack} object. Make sure the column names (\code{data.frame} or \code{matrix}) or layer Names (\code{rasterStack}) perfectly match with the names of variables used to build the models in the previous steps.
}
\item{name.env}{
A name for the new.env object. If not specified (default) the name of the new.env object will be used. It is necessary to specify a unique name when projecting various new.env objects in a loop.
}
\item{parallel}{
Logical. If TRUE, the parallel computing is enabled}
\item{cleanup}{
Numeric. Calls removeTmpFiles() to delete all files from rasterOptions()$tmpdir which are older than the given time (in hours). This might be necessary to prevent running over quota. No cleanup is used by default}
}
\details{
The basic idea of ensemble of small models (ESMs) is to model a species distribution based on small, simple models, for example all possible bivariate models (i.e. models that contain only two predictors at a time out of a larger set of predictors), and then combine all possible bivariate models into an ensemble (Lomba et al. 2010; Breiner et al. 2015).
The ESM set of functions could be used to build ESMs using simple bivariate models which are averaged using weights based on model performances (e.g. AUC) according to Breiner et al (2015). They provide full functionality of the approach described in Breiner et al. (2015).
The name of \code{new.env} must be a regular expression (see ?regex)
}
\value{
Returns the projections for all selected models (same as in \code{biomod2})
See \code{"\link[=BIOMOD.projection.out-class]{BIOMOD.projection.out}"} for details.
}
\author{ Frank Breiner \email{frank.breiner@wsl.ch}
with contributions of Olivier Broennimann \email{olivier.broennimann@unil.ch}
}
\references{
Lomba, A., L. Pellissier, C.F. Randin, J. Vicente, F. Moreira, J. Honrado and A. Guisan. 2010. Overcoming the rare species modelling paradox: A novel hierarchical framework applied to an Iberian endemic plant. \emph{Biological Conservation}, \bold{143},2647-2657.
Breiner F.T., A. Guisan, A. Bergamini and M.P. Nobis. 2015. Overcoming limitations of modelling rare species by using ensembles of small models. \emph{Methods in Ecology and Evolution}, \bold{6},1210-1218.
Breiner F.T., Nobis M.P., Bergamini A., Guisan A. 2018. Optimizing ensembles of small models for predicting the distribution of species with few occurrences. \emph{Methods in Ecology and Evolution}. \doi{10.1111/2041-210X.12957}
}
\seealso{
\code{\link[ecospat]{ecospat.ESM.EnsembleModeling}}, \code{\link[ecospat]{ecospat.ESM.Modeling}}, \code{\link[ecospat]{ecospat.ESM.EnsembleProjection}}
\code{\link[biomod2]{BIOMOD_FormatingData}}, \code{\link[biomod2]{BIOMOD_ModelingOptions}}, \code{\link[biomod2]{BIOMOD_Modeling}},\code{\link[biomod2]{BIOMOD_Projection}}
}
\examples{
\donttest{
library(biomod2)
# Loading test data
data(ecospat.testNiche.inv)
inv <- ecospat.testNiche.inv
# species occurrences
xy <- inv[,1:2]
sp_occ <- inv[11]
# env
current <- inv[3:10]
### Formating the data with the BIOMOD_FormatingData() function from the package biomod2
sp <- 1
myBiomodData <- BIOMOD_FormatingData( resp.var = as.numeric(sp_occ[,sp]),
expl.var = current,
resp.xy = xy,
resp.name = colnames(sp_occ)[sp])
### Calibration of simple bivariate models
my.ESM <- ecospat.ESM.Modeling( data=myBiomodData,
models=c('GLM','RF'),
NbRunEval=2,
DataSplit=70,
weighting.score=c("AUC"),
parallel=FALSE)
### Evaluation and average of simple bivariate models to ESMs
my.ESM_EF <- ecospat.ESM.EnsembleModeling(my.ESM,weighting.score=c("SomersD"),threshold=0)
### Projection of simple bivariate models into new space
my.ESM_proj_current<-ecospat.ESM.Projection(ESM.modeling.output=my.ESM,
new.env=current)
### Projection of calibrated ESMs into new space
my.ESM_EFproj_current <- ecospat.ESM.EnsembleProjection(ESM.prediction.output=my.ESM_proj_current,
ESM.EnsembleModeling.output=my.ESM_EF)
## get the model performance of ESMs
my.ESM_EF$ESM.evaluations
## get the weights of the single bivariate models used to build the ESMs
my.ESM_EF$weights
## get the variable contributions of ESMs
ecospat.ESM.VarContrib(my.ESM,my.ESM_EF)
}}
|
/man/ecospat.ESM.Projection.Rd
|
no_license
|
dondealban/ecospat
|
R
| false
| false
| 5,400
|
rd
|
\name{ecospat.ESM.Projection}
\alias{ecospat.ESM.Projection}
\title{
Ensemble of Small Models: Projects Simple Bivariate Models Into New Space Or Time
}
\description{
This function projects simple bivariate models on new.env
}
\usage{
ecospat.ESM.Projection(ESM.modeling.output,
new.env,
name.env,
parallel,
cleanup)
}
\arguments{
\item{ESM.modeling.output}{
\code{list} object returned by \code{\link{ecospat.ESM.Modeling}}
}
\item{new.env}{
A set of explanatory variables onto which models will be projected. It could be a \code{data.frame}, a \code{matrix}, or a \code{rasterStack} object. Make sure the column names (\code{data.frame} or \code{matrix}) or layer Names (\code{rasterStack}) perfectly match with the names of variables used to build the models in the previous steps.
}
\item{name.env}{
A name for the new.env object. If not specified (default) the name of the new.env object will be used. It is necessary to specify a unique name when projecting various new.env objects in a loop.
}
\item{parallel}{
Logical. If TRUE, the parallel computing is enabled}
\item{cleanup}{
Numeric. Calls removeTmpFiles() to delete all files from rasterOptions()$tmpdir which are older than the given time (in hours). This might be necessary to prevent running over quota. No cleanup is used by default}
}
\details{
The basic idea of ensemble of small models (ESMs) is to model a species distribution based on small, simple models, for example all possible bivariate models (i.e. models that contain only two predictors at a time out of a larger set of predictors), and then combine all possible bivariate models into an ensemble (Lomba et al. 2010; Breiner et al. 2015).
The ESM set of functions could be used to build ESMs using simple bivariate models which are averaged using weights based on model performances (e.g. AUC) according to Breiner et al (2015). They provide full functionality of the approach described in Breiner et al. (2015).
The name of \code{new.env} must be a regular expression (see ?regex)
}
\value{
Returns the projections for all selected models (same as in \code{biomod2})
See \code{"\link[=BIOMOD.projection.out-class]{BIOMOD.projection.out}"} for details.
}
\author{ Frank Breiner \email{frank.breiner@wsl.ch}
with contributions of Olivier Broennimann \email{olivier.broennimann@unil.ch}
}
\references{
Lomba, A., L. Pellissier, C.F. Randin, J. Vicente, F. Moreira, J. Honrado and A. Guisan. 2010. Overcoming the rare species modelling paradox: A novel hierarchical framework applied to an Iberian endemic plant. \emph{Biological Conservation}, \bold{143},2647-2657.
Breiner F.T., A. Guisan, A. Bergamini and M.P. Nobis. 2015. Overcoming limitations of modelling rare species by using ensembles of small models. \emph{Methods in Ecology and Evolution}, \bold{6},1210-1218.
Breiner F.T., Nobis M.P., Bergamini A., Guisan A. 2018. Optimizing ensembles of small models for predicting the distribution of species with few occurrences. \emph{Methods in Ecology and Evolution}. \doi{10.1111/2041-210X.12957}
}
\seealso{
\code{\link[ecospat]{ecospat.ESM.EnsembleModeling}}, \code{\link[ecospat]{ecospat.ESM.Modeling}}, \code{\link[ecospat]{ecospat.ESM.EnsembleProjection}}
\code{\link[biomod2]{BIOMOD_FormatingData}}, \code{\link[biomod2]{BIOMOD_ModelingOptions}}, \code{\link[biomod2]{BIOMOD_Modeling}},\code{\link[biomod2]{BIOMOD_Projection}}
}
\examples{
\donttest{
library(biomod2)
# Loading test data
data(ecospat.testNiche.inv)
inv <- ecospat.testNiche.inv
# species occurrences
xy <- inv[,1:2]
sp_occ <- inv[11]
# env
current <- inv[3:10]
### Formating the data with the BIOMOD_FormatingData() function from the package biomod2
sp <- 1
myBiomodData <- BIOMOD_FormatingData( resp.var = as.numeric(sp_occ[,sp]),
expl.var = current,
resp.xy = xy,
resp.name = colnames(sp_occ)[sp])
### Calibration of simple bivariate models
my.ESM <- ecospat.ESM.Modeling( data=myBiomodData,
models=c('GLM','RF'),
NbRunEval=2,
DataSplit=70,
weighting.score=c("AUC"),
parallel=FALSE)
### Evaluation and average of simple bivariate models to ESMs
my.ESM_EF <- ecospat.ESM.EnsembleModeling(my.ESM,weighting.score=c("SomersD"),threshold=0)
### Projection of simple bivariate models into new space
my.ESM_proj_current<-ecospat.ESM.Projection(ESM.modeling.output=my.ESM,
new.env=current)
### Projection of calibrated ESMs into new space
my.ESM_EFproj_current <- ecospat.ESM.EnsembleProjection(ESM.prediction.output=my.ESM_proj_current,
ESM.EnsembleModeling.output=my.ESM_EF)
## get the model performance of ESMs
my.ESM_EF$ESM.evaluations
## get the weights of the single bivariate models used to build the ESMs
my.ESM_EF$weights
## get the variable contributions of ESMs
ecospat.ESM.VarContrib(my.ESM,my.ESM_EF)
}}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metric.R
\name{PearsonCorrCoef}
\alias{PearsonCorrCoef}
\title{PearsonCorrCoef}
\usage{
PearsonCorrCoef(
dim_argmax = NULL,
activation = "no",
thresh = NULL,
to_np = FALSE,
invert_arg = FALSE,
flatten = TRUE
)
}
\arguments{
\item{dim_argmax}{dim_argmax}
\item{activation}{activation}
\item{thresh}{thresh}
\item{to_np}{to_np}
\item{invert_arg}{invert_arg}
\item{flatten}{flatten}
}
\value{
None
}
\description{
Pearson correlation coefficient for regression problem
}
|
/man/PearsonCorrCoef.Rd
|
permissive
|
Cdk29/fastai
|
R
| false
| true
| 563
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metric.R
\name{PearsonCorrCoef}
\alias{PearsonCorrCoef}
\title{PearsonCorrCoef}
\usage{
PearsonCorrCoef(
dim_argmax = NULL,
activation = "no",
thresh = NULL,
to_np = FALSE,
invert_arg = FALSE,
flatten = TRUE
)
}
\arguments{
\item{dim_argmax}{dim_argmax}
\item{activation}{activation}
\item{thresh}{thresh}
\item{to_np}{to_np}
\item{invert_arg}{invert_arg}
\item{flatten}{flatten}
}
\value{
None
}
\description{
Pearson correlation coefficient for regression problem
}
|
#' Materialize a file from a base64-encoded environment variable
#'
#' Decodes the serialized, base64-encoded contents of environment variable
#' `name` and writes them to `destination`. Does nothing when `destination`
#' already exists or when the variable is unset/empty.
#'
#' To create an encoded file use: \code{gsub("\\n", "", jsonlite::base64_enc(serialize(readLines("tests/testthat/cloudml.yml"), NULL)))}
#'
sysenv_file <- function(name, destination) {
  if (file.exists(destination)) {
    return(invisible(NULL))
  }
  encoded <- Sys.getenv(name)
  if (nzchar(encoded)) {
    decoded <- unserialize(jsonlite::base64_dec(encoded))
    writeLines(decoded, destination)
  }
}
# Write a cloudml.yml configuration assembled from CI environment variables.
# When a `base` config list is supplied, only the gcloud/cloudml fields are
# overwritten and all other fields of `base` are preserved.
cloudml_write_config <- function(base = NULL, destination = "cloudml.yml") {
  env_config <- list(
    gcloud = list(
      project = Sys.getenv("GCLOUD_PROJECT"),
      account = Sys.getenv("GCLOUD_ACCOUNT"),
      region = Sys.getenv("CLOUDSDK_COMPUTE_REGION", "us-east1")
    ),
    cloudml = list(
      storage = paste0("gs://", Sys.getenv("GCLOUD_PROJECT"), "/travis")
    )
  )
  out <- env_config
  if (!is.null(base)) {
    # Overlay environment-derived values onto the caller's template.
    base$gcloud$project <- env_config$gcloud$project
    base$gcloud$account <- env_config$gcloud$account
    base$gcloud$region <- env_config$gcloud$region
    base$cloudml$storage <- env_config$cloudml$storage
    out <- base
  }
  yaml::write_yaml(out, destination)
}
# TRUE when the service-account key environment variable is set to a
# non-empty value (i.e. cloud tests can run).
cloudml_tests_configured <- function() {
  nzchar(Sys.getenv("GCLOUD_ACCOUNT_FILE"))
}
# Test-suite bootstrap: runs only when a service-account key is available.
if (cloudml_tests_configured()) {
if (identical(Sys.getenv("TRAVIS"), "true")) {
# the gcloud SDK is not preinstalled on Travis workers
cloudml:::gcloud_install()
}
options(repos = c(CRAN = "http://cran.rstudio.com"))
# write the decoded service-account key to a temp file, then activate it
account_file <- tempfile(fileext = ".json")
sysenv_file("GCLOUD_ACCOUNT_FILE", account_file)
# NOTE(review): tempfile() never returns NULL, so this guard is always TRUE;
# file.exists(account_file) is probably what was intended -- confirm.
if (!is.null(account_file)) {
gcloud_exec(
"auth",
"activate-service-account",
paste(
"--key-file",
account_file,
sep = "="
)
)
}
cloudml_write_config()
}
|
/tests/testthat/helper-initialize.R
|
no_license
|
anishsingh20/cloudml
|
R
| false
| false
| 1,742
|
r
|
#' Read File from System Environment Variable
#'
#' Decodes the serialized, base64-encoded contents of environment variable
#' `name` and writes them to `destination`. Does nothing when `destination`
#' already exists or when the variable is unset/empty.
#'
#' To create an encoded file use: \code{gsub("\\n", "", jsonlite::base64_enc(serialize(readLines("tests/testthat/cloudml.yml"), NULL)))}
#'
sysenv_file <- function(name, destination) {
if (file.exists(destination))
return()
value_base64 <- Sys.getenv(name)
if (nchar(value_base64) > 0) {
file_contents <- unserialize(jsonlite::base64_dec(
value_base64
))
writeLines(file_contents, destination)
}
}
# Write a cloudml.yml configuration assembled from CI environment variables;
# when a `base` config is supplied, only the gcloud/cloudml fields are
# overwritten and the rest of `base` is preserved.
cloudml_write_config <- function(base = NULL, destination = "cloudml.yml") {
config = list(
gcloud = list(
project = Sys.getenv("GCLOUD_PROJECT"),
account = Sys.getenv("GCLOUD_ACCOUNT"),
region = Sys.getenv("CLOUDSDK_COMPUTE_REGION", "us-east1")
),
cloudml = list(
storage = paste("gs://", Sys.getenv("GCLOUD_PROJECT"), "/travis", sep = "")
)
)
if (!is.null(base)) {
# overlay environment-derived values onto the caller's template
base$gcloud$project <- config$gcloud$project
base$gcloud$account <- config$gcloud$account
base$gcloud$region <- config$gcloud$region
base$cloudml$storage <- config$cloudml$storage
config <- base
}
yaml::write_yaml(config, destination)
}
# TRUE when a service-account key is provided via the environment.
cloudml_tests_configured <- function() {
nchar(Sys.getenv("GCLOUD_ACCOUNT_FILE")) > 0
}
# Test-suite bootstrap: install gcloud on Travis, materialize the service
# account key, activate it, and write the config file.
if (cloudml_tests_configured()) {
if (identical(Sys.getenv("TRAVIS"), "true")) {
cloudml:::gcloud_install()
}
options(repos = c(CRAN = "http://cran.rstudio.com"))
account_file <- tempfile(fileext = ".json")
sysenv_file("GCLOUD_ACCOUNT_FILE", account_file)
# NOTE(review): tempfile() never returns NULL, so this guard is always TRUE;
# file.exists(account_file) is probably what was intended -- confirm.
if (!is.null(account_file)) {
gcloud_exec(
"auth",
"activate-service-account",
paste(
"--key-file",
account_file,
sep = "="
)
)
}
cloudml_write_config()
}
|
# k-Nearest Neighbours on the ISLR `Default` data (needs caret, dplyr, class).
df <- Default
set.seed(42)
# Stratified 80/20 train/test split.
train_index <- createDataPartition(df$default,
                                   p = .8,
                                   list = FALSE,
                                   times = 1)
train <- df[train_index, ]
test <- df[-train_index, ]
# kNN only understands numeric predictors, so recode the factor as 0/1.
train$student <- as.numeric(train$student) - 1
test$student <- as.numeric(test$student) - 1
train_y <- train$default
train_x <- train %>% dplyr::select(-c("default"))
test_y <- test$default
test_x <- test %>% dplyr::select(-"default")
########## Fit the model (class library) ########
knn_fit <- class::knn(train = train_x,
                      test = test_x,
                      cl = train_y,
                      k = 5)
##### Prediction ######
# Misclassification rate: share of predictions that differ from the truth.
class_err <- function(gercek, tahmin){
  mean(gercek != tahmin)
}
class_err(test_y, knn_fit)  # classification error
##### Model Tuning ######
ctrl <- trainControl(method = "cv",
                     number = 10,
                     summaryFunction = twoClassSummary,
                     classProbs = TRUE,       # class probabilities needed for ROC
                     savePredictions = TRUE)
knn_grid <- data.frame(
  k = c(4*(0:5)+1, 20*(1:5)+1, 50*(2:9)+1)  # odd k values avoid voting ties
)
knn_tune <- train(train_x, train_y,
                  method = "knn",
                  metric = "ROC",
                  preProc = c("center", "scale"),
                  trControl = ctrl,
                  tuneGrid = knn_grid
)
knn_tune$bestTune
confusionMatrix(
  knn_tune$pred$pred, knn_tune$pred$obs, positive = "Yes"
)
|
/R/Classification/KNN.R
|
no_license
|
sametsoekel/ml-cheatsheet
|
R
| false
| false
| 1,659
|
r
|
# k-Nearest Neighbours on the ISLR `Default` data (needs caret, dplyr, class).
df <- Default
set.seed(42)
# Stratified 80/20 train/test split.
train_index <- createDataPartition(df$default,
                                   p = .8,
                                   list = FALSE,
                                   times = 1)
train <- df[train_index, ]
test <- df[-train_index, ]
# kNN only understands numeric predictors, so recode the factor as 0/1.
train$student <- as.numeric(train$student) - 1
test$student <- as.numeric(test$student) - 1
train_y <- train$default
train_x <- train %>% dplyr::select(-c("default"))
test_y <- test$default
test_x <- test %>% dplyr::select(-"default")
########## Fit the model (class library) ########
knn_fit <- class::knn(train = train_x,
                      test = test_x,
                      cl = train_y,
                      k = 5)
##### Prediction ######
# Misclassification rate: share of predictions that differ from the truth.
class_err <- function(gercek, tahmin){
  mean(gercek != tahmin)
}
class_err(test_y, knn_fit)  # classification error
##### Model Tuning ######
ctrl <- trainControl(method = "cv",
                     number = 10,
                     summaryFunction = twoClassSummary,
                     classProbs = TRUE,       # class probabilities needed for ROC
                     savePredictions = TRUE)
knn_grid <- data.frame(
  k = c(4*(0:5)+1, 20*(1:5)+1, 50*(2:9)+1)  # odd k values avoid voting ties
)
knn_tune <- train(train_x, train_y,
                  method = "knn",
                  metric = "ROC",
                  preProc = c("center", "scale"),
                  trControl = ctrl,
                  tuneGrid = knn_grid
)
knn_tune$bestTune
confusionMatrix(
  knn_tune$pred$pred, knn_tune$pred$obs, positive = "Yes"
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/inspector_operations.R
\name{inspector_list_tags_for_resource}
\alias{inspector_list_tags_for_resource}
\title{Lists all tags associated with an assessment template}
\usage{
inspector_list_tags_for_resource(resourceArn)
}
\arguments{
\item{resourceArn}{[required] The ARN that specifies the assessment template whose tags you want to
list.}
}
\description{
Lists all tags associated with an assessment template.
}
\section{Request syntax}{
\preformatted{svc$list_tags_for_resource(
resourceArn = "string"
)
}
}
\examples{
# Lists all tags associated with an assessment template.
\donttest{svc$list_tags_for_resource(
resourceArn = "arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq/template/0-gcwFl..."
)}
}
\keyword{internal}
|
/cran/paws.security.identity/man/inspector_list_tags_for_resource.Rd
|
permissive
|
ryanb8/paws
|
R
| false
| true
| 820
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/inspector_operations.R
\name{inspector_list_tags_for_resource}
\alias{inspector_list_tags_for_resource}
\title{Lists all tags associated with an assessment template}
\usage{
inspector_list_tags_for_resource(resourceArn)
}
\arguments{
\item{resourceArn}{[required] The ARN that specifies the assessment template whose tags you want to
list.}
}
\description{
Lists all tags associated with an assessment template.
}
\section{Request syntax}{
\preformatted{svc$list_tags_for_resource(
resourceArn = "string"
)
}
}
\examples{
# Lists all tags associated with an assessment template.
\donttest{svc$list_tags_for_resource(
resourceArn = "arn:aws:inspector:us-west-2:123456789012:target/0-0kFIPusq/template/0-gcwFl..."
)}
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/guardduty_operations.R
\name{guardduty_accept_administrator_invitation}
\alias{guardduty_accept_administrator_invitation}
\title{Accepts the invitation to be a member account and get monitored by a
GuardDuty administrator account that sent the invitation}
\usage{
guardduty_accept_administrator_invitation(
DetectorId,
AdministratorId,
InvitationId
)
}
\arguments{
\item{DetectorId}{[required] The unique ID of the detector of the GuardDuty member account.}
\item{AdministratorId}{[required] The account ID of the GuardDuty administrator account whose invitation
you're accepting.}
\item{InvitationId}{[required] The value that is used to validate the administrator account to the
member account.}
}
\description{
Accepts the invitation to be a member account and get monitored by a GuardDuty administrator account that sent the invitation.
See \url{https://www.paws-r-sdk.com/docs/guardduty_accept_administrator_invitation/} for full documentation.
}
\keyword{internal}
|
/cran/paws.security.identity/man/guardduty_accept_administrator_invitation.Rd
|
permissive
|
paws-r/paws
|
R
| false
| true
| 1,058
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/guardduty_operations.R
\name{guardduty_accept_administrator_invitation}
\alias{guardduty_accept_administrator_invitation}
\title{Accepts the invitation to be a member account and get monitored by a
GuardDuty administrator account that sent the invitation}
\usage{
guardduty_accept_administrator_invitation(
DetectorId,
AdministratorId,
InvitationId
)
}
\arguments{
\item{DetectorId}{[required] The unique ID of the detector of the GuardDuty member account.}
\item{AdministratorId}{[required] The account ID of the GuardDuty administrator account whose invitation
you're accepting.}
\item{InvitationId}{[required] The value that is used to validate the administrator account to the
member account.}
}
\description{
Accepts the invitation to be a member account and get monitored by a GuardDuty administrator account that sent the invitation.
See \url{https://www.paws-r-sdk.com/docs/guardduty_accept_administrator_invitation/} for full documentation.
}
\keyword{internal}
|
# Assumes that the file "household_power_consumption.txt" is in the
# "workingdirectory/data" directory; "?" marks missing values.
data <- read.table("./data/household_power_consumption.txt",header=TRUE,
sep=";",stringsAsFactors=FALSE,na.strings = "?")
# Subset the data for the dates 2007-02-01 and 2007-02-02 (d/m/Y format).
data<-data[(data$Date=="1/2/2007" | data$Date=="2/2/2007"),]
# Paste the Date and Time variables into a new variable and use strptime
# to convert it to a date-time class.
data$DateTime<-paste(data$Date,data$Time,sep=" ")
data$DateTime<-strptime(data$DateTime,"%d/%m/%Y %H:%M:%S")
# Line plot of global active power over time.
plot(data$DateTime,data$Global_active_power,type="l",main="",
xlab="",ylab="Global Active Power (kilowatts)")
# Copy the plot to a PNG file (480 x 480 pixels).
dev.copy(png,file="plot2.png",width = 480, height = 480)
dev.off()
|
/plot2.R
|
no_license
|
liuyuankai/ExData_Plotting1
|
R
| false
| false
| 802
|
r
|
# Assumes that the file "household_power_consumption.txt" is in the
# "workingdirectory/data" directory; "?" marks missing values.
data <- read.table("./data/household_power_consumption.txt",header=TRUE,
sep=";",stringsAsFactors=FALSE,na.strings = "?")
# Subset the data for the dates 2007-02-01 and 2007-02-02 (d/m/Y format).
data<-data[(data$Date=="1/2/2007" | data$Date=="2/2/2007"),]
# Paste the Date and Time variables into a new variable and use strptime
# to convert it to a date-time class.
data$DateTime<-paste(data$Date,data$Time,sep=" ")
data$DateTime<-strptime(data$DateTime,"%d/%m/%Y %H:%M:%S")
# Line plot of global active power over time.
plot(data$DateTime,data$Global_active_power,type="l",main="",
xlab="",ylab="Global Active Power (kilowatts)")
# Copy the plot to a PNG file (480 x 480 pixels).
dev.copy(png,file="plot2.png",width = 480, height = 480)
dev.off()
|
# Identify methylated regions (count > 10) for each of two samples (G, H),
# merge adjacent frames with MEDIPS, then extract the regions of interest.
df1 <- read.table("merged_region_methylation_GH.txt", sep = "\t", header = TRUE)
# One coordinate + count frame per sample (columns get prefixed "df1.")
sampleA <- data.frame(df1$chr, df1$start, df1$end, df1$G.bam.counts)
sampleB <- data.frame(df1$chr, df1$start, df1$end, df1$H.bam.counts)
# Keep regions whose read count exceeds 10
mr.edgeR.s.gainA <- sampleA[which(sampleA[, grep("G.bam.counts", colnames(sampleA))] > 10), ]
mr.edgeR.s.gainB <- sampleB[which(sampleB[, grep("H.bam.counts", colnames(sampleB))] > 10), ]
# Merge frames separated by < 1 bp
mr.edgeR.s.gain.mA <- MEDIPS.mergeFrames(frames = mr.edgeR.s.gainA, distance = 1)
mr.edgeR.s.gain.mB <- MEDIPS.mergeFrames(frames = mr.edgeR.s.gainB, distance = 1)
columnA <- names(sampleA)[grep("G.bam.counts", names(sampleA))]
# BUG FIX: the pattern was "H.bamcounts" (missing dot), which matched no
# column and left columnB empty.
columnB <- names(sampleB)[grep("H.bam.counts", names(sampleB))]
# BUG FIX: both calls referenced an undefined object `columns`; pass the
# per-sample column name vectors computed above instead.
roisA <- MEDIPS.selectROIs(results = sampleA, rois = mr.edgeR.s.gain.mA, columns = columnA, summarize = NULL)
roisB <- MEDIPS.selectROIs(results = sampleB, rois = mr.edgeR.s.gain.mB, columns = columnB, summarize = NULL)
|
/scipts/medips_wholemethylome.R
|
no_license
|
tomarashish/Medip-seq
|
R
| false
| false
| 904
|
r
|
# Identify methylated regions (count > 10) for each of two samples (G, H),
# merge adjacent frames with MEDIPS, then extract the regions of interest.
df1 <- read.table("merged_region_methylation_GH.txt", sep = "\t", header = TRUE)
# One coordinate + count frame per sample (columns get prefixed "df1.")
sampleA <- data.frame(df1$chr, df1$start, df1$end, df1$G.bam.counts)
sampleB <- data.frame(df1$chr, df1$start, df1$end, df1$H.bam.counts)
# Keep regions whose read count exceeds 10
mr.edgeR.s.gainA <- sampleA[which(sampleA[, grep("G.bam.counts", colnames(sampleA))] > 10), ]
mr.edgeR.s.gainB <- sampleB[which(sampleB[, grep("H.bam.counts", colnames(sampleB))] > 10), ]
# Merge frames separated by < 1 bp
mr.edgeR.s.gain.mA <- MEDIPS.mergeFrames(frames = mr.edgeR.s.gainA, distance = 1)
mr.edgeR.s.gain.mB <- MEDIPS.mergeFrames(frames = mr.edgeR.s.gainB, distance = 1)
columnA <- names(sampleA)[grep("G.bam.counts", names(sampleA))]
# BUG FIX: the pattern was "H.bamcounts" (missing dot), which matched no
# column and left columnB empty.
columnB <- names(sampleB)[grep("H.bam.counts", names(sampleB))]
# BUG FIX: both calls referenced an undefined object `columns`; pass the
# per-sample column name vectors computed above instead.
roisA <- MEDIPS.selectROIs(results = sampleA, rois = mr.edgeR.s.gain.mA, columns = columnA, summarize = NULL)
roisB <- MEDIPS.selectROIs(results = sampleB, rois = mr.edgeR.s.gain.mB, columns = columnB, summarize = NULL)
|
# Scatterplot of higher-education share vs. white-collar employment share;
# point size and colour encode population (Pop10More). Georgian axis labels.
library(ggplot2)
# NOTE(review): hard-coded absolute setwd() makes this script non-portable
setwd("D:\\Dropbox\\My Projects\\Courses\\QT_Analysis\\meetings\\m01-intro\\pres\\viz")
mdata <- read.csv("Education_Employment.csv")
library(extrafont)  # registers the custom fonts used in the theme below
# Shared theme: black axis text/titles in "BPG Arial 2010", boxed panel,
# grey major grid, title in "Gill Sans MT", legend hidden.
theme_mplot <- theme(
  axis.text.y = element_text(colour = "black", size = 12, family = "BPG Arial 2010"),
  axis.text.x = element_text(colour = "black", size = 12, family = "BPG Arial 2010"),
  axis.title.x = element_text(size = 12, family = "BPG Arial 2010", face = "bold"),
  axis.title.y = element_text(size = 12, family = "BPG Arial 2010", face = "bold"),
  panel.border = element_rect(fill = NA, linetype = "solid", colour = "black"),
  panel.background = element_rect(fill = NA),
  panel.grid.major = element_line(colour = "grey"),
  plot.title = element_text(colour = "Black", size = 14, family = "Gill Sans MT"),
  legend.position = "none"
)
p <- ggplot(mdata, aes(PropHiEd, PropWhiteCollar))
# x label: "share with higher education"; y label: "white-collar employees"
p <- p + geom_point(aes(size = Pop10More, colour = Pop10More)) +
  xlab("უმაღლესი განათლების მქონეთა წილი") +
  ylab("თეთრსაყელოიანი დასაქმებულები") +
  theme_mplot
# BUG FIX: the output filename had no extension, so cairo_pdf produced an
# extensionless file; name it "scatterplot.pdf" to match the device.
ggsave("scatterplot.pdf", p, width = 12, height = 6, device = cairo_pdf)
|
/meetings/m1/pres/viz/viz.R
|
no_license
|
davidsichinava/dar
|
R
| false
| false
| 1,202
|
r
|
# Scatterplot of higher-education share vs. white-collar employment share;
# point size and colour encode population (Pop10More).
library(ggplot2)
# NOTE(review): hard-coded absolute setwd() makes this script non-portable
setwd("D:\\Dropbox\\My Projects\\Courses\\QT_Analysis\\meetings\\m01-intro\\pres\\viz")
mdata<-read.csv("Education_Employment.csv")
# extrafont registers the custom fonts ("BPG Arial 2010", "Gill Sans MT")
library(extrafont)
# Shared theme: black axis text/titles, boxed panel, grey major grid,
# legend hidden.
theme_mplot <- theme(
axis.text.y = element_text(colour="black", size = 12, family = "BPG Arial 2010"),
axis.text.x = element_text(colour="black", size = 12, family="BPG Arial 2010"),
axis.title.x = element_text(size=12, family = "BPG Arial 2010", face="bold"),
axis.title.y = element_text(size=12, family = "BPG Arial 2010", face="bold"),
panel.border = element_rect(fill=NA, linetype = "solid", colour = "black"),
panel.background = element_rect(fill = NA),
panel.grid.major = element_line(colour = "grey"),
plot.title = element_text(colour = "Black", size=14, family = "Gill Sans MT"),
legend.position = "none"
)
p <- ggplot(mdata, aes(PropHiEd, PropWhiteCollar))
# Axis labels are Georgian: x = "share with higher education",
# y = "white-collar employees"
p <- p + geom_point(aes(size = Pop10More, colour=Pop10More)) +
xlab("უმაღლესი განათლების მქონეთა წილი") +
ylab("თეთრსაყელოიანი დასაქმებულები") +
theme_mplot
# NOTE(review): filename has no extension, so the saved file is extensionless
# even though the device is cairo_pdf — confirm "scatterplot.pdf" was intended
ggsave("scatterplot", p, width = 12, height = 6, device=cairo_pdf)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/def.loads.R
\docType{data}
\name{def.loads}
\alias{def.loads}
\title{Default loadings for medals}
\format{An object of class \code{matrix} with 432 rows and 432 columns.}
\usage{
def.loads
}
\description{
Default loadings matrix from training data for MEDALS package. This fit used 9 subjects with 4 imaging modalities (FLAIR, T1w,T2w,DWI), and carried out a 4th order MEDALS analysis
}
\keyword{datasets}
|
/man/def.loads.Rd
|
no_license
|
JMMaronge/medals
|
R
| false
| true
| 484
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/def.loads.R
\docType{data}
\name{def.loads}
\alias{def.loads}
\title{Default loadings for medals}
\format{An object of class \code{matrix} with 432 rows and 432 columns.}
\usage{
def.loads
}
\description{
Default loadings matrix from training data for MEDALS package. This fit used 9 subjects with 4 imaging modalities (FLAIR, T1w,T2w,DWI), and carried out a 4th order MEDALS analysis
}
\keyword{datasets}
|
\name{unroll}
\alias{unroll}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Unroll Genetic Horizons}
\description{
Generate a discretized vector of genetic horizons along a user-defined pattern.
}
\usage{unroll(top, bottom, prop, max_depth, bottom_padding_value = NA, strict=FALSE)}
\arguments{
\item{top}{vector of upper horizon boundaries, must be an integer}
\item{bottom}{vector of lower horizon boundaries, must be an integer}
\item{prop}{vector of some property to be "unrolled" over a regular sequence}
\item{max_depth}{maximum depth to which missing data is padded with NA}
\item{bottom_padding_value}{value to use when padding missing data}
\item{strict}{should horizons be strictly checked for self-consistency? defaults to FALSE}
}
\details{
This function is used internally by several higher-level components of the \code{aqp} package. Basic error checking is performed to make sure that bottom and top horizon boundaries make sense. Note that the horizons should be sorted according to depth before using this function. The \code{max_depth} argument is used to specify the maximum depth of profiles within a collection, so that data from any profile shallower than this depth is padded with NA.
}
\value{a vector of "unrolled" property values}
\references{http://casoilresource.lawr.ucdavis.edu/}
\author{Dylan E. Beaudette}
\examples{
data(sp1)
# subset a single soil profile:
sp1.1 <- subset(sp1, subset=id == 'P001')
# demonstrate how this function works
x <- with(sp1.1, unroll(top, bottom, prop, max_depth=50))
plot(x, 1:length(x), ylim=c(90,0), type='b', cex=0.5)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{manip}
|
/man/unroll.Rd
|
no_license
|
rsbivand/aqp
|
R
| false
| false
| 1,738
|
rd
|
\name{unroll}
\alias{unroll}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Unroll Genetic Horizons}
\description{
Generate a discretized vector of genetic horizons along a user-defined pattern.
}
\usage{unroll(top, bottom, prop, max_depth, bottom_padding_value = NA, strict=FALSE)}
\arguments{
\item{top}{vector of upper horizon boundaries, must be an integer}
\item{bottom}{vector of lower horizon boundaries, must be an integer}
\item{prop}{vector of some property to be "unrolled" over a regular sequence}
\item{max_depth}{maximum depth to which missing data is padded with NA}
\item{bottom_padding_value}{value to use when padding missing data}
\item{strict}{should horizons be strictly checked for self-consistency? defaults to FALSE}
}
\details{
This function is used internally by several higher-level components of the \code{aqp} package. Basic error checking is performed to make sure that bottom and top horizon boundaries make sense. Note that the horizons should be sorted according to depth before using this function. The \code{max_depth} argument is used to specify the maximum depth of profiles within a collection, so that data from any profile shallower than this depth is padded with NA.
}
\value{a vector of "unrolled" property values}
\references{http://casoilresource.lawr.ucdavis.edu/}
\author{Dylan E. Beaudette}
\examples{
data(sp1)
# subset a single soil profile:
sp1.1 <- subset(sp1, subset=id == 'P001')
# demonstrate how this function works
x <- with(sp1.1, unroll(top, bottom, prop, max_depth=50))
plot(x, 1:length(x), ylim=c(90,0), type='b', cex=0.5)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{manip}
|
# Script to create quilt plot for non-focal sites
# clear environment ####
rm(list = ls())  # NOTE(review): clearing the global env in a script is discouraged; kept for compatibility
# tightly condensed plot by latitude.
# Two panels, rp/dp
set.seed(42)
# load libraries ####
library(bootRes)
library(dplR) # for read.rwl
library(climwin)
library(tidyverse)
library(lubridate)
library(readxl)
source("Rscripts/0-My_dplR_functions.R")
# Load in lat/lon for plotting sort
TRW_coord <- read_excel("data/tree_rings/TRW_coord2.xlsx")
TRW_coord <- TRW_coord[, c(1, 3)]
# Add the two original (focal) locations so they appear in the final quilt
# plot. BUG FIX: the previous rbind(data.frame(...), c(38.8935, "SCBI"))
# silently coerced the Latitude column to character; building the data frame
# in one call with named columns keeps Latitude numeric.
originals <- data.frame(
  Latitude = c(42.5388, 38.8935),
  Location = c("HF", "SCBI")
)
TRW_coord <- rbind(TRW_coord, originals)
# Prepare csv's
# (a previous read.csv() of the same file was immediately overwritten by the
# read_csv() below, so it was removed)
# TRW_coord <- read_excel("Data/tree_rings/Other/TRW_coord.xlsx")
# Bert approach
crns <- read_csv("data/tree_rings/chronologies/all_crns_res_1901.csv") %>%
  # clean up: drop unusable chronologies; fix a misspelled column name
  select(-c(BearIs, OH_Gol_QUAL_1, `Greenbrook_Sanctuary,_NJ_LITU_LITU`)) %>%
  rename(IL_Fer_LITU = IL_Fer_LTU)
crns_long <- crns %>%
  # convert to long format
  pivot_longer(-Year, names_to = "site_sp", values_to = "ring_width") %>%
  # drop years with missing data
  filter(!is.na(ring_width))
crns_long_start_end <- crns_long %>%
  # for each site/sp extract first and last year with data
  group_by(site_sp) %>%
  summarize(start = min(Year), end = max(Year)) %>%
  # split site_sp into site and species; species code = last 4 characters.
  # BUG FIX: the end position was n() (the group row count), which only gave
  # the right answer when it happened to exceed the string length; -1 means
  # "last character" and is always correct.
  mutate(
    site = str_sub(site_sp, 1, -6),
    sp = str_sub(site_sp, -4, -1)
  )
# Generate named vector: start year
start.years.sss_bert <- crns_long_start_end$start
names(start.years.sss_bert) <- crns_long_start_end$site_sp
start.years.sss_bert
# end year
# Load climate data ####
## needs to be in different format: column for Date, year and then one column per climate variable
climate_variables <- c("tmn", "tmx")
clim_v <- NULL
# something like this should do
for(clim_v in climate_variables) {
print(clim_v)
x <- read.csv(paste0("Data/climate data/CRU/", clim_v, ".1901.2019-Other_sites-3-11.csv"))
### subset for the sites we care about
#x <- droplevels(x[x$sites.sitename %in% "Harvard_Forest", ])
### reshape to long format
x_long <- reshape(x,
times = names(x)[-1],
timevar = "Date",
varying = list(names(x)[-1]),
direction = "long", v.names = clim_v)
### format date
x_long$Date <- gsub("X", "", x_long$Date)
x_long$Date <- as.Date(x_long$Date , format = "%Y.%m.%d")#changed format to work with Harvard data
### combine all variables in one
if(clim_v == climate_variables[1]) all_Clim <- x_long[, c(1:3)]
else all_Clim <- merge(all_Clim, x_long[, c(1:3)], by = c("sites.sitename", "Date"), all = T)
}
### add year column
all_Clim$year <- as.numeric(format(as.Date(all_Clim$Date, format = "%d/%m/%Y"), "%Y"))
### add month column
all_Clim$month <- as.numeric(format(as.Date(all_Clim$Date, format = "%d/%m/%Y"), "%m"))
clim_means <- all_Clim %>%
filter(month == 4) %>%
group_by(sites.sitename) %>%
summarize(tmn = mean(tmn),
tmx = mean(tmx)) %>%
rename(Location = sites.sitename)
#SCBI clim_means
scbi_clim <- read.csv(paste0("Data/climate data/SCBI/Formated_CRU_SCBI_1901_2016.csv"))
scbi_clim <- scbi_clim[,c(1,2,9,11)]
scbi_clim$Location <- "SCBI"
scbi_clim_means <- scbi_clim %>%
filter(month == 4) %>%
group_by(Location) %>%
summarize(tmn = mean(tmn),
tmx = mean(tmx))
#rename(Location = sites.sitename)
#Harvard forest clim_means
hf_clim <- subset(clim_means, clim_means$Location == "HF_LyfordPlots")
hf_clim$Location <- "HF"
# hf_clim <- read_csv("climate data/HF/Harvard_forest_cru.csv")
# hf_clim$year <- as.numeric(format(as.Date(hf_clim$Date, format = "%Y-%m-%d"), "%Y"))
# ### add month column
# hf_clim$month <- as.numeric(format(as.Date(hf_clim$Date, format = "%Y-%m-%d"), "%m"))
#
# hf_clim_means <- hf_clim %>%
# filter(month == 4) %>%
# group_by(sites.sitename) %>%
# summarize(tmn = mean(tmn),
# tmx = mean(tmx)) %>%
# rename(Location = sites.sitename)
#Merge the 3 so we can sort later
clim_means <-rbind(clim_means, scbi_clim_means,hf_clim)
write.csv(clim_means, file = "clim_means_all.csv", row.names = FALSE)
# Get unique site and species names ----
species <- NULL
sites <- NULL
# BUG FIX: the loop previously ran i over 1:ncol(crns) and indexed column
# 1 + i, which went out of bounds on the final iteration (the old
# "ERROR IS NORMAL" comment acknowledged this). Iterating over the data
# columns only (ncol - 1, skipping Year) removes the error.
for (i in seq_len(ncol(crns) - 1)) {
  new <- crns[c(1:119), c(1, 1 + i)]  # Year column + one chronology column
  name <- colnames(new)
  name <- name[2]
  # species code = last 4 characters of the column name
  sp <- substr(name, nchar(as.character(name)) - 3, nchar(as.character(name)))
  species <- append(species, sp)
  # site = everything before the trailing "_SPCD" suffix
  site <- substr(name, 1, nchar(as.character(name)) - 5)
  sites <- append(sites, site)
  #write.csv(assign(as.character(name),new), file = paste0("Data/tree_rings/",name,".csv"), row.names = FALSE)
  assign(as.character(name), new)  # one data frame per chronology in the global env
}
sites <- unique(sites)
species <- unique(species)
site_species <- unique(crns_long$site_sp)
# Code to produce csv in results/SD_of_each_detrended_chronologies.csv
SD_of_each_detrended_chronologies_bert <- crns_long %>%
  group_by(site_sp) %>%
  summarize(SD = sd(ring_width)) %>%
  mutate(SD = round(SD, 2))
## Run analysis to compare BERT ####
all.dcc.output <- NULL#
corr.dcc.output <- NULL#
#site_species <- c("SIPSEY_WILDERNESS_CAGL")
for(f in site_species) {
#f <- site_species[1]
print(f)
end.year <- crns_long_start_end %>%
filter(site_sp == f) %>%
pull(end)
start.year <- crns_long_start_end %>%
filter(site_sp == f) %>%
pull(start)
# load species chronology data ####
core <- crns_long %>%
filter(site_sp == f) %>%
rename(res = ring_width) %>%
as.data.frame()
rownames(core) <- core$Year
# load climate data for corresponding site (not necessary since you have only one site, but renaming to clim so that the rest works) ####
site <- substr(f,1, nchar(as.character(f))-5)
clim <- all_Clim[all_Clim$sites.sitename %in% site,]
clim <- clim[,c(-1)]
clim <- clim[,c(4,1,3,2,5)]
if(nrow(clim) == 0){
next
}
clim$year <- year(clim$Date)
### crop last year to full.time.frame.end.year
clim <- clim[clim$year <= end.year, ]
clim <- clim[!duplicated(clim$Date),]
# trim measurement years ####
## remove years of core measurement that are before climate record (+ first few first months to be able to look at window before measurement)
#Wasn't sure what window_range was meant to be, so just removed it. I assume this will make the first year's correlation less reliable but since there is a lot more years it shouldn't have a big impact?
core <- core[as.numeric(rownames(core)) >= (min(as.numeric(clim$year))),]#window_range[1]/12), ]
## remove years that are after climate record
core <- core[as.numeric(rownames(core)) <= max(as.numeric(clim$year)), ]
start.year <- max(min(clim$year), start.year)# max(min(clim$year), start.years[which(site_sps[!site_sps %in% species_to_drop] %in% f)])
# run analysis for each variable
for (v in climate_variables) {
print(v)
corr.dcc.output <- my.dcc(chrono = core["res"], clim = clim[, c("year", "month", v)], method = "correlation", start = 1, end = 8, timespan = c(start.year, end.year), ci = 0.05, ci2 = 0.002)
all.dcc.output <- rbind(all.dcc.output, data.frame(cbind(Site = site, Species = substr(f, nchar(f)-3, nchar(f)), corr.dcc.output)))#
}
}
#unique(all_Clim$sites.sitename)
all.dcc.output$variable <- substr(paste(row.names(all.dcc.output)), 1, 3)#get variable from row name
all.dcc.output$month <- substr(paste(row.names(all.dcc.output)), 5, 12)#get month from row name
write.csv(all.dcc.output, file = "Results/tree_cores/quiltplots/plot_data/Other/all.dcc.output_other.csv", row.names = FALSE)
#all.dcc.output <- all.dcc.output %>%
# mutate(Species = str_c(Site, Species, sep = "_")) %>%
# select(-Site)
### plot ####
#############################################
##Copy/Paste this section from other script##
#############################################
#save.plots = TRUE
#all.dcc.output$site <- all.dcc.output$Species
#all.dcc.output$Species <- substr(all.dcc.output$Species, nchar(all.dcc.output$Species)-3, nchar(all.dcc.output$Species))
# library(readr)
# #Merge dcc outputs to plot on the same quilt plot
# all_dcc_output_hf <- read_csv("results/all.dcc.output_hf.csv")
# all_dcc_output_hf$site <- paste0("HF_", all_dcc_output_hf$Species)
# all_dcc_output_other <- read_csv("results/all.dcc.output_other.csv")
# all_dcc_output_scbi <- read_csv("results/scbi_core_corr.csv")
# all_dcc_output_scbi$site <- paste0("SCBI_", all_dcc_output_scbi$Species)
#
# all.dcc.output_all <- rbind(all_dcc_output_other,all_dcc_output_scbi,all_dcc_output_hf)
#
# #Load in clime means
# clim_means <- read_csv("clim_means_all.csv")
# #Create porosity lists
# RP <- c("CAGL","CAOV","CATO","CACO","QURU", "QUST", "QUAL","QUPR","QUMO", "FRAM", "QUVE", "FRNI","QUMA", "QUPA")
# SP <- c( "JUNI", "SAAL")
# DP <- c("FAGR", "LITU", "MAAC", "ACSA","ACRU", "NYSY","BELE","BEAL", "POGR")
# #creat wood_type column for subsetting in the forloop
# all.dcc.output_all$wood_type <- ifelse(all.dcc.output_all$Species %in% RP, "RP",
# ifelse(all.dcc.output_all$Species %in% DP, "DP",
# ifelse(all.dcc.output_all$Species %in% SP, "SP", NA)))
# wood_types <- c("RP","SP", "DP")
#
# v <- "tmn"
# climate_variables <- c("tmn","tmx")
# WT <- "RP"
# for(WT in wood_types){
# all.dcc.output <- all.dcc.output_all[all.dcc.output_all$wood_type %in% WT,]
# for(v in climate_variables) {
# print(v)
#
# #TRW_coord$Location
# TRW_coord <- TRW_coord[!(duplicated(TRW_coord$Location)),]
# X <- all.dcc.output[all.dcc.output$variable %in% v, ]
# X$Location <- ifelse(X$site =="SCBI", "SCBI",
# ifelse(X$site == "HF", "HF",
# substr(X$site, 1, nchar(X$site)-5)))
# #X$numid <- seq(1,8,1)
# X <- X %>%
# mutate(
# month_new = case_when(
# month == "curr.jan" ~ 1,
# month == "curr.feb" ~ 2,
# month == "curr.mar" ~ 3,
# month == "curr.apr" ~ 4,
# month == "curr.may" ~ 5,
# month == "curr.jun" ~ 6,
# month == "curr.jul" ~ 7,
# month == "curr.aug" ~ 8,
# TRUE ~ 0
# )
# )
# X <- X[X$month_new != 0,]
#
# #ctrl shift c
#
# #SORT BY LATITUDE
# # X <- X %>%
# # left_join(TRW_coord, by = "Location")
# #
# # X <- X %>%
# # arrange(desc(Latitude), Species, numid)
# #
#
# #SORT BY APRIL TEMP
# X <- X %>%
# left_join(clim_means, by = "Location") %>%
# group_by(site)
#
# X <- X %>%
# ungroup()%>%
# #arrange(Species,Location, desc(v),month_new)
# arrange(tmn, site, month_new)#, .by_group = TRUE)
#
# X$site <- as.factor(X$site)
# X$month <- as.factor(X$month)
# #X <- merge(X,TRW_coord$Latitude, all.x = TRUE, all.y = FALSE)
# #X <- X[order(as.numeric(X$Latitude), X$numid, X$Species),]
#
#
#
# x <- X[, c("month", "site", "coef")]
#
# x <- x %>%
# pivot_wider(names_from = site,
# id_cols = month,
# values_from = coef)%>%
# as.data.frame()
# #x <- data.frame(reshape(data = X[, c("month","site", "coef")], idvar = "month", timevar = "site",v.names = "coef", direction = "wide"))
#
# rownames(x) <- ifelse(grepl("curr", x$month), toupper(x$month), tolower( x$month))
# rownames(x) <- gsub(".*curr.|.*prev.", "", rownames(x), ignore.case = T)
#
# x.sig <- reshape(X[, c("month", "site", "significant")], idvar = "month", timevar = "site", direction = "wide")
# x.sig2 <- reshape(X[, c("month", "site", "significant2")], idvar = "month", timevar = "site", direction = "wide")
#
# colnames(x) <- gsub("coef.", "", colnames(x))#Here is naming issue. Fixed by multiple column?
# colnames(x.sig) <- gsub("significant.", "", colnames(x.sig))
# colnames(x.sig2) <- gsub("significant2.", "", colnames(x.sig2))
#
# x <- x[, -1] #Remove column since only looking at curr yr
# x.sig <- x.sig[, -1]
# x.sig2 <- x.sig2[, -1]
#
# # x <- x[, rev(SPECIES_IN_ORDER[!SPECIES_IN_ORDER %in% gsub("CAOVL", "CAOV", species_to_drop)])]
# # x.sig <- x.sig[, rev(SPECIES_IN_ORDER[!SPECIES_IN_ORDER %in% gsub("CAOVL", "CAOV", species_to_drop)])]
# # x.sig2 <- x.sig2[, rev(SPECIES_IN_ORDER[!SPECIES_IN_ORDER %in% gsub("CAOVL", "CAOV", species_to_drop)])]
#
# # if(save.plots) {
# # dir.create(paste0("results/", type.start, "/figures/monthly_", method.to.run), showWarnings = F)
# # dir.create(paste0("results/", type.start, "/figures/monthly_", method.to.run, "/", c), showWarnings = F)
# # tiff(paste0("results/", type.start, "/figures/monthly_", method.to.run, "/", c, "/", v, ".tif"), res = 150, width = 169, height = 169, units = "mm", pointsize = 10)
# # }
#
# v <- toupper(v)
# v <- gsub("PDSI_PREWHITEN" , "PDSI", v)
# #x <- x[,c(2,1,3)]
# #x.sig <- x.sig[,c(2,1,3)]
# #x.sig2 <- x.sig2[,c(2,1,3)]
# png(paste0("results/", "monthly_", "correlation", "other", v,WT, ".png"), res = 150, width = 169, height = 2*169, units = "mm", pointsize = 10)
#
# my.dccplot(x = as.data.frame(t(x)), sig = as.data.frame(t(x.sig)), sig2 = as.data.frame(t(x.sig2)), main = ifelse(v %in% "PETminusPRE", "PET-PRE", v), method = "correlation")
#
# if(save.plots) dev.off()
# }
# }
#
#
# all.dcc.output$variable <- substr(paste(row.names(all.dcc.output)), 1, 3)#get variable from row name
# all.dcc.output$month <- substr(paste(row.names(all.dcc.output)), 5, 12)#get month from row name
#
# write.csv(all.dcc.output, file = "results/Other_core_corr.csv", row.names = FALSE)
|
/RScripts/Other_quiltplot.R
|
permissive
|
EcoClimLab/growth_phenology
|
R
| false
| false
| 13,729
|
r
|
# Script to create quilt plot for non-focal sites
# clear environment ####
rm(list = ls())  # NOTE(review): clearing the global env in a script is discouraged; kept for compatibility
# tightly condensed plot by latitude.
# Two panels, rp/dp
set.seed(42)
# load libraries ####
library(bootRes)
library(dplR) # for read.rwl
library(climwin)
library(tidyverse)
library(lubridate)
library(readxl)
source("Rscripts/0-My_dplR_functions.R")
# Load in lat/lon for plotting sort
TRW_coord <- read_excel("data/tree_rings/TRW_coord2.xlsx")
TRW_coord <- TRW_coord[, c(1, 3)]
# Add the two original (focal) locations so they appear in the final quilt
# plot. BUG FIX: the previous rbind(data.frame(...), c(38.8935, "SCBI"))
# silently coerced the Latitude column to character; building the data frame
# in one call with named columns keeps Latitude numeric.
originals <- data.frame(
  Latitude = c(42.5388, 38.8935),
  Location = c("HF", "SCBI")
)
TRW_coord <- rbind(TRW_coord, originals)
# Prepare csv's
# (a previous read.csv() of the same file was immediately overwritten by the
# read_csv() below, so it was removed)
# TRW_coord <- read_excel("Data/tree_rings/Other/TRW_coord.xlsx")
# Bert approach
crns <- read_csv("data/tree_rings/chronologies/all_crns_res_1901.csv") %>%
  # clean up: drop unusable chronologies; fix a misspelled column name
  select(-c(BearIs, OH_Gol_QUAL_1, `Greenbrook_Sanctuary,_NJ_LITU_LITU`)) %>%
  rename(IL_Fer_LITU = IL_Fer_LTU)
crns_long <- crns %>%
  # convert to long format
  pivot_longer(-Year, names_to = "site_sp", values_to = "ring_width") %>%
  # drop years with missing data
  filter(!is.na(ring_width))
crns_long_start_end <- crns_long %>%
  # for each site/sp extract first and last year with data
  group_by(site_sp) %>%
  summarize(start = min(Year), end = max(Year)) %>%
  # split site_sp into site and species; species code = last 4 characters.
  # BUG FIX: the end position was n() (the group row count), which only gave
  # the right answer when it happened to exceed the string length; -1 means
  # "last character" and is always correct.
  mutate(
    site = str_sub(site_sp, 1, -6),
    sp = str_sub(site_sp, -4, -1)
  )
# Generate named vector: start year
start.years.sss_bert <- crns_long_start_end$start
names(start.years.sss_bert) <- crns_long_start_end$site_sp
start.years.sss_bert
# end year
# Load climate data ####
## needs to be in different format: column for Date, year and then one column per climate variable
climate_variables <- c("tmn", "tmx")
clim_v <- NULL
# something like this should do
for(clim_v in climate_variables) {
print(clim_v)
x <- read.csv(paste0("Data/climate data/CRU/", clim_v, ".1901.2019-Other_sites-3-11.csv"))
### subset for the sites we care about
#x <- droplevels(x[x$sites.sitename %in% "Harvard_Forest", ])
### reshape to long format
x_long <- reshape(x,
times = names(x)[-1],
timevar = "Date",
varying = list(names(x)[-1]),
direction = "long", v.names = clim_v)
### format date
x_long$Date <- gsub("X", "", x_long$Date)
x_long$Date <- as.Date(x_long$Date , format = "%Y.%m.%d")#changed format to work with Harvard data
### combine all variables in one
if(clim_v == climate_variables[1]) all_Clim <- x_long[, c(1:3)]
else all_Clim <- merge(all_Clim, x_long[, c(1:3)], by = c("sites.sitename", "Date"), all = T)
}
### add year column
all_Clim$year <- as.numeric(format(as.Date(all_Clim$Date, format = "%d/%m/%Y"), "%Y"))
### add month column
all_Clim$month <- as.numeric(format(as.Date(all_Clim$Date, format = "%d/%m/%Y"), "%m"))
clim_means <- all_Clim %>%
filter(month == 4) %>%
group_by(sites.sitename) %>%
summarize(tmn = mean(tmn),
tmx = mean(tmx)) %>%
rename(Location = sites.sitename)
#SCBI clim_means
scbi_clim <- read.csv(paste0("Data/climate data/SCBI/Formated_CRU_SCBI_1901_2016.csv"))
scbi_clim <- scbi_clim[,c(1,2,9,11)]
scbi_clim$Location <- "SCBI"
scbi_clim_means <- scbi_clim %>%
filter(month == 4) %>%
group_by(Location) %>%
summarize(tmn = mean(tmn),
tmx = mean(tmx))
#rename(Location = sites.sitename)
#Harvard forest clim_means
hf_clim <- subset(clim_means, clim_means$Location == "HF_LyfordPlots")
hf_clim$Location <- "HF"
# hf_clim <- read_csv("climate data/HF/Harvard_forest_cru.csv")
# hf_clim$year <- as.numeric(format(as.Date(hf_clim$Date, format = "%Y-%m-%d"), "%Y"))
# ### add month column
# hf_clim$month <- as.numeric(format(as.Date(hf_clim$Date, format = "%Y-%m-%d"), "%m"))
#
# hf_clim_means <- hf_clim %>%
# filter(month == 4) %>%
# group_by(sites.sitename) %>%
# summarize(tmn = mean(tmn),
# tmx = mean(tmx)) %>%
# rename(Location = sites.sitename)
#Merge the 3 so we can sort later
clim_means <-rbind(clim_means, scbi_clim_means,hf_clim)
write.csv(clim_means, file = "clim_means_all.csv", row.names = FALSE)
# Get unique site and species names ----
species <- NULL
sites <- NULL
# BUG FIX: the loop previously ran i over 1:ncol(crns) and indexed column
# 1 + i, which went out of bounds on the final iteration (the old
# "ERROR IS NORMAL" comment acknowledged this). Iterating over the data
# columns only (ncol - 1, skipping Year) removes the error.
for (i in seq_len(ncol(crns) - 1)) {
  new <- crns[c(1:119), c(1, 1 + i)]  # Year column + one chronology column
  name <- colnames(new)
  name <- name[2]
  # species code = last 4 characters of the column name
  sp <- substr(name, nchar(as.character(name)) - 3, nchar(as.character(name)))
  species <- append(species, sp)
  # site = everything before the trailing "_SPCD" suffix
  site <- substr(name, 1, nchar(as.character(name)) - 5)
  sites <- append(sites, site)
  #write.csv(assign(as.character(name),new), file = paste0("Data/tree_rings/",name,".csv"), row.names = FALSE)
  assign(as.character(name), new)  # one data frame per chronology in the global env
}
sites <- unique(sites)
species <- unique(species)
site_species <- unique(crns_long$site_sp)
# Code to produce csv in results/SD_of_each_detrended_chronologies.csv
SD_of_each_detrended_chronologies_bert <- crns_long %>%
  group_by(site_sp) %>%
  summarize(SD = sd(ring_width)) %>%
  mutate(SD = round(SD, 2))
## Run analysis to compare BERT ####
all.dcc.output <- NULL#
corr.dcc.output <- NULL#
#site_species <- c("SIPSEY_WILDERNESS_CAGL")
for(f in site_species) {
#f <- site_species[1]
print(f)
end.year <- crns_long_start_end %>%
filter(site_sp == f) %>%
pull(end)
start.year <- crns_long_start_end %>%
filter(site_sp == f) %>%
pull(start)
# load species chronology data ####
core <- crns_long %>%
filter(site_sp == f) %>%
rename(res = ring_width) %>%
as.data.frame()
rownames(core) <- core$Year
# load climate data for corresponding site (not necessary since you have only one site, but renaming to clim so that the rest works) ####
site <- substr(f,1, nchar(as.character(f))-5)
clim <- all_Clim[all_Clim$sites.sitename %in% site,]
clim <- clim[,c(-1)]
clim <- clim[,c(4,1,3,2,5)]
if(nrow(clim) == 0){
next
}
clim$year <- year(clim$Date)
### crop last year to full.time.frame.end.year
clim <- clim[clim$year <= end.year, ]
clim <- clim[!duplicated(clim$Date),]
# trim measurement years ####
## remove years of core measurement that are before climate record (+ first few first months to be able to look at window before measurement)
#Wasn't sure what window_range was meant to be, so just removed it. I assume this will make the first year's correlation less reliable but since there is a lot more years it shouldn't have a big impact?
core <- core[as.numeric(rownames(core)) >= (min(as.numeric(clim$year))),]#window_range[1]/12), ]
## remove years that are after climate record
core <- core[as.numeric(rownames(core)) <= max(as.numeric(clim$year)), ]
start.year <- max(min(clim$year), start.year)# max(min(clim$year), start.years[which(site_sps[!site_sps %in% species_to_drop] %in% f)])
# run analysis for each variable
for (v in climate_variables) {
print(v)
corr.dcc.output <- my.dcc(chrono = core["res"], clim = clim[, c("year", "month", v)], method = "correlation", start = 1, end = 8, timespan = c(start.year, end.year), ci = 0.05, ci2 = 0.002)
all.dcc.output <- rbind(all.dcc.output, data.frame(cbind(Site = site, Species = substr(f, nchar(f)-3, nchar(f)), corr.dcc.output)))#
}
}
#unique(all_Clim$sites.sitename)
all.dcc.output$variable <- substr(paste(row.names(all.dcc.output)), 1, 3)#get variable from row name
all.dcc.output$month <- substr(paste(row.names(all.dcc.output)), 5, 12)#get month from row name
write.csv(all.dcc.output, file = "Results/tree_cores/quiltplots/plot_data/Other/all.dcc.output_other.csv", row.names = FALSE)
#all.dcc.output <- all.dcc.output %>%
# mutate(Species = str_c(Site, Species, sep = "_")) %>%
# select(-Site)
### plot ####
#############################################
##Copy/Paste this section from other script##
#############################################
#save.plots = TRUE
#all.dcc.output$site <- all.dcc.output$Species
#all.dcc.output$Species <- substr(all.dcc.output$Species, nchar(all.dcc.output$Species)-3, nchar(all.dcc.output$Species))
# library(readr)
# #Merge dcc outputs to plot on the same quilt plot
# all_dcc_output_hf <- read_csv("results/all.dcc.output_hf.csv")
# all_dcc_output_hf$site <- paste0("HF_", all_dcc_output_hf$Species)
# all_dcc_output_other <- read_csv("results/all.dcc.output_other.csv")
# all_dcc_output_scbi <- read_csv("results/scbi_core_corr.csv")
# all_dcc_output_scbi$site <- paste0("SCBI_", all_dcc_output_scbi$Species)
#
# all.dcc.output_all <- rbind(all_dcc_output_other,all_dcc_output_scbi,all_dcc_output_hf)
#
# #Load in clime means
# clim_means <- read_csv("clim_means_all.csv")
# #Create porosity lists
# RP <- c("CAGL","CAOV","CATO","CACO","QURU", "QUST", "QUAL","QUPR","QUMO", "FRAM", "QUVE", "FRNI","QUMA", "QUPA")
# SP <- c( "JUNI", "SAAL")
# DP <- c("FAGR", "LITU", "MAAC", "ACSA","ACRU", "NYSY","BELE","BEAL", "POGR")
# #creat wood_type column for subsetting in the forloop
# all.dcc.output_all$wood_type <- ifelse(all.dcc.output_all$Species %in% RP, "RP",
# ifelse(all.dcc.output_all$Species %in% DP, "DP",
# ifelse(all.dcc.output_all$Species %in% SP, "SP", NA)))
# wood_types <- c("RP","SP", "DP")
#
# v <- "tmn"
# climate_variables <- c("tmn","tmx")
# WT <- "RP"
# for(WT in wood_types){
# all.dcc.output <- all.dcc.output_all[all.dcc.output_all$wood_type %in% WT,]
# for(v in climate_variables) {
# print(v)
#
# #TRW_coord$Location
# TRW_coord <- TRW_coord[!(duplicated(TRW_coord$Location)),]
# X <- all.dcc.output[all.dcc.output$variable %in% v, ]
# X$Location <- ifelse(X$site =="SCBI", "SCBI",
# ifelse(X$site == "HF", "HF",
# substr(X$site, 1, nchar(X$site)-5)))
# #X$numid <- seq(1,8,1)
# X <- X %>%
# mutate(
# month_new = case_when(
# month == "curr.jan" ~ 1,
# month == "curr.feb" ~ 2,
# month == "curr.mar" ~ 3,
# month == "curr.apr" ~ 4,
# month == "curr.may" ~ 5,
# month == "curr.jun" ~ 6,
# month == "curr.jul" ~ 7,
# month == "curr.aug" ~ 8,
# TRUE ~ 0
# )
# )
# X <- X[X$month_new != 0,]
#
# #ctrl shift c
#
# #SORT BY LATITUDE
# # X <- X %>%
# # left_join(TRW_coord, by = "Location")
# #
# # X <- X %>%
# # arrange(desc(Latitude), Species, numid)
# #
#
# #SORT BY APRIL TEMP
# X <- X %>%
# left_join(clim_means, by = "Location") %>%
# group_by(site)
#
# X <- X %>%
# ungroup()%>%
# #arrange(Species,Location, desc(v),month_new)
# arrange(tmn, site, month_new)#, .by_group = TRUE)
#
# X$site <- as.factor(X$site)
# X$month <- as.factor(X$month)
# #X <- merge(X,TRW_coord$Latitude, all.x = TRUE, all.y = FALSE)
# #X <- X[order(as.numeric(X$Latitude), X$numid, X$Species),]
#
#
#
# x <- X[, c("month", "site", "coef")]
#
# x <- x %>%
# pivot_wider(names_from = site,
# id_cols = month,
# values_from = coef)%>%
# as.data.frame()
# #x <- data.frame(reshape(data = X[, c("month","site", "coef")], idvar = "month", timevar = "site",v.names = "coef", direction = "wide"))
#
# rownames(x) <- ifelse(grepl("curr", x$month), toupper(x$month), tolower( x$month))
# rownames(x) <- gsub(".*curr.|.*prev.", "", rownames(x), ignore.case = T)
#
# x.sig <- reshape(X[, c("month", "site", "significant")], idvar = "month", timevar = "site", direction = "wide")
# x.sig2 <- reshape(X[, c("month", "site", "significant2")], idvar = "month", timevar = "site", direction = "wide")
#
# colnames(x) <- gsub("coef.", "", colnames(x))#Here is naming issue. Fixed by multiple column?
# colnames(x.sig) <- gsub("significant.", "", colnames(x.sig))
# colnames(x.sig2) <- gsub("significant2.", "", colnames(x.sig2))
#
# x <- x[, -1] #Remove column since only looking at curr yr
# x.sig <- x.sig[, -1]
# x.sig2 <- x.sig2[, -1]
#
# # x <- x[, rev(SPECIES_IN_ORDER[!SPECIES_IN_ORDER %in% gsub("CAOVL", "CAOV", species_to_drop)])]
# # x.sig <- x.sig[, rev(SPECIES_IN_ORDER[!SPECIES_IN_ORDER %in% gsub("CAOVL", "CAOV", species_to_drop)])]
# # x.sig2 <- x.sig2[, rev(SPECIES_IN_ORDER[!SPECIES_IN_ORDER %in% gsub("CAOVL", "CAOV", species_to_drop)])]
#
# # if(save.plots) {
# # dir.create(paste0("results/", type.start, "/figures/monthly_", method.to.run), showWarnings = F)
# # dir.create(paste0("results/", type.start, "/figures/monthly_", method.to.run, "/", c), showWarnings = F)
# # tiff(paste0("results/", type.start, "/figures/monthly_", method.to.run, "/", c, "/", v, ".tif"), res = 150, width = 169, height = 169, units = "mm", pointsize = 10)
# # }
#
# v <- toupper(v)
# v <- gsub("PDSI_PREWHITEN" , "PDSI", v)
# #x <- x[,c(2,1,3)]
# #x.sig <- x.sig[,c(2,1,3)]
# #x.sig2 <- x.sig2[,c(2,1,3)]
# png(paste0("results/", "monthly_", "correlation", "other", v,WT, ".png"), res = 150, width = 169, height = 2*169, units = "mm", pointsize = 10)
#
# my.dccplot(x = as.data.frame(t(x)), sig = as.data.frame(t(x.sig)), sig2 = as.data.frame(t(x.sig2)), main = ifelse(v %in% "PETminusPRE", "PET-PRE", v), method = "correlation")
#
# if(save.plots) dev.off()
# }
# }
#
#
# all.dcc.output$variable <- substr(paste(row.names(all.dcc.output)), 1, 3)#get variable from row name
# all.dcc.output$month <- substr(paste(row.names(all.dcc.output)), 5, 12)#get month from row name
#
# write.csv(all.dcc.output, file = "results/Other_core_corr.csv", row.names = FALSE)
|
\name{printProfileCallGraph}
\alias{printProfileCallGraph}
\title{ Print Call Graph for Rprof Profile Data }
\description{
Prints a representation of the call graph for profile data produced
by \code{Rprof}. Output can be directed to a connection or a file.
}
\usage{
printProfileCallGraph(pd, file = stdout(), percent = TRUE, GC = TRUE,
maxnodes = NA, total.pct = 0)
}
\arguments{
\item{pd}{profile data as returned by \code{readProfileData}.}
\item{file}{ a connection or the name of the file where the profile
graph will be written. }
\item{percent}{ logical; if true use percent of total time; otherwise
use time in seconds }
\item{GC}{logical; include GC information or not.}
\item{maxnodes}{integer; maximal number of nodes to use; nodes with
lower total hit counts are dropped.}
\item{total.pct}{numeric; if positive, nodes with hit percentages
below this level are dropped.}
}
\value{Used for side effect.}
\details{
\code{printProfileCallGraph} produces a printed representation of
the call graph for profile data produced by \code{Rprof}. The
representation is analogous to the call graph produced by
\code{gprof} with a few minor changes. Eventually more complete
documentation of the format will be provided here; for now, reading
the \code{gprof} manual section on the call graph should help
understanding this output. The output is similar enough to
gprof output for the \code{cgprof} script to be able to produce a
visual representation of the call graph via Graphviz.
}
\note{
Because of lazy evaluation, nested calls like \code{f(g(x))}
appear in the profile graph as \code{f} or one of its callees
calling \code{g}.
}
\references{
User manual for \code{gprof}, the GNU profiler.
\code{cgprof}: \url{http://mvertes.free.fr/}
}
\author{ Luke Tierney }
\seealso{
\code{\link{Rprof}},
\code{\link{summaryRprof}},
\code{\link{flatProfile}},
\code{\link{readProfileData}},
\code{\link{plotProfileCallGraph}},
\code{\link{profileCallGraph2Dot}}
}
\examples{
pd <- readProfileData(system.file("samples", "glmEx.out", package="proftools"))
printProfileCallGraph(pd)
\dontrun{
## If you have graphviz and cgprof installed on a UNIX-like system
## then in R do:
pd <- readProfileData(system.file("samples", "glmEx.out", package="proftools"))
printProfileCallGraph(pd, "foo.graph")
## and then in a shell do (to use the interactive dotty):
cgprof -TX foo.graph
## or (to create a postscript version and view with gv):
cgprof -Tps foo.graph > foo.ps
gv foo.ps
}
}
\keyword{programming}
\keyword{utilities}
|
/man/printProfileCallGraph.Rd
|
no_license
|
ltierney/Rpkg-proftools
|
R
| false
| false
| 2,665
|
rd
|
\name{printProfileCallGraph}
\alias{printProfileCallGraph}
\title{ Print Call Graph for Rprof Profile Data }
\description{
Prints a representation of the call graph for profile data produced
by \code{Rprof}. Output can be directed to a connection or a file.
}
\usage{
printProfileCallGraph(pd, file = stdout(), percent = TRUE, GC = TRUE,
maxnodes = NA, total.pct = 0)
}
\arguments{
\item{pd}{profile data as returned by \code{readProfileData}.}
\item{file}{ a connection or the name of the file where the profile
graph will be written. }
\item{percent}{ logical; if true use percent of total time; otherwise
use time in seconds }
\item{GC}{logical; include GC information or not.}
\item{maxnodes}{integer; maximal number of nodes to use; nodes with
lower total hit counts are dropped.}
\item{total.pct}{numeric; if positive, nodes with hit percentages
below this level are dropped.}
}
\value{Used for side effect.}
\details{
\code{printProfileCallGraph} produces a printed representation of
the call graph for profile data produced by \code{Rprof}. The
representation is analogous to the call graph produced by
\code{gprof} with a few minor changes. Eventually more complete
documentation of the format will be provided here; for now, reading
the \code{gprof} manual section on the call graph should help
understanding this output. The output is similar enough to
gprof output for the \code{cgprof} script to be able to produce a
visual representation of the call graph via Graphviz.
}
\note{
Because of lazy evaluation, nested calls like \code{f(g(x))}
appear in the profile graph as \code{f} or one of its callees
calling \code{g}.
}
\references{
User manual for \code{gprof}, the GNU profiler.
\code{cgprof}: \url{http://mvertes.free.fr/}
}
\author{ Luke Tierney }
\seealso{
\code{\link{Rprof}},
\code{\link{summaryRprof}},
\code{\link{flatProfile}},
\code{\link{readProfileData}},
\code{\link{plotProfileCallGraph}},
\code{\link{profileCallGraph2Dot}}
}
\examples{
pd <- readProfileData(system.file("samples", "glmEx.out", package="proftools"))
printProfileCallGraph(pd)
\dontrun{
## If you have graphviz and cgprof installed on a UNIX-like system
## then in R do:
pd <- readProfileData(system.file("samples", "glmEx.out", package="proftools"))
printProfileCallGraph(pd, "foo.graph")
## and then in a shell do (to use the interactive dotty):
cgprof -TX foo.graph
## or (to create a postscript version and view with gv):
cgprof -Tps foo.graph > foo.ps
gv foo.ps
}
}
\keyword{programming}
\keyword{utilities}
|
\name{selectArea_shiny_demo}
\alias{selectArea_shiny_demo}
\title{
A demo of using selectArea() as a shiny app
}
\description{
A demo of using selectArea() as a shiny app
}
\usage{
selectArea_shiny_demo(ht_list)
}
\arguments{
\item{ht_list}{A \code{\link{Heatmap-class}} or a \code{\link{HeatmapList-class}} object.}
}
\details{
source code of the app is at \url{https://github.com/jokergoo/ComplexHeatmap/blob/master/inst/app/app.R} .
}
\seealso{
\url{https://jokergoo.shinyapps.io/selectArea/}
}
\examples{
if(interactive()) {
selectArea_shiny_demo()
}
# by providing a heatmap/heatmap list
if(interactive()) {
m = matrix(rnorm(100), 10)
rownames(m) = 1:10
colnames(m) = 1:10
ht = Heatmap(m)
selectArea_shiny_demo(ht)
}
}
|
/man/selectArea_shiny_demo.Rd
|
permissive
|
wangdi2014/ComplexHeatmap
|
R
| false
| false
| 750
|
rd
|
\name{selectArea_shiny_demo}
\alias{selectArea_shiny_demo}
\title{
A demo of using selectArea() as a shiny app
}
\description{
A demo of using selectArea() as a shiny app
}
\usage{
selectArea_shiny_demo(ht_list)
}
\arguments{
\item{ht_list}{A \code{\link{Heatmap-class}} or a \code{\link{HeatmapList-class}} object.}
}
\details{
source code of the app is at \url{https://github.com/jokergoo/ComplexHeatmap/blob/master/inst/app/app.R} .
}
\seealso{
\url{https://jokergoo.shinyapps.io/selectArea/}
}
\examples{
if(interactive()) {
selectArea_shiny_demo()
}
# by providing a heatmap/heatmap list
if(interactive()) {
m = matrix(rnorm(100), 10)
rownames(m) = 1:10
colnames(m) = 1:10
ht = Heatmap(m)
selectArea_shiny_demo(ht)
}
}
|
# Grid-search driver: repeatedly source the GBM cross-validation solver and
# collect its mean-absolute-error (MAE) results over a depth x rounds grid.
#
# NOTE(review): everything below the configuration block depends on variables
# created as side effects of source(solver_script): time_df, mae_base,
# mae_xgb, mae_cv_test, mae_cv_trn, cs_list and i are never defined in this
# file -- confirm against gbm_cv.R.
# NOTE(review): the loop variables `depth` and `rounds` are never copied into
# set_maxdepth / set_nrounds before sourcing, so the solver appears to run
# with the same fixed settings on every iteration -- verify this is intended.

# --- configuration variables read by solver_script ---
rdata_file <- '../train_agg3_10pct.RData'   # pre-aggregated training data (10% sample)
solver_script <- '../dave/gbm_cv.R'         # script that trains and cross-validates
create_submission <- FALSE                  # skip writing a submission file
cv_frac_trn <- 0.7                          # fraction of data used for CV training
tcheck.print <- TRUE                        # presumably enables timing printouts -- confirm in solver
set_rain_thresh <- 69
set_rm_refna <- TRUE
set_nrounds <- 1955                         # boosting rounds (fixed; see NOTE above)
set_maxdepth <- 3                           # tree depth (fixed; see NOTE above)
mae_res <- data.frame()                     # accumulates one result row per solver run
run_time <- numeric()                       # elapsed seconds per solver run
# Candidate feature-column names handed to the solver via set_cs.
set_cs <- c("rd"
, "Ref", "Ref_5x5_10th", "Ref_5x5_50th", "Ref_5x5_90th"
, "RefComposite", "RefComposite_5x5_10th", "RefComposite_5x5_50th", "RefComposite_5x5_90th"
, "RhoHV", "RhoHV_5x5_10th", "RhoHV_5x5_50th", "RhoHV_5x5_90th"
, "Zdr", "Zdr_5x5_10th", "Zdr_5x5_50th", "Zdr_5x5_90th"
, "Kdp", "Kdp_5x5_10th", "Kdp_5x5_50th", "Kdp_5x5_90th"
, "nrec", "naRef", "naRefC", "naRho", "naZdr", "naKdp"
, "Ref_rz", "Ref_rz_comp", "Kdp_rk", "rr_Katsumata_ref", "rr_Katsumata_ref_comp"
, "rr_refzdr", "rr_refzdr_comp", "rr_kdpzdr", "Ref2", "RefComposite2", "Zdr2"
, "Kdp2", "rd_Ref", "rd_RefComposite", "rd_Kdp"
)
smallestError <- 100                        # running minimum of mae_cv_test
for (depth in seq(1,5,1)) {
for (rounds in seq(1,5,1)) {
# train
source (solver_script)
elapsed <- sum( time_df$delta )             # time_df is produced by the solver
# First iteration: mae_base starts at this run's test MAE; later iterations keep it.
mae_base <- ifelse( mae_base > 0 , mae_base, mae_cv_test)
# Append one summary row for this run. NOTE(review): cs_list and i are not
# defined in this script; they must come from the solver's environment.
mae_res <- rbind( mae_res, data.frame( depth=depth, rounds=rounds, xSet=names(cs_list[i])
, xvars=paste(set_cs, collapse = ",")
, mae_xgb, mae_cv_test, mae_cv_trn
, delta = mae_cv_test - mae_base
, elapsed))
run_time <- c(run_time, elapsed )
# predict
# Track the best (lowest) cross-validation test MAE seen so far.
if (mae_cv_test < smallestError) {
smallestError = mae_cv_test
print(paste("New Lowest MAE: ", depth,rounds,mae_cv_test))
}
}
}
print(mae_res)
# NOTE(review): depth/rounds here are the final loop values, not the ones that
# achieved smallestError -- the label may be misleading.
print(paste("Overall Lowest MAE on Test: ", depth,rounds,mae_cv_test))
|
/dave/run_xgbm_cv.R
|
no_license
|
dsdaveh/weddingcap_rain
|
R
| false
| false
| 1,882
|
r
|
# Grid-search driver: repeatedly source the GBM cross-validation solver and
# collect its mean-absolute-error (MAE) results over a depth x rounds grid.
#
# NOTE(review): everything below the configuration block depends on variables
# created as side effects of source(solver_script): time_df, mae_base,
# mae_xgb, mae_cv_test, mae_cv_trn, cs_list and i are never defined in this
# file -- confirm against gbm_cv.R.
# NOTE(review): the loop variables `depth` and `rounds` are never copied into
# set_maxdepth / set_nrounds before sourcing, so the solver appears to run
# with the same fixed settings on every iteration -- verify this is intended.

# --- configuration variables read by solver_script ---
rdata_file <- '../train_agg3_10pct.RData'   # pre-aggregated training data (10% sample)
solver_script <- '../dave/gbm_cv.R'         # script that trains and cross-validates
create_submission <- FALSE                  # skip writing a submission file
cv_frac_trn <- 0.7                          # fraction of data used for CV training
tcheck.print <- TRUE                        # presumably enables timing printouts -- confirm in solver
set_rain_thresh <- 69
set_rm_refna <- TRUE
set_nrounds <- 1955                         # boosting rounds (fixed; see NOTE above)
set_maxdepth <- 3                           # tree depth (fixed; see NOTE above)
mae_res <- data.frame()                     # accumulates one result row per solver run
run_time <- numeric()                       # elapsed seconds per solver run
# Candidate feature-column names handed to the solver via set_cs.
set_cs <- c("rd"
, "Ref", "Ref_5x5_10th", "Ref_5x5_50th", "Ref_5x5_90th"
, "RefComposite", "RefComposite_5x5_10th", "RefComposite_5x5_50th", "RefComposite_5x5_90th"
, "RhoHV", "RhoHV_5x5_10th", "RhoHV_5x5_50th", "RhoHV_5x5_90th"
, "Zdr", "Zdr_5x5_10th", "Zdr_5x5_50th", "Zdr_5x5_90th"
, "Kdp", "Kdp_5x5_10th", "Kdp_5x5_50th", "Kdp_5x5_90th"
, "nrec", "naRef", "naRefC", "naRho", "naZdr", "naKdp"
, "Ref_rz", "Ref_rz_comp", "Kdp_rk", "rr_Katsumata_ref", "rr_Katsumata_ref_comp"
, "rr_refzdr", "rr_refzdr_comp", "rr_kdpzdr", "Ref2", "RefComposite2", "Zdr2"
, "Kdp2", "rd_Ref", "rd_RefComposite", "rd_Kdp"
)
smallestError <- 100                        # running minimum of mae_cv_test
for (depth in seq(1,5,1)) {
for (rounds in seq(1,5,1)) {
# train
source (solver_script)
elapsed <- sum( time_df$delta )             # time_df is produced by the solver
# First iteration: mae_base starts at this run's test MAE; later iterations keep it.
mae_base <- ifelse( mae_base > 0 , mae_base, mae_cv_test)
# Append one summary row for this run. NOTE(review): cs_list and i are not
# defined in this script; they must come from the solver's environment.
mae_res <- rbind( mae_res, data.frame( depth=depth, rounds=rounds, xSet=names(cs_list[i])
, xvars=paste(set_cs, collapse = ",")
, mae_xgb, mae_cv_test, mae_cv_trn
, delta = mae_cv_test - mae_base
, elapsed))
run_time <- c(run_time, elapsed )
# predict
# Track the best (lowest) cross-validation test MAE seen so far.
if (mae_cv_test < smallestError) {
smallestError = mae_cv_test
print(paste("New Lowest MAE: ", depth,rounds,mae_cv_test))
}
}
}
print(mae_res)
# NOTE(review): depth/rounds here are the final loop values, not the ones that
# achieved smallestError -- the label may be misleading.
print(paste("Overall Lowest MAE on Test: ", depth,rounds,mae_cv_test))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/text-annotate.R
\name{meme_text_distbf}
\alias{meme_text_distbf}
\alias{meme_text_rock}
\alias{meme_text_batman}
\alias{meme_text_trump}
\alias{meme_text_pigeon}
\alias{meme_text_chopper}
\alias{meme_text_brain}
\alias{meme_text_kermit}
\alias{meme_text_buttons}
\alias{meme_text_drake}
\alias{meme_text_printer}
\alias{meme_text_anakin}
\alias{meme_text_suez}
\title{Specialized functions for placing text in memes}
\usage{
meme_text_distbf(
img,
newgirl,
guy,
oldgirl,
font = "Impact",
color = "white",
strokecolor = "black",
size = 44,
...
)
meme_text_rock(
img,
dwayne,
girl,
font = "Impact",
color = "black",
strokecolor = NULL,
size = 16,
...
)
meme_text_batman(
img,
robin,
batman,
font = "Impact",
color = "black",
strokecolor = NULL,
size = 20,
...
)
meme_text_trump(
img,
rightpg,
leftpg = "",
font = "Impact",
color = "black",
strokecolor = NULL,
size = 20,
...
)
meme_text_pigeon(
img,
isthis,
humanoid,
butterfly,
font = "Impact",
color = "white",
strokecolor = "black",
size = 30,
...
)
meme_text_chopper(
img,
sr1,
jr1,
sr2,
jr2,
sr3,
font = "Impact",
color = "white",
strokecolor = "black",
size = 20,
...
)
meme_text_brain(
img,
br1,
br2,
br3,
br4,
font = "Impact",
color = "black",
strokecolor = NULL,
size = 24,
...
)
meme_text_kermit(
img,
good,
dark,
font = "Impact",
color = "black",
strokecolor = NULL,
size = 20,
...
)
meme_text_buttons(
img,
lbtn,
rbtn,
font = "Impact",
color = "black",
strokecolor = NULL,
size = 18,
...
)
meme_text_drake(
img,
top,
bot,
font = "Impact",
color = "black",
strokecolor = NULL,
size = 30,
...
)
meme_text_printer(
img,
left,
right,
font = "Impact",
color = "black",
strokecolor = NULL,
size = 13,
...
)
meme_text_anakin(img, anakin, padme, font = "sans", size = 16, width = 35, ...)
meme_text_suez(
img,
evergiven,
excavator,
font = "Impact",
color = "white",
strokecolor = NULL,
size = 40,
width = 30,
...
)
}
\arguments{
\item{img}{An image of class \code{magick_image}. See \code{\link[=meme_get]{meme_get()}}.}
\item{newgirl}{A character string. Text for the left-most person in the \code{"DistractedBf"} meme.}
\item{guy}{A character string. Text for the center person in the \code{"DistractedBf"} meme.}
\item{oldgirl}{A character string. Text for the right-most person in the \code{"DistractedBf"} meme.}
\item{font}{string with font family such as \code{"sans"}, \code{"mono"}, \code{"serif"}, \code{"Times"}, \code{"Helvetica"}, \code{"Trebuchet"}, \code{"Georgia"}, \code{"Palatino"} or \code{"Comic Sans"}.}
\item{color}{a valid \href{https://www.imagemagick.org/Magick++/Color.html}{color string} such as \code{"navyblue"} or \code{"#000080"}}
\item{strokecolor}{a \href{https://www.imagemagick.org/Magick++/Color.html}{color string} adds a stroke (border around the text)}
\item{size}{font-size in pixels}
\item{...}{
Arguments passed on to \code{\link[magick:painting]{magick::image_annotate}}
\describe{
\item{\code{location}}{geometry string with location relative to \code{gravity}}
\item{\code{degrees}}{rotates text around center point}
\item{\code{boxcolor}}{a \href{https://www.imagemagick.org/Magick++/Color.html}{color string}
for background color that annotation text is rendered on.}
}}
\item{dwayne}{A character string. Text for the top speech bubble in the \code{"TheRockDriving"} meme.}
\item{girl}{A character string. Text for the other speech bubble in the \code{"TheRockDriving"} meme.}
\item{robin}{A character string. Text for the Robin's speech bubble in the \code{"BatmanRobin"} meme.}
\item{batman}{A character string. Text for the Batman's speech bubble in the \code{"BatmanRobin"} meme.}
\item{rightpg}{A character string. Text for the right page in the \code{"TrumpBillSigning"} meme.}
\item{leftpg}{A character string. Text for the left page in the \code{"TrumpBillSigning"} meme.}
\item{isthis}{A character string. Text for the question in the \code{"IsThisAPigeon"} meme.}
\item{humanoid}{A character string. Text for the humanoid in the \code{"IsThisAPigeon"} meme.}
\item{butterfly}{A character string. Text for the butterfly in the \code{"IsThisAPigeon"} meme.}
\item{sr1}{A character string. Text for the first panel in the \code{"AmericanChopper"} meme.}
\item{jr1}{A character string. Text for the second panel in the \code{"AmericanChopper"} meme.}
\item{sr2}{A character string. Text for the third panel in the \code{"AmericanChopper"} meme.}
\item{jr2}{A character string. Text for the fourth panel in the \code{"AmericanChopper"} meme.}
\item{sr3}{A character string. Text for the fifth panel in the \code{"AmericanChopper"} meme.}
\item{br1}{A character string. Text for the first panel in the \code{"ExpandingBrain"} meme.}
\item{br2}{A character string. Text for the second panel in the \code{"ExpandingBrain"} meme.}
\item{br3}{A character string. Text for the third panel in the \code{"ExpandingBrain"} meme.}
\item{br4}{A character string. Text for the fourth panel in the \code{"ExpandingBrain"} meme.}
\item{good}{A character string. A good idea for the \code{"OffRamp"} meme.}
\item{dark}{A character string. A tempting, bad idea for the \code{"OffRamp"} meme.}
\item{lbtn}{A character string. The left button in the \code{"TwoButtonsAnxiety"} meme.}
\item{rbtn}{A character string. The right button in the \code{"TwoButtonsAnxiety"} meme.}
\item{top}{A character string. Text for the top panel in the \code{"HotlineDrake"} meme.}
\item{bot}{A character string. Text for the bottom panel in the \code{"HotlineDrake"} meme.}
\item{left}{A character string. Text for the left panel in the \code{"MoneyPrinter"} meme.}
\item{right}{A character string. Text for the right panel in the \code{"MoneyPrinter"} meme.}
\item{anakin}{A character string. Text for Anakin in the \code{"AnakinPadmeRight"} meme.}
\item{padme}{A character string. Text for Padme in the \code{"AnakinPadmeRight"} meme.}
\item{width}{positive integer giving target line width in characters. A
width less than or equal to 1 will put each word on its own line.}
\item{evergiven}{A character string. Big text for the ship in the \code{"SuezExcavator"} meme.}
\item{excavator}{A character string. Little text for the excavator in the \code{"SuezExcavator"} meme.}
}
\description{
Specialized functions for placing text in memes
}
\section{Functions}{
\itemize{
\item \code{meme_text_distbf}: Text function for the distracted boyfriend meme.
\item \code{meme_text_rock}: Text function for The Rock driving meme.
\item \code{meme_text_batman}: Text function for the Batman slaps Robin meme.
\item \code{meme_text_trump}: Text function for the Trump "first order of business" meme.
\item \code{meme_text_pigeon}: Text function for the Is this a pigeon? meme.
\item \code{meme_text_chopper}: Text function for the American Chopper Senior vs. Junior fight meme.
\item \code{meme_text_brain}: Text function for the expanding brain meme.
\item \code{meme_text_kermit}: Text function for the Off Ramp meme.
\item \code{meme_text_buttons}: Text function for the Two Buttons Anxiety meme.
\item \code{meme_text_drake}: Text function for the Drake meme.
\item \code{meme_text_printer}: Text function for the Money Printer meme.
\item \code{meme_text_anakin}: Text function for the Anakin/Padme meme.
\item \code{meme_text_suez}: Text function for the Suez canal excavator meme.
}}
\examples{
meme_get("DistractedBf") \%>\%
meme_text_distbf("tidyverse", "new R users", "base R")
meme_get("TheRockDriving") \%>\%
meme_text_rock("What's your favorite thing to do in R?" , "Write for loops.")
meme_get("BatmanRobin") \%>\%
meme_text_batman("Hey, I'm Batman!" , "No, you idiot, I'm Batman!")
meme_get("TrumpBillSigning") \%>\%
meme_text_trump("Stuff and nonsense")
meme_get("IsThisAPigeon") \%>\%
meme_text_pigeon("Is this a pigeon?" , "Me", "Not a pigeon")
meme_get("AmericanChopper") \%>\%
meme_text_chopper("Stop calling yourself 'just an R user'!",
"But I've never written a package!",
"So?? You're still an important part of the R community!",
"But people who write packages are more important",
"NO! All members of the R community are valued & deserve respect!")
meme_get("ExpandingBrain") \%>\%
meme_text_brain("text1",
"text2",
"text3",
"text4")
meme_get("EvilKermit") \%>\%
meme_text_kermit("Me: Your text here", "Me: Your evil text here")
meme_get("TwoButtonsAnxiety") \%>\%
meme_text_buttons("Should I do this?", "Or this?")
meme_get("HotlineDrake") \%>\%
meme_text_drake("Handcrafted memes", "Reproducible memes")
meme_get("MoneyPrinter") \%>\%
meme_text_printer("nooooo!!!!!! you can't just use open source software to
make memes without annoying watermarks on them nooooo",
"haha meme printer go brrrr")
meme_get("AnakinPadmeRight") \%>\%
meme_text_anakin("I'm going to set my working directory",
"With the {here} package, right?")
meme_get("SuezExcavator") \%>\%
meme_text_suez("R programming",
"Me learning R programming")
}
|
/man/meme_text_distbf.Rd
|
no_license
|
wyywyy23/memer
|
R
| false
| true
| 9,424
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/text-annotate.R
\name{meme_text_distbf}
\alias{meme_text_distbf}
\alias{meme_text_rock}
\alias{meme_text_batman}
\alias{meme_text_trump}
\alias{meme_text_pigeon}
\alias{meme_text_chopper}
\alias{meme_text_brain}
\alias{meme_text_kermit}
\alias{meme_text_buttons}
\alias{meme_text_drake}
\alias{meme_text_printer}
\alias{meme_text_anakin}
\alias{meme_text_suez}
\title{Specialized functions for placing text in memes}
\usage{
meme_text_distbf(
img,
newgirl,
guy,
oldgirl,
font = "Impact",
color = "white",
strokecolor = "black",
size = 44,
...
)
meme_text_rock(
img,
dwayne,
girl,
font = "Impact",
color = "black",
strokecolor = NULL,
size = 16,
...
)
meme_text_batman(
img,
robin,
batman,
font = "Impact",
color = "black",
strokecolor = NULL,
size = 20,
...
)
meme_text_trump(
img,
rightpg,
leftpg = "",
font = "Impact",
color = "black",
strokecolor = NULL,
size = 20,
...
)
meme_text_pigeon(
img,
isthis,
humanoid,
butterfly,
font = "Impact",
color = "white",
strokecolor = "black",
size = 30,
...
)
meme_text_chopper(
img,
sr1,
jr1,
sr2,
jr2,
sr3,
font = "Impact",
color = "white",
strokecolor = "black",
size = 20,
...
)
meme_text_brain(
img,
br1,
br2,
br3,
br4,
font = "Impact",
color = "black",
strokecolor = NULL,
size = 24,
...
)
meme_text_kermit(
img,
good,
dark,
font = "Impact",
color = "black",
strokecolor = NULL,
size = 20,
...
)
meme_text_buttons(
img,
lbtn,
rbtn,
font = "Impact",
color = "black",
strokecolor = NULL,
size = 18,
...
)
meme_text_drake(
img,
top,
bot,
font = "Impact",
color = "black",
strokecolor = NULL,
size = 30,
...
)
meme_text_printer(
img,
left,
right,
font = "Impact",
color = "black",
strokecolor = NULL,
size = 13,
...
)
meme_text_anakin(img, anakin, padme, font = "sans", size = 16, width = 35, ...)
meme_text_suez(
img,
evergiven,
excavator,
font = "Impact",
color = "white",
strokecolor = NULL,
size = 40,
width = 30,
...
)
}
\arguments{
\item{img}{An image of class \code{magick_image}. See \code{\link[=meme_get]{meme_get()}}.}
\item{newgirl}{A character string. Text for the left-most person in the \code{"DistractedBf"} meme.}
\item{guy}{A character string. Text for the center person in the \code{"DistractedBf"} meme.}
\item{oldgirl}{A character string. Text for the right-most person in the \code{"DistractedBf"} meme.}
\item{font}{string with font family such as \code{"sans"}, \code{"mono"}, \code{"serif"}, \code{"Times"}, \code{"Helvetica"}, \code{"Trebuchet"}, \code{"Georgia"}, \code{"Palatino"} or \code{"Comic Sans"}.}
\item{color}{a valid \href{https://www.imagemagick.org/Magick++/Color.html}{color string} such as \code{"navyblue"} or \code{"#000080"}}
\item{strokecolor}{a \href{https://www.imagemagick.org/Magick++/Color.html}{color string} adds a stroke (border around the text)}
\item{size}{font-size in pixels}
\item{...}{
Arguments passed on to \code{\link[magick:painting]{magick::image_annotate}}
\describe{
\item{\code{location}}{geometry string with location relative to \code{gravity}}
\item{\code{degrees}}{rotates text around center point}
\item{\code{boxcolor}}{a \href{https://www.imagemagick.org/Magick++/Color.html}{color string}
for background color that annotation text is rendered on.}
}}
\item{dwayne}{A character string. Text for the top speech bubble in the \code{"TheRockDriving"} meme.}
\item{girl}{A character string. Text for the other speech bubble in the \code{"TheRockDriving"} meme.}
\item{robin}{A character string. Text for the Robin's speech bubble in the \code{"BatmanRobin"} meme.}
\item{batman}{A character string. Text for the Batman's speech bubble in the \code{"BatmanRobin"} meme.}
\item{rightpg}{A character string. Text for the right page in the \code{"TrumpBillSigning"} meme.}
\item{leftpg}{A character string. Text for the left page in the \code{"TrumpBillSigning"} meme.}
\item{isthis}{A character string. Text for the question in the \code{"IsThisAPigeon"} meme.}
\item{humanoid}{A character string. Text for the humanoid in the \code{"IsThisAPigeon"} meme.}
\item{butterfly}{A character string. Text for the butterfly in the \code{"IsThisAPigeon"} meme.}
\item{sr1}{A character string. Text for the first panel in the \code{"AmericanChopper"} meme.}
\item{jr1}{A character string. Text for the second panel in the \code{"AmericanChopper"} meme.}
\item{sr2}{A character string. Text for the third panel in the \code{"AmericanChopper"} meme.}
\item{jr2}{A character string. Text for the fourth panel in the \code{"AmericanChopper"} meme.}
\item{sr3}{A character string. Text for the fifth panel in the \code{"AmericanChopper"} meme.}
\item{br1}{A character string. Text for the first panel in the \code{"ExpandingBrain"} meme.}
\item{br2}{A character string. Text for the second panel in the \code{"ExpandingBrain"} meme.}
\item{br3}{A character string. Text for the third panel in the \code{"ExpandingBrain"} meme.}
\item{br4}{A character string. Text for the fourth panel in the \code{"ExpandingBrain"} meme.}
\item{good}{A character string. A good idea for the \code{"OffRamp"} meme.}
\item{dark}{A character string. A tempting, bad idea for the \code{"OffRamp"} meme.}
\item{lbtn}{A character string. The left button in the \code{"TwoButtonsAnxiety"} meme.}
\item{rbtn}{A character string. The right button in the \code{"TwoButtonsAnxiety"} meme.}
\item{top}{A character string. Text for the top panel in the \code{"HotlineDrake"} meme.}
\item{bot}{A character string. Text for the bottom panel in the \code{"HotlineDrake"} meme.}
\item{left}{A character string. Text for the left panel in the \code{"MoneyPrinter"} meme.}
\item{right}{A character string. Text for the right panel in the \code{"MoneyPrinter"} meme.}
\item{anakin}{A character string. Text for Anakin in the \code{"AnakinPadmeRight"} meme.}
\item{padme}{A character string. Text for Padme in the \code{"AnakinPadmeRight"} meme.}
\item{width}{positive integer giving target line width in characters. A
width less than or equal to 1 will put each word on its own line.}
\item{evergiven}{A character string. Big text for the ship in the \code{"SuezExcavator"} meme.}
\item{excavator}{A character string. Little text for the excavator in the \code{"SuezExcavator"} meme.}
}
\description{
Specialized functions for placing text in memes
}
\section{Functions}{
\itemize{
\item \code{meme_text_distbf}: Text function for the distracted boyfriend meme.
\item \code{meme_text_rock}: Text function for The Rock driving meme.
\item \code{meme_text_batman}: Text function for the Batman slaps Robin meme.
\item \code{meme_text_trump}: Text function for the Trump "first order of business" meme.
\item \code{meme_text_pigeon}: Text function for the Is this a pigeon? meme.
\item \code{meme_text_chopper}: Text function for the American Chopper Senior vs. Junior fight meme.
\item \code{meme_text_brain}: Text function for the expanding brain meme.
\item \code{meme_text_kermit}: Text function for the Off Ramp meme.
\item \code{meme_text_buttons}: Text function for the Two Buttons Anxiety meme.
\item \code{meme_text_drake}: Text function for the Drake meme.
\item \code{meme_text_printer}: Text function for the Money Printer meme.
\item \code{meme_text_anakin}: Text function for the Anakin/Padme meme.
\item \code{meme_text_suez}: Text function for the Suez canal excavator meme.
}}
\examples{
meme_get("DistractedBf") \%>\%
meme_text_distbf("tidyverse", "new R users", "base R")
meme_get("TheRockDriving") \%>\%
meme_text_rock("What's your favorite thing to do in R?" , "Write for loops.")
meme_get("BatmanRobin") \%>\%
meme_text_batman("Hey, I'm Batman!" , "No, you idiot, I'm Batman!")
meme_get("TrumpBillSigning") \%>\%
meme_text_trump("Stuff and nonsense")
meme_get("IsThisAPigeon") \%>\%
meme_text_pigeon("Is this a pigeon?" , "Me", "Not a pigeon")
meme_get("AmericanChopper") \%>\%
meme_text_chopper("Stop calling yourself 'just an R user'!",
"But I've never written a package!",
"So?? You're still an important part of the R community!",
"But people who write packages are more important",
"NO! All members of the R community are valued & deserve respect!")
meme_get("ExpandingBrain") \%>\%
meme_text_brain("text1",
"text2",
"text3",
"text4")
meme_get("EvilKermit") \%>\%
meme_text_kermit("Me: Your text here", "Me: Your evil text here")
meme_get("TwoButtonsAnxiety") \%>\%
meme_text_buttons("Should I do this?", "Or this?")
meme_get("HotlineDrake") \%>\%
meme_text_drake("Handcrafted memes", "Reproducible memes")
meme_get("MoneyPrinter") \%>\%
meme_text_printer("nooooo!!!!!! you can't just use open source software to
make memes without annoying watermarks on them nooooo",
"haha meme printer go brrrr")
meme_get("AnakinPadmeRight") \%>\%
meme_text_anakin("I'm going to set my working directory",
"With the {here} package, right?")
meme_get("SuezExcavator") \%>\%
meme_text_suez("R programming",
"Me learning R programming")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{color_hex_palettes}
\alias{color_hex_palettes}
\title{Palettes from https://www.color-hex.com}
\format{
A named list of color palettes.
}
\usage{
color_hex_palettes
}
\description{
Palettes from https://www.color-hex.com
}
\keyword{datasets}
|
/man/color_hex_palettes.Rd
|
permissive
|
EmilHvitfeldt/palette2vec
|
R
| false
| true
| 348
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{color_hex_palettes}
\alias{color_hex_palettes}
\title{Palettes from https://www.color-hex.com}
\format{
A named list of color palettes.
}
\usage{
color_hex_palettes
}
\description{
Palettes from https://www.color-hex.com
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gam_loglikelihood.R
\name{gam_loglikelihood}
\alias{gam_loglikelihood}
\title{Compute negative log-likelihood of gamma HMM parameters}
\usage{
gam_loglikelihood(
working_params,
x,
design,
num_states,
num_variables,
num_subjects,
num_covariates,
state_dep_dist_pooled = FALSE
)
}
\arguments{
\item{working_params}{A vector of the working gamma parameters for the
HMM.}
\item{x}{The data to be fit with an HMM in the form of a 3D array. The
first index (row) corresponds to time, the second (column) to the
variable number, and the third (matrix number) to the subject number.}
\item{design}{A list of design matrices for each subject with each row
indicating the time and each column indicating the value of the
covariate.}
\item{num_states}{The number of states in the desired HMM.}
\item{num_variables}{The number of variables in the data.}
\item{num_subjects}{The number of subjects/trials that generated the data.}
\item{num_covariates}{The number of covariates in the data that the
transition probability matrix depends on.}
\item{state_dep_dist_pooled}{A logical variable indicating whether the
state dependent distribution parameters \code{alpha} and \code{theta} should be
treated as equal for all subjects.}
}
\value{
A number indicating the negative loglikelihood
}
\description{
This function computes the negative log-likelihood that the given gamma
HMM parameters could have generated the data being fit.
}
|
/man/gam_loglikelihood.Rd
|
permissive
|
simonecollier/lizardHMM
|
R
| false
| true
| 1,521
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gam_loglikelihood.R
\name{gam_loglikelihood}
\alias{gam_loglikelihood}
\title{Compute negative log-likelihood of gamma HMM parameters}
\usage{
gam_loglikelihood(
working_params,
x,
design,
num_states,
num_variables,
num_subjects,
num_covariates,
state_dep_dist_pooled = FALSE
)
}
\arguments{
\item{working_params}{A vector of the working gamma parameters for the
HMM.}
\item{x}{The data to be fit with an HMM in the form of a 3D array. The
first index (row) corresponds to time, the second (column) to the
variable number, and the third (matrix number) to the subject number.}
\item{design}{A list of design matrices for each subject with each row
indicating the time and each column indicating the value of the
covariate.}
\item{num_states}{The number of states in the desired HMM.}
\item{num_variables}{The number of variables in the data.}
\item{num_subjects}{The number of subjects/trials that generated the data.}
\item{num_covariates}{The number of covariates in the data that the
transition probability matrix depends on.}
\item{state_dep_dist_pooled}{A logical variable indicating whether the
state dependent distribution parameters \code{alpha} and \code{theta} should be
treated as equal for all subjects.}
}
\value{
A number indicating the negative loglikelihood
}
\description{
This function computes the negative log-likelihood that the given gamma
HMM parameters could have generated the data being fit.
}
|
#source('http://bioconductor.org/biocLite.R');
#biocLite('Biostrings');
#biocLite('hgu95av2.db');
#biocLite('annotate');
#library('Biostrings');
#library('hgu95av2.db');
#library('annotate');
#hgu95av2ACCNUM
# Map Affymetrix probe IDs to GenBank accession numbers, fetch the first five
# sequences, and score every pairwise global alignment.
# Requires Bioconductor packages: Biostrings, hgu95av2.db, annotate.
plik <- read.table('affy.txt')
tab <- hgu95av2ACCNUM                 # probe-ID -> accession-number map
mapped_probes <- mappedkeys(tab)      # keep only probes that have a mapping
tab_lista <- as.list(tab[mapped_probes])
if (length(tab_lista) > 0) {
  tab_lista_w <- tab_lista[1:5]       # work with the first five mappings only
}
# getSEQ: download the nucleotide sequence for each accession number.
# Preallocate instead of growing the vector inside the loop.
wektorseq <- character(length(tab_lista_w))
for (i in seq_along(tab_lista_w)) {
  # Was tab_lista[[i]]; identical for i <= 5 but tab_lista_w is what the
  # loop is actually iterating over.
  wektorseq[i] <- getSEQ(tab_lista_w[[i]])
}
# pairwiseAlignment: global alignment score for every unordered pair of
# sequences. Only the lower triangle (i > j) is filled; the rest stays NA.
wyn <- matrix(ncol = 5, nrow = 5)
for (i in seq_len(5)) {
  for (j in seq_len(5)) {
    if (i > j) {
      wynik <- pairwiseAlignment(wektorseq[i], wektorseq[j],
                                 type = 'global', gapOpening = -5)
      wyn[i, j] <- wynik@score
    }
  }
}
|
/more basic stuff/lab2.R
|
no_license
|
wopoczynski/R
|
R
| false
| false
| 716
|
r
|
#source('http://bioconductor.org/biocLite.R');
#biocLite('Biostrings');
#biocLite('hgu95av2.db');
#biocLite('annotate');
#library('Biostrings');
#library('hgu95av2.db');
#library('annotate');
#hgu95av2ACCNUM
# Map Affymetrix probe IDs to GenBank accession numbers, fetch the first five
# sequences, and score every pairwise global alignment.
# Requires Bioconductor packages: Biostrings, hgu95av2.db, annotate.
plik <- read.table('affy.txt')
tab <- hgu95av2ACCNUM                 # probe-ID -> accession-number map
mapped_probes <- mappedkeys(tab)      # keep only probes that have a mapping
tab_lista <- as.list(tab[mapped_probes])
if (length(tab_lista) > 0) {
  tab_lista_w <- tab_lista[1:5]       # work with the first five mappings only
}
# getSEQ: download the nucleotide sequence for each accession number.
# Preallocate instead of growing the vector inside the loop.
wektorseq <- character(length(tab_lista_w))
for (i in seq_along(tab_lista_w)) {
  # Was tab_lista[[i]]; identical for i <= 5 but tab_lista_w is what the
  # loop is actually iterating over.
  wektorseq[i] <- getSEQ(tab_lista_w[[i]])
}
# pairwiseAlignment: global alignment score for every unordered pair of
# sequences. Only the lower triangle (i > j) is filled; the rest stays NA.
wyn <- matrix(ncol = 5, nrow = 5)
for (i in seq_len(5)) {
  for (j in seq_len(5)) {
    if (i > j) {
      wynik <- pairwiseAlignment(wektorseq[i], wektorseq[j],
                                 type = 'global', gapOpening = -5)
      wyn[i, j] <- wynik@score
    }
  }
}
|
# NOTE(review): hard-coded absolute path; adjust (or use a project-relative
# working directory) before running on another machine.
setwd("C:/Users/marin/Desktop/Data Angels")
library(RCurl)
library(dplyr)
library(ggplot2)
library(RColorBrewer)
library(pals)
library(colorRamps)
library(gridExtra)
library(scales)
url <- "https://raw.githubusercontent.com/CityOfLosAngeles/Data-Angels/master/Fall%202018%20Projects/LA_Outmigration/Data/ipums_clean.csv"
#download.file(url, "ipums_clean.csv", method="curl")
#full description of the data here https://github.com/CityOfLosAngeles/Data-Angels/tree/master/Fall%202018%20Projects/LA_Outmigration/Data
# Person-level records of LA out-migrants. PERWT is summed below to weight
# counts -- presumably the IPUMS person weight; confirm against the codebook.
data <- read.csv("ipums_clean.csv")
# Survey year as a factor so ggplot uses discrete colour scales.
data$YEAR <- as.factor(data$YEAR)
########################################### BY AGE ###############################################################
# Weighted out-migrant counts by single year of age, one line per year.
by_age <- data %>%
  group_by(YEAR, AGE) %>%
  summarise (No_movers = sum(PERWT))
plot_age <- ggplot(by_age, aes(AGE, No_movers, color = YEAR)) + geom_line() + scale_color_brewer(palette="Paired") +
  scale_x_continuous(breaks=seq(0,100,10)) + ggtitle("LA Out Migration by Age") +
  xlab("Age")+
  ylab("Number of Outmigrators")+
  theme(panel.background = element_blank(),
        axis.line = element_line(colour = "black"),
        panel.grid.major.y = element_line(colour="grey"),
        plot.title = element_text(hjust = 0.5,size = 15, face = "bold"))
png("LA_Outmigration_by_AGE.png", width = 1698, height = 1056, res = 180)
# print() so the plot is written even when this script is source()d.
print(plot_age)
dev.off()
#splitting data into age groups (groups defined by census)
Age.Group.Description <- c("Age.1_4", "Age.5_17", "Age.18_19", "Age.20_24", "Age.25_29", "Age.30_34", "Age.35_39",
                           "Age.40_44", "Age.45_49", "Age.50_54", "Age.55_59", "Age.60_64", "Age.65_69", "Age.70_74",
                           "Age.75_and_over")
data$AGE.GROUP<-cut(data$AGE, c(0,4,17,19,24,29,34, 39, 44, 49, 54, 59, 64, 69, 74,100), Age.Group.Description)
by_age_group <- data %>%
  group_by(YEAR, AGE.GROUP) %>%
  summarise (No_movers = sum(PERWT))
#plot by age groups (x axis), years in color
plot_age_group <- ggplot(by_age_group, aes(AGE.GROUP, No_movers,group = YEAR, color = YEAR)) +
  geom_line() +
  ggtitle("LA Outmigration by Age")+
  scale_color_brewer(palette="Paired") +
  theme(axis.text.x = element_text(angle = 90, hjust = 1),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black"),
        panel.grid.major.y = element_line(colour="grey"))
plot_age_group
#by year, age in color
# Fix: getPalette() and colourCount were used below without ever being
# defined, so this plot errored at draw time. "Paired" tops out at 12
# colours, so interpolate it to one colour per age group.
colourCount <- length(unique(by_age_group$AGE.GROUP))
getPalette <- colorRampPalette(brewer.pal(12, "Paired"))
plot_age <- ggplot(by_age_group, aes(YEAR, No_movers,group = AGE.GROUP, color = AGE.GROUP)) +
  geom_line() +
  ggtitle("LA Outmigration by Age")+
  scale_color_manual(values = getPalette(colourCount)) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1),panel.background = element_blank(),
        axis.line = element_line(colour = "black"), panel.grid.major.y = element_line(colour="grey"))
plot_age
########################################### BY SEX ###############################################################
# Share of out-migrants by sex per year, drawn as a 100%-stacked bar chart.
by_sex <- summarise(group_by(data, YEAR, SEX), No_movers = sum(PERWT))
plot_by_sex <- ggplot(by_sex, aes(x = YEAR, y = No_movers, fill = SEX)) +
  geom_bar(position = "fill", stat = "identity") +
  ggtitle("LA Out Migration by Sex") +
  xlab("Year") +
  ylab("Percentage of Outmigators") +
  scale_y_continuous(labels = scales::percent) +
  theme(panel.background = element_blank(),
        axis.text.x = element_text(angle = 0, hjust = 0.5),
        plot.title = element_text(hjust = 0.5, size = 15, face = "bold"))
png("LA_Outmigration_by_SEX.png", width = 1698, height = 1056, res = 180)
plot_by_sex
dev.off()
##############################BY Marital status#######################################################
# Weighted out-migrant counts by marital status, one line per status.
by_marst <- data %>%
  group_by(YEAR, MARST) %>%
  summarise (No_movers = sum(PERWT))
plot_marst <- ggplot(by_marst, aes(YEAR, No_movers,group = MARST, color = MARST)) +
  geom_line(size=1.5) +
  ggtitle("LA Out Migration by Marital Status")+
  xlab("Year")+
  ylab("Number of Outmigrators")+
  scale_color_brewer(palette="Paired") +
  theme(axis.text.x = element_text(angle = 0, hjust = 0.5),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black"),
        panel.grid.major.y = element_line(colour="grey"),
        plot.title = element_text(hjust = 0.5,size = 15, face = "bold"))+
  scale_y_continuous(labels = comma)
png("LA_Outmigration_by_MARST.png", width = 1698, height = 1056, res = 180)
# print() so the file is written even when this script is source()d.
print(plot_marst)
dev.off()
# Marital-status shares of movers in the two endpoint years.
by_marst_06_17 <- by_marst[by_marst$YEAR == 2006 | by_marst$YEAR == 2017, ]
# Fix: the original pair of assignments recycled the 2006 shares over the
# whole column and then overwrote the 2006 rows with the 2017 shares,
# leaving each year labelled with the other year's values. Compute each
# year's share within that year only.
yr06 <- by_marst_06_17$YEAR == 2006
yr17 <- by_marst_06_17$YEAR == 2017
by_marst_06_17$perc <- NA_real_
by_marst_06_17$perc[yr06] <- by_marst_06_17$No_movers[yr06]/sum(by_marst_06_17$No_movers[yr06])
by_marst_06_17$perc[yr17] <- by_marst_06_17$No_movers[yr17]/sum(by_marst_06_17$No_movers[yr17])
pie_marst <- ggplot(by_marst_06_17, aes(x="", y=No_movers, fill=MARST))+
  geom_bar(width = 1, stat = "identity", position = "fill") + coord_polar("y", start=0)+
  ggtitle("LA Outmigration by Marital Status for years 2006 and 2017")+
  ylab("Percentage of movers") +
  facet_grid(cols = vars(YEAR)) +
  theme(axis.title.y=element_blank(),
        axis.line = element_blank(),
        axis.text = element_blank(),
        axis.ticks = element_blank())
pie_marst
##############################BY RACE#######################################################
# Weighted out-migrant counts per year, one line per race group.
by_race <- summarise(group_by(data, YEAR, RACE), No_movers = sum(PERWT))
plot_race <- ggplot(by_race, aes(x = YEAR, y = No_movers, group = RACE, color = RACE)) +
  geom_line() +
  ggtitle("LA Outmigration by Race") +
  scale_color_brewer(palette = "Dark2") +
  scale_y_continuous(labels = comma) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black"),
        panel.grid.major.y = element_line(colour = "grey"))
plot_race
############################### INDIVIDUAL RENTERS #######################################
#selecting the rows with no additional family income and filtering out missing values and zero rent rows
# FTOTINC <= INCTOT keeps people whose own income covers the family total;
# 9999999 is presumably the IPUMS missing-income code -- confirm in codebook.
ind_rent <- data[data$FTOTINC<=data$INCTOT & data$INCTOT!=9999999 & data$RENT!=0,]
#plotting individual income renters by gender
by_sex <- ind_rent %>%
  group_by(YEAR, SEX) %>%
  summarise (No_movers = sum(PERWT))
plot_by_sex <- ggplot(by_sex,aes(x = YEAR, y = No_movers,fill = SEX)) +
  geom_bar(position = "fill",stat = "identity") +
  theme(panel.background = element_blank(),
        axis.text.x = element_text(angle = 0, hjust = 0.5),
        plot.title = element_text(hjust = 0.5,size = 15, face = "bold"))+
  ggtitle("LA Out Migration by Sex (Individual Income Renters)")+
  xlab("Year")+
  ylab("Percentage of Outmigators")+
  scale_y_continuous(labels = scales::percent)
png("LA_Outmigration_individual_by_SEX.png", width = 1698, height = 1056, res = 180)
plot_by_sex
dev.off()
#plotting individual income renters by race
#summarizing information about the residents by race
la_residents <- read.csv("ipums_la_residents.csv")
by_race_residents <- la_residents %>%
  group_by(YEAR, RACE) %>%
  summarise (No_residents = sum(PERWT))
# NOTE(review): this uses the full `data` (all movers), not ind_rent, even
# though the plot below is titled "(Individual Income Renters)" -- confirm
# which population is intended.
by_race_out <- data %>%
  group_by(YEAR, RACE) %>%
  summarise (No_movers = sum(PERWT))
# Out-migration rate: movers as a share of residents + movers, per race/year.
by_race <- merge(by_race_residents, by_race_out, by = c("YEAR", "RACE"))
by_race$perc_out <- round(by_race$No_movers/(by_race$No_residents+by_race$No_movers)*100,2)
by_race$YEAR <- as.factor(by_race$YEAR)
by_race <- by_race[by_race$RACE!="Other",]
plot_race <- ggplot(by_race, aes(YEAR, perc_out,group = RACE, color = RACE)) +
  geom_line(size=1.5) +
  ggtitle("LA Out Migration by Race (Individual Income Renters)")+
  xlab("Year")+
  ylab("Percentage of Outmigators") +
  scale_color_brewer(palette="Paired") +
  theme(axis.text.x = element_text(angle = 0, hjust = 0.5),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black"),
        panel.grid.major.y = element_line(colour="grey"),
        plot.title = element_text(hjust = 0.5,size = 15, face = "bold"))+
  scale_y_continuous(labels = comma)
png("LA_Outmigration_individual_by_RACE.png", width = 1698, height = 1056, res = 180)
plot_race
dev.off()
#plotting marital status of individual renters
by_marst <- ind_rent %>%
  group_by(YEAR, MARST) %>%
  summarise (No_movers = sum(PERWT))
plot_marst <- ggplot(by_marst, aes(YEAR, No_movers,group = MARST, color = MARST)) +
  geom_line(size=1.5) +
  ggtitle("LA Out Migration by Marital Status (Individual Income Renters)")+
  xlab("Year")+
  ylab("Number of Outmigrators")+
  scale_color_brewer(palette="Paired") +
  theme(axis.text.x = element_text(angle = 0, hjust = 0.5),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black"),
        panel.grid.major.y = element_line(colour="grey"),
        plot.title = element_text(hjust = 0.5,size = 15, face = "bold"))+
  scale_y_continuous(labels = comma)
png("LA_Outmigration_individual_by_MARST.png", width = 1698, height = 1056, res = 180)
plot_marst
dev.off()
#plotting age of individual renters
by_age <- ind_rent %>%
  group_by(YEAR, AGE) %>%
  summarise (No_movers = sum(PERWT))
plot_age <- ggplot(by_age, aes(AGE, No_movers, color = YEAR)) + geom_line() + scale_color_brewer(palette="Paired") +
  scale_x_continuous(breaks=seq(0,100,10)) +
  ggtitle("LA Out Migration by Age (Individual Income Renters)") +
  xlab("Age")+
  ylab("Number of Outmigrators")+
  theme(panel.background = element_blank(),
        axis.line = element_line(colour = "black"),
        panel.grid.major.y = element_line(colour="grey"),
        plot.title = element_text(hjust = 0.5,size = 15, face = "bold"))
png("LA_Outmigration_individual_by_AGE.png", width = 1698, height = 1056, res = 180)
plot_age
dev.off()
|
/Fall 2018 Projects/LA_Outmigration/Visuals/ipums_demographic.R
|
no_license
|
CityOfLosAngeles/Data-Angels
|
R
| false
| false
| 10,636
|
r
|
# NOTE(review): hard-coded absolute path; adjust (or use a project-relative
# working directory) before running on another machine.
setwd("C:/Users/marin/Desktop/Data Angels")
library(RCurl)
library(dplyr)
library(ggplot2)
library(RColorBrewer)
library(pals)
library(colorRamps)
library(gridExtra)
library(scales)
url <- "https://raw.githubusercontent.com/CityOfLosAngeles/Data-Angels/master/Fall%202018%20Projects/LA_Outmigration/Data/ipums_clean.csv"
#download.file(url, "ipums_clean.csv", method="curl")
#full description of the data here https://github.com/CityOfLosAngeles/Data-Angels/tree/master/Fall%202018%20Projects/LA_Outmigration/Data
# Person-level records of LA out-migrants. PERWT is summed below to weight
# counts -- presumably the IPUMS person weight; confirm against the codebook.
data <- read.csv("ipums_clean.csv")
# Survey year as a factor so ggplot uses discrete colour scales.
data$YEAR <- as.factor(data$YEAR)
########################################### BY AGE ###############################################################
# Weighted out-migrant counts by single year of age, one line per year.
by_age <- data %>%
  group_by(YEAR, AGE) %>%
  summarise (No_movers = sum(PERWT))
plot_age <- ggplot(by_age, aes(AGE, No_movers, color = YEAR)) + geom_line() + scale_color_brewer(palette="Paired") +
  scale_x_continuous(breaks=seq(0,100,10)) + ggtitle("LA Out Migration by Age") +
  xlab("Age")+
  ylab("Number of Outmigrators")+
  theme(panel.background = element_blank(),
        axis.line = element_line(colour = "black"),
        panel.grid.major.y = element_line(colour="grey"),
        plot.title = element_text(hjust = 0.5,size = 15, face = "bold"))
png("LA_Outmigration_by_AGE.png", width = 1698, height = 1056, res = 180)
# print() so the plot is written even when this script is source()d.
print(plot_age)
dev.off()
#splitting data into age groups (groups defined by census)
Age.Group.Description <- c("Age.1_4", "Age.5_17", "Age.18_19", "Age.20_24", "Age.25_29", "Age.30_34", "Age.35_39",
                           "Age.40_44", "Age.45_49", "Age.50_54", "Age.55_59", "Age.60_64", "Age.65_69", "Age.70_74",
                           "Age.75_and_over")
data$AGE.GROUP<-cut(data$AGE, c(0,4,17,19,24,29,34, 39, 44, 49, 54, 59, 64, 69, 74,100), Age.Group.Description)
by_age_group <- data %>%
  group_by(YEAR, AGE.GROUP) %>%
  summarise (No_movers = sum(PERWT))
#plot by age groups (x axis), years in color
plot_age_group <- ggplot(by_age_group, aes(AGE.GROUP, No_movers,group = YEAR, color = YEAR)) +
  geom_line() +
  ggtitle("LA Outmigration by Age")+
  scale_color_brewer(palette="Paired") +
  theme(axis.text.x = element_text(angle = 90, hjust = 1),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black"),
        panel.grid.major.y = element_line(colour="grey"))
plot_age_group
#by year, age in color
# Fix: getPalette() and colourCount were used below without ever being
# defined, so this plot errored at draw time. "Paired" tops out at 12
# colours, so interpolate it to one colour per age group.
colourCount <- length(unique(by_age_group$AGE.GROUP))
getPalette <- colorRampPalette(brewer.pal(12, "Paired"))
plot_age <- ggplot(by_age_group, aes(YEAR, No_movers,group = AGE.GROUP, color = AGE.GROUP)) +
  geom_line() +
  ggtitle("LA Outmigration by Age")+
  scale_color_manual(values = getPalette(colourCount)) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1),panel.background = element_blank(),
        axis.line = element_line(colour = "black"), panel.grid.major.y = element_line(colour="grey"))
plot_age
########################################### BY SEX ###############################################################
# Share of out-migrants by sex per year, drawn as a 100%-stacked bar chart.
by_sex <- summarise(group_by(data, YEAR, SEX), No_movers = sum(PERWT))
plot_by_sex <- ggplot(by_sex, aes(x = YEAR, y = No_movers, fill = SEX)) +
  geom_bar(position = "fill", stat = "identity") +
  ggtitle("LA Out Migration by Sex") +
  xlab("Year") +
  ylab("Percentage of Outmigators") +
  scale_y_continuous(labels = scales::percent) +
  theme(panel.background = element_blank(),
        axis.text.x = element_text(angle = 0, hjust = 0.5),
        plot.title = element_text(hjust = 0.5, size = 15, face = "bold"))
png("LA_Outmigration_by_SEX.png", width = 1698, height = 1056, res = 180)
plot_by_sex
dev.off()
##############################BY Marital status#######################################################
# Weighted out-migrant counts by marital status, one line per status.
by_marst <- data %>%
  group_by(YEAR, MARST) %>%
  summarise (No_movers = sum(PERWT))
plot_marst <- ggplot(by_marst, aes(YEAR, No_movers,group = MARST, color = MARST)) +
  geom_line(size=1.5) +
  ggtitle("LA Out Migration by Marital Status")+
  xlab("Year")+
  ylab("Number of Outmigrators")+
  scale_color_brewer(palette="Paired") +
  theme(axis.text.x = element_text(angle = 0, hjust = 0.5),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black"),
        panel.grid.major.y = element_line(colour="grey"),
        plot.title = element_text(hjust = 0.5,size = 15, face = "bold"))+
  scale_y_continuous(labels = comma)
png("LA_Outmigration_by_MARST.png", width = 1698, height = 1056, res = 180)
# print() so the file is written even when this script is source()d.
print(plot_marst)
dev.off()
# Marital-status shares of movers in the two endpoint years.
by_marst_06_17 <- by_marst[by_marst$YEAR == 2006 | by_marst$YEAR == 2017, ]
# Fix: the original pair of assignments recycled the 2006 shares over the
# whole column and then overwrote the 2006 rows with the 2017 shares,
# leaving each year labelled with the other year's values. Compute each
# year's share within that year only.
yr06 <- by_marst_06_17$YEAR == 2006
yr17 <- by_marst_06_17$YEAR == 2017
by_marst_06_17$perc <- NA_real_
by_marst_06_17$perc[yr06] <- by_marst_06_17$No_movers[yr06]/sum(by_marst_06_17$No_movers[yr06])
by_marst_06_17$perc[yr17] <- by_marst_06_17$No_movers[yr17]/sum(by_marst_06_17$No_movers[yr17])
pie_marst <- ggplot(by_marst_06_17, aes(x="", y=No_movers, fill=MARST))+
  geom_bar(width = 1, stat = "identity", position = "fill") + coord_polar("y", start=0)+
  ggtitle("LA Outmigration by Marital Status for years 2006 and 2017")+
  ylab("Percentage of movers") +
  facet_grid(cols = vars(YEAR)) +
  theme(axis.title.y=element_blank(),
        axis.line = element_blank(),
        axis.text = element_blank(),
        axis.ticks = element_blank())
pie_marst
##############################BY RACE#######################################################
# Weighted out-migrant counts per year, one line per race group.
by_race <- summarise(group_by(data, YEAR, RACE), No_movers = sum(PERWT))
plot_race <- ggplot(by_race, aes(x = YEAR, y = No_movers, group = RACE, color = RACE)) +
  geom_line() +
  ggtitle("LA Outmigration by Race") +
  scale_color_brewer(palette = "Dark2") +
  scale_y_continuous(labels = comma) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black"),
        panel.grid.major.y = element_line(colour = "grey"))
plot_race
############################### INDIVIDUAL RENTERS #######################################
#selecting the rows with no additional family income and filtering out missing values and zero rent rows
# FTOTINC <= INCTOT keeps people whose own income covers the family total;
# 9999999 is presumably the IPUMS missing-income code -- confirm in codebook.
ind_rent <- data[data$FTOTINC<=data$INCTOT & data$INCTOT!=9999999 & data$RENT!=0,]
#plotting individual income renters by gender
by_sex <- ind_rent %>%
  group_by(YEAR, SEX) %>%
  summarise (No_movers = sum(PERWT))
plot_by_sex <- ggplot(by_sex,aes(x = YEAR, y = No_movers,fill = SEX)) +
  geom_bar(position = "fill",stat = "identity") +
  theme(panel.background = element_blank(),
        axis.text.x = element_text(angle = 0, hjust = 0.5),
        plot.title = element_text(hjust = 0.5,size = 15, face = "bold"))+
  ggtitle("LA Out Migration by Sex (Individual Income Renters)")+
  xlab("Year")+
  ylab("Percentage of Outmigators")+
  scale_y_continuous(labels = scales::percent)
png("LA_Outmigration_individual_by_SEX.png", width = 1698, height = 1056, res = 180)
plot_by_sex
dev.off()
#plotting individual income renters by race
#summarizing information about the residents by race
la_residents <- read.csv("ipums_la_residents.csv")
by_race_residents <- la_residents %>%
  group_by(YEAR, RACE) %>%
  summarise (No_residents = sum(PERWT))
# NOTE(review): this uses the full `data` (all movers), not ind_rent, even
# though the plot below is titled "(Individual Income Renters)" -- confirm
# which population is intended.
by_race_out <- data %>%
  group_by(YEAR, RACE) %>%
  summarise (No_movers = sum(PERWT))
# Out-migration rate: movers as a share of residents + movers, per race/year.
by_race <- merge(by_race_residents, by_race_out, by = c("YEAR", "RACE"))
by_race$perc_out <- round(by_race$No_movers/(by_race$No_residents+by_race$No_movers)*100,2)
by_race$YEAR <- as.factor(by_race$YEAR)
by_race <- by_race[by_race$RACE!="Other",]
plot_race <- ggplot(by_race, aes(YEAR, perc_out,group = RACE, color = RACE)) +
  geom_line(size=1.5) +
  ggtitle("LA Out Migration by Race (Individual Income Renters)")+
  xlab("Year")+
  ylab("Percentage of Outmigators") +
  scale_color_brewer(palette="Paired") +
  theme(axis.text.x = element_text(angle = 0, hjust = 0.5),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black"),
        panel.grid.major.y = element_line(colour="grey"),
        plot.title = element_text(hjust = 0.5,size = 15, face = "bold"))+
  scale_y_continuous(labels = comma)
png("LA_Outmigration_individual_by_RACE.png", width = 1698, height = 1056, res = 180)
plot_race
dev.off()
#plotting marital status of individual renters
by_marst <- ind_rent %>%
  group_by(YEAR, MARST) %>%
  summarise (No_movers = sum(PERWT))
plot_marst <- ggplot(by_marst, aes(YEAR, No_movers,group = MARST, color = MARST)) +
  geom_line(size=1.5) +
  ggtitle("LA Out Migration by Marital Status (Individual Income Renters)")+
  xlab("Year")+
  ylab("Number of Outmigrators")+
  scale_color_brewer(palette="Paired") +
  theme(axis.text.x = element_text(angle = 0, hjust = 0.5),
        panel.background = element_blank(),
        axis.line = element_line(colour = "black"),
        panel.grid.major.y = element_line(colour="grey"),
        plot.title = element_text(hjust = 0.5,size = 15, face = "bold"))+
  scale_y_continuous(labels = comma)
png("LA_Outmigration_individual_by_MARST.png", width = 1698, height = 1056, res = 180)
plot_marst
dev.off()
#plotting age of individual renters
by_age <- ind_rent %>%
  group_by(YEAR, AGE) %>%
  summarise (No_movers = sum(PERWT))
plot_age <- ggplot(by_age, aes(AGE, No_movers, color = YEAR)) + geom_line() + scale_color_brewer(palette="Paired") +
  scale_x_continuous(breaks=seq(0,100,10)) +
  ggtitle("LA Out Migration by Age (Individual Income Renters)") +
  xlab("Age")+
  ylab("Number of Outmigrators")+
  theme(panel.background = element_blank(),
        axis.line = element_line(colour = "black"),
        panel.grid.major.y = element_line(colour="grey"),
        plot.title = element_text(hjust = 0.5,size = 15, face = "bold"))
png("LA_Outmigration_individual_by_AGE.png", width = 1698, height = 1056, res = 180)
plot_age
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download_tar.R
\name{URLs_DOGS}
\alias{URLs_DOGS}
\title{DOGS dataset}
\usage{
URLs_DOGS(filename = "DOGS", untar = TRUE)
}
\arguments{
\item{filename}{the name of the file}
\item{untar}{logical, whether to untar the '.tgz' file}
}
\description{
download DOGS dataset
}
|
/man/URLs_DOGS.Rd
|
permissive
|
ysnghr/fastai
|
R
| false
| true
| 349
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download_tar.R
\name{URLs_DOGS}
\alias{URLs_DOGS}
\title{DOGS dataset}
\usage{
URLs_DOGS(filename = "DOGS", untar = TRUE)
}
\arguments{
\item{filename}{the name of the file}
\item{untar}{logical, whether to untar the '.tgz' file}
}
\description{
download DOGS dataset
}
|
#' Identify biomarkers
#'
#' Trains a classifier on taxon-level log-CPM abundances, keeps the organisms
#' whose variable importance lies in the top \code{percent_top_biomarker}
#' fraction, refits the model on that subset, and returns the selected
#' biomarkers together with an importance plot and a cross-validated ROC
#' curve.
#'
#' @param MAE A multi-assay experiment object
#' @param tax_level The taxon level used for organisms
#' @param input_select_target_biomarker Which condition is the target condition
#' @param nfolds number of splits in CV
#' @param nrepeats number of CVs with different random splits
#' @param seed for repeatable research
#' @param percent_top_biomarker Top importance percentage to pick biomarker
#' @param model_name one of "logistic regression", "svm", "gbm",
#'   "random forest"
#'
#' @return A list with elements \code{biomarker} (data frame of selected
#'   organisms), \code{importance_plot} and \code{roc_plot}
#'
#' @import MultiAssayExperiment
#' @import glmnet
#' @import DMwR
#' @import caret
#' @import plotROC
#' @import forcats
#' @importFrom ggplot2 geom_col aes coord_flip theme_bw coord_equal annotate
#'
#' @examples
#' data_dir = system.file("extdata/MAE.rds", package = "animalcules")
#' toy_data <- readRDS(data_dir)
#' p <- find_biomarker(toy_data,
#'                     tax_level="genus",
#'                     input_select_target_biomarker=c("DISEASE"),
#'                     nfolds = 3,
#'                     nrepeats = 3,
#'                     seed = 99,
#'                     percent_top_biomarker = 0.2,
#'                     model_name = "logistic regression")
#' p
#'
#'
#' @export
find_biomarker <- function(MAE,
                           tax_level,
                           input_select_target_biomarker,
                           nfolds = 3,
                           nrepeats = 3,
                           seed = 99,
                           percent_top_biomarker = 0.2,
                           model_name = c("logistic regression", "random forest")) {
    # Fix: the default is a length-2 vector, so the `model_name == "..."`
    # comparisons below error with "the condition has length > 1" on
    # R >= 4.2 when the argument is left at its default. Take the first
    # element and validate it; "svm" and "gbm" are accepted too because the
    # body already supports them.
    model_name <- match.arg(model_name[1L],
                            c("logistic regression", "svm", "gbm",
                              "random forest"))
    ## SEED
    # bioC not suggesst add set seed function in R code
    # set.seed(seed)
    ## tables from MAE
    microbe <- MAE[['MicrobeGenetics']] #double bracket subsetting is easier
    tax_table <- as.data.frame(rowData(microbe)) # organism x taxlev
    sam_table <- as.data.frame(colData(microbe)) # sample x condition
    counts_table <- as.data.frame(assays(microbe))[,rownames(sam_table)] # organism x sample
    # Sum counts by taxon level and convert to log10 CPM; transpose so rows
    # are samples and columns are organisms (model features).
    logcpm_table <- counts_table %>%
        upsample_counts(tax_table, tax_level) %>%
        counts_to_logcpm() %>%
        base::t() %>%
        base::as.data.frame()
    # Append the target condition as column `y`.
    logcpm_table[,'y'] <- sam_table %>%
        dplyr::pull(input_select_target_biomarker)
    # Repeated k-fold CV with SMOTE resampling for class imbalance;
    # predictions are saved so the ROC curve can be built afterwards.
    fitControl <- caret::trainControl(
        method = "repeatedcv",
        number = nfolds,
        repeats = nrepeats,
        classProbs = TRUE,
        summaryFunction = twoClassSummary,
        sampling = "smote",
        savePredictions = TRUE)
    # Fit a caret model for the chosen algorithm; shared by the initial fit
    # (all organisms) and the refit (selected biomarkers only).
    train_classifier <- function(dat) {
        if (model_name == "logistic regression") {
            caret::train(y ~ ., data = dat, method = "glmnet",
                         tuneLength = 5, trControl = fitControl,
                         metric = "ROC")
        } else if (model_name == "svm") {
            caret::train(y ~ ., data = dat, method = "svmLinear",
                         tuneLength = 5, trControl = fitControl,
                         metric = "ROC")
        } else if (model_name == "gbm") {
            # verbose = FALSE is passed through to gbm() itself.
            caret::train(y ~ ., data = dat, method = "gbm",
                         trControl = fitControl, tuneLength = 5,
                         metric = "ROC", verbose = FALSE)
        } else { # random forest
            # importance = "impurity" is a ranger-specific parameter.
            caret::train(y ~ ., data = dat, method = "ranger",
                         trControl = fitControl, tuneLength = 5,
                         metric = "ROC", importance = "impurity")
        }
    }
    # Keep the rows of an importance table above the
    # (1 - percent_top_biomarker) quantile, sorted ascending for plotting.
    top_importance <- function(imp) {
        imp %>%
            tibble::rownames_to_column() %>%
            dplyr::rename(biomarker = rowname) %>%
            dplyr::arrange(importance) %>%
            dplyr::filter(importance >
                              quantile(importance, 1 - percent_top_biomarker))
    }
    model_fit <- train_classifier(logcpm_table)
    # Extract and rank the variable-importance scores.
    if (model_name == "svm") {
        # caret returns one importance column per class for svmLinear;
        # keep the first and give it a uniform name.
        svm_importance <- caret::varImp(model_fit)$importance
        svm_importance[,2] <- NULL
        colnames(svm_importance) <- "importance"
        ranked <- top_importance(svm_importance)
    } else {
        ranked <- caret::varImp(model_fit)$importance %>%
            base::as.data.frame() %>%
            dplyr::rename(importance = Overall) %>%
            top_importance()
    }
    biomarker <- dplyr::pull(ranked, biomarker)
    importance_plot <- ranked %>%
        dplyr::mutate(biomarker = forcats::fct_inorder(biomarker)) %>%
        ggplot2::ggplot()+
        geom_col(aes(x = biomarker, y = importance))+
        coord_flip()+
        theme_bw()
    # Retrain using the selected biomarkers only. all_of() replaces the
    # deprecated external-vector form of select().
    logcpm_table <- logcpm_table %>%
        dplyr::select(dplyr::all_of(biomarker), y)
    model_fit <- train_classifier(logcpm_table)
    # Build the cross-validated ROC curve: d is the observed class recoded
    # to 0/1, m is the predicted probability of the second factor level.
    prob_pred <- as.numeric(model_fit$pred$obs)
    prob_pred[prob_pred == 1] <- 0
    prob_pred[prob_pred == 2] <- 1
    df_roc <- data.frame(m = model_fit$pred[,which(colnames(model_fit$pred)
                                == levels(model_fit$pred$obs)[2])],
                         d = prob_pred,
                         stringsAsFactors = FALSE)
    g <- ggplot(df_roc, aes(m=m, d=d)) +
        geom_roc(n.cuts=0) +
        coord_equal() +
        style_roc()
    roc_plot <- g + annotate("text", x=0.75, y=0.25,
                             label=paste("AUC =", round((calc_auc(g))$AUC, 4)))
    biomarker <- data.frame(biomarker_list = biomarker)
    # output a list
    list_output <- list(biomarker = biomarker,
                        importance_plot = importance_plot,
                        roc_plot = roc_plot)
    return(list_output)
}
|
/R/find_biomarker.R
|
permissive
|
it-fm/animalcules
|
R
| false
| false
| 9,608
|
r
|
#' Identify biomarkers
#'
#' Trains a classifier on taxon-level log-CPM abundances, keeps the organisms
#' whose variable importance lies in the top \code{percent_top_biomarker}
#' fraction, refits the model on that subset, and returns the selected
#' biomarkers together with an importance plot and a cross-validated ROC
#' curve.
#'
#' @param MAE A multi-assay experiment object
#' @param tax_level The taxon level used for organisms
#' @param input_select_target_biomarker Which condition is the target condition
#' @param nfolds number of splits in CV
#' @param nrepeats number of CVs with different random splits
#' @param seed for repeatable research
#' @param percent_top_biomarker Top importance percentage to pick biomarker
#' @param model_name one of "logistic regression", "svm", "gbm",
#'   "random forest"
#'
#' @return A list with elements \code{biomarker} (data frame of selected
#'   organisms), \code{importance_plot} and \code{roc_plot}
#'
#' @import MultiAssayExperiment
#' @import glmnet
#' @import DMwR
#' @import caret
#' @import plotROC
#' @import forcats
#' @importFrom ggplot2 geom_col aes coord_flip theme_bw coord_equal annotate
#'
#' @examples
#' data_dir = system.file("extdata/MAE.rds", package = "animalcules")
#' toy_data <- readRDS(data_dir)
#' p <- find_biomarker(toy_data,
#'                     tax_level="genus",
#'                     input_select_target_biomarker=c("DISEASE"),
#'                     nfolds = 3,
#'                     nrepeats = 3,
#'                     seed = 99,
#'                     percent_top_biomarker = 0.2,
#'                     model_name = "logistic regression")
#' p
#'
#'
#' @export
find_biomarker <- function(MAE,
                           tax_level,
                           input_select_target_biomarker,
                           nfolds = 3,
                           nrepeats = 3,
                           seed = 99,
                           percent_top_biomarker = 0.2,
                           model_name = c("logistic regression", "random forest")) {
    # Fix: the default is a length-2 vector, so the `model_name == "..."`
    # comparisons below error with "the condition has length > 1" on
    # R >= 4.2 when the argument is left at its default. Take the first
    # element and validate it; "svm" and "gbm" are accepted too because the
    # body already supports them.
    model_name <- match.arg(model_name[1L],
                            c("logistic regression", "svm", "gbm",
                              "random forest"))
    ## SEED
    # bioC not suggesst add set seed function in R code
    # set.seed(seed)
    ## tables from MAE
    microbe <- MAE[['MicrobeGenetics']] #double bracket subsetting is easier
    tax_table <- as.data.frame(rowData(microbe)) # organism x taxlev
    sam_table <- as.data.frame(colData(microbe)) # sample x condition
    counts_table <- as.data.frame(assays(microbe))[,rownames(sam_table)] # organism x sample
    # Sum counts by taxon level and convert to log10 CPM; transpose so rows
    # are samples and columns are organisms (model features).
    logcpm_table <- counts_table %>%
        upsample_counts(tax_table, tax_level) %>%
        counts_to_logcpm() %>%
        base::t() %>%
        base::as.data.frame()
    # Append the target condition as column `y`.
    logcpm_table[,'y'] <- sam_table %>%
        dplyr::pull(input_select_target_biomarker)
    # Repeated k-fold CV with SMOTE resampling for class imbalance;
    # predictions are saved so the ROC curve can be built afterwards.
    fitControl <- caret::trainControl(
        method = "repeatedcv",
        number = nfolds,
        repeats = nrepeats,
        classProbs = TRUE,
        summaryFunction = twoClassSummary,
        sampling = "smote",
        savePredictions = TRUE)
    # Fit a caret model for the chosen algorithm; shared by the initial fit
    # (all organisms) and the refit (selected biomarkers only).
    train_classifier <- function(dat) {
        if (model_name == "logistic regression") {
            caret::train(y ~ ., data = dat, method = "glmnet",
                         tuneLength = 5, trControl = fitControl,
                         metric = "ROC")
        } else if (model_name == "svm") {
            caret::train(y ~ ., data = dat, method = "svmLinear",
                         tuneLength = 5, trControl = fitControl,
                         metric = "ROC")
        } else if (model_name == "gbm") {
            # verbose = FALSE is passed through to gbm() itself.
            caret::train(y ~ ., data = dat, method = "gbm",
                         trControl = fitControl, tuneLength = 5,
                         metric = "ROC", verbose = FALSE)
        } else { # random forest
            # importance = "impurity" is a ranger-specific parameter.
            caret::train(y ~ ., data = dat, method = "ranger",
                         trControl = fitControl, tuneLength = 5,
                         metric = "ROC", importance = "impurity")
        }
    }
    # Keep the rows of an importance table above the
    # (1 - percent_top_biomarker) quantile, sorted ascending for plotting.
    top_importance <- function(imp) {
        imp %>%
            tibble::rownames_to_column() %>%
            dplyr::rename(biomarker = rowname) %>%
            dplyr::arrange(importance) %>%
            dplyr::filter(importance >
                              quantile(importance, 1 - percent_top_biomarker))
    }
    model_fit <- train_classifier(logcpm_table)
    # Extract and rank the variable-importance scores.
    if (model_name == "svm") {
        # caret returns one importance column per class for svmLinear;
        # keep the first and give it a uniform name.
        svm_importance <- caret::varImp(model_fit)$importance
        svm_importance[,2] <- NULL
        colnames(svm_importance) <- "importance"
        ranked <- top_importance(svm_importance)
    } else {
        ranked <- caret::varImp(model_fit)$importance %>%
            base::as.data.frame() %>%
            dplyr::rename(importance = Overall) %>%
            top_importance()
    }
    biomarker <- dplyr::pull(ranked, biomarker)
    importance_plot <- ranked %>%
        dplyr::mutate(biomarker = forcats::fct_inorder(biomarker)) %>%
        ggplot2::ggplot()+
        geom_col(aes(x = biomarker, y = importance))+
        coord_flip()+
        theme_bw()
    # Retrain using the selected biomarkers only. all_of() replaces the
    # deprecated external-vector form of select().
    logcpm_table <- logcpm_table %>%
        dplyr::select(dplyr::all_of(biomarker), y)
    model_fit <- train_classifier(logcpm_table)
    # Build the cross-validated ROC curve: d is the observed class recoded
    # to 0/1, m is the predicted probability of the second factor level.
    prob_pred <- as.numeric(model_fit$pred$obs)
    prob_pred[prob_pred == 1] <- 0
    prob_pred[prob_pred == 2] <- 1
    df_roc <- data.frame(m = model_fit$pred[,which(colnames(model_fit$pred)
                                == levels(model_fit$pred$obs)[2])],
                         d = prob_pred,
                         stringsAsFactors = FALSE)
    g <- ggplot(df_roc, aes(m=m, d=d)) +
        geom_roc(n.cuts=0) +
        coord_equal() +
        style_roc()
    roc_plot <- g + annotate("text", x=0.75, y=0.25,
                             label=paste("AUC =", round((calc_auc(g))$AUC, 4)))
    biomarker <- data.frame(biomarker_list = biomarker)
    # output a list
    list_output <- list(biomarker = biomarker,
                        importance_plot = importance_plot,
                        roc_plot = roc_plot)
    return(list_output)
}
|
/Análisis de Regresión Logística.R
|
no_license
|
Claudia-Mardones-B/Metodo_Cuanti_USACH
|
R
| false
| false
| 3,350
|
r
| ||
library(MASS)
library(neuralnet)
set.seed(123)
# Boston housing data; the target is medv (median home value, $1000s).
data2 <- Boston
hist(data2$medv)
# Min-max scale every column to [0, 1] for neural-net training.
# (Fix: removed the dead pre-assignment `scaled4 <- 0`.)
maxs <- apply(data2, 2, max)
mins <- apply(data2, 2, min)
scaled4 <- as.data.frame(scale(data2, center = mins, scale = maxs - mins))
hist(scaled4$medv)
# 75/25 train/test split; the same row indices are reused for the scaled copy.
index <- sample(seq_len(nrow(data2)), round(0.75 * nrow(data2)))
train <- data2[index, ]
test <- data2[-index, ]
# Baseline: ordinary linear model on the unscaled data.
lm.fit <- glm(medv ~ ., data = train)
summary(lm.fit)
pr.lm <- predict(lm.fit, test)
MSE.lm <- sum((pr.lm - test$medv)^2) / nrow(test)
train_ <- scaled4[index, ]
test_ <- scaled4[-index, ]
#train_ <- data1[index,]
#test_ <- data1[-index,]
# neuralnet() does not accept "medv ~ .", so spell the formula out.
n <- names(train_)
f <- as.formula(paste("medv ~", paste(n[!n %in% "medv"], collapse = " + ")))
# Two hidden layers (4 and 2 units); linear output for regression.
# (Fix: TRUE instead of the reassignable shorthand T.)
nn <- neuralnet(f, data = train_, hidden = c(4, 2), linear.output = TRUE)
plot(nn)
# Predict on the scaled test predictors, then rescale back to dollars.
pr.nn <- compute(nn, test_[, 1:13])
pr.nn_ <- pr.nn$net.result * (max(data2$medv) - min(data2$medv)) + min(data2$medv)
test.r <- (test_$medv) * (max(data2$medv) - min(data2$medv)) + min(data2$medv)
MSE.nn <- sum((test.r - pr.nn_)^2) / nrow(test_)
print(paste(MSE.lm, MSE.nn))
# Side-by-side scatter of actual vs predicted for both models.
par(mfrow = c(1, 2))
plot(test$medv, pr.nn_, col = 'red', main = 'Real vs predicted NN', pch = 18, cex = 0.7)
abline(0, 1, lwd = 2)
legend('bottomright', legend = 'NN', pch = 18, col = 'red', bty = 'n')
plot(test$medv, pr.lm, col = 'blue', main = 'Real vs predicted lm', pch = 18, cex = 0.7)
abline(0, 1, lwd = 2)
legend('bottomright', legend = 'LM', pch = 18, col = 'blue', bty = 'n', cex = .95)
# NOTE(review): this feeds the raw values 1..13 to a network trained on
# [0, 1]-scaled inputs, so the rescaled output is not meaningful -- confirm
# whether scaled inputs were intended here.
out <- compute(nn, cbind(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13))
out_ <- out$net.result * (max(data2$medv) - min(data2$medv)) + min(data2$medv)
out_
|
/NeuralNetworksEx1.R
|
no_license
|
zeisys-sjojo/NeuralNetworksEx1
|
R
| false
| false
| 1,519
|
r
|
## Neural network vs. linear regression on the Boston housing data
## (duplicate copy of NeuralNetworksEx1.R, kept verbatim).
library(MASS)
library(neuralnet)
set.seed(123)
data2 <- Boston
hist(data2$medv)
## Column-wise min/max used for [0, 1] scaling below.
maxs <- apply(data2, 2, max)
mins <- apply(data2, 2, min)
scaled4 <- 0
scaled4 <- as.data.frame(scale(data2, center = mins, scale = maxs - mins))
hist(scaled4$medv)
## 75/25 train/test split on the unscaled data.
index <- sample(1:nrow(data2),round(0.75*nrow(data2)))
train <- data2[index,]
test <- data2[-index,]
## Baseline: gaussian glm (equivalent to lm) and its test MSE.
lm.fit <- glm(medv~., data=train)
summary(lm.fit)
pr.lm <- predict(lm.fit,test)
MSE.lm <- sum((pr.lm - test$medv)^2)/nrow(test)
train_ <- scaled4[index,]
test_ <- scaled4[-index,]
#train_ <- data1[index,]
#test_ <- data1[-index,]
## neuralnet() needs an explicit formula; "medv ~ ." is not supported.
n <- names(train_)
f <- as.formula(paste("medv ~", paste(n[!n %in% "medv"], collapse = " + ")))
nn <- neuralnet(f,data=train_,hidden=c(4,2),linear.output=T)
plot(nn)
## Predict on the 13 scaled predictors, then un-scale back to medv units.
pr.nn <- compute(nn,test_[,1:13])
pr.nn_ <- pr.nn$net.result*(max(data2$medv)-min(data2$medv))+min(data2$medv)
test.r <- (test_$medv)*(max(data2$medv)-min(data2$medv))+min(data2$medv)
MSE.nn <- sum((test.r - pr.nn_)^2)/nrow(test_)
print(paste(MSE.lm,MSE.nn))
## Real-vs-predicted scatter plots for both models.
par(mfrow=c(1,2))
plot(test$medv,pr.nn_,col='red',main='Real vs predicted NN',pch=18,cex=0.7)
abline(0,1,lwd=2)
legend('bottomright',legend='NN',pch=18,col='red', bty='n')
plot(test$medv,pr.lm,col='blue',main='Real vs predicted lm',pch=18, cex=0.7)
abline(0,1,lwd=2)
legend('bottomright',legend='LM',pch=18,col='blue', bty='n', cex=.95)
## Demo prediction on a hand-made row. NOTE(review): values are not scaled
## to [0, 1], so this prediction is outside the training domain -- verify.
out <- compute(nn,cbind(1,2,3,4,5,6,7,8,9,10,11,12,13))
out_ <- out$net.result*(max(data2$medv)-min(data2$medv))+min(data2$medv)
out_
|
## plot4: 2x2 panel of household power-consumption plots for 2007-02-01/02.
fileName <- "./data/household_power_consumption.txt"
wholeData <- read.table(fileName, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
## Keep only the two target days.
selectedData<- wholeData[wholeData$Date %in% c("1/2/2007","2/2/2007") ,]
## BUGFIX: the original script read from an undefined `subSetData`; all
## references now use `selectedData`, the object actually created above.
datetime <- strptime(paste(selectedData$Date, selectedData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(selectedData$Global_active_power)
globalReactivePower <- as.numeric(selectedData$Global_reactive_power)
voltage <- as.numeric(selectedData$Voltage)
subMeter1 <- as.numeric(selectedData$Sub_metering_1)
subMeter2 <- as.numeric(selectedData$Sub_metering_2)
subMeter3 <- as.numeric(selectedData$Sub_metering_3)
png("plot4.png", width=480, height=480)
par(mfrow = c(2, 2))
plot(datetime, globalActivePower, type="l", xlab="", ylab="Global Active Power", cex=0.2)
plot(datetime, voltage, type="l", xlab="datetime", ylab="Voltage")
plot(datetime, subMeter1, type="l", ylab="Energy Submetering", xlab="")
lines(datetime, subMeter2, type="l", col="red")
lines(datetime, subMeter3, type="l", col="blue")
## BUGFIX: `lty=` was left empty (missing argument); lty=1 explicitly draws
## solid legend lines, matching the line type used in the plots.
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"), bty="o")
plot(datetime, globalReactivePower, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
/plot4.R
|
no_license
|
andandandand/ExData_Plotting1
|
R
| false
| false
| 1,265
|
r
|
## plot4 (duplicate copy): 2x2 panel of power-consumption plots for 2007-02-01/02.
fileName <- "./data/household_power_consumption.txt"
wholeData <- read.table(fileName, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
## Keep only the two target days.
selectedData<- wholeData[wholeData$Date %in% c("1/2/2007","2/2/2007") ,]
## BUGFIX: the original script read from an undefined `subSetData`; all
## references now use `selectedData`, the object actually created above.
datetime <- strptime(paste(selectedData$Date, selectedData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
globalActivePower <- as.numeric(selectedData$Global_active_power)
globalReactivePower <- as.numeric(selectedData$Global_reactive_power)
voltage <- as.numeric(selectedData$Voltage)
subMeter1 <- as.numeric(selectedData$Sub_metering_1)
subMeter2 <- as.numeric(selectedData$Sub_metering_2)
subMeter3 <- as.numeric(selectedData$Sub_metering_3)
png("plot4.png", width=480, height=480)
par(mfrow = c(2, 2))
plot(datetime, globalActivePower, type="l", xlab="", ylab="Global Active Power", cex=0.2)
plot(datetime, voltage, type="l", xlab="datetime", ylab="Voltage")
plot(datetime, subMeter1, type="l", ylab="Energy Submetering", xlab="")
lines(datetime, subMeter2, type="l", col="red")
lines(datetime, subMeter3, type="l", col="blue")
## BUGFIX: `lty=` was left empty (missing argument); lty=1 explicitly draws
## solid legend lines, matching the line type used in the plots.
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"), bty="o")
plot(datetime, globalReactivePower, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
## Valgrind regression input for multivariance:::match_rows: a 5x1 matrix of
## extreme doubles (A) matched against a 1x1 zero matrix (B).
testlist <- list(
  A = structure(c(2.31584307392677e+77, 9.53818252170339e+295,
                  1.22796905866677e+146, 4.12396251261199e-221, 0),
                .Dim = c(5L, 1L)),
  B = structure(0, .Dim = c(1L, 1L))
)
result <- do.call(multivariance:::match_rows, testlist)
str(result)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613116861-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 251
|
r
|
## Valgrind regression test (duplicate copy): call multivariance:::match_rows
## on matrices of extreme double values and print the result structure.
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22796905866677e+146, 4.12396251261199e-221, 0), .Dim = c(5L, 1L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
## plot1: histogram of Global Active Power for 2007-02-01/02, saved as PNG.
setwd("Where you have downloaded the DataFile")##placeholder: point this at the download directory
unzip("exdata_data_household_power_consumption.zip")##extract the raw data file
## read.csv2 uses sep=";" to match the file; "?" marks missing values.
power_cons<-read.csv2("household_power_consumption.txt",na.strings = "?")
dim(power_cons)##sanity-check row/column counts
names(power_cons)##column names, used for filtering below
## Keep only the two target days.
power_cons<-subset(power_cons,Date=="2/2/2007"| Date=="1/2/2007")
## Combined, parsed date-time column for use by the plotting functions.
power_cons$Datetime<-strptime(paste(power_cons$Date,power_cons$Time),"%d/%m/%Y %T")
## Draw the histogram on screen, then copy the screen device to a PNG file.
hist(as.numeric(power_cons$Global_active_power),main="Global Active Power",xlab ="Global Active Power(Killowatts)",col = "red")
dev.copy(png,"plot1.png", width = 480, height = 480)
dev.off()##close the PNG device, flushing the file to disk
|
/plot1.R
|
no_license
|
sirvishalarora/ExData_Plotting1
|
R
| false
| false
| 1,056
|
r
|
## plot1 (duplicate copy): histogram of Global Active Power, saved as PNG.
setwd("Where you have downloaded the DataFile")##set the working directory to the directory where data is downloaded
unzip("exdata_data_household_power_consumption.zip")##Unzip the compress file
power_cons<-read.csv2("household_power_consumption.txt",na.strings = "?")##read file into dataframe and marking na values as "?"
dim(power_cons)##checking total rows and columns of Data Frame
names(power_cons)##getting the column names for filtering
power_cons<-subset(power_cons,Date=="2/2/2007"| Date=="1/2/2007")##filtering on the basis of specified date
power_cons$Datetime<-paste(power_cons$Date,power_cons$Time)##Adding a new column which is merge of Date and time columns
power_cons$Datetime<-strptime(power_cons$Datetime,"%d/%m/%Y %T")## Formatting the Datetime column for use in plots
hist(as.numeric(power_cons$Global_active_power),main="Global Active Power",xlab ="Global Active Power(Killowatts)",col = "red")##Drawing the histogram
dev.copy(png,"plot1.png", width = 480, height = 480)##transfer to png graphic device
dev.off()##png file generation.
|
library(ggplot2)
## BUGFIX: str_extract() below comes from stringr, which was never loaded;
## without this library() call every str_extract line errors out.
library(stringr)
# 1) What is 2 + 5
2 + 5
# 2) What is your name?
cat("Garrett")
## Read a knitted HTML assignment and collapse it into one string so
## multi-line regexes can span code chunks.
r <- readLines('/Users/dancikg/Desktop/r_grading/assignment/test.html')
s <- paste0(r, collapse = '\n')
## Regex for the first R code chunk between the "# 1)" and "# 3)" markers.
## NOTE(review): q1 and pattern are identical -- q1 appears to be unused.
q1 <- '<pre class="r">[\\s\\S]+?# 1\\)[\\s\\S]+?# 3\\)'
pattern <- '<pre class="r">[\\s\\S]+?# 1\\)[\\s\\S]+?# 3\\)'
f <- str_extract(s, pattern)
f
## Tempered-greedy variants: match up to "# 3)" without crossing another <pre>.
str_extract(s, '(?:.(?!<pre class=\"r\">))+# 3\\)')
str_extract(s, '(?:.(?!<pre class="r">))+3\\)')
|
/testAssignments/test.R
|
no_license
|
gdancik/shinyGrader
|
R
| false
| false
| 444
|
r
|
library(ggplot2)
## BUGFIX (duplicate copy): str_extract() comes from stringr, which was never
## loaded; without this library() call every str_extract line errors out.
library(stringr)
# 1) What is 2 + 5
2 + 5
# 2) What is your name?
cat("Garrett")
## Read a knitted HTML assignment and collapse it into one string.
r <- readLines('/Users/dancikg/Desktop/r_grading/assignment/test.html')
s <- paste0(r, collapse = '\n')
## Regex for the first R code chunk between the "# 1)" and "# 3)" markers.
q1 <- '<pre class="r">[\\s\\S]+?# 1\\)[\\s\\S]+?# 3\\)'
pattern <- '<pre class="r">[\\s\\S]+?# 1\\)[\\s\\S]+?# 3\\)'
f <- str_extract(s, pattern)
f
## Tempered-greedy variants: match up to "# 3)" without crossing another <pre>.
str_extract(s, '(?:.(?!<pre class=\"r\">))+# 3\\)')
str_extract(s, '(?:.(?!<pre class="r">))+3\\)')
|
####################################################################################
# Calculates quality metrics (Info Score, MAF, etc.) of variants in a bgen file,
# using a BASH script called "calc_variant_quality", located in the "path" argument.
# The BASH script calls QCTOOL.
#
# Input: chromosome to calculate metrics for, assuming bgen files are organized by chr.
# Output: files written in "./variants" within "path", containing output from qctool.
####################################################################################
source('../Load_Bgen/LoadBgen.R')
# path: data directory containing the bgen files and the calc_variant_quality script.
# chr: chromosome identifier to process.
# n_cores: number of parallel workers; also determines the rsid chunk count.
CalcVariantQuality <- function(path,chr,n_cores){
  path_out <- paste0(path,'variants/','chr',chr,'/')
  system(paste0('mkdir -p ',path_out))
  setwd(path)
  library(dplyr)
  library(parallel)
  library(data.table)
  library(pbmcapply)
  # Find all rsids, and generate one chunk per core to read.
  print('Loading rsID')
  allRSIds <- FindAllRSIds(chr)
  allRSIds <- unique(allRSIds$rsid)
  chunkSize <- ceiling(length(allRSIds) / n_cores)
  rsIDChunks <- split(allRSIds,seq(length(allRSIds)-1)%/%chunkSize)
  # Write files which contain the rsid chunks (consumed by QCTOOL).
  # seq_along() replaces 1:length(), which misbehaves on empty input.
  for(i in seq_along(rsIDChunks)){
    write(rsIDChunks[[i]],file = paste0(path_out,'chunk',i,'_rsid.txt'))
  }
  # Call "calc_variant_quality", a BASH script which calls QCTOOL, once per chunk.
  mclapply(seq_along(rsIDChunks),function(i) system(paste0('./calc_variant_quality ',chr,' ',paste0('./variants/chr',chr,'/','chunk',i,'_rsid.txt'),' ',paste0('./variants/chr',chr,'/','chunk',i,'_stats.txt'))),mc.cores = n_cores)
}
path <- '/mrc-bsu/scratch/zmx21/UKB_Data/' #Path of data
n_cores <- 16
args <- commandArgs(TRUE)
chr <- args[1] #Chr to process
CalcVariantQuality(path,chr,n_cores)
|
/Variant_Quality/CalcVariantQuality.R
|
no_license
|
zmx21/polyresponse
|
R
| false
| false
| 1,690
|
r
|
####################################################################################
#Calcuates quality metrics (Info Score, MAF etc ) of variants in a bgen file,
#using BASH script called "calc_variant_quality", located in "path" argument.
#The BASH script calls QCTOOL.
#
#Input: chromosome to calculate metrics, assuming bgen files are organized by chr.
#Output: files written in "./variant" within "path", containing output from qctool.
####################################################################################
source('../Load_Bgen/LoadBgen.R')
## path: data directory; chr: chromosome to process; n_cores: parallel workers
## (also determines how many rsid chunk files are written).
CalcVariantQuality <- function(path,chr,n_cores){
path_out <- paste0(path,'variants/','chr',chr,'/')
system(paste0('mkdir -p ',path_out))
setwd(path)
library(dplyr)
library(parallel)
library(data.table)
library(pbmcapply)
#Find all rsids, and generate chunks to read.
print('Loading rsID')
allRSIds <- FindAllRSIds(chr)
allRSIds <- unique(allRSIds$rsid)
chunkSize <- ceiling(length(allRSIds) / n_cores)
rsIDChunks <- split(allRSIds,seq(length(allRSIds)-1)%/%chunkSize)
#Write file which contains the rsid chunks (used by QC tools)
for(i in 1:length(rsIDChunks)){
write(rsIDChunks[[i]],file = paste0(path_out,'chunk',i,'_rsid.txt'))
}
#Call "calc_variant_quality", a BASH script which calls QCTOOL
#One system() call per chunk, run in parallel across n_cores workers.
mclapply(1:length(rsIDChunks),function(i) system(paste0('./calc_variant_quality ',chr,' ',paste0('./variants/chr',chr,'/','chunk',i,'_rsid.txt'),' ',paste0('./variants/chr',chr,'/','chunk',i,'_stats.txt'))),mc.cores = n_cores)
}
path <- '/mrc-bsu/scratch/zmx21/UKB_Data/' #Path of data
n_cores <- 16
args=(commandArgs(TRUE))
chr <- args[1] #Chr to process
CalcVariantQuality(path,chr,n_cores)
|
#### EXPRESSION DATA: mTOR PATHWAY GENES - ESCA ####
## NOTE(review): this script assumes `expr.esca`, `clinical.esca` and
## `S1_corr_data_ESCA_filter` already exist in the workspace (presumably
## loaded via TCGA2STAT) -- they are never created here; confirm upstream.
library(TCGA2STAT)
## 38 mTOR-pathway genes of interest.
genes <- c("MTOR", "RPTOR", "DEPDC6", "MLST8", "AKT1S1", "RICTOR", "MAPKAP1", "PRR5L", "WNT9B",
"LRP5", "DVL1", "GNAQ", "PIK3R1", "IRS1", "PDPK1", "PTEN", "HRAS", "KRAS", "NRAS", "MAPK15",
"RPS6KA3", "GSK3B", "DDIT4", "PRKAA2", "PRKCA", "SGK1", "TSC1", "TSC2", "TBC1D7", "FKBP1A",
"GRB10", "EIF4G1", "KIAA0652", "RB1CC1", "ULK1", "RPS6KB1", "EIF4EBP1", "EIF4EBP2")
## log2(x+1) transform, then restrict to the pathway genes.
expr.esca <- log2(expr.esca+1)
expr.genes.esca <- expr.esca[,genes]
library(pheatmap)
pdf("heatmap.esca.pdf")
p.esca <-pheatmap(t(expr.genes.esca), show_colnames = FALSE,
cutree_cols = 3)
dev.off()
# Extracting cluster assignments for each sample
clusteredSamples <- p.esca$tree_col
assignments.esca <- cutree(clusteredSamples, k=3) # k = cutree_cols
groupAssignments.esca <- data.frame(Group=factor(assignments.esca))
p.esca <- pheatmap(t(expr.genes.esca), show_colnames = FALSE,
cutree_cols = 3, annotation = groupAssignments.esca)
### Merging group assignments
df.expr.esca <- data.frame(expr.genes.esca)
df.expr.esca$Sample <- rownames(df.expr.esca)
groupAssignments.esca$SampleID <- rownames(groupAssignments.esca)
df.merged.esca <- merge(df.expr.esca, groupAssignments.esca,
by.x = "Sample", by.y = "SampleID",
all.x = FALSE, all.y = FALSE)
# Additional columns
df.merged.esca$Cancer <- "ESCA"
## Per-sample pathway score over the 38 gene columns (2:39). Vectorized
## rowSums replaces the original `for (i in 1:196)` loop, so the script now
## works for any sample count instead of exactly 196 rows. The divisor 39 is
## kept to preserve the original values.
## NOTE(review): dividing 38 genes by 39 looks like an off-by-one -- confirm.
df.merged.esca$Score <- rowSums(df.merged.esca[, 2:39]) / 39
values <- c("highMAPK15.lowPRKAA2.highDEPDC6.lowPRR5L", "lowMAPK15.highPRKAA2.highDEPDC6.highPRR5L",
"lowMAPK15.lowPRKAA2.lowDEPDC6.highPRR5L")
df.merged.esca$Status <- values[df.merged.esca$Group]
# merging clinical data
clinical.esca$PatientID <- rownames(clinical.esca)
df.merged.esca$PatientID <- substr(df.merged.esca$Sample, start = 1, stop = 12)
df.merged.esca$PatientAge <- clinical.esca[match(df.merged.esca$PatientID, clinical.esca$PatientID), "yearstobirth"]
df.merged.esca$PatientAge <- as.numeric(df.merged.esca$PatientAge)
# merging % S1 data (DS and MP)
df.merged.esca$SubID <- substr(df.merged.esca$Sample, start = 1, stop = 19)
S1_corr_data_ESCA_filter$SubID <- substr(S1_corr_data_ESCA_filter$Sample_ID, start = 1, stop = 19)
df.merged.esca <- merge(df.merged.esca, S1_corr_data_ESCA_filter,
by.x = "SubID", by.y = "SubID",
all.x = FALSE, all.y = FALSE)
save(df.merged.esca, file = "merged.file.esca.RData")
#### CORRELATION OF S1 IN MTOR ACTIVITY ####
## Pairwise group comparisons used by stat_compare_means below.
comparison.esca <- list(c("highMAPK15.lowPRKAA2.highDEPDC6.lowPRR5L", "lowMAPK15.highPRKAA2.highDEPDC6.highPRR5L"),
c("lowMAPK15.highPRKAA2.highDEPDC6.highPRR5L", "lowMAPK15.lowPRKAA2.lowDEPDC6.highPRR5L"),
c("lowMAPK15.lowPRKAA2.lowDEPDC6.highPRR5L", "highMAPK15.lowPRKAA2.highDEPDC6.lowPRR5L"))
### MP ###
library(ggpubr)
boxplot.mp.esca <- ggboxplot(df.merged.esca, x="Status", y="MP", fill = "Status", palette = c("#47bfff", "#fa7393", "#b789f0"), shape = "Status")
p.mp <- boxplot.mp.esca + stat_compare_means(comparisons = comparison.esca) +
stat_compare_means(label.y = 0.35, size = 5)
p.mp$layers[[2]]$aes_params$textsize <- 5
lab.esca.mp <- ggpar(p.mp,
main = "ESCA",
font.main = c(16, "bold"),
xlab = "Gene expression status",
ylab = "% S1 (MP)",
font.x = c(14, "bold"),
font.y = c(14, "bold"),
font.ytickslab = 14,
font.xtickslab = c(1, "white"),
legend = "none")
mean.mp <- aggregate(df.merged.esca$MP ~ df.merged.esca$Status, df.merged.esca, mean)
### DS ###
boxplot.ds.esca <- ggboxplot(df.merged.esca, x="Status", y="DS", fill = "Status", palette = c("#358fff", "#ff4c77", "#9900ff"), shape = "Status")
p.ds <- boxplot.ds.esca + stat_compare_means(comparisons = comparison.esca) +
stat_compare_means(label.y = 1.5, size = 5)
p.ds$layers[[2]]$aes_params$textsize <- 5
lab.esca.ds <- ggpar(p.ds,
main = "ESCA",
font.main = c(16, "bold"),
xlab = "Gene expression status",
ylab = "% S1 (DS)",
font.x = c(14, "bold"),
font.y = c(14, "bold"),
font.ytickslab = 14,
font.xtickslab = c(1, "white"),
legend = "none")
mean.ds <- aggregate(df.merged.esca$DS ~ df.merged.esca$Status, df.merged.esca, mean)
## Correlation of the pathway score with the %S1 measurements.
cor.test(df.merged.esca$MP, df.merged.esca$Score)
cor.test(df.merged.esca$DS, df.merged.esca$Score)
|
/MSc Project R/1 R Scripts /5 mTOR pathway genes_exp/pathway_ESCA.R
|
no_license
|
Varshini-Suresh/MSc-Project-R-Codes-
|
R
| false
| false
| 4,730
|
r
|
#### EXPRESSION DATA: mTOR PATHWAY GENES - ESCA ####
## Duplicate copy of pathway_ESCA.R, kept verbatim.
## NOTE(review): assumes `expr.esca`, `clinical.esca` and
## `S1_corr_data_ESCA_filter` already exist in the workspace -- verify upstream.
library(TCGA2STAT)
## 38 mTOR-pathway genes of interest.
genes <- c("MTOR", "RPTOR", "DEPDC6", "MLST8", "AKT1S1", "RICTOR", "MAPKAP1", "PRR5L", "WNT9B",
"LRP5", "DVL1", "GNAQ", "PIK3R1", "IRS1", "PDPK1", "PTEN", "HRAS", "KRAS", "NRAS", "MAPK15",
"RPS6KA3", "GSK3B", "DDIT4", "PRKAA2", "PRKCA", "SGK1", "TSC1", "TSC2", "TBC1D7", "FKBP1A",
"GRB10", "EIF4G1", "KIAA0652", "RB1CC1", "ULK1", "RPS6KB1", "EIF4EBP1", "EIF4EBP2")
expr.esca <- log2(expr.esca+1)
expr.genes.esca <- expr.esca[,genes]
library(pheatmap)
pdf("heatmap.esca.pdf")
p.esca <-pheatmap(t(expr.genes.esca), show_colnames = FALSE,
cutree_cols = 3)
dev.off()
# Extracting cluster assignments for each sample
clusteredSamples <- p.esca$tree_col
assignments.esca <- cutree(clusteredSamples, k=3) # k = cutree_cols
groupAssignments.esca <- data.frame(Group=factor(assignments.esca))
p.esca <- pheatmap(t(expr.genes.esca), show_colnames = FALSE,
cutree_cols = 3, annotation = groupAssignments.esca)
### Merging group assignments
df.expr.esca <- data.frame(expr.genes.esca)
df.expr.esca$Sample <- rownames(df.expr.esca)
groupAssignments.esca$SampleID <- rownames(groupAssignments.esca)
df.merged.esca <- merge(df.expr.esca, groupAssignments.esca,
by.x = "Sample", by.y = "SampleID",
all.x = FALSE, all.y = FALSE)
# Additional columns
df.merged.esca$Cancer <- "ESCA"
## NOTE(review): 196 hard-codes the sample count and the sum over 38 gene
## columns (2:39) is divided by 39 -- both look fragile; confirm intent.
for (i in 1:196) df.merged.esca$Score[i] = sum(df.merged.esca[i,2:39])/39
values <- c("highMAPK15.lowPRKAA2.highDEPDC6.lowPRR5L", "lowMAPK15.highPRKAA2.highDEPDC6.highPRR5L",
"lowMAPK15.lowPRKAA2.lowDEPDC6.highPRR5L")
df.merged.esca$Status <- values[df.merged.esca$Group]
# merging clinical data
clinical.esca$PatientID <- rownames(clinical.esca)
df.merged.esca$PatientID <- substr(df.merged.esca$Sample, start = 1, stop = 12)
df.merged.esca$PatientAge <- clinical.esca[match(df.merged.esca$PatientID, clinical.esca$PatientID), "yearstobirth"]
df.merged.esca$PatientAge <- as.numeric(df.merged.esca$PatientAge)
# merging % S1 data (DS and MP)
df.merged.esca$SubID <- substr(df.merged.esca$Sample, start = 1, stop = 19)
S1_corr_data_ESCA_filter$SubID <- substr(S1_corr_data_ESCA_filter$Sample_ID, start = 1, stop = 19)
df.merged.esca <- merge(df.merged.esca, S1_corr_data_ESCA_filter,
by.x = "SubID", by.y = "SubID",
all.x = FALSE, all.y = FALSE)
save(df.merged.esca, file = "merged.file.esca.RData")
#### CORRELATION OF S1 IN MTOR ACTIVITY ####
## Pairwise group comparisons used by stat_compare_means below.
comparison.esca <- list(c("highMAPK15.lowPRKAA2.highDEPDC6.lowPRR5L", "lowMAPK15.highPRKAA2.highDEPDC6.highPRR5L"),
c("lowMAPK15.highPRKAA2.highDEPDC6.highPRR5L", "lowMAPK15.lowPRKAA2.lowDEPDC6.highPRR5L"),
c("lowMAPK15.lowPRKAA2.lowDEPDC6.highPRR5L", "highMAPK15.lowPRKAA2.highDEPDC6.lowPRR5L"))
### MP ###
library(ggpubr)
boxplot.mp.esca <- ggboxplot(df.merged.esca, x="Status", y="MP", fill = "Status", palette = c("#47bfff", "#fa7393", "#b789f0"), shape = "Status")
p.mp <- boxplot.mp.esca + stat_compare_means(comparisons = comparison.esca) +
stat_compare_means(label.y = 0.35, size = 5)
p.mp$layers[[2]]$aes_params$textsize <- 5
lab.esca.mp <- ggpar(p.mp,
main = "ESCA",
font.main = c(16, "bold"),
xlab = "Gene expression status",
ylab = "% S1 (MP)",
font.x = c(14, "bold"),
font.y = c(14, "bold"),
font.ytickslab = 14,
font.xtickslab = c(1, "white"),
legend = "none")
mean.mp <- aggregate(df.merged.esca$MP ~ df.merged.esca$Status, df.merged.esca, mean)
### DS ###
boxplot.ds.esca <- ggboxplot(df.merged.esca, x="Status", y="DS", fill = "Status", palette = c("#358fff", "#ff4c77", "#9900ff"), shape = "Status")
p.ds <- boxplot.ds.esca + stat_compare_means(comparisons = comparison.esca) +
stat_compare_means(label.y = 1.5, size = 5)
p.ds$layers[[2]]$aes_params$textsize <- 5
lab.esca.ds <- ggpar(p.ds,
main = "ESCA",
font.main = c(16, "bold"),
xlab = "Gene expression status",
ylab = "% S1 (DS)",
font.x = c(14, "bold"),
font.y = c(14, "bold"),
font.ytickslab = 14,
font.xtickslab = c(1, "white"),
legend = "none")
mean.ds <- aggregate(df.merged.esca$DS ~ df.merged.esca$Status, df.merged.esca, mean)
## Correlation of the pathway score with the %S1 measurements.
cor.test(df.merged.esca$MP, df.merged.esca$Score)
cor.test(df.merged.esca$DS, df.merged.esca$Score)
|
#!/usr/bin/env R
## RaceID clustering pipeline driver.
## Usage: one argument, a config file sourced below. The config presumably
## defines the `filt.*`, `clust.*`, `outlier.*`, `cluster.*`, `genelist.*`,
## `use.*` and `out.*` variables plus the SCseq object `sc` used later in
## this script -- confirm against the caller.
VERSION = "0.3"
args = commandArgs(trailingOnly = T)
if (length(args) != 1){
message(paste("VERSION:", VERSION))
stop("Please provide the config file")
}
suppressWarnings(suppressPackageStartupMessages(require(RaceID)))
suppressWarnings(suppressPackageStartupMessages(require(scran)))
## Sourcing the config populates the globals referenced throughout this file.
source(args[1])
## Filter the raw count data via RaceID's filterdata(), plot library-size and
## feature-count histograms before and after filtering, and optionally run
## cell-cycle correction (CCcorrect). Returns the updated SCseq object.
## Reads config globals: filt, filt.lbatch.regexes, filt.use.ccorrect, filt.ccc.
do.filter <- function(sc){
## Expand each batch regex into the list of matching cell names.
## Note: `filt` comes from the sourced config; this assignment modifies a
## function-local copy only (copy-on-modify), which is then passed to filterdata.
if (!is.null(filt.lbatch.regexes)){
lar <- filt.lbatch.regexes
nn <- colnames(sc@expdata)
filt$LBatch <- lapply(1:length(lar), function(m){ return( nn[grep(lar[[m]], nn)] ) })
}
sc <- do.call(filterdata, c(sc, filt))
## Get histogram metrics for library size and number of features
raw.lib <- log10(colSums(as.matrix(sc@expdata)))
raw.feat <- log10(colSums(as.matrix(sc@expdata)>0))
filt.lib <- log10(colSums(getfdata(sc)))
filt.feat <- log10(colSums(getfdata(sc)>0))
br <- 50
## Determine limits on plots based on the unfiltered data
## (doesn't work, R rejects limits and norm data is too different to compare to exp data
## so let them keep their own ranges)
## betterrange <- function(floatval){
##     return(10 * (floor(floatval / 10) + 1))
## }
## tmp.lib <- hist(raw.lib, breaks=br, plot=F)
## tmp.feat <- hist(raw.feat, breaks=br, plot=F)
## lib.y_lim <- c(0,betterrange(max(tmp.lib$counts)))
## lib.x_lim <- c(0,betterrange(max(tmp.lib$breaks)))
## feat.y_lim <- c(0,betterrange(max(tmp.feat$counts)))
## feat.x_lim <- c(0,betterrange(max(tmp.feat$breaks)))
par(mfrow=c(2,2))
print(hist(raw.lib, breaks=br, main="RawData Log10 LibSize")) # , xlim=lib.x_lim, ylim=lib.y_lim)
print(hist(raw.feat, breaks=br, main="RawData Log10 NumFeat")) #, xlim=feat.x_lim, ylim=feat.y_lim)
print(hist(filt.lib, breaks=br, main="FiltData Log10 LibSize")) # , xlim=lib.x_lim, ylim=lib.y_lim)
tmp <- hist(filt.feat, breaks=br, main="FiltData Log10 NumFeat") # , xlim=feat.x_lim, ylim=feat.y_lim)
print(tmp)
## required, for extracting midpoint
## Degenerate case: if every remaining cell has the same feature count,
## mark it explicitly on the histogram.
unq <- unique(filt.feat)
if (length(unq) == 1){
abline(v=unq, col="red", lw=2)
text(tmp$mids, table(filt.feat)[[1]] - 100, pos=1, paste(10^unq, "\nFeatures\nin remaining\nCells", sep=""), cex=0.8)
}
## Optional cell-cycle correction with saturation diagnostics.
if (filt.use.ccorrect){
par(mfrow=c(2,2))
sc <- do.call(CCcorrect, c(sc, filt.ccc))
print(plotdimsat(sc, change=T))
print(plotdimsat(sc, change=F))
}
return(sc)
}
## Compute the cell-cell distance matrix and cluster the expression data with
## the config-supplied parameter lists (clust.compdist / clust.clustexp),
## then emit saturation (when enabled) and Jaccard diagnostics.
## Returns the updated SCseq object.
do.cluster <- function(sc){
sc <- do.call(compdist, c(sc, clust.compdist))
sc <- do.call(clustexp, c(sc, clust.clustexp))
if (clust.clustexp$sat){
for (disp_mode in c(FALSE, TRUE)) {
print(plotsaturation(sc, disp=disp_mode))
}
}
print(plotjaccard(sc))
sc
}
## Detect outlier cells (findoutliers), optionally refine clusters with a
## random forest (rfcorrect), plot diagnostics, and draw the initial and
## final cluster heatmaps. Returns the updated SCseq object.
## Reads config globals: outlier.findoutliers, outlier.use.randomforest,
## outlier.rfcorrect.
do.outlier <- function(sc){
sc <- do.call(findoutliers, c(sc, outlier.findoutliers))
if (outlier.use.randomforest){
sc <- do.call(rfcorrect, c(sc, outlier.rfcorrect))
}
print(plotbackground(sc))
print(plotsensitivity(sc))
print(plotoutlierprobs(sc))
## Heatmaps
## `test1` carries mtext() placement arguments for the heatmap captions.
test1 <- list()
test1$side = 3
test1$line = 0 #1 #3
x <- clustheatmap(sc, final=FALSE)
print(do.call(mtext, c(paste("(Initial)"), test1))) ## spacing is a hack
x <- clustheatmap(sc, final=TRUE)
print(do.call(mtext, c(paste("(Final)"), test1))) ## spacing is a hack
return(sc)
}
## Compute the t-SNE and Fruchterman-Reingold layouts for the clustered data,
## using the config-supplied parameter lists. Returns the updated SCseq object.
do.clustmap <- function(sc){
for (layout_step in list(list(fn = comptsne, cfg = cluster.comptsne),
list(fn = compfr, cfg = cluster.compfr))) {
sc <- do.call(layout_step$fn, c(sc, layout_step$cfg))
}
sc
}
## For each cluster, compute differentially-expressed genes, plot the top
## markers with caption text, and write a combined per-cluster gene table to
## out.genelist (config global). Also reads genelist.pvalue,
## genelist.foldchange, genelist.tablelim, genelist.plotlim from the config.
mkgenelist <- function(sc){
## Layout
## `test` carries mtext() placement arguments for the plot captions.
test <- list()
test$side = 3
test$line = 0 #1 #3
test$cex = 0.8
df <- c()
options(cex = 1)
## NOTE(review): lapply is used purely for side effects, with `df` grown via
## `<<-`. The `test$line` assignments below modify a per-iteration local copy
## (copy-on-modify), so each cluster's captions start again at line 0 -- a
## naive for-loop rewrite would change that; left as-is deliberately.
lapply(unique(sc@cpart), function(n){
dg <- clustdiffgenes(sc, cl=n, pvalue=genelist.pvalue)
dg.goi <- dg[dg$fc > genelist.foldchange,]
dg.goi.table <- head(dg.goi, genelist.tablelim)
df <<- rbind(df, cbind(n, dg.goi.table))
goi <- head(rownames(dg.goi.table), genelist.plotlim)
print(plotmarkergenes(sc, goi))
print(do.call(mtext, c(paste("    Cluster ",n), test))) ## spacing is a hack
test$line=-1
print(do.call(mtext, c(paste("    Sig. Genes"), test))) ## spacing is a hack
test$line=-2
print(do.call(mtext, c(paste("    (fc > ", genelist.foldchange,")"), test))) ## spacing is a hack
})
write.table(df, file=out.genelist, sep="\t", quote=F)
}
## Driver: render all diagnostics into one PDF, run the configured pipeline
## stages, and persist the final SCseq object as RDS.
pdf(out.pdf)
if (use.filtnormconf){
sc <- do.filter(sc)
## Report how much of the raw matrix survived filtering.
message(paste(" - Source:: genes:",nrow(sc@expdata),", cells:",ncol(sc@expdata)))
message(paste(" - Filter:: genes:",nrow(sc@ndata),", cells:",ncol(sc@ndata)))
message(paste("         :: ",
sprintf("%.1f", 100 * nrow(sc@ndata)/nrow(sc@expdata)), "% of genes remain,",
sprintf("%.1f", 100 * ncol(sc@ndata)/ncol(sc@expdata)), "% of cells remain"))
}
if (use.cluster){
## 2x2 panel layout for each stage's diagnostic plots.
par(mfrow=c(2,2))
sc <- do.cluster(sc)
par(mfrow=c(2,2))
sc <- do.outlier(sc)
par(mfrow=c(2,2), mar=c(1,1,6,1))
sc <- do.clustmap(sc)
mkgenelist(sc)
}
dev.off()
saveRDS(sc, out.rdat)
|
/tools/raceid/scripts/cluster.R
|
permissive
|
cgirardot/tools-iuc
|
R
| false
| false
| 5,220
|
r
|
#!/usr/bin/env R
## RaceID clustering pipeline driver (duplicate copy, kept verbatim).
## Usage: one argument, a config file sourced below; it presumably defines
## the filt.*/clust.*/outlier.*/cluster.*/genelist.*/use.*/out.* globals and
## the SCseq object `sc` -- confirm against the caller.
VERSION = "0.3"
args = commandArgs(trailingOnly = T)
if (length(args) != 1){
message(paste("VERSION:", VERSION))
stop("Please provide the config file")
}
suppressWarnings(suppressPackageStartupMessages(require(RaceID)))
suppressWarnings(suppressPackageStartupMessages(require(scran)))
source(args[1])
## Filter raw counts, plot before/after histograms, optionally cell-cycle
## correct. Returns the updated SCseq object.
do.filter <- function(sc){
if (!is.null(filt.lbatch.regexes)){
lar <- filt.lbatch.regexes
nn <- colnames(sc@expdata)
filt$LBatch <- lapply(1:length(lar), function(m){ return( nn[grep(lar[[m]], nn)] ) })
}
sc <- do.call(filterdata, c(sc, filt))
## Get histogram metrics for library size and number of features
raw.lib <- log10(colSums(as.matrix(sc@expdata)))
raw.feat <- log10(colSums(as.matrix(sc@expdata)>0))
filt.lib <- log10(colSums(getfdata(sc)))
filt.feat <- log10(colSums(getfdata(sc)>0))
br <- 50
## Determine limits on plots based on the unfiltered data
## (doesn't work, R rejects limits and norm data is too different to compare to exp data
## so let them keep their own ranges)
## betterrange <- function(floatval){
##     return(10 * (floor(floatval / 10) + 1))
## }
## tmp.lib <- hist(raw.lib, breaks=br, plot=F)
## tmp.feat <- hist(raw.feat, breaks=br, plot=F)
## lib.y_lim <- c(0,betterrange(max(tmp.lib$counts)))
## lib.x_lim <- c(0,betterrange(max(tmp.lib$breaks)))
## feat.y_lim <- c(0,betterrange(max(tmp.feat$counts)))
## feat.x_lim <- c(0,betterrange(max(tmp.feat$breaks)))
par(mfrow=c(2,2))
print(hist(raw.lib, breaks=br, main="RawData Log10 LibSize")) # , xlim=lib.x_lim, ylim=lib.y_lim)
print(hist(raw.feat, breaks=br, main="RawData Log10 NumFeat")) #, xlim=feat.x_lim, ylim=feat.y_lim)
print(hist(filt.lib, breaks=br, main="FiltData Log10 LibSize")) # , xlim=lib.x_lim, ylim=lib.y_lim)
tmp <- hist(filt.feat, breaks=br, main="FiltData Log10 NumFeat") # , xlim=feat.x_lim, ylim=feat.y_lim)
print(tmp)
## required, for extracting midpoint
unq <- unique(filt.feat)
if (length(unq) == 1){
abline(v=unq, col="red", lw=2)
text(tmp$mids, table(filt.feat)[[1]] - 100, pos=1, paste(10^unq, "\nFeatures\nin remaining\nCells", sep=""), cex=0.8)
}
if (filt.use.ccorrect){
par(mfrow=c(2,2))
sc <- do.call(CCcorrect, c(sc, filt.ccc))
print(plotdimsat(sc, change=T))
print(plotdimsat(sc, change=F))
}
return(sc)
}
## Compute distances and cluster; emit saturation/Jaccard diagnostics.
do.cluster <- function(sc){
sc <- do.call(compdist, c(sc, clust.compdist))
sc <- do.call(clustexp, c(sc, clust.clustexp))
if (clust.clustexp$sat){
print(plotsaturation(sc, disp=F))
print(plotsaturation(sc, disp=T))
}
print(plotjaccard(sc))
return(sc)
}
## Find outlier cells, optionally random-forest correct, draw heatmaps.
do.outlier <- function(sc){
sc <- do.call(findoutliers, c(sc, outlier.findoutliers))
if (outlier.use.randomforest){
sc <- do.call(rfcorrect, c(sc, outlier.rfcorrect))
}
print(plotbackground(sc))
print(plotsensitivity(sc))
print(plotoutlierprobs(sc))
## Heatmaps
test1 <- list()
test1$side = 3
test1$line = 0 #1 #3
x <- clustheatmap(sc, final=FALSE)
print(do.call(mtext, c(paste("(Initial)"), test1))) ## spacing is a hack
x <- clustheatmap(sc, final=TRUE)
print(do.call(mtext, c(paste("(Final)"), test1))) ## spacing is a hack
return(sc)
}
## Compute t-SNE and Fruchterman-Reingold layouts.
do.clustmap <- function(sc){
sc <- do.call(comptsne, c(sc, cluster.comptsne))
sc <- do.call(compfr, c(sc, cluster.compfr))
return(sc)
}
## Per-cluster differential genes: plots plus a combined table written to
## out.genelist. NOTE(review): `df` grows via `<<-` inside lapply; the
## `test$line` edits affect a per-iteration local copy (copy-on-modify).
mkgenelist <- function(sc){
## Layout
test <- list()
test$side = 3
test$line = 0 #1 #3
test$cex = 0.8
df <- c()
options(cex = 1)
lapply(unique(sc@cpart), function(n){
dg <- clustdiffgenes(sc, cl=n, pvalue=genelist.pvalue)
dg.goi <- dg[dg$fc > genelist.foldchange,]
dg.goi.table <- head(dg.goi, genelist.tablelim)
df <<- rbind(df, cbind(n, dg.goi.table))
goi <- head(rownames(dg.goi.table), genelist.plotlim)
print(plotmarkergenes(sc, goi))
print(do.call(mtext, c(paste("    Cluster ",n), test))) ## spacing is a hack
test$line=-1
print(do.call(mtext, c(paste("    Sig. Genes"), test))) ## spacing is a hack
test$line=-2
print(do.call(mtext, c(paste("    (fc > ", genelist.foldchange,")"), test))) ## spacing is a hack
})
write.table(df, file=out.genelist, sep="\t", quote=F)
}
## Driver: run the configured stages, all plots into one PDF, persist RDS.
pdf(out.pdf)
if (use.filtnormconf){
sc <- do.filter(sc)
message(paste(" - Source:: genes:",nrow(sc@expdata),", cells:",ncol(sc@expdata)))
message(paste(" - Filter:: genes:",nrow(sc@ndata),", cells:",ncol(sc@ndata)))
message(paste("         :: ",
sprintf("%.1f", 100 * nrow(sc@ndata)/nrow(sc@expdata)), "% of genes remain,",
sprintf("%.1f", 100 * ncol(sc@ndata)/ncol(sc@expdata)), "% of cells remain"))
}
if (use.cluster){
par(mfrow=c(2,2))
sc <- do.cluster(sc)
par(mfrow=c(2,2))
sc <- do.outlier(sc)
par(mfrow=c(2,2), mar=c(1,1,6,1))
sc <- do.clustmap(sc)
mkgenelist(sc)
}
dev.off()
saveRDS(sc, out.rdat)
|
\name{qr_plot}
\alias{qr_plot}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Plot
}
\description{
Plot of posterior distribution of effects
}
\usage{
qr_plot(object, index = 1,
xlab = "Quantile level", ylab = "Covariate effect",
main = "", col = gray(0.75), lwd = 1, add = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{An object of class qreg or qreg_spline}
\item{index}{Integer describing which predictor to plot}
\item{xlab}{X axis label}
\item{ylab}{Y axis label}
\item{main}{plot title}
\item{col}{plot color}
\item{lwd}{line width}
\item{add}{Superimposed plot}
}
|
/man/qr_plot.Rd
|
no_license
|
cran/BSquare
|
R
| false
| false
| 670
|
rd
|
\name{qr_plot}
\alias{qr_plot}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Plot
}
\description{
Plot of posterior distribution of effects
}
\usage{
qr_plot(object, index = 1,
xlab = "Quantile level", ylab = "Covariate effect",
main = "", col = gray(0.75), lwd = 1, add = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{object}{An object of class qreg or qreg_spline}
\item{index}{Integer describing which predictor to plot}
\item{xlab}{X axis label}
\item{ylab}{Y axis label}
\item{main}{plot title}
\item{col}{plot color}
\item{lwd}{line width}
\item{add}{Superimposed plot}
}
|
\name{as.output}
\alias{as.output}
\alias{as.output.default}
\alias{as.output.data.frame}
\alias{as.output.list}
\alias{as.output.matrix}
\alias{as.output.table}
\alias{as.output.raw}
\alias{iotools.stdout}
\alias{iotools.stderr}
\alias{iotools.fd}
\title{
Character Output
}
\description{
Create objects of class \code{output}.
}
\usage{
as.output(x, ...)
}
\arguments{
\item{x}{object to be converted to an instance of \code{output}.}
\item{...}{optional arguments to be passed to implementing methods
of \code{as.output}. Most methods support the following arguments:
\code{sep} string, column/value separator, \code{nsep} string, key
separator, \code{keys} either a logical (if \code{FALSE} names/row
names are suppressed) or a character vector with overriding
keys. The default for \code{keys} typically varies by class or is
auto-detected (e.g., named vectors use names as keys,
\code{data.frames} use row names if they are non-automatic
etc.). All methods also support \code{con} argument which pushes
the output into a connection instead of generating an output object
- so \code{as.output(x, con=...)} is thus not a coercion but used
only for its side-effect.
Note that \code{con} also supports special values
\code{iotools.stdout}, \code{iotools.stderr} and
\code{iotools.fd(fd)} which write directly into the corresponding
streams instead of using the connection API.
}
}
\details{
\code{as.output} is generic, and methods can be written to support
new classes. The output is meant to be a raw vector suitable for
writing to the disk or sending over a connection.
}
\value{
if \code{con} is set to a connection then the result is \code{NULL}
and the method is used for its side-effect, otherwise the result is a
raw vector.
Side note: we cannot create a formal type of \code{output}, because
\code{writeBin} does \code{is.vector()} check which doesn't dispatch
and prevents anything with a class to be written.
}
\author{
Simon Urbanek
}
\examples{
m = matrix(sample(letters), ncol=2)
as.output(m)
df = data.frame(a = sample(letters), b = runif(26), c = sample(state.abb,26))
str(as.output(df))
as.output(df, con=iotools.stdout)
}
\keyword{manip}
|
/man/asoutput.Rd
|
no_license
|
s-u/iotools
|
R
| false
| false
| 2,261
|
rd
|
\name{as.output}
\alias{as.output}
\alias{as.output.default}
\alias{as.output.data.frame}
\alias{as.output.list}
\alias{as.output.matrix}
\alias{as.output.table}
\alias{as.output.raw}
\alias{iotools.stdout}
\alias{iotools.stderr}
\alias{iotools.fd}
\title{
Character Output
}
\description{
Create objects of class \code{output}.
}
\usage{
as.output(x, ...)
}
\arguments{
\item{x}{object to be converted to an instance of \code{output}.}
\item{...}{optional arguments to be passed to implementing methods
of \code{as.output}. Most methods support the following arguments:
\code{sep} string, column/value separator, \code{nsep} string, key
separator, \code{keys} either a logical (if \code{FALSE} names/row
names are suppressed) or a character vector with overriding
keys. The default for \code{keys} typically varies by class or is
auto-detected (e.g., named vectors use names as keys,
\code{data.frames} use row names if they are non-automatic
etc.). All methods also support \code{con} argument which pushes
the output into a connection instead of generating an output object
- so \code{as.output(x, con=...)} is thus not a coercion but used
only for its side-effect.
Note that \code{con} also supports special values
\code{iotools.stdout}, \code{iotools.stderr} and
\code{iotools.fd(fd)} which write directly into the corresponding
streams instead of using the connection API.
}
}
\details{
\code{as.output} is generic, and methods can be written to support
new classes. The output is meant to be a raw vector suitable for
writing to the disk or sending over a connection.
}
\value{
if \code{con} is set to a connection then the result is \code{NULL}
and the method is used for its side-effect, otherwise the result is a
raw vector.
Side note: we cannot create a formal type of \code{output}, because
\code{writeBin} does \code{is.vector()} check which doesn't dispatch
and prevents anything with a class to be written.
}
\author{
Simon Urbanek
}
\examples{
m = matrix(sample(letters), ncol=2)
as.output(m)
df = data.frame(a = sample(letters), b = runif(26), c = sample(state.abb,26))
str(as.output(df))
as.output(df, con=iotools.stdout)
}
\keyword{manip}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Methods.R
\docType{methods}
\name{useMySQL,EnsDb-method}
\alias{useMySQL,EnsDb-method}
\alias{useMySQL}
\title{Use a MySQL backend}
\usage{
\S4method{useMySQL}{EnsDb}(x, host = "localhost", port = 3306, user, pass)
}
\arguments{
\item{x}{The \code{\linkS4class{EnsDb}} object.}
\item{host}{Character vector specifying the host on which the MySQL
server runs.}
\item{port}{The port on which the MySQL server can be accessed.}
\item{user}{The user name for the MySQL server.}
\item{pass}{The password for the MySQL server.}
}
\value{
A \code{\linkS4class{EnsDb}} object providing access to the
data stored in the MySQL backend.
}
\description{
Change the SQL backend from \emph{SQLite} to \emph{MySQL}.
When first called on an \code{\linkS4class{EnsDb}} object, the function
tries to create and save all of the data into a MySQL database. All
subsequent calls will connect to the already existing MySQL database.
}
\details{
This functionality requires that the \code{RMySQL} package is
installed and that the user has (write) access to a running MySQL server.
If the corresponding database already exists, users without write
access can use this functionality.
}
\note{
At present the function does not evaluate whether the versions
between the SQLite and MySQL database differ.
}
\examples{
## Load the EnsDb database (SQLite backend).
library(EnsDb.Hsapiens.v75)
edb <- EnsDb.Hsapiens.v75
## Now change the backend to MySQL; my_user and my_pass should
## be the user name and password to access the MySQL server.
\dontrun{
edb_mysql <- useMySQL(edb, host = "localhost", user = my_user, pass = my_pass)
}
}
\author{
Johannes Rainer
}
|
/man/useMySQL-EnsDb-method.Rd
|
no_license
|
YTLogos/ensembldb
|
R
| false
| true
| 1,752
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Methods.R
\docType{methods}
\name{useMySQL,EnsDb-method}
\alias{useMySQL,EnsDb-method}
\alias{useMySQL}
\title{Use a MySQL backend}
\usage{
\S4method{useMySQL}{EnsDb}(x, host = "localhost", port = 3306, user, pass)
}
\arguments{
\item{x}{The \code{\linkS4class{EnsDb}} object.}
\item{host}{Character vector specifying the host on which the MySQL
server runs.}
\item{port}{The port on which the MySQL server can be accessed.}
\item{user}{The user name for the MySQL server.}
\item{pass}{The password for the MySQL server.}
}
\value{
A \code{\linkS4class{EnsDb}} object providing access to the
data stored in the MySQL backend.
}
\description{
Change the SQL backend from \emph{SQLite} to \emph{MySQL}.
When first called on an \code{\linkS4class{EnsDb}} object, the function
tries to create and save all of the data into a MySQL database. All
subsequent calls will connect to the already existing MySQL database.
}
\details{
This functionality requires that the \code{RMySQL} package is
installed and that the user has (write) access to a running MySQL server.
If the corresponding database already exists, users without write
access can use this functionality.
}
\note{
At present the function does not evaluate whether the versions
between the SQLite and MySQL database differ.
}
\examples{
## Load the EnsDb database (SQLite backend).
library(EnsDb.Hsapiens.v75)
edb <- EnsDb.Hsapiens.v75
## Now change the backend to MySQL; my_user and my_pass should
## be the user name and password to access the MySQL server.
\dontrun{
edb_mysql <- useMySQL(edb, host = "localhost", user = my_user, pass = my_pass)
}
}
\author{
Johannes Rainer
}
|
library(lubridate)
library(data.table)
# Read the raw household power-consumption file; "?" encodes missing values.
# Columns 1-2 (Date, Time) are kept as character, the remaining 7 as numeric.
powerconsumption <- read.table("./household_power_consumption.txt", sep = ";", header = TRUE,
na.strings = "?", colClasses = c(rep("character", 2),rep("numeric", 7)))
# Keep only observations from 1 and 2 Feb 2007; dates are "d/m/yyyy" strings
# in column 1, hence the anchored regular-expression match.
powerconsumption <- powerconsumption[grep("^(1|2)/2/2007",powerconsumption[,1]),]
# Build a combined date-time value in a new 10th column from Date + Time.
powerconsumption[,10] <- dmy(powerconsumption[,1]) + hms(powerconsumption[,2])
# Open the PNG graphics device (default 480x480 pixels).
png(filename = "./plot3.png")
# Draw the three sub-metering series: column 7 (black), 8 (red), 9 (blue).
plot(powerconsumption[,10], powerconsumption[,7], type='l', col = "black", xlab = "", ylab = "Energy sub metering")
lines(powerconsumption[,10], powerconsumption[,8], col = "red")
lines(powerconsumption[,10], powerconsumption[,9], col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
col = c("black", "red", "blue"), lty = "solid")
# Close the device; the assignment suppresses the console printout of dev.off().
dev <- dev.off()
|
/plot3.R
|
no_license
|
tiensen/ExData_Plotting1
|
R
| false
| false
| 1,016
|
r
|
library(lubridate)
library(data.table)
# Read the raw household power-consumption file; "?" encodes missing values.
# Columns 1-2 (Date, Time) are kept as character, the remaining 7 as numeric.
powerconsumption <- read.table("./household_power_consumption.txt", sep = ";", header = TRUE,
na.strings = "?", colClasses = c(rep("character", 2),rep("numeric", 7)))
# Keep only observations from 1 and 2 Feb 2007; dates are "d/m/yyyy" strings
# in column 1, hence the anchored regular-expression match.
powerconsumption <- powerconsumption[grep("^(1|2)/2/2007",powerconsumption[,1]),]
# Build a combined date-time value in a new 10th column from Date + Time.
powerconsumption[,10] <- dmy(powerconsumption[,1]) + hms(powerconsumption[,2])
# Open the PNG graphics device (default 480x480 pixels).
png(filename = "./plot3.png")
# Draw the three sub-metering series: column 7 (black), 8 (red), 9 (blue).
plot(powerconsumption[,10], powerconsumption[,7], type='l', col = "black", xlab = "", ylab = "Energy sub metering")
lines(powerconsumption[,10], powerconsumption[,8], col = "red")
lines(powerconsumption[,10], powerconsumption[,9], col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
col = c("black", "red", "blue"), lty = "solid")
# Close the device; the assignment suppresses the console printout of dev.off().
dev <- dev.off()
|
## Read the full dataset; "?" encodes missing values.
data_full <- read.csv("G:/Data_Science/Exploratory data analysis/Project/exdata-data-household_power_consumption/household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data_full$Date <- as.Date(data_full$Date, format="%d/%m/%Y")
## Keep only 1-2 Feb 2007 and free the full table.
data <- subset(data_full, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_full)
## Combine Date and Time into a POSIXct timestamp for the x axis.
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
## Plot 3: the three sub-metering series.
## Fix: the y-axis label previously read "Global Active Power (kilowatts)"
## (copy-paste from plot 2) even though sub-metering values are plotted.
with(data, {
plot(Sub_metering_1~Datetime, type="l",
ylab="Energy sub metering", xlab="")
lines(Sub_metering_2~Datetime,col='Red')
lines(Sub_metering_3~Datetime,col='Blue')
})
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## Copy the screen device to a 480x480 PNG file and close the PNG device.
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off()
|
/Plot3.R
|
no_license
|
SUBITCHAKA-NAGASUNDARAM/exp_data_analysis
|
R
| false
| false
| 1,026
|
r
|
## Read the full dataset; "?" encodes missing values.
data_full <- read.csv("G:/Data_Science/Exploratory data analysis/Project/exdata-data-household_power_consumption/household_power_consumption.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data_full$Date <- as.Date(data_full$Date, format="%d/%m/%Y")
## Keep only 1-2 Feb 2007 and free the full table.
data <- subset(data_full, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_full)
## Combine Date and Time into a POSIXct timestamp for the x axis.
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
## Plot 3: the three sub-metering series.
## Fix: the y-axis label previously read "Global Active Power (kilowatts)"
## (copy-paste from plot 2) even though sub-metering values are plotted.
with(data, {
plot(Sub_metering_1~Datetime, type="l",
ylab="Energy sub metering", xlab="")
lines(Sub_metering_2~Datetime,col='Red')
lines(Sub_metering_3~Datetime,col='Blue')
})
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2,
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## Copy the screen device to a 480x480 PNG file and close the PNG device.
dev.copy(png, file="plot3.png", height=480, width=480)
dev.off()
|
library("plyr")
library("dplyr")
library("reshape2")
library("jsonlite")
library("stringr")
# ------------------------
# string manipulate
# todo: 19xx-20xx
#' Test whether each string ends with a "_YYYY" suffix (19xx or 20xx).
#'
#' @param x character vector
#' @return logical vector, TRUE where the suffix matches
endWithYear=function(x){
  stringr::str_detect(x, "_((?:20|19)\\d{2})$")
}
#' Split strings of the form "<field>_<year>" into their parts.
#'
#' @param x character-vector
#' @return data_frame(x,field,year,isTimeSeries); `field` and `year` are NA and
#'   `isTimeSeries` is FALSE when no "_YYYY" (19xx/20xx) suffix is present.
#' NOTE(review): dplyr::data_frame() is deprecated in favour of tibble::tibble().
extractFieldYear=function(x){
# str_match: column 1 = full match, 2 = field capture, 3 = year capture
result= x %>% stringr::str_match("^(.*)_((?:20|19)\\d{2})$")
field=result[,2]
year =result[,3] %>% as.integer()
data_frame(x=x,field=field,year=year,isTimeSeries=!is.na(year))
}
# ------------------------
# Map NULL to NA; every non-NULL value passes through unchanged.
xNULL2xNA=function(x){
  if (is.null(x)) NA else x
}
#' First non-NA element of `x`.
#'
#' @return the leftmost non-NA value; NA if (all(is.na(x)))
getFirstElementNotNa=function(x) {
  not_na = which(!is.na(x))
  if (length(not_na) == 0) NA else x[not_na[1]]
}
#' Remove rows where every checked column is NA.
#'
#' @param x data.frame
#' @param exceptColumns column names to ignore when testing for all-NA rows
#' @return `x` without the rows whose checked columns are all NA
# The row-wise plyr::alply/laply scan (noted to hit "odd bugs") is replaced by
# a vectorized base-R rowSums test: same result, one pass, no plyr dependency.
rmNArow=function(x,exceptColumns=character()){
  stopifnot( is.data.frame(x) )
  cols_check = setdiff(colnames(x), exceptColumns)
  # drop=FALSE keeps a data.frame even when a single column remains
  x_check = x[, cols_check, drop = FALSE]
  # keep a row when it has at least one non-NA entry among the checked columns
  keep = rowSums(!is.na(x_check)) > 0
  x[keep, ]
}
# For each string in `x`, the index (into `categories`) of the first regex
# pattern it matches; NA when none match.
categoryInString=function(x,categories){
  hits = lapply(categories, function(pat) stringr::str_detect(x, pat))
  # matrix: one row per element of x, one column per category
  hit_matrix = do.call(cbind, hits)
  findMostLeftTrue(hit_matrix)
}
# > m=matrix(c(F,T,T,F,F,F,T,T,F),ncol = 3,byrow = T)
# [,1] [,2] [,3]
# [1,] FALSE TRUE TRUE
# [2,] FALSE FALSE FALSE
# [3,] TRUE TRUE FALSE
# > findMostLeftTrue(m)
# 1 2 3
# 2 NA 1
#' Per row of a logical matrix, the column index of the leftmost TRUE.
#'
#' @param x logical matrix
#' @return integer vector, one entry per row; NA_integer_ where the row has no TRUE
# Rewritten with base::apply instead of plyr::aaply: same output, no plyr.
findMostLeftTrue=function(x){
  stopifnot( is.matrix(x) , is.logical(x) )
  first_true = apply(x, 1, function(row) {
    hit = which(row)
    if (length(hit) == 0) NA else hit[1]
  })
  # as.integer() also normalizes the all-NA case (logical NA -> NA_integer_)
  as.integer(first_true)
}
# > m=matrix(c(1,NA,NA,NA,NA,NA,NA,2,NA),ncol = 3,byrow = T)
# [,1] [,2] [,3]
# [1,] 1 NA NA
# [2,] NA NA NA
# [3,] NA 2 NA
# > findMostLeftNotNA(m)
# [1] 1 NA 2
#' Per row of a matrix, the leftmost non-NA value (NA when the row is all NA).
#'
#' @param x matrix
#' @return vector with one entry per row of `x`
# Unused local `nc` removed; 1:nrow(x) replaced by the 0-row-safe seq_len().
findMostLeftNotNA=function(x) {
  # column index of the first non-NA entry per row (NA if none)
  first_col = findMostLeftTrue( !is.na(x) )
  # matrix indexing: an NA column index yields NA for that row
  x[cbind(seq_len(nrow(x)), first_col)]
}
#' Names of the columns of `df` for which `fun(column)` is TRUE.
#'
#' @param df data.frame
#' @param fun predicate applied to each column (e.g. is.numeric)
#' @return character vector of matching column names
# plyr::laply replaced by base::vapply with a typed logical(1) result.
selectColumnNames=function(df,fun) {
  stopifnot( is.data.frame(df) )
  cols = names(df)
  keep = vapply(cols, function(col) fun(df[[col]]), logical(1))
  cols[keep]
}
# Keep only the columns of `df` for which `fun` is TRUE.
# drop=FALSE guarantees a data.frame even when one column survives.
selectColumns=function(df,fun) {
  wanted = selectColumnNames(df, fun)
  df[, wanted, drop = FALSE]
}
# Convenience wrappers: keep only the numeric / character columns of `df`.
selectColumnsNumeric =function(df) { selectColumns(df,is.numeric) }
selectColumnsCharacter=function(df) { selectColumns(df,is.character) }
#' rbind dataframes in @x
#' use all fields, fill in NA when such field not exists
#' similar to jsonlite:::simplifyDataFrame(flatten=TRUE), but without odd bugs
#'
#' @param x a list of dataframes (NULL entries are dropped)
#' @param parallel passed to plyr's .parallel; requires a registered backend
#' @return one data.frame with the union of all columns; absent fields are NA
zzlist2df2=function(x,parallel=F)
{
# plyr::compact
# remove NULL
x=Filter(Negate(is.null),x)
# as.data.frame
x=llply(x,.parallel = parallel,
.fun=function(x) {as.data.frame(x,stringsAsFactors=F)})
# get all field_names
list_field_names=llply(x,names)
field_names_all=unique(unlist(list_field_names))
# pad each data.frame with the columns it lacks, then row-bind them all
df=ldply(x,.parallel=parallel,.fun=function(x){
na_names=setdiff(field_names_all,names(x))
x[na_names]<-NA
x
})
df
}
# Convert character columns of `df` to numeric/logical/etc. where possible,
# using utils::type.convert.
#
# @param df a data.frame
# @param as.is if TRUE, unconvertible columns stay character (not factor)
# @return `df` with convertible character columns replaced by parsed values
# plyr::l_ply with a `<<-` side effect replaced by a plain loop: same result,
# no plyr dependency, no non-local assignment.
convertCharacterColumns=function(df,as.is=TRUE){
  for (col in names(df)) {
    if (is.character(df[[col]]))
      df[[col]] <- type.convert(df[[col]], as.is = as.is)
  }
  df
}
|
/util.R
|
no_license
|
JasonXu12/Big-data-and-financial-analysis-system
|
R
| false
| false
| 3,756
|
r
|
library("plyr")
library("dplyr")
library("reshape2")
library("jsonlite")
library("stringr")
# ------------------------
# string manipulate
# todo: 19xx-20xx
#' Test whether each string ends with a "_YYYY" suffix (19xx or 20xx).
#'
#' @param x character vector
#' @return logical vector, TRUE where the suffix matches
endWithYear=function(x){
  stringr::str_detect(x, "_((?:20|19)\\d{2})$")
}
#' Split strings of the form "<field>_<year>" into their parts.
#'
#' @param x character-vector
#' @return data_frame(x,field,year,isTimeSeries); `field` and `year` are NA and
#'   `isTimeSeries` is FALSE when no "_YYYY" (19xx/20xx) suffix is present.
#' NOTE(review): dplyr::data_frame() is deprecated in favour of tibble::tibble().
extractFieldYear=function(x){
# str_match: column 1 = full match, 2 = field capture, 3 = year capture
result= x %>% stringr::str_match("^(.*)_((?:20|19)\\d{2})$")
field=result[,2]
year =result[,3] %>% as.integer()
data_frame(x=x,field=field,year=year,isTimeSeries=!is.na(year))
}
# ------------------------
# Map NULL to NA; every non-NULL value passes through unchanged.
xNULL2xNA=function(x){
  if (is.null(x)) NA else x
}
#' First non-NA element of `x`.
#'
#' @return the leftmost non-NA value; NA if (all(is.na(x)))
getFirstElementNotNa=function(x) {
  not_na = which(!is.na(x))
  if (length(not_na) == 0) NA else x[not_na[1]]
}
#' Remove rows where every checked column is NA.
#'
#' @param x data.frame
#' @param exceptColumns column names to ignore when testing for all-NA rows
#' @return `x` without the rows whose checked columns are all NA
# The row-wise plyr::alply/laply scan (noted to hit "odd bugs") is replaced by
# a vectorized base-R rowSums test: same result, one pass, no plyr dependency.
rmNArow=function(x,exceptColumns=character()){
  stopifnot( is.data.frame(x) )
  cols_check = setdiff(colnames(x), exceptColumns)
  # drop=FALSE keeps a data.frame even when a single column remains
  x_check = x[, cols_check, drop = FALSE]
  # keep a row when it has at least one non-NA entry among the checked columns
  keep = rowSums(!is.na(x_check)) > 0
  x[keep, ]
}
# For each string in `x`, the index (into `categories`) of the first regex
# pattern it matches; NA when none match.
categoryInString=function(x,categories){
  hits = lapply(categories, function(pat) stringr::str_detect(x, pat))
  # matrix: one row per element of x, one column per category
  hit_matrix = do.call(cbind, hits)
  findMostLeftTrue(hit_matrix)
}
# > m=matrix(c(F,T,T,F,F,F,T,T,F),ncol = 3,byrow = T)
# [,1] [,2] [,3]
# [1,] FALSE TRUE TRUE
# [2,] FALSE FALSE FALSE
# [3,] TRUE TRUE FALSE
# > findMostLeftTrue(m)
# 1 2 3
# 2 NA 1
#' Per row of a logical matrix, the column index of the leftmost TRUE.
#'
#' @param x logical matrix
#' @return integer vector, one entry per row; NA_integer_ where the row has no TRUE
# Rewritten with base::apply instead of plyr::aaply: same output, no plyr.
findMostLeftTrue=function(x){
  stopifnot( is.matrix(x) , is.logical(x) )
  first_true = apply(x, 1, function(row) {
    hit = which(row)
    if (length(hit) == 0) NA else hit[1]
  })
  # as.integer() also normalizes the all-NA case (logical NA -> NA_integer_)
  as.integer(first_true)
}
# > m=matrix(c(1,NA,NA,NA,NA,NA,NA,2,NA),ncol = 3,byrow = T)
# [,1] [,2] [,3]
# [1,] 1 NA NA
# [2,] NA NA NA
# [3,] NA 2 NA
# > findMostLeftNotNA(m)
# [1] 1 NA 2
#' Per row of a matrix, the leftmost non-NA value (NA when the row is all NA).
#'
#' @param x matrix
#' @return vector with one entry per row of `x`
# Unused local `nc` removed; 1:nrow(x) replaced by the 0-row-safe seq_len().
findMostLeftNotNA=function(x) {
  # column index of the first non-NA entry per row (NA if none)
  first_col = findMostLeftTrue( !is.na(x) )
  # matrix indexing: an NA column index yields NA for that row
  x[cbind(seq_len(nrow(x)), first_col)]
}
#' Names of the columns of `df` for which `fun(column)` is TRUE.
#'
#' @param df data.frame
#' @param fun predicate applied to each column (e.g. is.numeric)
#' @return character vector of matching column names
# plyr::laply replaced by base::vapply with a typed logical(1) result.
selectColumnNames=function(df,fun) {
  stopifnot( is.data.frame(df) )
  cols = names(df)
  keep = vapply(cols, function(col) fun(df[[col]]), logical(1))
  cols[keep]
}
# Keep only the columns of `df` for which `fun` is TRUE.
# drop=FALSE guarantees a data.frame even when one column survives.
selectColumns=function(df,fun) {
  wanted = selectColumnNames(df, fun)
  df[, wanted, drop = FALSE]
}
# Convenience wrappers: keep only the numeric / character columns of `df`.
selectColumnsNumeric =function(df) { selectColumns(df,is.numeric) }
selectColumnsCharacter=function(df) { selectColumns(df,is.character) }
#' rbind dataframes in @x
#' use all fields, fill in NA when such field not exists
#' similar to jsonlite:::simplifyDataFrame(flatten=TRUE), but without odd bugs
#'
#' @param x a list of dataframes (NULL entries are dropped)
#' @param parallel passed to plyr's .parallel; requires a registered backend
#' @return one data.frame with the union of all columns; absent fields are NA
zzlist2df2=function(x,parallel=F)
{
# plyr::compact
# remove NULL
x=Filter(Negate(is.null),x)
# as.data.frame
x=llply(x,.parallel = parallel,
.fun=function(x) {as.data.frame(x,stringsAsFactors=F)})
# get all field_names
list_field_names=llply(x,names)
field_names_all=unique(unlist(list_field_names))
# pad each data.frame with the columns it lacks, then row-bind them all
df=ldply(x,.parallel=parallel,.fun=function(x){
na_names=setdiff(field_names_all,names(x))
x[na_names]<-NA
x
})
df
}
# Convert character columns of `df` to numeric/logical/etc. where possible,
# using utils::type.convert.
#
# @param df a data.frame
# @param as.is if TRUE, unconvertible columns stay character (not factor)
# @return `df` with convertible character columns replaced by parsed values
# plyr::l_ply with a `<<-` side effect replaced by a plain loop: same result,
# no plyr dependency, no non-local assignment.
convertCharacterColumns=function(df,as.is=TRUE){
  for (col in names(df)) {
    if (is.character(df[[col]]))
      df[[col]] <- type.convert(df[[col]], as.is = as.is)
  }
  df
}
|
# Data preparation for the STNN, by neighbourhood (barrio) and hour range.
# Dry run of the real experiment.
# Load packages and dataset
library(data.table); library(lubridate); library(plyr); library(tidyr)
load("../Accidentes de tráfico - Madrid/Cleaned_data/BarriosAccidentalidad.RData")
car_crash <- BarriosAccidentalidad[ , .N, by = list(FECHA, `RANGO HORARIO`, BARRIO,lon, lat)]
car_crash <- na.omit(car_crash)
car_crash[,FECHA := dmy(FECHA)]
car_crash[, N := NULL]
car_crash <- car_crash[year(FECHA) == 2018]
# Keep the starting hour of each "RANGO HORARIO" label (first 5 characters).
# NOTE(review): tidyr::extract_numeric() is deprecated; readr::parse_number()
# is the documented replacement — confirm before upgrading tidyr.
car_crash$`RANGO HORARIO` <- extract_numeric(substr(car_crash$`RANGO HORARIO`, start = 1, stop = 5))
car_crash <- car_crash[, .N, by = list(FECHA, `RANGO HORARIO`, BARRIO)]
colnames(car_crash)[4] <- "Número de accidentes"
num_dias = 365
num_zonas = 131
num_horas = num_zonas*24
# Full grid of every (date, hour, neighbourhood) combination for 2018.
fechas_horas_y_zonas <- data.table(FECHA = sort(rep(seq(ymd('2018-01-01'), ymd('2018-12-31'), by = '1 day'), num_horas)),
"RANGO HORARIO" = sort(rep(seq(0,23), num_zonas)),
BARRIO = rep(sort(unique(car_crash$BARRIO)), times = num_dias))
# Join with car_crash and assign 0 to all zones without accidents, then
# min-max normalize the counts.
number_crash <- join(fechas_horas_y_zonas, car_crash)
number_crash[is.na(number_crash)] <- 0
normalize <- function(x) {
return ((x - min(x)) / (max(x) - min(x)))
}
number_crash$`Número de accidentes` <- normalize(number_crash$`Número de accidentes`)
# Wide matrix: one row per (date, hour), one column per neighbourhood.
crash <- dcast(number_crash, FECHA + `RANGO HORARIO` ~ BARRIO)
crash <- crash[, c("FECHA", "RANGO HORARIO") := NULL]
# write.table(crash, file = "Raw_data/crash.csv", row.names = FALSE, col.names = FALSE)
|
/Accidentes de tráfico - Madrid/Scripts/Fase 3/prueba_stnn.R
|
no_license
|
rdemedrano/RM-TFM-2019
|
R
| false
| false
| 1,695
|
r
|
# Data preparation for the STNN, by neighbourhood (barrio) and hour range.
# Dry run of the real experiment.
# Load packages and dataset
library(data.table); library(lubridate); library(plyr); library(tidyr)
load("../Accidentes de tráfico - Madrid/Cleaned_data/BarriosAccidentalidad.RData")
car_crash <- BarriosAccidentalidad[ , .N, by = list(FECHA, `RANGO HORARIO`, BARRIO,lon, lat)]
car_crash <- na.omit(car_crash)
car_crash[,FECHA := dmy(FECHA)]
car_crash[, N := NULL]
car_crash <- car_crash[year(FECHA) == 2018]
# Keep the starting hour of each "RANGO HORARIO" label (first 5 characters).
# NOTE(review): tidyr::extract_numeric() is deprecated; readr::parse_number()
# is the documented replacement — confirm before upgrading tidyr.
car_crash$`RANGO HORARIO` <- extract_numeric(substr(car_crash$`RANGO HORARIO`, start = 1, stop = 5))
car_crash <- car_crash[, .N, by = list(FECHA, `RANGO HORARIO`, BARRIO)]
colnames(car_crash)[4] <- "Número de accidentes"
num_dias = 365
num_zonas = 131
num_horas = num_zonas*24
# Full grid of every (date, hour, neighbourhood) combination for 2018.
fechas_horas_y_zonas <- data.table(FECHA = sort(rep(seq(ymd('2018-01-01'), ymd('2018-12-31'), by = '1 day'), num_horas)),
"RANGO HORARIO" = sort(rep(seq(0,23), num_zonas)),
BARRIO = rep(sort(unique(car_crash$BARRIO)), times = num_dias))
# Join with car_crash and assign 0 to all zones without accidents, then
# min-max normalize the counts.
number_crash <- join(fechas_horas_y_zonas, car_crash)
number_crash[is.na(number_crash)] <- 0
normalize <- function(x) {
return ((x - min(x)) / (max(x) - min(x)))
}
number_crash$`Número de accidentes` <- normalize(number_crash$`Número de accidentes`)
# Wide matrix: one row per (date, hour), one column per neighbourhood.
crash <- dcast(number_crash, FECHA + `RANGO HORARIO` ~ BARRIO)
crash <- crash[, c("FECHA", "RANGO HORARIO") := NULL]
# write.table(crash, file = "Raw_data/crash.csv", row.names = FALSE, col.names = FALSE)
|
gmm <- function(data, k, init = NA, mu = NA, cov = NA,
maxiters = 1000, restarts = 10, del = 0.00001,
initFunc = kMeans){
# This function clusters the data using the gaussian mixture models algorithm
# (EM), restarting `restarts` times from the previous restart's parameters.
#
# args:
# data : An unlabelled dataframe
# k : Number clusters to initialize to
# init : Optional initial cluster assignment; computed by initFunc if absent
# mu : mean value if to be provided, calculated if not provided
# cov : A covariance matrix if to be provided, assumes a diagonal covariance
# if not provided
# restarts: Number of restarts to consider defaults at 10
# maxiters: Number of EM iterations per restart before giving up on convergence
# del : minimum log-likelihood improvement treated as convergence
# initFunc: Initial cluster generation, defaults to the kmeans algorithm and
# iterates using the EM-Algorithm from found clusters
#
#
# returns:
# A named list of the various parameters found during the algorithm namely
# the log likelihood estimation and the found clusters
mvpdf <- function(x, mu, sigma) {
# This function calculates the multivariate-normal pdf for the given dataset
#
# args:
# x : An unlabelled dataframe
# mu : Mean of the dimensions in the data
# sigma : Covariance matrix of the component
#
#
# returns:
# A vector with one density value per row of x
if (det(sigma) == 0) {
warning("Determinant is equal to 0.")
}
# NOTE(review): debug output — prints the determinant on every call
print(det(sigma))
# ginv (pseudo-inverse) keeps this usable when sigma is singular
apply(x, 1, function(x) {
exp(-(1/2) * (t(x) - mu) %*% MASS::ginv(sigma) %*%
t(t(x) - mu))/sqrt(det(2 * pi * sigma))
})
}
seqK <- seq(1,k,1)
# initialize clusters using the kmeans
# NOTE(review): is.na(init)/is.na(mu)/is.na(cov) return vectors when a
# matrix/list is supplied; a multi-element condition inside if() errors on
# R >= 4.2. The NA sentinel only works for the scalar default — TODO confirm.
if(is.na(init))
init <- matrix(initFunc(data, k = k)$clusters)
if(is.na(mu)){
mu <- split(data, init)
mu <- t(sapply(mu, colMeans))
}
if(is.na(cov))
# Covariance Matrix for each initial class.
cov <- lapply(seqK, function(x) diag(ncol(data)))
# Mixing Components (random, renormalized to sum to 1)
a <- runif(k)
a <- a/sum(a)
muHist <- vector(mode = "list")
covHist <- vector(mode = "list")
logLikeHist <- vector(mode = "list")
for(j in 1:restarts){
cat("Restart",j,"\n")
# Each restart resumes from the previous restart's converged parameters
if (j > 1){
mu = muHist[[j-1]]
cov = covHist[[j-1]]
}
logLikeIter <- vector()
for (i in 1:maxiters) {
cat("Starting Iter",i,"\n")
# Calculate PDF with class means and covariances.
b <- simplify2array(lapply(seqK, function(i){
mvpdf(data,mu[i,],cov[[i]])
}))
# Expectation Step for each class (responsibilities).
d <- simplify2array(lapply(seqK, function(i){
a[i]*b[,i]/rowSums(t((t(b) * a)))
}))
# Choose the highest rowwise probability (hard assignment)
eK <- factor(apply(d, 1, which.max))
# Total Responsibility
mc <- colSums(d)
# Update Mixing Components.
a <- mc/NROW(data)
# Update our Means
mu <- do.call(rbind,(lapply(seqK, function(i){
colSums(data * d[,i]) * 1/mc[i]
})))
# Update covariances (responsibility-weighted outer products)
cov <- lapply(seqK, function(i){
cov[[i]] <- t(d[,i] * t(apply(data,1,function(x) x - mu[i, ]))) %*%
(d[,i] * t(apply(data, 1, function(x) x - mu[i,]))) * 1/mc[i]
})
# Compute the sum of the mixture densities, take the log, and add the
# column vector.
loglik <- sum(log(apply(t(t(b) * a), 1, sum)))
logLikeIter[i] <- loglik
# Convergence: log-likelihood improved by less than `del`
if (i > 1){
if ((loglik -logLikeIter[i-1]) < del){
logLikeHist[[j]] <- loglik
muHist[[j]] <- mu
covHist[[j]] <- cov
# NOTE(review): break ignores its argument; the string is never shown.
break("Inner Break Condition Achieved")
}
}
}
# NOTE(review): logLikeHist/muHist/covHist[[j]] are only assigned on
# convergence above; if maxiters is exhausted without meeting `del`, the
# indexing below (and the final return) fails — TODO confirm intent.
if(j>1)
if(logLikeHist[[j-1]]==logLikeHist[[j]])
break
}
# eK comes from the last EM iteration executed (soft-max hard assignment)
return(list(data = data, k = k, clusters = eK,
mu = mu, logLike = logLikeHist[[j]], init = init))
}
|
/gaussianMixtureModel.R
|
no_license
|
SumedhSankhe/Unsupervised-machine-learning
|
R
| false
| false
| 3,793
|
r
|
gmm <- function(data, k, init = NA, mu = NA, cov = NA,
maxiters = 1000, restarts = 10, del = 0.00001,
initFunc = kMeans){
# This function clusters the data using the gaussian mixture models algorithm
# (EM), restarting `restarts` times from the previous restart's parameters.
#
# args:
# data : An unlabelled dataframe
# k : Number clusters to initialize to
# init : Optional initial cluster assignment; computed by initFunc if absent
# mu : mean value if to be provided, calculated if not provided
# cov : A covariance matrix if to be provided, assumes a diagonal covariance
# if not provided
# restarts: Number of restarts to consider defaults at 10
# maxiters: Number of EM iterations per restart before giving up on convergence
# del : minimum log-likelihood improvement treated as convergence
# initFunc: Initial cluster generation, defaults to the kmeans algorithm and
# iterates using the EM-Algorithm from found clusters
#
#
# returns:
# A named list of the various parameters found during the algorithm namely
# the log likelihood estimation and the found clusters
mvpdf <- function(x, mu, sigma) {
# This function calculates the multivariate-normal pdf for the given dataset
#
# args:
# x : An unlabelled dataframe
# mu : Mean of the dimensions in the data
# sigma : Covariance matrix of the component
#
#
# returns:
# A vector with one density value per row of x
if (det(sigma) == 0) {
warning("Determinant is equal to 0.")
}
# NOTE(review): debug output — prints the determinant on every call
print(det(sigma))
# ginv (pseudo-inverse) keeps this usable when sigma is singular
apply(x, 1, function(x) {
exp(-(1/2) * (t(x) - mu) %*% MASS::ginv(sigma) %*%
t(t(x) - mu))/sqrt(det(2 * pi * sigma))
})
}
seqK <- seq(1,k,1)
# initialize clusters using the kmeans
# NOTE(review): is.na(init)/is.na(mu)/is.na(cov) return vectors when a
# matrix/list is supplied; a multi-element condition inside if() errors on
# R >= 4.2. The NA sentinel only works for the scalar default — TODO confirm.
if(is.na(init))
init <- matrix(initFunc(data, k = k)$clusters)
if(is.na(mu)){
mu <- split(data, init)
mu <- t(sapply(mu, colMeans))
}
if(is.na(cov))
# Covariance Matrix for each initial class.
cov <- lapply(seqK, function(x) diag(ncol(data)))
# Mixing Components (random, renormalized to sum to 1)
a <- runif(k)
a <- a/sum(a)
muHist <- vector(mode = "list")
covHist <- vector(mode = "list")
logLikeHist <- vector(mode = "list")
for(j in 1:restarts){
cat("Restart",j,"\n")
# Each restart resumes from the previous restart's converged parameters
if (j > 1){
mu = muHist[[j-1]]
cov = covHist[[j-1]]
}
logLikeIter <- vector()
for (i in 1:maxiters) {
cat("Starting Iter",i,"\n")
# Calculate PDF with class means and covariances.
b <- simplify2array(lapply(seqK, function(i){
mvpdf(data,mu[i,],cov[[i]])
}))
# Expectation Step for each class (responsibilities).
d <- simplify2array(lapply(seqK, function(i){
a[i]*b[,i]/rowSums(t((t(b) * a)))
}))
# Choose the highest rowwise probability (hard assignment)
eK <- factor(apply(d, 1, which.max))
# Total Responsibility
mc <- colSums(d)
# Update Mixing Components.
a <- mc/NROW(data)
# Update our Means
mu <- do.call(rbind,(lapply(seqK, function(i){
colSums(data * d[,i]) * 1/mc[i]
})))
# Update covariances (responsibility-weighted outer products)
cov <- lapply(seqK, function(i){
cov[[i]] <- t(d[,i] * t(apply(data,1,function(x) x - mu[i, ]))) %*%
(d[,i] * t(apply(data, 1, function(x) x - mu[i,]))) * 1/mc[i]
})
# Compute the sum of the mixture densities, take the log, and add the
# column vector.
loglik <- sum(log(apply(t(t(b) * a), 1, sum)))
logLikeIter[i] <- loglik
# Convergence: log-likelihood improved by less than `del`
if (i > 1){
if ((loglik -logLikeIter[i-1]) < del){
logLikeHist[[j]] <- loglik
muHist[[j]] <- mu
covHist[[j]] <- cov
# NOTE(review): break ignores its argument; the string is never shown.
break("Inner Break Condition Achieved")
}
}
}
# NOTE(review): logLikeHist/muHist/covHist[[j]] are only assigned on
# convergence above; if maxiters is exhausted without meeting `del`, the
# indexing below (and the final return) fails — TODO confirm intent.
if(j>1)
if(logLikeHist[[j-1]]==logLikeHist[[j]])
break
}
# eK comes from the last EM iteration executed (soft-max hard assignment)
return(list(data = data, k = k, clusters = eK,
mu = mu, logLike = logLikeHist[[j]], init = init))
}
|
##--------------------------------------------
##
## Counting/Probability R code (lecture 2)
##
## Class: PCE Data Science Methods Class
##
## Contains examples of:
##
## -Counting, Probability
##
## -More on Distributions
##
##--------------------------------------------
library(MASS) # has the function 'fractions()', which is useful.
##-----Sandwich Count----
breads = c('white', 'wheat', 'italian', 'sevengrain')
meats = c('ham', 'turkey', 'chicken', 'pastrami', 'meatballs')
toppings = c('mustard', 'mayo', 'salt_pepper', 'oil_vinegar')
sandwiches = expand.grid(breads,
meats,
toppings)
##-----Two Dice------
two_dice = expand.grid(1:6,1:6)
two_dice$sum = two_dice$Var1 + two_dice$Var2
two_dice$isdouble = two_dice$Var1 == two_dice$Var2
# Count different sums
sum_counts = table(two_dice$sum)
# Count doubles
doubles = sum(two_dice$isdouble)
# Probabilities of sums:
# NOTE(review): `two_dice` and `doubles` are created earlier in this script
# (the 36-row grid of two die rolls) -- this section assumes they exist.
# Exact rational probabilities of each dice sum (MASS::fractions).
sum_prob = fractions(table(two_dice$sum)/nrow(two_dice)) # type ?fractions for more detail
barplot(sum_prob)
# Probability of a double:
fractions(doubles/nrow(two_dice))
##-------Simulations in R------
# Define deck
suits <- c("Diamonds", "Clubs", "Hearts", "Spades")
ranks <- c("Ace", "Deuce", "Three", "Four","Five", "Six", "Seven", "Eight", "Nine", "Ten", "Jack", "Queen", "King")
# 52-card deck: every rank/suit combination (13 ranks x 4 suits).
deck <- expand.grid(ranks=ranks, suits=suits)
# Find probability that 5 cards make up a flush from simulations
n = 100000 # stay under 1 million
# Monte Carlo: each trial deals 5 distinct cards and records TRUE when
# all five share a single suit (a flush; straights not distinguished).
hands = sapply(1:n, function(x){
five_cards = sample(1:nrow(deck),5)
return(length(unique(deck$suits[five_cards]))==1)
})
emp_prob = sum(hands)/n # empirical estimate of P(flush)
emp_var = var(hands) # per-trial variance of the Bernoulli indicator
##------Use of system.time------
# system.time() can be used to estimate time costs
system.time(sapply(1:1000, function(x){
five_cards = sample(1:nrow(deck),5)
return(length(unique(deck$suits[five_cards]))==1)
}))
# 0.08 on my system for 1000, so 1 million would take ~ 1000*0.08 = 80 seconds,
# but 100K would take 8 seconds.
# For better system times, use the package 'microbenchmark':
# Careful! this essentially does system.time(rep(1000, f() ))
library(microbenchmark)
# microbenchmark() repeats the expression many times (100 by default)
# and reports the distribution of timings.
microbenchmark(sapply(1:1, function(x){
five_cards = sample(1:nrow(deck),5)
return(length(unique(deck$suits[five_cards]))==1)
}))
# ~ 100 microseconds on avg. = 100E-6 = 1E-4. 1 Million = 100 seconds.
##----Missing Data Demo with Amelia-----
# NA vs. NaN vs. NULL
#
# NA: Not Available, results from missing data or an
# out of bounds reference (Logical, neither T nor F)
#
# NaN: Not A Number: results from performing an illegal
# mathematical action. (Numeric placeholder)
#
# NULL: This is operational. R returns this when referencing
# non-existent columns. R also uses a NULL assignment
# to remove objects. (NULL Class).
# If you've had set theory before, think of this as the
# 'empty-set'
#
# NaNs:
0/0
sqrt(-1)
log(-1)
asin(2)
NaN > 1 # NA because NaN is an unknown number
sum(c(1,2,NaN)) # NaN propagates through arithmetic
sum(c(1,2,NaN), na.rm=TRUE) # !!! NaN isn't a type of NA, but
# na.rm is very general
#NAs:
c(1,2)[3] # third argument missing
as.numeric('data') # R is 'missing' the number available
NA > 1 # comparisons with NA remain NA
sum(c(1,2,NA))
sum(c(1,2,NA), na.rm=TRUE)
# NULLs:
t = data.frame('a'=1:4, 'b'=runif(4)) # NOTE: masks base::t() in this session
t$c # a non-existent column returns NULL
t$a = NULL # assigning NULL removes the column
t
s = c(1,2,NULL) # NULL contributes nothing when concatenated
s
# NA vs. NaN vs. NULL
class(NA)
class(NaN)
class(NULL)
library(Amelia)
# To illustrate the helpfulness of Multiple Imputation,
# we test it on a contrived data set whose true parameters are known.
n <- 1000
full_data <- data.frame('A'=runif(n),
                        'B'=rnorm(n),
                        'C'=rpois(n, 5))
# Note:
# true mean of A = 0.5
# true mean of B = 0
# true mean of C = 5
sample_means <- apply(full_data, 2, mean)
sample_sds <- apply(full_data, 2, sd)
# Remove some data (5%, 15% and 50% missing per column):
data <- full_data
data$A[sample(1:n, round(n*0.05))] <- NA
data$B[sample(1:n, round(n*0.15))] <- NA
data$C[sample(1:n, round(n*0.5))] <- NA
# Removal of missing data (by entry only)
# Note: This is only really applicable because our statistic is calculated
# on each column separately.
means_rem_entry <- apply(data, 2, function(x) mean(x, na.rm=TRUE))
sd_rem_entry <- apply(data, 2, function(x) sd(x, na.rm=TRUE))
# Removal of missing data (by row: complete cases only)
means_rem_rows <- apply(data[complete.cases(data),], 2, mean)
sd_rem_rows <- apply(data[complete.cases(data),], 2, sd)
# Multiple imputation: keep only the list of imputed data sets.
# FIX: was `amelia(data)[1]$imputations`; extracting the component by
# name is clearer and does not depend on element order.
amelia_data <- amelia(data)$imputations
# Calculate sample means: per-imputation column means, then average them.
imp_means <- lapply(amelia_data, function(x) apply(x, 2, function(y) mean(y, na.rm=TRUE)))
avg_imp_means <- apply(do.call(rbind, imp_means), 2, function(y) mean(y, na.rm=TRUE))
# Calculate sample sds: per-imputation column sds, then average them.
# FIX: this previously computed the *sd of the per-imputation means*
# (a copy/paste of the line above used `imp_means` and `sd`) instead of
# averaging `imp_sds`, mirroring avg_imp_means.
imp_sds <- lapply(amelia_data, function(x) apply(x, 2, function(y) sd(y, na.rm=TRUE)))
avg_imp_sds <- apply(do.call(rbind, imp_sds), 2, function(y) mean(y, na.rm=TRUE))
##-----Getting/Storing Data-----
# csv files
?read.csv # Note the option stringsAsFactors = FALSE
# txt files
?read.table
# web/html
# See weather_retrieval.R from the previous class
?readLines
# API
# Twitter Example
##-----Twitter Text Mining-----
# NOTE: machine-specific path; adjust or remove on other machines.
setwd('E:/Work/Teaching/PCE_Data_Science/2_Distributions_ConditionalProb')
##-----OAuth Setup-----
# Credentials live outside the source in a local csv with columns
# TWITTER_CONSUMER_KEY/SECRET and TWITTER_ACCESS_TOKEN/SECRET.
twit_cred = read.csv('twitter_cred.csv', stringsAsFactors=FALSE)
TWITTER_CONSUMER_KEY = twit_cred$TWITTER_CONSUMER_KEY
TWITTER_CONSUMER_SECRET = twit_cred$TWITTER_CONSUMER_SECRET
TWITTER_ACCESS_TOKEN = twit_cred$TWITTER_ACCESS_TOKEN
TWITTER_ACCESS_SECRET = twit_cred$TWITTER_ACCESS_SECRET
library(twitteR)
library(httpuv)
# FIX: also pass the access token/secret -- they were read above but never
# used. With all four supplied, setup_twitter_oauth() authorizes
# non-interactively instead of launching a browser handshake.
setup_twitter_oauth(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET,
                    TWITTER_ACCESS_TOKEN, TWITTER_ACCESS_SECRET)
##----Retrieve Info From Twitter----
s <- searchTwitter('#datascience')
?searchTwitter
##-----Data Output Examples-----
# Write/Create RSQLite DB
library(sqldf)
db_file = "test.db"
conn = dbConnect(RSQLite::SQLite(), dbname=db_file)
# FIX: use dbExecute() for DDL/DML statements. dbSendQuery() returns a
# result set that must be cleared with dbClearResult(); leaving it open
# produces warnings and can keep the database locked.
dbExecute(conn, paste("CREATE TABLE test (indexer INTEGER,",
                      "entry1 TEXT,entry2 TEXT)"))
dbExecute(conn, "INSERT INTO test VALUES (1, 'joseph', 'fourier')")
dbExecute(conn, "INSERT INTO test VALUES (2, 'leonhard', 'euler')")
dbListTables(conn)         # The tables in the database
dbListFields(conn, "test") # The columns in a table
dbReadTable(conn, "test")  # The data in a table
# Enter data from csv:
# dbWriteTable(conn = conn, name = "test", value = "mathematicians.csv")
# Enter data from dataframe:
mathematicians = data.frame('indexer'=3:6,
                            'entry1'=c('william', 'emmy', 'srinivasa', 'alfred'),
                            'entry2'=c('hamilton', 'noether', 'ramanujan', 'lotka'))
dbWriteTable(conn = conn, name = "test", value = mathematicians, append=TRUE, overwrite=FALSE)
# FIX: release the connection (and its file handle) when the demo is done.
dbDisconnect(conn)
|
/qtr2/hw2/R_Examples_Lecture2.R
|
no_license
|
kniemann/data-projects
|
R
| false
| false
| 6,959
|
r
|
##--------------------------------------------
##
## Counting/Probability R code (lecture 2)
##
## Class: PCE Data Science Methods Class
##
## Contains examples of:
##
## -Counting, Probability
##
## -More on Distributions
##
##--------------------------------------------
library(MASS) # provides fractions() for exact rational probabilities
##-----Sandwich Count----
# Multiplication principle of counting: enumerate every
# bread x meat x topping combination (4 * 5 * 4 = 80 sandwiches).
breads <- c('white', 'wheat', 'italian', 'sevengrain')
meats <- c('ham', 'turkey', 'chicken', 'pastrami', 'meatballs')
toppings <- c('mustard', 'mayo', 'salt_pepper', 'oil_vinegar')
sandwiches <- expand.grid(breads, meats, toppings)
##-----Two Dice------
# All 36 equally likely outcomes of rolling two fair dice.
two_dice <- expand.grid(1:6, 1:6)
two_dice$sum <- with(two_dice, Var1 + Var2)
two_dice$isdouble <- with(two_dice, Var1 == Var2)
# Count different sums
sum_counts <- table(two_dice$sum)
# Count doubles
doubles <- sum(two_dice$isdouble)
# Probabilities of sums, shown as exact rationals (MASS::fractions):
sum_prob <- fractions(sum_counts / nrow(two_dice)) # type ?fractions for more detail
barplot(sum_prob)
# Probability of a double:
fractions(doubles / nrow(two_dice))
##-------Simulations in R------
# Define deck
suits <- c("Diamonds", "Clubs", "Hearts", "Spades")
ranks <- c("Ace", "Deuce", "Three", "Four","Five", "Six", "Seven", "Eight", "Nine", "Ten", "Jack", "Queen", "King")
# 52-card deck: every rank/suit combination (13 ranks x 4 suits).
deck <- expand.grid(ranks=ranks, suits=suits)
# Find probability that 5 cards make up a flush from simulations
n = 100000 # stay under 1 million
# Monte Carlo: each trial deals 5 distinct cards and records TRUE when
# all five share a single suit (a flush; straights not distinguished).
hands = sapply(1:n, function(x){
five_cards = sample(1:nrow(deck),5)
return(length(unique(deck$suits[five_cards]))==1)
})
emp_prob = sum(hands)/n # empirical estimate of P(flush)
emp_var = var(hands) # per-trial variance of the Bernoulli indicator
##------Use of system.time------
# system.time() can be used to estimate time costs
system.time(sapply(1:1000, function(x){
five_cards = sample(1:nrow(deck),5)
return(length(unique(deck$suits[five_cards]))==1)
}))
# 0.08 on my system for 1000, so 1 million would take ~ 1000*0.08 = 80 seconds,
# but 100K would take 8 seconds.
# For better system times, use the package 'microbenchmark':
# Careful! this essentially does system.time(rep(1000, f() ))
library(microbenchmark)
# microbenchmark() repeats the expression many times (100 by default)
# and reports the distribution of timings.
microbenchmark(sapply(1:1, function(x){
five_cards = sample(1:nrow(deck),5)
return(length(unique(deck$suits[five_cards]))==1)
}))
# ~ 100 microseconds on avg. = 100E-6 = 1E-4. 1 Million = 100 seconds.
##----Missing Data Demo with Amelia-----
# NA vs. NaN vs. NULL
#
# NA: Not Available, results from missing data or an
# out of bounds reference (Logical, neither T nor F)
#
# NaN: Not A Number: results from performing an illegal
# mathematical action. (Numeric placeholder)
#
# NULL: This is operational. R returns this when referencing
# non-existent columns. R also uses a NULL assignment
# to remove objects. (NULL Class).
# If you've had set theory before, think of this as the
# 'empty-set'
#
# NaNs:
0/0
sqrt(-1)
log(-1)
asin(2)
NaN > 1 # NA because NaN is an unknown number
sum(c(1,2,NaN)) # NaN propagates through arithmetic
sum(c(1,2,NaN), na.rm=TRUE) # !!! NaN isn't a type of NA, but
# na.rm is very general
#NAs:
c(1,2)[3] # third argument missing
as.numeric('data') # R is 'missing' the number available
NA > 1 # comparisons with NA remain NA
sum(c(1,2,NA))
sum(c(1,2,NA), na.rm=TRUE)
# NULLs:
t = data.frame('a'=1:4, 'b'=runif(4)) # NOTE: masks base::t() in this session
t$c # a non-existent column returns NULL
t$a = NULL # assigning NULL removes the column
t
s = c(1,2,NULL) # NULL contributes nothing when concatenated
s
# NA vs. NaN vs. NULL
class(NA)
class(NaN)
class(NULL)
library(Amelia)
# To illustrate the helpfulness of Multiple Imputation,
# we test it on a contrived data set whose true parameters are known.
n <- 1000
full_data <- data.frame('A'=runif(n),
                        'B'=rnorm(n),
                        'C'=rpois(n, 5))
# Note:
# true mean of A = 0.5
# true mean of B = 0
# true mean of C = 5
sample_means <- apply(full_data, 2, mean)
sample_sds <- apply(full_data, 2, sd)
# Remove some data (5%, 15% and 50% missing per column):
data <- full_data
data$A[sample(1:n, round(n*0.05))] <- NA
data$B[sample(1:n, round(n*0.15))] <- NA
data$C[sample(1:n, round(n*0.5))] <- NA
# Removal of missing data (by entry only)
# Note: This is only really applicable because our statistic is calculated
# on each column separately.
means_rem_entry <- apply(data, 2, function(x) mean(x, na.rm=TRUE))
sd_rem_entry <- apply(data, 2, function(x) sd(x, na.rm=TRUE))
# Removal of missing data (by row: complete cases only)
means_rem_rows <- apply(data[complete.cases(data),], 2, mean)
sd_rem_rows <- apply(data[complete.cases(data),], 2, sd)
# Multiple imputation: keep only the list of imputed data sets.
# FIX: was `amelia(data)[1]$imputations`; extracting the component by
# name is clearer and does not depend on element order.
amelia_data <- amelia(data)$imputations
# Calculate sample means: per-imputation column means, then average them.
imp_means <- lapply(amelia_data, function(x) apply(x, 2, function(y) mean(y, na.rm=TRUE)))
avg_imp_means <- apply(do.call(rbind, imp_means), 2, function(y) mean(y, na.rm=TRUE))
# Calculate sample sds: per-imputation column sds, then average them.
# FIX: this previously computed the *sd of the per-imputation means*
# (a copy/paste of the line above used `imp_means` and `sd`) instead of
# averaging `imp_sds`, mirroring avg_imp_means.
imp_sds <- lapply(amelia_data, function(x) apply(x, 2, function(y) sd(y, na.rm=TRUE)))
avg_imp_sds <- apply(do.call(rbind, imp_sds), 2, function(y) mean(y, na.rm=TRUE))
##-----Getting/Storing Data-----
# csv files
?read.csv # Note the option stringsAsFactors = FALSE
# txt files
?read.table
# web/html
# See weather_retrieval.R from the previous class
?readLines
# API
# Twitter Example
##-----Twitter Text Mining-----
# NOTE: machine-specific path; adjust or remove on other machines.
setwd('E:/Work/Teaching/PCE_Data_Science/2_Distributions_ConditionalProb')
##-----OAuth Setup-----
# Credentials live outside the source in a local csv with columns
# TWITTER_CONSUMER_KEY/SECRET and TWITTER_ACCESS_TOKEN/SECRET.
twit_cred = read.csv('twitter_cred.csv', stringsAsFactors=FALSE)
TWITTER_CONSUMER_KEY = twit_cred$TWITTER_CONSUMER_KEY
TWITTER_CONSUMER_SECRET = twit_cred$TWITTER_CONSUMER_SECRET
TWITTER_ACCESS_TOKEN = twit_cred$TWITTER_ACCESS_TOKEN
TWITTER_ACCESS_SECRET = twit_cred$TWITTER_ACCESS_SECRET
library(twitteR)
library(httpuv)
# FIX: also pass the access token/secret -- they were read above but never
# used. With all four supplied, setup_twitter_oauth() authorizes
# non-interactively instead of launching a browser handshake.
setup_twitter_oauth(TWITTER_CONSUMER_KEY, TWITTER_CONSUMER_SECRET,
                    TWITTER_ACCESS_TOKEN, TWITTER_ACCESS_SECRET)
##----Retrieve Info From Twitter----
s <- searchTwitter('#datascience')
?searchTwitter
##-----Data Output Examples-----
# Write/Create RSQLite DB
library(sqldf)
db_file = "test.db"
conn = dbConnect(RSQLite::SQLite(), dbname=db_file)
# FIX: use dbExecute() for DDL/DML statements. dbSendQuery() returns a
# result set that must be cleared with dbClearResult(); leaving it open
# produces warnings and can keep the database locked.
dbExecute(conn, paste("CREATE TABLE test (indexer INTEGER,",
                      "entry1 TEXT,entry2 TEXT)"))
dbExecute(conn, "INSERT INTO test VALUES (1, 'joseph', 'fourier')")
dbExecute(conn, "INSERT INTO test VALUES (2, 'leonhard', 'euler')")
dbListTables(conn)         # The tables in the database
dbListFields(conn, "test") # The columns in a table
dbReadTable(conn, "test")  # The data in a table
# Enter data from csv:
# dbWriteTable(conn = conn, name = "test", value = "mathematicians.csv")
# Enter data from dataframe:
mathematicians = data.frame('indexer'=3:6,
                            'entry1'=c('william', 'emmy', 'srinivasa', 'alfred'),
                            'entry2'=c('hamilton', 'noether', 'ramanujan', 'lotka'))
dbWriteTable(conn = conn, name = "test", value = mathematicians, append=TRUE, overwrite=FALSE)
# FIX: release the connection (and its file handle) when the demo is done.
dbDisconnect(conn)
|
#' Exported Constants
#'
#' @name RMUMPS_PERM
#' @aliases RMUMPS_PERM RMUMPS_PERM_AMD RMUMPS_PERM_AMF RMUMPS_PERM_SCOTCH RMUMPS_PERM_PORD RMUMPS_PERM_METIS RMUMPS_PERM_QAMD RMUMPS_PERM_AUTO
#' @description
#' Integer constants defining permutation types and exported from rmumps are the following:
#' \itemize{
#'  \item{\code{RMUMPS_PERM_AMD}}
#'  \item{\code{RMUMPS_PERM_AMF}}
#'  \item{\code{RMUMPS_PERM_SCOTCH}}
#'  \item{\code{RMUMPS_PERM_PORD}}
#'  \item{\code{RMUMPS_PERM_METIS}}
#'  \item{\code{RMUMPS_PERM_QAMD}}
#'  \item{\code{RMUMPS_PERM_AUTO}}
#' }
#' They are all regrouped in a named vector \code{RMUMPS_PERM} where the names are the items above and the values are the corresponding constants.
#' @examples
#' if (requireNamespace("slam", quietly=TRUE)) {
#'   am=rmumps::Rmumps$new(slam::as.simple_triplet_matrix(diag(1:3)))
#'   am$set_permutation(RMUMPS_PERM_SCOTCH)
#'   am$solve(1:3)
#' }
# NULL anchors the roxygen block above to a documentation topic that has
# no associated R object.
NULL
# set useful constants
# Load the Rcpp module exposing the compiled Rmumps reference class when
# the namespace is loaded.
loadModule("mod_Rmumps", TRUE)
.onLoad <- function(libname, pkgname){
  # Called at package load: fetch the integer value of each exported
  # permutation constant from the compiled code (.get_cnst) and bind it
  # in the package namespace, together with the combined named vector
  # RMUMPS_PERM.
  env = parent.env(environment())
  perm_names = c(
    "RMUMPS_PERM_AMD",
    "RMUMPS_PERM_AMF",
    "RMUMPS_PERM_SCOTCH",
    "RMUMPS_PERM_PORD",
    "RMUMPS_PERM_METIS",
    "RMUMPS_PERM_QAMD",
    "RMUMPS_PERM_AUTO"
  )
  # Look up all values once instead of growing RMUMPS_PERM with c()
  # inside the loop (quadratic copying, and appending is non-idempotent
  # if .onLoad were ever run twice).
  perm_vals = unlist(lapply(perm_names, .get_cnst))
  names(perm_vals) = perm_names
  for (cc in perm_names) {
    env[[cc]] = perm_vals[[cc]]
  }
  env[["RMUMPS_PERM"]] = perm_vals
}
|
/R/zzz.R
|
no_license
|
sgsokol/rmumps
|
R
| false
| false
| 1,325
|
r
|
#' Exported Constants
#'
#' @name RMUMPS_PERM
#' @aliases RMUMPS_PERM RMUMPS_PERM_AMD RMUMPS_PERM_AMF RMUMPS_PERM_SCOTCH RMUMPS_PERM_PORD RMUMPS_PERM_METIS RMUMPS_PERM_QAMD RMUMPS_PERM_AUTO
#' @description
#' Integer constants defining permutation types and exported from rmumps are the following:
#' \itemize{
#'  \item{\code{RMUMPS_PERM_AMD}}
#'  \item{\code{RMUMPS_PERM_AMF}}
#'  \item{\code{RMUMPS_PERM_SCOTCH}}
#'  \item{\code{RMUMPS_PERM_PORD}}
#'  \item{\code{RMUMPS_PERM_METIS}}
#'  \item{\code{RMUMPS_PERM_QAMD}}
#'  \item{\code{RMUMPS_PERM_AUTO}}
#' }
#' They are all regrouped in a named vector \code{RMUMPS_PERM} where the names are the items above and the values are the corresponding constants.
#' @examples
#' if (requireNamespace("slam", quietly=TRUE)) {
#'   am=rmumps::Rmumps$new(slam::as.simple_triplet_matrix(diag(1:3)))
#'   am$set_permutation(RMUMPS_PERM_SCOTCH)
#'   am$solve(1:3)
#' }
# NULL anchors the roxygen block above to a documentation topic that has
# no associated R object.
NULL
# set useful constants
# Load the Rcpp module exposing the compiled Rmumps reference class when
# the namespace is loaded.
loadModule("mod_Rmumps", TRUE)
.onLoad <- function(libname, pkgname){
  # Called at package load: fetch the integer value of each exported
  # permutation constant from the compiled code (.get_cnst) and bind it
  # in the package namespace, together with the combined named vector
  # RMUMPS_PERM.
  env = parent.env(environment())
  perm_names = c(
    "RMUMPS_PERM_AMD",
    "RMUMPS_PERM_AMF",
    "RMUMPS_PERM_SCOTCH",
    "RMUMPS_PERM_PORD",
    "RMUMPS_PERM_METIS",
    "RMUMPS_PERM_QAMD",
    "RMUMPS_PERM_AUTO"
  )
  # Look up all values once instead of growing RMUMPS_PERM with c()
  # inside the loop (quadratic copying, and appending is non-idempotent
  # if .onLoad were ever run twice).
  perm_vals = unlist(lapply(perm_names, .get_cnst))
  names(perm_vals) = perm_names
  for (cc in perm_names) {
    env[[cc]] = perm_vals[[cc]]
  }
  env[["RMUMPS_PERM"]] = perm_vals
}
|
############## --------------------------------------------------------
############## ERROR PROPAGATION
############## Author: Daniele Ferraretto
############## started on 26th July, 2017
############## updated: last update: 31/07/2017
############## --------------------------------------------------------
############## --------------------------------------------------------
# ------>>> NOTE: ALL ERRORS ARE IN mg N / m2. You need to divide them by 100 to compare them with the monthly data,
# expressed in Kg N / ha
# clear the memory
# NOTE(review): rm(list=ls()) wipes the whole workspace -- intended for a
# stand-alone run, not for sourcing into an existing session.
rm(list=ls())
#.libPaths("C:/Workspace/R") # adjust library path on my desktop
### set working dir for pc:
#setwd("C:/Users/Daniele Ferraretto/Documents/Daniele_Repo")
### set working dir for desktop:
#setwd("M:/My PhD/R/PhD-local_repo")
#setwd("C:/Users/s1373890/Daniele_Repo")
##########       THROUGHFALL PROPAGATION ERROR      #############
# by SAMPLING DATE (1/2)
# first pass: 1) SD and mean for every sampling date,
# 2) then a daily value: the period's error / number of days it covers
# 3) Addition: square root of the sum of squares of the error values from 2
##########       ERROR IN TF DEPTH     #############
library(RSQLite)
# Field and lab measurements live in a local SQLite database.
db = dbConnect(SQLite(), dbname="field_lab/Griffin.SQLite")
TF = dbGetQuery(db, "SELECT * FROM fielddata WHERE variable = 'through depth' AND vals>=0 ORDER BY date")
NO3data = dbGetQuery(db, "SELECT * FROM labdata WHERE VALS >= 0 AND variable = 'NO3.N' ORDER BY date")
NH4data = dbGetQuery(db, "SELECT * FROM labdata WHERE VALS >= 0 AND variable = 'NH4.N' ORDER BY date")
# A1: MEAN depth value by sampling date
TF.depth.mean=aggregate(vals ~ date, data = TF, FUN = mean, na.rm = TRUE )
names(TF.depth.mean) = c("date", "TF.depth.mean")
# A2: SD of depth value by sampling date
TF.depth.SD=aggregate(vals ~ date, data = TF, FUN = sd, na.rm = TRUE )
# A3: counting samples
TF.N = as.data.frame(table(TF$date))
# A4. SE = SD/(N)^0.5. SE (95%) = 2.110*SE; df = 17
# (2.110 is the two-sided t critical value for 17 degrees of freedom,
# i.e. 18 throughfall collectors - 1.)
TF.SE.95 = as.data.frame(2.110*TF.depth.SD$vals/(TF.N$Freq)^0.5)
TF.SE.95 = cbind(TF.N$Var1, TF.SE.95)
names(TF.SE.95) = c("date", "depth.SE.95")
##########      ERROR IN TF N LAB VALS     #############
# Throughfall collector sample codes (control + treatment plots).
TF.coll = c("C10T1", "C10T2", "C10T3", "C11T1", "C11T2", "C11T3", "C12T1", "C12T2", "C12T3", "T10T1",
"T10T2", "T10T3", "T11T1", "T11T2", "T11T3", "T12T1", "T12T2", "T12T3")
library(data.table)
# Keep only lab results coming from throughfall collectors.
NO3.TF = NO3data[NO3data$sample %in% TF.coll, ]
NH4.TF = NH4data[NH4data$sample %in% TF.coll, ]
# B1: mean of lab values by sampling date
NO3.TF.mean = aggregate(vals ~ date, data = NO3.TF, FUN = mean, na.rm = TRUE )
names(NO3.TF.mean) = c("date", "TF.NO3.mean")
NH4.TF.mean = aggregate(vals ~ date, data = NH4.TF, FUN = mean, na.rm = TRUE )
names(NH4.TF.mean) = c("date", "TF.NH4.mean")
# B2: SD of lab values by sampling date
NO3.TF.SD = aggregate(vals ~ date, data = NO3.TF, FUN = sd, na.rm = TRUE )
names(NO3.TF.SD) = c("date", "TF.NO3.SD")
NH4.TF.SD = aggregate(vals ~ date, data = NH4.TF, FUN = sd, na.rm = TRUE )
names(NH4.TF.SD) = c("date", "TF.NH4.SD")
# 22/08/2017: problem with the July 2017 NH4 data: they do not exist.
# Check where the problem originates; if necessary rerun scripts!!!
# B3: counting samples:
(library(plyr)) # NOTE(review): the outer parentheses only auto-print the result
NO3.TF.N = as.data.frame(table(NO3.TF$date))
NH4.TF.N = as.data.frame(table(NH4.TF$date))
# B4. SE = SD/(N)^0.5. SE (95%) = 2.110*SE
TFNO3.SE.95 = as.data.frame(2.110*NO3.TF.SD$TF.NO3.SD/(NO3.TF.N$Freq)^0.5)
TFNH4.SE.95 = as.data.frame(2.110*NH4.TF.SD$TF.NH4.SD/(NH4.TF.N$Freq)^0.5)
TFLAB.SE.95 = cbind(NO3.TF.N$Var1, TFNO3.SE.95, TFNH4.SE.95)
names(TFLAB.SE.95) = c("date", "SE.95.NO3", "SE.95.NH4")
# B5. Calculate the error per each sampling date: MULTIMERGE
dTF.samplingdate = merge(merge(merge(merge(NO3.TF.mean, TF.SE.95, by='date', all=T),
NH4.TF.mean, by='date', all=T), TF.depth.mean, by='date', all=T), TFLAB.SE.95, by='date', all=T)
# 5a: Error propagation on NO3.N in TF by sampling date (and turning it from mg/m2 to kg/ha):
# product-form propagation: d(xy) = xy * sqrt((dx/x)^2 + (dy/y)^2)
dTF.samplingdate$dTF.NO3 = (10000/1000000) * dTF.samplingdate$TF.depth.mean * dTF.samplingdate$TF.NO3.mean *
((dTF.samplingdate$SE.95.NO3/dTF.samplingdate$TF.NO3.mean)^2+(dTF.samplingdate$depth.SE.95/dTF.samplingdate$TF.depth.mean)^2)^0.5
# 5b: Error propagation on NH4.N in TF by sampling date:
dTF.samplingdate$dTF.NH4 = (10000/1000000) * dTF.samplingdate$TF.depth.mean * dTF.samplingdate$TF.NH4.mean *
((dTF.samplingdate$SE.95.NH4/dTF.samplingdate$TF.NH4.mean)^2+(dTF.samplingdate$depth.SE.95/dTF.samplingdate$TF.depth.mean)^2)^0.5
#####################################################################################
#########    REFINING ERROR PROPAGATION in TF: ERROR PROPAGATION BY MONTH   ##########
# (2/2)
# Rationale: working on partial errors weighed by the number of days of each month
# covered by the n sampling date needs to 1) calculate how many days of each sampling date fall in a month;
# 2. calculate the weighed error; 3. propagation error as sum of the weighed errors
# 1. Calculating how many days from previous sampling:
dates=as.Date(unique(dTF.samplingdate$date))
# Month-end boundaries for 66 months starting end of Oct 2011, so monthly
# cut points are mixed in with the real sampling dates.
date.end.month <- seq(as.Date("2011-11-01"),length=66,by="months")-1
dates2 = c(dates,date.end.month)
dates2= as.data.frame.Date(dates2)
library(dplyr)
dates = dates2 %>% distinct(dates2) # this is to select unique values from dates2
dates = dates[order(dates$dates2, decreasing = FALSE ),]
days = as.POSIXlt(dates)
days=rev(days) # reverse the order of dates to obtain positive values from differences
# NOTE(review): `length(days-1)` equals `length(days)` (the -1 sits inside
# length()), so time1 and time2 differ in length by one and difftime()
# recycles with a warning; the resulting stray element becomes the first
# entry after rev() below and is then overwritten by `days[[1]]=26`.
# Probably intended `1:(length(days)-1)` -- confirm before "fixing",
# because the downstream cbind() relies on the current vector length.
diffdays = difftime( days[1:length(days-1)] , days[2:length(days)] )
diffdays= round(diffdays, digits=0) #correct values to the integer
days=rev(diffdays) #back to the increasing order of dates
days[[1]]=26 #set first value as 26 (first sampling in 2011)
dd.dates=cbind(dates,days)
dd.dates = as.data.frame(dd.dates)
dd.dates$dates = as.Date(dd.dates$dates, origin="1970-01-01") # I ignore why I need to put this date as origin. Calculated via excel.
dTF.samplingdate$date = as.Date(dTF.samplingdate$date)
dTF.samplingdate1 = merge(dTF.samplingdate,dd.dates,by.x="date",by.y="dates",na.rm=FALSE, all = T)
# filling the NA to make calculations
library(zoo)
# na.locf(fromLast = TRUE) back-fills: each NA takes the next observed value.
dTF.samplingdate1 = na.locf(dTF.samplingdate1,fromLast = TRUE)
dTF.samplingdate1$Ym = strftime(dTF.samplingdate1$date, "%Y%m")
# Calculate the number of days of a month
# install.packages("Hmisc")
library(Hmisc)
dTF.samplingdate1$daysxmonth = monthDays(as.Date(dTF.samplingdate1$date))
dTF.samplingdate1$days = as.numeric(dTF.samplingdate1$days)
# NOTE(review): columns appear to come back as character after the
# merge/na.locf step -- hence the explicit as.numeric conversions here.
cols.num <- c("TF.NO3.mean","SE.95.NO3","TF.NH4.mean", "SE.95.NH4","TF.depth.mean", "depth.SE.95", "days")
dTF.samplingdate1[cols.num] <- sapply(dTF.samplingdate1[cols.num],as.numeric)
summary(dTF.samplingdate1)
# 2A: Error propagation on NO3.N in TF (error weighed by days/month):
dTF.samplingdate1$dTF.NO3 = dTF.samplingdate1$TF.depth.mean * dTF.samplingdate1$TF.NO3.mean *
((dTF.samplingdate1$SE.95.NO3/dTF.samplingdate1$TF.NO3.mean)^2+(dTF.samplingdate1$depth.SE.95/dTF.samplingdate1$TF.depth.mean)^2)^0.5 *
dTF.samplingdate1$days/dTF.samplingdate1$daysxmonth
# 2B: Error propagation on NH4.N in TF (error weighed by days/month):
dTF.samplingdate1$dTF.NH4 = dTF.samplingdate1$TF.depth.mean * dTF.samplingdate1$TF.NH4.mean *
((dTF.samplingdate1$SE.95.NH4/dTF.samplingdate1$TF.NH4.mean)^2+(dTF.samplingdate1$depth.SE.95/dTF.samplingdate1$TF.depth.mean)^2)^0.5 *
dTF.samplingdate1$days/dTF.samplingdate1$daysxmonth
# 3. PROPAGATION ERROR AS SUM OF ERRORS FROM DIFFERENT SAMPLING DATES
# wide to long
library(reshape2)
TF.err.propag = melt(dTF.samplingdate1, id.vars = c("date", "Ym"),
measure.vars = c("dTF.NH4", "dTF.NO3"), variable.name = "error_propagation_var")
# Calculate dn^2
TF.err.propag$value = (TF.err.propag$value)^2
# Monthly propagated error = sqrt of the monthly sum of squares.
dTF.err = aggregate(TF.err.propag$value ~ TF.err.propag$Ym + TF.err.propag$error_propagation_var, FUN = sum)
names(dTF.err) = c("Ym", "variable", "value")
dTF.err$value= (dTF.err$value)^0.5
#housekeeping
rm(NH4.TF.N, NO3.TF.N, TF.N, TF.SE.95, TFLAB.SE.95, TFNH4.SE.95, TFNO3.SE.95, dates2, dd.dates, dTF.samplingdate, dTF.samplingdate1, NH4.TF, NH4.TF.mean, NH4.TF.SD, NO3.TF, NO3.TF.mean, NO3.TF.SD, TF,
TF.depth.mean, TF.depth.SD, TF.err.propag, cols.num, date.end.month, dates, days, diffdays, TF.coll)
##############################################################################
############        STEMFLOW PROPAGATION ERROR              ###############
# first pass: 1) SD and mean for every sampling date,
# 2) then a daily value: the period's error / number of days it covers
# 3) Addition: square root of the sum of squares of the error values from 2
##########      ERROR IN SF vol     #############
# Stemflow is recorded as a collected volume (not a depth as throughfall).
SF = dbGetQuery(db, "SELECT * FROM fielddata WHERE variable = 'stem vol' AND vals>=0 ORDER BY date")
# A1: MEAN vol value by sampling date
SF.vol.mean=aggregate(vals ~ date, data = SF, FUN = mean, na.rm = TRUE )
names(SF.vol.mean) = c("date", "SF.vol.mean")
# A2: SD of vol value by sampling date
SF.vol.SD=aggregate(vals ~ date, data = SF, FUN = sd, na.rm = TRUE )
# A3: counting samples
SF.N = as.data.frame(table(SF$date))
# A4. SE = SD/(N)^0.5. SE (95%) = 1.96*SE
# (1.96 = normal-approximation critical value here, unlike the t value
# used in the TF section.)
SF.SE.95 = as.data.frame(1.96*SF.vol.SD$vals/(SF.N$Freq)^0.5)
SF.SE.95 = cbind(SF.N$Var1, SF.SE.95)
names(SF.SE.95) = c("date", "vol.SE.95")
##########      ERROR IN SF N LAB VALS     #############
# Stemflow collector sample codes.
SF.coll = c("C10S1", "C10S2", "C10S3", "C11S1", "C11S2", "C11S3", "C12S1", "C12S2", "C12S3", "T10S1",
"T10S2", "T10S3", "T11S1", "T11S2", "T11S3", "T11S4", "T11S5", "T11S6", "T11S7", "T12S1", "T12S2", "T12S3")
NO3.SF = NO3data[NO3data$sample %in% SF.coll, ]
NH4.SF = NH4data[NH4data$sample %in% SF.coll, ]
# B1: mean of lab values by sampling date
NO3.SF.mean = aggregate(vals ~ date, data = NO3.SF, FUN = mean, na.rm = TRUE )
names(NO3.SF.mean) = c("date", "SF.NO3.mean")
NH4.SF.mean = aggregate(vals ~ date, data = NH4.SF, FUN = mean, na.rm = TRUE )
names(NH4.SF.mean) = c("date", "SF.NH4.mean")
# B2: SD of lab values by sampling date
NO3.SF.SD = aggregate(vals ~ date, data = NO3.SF, FUN = sd, na.rm = TRUE )
names(NO3.SF.SD) = c("date", "SF.NO3.SD")
NH4.SF.SD = aggregate(vals ~ date, data = NH4.SF, FUN = sd, na.rm = TRUE )
names(NH4.SF.SD) = c("date", "SF.NH4.SD")
# B3: counting samples:
NO3.SF.N = as.data.frame(table(NO3.SF$date))
NH4.SF.N = as.data.frame(table(NH4.SF$date))
# B4. SE = SD/(N)^0.5. SE (95%) = 1.96*SE
SFNO3.SE.95 = as.data.frame(1.96*NO3.SF.SD$SF.NO3.SD/(NO3.SF.N$Freq)^0.5)
SFNH4.SE.95 = as.data.frame(1.96*NH4.SF.SD$SF.NH4.SD/(NH4.SF.N$Freq)^0.5)
SFLAB.SE.95 = cbind(NO3.SF.N$Var1, SFNO3.SE.95, SFNH4.SE.95)
names(SFLAB.SE.95) = c("date", "SE.95.NO3", "SE.95.NH4")
# B5. Calculate the error per each sampling date: MULTIMERGE
dSF.samplingdate = merge(merge(merge(merge(SF.vol.mean, SF.SE.95, by='date', all=T),
NH4.SF.mean, by='date', all=T), NO3.SF.mean, by='date', all=T),
SFLAB.SE.95, by='date', all=T) # this last is the sum of the two Nx lab.SE
# B5a: Error propagation on NO3.N in SF by sampling date
# (1883 presumably scales collector volume to a per-hectare flux,
# e.g. stems per hectare -- TODO confirm):
dSF.samplingdate$dSF.NO3 = (1883/1000000) * dSF.samplingdate$SF.vol.mean * dSF.samplingdate$SF.NO3.mean *
((dSF.samplingdate$SE.95.NO3/dSF.samplingdate$SF.NO3.mean)^2+(dSF.samplingdate$vol.SE.95/dSF.samplingdate$SF.vol.mean)^2)^0.5
# B5b: Error propagation on NH4.N in SF by sampling date:
dSF.samplingdate$dSF.NH4 = (1883/1000000) * dSF.samplingdate$SF.vol.mean * dSF.samplingdate$SF.NH4.mean *
((dSF.samplingdate$SE.95.NH4/dSF.samplingdate$SF.NH4.mean)^2+(dSF.samplingdate$vol.SE.95/dSF.samplingdate$SF.vol.mean)^2)^0.5
###############################################################################
#########    REFINING ERROR PROPAGATION: ERROR PROPAGATION BY MONTH   ##########
# Rationale: working on partial errors weighed by the number of days of each month
# covered by the n sampling date needs to 1) calculate how many days of each sampling date fall in a month;
# 2. calculate the weighed error; 3. propagation error as sum of the weighed errors
# 1. Calculating how many days from previous sampling:
dates=as.Date(unique(dSF.samplingdate$date))
date.end.month <- seq(as.Date("2011-11-01"),length=66,by="months")-1
dates2 = c(dates,date.end.month)
dates2= as.data.frame.Date(dates2)
dates = dates2 %>% distinct(dates2) # this is to select unique values from dates2
dates = dates[order(dates$dates2, decreasing = FALSE ),]
days = as.POSIXlt(dates)
days=rev(days) # reverse the order of dates to obtain positive values from differences
# NOTE(review): `length(days-1)` is `length(days)` (the -1 sits inside
# length()), so difftime() recycles unequal-length inputs; the stray
# element ends up first after rev() and is overwritten by `days[[1]]=26`.
# Same construction as the throughfall script section -- confirm intent
# before changing, since the later cbind() depends on the length.
diffdays = difftime( days[1:length(days-1)] , days[2:length(days)] )
diffdays= round(diffdays, digits=0) #correct values to the integer
days=rev(diffdays) #back to the increasing order of dates
days[[1]]=26 #set first value as 26 (first sampling in 2011)
dd.dates=cbind(dates,days)
dd.dates = as.data.frame(dd.dates)
dd.dates$dates = as.Date(dd.dates$dates, origin="1970-01-01") # I ignore why I need to put this date as origin. Calculated via excel.
dSF.samplingdate$date = as.Date(dSF.samplingdate$date)
dSF.samplingdate1 = merge(dSF.samplingdate,dd.dates,by.x="date",by.y="dates",na.rm=FALSE, all = T)
# filling the NA to make calculations (back-fill with next observation)
dSF.samplingdate1 = na.locf(dSF.samplingdate1,fromLast = TRUE)
dSF.samplingdate1$Ym = strftime(dSF.samplingdate1$date, "%Y%m")
# Calculate the number of days of a month
# install.packages("Hmisc")
dSF.samplingdate1$daysxmonth = monthDays(as.Date(dSF.samplingdate1$date))
dSF.samplingdate1$days = as.numeric(dSF.samplingdate1$days)
# restore numeric types after the merge/na.locf step
cols.num <- c("SF.NO3.mean","SE.95.NO3","SF.NH4.mean", "SE.95.NH4","SF.vol.mean", "vol.SE.95", "days")
dSF.samplingdate1[cols.num] <- sapply(dSF.samplingdate1[cols.num],as.numeric)
# 2a: Error propagation on NO3.N in SF (error weighed by days/month):
dSF.samplingdate1$dSF.NO3 = dSF.samplingdate1$SF.vol.mean * dSF.samplingdate1$SF.NO3.mean *
((dSF.samplingdate1$SE.95.NO3/dSF.samplingdate1$SF.NO3.mean)^2+(dSF.samplingdate1$vol.SE.95/dSF.samplingdate1$SF.vol.mean)^2)^0.5 *
dSF.samplingdate1$days/dSF.samplingdate1$daysxmonth
# 2b: Error propagation on NH4.N in SF (error weighed by days/month):
dSF.samplingdate1$dSF.NH4 = dSF.samplingdate1$SF.vol.mean * dSF.samplingdate1$SF.NH4.mean *
((dSF.samplingdate1$SE.95.NH4/dSF.samplingdate1$SF.NH4.mean)^2+(dSF.samplingdate1$vol.SE.95/dSF.samplingdate1$SF.vol.mean)^2)^0.5 *
dSF.samplingdate1$days/dSF.samplingdate1$daysxmonth
# 3. PROPAGATION ERROR AS SUM OF ERRORS FROM DIFFERENT SAMPLING DATES
# wide to long
SF.err.propag = melt(dSF.samplingdate1, id.vars = c("date", "Ym"),
measure.vars = c("dSF.NH4", "dSF.NO3"), variable.name = "error_propagation_var")
# Calculate dn^2
SF.err.propag$value = (SF.err.propag$value)^2
# Monthly propagated error = sqrt of the monthly sum of squares.
dSF.err = aggregate(SF.err.propag$value ~ SF.err.propag$Ym + SF.err.propag$error_propagation_var, FUN = sum)
names(dSF.err) = c("Ym", "variable", "value")
dSF.err$value= (dSF.err$value)^0.5
#housekeeping
rm(dates2, dd.dates, dSF.samplingdate, dSF.samplingdate1, NH4.SF, NH4.SF.mean, NH4.SF.SD, NO3.SF, NO3.SF.mean, NO3.SF.SD,
SF.err.propag, cols.num, date.end.month, dates, days, diffdays, SF.coll, NH4.SF.N, NO3.SF.N, SF, SF.N, SF.SE.95,
SF.vol.mean, SFLAB.SE.95, SFNH4.SE.95, SFNO3.SE.95, SF.vol.SD)
##########################################################################################################
##########       PRECIPITATION PROPAGATION ERROR      #############
##########################################################################################################
# Bulk precipitation comes from just two collectors (C30D1, C31D1).
RF = dbGetQuery(db, "SELECT * FROM fielddata WHERE sample = 'C30D1' or sample = 'C31D1' ORDER BY date")
# A1: MEAN depth by sampling date
RF.depth.mean=aggregate(vals ~ date, data = RF, FUN = mean, na.rm = TRUE )
names(RF.depth.mean) = c("date", "RF.depth.mean")
# A2: SD of depth value by sampling date
RF.depth.SD=aggregate(vals ~ date, data = RF, FUN = sd, na.rm = TRUE )
# A3: counting samples
RF.N = as.data.frame(table(RF$date))
# A4. SE = SD/(N)^0.5. SE (95%) = 12.706*SE; degrees of freedom = 1 (n = 2)!!!
RF.SE.95 = as.data.frame(12.706*RF.depth.SD$vals/(RF.N$Freq)^0.5)
RF.SE.95 = cbind(RF.N$Var1, RF.SE.95)
names(RF.SE.95) = c("date", "depth.SE.95")
##########      ERROR IN RF N LAB VALS     #############
RF.coll = c("C30D1", "C31D1")
NO3.RF = NO3data[NO3data$sample %in% RF.coll, ]
NH4.RF = NH4data[NH4data$sample %in% RF.coll, ]
# B1: MEAN of LAB values by sampling date
NO3.RF.mean = aggregate(vals ~ date, data = NO3.RF, FUN = mean, na.rm = TRUE )
names(NO3.RF.mean) = c("date", "RF.NO3.mean")
NH4.RF.mean = aggregate(vals ~ date, data = NH4.RF, FUN = mean, na.rm = TRUE )
names(NH4.RF.mean) = c("date", "RF.NH4.mean")
# B2: SD of lab values by sampling date
NO3.RF.SD = aggregate(vals ~ date, data = NO3.RF, FUN = sd, na.rm = TRUE )
names(NO3.RF.SD) = c("date", "RF.NO3.SD")
NH4.RF.SD = aggregate(vals ~ date, data = NH4.RF, FUN = sd, na.rm = TRUE )
names(NH4.RF.SD) = c("date", "RF.NH4.SD")
# B3: counting samples:
NO3.RF.N = as.data.frame(table(NO3.RF$date))
NH4.RF.N = as.data.frame(table(NH4.RF$date))
# B4. SE = SD/(N)^0.5. SE (95%) = 12.706*SE (t critical value for df = 1)
RFNO3.SE.95 = as.data.frame(12.706*NO3.RF.SD$RF.NO3.SD/(NO3.RF.N$Freq)^0.5)
RFNH4.SE.95 = as.data.frame(12.706*NH4.RF.SD$RF.NH4.SD/(NH4.RF.N$Freq)^0.5)
RFLAB.SE.95 = cbind(NO3.RF.N$Var1, RFNO3.SE.95, RFNH4.SE.95)
names(RFLAB.SE.95) = c("date", "SE.95.NO3", "SE.95.NH4")
# B5. Calculate the error per each sampling date: MULTIMERGE
dRF.samplingdate = merge(merge(merge(merge(NO3.RF.mean, RF.SE.95, by='date', all=T),
NH4.RF.mean, by='date', all=T), RF.depth.mean, by='date', all=T), RFLAB.SE.95, by='date', all=T)
# 5a: Error propagation on NO3.N in RF by sampling date:
dRF.samplingdate$dRF.NO3 = (10000/1000000) * dRF.samplingdate$RF.depth.mean * dRF.samplingdate$RF.NO3.mean *
((dRF.samplingdate$SE.95.NO3/dRF.samplingdate$RF.NO3.mean)^2+(dRF.samplingdate$depth.SE.95/dRF.samplingdate$RF.depth.mean)^2)^0.5
# 5b: Error propagation on NH4.N in RF by sampling date:
dRF.samplingdate$dRF.NH4 = (10000/1000000) * dRF.samplingdate$RF.depth.mean * dRF.samplingdate$RF.NH4.mean *
((dRF.samplingdate$SE.95.NH4/dRF.samplingdate$RF.NH4.mean)^2+(dRF.samplingdate$depth.SE.95/dRF.samplingdate$RF.depth.mean)^2)^0.5
# correct NaN to 0 (after a check of the lab values, all = 0)
# dRF.samplingdate[19, "dRF.NO3"] = 0 # not for now -- I don't like this as a solution; perhaps better to simply ignore that error
###############################################################################
#########    REFINING ERROR PROPAGATION: ERROR PROPAGATION BY MONTH   ##########
# Rationale: working on partial errors weighed by the number of days of each month
# covered by the n sampling date needs to 1) calculate how many days of each sampling date fall in a month;
# 2. calculate the weighed error; 3. propagation error as sum of the weighed errors
# 1. Calculating how many days from previous sampling:
dates=as.Date(unique(dRF.samplingdate$date))
date.end.month <- seq(as.Date("2011-11-01"),length=66,by="months")-1
dates2 = c(dates,date.end.month)
dates2= as.data.frame.Date(dates2)
dates = dates2 %>% distinct(dates2) # this is to select unique values from dates2
dates = dates[order(dates$dates2, decreasing = FALSE ),]
days = as.POSIXlt(dates)
days=rev(days) # reverse the order of dates to obtain positive values from differences
# NOTE(review): `1:length(days-1)` -- same suspect construction as in the
# TF/SF sections; likely meant `1:(length(days)-1)`, but the downstream
# cbind() depends on the current length. Confirm before changing.
diffdays = difftime( days[1:length(days-1)] , days[2:length(days)] )
diffdays= round(diffdays, digits=0) #correct values to the integer
days=rev(diffdays) #back to the increasing order of dates
days[[1]]=26 #set first value as 26 (first sampling in 2011)
dd.dates=cbind(dates,days)
dd.dates = as.data.frame(dd.dates)
dd.dates$dates = as.Date(dd.dates$dates, origin="1970-01-01") # I ignore why I need to put this date as origin. Calculated via excel.
dRF.samplingdate$date = as.Date(dRF.samplingdate$date)
dRF.samplingdate1 = merge(dRF.samplingdate,dd.dates,by.x="date",by.y="dates",na.rm=FALSE, all = T)
# filling the NA to make calculations (back-fill with next observation)
dRF.samplingdate1 = na.locf(dRF.samplingdate1,fromLast = TRUE)
dRF.samplingdate1$Ym = strftime(dRF.samplingdate1$date, "%Y%m")
# Calculate the number of days of a month
dRF.samplingdate1$daysxmonth = monthDays(as.Date(dRF.samplingdate1$date))
# converting columns from character to numeric at once:
dRF.samplingdate1$days = as.numeric(dRF.samplingdate1$days)
cols.num <- c("RF.NO3.mean","SE.95.NO3","RF.NH4.mean", "SE.95.NH4","RF.depth.mean", "depth.SE.95", "days")
dRF.samplingdate1[cols.num] <- sapply(dRF.samplingdate1[cols.num],as.numeric)
# 2a: Error propagation on NO3.N in RF (error weighed by days/month):
dRF.samplingdate1$dRF.NO3 = dRF.samplingdate1$RF.depth.mean * dRF.samplingdate1$RF.NO3.mean *
((dRF.samplingdate1$SE.95.NO3/dRF.samplingdate1$RF.NO3.mean)^2+(dRF.samplingdate1$depth.SE.95/dRF.samplingdate1$RF.depth.mean)^2)^0.5 *
dRF.samplingdate1$days/dRF.samplingdate1$daysxmonth
# 2b: Error propagation on NH4.N in RF (error weighed by days/month):
dRF.samplingdate1$dRF.NH4 = dRF.samplingdate1$RF.depth.mean * dRF.samplingdate1$RF.NH4.mean *
((dRF.samplingdate1$SE.95.NH4/dRF.samplingdate1$RF.NH4.mean)^2+(dRF.samplingdate1$depth.SE.95/dRF.samplingdate1$RF.depth.mean)^2)^0.5 *
dRF.samplingdate1$days/dRF.samplingdate1$daysxmonth
# 3. PROPAGATION ERROR AS SUM OF ERRORS FROM DIFFERENT SAMPLING DATES
# wide to long
RF.err.propag = melt(dRF.samplingdate1, id.vars = c("date", "Ym"),
measure.vars = c("dRF.NH4", "dRF.NO3"), variable.name = "error_propagation_var")
# Calculate dn^2
RF.err.propag$value = (RF.err.propag$value)^2 # a)
dRF.err = aggregate(RF.err.propag$value ~ RF.err.propag$Ym + RF.err.propag$error_propagation_var, FUN = sum) # sum of squares b)
names(dRF.err) = c("Ym", "variable", "value")
dRF.err$value= (dRF.err$value)^0.5 # square root to calculate the error propagation of sums c)
#housekeeping
rm(dates2, dd.dates, dRF.samplingdate, dRF.samplingdate1, NH4.RF, NH4.RF.mean, NH4.RF.SD, NO3.RF, NO3.RF.mean,
NO3.RF.SD, RF, RF.depth.mean, NH4.RF.N, NO3.RF.N, RF.SE.95, RFLAB.SE.95, RFNH4.SE.95, RFNO3.SE.95, RF.N,
RF.depth.SD, RF.err.propag, cols.num, date.end.month, dates, days, diffdays, RF.coll, NO3data, NH4data)
### Creating input and output errors (input as 1.41RF, cioe' come se l'errore di fog fosse dello stesso ordine di grandezza
# di RF)
dIN.err = dRF.err
dIN.err$variable = revalue(dRF.err$variable, c("dRF.NH4"="dIN.NH4", "dRF.NO3"="dIN.NO3"))
dIN.err$value = dIN.err$value * (2^0.5)
dOUT.err = dTF.err
dOUT.err$variable = revalue(dOUT.err$variable, c("dTF.NH4"="dOUT.NH4", "dTF.NO3"="dOUT.NO3"))
dOUT.err$value = ((dOUT.err$value)^2+(dSF.err$value)^2)^0.5
# Creating long.N.error: stack the monthly error tables for all fluxes into one
# long-format data frame (columns: Ym, variable, value).
long.N.error = rbind(dRF.err, dTF.err, dSF.err, dIN.err, dOUT.err)
# Parse the "YYYYMM" keys into zoo::yearmon objects for date-aware formatting
long.N.error = transform(long.N.error, Ym = as.yearmon(as.character(Ym), "%Y%m"))
# BUG FIX: the column is named Ym, not mY. long.N.error$mY returned NULL (no
# partial match), so format() produced character(0) and the assignment failed
# with "replacement has 0 rows".
long.N.error$month = format(long.N.error$Ym, "%m")
long.N.error$year = format(long.N.error$Ym, "%Y")
|
/mikerspencer/Error_propagation_TF_SF_RF_fog.R
|
no_license
|
dferraretto/Daniele_Repo
|
R
| false
| false
| 23,211
|
r
|
############## --------------------------------------------------------
############## ERROR PROPAGATION
############## Author: Daniele Ferraretto
############## started on 26th July, 2017
############## updated: last update: 31/07/2017
############## --------------------------------------------------------
############## --------------------------------------------------------
# ------>>> NOTE: ALL ERRORS ARE IN mg N / m2. You need to divide them by 100 to compare them with the monthly data,
# expressed in Kg N / ha
# clear the memory
# NOTE(review): rm(list=ls()) in a script is discouraged — it wipes the caller's
# workspace when sourced; kept as-is since this is a standalone analysis script.
rm(list=ls())
#.libPaths("C:/Workspace/R") # adjust library path for the desktop machine
### set working dir for pc:
#setwd("C:/Users/Daniele Ferraretto/Documents/Daniele_Repo")
### setwd for the desktop
#setwd("M:/My PhD/R/PhD-local_repo")
#setwd("C:/Users/s1373890/Daniele_Repo")
########## THROUGHFALL PROPAGATION ERROR #############
# by SAMPLING DATE (1/2)
# First attempt: 1) SD and mean for every sampling date,
# 2) then daily value = error per sampling date / number of days covered
# 3) Addition: simple square root (quadrature) of the two error values from 2
########## ERROR IN TF DEPTH #############
library(RSQLite)
# All field and lab measurements live in a local SQLite database
db = dbConnect(SQLite(), dbname="field_lab/Griffin.SQLite")
# Throughfall depth (negative values excluded as invalid readings)
TF = dbGetQuery(db, "SELECT * FROM fielddata WHERE variable = 'through depth' AND vals>=0 ORDER BY date")
NO3data = dbGetQuery(db, "SELECT * FROM labdata WHERE VALS >= 0 AND variable = 'NO3.N' ORDER BY date")
NH4data = dbGetQuery(db, "SELECT * FROM labdata WHERE VALS >= 0 AND variable = 'NH4.N' ORDER BY date")
# A1: MEAN depth value by sampling date
TF.depth.mean=aggregate(vals ~ date, data = TF, FUN = mean, na.rm = TRUE )
names(TF.depth.mean) = c("date", "TF.depth.mean")
# A2: SD of depth value by sampling date
TF.depth.SD=aggregate(vals ~ date, data = TF, FUN = sd, na.rm = TRUE )
# A3: counting samples per sampling date
TF.N = as.data.frame(table(TF$date))
# A4. SE = SD/(N)^0.5. SE (95%) = 2.110*SE; df = 17
# (2.110 = t(0.975, df = 17), matching the 18 collectors listed in TF.coll below)
TF.SE.95 = as.data.frame(2.110*TF.depth.SD$vals/(TF.N$Freq)^0.5)
TF.SE.95 = cbind(TF.N$Var1, TF.SE.95)
names(TF.SE.95) = c("date", "depth.SE.95")
########## ERROR IN TF N LAB VALS #############
# IDs of the 18 throughfall collectors (control "C" and treatment "T" plots)
TF.coll = c("C10T1", "C10T2", "C10T3", "C11T1", "C11T2", "C11T3", "C12T1", "C12T2", "C12T3", "T10T1",
            "T10T2", "T10T3", "T11T1", "T11T2", "T11T3", "T12T1", "T12T2", "T12T3")
library(data.table)
# Keep only lab samples coming from throughfall collectors
NO3.TF = NO3data[NO3data$sample %in% TF.coll, ]
NH4.TF = NH4data[NH4data$sample %in% TF.coll, ]
# B1: mean of lab values by sampling date
NO3.TF.mean = aggregate(vals ~ date, data = NO3.TF, FUN = mean, na.rm = TRUE )
names(NO3.TF.mean) = c("date", "TF.NO3.mean")
NH4.TF.mean = aggregate(vals ~ date, data = NH4.TF, FUN = mean, na.rm = TRUE )
names(NH4.TF.mean) = c("date", "TF.NH4.mean")
# B2: SD of lab values by sampling date
NO3.TF.SD = aggregate(vals ~ date, data = NO3.TF, FUN = sd, na.rm = TRUE )
names(NO3.TF.SD) = c("date", "TF.NO3.SD")
NH4.TF.SD = aggregate(vals ~ date, data = NH4.TF, FUN = sd, na.rm = TRUE )
names(NH4.TF.SD) = c("date", "TF.NH4.SD")
# 22/08/2017: problem with the July 2017 NH4 data: they do not exist. Check the
# origin of the problem; if necessary rerun the scripts!!!
# B3: counting samples:
(library(plyr)) # outer parentheses just print the attached-package list
NO3.TF.N = as.data.frame(table(NO3.TF$date))
NH4.TF.N = as.data.frame(table(NH4.TF$date))
# B4. SE = SD/(N)^0.5. SE (95%) = 2.110*SE
TFNO3.SE.95 = as.data.frame(2.110*NO3.TF.SD$TF.NO3.SD/(NO3.TF.N$Freq)^0.5)
TFNH4.SE.95 = as.data.frame(2.110*NH4.TF.SD$TF.NH4.SD/(NH4.TF.N$Freq)^0.5)
TFLAB.SE.95 = cbind(NO3.TF.N$Var1, TFNO3.SE.95, TFNH4.SE.95)
names(TFLAB.SE.95) = c("date", "SE.95.NO3", "SE.95.NH4")
# B5. Calculate the error per each sampling date: MULTIMERGE (all=T keeps every date)
dTF.samplingdate = merge(merge(merge(merge(NO3.TF.mean, TF.SE.95, by='date', all=T),
                                     NH4.TF.mean, by='date', all=T), TF.depth.mean, by='date', all=T), TFLAB.SE.95, by='date', all=T)
# 5a: Error propagation on NO3.N in TF by sampling date (relative errors in
# quadrature; 10000/1000000 converts mg/m2 to kg/ha):
dTF.samplingdate$dTF.NO3 = (10000/1000000) * dTF.samplingdate$TF.depth.mean * dTF.samplingdate$TF.NO3.mean *
  ((dTF.samplingdate$SE.95.NO3/dTF.samplingdate$TF.NO3.mean)^2+(dTF.samplingdate$depth.SE.95/dTF.samplingdate$TF.depth.mean)^2)^0.5
# 5b: Error propagation on NH4.N in TF by sampling date:
dTF.samplingdate$dTF.NH4 = (10000/1000000) * dTF.samplingdate$TF.depth.mean * dTF.samplingdate$TF.NH4.mean *
  ((dTF.samplingdate$SE.95.NH4/dTF.samplingdate$TF.NH4.mean)^2+(dTF.samplingdate$depth.SE.95/dTF.samplingdate$TF.depth.mean)^2)^0.5
#####################################################################################
######### REFINING ERROR PROPAGATION in TF: ERROR PROPAGATION BY MONTH ##########
# (2/2)
# Rationale: working on partial errors weighed by the number of days of each month
# covered by the n sampling date needs to 1) calculate how many days of each sampling date fall in a month;
# 2. calculate the weighed error; 3. propagation error as sum of the weighed errors
# 1. Calculating how many days from previous sampling:
dates=as.Date(unique(dTF.samplingdate$date))
# Month-end dates: first-of-month sequence minus one day, 66 months from Nov 2011
date.end.month <- seq(as.Date("2011-11-01"),length=66,by="months")-1
dates2 = c(dates,date.end.month)
dates2= as.data.frame.Date(dates2)
library(dplyr)
dates = dates2 %>% distinct(dates2) # this is to select unique values from dates2
dates = dates[order(dates$dates2, decreasing = FALSE ),] # single-column [i,] drops to a Date vector
days = as.POSIXlt(dates)
days=rev(days) # reverse the order of dates to obtain positive values from differences
# NOTE(review): "days-1" subtracts one SECOND from each element, so
# length(days-1) == length(days) == n; the first index is therefore the FULL
# vector while the second has n-1 elements, and difftime() recycles the shorter
# one (with a warning). The spurious wrapped difference ends up — after rev()
# below — in position 1, which is immediately overwritten with 26. The net
# result matches the intended consecutive-date differences, but this is fragile:
# do NOT "fix" the indexing to 1:(length(days)-1) in isolation, as that yields
# n-1 values and would break the cbind() with the n dates below.
diffdays = difftime( days[1:length(days-1)] , days[2:length(days)] )
diffdays= round(diffdays, digits=0) #correct values to the integer
days=rev(diffdays) #back to the increasing order of dates
days[[1]]=26 #set first value as 26 (first sampling in 2011); also overwrites the wrapped difference noted above
dd.dates=cbind(dates,days)
dd.dates = as.data.frame(dd.dates)
dd.dates$dates = as.Date(dd.dates$dates, origin="1970-01-01") # cbind() coerced Date to numeric; 1970-01-01 is R's epoch
dTF.samplingdate$date = as.Date(dTF.samplingdate$date)
# Outer join with the day-count table (NOTE(review): na.rm is not a merge() argument; ignored via "...")
dTF.samplingdate1 = merge(dTF.samplingdate,dd.dates,by.x="date",by.y="dates",na.rm=FALSE, all = T)
# Backward-fill the NAs introduced by the outer join
library(zoo)
dTF.samplingdate1 = na.locf(dTF.samplingdate1,fromLast = TRUE)
# Year-month key used to aggregate by month
dTF.samplingdate1$Ym = strftime(dTF.samplingdate1$date, "%Y%m")
# Calculate the number of days of a month
# install.packages("Hmisc")
library(Hmisc)
dTF.samplingdate1$daysxmonth = monthDays(as.Date(dTF.samplingdate1$date))
# Convert columns back to numeric (coerced to character by na.locf above)
dTF.samplingdate1$days = as.numeric(dTF.samplingdate1$days)
cols.num <- c("TF.NO3.mean","SE.95.NO3","TF.NH4.mean", "SE.95.NH4","TF.depth.mean", "depth.SE.95", "days")
dTF.samplingdate1[cols.num] <- sapply(dTF.samplingdate1[cols.num],as.numeric)
summary(dTF.samplingdate1)
# 2A: Error propagation on NO3.N in TF (error weighed by days/month):
dTF.samplingdate1$dTF.NO3 = dTF.samplingdate1$TF.depth.mean * dTF.samplingdate1$TF.NO3.mean *
  ((dTF.samplingdate1$SE.95.NO3/dTF.samplingdate1$TF.NO3.mean)^2+(dTF.samplingdate1$depth.SE.95/dTF.samplingdate1$TF.depth.mean)^2)^0.5 *
  dTF.samplingdate1$days/dTF.samplingdate1$daysxmonth
# 2B: Error propagation on NH4.N in TF (error weighed by days/month):
dTF.samplingdate1$dTF.NH4 = dTF.samplingdate1$TF.depth.mean * dTF.samplingdate1$TF.NH4.mean *
  ((dTF.samplingdate1$SE.95.NH4/dTF.samplingdate1$TF.NH4.mean)^2+(dTF.samplingdate1$depth.SE.95/dTF.samplingdate1$TF.depth.mean)^2)^0.5 *
  dTF.samplingdate1$days/dTF.samplingdate1$daysxmonth
# 3. PROPAGATION ERROR AS SUM OF ERRORS FROM DIFFERENT SAMPLING DATES
# wide to long
library(reshape2)
TF.err.propag = melt(dTF.samplingdate1, id.vars = c("date", "Ym"),
                     measure.vars = c("dTF.NH4", "dTF.NO3"), variable.name = "error_propagation_var")
# Calculate dn^2: square, sum by month, square-root (quadrature over sampling dates)
TF.err.propag$value = (TF.err.propag$value)^2
dTF.err = aggregate(TF.err.propag$value ~ TF.err.propag$Ym + TF.err.propag$error_propagation_var, FUN = sum)
names(dTF.err) = c("Ym", "variable", "value")
dTF.err$value= (dTF.err$value)^0.5
# housekeeping: drop all intermediates, keeping only dTF.err
rm(NH4.TF.N, NO3.TF.N, TF.N, TF.SE.95, TFLAB.SE.95, TFNH4.SE.95, TFNO3.SE.95, dates2, dd.dates, dTF.samplingdate, dTF.samplingdate1, NH4.TF, NH4.TF.mean, NH4.TF.SD, NO3.TF, NO3.TF.mean, NO3.TF.SD, TF,
   TF.depth.mean, TF.depth.SD, TF.err.propag, cols.num, date.end.month, dates, days, diffdays, TF.coll)
##############################################################################
############ STEMFLOW PROPAGATION ERROR ###############
# First attempt: 1) SD and mean for every sampling date,
# 2) then daily value = error per sampling date / number of days covered
# 3) Addition: simple square root (quadrature) of the two error values from 2
########## ERROR IN SF vol #############
# Stemflow is measured as a collected VOLUME (not a depth like TF/RF)
SF = dbGetQuery(db, "SELECT * FROM fielddata WHERE variable = 'stem vol' AND vals>=0 ORDER BY date")
# A1: MEAN vol value by sampling date
SF.vol.mean=aggregate(vals ~ date, data = SF, FUN = mean, na.rm = TRUE )
names(SF.vol.mean) = c("date", "SF.vol.mean")
# A2: SD of vol value by sampling date
SF.vol.SD=aggregate(vals ~ date, data = SF, FUN = sd, na.rm = TRUE )
# A3: counting samples per sampling date
SF.N = as.data.frame(table(SF$date))
# A4. SE = SD/(N)^0.5. SE (95%) = 1.96*SE (large-sample z value)
SF.SE.95 = as.data.frame(1.96*SF.vol.SD$vals/(SF.N$Freq)^0.5)
SF.SE.95 = cbind(SF.N$Var1, SF.SE.95)
names(SF.SE.95) = c("date", "vol.SE.95")
########## ERROR IN SF N LAB VALS #############
# IDs of the 22 stemflow collectors (control "C" and treatment "T" plots)
SF.coll = c("C10S1", "C10S2", "C10S3", "C11S1", "C11S2", "C11S3", "C12S1", "C12S2", "C12S3", "T10S1",
            "T10S2", "T10S3", "T11S1", "T11S2", "T11S3", "T11S4", "T11S5", "T11S6", "T11S7", "T12S1", "T12S2", "T12S3")
NO3.SF = NO3data[NO3data$sample %in% SF.coll, ]
NH4.SF = NH4data[NH4data$sample %in% SF.coll, ]
# B1: mean of lab values by sampling date
NO3.SF.mean = aggregate(vals ~ date, data = NO3.SF, FUN = mean, na.rm = TRUE )
names(NO3.SF.mean) = c("date", "SF.NO3.mean")
NH4.SF.mean = aggregate(vals ~ date, data = NH4.SF, FUN = mean, na.rm = TRUE )
names(NH4.SF.mean) = c("date", "SF.NH4.mean")
# B2: SD of lab values by sampling date
NO3.SF.SD = aggregate(vals ~ date, data = NO3.SF, FUN = sd, na.rm = TRUE )
names(NO3.SF.SD) = c("date", "SF.NO3.SD")
NH4.SF.SD = aggregate(vals ~ date, data = NH4.SF, FUN = sd, na.rm = TRUE )
names(NH4.SF.SD) = c("date", "SF.NH4.SD")
# B3: counting samples:
NO3.SF.N = as.data.frame(table(NO3.SF$date))
NH4.SF.N = as.data.frame(table(NH4.SF$date))
# B4. SE = SD/(N)^0.5. SE (95%) = 1.96*SE
SFNO3.SE.95 = as.data.frame(1.96*NO3.SF.SD$SF.NO3.SD/(NO3.SF.N$Freq)^0.5)
SFNH4.SE.95 = as.data.frame(1.96*NH4.SF.SD$SF.NH4.SD/(NH4.SF.N$Freq)^0.5)
SFLAB.SE.95 = cbind(NO3.SF.N$Var1, SFNO3.SE.95, SFNH4.SE.95)
names(SFLAB.SE.95) = c("date", "SE.95.NO3", "SE.95.NH4")
# B5. Calculate the error per each sampling date: MULTIMERGE (all=T keeps every date)
dSF.samplingdate = merge(merge(merge(merge(SF.vol.mean, SF.SE.95, by='date', all=T),
                                     NH4.SF.mean, by='date', all=T), NO3.SF.mean, by='date', all=T),
                         SFLAB.SE.95, by='date', all=T) # this last is the sum of the two Nx lab.SE
# B5a: Error propagation on NO3.N in SF by sampling date
# (NOTE(review): 1883/1000000 is presumably the volume-to-kg/ha conversion
# factor for the plot area — TODO confirm the provenance of 1883):
dSF.samplingdate$dSF.NO3 = (1883/1000000) * dSF.samplingdate$SF.vol.mean * dSF.samplingdate$SF.NO3.mean *
  ((dSF.samplingdate$SE.95.NO3/dSF.samplingdate$SF.NO3.mean)^2+(dSF.samplingdate$vol.SE.95/dSF.samplingdate$SF.vol.mean)^2)^0.5
# B5b: Error propagation on NH4.N in SF by sampling date:
dSF.samplingdate$dSF.NH4 = (1883/1000000) * dSF.samplingdate$SF.vol.mean * dSF.samplingdate$SF.NH4.mean *
  ((dSF.samplingdate$SE.95.NH4/dSF.samplingdate$SF.NH4.mean)^2+(dSF.samplingdate$vol.SE.95/dSF.samplingdate$SF.vol.mean)^2)^0.5
###############################################################################
######### REFINING ERROR PROPAGATION: ERROR PROPAGATION BY MONTH ##########
# Rationale: working on partial errors weighed by the number of days of each month
# covered by the n sampling date needs to 1) calculate how many days of each sampling date fall in a month;
# 2. calculate the weighed error; 3. propagation error as sum of the weighed errors
# 1. Calculating how many days from previous sampling:
dates=as.Date(unique(dSF.samplingdate$date))
# Month-end dates: first-of-month sequence minus one day, 66 months from Nov 2011
date.end.month <- seq(as.Date("2011-11-01"),length=66,by="months")-1
dates2 = c(dates,date.end.month)
dates2= as.data.frame.Date(dates2)
dates = dates2 %>% distinct(dates2) # this is to select unique values from dates2
dates = dates[order(dates$dates2, decreasing = FALSE ),] # single-column [i,] drops to a Date vector
days = as.POSIXlt(dates)
days=rev(days) # reverse the order of dates to obtain positive values from differences
# NOTE(review): same landmine as the TF section — "days-1" keeps length n, so
# difftime() recycles the shorter second argument (with a warning); the wrapped
# difference lands in position 1 after rev() and is overwritten with 26 below.
# Do not change the indexing in isolation; see the TF section for details.
diffdays = difftime( days[1:length(days-1)] , days[2:length(days)] )
diffdays= round(diffdays, digits=0) #correct values to the integer
days=rev(diffdays) #back to the increasing order of dates
days[[1]]=26 #set first value as 26 (first sampling in 2011)
dd.dates=cbind(dates,days)
dd.dates = as.data.frame(dd.dates)
dd.dates$dates = as.Date(dd.dates$dates, origin="1970-01-01") # cbind() coerced Date to numeric; 1970-01-01 is R's epoch
dSF.samplingdate$date = as.Date(dSF.samplingdate$date)
# Outer join with the day-count table (NOTE(review): na.rm is not a merge() argument)
dSF.samplingdate1 = merge(dSF.samplingdate,dd.dates,by.x="date",by.y="dates",na.rm=FALSE, all = T)
# Backward-fill the NAs introduced by the outer join
dSF.samplingdate1 = na.locf(dSF.samplingdate1,fromLast = TRUE)
dSF.samplingdate1$Ym = strftime(dSF.samplingdate1$date, "%Y%m")
# Calculate the number of days of a month
# install.packages("Hmisc")
dSF.samplingdate1$daysxmonth = monthDays(as.Date(dSF.samplingdate1$date))
# Convert columns back to numeric (coerced to character by na.locf above)
dSF.samplingdate1$days = as.numeric(dSF.samplingdate1$days)
cols.num <- c("SF.NO3.mean","SE.95.NO3","SF.NH4.mean", "SE.95.NH4","SF.vol.mean", "vol.SE.95", "days")
dSF.samplingdate1[cols.num] <- sapply(dSF.samplingdate1[cols.num],as.numeric)
# 2a: Error propagation on NO3.N in SF (error weighed by days/month):
dSF.samplingdate1$dSF.NO3 = dSF.samplingdate1$SF.vol.mean * dSF.samplingdate1$SF.NO3.mean *
  ((dSF.samplingdate1$SE.95.NO3/dSF.samplingdate1$SF.NO3.mean)^2+(dSF.samplingdate1$vol.SE.95/dSF.samplingdate1$SF.vol.mean)^2)^0.5 *
  dSF.samplingdate1$days/dSF.samplingdate1$daysxmonth
# 2b: Error propagation on NH4.N in SF (error weighed by days/month):
dSF.samplingdate1$dSF.NH4 = dSF.samplingdate1$SF.vol.mean * dSF.samplingdate1$SF.NH4.mean *
  ((dSF.samplingdate1$SE.95.NH4/dSF.samplingdate1$SF.NH4.mean)^2+(dSF.samplingdate1$vol.SE.95/dSF.samplingdate1$SF.vol.mean)^2)^0.5 *
  dSF.samplingdate1$days/dSF.samplingdate1$daysxmonth
# 3. PROPAGATION ERROR AS SUM OF ERRORS FROM DIFFERENT SAMPLING DATES
# wide to long
SF.err.propag = melt(dSF.samplingdate1, id.vars = c("date", "Ym"),
                     measure.vars = c("dSF.NH4", "dSF.NO3"), variable.name = "error_propagation_var")
# Calculate dn^2: square, sum by month, square-root (quadrature over sampling dates)
SF.err.propag$value = (SF.err.propag$value)^2
dSF.err = aggregate(SF.err.propag$value ~ SF.err.propag$Ym + SF.err.propag$error_propagation_var, FUN = sum)
names(dSF.err) = c("Ym", "variable", "value")
dSF.err$value= (dSF.err$value)^0.5
# housekeeping: drop all intermediates, keeping only dSF.err
rm(dates2, dd.dates, dSF.samplingdate, dSF.samplingdate1, NH4.SF, NH4.SF.mean, NH4.SF.SD, NO3.SF, NO3.SF.mean, NO3.SF.SD,
   SF.err.propag, cols.num, date.end.month, dates, days, diffdays, SF.coll, NH4.SF.N, NO3.SF.N, SF, SF.N, SF.SE.95,
   SF.vol.mean, SFLAB.SE.95, SFNH4.SE.95, SFNO3.SE.95, SF.vol.SD)
##########################################################################################################
########## PRECIPITATION PROPAGATION ERROR #############
##########################################################################################################
# Rainfall (RF) comes from only TWO gauges: C30D1 and C31D1
RF = dbGetQuery(db, "SELECT * FROM fielddata WHERE sample = 'C30D1' or sample = 'C31D1' ORDER BY date")
# A1: MEAN depth by sampling date
RF.depth.mean=aggregate(vals ~ date, data = RF, FUN = mean, na.rm = TRUE )
names(RF.depth.mean) = c("date", "RF.depth.mean")
# A2: SD of depth value by sampling date
RF.depth.SD=aggregate(vals ~ date, data = RF, FUN = sd, na.rm = TRUE )
# A3: counting samples per sampling date
RF.N = as.data.frame(table(RF$date))
# A4. SE = SD/(N)^0.5. SE (95%) = 12.706*SE; degrees of freedom = 1 (only 2 gauges)!!!
RF.SE.95 = as.data.frame(12.706*RF.depth.SD$vals/(RF.N$Freq)^0.5)
RF.SE.95 = cbind(RF.N$Var1, RF.SE.95)
names(RF.SE.95) = c("date", "depth.SE.95")
########## ERROR IN RF N LAB VALS #############
RF.coll = c("C30D1", "C31D1")
NO3.RF = NO3data[NO3data$sample %in% RF.coll, ]
NH4.RF = NH4data[NH4data$sample %in% RF.coll, ]
# B1: MEAN of LAB values by sampling date
NO3.RF.mean = aggregate(vals ~ date, data = NO3.RF, FUN = mean, na.rm = TRUE )
names(NO3.RF.mean) = c("date", "RF.NO3.mean")
NH4.RF.mean = aggregate(vals ~ date, data = NH4.RF, FUN = mean, na.rm = TRUE )
names(NH4.RF.mean) = c("date", "RF.NH4.mean")
# B2: SD of lab values by sampling date
NO3.RF.SD = aggregate(vals ~ date, data = NO3.RF, FUN = sd, na.rm = TRUE )
names(NO3.RF.SD) = c("date", "RF.NO3.SD")
NH4.RF.SD = aggregate(vals ~ date, data = NH4.RF, FUN = sd, na.rm = TRUE )
names(NH4.RF.SD) = c("date", "RF.NH4.SD")
# B3: counting samples:
NO3.RF.N = as.data.frame(table(NO3.RF$date))
NH4.RF.N = as.data.frame(table(NH4.RF$date))
# B4. SE = SD/(N)^0.5. SE (95%) = 12.706*SE (t with df = 1, two gauges;
# the original comment incorrectly said 1.96 — the code uses 12.706, consistent with A4)
RFNO3.SE.95 = as.data.frame(12.706*NO3.RF.SD$RF.NO3.SD/(NO3.RF.N$Freq)^0.5)
RFNH4.SE.95 = as.data.frame(12.706*NH4.RF.SD$RF.NH4.SD/(NH4.RF.N$Freq)^0.5)
RFLAB.SE.95 = cbind(NO3.RF.N$Var1, RFNO3.SE.95, RFNH4.SE.95)
names(RFLAB.SE.95) = c("date", "SE.95.NO3", "SE.95.NH4")
# B5. Calculate the error per each sampling date: MULTIMERGE (all=T keeps every date)
dRF.samplingdate = merge(merge(merge(merge(NO3.RF.mean, RF.SE.95, by='date', all=T),
                                     NH4.RF.mean, by='date', all=T), RF.depth.mean, by='date', all=T), RFLAB.SE.95, by='date', all=T)
# 5a: Error propagation on NO3.N in RF by sampling date (10000/1000000 converts mg/m2 to kg/ha):
dRF.samplingdate$dRF.NO3 = (10000/1000000) * dRF.samplingdate$RF.depth.mean * dRF.samplingdate$RF.NO3.mean *
  ((dRF.samplingdate$SE.95.NO3/dRF.samplingdate$RF.NO3.mean)^2+(dRF.samplingdate$depth.SE.95/dRF.samplingdate$RF.depth.mean)^2)^0.5
# 5b: Error propagation on NH4.N in RF by sampling date:
dRF.samplingdate$dRF.NH4 = (10000/1000000) * dRF.samplingdate$RF.depth.mean * dRF.samplingdate$RF.NH4.mean *
  ((dRF.samplingdate$SE.95.NH4/dRF.samplingdate$RF.NH4.mean)^2+(dRF.samplingdate$depth.SE.95/dRF.samplingdate$RF.depth.mean)^2)^0.5
# correct NaN to 0 (after a check of the lab values, all = 0)
# dRF.samplingdate[19, "dRF.NO3"] = 0 # left disabled for now: not a satisfying solution; probably better to just ignore that error
###############################################################################
######### REFINING ERROR PROPAGATION: ERROR PROPAGATION BY MONTH ##########
# Rationale: working on partial errors weighed by the number of days of each month
# covered by the n sampling date needs to 1) calculate how many days of each sampling date fall in a month;
# 2. calculate the weighed error; 3. propagation error as sum of the weighed errors
# 1. Calculating how many days from previous sampling:
dates=as.Date(unique(dRF.samplingdate$date))
# Month-end dates: first-of-month sequence minus one day, 66 months from Nov 2011
date.end.month <- seq(as.Date("2011-11-01"),length=66,by="months")-1
dates2 = c(dates,date.end.month)
dates2= as.data.frame.Date(dates2)
dates = dates2 %>% distinct(dates2) # this is to select unique values from dates2
dates = dates[order(dates$dates2, decreasing = FALSE ),] # single-column [i,] drops to a Date vector
days = as.POSIXlt(dates)
days=rev(days) # reverse the order of dates to obtain positive values from differences
# NOTE(review): same landmine as the TF section — "days-1" keeps length n, so
# difftime() recycles the shorter second argument (with a warning); the wrapped
# difference lands in position 1 after rev() and is overwritten with 26 below.
# Do not change the indexing in isolation; see the TF section for details.
diffdays = difftime( days[1:length(days-1)] , days[2:length(days)] )
diffdays= round(diffdays, digits=0) #correct values to the integer
days=rev(diffdays) #back to the increasing order of dates
days[[1]]=26 #set first value as 26 (first sampling in 2011)
dd.dates=cbind(dates,days)
dd.dates = as.data.frame(dd.dates)
dd.dates$dates = as.Date(dd.dates$dates, origin="1970-01-01") # cbind() coerced Date to numeric; 1970-01-01 is R's epoch
dRF.samplingdate$date = as.Date(dRF.samplingdate$date)
# Outer join with the day-count table (NOTE(review): na.rm is not a merge() argument)
dRF.samplingdate1 = merge(dRF.samplingdate,dd.dates,by.x="date",by.y="dates",na.rm=FALSE, all = T)
# Backward-fill the NAs introduced by the outer join
dRF.samplingdate1 = na.locf(dRF.samplingdate1,fromLast = TRUE)
dRF.samplingdate1$Ym = strftime(dRF.samplingdate1$date, "%Y%m")
# Calculate the number of days of a month
dRF.samplingdate1$daysxmonth = monthDays(as.Date(dRF.samplingdate1$date))
# converting columns from character to numeric at once (coerced by na.locf above):
dRF.samplingdate1$days = as.numeric(dRF.samplingdate1$days)
cols.num <- c("RF.NO3.mean","SE.95.NO3","RF.NH4.mean", "SE.95.NH4","RF.depth.mean", "depth.SE.95", "days")
dRF.samplingdate1[cols.num] <- sapply(dRF.samplingdate1[cols.num],as.numeric)
# 2a: Error propagation on NO3.N in RF (error weighed by days/month):
dRF.samplingdate1$dRF.NO3 = dRF.samplingdate1$RF.depth.mean * dRF.samplingdate1$RF.NO3.mean *
  ((dRF.samplingdate1$SE.95.NO3/dRF.samplingdate1$RF.NO3.mean)^2+(dRF.samplingdate1$depth.SE.95/dRF.samplingdate1$RF.depth.mean)^2)^0.5 *
  dRF.samplingdate1$days/dRF.samplingdate1$daysxmonth
# 2b: Error propagation on NH4.N in RF (error weighed by days/month):
dRF.samplingdate1$dRF.NH4 = dRF.samplingdate1$RF.depth.mean * dRF.samplingdate1$RF.NH4.mean *
  ((dRF.samplingdate1$SE.95.NH4/dRF.samplingdate1$RF.NH4.mean)^2+(dRF.samplingdate1$depth.SE.95/dRF.samplingdate1$RF.depth.mean)^2)^0.5 *
  dRF.samplingdate1$days/dRF.samplingdate1$daysxmonth
# 3. PROPAGATION ERROR AS SUM OF ERRORS FROM DIFFERENT SAMPLING DATES
# wide to long
RF.err.propag = melt(dRF.samplingdate1, id.vars = c("date", "Ym"),
                     measure.vars = c("dRF.NH4", "dRF.NO3"), variable.name = "error_propagation_var")
# Calculate dn^2
RF.err.propag$value = (RF.err.propag$value)^2 # a)
dRF.err = aggregate(RF.err.propag$value ~ RF.err.propag$Ym + RF.err.propag$error_propagation_var, FUN = sum) # sum of squares b)
names(dRF.err) = c("Ym", "variable", "value")
dRF.err$value= (dRF.err$value)^0.5 # square root to calculate the error propagation of sums c)
# housekeeping: drop all intermediates, keeping only dRF.err
rm(dates2, dd.dates, dRF.samplingdate, dRF.samplingdate1, NH4.RF, NH4.RF.mean, NH4.RF.SD, NO3.RF, NO3.RF.mean,
   NO3.RF.SD, RF, RF.depth.mean, NH4.RF.N, NO3.RF.N, RF.SE.95, RFLAB.SE.95, RFNH4.SE.95, RFNO3.SE.95, RF.N,
   RF.depth.SD, RF.err.propag, cols.num, date.end.month, dates, days, diffdays, RF.coll, NO3data, NH4data)
### Creating input and output errors. Input is taken as sqrt(2)*RF, i.e. the fog
# error is assumed to be of the same order of magnitude as the rainfall error.
dIN.err = dRF.err
dIN.err$variable = revalue(dRF.err$variable, c("dRF.NH4"="dIN.NH4", "dRF.NO3"="dIN.NO3"))
dIN.err$value = dIN.err$value * (2^0.5)
# Output error: throughfall and stemflow errors combined in quadrature.
# NOTE(review): assumes dTF.err and dSF.err rows align one-to-one — verify.
dOUT.err = dTF.err
dOUT.err$variable = revalue(dOUT.err$variable, c("dTF.NH4"="dOUT.NH4", "dTF.NO3"="dOUT.NO3"))
dOUT.err$value = ((dOUT.err$value)^2+(dSF.err$value)^2)^0.5
# Creating long.N.error: stack the monthly error tables of all fluxes
long.N.error = rbind(dRF.err, dTF.err, dSF.err, dIN.err, dOUT.err)
# Parse the "YYYYMM" keys into zoo::yearmon objects for date-aware formatting
long.N.error = transform(long.N.error, Ym = as.yearmon(as.character(Ym), "%Y%m"))
# BUG FIX: the column is named Ym, not mY. long.N.error$mY returned NULL (no
# partial match), so format() produced character(0) and the assignment failed
# with "replacement has 0 rows".
long.N.error$month = format(long.N.error$Ym, "%m")
long.N.error$year = format(long.N.error$Ym, "%Y")
|
library(dplyr)
library(ggplot2)
library(bootstrap)
library(lme4)
library(tidyr)
theme_set(theme_bw(18))
# NOTE(review): hard-coded absolute path makes the script non-portable
setwd("/Users/elisakreiss/Documents/Stanford/overinformativeness/experiments/elisa_paper_relevant/typicality_calculation")
source("rscripts/helpers.r")
# Mean typicality norms from three norming studies: object-only utterances,
# color-patch-only utterances, and full (color+object) utterances.
objTyp = read.table(file="../norming_comp_object/results/data/meantypicalities.csv",sep=",", header=T, quote="")
colTyp = read.table(file="../norming_comp_colorPatch/results/data/meantypicalities.csv",sep=",", header=T, quote="")
fullTyp = read.table(file="../norming_full/results/data/meantypicalities.csv",sep=",", header=T, quote="")
# Split "color_object" utterance strings into color part and object part
fullTyp$Col_utt = sapply(strsplit(as.character(fullTyp$Utterance),"_"), "[", 1)
fullTyp$Obj_utt = sapply(strsplit(as.character(fullTyp$Utterance),"_"), "[", 2)
fullTyp$ID = seq.int(nrow(fullTyp))
# Row-by-row lookup of the component typicalities in the two norming tables.
# NOTE(review): lapply returns a list; the as.numeric() calls below only work
# if every (Combo, utterance) lookup yields exactly one value — a missing or
# duplicated pair would break this. Verify against the norming data.
fullTyp$ColTyp = lapply(fullTyp$ID, function(x) colTyp[colTyp$Combo == fullTyp$Combo[x] & colTyp$color_utterance == fullTyp$Col_utt[x],]$MeanTypicality)
fullTyp$ObjTyp = lapply(fullTyp$ID, function(x) objTyp[objTyp$Combo == fullTyp$Combo[x] & objTyp$utterance == fullTyp$Obj_utt[x],]$Typicality)
fullTyp$ColTyp <- as.numeric(fullTyp$ColTyp)
fullTyp$ObjTyp <- as.numeric(fullTyp$ObjTyp)
# Additive combination of the component typicalities and its logistic transform
fullTyp$Sum = (fullTyp$ColTyp)+fullTyp$ObjTyp
fullTyp$SigmSum = 1/(1+exp(-1*fullTyp$Sum))
# Min-max normalise both predictors to [0, 1]
minVal = min(fullTyp$Sum)
maxVal = max(fullTyp$Sum)
fullTyp$NormSum = (fullTyp$Sum - minVal)/(maxVal - minVal)
minVal = min(fullTyp$SigmSum)
maxVal = max(fullTyp$SigmSum)
fullTyp$NormSigmSum = (fullTyp$SigmSum - minVal)/(maxVal - minVal)
# This normed data point doesn't make any sense, so drop it
fullTyp = droplevels(fullTyp[!(fullTyp$Utterance == "red_tomato" & fullTyp$Combo == "green pear"),])
# Scatter: empirical typicality vs normalised sigmoid-sum prediction.
# NOTE(review): ggsave() saves last_plot(), which is only set when a plot is
# PRINTED; when this script is source()d non-interactively the ggplot objects
# below are not auto-printed — confirm the intended figure is saved.
ggplot(fullTyp, aes(x=RoundMTypicality, y=NormSigmSum, group=1)) +
  geom_point() +
  # geom_smooth() +
  xlim(0, 1) +
  ylim(0, 1)
# geom_line() +
# theme(axis.text.x = element_text(angle=45,size=5,vjust=1,hjust=1))
ggsave("graphs/normSigmSum_purple.png",height=10, width=13)
# Scatter: empirical typicality vs normalised raw sum
ggplot(fullTyp, aes(x=RoundMTypicality, y=NormSum, group=1)) +
  geom_point() +
  # geom_smooth() +
  xlim(0, 1) +
  ylim(0, 1)
# geom_line() +
# theme(axis.text.x = element_text(angle=45,size=5,vjust=1,hjust=1))
ggsave("graphs/normSum_Purple.png",height=10, width=13)
# Same sigmoid-sum plot with item labels (Combo in black, Utterance in red)
ggplot(fullTyp, aes(x=RoundMTypicality, y=NormSigmSum, group=1)) +
  geom_point() +
  geom_text(aes(label=Combo),angle=0,hjust=0, vjust=0) +
  geom_text(aes(label=Utterance),angle=0,hjust=0, vjust=0.5, color="red") +
  # geom_smooth() +
  xlim(0, 1) +
  ylim(0, 1)
# geom_line() +
# theme(axis.text.x = element_text(angle=45,size=5,vjust=1,hjust=1))
ggsave("graphs/normSigmSum_label_purple.png",height=20, width=20)
# Same raw-sum plot with item labels
ggplot(fullTyp, aes(x=RoundMTypicality, y=NormSum, group=1)) +
  geom_point() +
  geom_text(aes(label=Combo),angle=0,hjust=0, vjust=0) +
  geom_text(aes(label=Utterance),angle=0,hjust=0, vjust=0.5, color="red") +
  # geom_smooth() +
  xlim(0, 1) +
  ylim(0, 1)
# geom_line() +
# theme(axis.text.x = element_text(angle=45,size=5,vjust=1,hjust=1))
ggsave("graphs/normSum_label_purple.png",height=20, width=20)
|
/experiments/elisa_paper_relevant/typicality_calculation/rscripts/typicality_calc.R
|
permissive
|
thegricean/overinformativeness
|
R
| false
| false
| 3,041
|
r
|
library(dplyr)
library(ggplot2)
library(bootstrap)
library(lme4)
library(tidyr)
theme_set(theme_bw(18))
# NOTE(review): hard-coded absolute path makes the script non-portable
setwd("/Users/elisakreiss/Documents/Stanford/overinformativeness/experiments/elisa_paper_relevant/typicality_calculation")
source("rscripts/helpers.r")
# Mean typicality norms from three norming studies: object-only utterances,
# color-patch-only utterances, and full (color+object) utterances.
objTyp = read.table(file="../norming_comp_object/results/data/meantypicalities.csv",sep=",", header=T, quote="")
colTyp = read.table(file="../norming_comp_colorPatch/results/data/meantypicalities.csv",sep=",", header=T, quote="")
fullTyp = read.table(file="../norming_full/results/data/meantypicalities.csv",sep=",", header=T, quote="")
# Split "color_object" utterance strings into color part and object part
fullTyp$Col_utt = sapply(strsplit(as.character(fullTyp$Utterance),"_"), "[", 1)
fullTyp$Obj_utt = sapply(strsplit(as.character(fullTyp$Utterance),"_"), "[", 2)
fullTyp$ID = seq.int(nrow(fullTyp))
# Row-by-row lookup of the component typicalities in the two norming tables.
# NOTE(review): lapply returns a list; the as.numeric() calls below only work
# if every (Combo, utterance) lookup yields exactly one value — a missing or
# duplicated pair would break this. Verify against the norming data.
fullTyp$ColTyp = lapply(fullTyp$ID, function(x) colTyp[colTyp$Combo == fullTyp$Combo[x] & colTyp$color_utterance == fullTyp$Col_utt[x],]$MeanTypicality)
fullTyp$ObjTyp = lapply(fullTyp$ID, function(x) objTyp[objTyp$Combo == fullTyp$Combo[x] & objTyp$utterance == fullTyp$Obj_utt[x],]$Typicality)
fullTyp$ColTyp <- as.numeric(fullTyp$ColTyp)
fullTyp$ObjTyp <- as.numeric(fullTyp$ObjTyp)
# Additive combination of the component typicalities and its logistic transform
fullTyp$Sum = (fullTyp$ColTyp)+fullTyp$ObjTyp
fullTyp$SigmSum = 1/(1+exp(-1*fullTyp$Sum))
# Min-max normalise both predictors to [0, 1]
minVal = min(fullTyp$Sum)
maxVal = max(fullTyp$Sum)
fullTyp$NormSum = (fullTyp$Sum - minVal)/(maxVal - minVal)
minVal = min(fullTyp$SigmSum)
maxVal = max(fullTyp$SigmSum)
fullTyp$NormSigmSum = (fullTyp$SigmSum - minVal)/(maxVal - minVal)
# This normed data point doesn't make any sense, so drop it
fullTyp = droplevels(fullTyp[!(fullTyp$Utterance == "red_tomato" & fullTyp$Combo == "green pear"),])
# Scatter: empirical typicality vs normalised sigmoid-sum prediction.
# NOTE(review): ggsave() saves last_plot(), which is only set when a plot is
# PRINTED; when this script is source()d non-interactively the ggplot objects
# below are not auto-printed — confirm the intended figure is saved.
ggplot(fullTyp, aes(x=RoundMTypicality, y=NormSigmSum, group=1)) +
  geom_point() +
  # geom_smooth() +
  xlim(0, 1) +
  ylim(0, 1)
# geom_line() +
# theme(axis.text.x = element_text(angle=45,size=5,vjust=1,hjust=1))
ggsave("graphs/normSigmSum_purple.png",height=10, width=13)
# Scatter: empirical typicality vs normalised raw sum
ggplot(fullTyp, aes(x=RoundMTypicality, y=NormSum, group=1)) +
  geom_point() +
  # geom_smooth() +
  xlim(0, 1) +
  ylim(0, 1)
# geom_line() +
# theme(axis.text.x = element_text(angle=45,size=5,vjust=1,hjust=1))
ggsave("graphs/normSum_Purple.png",height=10, width=13)
# Same sigmoid-sum plot with item labels (Combo in black, Utterance in red)
ggplot(fullTyp, aes(x=RoundMTypicality, y=NormSigmSum, group=1)) +
  geom_point() +
  geom_text(aes(label=Combo),angle=0,hjust=0, vjust=0) +
  geom_text(aes(label=Utterance),angle=0,hjust=0, vjust=0.5, color="red") +
  # geom_smooth() +
  xlim(0, 1) +
  ylim(0, 1)
# geom_line() +
# theme(axis.text.x = element_text(angle=45,size=5,vjust=1,hjust=1))
ggsave("graphs/normSigmSum_label_purple.png",height=20, width=20)
# Same raw-sum plot with item labels
ggplot(fullTyp, aes(x=RoundMTypicality, y=NormSum, group=1)) +
  geom_point() +
  geom_text(aes(label=Combo),angle=0,hjust=0, vjust=0) +
  geom_text(aes(label=Utterance),angle=0,hjust=0, vjust=0.5, color="red") +
  # geom_smooth() +
  xlim(0, 1) +
  ylim(0, 1)
# geom_line() +
# theme(axis.text.x = element_text(angle=45,size=5,vjust=1,hjust=1))
ggsave("graphs/normSum_label_purple.png",height=20, width=20)
|
# --- Factor basics demo ---
x <- c("spring", "winter",
       "winter", "autumn")
str(x)
x <- factor(x)
str(x)
x
x + x # arithmetic on factors is not meaningful: warns and returns NA
levels(x)
# --- Titanic survival analysis ---
library("titanic") # Titanic dataset
library("dplyr") # data transform
# library("car") # transform levels of factor vars
library("vcd") # mosaic plot
library("GGally") # scatter plot matrix
library("psych") # descriptive stats, plots
library("mfx") # marginal effects for logit models
tit <- titanic_train
glimpse(tit)
describe(tit)
help("titanic_train")
# Convert categorical columns to factors.
# NOTE(review): mutate_each() is deprecated in modern dplyr; the current
# equivalent is mutate(across(c(Survived, Pclass, Sex, Embarked), factor)).
tit2 <- mutate_each(tit, "factor",
                    Survived, Pclass, Sex, Embarked)
glimpse(tit2)
describe(tit2)
# Drop identifier-like columns not useful for modelling
tit3 <- select(tit2,
               -PassengerId, -Name,
               -Ticket, -Cabin)
glimpse(tit3)
table(tit3$Survived)
table(tit3$Pclass)
pairs.panels(tit3) # for numeric variables
tit4 <- select(tit3,
               -Fare, -Embarked)
ggpairs(tit4)
table(tit4$Parch)
# Collapse rare high counts of parents/children and siblings/spouses into "2+"
tit5 <- mutate(tit4,
               Parch2 = ifelse(Parch > 2, 2, Parch))
tit6 <- mutate(tit5,
               SibSp2 = ifelse(SibSp > 2, 2, SibSp))
tit7 <- select(tit6, -SibSp, -Parch)
ggpairs(tit7)
tit8 <- mutate(tit7,
               Parch2 = factor(Parch2),
               SibSp2 = factor(SibSp2))
ggpairs(tit8)
# goo.gl/gLfSTf
# Mosaic plots of survival by class/sex (shade = TRUE colours Pearson residuals)
mosaic(data = tit8,
       ~ Survived + Pclass + Sex,
       shade = TRUE)
mosaic(data = tit8,
       ~ Pclass + Sex + Parch2 + Survived,
       shade = TRUE)
# Correlation matrix demo on the built-in swiss dataset
pairs.panels(swiss)
cors <- cor(swiss)
cor.plot(cors)
# back to Titanic!!!
# Logit model with marginal effects (mfx::logitmfx)
model <- logitmfx(data = tit8,
                  Survived ~ Sex + Pclass + Age)
model
# goo.gl/wefcrJ
fit <- model$fit # the underlying glm fit
library("broom")
library("ggplot2")
glimpse(tit8)
# Prediction grid: survival probability by age for a 2nd-class male.
# NOTE(review): data_frame() is a deprecated alias of tibble().
new <- data_frame(Age = 1:100,
                  Sex = factor("male"),
                  Pclass = factor(2))
glimpse(new)
# Predicted probabilities on the response scale (broom::augment)
fit2 <- augment(fit, newdata = new,
                type.predict = "response")
glimpse(fit2)
qplot(data = fit2,
      x = Age, y = .fitted, geom = "line")
|
/2015/seminar_scripts/script_13_scatter_matrix.R
|
no_license
|
bdemeshev/sa621
|
R
| false
| false
| 1,716
|
r
|
# --- Factor basics demo ---
x <- c("spring", "winter",
       "winter", "autumn")
str(x)
x <- factor(x)
str(x)
x
x + x # arithmetic on factors is not meaningful: warns and returns NA
levels(x)
# --- Titanic survival analysis ---
library("titanic") # Titanic dataset
library("dplyr") # data transform
# library("car") # transform levels of factor vars
library("vcd") # mosaic plot
library("GGally") # scatter plot matrix
library("psych") # descriptive stats, plots
library("mfx") # marginal effects for logit models
tit <- titanic_train
glimpse(tit)
describe(tit)
help("titanic_train")
# Convert categorical columns to factors.
# NOTE(review): mutate_each() is deprecated in modern dplyr; the current
# equivalent is mutate(across(c(Survived, Pclass, Sex, Embarked), factor)).
tit2 <- mutate_each(tit, "factor",
                    Survived, Pclass, Sex, Embarked)
glimpse(tit2)
describe(tit2)
# Drop identifier-like columns not useful for modelling
tit3 <- select(tit2,
               -PassengerId, -Name,
               -Ticket, -Cabin)
glimpse(tit3)
table(tit3$Survived)
table(tit3$Pclass)
pairs.panels(tit3) # for numeric variables
tit4 <- select(tit3,
               -Fare, -Embarked)
ggpairs(tit4)
table(tit4$Parch)
# Collapse rare high counts of parents/children and siblings/spouses into "2+"
tit5 <- mutate(tit4,
               Parch2 = ifelse(Parch > 2, 2, Parch))
tit6 <- mutate(tit5,
               SibSp2 = ifelse(SibSp > 2, 2, SibSp))
tit7 <- select(tit6, -SibSp, -Parch)
ggpairs(tit7)
tit8 <- mutate(tit7,
               Parch2 = factor(Parch2),
               SibSp2 = factor(SibSp2))
ggpairs(tit8)
# goo.gl/gLfSTf
# Mosaic plots of survival by class/sex (shade = TRUE colours Pearson residuals)
mosaic(data = tit8,
       ~ Survived + Pclass + Sex,
       shade = TRUE)
mosaic(data = tit8,
       ~ Pclass + Sex + Parch2 + Survived,
       shade = TRUE)
# Correlation matrix demo on the built-in swiss dataset
pairs.panels(swiss)
cors <- cor(swiss)
cor.plot(cors)
# back to Titanic!!!
# Logit model with marginal effects (mfx::logitmfx)
model <- logitmfx(data = tit8,
                  Survived ~ Sex + Pclass + Age)
model
# goo.gl/wefcrJ
fit <- model$fit # the underlying glm fit
library("broom")
library("ggplot2")
glimpse(tit8)
# Prediction grid: survival probability by age for a 2nd-class male.
# NOTE(review): data_frame() is a deprecated alias of tibble().
new <- data_frame(Age = 1:100,
                  Sex = factor("male"),
                  Pclass = factor(2))
glimpse(new)
# Predicted probabilities on the response scale (broom::augment)
fit2 <- augment(fit, newdata = new,
                type.predict = "response")
glimpse(fit2)
qplot(data = fit2,
      x = Age, y = .fitted, geom = "line")
|
# ---------------------------------------------------------------------------- #
# CAS DATA VISUALIZATION 2016
# Author project  Flight Fare Visualization
#
# Description     Cleaning and preparation of the collected flight fares
#                 for 20 flight connections (EU as well as overseas).
#
# Question 0)     On which weekday should I book? (requestDate)
#
# IMPORTANT:      If a provider's cheapest price stays constant over several
#                 days, all of those days are counted. A departure day may
#                 therefore be "counted" more than once. Since only the
#                 request day (requestDate) matters in this analysis,
#                 that is acceptable.
#
# Author          Ruth Ziegler
# Date            2016-06-21
# Version         v1.0
# ---------------------------------------------------------------------------- #
# --- global settings (include in every script)
Sys.setlocale("LC_ALL", "de_CH.UTF-8") # set locale to UTF-8
# NOTE(review): hard-coded absolute path; breaks on any other machine.
setwd("/Users/ruthziegler/Documents/Work/CAS Data Visualization/Flight Fare Visualization/Analytics/Part 3")
# --- import base script if not sourced
# QBase is expected to define global_labeller, SavePlot() and the
# data.flights.completeSeriesOnly data frame used below.
if(!exists("global_labeller", mode="function")) {
source("FFV_Analytics_QBase.R")
}
# filter lowest prices for each flight
# Keeps every row that TIES for the minimum price within its
# (flightNumber, departureDate) group; a constant cheapest price over
# several request days yields several rows (intentional, see header).
data.q0 <- data.flights.completeSeriesOnly %>%
ungroup() %>%
arrange(flightNumber, departureDate, requestDate) %>% # sorting
group_by(flightNumber, departureDate) %>%
filter(
pmin == min(pmin) # 'pmin' is a data column here, not base::pmin
)
# -- BY REQUEST WEEKDAY, DESTINATION AND CARRIER
# count by request weekday, destination AND carrier
data.q0.byRequestWeekday <- data.q0 %>%
ungroup() %>%
group_by(requestWeekday, carrier, destination) %>%
summarise(
n = n()
)
# plot cheapest flights distributed by request weekday for each destination and carrier
# Stacked bars: carriers are stacked within each weekday.
ggplot(data = data.q0.byRequestWeekday,
aes(x = requestWeekday,
y = n,
fill = carrier)) +
geom_bar(stat="identity") +
facet_wrap(~ destination, ncol = 5, scales="free_y", labeller = global_labeller) +
ggtitle("Number of cheapest flights on request weekday overall") +
xlab("request weekday") +
ylab("number of cheapest flights")
SavePlot("q0-request-wday-all.pdf")
# Same data, but carriers side by side instead of stacked.
ggplot(data = data.q0.byRequestWeekday,
aes(x = requestWeekday,
y = n,
fill = carrier)) +
geom_bar(stat="identity", position = "dodge") +
facet_wrap(~ destination, ncol = 5, scales="free_y", labeller = global_labeller) +
ggtitle("Number of cheapest flights on request weekday overall") +
xlab("request weekday") +
ylab("number of cheapest flights")
SavePlot("q0-request-wday-all-2.pdf")
# calculate the mode value for request weekday
# data.q0.mode <- mfv(as.numeric(data.q0$requestWeekday))
# same as above but additionally grouped by agent
data.q0.agent <- data.flights.completeSeriesOnly %>%
ungroup() %>%
arrange(flightNumber, departureDate, agentName, requestDate) %>% # sorting
group_by(flightNumber, departureDate, agentName) %>%
filter(
pmin == min(pmin)
)
# -- BY REQUEST WEEKDAY, DESTINATION AND AGENT
# count by request weekday and destination for each agent
data.q0.agent.byRequestWeekday <- data.q0.agent %>%
group_by(requestWeekday, carrier, destination, agentName) %>%
summarise(
n = n()
)
# One panel per (destination, agent) pair.
ggplot(data = data.q0.agent.byRequestWeekday,
aes(x = requestWeekday,
y = n,
fill = carrier)) +
geom_bar(stat="identity") +
facet_grid(destination ~ agentName, scales="free_y", labeller = global_labeller) +
ggtitle("Number of cheapest flights on request weekday for each agent") +
xlab("request weekday") +
ylab("number of cheapest flights")
SavePlot("q0-request-wday-agent.pdf")
# Export the aggregated counts for downstream visualization.
write.csv(data.q0.byRequestWeekday, "data-q0-request-wday-all.csv", row.names = FALSE)
write.csv(data.q0.agent.byRequestWeekday, "data-q0-request-wday-agent.csv", row.names = FALSE)
|
/Analytics/Part 3/FFV_Analytics_Q0.R
|
no_license
|
rziegler/ffv
|
R
| false
| false
| 3,924
|
r
|
# ---------------------------------------------------------------------------- #
# CAS DATA VISUALIZATION 2016
# Author project  Flight Fare Visualization
#
# Description     Cleaning and preparation of the collected flight fares
#                 for 20 flight connections (EU as well as overseas).
#
# Question 0)     On which weekday should I book? (requestDate)
#
# IMPORTANT:      If a provider's cheapest price stays constant over several
#                 days, all of those days are counted. A departure day may
#                 therefore be "counted" more than once. Since only the
#                 request day (requestDate) matters in this analysis,
#                 that is acceptable.
#
# Author          Ruth Ziegler
# Date            2016-06-21
# Version         v1.0
# ---------------------------------------------------------------------------- #
# --- global settings (include in every script)
Sys.setlocale("LC_ALL", "de_CH.UTF-8") # set locale to UTF-8
# NOTE(review): hard-coded absolute path; breaks on any other machine.
setwd("/Users/ruthziegler/Documents/Work/CAS Data Visualization/Flight Fare Visualization/Analytics/Part 3")
# --- import base script if not sourced
# QBase is expected to define global_labeller, SavePlot() and the
# data.flights.completeSeriesOnly data frame used below.
if(!exists("global_labeller", mode="function")) {
source("FFV_Analytics_QBase.R")
}
# filter lowest prices for each flight
# Keeps every row that TIES for the minimum price within its
# (flightNumber, departureDate) group; a constant cheapest price over
# several request days yields several rows (intentional, see header).
data.q0 <- data.flights.completeSeriesOnly %>%
ungroup() %>%
arrange(flightNumber, departureDate, requestDate) %>% # sorting
group_by(flightNumber, departureDate) %>%
filter(
pmin == min(pmin) # 'pmin' is a data column here, not base::pmin
)
# -- BY REQUEST WEEKDAY, DESTINATION AND CARRIER
# count by request weekday, destination AND carrier
data.q0.byRequestWeekday <- data.q0 %>%
ungroup() %>%
group_by(requestWeekday, carrier, destination) %>%
summarise(
n = n()
)
# plot cheapest flights distributed by request weekday for each destination and carrier
# Stacked bars: carriers are stacked within each weekday.
ggplot(data = data.q0.byRequestWeekday,
aes(x = requestWeekday,
y = n,
fill = carrier)) +
geom_bar(stat="identity") +
facet_wrap(~ destination, ncol = 5, scales="free_y", labeller = global_labeller) +
ggtitle("Number of cheapest flights on request weekday overall") +
xlab("request weekday") +
ylab("number of cheapest flights")
SavePlot("q0-request-wday-all.pdf")
# Same data, but carriers side by side instead of stacked.
ggplot(data = data.q0.byRequestWeekday,
aes(x = requestWeekday,
y = n,
fill = carrier)) +
geom_bar(stat="identity", position = "dodge") +
facet_wrap(~ destination, ncol = 5, scales="free_y", labeller = global_labeller) +
ggtitle("Number of cheapest flights on request weekday overall") +
xlab("request weekday") +
ylab("number of cheapest flights")
SavePlot("q0-request-wday-all-2.pdf")
# calculate the mode value for request weekday
# data.q0.mode <- mfv(as.numeric(data.q0$requestWeekday))
# same as above but additionally grouped by agent
data.q0.agent <- data.flights.completeSeriesOnly %>%
ungroup() %>%
arrange(flightNumber, departureDate, agentName, requestDate) %>% # sorting
group_by(flightNumber, departureDate, agentName) %>%
filter(
pmin == min(pmin)
)
# -- BY REQUEST WEEKDAY, DESTINATION AND AGENT
# count by request weekday and destination for each agent
data.q0.agent.byRequestWeekday <- data.q0.agent %>%
group_by(requestWeekday, carrier, destination, agentName) %>%
summarise(
n = n()
)
# One panel per (destination, agent) pair.
ggplot(data = data.q0.agent.byRequestWeekday,
aes(x = requestWeekday,
y = n,
fill = carrier)) +
geom_bar(stat="identity") +
facet_grid(destination ~ agentName, scales="free_y", labeller = global_labeller) +
ggtitle("Number of cheapest flights on request weekday for each agent") +
xlab("request weekday") +
ylab("number of cheapest flights")
SavePlot("q0-request-wday-agent.pdf")
# Export the aggregated counts for downstream visualization.
write.csv(data.q0.byRequestWeekday, "data-q0-request-wday-all.csv", row.names = FALSE)
write.csv(data.q0.agent.byRequestWeekday, "data-q0-request-wday-agent.csv", row.names = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{fars_read_years}
\alias{fars_read_years}
\title{Read multiple file names}
\usage{
fars_read_years(years)
}
\arguments{
\item{years}{a numeric vector containing four-digit numeric values.}
}
\value{
A list with length equal to the length of numeric vector \code{years}
in case year names are correctly provided. Elements of the list are
tibbles containing 2 columns, month and year.
}
\description{
This function takes a vector of numeric values and reads the
correspondingly named files into tibbles.
}
\details{
\code{fars_read_years} first takes a numeric vector containing four-digit
elements. It then calls \code{\link{make_filename}} to turn every element of
the years vector into a character vector of formatted file names. The file
names are used to read the corresponding data files into tibbles.
For the purpose of error handling the function calls \code{\link[base]{tryCatch}}
in order to check whether the provided file name can be evaluated and read
into a tibble or not. In case an invalid year name is provided an error will
be thrown.
}
\examples{
\dontrun{
fars_read_years(c(2013, 2014, 2015))
}
}
|
/man/fars_read_years.Rd
|
no_license
|
AnoushiravanR/fars
|
R
| false
| true
| 1,219
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{fars_read_years}
\alias{fars_read_years}
\title{Read multiple file names}
\usage{
fars_read_years(years)
}
\arguments{
\item{years}{a numeric vector containing four-digit numeric values.}
}
\value{
A list with length equal to the length of numeric vector \code{years}
in case year names are correctly provided. Elements of the list are
tibbles containing 2 columns, month and year.
}
\description{
This function takes a vector of numeric values and reads the
correspondingly named files into tibbles.
}
\details{
\code{fars_read_years} first takes a numeric vector containing four-digit
elements. It then calls \code{\link{make_filename}} to turn every element of
the years vector into a character vector of formatted file names. The file
names are used to read the corresponding data files into tibbles.
For the purpose of error handling the function calls \code{\link[base]{tryCatch}}
in order to check whether the provided file name can be evaluated and read
into a tibble or not. In case an invalid year name is provided an error will
be thrown.
}
\examples{
\dontrun{
fars_read_years(c(2013, 2014, 2015))
}
}
|
# source("http://egret.psychol.cam.ac.uk/statistics/R/extensions/rnc_ggplot2_border_themes_2013_01.r")
# Provides theme_border(), used below; not part of ggplot2 itself.
source("d:/KULeuven/PhD/rLibrary/rnc_ggplot2_border_themes_2013_01.r")
# Apply a uniform publication-style theme to a ggplot object.
#
# Args:
#   pp:       a ggplot object to restyle
#   fontsize: base font size applied to all axis, title and legend text
#
# Returns: the restyled ggplot object.
#
# FIX: the original body ended in `pp <- pp + theme(...)`, so the function
# returned its value invisibly (a top-level call printed nothing). Returning
# the themed plot directly makes the result visible while remaining fully
# backward compatible for callers that assign the result.
cleanPlot2 <- function(pp, fontsize)
{
  pp + theme(
    panel.background = element_rect(fill = 'white')
    , panel.grid.major = element_line(colour = "black", size = 0.5, linetype = "dotted")
    , panel.grid.minor = element_blank() # switch off minor gridlines
    , axis.ticks = element_line(colour = 'black')
    , axis.line = element_line(colour = 'black')
    , panel.border = theme_border(c("left","bottom"), size=0.25)
    , axis.title.y = element_text(face="bold", size = fontsize, angle=90, colour = 'black')
    , axis.title.x = element_text(face="bold", size = fontsize, angle=0, colour = 'black')
    , axis.text.x = element_text(face="plain", size = fontsize, colour = 'black')
    , axis.text.y = element_text(face="plain", size = fontsize, colour = 'black')
    , plot.title = element_text(face="plain", size = fontsize, colour = "black")
    , legend.text = element_text(face="plain", size = fontsize)
    , legend.title = element_text(face="bold", size = fontsize)
    , strip.background = element_blank()
  )
}
|
/dataAnalysisCodes/deps/cleanPlot2.R
|
no_license
|
adriencombaz/HybBciCode
|
R
| false
| false
| 1,223
|
r
|
# source("http://egret.psychol.cam.ac.uk/statistics/R/extensions/rnc_ggplot2_border_themes_2013_01.r")
# Provides theme_border(), used below; not part of ggplot2 itself.
source("d:/KULeuven/PhD/rLibrary/rnc_ggplot2_border_themes_2013_01.r")
# Apply a uniform publication-style theme to a ggplot object.
#
# Args:
#   pp:       a ggplot object to restyle
#   fontsize: base font size applied to all axis, title and legend text
#
# Returns: the restyled ggplot object.
#
# FIX: the original body ended in `pp <- pp + theme(...)`, so the function
# returned its value invisibly (a top-level call printed nothing). Returning
# the themed plot directly makes the result visible while remaining fully
# backward compatible for callers that assign the result.
cleanPlot2 <- function(pp, fontsize)
{
  pp + theme(
    panel.background = element_rect(fill = 'white')
    , panel.grid.major = element_line(colour = "black", size = 0.5, linetype = "dotted")
    , panel.grid.minor = element_blank() # switch off minor gridlines
    , axis.ticks = element_line(colour = 'black')
    , axis.line = element_line(colour = 'black')
    , panel.border = theme_border(c("left","bottom"), size=0.25)
    , axis.title.y = element_text(face="bold", size = fontsize, angle=90, colour = 'black')
    , axis.title.x = element_text(face="bold", size = fontsize, angle=0, colour = 'black')
    , axis.text.x = element_text(face="plain", size = fontsize, colour = 'black')
    , axis.text.y = element_text(face="plain", size = fontsize, colour = 'black')
    , plot.title = element_text(face="plain", size = fontsize, colour = "black")
    , legend.text = element_text(face="plain", size = fontsize)
    , legend.title = element_text(face="bold", size = fontsize)
    , strip.background = element_blank()
  )
}
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#
library(shiny)
library(ggplot2)
# UI for iris statistics: three selectors (x, y, class) feed a summary,
# a scatter plot, a histogram, and static/dynamic table views that the
# server renders into 'summary', 'scatterplot', 'histplot', 'static'
# and 'dynamic'.
shinyUI(fluidPage(
  titlePanel(h1('IRIS DATA STATISTICS',style='background-color:yellow')),
  sidebarLayout(
    sidebarPanel(
      h1(
        selectInput(inputId='x',label = 'X-axis',
                    choices = c("Sepal.Length","Sepal.Width", "Petal.Length",
                                "Petal.Width","Species" ),
                    selected = "Sepal.Width"),style='background-color:blue'),
      h1(
        selectInput(inputId='y',label = 'y-axis',
                    choices = c("Sepal.Length","Sepal.Width", "Petal.Length",
                                "Petal.Width","Species" ),
                    # BUG FIX: was '"Sepal.Length"' (literal quotes inside the
                    # string), which matches no choice, so Shiny silently fell
                    # back to the first choice. Select the value explicitly.
                    selected = "Sepal.Length"),style='background-color:green'),
      h1(
        selectInput(inputId='z',label = 'class',
                    choices = c("Species" ),
                    # BUG FIX: same embedded-quote problem as above.
                    selected = "Species"),style='background-color:green'))
    ,
    mainPanel(
      helpText(h1('Summary Statistics ',style='background-color:tomato')),
      verbatimTextOutput(outputId='summary'),
      fluidRow(
        column(6, plotOutput(outputId='scatterplot')),
        column(6,plotOutput(outputId='histplot'))),
      helpText(h1('Static Display ',style='background-color:yellow')),
      tableOutput('static'),
      helpText(h1('Dynamic Display',style='color:blue')),
      # NOTE(review): shiny::dataTableOutput() is soft-deprecated in favour of
      # the DT package; kept to avoid adding a dependency.
      dataTableOutput('dynamic')
    )
  )
))
|
/dashboard/app/ui.R
|
no_license
|
emmanuel-arize/R
|
R
| false
| false
| 2,060
|
r
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#
library(shiny)
library(ggplot2)
# UI for iris statistics: three selectors (x, y, class) feed a summary,
# a scatter plot, a histogram, and static/dynamic table views that the
# server renders into 'summary', 'scatterplot', 'histplot', 'static'
# and 'dynamic'.
shinyUI(fluidPage(
  titlePanel(h1('IRIS DATA STATISTICS',style='background-color:yellow')),
  sidebarLayout(
    sidebarPanel(
      h1(
        selectInput(inputId='x',label = 'X-axis',
                    choices = c("Sepal.Length","Sepal.Width", "Petal.Length",
                                "Petal.Width","Species" ),
                    selected = "Sepal.Width"),style='background-color:blue'),
      h1(
        selectInput(inputId='y',label = 'y-axis',
                    choices = c("Sepal.Length","Sepal.Width", "Petal.Length",
                                "Petal.Width","Species" ),
                    # BUG FIX: was '"Sepal.Length"' (literal quotes inside the
                    # string), which matches no choice, so Shiny silently fell
                    # back to the first choice. Select the value explicitly.
                    selected = "Sepal.Length"),style='background-color:green'),
      h1(
        selectInput(inputId='z',label = 'class',
                    choices = c("Species" ),
                    # BUG FIX: same embedded-quote problem as above.
                    selected = "Species"),style='background-color:green'))
    ,
    mainPanel(
      helpText(h1('Summary Statistics ',style='background-color:tomato')),
      verbatimTextOutput(outputId='summary'),
      fluidRow(
        column(6, plotOutput(outputId='scatterplot')),
        column(6,plotOutput(outputId='histplot'))),
      helpText(h1('Static Display ',style='background-color:yellow')),
      tableOutput('static'),
      helpText(h1('Dynamic Display',style='color:blue')),
      # NOTE(review): shiny::dataTableOutput() is soft-deprecated in favour of
      # the DT package; kept to avoid adding a dependency.
      dataTableOutput('dynamic')
    )
  )
))
|
multa cerneala a mai curs pe seama Securitatii lui Nicolae Ceausescu .
si - o sa mai curga , impestritata cu povesti , cu bancuri , cu zvonuri , pentru ca sintem inca departe de o clarificare sanatoasa , pe baze stiintifice .
societatea civila si clasa politica dau din gura , se confrunta , dar nu pot cadea de acord privitor la punerea la punct a unei masinarii care sa faca cercetari in adevaratul sens al cuvintului .
si , in acest timp , " securistii " ce fac ?
ce gindesc ?
s - au rasfirat prin partide si prin afaceri , s - au cosmetizat nitel , dar mare lucru nu prea stim despre evolutia gindirii lor .
cu exceptia unui serial care ii incrimina si a doua - trei participari pe la emisiuni de televiziune , in toti acesti 11 ani , fostii ofiteri de Securitate n - au prea vorbit .
interviul cu colonelul in rezerva Ilie Merce ( deputat al Partidului Romania Mare ) , publicat in ziarul " Adevarul " , merita citit cu creionul in mina , in ciuda faptului ca intrebarile sint puse in dorul lelii .
el are meritul de a ne atrage atentia asupra felului in care vad fostii ofiteri trecutul lor de glorie si prezentul nostru confuz .
primul lucru care socheaza la " viziunea " lui Ilie Merce este nevinovatia .
" n - a suferit in alt fel nimeni din cauza Securitatii " , cu exceptia unora care n - au primit aviz pentru a fi avansati sau sa plece in strainatate .
nimic , nimic , nimic !
frica pe care o traiau romanii nu era decit rezultatul incercarii de discreditare a Securitatii si Militiei , dusa de postul de radio " Europa Libera " !
deducem astfel ca , fara emisiunile celebrului post de radio , Militia si Securitatea ar fi fost doua forte nevinovate , progresiste , straine de orice abuz , democratice , concentrate doar pe apararea ordinii in societate si pe actiuni de contraspionaj economic .
Ilie Merce spune ca ofiterii de Securitate erau principalii rostitori de bancuri politice ( mai ales la serviciu ) , ca turnatoriile nu erau luate in seama decit atunci cind era vorba despre oamenii care se aflau in atentia vreunui serviciu de spionaj .
in rest , " marele rol al Securitatii a fost ca a urmarit toate contractele importante incheiate cu firme din alte tari " .
si de ce ?
" de ce sa platim asistenta tehnica pe un an , doi sau trei , daca obtineam datele prin mijloace specifice ? "
abuzuri ? Exact trei , adica numarul persoanelor care erau in puscarii in decembrie 1989 pentru " infractiuni legate de securitatea nationala " .
fie Ilie Merce a pazit gainile de la una din gospodariile - anexa ale Securitatii , fie face pe prostul .
cum avea grad si a raspuns si de un judet si de Cultura , ne obliga sa luam in calcul a doua varianta .
ba mai mult .
incearca sa acrediteze si varianta eroica , conform careia si Securitatea voia sa scapam de Ceausescu .
n - ati auzit chiar de un ofiter erou , faimos pentru ca ar fi votat impotriva lui Ceausescu , dat afara din PCR ?
inseamna ca habar nu aveti de marile proteste puse la cale de Securitate , de disidenta constanta din rindurile acesteia !
de fapt , zice Ilie Merce , nu stiti nimic .
institutia asta merita titlul de erou al natiei !
noi nu sintem constienti de cit bine ne - a facut si nu ne dam seama ca dusmanii patriei au distrus - o !
or fi fost bune de incriminat serviciile secrete din celelalte tari socialiste , dar al nostru , patriotic si prevenitor , numai asta nu merita !
cu acelasi aer de ingeras care n - a poposit niciodata pe marginea unei troace de porci ( si nici pe linga ea n - a trecut ) , Ilie Merce zice : " Nu discut ce a fost in anii ' 50 - ' 60 ! " .
de ce sa discute , ca doar ororile s - au intimplat in Cipru si in Malta , nu in Romania , iar ofiterii au stat nepasatori in timp ce niste martieni sadici terorizau populatia ?
! si , ca sa nu ne mai amagim , nici disidenti n - am avut !
doar " Paul Goma , pe care l - am creat noi , Securitatea , la ordinul partidului " .
dupa mintea acestui colonel nostalgic , noua nu ni s - a intimplat nimic .
iaca , doi - trei urmariti si o groaza de beneficii economice pentru tara !
de altfel , singurul lucru care - i mai raminea de facut lui Ilie Merce era sa - i puna ziaristului intrebarea : " Domnule reporter , dumneata ai inteles de ce a cazut Ceausescu si de ce nu mai avem socialism ? " .
nu am motive sa acuz pe cineva anume .
nici nu cred ca fiecare ofiter in parte poate fi socotit un om cu bube in cap .
cita vreme nu avem o clarificare temeinica a faptelor ascunse in arhive , nu are cum sa ne mai surprinda o asemenea incercare stupida de justificare si reabilitare a principalului instrument de " prelucrare " al totalitarismului !!
|
/data/Newspapers/2001.02.27.editorial.42232.0557.r
|
no_license
|
narcis96/decrypting-alpha
|
R
| false
| false
| 4,642
|
r
|
multa cerneala a mai curs pe seama Securitatii lui Nicolae Ceausescu .
si - o sa mai curga , impestritata cu povesti , cu bancuri , cu zvonuri , pentru ca sintem inca departe de o clarificare sanatoasa , pe baze stiintifice .
societatea civila si clasa politica dau din gura , se confrunta , dar nu pot cadea de acord privitor la punerea la punct a unei masinarii care sa faca cercetari in adevaratul sens al cuvintului .
si , in acest timp , " securistii " ce fac ?
ce gindesc ?
s - au rasfirat prin partide si prin afaceri , s - au cosmetizat nitel , dar mare lucru nu prea stim despre evolutia gindirii lor .
cu exceptia unui serial care ii incrimina si a doua - trei participari pe la emisiuni de televiziune , in toti acesti 11 ani , fostii ofiteri de Securitate n - au prea vorbit .
interviul cu colonelul in rezerva Ilie Merce ( deputat al Partidului Romania Mare ) , publicat in ziarul " Adevarul " , merita citit cu creionul in mina , in ciuda faptului ca intrebarile sint puse in dorul lelii .
el are meritul de a ne atrage atentia asupra felului in care vad fostii ofiteri trecutul lor de glorie si prezentul nostru confuz .
primul lucru care socheaza la " viziunea " lui Ilie Merce este nevinovatia .
" n - a suferit in alt fel nimeni din cauza Securitatii " , cu exceptia unora care n - au primit aviz pentru a fi avansati sau sa plece in strainatate .
nimic , nimic , nimic !
frica pe care o traiau romanii nu era decit rezultatul incercarii de discreditare a Securitatii si Militiei , dusa de postul de radio " Europa Libera " !
deducem astfel ca , fara emisiunile celebrului post de radio , Militia si Securitatea ar fi fost doua forte nevinovate , progresiste , straine de orice abuz , democratice , concentrate doar pe apararea ordinii in societate si pe actiuni de contraspionaj economic .
Ilie Merce spune ca ofiterii de Securitate erau principalii rostitori de bancuri politice ( mai ales la serviciu ) , ca turnatoriile nu erau luate in seama decit atunci cind era vorba despre oamenii care se aflau in atentia vreunui serviciu de spionaj .
in rest , " marele rol al Securitatii a fost ca a urmarit toate contractele importante incheiate cu firme din alte tari " .
si de ce ?
" de ce sa platim asistenta tehnica pe un an , doi sau trei , daca obtineam datele prin mijloace specifice ? "
abuzuri ? Exact trei , adica numarul persoanelor care erau in puscarii in decembrie 1989 pentru " infractiuni legate de securitatea nationala " .
fie Ilie Merce a pazit gainile de la una din gospodariile - anexa ale Securitatii , fie face pe prostul .
cum avea grad si a raspuns si de un judet si de Cultura , ne obliga sa luam in calcul a doua varianta .
ba mai mult .
incearca sa acrediteze si varianta eroica , conform careia si Securitatea voia sa scapam de Ceausescu .
n - ati auzit chiar de un ofiter erou , faimos pentru ca ar fi votat impotriva lui Ceausescu , dat afara din PCR ?
inseamna ca habar nu aveti de marile proteste puse la cale de Securitate , de disidenta constanta din rindurile acesteia !
de fapt , zice Ilie Merce , nu stiti nimic .
institutia asta merita titlul de erou al natiei !
noi nu sintem constienti de cit bine ne - a facut si nu ne dam seama ca dusmanii patriei au distrus - o !
or fi fost bune de incriminat serviciile secrete din celelalte tari socialiste , dar al nostru , patriotic si prevenitor , numai asta nu merita !
cu acelasi aer de ingeras care n - a poposit niciodata pe marginea unei troace de porci ( si nici pe linga ea n - a trecut ) , Ilie Merce zice : " Nu discut ce a fost in anii ' 50 - ' 60 ! " .
de ce sa discute , ca doar ororile s - au intimplat in Cipru si in Malta , nu in Romania , iar ofiterii au stat nepasatori in timp ce niste martieni sadici terorizau populatia ?
! si , ca sa nu ne mai amagim , nici disidenti n - am avut !
doar " Paul Goma , pe care l - am creat noi , Securitatea , la ordinul partidului " .
dupa mintea acestui colonel nostalgic , noua nu ni s - a intimplat nimic .
iaca , doi - trei urmariti si o groaza de beneficii economice pentru tara !
de altfel , singurul lucru care - i mai raminea de facut lui Ilie Merce era sa - i puna ziaristului intrebarea : " Domnule reporter , dumneata ai inteles de ce a cazut Ceausescu si de ce nu mai avem socialism ? " .
nu am motive sa acuz pe cineva anume .
nici nu cred ca fiecare ofiter in parte poate fi socotit un om cu bube in cap .
cita vreme nu avem o clarificare temeinica a faptelor ascunse in arhive , nu are cum sa ne mai surprinda o asemenea incercare stupida de justificare si reabilitare a principalului instrument de " prelucrare " al totalitarismului !!
|
# Created on : 29-06-2021
# Course work:
# @author: Harsha Vardhan
# Source:
# Simple strip chart of the built-in airquality dataset.
str(airquality)                    # inspect the structure of the demo data
airquality                         # auto-prints the data frame at top level
stripchart(airquality[["Ozone"]])  # 1-D scatter of the Ozone readings
|
/harsha1/simple-strip-chart.R
|
no_license
|
tactlabs/r-samples
|
R
| false
| false
| 155
|
r
|
# Created on : 29-06-2021
# Course work:
# @author: Harsha Vardhan
# Source:
# Simple strip chart of the built-in airquality dataset.
str(airquality)                    # inspect the structure of the demo data
airquality                         # auto-prints the data frame at top level
stripchart(airquality[["Ozone"]])  # 1-D scatter of the Ozone readings
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 11273
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 11273
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query57_query57_1344n.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 3343
c no.of clauses 11273
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 11273
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query57_query57_1344n.qdimacs 3343 11273 E1 [] 0 113 3230 11273 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query57_query57_1344n/query57_query57_1344n.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 720
|
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 11273
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 11273
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query57_query57_1344n.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 3343
c no.of clauses 11273
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 11273
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query57_query57_1344n.qdimacs 3343 11273 E1 [] 0 113 3230 11273 NONE
|
\name{here}
\alias{here}
\title{
Here Documents
}
\description{
Read lines and convert into appropriate vector or data frame.
}
\usage{
here(s, drop = TRUE, guess.type = TRUE, sep = NULL, header = TRUE,
stringsAsFactors = FALSE, trim = TRUE, \dots)
}
\arguments{
\item{s}{
a string
}
\item{drop}{
logical: drop empty first and last element
}
\item{guess.type}{
logical
}
\item{sep}{
NULL or character
}
\item{header}{
logical
}
\item{stringsAsFactors}{
logical
}
\item{trim}{
logical: trim whitespace?
}
\item{\dots}{
named arguments to be passed to \code{\link[utils]{read.table}}
}
}
\details{
Experimental. (Notably, the function's name may change.)
The function reads a (typically multi-line) string and treats each
line as one element of a vector or, if sep is specified, a
\code{data.frame}.
If \code{sep} is not specified, \code{here} calls
\code{\link[utils]{type.convert}} on the input \code{s}.
If \code{sep} is specified, the input \code{s} is fed to
\code{\link[utils]{read.table}}. Additional arguments may be passed
through \dots.
}
\value{
a vector or, if \code{sep} is specified, a \code{\link{data.frame}}
}
\references{
\url{https://rosettacode.org/wiki/Here_document}
(note that \R supports multi-line strings, so in a way it has
built-in support for here documents as defined on
that website)
}
\author{
Enrico Schumann
}
\seealso{
\code{\link{type.convert}}
}
\examples{
## numbers
here("
1
2
3
4
")
## character
here("
Al
Bob
Carl
David
")
## data frame
here("
letter, number
x, 1
y, 2
z, 3",
sep = ",")
}
|
/man/here.Rd
|
no_license
|
enricoschumann/textutils
|
R
| false
| false
| 1,657
|
rd
|
\name{here}
\alias{here}
\title{
Here Documents
}
\description{
Read lines and convert into appropriate vector or data frame.
}
\usage{
here(s, drop = TRUE, guess.type = TRUE, sep = NULL, header = TRUE,
stringsAsFactors = FALSE, trim = TRUE, \dots)
}
\arguments{
\item{s}{
a string
}
\item{drop}{
logical: drop empty first and last element
}
\item{guess.type}{
logical
}
\item{sep}{
NULL or character
}
\item{header}{
logical
}
\item{stringsAsFactors}{
logical
}
\item{trim}{
logical: trim whitespace?
}
\item{\dots}{
named arguments to be passed to \code{\link[utils]{read.table}}
}
}
\details{
Experimental. (Notably, the function's name may change.)
The function reads a (typically multi-line) string and treats each
line as one element of a vector or, if sep is specified, a
\code{data.frame}.
If \code{sep} is not specified, \code{here} calls
\code{\link[utils]{type.convert}} on the input \code{s}.
If \code{sep} is specified, the input \code{s} is fed to
\code{\link[utils]{read.table}}. Additional arguments may be passed
through \dots.
}
\value{
a vector or, if \code{sep} is specified, a \code{\link{data.frame}}
}
\references{
\url{https://rosettacode.org/wiki/Here_document}
(note that \R supports multi-line strings, so in a way it has
built-in support for here documents as defined on
that website)
}
\author{
Enrico Schumann
}
\seealso{
\code{\link{type.convert}}
}
\examples{
## numbers
here("
1
2
3
4
")
## character
here("
Al
Bob
Carl
David
")
## data frame
here("
letter, number
x, 1
y, 2
z, 3",
sep = ",")
}
|
library(tidyverse)
library(haven)
library(ggplot2)
# Chinese Family Expenditure Survey data (Stata file read via haven).
china_data <- read_dta(file = "fes_china.dta")
# Add in log variables for later use
# NOTE(review): china_data_logged (budget shares) is built here but not used
# anywhere in this script -- presumably kept for a later exercise; confirm.
china_data_logged = china_data %>%
mutate(ln_share_food = log(food/totalexpenditures),
ln_share_clothing = log(clothing/totalexpenditures),
ln_share_housing = log(houserent/totalexpenditures),
ln_exp = log(totalexpenditures),
ln_fam_size = log(totalfamilymembers))
# Log levels of each expenditure category plus total expenditure and family size.
china_data_add = china_data %>%
mutate(ln_food = log(food),
ln_clothing = log(clothing),
ln_housing = log(houserent),
ln_exp = log(totalexpenditures),
ln_fam_size = log(totalfamilymembers))
# Separate the Chinese data into 2 subsets:
# Shanghai (factory codes 0-2) and Peiping (factory code 3).
shanghai = subset(china_data_add, factory==0 | factory==1 | factory==2)
peiping = subset(china_data_add, factory==3)
### Find Elasticities
# Log-log regressions: the ln_exp coefficient is the expenditure elasticity.
## shanghai
# food
lm_shanghai_food = lm(ln_food ~ ln_exp+ln_fam_size,shanghai)
plot(ln_food ~ ln_exp, main = "Shanghai Food", data=shanghai)
# NOTE(review): abline() on a two-regressor fit draws a line from only the
# intercept and the first slope, ignoring ln_fam_size -- illustrative only.
abline(lm_shanghai_food, col="blue")
summary(lm_shanghai_food)
# clothing
lm_shanghai_clothing = lm(ln_clothing ~ ln_exp+ln_fam_size,shanghai)
plot(ln_clothing ~ ln_exp, main = "Shanghai Clothing", data=shanghai)
abline(lm_shanghai_clothing, col="blue")
summary(lm_shanghai_clothing)
# housing
lm_shanghai_housing = lm(ln_housing ~ ln_exp+ln_fam_size,shanghai)
plot(ln_housing ~ ln_exp, main = "Shanghai Housing", data=shanghai)
abline(lm_shanghai_housing, col="blue")
summary(lm_shanghai_housing)
## peiping
# Peiping models omit ln_fam_size (single-regressor fits).
# food
lm_peiping_food = lm(ln_food ~ ln_exp,peiping)
plot(ln_food ~ ln_exp, main = "Peiping Food", data=peiping)
abline(lm_peiping_food, col="blue")
summary(lm_peiping_food)
# clothing
lm_peiping_clothing = lm(ln_clothing ~ ln_exp,peiping)
plot(ln_clothing ~ ln_exp, main = "Peiping Clothing", data=peiping)
abline(lm_peiping_clothing, col="blue")
summary(lm_peiping_clothing)
# housing
lm_peiping_housing = lm(ln_housing ~ ln_exp,peiping)
plot(ln_housing ~ ln_exp, main = "Peiping Housing", data=peiping)
abline(lm_peiping_housing, col="blue")
summary(lm_peiping_housing)
|
/Homework_1/Homework_1_Script.R
|
no_license
|
lathamri/200A_Homework
|
R
| false
| false
| 2,086
|
r
|
## NOTE(review): duplicate copy of the FES elasticity script in this
## concatenated dump; kept verbatim.
library(tidyverse)
library(haven)
library(ggplot2)
# Read the Chinese Family Expenditure Survey (Stata format file).
china_data <- read_dta(file = "fes_china.dta")
# Add in log variables for later use
# (log budget shares; computed here but not referenced below)
china_data_logged = china_data %>%
mutate(ln_share_food = log(food/totalexpenditures),
ln_share_clothing = log(clothing/totalexpenditures),
ln_share_housing = log(houserent/totalexpenditures),
ln_exp = log(totalexpenditures),
ln_fam_size = log(totalfamilymembers))
# Log levels of each outlay, used by the elasticity regressions below.
china_data_add = china_data %>%
mutate(ln_food = log(food),
ln_clothing = log(clothing),
ln_housing = log(houserent),
ln_exp = log(totalexpenditures),
ln_fam_size = log(totalfamilymembers))
# Separate Chinese data into subsets
# NOTE(review): only two subsets are created despite the original "4 subsets"
# wording: Shanghai (factory codes 0-2) and Peiping (factory code 3).
shanghai = subset(china_data_add, factory==0 | factory==1 | factory==2)
peiping = subset(china_data_add, factory==3)
### Find Elasticities
## shanghai
# Each block: fit log-log regression, scatterplot, overlay fitted line,
# print summary. NOTE(review): abline() on a two-predictor fit uses only
# the intercept and the first slope (ln_exp).
# food
lm_shanghai_food = lm(ln_food ~ ln_exp+ln_fam_size,shanghai)
plot(ln_food ~ ln_exp, main = "Shanghai Food", data=shanghai)
abline(lm_shanghai_food, col="blue")
summary(lm_shanghai_food)
# clothing
lm_shanghai_clothing = lm(ln_clothing ~ ln_exp+ln_fam_size,shanghai)
plot(ln_clothing ~ ln_exp, main = "Shanghai Clothing", data=shanghai)
abline(lm_shanghai_clothing, col="blue")
summary(lm_shanghai_clothing)
# housing
lm_shanghai_housing = lm(ln_housing ~ ln_exp+ln_fam_size,shanghai)
plot(ln_housing ~ ln_exp, main = "Shanghai Housing", data=shanghai)
abline(lm_shanghai_housing, col="blue")
summary(lm_shanghai_housing)
## peiping
# NOTE(review): family-size regressor omitted for Peiping -- presumably
# intentional; confirm with the assignment spec.
# food
lm_peiping_food = lm(ln_food ~ ln_exp,peiping)
plot(ln_food ~ ln_exp, main = "Peiping Food", data=peiping)
abline(lm_peiping_food, col="blue")
summary(lm_peiping_food)
# clothing
lm_peiping_clothing = lm(ln_clothing ~ ln_exp,peiping)
plot(ln_clothing ~ ln_exp, main = "Peiping Clothing", data=peiping)
abline(lm_peiping_clothing, col="blue")
summary(lm_peiping_clothing)
# housing
lm_peiping_housing = lm(ln_housing ~ ln_exp,peiping)
plot(ln_housing ~ ln_exp, main = "Peiping Housing", data=peiping)
abline(lm_peiping_housing, col="blue")
summary(lm_peiping_housing)
|
## ISLR Chapter 5 homework, Question 5: validation-set approach on the
## Default data. NOTE(review): statement order matters throughout -- each
## set.seed()/sample() pair defines the split, so do not reorder calls.
# Q5(a)
library(ISLR)
library(MASS)
RNGkind(sample.kind = "Rounding") #To correct the RNG of different R versions
set.seed(1)
attach(Default)
fit.glm1.0 <- glm(default~income+balance,
data = Default, family = binomial)
summary(fit.glm1.0)$coef #both predictors are statistically significant
# Q5(b)
## Sample splitting
set.seed(1)
smpl.size <- floor(0.5*nrow(Default))
train <- sample(seq_len(nrow(Default)), size = smpl.size)
def.train <- Default[train, ]
def.test <- Default[-train, ]
# Held-out responses; `default` is visible via attach(Default) above.
def.c<-default[-train]
# dim(Default)
# dim(def.train)
# dim(def.test)
## Fitting Multiple Logistic Regression
fit.glm1.1<-glm(default~income+balance,
data = Default, family = binomial, subset = train)
prob.glm<-predict(fit.glm1.1, def.test, type = "response")
# NOTE(review): 5000 is hard-coded as the test-set size (half of the
# 10,000-row Default data); safer would be length(prob.glm).
pred.glm<-rep("No", 5000)
pred.glm[prob.glm>.5]<-"Yes"
table(pred.glm,def.c)
mean(pred.glm==def.c) #model accuracy is 97.1%
# At the granular level, the confusion matrix suggests that the actual "default" miscalculation
# is approx. 68.9%! such poor precision level may be unacceptable to a credit card company.
## Validation set error
mean(pred.glm!= def.c) #The test (validation) error is 0.0286
# Q5(c)
# Repeat the split/fit/score cycle under three different seeds to gauge
# the variability of the validation-set error.
set.seed(2)
smpl.size <- floor(0.5*nrow(Default))
train <- sample(seq_len(nrow(Default)), size = smpl.size)
def.train <- Default[train, ]
def.test <- Default[-train, ]
def.c<-default[-train]
fit.glm1.1<-glm(default~income+balance,
data = Default, family = binomial, subset = train)
prob.glm<-predict(fit.glm1.1, def.test, type = "response")
pred.glm<-rep("No", 5000)
pred.glm[prob.glm>.5]<-"Yes"
table(pred.glm,def.c)
mean(pred.glm!= def.c) #The test (validation) error is 0.0276
set.seed(3)
smpl.size <- floor(0.5*nrow(Default))
train <- sample(seq_len(nrow(Default)), size = smpl.size)
def.train <- Default[train, ]
def.test <- Default[-train, ]
def.c<-default[-train]
fit.glm1.1<-glm(default~income+balance,
data = Default, family = binomial, subset = train)
prob.glm<-predict(fit.glm1.1, def.test, type = "response")
pred.glm<-rep("No", 5000)
pred.glm[prob.glm>.5]<-"Yes"
table(pred.glm,def.c)
mean(pred.glm!= def.c) #The test (validation) error is 0.0248
set.seed(4)
smpl.size <- floor(0.5*nrow(Default))
train <- sample(seq_len(nrow(Default)), size = smpl.size)
def.train <- Default[train, ]
def.test <- Default[-train, ]
def.c<-default[-train]
fit.glm1.1<-glm(default~income+balance,
data = Default, family = binomial, subset = train)
prob.glm<-predict(fit.glm1.1, def.test, type = "response")
pred.glm<-rep("No", 5000)
pred.glm[prob.glm>.5]<-"Yes"
table(pred.glm,def.c)
mean(pred.glm!= def.c) #The test (validation) error is 0.0262
# The test error was different for the three different sample splits.
# Q5(d)
# Refit with the `student` dummy added to the model.
set.seed(1)
smpl.size <- floor(0.5*nrow(Default))
train <- sample(seq_len(nrow(Default)), size = smpl.size)
def.train <- Default[train, ]
def.test <- Default[-train, ]
def.c<-default[-train]
fit.glm1.2<-glm(default~income+balance+student,
data = Default, family = binomial, subset = train)
prob.glm<-predict(fit.glm1.2, def.test, type = "response")
pred.glm <- ifelse(prob.glm > 0.5, "Yes", "No") #smooth right? lol
table(pred.glm,def.c)
mean(pred.glm!= def.c)#The test (validation) error is 0.0288
# Including a dummy variable for "student" resulted in an increase in test error by 0.002, which is practically insignificant.
# Hence, it can be deduced that the inclusion of the dummy variable in the logistic regression model is redundant.
## Question 6: bootstrap standard errors for the logistic-regression
## coefficients on the Default data.
# Q6(a)
set.seed(1)
smpl.size <- floor(0.5*nrow(Default))
train <- sample(seq_len(nrow(Default)), size = smpl.size)
def.train <- Default[train, ]
def.test <- Default[-train, ]
def.c<-default[-train]
fit.glm1.1<-glm(default~income+balance,
data = Default, family = binomial, subset = train)
summary(fit.glm1.1)
# Q6(b)
# boot.fn: bootstrap statistic for boot().
#   data  - the Default data frame (or a compatible data frame)
#   index - row indices of the bootstrap resample
# Returns the fitted glm coefficients (intercept, income, balance).
boot.fn <- function(data,index){
fit.glm1.3 <- glm(default~income+balance, data=data[index, ], family = binomial)
return(coef(fit.glm1.3))
}
# Sanity check: with the identity index this reproduces the full-sample fit.
boot.fn(Default, 1:10000)
# Q6(c)
library(boot)
set.seed(1)
# 1000 bootstrap replications of the coefficient vector.
boot(Default,boot.fn,1000)
# Q6(d)
# The disparity in the estimated standard errors of the logistic regression and bootstrap,
# may be attributed to the inadequacy of the fitted model in the logistic regression.
# Further, the bootstrap approach does not assume that the variablility only comes from the irreducible error, as compared to the glm
# Question 8 - Cross-Validation
# Q8(a) - Generate a simulated data set.
set.seed(1)
# NOTE(review): the first rnorm(100) assigned to y only advances the RNG
# stream (it is immediately overwritten below); do not remove it, or the
# simulated x/y values change.
y<-rnorm(100)
x<-rnorm(100)
y<-x-2*x^2+rnorm(100)
# n = 100 observations; single predictor x with data-generating model:
# \begin{align*}
# y = x - 2x^2 + \epsilon\\
# \end{align*}
## Q8(b)
data<-data.frame(y,x)
library(ggplot2)
# Scatterplot of the simulated data.
ggplot(data, aes(x=x, y=y))+
geom_point(color = "darkred", size = 2)+
ggtitle("Graph of Y vs X")+
theme(plot.title = element_text(hjust = 0.5))
# The graph obtained looks like an inverse quadratic graph. This suggests that the relationship between x and y is non-linear.
## Q8(c)
set.seed(1)
library(boot)
# LOOCV error for polynomial degrees 1-4 (cv.glm with default K = n).
cv.error=rep(0,4)
for (j in 1:4){
glm.fit<-glm(y~poly(x, j), data = data)
cv.error[j]<-cv.glm(data,glm.fit)$delta[1]
}
cv.error
## Q8(d)
# Same computation under a different seed; LOOCV is deterministic, so the
# errors should be identical.
set.seed(2)
library(boot)
cv.error=rep(0,4)
for (j in 1:4){
glm.fit<-glm(y~poly(x, j), data = data)
cv.error[j]<-cv.glm(data,glm.fit)$delta[1]
}
cv.error
# The LOOCV errors are the same. This is expected since there is no randomness in the training/validation data set splits.
## Q8(e)
# The quadratic model had the lowest LOOCV error.
# This is expected since y is a polynomial of the second order (i.e quadratic) that is dependent on x.
## Q8(f)
set.seed(1)
# Coefficient significance for each polynomial degree.
for (j in 1:4){
print(summary(glm(y~poly(x, j), data = data)))
}
# The quadratic term in the quadratic, cubic, and quartic model is statistically significant, while the rest are not.
# This agrees with the conclusion drawn from the CV analysis that suggests that the quadratic model outperforms the rest.
# Question 9: inference for median home value (medv) in the Boston data.
## Q9(a): sample mean of medv
attach(Boston)
mu<-mean(medv)
mu #mu = 22.53
## Q9(b): standard error of the sample mean
se<-sd(medv)/sqrt(length(medv))
se #standard error is 0.409
## Q9(c): bootstrap SE of the mean
set.seed(1)
# FIX: the replication count was originally passed as `R<-1000`, which
# silently created a global variable `R` as a side effect; pass it as the
# named argument `R = 1000` instead (identical boot() behavior).
boot(medv,function(x,index){mean(x[index])}, R = 1000)
# The standard error is almost the same as the estimated standard error in (b)
## Q9(d): 95% CI for the mean (CLT approximation vs. t-test)
# set.seed(1)
# boot.ci(boot(medv,function(x,index){mean(x[index])}, R = 1000), type = "bca")
# boot.ci(boot.out = boot(medv,function(x,index){mean(x[index])}, R = 1000), conf = .95)
t.test(Boston$medv)
CI.mu.hat<-c(mu-2*se,mu+2*se)
CI.mu.hat
# The confidence interval obtained using the central limit theorem approach and t-test approach are almost the same.
## Q9(e): sample median
mu.med<-median(medv)
mu.med
## Q9(f): bootstrap SE of the median
set.seed(1)
boot(medv, function(x,index){median(x[index])}, R = 1000)
# The standard error of the median is approx. 0.378.
## Q9(g): 10th percentile of medv
mu0.1<-quantile(medv, .1)
mu0.1
## Q9(h): bootstrap SE of the 10th percentile
set.seed(1)
# FIX: the statistic originally closed over the global `medv` instead of its
# argument `x` (it only worked because x IS medv); use x[index] so the
# function is a correct bootstrap statistic. Results are unchanged here.
boot(medv, function(x,index){quantile(x[index],.1)}, R = 1000)
#The estimated standard error of the 10th percentile of medv in Boston suburbs is 0.477.
|
/Homework_4.R
|
no_license
|
Hakeem-7/Class_Project_4-Stat6000
|
R
| false
| false
| 6,986
|
r
|
## NOTE(review): duplicate copy of the ISLR Chapter 5 homework (Q5, Q6, Q8)
## in this concatenated dump; kept verbatim. Statement order matters: each
## set.seed()/sample() pair defines a split, so do not reorder calls.
# Q5(a)
library(ISLR)
library(MASS)
RNGkind(sample.kind = "Rounding") #To correct the RNG of different R versions
set.seed(1)
attach(Default)
fit.glm1.0 <- glm(default~income+balance,
data = Default, family = binomial)
summary(fit.glm1.0)$coef #both predictors are statistically significant
# Q5(b)
## Sample splitting
set.seed(1)
smpl.size <- floor(0.5*nrow(Default))
train <- sample(seq_len(nrow(Default)), size = smpl.size)
def.train <- Default[train, ]
def.test <- Default[-train, ]
# Held-out responses; `default` is visible via attach(Default) above.
def.c<-default[-train]
# dim(Default)
# dim(def.train)
# dim(def.test)
## Fitting Multiple Logistic Regression
fit.glm1.1<-glm(default~income+balance,
data = Default, family = binomial, subset = train)
prob.glm<-predict(fit.glm1.1, def.test, type = "response")
# NOTE(review): 5000 hard-codes the test-set size (half of 10,000 rows).
pred.glm<-rep("No", 5000)
pred.glm[prob.glm>.5]<-"Yes"
table(pred.glm,def.c)
mean(pred.glm==def.c) #model accuracy is 97.1%
# At the granular level, the confusion matrix suggests that the actual "default" miscalculation
# is approx. 68.9%! such poor precision level may be unacceptable to a credit card company.
## Validation set error
mean(pred.glm!= def.c) #The test (validation) error is 0.0286
# Q5(c)
# Repeat the split/fit/score cycle under three other seeds.
set.seed(2)
smpl.size <- floor(0.5*nrow(Default))
train <- sample(seq_len(nrow(Default)), size = smpl.size)
def.train <- Default[train, ]
def.test <- Default[-train, ]
def.c<-default[-train]
fit.glm1.1<-glm(default~income+balance,
data = Default, family = binomial, subset = train)
prob.glm<-predict(fit.glm1.1, def.test, type = "response")
pred.glm<-rep("No", 5000)
pred.glm[prob.glm>.5]<-"Yes"
table(pred.glm,def.c)
mean(pred.glm!= def.c) #The test (validation) error is 0.0276
set.seed(3)
smpl.size <- floor(0.5*nrow(Default))
train <- sample(seq_len(nrow(Default)), size = smpl.size)
def.train <- Default[train, ]
def.test <- Default[-train, ]
def.c<-default[-train]
fit.glm1.1<-glm(default~income+balance,
data = Default, family = binomial, subset = train)
prob.glm<-predict(fit.glm1.1, def.test, type = "response")
pred.glm<-rep("No", 5000)
pred.glm[prob.glm>.5]<-"Yes"
table(pred.glm,def.c)
mean(pred.glm!= def.c) #The test (validation) error is 0.0248
set.seed(4)
smpl.size <- floor(0.5*nrow(Default))
train <- sample(seq_len(nrow(Default)), size = smpl.size)
def.train <- Default[train, ]
def.test <- Default[-train, ]
def.c<-default[-train]
fit.glm1.1<-glm(default~income+balance,
data = Default, family = binomial, subset = train)
prob.glm<-predict(fit.glm1.1, def.test, type = "response")
pred.glm<-rep("No", 5000)
pred.glm[prob.glm>.5]<-"Yes"
table(pred.glm,def.c)
mean(pred.glm!= def.c) #The test (validation) error is 0.0262
# The test error was different for the three different sample splits.
# Q5(d)
# Refit with the `student` dummy added to the model.
set.seed(1)
smpl.size <- floor(0.5*nrow(Default))
train <- sample(seq_len(nrow(Default)), size = smpl.size)
def.train <- Default[train, ]
def.test <- Default[-train, ]
def.c<-default[-train]
fit.glm1.2<-glm(default~income+balance+student,
data = Default, family = binomial, subset = train)
prob.glm<-predict(fit.glm1.2, def.test, type = "response")
pred.glm <- ifelse(prob.glm > 0.5, "Yes", "No") #smooth right? lol
table(pred.glm,def.c)
mean(pred.glm!= def.c)#The test (validation) error is 0.0288
# Including a dummy variable for "student" resulted in an increase in test error by 0.002, which is practically insignificant.
# Hence, it can be deduced that the inclusion of the dummy variable in the logistic regression model is redundant.
# Q6(a)
set.seed(1)
smpl.size <- floor(0.5*nrow(Default))
train <- sample(seq_len(nrow(Default)), size = smpl.size)
def.train <- Default[train, ]
def.test <- Default[-train, ]
def.c<-default[-train]
fit.glm1.1<-glm(default~income+balance,
data = Default, family = binomial, subset = train)
summary(fit.glm1.1)
# Q6(b)
# boot.fn: bootstrap statistic for boot(); refits the glm on the rows
# selected by `index` and returns its coefficient vector.
boot.fn <- function(data,index){
fit.glm1.3 <- glm(default~income+balance, data=data[index, ], family = binomial)
return(coef(fit.glm1.3))
}
# Sanity check: identity index reproduces the full-sample fit.
boot.fn(Default, 1:10000)
# Q6(c)
library(boot)
set.seed(1)
boot(Default,boot.fn,1000)
# Q6(d)
# The disparity in the estimated standard errors of the logistic regression and bootstrap,
# may be attributed to the inadequacy of the fitted model in the logistic regression.
# Further, the bootstrap approach does not assume that the variablility only comes from the irreducible error, as compared to the glm
# Question 8 - Cross-Validation
# Q8(a) - Generate a simulated data set.
set.seed(1)
# NOTE(review): the first rnorm(100) only advances the RNG stream (it is
# overwritten below); removing it would change the simulated data.
y<-rnorm(100)
x<-rnorm(100)
y<-x-2*x^2+rnorm(100)
# n = 100 observations; single predictor x with data-generating model:
# \begin{align*}
# y = x - 2x^2 + \epsilon\\
# \end{align*}
## Q8(b)
data<-data.frame(y,x)
library(ggplot2)
ggplot(data, aes(x=x, y=y))+
geom_point(color = "darkred", size = 2)+
ggtitle("Graph of Y vs X")+
theme(plot.title = element_text(hjust = 0.5))
# The graph obtained looks like an inverse quadratic graph. This suggests that the relationship between x and y is non-linear.
## Q8(c)
set.seed(1)
library(boot)
# LOOCV error for polynomial degrees 1-4.
cv.error=rep(0,4)
for (j in 1:4){
glm.fit<-glm(y~poly(x, j), data = data)
cv.error[j]<-cv.glm(data,glm.fit)$delta[1]
}
cv.error
## Q8(d)
# Same computation, different seed; LOOCV is deterministic.
set.seed(2)
library(boot)
cv.error=rep(0,4)
for (j in 1:4){
glm.fit<-glm(y~poly(x, j), data = data)
cv.error[j]<-cv.glm(data,glm.fit)$delta[1]
}
cv.error
# The LOOCV errors are the same. This is expected since there is no randomness in the training/validation data set splits.
## Q8(e)
# The quadratic model had the lowest LOOCV error.
# This is expected since y is a polynomial of the second order (i.e quadratic) that is dependent on x.
## Q8(f)
set.seed(1)
for (j in 1:4){
print(summary(glm(y~poly(x, j), data = data)))
}
# The quadratic term in the quadratic, cubic, and quartic model is statistically significant, while the rest are not.
# This agrees with the conclusion drawn from the CV analysis that suggests that the quadratic model outperforms the rest.
# Question 9: inference for median home value (medv) in the Boston data.
## Q9(a): sample mean of medv
attach(Boston)
mu<-mean(medv)
mu #mu = 22.53
## Q9(b): standard error of the sample mean
se<-sd(medv)/sqrt(length(medv))
se #standard error is 0.409
## Q9(c): bootstrap SE of the mean
set.seed(1)
# FIX: the replication count was originally passed as `R<-1000`, which
# silently created a global variable `R` as a side effect; pass it as the
# named argument `R = 1000` instead (identical boot() behavior).
boot(medv,function(x,index){mean(x[index])}, R = 1000)
# The standard error is almost the same as the estimated standard error in (b)
## Q9(d): 95% CI for the mean (CLT approximation vs. t-test)
# set.seed(1)
# boot.ci(boot(medv,function(x,index){mean(x[index])}, R = 1000), type = "bca")
# boot.ci(boot.out = boot(medv,function(x,index){mean(x[index])}, R = 1000), conf = .95)
t.test(Boston$medv)
CI.mu.hat<-c(mu-2*se,mu+2*se)
CI.mu.hat
# The confidence interval obtained using the central limit theorem approach and t-test approach are almost the same.
## Q9(e): sample median
mu.med<-median(medv)
mu.med
## Q9(f): bootstrap SE of the median
set.seed(1)
boot(medv, function(x,index){median(x[index])}, R = 1000)
# The standard error of the median is approx. 0.378.
## Q9(g): 10th percentile of medv
mu0.1<-quantile(medv, .1)
mu0.1
## Q9(h): bootstrap SE of the 10th percentile
set.seed(1)
# FIX: the statistic originally closed over the global `medv` instead of its
# argument `x` (it only worked because x IS medv); use x[index] so the
# function is a correct bootstrap statistic. Results are unchanged here.
boot(medv, function(x,index){quantile(x[index],.1)}, R = 1000)
#The estimated standard error of the 10th percentile of medv in Boston suburbs is 0.477.
|
#######################################
## author: Rob Williams ##
## contact: jayrobwilliams@gmail.com ##
## project: dissertation ##
## created: December 11, 2017 ##
## updated: December 30, 2018 ##
#######################################
## this script extracts the spatial components of the territorial governability
## measure from each ethnic group-year polygon, including population, nightlights,
## travel times, and various statistics calculated based on them
## print script to identify in log
# Timestamped start marker so this run can be identified in the SLURM log.
print(paste('Group Variable Creation Started', Sys.time()))
## load packages
library(sf) # new unified spatial package
library(sp) # basic spatial data handling
library(raster) # pixel based data
library(rgdal) # spatial data I/O
library(rgeos) # spatial topology operations
library(tidyverse)
library(spdplyr)
library(stringr)
library(data.table)
library(ineq) # Gini coefficient
# Project helpers: projectUTM() etc. and the cshapes.rc() date recode used
# below come from these sourced scripts.
source(here::here('Code/sfFunctions.R'))
source(here::here('Code/cshapes Recode.R'))
library(doParallel)
## get number of cores from SLURM submission script
registerDoParallel(as.numeric(Sys.getenv('SLURM_CPUS_PER_TASK')))
## create directory to hold output
dir.create(here::here('Input Data'), showWarnings = F)
## read in data ####
## read in cshapes (historical state boundaries shapefile)
cshapes <- st_read(here::here('Datasets/cshapes/cshapes_0.6'), 'cshapes')
## read in GeoEPR (ethnic group settlement polygons)
GeoEPR <- st_read(here::here('Datasets/EPR'), 'GeoEPR-2014 Cleaned')
## read in EPR (group-level political status panel)
EPR <- read.csv(here::here('Datasets/EPR/EPR-2014.csv'))
## read in oil data (onshore petroleum fields)
oil <- st_read(here::here('Datasets/PRIO/PETRODATA/PETRODATA V1.2'), 'onshore_cleaned')
## read in population rasters (one layer per year)
population_cnt <- stack(list.files(here::here('Datasets/Population',
'Count Corrected'),
'.tif', full.names = T))
## read in nightlights rasters (one layer per year)
nightlights <- stack(list.files(here::here('Datasets/Nightlights/Corrected'),
'.tif', full.names = T))
## data preprocessing ####
## temporal range of the sample.
## FIX: data_start/data_end were originally defined *after* their first use
## in the state_data recode below, so that mutate() either errored or relied
## on definitions from a sourced script. Define them up front; values are
## unchanged.
data_start <- 1990
data_end <- 2013
## spatial pre-processing ####
## recode start and end dates based on 6 months of a year rule
cshapes <- cshapes.rc(cshapes)
## assign WGS84 CRS to GeoEPR
GeoEPR <- st_transform(GeoEPR, st_crs(cshapes))
## create object of capitals (capital-city points carried with state attrs)
capitals <- st_as_sf(st_drop_geometry(cshapes), coords = c('CAPLONG', 'CAPLAT'),
crs = st_crs(cshapes), agr = 'constant')
## recode state start/end years from <= 1990 to 1990 and >= 2013 to 2013
state_data <- cshapes %>%
mutate(GWSYEAR = ifelse(GWSYEAR <= data_start, data_start, GWSYEAR),
GWEYEAR = ifelse(GWEYEAR >= data_end, data_end, GWEYEAR))
## expand cshapes dataframe to yearly observations (one row per state-year)
state_data <- data.frame(setDT(state_data)[, list(GWCODE = GWCODE,
year = seq(GWSYEAR, GWEYEAR, by = 1)),
by = 1:nrow(state_data)][, -1])
## drop oil fields w/o confirmed discovery date
oil <- oil %>% filter(DISC != -9999)
## convert group status to ordered factor in EPR; report coding in text.
## Ordering runs from least to most political power, so `<` comparisons on
## status are meaningful (used for the `downgraded` coding later).
EPR$status <- factor(EPR$status, ordered = T, levels = c('IRRELEVANT',
'STATE COLLAPSE',
'DISCRIMINATED',
'POWERLESS',
'SELF-EXCLUSION',
'JUNIOR PARTNER',
'SENIOR PARTNER',
'DOMINANT',
'MONOPOLY'))
## subset EPR to groups that end after start of sample; recode from <= 1990 to 1990
EPR_df <- EPR %>%
filter(to >= data_start) %>%
mutate(from = ifelse(from <= data_start, data_start, from))
## expand EPR to yearly observations (one row per group-year)
EPR_df <- data.frame(setDT(EPR_df)[, list(gwid = gwid, groupid = groupid,
gwgroupid = gwgroupid,
year = as.numeric(seq(from, to, by = 1)),
size = size, status = status,
reg_aut = reg_aut, umbrella = umbrella),
by = 1:nrow(EPR_df)][, -1])
## subset GeoEPR to polygons that end after start of sample
GeoEPR <- GeoEPR %>% filter(to >= data_start)
## extract dataframe and recode from <= 1990 to 1990
GeoEPR_df <- GeoEPR %>% mutate(from = ifelse(from <= data_start, data_start, from))
## expand GeoEPR dataframe to yearly observations (one row per group-year)
GeoEPR_df <- data.frame(setDT(GeoEPR_df)[, list(gwid = gwid,
groupid = groupid,
gwgroupid = gwgroupid,
year = seq(from, to, by = 1),
state = statename, group = group),
by = 1:nrow(GeoEPR_df)][, -1])
## GeoEPR and EPR disagree on when some groups are politically relevant e.g.
## GeoEPR says that the northern groups in sierra leone are politically relevant
## in 1996-2007, while EPR says they are politically relevant in 1997-2007. I
## defer to EPR since it is the main data source. other issues arise when GeoEPR
## has a polygon for a group that moves in and out of relevance over time, but
## that relevance is entirely outside the sample period of 1990-2013 e.g. Hindus
## in Mauritius have a polygon from 1969-2013, but their last year of political
## relevance is 1987, so they get NA for all of the EPR variables (status, size,
## etc). the last example in this sample are the Shona in Zimbabwe, which do not
## become politically relevant until 1992.
GeoEPR_df <- GeoEPR_df %>%
left_join(EPR_df) %>%
tidyr::replace_na(list(status = 'IRRELEVANT'))
## recode croatia to start in 1992 to match cshapes coding using international
## recognition by EEC and UN, instead of EPR coding using independence referendum
## in 1991
GeoEPR_df <- GeoEPR_df %>% filter(!(state == 'Croatia' & year == 1991))
## same but for Slovenia
GeoEPR_df <- GeoEPR_df %>% filter(!(state == 'Slovenia' & year == 1991))
## same but for Russia; USSR ends on 12/20/91
GeoEPR_df <- GeoEPR_df %>% filter(!(state == 'Russia' & year == 1991))
## drop observations after 2013 b/c nightlights still end then
GeoEPR_df <- GeoEPR_df %>% filter(year <= 2013)
## onset related variables
## Main extraction loop: for each group-year row of GeoEPR_df, compute the
## spatial covariates (population, nightlights, oil, borders, distances,
## overlap with the dominant group) and return one data.frame row; foreach
## rbinds the rows. .errorhandling = 'remove' silently drops group-years
## that error -- NOTE(review): consider logging dropped rows instead.
group_data <- foreach(i = 1:nrow(GeoEPR_df), # replace w/ nrow(GeoEPR_df) after figuring out
.packages = c('sf', 'sp', 'raster', 'rgeos', 'ineq', 'dplyr',
'data.table'),
.combine = rbind, .errorhandling = 'remove') %dopar% {
## get group-year i
group <- GeoEPR_df[i, ]
## get state-year for group-year i
state <- state_data[state_data$GWCODE == group$gwid & state_data$year == group$year, ]
## get polygons for group-year i's state
state_poly <- cshapes[cshapes$GWCODE == state$GWCODE &
state$year %between% st_drop_geometry(cshapes[, c('GWSYEAR', 'GWEYEAR')]), ]
## get point for group-year i's capital
capital <- capitals[capitals$GWCODE == group$gwid &
group$year %between% st_drop_geometry(capitals[, c('GWSYEAR', 'GWEYEAR')]), ]
## get polygon for group-year i
terr <- GeoEPR[GeoEPR$gwgroupid == group$gwgroupid &
group$year %between% st_drop_geometry(GeoEPR[, c('from', 'to')]), ]
## print ID message; maybe temporary?
print(paste('Coding group variables', 'for', terr$group, 'in',
terr$statename, 'row', i))
## get number of polygons group is spread across (includes holes, I think...)
polygons_terr <- length(unique(st_coordinates(terr)[,4]))
## subset GeoEPR to group polygons in existence for group-year i and get
## adjacent and overlapping group polygons
GeoEPR_terr <- GeoEPR[group$year %between% st_drop_geometry(GeoEPR[, c('from', 'to')]), ][terr, ]
## drop group i's polygon
GeoEPR_terr <- GeoEPR_terr[GeoEPR_terr$gwgroupid != terr$gwgroupid, ]
## drop polygons not in group i's state
GeoEPR_terr <- GeoEPR_terr[GeoEPR_terr$statename == terr$statename, ]
## get population for group-year i; 1989 b/c 1 indexing
pop_cnt_terr <- crop(population_cnt[[group$year - 1989]], terr)
## get nightlights for group-year i; use 1992 data for 1990 and 1991; not ideal but
## still better than just using nightlights for only one year; 1991 b/c 1 indexing
nl_terr <- crop(nightlights[[max(group$year - 1991, 1)]], terr)
## mask population and nightlights for inequality calculation
pop_cnt_terr <- mask(pop_cnt_terr, terr)
nl_terr <- mask(nl_terr, terr)
## calculate total population
pop_terr_tot <- cellStats(pop_cnt_terr, 'sum')
## calculate population inequality; recode NaN to 0 b/c perfect equality
pop_terr_gini <- Gini(pop_cnt_terr@data@values)
pop_terr_gini <- ifelse(is.nan(pop_terr_gini), 0, pop_terr_gini)
## calculate mean nightlights
## NOTE(review): nl_terr_mean and nl_terr_med are computed but not included
## in the returned data.frame below -- confirm whether that is intended.
nl_terr_mean <- cellStats(nl_terr, 'mean')
## calculate median nightlights
nl_terr_med <- median(nl_terr@data@values, na.rm = T)
## calculate total nightlights
nl_terr_tot <- cellStats(nl_terr, 'sum')
## if no nightlights for group-year i, recode to 1 to preserve inequality measure
nl_terr_tot <- ifelse(nl_terr_tot == 0, 1, nl_terr_tot)
## subset oil fields to those discovered before group-year
oil_terr <- oil %>% filter(DISC <= group$year)
## check for presence of oil in territory (0/1 indicator)
oil_terr <- max(st_intersects(terr, oil_terr, sparse = F))
## project territory and rasters
terr <- projectUTM(terr)
state_poly <- st_transform(state_poly, st_crs(terr))
capital <- st_transform(capital, st_crs(terr))
pop_cnt_terr <- projectRaster(pop_cnt_terr, crs = CRS(st_crs(terr)$proj4string))
nl_terr <- projectRaster(nl_terr, crs = CRS(st_crs(terr)$proj4string))
GeoEPR_terr <- st_transform(GeoEPR_terr, st_crs(terr))
## redraw polygons w/ GEOS, fixing topology errors
terr <- st_simplify(terr, preserveTopology = T, dTolerance = 0)
GeoEPR_terr <- st_simplify(GeoEPR_terr, preserveTopology = T, dTolerance = 0)
state_poly <- st_simplify(state_poly, preserveTopology = T, dTolerance = 0)
terr <- st_buffer(terr, dist = .001)
GeoEPR_terr <- st_buffer(GeoEPR_terr, dist = .001)
state_poly <- st_buffer(state_poly, dist = .001)
## check whether territory abuts an international border by checking whether
## it is fully covered by the state's polygon buffered 1km inward
border <- !st_within(terr, st_buffer(state_poly, -1e3), sparse = F)[[1]]
## calculate area of group territory in km^2
area_terr <- as.numeric(st_area(terr) / 1e6)
## calculate distance from territory centroid to capital in km
cap_dist <- as.numeric(st_distance(st_centroid(terr), capital) / 1e3)
## code whether group's status has been downgraded in the previous year
## (relies on the ordered status factor: `<` compares power levels)
downgraded <- group$status < (EPR[EPR$gwgroupid == group$gwgroupid, ]
[(group$year - 1) %between%
EPR[EPR$gwgroupid == group$gwgroupid,
c("from", "to")], "status"])
## if state does not exist in previous year, set downgraded to 0 b/c new
## political context (replace w/ 2 and spot code after running script later)
if (length(downgraded) == 0) downgraded <- FALSE
## code political exclusion for group-year i
## (levels 1-4 = IRRELEVANT through POWERLESS in the ordered factor)
excluded <- as.numeric(group$status) <= 4
## get all ethnic group-years in state-year
groups <- GeoEPR_df[GeoEPR_df$gwid == group$gwid & GeoEPR_df$year == group$year, ]
if (all(group$gwgroupid == groups[groups$status == max(groups$status), 'gwgroupid'])) {
## group-year i is dominant group, so its territory overlaps w/ dominant group's
dom_overlap <- 1
} else {
## code whether group-year i's territory overlaps the territory of the dominant group
dom_overlap <- GeoEPR %>%
left_join(groups, by = c('gwid', 'group', 'groupid', 'gwgroupid')) %>%
filter(status == max(groups$status) & gwid %in% groups$gwid) %>%
st_transform(st_crs(terr)) %>%
st_intersects(terr) %>%
as.numeric() %>%
replace_na(0)
}
## return all measures for concatenation by foreach
data.frame(COWcode = state_poly$COWCODE, gwid = group$gwid, groupid = group$groupid,
gwgroupid = group$gwgroupid, year = group$year, state = group$state,
group = group$group, size = group$size, status = group$status,
downgraded = downgraded, excluded = excluded,
pop_tot = pop_terr_tot, pop_gini = pop_terr_gini,
nl = nl_terr_tot, oil_terr, dom_overlap = dom_overlap,
area = area_terr, border = border, cap_dist = cap_dist,
polygons = polygons_terr)
}
## save group data
saveRDS(group_data, here::here('Input Data/group data.RDS'))
## print script to verify successful execution in log
print(paste('Group Variable Creation Completed', Sys.time()))
## quit R
quit(save = 'no')
###################
## End of Script ##
###################
|
/Code/Group Variable Creation.R
|
no_license
|
MACHEIKH/conflict-preemption
|
R
| false
| false
| 13,432
|
r
|
#######################################
## author: Rob Williams ##
## contact: jayrobwilliams@gmail.com ##
## project: dissertation ##
## created: December 11, 2017 ##
## updated: December 30, 2018 ##
#######################################
## this script extracts the spatial components of the territorial governability
## measure from each ethnic group-year polygon, including population, nightlights,
## travel times, and various statistics calculated based on them
## print script to identify in log
# NOTE(review): duplicate copy of the group-variable script in this dump.
# Timestamped start marker so this run can be identified in the SLURM log.
print(paste('Group Variable Creation Started', Sys.time()))
## load packages
library(sf) # new unified spatial package
library(sp) # basic spatial data handling
library(raster) # pixel based data
library(rgdal) # spatial data I/O
library(rgeos) # spatial topology operations
library(tidyverse)
library(spdplyr)
library(stringr)
library(data.table)
library(ineq) # Gini coefficient
# Project helpers (projectUTM, cshapes.rc) come from these sourced scripts.
source(here::here('Code/sfFunctions.R'))
source(here::here('Code/cshapes Recode.R'))
library(doParallel)
## get number of cores from SLURM submission script
registerDoParallel(as.numeric(Sys.getenv('SLURM_CPUS_PER_TASK')))
## create directory to hold output
dir.create(here::here('Input Data'), showWarnings = F)
## read in data ####
## read in cshapes (historical state boundaries shapefile)
cshapes <- st_read(here::here('Datasets/cshapes/cshapes_0.6'), 'cshapes')
## read in GeoEPR (ethnic group settlement polygons)
GeoEPR <- st_read(here::here('Datasets/EPR'), 'GeoEPR-2014 Cleaned')
## read in EPR (group-level political status panel)
EPR <- read.csv(here::here('Datasets/EPR/EPR-2014.csv'))
## read in oil data (onshore petroleum fields)
oil <- st_read(here::here('Datasets/PRIO/PETRODATA/PETRODATA V1.2'), 'onshore_cleaned')
## read in population rasters (one layer per year)
population_cnt <- stack(list.files(here::here('Datasets/Population',
'Count Corrected'),
'.tif', full.names = T))
## read in nightlights rasters (one layer per year)
nightlights <- stack(list.files(here::here('Datasets/Nightlights/Corrected'),
'.tif', full.names = T))
## data preprocessing ####
## temporal range of the sample.
## FIX: data_start/data_end were originally defined *after* their first use
## in the state_data recode below, so that mutate() either errored or relied
## on definitions from a sourced script. Define them up front; values are
## unchanged.
data_start <- 1990
data_end <- 2013
## spatial pre-processing ####
## recode start and end dates based on 6 months of a year rule
cshapes <- cshapes.rc(cshapes)
## assign WGS84 CRS to GeoEPR
GeoEPR <- st_transform(GeoEPR, st_crs(cshapes))
## create object of capitals (capital-city points carried with state attrs)
capitals <- st_as_sf(st_drop_geometry(cshapes), coords = c('CAPLONG', 'CAPLAT'),
crs = st_crs(cshapes), agr = 'constant')
## recode state start/end years from <= 1990 to 1990 and >= 2013 to 2013
state_data <- cshapes %>%
mutate(GWSYEAR = ifelse(GWSYEAR <= data_start, data_start, GWSYEAR),
GWEYEAR = ifelse(GWEYEAR >= data_end, data_end, GWEYEAR))
## expand cshapes dataframe to yearly observations (one row per state-year)
state_data <- data.frame(setDT(state_data)[, list(GWCODE = GWCODE,
year = seq(GWSYEAR, GWEYEAR, by = 1)),
by = 1:nrow(state_data)][, -1])
## drop oil fields w/o confirmed discovery date
oil <- oil %>% filter(DISC != -9999)
## convert group status to ordered factor in EPR; report coding in text.
## Ordering runs from least to most political power, so `<` comparisons on
## status are meaningful.
EPR$status <- factor(EPR$status, ordered = T, levels = c('IRRELEVANT',
'STATE COLLAPSE',
'DISCRIMINATED',
'POWERLESS',
'SELF-EXCLUSION',
'JUNIOR PARTNER',
'SENIOR PARTNER',
'DOMINANT',
'MONOPOLY'))
## subset EPR to groups that end after start of sample; recode from <= 1990 to 1990
EPR_df <- EPR %>%
filter(to >= data_start) %>%
mutate(from = ifelse(from <= data_start, data_start, from))
## expand EPR to yearly observations (one row per group-year)
EPR_df <- data.frame(setDT(EPR_df)[, list(gwid = gwid, groupid = groupid,
gwgroupid = gwgroupid,
year = as.numeric(seq(from, to, by = 1)),
size = size, status = status,
reg_aut = reg_aut, umbrella = umbrella),
by = 1:nrow(EPR_df)][, -1])
## subset GeoEPR to polygons that end after start of sample
GeoEPR <- GeoEPR %>% filter(to >= data_start)
## extract dataframe and recode from <= 1990 to 1990
GeoEPR_df <- GeoEPR %>% mutate(from = ifelse(from <= data_start, data_start, from))
## expand GeoEPR dataframe to yearly observations (one row per group-year)
GeoEPR_df <- data.frame(setDT(GeoEPR_df)[, list(gwid = gwid,
groupid = groupid,
gwgroupid = gwgroupid,
year = seq(from, to, by = 1),
state = statename, group = group),
by = 1:nrow(GeoEPR_df)][, -1])
## GeoEPR and EPR disagree on when some groups are politically relevant e.g.
## GeoEPR says that the northern groups in sierra leone are politically relevant
## in 1996-2007, while EPR says they are politically relevant in 1997-2007. I
## defer to EPR since it is the main data source. other issues arise when GeoEPR
## has a polygon for a group that moves in and out of relevance over time, but
## that relevance is entirely outside the sample period of 1990-2013 e.g. Hindus
## in Mauritius have a polygon from 1969-2013, but their last year of political
## relevance is 1987, so they get NA for all of the EPR variables (status, size,
## etc). the last example in this sample are the Shona in Zimbabwe, which do not
## become politically relevant until 1992.
GeoEPR_df <- GeoEPR_df %>%
left_join(EPR_df) %>%
tidyr::replace_na(list(status = 'IRRELEVANT'))
## recode croatia to start in 1992 to match cshapes coding using international
## recognition by EEC and UN, instead of EPR coding using independence referendum
## in 1991
GeoEPR_df <- GeoEPR_df %>% filter(!(state == 'Croatia' & year == 1991))
## same but for Slovenia
GeoEPR_df <- GeoEPR_df %>% filter(!(state == 'Slovenia' & year == 1991))
## same but for Russia; USSR ends on 12/20/91
GeoEPR_df <- GeoEPR_df %>% filter(!(state == 'Russia' & year == 1991))
## drop observations after 2013 b/c nightlights still end then
GeoEPR_df <- GeoEPR_df %>% filter(year <= 2013)
## onset related variables
## NOTE(review): builds one row of group-level covariates per (group, year) in
## parallel. `.errorhandling = 'remove'` silently drops any iteration that
## errors, so row counts should be checked against nrow(GeoEPR_df) afterwards.
group_data <- foreach(i = 1:nrow(GeoEPR_df), # replace w/ nrow(GeoEPR_df) after figuring out
                      .packages = c('sf', 'sp', 'raster', 'rgeos', 'ineq', 'dplyr',
                                    'data.table'),
                      .combine = rbind, .errorhandling = 'remove') %dopar% {
## get group-year i
group <- GeoEPR_df[i, ]
## get state-year for group-year i
state <- state_data[state_data$GWCODE == group$gwid & state_data$year == group$year, ]
## get polygons for group-year i's state
state_poly <- cshapes[cshapes$GWCODE == state$GWCODE &
state$year %between% st_drop_geometry(cshapes[, c('GWSYEAR', 'GWEYEAR')]), ]
## get point for group-year i's capital
capital <- capitals[capitals$GWCODE == group$gwid &
group$year %between% st_drop_geometry(capitals[, c('GWSYEAR', 'GWEYEAR')]), ]
## get polygon for group-year i
terr <- GeoEPR[GeoEPR$gwgroupid == group$gwgroupid &
group$year %between% st_drop_geometry(GeoEPR[, c('from', 'to')]), ]
## print ID message; maybe temporary?
print(paste('Coding group variables', 'for', terr$group, 'in',
            terr$statename, 'row', i))
## get number of polygons group is spread across (includes holes, I think...)
## (column 4 of st_coordinates output is the polygon ID for MULTIPOLYGONs)
polygons_terr <- length(unique(st_coordinates(terr)[,4]))
## subset GeoEPR to group polygons in existence for group-year i and get
## adjacent and overlapping group polygons (sf bracket-join on terr)
GeoEPR_terr <- GeoEPR[group$year %between% st_drop_geometry(GeoEPR[, c('from', 'to')]), ][terr, ]
## drop group i's polygon
GeoEPR_terr <- GeoEPR_terr[GeoEPR_terr$gwgroupid != terr$gwgroupid, ]
## drop polygons not in group i's state
GeoEPR_terr <- GeoEPR_terr[GeoEPR_terr$statename == terr$statename, ]
## get population for group-year i; 1989 b/c 1 indexing
## (assumes population_cnt layer 1 corresponds to 1990 -- TODO confirm)
pop_cnt_terr <- crop(population_cnt[[group$year - 1989]], terr)
## get nightlights for group-year i; use 1992 data for 1990 and 1991; not ideal but
## still better than just using nightlights for only one year; 1991 b/c 1 indexing
nl_terr <- crop(nightlights[[max(group$year - 1991, 1)]], terr)
## mask population and nightlights for inequality calculation
pop_cnt_terr <- mask(pop_cnt_terr, terr)
nl_terr <- mask(nl_terr, terr)
## calculate total population
pop_terr_tot <- cellStats(pop_cnt_terr, 'sum')
## calculate population inequality; recode NaN to 0 b/c perfect equality
pop_terr_gini <- Gini(pop_cnt_terr@data@values)
pop_terr_gini <- ifelse(is.nan(pop_terr_gini), 0, pop_terr_gini)
## calculate mean nightlights
nl_terr_mean <- cellStats(nl_terr, 'mean')
## calculate median nightlights
nl_terr_med <- median(nl_terr@data@values, na.rm = T)
## calculate total nightlights
nl_terr_tot <- cellStats(nl_terr, 'sum')
## if no nightlights for group-year i, recode to 1 to preserve inequality measure
nl_terr_tot <- ifelse(nl_terr_tot == 0, 1, nl_terr_tot)
## subset oil fields to those discovered before group-year
oil_terr <- oil %>% filter(DISC <= group$year)
## check for presence of oil in territory (max of logical matrix -> 0/1)
oil_terr <- max(st_intersects(terr, oil_terr, sparse = F))
## project territory and rasters
## (projectUTM is a project-local helper -- presumably picks the UTM zone of
## the territory centroid; confirm in its definition)
terr <- projectUTM(terr)
state_poly <- st_transform(state_poly, st_crs(terr))
capital <- st_transform(capital, st_crs(terr))
pop_cnt_terr <- projectRaster(pop_cnt_terr, crs = CRS(st_crs(terr)$proj4string))
nl_terr <- projectRaster(nl_terr, crs = CRS(st_crs(terr)$proj4string))
GeoEPR_terr <- st_transform(GeoEPR_terr, st_crs(terr))
## redraw polygons w/ GEOS, fixing topology errors
terr <- st_simplify(terr, preserveTopology = T, dTolerance = 0)
GeoEPR_terr <- st_simplify(GeoEPR_terr, preserveTopology = T, dTolerance = 0)
state_poly <- st_simplify(state_poly, preserveTopology = T, dTolerance = 0)
terr <- st_buffer(terr, dist = .001)
GeoEPR_terr <- st_buffer(GeoEPR_terr, dist = .001)
state_poly <- st_buffer(state_poly, dist = .001)
## check whether territory abuts an international border by checking whether
## it is fully covered by the state's polygon buffered 1km inward
border <- !st_within(terr, st_buffer(state_poly, -1e3), sparse = F)[[1]]
## calculate area of group territory in km^2
area_terr <- as.numeric(st_area(terr) / 1e6)
## calculate distance from territory centroid to capital in km
cap_dist <- as.numeric(st_distance(st_centroid(terr), capital) / 1e3)
## code whether group's status has been downgraded in the previous year:
## compare the ordered status factor against the group's status in year - 1
downgraded <- group$status < (EPR[EPR$gwgroupid == group$gwgroupid, ]
[(group$year - 1) %between%
EPR[EPR$gwgroupid == group$gwgroupid,
c("from", "to")], "status"])
## if state does not exist in previous year, set downgraded to 0 b/c new
## political context (replace w/ 2 and spot code after running script later)
if (length(downgraded) == 0) downgraded <- FALSE
## code political exclusion for group-year i
## (levels 1-4 of the ordered factor: IRRELEVANT through POWERLESS)
excluded <- as.numeric(group$status) <= 4
## get all ethnic group-years in state-year
groups <- GeoEPR_df[GeoEPR_df$gwid == group$gwid & GeoEPR_df$year == group$year, ]
if (all(group$gwgroupid == groups[groups$status == max(groups$status), 'gwgroupid'])) {
## group-year i is dominant group, so its territory overlaps w/ dominant group's
dom_overlap <- 1
} else {
## code whether group-year i's territory overlaps the territory of the dominant group
dom_overlap <- GeoEPR %>%
left_join(groups, by = c('gwid', 'group', 'groupid', 'gwgroupid')) %>%
filter(status == max(groups$status) & gwid %in% groups$gwid) %>%
st_transform(st_crs(terr)) %>%
st_intersects(terr) %>%
as.numeric() %>%
replace_na(0)
}
## return all measures for concatenation by foreach
data.frame(COWcode = state_poly$COWCODE, gwid = group$gwid, groupid = group$groupid,
           gwgroupid = group$gwgroupid, year = group$year, state = group$state,
           group = group$group, size = group$size, status = group$status,
           downgraded = downgraded, excluded = excluded,
           pop_tot = pop_terr_tot, pop_gini = pop_terr_gini,
           nl = nl_terr_tot, oil_terr, dom_overlap = dom_overlap,
           area = area_terr, border = border, cap_dist = cap_dist,
           polygons = polygons_terr)
}
## save group data to disk for downstream scripts
saveRDS(group_data, here::here('Input Data/group data.RDS'))
## print script completion time to verify successful execution in log
print(paste('Group Variable Creation Completed', Sys.time()))
## quit R without saving the workspace (script is run non-interactively)
quit(save = 'no')
###################
## End of Script ##
###################
|
#Ram A May 9 2015
#Read the semicolon-delimited power-consumption file
DF <- read.csv("./data/household_power_consumption.txt",sep=";",header=TRUE)
#Subset to the two analysis dates (1-2 Feb 2007)
DF2 <- subset(DF, as.Date(Date, "%d/%m/%Y") == as.Date("2007-02-01") | as.Date(Date, "%d/%m/%Y") == as.Date("2007-02-02"))
#select one column and clean it up and convert
DF3 <- DF2[,"Global_active_power"]
# Fix: a bare "?" is an invalid regular expression (repetition operator with
# nothing to repeat), so the original gsub() call errored at runtime. The file
# uses "?" as a literal missing-value marker, so match it with fixed = TRUE.
DF3 <- gsub("?", "", DF3, fixed = TRUE) # remove literal ?
DF3 <- as.numeric(DF3) # turn into numbers; emptied entries become NA
#create the plot in a file device
png(file="plot1.png", width=480, height=480)
hist(DF3, col="red", xlab = "Global Active Power (kilowatts)", main="Global Active Power")
dev.off()
|
/plot1.R
|
no_license
|
thag/ExData_Plotting1
|
R
| false
| false
| 608
|
r
|
#Ram A May 9 2015
#Read the semicolon-delimited power-consumption file
DF <- read.csv("./data/household_power_consumption.txt",sep=";",header=TRUE)
#Subset to the two analysis dates (1-2 Feb 2007)
DF2 <- subset(DF, as.Date(Date, "%d/%m/%Y") == as.Date("2007-02-01") | as.Date(Date, "%d/%m/%Y") == as.Date("2007-02-02"))
#select one column and clean it up and convert
DF3 <- DF2[,"Global_active_power"]
# Fix: a bare "?" is an invalid regular expression (repetition operator with
# nothing to repeat), so the original gsub() call errored at runtime. The file
# uses "?" as a literal missing-value marker, so match it with fixed = TRUE.
DF3 <- gsub("?", "", DF3, fixed = TRUE) # remove literal ?
DF3 <- as.numeric(DF3) # turn into numbers; emptied entries become NA
#create the plot in a file device
png(file="plot1.png", width=480, height=480)
hist(DF3, col="red", xlab = "Global Active Power (kilowatts)", main="Global Active Power")
dev.off()
|
## NOTE: the original script began with `rm(list = ls())`, which wipes the
## caller's entire global environment; removed as unsafe in a sourced script.
library(lubridate)
## Read the semicolon-delimited power-consumption file
Datos <- read.table(file = "household_power_consumption.txt" , header = TRUE, sep = ";")
## Combine the date and time columns into one POSIXct timestamp
Datos$Fecha <- dmy_hms(paste(Datos$Date, Datos$Time, sep = " "))
## Keep only 1-2 Feb 2007
SubConjunto <- subset(x = Datos, subset = Datos$Fecha >= dmy_hms("01/02/2007 00:00:00") & Datos$Fecha <= dmy_hms("02/02/2007 23:59:59"))
## Measurement columns are read as character/factor; coerce them to numeric
num_cols <- c("Global_active_power", "Sub_metering_1", "Sub_metering_2",
              "Sub_metering_3", "Voltage", "Global_reactive_power")
SubConjunto[num_cols] <- lapply(SubConjunto[num_cols],
                                function(col) as.numeric(as.character(col)))
#Plot 1 (typo "Frecuency" in the y-axis label fixed to "Frequency")
png(filename = "./Plot1.png", width = 480, height = 480, bg = "transparent")
hist(x = SubConjunto$Global_active_power, col = "red", xlab = "Global Active Power (Kilowatts)", ylab = "Frequency", main = "Global Active Power")
dev.off()
|
/Plot1.R
|
no_license
|
jeancm26/ExData_Plotting1
|
R
| false
| false
| 1,091
|
r
|
## NOTE: the original script began with `rm(list = ls())`, which wipes the
## caller's entire global environment; removed as unsafe in a sourced script.
library(lubridate)
## Read the semicolon-delimited power-consumption file
Datos <- read.table(file = "household_power_consumption.txt" , header = TRUE, sep = ";")
## Combine the date and time columns into one POSIXct timestamp
Datos$Fecha <- dmy_hms(paste(Datos$Date, Datos$Time, sep = " "))
## Keep only 1-2 Feb 2007
SubConjunto <- subset(x = Datos, subset = Datos$Fecha >= dmy_hms("01/02/2007 00:00:00") & Datos$Fecha <= dmy_hms("02/02/2007 23:59:59"))
## Measurement columns are read as character/factor; coerce them to numeric
num_cols <- c("Global_active_power", "Sub_metering_1", "Sub_metering_2",
              "Sub_metering_3", "Voltage", "Global_reactive_power")
SubConjunto[num_cols] <- lapply(SubConjunto[num_cols],
                                function(col) as.numeric(as.character(col)))
#Plot 1 (typo "Frecuency" in the y-axis label fixed to "Frequency")
png(filename = "./Plot1.png", width = 480, height = 480, bg = "transparent")
hist(x = SubConjunto$Global_active_power, col = "red", xlab = "Global Active Power (Kilowatts)", ylab = "Frequency", main = "Global Active Power")
dev.off()
|
# Course 4: Exploratory Data Analysis #
# Course Project #1 #
## 0 - Download and read files
#01 - set up working directory
# NOTE(review): hard-coded setwd() makes this machine-specific; kept for
# compatibility, but relative paths would be more portable.
setwd("./AlexPersonal/DataScience/Assignments/Course4_ExploratoryDataAnalysis/Courseproj1")
#02 - define dataset
datafile <- "./household_power_consumption.txt"
#03 - read dataset
fulldata <- read.table(datafile, header = TRUE, sep = ";", stringsAsFactors = FALSE, dec = ".")
#04 - cut dataset: keep only 1-2 Feb 2007
subsetdata <- fulldata[fulldata$Date %in% c("1/2/2007", "2/2/2007"),]
## 1 - Plot the 2x2 panel chart straight onto the PNG device.
## Fix: the original called dev.copy(png, ...) BEFORE any panel was drawn, so
## the par(mfrow) layout set on the screen device never carried over to the
## PNG device and the four panels overwrote each other. Open the file device
## first, then set the layout, then draw.
png(file = "plot4.png", width = 480, height = 480)
par(mfrow = (c(2, 2)), mar = c(4, 4, 2, 1))
datetime <- strptime(paste(subsetdata$Date, subsetdata$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
submetering1 <- as.numeric(subsetdata$Sub_metering_1)
submetering2 <- as.numeric(subsetdata$Sub_metering_2)
submetering3 <- as.numeric(subsetdata$Sub_metering_3)
plot(datetime, as.numeric(subsetdata$Global_active_power), type="l", xlab="", ylab="Global Active Power (kilowatts)")
plot(datetime, as.numeric(subsetdata$Voltage), type="l", xlab="datetime", ylab="Voltage")
plot(datetime, submetering1, type="l", xlab="", ylab="Energy_sub_metering")
lines(datetime, submetering2, col = "red")
lines(datetime, submetering3, col = "blue")
legend("topright", c("submetering1", "submetering2", "submetering3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
plot(datetime, as.numeric(subsetdata$Global_reactive_power), type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
/plot4.R
|
no_license
|
dushuang2/ExData_Plotting1
|
R
| false
| false
| 1,536
|
r
|
# Course 4: Exploratory Data Analysis #
# Course Project #1 #
## 0 - Download and read files
#01 - set up working directory
# NOTE(review): hard-coded setwd() makes this machine-specific; kept for
# compatibility, but relative paths would be more portable.
setwd("./AlexPersonal/DataScience/Assignments/Course4_ExploratoryDataAnalysis/Courseproj1")
#02 - define dataset
datafile <- "./household_power_consumption.txt"
#03 - read dataset
fulldata <- read.table(datafile, header = TRUE, sep = ";", stringsAsFactors = FALSE, dec = ".")
#04 - cut dataset: keep only 1-2 Feb 2007
subsetdata <- fulldata[fulldata$Date %in% c("1/2/2007", "2/2/2007"),]
## 1 - Plot the 2x2 panel chart straight onto the PNG device.
## Fix: the original called dev.copy(png, ...) BEFORE any panel was drawn, so
## the par(mfrow) layout set on the screen device never carried over to the
## PNG device and the four panels overwrote each other. Open the file device
## first, then set the layout, then draw.
png(file = "plot4.png", width = 480, height = 480)
par(mfrow = (c(2, 2)), mar = c(4, 4, 2, 1))
datetime <- strptime(paste(subsetdata$Date, subsetdata$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
submetering1 <- as.numeric(subsetdata$Sub_metering_1)
submetering2 <- as.numeric(subsetdata$Sub_metering_2)
submetering3 <- as.numeric(subsetdata$Sub_metering_3)
plot(datetime, as.numeric(subsetdata$Global_active_power), type="l", xlab="", ylab="Global Active Power (kilowatts)")
plot(datetime, as.numeric(subsetdata$Voltage), type="l", xlab="datetime", ylab="Voltage")
plot(datetime, submetering1, type="l", xlab="", ylab="Energy_sub_metering")
lines(datetime, submetering2, col = "red")
lines(datetime, submetering3, col = "blue")
legend("topright", c("submetering1", "submetering2", "submetering3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
plot(datetime, as.numeric(subsetdata$Global_reactive_power), type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
|
#lag a vector by `lag` positions, padding the front with the first element so
#the result keeps the original length. seq_len() replaces the 1:length(vec)
#footgun: an empty input now returns an empty vector instead of a stray NA.
lagvec=function(lag,vec){
c(rep(vec[1],lag),vec)[seq_len(length(vec))]
}
#fit a gam of the precomputed residuals on the lagged series (separate smooth
#per occupancy state) and score it by mean squared residual; lower is better.
#NOTE(review): requires `gam` (mgcv) plus `resid` and `occupied` columns in
#`data`; the `lagvec` argument here is a lagged numeric vector, not the
#function of the same name.
lagresid_generic=function(lagvec,data){
#data$lagvar=lagvec
r2=gam(resid~s(lagvec,bs="cs",by=factor(occupied)),data=data)
mean(resid(r2)^2)
}
#pick the lag in `lagseq` whose lagged copy of `variable` best explains the
#precomputed residuals (lowest GAM residual MSE)
bestlag=function(variable,lagseq,data){
#one candidate lagged series per lag value
candidates=lapply(lagseq,function(l) lagvec(l,data[,variable]))
#score each candidate by its smooth-fit residual MSE
errors=sapply(candidates,function(v) lagresid_generic(v,data))
winner=which.min(errors)
message(sprintf("Best lag for %s is %d",variable,lagseq[winner]))
lagseq[winner]
}
#find the best lag for each weather/occupancy variable present in `data`
bestlags=function(data){
#only score variables that actually exist as columns
candidates=intersect(c("temp","dewpt","rh","wind","poccupied"),names(data))
lagseq=seq(from=0,to=96,by=4)
#residualise power on time-of-day x weekday before evaluating lags
base_fit=lm(power~timeofday_fac*weekday_fac,data)
data$resid=data$power-predict(base_fit,newdata=data)
sapply(candidates,bestlag,lagseq,data)
}
#materialise the chosen lags: one lagged column per entry of `bestlags`,
#each named "bestlag_<variable>"
get_lags=function(data,bestlags){
lagged_cols=sapply(seq_along(bestlags),function(i){
lagvec(bestlags[i],data[,names(bestlags)[i]])
})
colnames(lagged_cols)=paste("bestlag",names(bestlags),sep="_")
lagged_cols
}
|
/R/bestlags.R
|
no_license
|
jeremyrcoyle/PEVerify
|
R
| false
| false
| 1,095
|
r
|
#lag a vector by `lag` positions, padding the front with the first element so
#the result keeps the original length. seq_len() replaces the 1:length(vec)
#footgun: an empty input now returns an empty vector instead of a stray NA.
lagvec=function(lag,vec){
c(rep(vec[1],lag),vec)[seq_len(length(vec))]
}
#fit a gam of the precomputed residuals on the lagged series (separate smooth
#per occupancy state) and score it by mean squared residual; lower is better.
#NOTE(review): requires `gam` (mgcv) plus `resid` and `occupied` columns in
#`data`; the `lagvec` argument here is a lagged numeric vector, not the
#function of the same name.
lagresid_generic=function(lagvec,data){
#data$lagvar=lagvec
r2=gam(resid~s(lagvec,bs="cs",by=factor(occupied)),data=data)
mean(resid(r2)^2)
}
#pick the lag in `lagseq` whose lagged copy of `variable` best explains the
#precomputed residuals (lowest GAM residual MSE)
bestlag=function(variable,lagseq,data){
#one candidate lagged series per lag value
candidates=lapply(lagseq,function(l) lagvec(l,data[,variable]))
#score each candidate by its smooth-fit residual MSE
errors=sapply(candidates,function(v) lagresid_generic(v,data))
winner=which.min(errors)
message(sprintf("Best lag for %s is %d",variable,lagseq[winner]))
lagseq[winner]
}
#find the best lag for each weather/occupancy variable present in `data`
bestlags=function(data){
#only score variables that actually exist as columns
candidates=intersect(c("temp","dewpt","rh","wind","poccupied"),names(data))
lagseq=seq(from=0,to=96,by=4)
#residualise power on time-of-day x weekday before evaluating lags
base_fit=lm(power~timeofday_fac*weekday_fac,data)
data$resid=data$power-predict(base_fit,newdata=data)
sapply(candidates,bestlag,lagseq,data)
}
#materialise the chosen lags: one lagged column per entry of `bestlags`,
#each named "bestlag_<variable>"
get_lags=function(data,bestlags){
lagged_cols=sapply(seq_along(bestlags),function(i){
lagvec(bestlags[i],data[,names(bestlags)[i]])
})
colnames(lagged_cols)=paste("bestlag",names(bestlags),sep="_")
lagged_cols
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mortalityTable.joined.R
\docType{class}
\name{mortalityTable.joined-class}
\alias{mortalityTable.joined}
\alias{mortalityTable.joined-class}
\title{Class mortalityTable.joined - Life table created by joining two life tables}
\description{
A cohort life table obtained by joining two cohort life tables, each of which
applies only to certain observation years (e.g. for the past use the observed
PoDs, and project them to the future with the trend projection)
}
\section{Slots}{
\describe{
\item{\code{table1}}{The first \code{mortalityTable}, valid for years given in \code{yearRange1}}
\item{\code{yearRange1}}{The years, for which \code{table1} describes the death probabilities}
\item{\code{table2}}{The second \code{mortalityTable}, valid for years given in \code{yearRange2}}
\item{\code{yearRange2}}{The years, for which \code{table2} describes the death probabilities}
}}
|
/man/mortalityTable.joined-class.Rd
|
no_license
|
Algorios/r-mortality-tables
|
R
| false
| true
| 962
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mortalityTable.joined.R
\docType{class}
\name{mortalityTable.joined-class}
\alias{mortalityTable.joined}
\alias{mortalityTable.joined-class}
\title{Class mortalityTable.joined - Life table created by joining two life tables}
\description{
A cohort life table obtained by joining two cohort life tables, each of which
applies only to certain observation years (e.g. for the past use the observed
PoDs, and project them to the future with the trend projection)
}
\section{Slots}{
\describe{
\item{\code{table1}}{The first \code{mortalityTable}, valid for years given in \code{yearRange1}}
\item{\code{yearRange1}}{The years, for which \code{table1} describes the death probabilities}
\item{\code{table2}}{The second \code{mortalityTable}, valid for years given in \code{yearRange2}}
\item{\code{yearRange2}}{The years, for which \code{table2} describes the death probabilities}
}}
|
#' Peers' registered interests
#'
#' Registered financial interests of members of the House of Lords.
#' If `peer_id=NULL` the actual details of registered interests
#' are stored in a nested data frame.
#'
#' @param peer_id The ID of a member of the House of lords. If `NULL`,
#' returns a tibble with all listed financial interests for all members.
#' Defaults to `NULL`.
#' @inheritParams all_answered_questions
#' @return A tibble with details on the interests of peers in
#' the House of Lords.
#' @export
#' @examples
#' \dontrun{
#' x <- lords_interests(4170)
#'
#' y <- lords_interests()
#' }
lords_interests <- function(peer_id = NULL, extra_args = NULL, tidy = TRUE,
                            tidy_style = "snake", verbose = TRUE) {
  ## Scalar choice, so use if/else rather than the vectorised ifelse();
  ## is.null() already returns a logical, so no `== TRUE` is needed.
  if (is.null(peer_id)) {
    json_query <- ".json?"
  } else {
    json_query <- paste0(".json?member=", peer_id)
  }
  baseurl <- paste0(url_util, "lordsregisteredinterests")
  if (verbose) {
    message("Connecting to API")
  }
  ## Fetch a single record first to learn totalResults, which determines
  ## how many 100-record pages the main query must retrieve.
  members <- jsonlite::fromJSON(paste0(
    baseurl, json_query,
    extra_args, "&_pageSize=1"
  ),
  flatten = TRUE
  )
  jpage <- floor(members$result$totalResults / 100)
  query <- paste0(baseurl, json_query, extra_args)
  df <- loop_query(query, jpage, verbose) # in utils-loop.R
  if (nrow(df) == 0) {
    message("The request did not return any data.
                Please check your parameters.")
  } else {
    if (tidy) {
      ## All-member queries have a different (nested) shape, hence two tidiers.
      if (is.null(peer_id)) {
        df <- lords_interests_tidy2(df, tidy_style) ## in utils-lords.R
      } else {
        df <- lords_interests_tidy(df, tidy_style) ## in utils-lords.R
      }
    }
    df
  }
}
#' @export
#' @rdname lords_interests
hansard_lords_interests <- lords_interests
|
/R/lords_interests.R
|
permissive
|
cran/hansard
|
R
| false
| false
| 1,789
|
r
|
#' Peers' registered interests
#'
#' Registered financial interests of members of the House of Lords.
#' If `peer_id=NULL` the actual details of registered interests
#' are stored in a nested data frame.
#'
#' @param peer_id The ID of a member of the House of lords. If `NULL`,
#' returns a tibble with all listed financial interests for all members.
#' Defaults to `NULL`.
#' @inheritParams all_answered_questions
#' @return A tibble with details on the interests of peers in
#' the House of Lords.
#' @export
#' @examples
#' \dontrun{
#' x <- lords_interests(4170)
#'
#' y <- lords_interests()
#' }
lords_interests <- function(peer_id = NULL, extra_args = NULL, tidy = TRUE,
                            tidy_style = "snake", verbose = TRUE) {
  ## Scalar choice, so use if/else rather than the vectorised ifelse();
  ## is.null() already returns a logical, so no `== TRUE` is needed.
  if (is.null(peer_id)) {
    json_query <- ".json?"
  } else {
    json_query <- paste0(".json?member=", peer_id)
  }
  baseurl <- paste0(url_util, "lordsregisteredinterests")
  if (verbose) {
    message("Connecting to API")
  }
  ## Fetch a single record first to learn totalResults, which determines
  ## how many 100-record pages the main query must retrieve.
  members <- jsonlite::fromJSON(paste0(
    baseurl, json_query,
    extra_args, "&_pageSize=1"
  ),
  flatten = TRUE
  )
  jpage <- floor(members$result$totalResults / 100)
  query <- paste0(baseurl, json_query, extra_args)
  df <- loop_query(query, jpage, verbose) # in utils-loop.R
  if (nrow(df) == 0) {
    message("The request did not return any data.
                Please check your parameters.")
  } else {
    if (tidy) {
      ## All-member queries have a different (nested) shape, hence two tidiers.
      if (is.null(peer_id)) {
        df <- lords_interests_tidy2(df, tidy_style) ## in utils-lords.R
      } else {
        df <- lords_interests_tidy(df, tidy_style) ## in utils-lords.R
      }
    }
    df
  }
}
#' @export
#' @rdname lords_interests
hansard_lords_interests <- lords_interests
|
library(httr)
library(readxl)
library(readr)
library(dplyr)
library(magrittr)
library(zoo) # na.locf
library(data.table)
## Download the combined sample-file archive once; skipped when the zip is
## already cached under data-raw/.
if (!file.exists("./data-raw/Samplefilesall.zip")){
dir.create("data-raw", showWarnings = FALSE)
GET(url = "http://data.gov.au/dataset/62ae540b-01b0-4c2e-a984-b8013884f1ec/resource/6ca75bab-96a6-4852-897c-1c0784d2fec9/download/Allyearssamplefile.zip",
write_disk("./data-raw/Samplefilesall.zip", overwrite = TRUE))
}
## Unpack the outer archive, then every inner per-year "Sample*" archive.
unzip("./data-raw/Samplefilesall.zip", exdir = "data-raw")
for (filename in list.files(pattern = "^Sample", path = "data-raw", full.names = TRUE)){
unzip(filename, exdir = "data-raw")
}
## Read one extracted sample file; "NA", "" and "?" are all missing markers.
read_taxstats <- function(filename){
data.table::fread(filename, na.strings = c("NA", "", "?"))
}
## One data.table per extracted per-year sample file.
taxstats <- lapply(list.files(pattern = "file.*txt$",
                              path = "data-raw",
                              recursive = TRUE,
                              full.names = TRUE),
                   read_taxstats)
# metadata
## Variable descriptions shipped with the 2013 sample file (Excel, sheet 1);
## only the first six columns carry information.
tempf <- paste0(tempfile(), ".xlsx")
GET(url = "http://data.gov.au/dataset/e29ef9ca-0d1a-47ec-9e9b-14a79a941511/resource/07087862-134c-4804-99cc-da8e3a6cfdcb/download/taxstats2013samplefile2013.xlsx",
    write_disk(tempf))
sample_file_variable_names <-
read_excel(tempf, sheet = 1) %>%
.[1:6] %>%
filter(!is.na(No.))
## NOTE(review): devtools::use_data() is deprecated in current devtools;
## usethis::use_data() is the drop-in replacement.
devtools::use_data(sample_file_variable_names)
## Lookup table: age_range code -> ordered age-band description
## (levels ordered youngest to oldest via the descending sort).
age_range_decoder <-
readr::read_tsv("age_range age_range_description
0 70 and over
1 65 to 69
2 60 to 64
3 55 to 59
4 50 to 54
5 45 to 49
6 40 to 44
7 35 to 39
8 30 to 34
9 25 to 29
10 20 to 24
11 under 20
") %>%
arrange(desc(age_range)) %>%
mutate(age_range_description = factor(age_range_description,
                                      levels = unique(.$age_range_description),
                                      ordered = TRUE)) %>%
as.data.table %>%
setkey(age_range) %>%
.[]
devtools::use_data(age_range_decoder, overwrite = TRUE)
## Lookup table: Occ_code -> occupation description.
occupation_decoder <-
readr::read_tsv("Occ_code\tOccupation_description
0 Occupation not listed/ Occupation not specified
1 Managers
2 Professionals
3 Technicians and Trades Workers
4 Community and Personal Service Workers
5 Clerical and Administrative Workers
6 Sales workers
7 Machinery operators and drivers
8 Labourers
9 Consultants, apprentices and type not specified or not listed") %>%
as.data.table %>%
setkey(Occ_code) %>%
.[]
devtools::use_data(occupation_decoder, overwrite = TRUE)
## Lookup table: Region code -> state/urbanisation description.
region_decoder <-
readr::read_tsv("Region Region_description
0 ACT major urban - capital city
1 NSW major urban - capital city
2 NSW other urban
3 NSW regional - high urbanisation
4 NSW regional - low urbanisation
5 NSW rural
6 NT major urban - capital city
7 NT regional - high urbanisation
8 NT regional - low urbanisation
9 QLD major urban - capital city
10 QLD other urban
11 QLD regional - high urbanisation
12 QLD regional - low urbanisation
13 QLD rural
14 SA major urban - capital city
15 SA regional - high urbanisation
16 SA regional - low urbanisation
17 SA rural
18 TAS major urban - capital city
19 TAS other urban
20 TAS regional - high urbanisation
21 TAS regional - low urbanisation
22 Tas rural
23 VIC major urban - capital city
24 VIC other urban
25 VIC regional - high urbanisation
26 VIC regional - low urbanisation
27 VIC rural
28 WA major urban - capital city
29 WA other urban
30 WA regional - high urbanisation
31 WA regional - low urbanisation
32 WA rural
34 NSW other
35 WA other") %>%
as.data.table %>%
setkey(Region) %>%
.[]
devtools::use_data(region_decoder, overwrite = TRUE)
|
/data-raw/get-data.R
|
no_license
|
HughParsonage/taxstats
|
R
| false
| false
| 4,324
|
r
|
library(httr)
library(readxl)
library(readr)
library(dplyr)
library(magrittr)
library(zoo) # na.locf
library(data.table)
## Download the combined sample-file archive once; skipped when the zip is
## already cached under data-raw/.
if (!file.exists("./data-raw/Samplefilesall.zip")){
dir.create("data-raw", showWarnings = FALSE)
GET(url = "http://data.gov.au/dataset/62ae540b-01b0-4c2e-a984-b8013884f1ec/resource/6ca75bab-96a6-4852-897c-1c0784d2fec9/download/Allyearssamplefile.zip",
write_disk("./data-raw/Samplefilesall.zip", overwrite = TRUE))
}
## Unpack the outer archive, then every inner per-year "Sample*" archive.
unzip("./data-raw/Samplefilesall.zip", exdir = "data-raw")
for (filename in list.files(pattern = "^Sample", path = "data-raw", full.names = TRUE)){
unzip(filename, exdir = "data-raw")
}
## Read one extracted sample file; "NA", "" and "?" are all missing markers.
read_taxstats <- function(filename){
data.table::fread(filename, na.strings = c("NA", "", "?"))
}
## One data.table per extracted per-year sample file.
taxstats <- lapply(list.files(pattern = "file.*txt$",
                              path = "data-raw",
                              recursive = TRUE,
                              full.names = TRUE),
                   read_taxstats)
# metadata
## Variable descriptions shipped with the 2013 sample file (Excel, sheet 1);
## only the first six columns carry information.
tempf <- paste0(tempfile(), ".xlsx")
GET(url = "http://data.gov.au/dataset/e29ef9ca-0d1a-47ec-9e9b-14a79a941511/resource/07087862-134c-4804-99cc-da8e3a6cfdcb/download/taxstats2013samplefile2013.xlsx",
    write_disk(tempf))
sample_file_variable_names <-
read_excel(tempf, sheet = 1) %>%
.[1:6] %>%
filter(!is.na(No.))
## NOTE(review): devtools::use_data() is deprecated in current devtools;
## usethis::use_data() is the drop-in replacement.
devtools::use_data(sample_file_variable_names)
## Lookup table: age_range code -> ordered age-band description
## (levels ordered youngest to oldest via the descending sort).
age_range_decoder <-
readr::read_tsv("age_range age_range_description
0 70 and over
1 65 to 69
2 60 to 64
3 55 to 59
4 50 to 54
5 45 to 49
6 40 to 44
7 35 to 39
8 30 to 34
9 25 to 29
10 20 to 24
11 under 20
") %>%
arrange(desc(age_range)) %>%
mutate(age_range_description = factor(age_range_description,
                                      levels = unique(.$age_range_description),
                                      ordered = TRUE)) %>%
as.data.table %>%
setkey(age_range) %>%
.[]
devtools::use_data(age_range_decoder, overwrite = TRUE)
## Lookup table: Occ_code -> occupation description.
occupation_decoder <-
readr::read_tsv("Occ_code\tOccupation_description
0 Occupation not listed/ Occupation not specified
1 Managers
2 Professionals
3 Technicians and Trades Workers
4 Community and Personal Service Workers
5 Clerical and Administrative Workers
6 Sales workers
7 Machinery operators and drivers
8 Labourers
9 Consultants, apprentices and type not specified or not listed") %>%
as.data.table %>%
setkey(Occ_code) %>%
.[]
devtools::use_data(occupation_decoder, overwrite = TRUE)
## Lookup table: Region code -> state/urbanisation description.
region_decoder <-
readr::read_tsv("Region Region_description
0 ACT major urban - capital city
1 NSW major urban - capital city
2 NSW other urban
3 NSW regional - high urbanisation
4 NSW regional - low urbanisation
5 NSW rural
6 NT major urban - capital city
7 NT regional - high urbanisation
8 NT regional - low urbanisation
9 QLD major urban - capital city
10 QLD other urban
11 QLD regional - high urbanisation
12 QLD regional - low urbanisation
13 QLD rural
14 SA major urban - capital city
15 SA regional - high urbanisation
16 SA regional - low urbanisation
17 SA rural
18 TAS major urban - capital city
19 TAS other urban
20 TAS regional - high urbanisation
21 TAS regional - low urbanisation
22 Tas rural
23 VIC major urban - capital city
24 VIC other urban
25 VIC regional - high urbanisation
26 VIC regional - low urbanisation
27 VIC rural
28 WA major urban - capital city
29 WA other urban
30 WA regional - high urbanisation
31 WA regional - low urbanisation
32 WA rural
34 NSW other
35 WA other") %>%
as.data.table %>%
setkey(Region) %>%
.[]
devtools::use_data(region_decoder, overwrite = TRUE)
|
# Install or load libraries
tryCatch({
  library(RPostgreSQL)
}, error = function(e) {
  # Fix: the package name was misspelled "RPPostgreSQL" in both calls below,
  # so this fallback install/load branch could never succeed.
  install.packages("RPostgreSQL")
  library(RPostgreSQL)
})
tryCatch({
  library(tidyverse)
}, error = function(e) {
  install.packages("tidyverse")
  library(tidyverse)
})
# Connect to database; credentials are read from environment variables.
drv <- dbDriver('PostgreSQL')
con <- dbConnect(drv,
                 dbname=Sys.getenv("dbname"),
                 host=Sys.getenv("host"),
                 port=5432,
                 user=Sys.getenv("userid"),
                 password=Sys.getenv("userpass")
)
# Make ctgov schema public
dbExecute(con, "SET search_path TO ctgov,public")
# Query database for enrollment of completed trials
completed <- dbGetQuery(con, "SELECT enrollment, overall_status FROM Studies WHERE overall_status = 'Completed'")
# Summarize the total number of trials with the same enrollment count
completed <- completed %>%
  group_by(enrollment) %>%
  summarize(count = n())
# Create histogram to visualize the number of people enrolled in each completed trial
completed %>%
  filter(enrollment <= 20000) %>%
  ggplot() + geom_histogram(mapping = aes(enrollment), bins = 200, fill = 'black') + labs(title = "Total Number of Trials by Enrollment Count", caption = "This histogram helps visualize the general trend of how many trials have a certain number of people enrolled.") + theme(plot.title = element_text(hjust = 0.5))
|
/R_Scripts/total_trials_enrollment_count.R
|
permissive
|
MoravianCollege/ClinicalTrialsViz
|
R
| false
| false
| 1,398
|
r
|
# Install or load libraries
tryCatch({
  library(RPostgreSQL)
}, error = function(e) {
  # Fix: the package name was misspelled "RPPostgreSQL" in both calls below,
  # so this fallback install/load branch could never succeed.
  install.packages("RPostgreSQL")
  library(RPostgreSQL)
})
tryCatch({
  library(tidyverse)
}, error = function(e) {
  install.packages("tidyverse")
  library(tidyverse)
})
# Connect to database; credentials are read from environment variables.
drv <- dbDriver('PostgreSQL')
con <- dbConnect(drv,
                 dbname=Sys.getenv("dbname"),
                 host=Sys.getenv("host"),
                 port=5432,
                 user=Sys.getenv("userid"),
                 password=Sys.getenv("userpass")
)
# Make ctgov schema public
dbExecute(con, "SET search_path TO ctgov,public")
# Query database for enrollment of completed trials
completed <- dbGetQuery(con, "SELECT enrollment, overall_status FROM Studies WHERE overall_status = 'Completed'")
# Summarize the total number of trials with the same enrollment count
completed <- completed %>%
  group_by(enrollment) %>%
  summarize(count = n())
# Create histogram to visualize the number of people enrolled in each completed trial
completed %>%
  filter(enrollment <= 20000) %>%
  ggplot() + geom_histogram(mapping = aes(enrollment), bins = 200, fill = 'black') + labs(title = "Total Number of Trials by Enrollment Count", caption = "This histogram helps visualize the general trend of how many trials have a certain number of people enrolled.") + theme(plot.title = element_text(hjust = 0.5))
|
runKF <- function(y,A,C,Q,R,Z_0,V_0){
  ## Run the Kalman filter forward pass, then the smoother, over y
  ## (note: y is transposed -- time runs across columns).
  ## Matrix roles: C = observation loadings (design matrix), A = state
  ## transition matrix, Q = state disturbance covariance, R = observation
  ## disturbance covariance, Z_0 / V_0 = prior state mean / covariance at t=0.
  filtered <- KFRun(y, A, C, Q, R, Z_0, V_0)
  smoothed <- KSRun(y, C, R, A, Q, filtered)
  ## Package only the smoothed quantities plus the filter log-likelihood.
  list(xSmooth  = smoothed$alpha_t_T,
       VSmooth  = smoothed$cov_alpha_t_T,
       VVSmooth = smoothed$cov_alpha_t_T1,
       loglik   = smoothed$logLik)
}
KFRun <- function(y,A,C,Q,R,Z_0,V_0){
#Kalman filter forward pass over y (time across columns).
#Textbook notation map: A = T (transition), C = Z (design),
#R = H (observation noise cov), Q = Q (state noise cov),
#Z_0 / V_0 = prior state mean / covariance at t = 0.
#Returns predicted (t|t-1) and filtered (t|t) state means and covariances,
#the final-period Kalman gain, and the accumulated log-likelihood.
#NOTE(review): relies on helpers missingData(), getDim() and MASS::ginv()
#defined elsewhere in the project.
n <- dim(C)[1]
m <- dim(C)[2]
numObs <- dim(y)[2]
#create empty output matrices
alpha_t_tm1 <- matrix(NA,m,numObs)#Predicted state vector
cov_alpha_t_tm1 <- array(rep(NA,m*m*numObs),dim=c(m,m,numObs))#Predicted state covariance
alpha_t_t <- matrix(NA,m,numObs+1)#Filtered state vector
#(rep() below supplies m*m*numObs values for an m x m x (numObs+1) array;
#R recycles them, which is harmless since every cell is NA)
cov_alpha_t_t <- array(rep(NA,m*m*numObs),dim=c(m,m,numObs+1))#Filtered state covariance - has an additional element at t0
logLik = 0
#Alpha = state mean, cov_alpha = state covariance
alpha_temp <- Z_0 #init state mean
cov_alpha_temp <- V_0 #init state covariance
#store the prior as the filtered state at t0 (hence the +1 offset above)
alpha_t_t[,1] <- alpha_temp
cov_alpha_t_t[,,1] <- cov_alpha_temp
for(t in 1:numObs){
#print(t)
#prediction step
alpha <- A%*%alpha_temp
cov_alpha <- A%*%cov_alpha_temp%*%t(A) + Q
cov_alpha <- 0.5 * (cov_alpha + t(cov_alpha)) #force symmetry against numerical drift
#drop missing observations for period t (and the matching rows of C and R)
select <- missingData(y[,t,drop=FALSE],C,R)
y_t <- select$y_t
C_t <- select$C
R_t <- select$R
if(getDim(y_t)==0){
#no observations this period: the prediction IS the update
alpha_temp <- alpha
cov_alpha_temp <- cov_alpha
}else{
#update step: eq_3 = P C' (C P C' + R)^-1 is the Kalman gain
eq_1 <- cov_alpha%*%t(C_t)
eq_2 <- ginv(C_t%*%eq_1+R_t)
eq_3 <- eq_1%*%eq_2
#V = one-step-ahead forecast error (innovation)
V <- y_t-C_t%*%alpha
alpha_temp <- alpha+eq_3%*%V
cov_alpha_temp <- cov_alpha-eq_3%*%t(eq_1)
cov_alpha_temp <- 0.5*(cov_alpha_temp+t(cov_alpha_temp))
logLik <- logLik+0.5*(log(det(eq_2))-t(V)%*%eq_2%*%V)
}
alpha_t_tm1[,t] <- alpha
cov_alpha_t_tm1[,,t] <- cov_alpha
alpha_t_t[,t+1] <- alpha_temp
cov_alpha_t_t[,,t+1] <- cov_alpha_temp
}
#NOTE(review): y_t and eq_3 below are whatever the LAST loop iteration left
#behind, so this is the final-period Kalman gain (times C_t), as consumed by
#the smoother.
if(getDim(y_t)==0){
kalmanGain <- matrix(0,m,m)
}else{
kalmanGain <- eq_3%*%C_t
}
res <- list(alpha_t_tm1,cov_alpha_t_tm1,alpha_t_t,cov_alpha_t_t,kalmanGain,logLik)
names(res) <- c("alpha_t_tm1","cov_alpha_t_tm1","alpha_t_t","cov_alpha_t_t","kalmanGain","logLik")
return(res)
}
#KFRunResult <- KFRun(y,A,C,Q,R,Z_0,V_0)
# KSRun: backward (fixed-interval) smoothing pass, run on the output of
# KFRun. Computes smoothed states/covariances plus the lag-one state
# covariances.
#
# Arguments:
#   y, C, R, A, Q - system matrices as in KFRun (R and Q are accepted
#                   but not referenced in this function body).
#   KFRunResult   - the list returned by KFRun().
#
# Returns KFRunResult extended with alpha_t_T (smoothed states),
# cov_alpha_t_T (smoothed covariances; one extra leading slot for t0)
# and cov_alpha_t_T1 (lag-one smoothed covariances).
KSRun <- function(y,C,R,A,Q,KFRunResult){
n <- dim(C)[1]
m <- dim(C)[2]
numObs <- dim(y)[2]
#create empty output matrices
alpha_t_T <- matrix(0,m,numObs+1)#Smoothed state vector
cov_alpha_t_T <- array(rep(0,m*m*numObs),dim=c(m,m,numObs+1))#Smoothed state covariance - has an additional element at t0
#at the final time step the smoothed estimate equals the filtered one
alpha_t_T[,numObs+1] <- drop(KFRunResult$alpha_t_t[,numObs+1])
cov_alpha_t_T[,,numObs+1] <- drop(KFRunResult$cov_alpha_t_t[,,numObs+1])
cov_alpha_t_T1 <- array(rep(0,m*m*numObs),dim=c(m,m,numObs))
#seed the lag-one covariance at T with (I - KC) A P_T|T, using the gain
#product returned by KFRun
cov_alpha_t_T1[,,numObs] <- (diag(m)-KFRunResult$kalmanGain)%*%A%*%drop(KFRunResult$cov_alpha_t_t[,,numObs])
#here, we should explore the possibility of replacing ginv simply with solve.
#ginv tolerates a singular predicted covariance; solve() would be faster
#but errors when cov_alpha_t_tm1 is not invertible.
J_2 <- drop(KFRunResult$cov_alpha_t_t[,,numObs])%*%t(A)%*%ginv(KFRunResult$cov_alpha_t_tm1[,,numObs])
#J_2 <- drop(KFRunResult$cov_alpha_t_t[,,numObs])%*%t(A)%*%solve(KFRunResult$cov_alpha_t_tm1[,,numObs])
#backward recursion from t = numObs down to t = 1
for(t in numObs:1){
cov_alpha_t_t_temp <- drop(KFRunResult$cov_alpha_t_t[,,t])
cov_alpha_t_tm1_temp <- drop(KFRunResult$cov_alpha_t_tm1[,,t])
cov_alpha_t_T_temp <- drop(cov_alpha_t_T[,,t+1])
cov_alpha_t_T1_temp <- drop(cov_alpha_t_T1[,,t])
J_1 <- J_2 #smoother gain for step t (computed on the previous pass)
alpha_t_T[,t] <- KFRunResult$alpha_t_t[,t] + J_1 %*% (alpha_t_T[,t+1]-A%*%KFRunResult$alpha_t_t[,t])
cov_alpha_t_T[,,t] <- cov_alpha_t_t_temp + J_1 %*% (cov_alpha_t_T_temp-cov_alpha_t_tm1_temp) %*% t(J_1)
if(t>1){
#gain for the next (earlier) step
J_2 <- KFRunResult$cov_alpha_t_t[,,t-1]%*%t(A)%*%ginv(drop(KFRunResult$cov_alpha_t_tm1[,,t-1]))
#J_2 <- KFRunResult$cov_alpha_t_t[,,t-1]%*%t(A)%*%solve(drop(KFRunResult$cov_alpha_t_tm1[,,t-1]))
cov_alpha_t_T1[,,t-1] <- cov_alpha_t_t_temp%*%t(J_2)+J_1%*%(cov_alpha_t_T1_temp-(A%*%cov_alpha_t_t_temp))%*%t(J_2)
}
}
#append the three smoothed outputs to the filter result
res <- KFRunResult
res[[7]] <- alpha_t_T
res[[8]] <- cov_alpha_t_T
res[[9]] <- cov_alpha_t_T1
names(res)[c(7,8,9)] <- c("alpha_t_T","cov_alpha_t_T","cov_alpha_t_T1")
return(res)
}
# missingData: restrict the observation equation to the entries of y_t
# that are actually observed (non-NA) at the current time step.
#
# Arguments:
#   y_t - single-column observation vector for one time step.
#   C   - observation/loading matrix.
#   R   - observation disturbance covariance.
#
# Returns a list with the reduced y_t, C and R, plus L, the selection
# matrix (columns of the identity for the observed entries).
missingData <- function(y_t, C, R) {
  observed <- !is.na(y_t)            # mask of observed entries
  ident <- diag(getDim(observed)[1]) # identity sized to the full vector
  L <- ident[, observed, drop = FALSE]
  list(y_t = y_t[observed, drop = FALSE],
       C = C[observed, , drop = FALSE],
       R = R[observed, observed, drop = FALSE],
       L = L)
}
|
/Nowcast_DLARFM_framework/DLMKalmanFilter.R
|
no_license
|
elSomewhere/DLM-Nowcaster---QuantEcon
|
R
| false
| false
| 5,558
|
r
|
# runKF: convenience wrapper — run the Kalman filter (KFRun), then the
# fixed-interval smoother (KSRun), and return only the smoothed output.
#
# Arguments (note: y is transposed — time runs across columns):
#   y   - observation matrix (series x time); NAs mark missing data.
#   A   - state transition matrix (STATE EQUATION).
#   C   - observation/loading matrix (OBSERVATION EQUATION).
#   Q   - state disturbance covariance (STATE EQUATION).
#   R   - observation disturbance covariance (OBSERVATION EQUATION).
#   Z_0 - prior state mean at t = 0.
#   V_0 - prior state covariance at t = 0.
#
# Returns a list with xSmooth (smoothed states), VSmooth (smoothed state
# covariances), VVSmooth (lag-one smoothed covariances) and loglik.
runKF <- function(y,A,C,Q,R,Z_0,V_0){
  filtered <- KFRun(y, A, C, Q, R, Z_0, V_0)
  smoothed <- KSRun(y, C, R, A, Q, filtered)
  list(xSmooth = smoothed$alpha_t_T,
       VSmooth = smoothed$cov_alpha_t_T,
       VVSmooth = smoothed$cov_alpha_t_T1,
       loglik = smoothed$logLik)
}
# KFRun: forward (filtering) pass of the Kalman filter.
#
# Arguments:
#   y   - observation matrix with time across columns; NA entries mark
#         missing observations at a given time step.
#   A   - state transition matrix.
#   C   - observation/loading matrix.
#   Q   - state disturbance covariance.
#   R   - observation disturbance covariance.
#   Z_0 - prior state mean at t = 0.
#   V_0 - prior state covariance at t = 0.
#
# Returns a named list with the one-step-ahead predictions
# (alpha_t_tm1, cov_alpha_t_tm1), the filtered estimates (alpha_t_t,
# cov_alpha_t_t; both carry one extra leading slot holding the t = 0
# prior), the gain object from the final iteration (see note below) and
# the accumulated log-likelihood.
#
# Relies on missingData() and getDim() (defined elsewhere in this file/
# project) and on ginv() — presumably MASS::ginv; confirm it is loaded.
KFRun <- function(y,A,C,Q,R,Z_0,V_0){
#notation used in some papers:
#A = T
#C = Z
#R = H
#Q = Q
#Z_0 <- alpha0
#V_0 <- v0
n <- dim(C)[1] #number of observed series
m <- dim(C)[2] #number of state variables
numObs <- dim(y)[2] #number of time steps
#create empty output matrices
alpha_t_tm1 <- matrix(NA,m,numObs)#Predicted state vector
cov_alpha_t_tm1 <- array(rep(NA,m*m*numObs),dim=c(m,m,numObs))#Predicted state covariance
alpha_t_t <- matrix(NA,m,numObs+1)#Filtered state vector
#NOTE(review): rep() below supplies m*m*numObs values for an array of
#m*m*(numObs+1) cells; array() recycles, which is harmless here only
#because every supplied value is NA.
cov_alpha_t_t <- array(rep(NA,m*m*numObs),dim=c(m,m,numObs+1))#Filtered state covariance - has an additional element at t0
logLik = 0
#alpha_temp = current state mean, cov_alpha_temp = current state covariance
alpha_temp <- Z_0 #init state mean
cov_alpha_temp <- V_0 #init state covariance
#store the prior as the filtered estimate at t0
alpha_t_t[,1] <- alpha_temp
cov_alpha_t_t[,,1] <- cov_alpha_temp
for(t in 1:numObs){
#print(t)
#prediction step
alpha <- A%*%alpha_temp
cov_alpha <- A%*%cov_alpha_temp%*%t(A) + Q
cov_alpha <- 0.5 * (cov_alpha + t(cov_alpha)) #symmetrize to suppress numerical asymmetry
#drop the rows of y/C/R that are missing (NA) at time t
select <- missingData(y[,t,drop=FALSE],C,R)
y_t <- select$y_t
C_t <- select$C
R_t <- select$R
if(getDim(y_t)==0){
#no observations at time t: the filtered estimate is just the prediction
alpha_temp <- alpha
cov_alpha_temp <- cov_alpha
}else{
#update step
eq_1 <- cov_alpha%*%t(C_t) #P %*% C'
eq_2 <- ginv(C_t%*%eq_1+R_t) #inverse of the innovation covariance (C P C' + R)
eq_3 <- eq_1%*%eq_2 #Kalman gain K = P C' (C P C' + R)^-1
#V = forecast error (innovation)
V <- y_t-C_t%*%alpha
alpha_temp <- alpha+eq_3%*%V
cov_alpha_temp <- cov_alpha-eq_3%*%t(eq_1)
cov_alpha_temp <- 0.5*(cov_alpha_temp+t(cov_alpha_temp)) #symmetrize
#NOTE(review): no -0.5*k*log(2*pi) term is added, so logLik appears to
#be the Gaussian log-likelihood only up to an additive constant — fine
#for comparing fits, not for absolute values; confirm with callers.
logLik <- logLik+0.5*(log(det(eq_2))-t(V)%*%eq_2%*%V)
}
alpha_t_tm1[,t] <- alpha
cov_alpha_t_tm1[,,t] <- cov_alpha
alpha_t_t[,t+1] <- alpha_temp
cov_alpha_t_t[,,t+1] <- cov_alpha_temp
}
#NOTE(review): y_t/eq_3/C_t here are leftovers from the FINAL loop
#iteration, so "kalmanGain" is really K %*% C_t at the last time step —
#the m x m product consumed as (I - KC) by the smoother's lag-one seed.
if(getDim(y_t)==0){
kalmanGain <- matrix(0,m,m)
}else{
kalmanGain <- eq_3%*%C_t
}
res <- list(alpha_t_tm1,cov_alpha_t_tm1,alpha_t_t,cov_alpha_t_t,kalmanGain,logLik)
names(res) <- c("alpha_t_tm1","cov_alpha_t_tm1","alpha_t_t","cov_alpha_t_t","kalmanGain","logLik")
return(res)
}
#KFRunResult <- KFRun(y,A,C,Q,R,Z_0,V_0)
# KSRun: backward (fixed-interval) smoothing pass, run on the output of
# KFRun. Computes smoothed states/covariances plus the lag-one state
# covariances.
#
# Arguments:
#   y, C, R, A, Q - system matrices as in KFRun (R and Q are accepted
#                   but not referenced in this function body).
#   KFRunResult   - the list returned by KFRun().
#
# Returns KFRunResult extended with alpha_t_T (smoothed states),
# cov_alpha_t_T (smoothed covariances; one extra leading slot for t0)
# and cov_alpha_t_T1 (lag-one smoothed covariances).
KSRun <- function(y,C,R,A,Q,KFRunResult){
n <- dim(C)[1]
m <- dim(C)[2]
numObs <- dim(y)[2]
#create empty output matrices
alpha_t_T <- matrix(0,m,numObs+1)#Smoothed state vector
cov_alpha_t_T <- array(rep(0,m*m*numObs),dim=c(m,m,numObs+1))#Smoothed state covariance - has an additional element at t0
#at the final time step the smoothed estimate equals the filtered one
alpha_t_T[,numObs+1] <- drop(KFRunResult$alpha_t_t[,numObs+1])
cov_alpha_t_T[,,numObs+1] <- drop(KFRunResult$cov_alpha_t_t[,,numObs+1])
cov_alpha_t_T1 <- array(rep(0,m*m*numObs),dim=c(m,m,numObs))
#seed the lag-one covariance at T with (I - KC) A P_T|T, using the gain
#product returned by KFRun
cov_alpha_t_T1[,,numObs] <- (diag(m)-KFRunResult$kalmanGain)%*%A%*%drop(KFRunResult$cov_alpha_t_t[,,numObs])
#here, we should explore the possibility of replacing ginv simply with solve.
#ginv tolerates a singular predicted covariance; solve() would be faster
#but errors when cov_alpha_t_tm1 is not invertible.
J_2 <- drop(KFRunResult$cov_alpha_t_t[,,numObs])%*%t(A)%*%ginv(KFRunResult$cov_alpha_t_tm1[,,numObs])
#J_2 <- drop(KFRunResult$cov_alpha_t_t[,,numObs])%*%t(A)%*%solve(KFRunResult$cov_alpha_t_tm1[,,numObs])
#backward recursion from t = numObs down to t = 1
for(t in numObs:1){
cov_alpha_t_t_temp <- drop(KFRunResult$cov_alpha_t_t[,,t])
cov_alpha_t_tm1_temp <- drop(KFRunResult$cov_alpha_t_tm1[,,t])
cov_alpha_t_T_temp <- drop(cov_alpha_t_T[,,t+1])
cov_alpha_t_T1_temp <- drop(cov_alpha_t_T1[,,t])
J_1 <- J_2 #smoother gain for step t (computed on the previous pass)
alpha_t_T[,t] <- KFRunResult$alpha_t_t[,t] + J_1 %*% (alpha_t_T[,t+1]-A%*%KFRunResult$alpha_t_t[,t])
cov_alpha_t_T[,,t] <- cov_alpha_t_t_temp + J_1 %*% (cov_alpha_t_T_temp-cov_alpha_t_tm1_temp) %*% t(J_1)
if(t>1){
#gain for the next (earlier) step
J_2 <- KFRunResult$cov_alpha_t_t[,,t-1]%*%t(A)%*%ginv(drop(KFRunResult$cov_alpha_t_tm1[,,t-1]))
#J_2 <- KFRunResult$cov_alpha_t_t[,,t-1]%*%t(A)%*%solve(drop(KFRunResult$cov_alpha_t_tm1[,,t-1]))
cov_alpha_t_T1[,,t-1] <- cov_alpha_t_t_temp%*%t(J_2)+J_1%*%(cov_alpha_t_T1_temp-(A%*%cov_alpha_t_t_temp))%*%t(J_2)
}
}
#append the three smoothed outputs to the filter result
res <- KFRunResult
res[[7]] <- alpha_t_T
res[[8]] <- cov_alpha_t_T
res[[9]] <- cov_alpha_t_T1
names(res)[c(7,8,9)] <- c("alpha_t_T","cov_alpha_t_T","cov_alpha_t_T1")
return(res)
}
# missingData: restrict the observation equation to the entries of y_t
# that are actually observed (non-NA) at the current time step.
#
# Arguments:
#   y_t - single-column observation vector for one time step.
#   C   - observation/loading matrix.
#   R   - observation disturbance covariance.
#
# Returns a list with the reduced y_t, C and R, plus L, the selection
# matrix (columns of the identity for the observed entries).
missingData <- function(y_t, C, R) {
  observed <- !is.na(y_t)            # mask of observed entries
  ident <- diag(getDim(observed)[1]) # identity sized to the full vector
  L <- ident[, observed, drop = FALSE]
  list(y_t = y_t[observed, drop = FALSE],
       C = C[observed, , drop = FALSE],
       R = R[observed, observed, drop = FALSE],
       L = L)
}
|
#################################################################
## UT-KBRIN Bioinformatics Summit 2017
## Module 1: Introduction to R and RStudio, Reading Data into R
## The R Team
## Department of Statistics, University of Kentucky
## April 21, 2017
##################################################################################
#### Set Working Directory
# NOTE(review): machine-specific absolute path; will fail on any other machine.
setwd("C:/Users/ukystat/Dropbox/UT-KBRIN R Workshop/") # Command to set working directory
getwd() # Displays current working directory
#### Read in data
practicedata = read.table('practicedata.csv', # Give filename first
                          header=TRUE, # If filename has variable names, set header to TRUE.
                          # Otherwise, use header=FALSE
                          sep=",", # Symbol separating data values (comma here)
                          na.strings=c("","NA") # Characters used to denote missing values
                          #, comment.char='#', # Character used to indicate comments in your file
                          #skip=0, # number of lines of data file to skip before reading in data
                          #nrows=1000 # maximum number of lines of data file to read in
)
practicedata[1:5,] # Prints first 5 rows of the data
practicedata[,1:2] # Prints first 2 columns of the data
practicedata[,"expvar"] # One way to call the variable, expvar
practicedata$expvar # Another way to call the variable, expvar
practicedata$expvar
#### Subset data into data for control and treatment groups
practicedata2=practicedata[order(practicedata$groupvar2),] # sorted copy (not used below)
k=50 # number of observations in each group
n=100 # total number of observations
# NOTE(review): this subsets the UNSORTED practicedata, not the sorted
# practicedata2, so rows 51-100 are only "the treatment group" if the raw
# file happens to be ordered; the subset() call below overwrites trtmtdata anyway.
trtmtdata = practicedata[(k+1):n, ] # Save the 51st through 100th rows of the data
#### Subset data into data for control and treatment groups using subset function
trtmtdata = subset(practicedata , groupvar=='Treatment')
controldata = subset(practicedata , groupvar=='Control')
### Check format of practice data
class(practicedata)
#### Writing Data to Files
getwd() #Check current working directory
resp.log=log(practicedata$respvar) # take the natural log of the response variable
# Put columns (variables) together in a new data frame
data.to.write=data.frame(practicedata$expvar,practicedata$groupvar,resp.log)
colnames(data.to.write) # print out column names of the new data
colnames(data.to.write)<-c('expvar','groupvar','logrespvar') # rename the columns of
# the new dataset
colnames(data.to.write) # print out column names of the new data again
##To write data to a new .csv file:
write.table(data.to.write, # data to write to a file
            file='logdata.csv', # name of file you want to save data in
            quote=FALSE, # whether or not to put quotations around data
            col.names=TRUE, # whether or not to write column names to file
            row.names=FALSE, # whether or not to write row names to file
            sep=',', # what you want to put between data entries (commas and spaces are common)
            append=FALSE, # whether or not to append existing data to the current file
            na='NA' # string to use for missing values
)
|
/module1.R
|
no_license
|
pdtrang/R-Workshop-UT-KBRIN-BioSummit2017
|
R
| false
| false
| 3,130
|
r
|
#################################################################
## UT-KBRIN Bioinformatics Summit 2017
## Module 1: Introduction to R and RStudio, Reading Data into R
## The R Team
## Department of Statistics, University of Kentucky
## April 21, 2017
##################################################################################
#### Set Working Directory
# NOTE(review): machine-specific absolute path; will fail on any other machine.
setwd("C:/Users/ukystat/Dropbox/UT-KBRIN R Workshop/") # Command to set working directory
getwd() # Displays current working directory
#### Read in data
practicedata = read.table('practicedata.csv', # Give filename first
                          header=TRUE, # If filename has variable names, set header to TRUE.
                          # Otherwise, use header=FALSE
                          sep=",", # Symbol separating data values (comma here)
                          na.strings=c("","NA") # Characters used to denote missing values
                          #, comment.char='#', # Character used to indicate comments in your file
                          #skip=0, # number of lines of data file to skip before reading in data
                          #nrows=1000 # maximum number of lines of data file to read in
)
practicedata[1:5,] # Prints first 5 rows of the data
practicedata[,1:2] # Prints first 2 columns of the data
practicedata[,"expvar"] # One way to call the variable, expvar
practicedata$expvar # Another way to call the variable, expvar
practicedata$expvar
#### Subset data into data for control and treatment groups
practicedata2=practicedata[order(practicedata$groupvar2),] # sorted copy (not used below)
k=50 # number of observations in each group
n=100 # total number of observations
# NOTE(review): this subsets the UNSORTED practicedata, not the sorted
# practicedata2, so rows 51-100 are only "the treatment group" if the raw
# file happens to be ordered; the subset() call below overwrites trtmtdata anyway.
trtmtdata = practicedata[(k+1):n, ] # Save the 51st through 100th rows of the data
#### Subset data into data for control and treatment groups using subset function
trtmtdata = subset(practicedata , groupvar=='Treatment')
controldata = subset(practicedata , groupvar=='Control')
### Check format of practice data
class(practicedata)
#### Writing Data to Files
getwd() #Check current working directory
resp.log=log(practicedata$respvar) # take the natural log of the response variable
# Put columns (variables) together in a new data frame
data.to.write=data.frame(practicedata$expvar,practicedata$groupvar,resp.log)
colnames(data.to.write) # print out column names of the new data
colnames(data.to.write)<-c('expvar','groupvar','logrespvar') # rename the columns of
# the new dataset
colnames(data.to.write) # print out column names of the new data again
##To write data to a new .csv file:
write.table(data.to.write, # data to write to a file
            file='logdata.csv', # name of file you want to save data in
            quote=FALSE, # whether or not to put quotations around data
            col.names=TRUE, # whether or not to write column names to file
            row.names=FALSE, # whether or not to write row names to file
            sep=',', # what you want to put between data entries (commas and spaces are common)
            append=FALSE, # whether or not to append existing data to the current file
            na='NA' # string to use for missing values
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/markov_wrapper.R
\name{markov_model}
\alias{markov_model}
\title{Example of Markov model function}
\usage{
markov_model(l_param_all, strategy = NULL)
}
\arguments{
\item{l_param_all}{All parameter inputs. This is a named list organized as
`list(par1 = 0.3, par2 = 0.5, df1 = data.frame(t = c(1:3), param = c(0.2, 0.2, 0.3)))`}
\item{strategy}{The strategy of interest. The default is `NULL`, which is the do nothing strategy.}
}
\value{
The function returns a `data.frame` with first column as the strategy of interest,
and the other columns are the outcomes of interest.
}
\description{
This Markov model function uses the Ontario tanning ban as an example.
}
|
/man/markov_model.Rd
|
no_license
|
MedipAnalytics/CEAutil
|
R
| false
| true
| 744
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/markov_wrapper.R
\name{markov_model}
\alias{markov_model}
\title{Example of Markov model function}
\usage{
markov_model(l_param_all, strategy = NULL)
}
\arguments{
\item{l_param_all}{All parameter inputs. This is a named list organized as
`list(par1 = 0.3, par2 = 0.5, df1 = data.frame(t = c(1:3), param = c(0.2, 0.2, 0.3)))`}
\item{strategy}{The strategy of interest. The default is `NULL`, which is the do nothing strategy.}
}
\value{
The function returns a `data.frame` with first column as the strategy of interest,
and the other columns are the outcomes of interest.
}
\description{
This Markov model function uses the Ontario tanning ban as an example.
}
|
# Generate word clouds of Alberta-election tweets for each hashtag /
# party, one PNG per query, written into the abpoli/ directory.
library(RColorBrewer)
library(gridExtra)
# Fix: wordcloud() is called below but the package was never attached in
# this file; attach it explicitly rather than relying on getTweets.R.
library(wordcloud)
source("getTweets.R")
# Number of tweets to fetch per query.
N <- 2000
dir.create("abpoli", showWarnings = FALSE)
# Overall #abpoli / #abvote conversation (wide banner image).
abpoli <- getTweets(keyword = "#abpoli OR #abvote", n = N, exclude.words = c("http","ableg","cdnpoli","abpoli","onpoli"))
png("abpoli/abpoli.png", width = 2880, height = 800)
wordcloud(abpoli$word, abpoli$freq, scale = c(12,4), max.words = 150, min.freq = 4, fixed.asp = FALSE, rot.per = 0, colors = (brewer.pal(9,"Greens")[4:9]))
dev.off()
# Per-party clouds (portrait images), each with a party-themed palette.
ndp <- getTweets(keyword = "#abndp", n = N, exclude.words = c("http","ableg","cdnpoli","abvote","ableg","abpoli"))
png("abpoli/abndp.png", width = 720, height = 1000)
wordcloud(ndp$word, ndp$freq, scale = c(8,2), max.words = 150, use.r.layout = FALSE, fixed.asp = FALSE, rot.per = 0, colors = (brewer.pal(9,"Oranges")[4:9]))
dev.off()
abpc <- getTweets(keyword = "#abpc OR #pcaa", n = N, exclude.words = c("http","ableg","cdnpoli","abvote","ableg","abpoli","abpc"))
png("abpoli/abpc.png", width = 720, height = 1000)
wordcloud(abpc$word, abpc$freq, scale = c(8,2), max.words = 100, use.r.layout = FALSE, fixed.asp = FALSE, rot.per = 0, colors = (brewer.pal(9,"Blues")[4:9]))
dev.off()
wrp <- getTweets(keyword = "#wrp", n = N, exclude.words = c("http","ableg","cdnpoli","abvote","ableg","abpoli"))
png("abpoli/abwrp.png", width = 720, height = 1000)
wordcloud(wrp$word, wrp$freq, scale = c(8,2), max.words = 100, use.r.layout = FALSE, fixed.asp = FALSE, rot.per = 0, colors = (brewer.pal(6,"PiYG")))
dev.off()
ablib <- getTweets(keyword = "#ablib", n = N, exclude.words = c("http","ableg","cdnpoli","abvote","ableg","abpoli"))
png("abpoli/ablib.png", width = 720, height = 1000)
wordcloud(ablib$word, ablib$freq, scale = c(8,2), max.words = 100, use.r.layout = FALSE, fixed.asp = FALSE, rot.per = 0, colors = (brewer.pal(9,"Reds")[4:9]))
dev.off()
|
/abvote_2015.R
|
no_license
|
mikebirdgeneau/abvote_wordcloud
|
R
| false
| false
| 1,712
|
r
|
# Generate word clouds of Alberta-election tweets for each hashtag /
# party, one PNG per query, written into the abpoli/ directory.
library(RColorBrewer)
library(gridExtra)
# Fix: wordcloud() is called below but the package was never attached in
# this file; attach it explicitly rather than relying on getTweets.R.
library(wordcloud)
source("getTweets.R")
# Number of tweets to fetch per query.
N <- 2000
dir.create("abpoli", showWarnings = FALSE)
# Overall #abpoli / #abvote conversation (wide banner image).
abpoli <- getTweets(keyword = "#abpoli OR #abvote", n = N, exclude.words = c("http","ableg","cdnpoli","abpoli","onpoli"))
png("abpoli/abpoli.png", width = 2880, height = 800)
wordcloud(abpoli$word, abpoli$freq, scale = c(12,4), max.words = 150, min.freq = 4, fixed.asp = FALSE, rot.per = 0, colors = (brewer.pal(9,"Greens")[4:9]))
dev.off()
# Per-party clouds (portrait images), each with a party-themed palette.
ndp <- getTweets(keyword = "#abndp", n = N, exclude.words = c("http","ableg","cdnpoli","abvote","ableg","abpoli"))
png("abpoli/abndp.png", width = 720, height = 1000)
wordcloud(ndp$word, ndp$freq, scale = c(8,2), max.words = 150, use.r.layout = FALSE, fixed.asp = FALSE, rot.per = 0, colors = (brewer.pal(9,"Oranges")[4:9]))
dev.off()
abpc <- getTweets(keyword = "#abpc OR #pcaa", n = N, exclude.words = c("http","ableg","cdnpoli","abvote","ableg","abpoli","abpc"))
png("abpoli/abpc.png", width = 720, height = 1000)
wordcloud(abpc$word, abpc$freq, scale = c(8,2), max.words = 100, use.r.layout = FALSE, fixed.asp = FALSE, rot.per = 0, colors = (brewer.pal(9,"Blues")[4:9]))
dev.off()
wrp <- getTweets(keyword = "#wrp", n = N, exclude.words = c("http","ableg","cdnpoli","abvote","ableg","abpoli"))
png("abpoli/abwrp.png", width = 720, height = 1000)
wordcloud(wrp$word, wrp$freq, scale = c(8,2), max.words = 100, use.r.layout = FALSE, fixed.asp = FALSE, rot.per = 0, colors = (brewer.pal(6,"PiYG")))
dev.off()
ablib <- getTweets(keyword = "#ablib", n = N, exclude.words = c("http","ableg","cdnpoli","abvote","ableg","abpoli"))
png("abpoli/ablib.png", width = 720, height = 1000)
wordcloud(ablib$word, ablib$freq, scale = c(8,2), max.words = 100, use.r.layout = FALSE, fixed.asp = FALSE, rot.per = 0, colors = (brewer.pal(9,"Reds")[4:9]))
dev.off()
|
################################################################################
#
# Social interaction model: Sweep beta and group size parameter space
#
################################################################################
# NOTE(review): clears the entire workspace — fine for a batch job,
# destructive if this script is sourced interactively.
rm(list = ls())
####################
# Source necessary scripts/libraries
####################
# Assumed to define seed_stimuls(), seed_thresholds(), update_stim(),
# calc_determ_thresh(), update_task_performance(), temporalNetwork(),
# adjust_thresholds_social_capped(), mutualEntropy() and to load the
# packages (dplyr, stringr, ...) used at top level below — TODO confirm.
source("scripts/util/__Util__MASTER.R")
library(parallel)
library(snowfall)
####################
# Set global variables
####################
# Initial paramters: Free to change
# Base parameters
Ns <- seq(5, 100, 5) #vector of number of individuals to simulate
m <- 2 #number of tasks
Tsteps <- 50000 #number of time steps to run simulation
reps <- 100 #number of replications per simulation (for ensemble)
# Threshold Parameters
ThreshM <- rep(50, m) #population threshold means
ThreshSD <- ThreshM * 0 #population threshold standard deviations (zero: deterministic thresholds)
InitialStim <- rep(0, m) #intital vector of stimuli
deltas <- rep(0.8, m) #vector of stimuli increase rates
alpha <- m #efficiency of task performance
quitP <- 0.2 #probability of quitting task once active
# Social Network Parameters
p <- 1 #baseline probablity of initiating an interaction per time step
epsilon <- 0.1 #relative weighting of social interactions for adjusting thresholds
betas <- seq(1.05, 1.09, 0.01) #probability of interacting with individual in same state relative to others
####################
# Prep for Parallelization
####################
# Create parameter combinations for parallelization
run_in_parallel <- expand.grid(n = Ns, beta = betas)
run_in_parallel <- run_in_parallel %>%
arrange(n)
# Create directory for depositing data
storage_path <- "/scratch/gpfs/ctokita/"
file_name <- paste0("GroupSizeBetaSweep_Sigma", ThreshSD[1], "-Epsilon", epsilon)
full_path <- paste0(storage_path, file_name, '/')
dir.create(full_path, showWarnings = FALSE)
# Check if there is already some runs done.
# Output files are expected to be named "n<digits>-beta<number>.Rdata"
# (the format written by the worker below); anything else yields NA here.
files <- list.files(full_path)
completed_runs <- data.frame(n = as.numeric(gsub(x = files, "n([0-9]+)-.*", "\\1", perl = T)))
completed_runs$beta <- as.numeric(gsub(x = files, ".*-beta([\\.0-9]+).Rdata$", "\\1", perl = T))
# Drop combinations whose output file already exists (resume support)
run_in_parallel <- anti_join(run_in_parallel, completed_runs, by = c("n", "beta"))
# Prepare for parallel: one worker per core, export the whole global
# environment and attach the packages on every worker
no_cores <- detectCores()
sfInit(parallel = TRUE, cpus = no_cores)
sfExportAll()
sfLibrary(dplyr)
sfLibrary(reshape2)
sfLibrary(igraph)
sfLibrary(ggplot2)
sfLibrary(msm)
sfLibrary(gtools)
sfLibrary(snowfall)
sfLibrary(tidyr)
sfLibrary(stringr)
# sfClusterSetupRNGstream(seed = 105)
####################
# Run ensemble simulation
####################
# Loop through group sizes (and chunks)
# Parameter-sweep worker: for each remaining (n, beta) combination,
# simulate `reps` replicate groups for `Tsteps` steps, compute the
# mutual-entropy measures of task specialization, and save the
# per-combination summary to its own .Rdata file.
#
# Fix vs. original: seq_len(nrow(...)) replaces 1:nrow(...), which
# evaluates to c(1, 0) and crashes when every combination has already
# been completed (run_in_parallel filtered down to zero rows).
parallel_simulations <- sfLapply(seq_len(nrow(run_in_parallel)), function(k) {
# Set group size and interaction bias for this combination
n <- run_in_parallel[k, 1]
beta <- run_in_parallel[k, 2]
# Prep lists for collection of simulation outputs from this group size
ens_entropy <- list()
# Run Simulations
for (sim in 1:reps) {
####################
# Seed structures and intial matrices
####################
# Set initial probability matrix (P_g)
P_g <- matrix(data = rep(0, n * m), ncol = m)
# Seed task (external) stimuli
stimMat <- seed_stimuls(intitial_stim = InitialStim,
                        Tsteps = Tsteps)
# Seed internal thresholds
threshMat <- seed_thresholds(n = n,
                             m = m,
                             threshold_means = ThreshM,
                             threshold_sds = ThreshSD)
# Start task performance
X_g <- matrix(data = rep(0, length(P_g)), ncol = ncol(P_g))
# Create cumulative task performance matrix
X_tot <- X_g
# Create cumulative adjacency matrix
g_tot <- matrix(data = rep(0, n * n), ncol = n)
colnames(g_tot) <- paste0("v-", 1:n)
rownames(g_tot) <- paste0("v-", 1:n)
####################
# Simulate individual run
####################
# Run simulation
for (t in 1:Tsteps) {
# Current timestep is actually t+1 in this formulation, because first row is timestep 0
# Update stimuli
stimMat <- update_stim(stim_matrix = stimMat,
                       deltas = deltas,
                       alpha = alpha,
                       state_matrix = X_g,
                       time_step = t)
# Calculate task demand based on global stimuli
P_g <- calc_determ_thresh(time_step = t + 1, # first row is generation 0
                          threshold_matrix = threshMat,
                          stimulus_matrix = stimMat)
# Update task performance
X_g <- update_task_performance(task_probs = P_g,
                               state_matrix = X_g,
                               quit_prob = quitP)
# Update social network (previously this was before probability/task update)
g_adj <- temporalNetwork(X_sub_g = X_g,
                         prob_interact = p,
                         bias = beta)
g_tot <- g_tot + g_adj
# Adjust thresholds
threshMat <- adjust_thresholds_social_capped(social_network = g_adj,
                                             threshold_matrix = threshMat,
                                             state_matrix = X_g,
                                             epsilon = epsilon,
                                             threshold_max = 100)
# Update total task performance profile
X_tot <- X_tot + X_g
}
####################
# Post run calculations
####################
# Calculate Entropy
entropy <- as.data.frame(mutualEntropy(TotalStateMat = X_tot))
entropy$n <- n
entropy$beta <- beta
# Add entropy values to list
ens_entropy[[sim]] <- entropy
# Clean up per-replicate objects before the next iteration
rm(X_tot, stimMat, threshMat, g_tot, g_adj, P_g, X_g)
}
# Bind together and summarise across replicates
entropy_sum <- do.call("rbind", ens_entropy)
entropy_sum <- entropy_sum %>%
group_by(n, beta) %>%
summarise(Dsym_mean = mean(Dsym),
          Dysm_SD = sd(Dsym),
          Dtask_mean = mean(Dtask),
          Dtask_SD = sd(Dtask),
          Dind_mean = mean(Dind),
          Dind_SD = sd(Dind))
entropy_sum <- as.data.frame(entropy_sum)
# Write this combination's summary to its own file; the file name
# encodes n (zero-padded) and beta, matching the resume check above.
save(entropy_sum, file = paste0(full_path,
                                "n",
                                str_pad(string = n, width = 3, pad = "0"),
                                "-beta",
                                beta,
                                ".Rdata"))
# Brief pause — presumably to stagger concurrent filesystem writes; confirm.
Sys.sleep(1)
})
# Shut down the snowfall worker cluster.
sfStop()
# Bind and save (disabled: each worker already saves its own summary file)
# parallel_data <- do.call('rbind', parallel_simulations)
# Create directory for depositing data
# storage_path <- "/scratch/gpfs/ctokita/"
# file_name <- paste0("GroupSizeBetaSweep_Sigma", ThreshSD[1], "-Epsilon", epsilon)
# full_path <- paste0(storage_path, file_name, '.Rdata')
# save(parallel_data, file = full_path)
|
/scripts/3_para_sweep/3a_BetaParaSweep_2.R
|
no_license
|
christokita/socially-modulated-threshold-model
|
R
| false
| false
| 7,035
|
r
|
################################################################################
#
# Social interaction model: Sweep beta and group size parameter space
#
################################################################################
# NOTE(review): clears the entire workspace — fine for a batch job,
# destructive if this script is sourced interactively.
rm(list = ls())
####################
# Source necessary scripts/libraries
####################
# Assumed to define seed_stimuls(), seed_thresholds(), update_stim(),
# calc_determ_thresh(), update_task_performance(), temporalNetwork(),
# adjust_thresholds_social_capped(), mutualEntropy() and to load the
# packages (dplyr, stringr, ...) used at top level below — TODO confirm.
source("scripts/util/__Util__MASTER.R")
library(parallel)
library(snowfall)
####################
# Set global variables
####################
# Initial paramters: Free to change
# Base parameters
Ns <- seq(5, 100, 5) #vector of number of individuals to simulate
m <- 2 #number of tasks
Tsteps <- 50000 #number of time steps to run simulation
reps <- 100 #number of replications per simulation (for ensemble)
# Threshold Parameters
ThreshM <- rep(50, m) #population threshold means
ThreshSD <- ThreshM * 0 #population threshold standard deviations (zero: deterministic thresholds)
InitialStim <- rep(0, m) #intital vector of stimuli
deltas <- rep(0.8, m) #vector of stimuli increase rates
alpha <- m #efficiency of task performance
quitP <- 0.2 #probability of quitting task once active
# Social Network Parameters
p <- 1 #baseline probablity of initiating an interaction per time step
epsilon <- 0.1 #relative weighting of social interactions for adjusting thresholds
betas <- seq(1.05, 1.09, 0.01) #probability of interacting with individual in same state relative to others
####################
# Prep for Parallelization
####################
# Create parameter combinations for parallelization
run_in_parallel <- expand.grid(n = Ns, beta = betas)
run_in_parallel <- run_in_parallel %>%
arrange(n)
# Create directory for depositing data
storage_path <- "/scratch/gpfs/ctokita/"
file_name <- paste0("GroupSizeBetaSweep_Sigma", ThreshSD[1], "-Epsilon", epsilon)
full_path <- paste0(storage_path, file_name, '/')
dir.create(full_path, showWarnings = FALSE)
# Check if there is already some runs done.
# Output files are expected to be named "n<digits>-beta<number>.Rdata"
# (the format written by the worker below); anything else yields NA here.
files <- list.files(full_path)
completed_runs <- data.frame(n = as.numeric(gsub(x = files, "n([0-9]+)-.*", "\\1", perl = T)))
completed_runs$beta <- as.numeric(gsub(x = files, ".*-beta([\\.0-9]+).Rdata$", "\\1", perl = T))
# Drop combinations whose output file already exists (resume support)
run_in_parallel <- anti_join(run_in_parallel, completed_runs, by = c("n", "beta"))
# Prepare for parallel: one worker per core, export the whole global
# environment and attach the packages on every worker
no_cores <- detectCores()
sfInit(parallel = TRUE, cpus = no_cores)
sfExportAll()
sfLibrary(dplyr)
sfLibrary(reshape2)
sfLibrary(igraph)
sfLibrary(ggplot2)
sfLibrary(msm)
sfLibrary(gtools)
sfLibrary(snowfall)
sfLibrary(tidyr)
sfLibrary(stringr)
# sfClusterSetupRNGstream(seed = 105)
####################
# Run ensemble simulation
####################
# Loop through group sizes (and chunks)
# Parameter-sweep worker: for each remaining (n, beta) combination,
# simulate `reps` replicate groups for `Tsteps` steps, compute the
# mutual-entropy measures of task specialization, and save the
# per-combination summary to its own .Rdata file.
#
# Fix vs. original: seq_len(nrow(...)) replaces 1:nrow(...), which
# evaluates to c(1, 0) and crashes when every combination has already
# been completed (run_in_parallel filtered down to zero rows).
parallel_simulations <- sfLapply(seq_len(nrow(run_in_parallel)), function(k) {
# Set group size and interaction bias for this combination
n <- run_in_parallel[k, 1]
beta <- run_in_parallel[k, 2]
# Prep lists for collection of simulation outputs from this group size
ens_entropy <- list()
# Run Simulations
for (sim in 1:reps) {
####################
# Seed structures and intial matrices
####################
# Set initial probability matrix (P_g)
P_g <- matrix(data = rep(0, n * m), ncol = m)
# Seed task (external) stimuli
stimMat <- seed_stimuls(intitial_stim = InitialStim,
                        Tsteps = Tsteps)
# Seed internal thresholds
threshMat <- seed_thresholds(n = n,
                             m = m,
                             threshold_means = ThreshM,
                             threshold_sds = ThreshSD)
# Start task performance
X_g <- matrix(data = rep(0, length(P_g)), ncol = ncol(P_g))
# Create cumulative task performance matrix
X_tot <- X_g
# Create cumulative adjacency matrix
g_tot <- matrix(data = rep(0, n * n), ncol = n)
colnames(g_tot) <- paste0("v-", 1:n)
rownames(g_tot) <- paste0("v-", 1:n)
####################
# Simulate individual run
####################
# Run simulation
for (t in 1:Tsteps) {
# Current timestep is actually t+1 in this formulation, because first row is timestep 0
# Update stimuli
stimMat <- update_stim(stim_matrix = stimMat,
                       deltas = deltas,
                       alpha = alpha,
                       state_matrix = X_g,
                       time_step = t)
# Calculate task demand based on global stimuli
P_g <- calc_determ_thresh(time_step = t + 1, # first row is generation 0
                          threshold_matrix = threshMat,
                          stimulus_matrix = stimMat)
# Update task performance
X_g <- update_task_performance(task_probs = P_g,
                               state_matrix = X_g,
                               quit_prob = quitP)
# Update social network (previously this was before probability/task update)
g_adj <- temporalNetwork(X_sub_g = X_g,
                         prob_interact = p,
                         bias = beta)
g_tot <- g_tot + g_adj
# Adjust thresholds
threshMat <- adjust_thresholds_social_capped(social_network = g_adj,
                                             threshold_matrix = threshMat,
                                             state_matrix = X_g,
                                             epsilon = epsilon,
                                             threshold_max = 100)
# Update total task performance profile
X_tot <- X_tot + X_g
}
####################
# Post run calculations
####################
# Calculate Entropy
entropy <- as.data.frame(mutualEntropy(TotalStateMat = X_tot))
entropy$n <- n
entropy$beta <- beta
# Add entropy values to list
ens_entropy[[sim]] <- entropy
# Clean up per-replicate objects before the next iteration
rm(X_tot, stimMat, threshMat, g_tot, g_adj, P_g, X_g)
}
# Bind together and summarise across replicates
entropy_sum <- do.call("rbind", ens_entropy)
entropy_sum <- entropy_sum %>%
group_by(n, beta) %>%
summarise(Dsym_mean = mean(Dsym),
          Dysm_SD = sd(Dsym),
          Dtask_mean = mean(Dtask),
          Dtask_SD = sd(Dtask),
          Dind_mean = mean(Dind),
          Dind_SD = sd(Dind))
entropy_sum <- as.data.frame(entropy_sum)
# Write this combination's summary to its own file; the file name
# encodes n (zero-padded) and beta, matching the resume check above.
save(entropy_sum, file = paste0(full_path,
                                "n",
                                str_pad(string = n, width = 3, pad = "0"),
                                "-beta",
                                beta,
                                ".Rdata"))
# Brief pause — presumably to stagger concurrent filesystem writes; confirm.
Sys.sleep(1)
})
# Shut down the snowfall worker cluster.
sfStop()
# Bind and save (disabled: each worker already saves its own summary file)
# parallel_data <- do.call('rbind', parallel_simulations)
# Create directory for depositing data
# storage_path <- "/scratch/gpfs/ctokita/"
# file_name <- paste0("GroupSizeBetaSweep_Sigma", ThreshSD[1], "-Epsilon", epsilon)
# full_path <- paste0(storage_path, file_name, '.Rdata')
# save(parallel_data, file = full_path)
|
###
# SET WORKING DIRECTORY TO SOURCE FILE LOCATION
# Session -> Set Working Directory -> To Sourec File Location
###
#install.packages("ggrepel")
#install.packages("dplyr")
#install.packages("sqldf")
#install.packages("ggplot2")
library(sqldf)
library(ggplot2)
library(dplyr)
library(ggrepel)
deliveries <- read.csv('deliveries.csv')
########################################################################################################################
# Perform analysis of the data to create the derived tables used in the analysis below
########################################################################################################################

# Total number of matches each player bowled in (distinct match ids per bowler).
matches_per_player <- sqldf('
  select bowler,
  count(DISTINCT(match_id)) as "total_matches"
  from deliveries group by bowler')

# Dot balls, legal deliveries, and runs conceded by each player.
# BUG FIX: a delivery is illegal when it is a wide OR a no-ball; the original
# condition `wide_runs == 1 & noball_runs == 1` can never be true for a single
# delivery, so every ball was being counted as legal. `> 0` also catches
# wides/no-balls on which additional runs were scored.
bowler_stats <- group_by(deliveries, bowler)%>%
  summarise(dot_balls = sum(ifelse(batsman_runs == 0 & extra_runs == 0, 1, 0)),
            total_balls = sum(ifelse(wide_runs > 0 | noball_runs > 0, 0, 1)),
            total_runs = sum(batsman_runs + extra_runs))

# Add the number of matches played per player.
bowler_stats <- merge(bowler_stats, matches_per_player, by.x = "bowler", by.y = "bowler")

# Total wickets credited to the bowler.
# NOTE(review): "stumped" and "hit wicket" also credit the bowler but are not
# counted here — confirm whether that exclusion is intentional.
total_wickets_per_player <- group_by(deliveries, bowler)%>%
  summarise(wickets = sum(ifelse(dismissal_kind
                                 %in% c("caught", "bowled", "lbw", "caught and bowled"), 1, 0)))
bowler_stats <- merge(bowler_stats,
                      total_wickets_per_player,
                      by.x = "bowler",
                      by.y = "bowler")

# Drop the bowler name so the remaining numeric columns can be boxplotted.
bowler_overall_stats <- sqldf('
  select dot_balls as "Dots" ,
  total_balls as "Deliveries",
  total_runs as "Runs_Conceded",
  wickets,
  total_matches
  from bowler_stats')

# Per-match averages for each bowler.
# BUG FIX: sqldf runs on SQLite, where integer / integer truncates toward zero.
# `total_runs` (and `total_matches`) are integer columns, so the averages were
# being truncated; multiplying by 1.0 forces floating-point division.
bowler_stats_Per_match <- sqldf('
  select (total_balls * 1.0 / total_matches) as "Deliveries_Per_Match",
  (total_runs * 1.0 / total_matches) as "Runs_Conceded_Per_Match",
  (dot_balls * 1.0 / total_matches) as "Dots_Per_Match",
  (wickets * 1.0 / total_matches) as "Wickets_Per_Match"
  from bowler_stats')
########################################################################################################################
# Boxplot of deliveries per player per match
########################################################################################################################
# Writes the figure into the Images/ directory (must already exist), then
# prints a five-number summary of the per-match delivery counts.
png(filename="Images/Boxplot_Player_Deliveries_Per_Match.png")
boxplot(bowler_stats_Per_match$Deliveries_Per_Match,
main="Boxplot of Deliveries per player per match",
ylab="Number of Deliveries")
dev.off()
summary(bowler_stats_Per_match$Deliveries_Per_Match)
########################################################################################################################
# Scatter plot of runs conceded vs number of deliveries
########################################################################################################################
png(filename="Images/Scatter_Plot_Runs_Conceded_Vs_Deliveries.png")
plot(bowler_stats_Per_match$Deliveries_Per_Match,
bowler_stats_Per_match$Runs_Conceded_Per_Match,
main = "Scatter Plot of Runs Conceded vs No. of Deliveries",
xlab = "Average No. Of Deliveries",
ylab = "Average Runs Conceded")
dev.off()
# Pearson correlation between the two per-match averages (printed to console).
cor(bowler_stats_Per_match$Deliveries_Per_Match, bowler_stats_Per_match$Runs_Conceded_Per_Match)
########################################################################################################################
# Scatter plot of dot balls vs number of deliveries
########################################################################################################################
png(filename="Images/Scatter_Plot_Dots_Balls_Vs_Deliveries.png")
plot(bowler_stats_Per_match$Deliveries_Per_Match,
bowler_stats_Per_match$Dots_Per_Match,
main = "Scatter Plot of No. of Dot Balls vs No. of Deliveries",
xlab = "Average No. Of Deliveries",
ylab = "Average Dot Balls")
dev.off()
cor(bowler_stats_Per_match$Deliveries_Per_Match, bowler_stats_Per_match$Dots_Per_Match)
########################################################################################################################
# Scatter plot of wickets taken vs number of deliveries
########################################################################################################################
png(filename="Images/Scatter_Plot_Wickets_Taken_Vs_Deliveries.png")
plot(bowler_stats_Per_match$Deliveries_Per_Match,
bowler_stats_Per_match$Wickets_Per_Match,
main = "Scatter Plot of Wickets Taken vs No. of Deliveries",
xlab = "Average No. Of Deliveries",
ylab = "Average Wickets Taken")
dev.off()
cor(bowler_stats_Per_match$Deliveries_Per_Match, bowler_stats_Per_match$Wickets_Per_Match)
|
/ipl/Analysis/3.Bowler_Analysis.R
|
no_license
|
DB79/BigData_IPL_Analysis
|
R
| false
| false
| 4,937
|
r
|
###
# SET WORKING DIRECTORY TO SOURCE FILE LOCATION
# Session -> Set Working Directory -> To Source File Location
###
#install.packages("ggrepel")
#install.packages("dplyr")
#install.packages("sqldf")
#install.packages("ggplot2")
library(sqldf)
library(ggplot2)
library(dplyr)
library(ggrepel)
deliveries <- read.csv('deliveries.csv')
########################################################################################################################
# Perform analysis of the data to create the derived tables used in the analysis below
########################################################################################################################

# Total number of matches each player bowled in (distinct match ids per bowler).
matches_per_player <- sqldf('
  select bowler,
  count(DISTINCT(match_id)) as "total_matches"
  from deliveries group by bowler')

# Dot balls, legal deliveries, and runs conceded by each player.
# BUG FIX: a delivery is illegal when it is a wide OR a no-ball; the original
# condition `wide_runs == 1 & noball_runs == 1` can never be true for a single
# delivery, so every ball was being counted as legal. `> 0` also catches
# wides/no-balls on which additional runs were scored.
bowler_stats <- group_by(deliveries, bowler)%>%
  summarise(dot_balls = sum(ifelse(batsman_runs == 0 & extra_runs == 0, 1, 0)),
            total_balls = sum(ifelse(wide_runs > 0 | noball_runs > 0, 0, 1)),
            total_runs = sum(batsman_runs + extra_runs))

# Add the number of matches played per player.
bowler_stats <- merge(bowler_stats, matches_per_player, by.x = "bowler", by.y = "bowler")

# Total wickets credited to the bowler.
# NOTE(review): "stumped" and "hit wicket" also credit the bowler but are not
# counted here — confirm whether that exclusion is intentional.
total_wickets_per_player <- group_by(deliveries, bowler)%>%
  summarise(wickets = sum(ifelse(dismissal_kind
                                 %in% c("caught", "bowled", "lbw", "caught and bowled"), 1, 0)))
bowler_stats <- merge(bowler_stats,
                      total_wickets_per_player,
                      by.x = "bowler",
                      by.y = "bowler")

# Drop the bowler name so the remaining numeric columns can be boxplotted.
bowler_overall_stats <- sqldf('
  select dot_balls as "Dots" ,
  total_balls as "Deliveries",
  total_runs as "Runs_Conceded",
  wickets,
  total_matches
  from bowler_stats')

# Per-match averages for each bowler.
# BUG FIX: sqldf runs on SQLite, where integer / integer truncates toward zero.
# `total_runs` (and `total_matches`) are integer columns, so the averages were
# being truncated; multiplying by 1.0 forces floating-point division.
bowler_stats_Per_match <- sqldf('
  select (total_balls * 1.0 / total_matches) as "Deliveries_Per_Match",
  (total_runs * 1.0 / total_matches) as "Runs_Conceded_Per_Match",
  (dot_balls * 1.0 / total_matches) as "Dots_Per_Match",
  (wickets * 1.0 / total_matches) as "Wickets_Per_Match"
  from bowler_stats')
########################################################################################################################
# Boxplot of deliveries per player per match
########################################################################################################################
# Writes the figure into the Images/ directory (must already exist), then
# prints a five-number summary of the per-match delivery counts.
png(filename="Images/Boxplot_Player_Deliveries_Per_Match.png")
boxplot(bowler_stats_Per_match$Deliveries_Per_Match,
main="Boxplot of Deliveries per player per match",
ylab="Number of Deliveries")
dev.off()
summary(bowler_stats_Per_match$Deliveries_Per_Match)
########################################################################################################################
# Scatter plot of runs conceded vs number of deliveries
########################################################################################################################
png(filename="Images/Scatter_Plot_Runs_Conceded_Vs_Deliveries.png")
plot(bowler_stats_Per_match$Deliveries_Per_Match,
bowler_stats_Per_match$Runs_Conceded_Per_Match,
main = "Scatter Plot of Runs Conceded vs No. of Deliveries",
xlab = "Average No. Of Deliveries",
ylab = "Average Runs Conceded")
dev.off()
# Pearson correlation between the two per-match averages (printed to console).
cor(bowler_stats_Per_match$Deliveries_Per_Match, bowler_stats_Per_match$Runs_Conceded_Per_Match)
########################################################################################################################
# Scatter plot of dot balls vs number of deliveries
########################################################################################################################
png(filename="Images/Scatter_Plot_Dots_Balls_Vs_Deliveries.png")
plot(bowler_stats_Per_match$Deliveries_Per_Match,
bowler_stats_Per_match$Dots_Per_Match,
main = "Scatter Plot of No. of Dot Balls vs No. of Deliveries",
xlab = "Average No. Of Deliveries",
ylab = "Average Dot Balls")
dev.off()
cor(bowler_stats_Per_match$Deliveries_Per_Match, bowler_stats_Per_match$Dots_Per_Match)
########################################################################################################################
# Scatter plot of wickets taken vs number of deliveries
########################################################################################################################
png(filename="Images/Scatter_Plot_Wickets_Taken_Vs_Deliveries.png")
plot(bowler_stats_Per_match$Deliveries_Per_Match,
bowler_stats_Per_match$Wickets_Per_Match,
main = "Scatter Plot of Wickets Taken vs No. of Deliveries",
xlab = "Average No. Of Deliveries",
ylab = "Average Wickets Taken")
dev.off()
cor(bowler_stats_Per_match$Deliveries_Per_Match, bowler_stats_Per_match$Wickets_Per_Match)
|
# Auto-generated valgrind/fuzz test case: feeds the internal C++ wrapper
# meteor:::ET0_PenmanMonteith deliberately pathological inputs (zero-length
# vectors, denormal and astronomically large doubles) to exercise crash and
# memory-error paths. The extreme values are intentional — do not "fix" them.
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ra = numeric(0), relh = -1.72131968218895e+83, rs = numeric(0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884132131e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161))
# Call the target with the argument list and dump the structure of the result.
result <- do.call(meteor:::ET0_PenmanMonteith,testlist)
str(result)
|
/meteor/inst/testfiles/ET0_PenmanMonteith/AFL_ET0_PenmanMonteith/ET0_PenmanMonteith_valgrind_files/1615838474-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 826
|
r
|
# Auto-generated valgrind/fuzz test case: feeds the internal C++ wrapper
# meteor:::ET0_PenmanMonteith deliberately pathological inputs (zero-length
# vectors, denormal and astronomically large doubles) to exercise crash and
# memory-error paths. The extreme values are intentional — do not "fix" them.
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ra = numeric(0), relh = -1.72131968218895e+83, rs = numeric(0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884132131e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161))
# Call the target with the argument list and dump the structure of the result.
result <- do.call(meteor:::ET0_PenmanMonteith,testlist)
str(result)
|
#' (helper function) Aggregate data for WOE/NWOE calculations
#'
#' \code{Aggregate} returns aggregated data for the WOE and NWOE functions
#'
#' @param data input data frame
#' @param x variable to be aggregated
#' @param y dependent variable
#' @param breaks breaks for binning
#' @param trt binary treatment variable (for net lift only)
#'
#' @return a data.frame with one row per bin: counts (N), outcome counts
#'   (y_1/y_0, split by treatment/control when \code{trt} is supplied),
#'   a bin label column named after \code{x}, and the bin's share (Percent).
#'
#' @import data.table
#'
#' @export Aggregate
Aggregate <- function(data, x, y, breaks, trt){
# Dummy assignments so R CMD check does not flag data.table NSE column names.
var <- Group <- n <- y_1 <- y_0 <- t_1 <- t_0 <- y_1_t <- y_0_t <- y_1_c <- y_0_c <- NULL
# Outcome indicators: y_1 mirrors the dependent variable, y_0 is its complement.
data$y_1 <- data[[y]]
data$y_0 <- ifelse(data$y_1==1, 0, 1)
# Row counter used for per-bin N via sum(n).
data$n <- 1
# Net-lift case: build treatment/control indicators and the four
# outcome-by-treatment cells.
if (is.null(trt)==FALSE){
data$t_1 <- ifelse(data[,trt]==1, 1, 0)
data$t_0 <- ifelse(data[,trt]==0, 1, 0)
data$y_1_t <- ifelse(data[,y]==1 & data[,trt]==1, 1, 0)
data$y_0_t <- ifelse(data[,y]==0 & data[,trt]==1, 1, 0)
data$y_1_c <- ifelse(data[,y]==1 & data[,trt]==0, 1, 0)
data$y_0_c <- ifelse(data[,y]==0 & data[,trt]==0, 1, 0)
}
# Assign each row to a bin ("Group"). Numeric variables are binned with
# findInterval; character/factor variables use their levels directly.
if (is.character(data[[x]])==FALSE & is.factor(data[[x]])==FALSE){
if (length(breaks)==1){
# Single break equal to the variable's max: keep the right edge open so
# the max falls into the interval rather than a new one past it.
if (breaks==max(data[[x]], na.rm=TRUE)){
data$Group <- findInterval(data[[x]], breaks, rightmost.closed=FALSE)
} else{
data$Group <- findInterval(data[[x]], breaks, rightmost.closed=TRUE)
}
} else{
data$Group <- findInterval(data[[x]], breaks)
}
data <- data.table(data)
setkey(data, Group)
} else{
data$Group <- data[[x]]
data <- data.table(data)
setkey(data, Group)
}
# Aggregate per bin. Four cases: with/without treatment x numeric/categorical.
# NOTE(review): min(var)/max(var) reference a column named "var" that is not
# created in this function — presumably the caller stores the raw values of x
# in data$var before calling Aggregate; confirm upstream.
if (is.null(trt)==TRUE){
if (is.character(data[[x]])==FALSE & is.factor(data[[x]])==FALSE){
t <- as.data.frame(data[,list(sum(n), sum(y_1), sum(y_0), min(var), max(var)), by=Group])
names(t) <- c("Group", "N", "y_1", "y_0", "Min", "Max")
t <- t[,c("Group", "N", "y_1", "y_0", "Min", "Max")]
} else{
t <- as.data.frame(data[,list(sum(n), sum(y_1), sum(y_0)), by=Group])
names(t) <- c("Group", "N", "y_1", "y_0")
}
} else{
if (is.character(data[[x]])==FALSE & is.factor(data[[x]])==FALSE){
t <- as.data.frame(data[,list(sum(n), sum(t_1), sum(t_0),
sum(y_1_t), sum(y_0_t),
sum(y_1_c), sum(y_0_c),
min(var), max(var)),
by=Group])
names(t) <- c("Group", "N", "Treatment", "Control", "y_1_t", "y_0_t", "y_1_c", "y_0_c", "Min", "Max")
t <- t[,c("Group", "N", "Treatment", "Control", "y_1_t", "y_0_t", "y_1_c", "y_0_c", "Min", "Max")]
} else{
t <- as.data.frame(data[,list(sum(n), sum(t_1), sum(t_0),
sum(y_1_t), sum(y_0_t),
sum(y_1_c), sum(y_0_c)), by=Group])
names(t) <- c("Group", "N", "Treatment", "Control", "y_1_t", "y_0_t", "y_1_c", "y_0_c")
}
}
# Human-readable bin label in a column named after x: the level itself for
# categorical variables, "[min,max]" (rounded to 2 dp) for numeric bins,
# and the literal string "NA" for the missing-value bin.
if (is.character(data[[x]]) | is.factor(data[[x]])){
t[,x] <- t$Group
} else{
for (i in 1:nrow(t)){
if (is.na(t[i,1])){
t[i,x] <- "NA"
} else{
t[i,x] <- paste0("[",round(t[i,"Min"],2),",",round(t[i,"Max"],2),"]")
}
}
}
# Drop the internal Group key and add each bin's share of the total rows.
t$Group <- NULL
t$Percent <- t$N/sum(t$N)
return(t)
}
|
/R/Aggregate.R
|
no_license
|
jcassiojr/Information
|
R
| false
| false
| 3,270
|
r
|
#' (helper function) Aggregate data for WOE/NWOE calculations
#'
#' \code{Aggregate} returns aggregated data for the WOE and NWOE functions
#'
#' @param data input data frame
#' @param x variable to be aggregated
#' @param y dependent variable
#' @param breaks breaks for binning
#' @param trt binary treatment variable (for net lift only)
#'
#' @return a data.frame with one row per bin: counts (N), outcome counts
#'   (y_1/y_0, split by treatment/control when \code{trt} is supplied),
#'   a bin label column named after \code{x}, and the bin's share (Percent).
#'
#' @import data.table
#'
#' @export Aggregate
Aggregate <- function(data, x, y, breaks, trt){
# Dummy assignments so R CMD check does not flag data.table NSE column names.
var <- Group <- n <- y_1 <- y_0 <- t_1 <- t_0 <- y_1_t <- y_0_t <- y_1_c <- y_0_c <- NULL
# Outcome indicators: y_1 mirrors the dependent variable, y_0 is its complement.
data$y_1 <- data[[y]]
data$y_0 <- ifelse(data$y_1==1, 0, 1)
# Row counter used for per-bin N via sum(n).
data$n <- 1
# Net-lift case: build treatment/control indicators and the four
# outcome-by-treatment cells.
if (is.null(trt)==FALSE){
data$t_1 <- ifelse(data[,trt]==1, 1, 0)
data$t_0 <- ifelse(data[,trt]==0, 1, 0)
data$y_1_t <- ifelse(data[,y]==1 & data[,trt]==1, 1, 0)
data$y_0_t <- ifelse(data[,y]==0 & data[,trt]==1, 1, 0)
data$y_1_c <- ifelse(data[,y]==1 & data[,trt]==0, 1, 0)
data$y_0_c <- ifelse(data[,y]==0 & data[,trt]==0, 1, 0)
}
# Assign each row to a bin ("Group"). Numeric variables are binned with
# findInterval; character/factor variables use their levels directly.
if (is.character(data[[x]])==FALSE & is.factor(data[[x]])==FALSE){
if (length(breaks)==1){
# Single break equal to the variable's max: keep the right edge open so
# the max falls into the interval rather than a new one past it.
if (breaks==max(data[[x]], na.rm=TRUE)){
data$Group <- findInterval(data[[x]], breaks, rightmost.closed=FALSE)
} else{
data$Group <- findInterval(data[[x]], breaks, rightmost.closed=TRUE)
}
} else{
data$Group <- findInterval(data[[x]], breaks)
}
data <- data.table(data)
setkey(data, Group)
} else{
data$Group <- data[[x]]
data <- data.table(data)
setkey(data, Group)
}
# Aggregate per bin. Four cases: with/without treatment x numeric/categorical.
# NOTE(review): min(var)/max(var) reference a column named "var" that is not
# created in this function — presumably the caller stores the raw values of x
# in data$var before calling Aggregate; confirm upstream.
if (is.null(trt)==TRUE){
if (is.character(data[[x]])==FALSE & is.factor(data[[x]])==FALSE){
t <- as.data.frame(data[,list(sum(n), sum(y_1), sum(y_0), min(var), max(var)), by=Group])
names(t) <- c("Group", "N", "y_1", "y_0", "Min", "Max")
t <- t[,c("Group", "N", "y_1", "y_0", "Min", "Max")]
} else{
t <- as.data.frame(data[,list(sum(n), sum(y_1), sum(y_0)), by=Group])
names(t) <- c("Group", "N", "y_1", "y_0")
}
} else{
if (is.character(data[[x]])==FALSE & is.factor(data[[x]])==FALSE){
t <- as.data.frame(data[,list(sum(n), sum(t_1), sum(t_0),
sum(y_1_t), sum(y_0_t),
sum(y_1_c), sum(y_0_c),
min(var), max(var)),
by=Group])
names(t) <- c("Group", "N", "Treatment", "Control", "y_1_t", "y_0_t", "y_1_c", "y_0_c", "Min", "Max")
t <- t[,c("Group", "N", "Treatment", "Control", "y_1_t", "y_0_t", "y_1_c", "y_0_c", "Min", "Max")]
} else{
t <- as.data.frame(data[,list(sum(n), sum(t_1), sum(t_0),
sum(y_1_t), sum(y_0_t),
sum(y_1_c), sum(y_0_c)), by=Group])
names(t) <- c("Group", "N", "Treatment", "Control", "y_1_t", "y_0_t", "y_1_c", "y_0_c")
}
}
# Human-readable bin label in a column named after x: the level itself for
# categorical variables, "[min,max]" (rounded to 2 dp) for numeric bins,
# and the literal string "NA" for the missing-value bin.
if (is.character(data[[x]]) | is.factor(data[[x]])){
t[,x] <- t$Group
} else{
for (i in 1:nrow(t)){
if (is.na(t[i,1])){
t[i,x] <- "NA"
} else{
t[i,x] <- paste0("[",round(t[i,"Min"],2),",",round(t[i,"Max"],2),"]")
}
}
}
# Drop the internal Group key and add each bin's share of the total rows.
t$Group <- NULL
t$Percent <- t$N/sum(t$N)
return(t)
}
|
# Compute per-minute TV-attribution weights for one or more site subchannels.
#
# Queries the data warehouse for per-minute visit counts in the given
# subchannel(s) and time zones, fills gaps with zeros, estimates a smooth
# baseline visit level with a running median, and returns the excess over
# baseline (clamped to [0, 1]) as per-minute TV weights.
#
# Args:
#   subchannel: character vector of subchannel names; quoted into the SQL IN (...) list.
#   coast: 'east' (EST/CST time zones) or 'west' (PST/MST); anything else errors.
#   name: label used to name the weights column ("<name>-tv") in the result.
# Returns: list(visits = per-minute data frame with cnt/baseline/tvweights,
#               weights = data frame of datetime plus the "<name>-tv" column).
# NOTE(review): relies on an open RODBC-style connection `.dwMatrix` defined
# elsewhere — confirm it is initialized before calling.
tv.weight <- function(subchannel, coast, name) {
if (coast == 'east') {
tz = "'EST', 'CST'"
} else if (coast == 'west') {
tz = "'PST', 'MST'"
} else stop("wrong coast: either 'east', or 'west'")
# SQL assembled in three fragments around the time-zone and subchannel lists:
# truncate the server timestamp to the minute and count visits per minute.
query1 <-
"select substr(cast(servertimemst as varchar(100)), 1, 16) || ':00' timetominstr, count(*) cnt
from a.feng_us2015_visitandtv
where ustimezone in ("
query2 <-
") and subchannel in ("
query3 <-
")
group by 1"
# Quote each subchannel name for the SQL IN list.
subchannel <- paste("'", subchannel, "'", sep="", collapse=", ")
query <- paste(query1, tz, query2, subchannel, query3, sep="")
visit.data <- sqlQuery(.dwMatrix, query)
# visit.data <- visit.data[order(visit.data$timetominstr), ]
visit.data$timetomin <- strptime(visit.data$timetominstr, "%Y-%m-%d %H:%M:%S")
# Complete minute-by-minute grid spanning the observed time range, so minutes
# with no visits are represented rather than missing.
time <- data.frame(timestamp=seq(min(visit.data$timetomin, na.rm=T),
max(visit.data$timetomin, na.rm=T),
by=60))
time$ts.char <- as.character(time$timestamp)
data <- merge(visit.data, time, by.x="timetominstr", by.y="ts.char", all.y=T, all.x=T)
# Minutes present only in the grid (no observed visits) get an explicit zero.
data$cnt[!complete.cases(data[, 1:2])] <- 0
data <- data[order(data$timestamp), ]
# tmp <- data.frame(group_by(data[, c(2,5)], date) %>% summarise(count=n()))
### pay attention: 03/08/2015 one hour loss (DST spring-forward) ###
# Baseline = running median over a 61-minute window, floored at zero.
data$baseline <- pmax(runmed(data$cnt, k=61), 0)
# TV weight = share of visits above baseline, clamped to [0, 1].
# NOTE(review): when cnt == 0 this divides 0/0 and yields NaN — confirm
# downstream consumers tolerate NaN weights.
data$tvweights <- pmax(pmin((data$cnt - data$baseline) / data$cnt, 1), 0)
# data$tvweights[data$cnt < 10] <- 0
weights.by.min <- data.frame(datetime = data$timetominstr,
tvweights = data$tvweights)
names(weights.by.min)[2] <- paste(name, "-tv", sep="")
list(visits=data, weights=weights.by.min)
}
# Plot week 2 of per-minute visits with the fitted baseline overlaid (red)
# for each subchannel. 1440 = minutes per day, so this index selects days 8-14.
plot.idx <- (1440*7+1):(1440*14)
# Fetch visits + baselines for the search/direct subchannels (east coast).
paid.search.brand <- tv.weight("Paid Search – Brand", "east", "Paid Search Brand")
organic.search.brand <- tv.weight("Organic Brand", "east", "Organic Search Brand")
paid.search.nonbrand <- tv.weight("Paid Search – NonBrand", "east", "Paid Search NonBrand")
organic.search.nonbrand <- tv.weight("Organic NonBrand", "east", "Organic Search NonBrand")
direct.homepage <- tv.weight("Direct Homepage", "east", "Direct Homepage")
direct.nonhomepage <- tv.weight("Direct Non-Homepage", "east", "Direct Non-Homepage")
# 3x2 panel: one visits-vs-baseline plot per subchannel.
par(mfrow=c(3, 2), mar=c(2, 2, 2, 0.5))
plot(paid.search.brand$visits$timestamp[plot.idx], paid.search.brand$visits$cnt[plot.idx],
type="l", main="Paid Search - Brand", ylab="visits", xlab="")
lines(paid.search.brand$visits$timestamp[plot.idx], paid.search.brand$visits$baseline[plot.idx],
lwd=3, lty=1, col="red")
plot(paid.search.nonbrand$visits$timestamp[plot.idx],
paid.search.nonbrand$visits$cnt[plot.idx],
type="l", main="Paid Search - NonBrand", ylab="visits", xlab="")
lines(paid.search.nonbrand$visits$timestamp[plot.idx],
paid.search.nonbrand$visits$baseline[plot.idx],
lwd=3, lty=1, col="red")
plot(organic.search.brand$visits$timestamp[plot.idx], organic.search.brand$visits$cnt[plot.idx],
type="l", main="Organic Search - Brand", ylab="visits", xlab="")
lines(organic.search.brand$visits$timestamp[plot.idx], organic.search.brand$visits$baseline[plot.idx],
lwd=3, lty=1, col="red")
plot(organic.search.nonbrand$visits$timestamp[plot.idx],
organic.search.nonbrand$visits$cnt[plot.idx],
type="l", main="Organic Search - NonBrand", ylab="visits", xlab="")
lines(organic.search.nonbrand$visits$timestamp[plot.idx],
organic.search.nonbrand$visits$baseline[plot.idx],
lwd=3, lty=1, col="red")
plot(direct.homepage$visits$timestamp[plot.idx],
direct.homepage$visits$cnt[plot.idx],
type="l", main="Direct Homepage", ylab="visits", xlab="")
lines(direct.homepage$visits$timestamp[plot.idx],
direct.homepage$visits$baseline[plot.idx],
lwd=3, lty=1, col="red")
plot(direct.nonhomepage$visits$timestamp[plot.idx],
direct.nonhomepage$visits$cnt[plot.idx],
type="l", main="Direct Non-Homepage", ylab="visits", xlab="")
lines(direct.nonhomepage$visits$timestamp[plot.idx],
direct.nonhomepage$visits$baseline[plot.idx],
lwd=3, lty=1, col="red")
# Second batch: partner/referral/content/email subchannels in a 2x2 panel.
partner <- tv.weight("Partners", "east", "Partners")
internal.referral <- tv.weight("Internal Referrals", "east", "Internal Referrals")
content.marketing <- tv.weight("Content Marketing", "east", "Content Marketing")
email <- tv.weight(c("Email - Programs", "Email - Campaigns"), "east", "Email - Programs & Campaigns")
par(mfrow=c(2,2), mar=c(2, 2, 2, 0.5))
plot(partner$visits$timestamp[plot.idx], partner$visits$cnt[plot.idx],
type="l", main="Partner", ylab="visits", xlab="")
lines(partner$visits$timestamp[plot.idx], partner$visits$baseline[plot.idx],
lwd=3, lty=1, col="red")
plot(internal.referral$visits$timestamp[plot.idx], internal.referral$visits$cnt[plot.idx],
type="l", main="Internal Referrals", ylab="visits", xlab="")
lines(internal.referral$visits$timestamp[plot.idx], internal.referral$visits$baseline[plot.idx],
lwd=3, lty=1, col="red")
plot(content.marketing$visits$timestamp[plot.idx], content.marketing$visits$cnt[plot.idx],
type="l", main="Content Marketing", ylab="visits", xlab="")
lines(content.marketing$visits$timestamp[plot.idx], content.marketing$visits$baseline[plot.idx],
lwd=3, lty=1, col="red")
plot(email$visits$timestamp[plot.idx], email$visits$cnt[plot.idx],
type="l", main="Email - Programs & Campaigns", ylab="visits", xlab="")
lines(email$visits$timestamp[plot.idx], email$visits$baseline[plot.idx],
lwd=3, lty=1, col="red")
|
/R/us_tv_baseline_plot.R
|
no_license
|
cholita77/AttributionModel
|
R
| false
| false
| 5,657
|
r
|
# Compute per-minute TV-attribution weights for one or more site subchannels.
#
# Queries the data warehouse for per-minute visit counts in the given
# subchannel(s) and time zones, fills gaps with zeros, estimates a smooth
# baseline visit level with a running median, and returns the excess over
# baseline (clamped to [0, 1]) as per-minute TV weights.
#
# Args:
#   subchannel: character vector of subchannel names; quoted into the SQL IN (...) list.
#   coast: 'east' (EST/CST time zones) or 'west' (PST/MST); anything else errors.
#   name: label used to name the weights column ("<name>-tv") in the result.
# Returns: list(visits = per-minute data frame with cnt/baseline/tvweights,
#               weights = data frame of datetime plus the "<name>-tv" column).
# NOTE(review): relies on an open RODBC-style connection `.dwMatrix` defined
# elsewhere — confirm it is initialized before calling.
tv.weight <- function(subchannel, coast, name) {
if (coast == 'east') {
tz = "'EST', 'CST'"
} else if (coast == 'west') {
tz = "'PST', 'MST'"
} else stop("wrong coast: either 'east', or 'west'")
# SQL assembled in three fragments around the time-zone and subchannel lists:
# truncate the server timestamp to the minute and count visits per minute.
query1 <-
"select substr(cast(servertimemst as varchar(100)), 1, 16) || ':00' timetominstr, count(*) cnt
from a.feng_us2015_visitandtv
where ustimezone in ("
query2 <-
") and subchannel in ("
query3 <-
")
group by 1"
# Quote each subchannel name for the SQL IN list.
subchannel <- paste("'", subchannel, "'", sep="", collapse=", ")
query <- paste(query1, tz, query2, subchannel, query3, sep="")
visit.data <- sqlQuery(.dwMatrix, query)
# visit.data <- visit.data[order(visit.data$timetominstr), ]
visit.data$timetomin <- strptime(visit.data$timetominstr, "%Y-%m-%d %H:%M:%S")
# Complete minute-by-minute grid spanning the observed time range, so minutes
# with no visits are represented rather than missing.
time <- data.frame(timestamp=seq(min(visit.data$timetomin, na.rm=T),
max(visit.data$timetomin, na.rm=T),
by=60))
time$ts.char <- as.character(time$timestamp)
data <- merge(visit.data, time, by.x="timetominstr", by.y="ts.char", all.y=T, all.x=T)
# Minutes present only in the grid (no observed visits) get an explicit zero.
data$cnt[!complete.cases(data[, 1:2])] <- 0
data <- data[order(data$timestamp), ]
# tmp <- data.frame(group_by(data[, c(2,5)], date) %>% summarise(count=n()))
### pay attention: 03/08/2015 one hour loss (DST spring-forward) ###
# Baseline = running median over a 61-minute window, floored at zero.
data$baseline <- pmax(runmed(data$cnt, k=61), 0)
# TV weight = share of visits above baseline, clamped to [0, 1].
# NOTE(review): when cnt == 0 this divides 0/0 and yields NaN — confirm
# downstream consumers tolerate NaN weights.
data$tvweights <- pmax(pmin((data$cnt - data$baseline) / data$cnt, 1), 0)
# data$tvweights[data$cnt < 10] <- 0
weights.by.min <- data.frame(datetime = data$timetominstr,
tvweights = data$tvweights)
names(weights.by.min)[2] <- paste(name, "-tv", sep="")
list(visits=data, weights=weights.by.min)
}
# Plot week 2 of per-minute visits with the fitted baseline overlaid (red)
# for each subchannel. 1440 = minutes per day, so this index selects days 8-14.
plot.idx <- (1440*7+1):(1440*14)
# Fetch visits + baselines for the search/direct subchannels (east coast).
paid.search.brand <- tv.weight("Paid Search – Brand", "east", "Paid Search Brand")
organic.search.brand <- tv.weight("Organic Brand", "east", "Organic Search Brand")
paid.search.nonbrand <- tv.weight("Paid Search – NonBrand", "east", "Paid Search NonBrand")
organic.search.nonbrand <- tv.weight("Organic NonBrand", "east", "Organic Search NonBrand")
direct.homepage <- tv.weight("Direct Homepage", "east", "Direct Homepage")
direct.nonhomepage <- tv.weight("Direct Non-Homepage", "east", "Direct Non-Homepage")
# 3x2 panel: one visits-vs-baseline plot per subchannel.
par(mfrow=c(3, 2), mar=c(2, 2, 2, 0.5))
plot(paid.search.brand$visits$timestamp[plot.idx], paid.search.brand$visits$cnt[plot.idx],
type="l", main="Paid Search - Brand", ylab="visits", xlab="")
lines(paid.search.brand$visits$timestamp[plot.idx], paid.search.brand$visits$baseline[plot.idx],
lwd=3, lty=1, col="red")
plot(paid.search.nonbrand$visits$timestamp[plot.idx],
paid.search.nonbrand$visits$cnt[plot.idx],
type="l", main="Paid Search - NonBrand", ylab="visits", xlab="")
lines(paid.search.nonbrand$visits$timestamp[plot.idx],
paid.search.nonbrand$visits$baseline[plot.idx],
lwd=3, lty=1, col="red")
plot(organic.search.brand$visits$timestamp[plot.idx], organic.search.brand$visits$cnt[plot.idx],
type="l", main="Organic Search - Brand", ylab="visits", xlab="")
lines(organic.search.brand$visits$timestamp[plot.idx], organic.search.brand$visits$baseline[plot.idx],
lwd=3, lty=1, col="red")
plot(organic.search.nonbrand$visits$timestamp[plot.idx],
organic.search.nonbrand$visits$cnt[plot.idx],
type="l", main="Organic Search - NonBrand", ylab="visits", xlab="")
lines(organic.search.nonbrand$visits$timestamp[plot.idx],
organic.search.nonbrand$visits$baseline[plot.idx],
lwd=3, lty=1, col="red")
plot(direct.homepage$visits$timestamp[plot.idx],
direct.homepage$visits$cnt[plot.idx],
type="l", main="Direct Homepage", ylab="visits", xlab="")
lines(direct.homepage$visits$timestamp[plot.idx],
direct.homepage$visits$baseline[plot.idx],
lwd=3, lty=1, col="red")
plot(direct.nonhomepage$visits$timestamp[plot.idx],
direct.nonhomepage$visits$cnt[plot.idx],
type="l", main="Direct Non-Homepage", ylab="visits", xlab="")
lines(direct.nonhomepage$visits$timestamp[plot.idx],
direct.nonhomepage$visits$baseline[plot.idx],
lwd=3, lty=1, col="red")
# Second batch: partner/referral/content/email subchannels in a 2x2 panel.
partner <- tv.weight("Partners", "east", "Partners")
internal.referral <- tv.weight("Internal Referrals", "east", "Internal Referrals")
content.marketing <- tv.weight("Content Marketing", "east", "Content Marketing")
email <- tv.weight(c("Email - Programs", "Email - Campaigns"), "east", "Email - Programs & Campaigns")
par(mfrow=c(2,2), mar=c(2, 2, 2, 0.5))
plot(partner$visits$timestamp[plot.idx], partner$visits$cnt[plot.idx],
type="l", main="Partner", ylab="visits", xlab="")
lines(partner$visits$timestamp[plot.idx], partner$visits$baseline[plot.idx],
lwd=3, lty=1, col="red")
plot(internal.referral$visits$timestamp[plot.idx], internal.referral$visits$cnt[plot.idx],
type="l", main="Internal Referrals", ylab="visits", xlab="")
lines(internal.referral$visits$timestamp[plot.idx], internal.referral$visits$baseline[plot.idx],
lwd=3, lty=1, col="red")
plot(content.marketing$visits$timestamp[plot.idx], content.marketing$visits$cnt[plot.idx],
type="l", main="Content Marketing", ylab="visits", xlab="")
lines(content.marketing$visits$timestamp[plot.idx], content.marketing$visits$baseline[plot.idx],
lwd=3, lty=1, col="red")
plot(email$visits$timestamp[plot.idx], email$visits$cnt[plot.idx],
type="l", main="Email - Programs & Campaigns", ylab="visits", xlab="")
lines(email$visits$timestamp[plot.idx], email$visits$baseline[plot.idx],
lwd=3, lty=1, col="red")
|
\name{isOrthogonal}
\alias{isOrthogonal}
\alias{isOrthogonal.default}
\title{Is Orthogonal}
\description{
Determine if the set of contrast is orthogonal
}
\usage{
isOrthogonal(contrast)
}
\arguments{
\item{contrast}{a numeric vector or matrix of contrast coefficients whose orthogonality is to be checked}
}
\author{AAGulles}
\keyword{utilities}
|
/.svn/pristine/8f/8f670b11428e54c550e62f24ebcd20fced10dfd2.svn-base
|
no_license
|
djnpisano/RScriptLibrary
|
R
| false
| false
| 263
|
\name{isOrthogonal}
\alias{isOrthogonal}
\alias{isOrthogonal.default}
\title{Is Orthogonal}
\description{
Determine if the set of contrast is orthogonal
}
\usage{
isOrthogonal(contrast)
}
\arguments{
\item{contrast}{a numeric vector or matrix of contrast coefficients whose orthogonality is to be checked}
}
\author{AAGulles}
\keyword{utilities}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.