content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Two-tailed z test (Bluman Ex 8.5): draw the standard normal curve,
# shade the rejection regions beyond the critical values +/- c, and mark
# the observed test statistic z on the curve.
m <- 25226                              # sample mean
mu <- 24672                             # hypothesized population mean
s <- 3251                               # population standard deviation
n <- 35                                 # sample size
# Critical value for alpha = 0.01 two-tailed, and the observed z value.
# NOTE: `c` shadows base::c() as a variable name; calls c(...) below
# still resolve to the base function because R looks up functions
# separately from data objects.
c <- round(qnorm(0.995), 2)
z <- round((m - mu) / (s / sqrt(n)), 2)
# Standard normal density over [-4, 4].
grid <- seq(-4, 4, length = 200)
plot(grid, dnorm(grid), type = "l", lwd = 2, col = "green")
# Mark the test statistic: a degenerate polygon at x = z renders as a
# vertical segment from 0 up to dnorm(z).
at_z <- seq(z, z, length = 100)
polygon(c(z, at_z, z), c(0, dnorm(at_z), 0), col = "black")
# Left rejection region: shade from -c out to -4.
left_tail <- seq(-c, -4, length = 100)
polygon(c(-c, left_tail, -4), c(0, dnorm(left_tail), 0), col = "yellow")
# Right rejection region: shade from c out to 4.
right_tail <- seq(c, 4, length = 100)
polygon(c(c, right_tail, 4), c(0, dnorm(right_tail), 0), col = "yellow")
#values are compared on the plot | /Elementary_Statistics:_A_Step_By_Step_Approach_by_Allan_G._Bluman/CH8/EX8.5/ex8_5.R | permissive | FOSSEE/R_TBC_Uploads | R | false | false | 410 | r | m=25226
mu=24672
s=3251
n=35
c=round(qnorm(0.995),2)
z=round((m-mu)/(s/sqrt(n)),2)
x=seq(-4,4,length=200)
y=dnorm(x)
plot(x,y,type="l", lwd=2, col="green")
x=seq(z,z,length=100)
y=dnorm(x)
polygon(c(z,x,z),c(0,y,0),col="black")
x=seq(-c,-4,length=100)
y=dnorm(x)
polygon(c(-c,x,-4),c(0,y,0),col="yellow")
x=seq(c,4,length=100)
y=dnorm(x)
polygon(c(c,x,4),c(0,y,0),col="yellow")
#values are compared on the plot |
# Fuzz-test fixture (AFL-generated): `A` is a 5x7 double matrix whose
# first four entries are extreme magnitudes/denormals (the rest zeros),
# and `B` is a 1x1 zero matrix. Used to exercise the unexported
# multivariance:::match_rows() for crashes on pathological input.
testlist <- list(A = structure(c(2.31245008297217e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
# Invoke the internal function with the fixture as its argument list.
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613101565-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 343 | r | testlist <- list(A = structure(c(2.31245008297217e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
library(testthat)
library(tidyverse)
library(readr)
library(readxl)
library(readODS)
context("Verification that the values in the insect_sampling template meet our requirements")
# Test the features of datasets
# Right labels (dataframe's labels are equal to those of "insect_sampling" template)
# Right number of columns (dataframe's number of columns is equal to that of "insect_sampling" template)
# All studies are identified (study_ID is not NA)
# All sites are identified (site_ID is not NA)
# abundance format (non-negative number or NA)
# total_sampled_area format (non-negative number or NA)
# total_sampled_time format (non-negative number or NA)
# total_sampled_flowers format (non-negative number or NA)
# guild categories are in this list: "honeybees","bumblebees",
# "other_wild_bees", "syrphids","humbleflies","other_flies","beetles",
# "non_bee_hymenoptera","lepidoptera","other"
# all study IDs are also in field_level template
# all site IDs are also in field_level template
# Only ASCII characters are allowed.
# Expected column labels of the "insect_sampling" sheet, in order.
labels_OK <- c("study_id","site_id","sampling_method",
               "pollinator","identified_to","guild",
               "abundance","total_sampled_area","total_sampled_time",
               "total_sampled_flowers","description","notes")
# Accepted pollinator guild categories.
guids_OK <- c("honeybees","bumblebees","other_wild_bees",
              "syrphids","humbleflies","other_flies",
              "beetles","non_bee_hymenoptera","lepidoptera",
              "other")
exp_column_number <- length(labels_OK)
folder_base <- "../Your_new_study"
files <- list.files(folder_base)
# Match the extension anchored at the end of the name: the original
# unanchored patterns ".xlsx"/".ods" treat "." as a regex wildcard and
# would also match names merely containing those characters.
excel_file <- files[grep("\\.xlsx$", files)]
ods_file <- files[grep("\\.ods$", files)]
# Prefer the Excel workbook when both formats are present.
if(length(excel_file)>0){
  list_files_insect_sampling <- excel_file
}else{
  list_files_insect_sampling <- ods_file
}
# Validate every submitted workbook. Each registered test mirrors one
# requirement listed in the header comments above.

# Shared expectation: every non-NA value must be >= 0; a column that is
# entirely NA passes vacuously (same semantics as the original inline
# checks, factored out of four near-identical blocks).
expect_nonnegative_or_na <- function(values) {
  NA_values <- is.na(values)
  if (all(NA_values) == FALSE) {
    values_pos <- values[!NA_values]
    expect_equal(all(0 <= values_pos), TRUE)
  } else {
    expect_equal(TRUE, TRUE)
  }
}

# Shared expectation: every non-NA value must belong to `allowed`; a
# column that is entirely NA passes vacuously.
expect_within_or_na <- function(values, allowed) {
  NA_values <- is.na(values)
  if (all(NA_values) == FALSE) {
    values_pos <- values[!NA_values]
    expect_equal(all(values_pos %in% allowed), TRUE)
  } else {
    expect_equal(TRUE, TRUE)
  }
}

for (i in seq_along(list_files_insect_sampling)) {
  file_insect_sampling_i <- paste(folder_base, list_files_insect_sampling[i], sep = "/")
  # Read both sheets with the reader matching the detected format.
  if (length(excel_file) > 0) {
    insect_sampling_i <- read_excel(file_insect_sampling_i, sheet = "insect_sampling")
    field_level_i <- read_excel(file_insect_sampling_i, sheet = "field_level_data")
  } else {
    insect_sampling_i <- read_ods(file_insect_sampling_i, sheet = "insect_sampling")
    field_level_i <- read_ods(file_insect_sampling_i, sheet = "field_level_data")
  }
  # Reference IDs from the field_level sheet: every sampling row must
  # refer to a study/site declared there.
  field_level_sites <- field_level_i$site_id %>% unique()
  field_level_studies <- field_level_i$study_id %>% unique()

  test_that(paste("Right variables:", list_files_insect_sampling[i], sep = " "), {
    labels_i <- labels(insect_sampling_i)[[2]]  # column names of the sheet
    expect_equal(labels_i, labels_OK)
  })
  test_that(paste("Study identified:", list_files_insect_sampling[i], sep = " "), {
    studyID_i <- any(is.na(insect_sampling_i$study_id))
    expect_equal(studyID_i, FALSE)
  })
  test_that(paste("All sites are identified:", list_files_insect_sampling[i], sep = " "), {
    fieldID_i <- any(is.na(insect_sampling_i$site_id))
    expect_equal(fieldID_i, FALSE)
  })
  test_that(paste("abundance format:", list_files_insect_sampling[i], sep = " "), {
    expect_nonnegative_or_na(insect_sampling_i$abundance)
  })
  test_that(paste("total_sampled_area format:", list_files_insect_sampling[i], sep = " "), {
    expect_nonnegative_or_na(insect_sampling_i$total_sampled_area)
  })
  test_that(paste("total_sampled_time format:", list_files_insect_sampling[i], sep = " "), {
    expect_nonnegative_or_na(insect_sampling_i$total_sampled_time)
  })
  test_that(paste("total_sampled_flowers format:", list_files_insect_sampling[i], sep = " "), {
    expect_nonnegative_or_na(insect_sampling_i$total_sampled_flowers)
  })
  test_that(paste("guild categories OK:", list_files_insect_sampling[i], sep = " "), {
    expect_within_or_na(insect_sampling_i$guild, guids_OK)
  })
  test_that(paste("Sites match those in field_data:", list_files_insect_sampling[i], sep = " "), {
    expect_within_or_na(insect_sampling_i$site_id, field_level_sites)
  })
  test_that(paste("Studies match those in field_data:", list_files_insect_sampling[i], sep = " "), {
    expect_within_or_na(insect_sampling_i$study_id, field_level_studies)
  })
  test_that(paste("Only ASCII characters:", list_files_insect_sampling[i], sep = " "), {
    # iconv() substitutes the marker for any character that cannot be
    # represented in ASCII; finding the marker means non-ASCII content.
    any.non.ascii <- any(grepl("I_WAS_NOT_ASCII", iconv(insect_sampling_i,
                                                        "", "ASCII",
                                                        sub="I_WAS_NOT_ASCII")))
    expect_equal(any.non.ascii, FALSE)
  })
}
| /testthat/test-format-insect_sampling-NEWDATA.R | permissive | ibartomeus/OBservData | R | false | false | 6,268 | r | library(testthat)
library(tidyverse)
library(readr)
library(readxl)
library(readODS)
context("Verification that the values in the insect_sampling template meet our requirements")
# Test the features of datasets
# Right labels (dataframe's labels are equal to those of "insect_sampling" template)
# Right number of columns (dataframe's number of columns is equal to that of "insect_sampling" template)
# All studies are identified (study_ID is not NA)
# All sites are identified (site_ID is not NA)
# abundance format (non-negative number or NA)
# total_sampled_area format (non-negative number or NA)
# total_sampled_time format (non-negative number or NA)
# total_sampled_flowers format (non-negative number or NA)
# guild categories are in this list: "honeybees","bumblebees",
# "other_wild_bees", "syrphids","humbleflies","other_flies","beetles",
# "non_bee_hymenoptera","lepidoptera","other"
# all study IDs are also in field_level template
# all site IDs are also in field_level template
# Only ASCII characters are allowed.
labels_OK <- c("study_id","site_id","sampling_method",
"pollinator","identified_to","guild",
"abundance","total_sampled_area","total_sampled_time",
"total_sampled_flowers","description","notes")
guids_OK <- c("honeybees","bumblebees","other_wild_bees",
"syrphids","humbleflies","other_flies",
"beetles","non_bee_hymenoptera","lepidoptera",
"other")
exp_column_number <- length(labels_OK)
folder_base <- "../Your_new_study"
files <- list.files(folder_base)
excel_file <- files[grep(".xlsx",files)]
ods_file <- files[grep(".ods",files)]
if(length(excel_file)>0){
list_files_insect_sampling <- excel_file
}else{
list_files_insect_sampling <- ods_file
}
for (i in seq(length(list_files_insect_sampling))) {
if(length(excel_file)>0){
file_insect_sampling_i <- paste(folder_base, list_files_insect_sampling[i], sep = "/")
insect_sampling_i <- read_excel(file_insect_sampling_i, sheet = "insect_sampling")
field_level_i <- read_excel(file_insect_sampling_i, sheet = "field_level_data")
}else{
file_insect_sampling_i <- paste(folder_base, list_files_insect_sampling[i], sep = "/")
insect_sampling_i <- read_ods(file_insect_sampling_i, sheet = "insect_sampling")
field_level_i <- read_ods(file_insect_sampling_i, sheet = "field_level_data")
}
field_level_sites <- field_level_i$site_id %>% unique()
field_level_studies <- field_level_i$study_id %>% unique()
test_name_i <- paste("Right variables:", list_files_insect_sampling[i], sep = " ")
test_that(test_name_i,{
labels_i <- labels(insect_sampling_i)[[2]]
expect_equal(labels_i, labels_OK)
})
test_name_i <- paste("Study identified:", list_files_insect_sampling[i], sep = " ")
test_that(test_name_i,{
studyID_i <- any(is.na(insect_sampling_i$study_id))
expect_equal(studyID_i, FALSE)
})
test_name_i <- paste("All sites are identified:", list_files_insect_sampling[i], sep = " ")
test_that(test_name_i,{
fieldID_i <- any(is.na(insect_sampling_i$site_id))
expect_equal(fieldID_i, FALSE)
})
test_name_i <- paste("abundance format:", list_files_insect_sampling[i], sep = " ")
test_that(test_name_i,{
NA_values <- is.na(insect_sampling_i$abundance)
if(all(NA_values) == FALSE){
values_pos_i <- insect_sampling_i$abundance[!NA_values]
expect_equal(all(0 <= values_pos_i), TRUE)
}else{
expect_equal(TRUE, TRUE)
}
})
test_name_i <- paste("total_sampled_area format:", list_files_insect_sampling[i], sep = " ")
test_that(test_name_i,{
NA_values <- is.na(insect_sampling_i$total_sampled_area)
if(all(NA_values) == FALSE){
values_pos_i <- insect_sampling_i$total_sampled_area[!NA_values]
expect_equal(all(0 <= values_pos_i), TRUE)
}else{
expect_equal(TRUE, TRUE)
}
})
test_name_i <- paste("total_sampled_time format:", list_files_insect_sampling[i], sep = " ")
test_that(test_name_i,{
NA_values <- is.na(insect_sampling_i$total_sampled_time)
if(all(NA_values) == FALSE){
values_pos_i <- insect_sampling_i$total_sampled_time[!NA_values]
expect_equal(all(0 <= values_pos_i), TRUE)
}else{
expect_equal(TRUE, TRUE)
}
})
test_name_i <- paste("total_sampled_flowers format:", list_files_insect_sampling[i], sep = " ")
test_that(test_name_i,{
NA_values <- is.na(insect_sampling_i$total_sampled_flowers)
if(all(NA_values) == FALSE){
values_pos_i <- insect_sampling_i$total_sampled_flowers[!NA_values]
expect_equal(all(0 <= values_pos_i), TRUE)
}else{
expect_equal(TRUE, TRUE)
}
})
test_name_i <- paste("guild categories OK:", list_files_insect_sampling[i], sep = " ")
test_that(test_name_i,{
NA_values <- is.na(insect_sampling_i$guild)
if(all(NA_values) == FALSE){
values_pos_i <- insect_sampling_i$guild[!NA_values]
expect_equal(all(values_pos_i %in% guids_OK), TRUE)
}else{
expect_equal(TRUE, TRUE)
}
})
test_name_i <- paste("Sites match those in field_data:", list_files_insect_sampling[i], sep = " ")
test_that(test_name_i,{
NA_values <- is.na(insect_sampling_i$site_id)
if(all(NA_values) == FALSE){
values_pos_i <- insect_sampling_i$site_id[!NA_values]
expect_equal(all(values_pos_i %in% field_level_sites), TRUE)
}else{
expect_equal(TRUE, TRUE)
}
})
test_name_i <- paste("Studies match those in field_data:", list_files_insect_sampling[i], sep = " ")
test_that(test_name_i,{
NA_values <- is.na(insect_sampling_i$study_id)
if(all(NA_values) == FALSE){
values_pos_i <- insect_sampling_i$study_id[!NA_values]
expect_equal(all(values_pos_i %in% field_level_studies), TRUE)
}else{
expect_equal(TRUE, TRUE)
}
})
test_name_i <- paste("Only ASCII characters:", list_files_insect_sampling[i], sep = " ")
test_that(test_name_i,{
any.non.ascii <- any(grepl("I_WAS_NOT_ASCII", iconv(insect_sampling_i,
"", "ASCII",
sub="I_WAS_NOT_ASCII")))
expect_equal(any.non.ascii, FALSE)
})
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/documentation.R
\name{default_doc}
\alias{default_doc}
\title{Unexported documentation helper function.}
\usage{
default_doc()
}
\arguments{
\item{internal}{List.
Holds all parameters, data, functions and computed objects used within \code{\link[=explain]{explain()}}
The list contains one or more of the elements \code{parameters}, \code{data}, \code{objects}, \code{output}.}
\item{model}{Object.
The model object that ought to be explained.
See the documentation of \code{\link[=explain]{explain()}} for details.}
\item{predict_model}{Function.
The prediction function used when \code{model} is not natively supported.
See the documentation of \code{\link[=explain]{explain()}} for details.}
\item{output_size}{TODO: Document}
\item{extra}{TODO: Document}
\item{...}{Further arguments passed to \code{approach}-specific functions.}
}
\value{
List \code{internal}
It holds all parameters, data, and computed objects used within \code{\link[=explain]{explain()}}.
The list contains one or more of the elements \code{parameters}, \code{data}, \code{objects}, \code{output}.
}
\description{
Unexported documentation helper function.
}
\keyword{internal}
| /man/default_doc.Rd | permissive | NorskRegnesentral/shapr | R | false | true | 1,237 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/documentation.R
\name{default_doc}
\alias{default_doc}
\title{Unexported documentation helper function.}
\usage{
default_doc()
}
\arguments{
\item{internal}{List.
Holds all parameters, data, functions and computed objects used within \code{\link[=explain]{explain()}}
The list contains one or more of the elements \code{parameters}, \code{data}, \code{objects}, \code{output}.}
\item{model}{Objects.
The model object that ought to be explained.
See the documentation of \code{\link[=explain]{explain()}} for details.}
\item{predict_model}{Function.
The prediction function used when \code{model} is not natively supported.
See the documentation of \code{\link[=explain]{explain()}} for details.}
\item{output_size}{TODO: Document}
\item{extra}{TODO: Document}
\item{...}{Further arguments passed to \code{approach}-specific functions.}
}
\value{
List \code{internal}
It holds all parameters, data, and computed objects used within \code{\link[=explain]{explain()}}.
The list contains one or more of the elements \code{parameters}, \code{data}, \code{objects}, \code{output}.
}
\description{
Unexported documentation helper function.
}
\keyword{internal}
|
## Library
library("dplyr")
# Loads cleanBoletimUrna() (project helper, defined elsewhere).
source("./functions/cleanBoletimUrna.R")
## define paths
### globals
# All configuration lives in a single list `g`.
g <- list()
g$inputDataPath <- "/data/electionBrazil/bweb_estado/"
g$dataCleanPath <- "/data/electionBrazil/bweb_estado_clean"
g$listOfFiles <- list.files(g$inputDataPath)
# First file kept around for interactive inspection.
g$tmp <- g$listOfFiles[1]
# Columns to retain from the raw "boletim de urna" files.
g$keep <- c("CD_ELEICAO",
            "NM_VOTAVEL",
            "NR_PARTIDO",
            "NR_VOTAVEL",
            "CD_MUNICIPIO",
            "NM_MUNICIPIO",
            "QT_VOTOS")
# NOTE(review): duplicates g$listOfFiles; the loop below iterates this one.
listOfFiles <- list.files(g$inputDataPath)
# Clean every raw "boletim de urna" file and persist one CSV per state
# (UF). Already-processed states are skipped so the script can be
# re-run incrementally after an interruption.
for (file in listOfFiles) {
  cat(paste("reading:", file))
  # The two-letter UF code sits at characters 9-10 of names such as
  # "bweb_1t_SP_101020182030.csv".
  ufCode <- substr(file, 9, 10)
  # Skip states whose cleaned CSV already exists. Match the exact
  # output file name: the original `grepl(ufCode, ...)` could
  # false-positive on any file name containing the same two letters.
  if (paste0("clean_", ufCode, ".csv") %in% list.files(g$dataCleanPath)) {
    cat(" already there!\n")
    next
  }
  filepath <- file.path(g$inputDataPath, file)
  tmpDataFrame <- cleanBoletimUrna(filePath = filepath, keepVars = g$keep)
  ## Write to file (write.csv2: semicolon separator, decimal comma)
  write.csv2(x = tmpDataFrame,
             file = file.path(g$dataCleanPath, paste0("clean_", ufCode, ".csv")))
  cat(" DONE!\n")
}
# Ad-hoc check on the São Paulo file.
filePathSP <- file.path("/data/electionBrazil/bweb_estado/",
                        "bweb_1t_SP_101020182030.csv")
# BUG FIX: the original paste0() had a misplaced quote, so the comma,
# the line break and the continuation indentation all became part of a
# single bogus path string. Reuse the properly built path instead.
df <- cleanBoletimUrna(filePath = filePathSP,
                       keepVars = g$keep)
tmpDataFrame <- data.table::fread(file = filePathSP,
                                  encoding = "Latin-1",
                                  stringsAsFactors = FALSE)
# Extract the CD_ELEICAO column as a vector. The original
# `tmpDataFrame[CD_ELEICAO]` passed the election codes as data.table's
# `i` (row indices), which is almost certainly not what `vec1` implies.
vec1 <- tmpDataFrame[, CD_ELEICAO]
| /cleanData.R | no_license | avila/electionsBrazil | R | false | false | 1,596 | r | ## Library
library("dplyr")
source("./functions/cleanBoletimUrna.R")
## define paths
### globals
g <- list()
g$inputDataPath <- "/data/electionBrazil/bweb_estado/"
g$dataCleanPath <- "/data/electionBrazil/bweb_estado_clean"
g$listOfFiles <- list.files(g$inputDataPath)
g$tmp <- g$listOfFiles[1]
g$keep <- c("CD_ELEICAO",
"NM_VOTAVEL",
"NR_PARTIDO",
"NR_VOTAVEL",
"CD_MUNICIPIO",
"NM_MUNICIPIO",
"QT_VOTOS")
listOfFiles <- list.files(g$inputDataPath)
for (file in listOfFiles) {
cat(paste("reading:", file))
# check if already done
ufCode <- substr(file, 9,10)
if (any(grepl(ufCode, list.files(g$dataCleanPath)))) {
cat(" already there!\n")
next
}
filepath <- file.path(g$inputDataPath, file)
tmpDataFrame <- cleanBoletimUrna(filePath = filepath, keepVars = g$keep)
## Write to file
filepathWrite <- file.path(g$dataCleanPath)
write.csv2(x = tmpDataFrame,
file = paste(filepathWrite, paste0("clean_", ufCode, ".csv"), sep = "/"))
cat(" DONE!\n")
}
df <- cleanBoletimUrna(filePath = paste0("/data/electionBrazil/bweb_estado/,
bweb_1t_SP_101020182030.csv"),
keepVars = g$keep)
filePathSP <- file.path("/data/electionBrazil/bweb_estado/",
"bweb_1t_SP_101020182030.csv")
tmpDataFrame <- data.table::fread(file = filePathSP,
encoding = "Latin-1",
stringsAsFactors = FALSE)
vec1 <- tmpDataFrame[CD_ELEICAO]
|
library(data.table)
# Read the original data; "?" marks missing readings in this dataset,
# so na.strings keeps the numeric columns numeric.
DT <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
# Convert the "d/m/Y" date strings to Date
DT$Date <- as.Date(DT$Date, format = "%d/%m/%Y")
# Keep only the two target days (1-2 Feb 2007). Both bounds wrapped in
# as.Date() for consistency: the original compared the upper bound
# against a bare string, relying on implicit coercion.
data <- subset(DT, Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2"))
data$Global_active_power <- as.numeric(data$Global_active_power)
rm(DT)  # free the full dataset; only the 2-day subset is needed
# Combine date & time into a single POSIXct timestamp for the x axis
data$Datetime <- as.POSIXct(paste(data$Date, data$Time))
# Line chart of global active power over time
plot(data$Global_active_power~data$Datetime, xlab = "", ylab = "Global Active Power (kilowatts)", type = "l")
# Copy the on-screen plot to a 480x480 PNG and close the device
dev.copy(png, file = "plot2.png", height = 480, width = 480)
dev.off()
dev.off() | /Explo_PJ1/plot2.R | no_license | yluo42/Exploratory-Data-Analysis | R | false | false | 690 | r | library(data.table)
# Read the original data
DT <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
# Convert the date
DT$Date <- as.Date(DT$Date, format = "%d/%m/%Y")
# Subsetting by the date
data <- subset(DT, Date >= as.Date("2007-2-1") & Date <= "2007-2-2")
data$Global_active_power <- as.numeric(data$Global_active_power)
rm(DT)
# Convert the time & date into weekday
data$Datetime <- as.POSIXct(paste(data$Date, data$Time))
# Plotting
plot(data$Global_active_power~data$Datetime, xlab = "", ylab = "Global Active Power (kilowatts)", type = "l")
# Save to png file
dev.copy(png, file = "plot2.png", height = 480, width = 480)
dev.off() |
# ch5: matrix
score=sample(seq(1,100),20)
print(score)
length(score)
# convert the above vector into a 2-d obj
# R * C = total
# 1 * 20 = 20
# 2 * 10 = 20
# 4 * 5 = 20
# 5 * 4 = 20
# 10 * 2 = 20
# 20 * 1 = 20
# rows and columns represent dimensions
# to create matrix,
# i) data
# ii) rows OR columns
# iii) dimension names (optional)
# iv) order of creation - R/C
# by default, values are filled 'columnwise'
mxscore1=matrix(score,ncol=4)
print(mxscore1)
# create matrix with the foll:
# i) give names to rows/columns
# ii) fill the data 'row-wise'
cols=c('match1','match2','match3','match4')
rows=c('rohit','virat','saun','kartik','joe')
mxscore2=matrix(score,ncol=4,
byrow=T,
dimnames=list(rows,cols)
)
print(mxscore1)
print(mxscore2)
print(score)
# transpose a matrix
# converting R->C and C->R
mxscore1 = t(mxscore2)
print(mxscore1)
print(mxscore2)
# dimensions
dim(mxscore1)
# total elements
length(mxscore1)
# A1)
# create a matrix of marks obtained by 10 students in a class in 5 subjects.
# marks range from 1-50
# subjects are: maths,science,arts,english,history
# student names can be left blank or you can generate random names
# fill the data row-wise
# why 'by-row' important ?
# inventory data:
cols=c('itemcode','itemname','qty','rate','avail')
rows=c()
items=c(47900940,'color tv',19,93949,'Y',
3934991,'fridge',399,39491,'N',
888192,'musicplayer',10,165656,'Y')
# create the matrix
# wrong format
mx1=matrix(items,ncol=length(cols),
dimnames=list(rows,cols))
print(mx1)
# right format
mx1=matrix(items,ncol=length(cols),
byrow=T, dimnames=list(rows,cols))
mx1
# access the matrix - using the index pos for R,C
mxscore1[1] # r=1,c=1
mxscore1[1,] # first row
mxscore1[1:2] # first 2 rows
mxscore1[,1] # r=all, first column
mxscore1[,1:2] # r=all, first 2 columns
mxscore1[1:2,3:4] # r=1-2, c=3-4
mxscore1[c(1,3),c(1,3,5)] # r=1and3,c=1,3,5
# change value in a matrix
print(mxscore1)
mxscore1[1,1] = 17
mxscore1[4,]=0 # all columns in 4th row=0
print(mxscore1)
# matrix arithmetic
# Two sets of 6 random draws (without replacement) -> 3x2 matrices.
n1=sample(seq(1,20),6)
n2=sample(seq(51,70),6)
mx1=matrix(n1,ncol=2)
mx2=matrix(n2,ncol=2)
mx1
mx2
# arithmetic operations - element-wise
mx1+mx2 # addition
abs(mx1-mx2) # subtraction
mx1*mx2 # multiplication (element-wise, NOT the matrix product %*%)
round(mx1/mx2,3) # division
# for arithmetic ops, dimensions must be same
# else, there will be an error
mx2=matrix(n2,ncol=2)
mx1=matrix(n1,ncol=3)  # now 2x3, deliberately incompatible with 3x2 mx2
mx1
mx2
# NOTE: the next line intentionally raises "non-conformable arrays" as
# a demo; if this script is source()d (not run interactively) execution
# stops here.
mx1+mx2
# diagonal of a matrix
n1=sample(seq(1,20),16)
mx1=matrix(n1,ncol=4)
mx1
diag(mx1)
# changing the dimension of an existing matrix
dim(mx1)
# change 4,4 to a 2,8 dimension
mx1=matrix(mx1,ncol=8)  # refills column-wise from the old matrix's data
print(mx1)
dim(mx1)
# operations on a matrix
mxscore1
## 1) row-wise
apply(mxscore1,1,sum)
# total for the first and second match
apply(mxscore1,1,sum)[1]
apply(mxscore1,1,sum)[2]
# maximum of the scores
max(apply(mxscore1,1,sum))
# match-wise average scores
apply(mxscore1,1,mean)
# match-wise median scores
apply(mxscore1,1,median)
## 2) column-wise
mxscore1
apply(mxscore1,2,sum) # total of each player
# which player scored max runs
max(apply(mxscore1,2,sum))
# average score
apply(mxscore1,2,mean)
# lowest average
min(apply(mxscore1,2,mean))
# A3) total score = <>, average = <> for player='kartik'
print(paste('Kartik: Total score=',
apply(mxscore1,2,sum)[4],
'Average=',
apply(mxscore1,2,mean)[4]))
## matrix multiplication
v1=c(32,37,48,45,14,10)
mx1=matrix(v1,ncol=3,byrow=T); mx1
v1=seq(1:3); v1
mx1%*%v1
32*1 + 37*2 + 48*3
45*1 + 14*2 + 10*3
# adding new columns to matrix
# adding new rows to matrix
# create a matrix to store the average stock prices of a company for the 4 quarters for the years 2001-2010
# take price range from 105 - 775 with a 1 decimal precision
cols=c('Q1','Q2','Q3','Q4');cols
rows=seq(2001,2010); rows
total = length(rows) * length(cols); total
prices = round(runif(total,105,775),1)
stock=matrix(prices,
ncol=length(cols),
dimnames=list(rows,cols)
)
print(stock)
# add 2 new columns to the matrix
# high,low price for each year
lowp=round(runif(length(rows),90,140),1)
highp=round(runif(length(rows),600,1000),1)
lowp
highp
length(lowp)
length(highp)
stock
# column binding - to add new columns to matrix
stock = cbind(stock,low=lowp,high=highp)
print(stock)
# A4) create a new column 'tot_st_traded'
# measured in thousands
# range -> 1000-2500
tr=sample(seq(1000,2500),length(rows))
stock = cbind(stock,tot_st_tr=tr)
print(stock) | /ch5_matrix.R | no_license | MayurKolki/r_language_basics | R | false | false | 4,794 | r | # ch5: matrix
score=sample(seq(1,100),20)
print(score)
length(score)
# convert the above vector into a 2-d obj
# R * C = total
# 1 * 20 = 20
# 2 * 10 = 20
# 4 * 5 = 20
# 5 * 4 = 20
# 10 * 2 = 20
# 20 * 1 = 20
# rows and columns represent dimensions
# to create matrix,
# i) data
# ii) rows OR columns
# iii) dimension names (optional)
# iv) order of creation - R/C
# by default, values are filled 'columnwise'
mxscore1=matrix(score,ncol=4)
print(mxscore1)
# create matrix with the foll:
# i) give names to rows/columns
# ii) fill the data 'row-wise'
cols=c('match1','match2','match3','match4')
rows=c('rohit','virat','saun','kartik','joe')
mxscore2=matrix(score,ncol=4,
byrow=T,
dimnames=list(rows,cols)
)
print(mxscore1)
print(mxscore2)
print(score)
# transpose a matrix
# converting R->C and C->R
mxscore1 = t(mxscore2)
print(mxscore1)
print(mxscore2)
# dimensions
dim(mxscore1)
# total elements
length(mxscore1)
# A1)
# create a matrix of marks obtained by 10 students in a class in 5 subjects.
# marks range from 1-50
# subjects are: maths,science,arts,english,history
# student names can be left blank or you can generate random names
# fill the data row-wise
# why 'by-row' important ?
# inventory data:
cols=c('itemcode','itemname','qty','rate','avail')
rows=c()
items=c(47900940,'color tv',19,93949,'Y',
3934991,'fridge',399,39491,'N',
888192,'musicplayer',10,165656,'Y')
# create the matrix
# wrong format
mx1=matrix(items,ncol=length(cols),
dimnames=list(rows,cols))
print(mx1)
# right format
mx1=matrix(items,ncol=length(cols),
byrow=T, dimnames=list(rows,cols))
mx1
# access the matrix - using the index pos for R,C
mxscore1[1] # r=1,c=1
mxscore1[1,] # first row
mxscore1[1:2] # first 2 rows
mxscore1[,1] # r=all, first column
mxscore1[,1:2] # r=all, first 2 columns
mxscore1[1:2,3:4] # r=1-2, c=3-4
mxscore1[c(1,3),c(1,3,5)] # r=1and3,c=1,3,5
# change value in a matrix
print(mxscore1)
mxscore1[1,1] = 17
mxscore1[4,]=0 # all columns in 4th row=0
print(mxscore1)
# matrix arithmetic
n1=sample(seq(1,20),6)
n2=sample(seq(51,70),6)
mx1=matrix(n1,ncol=2)
mx2=matrix(n2,ncol=2)
mx1
mx2
# arithmetic operations - element-wise
mx1+mx2 # addition
abs(mx1-mx2) # subtraction
mx1*mx2 # multiplication
round(mx1/mx2,3) # division
# for arithmetic ops, dimensions must be same
# else, there will be an error
mx2=matrix(n2,ncol=2)
mx1=matrix(n1,ncol=3)
mx1
mx2
mx1+mx2
# diagonal of a matrix
n1=sample(seq(1,20),16)
mx1=matrix(n1,ncol=4)
mx1
diag(mx1)
# changing the dimension of an existing matrix
dim(mx1)
# change 4,4 to a 2,8 dimension
mx1=matrix(mx1,ncol=8)
print(mx1)
dim(mx1)
# operations on a matrix
mxscore1
## 1) row-wise
apply(mxscore1,1,sum)
# total for the first and second match
apply(mxscore1,1,sum)[1]
apply(mxscore1,1,sum)[2]
# maximum of the scores
max(apply(mxscore1,1,sum))
# match-wise average scores
apply(mxscore1,1,mean)
# match-wise median scores
apply(mxscore1,1,median)
## 2) column-wise
mxscore1
apply(mxscore1,2,sum) # total of each player
# which player scored max runs
max(apply(mxscore1,2,sum))
# average score
apply(mxscore1,2,mean)
# lowest average
min(apply(mxscore1,2,mean))
# A3) total score = <>, average = <> for player='kartik'
print(paste('Kartik: Total score=',
apply(mxscore1,2,sum)[4],
'Average=',
apply(mxscore1,2,mean)[4]))
## matrix multiplication
v1=c(32,37,48,45,14,10)
mx1=matrix(v1,ncol=3,byrow=T); mx1
v1=seq(1:3); v1
mx1%*%v1
32*1 + 37*2 + 48*3
45*1 + 14*2 + 10*3
# adding new columns to matrix
# adding new rows to matrix
# create a matrix to store the average stock prices of a company for the 4 quarters for the years 2001-2010
# take price range from 105 - 775 with a 1 decimal precision
cols=c('Q1','Q2','Q3','Q4');cols
rows=seq(2001,2010); rows
total = length(rows) * length(cols); total
prices = round(runif(total,105,775),1)
stock=matrix(prices,
ncol=length(cols),
dimnames=list(rows,cols)
)
print(stock)
# add 2 new columns to the matrix
# high,low price for each year
lowp=round(runif(length(rows),90,140),1)
highp=round(runif(length(rows),600,1000),1)
lowp
highp
length(lowp)
length(highp)
stock
# column binding - to add new columns to matrix
stock = cbind(stock,low=lowp,high=highp)
print(stock)
# A4) create a new column 'tot_st_traded'
# measured in thousands
# range -> 1000-2500
tr=sample(seq(1000,2500),length(rows))
stock = cbind(stock,tot_st_tr=tr)
print(stock) |
options(java.parameters = "-Xmx28g")
library(feather)
library(FSelector)
library(dplyr)
library(caret)
# Iteratively rank features with CFS (FSelector): run CFS on the
# remaining predictors, report the selected subset, drop it from the
# pool, and repeat until the pool is empty. Variables selected in
# earlier rounds are the most important.
#
# Args:
#   df         data.frame containing `target` and all predictor columns.
#   predictors character vector of candidate predictor names (default
#              preserves the original hard-coded set).
#   target     name of the response column (default "radius0_bin").
# Returns: invisibly NULL; results are printed via cat().
testCFS = function(df,
                   predictors = c('pressure', 'temperature', 'salinity',
                                  'month', 'freezing_point_delta', 'season'),
                   target = 'radius0_bin') {
  cat('Most important variables are displayed first:', '\n')
  independent_variables = predictors
  while (length(independent_variables) > 0) {
    f = as.formula(
      paste(target, ' ~ ', paste(independent_variables, collapse = '+'), sep = '')
    )
    variables = cfs(f, df)
    cat('Selected variables', '\n')
    cat(variables, '\n')
    cat('\n')
    # Guard against an infinite loop: if cfs() selects nothing (or only
    # names outside the remaining set) the pool would never shrink and
    # the original code would spin forever.
    remaining = independent_variables[! independent_variables %in% variables]
    if (length(remaining) == length(independent_variables)) {
      warning('cfs() selected no remaining variable; stopping early', call. = FALSE)
      break
    }
    independent_variables = remaining
  }
  cat('Done', '\n')
  invisible(NULL)
}
# Reproducible subsampling below.
set.seed(2019)
# Model formula shared by the filter methods (information gain, chi^2).
f = radius0_bin ~ pressure + temperature + salinity + month + freezing_point_delta + season
df = read_feather('/home/tloureiro/projects/arctic-analysis/data/produced-csvs/clean_final.feather')
#subsample keeping the proportions (limited memory to run cfs)
indices = createDataPartition(df$radius0_bin, p=0.7, list = FALSE)
df = df[indices,]
#CFS
cat('Running CFS against the subsampled dataset', '\n')
testCFS(df)
#Information Gain
cat('Running Information Gain against the subsampled dataset', '\n')
information.gain(f, df)
#Chi Square
cat('Running Chi Squared against the subsampled dataset', '\n')
chi.squared(f, df)
#comparison
# NOTE(review): the bar heights below look hand-transcribed from a
# previous run's output rather than computed from the results above --
# re-verify them whenever the dataset changes. ("fpd" abbreviates
# freezing_point_delta.)
barplot(c(3, 3, 2, 2, 2, 1), names.arg = c('month', 'season', 'fpd', 'temperature', 'salinity', 'pressure'), main = 'CFS (by reverse order)')
barplot(c(0.14, 0.09, 0.02, 0.01, 0.01, 0), names.arg = c('month', 'season', 'fpd', 'temperature', 'salinity', 'pressure'), main = 'Information Gain')
barplot(c(0.31, 0.22, 0.13, 0.12, 0.11, 0.01), names.arg = c('month', 'season', 'fpd', 'temperature', 'salinity', 'pressure'), main = 'Chi-squared')
| /feature-selection.r | no_license | tloureiro/captstone-course | R | false | false | 1,822 | r | options(java.parameters = "-Xmx28g")
library(feather)
library(FSelector)
library(dplyr)
library(caret)
testCFS = function(df) {
cat('Most important variables are displayed first:', '\n')
independent_variables = c('pressure', 'temperature', 'salinity', 'month', 'freezing_point_delta', 'season')
while(length(independent_variables) > 0) {
f = as.formula(
paste('radius0_bin ~ ', paste(independent_variables, collapse = '+'), sep='')
)
variables = cfs(f, df)
cat('Selected variables', '\n')
cat(variables, '\n')
cat('\n')
independent_variables = independent_variables[! independent_variables %in% variables]
}
cat('Done', '\n')
}
# Reproducibility for the stratified subsample below.
set.seed(2019)
# Model formula: predict binned radius from oceanographic + temporal features.
f = radius0_bin ~ pressure + temperature + salinity + month + freezing_point_delta + season
df = read_feather('/home/tloureiro/projects/arctic-analysis/data/produced-csvs/clean_final.feather')
#subsample keeping the proportions (limited memory to run cfs)
# caret::createDataPartition draws 70% stratified on the response.
indices = createDataPartition(df$radius0_bin, p=0.7, list = FALSE)
df = df[indices,]
#CFS
cat('Running CFS against the subsampled dataset', '\n')
testCFS(df)
#Information Gain
cat('Running Information Gain against the subsampled dataset', '\n')
information.gain(f, df)
#Chi Square
cat('Running Chi Squared against the subsampled dataset', '\n')
chi.squared(f, df)
#comparison
# NOTE(review): bar heights appear hand-transcribed from earlier output --
# confirm they match the current run.
barplot(c(3, 3, 2, 2, 2, 1), names.arg = c('month', 'season', 'fpd', 'temperature', 'salinity', 'pressure'), main = 'CFS (by reverse order)')
barplot(c(0.14, 0.09, 0.02, 0.01, 0.01, 0), names.arg = c('month', 'season', 'fpd', 'temperature', 'salinity', 'pressure'), main = 'Information Gain')
barplot(c(0.31, 0.22, 0.13, 0.12, 0.11, 0.01), names.arg = c('month', 'season', 'fpd', 'temperature', 'salinity', 'pressure'), main = 'Chi-squared')
|
library(ape)
# Read the Newick-format tree (ape::read.tree) from disk.
testtree <- read.tree("4359_0.txt")
# Remove the root; the unrooted topology is written out on the next line.
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="4359_0_unrooted.txt") | /codeml_files/newick_trees_processed/4359_0/rinput.R | no_license | DaniBoo/cyanobacteria_project | R | false | false | 135 | r | library(ape)
# Read the Newick-format tree (ape::read.tree) from disk.
testtree <- read.tree("4359_0.txt")
# Remove the root; the unrooted topology is written out on the next line.
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="4359_0_unrooted.txt") |
###
# training module to python scikit-learn algorithm
###
library(data.table)
library(caret)
# add any model specific package library commands
# set working directory
WORK.DIR <- "./src/L1_xtc1"  # modify to specify directory to contain model artifacts
# Common Functions and Global variables (also expected to define DATA.DIR,
# PYTHON_COMMAND, prepL1FeatureSet1, recordModelPerf, logLossEval --
# NOTE(review): inferred from usage below, confirm against CommonFunctions.R)
source("./src/CommonFunctions.R")
MODEL.NAME <- "ExtraTreeClassifier"
MODEL.COMMENT <- "All Features, Python model"
# Feature-preparation function used for both train and test sets.
PREPARE.MODEL.DATA <- prepL1FeatureSet1
# Level-0 model names whose predictions feed this level-1 stacker.
LEVEL0.MODELS <- c("L0_gbm2",
                   #"L0_rngr1",
                   "L0_xtc1",
                   "L0_xtc2",
                   "L0_xgb2",
                   "L0_xgb3")
# amount of data to train (fraction passed to createDataPartition)
FRACTION.TRAIN.DATA <- 1.0
# force recording model artifacts even if the score did not improve
FORCE_RECORDING_MODEL <- FALSE
# get training data (loads train1.raw, test.raw, ... into the global env)
load(paste0(DATA.DIR,"/train_calib_test.RData"))
train.df <- train1.raw
# extract subset for initial training (stratified on target)
set.seed(29)
idx <- createDataPartition(train.df$target,p=FRACTION.TRAIN.DATA,list=FALSE)
train.df <- train.df[idx,]
# prepare data for training
train.data <- PREPARE.MODEL.DATA(LEVEL0.MODELS,train.df)
# save prepared training data for Python function
# put response as first column in data set
write.table(cbind(response=train.data$response,train.data$predictors),
            file=paste0(WORK.DIR,"/py_train.tsv"),row.names = FALSE,
            sep="\t")
# invoke Python training model (train_model.py reads py_train.tsv and writes
# a "possible_model" file into WORK.DIR)
python.train.command <- paste(PYTHON_COMMAND,paste0(WORK.DIR,"/train_model.py"),WORK.DIR)
Sys.time()
time.data <- system.time(system(python.train.command))
time.data
# stopCluster(cl)
# prepare data for evaluation (test set, not training -- comment fixed)
test.data <- PREPARE.MODEL.DATA(LEVEL0.MODELS,test.raw)
write.table(test.data$predictors,file=paste0(WORK.DIR,"/py_test.tsv"),row.names = FALSE,
            sep="\t")
# execute Python prediction code against the freshly trained "possible_model"
python.test.command <- paste(PYTHON_COMMAND,paste0(WORK.DIR,"/make_prediction.py"),
                             WORK.DIR,
                             "possible_model",
                             "py_test.tsv",
                             "py_test_predictions.tsv")
system(python.test.command)
# get predictions from Python model
pred.probs <- fread(paste0(WORK.DIR,"/py_test_predictions.tsv"), sep="\t")
# Score: log-loss of the predicted Class_1 probability vs the true response.
score <- logLossEval(pred.probs[,Class_1],test.data$response)
score
# Record model performance: load the running score history for this model
# directory so the new score can be compared against the best one so far.
modelPerf.df <- read.delim(paste0(WORK.DIR,"/model_performance.tsv"),
                           stringsAsFactors=FALSE)
# Determine if the score improved.  A scalar if/else replaces ifelse() (the
# condition is length-1), and the empty-history case is handled explicitly so
# min() is never called on a zero-length vector (which warns and returns Inf).
improved <- if (nrow(modelPerf.df) == 0 || score < min(modelPerf.df$score)) "Yes" else "No"
# Append this run's metrics to the performance log (helper from
# CommonFunctions.R); bestTune/tune.grid/model.parms are "NA" because tuning
# happens on the Python side.
recordModelPerf(paste0(WORK.DIR,"/model_performance.tsv"),
                MODEL.NAME,
                time.data,
                train.data$predictors,
                score,
                improved=improved,
                bestTune="NA",
                tune.grid="NA",
                model.parms="NA",
                comment=paste0(MODEL.COMMENT))
# Re-read the log so it now includes the row just appended.
modelPerf.df <- read.delim(paste0(WORK.DIR,"/model_performance.tsv"),
                           stringsAsFactors=FALSE)
#display model performance record for this run
tail(modelPerf.df[,1:10],1)
# If the last recorded score beats all previous ones (or this is the very
# first score, or recording is forced), persist the model artifacts.
last.idx <- length(modelPerf.df$score)
if (last.idx == 1 || improved == "Yes" || FORCE_RECORDING_MODEL) {
  cat("found improved model, saving...\n")
  flush.console()
  # Timestamps contain spaces and colons, which are awkward in file names;
  # collapse both to underscores (replaces the duplicated gsub chains).
  sanitize.name <- function(x) gsub("[ :]", "_", x)
  stamp <- modelPerf.df$date.time[last.idx]
  # save R-based data (level-0 model list + prep function) for reproducibility
  file.name <- sanitize.name(paste0("model_", MODEL.NAME, "_", stamp, ".RData"))
  save(LEVEL0.MODELS, PREPARE.MODEL.DATA, file = paste0(WORK.DIR, "/", file.name))
  # promote the candidate Python model file to a timestamped artifact
  py.file.name <- sanitize.name(paste0("model_", MODEL.NAME, "_", stamp, ".PyData"))
  file.rename(paste0(WORK.DIR, "/possible_model"), paste0(WORK.DIR, "/", py.file.name))
  # establish pointer to current model
  writeLines(c(file.name, py.file.name), paste0(WORK.DIR, "/this_model"))
} else {
  cat("no improvement!!!\n")
  flush.console()
}
# clean up intermediate files no longer needed
file.remove(c(paste0(WORK.DIR,"/py_train.tsv"),paste0(WORK.DIR,"/py_test.tsv"),
              paste0(WORK.DIR,"/py_test_predictions.tsv")))
| /src/L1_xtc1/train_model.R | permissive | jimthompson5802/kaggle-BNP-Paribas | R | false | false | 4,418 | r | ###
# training module to python scikit-learn algorithm
###
library(data.table)
library(caret)
# add any model specific package library commands
# set working directory
WORK.DIR <- "./src/L1_xtc1" # modify to specify directory to contain model artififacts
# Common Functions and Global variables
source("./src/CommonFunctions.R")
MODEL.NAME <- "ExtraTreeClassifier"
MODEL.COMMENT <- "All Features, Python model"
PREPARE.MODEL.DATA <- prepL1FeatureSet1
LEVEL0.MODELS <- c("L0_gbm2",
#"L0_rngr1",
"L0_xtc1",
"L0_xtc2",
"L0_xgb2",
"L0_xgb3")
# amount of data to train
FRACTION.TRAIN.DATA <- 1.0
# force recording model flag
FORCE_RECORDING_MODEL <- FALSE
# get training data
load(paste0(DATA.DIR,"/train_calib_test.RData"))
train.df <- train1.raw
# extract subset for inital training
set.seed(29)
idx <- createDataPartition(train.df$target,p=FRACTION.TRAIN.DATA,list=FALSE)
train.df <- train.df[idx,]
# prepare data for training
train.data <- PREPARE.MODEL.DATA(LEVEL0.MODELS,train.df)
# save prepared training data for Python function
# put response as first column in data set
write.table(cbind(response=train.data$response,train.data$predictors),
file=paste0(WORK.DIR,"/py_train.tsv"),row.names = FALSE,
sep="\t")
# invoke Python training model
python.train.command <- paste(PYTHON_COMMAND,paste0(WORK.DIR,"/train_model.py"),WORK.DIR)
Sys.time()
time.data <- system.time(system(python.train.command))
time.data
# stopCluster(cl)
# prepare data for training
test.data <- PREPARE.MODEL.DATA(LEVEL0.MODELS,test.raw)
write.table(test.data$predictors,file=paste0(WORK.DIR,"/py_test.tsv"),row.names = FALSE,
sep="\t")
# execute Python prediction code
python.test.command <- paste(PYTHON_COMMAND,paste0(WORK.DIR,"/make_prediction.py"),
WORK.DIR,
"possible_model",
"py_test.tsv",
"py_test_predictions.tsv")
system(python.test.command)
# get predictions from Python model
pred.probs <- fread(paste0(WORK.DIR,"/py_test_predictions.tsv"), sep="\t")
score <- logLossEval(pred.probs[,Class_1],test.data$response)
score
# record Model performance
modelPerf.df <- read.delim(paste0(WORK.DIR,"/model_performance.tsv"),
stringsAsFactors=FALSE)
# determine if score improved
improved <- ifelse(score < min(modelPerf.df$score),"Yes","No")
recordModelPerf(paste0(WORK.DIR,"/model_performance.tsv"),
MODEL.NAME,
time.data,
train.data$predictors,
score,
improved=improved,
bestTune="NA",
tune.grid="NA",
model.parms="NA",
comment=paste0(MODEL.COMMENT))
modelPerf.df <- read.delim(paste0(WORK.DIR,"/model_performance.tsv"),
stringsAsFactors=FALSE)
#display model performance record for this run
tail(modelPerf.df[,1:10],1)
# if last score recorded is better than previous ones save model object
last.idx <- length(modelPerf.df$score)
if (last.idx == 1 || improved == "Yes" || FORCE_RECORDING_MODEL) {
cat("found improved model, saving...\n")
flush.console()
#yes we have improvement or first score, save R-based data
file.name <- paste0("model_",MODEL.NAME,"_",modelPerf.df$date.time[last.idx],".RData")
file.name <- gsub(" ","_",file.name)
file.name <- gsub(":","_",file.name)
save(LEVEL0.MODELS,PREPARE.MODEL.DATA,file=paste0(WORK.DIR,"/",file.name))
# save Python model data
py.file.name <- paste0("model_",MODEL.NAME,"_",modelPerf.df$date.time[last.idx],".PyData")
py.file.name <- gsub(" ","_",py.file.name)
py.file.name <- gsub(":","_",py.file.name)
file.rename(paste0(WORK.DIR,"/possible_model"),paste0(WORK.DIR,"/",py.file.name))
# estalish pointer to current model
writeLines(c(file.name,py.file.name),paste0(WORK.DIR,"/this_model"))
} else {
cat("no improvement!!!\n")
flush.console()
}
# clean up files no longer needed
file.remove(c(paste0(WORK.DIR,"/py_train.tsv"),paste0(WORK.DIR,"/py_test.tsv"),
paste0(WORK.DIR,"/py_test_predictions.tsv")))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GeoLight.R
\name{siteEstimate}
\alias{siteEstimate}
\title{siteEstimate}
\usage{
siteEstimate(
tFirst,
tSecond,
type,
twl,
degElevation,
method = "gamma",
parms = c(3.3, 0.8),
xlim = c(-180, 180),
ylim = c(-90, 90),
res = c(0.5, 0.5)
)
}
\arguments{
\item{tFirst}{vector of sunrise/sunset times (e.g. 2008-12-01 08:30).}
\item{tSecond}{vector of of sunrise/sunset times (e.g. 2008-12-01 17:30).}
\item{type}{vector of either 1 or 2, defining \code{tFirst} as sunrise or sunset respectively.}
\item{twl}{data.frame containing twilights and at least \code{tFirst}, \code{tSecond} and \code{type}}
\item{degElevation}{the sun elevation angle (in degrees) that defines twilight (e.g. -6 for "civil
twilight"). Either a single value or a \code{vector}.}
\item{method}{\code{character} string; only \code{gamma} and \code{log-normal} are implemented.}
\item{parms}{a \code{vector} describing the two parameters of the error density distribution (defined by \code{method}).}
\item{xlim}{the longitudinal boundaries for which the likelihood will be calculated.}
\item{ylim}{the latitudinal boundaries for which the likelihood will be calculated.}
\item{res}{the spatial resolution in degrees.}
}
\value{
A \code{list} with ...
}
\description{
...
}
\author{
Simeon Lisovski
}
| /man/siteEstimate.Rd | no_license | slisovski/GeoLight | R | false | true | 1,373 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GeoLight.R
\name{siteEstimate}
\alias{siteEstimate}
\title{siteEstimate}
\usage{
siteEstimate(
tFirst,
tSecond,
type,
twl,
degElevation,
method = "gamma",
parms = c(3.3, 0.8),
xlim = c(-180, 180),
ylim = c(-90, 90),
res = c(0.5, 0.5)
)
}
\arguments{
\item{tFirst}{vector of sunrise/sunset times (e.g. 2008-12-01 08:30).}
\item{tSecond}{vector of of sunrise/sunset times (e.g. 2008-12-01 17:30).}
\item{type}{vector of either 1 or 2, defining \code{tFirst} as sunrise or sunset respectively.}
\item{twl}{data.frame containing twilights and at least \code{tFirst}, \code{tSecond} and \code{type}}
\item{degElevation}{the sun elevation angle (in degrees) that defines twilight (e.g. -6 for "civil
twilight"). Either a single value or a \code{vector}.}
\item{method}{\code{character} string; only \code{gamma} and \code{log-normal} are implemented.}
\item{parms}{a \code{vector} describing the two parameters of the error density distribution (defined by \code{method}).}
\item{xlim}{the longitudinal boundaries for which the likelihood will be calculated.}
\item{ylim}{the latitudinal boundaries for which the likelihood will be calculated.}
\item{res}{the spatial resolution in degrees.}
}
\value{
A \code{list} with ...
}
\description{
...
}
\author{
Simeon Lisovski
}
|
#########################################################################################
# Prepared for Gabor's Data Analysis
#
# Data Analysis for Business, Economics, and Policy
# by Gabor Bekes and Gabor Kezdi
# Cambridge University Press 2021
#
# gabors-data-analysis.com
#
# License: Free to share, modify and use for educational purposes.
# Not to be used for commercial purposes.
# CHAPTER 21
# CH20A Founder/family ownership and quality of management
# using the wms-management dataset
# version 0.92 2020-03-08
#########################################################################################
# Clear memory
rm(list=ls())
library(tidyverse)
library(purrr)
library(haven)
library(stargazer)
library(MatchIt)
library(Matching) # masks dplyr select!!! #
library(gmodels)
getwd()
# set working directory
# option A: open material as project
# option B: set working directory for da_case_studies
# example: setwd("C:/Users/bekes.gabor/Documents/github/da_case_studies/")
# set data dir, load theme and functions
source("ch00-tech-prep/theme_bg.R")
source("ch00-tech-prep/da_helper_functions.R")
# data used
source("set-data-directory.R") #data_dir must be first defined #
use_case_dir <- file.path("ch21-ownership-management-quality/")
data_in <- use_case_dir
data_out <- use_case_dir
output <- paste0(use_case_dir,"output/")
create_output_if_doesnt_exist(output)
# This is the second part
# you must run ch21-wms-01-dataprep.R first.
# Read in data ------------------------------------------------------------
data <- read_csv(paste0(data_out, "wms_da_textbook-work.csv"))
data %>%
group_by(foundfam_owned) %>%
summarise (mean(management))
# Set variables to use -------------------------------------------------------
y_var <- "management"
x_var <- "foundfam_owned"
control_vars <- c("degree_nm", "degree_nm_sq", "compet_moder", "compet_strong",
"lnemp", "age_young", "age_old", "age_unknown")
control_vars_to_interact <- c("industry", "countrycode")
data %>%
dplyr::select(c(control_vars, control_vars_to_interact)) %>%
summary()
# *************************************************************
# * REGRESSIONS
# *************************************************************
# OLS with no control vars. -------------------------------------------------------
formula1 <- as.formula(paste0(y_var, " ~ ",x_var))
ols1 <- lm(formula1, data=data)
# OLS with all control vars -------------------------------------------------------
formula2 <- as.formula(paste0(y_var, " ~ ",x_var," + ",
paste(c(control_vars, control_vars_to_interact), collapse = " + ")))
ols2 <- lm(formula2, data=data)
# OLS with all controls + interactions -------------------------------------------------------
formula3 <- as.formula(paste(y_var, " ~ ",x_var," + ",
paste(control_vars_to_interact, collapse = ":"),
" + (", paste(control_vars, collapse = "+"),")*(",
paste(control_vars_to_interact, collapse = "+"),")",sep=""))
ols3 <- lm(formula3, data=data)
stargazer_r(
list_of_models = list(ols1, ols2, ols3),
keep.stat=c("n", "rsq"), keep = c(x_var, "Constant"), dep.var.labels.include = FALSE, dep.var.caption = "",
column.labels = c("'no confounders'", "'with confounders'", "'with confounders interacted'")) #%>%
#cat(.,file= paste0(output, "ch21-foundfam-reg1.tex"))
# *************************************************************
# * EXACT MATCHING
# *****************************************************************
Hmisc::describe(data$management)
data <- data %>%
mutate(
empbin5 = cut(emp_firm, quantile(emp_firm, seq(0,1,1/5)), include.lowest = TRUE, right = FALSE),
agecat = (age_young == TRUE) + 2*(age_mid == TRUE) + 3*(age_old == TRUE) + 4*(age_unknown == TRUE))
data_agg <- data %>%
group_by(degree_nm_bins, agecat, competition, empbin5, industry, countrycode) %>%
dplyr::summarise(
n = n(), n0 = sum(1-foundfam_owned), n1 = sum(foundfam_owned),
y0 = sum(management*(foundfam_owned == 0))/sum(1-foundfam_owned),
y1 = sum(management*(foundfam_owned == 1))/sum(foundfam_owned)
) %>%
ungroup()
# firms with/without exact match
data_agg %>%
group_by(n0 == 0, n1 == 0) %>%
summarise(n())
# random order just for the examples
set.seed(12345)
data_sample <- data_agg %>%
sample_n(size = 340) %>%
dplyr::select(industry, countrycode, degree_nm_bins, competition, agecat, empbin5, n1, n0, n)
# examples with founder/family only
data_sample %>%
.[1:19,] %>%
filter(n1==1 & n0==0)
# examples with other only:
data_sample %>%
.[1:19,] %>%
filter(n1==0 & n0==1)
# examples of similar firms unmatched
data_sample %>%
.[1:339,] %>%
filter(countrycode == "us" & industry == "food" & n == 1) %>%
arrange(countrycode, industry, degree_nm_bins, competition, agecat, empbin5, n)
# ATE/ATET
data_agg %>%
filter(n0>0 & n1>0) %>%
summarise(ATE = weighted.mean(y1-y0, n), ATET = weighted.mean(y1-y0, n1))
# *****************************************************************
# * Matching on the propensity score
# *****************************************************************
# NOTE: ATE is not calculated in the R code.
# Function only works with non-missing values
data_pscore <- data %>%
dplyr::select(c(y_var, x_var, control_vars, control_vars_to_interact)) %>%
na.omit()
# with all control vars -------------------------------------------------------
# Step 1 - Matching
formula_pscore1 <- as.formula(paste0(x_var, " ~ ",
paste(c(control_vars, control_vars_to_interact), collapse = " + ")))
mod_match <- matchit(formula_pscore1,
data = data_pscore,
method = 'nearest', distance = 'logit', replace=TRUE, estimand="ATT")
summary(mod_match)
# Step 2 - restrict data to matched observations (controls matched with
# replacement carry weights, used in Step 3 below)
data_match <- match.data(mod_match)
# Please note that the "number of matched observations" calculated by
# this code varies marginally from the one on p607 in the textbook.
dim(data_match)
# Step 3 - Estimate treatment effects
# NOTE: We use weights here,to account for control observations that were matchet to multiple treated osb
# This is different from weights used to estimate ATE!
reg_match <- lm(management ~ foundfam_owned,
data = data_match,
weights = data_match$weights
)
out1 <- summary(reg_match)
ATET_PSME1 <- out1$coefficients[2]
ATET_PSME1_SE <- out1$coefficients[2,2]
# with all controls + interactions -------------------------------------------------------
# Step 1 - Matching
formula_pscore2 <- as.formula(paste(x_var, " ~ " ,
paste(control_vars_to_interact, collapse = ":"),
" + (", paste(control_vars, collapse = "+"),")*(",
paste(control_vars_to_interact, collapse = "+"),")",sep=""))
mod_match2 <- matchit(formula_pscore2,
data = data_pscore,
method = 'nearest', distance = 'logit', replace=TRUE, estimand="ATT")
summary(mod_match2)
# Step 2 - restrict data to matched observations (interacted specification)
data_match2 <- match.data(mod_match2)
# Please note that the "number of matched observations" calculated by
# this code varies marginally from the one on p607 in the textbook.
dim(data_match2)
# Step 3 - Estimate treatment effects
# NOTE: We use weights here,to account for control observations that were matchet to multiple treated osb
# This is different from weights used to estimate ATE!
reg_match2 <- lm(management ~ foundfam_owned,
data = data_match2, weights = data_match2$weights)
out2 <- summary(reg_match2)
ATET_PSME2 <- out2$coefficients[2]
ATET_PSME2_SE <- out2$coefficients[2,2]
# *****************************************************************
# * CHECK common support
# *****************************************************************
# Country, competition, industry: cross-tabulate ownership against each
# categorical confounder to inspect common support between groups.
# NOTE(review): gmodels::CrossTable has no na.rm argument -- the flag is
# swallowed by "..." and likely has no effect; confirm intent.
ct_moder    <- CrossTable(data$foundfam_owned, data$compet_moder, na.rm = TRUE)
ct_strong   <- CrossTable(data$foundfam_owned, data$compet_strong, na.rm = TRUE)
ct_industry <- CrossTable(data$foundfam_owned, data$industry, na.rm = TRUE)
# renamed from "c" to avoid shadowing base::c
ct_country  <- CrossTable(data$foundfam_owned, data$countrycode, na.rm = TRUE)
cbind(ct_moder$prop.row, ct_strong$prop.row, ct_industry$prop.row, ct_country$prop.row)
# College Degree: distribution of degree_nm by ownership group (common
# support check).  na.rm = TRUE spelled out (never use T, which is
# reassignable).
data %>%
  group_by(foundfam_owned) %>%
  summarise(min = min(degree_nm, na.rm = TRUE),
            max = max(degree_nm, na.rm = TRUE),
            p1  = quantile(degree_nm, probs = 0.01, na.rm = TRUE),
            p5  = quantile(degree_nm, probs = 0.05, na.rm = TRUE),
            p95 = quantile(degree_nm, probs = 0.95, na.rm = TRUE),
            p99 = quantile(degree_nm, probs = 0.99, na.rm = TRUE),  # was q99; renamed for consistency with p1/p5/p95
            n   = n())
# Employment: distribution of firm size by ownership group (common support
# check).  na.rm = TRUE spelled out (never use T, which is reassignable).
data %>%
  group_by(foundfam_owned) %>%
  summarise(min = min(emp_firm, na.rm = TRUE),
            max = max(emp_firm, na.rm = TRUE),
            p1  = quantile(emp_firm, probs = 0.01, na.rm = TRUE),
            p5  = quantile(emp_firm, probs = 0.05, na.rm = TRUE),
            p95 = quantile(emp_firm, probs = 0.95, na.rm = TRUE),
            p99 = quantile(emp_firm, probs = 0.99, na.rm = TRUE),  # was q99; renamed for consistency with p1/p5/p95
            n   = n())
# * common support check passed
| /ch21-ownership-management-quality/ch21-wms-02-analysis.R | no_license | sindile/da_case_studies | R | false | false | 8,985 | r | #########################################################################################
# Prepared for Gabor's Data Analysis
#
# Data Analysis for Business, Economics, and Policy
# by Gabor Bekes and Gabor Kezdi
# Cambridge University Press 2021
#
# gabors-data-analysis.com
#
# License: Free to share, modify and use for educational purposes.
# Not to be used for commercial purposes.
# CHAPTER 21
# CH20A Founder/family ownership and quality of management
# using the wms-management dataset
# version 0.92 2020-03-08
#########################################################################################
# Clear memory
rm(list=ls())
library(tidyverse)
library(purrr)
library(haven)
library(stargazer)
library(MatchIt)
library(Matching) # masks dplyr select!!! #
library(gmodels)
getwd()
# set working directory
# option A: open material as project
# option B: set working directory for da_case_studies
# example: setwd("C:/Users/bekes.gabor/Documents/github/da_case_studies/")
# set data dir, load theme and functions
source("ch00-tech-prep/theme_bg.R")
source("ch00-tech-prep/da_helper_functions.R")
# data used
source("set-data-directory.R") #data_dir must be first defined #
use_case_dir <- file.path("ch21-ownership-management-quality/")
data_in <- use_case_dir
data_out <- use_case_dir
output <- paste0(use_case_dir,"output/")
create_output_if_doesnt_exist(output)
# This is the second part
# you must run ch21-wms-01-dataprep.R first.
# Read in data ------------------------------------------------------------
data <- read_csv(paste0(data_out, "wms_da_textbook-work.csv"))
data %>%
group_by(foundfam_owned) %>%
summarise (mean(management))
# Set variables to use -------------------------------------------------------
y_var <- "management"
x_var <- "foundfam_owned"
control_vars <- c("degree_nm", "degree_nm_sq", "compet_moder", "compet_strong",
"lnemp", "age_young", "age_old", "age_unknown")
control_vars_to_interact <- c("industry", "countrycode")
data %>%
dplyr::select(c(control_vars, control_vars_to_interact)) %>%
summary()
# *************************************************************
# * REGRESSIONS
# *************************************************************
# OLS with no control vars. -------------------------------------------------------
formula1 <- as.formula(paste0(y_var, " ~ ",x_var))
ols1 <- lm(formula1, data=data)
# OLS with all control vars -------------------------------------------------------
formula2 <- as.formula(paste0(y_var, " ~ ",x_var," + ",
paste(c(control_vars, control_vars_to_interact), collapse = " + ")))
ols2 <- lm(formula2, data=data)
# OLS with all controls + interactions -------------------------------------------------------
formula3 <- as.formula(paste(y_var, " ~ ",x_var," + ",
paste(control_vars_to_interact, collapse = ":"),
" + (", paste(control_vars, collapse = "+"),")*(",
paste(control_vars_to_interact, collapse = "+"),")",sep=""))
ols3 <- lm(formula3, data=data)
stargazer_r(
list_of_models = list(ols1, ols2, ols3),
keep.stat=c("n", "rsq"), keep = c(x_var, "Constant"), dep.var.labels.include = FALSE, dep.var.caption = "",
column.labels = c("'no confounders'", "'with confounders'", "'with confounders interacted'")) #%>%
#cat(.,file= paste0(output, "ch21-foundfam-reg1.tex"))
# *************************************************************
# * EXACT MATCHING
# *****************************************************************
Hmisc::describe(data$management)
data <- data %>%
mutate(
empbin5 = cut(emp_firm, quantile(emp_firm, seq(0,1,1/5)), include.lowest = TRUE, right = FALSE),
agecat = (age_young == TRUE) + 2*(age_mid == TRUE) + 3*(age_old == TRUE) + 4*(age_unknown == TRUE))
data_agg <- data %>%
group_by(degree_nm_bins, agecat, competition, empbin5, industry, countrycode) %>%
dplyr::summarise(
n = n(), n0 = sum(1-foundfam_owned), n1 = sum(foundfam_owned),
y0 = sum(management*(foundfam_owned == 0))/sum(1-foundfam_owned),
y1 = sum(management*(foundfam_owned == 1))/sum(foundfam_owned)
) %>%
ungroup()
# firms with/without exact match
data_agg %>%
group_by(n0 == 0, n1 == 0) %>%
summarise(n())
# random order just for the examples
set.seed(12345)
data_sample <- data_agg %>%
sample_n(size = 340) %>%
dplyr::select(industry, countrycode, degree_nm_bins, competition, agecat, empbin5, n1, n0, n)
# examples with founder/family only
data_sample %>%
.[1:19,] %>%
filter(n1==1 & n0==0)
# examples with other only:
data_sample %>%
.[1:19,] %>%
filter(n1==0 & n0==1)
# examples of similar firms unmatched
data_sample %>%
.[1:339,] %>%
filter(countrycode == "us" & industry == "food" & n == 1) %>%
arrange(countrycode, industry, degree_nm_bins, competition, agecat, empbin5, n)
# ATE/ATET
data_agg %>%
filter(n0>0 & n1>0) %>%
summarise(ATE = weighted.mean(y1-y0, n), ATET = weighted.mean(y1-y0, n1))
# *****************************************************************
# * Matching on the propensity score
# *****************************************************************
# NOTE: ATE is not calculated in the R code.
# Function only works with non-missing values
data_pscore <- data %>%
dplyr::select(c(y_var, x_var, control_vars, control_vars_to_interact)) %>%
na.omit()
# with all control vars -------------------------------------------------------
# Step 1 - Matching
formula_pscore1 <- as.formula(paste0(x_var, " ~ ",
paste(c(control_vars, control_vars_to_interact), collapse = " + ")))
mod_match <- matchit(formula_pscore1,
data = data_pscore,
method = 'nearest', distance = 'logit', replace=TRUE, estimand="ATT")
summary(mod_match)
# Step 2 - restrict data to matched
data_match <- match.data(mod_match)
# Please note that nhe "number of matched observations" calculated by
# this code varies marginally from the one on p607 in the textbook.
dim(data_match)
# Step 3 - Estimate treatment effects
# NOTE: We use weights here,to account for control observations that were matchet to multiple treated osb
# This is different from weights used to estimate ATE!
reg_match <- lm(management ~ foundfam_owned,
data = data_match,
weights = data_match$weights
)
out1 <- summary(reg_match)
ATET_PSME1 <- out1$coefficients[2]
ATET_PSME1_SE <- out1$coefficients[2,2]
# with all controls + interactions -------------------------------------------------------
# Step 1 - Matching
formula_pscore2 <- as.formula(paste(x_var, " ~ " ,
paste(control_vars_to_interact, collapse = ":"),
" + (", paste(control_vars, collapse = "+"),")*(",
paste(control_vars_to_interact, collapse = "+"),")",sep=""))
mod_match2 <- matchit(formula_pscore2,
data = data_pscore,
method = 'nearest', distance = 'logit', replace=TRUE, estimand="ATT")
summary(mod_match2)
# Step 2 - restrict data to matched
data_match2 <- match.data(mod_match2)
# Please note that nhe "number of matched observations" calculated by
# this code varies marginally from the one on p607 in the textbook.
dim(data_match2)
# Step 3 - Estimate treatment effects
# NOTE: We use weights here,to account for control observations that were matchet to multiple treated osb
# This is different from weights used to estimate ATE!
reg_match2 <- lm(management ~ foundfam_owned,
data = data_match2, weights = data_match2$weights)
out2 <- summary(reg_match2)
ATET_PSME2 <- out2$coefficients[2]
ATET_PSME2_SE <- out2$coefficients[2,2]
# *****************************************************************
# * CHECK common support
# *****************************************************************
# Country, cometition, industry
c1 <- CrossTable(data$foundfam_owned, data$compet_moder, na.rm=T )
c2 <- CrossTable(data$foundfam_owned, data$compet_strong, na.rm=T)
i <- CrossTable(data$foundfam_owned, data$industry, na.rm=T)
c <- CrossTable(data$foundfam_owned, data$countrycode, na.rm=T)
cbind(c1$prop.row, c2$prop.row, i$prop.row, c$prop.row)
# College Degree
data %>%
group_by(foundfam_owned) %>%
summarise(min = min(degree_nm , na.rm=T),
max = max(degree_nm , na.rm=T),
p1 = quantile(degree_nm , probs = 0.01, na.rm=T),
p5 = quantile(degree_nm , probs = 0.05, na.rm=T),
p95 = quantile(degree_nm , probs = 0.95, na.rm=T),
q99 = quantile(degree_nm, probs = 0.99, na.rm=T),
n = n())
# Employment
data %>%
group_by(foundfam_owned) %>%
summarise(min = min(emp_firm , na.rm=T),
max = max(emp_firm , na.rm=T),
p1 = quantile(emp_firm , probs = 0.01, na.rm=T),
p5 = quantile(emp_firm, probs = 0.05, na.rm=T),
p95 = quantile(emp_firm, probs = 0.95, na.rm=T),
q99 = quantile(emp_firm, probs = 0.99, na.rm=T),
n = n())
# * common support check passed
|
\name{multiRF}
\alias{multiRF}
\title{Computes Robinson-Foulds distance between a set of trees}
\usage{
multiRF(trees,quiet=FALSE,multi2di=FALSE)
}
\arguments{
\item{trees}{object of class \code{"multiPhylo"} consisting of two or more fully bifurcating, unrooted trees. If trees are rooted, they will be unrooted.}
\item{quiet}{logical argument indicating whether or not to run quietly. (Defaults to \code{FALSE}.)}
\item{multi2di}{logical argument indicating whether or not to resolve multifurcating trees. (Defaults to \code{FALSE}.)}
}
\description{
Computes the Robinson-Foulds (Robinson & Foulds 1981) distance between a set of trees in an object of class \code{"multiPhylo"}.
}
\details{
Computes the Robinson-Foulds distance between all phylogenies in an object of class \code{"multiPhylo"}. Uses \code{\link{prop.part}} internally for most of the heavy lifting.
}
\value{
A matrix containing distances.
}
\references{
Robinson, D. R., Foulds, L. R. (1981) Comparison of phylogenetic trees. \emph{Mathematical Biosciences}, \bold{53}, 131-147.
Revell, L. J. (2012) phytools: An R package for phylogenetic comparative biology (and other things). \emph{Methods Ecol. Evol.}, \bold{3}, 217-223.
}
\author{Liam Revell \email{liam.revell@umb.edu}}
\keyword{phylogenetics}
\keyword{utilities}
| /man/multiRF.Rd | no_license | olmen/phytools | R | false | false | 1,304 | rd | \name{multiRF}
\alias{multiRF}
\title{Computes Robinson-Foulds distance between a set of trees}
\usage{
multiRF(trees,quiet=FALSE,multi2di=FALSE)
}
\arguments{
\item{trees}{object of class \code{"multiPhylo"} consisting of two or more fully bifurcating, unrooted trees. If trees are rooted, they will be unrooted.}
\item{quiet}{logical argument indicating whether or not to run quietly. (Defaults to \code{FALSE}.)}
\item{multi2di}{logical argument indicating whether or not to resolve multifurcating trees. (Defaults to \code{FALSE}.)}
}
\description{
Computes the Robinson-Foulds (Robinson & Foulds 1981) distance between a set of trees in an object of class \code{"multiPhylo"}.
}
\details{
Computes the Robinson-Foulds distance between all phylogenies in an object of class \code{"multiPhylo"}. Uses \code{\link{prop.part}} internally for most of the heavy lifting.
}
\value{
A matrix containing distances.
}
\references{
Robinson, D. R., Foulds, L. R. (1981) Comparison of phylogenetic trees. \emph{Mathematical Biosciences}, \bold{53}, 131-147.
Revell, L. J. (2012) phytools: An R package for phylogenetic comparative biology (and other things). \emph{Methods Ecol. Evol.}, \bold{3}, 217-223.
}
\author{Liam Revell \email{liam.revell@umb.edu}}
\keyword{phylogenetics}
\keyword{utilities}
|
###############################################################################
#
# plot_3DRQAEntr for openface landmarks
#
# Reference:
# https://github.com/mxochicale/phd-thesis-code-data/blob/master/code/rscripts/rqa/hii/v00/Gb_rqa_topology_plots.R
#
#
# Miguel Xochicale [http://mxochicale.github.io]
#
###############################################################################
# OUTLINE:
# (0) Defining paths for main_path, r_scripts_path, ..., etc.
# (1) Loading libraries and functions
# (2) Setting DataSets paths and reading data
# (3) Reading data
#################
# Start the clock! (wall-clock timing for the whole script)
start.time <- Sys.time()
################################################################################
# (0) Defining paths for main_path, r_scripts_path, ..., etc.
# NOTE(review): the relative setwd() calls assume the script is launched from
# its own directory (tsrp/src/3DRQA within the repo) -- confirm before running
# from elsewhere.
homepath <- Sys.getenv("HOME")
r_scripts_path <- getwd()
setwd("../../../../")
repository_path <- getwd()
setwd("../")
root_path <- getwd()
################################################################################
# (1) Loading Functions and Libraries and Setting up digits
library(data.table) # for manipulating data
library(plot3D)
data_path <- '/data/dataset'
graphics_path <- '/tsrp/src/3DRQA'
#### Outcomes Graphics Path (where PNGs are written)
outcomes_graphics_path <- paste(repository_path,'/docs/figs', graphics_path, sep="")
### DataSet Path
main_data_path <- paste(repository_path, data_path, sep="")
#################################################################################
## (2) Setting DataSets paths and reading data
setwd(main_data_path)
################################################################################
# (3) Reading data: precomputed RQA results for the chosen signal source
signal_source<-'lorenz'
file_ext <- paste('RQA3D_', signal_source, '.dt',sep='')
RQA <- fread( file_ext, header=TRUE)
################################################################################
################################################################################
################################################################################
################################################################################
## RQA Metric Selection
# Only ENTR is plotted in this run; uncomment the line below to sweep several
# RQA measures in one execution of the loop that follows.
rqas <- c('ENTR')
#rqas <- c('REC','DET', 'RATIO', 'ENTR')
for (rqas_k in 1:length(rqas) ) {
rqask <- rqas[rqas_k]
message('############')
message('RQA: ',rqask)
Rk <- RQA[,.(
get(rqask)
), by=. (source, dim, tau, eps)]
#
#
#
################################################################################
# (5.0) Creating and Changing to PlotPath
plots_path <- paste( outcomes_graphics_path, sep='')
if (file.exists(plots_path)){
setwd(file.path(plots_path))
} else {
dir.create(plots_path, recursive=TRUE)
setwd(file.path(plots_path))
}
### Save Picture
image_width = 2000
image_height = 2000
#image_bg = "transparent"
image_bg = "white"
text.factor = 1
image_dpi <- text.factor * 100
width.calc <- image_width / image_dpi
height.calc <- image_height / image_dpi
zlim_max<-10
EPS<-c (0.1,0.2,0.5,0.7,1.0,1.2,1.5,1.7,2,3,4,5)
message('Plotting different 3DRQA with different eps')
for (eps_k in 1:length(EPS) ) {
recurrence_threshold<-EPS[eps_k]
message(recurrence_threshold)
A<-Rk[, range:= (eps== recurrence_threshold ), by=. (source, dim, tau, eps)]
A<-A[range == TRUE] # filter only true values for the condition "eps== recurrence_threshold"
A<-A[,range:=NULL] # delete range column
tau_i <- length(1:10)
dim_j <- length(1:10)
m<-as.matrix(A[,5])
mm<-matrix(m,tau_i,dim_j, byrow=TRUE)
filenameimage <- paste(signal_source, '_', rqask, '_', 'eps_', recurrence_threshold, '.png', sep='')
png(filenameimage,width=image_width, height=image_height, units="px", res=image_dpi, bg=image_bg)
persp3D(
x = 1:nrow(mm),
y = 1:ncol(mm),
z = mm,
xlab = "dim",
ylab = "tau",
zlab = "",
zlim = c(0,zlim_max),
#
# #phi = 0,
phi = 30,
#
# #theta = 0,
# #theta = 20,
theta = 30,
# #theta = 60,
# theta = 90,
# #theta = 120,
# #theta = 140,
# theta = 160,
# #theta = 190,
#
#
ticktype = "detailed",
nticks = 9,
colkey = list(length = 0.5, width = 0.8, shift = 0.0,cex.axis =4, cex.clab = 1),
lighting = FALSE, #If notFALSEthe facets will be illuminated, and colors may appear more bright
# #lphi = 90,
clab = rqask,# label for col key
bty = "b2",
# #space = 5,
# #d = 10,
cex.lab = 5,# change font size of the labels
cex.axis = 3,# change axis tick size to a very low size
# #contour = list(col = "grey", side = c("z"))
add=FALSE,
plot=TRUE
)
dev.off()
}
#for (eps_k in 1:length(EPS) ) {
################################################################################
}##end## for (rqas_k in 1:length(rqas) ) {
################################################################################
################################################################################
################################################################################
################################################################################
#################
# Stop the clock!
end.time <- Sys.time()
end.time - start.time
################################################################################
setwd(r_scripts_path) ## go back to the r-script source path
| /code/rscripts/tsrp/rscripts/EC_plot_3DRQAEntr.R | permissive | mxochicale/mlds2019 | R | false | false | 5,164 | r | ###############################################################################
#
# plot_3DRQAEntr for openface landmarks
#
# Reference:
# https://github.com/mxochicale/phd-thesis-code-data/blob/master/code/rscripts/rqa/hii/v00/Gb_rqa_topology_plots.R
#
#
# Miguel Xochicale [http://mxochicale.github.io]
#
###############################################################################
# OUTLINE:
# (0) Defining paths for main_path, r_scripts_path, ..., etc.
# (1) Loading libraries and functions
# (2) Setting DataSets paths and reading data
# (3) Reading data
#################
# Start the clock!
start.time <- Sys.time()
################################################################################
# (0) Defining paths for main_path, r_scripts_path, ..., etc.
homepath <- Sys.getenv("HOME")
r_scripts_path <- getwd()
setwd("../../../../")
repository_path <- getwd()
setwd("../")
root_path <- getwd()
################################################################################
# (1) Loading Functions and Libraries and Setting up digits
library(data.table) # for manipulating data
library(plot3D)
data_path <- '/data/dataset'
graphics_path <- '/tsrp/src/3DRQA'
#### Outcomes Graphics Path
outcomes_graphics_path <- paste(repository_path,'/docs/figs', graphics_path, sep="")
### DataSet Path
main_data_path <- paste(repository_path, data_path, sep="")
#################################################################################
## (2) Setting DataSets paths and reading data
setwd(main_data_path)
################################################################################
# (3) Reading data
signal_source<-'lorenz'
file_ext <- paste('RQA3D_', signal_source, '.dt',sep='')
RQA <- fread( file_ext, header=TRUE)
################################################################################
################################################################################
################################################################################
################################################################################
## RQA Metric Selection
rqas <- c('ENTR')
#rqas <- c('REC','DET', 'RATIO', 'ENTR')
for (rqas_k in 1:length(rqas) ) {
rqask <- rqas[rqas_k]
message('############')
message('RQA: ',rqask)
Rk <- RQA[,.(
get(rqask)
), by=. (source, dim, tau, eps)]
#
#
#
################################################################################
# (5.0) Creating and Changing to PlotPath
plots_path <- paste( outcomes_graphics_path, sep='')
if (file.exists(plots_path)){
setwd(file.path(plots_path))
} else {
dir.create(plots_path, recursive=TRUE)
setwd(file.path(plots_path))
}
### Save Picture
image_width = 2000
image_height = 2000
#image_bg = "transparent"
image_bg = "white"
text.factor = 1
image_dpi <- text.factor * 100
width.calc <- image_width / image_dpi
height.calc <- image_height / image_dpi
zlim_max<-10
EPS<-c (0.1,0.2,0.5,0.7,1.0,1.2,1.5,1.7,2,3,4,5)
message('Plotting different 3DRQA with different eps')
for (eps_k in 1:length(EPS) ) {
recurrence_threshold<-EPS[eps_k]
message(recurrence_threshold)
A<-Rk[, range:= (eps== recurrence_threshold ), by=. (source, dim, tau, eps)]
A<-A[range == TRUE] # filter only true values for the condition "eps== recurrence_threshold"
A<-A[,range:=NULL] # delete range column
tau_i <- length(1:10)
dim_j <- length(1:10)
m<-as.matrix(A[,5])
mm<-matrix(m,tau_i,dim_j, byrow=TRUE)
filenameimage <- paste(signal_source, '_', rqask, '_', 'eps_', recurrence_threshold, '.png', sep='')
png(filenameimage,width=image_width, height=image_height, units="px", res=image_dpi, bg=image_bg)
persp3D(
x = 1:nrow(mm),
y = 1:ncol(mm),
z = mm,
xlab = "dim",
ylab = "tau",
zlab = "",
zlim = c(0,zlim_max),
#
# #phi = 0,
phi = 30,
#
# #theta = 0,
# #theta = 20,
theta = 30,
# #theta = 60,
# theta = 90,
# #theta = 120,
# #theta = 140,
# theta = 160,
# #theta = 190,
#
#
ticktype = "detailed",
nticks = 9,
colkey = list(length = 0.5, width = 0.8, shift = 0.0,cex.axis =4, cex.clab = 1),
lighting = FALSE, #If notFALSEthe facets will be illuminated, and colors may appear more bright
# #lphi = 90,
clab = rqask,# label for col key
bty = "b2",
# #space = 5,
# #d = 10,
cex.lab = 5,# change font size of the labels
cex.axis = 3,# change axis tick size to a very low size
# #contour = list(col = "grey", side = c("z"))
add=FALSE,
plot=TRUE
)
dev.off()
}
#for (eps_k in 1:length(EPS) ) {
################################################################################
}##end## for (rqas_k in 1:length(rqas) ) {
################################################################################
################################################################################
################################################################################
################################################################################
#################
# Stop the clock!
end.time <- Sys.time()
end.time - start.time
################################################################################
setwd(r_scripts_path) ## go back to the r-script source path
|
# Load packages
library('ggplot2') # visualization
library('ggthemes') # visualization
library('scales') # visualization
library('dplyr') # data manipulation
library('mice') # imputation
library('randomForest') # classification algorithm
## Read Training and test data into R
train <- read.csv("titanic_train.csv", stringsAsFactors = FALSE)
test <- read.csv("titanic_test.csv", stringsAsFactors = FALSE)
## Bind rows using dplyr - binds same col names
require(dplyr)
full <- bind_rows(train,test)
str(full)
## Feature Engineering
?gsub()
full$Name
full$Title <- gsub('(.*, )|(\\..*)', '', full$Name)
##Show title count by sex
table(full$Sex,full$Title)
# Titles with very low cell counts to be combined to "rare" level
rare_title <- c('Dona', 'Lady', 'the Countess','Capt', 'Col', 'Don',
'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer')
full$Title[full$Title == 'Mlle'] <- 'Miss'
full$Title[full$Title == 'Ms'] <- 'Miss'
full$Title[full$Title == 'Mme'] <- 'Mrs'
full$Title[full$Title %in% rare_title] <- 'Rare Title'
# Show title counts by sex again
table(full$Sex, full$Title)
# Finally, grab surname from passenger name
full$Surname <- sapply(full$Name,function(x) strsplit(x, split = '[,.]')[[1]][1])
full$Surname
full$Fsize <- full$SibSp + full$Parch + 1
full$Family <- paste(full$Surname, full$Fsize, sep='_')
full$Family
# Use ggplot2 to visualize the relationship between family size & survival
require(ggplot2)
ggplot(full[1:891,], aes(x = Fsize, fill = factor(Survived))) +
geom_bar(stat='count', position='dodge') +
scale_x_continuous(breaks=c(1:11)) +
labs(x = 'Family Size') +
theme_few()
# Discretize family size
full$FsizeD[full$Fsize == 1] <- 'singleton'
full$FsizeD[full$Fsize < 5 & full$Fsize > 1] <- 'small'
full$FsizeD[full$Fsize > 4] <- 'large'
mosaicplot(table(full$FsizeD, full$Survived), main='Family Size by Survival', shade=TRUE)
full$Cabin[1:28]
# The first character is the deck. For example:
strsplit(full$Cabin[2], NULL)[[1]]
full$Deck<-factor(sapply(full$Cabin, function(x) strsplit(x, NULL)[[1]][1]))
full$Deck
full[c(62, 830), 'Embarked']
# Get rid of our missing passenger IDs
embark_fare <- full %>% filter(PassengerId != 62 & PassengerId != 830)
ggplot(embark_fare, aes(x = Embarked, y = Fare, fill = factor(Pclass))) +
geom_boxplot() +
geom_hline(aes(yintercept=80),
colour='red', linetype='dashed', lwd=2) +
scale_y_continuous(labels=dollar_format())
full$Embarked[c(62, 830)] <- 'C'
full[1044, ]
ggplot(full[full$Pclass == '3' & full$Embarked == 'S', ],
aes(x = Fare)) +
geom_density(fill = '#99d6ff', alpha=0.4) +
geom_vline(aes(xintercept=median(Fare, na.rm=T)),
colour='red', linetype='dashed', lwd=1) +
scale_x_continuous(labels=dollar_format()) +
theme_few()
full$Fare[1044] <- median(full[full$Pclass == '3' & full$Embarked == 'S', ]$Fare, na.rm = TRUE)
# Show number of missing Age values
sum(is.na(full$Age))
# Make variables factors into factors
factor_vars <- c('PassengerId','Pclass','Sex','Embarked',
'Title','Surname','Family','FsizeD')
full[factor_vars] <- lapply(full[factor_vars], function(x) as.factor(x))
# Set a random seed
set.seed(129)
# Perform mice imputation, excluding certain less-than-useful variables:
mice_mod <- mice(full[, !names(full) %in% c('PassengerId','Name','Ticket','Cabin','Family','Surname','Survived')], method='rf')
mice_output <- complete(mice_mod)
head(mice_output)
par(mfrow=c(1,2))
hist(full$Age, freq=F, main='Age: Original Data',
col='darkgreen', ylim=c(0,0.04))
hist(mice_output$Age, freq=F, main='Age: MICE Output',
col='lightgreen', ylim=c(0,0.04))
full$Age <- mice_output$Age
sum(is.na(full$Age))
# First we'll look at the relationship between age & survival
ggplot(full[1:891,], aes(Age, fill = factor(Survived))) +
geom_histogram() +
# I include Sex since we know (a priori) it's a significant predictor
facet_grid(.~Sex) +
theme_few()
# Create the column child, and indicate whether child or adult
full$Child[full$Age < 18] <- 'Child'
full$Child[full$Age >= 18] <- 'Adult'
table(full$Child, full$Survived)
full$Mother <- 'Not Mother'
full$Mother[full$Sex == 'female' & full$Parch > 0 & full$Age > 18 & full$Title != 'Miss'] <- 'Mother'
table(full$Mother, full$Survived)
full$Child <- factor(full$Child)
full$Mother <- factor(full$Mother)
md.pattern(full)
# Split the data back into a train set and a test set
train <- full[1:891,]
test <- full[892:1309,]
# Set a random seed
set.seed(754)
rf_model <- randomForest(factor(Survived) ~ Pclass + Sex + Age + SibSp + Parch +
Fare + Embarked + Title +
FsizeD + Child + Mother,
data = train)
par(mfrow = c(1,1))
plot(rf_model, ylim=c(0,0.36))
legend('topright', colnames(rf_model$err.rate), col=1:3, fill=1:3)
importance <- importance(rf_model)
varImportance <- data.frame(Variables = row.names(importance),
Importance = round(importance[ ,'MeanDecreaseGini'],2))
# Create a rank variable based on importance
rankImportance <- varImportance %>%
mutate(Rank = paste0('#',dense_rank(desc(Importance))))
ggplot(rankImportance, aes(x = reorder(Variables, Importance),
y = Importance, fill = Importance)) +
geom_bar(stat='identity') +
geom_text(aes(x = Variables, y = 0.5, label = Rank),
hjust=0, vjust=0.55, size = 4, colour = 'red') +
labs(x = 'Variables') +
coord_flip() +
theme_few()
# Predict using the test set
prediction <- predict(rf_model, test)
solution <- data.frame(PassengerID = test$PassengerId, Survived = prediction)
write.csv(solution, file = 'rf_mod_Solution.csv', row.names = F)
| /Titanic.R | no_license | RakeshKumarA/Titanic | R | false | false | 5,810 | r | # Load packages
library('ggplot2') # visualization
library('ggthemes') # visualization
library('scales') # visualization
library('dplyr') # data manipulation
library('mice') # imputation
library('randomForest') # classification algorithm
## Read Training and test data into R
train <- read.csv("titanic_train.csv", stringsAsFactors = FALSE)
test <- read.csv("titanic_test.csv", stringsAsFactors = FALSE)
## Bind rows using dplyr - binds same col names
require(dplyr)
full <- bind_rows(train,test)
str(full)
## Feature Engineering
?gsub()
full$Name
full$Title <- gsub('(.*, )|(\\..*)', '', full$Name)
##Show title count by sex
table(full$Sex,full$Title)
# Titles with very low cell counts to be combined to "rare" level
rare_title <- c('Dona', 'Lady', 'the Countess','Capt', 'Col', 'Don',
'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer')
full$Title[full$Title == 'Mlle'] <- 'Miss'
full$Title[full$Title == 'Ms'] <- 'Miss'
full$Title[full$Title == 'Mme'] <- 'Mrs'
full$Title[full$Title %in% rare_title] <- 'Rare Title'
# Show title counts by sex again
table(full$Sex, full$Title)
# Finally, grab surname from passenger name
full$Surname <- sapply(full$Name,function(x) strsplit(x, split = '[,.]')[[1]][1])
full$Surname
full$Fsize <- full$SibSp + full$Parch + 1
full$Family <- paste(full$Surname, full$Fsize, sep='_')
full$Family
# Use ggplot2 to visualize the relationship between family size & survival
require(ggplot2)
ggplot(full[1:891,], aes(x = Fsize, fill = factor(Survived))) +
geom_bar(stat='count', position='dodge') +
scale_x_continuous(breaks=c(1:11)) +
labs(x = 'Family Size') +
theme_few()
# Discretize family size
full$FsizeD[full$Fsize == 1] <- 'singleton'
full$FsizeD[full$Fsize < 5 & full$Fsize > 1] <- 'small'
full$FsizeD[full$Fsize > 4] <- 'large'
mosaicplot(table(full$FsizeD, full$Survived), main='Family Size by Survival', shade=TRUE)
full$Cabin[1:28]
# The first character is the deck. For example:
strsplit(full$Cabin[2], NULL)[[1]]
full$Deck<-factor(sapply(full$Cabin, function(x) strsplit(x, NULL)[[1]][1]))
full$Deck
full[c(62, 830), 'Embarked']
# Get rid of our missing passenger IDs
embark_fare <- full %>% filter(PassengerId != 62 & PassengerId != 830)
ggplot(embark_fare, aes(x = Embarked, y = Fare, fill = factor(Pclass))) +
geom_boxplot() +
geom_hline(aes(yintercept=80),
colour='red', linetype='dashed', lwd=2) +
scale_y_continuous(labels=dollar_format())
full$Embarked[c(62, 830)] <- 'C'
full[1044, ]
ggplot(full[full$Pclass == '3' & full$Embarked == 'S', ],
aes(x = Fare)) +
geom_density(fill = '#99d6ff', alpha=0.4) +
geom_vline(aes(xintercept=median(Fare, na.rm=T)),
colour='red', linetype='dashed', lwd=1) +
scale_x_continuous(labels=dollar_format()) +
theme_few()
full$Fare[1044] <- median(full[full$Pclass == '3' & full$Embarked == 'S', ]$Fare, na.rm = TRUE)
# Show number of missing Age values
sum(is.na(full$Age))
# Make variables factors into factors
factor_vars <- c('PassengerId','Pclass','Sex','Embarked',
'Title','Surname','Family','FsizeD')
full[factor_vars] <- lapply(full[factor_vars], function(x) as.factor(x))
# Set a random seed
set.seed(129)
# Perform mice imputation, excluding certain less-than-useful variables:
mice_mod <- mice(full[, !names(full) %in% c('PassengerId','Name','Ticket','Cabin','Family','Surname','Survived')], method='rf')
mice_output <- complete(mice_mod)
head(mice_output)
par(mfrow=c(1,2))
hist(full$Age, freq=F, main='Age: Original Data',
col='darkgreen', ylim=c(0,0.04))
hist(mice_output$Age, freq=F, main='Age: MICE Output',
col='lightgreen', ylim=c(0,0.04))
full$Age <- mice_output$Age
sum(is.na(full$Age))
# First we'll look at the relationship between age & survival
ggplot(full[1:891,], aes(Age, fill = factor(Survived))) +
geom_histogram() +
# I include Sex since we know (a priori) it's a significant predictor
facet_grid(.~Sex) +
theme_few()
# Create the column child, and indicate whether child or adult
full$Child[full$Age < 18] <- 'Child'
full$Child[full$Age >= 18] <- 'Adult'
table(full$Child, full$Survived)
full$Mother <- 'Not Mother'
full$Mother[full$Sex == 'female' & full$Parch > 0 & full$Age > 18 & full$Title != 'Miss'] <- 'Mother'
table(full$Mother, full$Survived)
full$Child <- factor(full$Child)
full$Mother <- factor(full$Mother)
md.pattern(full)
# Split the data back into a train set and a test set
train <- full[1:891,]
test <- full[892:1309,]
# Set a random seed
set.seed(754)
rf_model <- randomForest(factor(Survived) ~ Pclass + Sex + Age + SibSp + Parch +
Fare + Embarked + Title +
FsizeD + Child + Mother,
data = train)
par(mfrow = c(1,1))
plot(rf_model, ylim=c(0,0.36))
legend('topright', colnames(rf_model$err.rate), col=1:3, fill=1:3)
importance <- importance(rf_model)
varImportance <- data.frame(Variables = row.names(importance),
Importance = round(importance[ ,'MeanDecreaseGini'],2))
# Create a rank variable based on importance
rankImportance <- varImportance %>%
mutate(Rank = paste0('#',dense_rank(desc(Importance))))
ggplot(rankImportance, aes(x = reorder(Variables, Importance),
y = Importance, fill = Importance)) +
geom_bar(stat='identity') +
geom_text(aes(x = Variables, y = 0.5, label = Rank),
hjust=0, vjust=0.55, size = 4, colour = 'red') +
labs(x = 'Variables') +
coord_flip() +
theme_few()
# Predict using the test set
prediction <- predict(rf_model, test)
solution <- data.frame(PassengerID = test$PassengerId, Survived = prediction)
write.csv(solution, file = 'rf_mod_Solution.csv', row.names = F)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parseData.R
\name{rebuildTimeIncrement}
\alias{rebuildTimeIncrement}
\title{Rebuild Data to Time Design Point}
\usage{
rebuildTimeIncrement(temp, increment)
}
\arguments{
\item{temp}{dataframe with first column timestamp, all other columns numeric}
\item{increment}{designpoint frame (determined in setup$designpoint)}
}
\value{
updated dataframe with few points, and median values
}
\description{
We use a recent median up to a right-hand stop point.
setup$pareto determines if the point will be included.
[TODO] - missing data inline ... do not know if it is possible, but maybe control
}
| /mPowerEI/man/rebuildTimeIncrement.Rd | permissive | MonteShaffer/mPowerEI | R | false | true | 670 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parseData.R
\name{rebuildTimeIncrement}
\alias{rebuildTimeIncrement}
\title{Rebuild Data to Time Design Point}
\usage{
rebuildTimeIncrement(temp, increment)
}
\arguments{
\item{temp}{dataframe with first column timestamp, all other columns numeric}
\item{increment}{designpoint frame (determined in setup$designpoint)}
}
\value{
updated dataframe with few points, and median values
}
\description{
We use a recent median up to a right-hand stop point.
setup$pareto determines if the point will be included.
[TODO] - missing data inline ... do not know if it is possible, but maybe control
}
|
pollutantmean <- function(directory, pollutant, id = 1:332) {
## 'directory' is a character vector of length 1 indicating
## the location of the CSV files
filesdir <- directory
## 'pollutant' is a character vector of length 1 indicating
## the name of the pollutant for which we will calculate the
## mean; either "sulfate" or "nitrate".
poltype <- pollutant
## 'id' is an integer vector indicating the monitor ID numbers
## to be used, so the data in the corresponding files must be read &
## stored.
monitorids <- id
monitorcount <- (length(monitorids))
m <- 1
pollutiondata <- data.frame()
while (m <= monitorcount) {
filename <- paste(formatC(monitorids[m], width=3, flag = "0"), ".csv", sep = "")
pathname <- paste(filesdir, "/", filename, sep ="")
pollutiondata <- rbind(pollutiondata, read.csv(pathname))
m <- m + 1
}
onetype <- data.frame()
if (poltype == "sulfate") {
onetype <- pollutiondata[,2]
}else if (poltype == "nitrate") {
onetype <- pollutiondata[,3]
}else{
print("***********")
print("Invalid pollutant passed to function. Please re-enter.")
print("***********")
}
## Return the mean of the pollutant across all monitors list
## in the 'id' vector (ignoring NA values)
round(mean(onetype, na.rm = TRUE), 3)
} | /pollutantmean.R | no_license | BedgieBear/datasciencecoursera | R | false | false | 1,713 | r | pollutantmean <- function(directory, pollutant, id = 1:332) {
## 'directory' is a character vector of length 1 indicating
## the location of the CSV files
filesdir <- directory
## 'pollutant' is a character vector of length 1 indicating
## the name of the pollutant for which we will calculate the
## mean; either "sulfate" or "nitrate".
poltype <- pollutant
## 'id' is an integer vector indicating the monitor ID numbers
## to be used, so the data in the corresponding files must be read &
## stored.
monitorids <- id
monitorcount <- (length(monitorids))
m <- 1
pollutiondata <- data.frame()
while (m <= monitorcount) {
filename <- paste(formatC(monitorids[m], width=3, flag = "0"), ".csv", sep = "")
pathname <- paste(filesdir, "/", filename, sep ="")
pollutiondata <- rbind(pollutiondata, read.csv(pathname))
m <- m + 1
}
onetype <- data.frame()
if (poltype == "sulfate") {
onetype <- pollutiondata[,2]
}else if (poltype == "nitrate") {
onetype <- pollutiondata[,3]
}else{
print("***********")
print("Invalid pollutant passed to function. Please re-enter.")
print("***********")
}
## Return the mean of the pollutant across all monitors list
## in the 'id' vector (ignoring NA values)
round(mean(onetype, na.rm = TRUE), 3)
} |
#
# This test file has been generated by kwb.test::create_test_files()
#
test_that("text_matrix_to_numeric_matrix() works", {
expect_error(
kwb.readxl:::text_matrix_to_numeric_matrix()
# argument "x" is missing, with no default
)
})
| /tests/testthat/test-function-text_matrix_to_numeric_matrix.R | permissive | KWB-R/kwb.readxl | R | false | false | 249 | r | #
# This test file has been generated by kwb.test::create_test_files()
#
test_that("text_matrix_to_numeric_matrix() works", {
expect_error(
kwb.readxl:::text_matrix_to_numeric_matrix()
# argument "x" is missing, with no default
)
})
|
draft_game_data_2018_1 <- read.csv("C:\\Users\\sahan\\OneDrive\\Documents\\Projects\\CMSAC2020\\game_player_draft_data.csv")
draft_game_data_2018_clean <- draft_game_data_2018_1 %>%
mutate(week = cut(as.Date(dateGame), "week"), winBool = ifelse(outcomeGame == "W", 1, 0)) %>%
group_by(namePlayer, week) %>%
summarize(avg_game_score = mean(game_score),
numGames = n(),
avg_box_plusminus = mean(plusminus),
avg_minutes = mean(minutes),
avg_pct_fg3 = mean(pctFG3),
avg_pct_fg2 = mean(pctFG2),
win_percent = mean(winBool),
avg_fgm = mean(fgm),
avg_fga = mean(fga),
avg_pctfg = mean(pctFG),
avg_pctFT = mean(pctFT),
avg_oreb = mean(oreb),
avg_tov = mean(tov),
avg_dreb = mean(dreb),
avg_stl = mean(stl),
avg_blk = mean(blk),
avg_ast = mean(ast),
avg_treb = mean(treb),
avg_pf = mean(pf),
avg_pts = mean(pts),
avg_countDaysRestPlayer = mean(countDaysRestPlayer)) %>%
left_join(select(draft_30, namePlayer, numberRoundPick), by = "namePlayer") %>%
arrange(numberRoundPick) %>%
mutate(week = as.Date(week)) %>%
left_join(timeseries_draft_2018_clean, by = c("namePlayer", "week")) %>%
left_join(select(df_rosters_2019, namePlayer, weightLBS, heightInches, groupPosition), by = "namePlayer") %>%
select(-numberRoundPick.x) %>% rename(numberRoundPick = numberRoundPick.y)
write.csv(draft_game_data_2018_clean, "C:\\Users\\sahan\\OneDrive\\Documents\\Projects\\CMSAC2020\\draft_2018_clean.csv")
reddit_draft_data <- read.csv("C:\\Users\\sahan\\Downloads\\combo_scored_1.csv")
reddit_draft_data <- reddit_draft_data %>%
mutate(sentiment_week = sentiment / 7,
positive_week = positive/ 7,
negative_week = negative / 7)
reddit_draft_data$week = as.Date(reddit_draft_data$week)
draft_data_combined <- reddit_draft_data %>%
rename(namePlayer = player_name) %>%
inner_join(draft_game_data_2018_clean, by = c("namePlayer", "week")) %>%
arrange(numberRoundPick) %>%
mutate(wiki_per_100 = wiki_views / 100)
write.csv(draft_data_combined, "C:\\Users\\sahan\\OneDrive\\Documents\\Projects\\CMSAC2020\\draft_gamereddit_clean.csv")
#Comment score and controversiality
reddit_comment <- read_xlsx("C:\\Users\\sahan\\Downloads\\final_reddit_datescorrected.xlsx")
reddit_comment$comm_date <- as.Date(reddit_comment$comm_date)
reddit_comment <- reddit_comment %>%
filter((comm_date > as.Date("2018-10-16")) & ((comm_date < as.Date("2019-04-10"))) )
reddit_comment_week <- reddit_comment %>%
mutate(week = cut(comm_date, "week")) %>%
group_by(player_name, week) %>%
summarize(avg_commentscoreperday = sum(comment_score) / 7,
avg_controversial = sum(controversiality) / 7)
reddit_draft_comment_data <- left_join(reddit_comment_week, reddit_draft_data, by = c("player_name", "week"))
write.csv(reddit_draft_comment_data, "C:\\Users\\sahan\\OneDrive\\Documents\\Projects\\CMSAC2020\\all_reddit_data.csv")
#EDA
#Normality check
draft_game_data_2018_clean %>%
ggplot(aes(x = avg_box_plusminus)) +
geom_density()
draft_game_data_2018_clean %>%
ggplot(aes(x = avg_minutes, y = avg_web_hits)) +
geom_point()
draft_game_data_2018_clean %>%
filter(win_percent %in% c(0 , 1)) %>%
ggplot(aes(x = as.factor(win_percent), y = avg_web_hits)) +
geom_boxplot()
draft_game_data_2018_clean %>%
ggplot(aes(x = avg_minutes, y = avg_web_hits)) +
geom_point(alpha = 0.2)
#draft data combined
draft_data_combined %>%
ggplot(aes(x = avg_game_score)) +
geom_histogram(fill = "#eb1933", color = "black") +
theme_bw() +
labs(
x = "Average game score",
title = "Distribution of Average game score"
) +
theme(
plot.title = element_text(hjust="0.5")
)
draft_data_combined %>%
ggplot(aes(x = scale(sentiment), y = win_percent)) +
geom_jitter()
draft_data_combined %>%
filter(sentiment < 0) %>%
ggplot(aes(x = positive)) +
geom_density()
draft_data_combined %>%
group_by(numberRoundPick) %>%
summarize(gamescore = mean(avg_game_score), senti = mean(sentiment_week), webhit = mean(avg_yt_hits)) %>%
ggplot(aes(x = numberRoundPick, y = senti)) +
geom_point() +
xlab("Number Round Pick") + ylab("Average Sentiment score") +
theme_bw()
#Hierarchial Clustering
draft_scaled_data <- draft_data_combined %>%
select(wiki_per_100, avg_web_hits, avg_news_hits, avg_yt_hits, positive_week, negative_week) %>%
scale() %>%
as.data.frame()
nba_draft_hclust <-
hclust(dist(draft_scaled_data),
method = "complete")
hc_player_clusters <-
cutree(nba_draft_hclust,
k = 2)
draft_data_combined <-draft_data_combined %>%
mutate(player_hc_clusters =
as.factor(hc_player_clusters))
draft_data_combined %>%
mutate(player_hc_clusters =
as.factor(hc_player_clusters)) %>%
ggplot(aes(x = avg_web_hits, y = sentiment_week,
color = player_hc_clusters)) +
geom_point() +
theme_bw() +
labs(x = " Average web hits",
y = "Sentiment score",
color = "Cluster",
title = "Scatter plot of Average Web hits and Sentiment Score") +
theme(legend.position = "bottom") + scale_color_manual(values =c("#eb1933", "#2151a1"))
# What is cluster 3. The players have high average game score and average minutes but popularity metrics doesnt capture this
# Deandre Ayton and Trae Young are only in cluster 3
# In cluster 3, the players during that week weren't very popular but people had a highly positive sentiment towards them.
# Cluster 2 : not popular, game score is low, sentiment score is low
# Cluster 1 : a bit more popular than cluster 2; sentiment is the same as cluster 2; game score is a little better than
# Cluster 4 : Most popular category, average game scores are still high but not as high as cluster 3
draft_data_combined %>%
mutate(player_hc_clusters =
as.factor(hc_player_clusters)) %>%
ggplot(aes(x = player_hc_clusters, y = avg_game_score)) +
geom_violin() +
geom_boxplot(width = 0.2, color = "#eb1933", size = 0.8 ) +
theme_bw() +
labs(
x = "Clusters",
y = "Average Game Score",
title = "Box Plot of Average game score for the 2 clusters "
) +
theme (
plot.title = element_text(hjust = "0.5")
)
draft_data_combined %>%
mutate(player_hc_clusters =
as.factor(hc_player_clusters)) %>%
ggplot(aes(x = player_hc_clusters, y = avg_pts)) +
geom_violin() +
geom_boxplot(width = 0.2, color = "#2151a1", size = 0.8) +
theme_bw() +
labs(
x = "Clusters",
y = "Average Points",
title = "Box Plot of Average points for the 2 clusters "
) +
theme(
axis.title = element_text(hjust = "0.5")
)
#Gaussian Mixture Models
library(mclust)
draft_mclust <- Mclust(draft_scaled_data)
summary(draft_mclust)
draft_mclust$classification
draft_data_combined %>%
mutate(cluster = as.factor(draft_mclust$classification)) %>%
ggplot(aes(x = avg_web_hits, y = wiki_per_100,
color = cluster)) +
geom_point() +
ggthemes::scale_color_colorblind() +
theme_bw() +
labs(x = " Average web hits",
y = "Wiki views in 100s",
color = "Cluster") +
theme(legend.position = "bottom")
#Time series analysis
#Top 5 picks
draft_game_data_2018_clean$week <- as.Date(draft_game_data_2018_clean$week)
draft_game_data_2018_clean %>%
group_by(week) %>%
draft_game_data_2018_clean %>%
filter(namePlayer == "Deandre Ayton") %>%
pivot_longer(c("avg_game_score", "avg_web_hits"),
names_to = "Name",
values_to = "value") %>%
ggplot (aes(x = week, y = value)) +
geom_line()+
facet_wrap(~Name, scales = "free_y") +
theme_bw()
timeseries_draft_2018_clean$week <- as.Date(timeseries_draft_2018_clean$week)
timeseries_draft_2018_clean %>%
filter(numberRoundPick < 5) %>%
pivot_longer(c("wiki_views", "avg_web_hits", "avg_yt_hits"),
names_to = "names",
values_to = "values") %>%
ggplot(aes(x = week, y = values)) +
geom_line(aes(color = namePlayer), width = 1.2) +
facet_wrap(~names, scales = "free_y", nrow = 2) +
theme_classic()
draft_game_data_2018_clean %>%
filter(numberRoundPick < 4) %>%
pivot_longer(c("wiki_views", "avg_web_hits", "avg_yt_hits", "avg_game_score"),
names_to = "names",
values_to = "values") %>%
ggplot(aes(x = week, y = values)) +
geom_line(aes(color = namePlayer), width = 1.2) +
facet_wrap(~names, scales = "free_y", nrow = 2) +
theme_classic()
#Clustering for sentiment metrics
reddit_draft_comment_data <- read.csv("C:\\Users\\sahan\\OneDrive\\Documents\\Projects\\CMSAC2020\\all_reddit_data.csv")
draft_scaled_data_comment <- reddit_draft_comment_data %>%
select(sentiment_week, avg_commentscoreperday) %>%
filter(avg_commentscoreperday < 3000) %>%
scale() %>%
as.data.frame() %>%
na.omit()
nba_draft_hclust <-
hclust(dist(draft_scaled_data_comment),
method = "complete")
hc_player_clusters <-
cutree(nba_draft_hclust,
k = 4)
reddit_draft_comment_data %>%
filter((!is.na(avg_commentscoreperday)) & (!is.na(sentiment_week))) %>%
filter(avg_commentscoreperday < 3000) %>%
#na.omit() %>%
mutate(player_hc_clusters =
as.factor(hc_player_clusters)) %>%
ggplot(aes(x = sentiment_week, y = avg_commentscoreperday,
color = player_hc_clusters)) +
geom_point() +
theme_bw() +
labs(x = "Sentiment Score",
y = "Average Comment Score",
color = "Cluster",
title = "Scatterplot of Sentiment and comment score") +
theme(legend.position = "bottom",
plot.title = element_text(hjust = "0.5")) +
scale_color_manual(values = c("#eb1933", "#2151a1", "black", "#a9a9a9") )
#Cluster 1 :- Sentiment scores are low but comment scores are also quite low. This indicated that the plaer is controversial
#Cluster 2: Higher sentiment scores and relatively higher comment scores. The player is perceived more positive than Cluster 1 players
#Cluster 3: Higherst sentiment scores but comment scores are in between cluster 2 and cluster 1. The player is perceived most positively
reddit_draft_comment_data %>%
filter((!is.na(avg_commentscoreperday)) & (!is.na(sentiment_week))) %>%
filter(avg_commentscoreperday < 3000) %>%
rename(namePlayer = player_name) %>%
left_join(dplyr::select(draft_data_combined, c("week", "namePlayer", "avg_game_score")), by = c("namePlayer", "week")) %>%
mutate(player_hc_clusters =
as.factor(hc_player_clusters)) %>%
ggplot(aes(x = player_hc_clusters, y = avg_game_score)) +
#geom_violin() +
geom_boxplot(width = 0.2, color = "#2151a1", size = 0.8) +
labs(
x = "Clusters", y = "Average game score",
title = "Box plot of Average game score for the 4 clusters"
) +
theme_bw() +
theme(
plot.title = element_text(hjust = "0.5")
)
| /Stat_popularity.R | no_license | srayan00/nba-social-media | R | false | false | 10,994 | r | draft_game_data_2018_1 <- read.csv("C:\\Users\\sahan\\OneDrive\\Documents\\Projects\\CMSAC2020\\game_player_draft_data.csv")
draft_game_data_2018_clean <- draft_game_data_2018_1 %>%
mutate(week = cut(as.Date(dateGame), "week"), winBool = ifelse(outcomeGame == "W", 1, 0)) %>%
group_by(namePlayer, week) %>%
summarize(avg_game_score = mean(game_score),
numGames = n(),
avg_box_plusminus = mean(plusminus),
avg_minutes = mean(minutes),
avg_pct_fg3 = mean(pctFG3),
avg_pct_fg2 = mean(pctFG2),
win_percent = mean(winBool),
avg_fgm = mean(fgm),
avg_fga = mean(fga),
avg_pctfg = mean(pctFG),
avg_pctFT = mean(pctFT),
avg_oreb = mean(oreb),
avg_tov = mean(tov),
avg_dreb = mean(dreb),
avg_stl = mean(stl),
avg_blk = mean(blk),
avg_ast = mean(ast),
avg_treb = mean(treb),
avg_pf = mean(pf),
avg_pts = mean(pts),
avg_countDaysRestPlayer = mean(countDaysRestPlayer)) %>%
left_join(select(draft_30, namePlayer, numberRoundPick), by = "namePlayer") %>%
arrange(numberRoundPick) %>%
mutate(week = as.Date(week)) %>%
left_join(timeseries_draft_2018_clean, by = c("namePlayer", "week")) %>%
left_join(select(df_rosters_2019, namePlayer, weightLBS, heightInches, groupPosition), by = "namePlayer") %>%
select(-numberRoundPick.x) %>% rename(numberRoundPick = numberRoundPick.y)
write.csv(draft_game_data_2018_clean, "C:\\Users\\sahan\\OneDrive\\Documents\\Projects\\CMSAC2020\\draft_2018_clean.csv")
reddit_draft_data <- read.csv("C:\\Users\\sahan\\Downloads\\combo_scored_1.csv")
reddit_draft_data <- reddit_draft_data %>%
mutate(sentiment_week = sentiment / 7,
positive_week = positive/ 7,
negative_week = negative / 7)
reddit_draft_data$week = as.Date(reddit_draft_data$week)
draft_data_combined <- reddit_draft_data %>%
rename(namePlayer = player_name) %>%
inner_join(draft_game_data_2018_clean, by = c("namePlayer", "week")) %>%
arrange(numberRoundPick) %>%
mutate(wiki_per_100 = wiki_views / 100)
write.csv(draft_data_combined, "C:\\Users\\sahan\\OneDrive\\Documents\\Projects\\CMSAC2020\\draft_gamereddit_clean.csv")
#Comment score and controversiality
reddit_comment <- read_xlsx("C:\\Users\\sahan\\Downloads\\final_reddit_datescorrected.xlsx")
reddit_comment$comm_date <- as.Date(reddit_comment$comm_date)
reddit_comment <- reddit_comment %>%
filter((comm_date > as.Date("2018-10-16")) & ((comm_date < as.Date("2019-04-10"))) )
reddit_comment_week <- reddit_comment %>%
mutate(week = cut(comm_date, "week")) %>%
group_by(player_name, week) %>%
summarize(avg_commentscoreperday = sum(comment_score) / 7,
avg_controversial = sum(controversiality) / 7)
reddit_draft_comment_data <- left_join(reddit_comment_week, reddit_draft_data, by = c("player_name", "week"))
write.csv(reddit_draft_comment_data, "C:\\Users\\sahan\\OneDrive\\Documents\\Projects\\CMSAC2020\\all_reddit_data.csv")
#EDA
#Normality check
draft_game_data_2018_clean %>%
ggplot(aes(x = avg_box_plusminus)) +
geom_density()
draft_game_data_2018_clean %>%
ggplot(aes(x = avg_minutes, y = avg_web_hits)) +
geom_point()
draft_game_data_2018_clean %>%
filter(win_percent %in% c(0 , 1)) %>%
ggplot(aes(x = as.factor(win_percent), y = avg_web_hits)) +
geom_boxplot()
draft_game_data_2018_clean %>%
ggplot(aes(x = avg_minutes, y = avg_web_hits)) +
geom_point(alpha = 0.2)
#draft data combined
draft_data_combined %>%
ggplot(aes(x = avg_game_score)) +
geom_histogram(fill = "#eb1933", color = "black") +
theme_bw() +
labs(
x = "Average game score",
title = "Distribution of Average game score"
) +
theme(
plot.title = element_text(hjust="0.5")
)
draft_data_combined %>%
ggplot(aes(x = scale(sentiment), y = win_percent)) +
geom_jitter()
draft_data_combined %>%
filter(sentiment < 0) %>%
ggplot(aes(x = positive)) +
geom_density()
draft_data_combined %>%
group_by(numberRoundPick) %>%
summarize(gamescore = mean(avg_game_score), senti = mean(sentiment_week), webhit = mean(avg_yt_hits)) %>%
ggplot(aes(x = numberRoundPick, y = senti)) +
geom_point() +
xlab("Number Round Pick") + ylab("Average Sentiment score") +
theme_bw()
#Hierarchial Clustering
draft_scaled_data <- draft_data_combined %>%
select(wiki_per_100, avg_web_hits, avg_news_hits, avg_yt_hits, positive_week, negative_week) %>%
scale() %>%
as.data.frame()
nba_draft_hclust <-
hclust(dist(draft_scaled_data),
method = "complete")
hc_player_clusters <-
cutree(nba_draft_hclust,
k = 2)
draft_data_combined <-draft_data_combined %>%
mutate(player_hc_clusters =
as.factor(hc_player_clusters))
draft_data_combined %>%
mutate(player_hc_clusters =
as.factor(hc_player_clusters)) %>%
ggplot(aes(x = avg_web_hits, y = sentiment_week,
color = player_hc_clusters)) +
geom_point() +
theme_bw() +
labs(x = " Average web hits",
y = "Sentiment score",
color = "Cluster",
title = "Scatter plot of Average Web hits and Sentiment Score") +
theme(legend.position = "bottom") + scale_color_manual(values =c("#eb1933", "#2151a1"))
# What is cluster 3. The players have high average game score and average minutes but popularity metrics doesnt capture this
# Deandre Ayton and Trae Young are only in cluster 3
# In cluster 3, the players during that week weren't very popular but people had a highly positive sentiment towards them.
# Cluster 2 : not popular, game score is low, sentiment score is low
# Cluster 1 : a bit more popular than cluster 2; sentiment is the same as cluster 2; game score is a little better than
# Cluster 4 : Most popular category, average game scores are still high but not as high as cluster 3
draft_data_combined %>%
mutate(player_hc_clusters =
as.factor(hc_player_clusters)) %>%
ggplot(aes(x = player_hc_clusters, y = avg_game_score)) +
geom_violin() +
geom_boxplot(width = 0.2, color = "#eb1933", size = 0.8 ) +
theme_bw() +
labs(
x = "Clusters",
y = "Average Game Score",
title = "Box Plot of Average game score for the 2 clusters "
) +
theme (
plot.title = element_text(hjust = "0.5")
)
draft_data_combined %>%
mutate(player_hc_clusters =
as.factor(hc_player_clusters)) %>%
ggplot(aes(x = player_hc_clusters, y = avg_pts)) +
geom_violin() +
geom_boxplot(width = 0.2, color = "#2151a1", size = 0.8) +
theme_bw() +
labs(
x = "Clusters",
y = "Average Points",
title = "Box Plot of Average points for the 2 clusters "
) +
theme(
axis.title = element_text(hjust = "0.5")
)
#Gaussian Mixture Models
library(mclust)
draft_mclust <- Mclust(draft_scaled_data)
summary(draft_mclust)
draft_mclust$classification
draft_data_combined %>%
mutate(cluster = as.factor(draft_mclust$classification)) %>%
ggplot(aes(x = avg_web_hits, y = wiki_per_100,
color = cluster)) +
geom_point() +
ggthemes::scale_color_colorblind() +
theme_bw() +
labs(x = " Average web hits",
y = "Wiki views in 100s",
color = "Cluster") +
theme(legend.position = "bottom")
#Time series analysis
#Top 5 picks
draft_game_data_2018_clean$week <- as.Date(draft_game_data_2018_clean$week)
# Weekly game score vs. web hits for one player (Deandre Ayton), faceted
# by metric. The original code chained a dangling
# `draft_game_data_2018_clean %>% group_by(week) %>%` fragment straight
# into this pipeline, which piped a data frame into a data frame and
# errored at run time; that stray fragment has been removed.
draft_game_data_2018_clean %>%
  filter(namePlayer == "Deandre Ayton") %>%
  pivot_longer(c("avg_game_score", "avg_web_hits"),
               names_to = "Name",
               values_to = "value") %>%
  ggplot(aes(x = week, y = value)) +
  geom_line() +
  facet_wrap(~Name, scales = "free_y") +
  theme_bw()
timeseries_draft_2018_clean$week <- as.Date(timeseries_draft_2018_clean$week)
# Weekly popularity metrics (wiki, web, YouTube) for the top draft picks.
timeseries_draft_2018_clean %>%
  filter(numberRoundPick < 5) %>%
  pivot_longer(c("wiki_views", "avg_web_hits", "avg_yt_hits"),
               names_to = "names",
               values_to = "values") %>%
  ggplot(aes(x = week, y = values)) +
  # geom_line() has no `width` parameter (it was silently ignored);
  # line thickness is controlled with `size`.
  geom_line(aes(color = namePlayer), size = 1.2) +
  facet_wrap(~names, scales = "free_y", nrow = 2) +
  theme_classic()
# Same weekly view for picks 1-3, adding on-court performance
# (avg_game_score) alongside the popularity metrics.
draft_game_data_2018_clean %>%
  filter(numberRoundPick < 4) %>%
  pivot_longer(c("wiki_views", "avg_web_hits", "avg_yt_hits", "avg_game_score"),
               names_to = "names",
               values_to = "values") %>%
  ggplot(aes(x = week, y = values)) +
  # `width` is not a geom_line parameter; use `size` for line thickness.
  geom_line(aes(color = namePlayer), size = 1.2) +
  facet_wrap(~names, scales = "free_y", nrow = 2) +
  theme_classic()
#Clustering for sentiment metrics
reddit_draft_comment_data <- read.csv("C:\\Users\\sahan\\OneDrive\\Documents\\Projects\\CMSAC2020\\all_reddit_data.csv")
draft_scaled_data_comment <- reddit_draft_comment_data %>%
select(sentiment_week, avg_commentscoreperday) %>%
filter(avg_commentscoreperday < 3000) %>%
scale() %>%
as.data.frame() %>%
na.omit()
nba_draft_hclust <-
hclust(dist(draft_scaled_data_comment),
method = "complete")
hc_player_clusters <-
cutree(nba_draft_hclust,
k = 4)
# Scatter of weekly sentiment vs. comment score, colored by the
# hierarchical cluster assigned above.
# NOTE(review): the filter here must reproduce exactly the rows used to
# build hc_player_clusters, otherwise mutate() misaligns — confirm.
reddit_draft_comment_data %>%
  filter((!is.na(avg_commentscoreperday)) & (!is.na(sentiment_week))) %>%
  filter(avg_commentscoreperday < 3000) %>%
  mutate(player_hc_clusters =
           as.factor(hc_player_clusters)) %>%
  ggplot(aes(x = sentiment_week, y = avg_commentscoreperday,
             color = player_hc_clusters)) +
  geom_point() +
  theme_bw() +
  labs(x = "Sentiment Score",
       y = "Average Comment Score",
       color = "Cluster",
       title = "Scatterplot of Sentiment and comment score") +
  theme(legend.position = "bottom",
        # hjust must be numeric; the string "0.5" is not a valid value.
        plot.title = element_text(hjust = 0.5)) +
  scale_color_manual(values = c("#eb1933", "#2151a1", "black", "#a9a9a9"))
#Cluster 1: Sentiment scores are low but comment scores are also quite low. This indicates that the player is controversial
#Cluster 2: Higher sentiment scores and relatively higher comment scores. The player is perceived more positive than Cluster 1 players
#Cluster 3: Highest sentiment scores but comment scores are in between cluster 2 and cluster 1. The player is perceived most positively
# Distribution of on-court performance (avg_game_score, joined in from
# draft_data_combined) within each sentiment cluster.
reddit_draft_comment_data %>%
  filter((!is.na(avg_commentscoreperday)) & (!is.na(sentiment_week))) %>%
  filter(avg_commentscoreperday < 3000) %>%
  rename(namePlayer = player_name) %>%
  left_join(dplyr::select(draft_data_combined, c("week", "namePlayer", "avg_game_score")), by = c("namePlayer", "week")) %>%
  mutate(player_hc_clusters =
           as.factor(hc_player_clusters)) %>%
  ggplot(aes(x = player_hc_clusters, y = avg_game_score)) +
  #geom_violin() +
  geom_boxplot(width = 0.2, color = "#2151a1", size = 0.8) +
  labs(
    x = "Clusters", y = "Average game score",
    title = "Box plot of Average game score for the 4 clusters"
  ) +
  theme_bw() +
  theme(
    # hjust must be numeric; the string "0.5" is not a valid value.
    plot.title = element_text(hjust = 0.5)
  )
|
# Compute the mean of a pollutant across a set of monitor files.
#
# directory: path to a folder containing one CSV file per monitor.
# pollutant: name of the column to average (e.g. "sulfate").
# id: positions, within the sorted file listing, of the monitors to use.
#
# Returns the mean of all non-NA pollutant readings pooled across the
# selected files. The original called the non-existent function
# `pastehea()` (a typo for `paste()`), so it could never run.
pollutantmean <- function(directory, pollutant, id = 1:332) {
  # full.names = TRUE yields "directory/file" paths directly, replacing
  # the original paste(directory, "/", files[index], sep = "").
  files <- list.files(directory, full.names = TRUE)
  # Collect the non-missing readings from each requested monitor file
  # (avoids growing a vector with c() inside the loop).
  values <- lapply(id, function(index) {
    data <- read.csv(files[index])
    data[!is.na(data[, pollutant]), pollutant]
  })
  mean(unlist(values))
}
} | /R programming/Week 2/pollutantmean.R | no_license | Coverete/datasciencecoursera | R | false | false | 371 | r | pollutantmean <- function(directory, pollutant, id = 1:332){
mean_vector <- c()
files <- list.files(directory)
for (index in id) {
data <- read.csv(pastehea(directory, "/", files[index], sep=""))
cleaned_data <- data[which(!is.na(data[, pollutant])), pollutant]
mean_vector <- c(mean_vector, cleaned_data)
}
mean(mean_vector)
} |
# Names of the corpus settings recognised by settings() and settings<-();
# any other field name is rejected with an error by those functions.
SETTINGS_OPTIONS <- c("stopwords",
                      "collocations",
                      "dictionary",
                      "valuetype",
                      "stem",
                      "delimiter_word",
                      "delimiter_sentence",
                      "delimiter_paragraph",
                      "clean_tolower",
                      "clean_removeDigits",
                      "clean_removePunct",
                      "unitsoriginal",
                      "units")
# Default delimiters used by settingsInitialize() for sentence, word,
# and paragraph segmentation.
DEFAULT_DELIM_SENTENCE <- ".!?"
DEFAULT_DELIM_WORD <- " "
DEFAULT_DELIM_PARAGRAPH <- "\n\n"
#' Get or set the corpus settings
#'
#' Generic function; dispatches on the class of \code{x} (see
#' \code{settings.corpus}, \code{settings.dfm}, and \code{settings.default}).
#'
#' @param x object from/to which settings are queried or applied
#' @param ... additional arguments passed to the method
#' @return The settings associated with \code{x} (method-dependent).
#' @export
settings <- function(x, ...) {
    UseMethod("settings")
}
#' @rdname settings
#' @export
#' @details Calling \code{settings()} with no arguments returns a list of system default settings.
settings.default <- function(x=NULL, ...) {
    # Guard: the default method only makes sense for a bare settings() call.
    if (!is.null(x)) stop("settings default should be used without arguments")
    # Return the package-wide default settings list.
    settingsInitialize()
}
#' Get or set various settings in the corpus for the treatment of texts, such as rules for
#' stemming, stopwords, collocations, etc.
#' @param field string containing the name of the setting to be set or queried
#' \code{settings(x)} query the corps settings
#'
#' \code{settings(x, field) <-} update the corpus settings for \code{field}
#' @rdname settings
#' @examples
#' settings(inaugCorpus, "stopwords")
#' (tempdfm <- dfm(subset(inaugCorpus, Year>1980), verbose=FALSE))
#' (tempdfmSW <- dfm(subset(inaugCorpus, Year>1980),
#' ignoredFeatures=stopwords("english"), verbose=FALSE))
#' settings(inaugCorpus, "stopwords") <- TRUE
#' @export
settings.corpus <- function(x, field=NULL, ...) {
    # With no field named, hand back the whole settings list.
    if (is.null(field)) return(x$settings)
    # Otherwise validate the requested name and return that one setting.
    if (!(field %in% SETTINGS_OPTIONS)) stop(paste(field, "not valid setting."))
    x$settings[[field]]
}
# Replacement function for corpus settings.
#' @export
#' @rdname settings
#' @param value new setting value
"settings<-" <- function(x, field, value) {
    # Settings can only be written back to a corpus, never to a dfm.
    if (is(x, "dfm")) stop("Cannot assign settings to a dfm object.")
    if (!(field %in% SETTINGS_OPTIONS)) stop(paste(field, "not valid setting."))
    x$settings[field] <- value
    x
}
#' Get the settings from which a \link{dfm} was created
#' @rdname settings
#' @examples
#' tempdfm <- dfm(inaugCorpus, stem=TRUE, verbose=FALSE)
#' settings(tempdfm)
#' @export
settings.dfm <- function(x, ...) {
    # Placeholder method: dfm objects do not yet carry retrievable settings,
    # so we only print a note and return nothing.
    cat("note: dfm settings are not yet implemented - coming soon.\n")
}
# @rdname settings
# @export
# Build the default settings list and tag it with the "settings" S3 class
# so print.settings() and friends dispatch on it.
settingsInitialize <- function() {
    defaults <- list(stopwords = NULL,
                     collocations = NULL,
                     dictionary = NULL,
                     valuetype = "glob",
                     stem = FALSE,
                     delimiter_word = DEFAULT_DELIM_WORD,
                     delimiter_sentence = DEFAULT_DELIM_SENTENCE,
                     delimiter_paragraph = DEFAULT_DELIM_PARAGRAPH,
                     clean_tolower = TRUE,
                     clean_removeDigits = TRUE,
                     clean_removePunct = TRUE,
                     units = "documents",
                     unitsoriginal = "documents")
    class(defaults) <- c("settings", class(defaults))
    defaults
}
#' @rdname settings
#' @method print settings
#' @export
print.settings <- function(x, ...) {
    # Header line, then one "  name: value" entry per stored setting.
    cat("Settings:\n")
    for (setting_name in names(x)) {
        cat(" ", setting_name, ": ", sep="")
        print(x[[setting_name]])
    }
}
# \code{settingsReset} restores the settings of a corpus to the defaults.
#
# R's copy-on-modify semantics mean the corpus argument cannot be changed
# in place (the original version discarded the restored corpus entirely,
# as its "DOESN'T MODIFY IN PLACE" banner admitted). The updated corpus is
# therefore returned invisibly and must be reassigned by the caller:
#   corp <- settingsReset(corp)
# @rdname settings
# @export
settingsReset <- function(corp) {
    corp$settings <- settingsInitialize()
    invisible(corp)
}
# \code{settingsGet} copies settings stored in the corpus into the CALLER's
# environment, but only for arguments the caller did not supply explicitly
# (i.e. that are absent from match.call.list). This is a deliberate side
# effect on parent.frame(): local variables of the calling function are
# created or overwritten in place, and nothing useful is returned.
# (The previous header comment referred to settingsReset by mistake.)
# @rdname settings
# @export
settingsGet <- function(corp, match.call.list) {
    callingenv <- parent.frame()
    # For each tunable option: if the user did not pass it in the original
    # call, pull the stored value from the corpus settings and assign it
    # directly into the caller's frame under the expected variable name.
    if (is.null(match.call.list$dictionary))
        assign("dictionary", settings(corp, "dictionary"), callingenv)
    if (is.null(match.call.list$valuetype))
        assign("valuetype", settings(corp, "valuetype"), callingenv)
    if (is.null(match.call.list$stem))
        assign("stem", settings(corp, "stem"), callingenv)
    if (is.null(match.call.list$stopwords))
        assign("stopwords", settings(corp, "stopwords"), callingenv)
    # Note the name translation: caller-side removeDigits/removePunct/lower
    # map onto the clean_* settings fields.
    if (is.null(match.call.list$removeDigits))
        assign("removeDigits", settings(corp, "clean_removeDigits"), callingenv)
    if (is.null(match.call.list$removePunct))
        assign("removePunct", settings(corp, "clean_removePunct"), callingenv)
    if (is.null(match.call.list$lower))
        assign("lower", settings(corp, "clean_tolower"), callingenv)
    if (is.null(match.call.list$collocations))
        assign("collocations", settings(corp, "collocations"), callingenv)
}
# clean=TRUE,
# removeDigits=TRUE, removePunct=TRUE, lower=TRUE,
# addto=NULL
| /R/settings.R | no_license | schinria/quanteda | R | false | false | 5,211 | r |
SETTINGS_OPTIONS <- c("stopwords",
"collocations",
"dictionary",
"valuetype",
"stem",
"delimiter_word",
"delimiter_sentence",
"delimiter_paragraph",
"clean_tolower",
"clean_removeDigits",
"clean_removePunct",
"unitsoriginal",
"units")
DEFAULT_DELIM_SENTENCE <- ".!?"
DEFAULT_DELIM_WORD <- " "
DEFAULT_DELIM_PARAGRAPH <- "\n\n"
#' Get or set the corpus settings
#'
#' @param x object from/to which settings are queried or applied
#' @param ... additional arguments
#' @export
settings <- function(x, ...) {
UseMethod("settings")
}
#' @rdname settings
#' @export
#' @details Calling \code{settings()} with no arguments returns a list of system default settings.
settings.default <- function(x=NULL, ...) {
if (!is.null(x))
stop("settings default should be used without arguments")
settingsInitialize()
}
#' Get or set various settings in the corpus for the treatment of texts, such as rules for
#' stemming, stopwords, collocations, etc.
#' @param field string containing the name of the setting to be set or queried
#' \code{settings(x)} query the corps settings
#'
#' \code{settings(x, field) <-} update the corpus settings for \code{field}
#' @rdname settings
#' @examples
#' settings(inaugCorpus, "stopwords")
#' (tempdfm <- dfm(subset(inaugCorpus, Year>1980), verbose=FALSE))
#' (tempdfmSW <- dfm(subset(inaugCorpus, Year>1980),
#' ignoredFeatures=stopwords("english"), verbose=FALSE))
#' settings(inaugCorpus, "stopwords") <- TRUE
#' @export
settings.corpus <- function(x, field=NULL, ...) {
if (is.null(field)) {
x$settings
} else {
if (!(field %in% SETTINGS_OPTIONS)) stop(paste(field, "not valid setting."))
x$settings[[field]]
}
}
# replacement function for corpus settings
#' @export
#' @rdname settings
#' @param value new setting value
#' @export
"settings<-" <- function(x, field, value) {
if (is(x, "dfm")) stop("Cannot assign settings to a dfm object.")
if (!(field %in% SETTINGS_OPTIONS)) stop(paste(field, "not valid setting."))
x$settings[field] <- value
# cat("note: corpus settings are not yet used in dfm construction.\n")
x
}
#' Get the settings from a which a \link{dfm} was created
#' @rdname settings
#' @examples
#' tempdfm <- dfm(inaugCorpus, stem=TRUE, verbose=FALSE)
#' settings(tempdfm)
#' @export
settings.dfm <- function(x, ...) {
# attributes(x)$settings
cat("note: dfm settings are not yet implemented - coming soon.\n")
}
# @rdname settings
# @export
settingsInitialize <- function() {
tempsettings <- list(stopwords=NULL,
collocations=NULL,
dictionary=NULL,
valuetype = "glob",
stem=FALSE,
delimiter_word=DEFAULT_DELIM_WORD,
delimiter_sentence=DEFAULT_DELIM_SENTENCE,
delimiter_paragraph=DEFAULT_DELIM_PARAGRAPH,
clean_tolower=TRUE,
clean_removeDigits=TRUE,
clean_removePunct=TRUE,
units="documents",
unitsoriginal="documents")
class(tempsettings) <- c("settings", class(tempsettings))
tempsettings
}
#' @rdname settings
#' @method print settings
#' @export
print.settings <- function(x, ...) {
cat("Settings:\n")
for (s in names(x)) {
cat(" ", s, ": ", sep="")
print(x[[s]])
}
}
##
## DOESN'T MODIFY IN PLACE -- NEEDS REWRITING
##
# \code{settingsReset} restores settings for a corpus to the default values
# @rdname settings
# @export
settingsReset <- function(corp) {
corp$settings <- settingsInitialize()
}
# \code{settingsReset} restores settings for a corpus to the default values
# @rdname setttings
# @export
settingsGet <- function(corp, match.call.list) {
callingenv <- parent.frame()
if (is.null(match.call.list$dictionary))
assign("dictionary", settings(corp, "dictionary"), callingenv)
if (is.null(match.call.list$valuetype))
assign("valuetype", settings(corp, "valuetype"), callingenv)
if (is.null(match.call.list$stem))
assign("stem", settings(corp, "stem"), callingenv)
if (is.null(match.call.list$stopwords))
assign("stopwords", settings(corp, "stopwords"), callingenv)
if (is.null(match.call.list$removeDigits))
assign("removeDigits", settings(corp, "clean_removeDigits"), callingenv)
if (is.null(match.call.list$removePunct))
assign("removePunct", settings(corp, "clean_removePunct"), callingenv)
if (is.null(match.call.list$lower))
assign("lower", settings(corp, "clean_tolower"), callingenv)
if (is.null(match.call.list$collocations))
assign("collocations", settings(corp, "collocations"), callingenv)
}
# clean=TRUE,
# removeDigits=TRUE, removePunct=TRUE, lower=TRUE,
# addto=NULL
|
\name{dimension_rename}
\alias{dimension_rename}
\title{
Rename a dimension.
}
\description{
Rename a SciDB array dimension.
}
\usage{
dimension_rename(x, old, `new`, `eval`=FALSE)
}
\arguments{
\item{x}{A \code{scidb} or \code{scidbdf} object.}
\item{old}{A character string representation of the old dimension name.}
\item{new}{A character string representation of the new dimension name.}
\item{eval}{(Optional) If true, execute the query and store the result array. Otherwise defer evaluation.}
}
\value{
A new \code{scidb} or \code{scidbdf} object.
}
\author{
B. W. Lewis <blewis@paradigm4.com>
}
\seealso{
\code{\link{scidb}}
}
\examples{
\dontrun{
x <- as.scidb(iris)
y <- dimension_rename(x, "row", "i")
str(y)
}}
| /man/dimension_rename.Rd | no_license | edenzik/SciDBR | R | false | false | 733 | rd | \name{dimension_rename}
\alias{dimension_rename}
\title{
Rename a dimension.
}
\description{
Rename a SciDB array dimension.
}
\usage{
dimension_rename(x, old, `new`, `eval`=FALSE)
}
\arguments{
\item{x}{A \code{scidb} or \code{scidbdf} object.}
\item{old}{A character string representation of the old dimension name.}
\item{new}{A character string representation of the new dimension name.}
\item{eval}{(Optional) If true, execute the query and store the reult array. Otherwise defer evaluation.}
}
\value{
A new \code{scidb} or \code{scidbdf} object.
}
\author{
B. W. Lewis <blewis@paradigm4.com>
}
\seealso{
\code{\link{scidb}}
}
\examples{
\dontrun{
x <- as.scidb(iris)
y <- dimension_rename(x, "row", "i")
str(y)
}}
|
####################################################################
# Machine Learning - MIRI Master
# Lluís A. Belanche
# LAB 10: Radial Basis Function Network (Part 1)
# version of April 2019
####################################################################
library(MASS)
library(cclust)
####################################################################
## Radial Basis Function Network Example: regression of a 1D function
####################################################################
set.seed (4)
## We are going to do all the computations "by hand"
## Let us depart from the following function in the (a,b) interval
## Target function: a Gaussian-damped quadratic, (1 + x - 2x^2) * exp(-x^2).
myf <- function (x) {
  quadratic_part <- 1 + x - 2*x^2
  quadratic_part * exp(-x^2)
}
## We are going to model this function in the interval (-5,5)
a <- -5
b <- 5
sigma.2 <- 0.04
domain <- c(a,b)
## Draw N noisy samples of myf on (a, b): inputs x ~ Uniform(a, b) and
## targets t = myf(x) + Gaussian noise with variance sigma.2 (a global).
## Returns a data frame with columns "x" and "t".
myf.data <- function (N, a, b)
{
  inputs <- runif(N, a, b)
  targets <- myf(inputs) + rnorm(N, sd=sqrt(sigma.2))
  data.frame(x = inputs, t = targets)
}
N <- 100
d <- myf.data (N, a , b)
summary(d)
## The black points are the data, the blue line is the true underlying function
plot (d)
curve (myf, a, b, col='blue', add=TRUE)
## Create a large test data too for future use; notice that the generation mechanism is the same
N.test <- 2000
d.test <- myf.data (N.test, a , b)
# Function to compute a PHI (N x M) design matrix, without the Phi_0(x) = 1 column;
# c.i, sp.i are the centers and smoothing parameters or variances (sigma_i^2) of the neurons, respectively
# Design matrix of Gaussian basis-function activations (no bias column).
#
# x : numeric vector of N inputs
# c : numeric vector of M centers
# sp: numeric vector of M smoothing parameters (the variances sigma_i^2)
#
# Returns an N x M matrix with entry [n, i] = exp(-(x_n - c_i)^2 / (2*sp_i)).
# The original per-center loop is replaced by a single vectorized outer().
PHI <- function (x,c,sp)
{
  # outer(c, x, "-") is M x N; dividing by the length-M vector 2*sp
  # recycles down columns, i.e. applies one smoothing parameter per
  # center (row). Transposing yields the N x M design matrix.
  t(exp(-outer(c, x, "-")^2 / (2 * sp)))
}
## We find the centers and variances for each neuron using k-means; since this clustering algorithm is non-deterministic (because the initial centers are random), we do it 'NumKmeans' times
NumKmeans <- 20
## We set a rather large number of hidden units (= basis functions) M as a function of data size (the sqrt is just a heuristic!) because we are going to try different regularizers
(M <- floor(sqrt(N)))
m <- matrix(0,nrow=NumKmeans,ncol=M)
h <- matrix(0,nrow=NumKmeans,ncol=M)
data.Kmeans <- cbind(d$x,rep(0,N))
for (j in 1:NumKmeans)
{
# Find the centers c.i with k-means
km.res <- cclust (x=data.Kmeans, centers=M, iter.max=200, method="kmeans", dist="euclidean")
m[j,] <- km.res$centers[,1]
# Obtain the variances sp.i as a function of the c.i
h[j,] <- rep(0,M)
for (i in 1:M)
{
indexes <- which(km.res$cluster == i)
h[j,i] <- sum(abs(d$x[indexes] - m[j,i]))/length(indexes)
if (h[j,i] == 0) h[j,i] <- 1
}
}
## Now for each k-means we get the hidden-to-output weights by solving a regularized
## least-squares problem (standard ridge regression), very much as we did in previous labs
## The difference is that now we perform ridge regression on the PHI matrix (that is, on the new regressors given by the basis functions), not on the original inputs ...
## ... and find the best lambda with using GCV across all choices of basis functions (the NumKmeans clusterings)
(lambda.list <- 10^seq(-3,1.5,by=0.1))
library(MASS) # we need it for lm.ridge
errors <- rep(0,NumKmeans)
bestLambdas <- rep(0,NumKmeans)
# For each k-means' result
for (num in 1:NumKmeans)
{
c.i <- m[num,]
sp.i <- h[num,]
myPHI <- PHI (d$x,c.i,sp.i)
aux1 <- lm.ridge(d$t ~ myPHI, d, lambda = lambda.list)
my.lambda <- as.numeric(names(which.min(aux1$GCV)))
aux2 <- lm.ridge(d$t ~ myPHI, d, lambda = my.lambda)
errors[num] <- sqrt(aux2$GCV)
bestLambdas[num] <- my.lambda
}
## Now we obtain the best model among the tested ones
(bestIndex <- which(errors == min(errors)))
(bestLambda <- bestLambdas[bestIndex])
c.i <- m[bestIndex,]
sp.i <- h[bestIndex,]
## we see that this problem needs a lot of regularization! This makes sense if you take a look at how the data is generated (the previous plot): the noise level is very high relative to the signal
bestLambda
## We also see that the best lambda fluctuates (since the data changes due to the clustering), but the order of magnitude is quite stable)
bestLambdas
## We now create the final model:
my.RBF <- lm.ridge (d$t ~ PHI (d$x,c.i,sp.i), d, lambda = bestLambda)
## these are the final hidden-to-output weights: note how small they are (here is where we regularize)
(w.i <- setNames(coef(my.RBF), paste0("w_", 0:M)))
## It remains to calculate the prediction on the test data
test.PHI <- cbind(rep(1,length(d.test$x)),PHI(d.test$x,c.i,sp.i))
y <- test.PHI %*% w.i
## And now the normalized error of this prediction
(errorsTest <- sqrt(sum((d.test$t - y)^2)/((N.test-1)*var(d.test$t))))
## Much better if we plot everything
par(mfrow=c(1,1))
## Test data in black
plot(d.test$x,d.test$t,xlab="x",ylab="t",main=paste("Prediction (learning size: ",toString(N),"examples)"),ylim=c(-1.5,1.5))
## Red data are the predictions
points(d.test$x,y,col='red',lwd=1)
## and the blue line is the underlying function
curve (myf, a, b, col='blue', add=TRUE)
## The classical (predictive) R^2 coefficient is:
1-errorsTest^2
## Since we know the data generating mechanism, we can compute how good our model is w.r.t. the best possible model: this model (the regression function) would have a true generalization error of sigma^2.
##
## we can therefore compute the so-called "percentage error":
##
(perc.error <- ( sum((d.test$t - y)^2)/N.test - sigma.2 ) / sigma.2 * 100)
## which means that our model is just 9.3% away from the best possible model
## The previous code is designed for 1D problems but you can easily adapt it to more input dimensions
## There is a general package for neural networks: {RSNNS}
## Which is actually the R interface to the (formerly widely) used and flexible Stuttgart Neural Network Simulator (SNNS)
## This library contains many standard implementations of neural networks. The package actually wraps the SNNS functionality to make it available from within R
## The RBF version within this package has a sophisticated method for initializing the network, which is also quite non-standard, so we avoid further explanation
## Sadly this package does not provide with a way to control regularization or to allow for multi-class problems (a softmax option with the cross-entropy error), so I would not recommend it
##
## A second package for neural networks in R is {neural}:
## This package trains a RBFNN with backpropagation, so is a rather different view
library(neural)
# For comparison, we use the same number of centers (M)
data <- rbftrain (as.matrix(d$x),M,as.matrix(d$t), visual = FALSE)
# And make it predict the same test data
preds <- rbf (as.matrix(d.test$x),data$weight,data$dist,data$neurons,data$sigma)
## And now the normalized error of this prediction
(errorsTest <- sqrt(sum((d.test$t - preds)^2)/((N.test-1)*var(d.test$t))))
## Much better if we plot everything
par(mfrow=c(1,1))
## Test data in black
plot(d.test$x,d.test$t,xlab="x",ylab="t",main=paste("Prediction (learning size: ",toString(N),"examples)"),ylim=c(-1.5,1.5))
## Red data are the predictions
points(d.test$x,preds,col='red',lwd=1)
## and the blue line is the underlying function
curve (myf, a, b, col='blue', add=TRUE)
## The results are poorer, with an advantage for the former method (using clustering)
1-errorsTest^2
| /ML-MIRI-L10-RBFNN/ML-MIRI-L10-Part1.R | no_license | isadorasalvetti/ML | R | false | false | 7,351 | r | ####################################################################
# Machine Learning - MIRI Master
# Lluís A. Belanche
# LAB 10: Radial Basis Function Network (Part 1)
# version of April 2019
####################################################################
library(MASS)
library(cclust)
####################################################################
## Radial Basis Function Network Example: regression of a 1D function
####################################################################
set.seed (4)
## We are going to do all the computations "by hand"
## Let us depart from the following function in the (a,b) interval
myf <- function (x) { (1 + x - 2*x^2) * exp(-x^2) }
## We are going to model this function in the interval (-5,5)
a <- -5
b <- 5
sigma.2 <- 0.04
domain <- c(a,b)
myf.data <- function (N, a, b)
{
x <- runif(N, a, b)
t <- myf(x) + rnorm(N, sd=sqrt(sigma.2))
dd <- data.frame(x,t)
names(dd) <- c("x", "t")
dd
}
N <- 100
d <- myf.data (N, a , b)
summary(d)
## The black points are the data, the blue line is the true underlying function
plot (d)
curve (myf, a, b, col='blue', add=TRUE)
## Create a large test data too for future use; notice that the generation mechanism is the same
N.test <- 2000
d.test <- myf.data (N.test, a , b)
# Function to compute a PHI (N x M) design matrix, without the Phi_0(x) = 1 column;
# c.i, sp.i are the centers and smoothing parameters or variances (sigma_i^2) of the neurons, respectively
PHI <- function (x,c,sp)
{
N <- length(x)
M <- length(c)
phis <- matrix(rep(0,M*N), nrow=M)
for (i in 1:M)
phis[i,] <- exp(-(x - c[i])^2/(2*sp[i]))
t(phis)
}
## We find the centers and variances for each neuron using k-means; since this clustering algorithm is non-deterministic (because the initial centers are random), we do it 'NumKmeans' times
NumKmeans <- 20
## We set a rather large number of hidden units (= basis functions) M as a function of data size (the sqrt is just a heuristic!) because we are going to try different regularizers
(M <- floor(sqrt(N)))
## m[j, ] / h[j, ] hold the centers / variances produced by the j-th k-means run
m <- matrix(0,nrow=NumKmeans,ncol=M)
h <- matrix(0,nrow=NumKmeans,ncol=M)
## cclust expects a matrix, so pad the 1D inputs with a dummy zero column
data.Kmeans <- cbind(d$x,rep(0,N))
for (j in 1:NumKmeans)
{
# Find the centers c.i with k-means
km.res <- cclust (x=data.Kmeans, centers=M, iter.max=200, method="kmeans", dist="euclidean")
m[j,] <- km.res$centers[,1]
# Obtain the variances sp.i as a function of the c.i
h[j,] <- rep(0,M)
for (i in 1:M)
{
indexes <- which(km.res$cluster == i)
# mean absolute deviation of the cluster's points from its center
h[j,i] <- sum(abs(d$x[indexes] - m[j,i]))/length(indexes)
# singleton clusters get zero spread; fall back to 1 to avoid a degenerate basis function
if (h[j,i] == 0) h[j,i] <- 1
}
}
## Now for each k-means we get the hidden-to-output weights by solving a regularized
## least-squares problem (standard ridge regression), very much as we did in previous labs
## The difference is that now we perform ridge regression on the PHI matrix (that is, on the new regressors given by the basis functions), not on the original inputs ...
## ... and find the best lambda using GCV across all choices of basis functions (the NumKmeans clusterings)
(lambda.list <- 10^seq(-3,1.5,by=0.1))
library(MASS) # we need it for lm.ridge
errors <- rep(0,NumKmeans)
bestLambdas <- rep(0,NumKmeans)
# For each k-means' result
for (num in 1:NumKmeans)
{
c.i <- m[num,]
sp.i <- h[num,]
myPHI <- PHI (d$x,c.i,sp.i)
# first fit scans the whole lambda grid; GCV picks the best entry
aux1 <- lm.ridge(d$t ~ myPHI, d, lambda = lambda.list)
my.lambda <- as.numeric(names(which.min(aux1$GCV)))
# refit with the single best lambda to record its GCV error
aux2 <- lm.ridge(d$t ~ myPHI, d, lambda = my.lambda)
errors[num] <- sqrt(aux2$GCV)
bestLambdas[num] <- my.lambda
}
## Now we obtain the best model among the tested ones
## NOTE(review): which() returns *all* indices achieving the minimum, so a
## tie in `errors` would make bestIndex (and c.i/sp.i below) longer than 1
(bestIndex <- which(errors == min(errors)))
(bestLambda <- bestLambdas[bestIndex])
c.i <- m[bestIndex,]
sp.i <- h[bestIndex,]
## we see that this problem needs a lot of regularization! This makes sense if you take a look at how the data is generated (the previous plot): the noise level is very high relative to the signal
bestLambda
## We also see that the best lambda fluctuates (since the data changes due to the clustering), but the order of magnitude is quite stable)
bestLambdas
## We now create the final model:
my.RBF <- lm.ridge (d$t ~ PHI (d$x,c.i,sp.i), d, lambda = bestLambda)
## these are the final hidden-to-output weights: note how small they are (here is where we regularize)
(w.i <- setNames(coef(my.RBF), paste0("w_", 0:M)))
## It remains to calculate the prediction on the test data
## (prepend a column of ones for the intercept weight w_0)
test.PHI <- cbind(rep(1,length(d.test$x)),PHI(d.test$x,c.i,sp.i))
y <- test.PHI %*% w.i
## And now the normalized error of this prediction
(errorsTest <- sqrt(sum((d.test$t - y)^2)/((N.test-1)*var(d.test$t))))
## Much better if we plot everything
par(mfrow=c(1,1))
## Test data in black
plot(d.test$x,d.test$t,xlab="x",ylab="t",main=paste("Prediction (learning size: ",toString(N),"examples)"),ylim=c(-1.5,1.5))
## Red data are the predictions
points(d.test$x,y,col='red',lwd=1)
## and the blue line is the underlying function
curve (myf, a, b, col='blue', add=TRUE)
## The classical (predictive) R^2 coefficient is:
1-errorsTest^2
## Since we know the data generating mechanism, we can compute how good our model is wrt to the best possible model: this model (the regression function), would have a true generalization error of sigma^2.
##
## we can therefore compute the so-called "percentage error":
##
(perc.error <- ( sum((d.test$t - y)^2)/N.test - sigma.2 ) / sigma.2 * 100)
## which means that our model is just 9.3% away from the best possible model
## The previous code is designed for 1D problems but you can easily adapt it to more input dimensions
## There is a general package for neural networks: {RSNNS}
## Which is actually the R interface to the (formerly widely) used and flexible Stuttgart Neural Network Simulator (SNNS)
## This library contains many standard implementations of neural networks. The package actually wraps the SNNS functionality to make it available from within R
## The RBF version within this package has a sophisticated method for initializing the network, which is also quite non-standard, so we avoid further explanation
## Sadly this package does not provide with a way to control regularization or to allow for multi-class problems (a softmax option with the cross-entropy error), so I would not recommend it
##
## A second package for neural networks in R is {neural}:
## This package trains a RBFNN with backpropagation, so is a rather different view
library(neural)
# For comparison, we use the same number of centers (M)
# NOTE(review): `data` shadows base::data() for the rest of the script
data <- rbftrain (as.matrix(d$x),M,as.matrix(d$t), visual = FALSE)
# And make it predict the same test data
preds <- rbf (as.matrix(d.test$x),data$weight,data$dist,data$neurons,data$sigma)
## And now the normalized error of this prediction
## NOTE(review): errorsTest is reused here, overwriting the RBFNN result above
(errorsTest <- sqrt(sum((d.test$t - preds)^2)/((N.test-1)*var(d.test$t))))
## Much better if we plot everything
par(mfrow=c(1,1))
## Test data in black
plot(d.test$x,d.test$t,xlab="x",ylab="t",main=paste("Prediction (learning size: ",toString(N),"examples)"),ylim=c(-1.5,1.5))
## Red data are the predictions
points(d.test$x,preds,col='red',lwd=1)
## and the blue line is the underlying function
curve (myf, a, b, col='blue', add=TRUE)
## The results are poorer, with an advantage for the former method (using clustering)
1-errorsTest^2
|
#' Change R's prompt when running on R for MS Windows via Wine
#'
#' Options that are set:
#' * `prompt`
#' * `width`
#'
#' @section Install required packages:
#' ```r
#' install.packages("memuse")
#' source("https://callr.org/install#gaborcsardi/prompt")
#' ```
#'
#' @author Henrik Bengtsson
#'
#' @references
#' * https://github.com/gaborcsardi/prompt
#'
#' @imports prompt memuse crayon grDevices profmem
## Backport: older versions of {prompt} lack a toggle() helper, so define
## use_prompt()/toggle_prompt() ourselves. Relies on {prompt} internals
## (prompt:::prompt_env), which may change without notice.
if (!exists("toggle", mode = "function", envir = getNamespace("prompt"))) {
  ## Enable or disable the custom prompt, remembering it across toggles.
  use_prompt <- local({
    env <- prompt:::prompt_env
    env$default_prompt <- getOption("prompt", "> ")
    env$disabled_prompt <- env$prompt
    env$enabled <- TRUE
    ## enable = TRUE/FALSE sets the state explicitly; enable = NA toggles.
    ## Returns (invisibly) TRUE if the state changed, FALSE otherwise.
    function(enable = TRUE) {
      ## Toggle?
      if (is.na(enable)) {
        enable <- !env$enabled
      } else if (enable == env$enabled) {
        ## Nothing to do?
        return(invisible(FALSE))
      }
      if (enable) {
        ## restore the custom prompt saved when it was last disabled
        prompt::set_prompt(env$disabled_prompt)
      } else {
        ## remember the currently active prompt (kept by {prompt} in
        ## env$prompt), then fall back to the plain default prompt
        env$disabled_prompt <- env$prompt
        prompt::set_prompt(env$default_prompt)
      }
      env$enabled <- enable
      invisible(TRUE)
    }
  })
  toggle_prompt <- function() use_prompt(enable = NA)
}
## last_value(): keep a small history (up to MAX_STACK_SIZE entries) of the
## values returned at the prompt. The prompt callback pushes with
## last_value(v, action = "set"); users read with last_value(pos) or list
## everything with last_value("list").
## startup_toolbox() presumably comes from the user's startup helpers -
## TODO confirm where it is defined.
startup_toolbox({
  last_value <- local({
    db <- list()
    MAX_STACK_SIZE <- 10L
    function(value = 0L, action = c("get", "set", "list")) {
      action <- match.arg(action)
      ## convenience: last_value("list") behaves like action = "list"
      if (action == "get" && identical(value, "list")) action <- "list"
      if (action == "get") {
        pos <- as.integer(value)
        n <- length(db)
        if (n == 0) {
          warning("last_value() stack is empty.")
        } else if (pos == 0L) {
          return(db[[n]])
        } else if (-n <= pos && pos < 0L) {
          return(db[[-pos]])
        } else {
          warning(sprintf("last_value() out of range [-%d, 0]: %d", n, pos))
        }
      } else if (action == "list") {
        db
      } else if (action == "set") {
        ## NOTE(review): "set" pushes the newest value at index 1, but
        ## value = 0 reads db[[n]] (the oldest surviving entry) while
        ## value = -1 reads db[[1]] (the newest); the indexing convention
        ## looks inverted - confirm which end is "most recent"
        t <- c(list(value), db)
        if (length(t) > MAX_STACK_SIZE) t <- t[seq_len(MAX_STACK_SIZE)]
        db <<- t
      }
    }
  })
})
## Install the fancy prompt: the value of this local() block (the function
## at the bottom of the file) is the callback {prompt} invokes after every
## top-level command. All helpers live in this local environment.
prompt::set_prompt(local({
  symbol <- clisymbols::symbol
  ## NULL-safe crayon wrappers: pass NULL through unchanged so absent
  ## prompt pieces drop out instead of becoming the string "NULL"
  blue <- function(x) if (is.null(x)) NULL else crayon::blue(x)
  silver <- function(x) if (is.null(x)) NULL else crayon::silver(x)
  green <- function(x) if (is.null(x)) NULL else crayon::green(x)
  yellow <- function(x) if (is.null(x)) NULL else crayon::yellow(x)
  red <- function(x) if (is.null(x)) NULL else crayon::red(x)
  ## WORKAROUND: https://github.com/gaborcsardi/crayon/issues/48
  if (crayon:::has_color()) options(crayon.enabled = TRUE)
  ## Terminal-width probe. On first call it *replaces itself* (via <<-)
  ## with the cheapest strategy that yields a number: $COLUMNS first,
  ## then `tput cols`, else a constant NA_integer_.
  get_width <- function() {
    ## Identify best way to infer dynamic `width`, iff at all
    get_width <<- function() as.integer(Sys.getenv("COLUMNS"))
    if (is.na(get_width())) {
      get_width <<- function() {
        as.integer(try(system2("tput", args = "cols", stdout = TRUE), silent = TRUE))
      }
    }
    if (is.na(get_width())) get_width <<- function() NA_integer_
    get_width()
  }
has_git <- function() {
!inherits(try(prompt:::check_git_path(), silent = TRUE), "try-error")
}
status <- function(ok) {
if (ok) green(symbol$tick) else red(symbol$cross)
}
  ## Current memory usage (formatted by {prompt} internals), in silver
  mem <- function() {
    silver(prompt:::memory_usage())
  }
  ## Name of the package under development when devtools is in use,
  ## or NULL so the piece drops out of the prompt
  pkg <- function() {
    if (!prompt:::using_devtools()) return(NULL)
    blue(prompt:::devtools_package())
  }
  ## Git branch/status summary from {prompt}, or NULL outside a repo
  ## or when there is nothing to report
  gitinfo <- function() {
    if (!has_git()) return(NULL)
    info <- prompt:::git_info()
    if (nchar(info) == 0) return(NULL)
    silver(info)
  }
## Report on active sinks
sinks <- function() {
n_out <- sink.number(type = "output")
n_msg <- sink.number(type = "message") - 2L
if (n_out == 0 && n_msg == 0) return(NULL)
if (n_out > 0 && n_msg == 0) return(yellow(sprintf("%d output sink", n_out)))
if (n_out == 0 && n_msg > 0) return(red(sprintf("%d message sink", n_msg)))
red(sprintf("%d output & %d message sink", n_out, n_msg))
}
## Report on open graphics devices
devs <- function() {
devs <- grDevices::dev.list()
n_devs <- length(devs)
if (n_devs == 0) return(NULL)
devs <- sort(unique(names(devs)))
if (n_devs == 1) {
msg <- sprintf("%d graphics device (%s)", n_devs, devs)
return(yellow(msg))
}
msg <- sprintf("%d graphics devices (%s)", n_devs,
paste(devs, collapse = ", "))
red(msg)
}
profmem_prompt <- local({
.suspended <- getOption("profmem.prompt.suspend", TRUE)
.depth <- NULL
.last_profmem <- NULL
## TODO:
## * Allow user change threshold of the profmem prompt
## * Allow user to suspend/resume the profmem prompt
## * Add support for custom prompt(profmem, depth, ...) function
## * Have built-in prompt() function return args as attributes
function(what = c("update", "suspend", "resume", "prompt", "begin", "end"), threshold = 10 * 1024) {
what <- match.arg(what)
## Produce prompt string
if (what == "prompt") {
if (is.null(.last_profmem)) return("")
depth <- profmem::profmem_depth()
if (!is.null(.depth) && .depth != depth) {
return("waiting for active profmem to close")
}
## Don't report on 'new page' entries
pm <- subset(.last_profmem, what != "new page")
threshold <- attr(pm, "threshold")
threshold <- structure(threshold, class = "object_size")
threshold <- format(threshold, units = "auto", standard = "IEC")
n <- nrow(pm)
if (n == 0) {
prompt <- sprintf("0 %s+ alloc", threshold)
} else {
total <- profmem::total(pm)
total <- structure(total, class = "object_size")
total <- format(total, units = "auto", standard = "IEC")
prompt <- sprintf("%s in %d %s+ alloc",
total, n, threshold)
}
return(prompt)
}
## Begin and end profiling by the prompt
if (what == "begin") {
if (!.suspended && is.null(.depth)) {
tryCatch({
profmem::profmem_begin(threshold = threshold)
.depth <<- profmem::profmem_depth()
}, error = function(ex) NULL)
}
} else if (what == "end") {
if (!is.null(.depth) && .depth == profmem::profmem_depth()) {
.last_profmem <<- tryCatch({
p <- profmem::profmem_end()
.depth <<- NULL
p
}, error = function(ex) NULL)
}
}
## Tweak how profiling is done by the prompt
if (what == "suspend") {
.suspended <<- TRUE
force(t <- .suspended)
} else if (what == "suspend") {
.suspended <<- FALSE
force(t <- .suspended)
} else if (what == "update") {
}
}
})
alloc <- function() {
if (getOption("profmem.suspend", FALSE)) {
prompt <- "profmem suspended"
} else {
prompt <- profmem_prompt("prompt")
}
if (nzchar(prompt)) prompt <- sprintf("(%s)", prompt)
silver(prompt)
}
  ## Assemble the prompt line from the individual pieces; NULL pieces are
  ## dropped by unlist() so absent information simply disappears.
  #' @param expr The expression evaluated (unused here)
  #' @param value The value of the expression (unused here)
  #' @param ok Whether the evaluation succeeded or not
  #' @param visible Whether the value is visible or not (unused here)
  prompt_fancy_hb <- function(expr, value, ok, visible) {
    # message("expr: ", deparse(expr))
    # message("value: ", value)
    # message("ok: ", ok)
    # message("visible: ", visible)
    info <- list(
      status = status(ok),
      mem = mem(),
      alloc = alloc(),
      pkg = pkg(),
      gitinfo = gitinfo(),
      devs = devs(),
      sinks = sinks()
    )
    paste0("\n", paste(unlist(info), collapse = " "), "\n> ")
  }
  ## In case last_value() becomes deleted
  .last_value <- last_value
  ## The value of local() - i.e. the prompt callback itself, invoked by
  ## {prompt} after every top-level command.
  function(...) {
    ## close the profiling window opened after the previous command.
    ## NOTE(review): the real result is stored inside profmem_prompt's own
    ## environment; this <<- creates/overwrites a *global* .last_profmem
    ## that nothing visible reads - confirm it can be dropped
    .last_profmem <<- profmem_prompt("end")
    if (!getOption("profmem.suspend", FALSE)) {
      ## re-open a window once this callback returns, so the next
      ## command's allocations are captured
      on.exit({
        profmem_prompt("begin", threshold = 10 * 1024)
      })
    }
    value <- .Last.value
    ## In case last_value() has been deleted
    if (!exists("last_value", mode = "function")) last_value <<- .last_value
    last_value(value, action = "set")
    ## Dynamically set option 'width'
    if (!is.na(width <- get_width())) options(width = width)
    ## prompt_fancy() requires 'git'
    prompt_fancy_hb(...)
  }
}))
| /.Rprofile.d/interactive=TRUE/ui,package=prompt,rstudio=FALSE,wine=FALSE.R | no_license | jimsforks/dotfiles-for-R | R | false | false | 8,385 | r | #' Change R's prompt when running on R for MS Windows via Wine
#'
#' Options that are set:
#' * `prompt`
#' * `width`
#'
#' @section Install required packages:
#' ```r
#' install.packages("memuse")
#' source("https://callr.org/install#gaborcsardi/prompt")
#' ```
#'
#' @author Henrik Bengtsson
#'
#' @references
#' * https://github.com/gaborcsardi/prompt
#'
#' @imports prompt memuse crayon grDevices profmem
if (!exists("toggle", mode = "function", envir = getNamespace("prompt"))) {
use_prompt <- local({
env <- prompt:::prompt_env
env$default_prompt <- getOption("prompt", "> ")
env$disabled_prompt <- env$prompt
env$enabled <- TRUE
function(enable = TRUE) {
## Toggle?
if (is.na(enable)) {
enable <- !env$enabled
} else if (enable == env$enabled) {
## Nothing do to?
return(invisible(FALSE))
}
if (enable) {
prompt::set_prompt(env$disabled_prompt)
} else {
env$disabled_prompt <- env$prompt
prompt::set_prompt(env$default_prompt)
}
env$enabled <- enable
invisible(TRUE)
}
})
toggle_prompt <- function() use_prompt(enable = NA)
}
startup_toolbox({
last_value <- local({
db <- list()
MAX_STACK_SIZE <- 10L
function(value = 0L, action = c("get", "set", "list")) {
action <- match.arg(action)
if (action == "get" && identical(value, "list")) action <- "list"
if (action == "get") {
pos <- as.integer(value)
n <- length(db)
if (n == 0) {
warning("last_value() stack is empty.")
} else if (pos == 0L) {
return(db[[n]])
} else if (-n <= pos && pos < 0L) {
return(db[[-pos]])
} else {
warning(sprintf("last_value() out of range [-%d, 0]: %d", n, pos))
}
} else if (action == "list") {
db
} else if (action == "set") {
t <- c(list(value), db)
if (length(t) > MAX_STACK_SIZE) t <- t[seq_len(MAX_STACK_SIZE)]
db <<- t
}
}
})
})
prompt::set_prompt(local({
symbol <- clisymbols::symbol
blue <- function(x) if (is.null(x)) NULL else crayon::blue(x)
silver <- function(x) if (is.null(x)) NULL else crayon::silver(x)
green <- function(x) if (is.null(x)) NULL else crayon::green(x)
yellow <- function(x) if (is.null(x)) NULL else crayon::yellow(x)
red <- function(x) if (is.null(x)) NULL else crayon::red(x)
## WORKAROUND: https://github.com/gaborcsardi/crayon/issues/48
if (crayon:::has_color()) options(crayon.enabled = TRUE)
get_width <- function() {
## Identify best way to infer dynamic `width`, iff at all
get_width <<- function() as.integer(Sys.getenv("COLUMNS"))
if (is.na(get_width())) {
get_width <<- function() {
as.integer(try(system2("tput", args = "cols", stdout = TRUE), silent = TRUE))
}
}
if (is.na(get_width())) get_width <<- function() NA_integer_
get_width()
}
has_git <- function() {
!inherits(try(prompt:::check_git_path(), silent = TRUE), "try-error")
}
status <- function(ok) {
if (ok) green(symbol$tick) else red(symbol$cross)
}
mem <- function() {
silver(prompt:::memory_usage())
}
pkg <- function() {
if (!prompt:::using_devtools()) return(NULL)
blue(prompt:::devtools_package())
}
gitinfo <- function() {
if (!has_git()) return(NULL)
info <- prompt:::git_info()
if (nchar(info) == 0) return(NULL)
silver(info)
}
## Report on active sinks
sinks <- function() {
n_out <- sink.number(type = "output")
n_msg <- sink.number(type = "message") - 2L
if (n_out == 0 && n_msg == 0) return(NULL)
if (n_out > 0 && n_msg == 0) return(yellow(sprintf("%d output sink", n_out)))
if (n_out == 0 && n_msg > 0) return(red(sprintf("%d message sink", n_msg)))
red(sprintf("%d output & %d message sink", n_out, n_msg))
}
## Report on open graphics devices
devs <- function() {
devs <- grDevices::dev.list()
n_devs <- length(devs)
if (n_devs == 0) return(NULL)
devs <- sort(unique(names(devs)))
if (n_devs == 1) {
msg <- sprintf("%d graphics device (%s)", n_devs, devs)
return(yellow(msg))
}
msg <- sprintf("%d graphics devices (%s)", n_devs,
paste(devs, collapse = ", "))
red(msg)
}
profmem_prompt <- local({
.suspended <- getOption("profmem.prompt.suspend", TRUE)
.depth <- NULL
.last_profmem <- NULL
## TODO:
## * Allow user change threshold of the profmem prompt
## * Allow user to suspend/resume the profmem prompt
## * Add support for custom prompt(profmem, depth, ...) function
## * Have built-in prompt() function return args as attributes
function(what = c("update", "suspend", "resume", "prompt", "begin", "end"), threshold = 10 * 1024) {
what <- match.arg(what)
## Produce prompt string
if (what == "prompt") {
if (is.null(.last_profmem)) return("")
depth <- profmem::profmem_depth()
if (!is.null(.depth) && .depth != depth) {
return("waiting for active profmem to close")
}
## Don't report on 'new page' entries
pm <- subset(.last_profmem, what != "new page")
threshold <- attr(pm, "threshold")
threshold <- structure(threshold, class = "object_size")
threshold <- format(threshold, units = "auto", standard = "IEC")
n <- nrow(pm)
if (n == 0) {
prompt <- sprintf("0 %s+ alloc", threshold)
} else {
total <- profmem::total(pm)
total <- structure(total, class = "object_size")
total <- format(total, units = "auto", standard = "IEC")
prompt <- sprintf("%s in %d %s+ alloc",
total, n, threshold)
}
return(prompt)
}
## Begin and end profiling by the prompt
if (what == "begin") {
if (!.suspended && is.null(.depth)) {
tryCatch({
profmem::profmem_begin(threshold = threshold)
.depth <<- profmem::profmem_depth()
}, error = function(ex) NULL)
}
} else if (what == "end") {
if (!is.null(.depth) && .depth == profmem::profmem_depth()) {
.last_profmem <<- tryCatch({
p <- profmem::profmem_end()
.depth <<- NULL
p
}, error = function(ex) NULL)
}
}
## Tweak how profiling is done by the prompt
if (what == "suspend") {
.suspended <<- TRUE
force(t <- .suspended)
} else if (what == "suspend") {
.suspended <<- FALSE
force(t <- .suspended)
} else if (what == "update") {
}
}
})
alloc <- function() {
if (getOption("profmem.suspend", FALSE)) {
prompt <- "profmem suspended"
} else {
prompt <- profmem_prompt("prompt")
}
if (nzchar(prompt)) prompt <- sprintf("(%s)", prompt)
silver(prompt)
}
#' @param expr The expression evaluated
#' @param value The value of the expression
#' @param ok Whether the evaluation succeeded or not
#' @param visible Whether the value is visible or not
prompt_fancy_hb <- function(expr, value, ok, visible) {
# message("expr: ", deparse(expr))
# message("value: ", value)
# message("ok: ", ok)
# message("visible: ", visible)
info <- list(
status = status(ok),
mem = mem(),
alloc = alloc(),
pkg = pkg(),
gitinfo = gitinfo(),
devs = devs(),
sinks = sinks()
)
paste0("\n", paste(unlist(info), collapse = " "), "\n> ")
}
## In case last_value() becomes deleted
.last_value <- last_value
function(...) {
.last_profmem <<- profmem_prompt("end")
if (!getOption("profmem.suspend", FALSE)) {
on.exit({
profmem_prompt("begin", threshold = 10 * 1024)
})
}
value <- .Last.value
## In case last_value() has been deleted
if (!exists("last_value", mode = "function")) last_value <<- .last_value
last_value(value, action = "set")
## Dynamically set option 'width'
if (!is.na(width <- get_width())) options(width = width)
## prompt_fancy() requires 'git'
prompt_fancy_hb(...)
}
}))
|
#' Automatically read and convert every single CSV file within the zip file to disk.frame format
#'
#' Every file listed in the archive is extracted to a temporary directory and
#' converted into a disk.frame stored under `outdir/<file name>`.
#'
#' @param zipfile The zipfile
#' @param outdir The output directory for the disk.frames
#' @param ... passed to csv_to_disk.frame and data.table::fread
#' @param parallel If TRUE (default), the files are converted in parallel
#'   with future.apply using the currently registered future plan
#' @import glue dplyr fst future future.apply fs
#' @export
#' @return a list of disk.frames
# TODO add all the options of fread into the ... as future may not be able to deal with it
zip_to_disk.frame = function(zipfile, outdir, ..., parallel = TRUE) {
  # obtain the list of files in the zipfile
  files = unzip(zipfile, list = TRUE)
  # TODO sort the files by file size
  # create the output directory
  fs::dir_create(outdir)
  # temporary directory for the extracted CSVs; removed when we return
  # (previously it was never cleaned up)
  tmpdir = tempfile(pattern = "tmp_zip2csv")
  on.exit(unlink(tmpdir, recursive = TRUE), add = TRUE)
  # extract one archive member and convert it to a disk.frame
  convert_one = function(fn) {
    out_dir_for_file = file.path(outdir, fn)
    unzip(zipfile, files = fn, exdir = tmpdir)
    df = csv_to_disk.frame(file.path(tmpdir, fn), out_dir_for_file, ...)
    add_meta(df)
  }
  # the parallel and sequential paths used to duplicate the worker body
  # verbatim; share one worker and only switch the apply function
  apply_fn = if (parallel) future.apply::future_lapply else lapply
  res = apply_fn(files$Name, convert_one)
  # validate
  #if(validation.check) validate_zip_to_disk.frame(zipfile, outdir)
  res
}
# validate_zip_to_disk.frame(zipfile, outdir)
#' Validate and auto-correct read and convert every single file within the zip file to df format
#'
#' For each member of the archive, tries to read the corresponding `.fst`
#' output; if it is missing or unreadable (possibly corrupted) the member is
#' re-extracted and re-written. Progress/timings are reported via print().
#' @param zipfile The zipfile
#' @param outdir The output directory for disk.frame
#' @return The timing of the validation pass, as returned by system.time()
#' @import glue
#' @import dplyr
#' @import fst
#' @import future
validate_zip_to_disk.frame = function(zipfile, outdir) {
  files = unzip(zipfile, list=T)  # NOTE(review): prefer TRUE over reassignable T
  if(!dir.exists(outdir)) {
    stop(glue("The output directory {outdir} does not exist.\n Nothing to validate."))
  }
  tmpdir = tempfile(pattern = "tmp_zip2csv")  # NOTE(review): never removed - leaks temp files
  # check if files are ok
  # NOTE(review): this validates flat .fst files, not the chunked disk.frame
  # folders written by zip_to_disk.frame() - confirm which layout is intended
  system.time(lapply(files$Name, function(fn) {
    print(fn)
    out_fst_file = file.path(outdir, paste0(fn,".fst"))
    if(file.exists(out_fst_file)) {
      tryCatch({
        # the output file already exists
        # read it and if it errors then the file might be corrupted, so
        # read it again and write again
        pt = proc.time()
        read_fst(out_fst_file, as.data.table = T)
        print(paste0("checking(read): ", timetaken(pt))); pt = proc.time()
      }, error = function(e) {
        print(e)
        pt = proc.time()
        unzip(zipfile, files = fn, exdir = tmpdir)
        print(paste0("unzip: ", timetaken(pt))); pt = proc.time()
        write_fst(fread(file.path(tmpdir, fn)), out_fst_file,100)
        print(paste0("read: ", timetaken(pt)))
        unlink(file.path(tmpdir, fn))
        gc()
      })
      print("output already exists")
      return(NULL)
    } else {
      # if the output file doesn't exist then the process might have failed
      # so redo it from the archive
      pt = proc.time()
      unzip(zipfile, files = fn, exdir = tmpdir)
      print(paste0("unzip: ", timetaken(pt))); pt = proc.time()
      write_fst(fread(file.path(tmpdir, fn)), out_fst_file,100)
      print(paste0("read: ", timetaken(pt)))
      unlink(file.path(tmpdir, fn))
      gc()
    }
  })) # last full run took ~507 seconds
}
| /R/zip_to_disk.frame.r | no_license | kuzmenkov111/disk.frame | R | false | false | 3,503 | r | #' Automatically read and convert every single CSV file within the zip file to disk.frame format
#' @param zipfile The zipfile
#' @param outdir The output directory for the disk.frames
#' @param ... passed to csv_to_disk.frame and data.table::fread
#' @import glue dplyr fst future future.apply fs
#' @export
#' @return a list of disk.frames
# TODO add all the options of fread into the ... as future may not be able to deal with it
zip_to_disk.frame = function(zipfile, outdir, ..., parallel = T) {
# obtain the list of files in the zipfile
files = unzip(zipfile, list=T)
# TODO sort the files by file size
# create the output directory
fs::dir_create(outdir)
# create a temporary directory; this is where all the CSV files are extracted to
tmpdir = tempfile(pattern = "tmp_zip2csv")
if(parallel) {
res = future.apply::future_lapply(files$Name, function(fn) {
out_dir_for_file = file.path(outdir, fn)
# unzip a file
unzip(zipfile, files = fn, exdir = tmpdir)
# create disk.frame from file
res = csv_to_disk.frame(file.path(tmpdir, fn), out_dir_for_file, ...)
add_meta(res)
})
} else {
res = lapply(files$Name, function(fn) {
out_dir_for_file = file.path(outdir, fn)
# unzip a file
unzip(zipfile, files = fn, exdir = tmpdir)
# create disk.frame
res = csv_to_disk.frame(file.path(tmpdir, fn), out_dir_for_file, ...)
add_meta(res)
})
}
# validate
#if(validation.check) validate_zip_to_disk.frame(zipfile, outdir)
res
}
# validate_zip_to_disk.frame(zipfile, outdir)
#' Validate and auto-correct read and convert every single file within the zip file to df format
#' @param zipfile The zipfile
#' @param outdir The output directory for disk.frame
#' @import glue
#' @import dplyr
#' @import fst
#' @import future
validate_zip_to_disk.frame = function(zipfile, outdir) {
files = unzip(zipfile, list=T)
if(!dir.exists(outdir)) {
stop(glue("The output directory {outdir} does not exist.\n Nothing to validate."))
}
tmpdir = tempfile(pattern = "tmp_zip2csv")
# check if files are ok
system.time(lapply(files$Name, function(fn) {
print(fn)
out_fst_file = file.path(outdir, paste0(fn,".fst"))
if(file.exists(out_fst_file)) {
tryCatch({
# the output file already exists
# read it and if it errors then the file might be corrupted, so
# read it again and write again
pt = proc.time()
read_fst(out_fst_file, as.data.table = T)
print(paste0("checking(read): ", timetaken(pt))); pt = proc.time()
}, error = function(e) {
print(e)
pt = proc.time()
unzip(zipfile, files = fn, exdir = tmpdir)
print(paste0("unzip: ", timetaken(pt))); pt = proc.time()
write_fst(fread(file.path(tmpdir, fn)), out_fst_file,100)
print(paste0("read: ", timetaken(pt)))
unlink(file.path(tmpdir, fn))
gc()
})
print("output already exists")
return(NULL)
} else {
# if the output file doesn't exists then the process might have failed
# re do again
pt = proc.time()
unzip(zipfile, files = fn, exdir = tmpdir)
print(paste0("unzip: ", timetaken(pt))); pt = proc.time()
write_fst(fread(file.path(tmpdir, fn)), out_fst_file,100)
print(paste0("read: ", timetaken(pt)))
unlink(file.path(tmpdir, fn))
gc()
}
})) # 507 econds
}
|
# using install.packages(c("ggplot2", "shiny", "plotly"))
#other devtools::install_github("rstudio/shiny"); devtools::install_github("hadley/ggplot2"); devtools::install_github("ropensci/plotly")
# rsconnect::deployApp("/Users/jdlim/Bioinformatics/RNAseeker", launch.browser = F, account = "sciencebuff")
#to fix
# updating of corr plots after sample selection
#########
##TO DO##
#########
# make selection on volcano plot show other plots - have hover and select as radiobutton options
#
# check scatter plot of counts is correct
#
# add GO analysis
#
# add 'googleAuthR' button and to load data
# add bookmark button
#
# add annotation info to plotly and tables
# add annotation lookup
#
# make plots look good
#
# add download as HTML widget for plots
#
#################################################################################################
source("qqly.R")
require(shiny)
require(ggplot2)
require(plotly)
require(DESeq2)
require(NOISeq)
require(adegenet)
require(DT)
require(ggfortify)
require(heatmaply)
require(BiocParallel)
require(AnnotationDbi)
require(org.Hs.eg.db)
require(goseq)
require(GO.db)
require(GOexpress)
# custom dark theme for ggplot matching the app's #272b30 background
gg_back <- theme(
  panel.background = element_rect(fill = "#272b30"),
  plot.background = element_rect(fill = "#272b30"),
  legend.background = element_rect(fill = "#272b30"),
  panel.grid.major = element_line(colour = "black"),
  axis.title.x = element_text(colour = "light grey"),
  axis.title.y = element_text(colour ="light grey"),
  legend.title = element_text(colour = "white"),
  plot.title = element_text(colour = "light grey"),
  legend.text = element_text(colour = "light grey"),
  axis.text = element_text(colour ="light grey")
)
# Development flag: when the *string* "T", data objects are truncated to
# 500 features for faster iteration (see the reactives below).
# NOTE(review): <<- at top level just assigns globals; plain <- would do,
# and a logical TRUE/FALSE would be less error-prone than the string "T"
testing <<- "T"
threads <<- 1
# register the BiocParallel backend used by the DESeq2 computations
register(MulticoreParam(threads))
shinyServer(function(input, output, session) {
  #############
  ##load data##
  #############
  # reactiveValues holding the file paths of all precomputed input objects;
  # filled either with the bundled example data or a user-supplied directory
  data_load <- reactiveValues()
  # "Load example data" action: point every path at example_data/
  observeEvent(input$load_x,{
    data_load$rRNA_file_pat <- "example_data/rRNA_check/"
    data_load$vst_path <- "example_data/vst_in"
    data_load$rlt_path <- "example_data/rlt_in"
    data_load$dds_path <-"example_data/dds_HTSeq_in"
    # NOTE(review): unlike the other entries, this looks for bio.txt in the
    # working directory rather than under example_data/ - confirm intended
    data_load$bio_path <- "bio.txt"
    data_load$noi_sat_path <- "example_data/noi_dat_saturation"
    data_load$noi_coutns_path <- "example_data/noi_dat_countsbio"
    data_load$noi_bio_path <- "example_data/noi_dat_bio_detect"
    data_load$multi_qc_path <- "/Users/jdlim/Library/Mobile Documents/com~apple~CloudDocs/Bioinformatics/RNAseeker/example_data/multiqc_report.html"
  })
  # user-supplied data directory: reset all paths, then rebuild them
  # relative to input$load_user
  observeEvent(input$load_user,{
    data_load$rRNA_file_pat <- data_load$vst_path <- data_load$rlt_path <- data_load$dds_path <- data_load$bio_path <- data_load$noi_sat_path <- data_load$noi_coutns_path <- data_load$noi_bio_path <- data_load$multi_qc_path <- NULL
    data_load$rRNA_file_pat <- paste0(input$load_user, "/rRNA_check/")
    data_load$vst_path <- paste0(input$load_user, "/vst_in")
    data_load$rlt_path <- paste0(input$load_user, "/rlt_in")
    data_load$dds_path <-paste0(input$load_user, "/dds_HTSeq_in")
    # NOTE(review): missing "/" separator before bio.txt - likely a bug
    data_load$bio_path <- paste0(input$load_user, "bio.txt")
    data_load$noi_sat_path <- paste0(input$load_user, "/noi_dat_saturation")
    data_load$noi_coutns_path <- paste0(input$load_user, "/noi_dat_countsbio")
    data_load$noi_bio_path <- paste0(input$load_user, "/noi_dat_bio_detect")
    data_load$multi_qc_path <- paste0(input$load_user, "/multiqc_report.html")
    # data_load$snp_matrix <- as.character(input$meta_dat$datapath[1])
    # data_load$genp_path <- as.character(input$snp_mat_IN$datapath[1])
  })
  # Transformed counts (presumably rlog - TODO confirm). load() pulls an
  # object literally named `rlt_in` from the saved file (the name must
  # match), which shadows this reactive inside its own scope.
  rlt_in <- reactive({
    load(data_load$rlt_path)
    if (testing == "T")
      rlt_in[1:500]
    else
      rlt_in
  })
  # DESeqDataSet built from HTSeq counts (same load()-by-name pattern;
  # truncated to 500 features when the dev flag `testing` is "T")
  dds_HTSeq_in <- reactive({
    load(data_load$dds_path)
    if (testing == "T")
      dds_HTSeq_in[1:500]
    else
      dds_HTSeq_in
  })
  # variance-stabilized counts (same load()-by-name pattern)
  vst_in <- reactive({
    load(data_load$vst_path)
    if (testing == "T")
      vst_in[1:500]
    else
      vst_in
  })
  # # bio_detect <- eventReactive(input$load_x,{
  # #   load("example_data/bio_detect")
  # #   names(bio_detect@dat$biotables) <- gsub(".Homo_sapiens.HTSeq.counts", "", names(bio_detect@dat$biotables))
  # #   bio_detect
  # # }, ignoreInit = TRUE)
  # #
  # rlt_in <- eventReactive(input$load_x, input$load_u,{
  #   print(data_load$rlt_path)
  #   load(data_load$rlt_path)
  #   if (testing == "T")
  #     rlt_in[1:500]
  #   else
  #     rlt_in
  # })
  # NOISeq input object rebuilt from the counts plus biotype annotation
  noi_dat <- reactive({
    bio <- read.table(data_load$bio_path)
    readData(assay(dds_HTSeq_in()), colData(dds_HTSeq_in()), biotype = bio)
  })
  # The three NOISeq QC objects are precomputed and load()ed by name;
  # the commented dat() calls document how they were generated.
  noi_dat_saturation <- reactive({
    # noi_dat_saturation() <- dat(noi_dat(), k = 0, ndepth = 10, type = "saturation")
    load(data_load$noi_sat_path)
    noi_dat_saturation
  })
  noi_dat_countsbio <- reactive({
    # noi_dat_countsbio() <- dat(noi_dat(), factor = NULL, type = "countsbio")
    load(data_load$noi_coutns_path)
    noi_dat_countsbio
  })
  noi_dat_bio_detect <- reactive({
    # noi_dat_bio_detect() <- dat(noi_dat(), k = 0, type = "biodetection", factor = NULL)
    load(data_load$noi_bio_path)
    noi_dat_bio_detect
  })
  # observeEvent(input$load_x, input$load_u,{
  #   data_load$multi_qc_path <<- "/Users/jdlim/Library/Mobile Documents/com~apple~CloudDocs/Bioinformatics/RNAseeker/example_data/multiqc_report.html"
  # })
  #load data end#
  ###############
  # create the main table of samples shown in the UI (this is where samples
  # can be selected): dds metadata plus per-sample total read counts and the
  # number of features with more than one count
  # NOTE(review): only triggered by load_x - loading user data via
  # load_user does not refresh this table; confirm whether that is intended
  sa_tab <- eventReactive(input$load_x,{
    s_tab <- data.frame(colData(dds_HTSeq_in()))
    s_tab$sample <- row.names(s_tab)
    conts <- data.frame(colSums(assay(dds_HTSeq_in())))
    s_tab <- merge(s_tab, conts, by = "row.names")
    genes <- data.frame(colSums(assay(dds_HTSeq_in())>1))
    # columns 2:6 drop the Row.names column introduced by merge()
    sa_tab <- merge(s_tab, genes, by.x = "sample", by.y = "row.names")[,2:6]
    colnames(sa_tab) <- c("Sample", "Group", "Rep", "Read Count", "Features Detected")
    sa_tab
  })
  # After either data source is loaded, render the selection UI, the sample
  # table and the per-sample read-count plot.
  observeEvent(c(input$load_x, input$load_user),{
    # render radio buttons based on the groups in the dds object to be used for selection in table
    output$sel <- renderUI({
      all_samples <- colnames(dds_HTSeq_in())
      all_groups <- as.character(unique(colData(dds_HTSeq_in())[,1]))
      tags$div(align = 'left',
               class = 'multicol',
               # checkboxGroupInput("sel_samp", "Select samples to keep by name",
               #                    inline = T,
               #                    choiceNames = as.list(all_samples),
               #                    choiceValues = as.list(all_samples),
               #                    selected = as.list(all_samples)
               #                    ),
               checkboxGroupInput("sel_samp_g", "Select samples to keep by group (not working yet)",
                                  inline = T,
                                  # choiceNames = as.list(all_groups),
                                  # choiceValues = as.list(all_groups),
                                  choices = as.list(all_groups),
                                  selected = as.list(all_groups)
               )
      )
    })
    output$sel_samp_out <- renderUI(actionButton("sel_samp", h4("Update Sample Selection")))
    # data table of samples, metadata from dds and read counts - see sa_tab function;
    # all rows start selected
    output$tbl <- DT::renderDataTable({
      datatable(sa_tab(),
                selection = list(target = 'row', selected = 1:nrow(sa_tab()))
      ) %>% formatStyle(
        c(colnames(sa_tab())),
        backgroundColor = "red",
        color = "black"
      )
    }, server = TRUE)
    # sliders driving the low-count feature filter used by dds_HTSeq()
    output$gene_count <- renderUI({
      sliderInput("min_count", "Keep features with more than this many normalized counts", min = 0, max = 100, value = 5, step = 1)
    })
    output$gene_count_sample <- renderUI({
      sliderInput("min_count_sample", "In this many samples", min = 1, max = length(input$tbl_rows_selected), value = length(input$tbl_rows_selected), step = 1)
    })
    # bar chart of raw read counts per sample
    # NOTE(review): `c <- ggplot(...)` shadows base::c inside this reactive
    output$mil_reads <- renderPlotly({
      counts_total <- counts(dds_HTSeq_in())
      counts_total <- colSums(counts_total)
      counts_total <- data.frame(counts_total)
      counts_total$Sample <- rownames(counts_total)
      c <- ggplot(counts_total, aes(Sample,counts_total)) +
        geom_bar(stat = "identity", aes(fill = counts_total)) + #colour = counts_total,
        # scale_colour_gradient(low = "blue", high = "green") +
        scale_fill_gradientn(colours = heat.colors(10), name = "Count\nIntensity") +
        theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
        gg_back +
        ggtitle("Counts of reads per sample") +
        ylab("Count") + xlab(NULL)
      ggplotly(c) #%>% layout(autosize = FALSE, width = "400px")
    })
  }, ignoreInit = T)
# Sample names of the rows currently highlighted in the overview table.
selected_samples <- reactive({
  # Reference the "Update Sample Selection" button so this re-fires on click.
  input$sel_samp
  sa_tab()[input$tbl_rows_selected,]$Sample
})
# DESeqDataSet restricted to the samples selected in the table, with
# size factors re-estimated and low-count features filtered out.
dds_HTSeq <- reactive({
  # Reference the "Update Sample Selection" button so this re-fires on click.
  input$sel_samp
  dds_HTSeq <- dds_HTSeq_in()[,selected_samples()]
  dds_HTSeq <- estimateSizeFactors(dds_HTSeq)
  # Keep features with >= min_count normalized counts in >= min_count_sample
  # samples.  try() guards against the filter sliders not being rendered yet
  # (their inputs are NULL on first flush).
  idx <- try(rowSums( counts(dds_HTSeq, normalized=TRUE) >= input$min_count ) >= input$min_count_sample)
  # inherits() is the robust condition-class test; comparing class() with
  # "==" can warn/misfire because class() may return a vector.
  if (inherits(idx, "try-error"))
    dds_HTSeq
  else
    dds_HTSeq[idx, ]
})
# Variance-stabilised expression object chosen by the user (rlog when
# input$corr == "log2", otherwise VST), subset to the filtered features
# and currently selected samples.
assay_in <- reactive({
  # Reference the "Update Sample Selection" button so this re-fires on click.
  input$sel_samp
  if (input$corr == "log2")
    assay_in <- rlt_in()
  else
    assay_in <- vst_in()
  # Rows: features surviving the dds_HTSeq() count filter; columns: selected samples.
  assay_in[rownames(dds_HTSeq()), colnames(assay_in)%in%selected_samples()]
})
# Sample metadata (colData) of the filtered dataset as a plain data frame,
# rownames matched to the count-matrix column names.  Used for heatmap
# annotations and PCA colouring.
expgroups <- reactive({
  # References taken so this invalidates on data load / sample re-selection.
  input$load_x
  input$sel_samp
  expgroups <- as.data.frame(colData(dds_HTSeq()))
  rownames(expgroups) <- colnames(dds_HTSeq())
  expgroups
})
# Show the analyst's free-text notes file verbatim in the UI.
output$notes <- renderText(readLines("example_data/user_notes.txt"))
# Per-sample density plot of the normalized counts (log10 x-axis); one
# curve per sample, legend suppressed because sample counts can be large.
output$dens_log <- renderPlotly({
  # input$sel_samp
  # de_counts <- stack(de_counts)
  # de_counts <- de_counts[de_counts$values >0, ]
  # log2(values+1)
  #or raw counts
  # counts(dds_HTseq)
  # Wait for the filter sliders to exist before plotting.
  req(c(input$min_count, input$min_count_sample), cancelOutput = T)
  #this is for the selected assay (vst or rlog)
  # de_counts <- assay(assay_in())
  # de_counts <- stack(de_counts)
  # de_counts <- data.frame(de_counts[c(2,4)])
  # colnames(de_counts) <- c("Sample", "value")
  # g <- ggplot(de_counts, aes(value, colour = Sample, fill = Sample )) +
  #   geom_density(alpha = 0.05) + theme(legend.position="none") +
  #   xlab(input$corr)
  #this is for raw, normalized counts
  de_counts <- counts(dds_HTSeq(), normalized = T)
  # NOTE(review): stack() is documented for lists/data.frames; confirm its
  # behaviour on this count matrix and that columns 2 and 4 of the result
  # really are (sample, value) as assumed below.
  de_counts <- stack(de_counts)
  de_counts <- data.frame(de_counts[c(2,4)])
  colnames(de_counts) <- c("Sample", "value")
  g <- ggplot(de_counts, aes(value, colour = Sample, fill = Sample )) +
    geom_density(alpha = 0.05) + theme(legend.position="none") +
    xlab("Log10 of normalized counts") + scale_x_log10() +
    ylab("Density")
  g <- g + gg_back
  ggplotly(g)
})
#plot rRNA contamination
# Plot the percentage of rRNA-mapping reads per sample, read from the
# per-sample summary files in data_load$rRNA_file_pat.  Shows a "No Data"
# placeholder when the directory does not exist.
observeEvent(c(input$load_x, input$load_user, input$sel_samp),{
  if (file.exists(data_load$rRNA_file_pat)){
    #rRNA
    files_rrna <- list.files(data_load$rRNA_file_pat, full.names = T)
    # Column 6 = sample name, column 9 = "<pct>%" of reads mapping to rRNA.
    # lapply + do.call(rbind, ...) avoids growing the data frame in a loop.
    rrna <- do.call(rbind, lapply(files_rrna, function(f) read.table(f)[c(6, 9)]))
    rrna$V9 <- as.numeric(gsub("%", "", rrna$V9))
    # Strip the read-pair suffix to recover the bare sample name.
    rrna$V6 <- gsub("_R1_.*", "", rrna$V6)
    colnames(rrna) <- c("Sample", "Percentage_rRNA")
    rrna <- rrna[rrna$Sample %in% selected_samples(),]
    r_plot <- ggplot(data = rrna, aes(x = Sample, y = Percentage_rRNA)) +
      # geom_col() plots the values as given -- the idiomatic replacement for
      # the original geom_histogram(stat = "identity") misuse.
      geom_col() +
      theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
      ylab("Percentage rRNA reads") + xlab(NULL)
    r_plot <- r_plot + gg_back
    #rRNA end
    output$rRNA_cont <- renderPlotly({ggplotly(r_plot)})
  }else{
    output$rRNA_cont <- renderPlotly({
      ggplotly(
        ggplot(data.frame()) +
          annotate("text", x=8, y=13000, label= "No Data", size = 20, color = "red") +
          gg_back
      )
    })
  }
}, ignoreInit = T)
#plot rRNA contamination end
#not working??
observeEvent(c(input$load_x, input$load_user),
  # NOTE(review): browseURL() opens the report in the user's local browser
  # as a side effect and returns a status code -- renderUI() cannot embed
  # the page this way, which is likely why this is marked "not working".
  # Consider includeHTML() or an iframe pointing at a www/ resource instead.
  output$QC <- renderUI(browseURL(data_load$multi_qc_path, encodeIfNeeded = T))
)
#sample dists
# Slider choosing how many of the most-variable genes to retain; defaults
# to (and maxes out at) the full filtered feature count.
output$geneslide <- renderUI({
  n_features <- nrow(dds_HTSeq())  # same value as length(rownames(...))
  sliderInput("g_slide", "Select number of top genes to retain",
              min = 2, max = n_features, value = n_features)
})
# Free-text companion to the gene-count slider (kept in sync by the
# observeEvent on input$g_box below).
output$geneslide_box <- renderUI({
  textInput("g_box", "", value = nrow(dds_HTSeq()))
})
# Keep the gene-count slider in sync with the free-text box.
observeEvent(input$g_box,{
  updateSliderInput(session, "g_slide", value = input$g_box)
})
# Reduce the transformed assay to the input$g_slide most variable features,
# ranked by per-feature median absolute deviation (MAD).
assay_red <- reactive({
  input$g_slide
  # MAD of each feature across samples; robust variability measure.
  mads <- apply(assay(assay_in()), 1, mad)
  #or can use the row variation like this
  # library(genefilter)
  # ntop <- input$g_slide
  # rv <- rowVars(assay(assay.tmp))
  # select <- order(rv, decreasing = TRUE)[seq_len(min(ntop, length(rv)))]
  #
  # assay(assay.tmp)[select, ]
  assay_in()[order(mads, decreasing=T)[1:input$g_slide], ]
})
# Interactive heatmap of Euclidean distances between samples, computed on
# the (transformed, variance-reduced) expression matrix.
output$heatmapsampledist <- renderD3heatmap({
  # if (!is.null(input$color_by)) {
  # assay_red.tmp <- assay_red()
  # expgroups() <- as.data.frame(colData(assay_red.tmp)[, "Group"])
  # rownames(expgroups()) <- colnames(assay_red.tmp)
  # colnames(expgroups()) <- "Group"
  # NOTE(review): `annotation_col` is a pheatmap argument; confirm
  # d3heatmap() actually honours it rather than silently ignoring it.
  d3heatmap(as.matrix(dist(t(assay(assay_red())))),
            annotation_col = expgroups())
})
# Metadata column used to colour the correlation heatmap side bars.
output$col_h <- renderUI({
  selectInput("h_col", "Color by metadata", choices = names(colData(dds_HTSeq_in())), selected = 1)
})
# Pairwise sample-correlation matrix on the reduced assay.  The cor() call
# is isolate()d so it only recomputes when the update button (corr_up) is
# pressed, not whenever the correlation method selector changes.
corr <- reactive({
  input$corr_up
  isolate(cor(assay(assay_red()), method = input$pair_corr))
})
# Render the sample-correlation heatmap with group-coloured side bars and
# user-chosen clustering (k, linkage, distance).  Fill is fixed to [-1, 1].
observe({
  output$heatmapsampledist_corr <- renderPlotly({
    # if(!is.null(corr()))
    heatmaply(corr(),
              # Side-bar colours come from the metadata column picked in h_col.
              row_side_colors = fac2col(expgroups()[,colnames(expgroups())==input$h_col]),
              col_side_colors = fac2col(expgroups()[,colnames(expgroups())==input$h_col]),
              k_row = as.numeric(input$k_num), k_col = as.numeric(input$k_num),
              hclust_method = input$hclust, dist_method = input$dist_met,
              scale_fill_gradient_fun = ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0, limits = c(-1, 1)),
              heatmap_layers = gg_back,
              side_color_layers = gg_back
    )
  })
})
# Metadata column used to colour the PCA plots.
output$col_pca <- renderUI({
  selectInput("pc_col", "Color by metadata", choices = names(colData(dds_HTSeq_in())), selected = 1)
})
# PCA of the reduced assay (samples as observations).  This renderPlot also
# registers the 2D plotly biplot, the scree plot and the 3D PCA scatter.
# NOTE(review): nesting render*() calls inside another renderPlot() means
# they are re-registered on every invalidation -- works, but consider
# hoisting the prcomp() into its own reactive().
output$pca <- renderPlot({
  # expgroups() <- as.data.frame(colData(dds_HTSeq_in()))
  # rownames(expgroups()) <- colnames(dds_HTSeq_in())
  # colnames(expgroups()) <- "Group"
  # Samples in rows for prcomp(); data already variance-stabilised, so no scaling.
  pc_as <- t(assay(assay_red()))
  # pc_as <- t(assay(rlt_in))
  pca <- prcomp(pc_as, scale. = F, center = T)
  eig <- pca$sdev^2  # eigenvalues for the scree inset
  # Interactive 2D biplot on the user-selected PCs, coloured by metadata.
  output$pca_plotly <- renderPlotly(
    autoplot(pca, data = expgroups(), x = input$PC1, y = input$PC2,
             colour = input$pc_col, frame.colour = input$pc_col) + #frame = TRUE, frame.type = 'norm'
      gg_back
  )
  output$scree <- renderPlot({
    # NOTE(review): col.axis is passed twice here -- one of the duplicates
    # should be removed (harmless only if barplot's ... tolerates it).
    screeplot(pca,
              col = "#d3d3d3", col.lab = "#d3d3d3", col.main = "white", col.sub = "white", col.axis = "#d3d3d3", col.axis = "#d3d3d3", main = "")
  }, bg = "#272b30")
  # 3D scatter of the first three PCs, coloured by group.
  output$pca_3d <- renderPlotly({
    # d3.tmp <- data.frame(pca$rotation)
    # d3.tmp <- merge(d3.tmp, expgroups()[c(input$pc_col)], by = "row.names")
    # rownames(d3.tmp) <- d3.tmp$Row.names; d3.tmp <- d3.tmp[,-1]
    #
    # plot_ly(d3.tmp, x = ~PC1, y = ~PC2, z = ~PC3, color = input$pc_col) %>%
    #   add_markers() %>%
    #   layout(scene = list(xaxis = list(title = 'PC1'),
    #                       yaxis = list(title = 'PC2'),
    #                       zaxis = list(title = 'PC3')))
    ###3d scatter param
    axis <- list(
      xaxis = list(title = 'PC1'),
      yaxis = list(title = 'PC2'),
      zaxis = list(title = 'PC3'),
      color =toRGB("#d3d3d3"),
      linecolor = toRGB("#d3d3d3"),
      gridcolor = toRGB("#d3d3d3")
    )
    scene = list(
      xaxis = axis,
      yaxis = axis,
      zaxis = axis)
    ####
    # Only plot if metadata rows line up with the PCA score rows; otherwise
    # show a "No Data" placeholder.
    if (all(rownames(expgroups()[c(input$pc_col)]) == rownames(data.frame(pca$x))))
      plot_ly(data.frame(pca$x), x = ~PC1, y = ~PC2, z = ~PC3, color = expgroups()$Group,
              colors = unique(fac2col(grps$group)), alpha = 0.8) %>%
        add_markers() %>%
        layout(scene = scene,
               paper_bgcolor = "#272b30",
               plot_bgcolor = "#272b30",
               legend = list(
                 font = list(
                   color = "#d3d3d3")
               )
        )
    # plot_ly(data.frame(pca$rotation), x = ~PC1, y = ~PC2, z = ~PC3, color = unlist(expgroups()[c(input$pc_col)])) %>%
    #   add_markers() %>%
    #   layout(scene = list(xaxis = list(title = 'PC1'),
    #                       yaxis = list(title = 'PC2'),
    #                       zaxis = list(title = 'PC3')))
    else
      ggplotly(
        ggplot(data.frame()) +
          annotate("text", x=8, y=13000, label= "No Data", size = 20, color = "red") +
          gg_back
      )
  })
  # pca <- dudi.pca(pc_as, center=TRUE, scale=FALSE, scannf = F, nf = input$pcs)
  #cool pca plots
  # par(bg = "grey")
  # s.class(pca$l1, expgroups()$Group, col = unique(fac2col(expgroups()$Group)))
  # s.class(pca$x[,c(input$PC1, input$PC2)], expgroups()[,input$pc_col], col = unique(fac2col(expgroups()[,input$pc_col])))
  # Static adegenet class plot on the chosen PCs, with scree inset.
  s.class(data.frame(pca$x), xax = as.numeric(input$PC1), yax = as.numeric(input$PC2),
          expgroups()[,colnames(expgroups())==input$pc_col], col = unique(fac2col(expgroups()[,colnames(expgroups())==input$pc_col])))
  add.scatter.eig(eig, 3, 1, c(as.numeric(input$PC1), as.numeric(input$PC2)), posi = "bottomright")
  # bg.col = "#272b30")
}, bg = "#272b30")
#sat, sens and sample plots
# NOISeq QC panels: saturation, sensitivity (counts-per-biotype) and
# biotype-detection plots, plus the sample selectors that drive them.
# The repeated c(1:length(...))[names(...) %in% ...] idiom converts the
# selected sample NAMES into the positional INDICES explo.plot() expects.
observeEvent(c(input$load_x, input$load_user),{
  output$slider_sat_ui <- renderUI({
    #change this to the sample names
    selectInput("slider_sat", "Select samples to compare",
                choices = selected_samples(),
                multiple = T,
                width = "100%",
                selected = selected_samples()[1:3])
    # sliderInput(inputId = "slider_sat", "Select sample range to explore", min = 1, max = length(names(noi_dat_saturation()@dat$depth)), value = c(1, 10), step = 1)
    #length(input$tbl_rows_selected)
    # input$tbl_rows_selected
  })
  # Saturation across all features (toplot = 1 is the "global" panel).
  output$sat_1<- renderPlot({
    # White-on-dark base-graphics palette to match the app theme.
    par(col.lab="white", col="grey", col.axis = "white", col.main = "white", col.sub = "white")
    # explo.plot(noi_dat_saturation(), toplot = 1, samples = c(input$slider_sat))
    explo.plot(noi_dat_saturation(), toplot = 1, samples = c(1:length(names(noi_dat_saturation()@dat$depth)))[names(noi_dat_saturation()@dat$depth)%in%c(input$slider_sat)],
               col.ticks = "white")
  }, bg = "#272b30")
  # Saturation restricted to protein-coding features.
  output$sat_2 <- renderPlot({
    par(col.lab="white", col="grey", col.axis = "white", col.main = "white", col.sub = "white")
    explo.plot(noi_dat_saturation(), toplot = "protein_coding", samples = c(1:length(names(noi_dat_saturation()@dat$depth)))[names(noi_dat_saturation()@dat$depth)%in%c(input$slider_sat)],
               col.ticks = "white")
  }, bg = "#272b30")
  output$slider_sens_ui <- renderUI({
    selectInput("slider_sens", "Select samples to compare",
                choices = selected_samples(),
                multiple = T,
                width = "100%",
                selected = selected_samples()[1:3])
  })
  # Counts-per-biotype barplot; the summary names include "global"/"total"
  # pseudo-entries, which are excluded before matching sample names.
  output$sens_1 <- renderPlot({
    par(col.lab="white", col="grey", col.axis = "white", col.main = "white", col.sub = "white")
    explo.plot(noi_dat_countsbio(), toplot = 1, plottype = "barplot",
               samples = c(1:length(names(noi_dat_countsbio()@dat$summary$global)[!(names(noi_dat_countsbio()@dat$summary$global)%in%c("global", "total"))]))[names(noi_dat_countsbio()@dat$summary$global)[!(names(noi_dat_countsbio()@dat$summary$global)%in%c("global", "total"))]%in%c(input$slider_sens)],
               col.ticks = "white")
  }, bg = "#272b30")
  # Boxplot of protein-coding counts per sample.
  output$sens_2 <- renderPlot({
    par(col.lab="white", col="grey", col.axis = "white", col.main = "white", col.sub = "white")
    explo.plot(noi_dat_countsbio(), toplot = "protein_coding", plottype = "boxplot",
               samples = c(1:length(names(noi_dat_countsbio()@dat$summary$global)[!(names(noi_dat_countsbio()@dat$summary$global)%in%c("global", "total"))]))[names(noi_dat_countsbio()@dat$summary$global)[!(names(noi_dat_countsbio()@dat$summary$global)%in%c("global", "total"))]%in%c(input$slider_sens)],
               col.ticks = "white")
  }, bg = "#272b30")
  # Two single-choice selectors for the pairwise biotype comparison below.
  output$slider_comps_ui <- renderUI({
    fluidRow(
      column(4,
             selectInput("slider_comps1", "Select samples to compare",
                         choices = selected_samples(),
                         multiple = F,
                         width = "100%",
                         selected = selected_samples()[1])),
      column(4,
             selectInput("slider_comps2", "Select samples to compare",
                         choices = selected_samples(),
                         multiple = F,
                         width = "100%",
                         selected = selected_samples()[2]))
    )
  })
  output$bio_plot_1 <- renderPlot({
    par(col.lab="white", col="grey", col.axis = "white", col.main = "white", col.sub = "white")
    par(mfrow = c(1,2)) # we need this instruction because two plots (one per sample) will be generated
    explo.plot(noi_dat_bio_detect(), plottype = "persample", samples = c(1:length(names(noi_dat_bio_detect()@dat$biotables)))[names(noi_dat_bio_detect()@dat$biotables)%in%c(input$slider_comps1, input$slider_comps2)],
               col.ticks = "white")
  }, bg = "#272b30")
  output$bio_plot_2 <- renderPlot({
    par(col.lab="white", col="grey", col.axis = "white", col.main = "white", col.sub = "white")
    par(mfrow = c(1,2)) # we need this instruction because two plots (one per sample) will be generated
    explo.plot(noi_dat_bio_detect(), toplot = "protein_coding", plottype = "comparison", samples = c(1:length(names(noi_dat_bio_detect()@dat$biotables)))[names(noi_dat_bio_detect()@dat$biotables)%in%c(input$slider_comps1, input$slider_comps2)],
               col.ticks = "white")
  }, bg = "#272b30")
  # Placeholder: statistical test for biotype differences between samples.
  output$bio_plot_2_text <- renderText({
    print("Show test for difference btw samples here")
    # # par(mfrow = c(1,2)) # we need this instruction because two plots (one per sample) will be generated
    # explo.plot(noi_dat_bio_detect(), toplot = "protein_coding", plottype = "comparison", samples = c(1:length(names(noi_dat_bio_detect()@dat$biotables)))[names(noi_dat_bio_detect()@dat$biotables)%in%c(input$slider_comps1, input$slider_comps2)])
  })
}, ignoreInit = T)
#statistical tab
# withProgress(message = 'Performing Differential Expression Calculations',
# detail = 'This may take a while...', value = 0, {
# # incProgress(1/2)
# })
#need selector with
# Metadata variable on which the differential-expression contrast is made.
output$meta_deUI <- renderUI(selectInput("meta_de", "Select Variable Group to compare",
                                         choices = colnames(expgroups()),
                                         selected = 1))
# then radio buttons with
# First level of the chosen metadata variable (numerator of the contrast).
output$de_grp_1_UI <- renderUI({
  radioButtons("de_grp_1", "Group 1",
               choices = unique(unlist(expgroups()[,input$meta_de])),
               selected = unique(unlist(expgroups()[,input$meta_de]))[1])
})
# then radio buttons with
# Second level of the chosen metadata variable (denominator of the contrast).
output$de_grp_2_UI <- renderUI({
  radioButtons("de_grp_2", "Group 2", choices = unique(unlist(expgroups()[,input$meta_de])),
               selected = unique(unlist(expgroups()[,input$meta_de]))[2])
})
# Sample counts per level of the chosen metadata variable.
output$sample_table <- renderTable(table(expgroups()[, input$meta_de]))
# Differential-expression pipeline: fires only on the "Calculate" (DEcalc)
# button, isolate()d so slider/radio changes do not retrigger the expensive
# DESeq2 fit.  Registers the results table, MA/volcano/QQ/scatter plots,
# selection-driven sub-plots, and the GO-enrichment panel.
# NOTE(review): defining reactives (dds, res) inside an observer is a Shiny
# anti-pattern -- each button press creates fresh reactives and re-registers
# the outputs.  It works, but eventReactive() at top level would be cleaner.
observeEvent(input$DEcalc, {
  req(input$de_grp_1, input$de_grp_2)
  isolate({
    if(input$de_grp_1 !=input$de_grp_2){
      # Optional DESeq() arguments are accumulated as "name=value," pairs;
      # "Auto" means "use the DESeq2 default", i.e. omit the argument.
      de_options <- NULL
      if (input$de_test != "Auto")
        de_options <- paste0(de_options, 'test="', input$de_test, '",')
      if (input$de_filt != "Auto")
        de_options <- paste0(de_options, 'fitType="', input$de_filt, '",')
      if (input$de_beta != "Auto")
        # betaPrior is logical, so it must NOT be quoted in the call string.
        de_options <- paste0(de_options, 'betaPrior=', input$de_beta, ',')
      dds <- reactive({
        # Restrict to the two chosen groups and drop unused factor levels
        # before fitting.
        dds.test <- dds_HTSeq()
        dds.test <- dds.test[,colData(dds.test)[,colnames(colData(dds.test))==input$meta_de]%in%c(input$de_grp_1, input$de_grp_2)]
        colData(dds.test)[,colnames(colData(dds.test))==input$meta_de] <- as.factor(as.character(colData(dds.test)[,colnames(colData(dds.test))==input$meta_de]))
        dds.test <- estimateSizeFactors(dds.test)
        if (is.null(de_options))
          dds <- DESeq(dds.test, parallel = T)
        else
          # BUG FIX: the original built the call string inside cat(), which
          # returns NULL, so parse()/eval() evaluated nothing and dds was
          # NULL whenever a non-default option was chosen.  Build the string
          # with paste0() and strip the trailing comma instead.
          dds <- eval(parse(text = paste0("DESeq(dds.test, parallel = T, ",
                                          sub(",$", "", de_options),
                                          ")")))
        dds
      })
      # log2 normalized counts for the tested subset and for all currently
      # selected samples (used by the plots below).
      # NOTE(review): log2() of zero counts gives -Inf; confirm downstream
      # plots tolerate that (consider log2(x + 1)).
      counts.test <- log2(counts(dds(), normalized=T))
      counts.all <- log2(counts(dds_HTSeq(), normalized=T))
      res <- reactive({
        # Same optional-argument mechanism for results().
        res_opts <- NULL
        if (input$alt != "Auto")
          res_opts <- paste0(res_opts, 'altHypothesis="', input$alt, '",')
        if (input$cook != "Auto")
          # cooksCutoff is logical/numeric, so it must not be quoted.
          res_opts <- paste0(res_opts, 'cooksCutoff=', input$cook, ',')
        if (is.null(res_opts))
          res <- results(dds(),
                         alpha=input$alpha_in, contrast = c(input$meta_de, input$de_grp_1, input$de_grp_2),
                         pAdjustMethod = input$p_adjM, lfcThreshold = input$lcf, parallel = T)
        else
          # BUG FIX: as above, cat() -> paste0(); also call the reactive via
          # dds() rather than referencing the function object dds.
          res <- eval(parse(text = paste0(
            "results(dds(), parallel = T, alpha=input$alpha_in, ",
            "contrast = c(input$meta_de, input$de_grp_1, input$de_grp_2), ",
            "pAdjustMethod = input$p_adjM, lfcThreshold = input$lcf, ",
            sub(",$", "", res_opts), ")")))
        # Annotate Ensembl IDs with gene symbols and Entrez IDs.
        res$symbol <- mapIds(org.Hs.eg.db,
                             keys=row.names(res),
                             column="SYMBOL",
                             keytype="ENSEMBL",
                             multiVals="first")
        res$entrez <- mapIds(org.Hs.eg.db,
                             keys=row.names(res),
                             column="ENTREZID",
                             keytype="ENSEMBL",
                             multiVals="first")
        res
      })
      #DE outputs
      # Results table sorted by adjusted p, untested genes (NA p-value)
      # dropped, rounded for display, padj highlighted at the user cutoff.
      output$DE_stat <- renderDataTable({
        res <- res()
        res <- res[order(res$padj, decreasing = F),]
        tmp <- data.frame(res[!(is.na(res$pvalue)),])
        tmp[,1:6] <- round(tmp[,1:6], 3)
        tmp$Gene <- row.names(tmp)
        tmp <- tmp[c(9,1,2,3,4,5,6,7,8)]  # Gene first, then the statistics
        datatable(tmp, rownames = F) %>% formatStyle(
          'padj',
          backgroundColor = styleInterval(input$p_in, c('#98FB98', '#D3D3D3')),
          color = "black"
        ) %>% formatStyle(
          c(colnames(tmp)),
          color = "black"
        )
      })
      # Annotated copy of the results used by the MA plot and the scatter.
      res_ma <- data.frame(res())
      res_ma$Fold_change <- ifelse(res_ma$log2FoldChange <0, "down", "up")
      res_ma$Fold_change <- ifelse(is.na(res_ma$Fold_change), "none", res_ma$Fold_change)
      res_ma$Significat_at_p_adjusted <- ifelse(res_ma$padj <= input$p_in, "yes", "no")
      output$DE_ma <- renderPlotly({
        ggplotly(
          ggplot(res_ma, aes(x=baseMean, y=log2FoldChange)) +
            geom_point(aes( colour = Significat_at_p_adjusted, shape=Fold_change )) + scale_x_continuous(trans='log10') +
            geom_hline(yintercept = 0, col = "red") + xlab("Mean of Normalized Counts") + ylab("Log Fold Change") +
            ylim(c(-5, 5)) +
            geom_smooth(se = F) +
            gg_back
        )
      })
      ### Volcano-plot data; key_test lets plotly lasso selection map back
      ### to row indices (consumed by the observe() below).
      volc <- data.frame(res())
      volc$P <- volc$pvalue
      volc$log_pvalue <- -log10(volc$pvalue)
      volc$sig_p.adj <- ifelse(volc$padj <= input$p_in, "yes", "no")
      volc$Gene <- paste0(rownames(volc), " (", volc$symbol, ")")
      volc$key_test <- seq(1:nrow(volc))
      output$DE_volc <- renderPlotly({
        vol <- ggplot(data=volc,
                      aes(x = log2FoldChange, y = log_pvalue, colour = sig_p.adj, text = Gene, key = key_test)) +
          geom_point(alpha=0.4, size=1.75) +
          xlab("log2FoldChange") + ylab("-log10 p-value (unadjusted)") +
          theme_bw() +
          theme(legend.position="right")+
          theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
          gg_back
        ggplotly(vol, tooltip = c("log2FoldChange", "log_pvalue", "Gene"), source = "volc_sor") %>% layout(dragmode = "select")
      })
      # QQ plot of the (unadjusted) p-values; qqly() is sourced from qqly.R.
      output$DE_qq <- renderPlotly({
        qqly(na.omit(volc))
      })
      # Mean log2 normalized count per gene in each group, coloured by
      # significance at the chosen adjusted-p cutoff.
      output$DE_scat <- renderPlotly({
        expgroups.tmp <- expgroups()[c(colnames(expgroups())==input$meta_de)]
        colnames(expgroups.tmp) <- "Group"
        expgroups.tmp$sample <- rownames(expgroups.tmp)
        scat.tmp <- counts.test
        scat.tmp.1 <- data.frame(scat.tmp[,colnames(scat.tmp)%in%
                                            expgroups.tmp[expgroups.tmp$Group==input$de_grp_1, ]$sample
                                          ])
        scat.tmp.2 <- data.frame(scat.tmp[,colnames(scat.tmp)%in%
                                            expgroups.tmp[expgroups.tmp$Group==input$de_grp_2, ]$sample
                                          ])
        scat.tmp <- NULL
        scat.tmp.1$cond1 <- rowMeans(scat.tmp.1, na.rm = T)
        scat.tmp.2$cond2 <- rowMeans(scat.tmp.2, na.rm = T)
        scat <- data.frame("cond1" = scat.tmp.1$cond1,
                           "cond2" = scat.tmp.2$cond2,
                           "Gene" = rownames(scat.tmp.1),
                           "Significat_at_p_adjusted" = res_ma$Significat_at_p_adjusted,
                           "Adj.p" = res_ma$padj)
        p.scat <- ggplot(scat, aes(x=cond1, y=cond2)) +
          geom_point(aes( colour = Significat_at_p_adjusted)) +
          xlab(input$de_grp_1) + ylab(input$de_grp_2) + gg_back
        # Invisible layers so extra fields appear in the plotly tooltip.
        p.scat <- p.scat + geom_point(aes(Adj.p), alpha = 0) +
          geom_text(aes(label=Gene), hjust=0, vjust=0, alpha = 0)
        ggplotly(p.scat, tooltip = c("Gene", "cond1", "cond2")) #, "Adj.p"
      })
      # Debug view of the raw plotly selection event.
      output$brush <- renderPrint({
        d <- event_data("plotly_selected", source = "volc_sor", session)
        if (is.null(d)) "Click and drag events (i.e., select/lasso) appear here (double-click to clear)" else d
      })
      # React to lasso/box selection on the volcano plot: per-selection
      # boxplots (tested subset vs all selected samples) and a heatmap.
      observe({
        eve_data <- event_data("plotly_selected", source = "volc_sor", session)
        volc_key <- as.numeric(eve_data$key)
        if (!(is.null(eve_data))){
          # drop = F keeps a matrix even when a single gene is selected.
          count.tmp <- counts.test[c(volc_key),, drop=F]
          count.tmp <- data.frame(t(count.tmp))
          count.tmp$mean_count <- rowMeans(count.tmp)
          ano <- colData(dds())[,input$meta_de]
          #all samples in the initial selection, not just the tested pair
          counts.all.tmp <- counts.all[c(volc_key),, drop=F]
          counts.all.tmp <- data.frame(t(counts.all.tmp))
          counts.all.tmp$mean_count <- rowMeans(counts.all.tmp)
          counts.all.tmp$Sample <- rownames(counts.all.tmp)
          counts.all.tmp$Genes <- paste(colnames(counts.all.tmp)[1:(length(colnames(counts.all.tmp))-2)], sep="", collapse=",")
          ano.all <- colData(dds_HTSeq())[,input$meta_de]
          output$genes_sel <- renderText(c("Selected Genes", colnames(count.tmp)[colnames(count.tmp) != "mean_count"] ))
          output$boxplot_sel <- renderPlotly({
            p1 <- ggplotly(
              ggplot(count.tmp, aes(x = ano, y = mean_count, color = ano)) +
                scale_y_log10() + geom_boxplot(outlier.alpha = 0) + geom_jitter(cex = 3) +
                xlab(input$meta_de) + ylab("Mean count across selected genes")
            )
            p2 <- ggplotly(
              ggplot(counts.all.tmp, aes(x = ano.all, y = mean_count, color = ano.all)) +
                scale_y_log10() + geom_boxplot(outlier.alpha = 0) + geom_jitter(cex = 3) +
                xlab(input$meta_de) + ylab("Mean count across selected genes - all samples in initial table")
            )
            subplot(p1, p2, shareX = F, shareY = T)
          })
          # Heatmap only makes sense for more than one selected gene.
          output$heat_sel <- renderPlotly({
            if (length(volc_key) > 1){
              count.heat.tmp <- counts.test[c(volc_key),]
              heatmaply(count.heat.tmp,
                        scale_fill_gradient_fun = ggplot2::scale_fill_gradient2(low = "red", mid = "black", high = "green", midpoint = 0, na.value = "white"),
                        xlab = "Mean count across selected genes",
                        heatmap_layers = gg_back,
                        side_color_layers = gg_back
              )
            }
          })
        }
      })
      ############
      ##go panel##
      ############
      # GO enrichment: goseq (length-bias corrected) plus GOexpress on the
      # normalized counts, using pre-computed annotation objects from disk.
      observeEvent(input$Calculate, {
        load("hg19.ensGene.LENGTH")
        res.go <- res()
        res.go <- res.go[!is.na(res.go$pvalue),]
        # 1 = significant at the chosen adjusted-p cutoff, 0 = not.
        sig_genes <- ifelse(res.go$padj<=input$p_in, 1, 0)
        names(sig_genes) <- rownames(res.go)
        pwf <- nullp(sig_genes, "hg19", "ensGene", plot.fit = F)
        output$pwd_plot <- renderPlot({
          plotPWF(pwf)
        })
        output$enriched <- renderText({
          GO.wall <- goseq(pwf, "hg19", "ensGene")
          enriched.GO <- GO.wall$category[p.adjust(GO.wall$over_represented_pvalue, method="BH")<input$p_go]
          for(go in enriched.GO){
            print(GOTERM[[go]])
            cat("--------------------------------------\n")
          }
        })
        # ExpressionSet for GOexpress, built from the normalized counts.
        counts.tmp <- log2(counts(dds(), normalized=T))
        counts.phen <- annotatedDataFrameFrom(as.matrix(colData(dds())), byrow = T)
        counts.go <- ExpressionSet(counts.tmp, counts.phen)
        pData(counts.go)$Group <- colData(dds())$Group
        # Pre-downloaded biomaRt annotation tables (see repo history for the
        # code that generated/saved them).
        load("GOgenes.Ensembl")
        load("allGO.Ensembl")
        load("allgenes.Ensembl")
        set.seed(1987)  # GO_analyse with the random-forest method is stochastic
        GO_results <- GO_analyse(eSet = counts.go, f = "Group",
                                 GO_genes=GOgenes.Ensembl, all_GO=allGO.Ensembl, all_genes=allgenes.Ensembl,
                                 method = input$method_go)
        GO_results.pVal <- pValue_GO(result=GO_results)
        output$res_tab <- renderTable({
          data.frame(GO_results.pVal$GO)
        })
        # need to make assignment for input$GO_selected, linked to table?
        output$GO_heat <- renderPlot({
          heatmap_GO(input$GO_selected, GO_results, counts.go)
        })
      })
    }
  })
}, ignoreInit = T) #end DE action button
# Save the current app state server-side (see enableBookmarking below).
observeEvent(input$bookmark_test, {
  session$doBookmark()
})
#
# observeEvent(input$test_save,{
# save(session, file = "delete/test")
# })
#
# observeEvent(input$test_load,{
# session = load("delete/test")
# })
#
# observeEvent(input$test_load,{
#
# if(!file.exists("delete/inputs.RDS")) {return(NULL)}
#
# savedInputs <- readRDS("delete/inputs.RDS")
#
# inputIDs <- names(savedInputs)
# inputvalues <- unlist(savedInputs)
# for (i in 1:length(savedInputs)) {
# session$sendInputMessage(inputIDs[i], list(value=inputvalues[[i]]) )
# }
# })
#
# observeEvent(input$test_save,{
# saveRDS( reactiveValuesToList(input) , file = "delete/inputs.RDS")
# })
# #for DE calcs
# save.image(file = "test.R")
# dds(), counts.test, counts.all, res()
# onBookmark(function(state) {
# state$values$DE <- vals$sum
# })
#
})
# Persist bookmark state on the server (paired with session$doBookmark above).
enableBookmarking(store = "server") #or "url"
| /server.R | no_license | SemiQuant/RNAseeker | R | false | false | 44,624 | r | # using install.packages(c("ggplot2", "shiny", "plotly"))
#other devtools::install_github("rstudio/shiny"); devtools::install_github("hadley/ggplot2"); devtools::install_github("ropensci/plotly")
# rsconnect::deployApp("/Users/jdlim/Bioinformatics/RNAseeker", launch.browser = F, account = "sciencebuff")
#to fix
# updating of corr plots after sample selection
#########
##TO DO##
#########
# make seletion on volcan plot show other plots - have hover and select as radiobutton options
#
# check scatter plot of counts is correct
#
# add GO analysis
#
# add 'googleAuthR' button and to load data
# add bookmark button
#
# add annotation info to plotly and tables
# add annotation lookup
#
# make plots look good
#
# add download as HTML widget for plots
#
#################################################################################################
source("qqly.R")
require(shiny)
require(ggplot2)
require(plotly)
require(DESeq2)
require(NOISeq)
require(adegenet)
require(DT)
require(ggfortify)
require(heatmaply)
require(BiocParallel)
require(AnnotationDbi)
require(org.Hs.eg.db)
require(goseq)
require(GO.db)
require(GOexpress)
#custom theme for ggplot
gg_back <- theme(
panel.background = element_rect(fill = "#272b30"),
plot.background = element_rect(fill = "#272b30"),
legend.background = element_rect(fill = "#272b30"),
panel.grid.major = element_line(colour = "black"),
axis.title.x = element_text(colour = "light grey"),
axis.title.y = element_text(colour ="light grey"),
legend.title = element_text(colour = "white"),
plot.title = element_text(colour = "light grey"),
legend.text = element_text(colour = "light grey"),
axis.text = element_text(colour ="light grey")
)
testing <<- "T"
threads <<- 1
register(MulticoreParam(threads))
shinyServer(function(input, output, session) {
#############
##load data##
#############
data_load <- reactiveValues()
observeEvent(input$load_x,{
data_load$rRNA_file_pat <- "example_data/rRNA_check/"
data_load$vst_path <- "example_data/vst_in"
data_load$rlt_path <- "example_data/rlt_in"
data_load$dds_path <-"example_data/dds_HTSeq_in"
data_load$bio_path <- "bio.txt"
data_load$noi_sat_path <- "example_data/noi_dat_saturation"
data_load$noi_coutns_path <- "example_data/noi_dat_countsbio"
data_load$noi_bio_path <- "example_data/noi_dat_bio_detect"
data_load$multi_qc_path <- "/Users/jdlim/Library/Mobile Documents/com~apple~CloudDocs/Bioinformatics/RNAseeker/example_data/multiqc_report.html"
})
observeEvent(input$load_user,{
data_load$rRNA_file_pat <- data_load$vst_path <- data_load$rlt_path <- data_load$dds_path <- data_load$bio_path <- data_load$noi_sat_path <- data_load$noi_coutns_path <- data_load$noi_bio_path <- data_load$multi_qc_path <- NULL
data_load$rRNA_file_pat <- paste0(input$load_user, "/rRNA_check/")
data_load$vst_path <- paste0(input$load_user, "/vst_in")
data_load$rlt_path <- paste0(input$load_user, "/rlt_in")
data_load$dds_path <-paste0(input$load_user, "/dds_HTSeq_in")
data_load$bio_path <- paste0(input$load_user, "bio.txt")
data_load$noi_sat_path <- paste0(input$load_user, "/noi_dat_saturation")
data_load$noi_coutns_path <- paste0(input$load_user, "/noi_dat_countsbio")
data_load$noi_bio_path <- paste0(input$load_user, "/noi_dat_bio_detect")
data_load$multi_qc_path <- paste0(input$load_user, "/multiqc_report.html")
# data_load$snp_matrix <- as.character(input$meta_dat$datapath[1])
# data_load$genp_path <- as.character(input$snp_mat_IN$datapath[1])
})
rlt_in <- reactive({
load(data_load$rlt_path)
if (testing == "T")
rlt_in[1:500]
else
rlt_in
})
dds_HTSeq_in <- reactive({
load(data_load$dds_path)
if (testing == "T")
dds_HTSeq_in[1:500]
else
dds_HTSeq_in
})
vst_in <- reactive({
load(data_load$vst_path)
if (testing == "T")
vst_in[1:500]
else
vst_in
})
# # bio_detect <- eventReactive(input$load_x,{
# # load("example_data/bio_detect")
# # names(bio_detect@dat$biotables) <- gsub(".Homo_sapiens.HTSeq.counts", "", names(bio_detect@dat$biotables))
# # bio_detect
# # }, ignoreInit = TRUE)
# #
# rlt_in <- eventReactive(input$load_x, input$load_u,{
# print(data_load$rlt_path)
# load(data_load$rlt_path)
# if (testing == "T")
# rlt_in[1:500]
# else
# rlt_in
# })
noi_dat <- reactive({
bio <- read.table(data_load$bio_path)
readData(assay(dds_HTSeq_in()), colData(dds_HTSeq_in()), biotype = bio)
})
noi_dat_saturation <- reactive({
# noi_dat_saturation() <- dat(noi_dat(), k = 0, ndepth = 10, type = "saturation")
load(data_load$noi_sat_path)
noi_dat_saturation
})
noi_dat_countsbio <- reactive({
# noi_dat_countsbio() <- dat(noi_dat(), factor = NULL, type = "countsbio")
load(data_load$noi_coutns_path)
noi_dat_countsbio
})
noi_dat_bio_detect <- reactive({
# noi_dat_bio_detect() <- dat(noi_dat(), k = 0, type = "biodetection", factor = NULL)
load(data_load$noi_bio_path)
noi_dat_bio_detect
})
# observeEvent(input$load_x, input$load_u,{
# data_load$multi_qc_path <<- "/Users/jdlim/Library/Mobile Documents/com~apple~CloudDocs/Bioinformatics/RNAseeker/example_data/multiqc_report.html"
# })
#load data end#
###############
#create the main tabke with samples (this is where samples can be selected)
sa_tab <- eventReactive(input$load_x,{
s_tab <- data.frame(colData(dds_HTSeq_in()))
s_tab$sample <- row.names(s_tab)
conts <- data.frame(colSums(assay(dds_HTSeq_in())))
s_tab <- merge(s_tab, conts, by = "row.names")
genes <- data.frame(colSums(assay(dds_HTSeq_in())>1))
sa_tab <- merge(s_tab, genes, by.x = "sample", by.y = "row.names")[,2:6]
colnames(sa_tab) <- c("Sample", "Group", "Rep", "Read Count", "Features Detected")
sa_tab
})
observeEvent(c(input$load_x, input$load_user),{
# render radio buttons based on the groups in the dds object to be used for selection in table
output$sel <- renderUI({
all_samples <- colnames(dds_HTSeq_in())
all_groups <- as.character(unique(colData(dds_HTSeq_in())[,1]))
tags$div(align = 'left',
class = 'multicol',
# checkboxGroupInput("sel_samp", "Select samples to keep by name",
# inline = T,
# choiceNames = as.list(all_samples),
# choiceValues = as.list(all_samples),
# selected = as.list(all_samples)
# ),
checkboxGroupInput("sel_samp_g", "Select samples to keep by group (not working yet)",
inline = T,
# choiceNames = as.list(all_groups),
# choiceValues = as.list(all_groups),
choices = as.list(all_groups),
selected = as.list(all_groups)
)
)
})
output$sel_samp_out <- renderUI(actionButton("sel_samp", h4("Update Sample Selection")))
#data table of samples, metadata from dds and reads counts - see sa_tab function
output$tbl <- DT::renderDataTable({
datatable(sa_tab(),
selection = list(target = 'row', selected = 1:nrow(sa_tab()))
) %>% formatStyle(
c(colnames(sa_tab())),
backgroundColor = "red",
color = "black"
)
}, server = TRUE)
output$gene_count <- renderUI({
sliderInput("min_count", "Keep features with more than this many normalized counts", min = 0, max = 100, value = 5, step = 1)
})
output$gene_count_sample <- renderUI({
sliderInput("min_count_sample", "In this many samples", min = 1, max = length(input$tbl_rows_selected), value = length(input$tbl_rows_selected), step = 1)
})
output$mil_reads <- renderPlotly({
counts_total <- counts(dds_HTSeq_in())
counts_total <- colSums(counts_total)
counts_total <- data.frame(counts_total)
counts_total$Sample <- rownames(counts_total)
c <- ggplot(counts_total, aes(Sample,counts_total)) +
geom_bar(stat = "identity", aes(fill = counts_total)) + #colour = counts_total,
# scale_colour_gradient(low = "blue", high = "green") +
scale_fill_gradientn(colours = heat.colors(10), name = "Count\nIntensity") +
theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
gg_back +
ggtitle("Counts of reads per sample") +
ylab("Count") + xlab(NULL)
ggplotly(c) #%>% layout(autosize = FALSE, width = "400px")
})
}, ignoreInit = T)
selected_samples <- reactive({
input$sel_samp
sa_tab()[input$tbl_rows_selected,]$Sample
})
dds_HTSeq <- reactive({
input$sel_samp
dds_HTSeq <- dds_HTSeq_in()[,selected_samples()]
dds_HTSeq <- estimateSizeFactors(dds_HTSeq)
idx <- try(rowSums( counts(dds_HTSeq, normalized=TRUE) >= input$min_count ) >= input$min_count_sample)
if (class(idx) == "try-error")
dds_HTSeq
else
dds_HTSeq[idx, ]
})
assay_in <- reactive({
input$sel_samp
if (input$corr == "log2")
assay_in <- rlt_in()
else
assay_in <- vst_in()
assay_in[rownames(dds_HTSeq()), colnames(assay_in)%in%selected_samples()]
})
expgroups <- reactive({
input$load_x
input$sel_samp
expgroups <- as.data.frame(colData(dds_HTSeq()))
rownames(expgroups) <- colnames(dds_HTSeq())
expgroups
})
output$notes <- renderText(readLines("example_data/user_notes.txt"))
output$dens_log <- renderPlotly({
# input$sel_samp
# de_counts <- stack(de_counts)
# de_counts <- de_counts[de_counts$values >0, ]
# log2(values+1)
#or raw counts
# counts(dds_HTseq)
req(c(input$min_count, input$min_count_sample), cancelOutput = T)
#this is for the selected assay (vst or rlog)
# de_counts <- assay(assay_in())
# de_counts <- stack(de_counts)
# de_counts <- data.frame(de_counts[c(2,4)])
# colnames(de_counts) <- c("Sample", "value")
# g <- ggplot(de_counts, aes(value, colour = Sample, fill = Sample )) +
# geom_density(alpha = 0.05) + theme(legend.position="none") +
# xlab(input$corr)
#this is for raw, normalized counts
de_counts <- counts(dds_HTSeq(), normalized = T)
de_counts <- stack(de_counts)
de_counts <- data.frame(de_counts[c(2,4)])
colnames(de_counts) <- c("Sample", "value")
g <- ggplot(de_counts, aes(value, colour = Sample, fill = Sample )) +
geom_density(alpha = 0.05) + theme(legend.position="none") +
xlab("Log10 of normalized counts") + scale_x_log10() +
ylab("Density")
g <- g + gg_back
ggplotly(g)
})
#plot rRNA contamination
observeEvent(c(input$load_x, input$load_user, input$sel_samp),{
if (file.exists(data_load$rRNA_file_pat)){
#rRNA
files_rrna <- list.files(data_load$rRNA_file_pat, full.names = T)
rrna <- NULL
for (file in files_rrna){
tmp.r <- read.table(file)
rrna <- rbind(rrna, tmp.r[c(6,9)])
}
rrna$V9 <- gsub("%","",rrna$V9)
rrna$V9 <- as.numeric(rrna$V9)
rrna$V6 <- gsub("_R1_.*", "", rrna$V6)
colnames(rrna) <- c("Sample", "Percentage_rRNA")
rrna <- rrna[rrna$Sample%in%selected_samples(),]
r_plot <- ggplot(data = rrna, aes(x = Sample, y = Percentage_rRNA)) +
geom_histogram(stat = "identity") + #, aes(fill = Sample)
theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
ylab("Percentage rRNA reads") + xlab(NULL)
r_plot <- r_plot + gg_back
#rRNA end
output$rRNA_cont <- renderPlotly({ggplotly(r_plot)})
}else{
output$rRNA_cont <- renderPlotly({
ggplotly(
ggplot(data.frame()) +
annotate("text", x=8, y=13000, label= "No Data", size = 20, color = "red") +
gg_back
)
})
}
}, ignoreInit = T)
#plot rRNA contamination end
#not working??
observeEvent(c(input$load_x, input$load_user),
output$QC <- renderUI(browseURL(data_load$multi_qc_path, encodeIfNeeded = T))
)
#sample dists
output$geneslide <- renderUI({
sliderInput("g_slide", "Select number of top genes to retain", min = 2, max = length(rownames(dds_HTSeq())), value = length(rownames(dds_HTSeq())))
})
output$geneslide_box <- renderUI({
textInput("g_box", "", value = length(rownames(dds_HTSeq())))
})
observeEvent(input$g_box,{
updateSliderInput(session, "g_slide", value = input$g_box)
})
assay_red <- reactive({
input$g_slide
mads <- apply(assay(assay_in()), 1, mad)
#or can use the row variation like this
# library(genefilter)
# ntop <- input$g_slide
# rv <- rowVars(assay(assay.tmp))
# select <- order(rv, decreasing = TRUE)[seq_len(min(ntop, length(rv)))]
#
# assay(assay.tmp)[select, ]
assay_in()[order(mads, decreasing=T)[1:input$g_slide], ]
})
output$heatmapsampledist <- renderD3heatmap({
# if (!is.null(input$color_by)) {
# assay_red.tmp <- assay_red()
# expgroups() <- as.data.frame(colData(assay_red.tmp)[, "Group"])
# rownames(expgroups()) <- colnames(assay_red.tmp)
# colnames(expgroups()) <- "Group"
d3heatmap(as.matrix(dist(t(assay(assay_red())))),
annotation_col = expgroups())
})
output$col_h <- renderUI({
selectInput("h_col", "Color by metadata", choices = names(colData(dds_HTSeq_in())), selected = 1)
})
corr <- reactive({
input$corr_up
isolate(cor(assay(assay_red()), method = input$pair_corr))
})
observe({
output$heatmapsampledist_corr <- renderPlotly({
# if(!is.null(corr()))
heatmaply(corr(),
row_side_colors = fac2col(expgroups()[,colnames(expgroups())==input$h_col]),
col_side_colors = fac2col(expgroups()[,colnames(expgroups())==input$h_col]),
k_row = as.numeric(input$k_num), k_col = as.numeric(input$k_num),
hclust_method = input$hclust, dist_method = input$dist_met,
scale_fill_gradient_fun = ggplot2::scale_fill_gradient2(low = "blue", high = "red", midpoint = 0, limits = c(-1, 1)),
heatmap_layers = gg_back,
side_color_layers = gg_back
)
})
})
output$col_pca <- renderUI({
selectInput("pc_col", "Color by metadata", choices = names(colData(dds_HTSeq_in())), selected = 1)
})
output$pca <- renderPlot({
# expgroups() <- as.data.frame(colData(dds_HTSeq_in()))
# rownames(expgroups()) <- colnames(dds_HTSeq_in())
# colnames(expgroups()) <- "Group"
pc_as <- t(assay(assay_red()))
# pc_as <- t(assay(rlt_in))
pca <- prcomp(pc_as, scale. = F, center = T)
eig <- pca$sdev^2
output$pca_plotly <- renderPlotly(
autoplot(pca, data = expgroups(), x = input$PC1, y = input$PC2,
colour = input$pc_col, frame.colour = input$pc_col) + #frame = TRUE, frame.type = 'norm'
gg_back
)
output$scree <- renderPlot({
screeplot(pca,
col = "#d3d3d3", col.lab = "#d3d3d3", col.main = "white", col.sub = "white", col.axis = "#d3d3d3", col.axis = "#d3d3d3", main = "")
}, bg = "#272b30")
output$pca_3d <- renderPlotly({
# d3.tmp <- data.frame(pca$rotation)
# d3.tmp <- merge(d3.tmp, expgroups()[c(input$pc_col)], by = "row.names")
# rownames(d3.tmp) <- d3.tmp$Row.names; d3.tmp <- d3.tmp[,-1]
#
# plot_ly(d3.tmp, x = ~PC1, y = ~PC2, z = ~PC3, color = input$pc_col) %>%
# add_markers() %>%
# layout(scene = list(xaxis = list(title = 'PC1'),
# yaxis = list(title = 'PC2'),
# zaxis = list(title = 'PC3')))
###3d scatter param
axis <- list(
xaxis = list(title = 'PC1'),
yaxis = list(title = 'PC2'),
zaxis = list(title = 'PC3'),
color =toRGB("#d3d3d3"),
linecolor = toRGB("#d3d3d3"),
gridcolor = toRGB("#d3d3d3")
)
scene = list(
xaxis = axis,
yaxis = axis,
zaxis = axis)
####
if (all(rownames(expgroups()[c(input$pc_col)]) == rownames(data.frame(pca$x))))
plot_ly(data.frame(pca$x), x = ~PC1, y = ~PC2, z = ~PC3, color = expgroups()$Group,
colors = unique(fac2col(grps$group)), alpha = 0.8) %>%
add_markers() %>%
layout(scene = scene,
paper_bgcolor = "#272b30",
plot_bgcolor = "#272b30",
legend = list(
font = list(
color = "#d3d3d3")
)
)
# plot_ly(data.frame(pca$rotation), x = ~PC1, y = ~PC2, z = ~PC3, color = unlist(expgroups()[c(input$pc_col)])) %>%
# add_markers() %>%
# layout(scene = list(xaxis = list(title = 'PC1'),
# yaxis = list(title = 'PC2'),
# zaxis = list(title = 'PC3')))
else
ggplotly(
ggplot(data.frame()) +
annotate("text", x=8, y=13000, label= "No Data", size = 20, color = "red") +
gg_back
)
})
# pca <- dudi.pca(pc_as, center=TRUE, scale=FALSE, scannf = F, nf = input$pcs)
#cool pca plots
# par(bg = "grey")
# s.class(pca$l1, expgroups()$Group, col = unique(fac2col(expgroups()$Group)))
# s.class(pca$x[,c(input$PC1, input$PC2)], expgroups()[,input$pc_col], col = unique(fac2col(expgroups()[,input$pc_col])))
s.class(data.frame(pca$x), xax = as.numeric(input$PC1), yax = as.numeric(input$PC2),
expgroups()[,colnames(expgroups())==input$pc_col], col = unique(fac2col(expgroups()[,colnames(expgroups())==input$pc_col])))
add.scatter.eig(eig, 3, 1, c(as.numeric(input$PC1), as.numeric(input$PC2)), posi = "bottomright")
# bg.col = "#272b30")
}, bg = "#272b30")
#sat, sens and sample plots
observeEvent(c(input$load_x, input$load_user),{
output$slider_sat_ui <- renderUI({
#change this to the sample names
selectInput("slider_sat", "Select samples to compare",
choices = selected_samples(),
multiple = T,
width = "100%",
selected = selected_samples()[1:3])
# sliderInput(inputId = "slider_sat", "Select sample range to explore", min = 1, max = length(names(noi_dat_saturation()@dat$depth)), value = c(1, 10), step = 1)
#length(input$tbl_rows_selected)
# input$tbl_rows_selected
})
output$sat_1<- renderPlot({
par(col.lab="white", col="grey", col.axis = "white", col.main = "white", col.sub = "white")
# explo.plot(noi_dat_saturation(), toplot = 1, samples = c(input$slider_sat))
explo.plot(noi_dat_saturation(), toplot = 1, samples = c(1:length(names(noi_dat_saturation()@dat$depth)))[names(noi_dat_saturation()@dat$depth)%in%c(input$slider_sat)],
col.ticks = "white")
}, bg = "#272b30")
output$sat_2 <- renderPlot({
par(col.lab="white", col="grey", col.axis = "white", col.main = "white", col.sub = "white")
explo.plot(noi_dat_saturation(), toplot = "protein_coding", samples = c(1:length(names(noi_dat_saturation()@dat$depth)))[names(noi_dat_saturation()@dat$depth)%in%c(input$slider_sat)],
col.ticks = "white")
}, bg = "#272b30")
output$slider_sens_ui <- renderUI({
selectInput("slider_sens", "Select samples to compare",
choices = selected_samples(),
multiple = T,
width = "100%",
selected = selected_samples()[1:3])
})
output$sens_1 <- renderPlot({
par(col.lab="white", col="grey", col.axis = "white", col.main = "white", col.sub = "white")
explo.plot(noi_dat_countsbio(), toplot = 1, plottype = "barplot",
samples = c(1:length(names(noi_dat_countsbio()@dat$summary$global)[!(names(noi_dat_countsbio()@dat$summary$global)%in%c("global", "total"))]))[names(noi_dat_countsbio()@dat$summary$global)[!(names(noi_dat_countsbio()@dat$summary$global)%in%c("global", "total"))]%in%c(input$slider_sens)],
col.ticks = "white")
}, bg = "#272b30")
output$sens_2 <- renderPlot({
par(col.lab="white", col="grey", col.axis = "white", col.main = "white", col.sub = "white")
explo.plot(noi_dat_countsbio(), toplot = "protein_coding", plottype = "boxplot",
samples = c(1:length(names(noi_dat_countsbio()@dat$summary$global)[!(names(noi_dat_countsbio()@dat$summary$global)%in%c("global", "total"))]))[names(noi_dat_countsbio()@dat$summary$global)[!(names(noi_dat_countsbio()@dat$summary$global)%in%c("global", "total"))]%in%c(input$slider_sens)],
col.ticks = "white")
}, bg = "#272b30")
output$slider_comps_ui <- renderUI({
fluidRow(
column(4,
selectInput("slider_comps1", "Select samples to compare",
choices = selected_samples(),
multiple = F,
width = "100%",
selected = selected_samples()[1])),
column(4,
selectInput("slider_comps2", "Select samples to compare",
choices = selected_samples(),
multiple = F,
width = "100%",
selected = selected_samples()[2]))
)
})
output$bio_plot_1 <- renderPlot({
par(col.lab="white", col="grey", col.axis = "white", col.main = "white", col.sub = "white")
par(mfrow = c(1,2)) # we need this instruction because two plots (one per sample) will be generated
explo.plot(noi_dat_bio_detect(), plottype = "persample", samples = c(1:length(names(noi_dat_bio_detect()@dat$biotables)))[names(noi_dat_bio_detect()@dat$biotables)%in%c(input$slider_comps1, input$slider_comps2)],
col.ticks = "white")
}, bg = "#272b30")
output$bio_plot_2 <- renderPlot({
par(col.lab="white", col="grey", col.axis = "white", col.main = "white", col.sub = "white")
par(mfrow = c(1,2)) # we need this instruction because two plots (one per sample) will be generated
explo.plot(noi_dat_bio_detect(), toplot = "protein_coding", plottype = "comparison", samples = c(1:length(names(noi_dat_bio_detect()@dat$biotables)))[names(noi_dat_bio_detect()@dat$biotables)%in%c(input$slider_comps1, input$slider_comps2)],
col.ticks = "white")
}, bg = "#272b30")
output$bio_plot_2_text <- renderText({
print("Show test for difference btw samples here")
# # par(mfrow = c(1,2)) # we need this instruction because two plots (one per sample) will be generated
# explo.plot(noi_dat_bio_detect(), toplot = "protein_coding", plottype = "comparison", samples = c(1:length(names(noi_dat_bio_detect()@dat$biotables)))[names(noi_dat_bio_detect()@dat$biotables)%in%c(input$slider_comps1, input$slider_comps2)])
})
}, ignoreInit = T)
#statistical tab
# withProgress(message = 'Performing Differential Expression Calculations',
# detail = 'This may take a while...', value = 0, {
# # incProgress(1/2)
# })
#need selector with
output$meta_deUI <- renderUI(selectInput("meta_de", "Select Variable Group to compare",
choices = colnames(expgroups()),
selected = 1))
# then radio buttons with
output$de_grp_1_UI <- renderUI({
radioButtons("de_grp_1", "Group 1",
choices = unique(unlist(expgroups()[,input$meta_de])),
selected = unique(unlist(expgroups()[,input$meta_de]))[1])
})
# then radio buttons with
output$de_grp_2_UI <- renderUI({
radioButtons("de_grp_2", "Group 2", choices = unique(unlist(expgroups()[,input$meta_de])),
selected = unique(unlist(expgroups()[,input$meta_de]))[2])
})
output$sample_table <- renderTable(table(expgroups()[, input$meta_de]))
observeEvent(input$DEcalc, {
req(input$de_grp_1, input$de_grp_2)
isolate({
if(input$de_grp_1 !=input$de_grp_2){
# a character vector with exactly three elements:
# the name of a factor in the design formula,
# the name of the numerator level for the fold change,
# and the name of the denominator level for the fold change (simplest case)
de_options <- NULL
if (input$de_test != "Auto")
de_options <- paste0(de_options, 'test="', input$de_test, '",')
if (input$de_filt != "Auto")
de_options <- paste0(de_options, 'fitType="', input$de_filt, '",')
if (input$de_beta != "Auto")
de_options <- paste0(de_options, 'betaPrior="', input$de_beta, '",')
# if (testing == "dfd"){
# # save(dds, "example_data/testing/dds")
# load("example_data/testing/dds")
# dds<-dds
# # save(rld.test, "example_data/testing/rld.test")
# load("example_data/testing/rld.test")
# rld.test<-rld.test
# # save(counts.test, "example_data/testing/counts.test")
# load("example_data/testing/counts.test")
# counts.test<-counts.test
# # save(counts.all, "example_data/testing/counts.all")
# load("example_data/testing/counts.all")
# counts.all<-counts.all
# # save(res, "example_data/testing/res")
# load("example_data/testing/res")
# res<-res
# # save(dds.test, "example_data/testing/dds.test")
# load("example_data/testing/dds.test")
# dds.test<-dds.test
#
# }else{
dds <- reactive({
dds.test <- dds_HTSeq()
dds.test <- dds.test[,colData(dds.test)[,colnames(colData(dds.test))==input$meta_de]%in%c(input$de_grp_1, input$de_grp_2)]
colData(dds.test)[,colnames(colData(dds.test))==input$meta_de] <- as.factor(as.character(colData(dds.test)[,colnames(colData(dds.test))==input$meta_de]))
dds.test <- estimateSizeFactors(dds.test)
if (is.null(de_options))
dds <- DESeq(dds.test, parallel = T)
else
dds <- eval(parse(text =
cat(
paste("DESeq(dds.test, parallel = T,",
de_options,
")"))))
dds
})
# rld.test <- rlog(dds, blind = FALSE) #for hetamps and such
counts.test <- log2(counts(dds(), normalized=T))
counts.all <- log2(counts(dds_HTSeq(), normalized=T))
res <- reactive({
res_opts <- NULL
if (input$alt != "Auto")
res_opts <- paste0(res_opts, 'altHypothesis="', input$alt, '",')
if (input$cook != "Auto")
res_opts <- paste0(res_opts, 'cooksCutoff="', input$cook, '",')
if (is.null(res_opts))
res <- results(dds(),
alpha=input$alpha_in, contrast = c(input$meta_de, input$de_grp_1, input$de_grp_2),
pAdjustMethod = input$p_adjM, lfcThreshold = input$lcf, parallel = T)
else
res <- eval(parse(text =
cat(
paste("results(dds, parallel = T, alpha=input$alpha_in, contrast = c(input$meta_de, input$de_grp_1, input$de_grp_2), pAdjustMethod = input$p_adjM,lfcThreshold = input$lcf,",
res_opts,
")"))))
#Annotate
res$symbol <- mapIds(org.Hs.eg.db,
keys=row.names(res),
column="SYMBOL",
keytype="ENSEMBL",
multiVals="first")
res$entrez <- mapIds(org.Hs.eg.db,
keys=row.names(res),
column="ENTREZID",
keytype="ENSEMBL",
multiVals="first")
res
})
# if (input$de_beta == "Auto")
# dds <- DESeq(dds_HTSeq(), test = input$de_test, fitType = input$de_filt)
# else
# dds <- DESeq(dds_HTSeq(), test = input$de_test, fitType = input$de_filt, betaPrior = input$de_beta)
#
# if (input$cook == "Auto")
# res <- results(dds,
# alpha=input$alpha_in, contrast = c(input$meta_de, input$de_grp_1, input$de_grp_2),
# altHypothesis = input$alt, pAdjustMethod = input$p_adjM, lfcThreshold = input$lcf,
# test = input$de_test)
# else
# res <- results(dds,
# alpha=input$alpha_in, contrast = c(input$meta_de, input$de_grp_1, input$de_grp_2),
# altHypothesis = input$alt, pAdjustMethod = input$p_adjM, lfcThreshold = input$lcf,
# test = input$de_test,
# cooksCutoff = input$cook)
# parallel = T
# library(BiocParallel)
# register(MulticoreParam(10))
# DESeq(dds, parallel=10)
# results(DESeq_dds, parallel = T)
#DE outputs
output$DE_stat <- renderDataTable({
# req(input$de_grp_1, input$de_grp_2)
res <- res()
res <- res[order(res$padj, decreasing = F),]
# tmp <- data.frame(res[1:sum(res$padj < input$p_in, na.rm=TRUE), ])
tmp <- data.frame(res[!(is.na(res$pvalue)),])
tmp[,1:6] <- round(tmp[,1:6], 3)
tmp$Gene <- row.names(tmp)
tmp <- tmp[c(9,1,2,3,4,5,6,7,8)]
datatable(tmp, rownames = F) %>% formatStyle(
'padj',
backgroundColor = styleInterval(input$p_in, c('#98FB98', '#D3D3D3')),
color = "black"
) %>% formatStyle(
c(colnames(tmp)),
color = "black"
)
})
res_ma <- data.frame(res())
res_ma$Fold_change <- ifelse(res_ma$log2FoldChange <0, "down", "up")
res_ma$Fold_change <- ifelse(is.na(res_ma$Fold_change), "none", res_ma$Fold_change)
res_ma$Significat_at_p_adjusted <- ifelse(res_ma$padj <= input$p_in, "yes", "no")
output$DE_ma <- renderPlotly({
# plotMA(res, ylim=c(-5,5))
ggplotly(
ggplot(res_ma, aes(x=baseMean, y=log2FoldChange)) +
geom_point(aes( colour = Significat_at_p_adjusted, shape=Fold_change )) + scale_x_continuous(trans='log10') +
geom_hline(yintercept = 0, col = "red") + xlab("Mean of Normalized Counts") + ylab("Log Fold Change") +
ylim(c(-5, 5)) +
geom_smooth(se = F) +
gg_back
)
})
### volc
# input=NULL
# input$p_in=0.1
volc <- data.frame(res())
volc$P <- volc$pvalue
volc$log_pvalue <- -log10(volc$pvalue)
volc$sig_p.adj <- ifelse(volc$padj <= input$p_in, "yes", "no")
volc$Gene <- paste0(rownames(volc), " (", volc$symbol, ")")
volc$key_test <- seq(1:nrow(volc))
output$DE_volc <- renderPlotly({
vol <- ggplot(data=volc,
aes(x = log2FoldChange, y = log_pvalue, colour = sig_p.adj, text = Gene, key = key_test)) +
geom_point(alpha=0.4, size=1.75) +
# xlim(c(-6, 6)) +
xlab("log2FoldChange") + ylab("-log10 p-value (unadjusted)") +
theme_bw() +
theme(legend.position="right")+
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
gg_back
ggplotly(vol, tooltip = c("log2FoldChange", "log_pvalue", "Gene"), source = "volc_sor") %>% layout(dragmode = "select")
# %>% layout(dragmode = "select")
#try changing to plot_ly as it seems to be much less sluggish
# plot_ly(volc, x = ~log2FoldChange, y = ~ log_pvalue,
# source = "volc_sor")
})
output$DE_qq <- renderPlotly({
qqly(na.omit(volc))
})
output$DE_scat <- renderPlotly({
#test
# load("example_data/delete/dds_2")
# dds
# rld.test <- rlog(dds, blind = FALSE)
# counts.test <- counts(dds, normalized=T)
# res <- results(dds)
#
#
# res[1,]
#
# assay(rld.test)[1,]
# counts.test[1,]
#
#
#
# scat.tmp <- assay(rld.test)
# scat.tmp.1 <- data.frame(scat.tmp[1,colnames(scat.tmp)%in%rownames(colData(rld.test)[colData(rld.test)$Group == "LTBI",])])
# scat.tmp.2 <- data.frame(scat.tmp[1,colnames(scat.tmp)%in%rownames(colData(rld.test)[colData(rld.test)$Group == "prev",])])
# scat.tmp <- NULL
# cond1 <- colMeans(scat.tmp.1, na.rm = T)
# cond2 <- colMeans(scat.tmp.2, na.rm = T)
#
#
# res[1,]
# cond2-cond1
#
#
#
#
#
# scat.tmp <- counts.test
# scat.tmp.1 <- data.frame(scat.tmp[1,colnames(scat.tmp)%in%rownames(colData(rld.test)[colData(rld.test)$Group == "LTBI",])])
# scat.tmp.2 <- data.frame(scat.tmp[1,colnames(scat.tmp)%in%rownames(colData(rld.test)[colData(rld.test)$Group == "prev",])])
# scat.tmp <- NULL
# cond1 <- colMeans(scat.tmp.1, na.rm = T)
# cond2 <- colMeans(scat.tmp.2, na.rm = T)
#
#
# res[1,]
# cond2-cond1
# log2(cond2)-log2(cond1)
#
#test end
#should recalculate this using the subsetted dds
# scat.tmp <- assay(rld.test)
expgroups.tmp <- expgroups()[c(colnames(expgroups())==input$meta_de)]
colnames(expgroups.tmp) <- "Group"
expgroups.tmp$sample <- rownames(expgroups.tmp)
scat.tmp <- counts.test
scat.tmp.1 <- data.frame(scat.tmp[,colnames(scat.tmp)%in%
expgroups.tmp[expgroups.tmp$Group==input$de_grp_1, ]$sample
])
scat.tmp.2 <- data.frame(scat.tmp[,colnames(scat.tmp)%in%
expgroups.tmp[expgroups.tmp$Group==input$de_grp_2, ]$sample
])
scat.tmp <- NULL
scat.tmp.1$cond1 <- rowMeans(scat.tmp.1, na.rm = T)
scat.tmp.2$cond2 <- rowMeans(scat.tmp.2, na.rm = T)
scat <- data.frame("cond1" = scat.tmp.1$cond1,
"cond2" = scat.tmp.2$cond2,
"Gene" = rownames(scat.tmp.1),
"Significat_at_p_adjusted" = res_ma$Significat_at_p_adjusted,
"Adj.p" = res_ma$padj)
p.scat <- ggplot(scat, aes(x=cond1, y=cond2)) +
geom_point(aes( colour = Significat_at_p_adjusted)) +
xlab(input$de_grp_1) + ylab(input$de_grp_2) + gg_back
#jerryrig to get extra in the tiptool..
p.scat <- p.scat + geom_point(aes(Adj.p), alpha = 0) +
geom_text(aes(label=Gene), hjust=0, vjust=0, alpha = 0)
ggplotly(p.scat, tooltip = c("Gene", "cond1", "cond2")) #, "Adj.p"
})
output$brush <- renderPrint({
d <- event_data("plotly_selected", source = "volc_sor", session)
if (is.null(d)) "Click and drag events (i.e., select/lasso) appear here (double-click to clear)" else d
})
observe({
#setup tmp data for subplots
eve_data <- event_data("plotly_selected", source = "volc_sor", session)
volc_key <- as.numeric(eve_data$key)
if (!(is.null(eve_data))){
# if (length(volc_key) == 1)
# count.tmp <- t(data.frame(counts.test[c(volc_key),]))
# else
# count.tmp <- counts.test[c(volc_key),]
count.tmp <- counts.test[c(volc_key),, drop=F]
count.tmp <- data.frame(t(count.tmp))
count.tmp$mean_count <- rowMeans(count.tmp)
ano <- colData(dds())[,input$meta_de]
# table(rownames(colData(dds.test))==rownames(count.tmp))
#all samples
# if (length(volc_key) == 1)
# counts.all.tmp <- t(data.frame(counts.all[c(volc_key),]))
# else
# counts.all.tmp <- counts.all[c(volc_key),]
counts.all.tmp <- counts.all[c(volc_key),, drop=F]
counts.all.tmp <- data.frame(t(counts.all.tmp))
counts.all.tmp$mean_count <- rowMeans(counts.all.tmp)
counts.all.tmp$Sample <- rownames(counts.all.tmp)
counts.all.tmp$Genes <- paste(colnames(counts.all.tmp)[1:(length(colnames(counts.all.tmp))-2)], sep="", collapse=",")
ano.all <- colData(dds_HTSeq())[,input$meta_de]
output$genes_sel <- renderText(c("Seleceted Genes", colnames(count.tmp)[colnames(count.tmp) != "mean_count"] ))
output$boxplot_sel <- renderPlotly({
p1 <- ggplotly(
ggplot(count.tmp, aes(x = ano, y = mean_count, color = ano)) +
scale_y_log10() + geom_boxplot(outlier.alpha = 0) + geom_jitter(cex = 3) +
xlab(input$meta_de) + ylab("Mean count accross selected genes")
)
# no need to have its own data here, just use the highlight() from plotly...?
p2 <- ggplotly(
ggplot(counts.all.tmp, aes(x = ano.all, y = mean_count, color = ano.all)) +
scale_y_log10() + geom_boxplot(outlier.alpha = 0) + geom_jitter(cex = 3) +
xlab(input$meta_de) + ylab("Mean count accross selected genes - all samples in initial table")
)
# p2 <- p2 + tooltip = c("Sample", "mean_count", "Genes")
subplot(p1, p2, shareX = F, shareY = T)
# }else{
# ggplotly(
# ggplot(data.frame()) +
# annotate("text", x=8, y=13000, label= "Please select some points", size = 20, color = "orange") +
# gg_back
# )
# }
# }
})
output$heat_sel <- renderPlotly({
if (length(volc_key) > 1){
# count.heat.tmp <- assay(rld.test)[c(volc_key),]
count.heat.tmp <- counts.test[c(volc_key),]
# h1 <-
heatmaply(count.heat.tmp,
# col_side_colors = fac2col( expgroups()[c(colnames(expgroups())==input$meta_de)] ),
scale_fill_gradient_fun = ggplot2::scale_fill_gradient2(low = "red", mid = "black", high = "green", midpoint = 0, na.value = "white"),
xlab = "Mean count accross selected genes",
heatmap_layers = gg_back,
side_color_layers = gg_back
)
# # k_row = as.numeric(input$k_num), k_col = as.numeric(input$k_num),
# # hclust_method = input$hclust, dist_method = input$dist_met,
#
# plot_ly(z = count.heat.tmp, type = "heatmap",
# col_side_colors = fac2col(colData(dds.test)[,input$meta_de]))
# tooltip = c("Sample", "mean_count", "Genes")
# subplot(h1, h2, shareX = F, shareY = T)
# h1
}
})
}
})
###
# #this must take whats selected in manhattan plot
# output$DE_gene <- renderPlotly({
# req(input$gene_in)
# d <- plotCounts(dds, gene=input$gene_in, intgroup=input$meta_de,
# returnData=TRUE)
# p <- ggplot(d, aes(x=condition.Group, y=count)) +
# geom_point(position=position_jitter(w=0.1, h=0))
# ggplotly()
# })
############
##go panel##
############
# require(goseq)
# GO-enrichment panel: runs once when the user clicks "Calculate".
# Pipeline: (1) goseq length-bias-corrected GO enrichment on the DE results,
# (2) GOexpress-style analysis (GO_analyse / pValue_GO / heatmap_GO) on the
# log2 normalized counts.
# NOTE(review): depends on reactives res()/dds() defined elsewhere in the
# server, and on local data files ("hg19.ensGene.LENGTH", "GOgenes.Ensembl",
# "allGO.Ensembl", "allgenes.Ensembl") being present -- confirm.
observeEvent(input$Calculate, {
  # Gene-length data required by goseq's bias correction (loaded into env).
  load("hg19.ensGene.LENGTH")
  res.go <- res()
  # Drop genes with no p-value (e.g. independent-filtered by DESeq2).
  res.go <- res.go[!is.na(res.go$pvalue),]
  # Binary significance vector keyed by gene id:
  # 1 = significant at the user-chosen adjusted-p cutoff, 0 = not.
  sig_genes <- ifelse(res.go$padj<=input$p_in, 1, 0)
  names(sig_genes) <- rownames(res.go)
  # need to download the annotations
  # Probability weighting function correcting for gene-length bias.
  pwf <- nullp(sig_genes, "hg19", "ensGene", plot.fit = F)
  output$pwd_plot <- renderPlot({
    plotPWF(pwf)
  })
  output$enriched <- renderText({
    GO.wall <- goseq(pwf, "hg19", "ensGene")
    # Keep GO categories enriched at the user-chosen BH-adjusted cutoff.
    enriched.GO <- GO.wall$category[p.adjust(GO.wall$over_represented_pvalue, method="BH")<input$p_go]
    for(go in enriched.GO){
      print(GOTERM[[go]])
      cat("--------------------------------------\n")
    }
  })
  # Build an ExpressionSet of log2 normalized counts for GO_analyse().
  counts.tmp <- log2(counts(dds(), normalized=T))
  counts.phen <- annotatedDataFrameFrom(as.matrix(colData(dds())), byrow = T)
  counts.go <- ExpressionSet(counts.tmp, counts.phen)
  pData(counts.go)$Group <- colData(dds())$Group
  #download annotations
  # listDatasets(useMart('ensembl'))
  # listMarts()
  # ensembl89 = useMart(biomart='ENSEMBL_MART_ENSEMBL', dataset='hsapiens_gene_ensembl')
  #
  # allgenes.Ensembl = getBM(attributes=c('ensembl_gene_id', 'external_gene_name', 'description'),
  # mart=ensembl89)
  # colnames(allgenes.Ensembl)[1] = 'gene_id'
  # allGO.Ensembl = getBM(attributes=c('go_id', 'name_1006', 'namespace_1003'),
  # mart=ensembl89)
  # allGO.Ensembl = allGO.Ensembl[allGO.Ensembl$go_id != '',]
  #
  # GOgenes.Ensembl = getBM( attributes=c('ensembl_gene_id', 'go_id'),
  # mart=ensembl89)
  #
  # colnames(GOgenes.Ensembl)[1] = 'gene_id'
  #
  # GOgenes.Ensembl = GOgenes.Ensembl[GOgenes.Ensembl$go_id != '',]
  # GOgenes.Ensembl = GOgenes.Ensembl[GOgenes.Ensembl$gene_id != '',]
  #
  # save(GOgenes.Ensembl, file = "GOgenes.Ensembl")
  # save(allGO.Ensembl, file = "allGO.Ensembl")
  # save(allgenes.Ensembl, file = "allgenes.Ensembl")
  # Pre-computed Ensembl annotation tables; the commented biomaRt code above
  # shows how they were generated.
  load("GOgenes.Ensembl")
  load("allGO.Ensembl")
  load("allgenes.Ensembl")
  # Fixed seed so the scoring method (which may be randomized) is reproducible.
  set.seed(1987)
  GO_results <- GO_analyse(eSet = counts.go, f = "Group",
                           GO_genes=GOgenes.Ensembl, all_GO=allGO.Ensembl, all_genes=allgenes.Ensembl,
                           method = input$method_go) #rf
  #rf not working for some reason
  # output$Go_progress <- renderText({
  #add progress meter
  GO_results.pVal <- pValue_GO(result=GO_results)
  # })
  output$res_tab <- renderTable({
    data.frame(GO_results.pVal$GO)
  })
  # need to make assignment for input$GO_selected, liked to table?
  output$GO_heat <- renderPlot({
    heatmap_GO(input$GO_selected, GO_results, counts.go)
  })
})
}
})
}, ignoreInit = T) #end DE action button
observeEvent(input$bookmark_test, {
session$doBookmark()
})
#
# observeEvent(input$test_save,{
# save(session, file = "delete/test")
# })
#
# observeEvent(input$test_load,{
# session = load("delete/test")
# })
#
# observeEvent(input$test_load,{
#
# if(!file.exists("delete/inputs.RDS")) {return(NULL)}
#
# savedInputs <- readRDS("delete/inputs.RDS")
#
# inputIDs <- names(savedInputs)
# inputvalues <- unlist(savedInputs)
# for (i in 1:length(savedInputs)) {
# session$sendInputMessage(inputIDs[i], list(value=inputvalues[[i]]) )
# }
# })
#
# observeEvent(input$test_save,{
# saveRDS( reactiveValuesToList(input) , file = "delete/inputs.RDS")
# })
# #for DE calcs
# save.image(file = "test.R")
# dds(), counts.test, counts.all, res()
# onBookmark(function(state) {
# state$values$DE <- vals$sum
# })
#
})
enableBookmarking(store = "server") #or "url"
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/myhm.R
\name{myhm}
\alias{myhm}
\title{Heatmap on eset}
\usage{
myhm(eset, count = 100, ...)
}
\arguments{
\item{eset}{a}
\item{count}{a}
\item{...}{b}
}
\description{
Does a cool heatmap
}
| /man/myhm.Rd | no_license | seandavi/NewPkg | R | false | true | 271 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/myhm.R
\name{myhm}
\alias{myhm}
\title{Heatmap on eset}
\usage{
myhm(eset, count = 100, ...)
}
\arguments{
\item{eset}{a}
\item{count}{a}
\item{...}{b}
}
\description{
Does a cool heatmap
}
|
#' Pairwise shared Cognate distance
#'
#' This calculates a distance based on the amount of shared cognates. Two languages share a cognate for a word
#' if they have at least one cognate code in common for that word (e.g. if language 1 has for the concept cognate "1, 2" and language 2
#' has "1, 3", they share a cognate). The distance is then the proportion of concepts, where these two languages do not share a cognate.
#'
#' @param lang1 row index of the first language in the alignment
#' @param lang2 row index of the second language in the alignment
#' @param alignment alignment matrix as created by createAlignmentMatrix()
#'
#' @return A single numeric value in [0, 1]: the proportion of scorable
#'   concepts (those not coded "?" in either language) for which the two
#'   languages do NOT share a cognate. NaN if no concept is scorable.
#'
pairwiseSCDistance <- function(lang1, lang2, alignment) {
  conceptCount <- length(alignment$charsetFrom)
  # One logical per concept: TRUE = shared cognate, FALSE = none shared,
  # NA = unscorable (missing data). vapply guarantees a logical vector even
  # for zero concepts; seq_len() is safe when conceptCount is 0.
  isShared <- vapply(seq_len(conceptCount), hasSharedCognates, logical(1),
                     lang1 = lang1, lang2 = lang2, alignment = alignment)
  # Unscorable concepts are excluded from both numerator and denominator.
  1 - sum(isShared, na.rm = TRUE) / sum(!is.na(isShared))
}
# Does a pair of languages share at least one cognate for the given concept?
#
# @param word      concept index into charsetFrom/charsetTo
# @param lang1     row index of the first language
# @param lang2     row index of the second language
# @param alignment list with a binary character matrix plus the column
#                  ranges (charsetFrom/charsetTo) per concept
# @return TRUE if both languages code "1" in some shared column,
#         FALSE otherwise, NA when either language has missing data ("?").
hasSharedCognates <- function(word, lang1, lang2, alignment) {
  # Columns of the cognate-coding matrix belonging to this concept.
  cognateCols <- alignment$charsetFrom[word]:alignment$charsetTo[word]
  codes <- alignment$matrix[c(lang1, lang2), cognateCols]
  # A "?" in the first column marks the whole concept as missing data.
  if (codes[1, 1] == "?" || codes[2, 1] == "?") {
    return(NA)
  }
  # Shared cognate: at least one column where both languages code "1".
  return(sum(codes[1, ] == "1" & codes[2, ] == "1") >= 1)
}
} | /R/SharedCognateDistance.R | no_license | KonstantinHoffmann/ABVDTools | R | false | false | 1,229 | r | #' Pairwise shared Cognate distance
#'
#' This calculates a distance based on the amount of shared cognates. Two languages share a cognate for a word
#' if they have at least one cognate code in common for that word (e.g. if language 1 has for the concept cognate "1, 2" and language 2
#' has "1, 3", they share a cognate). The distance is then the proportion of concepts, where these two languages do not share a cognate.
#'
#' @param lang1 row index of the first language in the alignment
#' @param lang2 row index of the second language in the alignment
#' @param alignment alignment matrix as created by createAlignmentMatrix()
#'
#' @return
#'
pairwiseSCDistance <- function(lang1, lang2, alignment) {
conceptCount <- length(alignment$charsetFrom)
isShared <- logical(conceptCount)
isShared <- sapply(1:conceptCount, hasSharedCognates, lang1=lang1, lang2=lang2, alignment=alignment)
return(1-sum(isShared, na.rm = TRUE)/sum(!is.na(isShared)))
}
hasSharedCognates <- function(word, lang1, lang2, alignment) {
matrix <- alignment$matrix[c(lang1,lang2), alignment$charsetFrom[word]:alignment$charsetTo[word]]
if(matrix[1,1]=="?" | matrix[2,1]=="?") return(NA)
return(sum(matrix[1,]=="1" & matrix[2,]=="1")>=1)
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cor_sign.R
\name{pvalue}
\alias{pvalue}
\title{Calculates the p.value}
\usage{
pvalue(r, n)
}
\arguments{
\item{r}{Correlation coefficient of a}
\item{n}{Number of samples}
}
\value{
the p-value
}
\description{
Calculates the p.value
}
\seealso{
Other pvalue correlations:
\code{\link{cor_sign}()}
}
\concept{pvalue correlations}
| /man/pvalue.Rd | permissive | llrs/integration-helper | R | false | true | 410 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cor_sign.R
\name{pvalue}
\alias{pvalue}
\title{Calculates the p.value}
\usage{
pvalue(r, n)
}
\arguments{
\item{r}{Correlation coefficient of a}
\item{n}{Number of samples}
}
\value{
the p-value
}
\description{
Calculates the p.value
}
\seealso{
Other pvalue correlations:
\code{\link{cor_sign}()}
}
\concept{pvalue correlations}
|
#' Draw loci and its corresponding alleles.
#'
#' This function will take a list of loci alleles and plot it, where y axis represents loci and x axis represents allele "length". Function
#' assumes allele names are coercable to numeric.
#'
#' @param x List. Named list where names correspond to true loci names. Each element should hold unique allele values. At this time
#' they are assumed to be coercable to integers.
#' @return A list with two elements: \code{data} (the long-format data frame
#'   that was plotted) and \code{fig} (the ggplot object). The figure is also
#'   printed as a side effect.
#' @export
#' @title Draw alleles for loci list.
#' @author Roman Lustrik (\email{roman.lustrik@@biolitika.si})
#' @examples
#' # generate data
#' require(magrittr)
#' require(ggplot2)
#' al <- lapply(1:10, FUN = function(x) {
#'   runif(n = sample(x = 2:7, size = 1), min = 100, max = 300) %>%
#'     round()
#' })
#'
#' # add names to the list
#' names(al) <- paste("L", 1:length(al), sep = "")
#'
#' fig.plotted <- drawLoci(al)
# TODO: coerce alleles to ordered list
drawLoci <- function(x) {
  # Build one data.frame per locus: "allele" is the locus index (the y
  # position), "x" holds that locus' allele values.
  # seq_along() (rather than 1:length(x)) is safe for an empty list.
  al.list <- mapply(x, seq_along(x), FUN = function(x, y) {
    data.frame(allele = y, x)
  }, SIMPLIFY = FALSE)
  out <- do.call("rbind", al.list)
  out$allele <- as.factor(out$allele)
  out$x <- as.numeric(as.character(out$x))
  print(
    # NOTE: aes_string() is soft-deprecated in ggplot2 >= 3.0; kept to avoid
    # raising the package's minimum ggplot2 version.
    go <- ggplot(out, aes_string(x = "x", y = "allele")) +
      theme_bw() +
      xlab("Alleles") +
      ylab("Loci") +
      geom_line(color = "grey50") +
      geom_point(size = 3, shape = 1) +
      scale_y_discrete(label = names(al.list)) +
      theme(
        panel.grid.major.y = element_line(color = "grey95")
      )
  )
  return(list(data = out, fig = go))
}
#'
#' This function will take a list of loci alleles and plot it, where y axis represents loci and x axis represents allele "length". Function
#' assumes allele names are coercable to numeric.
#'
#' @param x List. Named list where names correspond to true loci names. Each element should hold unique allele values. At this time
#' they are assumed to be coercable to integers.
#' @export
#' @title Draw alleles for loci list.
#' @author Roman Lustrik (\email{roman.lustrik@@biolitika.si})
#' @examples
#' # generate data
#' require(magrittr)
#' require(ggplot2)
#' al <- lapply(1:10, FUN = function(x) {
#' runif(n = sample(x = 2:7, size = 1), min = 100, max = 300) %>%
#' round()
#' })
#'
#' # add names to the list
#' names(al) <- paste("L", 1:length(al), sep = "")
#'
#' fig.plotted <- drawLoci(al)
# TODO: coerce alleles to ordered list
drawLoci <- function(x) {
al.list <- mapply(x, 1:length(x), FUN = function(x, y) {
data.frame(allele = y, x)
}, SIMPLIFY = FALSE)
out <- do.call("rbind", al.list)
out$allele <- as.factor(out$allele)
out$x <- as.numeric(as.character(out$x))
print(
go <- ggplot(out, aes_string(x = "x", y = "allele")) +
theme_bw() +
xlab("Alleles") +
ylab("Loci") +
geom_line(color = "grey50") +
geom_point(size = 3, shape = 1) +
scale_y_discrete(label = names(al.list)) +
theme(
panel.grid.major.y = element_line(color = "grey95")
)
)
return(list(data = out, fig = go))
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/import.r
\name{importExcelInternal}
\alias{importExcelInternal}
\title{Workhorse function (parser) for importExcel.}
\usage{
importExcelInternal(file, dir = NULL, sheetIndex = 1, min = NULL,
max = NULL)
}
\arguments{
\item{file}{A vector of filenames including the full path if file is not in current working
directory. The file suffix has to be \code{.xlsx} (used since Excel 2007).}
\item{dir}{Alternative way to supply the directory where the file is located
(default \code{NULL}).}
\item{sheetIndex}{The number of the Excel sheet that contains the grid data.}
\item{min}{Optional argument (\code{numeric}, default \code{NULL})
for minimum rating value in grid.}
\item{max}{Optional argument (\code{numeric}, default \code{NULL})
for maximum rating value in grid.}
}
\description{
Workhorse function (parser) for importExcel.
}
\author{
Mark Heckmann
}
\keyword{internal}
| /man/importExcelInternal.Rd | no_license | artoo-git/OpenRepGrid | R | false | true | 963 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/import.r
\name{importExcelInternal}
\alias{importExcelInternal}
\title{Workhorse function (parser) for importExcel.}
\usage{
importExcelInternal(file, dir = NULL, sheetIndex = 1, min = NULL,
max = NULL)
}
\arguments{
\item{file}{A vector of filenames including the full path if file is not in current working
directory. The file suffix has to be \code{.xlsx} (used since Excel 2007).}
\item{dir}{Alternative way to supply the directory where the file is located
(default \code{NULL}).}
\item{sheetIndex}{The number of the Excel sheet that contains the grid data.}
\item{min}{Optional argument (\code{numeric}, default \code{NULL})
for minimum rating value in grid.}
\item{max}{Optional argument (\code{numeric}, default \code{NULL})
for maximum rating value in grid.}
}
\description{
Workhorse function (parser) for importExcel.
}
\author{
Mark Heckmann
}
\keyword{internal}
|
#' Obtain estimates or true versions of key quantities as a
#' function of sample size
#'
#' @param .data Data simulated from simulate_data_from_sem (or a subset)
#' @param sample_sizes Sample sizes for samples used to fit model (target sample sizes)
#' @param iters Number of resampled model fits per target sample size
#' @param with_replacement Sample from .data with replacement?
#' @return When .data is the full population of data, result is the true
#'         sampling distribution for the supplied target sample sizes
#'         When .data is a sample of data from the population, result is the
#'         (upstrap-)estimated sampling distribution for the supplied target
#'         sample sizes. Concretely: a list named by sample size, each
#'         element a 3 x iters matrix with rows "estim", "se", "pval" for
#'         the exposure coefficient.
get_quantities_from_data <- function(.data, sample_sizes, iters, with_replacement) {
    ## Extract the exposure and outcome variable (stored as attributes on .data)
    exposure <- attr(.data, "exposure")
    outcome <- attr(.data, "outcome")
    ## Extract the adjustment set
    adj_set <- attr(.data, "adjustment_set")
    ## Create model formula: outcome ~ exposure + adjustment covariates
    mod_formula_part_adjset <- paste(adj_set, collapse = "+")
    mod_formula_part_right <- paste(exposure, "+", mod_formula_part_adjset)
    mod_formula <- as.formula(paste(outcome, "~", mod_formula_part_right))
    ## Loop over sample sizes
    results_by_sample_size <- lapply(sample_sizes, function(n) {
        ## Perform many iterations
        replicate(iters, {
            ## Obtain sample of size n (target sample sizes)
            data_resampled <- dplyr::slice_sample(.data, n = n, replace = with_replacement)
            ## Fit model and keep the tidy() summary row for the exposure term
            mod <- lm(mod_formula, data = data_resampled)
            results <- tidy(mod) %>%
                filter(term==exposure)
            estim <- results$estimate
            se <- results$std.error
            pval <- results$p.value
            c(estim = estim, se = se, pval = pval)
        })
    })
    names(results_by_sample_size) <- sample_sizes
    results_by_sample_size
}
| /code/clean/functions_get_targets.R | no_license | lmyint/upstrap | R | false | false | 1,924 | r | #' Obtain estimates or true versions of key quantities as a
#' function of sample size
#'
#' @param .data Data simulated from simulate_data_from_sem (or a subset)
#' @param sample_sizes Sample sizes for samples used to fit model (target sample sizes)
#' @param with_replacement Sample from .data with replacement?
#' @return When .data is the full population of data, result is the true
#' sampling distribution for the supplied target sample sizes
#' When .data is a sample of data from the population, result is the
#' (upstrap-)estimated sampling distribution for the supplied target
#' sample sizes
get_quantities_from_data <- function(.data, sample_sizes, iters, with_replacement) {
## Extract the exposure and outcome variable
exposure <- attr(.data, "exposure")
outcome <- attr(.data, "outcome")
## Extract the adjustment set
adj_set <- attr(.data, "adjustment_set")
## Create model formula
mod_formula_part_adjset <- paste(adj_set, collapse = "+")
mod_formula_part_right <- paste(exposure, "+", mod_formula_part_adjset)
mod_formula <- as.formula(paste(outcome, "~", mod_formula_part_right))
## Loop over sample sizes
results_by_sample_size <- lapply(sample_sizes, function(n) {
## Perform many iterations
replicate(iters, {
## Obtain sample of size n (target sample sizes)
data_resampled <- dplyr::slice_sample(.data, n = n, replace = with_replacement)
## Fit model and extract p-value
mod <- lm(mod_formula, data = data_resampled)
results <- tidy(mod) %>%
filter(term==exposure)
estim <- results$estimate
se <- results$std.error
pval <- results$p.value
c(estim = estim, se = se, pval = pval)
})
})
names(results_by_sample_size) <- sample_sizes
results_by_sample_size
}
|
library(dplyr)
library(ggplot2)
library(scales)

# Per-deputy "representativeness": share of (user, theme) slots in which a
# deputy appears among the users' top deputies, with the top 5 frequencies
# highlighted and labeled by party.
df_top <- read.table("top_deputados_por_usuario.dat", header = T)
n_users <- length(unique(df_top$usuario))
n_temas <- length(unique(df_top$tema))
# Quick sanity check of the input (the original called head() with no
# argument, which is an error).
head(df_top)
dff <- df_top %>%
  group_by(nome_dep) %>%
  mutate(partido = partido_dep) %>%
  summarise(total = n(), freq = n() / (n_users * n_temas)) %>%
  arrange(desc(freq))
head(dff)
# Re-attach deputy metadata (state, party) to the per-deputy frequencies.
df_plot <- dff %>%
  left_join(group_by(df_top, nome_dep), by = "nome_dep") %>%
  select(nome_dep, uf_dep, total, freq, partido_dep) %>%
  distinct()
# Highlight the deputies holding the 5 largest distinct frequencies.
desq_freq <- unique(df_plot$freq)[1:5]
df_plot$destaque <- df_plot$freq %in% desq_freq
df_plot$deputado <- paste0(df_plot$nome_dep, " (", df_plot$uf_dep, ")")
p <- ggplot(df_plot, aes(as.numeric(nome_dep), freq))
p <- p + geom_jitter(size = 3, color = "gray70", alpha = 0.5)
# show.legend replaces the long-deprecated show_guide argument.
p <- p + geom_text(show.legend = TRUE, angle = 35, vjust = -0.25, hjust = 0,
                   data = filter(df_plot, destaque == TRUE),
                   aes(x = as.numeric(nome_dep), y = freq, label = deputado,
                       colour = toupper(partido_dep)),
                   position = "jitter", size = 6)
p <- p + scale_colour_hue("Partido:", guide = guide_legend(override.aes = list(size = 7, shape = utf8ToInt("A"))))
p <- p + scale_y_continuous(labels = percent, limit = c(0, 0.115))
p <- p + xlab(NULL) + ylab("Representatividade")
p <- p + theme_bw()
p <- p + theme(legend.position = "top", axis.ticks = element_blank(),
               axis.text.x = element_blank()) +
  theme(text = element_text(size = 20))
print(p)
| /src/plot_top_representativos.R | no_license | analytics-ufcg/vocedeputado | R | false | false | 1,404 | r | library(dplyr)
library(ggplot2)
library(scales)
df_top = read.table("top_deputados_por_usuario.dat", header = T)
n_users = length(unique(df_top$usuario))
n_temas = length(unique(df_top$tema))
head()
dff = df_top %>% group_by(nome_dep) %>% mutate(partido = partido_dep) %>% summarise(total = n(), freq = n() / (n_users * n_temas)) %>% arrange(desc(freq))
head(dff)
df_plot = dff %>% left_join(group_by(df_top, nome_dep), by="nome_dep") %>% select(nome_dep, uf_dep, total, freq, partido_dep) %>% distinct()
desq_freq = unique(df_plot$freq)[1:5]
df_plot$destaque = df_plot$freq %in% desq_freq
df_plot$deputado = paste(df_plot$nome_dep, " (", df_plot$uf_dep, ")", sep="")
p = ggplot(df_plot, aes(as.numeric(nome_dep), freq))
p = p + geom_jitter(size = 3, color = "gray70", alpha = 0.5)
p = p + geom_text(show_guide = TRUE, angle = 35, vjust=-0.25, hjust=0, data = filter(df_plot, destaque == TRUE), aes(x=as.numeric(nome_dep), y=freq, label=deputado, colour=toupper(partido_dep)), position = "jitter", size = 6)
p = p + scale_colour_hue("Partido:", guide=guide_legend(override.aes=list(size=7, shape = utf8ToInt("A"))))
p = p + scale_y_continuous(labels = percent, limit = c(0,0.115))
p = p + xlab(NULL) + ylab("Representatividade")
p = p + theme_bw()
p = p + theme(legend.position="top", axis.ticks = element_blank(), axis.text.x = element_blank()) + theme(text = element_text(size=20))
print(p)
|
\newcommand{\PR}{\Sexpr[results=rd]{tools:::Rd_expr_PR(#1)}}
\name{NEWS}
\title{broom.mixed News}
\encoding{UTF-8}
\section{CHANGES IN VERSION 0.2.4.9000}{
\subsection{NEW FEATURES}{
\itemize{
\item \code{lmer} tidier gets \code{ddf.method} (applies only to
lmerTest fits)
}
}
}
\section{CHANGES IN VERSION 0.2.4}{
\subsection{NEW FEATURES}{
\itemize{
\item \code{gls} tidier gets confint (GH #49)
}
}
\subsection{USER-VISIBLE CHANGES}{
\itemize{
\item redundant \code{estimate.method} in MCMC tidiers goes
away; use \code{robust} to compute point estimates/uncertainty via
median and MAD rather than mean and SE
}
}
\subsection{BUG FIXES}{
\itemize{
\item misc fixes: \code{lme4} tidiers (confint for ran_vals,
profile conf intervals fixed), \code{R2jags}, \code{gamlss} ...
\item ran_vals works for \code{glmmTMB}
}
}
}
\section{CHANGES IN VERSION 0.2.3}{
\subsection{BUG FIXES}{
\itemize{
\item don't ignore conf.level in tidy.(merMod|glmmTMB) (GH #30,31: @strengejacke)
\item levels correct in tidy.brmsfit (GH #36: @strengejacke)
\item component argument works for random effects in glmmTMB (GH #33: @strengejacke)
}
}
\subsection{NEW FEATURES}{
\itemize{
\item \code{brmsfit} and \code{rstanarm} methods allow
\code{conf.method="HPDinterval"}
}
}
\subsection{USER-VISIBLE CHANGES}{
\itemize{
\item tidy.brmsfit gets component column (GH #35: @strengejacke),
response column for multi-response models (GH #34: @strengejacke)
\item component tags are stripped from tidied brmsfit objects
\item "Intercept" terms in brms fits are re-coded as
"(Intercept)" by default, for dotwhisker/cross-model
compatibility; for previous behaviour, specify \code{fix.intercept=FALSE}
}
}
}
\section{CHANGES IN VERSION 0.2.2}{
\itemize{
\item modify examples, for CRAN compliance
}
}
\section{CHANGES IN VERSION 0.2.1}{
\itemize{
\item reduced size of stored fits for examples, for CRAN compliance
}
}
\section{CHANGES IN VERSION 0.2.0}{
\subsection{NEW FEATURES}{
\itemize{
\item more consistent term names in \code{brmsfit}, \code{rstanreg} tidiers
\item improved \code{tidy.MCMCglmm}
}
}
\subsection{USER-VISIBLE CHANGES}{
\itemize{
\item all methods return tibbles (\code{tbl_df}) rather than data
frames
\item the value of the \code{group} variable for fixed-effect
parameters has changed from \code{"fixed"} to \code{NA}
\item \code{brmsfit} and \code{rstanarm} tidiers are more
consistent with other tidiers (e.g. \code{conf.level} rather than
\code{prob})
}
}
}
\section{CHANGES IN VERSION 0.0.1}{
\subsection{BUG FIXES}{
\itemize{
\item Sorted out some of the confusion over random effect naming:
"ran_vals" extracts conditional modes/BLUPs/varying parameters
(deviations from population-level estimates),
while "ran_coefs" extracts group-level estimates
}
}
\subsection{NEW FEATURES}{
\itemize{
\item improved \code{nlme} tidiers
\item improved \code{glmmTMB} tidiers (can handle some
zero-inflation parameters)
\item \code{lme4} tidiers now optionally take a pre-computed
\code{profile} argument when using \code{conf.method="profile"}
}
}
\subsection{USER-VISIBLE CHANGES}{
\itemize{
\item The default behaviour of most mixed-model tidiers has
changed/will gradually be changed to the following
(description modified from TJ Mahr at
\url{https://github.com/tidyverse/broom/issues/96}):
\itemize{
\item Random effect variances and covariances can now be extracted.
\code{effects = "ran_pars"} returns the standard deviations/correlations
of random effects (if \code{scales="sdcor"} [default])
or their variances
and covariances (if \code{scales = "varcov"})
\item Random effects estimates are now extracted with
\code{effects = "ran_coefs"} for the group-level
estimates (previously these effects were
extracted with \code{tidy(model, "random")}) or
\code{effects = "ran_vals"} for the conditional modes
(deviations of the group-level parameters from the
population-level estimates)
\item \code{effects} can take a vector of values (those listed
above, plus "fixed" for fixed effects). The default value
is \code{effects = c("ran_pars", "fixed")} which extracts random
effect variances/covariances and fixed effect estimates.
}
\item \code{term} names for random-effect parameters no longer
contain a (redundant) group specifier (at least for \code{lme4}
models); use something like
\code{tidyr::unite(term,term,group,sep=".")}
to collapse the two columns
}
}
}
| /inst/NEWS.Rd | no_license | junkka/broom.mixed | R | false | false | 4,833 | rd | \newcommand{\PR}{\Sexpr[results=rd]{tools:::Rd_expr_PR(#1)}}
\name{NEWS}
\title{broom.mixed News}
\encoding{UTF-8}
\section{CHANGES IN VERSION 0.2.4.9000}{
\subsection{NEW FEATURES}{
\itemize{
\item \code{lmer} tidier gets \code{ddf.method} (applies only to
lmerTest fits)
}
}
}
\section{CHANGES IN VERSION 0.2.4}{
\subsection{NEW FEATURES}{
\itemize{
\item \code{gls} tidier gets confint (GH #49)
}
}
\subsection{USER-VISIBLE CHANGES}{
\itemize{
\item redundant \code{estimate.method} in MCMC tidiers goes
away; use \code{robust} to compute point estimates/uncertainty via
median and MAD rather than mean and SE
}
}
\subsection{BUG FIXES}{
\itemize{
\item misc fixes: \code{lme4} tidiers (confint for ran_vals,
profile conf intervals fixed), \code{R2jags}, \code{gamlss} ...
\item ran_vals works for \code{glmmTMB}
}
}
}
\section{CHANGES IN VERSION 0.2.3}{
\subsection{BUG FIXES}{
\itemize{
\item don't ignore conf.level in tidy.(merMod|glmmTMB) (GH #30,31: @strengejacke)
\item levels correct in tidy.brmsfit (GH #36: @strengejacke)
\item component argument works for random effects in glmmTMB (GH #33: @strengejacke)
}
}
\subsection{NEW FEATURES}{
\itemize{
\item \code{brmsfit} and \code{rstanarm} methods allow
\code{conf.method="HPDinterval"}
}
}
\subsection{USER-VISIBLE CHANGES}{
\itemize{
\item tidy.brmsfit gets component column (GH #35: @strengejacke),
response column for multi-response models (GH #34: @strengejacke)
\item component tags are stripped from tidied brmsfit objects
\item "Intercept" terms in brms fits are re-coded as
"(Intercept)" by default, for dotwhisker/cross-model
compatibility; for previous behaviour, specify \code{fix.intercept=FALSE}
}
}
}
\section{CHANGES IN VERSION 0.2.2}{
\itemize{
\item modify examples, for CRAN compliance
}
}
\section{CHANGES IN VERSION 0.2.1}{
\itemize{
\item reduced size of stored fits for examples, for CRAN compliance
}
}
\section{CHANGES IN VERSION 0.2.0}{
\subsection{NEW FEATURES}{
\itemize{
\item more consistent term names in \code{brmsfit}, \code{rstanreg} tidiers
\item improved \code{tidy.MCMCglmm}
}
}
\subsection{USER-VISIBLE CHANGES}{
\itemize{
\item all methods return tibbles (\code{tbl_df}) rather than data
frames
\item the value of the \code{group} variable for fixed-effect
parameters has changed from \code{"fixed"} to \code{NA}
\item \code{brmsfit} and \code{rstanarm} tidiers are more
consistent with other tidiers (e.g. \code{conf.level} rather than
\code{prob})
}
}
}
\section{CHANGES IN VERSION 0.0.1}{
\subsection{BUG FIXES}{
\itemize{
\item Sorted out some of the confusion over random effect naming:
"ran_vals" extracts conditional modes/BLUPs/varying parameters
(deviations from population-level estimates),
while "ran_coefs" extracts group-level estimates
}
}
\subsection{NEW FEATURES}{
\itemize{
\item improved \code{nlme} tidiers
\item improved \code{glmmTMB} tidiers (can handle some
zero-inflation parameters)
\item \code{lme4} tidiers now optionally take a pre-computed
\code{profile} argument when using \code{conf.method="profile"}
}
}
\subsection{USER-VISIBLE CHANGES}{
\itemize{
\item The default behaviour of most mixed-model tidiers has
changed/will gradually be changed to the following
(description modified from TJ Mahr at
\url{https://github.com/tidyverse/broom/issues/96}):
\itemize{
\item Random effect variances and covariances can now be extracted.
\code{effects = "ran_pars"} returns the standard deviations/correlations
of random effects (if \code{scales="sdcor"} [default])
or their variances
and covariances (if \code{scales = "varcov"})
\item Random effects estimates are now extracted with
\code{effects = "ran_coefs"} for the group-level
estimates (previously these effects were
extracted with \code{tidy(model, "random")}) or
\code{effects = "ran_vals"} for the conditional modes
(deviations of the group-level parameters from the
population-level estimates)
\item \code{effects} can take a vector of values (those listed
above, plus "fixed" for fixed effects). The default value
is \code{effects = c("ran_pars", "fixed")} which extracts random
effect variances/covariances and fixed effect estimates.
}
\item \code{term} names for random-effect parameters no longer
contain a (redundant) group specifier (at least for \code{lme4}
models); use something like
\code{tidyr::unite(term,term,group,sep=".")}
to collapse the two columns
}
}
}
|
source('./plot.R')
library('xtable')
assign("consistencyEcdf", TRUE, envir = .GlobalEnv)
assign("consistencyReaders", TRUE, envir = .GlobalEnv)
# Parse a consistency-benchmark CSV log.
#
# Each data line has the form: timing,thread,start,delay,value
#   - timing: experiment timing/configuration key; lines whose first field
#     is not numeric (e.g. repeated headers) are skipped
#   - thread: thread id containing "W" (writer) or "R" (reader)
#   - start, delay, value: per-operation numbers
#
# @param fileName path to the CSV file to parse
# @return list(writerThreads, readerThreads, outputW, outputR) where the
#   thread lists map thread name -> TRUE, and outputW/outputR are nested
#   lists indexed as output[[timing]][[thread]][[start]] ->
#   list(start, delay, value) with numeric values.
consistencyParse <- function(fileName){
  #READ FILE
  k <- readLines(fileName, warn = FALSE)
  splitted <- strsplit(x=k, split = ",")
  outputW <- list();
  outputR <- list();
  RThreads <- list();
  WThreads <- list();
  header <- splitted[[1]]
  # First line is the header; data starts at line 2.
  for(lineNb in 2:length(k)){
    line = splitted[[lineNb]]
    timing <- line[1]
    thread <- line[2]
    start <- line [3]
    # Skip any line whose first field does not parse as a number.
    if(!is.na(suppressWarnings(as.numeric(timing)))){
      if(length(grep("W", thread)) > 0){
        # Writer line: register the thread and store its parsed fields,
        # creating the nested timing/thread lists on first use.
        WThreads[[thread]] <- TRUE
        if(is.null(outputW[[timing]])){
          outputW[[timing]] <- list()
        }
        if(is.null(outputW[[timing]][[thread]])){
          outputW[[timing]][[thread]] <- list()
        }
        lineList <- list()
        lineList[["start"]] <- suppressWarnings(as.numeric(line[3]))
        lineList[["delay"]] <- suppressWarnings(as.numeric(line[4]))
        lineList[["value"]] <- suppressWarnings(as.numeric(line[5]))
        outputW[[timing]][[thread]][[start]] <- lineList
      }else if(length(grep("R", thread)) > 0){
        # Reader line: same structure as the writer branch.
        RThreads[[thread]] <- TRUE
        if(is.null(outputR[[timing]])){
          outputR[[timing]] <- list()
        }
        if(is.null(outputR[[timing]][[thread]])){
          outputR[[timing]][[thread]] <- list()
        }
        lineList <- list()
        lineList[["start"]] <- suppressWarnings(as.numeric(line[3]))
        lineList[["delay"]] <- suppressWarnings(as.numeric(line[4]))
        lineList[["value"]] <- suppressWarnings(as.numeric(line[5]))
        outputR[[timing]][[thread]][[start]] <- lineList
      }
    }
  }
  return(list(writerThreads = WThreads, readerThreads = RThreads, outputW = outputW, outputR = outputR))
}
# Post-process parsed reader data: for every timing key and reader thread,
# keep only the reads where the observed value changed, and attach to each
# change the most recent earlier read (thread/start/value) as before*.
#
# NOTE(review): the comparisons below operate on list-name strings ("start",
# "innerstart") and on the numeric sentinel minStart, so ordering is
# lexicographic / mixed-type rather than numeric -- confirm intended.
# NOTE(review): the inner search iterates names(parserThread) (the current
# thread's starts) while indexing parserKey[[innerthread]][[innerstart]];
# this looks like it should iterate names(parserKey[[innerthread]]).
#
# @param parsedReader the outputR component produced by consistencyParse()
# @return nested list: result[[timing]][[thread]][[start]] -> the change
#   record, possibly extended with beforeThread/beforeStart/beforeValue
consistencyPostParse <- function(parsedReader){
  changesInData <- list()
  for(key in names(parsedReader)){
    parserKey <- parsedReader[[key]]
    changesInData[[key]] <- list()
    # find all changes per thread
    for(thread in names(parserKey)){
      threadList <- list()
      parserThread <- parserKey[[thread]]
      #Collect all changes
      isFirst <- TRUE
      change <- FALSE
      tryCatch({
        for(start in names(parserThread)){
          if(isFirst){
            # The first read of a thread always counts as a change.
            isFirst <- FALSE
            threadList[[start]] <- parserThread[[start]]
            lastValue <- parserThread[[start]]
            change <- TRUE
          }
          else if(areDifferentNumbersWithNa(lastValue[["value"]], parserThread[[start]][["value"]])){
            # Value differs from the previous read: record the change.
            threadList[[start]] <- parserThread[[start]]
            lastValue <- parserThread[[start]]
            change <- TRUE
          }
          if(change){
            # Try to find last read of other thread
            change <- FALSE
            minStart <- -1000000000
            for(innerthread in names(parserKey)){
              for(innerstart in names(parserThread)){
                if(innerstart < start && innerstart > minStart){
                  minStart <- innerstart
                  minThread <- innerthread
                  minValue <- parserKey[[innerthread]][[innerstart]][["value"]]
                }
              }
            }
            if(minStart > -1000000000){
              # Attach the latest earlier read as context for this change.
              threadList[[start]]["beforeThread"] <-minThread
              threadList[[start]]["beforeStart"] <-minStart
              threadList[[start]]["beforeValue"] <-minValue
            }
          }
        }
      },error = function(e) print(paste("Problem in parse", thread, start, lastValue[["value"]] , parserThread[[start]][["value"]] )))
      changesInData[[key]][[thread]] <- threadList
    }
    ##
  }
  return(changesInData)
}
# NA-aware scalar inequality: NA counts as a distinct "value", so two NAs
# are considered equal and an NA differs from any real number.
#
# @param number1 first scalar (may be NA)
# @param number2 second scalar (may be NA)
# @return TRUE if the two values differ, FALSE otherwise
areDifferentNumbersWithNa <- function(number1, number2){
  firstMissing <- is.na(number1)
  secondMissing <- is.na(number2)
  if (firstMissing || secondMissing) {
    # Different exactly when only one of the two is missing.
    return(xor(firstMissing, secondMissing))
  }
  number1 != number2
}
# Plot, per timing key, the number of observed value changes for each reader
# thread: line plots (maxOnSamePlot series per PNG) plus one merged
# horizontal barplot. Threads with identical change counts across all keys
# are collapsed into a single labeled series.
#
# NOTE(review): parsedWriter, readerNames (beyond its names) and writerNames
# are unused here; figureWidth/figureHeight/figureRes are globals defined
# elsewhere (see plot.R) -- confirm.
#
# @param parsedReader  per-key, per-thread change lists (consistencyPostParse output)
# @param parsedWriter  unused in this function
# @param readerNames   named list; its names select the reader threads to plot
# @param writerNames   unused in this function
# @param maxOnSamePlot maximum number of series per line-plot PNG
# @param exportDir     filename template containing %type%, %number%, %extension%
consistencyPlotNb <- function(parsedReader, parsedWriter, readerNames, writerNames, maxOnSamePlot, exportDir){
  columnNames <- names(readerNames)
  rowNames <- names(parsedReader)
  # Rows = timing keys, columns = reader threads; cell = number of changes.
  amountOfChanges <-matrix(nrow=length(rowNames), ncol=length(columnNames), dimnames = list((as.numeric(rowNames)), columnNames))
  for(i in 1:nrow(amountOfChanges)){
    key <- rowNames[i]
    parserKey <- parsedReader[[key]]
    # count all changes per thread
    for(j in 1:ncol(amountOfChanges)){
      thread <- columnNames[j]
      parserThread <- parserKey[[thread]]
      amountOfChanges[i,j] <-length(parserThread)
    }
  }
  # Find all similar columns
  # similarMatrix[a, b] == 1 iff threads a and b have identical change
  # counts for every timing key.
  similarMatrix <- matrix(nrow=ncol(amountOfChanges), ncol=ncol(amountOfChanges))
  for(outerThread in 1:ncol(amountOfChanges)){
    for(innerThread in outerThread:ncol(amountOfChanges)){
      if(areEqualAmount(outerThread, innerThread, amountOfChanges)){
        similarMatrix[outerThread, innerThread] = 1
        similarMatrix[innerThread, outerThread] = 1
      }
      else{
        similarMatrix[outerThread, innerThread] = 0
        similarMatrix[innerThread, outerThread] = 0
      }
    }
  }
  # Collapse equivalent threads: each group keeps one representative column
  # (labelIndex) and a comma-separated label of its member thread numbers.
  labelThreads <- list()
  labelList <- list()
  labelIndex <- list()
  index = 0
  for(thread in 1:ncol(amountOfChanges)){
    threadNumber = getUniqueThreadID(thread, similarMatrix)
    if(threadNumber == thread){
      # Thread is its own representative: start a new label group.
      index <- index + 1
      labelThreads[thread] <- index
      labelList[index] <- paste((thread-1), "", sep="")
      labelIndex[index] <- thread
    }
    else{
      # Append this thread's number to its representative's label.
      labelList[labelThreads[[threadNumber]]] <- paste(labelList[labelThreads[[threadNumber]]], (thread-1), sep = ", ")
    }
  }
  labelArray <- unlist(labelList)
  # Line plots: up to maxOnSamePlot collapsed series per output PNG.
  for(i in 1:ceiling(index/maxOnSamePlot)){
    startNumber <- ((i-1)*maxOnSamePlot+1)
    endNumber <- min((i)*maxOnSamePlot, index)
    fileNameSub <- gsub("%number%", i, gsub("%type%", "consistency-%number%", gsub("%extension%", "png", exportDir)))
    png(filename=fileNameSub, width=figureWidth, height=figureHeight, units="px", res=figureRes)
    #plot.new()
    plot(x = 0, y = 0, type="n", xlab ="Key number)",ylab = "Amount of changes",
         ylim = c(0, max(amountOfChanges)), xlim = as.numeric(c(min((rowNames)), max((rowNames)))))
    for(plotNb in startNumber:endNumber){
      lines(x=rowNames, y = amountOfChanges[,labelIndex[[plotNb]]],type="l",col = plotNb, pch = plotNb)
    }
    legend("topleft", legend=labelArray[startNumber:endNumber], col = startNumber:endNumber, pch = startNumber:endNumber)
    dev.off();
  }
  # Make barplot
  # countPlot[v+1, g] = number of timing keys for which group g's
  # representative thread saw exactly v changes.
  maxAmountOfChanges <- max(amountOfChanges)
  countPlot <- matrix(ncol=length(labelArray), nrow= (maxAmountOfChanges+1))
  for(indexCounter in 1:index){
    thread <- labelIndex[[indexCounter]]
    threadTable <- table(amountOfChanges[,thread])
    countPlot[, indexCounter] <- tabulate(amountOfChanges[,thread]+1, nbins = maxAmountOfChanges+1)
  }
  #Change label
  # Re-join group labels with spaces, inserting a newline every 4 thread
  # numbers so long labels wrap on the barplot axis.
  labelListSplitted <- strsplit(labelArray, c(", "))
  labelListMerged <- rep(0,index)
  for(indexCounter in 1:index){
    for(i in 1:length(labelListSplitted[indexCounter][[1]])){
      if(labelListMerged[indexCounter] == 0){
        labelListMerged[indexCounter] <- labelListSplitted[indexCounter][[1]][i]
      }
      else{
        labelListMerged[indexCounter] <- paste(labelListMerged[indexCounter], labelListSplitted[indexCounter][[1]][i], sep=" ")
      }
      if(i%%4 == 0){
        labelListMerged[indexCounter] <- paste(labelListMerged[indexCounter], "\n", sep="")
      }
    }
  }
  fileNameSub <- gsub("%type%", "consistency-merged", gsub("%extension%", "png", exportDir))
  png(filename=fileNameSub, width=figureWidth, height=2*figureHeight, units="px", res=figureRes)
  # Widen the left margin for the wrapped labels; restored after plotting.
  oldMar <- par()$mar
  newMar <- oldMar
  newMar[2] <- newMar[2] + 2
  par(mar=newMar)
  barplot(countPlot, main="Consistency together",
          xlab="Number of changes", col = c(1:3),
          legend = c(0:max(amountOfChanges)),
          names.arg=labelListMerged,
          horiz=TRUE)
  dev.off();
  par(mar=oldMar)
}
# TRUE iff columns `thread1` and `thread2` of `matrix` are element-wise equal.
#
# thread1, thread2  column indices to compare.
# matrix            numeric matrix of change counts (rows = keys); the counts
#                   are list lengths, so NA entries cannot occur here.
#
# Fix: replaces the original `for(row in 1:nrow(matrix))` scan — which
# mis-iterates (1, 0) when the matrix has zero rows — with a vectorized
# all(); on a zero-row matrix this correctly returns TRUE.
areEqualAmount <- function(thread1, thread2, matrix){
  all(matrix[, thread1] == matrix[, thread2])
}
# Return the representative thread for `thread`: the lowest-numbered thread
# whose change profile matches it, according to the 0/1 similarity matrix.
# Thread 1 is always its own representative. The diagonal of the similarity
# matrix is 1, so a match always exists within rows 1..thread.
getUniqueThreadID <- function(thread, matrix){
  if (thread == 1) {
    return(1)
  }
  matches <- which(matrix[seq_len(thread), thread] == 1)
  matches[1]
}
# Per-reader consistency timelines and ECDF plots.
#
# For every key, collects the first writer entry's start/delay and up to
# maxRetries reader attempts per reader thread. Times are divided by 1000
# (presumably microseconds -> milliseconds — TODO confirm against log units).
# When the global consistencyReaders is TRUE, draws one timeline PNG per
# reader; when consistencyEcdf is TRUE, draws ECDF PNGs and returns
# list(startL = ..., stopL = ...) of ecdf objects keyed "w" (writer),
# "1".."n" (one per reader) and "all" (all readers pooled); otherwise the
# function returns NULL.
#
# Uses globals: consistencyReaders, consistencyEcdf, figureWidth,
# figureWidthSquare, figureHeight, figureRes, and the axis/legend labels
# tijd, cumKans, ecdfLegend (defined elsewhere, e.g. plot.R).
consistencyPlotEachReader <- function(parsedReader, parsedWriter, readerNames, writerNames, exportDir, maxRetries){
numberOfWriters <- length(writerNames)
# plotMatrix: per key, writer (start, delay) column pairs followed by
# maxRetries (start, delay) pairs per reader. densityMatrix keeps only one
# pair per reader (the last attempt written wins). densityMatrixAll stacks
# all readers' rows for the pooled "all" ECDF.
plotMatrix <- matrix(nrow = length(parsedWriter), ncol = (2*length(writerNames)+2*maxRetries*length(readerNames)))
densityMatrix <- matrix(nrow = length(parsedWriter), ncol = (2*length(writerNames)+2*length(readerNames)))
densityMatrixAll <- matrix(nrow = (length(parsedWriter)*length(readerNames)), ncol = (2))
rownames(plotMatrix) <- names(parsedWriter)
rownames(densityMatrix) <- names(parsedWriter)
colNamesPlot <- rep("", ncol(plotMatrix))
densityMatrixNamesPlot<- rep("", ncol(densityMatrix))
# Column names: one "-START"/"-DELAY" pair per writer ...
for(j in 1:length(writerNames)){
threadName <- names(writerNames[j])[1]
colNamesPlot[(2*j-1)] <- paste(threadName, "-START", sep="")
colNamesPlot[(2*j)] <- paste(threadName, "-DELAY", sep="")
densityMatrixNamesPlot[(2*j-1)] <- paste(threadName, "-START", sep="")
densityMatrixNamesPlot[(2*j)] <- paste(threadName, "-DELAY", sep="")
}
# ... then per reader: maxRetries pairs in plotMatrix, one pair in densityMatrix.
for(j in 1:length(readerNames)){
threadName <- names(readerNames[j])[1]
for(k in 1:maxRetries){
colNamesPlot[(2*numberOfWriters+2*maxRetries*(j-1)+2*k-1)] <- paste(threadName, "-", k, "-START", sep="")
colNamesPlot[(2*numberOfWriters+2*maxRetries*(j-1)+2*k)] <- paste(threadName, "-", k, "-DELAY", sep="")
}
densityMatrixNamesPlot[(2*numberOfWriters+2*j-1)] <- paste(threadName, "-START", sep="")
densityMatrixNamesPlot[(2*numberOfWriters+2*j)] <- paste(threadName, "-DELAY", sep="")
}
colnames(plotMatrix) <- colNamesPlot
colnames(densityMatrix) <- densityMatrixNamesPlot
colnames(densityMatrixAll) <- c("START", "DELAY")
#writer data gathering (delay)
for(i in 1:length(parsedWriter)){
parserKey <- parsedWriter[i]
parserKey <- parserKey[[names(parserKey)[1]]]
# record first entry per writer thread
for(j in 1:length(writerNames)){
parserThread <- parserKey[j]
parserThread <- parserThread[[names(parserThread)[1]]]
firstEntry <- parserThread[[names(parserThread)[1]]]
plotMatrix[i,(2*j-1)] <- firstEntry[["start"]]/1000
plotMatrix[i,(2*j)] <- firstEntry[["delay"]]/1000
densityMatrix[i,(2*j-1)] <- firstEntry[["start"]]/1000
densityMatrix[i,(2*j)] <- firstEntry[["delay"]]/1000
}
}
# Reader data gathering: keep only the last maxRetries attempts per reader/key.
for(i in 1:length(parsedReader)){
parserKey <- parsedReader[i]
parserKey <- parserKey[[names(parserKey)[1]]]
# record attempts per reader thread
for(j in 1:length(readerNames)){
parserThread <- parserKey[j]
parserThread <- parserThread[[names(parserThread)[1]]]
allValue <- names(parserThread)
startIndex <- 1
for(k in max(length(allValue)-maxRetries+1, 1):length(allValue)){
firstEntry <- parserThread[[allValue[k]]]
if(length(firstEntry) >0){
plotMatrix[i,(2*numberOfWriters+2*maxRetries*(j-1)+2*startIndex-1)] <- firstEntry[["start"]]/1000
plotMatrix[i,(2*numberOfWriters+2*maxRetries*(j-1)+2*startIndex)] <- firstEntry[["delay"]]/1000
densityMatrix[i,(2*numberOfWriters+2*j-1)] <- firstEntry[["start"]]/1000
densityMatrix[i,(2*numberOfWriters+2*j)] <- firstEntry[["delay"]]/1000
densityMatrixAll[(j-1)* length(parsedReader) + i, 1] <- firstEntry[["start"]]/1000
densityMatrixAll[(j-1)* length(parsedReader) + i, 2]<- firstEntry[["delay"]]/1000
}
startIndex <- startIndex + 1
}
}
}
# Timeline plot per reader (gated on the global consistencyReaders flag).
if(consistencyReaders){
rowNames <- names(parsedWriter)
labelPlot <- list()
labelPlot[1] <- "Schrijver start"
labelPlot[2] <- "Schrijver stop"
for(k in 1:maxRetries){
labelPlot[(2*k + 1)] <- paste("Lezer Poging ", k, " start", sep = "")
labelPlot[(2*k + 2)] <- paste("Lezer Poging ", k, " stop", sep = "")
}
labelPlot <- unlist(labelPlot)
for(j in 1:length(readerNames)){
startIndex <- (2*numberOfWriters+2*maxRetries*(j-1)+1)
endIndex <- (2*numberOfWriters+2*maxRetries*(j-1)+2*maxRetries)
# NOTE(review): maxY below is computed but never used (ylim is hard-coded).
maxY = max(max(plotMatrix[,1:2],na.rm = TRUE),
max(plotMatrix[,startIndex:endIndex], na.rm = TRUE) ,na.rm = TRUE)
fileNameSub <- gsub("%type%", paste("consistency-plot-", names(readerNames)[j], sep=""), gsub("%extension%", "png", exportDir))
png(filename=fileNameSub, width=figureWidth, height=figureHeight, units="px", res=figureRes)
# NOTE(review): hard-coded ylim (0, 80) and xlim window (+72e6 .. +150e6
# from the first key) look tailored to one specific run — confirm before reuse.
plot(x = 0, y = 0, type="n", xlab ="Sleutelnummer",ylab = "Tijd (ms)",
ylim = c(0, 80), xlim = as.numeric(c(as.numeric(min(rowNames))+72000000, as.numeric(min(rowNames))+150000000)))
#Plot writers
lines(x=rowNames, y = plotMatrix[,1],type="l",col = 1, pch = 1)
lines(x=rowNames, y = plotMatrix[,2],type="l",col = 2, pch = 2)
for(k in 1:maxRetries){
if(k == 1){
plotType <- "l"
}else{
plotType <- "b"
}
lines(x=rowNames, y = plotMatrix[,startIndex + 2*(k-1)],type=plotType,col = (2*k+1), pch = (2*k+1))
lines(x=rowNames, y = plotMatrix[,startIndex + 2*(k-1)+1],type=plotType,col = (2*k+2), pch = (2*k+2))
}
legend("topright", labelPlot, col = 1:(2*maxRetries+2), pch = 1:(2*maxRetries+2))
dev.off();
}
}
## ECDF
if(consistencyEcdf){
ecdfStartList <- list()
ecdfStopList <- list()
# Writer start/stop ECDFs serve as the reference curves on every plot.
startWQuery <- ecdf(densityMatrix[,1])
stopWQuery <- ecdf(densityMatrix[,2])
ecdfStartList[["w"]] <- startWQuery
ecdfStopList[["w"]] <- stopWQuery
minLezen <- quantile(startWQuery,0.01)
maxLezen <- quantile(stopWQuery,0.90)
# One ECDF plot per reader (writer start/stop + reader start/stop).
for(j in 1:length(readerNames)){
fileNameSub <- gsub("%type%", paste("ECDF-plot-", names(readerNames)[j], sep=""), gsub("%extension%", "png", exportDir))
png(filename=fileNameSub, width=figureWidthSquare, height=figureHeight, units="px", res=figureRes)
startQuery <- ecdf(densityMatrix[,2*numberOfWriters+2*j-1])
stopQuery <- ecdf(densityMatrix[,2*numberOfWriters+2*j])
plot(startWQuery, xlim=c(quantile(startQuery,0.01), quantile(stopQuery, 0.99)), ylim = c(0,1), col = 1, pch = 1,
do.p = FALSE, xlab =tijd,ylab = cumKans, main = NULL)
lines(stopWQuery, col = 2, pch = 2, do.p = FALSE)
lines(startQuery, col = 3, pch = 3, do.p = FALSE)
lines(stopQuery, col = 4, pch = 4, do.p = FALSE)
legend("bottomright", ecdfLegend, col = 1:4, pch=1:4)
dev.off();
# Grow the shared x-range used by the combined Start/Stop plots below.
minLezen = min(quantile(startQuery,0.01), minLezen, na.rm = TRUE)
maxLezen = max(quantile(stopQuery,0.99), maxLezen, na.rm = TRUE)
ecdfStartList[[toString(j)]] <- startQuery
ecdfStopList[[toString(j)]] <- stopQuery
}
## ECDF Start
labelPlot <- list()
labelPlot[1] <- "Schrijver"
fileNameSub <- gsub("%type%", paste("ECDF-plot-Start", sep=""), gsub("%extension%", "png", exportDir))
png(filename=fileNameSub, width=figureWidthSquare, height=figureHeight, units="px", res=figureRes)
plot(startWQuery, xlim=c(minLezen, maxLezen), ylim = c(0,1), col = 1, pch = 1,
do.p = FALSE, xlab =tijd,ylab = cumKans, main = NULL)
for(j in 1:length(readerNames)){
lines(ecdf(densityMatrix[,2*numberOfWriters+2*j-1]), col = (j+1), pch = (j+1), do.p = FALSE)
labelPlot[(1+j)] <- paste("Lezer ", j, sep = "")
}
labelPlot <- unlist(labelPlot)
legend("bottomright", labelPlot, col = 1:(length(readerNames)+1), pch = 1:(length(readerNames)+1))
dev.off();
## ECDF Stop
labelPlot <- list()
labelPlot[1] <- "Schrijver"
fileNameSub <- gsub("%type%", paste("ECDF-plot-Stop", sep=""), gsub("%extension%", "png", exportDir))
png(filename=fileNameSub, width=figureWidthSquare, height=figureHeight, units="px", res=figureRes)
plot(stopWQuery, xlim=c(minLezen, maxLezen), ylim = c(0,1), col = 1, pch = 1,
do.p = FALSE, xlab =tijd,ylab = cumKans, main = NULL)
for(j in 1:length(readerNames)){
lines(ecdf(densityMatrix[,2*numberOfWriters+2*j]), col = (j+1), pch = (j+1), do.p = FALSE)
labelPlot[(1+j)] <- paste("Lezer ", j, sep = "")
}
labelPlot <- unlist(labelPlot)
legend("bottomright", labelPlot, col = 1:(length(readerNames)+1), pch = 1:(length(readerNames)+1))
dev.off();
## ECDF ALL
fileNameSub <- gsub("%type%", paste("ECDF-plot-all", sep=""), gsub("%extension%", "png", exportDir))
png(filename=fileNameSub, width=figureWidthSquare, height=figureHeight, units="px", res=figureRes)
startQuery <- ecdf(densityMatrixAll[,1])
stopQuery <- ecdf(densityMatrixAll[,2])
ecdfStartList[["all"]] <- startQuery
ecdfStopList[["all"]] <- stopQuery
plot(startWQuery, xlim=c(quantile(startQuery,0.01), quantile(stopQuery, 0.99)), ylim = c(0,1), col = 1, pch = 1,
do.p = FALSE, xlab = tijd,ylab = cumKans, main = NULL)
lines(stopWQuery, col = 2, pch = 2, do.p = FALSE)
lines(startQuery, col = 3, pch = 3, do.p = FALSE)
lines(stopQuery, col = 4, pch = 4, do.p = FALSE)
legend("bottomright", ecdfLegend, col = 1:4, pch=1:4)
dev.off();
# NOTE(review): temp below is assigned but unused.
temp <- list(startL = ecdfStartList, stopL = ecdfStopList)
return (list(startL = ecdfStartList, stopL = ecdfStopList))
}
}
# For each (loop, typeOpp, readOpp) combination, compare the ECDFs of all
# write concerns (writeOpps) in one PNG — once per reader thread and once
# aggregated over all readers (thread id "all").
#
# startECDF/stopECDF: nested lists writeOpp -> readOpp -> typeOpp -> loop ->
#   thread-id -> ecdf object (shape produced by consistencyPlotEachReader).
# Axis range spans the 1st percentile of the earliest start ECDF to the 97th
# percentile of the latest stop ECDF across all writeOpps.
# writerThreads is not referenced in this function body; readerThreads is
# only used for its length.
# Side effects: writes PNGs; uses globals figureWidthSquare, figureHeight,
# figureRes and the axis labels tijd, cumKans.
consistencyPlotsMongoForWrites <- function(startECDF, stopECDF, writeOpps, readOpps, typeOpps, loops, readerThreads, writerThreads, exportDir){
for(loop in loops){
for(typeOpp in typeOpps){
for(readOpp in readOpps){
for(thread in 1:length(readerThreads)){
# Determine the shared x-range over all write concerns for this thread.
xMinimum = NA
xMaximum = NA
for(writeOpp in writeOpps){
xMinimum <- min(xMinimum, quantile(startECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][[toString(x=thread)]],0.01), na.rm = TRUE)
xMaximum <- max(xMaximum, quantile(stopECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][[toString(x=thread)]],0.97), na.rm = TRUE)
}
fileNameSub <- gsub("%type%", paste("ECDF-Write-", typeOpp, "-", readOpp, "-", thread, "-", loop, sep=""), gsub("%extension%", "png", exportDir))
png(filename=fileNameSub, width=figureWidthSquare, height=figureHeight, units="px", res=figureRes)
plot(c(0), xlim=c(xMinimum, xMaximum), ylim = c(0,1), col = 1, pch = 1, type = "n",
xlab =tijd,ylab = cumKans, main = NULL)
index <- 0
# One start + one stop curve per write concern, same color per concern.
for(writeOpp in writeOpps){
index <- index + 1
lines(startECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][[toString(thread)]], col = index, pch = index, do.p = FALSE)
lines(stopECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][[toString(thread)]], col = index, pch = index, do.p = FALSE)
}
legend("bottomright", writeOpps, col = 1:index, pch=1:index)
dev.off();
}
# Same plot again, aggregated over all reader threads ("all").
thread = "all"
xMinimum = NA
xMaximum = NA
for(writeOpp in writeOpps){
xMinimum <- min(xMinimum, quantile(startECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][[toString(thread)]],0.01), na.rm = TRUE)
xMaximum <- max(xMaximum, quantile(stopECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][[toString(thread)]],0.97), na.rm = TRUE)
}
fileNameSub <- gsub("%type%", paste("ECDF-Write-", typeOpp, "-", readOpp, "-", thread, "-", loop, sep=""), gsub("%extension%", "png", exportDir))
png(filename=fileNameSub, width=figureWidthSquare, height=figureHeight, units="px", res=figureRes)
plot(c(0), xlim=c(xMinimum, xMaximum), ylim = c(0,1), col = 1, pch = 1, type = "n",
xlab =tijd,ylab = cumKans, main = NULL)
index <- 0
for(writeOpp in writeOpps){
index <- index + 1
lines(startECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][[toString(x=thread)]], col = index, pch = index, do.p = FALSE)
lines(stopECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][[toString(x=thread)]], col = index, pch = index, do.p = FALSE)
}
legend("bottomright", writeOpps, col = 1:index, pch=1:index)
dev.off();
}
}
}
}
# For each (loop, typeOpp, writeOpp) combination, compare the ECDFs of all
# read concerns (readOpps) against the writer's start/stop ECDFs — once per
# reader thread and once aggregated over all readers ("all").
#
# startECDF/stopECDF: nested lists writeOpp -> readOpp -> typeOpp -> loop ->
#   thread-id -> ecdf object; key "w" holds the writer's ECDF.
# writerThreads is not referenced; readerThreads is only used for its length.
# Side effects: writes PNGs; uses globals figureWidthSquare, figureHeight,
# figureRes, tijd, cumKans.
#
# NOTE(review): `readOpp` is read in the writeStart/writeStop lookups below
# BEFORE the inner `for(readOpp in readOpps)` loop has bound it; on the very
# first pass it relies on a leftover binding from an enclosing environment —
# this looks like a latent bug, confirm against intended behavior.
consistencyPlotsMongoForReads <- function(startECDF, stopECDF, writeOpps, readOpps, typeOpps, loops, readerThreads, writerThreads, exportDir){
readLabel <- c("Schrijver start", "Schrijver stop", readOpps)
for(loop in loops){
for(typeOpp in typeOpps){
for(writeOpp in writeOpps){
for(thread in 1:length(readerThreads)){
fileNameSub <- gsub("%type%", paste("ECDF-Reads-", typeOpp, "-", writeOpp, "-", thread, "-", loop, sep=""), gsub("%extension%", "png", exportDir))
# NOTE(review): readOpp not yet defined by the loop below on first use.
writeStart <- startECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][["w"]]
writeStop <- stopECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][["w"]]
xMinimum = quantile(writeStart,0.01)
xMaximum = quantile(writeStop,0.90)
# Widen the x-range to cover every read concern's curves.
for(readOpp in readOpps){
xMinimum <- min(xMinimum, quantile(startECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][[toString(thread)]],0.01), na.rm = TRUE)
xMaximum <- max(xMaximum, quantile(stopECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][[toString(thread)]],0.97), na.rm = TRUE)
}
png(filename=fileNameSub, width=figureWidthSquare, height=figureHeight, units="px", res=figureRes)
plot(c(0), xlim=c(xMinimum, xMaximum), ylim = c(0,1), col = 1, pch = 1, type = "n",
xlab =tijd,ylab = cumKans, main = NULL)
lines(writeStart, col = 1, pch = 1, do.p = FALSE)
lines(writeStop, col = 2, pch = 2, do.p = FALSE)
index <- 2
# Start+stop curve pair per read concern (colors 3, 4, ...).
for(readOpp in readOpps){
index <- index + 1
lines(startECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][[toString(x=thread)]], col = index, pch = index, do.p = FALSE)
lines(stopECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][[toString(x=thread)]], col = index, pch = index, do.p = FALSE)
}
legend("bottomright", readLabel, col = 1:index, pch=1:index)
dev.off();
}
## ALL
# Same plot aggregated over all readers; here readOpp still holds the last
# value from the loops above.
thread = "all"
writeStart <- startECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][["w"]]
writeStop <- stopECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][["w"]]
xMinimum = quantile(writeStart,0.01)
xMaximum = quantile(writeStop,0.90)
for(readOpp in readOpps){
xMinimum <- min(xMinimum, quantile(startECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][[toString(thread)]],0.01), na.rm = TRUE)
xMaximum <- max(xMaximum, quantile(stopECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][[toString(thread)]],0.97), na.rm = TRUE)
}
fileNameSub <- gsub("%type%", paste("ECDF-Reads-", typeOpp, "-", writeOpp, "-", thread, "-", loop, sep=""), gsub("%extension%", "png", exportDir))
png(filename=fileNameSub, width=figureWidthSquare, height=figureHeight, units="px", res=figureRes)
plot(c(0), xlim=c(xMinimum, xMaximum), ylim = c(0,1), col = 1, pch = 1, type = "n",
xlab =tijd ,ylab = cumKans, main = NULL)
lines(writeStart, col = 1, pch = 1, do.p = FALSE)
lines(writeStop, col = 2, pch = 2, do.p = FALSE)
index <- 2
for(readOpp in readOpps){
index <- index + 1
lines(startECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][[toString(x=thread)]], col = index, pch = index, do.p = FALSE)
lines(stopECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][[toString(x=thread)]], col = index, pch = index, do.p = FALSE)
}
legend("bottomright", readLabel, col = 1:index, pch=1:index)
dev.off();
}
}
}
}
# One PNG per (loop, typeOpp): overlay the writer start/stop ECDFs of every
# write concern (looked up under the first read concern, key "w") so the
# write concerns can be compared directly.
#
# readerThreads and writerThreads are not referenced in this function body.
# NOTE(review): readLabel is built but never used — the legend shows
# writeOpps instead; confirm which labeling was intended.
# Side effects: writes PNGs; uses globals figureWidthSquare, figureHeight,
# figureRes, tijd, cumKans.
plotWriteComparison <- function(startECDF, stopECDF, writeOpps, readOpps, typeOpps, loops, readerThreads, writerThreads, exportDir){
readLabel <- c("Schrijver start", "Schrijver stop", readOpps)
for(loop in loops){
for(typeOpp in typeOpps){
fileNameSub <- gsub("%type%", paste("ECDF-Compare-Write-", typeOpp, "-", loop, sep=""), gsub("%extension%", "png", exportDir))
png(filename=fileNameSub, width=figureWidthSquare, height=figureHeight, units="px", res=figureRes)
# Shared x-range: 1st percentile of starts to 90th percentile of stops.
xMinimum = NA
xMaximum = NA
for(writeOpp in writeOpps){
writeStart <- startECDF[[writeOpp]][[readOpps[[1]]]][[typeOpp]][[loop]][["w"]]
writeStop <- stopECDF[[writeOpp]][[readOpps[[1]]]][[typeOpp]][[loop]][["w"]]
xMinimum = min(xMinimum, quantile(writeStart,0.01), na.rm = TRUE)
xMaximum = max(xMaximum, quantile(writeStop,0.90), na.rm = TRUE)
}
plot(c(0), xlim=c(xMinimum, xMaximum), ylim = c(0,1), col = 1, pch = 1, type = "n",
xlab =tijd,ylab = cumKans, main = NULL)
index <- 0
# Start+stop curve pair per write concern, one color per concern.
for(writeOpp in writeOpps){
index <- index + 1
writeStart <- startECDF[[writeOpp]][[readOpps[[1]]]][[typeOpp]][[loop]][["w"]]
writeStop <- stopECDF[[writeOpp]][[readOpps[[1]]]][[typeOpp]][[loop]][["w"]]
lines(writeStart, col = index, pch = index, do.p = FALSE)
lines(writeStop, col = index, pch = index, do.p = FALSE)
}
legend("bottomright", writeOpps, col = 1:index, pch=1:index)
dev.off();
}
}
}
# Evaluate each reader's ECDF at a thread-specific point.
#
# dataset        named list of ecdf functions keyed "1", "2", ... per reader.
# readerThreads  reader-thread collection; only its length is used here.
# startpunt      base x value (Dutch: "start point").
# afstand        per-thread increment (Dutch: "distance"); reader i is
#                evaluated at startpunt + afstand * i.
#
# Returns a numeric vector with one completion fraction per reader.
# Fix: preallocates a numeric result and iterates with seq_along() instead
# of `1:length(readerThreads)`, which mis-iterates (1, 0) on empty input.
getPercentageDone <- function(dataset, readerThreads, startpunt, afstand){
  resultSet <- numeric(length(readerThreads))
  for (i in seq_along(readerThreads)) {
    resultSet[i] <- dataset[[toString(i)]](startpunt + afstand * i)
  }
  resultSet
}
# Build a writeOpps x readOpps character matrix of completion percentages:
# each cell is the comma-separated, rounded percentages returned by
# getPercentageDone() for the first `datapunten` reader threads. Cells whose
# lookup or evaluation fails are left NA (errors are deliberately swallowed).
getPercentageDoneMongoDB <- function(dataset, loop, typeOpp, writeOpps, readOpps, readerThreads, startpunt, afstand, datapunten){
  resultSet <- matrix(nrow = length(writeOpps), ncol = length(readOpps))
  rownames(resultSet) <- writeOpps
  colnames(resultSet) <- readOpps
  for (i in seq_along(readOpps)) {
    for (j in seq_along(writeOpps)) {
      tryCatch({
        # Drill down to this (write concern, read concern, type, loop) slice
        # and evaluate the per-reader ECDFs.
        output <- getPercentageDone(
          dataset[[writeOpps[[j]]]][[readOpps[[i]]]][[typeOpp]][[toString(loop)]],
          readerThreads[1:datapunten], startpunt, afstand)
        resultSet[j, i] <- toString(round(output, digits = 2) * 100)
      }, error = function(e) TRUE)
    }
  }
  resultSet
}
# Wrap the percentage matrix from getPercentageDoneMongoDB() in an xtable
# object so it can be printed as a LaTeX table (requires the xtable package
# loaded at the top of this file).
getLatexTableForMongoDB <- function(dataset, loop, typeOpp, writeOpps, readOpps, readerThreads, startpunt, afstand, datapunten){
return(xtable(getPercentageDoneMongoDB(dataset, loop, typeOpp, writeOpps, readOpps, readerThreads, startpunt, afstand, datapunten)))
}
#fileDir <- paste(dir, "Fig/%type%.%extension%", sep="")
#consistencyPlotsMongoForReads(startECDF, endECDF, writeOpps, readOpps, typeOpps, loops, 1:5, 1, fileDir)
#consistencyPlotsMongoForWrites(startECDF, endECDF, writeOpps, readOpps, typeOpps, loops, 1:5, 1, fileDir)
if(debugmodus){
fileName ="D:/Schooljaar 2013-2014/Thesis/Result-Folder/2014-03-24/InsertRawData"
parsed <- consistencyParse(fileName)
postParsed <- consistencyPostParse(parsed$outputR)
consistencyPlotNb(postParsed, parsed$outputW, parsed$readerThreads, parsed$writerThreads, 3, "D:/Schooljaar 2013-2014/Thesis/Result-Folder/2014-03-24/Consistency/%type%.%extension%")
consistencyPlotEachReader(postParsed, parsed$outputW, parsed$readerThreads, parsed$writerThreads, "D:/Schooljaar 2013-2014/Thesis/Result-Folder/2014-03-24/Consistency/%type%.%extension%", 2)
} | /Consistency-parse.R | permissive | thuys/YCSB-R-Scripts | R | false | false | 28,077 | r | source('./plot.R')
# xtable renders the percentage matrices as LaTeX tables (see
# getLatexTableForMongoDB below).
library('xtable')
# Global feature flags consumed by consistencyPlotEachReader: enable the
# ECDF plots and the per-reader timeline plots respectively.
assign("consistencyEcdf", TRUE, envir = .GlobalEnv)
assign("consistencyReaders", TRUE, envir = .GlobalEnv)
# Parse a raw consistency log file (CSV lines: timing,thread,start,delay,value).
# The first line is treated as a header; data lines are kept only when the
# timing field is numeric. Threads whose name contains "W" are classified as
# writers, names containing "R" as readers (a name containing both letters
# lands in the writer branch, which is tested first). Each record is stored
# as list(start, delay, value) under output[[timing]][[thread]][[start]],
# with non-numeric fields silently parsed to NA.
#
# Returns list(writerThreads, readerThreads, outputW, outputR), where the
# *Threads elements are name -> TRUE lookup lists of the threads seen.
consistencyParse <- function(fileName){
#READ FILE
k <- readLines(fileName, warn = FALSE)
splitted <- strsplit(x=k, split = ",")
outputW <- list();
outputR <- list();
RThreads <- list();
WThreads <- list();
# header row is captured but otherwise unused
header <- splitted[[1]]
for(lineNb in 2:length(k)){
line = splitted[[lineNb]]
timing <- line[1]
thread <- line[2]
start <- line [3]
# skip lines whose timing field is not numeric
if(!is.na(suppressWarnings(as.numeric(timing)))){
if(length(grep("W", thread)) > 0){
# writer record
WThreads[[thread]] <- TRUE
if(is.null(outputW[[timing]])){
outputW[[timing]] <- list()
}
if(is.null(outputW[[timing]][[thread]])){
outputW[[timing]][[thread]] <- list()
}
lineList <- list()
lineList[["start"]] <- suppressWarnings(as.numeric(line[3]))
lineList[["delay"]] <- suppressWarnings(as.numeric(line[4]))
lineList[["value"]] <- suppressWarnings(as.numeric(line[5]))
outputW[[timing]][[thread]][[start]] <- lineList
}else if(length(grep("R", thread)) > 0){
# reader record
RThreads[[thread]] <- TRUE
if(is.null(outputR[[timing]])){
outputR[[timing]] <- list()
}
if(is.null(outputR[[timing]][[thread]])){
outputR[[timing]][[thread]] <- list()
}
lineList <- list()
lineList[["start"]] <- suppressWarnings(as.numeric(line[3]))
lineList[["delay"]] <- suppressWarnings(as.numeric(line[4]))
lineList[["value"]] <- suppressWarnings(as.numeric(line[5]))
outputR[[timing]][[thread]][[start]] <- lineList
}
}
}
return(list(writerThreads = WThreads, readerThreads = RThreads, outputW = outputW, outputR = outputR))
}
# Reduce the parsed reader log to the observations where a thread first saw
# a *new* value for a key (the change points), and annotate each change with
# the most recent earlier read found for the same key.
#
# parsedReader: key -> thread -> start -> list(start, delay, value).
# Returns the same nesting, restricted to change points; each kept entry may
# gain beforeThread/beforeStart/beforeValue describing the latest preceding
# read.
#
# NOTE(review): two suspicious details to confirm against intent:
#  - the inner search iterates names(parserThread) — the CURRENT thread's
#    start times — even though innerthread ranges over all threads;
#    names(parserKey[[innerthread]]) was probably intended.
#  - start keys are character strings, so `<`/`>` compare lexicographically
#    (and against the numeric sentinel after coercion); this is only correct
#    while all timestamps have the same number of digits.
consistencyPostParse <- function(parsedReader){
changesInData <- list()
for(key in names(parsedReader)){
parserKey <- parsedReader[[key]]
changesInData[[key]] <- list()
# find all changes per thread
for(thread in names(parserKey)){
threadList <- list()
parserThread <- parserKey[[thread]]
#Collect all changes
isFirst <- TRUE
change <- FALSE
# errors inside are reported and the partial threadList is kept
tryCatch({
for(start in names(parserThread)){
if(isFirst){
# the first observation always counts as a change
isFirst <- FALSE
threadList[[start]] <- parserThread[[start]]
lastValue <- parserThread[[start]]
change <- TRUE
}
else if(areDifferentNumbersWithNa(lastValue[["value"]], parserThread[[start]][["value"]])){
threadList[[start]] <- parserThread[[start]]
lastValue <- parserThread[[start]]
change <- TRUE
}
if(change){
# Try to find last read of other thread
change <- FALSE
minStart <- -1000000000
for(innerthread in names(parserKey)){
for(innerstart in names(parserThread)){
if(innerstart < start && innerstart > minStart){
minStart <- innerstart
minThread <- innerthread
minValue <- parserKey[[innerthread]][[innerstart]][["value"]]
}
}
}
# sentinel unchanged means no earlier read was found
if(minStart > -1000000000){
threadList[[start]]["beforeThread"] <-minThread
threadList[[start]]["beforeStart"] <-minStart
threadList[[start]]["beforeValue"] <-minValue
}
}
}
},error = function(e) print(paste("Problem in parse", thread, start, lastValue[["value"]] , parserThread[[start]][["value"]] )))
changesInData[[key]][[thread]] <- threadList
}
##
}
return(changesInData)
}
# NA-aware inequality: two NAs count as equal (FALSE), a single NA counts as
# different (TRUE), otherwise the values are compared with `!=`.
areDifferentNumbersWithNa <- function(number1, number2){
  if (is.na(number1) && is.na(number2)) {
    return(FALSE)
  }
  if (is.na(number1) || is.na(number2)) {
    return(TRUE)
  }
  number1 != number2
}
# Plot per-key consistency: for every key, how many distinct values each
# reader thread observed (one curve per group of threads with identical
# counts), plus a merged horizontal barplot of the change-count distribution.
#
# parsedReader  post-parsed reader data: key -> thread -> list of changes.
# parsedWriter  writer data; not referenced in this function body.
# readerNames   named list whose names are the reader-thread identifiers.
# writerNames   writer-thread names; not referenced in this function body.
# maxOnSamePlot maximum number of curves drawn on one line plot.
# exportDir     file-name template containing %type% and %extension%.
#
# Side effects: writes PNG files via png()/dev.off(); relies on the globals
# figureWidth, figureHeight and figureRes (defined in plot.R).
consistencyPlotNb <- function(parsedReader, parsedWriter, readerNames, writerNames, maxOnSamePlot, exportDir){
columnNames <- names(readerNames)
rowNames <- names(parsedReader)
# amountOfChanges[key, thread] = number of value changes that thread saw for that key
amountOfChanges <-matrix(nrow=length(rowNames), ncol=length(columnNames), dimnames = list((as.numeric(rowNames)), columnNames))
for(i in 1:nrow(amountOfChanges)){
key <- rowNames[i]
parserKey <- parsedReader[[key]]
# find all changes per thread
for(j in 1:ncol(amountOfChanges)){
thread <- columnNames[j]
parserThread <- parserKey[[thread]]
amountOfChanges[i,j] <-length(parserThread)
}
}
# Find all similar columns
# similarMatrix[a, b] == 1 when threads a and b have identical counts for every key
similarMatrix <- matrix(nrow=ncol(amountOfChanges), ncol=ncol(amountOfChanges))
for(outerThread in 1:ncol(amountOfChanges)){
for(innerThread in outerThread:ncol(amountOfChanges)){
if(areEqualAmount(outerThread, innerThread, amountOfChanges)){
similarMatrix[outerThread, innerThread] = 1
similarMatrix[innerThread, outerThread] = 1
}
else{
similarMatrix[outerThread, innerThread] = 0
similarMatrix[innerThread, outerThread] = 0
}
}
}
# Collapse identical threads into one group label apiece, e.g. "0, 2, 5"
# (thread numbers are rendered 0-based).
labelThreads <- list()
labelList <- list()
labelIndex <- list()
index = 0
for(thread in 1:ncol(amountOfChanges)){
threadNumber = getUniqueThreadID(thread, similarMatrix)
if(threadNumber == thread){
# this thread is the representative of a new group
index <- index + 1
labelThreads[thread] <- index
labelList[index] <- paste((thread-1), "", sep="")
labelIndex[index] <- thread
}
else{
# append this thread's number to its representative's label
labelList[labelThreads[[threadNumber]]] <- paste(labelList[labelThreads[[threadNumber]]], (thread-1), sep = ", ")
}
}
labelArray <- unlist(labelList)
# One line plot per batch of at most maxOnSamePlot thread groups.
for(i in 1:ceiling(index/maxOnSamePlot)){
startNumber <- ((i-1)*maxOnSamePlot+1)
endNumber <- min((i)*maxOnSamePlot, index)
fileNameSub <- gsub("%number%", i, gsub("%type%", "consistency-%number%", gsub("%extension%", "png", exportDir)))
png(filename=fileNameSub, width=figureWidth, height=figureHeight, units="px", res=figureRes)
#plot.new()
plot(x = 0, y = 0, type="n", xlab ="Key number)",ylab = "Amount of changes",
ylim = c(0, max(amountOfChanges)), xlim = as.numeric(c(min((rowNames)), max((rowNames)))))
for(plotNb in startNumber:endNumber){
lines(x=rowNames, y = amountOfChanges[,labelIndex[[plotNb]]],type="l",col = plotNb, pch = plotNb)
}
legend("topleft", legend=labelArray[startNumber:endNumber], col = startNumber:endNumber, pch = startNumber:endNumber)
dev.off();
}
# Make barplot
# countPlot[c+1, g] = number of keys for which group g saw exactly c changes
maxAmountOfChanges <- max(amountOfChanges)
countPlot <- matrix(ncol=length(labelArray), nrow= (maxAmountOfChanges+1))
for(indexCounter in 1:index){
thread <- labelIndex[[indexCounter]]
# NOTE(review): threadTable below is computed but never used.
threadTable <- table(amountOfChanges[,thread])
countPlot[, indexCounter] <- tabulate(amountOfChanges[,thread]+1, nbins = maxAmountOfChanges+1)
}
#Change label
# Re-wrap the group labels: space-separated, with a newline after every 4th entry.
labelListSplitted <- strsplit(labelArray, c(", "))
labelListMerged <- rep(0,index)
for(indexCounter in 1:index){
for(i in 1:length(labelListSplitted[indexCounter][[1]])){
if(labelListMerged[indexCounter] == 0){
labelListMerged[indexCounter] <- labelListSplitted[indexCounter][[1]][i]
}
else{
labelListMerged[indexCounter] <- paste(labelListMerged[indexCounter], labelListSplitted[indexCounter][[1]][i], sep=" ")
}
if(i%%4 == 0){
labelListMerged[indexCounter] <- paste(labelListMerged[indexCounter], "\n", sep="")
}
}
}
fileNameSub <- gsub("%type%", "consistency-merged", gsub("%extension%", "png", exportDir))
png(filename=fileNameSub, width=figureWidth, height=2*figureHeight, units="px", res=figureRes)
# Widen the left margin so the wrapped labels fit; restored after the plot.
oldMar <- par()$mar
newMar <- oldMar
newMar[2] <- newMar[2] + 2
par(mar=newMar)
# NOTE(review): col = c(1:3) recycles across the (maxAmountOfChanges+1)
# stacked segments, so colors repeat beyond 3 categories — confirm intended.
barplot(countPlot, main="Consistency together",
xlab="Number of changes", col = c(1:3),
legend = c(0:max(amountOfChanges)),
names.arg=labelListMerged,
horiz=TRUE)
dev.off();
par(mar=oldMar)
}
# TRUE iff columns `thread1` and `thread2` of `matrix` are element-wise equal.
#
# thread1, thread2  column indices to compare.
# matrix            numeric matrix of change counts (rows = keys); the counts
#                   are list lengths, so NA entries cannot occur here.
#
# Fix: replaces the original `for(row in 1:nrow(matrix))` scan — which
# mis-iterates (1, 0) when the matrix has zero rows — with a vectorized
# all(); on a zero-row matrix this correctly returns TRUE.
areEqualAmount <- function(thread1, thread2, matrix){
  all(matrix[, thread1] == matrix[, thread2])
}
# Return the representative thread for `thread`: the lowest-numbered thread
# whose change profile matches it, according to the 0/1 similarity matrix.
# Thread 1 is always its own representative. The diagonal of the similarity
# matrix is 1, so a match always exists within rows 1..thread.
getUniqueThreadID <- function(thread, matrix){
  if (thread == 1) {
    return(1)
  }
  matches <- which(matrix[seq_len(thread), thread] == 1)
  matches[1]
}
consistencyPlotEachReader <- function(parsedReader, parsedWriter, readerNames, writerNames, exportDir, maxRetries){
numberOfWriters <- length(writerNames)
plotMatrix <- matrix(nrow = length(parsedWriter), ncol = (2*length(writerNames)+2*maxRetries*length(readerNames)))
densityMatrix <- matrix(nrow = length(parsedWriter), ncol = (2*length(writerNames)+2*length(readerNames)))
densityMatrixAll <- matrix(nrow = (length(parsedWriter)*length(readerNames)), ncol = (2))
rownames(plotMatrix) <- names(parsedWriter)
rownames(densityMatrix) <- names(parsedWriter)
colNamesPlot <- rep("", ncol(plotMatrix))
densityMatrixNamesPlot<- rep("", ncol(densityMatrix))
for(j in 1:length(writerNames)){
threadName <- names(writerNames[j])[1]
colNamesPlot[(2*j-1)] <- paste(threadName, "-START", sep="")
colNamesPlot[(2*j)] <- paste(threadName, "-DELAY", sep="")
densityMatrixNamesPlot[(2*j-1)] <- paste(threadName, "-START", sep="")
densityMatrixNamesPlot[(2*j)] <- paste(threadName, "-DELAY", sep="")
}
for(j in 1:length(readerNames)){
threadName <- names(readerNames[j])[1]
for(k in 1:maxRetries){
colNamesPlot[(2*numberOfWriters+2*maxRetries*(j-1)+2*k-1)] <- paste(threadName, "-", k, "-START", sep="")
colNamesPlot[(2*numberOfWriters+2*maxRetries*(j-1)+2*k)] <- paste(threadName, "-", k, "-DELAY", sep="")
}
densityMatrixNamesPlot[(2*numberOfWriters+2*j-1)] <- paste(threadName, "-START", sep="")
densityMatrixNamesPlot[(2*numberOfWriters+2*j)] <- paste(threadName, "-DELAY", sep="")
}
colnames(plotMatrix) <- colNamesPlot
colnames(densityMatrix) <- densityMatrixNamesPlot
colnames(densityMatrixAll) <- c("START", "DELAY")
#writer data gathering (delay)
for(i in 1:length(parsedWriter)){
parserKey <- parsedWriter[i]
parserKey <- parserKey[[names(parserKey)[1]]]
# zoek alle veranderingen per thread
for(j in 1:length(writerNames)){
parserThread <- parserKey[j]
parserThread <- parserThread[[names(parserThread)[1]]]
firstEntry <- parserThread[[names(parserThread)[1]]]
plotMatrix[i,(2*j-1)] <- firstEntry[["start"]]/1000
plotMatrix[i,(2*j)] <- firstEntry[["delay"]]/1000
densityMatrix[i,(2*j-1)] <- firstEntry[["start"]]/1000
densityMatrix[i,(2*j)] <- firstEntry[["delay"]]/1000
}
}
for(i in 1:length(parsedReader)){
parserKey <- parsedReader[i]
parserKey <- parserKey[[names(parserKey)[1]]]
# zoek alle veranderingen per thread
for(j in 1:length(readerNames)){
parserThread <- parserKey[j]
parserThread <- parserThread[[names(parserThread)[1]]]
allValue <- names(parserThread)
startIndex <- 1
for(k in max(length(allValue)-maxRetries+1, 1):length(allValue)){
firstEntry <- parserThread[[allValue[k]]]
if(length(firstEntry) >0){
plotMatrix[i,(2*numberOfWriters+2*maxRetries*(j-1)+2*startIndex-1)] <- firstEntry[["start"]]/1000
plotMatrix[i,(2*numberOfWriters+2*maxRetries*(j-1)+2*startIndex)] <- firstEntry[["delay"]]/1000
densityMatrix[i,(2*numberOfWriters+2*j-1)] <- firstEntry[["start"]]/1000
densityMatrix[i,(2*numberOfWriters+2*j)] <- firstEntry[["delay"]]/1000
densityMatrixAll[(j-1)* length(parsedReader) + i, 1] <- firstEntry[["start"]]/1000
densityMatrixAll[(j-1)* length(parsedReader) + i, 2]<- firstEntry[["delay"]]/1000
}
startIndex <- startIndex + 1
}
}
}
if(consistencyReaders){
rowNames <- names(parsedWriter)
labelPlot <- list()
labelPlot[1] <- "Schrijver start"
labelPlot[2] <- "Schrijver stop"
for(k in 1:maxRetries){
labelPlot[(2*k + 1)] <- paste("Lezer Poging ", k, " start", sep = "")
labelPlot[(2*k + 2)] <- paste("Lezer Poging ", k, " stop", sep = "")
}
labelPlot <- unlist(labelPlot)
for(j in 1:length(readerNames)){
startIndex <- (2*numberOfWriters+2*maxRetries*(j-1)+1)
endIndex <- (2*numberOfWriters+2*maxRetries*(j-1)+2*maxRetries)
maxY = max(max(plotMatrix[,1:2],na.rm = TRUE),
max(plotMatrix[,startIndex:endIndex], na.rm = TRUE) ,na.rm = TRUE)
fileNameSub <- gsub("%type%", paste("consistency-plot-", names(readerNames)[j], sep=""), gsub("%extension%", "png", exportDir))
png(filename=fileNameSub, width=figureWidth, height=figureHeight, units="px", res=figureRes)
plot(x = 0, y = 0, type="n", xlab ="Sleutelnummer",ylab = "Tijd (ms)",
ylim = c(0, 80), xlim = as.numeric(c(as.numeric(min(rowNames))+72000000, as.numeric(min(rowNames))+150000000)))
#Plot writers
lines(x=rowNames, y = plotMatrix[,1],type="l",col = 1, pch = 1)
lines(x=rowNames, y = plotMatrix[,2],type="l",col = 2, pch = 2)
for(k in 1:maxRetries){
if(k == 1){
plotType <- "l"
}else{
plotType <- "b"
}
lines(x=rowNames, y = plotMatrix[,startIndex + 2*(k-1)],type=plotType,col = (2*k+1), pch = (2*k+1))
lines(x=rowNames, y = plotMatrix[,startIndex + 2*(k-1)+1],type=plotType,col = (2*k+2), pch = (2*k+2))
}
legend("topright", labelPlot, col = 1:(2*maxRetries+2), pch = 1:(2*maxRetries+2))
dev.off();
}
}
## ECDF
if(consistencyEcdf){
ecdfStartList <- list()
ecdfStopList <- list()
startWQuery <- ecdf(densityMatrix[,1])
stopWQuery <- ecdf(densityMatrix[,2])
ecdfStartList[["w"]] <- startWQuery
ecdfStopList[["w"]] <- stopWQuery
minLezen <- quantile(startWQuery,0.01)
maxLezen <- quantile(stopWQuery,0.90)
for(j in 1:length(readerNames)){
fileNameSub <- gsub("%type%", paste("ECDF-plot-", names(readerNames)[j], sep=""), gsub("%extension%", "png", exportDir))
png(filename=fileNameSub, width=figureWidthSquare, height=figureHeight, units="px", res=figureRes)
startQuery <- ecdf(densityMatrix[,2*numberOfWriters+2*j-1])
stopQuery <- ecdf(densityMatrix[,2*numberOfWriters+2*j])
plot(startWQuery, xlim=c(quantile(startQuery,0.01), quantile(stopQuery, 0.99)), ylim = c(0,1), col = 1, pch = 1,
do.p = FALSE, xlab =tijd,ylab = cumKans, main = NULL)
lines(stopWQuery, col = 2, pch = 2, do.p = FALSE)
lines(startQuery, col = 3, pch = 3, do.p = FALSE)
lines(stopQuery, col = 4, pch = 4, do.p = FALSE)
legend("bottomright", ecdfLegend, col = 1:4, pch=1:4)
dev.off();
minLezen = min(quantile(startQuery,0.01), minLezen, na.rm = TRUE)
maxLezen = max(quantile(stopQuery,0.99), maxLezen, na.rm = TRUE)
ecdfStartList[[toString(j)]] <- startQuery
ecdfStopList[[toString(j)]] <- stopQuery
}
## ECDF Start
labelPlot <- list()
labelPlot[1] <- "Schrijver"
fileNameSub <- gsub("%type%", paste("ECDF-plot-Start", sep=""), gsub("%extension%", "png", exportDir))
png(filename=fileNameSub, width=figureWidthSquare, height=figureHeight, units="px", res=figureRes)
plot(startWQuery, xlim=c(minLezen, maxLezen), ylim = c(0,1), col = 1, pch = 1,
do.p = FALSE, xlab =tijd,ylab = cumKans, main = NULL)
for(j in 1:length(readerNames)){
lines(ecdf(densityMatrix[,2*numberOfWriters+2*j-1]), col = (j+1), pch = (j+1), do.p = FALSE)
labelPlot[(1+j)] <- paste("Lezer ", j, sep = "")
}
labelPlot <- unlist(labelPlot)
legend("bottomright", labelPlot, col = 1:(length(readerNames)+1), pch = 1:(length(readerNames)+1))
dev.off();
## ECDF Stop
labelPlot <- list()
labelPlot[1] <- "Schrijver"
fileNameSub <- gsub("%type%", paste("ECDF-plot-Stop", sep=""), gsub("%extension%", "png", exportDir))
png(filename=fileNameSub, width=figureWidthSquare, height=figureHeight, units="px", res=figureRes)
plot(stopWQuery, xlim=c(minLezen, maxLezen), ylim = c(0,1), col = 1, pch = 1,
do.p = FALSE, xlab =tijd,ylab = cumKans, main = NULL)
for(j in 1:length(readerNames)){
lines(ecdf(densityMatrix[,2*numberOfWriters+2*j]), col = (j+1), pch = (j+1), do.p = FALSE)
labelPlot[(1+j)] <- paste("Lezer ", j, sep = "")
}
labelPlot <- unlist(labelPlot)
legend("bottomright", labelPlot, col = 1:(length(readerNames)+1), pch = 1:(length(readerNames)+1))
dev.off();
## ECDF ALL
fileNameSub <- gsub("%type%", paste("ECDF-plot-all", sep=""), gsub("%extension%", "png", exportDir))
png(filename=fileNameSub, width=figureWidthSquare, height=figureHeight, units="px", res=figureRes)
startQuery <- ecdf(densityMatrixAll[,1])
stopQuery <- ecdf(densityMatrixAll[,2])
ecdfStartList[["all"]] <- startQuery
ecdfStopList[["all"]] <- stopQuery
plot(startWQuery, xlim=c(quantile(startQuery,0.01), quantile(stopQuery, 0.99)), ylim = c(0,1), col = 1, pch = 1,
do.p = FALSE, xlab = tijd,ylab = cumKans, main = NULL)
lines(stopWQuery, col = 2, pch = 2, do.p = FALSE)
lines(startQuery, col = 3, pch = 3, do.p = FALSE)
lines(stopQuery, col = 4, pch = 4, do.p = FALSE)
legend("bottomright", ecdfLegend, col = 1:4, pch=1:4)
dev.off();
temp <- list(startL = ecdfStartList, stopL = ecdfStopList)
return (list(startL = ecdfStartList, stopL = ecdfStopList))
}
}
## For every (loop, type, read concern, reader thread) combination, draw one
## PNG that overlays the start/stop ECDF curves of every write concern, plus
## one aggregated plot over all reader threads (list key "all") per read
## concern — so write-concern latencies can be compared visually.
##
## startECDF / stopECDF: nested lists indexed as
##   [[writeOpp]][[readOpp]][[typeOpp]][[loop]][[thread-as-string | "all"]]
## whose leaves are stats::ecdf objects.
## exportDir: path template containing %type% and %extension% placeholders.
## writerThreads: accepted for signature symmetry with the sibling plot
## functions but unused here.
## Relies on the globals figureWidthSquare, figureHeight, figureRes, tijd and
## cumKans defined elsewhere in this file.
consistencyPlotsMongoForWrites <- function(startECDF, stopECDF, writeOpps, readOpps, typeOpps, loops, readerThreads, writerThreads, exportDir){
  for(loop in loops){
    for(typeOpp in typeOpps){
      for(readOpp in readOpps){
        for(thread in 1:length(readerThreads)){
          # x-range: 1st percentile of the earliest start curve up to the
          # 97th percentile of the latest stop curve, over all write concerns.
          xMinimum = NA
          xMaximum = NA
          for(writeOpp in writeOpps){
            xMinimum <- min(xMinimum, quantile(startECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][[toString(x=thread)]],0.01), na.rm = TRUE)
            xMaximum <- max(xMaximum, quantile(stopECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][[toString(x=thread)]],0.97), na.rm = TRUE)
          }
          fileNameSub <- gsub("%type%", paste("ECDF-Write-", typeOpp, "-", readOpp, "-", thread, "-", loop, sep=""), gsub("%extension%", "png", exportDir))
          png(filename=fileNameSub, width=figureWidthSquare, height=figureHeight, units="px", res=figureRes)
          # Empty frame first; every ECDF is layered on with lines().
          plot(c(0), xlim=c(xMinimum, xMaximum), ylim = c(0,1), col = 1, pch = 1, type = "n",
               xlab =tijd,ylab = cumKans, main = NULL)
          index <- 0
          for(writeOpp in writeOpps){
            index <- index + 1
            lines(startECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][[toString(thread)]], col = index, pch = index, do.p = FALSE)
            lines(stopECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][[toString(thread)]], col = index, pch = index, do.p = FALSE)
          }
          legend("bottomright", writeOpps, col = 1:index, pch=1:index)
          dev.off();
        }
        # Same plot, aggregated over every reader thread (list key "all").
        # NOTE(review): this branch duplicates the per-thread branch above
        # almost verbatim; a shared helper would remove the duplication.
        thread = "all"
        xMinimum = NA
        xMaximum = NA
        for(writeOpp in writeOpps){
          xMinimum <- min(xMinimum, quantile(startECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][[toString(thread)]],0.01), na.rm = TRUE)
          xMaximum <- max(xMaximum, quantile(stopECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][[toString(thread)]],0.97), na.rm = TRUE)
        }
        fileNameSub <- gsub("%type%", paste("ECDF-Write-", typeOpp, "-", readOpp, "-", thread, "-", loop, sep=""), gsub("%extension%", "png", exportDir))
        png(filename=fileNameSub, width=figureWidthSquare, height=figureHeight, units="px", res=figureRes)
        plot(c(0), xlim=c(xMinimum, xMaximum), ylim = c(0,1), col = 1, pch = 1, type = "n",
             xlab =tijd,ylab = cumKans, main = NULL)
        index <- 0
        for(writeOpp in writeOpps){
          index <- index + 1
          lines(startECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][[toString(x=thread)]], col = index, pch = index, do.p = FALSE)
          lines(stopECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][[toString(x=thread)]], col = index, pch = index, do.p = FALSE)
        }
        legend("bottomright", writeOpps, col = 1:index, pch=1:index)
        dev.off();
      }
    }
  }
}
## For every (loop, type, write concern, reader thread) combination, draw one
## PNG comparing the writer's start/stop ECDFs against the start/stop ECDFs of
## every read concern, plus one aggregated "all"-threads plot per write
## concern.
##
## startECDF / stopECDF: nested lists indexed as
##   [[writeOpp]][[readOpp]][[typeOpp]][[loop]][[thread-as-string | "all" | "w"]]
## where key "w" holds the writer's ECDF (assumed equivalent under every
## readOpp key — see bug-fix note below).
## exportDir: path template containing %type% and %extension% placeholders.
## writerThreads: accepted for signature symmetry but unused.
## Relies on the globals figureWidthSquare, figureHeight, figureRes, tijd and
## cumKans defined elsewhere in this file.
consistencyPlotsMongoForReads <- function(startECDF, stopECDF, writeOpps, readOpps, typeOpps, loops, readerThreads, writerThreads, exportDir){
  readLabel <- c("Schrijver start", "Schrijver stop", readOpps)
  for (loop in loops) {
    for (typeOpp in typeOpps) {
      for (writeOpp in writeOpps) {
        for (thread in 1:length(readerThreads)) {
          fileNameSub <- gsub("%type%", paste("ECDF-Reads-", typeOpp, "-", writeOpp, "-", thread, "-", loop, sep=""), gsub("%extension%", "png", exportDir))
          # BUG FIX: the original looked up the writer ECDF with `readOpp`,
          # which is not bound yet on the first pass (the for(readOpp ...)
          # loop below runs later), raising "object 'readOpp' not found".
          # Index with the first read concern explicitly, exactly as the
          # sibling plotWriteComparison() does for its "w" lookups.
          writeStart <- startECDF[[writeOpp]][[readOpps[[1]]]][[typeOpp]][[loop]][["w"]]
          writeStop <- stopECDF[[writeOpp]][[readOpps[[1]]]][[typeOpp]][[loop]][["w"]]
          # x-range: writer's 1st percentile up to the widest upper tail among
          # writer (90th) and readers (97th).
          xMinimum <- quantile(writeStart, 0.01)
          xMaximum <- quantile(writeStop, 0.90)
          for (readOpp in readOpps) {
            xMinimum <- min(xMinimum, quantile(startECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][[toString(thread)]], 0.01), na.rm = TRUE)
            xMaximum <- max(xMaximum, quantile(stopECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][[toString(thread)]], 0.97), na.rm = TRUE)
          }
          png(filename = fileNameSub, width = figureWidthSquare, height = figureHeight, units = "px", res = figureRes)
          # Empty frame first; every ECDF is layered on with lines().
          plot(c(0), xlim = c(xMinimum, xMaximum), ylim = c(0, 1), col = 1, pch = 1, type = "n",
               xlab = tijd, ylab = cumKans, main = NULL)
          lines(writeStart, col = 1, pch = 1, do.p = FALSE)
          lines(writeStop, col = 2, pch = 2, do.p = FALSE)
          index <- 2
          for (readOpp in readOpps) {
            index <- index + 1
            lines(startECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][[toString(x = thread)]], col = index, pch = index, do.p = FALSE)
            lines(stopECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][[toString(x = thread)]], col = index, pch = index, do.p = FALSE)
          }
          legend("bottomright", readLabel, col = 1:index, pch = 1:index)
          dev.off()
        }
        ## Aggregated plot over every reader thread (list key "all").
        thread <- "all"
        # Same fix as above: the original relied on `readOpp` leaking out of
        # the inner loop (i.e. the LAST read concern); use the first one
        # explicitly for the writer lookup.
        writeStart <- startECDF[[writeOpp]][[readOpps[[1]]]][[typeOpp]][[loop]][["w"]]
        writeStop <- stopECDF[[writeOpp]][[readOpps[[1]]]][[typeOpp]][[loop]][["w"]]
        xMinimum <- quantile(writeStart, 0.01)
        xMaximum <- quantile(writeStop, 0.90)
        for (readOpp in readOpps) {
          xMinimum <- min(xMinimum, quantile(startECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][[toString(thread)]], 0.01), na.rm = TRUE)
          xMaximum <- max(xMaximum, quantile(stopECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][[toString(thread)]], 0.97), na.rm = TRUE)
        }
        fileNameSub <- gsub("%type%", paste("ECDF-Reads-", typeOpp, "-", writeOpp, "-", thread, "-", loop, sep=""), gsub("%extension%", "png", exportDir))
        png(filename = fileNameSub, width = figureWidthSquare, height = figureHeight, units = "px", res = figureRes)
        plot(c(0), xlim = c(xMinimum, xMaximum), ylim = c(0, 1), col = 1, pch = 1, type = "n",
             xlab = tijd, ylab = cumKans, main = NULL)
        lines(writeStart, col = 1, pch = 1, do.p = FALSE)
        lines(writeStop, col = 2, pch = 2, do.p = FALSE)
        index <- 2
        for (readOpp in readOpps) {
          index <- index + 1
          lines(startECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][[toString(x = thread)]], col = index, pch = index, do.p = FALSE)
          lines(stopECDF[[writeOpp]][[readOpp]][[typeOpp]][[loop]][[toString(x = thread)]], col = index, pch = index, do.p = FALSE)
        }
        legend("bottomright", readLabel, col = 1:index, pch = 1:index)
        dev.off()
      }
    }
  }
}
## Per (loop, type) cell, draw one PNG overlaying the writer start/stop ECDFs
## of every write concern (looked up under the first read concern's "w" key),
## so write-concern latencies can be compared directly.
## exportDir is a path template with %type% / %extension% placeholders; the
## globals figureWidthSquare, figureHeight, figureRes, tijd and cumKans come
## from elsewhere in this file.  readerThreads / writerThreads are accepted
## for signature symmetry but unused.
plotWriteComparison <- function(startECDF, stopECDF, writeOpps, readOpps, typeOpps, loops, readerThreads, writerThreads, exportDir){
  readLabel <- c("Schrijver start", "Schrijver stop", readOpps)
  firstRead <- readOpps[[1]]
  for (loop in loops) {
    for (typeOpp in typeOpps) {
      outFile <- gsub("%type%",
                      paste0("ECDF-Compare-Write-", typeOpp, "-", loop),
                      gsub("%extension%", "png", exportDir))
      png(filename = outFile, width = figureWidthSquare, height = figureHeight,
          units = "px", res = figureRes)
      # x-range: 1st percentile of the earliest start curve up to the 90th
      # percentile of the latest stop curve, across all write concerns.
      lowerBounds <- vapply(writeOpps, function(w) {
        quantile(startECDF[[w]][[firstRead]][[typeOpp]][[loop]][["w"]], 0.01)
      }, numeric(1))
      upperBounds <- vapply(writeOpps, function(w) {
        quantile(stopECDF[[w]][[firstRead]][[typeOpp]][[loop]][["w"]], 0.90)
      }, numeric(1))
      # Empty frame first; the curves are layered on with lines().
      plot(c(0), xlim = c(min(lowerBounds, na.rm = TRUE), max(upperBounds, na.rm = TRUE)),
           ylim = c(0, 1), col = 1, pch = 1, type = "n",
           xlab = tijd, ylab = cumKans, main = NULL)
      for (idx in seq_along(writeOpps)) {
        w <- writeOpps[[idx]]
        lines(startECDF[[w]][[firstRead]][[typeOpp]][[loop]][["w"]],
              col = idx, pch = idx, do.p = FALSE)
        lines(stopECDF[[w]][[firstRead]][[typeOpp]][[loop]][["w"]],
              col = idx, pch = idx, do.p = FALSE)
      }
      legend("bottomright", writeOpps,
             col = seq_along(writeOpps), pch = seq_along(writeOpps))
      dev.off()
    }
  }
}
## Evaluate each per-thread ECDF at a thread-specific probe point.
##
## `dataset` is a list of ECDF (step) functions keyed by the thread index as a
## string ("1", "2", ...).  Thread i's function is evaluated at
## `startpunt + afstand * i`, i.e. every thread is probed `afstand` further
## along the x-axis than the previous one.
##
## @param dataset       named list of functions keyed "1".."n".
## @param readerThreads vector whose length gives the number of threads to probe.
## @param startpunt     base probe position (Dutch: "starting point").
## @param afstand       per-thread probe offset (Dutch: "distance").
## @return numeric vector of ECDF values, one per thread; numeric(0) when
##   readerThreads is empty.
getPercentageDone <- function(dataset, readerThreads, startpunt, afstand){
  # vapply + seq_along replaces the original `resultSet <- 1:length(...)` loop:
  # it preallocates, enforces one numeric scalar per thread, and is safe for
  # empty input (the original built c(1, 0) and then errored in the loop).
  vapply(
    seq_along(readerThreads),
    function(i) dataset[[toString(i)]](startpunt + afstand * i),
    numeric(1)
  )
}
## Build a writeOpps x readOpps character matrix of "percentage done" strings
## for one (loop, typeOpp) cell: each entry is the comma-separated list of
## per-thread ECDF values (rounded, scaled to 0..100) from getPercentageDone().
##
## @param dataset    nested ECDF list [[writeOpp]][[readOpp]][[typeOpp]][[loop]].
## @param loop       run identifier; coerced with toString() for the lookup.
## @param datapunten number of reader threads to probe (Dutch: "data points").
## @return character matrix (rows = write concerns, cols = read concerns);
##   cells stay NA when the lookup or evaluation fails.
getPercentageDoneMongoDB <- function(dataset, loop, typeOpp, writeOpps, readOpps, readerThreads, startpunt, afstand, datapunten){
  resultSet <- matrix(nrow = length(writeOpps), ncol = length(readOpps))
  rownames(resultSet) <- writeOpps
  colnames(resultSet) <- readOpps
  i <- 0  # column index (read concern)
  for(readOpp in readOpps){
    i <- i + 1
    j <- 0  # row index (write concern)
    for(writeOpp in writeOpps){
      j <- j + 1
      # Deliberate best-effort: combinations missing from `dataset` leave the
      # cell at NA instead of aborting the whole table.
      # NOTE(review): the handler discards the error entirely; consider
      # logging conditionMessage(e) to make missing combinations visible.
      tryCatch({
        output <- getPercentageDone(dataset[[writeOpp]][[readOpp]][[typeOpp]][[toString(loop)]], readerThreads[1:datapunten], startpunt, afstand)
        resultSet[j, i] <- toString(round(output, digits=2)*100)
      },error = function(e) TRUE)
    }
  }
  return(resultSet)
}
## Wrap the percentage-done matrix for one (loop, typeOpp) cell in an xtable
## object so it can be printed as a LaTeX table.  All arguments are forwarded
## unchanged to getPercentageDoneMongoDB().
getLatexTableForMongoDB <- function(dataset, loop, typeOpp, writeOpps, readOpps, readerThreads, startpunt, afstand, datapunten){
  percentages <- getPercentageDoneMongoDB(dataset, loop, typeOpp, writeOpps,
                                          readOpps, readerThreads, startpunt,
                                          afstand, datapunten)
  xtable(percentages)
}
#fileDir <- paste(dir, "Fig/%type%.%extension%", sep="")
#consistencyPlotsMongoForReads(startECDF, endECDF, writeOpps, readOpps, typeOpps, loops, 1:5, 1, fileDir)
#consistencyPlotsMongoForWrites(startECDF, endECDF, writeOpps, readOpps, typeOpps, loops, 1:5, 1, fileDir)
# Manual debug driver: parses one raw measurement file and regenerates the
# consistency plots.  Runs only when the global `debugmodus` flag is TRUE;
# all paths are hard-coded to the author's Windows machine, so this is only
# usable there.
if(debugmodus){
  fileName ="D:/Schooljaar 2013-2014/Thesis/Result-Folder/2014-03-24/InsertRawData"
  parsed <- consistencyParse(fileName)
  postParsed <- consistencyPostParse(parsed$outputR)
  consistencyPlotNb(postParsed, parsed$outputW, parsed$readerThreads, parsed$writerThreads, 3, "D:/Schooljaar 2013-2014/Thesis/Result-Folder/2014-03-24/Consistency/%type%.%extension%")
  consistencyPlotEachReader(postParsed, parsed$outputW, parsed$readerThreads, parsed$writerThreads, "D:/Schooljaar 2013-2014/Thesis/Result-Folder/2014-03-24/Consistency/%type%.%extension%", 2)
} |
# classify_homopolymer.R
# Compare pyrosequencing signal intensity values against reference ranges
#
# Chun Hang AU (chau@hksh.com)
# Hong Kong Sanatorium and Hospital
#
# For one (refseq, pos, gapcol, genotype) locus this script joins the observed
# per-sample flow-value fractions against precomputed reference-range
# statistics and appends the flow-value groups that are upper-tail (Q3)
# outliers (with fraction >= 5%) to the output file.
suppressPackageStartupMessages(suppressWarnings(library(RSQLite)))
suppressPackageStartupMessages(suppressWarnings(library(ggplot2)))
suppressPackageStartupMessages(suppressWarnings(library(sqldf)))
suppressPackageStartupMessages(suppressWarnings(library(plyr)))
# Positional command-line arguments:
#   1 flowstatdbfile  SQLite database holding the flow statistics
#   2 flowstattable   name of the observed flow-stat table
#   3 refrangetable   name of the reference-range table
#   4 fileout         output path ("-" for STDOUT)
#   5 refseq          reference sequence identifier
#   6 pos             position within the reference
#   7 gapcol          gap column index
#   8 genotype        genotype label
args <- commandArgs(trailingOnly = TRUE)
# use "-" as argument to print to STDOUT
flowstatdbfile<-args[1]
flowstattable<-args[2]
refrangetable<-args[3]
fileout<-args[4]
refseq<-args[5]
pos<-args[6]
gapcol<-args[7]
genotype<-args[8]
sqlite <- dbDriver("SQLite")
flowstatdb <- dbConnect(sqlite,flowstatdbfile)
# support print to STDOUT
if (fileout == "-") fileout<-stdout()
# Per sample: summed read count per flow-value group, plus the locus coverage
# via a correlated subquery, joined to the reference distribution statistics
# for the matching flow-value group.
# NOTE(review): table names and the refseq/pos/gapcol/genotype arguments are
# spliced into the SQL by string concatenation — safe only while the
# command-line arguments are trusted.
out<-dbGetQuery(flowstatdb, paste("SELECT a.autoanalysis, a.processing, a.batch, a.sample, a.refseq, a.pos, a.gapcol, a.genotype, a.flowvalue_group, SUM(a.flowvalue_readcnt) as flowvalue_group_sum, (SELECT SUM(b.flowvalue_readcnt) FROM flowstat b WHERE a.processing = b.processing AND a.batch = b.batch AND a.sample = b.sample AND a.refseq = b.refseq AND a.pos = b.pos AND a.gapcol = b.gapcol AND a.genotype = b.genotype GROUP BY b.processing, b.batch, b.sample, b.refseq, b.pos, b.gapcol, b.genotype) AS pos_coverage, r.processing, r.refseq, r.pos, r.gapcol, r.genotype, r.flowvalue_group, r.refN, r.refmean, r.refsd, r.refse, r.refiqr, r.refq1, r.refq3, r.refq1outlier, r.refq3outlier
FROM ", flowstattable," a, ", refrangetable," r
WHERE a.processing = r.processing AND a.refseq = r.refseq AND a.pos = r.pos AND a.gapcol = r.gapcol AND a.genotype = r.genotype AND a.flowvalue_group = r.flowvalue_group
AND a.processing='ampliconprocessing' AND a.refseq = '", refseq,"' AND a.pos = ",pos," AND a.gapcol = ", gapcol," AND a.genotype = '", genotype,"'
GROUP BY a.processing, a.batch, a.sample, a.refseq, a.pos, a.gapcol, a.genotype, a.flowvalue_group;", sep=""))
if (nrow(out) >= 1) {
  # Fraction of the locus coverage falling in this flow-value group.
  out$flowvalue_group_sum_frac<-out$flowvalue_group_sum / out$pos_coverage
  # Flag groups at or above the reference Q3-outlier threshold.
  out$classq3outlier <- out$flowvalue_group_sum_frac >= out$refq3outlier
  # Standard score of the observed fraction against the reference distribution.
  out$zscore <- ( out$flowvalue_group_sum_frac - out$refmean ) / out$refsd
  # append=TRUE together with col.names=TRUE makes write.table() warn about
  # "appending column names to file"; that is intentional here, so the
  # warning is suppressed.
  suppressWarnings(write.table(subset(out, classq3outlier==TRUE & flowvalue_group_sum_frac >= 0.05), file=fileout, col.names=TRUE, row.names=FALSE, quote=FALSE, sep="\t", append=TRUE))
} | /classify_homopolymer.R | permissive | tommyau/homopolymerqz | R | false | false | 2,675 | r | # classify_homopolymer.R
# Compare pyrosequencing signal intensity values against reference ranges
#
# Chun Hang AU (chau@hksh.com)
# Hong Kong Sanatorium and Hospital
#
suppressPackageStartupMessages(suppressWarnings(library(RSQLite)))
suppressPackageStartupMessages(suppressWarnings(library(ggplot2)))
suppressPackageStartupMessages(suppressWarnings(library(sqldf)))
suppressPackageStartupMessages(suppressWarnings(library(plyr)))
args <- commandArgs(trailingOnly = TRUE)
# use "-" as argument to print to STDOUT
flowstatdbfile<-args[1]
flowstattable<-args[2]
refrangetable<-args[3]
fileout<-args[4]
refseq<-args[5]
pos<-args[6]
gapcol<-args[7]
genotype<-args[8]
sqlite <- dbDriver("SQLite")
flowstatdb <- dbConnect(sqlite,flowstatdbfile)
# support print to STDOUT
if (fileout == "-") fileout<-stdout()
out<-dbGetQuery(flowstatdb, paste("SELECT a.autoanalysis, a.processing, a.batch, a.sample, a.refseq, a.pos, a.gapcol, a.genotype, a.flowvalue_group, SUM(a.flowvalue_readcnt) as flowvalue_group_sum, (SELECT SUM(b.flowvalue_readcnt) FROM flowstat b WHERE a.processing = b.processing AND a.batch = b.batch AND a.sample = b.sample AND a.refseq = b.refseq AND a.pos = b.pos AND a.gapcol = b.gapcol AND a.genotype = b.genotype GROUP BY b.processing, b.batch, b.sample, b.refseq, b.pos, b.gapcol, b.genotype) AS pos_coverage, r.processing, r.refseq, r.pos, r.gapcol, r.genotype, r.flowvalue_group, r.refN, r.refmean, r.refsd, r.refse, r.refiqr, r.refq1, r.refq3, r.refq1outlier, r.refq3outlier
FROM ", flowstattable," a, ", refrangetable," r
WHERE a.processing = r.processing AND a.refseq = r.refseq AND a.pos = r.pos AND a.gapcol = r.gapcol AND a.genotype = r.genotype AND a.flowvalue_group = r.flowvalue_group
AND a.processing='ampliconprocessing' AND a.refseq = '", refseq,"' AND a.pos = ",pos," AND a.gapcol = ", gapcol," AND a.genotype = '", genotype,"'
GROUP BY a.processing, a.batch, a.sample, a.refseq, a.pos, a.gapcol, a.genotype, a.flowvalue_group;", sep=""))
if (nrow(out) >= 1) {
out$flowvalue_group_sum_frac<-out$flowvalue_group_sum / out$pos_coverage
out$classq3outlier <- out$flowvalue_group_sum_frac >= out$refq3outlier
out$zscore <- ( out$flowvalue_group_sum_frac - out$refmean ) / out$refsd
# since append=TRUE and col.names=TRUE, this leads to the following warning message
#Warning message:
#In write.table(subset(out, classq3outlier == TRUE & flowvalue_group_sum_frac >= :
# appending column names to file
# we are going to suppress such wanring
suppressWarnings(write.table(subset(out, classq3outlier==TRUE & flowvalue_group_sum_frac >= 0.05), file=fileout, col.names=TRUE, row.names=FALSE, quote=FALSE, sep="\t", append=TRUE))
} |
library(Rvcg)
### Name: vcgCreateKDtreeFromBarycenters
### Title: create a KD-tree from Barycenters for multiple closest point
### searches on a mesh
### Aliases: vcgCreateKDtreeFromBarycenters
### ** Examples
## Not run:
##D data(humface);data(dummyhead)
##D barytree <- vcgCreateKDtreeFromBarycenters(humface)
##D closest <- vcgClostOnKDtreeFromBarycenters(barytree,dummyhead.mesh,k=50,threads=1)
## End(Not run)
| /data/genthat_extracted_code/Rvcg/examples/vcgCreateKDtreeFromBarycenters.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 425 | r | library(Rvcg)
### Name: vcgCreateKDtreeFromBarycenters
### Title: create a KD-tree from Barycenters for multiple closest point
### searches on a mesh
### Aliases: vcgCreateKDtreeFromBarycenters
### ** Examples
## Not run:
##D data(humface);data(dummyhead)
##D barytree <- vcgCreateKDtreeFromBarycenters(humface)
##D closest <- vcgClostOnKDtreeFromBarycenters(barytree,dummyhead.mesh,k=50,threads=1)
## End(Not run)
|
## Convert per-sample clone text exports into one .xlsx workbook per sample,
## with one sheet per receptor chain (TRD, TRG, TRA, TRB, IGH, IGL).
library(openxlsx)
# Full paths (for reading) and bare file names (for deriving output names).
files <- list.files("Dropbox (Cambridge University)/GammaDelta/Analysis/Clones/Txt_files/", full.names = TRUE)
files.2 <- list.files("Dropbox (Cambridge University)/GammaDelta/Analysis/Clones/Txt_files/", full.names = FALSE)
# Files are consumed in fixed groups of 6 per sample.  list.files() returns
# names alphabetically, so within a group the assumed order is
# IGH, IGL, TRA, TRB, TRD, TRG — TODO confirm this matches the file naming.
for(i in seq(1, length(files), 6)){
  cur_data = list("TRD" = read.table(files[i+4], sep = "\t", header = TRUE),
                  "TRG" = read.table(files[i+5], sep = "\t", header = TRUE),
                  "TRA" = read.table(files[i+2], sep = "\t", header = TRUE),
                  "TRB" = read.table(files[i+3], sep = "\t", header = TRUE),
                  "IGH" = read.table(files[i], sep = "\t", header = TRUE),
                  "IGL" = read.table(files[i+1], sep = "\t", header = TRUE))
  # Output name: first 11 underscore-separated tokens of the group's first
  # file name (presumably the shared sample identifier — verify).
  file.name <- paste(unlist(strsplit(files.2[i], "_"))[1:11], collapse = "_")
  write.xlsx(cur_data, file = paste("Dropbox (Cambridge University)/GammaDelta/Analysis/Clones/Xlsx_files/",
                                    file.name, ".xlsx", sep = ""))
}
| /Functions/Merge_clones.R | no_license | MarioniLab/GammaDeltaTcells2018 | R | false | false | 999 | r | library(openxlsx)
files <- list.files("Dropbox (Cambridge University)/GammaDelta/Analysis/Clones/Txt_files/", full.names = TRUE)
files.2 <- list.files("Dropbox (Cambridge University)/GammaDelta/Analysis/Clones/Txt_files/", full.names = FALSE)
for(i in seq(1, length(files), 6)){
cur_data = list("TRD" = read.table(files[i+4], sep = "\t", header = TRUE),
"TRG" = read.table(files[i+5], sep = "\t", header = TRUE),
"TRA" = read.table(files[i+2], sep = "\t", header = TRUE),
"TRB" = read.table(files[i+3], sep = "\t", header = TRUE),
"IGH" = read.table(files[i], sep = "\t", header = TRUE),
"IGL" = read.table(files[i+1], sep = "\t", header = TRUE))
file.name <- paste(unlist(strsplit(files.2[i], "_"))[1:11], collapse = "_")
write.xlsx(cur_data, file = paste("Dropbox (Cambridge University)/GammaDelta/Analysis/Clones/Xlsx_files/",
file.name, ".xlsx", sep = ""))
}
|
## Exploratory Data Analysis, Plot 2: line chart of Global Active Power (kW)
## over time for 2007-02-01 .. 2007-02-02, written to plot2.png (480x480).
setwd("C:/Users/MIGUEL GUILLEN/Documents/GitHub/ExData_Plotting1/Project1")
# "?" encodes missing measurements in this dataset.
hpc <- read.table("household_power_consumption.txt", header=TRUE, sep=";", na.strings = "?", colClasses = c('character','character','numeric','numeric','numeric','numeric','numeric','numeric','numeric'))
## Format date to Type Date
hpc$Date <- as.Date(hpc$Date, "%d/%m/%Y")
## Filter data set from Feb. 1, 2007 to Feb. 2, 2007
hpc <- subset(hpc,Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2"))
## Remove incomplete observations
hpc <- hpc[complete.cases(hpc),]
## Combine Date and Time columns into a single timestamp string
dateTime <- paste(hpc$Date, hpc$Time)
## Name the vector
dateTime <- setNames(dateTime, "DateTime")
## Remove the now-redundant Date and Time columns
hpc <- hpc[ ,!(names(hpc) %in% c("Date","Time"))]
## Add DateTime column
hpc <- cbind(dateTime, hpc)
## Parse the timestamp into POSIXct for a proper time axis
hpc$dateTime <- as.POSIXct(dateTime)
## Create Plot 2
# BUG FIX: the original called dev.copy(png, ...) BEFORE anything was plotted
# (dev.copy errors on the null device when run non-interactively) and plotted
# against `t$dateTime` — `t` is base R's transpose function, not this data
# frame, so the call failed with "object of type 'closure' is not
# subsettable".  Open the png device directly and use hpc$dateTime.
png(filename="plot2.png", width=480, height=480)
plot(hpc$Global_active_power~hpc$dateTime, type="l", ylab="Global Active Power (kilowatts)", xlab="")
dev.off()
| /Project1_Plot2.R | no_license | maguillens/ExData_Plotting1 | R | false | false | 1,099 | r | setwd("C:/Users/MIGUEL GUILLEN/Documents/GitHub/ExData_Plotting1/Project1")
hpc <- read.table("household_power_consumption.txt", header=TRUE, sep=";", na.strings = "?", colClasses = c('character','character','numeric','numeric','numeric','numeric','numeric','numeric','numeric'))
## Format date to Type Date
hpc$Date <- as.Date(hpc$Date, "%d/%m/%Y")
## Filter data set from Feb. 1, 2007 to Feb. 2, 2007
hpc <- subset(hpc,Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2"))
## Remove incomplete observation
hpc <- hpc[complete.cases(hpc),]
## Combine Date and Time column
dateTime <- paste(hpc$Date, hpc$Time)
## Name the vector
dateTime <- setNames(dateTime, "DateTime")
## Remove Date and Time column
hpc <- hpc[ ,!(names(hpc) %in% c("Date","Time"))]
## Add DateTime column
hpc <- cbind(dateTime, hpc)
## Format dateTime Column
hpc$dateTime <- as.POSIXct(dateTime)
## Create Plot 2
dev.copy(png,"plot2.png", width=480, height=480)
plot(hpc$Global_active_power~t$dateTime, type="l", ylab="Global Active Power (kilowatts)", xlab="")
dev.off()
|
# ---------------------------------------------------------------------------------------------- #
# Pulling in data and setup
library(dplyr)
library(ggplot2)
library(plotly)
library(leaflet)
# Raw inputs, expected in the working directory.
landfill <- read.csv("landfill.csv")
power <- read.csv("power.csv")
water <- read.csv("water.csv")
pop2017 <- read.csv("pop2017.csv")
popdensity <- read.csv("popdensity.csv")
# Presumably supplies choroplthFunc() used further down — verify.
source("functions.R")
# Abbreviation <-> full-name lookup built from base R's state.name/state.abb.
# NOTE(review): as.data.frame(state.name, state.abb) passes state.abb as the
# positional `row.names` argument, so abbreviations become row names and the
# mutate() re-adds them as a column.  data.frame(state.name,
# stateabrev = state.abb) would express the intent directly — confirm before
# changing, since later joins key on the stateabrev column.
states <- as.data.frame(state.name, state.abb) %>%
  mutate(stateabrev = state.abb)
# ---------------------------------------------------------------------------------------------- #
# Landfill Data
landfill <- landfill %>%
select(State, Landfill.Name, Longitude, Latitude, Ownership.Type, Year.Landfill.Opened,
Landfill.Closure.Year, Waste.in.Place..tons., LFG.Collection.System.In.Place.,
LFG.Collected..mmscfd., LFG.Energy.Project.Type, Project.Type.Category, MW.Capacity,
Current.Year.Emission.Reductions..MMTCO2e.yr....Direct)
landfill_by_state <- landfill %>%
select(State, Landfill.Name, Waste.in.Place..tons., LFG.Collection.System.In.Place.,
LFG.Collected..mmscfd., MW.Capacity, Current.Year.Emission.Reductions..MMTCO2e.yr....Direct) %>%
group_by(State) %>%
summarise(num.landfills = n(),
total.waste = sum(as.numeric(Waste.in.Place..tons.)),
total.with.lfg = sum(LFG.Collection.System.In.Place. == "Yes", na.rm = TRUE),
total.lfg.collected = sum(LFG.Collected..mmscfd., na.rm = TRUE),
total.capacity = sum(MW.Capacity, na.rm = TRUE),
total.reduction = sum(Current.Year.Emission.Reductions..MMTCO2e.yr....Direct, na.rm = TRUE))
#write.csv(landfill_by_state, file = "CABS.csv")
#casum <- landfill %>%
# filter(State == "CA") %>%
# filter(Waste.in.Place..tons. != "NA") %>%
# summarise(total = sum(as.numeric(Waste.in.Place..tons.)))
landfill_by_state <- read.csv("CABS.csv")
landfill_by_state <- landfill_by_state %>%
filter(State != "PR")
#landfill_by_state <- left_join(landfill_by_state, states, by = c("State" = "stateabrev"))
#landfill_by_state <- left_join(landfill_by_state, pop2017, by = c("state.name" = "State"))
#landfill_by_state <- landfill_by_state %>%
# filter(state.name != "N/A")
#landfill_by_state <-left_join(landfill_by_state, popdensity, by = c("state.name" = "State"))
# ----------------------------------------------------------------------------------------------#
# Water Data
# Drop the per-source/per-sector breakdown columns (keeping the totals) and
# derive a combined self-supplied-industrial withdrawal figure.
water <- water %>%
  select(-Groundwater.Fresh, -Groundwater.Saline, -Surfacewater.Fresh, -Surfacewater.Saline, -Irrigation,
         -LiveStock, -Aquaculture, -Mining.Fresh, -Mining.Saline, -ThermoelectricPower.Fresh,
         -ThermoelectricPower.Saline,
         -PublicWithdrawals.Groundwater, -PublicWithdrawals.Surfacewater, -Irrigation.Groundwater,
         -Irigation.Surfacewater, -Livestock.Groundwater, -Livestock.Surfacewater, -Aquaculture.Groundwater,
         -Aquaculture.Surfacewater, -Mining.Groundwater, -Mining.Surfacewater, -Thermoelectric.Groundwater,
         -Thermoelectric.Surfacewater) %>%
  # NOTE(review): this reads water$... from the ORIGINAL data frame rather
  # than the piped (post-select) one.  It happens to work because `water` is
  # not reassigned until the pipeline finishes, but referencing the columns
  # directly (they survive the select) would be safer.
  mutate(SelfSuppliedIndustrialTotal = (as.numeric(water$SelfSuppliedIndustrial.Saline) +
                                          as.numeric(water$SelfSuppliedIndustrial.Fresh))) %>%
  filter(State != "District Of Columbia") %>%
  # NOTE(review): empty right-hand side — this mutate() is unfinished (no
  # abbreviation is actually assigned).  Either complete it or remove it.
  mutate(State.abbr = )
# ----------------------------------------------------------------------------------------------#
# Power Data
i <- 1
StateFull = c()
while (i < 52){
StateFull <- c(StateFull, state.name[grep(power$State.abbreviation[i], state.abb)])
i <- i + 1
}
power <- power %>%
select(-State.ozone.season.net.generation..MWh., -State.ozone.season.NOx.emissions..tons.) %>%
filter(State.abbreviation != "DC")
StateFull <- as.data.frame(StateFull)
power <- bind_cols(power, StateFull)
# ----------------------------------------------------------------------------------------------#
# New Stuff
landfill.withna <- landfill %>%
filter(is.na(Waste.in.Place..tons.)) %>%
group_by(State) %>%
summarise(Total.With.No.Waste = n())
landfill.withna <- left_join(landfill_by_state, landfill.withna, by = "State")
landfill.withna <- select(landfill.withna, State, Total.With.No.Waste, num.landfills) %>%
mutate(percent = ( 1 - (Total.With.No.Waste / num.landfills)) * 100) %>%
arrange(percent)
landfill_by_state <- left_join(landfill_by_state, landfill.withna, by = "State")
landfill_by_state <- landfill_by_state %>%
arrange(percent)
landfill_by_state$percent[46:50] <- 1
#new waste
total.waste.population <- landfill_by_state %>%
mutate(total.waste.pop = (total.waste * (100 - percent)) / X2017.Population) %>%
select(state.name, total.waste.pop)
# Landfill Scores
# get waste/pop
# get waste scores
waste.score <- total.waste.population %>%
mutate(biggest = max(total.waste.pop)) %>%
mutate(waste.pop = 1 - (total.waste.pop / biggest))%>%
select(state.name, waste.pop)
# get lfg/pop
lfg.collected.population <- landfill_by_state %>%
mutate(lfg.collected.pop = total.lfg.collected / X2017.Population) %>%
select(state.name, lfg.collected.pop)
# get lfg scores
lfg.score <- lfg.collected.population %>%
mutate(biggest = max(lfg.collected.pop)) %>%
mutate(lfg.collected = lfg.collected.pop / biggest) %>%
select(state.name, lfg.collected)
# Power Scores
# get noncombust ratio
noncombust.total <- power %>%
mutate(total.combust = as.numeric(gsub(",","",power$State.annual.total.combustion.net.generation..MWh.))
+ as.numeric(gsub(",","",power$State.annual.total.noncombustion.net.generation..MWh.))) %>%
mutate(noncombust = as.numeric(gsub(",","",power$State.annual.total.noncombustion.net.generation..MWh.)) / total.combust) %>%
select(StateFull, noncombust)
# get noncombust score
noncombust.score <- noncombust.total %>%
mutate(biggest = max(noncombust)) %>%
mutate(combustion = noncombust / biggest) %>%
select(StateFull, combustion)
# get renewables ratio
renewables.total <- power %>%
mutate(total.renewables = as.numeric(gsub(",","",power$State.annual.total.renewables.net.generation..MWh.))
+ as.numeric(gsub(",","",power$State.annual.total.nonrenewables.net.generation..MWh.))) %>%
mutate(renewables = as.numeric(gsub(",","",power$State.annual.total.renewables.net.generation..MWh.)) / total.renewables) %>%
select(StateFull, renewables)
# get renewables score
renewables.score <- renewables.total %>%
mutate(biggest = max(renewables)) %>%
mutate(renews = renewables / biggest) %>%
select(StateFull, renews)
# get total emissions/net generation
total.emissions <- power %>%
select(StateFull, State.annual.NOx.emissions..tons.,
State.annual.CH4.emissions..lbs.,
State.annual.CO2.emissions..tons.,
State.annual.N2O.emissions..lbs.,
State.annual.SO2.emissions..tons.,
State.annual.net.generation..MWh.) %>%
mutate(total = (State.annual.N2O.emissions..lbs. * 0.0005) +
State.annual.CO2.emissions..tons. +
State.annual.NOx.emissions..tons. +
State.annual.SO2.emissions..tons. +
(State.annual.CH4.emissions..lbs.* 0.0005)) %>%
mutate(emissions.total.gen = total / State.annual.net.generation..MWh.) %>%
select(StateFull, emissions.total.gen)
# get emissions score
emissions.score <- total.emissions %>%
mutate(biggest = max(emissions.total.gen)) %>%
mutate(emissions = 1 - (emissions.total.gen / biggest)) %>%
select(StateFull, emissions)
# Water Scores
# get water withdrawal/pop
water.withdrawals.by.pop <- slice(water, 1:50) %>%
transform( withdrawal.pop = as.numeric(Total) / as.numeric(Population.Total)) %>%
select(State, withdrawal.pop)
# get water scores
water.score <- water.withdrawals.by.pop %>%
mutate(biggest = max(withdrawal.pop)) %>%
mutate(withdrawals = 1 - (withdrawal.pop / biggest)) %>%
select(State, withdrawals)
all.data <- left_join(waste.score, lfg.score, by="state.name")
all.data <- left_join(all.data, noncombust.score, by = c("state.name" = "StateFull"))
all.data <- left_join(all.data,renewables.score, by = c("state.name" = "StateFull"))
all.data <- left_join(all.data,emissions.score, by = c("state.name" = "StateFull"))
all.data <- left_join(all.data,water.score, by = c("state.name" = "State"))
locations <- state.abb
# population
all.data.pop <- all.data %>%
filter(state.name != "District of Columbia") %>%
mutate(totalScore = ((waste.pop * 0.16) +
(lfg.collected * 0.13) +
(emissions * 0.25) +
(renews * 0.14) +
(combustion * 0.12) +
(withdrawals * 0.2)))%>%
arrange(state.name) %>%
mutate(loc = locations) %>%
arrange(-totalScore)
all.data.pop$Rank <- seq.int(nrow(all.data.pop))
all.data.pop <- all.data.pop %>%
arrange(state.name)
overall.map <- choroplthFunc(all.data.pop, all.data.pop$totalScore, all.data.pop$loc, all.data.pop$totalScore,
"",c('red4', 'khaki1'), paste("Rank:", all.data.pop$Rank, "<br>", "Score: ",
round((all.data.pop$totalScore * 100), 2), "%"))
# ----------------------------------------------------------------------------------------------#
# summary stats
# Display copy of the score table: every score column is reformatted as a
# "xx.xx%" string, so all.data.output is for presentation only (the numeric
# columns live on in all.data.pop).
all.data.output <- all.data.pop %>%
  mutate(waste.pop = paste0(round((waste.pop * 100), 2), "%")) %>%
  mutate(lfg.collected = paste0(round((lfg.collected * 100), 2), "%")) %>%
  mutate(combustion = paste0(round((combustion * 100), 2), "%")) %>%
  mutate(renews = paste0(round((renews * 100), 2), "%")) %>%
  mutate(emissions = paste0(round((emissions * 100), 2), "%")) %>%
  mutate(withdrawals = paste0(round((withdrawals * 100), 2), "%")) %>%
  arrange(-totalScore) %>%
  mutate(totalScore = paste0(round((totalScore * 100), 2), "%"))
# Rank assigned while still sorted by descending score, then re-sorted by name.
all.data.output$Rank <- seq.int(nrow(all.data.output))
all.data.output <- all.data.output %>%
  select(-loc) %>%
  arrange(state.name)
# Human-readable column headers for the rendered table.
# NOTE(review): paste("State") is just "State"; plain strings would do.
names(all.data.output)[1]<-paste("State")
names(all.data.output)[2]<-paste("Waste")
names(all.data.output)[3]<-paste("LFG")
names(all.data.output)[4]<-paste("Noncombustables")
names(all.data.output)[5]<-paste("Renewables")
names(all.data.output)[6]<-paste("Emissions")
names(all.data.output)[7]<-paste("Withdrawals")
names(all.data.output)[8]<-paste("Score")
# get sum stats and combine
# One mean/median row per raw (pre-normalization) metric, stacked into a
# single long-format summary table `sum.stats`.
emissions.sum.stats <- summarise(total.emissions, variable = "Emissions",
                                 mean = mean(emissions.total.gen),
                                 median = median(emissions.total.gen))
water.sum.stats <- summarise(water.withdrawals.by.pop, variable = "Water Withdrawals",
                             mean = mean(withdrawal.pop),
                             median = median(withdrawal.pop))
waste.sum.stats <- summarise(total.waste.population, variable = "Waste",
                             mean = mean(total.waste.pop),
                             median = median(total.waste.pop))
renewables.sum.stats <- summarise(renewables.total , variable = "Renewables",
                                  mean = mean(renewables),
                                  median = median(renewables))
lfg.sum.stats <- summarise(lfg.collected.population, variable = "LFG",
                           mean = mean(lfg.collected.pop),
                           median = median(lfg.collected.pop))
noncombust.sum.stats <- summarise(noncombust.total, variable = "Noncombustible",
                                  mean = mean(noncombust),
                                  median = median(noncombust))
sum.stats <- rbind(emissions.sum.stats, water.sum.stats, waste.sum.stats, renewables.sum.stats,
                   lfg.sum.stats, noncombust.sum.stats)
# distributions
# Plotly layout margins (pixels) shared by the combined distribution plot.
m <- list(
  l = 50,
  r = 50,
  b = 100,
  t = 0,
  pad = 4
)
# Scatter of all six normalized scores per state on one axis; colors are fixed
# RGB strings so the legend stays stable across renders.
# NOTE(review): `autosize = F` uses the reassignable shorthand F; FALSE is safer.
distribution.plot <-
  plot_ly(all.data.pop, x = ~state.name, y = ~emissions, name = 'Emissions', type = 'scatter', mode = 'markers',
          text = ~state.name, marker = list(color = 'rgb(10, 36, 99)')) %>%
  add_trace(y = ~withdrawals, name = 'Withdrawals', mode = 'markers', marker = list(color = 'rgb(63, 136, 197)')) %>%
  add_trace(y = ~waste.pop, name = 'Waste', mode = 'markers', marker = list(color = 'rgb(232, 151, 44)')) %>%
  add_trace(y = ~lfg.collected, name = 'LFG Collection', mode = 'markers', marker = list(color = 'rgb(239, 219, 127)')) %>%
  add_trace(y = ~renews, name = 'Renewable Power', mode = 'markers', marker = list(color = 'rgb(215, 38, 56)')) %>%
  add_trace(y = ~combustion, name = 'Noncombustables', mode = 'markers', marker = list(color = 'rgb(148, 16, 32)')) %>%
  layout(autosize = F, width = 930, height = 500, margin = m, xaxis = list(title = "", tickfont = list(size = 10)),
         yaxis = list(title = "score"), font = list(family = "times"), title = "")
# Individual per-metric line plots (one plotly object per score column).
emissions.dist <-
  plot_ly(all.data.pop, x = ~state.name, y = ~emissions, name = 'Emissions', type = 'scatter', mode = 'lines',
          text = ~state.name)
withdrawals.dist <-
  plot_ly(all.data.pop, y = ~withdrawals, name = 'Withdrawals', mode = 'lines', type = 'scatter',
          text = ~state.name)
waste.dist <-
  plot_ly(all.data.pop, y = ~waste.pop, name = 'Waste', mode = 'lines', type = 'scatter',
          text = ~state.name)
lfg.dist <-
  plot_ly(all.data.pop, y = ~lfg.collected, name = 'LFG Collection', mode = 'lines', type = 'scatter',
          text = ~state.name)
renewables.dist <-
  plot_ly(all.data.pop, y = ~renews, name = 'Renewable Power', mode = 'lines', type = 'scatter',
          text = ~state.name)
combustion.dist <-
  plot_ly(all.data.pop, y = ~combustion, name = 'Noncombustables', mode = 'lines', type = 'scatter',
          text = ~state.name)
# ----------------------------------------------------------------------------------------------#
# Discussion
# Rank one state across the six impact metrics.
#
# Args:
#   StateName: exact state name as it appears in data$state.name.
#   data: data frame with a state.name column plus the six score columns
#         used below; defaults to the globally built all.data.pop (50 states),
#         which preserves the original one-argument call sites.
#
# Returns: named list with one descending rank per metric
#   (1 = best score, nrow(data) = worst), names matching the report labels.
getRanks <- function(StateName, data = all.data.pop) {
  n <- nrow(data)
  # Exact match instead of the previous grepl(): a substring lookup made
  # "Virginia" also match "West Virginia".
  row <- which(data$state.name == StateName)
  # rank(ties.method = "first") gives each row its position in a stable
  # ascending sort -- the same position arrange() + which() produced.
  # (n + 1) - position reproduces the original hard-coded "51 - position"
  # for the 50-state table while generalizing to any table size.
  rank_for <- function(metric) {
    (n + 1) - rank(data[[metric]], ties.method = "first")[row]
  }
  list("Total Waste" = rank_for("waste.pop"),
       "LFG Collection" = rank_for("lfg.collected"),
       "Emissions" = rank_for("emissions"),
       "Renewable Energy" = rank_for("renews"),
       "Combustables" = rank_for("combustion"),
       "Water Withdrawals" = rank_for("withdrawals"))
}
# Per-state rank tables for the three states discussed in the write-up.
wy.ranks <- as.data.frame(getRanks("Wyoming"))
wa.ranks <- as.data.frame(getRanks("Washington"))
ca.ranks <- as.data.frame(getRanks("California"))
# Landfills lacking an LFG collection system, per state, sorted fewest-first.
wy.landfill <- landfill_by_state %>%
  mutate(num.without = num.landfills.x - total.with.lfg) %>%
  mutate(percent.without = paste0(round(((num.without / num.landfills.x) * 100), 2), "%")) %>%
  select(State, num.without, percent.without) %>%
  arrange(num.without)
#new waste
# Alternative waste metric: raw total.waste per capita, WITHOUT the
# missing-data correction factor applied to total.waste.population above.
total.waste.population2 <- landfill_by_state %>%
  mutate(total.waste.pop = total.waste / X2017.Population) %>%
  select(state.name, total.waste.pop)
# get waste scores
waste.score2 <- total.waste.population2 %>%
  mutate(biggest = max(total.waste.pop)) %>%
  mutate(waste.pop = 1 - (total.waste.pop / biggest))%>%
  select(state.name, waste.pop)
# Re-join the other five (unchanged) metric scores onto the alternative waste score.
all.data2 <- left_join(waste.score2, lfg.score, by="state.name")
all.data2 <- left_join(all.data2, noncombust.score, by = c("state.name" = "StateFull"))
all.data2 <- left_join(all.data2,renewables.score, by = c("state.name" = "StateFull"))
all.data2 <- left_join(all.data2,emissions.score, by = c("state.name" = "StateFull"))
all.data2 <- left_join(all.data2,water.score, by = c("state.name" = "State"))
locations <- state.abb
# population
# Same weighting scheme as all.data.pop (weights sum to 1.00).
all.data.pop2 <- all.data2 %>%
  filter(state.name != "District of Columbia") %>%
  mutate(totalScore = ((waste.pop * 0.16) +
                         (lfg.collected * 0.13) +
                         (emissions * 0.25) +
                         (renews * 0.14) +
                         (combustion * 0.12) +
                         (withdrawals * 0.2))) %>%
  arrange(state.name) %>%
  mutate(loc = locations)
# Choropleth of the alternative composite scores (choroplthFunc from functions.R).
overall.map2 <- choroplthFunc(all.data.pop2, all.data.pop2$totalScore, all.data.pop2$loc, all.data.pop2$totalScore,
                              "States Overall Impact Scores", c('red4', 'khaki1'), paste("Rank"))
# Rank one state across the six impact metrics under the ALTERNATIVE
# (uncorrected-waste) scoring table.
#
# Args:
#   StateName: exact state name as it appears in data$state.name.
#   data: data frame with a state.name column plus the six score columns
#         used below; defaults to the globally built all.data2, preserving
#         the original one-argument call sites.
#
# Returns: named list with one descending rank per metric
#   (1 = best score, nrow(data) = worst), names matching the report labels.
getRanks2 <- function(StateName, data = all.data2) {
  n <- nrow(data)
  # Exact match instead of the previous grepl(): a substring lookup made
  # "Virginia" also match "West Virginia".
  row <- which(data$state.name == StateName)
  # rank(ties.method = "first") gives each row its position in a stable
  # ascending sort -- the same position arrange() + which() produced.
  # (n + 1) - position reproduces the original hard-coded "51 - position"
  # for a 50-row table while staying correct for other table sizes.
  rank_for <- function(metric) {
    (n + 1) - rank(data[[metric]], ties.method = "first")[row]
  }
  list("Total Waste" = rank_for("waste.pop"),
       "LFG Collection" = rank_for("lfg.collected"),
       "Emissions" = rank_for("emissions"),
       "Renewable Energy" = rank_for("renews"),
       "Combustables" = rank_for("combustion"),
       "Water Withdrawals" = rank_for("withdrawals"))
}
# Rank table for California under the alternative (uncorrected-waste) scoring.
ca.ranks2 <- as.data.frame(getRanks2("California"))
| /script.R | no_license | Alimcg2/info470_aak | R | false | false | 17,595 | r |
# ---------------------------------------------------------------------------------------------- #
# Pulling in data and setup
library(dplyr)
library(ggplot2)
library(plotly)
library(leaflet)
landfill <- read.csv("landfill.csv")
power <- read.csv("power.csv")
water <- read.csv("water.csv")
pop2017 <- read.csv("pop2017.csv")
popdensity <- read.csv("popdensity.csv")
source("functions.R")
states <- as.data.frame(state.name, state.abb) %>%
mutate(stateabrev = state.abb)
# ---------------------------------------------------------------------------------------------- #
# Landfill Data
landfill <- landfill %>%
select(State, Landfill.Name, Longitude, Latitude, Ownership.Type, Year.Landfill.Opened,
Landfill.Closure.Year, Waste.in.Place..tons., LFG.Collection.System.In.Place.,
LFG.Collected..mmscfd., LFG.Energy.Project.Type, Project.Type.Category, MW.Capacity,
Current.Year.Emission.Reductions..MMTCO2e.yr....Direct)
landfill_by_state <- landfill %>%
select(State, Landfill.Name, Waste.in.Place..tons., LFG.Collection.System.In.Place.,
LFG.Collected..mmscfd., MW.Capacity, Current.Year.Emission.Reductions..MMTCO2e.yr....Direct) %>%
group_by(State) %>%
summarise(num.landfills = n(),
total.waste = sum(as.numeric(Waste.in.Place..tons.)),
total.with.lfg = sum(LFG.Collection.System.In.Place. == "Yes", na.rm = TRUE),
total.lfg.collected = sum(LFG.Collected..mmscfd., na.rm = TRUE),
total.capacity = sum(MW.Capacity, na.rm = TRUE),
total.reduction = sum(Current.Year.Emission.Reductions..MMTCO2e.yr....Direct, na.rm = TRUE))
#write.csv(landfill_by_state, file = "CABS.csv")
#casum <- landfill %>%
# filter(State == "CA") %>%
# filter(Waste.in.Place..tons. != "NA") %>%
# summarise(total = sum(as.numeric(Waste.in.Place..tons.)))
landfill_by_state <- read.csv("CABS.csv")
landfill_by_state <- landfill_by_state %>%
filter(State != "PR")
#landfill_by_state <- left_join(landfill_by_state, states, by = c("State" = "stateabrev"))
#landfill_by_state <- left_join(landfill_by_state, pop2017, by = c("state.name" = "State"))
#landfill_by_state <- landfill_by_state %>%
# filter(state.name != "N/A")
#landfill_by_state <-left_join(landfill_by_state, popdensity, by = c("state.name" = "State"))
# ----------------------------------------------------------------------------------------------#
# Water Data
water <- water %>%
select(-Groundwater.Fresh, -Groundwater.Saline, -Surfacewater.Fresh, -Surfacewater.Saline, -Irrigation,
-LiveStock, -Aquaculture, -Mining.Fresh, -Mining.Saline, -ThermoelectricPower.Fresh,
-ThermoelectricPower.Saline,
-PublicWithdrawals.Groundwater, -PublicWithdrawals.Surfacewater, -Irrigation.Groundwater,
-Irigation.Surfacewater, -Livestock.Groundwater, -Livestock.Surfacewater, -Aquaculture.Groundwater,
-Aquaculture.Surfacewater, -Mining.Groundwater, -Mining.Surfacewater, -Thermoelectric.Groundwater,
-Thermoelectric.Surfacewater) %>%
mutate(SelfSuppliedIndustrialTotal = (as.numeric(water$SelfSuppliedIndustrial.Saline) +
as.numeric(water$SelfSuppliedIndustrial.Fresh))) %>%
filter(State != "District Of Columbia") %>%
mutate(State.abbr = )
# ----------------------------------------------------------------------------------------------#
# Power Data
i <- 1
StateFull = c()
while (i < 52){
StateFull <- c(StateFull, state.name[grep(power$State.abbreviation[i], state.abb)])
i <- i + 1
}
power <- power %>%
select(-State.ozone.season.net.generation..MWh., -State.ozone.season.NOx.emissions..tons.) %>%
filter(State.abbreviation != "DC")
StateFull <- as.data.frame(StateFull)
power <- bind_cols(power, StateFull)
# ----------------------------------------------------------------------------------------------#
# New Stuff
landfill.withna <- landfill %>%
filter(is.na(Waste.in.Place..tons.)) %>%
group_by(State) %>%
summarise(Total.With.No.Waste = n())
landfill.withna <- left_join(landfill_by_state, landfill.withna, by = "State")
landfill.withna <- select(landfill.withna, State, Total.With.No.Waste, num.landfills) %>%
mutate(percent = ( 1 - (Total.With.No.Waste / num.landfills)) * 100) %>%
arrange(percent)
landfill_by_state <- left_join(landfill_by_state, landfill.withna, by = "State")
landfill_by_state <- landfill_by_state %>%
arrange(percent)
landfill_by_state$percent[46:50] <- 1
#new waste
total.waste.population <- landfill_by_state %>%
mutate(total.waste.pop = (total.waste * (100 - percent)) / X2017.Population) %>%
select(state.name, total.waste.pop)
# Landfill Scores
# get waste/pop
# get waste scores
waste.score <- total.waste.population %>%
mutate(biggest = max(total.waste.pop)) %>%
mutate(waste.pop = 1 - (total.waste.pop / biggest))%>%
select(state.name, waste.pop)
# get lfg/pop
lfg.collected.population <- landfill_by_state %>%
mutate(lfg.collected.pop = total.lfg.collected / X2017.Population) %>%
select(state.name, lfg.collected.pop)
# get lfg scores
lfg.score <- lfg.collected.population %>%
mutate(biggest = max(lfg.collected.pop)) %>%
mutate(lfg.collected = lfg.collected.pop / biggest) %>%
select(state.name, lfg.collected)
# Power Scores
# get noncombust ratio
noncombust.total <- power %>%
mutate(total.combust = as.numeric(gsub(",","",power$State.annual.total.combustion.net.generation..MWh.))
+ as.numeric(gsub(",","",power$State.annual.total.noncombustion.net.generation..MWh.))) %>%
mutate(noncombust = as.numeric(gsub(",","",power$State.annual.total.noncombustion.net.generation..MWh.)) / total.combust) %>%
select(StateFull, noncombust)
# get noncombust score
noncombust.score <- noncombust.total %>%
mutate(biggest = max(noncombust)) %>%
mutate(combustion = noncombust / biggest) %>%
select(StateFull, combustion)
# get renewables ratio
renewables.total <- power %>%
mutate(total.renewables = as.numeric(gsub(",","",power$State.annual.total.renewables.net.generation..MWh.))
+ as.numeric(gsub(",","",power$State.annual.total.nonrenewables.net.generation..MWh.))) %>%
mutate(renewables = as.numeric(gsub(",","",power$State.annual.total.renewables.net.generation..MWh.)) / total.renewables) %>%
select(StateFull, renewables)
# get renewables score
renewables.score <- renewables.total %>%
mutate(biggest = max(renewables)) %>%
mutate(renews = renewables / biggest) %>%
select(StateFull, renews)
# get total emissions/net generation
total.emissions <- power %>%
select(StateFull, State.annual.NOx.emissions..tons.,
State.annual.CH4.emissions..lbs.,
State.annual.CO2.emissions..tons.,
State.annual.N2O.emissions..lbs.,
State.annual.SO2.emissions..tons.,
State.annual.net.generation..MWh.) %>%
mutate(total = (State.annual.N2O.emissions..lbs. * 0.0005) +
State.annual.CO2.emissions..tons. +
State.annual.NOx.emissions..tons. +
State.annual.SO2.emissions..tons. +
(State.annual.CH4.emissions..lbs.* 0.0005)) %>%
mutate(emissions.total.gen = total / State.annual.net.generation..MWh.) %>%
select(StateFull, emissions.total.gen)
# get emissions score
emissions.score <- total.emissions %>%
mutate(biggest = max(emissions.total.gen)) %>%
mutate(emissions = 1 - (emissions.total.gen / biggest)) %>%
select(StateFull, emissions)
# Water Scores
# get water withdrawal/pop
water.withdrawals.by.pop <- slice(water, 1:50) %>%
transform( withdrawal.pop = as.numeric(Total) / as.numeric(Population.Total)) %>%
select(State, withdrawal.pop)
# get water scores
water.score <- water.withdrawals.by.pop %>%
mutate(biggest = max(withdrawal.pop)) %>%
mutate(withdrawals = 1 - (withdrawal.pop / biggest)) %>%
select(State, withdrawals)
all.data <- left_join(waste.score, lfg.score, by="state.name")
all.data <- left_join(all.data, noncombust.score, by = c("state.name" = "StateFull"))
all.data <- left_join(all.data,renewables.score, by = c("state.name" = "StateFull"))
all.data <- left_join(all.data,emissions.score, by = c("state.name" = "StateFull"))
all.data <- left_join(all.data,water.score, by = c("state.name" = "State"))
locations <- state.abb
# population
all.data.pop <- all.data %>%
filter(state.name != "District of Columbia") %>%
mutate(totalScore = ((waste.pop * 0.16) +
(lfg.collected * 0.13) +
(emissions * 0.25) +
(renews * 0.14) +
(combustion * 0.12) +
(withdrawals * 0.2)))%>%
arrange(state.name) %>%
mutate(loc = locations) %>%
arrange(-totalScore)
all.data.pop$Rank <- seq.int(nrow(all.data.pop))
all.data.pop <- all.data.pop %>%
arrange(state.name)
overall.map <- choroplthFunc(all.data.pop, all.data.pop$totalScore, all.data.pop$loc, all.data.pop$totalScore,
"",c('red4', 'khaki1'), paste("Rank:", all.data.pop$Rank, "<br>", "Score: ",
round((all.data.pop$totalScore * 100), 2), "%"))
# ----------------------------------------------------------------------------------------------#
# summary stats
all.data.output <- all.data.pop %>%
mutate(waste.pop = paste0(round((waste.pop * 100), 2), "%")) %>%
mutate(lfg.collected = paste0(round((lfg.collected * 100), 2), "%")) %>%
mutate(combustion = paste0(round((combustion * 100), 2), "%")) %>%
mutate(renews = paste0(round((renews * 100), 2), "%")) %>%
mutate(emissions = paste0(round((emissions * 100), 2), "%")) %>%
mutate(withdrawals = paste0(round((withdrawals * 100), 2), "%")) %>%
arrange(-totalScore) %>%
mutate(totalScore = paste0(round((totalScore * 100), 2), "%"))
all.data.output$Rank <- seq.int(nrow(all.data.output))
all.data.output <- all.data.output %>%
select(-loc) %>%
arrange(state.name)
names(all.data.output)[1]<-paste("State")
names(all.data.output)[2]<-paste("Waste")
names(all.data.output)[3]<-paste("LFG")
names(all.data.output)[4]<-paste("Noncombustables")
names(all.data.output)[5]<-paste("Renewables")
names(all.data.output)[6]<-paste("Emissions")
names(all.data.output)[7]<-paste("Withdrawals")
names(all.data.output)[8]<-paste("Score")
# get sum stats and combine
emissions.sum.stats <- summarise(total.emissions, variable = "Emissions",
mean = mean(emissions.total.gen),
median = median(emissions.total.gen))
water.sum.stats <- summarise(water.withdrawals.by.pop, variable = "Water Withdrawals",
mean = mean(withdrawal.pop),
median = median(withdrawal.pop))
waste.sum.stats <- summarise(total.waste.population, variable = "Waste",
mean = mean(total.waste.pop),
median = median(total.waste.pop))
renewables.sum.stats <- summarise(renewables.total , variable = "Renewables",
mean = mean(renewables),
median = median(renewables))
lfg.sum.stats <- summarise(lfg.collected.population, variable = "LFG",
mean = mean(lfg.collected.pop),
median = median(lfg.collected.pop))
noncombust.sum.stats <- summarise(noncombust.total, variable = "Noncombustible",
mean = mean(noncombust),
median = median(noncombust))
sum.stats <- rbind(emissions.sum.stats, water.sum.stats, waste.sum.stats, renewables.sum.stats,
lfg.sum.stats, noncombust.sum.stats)
# distributions
m <- list(
l = 50,
r = 50,
b = 100,
t = 0,
pad = 4
)
distribution.plot <-
plot_ly(all.data.pop, x = ~state.name, y = ~emissions, name = 'Emissions', type = 'scatter', mode = 'markers',
text = ~state.name, marker = list(color = 'rgb(10, 36, 99)')) %>%
add_trace(y = ~withdrawals, name = 'Withdrawals', mode = 'markers', marker = list(color = 'rgb(63, 136, 197)')) %>%
add_trace(y = ~waste.pop, name = 'Waste', mode = 'markers', marker = list(color = 'rgb(232, 151, 44)')) %>%
add_trace(y = ~lfg.collected, name = 'LFG Collection', mode = 'markers', marker = list(color = 'rgb(239, 219, 127)')) %>%
add_trace(y = ~renews, name = 'Renewable Power', mode = 'markers', marker = list(color = 'rgb(215, 38, 56)')) %>%
add_trace(y = ~combustion, name = 'Noncombustables', mode = 'markers', marker = list(color = 'rgb(148, 16, 32)')) %>%
layout(autosize = F, width = 930, height = 500, margin = m, xaxis = list(title = "", tickfont = list(size = 10)),
yaxis = list(title = "score"), font = list(family = "times"), title = "")
emissions.dist <-
plot_ly(all.data.pop, x = ~state.name, y = ~emissions, name = 'Emissions', type = 'scatter', mode = 'lines',
text = ~state.name)
withdrawals.dist <-
plot_ly(all.data.pop, y = ~withdrawals, name = 'Withdrawals', mode = 'lines', type = 'scatter',
text = ~state.name)
waste.dist <-
plot_ly(all.data.pop, y = ~waste.pop, name = 'Waste', mode = 'lines', type = 'scatter',
text = ~state.name)
lfg.dist <-
plot_ly(all.data.pop, y = ~lfg.collected, name = 'LFG Collection', mode = 'lines', type = 'scatter',
text = ~state.name)
renewables.dist <-
plot_ly(all.data.pop, y = ~renews, name = 'Renewable Power', mode = 'lines', type = 'scatter',
text = ~state.name)
combustion.dist <-
plot_ly(all.data.pop, y = ~combustion, name = 'Noncombustables', mode = 'lines', type = 'scatter',
text = ~state.name)
# ----------------------------------------------------------------------------------------------#
# Discussion
# Rank one state across the six impact metrics.
#
# Args:
#   StateName: exact state name as it appears in data$state.name.
#   data: data frame with a state.name column plus the six score columns
#         used below; defaults to the globally built all.data.pop (50 states),
#         which preserves the original one-argument call sites.
#
# Returns: named list with one descending rank per metric
#   (1 = best score, nrow(data) = worst), names matching the report labels.
getRanks <- function(StateName, data = all.data.pop) {
  n <- nrow(data)
  # Exact match instead of the previous grepl(): a substring lookup made
  # "Virginia" also match "West Virginia".
  row <- which(data$state.name == StateName)
  # rank(ties.method = "first") gives each row its position in a stable
  # ascending sort -- the same position arrange() + which() produced.
  # (n + 1) - position reproduces the original hard-coded "51 - position"
  # for the 50-state table while generalizing to any table size.
  rank_for <- function(metric) {
    (n + 1) - rank(data[[metric]], ties.method = "first")[row]
  }
  list("Total Waste" = rank_for("waste.pop"),
       "LFG Collection" = rank_for("lfg.collected"),
       "Emissions" = rank_for("emissions"),
       "Renewable Energy" = rank_for("renews"),
       "Combustables" = rank_for("combustion"),
       "Water Withdrawals" = rank_for("withdrawals"))
}
wy.ranks <- as.data.frame(getRanks("Wyoming"))
wa.ranks <- as.data.frame(getRanks("Washington"))
ca.ranks <- as.data.frame(getRanks("California"))
wy.landfill <- landfill_by_state %>%
mutate(num.without = num.landfills.x - total.with.lfg) %>%
mutate(percent.without = paste0(round(((num.without / num.landfills.x) * 100), 2), "%")) %>%
select(State, num.without, percent.without) %>%
arrange(num.without)
#new waste
total.waste.population2 <- landfill_by_state %>%
mutate(total.waste.pop = total.waste / X2017.Population) %>%
select(state.name, total.waste.pop)
# get waste scores
waste.score2 <- total.waste.population2 %>%
mutate(biggest = max(total.waste.pop)) %>%
mutate(waste.pop = 1 - (total.waste.pop / biggest))%>%
select(state.name, waste.pop)
all.data2 <- left_join(waste.score2, lfg.score, by="state.name")
all.data2 <- left_join(all.data2, noncombust.score, by = c("state.name" = "StateFull"))
all.data2 <- left_join(all.data2,renewables.score, by = c("state.name" = "StateFull"))
all.data2 <- left_join(all.data2,emissions.score, by = c("state.name" = "StateFull"))
all.data2 <- left_join(all.data2,water.score, by = c("state.name" = "State"))
locations <- state.abb
# population
all.data.pop2 <- all.data2 %>%
filter(state.name != "District of Columbia") %>%
mutate(totalScore = ((waste.pop * 0.16) +
(lfg.collected * 0.13) +
(emissions * 0.25) +
(renews * 0.14) +
(combustion * 0.12) +
(withdrawals * 0.2))) %>%
arrange(state.name) %>%
mutate(loc = locations)
overall.map2 <- choroplthFunc(all.data.pop2, all.data.pop2$totalScore, all.data.pop2$loc, all.data.pop2$totalScore,
"States Overall Impact Scores", c('red4', 'khaki1'), paste("Rank"))
# Rank one state across the six impact metrics under the ALTERNATIVE
# (uncorrected-waste) scoring table.
#
# Args:
#   StateName: exact state name as it appears in data$state.name.
#   data: data frame with a state.name column plus the six score columns
#         used below; defaults to the globally built all.data2, preserving
#         the original one-argument call sites.
#
# Returns: named list with one descending rank per metric
#   (1 = best score, nrow(data) = worst), names matching the report labels.
getRanks2 <- function(StateName, data = all.data2) {
  n <- nrow(data)
  # Exact match instead of the previous grepl(): a substring lookup made
  # "Virginia" also match "West Virginia".
  row <- which(data$state.name == StateName)
  # rank(ties.method = "first") gives each row its position in a stable
  # ascending sort -- the same position arrange() + which() produced.
  # (n + 1) - position reproduces the original hard-coded "51 - position"
  # for a 50-row table while staying correct for other table sizes.
  rank_for <- function(metric) {
    (n + 1) - rank(data[[metric]], ties.method = "first")[row]
  }
  list("Total Waste" = rank_for("waste.pop"),
       "LFG Collection" = rank_for("lfg.collected"),
       "Emissions" = rank_for("emissions"),
       "Renewable Energy" = rank_for("renews"),
       "Combustables" = rank_for("combustion"),
       "Water Withdrawals" = rank_for("withdrawals"))
}
ca.ranks2 <- as.data.frame(getRanks2("California"))
|
# Backend of the activity log tab in the Shiny app
# Copyright (C) 2017 Dana-Farber Cancer Institute Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Questions, comments and concerns can be directed to
# Alexander Gimelbrant: alexander_gimelbrant@dfci.harvard.edu
# Sebastien Vigneau: Sebastien_Vigneau@dfci.harvard.edu
# Svetlana Vinogradova: Svetlana_Vinogradova@dfci.harvard.edu
# Henry Ward: henry.neil.ward@gmail.com
# Sachit Saksena: sachitdsaksena@utexas.edu
######
# MAIN FUNCTIONS
######
# write_log: prepend `entry` to a Markdown-style activity log at `log_file`.
# The log is ordered newest-first, grouped under one "### <Month day, Year>"
# header per day; its last line always records the most recent update time.
# The file is created on first use.
write_log <- function(log_file, entry) {
  current_date <- format(Sys.Date(), format="%B %d, %Y")
  date_header <- paste("###", current_date)
  create_header <- paste("Activity log created on", current_date)
  update_header <- paste("Activity log last updated on", current_date,
                         "at", format(Sys.time(), "%X"))
  newly_created <- FALSE
  # Opens connection to log and creates new log if necessary.
  # writeLines()/readLines() each open and close the unopened connection on
  # their own, so the file springs into existence on the first writeLines().
  lines <- NA
  conn <- file(log_file)
  if (!file.exists(log_file)) {
    newly_created <- TRUE
    writeLines(c("", create_header, update_header), conn)
    lines <- readLines(conn)
  } else {
    lines <- readLines(conn)
  }
  close(conn)
  # Checks to see if there is an entry for the current day
  # and records that line number if it exists. Otherwise,
  # the current date index is set to the first line
  current_date_line <- match(date_header, lines)
  if (is.na(current_date_line)) { current_date_line <- 1 }
  # Duplicates each line before the current header in the
  # output array we build up
  output <- c()
  if (newly_created) {
    output <- c(output, date_header)
  }
  else {
    if (current_date_line != 1) {
      output <- lines[1:current_date_line]
    } else {
      # Checks for the case where the first line is the current date
      output <- c(output, date_header)
    }
  }
  # Adds our new entry to the first open position under
  # the current date along with an empty line
  output <- c(output, entry)
  output <- c(output, "")
  # Joins the new entry and current date with the previous
  # entries in the log. Retains previous date header if
  # the previous date is on the first line
  if (!newly_created && (!is.na(match(date_header, lines)))) {
    current_date_line <- current_date_line + 1
  }
  output <- c(output, lines[current_date_line:length(lines)])
  # Records the update time
  output[length(output)] = update_header
  # Overwrites file with new output
  conn <- file(log_file)
  writeLines(output, conn)
  close(conn)
}
# NOTE(review): ad-hoc manual test left at module level -- it writes to a
# hard-coded relative path every time this file is sourced; consider removing.
write_log("Documents/Histograms/activity_log_test.txt", "this is an update")
| /shiny/MagicWeb/src/log.R | permissive | liufan-creat/magic | R | false | false | 3,274 | r | # Backend of the activity log tab in the Shiny app
# Copyright (C) 2017 Dana-Farber Cancer Institute Inc.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Questions, comments and concerns can be directed to
# Alexander Gimelbrant: alexander_gimelbrant@dfci.harvard.edu
# Sebastien Vigneau: Sebastien_Vigneau@dfci.harvard.edu
# Svetlana Vinogradova: Svetlana_Vinogradova@dfci.harvard.edu
# Henry Ward: henry.neil.ward@gmail.com
# Sachit Saksena: sachitdsaksena@utexas.edu
######
# MAIN FUNCTIONS
######
# Writes a string, formatted as a new entry, to the given filepath
write_log <- function(log_file, entry) {
current_date <- format(Sys.Date(), format="%B %d, %Y")
date_header <- paste("###", current_date)
create_header <- paste("Activity log created on", current_date)
update_header <- paste("Activity log last updated on", current_date,
"at", format(Sys.time(), "%X"))
newly_created <- FALSE
# Opens connection to log and creates new log if necessary
lines <- NA
conn <- file(log_file)
if (!file.exists(log_file)) {
newly_created <- TRUE
writeLines(c("", create_header, update_header), conn)
lines <- readLines(conn)
} else {
lines <- readLines(conn)
}
close(conn)
# Checks to see if there is an entry for the current day
# and records that line number if it exists. Otherwise,
# the current date index is set to the first line
current_date_line <- match(date_header, lines)
if (is.na(current_date_line)) { current_date_line <- 1 }
# Duplicates each line before the current header in the
# output array we build up
output <- c()
if (newly_created) {
output <- c(output, date_header)
}
else {
if (current_date_line != 1) {
output <- lines[1:current_date_line]
} else {
# Checks for the case where the first line is the current date
output <- c(output, date_header)
}
}
# Adds our new entry to the first open position under
# the current date along with an empty line
output <- c(output, entry)
output <- c(output, "")
# Joins the new entry and current date with the previous
# entries in the log. Retains previous date header if
# the previous date is on the first line
if (!newly_created && (!is.na(match(date_header, lines)))) {
current_date_line <- current_date_line + 1
}
output <- c(output, lines[current_date_line:length(lines)])
# Records the update time
output[length(output)] = update_header
# Overwrites file with new output
conn <- file(log_file)
writeLines(output, conn)
close(conn)
}
write_log("Documents/Histograms/activity_log_test.txt", "this is an update")
|
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
# NOTE(review): installing a package at load time triggers a network call (and
# a possible failure) every time the app starts; install swiftcap once instead.
devtools::install_github ("lingani/swiftcap")
library(swiftcap)
# Load the `model` dataset shipped with swiftcap, used by predict() below.
data(model)
shinyServer(function(input, output, clientData, session) {
  # Reactive prediction: collapse the text input to a single space-joined
  # string and run the model's predict() method; re-runs when the input changes.
  pred <- reactive({
    s <- paste(input$textfield)
    s <- paste(s, sep = ' ', collapse = ' ')
    predict (model, s)
  })
  # Text summary of candidates formatted "(rank)word" and space-joined.
  output$predictions <- renderText({
    pre <- pred()
    # NOTE(review): `s` is assigned but never used; `ss` is what gets rendered.
    s <- pre$word
    ss <- paste0 ("(", pre$rank, ")", pre$word)
    ss <- paste(ss, sep = ', ', collapse = ' ')
  })
  # Horizontal bar chart of candidate words ordered by probability `p`.
  output$phonePlot <- renderPlot({
    pre <- pred()
    pre$word2 <- reorder(pre$word, pre$p)
    ggplot(pre, aes(x=word2, y=p)) +
      geom_bar(stat = "identity", position="stack") +
      coord_flip() +
      labs(title="The most likely next words and the probability of each as calculated by the model")
  })
})
| /server.R | no_license | lingani/DS_CapstoneProject | R | false | false | 952 | r |
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
devtools::install_github ("lingani/swiftcap")
library(swiftcap)
data(model)
shinyServer(function(input, output, clientData, session) {
pred <- reactive({
s <- paste(input$textfield)
s <- paste(s, sep = ' ', collapse = ' ')
predict (model, s)
})
output$predictions <- renderText({
pre <- pred()
s <- pre$word
ss <- paste0 ("(", pre$rank, ")", pre$word)
ss <- paste(ss, sep = ', ', collapse = ' ')
})
output$phonePlot <- renderPlot({
pre <- pred()
pre$word2 <- reorder(pre$word, pre$p)
ggplot(pre, aes(x=word2, y=p)) +
geom_bar(stat = "identity", position="stack") +
coord_flip() +
labs(title="The most likely next words and the probability of each as calculated by the model")
})
})
|
# loading the required package
# NOTE(review): require() returns FALSE instead of erroring when a package is
# missing; library() would fail fast here.
require(plyr)
require(ggplot2)
#importing the data
dat <- read.csv("~/Documents/prof.vaso/RDD_2.csv")
#adding on column that specify whether a city response to the arrest stimulus
# sel is TRUE for a metro area whose 13th GoogleOWS_percent_rec value is
# non-zero -- presumably the post-arrest observation; verify against the
# TIME coding of the source data.
dat<-ddply(dat, .(MetroName_Google), transform, sel=(!GoogleOWS_percent_rec[13]==0))
# NOTE(review): attach()/detach() puts columns on the search path and is easy
# to get wrong; with() or explicit dat$ access is safer.
attach(dat)
#preliminary check with the data
plot(GoogleOWS_percent_rec~TIME,pch=19,cex=0.2)
plot(LnArrestsNumber~TIME,pch=19,cex=0.2)
plot(GoogleOWS_percent_rec~LnArrestsNumber,pch=19,cex=0.2)
# point plot by the state and city
pdf("~/Documents/prof.vaso/state.plot.pdf",height=50, width=20)
ggplot(dat) + geom_point(aes(x=TIME,y=GoogleOWS_percent_rec, col=MetroName_Google,size=0.1))+
  facet_wrap(~MetroSt_Google, ncol=4)
dev.off()
######################
#forming a new data.frame to make the model, named it as "new_data"
new_data <- as.data.frame(cbind(TIME,GoogleOWS_percent_rec,sel))
#adding one column to the "new_data", which specify the time_period of each row, whether in 4~10, or in 11~16
new_data$time_period <- apply(new_data,1,function(x){
    if(x[1] <= 10) tp = 1
    if(x[1] > 10) tp = 2
    return(tp)
})
# adding one column to "new_data": period-two rows have TIME shifted back by 7
# so both periods lie on a comparable scale (the original comment said 10,
# but the code subtracts 7).
new_data$time_transformed <- apply(new_data,1,function(x){
    if(x[1] <= 10) tp = x[1]
    if(x[1] > 10) tp = x[1]-7
    return(tp)
})
detach(dat)
attach(new_data)
#model 1
# Interaction model on responder cities (sel == 1): separate slopes/intercepts
# per time period.
mod1 <- lm(GoogleOWS_percent_rec ~ time_transformed + as.factor(time_period)
           + time_transformed * as.factor(time_period), data = new_data[sel==1,])
#model 2
# NOTE(review): object names and comment labels are swapped -- mod3 sits under
# "#model 2" and mod2 under "#model 3".
mod3 <- lm(GoogleOWS_percent_rec ~ time_transformed + as.factor(time_period)
           + time_transformed * as.factor(time_period), data = new_data[sel==0,])
#model 3
# NOTE(review): `dat =` only works via partial matching of lm()'s `data`
# argument; spell it out as `data =`.
mod2 <-lm(GoogleOWS_percent_rec ~ time_transformed, dat = new_data[sel==1 & time_period==2,])
ggplot(new_data)+geom_point(aes(x=time_transformed, y=GoogleOWS_percent_rec, col=as.factor(time_period)))
# Same scatter restricted to period-two rows (column 3 of new_data is... the
# third column here is `sel`; `time_period` is column 4 -- NOTE(review):
# new_data[,3]==2 compares `sel` to 2 and therefore selects no rows; this
# likely intended new_data$time_period == 2.
ggplot(new_data[new_data[,3]==2,])+geom_point(aes(x=time_transformed, y=GoogleOWS_percent_rec))
| /model_lm.R | no_license | zmyao88/Vasi | R | false | false | 2,114 | r | # loading the required package
# Load the required packages. library() fails fast when a package is missing,
# unlike require(), which only warns and returns FALSE.
library(plyr)
library(ggplot2)

# Import the data.
dat <- read.csv("~/Documents/prof.vaso/RDD_2.csv")

# Per city, flag whether the city responded to the arrest stimulus:
# sel is TRUE when the 13th GoogleOWS_percent_rec observation is non-zero.
dat <- ddply(dat, .(MetroName_Google), transform,
             sel = (GoogleOWS_percent_rec[13] != 0))

# Preliminary checks with the data. Explicit data= arguments replace the
# original attach()/detach() pair, which pollutes the search path.
plot(GoogleOWS_percent_rec ~ TIME, data = dat, pch = 19, cex = 0.2)
plot(LnArrestsNumber ~ TIME, data = dat, pch = 19, cex = 0.2)
plot(GoogleOWS_percent_rec ~ LnArrestsNumber, data = dat, pch = 19, cex = 0.2)

# Point plot by state and city, written to a (very tall) PDF. print() is
# required for ggplot objects to be drawn on the pdf device when this
# script is run non-interactively via source().
pdf("~/Documents/prof.vaso/state.plot.pdf", height = 50, width = 20)
print(
  ggplot(dat) +
    geom_point(aes(x = TIME, y = GoogleOWS_percent_rec,
                   col = MetroName_Google, size = 0.1)) +
    facet_wrap(~MetroSt_Google, ncol = 4)
)
dev.off()

######################
# Form the modelling data frame "new_data". data.frame() replaces
# as.data.frame(cbind(...)); sel is coerced to 0/1 exactly as before.
new_data <- data.frame(TIME = dat$TIME,
                       GoogleOWS_percent_rec = dat$GoogleOWS_percent_rec,
                       sel = as.numeric(dat$sel))

# time_period: 1 for TIME <= 10 (pre period), 2 for TIME > 10 (post period).
# Vectorised ifelse() replaces the original row-wise apply() loop.
new_data$time_period <- ifelse(new_data$TIME <= 10, 1, 2)

# time_transformed: unchanged in period 1; shifted down by 7 in period 2
# (the original comment said "by 10", but the code subtracts 7).
new_data$time_transformed <- ifelse(new_data$TIME <= 10,
                                    new_data$TIME,
                                    new_data$TIME - 7)

# Model 1: segmented regression with interaction, responding cities (sel == 1).
# a * b expands to a + b + a:b, the same design matrix as the original formula.
mod1 <- lm(GoogleOWS_percent_rec ~ time_transformed * as.factor(time_period),
           data = new_data[new_data$sel == 1, ])

# Model 2 (object name mod3 kept for backward compatibility):
# same specification for the non-responding cities (sel == 0).
mod3 <- lm(GoogleOWS_percent_rec ~ time_transformed * as.factor(time_period),
           data = new_data[new_data$sel == 0, ])

# Model 3 (object name mod2 kept): post-period trend, responding cities only.
# 'data' is spelled out; the original relied on partial matching of 'dat ='.
mod2 <- lm(GoogleOWS_percent_rec ~ time_transformed,
           data = new_data[new_data$sel == 1 & new_data$time_period == 2, ])

# All observations, coloured by period.
print(
  ggplot(new_data) +
    geom_point(aes(x = time_transformed, y = GoogleOWS_percent_rec,
                   col = as.factor(time_period)))
)

# Post-period observations only. The original subset new_data[, 3] == 2
# tested the sel column (always 0/1, so the plot came out empty); the
# intended column is time_period.
print(
  ggplot(new_data[new_data$time_period == 2, ]) +
    geom_point(aes(x = time_transformed, y = GoogleOWS_percent_rec))
)
|
#' Read Landsat MTL metadata files
#'
#' Reads metadata and deals with legacy versions of Landsat metadata files and where possible adds missing information (radiometric gain and offset, earth-sun distance).
#'
#' @param file path to Landsat MTL file (...MTL.txt)
#' @param raw Logical. If \code{TRUE} the full raw metadata will be returned as a list. if \code{FALSE} (the default) all important metadata are homogenized into a standard format (ImageMetaData) and some information is added.
#' @return Object of class ImageMetaData
#' @export
#' @examples
#' ## Example metadata file (MTL)
#' mtlFile <- system.file("external/landsat/LT52240631988227CUB02_MTL.txt", package="RStoolbox")
#'
#' ## Read metadata
#' metaData <- readMeta(mtlFile)
#'
#' ## Summary
#' summary(metaData)
#'
readMeta <- function(file, raw = FALSE){
## TODO: make modular for additional sensors
if(!file.exists(file)) stop("Metadata file does not exist. Looking for: ", file, call. = FALSE)
if(!grepl("MTL", file) & !grepl("xml", file)) warning("The Landsat metadata file you have specified looks unusual. Typically the filename contains the string 'MTL' or 'xml'. Are you sure you specified the right file? \n I'll try to read it but check the results!")
format <- if(grepl('xml', file)) "XML" else "MTL"
if(format == "MTL") {
## PROCESS LPS MTL FILES
meta <- read.delim(file, sep = "=", header = FALSE, stringsAsFactors = FALSE, strip.white = TRUE, skip = 1, skipNul = TRUE)
meta <- meta[-(nrow(meta)-c(1,0)),]
## Retrieve groups
l <- meta[grep("GROUP",meta[,1]),]
## Assemble metadata list
meta <- lapply(unique(l[,2]), FUN = function(x){
w <- which(meta[,2] == x)
m <- meta[(w[1]+1):(w[2]-1),]
rownames(m) <- m[,1]
m <- m[ , 2, drop = FALSE]
colnames(m) <- "VALUE"
return(m)
})
names(meta) <- unique(l[,2])
num <- grep("IMAGE_AT|MIN_MAX|RESCAL|THERMAL", names(meta))
meta[num] <- lapply(meta[num], function(x) {x[,1] <- as.numeric(x[,1]);x})
if(raw) return(meta)
## Legacy MTL?
legacy <- "PROCESSING_SOFTWARE" %in% rownames(meta$PRODUCT_METADATA)
if(legacy) message("This scene was processed before August 29, 2012. Using MTL legacy format. Some minor infos such as SCENE_ID will be missing")
sat <- paste0("LANDSAT", .getNumeric(meta$PRODUCT_METADATA["SPACECRAFT_ID",]))
sen <- meta$PRODUCT_METADATA["SENSOR_ID",]
scene <- meta$METADATA_FILE_INFO["LANDSAT_SCENE_ID",] ## could assemble name for legacy files: http://landsat.usgs.gov/naming_conventions_scene_identifiers.php
date <- as.POSIXct(if(!legacy) meta$PRODUCT_METADATA["DATE_ACQUIRED",] else meta$PRODUCT_METADATA["ACQUISITION_DATE",])
date <- strptime(paste0(date, meta$PRODUCT_METADATA["SCENE_CENTER_TIME",]), "%Y-%m-%d %H:%M:%S")
pdate <- as.POSIXct(if(!legacy) meta$METADATA_FILE_INFO["FILE_DATE",] else meta$METADATA_FILE_INFO["PRODUCT_CREATION_TIME",])
path <- as.numeric(meta$PRODUCT_METADATA["WRS_PATH",])
row <- if(!legacy) as.numeric(meta$PRODUCT_METADATA["WRS_ROW",]) else as.numeric(meta$PRODUCT_METADATA["STARTING_ROW",])
pars <- meta$PROJECTION_PARAMETERS[c("MAP_PROJECTION","UTM_ZONE","DATUM"),]
pars[1] <- tolower(pars[1])
proj <- CRS(paste0(c("+proj=", "+zone=", "+units=m +datum="), pars, collapse=" "))
files <- row.names(meta[["PRODUCT_METADATA"]])[grep("^.*BAND", row.names(meta$PRODUCT_METADATA))]
files <- meta[["PRODUCT_METADATA"]][files,]
bands <- gsub(paste0(scene,"_|.TIF"), "", files)
bands <- paste0(bands, "_dn")
bands <- gsub("BQA", "QA", bands )
if(trailingZeros <- length(grep("0.TIF", bands)) > 1) stop("Trailing zeros")
quant <- rep("dn", length(bands))
cat <- rep("image", length(bands))
cat[grep("QA", bands)] <- "qa"
cat[grep("B8", bands)] <- "pan"
spatRes <- rep(meta$PROJECTION_PARAMETERS["GRID_CELL_SIZE_REFLECTIVE", ], length(bands))
spatRes[grep("B8", bands)] <- meta$PROJECTION_PARAMETERS["GRID_CELL_SIZE_PANCHROMATIC", ]
spatRes[grep("B6|B10|B11", bands)] <- meta$PROJECTION_PARAMETERS["GRID_CELL_SIZE_THERMAL", ]
spatRes <- as.numeric(spatRes)
na <- NA
az <- if(!legacy) as.numeric(meta$IMAGE_ATTRIBUTES["SUN_AZIMUTH",]) else as.numeric(meta$PRODUCT_PARAMETERS["SUN_AZIMUTH",])
selv <- if(!legacy) as.numeric(meta$IMAGE_ATTRIBUTES["SUN_ELEVATION",]) else as.numeric(meta$PRODUCT_PARAMETERS["SUN_ELEVATION",])
esd <- meta$IMAGE_ATTRIBUTES["EARTH_SUN_DISTANCE",]
if(is.null(esd) || is.na(esd)) esd <- .ESdist(date)
esd <- as.numeric(esd)
vsat <- NA ## or set to max?
scal <- 1
dtyp <- NA
## RADIOMETRIC CORRECTION/RESCALING PARAMETERS
if(!legacy) {
r <- meta$RADIOMETRIC_RESCALING
rnr <- rownames(r)
calrad <- data.frame(offset = r[grep("RADIANCE_ADD*", rownames(r)),], gain = r[grep("RADIANCE_MULT*", rownames(r)),])
calref <- data.frame(offset = r[grep("REFLECTANCE_ADD*", rownames(r)),], gain = r[grep("REFLECTANCE_MULT*", rownames(r)),])
rownames(calrad) <- paste0(gsub("^.*BAND_","B", rnr[grep("RADIANCE_MULT", rnr)]), "_dn")
if(nrow(calref) != 0) rownames(calref) <- paste0(gsub("^.*BAND_","B", rnr[grep("REFLECTANCE_MULT", rnr)]), "_dn") else calref <- NA
} else {
r <- meta$MIN_MAX_RADIANCE
rp <- meta$MIN_MAX_PIXEL_VALUE
rnr <- rownames(r)
e2nd <- seq(1, nrow(r), 2)
L <- diff(r[,1])[e2nd]
Q <- diff(rp[,1])[e2nd]
radg <- L/Q
rado <- r[seq(2,nrow(r),2),1] - radg
calrad <- data.frame(offset = rado, gain = radg)
calref <- NA
rownames(calrad) <- paste0(gsub("^.*BAND_","B", rnr[grep("MAX", rnr)]), "_dn")
}
if(sat == "LANDSAT8"){
r <- meta$TIRS_THERMAL_CONSTANTS
calbt <- data.frame(K1 = r[1:2,] , K2 = r[3:4,])
rownames(calbt) <- c("B10_dn", "B11_dn")
} else {
TAB7 <- list(LANDSAT4 = data.frame(K1=671.62,K2=1284.3), # TAB7 from Chander 2009
LANDSAT5 = data.frame(K1=607.76,K2=1260.56),
LANDSAT7 = data.frame(K1=666.09,K2=1282.71))
calbt <- TAB7[[sat]]
tbds <- bands[grep("B6", bands)]
if(length(tbds)>1) calbt <- rbind(calbt, calbt)
rownames(calbt)<-tbds
}
} else {
## PROCESS ESPA LEDAPS XML FILES
meta <- xmlToList(xmlParse(file))
names(meta$bands) <- str_replace_all(unlist(sapply(meta$bands, "[", "long_name")), " ", "_")
if(raw) return(meta)
luv <- c(dn = "dn", toa_rad = "tra", toa_refl = "tre", toa_bt = "bt", sr_refl = "sre", spectral_indices = "idx", cfmask = "tre")
atts <- sapply(meta$bands, "[", ".attrs")
sat <- paste0("LANDSAT", .getNumeric(meta$global_metadata$satellite))
sen <- meta$global_metadata$instrument
scene <- str_replace(meta$global_metadata$lpgs_metadata_file, "_MTL.txt", "") ## could assemble name for legacy files: http://landsat.usgs.gov/naming_conventions_scene_identifiers.php
date <- as.POSIXct(paste(meta$global_metadata$acquisition_date,meta$global_metadata$scene_center_time), "%Y%m%d %H:%M:%S" )
pdate <- as.POSIXct(meta$bands[[1]]$production_date)
path <- as.numeric(meta$global_metadata$wrs["path"])
row <- as.numeric(meta$global_metadata$wrs["row"])
az <- as.numeric(meta$global_metadata$solar_angles["azimuth"])
selv <- 90 - as.numeric(meta$global_metadata$solar_angles["zenith"])
proj <- CRS(paste0("+proj=utm +zone=",meta$global_metadata$projection_information$utm_proj_params," +datum=WGS84 +units=m"))
esd <- .ESdist(date)
files <- sapply(meta$bands, "[[", "file_name")
quant <- luv[sapply(atts, "[", "product")]
cat <- sapply(atts, "[", "category")
cat[grep("opacity", names(cat))] <- "qa"
bands <- gsub(paste0(scene, "_|.tif"), "", files)
bs <- grepl("_band", files)
bands[bs] <- paste0("B", .getNumeric(bands[bs]), "_", quant[bs])
bands[cat == "qa"] <- paste0("QA_", gsub("sr_|_qa", "", bands[cat == "qa"]))
bands[cat == "index"] <- gsub("SR_", "", toupper(bands[cat == "index"]))
spatRes <- vapply(meta$bands,function(x) x$pixel_size["x"], character(1))
na <- as.numeric(sapply(atts, "[" , "fill_value"))
vsat <- as.numeric(sapply(atts, "[" , "saturate_value"))
scal <- as.numeric(sapply(atts, "[" , "scale_factor"))
dataTypes <- c(INT16 = "INT4S", UINT8 = "INT1U")
dtyp <- dataTypes[as.character(sapply(atts, "[" , "data_type"))]
## Missing
calrad <- calref <- calbt <- NA
}
## Add-on data:
radRes <- if(sat == "LANDSAT8") 16 else 8
obj <- ImageMetaData(file = file, format = format, sat = sat, sen = sen, scene = scene, date = date, pdate = pdate, path = path, radRes=radRes, spatRes = spatRes, row = row, az = az,
selv = selv, esd = esd, files = files, bands = bands, quant = quant, cat = cat, na = na, vsat = vsat, scal = scal, dtyp = dtyp,
calrad=calrad, calref=calref, calbt=calbt, proj = proj)
}
#' ImageMetaData Class
#'
#' @param file Character. Metadata file
#' @param format Character. Metadata format, e.g. xml, mtl
#' @param sat Character. Satellite platform
#' @param sen Character. Sensor
#' @param scene Character. Scene_ID
#' @param proj CRS. Projection.
#' @param date POSIXct. Aquosition date.
#' @param pdate POSIXct. Processing date.
#' @param path Integer. Path.
#' @param row Integer. Row.
#' @param az Numeric. Sun azimuth
#' @param selv Numeric. Sun elevation
#' @param esd Numeric. Earth-sun distance
#' @param files Character vector. Files containing the data, e.g. tiff files
#' @param bands Character vector. Band names
#' @param quant Character vector. Quantity, one of c("dn", "tra", "tre", "sre", "bt", "idx")
#' @param cat Character vector. Category, e.g. c("image", "pan", "index", "qa")
#' @param na Numeric vector. No-data value per band
#' @param vsat Numeric vector. Saturation value per band
#' @param scal Numeric vector. Scale factor per band. e.g. if data was scaled to 1000*reflectance for integer conversion.
#' @param dtyp Character vector. Data type per band. See \code{\link[raster]{dataType}} for options.
#' @param radRes Numeric vector. Radiometric resolution per band.
#' @param spatRes Numeric vector. Spatial resolution per band.
#' @param calrad data.frame. Calibration coefficients for dn->radiance conversion. Must have columns 'gain' and 'offset'. Rows named according to \code{bands}.
#' @param calref data.frame. Calibration coefficients for dn->reflectance conversion. Must have columns 'gain' and 'offset'. Rows named according to \code{bands}.
#' @param calbt data.frame. Calibration coefficients for dn->brightness temperature conversion. Must have columns 'K1' and 'K2'. Rows named according to \code{bands}.
#' @export
ImageMetaData <- function(file = NA, format = NA, sat = NA, sen = NA, scene = NA, proj =NA, date = NA, pdate = NA,path = NA, row = NA, az = NA, selv = NA,
esd = NA, files = NA, bands = NA, quant = NA, cat = NA, na = NA, vsat = NA, scal = NA, dtyp = NA, calrad = NA, calref = NA, calbt = NA, radRes=NA, spatRes = NA){
obj <- list(
METADATA_FILE = file,
METADATA_FORMAT = format,
SATELLITE = sat,
SENSOR = sen,
SCENE_ID = scene,
ACQUISITION_DATE = date,
PROCESSING_DATE = pdate,
PATH_ROW = c(path=path,row=row),
PROJECTION = proj,
SOLAR_PARAMETERS = c(azimuth=az, elevation = selv, distance = esd),
DATA = data.frame(
FILES = files,
BANDS = bands,
QUANTITY = quant,
CATEGORY = cat,
NA_VALUE = na,
SATURATE_VALUE = vsat,
SCALE_FACTOR = scal,
DATA_TYPE = dtyp,
SPATIAL_RESOLUTION = spatRes,
RADIOMETRIC_RESOLUTION = radRes,
stringsAsFactors=FALSE
),
CALRAD = calrad,
CALREF = calref,
CALBT = calbt
)
if(length(bands) == 1 && is.na(bands)) BANDS <- bands <- "1"
rownames(obj$DATA) <- bands
## Re-order DATA
obj$DATA <- obj$DATA[with(obj$DATA, order(factor(CATEGORY, levels = c("image", "pan", "index", "qa")),
.getNumeric(BANDS),
factor(QUANTITY, levels = c("dn", "tra", "tre", "sre", "bt", "idx"))
)),]
structure(obj, class = c("ImageMetaData", "RStoolbox"))
}
#' @method summary ImageMetaData
#' @export
summary.ImageMetaData <- function(object, ...) {
labs <- format(c("Scene:", "Satellite:", "Sensor:", "Date:", "Path/Row:", "Projection:"))
vals <- c(object$SCENE_ID, object$SATELLITE,object$SENSOR,format(object$ACQUISITION_DATE, "%F"), paste(object$PATH_ROW, collapse="/"), projection(object$PROJECTION))
cat(paste(labs, vals), fill =1)
cat("\nData:\n")
print(object$DATA[, c("FILES", "QUANTITY", "CATEGORY")])
hasCal <- vapply(object[c("CALRAD", "CALREF", "CALBT")], is.data.frame, logical(1))
cat("\nAvailable calibration parameters (gain and offset):\n")
if(any(hasCal)) {
cat(c("\tdn -> radiance (toa)", "\tdn -> reflectance (toa)", "\tdn -> brightness temperature (toa)")[hasCal], sep = "\n")
} else {
cat("\tnone")
}
cat("\n")
invisible(NULL)
}
| /R/readMeta.R | no_license | nemochina2008/RStoolbox | R | false | false | 14,504 | r | #' Read Landsat MTL metadata files
#'
#' Reads metadata and deals with legacy versions of Landsat metadata files and where possible adds missing information (radiometric gain and offset, earth-sun distance).
#'
#' @param file path to Landsat MTL file (...MTL.txt)
#' @param raw Logical. If \code{TRUE} the full raw metadata will be returned as a list. if \code{FALSE} (the default) all important metadata are homogenized into a standard format (ImageMetaData) and some information is added.
#' @return Object of class ImageMetaData
#' @export
#' @examples
#' ## Example metadata file (MTL)
#' mtlFile <- system.file("external/landsat/LT52240631988227CUB02_MTL.txt", package="RStoolbox")
#'
#' ## Read metadata
#' metaData <- readMeta(mtlFile)
#'
#' ## Summary
#' summary(metaData)
#'
readMeta <- function(file, raw = FALSE){
## TODO: make modular for additional sensors
if(!file.exists(file)) stop("Metadata file does not exist. Looking for: ", file, call. = FALSE)
if(!grepl("MTL", file) & !grepl("xml", file)) warning("The Landsat metadata file you have specified looks unusual. Typically the filename contains the string 'MTL' or 'xml'. Are you sure you specified the right file? \n I'll try to read it but check the results!")
format <- if(grepl('xml', file)) "XML" else "MTL"
if(format == "MTL") {
## PROCESS LPS MTL FILES
meta <- read.delim(file, sep = "=", header = FALSE, stringsAsFactors = FALSE, strip.white = TRUE, skip = 1, skipNul = TRUE)
meta <- meta[-(nrow(meta)-c(1,0)),]
## Retrieve groups
l <- meta[grep("GROUP",meta[,1]),]
## Assemble metadata list
meta <- lapply(unique(l[,2]), FUN = function(x){
w <- which(meta[,2] == x)
m <- meta[(w[1]+1):(w[2]-1),]
rownames(m) <- m[,1]
m <- m[ , 2, drop = FALSE]
colnames(m) <- "VALUE"
return(m)
})
names(meta) <- unique(l[,2])
num <- grep("IMAGE_AT|MIN_MAX|RESCAL|THERMAL", names(meta))
meta[num] <- lapply(meta[num], function(x) {x[,1] <- as.numeric(x[,1]);x})
if(raw) return(meta)
## Legacy MTL?
legacy <- "PROCESSING_SOFTWARE" %in% rownames(meta$PRODUCT_METADATA)
if(legacy) message("This scene was processed before August 29, 2012. Using MTL legacy format. Some minor infos such as SCENE_ID will be missing")
sat <- paste0("LANDSAT", .getNumeric(meta$PRODUCT_METADATA["SPACECRAFT_ID",]))
sen <- meta$PRODUCT_METADATA["SENSOR_ID",]
scene <- meta$METADATA_FILE_INFO["LANDSAT_SCENE_ID",] ## could assemble name for legacy files: http://landsat.usgs.gov/naming_conventions_scene_identifiers.php
date <- as.POSIXct(if(!legacy) meta$PRODUCT_METADATA["DATE_ACQUIRED",] else meta$PRODUCT_METADATA["ACQUISITION_DATE",])
date <- strptime(paste0(date, meta$PRODUCT_METADATA["SCENE_CENTER_TIME",]), "%Y-%m-%d %H:%M:%S")
pdate <- as.POSIXct(if(!legacy) meta$METADATA_FILE_INFO["FILE_DATE",] else meta$METADATA_FILE_INFO["PRODUCT_CREATION_TIME",])
path <- as.numeric(meta$PRODUCT_METADATA["WRS_PATH",])
row <- if(!legacy) as.numeric(meta$PRODUCT_METADATA["WRS_ROW",]) else as.numeric(meta$PRODUCT_METADATA["STARTING_ROW",])
pars <- meta$PROJECTION_PARAMETERS[c("MAP_PROJECTION","UTM_ZONE","DATUM"),]
pars[1] <- tolower(pars[1])
proj <- CRS(paste0(c("+proj=", "+zone=", "+units=m +datum="), pars, collapse=" "))
files <- row.names(meta[["PRODUCT_METADATA"]])[grep("^.*BAND", row.names(meta$PRODUCT_METADATA))]
files <- meta[["PRODUCT_METADATA"]][files,]
bands <- gsub(paste0(scene,"_|.TIF"), "", files)
bands <- paste0(bands, "_dn")
bands <- gsub("BQA", "QA", bands )
if(trailingZeros <- length(grep("0.TIF", bands)) > 1) stop("Trailing zeros")
quant <- rep("dn", length(bands))
cat <- rep("image", length(bands))
cat[grep("QA", bands)] <- "qa"
cat[grep("B8", bands)] <- "pan"
spatRes <- rep(meta$PROJECTION_PARAMETERS["GRID_CELL_SIZE_REFLECTIVE", ], length(bands))
spatRes[grep("B8", bands)] <- meta$PROJECTION_PARAMETERS["GRID_CELL_SIZE_PANCHROMATIC", ]
spatRes[grep("B6|B10|B11", bands)] <- meta$PROJECTION_PARAMETERS["GRID_CELL_SIZE_THERMAL", ]
spatRes <- as.numeric(spatRes)
na <- NA
az <- if(!legacy) as.numeric(meta$IMAGE_ATTRIBUTES["SUN_AZIMUTH",]) else as.numeric(meta$PRODUCT_PARAMETERS["SUN_AZIMUTH",])
selv <- if(!legacy) as.numeric(meta$IMAGE_ATTRIBUTES["SUN_ELEVATION",]) else as.numeric(meta$PRODUCT_PARAMETERS["SUN_ELEVATION",])
esd <- meta$IMAGE_ATTRIBUTES["EARTH_SUN_DISTANCE",]
if(is.null(esd) || is.na(esd)) esd <- .ESdist(date)
esd <- as.numeric(esd)
vsat <- NA ## or set to max?
scal <- 1
dtyp <- NA
## RADIOMETRIC CORRECTION/RESCALING PARAMETERS
if(!legacy) {
r <- meta$RADIOMETRIC_RESCALING
rnr <- rownames(r)
calrad <- data.frame(offset = r[grep("RADIANCE_ADD*", rownames(r)),], gain = r[grep("RADIANCE_MULT*", rownames(r)),])
calref <- data.frame(offset = r[grep("REFLECTANCE_ADD*", rownames(r)),], gain = r[grep("REFLECTANCE_MULT*", rownames(r)),])
rownames(calrad) <- paste0(gsub("^.*BAND_","B", rnr[grep("RADIANCE_MULT", rnr)]), "_dn")
if(nrow(calref) != 0) rownames(calref) <- paste0(gsub("^.*BAND_","B", rnr[grep("REFLECTANCE_MULT", rnr)]), "_dn") else calref <- NA
} else {
r <- meta$MIN_MAX_RADIANCE
rp <- meta$MIN_MAX_PIXEL_VALUE
rnr <- rownames(r)
e2nd <- seq(1, nrow(r), 2)
L <- diff(r[,1])[e2nd]
Q <- diff(rp[,1])[e2nd]
radg <- L/Q
rado <- r[seq(2,nrow(r),2),1] - radg
calrad <- data.frame(offset = rado, gain = radg)
calref <- NA
rownames(calrad) <- paste0(gsub("^.*BAND_","B", rnr[grep("MAX", rnr)]), "_dn")
}
if(sat == "LANDSAT8"){
r <- meta$TIRS_THERMAL_CONSTANTS
calbt <- data.frame(K1 = r[1:2,] , K2 = r[3:4,])
rownames(calbt) <- c("B10_dn", "B11_dn")
} else {
TAB7 <- list(LANDSAT4 = data.frame(K1=671.62,K2=1284.3), # TAB7 from Chander 2009
LANDSAT5 = data.frame(K1=607.76,K2=1260.56),
LANDSAT7 = data.frame(K1=666.09,K2=1282.71))
calbt <- TAB7[[sat]]
tbds <- bands[grep("B6", bands)]
if(length(tbds)>1) calbt <- rbind(calbt, calbt)
rownames(calbt)<-tbds
}
} else {
## PROCESS ESPA LEDAPS XML FILES
meta <- xmlToList(xmlParse(file))
names(meta$bands) <- str_replace_all(unlist(sapply(meta$bands, "[", "long_name")), " ", "_")
if(raw) return(meta)
luv <- c(dn = "dn", toa_rad = "tra", toa_refl = "tre", toa_bt = "bt", sr_refl = "sre", spectral_indices = "idx", cfmask = "tre")
atts <- sapply(meta$bands, "[", ".attrs")
sat <- paste0("LANDSAT", .getNumeric(meta$global_metadata$satellite))
sen <- meta$global_metadata$instrument
scene <- str_replace(meta$global_metadata$lpgs_metadata_file, "_MTL.txt", "") ## could assemble name for legacy files: http://landsat.usgs.gov/naming_conventions_scene_identifiers.php
date <- as.POSIXct(paste(meta$global_metadata$acquisition_date,meta$global_metadata$scene_center_time), "%Y%m%d %H:%M:%S" )
pdate <- as.POSIXct(meta$bands[[1]]$production_date)
path <- as.numeric(meta$global_metadata$wrs["path"])
row <- as.numeric(meta$global_metadata$wrs["row"])
az <- as.numeric(meta$global_metadata$solar_angles["azimuth"])
selv <- 90 - as.numeric(meta$global_metadata$solar_angles["zenith"])
proj <- CRS(paste0("+proj=utm +zone=",meta$global_metadata$projection_information$utm_proj_params," +datum=WGS84 +units=m"))
esd <- .ESdist(date)
files <- sapply(meta$bands, "[[", "file_name")
quant <- luv[sapply(atts, "[", "product")]
cat <- sapply(atts, "[", "category")
cat[grep("opacity", names(cat))] <- "qa"
bands <- gsub(paste0(scene, "_|.tif"), "", files)
bs <- grepl("_band", files)
bands[bs] <- paste0("B", .getNumeric(bands[bs]), "_", quant[bs])
bands[cat == "qa"] <- paste0("QA_", gsub("sr_|_qa", "", bands[cat == "qa"]))
bands[cat == "index"] <- gsub("SR_", "", toupper(bands[cat == "index"]))
spatRes <- vapply(meta$bands,function(x) x$pixel_size["x"], character(1))
na <- as.numeric(sapply(atts, "[" , "fill_value"))
vsat <- as.numeric(sapply(atts, "[" , "saturate_value"))
scal <- as.numeric(sapply(atts, "[" , "scale_factor"))
dataTypes <- c(INT16 = "INT4S", UINT8 = "INT1U")
dtyp <- dataTypes[as.character(sapply(atts, "[" , "data_type"))]
## Missing
calrad <- calref <- calbt <- NA
}
## Add-on data:
radRes <- if(sat == "LANDSAT8") 16 else 8
obj <- ImageMetaData(file = file, format = format, sat = sat, sen = sen, scene = scene, date = date, pdate = pdate, path = path, radRes=radRes, spatRes = spatRes, row = row, az = az,
selv = selv, esd = esd, files = files, bands = bands, quant = quant, cat = cat, na = na, vsat = vsat, scal = scal, dtyp = dtyp,
calrad=calrad, calref=calref, calbt=calbt, proj = proj)
}
#' ImageMetaData Class
#'
#' @param file Character. Metadata file
#' @param format Character. Metadata format, e.g. xml, mtl
#' @param sat Character. Satellite platform
#' @param sen Character. Sensor
#' @param scene Character. Scene_ID
#' @param proj CRS. Projection.
#' @param date POSIXct. Aquosition date.
#' @param pdate POSIXct. Processing date.
#' @param path Integer. Path.
#' @param row Integer. Row.
#' @param az Numeric. Sun azimuth
#' @param selv Numeric. Sun elevation
#' @param esd Numeric. Earth-sun distance
#' @param files Character vector. Files containing the data, e.g. tiff files
#' @param bands Character vector. Band names
#' @param quant Character vector. Quantity, one of c("dn", "tra", "tre", "sre", "bt", "idx")
#' @param cat Character vector. Category, e.g. c("image", "pan", "index", "qa")
#' @param na Numeric vector. No-data value per band
#' @param vsat Numeric vector. Saturation value per band
#' @param scal Numeric vector. Scale factor per band. e.g. if data was scaled to 1000*reflectance for integer conversion.
#' @param dtyp Character vector. Data type per band. See \code{\link[raster]{dataType}} for options.
#' @param radRes Numeric vector. Radiometric resolution per band.
#' @param spatRes Numeric vector. Spatial resolution per band.
#' @param calrad data.frame. Calibration coefficients for dn->radiance conversion. Must have columns 'gain' and 'offset'. Rows named according to \code{bands}.
#' @param calref data.frame. Calibration coefficients for dn->reflectance conversion. Must have columns 'gain' and 'offset'. Rows named according to \code{bands}.
#' @param calbt data.frame. Calibration coefficients for dn->brightness temperature conversion. Must have columns 'K1' and 'K2'. Rows named according to \code{bands}.
#' @export
ImageMetaData <- function(file = NA, format = NA, sat = NA, sen = NA, scene = NA, proj =NA, date = NA, pdate = NA,path = NA, row = NA, az = NA, selv = NA,
esd = NA, files = NA, bands = NA, quant = NA, cat = NA, na = NA, vsat = NA, scal = NA, dtyp = NA, calrad = NA, calref = NA, calbt = NA, radRes=NA, spatRes = NA){
obj <- list(
METADATA_FILE = file,
METADATA_FORMAT = format,
SATELLITE = sat,
SENSOR = sen,
SCENE_ID = scene,
ACQUISITION_DATE = date,
PROCESSING_DATE = pdate,
PATH_ROW = c(path=path,row=row),
PROJECTION = proj,
SOLAR_PARAMETERS = c(azimuth=az, elevation = selv, distance = esd),
DATA = data.frame(
FILES = files,
BANDS = bands,
QUANTITY = quant,
CATEGORY = cat,
NA_VALUE = na,
SATURATE_VALUE = vsat,
SCALE_FACTOR = scal,
DATA_TYPE = dtyp,
SPATIAL_RESOLUTION = spatRes,
RADIOMETRIC_RESOLUTION = radRes,
stringsAsFactors=FALSE
),
CALRAD = calrad,
CALREF = calref,
CALBT = calbt
)
if(length(bands) == 1 && is.na(bands)) BANDS <- bands <- "1"
rownames(obj$DATA) <- bands
## Re-order DATA
obj$DATA <- obj$DATA[with(obj$DATA, order(factor(CATEGORY, levels = c("image", "pan", "index", "qa")),
.getNumeric(BANDS),
factor(QUANTITY, levels = c("dn", "tra", "tre", "sre", "bt", "idx"))
)),]
structure(obj, class = c("ImageMetaData", "RStoolbox"))
}
#' @method summary ImageMetaData
#' @export
summary.ImageMetaData <- function(object, ...) {
labs <- format(c("Scene:", "Satellite:", "Sensor:", "Date:", "Path/Row:", "Projection:"))
vals <- c(object$SCENE_ID, object$SATELLITE,object$SENSOR,format(object$ACQUISITION_DATE, "%F"), paste(object$PATH_ROW, collapse="/"), projection(object$PROJECTION))
cat(paste(labs, vals), fill =1)
cat("\nData:\n")
print(object$DATA[, c("FILES", "QUANTITY", "CATEGORY")])
hasCal <- vapply(object[c("CALRAD", "CALREF", "CALBT")], is.data.frame, logical(1))
cat("\nAvailable calibration parameters (gain and offset):\n")
if(any(hasCal)) {
cat(c("\tdn -> radiance (toa)", "\tdn -> reflectance (toa)", "\tdn -> brightness temperature (toa)")[hasCal], sep = "\n")
} else {
cat("\tnone")
}
cat("\n")
invisible(NULL)
}
|
fem.sparse <- function(Y,K,maxit,eps,Tinit,model,method='reg',l1,nbit,l2){
colnames = colnames(Y)
if (length(l1)!=1 | l1>1){cat('\n','The l1 penalty term is a single figure comprises between 0 and 1','\n'); break}
# Initialization
Y = as.matrix(Y)
n = nrow(Y)
p = ncol(Y)
d = min((K-1),(p-1))
# New objects
Lobs = rep(c(-Inf),1,(maxit+1))
#
# Initialization of T
T = Tinit
V = fstep.sparse(Y,T,l1,nbit,l2)
prms = fem.mstep(Y,V,T,model=model,method=method)
res.estep = fem.estep(prms,Y,V)
T = res.estep$T
Lobs[1] = res.estep$loglik
# Main loop
Linf_new = Lobs[1]
for (i in 1:maxit){
# The three main steps F, M and E
V = fstep.sparse(Y,T,l1,nbit,l2)
prms = fem.mstep(Y,V,T,model=model,method=method)
res.estep = fem.estep(prms,Y,V)
T = res.estep$T
Lobs[i+1] = res.estep$loglik
# Stop criterion
if (i>=2){
acc = (Lobs[i+1] - Lobs[i]) / (Lobs[i] - Lobs[i-1])
Linf_old = Linf_new
Linf_new <- try( Lobs[i] + 1/(1-acc) * (Lobs[i+1] - Lobs[i]))
if (abs(Linf_new - Linf_old) < eps) {break}
}
}
# Returning the results
cls = max.col(T)
crit = fem.criteria(Lobs[(i+1)],T,prms,n)
rownames(V) = colnames
colnames(V) = paste('U',1:d,sep='')
res = list(K=K,cls=cls,P=T,U=V,aic=crit$aic,mean=prms$mean,my=prms$my,prop=prms$prop,D=prms$D,model=prms$model,bic=crit$bic,icl=crit$icl,loglik=Lobs[2:(i+1)],ll=Lobs[i+1],method=method)
res
}
| /FisherEM/R/fem.sparse.R | no_license | ingted/R-Examples | R | false | false | 1,545 | r | fem.sparse <- function(Y,K,maxit,eps,Tinit,model,method='reg',l1,nbit,l2){
colnames = colnames(Y)
if (length(l1)!=1 | l1>1){cat('\n','The l1 penalty term is a single figure comprises between 0 and 1','\n'); break}
# Initialization
Y = as.matrix(Y)
n = nrow(Y)
p = ncol(Y)
d = min((K-1),(p-1))
# New objects
Lobs = rep(c(-Inf),1,(maxit+1))
#
# Initialization of T
T = Tinit
V = fstep.sparse(Y,T,l1,nbit,l2)
prms = fem.mstep(Y,V,T,model=model,method=method)
res.estep = fem.estep(prms,Y,V)
T = res.estep$T
Lobs[1] = res.estep$loglik
# Main loop
Linf_new = Lobs[1]
for (i in 1:maxit){
# The three main steps F, M and E
V = fstep.sparse(Y,T,l1,nbit,l2)
prms = fem.mstep(Y,V,T,model=model,method=method)
res.estep = fem.estep(prms,Y,V)
T = res.estep$T
Lobs[i+1] = res.estep$loglik
# Stop criterion
if (i>=2){
acc = (Lobs[i+1] - Lobs[i]) / (Lobs[i] - Lobs[i-1])
Linf_old = Linf_new
Linf_new <- try( Lobs[i] + 1/(1-acc) * (Lobs[i+1] - Lobs[i]))
if (abs(Linf_new - Linf_old) < eps) {break}
}
}
# Returning the results
cls = max.col(T)
crit = fem.criteria(Lobs[(i+1)],T,prms,n)
rownames(V) = colnames
colnames(V) = paste('U',1:d,sep='')
res = list(K=K,cls=cls,P=T,U=V,aic=crit$aic,mean=prms$mean,my=prms$my,prop=prms$prop,D=prms$D,model=prms$model,bic=crit$bic,icl=crit$icl,loglik=Lobs[2:(i+1)],ll=Lobs[i+1],method=method)
res
}
|
#############################################################################
#
# XLConnect
# Copyright (C) 2010-2013 Mirai Solutions GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
#############################################################################
#
# Tests around clearing ranges from references in an Excel Workbook
#
# Author: Nicola Lambiase, Mirai Solutions GmbH
#
#############################################################################
test.workbook.clearRangeFromReference <- function() {
# Create workbooks
wb.xls <- loadWorkbook(rsrc("resources/testWorkbookClearCells.xls"), create = FALSE)
wb.xlsx <- loadWorkbook(rsrc("resources/testWorkbookClearCells.xlsx"), create = FALSE)
checkDf <- data.frame(
"one" = 1:5,
"two" = c(NA, NA, 8, 9, 10),
"three" = c(NA, NA, 13, 14, 15),
"four" = 16:20,
"five" = c(21, 22, NA, NA, 25),
"six" = c(26, 27, NA, NA, 30),
"seven" = 31:35,
stringsAsFactors = F
)
# Check that clearing ranges from references returns the desired result (*.xls)
clearRangeFromReference(wb.xls, c("clearRangeFromReference!D4:E5", "clearRangeFromReference!G6:H7"))
res <- readWorksheet(wb.xls, "clearRangeFromReference", region = "C3:I8", header = TRUE)
checkEquals(res, checkDf)
# Check that clearing ranges from references returns the desired result (*.xlsx)
clearRangeFromReference(wb.xlsx, c("clearRangeFromReference!D4:E5", "clearRangeFromReference!G6:H7"))
res <- readWorksheet(wb.xlsx, "clearRangeFromReference", region = "C3:I8", header = TRUE)
checkEquals(res, checkDf)
} | /inst/unitTests/runit.workbook.clearRangeFromReference.R | no_license | tk3369/xlconnect | R | false | false | 2,293 | r | #############################################################################
#
# XLConnect
# Copyright (C) 2010-2013 Mirai Solutions GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
#############################################################################
#
# Tests around clearing ranges from references in an Excel Workbook
#
# Author: Nicola Lambiase, Mirai Solutions GmbH
#
#############################################################################
test.workbook.clearRangeFromReference <- function() {
# Create workbooks
wb.xls <- loadWorkbook(rsrc("resources/testWorkbookClearCells.xls"), create = FALSE)
wb.xlsx <- loadWorkbook(rsrc("resources/testWorkbookClearCells.xlsx"), create = FALSE)
checkDf <- data.frame(
"one" = 1:5,
"two" = c(NA, NA, 8, 9, 10),
"three" = c(NA, NA, 13, 14, 15),
"four" = 16:20,
"five" = c(21, 22, NA, NA, 25),
"six" = c(26, 27, NA, NA, 30),
"seven" = 31:35,
stringsAsFactors = F
)
# Check that clearing ranges from references returns the desired result (*.xls)
clearRangeFromReference(wb.xls, c("clearRangeFromReference!D4:E5", "clearRangeFromReference!G6:H7"))
res <- readWorksheet(wb.xls, "clearRangeFromReference", region = "C3:I8", header = TRUE)
checkEquals(res, checkDf)
# Check that clearing ranges from references returns the desired result (*.xlsx)
clearRangeFromReference(wb.xlsx, c("clearRangeFromReference!D4:E5", "clearRangeFromReference!G6:H7"))
res <- readWorksheet(wb.xlsx, "clearRangeFromReference", region = "C3:I8", header = TRUE)
checkEquals(res, checkDf)
} |
# Importing the dataset
dataset = read.csv('Position_Salaries.csv')
dataset = dataset[2:3]
#install.packages('rpart')
library(rpart)
regressor = rpart(formula = Salary ~ .,
data = dataset,
control = rpart.control(minsplit = 1))
# Predicting a new result
y_pred = predict(regressor, data.frame(Level = 6.5))
#install.packages('ggplot2')
library(ggplot2)
# The code below is not high reolution therefore the graph is wrong
# Get the high resolution code from the regression template
# ggplot() +
# geom_point(aes(x = dataset$Level, y = dataset$Salary),
# colour = 'red') +
# geom_line(aes(x=dataset$Level, y = predict(regressor, newdata = dataset)),
# colour = 'blue') +
# ggtitle('Truth or Bluff ( Decision Tree Regression') +
# xlab('Level') +
# ylab('Salary')
# The high resolution template
library(ggplot2)
x_grid = seq(min(dataset$Level), max(dataset$Level), 0.1)
ggplot() +
geom_point(aes(x = dataset$Level, y = dataset$Salary),
colour = 'red') +
geom_line(aes(x = x_grid, y = predict(regressor, newdata = data.frame(Level = x_grid))),
colour = 'blue') +
ggtitle('Truth or Bluff (Regression Model)') +
xlab('Level') +
ylab('Salary') | /Section 8 - Decision Tree Regression/R_version_mine.R | no_license | nimakondori/Udemy_ML_DL | R | false | false | 1,252 | r | # Importing the dataset
dataset = read.csv('Position_Salaries.csv')
dataset = dataset[2:3]
#install.packages('rpart')
library(rpart)
regressor = rpart(formula = Salary ~ .,
data = dataset,
control = rpart.control(minsplit = 1))
# Predicting a new result
y_pred = predict(regressor, data.frame(Level = 6.5))
#install.packages('ggplot2')
library(ggplot2)
# The code below is not high reolution therefore the graph is wrong
# Get the high resolution code from the regression template
# ggplot() +
# geom_point(aes(x = dataset$Level, y = dataset$Salary),
# colour = 'red') +
# geom_line(aes(x=dataset$Level, y = predict(regressor, newdata = dataset)),
# colour = 'blue') +
# ggtitle('Truth or Bluff ( Decision Tree Regression') +
# xlab('Level') +
# ylab('Salary')
# The high resolution template
library(ggplot2)
x_grid = seq(min(dataset$Level), max(dataset$Level), 0.1)
ggplot() +
geom_point(aes(x = dataset$Level, y = dataset$Salary),
colour = 'red') +
geom_line(aes(x = x_grid, y = predict(regressor, newdata = data.frame(Level = x_grid))),
colour = 'blue') +
ggtitle('Truth or Bluff (Regression Model)') +
xlab('Level') +
ylab('Salary') |
# Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
options(survey.lonely.psu="adjust")
# Load FYC file
FYC <- read.xport('C:/MEPS/h70.ssp');
year <- 2002
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE02X, AGE42X, AGE31X))
FYC$ind = 1
# Reason for difficulty receiving needed prescribed medicines
FYC <- FYC %>%
mutate(delay_PM = (PMUNAB42 == 1 | PMDLAY42 == 1)*1,
afford_PM = (PMDLRS42 == 1 | PMUNRS42 == 1)*1,
insure_PM = (PMDLRS42 %in% c(2,3) | PMUNRS42 %in% c(2,3))*1,
other_PM = (PMDLRS42 > 3 | PMUNRS42 > 3)*1)
# Perceived mental health
if(year == 1996)
FYC <- FYC %>% mutate(MNHLTH53 = MNTHLTH2, MNHLTH42 = MNTHLTH2, MNHLTH31 = MNTHLTH1)
FYC <- FYC %>%
mutate_at(vars(starts_with("MNHLTH")), funs(replace(., .< 0, NA))) %>%
mutate(mnhlth = coalesce(MNHLTH53, MNHLTH42, MNHLTH31)) %>%
mutate(mnhlth = recode_factor(mnhlth, .default = "Missing",
"1" = "Excellent",
"2" = "Very good",
"3" = "Good",
"4" = "Fair",
"5" = "Poor"))
FYCdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~PERWT02F,
data = FYC,
nest = TRUE)
svyby(~afford_PM + insure_PM + other_PM, FUN = svytotal, by = ~mnhlth, design = subset(FYCdsgn, ACCELI42==1 & delay_PM==1))
| /_check/test_code/care/r_codes/totPOP_mnhlth_rsn_PM_2002.R | permissive | HHS-AHRQ/MEPS-summary-tables | R | false | false | 1,592 | r | # Install and load packages
package_names <- c("survey","dplyr","foreign","devtools")
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
options(survey.lonely.psu="adjust")
# Load FYC file
FYC <- read.xport('C:/MEPS/h70.ssp');
year <- 2002
FYC <- FYC %>%
mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
mutate(AGELAST = coalesce(AGE02X, AGE42X, AGE31X))
FYC$ind = 1
# Reason for difficulty receiving needed prescribed medicines
FYC <- FYC %>%
mutate(delay_PM = (PMUNAB42 == 1 | PMDLAY42 == 1)*1,
afford_PM = (PMDLRS42 == 1 | PMUNRS42 == 1)*1,
insure_PM = (PMDLRS42 %in% c(2,3) | PMUNRS42 %in% c(2,3))*1,
other_PM = (PMDLRS42 > 3 | PMUNRS42 > 3)*1)
# Perceived mental health
if(year == 1996)
FYC <- FYC %>% mutate(MNHLTH53 = MNTHLTH2, MNHLTH42 = MNTHLTH2, MNHLTH31 = MNTHLTH1)
FYC <- FYC %>%
mutate_at(vars(starts_with("MNHLTH")), funs(replace(., .< 0, NA))) %>%
mutate(mnhlth = coalesce(MNHLTH53, MNHLTH42, MNHLTH31)) %>%
mutate(mnhlth = recode_factor(mnhlth, .default = "Missing",
"1" = "Excellent",
"2" = "Very good",
"3" = "Good",
"4" = "Fair",
"5" = "Poor"))
FYCdsgn <- svydesign(
id = ~VARPSU,
strata = ~VARSTR,
weights = ~PERWT02F,
data = FYC,
nest = TRUE)
svyby(~afford_PM + insure_PM + other_PM, FUN = svytotal, by = ~mnhlth, design = subset(FYCdsgn, ACCELI42==1 & delay_PM==1))
|
#FIG. 9 - Publish types
groupType <- adSt %>% group_by(document_type, publication_year, actor_id) %>% dplyr::summarise(n = n())
fig9 <- ggplot(data = groupType, aes(publication_year, y = n, color = actor_id)) +
geom_line(aes(linetype=document_type)) +
geom_point() +
scale_color_hue(labels = c("Steele", "Addison"))+
ggtitle("Publish types - Addison & Steele")
png(file="../../../output/figures/fig9_pub_types.png",
width=1200, height=700)
print(fig9)
dev.off() | /code/work/kivisto/fig9.R | no_license | mikkosk/project_course_addison_steele_spectator_in_estc | R | false | false | 478 | r | #FIG. 9 - Publish types
groupType <- adSt %>% group_by(document_type, publication_year, actor_id) %>% dplyr::summarise(n = n())
fig9 <- ggplot(data = groupType, aes(publication_year, y = n, color = actor_id)) +
geom_line(aes(linetype=document_type)) +
geom_point() +
scale_color_hue(labels = c("Steele", "Addison"))+
ggtitle("Publish types - Addison & Steele")
png(file="../../../output/figures/fig9_pub_types.png",
width=1200, height=700)
print(fig9)
dev.off() |
#LIM
###########################################
# Define UI for dataset viewer app ----
library(shiny)
library(EBMAforecast)
data("presidentialForecast")
attach(presidentialForecast)
presidentialForecast$years<- seq(1952, 2008, 4)
ui <- fluidPage(
headerPanel("Presidential Forecasts"),
# App title ----
titlePanel("Results of presidential forecasts from 1952-2008"),
# Sidebar layout with a input and output definitions ----
sidebarLayout(
# Sidebar panel for inputs ----
sidebarPanel(
# Input: Selector for choosing dataset ----
selectInput("variable", "Projection type:",
c("None" = NULL,
"Campbell"="Campbell",
"Hibbs"="Hibbs",
"Fair"="Fair",
"EWT2C2"="EWT2C2",
"Abramowitz"="Abramowitz")),
sliderInput("range", label="Observed Years",
min=min(1952),
max=max(2008),
value=c(1952, 2008), step=4)
# Input: Numeric entry for number of obs to view ----
),
# Main panel for displaying outputs ----
mainPanel(
# Output: Verbatim text for caption ----
h3(textOutput("caption")),
# Output: ----
plotOutput("forecastPlot", click="plot_click"),
verbatimTextOutput("info")
)
)
)
# Define server logic to summarize and view selected dataset ----
server <- function(input, output) {
# Return the requested dataset ----
datasetInput <- reactive({
switch(input$presidentialForecast,
"Campbell" = Campbell,
"Hibbs" = Hibbs,
"Fair" = Fair)
})
formulaText <- reactive({
paste("Yearly forecast according to", input$presidentialForecast)
})
# Generate a summary of the dataset ----
output$forecastPlot <- renderPlot({
plot(presidentialForecast$years, ylim=c(40,66), presidentialForecast[,input$variable], xlab="Year", ylab="Presidential Vote Share", "l", col="red", lty=2)
lines(presidentialForecast$years, presidentialForecast$Actual)
legend("topright", legend=c("Predicted Result", "Actual Result"), col=c("red", "black"), lty=c(2,1), cex=0.8)
output$info <- renderText({
paste0("x=", input$plot_click$x, "\ny=", input$plot_click$y)})
})}
# Create Shiny app ----
shinyApp(ui = ui, server = server)
as.matrix(presidentialForecast) | /Lim_app.R | no_license | HungryLim/App | R | false | false | 2,440 | r | #LIM
###########################################
# Define UI for dataset viewer app ----
library(shiny)
library(EBMAforecast)
data("presidentialForecast")
attach(presidentialForecast)
presidentialForecast$years<- seq(1952, 2008, 4)
ui <- fluidPage(
headerPanel("Presidential Forecasts"),
# App title ----
titlePanel("Results of presidential forecasts from 1952-2008"),
# Sidebar layout with a input and output definitions ----
sidebarLayout(
# Sidebar panel for inputs ----
sidebarPanel(
# Input: Selector for choosing dataset ----
selectInput("variable", "Projection type:",
c("None" = NULL,
"Campbell"="Campbell",
"Hibbs"="Hibbs",
"Fair"="Fair",
"EWT2C2"="EWT2C2",
"Abramowitz"="Abramowitz")),
sliderInput("range", label="Observed Years",
min=min(1952),
max=max(2008),
value=c(1952, 2008), step=4)
# Input: Numeric entry for number of obs to view ----
),
# Main panel for displaying outputs ----
mainPanel(
# Output: Verbatim text for caption ----
h3(textOutput("caption")),
# Output: ----
plotOutput("forecastPlot", click="plot_click"),
verbatimTextOutput("info")
)
)
)
# Define server logic to summarize and view selected dataset ----
server <- function(input, output) {
# Return the requested dataset ----
datasetInput <- reactive({
switch(input$presidentialForecast,
"Campbell" = Campbell,
"Hibbs" = Hibbs,
"Fair" = Fair)
})
formulaText <- reactive({
paste("Yearly forecast according to", input$presidentialForecast)
})
# Generate a summary of the dataset ----
output$forecastPlot <- renderPlot({
plot(presidentialForecast$years, ylim=c(40,66), presidentialForecast[,input$variable], xlab="Year", ylab="Presidential Vote Share", "l", col="red", lty=2)
lines(presidentialForecast$years, presidentialForecast$Actual)
legend("topright", legend=c("Predicted Result", "Actual Result"), col=c("red", "black"), lty=c(2,1), cex=0.8)
output$info <- renderText({
paste0("x=", input$plot_click$x, "\ny=", input$plot_click$y)})
})}
# Create Shiny app ----
shinyApp(ui = ui, server = server)
as.matrix(presidentialForecast) |
#Regression
cur_est_smokers_w1 <- adult_panel %>%
filter(current_est_smoker_w1==1)
mod2 <- glm(factor(smoking_status_full_w3) ~ race_ethnicity_w3 +
gender_w3 + sexual_orientation_w3 +
education_w3 + poverty_w1 + region_w1 +
age_w1 + psychdist_w1,
data = cur_est_smokers_w1,
family =binomial)
summary(mod2)
table_star <- cbind(summary(mod2)$coefficients[, 1] %>% exp,
summary(mod2)$coefficients[, 2] %>% exp,
summary(mod2)$coefficients[, 3:4])
table_star <- as.data.frame(table_star)
table_star$Sig <- as.character(ifelse(summary(mod2)$coefficients[, 4] < .05, '*', '0'))
names(table_star)[1:2] <- c('OR', 'Std_Err')
write.csv(table_star,
'Logistic_Regression_Smoking_Status_W3_Among_W1_Cur_Est_Smokers.csv')
#### Multinomial ####
mod4 <- multinom(factor(smoking_status_full_w3) ~ race_ethnicity_w3 +
gender_w3 + sexual_orientation_w3 +
education_w3 + poverty_w1 + region_w1 +
age_w1 + psychdist_w1 + factor(smoking_status_full_w1),
data = adult_panel)
summary(mod4)
cur_est_smokers_w1$smoking_status_full_w3 %>% as.factor %>% levels
levels(adult_w3$ education_w3)
factor(cur_est_smokers_w1$smoking_status_full_w3)
multinom(q) | /path_analysis.R | no_license | wspagnola/path_study | R | false | false | 1,335 | r | #Regression
cur_est_smokers_w1 <- adult_panel %>%
filter(current_est_smoker_w1==1)
mod2 <- glm(factor(smoking_status_full_w3) ~ race_ethnicity_w3 +
gender_w3 + sexual_orientation_w3 +
education_w3 + poverty_w1 + region_w1 +
age_w1 + psychdist_w1,
data = cur_est_smokers_w1,
family =binomial)
summary(mod2)
table_star <- cbind(summary(mod2)$coefficients[, 1] %>% exp,
summary(mod2)$coefficients[, 2] %>% exp,
summary(mod2)$coefficients[, 3:4])
table_star <- as.data.frame(table_star)
table_star$Sig <- as.character(ifelse(summary(mod2)$coefficients[, 4] < .05, '*', '0'))
names(table_star)[1:2] <- c('OR', 'Std_Err')
write.csv(table_star,
'Logistic_Regression_Smoking_Status_W3_Among_W1_Cur_Est_Smokers.csv')
#### Multinomial ####
mod4 <- multinom(factor(smoking_status_full_w3) ~ race_ethnicity_w3 +
gender_w3 + sexual_orientation_w3 +
education_w3 + poverty_w1 + region_w1 +
age_w1 + psychdist_w1 + factor(smoking_status_full_w1),
data = adult_panel)
summary(mod4)
cur_est_smokers_w1$smoking_status_full_w3 %>% as.factor %>% levels
levels(adult_w3$ education_w3)
factor(cur_est_smokers_w1$smoking_status_full_w3)
multinom(q) |
# load_data
#
# Author: Roger Beecham
###############################################################################
# Origin-Desination data by occupation published via UK Data Service.
# https://www.ukdataservice.ac.uk. Table : WU07AUK.
data <- read_csv("./data/wicid_output_occupation.csv")
# Overall counts (not just within London) -- otherwise edge effects to self-containment scores.
# Read in data describing borough-wise total jobs (demand) and total workers (supply) by occupation.
all_demand <- read_csv("/Users/roger/-/git/visualizing-self-containment/data/msoa_destinations_occupation.csv") %>%
inner_join(temp_lookup, by=c("destination_msoa"="msoa_code")) %>%
group_by(lad_code) %>%
summarise_at(vars(`1_managers_senior`:`9_elementary`), funs(sum(.))) %>%
ungroup() %>% group_by(lad_code) %>%
mutate(all_jobs=sum(`1_managers_senior`,
`2_professional`,
`3_associate_professional`,
`4_administrative`,
`5_trade`,
`6_caring_leisure`,
`7_sales_customer`,
`8_machine_operatives`,
`9_elementary`),
prof_jobs=sum(`1_managers_senior`,`2_professional`, `3_associate_professional`),
non_prof_jobs=all_jobs-prof_jobs) %>%
select(lad_code, all_jobs, prof_jobs, non_prof_jobs)
all_supply <- read_csv("./data/msoa_origins_occupation.csv") %>%
inner_join(temp_lookup, by=c("origin_msoa"="msoa_code")) %>%
group_by(lad_code) %>%
summarise_at(vars(`1_managers_senior`:`9_elementary`), funs(sum(.))) %>%
ungroup() %>% group_by(lad_code) %>%
mutate(all_workers=sum(`1_managers_senior`,
`2_professional`,
`3_associate_professional`,
`4_administrative`,
`5_trade`,
`6_caring_leisure`,
`7_sales_customer`,
`8_machine_operatives`,
`9_elementary`),
prof_workers=sum(`1_managers_senior`,`2_professional`, `3_associate_professional`),
non_prof_workers=all_workers-prof_workers) %>%
select(lad_code, all_workers, prof_workers, non_prof_workers)
# Get msoa_lad lookup.
# From: https://ons.maps.arcgis.com/home/item.html?id=552fd4886ebe417fab71da61555d4f8a
temp_lookup <- read_csv("./data/msoa_lad_lookup.csv") %>%
rename("msoa_code"="MSOA11CD") %>%
group_by(msoa_code) %>%
summarise(msoa_name=first(MSOA11NM),
lad_code=first(LAD11CD),
lad_name=first(LAD11NM))
# Upload london_squared layout: https://aftertheflood.com/projects/future-cities-catapult/
london_squared <- read_csv("./data/london_squared.csv") %>% select(fX,fY, authority, BOR, panel) %>% mutate(panel=as.factor(panel))
# Reverse northing cells.
max_y <- max(london_squared$fY)
min_y <- min(london_squared$fY)
london_squared <- london_squared %>% mutate(fY=map_scale(fY, min_y, max_y, max_y, min_y))
rm(min_y,max_y)
# Inner_join on london_squared and temp_lookup to filter out London msoas in lookup table.
temp_lookup <- temp_lookup %>% inner_join(london_squared %>% select(authority), by=c("lad_name"="authority"))
# Lookup to LAD: summarise over occupation.
data <- data %>%
# origin lookup
inner_join(temp_lookup, by=c("origin_msoa"="msoa_code")) %>%
rename("o_msoa"="origin_msoa", "o_lad"="lad_code") %>%
select(o_msoa:`9_elementary`,o_lad) %>%
# destination lookup
inner_join(temp_lookup, by=c("destination_msoa"="msoa_code")) %>% rename("d_msoa"="destination_msoa", "d_lad"="lad_code") %>%
select(-msoa_name)
rm(temp_lookup)
# London has 983 msoas, so can check lookup with.
data %>% group_by(o_msoa) %>% summarise() %>% nrow()
# And 33 LADs.
data %>% group_by(o_lad) %>% summarise() %>% nrow()
# Summarise over LADs : on occupation
data <- data %>%
mutate(od_pair=paste(o_lad, d_lad, sep="-")) %>%
group_by(od_pair, o_lad, d_lad) %>%
summarise_at(vars(all:`9_elementary`), funs(sum(.)))
data <- data %>% ungroup()
# Read in shapefile containing GB LAD boundaries.
# Made available from ONS Open Geography Portal.
download.file("http://geoportal.statistics.gov.uk/datasets/8edafbe3276d4b56aec60991cbddda50_3.zip", "boundaries_gb.zip")
unzip("boundaries_gb.zip")
gb_boundaries <- read_sf("Local_Authority_Districts_December_2015_Super_Generalised_Clipped_Boundaries_in_Great_Britain.shp")
# Set CRS to OSGB.
gb_boundaries <- st_transform(gb_boundaries, crs=27700)
# Simplify polygon.
gb_boundaries <- ms_simplify(gb_boundaries, keep=0.2)
# Inner_join with data to filter London LADs.
temp_london_lads <- data %>% group_by(o_lad) %>% summarise() %>% rename("ladcd"="o_lad")
london_boundaries <- gb_boundaries %>%
inner_join(temp_london_lads, by=c("lad15cd"="ladcd"))
rm(gb_boundaries)
# Calculate real centroids of LADs.
london_centroids <- london_boundaries %>%
st_centroid() %>%
st_coordinates() %>%
as_tibble() %>%
rename("east"="X", "north"="Y") %>%
add_column(ladcd=london_boundaries %>% pull(lad15cd))
# Add centroids to OD data.
data <- data %>% left_join(london_centroids %>% select(east, north, ladcd), by=c("o_lad"="ladcd")) %>% rename("o_east"="east", "o_north"="north")
data <- data %>% left_join(london_centroids %>% select(east, north, ladcd), by=c("d_lad"="ladcd")) %>% rename("d_east"="east", "d_north"="north")
# Add in london_squared data.
london_centroids <- london_centroids %>% add_column(ladnm=london_boundaries %>% pull(lad15nm)) %>%
left_join(london_squared, by=c("ladnm"="authority"))
# Add fX fY postions for OD data.
data <- data %>%
left_join(london_centroids %>% select(ladcd, fX, fY, BOR), by=c("o_lad"="ladcd")) %>%
rename("o_fX"="fX", "o_fY"="fY", "o_bor"="BOR") %>%
left_join(london_centroids %>% select(ladcd, fX, fY, BOR), by=c("d_lad"="ladcd")) %>%
rename("d_fX"="fX", "d_fY"="fY", "d_bor"="BOR")
# Demand-side summary.
demand_side <- data %>%
group_by(od_pair) %>%
mutate(
prof=sum(`1_managers_senior`,`2_professional`, `3_associate_professional`),
non_prof=all-prof
) %>%
ungroup() %>%
select(od_pair, d_bor, o_bor, d_fY, d_fX, o_fY, o_fX, o_lad, d_lad, prof, non_prof) %>%
group_by(d_bor) %>%
mutate(
prof_total_jobs=sum(prof),
# Jobs filled by local residents.
prof_demand_side=if_else(o_bor==d_bor, prof/prof_total_jobs,0),
prof_max_jobs=max(prof),
prof_rank=row_number(desc(prof)),
non_prof_total_jobs=sum(non_prof),
# Jobs filled by local residents.
non_prof_demand_side=if_else(o_bor==d_bor, non_prof/non_prof_total_jobs,0),
non_prof_rank=row_number(desc(non_prof)),
non_prof_max_jobs=max(non_prof),
bor_label=if_else(d_bor==o_bor,d_bor,"")) %>%
ungroup() %>%
rename("bor_focus"="d_bor", "fY"="d_fY", "fX"="d_fX")
# Supply-side summary.
supply_side <- data %>%
group_by(od_pair) %>%
mutate(
prof=sum(`1_managers_senior`,`2_professional`, `3_associate_professional`),
non_prof=all-prof
) %>%
ungroup() %>%
select(od_pair, d_bor, o_bor, o_fY, o_fX, d_fX, d_fY, o_lad, d_lad, prof, non_prof) %>%
group_by(o_bor) %>%
mutate(
prof_total_workers=sum(prof),
prof_supply_side=if_else(o_bor==d_bor, prof/prof_total_workers,0),
prof_rank=row_number(desc(prof)),
prof_max_workers=max(prof),
non_prof_total_workers=sum(non_prof),
non_prof_supply_side=if_else(o_bor==d_bor, non_prof/non_prof_total_workers,0),
non_prof_rank=row_number(desc(non_prof)),
non_prof_max_workers=max(non_prof),
bor_label=if_else(d_bor==o_bor,d_bor,"")) %>%
ungroup() %>%
rename("bor_focus"="o_bor", "fY"="o_fY", "fX"="o_fX")
| /src/load_data.R | permissive | rogerbeecham/visualizing-self-containment | R | false | false | 7,607 | r | # load_data
#
# Author: Roger Beecham
###############################################################################
# Origin-Desination data by occupation published via UK Data Service.
# https://www.ukdataservice.ac.uk. Table : WU07AUK.
data <- read_csv("./data/wicid_output_occupation.csv")
# Overall counts (not just within London) -- otherwise edge effects to self-containment scores.
# Read in data describing borough-wise total jobs (demand) and total workers (supply) by occupation.
all_demand <- read_csv("/Users/roger/-/git/visualizing-self-containment/data/msoa_destinations_occupation.csv") %>%
inner_join(temp_lookup, by=c("destination_msoa"="msoa_code")) %>%
group_by(lad_code) %>%
summarise_at(vars(`1_managers_senior`:`9_elementary`), funs(sum(.))) %>%
ungroup() %>% group_by(lad_code) %>%
mutate(all_jobs=sum(`1_managers_senior`,
`2_professional`,
`3_associate_professional`,
`4_administrative`,
`5_trade`,
`6_caring_leisure`,
`7_sales_customer`,
`8_machine_operatives`,
`9_elementary`),
prof_jobs=sum(`1_managers_senior`,`2_professional`, `3_associate_professional`),
non_prof_jobs=all_jobs-prof_jobs) %>%
select(lad_code, all_jobs, prof_jobs, non_prof_jobs)
all_supply <- read_csv("./data/msoa_origins_occupation.csv") %>%
inner_join(temp_lookup, by=c("origin_msoa"="msoa_code")) %>%
group_by(lad_code) %>%
summarise_at(vars(`1_managers_senior`:`9_elementary`), funs(sum(.))) %>%
ungroup() %>% group_by(lad_code) %>%
mutate(all_workers=sum(`1_managers_senior`,
`2_professional`,
`3_associate_professional`,
`4_administrative`,
`5_trade`,
`6_caring_leisure`,
`7_sales_customer`,
`8_machine_operatives`,
`9_elementary`),
prof_workers=sum(`1_managers_senior`,`2_professional`, `3_associate_professional`),
non_prof_workers=all_workers-prof_workers) %>%
select(lad_code, all_workers, prof_workers, non_prof_workers)
# Get msoa_lad lookup.
# From: https://ons.maps.arcgis.com/home/item.html?id=552fd4886ebe417fab71da61555d4f8a
temp_lookup <- read_csv("./data/msoa_lad_lookup.csv") %>%
rename("msoa_code"="MSOA11CD") %>%
group_by(msoa_code) %>%
summarise(msoa_name=first(MSOA11NM),
lad_code=first(LAD11CD),
lad_name=first(LAD11NM))
# Upload london_squared layout: https://aftertheflood.com/projects/future-cities-catapult/
london_squared <- read_csv("./data/london_squared.csv") %>% select(fX,fY, authority, BOR, panel) %>% mutate(panel=as.factor(panel))
# Reverse northing cells.
max_y <- max(london_squared$fY)
min_y <- min(london_squared$fY)
london_squared <- london_squared %>% mutate(fY=map_scale(fY, min_y, max_y, max_y, min_y))
rm(min_y,max_y)
# Inner_join on london_squared and temp_lookup to filter out London msoas in lookup table.
temp_lookup <- temp_lookup %>% inner_join(london_squared %>% select(authority), by=c("lad_name"="authority"))
# Lookup to LAD: summarise over occupation.
data <- data %>%
# origin lookup
inner_join(temp_lookup, by=c("origin_msoa"="msoa_code")) %>%
rename("o_msoa"="origin_msoa", "o_lad"="lad_code") %>%
select(o_msoa:`9_elementary`,o_lad) %>%
# destination lookup
inner_join(temp_lookup, by=c("destination_msoa"="msoa_code")) %>% rename("d_msoa"="destination_msoa", "d_lad"="lad_code") %>%
select(-msoa_name)
rm(temp_lookup)
# London has 983 msoas, so can check lookup with.
data %>% group_by(o_msoa) %>% summarise() %>% nrow()
# And 33 LADs.
data %>% group_by(o_lad) %>% summarise() %>% nrow()
# Summarise over LADs : on occupation
data <- data %>%
mutate(od_pair=paste(o_lad, d_lad, sep="-")) %>%
group_by(od_pair, o_lad, d_lad) %>%
summarise_at(vars(all:`9_elementary`), funs(sum(.)))
data <- data %>% ungroup()
# Read in shapefile containing GB LAD boundaries.
# Made available from ONS Open Geography Portal.
download.file("http://geoportal.statistics.gov.uk/datasets/8edafbe3276d4b56aec60991cbddda50_3.zip", "boundaries_gb.zip")
unzip("boundaries_gb.zip")
gb_boundaries <- read_sf("Local_Authority_Districts_December_2015_Super_Generalised_Clipped_Boundaries_in_Great_Britain.shp")
# Set CRS to OSGB.
gb_boundaries <- st_transform(gb_boundaries, crs=27700)
# Simplify polygon.
gb_boundaries <- ms_simplify(gb_boundaries, keep=0.2)
# Inner_join with data to filter London LADs.
temp_london_lads <- data %>% group_by(o_lad) %>% summarise() %>% rename("ladcd"="o_lad")
london_boundaries <- gb_boundaries %>%
inner_join(temp_london_lads, by=c("lad15cd"="ladcd"))
rm(gb_boundaries)
# Calculate real centroids of LADs.
london_centroids <- london_boundaries %>%
st_centroid() %>%
st_coordinates() %>%
as_tibble() %>%
rename("east"="X", "north"="Y") %>%
add_column(ladcd=london_boundaries %>% pull(lad15cd))
# Add centroids to OD data.
data <- data %>% left_join(london_centroids %>% select(east, north, ladcd), by=c("o_lad"="ladcd")) %>% rename("o_east"="east", "o_north"="north")
data <- data %>% left_join(london_centroids %>% select(east, north, ladcd), by=c("d_lad"="ladcd")) %>% rename("d_east"="east", "d_north"="north")
# Add in london_squared data.
london_centroids <- london_centroids %>% add_column(ladnm=london_boundaries %>% pull(lad15nm)) %>%
left_join(london_squared, by=c("ladnm"="authority"))
# Add fX fY postions for OD data.
data <- data %>%
left_join(london_centroids %>% select(ladcd, fX, fY, BOR), by=c("o_lad"="ladcd")) %>%
rename("o_fX"="fX", "o_fY"="fY", "o_bor"="BOR") %>%
left_join(london_centroids %>% select(ladcd, fX, fY, BOR), by=c("d_lad"="ladcd")) %>%
rename("d_fX"="fX", "d_fY"="fY", "d_bor"="BOR")
# Demand-side summary.
demand_side <- data %>%
group_by(od_pair) %>%
mutate(
prof=sum(`1_managers_senior`,`2_professional`, `3_associate_professional`),
non_prof=all-prof
) %>%
ungroup() %>%
select(od_pair, d_bor, o_bor, d_fY, d_fX, o_fY, o_fX, o_lad, d_lad, prof, non_prof) %>%
group_by(d_bor) %>%
mutate(
prof_total_jobs=sum(prof),
# Jobs filled by local residents.
prof_demand_side=if_else(o_bor==d_bor, prof/prof_total_jobs,0),
prof_max_jobs=max(prof),
prof_rank=row_number(desc(prof)),
non_prof_total_jobs=sum(non_prof),
# Jobs filled by local residents.
non_prof_demand_side=if_else(o_bor==d_bor, non_prof/non_prof_total_jobs,0),
non_prof_rank=row_number(desc(non_prof)),
non_prof_max_jobs=max(non_prof),
bor_label=if_else(d_bor==o_bor,d_bor,"")) %>%
ungroup() %>%
rename("bor_focus"="d_bor", "fY"="d_fY", "fX"="d_fX")
# Supply-side summary.
supply_side <- data %>%
group_by(od_pair) %>%
mutate(
prof=sum(`1_managers_senior`,`2_professional`, `3_associate_professional`),
non_prof=all-prof
) %>%
ungroup() %>%
select(od_pair, d_bor, o_bor, o_fY, o_fX, d_fX, d_fY, o_lad, d_lad, prof, non_prof) %>%
group_by(o_bor) %>%
mutate(
prof_total_workers=sum(prof),
prof_supply_side=if_else(o_bor==d_bor, prof/prof_total_workers,0),
prof_rank=row_number(desc(prof)),
prof_max_workers=max(prof),
non_prof_total_workers=sum(non_prof),
non_prof_supply_side=if_else(o_bor==d_bor, non_prof/non_prof_total_workers,0),
non_prof_rank=row_number(desc(non_prof)),
non_prof_max_workers=max(non_prof),
bor_label=if_else(d_bor==o_bor,d_bor,"")) %>%
ungroup() %>%
rename("bor_focus"="o_bor", "fY"="o_fY", "fX"="o_fX")
|
testlist <- list(id = NULL, id = NULL, booklet_id = c(8168473L, 2127314835L, 171177770L, -1942759613L, -1815221204L, 601253144L, -804651186L, 2094281728L, 860713787L, -971707632L, -1475044502L, 870040598L, -1182814578L, -1415711445L, 1901326755L, -1882837573L, 1340545259L, 1156041943L, 823641812L, -1106109928L, -1048157941L), person_id = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(dexterMST:::is_person_booklet_sorted,testlist)
str(result) | /dexterMST/inst/testfiles/is_person_booklet_sorted/AFL_is_person_booklet_sorted/is_person_booklet_sorted_valgrind_files/1615939061-test.R | no_license | akhikolla/updatedatatype-list1 | R | false | false | 826 | r | testlist <- list(id = NULL, id = NULL, booklet_id = c(8168473L, 2127314835L, 171177770L, -1942759613L, -1815221204L, 601253144L, -804651186L, 2094281728L, 860713787L, -971707632L, -1475044502L, 870040598L, -1182814578L, -1415711445L, 1901326755L, -1882837573L, 1340545259L, 1156041943L, 823641812L, -1106109928L, -1048157941L), person_id = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(dexterMST:::is_person_booklet_sorted,testlist)
str(result) |
\name{Human_Cho_Expr}
\alias{Human_Cho_Expr}
\docType{data}
\title{
Gene Expression data from selected samples of the Human CHO Medulloblastoma dataset
}
\description{
Gene Expression data (formatted and normalized using BrainArray CDFs) for 10 selected samples of the CHO dataset (see references for details).
}
\usage{data(Human_Cho_Expr)}
\format{
A data frame with 12129 observations on the following 10 variables.
\describe{
\item{\code{MD_269}}{WNT sample}
\item{\code{MD_172}}{WNT sample}
\item{\code{NC_1}}{Normal Cerebellum}
\item{\code{NC_3}}{Normal Cerebellum}
\item{\code{MD_151}}{SHH sample}
\item{\code{MD_153}}{SHH sample}
\item{\code{MD_268}}{Group4 sample}
\item{\code{MD_161}}{Group4 sample}
\item{\code{MD_242}}{Group3 sample}
\item{\code{MD_75}}{Group3 sample}
}
}
\source{
CHO dataset: Y.J. Cho, A. Tsherniak, P. Tamayo, S. Santagata, A. Ligon, H. Greulich, R. Berhoukim, V. Amani, L. Goumnerova, C.G. Eberhart, C.C. Lau, J.M. Olson, R.J. Gilbertson, A. Gajjar, O. Delattre, M. Kool, K. Ligon, M. Meyerson, J.P. Mesirov, S.L. Pomeroy, Integrative genomic analysis of medulloblastoma identifies a molecular subgroup that drives poor clinical outcome, J Clin Oncol, 29 (2011) 1424-1430.
}
\references{
Gendoo, D. M., Smirnov, P., Lupien, M. & Haibe-Kains, B. Personalized diagnosis of medulloblastoma subtypes across patients and model systems.
Genomics, doi:10.1016/j.ygeno.2015.05.002 (2015)
Manuscript URL: http://www.sciencedirect.com/science/article/pii/S0888754315000774
}
\examples{
data(Human_Cho_Expr)
}
\keyword{datasets}
| /man/Human_Cho_Expr.Rd | no_license | DGendoo/MM2S | R | false | false | 1,600 | rd | \name{Human_Cho_Expr}
\alias{Human_Cho_Expr}
\docType{data}
\title{
Gene Expression data from selected samples of the Human CHO Medulloblastoma dataset
}
\description{
Gene Expression data (formatted and normalized using BrainArray CDFs) for 10 selected samples of the CHO dataset (see references for details).
}
\usage{data(Human_Cho_Expr)}
\format{
A data frame with 12129 observations on the following 10 variables.
\describe{
\item{\code{MD_269}}{WNT sample}
\item{\code{MD_172}}{WNT sample}
\item{\code{NC_1}}{Normal Cerebellum}
\item{\code{NC_3}}{Normal Cerebellum}
\item{\code{MD_151}}{SHH sample}
\item{\code{MD_153}}{SHH sample}
\item{\code{MD_268}}{Group4 sample}
\item{\code{MD_161}}{Group4 sample}
\item{\code{MD_242}}{Group3 sample}
\item{\code{MD_75}}{Group3 sample}
}
}
\source{
CHO dataset: Y.J. Cho, A. Tsherniak, P. Tamayo, S. Santagata, A. Ligon, H. Greulich, R. Berhoukim, V. Amani, L. Goumnerova, C.G. Eberhart, C.C. Lau, J.M. Olson, R.J. Gilbertson, A. Gajjar, O. Delattre, M. Kool, K. Ligon, M. Meyerson, J.P. Mesirov, S.L. Pomeroy, Integrative genomic analysis of medulloblastoma identifies a molecular subgroup that drives poor clinical outcome, J Clin Oncol, 29 (2011) 1424-1430.
}
\references{
Gendoo, D. M., Smirnov, P., Lupien, M. & Haibe-Kains, B. Personalized diagnosis of medulloblastoma subtypes across patients and model systems.
Genomics, doi:10.1016/j.ygeno.2015.05.002 (2015)
Manuscript URL: http://www.sciencedirect.com/science/article/pii/S0888754315000774
}
\examples{
data(Human_Cho_Expr)
}
\keyword{datasets}
|
## Packman UI
## initialized March 24, 2014
## Takes in a report, cleans aggregations and makes it client facing.
## NOTE(review): hard dependencies now loaded with library() -- require()
## only warns (returns FALSE) on a missing package, so the app would fail
## later with a confusing error instead of at startup.
library('shiny')
library('datasets')
library('data.table')
library('plyr')
library('devtools')
library('stringr')

# Single-page layout: header, a sidebar for job/file input and status
# messages, and a main panel holding the step-by-step operation tabs.
shinyUI(pageWithSidebar(
  headerPanel("Packman"),

  # Sidebar: job id entry, agg-report upload, and app/status messages.
  sidebarPanel(
    htmlOutput("appMessage"),
    textInput("job_id", h4("Enter a Job ID or IDs"), 0),
    br(),
    span(strong("separate multiple job ids with a comma ,"), style="color:blue"),
    # multiple = TRUE (spelled out; T is reassignable) so several agg
    # reports can be uploaded at once.
    fileInput("files", h4("Select an agg report:"), multiple=TRUE, accept =
                c('text/csv', 'text/comma-separated-values,text/plain', '.csv')),
    h4("***"),
    h5("Complete the step by step submissions under the numerated tab. Once you've completed that you
       can analyze and download the final product under the Download Results tab."),
    htmlOutput("summaryMessage"),
    htmlOutput("pacman")
    ), #close sidebarPanel

  mainPanel(
    tabsetPanel(
      # Step-by-step cleaning operations, one sub-tab per numbered step.
      tabPanel("Operations",
               tabsetPanel(
                 tabPanel("1. Reorder Columns",
                          htmlOutput("reorderTabDesc"),
                          uiOutput("columnSelector"),
                          actionButton("get_reorder", "Submit?"),
                          h4("Output File Viewer:"),
                          tableOutput("reorderTabTable")),
                 tabPanel("2. Row Data Cleanup",
                          htmlOutput("dataCleanTabDesc"),
                          uiOutput("rowProperCase"),
                          actionButton("get_clean", "Submit?"),
                          h4("Output File Viewer:"),
                          uiOutput("dataCleanTabTable"),
                          htmlOutput("dataCleanTabWarning")),
                 tabPanel("3. Row Data Dedupe",
                          htmlOutput("rowDedupeTabDesc"),
                          uiOutput("rowDedupeKey"),
                          actionButton("get_dedupe", "Submit?"),
                          h4("Output File Viewer:"),
                          htmlOutput("dedupeTabWarning"),
                          uiOutput("dedupeTabTable")),
                 tabPanel("4. Rename Columns",
                          htmlOutput("renameTabDesc"),
                          htmlOutput("renameTabTable"),
                          br(),
                          actionButton("get_rename", "Submit?")),
                 tabPanel("5. View Built File:",
                          htmlOutput("builtTabWarning"),
                          uiOutput("builtTabTable"),
                          tags$style(type="text/css", ".shiny-datatable-output { overflow: scroll; }")
                 )
               )),
      # Download of the cleaned file plus a summary "report card".
      tabPanel("Download Results",
               tabsetPanel(
                 tabPanel("Summary",
                          # Fixed user-facing typo: "uft-8" -> "utf-8".
                          textInput("download_name", "Name for the output file (utf-8 without the .csv)", "output"),
                          br(),
                          downloadButton('downloadOutput', 'Download Built File'),
                          h4("***"),
                          br(),
                          uiOutput("selectReportCardCols"),
                          actionButton("get_report", "Generate Report?"),
                          htmlOutput("createReportCard"),
                          br(),
                          downloadButton('downloadReport', 'Download Report Card')
                 ),
                 tabPanel("View Low Confidence Units",
                          dataTableOutput("displayLowUnits"))
               )),
      # Cross-check the agg report against the original job source file.
      tabPanel("Compare to Source",
               fileInput("source_file", h4("Upload a job source file:"), multiple=FALSE, accept =
                           c('text/csv', 'text/comma-separated-values,text/plain', '.csv')),
               tabsetPanel(
                 tabPanel("Missing Units",
                          p(strong("Warning:"),
                            span("Make sure that some source column names and agg column names overlap! We use these to make a key.",
                                 style="color:red")),
                          htmlOutput("missingUnitsText"),
                          br(),
                          dataTableOutput("missingUnits")
                 ),
                 tabPanel("Source Viewer",
                          dataTableOutput("sourceFile"))
               )),
      tabPanel("View Original File",
               dataTableOutput("originalFileTabTable"),
               tags$style(type="text/css", ".data { overflow: scroll; }")
      ),
      # Alternate workflow: upload a full report for logic-aware aggregation.
      tabPanel("Logic Aware Aggregation",
               fileInput("files_logic", h4("Upload your FULL report here (required):"), multiple=FALSE),
               textInput("job_id_logic", h4("Add a job id if it's not contained in the name of the file (optional):"), 0),
               h4(textOutput("sample_skip_text")),
               h4("If the lines above turned grey, your file is being processed.
                  You can download your file when it's done."),
               h4(textOutput("logic_agg_ready")),
               downloadButton('downloadAgg', 'Download your Logic-Aware Agg report!')
      )
    ) #close overall tabset
  ) #close mainPanel
)) #close shiny ui
| /ui.R | no_license | oapostrophec/packman | R | false | false | 5,397 | r | ##Packman UI
## Packman UI
## initialized March 24, 2014
## Takes in a report, cleans aggregations and makes it client facing.
## NOTE(review): hard dependencies now loaded with library() -- require()
## only warns (returns FALSE) on a missing package, so the app would fail
## later with a confusing error instead of at startup.
library('shiny')
library('datasets')
library('data.table')
library('plyr')
library('devtools')
library('stringr')

# Single-page layout: header, a sidebar for job/file input and status
# messages, and a main panel holding the step-by-step operation tabs.
shinyUI(pageWithSidebar(
  headerPanel("Packman"),

  # Sidebar: job id entry, agg-report upload, and app/status messages.
  sidebarPanel(
    htmlOutput("appMessage"),
    textInput("job_id", h4("Enter a Job ID or IDs"), 0),
    br(),
    span(strong("separate multiple job ids with a comma ,"), style="color:blue"),
    # multiple = TRUE (spelled out; T is reassignable) so several agg
    # reports can be uploaded at once.
    fileInput("files", h4("Select an agg report:"), multiple=TRUE, accept =
                c('text/csv', 'text/comma-separated-values,text/plain', '.csv')),
    h4("***"),
    h5("Complete the step by step submissions under the numerated tab. Once you've completed that you
       can analyze and download the final product under the Download Results tab."),
    htmlOutput("summaryMessage"),
    htmlOutput("pacman")
    ), #close sidebarPanel

  mainPanel(
    tabsetPanel(
      # Step-by-step cleaning operations, one sub-tab per numbered step.
      tabPanel("Operations",
               tabsetPanel(
                 tabPanel("1. Reorder Columns",
                          htmlOutput("reorderTabDesc"),
                          uiOutput("columnSelector"),
                          actionButton("get_reorder", "Submit?"),
                          h4("Output File Viewer:"),
                          tableOutput("reorderTabTable")),
                 tabPanel("2. Row Data Cleanup",
                          htmlOutput("dataCleanTabDesc"),
                          uiOutput("rowProperCase"),
                          actionButton("get_clean", "Submit?"),
                          h4("Output File Viewer:"),
                          uiOutput("dataCleanTabTable"),
                          htmlOutput("dataCleanTabWarning")),
                 tabPanel("3. Row Data Dedupe",
                          htmlOutput("rowDedupeTabDesc"),
                          uiOutput("rowDedupeKey"),
                          actionButton("get_dedupe", "Submit?"),
                          h4("Output File Viewer:"),
                          htmlOutput("dedupeTabWarning"),
                          uiOutput("dedupeTabTable")),
                 tabPanel("4. Rename Columns",
                          htmlOutput("renameTabDesc"),
                          htmlOutput("renameTabTable"),
                          br(),
                          actionButton("get_rename", "Submit?")),
                 tabPanel("5. View Built File:",
                          htmlOutput("builtTabWarning"),
                          uiOutput("builtTabTable"),
                          tags$style(type="text/css", ".shiny-datatable-output { overflow: scroll; }")
                 )
               )),
      # Download of the cleaned file plus a summary "report card".
      tabPanel("Download Results",
               tabsetPanel(
                 tabPanel("Summary",
                          # Fixed user-facing typo: "uft-8" -> "utf-8".
                          textInput("download_name", "Name for the output file (utf-8 without the .csv)", "output"),
                          br(),
                          downloadButton('downloadOutput', 'Download Built File'),
                          h4("***"),
                          br(),
                          uiOutput("selectReportCardCols"),
                          actionButton("get_report", "Generate Report?"),
                          htmlOutput("createReportCard"),
                          br(),
                          downloadButton('downloadReport', 'Download Report Card')
                 ),
                 tabPanel("View Low Confidence Units",
                          dataTableOutput("displayLowUnits"))
               )),
      # Cross-check the agg report against the original job source file.
      tabPanel("Compare to Source",
               fileInput("source_file", h4("Upload a job source file:"), multiple=FALSE, accept =
                           c('text/csv', 'text/comma-separated-values,text/plain', '.csv')),
               tabsetPanel(
                 tabPanel("Missing Units",
                          p(strong("Warning:"),
                            span("Make sure that some source column names and agg column names overlap! We use these to make a key.",
                                 style="color:red")),
                          htmlOutput("missingUnitsText"),
                          br(),
                          dataTableOutput("missingUnits")
                 ),
                 tabPanel("Source Viewer",
                          dataTableOutput("sourceFile"))
               )),
      tabPanel("View Original File",
               dataTableOutput("originalFileTabTable"),
               tags$style(type="text/css", ".data { overflow: scroll; }")
      ),
      # Alternate workflow: upload a full report for logic-aware aggregation.
      tabPanel("Logic Aware Aggregation",
               fileInput("files_logic", h4("Upload your FULL report here (required):"), multiple=FALSE),
               textInput("job_id_logic", h4("Add a job id if it's not contained in the name of the file (optional):"), 0),
               h4(textOutput("sample_skip_text")),
               h4("If the lines above turned grey, your file is being processed.
                  You can download your file when it's done."),
               h4(textOutput("logic_agg_ready")),
               downloadButton('downloadAgg', 'Download your Logic-Aware Agg report!')
      )
    ) #close overall tabset
  ) #close mainPanel
)) #close shiny ui
|
# Exploratory plot 4: download the household power consumption data set
# (if not already on disk), keep only the rows for 1/2/2007 and 2/2/2007,
# and draw a 2x2 panel of time-series plots to plot4.png.
if(!file.exists('EDA')) dir.create('EDA')
fileUrl <- 'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip'
zipPath <- './EDA/household_power_consumption.zip'
# Only fetch the archive when it is not already present.
if(!file.exists(zipPath)) download.file(fileUrl, destfile = zipPath)
unzip(zipPath, exdir = './EDA')
fileEDA <- file('./EDA/household_power_consumption.txt')
# "[12]" matches a leading 1 or 2; the original class "[1,2]" also matched
# a literal comma by accident. '?' marks missing values in this data set.
dataEDA <- read.table(text = grep("^[12]/2/2007",readLines(fileEDA),value=TRUE), sep = ';', col.names = c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), na.strings = '?')
# Close the connection opened by file(); readLines() does not close it,
# which previously leaked an open connection for the rest of the session.
close(fileEDA)
# convert date and time to Date / POSIXct so they plot on a time axis
dataEDA$Date <- as.Date(dataEDA$Date, format = '%d/%m/%Y')
dataEDA$DateTime <- as.POSIXct(paste(dataEDA$Date, dataEDA$Time))
# Plot 4
# open the 480x480 PNG device
png(filename = './plot4.png', width = 480, height = 480, units='px')
# English locale so day-of-week axis labels are rendered in English
Sys.setlocale(category = "LC_ALL", locale = "english")
par(mfrow = c(2, 2))
plot(dataEDA$DateTime, dataEDA$Global_active_power, xlab = '', ylab = 'Global Active Power (kilowatt)', type = 'l')
plot(dataEDA$DateTime, dataEDA$Voltage, xlab = 'datetime', ylab = 'Voltage', type = 'l')
plot(dataEDA$DateTime, dataEDA$Sub_metering_1, xlab = '', ylab = 'Energy sub metering', type = 'l')
lines(dataEDA$DateTime, dataEDA$Sub_metering_2, col = 'red')
lines(dataEDA$DateTime, dataEDA$Sub_metering_3, col = 'blue')
legend('topright', col = c('black', 'red', 'blue'), legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'), lwd = 1)
plot(dataEDA$DateTime, dataEDA$Global_reactive_power, xlab = 'datetime', ylab = 'Global_reactive_power', type = 'l')
# close device
dev.off() | /plot4.R | no_license | AiyaV/ExData_Plotting1 | R | false | false | 1,687 | r |
# Exploratory plot 4: download the household power consumption data set
# (if not already on disk), keep only the rows for 1/2/2007 and 2/2/2007,
# and draw a 2x2 panel of time-series plots to plot4.png.
if(!file.exists('EDA')) dir.create('EDA')
fileUrl <- 'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip'
zipPath <- './EDA/household_power_consumption.zip'
# Only fetch the archive when it is not already present.
if(!file.exists(zipPath)) download.file(fileUrl, destfile = zipPath)
unzip(zipPath, exdir = './EDA')
fileEDA <- file('./EDA/household_power_consumption.txt')
# "[12]" matches a leading 1 or 2; the original class "[1,2]" also matched
# a literal comma by accident. '?' marks missing values in this data set.
dataEDA <- read.table(text = grep("^[12]/2/2007",readLines(fileEDA),value=TRUE), sep = ';', col.names = c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), na.strings = '?')
# Close the connection opened by file(); readLines() does not close it,
# which previously leaked an open connection for the rest of the session.
close(fileEDA)
# convert date and time to Date / POSIXct so they plot on a time axis
dataEDA$Date <- as.Date(dataEDA$Date, format = '%d/%m/%Y')
dataEDA$DateTime <- as.POSIXct(paste(dataEDA$Date, dataEDA$Time))
# Plot 4
# open the 480x480 PNG device
png(filename = './plot4.png', width = 480, height = 480, units='px')
# English locale so day-of-week axis labels are rendered in English
Sys.setlocale(category = "LC_ALL", locale = "english")
par(mfrow = c(2, 2))
plot(dataEDA$DateTime, dataEDA$Global_active_power, xlab = '', ylab = 'Global Active Power (kilowatt)', type = 'l')
plot(dataEDA$DateTime, dataEDA$Voltage, xlab = 'datetime', ylab = 'Voltage', type = 'l')
plot(dataEDA$DateTime, dataEDA$Sub_metering_1, xlab = '', ylab = 'Energy sub metering', type = 'l')
lines(dataEDA$DateTime, dataEDA$Sub_metering_2, col = 'red')
lines(dataEDA$DateTime, dataEDA$Sub_metering_3, col = 'blue')
legend('topright', col = c('black', 'red', 'blue'), legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'), lwd = 1)
plot(dataEDA$DateTime, dataEDA$Global_reactive_power, xlab = 'datetime', ylab = 'Global_reactive_power', type = 'l')
# close device
dev.off()
# Fit a 10-fold cross-validated elastic net (alpha = 0.05, MAE loss) on
# the skin training set and append the fitted model summary to
# skin_023.txt.
library(glmnet)

# Response is column 1; predictors are columns 4 onward (columns 2-3 are
# skipped -- presumably identifiers; TODO confirm against the data file).
# `header = TRUE` spelled out (was the partially-matched `head = T`).
mydata <- read.table("../../../../TrainingSet/FullSet/Correlation/skin.csv", header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])

# Fixed seed so the CV fold assignment is reproducible.
set.seed(123)
# Renamed from `glm` to avoid shadowing stats::glm().
cv_fit <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.05,
                    family = "gaussian", standardize = TRUE)

# Append the glmnet fit summary to the results file.
sink('./skin_023.txt', append = TRUE)
print(cv_fit$glmnet.fit)
sink()
| /Model/EN/Correlation/skin/skin_023.R | no_license | esbgkannan/QSMART | R | false | false | 346 | r | library(glmnet)
# Load the skin training set: response in column 1, predictors in
# columns 4 onward (columns 2-3 unused here).
# NOTE(review): `head=T` relies on partial argument matching and the
# reassignable `T`; prefer `header = TRUE`.
mydata = read.table("../../../../TrainingSet/FullSet/Correlation/skin.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
# Fixed seed so the cross-validation fold assignment is reproducible.
set.seed(123)
# 10-fold CV elastic net, alpha = 0.05, mean-absolute-error loss.
# NOTE(review): the name `glm` shadows stats::glm() for the rest of the
# session; a name like `cv_fit` would be safer.
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.05,family="gaussian",standardize=TRUE)
# Append the glmnet fit summary to the results file.
sink('./skin_023.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
#' Insert node data attributes during node creation
#' @description This helper function should be
#' invoked to provide values for the namesake
#' \code{node_data} argument, which is present
#' in any function where nodes are created.
#' @param ... node data attributes provided
#' as one or more named vectors.
#' @return a named list of node data attribute values.
#' @examples
#' # Create a new graph and add
#' # a path with several node
#' # data attributes
#' graph <-
#'   create_graph() %>%
#'   add_path(
#'     n = 3,
#'     type = "path",
#'     node_data = node_data(
#'       hour = 5,
#'       index = c(1, 3, 2)))
#'
#' # View the graph's internal
#' # node data frame; the node
#' # data attributes have been
#' # inserted
#' graph %>%
#'   get_node_df()
#' @export node_data
node_data <- function(...) {

  # Name of this function, used for error reporting.
  # (Previously referenced but never defined, which turned the
  # intended validation error into "object 'fcn_name' not found".)
  fcn_name <- "node_data"

  # Collect vectors of node data
  # attribute values into a list object
  node_data_values <- list(...)

  # Reserved node aesthetic attribute names (plus the "x"/"y"
  # position attributes) may not be used as data attributes.
  if (any(names(node_data_values) %in% c(gv_node_attributes(), "x", "y"))){
    emit_error(
      fcn_name = fcn_name,
      reasons = "Names for node data attributes shouldn't be any of those reserved for node aesthetic attributes")
  }

  node_data_values
}
| /R/node_data.R | permissive | akkalbist55/DiagrammeR | R | false | false | 1,136 | r | #' Insert node data attributes during node creation
#' @description This helper function should be
#' invoked to provide values for the namesake
#' \code{node_data} argument, which is present
#' in any function where nodes are created.
#' @param ... node data attributes provided
#' as one or more named vectors.
#' @return a named list of node data attribute values.
#' @examples
#' # Create a new graph and add
#' # a path with several node
#' # data attributes
#' graph <-
#'   create_graph() %>%
#'   add_path(
#'     n = 3,
#'     type = "path",
#'     node_data = node_data(
#'       hour = 5,
#'       index = c(1, 3, 2)))
#'
#' # View the graph's internal
#' # node data frame; the node
#' # data attributes have been
#' # inserted
#' graph %>%
#'   get_node_df()
#' @export node_data
node_data <- function(...) {

  # Name of this function, used for error reporting.
  # (Previously referenced but never defined, which turned the
  # intended validation error into "object 'fcn_name' not found".)
  fcn_name <- "node_data"

  # Collect vectors of node data
  # attribute values into a list object
  node_data_values <- list(...)

  # Reserved node aesthetic attribute names (plus the "x"/"y"
  # position attributes) may not be used as data attributes.
  if (any(names(node_data_values) %in% c(gv_node_attributes(), "x", "y"))){
    emit_error(
      fcn_name = fcn_name,
      reasons = "Names for node data attributes shouldn't be any of those reserved for node aesthetic attributes")
  }

  node_data_values
}
|
#' @title trail_status
#' @description Get trail status/history
#' @details This function returns full details and history for a trail,
#'   including errors and logging start/stop times.
#' @template name
#' @template dots
#' @return A list.
#' @seealso \code{\link{get_trails}}, \code{\link{start_logging}}, \code{\link{create_trail}}
#' @references \url{http://docs.aws.amazon.com/awscloudtrail/latest/APIReference/API_GetTrailStatus.html}
#' @export
trail_status <- function(name, ...) {
    # Build the GetTrailStatus query and dispatch it to the CloudTrail API.
    query_args <- list(Action = "GetTrailStatus", Name = get_trailname(name))
    response <- cloudtrailHTTP(query = query_args, ...)
    # Hand AWS error objects straight back to the caller unchanged.
    if (inherits(response, "aws-error")) {
        return(response)
    }
    # Return the status result, tagging it with the request id attribute.
    status_result <- response$GetTrailStatusResponse$GetTrailStatusResult
    request_id <- response$GetTrailStatusResponse$ResponseMetadata$RequestId
    structure(status_result, RequestId = request_id)
}
| /R/trail_status.R | no_license | isabella232/aws.cloudtrail | R | false | false | 797 | r | #' @title trail_status
#' @description Get trail status/history
#' @details This function returns full details and history for a trail, including errors and logging start/stop times.
#' @template name
#' @template dots
#' @return A list (the GetTrailStatus result, with a \code{RequestId}
#'   attribute), or an \code{aws-error} object on failure.
#' @seealso \code{\link{get_trails}}, \code{\link{start_logging}}, \code{\link{create_trail}}
#' @references \url{http://docs.aws.amazon.com/awscloudtrail/latest/APIReference/API_GetTrailStatus.html}
#' @export
trail_status <- function(name, ...) {
    # Issue the GetTrailStatus request against the CloudTrail API.
    out <- cloudtrailHTTP(query = list(Action = "GetTrailStatus", Name = get_trailname(name)), ...)
    # Pass AWS error objects straight back to the caller.
    if (inherits(out, "aws-error")) {
        return(out)
    }
    # Return the status result, tagging it with the request id attribute.
    structure(out$GetTrailStatusResponse$GetTrailStatusResult,
              RequestId = out$GetTrailStatusResponse$ResponseMetadata$RequestId)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplot.r
\name{theme_hrbrmstr_ex}
\alias{theme_hrbrmstr_ex}
\title{Exo 2 theme}
\usage{
theme_hrbrmstr_ex(...)
}
\description{
Exo 2 theme
}
| /man/theme_hrbrmstr_ex.Rd | no_license | tpopenfoose/kylerove-r-theme | R | false | true | 220 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ggplot.r
\name{theme_hrbrmstr_ex}
\alias{theme_hrbrmstr_ex}
\title{Exo 2 theme}
\usage{
theme_hrbrmstr_ex(...)
}
\description{
Exo 2 theme
}
|
# Explore the OHDSI CDM database: list the tables, then pull persons,
# predefined cohorts, outcomes (ASCVD), and candidate predictors.
# NOTE(review): the stray interactive help call `?dbFetch` was removed,
# and each query result is now assigned so it can actually be used
# downstream (previously every fetch was discarded).
dbListTables(mydb)

# Send a query, fetch every row, and always release the result set --
# even if the fetch errors. Uses dbFetch(), the non-deprecated spelling
# of fetch().
run_query <- function(con, sql) {
  rs <- dbSendQuery(con, sql)
  on.exit(dbClearResult(rs), add = TRUE)
  dbFetch(rs, n = -1)
}

#step 1: pull every person
sqlStatementall <- "
  select
  pr.person_id, pr.year_of_birth, pr.month_of_birth, pr.day_of_birth, pr.race_source_value as race,
  pr.ethnicity_source_value
  from person pr
  "
all_persons <- run_query(mydb, sqlStatementall)

#step 1.b : query cohorts - groups of patients predefined
sqlStatementcoh <- "
    select
    cd.*
    from cohort_definition cd
    "
cohort_definitions <- run_query(mydb, sqlStatementcoh)

#step 2 : select outcomes (members of the ASCVD cohort, joined back to persons)
sqlStatementout <- "
    select
    cd.*
    , ca.*
    , cs.*
    , co.*
    , pr.person_id, pr.year_of_birth, pr.month_of_birth, pr.day_of_birth, pr.race_source_value as race,
    pr.ethnicity_source_value
    from cohort_definition cd
    join cohort_attribute ca on ca.cohort_definition_id = cd.cohort_definition_id
    join concept cs on ca.value_as_concept_id = cs.concept_id
    left join condition_occurrence co on co.condition_concept_id = cs.concept_id
    left join person pr on pr.person_id = co.person_id
    where cd.cohort_definition_name='ASCVD'
    "
outcomes <- run_query(mydb, sqlStatementout)

#step 2 : select various potential predictors
sqlStatementpreds <- "
    select
    cd.*
    , ca.*
    , cs.*
    , co.*
    , pr.person_id, pr.year_of_birth, pr.month_of_birth, pr.day_of_birth, pr.race_source_value as race,
    pr.ethnicity_source_value
    from cohort_definition cd
    join cohort_attribute ca on ca.cohort_definition_id = cd.cohort_definition_id
    join concept cs on ca.value_as_concept_id = cs.concept_id
    left join condition_occurrence co on co.condition_concept_id = cs.concept_id
    left join person pr on pr.person_id = co.person_id
    where cd.cohort_definition_name in ('Diabetes - ICD10CM','Hypertension - ICD10CM',' Anti-Hypertensive Pharmacologic Therapy - RxNORM')
    "
predictors <- run_query(mydb, sqlStatementpreds)
#step 3 : create unique dataset with dummy variables and case statements + retain original codes
sqlStatementdata <- "
select
pr.person_id, pr.year_of_birth, pr.month_of_birth, pr.day_of_birth, pr.race_source_value as race,
pr.ethnicity_source_value,
case when o.person_ID ne . then 1 else 0 END as outcome, o.OUTCOME_NAME, o.ASCVD_code,
case when
from person pr
LEFT JOIN (select
co.person_id, cd.cohort_definition_name as OUTCOME_NAME, cs.concept_code as ASCVD_code
from cohort_definition cd
join cohort_attribute ca on ca.cohort_definition_id = cd.cohort_definition_id
join concept cs on ca.value_as_concept_id = cs.concept_id
left join condition_occurrence co on co.condition_concept_id = cs.concept_id
where cd.cohort_definition_name='ASCVD' and pr.person_ID ne .) out o on o.person_ID=pr.person_ID
LEFT JOIN (select
from cohort_definition cd
join cohort_attribute ca on ca.cohort_definition_id = cd.cohort_definition_id
join concept cs on ca.value_as_concept_id = cs.concept_id
left join condition_occurrence co on co.condition_concept_id = cs.concept_id
where cd.cohort_definition_name in ('Diabetes - ICD10CM','Hypertension - ICD10CM',' Anti-Hypertensive Pharmacologic Therapy - RxNORM'))
" | /scripts/defineCohorts.R | no_license | laderast/OHDSIqueries | R | false | false | 3,060 | r | dbListTables(mydb)
# Pull persons, predefined cohorts, outcomes (ASCVD), and candidate
# predictors from the OHDSI CDM database.
# NOTE(review): the stray interactive help call `?dbFetch` was removed,
# and each query result is now assigned so it can actually be used
# downstream (previously every fetch was discarded).

# Send a query, fetch every row, and always release the result set --
# even if the fetch errors. Uses dbFetch(), the non-deprecated spelling
# of fetch().
run_query <- function(con, sql) {
  rs <- dbSendQuery(con, sql)
  on.exit(dbClearResult(rs), add = TRUE)
  dbFetch(rs, n = -1)
}

#step 1: pull every person
sqlStatementall <- "
  select
  pr.person_id, pr.year_of_birth, pr.month_of_birth, pr.day_of_birth, pr.race_source_value as race,
  pr.ethnicity_source_value
  from person pr
  "
all_persons <- run_query(mydb, sqlStatementall)

#step 1.b : query cohorts - groups of patients predefined
sqlStatementcoh <- "
    select
    cd.*
    from cohort_definition cd
    "
cohort_definitions <- run_query(mydb, sqlStatementcoh)

#step 2 : select outcomes (members of the ASCVD cohort, joined back to persons)
sqlStatementout <- "
    select
    cd.*
    , ca.*
    , cs.*
    , co.*
    , pr.person_id, pr.year_of_birth, pr.month_of_birth, pr.day_of_birth, pr.race_source_value as race,
    pr.ethnicity_source_value
    from cohort_definition cd
    join cohort_attribute ca on ca.cohort_definition_id = cd.cohort_definition_id
    join concept cs on ca.value_as_concept_id = cs.concept_id
    left join condition_occurrence co on co.condition_concept_id = cs.concept_id
    left join person pr on pr.person_id = co.person_id
    where cd.cohort_definition_name='ASCVD'
    "
outcomes <- run_query(mydb, sqlStatementout)

#step 2 : select various potential predictors
sqlStatementpreds <- "
    select
    cd.*
    , ca.*
    , cs.*
    , co.*
    , pr.person_id, pr.year_of_birth, pr.month_of_birth, pr.day_of_birth, pr.race_source_value as race,
    pr.ethnicity_source_value
    from cohort_definition cd
    join cohort_attribute ca on ca.cohort_definition_id = cd.cohort_definition_id
    join concept cs on ca.value_as_concept_id = cs.concept_id
    left join condition_occurrence co on co.condition_concept_id = cs.concept_id
    left join person pr on pr.person_id = co.person_id
    where cd.cohort_definition_name in ('Diabetes - ICD10CM','Hypertension - ICD10CM',' Anti-Hypertensive Pharmacologic Therapy - RxNORM')
    "
predictors <- run_query(mydb, sqlStatementpreds)

#step 3 : create unique dataset with dummy variables and case statements + retain original codes
# WARNING(review): this statement is work-in-progress and is NOT valid SQL:
# `ne .` is SAS syntax (should be something like `is not null`), one
# `case when` clause is unfinished, and the second subquery has an empty
# select list. It is kept verbatim for reference and is intentionally
# never sent to the database.
sqlStatementdata <- "
    select
    pr.person_id, pr.year_of_birth, pr.month_of_birth, pr.day_of_birth, pr.race_source_value as race,
    pr.ethnicity_source_value,
    case when o.person_ID ne . then 1 else 0 END as outcome, o.OUTCOME_NAME, o.ASCVD_code,
    case when
    from person pr
    LEFT JOIN (select
    co.person_id, cd.cohort_definition_name as OUTCOME_NAME, cs.concept_code as ASCVD_code
    from cohort_definition cd
    join cohort_attribute ca on ca.cohort_definition_id = cd.cohort_definition_id
    join concept cs on ca.value_as_concept_id = cs.concept_id
    left join condition_occurrence co on co.condition_concept_id = cs.concept_id
    where cd.cohort_definition_name='ASCVD' and pr.person_ID ne .) out o on o.person_ID=pr.person_ID
    LEFT JOIN (select
    from cohort_definition cd
    join cohort_attribute ca on ca.cohort_definition_id = cd.cohort_definition_id
    join concept cs on ca.value_as_concept_id = cs.concept_id
    left join condition_occurrence co on co.condition_concept_id = cs.concept_id
    where cd.cohort_definition_name in ('Diabetes - ICD10CM','Hypertension - ICD10CM',' Anti-Hypertensive Pharmacologic Therapy - RxNORM'))
    "
# Register the S3 "dendrogram" class so it can be used as an S4 slot type.
setOldClass("dendrogram")
# Class unions so slots may hold NULL (or be missing) in addition to a
# dendrogram / matrix; used by the ClusterExperiment class below.
setClassUnion("dendrogramOrNULL",members=c("dendrogram", "NULL"))
setClassUnion("matrixOrNULL",members=c("matrix", "NULL"))
setClassUnion("matrixOrMissing",members=c("matrix", "missing"))
#' @title Class ClusterExperiment
#'
#' @description \code{ClusterExperiment} is a class that extends
#' \code{SummarizedExperiment} and is used to store the data
#' and clustering information.
#'
#' @docType class
#' @aliases ClusterExperiment ClusterExperiment-class clusterExperiment
#'
#' @description In addition to the slots of the \code{SummarizedExperiment}
#' class, the \code{ClusterExperiment} object has the additional slots described
#' in the Slots section.
#'
#' @description There are several methods implemented for this class. The most
#' important methods (e.g., \code{\link{clusterMany}}, \code{\link{combineMany}},
#' ...) have their own help page. Simple helper methods are described in the
#' Methods section. For a comprehensive list of methods specific to this class
#' see the Reference Manual.
#'
#' @slot transformation function. Function to transform the data by when methods
#' that assume normal-like data (e.g. log)
#' @slot clusterMatrix matrix. A matrix giving the integer-valued cluster ids
#' for each sample. The rows of the matrix correspond to clusterings and columns
#' to samples. The integer values are assigned in the order that the clusters
#' were found, if found by setting sequential=TRUE in clusterSingle. "-1" indicates
#' the sample was not clustered.
#' @slot primaryIndex numeric. An index that specifies the primary set of
#' labels.
#' @slot clusterInfo list. A list with info about the clustering.
#' If created from \code{\link{clusterSingle}}, clusterInfo will include the
#' parameter used for the call, and the call itself. If \code{sequential = TRUE}
#' it will also include the following components.
#' \itemize{
#' \item{\code{clusterInfo}}{if sequential=TRUE and clusters were successfully
#' found, a matrix of information regarding the algorithm behavior for each
#' cluster (the starting and stopping K for each cluster, and the number of
#' iterations for each cluster).}
#' \item{\code{whyStop}}{if sequential=TRUE and clusters were successfully
#' found, a character string explaining what triggered the algorithm to stop.}
#' }
#' @slot clusterTypes character vector with the origin of each column of
#' clusterMatrix.
#' @slot dendro_samples dendrogram. A dendrogram containing the cluster
#' relationship (leaves are samples; see \code{\link{makeDendrogram}} for
#' details).
#' @slot dendro_clusters dendrogram. A dendrogram containing the cluster
#' relationship (leaves are clusters; see \code{\link{makeDendrogram}} for
#' details).
#' @slot dendro_index numeric. An integer giving the cluster that was used to
#' make the dendrograms. NA_real_ value if no dendrograms are saved.
#' @slot dendro_outbranch logical. Whether the dendro_samples dendrogram put
#' missing/non-clustered samples in an outbranch, or intermixed in the dendrogram.
#' @slot coClustering matrix. A matrix with the cluster co-occurrence
#' information; this can either be based on subsampling or on co-clustering
#' across parameter sets (see \code{clusterMany}). The matrix is a square matrix
#' with number of rows/columns equal to the number of samples.
#' @slot clusterLegend a list, one per cluster in \code{clusterMatrix}. Each
#' element of the list is a matrix with nrows equal to the number of different
#' clusters in the clustering, and consisting of at least two columns with the
#' following column names: "clusterId" and "color".
#' @slot orderSamples a numeric vector (of integers) defining the order of
#' samples to be used for plotting of samples. Usually set internally by other
#' functions.
#'
#' @name ClusterExperiment-class
#' @aliases ClusterExperiment
#' @rdname ClusterExperiment-class
#' @import SummarizedExperiment
#' @import methods
#' @importClassesFrom SummarizedExperiment SummarizedExperiment
#' @importFrom dendextend as.phylo.dendrogram
#' @export
#'
# Formal S4 class definition. Slot semantics are documented in the
# roxygen block above; the dendro_* and coClustering slots use the union
# classes defined at the top of the file so they may also be NULL.
setClass(
  Class = "ClusterExperiment",
  contains = "SummarizedExperiment",
  slots = list(
    transformation="function",
    clusterMatrix = "matrix",
    primaryIndex = "numeric",
    clusterInfo = "list",
    clusterTypes = "character",
    dendro_samples = "dendrogramOrNULL",
    dendro_clusters = "dendrogramOrNULL",
    dendro_index = "numeric",
    dendro_outbranch = "logical",
    coClustering = "matrixOrNULL",
    clusterLegend="list",
    orderSamples="numeric"
  )
)
## One question is how to extend the "[" method, i.e., how do we subset the co-occurance matrix and the dendrogram?
## For now, if subsetting, these are lost, but perhaps we can do something smarter?
setValidity("ClusterExperiment", function(object) {
#browser()
if(length(assays(object)) < 1) {
return("There must be at least one assay slot.")
}
if(!is.numeric(assay(object))) {
return("The data must be numeric.")
}
if(any(is.na(assay(object)))) {
return("NA values are not allowed.")
}
tX <- try(transform(object),silent=TRUE)
if(inherits(tX, "try-error")){
stop(paste("User-supplied `transformation` produces error on the input data
matrix:\n",x))
}
if(any(is.na(tX))) {
return("NA values after transforming data matrix are not allowed.")
}
if(!all(is.na((object@clusterMatrix))) &
!(NROW(object@clusterMatrix) == NCOL(object))) {
return("If present, `clusterMatrix` must have as many row as cells.")
}
if(!is.numeric(object@clusterMatrix)) {
return("`clusterMatrix` must be a numeric matrix.")
}
if(NCOL(object@clusterMatrix)!= length(object@clusterTypes)) {
return("length of clusterTypes must be same as NCOL of the clusterMatrix")
}
if(NCOL(object@clusterMatrix)!= length(object@clusterInfo)) {
return("length of clusterInfo must be same as NCOL of the clusterMatrix")
}
############
##Check dendrogram slotNames
############
#browser()
if(!is.null(object@dendro_samples)){
if(nobs(object@dendro_samples) != NCOL(object)) {
return("dendro_samples must have the same number of leaves as the number of samples")
}
if(is.na(object@dendro_outbranch)) return("if dendro_samples is defined, must also define dendro_outbranch")
}
else{
if(!is.null(object@dendro_clusters)) return("dendro_samples should not be null if dendro_clusters is non-null")
if(!is.na(object@dendro_outbranch)) return("dendro_samples should not be null if dendro_outbranch is not NA")
}
if(!is.null(object@dendro_clusters)){
if(is.na(dendroClusterIndex(object))) return("if dendrogram slots are filled, must have corresponding dendro_index defined.")
dcluster<-clusterMatrix(object)[,dendroClusterIndex(object)]
if(nobs(object@dendro_clusters) != max(dcluster)) {
return("dendro_clusters must have the same number of leaves as the number of (non-negative) clusters")
}
}
else{
if(!is.null(object@dendro_samples)) return("dendro_clusters should not be null if dendro_samples is non-null")
}
## Check co-clustering
if(!is.null(object@coClustering) &&
(NROW(object@coClustering) != NCOL(object@coClustering)
| NCOL(object@coClustering) != NCOL(object))) {
return("`coClustering` must be a sample by sample matrix.")
}
## If have a cluster matrix
if(!all(is.na(object@clusterMatrix))){ #what does this mean, how can they be all NA?
#check primary index
if(length(object@primaryIndex) != 1) {
if(length(object@primaryIndex) == 0) return("If more than one set of clusterings, a primary cluster must
be specified.")
if(length(object@primaryIndex) > 0) return("Only a single primary index may be specified")
}
if(object@primaryIndex > NCOL(object@clusterMatrix) |
object@primaryIndex < 1) {
return("`primaryIndex` out of bounds.")
}
#check clusterTypes
if(NCOL(object@clusterMatrix) != length(object@clusterTypes)) {
return("`clusterTypes` must be the same length as NCOL of
`clusterMatrix`.")
}
#check internally stored as integers
testConsecIntegers<-apply(object@clusterMatrix,2,function(x){
whCl<-which(!x %in% c(-1,-2))
uniqVals<-unique(x[whCl])
return(all(sort(uniqVals)==1:length(uniqVals)))
})
#browser()
if(!all(testConsecIntegers)) return("the cluster ids in clusterMatrix must be stored internally as consecutive integer values")
####
#test that colnames of clusterMatrix appropriately aligns with everything else
####
if(is.null(colnames(object@clusterMatrix))) return("clusterMatrix must have column names")
if(any(duplicated(colnames(object@clusterMatrix)))) return("clusterMatrix must have unique column names")
if(!is.null(names(object@clusterTypes))) return("clusterTypes should not have names")
if(!is.null(names(object@clusterInfo))) return("clusterInfo should not have names")
if(!is.null(names(object@clusterLegend))) return("clusterLegend should not have names")
####
#test that @clusterLegend is proper form
####
if(length(object@clusterLegend) != NCOL(object@clusterMatrix)) {
return("`clusterLegend` must be list of same length as NCOL of
`clusterMatrix`")
}
testIsMatrix <- sapply(object@clusterLegend,
function(x) {!is.null(dim(x))})
if(!all(testIsMatrix)) {
return("Each element of `clusterLegend` list must be a matrix")
}
testColorRows <- sapply(object@clusterLegend, function(x){nrow(x)})
testClusterMat <- apply(object@clusterMatrix, 2, function(x) {
length(unique(x))})
if(!all(testColorRows == testClusterMat)) {
return("each element of `clusterLegend` must be matrix with number of
rows equal to the number of clusters (including -1 or -2 values)
in `clusterMatrix`")
}
testColorCols1 <- sapply(object@clusterLegend, function(x) {
"color" %in% colnames(x)})
testColorCols2 <- sapply(object@clusterLegend, function(x) {
"clusterIds" %in% colnames(x)})
testColorCols3 <- sapply(object@clusterLegend, function(x) {
"name" %in% colnames(x)})
if(!all(testColorCols1) || !all(testColorCols2) || !all(testColorCols3)) {
return("each element of `clusterLegend` must be matrix with at least 3
columns, and at least 3 columns have names `clusterIds`,
`color` and `name`")
}
# testUniqueName <- sapply(object@clusterLegend, function(x) {
# any(duplicated(x[,"name"]))})
# if(any(testUniqueName)) return("the column")
testColorCols1 <- sapply(object@clusterLegend, function(x){is.character(x)})
if(!all(testColorCols1)) {
return("each element of `clusterLegend` must be matrix of character
values")
}
testColorCols1 <- sapply(1:length(object@clusterLegend), function(ii){
col<-object@clusterLegend[[ii]]
x<-object@clusterMatrix[,ii]
y<-as.numeric(col[,"clusterIds"])
all(y %in% x)
})
if(!all(testColorCols1)) {
return("each element of `clusterLegend` must be matrix with column
`clusterIds` matching the corresponding integer valued
clusterMatrix values")
}
}
if(length(object@orderSamples)!=NCOL(assay(object))) {
return("`orderSamples` must be of same length as number of samples
(NCOL(assay(object)))")
}
if(any(!object@orderSamples %in% 1:NCOL(assay(object)))) {
return("`orderSamples` must be values between 1 and the number of samples.")
}
return(TRUE)
})
#' @description The constructor \code{clusterExperiment} creates an object of
#' the class \code{ClusterExperiment}. However, the typical way of creating
#' these objects is the result of a call to \code{\link{clusterMany}} or
#' \code{\link{clusterSingle}}.
#'
#' @description Note that when subsetting the data, the co-clustering and
#' dendrogram information are lost.
#'
#'@param se a matrix or \code{SummarizedExperiment} containing the data to be
#'clustered.
#'@param clusters can be either a numeric or character vector, a factor, or a
#'numeric matrix, containing the cluster labels.
#'@param transformation function. A function to transform the data before
#'performing steps that assume normal-like data (i.e. constant variance), such
#'as the log.
#'@param ... The arguments \code{transformation}, \code{clusterTypes} and
#' \code{clusterInfo} to be passed to the constructor for signature
#' \code{SummarizedExperiment,matrix}.
#'
#'@return A \code{ClusterExperiment} object.
#'
#'@examples
#'
#'se <- matrix(data=rnorm(200), ncol=10)
#'labels <- gl(5, 2)
#'
#'cc <- clusterExperiment(se, as.numeric(labels), transformation =
#'function(x){x})
#'
#' @rdname ClusterExperiment-class
#' @export
# Declare the generic; the real work happens in the methods below, which all
# eventually funnel into the SummarizedExperiment,matrix method.
setGeneric("clusterExperiment",
           function(se, clusters, ...) standardGeneric("clusterExperiment"))
#' @rdname ClusterExperiment-class
#' @export
setMethod(
  f = "clusterExperiment",
  signature = signature("matrix", "ANY"),
  definition = function(se, clusters, ...) {
    # Wrap the bare matrix in a SummarizedExperiment and dispatch again.
    seObject <- SummarizedExperiment(se)
    clusterExperiment(seObject, clusters, ...)
  })
#' @rdname ClusterExperiment-class
setMethod(
  f = "clusterExperiment",
  signature = signature("SummarizedExperiment", "numeric"),
  definition = function(se, clusters, ...) {
    # Require exactly one cluster id per sample, then reshape the vector to a
    # one-column matrix and re-dispatch to the matrix method.
    nSamples <- NCOL(se)
    if (length(clusters) != nSamples) {
      stop("`clusters` must be a vector of length equal to the number of samples.")
    }
    clusterExperiment(se, matrix(clusters, ncol = 1), ...)
  })
#' @rdname ClusterExperiment-class
setMethod(
  f = "clusterExperiment",
  signature = signature("SummarizedExperiment", "character"),
  definition = function(se, clusters, ...) {
    # Reshape the label vector into a one-column matrix and re-dispatch.
    clusterMat <- matrix(clusters, ncol = 1)
    clusterExperiment(se, clusterMat, ...)
  })
#' @rdname ClusterExperiment-class
setMethod(
  f = "clusterExperiment",
  signature = signature("SummarizedExperiment", "factor"),
  definition = function(se, clusters, ...) {
    # Factors are handled by flattening them to their character labels and
    # re-dispatching to the character method.
    clusterExperiment(se, as.character(clusters), ...)
  })
#'@rdname ClusterExperiment-class
#'@param clusterTypes a string describing the nature of the clustering. The
#' values `clusterSingle`, `clusterMany`, `mergeClusters`, `combineMany` are
#' reserved for the clustering coming from the package workflow and should not
#' be used when creating a new object with the constructor.
#'@param clusterInfo a list with information on the clustering (see Slots).
#'@param primaryIndex integer. Sets the `primaryIndex` slot (see Slots).
#'@param orderSamples a vector of integers. Sets the `orderSamples` slot (see
#' Slots).
#'@param dendro_samples dendrogram. Sets the `dendro_samples` slot (see Slots).
#'@param dendro_clusters dendrogram. Sets the `dendro_clusters` slot (see
#' Slots).
#' @param dendro_outbranch logical. Sets the `dendro_outbranch` slot (see Slots)
#'@param dendro_index numeric. Sets the dendro_index slot (see Slots).
#'@param coClustering matrix. Sets the `coClustering` slot (see Slots).
#'@details The \code{clusterExperiment} constructor function gives clusterLabels
#' based on the column names of the input matrix/SummarizedExperiment. If
#' missing, will assign labels "cluster1","cluster2", etc.
setMethod(
  f = "clusterExperiment",
  signature = signature("SummarizedExperiment","matrix"),
  definition = function(se, clusters,
                        transformation,
                        primaryIndex=1,
                        clusterTypes="User",
                        clusterInfo=NULL,
                        orderSamples=1:ncol(se),
                        dendro_samples=NULL,
                        dendro_index=NA_real_,
                        dendro_clusters=NULL,
                        dendro_outbranch=NA,
                        coClustering=NULL
  ){
    ## `clusters` must hold one row per sample, one column per clustering.
    if(NCOL(se) != nrow(clusters)) {
      stop("`clusters` must be a matrix of rows equal to the number of
           samples.")
    }
    ## Recycle a single clusterTypes value / NULL clusterInfo across all
    ## clusterings. (Previously this block appeared twice; the verbatim
    ## duplicate has been removed.)
    if(length(clusterTypes) == 1) {
      clusterTypes <- rep(clusterTypes, length=NCOL(clusters))
    }
    if(is.null(clusterInfo)) {
      clusterInfo <- rep(list(NULL), length=NCOL(clusters))
    }
    if(length(clusterTypes) != NCOL(clusters)) {
      stop("clusterTypes must be of length equal to number of clusters in
           `clusters`")
    }
    ## Give default, unique column names to the clusterings.
    if(is.null(colnames(clusters))){
      colnames(clusters) <- paste("cluster", 1:NCOL(clusters), sep="")
    }
    if(any(duplicated(colnames(clusters)))){ #probably not possible
      colnames(clusters) <- make.names(colnames(clusters), unique=TRUE)
    }
    ## Recode the cluster labels as consecutive integers and build the color
    ## legend (one matrix per clustering).
    tmp <- .makeColors(clusters, colors=bigPalette)
    clusterLegend <- tmp$colorList
    clustersNum <- tmp$numClusters
    colnames(clustersNum) <- colnames(clusters)
    ## Passing `se` directly to new() keeps all SummarizedExperiment content.
    out <- new("ClusterExperiment",
               se,
               transformation=transformation,
               clusterMatrix = clustersNum,
               primaryIndex = primaryIndex,
               clusterTypes = unname(clusterTypes),
               clusterInfo=unname(clusterInfo),
               clusterLegend=unname(clusterLegend),
               ## BUG FIX: honor the user-supplied `orderSamples` argument;
               ## it was previously ignored in favor of a hard-coded
               ## 1:ncol(se) (identical to the default, so default calls are
               ## unaffected).
               orderSamples=orderSamples,
               dendro_samples=dendro_samples,
               dendro_clusters=dendro_clusters,
               dendro_index=dendro_index,
               dendro_outbranch=dendro_outbranch,
               coClustering=coClustering
    )
    validObject(out)
    return(out)
  })
| /R/AllClasses.R | no_license | 12379Monty/clusterExperiment | R | false | false | 17,615 | r | setOldClass("dendrogram")
## Class unions so slots can hold either a value or be empty (NULL).
## `matrixOrMissing` presumably supports method signatures where the matrix
## argument may be absent — confirm usage elsewhere in the package.
setClassUnion("dendrogramOrNULL",members=c("dendrogram", "NULL"))
setClassUnion("matrixOrNULL",members=c("matrix", "NULL"))
setClassUnion("matrixOrMissing",members=c("matrix", "missing"))
#' @title Class ClusterExperiment
#'
#' @description \code{ClusterExperiment} is a class that extends
#' \code{SummarizedExperiment} and is used to store the data
#' and clustering information.
#'
#' @docType class
#' @aliases ClusterExperiment ClusterExperiment-class clusterExperiment
#'
#' @description In addition to the slots of the \code{SummarizedExperiment}
#' class, the \code{ClusterExperiment} object has the additional slots described
#' in the Slots section.
#'
#' @description There are several methods implemented for this class. The most
#' important methods (e.g., \code{\link{clusterMany}}, \code{\link{combineMany}},
#' ...) have their own help page. Simple helper methods are described in the
#' Methods section. For a comprehensive list of methods specific to this class
#' see the Reference Manual.
#'
#' @slot transformation function. Function to transform the data by when methods
#' that assume normal-like data (e.g. log)
#' @slot clusterMatrix matrix. A matrix giving the integer-valued cluster ids
#' for each sample. The rows of the matrix correspond to samples and columns
#' to clusterings. The integer values are assigned in the order that the clusters
#' were found, if found by setting sequential=TRUE in clusterSingle. "-1" indicates
#' the sample was not clustered.
#' @slot primaryIndex numeric. An index that specifies the primary set of
#' labels.
#' @slot clusterInfo list. A list with info about the clustering.
#' If created from \code{\link{clusterSingle}}, clusterInfo will include the
#' parameter used for the call, and the call itself. If \code{sequential = TRUE}
#' it will also include the following components.
#' \itemize{
#' \item{\code{clusterInfo}}{if sequential=TRUE and clusters were successfully
#' found, a matrix of information regarding the algorithm behavior for each
#' cluster (the starting and stopping K for each cluster, and the number of
#' iterations for each cluster).}
#' \item{\code{whyStop}}{if sequential=TRUE and clusters were successfully
#' found, a character string explaining what triggered the algorithm to stop.}
#' }
#' @slot clusterTypes character vector with the origin of each column of
#' clusterMatrix.
#' @slot dendro_samples dendrogram. A dendrogram containing the cluster
#' relationship (leaves are samples; see \code{\link{makeDendrogram}} for
#' details).
#' @slot dendro_clusters dendrogram. A dendrogram containing the cluster
#' relationship (leaves are clusters; see \code{\link{makeDendrogram}} for
#' details).
#' @slot dendro_index numeric. An integer giving the cluster that was used to
#' make the dendrograms. NA_real_ value if no dendrograms are saved.
#' @slot dendro_outbranch logical. Whether the dendro_samples dendrogram put
#' missing/non-clustered samples in an outbranch, or intermixed in the dendrogram.
#' @slot coClustering matrix. A matrix with the cluster co-occurrence
#' information; this can either be based on subsampling or on co-clustering
#' across parameter sets (see \code{clusterMany}). The matrix is a square matrix
#' with number of rows/columns equal to the number of samples.
#' @slot clusterLegend a list, one element per clustering (column) in
#' \code{clusterMatrix}. Each element of the list is a matrix with nrows equal
#' to the number of different clusters in the clustering, and consisting of at
#' least three columns with the following column names: "clusterIds", "color"
#' and "name".
#' @slot orderSamples a numeric vector (of integers) defining the order of
#' samples to be used for plotting of samples. Usually set internally by other
#' functions.
#'
#' @name ClusterExperiment-class
#' @aliases ClusterExperiment
#' @rdname ClusterExperiment-class
#' @import SummarizedExperiment
#' @import methods
#' @importClassesFrom SummarizedExperiment SummarizedExperiment
#' @importFrom dendextend as.phylo.dendrogram
#' @export
#'
## Formal S4 class definition; slot semantics are documented in the roxygen
## block above. Extends SummarizedExperiment, so assays/colData/rowData are
## inherited from the parent class.
setClass(
  Class = "ClusterExperiment",
  contains = "SummarizedExperiment",
  slots = list(
    transformation="function",
    clusterMatrix = "matrix",
    primaryIndex = "numeric",
    clusterInfo = "list",
    clusterTypes = "character",
    dendro_samples = "dendrogramOrNULL",   # NULL until makeDendrogram is run
    dendro_clusters = "dendrogramOrNULL",  # NULL until makeDendrogram is run
    dendro_index = "numeric",
    dendro_outbranch = "logical",
    coClustering = "matrixOrNULL",
    clusterLegend="list",
    orderSamples="numeric"
  )
)
## One question is how to extend the "[" method, i.e., how do we subset the co-occurrence matrix and the dendrogram?
## For now, if subsetting, these are lost, but perhaps we can do something smarter?
## Validity method for ClusterExperiment. Per the S4 contract, it returns
## TRUE when the object is consistent, otherwise a character string
## describing the first problem found.
setValidity("ClusterExperiment", function(object) {
  ## ---- assay checks ----
  if(length(assays(object)) < 1) {
    return("There must be at least one assay slot.")
  }
  if(!is.numeric(assay(object))) {
    return("The data must be numeric.")
  }
  if(any(is.na(assay(object)))) {
    return("NA values are not allowed.")
  }
  ## The user-supplied transformation must run cleanly on the data and must
  ## not introduce NAs.
  tX <- try(transform(object), silent=TRUE)
  if(inherits(tX, "try-error")){
    ## BUG FIX: the message previously interpolated an undefined variable
    ## `x`; report the captured error message from the try-error instead.
    ## (Note: stop() here errors instead of returning a validity message,
    ## which is what the rest of this function does.)
    stop(paste("User-supplied `transformation` produces error on the input data
               matrix:\n", conditionMessage(attr(tX, "condition"))))
  }
  if(any(is.na(tX))) {
    return("NA values after transforming data matrix are not allowed.")
  }
  ## clusterMatrix is sample-by-clustering: one row per sample.
  if(!all(is.na((object@clusterMatrix))) &
     !(NROW(object@clusterMatrix) == NCOL(object))) {
    return("If present, `clusterMatrix` must have as many row as cells.")
  }
  if(!is.numeric(object@clusterMatrix)) {
    return("`clusterMatrix` must be a numeric matrix.")
  }
  if(NCOL(object@clusterMatrix) != length(object@clusterTypes)) {
    return("length of clusterTypes must be same as NCOL of the clusterMatrix")
  }
  if(NCOL(object@clusterMatrix) != length(object@clusterInfo)) {
    return("length of clusterInfo must be same as NCOL of the clusterMatrix")
  }
  ############
  ## Check dendrogram slots: dendro_samples and dendro_clusters must be
  ## filled (or empty) together, with matching leaf counts.
  ############
  if(!is.null(object@dendro_samples)){
    if(nobs(object@dendro_samples) != NCOL(object)) {
      return("dendro_samples must have the same number of leaves as the number of samples")
    }
    if(is.na(object@dendro_outbranch)) return("if dendro_samples is defined, must also define dendro_outbranch")
  }
  else{
    if(!is.null(object@dendro_clusters)) return("dendro_samples should not be null if dendro_clusters is non-null")
    if(!is.na(object@dendro_outbranch)) return("dendro_samples should not be null if dendro_outbranch is not NA")
  }
  if(!is.null(object@dendro_clusters)){
    if(is.na(dendroClusterIndex(object))) return("if dendrogram slots are filled, must have corresponding dendro_index defined.")
    dcluster <- clusterMatrix(object)[,dendroClusterIndex(object)]
    ## Cluster ids are consecutive positive integers (checked below), so
    ## max() gives the number of actual (non-negative) clusters.
    if(nobs(object@dendro_clusters) != max(dcluster)) {
      return("dendro_clusters must have the same number of leaves as the number of (non-negative) clusters")
    }
  }
  else{
    if(!is.null(object@dendro_samples)) return("dendro_clusters should not be null if dendro_samples is non-null")
  }
  ## Check co-clustering: must be a square, sample-by-sample matrix.
  if(!is.null(object@coClustering) &&
     (NROW(object@coClustering) != NCOL(object@coClustering)
      | NCOL(object@coClustering) != NCOL(object))) {
    return("`coClustering` must be a sample by sample matrix.")
  }
  ## If there is at least one non-NA clustering, check the clustering slots.
  if(!all(is.na(object@clusterMatrix))){
    # check primary index: exactly one, in range
    if(length(object@primaryIndex) != 1) {
      if(length(object@primaryIndex) == 0) return("If more than one set of clusterings, a primary cluster must
be specified.")
      if(length(object@primaryIndex) > 0) return("Only a single primary index may be specified")
    }
    if(object@primaryIndex > NCOL(object@clusterMatrix) |
       object@primaryIndex < 1) {
      return("`primaryIndex` out of bounds.")
    }
    # check clusterTypes
    if(NCOL(object@clusterMatrix) != length(object@clusterTypes)) {
      return("`clusterTypes` must be the same length as NCOL of
`clusterMatrix`.")
    }
    # Cluster ids must be stored as consecutive integers 1..K per column;
    # -1 (unassigned) and -2 are the allowed non-cluster codes.
    testConsecIntegers <- apply(object@clusterMatrix, 2, function(x){
      whCl <- which(!x %in% c(-1,-2))
      uniqVals <- unique(x[whCl])
      return(all(sort(uniqVals) == seq_along(uniqVals)))
    })
    if(!all(testConsecIntegers)) return("the cluster ids in clusterMatrix must be stored internally as consecutive integer values")
    ####
    # Test that colnames of clusterMatrix appropriately align with
    # everything else; per-clustering metadata is kept unnamed.
    ####
    if(is.null(colnames(object@clusterMatrix))) return("clusterMatrix must have column names")
    if(any(duplicated(colnames(object@clusterMatrix)))) return("clusterMatrix must have unique column names")
    if(!is.null(names(object@clusterTypes))) return("clusterTypes should not have names")
    if(!is.null(names(object@clusterInfo))) return("clusterInfo should not have names")
    if(!is.null(names(object@clusterLegend))) return("clusterLegend should not have names")
    ####
    # Test that @clusterLegend has the proper form: one character matrix per
    # clustering, with one row per cluster value and the required columns.
    ####
    if(length(object@clusterLegend) != NCOL(object@clusterMatrix)) {
      return("`clusterLegend` must be list of same length as NCOL of
`clusterMatrix`")
    }
    testIsMatrix <- sapply(object@clusterLegend,
                           function(x) {!is.null(dim(x))})
    if(!all(testIsMatrix)) {
      return("Each element of `clusterLegend` list must be a matrix")
    }
    testColorRows <- sapply(object@clusterLegend, function(x){nrow(x)})
    testClusterMat <- apply(object@clusterMatrix, 2, function(x) {
      length(unique(x))})
    if(!all(testColorRows == testClusterMat)) {
      return("each element of `clusterLegend` must be matrix with number of
rows equal to the number of clusters (including -1 or -2 values)
in `clusterMatrix`")
    }
    testColorCols1 <- sapply(object@clusterLegend, function(x) {
      "color" %in% colnames(x)})
    testColorCols2 <- sapply(object@clusterLegend, function(x) {
      "clusterIds" %in% colnames(x)})
    testColorCols3 <- sapply(object@clusterLegend, function(x) {
      "name" %in% colnames(x)})
    if(!all(testColorCols1) || !all(testColorCols2) || !all(testColorCols3)) {
      return("each element of `clusterLegend` must be matrix with at least 3
columns, and at least 3 columns have names `clusterIds`,
`color` and `name`")
    }
    testColorCols1 <- sapply(object@clusterLegend, function(x){is.character(x)})
    if(!all(testColorCols1)) {
      return("each element of `clusterLegend` must be matrix of character
values")
    }
    ## Every legend's clusterIds must occur in the corresponding column of
    ## clusterMatrix.
    testColorCols1 <- sapply(seq_along(object@clusterLegend), function(ii){
      col <- object@clusterLegend[[ii]]
      x <- object@clusterMatrix[,ii]
      y <- as.numeric(col[,"clusterIds"])
      all(y %in% x)
    })
    if(!all(testColorCols1)) {
      return("each element of `clusterLegend` must be matrix with column
`clusterIds` matching the corresponding integer valued
clusterMatrix values")
    }
  }
  ## orderSamples must be a permutation-style index into the samples.
  if(length(object@orderSamples) != NCOL(assay(object))) {
    return("`orderSamples` must be of same length as number of samples
(NCOL(assay(object)))")
  }
  if(any(!object@orderSamples %in% 1:NCOL(assay(object)))) {
    return("`orderSamples` must be values between 1 and the number of samples.")
  }
  return(TRUE)
})
#' @description The constructor \code{clusterExperiment} creates an object of
#' the class \code{ClusterExperiment}. However, the typical way of creating
#' these objects is the result of a call to \code{\link{clusterMany}} or
#' \code{\link{clusterSingle}}.
#'
#' @description Note that when subsetting the data, the co-clustering and
#' dendrogram information are lost.
#'
#'@param se a matrix or \code{SummarizedExperiment} containing the data to be
#'clustered.
#'@param clusters can be either a numeric or character vector, a factor, or a
#'numeric matrix, containing the cluster labels.
#'@param transformation function. A function to transform the data before
#'performing steps that assume normal-like data (i.e. constant variance), such
#'as the log.
#'@param ... The arguments \code{transformation}, \code{clusterTypes} and
#' \code{clusterInfo} to be passed to the constructor for signature
#' \code{SummarizedExperiment,matrix}.
#'
#'@return A \code{ClusterExperiment} object.
#'
#'@examples
#'
#'se <- matrix(data=rnorm(200), ncol=10)
#'labels <- gl(5, 2)
#'
#'cc <- clusterExperiment(se, as.numeric(labels), transformation =
#'function(x){x})
#'
#' @rdname ClusterExperiment-class
#' @export
# Declare the generic; the real work happens in the methods below, which all
# eventually funnel into the SummarizedExperiment,matrix method.
setGeneric("clusterExperiment",
           function(se, clusters, ...) standardGeneric("clusterExperiment"))
#' @rdname ClusterExperiment-class
#' @export
setMethod(
  f = "clusterExperiment",
  signature = signature("matrix", "ANY"),
  definition = function(se, clusters, ...) {
    # Wrap the bare matrix in a SummarizedExperiment and dispatch again.
    seObject <- SummarizedExperiment(se)
    clusterExperiment(seObject, clusters, ...)
  })
#' @rdname ClusterExperiment-class
setMethod(
  f = "clusterExperiment",
  signature = signature("SummarizedExperiment", "numeric"),
  definition = function(se, clusters, ...) {
    # Require exactly one cluster id per sample, then reshape the vector to a
    # one-column matrix and re-dispatch to the matrix method.
    nSamples <- NCOL(se)
    if (length(clusters) != nSamples) {
      stop("`clusters` must be a vector of length equal to the number of samples.")
    }
    clusterExperiment(se, matrix(clusters, ncol = 1), ...)
  })
#' @rdname ClusterExperiment-class
setMethod(
  f = "clusterExperiment",
  signature = signature("SummarizedExperiment", "character"),
  definition = function(se, clusters, ...) {
    # Reshape the label vector into a one-column matrix and re-dispatch.
    clusterMat <- matrix(clusters, ncol = 1)
    clusterExperiment(se, clusterMat, ...)
  })
#' @rdname ClusterExperiment-class
setMethod(
  f = "clusterExperiment",
  signature = signature("SummarizedExperiment", "factor"),
  definition = function(se, clusters, ...) {
    # Factors are handled by flattening them to their character labels and
    # re-dispatching to the character method.
    clusterExperiment(se, as.character(clusters), ...)
  })
#'@rdname ClusterExperiment-class
#'@param clusterTypes a string describing the nature of the clustering. The
#' values `clusterSingle`, `clusterMany`, `mergeClusters`, `combineMany` are
#' reserved for the clustering coming from the package workflow and should not
#' be used when creating a new object with the constructor.
#'@param clusterInfo a list with information on the clustering (see Slots).
#'@param primaryIndex integer. Sets the `primaryIndex` slot (see Slots).
#'@param orderSamples a vector of integers. Sets the `orderSamples` slot (see
#' Slots).
#'@param dendro_samples dendrogram. Sets the `dendro_samples` slot (see Slots).
#'@param dendro_clusters dendrogram. Sets the `dendro_clusters` slot (see
#' Slots).
#' @param dendro_outbranch logical. Sets the `dendro_outbranch` slot (see Slots)
#'@param dendro_index numeric. Sets the dendro_index slot (see Slots).
#'@param coClustering matrix. Sets the `coClustering` slot (see Slots).
#'@details The \code{clusterExperiment} constructor function gives clusterLabels
#' based on the column names of the input matrix/SummarizedExperiment. If
#' missing, will assign labels "cluster1","cluster2", etc.
setMethod(
  f = "clusterExperiment",
  signature = signature("SummarizedExperiment","matrix"),
  definition = function(se, clusters,
                        transformation,
                        primaryIndex=1,
                        clusterTypes="User",
                        clusterInfo=NULL,
                        orderSamples=1:ncol(se),
                        dendro_samples=NULL,
                        dendro_index=NA_real_,
                        dendro_clusters=NULL,
                        dendro_outbranch=NA,
                        coClustering=NULL
  ){
    ## `clusters` must hold one row per sample, one column per clustering.
    if(NCOL(se) != nrow(clusters)) {
      stop("`clusters` must be a matrix of rows equal to the number of
           samples.")
    }
    ## Recycle a single clusterTypes value / NULL clusterInfo across all
    ## clusterings. (Previously this block appeared twice; the verbatim
    ## duplicate has been removed.)
    if(length(clusterTypes) == 1) {
      clusterTypes <- rep(clusterTypes, length=NCOL(clusters))
    }
    if(is.null(clusterInfo)) {
      clusterInfo <- rep(list(NULL), length=NCOL(clusters))
    }
    if(length(clusterTypes) != NCOL(clusters)) {
      stop("clusterTypes must be of length equal to number of clusters in
           `clusters`")
    }
    ## Give default, unique column names to the clusterings.
    if(is.null(colnames(clusters))){
      colnames(clusters) <- paste("cluster", 1:NCOL(clusters), sep="")
    }
    if(any(duplicated(colnames(clusters)))){ #probably not possible
      colnames(clusters) <- make.names(colnames(clusters), unique=TRUE)
    }
    ## Recode the cluster labels as consecutive integers and build the color
    ## legend (one matrix per clustering).
    tmp <- .makeColors(clusters, colors=bigPalette)
    clusterLegend <- tmp$colorList
    clustersNum <- tmp$numClusters
    colnames(clustersNum) <- colnames(clusters)
    ## Passing `se` directly to new() keeps all SummarizedExperiment content.
    out <- new("ClusterExperiment",
               se,
               transformation=transformation,
               clusterMatrix = clustersNum,
               primaryIndex = primaryIndex,
               clusterTypes = unname(clusterTypes),
               clusterInfo=unname(clusterInfo),
               clusterLegend=unname(clusterLegend),
               ## BUG FIX: honor the user-supplied `orderSamples` argument;
               ## it was previously ignored in favor of a hard-coded
               ## 1:ncol(se) (identical to the default, so default calls are
               ## unaffected).
               orderSamples=orderSamples,
               dendro_samples=dendro_samples,
               dendro_clusters=dendro_clusters,
               dendro_index=dendro_index,
               dendro_outbranch=dendro_outbranch,
               coClustering=coClustering
    )
    validObject(out)
    return(out)
  })
|
#' Purpose: produce three sets of predictions, using all possible covariates, from one imputed data set
#' Inputs: data/imputed/imputed-lm-vartype.rds
#' Outputs: output/predictions/{glmnet_lm_all, glmnet_lm_all_experts, glmnet_lm_all_mturkers}/prediction.csv
#' Machine used: cluster
#' Expected runtime: hours
# Fix the RNG seed so the run is reproducible.
set.seed(123)
library(dplyr)
library(purrr)
library(readr)
library(tidyr)
library(doParallel)
library(parallel)
# Register one worker per physical core (logical = FALSE excludes hyperthreads).
registerDoParallel(cores = parallel::detectCores(logical = FALSE))
# for more info on
# cv.glmnet with parallel = TRUE and doParallel package:
# https://stackoverflow.com/a/21710769
# https://stackoverflow.com/a/29001039
# Project helpers: model wrapper (lasso), design-matrix setup, data
# validation/merging utilities, and submission packaging (zip_prediction).
source("code/models/calculate_penalty_factors.R")
source("code/models/lasso.R")
source("code/models/setup_lasso_all.R")
source("code/utils/validate_imputed_background.R")
source("code/utils/zip_prediction.R")
source("code/data_processing/R/get_vars.R")
source("code/data_processing/R/merge_train.R")
source("code/data_processing/R/subset_vars.R")
data_file_name <- "imputed-lm-vartype.rds"
prediction_name <- "glmnet_lm_all"
# data ----
train <- read_csv(file.path("data", "train.csv"))
imputed_background <- readRDS(file.path("data", "imputed", data_file_name))
# handle potential issues with imputed data
# adds a challengeID column if necessary
# removes any columns that still have NAs
# converts categorical variables to factors
imputed_background <- validate_imputed_background(imputed_background)
imputed_background <-
  imputed_background %>%
  subset_vars_remove(get_vars_unique)
# Merge the training outcomes into the background data (see merge_train()).
ffc <- merge_train(imputed_background, train)
# model information ----
# Six challenge outcomes: three continuous (gaussian family), three binary
# (binomial family), in matching order.
outcomes <- list("gpa", "grit", "materialHardship",
                 "eviction", "layoff", "jobTraining")
covariates <- colnames(imputed_background)[-1]
families <- as.list(c(rep("gaussian", 3),
                      rep("binomial", 3)))
# One alpha per outcome, same order as `outcomes`; presumably the glmnet
# elastic-net mixing parameter — confirm in lasso().
alphas <- as.list(c(0.05, 0.10, 0.025, 0.15, 0.05, 0.05))
# scores ----
ffvars_scored <-
  read_csv(file.path("data", "variables", "ffvars_scored.csv")) %>%
  filter(!is.na(ffvar))
# handle duplicates by taking max score
# NOTE: max(..., na.rm = TRUE) on an all-NA group yields -Inf (with a
# warning); the mutate() below converts those back to NA.
ffvars_scored <-
  ffvars_scored %>%
  group_by(outcome, ffvar) %>%
  summarise(experts = max(experts, na.rm=TRUE),
            mturkers = max(mturkers, na.rm=TRUE)) %>%
  mutate(experts = ifelse(is.infinite(experts), NA, experts),
         mturkers = ifelse(is.infinite(mturkers), NA, mturkers))
# Every outcome gets the full covariate set; the names use the snake_case
# outcome labels found in ffvars_scored.csv.
all_covariates <- rep(list(colnames(imputed_background)[-1]), 6)
names(all_covariates) <- c("gpa", "grit", "material_hardship",
                           "eviction", "layoff", "job_training")
# NOTE(review): as_data_frame() and gather() are deprecated/superseded in
# current tibble/tidyr (as_tibble()/pivot_longer()) — candidates for
# modernization.
covariates_df <-
  all_covariates %>%
  as_data_frame() %>%
  gather("outcome", "ffvar")
# Left join keeps every covariate, with NA scores where a variable was
# never scored.
ffvars_scored <-
  covariates_df %>%
  left_join(ffvars_scored, by = c("outcome", "ffvar"))
gpa_vars <- ffvars_scored %>% filter(outcome == "gpa")
grit_vars <- ffvars_scored %>% filter(outcome == "grit")
materialHardship_vars <- ffvars_scored %>% filter(outcome == "material_hardship")
eviction_vars <- ffvars_scored %>% filter(outcome == "eviction")
layoff_vars <- ffvars_scored %>% filter(outcome == "layoff")
jobTraining_vars <- ffvars_scored %>% filter(outcome == "job_training")
vars_data_list <- list(gpa_vars, grit_vars, materialHardship_vars,
                       eviction_vars, layoff_vars, jobTraining_vars)
names(vars_data_list) <- as.character(outcomes)
# map() with a string extracts that column from each per-outcome data frame.
scores_experts <- map(vars_data_list, "experts")
scores_mturkers <- map(vars_data_list, "mturkers")
# set up covariates ----
# these steps are time consuming!
# approximately 1hr each
x_cache <- lapply(outcomes, setup_x, data = ffc, covariates = covariates)
x_pred_cache <- setup_x_pred(ffc, covariates)
# Cache the design matrices so later runs can skip the setup above.
write_rds(x_cache, "data/cached/x_cache.rds")
write_rds(x_pred_cache, "data/cached/x_pred_cache.rds")
# run models ----
# Baseline run: no variable-score information passed to lasso().
prediction_list <-
  Map(f = function(...) {
    lasso(data = ffc,
          covariates = covariates,
          x_pred_cache = x_pred_cache,
          ..., parallel = TRUE)$pred
  },
  outcome = outcomes,
  family = families,
  x_cache = x_cache,
  alpha = alphas)
# with expert score information
prediction_list_experts <-
  Map(f = function(...) {
    lasso(data = ffc,
          covariates = covariates,
          x_pred_cache = x_pred_cache,
          ..., parallel = TRUE)$pred
  },
  outcome = outcomes,
  scores = scores_experts,
  family = families,
  x_cache = x_cache,
  alpha = alphas)
# with mturk score information
prediction_list_mturkers <-
  Map(f = function(...) {
    lasso(data = ffc,
          covariates = covariates,
          x_pred_cache = x_pred_cache,
          ..., parallel = TRUE)$pred
  },
  outcome = outcomes,
  scores = scores_mturkers,
  family = families,
  x_cache = x_cache,
  alpha = alphas)
# predictions ----
# Assemble one challengeID column plus one prediction column per outcome.
names(prediction_list) <- as.character(outcomes)
prediction <-
  ffc %>%
  select(challengeID) %>%
  bind_cols(prediction_list)
names(prediction_list_experts) <- as.character(outcomes)
prediction_experts <-
  ffc %>%
  select(challengeID) %>%
  bind_cols(prediction_list_experts)
names(prediction_list_mturkers) <- as.character(outcomes)
prediction_mturkers <-
  ffc %>%
  select(challengeID) %>%
  bind_cols(prediction_list_mturkers)
# output ----
# write to csv and zip for submission
prediction_name_experts <- paste0(prediction_name, "_experts")
prediction_name_mturkers <- paste0(prediction_name, "_mturkers")
zip_prediction(prediction, prediction_name, run_file = "run_lasso_all.R")
zip_prediction(prediction_experts, prediction_name_experts, run_file = "run_lasso_all.R")
zip_prediction(prediction_mturkers, prediction_name_mturkers, run_file = "run_lasso_all.R")
| /code/runs/run_lasso_all.R | permissive | formidable-family/ffc-humans-in-the-loop | R | false | false | 5,630 | r | #' Purpose: produce three sets of predictions, using all possible covariates, from one imputed data set
#' Inputs: data/imputed/imputed-lm-vartype.rds
#' Outputs: output/predictions/{glmnet_lm_all, glmnet_lm_all_experts, glmnet_lm_all_mturkers}/prediction.csv
#' Machine used: cluster
#' Expected runtime: hours
# Fix the RNG seed so the run is reproducible.
set.seed(123)
library(dplyr)
library(purrr)
library(readr)
library(tidyr)
library(doParallel)
library(parallel)
# Register one worker per physical core (logical = FALSE excludes hyperthreads).
registerDoParallel(cores = parallel::detectCores(logical = FALSE))
# for more info on
# cv.glmnet with parallel = TRUE and doParallel package:
# https://stackoverflow.com/a/21710769
# https://stackoverflow.com/a/29001039
# Project helpers: model wrapper (lasso), design-matrix setup, data
# validation/merging utilities, and submission packaging (zip_prediction).
source("code/models/calculate_penalty_factors.R")
source("code/models/lasso.R")
source("code/models/setup_lasso_all.R")
source("code/utils/validate_imputed_background.R")
source("code/utils/zip_prediction.R")
source("code/data_processing/R/get_vars.R")
source("code/data_processing/R/merge_train.R")
source("code/data_processing/R/subset_vars.R")
data_file_name <- "imputed-lm-vartype.rds"
prediction_name <- "glmnet_lm_all"
# data ----
train <- read_csv(file.path("data", "train.csv"))
imputed_background <- readRDS(file.path("data", "imputed", data_file_name))
# handle potential issues with imputed data
# adds a challengeID column if necessary
# removes any columns that still have NAs
# converts categorical variables to factors
imputed_background <- validate_imputed_background(imputed_background)
imputed_background <-
  imputed_background %>%
  subset_vars_remove(get_vars_unique)
# Merge the training outcomes into the background data (see merge_train()).
ffc <- merge_train(imputed_background, train)
# model information ----
# Six challenge outcomes: three continuous (gaussian family), three binary
# (binomial family), in matching order.
outcomes <- list("gpa", "grit", "materialHardship",
                 "eviction", "layoff", "jobTraining")
covariates <- colnames(imputed_background)[-1]
families <- as.list(c(rep("gaussian", 3),
                      rep("binomial", 3)))
# One alpha per outcome, same order as `outcomes`; presumably the glmnet
# elastic-net mixing parameter — confirm in lasso().
alphas <- as.list(c(0.05, 0.10, 0.025, 0.15, 0.05, 0.05))
# scores ----
ffvars_scored <-
  read_csv(file.path("data", "variables", "ffvars_scored.csv")) %>%
  filter(!is.na(ffvar))
# handle duplicates by taking max score
# NOTE: max(..., na.rm = TRUE) on an all-NA group yields -Inf (with a
# warning); the mutate() below converts those back to NA.
ffvars_scored <-
  ffvars_scored %>%
  group_by(outcome, ffvar) %>%
  summarise(experts = max(experts, na.rm=TRUE),
            mturkers = max(mturkers, na.rm=TRUE)) %>%
  mutate(experts = ifelse(is.infinite(experts), NA, experts),
         mturkers = ifelse(is.infinite(mturkers), NA, mturkers))
# Every outcome gets the full covariate set; the names use the snake_case
# outcome labels found in ffvars_scored.csv.
all_covariates <- rep(list(colnames(imputed_background)[-1]), 6)
names(all_covariates) <- c("gpa", "grit", "material_hardship",
                           "eviction", "layoff", "job_training")
# NOTE(review): as_data_frame() and gather() are deprecated/superseded in
# current tibble/tidyr (as_tibble()/pivot_longer()) — candidates for
# modernization.
covariates_df <-
  all_covariates %>%
  as_data_frame() %>%
  gather("outcome", "ffvar")
# Left join keeps every covariate, with NA scores where a variable was
# never scored.
ffvars_scored <-
  covariates_df %>%
  left_join(ffvars_scored, by = c("outcome", "ffvar"))
gpa_vars <- ffvars_scored %>% filter(outcome == "gpa")
grit_vars <- ffvars_scored %>% filter(outcome == "grit")
materialHardship_vars <- ffvars_scored %>% filter(outcome == "material_hardship")
eviction_vars <- ffvars_scored %>% filter(outcome == "eviction")
layoff_vars <- ffvars_scored %>% filter(outcome == "layoff")
jobTraining_vars <- ffvars_scored %>% filter(outcome == "job_training")
vars_data_list <- list(gpa_vars, grit_vars, materialHardship_vars,
                       eviction_vars, layoff_vars, jobTraining_vars)
names(vars_data_list) <- as.character(outcomes)
# map() with a string extracts that column from each per-outcome data frame.
scores_experts <- map(vars_data_list, "experts")
scores_mturkers <- map(vars_data_list, "mturkers")
# set up covariates ----
# these steps are time consuming!
# approximately 1hr each
x_cache <- lapply(outcomes, setup_x, data = ffc, covariates = covariates)
x_pred_cache <- setup_x_pred(ffc, covariates)
# Cache the design matrices so later runs can skip the setup above.
write_rds(x_cache, "data/cached/x_cache.rds")
write_rds(x_pred_cache, "data/cached/x_pred_cache.rds")
# run models ----
# Baseline run: no variable-score information passed to lasso().
prediction_list <-
  Map(f = function(...) {
    lasso(data = ffc,
          covariates = covariates,
          x_pred_cache = x_pred_cache,
          ..., parallel = TRUE)$pred
  },
  outcome = outcomes,
  family = families,
  x_cache = x_cache,
  alpha = alphas)
# with expert score information
prediction_list_experts <-
  Map(f = function(...) {
    lasso(data = ffc,
          covariates = covariates,
          x_pred_cache = x_pred_cache,
          ..., parallel = TRUE)$pred
  },
  outcome = outcomes,
  scores = scores_experts,
  family = families,
  x_cache = x_cache,
  alpha = alphas)
# with mturk score information
prediction_list_mturkers <-
  Map(f = function(...) {
    lasso(data = ffc,
          covariates = covariates,
          x_pred_cache = x_pred_cache,
          ..., parallel = TRUE)$pred
  },
  outcome = outcomes,
  scores = scores_mturkers,
  family = families,
  x_cache = x_cache,
  alpha = alphas)
# predictions ----
# Assemble one challengeID column plus one prediction column per outcome.
names(prediction_list) <- as.character(outcomes)
prediction <-
  ffc %>%
  select(challengeID) %>%
  bind_cols(prediction_list)
names(prediction_list_experts) <- as.character(outcomes)
prediction_experts <-
  ffc %>%
  select(challengeID) %>%
  bind_cols(prediction_list_experts)
names(prediction_list_mturkers) <- as.character(outcomes)
prediction_mturkers <-
  ffc %>%
  select(challengeID) %>%
  bind_cols(prediction_list_mturkers)
# output ----
# write to csv and zip for submission
prediction_name_experts <- paste0(prediction_name, "_experts")
prediction_name_mturkers <- paste0(prediction_name, "_mturkers")
zip_prediction(prediction, prediction_name, run_file = "run_lasso_all.R")
zip_prediction(prediction_experts, prediction_name_experts, run_file = "run_lasso_all.R")
zip_prediction(prediction_mturkers, prediction_name_mturkers, run_file = "run_lasso_all.R")
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE(review): these are auto-generated thin R wrappers around compiled C++
# routines registered under the `_GPM_*` symbols; any hand edits here will be
# overwritten the next time compileAttributes() runs. Semantics of each
# routine must be confirmed against the package's C++ source.
CorrMat_Sym <- function(X, CorrType, Omega) {
    .Call(`_GPM_CorrMat_Sym`, X, CorrType, Omega)
}
CorrMat_Vec <- function(X1, X2, CorrType, Omega) {
    .Call(`_GPM_CorrMat_Vec`, X1, X2, CorrType, Omega)
}
Eigen <- function(A) {
    .Call(`_GPM_Eigen`, A)
}
CppSolve <- function(A, B) {
    .Call(`_GPM_CppSolve`, A, B)
}
LowerChol <- function(A) {
    .Call(`_GPM_LowerChol`, A)
}
| /GPM/R/RcppExports.R | no_license | akhikolla/TestedPackages-NoIssues | R | false | false | 514 | r | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE(review): these are auto-generated thin R wrappers around compiled C++
# routines registered under the `_GPM_*` symbols; any hand edits here will be
# overwritten the next time Rcpp::compileAttributes() runs. Semantics of each
# routine must be confirmed against the package's C++ source.
CorrMat_Sym <- function(X, CorrType, Omega) {
    .Call(`_GPM_CorrMat_Sym`, X, CorrType, Omega)
}
CorrMat_Vec <- function(X1, X2, CorrType, Omega) {
    .Call(`_GPM_CorrMat_Vec`, X1, X2, CorrType, Omega)
}
Eigen <- function(A) {
    .Call(`_GPM_Eigen`, A)
}
CppSolve <- function(A, B) {
    .Call(`_GPM_CppSolve`, A, B)
}
LowerChol <- function(A) {
    .Call(`_GPM_LowerChol`, A)
}
|
#!/usr/bin/env Rscript
# Build a sample-to-population map file (presumably for a Treemix-style
# workflow, given the input file names): pairs each combined "V1_V2" sample
# id from the .fam file with a population label.
library(dplyr)
# Read the .fam file (no header; columns come in as V1, V2, ...).
FamFile <- read.table("Chenchereren2finalwithChimp.fam")
# Make a combined "V1_V2" id column in the style needed for populations.
FamFile$V7 <- paste(FamFile$V1, FamFile$V2, sep = "_")
FamFile <- FamFile %>% select(V7)
# Population labels, assumed to be in the same row order as the .fam file.
Ind <- read.table("Chenchereren2PopulationsTreemix.txt")
Mapfile <- cbind(FamFile, Ind)
# Write a headerless, unquoted, tab-separated map file.
# FIX: spell out FALSE instead of the reassignable shorthand `F`.
write.table(Mapfile, "Chenchereren2_map.tsv", sep = "\t",
            col.names = FALSE, row.names = FALSE, quote = FALSE)
| /Chenchereren2/Chenchereren2aDNAMapfile.R | no_license | audreyarner/MalagasyaDNA | R | false | false | 433 | r | #!/usr/bin/env Rscript
library(dplyr)
# Read the PLINK .fam file; columns come in unnamed as V1..V6
# (V1/V2 are presumably family and sample IDs — PLINK convention; confirm).
FamFile <- read.table("Chenchereren2finalwithChimp.fam")
# Combine the two ID columns into the "V1_V2" form needed for the
# populations map.
FamFile$V7 <- paste(FamFile$V1, FamFile$V2, sep = "_")
FamFile <- FamFile %>% select(V7)
# Per-sample population assignments for Treemix.
# NOTE(review): row order must match the .fam file — verify upstream.
Ind <- read.table("Chenchereren2PopulationsTreemix.txt")
Mapfile <- cbind(FamFile, Ind)
# Write the two-column map (combined ID, population) as tab-separated text.
# NOTE(review): `col.names =F` uses the T/F shorthand; TRUE/FALSE is safer,
# behaviour unchanged here.
write.table(Mapfile, "Chenchereren2_map.tsv", sep="\t", col.names =F, row.names = F, quote =F)
|
#' Sum an array over margins via compiled C code
#'
#' Collapses the array \code{X} by summing over all dimensions not listed in
#' \code{margins}, delegating the accumulation to the C routine
#' \code{ApplySum} in the physplit.analysis package. The flat index vectors
#' \code{ii} (retained cells) and \code{ir} (reduced cells) can be supplied
#' to skip recomputing them on repeated calls with identically shaped input.
#'
#' @param X A numeric array with a \code{dim} attribute.
#' @param margins Integer vector of dimensions of \code{X} to keep.
#' @param ii Optional precomputed integer indices for the kept cells.
#' @param ir Optional precomputed integer indices for the reduced cells.
#' @return A list with the summed array \code{Y} and the index vectors
#'   \code{ii} and \code{ir} (so they can be reused by the caller).
#' @export
ApplySum <- function(X, margins, ii = NULL, ir = NULL) {
  dims <- dim(X)
  nd <- length(dims)
  # Degenerate cases: keep nothing (grand total) / keep everything.
  # seq_len() avoids the c(1, 0) surprise of 1:nd when X has no dim attribute.
  if (length(margins) == 0) {
    return(list(Y = sum(X), ii = c(), ir = seq_len(nd)))
  }
  if (length(margins) == nd) {
    return(list(Y = X, ii = seq_len(nd), ir = c()))
  }
  ny <- prod(dims[margins])
  computeIndices <- is.null(ii)
  if (!computeIndices) {
    # Indices supplied by the caller: pass them straight through.
    results <- .C("ApplySum",
                  as.double(X), as.integer(dims), as.integer(nd),
                  as.integer(margins - 1), as.integer(length(margins)),
                  as.double(vector("double", ny)),
                  as.integer(computeIndices),
                  ii, length(ii), ir, length(ir),
                  PACKAGE = "physplit.analysis")
  } else {
    # Ask the C routine to compute the index vectors and return them.
    nx <- prod(dims)
    ni <- prod(dims[margins])
    nr <- nx / ni
    ii <- as.integer(vector("integer", ni))
    ir <- as.integer(vector("integer", nr))
    results <- .C("ApplySum",
                  as.double(X), as.integer(dims), as.integer(nd),
                  as.integer(margins - 1), as.integer(length(margins)),
                  as.double(vector("double", ny)),
                  as.integer(computeIndices),
                  # BUG FIX: the length of ir is nr, not ni (the other branch
                  # passes length(ir) here).
                  ii, ni, ir, nr,
                  PACKAGE = "physplit.analysis")
    ii <- results[[8]]
    ir <- results[[10]]
  }
  if (length(margins) == 1) {
    # Keep an explicit column-matrix shape for a single retained margin.
    Y <- array(results[[6]], dim = c(dims[margins], 1))
  } else {
    Y <- array(results[[6]], dim = dims[margins])
  }
  list(Y = Y, ii = ii, ir = ir)
}
| /R/ApplySum.r | no_license | sfrechter/physplit.analysis | R | false | false | 1,498 | r | #' @export
## Collapse the array X by summing over every dimension not listed in
## `margins`, delegating the accumulation to the compiled C routine
## "ApplySum" (physplit.analysis package). The flat index vectors `ii`
## (kept cells) and `ir` (reduced cells) can be passed in to avoid
## recomputing them on repeated calls; both are returned for reuse.
ApplySum <- function(X, margins, ii = NULL, ir = NULL){
  dims = dim(X);
  nd = length(dims);
  ## Degenerate cases: no margins kept (grand total) / all margins kept.
  if (length(margins)==0)
    return(list(Y = sum(X), ii = c(), ir=1:nd));
  if (length(margins)==nd)
    return(list(Y = X, ii = 1:nd, ir = c()));
  ## Number of cells in the collapsed result.
  ny = prod(dims[margins]);
  computeIndices = is.null(ii);
  if (!computeIndices){
    ## Don't compute the indices
    results = .C("ApplySum",
                 as.double(X), as.integer(dims), as.integer(nd), as.integer(margins-1), as.integer(length(margins)),
                 as.double(vector("double", ny)),
                 as.integer(computeIndices),
                 ii, length(ii), ir, length(ir), PACKAGE = "physplit.analysis");
  }else{
    ## We need the indices to be computed.
    nx = prod(dims); ni = prod(dims[margins]); nr = nx/ni;
    ii = as.integer(vector("integer", ni));
    ir = as.integer(vector("integer", nr));
    ## NOTE(review): the parallel branch above passes length(ir) in the
    ## penultimate slot; here `ni` is passed although ir has length nr.
    ## Looks like a bug whenever ni != nr — confirm against the C signature.
    results = .C("ApplySum",
                 as.double(X), as.integer(dims), as.integer(nd), as.integer(margins-1), as.integer(length(margins)),
                 as.double(vector("double", ny)),
                 as.integer(computeIndices),
                 ii, ni, ir, ni, PACKAGE = "physplit.analysis");
    ## Retrieve the indices the C routine filled in.
    ii = results[[8]];
    ir = results[[10]];
  }
  if (length(margins)==1){
    ## Single retained margin: keep an explicit column-matrix shape.
    Y = array(results[[6]], dim=c(dims[margins],1));
  }
  else{
    Y = array(results[[6]], dim=dims[margins]);
  }
  list(Y = Y, ii = ii, ir = ir);
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/word_title_page.R
\name{word_title_page}
\alias{word_title_page}
\title{Create title page}
\usage{
word_title_page(x)
}
\arguments{
\item{x}{List. Meta data of the document as a result from \code{\link[yaml]{yaml.load}}.}
}
\description{
Creates text for the title and abstract page for MS Word documents.
\emph{This function is not exported.}
}
\seealso{
\code{\link{apa6_word}}
}
| /man/word_title_page.Rd | no_license | minzhanggit/papaja | R | false | true | 461 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/word_title_page.R
\name{word_title_page}
\alias{word_title_page}
\title{Create title page}
\usage{
word_title_page(x)
}
\arguments{
\item{x}{List. Meta data of the document as a result from \code{\link[yaml]{yaml.load}}.}
}
\description{
Creates text for the title and abstract page for MS Word documents.
\emph{This function is not exported.}
}
\seealso{
\code{\link{apa6_word}}
}
|
library(shiny)
library(leaflet)
library(ggmap)
# Crime-type choices for the "Crímenes" checkbox group: display labels
# (Spanish, upper case) mapped to the values used internally.
crimes_vector <- c("APROPIACIÓN ILEGAL" = "Apropiación Ilegal", "AGRESIÓN AGRAVADA" = "Agresión Agravada", "ESCALAMIENTO" = "Escalamiento",
                   "ROBO" = "Robo", "VEHÍCULO HURTADO" = "Vehículo Hurtado", "ASESINATO" = "Asesinato", "VIOLACIÓN" = "Violación",
                   "INCENDIO MALICIOSO" = "Incendio Malicioso", "TRATA HUMANA" = "Trata Humana", "OTROS" = "Otros")
# Crime types ticked by default.
crimes_checked <- c("Apropiación Ilegal", "Escalamiento", "Robo", "Vehículo Hurtado")
# Day-of-week choices; all seven are ticked by default.
days_vector <- c("Domingo" = "Domingo", "Lunes" = "Lunes", "Martes" = "Martes", "Miércoles" = "Miércoles", "Jueves" = "Jueves", "Viernes" = "Viernes", "Sábado" = "Sábado")
days_checked <- c("Domingo", "Lunes", "Martes", "Miércoles", "Jueves", "Viernes", "Sábado")
# Six-hour time-of-day bins: display label mapped to the internal period
# value; all four ticked by default.
periods_vector <- c("MEDIANOCHE - 6:00 A.M." = "madrugada", "6:00 A.M. - MEDIODÍA" = "mañana",
                    "MEDIODÍA - 6:00 P.M." = "tarde", "6:00 P.M. - MEDIANOCHE" = "noche")
periods_checked <- c("madrugada", "mañana", "tarde", "noche")
# Faceting-variable options for the bar-plots tab.
plots_facets_vector <- c("día" , "período" , "crimen" )
# Years of data offered in the year dropdown.
years_vector <- c("2016", "2015", "2014", "2013", "2012")
# Will select one of the following locations at random
# (used as the initial value of the address text input).
locations_vector <- c("Calle Acosta, Caguas, 00725, Puerto Rico", "Calle Sol, Ponce, PR", "Calle Méndez Vigo, Mayagüez, Puerto Rico",
                      "Walmart Santurce, San Juan, Puerto Rico", "Avenida Ingeniero Manuel Domenech, San Juan, Puerto Rico",
                      "Calle Parque, Río Piedras, San Juan, Puerto Rico", "Calle Derkes, Guayama, Puerto Rico",
                      "Caguas Norte, Caguas, Puerto Rico", "Calle Cristo, Patillas, Puerto Rico", "Calle Georgetti, Manatí, Puerto Rico",
                      "Calle Betances, Arecibo, Puerto Rico", "Calle Del Carmen, Morovis, Puerto Rico", "Fajardo, PR", "Humacao, PR",
                      "Calle 4, Lares, Puerto Rico", "Calle Padre Feliciano, San Sebastián, Puerto Rico", "Levittown, Toa Baja, Puerto Rico",
                      "Calle Luna, San Juan, 00901, Puerto Rico", "Calle Baldorioty, Guaynabo, 00969, Puerto Rico", "Cayey, PR",
                      "Calle Rafael Laba, Aguas Buenas, Puerto Rico", "Calle Colón, San Lorenzo, PR", "Calle Corchado, Juncos, PR",
                      "Calle San José, Gurabo, PR", "Aguadilla, Puerto Rico", "Calle Juan Hernández, Isabela, Puerto Rico")
shinyUI(fluidPage(
titlePanel(h3("Mapa del crimen en Puerto Rico"), windowTitle = "Mapa del crimen en Puerto Rico"),
sidebarLayout (
sidebarPanel(
textInput("address",label=h4("Escriba una dirección o haga clic en el mapa"),
value=sample(locations_vector, size=1, replace=TRUE)),
sliderInput("radius",label=h4("Radio en millas"),
min=0.5,max=2.0,value=0.5, step=0.5),
actionButton("goButton", "Buscar", style="color: #fff; background-color: #337ab7; border-color: #2e6da4"),
selectInput("year", label = h4("Año"), years_vector, selected = "2015"),
checkboxGroupInput("crimes", label = h4("Crímenes"), choices = crimes_vector, selected = crimes_checked, inline = TRUE),
checkboxGroupInput("days_of_week", label = h4("Día de la semana"), choices = days_vector, selected = days_checked, inline = TRUE),
checkboxGroupInput("time_periods", label = h4("Período"), choices = periods_vector, selected = periods_checked, inline = TRUE),
selectInput("plots_facets", label = h4("Facetas para bar plot"), plots_facets_vector),
HTML('<b style="color: #337ab7;"><a href="mailto:carlosgg123@gmail.com" target="_blank">carlosgg123@gmail.com</a></b>')
),
mainPanel(
tabsetPanel(
tabPanel("Mapa", leafletOutput("map",width="auto",height="640px")),
tabPanel("Data", dataTableOutput("DataTable")),
tabPanel("Barplots", plotOutput("barplots", width = "auto", height="640px")),
tabPanel("Mapas de densidad", plotOutput("density_maps", width = "auto", height="640px")),
tabPanel("Tabla", verbatimTextOutput("table")),
tabPanel("Instrucciones", htmlOutput("instructions")),
tabPanel("Referencias", htmlOutput("references"))
#tabPanel("Debug", verbatimTextOutput("debug"))
)
)
))) | /pr-crime/ui.r | no_license | carlosror/puerto_rico_crime | R | false | false | 4,348 | r | library(shiny)
library(leaflet)
library(ggmap)
crimes_vector <- c("APROPIACIÓN ILEGAL" = "Apropiación Ilegal", "AGRESIÓN AGRAVADA" = "Agresión Agravada", "ESCALAMIENTO" = "Escalamiento",
"ROBO" = "Robo", "VEHÍCULO HURTADO" = "Vehículo Hurtado", "ASESINATO" = "Asesinato", "VIOLACIÓN" = "Violación",
"INCENDIO MALICIOSO" = "Incendio Malicioso", "TRATA HUMANA" = "Trata Humana", "OTROS" = "Otros")
crimes_checked <- c("Apropiación Ilegal", "Escalamiento", "Robo", "Vehículo Hurtado")
days_vector <- c("Domingo" = "Domingo", "Lunes" = "Lunes", "Martes" = "Martes", "Miércoles" = "Miércoles", "Jueves" = "Jueves", "Viernes" = "Viernes", "Sábado" = "Sábado")
days_checked <- c("Domingo", "Lunes", "Martes", "Miércoles", "Jueves", "Viernes", "Sábado")
periods_vector <- c("MEDIANOCHE - 6:00 A.M." = "madrugada", "6:00 A.M. - MEDIODÍA" = "mañana",
"MEDIODÍA - 6:00 P.M." = "tarde", "6:00 P.M. - MEDIANOCHE" = "noche")
periods_checked <- c("madrugada", "mañana", "tarde", "noche")
plots_facets_vector <- c("día" , "período" , "crimen" )
years_vector <- c("2016", "2015", "2014", "2013", "2012")
# Will select one of the following locations at random
locations_vector <- c("Calle Acosta, Caguas, 00725, Puerto Rico", "Calle Sol, Ponce, PR", "Calle Méndez Vigo, Mayagüez, Puerto Rico",
"Walmart Santurce, San Juan, Puerto Rico", "Avenida Ingeniero Manuel Domenech, San Juan, Puerto Rico",
"Calle Parque, Río Piedras, San Juan, Puerto Rico", "Calle Derkes, Guayama, Puerto Rico",
"Caguas Norte, Caguas, Puerto Rico", "Calle Cristo, Patillas, Puerto Rico", "Calle Georgetti, Manatí, Puerto Rico",
"Calle Betances, Arecibo, Puerto Rico", "Calle Del Carmen, Morovis, Puerto Rico", "Fajardo, PR", "Humacao, PR",
"Calle 4, Lares, Puerto Rico", "Calle Padre Feliciano, San Sebastián, Puerto Rico", "Levittown, Toa Baja, Puerto Rico",
"Calle Luna, San Juan, 00901, Puerto Rico", "Calle Baldorioty, Guaynabo, 00969, Puerto Rico", "Cayey, PR",
"Calle Rafael Laba, Aguas Buenas, Puerto Rico", "Calle Colón, San Lorenzo, PR", "Calle Corchado, Juncos, PR",
"Calle San José, Gurabo, PR", "Aguadilla, Puerto Rico", "Calle Juan Hernández, Isabela, Puerto Rico")
# UI definition: a sidebar with the search controls and a tabset with the
# map, data table, plots and help pages. All user-facing text is Spanish.
shinyUI(fluidPage(
  titlePanel(h3("Mapa del crimen en Puerto Rico"), windowTitle = "Mapa del crimen en Puerto Rico"),
  sidebarLayout (
    sidebarPanel(
      # Address search box, seeded with a random example location.
      textInput("address",label=h4("Escriba una dirección o haga clic en el mapa"),
                value=sample(locations_vector, size=1, replace=TRUE)),
      # Search radius around the address, in miles.
      sliderInput("radius",label=h4("Radio en millas"),
                  min=0.5,max=2.0,value=0.5, step=0.5),
      actionButton("goButton", "Buscar", style="color: #fff; background-color: #337ab7; border-color: #2e6da4"),
      selectInput("year", label = h4("Año"), years_vector, selected = "2015"),
      # Filters: crime types, days of the week and time-of-day periods.
      checkboxGroupInput("crimes", label = h4("Crímenes"), choices = crimes_vector, selected = crimes_checked, inline = TRUE),
      checkboxGroupInput("days_of_week", label = h4("Día de la semana"), choices = days_vector, selected = days_checked, inline = TRUE),
      checkboxGroupInput("time_periods", label = h4("Período"), choices = periods_vector, selected = periods_checked, inline = TRUE),
      selectInput("plots_facets", label = h4("Facetas para bar plot"), plots_facets_vector),
      # Contact link shown at the bottom of the sidebar.
      HTML('<b style="color: #337ab7;"><a href="mailto:carlosgg123@gmail.com" target="_blank">carlosgg123@gmail.com</a></b>')
    ),
    mainPanel(
      tabsetPanel(
        tabPanel("Mapa", leafletOutput("map",width="auto",height="640px")),
        tabPanel("Data", dataTableOutput("DataTable")),
        tabPanel("Barplots", plotOutput("barplots", width = "auto", height="640px")),
        tabPanel("Mapas de densidad", plotOutput("density_maps", width = "auto", height="640px")),
        tabPanel("Tabla", verbatimTextOutput("table")),
        tabPanel("Instrucciones", htmlOutput("instructions")),
        tabPanel("Referencias", htmlOutput("references"))
        #tabPanel("Debug", verbatimTextOutput("debug"))
      )
    )
)))
#' Format a proportion as a percentage string.
#'
#' Multiplies \code{x} by 100, formats it with \code{formatC} and appends a
#' literal "%" sign. (Consolidates a duplicated copy of this definition.)
#'
#' @param x Numeric vector of proportions (e.g. 0.5 for 50%).
#' @param digits Number of digits after the decimal point (default 2).
#' @param format \code{formatC} format code; "f" gives fixed-point notation.
#' @param ... Further arguments passed on to \code{formatC}
#'   (e.g. \code{big.mark}).
#' @return Character vector the same length as \code{x}, e.g. "50.00%".
percent <- function(x, digits = 2, format = "f", ...) {
  paste0(formatC(100 * x, format = format, digits = digits, ...), "%")
}
## run_analysis.R — builds a tidy summary of the UCI HAR data set:
## 1. Downloads and reads the UCI HAR data set
## 2. Extracts mean and standard deviation measurements for each variable
## 3. Merges the training and the test data sets to create one data set
## 4. Uses descriptive activity names for the activities in the data set
## 5. Appropriately labels the data set with descriptive variable names
## 6. Creates a second, independent tidy data set with the average of each
##    variable for each activity and each subject.
## Load the required libraries
library(data.table)
library(reshape2)
## Create a new directory for the project in your current working directory
if (!file.exists("./Project")) {
  dir.create("./Project")
}
## Set the current working directory to "Project".
## NOTE(review): setwd() changes global state for the whole session — fine
## for a one-shot script, but beware when sourcing this file interactively.
setwd("./Project")
## Download the data set and extract data from zip archives
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl, destfile = "UCI_HAR_dataset.zip", method = "curl")
unzip("UCI_HAR_dataset.zip")
## Load activity labels and the list of variable (feature) names
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")[, 2]
features <- read.table("./UCI HAR Dataset/features.txt")[, 2]
## Load X & Y training data sets as well as the list of training subjects
X_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
Y_train <- read.table("./UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
## Load X & Y test data sets as well as the list of test subjects
## (the original comment said "training" here — copy-paste slip)
X_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
Y_test <- read.table("./UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
## Keep only the variables that are mean() or std() measurements
extract_features <- grepl("mean\\(\\)|std\\(\\)", features)
## Label and subset the training measurements
names(X_train) <- features
X_train <- X_train[, extract_features]
## Attach activity labels & subject IDs for the training data
Y_train[, 2] <- activity_labels[Y_train[, 1]]
names(Y_train) <- c("Activity_ID", "Activity_Label")
names(subject_train) <- "Subject"
## Combine mean & std measurements with activity and subject columns
train_data <- cbind(as.data.table(subject_train), Y_train, X_train)
## Label and subset the test measurements
names(X_test) <- features
X_test <- X_test[, extract_features]
## Attach activity labels & subject IDs for the test data
Y_test[, 2] <- activity_labels[Y_test[, 1]]
names(Y_test) <- c("Activity_ID", "Activity_Label")
names(subject_test) <- "Subject"
## Combine mean & std measurements with activity and subject columns
test_data <- cbind(as.data.table(subject_test), Y_test, X_test)
## Merge training and test data sets
combined_data <- rbind(test_data, train_data)
## Melt the combined data for reshaping. `id.vars` is spelled out in full to
## avoid relying on partial argument matching.
id_labels <- c("Subject", "Activity_ID", "Activity_Label")
data_labels <- setdiff(colnames(combined_data), id_labels)
melted_data <- melt(combined_data, id.vars = id_labels, measure.vars = data_labels)
## Average each variable for each activity and each subject, casting the
## molten data into one tidy row per (Subject, Activity_Label) pair
tidy_data <- dcast(melted_data, Subject + Activity_Label ~ variable, mean)
## Clean variable names and generate descriptive labels
names(tidy_data) <- gsub("\\(|\\)|‐|,|_", "", names(tidy_data))
names(tidy_data) <- gsub("^t", "Time", names(tidy_data))
names(tidy_data) <- gsub("^f", "Freq", names(tidy_data))
names(tidy_data) <- gsub("BodyBody", "Body", names(tidy_data))
names(tidy_data) <- gsub("mean", "Mean", names(tidy_data))
names(tidy_data) <- gsub("std", "StdDev", names(tidy_data))
## Create a text file with the tidy data set containing the average of each
## variable for each activity and each subject.
## BUG FIX: the original passed `row.name = FALSE`, which only worked via
## R's partial argument matching; the argument is `row.names`.
write.table(tidy_data, file = "UCI_HAR_tidy_data.txt", row.names = FALSE, quote = FALSE)
| /run_analysis.R | no_license | dvenkita/GCD-Project | R | false | false | 4,057 | r | ## This R script "run_analysis.R" does the following:
## 1. Downloads and reads the UCI HAR data set
## 2. Extracts mean and standard deviation measurements for each variable
## 3. Merges the training and the test data sets to create one data set
## 4. Uses descriptive activity names to the activities in the data set
## 5. Appropriately labels the data set with descriptive variable names
## 6. Creates a second, independent tidy data set with the average of each
## variable for each activity and each subject.
## Load the required libraries
library(data.table)
library(reshape2)
## Create a new directory for the project in your current working directory
if(!file.exists("./Project")){
dir.create("./Project")
}
## Set the current working directory to "Project"
setwd("./Project")
## Download the data set and extract data from zip archives
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl, destfile = "UCI_HAR_dataset.zip", method = "curl")
unzip("UCI_HAR_dataset.zip")
## Load activity labels and list of variables for each feature
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")[,2]
features <- read.table("./UCI HAR Dataset/features.txt")[,2]
## Load X & Y training data sets as well as list of training data subjects
X_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
Y_train <- read.table("./UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
## Load X & Y training data sets as well as list of test data subjects
X_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
Y_test <- read.table("./UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
## Extract list of variables that contain mean and standard deviation (std)
## measurements
extract_features <- grepl("mean\\(\\)|std\\(\\)", features)
## Extract measurements on mean and std for the training data
names(X_train) = features
X_train = X_train[,extract_features]
## Extract activity ID, activity labels & subject ID for the training dataset
Y_train[,2] = activity_labels[Y_train[,1]]
names(Y_train) = c("Activity_ID", "Activity_Label")
names(subject_train) = "Subject"
## Combine mean & std measurement with activity ID, activity labels & subject
## ID for training data
train_data <- cbind(as.data.table(subject_train), Y_train, X_train)
## Extract measurements on mean and std for the test data
names(X_test) = features
X_test = X_test[,extract_features]
## Extract activity ID, activity labels & subject ID for the test dataset
Y_test[,2] = activity_labels[Y_test[,1]]
names(Y_test) = c("Activity_ID", "Activity_Label")
names(subject_test) = "Subject"
## Combine mean & std measurement with activity ID, activity labels & subject
## ID for test data
test_data <- cbind(as.data.table(subject_test), Y_test, X_test)
## Merge training and test data sets
combined_data = rbind(test_data, train_data)
## Melt the combined data for reshaping
id_labels = c("Subject", "Activity_ID", "Activity_Label")
data_labels = setdiff(colnames(combined_data), id_labels)
melted_data = melt(combined_data, id = id_labels, measure.vars = data_labels)
## Average each variable for each activity and each subject
## Cast the averaged molten data into tidy data frame
tidy_data = dcast(melted_data, Subject + Activity_Label ~ variable, mean)
## Clean variable names and generate descriptive labels
names(tidy_data) = gsub("\\(|\\)|‐|,|_", "", names(tidy_data))
names(tidy_data) = gsub("^t", "Time", names(tidy_data))
names(tidy_data) = gsub("^f", "Freq", names(tidy_data))
names(tidy_data) = gsub("BodyBody", "Body", names(tidy_data))
names(tidy_data) = gsub("mean", "Mean", names(tidy_data))
names(tidy_data) = gsub("std", "StdDev", names(tidy_data))
## Create a text file with tidy dataset containing average of each variable
## for each activity and each subject
write.table(tidy_data, file = "UCI_HAR_tidy_data.txt",row.name = FALSE, quote = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/layout.R
\name{config}
\alias{config}
\title{Set the default configuration for plotly}
\usage{
config(
p,
...,
cloud = FALSE,
showSendToCloud = cloud,
locale = NULL,
mathjax = NULL
)
}
\arguments{
\item{p}{a plotly object}
\item{...}{these arguments are documented at
\url{https://github.com/plotly/plotly.js/blob/master/src/plot_api/plot_config.js}}
\item{cloud}{deprecated. Use \code{showSendToCloud} instead.}
\item{showSendToCloud}{include the send data to cloud button?}
\item{locale}{locale to use. See \href{https://github.com/plotly/plotly.js/tree/master/dist#to-include-localization}{here} for more info.}
\item{mathjax}{add \href{https://github.com/plotly/plotly.js/tree/master/dist#to-support-mathjax}{MathJax rendering support}.
If \code{"cdn"}, mathjax is loaded externally (meaning an internet connection is needed for
TeX rendering). If \code{"local"}, the PLOTLY_MATHJAX_PATH environment variable must be
set to the location (a local file path) of MathJax. IMPORTANT: \strong{plotly} uses SVG-based
mathjax rendering which doesn't play nicely with HTML-based rendering
(e.g., \strong{rmarkdown} documents and \strong{shiny} apps). To leverage both types of rendering,
you must \verb{<iframe>} your plotly graph(s) into the larger document
(see \href{https://github.com/plotly/plotly.R/blob/master/inst/examples/rmd/MathJax/index.Rmd}{here}
for an \strong{rmarkdown} example and
\href{https://github.com/plotly/plotly.R/blob/master/inst/examples/rmd/MathJax/index.Rmd}{here} for a \strong{shiny} example).}
}
\description{
Set the default configuration for plotly
}
\examples{
# remove the plotly logo and collaborate button from modebar
config(plot_ly(), displaylogo = FALSE, collaborate = FALSE)
# enable mathjax
# see more examples at https://plotly.com/r/LaTeX/
plot_ly(x = c(1, 2, 3, 4), y = c(1, 4, 9, 16)) \%>\%
layout(title = TeX("\\\\text{Some mathjax: }\\\\alpha+\\\\beta x")) \%>\%
config(mathjax = "cdn")
# change the language used to render date axes and on-graph text
# (e.g., modebar buttons)
today <- Sys.Date()
x <- seq.Date(today, today + 360, by = "day")
p <- plot_ly(x = x, y = rnorm(length(x))) \%>\%
add_lines()
# japanese
config(p, locale = "ja")
# german
config(p, locale = "de")
# spanish
config(p, locale = "es")
# chinese
config(p, locale = "zh-CN")
}
\author{
Carson Sievert
}
| /man/config.Rd | permissive | Displayr/plotly | R | false | true | 2,427 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/layout.R
\name{config}
\alias{config}
\title{Set the default configuration for plotly}
\usage{
config(
p,
...,
cloud = FALSE,
showSendToCloud = cloud,
locale = NULL,
mathjax = NULL
)
}
\arguments{
\item{p}{a plotly object}
\item{...}{these arguments are documented at
\url{https://github.com/plotly/plotly.js/blob/master/src/plot_api/plot_config.js}}
\item{cloud}{deprecated. Use \code{showSendToCloud} instead.}
\item{showSendToCloud}{include the send data to cloud button?}
\item{locale}{locale to use. See \href{https://github.com/plotly/plotly.js/tree/master/dist#to-include-localization}{here} for more info.}
\item{mathjax}{add \href{https://github.com/plotly/plotly.js/tree/master/dist#to-support-mathjax}{MathJax rendering support}.
If \code{"cdn"}, mathjax is loaded externally (meaning an internet connection is needed for
TeX rendering). If \code{"local"}, the PLOTLY_MATHJAX_PATH environment variable must be
set to the location (a local file path) of MathJax. IMPORTANT: \strong{plotly} uses SVG-based
mathjax rendering which doesn't play nicely with HTML-based rendering
(e.g., \strong{rmarkdown} documents and \strong{shiny} apps). To leverage both types of rendering,
you must \verb{<iframe>} your plotly graph(s) into the larger document
(see \href{https://github.com/plotly/plotly.R/blob/master/inst/examples/rmd/MathJax/index.Rmd}{here}
for an \strong{rmarkdown} example and
\href{https://github.com/plotly/plotly.R/blob/master/inst/examples/rmd/MathJax/index.Rmd}{here} for a \strong{shiny} example).}
}
\description{
Set the default configuration for plotly
}
\examples{
# remove the plotly logo and collaborate button from modebar
config(plot_ly(), displaylogo = FALSE, collaborate = FALSE)
# enable mathjax
# see more examples at https://plotly.com/r/LaTeX/
plot_ly(x = c(1, 2, 3, 4), y = c(1, 4, 9, 16)) \%>\%
layout(title = TeX("\\\\text{Some mathjax: }\\\\alpha+\\\\beta x")) \%>\%
config(mathjax = "cdn")
# change the language used to render date axes and on-graph text
# (e.g., modebar buttons)
today <- Sys.Date()
x <- seq.Date(today, today + 360, by = "day")
p <- plot_ly(x = x, y = rnorm(length(x))) \%>\%
add_lines()
# japanese
config(p, locale = "ja")
# german
config(p, locale = "de")
# spanish
config(p, locale = "es")
# chinese
config(p, locale = "zh-CN")
}
\author{
Carson Sievert
}
|
# loading libraries
library(ggplot2)
library(mvtnorm)
shinyServer(function(input, output, session) {
# Define functions
sigmaXY <- function(rho, sdX, sdY) {
covTerm <- rho * sdX * sdY
VCmatrix <- matrix(c(sdX^2, covTerm, covTerm, sdY^2),2, 2, byrow = TRUE)
return(VCmatrix)
}
  # Draw `n` observations from a bivariate normal with mean vector `muXY`
  # and covariance matrix `sigmaXY`, using mvtnorm::rmvnorm. A non-NA `seed`
  # makes the draw reproducible. Returns an n x length(muXY) matrix.
  genBVN <- function(n = 1, seed = NA, muXY=c(0,1), sigmaXY=diag(2)) {
    if(!is.na(seed)) set.seed(seed)
    rdraws <- rmvnorm(n, mean = muXY, sigma = sigmaXY)
    return(rdraws)
  }
  # Simulate a labelled loan data set: `noApproved` approved and `noDenied`
  # denied applicants, each group drawn from its own bivariate normal over
  # (PIratio, solvency). Returns a data frame with the two features, a
  # "deny" label and a numeric 0/1 "target" (0 = approved, 1 = denied).
  loanData <- function(noApproved = 50, noDenied = 50, muApproved, muDenied, sdApproved,
                       sdDenied, rhoApproved=-0.1, rhoDenied= 0.6, seed=1111) {
    sigmaApproved <- sigmaXY(rho=rhoApproved, sdX=sdApproved[1], sdY=sdApproved[2])
    sigmaDenied <- sigmaXY(rho=rhoDenied, sdX=sdDenied[1], sdY=sdDenied[2])
    # `seed` is matched by name, so the positional arguments fill genBVN's
    # n, muXY and sigmaXY parameters in that order.
    approved <- genBVN(noApproved, muApproved, sigmaApproved, seed = seed)
    # seed+1 keeps the denied draws distinct from the approved ones.
    denied <- genBVN(noDenied, muDenied, sigmaDenied, seed = seed+1)
    loanDf <- as.data.frame(rbind(approved,denied))
    deny <- c(rep("Approved", noApproved), rep("Denied", noDenied))
    target = c(rep(0, noApproved), rep(1, noDenied))
    loanDf <- data.frame(loanDf, deny, target)
    colnames(loanDf) <- c("PIratio", "solvency", "deny", "target")
    return(loanDf)
  }
  # Reactive: regenerate the simulated loan data whenever any of the
  # mean/sd inputs for the approved or denied group change.
  selectedData <- reactive({
    loanData(muApproved = c(input$muAX, input$muAY), muDenied =c(input$muDX, input$muDY),
             sdApproved = c(input$sdAX, input$sdAY), sdDenied = c(input$sdDX, input$sdDY))
  })
  # Reactive: linear model of the 0/1 target on the two features
  # (the "+ 1" spells out the intercept explicitly).
  Datafit <- reactive({
    datafit <- lm(target ~ solvency + PIratio + 1, data=selectedData())
  })
  # Reactive: intercept and slope of the 0.5-threshold decision boundary in
  # (solvency, PIratio) space, derived from the fitted coefficients.
  Boundaries <- reactive({
    weights <- coef(Datafit())[c("solvency", "PIratio")]
    intercept <- (-coef(Datafit())[1] + 0.5)/weights["PIratio"]
    slope <- -(weights["solvency"]/weights["PIratio"])
    return(data.frame(intercept = intercept, slope = slope))
  })
  # Reactive: confusion matrix of true labels vs. labels predicted by
  # thresholding the fitted values at 0.5.
  ConfMatrix <- reactive({
    predictedLabels <- ifelse(predict(Datafit()) < 0.5, "Approved", "Denied")
    confMatrixFreq <- table(selectedData()$deny, predictedLabels)
    return(confMatrixFreq)
  })
  # Output: scatter plot of the simulated data coloured by class, with the
  # linear decision boundary overlaid.
  output$plot1 <- renderPlot({
    ggplot(data = selectedData(),
           aes(x = solvency, y = PIratio, colour=deny, fill=deny)) +
      geom_point() +
      xlab("solvency") +
      ylab("PIratio") +
      theme_bw() +
      geom_abline(intercept = Boundaries()$intercept, slope = Boundaries()$slope)
  })
  # Output: render the confusion matrix as a table.
  output$table1 <- renderTable({
    ConfMatrix()
  })
}) | /PS2/app/server.R | no_license | ainalopez/Advanced-Computational-Methods | R | false | false | 2,660 | r | # loading libraries
library(ggplot2)
library(mvtnorm)
shinyServer(function(input, output, session) {
# Define functions
sigmaXY <- function(rho, sdX, sdY) {
covTerm <- rho * sdX * sdY
VCmatrix <- matrix(c(sdX^2, covTerm, covTerm, sdY^2),2, 2, byrow = TRUE)
return(VCmatrix)
}
genBVN <- function(n = 1, seed = NA, muXY=c(0,1), sigmaXY=diag(2)) {
if(!is.na(seed)) set.seed(seed)
rdraws <- rmvnorm(n, mean = muXY, sigma = sigmaXY)
return(rdraws)
}
loanData <- function(noApproved = 50, noDenied = 50, muApproved, muDenied, sdApproved,
sdDenied, rhoApproved=-0.1, rhoDenied= 0.6, seed=1111) {
sigmaApproved <- sigmaXY(rho=rhoApproved, sdX=sdApproved[1], sdY=sdApproved[2])
sigmaDenied <- sigmaXY(rho=rhoDenied, sdX=sdDenied[1], sdY=sdDenied[2])
approved <- genBVN(noApproved, muApproved, sigmaApproved, seed = seed)
denied <- genBVN(noDenied, muDenied, sigmaDenied, seed = seed+1)
loanDf <- as.data.frame(rbind(approved,denied))
deny <- c(rep("Approved", noApproved), rep("Denied", noDenied))
target = c(rep(0, noApproved), rep(1, noDenied))
loanDf <- data.frame(loanDf, deny, target)
colnames(loanDf) <- c("PIratio", "solvency", "deny", "target")
return(loanDf)
}
# Combine the selected variables into a new data frame
selectedData <- reactive({
loanData(muApproved = c(input$muAX, input$muAY), muDenied =c(input$muDX, input$muDY),
sdApproved = c(input$sdAX, input$sdAY), sdDenied = c(input$sdDX, input$sdDY))
})
# compute the fitting
Datafit <- reactive({
datafit <- lm(target ~ solvency + PIratio + 1, data=selectedData())
})
# Compute boundaries
Boundaries <- reactive({
weights <- coef(Datafit())[c("solvency", "PIratio")]
intercept <- (-coef(Datafit())[1] + 0.5)/weights["PIratio"]
slope <- -(weights["solvency"]/weights["PIratio"])
return(data.frame(intercept = intercept, slope = slope))
})
# Compute the confusion matrix
ConfMatrix <- reactive({
predictedLabels <- ifelse(predict(Datafit()) < 0.5, "Approved", "Denied")
confMatrixFreq <- table(selectedData()$deny, predictedLabels)
return(confMatrixFreq)
})
# serving the plot of the data
output$plot1 <- renderPlot({
ggplot(data = selectedData(),
aes(x = solvency, y = PIratio, colour=deny, fill=deny)) +
geom_point() +
xlab("solvency") +
ylab("PIratio") +
theme_bw() +
geom_abline(intercept = Boundaries()$intercept, slope = Boundaries()$slope)
})
# serving the table of the data
output$table1 <- renderTable({
ConfMatrix()
})
}) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotVarReg.R
\name{plotVarReg}
\alias{plotVarReg}
\title{Plots graphics for a mean and variance regression model}
\usage{
plotVarReg(
x,
knot.lines = FALSE,
ci = FALSE,
ci.type = c("im", "boot"),
bootreps = 1000,
xlab = "x",
ylab = "y",
control = list(...),
...
)
}
\arguments{
\item{x}{Object of class \code{VarReg} (see \code{\link{semiVarReg}}).}
\item{knot.lines}{Logical to indicate if knot lines should be shown on graphics
(if model is type "semi"). Default is \code{FALSE}}
\item{ci}{Logical indicate if 95\% CI should be shown on the plots. Default is \code{FALSE}
and \code{ci.type="im"}.}
\item{ci.type}{Text to indicate the type of CI to plot. Either \code{"im"} (information matrix) or \code{"boot"} (bootstrapped). Default is \code{"im"}.}
\item{bootreps}{Integer to indicate the number of bootstrap replications to be performed if \code{ci.type="boot"}. Default is \code{1000}.}
\item{xlab}{Text for the label to be placed on the \code{x} axis of graphics (covariate)}
\item{ylab}{Text for the label to be placed on the \code{y} axis of graphics (outcome)}
\item{control}{list of control parameters to be used in bootstrapping.
See \code{\link{VarReg.control}}.}
\item{...}{arguments to be used to form the default control argument if it is not supplied
directly}
}
\value{
This function returns a 2x2 plot, with slightly different plots given, depending on the outcome data. For uncensored data, the plots are:
\itemize{
\item the mean function over the \code{x}-variable, with or without 95\% CI, and with or
without the knot lines indicated
\item the variance function over the \code{x}-variable, with or without 95\% CI and with or
without the knot lines indicated
\item a Q-Q plot of the residuals from the model
\item a histogram of the residuals from the model
}
If the outcome data is censored, the last two plots are no longer appropriate.
Given the censored residuals from the model, we can compare the squared standardised residuals
(given in black) with their censoring indicator to the chi-squared distribution with one
degree of freedom (given in red). This is one of the plots given for censored data, and the
other is a plot of the data, coloured by the censoring status. The triangles with the point at
the top are bottom censored and the triangles with the point at the bottom are top censored.
}
\description{
\code{plotVarReg} to produce graphics for models fit in this package.
}
\examples{
data(mcycle)
linmodel<-semiVarReg(mcycle$accel, mcycle$times, meanmodel="linear", varmodel="linear",
maxit=10000)
plotVarReg(linmodel)
plotVarReg(linmodel, ci=TRUE, ci.type="im", ylab="Range", xlab="Time in seconds")
##not run
##plotVarReg(linmodel, ci=TRUE, ci.type="boot", bootreps=10,ylab="Acceleration",
##xlab="Time in seconds")
##not run
##semimodel<-semiVarReg(mcycle$accel, mcycle$times, meanmodel="semi", varmodel="semi",
##knots.m=4, knots.v=2, maxit=10000)
##plotVarReg(semimodel, ci=TRUE, ci.type="boot",bootreps=10,ylab="Acceleration",
##xlab="Time in seconds", maxit=10000)
}
\seealso{
\code{\link{semiVarReg}}, \code{\link{VarReg.control}}
}
| /man/plotVarReg.Rd | no_license | kristyrobledo/VarReg | R | false | true | 3,196 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotVarReg.R
\name{plotVarReg}
\alias{plotVarReg}
\title{Plots graphics for a mean and variance regression model}
\usage{
plotVarReg(
x,
knot.lines = FALSE,
ci = FALSE,
ci.type = c("im", "boot"),
bootreps = 1000,
xlab = "x",
ylab = "y",
control = list(...),
...
)
}
\arguments{
\item{x}{Object of class \code{VarReg} (see \code{\link{semiVarReg}}).}
\item{knot.lines}{Logical to indicate if knot lines should be shown on graphics
(if model is type "semi"). Default is \code{FALSE}}
\item{ci}{Logical indicate if 95\% CI should be shown on the plots. Default is \code{FALSE}
and \code{ci.type="im"}.}
\item{ci.type}{Text to indicate the type of CI to plot. Either \code{"im"} (information matrix) or \code{"boot"} (bootstrapped). Default is \code{"im"}.}
\item{bootreps}{Integer to indicate the number of bootstrap replications to be performed if \code{ci.type="boot"}. Default is \code{1000}.}
\item{xlab}{Text for the label to be placed on the \code{x} axis of graphics (covariate)}
\item{ylab}{Text for the label to be placed on the \code{y} axis of graphics (outcome)}
\item{control}{list of control parameters to be used in bootstrapping.
See \code{\link{VarReg.control}}.}
\item{...}{arguments to be used to form the default control argument if it is not supplied
directly}
}
\value{
This function returns a 2x2 plot, with slightly different plots given, depending on the outcome data. For uncensored data, the plots are:
\itemize{
\item the mean function over the \code{x}-variable, with or without 95\% CI, and with or
without the knot lines indicated
\item the variance function over the \code{x}-variable, with or without 95\% CI and with or
without the knot lines indicated
\item a Q-Q plot of the residuals from the model
\item a histogram of the residuals from the model
}
If the outcome data is censored, the last two plots are no longer appropriate.
Given the censored residuals from the model, we can compare the squared standardised residuals
(given in black) with their censoring indicator to the chi-squared distribution with one
degree of freedom (given in red). This is one of the plots given for censored data, and the
other is a plot of the data, coloured by the censoring status. The triangles with the point at
the top are bottom censored and the triangles with the point at the bottom are top censored.
}
\description{
\code{plotVarReg} to produce graphics for models fit in this package.
}
\examples{
data(mcycle)
linmodel<-semiVarReg(mcycle$accel, mcycle$times, meanmodel="linear", varmodel="linear",
maxit=10000)
plotVarReg(linmodel)
plotVarReg(linmodel, ci=TRUE, ci.type="im", ylab="Range", xlab="Time in seconds")
##not run
##plotVarReg(linmodel, ci=TRUE, ci.type="boot", bootreps=10,ylab="Acceleration",
##xlab="Time in seconds")
##not run
##semimodel<-semiVarReg(mcycle$accel, mcycle$times, meanmodel="semi", varmodel="semi",
##knots.m=4, knots.v=2, maxit=10000)
##plotVarReg(semimodel, ci=TRUE, ci.type="boot",bootreps=10,ylab="Acceleration",
##xlab="Time in seconds", maxit=10000)
}
\seealso{
\code{\link{semiVarReg}}, \code{\link{VarReg.control}}
}
|
demisia ( sau retragerea fortata ) a lui Pavel Todoran de la CNSLR Fratia e cu cintec .
numai ziarele mai amplifica in " boxe " cazul , politia si politicienii tacind chitic .
poporul doarme , iar la sindicate e liniste .
de fapt , totul nu e decit o minciuna cit roata carului .
la sindicate nu e liniste , nici ordine , iar plebea nu doarme .
stringe doar din dinti .
poate pentru ca nu - i vine sa creada .
l - a vazut de atitea ori pe Pavel Todoran la televizor , la mitinguri si nu - si crede ochilor .
omul acesta , totdeauna imbracat ingrijit , la cravata , cu parul lui buclat trecut parca in fiecare dimineata prin miinile unui frizer , in loc sa se bata pentru membrii de sindicat , asa cum era firesc , " invirtea " statiuni , cladiri , facea contracte de milioane de dolari , garanta cu bunuri ale Fratiei .
daca cititi articolul din ziarul nostru de astazi , chiar va luati cu miinile de cap .
seful de la Fratia ii concura la singe pe Razvan Temesan si George Constantin Paunescu .
daca i - ar fi iesit doua - trei pasiente , ar fi ajuns mare bancher , ca Sever Muresan sau , mai stii , chiar ca Rockefeller .
dincolo de aura de sindicat fortos , cum ne - a obisnuit Fratia inca din 1990 , iata se ascund afaceri oneroase , incheiate abracadabrant .
importuri de televizoare din Coreea , participari la o banca ajunsa intr - o rina financiara , vinzari de actiuni , majorari de capital , o aiureala incredibila pe care nimeni nu vrea s - o descurce .
si nici nu stim daca avem de - a face cu un amatorism sinucigas sau cu lucraturi abile de maestru in falimentari .
politia si celelalte institutii abilitate ale statului nu vor sa faca lumina in acest caz .
nici in altele !
de ce ?
pentru ca guvernul si partidele politice se tem de sindicate !
una - doua , ele aprind flacara grevelor si pot rasturna totul .
si , in baza acestui potential exploziv , marile centrale sindicale au putut sa treaca binisor peste limitele legii .
ele sint in situatia partidelor politice .
chiar cei care ar trebui sa vegheze la aplicarea legii n - au nici un interes ca ea sa opereze .
situatia nu e caracteristica numai Fratiei .
cazuri similare putem intilni si la celelalte confederatii .
ele si - au creat firme care au derulat operatiuni dubioase .
unele au ajuns acum extrem de bogate , ba , chiar sint in situatia de a cumpara mari holdinguri .
imaginati - va un sindicat care cumpara , sa zicem , Uzina de utilaj petrolier din Ploiesti .
ce fel de sindicat ar fi ala care ar avea nevoie de contracte de export , de inlesniri la importuri de utilaje , de aminari la TVA , de scutiri de vama ?
n - ar fi decit o jucarie trasa de sfoara de oricare guvernanti .
faceti gura mica si va dam un contract , va scutim de penalizari sau naiba mai stie ce !
in toata aceasta evolutie nefireasca , liderii de sindicat sint mai ceva decit miliardarii de carton .
nu risca nimic , nu - si pun pielea la bataie , nu dau faliment .
in schimb , o duc mult mai bine , se ascund in spatele unor societati comerciale care pot " crapa " linistite .
singura lor problema este sa fie realesi .
cit ii priveste pe muncitori si pe ceilalti membri de sindicat , Dumnezeu cu mila !
sa - si plateasca toate cotizatiile si sa raspunda la chemarile la greva .
dar sa nu se infurie prea tare pentru ca incurca jocurile alesilor !
tendinta de implicare a sindicatelor in diverse afaceri ( prin intermediul firmelor care le apartin in totalitate ) reprezinta un fenomen .
e o forma anapoda de a intelege miscarea sindicala .
o inventie hermafrodita , rezultata din incrucisarea miliardarilor de carton cu militantismul de parada , n - are cum sa nu duca in prapastie !
in acest colt de pagina am mai scris despre situatii similare .
pozitia ziarului nostru a generat reactii nervoase .
o mare centrala sindicala , in loc sa isi redescopere menirea , ne cheama in instanta , socotind ca in felul acesta ne vom speria si vom inchide ochii la afacerile sindicatelor .
nimic mai gresit !
" evenimentul zilei " , indiferent de reactiile unora sau ale altora , isi va continua dezvaluirile , fidel liniei sale de pina acum .
afacerile sindicatelor sint la fel de necurate ca ale politicienilor .
" corb la corb nu - si scoate ochii " , zice un proverb romanesc .
dar vine o vreme in care toti corbii orbesc singuri .
| /data/Newspapers/2000.10.05.editorial.58601.0456.r | no_license | narcis96/decrypting-alpha | R | false | false | 4,332 | r | demisia ( sau retragerea fortata ) a lui Pavel Todoran de la CNSLR Fratia e cu cintec .
numai ziarele mai amplifica in " boxe " cazul , politia si politicienii tacind chitic .
poporul doarme , iar la sindicate e liniste .
de fapt , totul nu e decit o minciuna cit roata carului .
la sindicate nu e liniste , nici ordine , iar plebea nu doarme .
stringe doar din dinti .
poate pentru ca nu - i vine sa creada .
l - a vazut de atitea ori pe Pavel Todoran la televizor , la mitinguri si nu - si crede ochilor .
omul acesta , totdeauna imbracat ingrijit , la cravata , cu parul lui buclat trecut parca in fiecare dimineata prin miinile unui frizer , in loc sa se bata pentru membrii de sindicat , asa cum era firesc , " invirtea " statiuni , cladiri , facea contracte de milioane de dolari , garanta cu bunuri ale Fratiei .
daca cititi articolul din ziarul nostru de astazi , chiar va luati cu miinile de cap .
seful de la Fratia ii concura la singe pe Razvan Temesan si George Constantin Paunescu .
daca i - ar fi iesit doua - trei pasiente , ar fi ajuns mare bancher , ca Sever Muresan sau , mai stii , chiar ca Rockefeller .
dincolo de aura de sindicat fortos , cum ne - a obisnuit Fratia inca din 1990 , iata se ascund afaceri oneroase , incheiate abracadabrant .
importuri de televizoare din Coreea , participari la o banca ajunsa intr - o rina financiara , vinzari de actiuni , majorari de capital , o aiureala incredibila pe care nimeni nu vrea s - o descurce .
si nici nu stim daca avem de - a face cu un amatorism sinucigas sau cu lucraturi abile de maestru in falimentari .
politia si celelalte institutii abilitate ale statului nu vor sa faca lumina in acest caz .
nici in altele !
de ce ?
pentru ca guvernul si partidele politice se tem de sindicate !
una - doua , ele aprind flacara grevelor si pot rasturna totul .
si , in baza acestui potential exploziv , marile centrale sindicale au putut sa treaca binisor peste limitele legii .
ele sint in situatia partidelor politice .
chiar cei care ar trebui sa vegheze la aplicarea legii n - au nici un interes ca ea sa opereze .
situatia nu e caracteristica numai Fratiei .
cazuri similare putem intilni si la celelalte confederatii .
ele si - au creat firme care au derulat operatiuni dubioase .
unele au ajuns acum extrem de bogate , ba , chiar sint in situatia de a cumpara mari holdinguri .
imaginati - va un sindicat care cumpara , sa zicem , Uzina de utilaj petrolier din Ploiesti .
ce fel de sindicat ar fi ala care ar avea nevoie de contracte de export , de inlesniri la importuri de utilaje , de aminari la TVA , de scutiri de vama ?
n - ar fi decit o jucarie trasa de sfoara de oricare guvernanti .
faceti gura mica si va dam un contract , va scutim de penalizari sau naiba mai stie ce !
in toata aceasta evolutie nefireasca , liderii de sindicat sint mai ceva decit miliardarii de carton .
nu risca nimic , nu - si pun pielea la bataie , nu dau faliment .
in schimb , o duc mult mai bine , se ascund in spatele unor societati comerciale care pot " crapa " linistite .
singura lor problema este sa fie realesi .
cit ii priveste pe muncitori si pe ceilalti membri de sindicat , Dumnezeu cu mila !
sa - si plateasca toate cotizatiile si sa raspunda la chemarile la greva .
dar sa nu se infurie prea tare pentru ca incurca jocurile alesilor !
tendinta de implicare a sindicatelor in diverse afaceri ( prin intermediul firmelor care le apartin in totalitate ) reprezinta un fenomen .
e o forma anapoda de a intelege miscarea sindicala .
o inventie hermafrodita , rezultata din incrucisarea miliardarilor de carton cu militantismul de parada , n - are cum sa nu duca in prapastie !
in acest colt de pagina am mai scris despre situatii similare .
pozitia ziarului nostru a generat reactii nervoase .
o mare centrala sindicala , in loc sa isi redescopere menirea , ne cheama in instanta , socotind ca in felul acesta ne vom speria si vom inchide ochii la afacerile sindicatelor .
nimic mai gresit !
" evenimentul zilei " , indiferent de reactiile unora sau ale altora , isi va continua dezvaluirile , fidel liniei sale de pina acum .
afacerile sindicatelor sint la fel de necurate ca ale politicienilor .
" corb la corb nu - si scoate ochii " , zice un proverb romanesc .
dar vine o vreme in care toti corbii orbesc singuri .
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotWeights.R
\name{plotWeights}
\alias{plotWeights}
\title{Plot Weights}
\usage{
plotWeights(object, view, factor, nfeatures = 10, abs = FALSE,
manual = NULL, color_manual = NULL, scale = TRUE)
}
\arguments{
\item{object}{a \code{\link{MOFAmodel}} object.}
\item{view}{character vector with the view name, or numeric vector with the index of the view to use.}
\item{factor}{character vector with the factor name, or numeric vector with the index of the factor to use.}
\item{nfeatures}{number of top features to label.}
\item{abs}{logical indicating whether to use the absolute value of the weights.}
\item{manual}{A nested list of character vectors with features to be manually labelled.}
\item{color_manual}{a character vector with colors, one for each element of 'manual'}
\item{scale}{logical indicating whether to scale all loadings from 0 to 1.}
}
\description{
An important step to annotate factors is to visualise the corresponding feature loadings. \cr
This function plots all loadings for a given latent factor and view, labeling the top ones. \cr
In contrast, the function \code{\link{plotTopWeights}} displays only the top features with highest loading.
}
\details{
The weights of the features within a view are relative and they should not be interpreted in an absolute scale.
For interpretability purposes we always recommend to scale the weights with \code{scale=TRUE}.
}
\examples{
# Example on the CLL data
filepath <- system.file("extdata", "CLL_model.hdf5", package = "MOFAtools")
MOFA_CLL <- loadModel(filepath)
plotWeights(MOFA_CLL, view="Mutations", factor=1)
plotWeights(MOFA_CLL, view="Mutations", factor=1,
manual=list("IGHV", c("TP53", "del17p13")), color_manual=c("blue", "red"))
# Example on the scMT data
filepath <- system.file("extdata", "scMT_model.hdf5", package = "MOFAtools")
MOFA_scMT <- loadModel(filepath)
plotWeights(MOFA_scMT, view="RNA expression", factor=1)
plotWeights(MOFA_scMT, view="RNA expression", factor=1, nfeatures=15)
}
| /MOFAtools/man/plotWeights.Rd | no_license | vibbits/MOFA | R | false | true | 2,063 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotWeights.R
\name{plotWeights}
\alias{plotWeights}
\title{Plot Weights}
\usage{
plotWeights(object, view, factor, nfeatures = 10, abs = FALSE,
manual = NULL, color_manual = NULL, scale = TRUE)
}
\arguments{
\item{object}{a \code{\link{MOFAmodel}} object.}
\item{view}{character vector with the view name, or numeric vector with the index of the view to use.}
\item{factor}{character vector with the factor name, or numeric vector with the index of the factor to use.}
\item{nfeatures}{number of top features to label.}
\item{abs}{logical indicating whether to use the absolute value of the weights.}
\item{manual}{A nested list of character vectors with features to be manually labelled.}
\item{color_manual}{a character vector with colors, one for each element of 'manual'}
\item{scale}{logical indicating whether to scale all loadings from 0 to 1.}
}
\description{
An important step to annotate factors is to visualise the corresponding feature loadings. \cr
This function plots all loadings for a given latent factor and view, labeling the top ones. \cr
In contrast, the function \code{\link{plotTopWeights}} displays only the top features with highest loading.
}
\details{
The weights of the features within a view are relative and they should not be interpreted in an absolute scale.
For interpretability purposes we always recommend to scale the weights with \code{scale=TRUE}.
}
\examples{
# Example on the CLL data
filepath <- system.file("extdata", "CLL_model.hdf5", package = "MOFAtools")
MOFA_CLL <- loadModel(filepath)
plotWeights(MOFA_CLL, view="Mutations", factor=1)
plotWeights(MOFA_CLL, view="Mutations", factor=1,
manual=list("IGHV", c("TP53", "del17p13")), color_manual=c("blue", "red"))
# Example on the scMT data
filepath <- system.file("extdata", "scMT_model.hdf5", package = "MOFAtools")
MOFA_scMT <- loadModel(filepath)
plotWeights(MOFA_scMT, view="RNA expression", factor=1)
plotWeights(MOFA_scMT, view="RNA expression", factor=1, nfeatures=15)
}
|
#Utilisateurs Doctissimo
library(rvest)
load("C:/Users/ottavig/Documents/docti_membre.RData")
Sys.setenv(http_proxy="*****")
Sys.setenv(https_proxy="*****")
Sys.setenv(encoding = "UTF-8")
if (!exists("uti")){
uti <- liste_membre
uti <- uti[which(unique(as.character(uti)) != "Profil supprimé")]
uti <- paste("http://club.doctissimo.fr/",uti,"/",sep="")
uti <- gsub("é","e",uti, fixed = T)
uti <- gsub("è","e",uti, fixed = T)
uti <- gsub("ê","e",uti, fixed = T)
uti <- gsub("ë","e",uti, fixed = T)
uti <- gsub(" ","-",uti,fixed = T)
uti <- gsub("./","",uti,fixed = T)
}
for (pers in uti) {
if(typeof(try(html(pers),silent= T)) == "externalptr") {
html_uti <- html(pers)
nom_uti <- html_uti %>% html_nodes("h1#user_pseudo") %>% html_text()
grade_uti <- html_uti %>% html_nodes("div.user_infos span.user_grade") %>% html_text()
grade_uti <- if(length(grade_uti) == 0) "NonRens" else grade_uti
sexe_uti <- html_uti %>% html_nodes("div.small_column_left_panel img") %>% html_attr("alt")
sexe_uti <- if(length(sexe_uti) == 0) "NonRens" else sexe_uti
date_uti <- try(html_uti %>% html_nodes("div.small_column_left div.small_column_left_panel") %>% .[[2]] %>% html_text(),silent = T)
date_uti <- substr(date_uti,regexpr("[0-9][0-9][0-9][0-9]",date_uti,perl= T),regexpr("[0-9][0-9][0-9][0-9]",date_uti,perl= T) + 3)
date_uti <- if(date_uti == "") "NonRens" else date_uti
date_uti <- if(substr(date_uti,1,2) == "fa") "NonRens" else date_uti
zone_uti <- html_uti %>% html_nodes("div.small_column_left_shadow div#more_informations") %>% html_text()
zone_uti <- gsub("(\\r)","",zone_uti,perl = T)
zone_uti <- gsub("(\\t)","",zone_uti,perl = T)
zone_uti <- gsub("(\\n)","",zone_uti,perl = T)
zone_uti <- substr(zone_uti,18,nchar(zone_uti))
zone_uti <- if(length(zone_uti) == 0) "NonRens" else zone_uti
visit_uti <- html_uti %>% html_nodes("div.small_column_left_shadow ul li strong") %>% html_text()
msg_uti <- visit_uti[2]
visit_uti <- visit_uti[1]
profil_uti <- data.frame(cbind(nom_uti,sexe_uti,date_uti,grade_uti,zone_uti,visit_uti,msg_uti))
if (!exists("docti_uti")) docti_uti <- profil_uti else if(typeof(try(rbind(docti_uti,profil_uti),silent = T)) == "list")docti_uti <- rbind(docti_uti,profil_uti)
rm(profil_uti)
}
print(paste0("Il reste ", length(uti) - which(pers == uti) , " sur ", length(uti)))
save.image("C:/Users/ottavig/Documents/docti_membre.RData")
Sys.sleep(3)
}
rm(nom_uti,date_uti,grade_uti,sexe_uti,msg_uti,visit_uti,zone_uti,profil_uti,pers,html_uti)
#------
docti_uti <- unique(docti_uti)
for (i in 2:length(docti_uti$nom_uti)) if (docti_uti$nom_uti[i] == docti_uti$nom_uti[(i-1)]) docti_uti <- docti_uti[-i + 1 ,]
docti_uti$date_uti <- ifelse(nchar(as.character(docti_uti$date_uti)) != 4, "NonRens", as.character(docti_uti$date_uti))
docti_uti$zone_uti <- ifelse(nchar(as.character(docti_uti$zone_uti)) > 40 ,"NonRens", as.character(docti_uti$zone_uti))
docti_uti$zone_uti <- ifelse(regexpr("[0-9][0-9][0-9][0-9][0-9]",docti_uti$zone_uti,perl= T) != -1, substr(docti_uti$zone_uti,regexpr("[0-9][0-9]",docti_uti$zone_uti,perl= T),regexpr("[0-9][0-9]",docti_uti$zone_uti,perl= T) + 1),docti_uti$zone_uti)
docti$date_msg <- ifelse(regexpr("[0-9][0-9][0-9][0-9]",docti$date_msg,perl= T) != -1, substr(docti$date_msg,regexpr("[0-9][0-9][0-9][0-9]",docti$date_msg,perl= T),regexpr("[0-9][0-9][0-9][0-9]",docti$date_msg,perl= T) + 3),docti$date_msg)
#A TERMINER
for (c in names(docti_uti)) docti_uti[,c] <- as.character(docti_uti[,c])
res <- docti_uti[1,]
j <- 2
var <- docti_uti[1,"nom_uti"]
for (i in 2:length(docti_uti$nom_uti)) {
if (var != docti_uti[i,"nom_uti"]) {
res[j,] <- docti_uti[i,]
j <- j+1
}
var <- docti_uti[i,"nom_uti"]
}
require(httr)
require(RCurl)
require(RJSONIO)
full_url <- oauth_callback()
full_url <- gsub("(.*localhost:[0-9]{1,5}/).*", x=full_url, replacement="\\1")
print(full_url)
app_name <- "instaR"
client_id <- "73ffb23e19ea4bcea82d48284b665ca1"
client_secret <- "cf785d5ca9084772bf47c5c10375a965"
scope <- "basic"
instagram <- oauth_endpoint(
authorize = "https://api.instagram.com/oauth/authorize",
access = "https://api.instagram.com/oauth/access_token")
myapp <- oauth_app(app_name, client_id, client_secret)
ig_oauth <- oauth2.0_token(instagram, myapp,scope="basic", type = "application/x-www-form-urlencoded",cache=FALSE)
tmp <- strsplit(toString(names(ig_oauth$credentials)), '"')
token <- tmp[[1]][4]
user_info <- fromJSON(getURL(paste('https://api.instagram.com/v1/users/search?q=',username,'&access_token=',token,sep="")),unexpected.escape = "keep")
user_followed <- fromJSON(getURL(paste('https://api.instagram.com/v1/users/37721750/followed_by?access_token=',token,sep="")),unexpected.escape = "keep")
username <- "ninonchka"
received_profile <- user_info$data[[1]]
library(instaR)
load("my_oauth")
mccain <- getFollowers(username, token)
test <- getUser(username = "ninonchka",token)
obama <- getUser( userid = 269505098, token = token)
obama <- searchInstagram( tag="", token=my_oauth, n=100, folder="obama")
?instaR
obama <- getFollowers( username="bysuzette", token=token)
instaOAuth
token <- oauth2.0_token(instagram, myapp, cache = FALSE,
scope = scope)
if (GET(paste0("https://api.instagram.com/v1/users/self/feed?count=1",
"&access_token=", token$credentials$access_token))$status ==
200) message("Authentication successful.")
| /doctissimo/docti_membres_liste.R | no_license | Gottavianoni/R | R | false | false | 5,499 | r | #Utilisateurs Doctissimo
library(rvest)
load("C:/Users/ottavig/Documents/docti_membre.RData")
Sys.setenv(http_proxy="*****")
Sys.setenv(https_proxy="*****")
Sys.setenv(encoding = "UTF-8")
if (!exists("uti")){
uti <- liste_membre
uti <- uti[which(unique(as.character(uti)) != "Profil supprimé")]
uti <- paste("http://club.doctissimo.fr/",uti,"/",sep="")
uti <- gsub("é","e",uti, fixed = T)
uti <- gsub("è","e",uti, fixed = T)
uti <- gsub("ê","e",uti, fixed = T)
uti <- gsub("ë","e",uti, fixed = T)
uti <- gsub(" ","-",uti,fixed = T)
uti <- gsub("./","",uti,fixed = T)
}
for (pers in uti) {
if(typeof(try(html(pers),silent= T)) == "externalptr") {
html_uti <- html(pers)
nom_uti <- html_uti %>% html_nodes("h1#user_pseudo") %>% html_text()
grade_uti <- html_uti %>% html_nodes("div.user_infos span.user_grade") %>% html_text()
grade_uti <- if(length(grade_uti) == 0) "NonRens" else grade_uti
sexe_uti <- html_uti %>% html_nodes("div.small_column_left_panel img") %>% html_attr("alt")
sexe_uti <- if(length(sexe_uti) == 0) "NonRens" else sexe_uti
date_uti <- try(html_uti %>% html_nodes("div.small_column_left div.small_column_left_panel") %>% .[[2]] %>% html_text(),silent = T)
date_uti <- substr(date_uti,regexpr("[0-9][0-9][0-9][0-9]",date_uti,perl= T),regexpr("[0-9][0-9][0-9][0-9]",date_uti,perl= T) + 3)
date_uti <- if(date_uti == "") "NonRens" else date_uti
date_uti <- if(substr(date_uti,1,2) == "fa") "NonRens" else date_uti
zone_uti <- html_uti %>% html_nodes("div.small_column_left_shadow div#more_informations") %>% html_text()
zone_uti <- gsub("(\\r)","",zone_uti,perl = T)
zone_uti <- gsub("(\\t)","",zone_uti,perl = T)
zone_uti <- gsub("(\\n)","",zone_uti,perl = T)
zone_uti <- substr(zone_uti,18,nchar(zone_uti))
zone_uti <- if(length(zone_uti) == 0) "NonRens" else zone_uti
visit_uti <- html_uti %>% html_nodes("div.small_column_left_shadow ul li strong") %>% html_text()
msg_uti <- visit_uti[2]
visit_uti <- visit_uti[1]
profil_uti <- data.frame(cbind(nom_uti,sexe_uti,date_uti,grade_uti,zone_uti,visit_uti,msg_uti))
if (!exists("docti_uti")) docti_uti <- profil_uti else if(typeof(try(rbind(docti_uti,profil_uti),silent = T)) == "list")docti_uti <- rbind(docti_uti,profil_uti)
rm(profil_uti)
}
print(paste0("Il reste ", length(uti) - which(pers == uti) , " sur ", length(uti)))
save.image("C:/Users/ottavig/Documents/docti_membre.RData")
Sys.sleep(3)
}
# Drop the per-profile temporaries left over by the scraping loop above.
rm(nom_uti,date_uti,grade_uti,sexe_uti,msg_uti,visit_uti,zone_uti,profil_uti,pers,html_uti)
#------
# ---- Clean-up of the scraped member table 'docti_uti' ----
docti_uti <- unique(docti_uti)
# Drop a row when it repeats the previous row's user name.
# NOTE(review): this mutates 'docti_uti' while iterating over its ORIGINAL
# length, so row indices shift after each removal and later iterations can
# compare/remove the wrong rows -- the run-based dedup loop further below
# looks like the safer replacement; confirm which one is authoritative.
for (i in 2:length(docti_uti$nom_uti)) if (docti_uti$nom_uti[i] == docti_uti$nom_uti[(i-1)]) docti_uti <- docti_uti[-i + 1 ,]
# Birth year must be exactly 4 characters, otherwise mark as "NonRens"
# (French for "not reported").
docti_uti$date_uti <- ifelse(nchar(as.character(docti_uti$date_uti)) != 4, "NonRens", as.character(docti_uti$date_uti))
# Overly long location strings are treated as junk -> "NonRens".
docti_uti$zone_uti <- ifelse(nchar(as.character(docti_uti$zone_uti)) > 40 ,"NonRens", as.character(docti_uti$zone_uti))
# When the location contains a 5-digit postal code, keep only its first two
# digits (presumably the French departement number -- TODO confirm).
docti_uti$zone_uti <- ifelse(regexpr("[0-9][0-9][0-9][0-9][0-9]",docti_uti$zone_uti,perl= T) != -1, substr(docti_uti$zone_uti,regexpr("[0-9][0-9]",docti_uti$zone_uti,perl= T),regexpr("[0-9][0-9]",docti_uti$zone_uti,perl= T) + 1),docti_uti$zone_uti)
# Extract a 4-digit year from the free-text message date, when present.
docti$date_msg <- ifelse(regexpr("[0-9][0-9][0-9][0-9]",docti$date_msg,perl= T) != -1, substr(docti$date_msg,regexpr("[0-9][0-9][0-9][0-9]",docti$date_msg,perl= T),regexpr("[0-9][0-9][0-9][0-9]",docti$date_msg,perl= T) + 3),docti$date_msg)
# TODO: finish this section (original note: "A TERMINER")
# Coerce every column to character before the run-based dedup below.
for (c in names(docti_uti)) docti_uti[,c] <- as.character(docti_uti[,c])
# Rebuild the table keeping only the first row of each run of identical user
# names: 'res' accumulates kept rows, 'j' is the next free index, 'var'
# tracks the previous row's name.
res <- docti_uti[1,]
j <- 2
var <- docti_uti[1,"nom_uti"]
for (i in 2:length(docti_uti$nom_uti)) {
  if (var != docti_uti[i,"nom_uti"]) {
    res[j,] <- docti_uti[i,]
    j <- j+1
  }
  var <- docti_uti[i,"nom_uti"]
}
# ---- Instagram OAuth bootstrap (exploratory / interactive script) ----
# NOTE(review): require() silently returns FALSE when a package is missing;
# library() would fail loudly and is preferable for hard dependencies.
require(httr)
require(RCurl)
require(RJSONIO)
# Local OAuth callback URL; strip everything after the host:port part.
full_url <- oauth_callback()
full_url <- gsub("(.*localhost:[0-9]{1,5}/).*", x=full_url, replacement="\\1")
print(full_url)
app_name <- "instaR"
# SECURITY: API credentials are hard-coded in source. They should be revoked
# and supplied via environment variables or a config file instead.
client_id <- "73ffb23e19ea4bcea82d48284b665ca1"
client_secret <- "cf785d5ca9084772bf47c5c10375a965"
scope <- "basic"
# Instagram OAuth2 endpoints.
instagram <- oauth_endpoint(
  authorize = "https://api.instagram.com/oauth/authorize",
  access = "https://api.instagram.com/oauth/access_token")
myapp <- oauth_app(app_name, client_id, client_secret)
ig_oauth <- oauth2.0_token(instagram, myapp,scope="basic", type = "application/x-www-form-urlencoded",cache=FALSE)
# The access token is embedded in the credential names; split it out.
tmp <- strsplit(toString(names(ig_oauth$credentials)), '"')
token <- tmp[[1]][4]
# NOTE(review): 'username' is referenced here but only assigned two lines
# below -- on a fresh session this call errors unless 'username' already
# exists in the workspace.
user_info <- fromJSON(getURL(paste('https://api.instagram.com/v1/users/search?q=',username,'&access_token=',token,sep="")),unexpected.escape = "keep")
user_followed <- fromJSON(getURL(paste('https://api.instagram.com/v1/users/37721750/followed_by?access_token=',token,sep="")),unexpected.escape = "keep")
username <- "ninonchka"
received_profile <- user_info$data[[1]]
# Interactive experiments with the instaR package (left as-is on purpose;
# several lines, e.g. '?instaR' and 'instaOAuth', are REPL-only probes).
library(instaR)
load("my_oauth")
mccain <- getFollowers(username, token)
test <- getUser(username = "ninonchka",token)
obama <- getUser( userid = 269505098, token = token)
obama <- searchInstagram( tag="", token=my_oauth, n=100, folder="obama")
?instaR
obama <- getFollowers( username="bysuzette", token=token)
instaOAuth
token <- oauth2.0_token(instagram, myapp, cache = FALSE,
  scope = scope)
# Quick sanity check that the token works against the feed endpoint.
if (GET(paste0("https://api.instagram.com/v1/users/self/feed?count=1",
  "&access_token=", token$credentials$access_token))$status ==
  200) message("Authentication successful.")
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# Each function below is an auto-generated thin wrapper that forwards its
# arguments unchanged to the compiled C++ routine of the same name in the
# 'netrankr' shared library via .Call(). To change behaviour, edit the C++
# sources and re-run Rcpp::compileAttributes() rather than this file.
approx_glpom <- function(P) {
    .Call('_netrankr_approx_glpom', PACKAGE = 'netrankr', P)
}
approx_relative <- function(Nu, Nd, P, iterative, max_iter) {
    .Call('_netrankr_approx_relative', PACKAGE = 'netrankr', Nu, Nd, P, iterative, max_iter)
}
checkPairs <- function(x, y) {
    .Call('_netrankr_checkPairs', PACKAGE = 'netrankr', x, y)
}
dependCurFlow <- function(Tmat, el, m, n) {
    .Call('_netrankr_dependCurFlow', PACKAGE = 'netrankr', Tmat, el, m, n)
}
dependRspn <- function(A, Z, Zdiv, W, n) {
    .Call('_netrankr_dependRspn', PACKAGE = 'netrankr', A, Z, Zdiv, W, n)
}
dependency <- function(adj) {
    .Call('_netrankr_dependency', PACKAGE = 'netrankr', adj)
}
LatticeOfIdeals <- function(child, parent, Ek, nElem, nIdeals) {
    .Call('_netrankr_LatticeOfIdeals', PACKAGE = 'netrankr', child, parent, Ek, nElem, nIdeals)
}
listingIdeals <- function(P, nElem, nIdeals) {
    .Call('_netrankr_listingIdeals', PACKAGE = 'netrankr', P, nElem, nIdeals)
}
mcmc_rank <- function(P, init_rank, rp) {
    .Call('_netrankr_mcmc_rank', PACKAGE = 'netrankr', P, init_rank, rp)
}
nialgo <- function(adjList, deg) {
    .Call('_netrankr_nialgo', PACKAGE = 'netrankr', adjList, deg)
}
matdom <- function(A, map, benefit) {
    .Call('_netrankr_matdom', PACKAGE = 'netrankr', A, map, benefit)
}
preserve <- function(P, s, n) {
    .Call('_netrankr_preserve', PACKAGE = 'netrankr', P, s, n)
}
rankprobs <- function(ImPred, ideals, nElem, nIdeals) {
    .Call('_netrankr_rankprobs', PACKAGE = 'netrankr', ImPred, ideals, nElem, nIdeals)
}
rankings <- function(paths, ideals, nRank, nElem) {
    .Call('_netrankr_rankings', PACKAGE = 'netrankr', paths, ideals, nRank, nElem)
}
resistanceDistance <- function(C, n) {
    .Call('_netrankr_resistanceDistance', PACKAGE = 'netrankr', C, n)
}
transreduct <- function(M) {
    .Call('_netrankr_transreduct', PACKAGE = 'netrankr', M)
}
treeOfIdeals <- function(imPred) {
    .Call('_netrankr_treeOfIdeals', PACKAGE = 'netrankr', imPred)
}
| /netrankr/R/RcppExports.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 2,125 | r | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
approx_glpom <- function(P) {
.Call('_netrankr_approx_glpom', PACKAGE = 'netrankr', P)
}
approx_relative <- function(Nu, Nd, P, iterative, max_iter) {
.Call('_netrankr_approx_relative', PACKAGE = 'netrankr', Nu, Nd, P, iterative, max_iter)
}
checkPairs <- function(x, y) {
.Call('_netrankr_checkPairs', PACKAGE = 'netrankr', x, y)
}
dependCurFlow <- function(Tmat, el, m, n) {
.Call('_netrankr_dependCurFlow', PACKAGE = 'netrankr', Tmat, el, m, n)
}
dependRspn <- function(A, Z, Zdiv, W, n) {
.Call('_netrankr_dependRspn', PACKAGE = 'netrankr', A, Z, Zdiv, W, n)
}
dependency <- function(adj) {
.Call('_netrankr_dependency', PACKAGE = 'netrankr', adj)
}
LatticeOfIdeals <- function(child, parent, Ek, nElem, nIdeals) {
.Call('_netrankr_LatticeOfIdeals', PACKAGE = 'netrankr', child, parent, Ek, nElem, nIdeals)
}
listingIdeals <- function(P, nElem, nIdeals) {
.Call('_netrankr_listingIdeals', PACKAGE = 'netrankr', P, nElem, nIdeals)
}
mcmc_rank <- function(P, init_rank, rp) {
.Call('_netrankr_mcmc_rank', PACKAGE = 'netrankr', P, init_rank, rp)
}
nialgo <- function(adjList, deg) {
.Call('_netrankr_nialgo', PACKAGE = 'netrankr', adjList, deg)
}
matdom <- function(A, map, benefit) {
.Call('_netrankr_matdom', PACKAGE = 'netrankr', A, map, benefit)
}
preserve <- function(P, s, n) {
.Call('_netrankr_preserve', PACKAGE = 'netrankr', P, s, n)
}
rankprobs <- function(ImPred, ideals, nElem, nIdeals) {
.Call('_netrankr_rankprobs', PACKAGE = 'netrankr', ImPred, ideals, nElem, nIdeals)
}
rankings <- function(paths, ideals, nRank, nElem) {
.Call('_netrankr_rankings', PACKAGE = 'netrankr', paths, ideals, nRank, nElem)
}
resistanceDistance <- function(C, n) {
.Call('_netrankr_resistanceDistance', PACKAGE = 'netrankr', C, n)
}
transreduct <- function(M) {
.Call('_netrankr_transreduct', PACKAGE = 'netrankr', M)
}
treeOfIdeals <- function(imPred) {
.Call('_netrankr_treeOfIdeals', PACKAGE = 'netrankr', imPred)
}
|
## Creation of functions to calculate the inverse of a matrix and cache it
## Creates a special "matrix" object that can cache its inverse
## Create a "cache-aware matrix": a list of four accessor closures sharing a
## private environment that holds the matrix 'x' and its lazily computed
## inverse. Replacing the matrix via set() invalidates the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  replace_matrix <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL  # a stale inverse must not survive a new matrix
  }
  list(
    set        = replace_matrix,
    get        = function() x,
    setinverse = function(value) cached_inverse <<- value,
    getinverse = function() cached_inverse
  )
}
## Compute the inverse of the special "matrix" created by makeCacheMatrix().
## If the inverse is already cached (and the matrix has not been replaced),
## the cached value is returned instead of being recomputed.
##
## x   : object returned by makeCacheMatrix()
## ... : further arguments forwarded to solve()
## Returns a matrix that is the inverse of the matrix stored in 'x'.
cacheSolve <- function(x, ...) {
  inv <- x$getinverse()
  if (!is.null(inv)) {
    message('getting cached data')
    return(inv)
  }
  data <- x$get()
  # Fix: '...' was accepted by the signature but never forwarded to solve(),
  # so caller-supplied solver arguments were silently ignored.
  inv <- solve(data, ...)
  x$setinverse(inv)
  inv
}
| /cachematrix.R | no_license | sanjuairforce/ProgrammingAssignment2 | R | false | false | 793 | r | ## Creation of functions to calculate the inverse of a matrix and cache it
## Creates a special "matrix" object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## Computes the inverse of the special "matrix". If it already exists, it caches (retrieves) it
cacheSolve <- function(x, ...) {
inv <- x$getinverse()
if (!is.null(inv)) {
message('getting cached data')
return(inv)
}
data <- x$get()
inv <- solve(data)
x$setinverse(inv)
inv
## Return a matrix that is the inverse of 'x'
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/enumerationSetApi.r
\name{enumerationSet$updateSecurityEntry}
\alias{enumerationSet$updateSecurityEntry}
\title{Update a security entry owned by the enumeration set.}
\arguments{
\item{name}{The name of the security entry.}
\item{webId}{The ID of the enumeration set where the security entry will be updated.}
\item{piSecurityEntry}{The new security entry definition. The full list of allow and deny rights must be supplied or they will be removed.}
\item{applyToChildren}{If false, the new access permissions are only applied to the associated object. If true, the access permissions of children with any parent-child reference types will change when the permissions on the primary parent change.}
}
\value{
The security entry was updated.
}
\description{
Update a security entry owned by the enumeration set.
}
| /man/enumerationSet-cash-updateSecurityEntry.Rd | permissive | aj9253/PI-Web-API-Client-R | R | false | true | 894 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/enumerationSetApi.r
\name{enumerationSet$updateSecurityEntry}
\alias{enumerationSet$updateSecurityEntry}
\title{Update a security entry owned by the enumeration set.}
\arguments{
\item{name}{The name of the security entry.}
\item{webId}{The ID of the enumeration set where the security entry will be updated.}
\item{piSecurityEntry}{The new security entry definition. The full list of allow and deny rights must be supplied or they will be removed.}
\item{applyToChildren}{If false, the new access permissions are only applied to the associated object. If true, the access permissions of children with any parent-child reference types will change when the permissions on the primary parent change.}
}
\value{
The security entry was updated.
}
\description{
Update a security entry owned by the enumeration set.
}
|
# Driver script: runs three optimisation scenarios (GA with Gaussian
# mutation, GA with default mutation, and PSO) for one benchmark function
# and writes results into a fresh timestamped directory.
definedFunction <- Zeldasine10
functionName <- "Zeldasine10"
definedBounds <- getDefaultBounds(functionName)
##############################################
# Remember the original working directory so it can be restored at the end.
# NOTE(review): the script navigates via setwd(); if any sourced file errors
# the working directory is left changed -- consider on.exit()/tryCatch.
startwd <- getwd()
setwd("./parameter selection scenarios")
dirName<-paste("results/",functionName, format(Sys.time(), "%d-%m-%Y_%H-%M-%S") ,sep='')
dir.create(dirName)
setwd(dirName)
# Scenario 1: genetic algorithm with Gaussian mutation. 'gaussMutation' is a
# global flag read by the sourced scripts.
dir.create("gaussMutation")
setwd("gaussMutation")
gaussMutation = TRUE
source(file = "../../../performOptimisationGEN.R")
source(file = "../../../performOptimisationScenarios.R")
setwd("..")
# Scenario 2: same pipeline with the default mutation operator.
dir.create("defaultMutation")
setwd("defaultMutation")
gaussMutation = FALSE
source(file = "../../../performOptimisationGEN.R")
source(file = "../../../performOptimisationScenarios.R")
setwd("..")
# Scenario 3: particle swarm optimisation.
dir.create("PSO")
setwd("PSO")
source(file = "../../../performOptimisationPSO.R")
# Restore the caller's working directory.
setwd(startwd)
| /repo/R/workspace/parameter selection scenarios/runAllTests.R | no_license | PiotrekSotor/PracaMagisterska | R | false | false | 834 | r | definedFunction <- Zeldasine10
functionName <- "Zeldasine10"
definedBounds <- getDefaultBounds(functionName)
##############################################
startwd <- getwd()
setwd("./parameter selection scenarios")
dirName<-paste("results/",functionName, format(Sys.time(), "%d-%m-%Y_%H-%M-%S") ,sep='')
dir.create(dirName)
setwd(dirName)
dir.create("gaussMutation")
setwd("gaussMutation")
gaussMutation = TRUE
source(file = "../../../performOptimisationGEN.R")
source(file = "../../../performOptimisationScenarios.R")
setwd("..")
dir.create("defaultMutation")
setwd("defaultMutation")
gaussMutation = FALSE
source(file = "../../../performOptimisationGEN.R")
source(file = "../../../performOptimisationScenarios.R")
setwd("..")
dir.create("PSO")
setwd("PSO")
source(file = "../../../performOptimisationPSO.R")
setwd(startwd)
|
###########################################################################/**
# @RdocClass MbeiCnPlm
#
# @title "The MbeiCnPlm class"
#
# \description{
# @classhierarchy
# }
#
# @synopsis
#
# \arguments{
# \item{...}{Arguments passed to @see "MbeiSnpPlm".}
# \item{combineAlleles}{If @FALSE, allele A and allele B are treated
# seperately, otherwise together.}
# }
#
# \section{Fields and Methods}{
# @allmethods "public"
# }
#
# @author "HB"
#*/###########################################################################
# Constructor for MbeiCnPlm: an MBEI probe-level model for copy-number
# analysis, extending MbeiSnpPlm and mixing in the CnPlm trait.
setConstructorS3("MbeiCnPlm", function(..., combineAlleles=FALSE) {
  # 'combineAlleles' becomes a field on the object: when TRUE, allele A and
  # allele B are treated together, otherwise separately (see class Rdoc).
  extend(MbeiSnpPlm(...), c("MbeiCnPlm", uses(CnPlm())),
    combineAlleles = combineAlleles
  )
})
setMethodS3("getAsteriskTags", "MbeiCnPlm", function(this, collapse=NULL, ...) {
  # Build the asterisk tag set, e.g. 'MBEI[,<flavor>][,A+B]': start from the
  # tags produced by the parent (SNP) class...
  asteriskTags <- NextMethod("getAsteriskTags", collapse=NULL)
  # ...then advertise the allele-combination mode with an 'A+B' tag.
  if (this$combineAlleles) {
    asteriskTags <- c(asteriskTags, "A+B")
  }
  # Collapse into a single string when a separator was requested.
  paste(asteriskTags, collapse=collapse)
}, protected=TRUE)
############################################################################
# HISTORY:
# 2007-12-06
# o Added getAsteriskTag() for MbeiCnPlm.
# 2006-09-12
# o Recreated.
############################################################################
| /R/MbeiCnPlm.R | no_license | microarray/aroma.affymetrix | R | false | false | 1,305 | r | ###########################################################################/**
# @RdocClass MbeiCnPlm
#
# @title "The MbeiCnPlm class"
#
# \description{
# @classhierarchy
# }
#
# @synopsis
#
# \arguments{
# \item{...}{Arguments passed to @see "MbeiSnpPlm".}
# \item{combineAlleles}{If @FALSE, allele A and allele B are treated
# seperately, otherwise together.}
# }
#
# \section{Fields and Methods}{
# @allmethods "public"
# }
#
# @author "HB"
#*/###########################################################################
setConstructorS3("MbeiCnPlm", function(..., combineAlleles=FALSE) {
extend(MbeiSnpPlm(...), c("MbeiCnPlm", uses(CnPlm())),
combineAlleles = combineAlleles
)
})
setMethodS3("getAsteriskTags", "MbeiCnPlm", function(this, collapse=NULL, ...) {
# Returns 'MBEI[,<flavor>][,+-]'
tags <- NextMethod("getAsteriskTags", collapse=NULL);
# Add class specific parameter tags
if (this$combineAlleles)
tags <- c(tags, "A+B");
# Collapse
tags <- paste(tags, collapse=collapse);
tags;
}, protected=TRUE)
############################################################################
# HISTORY:
# 2007-12-06
# o Added getAsteriskTag() for MbeiCnPlm.
# 2006-09-12
# o Recreated.
############################################################################
|
#' Hidden Markov model for the biofam data
#'
#' A five-state hidden Markov model (HMM) fitted for the \code{\link[TraMineR]{biofam}} data.
#'
#' @format A hidden Markov model of class \code{hmm};
#' a left-to-right model with five hidden states.
#'
#' @details
#' The model is loaded by calling \code{data(hmm_biofam)}. It was created with the
#' following code:
#' \preformatted{
#' data("biofam3c")
#'
#' # Building sequence objects
#' marr_seq <- seqdef(biofam3c$married, start = 15,
#' alphabet = c("single", "married", "divorced"))
#' child_seq <- seqdef(biofam3c$children, start = 15,
#' alphabet = c("childless", "children"))
#' left_seq <- seqdef(biofam3c$left, start = 15,
#' alphabet = c("with parents", "left home"))
#'
#' ## Choosing colors
#' attr(marr_seq, "cpal") <- c("violetred2", "darkgoldenrod2", "darkmagenta")
#' attr(child_seq, "cpal") <- c("darkseagreen1", "coral3")
#' attr(left_seq, "cpal") <- c("lightblue", "red3")
#'
#' init <- c(0.9, 0.05, 0.02, 0.02, 0.01)
#'
#' # Starting values for transition matrix
#' trans <- matrix(
#' c(0.8, 0.10, 0.05, 0.03, 0.02,
#' 0, 0.9, 0.05, 0.03, 0.02,
#' 0, 0, 0.9, 0.07, 0.03,
#' 0, 0, 0, 0.9, 0.1,
#' 0, 0, 0, 0, 1),
#' nrow = 5, ncol = 5, byrow = TRUE)
#'
#' # Starting values for emission matrices
#' emiss_marr <- matrix(
#' c(0.9, 0.05, 0.05, # High probability for single
#' 0.9, 0.05, 0.05,
#' 0.05, 0.9, 0.05, # High probability for married
#' 0.05, 0.9, 0.05,
#' 0.3, 0.3, 0.4), # mixed group
#' nrow = 5, ncol = 3, byrow = TRUE)
#'
#' emiss_child <- matrix(
#' c(0.9, 0.1, # High probability for childless
#' 0.9, 0.1,
#' 0.1, 0.9,
#' 0.1, 0.9,
#' 0.5, 0.5),
#' nrow = 5, ncol = 2, byrow = TRUE)
#'
#' emiss_left <- matrix(
#' c(0.9, 0.1, # High probability for living with parents
#' 0.1, 0.9,
#' 0.1, 0.9,
#' 0.1, 0.9,
#' 0.5, 0.5),
#' nrow = 5, ncol = 2, byrow = TRUE)
#'
#' initmod <- build_hmm(
#' observations = list(marr_seq, child_seq, left_seq),
#' initial_probs = init, transition_probs = trans,
#' emission_probs = list(emiss_marr, emiss_child,
#' emiss_left),
#' channel_names = c("Marriage", "Parenthood", "Residence"))
#'
#' fit_biofam <- fit_model(initmod, em = FALSE, local = TRUE)
#' hmm_biofam <- fit_biofam$model
#' }
#'
#' @seealso Examples of building and fitting HMMs in \code{\link{build_hmm}} and
#' \code{\link{fit_model}}; and \code{\link[TraMineR]{biofam}} for the original data and
#' \code{\link{biofam3c}} for the three-channel version used in this model.
#'
#' @docType data
#' @keywords datasets
#' @name hmm_biofam
#' @examples
#'
#' # Plotting the model
#' plot(hmm_biofam)
#'
NULL
| /R/hmm_biofam.R | no_license | cran/seqHMM | R | false | false | 2,809 | r | #' Hidden Markov model for the biofam data
#'
#' A five-state hidden Markov model (HMM) fitted for the \code{\link[TraMineR]{biofam}} data.
#'
#' @format A hidden Markov model of class \code{hmm};
#' a left-to-right model with four hidden states.
#'
#' @details
#' The model is loaded by calling \code{data(hmm_biofam)}. It was created with the
#' following code:
#' \preformatted{
#' data("biofam3c")
#'
#' # Building sequence objects
#' marr_seq <- seqdef(biofam3c$married, start = 15,
#' alphabet = c("single", "married", "divorced"))
#' child_seq <- seqdef(biofam3c$children, start = 15,
#' alphabet = c("childless", "children"))
#' left_seq <- seqdef(biofam3c$left, start = 15,
#' alphabet = c("with parents", "left home"))
#'
#' ## Choosing colors
#' attr(marr_seq, "cpal") <- c("violetred2", "darkgoldenrod2", "darkmagenta")
#' attr(child_seq, "cpal") <- c("darkseagreen1", "coral3")
#' attr(left_seq, "cpal") <- c("lightblue", "red3")
#'
#' init <- c(0.9, 0.05, 0.02, 0.02, 0.01)
#'
#' # Starting values for transition matrix
#' trans <- matrix(
#' c(0.8, 0.10, 0.05, 0.03, 0.02,
#' 0, 0.9, 0.05, 0.03, 0.02,
#' 0, 0, 0.9, 0.07, 0.03,
#' 0, 0, 0, 0.9, 0.1,
#' 0, 0, 0, 0, 1),
#' nrow = 5, ncol = 5, byrow = TRUE)
#'
#' # Starting values for emission matrices
#' emiss_marr <- matrix(
#' c(0.9, 0.05, 0.05, # High probability for single
#' 0.9, 0.05, 0.05,
#' 0.05, 0.9, 0.05, # High probability for married
#' 0.05, 0.9, 0.05,
#' 0.3, 0.3, 0.4), # mixed group
#' nrow = 5, ncol = 3, byrow = TRUE)
#'
#' emiss_child <- matrix(
#' c(0.9, 0.1, # High probability for childless
#' 0.9, 0.1,
#' 0.1, 0.9,
#' 0.1, 0.9,
#' 0.5, 0.5),
#' nrow = 5, ncol = 2, byrow = TRUE)
#'
#' emiss_left <- matrix(
#' c(0.9, 0.1, # High probability for living with parents
#' 0.1, 0.9,
#' 0.1, 0.9,
#' 0.1, 0.9,
#' 0.5, 0.5),
#' nrow = 5, ncol = 2, byrow = TRUE)
#'
#' initmod <- build_hmm(
#' observations = list(marr_seq, child_seq, left_seq),
#' initial_probs = init, transition_probs = trans,
#' emission_probs = list(emiss_marr, emiss_child,
#' emiss_left),
#' channel_names = c("Marriage", "Parenthood", "Residence"))
#'
#' fit_biofam <- fit_model(initmod, em = FALSE, local = TRUE)
#' hmm_biofam <- fit_biofam$model
#' }
#'
#' @seealso Examples of building and fitting HMMs in \code{\link{build_hmm}} and
#' \code{\link{fit_model}}; and \code{\link[TraMineR]{biofam}} for the original data and
#' \code{\link{biofam3c}} for the three-channel version used in this model.
#'
#' @docType data
#' @keywords datasets
#' @name hmm_biofam
#' @examples
#'
#' # Plotting the model
#' plot(hmm_biofam)
#'
NULL
|
library(plyr)
library(stringr)
library(lubridate)
library(jsonlite)
library(httr)
library(xml2)
library(rvest)
library(devtools)
library(ggplot2)
library(RSelenium)
library(pageviews)
library(aRxiv)
library(streamR)
library(scales)
library(shiny)
# Shiny UI: sidebar with two article-name inputs, a date range, plot-tuning
# sliders, and an export control; main panel shows the comparison plot.
ui <- fluidPage(
  titlePanel("Number of Page Views on Wikipedia", windowTitle = "Wiki Page Views"),
  sidebarLayout(
    sidebarPanel(("Enter Name of Wikipedia Article to Query Here"),
                 # Two Wikipedia article titles to compare (case sensitive).
                 textInput("search1", "Article Title (Case Sensitive)", ""),
                 textInput("search2", "Article Title (Case Sensitive)", ""),
                 # Query window: defaults to the last 30 days.
                 dateInput(inputId = "start",
                           label = "Search Start Date",
                           value = Sys.Date()-30),
                 dateInput(inputId = "end",
                           label = "Search End Date",
                           value = Sys.Date()),
                 # Plot-tuning controls consumed by the server's ggplot.
                 sliderInput(inputId = "line",
                             label = "Line Color Strength",
                             value = .4, min = 0,
                             max = 1),
                 sliderInput(inputId = "point",
                             label = "Adjust Point Size",
                             value = 1, min = 0,
                             max = 5),
                 sliderInput(inputId = "smooth",
                             label = "Overplotting Visual Aid",
                             value = 0, min = 0,
                             max = 5),
                 # submitButton defers all reactivity until clicked.
                 submitButton(text = "Refresh"),
                 # Export file type + download trigger.
                 radioButtons(inputId = "var3",
                              label = "Select the file type",
                              choices = list("png","pdf")),
                 downloadButton(outputId = "down",
                                label = "Export Plot",
                                class = NULL),
                 width = 3),
    mainPanel((""),
              plotOutput("line"))
  )
)
# Shiny server: renders the page-view comparison plot and serves it as a
# downloadable png/pdf. The data fetch and plot construction are factored
# into helpers so the on-screen plot and the exported file are guaranteed to
# be built by identical code (previously duplicated inline, and the export
# path passed height/width to plot(), where print.ggplot ignores them).
server <- function(input, output){
  # Query the Wikipedia pageviews API for both article names and stack the
  # results into one long data frame with a 'Page' grouping column.
  fetch_views <- function() {
    df1 <- article_pageviews(project = "en.wikipedia",
                             article = input$search1,
                             user_type = "user",
                             start = input$start,
                             end = input$end)
    df2 <- article_pageviews(project = "en.wikipedia",
                             article = input$search2,
                             user_type = "user",
                             start = input$start,
                             end = input$end)
    df1$Page <- input$search1
    df2$Page <- input$search2
    rbind(df1, df2)
  }
  # Build the ggplot exactly as configured by the UI controls.
  build_plot <- function() {
    ggplot(data = fetch_views(), aes(date, views, color = Page)) +
      geom_smooth(size = input$smooth * .5, se = TRUE,
                  span = input$smooth * .2) +
      geom_line(alpha = input$line) +
      geom_point(size = input$point) +
      scale_y_log10() +
      ggtitle("Page Views on Wikipedia Over Time") +
      labs(x = "Date", y = "Number of Views (Log 10)") +
      theme(plot.title = element_text(color = "black",
                                      face = "bold.italic",
                                      size = 24,
                                      hjust = .5),
            axis.title = element_text(color = "black",
                                      face = "italic",
                                      size = 18))
  }
  # On-screen plot (reactive: rebuilt when inputs are submitted).
  output$line <- renderPlot(build_plot(), height = 600)
  # File export: open the requested graphics device, print the same plot,
  # and close the device.
  output$down <- downloadHandler(
    filename = function() {
      paste("plot", input$var3, sep = ".")
    },
    content = function(file) {
      if (input$var3 == "png") png(file) else pdf(file)
      print(build_plot())  # print() is the correct way to render a ggplot to a device
      dev.off()
    }
  )
}
shinyApp(ui = ui, server = server) | /Wikipedia Search Application/Wikipedia Search.R | no_license | cjc2238/Shiny-Apps | R | false | false | 6,118 | r | library(plyr)
library(stringr)
library(lubridate)
library(jsonlite)
library(httr)
library(xml2)
library(rvest)
library(devtools)
library(ggplot2)
library(RSelenium)
library(pageviews)
library(aRxiv)
library(streamR)
library(scales)
library(shiny)
ui <- fluidPage(
titlePanel("Number of Page Views on Wikipedia", windowTitle = "Wiki Page Views"),
sidebarLayout(
sidebarPanel(("Enter Name of Wikipedia Article to Query Here"),
textInput("search1", "Article Title (Case Sensitive)", ""),
textInput("search2", "Article Title (Case Sensitive)", ""),
dateInput(inputId = "start",
label = "Search Start Date",
value = Sys.Date()-30),
dateInput(inputId = "end",
label = "Search End Date",
value = Sys.Date()),
sliderInput(inputId = "line",
label = "Line Color Strength",
value = .4, min = 0,
max = 1),
sliderInput(inputId = "point",
label = "Adjust Point Size",
value = 1, min = 0,
max = 5),
sliderInput(inputId = "smooth",
label = "Overplotting Visual Aid",
value = 0, min = 0,
max = 5),
submitButton(text = "Refresh"),
radioButtons(inputId = "var3",
label = "Select the file type",
choices = list("png","pdf")),
downloadButton(outputId = "down",
label = "Export Plot",
class = NULL),
width = 3),
mainPanel((""),
plotOutput("line"))
)
)
server <- function(input, output){
output$line <- renderPlot(
ggplot(
data =
{df1 <- article_pageviews(project = "en.wikipedia",
article = input$search1,
user_type = "user",
start = input$start,
end = input$end)
df2 <- article_pageviews(project = "en.wikipedia",
article = input$search2,
user_type = "user",
start = input$start,
end = input$end)
df1$Page = input$search1
df2$Page = input$search2
df = rbind(df1, df2)
},
aes(date, views, color=Page))
+ geom_smooth(size=input$smooth*.5, se = TRUE,
span = input$smooth*.2)
+ geom_line(alpha=input$line)
+ geom_point(size=input$point)
+ scale_y_log10()
+ ggtitle("Page Views on Wikipedia Over Time")
+ labs(x = "Date", y = "Number of Views (Log 10)")
+ theme(plot.title = element_text(color="black",
face="bold.italic",
size=24,
hjust=.5),
axis.title = element_text(color="black",
face="italic",
size=18)),
height = 600)
output$down <- downloadHandler(
filename = function(){
paste("plot",input$var3, sep = ".")
},
content = function(file){
if(input$var3 =="png")
png(file)
else
pdf(file)
plot(ggplot(
data =
{
df1 <- article_pageviews(project = "en.wikipedia",
article = input$search1,
user_type = "user",
start = input$start,
end = input$end)
df2 <- article_pageviews(project = "en.wikipedia",
article = input$search2,
user_type = "user",
start = input$start,
end = input$end)
df1$Page = input$search1
df2$Page = input$search2
df = rbind(df1, df2)
},
aes(date, views, color=Page))
+ geom_smooth(size=input$smooth*.5, se = TRUE,
span = input$smooth*.2)
+ geom_line(alpha=input$line)
+ geom_point(size=input$point)
+ scale_y_log10()
+ ggtitle("Page Views on Wikipedia Over Time")
+ labs(x = "Date", y = "Number of Views (Log 10)")
+ theme(plot.title = element_text(color="black",
face="bold.italic",
size=24,
hjust=.5))
+ theme(axis.title = element_text(color="black",
face="italic",
size=18)),
height = 700,
width = 1400)
dev.off()
}
)
}
shinyApp(ui = ui, server = server) |
# TODO: Make this an R6 layer?
#' Input query, key, and value matrices are used to compute dot product
#' attention. (Vaswani et al. 2017)
#' q: a Tensor with shape [batch, length_q, depth_k]
#' k: a Tensor with shape [batch, length_kv, depth_k]
#' v: a Tensor with shape [batch, length_kv, depth_v]
#' bias: optional Tensor broadcastable against the attention logits, added
#'   before the softmax (typically used for padding/causal masking).
#' dropout, name: NOTE(review): both parameters are accepted but currently
#'   unused inside the body -- confirm whether dropout was meant to be
#'   applied to 'weights'.
#' Returns a Tensor with shape [batch, length_q, depth_v].
#' @export
dot_product_attention_1d <-
  function(q,
           k,
           v,
           bias = NULL,
           dropout = 0,
           name = "dot_product_attention") {
    q_shape <- shape_list2(q)
    # Scale queries by 1/sqrt(depth_k) (last element of the shape list).
    scalar <-
      tf$math$rsqrt(tf$cast(q_shape[[length(q_shape)]], tf$float32))
    logits <- tf$matmul(q * scalar, k, transpose_b = TRUE)
    if (!is.null(bias))
      logits <- logits + bias
    # Softmax over the key dimension yields the attention weights.
    weights <- tf$nn$softmax(logits, name = "attention_weights")
    # Weighted sum of the values.
    x <- tf$matmul(weights, v)
    x
  }
# TODO: hard_attention_k: integer, if > 0 triggers hard attention (pick top-k)
# TODO: Add callable option to attention_type
#' Multihead attention mechanism
#'
#' With num_heads == 1, becomes simple dot product attention
#' @param query Tensor [batch, seqlen, depth_q]
#' @param memory Tensor [batch, seqlen, depth_m]
#' @param bias Bias tensor passed to attention function
#' @param key_depth Specify units for key component
#' @param value_depth Specify units for value component
#' @param output_depth Specify feature dim of final output
#' @param num_heads Specify number of heads to break input space up by
#' @param dropout Float value to add dropout to attention function
#' @param attention_type Character value of attention type
#'   ("dot_product" or "dot_product_area")
#' @param q_filter_width,kv_filter_width Widths forwarded to compute_qkv()
#' @param q_padding,kv_padding Convolution padding; NOTE(review): accepted
#'   but not forwarded anywhere in this function -- confirm intent.
#' @param max_area_width,max_area_height,area_height Area-attention sizes,
#'   used only when \code{attention_type = "dot_product_area"}
#' @param area_key_mode,area_value_mode Area pooling modes; NOTE(review):
#'   accepted but not forwarded -- confirm intent.
#' @param vars_3d use 3-dimensional variables for input/output transformations
#' @export
multihead_attention <- function(query,
                                memory = NULL,
                                bias = NULL,
                                key_depth = 64L,
                                value_depth = 64L,
                                output_depth = 128L,
                                num_heads = 4L,
                                dropout = 0,
                                attention_type = "dot_product",
                                q_filter_width = 1L,
                                kv_filter_width = 1L,
                                q_padding = "same",
                                kv_padding = "same",
                                max_area_width = 1L,
                                max_area_height = 1L,
                                area_height = 1L,
                                area_key_mode = "mean",
                                area_value_mode = "sum",
                                vars_3d = FALSE) {
  # Each head receives an equal slice of the key/value feature spaces.
  stopifnot(key_depth %% num_heads == 0, value_depth %% num_heads == 0)
  vars_3d_num_heads <- if (vars_3d) num_heads else 0
  # Project query/memory into query, key and value tensors.
  c(q, k, v) %<-% compute_qkv(query,
                              memory,
                              key_depth,
                              value_depth,
                              q_filter_width,
                              kv_filter_width,
                              vars_3d_num_heads = vars_3d_num_heads)
  # [batch, length, depth] -> [batch, num_heads, length, depth/num_heads]
  q <- split_heads(q, num_heads)
  k <- split_heads(k, num_heads)
  v <- split_heads(v, num_heads)
  key_depth_per_head <- key_depth %/% num_heads
  # Scale queries here unless the 3d-variable path handles its own scaling.
  if (!vars_3d)
    q %<>% `*`(key_depth_per_head^(-0.5))
  # Dispatch on attention flavour.
  # FIX: "dot_product" previously routed to dot_product_area_attention_1d(),
  # so plain dot-product attention silently used area attention.
  if (attention_type == "dot_product")
    x <- dot_product_attention_1d(q, k, v, bias, dropout)
  else if (attention_type == "dot_product_area")
    x <- dot_product_area_attention_1d(
      q, k, v, bias, dropout, max_area_width, max_area_height, area_height)
  else
    stop("No other attention types currently implemented...")
  # [batch, num_heads, length, depth/num_heads] -> [batch, length, depth_v]
  x <- combine_heads(x)
  # Final output projection: a shared 3d kernel variable, or a dense layer.
  x <-
    if (vars_3d)
      tf$Variable(
        tf$glorot_normal_initializer()(
          shape = list(num_heads,
                       as.integer(value_depth %/% num_heads),
                       output_depth),
          dtype = x$dtype
        ), trainable = TRUE,
        name = "output_kernel_3d") %>%
      tf$reshape(list(value_depth, output_depth)) %>%
      {tf$tensordot(x, ., axes = 1L)}
    else
      layer_dense(x, output_depth, use_bias = FALSE, name = "output_transform")
  x
}
#' Strided block local self-attention.
#'
#' The sequence is divided into blocks of length block_length.
#' Attention for agiven query position can see all memory positions
#' in the corresponding block and filter_width many positions to
#' the left and right of the block.
#' q Tensor [batch, heads, length, depth_k]
#' k Tensor [batch, heads, length, depth_k]
#' v Tensor [batch, heads, length, depth_v]
#' Returns Tensor [batch, heads, length, depth_v]
#' NOTE(review): 'name' is accepted but never used in the body.
#' @export
local_attention_1d <-
  function(q,
           k,
           v,
           block_length = 128L,
           filter_width = 100L,
           name = NULL){
    # Shape assertions go here
    q_shape <- shape_list2(q)
    c(batch, num_heads, original_length, original_depth) %<-% q_shape
    # Right-pad the length axis (axis 3) up to a multiple of pad_length.
    pad_to_multiple <- function(x, pad_length) {
      x_length <- shape_list2(x)[[3]]
      tf$pad(x, list(c(0L, 0L),
                     c(0L, 0L),
                     c(0L, -x_length %% pad_length),
                     c(0L, 0L)))
    }
    # Pad the length axis by pad_length on both the left and the right.
    pad_l_and_r <- function(x, pad_length) {
      x_length <- shape_list2(x)[[3]]
      tf$pad(x, list(c(0L, 0L),
                     c(0L, 0L),
                     c(pad_length, pad_length),
                     c(0L, 0L)))
    }
    # Set up query blocks.
    # [batch, heads, blocks_q, block_length, depth_k]
    q <- pad_to_multiple(q, block_length)
    q <- reshape_by_blocks(q, shape_list2(q), block_length)
    total_query_blocks <- shape_list2(q)[[3]]
    blocks_per_filter_width <- as.integer(filter_width %/% block_length)
    remaining <- filter_width %% block_length
    # Pad keys/values so every query block sees filter_width positions on
    # each side, then cut them into blocks as well.
    k <- pad_to_multiple(k, block_length)
    v <- pad_to_multiple(v, block_length)
    k <- pad_l_and_r(k, filter_width + block_length - remaining)
    v <- pad_l_and_r(v, filter_width + block_length - remaining)
    k <- reshape_by_blocks(k, shape_list2(k), block_length)
    v <- reshape_by_blocks(v, shape_list2(v), block_length)
    total_kv_blocks <- shape_list2(k)[[3]]
    # Partial edge blocks, only needed when filter_width is not a multiple
    # of block_length.
    # NOTE(review): both *_v slices below are taken from 'k', not 'v' --
    # this looks like a copy-paste bug; values would silently equal keys.
    if (remaining) {
      left_partial_block_k <- tf$slice(
        k, list(0L, 0L, 0L, block_length - remaining, 0L),
        list(-1L, -1L, total_query_blocks, -1L, -1L)
      )
      left_partial_block_v <- tf$slice(
        k, list(0L, 0L, 0L, block_length - remaining, 0L),
        list(-1L, -1L, total_query_blocks, -1L, -1L)
      )
      right_partial_block_k = tf$slice(
        k, list(0L, 0L, total_kv_blocks - total_query_blocks, 0L, 0L),
        list(-1L, -1L, -1L, remaining, -1L)
      )
      right_partial_block_v = tf$slice(
        k, list(0L, 0L, total_kv_blocks - total_query_blocks, 0L, 0L),
        list(-1L, -1L, -1L, remaining, -1L)
      )
      slices <- list(c(left_partial_block_k, left_partial_block_v),
                     c(right_partial_block_k, right_partial_block_v))
    }
    # Prepare the rest of the blocks
    # NOTE(review): when remaining == 0, 'slices' is never initialised and
    # the append() below errors -- likely needs an 'else slices <- list()'.
    first_block_index <- if (remaining) 1L else 0L
    attention_blocks <- 2 * blocks_per_filter_width + 1L
    # NOTE(review): 'n' is a VECTOR here, so 'lapply(1:n, ...)' only uses its
    # first element (with a warning); the intended iteration is presumably
    # over the block indices themselves. Also 'i' is 1-based in R but used
    # directly as a 0-based tf$slice offset, and 'block_v' slices from 'k'
    # instead of 'v' -- verify against the tensor2tensor reference code.
    n <- first_block_index:attention_blocks + first_block_index
    blocks <- lapply(1:n, function(i) {
      block_k <- tf$slice(k, list(0L, 0L, i, 0L, 0L),
                          list(-1L, -1L, total_query_blocks, -1L, -1L))
      block_v <- tf$slice(k, list(0L, 0L, i, 0L, 0L),
                          list(-1L, -1L, total_query_blocks, -1L, -1L))
      c(block_k, block_v)
    })
    slices <- append(slices, blocks)
    # Concatenate all visible key/value blocks along the block-length axis.
    k <- tf$concat(lapply(slices, function(b) b[[1]]), axis = 3L)
    v <- tf$concat(lapply(slices, function(b) b[[2]]), axis = 3L)
    # Large negative bias masks attention onto padded key positions.
    attention_bias <- tf$expand_dims(embedding_to_padding(k) * -1e9, axis = -2L)
    shape_v <- shape_list2(v)
    depth_v <- shape_v[[length(shape_v)]]
    output <-
      dot_product_attention_1d(q, k, v, attention_bias, name = "local_1d") %>%
      tf$reshape(list(batch, num_heads, original_length, depth_v))
    # Remove the padding if introduced.
    output <- tf$slice(output,
                       list(0L, 0L, 0L, 0L),
                       list(-1L, -1L, original_length, -1L))
    output$set_shape(list(batch, num_heads, original_length, depth_v))
    output
  }
| /R/attention-functions.R | no_license | ifrit98/attention-layers | R | false | false | 8,341 | r |
# TODO: Make this an R6 layer?
#' Input query, key, and value matrices are used to compute dot product
#' attention. (Vaswani et al. 2017)
#' q: a Tensor with shape [batch, length_q, depth_k]
#' k: a Tensor with shape [batch, length_kv, depth_k]
#' v: a Tensor with shape [batch, length_kv, depth_v]
#' @export
dot_product_attention_1d <-
function(q,
k,
v,
bias = NULL,
dropout = 0,
name = "dot_product_attention") {
q_shape <- shape_list2(q)
scalar <-
tf$math$rsqrt(tf$cast(q_shape[[length(q_shape)]], tf$float32))
logits <- tf$matmul(q * scalar, k, transpose_b = TRUE)
if (!is.null(bias))
logits <- logits + bias
weights <- tf$nn$softmax(logits, name = "attention_weights")
x <- tf$matmul(weights, v)
x
}
# TODO: hard_attention_k: integer, if > 0 triggers hard attention (pick top-k)
# TODO: Add callable option to attention_type
#' Multihead attention mechanism
#'
#' With num_heads == 1, becomes simple dot product attention
#' @param query Tensor [batch, seqlen, depth_q]
#' @param memory Tensor [batch, seqlen, depth_m]
#' @param bias Bias tensor passed to attention function
#' @param key_depth Specify units for key component
#' @param value_depth Specify units for value component
#' @param output_depth Specify feature dim of final output
#' @param num_heads Specify number of heads to break input space up by
#' @param dropout Float value to add dropout to attention function
#' @param attention_type Character value of attention type
#' @param vars_3d use 3-dimensional variables for input/output transformations
#' @export
multihead_attention <- function(query,
memory = NULL,
bias = NULL,
key_depth = 64L,
value_depth = 64L,
output_depth = 128L,
num_heads = 4L,
dropout = 0,
attention_type = "dot_product",
q_filter_width = 1L,
kv_filter_width = 1L,
q_padding = "same",
kv_padding = "same",
max_area_width = 1L,
max_area_height = 1L,
area_height = 1L,
area_key_mode = "mean",
area_value_mode = "sum",
vars_3d = FALSE) {
stopifnot(key_depth %% num_heads == 0, value_depth %% num_heads == 0)
vars_3d_num_heads <- if (vars_3d) num_heads else 0
c(q, k, v) %<-% compute_qkv(query,
memory,
key_depth,
value_depth,
q_filter_width,
kv_filter_width,
vars_3d_num_heads = vars_3d_num_heads)
q <- split_heads(q, num_heads)
k <- split_heads(k, num_heads)
v <- split_heads(v, num_heads)
key_depth_per_head <- key_depth %/% num_heads
if (!vars_3d)
q %<>% `*`(key_depth_per_head^(-0.5))
if (attention_type == "dot_product")
x <- dot_product_area_attention_1d(q, k, v, bias, dropout)
else if (attention_type == "dot_product_area")
x <- dot_product_area_attention_1d(
q, k, v, bias, dropout, max_area_width, max_area_height, area_height)
else
stop("No other attention types currently implemented...")
x <- combine_heads(x)
x <-
if (vars_3d)
tf$Variable(
tf$glorot_normal_initializer()(
shape = list(num_heads,
as.integer(value_depth %/% num_heads),
output_depth),
dtype = x$dtype
), trainable = TRUE,
name = "output_kernel_3d") %>%
tf$reshape(list(value_depth, output_depth)) %>%
{tf$tensordot(x, ., axes = 1L)}
else
layer_dense(x, output_depth, use_bias = FALSE, name = "output_transform")
x
}
#' Strided block local self-attention.
#'
#' The sequence is divided into blocks of length block_length.
#' Attention for agiven query position can see all memory positions
#' in the corresponding block and filter_width many positions to
#' the left and right of the block.
#' q Tensor [batch, heads, length, depth_k]
#' k Tensor [batch, heads, length, depth_k]
#' v Tensor [batch, heads, length, depth_v]
#' Returns Tensor [batch, heads, length, depth_v]
#' @export
local_attention_1d <-
function(q,
k,
v,
block_length = 128L,
filter_width = 100L,
name = NULL){
# Shape assertions go here
q_shape <- shape_list2(q)
c(batch, num_heads, original_length, original_depth) %<-% q_shape
pad_to_multiple <- function(x, pad_length) {
x_length <- shape_list2(x)[[3]]
tf$pad(x, list(c(0L, 0L),
c(0L, 0L),
c(0L, -x_length %% pad_length),
c(0L, 0L)))
}
pad_l_and_r <- function(x, pad_length) {
x_length <- shape_list2(x)[[3]]
tf$pad(x, list(c(0L, 0L),
c(0L, 0L),
c(pad_length, pad_length),
c(0L, 0L)))
}
# Set up query blocks.
# [batch, heads, blocks_q, block_length, depth_k]
q <- pad_to_multiple(q, block_length)
q <- reshape_by_blocks(q, shape_list2(q), block_length)
total_query_blocks <- shape_list2(q)[[3]]
blocks_per_filter_width <- as.integer(filter_width %/% block_length)
remaining <- filter_width %% block_length
k <- pad_to_multiple(k, block_length)
v <- pad_to_multiple(v, block_length)
k <- pad_l_and_r(k, filter_width + block_length - remaining)
v <- pad_l_and_r(v, filter_width + block_length - remaining)
k <- reshape_by_blocks(k, shape_list2(k), block_length)
v <- reshape_by_blocks(v, shape_list2(v), block_length)
total_kv_blocks <- shape_list2(k)[[3]]
if (remaining) {
left_partial_block_k <- tf$slice(
k, list(0L, 0L, 0L, block_length - remaining, 0L),
list(-1L, -1L, total_query_blocks, -1L, -1L)
)
left_partial_block_v <- tf$slice(
k, list(0L, 0L, 0L, block_length - remaining, 0L),
list(-1L, -1L, total_query_blocks, -1L, -1L)
)
right_partial_block_k = tf$slice(
k, list(0L, 0L, total_kv_blocks - total_query_blocks, 0L, 0L),
list(-1L, -1L, -1L, remaining, -1L)
)
right_partial_block_v = tf$slice(
k, list(0L, 0L, total_kv_blocks - total_query_blocks, 0L, 0L),
list(-1L, -1L, -1L, remaining, -1L)
)
slices <- list(c(left_partial_block_k, left_partial_block_v),
c(right_partial_block_k, right_partial_block_v))
}
# Prepare the rest of the blocks
first_block_index <- if (remaining) 1L else 0L
attention_blocks <- 2 * blocks_per_filter_width + 1L
n <- first_block_index:attention_blocks + first_block_index
blocks <- lapply(1:n, function(i) {
block_k <- tf$slice(k, list(0L, 0L, i, 0L, 0L),
list(-1L, -1L, total_query_blocks, -1L, -1L))
block_v <- tf$slice(k, list(0L, 0L, i, 0L, 0L),
list(-1L, -1L, total_query_blocks, -1L, -1L))
c(block_k, block_v)
})
slices <- append(slices, blocks)
k <- tf$concat(lapply(slices, function(b) b[[1]]), axis = 3L)
v <- tf$concat(lapply(slices, function(b) b[[2]]), axis = 3L)
attention_bias <- tf$expand_dims(embedding_to_padding(k) * -1e9, axis = -2L)
shape_v <- shape_list2(v)
depth_v <- shape_v[[length(shape_v)]]
output <-
dot_product_attention_1d(q, k, v, attention_bias, name = "local_1d") %>%
tf$reshape(list(batch, num_heads, original_length, depth_v))
# Remove the padding if introduced.
output <- tf$slice(output,
list(0L, 0L, 0L, 0L),
list(-1L, -1L, original_length, -1L))
output$set_shape(list(batch, num_heads, original_length, depth_v))
output
}
|
#---------------------------------------------------
# Intro to R - Session 3
# Prepared by Patricia Kirkland
# Last updated: 8 June 2017
#---------------------------------------------------
rm(list=ls(all=TRUE))
library(dplyr)
## update with the file path to your working directory
setwd("/Users/patriciakirkland/Dropbox/Empiprical Reasoning Center/R Workshop")
### user-defined functions
addVectors <- function(a,b) {
out.vec <- a + b
return(out.vec)
}
a <- 1:10
b <- -1:-10
addVectors(a, b)
x <- 1:10
y <- -1:-10
addVectors(x, y)
z <- addVectors(a=x, b=y)
cluster_se <- function(dat,fm, cluster){
require(sandwich, quietly = TRUE)
require(lmtest, quietly = TRUE)
M <- length(unique(cluster))
N <- length(cluster)
K <- fm$rank
dfc <- (M/(M-1))*((N-1)/(N-K))
uj <- apply(estfun(fm),2, function(x) tapply(x, cluster, sum));
vcovCL <- dfc*sandwich(fm, meat=crossprod(uj)/N)
coeftest(fm, vcovCL) }
load("muni_finance_data_cleaned.RData")
fit_3 <- lm(Total.Expenditure.PC ~ Total.Taxes.PC + Population + Census.Region, data=COG.fips)
summary(fit_3)
library(lmtest)
library(sandwich)
# heteroskedasticity-robust standard errors
coeftest(fit_3, vcov=vcovHC(fit_3, type="HC1"))
robust_se <- function(regmodel){
require(sandwich, quietly = TRUE)
require(lmtest, quietly = TRUE)
coeftest(regmodel, vcov=vcovHC(regmodel, type="HC1"))
}
# robust SEs with our user-defined function
robust_se(fit_3)
# cluster-robust standard errors
cluster_se(COG.fips, fit_3, COG.fips$fipsid)
## clear the workspace
rm(list=ls(all=TRUE))
## update with the file path to your working directory
setwd("/Users/patriciakirkland/Dropbox/Empiprical Reasoning Center/R Workshop")
source("ERC R Workshop Source.R")
load("muni_finance_data_cleaned.RData")
fit_3 <- lm(Total.Expenditure.PC ~ Total.Taxes.PC + Population + Census.Region, data=COG.fips)
summary(fit_3)
# robust SEs with our user-defined function (from source file)
robust_se(fit_3)
# cluster-robust standard errors (from source file)
cluster_se(COG.fips, fit_3, COG.fips$fipsid)
#------------------------------------------------------
#
# Automating tasks--- an example
#
#------------------------------------------------------
rm(list=ls(all=TRUE))
library(dplyr)
## update with the file path to your working directory
setwd("/Users/patriciakirkland/Dropbox/Empiprical Reasoning Center/R Workshop")
#### build annual .csv files from COG text files
## update with the file path for your directory
directory <- "/Users/patriciakirkland/Dropbox/Census of Governments/_IndFin_1967-2007"
year <- 2000:2003
COG.muni <- data.frame()
for(j in year){
i <- substr(as.character(j), 3, 4)
COG.a <- read.csv(paste0(directory, "/", "IndFin", formatC(i, width = 2, flag = "0"), "a",
".txt"))
COG.b <- read.csv(paste0(directory, "/", "IndFin", formatC(i, width = 2, flag = "0"), "b",
".txt"))
COG.c <- read.csv(paste0(directory, "/", "IndFin", formatC(i, width = 2, flag = "0"), "c",
".txt"))
COGmerge <- left_join(COG.a, COG.b)
COGmerge <- left_join(COGmerge, COG.c)
# COG.muni.temp <- subset(COGmerge, Type.Code == 2)
COG.muni <- rbind(COG.muni, subset(COGmerge, Type.Code == 2))
}
#------------------------------------------------------
#
# Apply Functions
#
#------------------------------------------------------
load("muni_finance_data_cleaned.RData")
### tapply() -- takes a vector & returns a vector; performs function on subsets by grouping variable(s)
## group or categorical variables typically should be factors
COG.fips$Census.Region <- factor(COG.fips$Census.Region)
## check the frequency distribution (optional)
table(COG.fips$Census.Region)
## use tapply() to get group means (you can use other functions as well)
tapply(COG.fips$Total.Expenditure, COG.fips$Census.Region, mean, na.rm=TRUE)
### sapply() takes a list, a data frame, or a subset of a data frame,
### performs a function on each element, and returns a vector
## basic example
sapply(COG.fips[, 7:15], mean, na.rm=TRUE)
## create a data frame of summary statistics
## start by obtaining the statistics using sapply()
COG_means <- sapply(COG.fips[, 7:15], mean, na.rm=TRUE)
COG_medians <- sapply(COG.fips[, 7:15], median, na.rm=TRUE)
COG_stdevs <- sapply(COG.fips[, 7:15], sd, na.rm=TRUE)
## create a vector of variable names
COG_variable <- names(COG.fips[, 7:15])
## bind the vectors by columns
COG_summary <- cbind.data.frame(COG_variable, COG_means, COG_medians, COG_stdevs)
## remove the row names
row.names(COG_summary) <- NULL
COG_summary
#### lapply() -- takes a list or data frame; returns a list
## EXAMPLE: run the same regression on multiple DVs
## specify a list of variables
varlist <- c("Total.Expenditure.PC", "Total.Taxes.PC", "Total.Revenue.PC")
## run the same regression on multiple DVs
COG_models <- lapply(varlist, function(x) {
lm(substitute(COG.fips$i ~ COG.fips$Population + COG.fips$Census.Region, list(i = x)))
})
## to perform the summary() function
COG_results <- lapply(COG_models, summary)
## The functions above can be combined into a single lapply() functions
COG_models_results <- lapply(varlist, function(x) {
summary(lm(substitute(COG.fips$i ~ COG.fips$Population + COG.fips$Census.Region, list(i = x))))
})
COG_models_results
## extract the first element
COG_models_results[[1]]
## Add names to the elements in a list
names(COG_models_results) <- varlist
COG_models_results
## you can extract them using the names you assigned
COG_models_results[["Total.Taxes.PC"]]
## extract more detailed information
COG_models_results[["Total.Taxes.PC"]]$coefficients
COG_models_results[["Total.Taxes.PC"]]$coefficients[, 1:2]
#------------------------------------------------------
#
# ANOVA Models --- several examples
#
#------------------------------------------------------
#### One-way ANOVA
## Adapted from https://personality-project.org/r/r.guide.html\#anova
## load data
datafilename <- "http://personality-project.org/r/datasets/R.appendix1.data"
data.ex1 <- read.table(datafilename,header=T)
## check the class of the variables -- the IV should be a factor (group or categorical) variable
class(data.ex1$Dosage)
class(data.ex1$Alertness)
## ANOVA model
aov.ex1 <- aov(Alertness ~ Dosage, data = data.ex1)
summary(aov.ex1)
## obtain the means and the number of subjects per cell
model.tables(aov.ex1, "means")
## a basic boxplot -- you can add additional arguments to format the boxplot
## you could also do this in ggplot if you prefer
boxplot(Alertness ~ Dosage, data = data.ex1)
#### Two-way (between subjects) ANOVA
## Adapted from https://personality-project.org/r/r.guide.html\#anova
## load data
datafilename <- "http://personality-project.org/r/datasets/R.appendix2.data"
data.ex2 <- read.table(datafilename,header=T)
## view data
data.ex2
## ANOVA model
aov.ex2 <- aov(Alertness ~ Gender*Dosage, data = data.ex2)
summary(aov.ex2)
## obtain the means and the number of subjects per cell
model.tables(aov.ex2, "means")
## graphical summary using a boxplot
boxplot(Alertness ~ Dosage*Gender, data = data.ex2)
## another way to graph the means
with(data.ex2, interaction.plot(Dosage, Gender, Alertness))
#### One-way repeated measures ANOVA
## note that data must be in long format (one response variable in one column)
## Adapted from https://personality-project.org/r/r.guide.html\#anova
## load data
datafilename <- "http://personality-project.org/r/datasets/R.appendix3.data"
data.ex3 <- read.table(datafilename, header=T)
## view data -- note, data are in long format
data.ex3
## ANOVA model -- note the Error() term, indicates that the treatment (data.ex3$Valence)
## is nested within subjects (data.ex3Subject)
aov.ex3 <- aov(Recall ~ Valence + Error(Subject/Valence), data = data.ex3)
summary(aov.ex3)
## obtain the means and the number of subjects per cell
model.tables(aov.ex3, "means")
## boxplot (also could use ggplot)
boxplot(Recall ~ Valence, data = data.ex3)
#### Two-way repeated measures ANOVA
## Adapted from https://personality-project.org/r/r.guide.html\#anova
## load data
datafilename="http://personality-project.org/r/datasets/R.appendix4.data"
data.ex4=read.table(datafilename,header=T)
## ANOVA
aov.ex4=aov(Recall ~ (Task*Valence) + Error(Subject/(Task*Valence)), data = data.ex4)
summary(aov.ex4)
## obtain the means and the number of subjects/cell
model.tables(aov.ex4, "means")
## boxplot
boxplot(Recall ~ Task*Valence, data = data.ex4)
## interaction.plot -- another way to graph the interaction
with(data.ex4, interaction.plot(Valence, Task, Recall))
#### 4-way ANOVA: 2 repeated measures and two between-subjects
## Adapted from https://personality-project.org/r/r.guide.html\#anova
## load data
datafilename="http://personality-project.org/r/datasets/R.appendix5.data"
data.ex5=read.table(datafilename,header=T)
## ANOVA
aov.ex5 <- aov(Recall ~ (Task*Valence*Gender*Dosage) +
Error(Subject/(Task*Valence)) +
(Gender*Dosage),
data = data.ex5)
summary(aov.ex5)
## obtain the means and the number of subjects per cell
model.tables(aov.ex5, "means")
# graphical summary of means of the 36 cells
boxplot(Recall ~ Task*Valence*Gender*Dosage, data = data.ex5)
# graphical summary of means of 18 cells
boxplot(Recall ~ Task*Valence*Dosage, data = data.ex5)
#------------------------------------------------------
#
# Structural Equation Models --- an example
#
#------------------------------------------------------
## adapted from a presentation by Grace Charles, presented at Davis R Users' Group on May 15, 2015
## adapted from Jim Grace's SEM workshop and Lavaan tutorials
rm(list=ls())
### install libraries
library(lavaan)
library(semPlot)
library(qgraph)
## here is an example model using a built-in dataset-- if you have problems accessing it, you may
## need to load the datasets package
# built in dataset
data(PoliticalDemocracy)
# let's name the variables per the documentation
names(PoliticalDemocracy) <- c("free_press_1960", "free_oppo_1960", "fair_elect_1960", "leg_effective_1960",
"free_press_1965", "free_oppo_1965", "fair_elect_1965", "leg_effect_1965",
"GNP_pc_1960", "energy_pc_1960", "labor_force_pct_industry_1960")
## the measurement model equations are "latent" and represented by =~
## regressions are indicated by ~
## residual correlations (in this case because they represent different years of the same measurement) are represented by ~~
model <- '
# measurement model
industrialization_1960 =~ GNP_pc_1960 + energy_pc_1960 + labor_force_pct_industry_1960
democracy_1960 =~ free_press_1960 + free_oppo_1960 + fair_elect_1960 + leg_effective_1960
democracy_1965 =~ free_press_1965 + free_oppo_1965 + fair_elect_1965 + leg_effect_1965
# regressions
democracy_1960 ~ industrialization_1960
democracy_1965 ~ industrialization_1960 + democracy_1960
# residual correlations
free_press_1960 ~~ free_press_1965
free_oppo_1960 ~~ leg_effective_1960
free_oppo_1960 ~~ free_oppo_1965
fair_elect_1960 ~~ fair_elect_1965
leg_effective_1960 ~~ leg_effect_1965
free_oppo_1965 ~~ leg_effect_1965
'
#fit your SEM
fit <- sem(model, data = PoliticalDemocracy)
#summarize results
summary(fit, standardized = TRUE, rsq = T)
##plot results using semPaths function in qgraph
semPaths(fit, "std", edge.label.cex = 0.5, curvePivot = TRUE, layout = "tree")
### use dev.off() to clear your plots after you finish with the path diagram
# dev.off()
##check to see if you missed anything. High mi values suggest that there is a path that you missed.
modindices(fit)
## looks good
##can also look at variance tables
vartable(fit)
## sometimes you get warnings about the scale of your variables
#Warning message:
# In getDataFull(data = data, group = group, group.label = group.label, :
# lavaan WARNING: some observed variances are (at least) a factor 100 times larger than others; please rescale
# in this case, all you have to do to make this error go away is rescale variables
#model comparison
#you can compare alternative pathway models using AIC, BIC, etc:
#create second alternative model
model2 <- '
# measurement model
industrialization_1960 =~ GNP_pc_1960 + energy_pc_1960 + labor_force_pct_industry_1960
democracy_1960 =~ free_press_1960 + free_oppo_1960 + fair_elect_1960 + leg_effective_1960
democracy_1965 =~ free_press_1965 + free_oppo_1965 + fair_elect_1965 + leg_effect_1965
# regressions
# leave ind60 out of regression
democracy_1960 ~ industrialization_1960
democracy_1965 ~ democracy_1960
# residual correlations
free_press_1960 ~~ free_press_1965
free_oppo_1960 ~~ leg_effective_1960
free_oppo_1960 ~~ free_oppo_1965
fair_elect_1960 ~~ fair_elect_1965
leg_effective_1960 ~~ leg_effect_1965
free_oppo_1965 ~~ leg_effect_1965
'
fit2 <- sem(model2, data = PoliticalDemocracy)
summary(fit2)
AIC(fit, fit2)
| /R Workshop 3/ERC R Workshop - Session 3 (Summer 2017).R | no_license | barnarderc/R-workshops | R | false | false | 13,290 | r | #---------------------------------------------------
# Intro to R - Session 3
# Prepared by Patricia Kirkland
# Last updated: 8 June 2017
#---------------------------------------------------
rm(list=ls(all=TRUE))
library(dplyr)
## update with the file path to your working directory
setwd("/Users/patriciakirkland/Dropbox/Empiprical Reasoning Center/R Workshop")
### user-defined functions
addVectors <- function(a,b) {
out.vec <- a + b
return(out.vec)
}
a <- 1:10
b <- -1:-10
addVectors(a, b)
x <- 1:10
y <- -1:-10
addVectors(x, y)
z <- addVectors(a=x, b=y)
cluster_se <- function(dat,fm, cluster){
require(sandwich, quietly = TRUE)
require(lmtest, quietly = TRUE)
M <- length(unique(cluster))
N <- length(cluster)
K <- fm$rank
dfc <- (M/(M-1))*((N-1)/(N-K))
uj <- apply(estfun(fm),2, function(x) tapply(x, cluster, sum));
vcovCL <- dfc*sandwich(fm, meat=crossprod(uj)/N)
coeftest(fm, vcovCL) }
load("muni_finance_data_cleaned.RData")
fit_3 <- lm(Total.Expenditure.PC ~ Total.Taxes.PC + Population + Census.Region, data=COG.fips)
summary(fit_3)
library(lmtest)
library(sandwich)
# heteroskedasticity-robust standard errors
coeftest(fit_3, vcov=vcovHC(fit_3, type="HC1"))
robust_se <- function(regmodel){
require(sandwich, quietly = TRUE)
require(lmtest, quietly = TRUE)
coeftest(regmodel, vcov=vcovHC(regmodel, type="HC1"))
}
# robust SEs with our user-defined function
robust_se(fit_3)
# cluster-robust standard errors
cluster_se(COG.fips, fit_3, COG.fips$fipsid)
## clear the workspace
rm(list=ls(all=TRUE))
## update with the file path to your working directory
setwd("/Users/patriciakirkland/Dropbox/Empiprical Reasoning Center/R Workshop")
source("ERC R Workshop Source.R")
load("muni_finance_data_cleaned.RData")
fit_3 <- lm(Total.Expenditure.PC ~ Total.Taxes.PC + Population + Census.Region, data=COG.fips)
summary(fit_3)
# robust SEs with our user-defined function (from source file)
robust_se(fit_3)
# cluster-robust standard errors (from source file)
cluster_se(COG.fips, fit_3, COG.fips$fipsid)
#------------------------------------------------------
#
# Automating tasks--- an example
#
#------------------------------------------------------
rm(list=ls(all=TRUE))
library(dplyr)
## update with the file path to your working directory
setwd("/Users/patriciakirkland/Dropbox/Empiprical Reasoning Center/R Workshop")
#### build annual .csv files from COG text files
## update with the file path for your directory
directory <- "/Users/patriciakirkland/Dropbox/Census of Governments/_IndFin_1967-2007"
year <- 2000:2003
COG.muni <- data.frame()
for(j in year){
i <- substr(as.character(j), 3, 4)
COG.a <- read.csv(paste0(directory, "/", "IndFin", formatC(i, width = 2, flag = "0"), "a",
".txt"))
COG.b <- read.csv(paste0(directory, "/", "IndFin", formatC(i, width = 2, flag = "0"), "b",
".txt"))
COG.c <- read.csv(paste0(directory, "/", "IndFin", formatC(i, width = 2, flag = "0"), "c",
".txt"))
COGmerge <- left_join(COG.a, COG.b)
COGmerge <- left_join(COGmerge, COG.c)
# COG.muni.temp <- subset(COGmerge, Type.Code == 2)
COG.muni <- rbind(COG.muni, subset(COGmerge, Type.Code == 2))
}
#------------------------------------------------------
#
# Apply Functions
#
#------------------------------------------------------
load("muni_finance_data_cleaned.RData")
### tapply() -- takes a vector & returns a vector; performs function on subsets by grouping variable(s)
## group or categorical variables typically should be factors
COG.fips$Census.Region <- factor(COG.fips$Census.Region)
## check the frequency distribution (optional)
table(COG.fips$Census.Region)
## use tapply() to get group means (you can use other functions as well)
tapply(COG.fips$Total.Expenditure, COG.fips$Census.Region, mean, na.rm=TRUE)
### sapply() takes a list, a data frame, or a subset of a data frame,
### performs a function on each element, and returns a vector
## basic example
sapply(COG.fips[, 7:15], mean, na.rm=TRUE)
## create a data frame of summary statistics
## start by obtaining the statistics using sapply()
COG_means <- sapply(COG.fips[, 7:15], mean, na.rm=TRUE)
COG_medians <- sapply(COG.fips[, 7:15], median, na.rm=TRUE)
COG_stdevs <- sapply(COG.fips[, 7:15], sd, na.rm=TRUE)
## create a vector of variable names
COG_variable <- names(COG.fips[, 7:15])
## bind the vectors by columns
COG_summary <- cbind.data.frame(COG_variable, COG_means, COG_medians, COG_stdevs)
## remove the row names
row.names(COG_summary) <- NULL
COG_summary
#### lapply() -- takes a list or data frame; returns a list
## EXAMPLE: run the same regression on multiple DVs
## specify a list of variables
varlist <- c("Total.Expenditure.PC", "Total.Taxes.PC", "Total.Revenue.PC")
## run the same regression on multiple DVs
COG_models <- lapply(varlist, function(x) {
lm(substitute(COG.fips$i ~ COG.fips$Population + COG.fips$Census.Region, list(i = x)))
})
## to perform the summary() function
COG_results <- lapply(COG_models, summary)
## The functions above can be combined into a single lapply() functions
COG_models_results <- lapply(varlist, function(x) {
summary(lm(substitute(COG.fips$i ~ COG.fips$Population + COG.fips$Census.Region, list(i = x))))
})
COG_models_results
## extract the first element
COG_models_results[[1]]
## Add names to the elements in a list
names(COG_models_results) <- varlist
COG_models_results
## you can extract them using the names you assigned
COG_models_results[["Total.Taxes.PC"]]
## extract more detailed information
COG_models_results[["Total.Taxes.PC"]]$coefficients
COG_models_results[["Total.Taxes.PC"]]$coefficients[, 1:2]
#------------------------------------------------------
#
# ANOVA Models --- several examples
#
#------------------------------------------------------
#### One-way ANOVA
## Adapted from https://personality-project.org/r/r.guide.html\#anova
## load data
datafilename <- "http://personality-project.org/r/datasets/R.appendix1.data"
data.ex1 <- read.table(datafilename,header=T)
## check the class of the variables -- the IV should be a factor (group or categorical) variable
class(data.ex1$Dosage)
class(data.ex1$Alertness)
## ANOVA model
aov.ex1 <- aov(Alertness ~ Dosage, data = data.ex1)
summary(aov.ex1)
## obtain the means and the number of subjects per cell
model.tables(aov.ex1, "means")
## a basic boxplot -- you can add additional arguments to format the boxplot
## you could also do this in ggplot if you prefer
boxplot(Alertness ~ Dosage, data = data.ex1)
#### Two-way (between subjects) ANOVA
## Adapted from https://personality-project.org/r/r.guide.html\#anova
## load data
datafilename <- "http://personality-project.org/r/datasets/R.appendix2.data"
data.ex2 <- read.table(datafilename,header=T)
## view data
data.ex2
## ANOVA model
aov.ex2 <- aov(Alertness ~ Gender*Dosage, data = data.ex2)
summary(aov.ex2)
## obtain the means and the number of subjects per cell
model.tables(aov.ex2, "means")
## graphical summary using a boxplot
boxplot(Alertness ~ Dosage*Gender, data = data.ex2)
## another way to graph the means
with(data.ex2, interaction.plot(Dosage, Gender, Alertness))
#### One-way repeated measures ANOVA
## note that data must be in long format (one response variable in one column)
## Adapted from https://personality-project.org/r/r.guide.html\#anova
## load data
datafilename <- "http://personality-project.org/r/datasets/R.appendix3.data"
data.ex3 <- read.table(datafilename, header=T)
## view data -- note, data are in long format
data.ex3
## ANOVA model -- note the Error() term, indicates that the treatment (data.ex3$Valence)
## is nested within subjects (data.ex3Subject)
aov.ex3 <- aov(Recall ~ Valence + Error(Subject/Valence), data = data.ex3)
summary(aov.ex3)
## obtain the means and the number of subjects per cell
model.tables(aov.ex3, "means")
## boxplot (also could use ggplot)
boxplot(Recall ~ Valence, data = data.ex3)
#### Two-way repeated measures ANOVA
## Adapted from https://personality-project.org/r/r.guide.html\#anova
## load data
datafilename="http://personality-project.org/r/datasets/R.appendix4.data"
data.ex4=read.table(datafilename,header=T)
## ANOVA
aov.ex4=aov(Recall ~ (Task*Valence) + Error(Subject/(Task*Valence)), data = data.ex4)
summary(aov.ex4)
## obtain the means and the number of subjects/cell
model.tables(aov.ex4, "means")
## boxplot
boxplot(Recall ~ Task*Valence, data = data.ex4)
## interaction.plot -- another way to graph the interaction
with(data.ex4, interaction.plot(Valence, Task, Recall))
#### 4-way ANOVA: 2 repeated measures and two between-subjects
## Adapted from https://personality-project.org/r/r.guide.html\#anova
## load data
datafilename="http://personality-project.org/r/datasets/R.appendix5.data"
data.ex5=read.table(datafilename,header=T)
## ANOVA
aov.ex5 <- aov(Recall ~ (Task*Valence*Gender*Dosage) +
Error(Subject/(Task*Valence)) +
(Gender*Dosage),
data = data.ex5)
summary(aov.ex5)
## obtain the means and the number of subjects per cell
model.tables(aov.ex5, "means")
# graphical summary of means of the 36 cells
boxplot(Recall ~ Task*Valence*Gender*Dosage, data = data.ex5)
# graphical summary of means of 18 cells
boxplot(Recall ~ Task*Valence*Dosage, data = data.ex5)
#------------------------------------------------------
#
# Structural Equation Models --- an example
#
#------------------------------------------------------
## adapted from a presentation by Grace Charles, presented at Davis R Users' Group on May 15, 2015
## adapted from Jim Grace's SEM workshop and Lavaan tutorials
rm(list=ls())
### install libraries
library(lavaan)
library(semPlot)
library(qgraph)
## here is an example model using a built-in dataset-- if you have problems accessing it, you may
## need to load the datasets package
# built in dataset
data(PoliticalDemocracy)
# let's name the variables per the documentation
names(PoliticalDemocracy) <- c("free_press_1960", "free_oppo_1960", "fair_elect_1960", "leg_effective_1960",
"free_press_1965", "free_oppo_1965", "fair_elect_1965", "leg_effect_1965",
"GNP_pc_1960", "energy_pc_1960", "labor_force_pct_industry_1960")
## the measurement model equations are "latent" and represented by =~
## regressions are indicated by ~
## residual correlations (in this case because they represent different years of the same measurement) are represented by ~~
model <- '
# measurement model
industrialization_1960 =~ GNP_pc_1960 + energy_pc_1960 + labor_force_pct_industry_1960
democracy_1960 =~ free_press_1960 + free_oppo_1960 + fair_elect_1960 + leg_effective_1960
democracy_1965 =~ free_press_1965 + free_oppo_1965 + fair_elect_1965 + leg_effect_1965
# regressions
democracy_1960 ~ industrialization_1960
democracy_1965 ~ industrialization_1960 + democracy_1960
# residual correlations
free_press_1960 ~~ free_press_1965
free_oppo_1960 ~~ leg_effective_1960
free_oppo_1960 ~~ free_oppo_1965
fair_elect_1960 ~~ fair_elect_1965
leg_effective_1960 ~~ leg_effect_1965
free_oppo_1965 ~~ leg_effect_1965
'
#fit your SEM
fit <- sem(model, data = PoliticalDemocracy)
#summarize results
summary(fit, standardized = TRUE, rsq = T)
##plot results using semPaths function in qgraph
semPaths(fit, "std", edge.label.cex = 0.5, curvePivot = TRUE, layout = "tree")
### use dev.off() to clear your plots after you finish with the path diagram
# dev.off()
##check to see if you missed anything. High mi values suggest that there is a path that you missed.
modindices(fit)
## looks good
##can also look at variance tables
vartable(fit)
## sometimes you get warnings about the scale of your variables
#Warning message:
# In getDataFull(data = data, group = group, group.label = group.label, :
# lavaan WARNING: some observed variances are (at least) a factor 100 times larger than others; please rescale
# in this case, all you have to do to make this error go away is rescale variables
#model comparison
#you can compare alternative pathway models using AIC, BIC, etc:
#create second alternative model
model2 <- '
# measurement model
industrialization_1960 =~ GNP_pc_1960 + energy_pc_1960 + labor_force_pct_industry_1960
democracy_1960 =~ free_press_1960 + free_oppo_1960 + fair_elect_1960 + leg_effective_1960
democracy_1965 =~ free_press_1965 + free_oppo_1965 + fair_elect_1965 + leg_effect_1965
# regressions
# leave ind60 out of regression
democracy_1960 ~ industrialization_1960
democracy_1965 ~ democracy_1960
# residual correlations
free_press_1960 ~~ free_press_1965
free_oppo_1960 ~~ leg_effective_1960
free_oppo_1960 ~~ free_oppo_1965
fair_elect_1960 ~~ fair_elect_1965
leg_effective_1960 ~~ leg_effect_1965
free_oppo_1965 ~~ leg_effect_1965
'
fit2 <- sem(model2, data = PoliticalDemocracy)
summary(fit2)
AIC(fit, fit2)
|
# Purpose: convert the Hijri (Hegira) dates in the database to consistent
# Christian (Gregorian) dates so that they can be displayed in graphs.
# Step 1: load the two input tables from disk.
# NOTE(review): paths are hard-coded to one machine -- confirm before reuse.
Nishapur.Augment <- read.csv("C:/Users/Conor/Dropbox/TCNJ/MUSE/Nishapur Analysis/Nishapur Augment.csv")
Hegira.to.Gregorian.Dates <- read.csv("C:/Users/Conor/Dropbox/TCNJ/MUSE/Hegira to Gregorian Dates.csv")
# Step 2: preallocate a 1207-row, 3-column container (filled with NA) to
# hold the converted dates, then wrap it in a data frame for $-access.
tidy.matrix <- matrix(nrow = 1207, ncol = 3)
tidy.data <- data.frame(tidy.matrix)
# Step 3: initialise the Hegira-year counter used by the lookup loop below.
x <- 1
for(n in 1:1207){
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
# Replaces 196 hand-unrolled copies of the same candidate-year check with a
# single loop.  Behavior is identical to the unrolled original: starting from
# the current value of `x`, advance through 196 successive Hegira years;
# whenever the current record's Hegira date equals the candidate year, record
# that year and its two Gregorian equivalents in the tidy data frame.
#
# Globals read/written (defined earlier in this script):
#   x                         - running Hegira-year counter (incremented here)
#   n                         - index of the record currently being processed
#   Nishapur.Augment          - source table; column Hegira.Date is read
#   Hegira.to.Gregorian.Dates - lookup table; columns A.D..1 and A.D.2 are read
#   tidy.data                 - output table; columns X1, X2, X3 are written
#
# NOTE(review): if Nishapur.Augment$Hegira.Date[n] is NA, the scalar `==`
# inside `if()` errors out — exactly as the original unrolled code did.  A
# vectorized lookup via match() would be faster but would change that NA
# behavior, so the faithful loop form is kept.
for (step in seq_len(196)) {
  x <- x + 1
  if (Nishapur.Augment$Hegira.Date[n] == x) {
    tidy.data$X1[n] <- x
    tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
    tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
  }
}
# One trailing increment with no check here: its matching `if` is the one
# that immediately follows this block in the file.
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- 1
}
#with that procedure done, let's export the data
write.csv(tidy.data, file = "Nishapur_Hijri_to_Christian.csv", row.names = FALSE) | /Data Wrangling.R | no_license | conorreid/Nishapur | R | false | false | 91,210 | r | #this is to convert the Hijri dates in the database to consistent Christian dates
#the purpose is to display these Christian dates in graphs
#let's import the data
Nishapur.Augment <- read.csv("C:/Users/Conor/Dropbox/TCNJ/MUSE/Nishapur Analysis/Nishapur Augment.csv")
Hegira.to.Gregorian.Dates <- read.csv("C:/Users/Conor/Dropbox/TCNJ/MUSE/Hegira to Gregorian Dates.csv")
#now, let's make the matrix and data frame to store the data
tidy.matrix <- matrix(ncol=3, nrow=1207)
tidy.data <- data.frame(tidy.matrix)
#create dummy variables for use in the for loop
x <- 1
#now, let's make our for loop and if statements
for(n in 1:1207){
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
# Collapsed 196 copy-pasted if/increment units into one loop.
# Behavior is identical to the unrolled original: for each of the next 196
# candidate values of the counter `x`, if this record's Hegira date equals
# `x`, record `x` in tidy.data$X1[n] and copy the two corresponding
# Gregorian-year columns (A.D..1, A.D.2) from the conversion table into
# tidy.data$X2[n] / tidy.data$X3[n]. `x` is advanced by one on every
# iteration regardless of a match, so `x` ends exactly 196 higher than it
# started — the same as after the original unrolled sequence.
#
# NOTE(review): this indexes Hegira.to.Gregorian.Dates rows directly by the
# Hegira year `x`, so it assumes row x corresponds to Hegira year x — TODO
# confirm against how that table is built earlier in the file.
# (.unroll_step is a throwaway counter; seq_len() is used so a zero count
# would be safe, although here the count is the fixed literal 196.)
for (.unroll_step in seq_len(196)) {
  if (Nishapur.Augment$Hegira.Date[n] == x) {
    tidy.data$X1[n] <- x
    tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
    tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
  }
  x <- x + 1
}
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- x + 1
if(Nishapur.Augment$Hegira.Date[n]==x){
tidy.data$X1[n] <- x
tidy.data$X2[n] <- Hegira.to.Gregorian.Dates$A.D..1[x]
tidy.data$X3[n] <- Hegira.to.Gregorian.Dates$A.D.2[x]
}
x <- 1
}
#with that procedure done, let's export the data
write.csv(tidy.data, file = "Nishapur_Hijri_to_Christian.csv", row.names = FALSE) |
# Project: Text mining VitaePro reviews
# Author: Peer Christensen
# Date: December, 2018
# Task: Word co-occurrence
########## CONTENTS #######################################
# 1. Prepare data
# 2. Plot
library(tidyverse)
library(tidytext)
library(gridExtra)
library(widyr)
library(ggraph)
library(igraph)
library(tm)
########## 1. Prepare data ################################
df <- read_csv("vitaepro_dataCombined.csv")
# Extra tokens to drop on top of the Danish stopword list: brand-name
# variants, stray digits/letters and other review boilerplate.
my_stopwords <- c("så","vitaepro","pro","vita","1","d","2","3","venlig","danmark","vitae","vitapro", "vita", "kan",
tm::stopwords("danish"))
# Tokenize reviews into words, blank out stopwords, then keep only words seen
# more than once. removeWords() replaces matches with "", hence the
# word != "" filter.
# FIX: the original used `df %<>% ...`, but the compound-assignment pipe is
# exported by magrittr, which library(tidyverse) does NOT attach, so the
# script stopped with "could not find function \"%<>%\"". Plain assignment
# with %>% is used instead.
df <- df %>%
unnest_tokens(word, review) %>%
mutate(word = removeWords(word,my_stopwords)) %>%
add_count(word) %>%
filter(n > 1,word != "")
# Within-review (id) word co-occurrence counts, split into positive
# (rating >= 4) and negative (rating < 4) reviews.
word_pairs_pos <- df %>%
filter(rating >= 4) %>%
pairwise_count(word, id, sort = TRUE)
word_pairs_neg <- df %>%
filter(rating < 4) %>%
pairwise_count(word, id, sort = TRUE)
########## 2. Plot ########################################
set.seed(611)  # fixed seed so the force-directed ("fr") layouts are reproducible
# Positive-review network (green edges); only pairs co-occurring > 5 times.
pairs_plot_pos <- word_pairs_pos %>%
filter(n > 5) %>%
graph_from_data_frame() %>%
ggraph(layout = "fr") +
geom_edge_link(aes(edge_alpha = n, edge_width = n), edge_colour = "#00B67A",show.legend=F) +
geom_node_point(size = 4) +
geom_node_text(aes(label = name), repel = TRUE,
point.padding = unit(0.2, "lines")) +
theme_void()
# Negative-review network (red edges); slightly lower co-occurrence threshold.
pairs_plot_neg <- word_pairs_neg %>%
filter(n >= 4) %>%
graph_from_data_frame() %>%
ggraph(layout = "fr") +
geom_edge_link(aes(edge_alpha = n, edge_width = n), edge_colour = "#FF3722",show.legend=F) +
geom_node_point(size = 4) +
geom_node_text(aes(label = name), repel = TRUE,
point.padding = unit(0.2, "lines")) +
theme_void()
grid.arrange(pairs_plot_pos, pairs_plot_neg, ncol = 2)
| /word_cooccurrence.R | no_license | PeerChristensen/Customer_review_mining | R | false | false | 1,842 | r | # Project: Text mining VitaePro reviews
# Author: Peer Christensen
# Date: December, 2018
# Task: Word co-occurrence
########## CONTENTS #######################################
# 1. Prepare data
# 2. Plot
library(tidyverse)
library(tidytext)
library(gridExtra)
library(widyr)
library(ggraph)
library(igraph)
library(tm)
########## 1. Prepare data ################################
df <- read_csv("vitaepro_dataCombined.csv")
my_stopwords <- c("så","vitaepro","pro","vita","1","d","2","3","venlig","danmark","vitae","vitapro", "vita", "kan",
tm::stopwords("danish"))
df %<>%
unnest_tokens(word, review) %>%
mutate(word = removeWords(word,my_stopwords)) %>%
add_count(word) %>%
filter(n > 1,word != "")
word_pairs_pos <- df %>%
filter(rating >= 4) %>%
pairwise_count(word, id, sort = TRUE)
word_pairs_neg <- df %>%
filter(rating < 4) %>%
pairwise_count(word, id, sort = TRUE)
########## 1. Plot ########################################
set.seed(611)
pairs_plot_pos <- word_pairs_pos %>%
filter(n > 5) %>%
graph_from_data_frame() %>%
ggraph(layout = "fr") +
geom_edge_link(aes(edge_alpha = n, edge_width = n), edge_colour = "#00B67A",show.legend=F) +
geom_node_point(size = 4) +
geom_node_text(aes(label = name), repel = TRUE,
point.padding = unit(0.2, "lines")) +
theme_void()
pairs_plot_neg <- word_pairs_neg %>%
filter(n >= 4) %>%
graph_from_data_frame() %>%
ggraph(layout = "fr") +
geom_edge_link(aes(edge_alpha = n, edge_width = n), edge_colour = "#FF3722",show.legend=F) +
geom_node_point(size = 4) +
geom_node_text(aes(label = name), repel = TRUE,
point.padding = unit(0.2, "lines")) +
theme_void()
grid.arrange(pairs_plot_pos, pairs_plot_neg, ncol = 2)
|
#:# libraries
# digest : hashing of the (task, learner) pair
# mlr    : task/learner construction and cross-validated resampling
# OpenML : dataset download; farff parses the downloaded ARFF payload
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "qsar-biodeg")
head(dataset$data)
#:# preprocessing
# No transformation is applied; the raw data is used as-is.
head(dataset$data)
#:# model
# Binary classification of `Class` with an xgboost learner using DART boosting;
# predict.type = "prob" so probability-based measures (auc) can be computed.
task = makeClassifTask(id = "task", data = dataset$data, target = "Class")
lrn = makeLearner("classif.xgboost", par.vals = list(booster = "dart", normalize_type = "tree", sample_type = "uniform"), predict.type = "prob")
#:# hash
#:# 0074b139f423fa5c4adc7962f633b1e8
# Fingerprint of the task + learner pair; the #:# marker above records the
# expected value for this configuration.
hash <- digest(list(task, lrn))
hash
#:# audit
# 5-fold cross-validation. Note: r$aggr holds ALL aggregated measures
# (acc, auc, tnr, tpr, ppv, f1), not only accuracy, despite the name ACC.
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
# Record the package versions used for this run.
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
| /models/openml_qsar-biodeg/classification_Class/0074b139f423fa5c4adc7962f633b1e8/code.R | no_license | pysiakk/CaseStudies2019S | R | false | false | 752 | r | #:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "qsar-biodeg")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
task = makeClassifTask(id = "task", data = dataset$data, target = "Class")
lrn = makeLearner("classif.xgboost", par.vals = list(booster = "dart", normalize_type = "tree", sample_type = "uniform"), predict.type = "prob")
#:# hash
#:# 0074b139f423fa5c4adc7962f633b1e8
hash <- digest(list(task, lrn))
hash
#:# audit
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/io.R
\name{get_demeter_file}
\alias{get_demeter_file}
\title{get_demeter_file}
\usage{
get_demeter_file(irrigation_file_path)
}
\arguments{
\item{irrigation_file_path}{Full path to the demeter data file}
}
\description{
get_demeter_file
}
\details{
Load demeter file for irrigation and rainfed crop data
}
\author{
Kristian Nelson (kristian.nelson@pnnl.gov)
}
| /man/get_demeter_file.Rd | permissive | jsta/gamut | R | false | true | 438 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/io.R
\name{get_demeter_file}
\alias{get_demeter_file}
\title{get_demeter_file}
\usage{
get_demeter_file(irrigation_file_path)
}
\arguments{
\item{irrigation_file_path}{FUll path to the demeter data file}
}
\description{
get_demeter_file
}
\details{
Load demeter file for irrigation and rainfed crop data
}
\author{
Kristian Nelson (kristian.nelson@pnnl.gov)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/codecommit_operations.R
\name{codecommit_update_repository_name}
\alias{codecommit_update_repository_name}
\title{Renames a repository}
\usage{
codecommit_update_repository_name(oldName, newName)
}
\arguments{
\item{oldName}{[required] The current name of the repository.}
\item{newName}{[required] The new name for the repository.}
}
\description{
Renames a repository. The repository name must be unique across the calling AWS account. Repository names are limited to 100 alphanumeric, dash, and underscore characters, and cannot include certain characters. The suffix .git is prohibited. For more information about the limits on repository names, see \href{https://docs.aws.amazon.com/codecommit/latest/userguide/limits.html}{Limits} in the AWS CodeCommit User Guide.
See \url{https://www.paws-r-sdk.com/docs/codecommit_update_repository_name/} for full documentation.
}
\keyword{internal}
| /cran/paws.developer.tools/man/codecommit_update_repository_name.Rd | permissive | paws-r/paws | R | false | true | 973 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/codecommit_operations.R
\name{codecommit_update_repository_name}
\alias{codecommit_update_repository_name}
\title{Renames a repository}
\usage{
codecommit_update_repository_name(oldName, newName)
}
\arguments{
\item{oldName}{[required] The current name of the repository.}
\item{newName}{[required] The new name for the repository.}
}
\description{
Renames a repository. The repository name must be unique across the calling AWS account. Repository names are limited to 100 alphanumeric, dash, and underscore characters, and cannot include certain characters. The suffix .git is prohibited. For more information about the limits on repository names, see \href{https://docs.aws.amazon.com/codecommit/latest/userguide/limits.html}{Limits} in the AWS CodeCommit User Guide.
See \url{https://www.paws-r-sdk.com/docs/codecommit_update_repository_name/} for full documentation.
}
\keyword{internal}
|
iamb.tobitbs <- function(target, dataset, threshold = 0.05, wei = NULL, heavy = FALSE) {
  ## Backward-selection phase of IAMB for Tobit (censored-response) regression.
  ## internaliamb.tobitbs() is called repeatedly, shrinking the candidate set,
  ## until two consecutive passes keep the same number of variables.
  ##
  ## target    : censored response, passed through to the internal tester.
  ## dataset   : matrix/data.frame of candidate predictors.
  ## threshold : significance level (moved to the log scale internally).
  ## wei       : optional case weights.
  ## heavy     : passed through to internaliamb.tobitbs().
  ##
  ## Returns a list with the surviving variable indices (info), the matrix of
  ## test results (mat), the test name and the final model -- or an
  ## explanatory string when p > n.
  threshold <- log(threshold)   # the internal helper compares log p-values
  dm <- dim(dataset)
  n <- dm[1]  ## sample size
  p <- dm[2]  ## number of variables
  if ( p > n ) {
    res <- paste("The number of variables is higher than the sample size. No backward procedure was attempted")
  } else {
    a1 <- internaliamb.tobitbs( target = target, dataset = dataset, threshold = threshold, wei = wei, p = p, heavy = heavy )
    ind <- 1:p
    a2 <- list()
    i <- 1   # pass counter; previously uninitialised when the first pass kept nothing
    poies <- a1$mat[, 1]   # indices of variables surviving the first pass
    if ( length(poies) > 0 ) {
      ind[-poies] <- 0
      ind <- ind[ind > 0]
      dat <- dataset[, poies, drop = FALSE ]
      a2 <- internaliamb.tobitbs( target = target, dataset = dat, threshold = threshold, wei = wei, p = length(ind), heavy = heavy )
      poies <- a2$mat[, 1]
      ind[-poies] <- 0
      ind <- ind[ind > 0]
      dat <- dat[, poies, drop = FALSE]
      i <- 2
    } else {
      ind <- NULL
      a2$mat <- NULL
    }
    ## NROW() treats NULL as having 0 rows, so the loop condition no longer
    ## errors when the first pass eliminated every variable (the previous
    ## length(a2$mat[, 1]) failed with "incorrect number of dimensions" on a
    ## NULL matrix).
    while ( NROW(a1$mat) - NROW(a2$mat) != 0 ) {
      i <- i + 1
      a1 <- a2
      a2 <- internaliamb.tobitbs( target = target, dataset = dat, threshold = threshold, wei = wei, p = length(ind), heavy = heavy )
      poies <- a2$mat[, 1]
      if ( length(poies) > 0 ) {
        ind[-poies] <- 0
        ind <- ind[ind > 0]
        dat <- dat[, poies, drop = FALSE]
      } else {
        ind <- NULL
        dat <- NULL
      }
    }
    res <- list(info = ind, mat = a2$mat, ci_test = "testIndTobit", final = a2$final )
  }
  res
}
| /R/iamb.tobitbs.R | no_license | JokerWhy233/MXM | R | false | false | 1,614 | r | iamb.tobitbs <- function(target, dataset, threshold = 0.05, wei = NULL, heavy = FALSE) {
threshold <- log(threshold)
dm <- dim(dataset)
n <- dm[1] ## sample size
p <- dm[2] ## number of variables
if ( p > n ) {
res <- paste("The number of variables is hiher than the sample size. No backward procedure was attempted")
} else {
a1 <- internaliamb.tobitbs( target = target, dataset = dataset, threshold = threshold, wei = wei, p = p, heavy = heavy )
ind <- 1:p
a2 <- list()
poies <- a1$mat[, 1]
if ( length(poies) > 0 ) {
ind[-poies] <- 0
ind <- ind[ind > 0]
dat <- dataset[, poies, drop = FALSE ]
a2 <- internaliamb.tobitbs( target = target, dataset = dat, threshold = threshold, wei = wei, p = length(ind), heavy = heavy )
poies <- a2$mat[, 1]
ind[-poies] <- 0
ind <- ind[ind > 0]
dat <- dat[, poies, drop = FALSE]
i <- 2
} else {
ind <- NULL
a2$mat <- NULL
}
while ( length(a1$mat[, 1]) - length(a2$mat[, 1]) != 0 ) {
i <- i + 1
a1 <- a2
a2 <- internaliamb.tobitbs( target = target, dataset = dat, threshold = threshold, wei = wei, p = length(ind), heavy = heavy )
poies <- a2$mat[, 1]
if ( length(poies) > 0 ) {
ind[-poies] <- 0
ind <- ind[ind > 0]
dat <- dat[, poies, drop = FALSE]
} else {
ind <- NULL
dat <- NULL
}
}
res <- list(info = ind, mat = a2$mat, ci_test = "testIndTobit", final = a2$final )
}
res
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{spike_read_counts}
\alias{spike_read_counts}
\title{spike-in counts, as a long data.frame}
\format{
A data.frame object with
\describe{
\item{frag_grp}{the encoded spike contig name: basepairs_CpGs_GCpercent}
\item{id}{subject from whom cfMeDIP spike reads (column 3) were counted}
\item{read_count}{read coverage for this spike in this subject (column 2)}
}
}
\usage{
spike_read_counts
}
\description{
A data.frame with spike-in results from control samples in the manuscript.
This maps 1:1 onto \code{dedup} using reshape2::melt.
}
\keyword{datasets}
| /man/spike_read_counts.Rd | no_license | alexpcheng/spiky | R | false | true | 659 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{spike_read_counts}
\alias{spike_read_counts}
\title{spike-in counts, as a long data.frame}
\format{
A data.frame object with
\describe{
\item{frag_grp}{the encoded spike contig name: basepairs_CpGs_GCpercent}
\item{id}{subject from whom cfMeDIP spike reads (column 3) were counted}
\item{read_count}{read coverage for this spike in this subject (column 2)}
}
}
\usage{
spike_read_counts
}
\description{
A data.frame with spike-in results from control samples in the manuscript.
This maps 1:1 onto \code{dedup} using reshape2::melt.
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/jd3_x11.R
\name{henderson}
\alias{henderson}
\title{Henderson filter}
\usage{
henderson(y, length, musgrave = TRUE, ic = 4.5)
}
\arguments{
\item{y}{}
\item{length}{}
\item{musgrave}{}
\item{ic}{}
}
\value{
}
\description{
Title
}
| /man/henderson.Rd | no_license | palatej/rjdhighfreq | R | false | true | 302 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/jd3_x11.R
\name{henderson}
\alias{henderson}
\title{Title}
\usage{
henderson(y, length, musgrave = TRUE, ic = 4.5)
}
\arguments{
\item{y}{}
\item{length}{}
\item{musgrave}{}
\item{ic}{}
}
\value{
}
\description{
Title
}
|
# Install any missing packages, then attach them ------------------------------
# requireNamespace() only checks that a package is installed (without
# attaching it), so the library() calls below remain the single place where
# packages are attached.  The previous `if (!require("pkg")) install.packages()`
# pattern attached packages as a side effect of the availability check, which
# the style guides advise against.
pkgs <- c("corrplot", "ggplot2", "caret", "e1071", "car", "stats",
          "gridExtra", "ROCR", "fBasics", "randomForest")
for (pkg in pkgs) {
  if (!requireNamespace(pkg, quietly = TRUE)) install.packages(pkg)
}
# Load libraries
library(caret)
library(ggplot2)
library(corrplot)
library(e1071)
library(car)
library(stats)
library(gridExtra)
library(ROCR)
library(fBasics)
library(randomForest)
# Read database
# (expects HR_comma_sep.csv in the current working directory)
dataset <- read.csv("HR_comma_sep.csv")
# look at data
# Count distinct values per column to spot which variables behave as factors.
sapply(dataset, function(x) length(unique(x)))
#how many unique elements we have. What variable are or can be seen as factors
str(dataset)
summary(dataset)
# Console output recorded below for reference:
#satisfaction_level last_evaluation number_project average_montly_hours time_spend_company Work_accident
#92 65 6 215 8 2
#left promotion_last_5years sales salary
#2 2 10 3
# 'data.frame': 14999 obs. of 10 variables:
# $ satisfaction_level : num 0.38 0.8 0.11 0.72 0.37 0.41 0.1 0.92 0.89 0.42 ...
# $ last_evaluation : num 0.53 0.86 0.88 0.87 0.52 0.5 0.77 0.85 1 0.53 ...
# $ number_project : int 2 5 7 5 2 2 6 5 5 2 ...
# $ average_montly_hours : int 157 262 272 223 159 153 247 259 224 142 ...
# $ time_spend_company : int 3 6 4 5 3 3 4 5 5 3 ...
# $ Work_accident : int 0 0 0 0 0 0 0 0 0 0 ...
# $ left : int 1 1 1 1 1 1 1 1 1 1 ...
# $ promotion_last_5years: int 0 0 0 0 0 0 0 0 0 0 ...
# $ sales : Factor w/ 10 levels "accounting","hr",..: 8 8 8 8 8 8 8 8 8 8 ...
# $ salary : Factor w/ 3 levels "high","low","medium": 2 3 3 2 2 2 2 2 2 2 ...
#Change some variables type at factors
# Binary indicators become factors so models/plots treat them as categorical;
# salary gets an explicit low < medium < high ordering.
dataset$left <- as.factor(dataset$left)
dataset$promotion_last_5years<- as.factor(dataset$promotion_last_5years)
dataset$Work_accident <- as.factor(dataset$Work_accident)
dataset$salary <- ordered(dataset$salary, c("low","medium" ,"high"))
# look at some graphs: historams
# 2 x 3 grid of base-graphics histograms for the numeric columns.
par(mfrow=c(2,3))
hist(dataset$last_evaluation, col="lightyellow", freq = FALSE, main = paste('Last Evaluation'), xlab = "x")
hist(dataset$satisfaction_level, col="lightyellow", breaks = 10, freq = FALSE,main = paste('Level of Satisfaction'), xlab = "x")
hist(dataset$average_montly_hours, col="lightyellow", freq = FALSE, main = paste('Monthly hours'), xlab = "x")
hist(dataset$number_project, col="lightyellow",breaks = 5, freq = FALSE, main = paste('Number of Projects'), xlab = "x")
hist(dataset$time_spend_company, col="lightyellow", freq = FALSE, main = paste('Time at company'), xlab = "x")
#density for different factors
# For each numeric variable, stacked density plots conditioned first on the
# left/stayed factor, then on salary level.
g1<-ggplot(dataset, aes(x = last_evaluation, colour = factor(left))) +
geom_density() + ggtitle("Last Evaluation")
g2<-ggplot(dataset, aes(x = last_evaluation, colour = factor(salary))) +
geom_density() + ggtitle(" ")
grid.arrange(g1,g2, nrow=2, ncol=1)
g1<-ggplot(dataset, aes(x = satisfaction_level, colour = factor(left))) +
geom_density() + ggtitle("Level of Satisfaction")
g2<-ggplot(dataset, aes(x = satisfaction_level, colour = factor(salary))) +
geom_density() + ggtitle(" ")
grid.arrange(g1,g2, nrow=2, ncol=1)
g1<-ggplot(dataset, aes(x = average_montly_hours, colour = factor(left))) +
geom_density() + ggtitle("Monthly hours")
g2<-ggplot(dataset, aes(x = average_montly_hours, colour = factor(salary))) +
geom_density() + ggtitle(" ")
grid.arrange(g1,g2, nrow=2, ncol=1)
#The graphs of density for variables differ for different left factors, but have almost the same type for different salary factors.
#From graphs none of these variables are normal. We can also check null hypothesis that the population is normally distributed
#with such tests as ad.test(), shapiro.test(),lillie.test() or others
# A 33% stratified subsample keeps n <= 5000, the upper limit of the
# Shapiro-Wilk implementation.
set.seed(123)
split = createDataPartition(y=dataset$left, p=0.33, list=FALSE)
smallsample <- dataset[split, ]
print(shapiroTest(smallsample$last_evaluation)) # works for size of sample <=5000
# NOTE(review): feeding unique() values to the KS test changes the tested
# distribution (duplicates are discarded) -- confirm this is intentional.
print(ksnormTest(unique(dataset$last_evaluation)))# works only for unique elements
print(adTest(dataset$last_evaluation))
print(lillieTest(dataset$last_evaluation))
# Shapiro - Wilk Normality Test
# Test Results:
# STATISTIC:
# W: 0.9507
# P VALUE:
# < 2.2e-16
# One-sample Kolmogorov-Smirnov test
# Test Results:
# STATISTIC:
# D: 0.6406
# P VALUE:
# Alternative Two-Sided: < 2.2e-16
# Alternative Less: < 2.2e-16
# Alternative Greater: 0.03385
#
# Anderson - Darling Normality Test
# Test Results:
# STATISTIC:
# A: 221.1229
# P VALUE:
# < 2.2e-16
#
# Lilliefors (KS) Normality Test
# Test Results:
# STATISTIC:
# D: 0.0875
# P VALUE:
# < 2.2e-16
# For other variables we have the same result: null hypotesis is rejected
# analysis of dependencies that affect leaves.
# 2-D density contours over scatter points; point colour encodes the numeric
# factor code of `left`, so leavers and stayers can be compared visually.
g1<-ggplot(dataset, aes(x =average_montly_hours, y = time_spend_company))+
geom_point(color = as.numeric(dataset$left))+
geom_density2d()+
labs(title="The probability destribution of leaving \n (red points show who left)", x = "Avrenge hours per month", y = "Years in the company")
g2<-ggplot(dataset, aes(x =last_evaluation, y = satisfaction_level))+
geom_point(color = as.numeric(dataset$left))+
geom_density2d()+
labs(x="The level of the last evaluation", y = "The level of employee satisfaction",
title = "The probability destribution of leaving")
grid.arrange(g1,g2, nrow=2, ncol=1)
# check correlation for numeric variables
par(mfrow=c(1,1))
num.cols <- sapply(dataset,is.numeric)
cor.data <- cor(dataset[,num.cols])
# visualisation of corrolation with corrlot
corrplot(cor.data, method = "pie")
# we can see that pairs (last_evaluation, number_project), (last_evaluation, average_montly_hours)
#(average_montly_hours, number_project) have the biggest correlation, these coefficients really differ from 0:
# Pearson significance tests for the three most correlated pairs.
cor.test(dataset$last_evaluation,dataset$number_project)
cor.test(dataset$last_evaluation,dataset$average_montly_hours)
cor.test(dataset$number_project,dataset$average_montly_hours)
# we remember that coefficient tells us only about linear dependence. And between these variables there are not linear dependence.
# but could be nonlinear
#Predicshion who are going to leave
#Split data training : testing like 3:1
# createDataPartition() stratifies on `left`, preserving the class balance
# in both partitions.
set.seed(123)
split = createDataPartition(y=dataset$left, p=0.75, list=FALSE)
training <- dataset[split, ]
testing <- dataset[-split,]
#MODEL 1 NAIVE BAYES
# Column 7 of the data is the target `left`; it is dropped from the
# predictors when scoring the hold-out set.
set.seed(123)
modelFit <- naiveBayes(left ~. , data = training )
# type = "raw" returns class probabilities; column 2 is P(left = 1).
prediction_prob <- predict(modelFit, newdata = testing[-7], type = "raw")
prediction_bayes <- prediction_prob[,2]
# Hard labels at the usual 0.5 threshold, as a factor for confusionMatrix().
y_hat <- ifelse(prediction_bayes<0.5, 0, 1)
y_hat <- as.factor(y_hat)
confusionMatrix(y_hat, testing$left)
# Confusion Matrix and Statistics
#
# Reference
# Prediction 0 1
# 0 2625 365
# 1 232 527
#
# Accuracy : 0.8408
# 95% CI : (0.8286, 0.8523)
# No Information Rate : 0.7621
# P-Value [Acc > NIR] : < 2.2e-16
#
# Kappa : 0.5371
# Mcnemar's Test P-Value : 6.575e-08
#
# Sensitivity : 0.9188
# Specificity : 0.5908
# Pos Pred Value : 0.8779
# Neg Pred Value : 0.6943
# Prevalence : 0.7621
# Detection Rate : 0.7002
# Detection Prevalence : 0.7975
# Balanced Accuracy : 0.7548
#Accuracy : 0.8408 means that we predict correctly 84% of results about who left and stayed
# Sensitivity : 0.9188 means that we predict correct result 92% of stayed people
# Specificity : 0.5908 means that we predict correct result 59% of left people
#Build CAP curve
# Cumulative Accuracy Profile (CAP) curve.
#
# lf   : factor of observed outcomes with levels c("0", "1") ("1" = left).
# yhat : numeric vector of predicted probabilities of leaving, same length.
#
# Fixes vs. the original version:
#  * the arguments are no longer clobbered by the globals testing$left /
#    prediction_bayes (leftover debug lines), so the function works for any
#    model's predictions;
#  * sorting uses cap[, "predicted"] -- `order(-predicted)` referenced an
#    object that does not exist in the function's scope;
#  * the cumulative-gain column is built up front with cumsum() instead of
#    assigning to a non-existent 4th matrix column inside a loop, which
#    raised "subscript out of bounds".
#
# Draws the CAP curve and invisibly returns the matrix behind it, with
# columns left, predicted, Percent_of_Sample and Gain.
CAP_curve <- function(lf, yhat) {
  left <- as.numeric(lf) - 1          # factor levels "0"/"1" -> 0/1
  ord <- order(-yhat)                 # most confident "left" predictions first
  left <- left[ord]
  nraws <- length(left)
  nleft <- sum(left)
  cap <- cbind(left = left,
               predicted = yhat[ord],
               Percent_of_Sample = seq_len(nraws) / nraws,
               Gain = cumsum(left) / nleft)
  perc <- nleft / nraws
  plot(x = cap[, 3], y = cap[, 4], col = "red", lwd = 0.3,
       xlab = "Percent of Sample", ylab = "Predicted Left")
  lines(c(0, 1), c(0, 1), col = "gray", lwd = 2)   # random-model baseline
  text(0.6, 0.2, paste("Percent people left is ", round(perc, 2) * 100, sep = "", "%"), cex = 1.4)
  title("CAP Curve")
  invisible(cap)
}
CAP_curve(testing$left, prediction_bayes)
#Build ROC curve (it shows )
par(mfrow=c(1,1))
# ROCR: wrap scores + labels, then extract TPR/FPR pairs and the AUC.
ROCRpred = prediction(prediction_bayes, testing$left)
ROCRperf = performance(ROCRpred, "tpr", "fpr")
auc <- slot(performance(ROCRpred, "auc"), "y.values")[[1]] # Area Under Curve
plot(ROCRperf, colorize=TRUE)
abline(h=seq(0,1,0.05), v=seq(0,1,0.05), col = "lightgray", lty = "dotted")
lines(c(0,1),c(0,1), col = "gray", lwd =2)   # chance diagonal
text(0.6,0.2,paste("AUC=", round(auc,4), sep=""), cex=1.4)
title("ROC Curve Bayes")
#MODEL 2 train with random forest model (packege Random Forest)
# Same train/test split as model 1; column 7 (`left`) is removed from the
# predictor set on both sides.
set.seed(123)
rf.model <- randomForest(x = training[-7], y = training$left)
rf.predict <- predict(rf.model, testing[-7])
confusionMatrix(rf.predict, testing$left)
# Confusion Matrix and Statistics
#
# Reference
# Prediction 0 1
# 0 2852 29
# 1 5 863
#
# Accuracy : 0.9909
# 95% CI : (0.9873, 0.9937)
# No Information Rate : 0.7621
# P-Value [Acc > NIR] : < 2.2e-16
#
# Kappa : 0.9748
# Mcnemar's Test P-Value : 7.998e-05
#
# Sensitivity : 0.9982
# Specificity : 0.9675
# Pos Pred Value : 0.9899
# Neg Pred Value : 0.9942
# Prevalence : 0.7621
# Detection Rate : 0.7607
# Detection Prevalence : 0.7685
# Balanced Accuracy : 0.9829
| /Final.R | no_license | MiG-Kharkov/HR-training | R | false | false | 10,150 | r | # Install packages if we need it
if (!require("corrplot")) install.packages("corrplot")
if (!require("ggplot2")) install.packages("ggplot2")
if (!require("caret")) install.packages("caret")
if (!require("e1071")) install.packages("e1071")
if (!require("car")) install.packages("car")
if (!require("stats")) install.packages("stats")
if (!require("gridExtra")) install.packages("gridExtra")
if (!require("ROCR")) install.packages("ROCR")
if (!require("fBasics")) install.packages("fBasics")
if (!require("randomForest")) install.packages("randomForest")
# Load librarys
library(caret)
library(ggplot2)
library(corrplot)
library(e1071)
library(car)
library(stats)
library(gridExtra)
library(ROCR)
library(fBasics)
library(randomForest)
# Read database
dataset <- read.csv("HR_comma_sep.csv")
# look at data
sapply(dataset, function(x) length(unique(x)))
#how many unique elements we have. What variable are or can be seen as factors
str(dataset)
summary(dataset)
#satisfaction_level last_evaluation number_project average_montly_hours time_spend_company Work_accident
#92 65 6 215 8 2
#left promotion_last_5years sales salary
#2 2 10 3
# 'data.frame': 14999 obs. of 10 variables:
# $ satisfaction_level : num 0.38 0.8 0.11 0.72 0.37 0.41 0.1 0.92 0.89 0.42 ...
# $ last_evaluation : num 0.53 0.86 0.88 0.87 0.52 0.5 0.77 0.85 1 0.53 ...
# $ number_project : int 2 5 7 5 2 2 6 5 5 2 ...
# $ average_montly_hours : int 157 262 272 223 159 153 247 259 224 142 ...
# $ time_spend_company : int 3 6 4 5 3 3 4 5 5 3 ...
# $ Work_accident : int 0 0 0 0 0 0 0 0 0 0 ...
# $ left : int 1 1 1 1 1 1 1 1 1 1 ...
# $ promotion_last_5years: int 0 0 0 0 0 0 0 0 0 0 ...
# $ sales : Factor w/ 10 levels "accounting","hr",..: 8 8 8 8 8 8 8 8 8 8 ...
# $ salary : Factor w/ 3 levels "high","low","medium": 2 3 3 2 2 2 2 2 2 2 ...
# --- Data preparation ---
# Convert the binary indicator columns to factors; `salary` becomes an ordered
# factor (low < medium < high) so its natural ordering is respected downstream.
dataset$left <- as.factor(dataset$left)
dataset$promotion_last_5years<- as.factor(dataset$promotion_last_5years)
dataset$Work_accident <- as.factor(dataset$Work_accident)
dataset$salary <- ordered(dataset$salary, c("low","medium" ,"high"))
# --- Exploratory histograms (density scale), 2x3 panel layout ---
par(mfrow=c(2,3))
hist(dataset$last_evaluation, col="lightyellow", freq = FALSE, main = paste('Last Evaluation'), xlab = "x")
hist(dataset$satisfaction_level, col="lightyellow", breaks = 10, freq = FALSE,main = paste('Level of Satisfaction'), xlab = "x")
hist(dataset$average_montly_hours, col="lightyellow", freq = FALSE, main = paste('Monthly hours'), xlab = "x")
hist(dataset$number_project, col="lightyellow",breaks = 5, freq = FALSE, main = paste('Number of Projects'), xlab = "x")
hist(dataset$time_spend_company, col="lightyellow", freq = FALSE, main = paste('Time at company'), xlab = "x")
# --- Density curves of each predictor, split by attrition (`left`, top panel)
# and by `salary` (bottom panel) ---
g1<-ggplot(dataset, aes(x = last_evaluation, colour = factor(left))) +
  geom_density() + ggtitle("Last Evaluation")
g2<-ggplot(dataset, aes(x = last_evaluation, colour = factor(salary))) +
  geom_density() + ggtitle(" ")
grid.arrange(g1,g2, nrow=2, ncol=1)
g1<-ggplot(dataset, aes(x = satisfaction_level, colour = factor(left))) +
  geom_density() + ggtitle("Level of Satisfaction")
g2<-ggplot(dataset, aes(x = satisfaction_level, colour = factor(salary))) +
  geom_density() + ggtitle(" ")
grid.arrange(g1,g2, nrow=2, ncol=1)
g1<-ggplot(dataset, aes(x = average_montly_hours, colour = factor(left))) +
  geom_density() + ggtitle("Monthly hours")
g2<-ggplot(dataset, aes(x = average_montly_hours, colour = factor(salary))) +
  geom_density() + ggtitle(" ")
grid.arrange(g1,g2, nrow=2, ncol=1)
# The density curves differ clearly between the `left` groups but look almost
# identical across the `salary` groups.
# None of these variables look normal on the plots; normality is checked
# formally below with tests such as ad.test(), shapiro.test(), lillie.test().
# --- Formal normality tests -------------------------------------------------
# Shapiro-Wilk only accepts n <= 5000, so it is run on a ~33% subsample.
set.seed(123)
split = createDataPartition(y=dataset$left, p=0.33, list=FALSE)
smallsample <- dataset[split, ]
print(shapiroTest(smallsample$last_evaluation)) # works for size of sample <=5000
print(ksnormTest(unique(dataset$last_evaluation)))# works only for unique elements
print(adTest(dataset$last_evaluation))
print(lillieTest(dataset$last_evaluation))
# Saved output of the four tests (all reject normality, p < 2.2e-16):
# Shapiro - Wilk Normality Test
# Test Results:
#   STATISTIC:
#     W: 0.9507
#   P VALUE:
#     < 2.2e-16
# One-sample Kolmogorov-Smirnov test
# Test Results:
#   STATISTIC:
#     D: 0.6406
#   P VALUE:
#     Alternative Two-Sided: < 2.2e-16
#     Alternative      Less: < 2.2e-16
#     Alternative   Greater: 0.03385
#
# Anderson - Darling Normality Test
# Test Results:
#   STATISTIC:
#     A: 221.1229
#   P VALUE:
#     < 2.2e-16
#
# Lilliefors (KS) Normality Test
# Test Results:
#   STATISTIC:
#     D: 0.0875
#   P VALUE:
#     < 2.2e-16
# For the other numeric variables the result is the same: normality rejected.
# --- Analysis of dependencies that affect leaving ---
# 2-D scatter + density plots; point colour encodes `left` (red = left).
g1<-ggplot(dataset, aes(x =average_montly_hours, y = time_spend_company))+
  geom_point(color = as.numeric(dataset$left))+
  geom_density2d()+
  labs(title="The probability destribution of leaving \n (red points show who left)", x = "Avrenge hours per month", y = "Years in the company")
g2<-ggplot(dataset, aes(x =last_evaluation, y = satisfaction_level))+
  geom_point(color = as.numeric(dataset$left))+
  geom_density2d()+
  labs(x="The level of the last evaluation", y = "The level of employee satisfaction",
       title = "The probability destribution of leaving")
grid.arrange(g1,g2, nrow=2, ncol=1)
# --- Pairwise correlation of the numeric variables ---
par(mfrow=c(1,1))
num.cols <- sapply(dataset,is.numeric)
cor.data <- cor(dataset[,num.cols])
# Visualisation of the correlation matrix with corrplot
corrplot(cor.data, method = "pie")
# The pairs (last_evaluation, number_project), (last_evaluation, average_montly_hours)
# and (average_montly_hours, number_project) have the largest correlations,
# and these coefficients really differ from 0:
cor.test(dataset$last_evaluation,dataset$number_project)
cor.test(dataset$last_evaluation,dataset$average_montly_hours)
cor.test(dataset$number_project,dataset$average_montly_hours)
# Remember the coefficient only measures linear dependence; there is no strong
# linear dependence between these variables, but there could be nonlinear.
# --- Prediction of who is going to leave ---
# Split the data into training : testing as 3 : 1 (stratified on `left`).
set.seed(123)
split = createDataPartition(y=dataset$left, p=0.75, list=FALSE)
training <- dataset[split, ]
testing <- dataset[-split,]
# MODEL 1: NAIVE BAYES
set.seed(123)
modelFit <- naiveBayes(left ~. , data = training )
# Column 7 is dropped before predicting; NOTE(review): assumed to be the
# `left` target column -- confirm against the column order of `dataset`.
prediction_prob <- predict(modelFit, newdata = testing[-7], type = "raw")
# Second column of the raw output = posterior probability of class "1" (left).
prediction_bayes <- prediction_prob[,2]
# Threshold at 0.5 to obtain a hard class label.
y_hat <- ifelse(prediction_bayes<0.5, 0, 1)
y_hat <- as.factor(y_hat)
confusionMatrix(y_hat, testing$left)
# Saved output:
# Confusion Matrix and Statistics
#
#           Reference
# Prediction    0    1
#          0 2625  365
#          1  232  527
#
#                Accuracy : 0.8408
#                  95% CI : (0.8286, 0.8523)
#     No Information Rate : 0.7621
#     P-Value [Acc > NIR] : < 2.2e-16
#
#                   Kappa : 0.5371
#  Mcnemar's Test P-Value : 6.575e-08
#
#             Sensitivity : 0.9188
#             Specificity : 0.5908
#          Pos Pred Value : 0.8779
#          Neg Pred Value : 0.6943
#              Prevalence : 0.7621
#          Detection Rate : 0.7002
#    Detection Prevalence : 0.7975
#       Balanced Accuracy : 0.7548
# Accuracy 0.8408: 84% of all stay/leave outcomes are predicted correctly.
# Sensitivity 0.9188: 92% of the people who stayed are predicted correctly.
# Specificity 0.5908: 59% of the people who left are predicted correctly.
# Build CAP curve
# Cumulative Accuracy Profile: rank observations by predicted probability
# (highest first) and plot the cumulative share of actual leavers captured
# against the share of the sample inspected, with the random-selection
# diagonal as baseline.
#   lf   - factor of actual outcomes with levels "0"/"1" (e.g. testing$left)
#   yhat - numeric vector of predicted probabilities of leaving, same length
# Returns (invisibly) a data frame of the plotted coordinates.
# (Fixed: the arguments used to be overwritten by globals, the ranking
# referenced an undefined variable `predicted`, and the accumulation loop
# wrote to a non-existent 4th matrix column.)
CAP_curve <- function(lf, yhat){
  # Actual outcomes as 0/1; factor levels "0"/"1" code as 1/2, hence the -1.
  left <- as.numeric(lf) - 1
  # Rank from the highest to the lowest predicted probability.
  left <- left[order(yhat, decreasing = TRUE)]
  nraws <- length(left)
  nleft <- sum(left)
  Percent_of_Sample <- seq_len(nraws)/nraws
  # Cumulative share of all leavers captured after inspecting each case.
  Percent_of_Left <- cumsum(left)/nleft
  perc <- nleft/nraws
  plot(x = Percent_of_Sample, y = Percent_of_Left, col = "red", lwd =0.3,
       xlab = "Percent of Sample", ylab = "Predicted Left")
  lines(c(0,1),c(0,1), col = "gray", lwd =2)  # random-selection baseline
  text(0.6,0.2,paste("Percent people left is ", round(perc,2)*100, sep="", "%" ), cex=1.4)
  title("CAP Curve")
  invisible(data.frame(Percent_of_Sample = Percent_of_Sample,
                       Percent_of_Left = Percent_of_Left))
}
CAP_curve(testing$left, prediction_bayes)
# Build ROC curve (true-positive rate vs false-positive rate, ROCR package)
par(mfrow=c(1,1))
ROCRpred = prediction(prediction_bayes, testing$left)
ROCRperf = performance(ROCRpred, "tpr", "fpr")
auc <- slot(performance(ROCRpred, "auc"), "y.values")[[1]] # Area Under Curve
plot(ROCRperf, colorize=TRUE)
abline(h=seq(0,1,0.05), v=seq(0,1,0.05), col = "lightgray", lty = "dotted")
lines(c(0,1),c(0,1), col = "gray", lwd =2)   # chance-level diagonal
text(0.6,0.2,paste("AUC=", round(auc,4), sep=""), cex=1.4)
title("ROC Curve Bayes")
# MODEL 2: train with a random forest model (package randomForest)
# NOTE(review): column 7 is dropped as the target -- confirm it is `left`.
set.seed(123)
rf.model <- randomForest(x = training[-7], y = training$left)
rf.predict <- predict(rf.model, testing[-7])
confusionMatrix(rf.predict, testing$left)
# Saved output (random forest clearly outperforms Naive Bayes here):
# Confusion Matrix and Statistics
#
#           Reference
# Prediction    0    1
#          0 2852   29
#          1    5  863
#
#                Accuracy : 0.9909
#                  95% CI : (0.9873, 0.9937)
#     No Information Rate : 0.7621
#     P-Value [Acc > NIR] : < 2.2e-16
#
#                   Kappa : 0.9748
#  Mcnemar's Test P-Value : 7.998e-05
#
#             Sensitivity : 0.9982
#             Specificity : 0.9675
#          Pos Pred Value : 0.9899
#          Neg Pred Value : 0.9942
#              Prevalence : 0.7621
#          Detection Rate : 0.7607
#    Detection Prevalence : 0.7685
#       Balanced Accuracy : 0.9829
|
# Boundary-condition (meteorological) uncertainty for the Feeagh example:
# run a five-model lake ensemble n_mem times with randomly perturbed
# wind-speed and short-wave-radiation scaling factors, then summarise the
# spread of the simulated water temperatures.
setwd(dirname(rstudioapi::getSourceEditorContext()$path))
# One-off setup: install the model wrappers from GitHub.
#install.packages("remotes")
remotes::install_github("GLEON/rLakeAnalyzer")
remotes::install_github("aemon-j/GLM3r", ref = "v3.1.1")
remotes::install_github("USGS-R/glmtools", ref = "ggplot_overhaul")
remotes::install_github("aemon-j/FLakeR", ref = "inflow")
remotes::install_github("aemon-j/GOTMr")
remotes::install_github("aemon-j/gotmtools")
remotes::install_github("aemon-j/SimstratR")
remotes::install_github("aemon-j/MyLakeR")
# Reset the session: clear the workspace, close graphics devices, clear console.
rm(list=ls())
graphics.off()
cat("\14")
# Load libraries
library(gotmtools)
library(LakeEnsemblR)
library(ggplot2)
library(ggpubr)
library(FME)
library(reshape2)
library(readr)
library(lubridate)
setwd("feeagh") # Change working directory to example folder
# Observed water-temperature profiles; NOTE(review): `wtemp` and `start` are
# loaded/defined here but not used further down in this script -- confirm.
wtemp <- read_csv("LakeEnsemblR_wtemp_profile_standard.csv")
start <- "2013-06-12 00:00:00"
ncdf <- file.path("output", paste0("output_bc", ".nc"))  # ensemble output netCDF
config_file <- 'LakeEnsemblR_bc_uncert.yaml'
model <- c("FLake", "GLM", "GOTM", "Simstrat", "MyLake")
n_mem <- 100      # number of ensemble members
met_vari <- 0.44  # perturbation magnitude for the met scaling factors
export_config(config_file, model)
# Draw one multiplicative wind and swr scaling factor per member, centred on 1.
# NOTE(review): rnorm()'s third argument is the standard deviation, so
# met_vari^2 supplies a variance where an sd may have been intended -- confirm.
set.seed(123)
samp_fact <- data.frame(wind = rnorm(n_mem, 1, met_vari^2),
                        swr = rnorm(n_mem, 1, met_vari^2))
# For each member: write its scaling factors into the YAML config for every
# model, re-export only the meteo forcing, and run the ensemble; add = FALSE
# on the first iteration creates the netCDF, later runs append to it.
for(i in seq_len(n_mem)) {
  for (m in model) {
    input_yaml_multiple(file = config_file, key1 = "scaling_factors", key2 = m,
                        key3 = "wind_speed", value = samp_fact$wind[i])
    input_yaml_multiple(file = config_file, key1 = "scaling_factors", key2 = m,
                        key3 = "swr", value = samp_fact$swr[i])
  }
  suppressMessages(export_config(config_file = config_file, model = model, dirs = F, time = F, location = F, output_settings = F,
                                 meteo = T, init_cond = F, extinction = F, inflow = F, model_parameters = F))
  run_ensemble(config_file, model, add = (i != 1), parallel = TRUE)
  print(i)
}
plot_heatmap(ncdf)
# NOTE(review): `out` is indexed as out[member, model, time, depth] below;
# confirm this dimension ordering against the netCDF produced by run_ensemble.
out <- load_var(ncdf, "temp", return = "array")
# 2.5/50/97.5 percentiles across members, per model, at depth index 3.
out_list <- lapply(1:5, function(x) {
  apply(out[, x, , 3], 2, function(y) quantile(y, c(0.025, 0.5, 0.975), na.rm = TRUE))
})
names(out_list) <- dimnames(out)[2][[1]][1:5]
# Slot 6 along the model dimension presumably holds the observations -- confirm.
obs <- out[1, 6, , 3]
obs_df <- data.frame(datetime = as.POSIXct(names(obs), tz = "UTC"),
                     temp = obs)
mlt1 <- reshape2::melt(out_list)
wid <- tidyr::pivot_wider(mlt1, values_from = 3, names_from = 1)
colnames(wid) <- c("datetime", "model", "c025", "c50", "c975")
# NOTE(review): no tz given here, so the local timezone is used, whereas
# UTC is used elsewhere -- confirm this is intentional.
wid$datetime <- as.POSIXct(as.character(wid$datetime))
# 95% uncertainty ribbon per model.
ggplot(wid) +
  geom_ribbon(aes(datetime, ymin = c025, ymax = c975, fill = model), alpha = 0.3)
# Across-member variance through time, per model, at depth index 3.
out_var <- lapply(1:5, function(x) {
  data.frame(datetime = as.POSIXct(dimnames(out)[3][[1]], tz = "UTC"),
             vari = apply(out[, x, , 3], 2, function(y) var(y, na.rm = TRUE)))
})
names(out_var) <- dimnames(out)[2][[1]][1:5]
mlt2 <- reshape2::melt(out_var, id.vars = "datetime")
ggplot(mlt2) +
  geom_line(aes(datetime, value, color = L1))
# Same variance summary at depth index 86 (deeper layer -- confirm meaning).
out_var <- lapply(1:5, function(x) {
  data.frame(datetime = as.POSIXct(dimnames(out)[3][[1]], tz = "UTC"),
             vari = apply(out[, x, , 86], 2, function(y) var(y, na.rm = TRUE)))
})
names(out_var) <- dimnames(out)[2][[1]][1:5]
mlt2 <- reshape2::melt(out_var, id.vars = "datetime")
ggplot(mlt2) +
  geom_line(aes(datetime, value, color = L1))
| /feeagh/uncertainty_bc.R | no_license | tadhg-moore/LER_v2 | R | false | false | 3,257 | r | setwd(dirname(rstudioapi::getSourceEditorContext()$path))
#install.packages("remotes")
remotes::install_github("GLEON/rLakeAnalyzer")
remotes::install_github("aemon-j/GLM3r", ref = "v3.1.1")
remotes::install_github("USGS-R/glmtools", ref = "ggplot_overhaul")
remotes::install_github("aemon-j/FLakeR", ref = "inflow")
remotes::install_github("aemon-j/GOTMr")
remotes::install_github("aemon-j/gotmtools")
remotes::install_github("aemon-j/SimstratR")
remotes::install_github("aemon-j/MyLakeR")
rm(list=ls())
graphics.off()
cat("\14")
# Load libraries
library(gotmtools)
library(LakeEnsemblR)
library(ggplot2)
library(ggpubr)
library(FME)
library(reshape2)
library(readr)
library(lubridate)
setwd("feeagh") # Change working directory to example folder
wtemp <- read_csv("LakeEnsemblR_wtemp_profile_standard.csv")
start <- "2013-06-12 00:00:00"
ncdf <- file.path("output", paste0("output_bc", ".nc"))
config_file <- 'LakeEnsemblR_bc_uncert.yaml'
model <- c("FLake", "GLM", "GOTM", "Simstrat", "MyLake")
n_mem <- 100
met_vari <- 0.44
export_config(config_file, model)
set.seed(123)
samp_fact <- data.frame(wind = rnorm(n_mem, 1, met_vari^2),
swr = rnorm(n_mem, 1, met_vari^2))
for(i in seq_len(n_mem)) {
for (m in model) {
input_yaml_multiple(file = config_file, key1 = "scaling_factors", key2 = m,
key3 = "wind_speed", value = samp_fact$wind[i])
input_yaml_multiple(file = config_file, key1 = "scaling_factors", key2 = m,
key3 = "swr", value = samp_fact$swr[i])
}
suppressMessages(export_config(config_file = config_file, model = model, dirs = F, time = F, location = F, output_settings = F,
meteo = T, init_cond = F, extinction = F, inflow = F, model_parameters = F))
run_ensemble(config_file, model, add = (i != 1), parallel = TRUE)
print(i)
}
plot_heatmap(ncdf)
out <- load_var(ncdf, "temp", return = "array")
out_list <- lapply(1:5, function(x) {
apply(out[, x, , 3], 2, function(y) quantile(y, c(0.025, 0.5, 0.975), na.rm = TRUE))
})
names(out_list) <- dimnames(out)[2][[1]][1:5]
obs <- out[1, 6, , 3]
obs_df <- data.frame(datetime = as.POSIXct(names(obs), tz = "UTC"),
temp = obs)
mlt1 <- reshape2::melt(out_list)
wid <- tidyr::pivot_wider(mlt1, values_from = 3, names_from = 1)
colnames(wid) <- c("datetime", "model", "c025", "c50", "c975")
wid$datetime <- as.POSIXct(as.character(wid$datetime))
ggplot(wid) +
geom_ribbon(aes(datetime, ymin = c025, ymax = c975, fill = model), alpha = 0.3)
out_var <- lapply(1:5, function(x) {
data.frame(datetime = as.POSIXct(dimnames(out)[3][[1]], tz = "UTC"),
vari = apply(out[, x, , 3], 2, function(y) var(y, na.rm = TRUE)))
})
names(out_var) <- dimnames(out)[2][[1]][1:5]
mlt2 <- reshape2::melt(out_var, id.vars = "datetime")
ggplot(mlt2) +
geom_line(aes(datetime, value, color = L1))
out_var <- lapply(1:5, function(x) {
data.frame(datetime = as.POSIXct(dimnames(out)[3][[1]], tz = "UTC"),
vari = apply(out[, x, , 86], 2, function(y) var(y, na.rm = TRUE)))
})
names(out_var) <- dimnames(out)[2][[1]][1:5]
mlt2 <- reshape2::melt(out_var, id.vars = "datetime")
ggplot(mlt2) +
geom_line(aes(datetime, value, color = L1))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Ncpt.R
\name{Ncpt}
\alias{Ncpt}
\title{Ncpt}
\usage{
Ncpt(x, q, df, confirm = FALSE)
}
\arguments{
\item{x}{sample mean}
\item{q}{population mean}
\item{df}{degrees of freedom}
\item{confirm}{defaults to FALSE}
}
\description{
Calculates non-central t
}
\examples{
# supply numeric values for the sample mean, population mean and
# degrees of freedom, e.g.:
Ncpt(x = 25, q = 20, df = 19, confirm = FALSE)
}
\keyword{effect}
\keyword{size}
| /man/Ncpt.Rd | no_license | grne/MOTE | R | false | true | 410 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Ncpt.R
\name{Ncpt}
\alias{Ncpt}
\title{Ncpt}
\usage{
Ncpt(x, q, df, confirm = FALSE)
}
\arguments{
\item{x}{sample mean}
\item{q}{population mean}
\item{df}{degrees of freedom}
\item{confirm}{defaults to FALSE}
}
\description{
Calculates non-central t
}
\examples{
Ncpt(x, q, df, confirm=FALSE)
}
\keyword{effect}
\keyword{size}
|
#' \code{vistributions} package
#'
#' Visualize probability distributions.
#'
#' @docType package
#' @name vistributions
NULL
## Register variable names that only appear inside non-standard-evaluation
## calls (ggplot aesthetics, piped expressions) so R CMD check does not
## flag them as undefined global variables; utils::globalVariables() exists
## from R 2.15.1 onward, hence the version guard.
if (getRversion() >= "2.15.1") {
  utils::globalVariables(c(
    ".", "df", "chi", "x", "y", "z", "x1", "x2"
  ))
}
| /R/vistributions.R | permissive | kaushikmanikonda/vistributions | R | false | false | 314 | r | #' \code{vistributions} package
#'
#' Visualize probability distributions.
#'
#' @docType package
#' @name vistributions
NULL
## quiets concerns of R CMD check re: the .'s that appear in pipelines
if (getRversion() >= "2.15.1") {
utils::globalVariables(c(
".", "df", "chi", "x", "y", "z", "x1", "x2"
))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print.summary.rem.R
\name{print.summary.rem}
\alias{print.summary.rem}
\title{Print summary of distance detection function model object}
\usage{
\method{print}{summary.rem}(x, ...)
}
\arguments{
\item{x}{a summary of \code{ddf} model object}
\item{\dots}{unspecified and unused arguments for S3 consistency}
}
\description{
Provides a brief summary of data and fitted detection probability model
parameters, model selection criterion, and optionally abundance in the
covered (sampled) region and its standard error. What is printed depends
on the corresponding call to summary.
}
\seealso{
\code{\link{summary.rem}}
}
\author{
Jeff Laake
}
\keyword{utility}
| /man/print.summary.rem.Rd | no_license | cran/mrds | R | false | true | 763 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print.summary.rem.R
\name{print.summary.rem}
\alias{print.summary.rem}
\title{Print summary of distance detection function model object}
\usage{
\method{print}{summary.rem}(x, ...)
}
\arguments{
\item{x}{a summary of \code{ddf} model object}
\item{\dots}{unspecified and unused arguments for S3 consistency}
}
\description{
Provides a brief summary of data and fitted detection probability model
parameters, model selection criterion, and optionally abundance in the
covered (sampled) region and its standard error. What is printed depends
on the corresponding call to summary.
}
\seealso{
\code{\link{summary.rem}}
}
\author{
Jeff Laake
}
\keyword{utility}
|
##########################################################################################
# Function: bowlerWktsRunsPlot
# Draws boxplots of runs conceded, grouped by the number of wickets taken,
# for the bowler whose innings data is in `file`; `name` titles the plot.
###########################################################################################
bowlerWktsRunsPlot <- function(file, name="A Googly") {
b <- cleanBowlerData(file)
# Create a color palette of 10 colors
p1 <-colorRampPalette(c("red","blue"))
palette <- p1(10)
# Create boxplots
str <- paste(name,"- Wkts vs Runs given")
boxplot(Runs~Wkts,data=b, xlab="Wickets",ylab="Run conceded",main=str,
col=as.vector(palette))
mtext("Data source-Courtesy:ESPN Cricinfo", side=1, line=4, adj=1.0, cex=0.8, col="blue")
} | /SPL/cricketr-master/cricketr-master/R/bowlerWktsRunsPlot.R | no_license | revanth465/CricketStats | R | false | false | 753 | r | ##########################################################################################
# Function: bowlerWktsRunsPlot
# Draws boxplots of runs conceded, grouped by the number of wickets taken,
# for the bowler whose innings data is in `file`; `name` titles the plot.
###########################################################################################
bowlerWktsRunsPlot <- function(file, name="A Googly") {
    bowler <- cleanBowlerData(file)
    # Ten colour shades grading from red to blue, one per box
    shades <- colorRampPalette(c("red","blue"))(10)
    plotTitle <- paste(name,"- Wkts vs Runs given")
    # One box of runs conceded for every distinct wicket count
    boxplot(Runs~Wkts, data=bowler, xlab="Wickets", ylab="Run conceded",
            main=plotTitle, col=shades)
    mtext("Data source-Courtesy:ESPN Cricinfo", side=1, line=4, adj=1.0, cex=0.8, col="blue")
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cloudwatchlogs_operations.R
\name{cloudwatchlogs_create_export_task}
\alias{cloudwatchlogs_create_export_task}
\title{Creates an export task, which allows you to efficiently export data from
a log group to an Amazon S3 bucket}
\usage{
cloudwatchlogs_create_export_task(taskName, logGroupName,
logStreamNamePrefix, from, to, destination, destinationPrefix)
}
\arguments{
\item{taskName}{The name of the export task.}
\item{logGroupName}{[required] The name of the log group.}
\item{logStreamNamePrefix}{Export only log streams that match the provided prefix. If you don't
specify a value, no prefix filter is applied.}
\item{from}{[required] The start time of the range for the request, expressed as the number of
milliseconds after Jan 1, 1970 00:00:00 UTC. Events with a timestamp
earlier than this time are not exported.}
\item{to}{[required] The end time of the range for the request, expressed as the number of
milliseconds after Jan 1, 1970 00:00:00 UTC. Events with a timestamp
later than this time are not exported.}
\item{destination}{[required] The name of S3 bucket for the exported log data. The bucket must be in
the same AWS region.}
\item{destinationPrefix}{The prefix used as the start of the key for every object exported. If
you don't specify a value, the default is \code{exportedlogs}.}
}
\value{
A list with the following syntax:\preformatted{list(
taskId = "string"
)
}
}
\description{
Creates an export task, which allows you to efficiently export data from
a log group to an Amazon S3 bucket. When you perform a
\code{\link[=cloudwatchlogs_create_export_task]{create_export_task}} operation, you
must use credentials that have permission to write to the S3 bucket that
you specify as the destination.
This is an asynchronous call. If all the required information is
provided, this operation initiates an export task and responds with the
ID of the task. After the task has started, you can use
\code{\link[=cloudwatchlogs_describe_export_tasks]{describe_export_tasks}} to get
the status of the export task. Each account can only have one active
(\code{RUNNING} or \code{PENDING}) export task at a time. To cancel an export
task, use \code{\link[=cloudwatchlogs_cancel_export_task]{cancel_export_task}}.
You can export logs from multiple log groups or multiple time ranges to
the same S3 bucket. To separate out log data for each export task, you
can specify a prefix to be used as the Amazon S3 key prefix for all
exported objects.
Exporting to S3 buckets that are encrypted with AES-256 is supported.
Exporting to S3 buckets encrypted with SSE-KMS is not supported.
}
\section{Request syntax}{
\preformatted{svc$create_export_task(
taskName = "string",
logGroupName = "string",
logStreamNamePrefix = "string",
from = 123,
to = 123,
destination = "string",
destinationPrefix = "string"
)
}
}
\keyword{internal}
| /cran/paws.management/man/cloudwatchlogs_create_export_task.Rd | permissive | TWarczak/paws | R | false | true | 2,940 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cloudwatchlogs_operations.R
\name{cloudwatchlogs_create_export_task}
\alias{cloudwatchlogs_create_export_task}
\title{Creates an export task, which allows you to efficiently export data from
a log group to an Amazon S3 bucket}
\usage{
cloudwatchlogs_create_export_task(taskName, logGroupName,
logStreamNamePrefix, from, to, destination, destinationPrefix)
}
\arguments{
\item{taskName}{The name of the export task.}
\item{logGroupName}{[required] The name of the log group.}
\item{logStreamNamePrefix}{Export only log streams that match the provided prefix. If you don't
specify a value, no prefix filter is applied.}
\item{from}{[required] The start time of the range for the request, expressed as the number of
milliseconds after Jan 1, 1970 00:00:00 UTC. Events with a timestamp
earlier than this time are not exported.}
\item{to}{[required] The end time of the range for the request, expressed as the number of
milliseconds after Jan 1, 1970 00:00:00 UTC. Events with a timestamp
later than this time are not exported.}
\item{destination}{[required] The name of S3 bucket for the exported log data. The bucket must be in
the same AWS region.}
\item{destinationPrefix}{The prefix used as the start of the key for every object exported. If
you don't specify a value, the default is \code{exportedlogs}.}
}
\value{
A list with the following syntax:\preformatted{list(
taskId = "string"
)
}
}
\description{
Creates an export task, which allows you to efficiently export data from
a log group to an Amazon S3 bucket. When you perform a
\code{\link[=cloudwatchlogs_create_export_task]{create_export_task}} operation, you
must use credentials that have permission to write to the S3 bucket that
you specify as the destination.
This is an asynchronous call. If all the required information is
provided, this operation initiates an export task and responds with the
ID of the task. After the task has started, you can use
\code{\link[=cloudwatchlogs_describe_export_tasks]{describe_export_tasks}} to get
the status of the export task. Each account can only have one active
(\code{RUNNING} or \code{PENDING}) export task at a time. To cancel an export
task, use \code{\link[=cloudwatchlogs_cancel_export_task]{cancel_export_task}}.
You can export logs from multiple log groups or multiple time ranges to
the same S3 bucket. To separate out log data for each export task, you
can specify a prefix to be used as the Amazon S3 key prefix for all
exported objects.
Exporting to S3 buckets that are encrypted with AES-256 is supported.
Exporting to S3 buckets encrypted with SSE-KMS is not supported.
}
\section{Request syntax}{
\preformatted{svc$create_export_task(
taskName = "string",
logGroupName = "string",
logStreamNamePrefix = "string",
from = 123,
to = 123,
destination = "string",
destinationPrefix = "string"
)
}
}
\keyword{internal}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.