blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e62aa7bb7fa44c439c68bc258f676b7afab7835b
|
4b889fcf97ce5bd80609ae1ecfa055779bc55b57
|
/20160216/server.R
|
132423988fb540155b626802cfb6f502d6930d30
|
[] |
no_license
|
weitinglin/ShinyPractice2016
|
dbea57c7dad740b9c25d202c3690ad5efb8d8685
|
720b51618eb314cf16ed9a55699f555147b4f876
|
refs/heads/master
| 2021-01-10T01:39:52.954447
| 2016-02-16T15:47:17
| 2016-02-16T15:47:17
| 51,238,294
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,079
|
r
|
server.R
|
#20160209 shiny practice upload file server.R
#By defaut, the file size limit is 5 MB, It can be changed by setting this option.
library(shiny)
options(shiny.maxRequestSize = 9*1024^2)
#loading package
library(dplyr)
library(ggplot2)
library(reshape2)
library(XML)
#define the function
#==================================================================== Function
# Split the raw HTML report lines into per-page chunks.
#
# Lines matching "table" are assumed to come in open/close pairs; each pair
# (2i-1, 2i) delimits one page, returned as element i of the output list.
#
# @param raw_data character vector of HTML source lines.
# @return list of character vectors, one per <table>...</table> span;
#   empty list when no "table" marker is present.
pagecut <- function(raw_data) {
  output <- list()
  tables_index <- grep("table", raw_data)
  # seq_len avoids the 1:0 trap of the original `1:(1/2*length(...))`,
  # which crashed on input containing no "table" markers.
  for (i in seq_len(length(tables_index) %/% 2)) {
    a <- tables_index[2 * i - 1]
    b <- tables_index[2 * i]
    output[[i]] <- raw_data[a:b]
  }
  return(output)
}
#==================================================================== Function
# Split raw_data into consecutive blocks, each starting at an index in `pages`.
#
# Block i spans from pages[i] up to (but excluding) pages[i+1]; the final
# block runs to length(raw_data) - 1, deliberately dropping the trailing
# closing-tag line (e.g. "</table>").
#
# @param pages integer vector of 1-based start indices into raw_data.
# @param raw_data character vector of lines to cut.
# @return list of character vectors, one per start index.
tablecut <- function(pages, raw_data) {
  output <- list()
  # seq_len fixes the original `1:length(pages)` anti-pattern, which
  # iterated over c(1, 0) and crashed when `pages` was empty.
  for (i in seq_len(length(pages))) {
    if (i < length(pages)) {
      a <- pages[i]
      b <- pages[i + 1] - 1
      output[[i]] <- raw_data[a:b]
    } else {
      a <- pages[i]
      # don't include the last line ("/table") in the last block
      output[[i]] <- raw_data[a:(length(raw_data) - 1)]
    }
  }
  return(output)
}
#==================================================================== Function
# Extract the 8-character timestamp that follows the first "<tt>" tag
# on each line (e.g. "xx<tt>20160216..." -> "20160216").
#
# regexpr() and substring() are both vectorized, so the original
# element-by-element loop was redundant; this computes all positions at
# once with identical results (including the no-match case, where
# regexpr returns -1 and substring falls back to positions 3..10, just
# as the loop did).
#
# @param time1_raw character vector of HTML lines containing "<tt>" tags.
# @return character vector of the extracted 8-character substrings.
filter_chr <- function(time1_raw) {
  pos <- regexpr("<tt>", time1_raw) + 4L
  substring(time1_raw, pos, pos + 7L)
}
#==================================================================== Function
# For every sub-table on the requested page, build the vector of line
# indices that readHTMLTable() should parse: the two header lines (1, 2),
# the data rows between the first "<tr valign=top>" marker and the
# second-to-last one, and the page's closing line.
#
# @param page_number index of the page inside `tables`.
# @param tables list of pages as produced by pagecut().
# @return list of integer index vectors, one per sub-table on the page.
readhtmltable_index <- function(page_number, tables) {
  page_lines <- tables[[page_number]]
  date_rows <- grep("Date", page_lines)
  sub_tables <- tablecut(date_rows, page_lines)
  index_list <- vector("list", length(sub_tables))
  for (k in seq_along(sub_tables)) {
    row_hits <- grep("<tr valign=top>", sub_tables[[k]])
    # offsets are relative to the sub-table, so shift by its start line
    first_row <- row_hits[1] + date_rows[k]
    last_row <- rev(row_hits)[2] - 2 + date_rows[k]
    index_list[[k]] <- c(1, 2, first_row:last_row, length(page_lines))
  }
  return(index_list)
}
#==================================================================== Function
# Collect, for each sub-table on a page, the vector of lab-result timestamps.
#
# Falls back to the previous page when the requested page has no "Date"
# header (error flag == 1), in which case only the last table's times are
# returned.
#
# NOTE(review): the tryCatch handlers only print a message and return the
# message string — on an actual error `page1_table` is never assigned, so
# the loop below would fail anyway; confirm whether real recovery was
# intended here.
table_time <- function (page_number,tables) {
time_list <- list()
tables1 <- grep("Date",tables[[page_number]])
tryCatch ( page1_table <- tablecut(tables1,tables[[page_number]]),
warning = function(w) { print("Something Wrong in the data, use the last page time") },
error = function(e) { print("Something Error in the data, use the last page time ") } )
# error acts as a flag: 1 means we fell back to the previous page
error<-0
if ( length(tables1) == 0) {
tables1 <- grep("Date",tables[[page_number-1]])
page1_table <- tablecut(tables1,tables[[page_number-1]])
error <- 1
}
for ( no_table in 1 : length(page1_table)) {
# drop plain "<tr><td" rows so only timestamp/header rows remain
page_1_date_line <- grep("<tr><td",unlist(page1_table[[no_table]]))
temporary<-unlist(page1_table[[no_table]])
temporary<-temporary[-page_1_date_line]
# the position of the first "<tr valign=top>" marker decides where the
# run of timestamp lines starts (every other line, hence by = 2)
if ( grep("<tr valign=top>",temporary)[1] >3 ) {
page_date_line <- grep("<tr valign=top>",temporary)[1]
time1_raw <- c( temporary[seq( 3, (page_date_line-2), by = 2)])
}else{
page_date_line <- grep("<tr valign=top>",temporary)[2]
time1_raw <- c( temporary[seq( (grep("<tr valign=top>",temporary)[1]+2), (page_date_line-2), by = 2)])
}
##the lab data time
time_list[[no_table]] <- filter_chr(time1_raw)
}
if (error == 0){
return( time_list)
} else if (error ==1) {
# NOTE(review): last() is presumably dplyr::last (dplyr is loaded at the
# top of this file) — confirm; base R has no last().
return (last(time_list))
}
}
#==================================================================== Main
# Shiny server: parses an uploaded BIG-5 encoded HTML lab report, extracts the
# selected table plus patient demographics, and exposes the result both as an
# on-screen table and as a CSV download.
#
# The original file duplicated the entire parsing pipeline verbatim in
# output$contents and datasetInput; the shared reactive below removes that
# duplication without changing behavior.
shinyServer(function(input,output){
  datasetInput <- reactive({
    inFile <- input$file1
    # Load the uploaded file and convert BIG-5 -> UTF-8
    raw_data <- readLines(inFile$datapath, encoding = "BIG-5")
    raw_data <- iconv(raw_data, from = "BIG-5", to = "UTF-8")
    tables <- pagecut(raw_data)
    # Separate the selected page's table block
    page_number <- input$pages_number
    index <- readhtmltable_index(page_number, tables)
    no_table <- input$table_number
    # Use the XML package to parse just the relevant lines of the page
    tables_test_index <- readHTMLTable(tables[[page_number]][index[[no_table]]], encoding = "UTF-8", as.data.frame = TRUE)
    # Timestamps for the selected sub-table
    time1 <- table_time(page_number, tables)
    time1 <- time1[[no_table]]
    # Preprocess: pick out the label column (2) and every 4th value column
    names(tables_test_index) <- "table"
    index_table <- c(2, seq(4, 4 * length(time1), by = 4))
    clean_test <- tables_test_index$table[index_table]
    # Keep rows whose first column is non-empty / non-NA
    rule_out <- sapply(clean_test[,1], function(x){ if( is.na(x) == TRUE ){ sum(!is.na(x)) }else{nchar(as.character(x))}}) > 0
    clean_test <- clean_test[rule_out, ]
    # Remove duplicate rows (e.g. "normal plasma mean" repeated)
    duplicat_index <- !duplicated(clean_test[,1], fromLast = TRUE) & !duplicated(clean_test[,1])
    clean_test <- clean_test[duplicat_index, ]
    row.names(clean_test) <- clean_test[,1]
    colnames(clean_test)[1] <- c("Lab_data")
    colnames(clean_test)[2:(length(time1) + 1)] <- time1
    #==================== Fetch the general data of the patient
    # Each value sits 2 lines after its label and a fixed offset after "tt"
    #Name
    name_raw <- raw_data[grep("姓名", raw_data)[1] + 2]
    name <- substring(name_raw, (regexpr("tt", name_raw) + 3), (regexpr("tt", name_raw) + 5))
    #Patient_ID
    id_raw <- raw_data[grep("病歷號", raw_data)[1] + 2]
    id <- substring(id_raw, (regexpr("tt", id_raw) + 3), (regexpr("tt", id_raw) + 9))
    #Gender
    gender_raw <- raw_data[grep("性別", raw_data)[1] + 2]
    gender <- substring(gender_raw, (regexpr("tt", gender_raw)[1] + 3), (regexpr("tt", gender_raw)[1] + 3))
    #Birthday
    birth_raw <- raw_data[grep("生日", raw_data)[1] + 2]
    # NOTE(review): getlabname() is not defined anywhere in this file —
    # confirm it is provided by a sourced helper, otherwise this errors.
    birth <- getlabname(birth_raw)
    # Final long-format data
    clean_melt_test <- melt(clean_test, id.vars = "Lab_data")
    clean_melt_test$ID <- id
    clean_melt_test$BIRTH <- birth
    clean_melt_test$GENDER <- gender
    clean_melt_test
  })
  # On-screen table view: same data as the CSV download
  output$contents <- renderTable({
    datasetInput()
  })
  # CSV download of the parsed dataset, named from the FileName input
  output$downloadData <- downloadHandler(
    filename = function(){paste(input$FileName, '.csv', sep='')},
    content = function(file){
      write.csv(datasetInput(), file)
    })
})
|
eb5787918cf8c2f38414b0d8df576ccb72a15202
|
e7ffb8cb08334a16da20d543ba0b51cf99f1a8f3
|
/run_analysis.R
|
e94253c77a58ce3b0d1ed5506d38f806333115ff
|
[] |
no_license
|
Commander14/tidyingdata
|
0b9df5d6ed66846d37bf9f333ff7af283f988eb3
|
5a864530b89110e2d9479d9d4214a1fe8d3fec61
|
refs/heads/master
| 2021-01-02T22:30:46.331862
| 2014-08-24T21:45:54
| 2014-08-24T21:45:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,857
|
r
|
run_analysis.R
|
# Build the tidy UCI HAR summary: merge train/test sets, label activities,
# keep only mean()/std() features, average them per subject+activity, and
# write the result to stdout.
#
# Expects the "UCI HAR Dataset" directory in the working directory.
analyzeData <- function() {
    # Load all of the applicable files
    base <- "UCI HAR Dataset"
    trainX <- read.table(paste(base, "/train/X_train.txt", sep=""))
    testX <- read.table(paste(base, "/test/X_test.txt", sep=""))
    features <- read.table(paste(base, "/features.txt", sep=""))
    activities <- read.table(paste(base, "/activity_labels.txt", sep=""))
    testY <- read.table(paste(base, "/test/y_test.txt", sep=""))
    trainY <- read.table(paste(base, "/train/y_train.txt", sep=""))
    subjectsTest <- read.table(paste(base, "/test/subject_test.txt", sep=""))
    subjectsTrain <- read.table(paste(base, "/train/subject_train.txt", sep=""))
    # Use the feature names as column names for the raw measurements
    cnames <- features[,2]
    colnames(testX) <- cnames
    colnames(trainX) <- cnames
    # Insert the activities as the first column of both datasets
    testX <- cbind(testY, testX)
    trainX <- cbind(trainY, trainX)
    colnames(testX)[1] <- colnames(trainX)[1] <- "Activity"
    # Insert the subjects as the (new) first column of both datasets
    testX <- cbind(subjectsTest, testX)
    trainX <- cbind(subjectsTrain, trainX)
    colnames(testX)[1] <- colnames(trainX)[1] <- "Subjects"
    # Replace activity codes 1-6 with descriptive labels (e.g. WALKING)
    trainX[,2] <- factor(as.vector(trainX[[2]]), levels=c(1, 2, 3, 4, 5, 6), labels=as.vector(activities[[2]]))
    testX[,2] <- factor(as.vector(testX[[2]]), levels=c(1, 2, 3, 4, 5, 6), labels=as.vector(activities[[2]]))
    # Keep only mean()/std() measurement columns plus Subjects and Activity
    trainX <- trainX[c(grep("mean\\(\\)|std\\(\\)|Subjects|Activity", colnames(trainX)))]
    testX <- testX[c(grep("mean\\(\\)|std\\(\\)|Subjects|Activity", colnames(testX)))]
    # Combine the two tables into one by appending rows onto trainX
    data <- rbind(trainX, testX)
    # Average every measurement column per (subject, activity) pair.
    # FIX: aggregate only the measurement columns (data[, -(1:2)]); the
    # original aggregated the Subjects/Activity columns too (mean() over a
    # factor warns and yields NA) and then tried to delete the duplicates
    # with the wrong, lower-case names `$activity`/`$subjects`, which was a
    # silent no-op that left duplicate columns in the output.
    tidyData <- aggregate(data[, -(1:2)], by=list(data[,1], data[,2]), FUN=mean, na.rm=TRUE, simplify=TRUE)
    # Rename the grouping columns produced by aggregate (Group.1/Group.2)
    colnames(tidyData)[1] <- "Subject"
    colnames(tidyData)[2] <- "Activity"
    # Write out the final tidy output to the TTY.
    # FIX: the argument is `row.names`; the original `row.name=` only worked
    # via partial argument matching.
    write.table(tidyData, row.names=FALSE)
}
|
55a2bc8af07a839df426dc74e29a3c02165c0f1d
|
b47182133fa5eadbeab06735e6a78fcdeaad8094
|
/shiny/server.R
|
ad98ff428cd5ae6963c4b7720895e8ae59345e2c
|
[] |
no_license
|
zhenhuixu/PHMO
|
1d274aae08b2a73c7f8cbaff672d8580c463b745
|
cfaac457050fa4f3c1c81cc8a1db35b850f80500
|
refs/heads/master
| 2022-12-10T21:14:29.009225
| 2020-09-11T16:02:13
| 2020-09-11T16:02:13
| 294,731,880
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 41,986
|
r
|
server.R
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(tidyverse)
library(ggplot2)
library(broom)
library(GGally)
library(shinythemes)
library(kableExtra)
library(plotly)
# Define server logic required to draw a histogram
# Server entry point. The statements below run once per session: they load the
# 2019 patient-level dataset, clean it, derive the primary-care service
# percentages, and join in the practice TIN mapping. `dat19` is then captured
# by every render/reactive closure defined later in this function.
shinyServer(function(input, output, session) {
dat19 <- readRDS("Data/dat_19.rds")
#remove the following PatientKeys - an extreme value (very low cost, impossible days in hospice, >20 MRIs)
dat19 = dat19 %>% filter(PatientKey != 421661, PatientKey != 359067, PatientKey != 142780)
dat19 = dat19 %>% filter(Sex!="Other")
dat19 <- dat19 %>% mutate(PrimaryCare = replace_na(PrimaryCare, "Missing"))
dat19$PrimaryCare <- factor(dat19$PrimaryCare, levels=c("Primary Care", "Specialist", "Missing"))
# Derive each provider type's share of total primary care services
#create percentages of primary care services
dat19 <- dat19 %>%
mutate(`Primary Care Services with a Primary Care Physician (%)`= `Primary Care Services with a Primary Care Physician`/`Total Primary Care Services`) %>%
mutate(`Primary Care Services with a Specialist Physician (%)`= `Primary Care Services with a Specialist Physician`/`Total Primary Care Services`) %>%
mutate(`Primary Care Services with APP (%)` = `Primary Care Services with a Nurse Practitioner/Physician Assistant/Clinical Nurse Specialist`/`Total Primary Care Services`)
# Division by a zero service count yields NaN/NA; treat those as 0%
#replace missing values into 0
dat19 <- dat19 %>%
mutate(`Primary Care Services with a Primary Care Physician (%)` = replace_na(`Primary Care Services with a Primary Care Physician (%)`,0)) %>%
mutate(`Primary Care Services with a Specialist Physician (%)` = replace_na(`Primary Care Services with a Specialist Physician (%)`,0)) %>%
mutate(`Primary Care Services with APP (%)` = replace_na(`Primary Care Services with APP (%)`,0))
#replace NA of CHF and COPD with zero
dat19 <- dat19 %>%
mutate(HCC_85 = replace_na(HCC_85, 0)) %>%
mutate(HCC_111 = replace_na(HCC_111, 0))
#merge with correct TINs number
TINs <- read.csv("Data/Practice_TIN_map.csv")
TINs <- TINs %>% rename(PrimaryPractice = PracticeName) %>%
rename(PrimaryPracticeTIN_mapping = PrimaryPracticeTIN)
dat19 <- dat19 %>% left_join(TINs, by="PrimaryPractice")
#rename null values in Primary Practice to Other
# TaxEntityName arrives as a factor; convert to character so replace_na and
# the literal reassignment below work, then back to factor
dat19$TaxEntityName <- as.character(dat19$TaxEntityName)
dat19 <- dat19 %>% mutate(TaxEntityName = replace_na(TaxEntityName, "Other"))
dat19$TaxEntityName[dat19$TaxEntityName=="DUKE UNIVERSITY HOSPITAL"]="DUKE UNIVERSITY HEALTH SYSTEMS INC"
dat19$TaxEntityName <- as.factor(dat19$TaxEntityName)
# Turn HCC comorbidity codes into readable labels:
# "HCC_85" -> "CHF", "HCC_111" -> "COPD". Vectorized over x.
com_Replace <- function(x){
  label <- gsub("HCC_", "", x)
  label <- gsub("85", "CHF", label)
  label <- gsub("111", "COPD", label)
  return(label)
}
var_Replace <- function(x){
x <- gsub("`","",x)
x <- gsub("UnplannedAdmits","Unplanned Admissions",x)
x <- gsub("Readmits","Readmissions",x)
}
# Interactive heatmap of pre-computed regression coefficients: outcome
# (log cost of care / log cost efficiency) x hospital utilization measure.
# Clicks on this plot (source "heat_plot") drive output$models and
# output$coef below.
# NOTE(review): the coefficients are hard-coded here (CC_coef/CE_coef and
# again in fit_num) — they must be kept in sync with the lm() fits computed
# elsewhere in this app; confirm on any model change.
output$heat <- renderPlotly({
fit_var <- c("Total Hospital Discharges", "Unplanned Admissions", "Readmissions", "ED Visits",
"Computed Tomography (CT) Events", "Magnetic Resonance Imaging (MRI) Events",
"Days in Hospice", "Total Primary Care Services", "Primary Care Services with a Primary Care Physician",
"Skilled Nursing Facility or Unit Discharges")
fit_out <- c("Log(Cost of Care)", "Log(Cost Efficiency)")
CC_coef <- c(0.1588, 0.1459, 0.1630, 0.0345, 0.0380, 0.0515, 0.0019, 0.0087, -0.0034, 0.1118)
CE_coef <- c(0.6021, 0.5533, 0.3249, 0.1632, 0.1834, 0.3419, 0.0073, 0.0713, -0.2087, 0.1326)
# 2 x 10 matrix: row per outcome, column per utilization measure
mat = t(matrix(c(CC_coef, CE_coef), nrow = 10, ncol = 2))
# Hover text: outcome + measure + coefficient for every cell
conditions.text <- as.vector(outer(paste0("Outcome variable:",fit_out,"\n"), paste0("Hospital Utilizations:",fit_var,"\n"), paste0))
fit_num <- c(0.1588, 0.6021, 0.1459, 0.5533, 0.1630, 0.3249, 0.0345, 0.1632, 0.0380, 0.1834,
0.0515, 0.3419, 0.0019, 0.0073, 0.0087, 0.0713, -0.0034, -0.2087, 0.1118, 0.1326)
conditions.text <- paste0(conditions.text,"Magnitude of Association:",fit_num)
text.mat <- matrix(conditions.text, nrow= nrow(mat), ncol = ncol(mat))
# Build a diverging blue-white-red colorscale whose white point sits at 0,
# even though the coefficient range is asymmetric around zero
colorlength <- 100
null_value <- (0 - min(fit_num)) / (max(fit_num) - min(fit_num))
border <- as.integer(null_value * colorlength)
colorscale <- as.list(1:colorlength)
#colorscale below zero
s <- scales::seq_gradient_pal("blue", "white", "Lab")(seq(0,1,length.out=border))
for (i in 1:border) {
colorscale[[i]] <- c((i - 1) / colorlength, s[i])
}
#colorscale above zero
s <- scales::seq_gradient_pal("white", "red", "Lab")(seq(0,1,length.out=colorlength - border))
for (i in 1:(colorlength - border)) {
colorscale[[i + border]] <- c((i + border) / colorlength, s[i])
}
plot_ly(source = "heat_plot") %>%
add_heatmap(
x=fit_var,
y=fit_out,
z=mat,
hoverinfo = 'text',
text = text.mat,
colors = colorRamp(c("blue","white","red")),
colorscale = colorscale,
colorbar = list(len = 1, limits = c(-1,1)))
})
# Coefficient plot for the regression selected by clicking a cell of the
# heatmap above: maps the clicked display labels back to dat19 column
# names, rebuilds the model formula, fits lm(), and plots the
# coefficients (GGally::ggcoef) with display-friendly term names.
output$models <- renderPlot({
clickData <- event_data("plotly_click", source = "heat_plot")
# Nothing clicked yet -> no plot
if (is.null(clickData)) return(NULL)
# Translate the clicked axis labels into variable names used in dat19
out <- gsub("Log\\(Cost of Care\\)","log_Cost_Care",clickData[["y"]])
out <- gsub("Log\\(Cost Efficiency\\)", "log_Cost_Eff", out)
var <- gsub("Unplanned Admissions", "UnplannedAdmits", clickData[["x"]])
var <- gsub("Readmissions", "Readmits", var)
# Choose the adjustment set; two predictors need an extra adjuster
# (total services / total discharges respectively). The leading "`"
# closes the backtick opened in the paste0() below.
if (var=="Primary Care Services with a Primary Care Physician"){
var = "Primary Care Services with a Primary Care Physician (%)"
adjusted_var = "`+Sex+Age+Dual+ESRD+Disabled+`Total Primary Care Services`+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
} else {
if (var=="Skilled Nursing Facility or Unit Discharges"){
adjusted_var = "`+Sex+Age+Dual+ESRD+Disabled+`Total Hospital Discharges`+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
}
else {
adjusted_var = "`+Sex+Age+Dual+ESRD+Disabled+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
}
}
form <- as.formula(paste0(out, "~`", var, adjusted_var))
fit <- lm(form, data = dat19)
plt <- ggcoef(fit, exclude_intercept=TRUE)
# Prettify term labels for display
plt$data$term <- gsub("SexMale", "Sex(Male=1)",plt$data$term)
plt$data$term <- gsub("Cont_Att", "Continuous Attribution",plt$data$term)
plt$data$term <- gsub("UnplannedAdmits", "Unplanned Admissions",plt$data$term)
plt$data$term <- gsub("Readmits", "Readmissions",plt$data$term)
plt$data$term <- gsub("HCC_85", "HCC_CHF",plt$data$term)
plt$data$term <- gsub("HCC_111", "HCC_COPD",plt$data$term)
plt$data$term <- gsub("PrimaryCareSpecialist", "Primary Practice(Specialist=1)",plt$data$term)
plt
})
# HTML narrative for the clicked heatmap cell: refits the same model as
# output$models (the formula-building logic is duplicated from there —
# keep the two in sync) and reports the main coefficient, its 95% CI,
# the adjusted R-squared, and how to read the coefficient plot.
output$coef <- renderUI({
clickData <- event_data("plotly_click", source = "heat_plot")
if (is.null(clickData)) return(NULL)
# Same label -> variable translation as in output$models
out <- gsub("Log\\(Cost of Care\\)","log_Cost_Care",clickData[["y"]])
out <- gsub("Log\\(Cost Efficiency\\)", "log_Cost_Eff", out)
var <- gsub("Unplanned Admissions", "UnplannedAdmits", clickData[["x"]])
var <- gsub("Readmissions", "Readmits", var)
if (var=="Primary Care Services with a Primary Care Physician"){
var = "Primary Care Services with a Primary Care Physician (%)"
adjusted_var = "`+Sex+Age+Dual+ESRD+Disabled+`Total Primary Care Services`+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
} else {
if (var=="Skilled Nursing Facility or Unit Discharges"){
adjusted_var = "`+Sex+Age+Dual+ESRD+Disabled+`Total Hospital Discharges`+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
}
else {
adjusted_var = "`+Sex+Age+Dual+ESRD+Disabled+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
}
}
form <- as.formula(paste0(out, "~`", var, adjusted_var))
fit <- lm(form, data = dat19)
fit.sum <- summary(fit)
# Pretty display name for the narrative text
var <- var_Replace(var)
# fit$coefficients[2] / confint(fit)[2,] is the main predictor (the
# first term after the intercept)
HTML(paste(
"<b>Coefficients: </b>", "The coefficient of ", var, " is ", round(fit$coefficients[2], 3),
". The 95% confidence interval is (", round(confint(fit)[2,1], 3), ",", round(confint(fit)[2,2], 3), ").","<p/>",
var, " and other variables (comorbidities, sex, age and etc.) could explain ", round(fit.sum$adj.r.squared, 3)*100,
"% variability of ", out,"<p/>",
"<b>Note: </b>", "The dots represent the coefficients of these variables and the width of each line represents the confidence interval. ","<p/>",
"The more further the dot is from the dashed line, the larger the effect of the variable is.","<p/>",
"If the line around the dot crosses the vertical dashed line, this variable is not significant.",
sep=""
))
})
# Overall-population savings table: fits the selected outcome ~ utilization
# model, scales the chosen utilization measure down by input$prop,
# re-predicts, and reports the population/individual reduction for the
# filtered beneficiary subset as a styled kable.
output$tab1 <- function(){
if (input$OR=="`Primary Care Services with a Primary Care Physician`"){
var = "`Primary Care Services with a Primary Care Physician (%)`"
adjusted_var = "+Sex+Age+Dual+ESRD+Disabled+`Total Primary Care Services`+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
} else {
var=input$OR
if (input$OR=="`Skilled Nursing Facility or Unit Discharges`"){
adjusted_var = "+Sex+Age+Dual+ESRD+Disabled+`Total Hospital Discharges`+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
}
else {
adjusted_var = "+Sex+Age+Dual+ESRD+Disabled+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
}
}
form <- as.formula(paste0(input$out, "~", var, adjusted_var))
fit <- lm(form, data = dat19)
explanatory <- gsub("`", "", var)
work <- dat19
# Baseline prediction, then re-predict with the measure reduced by prop
origin <- predict(fit, work)
work[explanatory] = work[explanatory]*(1-input$prop)
pop <- predict(fit, work)
pred <- data.frame(origin = origin, pop = pop, ESRD = work$ESRD, Dual=work$Dual,
NonDual = work$NonDual, Disabled = work$Disabled, PrimaryCare = work$PrimaryCare,
HCC_Cancer = work$HCC_Cancer, HCC_CKD = work$HCC_CKD, HCC_85 = work$HCC_85,
HCC_Diabetes = work$HCC_Diabetes, HCC_111 = work$HCC_111, HCC_CAD = work$HCC_CAD,
TaxEntityName = work$TaxEntityName)
##filter by selected eligibility reason, primary practice, comorbidity and TIN
# (FIX: the original applied the primarycare/comorbidities/tins filters
# twice each — harmless because filtering is idempotent, but redundant)
if (input$eligibility!="all"){
pred = pred %>% filter(!!as.name(input$eligibility)==1)
}
if (input$primarycare!="all"){
pred = pred %>% filter(PrimaryCare==input$primarycare)
}
if (input$comorbidities!="all"){
pred = pred %>% filter(!!as.name(input$comorbidities)==1)
}
if (input$tins!="all"){
pred = pred %>% filter(TaxEntityName==input$tins)
}
# Cost-of-care models are on the log scale, so exponentiate before summing
if (input$out=="log_Cost_Care"){
origin <- sum(exp(pred$origin))
pop <- sum(exp(pred$pop))
popCost = paste0("$",round(origin-pop, digits = 1))
indCost = paste0("$",round((origin-pop)/dim(pred)[1], digits=1))
tab1 <- data.frame("Population Cost Reduction" = popCost, "Individual Cost Reduction" = indCost,
"Number of Beneficiaries" = dim(pred)[1])
}
else {
origin <- sum(pred$origin)
pop <- sum(pred$pop)
popCost = round((origin-pop)/origin*100, digits = 1)
indCost = round((origin-pop)/dim(pred)[1], digits=3)
tab1 <- data.frame("Population Cost Efficiency Reduction" = paste0(popCost, "%"), "Individual Cost Efficiency Reduction" = indCost,
"Number of Beneficiaries" = dim(pred)[1])
}
rownames(tab1) <- "Overall Population"
tab1 %>% kable() %>% kable_styling()
}
# Build a savings table with one row per comorbidity in `com`: for each,
# restrict dat19 to patients with that comorbidity, scale `var` down by
# `prop`, re-predict with `fit`, apply the eligibility/primary-care/TIN
# filters, and compute population + per-beneficiary reductions.
#
# @param fit  fitted lm model used for both predictions.
# @param var  dat19 column name (backticks already stripped) to scale.
# @param elig eligibility column name or "all".
# @param pc   PrimaryCare level or "all".
# @param com  vector of comorbidity column names (one output row each).
# @param prop proportion by which `var` is reduced.
# @param tins TaxEntityName value or "all".
# NOTE(review): this helper reads input$out2 from the enclosing server
# closure rather than taking the outcome as a parameter, so it is only
# valid for the tab-2 outputs — confirm before reusing elsewhere.
tab_com <- function(fit, var, elig, pc, com, prop, tins){
tab <- data.frame(matrix(NA, nrow = length(com), ncol = 3))
if (input$out2=="log_Cost_Care"){
colnames(tab) <- c("Population Cost Reduction", "Individual Cost Reduction", "Number of Beneficiaries")
} else {
colnames(tab) <- c("Population Cost Efficiency Reduction", "Individual Cost Efficiency Reduction", "Number of Beneficiaries")
}
com_name <- vector()
for (i in 1:length(com)){
# Patients with this comorbidity only
work <- dat19 %>% filter(!!as.name(com[i])==1)
origin <- predict(fit, work)
work[var] = work[var]*(1-prop)
pop <- predict(fit, work)
pred <- data.frame(origin = origin, pop = pop, ESRD = work$ESRD, Dual=work$Dual,
NonDual = work$NonDual, Disabled = work$Disabled, PrimaryCare = work$PrimaryCare,
TaxEntityName=work$TaxEntityName)
if (elig!="all"){
pred = pred %>% filter(!!as.name(elig)==1)
}
if (pc!="all"){
pred = pred %>% filter(PrimaryCare==pc)
}
if (tins!="all"){
pred = pred %>% filter(TaxEntityName==tins)
}
# log cost models need exponentiation before summing dollar totals
if (input$out2=="log_Cost_Care"){
origin <- sum(exp(pred$origin))
pop <- sum(exp(pred$pop))
popCost = round(origin-pop, digits = 1)
indCost = round((origin-pop)/dim(pred)[1], digits=1)
}
else {
origin <- sum(pred$origin)
pop <- sum(pred$pop)
popCost = round((origin-pop)/origin*100, digits = 1)
indCost = round((origin-pop)/dim(pred)[1], digits=3)
}
tab[i,] <- c(popCost, indCost, dim(pred)[1])
# Row label: readable comorbidity name (HCC_85 -> CHF etc.)
com_name <- append(com_name, com_Replace(com[i]))
}
rownames(tab) <- com_name
tab
}
# Bar-plus-point chart comparing predicted savings across comorbidities:
# bars show population reduction, points/labels show per-beneficiary
# reduction on a secondary axis (scaled by 3000 for dollars, 100 for
# cost efficiency). Shares its model-building logic with output$tab2.
output$compare <- renderPlot({
if (input$OR2=="`Primary Care Services with a Primary Care Physician`"){
var = "`Primary Care Services with a Primary Care Physician (%)`"
adjusted_var = "+Sex+Age+Dual+ESRD+Disabled+`Total Primary Care Services`+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
} else {
var=input$OR2
if (input$OR2=="`Skilled Nursing Facility or Unit Discharges`"){
adjusted_var = "+Sex+Age+Dual+ESRD+Disabled+`Total Hospital Discharges`+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
}
else {
adjusted_var = "+Sex+Age+Dual+ESRD+Disabled+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
}
}
form <- as.formula(paste0(input$out2, "~", var, adjusted_var))
fit <- lm(form, data = dat19)
explanatory <- gsub("`", "", var)
tab <- tab_com(fit, explanatory, input$eligibility2, input$primarycare2, input$comorbidities2, input$prop2, input$tins2)
tab$Comorbidities <- rownames(tab)
if (input$out2=="log_Cost_Care"){
# NOTE(review): `label` is not a geom_point aesthetic — ggplot2 warns and
# ignores it; the visible labels come from geom_text. Confirm intent.
ggplot(tab, aes(x=Comorbidities)) +
geom_bar(stat = "identity",aes(y=`Population Cost Reduction`), fill = "steelblue", width = 0.3) +
geom_point(aes(y=`Individual Cost Reduction`*3000, label = `Individual Cost Reduction`), size = 2) +
geom_text(aes(y=`Individual Cost Reduction`*3000,
label = paste0("Individual Cost: $", `Individual Cost Reduction`)), hjust = 0, vjust=-1)+
scale_y_continuous(
name = "Cost of Care",
sec.axis = sec_axis(~./3000, name="Individual Cost")
)
} else{
ggplot(tab, aes(x=Comorbidities)) +
geom_bar(stat = "identity",aes(y=`Population Cost Efficiency Reduction`), fill = "steelblue", width = 0.3) +
geom_point(aes(y=`Individual Cost Efficiency Reduction`*100, label = `Individual Cost Efficiency Reduction`), size = 2) +
geom_text(aes(y=`Individual Cost Efficiency Reduction`*100, label = paste0("Individual Cost Efficiency: ", `Individual Cost Efficiency Reduction`)), hjust = 0, vjust=-1)+
scale_y_continuous(
name = "Population Cost Efficiency (%)",
sec.axis = sec_axis(~./100, name="Individual Cost Efficiency")
)
}
})
# Tabular counterpart of output$compare: rebuilds the same model and
# tab_com() table, formats the reductions ($ prefix for cost, % suffix
# for efficiency), and renders it with kable/kableExtra.
output$tab2 <- function(){
if (input$OR2=="`Primary Care Services with a Primary Care Physician`"){
var = "`Primary Care Services with a Primary Care Physician (%)`"
adjusted_var = "+Sex+Age+Dual+ESRD+Disabled+`Total Primary Care Services`+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
} else {
var=input$OR2
if (input$OR2=="`Skilled Nursing Facility or Unit Discharges`"){
adjusted_var = "+Sex+Age+Dual+ESRD+Disabled+`Total Hospital Discharges`+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
}
else {
adjusted_var = "+Sex+Age+Dual+ESRD+Disabled+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
}
}
form <- as.formula(paste0(input$out2, "~", var, adjusted_var))
fit <- lm(form, data = dat19)
explanatory <- gsub("`", "", var)
tab <- tab_com(fit, explanatory, input$eligibility2, input$primarycare2, input$comorbidities2, input$prop2, input$tins2)
# Format for display: dollars for cost, percent for efficiency
if (input$out2=="log_Cost_Care"){
tab$`Population Cost Reduction` <- paste0("$",tab$`Population Cost Reduction`)
tab$`Individual Cost Reduction` <- paste0("$",tab$`Individual Cost Reduction`)
}
else {
tab$`Population Cost Efficiency Reduction` = paste0(tab$`Population Cost Efficiency Reduction`, "%")
}
tab %>% kable() %>% kable_styling()
}
# Sibling of tab_com(): one output row per eligibility reason in `elig`
# instead of per comorbidity. Restricts dat19 to each eligibility group,
# scales `var` by (1 - prop), re-predicts with `fit`, then applies the
# comorbidity/primary-care/TIN filters before summarizing.
# NOTE(review): like tab_com(), this reads input$out3 from the enclosing
# server closure — only valid for the tab-3 outputs.
tab_elig <- function(fit, var, elig, pc, com, prop, tins){
tab <- data.frame(matrix(NA, nrow = length(elig), ncol = 3))
if (input$out3=="log_Cost_Care"){
colnames(tab) <- c("Population Cost Reduction", "Individual Cost Reduction", "Number of Beneficiaries")
} else {
colnames(tab) <- c("Population Cost Efficiency Reduction", "Individual Cost Efficiency Reduction", "Number of Beneficiaries")
}
elig_name <- vector()
for (i in 1:length(elig)){
# Patients with this eligibility reason only
work <- dat19 %>% filter(!!as.name(elig[i])==1)
origin <- predict(fit, work)
work[var] = work[var]*(1-prop)
pop <- predict(fit, work)
pred <- data.frame(origin = origin, pop = pop, PrimaryCare = work$PrimaryCare, HCC_Cancer=work$HCC_Cancer,
HCC_CAD = work$HCC_CAD, HCC_Diabetes = work$HCC_Diabetes, HCC_CKD = work$HCC_CKD,
HCC_85 = work$HCC_85, HCC_111 = work$HCC_111, TaxEntityName=work$TaxEntityName)
if (com!="all"){
pred = pred %>% filter(!!as.name(com)==1)
}
if (pc!="all"){
pred = pred %>% filter(PrimaryCare==pc)
}
if (tins!="all"){
pred = pred %>% filter(TaxEntityName==tins)
}
# log cost models need exponentiation before summing dollar totals
if (input$out3=="log_Cost_Care"){
origin <- sum(exp(pred$origin))
pop <- sum(exp(pred$pop))
popCost = round(origin-pop, digits = 1)
indCost = round((origin-pop)/dim(pred)[1], digits=1)
}
else {
origin <- sum(pred$origin)
pop <- sum(pred$pop)
popCost = round((origin-pop)/origin*100, digits = 1)
indCost = round((origin-pop)/dim(pred)[1], digits=3)
}
tab[i,] <- c(popCost, indCost, dim(pred)[1])
elig_name <- append(elig_name, elig[i])
}
rownames(tab) <- elig_name
tab
}
# Bar-plus-point chart of predicted savings per eligibility reason
# (analogue of output$compare, secondary axis scaled by 2000 for dollars
# and 100 for cost efficiency). Shares model-building with output$tab3.
output$bar <- renderPlot({
if (input$OR3=="`Primary Care Services with a Primary Care Physician`"){
var = "`Primary Care Services with a Primary Care Physician (%)`"
adjusted_var = "+Sex+Age+Dual+ESRD+Disabled+`Total Primary Care Services`+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
} else {
var=input$OR3
if (input$OR3=="`Skilled Nursing Facility or Unit Discharges`"){
adjusted_var = "+Sex+Age+Dual+ESRD+Disabled+`Total Hospital Discharges`+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
}
else {
adjusted_var = "+Sex+Age+Dual+ESRD+Disabled+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
}
}
form <- as.formula(paste0(input$out3, "~", var, adjusted_var))
fit <- lm(form, data = dat19)
explanatory <- gsub("`", "", var)
tab <- tab_elig(fit, explanatory, input$eligibility3, input$primarycare3, input$comorbidities3, input$prop3, input$tins3)
tab$EligibilityReason <- rownames(tab)
if (input$out3=="log_Cost_Care"){
# NOTE(review): `label` is not a geom_point aesthetic — ggplot2 warns and
# ignores it; the visible labels come from geom_text. Confirm intent.
ggplot(tab, aes(x=EligibilityReason)) +
geom_bar(stat = "identity",aes(y=`Population Cost Reduction`), fill = "steelblue", width = 0.3) +
geom_point(aes(y=`Individual Cost Reduction`*2000, label = `Individual Cost Reduction`), size = 2) +
geom_text(aes(y=`Individual Cost Reduction`*2000,
label = paste0("Individual Cost: $", `Individual Cost Reduction`)), hjust = 0, vjust=-1)+
scale_y_continuous(
name = "Cost of Care",
sec.axis = sec_axis(~./2000, name="Individual Cost")
)
} else{
ggplot(tab, aes(x=EligibilityReason)) +
geom_bar(stat = "identity",aes(y=`Population Cost Efficiency Reduction`), fill = "steelblue", width = 0.3) +
geom_point(aes(y=`Individual Cost Efficiency Reduction`*100, label = `Individual Cost Efficiency Reduction`), size = 2) +
geom_text(aes(y=`Individual Cost Efficiency Reduction`*100, label = paste0("Individual Cost Efficiency: ", `Individual Cost Efficiency Reduction`)), hjust = 0, vjust=-1)+
scale_y_continuous(
name = "Population Cost Efficiency (%)",
sec.axis = sec_axis(~./100, name="Individual Cost Efficiency")
)
}
})
# Tabular counterpart of output$bar: rebuilds the same model and
# tab_elig() table, formats reductions ($ / %), and renders via kable.
output$tab3 <- function(){
if (input$OR3=="`Primary Care Services with a Primary Care Physician`"){
var = "`Primary Care Services with a Primary Care Physician (%)`"
adjusted_var = "+Sex+Age+Dual+ESRD+Disabled+`Total Primary Care Services`+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
} else {
var=input$OR3
if (input$OR3=="`Skilled Nursing Facility or Unit Discharges`"){
adjusted_var = "+Sex+Age+Dual+ESRD+Disabled+`Total Hospital Discharges`+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
}
else {
adjusted_var = "+Sex+Age+Dual+ESRD+Disabled+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
}
}
form <- as.formula(paste0(input$out3, "~", var, adjusted_var))
fit <- lm(form, data = dat19)
explanatory <- gsub("`", "", var)
tab <- tab_elig(fit, explanatory, input$eligibility3, input$primarycare3, input$comorbidities3, input$prop3, input$tins3)
# Format for display: dollars for cost, percent for efficiency
if (input$out3=="log_Cost_Care"){
tab$`Population Cost Reduction` <- paste0("$",tab$`Population Cost Reduction`)
tab$`Individual Cost Reduction` <- paste0("$",tab$`Individual Cost Reduction`)
}
else {
tab$`Population Cost Efficiency Reduction` <- paste0(tab$`Population Cost Efficiency Reduction`, "%")
}
tab %>% kable() %>% kable_styling()
}
# Per-primary-practice reduction table for tab 4.
#
# For each primary-care practice in `pc`, predicts the outcome from `fit`
# before and after scaling the utilization column `var` down by `prop`,
# optionally restricts to one eligibility group (`elig`), comorbidity (`com`)
# and TIN (`tins`), and returns one row per practice with the population and
# per-beneficiary reduction plus the number of beneficiaries.
#
# NOTE(review): closes over `input$out4` and the global `dat19`; only valid
# inside this server function.
tab_pc <- function(fit, var, elig, pc, com, prop, tins){
  tab <- data.frame(matrix(NA, nrow = length(pc), ncol = 3))
  if (input$out4=="log_Cost_Care"){
    colnames(tab) <- c("Population Cost Reduction", "Individual Cost Reduction", "Number of Beneficiaries")
  } else {
    colnames(tab) <- c("Population Cost Efficiency Reduction", "Individual Cost Efficiency Reduction", "Number of Beneficiaries")
  }
  # seq_along() is safe when pc is empty (1:length(pc) would yield c(1, 0)).
  for (i in seq_along(pc)){
    work <- dat19 %>% filter(PrimaryCare==pc[i])
    origin <- predict(fit, work)
    # Counterfactual: scale the chosen utilization measure down by `prop`.
    work[var] <- work[var]*(1-prop)
    pop <- predict(fit, work)
    pred <- data.frame(origin = origin, pop = pop, ESRD = work$ESRD, Dual=work$Dual, NonDual = work$NonDual,
                       Disabled = work$Disabled, PrimaryCare = work$PrimaryCare, HCC_Cancer=work$HCC_Cancer,
                       HCC_CAD = work$HCC_CAD, HCC_Diabetes = work$HCC_Diabetes, HCC_CKD = work$HCC_CKD,
                       HCC_85 = work$HCC_85, HCC_111 = work$HCC_111, TaxEntityName = work$TaxEntityName)
    if (elig!="all"){
      pred <- pred %>% filter(!!as.name(elig)==1)
    }
    if (com!="all"){
      pred <- pred %>% filter(!!as.name(com)==1)
    }
    if (tins!="all"){
      pred <- pred %>% filter(TaxEntityName==tins)
    }
    if (input$out4=="log_Cost_Care"){
      # Outcome is on the log scale -> back-transform before summing dollars.
      origin <- sum(exp(pred$origin))
      pop <- sum(exp(pred$pop))
      popCost <- round(origin-pop, digits = 1)
      indCost <- round((origin-pop)/nrow(pred), digits=1)
    }
    else {
      origin <- sum(pred$origin)
      pop <- sum(pred$pop)
      popCost <- round((origin-pop)/origin*100, digits = 1)
      indCost <- round((origin-pop)/nrow(pred), digits=3)
    }
    tab[i,] <- c(popCost, indCost, nrow(pred))
  }
  # Row names are the practice labels themselves; no accumulator needed.
  rownames(tab) <- pc
  tab
}
# Bar chart for tab 4: per-practice reduction (bars, left axis) with the
# per-beneficiary reduction overlaid as labelled points on a secondary axis.
# NOTE(review): `label` inside geom_point()'s aes() is not a geom_point
# aesthetic and is ignored with a warning -- the visible labels come from
# geom_text(); confirm before removing.
output$bar2 <- renderPlot({
    if (input$OR4=="`Primary Care Services with a Primary Care Physician`"){
      var = "`Primary Care Services with a Primary Care Physician (%)`"
      adjusted_var = "+Sex+Age+Dual+ESRD+Disabled+`Total Primary Care Services`+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
    } else {
      var=input$OR4
      if (input$OR4=="`Skilled Nursing Facility or Unit Discharges`"){
        adjusted_var = "+Sex+Age+Dual+ESRD+Disabled+`Total Hospital Discharges`+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
      }
      else {
        adjusted_var = "+Sex+Age+Dual+ESRD+Disabled+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
      }
    }
    form <- as.formula(paste0(input$out4, "~", var, adjusted_var))
    fit <- lm(form, data = dat19)
    explanatory <- gsub("`", "", var)
    tab <- tab_pc(fit, explanatory, input$eligibility4, input$primarycare4, input$comorbidities4, input$prop4, input$tins4)
    tab$PrimaryPractice <- rownames(tab)
    if (input$out4=="log_Cost_Care"){
      # Secondary axis: individual values scaled by 2000 to share the panel.
      ggplot(tab, aes(x=PrimaryPractice)) +
        geom_bar(stat = "identity",aes(y=`Population Cost Reduction`), fill = "steelblue", width = 0.3) +
        geom_point(aes(y=`Individual Cost Reduction`*2000, label = `Individual Cost Reduction`), size = 2) +
        geom_text(aes(y=`Individual Cost Reduction`*2000, label = paste0("Individual Cost: $", `Individual Cost Reduction`)), hjust = 0, vjust=-1)+
        scale_y_continuous(
          name = "Population Cost",
          sec.axis = sec_axis(~./2000, name="Individual Cost")
        )
    } else {
      ggplot(tab, aes(x=PrimaryPractice)) +
        geom_bar(stat = "identity",aes(y=`Population Cost Efficiency Reduction`), fill = "steelblue", width = 0.3) +
        geom_point(aes(y=`Individual Cost Efficiency Reduction`*100, label = `Individual Cost Efficiency Reduction`), size = 2) +
        geom_text(aes(y=`Individual Cost Efficiency Reduction`*100, label = paste0("Individual Cost Efficiency: ", `Individual Cost Efficiency Reduction`)), hjust = 0, vjust=-1)+
        scale_y_continuous(
          name = "Population Cost Efficiency (%)",
          sec.axis = sec_axis(~./100, name="Individual Cost Efficiency")
        )
    }
  })
# HTML table for tab 4: same model as output$bar2, formatted as a
# kable/kableExtra table with currency or percent formatting depending on
# the selected outcome.
output$tab4 <- function(){
    # Choose explanatory variable and adjusters (see output$tab3 for the
    # same pattern).
    if (input$OR4=="`Primary Care Services with a Primary Care Physician`"){
      var = "`Primary Care Services with a Primary Care Physician (%)`"
      adjusted_var = "+Sex+Age+Dual+ESRD+Disabled+`Total Primary Care Services`+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
    } else {
      var=input$OR4
      if (input$OR4=="`Skilled Nursing Facility or Unit Discharges`"){
        adjusted_var = "+Sex+Age+Dual+ESRD+Disabled+`Total Hospital Discharges`+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
      }
      else {
        adjusted_var = "+Sex+Age+Dual+ESRD+Disabled+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
      }
    }
    form <- as.formula(paste0(input$out4, "~", var, adjusted_var))
    fit <- lm(form, data = dat19)
    explanatory <- gsub("`", "", var)
    tab <- tab_pc(fit, explanatory, input$eligibility4, input$primarycare4, input$comorbidities4, input$prop4, input$tins4)
    # Currency for cost-of-care outcome, percent for cost-efficiency.
    if (input$out4=="log_Cost_Care"){
      tab$`Population Cost Reduction` <- paste0("$",tab$`Population Cost Reduction`)
      tab$`Individual Cost Reduction` <- paste0("$",tab$`Individual Cost Reduction`)
    }
    else {
      tab$`Population Cost Efficiency Reduction` <- paste0(tab$`Population Cost Efficiency Reduction`, "%")
    }
    tab %>% kable() %>% kable_styling()
  }
# Per-TIN reduction table for tab 5.  Same counterfactual as tab_pc():
# predict outcome from `fit` before and after scaling utilization column
# `var` down by `prop`, within each TaxEntityName in `tins`, optionally
# restricted by eligibility (`elig`), comorbidity (`com`) and primary-care
# practice (`pc`).
# NOTE(review): closes over `input$out5` and the global `dat19`.
tab_tins <- function(fit, var, elig, tins, com, prop,pc){
  tab <- data.frame(matrix(NA, nrow = length(tins), ncol = 3))
  # Column names depend on which outcome is selected in the UI.
  if (input$out5=="log_Cost_Care"){
    colnames(tab) <- c("Population Cost Reduction", "Individual Cost Reduction", "Number of Beneficiaries")
  } else {
    colnames(tab) <- c("Population Cost Efficiency Reduction", "Individual Cost Efficiency Reduction", "Number of Beneficiaries")
  }
  tins_name <- vector()
  for (i in seq_along(tins)){
    work <- dat19 %>% filter(TaxEntityName==tins[i])
    # Baseline prediction, then counterfactual with `var` reduced by `prop`.
    origin <- predict(fit, work)
    work[var] = work[var]*(1-prop)
    pop <- predict(fit, work)
    pred <- data.frame(origin = origin, pop = pop, ESRD = work$ESRD, Dual=work$Dual, NonDual = work$NonDual,
                       Disabled = work$Disabled, PrimaryCare = work$PrimaryCare, HCC_Cancer=work$HCC_Cancer,
                       HCC_CAD = work$HCC_CAD, HCC_Diabetes = work$HCC_Diabetes, HCC_CKD = work$HCC_CKD,
                       HCC_85 = work$HCC_85, HCC_111 = work$HCC_111)
    # Optional subgroup filters ("all" means no restriction).
    if (elig!="all"){
      pred = pred %>% filter(!!as.name(elig)==1)
    }
    if (com!="all"){
      pred = pred %>% filter(!!as.name(com)==1)
    }
    if (pc!="all"){
      pred = pred %>% filter(PrimaryCare==pc)
    }
    if (input$out5=="log_Cost_Care"){
      # Log-scale outcome -> exponentiate before summing dollar totals.
      origin <- sum(exp(pred$origin))
      pop <- sum(exp(pred$pop))
      popCost = round(origin-pop, digits = 1)
      indCost = round((origin-pop)/dim(pred)[1], digits=1)
    }
    else {
      origin <- sum(pred$origin)
      pop <- sum(pred$pop)
      popCost = round((origin-pop)/origin*100, digits = 1)
      indCost = round((origin-pop)/dim(pred)[1], digits=3)
    }
    tab[i,] <- c(popCost, indCost, dim(pred)[1])
    tins_name <- append(tins_name, tins[i])
  }
  rownames(tab) <- tins_name
  tab
}
# Bar chart for tab 5: per-TIN reduction (bars, left axis) with the
# per-beneficiary reduction overlaid as labelled points on a secondary axis.
output$bar3 <- renderPlot({
    # Choose explanatory variable and adjusters (same pattern as bar2/tab3).
    if (input$OR5=="`Primary Care Services with a Primary Care Physician`"){
      var = "`Primary Care Services with a Primary Care Physician (%)`"
      adjusted_var = "+Sex+Age+Dual+ESRD+Disabled+`Total Primary Care Services`+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
    } else {
      var=input$OR5
      if (input$OR5=="`Skilled Nursing Facility or Unit Discharges`"){
        adjusted_var = "+Sex+Age+Dual+ESRD+Disabled+`Total Hospital Discharges`+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
      }
      else {
        adjusted_var = "+Sex+Age+Dual+ESRD+Disabled+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
      }
    }
    form <- as.formula(paste0(input$out5, "~", var, adjusted_var))
    fit <- lm(form, data = dat19)
    explanatory <- gsub("`", "", var)
    tab <- tab_tins(fit, explanatory, input$eligibility5, input$tins5, input$comorbidities5, input$prop5, input$primarycare5)
    tab$tins <- rownames(tab)
    if (input$out5=="log_Cost_Care"){
      ggplot(tab, aes(x=tins)) +
        geom_bar(stat = "identity",aes(y=`Population Cost Reduction`), fill = "steelblue", width = 0.3) +
        geom_point(aes(y=`Individual Cost Reduction`*2000, label = `Individual Cost Reduction`), size = 2) +
        geom_text(aes(y=`Individual Cost Reduction`*2000, label = paste0("Individual Cost: $", `Individual Cost Reduction`)), hjust = 0, vjust=-1)+
        xlab("Primary Practice TINs")+
        scale_y_continuous(
          name = "Population Cost",
          sec.axis = sec_axis(~./2000, name="Individual Cost")
        )
    } else {
      ggplot(tab, aes(x=tins)) +
        geom_bar(stat = "identity",aes(y=`Population Cost Efficiency Reduction`), fill = "steelblue", width = 0.3) +
        geom_point(aes(y=`Individual Cost Efficiency Reduction`*100, label = `Individual Cost Efficiency Reduction`), size = 2) +
        geom_text(aes(y=`Individual Cost Efficiency Reduction`*100, label = paste0("Individual Cost Efficiency: ", `Individual Cost Efficiency Reduction`)), hjust = 0, vjust=-1)+
        scale_y_continuous(
          name = "Population Cost Efficiency (%)",
          sec.axis = sec_axis(~./100, name="Individual Cost Efficiency")
        )
    }
  })
# HTML table for tab 5: same model as output$bar3, rendered via
# kable/kableExtra with currency or percent formatting.
output$tab5 <- function(){
    if (input$OR5=="`Primary Care Services with a Primary Care Physician`"){
      var = "`Primary Care Services with a Primary Care Physician (%)`"
      adjusted_var = "+Sex+Age+Dual+ESRD+Disabled+`Total Primary Care Services`+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
    } else {
      var=input$OR5
      if (input$OR5=="`Skilled Nursing Facility or Unit Discharges`"){
        adjusted_var = "+Sex+Age+Dual+ESRD+Disabled+`Total Hospital Discharges`+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
      }
      else {
        adjusted_var = "+Sex+Age+Dual+ESRD+Disabled+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+OutNetwork+HCC_CKD+PrimaryCare"
      }
    }
    form <- as.formula(paste0(input$out5, "~", var, adjusted_var))
    fit <- lm(form, data = dat19)
    explanatory <- gsub("`", "", var)
    tab <- tab_tins(fit, explanatory, input$eligibility5, input$tins5, input$comorbidities5, input$prop5, input$primarycare5)
    # Currency for cost-of-care outcome, percent for cost-efficiency.
    if (input$out5=="log_Cost_Care"){
      tab$`Population Cost Reduction` <- paste0("$",tab$`Population Cost Reduction`)
      tab$`Individual Cost Reduction` <- paste0("$",tab$`Individual Cost Reduction`)
    }
    else {
      tab$`Population Cost Efficiency Reduction` <- paste0(tab$`Population Cost Efficiency Reduction`, "%")
    }
    tab %>% kable() %>% kable_styling()
  }
# Drilldown state for the pie chart: NULL = top level (one slice per TIN),
# otherwise the TaxEntityName currently drilled into.
drills <- reactiveValues(
    category = NULL
  )
# All TIN names; populates the drilldown dropdown in output$history.
categories <- unique(TINs$TaxEntityName)
#current_category <- reactiveVal()
# Data behind the pie chart: TIN-level counts at the top level, or
# eligibility-reason counts once a TIN has been drilled into.
tin_dat <- reactive({
    selected <- drills$category
    if (length(selected) == 0) {
      # No drilldown active -> one slice per TIN.
      count(dat19, TaxEntityName)
    } else {
      dat19 %>%
        filter(TaxEntityName == selected) %>%
        count(EligibilityReason)
    }
  })
# Drillable pie chart.  customdata carries the slice label so the
# plotly_click observer can identify which TIN was clicked.
output$pie <- renderPlotly({
    slices <- tin_dat()
    names(slices) <- c("x", "y")
    plot_ly(slices) %>%
      add_pie(labels = ~x, values = ~y, customdata = ~x)
  })
# Rows of dat19 restricted to the drilled-into TIN; the whole data set
# when no drilldown is active.
tin_filter <- reactive({
    if (length(drills$category) == 0) {
      dat19
    } else {
      filter(dat19, TaxEntityName == drills$category)
    }
  })
# Heatmap of adjusted regression coefficients: each cell is the slope of one
# utilization measure on one outcome (log cost / log cost-efficiency), fit
# within the currently drilled-into TIN.  Cells whose 95% CI crosses zero
# are annotated "Uninterpretable".
output$heatmap2 <- renderPlotly({
    d <- tin_filter()
    out <- c("log_Cost_Care","log_Cost_Eff")
    var <- c("`Total Hospital Discharges`", "UnplannedAdmits", "Readmits", "`ED Visits`",
             "`Computed Tomography (CT) Events`", "`Magnetic Resonance Imaging (MRI) Events`",
             "`Days in Hospice`", "`Total Primary Care Services`",
             "`Primary Care Services with a Primary Care Physician (%)`+`Total Primary Care Services`",
             "`Skilled Nursing Facility or Unit Discharges`+`Total Hospital Discharges`")
    # Drop PrimaryCare from the adjusters when the subset has only one level
    # (lm() would fail on a single-level factor).
    if (dim(distinct(d, PrimaryCare))[1]<=1) adj = "+Sex+Age+Dual+Disabled+ESRD+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+HCC_CKD+OutNetwork"
    else adj="+Sex+Age+Dual+Disabled+ESRD+Cont_Att+Death+HCC_Cancer+HCC_Diabetes+HCC_CAD+HCC_85+HCC_111+HCC_CKD+OutNetwork+PrimaryCare"
    coef_vector <- vector()
    conf_vector <- vector()
    # One regression per (outcome, utilization) pair; keep the slope and a
    # 0/1 flag for whether its confidence interval crosses zero.
    for (j in seq_along(out)){
      for (i in seq_along(var)){
        form <- as.formula(
          paste0(out[j],"~",var[i],adj)
        )
        fit <- lm(form, data = d)
        coef_vector <- append(coef_vector,fit$coefficients[2])
        conf_vector <- append(conf_vector,ifelse(confint(fit)[2,1]*confint(fit)[2,2]<0,0,1))
      }
    }
    coef_vector <- round(replace_na(coef_vector,0),3)
    # Annotate non-significant cells.  First 10 entries belong to the
    # cost-of-care column, the rest to cost-efficiency.
    ann <- list()
    j=0
    for (i in seq_along(conf_vector)){
      if (conf_vector[i]==0){
        j=j+1
        xaxis = ifelse(i<=10,"Log(Cost of Care)","Log(Cost Efficiency)")
        yaxis = gsub("Admits"," Admissions",gsub("admits","admissions",gsub("`","",names(coef_vector[i]))))
        ann[[j]] <- list(x=xaxis,y=yaxis, text="Uninterpretable",xref="x",yref="y",showarrow=FALSE)
      }
    }
    mat = t(matrix(coef_vector, nrow = 2, ncol = 10))
    # Hover text: utilization x outcome x coefficient magnitude.
    conditions.text <- as.vector(outer(paste0("Hospital Utilizations:",gsub("Admits"," Admissions",gsub("admits","admissions",gsub("`","",names(coef_vector[1:10])))),"\n"), paste0("Outcome variable:",c("Log(Cost Care)","Log(Cost Efficiency)"),"\n"), paste0))
    conditions.text <- paste0(conditions.text,"Magnitude of Association:",coef_vector)
    text.mat <- matrix(conditions.text, nrow= nrow(mat), ncol = ncol(mat))
    # Diverging blue-white-red color scale anchored so white sits at zero.
    colorlength <- 100
    null_value <- (0 - min(coef_vector)) / (max(coef_vector) - min(coef_vector))
    border <- as.integer(null_value * colorlength)
    colorscale <- as.list(1:colorlength)
    #colorscale below zero
    s <- scales::seq_gradient_pal("blue", "white", "Lab")(seq(0,1,length.out=border))
    for (i in 1:border) {
      colorscale[[i]] <- c((i - 1) / colorlength, s[i])
    }
    #colorscale above zero
    s <- scales::seq_gradient_pal("white", "red", "Lab")(seq(0,1,length.out=colorlength - border))
    for (i in 1:(colorlength - border)) {
      colorscale[[i + border]] <- c((i + border) / colorlength, s[i])
    }
    plot_ly(source = "heat_plot") %>%
      add_heatmap(
        x=c("Log(Cost of Care)","Log(Cost Efficiency)"),
        y=gsub("Admits"," Admissions",gsub("admits","admissions",gsub("`","",names(coef_vector[1:10])))),
        z=matrix(coef_vector,nrow=10,ncol = 2),
        hoverinfo = 'text',
        text = text.mat,
        colors = colorRamp(c("blue","white","red")),
        colorscale = colorscale,
        colorbar = list(len = 1, limits = c(-1,1)),
        # FIX: was misspelled `wdith`, so the width setting was silently
        # ignored by plotly.
        width = 500,
        height=1000
      )%>%
      layout(margin=list(l=350,r=20, b=10, t=10), annotations=ann)
  })
# Drill into a TIN when a pie slice is clicked.
# FIX: guard BEFORE extracting -- plotly_click also fires for plots without
# customdata (e.g. the heatmap), and `NULL[[1]]` raises
# "subscript out of bounds", so the old length check never ran.
observeEvent(event_data("plotly_click"), {
    cd <- event_data("plotly_click")$customdata
    if (!length(cd)) return()
    drills$category <- cd[[1]]
  })
# Drilldown header: a TIN dropdown while a drilldown is active, otherwise a
# hint to click the pie chart.
# FIX: removed the unused local `sd <- filter(...)` -- its result was never
# referenced and the name shadowed stats::sd.
output$history <- renderUI({
    if (!length(drills$category))
      return("Click the pie chart to drilldown")
    categoryInput <- selectInput(
      "category", "TIN",
      choices = categories, selected = drills$category
    )
    fluidRow(
      column(3, categoryInput)
    )
  })
# Keep the drilldown state in sync with the TIN dropdown rendered above.
observeEvent(input$category, {
    drills$category <- input$category
  })
  #observe({
  #  cd <- event_data("plotly_click")$customdata[[1]]
  #  if (isTRUE(cd %in% categories)) current_category(cd)
  #})
  # "Back" button is rendered only while a drilldown is active.
  output$back <- renderUI({
    if (length(drills$category))
      actionButton("clear", "Back", icon("chevron-left"))
  })
  # clear the chosen category on back button press
  observeEvent(input$clear, {drills$category<-NULL})
  # Stop the R process when the browser session ends (single-user app).
  session$onSessionEnded(function() {
    stopApp()
  })
})
|
1365367551ea64a082d636e2f017d4d68efa7472
|
43f4b374161525ed09d297e812b4de66fb8b06dd
|
/species_resolution_per_db.R
|
2a5af96109698c121c876e1cff4c39d1ff0ab12a
|
[] |
no_license
|
RJEGR/metagenomics
|
4760f7d07e8ae38fad739cce97e803ab81c7601f
|
b2070fa35b2fa1af3d387734ebc1ec919b5be737
|
refs/heads/master
| 2021-12-14T15:31:54.338906
| 2021-12-02T06:00:50
| 2021-12-02T06:00:50
| 134,743,404
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,596
|
r
|
species_resolution_per_db.R
|
#!/usr/bin/env Rscript
# Compare species-level taxonomic resolution of the same ASVs assigned
# against three reference databases (BOLD, MIDORI, Coarbitrator/GBank).
rm(list=ls());
# Load libraries ----
.cran_packages <- c("stringr", "reshape2", "tidyverse")
# .cran_packages <- c("stringr", "tidyverse", "reshape2", "ggplot2")
# 1. cran_package
# Install any missing CRAN packages, then attach them all.
.inst <- .cran_packages %in% installed.packages()
if(any(!.inst)) {
  install.packages(.cran_packages[!.inst], dep=TRUE, repos='http://cran.us.r-project.org')
}
# Load packages into session, and print package version
sapply(c(.cran_packages), require, character.only = TRUE)
# NOTE(review): `args` is read but never used -- input files are hard-coded
# below, together with absolute user-specific paths; parameterize before
# running anywhere else.
args = commandArgs(trailingOnly=TRUE)
# wangtax = args[1]
# tag <- strsplit(wangtax, "[.]")[[1]][1]
# wang tax from:
path = '/Users/cigom/metagenomics/COI/species_resolution_per_db'
bold.file = "run014_t2_ASVs.BOLD_public_species_v02.wang.taxonomy"
midori.file = "run014_t2_ASVs.midori_unique_DB_0.wang.taxonomy"
coarbitrator.file = "run014_t2_ASVs.Coarbitrator_COI_nuc_curated.wang.taxonomy"
# To load file: wang.taxonomy
source(file = "~/Documents/GitHub/metagenomics/converted2worms/cigom_load_dada2_tables.R")
setwd(path)
## Load assignation result ----
# read.wangtax() comes from the sourced helper script above.
bold <- read.wangtax(bold.file)
midori <- read.wangtax(midori.file)
coarbitrator <- read.wangtax(coarbitrator.file)
# TL = taxonomic level names (drop the ASV id and trailing SL columns).
TL <- names(midori)[-c(1,ncol(midori))]
if (identical(names(bold), names(midori) )) { TL <- names(bold)[-c(1,ncol(bold))]
}
# Count undetermined assignments per database at the phylum level ----
# (column 3 of a wang-taxonomy table is the phylum rank).
n_undetermined <- function(x) {
  is_undet <- x[, 3] == 'Undetermined'
  nrow(x[is_undet, ])
}
# Report the number of phylum-level "Undetermined" ASVs for each database.
print(n_undetermined_bold <- n_undetermined(bold))
print(n_undetermined_midori <- n_undetermined(midori))
print(n_undetermined_coarbitrator <- n_undetermined(coarbitrator))
# Back the SL position per database ----
# Map each ASV to the name and lineage value of its deepest resolved rank.
# SL is the index of the last assigned taxonomic level, so the matching
# column in the wang-taxonomy table is SL + 1 (column 1 is the ASV id).
# FIX: replaces three copy-pasted loops that grew a NULL list element by
# element with one shared helper.
deepest_rank <- function(tax) {
  rows <- lapply(seq_len(nrow(tax)), function(i) {
    rl <- tax$SL[i] + 1
    list(rank = names(tax)[rl], linage = tax[i, rl])
  })
  do.call(rbind, rows)
}
coarbitrator_ <- deepest_rank(coarbitrator)
midori_ <- deepest_rank(midori)
bold_ <- deepest_rank(bold)
# compare the last parent between db ----
# Prefix each database's columns so the three tables can live side by side
# in one wide data frame.
names(midori)[-1] <- paste("midori", names(midori)[-1], sep="_")
names(bold)[-1] <- paste("bold", names(bold)[-1], sep="_")
names(coarbitrator)[-1] <- paste("coarbitrator", names(coarbitrator)[-1], sep="_")
# bold vs midori ----
LCR <- data.frame(midori, bold[,-1], coarbitrator[,-1], diff_x_y = midori[,9] - bold[,9], diff_x_z = coarbitrator[,9] - bold[,9], stringsAsFactors = FALSE)
# Last Common Rank (LCR): start at 1 and walk the seven taxonomic levels;
# rows where both databases agree at level t end up with LCR = t.
LCR$bold_vs_midori <- 1
for(t in 1:7){
  # Column holding the midori assignment at level t.
  midori_rank <- paste("midori", TL[t], sep="_")
  # Column holding the bold assignment at level t.
  bold_rank <- paste("bold", TL[t], sep="_")
  # Rows where both names match get LCR = t.
  LCR$bold_vs_midori[ which(LCR[, midori_rank] == LCR[, bold_rank]) ] <- t
}
# bold vs coarbitrator ----
LCR$bold_vs_coarbitrator <- 1
for(t in 1:7){
  coarbitrator_rank <- paste("coarbitrator", TL[t], sep="_")
  bold_rank <- paste("bold", TL[t], sep="_")
  # BUG FIX: this loop previously compared the *midori* column against bold
  # again (copy-paste from the loop above), leaving `coarbitrator_rank`
  # unused and making bold_vs_coarbitrator a duplicate of bold_vs_midori.
  LCR$bold_vs_coarbitrator[ which(LCR[, coarbitrator_rank] == LCR[, bold_rank]) ] <- t
}
# getback the Last Common Rank
# For each ASV, pull the rank name and lineage string at the deepest
# resolved level for bold and midori (via the *_SL columns).
bold_LCR <-NULL
for (i in 1:nrow(bold)) {
  rl <- LCR[,'bold_SL'][i] + 1
  bold_LCR[[i]] <- list(rank=names(bold)[rl], linage=bold[i,rl]) }
bold_LCR <- do.call(rbind, bold_LCR)
bold_LCR <- data.frame(rank = do.call(rbind, bold_LCR[,1]), lineage = do.call(rbind, bold_LCR[,2]))
# Column names are "<db>_<rank>"; keep only the rank part.
bold_LCR$rank <- sapply(strsplit(as.vector(bold_LCR$rank), "_"), "[[", 2)
# NOTE (original author, translated): this part needs verification.
midori_LCR <-NULL
for (i in 1:nrow(midori)) {
  rl <- LCR[,'midori_SL'][i] + 1
  midori_LCR[[i]] <- list(rank=names(midori)[rl], linage=midori[i,rl]) }
midori_LCR <- do.call(rbind, midori_LCR)
midori_LCR <- data.frame(rank = do.call(rbind, midori_LCR[,1]), lineage = do.call(rbind, midori_LCR[,2]))
midori_LCR$rank <- sapply(strsplit(as.vector(midori_LCR$rank), "_"), "[[", 2)
coarbitrator_LCR <-NULL
for (i in 1:nrow(coarbitrator)) {
  # NOTE(review): this loop indexes by the bold-vs-coarbitrator LCR level,
  # unlike the two loops above which use the *_SL columns -- confirm this
  # asymmetry is intended.
  rl <- LCR[,'bold_vs_coarbitrator'][i] + 1
  coarbitrator_LCR[[i]] <- list(rank=names(coarbitrator)[rl], linage=coarbitrator[i,rl]) }
coarbitrator_LCR <- do.call(rbind, coarbitrator_LCR)
coarbitrator_LCR <- data.frame(rank = do.call(rbind, coarbitrator_LCR[,1]), lineage = do.call(rbind, coarbitrator_LCR[,2]))
coarbitrator_LCR$rank <- sapply(strsplit(as.vector(coarbitrator_LCR$rank), "_"), "[[", 2)
# and save data imput
# One tibble per ASV: deepest lineage per database (x = bold, y = midori,
# z = coarbitrator), assignment-depth differences, and the LCR columns.
data <- as_tibble(data.frame( ASV = midori$ASV,
                              #midori = do.call(rbind, midori_[,1]),
                              #bold = do.call(rbind, bold_[,1]),
                              lineage_x = do.call(rbind, bold_[,2]),
                              lineage_y = do.call(rbind, midori_[,2]),
                              lineage_z = do.call(rbind, coarbitrator_[,2]),
                              # Add the shift in assignment depth:
                              rank_x = bold[,9],
                              rank_y = midori[,9],
                              rank_z = coarbitrator[,9],
                              diff_x_y = bold[,9] - midori[,9],
                              diff_x_z = bold[,9] - coarbitrator[,9],
                              LCR_x_y = LCR$bold_vs_midori,
                              LCR_x_z = LCR$bold_vs_coarbitrator,
                              LCR_x = bold_LCR$lineage,
                              LCR_y = midori_LCR$lineage,
                              LCR_z = coarbitrator_LCR$lineage,
                              stringsAsFactors = FALSE)
) # NOT ABS VALUE IN DIFF
data$diff_x_y_z <- data$diff_x_y - data$diff_x_z
# Calculate distance:
# Computes pairwise string distances between elements
library('stringdist')
# We use is the Levenshtein Distance which is simply the
# single character edit distance between two strings by
# a combination of insertions, deletions, or substitutions
dist_x_y <- stringdist(data$lineage_x, data$lineage_y, method = 'lv') # inf values reported
dist_x_z <- stringdist(data$lineage_x, data$lineage_z, method = 'lv') # coarbitrator nomenclature still needs fixing (original author note, translated)
# NOTE(review): the rest of this section looks like unfinished exploratory
# work: `dist_x_y` is a numeric vector (dplyr::filter needs a data frame)
# and the object `dist` is never defined in this script -- confirm these
# came from an interactive session before relying on them.
# this changes (original author note, translated)
filter(dist_x_y, dist > 0) %>%
  select(midori, bold, diff) %>%
  melt(id.vars = "diff", variable.name = "DB", value.name = "Rank") %>%
  as_tibble() %>% # mutate(diff = abs(diff))
  mutate(Rank = factor(Rank, levels = TL[-1]) ) -> y
# now check that zero-change levels are consistently assigned (translated):
disvis <- dist[dist$diff == 0 & dist$dist > 0,]
disvis <- dist[ dist$dist > 0,]
filter(dist, dist > 0) %>%
  select(LCR, LCR_x, LCR_y, dist) %>%
  melt(id.vars = c("LCR", "dist"), variable.name = "LCR_xy", value.name = "lineage") %>%
  as_tibble() -> dist_xy
p <- ggplot(dist_xy, aes(x=dist, fill=lineage)) +
  #geom_histogram(position="dodge2", bins = 40) +
  geom_density(alpha = 0.6) +
  facet_wrap (~ lineage , scales = 'free_y') +
  theme(legend.position="top")
p
p <- ggplot(dist_xy, aes(x=dist)) +
  geom_density(alpha = 0.6) + facet_wrap (~ LCR_xy, scales = 'free_y')
plot(density(dist$dist), main = 'Comparing DB assignations by String Distance [LCR]')
lines(stats::lowess(dist$dist))
d <- stringdistmatrix(disvis$LCR_x, disvis$LCR_y, useNames="none", method = 'lv')
library(superheat)
superheat(d, row.dendrogram = TRUE, col.dendrogram = TRUE)
# The script exits here; everything below is unreachable when run with Rscript.
quit(save = 'no')
data$weight <- 'None'
data[data$diff > 0 , 'weight'] <- 'midori'
data[data$diff < 0 , 'weight'] <- 'bold'
filter(data, weight != 'None') %>%
  select(midori, bold, diff) %>%
  melt(id.vars = "diff", variable.name = "DB", value.name = "Rank") %>%
  as_tibble() %>% # mutate(diff = abs(diff))
  mutate(Rank = factor(Rank, levels = TL[-1]) ) -> y
# Fish shared across the databases (bold).
# Tabulate taxon frequencies, most frequent first.  The first column of the
# result is named "r" (from the argument captured by table()).
ntaxa <- function(r) {
  counts <- data.frame(table(r))
  counts[order(counts$Freq, decreasing = TRUE), ]
}
# Most frequent class per database (counts from the original run).
ntaxa(bold[,4]) # Actinopterygii, 691
ntaxa(midori[,4]) # Actinopteri, 582
ntaxa(coarbitrator[,4]) # Actinopteri, 388
# fix the taxonomy first (original author note, translated)
# NOTE(review): acti_bold subsets *midori* rows using bold's class column --
# looks like a copy-paste slip (probably meant bold[bold[,4]==...,]); confirm.
acti_bold <- midori[bold[,4] == 'Actinopterygii', ] # 13056
acti_midori <- midori[midori[,4] == 'Actinopteri', ] # 13014
acti_coarbitrator <- coarbitrator[coarbitrator[,4] == 'Actinopteri', ] # 13023
# Barplot
# use SL_diff or SL_tidy
# Midori and bold taxonomy resolution
# Won (+) and lost (-) levels (shoul)
# Per-ASV depth differences with a label for which database resolved deeper.
SL <- select(data, ASV, diff_x_y, diff_x_z)
SL$weight_x_y <- 'shared_x_y'
SL[SL$diff_x_y > 0 , 'weight_x_y'] <- 'midori'
SL[SL$diff_x_y < 0 , 'weight_x_y'] <- 'bold'
SL$weight_x_z <- 'shared_x_z'
SL[SL$diff_x_z > 0 , 'weight_x_z'] <- 'coarbitrator'
SL[SL$diff_x_z < 0 , 'weight_x_z'] <- 'bold'
SL[,'diff_x_y'] <- abs(SL[,'diff_x_y'])
SL[,'diff_x_z'] <- abs(SL[,'diff_x_z'])
table(SL$weight_x_z)
# Long format for plotting: one row per (ASV, comparison).
SL_melt <- melt(SL,
                id.vars= c("ASV","diff_x_y", "diff_x_z"),
                value.name = "DB",
                variable.name = 'weight')
SL_melt <- melt(SL_melt)
SL_diff <- aggregate(SL_melt[,'value'], by=list(SL_melt[,'DB']), FUN = table)
SL_diff <- data.frame(level = SL_diff[,1], SL_diff[,2])
SL_diff <- melt(SL_diff)
# barplot
# FIX: the labs() call had been half commented out, leaving its arguments
# (`title = ...`, `subtitle = ...`, `caption = ...`) dangling at top level,
# which is a syntax error that prevented the whole file from parsing.
ggplot(SL_melt, aes(value, variable, fill = diff)) +
  geom_col(width = 0.4, position = position_dodge(), alpha = 0.7) +
  geom_text(aes(label=ASVs), size=3, vjust=-0.2) +
  scale_fill_brewer(palette = "Set1") +
  labs(x="Levels changed", y="ASVs (n)",
       title="Midori - Bold taxonomy Difference",
       subtitle=paste0("Midori and Bold levels\nTotal ASVs = ", nrow(midori), "\n",
                       "ASVs without levels change across data bases: ",no_change),
       caption=paste0("Number of ASVs with levels change across databases: ","\n",
                      "Midori: ", midori_lc, " and Bold: ", bold_lc)) +
  theme()
# NOTE(review): `ASVs`, `diff`, `no_change`, `midori_lc` and `bold_lc` are
# not defined anywhere in this script -- presumably leftovers from an
# interactive session; confirm before running.
# ALLUVIAL
# NOTE(review): select() keeps only rank_x/rank_y/rank_z, yet the mutate()
# below references diff/weight/midori/bold -- this chunk cannot run as
# written; presumably exploratory leftovers.
#filter(data, weight != 'shared') %>%
data %>%
  select(rank_x, rank_y, rank_z) %>%
  mutate(diff = abs(diff), weight = as.factor(weight),
         midori = factor(midori, levels = TL[-1]),
         bold = factor(bold, levels = TL[-1])) -> alluvia
# library('ggalluvial')
ggplot(data = alluvia,
       aes(axis1 = midori, axis2 = bold)) +
  geom_alluvium(aes(fill = weight)) +
  geom_stratum() +
  scale_fill_brewer(type = "qual", palette = "Set2") +
  geom_text(stat = "stratum", label.strata = TRUE) +
  # theme(legend.position = "bottom") +
  theme_minimal()
library(alluvial)
# Flows between the three lineages, weighted by the depth-difference score;
# zero-difference rows are hidden.
alluvial(select(data, lineage_x, lineage_y, lineage_z), freq=data$diff_x_y_z,
         col = ifelse(data$diff_x_y_z > 0, "orange", "grey"),
         border = ifelse(data$diff_x_y_z > 0, "orange", "grey"),
         hide = data$diff_x_y_z == 0,
         cex = 0.7)
head(as.data.frame(Titanic, stringsAsFactors = FALSE))
#
# Parallel Coordinates
library(GGally)
paralle <- select(data, rank_x, rank_y, rank_z, diff_x_y, diff_x_z)
# sampleTree = hclust(dist(paralle[,1:3]), method = "average");
# plot(sampleTree, main = "", sub="", xlab="", cex.lab = 1.5, cex.axis = 1.5, cex.main = 2)
cbind(data.frame(table(paralle$diff_x_y)), data.frame(table(paralle$diff_x_z)))
paralle$weight_x_y <- 'shared_x_y'
paralle[paralle$diff_x_y > 0 , 'weight_x_y'] <- 'midori'
paralle[paralle$diff_x_y < 0 , 'weight_x_y'] <- 'bold'
paralle$weight_x_z <- 'shared_x_z'
paralle[paralle$diff_x_z > 0 , 'weight_x_z'] <- 'coarbitrator'
paralle[paralle$diff_x_z < 0 , 'weight_x_z'] <- 'bold'
# NOTE(review): `data` has no "sex" column; the first ggparcoord() call
# looks like a template line left in by mistake.
ggparcoord(data, columns = 4:8, groupColumn = "sex")
ggparcoord(paralle, columns = 1:3, groupColumn = "weight_x_y")
# Lineplot: distribution of assignment-depth changes across databases
# (original comment translated from Spanish).
L_tables <- data.frame(Level=-5:5,
                       x_y=data.frame(table(data$diff_x_y))[,'Freq'],
                       x_z=data.frame(table(data$diff_x_z))[, 'Freq'])
L_tidy <- melt(L_tables, id.vars = 'Level',
               variable.name = 'diff',
               value.name = 'n')
# wrap by standar (BOLD) and reference (midori and GBank)
L_tidy$Ref <- 'Standard(BOLD)'
L_tidy[L_tidy$Level > 0, 'Ref'] <- 'Reference'
#
L_tidy$database <- ''
L_tidy[L_tidy$diff == 'x_y', 'database'] <- 'Midori'
L_tidy[L_tidy$diff == 'x_z', 'database'] <- 'GBank'
L_tidy$Level <- abs(L_tidy$Level)
L_tidy$Level <- factor(L_tidy$Level, levels = 0:5, ordered = T)
ggplot(subset(L_tidy, Level > 0), aes(x=Level, y=n, group=database, fill=database, color=database)) +
  geom_point(size=2, alpha=0.6) + geom_line(size=1, alpha=0.6, linetype="dashed") +
  geom_text(data = subset(L_tidy, Level > 0), aes(label=n), size=4, vjust=1, color = 'black') +
  # geom_text(aes(label=diff), size=3, vjust=-0.2, color = 'black') +
  scale_color_brewer(palette = "Set1") +
  #theme(panel.grid = element_blank(),
  #      panel.background = element_blank()) +
  labs(y="ASVs", title="Taxonomy resolution Distribution",
       subtitle=paste0("Total ASVs in (run14) = ", nrow(midori), "\n",
                       "Undetermined Bold = ", n_undetermined_bold, "\n",
                       "Undetermined Midori = ", n_undetermined_midori, "\n",
                       "Undetermined GBank = ", n_undetermined_coarbitrator),
       caption = 'Reference panel show how best the database was against the standard \nwhereas the Standard(BOLD) panel shows how best the BOLD db was against either, Midori or GBank db') +
  facet_grid(~Ref)
|
265b9e56712c7b630f72db1d1575b3a5cb84df36
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/NRejections/examples/sim_data.Rd.R
|
a9b5f8a437eaae6fdd7484d46e6c65367fb015f7
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 250
|
r
|
sim_data.Rd.R
|
library(NRejections)
### Name: sim_data
### Title: Simulate MVN data
### Aliases: sim_data
### ** Examples
# Build a correlation matrix for 5 X and 2 Y variables where 8 of the 40
# X-Y pairs are correlated, then draw n = 50 multivariate-normal rows.
cor = make_corr_mat( nX = 5,
                     nY = 2,
                     rho.XX = -0.06,
                     rho.YY = 0.1,
                     rho.XY = -0.1,
                     prop.corr = 8/40 )
d = sim_data( n = 50, cor = cor )
558f1e3d1bdab98da17a21341fb9bc204ac58aa9
|
e231e5d9093ea588634d710a8b24e7c0e4dbf820
|
/Grenades.r
|
414fe026e404b29bc5743fe68dfcde3edbf9bdd8
|
[] |
no_license
|
dannyweiland/CSGO-Analysis
|
938a9e739e72c24a6d0dcc8c41c023868922df89
|
5992cd6a8f02a8a76118e84bc46a16a863cad918
|
refs/heads/master
| 2023-02-22T14:40:36.888008
| 2021-01-26T20:29:39
| 2021-01-26T20:29:39
| 292,067,050
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,694
|
r
|
Grenades.r
|
#Mapping Grenade Spots in CSGO
#My Goal here is to see where the most common spots to throw and be hit by grenades are by visualizing them on the game map.
#Import Libraries
library(readxl)
library(dplyr)
library(ggplot2)
library(ggpubr)
library(png)
#Import Data
# NOTE(review): the CSV and PNG files are resolved relative to the working
# directory -- confirm they ship alongside this script.
grenades1 <- read.csv("mm_grenades_demos.csv")
de_dust2_map <- png::readPNG("de_dust2.png")
de_nuke_map <- png::readPNG("de_new_nuke.png")
de_cache_map <- png::readPNG("de_cache.png")
de_cache_map_test <- png::readPNG("de_cache_test.png")
#Plotting on map -----
#Dust2 Attacker Plot
# Attacker positions on Dust2, jittered over the map image as background.
dust2 <- grenades1 %>%
  filter(map == "de_dust2") %>%
  ggplot(., aes(att_pos_x, att_pos_y)) +
  background_image(de_dust2_map)+
  geom_jitter(alpha = 0.1, color = "red")
dust2
#Nuke Attacker Plot
# Same plot for Nuke; assigned but not printed here (see scaling note below).
nuke <- grenades1 %>%
  filter(map == "de_nuke") %>%
  ggplot(., aes(att_pos_x, att_pos_y)) +
  background_image(de_nuke_map)+
  geom_jitter(alpha = 0.1, color = "red")
# The nuke plot doesn't line up with the map image yet; to rescale it we
# need the min/max attacker coordinates observed on each map.
mapScale <- grenades1 %>%
  group_by(map) %>%
  summarise(
    max_att_pos_x = max(att_pos_x),
    max_att_pos_y = max(att_pos_y),
    min_att_pos_x = min(att_pos_x),
    min_att_pos_y = min(att_pos_y)
  ) %>%
  arrange(map)
#Increase the value to essentially "shrink" the size of the plot relative to the image
#Cache Attacker Plot
# FIX: the pipeline previously ended with a trailing "+" that pulled the
# next line (`cache`) into the expression, referencing the object before it
# exists and failing with "object 'cache' not found".
cache <- grenades1 %>%
  filter(map == "de_cache") %>%
  ggplot(., aes(att_pos_x, att_pos_y)) +
  background_image(de_cache_map_test)+
  geom_jitter(color = "red")
cache
#When completed, re-add alpha = .1 next to color to show density
|
a9038fdaf39f4029a232ad4bdb58d00776602380
|
92e597e4ffc9b52cfb6b512734fb10c255543d26
|
/tests/testthat/test-function-hsSubstSpecChars.R
|
98e9958c40a98c17a2433a6211e86a84b0defa3a
|
[
"MIT"
] |
permissive
|
KWB-R/kwb.utils
|
3b978dba2a86a01d3c11fee1fbcb965dd15a710d
|
0930eaeb9303cd9359892c1403226a73060eed5b
|
refs/heads/master
| 2023-05-12T15:26:14.529039
| 2023-04-21T04:28:29
| 2023-04-21T04:28:29
| 60,531,844
| 9
| 1
|
MIT
| 2023-04-21T04:28:30
| 2016-06-06T13:52:43
|
R
|
UTF-8
|
R
| false
| false
| 132
|
r
|
test-function-hsSubstSpecChars.R
|
test_that("hsSubstSpecChars() works", {
f <- kwb.utils:::hsSubstSpecChars
expect_error(expect_warning(f(), "deprecated"))
})
|
298096b9b3f206ad01724115438057f37fe634bd
|
9fbd7cafab56b8cb58ca7385a726a0070d9e050d
|
/man/sen_nominal_votes.Rd
|
d29d1a9787536678eaebada87c743ba40004e737
|
[] |
no_license
|
duarteguilherme/congressbr
|
6f343935a7734dfac70c6794a031db614dafd248
|
e9f05428e877f56d31966b14ca00b4ec825fabf5
|
refs/heads/master
| 2022-11-22T05:40:27.177434
| 2020-07-14T23:20:22
| 2020-07-14T23:20:22
| 83,827,931
| 17
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,213
|
rd
|
sen_nominal_votes.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sen_nominal_votes.R
\docType{data}
\name{sen_nominal_votes}
\alias{sen_nominal_votes}
\title{Nominal votes in the Brazilian Federal Senate}
\format{
A data frame with 60691 rows and 8 variables
}
\usage{
sen_nominal_votes
}
\value{
\itemize{
\item{\code{vote_date: }}{\code{POSIXct}, date the vote took place.}
\item{\code{bill_id: }}{id of the bill in the Senate API database.}
\item{\code{bill: }}{bill type, year and number.}
\item{\code{legislature: }}{legislature number.}
\item{\code{senator_id: }}{unique id of the senator.}
\item{\code{senator_name: }}{the senator's name.}
\item{\code{senator_vote: }}\code{numeric}{vote cast. 1 = "yes"; 0 = "no", NA = other.}
\item{\code{senator_party: }}{political party the senator was in when the vote took place.}
\item{\code{senator_state: }}{state the senator represented when the vote took place.}
}
}
\description{
This is a dataset of the nominal votes in the Brazilian
Federal Senate, from all those available on the API from 1991 onwards.
}
\note{
These data can easily be grouped by legislature if so desired, using the
\code{legislature} variable.
}
\keyword{datasets}
|
55b3f78be7c1825e0ac62ba5076b4f6d5a237e08
|
958894bf1eb0f125b34afccd4c760f941ceb4e99
|
/clustering.R
|
a331d55a9fe5002385486512facedf2be633c090
|
[] |
no_license
|
alex21th/bank-telemarketing
|
5fb85d76a75a3ae646344357391e2633d77f4cef
|
891e172e6935b4a38991172a88123bd18874c40f
|
refs/heads/master
| 2022-12-20T16:43:56.713145
| 2019-06-26T09:10:00
| 2019-06-26T09:10:00
| 298,642,447
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,928
|
r
|
clustering.R
|
# Set current directory to file location
setwd(dirname(rstudioapi::getSourceEditorContext()$path))
getwd()
# Load data
load(paste0(getwd(), '/dd_complete.RData'))
# Data dimensionality; hence p is number of predictors (without response value)
N <- nrow(dd.complete); p <- ncol(dd.complete) -1
str(dd.complete)
library(cluster)
library(dplyr)
library(ggplot2)
library(readr)
library(Rtsne)
# data = dd.complete
# Clustering on a reduced portion of the dataset
example <- createDataPartition(dd.complete$y, p = .2, list = FALSE)
results <- dd.complete[example,20]
example <- dd.complete[example,-20]
# Compute Gower distance
gower_dist <- daisy(example, metric = 'gower')
gower_mat <- as.matrix(gower_dist)
sil_width <- c(NA)
for (i in 2:10) {
pam_fit <- pam(gower_dist, diss = TRUE, k = i)
sil_width[i] <- pam_fit$silinfo$avg.width
}
plot(1:10, sil_width, xlab = 'Number of clusters',
ylab = 'Silhouette Width', col = 'red')
lines(1:10, sil_width, col = 'red')
k <- 4
pam_fit <- pam(gower_dist, diss = TRUE, k)
pam_results <- example %>%
mutate(cluster = pam_fit$clustering) %>%
group_by(cluster) %>%
do(the_summary = summary(.))
pam_results$the_summary
# Visualization in a lower dimensional space
tsne_obj <- Rtsne(gower_dist, is_distance = TRUE)
tsne_data <- tsne_obj$Y %>%
data.frame() %>%
setNames(c('X','Y')) %>%
mutate(cluster = factor(pam_fit$clustering))
ggplot(aes(x = X, y = Y), data = tsne_data) + geom_point(aes(color = cluster))
# Try to interpret our results with 2, 4 and 8 clusters
results2 <- pam_results$the_summary
table2 <- table(pam_fit$clustering, results)
results4 <- pam_results$the_summary
table4 <- table(pam_fit$clustering, results)
results8 <- pam_results$the_summary
table8 <- table(pam_fit$clustering, results)
# Visualize the results
results2
table2
results4
table4
results8
table8
# Proportion of class 'yes' in every cluster
table2[,2] / rowSums(table2) * 100
table4[,2] / rowSums(table4) * 100
table8[,2] / rowSums(table8) * 100
# It looks like clusters 3,4 (for k = 4) and 7,8 (for k = 8) have a much higher proportion of 'yes' ratio than the other clusters.
# So we conclude that there might be some characteristics of those clusters that specifically make costumers fall to category 'yes'.
# Try to visualize difference between cluster variables in order to classify between 'yes' or 'no
# We are going to take 4 as the number of clusters
aux <- cbind(example, pam_fit$clustering)
colnames(aux)[20] <- 'Cluster'
# Check the 'euribor' variance between clusters
boxplot(aux$euribor3m ~ aux$Cluster, main = 'Cluster comparision', xlab = 'Cluster', ylab = 'Euribor')
# Check the 'nr.employed' variance between clusters
boxplot(aux$nr.employed ~ aux$Cluster, main = 'Cluster comparision', xlab = 'Cluster', ylab = 'nr.employed')
# age
boxplot(aux$age ~ aux$Cluster)
|
1a9dac23654dcd11c36e99268228ffa7b3730538
|
54782788f33fa1ae0799d32c59d0d8567fa15451
|
/plot4.R
|
ed95f410258a3f66c515d397af7594f6522f1f6a
|
[] |
no_license
|
nschmidt13/ExData_Plotting1
|
df9aa3e2cb1a0e33ccb3d3713745a05bbd4c06ef
|
9737833d42bc10a43355dacc8d41c478f3195a84
|
refs/heads/master
| 2021-01-16T22:31:38.378516
| 2014-10-12T06:08:38
| 2014-10-12T06:08:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,051
|
r
|
plot4.R
|
#This function loads the data from the working directory and adds a time/date column and filters to the correct dates
getData <- function () {
# Create columns types vector
colTypes <- c("character", "character", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric", "numeric")
#Read in data from file in project folder
elecData <- read.table ("./household_power_consumption.txt", header = TRUE, sep=";", na.strings="?", colClasses=colTypes)
# Combine the data and time fields into one date/time field
elecData$dt <- as.POSIXlt(paste(elecData$Date, elecData$Time, sep=" "), format = "%d/%m/%Y %H:%M:%S")
# Set the start and end dates to be used to filter only dates to be used in plots
startDate <- as.POSIXlt("1/2/2007", format="%d/%m/%Y")
endDate <- as.POSIXlt("3/2/2007", format="%d/%m/%Y")
# filter data based on start and end dates above and return data set
d1 <- elecData[elecData$dt>=startDate, ]
d1 <<- d1[d1$dt<endDate, ]
d1
}
#This function creates a png of the plot and places it in the working directory
plot4png <- function () {
png(file = "./plot4.png")
# Plot 4
par(mfrow = c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))
with(d1, {
plot(dt, Global_active_power, type="l", ylab="Global Active Power (kilowatts)", xlab="")
plot(dt, Voltage, type="l", ylab="Voltage", xlab="")
plot(dt, Sub_metering_1, type="l", ylab="Energy sub metering", xlab="")
with(subset(d1), points(dt, Sub_metering_2, col = "red", type="l"))
with(subset(d1), points(dt, Sub_metering_3, col = "blue", type="l"))
legend("topright", lwd=1, col=c("black", "red", "blue"), legend=c("Sub_metering_1", "Sub_metering_2","Sub_metering_3"))
plot(dt, Global_reactive_power, type="l", ylab="Global Reactive Power (kilowatts)", xlab="")
})
dev.off()
}
#This funtion will get the data and create the plot in one funciton
plot4 <- function() { getData()
plot4png()}
|
0588c88bc7cf39a779eb18d9b76ca598bcdb706e
|
40ac18e5524b9d55e9d4f0095a518121eac92efb
|
/man/weiqqci.Rd
|
9a4251757fc0bdc3427775bbdf0fba716ac202df
|
[] |
no_license
|
vjcitn/barca
|
b57e32904638673951a29147322e725e3c708178
|
ef531c78b7f423a739421bf63e67fa731e043655
|
refs/heads/master
| 2021-01-14T02:39:59.799374
| 2020-06-12T16:39:13
| 2020-06-12T16:39:13
| 81,848,206
| 0
| 1
| null | 2017-03-30T17:39:14
| 2017-02-13T16:51:33
|
R
|
UTF-8
|
R
| false
| true
| 479
|
rd
|
weiqqci.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qq.R
\name{weiqqci}
\alias{weiqqci}
\title{plot a qqplot relative to a weibull distribution}
\usage{
weiqqci(x, conf.int = 0.95, ...)
}
\arguments{
\item{x}{vector of numbers}
\item{conf.int}{numeric confidence coefficient for EDF CIs}
\item{\dots}{not used}
}
\description{
plot a qqplot relative to a weibull distribution
}
\details{
The weibull parameters are estimated using survival::survreg
}
|
f8476e9c5a779061c77e1ce8a701060e210cde8b
|
daa39ef0a3e4d643bfdead74e0067ff6c856f304
|
/Chapter-10-Conditional and Block Statements R.R
|
701e10847e4e75bffe8b43a7dd7d738ffb8490cf
|
[] |
no_license
|
balajidileepkumar/R_ML
|
ca32105e78c41f17c1397078c34c478a84e38334
|
3da18dad0d173ae28c6552a5f9a22dd308180e1b
|
refs/heads/master
| 2021-06-30T03:32:27.506476
| 2020-12-23T15:00:16
| 2020-12-23T15:00:16
| 188,610,132
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,184
|
r
|
Chapter-10-Conditional and Block Statements R.R
|
## Conditional and Block statements
a = 10
b = 110
#if(a >b) { print(a) } else { print(b) }
if(a>b)
{
print(a)
} else if(a==b) {
print("equal")
} else {
print(b)
}
#Block Statements
###################
#Switch
###################
#a = switch(expression, case1,case2,case3,.....casen)
a = switch(3, FALSE,"case2","case3")
print(a)
a = 9
b = 10
a>b
c = switch(a>b, FALSE,TRUE)
print(c)
selector = "logical"
c = switch(selector,"color"="red", "number"=1, "logical"=TRUE)
print(c)
Invite = "Welcome"
y = switch(Invite, Hello = {
print("Hello to besant1")
print("Hello to R Lang1")
},
Welcome = {
print("Welcome to besant2")
print("Welcome to R Lang2")
"I get to returned to the caller"
})
y
##################
#For statement
##################
for(a in c(1,3,4))
{
print(sqrt(a))
}
V <- LETTERS
month_abb <- month.abb
months_name <- month.name
letters
vowels = c('a','e','i','o','u')
for(x in V)
{
print(x)
}
###################
#Repeat
###################
x = 1
repeat{
print(x*2)
x = x+1
if(x >1000){
break
}
}
###################
#while
###################
a = 100
b =1000
while(a < b)
{
a = a + 100
print(paste("a is lesser", a))
}
|
13154fecdfa2d20c93f3f016121626cee68ddc45
|
20a35379eb8907465759b651ca7c36c2e23b9a40
|
/tests/testthat/test-input-validate.R
|
db5103c24de5a7946487accf0fd7e547b14b535e
|
[
"MIT"
] |
permissive
|
jeffeaton/pkgapi
|
5c175067a168f449ca2fda1d0f47b40f497084c4
|
22eef9b9bd2d0671aa03f741cdecb32d0e7166a9
|
refs/heads/master
| 2022-12-14T02:26:57.385116
| 2020-09-01T08:44:14
| 2020-09-01T08:44:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 485
|
r
|
test-input-validate.R
|
context("input validation")
test_that("validate mime", {
expect_error(
pkgapi_input_validate_mime(NULL, "application/json"),
"Content-Type was not set (expected 'application/json')",
fixed = TRUE, class = "pkgapi_error")
expect_error(
pkgapi_input_validate_mime("application/octet-stream", "application/json"),
paste("Expected content type 'application/json'",
"but was sent 'application/octet-stream'"),
fixed = TRUE, class = "pkgapi_error")
})
|
72476f290ce22807983bbff935f5f8a21dc57d99
|
2bed5cc5dfabd54b0d5483030500dcd0d07a0f5a
|
/man/docaids.Rd
|
1f2b8ce52dffc3aa1a110a9b88cb0ba3ab7254bf
|
[] |
no_license
|
langfob/docaids
|
bec4f924b18943efcaac5761cb2ac590ab354a11
|
ce44c5ceb1b782dbebfb587ebff1ba0b13be290f
|
refs/heads/master
| 2021-01-21T10:05:10.153071
| 2017-03-05T01:52:41
| 2017-03-05T01:52:41
| 83,377,086
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 211
|
rd
|
docaids.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/docaids-package.r
\docType{package}
\name{docaids}
\alias{docaids}
\alias{docaids-package}
\title{docaids.}
\description{
docaids.
}
|
a14dae6a2825f07e4f02fbdc0e5eeef300e4ddbd
|
9582bd189e5620664b2235822af87bd2426d1aa3
|
/R/get_relevant_date.R
|
2e8167e32acac11e05800625925e7450ea827221
|
[] |
no_license
|
gottalottasoul/RiHana
|
6a30b217cf3e2eb65f622453a70e89fe87c7492a
|
97e12a1df12e2b85fea2d792a2c50225131c4ecd
|
refs/heads/master
| 2021-01-08T06:19:40.935972
| 2020-08-03T19:57:05
| 2020-08-03T19:57:05
| 241,939,098
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 415
|
r
|
get_relevant_date.R
|
get_relevant_date <-function()
{
relevenant_dates<-tibble::tibble(
yesterdays_date = Sys.Date()-1,
ytd_date=lubridate::floor_date(Sys.Date(),unit="year"),
yoy_date = lubridate::ceiling_date(Sys.Date()-365, unit="week",week_start = 1),
past_30_days = Sys.Date()-31,
past_7_days = Sys.Date()-8,
month_to_date = lubridate::floor_date(Sys.Date()-1, unit="month")
)
relevenant_dates
}
|
82d75fc6c5a80ba10515b600178015ce19619901
|
fc9df5d512d33a2805a0d1834095afebdbc8302f
|
/scripts/07_script_ensemble.R
|
a70181e699b77138904e38b0a20f053d0fd97d62
|
[
"MIT"
] |
permissive
|
jgiovanelli/enm_r
|
09ecc5f4d62db025762f4acf556b63af00ed1233
|
70cbabfa3179f6bdadc7f3754a98edc313bbd600
|
refs/heads/master
| 2021-01-20T13:23:36.319794
| 2017-02-20T12:32:36
| 2017-02-20T12:32:36
| null | 0
| 0
| null | null | null | null |
WINDOWS-1250
|
R
| false
| false
| 22,919
|
r
|
07_script_ensemble.R
|
### disciplina - modelagem de nicho ecológico: teoria e pratica ###
### ppg ecologia e biodiversidade - unesp 2017 ###
# Thadeu Sobral de Souza - thadeusobral@gmail.com
# Maurício Humberto Vancine - mauricio.vancine@gmail.com
###-----------------------------------------------------------------------------------------###
### 3. script ensemble ###
###-----------------------------------------------------------------------------------------###
# 1. limpara a memoria e carregar os pacotes
# limpar o workspace e aumentar a memoria para o r
rm(list = ls())
memory.limit(size = 10000000000000)
# instalar e carregar pacotes
# install.packages(c("raster", "rgdal", "vegan"), dep = T)
# carregar pacotes
library(raster) # manejo de arquivos sig
library(rgdal) # manejo de arquivos sig
library(vegan) # diversas analises multivariadas
# verificar pacotes carregados
search()
###------------------------------------------------------------------------------------------------------###
# diretorio
setwd("D:/90_aulas_montadas/_disciplina_enm_R_unesp_2017/scripts_r/03_saidas_enm")
# importando as tabelas com as avaliacoes
eval.bioclim <- read.table ("zEval_CCSM_Bioclim_B.balansae.txt")
eval.Gower <- read.table ("zEval_CCSM_Gower_B.balansae.txt")
eval.Maha <- read.table ("zEval_CCSM_Maha_B.balansae.txt")
eval.Maxent <- read.table ("zEval_CCSM_Maxent_B.balansae.txt")
eval.SVM <- read.table ("zEval_CCSM_SVM_B.balansae.txt")
###------------------------------------------------------------------------------------------------------###
### ensemble por frequencia - parte 1 ###
# bioclim
# importar os .asc
bioc1 <- raster ("CCSM_Bioclim_0k_B.balansae1.asc")
bioc2 <- raster ("CCSM_Bioclim_0k_B.balansae2.asc")
bioc3 <- raster ("CCSM_Bioclim_0k_B.balansae3.asc")
bioc4 <- raster ("CCSM_Bioclim_0k_B.balansae4.asc")
bioc5 <- raster ("CCSM_Bioclim_0k_B.balansae5.asc")
bioc211 <- raster ("CCSM_Bioclim_21k_B.balansae1.asc")
bioc212 <- raster ("CCSM_Bioclim_21k_B.balansae2.asc")
bioc213 <- raster ("CCSM_Bioclim_21k_B.balansae3.asc")
bioc214 <- raster ("CCSM_Bioclim_21k_B.balansae4.asc")
bioc215 <- raster ("CCSM_Bioclim_21k_B.balansae5.asc")
bioc61 <- raster ("CCSM_Bioclim_6k_B.balansae1.asc")
bioc62 <- raster ("CCSM_Bioclim_6k_B.balansae2.asc")
bioc63 <- raster ("CCSM_Bioclim_6k_B.balansae3.asc")
bioc64 <- raster ("CCSM_Bioclim_6k_B.balansae4.asc")
bioc65 <- raster ("CCSM_Bioclim_6k_B.balansae5.asc")
# soma dos mapas e corte do threshold 0
bioclim0k <- sum(bioc1 >= eval.bioclim[1, 1],
bioc2 >= eval.bioclim[2, 1],
bioc3 >= eval.bioclim[3, 1],
bioc4 >= eval.bioclim[4, 1],
bioc5 >= eval.bioclim[5, 1])
# plot
plot(bioclim0k)
# exportando o .asc da soma por frequencia
writeRaster (bioclim0k, "Bioclim_0k.asc", format="ascii")
# bioclim 21k
bioclim21k <- sum(bioc211 >= eval.bioclim[1, 1],
bioc212 >= eval.bioclim[2, 1],
bioc213 >= eval.bioclim[3, 1],
bioc214 >= eval.bioclim[4, 1],
bioc215 >= eval.bioclim[5, 1])
plot(bioclim21k)
writeRaster (bioclim21k, "Bioclim_21k.asc", format="ascii")
# bioclim 6k
bioclim6k <- sum(bioc61 >= eval.bioclim[1, 1],
bioc62 >= eval.bioclim[2, 1],
bioc63 >= eval.bioclim[3, 1],
bioc64 >= eval.bioclim[4, 1],
bioc65 >= eval.bioclim[5, 1])
plot(bioclim6k)
writeRaster (bioclim6k, "Bioclim_6k.asc", format = "ascii")
###------------------------------------------------------------------------------------------------------###
# gower
# importar
bioc1 <- raster ("CCSM_Gower_0k_B.balansae1.asc")
bioc2 <- raster ("CCSM_Gower_0k_B.balansae2.asc")
bioc3 <- raster ("CCSM_Gower_0k_B.balansae3.asc")
bioc4 <- raster ("CCSM_Gower_0k_B.balansae4.asc")
bioc5 <- raster ("CCSM_Gower_0k_B.balansae5.asc")
bioc211 <- raster ("CCSM_Gower_21K_B.balansae1.asc")
bioc212 <- raster ("CCSM_Gower_21K_B.balansae2.asc")
bioc213 <- raster ("CCSM_Gower_21K_B.balansae3.asc")
bioc214 <- raster ("CCSM_Gower_21K_B.balansae4.asc")
bioc215 <- raster ("CCSM_Gower_21K_B.balansae5.asc")
bioc61 <- raster ("CCSM_Gower_6K_B.balansae1.asc")
bioc62 <- raster ("CCSM_Gower_6K_B.balansae2.asc")
bioc63 <- raster ("CCSM_Gower_6K_B.balansae3.asc")
bioc64 <- raster ("CCSM_Gower_6K_B.balansae4.asc")
bioc65 <- raster ("CCSM_Gower_6K_B.balansae5.asc")
# soma
Gower0k <- sum(bioc1 >= eval.Gower[1, 1],
bioc2 >= eval.Gower[2, 1],
bioc3 >= eval.Gower[3, 1],
bioc4 >= eval.Gower[4, 1],
bioc5 >= eval.Gower[5, 1])
plot(Gower0k)
writeRaster (Gower0k, "Gower_0k.asc", format="ascii")
# gower 21k
Gower21k <- sum(bioc211 >= eval.Gower[1, 1],
bioc212 >= eval.Gower[2, 1],
bioc213 >= eval.Gower[3, 1],
bioc214 >= eval.Gower[4, 1],
bioc215 >= eval.Gower[5, 1])
plot(Gower21k)
writeRaster (Gower21k, "Gower_21k.asc", format="ascii")
# gower 6k
Gower6k <- sum(bioc61 >= eval.Gower[1, 1],
bioc62 >= eval.Gower[2, 1],
bioc63 >= eval.Gower[3, 1],
bioc64 >= eval.Gower[4, 1],
bioc65 >= eval.Gower[5, 1])
plot(Gower6k)
writeRaster (Gower6k, "Gower_6k.asc", format="ascii")
###------------------------------------------------------------------------------------------------------###
# mahalanobis
# importar
bioc1 <- raster ("CCSM_Maha_0k_B.balansae1.asc")
bioc2 <- raster ("CCSM_Maha_0k_B.balansae2.asc")
bioc3 <- raster ("CCSM_Maha_0k_B.balansae3.asc")
bioc4 <- raster ("CCSM_Maha_0k_B.balansae4.asc")
bioc5 <- raster ("CCSM_Maha_0k_B.balansae5.asc")
bioc211 <- raster ("CCSM_Maha_21K_B.balansae1.asc")
bioc212 <- raster ("CCSM_Maha_21K_B.balansae2.asc")
bioc213 <- raster ("CCSM_Maha_21K_B.balansae3.asc")
bioc214 <- raster ("CCSM_Maha_21K_B.balansae4.asc")
bioc215 <- raster ("CCSM_Maha_21K_B.balansae5.asc")
bioc61 <- raster ("CCSM_Maha_6K_B.balansae1.asc")
bioc62 <- raster ("CCSM_Maha_6K_B.balansae2.asc")
bioc63 <- raster ("CCSM_Maha_6K_B.balansae3.asc")
bioc64 <- raster ("CCSM_Maha_6K_B.balansae4.asc")
bioc65 <- raster ("CCSM_Maha_6K_B.balansae5.asc")
# soma
Maha0k <- sum(bioc1 >= eval.Maha[1, 1],
bioc2 >= eval.Maha[2, 1],
bioc3 >= eval.Maha[3, 1],
bioc4 >= eval.Maha[4, 1],
bioc5 >= eval.Maha[5, 1])
plot(Maha0k)
writeRaster (Maha0k, "Maha_0k.asc", format="ascii")
# maha 21k
Maha21k <- sum(bioc211 >= eval.Maha[1, 1],
bioc212 >= eval.Maha[2, 1],
bioc213 >= eval.Maha[3, 1],
bioc214 >= eval.Maha[4, 1],
bioc215 >= eval.Maha[5, 1])
plot(Maha21k)
writeRaster (Maha21k, "Maha_21k.asc", format="ascii")
# maha 6k
Maha6k <- sum(bioc61 >= eval.Maha[1, 1],
bioc62 >= eval.Maha[2, 1],
bioc63 >= eval.Maha[3, 1],
bioc64 >= eval.Maha[4, 1],
bioc65 >= eval.Maha[5, 1])
plot(Maha6k)
writeRaster (Maha6k, "Maha_6k.asc", format="ascii")
###------------------------------------------------------------------------------------------------------###
# maxent
# importar
bioc1 <- raster ("CCSM_Maxent_0k_B.balansae1.asc")
bioc2 <- raster ("CCSM_Maxent_0k_B.balansae2.asc")
bioc3 <- raster ("CCSM_Maxent_0k_B.balansae3.asc")
bioc4 <- raster ("CCSM_Maxent_0k_B.balansae4.asc")
bioc5 <- raster ("CCSM_Maxent_0k_B.balansae5.asc")
bioc211 <- raster ("CCSM_Maxent_21K_B.balansae1.asc")
bioc212 <- raster ("CCSM_Maxent_21K_B.balansae2.asc")
bioc213 <- raster ("CCSM_Maxent_21K_B.balansae3.asc")
bioc214 <- raster ("CCSM_Maxent_21K_B.balansae4.asc")
bioc215 <- raster ("CCSM_Maxent_21K_B.balansae5.asc")
bioc61 <- raster ("CCSM_Maxent_6K_B.balansae1.asc")
bioc62 <- raster ("CCSM_Maxent_6K_B.balansae2.asc")
bioc63 <- raster ("CCSM_Maxent_6K_B.balansae3.asc")
bioc64 <- raster ("CCSM_Maxent_6K_B.balansae4.asc")
bioc65 <- raster ("CCSM_Maxent_6K_B.balansae5.asc")
# soma
Maxent0k <- sum(bioc1 >= eval.Maxent[1, 1],
bioc2 >= eval.Maxent[2, 1],
bioc3 >= eval.Maxent[3, 1],
bioc4 >= eval.Maxent[4, 1],
bioc5 >= eval.Maxent[5, 1])
plot(Maxent0k)
writeRaster (Maxent0k, "Maxent_0k.asc", format="ascii")
# maxent 21k
Maxent21k <- sum(bioc211 >= eval.Maxent[1, 1],
bioc212 >= eval.Maxent[2, 1],
bioc213 >= eval.Maxent[3, 1],
bioc214 >= eval.Maxent[4, 1],
bioc215 >= eval.Maxent[5, 1])
plot(Maxent21k)
writeRaster (Maxent21k, "Maxent_21k.asc", format="ascii")
# maxent 6k
Maxent6k <- sum(bioc61 >= eval.Maxent[1, 1],
bioc62 >= eval.Maxent[2, 1],
bioc63 >= eval.Maxent[3, 1],
bioc64 >= eval.Maxent[4, 1],
bioc65 >= eval.Maxent[5, 1])
plot(Maxent6k)
writeRaster (Maxent6k, "Maxent_6k.asc", format="ascii")
###------------------------------------------------------------------------------------------------------###
# svm
bioc1 <- raster ("CCSM_SVM_0k_B.balansae1.asc")
bioc2 <- raster ("CCSM_SVM_0k_B.balansae2.asc")
bioc3 <- raster ("CCSM_SVM_0k_B.balansae3.asc")
bioc4 <- raster ("CCSM_SVM_0k_B.balansae4.asc")
bioc5 <- raster ("CCSM_SVM_0k_B.balansae5.asc")
bioc211 <- raster ("CCSM_SVM_21K_B.balansae1.asc")
bioc212 <- raster ("CCSM_SVM_21K_B.balansae2.asc")
bioc213 <- raster ("CCSM_SVM_21K_B.balansae3.asc")
bioc214 <- raster ("CCSM_SVM_21K_B.balansae4.asc")
bioc215 <- raster ("CCSM_SVM_21K_B.balansae5.asc")
bioc61 <- raster ("CCSM_SVM_6K_B.balansae1.asc")
bioc62 <- raster ("CCSM_SVM_6K_B.balansae2.asc")
bioc63 <- raster ("CCSM_SVM_6K_B.balansae3.asc")
bioc64 <- raster ("CCSM_SVM_6K_B.balansae4.asc")
bioc65 <- raster ("CCSM_SVM_6K_B.balansae5.asc")
SVM0k <- sum(bioc1 >= eval.SVM[1, 1],
bioc2 >= eval.SVM[2, 1],
bioc3 >= eval.SVM[3, 1],
bioc4 >= eval.SVM[4, 1],
bioc5 >= eval.SVM[5, 1])
plot(SVM0k)
writeRaster (SVM0k, "SVM_0k.asc", format="ascii")
# svm 21k
SVM21k <- sum(bioc211 >= eval.SVM[1, 1],
bioc212 >= eval.SVM[2, 1],
bioc213 >= eval.SVM[3, 1],
bioc214 >= eval.SVM[4, 1],
bioc215 >= eval.SVM[5, 1])
plot(SVM21k)
writeRaster (SVM21k, "SVM_21k.asc", format="ascii")
# svm 6k
SVM6k <- sum(bioc61 >= eval.SVM[1, 1],
bioc62 >= eval.SVM[2, 1],
bioc63 >= eval.SVM[3, 1],
bioc64 >= eval.SVM[4, 1],
bioc65 >= eval.SVM[5, 1])
plot(SVM6k)
writeRaster (SVM6k, "SVM_6k.asc", format="ascii")
###------------------------------------------------------------------------------------------------------###
### ensamble por frequencia - parte 2 ###
# somando todos os algoritmos
# 0k
m1 <- raster ("Bioclim_0k.asc")
m2 <- raster ("Gower_0k.asc")
m3 <- raster ("Maha_0k.asc")
m4 <- raster ("Maxent_0k.asc")
m5 <- raster ("SVM_0k.asc")
ensemble <- m1 + m2 + m3 + m4 + m5
plot(ensemble)
writeRaster (ensemble, "ensemble_0k.asc", format="ascii")
# 21k
m1 <- raster ("Bioclim_21k.asc")
m2 <- raster ("Gower_21k.asc")
m3 <- raster ("Maha_21k.asc")
m4 <- raster ("Maxent_21k.asc")
m5 <- raster ("SVM_21k.asc")
ensemble21k <- m1 + m2 + m3 + m4 + m5
plot(ensemble21k)
writeRaster (ensemble21k, "ensemble_21k.asc", format="ascii")
# 6k
m1 <- raster ("Bioclim_6k.asc")
m2 <- raster ("Gower_6k.asc")
m3 <- raster ("Maha_6k.asc")
m4 <- raster ("Maxent_6k.asc")
m5 <- raster ("SVM_6k.asc")
ensemble6k <- m1 + m2 + m3 + m4 + m5
plot(ensemble6k)
writeRaster (ensemble6k, "ensemble_6k.asc", format="ascii")
# dividindo por 25 para ter um mapa de 0 a 1
ensemble0kcut <- ensemble/25
writeRaster (ensemble0kcut, "ensemble0k_0_1.asc", format="ascii")
ensemble6kcut <-ensemble6k/25
writeRaster (ensemble6kcut, "ensemble6k_0_1.asc", format="ascii")
ensemble21kcut <- ensemble21k/25
writeRaster (ensemble21kcut, "ensemble21k_0_1.asc", format="ascii")
par(mfrow = c(1, 3))
plot(ensemble0kcut, main = "Atual")
plot(ensemble6kcut, main = "Holoceno")
plot(ensemble21kcut, main = "LGM")
###------------------------------------------------------------------------------------------------------###
### ensemble por media - parte 1 ###
# colocando todos os loops em um arquivo único
# bioclim
Bioclim0k1 <- raster("CCSM_Bioclim_0k_B.balansae1.asc")
Bioclim0k2 <- raster ("CCSM_Bioclim_0k_B.balansae2.asc")
Bioclim0k3 <- raster ("CCSM_Bioclim_0k_B.balansae3.asc")
Bioclim0k4 <- raster("CCSM_Bioclim_0k_B.balansae4.asc")
Bioclim0k5 <- raster ("CCSM_Bioclim_0k_B.balansae5.asc")
Bioclim6k1 <- raster("CCSM_Bioclim_6k_B.balansae1.asc")
Bioclim6k2 <- raster ("CCSM_Bioclim_6k_B.balansae2.asc")
Bioclim6k3 <- raster ("CCSM_Bioclim_6k_B.balansae3.asc")
Bioclim6k4 <- raster("CCSM_Bioclim_6k_B.balansae4.asc")
Bioclim6k5 <- raster ("CCSM_Bioclim_6k_B.balansae5.asc")
Bioclim21k1 <- raster("CCSM_Bioclim_21k_B.balansae1.asc")
Bioclim21k2 <- raster ("CCSM_Bioclim_21k_B.balansae2.asc")
Bioclim21k3 <- raster ("CCSM_Bioclim_21k_B.balansae3.asc")
Bioclim21k4 <- raster("CCSM_Bioclim_21k_B.balansae4.asc")
Bioclim21k5 <- raster ("CCSM_Bioclim_21k_B.balansae5.asc")
Bioclim0k1 <- values (Bioclim0k1)
Bioclim0k2 <- values (Bioclim0k2)
Bioclim0k3 <- values (Bioclim0k3)
Bioclim0k4 <- values (Bioclim0k4)
Bioclim0k5 <- values (Bioclim0k5)
Bioclim6k1 <- values (Bioclim6k1)
Bioclim6k2 <- values (Bioclim6k2)
Bioclim6k3 <- values (Bioclim6k3)
Bioclim6k4 <- values (Bioclim6k4)
Bioclim6k5 <- values (Bioclim6k5)
Bioclim21k1 <- values (Bioclim21k1)
Bioclim21k2 <- values (Bioclim21k2)
Bioclim21k3 <- values (Bioclim21k3)
Bioclim21k4 <- values (Bioclim21k4)
Bioclim21k5 <- values (Bioclim21k5)
Bioclim0k <-cbind (Bioclim0k1, Bioclim0k2, Bioclim0k3, Bioclim0k4, Bioclim0k5)
dim(Bioclim0k)
Bioclim6k <-cbind (Bioclim6k1, Bioclim6k2, Bioclim6k3, Bioclim6k4, Bioclim6k5)
dim(Bioclim6k)
Bioclim21k <-cbind (Bioclim21k1, Bioclim21k2, Bioclim21k3, Bioclim21k4, Bioclim21k5)
dim(Bioclim21k)
# gower
Gower0k1 <- raster ("CCSM_Gower_0k_B.balansae1.asc")
Gower0k2 <- raster ("CCSM_Gower_0k_B.balansae2.asc")
Gower0k3 <- raster ("CCSM_Gower_0k_B.balansae3.asc")
Gower0k4 <- raster ("CCSM_Gower_0k_B.balansae4.asc")
Gower0k5 <- raster ("CCSM_Gower_0k_B.balansae5.asc")
Gower6k1 <- raster ("CCSM_Gower_6k_B.balansae1.asc")
Gower6k2 <- raster ("CCSM_Gower_6k_B.balansae2.asc")
Gower6k3 <- raster ("CCSM_Gower_6k_B.balansae3.asc")
Gower6k4 <- raster ("CCSM_Gower_6k_B.balansae4.asc")
Gower6k5 <- raster ("CCSM_Gower_6k_B.balansae5.asc")
Gower21k1 <- raster ("CCSM_Gower_21k_B.balansae1.asc")
Gower21k2 <- raster ("CCSM_Gower_21k_B.balansae2.asc")
Gower21k3 <- raster ("CCSM_Gower_21k_B.balansae3.asc")
Gower21k4 <- raster ("CCSM_Gower_21k_B.balansae4.asc")
Gower21k5 <- raster ("CCSM_Gower_21k_B.balansae5.asc")
Gower0k1 <- values (Gower0k1)
Gower0k2 <- values (Gower0k2)
Gower0k3 <- values (Gower0k3)
Gower0k4 <- values (Gower0k4)
Gower0k5 <- values (Gower0k5)
Gower6k1 <- values (Gower6k1)
Gower6k2 <- values (Gower6k2)
Gower6k3 <- values (Gower6k3)
Gower6k4 <- values (Gower6k4)
Gower6k5 <- values (Gower6k5)
Gower21k1 <- values (Gower21k1)
Gower21k2 <- values (Gower21k2)
Gower21k3 <- values (Gower21k3)
Gower21k4 <- values (Gower21k4)
Gower21k5 <- values (Gower21k5)
Gower0k <- cbind(Gower0k1, Gower0k2, Gower0k3, Gower0k4, Gower0k5)
dim(Gower0k)
Gower6k <- cbind(Gower6k1, Gower6k2, Gower6k3, Gower6k4, Gower6k5)
dim(Gower6k)
Gower21k <- cbind(Gower21k1, Gower21k2, Gower21k3, Gower21k4, Gower21k5)
dim(Gower21k)
# mahanalobis
Maha0k1 <- raster ("CCSM_Maha_0k_B.balansae1.asc")
Maha0k2 <- raster ("CCSM_Maha_0k_B.balansae2.asc")
Maha0k3 <- raster ("CCSM_Maha_0k_B.balansae3.asc")
Maha0k4 <- raster ("CCSM_Maha_0k_B.balansae4.asc")
Maha0k5 <- raster ("CCSM_Maha_0k_B.balansae5.asc")
Maha6k1 <- raster ("CCSM_Maha_6k_B.balansae1.asc")
Maha6k2 <- raster ("CCSM_Maha_6k_B.balansae2.asc")
Maha6k3 <- raster ("CCSM_Maha_6k_B.balansae3.asc")
Maha6k4 <- raster ("CCSM_Maha_6k_B.balansae4.asc")
Maha6k5 <- raster ("CCSM_Maha_6k_B.balansae5.asc")
Maha21k1 <- raster ("CCSM_Maha_21k_B.balansae1.asc")
Maha21k2 <- raster ("CCSM_Maha_21k_B.balansae2.asc")
Maha21k3 <- raster ("CCSM_Maha_21k_B.balansae3.asc")
Maha21k4 <- raster ("CCSM_Maha_21k_B.balansae4.asc")
Maha21k5 <- raster ("CCSM_Maha_21k_B.balansae5.asc")
Maha0k1 <- values (Maha0k1)
Maha0k2 <- values (Maha0k2)
Maha0k3 <- values (Maha0k3)
Maha0k4 <- values (Maha0k4)
Maha0k5 <- values (Maha0k5)
Maha6k1 <- values (Maha6k1)
Maha6k2 <- values (Maha6k2)
Maha6k3 <- values (Maha6k3)
Maha6k4 <- values (Maha6k4)
Maha6k5 <- values (Maha6k5)
Maha21k1 <- values (Maha21k1)
Maha21k2 <- values (Maha21k2)
Maha21k3 <- values (Maha21k3)
Maha21k4 <- values (Maha21k4)
Maha21k5 <- values (Maha21k5)
Maha0k <- cbind(Maha0k1, Maha0k2, Maha0k3, Maha0k4, Maha0k5 )
dim(Maha0k)
Maha6k <- cbind(Maha6k1, Maha6k2, Maha6k3, Maha6k4, Maha6k5)
dim(Maha6k)
Maha21k <- cbind(Maha21k1, Maha21k2, Maha21k3, Maha21k4, Maha21k5)
dim(Maha21k)
# maxent
Maxent0k1 <- raster ("CCSM_Maxent_0k_B.balansae1.asc")
Maxent0k2 <- raster ("CCSM_Maxent_0k_B.balansae2.asc")
Maxent0k3 <- raster ("CCSM_Maxent_0k_B.balansae3.asc")
Maxent0k4 <- raster ("CCSM_Maxent_0k_B.balansae4.asc")
Maxent0k5 <- raster ("CCSM_Maxent_0k_B.balansae5.asc")
Maxent6k1 <- raster ("CCSM_Maxent_6k_B.balansae1.asc")
Maxent6k2 <- raster ("CCSM_Maxent_6k_B.balansae2.asc")
Maxent6k3 <- raster ("CCSM_Maxent_6k_B.balansae3.asc")
Maxent6k4 <- raster ("CCSM_Maxent_6k_B.balansae4.asc")
Maxent6k5 <- raster ("CCSM_Maxent_6k_B.balansae5.asc")
Maxent21k1 <- raster ("CCSM_Maxent_21k_B.balansae1.asc")
Maxent21k2 <- raster ("CCSM_Maxent_21k_B.balansae2.asc")
Maxent21k3 <- raster ("CCSM_Maxent_21k_B.balansae3.asc")
Maxent21k4 <- raster ("CCSM_Maxent_21k_B.balansae4.asc")
Maxent21k5 <- raster ("CCSM_Maxent_21k_B.balansae5.asc")
Maxent0k1 <- values (Maxent0k1)
Maxent0k2 <- values (Maxent0k2)
Maxent0k3 <- values (Maxent0k3)
Maxent0k4 <- values (Maxent0k4)
Maxent0k5 <- values (Maxent0k5)
Maxent6k1 <- values (Maxent6k1)
Maxent6k2 <- values (Maxent6k2)
Maxent6k3 <- values (Maxent6k3)
Maxent6k4 <- values (Maxent6k4)
Maxent6k5 <- values (Maxent6k5)
Maxent21k1 <- values (Maxent21k1)
Maxent21k2 <- values (Maxent21k2)
Maxent21k3 <- values (Maxent21k3)
Maxent21k4 <- values (Maxent21k4)
Maxent21k5 <- values (Maxent21k5)
Maxent0k <- cbind(Maxent0k1, Maxent0k2, Maxent0k3, Maxent0k4, Maxent0k5 )
dim(Maxent0k)
Maxent6k <- cbind(Maxent6k1, Maxent6k2, Maxent6k3, Maxent6k4, Maxent6k5 )
dim(Maxent0k)
Maxent21k <- cbind(Maxent21k1, Maxent21k2, Maxent21k3, Maxent21k4, Maxent21k5 )
dim(Maxent0k)
# SVM predictions: same pattern as above -- load the five replicate rasters
# per period (0k, 6k, 21k), extract cell values, and column-bind them into
# one cells x replicates matrix per period.
SVM0k1 <- raster ("CCSM_SVM_0k_B.balansae1.asc")
SVM0k2 <- raster ("CCSM_SVM_0k_B.balansae2.asc")
SVM0k3 <- raster ("CCSM_SVM_0k_B.balansae3.asc")
SVM0k4 <- raster ("CCSM_SVM_0k_B.balansae4.asc")
SVM0k5 <- raster ("CCSM_SVM_0k_B.balansae5.asc")
SVM6k1 <- raster ("CCSM_SVM_6k_B.balansae1.asc")
SVM6k2 <- raster ("CCSM_SVM_6k_B.balansae2.asc")
SVM6k3 <- raster ("CCSM_SVM_6k_B.balansae3.asc")
SVM6k4 <- raster ("CCSM_SVM_6k_B.balansae4.asc")
SVM6k5 <- raster ("CCSM_SVM_6k_B.balansae5.asc")
SVM21k1 <- raster ("CCSM_SVM_21k_B.balansae1.asc")
SVM21k2 <- raster ("CCSM_SVM_21k_B.balansae2.asc")
SVM21k3 <- raster ("CCSM_SVM_21k_B.balansae3.asc")
SVM21k4 <- raster ("CCSM_SVM_21k_B.balansae4.asc")
SVM21k5 <- raster ("CCSM_SVM_21k_B.balansae5.asc")
# Replace each raster object by its vector of cell values.
SVM0k1 <- values (SVM0k1)
SVM0k2 <- values (SVM0k2)
SVM0k3 <- values (SVM0k3)
SVM0k4 <- values (SVM0k4)
SVM0k5 <- values (SVM0k5)
SVM6k1 <- values (SVM6k1)
SVM6k2 <- values (SVM6k2)
SVM6k3 <- values (SVM6k3)
SVM6k4 <- values (SVM6k4)
SVM6k5 <- values (SVM6k5)
SVM21k1 <- values (SVM21k1)
SVM21k2 <- values (SVM21k2)
SVM21k3 <- values (SVM21k3)
SVM21k4 <- values (SVM21k4)
SVM21k5 <- values (SVM21k5)
# One matrix per period: rows = grid cells, columns = replicates 1..5.
SVM0k <- cbind(SVM0k1, SVM0k2, SVM0k3, SVM0k4, SVM0k5 )
dim(SVM0k)
SVM6k <- cbind(SVM6k1, SVM6k2, SVM6k3, SVM6k4, SVM6k5 )
dim(SVM6k)
SVM21k <- cbind(SVM21k1, SVM21k2, SVM21k3, SVM21k4, SVM21k5 )
dim(SVM21k)
###------------------------------------------------------------------------------------------------------###
### ensemble by mean - part 2 ###
# Average the five replicate predictions of each algorithm, per cell and period.
head(Bioclim0k)
Bioclim.0k.mean <- apply(Bioclim0k, 1, mean)
Gower.0k.mean <- apply(Gower0k, 1, mean)
Maha.0k.mean <- apply(Maha0k, 1, mean)
Maxent.0k.mean <- apply(Maxent0k, 1, mean)
SVM.0k.mean <- apply(SVM0k, 1, mean)
Bioclim.6k.mean <- apply(Bioclim6k, 1, mean)
Gower.6k.mean <- apply(Gower6k, 1, mean)
Maha.6k.mean <- apply(Maha6k, 1, mean)
Maxent.6k.mean <- apply(Maxent6k, 1, mean)
SVM.6k.mean <- apply(SVM6k, 1, mean)
Bioclim.21k.mean <- apply(Bioclim21k, 1, mean)
Gower.21k.mean <- apply(Gower21k, 1, mean)
Maha.21k.mean <- apply(Maha21k, 1, mean)
Maxent.21k.mean <- apply(Maxent21k, 1, mean)
SVM.21k.mean <- apply(SVM21k, 1, mean)
# Collect the per-algorithm means into one matrix per period (cells x algorithms).
Output0k <- cbind(Bioclim = Bioclim.0k.mean, Gower = Gower.0k.mean, Maha = Maha.0k.mean,
                  SVM= SVM.0k.mean, Maxent = Maxent.0k.mean)
head(Output0k)
Output6k <- cbind(Bioclim = Bioclim.6k.mean, Gower = Gower.6k.mean, Maha = Maha.6k.mean,
                  SVM = SVM.6k.mean, Maxent = Maxent.6k.mean)
head(Output6k)
Output21k <- cbind(Bioclim = Bioclim.21k.mean, Gower = Gower.21k.mean, Maha = Maha.21k.mean,
                   SVM = SVM.21k.mean, Maxent = Maxent.21k.mean)
head(Output21k)
# Check the dimensions.
dim(Output0k)
dim(Output6k)
dim(Output21k)
# Attach geographic coordinates: any layer on the same grid gives the cell centers.
# NOTE(review): user-specific absolute path -- parameterize before reuse.
setwd("D:/90_aulas_montadas/_disciplina_enm_R_unesp_2017/scripts_r/01_dados")
env <- raster ("CCSM_0k_am_bio02.asc")
coords <- xyFromCell(env, 1:ncell(env))
Output0k <- cbind(coords, Output0k)
Output6k <- cbind(coords, Output6k)
Output21k <- cbind(coords, Output21k)
head(Output0k)
# Drop cells with NA predictions.
Output0k <- na.omit(Output0k)
Output21k <- na.omit(Output21k)
Output6k <- na.omit(Output6k)
nrow(Output0k)
nrow(Output6k)
nrow(Output21k)
# Coerce to data.frame.
Output0k <- as.data.frame(Output0k)
head(Output0k)
Output6k <- as.data.frame(Output6k)
Output21k <- as.data.frame(Output21k)
# Spatialize: promote to gridded spatial objects via the x/y columns.
gridded(Output0k) <- ~x + y
gridded(Output6k) <- ~x + y
gridded(Output21k) <- ~x + y
# Convert to raster stacks (one layer per algorithm).
output.0k <- stack(Output0k)
output.6k <- stack(Output6k)
output.21k <- stack(Output21k)
# plot
# NOTE(review): raster's stack plot manages its own layout; the par(mfrow)
# below may be ignored -- confirm the intended arrangement.
par(mfrow = c(2, 3))
plot(output.0k)
plot(output.6k)
plot(output.21k)
# Save the ensemble tables.
# NOTE(review): second user-specific path, apparently a different machine
# than the one used above -- verify before running end to end.
setwd("C:/Users/leec/Dropbox/disciplina_enm_R_unicamp_2016/scripts_r/saidas_enm")
write.table (Output0k, "Output0k.txt")
write.table (Output6k, "Output6k.txt")
write.table (Output21k, "Output21k.txt")
###------------------------------------------------------------------------------------------------------###
|
97d203ccab2bb36b4cf23c9631ce7ec98e086775
|
9f57e0ad44b78d809c262fa0ffb659232fdb8d5e
|
/implementations/set_ops.R
|
c6793869a15014a0f26548a5531c3fe36c393fcc
|
[] |
no_license
|
abhi8893/Intensive-R
|
c3439c177776f63705546c6666960fbc020c47e8
|
e340ad775bf25d5a17435f8ea18300013195e2c7
|
refs/heads/master
| 2020-09-22T00:57:36.118504
| 2020-08-31T09:23:57
| 2020-08-31T09:23:57
| 224,994,015
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,514
|
r
|
set_ops.R
|
# Implement set operations
find.unique <- function(v){
  # Return the distinct elements of `v`, keeping first-seen order.
  # Deliberate manual implementation (see file header); returns NULL for
  # empty input, matching the behavior of growing from c().
  seen <- c()
  for (item in v) {
    if (item %in% seen) {
      next
    }
    seen <- append(seen, item)
  }
  seen
}
has.elem <- function(elem, v){
  # TRUE if `elem` occurs anywhere in `v`, FALSE otherwise.
  # Linear scan with early exit once a match is found.
  for (candidate in v) {
    if (candidate == elem) {
      return(TRUE)
    }
  }
  FALSE
}
set.union <- function(A, B){
  # Union of two sets: the distinct elements appearing in either A or B,
  # in first-seen order (A's elements before B's new ones).
  unique(c(unique(A), unique(B)))
}
set.intersect <- function(A, B){
  # Intersection of two sets: distinct elements of A that also occur in B,
  # in A's first-seen order. Returns NULL when the intersection is empty.
  #
  # Fixes two defects of the previous version:
  #  - the zero-filled preallocation meant the value 0 could never be added
  #    (0 %in% int.set was TRUE from the start);
  #  - an empty intersection returned int.set[1:0] == 0 instead of empty.
  int.set <- c()
  for (a in unique(A)) {
    if (a %in% B) {
      int.set <- c(int.set, a)
    }
  }
  int.set
}
set.diff <- function(A, B){
  # Set difference: distinct elements of A that do not occur in B,
  # in A's first-seen order. Returns NULL when the difference is empty.
  #
  # Fixes three defects of the previous version:
  #  - an empty B made the inner loop never run, so nothing was emitted
  #    (the correct answer is all of A);
  #  - duplicates in A were emitted repeatedly;
  #  - an empty result returned diff.set[1:0] == 0 instead of empty.
  diff.set <- c()
  for (a in unique(A)) {
    if (!a %in% B) {
      diff.set <- c(diff.set, a)
    }
  }
  diff.set
}
# Ad-hoc check: expect 4 and 99 (elements of the first set absent from the second).
set.diff(c(1, 2, 3, 4, 99), c(5, 3, 2, 1, 12, 1, 87))
set.equal <- function(A, B){
  # TRUE when A and B contain exactly the same distinct elements
  # (duplicates and order ignored), FALSE otherwise.
  #
  # Fixes the previous version, which only verified that every element of the
  # LONGER vector occurred in the shorter one. With duplicates that check is
  # one-directional: set.equal(c(1, 1), c(1, 2)) wrongly returned TRUE, and an
  # empty shorter vector made any comparison return TRUE.
  ua <- unique(A)
  ub <- unique(B)
  if (length(ua) != length(ub)) {
    return(FALSE)
  }
  # Equal distinct counts + one-way containment implies set equality.
  for (a in ua) {
    if (!a %in% ub) {
      return(FALSE)
    }
  }
  TRUE
}
# Ad-hoc check: both vectors reduce to the same distinct elements {1, 2}.
set.equal(c(1, 2), c(1, 2, 1, 1))
|
f7a007f7e2536a1a34cc4209ced3a7369b8c8fd5
|
8b1c2469dc5ed9d7d9c94da87a6c0a1d47c8604e
|
/R/plot_signal.R
|
7f73b2d5c7811f7da81093483396c92a8d053042
|
[] |
no_license
|
dami82/CellSignalingTools
|
077c4496638a090debed5f6ef8774c45747f564d
|
0405a2443d3f380ce61d6d5469ffd96f92760c98
|
refs/heads/master
| 2016-08-11T13:49:45.042182
| 2016-03-19T22:51:53
| 2016-03-19T22:51:53
| 54,290,581
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,146
|
r
|
plot_signal.R
|
plot_signal <- function(signal_map) {
  # Visualize a nuclear/cytoplasmic signal segmentation as a 2x2 image panel:
  # (1) the raw input, (2) both channels overlaid in faux colors,
  # (3) nuclear signal only, (4) cytoplasmic signal only.
  #
  # Args:
  #   signal_map: list with matrices `nucl`, `cyto` and `input` (assumed to
  #               share dimensions -- TODO confirm upstream guarantee);
  #               zero-valued pixels are treated as background.
  # Returns: invisibly NULL; called for its plotting side effect.
  ch_1 <- signal_map$nucl
  ch_2 <- signal_map$cyto
  ch_ori <- signal_map$input
  # All-zero base layer used as a black backdrop for the overlays.
  ch_0 <- matrix(0, ncol = ncol(ch_1), nrow = nrow(ch_1))
  # Zero pixels become NA so image() leaves them transparent and the black
  # backdrop shows through.
  ch_1[ch_1 == 0] <- NA
  ch_2[ch_2 == 0] <- NA
  # Faux palettes (gplots::colorpanel).
  greys  <- colorpanel(n = 15, low = "black", high = "grey99")
  cyans  <- colorpanel(n = 15, low = "black", high = "cyan")
  greens <- colorpanel(n = 15, low = "black", high = "limegreen")
  # Save graphics state and guarantee it is restored even if plotting errors
  # (the old code restored par() only on the success path).
  curr_par <- par(no.readonly = TRUE)
  on.exit(par(curr_par), add = TRUE)
  par(mfrow = c(2, 2))
  image(ch_ori, col = greys, add = FALSE, axes = FALSE, main = "input image", useRaster = TRUE)
  # Overlay: nuclei in green, cytoplasm in cyan, on a black base.
  image(ch_0, col = "black", add = FALSE, axes = FALSE, main = "faux colors", useRaster = TRUE)
  image(ch_1, col = greens, add = TRUE, axes = FALSE, useRaster = TRUE)
  image(ch_2, col = cyans, add = TRUE, axes = FALSE, useRaster = TRUE)
  # Individual channels rendered in greyscale.
  image(ch_0, col = "black", add = FALSE, axes = FALSE, main = "nucl. signal", useRaster = TRUE)
  image(ch_1, col = greys, add = TRUE, axes = FALSE, useRaster = TRUE)
  image(ch_0, col = "black", add = FALSE, axes = FALSE, main = "cyto. signal", useRaster = TRUE)
  image(ch_2, col = greys, add = TRUE, axes = FALSE, useRaster = TRUE)
  invisible(NULL)
}
|
c5f37718cb2445d4f5540ebaae5e863f6e72b56a
|
97836dd5cc8776c185fd75b8637c89fcc58ced73
|
/R/packages.R
|
08ba3d1331673afe38069ab42a9f56d06e42a308
|
[] |
no_license
|
mihagazvoda/mini-bayes-shiny
|
141ccd4aaf5fea2dde51c575f4e3fc7dbedb1446
|
b9eb5bb3b42c69ea1c12ddc8f0997d892cf24e06
|
refs/heads/master
| 2023-02-05T11:10:01.283399
| 2021-01-01T17:11:08
| 2021-01-01T17:11:08
| 314,895,002
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 71
|
r
|
packages.R
|
# Package dependencies for the shiny app, attached once at startup.
library(shiny.semantic)
library(shiny)
library(dplyr)
library(ggplot2)
|
3bd6f86ac16232fba8d81c626f1904abac3aaaa1
|
f7cb5ffe2d36c1a529a9b74ce6408dc0e0ae2413
|
/man/plot_temporal_network.Rd
|
eb95c9f3f5ed0b9c6a44833d6038b4be33a7fdb7
|
[] |
no_license
|
alaindanet/fishcom
|
aa5b03f49c5e698d37003b6ffa75b88f3f18b1f4
|
76e30a7bf13cccabc1cd8fc230bb469276d7f846
|
refs/heads/master
| 2023-06-18T20:29:30.291089
| 2021-07-12T16:34:30
| 2021-07-12T16:34:30
| 160,175,823
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 632
|
rd
|
plot_temporal_network.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_methods.R
\name{plot_temporal_network}
\alias{plot_temporal_network}
\title{Temporal graph for a station}
\usage{
plot_temporal_network(net, meta, dead, ...)
}
\arguments{
\item{net}{data.frame containing year variable and network list}
\item{meta}{an object created by build_metaweb}
\item{dead}{character vector indicating which node are basal}
\item{...}{Other arguments to pass to set_layout_graph}
}
\value{
a tibble containing a list of graph in a net_graph column
}
\description{
Temporal graph for a station
}
\seealso{
set_layout_graph
}
|
e56e73e58d78d5f2147eb271078b1fddad9a1992
|
7797ee6832f469768dfaffbf63d7dae6bf3426ae
|
/R/IRR.R
|
d0eb286172c302c8890482d8ebeb91bd5bdb43a8
|
[] |
no_license
|
AmateurECE/economics
|
27dd67e0a04bdfd999d5e124d2114230ab518358
|
37109c8a8376627e840dc061b6fc802a64e8ff30
|
refs/heads/master
| 2020-05-02T23:39:26.440627
| 2020-02-04T19:02:35
| 2020-02-04T19:02:35
| 178,286,376
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 104
|
r
|
IRR.R
|
IRR <- function(cf) {
  # Internal rate of return: the discount rate at which the net present value
  # of the cash-flow vector `cf` equals zero. Solved numerically with
  # uniroot() over the bracket [1e-04, 100]; `cashFlows` is forwarded to
  # netPresentValue() (defined elsewhere in this package).
  solution <- uniroot(netPresentValue, cashFlows = cf, lower = 1e-04, upper = 100)
  solution$root
}
|
634c68216a63e019a65dee444e0794e0d48797c8
|
e9f06ea90b7c9127f3b05bafa7e0d34c7074d4e5
|
/Overview.R
|
d110f767e110627fdcb36bc5fe84fb0f498699c4
|
[] |
no_license
|
danielahertrich/masterthesis
|
1dee9b1800f8dee197972aa3d009000394c8e224
|
1c2e2c6c0efc3c22a778ae106e092d8f36e5183f
|
refs/heads/master
| 2021-01-01T19:47:57.860948
| 2017-07-29T22:42:29
| 2017-07-29T22:42:29
| 98,687,685
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,027
|
r
|
Overview.R
|
library(fields)  # image.plot() for the color-scale legends below
## set parameters
# art: decomposition method whose saved dictionary is loaded below;
# q / it: atom count and iteration count encoded in the .rda file name.
art <- "NMF" #"AMr","AMp","OLr","OLp", "PCA"
q <- 100 #300, 600
it <- 50 # 150
## load dictionary
# Loads an object `spdict` with components H (dictionary, atoms in rows)
# and W (scores) -- established by the $H / $W accesses below.
load(file=paste("dict_",art,q,"_it",it,".rda",sep = ""))
# load(file=paste("dict_",art,q,".rda",sep = "")) # for art="PCA"
dict <- spdict$H
Z <- t(dict)
score <- spdict$W
## Look at range of values in H and A
range(dict)
range(score)
###########################################
## Print first 20 atoms of dictionary
###############
## Prepare color palette for blue(neg)-white(0)-red(pos) depiction of dictonary elements
## (adjusted code from 'https://stackoverflow.com/questions/29262824/r-center-color-palette-on-0' for our purpose)
# NOTE(review): the palette resolution is tied to nrow(dict); presumably any
# sufficiently large constant would do -- confirm this is intentional.
nHalf <- round(nrow(dict)/2)
Min <- -0.01 #!# choose according to dictionary range
Max <- 0.5 #!#choose according to dictionary range
Thresh <-0
## Make vector of colors for values below threshold
rc1 <- colorRampPalette(colors = c("blue", "white"), space="Lab")(nHalf)
## Make vector of colors for values above threshold
rc2 <- colorRampPalette(colors = c("white", "red"), space="Lab")(nHalf)
rampcols <- c(rc1, rc2)
# Breakpoints: nHalf+1 below the threshold plus nHalf above it, i.e.
# 2*nHalf+1 breaks for the 2*nHalf colors (as image() requires).
rb1 <- seq(Min, Thresh, length.out=nHalf+1)
rb2 <- seq(Thresh, Max, length.out=nHalf+1)[-1]
rampbreaks <- c(rb1, rb2)
## dictionary elements (red and blue) (print first 20 atoms)
# Each atom is reshaped to a 92-row image; assumes ncol(dict) is a
# multiple of 92 -- TODO confirm.
png(filename = paste("Dictionary_elements_",art,q,"it",it,".png",sep = ""), width = 770, height = 530)
par(mfrow=c(4,5),mar=rep(0,4),oma=c(0.5,rep(0,2),4.7))
for (qc in 1:20){
  image(matrix(dict[qc,], nrow=92),col = rampcols,
        breaks = rampbreaks, axes=FALSE)
}
## add color scale
image.plot(matrix(dict[20,], nrow=92),col = rampcols,
           breaks = rampbreaks, axes=FALSE, axis.args = list(cex.axis = 2), legend.width = 3.5, legend.only = TRUE,
           smallplot= c(0.95,1,0.04,1))
dev.off()
## some info about dictionaries
# Per-atom sparsity counts: exact zeros, |.| <= 0.005, |.| <= 1.
# NOTE(review): the vectors are preallocated with nrow(score) but the loop
# runs over nrow(dict); if those differ the counts are mis-sized -- verify.
spH1 <- spH2 <- spH3 <- numeric(nrow(score))
for(i in 1:nrow(dict)){
  spH1[i] <- sum(dict[i,]==0)
  spH2[i] <- sum(abs(dict[i,])<=0.005)
  spH3[i] <- sum(abs(dict[i,]) <=1)
}
sum(spH1);sum(spH1)/length(dict);mean(spH1);range(spH1)
sum(spH2);sum(spH2)/length(dict);mean(spH2);range(spH2)
sum(spH3);sum(spH3)/length(dict);mean(spH3);range(spH3)
###########################################
## Print image of score matrix
###############
png(filename = paste("Scores",art,q,"it",it,".png",sep = ""), width = 520, height = 310)
par(mfrow=c(1,1),mar=c(2,2,1,0),oma=c(0,0,0,1.5))
image.plot(t(score)[,nrow(score):1],axis.args = list(cex.axis=1.5))
dev.off()
## some info about dictionaries
# Same sparsity summary for the score matrix.
spA1 <- spA2 <- spA3 <- numeric(nrow(score))
for(i in 1:nrow(score)){
  spA1[i] <- sum(score[i,]==0)
  spA2[i] <- sum(abs(score[i,])<=0.005)
  spA3[i] <- sum(abs(score[i,]) <=1)
}
sum(spA1);sum(spA1)/length(score);mean(spA1);range(spA1)
sum(spA2);sum(spA2)/length(score);mean(spA2);range(spA2)
sum(spA3);sum(spA3)/length(score);mean(spA3);range(spA3)
|
54183f583effae821ab75010b7f9414d40c4a066
|
de0b8ccead63362fbc4976b773cabacfa1bb9ff1
|
/generic.R
|
9e663835846bde9cc5937f9b3eac25422e61775e
|
[] |
no_license
|
fannyrancourt/NoncentralityParam_MixtureModels
|
45bdc68a8ccb5762dc875e4011a78ab64f3c8fd5
|
5bbf8fc798d32a0faf6f723f3200c146de80b5ae
|
refs/heads/master
| 2022-12-04T14:34:24.986111
| 2020-08-18T00:44:08
| 2020-08-18T00:44:08
| 288,314,543
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 724
|
r
|
generic.R
|
# S4 generic declarations used across the package.
# Each setGeneric() call is guarded by isGeneric() so that sourcing this file
# more than once does not re-register (and thereby reset) an existing generic.
if (!isGeneric("unbiased_estimator")) {
  setGeneric(
    "unbiased_estimator",
    function(object) standardGeneric("unbiased_estimator")
  )
}
if (!isGeneric("expected_value")) {
  setGeneric(
    "expected_value",
    function(object) standardGeneric("expected_value")
  )
}
if (!isGeneric("variance")) {
  setGeneric(
    "variance",
    function(object) standardGeneric("variance")
  )
}
if (!isGeneric("psi_lowerbound")) {
  setGeneric(
    "psi_lowerbound",
    function(object, obs) standardGeneric("psi_lowerbound")
  )
}
if (!isGeneric("plot_psi")) {
  setGeneric(
    "plot_psi",
    function(object, ...) standardGeneric("plot_psi")
  )
}
if (!isGeneric("unbiased")) {
  setGeneric(
    "unbiased",
    function(object, obs) standardGeneric("unbiased")
  )
}
|
7c88d85b36dc689fde6c88d0844fdaade4f6c50e
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.internet.of.things/R/iotdataplane_operations.R
|
1113c92dcaa6d109109949cd5bb4e261185e6c14
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| false
| 7,155
|
r
|
iotdataplane_operations.R
|
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common get_config new_operation new_request send_request
#' @include iotdataplane_service.R
NULL
#' Deletes the shadow for the specified thing
#'
#' @description
#' Deletes the shadow for the specified thing.
#'
#' For more information, see
#' [`delete_thing_shadow`][iotdataplane_delete_thing_shadow] in the AWS IoT
#' Developer Guide.
#'
#' @usage
#' iotdataplane_delete_thing_shadow(thingName, shadowName)
#'
#' @param thingName [required] The name of the thing.
#' @param shadowName The name of the shadow.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' payload = raw
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$delete_thing_shadow(
#' thingName = "string",
#' shadowName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotdataplane_delete_thing_shadow
iotdataplane_delete_thing_shadow <- function(thingName, shadowName = NULL) {
  # Generated by make.paws -- regenerate rather than hand-editing.
  # Describes the DeleteThingShadow REST operation (HTTP DELETE on the
  # thing's shadow resource), builds the request from the arguments, sends
  # it, and returns the parsed response (a list with a raw `payload`,
  # per the roxygen block above).
  op <- new_operation(
    name = "DeleteThingShadow",
    http_method = "DELETE",
    http_path = "/things/{thingName}/shadow",
    paginator = list()
  )
  input <- .iotdataplane$delete_thing_shadow_input(thingName = thingName, shadowName = shadowName)
  output <- .iotdataplane$delete_thing_shadow_output()
  config <- get_config()
  svc <- .iotdataplane$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
.iotdataplane$operations$delete_thing_shadow <- iotdataplane_delete_thing_shadow
#' Gets the shadow for the specified thing
#'
#' @description
#' Gets the shadow for the specified thing.
#'
#' For more information, see
#' [`get_thing_shadow`][iotdataplane_get_thing_shadow] in the AWS IoT
#' Developer Guide.
#'
#' @usage
#' iotdataplane_get_thing_shadow(thingName, shadowName)
#'
#' @param thingName [required] The name of the thing.
#' @param shadowName The name of the shadow.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' payload = raw
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$get_thing_shadow(
#' thingName = "string",
#' shadowName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotdataplane_get_thing_shadow
iotdataplane_get_thing_shadow <- function(thingName, shadowName = NULL) {
  # Generated by make.paws -- regenerate rather than hand-editing.
  # Describes the GetThingShadow REST operation (HTTP GET on the thing's
  # shadow resource), builds the request, sends it, and returns the parsed
  # response (a list with a raw `payload`, per the roxygen block above).
  op <- new_operation(
    name = "GetThingShadow",
    http_method = "GET",
    http_path = "/things/{thingName}/shadow",
    paginator = list()
  )
  input <- .iotdataplane$get_thing_shadow_input(thingName = thingName, shadowName = shadowName)
  output <- .iotdataplane$get_thing_shadow_output()
  config <- get_config()
  svc <- .iotdataplane$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
.iotdataplane$operations$get_thing_shadow <- iotdataplane_get_thing_shadow
#' Lists the shadows for the specified thing
#'
#' @description
#' Lists the shadows for the specified thing.
#'
#' @usage
#' iotdataplane_list_named_shadows_for_thing(thingName, nextToken,
#' pageSize)
#'
#' @param thingName [required] The name of the thing.
#' @param nextToken The token to retrieve the next set of results.
#' @param pageSize The result page size.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' results = list(
#' "string"
#' ),
#' nextToken = "string",
#' timestamp = 123
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$list_named_shadows_for_thing(
#' thingName = "string",
#' nextToken = "string",
#' pageSize = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotdataplane_list_named_shadows_for_thing
iotdataplane_list_named_shadows_for_thing <- function(thingName, nextToken = NULL, pageSize = NULL) {
  # Generated by make.paws -- regenerate rather than hand-editing.
  # Describes the ListNamedShadowsForThing REST operation (HTTP GET), with
  # manual pagination via `nextToken`/`pageSize` (no paginator configured);
  # returns the parsed response (`results`, `nextToken`, `timestamp`).
  op <- new_operation(
    name = "ListNamedShadowsForThing",
    http_method = "GET",
    http_path = "/api/things/shadow/ListNamedShadowsForThing/{thingName}",
    paginator = list()
  )
  input <- .iotdataplane$list_named_shadows_for_thing_input(thingName = thingName, nextToken = nextToken, pageSize = pageSize)
  output <- .iotdataplane$list_named_shadows_for_thing_output()
  config <- get_config()
  svc <- .iotdataplane$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
.iotdataplane$operations$list_named_shadows_for_thing <- iotdataplane_list_named_shadows_for_thing
#' Publishes state information
#'
#' @description
#' Publishes state information.
#'
#' For more information, see [HTTP
#' Protocol](https://docs.aws.amazon.com/iot/latest/developerguide/protocols.html#http)
#' in the AWS IoT Developer Guide.
#'
#' @usage
#' iotdataplane_publish(topic, qos, payload)
#'
#' @param topic [required] The name of the MQTT topic.
#' @param qos The Quality of Service (QoS) level.
#' @param payload The state information, in JSON format.
#'
#' @return
#' An empty list.
#'
#' @section Request syntax:
#' ```
#' svc$publish(
#' topic = "string",
#' qos = 123,
#' payload = raw
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotdataplane_publish
iotdataplane_publish <- function(topic, qos = NULL, payload = NULL) {
  # Generated by make.paws -- regenerate rather than hand-editing.
  # Describes the Publish REST operation (HTTP POST to an MQTT topic path),
  # builds the request, sends it, and returns the parsed (empty) response.
  op <- new_operation(
    name = "Publish",
    http_method = "POST",
    http_path = "/topics/{topic}",
    paginator = list()
  )
  input <- .iotdataplane$publish_input(topic = topic, qos = qos, payload = payload)
  output <- .iotdataplane$publish_output()
  config <- get_config()
  svc <- .iotdataplane$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
.iotdataplane$operations$publish <- iotdataplane_publish
#' Updates the shadow for the specified thing
#'
#' @description
#' Updates the shadow for the specified thing.
#'
#' For more information, see
#' [`update_thing_shadow`][iotdataplane_update_thing_shadow] in the AWS IoT
#' Developer Guide.
#'
#' @usage
#' iotdataplane_update_thing_shadow(thingName, shadowName, payload)
#'
#' @param thingName [required] The name of the thing.
#' @param shadowName The name of the shadow.
#' @param payload [required] The state information, in JSON format.
#'
#' @return
#' A list with the following syntax:
#' ```
#' list(
#' payload = raw
#' )
#' ```
#'
#' @section Request syntax:
#' ```
#' svc$update_thing_shadow(
#' thingName = "string",
#' shadowName = "string",
#' payload = raw
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotdataplane_update_thing_shadow
iotdataplane_update_thing_shadow <- function(thingName, shadowName = NULL, payload) {
  # Generated by make.paws -- regenerate rather than hand-editing.
  # Describes the UpdateThingShadow REST operation (HTTP POST on the thing's
  # shadow resource) carrying a required JSON `payload`; returns the parsed
  # response (a list with a raw `payload`, per the roxygen block above).
  op <- new_operation(
    name = "UpdateThingShadow",
    http_method = "POST",
    http_path = "/things/{thingName}/shadow",
    paginator = list()
  )
  input <- .iotdataplane$update_thing_shadow_input(thingName = thingName, shadowName = shadowName, payload = payload)
  output <- .iotdataplane$update_thing_shadow_output()
  config <- get_config()
  svc <- .iotdataplane$service(config)
  request <- new_request(svc, op, input, output)
  response <- send_request(request)
  return(response)
}
.iotdataplane$operations$update_thing_shadow <- iotdataplane_update_thing_shadow
|
ffc5ff55f35c4553f2c68bd6da6d18a8beb23b70
|
c8a3b33f0ec6b53c3280f6a65cfb18c115d0b615
|
/cp_average/cp_compare.R
|
01b766eea40361d7d60453d58ab4d94fd5d444c1
|
[
"MIT"
] |
permissive
|
marcelosalles/dissertacao
|
dd502c1ab78d12bfc4673fcec9816485992d358a
|
692f80a0d6b28a7e929dc86f6a684cfa84b59df1
|
refs/heads/master
| 2021-12-11T15:42:22.545985
| 2021-12-07T17:53:47
| 2021-12-07T17:53:47
| 190,596,533
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,968
|
r
|
cp_compare.R
|
# Compare per-file mean wind-pressure coefficients (Cp) from the TPU .mat
# database against precomputed averages (df_cp_average.csv), then search for
# the building configuration with the largest RMS deviation.
library(R.matlab)
library(ggplot2)
# NOTE(review): hard-coded absolute path, and '=' used for top-level
# assignment throughout; prefer a parameterized path and '<-'.
setwd('/media/marcelo/OS/Cps/')
df_average <- read.csv('df_cp_average.csv')
# NOTE(review): dir() treats the pattern as a regex, so the dot matches any
# character; use pattern = '\\.mat$' to match only .mat files.
tpu_data <- dir(pattern = '.mat')
first = TRUE
for (file in tpu_data){
  print(file)
  opened_file = readMat(file)
  df_location = opened_file$Location.of.measured.points
  df_cp = opened_file$Wind.pressure.coefficients
  # Time-average Cp at each measurement point (columns of the Cp matrix).
  mean_cp = apply(df_cp, 2, mean)
  # File-name prefix "Cp" marks the low-rise database.
  bldg_type = ifelse(substr(file,1,2)=="Cp","lowrise","highrise")
  height = opened_file$Building.height
  # First iteration creates the accumulator; later ones append.
  # NOTE(review): rbind() in a loop is O(n^2); collect rows in a list and
  # bind once if this becomes slow.
  if(first){
    df = data.frame(
      "file"=rep(file,length(mean_cp)),
      "height"=rep(height,length(mean_cp)),
      "bldg_type"=rep(bldg_type,length(mean_cp)),
      "ratio"=rep(opened_file$Building.breadth/opened_file$Building.depth,length(mean_cp)),
      "Point"=seq(length(mean_cp)),
      "facade"=df_location[4,],  # row 4 of the location matrix; presumably the facade id -- verify
      "angle"=opened_file$Wind.direction.angle,
      "Cp"=mean_cp)
    first = FALSE
  }else{
    df_i = data.frame(
      "file"=rep(file,length(mean_cp)),
      "height"=rep(height,length(mean_cp)),
      "bldg_type"=rep(bldg_type,length(mean_cp)),
      "ratio"=rep(opened_file$Building.breadth/opened_file$Building.depth,length(mean_cp)),
      "Point"=seq(length(mean_cp)),
      "facade"=df_location[4,],
      "angle"=opened_file$Wind.direction.angle,
      "Cp"=mean_cp)
    df = rbind(df,df_i)
  }
}
# Keep facades other than 5 and only the 30-degree wind angles
# (angle %% 30 == 0 would be a more compact filter).
df = subset(df, df$facade != 5 & (df$angle == 0 | df$angle == 30 | df$angle == 60 | df$angle == 90 |
                                    df$angle == 120 | df$angle == 150 | df$angle == 180 | df$angle == 210 |
                                    df$angle == 240 | df$angle == 270 | df$angle == 300 | df$angle == 330))
df$ratio = as.numeric(as.character(df$ratio))
df_average$ratio = as.numeric(as.character(df_average$ratio))
df$cp_average = NA
df$bldg_type = as.character(df$bldg_type)
# Look up the matching precomputed average for each row.
# NOTE(review): a subset() per row is O(n^2); merge()/join on
# (bldg_type, ratio, facade, angle) would do this in one pass.
for(row in 1:nrow(df)){
  average_cp = subset(
    df_average,
    df_average$bldg_type == df$bldg_type[row] &
      df_average$ratio == df$ratio[row] &
      df_average$facade == df$facade[row] &
      df_average$angle == df$angle[row]
  )
  df$cp_average[row] = average_cp$Cp
}
df$cp_diff = df$Cp - df$cp_average
hist(df$Cp)
hist(df$cp_average)
hist(df$cp_diff)
ratio = unique(df$ratio)
facade = unique(df$facade)
files = unique(df$file)
bldg_type = unique(df$bldg_type)
# Exhaustive search for the (building type, ratio, height) combination with
# the largest root-mean-square Cp deviation.
maxdiff = 0
ratio_worst = ''
bldg_worst = ''
height_worst = ''
for(bldg in bldg_type){
  ratio = unique(df$ratio[df$bldg_type == bldg])
  for(r in ratio){
    height = unique(df$height[df$bldg_type == bldg & df$ratio == r])
    for(h in height){
      dfplot = subset(df, df$bldg_type == bldg & df$ratio == r & df$height == h)
      # RMS of the deviations; '**' is parsed as '^' in R.
      diferenca = mean(dfplot$cp_diff**2)**(1/2)
      # print(diferenca)
      if(diferenca > maxdiff){
        ratio_worst = r
        bldg_worst = bldg
        height_worst = h
        maxdiff = diferenca
        print(maxdiff)
      }
    }
  }
}
# NOTE(review): aes() should reference the column name (aes(cp_diff)), not
# dfplot$cp_diff, to stay robust under faceting/filtering.
dfplot = subset(df, df$height == .2 & df$ratio == 2)
print(ggplot(dfplot,aes(dfplot$cp_diff)) + geom_histogram(binwidth = .01))
|
8cf417698b9c5f71fe50ce1d3df58ae83629603f
|
56b32941415e9abe063d6e52754b665bf95c8d6a
|
/R-Portable/App/R-Portable/library/igraph/tests/test_assortativity.R
|
88b3ea8a749bfa5e82795c4133be6e41fb822ccc
|
[
"LicenseRef-scancode-unknown-license-reference",
"GPL-2.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-newlib-historical",
"GPL-2.0-or-later",
"MIT"
] |
permissive
|
voltek62/seo-viz-install
|
37ed82a014fc36e192d9a5e5aed7bd45327c8ff3
|
e7c63f4e2e4acebc1556912887ecd6a12b4458a0
|
refs/heads/master
| 2020-05-23T08:59:32.933837
| 2017-03-12T22:00:01
| 2017-03-12T22:00:01
| 84,758,190
| 1
| 0
|
MIT
| 2019-10-13T20:51:49
| 2017-03-12T21:20:14
|
C++
|
UTF-8
|
R
| false
| false
| 1,694
|
r
|
test_assortativity.R
|
# Unit tests (testthat) for igraph's assortativity functions, checked against
# direct implementations of the underlying formulas.
context("assortativity")
test_that("assortativity works", {
  library(igraph)
  # C. elegans neural network, shipped as a gzipped GML fixture.
  g <- read_graph(f <- gzfile("celegansneural.gml.gz"), format="gml")
  # Reference implementation: correlation of (out-degree-1, in-degree-1)
  # over the directed edge list.
  assR <- function(graph) {
    indeg <- degree(graph, mode="in")
    outdeg <- degree(graph, mode="out")
    el <- as_edgelist(graph, names=FALSE)
    J <- outdeg[el[,1]]-1
    K <- indeg[el[,2]]-1
    num <- sum(J*K) - sum(J)*sum(K)/ecount(graph)
    den1 <- sum(J*J) - sum(J)^2/ecount(graph)
    den2 <- sum(K*K) - sum(K)^2/ecount(graph)
    num / sqrt(den1) / sqrt(den2)
  }
  asd <- assortativity_degree(g)
  as <- assortativity(g, degree(g, mode="out"), degree(g, mode="in"))
  as2 <- assR(g)
  expect_that(asd, equals(as))
  expect_that(asd, equals(as2))
  # Undirected variant, pinned to a known regression value.
  asu <- assortativity_degree(simplify(as.undirected(g, mode="collapse")))
  expect_that(asu, equals(-0.16319921031570466807))
  # Undirected power-grid fixture: undirected result must equal the directed
  # computation on the mutual-edge expansion.
  p <- read_graph(f <- gzfile("power.gml.gz"), format="gml")
  p.asd <- assortativity_degree(p)
  p.as <- assortativity(p, degree(p))
  p.as2 <- assR(as.directed(p, mode="mutual"))
  expect_that(p.asd, equals(p.as))
  expect_that(p.asd, equals(p.as2))
})
test_that("nominal assortativity works", {
  library(igraph)
  o <- read_graph(f <- gzfile("football.gml.gz"), format="gml")
  o <- simplify(o)
  an <- assortativity_nominal(o, V(o)$value+1)
  # Rebuild the category mixing matrix by hand and recompute nominal
  # assortativity as ( trace(e) - sum(e^2) ) / ( 1 - sum(e^2) ).
  # NOTE(review): nr=/nc= rely on partial argument matching; spell out
  # nrow=/ncol= for clarity.
  el <- as_edgelist(o, names=FALSE)
  etm <- matrix(0, nr=max(V(o)$value)+1, nc=max(V(o)$value)+1)
  for (e in 1:nrow(el)) {
    t1 <- V(o)$value[ el[e,1] ]+1
    t2 <- V(o)$value[ el[e,2] ]+1
    etm[t1, t2] <- etm[t1, t2] + 1
    etm[t2, t1] <- etm[t2, t1] + 1
  }
  etm <- etm/sum(etm)
  an2 <- ( sum(diag(etm))-sum(etm %*% etm) ) / ( 1-sum(etm %*% etm) )
  expect_that(an, equals(an2))
})
|
0e8e6d2bfd4169a5d46860959e8cac584d4664ab
|
91945b9420a2447e53265865b7f96b87f2a9c515
|
/server.R
|
a080cdb7392c5a83aca4ad3b2ab53ab2383f410a
|
[] |
no_license
|
ajaypillarisetti/squid
|
d8de50e6164f578004e438169a4fdbc0c497faad
|
dd1bbe55d670f895636df45e43ead459667e2180
|
refs/heads/master
| 2021-05-04T10:50:13.393879
| 2017-02-28T01:31:13
| 2017-02-28T01:31:13
| 43,640,478
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,529
|
r
|
server.R
|
### Ajay Pillarisetti, University of California, Berkeley, 2015
### V1.0N
shinyServer(function(input, output) {
  # Server logic for the SQUID tracer-decay app: the user uploads a CO2
  # logger file, marks two background ("zero") windows and one decay window
  # on the dygraphs, and the app fits a log-linear decay per channel to
  # estimate the air-exchange rate (the `ach` table below).
  # All timestamps are handled in Nepal time.
  Sys.setenv(TZ="Asia/Kathmandu")
  # read in data - for uploads
  datasetInput <- reactive({
    inFile <- input$files
    if (is.null(inFile)){return(NULL)}
    dta<- read.squid(inFile$datapath[1])
  })
  # Original name of the uploaded file; encodes "<hhid>-<sqd>-<location>-..."
  # (split on "-" in correctedData_lm below).
  datasetName <- reactive({
    inFile <- input$files
    inFile$name
  })
  data_cleaned <- reactive({
    if (is.null(datasetInput())) return(NULL)
    data_d <- datasetInput()[,with=F]
  })
  ####################
  ##### datasets #####
  ####################
  dataXTS.plainplot <- reactive({
    dta<-data_cleaned()
  })
  ####################
  ##### dygraphs ##### interactivity - to subset data to user-selected range
  ####################
  #timezone support is broken for these selections, so manually input the offset.
  # The dygraph range strings come back as ISO text; strip the "T" and
  # ".000Z", parse, then add 5.75 h (= 5 h 45 min, the Kathmandu UTC offset)
  # by hand.
  from_z1 <- reactive({
    if (!is.null(input$zeroPlot1_date_window))
      ymd_hms(strftime(gsub(".000Z","",gsub("T"," ",input$zeroPlot1_date_window[[1]])), "%Y-%m-%d %H:%M:%S"), tz='Asia/Kathmandu')+(5.75*60*60)
  })
  to_z1 <- reactive({
    if (!is.null(input$zeroPlot1_date_window))
      ymd_hms(strftime(gsub(".000Z","",gsub("T"," ",input$zeroPlot1_date_window[[2]])), "%Y-%m-%d %H:%M:%S"), tz='Asia/Kathmandu')+(5.75*60*60)
  })
  output$zero1Title <- renderText({paste("Zero 1:", paste(from_z1(), to_z1(), sep=" to ")," ")})
  from_z2 <- reactive({
    if (!is.null(input$zeroPlot2_date_window))
      ymd_hms(strftime(gsub(".000Z","",gsub("T"," ",input$zeroPlot2_date_window[[1]])), "%Y-%m-%d %H:%M:%S"), tz='Asia/Kathmandu')+(5.75*60*60)
  })
  to_z2 <- reactive({
    if (!is.null(input$zeroPlot2_date_window))
      ymd_hms(strftime(gsub(".000Z","",gsub("T"," ",input$zeroPlot2_date_window[[2]])), "%Y-%m-%d %H:%M:%S"), tz='Asia/Kathmandu')+(5.75*60*60)
  })
  output$zero2Title <- renderText({paste("Zero 2:", paste(from_z2(), to_z2(), sep=" to ")," ")})
  from_sample <- reactive({
    if (!is.null(input$samplePlot_date_window))
      ymd_hms(strftime(gsub(".000Z","",gsub("T"," ",input$samplePlot_date_window[[1]])), "%Y-%m-%d %H:%M:%S"), tz='Asia/Kathmandu')+(5.75*60*60)
  })
  to_sample <- reactive({
    if (!is.null(input$samplePlot_date_window))
      ymd_hms(strftime(gsub(".000Z","",gsub("T"," ",input$samplePlot_date_window[[2]])), "%Y-%m-%d %H:%M:%S"), tz='Asia/Kathmandu')+(5.75*60*60)
  })
  output$sampleTitle <- renderText({paste("Tracer Decay:", paste(from_sample(), to_sample(), sep=" to ")," ")})
  ####################
  ####### Boxes ######
  ####################
  # Per-channel mean concentrations over the first zero window, for display.
  output$background1 <- renderValueBox({
    datum <- as.data.table(data_cleaned())
    datum[,index:=ymd_hms(index,tz='Asia/Kathmandu')]
    mean_ppm1 <- round(datum[index>=from_z1() & index<=to_z1(), mean(ppm1, na.rm=T)],1)
    mean_ppm2 <- round(datum[index>=from_z1() & index<=to_z1(), mean(ppm2, na.rm=T)],1)
    mean_ppm3 <- round(datum[index>=from_z1() & index<=to_z1(), mean(ppm3, na.rm=T)],1)
    mean_ppm4 <- round(datum[index>=from_z1() & index<=to_z1(), mean(ppm4, na.rm=T)],1)
    infoBox(
      value = paste(mean_ppm1, mean_ppm2, mean_ppm3, mean_ppm4, sep="\n"),
      title = "Background (Zero 1)",
      icon = icon("time", lib='glyphicon'),
      color = "aqua"
    )
  })
  # Zero-window-1 channel means as a one-row data.table.
  # NOTE(review): using '<-' inside data.table() leaves the columns with
  # default names (V1..V4); bg_mean() relies on setnames() to fix that.
  bg1 <- reactive({
    datum <- as.data.table(data_cleaned())
    datum[,index:=ymd_hms(index,tz='Asia/Kathmandu')]
    data.table(
      mean_ppm1_1 <- round(datum[index>=from_z1() & index<=to_z1(), mean(ppm1, na.rm=T)],1),
      mean_ppm2_1 <- round(datum[index>=from_z1() & index<=to_z1(), mean(ppm2, na.rm=T)],1),
      mean_ppm3_1 <- round(datum[index>=from_z1() & index<=to_z1(), mean(ppm3, na.rm=T)],1),
      mean_ppm4_1 <- round(datum[index>=from_z1() & index<=to_z1(), mean(ppm4, na.rm=T)],1)
    )
  })
  output$background2 <- renderValueBox({
    datum <- as.data.table(data_cleaned())
    datum[,index:=ymd_hms(index,tz='Asia/Kathmandu')]
    mean_ppm1 <- round(datum[index>=from_z2() & index<=to_z2(), mean(ppm1, na.rm=T)],1)
    mean_ppm2 <- round(datum[index>=from_z2() & index<=to_z2(), mean(ppm2, na.rm=T)],1)
    mean_ppm3 <- round(datum[index>=from_z2() & index<=to_z2(), mean(ppm3, na.rm=T)],1)
    mean_ppm4 <- round(datum[index>=from_z2() & index<=to_z2(), mean(ppm4, na.rm=T)],1)
    infoBox(
      value = paste(mean_ppm1, mean_ppm2, mean_ppm3, mean_ppm4, sep="\n"),
      title = "Background (Zero 2)",
      icon = icon("time", lib='glyphicon'),
      color = "aqua"
    )
  })
  # Zero-window-2 channel means, same structure as bg1().
  bg2 <- reactive({
    datum <- as.data.table(data_cleaned())
    datum[,index:=ymd_hms(index,tz='Asia/Kathmandu')]
    data.table(
      mean_ppm1_2 <- round(datum[index>=from_z2() & index<=to_z2(), mean(ppm1, na.rm=T)],1),
      mean_ppm2_2 <- round(datum[index>=from_z2() & index<=to_z2(), mean(ppm2, na.rm=T)],1),
      mean_ppm3_2 <- round(datum[index>=from_z2() & index<=to_z2(), mean(ppm3, na.rm=T)],1),
      mean_ppm4_2 <- round(datum[index>=from_z2() & index<=to_z2(), mean(ppm4, na.rm=T)],1)
    )
  })
  # Per-channel background: the mean of the two zero-window means.
  bg_mean <- reactive({
    datum <- rbind(bg1(), bg2())
    setnames(datum, c("ppm1", 'ppm2', 'ppm3', 'ppm4'))
    datum <- datum[,list(
      ppm1=mean(ppm1, na.rm=T),
      ppm2=mean(ppm2, na.rm=T),
      ppm3=mean(ppm3, na.rm=T),
      ppm4=mean(ppm4, na.rm=T)
    )]
  })
  output$background_mean <- renderValueBox({
    infoBox(
      value = bg_mean(),
      title = "Background (Mean)",
      icon = icon("time", lib='glyphicon'),
      color = "aqua"
    )
  })
  ####################
  ##### Analysis #####
  ####################
  # Background-correct every channel, returned as an xts object for dygraphs.
  correctedData <- reactive({
    datum <- as.data.table(data_cleaned())
    datum[,index:=ymd_hms(index,tz='Asia/Kathmandu')]
    datum[,ppm1:=ppm1-bg_mean()[,ppm1]]
    datum[,ppm2:=ppm2-bg_mean()[,ppm2]]
    datum[,ppm3:=ppm3-bg_mean()[,ppm3]]
    datum[,ppm4:=ppm4-bg_mean()[,ppm4]]
    as.xts.data.table(datum)
  })
  # Fit log(ppm) ~ sample-index per channel over the decay window; |slope|
  # * 3600 converts the per-observation decay rate to per hour, which
  # assumes one observation per second -- TODO confirm the sampling rate.
  correctedData_lm <- reactive({
    datum <- as.data.table(correctedData())
    datum[,index:=ymd_hms(index,tz='Asia/Kathmandu')]
    constrained <- melt(datum, id.var='index')
    constrained <- constrained[index>=from_sample() & index<=to_sample()]
    constrained[,timeproxy:=1:length(value),by='variable']
    fits <- lmList(log(value) ~ timeproxy | variable, data=as.data.frame(constrained))
    ach <- as.data.table(t(abs(coef(fits)[2]*3600)))
    # Attach metadata parsed from the file name ("<hhid>-<sqd>-<location>-...")
    # and the user-selected window boundaries.
    ach <- ach[,`:=`(
      hhid=strsplit(datasetName(), "-")[[1]][1],
      location=strsplit(datasetName(), "-")[[1]][3],
      sqd=strsplit(datasetName(), "-")[[1]][2],
      date=as.character(constrained[,unique(ymd(substring(index,1,10)),tz='Asia/Kathmandu')]),
      file=datasetName(),
      zero1_start= as.character(from_z1()),
      zero1_stop= as.character(to_z1()),
      sample_start= as.character(from_sample()),
      sample_stop= as.character(to_sample()),
      zero2_start= as.character(from_z2()),
      zero2_stop= as.character(to_z2())
    )]
    # not all the same width, so we have an is.unsorted
    ach <- melt(ach, id.var=c("hhid","location","date", 'zero1_start', 'zero1_stop', 'sample_start', 'sample_stop', 'zero2_start', 'zero2_stop', 'file'), measure.var=grep("ppm",names(ach)))
    ach.all <- ach[,with=F]
    ach.all[, value:=round(value, 2)]
    ach.all
  })
  ####################
  ###### Tables ######
  ####################
  output$achTable <- renderTable({
    correctedData_lm()
  }, include.rownames=FALSE)
  ####################
  ####### PLOTS ######
  ####################
  # Raw data; also serves as the selection surface for the zero windows.
  output$plainPlot<-
    renderDygraph({
      dygraph(dataXTS.plainplot()) %>%
        dyOptions(axisLineWidth = 1.5, fillGraph = F, drawGrid = FALSE, useDataTimezone=T, colors = RColorBrewer::brewer.pal(4, "Set2"))
    })
  output$zeroPlot1<-
    renderDygraph({
      dygraph(dataXTS.plainplot()) %>%
        dyOptions(axisLineWidth = 1.5, fillGraph = F, drawGrid = FALSE, useDataTimezone=T, colors = RColorBrewer::brewer.pal(4, "Set2"))
    })
  output$zeroPlot2<-
    renderDygraph({
      dygraph(dataXTS.plainplot()) %>%
        dyOptions(axisLineWidth = 1.5, fillGraph = F, drawGrid = FALSE, useDataTimezone=T, colors = RColorBrewer::brewer.pal(4, "Set2"))
    })
  # Log-scale corrected data (decay appears linear); synced with the
  # linear-scale plot below via the shared dygraph group.
  output$samplePlot<-
    renderDygraph({
      dygraph(log(correctedData()), group = 'SamplePlots') %>%
        dyOptions(axisLineWidth = 1.5, fillGraph = F, drawGrid = FALSE, useDataTimezone=T, colors = RColorBrewer::brewer.pal(4, "Set2"))
    })
  output$samplePlotnolog<-
    renderDygraph({
      dygraph(correctedData(), group = 'SamplePlots') %>%
        dyOptions(axisLineWidth = 1.5, fillGraph = F, drawGrid = FALSE, useDataTimezone=T, colors = RColorBrewer::brewer.pal(4, "Set2"))
    })
  ##########################
  ####### DL HANDLERS ######
  ##########################
  # Download the fitted decay-rate summary as a CSV named after the upload.
  output$downloadCSV <- downloadHandler(
    filename = function() {paste(datasetName(), '.cleaned.csv', sep='') },
    content = function(file) {
      write.csv(correctedData_lm(), file, row.names=F)
    }
  )
})
|
3cabeb7f55c91cbb59ff93c0c9d944cd055fd6c4
|
b08f1485d0c2c18e57134d0d3d56220fdff3cc5e
|
/R/FL-Airline_Data/week1.R
|
3e4ccc06478e586e8801cbfba5e9ab19b67b6765
|
[] |
no_license
|
bithu30/myRepo
|
250c9383d31c296a9e116032aebf4ce947d1964e
|
64485bb10327ed3a84b3c15200b1dd6a90117a8e
|
refs/heads/master
| 2022-10-21T16:26:45.997395
| 2018-05-31T02:51:31
| 2018-05-31T02:51:31
| 41,030,617
| 5
| 18
| null | 2022-10-05T07:40:41
| 2015-08-19T11:43:00
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 724
|
r
|
week1.R
|
# Week 1 exploration of the 2008 airline on-time performance dataset.
# NOTE(review): hard-coded Windows path — confirm it matches the repo layout.
data_file <- "C://myRepo//R//FL-Airline_Data//2008.csv"
airline <- read.csv(data_file)

# Flights departing Indianapolis (IND): preview of the mask, then the count.
head(airline$Origin == "IND")
sum(airline$Origin == "IND")

# Flights touching Chicago O'Hare (ORD), as origin and as destination.
sum(airline$Origin == "ORD")
sum(airline$Dest == "ORD")

# Direct IND -> ORD flights.
sum(airline$Origin == "IND" & airline$Dest == "ORD")

# subset() evaluates its condition inside the data frame, so the column
# names can be used bare — no need for the redundant `airline$` prefix.
myIndieFlights <- subset(airline, Origin == "IND")
myIndieDest <- subset(airline, Dest == "IND")

# Monthly distribution of IND departures.
plot(table(myIndieFlights$Month))

# Mean departure delay out of Tupelo (TUP).
# NOTE(review): this returns NA if any DepDelay is missing — add
# na.rm = TRUE if a numeric answer is required regardless.
myTupFlights <- subset(airline, Origin == "TUP")
mean(myTupFlights$DepDelay)

head(myIndieFlights)

# IND departures before 06:00 (DepTime is encoded as hhmm).
sum(myIndieFlights$DepTime < 600, na.rm = TRUE)

# Flights into LAX overall, and ATL -> LAX departures before noon.
sum(airline$Dest == "LAX")
temp_flight <- subset(airline, Origin == "ATL" & Dest == "LAX")
sum(temp_flight$DepTime < 1200, na.rm = TRUE)
|
1c62d943ea35003ea7109102c9a2453921a26616
|
577606de99821f75d8aa0e7bb4b9a1d5fb1746d6
|
/cachematrix.R
|
90a452332bcc4d68548d4ae0bf8d07ff1e9b3a8b
|
[] |
no_license
|
milosgajdos/ProgrammingAssignment2
|
36221be8fa2885c10ece73225d85c5ae5e6ac477
|
325e4fcf13b9b6bbf0ccb566fcc3504afee4d070
|
refs/heads/master
| 2021-05-31T00:05:57.077423
| 2015-11-21T15:07:01
| 2015-11-21T15:07:01
| 46,615,974
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,790
|
r
|
cachematrix.R
|
# makeCacheMatrix accepts a matrix as an argument and generates a list object
# which provides various functions to retrieve and modify the underlying matrix
# and its inverse matrix
makeCacheMatrix <- function(x = matrix()) {
    # cached inverse; NULL means "not yet computed (or invalidated)"
    inv <- NULL
    # Replace the underlying matrix and invalidate the inverse cache.
    # If the new matrix is the same as the current one, the cache is kept.
    set <- function(y = matrix()) {
        # BUG FIX: the original condition was
        #   dim(x) == dim(y) && all(x == y)
        # `dim(x) == dim(y)` is a length-2 logical vector, and `&&` errors
        # on operands of length > 1 under R >= 4.3; `all(x == y)` is also
        # NA when either matrix contains NA, which makes `if` error.
        # `identical()` + `isTRUE()` give a guaranteed scalar TRUE/FALSE;
        # matrices containing NAs are conservatively treated as different.
        same <- identical(dim(x), dim(y)) && isTRUE(all(x == y))
        if (!same) {
            x <<- y
            inv <<- NULL
        }
    }
    # returns underlying matrix
    get <- function() x
    # sets matrix's inverse cache
    setinv <- function(invMatrix = matrix()) inv <<- invMatrix
    # returns inverse cache value
    getinv <- function() inv
    # returns the list object exposing the four accessors
    list(set = set, get = get,
         setinv = setinv,
         getinv = getinv)
}
# cacheSolve returns the inverse of the matrix wrapped by a makeCacheMatrix
# object `x`. When a cached inverse is available it is returned directly
# (with a message); otherwise the inverse is computed with solve(), stored
# back into the cache, and returned.
cacheSolve <- function(x = list(), ...) {
    cached <- x$getinv()
    # Cache hit: report it and short-circuit.
    if (!is.null(cached)) {
        message("getting cached data")
        return(cached)
    }
    # Cache miss: fetch the matrix, invert it, and populate the cache.
    source_matrix <- x$get()
    result <- solve(source_matrix)
    x$setinv(result)
    result
}
|
ed7d7f2f7dbc8cd8d4144852eec729964f3635d6
|
831bab411f97dde4a4d41640393bd8fe7c5dde74
|
/SL_CCLE_del.R
|
61f454bf32f24cdc815fb9ca8c4865987a290d03
|
[
"Apache-2.0"
] |
permissive
|
CBigOxf/synthetic_lethality
|
86f4209760c4d79509fad405b81fbebf937937a2
|
705b6555882202c1ed157b1be7ef881eb73b3bb6
|
refs/heads/main
| 2023-03-25T03:56:43.665837
| 2021-03-18T13:53:38
| 2021-03-18T13:53:38
| 309,764,977
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,014
|
r
|
SL_CCLE_del.R
|
SL_CCLE_del <- function()
{
###########################################################################
# This function implements the SL genome scan for CCLE_20Q2 dataset
# It scans through the genome looking for which genes are
# co-mutated (only deleterious) or WT based on tumour samples that
# contain information for AGO2 and MYC CNA
###########################################################################
###########################################################################
# source the data-set ; the .zip folder contains all data in //inputs subfolder
# change as appropriate ...
input_dir <- paste0(getwd(),"//inputs")
print("Loading CCLE data-sets...")
# Loads `expr_matrix_csv` and `mut_matrix_csv` into this scope — both are
# referenced below but defined only inside this .RData file.
load(file=paste0(input_dir,"/CCLE_20Q2.RData"))
print("Done...")
# Interactive inputs: disease subset to scan and the target gene.
disease_name <- readline(prompt="Enter type of disease (e.g. breast, lung etc) - 'all' for full CCLE run: ")
disease_name_bkp <- disease_name
gene <- readline(prompt="Enter the gene to analyze (e.g. AGO2): ")
if (disease_name == "all") { disease_name = "CCLE.csv"}
else{disease_name = paste0(disease_name,".csv")}
# Sink is a command to transfer all console output to a .txt file here
# This prevents any output while running - can deactivate with command : CloseAllConnections()
sink(paste0("tp_del_",disease_name_bkp,".txt"))
lib_dir <- paste0(getwd(),"/libs")
.libPaths( c( lib_dir , .libPaths() ) )
print("Loading libraries required...")
list.of.packages <- c("dplyr","ggplot2","ggrepel","ggpubr","tibble","stringr",
"tidyverse", "data.table",
"stringi","openxlsx", "data.table" )
invisible(lapply(list.of.packages, library, character.only = TRUE))
colnames(expr_matrix_csv)[1] <- "CELLLINE"
if (disease_name == "CCLE.csv") {
print("Running through all cancer cell lines in CCLE...")
}
else{
print(paste0("Running for ",disease_name))
cell_line_dir = paste0(input_dir,"/CELL_LINES")
# NOTE(review): building a read.csv call as a string and eval(parse())-ing
# it is fragile; read.csv(file.path(cell_line_dir, disease_name)) would do
# the same job directly. Left unchanged here.
command <- paste0(cell_line_dir,"//",disease_name)
command <- shQuote(command)
command <- paste0("read.csv(",command)
command <- paste0(command,",header = TRUE)")
command <-paste0("as.data.frame(",command)
command <- paste0(command,")")
disease_csv <- eval(parse(text = command))
disease_cell_lines = disease_csv[, 1]
# Restrict both matrices to the cell lines of the chosen disease.
expr_matrix_csv <- expr_matrix_csv %>% dplyr::filter(CELLLINE %in% disease_cell_lines)
mut_matrix_csv <- mut_matrix_csv %>% dplyr::filter(Tumor_Sample_Barcode %in% disease_cell_lines)
}
# initialize lists to be populated later on
master_pvalues <- c()
ratio1 <- c()
ratio2 <- c()
mt <- c() # mutational frequency for each gene
tp_flag <- c() # twenty percent mutated flag, true or false
all_genes <- colnames(expr_matrix_csv)
c_matrix <- c() ### contingency table freqs
# Column names appear to encode "SYMBOL..ID."; split on the literal ".." to
# separate the symbol from the ID (trailing "." stripped below) —
# TODO confirm against the actual column-name format.
# (gene_list_IDs is computed but not used further in this function.)
gene_list <- sapply(strsplit(all_genes[2:length(all_genes)], split='..', fixed=TRUE),function(x) (x[1]))
gene_list_IDs <- sapply(strsplit(all_genes[2:length(all_genes)], split='..', fixed=TRUE),function(x) (x[2]))
gene_list_IDs <- sapply(str_sub(gene_list_IDs,1,-2),function(x) (x[1]))
user <- length(gene_list) # can change to scan first x genes (1:x)
limit <- 100 # how many pvalues and genes to plot (top in ascending order)
title_cancer = paste0("% done - Analyzing genes in ",disease_name_bkp," ...")
#####################################################################################################
# Map BROAD IDs to CCLE names and join in the copy-number calls for
# the target gene (column 3) and MYC (column 4).
cell_line_names <- as.data.frame(read.csv(paste0(input_dir,"/cell_line_names.csv"), header = TRUE))
names_mat <- cell_line_names %>% dplyr::select(c("BROAD_ID","CCLE_ID"))
colnames(names_mat) <- c("Tumor_Sample_Barcode","NAME")
calls <- read.table(file = paste0(input_dir,"/calls.txt"), sep = '\t', header = TRUE)
colnames(calls)[3] <- "TARGET"
colnames(calls)[4] <- "MYC"
calls <- calls[,c(1,2,3,4)]
colnames(expr_matrix_csv)[1] <- "Tumor_Sample_Barcode"
colnames(calls)[2] <- "NAME"
# winProgressBar is Windows-only; this function will not run on other OSes.
pb <- winProgressBar(title = "progress bar", min = 0,max = user,width = 300)
df_t <- expr_matrix_csv %>% dplyr::select(c("Tumor_Sample_Barcode"))
df_t <- merge(df_t, names_mat, by="Tumor_Sample_Barcode")
df_t <- merge(df_t,calls, by = "NAME")
# filter by MYC diploidy:
df_t <- df_t %>% dplyr::filter(MYC == 0)
colnames(df_t)[2] <- "Tumor_Sample_Barcode"
# Main scan: one Fisher's exact test per gene in gene_list.
for (i in 1:user) {
setWinProgressBar(pb,i,title = paste(round(i / user * 100, 0), title_cancer))
c_gene <- gene_list[i]
# Non-silent mutations of the current gene.
target_mutations <- mut_matrix_csv %>% dplyr::filter(Hugo_Symbol %in% c_gene &
!(Variant_Classification %in% "Silent") )
check_1 <- nrow(target_mutations)
df <- merge(df_t,target_mutations, by = "Tumor_Sample_Barcode", all = TRUE)
check_2 <- nrow(df)
check <- check_1/check_2 # mutational frequency for the gene
check_all <- paste0(check_1,"/",check_2," (",check,")")
df <- df[order(df[,'Variant_Classification']), ]
# Samples with no mutation record are treated as wild-type for this gene.
df[,'Variant_Classification'][is.na(df[,'Variant_Classification'])] <- "WT"
df[,'isDeleterious'][is.na(df[,'isDeleterious'])] <- "False"
df <- tibble::add_column(df, status = "MT")
df$status <- ifelse(df$Variant_Classification == "WT", "WT", "MT")
df_small <- df %>% dplyr::select(c("Tumor_Sample_Barcode",
"Variant_Classification", "TARGET","status","isDeleterious"))
# -2 or Deep Deletion indicates a deep loss, possibly a homozygous deletion
# -1 or Shallow Deletion indicates a shallow loss, possibley a heterozygous deletion
# 0 is diploid
# 1 or Gain indicates a low-level gain (a few additional copies, often broad)
# 2 or Amplification indicate a high-level amplification (more copies, often focal)
df_small <- df_small %>% dplyr::mutate (TARGET=replace(TARGET, TARGET==-2, "deep_deletion"))
df_small <- df_small %>% dplyr::mutate (TARGET=replace(TARGET, TARGET==-1, "shallow_deletion"))
df_small <- df_small %>% dplyr::mutate (TARGET=replace(TARGET, TARGET==0, "diploid"))
df_small <- df_small %>% dplyr::mutate (TARGET=replace(TARGET, TARGET==1, "gain"))
df_small <- df_small %>% dplyr::mutate (TARGET=replace(TARGET, TARGET==2, "amplification"))
df_small <- df_small[order(df_small[,"TARGET"]), ]
##################################
# One 0/1 indicator column per contingency-table cell
# (deleterious/WT x amplified-or-gain/not).
df_small <- tibble::add_column(df_small, AMPL_MUT = 0)
df_small <- tibble::add_column(df_small, NON_AMPL_MUT = 0)
df_small <- tibble::add_column(df_small, AMPL_WT = 0)
df_small <- tibble::add_column(df_small, NON_AMPL_WT = 0)
df_small$AMPL_MUT <- ifelse(df_small$isDeleterious == "True" & (df_small$TARGET == "amplification" | df_small$TARGET == "gain"), 1, 0)
df_small$NON_AMPL_MUT <- ifelse(df_small$isDeleterious == "True" & (df_small$TARGET != "amplification" & df_small$TARGET != "gain"), 1, 0)
df_small$AMPL_WT <- ifelse(df_small$status == "WT" & (df_small$TARGET == "amplification" | df_small$TARGET == "gain"), 1, 0)
df_small$NON_AMPL_WT <- ifelse(df_small$status == "WT" & (df_small$TARGET != "amplification" & df_small$TARGET != "gain"), 1, 0)
df_small <- na.omit(df_small)
df_small <- df_small[order(df_small[,'status']), ]
# prepare matrix for Fisher's exact test:
# table() of a 0/1 column: element [2] is the count of 1s (NA when the
# column has no 1s — hence the c[is.na(c)] <- 0 fix-up below).
c1 <- table(df_small$AMPL_MUT)
c2 <- table(df_small$AMPL_WT)
c3 <- table(df_small$NON_AMPL_MUT)
c4 <- table(df_small$NON_AMPL_WT)
c <- matrix(c(c1[2],c2[2],c3[2],c4[2]),2,2)
c_string <- paste0("(",c1[2],",",c3[2],",",c2[2],",",c4[2],")")
colnames(c) <- c(paste0(gene," amplified/gain"),paste0(gene," not amplified/gain (deep_deletion/diploid)"))
rownames(c) <- c(paste0(c_gene," deleterious"),paste0(c_gene," WT"))
c[is.na(c)] <- 0
print("'######################################################################")
print(c)
res <- fisher.test(c)
# now filter out anything that is mutant in less than 20% of samples:
ratio1 <- c(ratio1, c[1,1]/(c[1,1]+c[2,1]))
ratio2 <- c(ratio2,c[1,2]/(c[1,2]+c[2,2]))
# NOTE(review): despite the "20%" wording above (and in the tp_flag
# comment), the cutoff actually applied here is 10% — confirm intent.
if ( check >= 0.1 ) {
tp_flag <- c(tp_flag,"TRUE")
}
else{
tp_flag <- c(tp_flag,"FALSE")
}
master_pvalues <- c(master_pvalues, res$p.value)
mt <- c(mt, check_all)
c_matrix <- c(c_matrix,c_string) ###
print(res)
}
close(pb)
# Assemble per-gene results into a data frame sorted by ascending p-value.
names(master_pvalues) <- gene_list[1:user]
df <- matrix(data = , nrow = user, ncol = 1)
df[, 1] = master_pvalues
row.names(df) <- names(master_pvalues)
colnames(df) <- "pvalues"
df <- data.frame(df)
# add ratio columns
df <- add_column(df, ratios1 = 0)
df <- add_column(df, ratios2 = 0)
df <- add_column(df, ctn = 0) ###
df <- add_column(df,tp_flags = "FALSE")
df <- add_column(df,Mutational_Freq = 0)
df$ratios1 <- ratio1
df$ratios2 <- ratio2
df$ctn <- c_matrix ###
df$tp_flags <- tp_flag
df$Mutational_Freq <- mt
df <- df %>% arrange(desc(-pvalues))
# adjust p values
colnames(df)[2] <- "AMPLIFIED_DELETERIOUS/(AMPLIFIED_DELETERIOUS+AMPLIFIED_WT)"
colnames(df)[3] <- "NON_AMPLIFIED_DELETERIOUS/(NON_AMPLIFIED_DELETERIOUS+NON_AMPLIFIED_WT)"
colnames(df)[4] <- "contingency table frequencies (A,B,C,D)" ###
# Benjamini-Hochberg adjustment over the sufficiently-mutated genes only.
dfshort <- df %>% dplyr::filter(tp_flags=="TRUE") # & pvalues < 0.05)
dfshort <- add_column(dfshort, adjusted_pvalue = 0)
dfshort$adjusted_pvalue <- p.adjust(dfshort$pvalues, method = "BH")
colnames(df)[5] <- "Mutational_Freq > 10%"
write.csv(dfshort,paste0("CCLE_DEL_VS_WT_adjpvalues_",disease_name))
write.csv(as.data.frame(df), paste0("genome_results_del_",disease_name))
# NOTE(review): df2 is only defined when user > limit; the ggplot call
# below errors with "object 'df2' not found" otherwise — confirm intent.
if (user>limit) {df2 <- head(df,limit)}
# Scatter plot of the top `limit` p-values; green below 0.05, red above.
png(paste0("sp_del_",disease_name_bkp,".png"), width=1920, height=1080, res=100)
sp <- ggplot(data = df2,aes(x=factor(row.names(df2), levels=row.names(df2)), y = pvalues ))+
geom_point(size = 4,color = dplyr::case_when(df2$pvalues > 0.05 ~ "#FF0000",
df2$pvalues < 0.05 ~ "#00CC00",
TRUE ~ "#00CC00"), alpha = 0.8) +
geom_hline(yintercept = 0.05, color = "#00CC00") +
geom_label_repel(aes(label=row.names(df2)),
box.padding = 0.5,
point.padding = 0.005, size = 2) +
ylab( c("P-value of Fisher exact test") ) +
xlab( c("Hugo Symbol") ) +
font("xylab",size=10)+
font("xy",size=10)+
font("xy.text", size = 10) +
font("legend.text",size = 10) +
theme(axis.text.x=element_text(size=10, angle=90,hjust=0.95,vjust=0.02)) +
ggtitle(paste0("Genome-wide comparison of ", gene, " amplification VS Deleterious/WT"))
print(sp)
dev.off()
return(df)
}
|
807fde5f658394b63b38046f7decfeecf922ea7e
|
a664cda0a1bb3e7351b7c0b2c5755d224bcaf330
|
/project1_plot3.R
|
2debd819a9ddb432094f168c186774e3f51c45ac
|
[] |
no_license
|
Sophro/ExData_Plotting1
|
2bb675578a6472cd8e746aebed1b2a80dcae58c3
|
5afdf0462d98093972fd2ebcf6c56fe82ca46158
|
refs/heads/master
| 2020-03-22T01:02:06.837358
| 2018-06-30T21:32:16
| 2018-06-30T21:32:16
| 139,279,005
| 0
| 0
| null | 2018-06-30T20:49:00
| 2018-06-30T20:48:59
| null |
UTF-8
|
R
| false
| false
| 1,250
|
r
|
project1_plot3.R
|
# Coursera Course 4, week 1, Project 1 — Plot 3 (energy sub-metering).
# Fixes vs. the original script:
#  * a bare symbol `plot3` aborted the script with "object 'plot3' not found"
#    before any plotting happened — it is now a comment;
#  * the pre-filter compared "d/m/Y" date STRINGS lexicographically, which
#    matches unintended dates; we now filter on parsed Date values only;
#  * dev.new() opened a stray on-screen device before png(); removed;
#  * rm(list = ls()) (clearing the caller's workspace) removed;
#  * legend() gets lty = 1 so line samples are actually drawn.

# Read the raw power-consumption data; "?" encodes missing values.
dat_inp <- read.table("data/household_power_consumption.txt", header = TRUE,
                      sep = ";", stringsAsFactors = FALSE,
                      na.strings = c("NA", "?"))

# Parse Date, then keep only 2007-02-01 and 2007-02-02.
dat_inp$Date <- as.Date(dat_inp$Date, format = "%d/%m/%Y")
dat <- dat_inp[dat_inp$Date >= as.Date("2007-02-01") &
               dat_inp$Date <= as.Date("2007-02-02"), ]

# POSIXlt date-time column for the x axis.
dat$my_time <- strptime(paste(dat$Date, dat$Time), "%Y-%m-%d %H:%M:%S")

# plot3: the three sub-metering series on one chart, saved as PNG.
png(file = "Plot3.png", width = 480, height = 480, units = "px")
plot(dat$my_time, dat$Sub_metering_1, type = "l", lty = 1, col = "black",
     ylab = "Energy sub metering", xlab = "")
points(dat$my_time, dat$Sub_metering_2, type = "l", lty = 1, col = "red",
       ylab = "", xlab = "")
points(dat$my_time, dat$Sub_metering_3, type = "l", lty = 1, col = "blue",
       ylab = "", xlab = "")
# add a legend; lty = 1 makes legend() draw a line sample in each colour
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lty = 1)
dev.off()
|
6ccb5aa6552118abc37f1857dd7e947f2085768e
|
5f1f83ecd94dd139e4c2a09bbc36ed58b65806f5
|
/example/mergeStringtieTPM.R
|
41b69141f79e0da37b6c2bdb3660125e2660062c
|
[] |
no_license
|
huangwb8/bioinf
|
6fe50b4bf55372f9d87946c0d2591c0220a3857c
|
02aba6ad6173c376d81e5cb58c973189d5787c8a
|
refs/heads/master
| 2023-01-29T00:23:52.279428
| 2023-01-15T01:01:37
| 2023-01-15T01:01:37
| 215,574,769
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,553
|
r
|
mergeStringtieTPM.R
|
##==========================Input========================##
# This script expects `path_stringtie` to be defined by the caller before it
# runs; the commented example below shows the expected value.
# path_stringtie='/home/huangwb8/Project/RNA-Seq_A2B1-KO/output/stringtie/hisat2'
##==============assistant base function=================##
# External helper file — provides Fastextra() (split-and-extract on strings)
# and Plus.library() (attach packages), both used below.
source("/home/huangwb8/bin/ShellBase.R",encoding = "UTF-8")
##====================environment=======================##
nd.pac=c("dplyr","ggplot2","readr")
Plus.library(nd.pac)
# global options:
# avoid scientific notation in the tables written out below
options(scipen = 100)
##===========================Programe====================##
# One gene_abund.tab per sample, searched recursively under path_stringtie.
f <- list.files(path_stringtie,pattern = 'gene_abund.tab',full.names = T,recursive = T)
df_tpm <- NULL; df_fpkm <- NULL
for(i in 1:length(f)){ # i=1
f.i <- f[i]
# Sample name = name of the abundance file's parent directory.
n.i <- rev(Fastextra(f.i,'[/]'))[2]
df.i <- read.table(f.i,header = T,check.names = F,sep = '\t')
# Strip the version suffix from gene IDs (e.g. ENSGxxx.9 -> ENSGxxx).
gene.i <- Fastextra(df.i$`Gene ID`, '[.]',1)
# table(duplicated(gene.i))
# gene.i[duplicated(gene.i)]
# Collapse IDs that became duplicated after version-stripping by summing.
tpm.i <- tapply(df.i[,'TPM'], gene.i, sum)
fpkm.i <- tapply(df.i[,'FPKM'], gene.i, sum)
# TPM: accumulate a genes-x-samples table via full joins on Gene_ID.
df.i2 <- data.frame(Gene_ID = names(tpm.i), TPM = tpm.i, stringsAsFactors = F,row.names = 1:length(tpm.i))
colnames(df.i2)[2] <- n.i
if(is.null(df_tpm)){
df_tpm <- df.i2
} else {
df_tpm <- full_join(df_tpm,df.i2,by = "Gene_ID")
}
# FPKM: same accumulation as for TPM.
df.i3 <- data.frame(Gene_ID = names(fpkm.i), FPKM = fpkm.i, stringsAsFactors = F,row.names = 1:length(fpkm.i))
colnames(df.i3)[2] <- n.i
if(is.null(df_fpkm)){
df_fpkm <- df.i3
} else {
df_fpkm <- full_join(df_fpkm,df.i3,by = "Gene_ID")
}
# TPM for SUPPA
# df.i2.m <- df.i2[-1]; rownames(df.i2.m) <- df.i2[,1]
# write.table(df.i2.m, paste0(Fastextra(f.i,n.i,1),n.i,'.tpm.tab'),sep = "\t",col.names = T,row.names = T,quote = F)
# Per-sample transcript-level TPM parsed out of merged.gtf attribute text.
if(T){
x <- readr::read_delim(paste0(path_stringtie,'/',n.i,'/merged.gtf'), delim = '\t', comment = "#",col_names = F) %>% as.data.frame()
x2 <- x[x[,3] == 'transcript',]
# The attribute string is double-quote delimited; the pieces at even
# positions 2,4,...,12 are the six attribute values in order.
a <- apply(as.matrix(x2[,9]), 1, function(i){
# i = 'gene_id \"ENSG00000228794.9\"; transcript_id \"ENST00000671208.1\"; ref_gene_name \"LINC01128\"; cov \"0.0\"; FPKM \"0.000000\"; TPM \"0.000000\";'
i2 <- Fastextra(i,'\"')
i3 <- i2[seq(2,12,2)]
# names(i3) <- gsub(';| ','',i2[seq(1,11,2)])
return(i3)
})
a2 <- t(a);
colnames(a2) <- c("gene_id","transcript_id" ,"ref_gene_name","cov","FPKM","TPM") # View(a2[1:10,])
# rownames(a2) <- Fastextra(a2[,"transcript_id"], "[.]",1)
rownames(a2) <- a2[,"transcript_id"]
a2 <- as.data.frame(a2)
a3 <- a2["TPM"]; colnames(a3) <- n.i
a3[,1] <- as.numeric(as.character(a3[,1]))
# Unparseable/missing TPM values become 0 so the output matrix is dense.
a3[,1][is.na(a3[,1])] <- 0
# a3 <- na.omit(a3) # table(is.na(a3$D1)); a3[is.na(a3$D1),]
# Output
write.table(a3, paste0(Fastextra(f.i,n.i,1),n.i,'.transcript.tpm.tab'),sep = "\t",col.names = T,row.names = T,quote = F)
}
}
# table(duplicated(df_tpm$Gene_ID))
# table(duplicated(df_fpkm$Gene_ID))
# Output
# Convert the joined frames to genes-x-samples matrices, NA -> 0, then
# persist both as .rds and as tab-separated text next to the inputs.
df_tpm2 <- as.matrix(df_tpm[,-1]); rownames(df_tpm2) <- df_tpm[,1]
for(j in 1:ncol(df_tpm2)) df_tpm2[,j][is.na(df_tpm2[,j])] <- 0
saveRDS(df_tpm2, paste0(path_stringtie,'/stringtie.tpm.rds'))
write.table(df_tpm2, paste0(path_stringtie,"/stringtie.tpm.txt"),sep = "\t",col.names = T,row.names = T,quote = F)
df_fpkm2 <- as.matrix(df_fpkm[,-1]); rownames(df_fpkm2) <- df_fpkm[,1]
for(j in 1:ncol(df_fpkm2)) df_fpkm2[,j][is.na(df_fpkm2[,j])] <- 0
saveRDS(df_fpkm2, paste0(path_stringtie,'/stringtie.fpkm.rds'))
write.table(df_fpkm2, paste0(path_stringtie,"/stringtie.fpkm.txt"),sep = "\t",col.names = T,row.names = T,quote = F)
# global options: restore the default scientific-notation threshold
options(scipen = 1)
|
71ef9eca139630ac15908cb9cdebf0b910059c67
|
d7ff71e8ffb07419aad458fb2114a752c5bf562c
|
/tests/testthat/test-public_api-2.R
|
8606a4afdc5b5ac04fbdc1c875e6a21aa71207c2
|
[
"MIT"
] |
permissive
|
r-lib/styler
|
50dcfe2a0039bae686518959d14fa2d8a3c2a50b
|
ca400ad869c6bc69aacb2f18ec0ffae8a195f811
|
refs/heads/main
| 2023-08-24T20:27:37.511727
| 2023-08-22T13:27:51
| 2023-08-22T13:27:51
| 81,366,413
| 634
| 79
|
NOASSERTION
| 2023-09-11T08:24:43
| 2017-02-08T19:16:37
|
R
|
UTF-8
|
R
| false
| false
| 4,236
|
r
|
test-public_api-2.R
|
# Directory-level styling: all three filetypes are styled when requested.
test_that("styler can style R, Rmd and Rmarkdown files via style_dir()", {
msg <- capture_output(
style_dir(testthat_file("public-api", "xyz-r-and-rmd-dir"),
filetype = c("R", "Rmd", "Rmarkdown")
)
)
expect_true(any(grepl("random-script-in-sub-dir.R", msg, fixed = TRUE)))
expect_true(any(grepl("random-rmd-script.Rmd", msg, fixed = TRUE)))
expect_true(any(grepl("random-rmd-script.Rmarkdown", msg, fixed = TRUE)))
})
# filetype = "Rmd" must not pick up .R or .Rmarkdown files.
test_that("styler can style Rmd files only via style_dir()", {
msg <- capture_output(
style_dir(testthat_file("public-api", "xyz-r-and-rmd-dir"),
filetype = "Rmd"
)
)
expect_true(any(grepl("random-rmd-script.Rmd", msg, fixed = TRUE)))
expect_false(any(grepl("random-script-in-sub-dir.R", msg, fixed = TRUE)))
expect_false(any(grepl("random-rmd-script.Rmarkdown", msg, fixed = TRUE)))
})
# Extensions may be given with a leading dot and in lower case.
test_that("styler can style .r and .rmd files only via style_dir()", {
msg <- capture_output(
style_dir(testthat_file("public-api", "xyz-r-and-rmd-dir"),
filetype = c(".r", ".rmd")
)
)
expect_true(any(grepl("random-script-in-sub-dir.R", msg, fixed = TRUE)))
expect_true(any(grepl("random-rmd-script.Rmd", msg, fixed = TRUE)))
expect_false(any(grepl("random-rmd-script.Rmarkdown", msg, fixed = TRUE)))
})
# Package-level styling covers R/, tests/, vignettes and README, but must
# always skip generated files such as RcppExports.R.
test_that("styler can style R and Rmd files via style_pkg()", {
msg <- capture_output(
style_pkg(testthat_file("public-api", "xyzpackage-rmd"),
filetype = c("R", "Rmd", "Rmarkdown")
)
)
expect_true(any(grepl("hello-world.R", msg, fixed = TRUE)))
expect_true(any(grepl("test-package-xyz.R", msg, fixed = TRUE)))
expect_true(any(grepl("random.Rmd", msg, fixed = TRUE)))
expect_true(any(grepl("random.Rmarkdown", msg, fixed = TRUE)))
expect_true(any(grepl("README.Rmd", msg, fixed = TRUE)))
expect_false(any(grepl("RcppExports.R", msg, fixed = TRUE)))
})
# Quarto (.qmd) files are included in the default filetype set.
test_that("style_pkg() styles qmd files by default", {
msg <- capture_output(
style_pkg(testthat_file("public-api", "xyzpackage-qmd"))
)
expect_true(any(grepl("hello-world.R", msg, fixed = TRUE)))
expect_true(any(grepl("test-package-xyz.R", msg, fixed = TRUE)))
expect_true(any(grepl("random.Rmd", msg, fixed = TRUE)))
expect_true(any(grepl("random.Rmarkdown", msg, fixed = TRUE)))
expect_true(any(grepl("README.Rmd", msg, fixed = TRUE)))
expect_false(any(grepl("RcppExports.R", msg, fixed = TRUE)))
expect_true(any(grepl("new.qmd", msg, fixed = TRUE)))
})
# With filetype = ".Qmd" (case-insensitive), ONLY qmd files are styled.
test_that("style_pkg() can find qmd anywhere", {
msg <- capture_output(
style_pkg(testthat_file("public-api", "xyzpackage-qmd"),
filetype = ".Qmd"
)
)
expect_no_match(msg, "hello-world.R", fixed = TRUE)
expect_no_match(msg, "test-package-xyz.R", fixed = TRUE)
expect_no_match(msg, "random.Rmd", fixed = TRUE)
expect_no_match(msg, "random.Rmarkdown", fixed = TRUE)
expect_no_match(msg, "README.Rmd", fixed = TRUE)
expect_no_match(msg, "RcppExports.R", fixed = TRUE)
expect_match(msg, "new.qmd", fixed = TRUE)
})
test_that("styler can style Rmd files only via style_pkg()", {
msg <- capture_output(
style_pkg(testthat_file("public-api", "xyzpackage-rmd"),
filetype = "Rmd"
)
)
expect_false(any(grepl("hello-world.R", msg, fixed = TRUE)))
expect_false(any(grepl("test-package-xyz.R", msg, fixed = TRUE)))
expect_true(any(grepl("random.Rmd", msg, fixed = TRUE)))
expect_false(any(grepl("random.Rmarkdown", msg, fixed = TRUE)))
expect_true(any(grepl("README.Rmd", msg, fixed = TRUE)))
expect_false(any(grepl("RcppExports.R", msg, fixed = TRUE)))
})
test_that("styler can style Rmarkdown files only via style_pkg()", {
msg <- capture_output(
style_pkg(testthat_file("public-api", "xyzpackage-rmd"),
filetype = "Rmarkdown"
)
)
expect_false(any(grepl("hello-world.R", msg, fixed = TRUE)))
expect_false(any(grepl("test-package-xyz.R", msg, fixed = TRUE)))
expect_false(any(grepl("random.Rmd", msg, fixed = TRUE)))
expect_true(any(grepl("random.Rmarkdown", msg, fixed = TRUE)))
expect_false(any(grepl("README.Rmd", msg, fixed = TRUE)))
expect_false(any(grepl("RcppExports.R", msg, fixed = TRUE)))
})
# Smoke test: the R-version guard raises an error when invoked directly.
test_that("insufficient R version returns error", {
expect_error(stop_insufficient_r_version())
})
|
a39c9d446891fa515459e1e35b3ca6aefaf7beff
|
9cd1bf7e216a26c5cd089a8f9fa89c9f684674d0
|
/R/transformation_functions.R
|
3197df88d42ba9ca3201330383350d4b674f6726
|
[
"Apache-2.0"
] |
permissive
|
jukujala/R-data-pipe
|
8195c81cdb74e2e2fc18146156c3d25abf390881
|
d259e72175a4bf6f0cf9e002da206e979cfac425
|
refs/heads/master
| 2020-12-01T22:17:43.172268
| 2020-01-11T15:30:49
| 2020-01-11T15:30:49
| 230,789,713
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,076
|
r
|
transformation_functions.R
|
# this file has feature transformations
#
# Example:
# getCategTransform returns list (column, function), where column is the name
# of the output column for the transformation and the function transforms
# a data.table to feature vector.
library(data.table)
#' get function that encodes away less common values in a feature
#' saves you from those errors on missing factor levels in test set
#'
#' @param col: transformation function will transform this column
#' @param fit_dt: use data in fit_dt to calculate less common feature values
#' @param threshold: retain only those values that occur more than this many times in fit_dt
#'
#' @return list(col, fun) where fun is the transformation function
#' @export
getCategTransform <- function(col, fit_dt, threshold=0, default_value="other") {
# count how often each value in x occurs
retain_values <- (
fit_dt
[, .(val=fit_dt[[col]])]
[, .(n=.N), by=.(val)]
[n > threshold,]
$val
)
# return input_dt[[col]] but less common values set to default_value
#
# @param input_dt: data.table with column <col>
#
# @return vector that has transformed input feature
categTransform <- function(input_dt) {
x <- input_dt[[col]]
x[!(x %in% retain_values)] <- default_value
# TODO: handle unseen column values
return(x)
}
rm(fit_dt)
return(list(col=col, fun=categTransform))
}
#' get function that discretizes numerical feature to factor
#'
#' @param col: transformation function will transform this column
#' @param fit_dt: use data in fit_dt to calculate discretization boundaries
#' @param n: calculate this many discretization boundary points
#'
#' @return list(col, fun) where fun is the transformation function
#' @export
getDiscretizeTransform <- function(col, fit_dt, n=10) {
  countUniq <- function(x) {length(unique(x))}
  # get discretization boundaries from fit_dt
  # NOTE(review): discretize() is not defined in this file and no package
  # providing it is loaded here (only data.table is) — presumably it comes
  # from the 'arules' package; confirm the caller attaches it.
  suppressWarnings(
    col_disc <- discretize(
      fit_dt[[col]],
      method = "frequency", #frequency implementation is broken
      breaks = min(n, countUniq(fit_dt[[col]])-1),
      right = FALSE,
      infinity=TRUE
    )
  )
  # Keep only the fitted break points and labels; free the fitted object
  # and the fitting table so the closure stays small.
  breaks <- attr(col_disc, "discretized:breaks")
  labels <- attr(col_disc, "levels")
  rm(col_disc)
  rm(fit_dt)
  discretizeTransform <- function(input_dt) {
    x <- input_dt[[col]]
    y <- cut(x, breaks=breaks, labels=labels, right=FALSE)
    # add additional "missing" that is not NA because models fail on NAs
    y <- factor(y, levels=c("Unknown", levels(y)))
    y[is.na(y)] <- "Unknown"
    y
  }
  return( list(col=col, fun=discretizeTransform) )
}
#' set NA values in column col to 0 and add new column that is 0/1 NA indicator
#'
#' @param input_dt: data.table with column <col>
#' @param col: column name in input_dt, is of numeric type
#'
#' @return TRUE, modifies input_dt by reference
#' @export
processNAColumn <- function(input_dt, col) {
  indicator_col <- paste0(col, "_na")
  # Rows whose value is missing, captured before any imputation happens.
  missing_rows <- is.na(input_dt[[col]])
  # 0/1 indicator column marking which rows were NA (added by reference).
  input_dt[, (indicator_col) := 0]
  input_dt[missing_rows, (indicator_col) := 1]
  # Impute the missing values of the original column with 0.
  input_dt[missing_rows, (col) := 0]
  return(TRUE)
}
#' get function that calculates new feature: average of <avg_col> by group
#'
#' @param group_cols: vector of column names to group by the average calculation
#' @param avg_col: column to average
#' @param out_col: column name to output the new feature
#' @param fit_dt: data.table with columns in group_cols and avg_col
#' @param n_threshold: calculate average only when group size exceed this
#'
#' @return list(col, fun) where fun is the transformation function
#' @export
getAveragingTransform <- function(
  group_cols, avg_col, out_col, fit_dt, n_threshold=500) {
  # calculate average for each group
  # Groups of size <= n_threshold are dropped here, so rows from such
  # groups receive NA from the join in averagingTransform below.
  averaging_dt <- (
    fit_dt[,
           .(
             n=.N,
             avg = mean(.SD[[avg_col]])
           ),
           keyby=group_cols
           ]
    [n > n_threshold, ]
    [, n := NULL ]
  )
  # Free the fitting table; only the small per-group lookup is retained.
  rm(fit_dt)
  # merge each row of input_dt to correct average value
  averagingTransform <- function(input_dt) {
    averaging_dt[input_dt, on=group_cols]$avg
  }
  return(list(col=out_col, fun=averagingTransform))
}
|
5447a7d0c603ffa50d3eceb8913a3dd3516356e7
|
dfe6867eb701ed128c80ca08f9e4d1d447f547b0
|
/man/summary.jmdem.Rd
|
43a5098f0ce1d593f670aef8cf572d823f7972b0
|
[] |
no_license
|
cran/jmdem
|
fc8510e36ac0879621766f639af9e06e71eaa5aa
|
5afc1049cec9566824a50238d8d05ee212c53977
|
refs/heads/master
| 2020-03-13T20:16:25.577903
| 2020-03-04T05:50:02
| 2020-03-04T05:50:02
| 131,270,464
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,550
|
rd
|
summary.jmdem.Rd
|
\name{summary.jmdem}
\alias{summary.jmdem}
\alias{print.summary.jmdem}
\title{
Summarising Joint Mean and Dispersion Effects Model Fits
}
\description{
These functions are all \code{\link{methods}} for class \code{jmdem} or \code{summary.jmdem} objects.
}
\usage{
\method{summary}{jmdem}(object, correlation = FALSE, symbolic.cor = FALSE, ...)
\method{print}{summary.jmdem}(x, digits = max(3L, getOption("digits") - 3L),
scientific = FALSE, symbolic.cor = x$symbolic.cor,
signif.stars = getOption("show.signif.stars"), ...)
}
\arguments{
\item{object}{an object of class "\code{jmdem}", usually, a result of a call to \code{\link{jmdem}}.}
\item{x}{an object of class "\code{summary.jmdem}", usually, a result of a call to \code{summary.jmdem}.}
\item{correlation}{logical; if \code{TRUE}, the correlation matrix of the estimated parameters is returned and printed.}
\item{digits}{the number of significant digits to use when printing.}
\item{scientific}{logical; if \code{TRUE}, scientific notation is used when printing.}
\item{symbolic.cor}{logical. If \code{TRUE}, print the correlations in a symbolic form (see \code{\link{symnum}}) rather than as numbers.}
\item{signif.stars}{logical. If \code{TRUE}, 'significance stars' are printed for each coefficient.}
\item{...}{further arguments passed to or from other methods.}
}
\details{
\code{print.summary.jmdem} tries to be smart about formatting the coefficients, standard errors, etc. and additionally gives 'significance stars' if \code{signif.stars} is \code{TRUE}. The \code{coefficients}, \code{mean.coefficients} and \code{dispersion.coefficients} components of the result give the estimated coefficients and their estimated standard errors, together with their ratio. This third column is labelled \code{t-ratio} and a fourth column gives the two-tailed p-value corresponding to the \code{t-ratio} based on a Student t distribution.
Aliased coefficients are omitted in the returned object but restored by the \code{print} method.
Correlations are printed to the same decimal places specified in \code{digits} (or symbolically): to see the actual correlations print \code{summary(object)$correlation} directly.
For more details, see \code{\link{summary.glm}}.
}
\value{
\item{call}{the component from \code{object}.}
\item{mean.family}{the component from \code{object}.}
\item{dispersion.family}{the component from \code{object}.}
\item{deviance}{the component from \code{object}.}
\item{mean.terms}{the component from \code{object}.}
\item{dispersion.terms}{the component from \code{object}.}
\item{aic}{the component from \code{object}.}
\item{mean.contrasts}{the component from \code{object}.}
\item{dispersion.contrasts}{the component from \code{object}.}
\item{df.residual}{the component from \code{object}.}
\item{null.deviance}{the component from \code{object}.}
\item{df.null}{the component from \code{object}.}
\item{information.type}{the component from \code{object}.}
\item{iter}{the component from \code{object}.}
\item{mean.na.action}{the component from \code{object}.}
\item{dispersion.na.action}{the component from \code{object}.}
\item{deviance.resid}{the deviance residuals.}
\item{pearson.resid}{the pearson residuals.}
\item{resid}{the working residuals, which depend on the setting of \code{deviance.type}.}
\item{coefficients}{the matrix of coefficients, standard errors, z-values and p-values. Aliased coefficients are omitted.}
\item{mean.coefficients}{the matrix of coefficients, standard errors, z-values and p-values of the mean submodel.}
\item{dispersion.coefficients}{the matrix of coefficients, standard errors, z-values and p-values of the dispersion submodel.}
\item{deviance.type}{the type of residual deviance specified; it is either "\code{deviance}" or "\code{pearson}".}
\item{aliased}{named logical vector showing if the original coefficients are aliased.}
\item{df}{a 3-vector of the rank of the model and the number of residual degrees of freedom, plus number of coefficients (including aliased ones).}
\item{covariance}{the estimated covariance matrix of the estimated coefficients.}
\item{digits}{the number of significant digits to use when printing.}
\item{scientific}{logical value of using scientific notation when printing.}
\item{covmat.method}{named method used to invert the covariance matrix.}
\item{correlation}{(only if correlation is true.) The estimated correlations of the estimated coefficients.}
\item{symbolic.cor}{(only if correlation is true.) The value of the argument symbolic.cor.}
}
\author{
Karl Wu Ka Yui (karlwuky@suss.edu.sg)
}
\seealso{
\code{\link{jmdem}}, \code{\link{summary}}
}
\examples{
## Example in jmdem(...)
MyData <- simdata.jmdem.sim(mformula = y ~ x, dformula = ~ z,
mfamily = poisson(),
dfamily = Gamma(link = "log"),
beta.true = c(0.5, 4),
lambda.true = c(2.5, 3), n = 100)
fit <- jmdem(mformula = y ~ x, dformula = ~ z, data = MyData,
mfamily = poisson, dfamily = Gamma(link = "log"),
dev.type = "deviance", method = "CG")
## Summarise fit with correlation matrix
summary(fit, correlation = TRUE, digits = 4)
}
|
be110f33a23e2ac56d3116a03b9ddd9e4d9ac2d3
|
947316115f3c10532162697eb7636c92e04a7f46
|
/Codes/Part8_10mer threshold.R
|
975b091b96c784bb7712902941e49fab763a43a2
|
[] |
no_license
|
Flashiness/TFBS
|
3aa96fff84c68514788a8cbdd0771b70e3115b3a
|
ca6186dbd9ef1e54ad51a5aa0c1f2603efc43040
|
refs/heads/master
| 2023-02-24T00:10:09.763563
| 2021-01-28T05:18:58
| 2021-01-28T05:18:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,427
|
r
|
Part8_10mer threshold.R
|
# Estimate an empirical significance threshold for deltaSVM 10-mer weight
# differences by sampling random 10-mers and random single-base mutations.
motifs= c("BCL11A", "CTCF", "EGR1", "GABPA", "JUN",
"JUND", "MAX", "NANOG", "POU5F1", "RAD21",
"REST", "RFX5", "SIX5", "SRF", "STAT1",
"TCF12", "USF1", "USF2", "YY1")
# One motif is analysed per run; index 2 == "CTCF".
motif = motifs[2]
# Number of random 10-mers drawn for the null distribution.
sizes <- 100000
# Load the 10-mer weight table `d` (column 1: sequence, column 2: weight,
# as used by the match()/subscript code below).
# NOTE(review): hard-coded absolute Windows path — consider parameterising.
load(paste0("H:/10-mers/07062020_Motifs_over_SNP_deltaSVM/d_matrix/",motif,"/Part1/d.rda"))
# de = as.matrix(d)
# test = d
d[,1] = as.character(d[,1])
#
# Sample `sizes` rows (10-mers) without replacement.
d_rand <- d[sample(seq(nrow(d)), size = sizes, replace = F),]
# Random mutation position (1-10) within each sampled 10-mer.
pos <- sample(1:10,size = sizes,replace = T)
# Reference 10-mer sequences.
seq.ref <- as.character(d_rand[,1])
# Reference base at the sampled mutation position of each 10-mer.
d_rand_ref <- substr(seq.ref,pos,pos)
# Draw a random alternative allele: given one nucleotide, return one of the
# other three bases chosen uniformly at random.
mut_allele <- function(x){
  nt <- c("A", "C", "G", "T")
  others <- nt[-which(nt == x)]
  others[sample.int(3, 1)]
}
# Draw the alternative allele for each sampled mutation position.
d_rand_mut <- mapply(mut_allele,d_rand_ref)
# Build the alternative 10-mer sequences by splicing in the mutated base.
seq.mut <- paste0(substr(seq.ref,start = 1,stop = pos-1),d_rand_mut,substr(seq.ref,start = pos+1, stop = 10))
# Look up the weight of a 10-mer, falling back to its per-base complement
# when the sequence itself is not in the table.
# NOTE(review): chartr("ATGC","TACG",...) complements but does not reverse
# the string — confirm whether the table is keyed by reverse complements.
# NOTE(review): this helper is defined but never called; the same logic is
# duplicated inline below for seq.mut.
seq10_weight = function(x){
xlistr= chartr("ATGC","TACG",x)
m1 = d[match(x,d[,1]),2]
m2 = d[match(xlistr,d[,1]),2]
m1[which(is.na(m1))]=m2[which(is.na(m1))]
return(m1)
}
# seq10_weight(seq.ref[1])
# Weights for the reference 10-mers (always matched: sampled from `d`).
weight.ref <- d[match(seq.ref, d[,1]), 2]
# For mutated 10-mers, also try the complemented sequence when unmatched.
seq.mut2 <- chartr("ATGC","TACG",seq.mut)
m1 <- d[match(seq.mut, d[,1]), 2]
m2 <- d[match(seq.mut2, d[,1]), 2]
m1[which(is.na(m1))]=m2[which(is.na(m1))]
# Weights for the alternative 10-mers.
weight.mut <- m1
rm(m1,m2,seq.mut2)
# Null distribution of weight differences for the random mutations.
weight_diff <- weight.mut - weight.ref
# Empirical 2.5% / 97.5% quantiles = two-sided threshold.
quantile(weight_diff,probs = c(0.025,0.975))
# you can use sink() here to get the results saved in a txt file
# here is some example code, please revise according to your settings
sink(file = "Your path to save the results", append = T) # if you set append=T and use the same file name, it will append in the same file
cat(c(motif,quantile(weight_diff,probs = c(0.025,0.975))))
sink()
|
46cb0ceedb96fb97c6bfb36fb7ac4a03419805bf
|
1a0ad94b0ed048d029b61f50ead424c1adee26c1
|
/scripts/parkrun_efforts.R
|
3dc4fd47d0704340c88b78199a8c104ccffcfc38
|
[] |
no_license
|
padpadpadpad/parkrun_play
|
2affb3bdb76f3cd0ecaaa095262cc1ce199a7bb9
|
8994914c9a219c95ed5e4aa942018f5b7b63f431
|
refs/heads/master
| 2021-07-07T18:18:26.437689
| 2017-10-04T17:27:30
| 2017-10-04T17:27:30
| 103,745,790
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,768
|
r
|
parkrun_efforts.R
|
# Pull parkrun segment efforts from the Strava API and plot them.
#devtools::install_github('fawda123/rStrava')
#devtools::install_github("timelyportfolio/listviewer")
mise::mise(vars = TRUE, console = TRUE)
library(rStrava)
library(tidyr)
library(dplyr)
library(ggplot2)
library(broom)
library(purrr)
library(ggjoy)
help.search('token', package = 'rStrava')
# Strava API application credentials.
# NOTE(review): a client secret is committed in source here — it should be
# rotated and loaded from an environment variable instead.
app_name <- 'stRava'
app_client_id <- '12962'
app_secret <- 'e58f0335795fb08674d2a412c9d35efb0d740761'
# create OAuth token for the Strava API
my_token <- httr::config(token = strava_oauth(app_name, app_client_id, app_secret))
# functions
# Flatten one Strava effort (a nested list from the API) into a one-row
# data frame with a fixed set of columns. Columns missing from the API
# response are appended as NA so rows from different efforts bind safely.
compile_effort <- function(x){
# Unlist the nested effort, then pivot name/value pairs into one wide row.
temp <- data.frame(unlist(x)) %>%
mutate(ColNames = rownames(.)) %>%
spread(., ColNames, unlist.x.)
desired_cols <- c('athlete.id', 'distance', 'elapsed_time', 'moving_time', 'name', 'start_date', 'start_date_local')
# check which columns aren't present in this effort's response
cols_not_present <- desired_cols[! desired_cols %in% colnames(temp)] %>%
data.frame(cols = .) %>%
mutate(., value = NA)
# If any columns are missing, reshape them into a single NA-filled row ...
if(nrow(cols_not_present) >= 1){cols_not_present <- spread(cols_not_present, cols, value)}
# ... and append it to the effort row (after spread() the frame has 1 row).
if(nrow(cols_not_present) == 1){temp <- cbind(temp, cols_not_present)}
# Keep only the desired columns, in a fixed order.
temp <- select(temp, athlete.id, distance, elapsed_time, moving_time, name, start_date, start_date_local)
return(temp)
}
# Fetch all efforts for one Strava segment and flatten them into a single
# data frame (one row per effort) via compile_effort().
compile_segment_efforts <- function(x, stoken){
  efforts <- get_efforts_list(stoken, id = x)
  suppressWarnings(purrr::map_df(efforts, compile_effort))
}
# Read the parkrun -> Strava segment-id mapping, dropping incomplete rows.
segments <- read.csv("~/Desktop/park_runs/raw/park_run_segmentIDs.csv", stringsAsFactors = FALSE) %>%
filter(., !is.na(segment_id) & !is.na(Parkrun))
# get leaderboard for parkruns
# test run through
#test <- segments[1:5,]
# Download every segment's efforts; .id = 'id' records the segment's
# position in `segments`, used for the merge below.
d_parkrun <- segments$segment_id %>% purrr::map_df(., .f = compile_segment_efforts, stoken = my_token, .id = 'id')
# make some columns numeric
d_parkrun <- mutate_at(d_parkrun, c('distance', 'moving_time', 'elapsed_time'), as.numeric)
# Give segments the same sequential id so the merge keys line up.
segments <- mutate(segments, id = 1:n()) %>%
rename(., parkrun = Parkrun)
# Keep plausible 5 km efforts (4750-5200 m) under one hour, then attach names.
d <- filter(d_parkrun, distance > 4750 & distance < 5200 & elapsed_time < 60 * 60) %>%
merge(., select(segments, id, parkrun), by = 'id')
saveRDS(d, '~/Desktop/park_runs/parkrun_efforts.rds')
# Joyplot of elapsed-time distributions per parkrun (< 50 min only).
ggplot(filter(d, elapsed_time < 3000)) +
geom_joy(aes(elapsed_time, parkrun)) +
theme_bw() +
xlab('Time (s)') +
ylab('')
# create date column from the ISO start timestamp
# NOTE(review): MaenMurderData is not defined anywhere in this script —
# presumably it is a subset of `d` built elsewhere; verify before running.
MaenMurderData$date <- gsub("T.*$", '', MaenMurderData$start_date) %>%
as.POSIXct(., format = '%Y-%m-%d')
MaenMurderData <- mutate(MaenMurderData, month = format(date, "%m"),
day = format(date, "%d"),
year = format(date, "%Y"))
MaenMurderData[c('month', 'day', 'year')] <- lapply(MaenMurderData[c('month', 'day', 'year')], as.numeric)
|
f391338504125bd6251de4ce54eeaa9d3cd9227b
|
fd365694237edb699e53eef04f1c3c0ff649f3c8
|
/man/opal.execute.Rd
|
22d80ed2dacace371f99009b7bee644fd23b162f
|
[] |
no_license
|
obiba/opalr
|
f73a0eb0280bc768b47711d6a1a08ce0eded7ce1
|
5ca4936deae7e3410db5ee6a02df7994ff5fa336
|
refs/heads/master
| 2023-08-03T06:18:07.954481
| 2023-07-21T06:58:07
| 2023-07-21T06:58:07
| 166,788,279
| 3
| 3
| null | 2021-05-13T15:50:49
| 2019-01-21T09:45:41
|
R
|
UTF-8
|
R
| false
| true
| 898
|
rd
|
opal.execute.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/opal.execute.R
\name{opal.execute}
\alias{opal.execute}
\title{Execute a R script}
\usage{
opal.execute(opal, script, async = FALSE)
}
\arguments{
\item{opal}{Opal object or list of opal objects.}
\item{script}{R script to execute.}
\item{async}{R script is executed asynchronously within the session (default is FALSE).
If TRUE, the value returned is the ID of the command to look for (from Opal 2.1).}
}
\description{
Execute a R script in the remote R session.
}
\examples{
\dontrun{
o <- opal.login('administrator','password', url='https://opal-demo.obiba.org')
opal.execute(o, "x <- 'foo'")
opal.execute(o, "ls()")
opal.logout(o)
}
}
\seealso{
Other execution functions:
\code{\link{opal.execute.source}()},
\code{\link{opal.load_package}()},
\code{\link{opal.unload_package}()}
}
\concept{execution functions}
|
f4a09aedc3e426d6aacb647c7f5949fd9cbbbeef
|
e4373b408d639af6635baa58a635d5ff1f9e9d95
|
/man/parcoords.Rd
|
b6dd91c9897779bf61697e3e179a54622cf2184f
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
jkaupp/parcoords
|
afb4edb77be85226f6be04f660867fd234286d28
|
aefd83354446a8dabc7fdb83852c5b5ece5a7760
|
refs/heads/master
| 2021-01-18T11:10:45.607817
| 2016-02-26T21:02:41
| 2016-02-26T21:02:41
| 59,310,893
| 0
| 0
| null | 2016-05-20T16:49:27
| 2016-05-20T16:49:26
| null |
UTF-8
|
R
| false
| true
| 4,566
|
rd
|
parcoords.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parcoords.R
\name{parcoords}
\alias{parcoords}
\title{htmlwidget for d3.js parallel-coordinates}
\usage{
parcoords(data, rownames = T, color = NULL, brushMode = NULL,
brushPredicate = "and", reorderable = F, axisDots = NULL,
margin = NULL, composite = NULL, alpha = NULL, queue = F, mode = F,
rate = NULL, tasks = NULL, width = NULL, height = NULL)
}
\arguments{
\item{data}{data.frame with data to use in the chart}
\item{rownames}{logical use rownames from the data.frame in the chart. Regardless of
this parameter, we will append rownames to the data that we send to JavaScript.
If \code{rownames} equals \code{FALSE}, then we will use parallel coordinates
to hide it.}
\item{color}{see \href{https://github.com/syntagmatic/parallel-coordinates\#parcoords_color}{parcoords.color( color )}.
Color can be a single color as rgb or hex. For a color function,
provide a list( colorScale = , colorBy = ) where colorScale is
a function such as \code{d3.scale.category10()} and colorBy
is the column name from the data to determine color.}
\item{brushMode}{string, either \code{"1D-axes"}, \code{"1D-axes-multi"},
or \code{"2D-strums"}
giving the type of desired brush behavior for the chart.}
\item{brushPredicate}{string, either \code{"and"} or \code{"or"} giving
the logic for the join with multiple brushes.}
\item{reorderable}{logical enable reordering of axes}
\item{axisDots}{logical mark the points where polylines meet an axis with dots}
\item{margin}{list of sizes of margins in pixels. Currently
\code{brushMode = "2D-strums"} requires left margin = 0, so
this will change automatically and might result in unexpected
behavior.}
\item{alpha}{opacity from 0 to 1 of the polylines}
\item{queue}{logical (default FALSE) to change rendering mode to queue for
progressive rendering. Usually \code{ queue = T } for very large datasets.}
\item{mode}{string, see \code{queue} above; \code{ queue = T } will set
\code{ mode = "queue" }}
\item{tasks}{a character string or \code{\link[htmlwidgets]{JS}} or list of
strings or \code{JS} representing a JavaScript function(s) to run
after the \code{parcoords} has rendered. These provide an opportunity
for advanced customization. Note, the \code{function} will use the
JavaScript \code{call} mechanism, so within the function, \code{this} will
be an object with {this.el} representing the containing element of the
\code{parcoords} and {this.parcoords} representing the \code{parcoords}
instance.}
\item{width}{integer in pixels defining the width of the widget. Autosizing to 100%
of the widget container will occur if \code{ width = NULL }.}
\item{height}{integer in pixels defining the height of the widget. Autosizing to 400px
of the widget container will occur if \code{ height = NULL }.}
}
\value{
An object of class \code{htmlwidget} that will
intelligently print itself into HTML in a variety of contexts
including the R console, within R Markdown documents,
and within Shiny output bindings.
}
\description{
Create interactive parallel coordinates charts with this htmlwidget
wrapper for d3.js \href{http://syntagmatic.github.io/parallel-coordinates/}{parallel-coordinates}.
}
\examples{
\dontrun{
# simple example using the mtcars dataset
data( mtcars )
parcoords( mtcars )
# various ways to change color
# in these all lines are the specified color
parcoords( mtcars, color = "green" )
parcoords( mtcars, color=RColorBrewer::brewer.pal(3,"BuPu")[3] )
parcoords( mtcars, color = "#f0c" )
# in these we supply a function for our color
parcoords(
mtcars
, color = list(
colorBy="cyl"
,colorScale=htmlwidgets::JS('d3.scale.category10()')
)
)
### be careful; this might strain your system #######
### #######
data( diamonds, package = "ggplot2" )
parcoords(
diamonds
,rownames=F
,brushMode = "1d-axes"
,reorderable=T
,queue = T
,color= list(
colorBy="cut"
,colorScale = htmlwidgets::JS("d3.scale.category10()")
)
)
# or if we want to add in a dplyr chain
library(dplyr)
data( diamonds, package = "ggplot2" )
diamonds \%>\%
mutate( carat = cut(carat,breaks = c(0,1,2,3,4,5), right =F) ) \%>\%
group_by( carat ) \%>\%
summarise_each(funs(mean),-carat) \%>\%
parcoords(
rownames= F
,color = list(
colorScale = htmlwidgets::JS('d3.scale.category10()' )
, colorBy = "carat"
)
,brushMode = "1D"
)
}
}
|
9a2d4ee5f0db4e456e5cbeb5bde3763a8194a070
|
109621995ae71dcf333e4f57ff715fb77db8b912
|
/craving/scripts/createEV_scannervas.R
|
1cfff61c41a542dcfdca815e2eb36c5b5d5ff3ec
|
[] |
no_license
|
pederisager/Opioid_Reward_fMRI
|
76ebc34eea3cead59e48216f124f61961fa7af5b
|
4d8156bd17c8bff555f261bd2af1b598cc01d4ca
|
refs/heads/master
| 2021-05-04T06:41:54.386497
| 2017-06-24T15:48:22
| 2017-06-24T15:48:22
| 70,497,056
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,484
|
r
|
createEV_scannervas.R
|
## Build first-level FEAT explanatory-variable (EV) text files from the
## in-scanner VAS ratings in scannervas_long.
##
## Part 1: for every subject/session/condition, load the condition EV file
## and create a parametrically modulated EV ("FoodWanting" for Observe,
## "RegSuccess" for Regulate) whose weight column holds the demeaned VAS.
## Part 2: combine the condition-specific instruct and VAS EVs into single
## "Instruct" and "VAS" EV files per subject/session.
##
## NOTE(review): attach() exposes subject, session, condition and demvas as
## free variables below; consider with()/explicit columns instead.
attach(scannervas_long)
# Part 1: construct Food Wanting and Regulate Success EVs ----
# For all included subjects
for (subj in unique(subject)) {
  # For all sessions
  for (sess in unique(session)) {
    # For all the conditions (Observe/Regulate)
    for (cond in unique(condition)) {
      # Load the EV file (3-column format: V1 onset, V2 duration, V3 weight)
      assign(
        x = "EV",
        value = read.table(file = paste("EVs/session",sess,"/",subj,"/",cond,"_",subj,".txt", sep = ""))
      )
      # Create either a FoodWanting or a RegSuccess EV and put it in the appropriate directory
      if (cond=="Observer"){
        # Substitute the 1s in V3 with the demeaned VAS scores
        EV$V3 <- demvas[subject==subj & session==sess & condition==cond]
        # Keep a named copy in the workspace for visual inspection
        assign(paste("FoodWanting","_",subj,"_",sess, sep = ""), EV)
        # Write the new EV content to a txt file, for use in first level FEAT
        write.table(
          x = EV,
          file = paste("EVs/session",sess,"/",subj,"/","FoodWanting","_",subj,".txt", sep = ""),
          sep = "\t",
          dec = ".",
          row.names = FALSE, col.names = FALSE
        )
      }
      if (cond=="Reguler"){
        # Drop the old weight column, then add the sign-flipped demeaned VAS
        EV <- EV[,c(1,2)]
        EV$V3 <- -demvas[subject==subj & session==sess & condition==cond]
        assign(paste("RegSuccess","_",subj,"_",sess, sep = ""), EV)
        write.table(
          x = EV,
          file = paste("EVs/session",sess,"/",subj,"/","RegSuccess","_",subj,".txt", sep = ""),
          sep = "\t",
          dec = ".",
          row.names = FALSE, col.names = FALSE
        )
      }
    }
  }
}
# Part 2: combine the condition-separated VAS and instruct EVs ----
# For all included subjects
for (subj in unique(subject)) {
  # For all sessions
  for (sess in unique(session)) {
    # Load the relevant EVs
    assign(
      x = "EVinstrreg",
      value = read.table(file = paste("EVs/session",sess,"/",subj,"/","Reguler","_instruct_",subj,".txt", sep = ""))
    )
    assign(
      x = "EVinstrobs",
      value = read.table(file = paste("EVs/session",sess,"/",subj,"/","Observer","_instruct_",subj,".txt", sep = ""))
    )
    assign(
      x = "EVvasreg",
      value = read.table(file = paste("EVs/session",sess,"/",subj,"/","Reguler","_VAS_",subj,".txt", sep = ""))
    )
    assign(
      x = "EVvasobs",
      value = read.table(file = paste("EVs/session",sess,"/",subj,"/","Observer","_VAS_",subj,".txt", sep = ""))
    )
    # Combine EVs across conditions
    EVinst <- rbind(EVinstrobs, EVinstrreg)
    EVvas <- rbind(EVvasobs, EVvasreg)
    # Save the combined instruct EV as a tab-delimited .txt file
    write.table(
      x = EVinst,
      file = paste("EVs/session",sess,"/",subj,"/","Instruct","_",subj,".txt", sep = ""),
      sep = "\t",
      dec = ".",
      row.names = FALSE, col.names = FALSE
    )
    # Save the combined VAS EV as a tab-delimited .txt file.
    # BUGFIX: resolved a leftover git merge conflict here — the VAS file must
    # be written from EVvas (EVinst was already written above); the conflict
    # markers also made the script a syntax error.
    write.table(
      x = EVvas,
      file = paste("EVs/session",sess,"/",subj,"/","VAS","_",subj,".txt", sep = ""),
      sep = "\t",
      dec = ".",
      row.names = FALSE, col.names = FALSE
    )
  }
}
|
2ce53207a500aa7e4a9cb99dac153770f9131e8a
|
1a5741fb17781bce50228cab6d11842e6451df32
|
/R/predict.default.R
|
7905b2a71c06f2436dfa8d217d261ebc3f9071e6
|
[] |
no_license
|
paulponcet/statip
|
a88e78e023cb550e7ad22a5ec701d0f641050bbd
|
ed5cfd7d00db470f9b32a812399f9b3c0bd7b229
|
refs/heads/master
| 2021-01-11T15:30:39.695374
| 2019-11-17T21:15:34
| 2019-11-17T21:15:34
| 80,364,466
| 0
| 1
| null | 2019-11-17T21:15:35
| 2017-01-29T18:43:51
|
R
|
UTF-8
|
R
| false
| false
| 950
|
r
|
predict.default.R
|
#' @title
#' Default model predictions
#'
#' @description
#' Default method for the \code{\link[stats]{predict}} generic,
#' usable when the model object is empty: an empty model yields one
#' \code{NA} prediction per row of \code{newdata} (or a zero-length
#' vector when \code{newdata} is missing). Non-empty objects are
#' forwarded to the next applicable method.
#'
#' @param object
#' A model object, possibly empty.
#'
#' @param newdata
#' An optional data frame in which to look for variables
#' with which to predict.
#' If omitted, the fitted values are used.
#'
#' @param ...
#' Additional arguments.
#'
#' @return
#' A vector of predictions.
#'
#' @seealso
#' \code{\link[stats]{predict}} from package \pkg{stats}.
#'
#' @export
#'
#' @examples
#' stats::predict(NULL)
#' stats::predict(NULL, newdata = data.frame(x = 1:2, y = 2:3))
#'
predict.default <-
function(object,
         newdata,
         ...)
{
  # Anything non-empty is handled by the next method in the dispatch chain.
  if (length(object) > 0L) {
    return(NextMethod("predict"))
  }
  # Empty model: one NA per requested prediction.
  n_pred <- if (missing(newdata)) 0L else nrow(newdata)
  rep(NA, n_pred)
}
#' @importFrom stats predict
#' @export
#'
stats::predict
|
3c407cd50a4ba56258c79d931356f15a0d59f49a
|
c3eed653d66fbca3e32f925d1ea4575fcef3af01
|
/Cap14 - Análise de Regressão com R e Azure Machine Learning/01-Solution-Exercicios-Cap13-Parte1.R
|
effa3e380409a6e3a823ec0d9e6399619454aa05
|
[
"MIT"
] |
permissive
|
FelipeChristanelli/BigDataAnalytics_R_AzureMachineLearning
|
a54fda2f753f08ae8a69cc6466441a78414b1e5a
|
568202484c3edd5146a501ab5a5b419055d6c01d
|
refs/heads/main
| 2023-02-21T05:07:16.662032
| 2021-01-16T15:07:35
| 2021-01-16T15:07:35
| 313,734,151
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,036
|
r
|
01-Solution-Exercicios-Cap13-Parte1.R
|
# Exercise List - Chapter 13
# For this script we use mlbench (Machine Learning Benchmark Problems)
# https://cran.r-project.org/web/packages/mlbench/mlbench.pdf
# This package ships several datasets; we use the one with US Congress
# voting records.
# The task is to predict republican vs democrat votes (variable Class).
# Import
install.packages("mlbench")
library(mlbench)
# Load the dataset
?HouseVotes84
data("HouseVotes84")
View(HouseVotes84)
# Exploratory data analysis
plot(as.factor(HouseVotes84[,2]))
title(main = "Votes cast for issue", xlab = "vote", ylab = "# reps")
plot(as.factor(HouseVotes84[HouseVotes84$Class == 'republican', 2]))
title(main = "Republican votes cast for issue 1", xlab = "vote", ylab = "# reps")
plot(as.factor(HouseVotes84[HouseVotes84$Class == 'democrat',2]))
title(main = "Democrat votes cast for issue 1", xlab = "vote", ylab = "# reps")
# Functions used for imputation
# Returns the number of NAs for a vote column and a class (democrat or republican)
na_by_col_class <- function (col,cls){return(sum(is.na(HouseVotes84[,col]) & HouseVotes84$Class==cls))}
# Empirical proportion of 'y' votes (among non-NA y/n) for a column and class
p_y_col_class <- function(col,cls){
sum_y <- sum(HouseVotes84[,col] == 'y' & HouseVotes84$Class == cls, na.rm = TRUE)
sum_n <- sum(HouseVotes84[,col] == 'n' & HouseVotes84$Class == cls, na.rm = TRUE)
return(sum_y/(sum_y+sum_n))}
# Test the helper functions
p_y_col_class(2,'democrat')
p_y_col_class(2,'republican')
na_by_col_class(2,'democrat')
na_by_col_class(2,'republican')
# Impute missing values: each NA is filled with a random 'y'/'n' draw whose
# probability matches the class-specific proportion of 'y' votes.
for (i in 2:ncol(HouseVotes84)) {
if(sum(is.na(HouseVotes84[,i])>0)) {
c1 <- which(is.na(HouseVotes84[,i]) & HouseVotes84$Class == 'democrat',arr.ind = TRUE)
c2 <- which(is.na(HouseVotes84[,i]) & HouseVotes84$Class == 'republican',arr.ind = TRUE)
HouseVotes84[c1,i] <- ifelse(runif(na_by_col_class(i,'democrat'))<p_y_col_class(i,'democrat'),'y','n')
HouseVotes84[c2,i] <- ifelse(runif(na_by_col_class(i,'republican'))<p_y_col_class(i,'republican'),'y','n')}
}
# Generate train/test split flag (~80% training)
HouseVotes84[,"train"] <- ifelse(runif(nrow(HouseVotes84)) < 0.80,1,0)
trainColNum <- grep("train",names(HouseVotes84))
# Build the training and test sets from the train column (column dropped)
trainHouseVotes84 <- HouseVotes84[HouseVotes84$train == 1, -trainColNum]
testHouseVotes84 <- HouseVotes84[HouseVotes84$train == 0, -trainColNum]
# Naive Bayes via e1071
install.packages("e1071")
library(e1071)
# Exercise 1 - Build the Naive Bayes model
# Train the model
?naiveBayes
nb_model <- naiveBayes(Class ~ ., data = trainHouseVotes84)
# Inspect the result
nb_model
summary(nb_model)
str(nb_model)
# Make predictions (drop the Class column from the test set)
nb_test_predict <- predict(nb_model, testHouseVotes84[,-1])
# Build the confusion matrix
table(pred = nb_test_predict, true = testHouseVotes84$Class)
# Accuracy (proportion of correct predictions)
mean(nb_test_predict == testHouseVotes84$Class)
# Run the whole split/fit/predict pipeline n times and record accuracies
nb_multiple_runs <- function(train_fraction, n) {
fraction_correct <- rep(NA,n)
for (i in 1:n) {
HouseVotes84[,"train"] <- ifelse(runif(nrow(HouseVotes84))<train_fraction,1,0)
trainColNum <- grep("train", names(HouseVotes84))
trainHouseVotes84 <- HouseVotes84[HouseVotes84$train == 1,-trainColNum]
testHouseVotes84 <- HouseVotes84[HouseVotes84$train == 0,-trainColNum]
nb_model <- naiveBayes(Class ~ ., data = trainHouseVotes84)
nb_test_predict <- predict(nb_model, testHouseVotes84[,-1])
fraction_correct[i] <- mean(nb_test_predict == testHouseVotes84$Class)
}
return(fraction_correct)
}
# Run the model 20 times
fraction_correct_predictions <- nb_multiple_runs(0.8, 20)
fraction_correct_predictions
# Summary of results
summary(fraction_correct_predictions)
# Standard deviation
sd(fraction_correct_predictions)
# The accuracies across runs are close, between 0.87 and 0.95,
# with a standard deviation of 0.02.
# Naive Bayes did a good job on this dataset.
|
43816f15ee91be590928f16127158585099b4e23
|
ed7a6d8459fe0d0b4eceb27708499fb628075d78
|
/TxDbLite/R/indexIfNoneFound.R
|
f2c7a8e6dcac5a961136dcecfc49b676b18bea98
|
[
"MIT"
] |
permissive
|
RamsinghLab/Arkas-RNASeq
|
a80b5d24fb4137191ffc7b4aeabbb7c3f9c01fa0
|
75ae2baa6a85b6d040be2b3a29ddac9d9cf106a2
|
refs/heads/master
| 2023-04-12T07:02:00.071576
| 2017-04-11T22:31:31
| 2017-04-11T22:31:31
| 87,348,768
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 338
|
r
|
indexIfNoneFound.R
|
#' Index a FASTA file when no index is found
#'
#' Helper for findDupes: ensures that \code{fastaFile} has a
#' \code{.fai} index next to it, creating one with \code{indexFa}
#' when absent.
#'
#' @param fastaFile the name of the FASTA file
#'
#' @return the index, invisibly (invisible \code{NULL} when the
#'   index already exists)
#'
#' @export
indexIfNoneFound <- function(fastaFile) {
  faiFile <- paste0(fastaFile, ".fai")
  # Nothing to do when an index is already present.
  if (file.exists(faiFile)) {
    return(invisible(NULL))
  }
  message("Indexing ", fastaFile, " to extract sequence names...")
  invisible(indexFa(fastaFile))
}
|
1ff9f3b73afb498f94eb856d5d7e62eeb752e89d
|
587a4ce6eefc0da2eb5a3516b19180ff06710538
|
/man/download.Rd
|
cb9388c34b6a6c94c75f2093ec0bdb71ac29be27
|
[] |
no_license
|
RM1900/RCache
|
cc81847e760ec89697e9eb52c1d31452c7376d24
|
0a10561f1ab71b5587d34fa2aba7342098f13d28
|
refs/heads/master
| 2020-12-25T23:56:38.634463
| 2013-04-06T22:50:04
| 2013-04-06T22:50:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 120
|
rd
|
download.Rd
|
\name{download}
\alias{download}
\title{Download a file.}
\usage{
download(url)
}
\description{
Download a file.
}
|
4a5e4333f7b5bdfe3a7c327ee171e154c43c9c79
|
68664bd25f27d41ae5c5b7618363e5c373ba5d4f
|
/scripts/classifierdemo.R
|
7752552ad9d8c396a5687568d289b63f37884f02
|
[] |
no_license
|
shriv/datalandz_biosecurity
|
5e5bb2b58f1f7bb5bbb2c8ff4168acf6e2fe4d23
|
e27f6f0aebf33c43046b96a3ea79b6b6117a2ee6
|
refs/heads/master
| 2020-03-27T17:04:02.461164
| 2018-09-03T06:15:41
| 2018-09-03T06:15:41
| 146,826,890
| 0
| 1
| null | 2018-08-31T20:49:43
| 2018-08-31T01:44:08
|
R
|
UTF-8
|
R
| false
| false
| 995
|
r
|
classifierdemo.R
|
require(dplyr)
require(shiny)
require(keras)
require(rjson)
install_keras(tensorflow = "gpu")
setwd("~/datalandz_biosecurity")
# instantiate the model
model <- application_resnet50(weights = 'imagenet')
# load the image
img_path <- "./data/)"
img <- image_load(img_path, target_size = c(224,224))
x <- image_to_array(img)
# ensure we have a 4d tensor with single element in the batch dimension,
# the preprocess the input for prediction using resnet50
x <- array_reshape(x, c(1, dim(x)))
x <- imagenet_preprocess_input(x)
# make predictions then decode and print them
preds <- model %>% predict(x)
imagenet_decode_predictions(preds, top = 3)[[1]]
# }
labels <- labelImage("./data/anastrepha_striata.jpg")
print(labels)
json_data <- fromJSON(file="./UOR.json")
json_data <- lapply(json_data, function(x) {
x[sapply(x, is.null)] <- NA
unlist(x)
})
dataset<- as.data.frame(do.call("rbind", json_data))
dataset %>% filter(sp_type_name == "Amphibian") %>% head() %>% select(name_sci)
|
4ad7d9cc0df5aed0e63172eb9a3839c1b79b6c4f
|
eb5645e68df318af0bcf2e42d6da495dd5adaaf8
|
/plot2.R
|
f32e1f75654f2c5a9711e27958ebc45b06e04ef4
|
[] |
no_license
|
vnaidu/ExData_Plotting1
|
599e1885b6797d79d52b83a9c77bf6d6ba77e6bb
|
60c9c9371ee35e2971dda99a30b0505fa096f504
|
refs/heads/master
| 2021-01-09T07:05:16.062046
| 2016-07-16T11:01:04
| 2016-07-16T11:01:04
| 63,457,802
| 0
| 0
| null | 2016-07-16T00:20:37
| 2016-07-16T00:20:37
| null |
UTF-8
|
R
| false
| false
| 378
|
r
|
plot2.R
|
# plot2.R: line chart of Global Active Power over time, drawn on the
# plotting device configured by startDev()/closeDev().
source("dataHelper.R")
source("plotHelper.R", local = TRUE)
# Household power-consumption data from the shared helper.
power_data <- GetData()
# Start plotting device using default options
startDev("plot2")
# Draw Global Active Power against date/time as a line chart.
plot(
  power_data$DateTime,
  power_data$Global_active_power,
  type = "l",
  xlab = "Day",
  ylab = "Global Active Power (kW)",
  cex.lab = 0.7,
  cex.axis = 0.8
)
# Close plotting device
closeDev()
|
b6e9208b406755863a09b2a21e34c021733902c5
|
fcc13976b8952fedec00b0c5d4520edc6d5103b9
|
/man/dataUploadUI.Rd
|
cebb834800f3bb43c3087edef36a25f833debee4
|
[] |
no_license
|
anngvu/DIVE
|
851173b4515ab4fd8c26e171158aa17f079785db
|
e80d254fc4be2c4a3c12f4a1b4507beff3fe3663
|
refs/heads/master
| 2023-07-26T00:30:07.924714
| 2021-09-08T15:04:34
| 2021-09-08T15:04:34
| 173,828,093
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 825
|
rd
|
dataUploadUI.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataUpload.R
\name{dataUploadUI}
\alias{dataUploadUI}
\title{Shiny module UI for user-uploaded data}
\usage{
dataUploadUI(id, ...)
}
\arguments{
\item{id}{Character ID for specifying namespace, see \code{shiny::\link[shiny]{NS}}.}
\item{...}{Named list of params passed to \code{shiny::\link[shiny]{fileInput}}.
Note that \code{multiple = FALSE} (the default) must not be overridden.}
}
\value{
UI components.
}
\description{
Create UI that contains a main \code{shiny::\link[shiny]{fileInput}}
and two optional features: a file-remove button and link to display more info
(for communicating file upload requirements and/or instructions).
}
\seealso{
Other dataUpload functions:
\code{\link{dataUploadServer}()}
}
\concept{dataUpload functions}
|
db65a28064f4a7bd77b234ff112f48564ce3f882
|
0dba5c813820966df254b96d2ff4e8590243fab1
|
/2.cfa.R
|
15862afdb2e303200688c21b062433513fda39c5
|
[] |
no_license
|
PamelaInostroza/Data_analysis-ICCS
|
1045dc8a5c06cdf6c255a4fb217999dce0a4c0fc
|
294a61b75a9783d1c2d071e579b3ea14b2c17a40
|
refs/heads/main
| 2023-02-08T15:59:26.489558
| 2020-12-31T00:09:17
| 2020-12-31T00:09:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 44,560
|
r
|
2.cfa.R
|
# CFA preparation: reverse-code negatively worded scale items so that all
# items point in the same direction before fitting the models below.
library(lavaan)
library(lavaan.survey)
library(semPlot)
library(sjlabelled)
# NOTE(review): memory.limit() is Windows-only (defunct in R >= 4.2).
memory.limit(15000)
options(survey.lonely.psu="adjust")
# Item codes that are negatively worded and must be reverse-coded.
InverseCod <- c("BS4G6","BS4G9","BS4G13","IS2P24C","IS2P24D","IS2P24F","IS3G24C","IS3G24D","IS3G24F")
#Cycles 2-3 vars reverse whole scale
# Cycle 1 keeps only the inverse-coded items; cycles 2-3 keep the rest.
C1vars <- VarsToUse %>% filter(Domain == "Scales" & Dataset %in% c("ISG","ISE")) %>% select(VariableC1) %>% na.omit() %>% pull()
C1vars <- C1vars[grepl(paste0(InverseCod, collapse = "|"), C1vars)]
C2vars <- VarsToUse %>% filter(Domain == "Scales" & Dataset %in% c("ISG","ISE")) %>% select(VariableC2) %>% na.omit() %>% pull()
C2vars <- C2vars[!grepl(paste0(InverseCod, collapse = "|"), C2vars)]
C3vars <- VarsToUse %>% filter(Domain == "Scales" & Dataset %in% c("ISG","ISE")) %>% select(VariableC3) %>% na.omit() %>% pull()
C3vars <- C3vars[!grepl(paste0(InverseCod, collapse = "|"), C3vars)]
torecod <- c(C1vars, C2vars, C3vars)
# Reverse-code (1..4 -> 4..1) unless the columns were already recoded.
# NOTE(review): if this guard is FALSE, `dat` below is undefined/stale yet
# still used in the cbind() further down — confirm that path cannot occur.
if(!any(colnames(ISC_cfa) %in% paste0(c(C1vars, C2vars, C3vars),"."))){
dat <- data.frame(psych::reverse.code(keys = rep(-1,length(torecod)),
items = ISC_cfa[torecod],
mini = rep(1,length(torecod)),
maxi = rep(4,length(torecod))))
colnames(dat) <- colnames(ISC_cfa[, torecod])
}
# Copy labels of old variables
# NOTE(review): eval(parse(...)) could simply be get_label(ISC_cfa[[torecod[i]]]).
label <- NULL
for (i in 1:length(torecod)) {
label[i] <- eval(parse(text=paste0("get_label(ISC_cfa$",torecod[i],")")))
}
# Add "(r)" to the label of each inverse-coded variable
labelr <- NULL
for (j in 1:length(InverseCod)) {
labelr[j] <- paste0(eval(parse(text=paste0("get_label(ISC_cfa$",InverseCod[j],")"))),"(r)")
}
# Replace the original columns with the recoded ones and restore labels.
ISC_cfa <- cbind(ISC_cfa[,!colnames(ISC_cfa) %in% torecod], dat)
ISC_cfa[torecod] <- set_label(ISC_cfa[torecod], label)
ISC_cfa[InverseCod] <- set_label(ISC_cfa[InverseCod], labelr)
ISC_cfa[Scales] <- set_labels(ISC_cfa[Scales], labels = c("strongly disagree" = 1, "disagree" = 2, "agree" = 3, "strongly agree" = 4))
if (any(grepl("99", years$year))){
model99E <-'
Gend_Equal =~ BS4G1 + BS4G4 + BS4G6 + BS4G9 + BS4G11 + BS4G13
Immi_Equal =~ BS4H1 + BS4H2 + BS4H3 + BS4H4 + BS4H5
Ethn_Equal =~ BS4G2 + BS4G5 + BS4G8 + BS4G12
BS4H1 ~~ BS4H4
BS4G9 ~~ BS4G13
BS4G6 ~~ BS4G13
BS4G6 ~~ BS4G9
'
model99nE <-'
Gend_Equal =~ BS4G1 + BS4G4 + BS4G6 + BS4G9 + BS4G11 + BS4G13
Immi_Equal =~ BS4H1 + BS4H2 + BS4H3 + BS4H4 + BS4H5
Ethn_Equal =~ BS4G2 + BS4G5 + BS4G8 + BS4G12
BS4H1 ~~ BS4H4
BS4G9 ~~ BS4G13
BS4G6 ~~ BS4G13
BS4G6 ~~ BS4G9
'
cat('## CIVED 1999 \n')
#############1999#########
index99 <- Itemdesc %>% filter(item != "index") %>% dplyr::select(CIVED_1999) %>% na.omit() %>% pull()
ds991 <- ISC_cfa %>% filter(!is.na(TOTWGT_Gc1)) %>%
dplyr::select(all_of(index99), all_of(Id), IDJK, IDCL, SENWGT_Gc1, GENDER) %>%
mutate(GENDER = as.character(GENDER))
ds99 <- ds991 %>% mutate_at(.funs = as.numeric, .vars = index99)
#European countries
ds99E <- ds99 %>% filter(!COUNTRY %in% c(CNTne,CNT2cne))
survey.design99E <- svydesign(ids= ~ IDCL, weights = ~ SENWGT_Gc1, strata = ~ IDJK, nest = TRUE, data = ds99E)
cfa99E <- cfa(model99E, data = ds99E, cluster = c("COUNTRY", "IDSCHOOL"), missing = "fiml")
survey.fit99E <- lavaan.survey(lavaan.fit = cfa99E, survey.design = survey.design99E, estimator= "MLMVS")
#cfa <- cfa(model99E, data = ds99E, cluster = c("COUNTRY", "IDSCHOOL"))
#summary(cfa, fit.measures=TRUE)
#print(modindices(cfa, sort=T)[1:10,])
#Non European countries
ds99nE <- ds99 %>% filter(COUNTRY %in% c(CNTne,CNT2cne))
survey.design99nE <- svydesign(ids= ~ IDCL, weights = ~ SENWGT_Gc1, strata = ~ IDJK, nest = TRUE, data = ds99nE)
cfa99nE <- cfa(model99nE, data = ds99nE, cluster = c("COUNTRY", "IDSCHOOL"), missing = "fiml")
survey.fit99nE <- lavaan.survey(lavaan.fit = cfa99nE, survey.design = survey.design99nE, estimator= "MLMVS")
#cfa <- cfa(model99nE, data = ds99nE, cluster = c("COUNTRY", "IDSCHOOL"))
#summary(cfa, fit.measures=TRUE)
#print(modindices(cfa,sort=T)[1:10,])
#Factor scores for latent variables
p99E <- cbind(ds99E, predict(cfa99E)) #Prediction of factor scores should be based on survey design but not possible to obtain in R using FIML
p99nE <- cbind(ds99nE, predict(cfa99nE))
p99 <- p99E %>% bind_rows(p99nE) %>% mutate(cycle = "C1") %>%
dplyr::select(all_of(Id), all_of(Newscales))
rm(cfa99E, cfa99nE) #Remove fit to save space in disk
#Fit for each country separately
cnt99 <- unique(ds99$COUNTRY)
meast99 <- NULL
stdl99 <- NULL
# For each country: build its own survey design, fit the region-appropriate
# model (European vs non-European), and collect fit measures plus
# standardized loadings.
for (c99 in cnt99) {
dscNT <- ds99[ds99$COUNTRY == c99,]
survey.cnt99 <- svydesign(ids = ~ IDCL, weights = ~ SENWGT_Gc1, strata = ~ IDJK, nest = TRUE, data = dscNT)
if(!c99 %in% c(CNTne,CNT2cne)) model <- model99E else model <- model99nE
CNTcfa <- cfa(model, cluster = c("IDSCHOOL"), data = dscNT)
survey.CNTfit <- lavaan.survey(lavaan.fit = CNTcfa, survey.design = survey.cnt99, estimator= "MLMVS")
# One column of fit indices per country, with the sample size prepended.
meas <- fitMeasures(survey.CNTfit, c("chisq","df", "cfi", "tli","rmsea", "srmr"), output = "matrix")
meas <- rbind(n = nobs(survey.CNTfit), meas)
meast99 <- cbind(meast99, meas)
# Standardized factor loadings (op "=~") tagged with the country code.
stdl <- standardizedSolution(survey.CNTfit) %>%
filter(op == "=~") %>%
mutate(cntry = c99)
stdl99 <- rbind(stdl99, stdl)
}
# Transpose so countries become the rows of the fit-measure table.
meast99 <- as.data.frame(t(meast99))
rownames(meast99) <- cnt99
rm(CNTcfa)
cat('### CFA - ICCS 1999')
cat(' \n')
cat(' \n')
# Pooled fit measures for the European and non-European models, stacked into
# one table for the report.
tmeasE <- t(fitMeasures(survey.fit99E, c("chisq","df","cfi", "tli","rmsea", "srmr"),
output = "matrix"))
tmeasnE <- t(fitMeasures(survey.fit99nE, c("chisq","df","cfi", "tli","rmsea", "srmr"),
output = "matrix"))
meas99 <- rbind(data.frame(Quest = "European", n = nobs(survey.fit99E), round(tmeasE, 3)),
data.frame(Quest = "Non-european", n = nobs(survey.fit99nE), round(tmeasnE, 3)))
knitr::kable(meas99) %>% print()
cat(' \n')
cat('### CFA - ICCS 1999, by countries')
cat(' \n')
knitr::kable(meast99,digits = 3) %>% print()
cat(' \n')
cat(' \n')
# Path diagrams; invisible() suppresses semPaths()'s return value, the plot is
# drawn as a side effect.
invisible(semPaths(survey.fit99E,"model", "std", "lisrel", edge.label.cex = 0.6, intercepts = FALSE, groups = "latent",
pastel = TRUE, title = FALSE, nCharNodes = 10, nDigits = 1))
title("CFA measurement European model", line = 2)
invisible(semPaths(survey.fit99nE,"model", "std", "lisrel", edge.label.cex = 0.6, intercepts = FALSE, groups = "latent",
pastel = TRUE, title = FALSE, nCharNodes = 10, nDigits = 1))
title("CFA measurement Non European model", line = 2)
cat(' \n')
cat(' \n')
# Map item codes to their lower-cased variable labels for plotting, dropping
# id/weight/gender columns by name pattern.
labels <- data.frame(label = tolower(sjlabelled::get_label(ds991)))
labels <- labels %>% filter(!str_detect(rownames(labels), c("IDSTUD|IDSCHOOL|COUNTRY|TOTWGT|GENDER"))) %>%
mutate(variable = rownames(.))
stdl99 <- stdl99 %>% mutate(rhs = factor(rhs, levels = labels$variable, labels = labels$label)) %>%
mutate(Model = ifelse(cntry %in% c(CNTne,CNT2cne), "Non-European", "European"))
# Dot-and-interval plot of per-country standardized loadings, faceted by
# latent factor (rows) and model region (columns).
l1 <- stdl99 %>% data.frame() %>%
ggplot(aes(x = est.std, y = rhs, color = reorder(cntry, desc(cntry)))) +
geom_linerange(aes(xmin = ci.lower, xmax = ci.upper), position = position_dodge(0.4)) +
geom_jitter(position = position_dodge(0.4)) +
facet_grid(lhs~Model, scales = "free") +
geom_text(aes(label=cntry),hjust=0, vjust=1, position = position_dodge(0.4), size = 2) +
theme(legend.position = "none", axis.line.y = element_blank()) +
ggtitle("Loading distribution of scales - ICCS 1999") +
ylab("") +
xlab("Loadings with Confidence Interval") +
scale_y_discrete(labels = function(x) str_wrap(x, 25)) +
scale_x_continuous(breaks = c(0, 0.25, 0.5, 0.75, 1), limits = c(0,1), labels = function(x) sprintf("%.1f", x))
print(l1)
cat('### Invariance between COUNTRY')
cat(' \n')
cat(' \n')
# Measurement-invariance ladder across countries, European model:
# configural (no equality constraints), metric (equal loadings), scalar
# (+ equal intercepts), and a "Strict" step that additionally constrains
# latent variances (lv.variances). Each fit is re-estimated under the
# survey design.
inv.conf99E <- cfa(model99E, data = ds99E, cluster = "IDSCHOOL", group = "COUNTRY")
inv.conf99E <- lavaan.survey(lavaan.fit = inv.conf99E, survey.design = survey.design99E, estimator= "MLMVS")
inv.metr99E <- cfa(model99E, data = ds99E, cluster = "IDSCHOOL", group = "COUNTRY", group.equal = c("loadings"))
inv.metr99E <- lavaan.survey(lavaan.fit = inv.metr99E, survey.design = survey.design99E, estimator= "MLMVS")
inv.scal99E <- cfa(model99E, data = ds99E, cluster = "IDSCHOOL", group = "COUNTRY", group.equal = c("loadings","intercepts"))
inv.scal99E <- lavaan.survey(lavaan.fit = inv.scal99E, survey.design = survey.design99E, estimator= "MLMVS")
inv.stri99E <- cfa(model99E, data = ds99E, cluster = "IDSCHOOL", group = "COUNTRY", group.equal = c("loadings","intercepts","lv.variances"))
inv.stri99E <- lavaan.survey(lavaan.fit = inv.stri99E, survey.design = survey.design99E, estimator= "MLMVS")
invarCNT1 <- data.frame(Quest = "European", round(rbind(Configural = fitMeasures(inv.conf99E, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Metric = fitMeasures(inv.metr99E, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Scalar = fitMeasures(inv.scal99E, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Strict = fitMeasures(inv.stri99E, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea"))),3))
# Same ladder for the non-European model.
inv.conf99nE <- cfa(model99nE, data = ds99nE, cluster = "IDSCHOOL", group = "COUNTRY")
inv.conf99nE <- lavaan.survey(lavaan.fit = inv.conf99nE, survey.design = survey.design99nE, estimator= "MLMVS")
inv.metr99nE <- cfa(model99nE, data = ds99nE, cluster = "IDSCHOOL", group = "COUNTRY", group.equal = c("loadings"))
inv.metr99nE <- lavaan.survey(lavaan.fit = inv.metr99nE, survey.design = survey.design99nE, estimator= "MLMVS")
inv.scal99nE <- cfa(model99nE, data = ds99nE, cluster = "IDSCHOOL", group = "COUNTRY", group.equal = c("loadings","intercepts"))
inv.scal99nE <- lavaan.survey(lavaan.fit = inv.scal99nE, survey.design = survey.design99nE, estimator= "MLMVS")
inv.stri99nE <- cfa(model99nE, data = ds99nE, cluster = "IDSCHOOL", group = "COUNTRY", group.equal = c("loadings","intercepts","lv.variances"))
inv.stri99nE <- lavaan.survey(lavaan.fit = inv.stri99nE, survey.design = survey.design99nE, estimator= "MLMVS")
invarCNT2 <- data.frame(Quest = "Non-European", round(rbind(Configural = fitMeasures(inv.conf99nE, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Metric = fitMeasures(inv.metr99nE, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Scalar = fitMeasures(inv.scal99nE, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Strict = fitMeasures(inv.stri99nE, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea"))),3))
# Combine both regions; D_* columns hold the change in fit relative to the
# previous, less constrained, model (lag() leaves NA in the first row).
invarCNT <- invarCNT1 %>% mutate(Invariance = rownames(invarCNT1)) %>% relocate(Invariance, .before = npar) %>%
mutate(D_tli = tli-lag(tli),
D_cfi = cfi-lag(cfi),
D_rmsea = rmsea-lag(rmsea)) %>%
bind_rows(invarCNT2 %>% mutate(Invariance = rownames(invarCNT2)) %>% relocate(Invariance, .before = npar) %>%
mutate(D_tli = tli-lag(tli),
D_cfi = cfi-lag(cfi),
D_rmsea = rmsea-lag(rmsea))) %>%
knitr::kable() %>% print()
cat(' \n')
cat(' \n')
rm(inv.conf99E, inv.metr99E, inv.scal99E, inv.stri99E, inv.conf99nE, inv.metr99nE, inv.scal99nE, inv.stri99nE) #Remove to save space in disk
cat('### Invariance between GENDER')
cat(' \n')
cat(' \n')
# Measurement-invariance ladder grouping by GENDER: configural, metric
# (equal loadings), scalar (+ equal intercepts), and a "Strict" step that
# additionally constrains latent variances (lv.variances). Each fit is
# re-estimated under the survey design.
inv.conf99E <- cfa(model99E, data = ds99E, cluster = "IDSCHOOL", group = "GENDER")
inv.conf99E <- lavaan.survey(lavaan.fit = inv.conf99E, survey.design = survey.design99E, estimator= "MLMVS")
inv.metr99E <- cfa(model99E, data = ds99E, cluster = "IDSCHOOL", group = "GENDER", group.equal = c("loadings"))
inv.metr99E <- lavaan.survey(lavaan.fit = inv.metr99E, survey.design = survey.design99E, estimator= "MLMVS")
inv.scal99E <- cfa(model99E, data = ds99E, cluster = "IDSCHOOL", group = "GENDER", group.equal = c("loadings","intercepts"))
inv.scal99E <- lavaan.survey(lavaan.fit = inv.scal99E, survey.design = survey.design99E, estimator= "MLMVS")
inv.stri99E <- cfa(model99E, data = ds99E, cluster = "IDSCHOOL", group = "GENDER", group.equal = c("loadings","intercepts","lv.variances"))
inv.stri99E <- lavaan.survey(lavaan.fit = inv.stri99E, survey.design = survey.design99E, estimator= "MLMVS")
invarGNDR1 <- data.frame(Quest = "European", round(rbind(Configural = fitMeasures(inv.conf99E, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Metric = fitMeasures(inv.metr99E, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Scalar = fitMeasures(inv.scal99E, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Strict = fitMeasures(inv.stri99E, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea"))),3))
# Same ladder for the non-European model.
inv.conf99nE <- cfa(model99nE, data = ds99nE, cluster = "IDSCHOOL", group = "GENDER")
inv.conf99nE <- lavaan.survey(lavaan.fit = inv.conf99nE, survey.design = survey.design99nE, estimator= "MLMVS")
inv.metr99nE <- cfa(model99nE, data = ds99nE, cluster = "IDSCHOOL", group = "GENDER", group.equal = c("loadings"))
inv.metr99nE <- lavaan.survey(lavaan.fit = inv.metr99nE, survey.design = survey.design99nE, estimator= "MLMVS")
inv.scal99nE <- cfa(model99nE, data = ds99nE, cluster = "IDSCHOOL", group = "GENDER", group.equal = c("loadings","intercepts"))
inv.scal99nE <- lavaan.survey(lavaan.fit = inv.scal99nE, survey.design = survey.design99nE, estimator= "MLMVS")
inv.stri99nE <- cfa(model99nE, data = ds99nE, cluster = "IDSCHOOL", group = "GENDER", group.equal = c("loadings","intercepts","lv.variances"))
inv.stri99nE <- lavaan.survey(lavaan.fit = inv.stri99nE, survey.design = survey.design99nE, estimator= "MLMVS")
invarGNDR2 <- data.frame(Quest = "Non-European", round(rbind(Configural = fitMeasures(inv.conf99nE, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Metric = fitMeasures(inv.metr99nE, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Scalar = fitMeasures(inv.scal99nE, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Strict = fitMeasures(inv.stri99nE, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea"))),3))
# Combine both regions; D_* columns hold the change in fit relative to the
# previous, less constrained, model (lag() leaves NA in the first row).
invarGNDR <- invarGNDR1 %>% mutate(Invariance = rownames(invarGNDR1)) %>% relocate(Invariance, .before = npar) %>%
mutate(D_tli = tli-lag(tli),
D_cfi = cfi-lag(cfi),
D_rmsea = rmsea-lag(rmsea)) %>%
bind_rows(invarGNDR2 %>% mutate(Invariance = rownames(invarGNDR2)) %>% relocate(Invariance, .before = npar) %>%
mutate(D_tli = tli-lag(tli),
D_cfi = cfi-lag(cfi),
D_rmsea = rmsea-lag(rmsea))) %>%
knitr::kable() %>% print()
cat(' \n')
cat(' \n')
rm(inv.conf99E, inv.metr99E, inv.scal99E, inv.stri99E, inv.conf99nE, inv.metr99nE, inv.scal99nE, inv.stri99nE) #Remove to save space in disk
}
if (any(grepl("09", years$year))){
# ---- ICCS 2009: measurement models, data preparation, pooled CFA ----
# lavaan syntax for the three equality-attitude factors; the non-European
# model omits the residual covariance IS2P24A ~~ IS2P24B present in the
# European model.
model09E<-'
Gend_Equal =~ IS2P24A + IS2P24B + IS2P24C + IS2P24D + IS2P24E
Immi_Equal =~ IS2P26A + IS2P26B + IS2P26C + IS2P26D + IS2P26E
Ethn_Equal =~ IS2P25A + IS2P25B + IS2P25C + IS2P25D + IS2P25E
IS2P24C ~~ IS2P24D
IS2P25A ~~ IS2P25B
IS2P26A ~~ IS2P26D
IS2P24A ~~ IS2P24B
'
model09nE<-'
Gend_Equal =~ IS2P24A + IS2P24B + IS2P24C + IS2P24D + IS2P24E
Immi_Equal =~ IS2P26A + IS2P26B + IS2P26C + IS2P26D + IS2P26E
Ethn_Equal =~ IS2P25A + IS2P25B + IS2P25C + IS2P25D + IS2P25E
IS2P24C ~~ IS2P24D
IS2P25A ~~ IS2P25B
IS2P26A ~~ IS2P26D
'
#############2009#########
cat('## ICCS 2009 \n')
# Item codes administered in ICCS 2009 (the "index" rows are excluded).
index09 <- Itemdesc %>% filter(item != "index") %>% dplyr::select(ICCS_2009) %>% na.omit() %>% pull()
# Students with a valid cycle-2 total weight; GENDER is derived from SGENDER.
ds091 <- ISC_cfa %>% filter(!is.na(TOTWGT_Gc2)) %>%
dplyr::select(all_of(index09), all_of(Id), IDJK, IDCL, SENWGT_Gc2, SGENDER) %>%
mutate(GENDER = as.character(SGENDER))
# Coerce item responses to numeric; across() replaces the superseded mutate_at().
ds09 <- ds091 %>% mutate(across(all_of(index09), as.numeric))
#European countries
ds09E <- ds09 %>% filter(!COUNTRY %in% c(CNTne,CNT2cne))
survey.design09E <- svydesign(ids= ~ IDCL, weights = ~ SENWGT_Gc2, strata = ~ IDJK, nest = TRUE, data = ds09E)
# CFA with FIML for missing data, then re-estimated under the survey design.
cfa09E <- cfa(model09E, data = ds09E, cluster = c("COUNTRY", "IDSCHOOL"), missing = "fiml")
survey.fit09E <- lavaan.survey(lavaan.fit = cfa09E, survey.design = survey.design09E, estimator= "MLMVS")
# cfa <- cfa(model09E, data = ds09E, cluster = c("COUNTRY", "IDSCHOOL"))
# summary(cfa, fit.measures=TRUE)
# print(modindices(cfa, sort=T)[1:10,])
#Non European countries
ds09nE <- ds09 %>% filter(COUNTRY %in% c(CNTne,CNT2cne))
survey.design09nE <- svydesign(ids= ~ IDCL, weights = ~ SENWGT_Gc2, strata = ~ IDJK, nest = TRUE, data = ds09nE)
cfa09nE <- cfa(model09nE, data = ds09nE, cluster = c("COUNTRY", "IDSCHOOL"), missing = "fiml")
survey.fit09nE <- lavaan.survey(lavaan.fit = cfa09nE, survey.design = survey.design09nE, estimator= "MLMVS")
# cfa <- cfa(model09nE, data = ds09nE, cluster = c("COUNTRY", "IDSCHOOL"))
# summary(cfa, fit.measures=TRUE)
# print(modindices(cfa,sort=T)[1:10,])
#Factor scores for latent variables
p09E <- cbind(ds09E, predict(cfa09E)) #Prediction of factor scores should be based on survey design but not possible to obtain in R using FIML
p09nE <- cbind(ds09nE, predict(cfa09nE))
# Stack both regions, tag the cycle, and keep ids plus the new scale scores.
p09 <- p09E %>% bind_rows(p09nE) %>% mutate(cycle = "C2") %>%
dplyr::select(all_of(Id), all_of(Newscales))
rm(cfa09E, cfa09nE) #Remove fit objects to save memory
#Fit for each country separately
cnt09 <- unique(ds09$COUNTRY)
meast09 <- NULL
stdl09 <- NULL
# For each country: build its own survey design, fit the region-appropriate
# model (European vs non-European), and collect fit measures plus
# standardized loadings.
for (c09 in cnt09) {
dscNT <- ds09[ds09$COUNTRY == c09,]
survey.cnt09 <- svydesign(ids = ~ IDCL, weights = ~ SENWGT_Gc2, strata = ~ IDJK, nest = TRUE, data = dscNT)
if(!c09 %in% c(CNTne,CNT2cne)) model <- model09E else model <- model09nE
CNTcfa <- cfa(model, cluster = c("IDSCHOOL"), data = dscNT)
survey.CNTfit <- lavaan.survey(lavaan.fit = CNTcfa, survey.design = survey.cnt09, estimator= "MLMVS")
# One column of fit indices per country, with the sample size prepended.
meas <- fitMeasures(survey.CNTfit, c("chisq","df", "cfi", "tli","rmsea", "srmr"), output = "matrix")
meas <- rbind(n = nobs(survey.CNTfit), meas)
meast09 <- cbind(meast09, meas)
# Standardized factor loadings (op "=~") tagged with the country code.
stdl <- standardizedSolution(survey.CNTfit) %>%
filter(op == "=~") %>%
mutate(cntry = c09)
stdl09 <- rbind(stdl09, stdl)
}
# Transpose so countries become the rows of the fit-measure table.
meast09 <- as.data.frame(t(meast09))
rownames(meast09) <- cnt09
rm(CNTcfa)
cat('### CFA - ICCS 2009')
cat(' \n')
cat(' \n')
# Pooled fit measures for the European and non-European models, stacked into
# one table for the report.
tmeasE <- t(fitMeasures(survey.fit09E, c("chisq","df","cfi", "tli","rmsea", "srmr"),
output = "matrix"))
tmeasnE <- t(fitMeasures(survey.fit09nE, c("chisq","df","cfi", "tli","rmsea", "srmr"),
output = "matrix"))
meas09 <- rbind(data.frame(Quest = "European", n = nobs(survey.fit09E), round(tmeasE, 3)),
data.frame(Quest = "Non-european", n = nobs(survey.fit09nE), round(tmeasnE, 3)))
knitr::kable(meas09) %>% print()
cat(' \n')
cat('### CFA - ICCS 2009, by countries')
cat(' \n')
knitr::kable(meast09,digits = 3) %>% print()
cat(' \n')
cat(' \n')
# Path diagrams; invisible() suppresses semPaths()'s return value, the plot is
# drawn as a side effect.
invisible(semPaths(survey.fit09E,"model", "std", "lisrel", edge.label.cex = 0.6, intercepts = FALSE, groups = "latent",
pastel = TRUE, title = FALSE, nCharNodes = 10, nDigits = 1))
title("CFA measurement European model", line = 2)
invisible(semPaths(survey.fit09nE,"model", "std", "lisrel", edge.label.cex = 0.6, intercepts = FALSE, groups = "latent",
pastel = TRUE, title = FALSE, nCharNodes = 10, nDigits = 1))
title("CFA measurement Non European model", line = 2)
cat(' \n')
cat(' \n')
# Map item codes to their lower-cased variable labels for plotting, dropping
# id/weight/gender columns by name pattern.
labels <- data.frame(label = tolower(sjlabelled::get_label(ds091)))
labels <- labels %>% filter(!str_detect(rownames(labels), c("IDSTUD|IDSCHOOL|COUNTRY|TOTWGT|GENDER"))) %>%
mutate(variable = rownames(.))
stdl09 <- stdl09 %>% mutate(rhs = factor(rhs, levels = labels$variable, labels = labels$label)) %>%
mutate(Model = ifelse(cntry %in% c(CNTne,CNT2cne), "Non-European", "European"))
# Dot-and-interval plot of per-country standardized loadings, faceted by
# latent factor (rows) and model region (columns).
l1 <- stdl09 %>% data.frame() %>%
ggplot(aes(x = est.std, y = rhs, color = reorder(cntry, desc(cntry)))) +
geom_linerange(aes(xmin = ci.lower, xmax = ci.upper), position = position_dodge(0.4)) +
geom_jitter(position = position_dodge(0.4)) +
facet_grid(lhs~Model, scales = "free") +
geom_text(aes(label=cntry),hjust=0, vjust=1, position = position_dodge(0.4), size = 2) +
theme(legend.position = "none", axis.line.y = element_blank()) +
ggtitle("Loading distribution of scales - ICCS 2009") +
ylab("") +
xlab("Loadings with Confidence Interval") +
scale_y_discrete(labels = function(x) str_wrap(x, 25)) +
scale_x_continuous(breaks = c(0, 0.25, 0.5, 0.75, 1), limits = c(0,1), labels = function(x) sprintf("%.1f", x))
print(l1)
cat('### Invariance between COUNTRY')
cat(' \n')
cat(' \n')
# Measurement-invariance ladder across countries, European model:
# configural (no equality constraints), metric (equal loadings), scalar
# (+ equal intercepts), and a "Strict" step that additionally constrains
# latent variances (lv.variances). Each fit is re-estimated under the
# survey design.
inv.conf09E <- cfa(model09E, data = ds09E, cluster = "IDSCHOOL", group = "COUNTRY")
inv.conf09E <- lavaan.survey(lavaan.fit = inv.conf09E, survey.design = survey.design09E, estimator= "MLMVS")
inv.metr09E <- cfa(model09E, data = ds09E, cluster = "IDSCHOOL", group = "COUNTRY", group.equal = c("loadings"))
inv.metr09E <- lavaan.survey(lavaan.fit = inv.metr09E, survey.design = survey.design09E, estimator= "MLMVS")
inv.scal09E <- cfa(model09E, data = ds09E, cluster = "IDSCHOOL", group = "COUNTRY", group.equal = c("loadings","intercepts"))
inv.scal09E <- lavaan.survey(lavaan.fit = inv.scal09E, survey.design = survey.design09E, estimator= "MLMVS")
inv.stri09E <- cfa(model09E, data = ds09E, cluster = "IDSCHOOL", group = "COUNTRY", group.equal = c("loadings","intercepts","lv.variances"))
inv.stri09E <- lavaan.survey(lavaan.fit = inv.stri09E, survey.design = survey.design09E, estimator= "MLMVS")
invarCNT1 <- data.frame(Quest = "European", round(rbind(Configural = fitMeasures(inv.conf09E, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Metric = fitMeasures(inv.metr09E, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Scalar = fitMeasures(inv.scal09E, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Strict = fitMeasures(inv.stri09E, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea"))),3))
# Same ladder for the non-European model.
inv.conf09nE <- cfa(model09nE, data = ds09nE, cluster = "IDSCHOOL", group = "COUNTRY")
inv.conf09nE <- lavaan.survey(lavaan.fit = inv.conf09nE, survey.design = survey.design09nE, estimator= "MLMVS")
inv.metr09nE <- cfa(model09nE, data = ds09nE, cluster = "IDSCHOOL", group = "COUNTRY", group.equal = c("loadings"))
inv.metr09nE <- lavaan.survey(lavaan.fit = inv.metr09nE, survey.design = survey.design09nE, estimator= "MLMVS")
inv.scal09nE <- cfa(model09nE, data = ds09nE, cluster = "IDSCHOOL", group = "COUNTRY", group.equal = c("loadings","intercepts"))
inv.scal09nE <- lavaan.survey(lavaan.fit = inv.scal09nE, survey.design = survey.design09nE, estimator= "MLMVS")
inv.stri09nE <- cfa(model09nE, data = ds09nE, cluster = "IDSCHOOL", group = "COUNTRY", group.equal = c("loadings","intercepts","lv.variances"))
inv.stri09nE <- lavaan.survey(lavaan.fit = inv.stri09nE, survey.design = survey.design09nE, estimator= "MLMVS")
invarCNT2 <- data.frame(Quest = "Non-European", round(rbind(Configural = fitMeasures(inv.conf09nE, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Metric = fitMeasures(inv.metr09nE, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Scalar = fitMeasures(inv.scal09nE, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Strict = fitMeasures(inv.stri09nE, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea"))),3))
# Combine both regions; D_* columns hold the change in fit relative to the
# previous, less constrained, model (lag() leaves NA in the first row).
invarCNT <- invarCNT1 %>% mutate(Invariance = rownames(invarCNT1)) %>% relocate(Invariance, .before = npar) %>%
mutate(D_tli = tli-lag(tli),
D_cfi = cfi-lag(cfi),
D_rmsea = rmsea-lag(rmsea)) %>%
bind_rows(invarCNT2 %>% mutate(Invariance = rownames(invarCNT2)) %>% relocate(Invariance, .before = npar) %>%
mutate(D_tli = tli-lag(tli),
D_cfi = cfi-lag(cfi),
D_rmsea = rmsea-lag(rmsea))) %>%
knitr::kable() %>% print()
cat(' \n')
cat(' \n')
rm(inv.conf09E, inv.metr09E, inv.scal09E, inv.stri09E, inv.conf09nE, inv.metr09nE, inv.scal09nE, inv.stri09nE) #Remove to save space in disk
cat('### Invariance between GENDER')
cat(' \n')
cat(' \n')
# Measurement-invariance ladder grouping by GENDER: configural, metric
# (equal loadings), scalar (+ equal intercepts), and a "Strict" step that
# additionally constrains latent variances (lv.variances). Each fit is
# re-estimated under the survey design.
inv.conf09E <- cfa(model09E, data = ds09E, cluster = "IDSCHOOL", group = "GENDER")
inv.conf09E <- lavaan.survey(lavaan.fit = inv.conf09E, survey.design = survey.design09E, estimator= "MLMVS")
inv.metr09E <- cfa(model09E, data = ds09E, cluster = "IDSCHOOL", group = "GENDER", group.equal = c("loadings"))
inv.metr09E <- lavaan.survey(lavaan.fit = inv.metr09E, survey.design = survey.design09E, estimator= "MLMVS")
inv.scal09E <- cfa(model09E, data = ds09E, cluster = "IDSCHOOL", group = "GENDER", group.equal = c("loadings","intercepts"))
inv.scal09E <- lavaan.survey(lavaan.fit = inv.scal09E, survey.design = survey.design09E, estimator= "MLMVS")
inv.stri09E <- cfa(model09E, data = ds09E, cluster = "IDSCHOOL", group = "GENDER", group.equal = c("loadings","intercepts","lv.variances"))
inv.stri09E <- lavaan.survey(lavaan.fit = inv.stri09E, survey.design = survey.design09E, estimator= "MLMVS")
invarGNDR1 <- data.frame(Quest = "European", round(rbind(Configural = fitMeasures(inv.conf09E, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Metric = fitMeasures(inv.metr09E, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Scalar = fitMeasures(inv.scal09E, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Strict = fitMeasures(inv.stri09E, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea"))),3))
# Same ladder for the non-European model.
inv.conf09nE <- cfa(model09nE, data = ds09nE, cluster = "IDSCHOOL", group = "GENDER")
inv.conf09nE <- lavaan.survey(lavaan.fit = inv.conf09nE, survey.design = survey.design09nE, estimator= "MLMVS")
inv.metr09nE <- cfa(model09nE, data = ds09nE, cluster = "IDSCHOOL", group = "GENDER", group.equal = c("loadings"))
inv.metr09nE <- lavaan.survey(lavaan.fit = inv.metr09nE, survey.design = survey.design09nE, estimator= "MLMVS")
inv.scal09nE <- cfa(model09nE, data = ds09nE, cluster = "IDSCHOOL", group = "GENDER", group.equal = c("loadings","intercepts"))
inv.scal09nE <- lavaan.survey(lavaan.fit = inv.scal09nE, survey.design = survey.design09nE, estimator= "MLMVS")
inv.stri09nE <- cfa(model09nE, data = ds09nE, cluster = "IDSCHOOL", group = "GENDER", group.equal = c("loadings","intercepts","lv.variances"))
inv.stri09nE <- lavaan.survey(lavaan.fit = inv.stri09nE, survey.design = survey.design09nE, estimator= "MLMVS")
invarGNDR2 <- data.frame(Quest = "Non-European", round(rbind(Configural = fitMeasures(inv.conf09nE, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Metric = fitMeasures(inv.metr09nE, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Scalar = fitMeasures(inv.scal09nE, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Strict = fitMeasures(inv.stri09nE, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea"))),3))
# Combine both regions; D_* columns hold the change in fit relative to the
# previous, less constrained, model (lag() leaves NA in the first row).
invarGNDR <- invarGNDR1 %>% mutate(Invariance = rownames(invarGNDR1)) %>% relocate(Invariance, .before = npar) %>%
mutate(D_tli = tli-lag(tli),
D_cfi = cfi-lag(cfi),
D_rmsea = rmsea-lag(rmsea)) %>%
bind_rows(invarGNDR2 %>% mutate(Invariance = rownames(invarGNDR2)) %>% relocate(Invariance, .before = npar) %>%
mutate(D_tli = tli-lag(tli),
D_cfi = cfi-lag(cfi),
D_rmsea = rmsea-lag(rmsea))) %>%
knitr::kable() %>% print()
cat(' \n')
cat(' \n')
rm(inv.conf09E, inv.metr09E, inv.scal09E, inv.stri09E, inv.conf09nE, inv.metr09nE, inv.scal09nE, inv.stri09nE) #Remove to save space in disk
}
if (any(grepl("16", years$year))){
# ---- ICCS 2016: measurement models, data preparation, pooled CFA ----
# lavaan syntax; the non-European model has no Immi_Equal factor (the
# ES3G04* items are absent) and uses a different Gend_Equal residual
# covariance structure.
model16E<-'
Gend_Equal =~ IS3G24A + IS3G24B + IS3G24C + IS3G24D + IS3G24E
Immi_Equal =~ ES3G04A + ES3G04B + ES3G04C + ES3G04D + ES3G04E
Ethn_Equal =~ IS3G25A + IS3G25B + IS3G25C + IS3G25D + IS3G25E
IS3G24A ~~ IS3G24B
IS3G25A ~~ IS3G25B
ES3G04A ~~ ES3G04D
'
model16nE<-'
Gend_Equal =~ IS3G24A + IS3G24B + IS3G24C + IS3G24D + IS3G24E
Ethn_Equal =~ IS3G25A + IS3G25B + IS3G25C + IS3G25D + IS3G25E
IS3G24C ~~ IS3G24D
IS3G25A ~~ IS3G25B
IS3G24A ~~ IS3G24B
'
#############2016#########
cat('## ICCS 2016 \n')
# Item codes administered in ICCS 2016 (the "index" rows are excluded).
index16 <- Itemdesc %>% filter(item != "index") %>% dplyr::select(ICCS_2016) %>% na.omit() %>% pull()
# Students with a valid cycle-3 total weight; GENDER is derived from S_GENDER.
ds161 <- ISC_cfa %>% filter(!is.na(TOTWGT_Gc3)) %>%
dplyr::select(all_of(index16), all_of(Id), IDJK, IDCL, SENWGT_Gc3, S_GENDER) %>%
mutate(GENDER = as.character(S_GENDER))
# Coerce item responses to numeric; across() replaces the superseded mutate_at().
ds16 <- ds161 %>% mutate(across(all_of(index16), as.numeric))
#European countries
ds16E <- ds16 %>% filter(!COUNTRY %in% c(CNTne,CNT2cne))
survey.design16E <- svydesign(ids= ~ IDCL, weights = ~ SENWGT_Gc3, strata = ~ IDJK, nest = TRUE, data = ds16E)
# CFA with FIML for missing data, then re-estimated under the survey design.
cfa16E <- cfa(model16E, data = ds16E, cluster = c("COUNTRY", "IDSCHOOL"), missing = "fiml")
survey.fit16E <- lavaan.survey(lavaan.fit = cfa16E, survey.design = survey.design16E, estimator= "MLMVS")
#cfa <- cfa(model16E, data = ds16E, cluster = c("COUNTRY", "IDSCHOOL"))
#summary(cfa, fit.measures=TRUE)
#print(modindices(cfa, sort=T)[1:10,])
#Non European countries
ds16nE <- ds16 %>% filter(COUNTRY %in% c(CNTne,CNT2cne))
survey.design16nE <- svydesign(ids= ~ IDCL, weights = ~ SENWGT_Gc3, strata = ~ IDJK, nest = TRUE, data = ds16nE)
cfa16nE <- cfa(model16nE, data = ds16nE, cluster = c("COUNTRY", "IDSCHOOL"), missing = "fiml")
survey.fit16nE <- lavaan.survey(lavaan.fit = cfa16nE, survey.design = survey.design16nE, estimator= "MLMVS")
# cfa <- cfa(model16nE, data = ds16nE, cluster = c("COUNTRY", "IDSCHOOL"))
# summary(cfa, fit.measures=TRUE)
# print(modindices(cfa,sort=T)[1:10,])
#Factor scores for latent variables
p16E <- cbind(ds16E, predict(cfa16E)) #Prediction of factor scores should be based on survey design but not possible to obtain in R using FIML
p16nE <- cbind(ds16nE, predict(cfa16nE))
# Stack both regions, tag the cycle, and keep ids plus the new scale scores.
p16 <- p16E %>% bind_rows(p16nE) %>% mutate(cycle = "C3") %>%
dplyr::select(all_of(Id), all_of(Newscales))
rm(cfa16E, cfa16nE) #Remove fit objects to save memory
#Fit for each country separately
cnt16 <- unique(ds16$COUNTRY)
meast16 <- NULL
stdl16 <- NULL
# For each country: build its own survey design, fit the region-appropriate
# model (European vs non-European), and collect fit measures plus
# standardized loadings.
for (c16 in cnt16) {
dscNT <- ds16[ds16$COUNTRY == c16,]
survey.cnt16 <- svydesign(ids = ~ IDCL, weights = ~ SENWGT_Gc3, strata = ~ IDJK, nest = TRUE, data = dscNT)
if(!c16 %in% c(CNTne,CNT2cne)) model <- model16E else model <- model16nE
CNTcfa <- cfa(model, cluster = c("IDSCHOOL"), data = dscNT)
survey.CNTfit <- lavaan.survey(lavaan.fit = CNTcfa, survey.design = survey.cnt16, estimator= "MLMVS")
# One column of fit indices per country, with the sample size prepended.
meas <- fitMeasures(survey.CNTfit, c("chisq","df", "cfi", "tli","rmsea", "srmr"), output = "matrix")
meas <- rbind(n = nobs(survey.CNTfit), meas)
meast16 <- cbind(meast16, meas)
# Standardized factor loadings (op "=~") tagged with the country code.
stdl <- standardizedSolution(survey.CNTfit) %>%
filter(op == "=~") %>%
mutate(cntry = c16)
stdl16 <- rbind(stdl16, stdl)
}
# Transpose so countries become the rows of the fit-measure table.
meast16 <- as.data.frame(t(meast16))
rownames(meast16) <- cnt16
rm(CNTcfa)
cat('### CFA - ICCS 2016')
cat(' \n')
cat(' \n')
# Pooled fit measures for the European and non-European models, stacked into
# one table for the report.
tmeasE <- t(fitMeasures(survey.fit16E, c("chisq","df","cfi", "tli","rmsea", "srmr"),
output = "matrix"))
tmeasnE <- t(fitMeasures(survey.fit16nE, c("chisq","df","cfi", "tli","rmsea", "srmr"),
output = "matrix"))
meas16 <- rbind(data.frame(Quest = "European", n = nobs(survey.fit16E), round(tmeasE, 3)),
data.frame(Quest = "Non-european", n = nobs(survey.fit16nE), round(tmeasnE, 3)))
knitr::kable(meas16) %>% print()
cat(' \n')
cat('### CFA - ICCS 2016, by countries')
cat(' \n')
knitr::kable(meast16,digits = 3) %>% print()
cat(' \n')
cat(' \n')
# Path diagrams; invisible() suppresses semPaths()'s return value, the plot is
# drawn as a side effect.
invisible(semPaths(survey.fit16E,"model", "std", "lisrel", edge.label.cex = 0.6, intercepts = FALSE, groups = "latent",
pastel = TRUE, title = FALSE, nCharNodes = 10, nDigits = 1))
title("CFA measurement European model", line = 2)
invisible(semPaths(survey.fit16nE,"model", "std", "lisrel", edge.label.cex = 0.6, intercepts = FALSE, groups = "latent",
pastel = TRUE, title = FALSE, nCharNodes = 10, nDigits = 1))
title("CFA measurement Non European model", line = 2)
cat(' \n')
cat(' \n')
# Map item codes to their lower-cased variable labels for plotting, dropping
# id/weight/gender columns and trimming common label prefixes for readability.
labels <- data.frame(label = tolower(sjlabelled::get_label(ds161)))
labels <- labels %>% filter(!str_detect(rownames(labels), c("IDSTUD|IDSCHOOL|COUNTRY|TOTWGT|GENDER"))) %>%
mutate(variable = rownames(.),
label = str_remove(label, "moving/<immigrants> |moving/<immigrant> |rights and responsibilities/rights and responsibilities/|rights and responsibilities/roles women and men/"))
stdl162 <- stdl16 %>% mutate(rhs = factor(rhs, levels = labels$variable, labels = labels$label)) %>%
mutate(Model = ifelse(cntry %in% c(CNTne,CNT2cne), "Non-European", "European"))
# Dot-and-interval plot of per-country standardized loadings, faceted by
# latent factor (rows) and model region (columns).
l1 <- stdl162 %>% data.frame() %>%
ggplot(aes(x = est.std, y = rhs, color = reorder(cntry, desc(cntry)))) +
geom_linerange(aes(xmin = ci.lower, xmax = ci.upper), position = position_dodge(0.4)) +
geom_jitter(position = position_dodge(0.4)) +
facet_grid(lhs~Model, scales = "free") +
geom_text(aes(label=cntry),hjust=0, vjust=1, position = position_dodge(0.4), size = 2) +
theme(legend.position = "none", axis.line.y = element_blank()) +
ggtitle("Loading distribution of scales - ICCS 2016") +
ylab("") +
xlab("Loadings with Confidence Interval") +
scale_y_discrete(labels = function(x) str_wrap(x, 25)) +
scale_x_continuous(breaks = c(0, 0.25, 0.5, 0.75, 1), limits = c(0,1), labels = function(x) sprintf("%.1f", x))
print(l1)
cat('### Invariance between COUNTRY')
cat(' \n')
cat(' \n')
# Measurement-invariance ladder across countries, European model:
# configural (no equality constraints), metric (equal loadings), scalar
# (+ equal intercepts), and a "Strict" step that additionally constrains
# latent variances (lv.variances). Each fit is re-estimated under the
# survey design.
inv.conf16E <- cfa(model16E, data = ds16E, cluster = "IDSCHOOL", group = "COUNTRY")
inv.conf16E <- lavaan.survey(lavaan.fit = inv.conf16E, survey.design = survey.design16E, estimator= "MLMVS")
inv.metr16E <- cfa(model16E, data = ds16E, cluster = "IDSCHOOL", group = "COUNTRY", group.equal = c("loadings"))
inv.metr16E <- lavaan.survey(lavaan.fit = inv.metr16E, survey.design = survey.design16E, estimator= "MLMVS")
inv.scal16E <- cfa(model16E, data = ds16E, cluster = "IDSCHOOL", group = "COUNTRY", group.equal = c("loadings","intercepts"))
inv.scal16E <- lavaan.survey(lavaan.fit = inv.scal16E, survey.design = survey.design16E, estimator= "MLMVS")
inv.stri16E <- cfa(model16E, data = ds16E, cluster = "IDSCHOOL", group = "COUNTRY", group.equal = c("loadings","intercepts","lv.variances"))
inv.stri16E <- lavaan.survey(lavaan.fit = inv.stri16E, survey.design = survey.design16E, estimator= "MLMVS")
invarCNT1 <- data.frame(Quest = "European", round(rbind(Configural = fitMeasures(inv.conf16E, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Metric = fitMeasures(inv.metr16E, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Scalar = fitMeasures(inv.scal16E, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Strict = fitMeasures(inv.stri16E, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea"))),3))
# Same ladder for the non-European model.
inv.conf16nE <- cfa(model16nE, data = ds16nE, cluster = "IDSCHOOL", group = "COUNTRY")
inv.conf16nE <- lavaan.survey(lavaan.fit = inv.conf16nE, survey.design = survey.design16nE, estimator= "MLMVS")
inv.metr16nE <- cfa(model16nE, data = ds16nE, cluster = "IDSCHOOL", group = "COUNTRY", group.equal = c("loadings"))
inv.metr16nE <- lavaan.survey(lavaan.fit = inv.metr16nE, survey.design = survey.design16nE, estimator= "MLMVS")
inv.scal16nE <- cfa(model16nE, data = ds16nE, cluster = "IDSCHOOL", group = "COUNTRY", group.equal = c("loadings","intercepts"))
inv.scal16nE <- lavaan.survey(lavaan.fit = inv.scal16nE, survey.design = survey.design16nE, estimator= "MLMVS")
inv.stri16nE <- cfa(model16nE, data = ds16nE, cluster = "IDSCHOOL", group = "COUNTRY", group.equal = c("loadings","intercepts","lv.variances"))
inv.stri16nE <- lavaan.survey(lavaan.fit = inv.stri16nE, survey.design = survey.design16nE, estimator= "MLMVS")
invarCNT2 <- data.frame(Quest = "Non-European", round(rbind(Configural = fitMeasures(inv.conf16nE, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Metric = fitMeasures(inv.metr16nE, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Scalar = fitMeasures(inv.scal16nE, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Strict = fitMeasures(inv.stri16nE, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea"))),3))
# Combine both regions; D_* columns hold the change in fit relative to the
# previous, less constrained, model (lag() leaves NA in the first row).
invarCNT <- invarCNT1 %>% mutate(Invariance = rownames(invarCNT1)) %>% relocate(Invariance, .before = npar) %>%
mutate(D_tli = tli-lag(tli),
D_cfi = cfi-lag(cfi),
D_rmsea = rmsea-lag(rmsea)) %>%
bind_rows(invarCNT2 %>% mutate(Invariance = rownames(invarCNT2)) %>% relocate(Invariance, .before = npar) %>%
mutate(D_tli = tli-lag(tli),
D_cfi = cfi-lag(cfi),
D_rmsea = rmsea-lag(rmsea))) %>%
knitr::kable() %>% print()
cat(' \n')
cat(' \n')
rm(inv.conf16E, inv.metr16E, inv.scal16E, inv.stri16E, inv.conf16nE, inv.metr16nE, inv.scal16nE, inv.stri16nE) #Remove to save space in disk
# ---- Measurement invariance across GENDER (2016 cycle) ----
cat('### Invariance between GENDER')
cat(' \n')
cat(' \n')
# European questionnaire: fit the nested configural -> metric -> scalar ->
# strict sequence (clustered by school), re-estimating each fit with the
# complex survey design via lavaan.survey().
inv.conf16E <- cfa(model16E, data = ds16E, cluster = "IDSCHOOL", group = "GENDER")
inv.conf16E <- lavaan.survey(lavaan.fit = inv.conf16E, survey.design = survey.design16E, estimator= "MLMVS")
inv.metr16E <- cfa(model16E, data = ds16E, cluster = "IDSCHOOL", group = "GENDER", group.equal = c("loadings"))
inv.metr16E <- lavaan.survey(lavaan.fit = inv.metr16E, survey.design = survey.design16E, estimator= "MLMVS")
inv.scal16E <- cfa(model16E, data = ds16E, cluster = "IDSCHOOL", group = "GENDER", group.equal = c("loadings","intercepts"))
inv.scal16E <- lavaan.survey(lavaan.fit = inv.scal16E, survey.design = survey.design16E, estimator= "MLMVS")
inv.stri16E <- cfa(model16E, data = ds16E, cluster = "IDSCHOOL", group = "GENDER", group.equal = c("loadings","intercepts","lv.variances"))
inv.stri16E <- lavaan.survey(lavaan.fit = inv.stri16E, survey.design = survey.design16E, estimator= "MLMVS")
# Collect the fit indices of the four models into one rounded table.
invarGNDR1 <- data.frame(Quest = "European", round(rbind(Configural = fitMeasures(inv.conf16E, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Metric = fitMeasures(inv.metr16E, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Scalar = fitMeasures(inv.scal16E, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Strict = fitMeasures(inv.stri16E, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea"))),3))
# Non-European questionnaire: same nested model sequence.
inv.conf16nE <- cfa(model16nE, data = ds16nE, cluster = "IDSCHOOL", group = "GENDER")
inv.conf16nE <- lavaan.survey(lavaan.fit = inv.conf16nE, survey.design = survey.design16nE, estimator= "MLMVS")
inv.metr16nE <- cfa(model16nE, data = ds16nE, cluster = "IDSCHOOL", group = "GENDER", group.equal = c("loadings"))
inv.metr16nE <- lavaan.survey(lavaan.fit = inv.metr16nE, survey.design = survey.design16nE, estimator= "MLMVS")
inv.scal16nE <- cfa(model16nE, data = ds16nE, cluster = "IDSCHOOL", group = "GENDER", group.equal = c("loadings","intercepts"))
inv.scal16nE <- lavaan.survey(lavaan.fit = inv.scal16nE, survey.design = survey.design16nE, estimator= "MLMVS")
inv.stri16nE <- cfa(model16nE, data = ds16nE, cluster = "IDSCHOOL", group = "GENDER", group.equal = c("loadings","intercepts","lv.variances"))
inv.stri16nE <- lavaan.survey(lavaan.fit = inv.stri16nE, survey.design = survey.design16nE, estimator= "MLMVS")
invarGNDR2 <- data.frame(Quest = "Non-European", round(rbind(Configural = fitMeasures(inv.conf16nE, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Metric = fitMeasures(inv.metr16nE, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Scalar = fitMeasures(inv.scal16nE, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea")),
Strict = fitMeasures(inv.stri16nE, c("npar", "logl","chisq", "df", "tli", "cfi", "rmsea"))),3))
# Stack both tables, add the invariance level and the change in TLI/CFI/RMSEA
# between successive levels, then print as a kable table.
invarGNDR <- invarGNDR1 %>% mutate(Invariance = rownames(invarGNDR1)) %>% relocate(Invariance, .before = npar) %>%
mutate(D_tli = tli-lag(tli),
D_cfi = cfi-lag(cfi),
D_rmsea = rmsea-lag(rmsea)) %>%
bind_rows(invarGNDR2 %>% mutate(Invariance = rownames(invarGNDR2)) %>% relocate(Invariance, .before = npar) %>%
mutate(D_tli = tli-lag(tli),
D_cfi = cfi-lag(cfi),
D_rmsea = rmsea-lag(rmsea))) %>%
knitr::kable() %>% print()
cat(' \n')
cat(' \n')
# Free the fitted lavaan objects; they are large and no longer needed.
rm(inv.conf16E, inv.metr16E, inv.scal16E, inv.stri16E, inv.conf16nE, inv.metr16nE, inv.scal16nE, inv.stri16nE) #Remove to save space in disk
}
cat(' \n')
cat(' \n')
cat('## Summary indexes, all countries, all cycles')
cat(' \n')
cat(' \n')
# Pool the per-cycle score frames (p99, p09, p16) and join them onto the
# master data set by the ID variables.
pall <- plyr::rbind.fill(p99, p09, p16)
ISC_cfa <- left_join(ISC_cfa, pall, by = all_of(Id))
# Attach human-readable labels to the derived scale scores (used as titles
# in the plots produced by the loop that follows in the original script).
set_label(ISC_cfa$Gend_Equal) <- "Positive attitudes toward gender equality"
set_label(ISC_cfa$Immi_Equal) <- "Positive attitudes toward equal rights for immigrants"
set_label(ISC_cfa$Ethn_Equal) <- "Positive attitudes toward equal rights for ethnic/racial groups"
# Per-cycle grand means of the new scales, used as dotted reference lines.
mg <- ISC_cfa %>% dplyr::select(cycle, all_of(Newscales)) %>% group_by(cycle) %>%
summarise_at(Newscales, list(~ mean(., na.rm = TRUE))) %>%
mutate(cycle = as.factor(cycle)) %>% data.frame()
# For each derived scale, draw boxplots of the CFA scores by country,
# faceted by cycle, with a dotted vertical line at the per-cycle grand mean
# (taken from `mg`). The plot title is the variable label stored on the
# column. Improvements: seq_along() instead of 1:length(), and
# attr(df[[var]], "label") instead of the eval(parse(text = ...)) round-trip
# (same attribute value, no string-built code).
for (i in seq_along(Newscales)) {
  vargra <- Newscales[i]
  s2 <- ISC_cfa %>% dplyr::select("cycle", "COUNTRY", vargra) %>% na.omit() %>%
    ggplot(aes_string(x = vargra, y = paste0("reorder(COUNTRY, desc(COUNTRY))"), group = paste0("interaction(cycle, COUNTRY)"), fill = "COUNTRY")) +
    geom_boxplot() +
    facet_grid(.~ cycle) +
    geom_vline(aes_string(xintercept = vargra), mg, linetype = "dotted", size = 0.8) +
    ggtitle(attr(ISC_cfa[[vargra]], "label")) +
    ylab("Distribution of Scores CFA") +
    xlab(paste0(vargra)) +
    scale_color_brewer(palette="Accent") +
    theme(legend.position = "none")
  print(s2)
}
# Keep only the ID variables plus the original and newly-derived scale scores.
ISC_cfa <- ISC_cfa %>% select(all_of(Id), all_of(Scales), all_of(Newscales))
|
51f9b6a867c182c9a1c509304222464f460c5457
|
2df6b8f6f367e980b7f5cbd438a2b06b24b302d2
|
/datai1/R script (1).R
|
233e0b14824835c8921a444c7e12ee1f8a2f4a3d
|
[] |
no_license
|
dcamposliz/dtsc1
|
af4f6c6c0b0f340b5baa4fb1fc1d5cdee7db0cf4
|
09cfa6c5ff32046b0c91bd6bd1206eb280be47ce
|
refs/heads/master
| 2021-01-10T11:53:00.278925
| 2015-12-28T08:40:39
| 2015-12-28T08:40:39
| 48,018,237
| 0
| 1
| null | 2015-12-28T08:40:39
| 2015-12-15T03:50:16
|
R
|
UTF-8
|
R
| false
| false
| 26,557
|
r
|
R script (1).R
|
## Note: separate functions should ideally be used in this file,
## but for this project everything lives in one script.
## Set working directory to the datai1 folder.
## NOTE(review): hard-coded absolute path -- consider running the script from
## the project root instead of calling setwd().
setwd("C:/Users/Peter/myProjects/dtsc1/datai1")
## Install only the packages that are missing. (Previously every package was
## re-installed unconditionally on each run, which is slow and needs network
## access even when nothing is missing.)
pkgs <- c("xlsx", "dplyr", "lubridate", "reshape2", "zoo", "sqldf")
missing_pkgs <- pkgs[!(pkgs %in% installed.packages()[, "Package"])]
if (length(missing_pkgs) > 0) install.packages(missing_pkgs)
## Attach the packages.
library(xlsx)      # Excel input (read.xlsx2)
library(dplyr)     # data manipulation
library(lubridate) # date parsing
library(reshape2)  # melt() wide -> long
library(zoo)       # na.approx() linear interpolation of NAs
library(sqldf)     # SQL over data frames
## DOWNLOAD FILE
#if(!file.exists("data")) {dir.create("data")}
#fileUrl <- "http://www.worldeconomics.com/Files/Maddison_NorthAmerica_Full.xlsx"
#download.file(fileUrl, destfile = "./data/America.xlsx") ## method = "curl" removed for windows
#dateDownloaded <- date()
## Load data from excel
## One workbook per region; sheetIndex 2 = population, 3 = GDP,
## 4 = per-capita GDP. Rows 64-202 hold the yearly series; colIndex selects
## the year column plus the country columns kept for that region.
NorthAmericaPopulation <- read.xlsx2("./Maddison_NorthAmerica_Full.xlsx", sheetIndex=2, startRow = 64, endRow = 202, colIndex = 1:3, header=FALSE)
NorthAmericaGDP <- read.xlsx2("./Maddison_NorthAmerica_Full.xlsx", sheetIndex=3, startRow = 64, endRow = 202, colIndex = 1:3, header=FALSE)
NorthAmericaPerCapitaGDP <- read.xlsx2("./Maddison_NorthAmerica_Full.xlsx", sheetIndex=4, startRow = 64, endRow = 202, colIndex = 1:3, header=FALSE)
EuropePopulation <- read.xlsx2("./Maddison_Europe_Full.xlsx", sheetIndex=2, startRow = 64, endRow = 202, colIndex = c(1:13,16,17), header=FALSE)
EuropeGDP <- read.xlsx2("./Maddison_Europe_Full.xlsx", sheetIndex=3, startRow = 64, endRow = 202, colIndex = c(1:13,16,17), header=FALSE)
EuropePerCapitaGDP <- read.xlsx2("./Maddison_Europe_Full.xlsx", sheetIndex=4, startRow = 64, endRow = 202, colIndex = c(1:13,16,17), header=FALSE)
CentralAndSouthAmericaPopulation <- read.xlsx2("./Maddison_CentralAndSouthAmerica_Full.xlsx", sheetIndex=2, startRow = 64, endRow = 202, colIndex = 1:24, header=FALSE)
CentralAndSouthAmericaGDP <- read.xlsx2("./Maddison_CentralAndSouthAmerica_Full.xlsx", sheetIndex=3, startRow = 64, endRow = 202, colIndex = 1:24, header=FALSE)
CentralAndSouthAmericaPerCapitaGDP <- read.xlsx2("./Maddison_CentralAndSouthAmerica_Full.xlsx", sheetIndex=4, startRow = 64, endRow = 202, colIndex = 1:24, header=FALSE)
AsiaPopulation <- read.xlsx2("./Maddison_Asia_Full.xlsx", sheetIndex=2, startRow = 64, endRow = 202, colIndex = 1:41, header=FALSE)
AsiaGDP <- read.xlsx2("./Maddison_Asia_Full.xlsx", sheetIndex=3, startRow = 64, endRow = 202, colIndex = 1:41, header=FALSE)
AsiaPerCapitaGDP <- read.xlsx2("./Maddison_Asia_Full.xlsx", sheetIndex=4, startRow = 64, endRow = 202, colIndex = 1:41, header=FALSE)
AfricaPopulation <- read.xlsx2("./Maddison_Africa_Full.xlsx", sheetIndex=2, startRow = 64, endRow = 202, colIndex = 1:54, header=FALSE)
AfricaGDP <- read.xlsx2("./Maddison_Africa_Full.xlsx", sheetIndex=3, startRow = 64, endRow = 202, colIndex = 1:54, header=FALSE)
AfricaPerCapitaGDP <- read.xlsx2("./Maddison_Africa_Full.xlsx", sheetIndex=4, startRow = 64, endRow = 202, colIndex = 1:54, header=FALSE)
## GIVE NAMES BEFORE MELT
## Name the columns of every regional frame: first column is the year, the
## rest are the country columns in workbook order. The same name vector is
## repeated for the population / GDP / per-capita-GDP variants of a region.
names(NorthAmericaPopulation) <- c("year", "canada","usa")
names(NorthAmericaGDP) <- c("year", "canada", "usa")
names(NorthAmericaPerCapitaGDP) <- c("year", "canada", "usa")
names(EuropePopulation) <- c("year", "Austria","Belgium","Denmark","Finland","France","Germany","Italy","Netherlands","Norway","Sweden","Switzerland","UK","Portugal","Spain")
names(EuropeGDP) <- c("year", "Austria","Belgium","Denmark","Finland","France","Germany","Italy","Netherlands","Norway","Sweden","Switzerland","UK","Portugal","Spain")
names(EuropePerCapitaGDP) <- c("year", "Austria","Belgium","Denmark","Finland","France","Germany","Italy","Netherlands","Norway","Sweden","Switzerland","UK","Portugal","Spain")
names(CentralAndSouthAmericaPopulation) <- c("year", "Argentina","Brazil","Chile","Colombia","Mexico","Peru","Uruguay","Venezuela","Bolivia","Costa.Rica","Cuba","Dominican.Rep.",
"Ecuador","El.Salvador","Guatemala","Haiti","Honduras","Jamaica","Nicaragua","Panama","Paraguay","Puerto.Rico","T.and.Tobago")
names(CentralAndSouthAmericaGDP) <- c("year", "Argentina","Brazil","Chile","Colombia","Mexico","Peru","Uruguay","Venezuela","Bolivia","Costa.Rica","Cuba","Dominican.Rep.",
"Ecuador","El.Salvador","Guatemala","Haiti","Honduras","Jamaica","Nicaragua","Panama","Paraguay","Puerto.Rico","T.and.Tobago")
names(CentralAndSouthAmericaPerCapitaGDP) <- c("year", "Argentina","Brazil","Chile","Colombia","Mexico","Peru","Uruguay","Venezuela","Bolivia","Costa.Rica","Cuba","Dominican.Rep.",
"Ecuador","El.Salvador","Guatemala","Haiti","Honduras","Jamaica","Nicaragua","Panama","Paraguay","Puerto.Rico","T.and.Tobago")
names(AsiaPopulation) <- c("year", "China","India","Indonesia","Japan","Philippines","South.Korea","Thailand","Taiwan","Bangladesh","Burma","Hong.Kong","Malaysia",
"Nepal","Pakistan","Singapore","Sri.Lanka","Afghanistan","Cambodia","Laos","Mongolia","North.Korea","Vietnam","24.Sm.E.Asia","Bahrain","Iran","Iraq","Israel","Jordan","Kuwait",
"Lebanon","Oman","Qatar","Saudi.Arabia","Syria","Turkey","UAE","Yemen","W.Bank.and.Gaza","Australia","New.Zealand")
names(AsiaGDP) <- c("year", "China","India","Indonesia","Japan","Philippines","South.Korea","Thailand","Taiwan","Bangladesh","Burma","Hong.Kong","Malaysia",
"Nepal","Pakistan","Singapore","Sri.Lanka","Afghanistan","Cambodia","Laos","Mongolia","North.Korea","Vietnam","24.Sm.E.Asia","Bahrain","Iran","Iraq","Israel","Jordan","Kuwait",
"Lebanon","Oman","Qatar","Saudi.Arabia","Syria","Turkey","UAE","Yemen","W.Bank.and.Gaza","Australia","New.Zealand")
names(AsiaPerCapitaGDP) <- c("year", "China","India","Indonesia","Japan","Philippines","South.Korea","Thailand","Taiwan","Bangladesh","Burma","Hong.Kong","Malaysia",
"Nepal","Pakistan","Singapore","Sri.Lanka","Afghanistan","Cambodia","Laos","Mongolia","North.Korea","Vietnam","24.Sm.E.Asia","Bahrain","Iran","Iraq","Israel","Jordan","Kuwait",
"Lebanon","Oman","Qatar","Saudi.Arabia","Syria","Turkey","UAE","Yemen","W.Bank.and.Gaza","Australia","New.Zealand")
names(AfricaPopulation) <- c("year", "Algeria","Angola","Benin","Botswana","Burkina.Faso","Burundi","Cameroon","Cape.Verde","Central.Africa.Republic","Chad","Comoro.Islands","Congo",
"Cote.d.Ivoire","Djibouti","Egypt","Equatorial.Guinea","Eritrea.and.Ethiopia","Gabon","Gambia","Ghana","Guinea","Guinea.Bissau","Kenya","Lesotho","Liberia","Libya","Madagascar",
"Malawi","Mali","Mauritania","Mauritius","Morocco","Mozambique","Namibia","Niger","Nigeria","Rwanda","Sao.Tome.and.Principe","Senegal","Seychelles","Sierra.Leone","Somalia",
"South.Africa","Sudan","Swaziland","Tanzania","Togo","Tunisia","Uganda","Zaire","Zambia","Zimbabwe","Three.Small.Afr.")
names(AfricaGDP) <- c("year", "Algeria","Angola","Benin","Botswana","Burkina.Faso","Burundi","Cameroon","Cape.Verde","Central.Africa.Republic","Chad","Comoro.Islands","Congo",
"Cote.d.Ivoire","Djibouti","Egypt","Equatorial.Guinea","Eritrea.and.Ethiopia","Gabon","Gambia","Ghana","Guinea","Guinea.Bissau","Kenya","Lesotho","Liberia","Libya","Madagascar",
"Malawi","Mali","Mauritania","Mauritius","Morocco","Mozambique","Namibia","Niger","Nigeria","Rwanda","Sao.Tome.and.Principe","Senegal","Seychelles","Sierra.Leone","Somalia",
"South.Africa","Sudan","Swaziland","Tanzania","Togo","Tunisia","Uganda","Zaire","Zambia","Zimbabwe","Three.Small.Afr.")
names(AfricaPerCapitaGDP) <- c("year", "Algeria","Angola","Benin","Botswana","Burkina.Faso","Burundi","Cameroon","Cape.Verde","Central.Africa.Republic","Chad","Comoro.Islands","Congo",
"Cote.d.Ivoire","Djibouti","Egypt","Equatorial.Guinea","Eritrea.and.Ethiopia","Gabon","Gambia","Ghana","Guinea","Guinea.Bissau","Kenya","Lesotho","Liberia","Libya","Madagascar",
"Malawi","Mali","Mauritania","Mauritius","Morocco","Mozambique","Namibia","Niger","Nigeria","Rwanda","Sao.Tome.and.Principe","Senegal","Seychelles","Sierra.Leone","Somalia",
"South.Africa","Sudan","Swaziland","Tanzania","Togo","Tunisia","Uganda","Zaire","Zambia","Zimbabwe","Three.Small.Afr.")
## Coerce every country column (everything after `year`) to numeric. The
## columns arrive non-numeric from read.xlsx2(), so each is round-tripped
## through as.character() before as.numeric() -- the same conversion the
## original per-column loops performed.
cols_to_numeric <- function(df, cols) {
  df[cols] <- lapply(df[cols], function(v) as.numeric(as.character(v)))
  df
}
NorthAmericaPopulation   <- cols_to_numeric(NorthAmericaPopulation, 2:3)
NorthAmericaGDP          <- cols_to_numeric(NorthAmericaGDP, 2:3)
NorthAmericaPerCapitaGDP <- cols_to_numeric(NorthAmericaPerCapitaGDP, 2:3)
EuropePopulation   <- cols_to_numeric(EuropePopulation, 2:15)
EuropeGDP          <- cols_to_numeric(EuropeGDP, 2:15)
EuropePerCapitaGDP <- cols_to_numeric(EuropePerCapitaGDP, 2:15)
CentralAndSouthAmericaPopulation   <- cols_to_numeric(CentralAndSouthAmericaPopulation, 2:24)
CentralAndSouthAmericaGDP          <- cols_to_numeric(CentralAndSouthAmericaGDP, 2:24)
CentralAndSouthAmericaPerCapitaGDP <- cols_to_numeric(CentralAndSouthAmericaPerCapitaGDP, 2:24)
AsiaPopulation   <- cols_to_numeric(AsiaPopulation, 2:41)
AsiaGDP          <- cols_to_numeric(AsiaGDP, 2:41)
AsiaPerCapitaGDP <- cols_to_numeric(AsiaPerCapitaGDP, 2:41)
AfricaPopulation   <- cols_to_numeric(AfricaPopulation, 2:54)
AfricaGDP          <- cols_to_numeric(AfricaGDP, 2:54)
AfricaPerCapitaGDP <- cols_to_numeric(AfricaPerCapitaGDP, 2:54)
## Fill in the 1870 values where they are null
## For each region with early-data gaps, collect the 1870 row (row 1, may
## contain NAs) and the 1950 row (row 81, complete) into per-country vectors,
## then flag the countries that have both years via complete.cases().
CentralAndSouthAmericaGDP1870 <- c(); CentralAndSouthAmericaGDP1950 <- c();
CentralAndSouthAmericaPerCapitaGDP1870 <- c(); CentralAndSouthAmericaPerCapitaGDP1950 <- c();
AsiaPopulation1870 <- c(); AsiaPopulation1950 <- c();
AsiaGDP1870 <- c(); AsiaGDP1950 <- c();
AsiaPerCapitaGDP1870 <- c(); AsiaPerCapitaGDP1950 <- c();
AfricaPopulation1870 <- c(); AfricaPopulation1950 <- c();
AfricaGDP1870 <- c(); AfricaGDP1950 <- c();
AfricaPerCapitaGDP1870 <- c(); AfricaPerCapitaGDP1950 <- c();
# Pull the row with nulls (1870) and the row with no nulls (1950).
# Columns start at index 2 (column 1 is `year`), hence the i-1 offsets.
for (i in 2:24) {
  CentralAndSouthAmericaGDP1870[i-1] <- CentralAndSouthAmericaGDP[[i]][[1]]
  CentralAndSouthAmericaGDP1950[i-1] <- CentralAndSouthAmericaGDP[[i]][[81]]
  CentralAndSouthAmericaPerCapitaGDP1870[i-1] <- CentralAndSouthAmericaPerCapitaGDP[[i]][[1]]
  CentralAndSouthAmericaPerCapitaGDP1950[i-1] <- CentralAndSouthAmericaPerCapitaGDP[[i]][[81]]
}
for (i in 2:41) {
  AsiaPopulation1870[i-1] <- AsiaPopulation[[i]][[1]]
  AsiaPopulation1950[i-1] <- AsiaPopulation[[i]][[81]]
  AsiaGDP1870[i-1] <- AsiaGDP[[i]][[1]]
  AsiaGDP1950[i-1] <- AsiaGDP[[i]][[81]]
  AsiaPerCapitaGDP1870[i-1] <- AsiaPerCapitaGDP[[i]][[1]]
  AsiaPerCapitaGDP1950[i-1] <- AsiaPerCapitaGDP[[i]][[81]]
}
for (i in 2:54) {
  AfricaPopulation1870[i-1] <- AfricaPopulation[[i]][[1]]
  AfricaPopulation1950[i-1] <- AfricaPopulation[[i]][[81]]
  AfricaGDP1870[i-1] <- AfricaGDP[[i]][[1]]
  AfricaGDP1950[i-1] <- AfricaGDP[[i]][[81]]
  AfricaPerCapitaGDP1870[i-1] <- AfricaPerCapitaGDP[[i]][[1]]
  AfricaPerCapitaGDP1950[i-1] <- AfricaPerCapitaGDP[[i]][[81]]
}
## complete.cases() was previously recomputed on every loop iteration over
## the partially-filled vectors; only the final value was used, so it is
## computed once per pair after the loops (identical results, less work).
CentralAndSouthAmericaGDPgood <- complete.cases(CentralAndSouthAmericaGDP1870, CentralAndSouthAmericaGDP1950)
CentralAndSouthAmericaPerCapitaGDPgood <- complete.cases(CentralAndSouthAmericaPerCapitaGDP1870, CentralAndSouthAmericaPerCapitaGDP1950)
AsiaPopulationgood <- complete.cases(AsiaPopulation1870, AsiaPopulation1950)
AsiaGDPgood <- complete.cases(AsiaGDP1870, AsiaGDP1950)
AsiaPerCapitaGDPgood <- complete.cases(AsiaPerCapitaGDP1870, AsiaPerCapitaGDP1950)
AfricaPopulationgood <- complete.cases(AfricaPopulation1870, AfricaPopulation1950)
AfricaGDPgood <- complete.cases(AfricaGDP1870, AfricaGDP1950)
AfricaPerCapitaGDPgood <- complete.cases(AfricaPerCapitaGDP1870, AfricaPerCapitaGDP1950)
# Find the average change from 1870 to 1950
# Per-country growth factor (1950 value / 1870 value), only where both years
# are present; remaining slots stay NA and are dropped by na.rm = TRUE.
# Arrays are sized 23 = number of Central/South America country columns.
CentralAndSouthAmericaGDPArr <- array(dim=c(23,1)); CentralAndSouthAmericaPerCapitaGDPArr <- array(dim=c(23,1))
for (i in 1:23) {
if (CentralAndSouthAmericaGDPgood[i]) {
CentralAndSouthAmericaGDPArr[i] <- CentralAndSouthAmericaGDP1950[i] / CentralAndSouthAmericaGDP1870[i]
}
if (CentralAndSouthAmericaPerCapitaGDPgood[i]) {
CentralAndSouthAmericaPerCapitaGDPArr[i] <- CentralAndSouthAmericaPerCapitaGDP1950[i] / CentralAndSouthAmericaPerCapitaGDP1870[i]
}
}
# Region-wide mean growth factor across countries with complete data.
CentralAndSouthAmericaGDPAverageChange <- mean(CentralAndSouthAmericaGDPArr, na.rm=TRUE)
CentralAndSouthAmericaPerCapitaGDPAverageChange <- mean(CentralAndSouthAmericaPerCapitaGDPArr, na.rm=TRUE)
# Same growth-factor computation for Asia (40 country columns).
AsiaPopulationArr <- array(dim=c(40,1)); AsiaGDPArr <- array(dim=c(40,1)); AsiaPerCapitaGDPArr <- array(dim=c(40,1))
for (i in 1:40) {
if (AsiaPopulationgood[i]) {
AsiaPopulationArr[i] <- AsiaPopulation1950[i] / AsiaPopulation1870[i]
}
if (AsiaGDPgood[i]) {
AsiaGDPArr[i] <- AsiaGDP1950[i] / AsiaGDP1870[i]
}
if (AsiaPerCapitaGDPgood[i]) {
AsiaPerCapitaGDPArr[i] <- AsiaPerCapitaGDP1950[i] / AsiaPerCapitaGDP1870[i]
}
}
# Region-wide mean growth factor across countries with complete data.
AsiaPopulationAverageChange <- mean(AsiaPopulationArr, na.rm=TRUE)
AsiaGDPAverageChange <- mean(AsiaGDPArr, na.rm=TRUE)
AsiaPerCapitaGDPAverageChange <- mean(AsiaPerCapitaGDPArr, na.rm=TRUE)
# Same growth-factor computation for Africa (53 country columns).
# BUG FIX: AfricaPerCapitaGDPArr was allocated with dim = c(40, 1) (copied
# from the Asia block) while the loop writes indices up to 53; single-index
# assignment past the end of a matrix is out of bounds. Allocate 53 slots
# like the other two Africa arrays.
AfricaPopulationArr <- array(dim=c(53,1)); AfricaGDPArr <- array(dim=c(53,1)); AfricaPerCapitaGDPArr <- array(dim=c(53,1))
for (i in 1:53) {
  if (AfricaPopulationgood[i]) {
    AfricaPopulationArr[i] <- AfricaPopulation1950[i] / AfricaPopulation1870[i]
  }
  if (AfricaGDPgood[i]) {
    AfricaGDPArr[i] <- AfricaGDP1950[i] / AfricaGDP1870[i]
  }
  if (AfricaPerCapitaGDPgood[i]) {
    AfricaPerCapitaGDPArr[i] <- AfricaPerCapitaGDP1950[i] / AfricaPerCapitaGDP1870[i]
  }
}
# Region-wide mean growth factor across countries with complete data.
AfricaPopulationAverageChange <- mean(AfricaPopulationArr, na.rm=TRUE)
AfricaGDPAverageChange <- mean(AfricaGDPArr, na.rm=TRUE)
AfricaPerCapitaGDPAverageChange <- mean(AfricaPerCapitaGDPArr, na.rm=TRUE)
# Backfill the missing first-row values (the 1870 row -- the original
# comment said "1850 data") by dividing each country's 1950 value (row 81)
# by the region-wide average growth factor computed above.
for (i in 2:24) {
if(is.na(CentralAndSouthAmericaGDP[[i]][[1]])) {
CentralAndSouthAmericaGDP[[i]][[1]] <- CentralAndSouthAmericaGDP[[i]][[81]] / CentralAndSouthAmericaGDPAverageChange
}
if(is.na(CentralAndSouthAmericaPerCapitaGDP[[i]][[1]])) {
CentralAndSouthAmericaPerCapitaGDP[[i]][[1]] <- CentralAndSouthAmericaPerCapitaGDP[[i]][[81]] / CentralAndSouthAmericaPerCapitaGDPAverageChange
}
}
for (i in 2:41) {
if(is.na(AsiaPopulation[[i]][[1]])) {
AsiaPopulation[[i]][[1]] <- AsiaPopulation[[i]][[81]] / AsiaPopulationAverageChange
}
if(is.na(AsiaGDP[[i]][[1]])) {
AsiaGDP[[i]][[1]] <- AsiaGDP[[i]][[81]] / AsiaGDPAverageChange
}
if(is.na(AsiaPerCapitaGDP[[i]][[1]])) {
AsiaPerCapitaGDP[[i]][[1]] <- AsiaPerCapitaGDP[[i]][[81]] / AsiaPerCapitaGDPAverageChange
}
}
for (i in 2:54) {
if(is.na(AfricaPopulation[[i]][[1]])) {
AfricaPopulation[[i]][[1]] <- AfricaPopulation[[i]][[81]] / AfricaPopulationAverageChange
}
if(is.na(AfricaGDP[[i]][[1]])) {
AfricaGDP[[i]][[1]] <- AfricaGDP[[i]][[81]] / AfricaGDPAverageChange
}
if(is.na(AfricaPerCapitaGDP[[i]][[1]])) {
AfricaPerCapitaGDP[[i]][[1]] <- AfricaPerCapitaGDP[[i]][[81]] / AfricaPerCapitaGDPAverageChange
}
}
## Linear interpolation NA values in between 1870 and 1950
## Wrap each frame in a zoo series indexed by the year column, then fill
## interior NAs by linear interpolation with na.approx(). The endpoints
## (rows 1 and 81) were made non-NA above, so every gap is interior.
#Population
CentralAndSouthAmericaPopulationZoo <- zoo(CentralAndSouthAmericaPopulation)
index(CentralAndSouthAmericaPopulationZoo) <- CentralAndSouthAmericaPopulationZoo[,1]
CentralAndSouthAmericaPopulation <- na.approx(CentralAndSouthAmericaPopulationZoo)
CentralAndSouthAmericaGDPZoo <- zoo(CentralAndSouthAmericaGDP)
index(CentralAndSouthAmericaGDPZoo) <- CentralAndSouthAmericaGDPZoo[,1]
CentralAndSouthAmericaGDP <- na.approx(CentralAndSouthAmericaGDPZoo)
CentralAndSouthAmericaPerCapitaGDPZoo <- zoo(CentralAndSouthAmericaPerCapitaGDP)
index(CentralAndSouthAmericaPerCapitaGDPZoo) <- CentralAndSouthAmericaPerCapitaGDPZoo[,1]
CentralAndSouthAmericaPerCapitaGDP <- na.approx(CentralAndSouthAmericaPerCapitaGDPZoo)
AsiaPopulationZoo <- zoo(AsiaPopulation)
index(AsiaPopulationZoo) <- AsiaPopulationZoo[,1]
AsiaPopulation <- na.approx(AsiaPopulationZoo)
AsiaGDPZoo <- zoo(AsiaGDP)
index(AsiaGDPZoo) <- AsiaGDPZoo[,1]
AsiaGDP <- na.approx(AsiaGDPZoo)
AsiaPerCapitaGDPZoo <- zoo(AsiaPerCapitaGDP)
index(AsiaPerCapitaGDPZoo) <- AsiaPerCapitaGDPZoo[,1]
AsiaPerCapitaGDP <- na.approx(AsiaPerCapitaGDPZoo)
AfricaPopulationZoo <- zoo(AfricaPopulation)
index(AfricaPopulationZoo) <- AfricaPopulationZoo[,1]
AfricaPopulation <- na.approx(AfricaPopulationZoo)
AfricaGDPZoo <- zoo(AfricaGDP)
index(AfricaGDPZoo) <- AfricaGDPZoo[,1]
AfricaGDP <- na.approx(AfricaGDPZoo)
AfricaPerCapitaGDPZoo <- zoo(AfricaPerCapitaGDP)
index(AfricaPerCapitaGDPZoo) <- AfricaPerCapitaGDPZoo[,1]
AfricaPerCapitaGDP <- na.approx(AfricaPerCapitaGDPZoo)
## After the zoo round-trip, rebuild plain data frames and coerce the country
## columns (all but `year`) back to numeric, mirroring the original
## data.frame() + per-column as.numeric(as.character()) loops.
as_numeric_frame <- function(x, cols) {
  out <- data.frame(x)
  out[cols] <- lapply(out[cols], function(v) as.numeric(as.character(v)))
  out
}
CentralAndSouthAmericaPopulation   <- as_numeric_frame(CentralAndSouthAmericaPopulation, 2:24)
CentralAndSouthAmericaGDP          <- as_numeric_frame(CentralAndSouthAmericaGDP, 2:24)
CentralAndSouthAmericaPerCapitaGDP <- as_numeric_frame(CentralAndSouthAmericaPerCapitaGDP, 2:24)
AsiaPopulation   <- as_numeric_frame(AsiaPopulation, 2:41)
AsiaGDP          <- as_numeric_frame(AsiaGDP, 2:41)
AsiaPerCapitaGDP <- as_numeric_frame(AsiaPerCapitaGDP, 2:41)
AfricaPopulation   <- as_numeric_frame(AfricaPopulation, 2:54)
AfricaGDP          <- as_numeric_frame(AfricaGDP, 2:54)
AfricaPerCapitaGDP <- as_numeric_frame(AfricaPerCapitaGDP, 2:54)
## Melt down to 3 columns each: one row per (year, country) observation,
## with the measure in the value column.
NorthAmericaPopulationMelt <- melt(NorthAmericaPopulation,id="year")
NorthAmericaGDPMelt <- melt(NorthAmericaGDP,id="year")
NorthAmericaPerCapitaGDPMelt <- melt(NorthAmericaPerCapitaGDP,id="year")
EuropePopulationMelt <- melt(EuropePopulation,id="year")
EuropeGDPMelt <- melt(EuropeGDP,id="year")
EuropePerCapitaGDPMelt <- melt(EuropePerCapitaGDP,id="year")
CentralAndSouthAmericaPopulationMelt <- melt(CentralAndSouthAmericaPopulation,id="year")
CentralAndSouthAmericaGDPMelt <- melt(CentralAndSouthAmericaGDP,id="year")
CentralAndSouthAmericaPerCapitaGDPMelt <- melt(CentralAndSouthAmericaPerCapitaGDP,id="year")
AsiaPopulationMelt <- melt(AsiaPopulation,id="year")
AsiaGDPMelt <- melt(AsiaGDP,id="year")
AsiaPerCapitaGDPMelt <- melt(AsiaPerCapitaGDP,id="year")
AfricaPopulationMelt <- melt(AfricaPopulation,id="year")
AfricaGDPMelt <- melt(AfricaGDP,id="year")
AfricaPerCapitaGDPMelt <- melt(AfricaPerCapitaGDP,id="year")
## Rename columns after melting: melt() calls them variable/value, so give
## the value column its measure name for the merges below.
names(NorthAmericaPopulationMelt) <- c("year", "country", "population")
names(NorthAmericaGDPMelt) <- c("year", "country", "gdp")
names(NorthAmericaPerCapitaGDPMelt) <- c("year", "country", "per_capita_gdp")
names(EuropePopulationMelt) <- c("year", "country", "population")
names(EuropeGDPMelt) <- c("year", "country", "gdp")
names(EuropePerCapitaGDPMelt) <- c("year", "country", "per_capita_gdp")
names(CentralAndSouthAmericaPopulationMelt) <- c("year", "country", "population")
names(CentralAndSouthAmericaGDPMelt) <- c("year", "country", "gdp")
names(CentralAndSouthAmericaPerCapitaGDPMelt) <- c("year", "country", "per_capita_gdp")
names(AsiaPopulationMelt) <- c("year", "country", "population")
names(AsiaGDPMelt) <- c("year", "country", "gdp")
names(AsiaPerCapitaGDPMelt) <- c("year", "country", "per_capita_gdp")
names(AfricaPopulationMelt) <- c("year", "country", "population")
names(AfricaGDPMelt) <- c("year", "country", "gdp")
names(AfricaPerCapitaGDPMelt) <- c("year", "country", "per_capita_gdp")
## Convert to correct units; We may want to convert; I don't know the right conversion we want to use;
#NorthAmericaPopulationMelt <- mutate(NorthAmericaPopulationMelt, population = population * 1000)
#NorthAmericaGDPMelt <- mutate(NorthAmericaGDPMelt, gdp = gdp * 1000000)
#EuropePopulationMelt <- mutate(EuropePopulationMelt, population = population * 1000)
#EuropeGDPMelt <- mutate(EuropeGDPMelt, gdp = gdp * 1000000)
#CentralAndSouthAmericaPopulationMelt <- mutate(CentralAndSouthAmericaPopulationMelt, population = population * 1000)
#CentralAndSouthAmericaGDPMelt <- mutate(CentralAndSouthAmericaGDPMelt, gdp = gdp * 1000000)
### CONVERT YEAR TO DATE
## Add 0101 to dates paste function
#NorthAmericaPopulation <- mutate(NorthAmericaPopulation, year = paste(NorthAmericaPopulation$date,"0101", sep = ""))
#NorthAmericaGDP <- mutate(NorthAmericaGDP, year = paste(NorthAmericaGDP$year,"0101", sep = ""))
## ymd read in with lubridate
#NorthAmericaPopulation <- mutate(NorthAmericaPopulation, year = ymd(NorthAmericaPopulation$date))
#NorthAmericaGDP <- mutate(NorthAmericaGDP, year = ymd(NorthAmericaGDP$year))
## Merge data into one set
## merge() joins on the shared columns (year, country), so each region ends
## up with population, gdp and per_capita_gdp side by side.
yearly_stats1 <- merge(NorthAmericaPopulationMelt,NorthAmericaGDPMelt)
yearly_stats1 <- merge(yearly_stats1,NorthAmericaPerCapitaGDPMelt)
yearly_stats2 <- merge(EuropePopulationMelt,EuropeGDPMelt)
yearly_stats2 <- merge(yearly_stats2,EuropePerCapitaGDPMelt)
yearly_stats3 <- merge(CentralAndSouthAmericaPopulationMelt,CentralAndSouthAmericaGDPMelt)
yearly_stats3 <- merge(yearly_stats3,CentralAndSouthAmericaPerCapitaGDPMelt)
yearly_stats4 <- merge(AsiaPopulationMelt,AsiaGDPMelt)
yearly_stats4 <- merge(yearly_stats4,AsiaPerCapitaGDPMelt)
yearly_stats5 <- merge(AfricaPopulationMelt,AfricaGDPMelt)
yearly_stats5 <- merge(yearly_stats5,AfricaPerCapitaGDPMelt)
## Write to file (Test Purposes)
#write.table(year_fact1, "./year_fact1.csv", sep = ",",row.names=FALSE)
#write.table(year_fact2, "./year_fact2.csv", sep = ",",row.names=FALSE)
#write.table(year_fact3, "./year_fact3.csv", sep = ",",row.names=FALSE)
#write.table(year_fact4, "./year_fact4.csv", sep = ",",row.names=FALSE)
#write.table(year_fact5, "./year_fact5.csv", sep = ",",row.names=FALSE)
## Create combined year_fact with sqldf
## UNION ALL the five regional tables into a single yearly_stats table,
## ordered by year then country.
yearly_stats <- sqldf("
SELECT
year,
country,
population,
gdp,
per_capita_gdp
FROM
(SELECT
year,
country,
population,
gdp,
per_capita_gdp
FROM
yearly_stats1
UNION ALL
SELECT
year,
country,
population,
gdp,
per_capita_gdp
FROM
yearly_stats2
UNION ALL
SELECT
year,
country,
population,
gdp,
per_capita_gdp
FROM
yearly_stats3
UNION ALL
SELECT
year,
country,
population,
gdp,
per_capita_gdp
FROM
yearly_stats4
UNION ALL
SELECT
year,
country,
population,
gdp,
per_capita_gdp
FROM
yearly_stats5
) x
ORDER BY
1 ASC, 2 ASC
")
# Ensure `year` is numeric after the SQL round-trip (needed for the
# arithmetic in the self-join below).
yearly_stats[ , 1] <- as.numeric(as.character(yearly_stats[ , 1]))
write.table(yearly_stats, "./yearly_stats.csv", sep = ",",row.names=FALSE)
## Create stats_across_five_years using sqldf
# concat isn't allowed so wasn't able to add 'display_year'
# CONCAT(CAST(five_years_ago.year AS char), '-', CAST(main_year.year AS char)) AS display_year,
## Self-join yearly_stats to its row from 5 years earlier (same country) and
## compute absolute and percentage changes for every year divisible by 5.
stats_across_five_years <- sqldf("
SELECT
main_year.country AS Country,
five_years_ago.year AS start_year,
main_year.year AS end_year,
main_year.population - five_years_ago.population AS Delta_Population,
main_year.gdp - five_years_ago.gdp AS Delta_GDP,
main_year.per_capita_gdp - five_years_ago.per_capita_gdp AS Delta_Per_Capita_GDP,
(main_year.population - five_years_ago.population) / five_years_ago.population AS pct_change_Population,
(main_year.gdp - five_years_ago.gdp) / five_years_ago.gdp AS pct_change_GDP,
(main_year.per_capita_gdp - five_years_ago.per_capita_gdp) / five_years_ago.per_capita_gdp AS pct_change_Per_Capita_GDP
FROM
(SELECT
year,
country,
population,
gdp,
per_capita_gdp
FROM
yearly_stats
) AS main_year
INNER JOIN
(SELECT
year,
country,
population,
gdp,
per_capita_gdp
FROM
yearly_stats
) AS five_years_ago ON
five_years_ago.country = main_year.country and
five_years_ago.year = main_year.year - 5
WHERE
main_year.year % 5 == 0
ORDER BY
1 ASC, 2 ASC
")
write.table(stats_across_five_years, "./stats_across_five_years.csv", sep = ",",row.names=FALSE)
## For each country, keep the 5-year periods in which one of the six change
## measures reaches its maximum; the Greatest_Change* columns flag which
## measure peaked in that period (NULL otherwise). Each LEFT JOIN matches a
## period only when it equals the country's per-measure maximum.
top_5_year_period_per_country <- sqldf("
SELECT
fact.country,
fact.start_year,
fact.end_year,
fact.Delta_Population,
fact.Delta_GDP,
fact.Delta_Per_Capita_GDP,
fact.pct_change_Population,
fact.pct_change_GDP,
fact.pct_change_Per_Capita_GDP,
a.greatest_change_in AS Greatest_Change1,
b.greatest_change_in AS Greatest_Change2,
c.greatest_change_in AS Greatest_Change3,
d.greatest_change_in AS Greatest_Change4,
e.greatest_change_in AS Greatest_Change5,
f.greatest_change_in AS Greatest_Change6
FROM
stats_across_five_years fact
LEFT JOIN
(SELECT
country,
MAX(Delta_Population) AS Delta_Population,
max('Delta Population') AS greatest_change_in
FROM
stats_across_five_years
group by
1
) a on
a.country = fact.country and
a.Delta_Population = fact.Delta_Population
LEFT JOIN
(SELECT
country,
MAX(Delta_GDP) AS Delta_GDP,
max('Delta_GDP') AS greatest_change_in
FROM
stats_across_five_years
group by
1
) b on
b.country = fact.country and
b.Delta_GDP = fact.Delta_GDP
LEFT JOIN
(SELECT
country,
MAX(Delta_Per_Capita_GDP) AS Delta_Per_Capita_GDP,
max('Delta_Per_Capita_GDP') AS greatest_change_in
FROM
stats_across_five_years
group by
1
) c on
c.country = fact.country and
c.Delta_Per_Capita_GDP = fact.Delta_Per_Capita_GDP
LEFT JOIN
(SELECT
country,
MAX(pct_change_Population) AS pct_change_Population,
max('%_change_Population') AS greatest_change_in
FROM
stats_across_five_years
group by
1
) d on
d.country = fact.country and
d.pct_change_Population = fact.pct_change_Population
LEFT JOIN
(SELECT
country,
MAX(pct_change_GDP) AS pct_change_GDP,
max('% change_GDP') AS greatest_change_in
FROM
stats_across_five_years
group by
1
) e on
e.country = fact.country and
e.pct_change_GDP = fact.pct_change_GDP
LEFT JOIN
(SELECT
country,
MAX(pct_change_Per_Capita_GDP) AS pct_change_Per_Capita_GDP,
max('% change_Per_Capita_GDP') AS greatest_change_in
FROM
stats_across_five_years
group by
1
) f on
f.country = fact.country and
f.pct_change_Per_Capita_GDP = fact.pct_change_Per_Capita_GDP
WHERE
a.country is not null OR b.country is not null OR c.country is not null OR d.country is not null OR e.country is not null OR f.country is not null
")
write.table(top_5_year_period_per_country, "./top_5_year_period_per_country.csv", sep = ",",row.names=FALSE)
|
85bb10135032273719a8bcaba2e906f2ba1bb1c2
|
012cc18008110914ddaa63e00cffcf6ba99c3c4e
|
/plot4.R
|
36b238e1017fc2f47960c051186d3dc0e50a1091
|
[] |
no_license
|
zackgouttel/ExData_Plotting1
|
03bfe07fbbc901ab80891e283cf7d164a4e9b9f9
|
49ad10daaa8b2e12904994051485d713f4a72492
|
refs/heads/master
| 2022-09-22T09:39:56.077110
| 2020-06-07T00:37:18
| 2020-06-07T00:37:18
| 269,798,054
| 0
| 0
| null | 2020-06-05T22:42:59
| 2020-06-05T22:42:58
| null |
UTF-8
|
R
| false
| false
| 1,061
|
r
|
plot4.R
|
# Restrict the power-consumption data to the two analysis days
# (1 Feb 2007 and 2 Feb 2007). `df` is expected to have a Date column
# comparable to lubridate::dmy() dates.
sub_data <- function(df = household_power_consumption) {
  first_day <- dmy("1-2-2007")
  second_day <- dmy("2-2-2007")
  # invisible() preserves the original's invisible return (its last
  # expression was an assignment).
  invisible(subset(x = df, Date == first_day | Date == second_day))
}
# Render plot4.png (480x480): a 2x2 panel of Global Active Power, Voltage,
# Energy sub metering (three overlaid series) and Global Reactive Power,
# each plotted against a POSIXct datetime built from Date and Time.
draw_plot4 <- function(df = household_power_consumption) {
  # BUG FIX: previously sub_data() was always called on the global
  # `household_power_consumption`, silently ignoring the `df` argument.
  plotdata <- sub_data(df = df)
  plotdata$datetime <- as.POSIXct(paste(plotdata$Date, plotdata$Time))
  png(filename = "plot4.png", width = 480, height = 480)
  # Close the device even if a plotting call fails.
  on.exit(dev.off(), add = TRUE)
  par(mfrow = c(2, 2))
  plot(plotdata$datetime, plotdata$Global_active_power, type = "l", ylab = "Global Active Power")
  plot(plotdata$datetime, plotdata$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
  plot(plotdata$datetime, plotdata$Sub_metering_1, type = "l", ylab = "Energy sub metering")
  lines(plotdata$datetime, plotdata$Sub_metering_2, type = "l", col = "red")
  lines(plotdata$datetime, plotdata$Sub_metering_3, type = "l", col = "blue")
  # Legend samples drawn as lines (lty) to match the line plots; the
  # original used pch = 19 point markers for a lines-only panel.
  legend("topright", legend = c("sub_metering_1", "sub_metering_2", "sub_metering_3"),
         lty = c(1, 1, 1), col = c("black", "red", "blue"))
  plot(plotdata$datetime, plotdata$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global Reactive Power")
}
|
6348251170741ce24688d63910bb2b3062604571
|
c7c9fbc40a719c1d2198697ebcbd9f48b2496572
|
/global.R
|
650fc94d5865d847a55885ef02e38b123241d8d9
|
[] |
no_license
|
sarikayamehmet/ShinyLdap
|
cd6066282b35a85cfdc313c664b46ac85bde7010
|
9f4ffa463a078d26ae2b90b54eaa0c020ad12c4b
|
refs/heads/master
| 2020-06-07T03:53:12.254606
| 2019-05-15T21:38:12
| 2019-05-15T21:38:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,049
|
r
|
global.R
|
# GLOBAL.R
# Install any missing packages, then attach them.
list.of.packages <- c("shiny", "readr")
new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,"Package"])]
if (length(new.packages) > 0) install.packages(new.packages)
library('shiny')
library('readr')
# LDAP connection settings.
ldap_url <- 'ldap://jacarta.intranet....:389'
ldap_dc <- 'dc=intranet,dc=mpgo'   # a second identical assignment was removed
ldap_filtro <- 'sAMAccountName'
ldap_dominio <- 'intranet'
# Attributes of interest in the ldapsearch output; only used as an OR'ed
# grep pattern in userLdap(), so the duplicate 'title:' entry was dropped.
ldap_campos <- c('dn:', 'cn:', 'sn:', 'title:','displayName:', 'name:', 'employeeID:', 'sAMAccountName:', 'mail:','G_MPTV_MEMBROS', 'G_MPTV_Users')
# Command template; the ldap_* placeholders are substituted in consultaLdap().
comando <- 'ldapsearch -H ldap_url -x -D "ldap_dominio\\ldap_user" -w ldap_pass -b "ldap_dc" "(&(ldap_filtro=ldap_user))"'
debug_mode <- 1 # 0 = no debug output.
#tem comando
# Check whether a command-line executable is available on the system PATH.
# Returns TRUE if `comando` resolves to a path, FALSE otherwise.
temComando <- function(comando) {
  caminho <- paste0(Sys.which(comando))
  caminho != ''
}
# Run an ldapsearch query, binding as `usuario` with password `senha`.
# Builds the shell command from the `comando` template (defined in this file)
# by substituting the ldap_* placeholders, executes it, and inspects the
# exit status. Returns 49 on LDAP "invalid credentials", otherwise the
# captured ldapsearch output lines (a character vector).
# NOTE(review): usuario/senha are interpolated into a shell command and the
# full command (including the password) is written to stderr -- potential
# command-injection and credential-disclosure risk; TODO confirm inputs are
# trusted before deployment.
consultaLdap <- function(usuario, senha) {
# Substitute each placeholder in the command template.
newC <- sub('ldap_url', ldap_url, comando)
newC <- sub('ldap_dominio', ldap_dominio,newC)
# gsub: 'ldap_user' appears twice in the template (bind DN and search filter).
newC <- gsub('ldap_user',usuario,newC)
newC <- sub('ldap_pass',senha,newC)
newC <- sub('ldap_filtro',ldap_filtro,newC)
newC <- sub('ldap_dc',ldap_dc,newC)
cat(file=stderr(),"Comando: ", newC, "\n")
# intern = TRUE captures stdout; a non-zero exit status is attached to the
# result as the "status" attribute.
tmp <- system(paste0(newC),intern = TRUE)
cat(file=stderr(),"tmp: ", tmp, "\n")
atributos <- attr(tmp,which = "status")
cat(file=stderr(),"atributos: ", atributos, "\n")
# No "status" attribute means the command exited successfully (status 0).
if (is.null(atributos)) atributos = 0
if (atributos == 49) {
# LDAP exit status 49: invalid user / password.
res <- 49
} else {
res <- tmp;
}
return (res)
}
# Split a single "key: value" LDAP output line into its pieces.
# Example: separaTxt("cn: John Doe") -> c("cn", "John Doe").
# Note: for a vector input, only the first element's split is returned.
separaTxt <- function(info) {
  partes <- strsplit(info, ': ')
  partes[[1]]
}
# Authenticate `usuario` against LDAP and return one profile field.
# On success, filters the ldapsearch output down to lines matching the
# attributes listed in ldap_campos and returns a one-column data.frame
# holding the value part of the first matched line. On bad credentials
# returns 49 (numeric), as propagated from consultaLdap().
userLdap <- function(usuario, senha) {
resLdap <- consultaLdap(usuario, senha)
cat(file=stderr(),"resLdap: ", resLdap, "\n")
# consultaLdap returns a numeric code only on authentication failure.
if (!is.numeric(resLdap)) {
# Keep only the output lines that contain a field of interest.
dt_usuario <- unique (grep(paste(ldap_campos,collapse="|"), resLdap, value=TRUE))
# NOTE(review): separaTxt() splits only the FIRST matched line
# (strsplit(...)[[1]]), so 'id'/'dados' both come from that one line,
# presumably the "dn:" attribute -- TODO confirm this is intended.
dados <- data.frame('id' = separaTxt(dt_usuario)[1],
'dados' = separaTxt(dt_usuario)[2])
return (dados[2])
} else {
return (resLdap)
}
}
|
85fb6603227d7edcf09e6cfd28db6ceb0a4b9e81
|
7d1415b3ad71b079d39d0de3311e294c7083968f
|
/plot2.R
|
d12189e61182d27ee23962282d46bf54c58eb053
|
[] |
no_license
|
svsubbarao/ExData_Plotting1
|
a9df7979d7aa5a0a0049ee86f791c05c753fac9e
|
43741eb66d824dcfd132c0c0a27c4f6cdab77996
|
refs/heads/master
| 2021-01-18T10:00:23.191081
| 2015-09-11T04:48:22
| 2015-09-11T04:48:22
| 42,287,924
| 0
| 0
| null | 2015-09-11T04:39:10
| 2015-09-11T04:39:10
| null |
UTF-8
|
R
| false
| false
| 652
|
r
|
plot2.R
|
## plot2.R -- Exploratory Data Analysis, plot 2.
## Reads the household power consumption data, keeps 1-2 Feb 2007, and
## writes a line chart of Global Active Power over time to plot2.png.
## read the data from the file ("?" marks missing values)
dataR <- read.csv("household_power_consumption.txt" ,header=T,sep=";",stringsAsFactors=F, na.strings="?",colClasses=c("character", "character", "numeric", "numeric", "numeric","numeric","numeric", "numeric", "numeric"))
## subset the data from large data set: keep only 2007-02-01 and 2007-02-02
dataR <- dataR[dataR$Date %in% c("1/2/2007","2/2/2007") ,]
# combine Date and Time into a single POSIXlt timestamp for the x-axis
datetime <- strptime(paste(dataR$Date, dataR$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
## create the png file (480x480 pixel device)
png(filename="plot2.png", width=480, height=480)
## draw the graph on to the file
plot(datetime, dataR$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
|
a873cc881055fa210d667d38e56306b801c3e975
|
b2f61fde194bfcb362b2266da124138efd27d867
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query57_query49_1344/query57_query49_1344.R
|
033c382faa0a82f56a44eaf69945cdbae3019c57
|
[] |
no_license
|
arey0pushpa/dcnf-autarky
|
e95fddba85c035e8b229f5fe9ac540b692a4d5c0
|
a6c9a52236af11d7f7e165a4b25b32c538da1c98
|
refs/heads/master
| 2021-06-09T00:56:32.937250
| 2021-02-19T15:15:23
| 2021-02-19T15:15:23
| 136,440,042
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 717
|
r
|
query57_query49_1344.R
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 10617
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 10617
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query57_query49_1344.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 3029
c no.of clauses 10617
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 10617
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query57_query49_1344.qdimacs 3029 10617 E1 [] 0 16 3013 10617 NONE
|
4ee9d87f0cedd1e381147fec1c09c691bf836795
|
f7c8e2cdfa83f8188ced6e561934f65bfe96d640
|
/man/Class_Spells.Rd
|
861d26420af65387d4a68e04414fe51a6147ea72
|
[] |
no_license
|
vinny-paris/DnD
|
e4eb946c2dbc5b338d9318b685286b953fbeb034
|
22ce06b34b1f37709cbd2698b0c6f9f3d61e708e
|
refs/heads/master
| 2021-01-19T01:08:21.405350
| 2017-09-12T18:42:13
| 2017-09-12T18:42:13
| 87,227,701
| 10
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 489
|
rd
|
Class_Spells.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Class_Spells.R
\docType{data}
\name{Class_Spells}
\alias{Class_Spells}
\title{Spells Known by Class, Fully Labeled}
\format{An object of class \code{list} of length 8.}
\source{
{Dungeons and Dragons Fifth Edition Player's Handbook}
}
\usage{
Class_Spells
}
\description{
This is a list made up of the 8 spell casting classes. We do not preserve spell level but we do preserve the full name.
}
\keyword{datasets}
|
4c95e4435a92c5865efe84893cfa59b4a838c21a
|
ba6d284c860e9aee3054c1d384b6250402ebd81e
|
/man/read_recoveries.Rd
|
b57d33a9280cdc7633f81f899c05cae5b5b2f587
|
[
"MIT"
] |
permissive
|
jnshsrs/corona
|
13921b6bdad742316b46a1f849e94a2010d3d891
|
61dc1739f2ad49c8c486b19839ebcd7c9f409d61
|
refs/heads/master
| 2021-05-21T04:13:01.666292
| 2020-04-06T19:16:26
| 2020-04-06T19:16:26
| 252,537,378
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 508
|
rd
|
read_recoveries.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read-data.R
\name{read_recoveries}
\alias{read_recoveries}
\title{Read Corona Recovered Numbers}
\usage{
read_recoveries(file = NULL)
}
\arguments{
\item{file}{The file path to load from. Do not specify it, to load from
JHU github repository (which is recommended)}
}
\value{
A tibble containing country, state, date, number of recoveries
}
\description{
Read Corona Recovered Numbers
}
\examples{
\dontrun{
read_recoveries()
}
}
|
9cbcf398ab8b57e3e68b6d2aa3260f3323352b2d
|
605169a6c3f45c2c103a0bac7ecc53c720bc355f
|
/A4-w1.R
|
ea250092bbe0c7dc38bbc4b858564191eb4c1c76
|
[] |
no_license
|
layaSharifi/ExData_Plotting1
|
e41b11c0bea1947fce9f0c30e46a4a9d94582f6a
|
ce66a7394b7bbd6a53bdc9e87f3cd8c9425428ad
|
refs/heads/master
| 2020-12-31T05:57:34.953417
| 2017-02-01T21:07:01
| 2017-02-01T21:07:01
| 80,633,779
| 0
| 0
| null | 2017-02-01T15:32:24
| 2017-02-01T15:32:24
| null |
UTF-8
|
R
| false
| false
| 2,751
|
r
|
A4-w1.R
|
## A4-w1.R -- exploratory plots of household power consumption.
## Reads household_power_consumption.txt, keeps only the two days
## 2007-02-01 and 2007-02-02, and writes plot1.png .. plot4.png.

## Data loading and processing ----
# The script uses dplyr below, so install it if it is missing.
# (The original unconditionally installed data.table, which is never used.)
if (!requireNamespace("dplyr", quietly = TRUE)) install.packages("dplyr")
library(dplyr)
setwd("C:/Users/lsharifi/Desktop/Rot2/coursera/A4-w1")
power <- read.table("household_power_consumption.txt", sep = ";", header = TRUE,
                    na.strings = "?", stringsAsFactors = FALSE)
View(power)
# Convert date and time variables to Date/Time classes.
power$Time <- strptime(paste(power$Date, power$Time), "%d/%m/%Y %H:%M:%S")
power$Date <- as.Date(power$Date, "%d/%m/%Y")
# We will only be using data from the dates 2007-02-01 and 2007-02-02.
dates <- as.Date(c("2007-02-01", "2007-02-02"), "%Y-%m-%d")
power <- subset(power, Date %in% dates)
power

## Plot 1 ----
# (The original used bare "-----Plot1-----" separator lines, which are not
# valid R syntax and broke the script; they are proper comments now.)
plot1 <- hist(power$Global_active_power, main = paste("Global Active Power"),
              col = "red", ylab = "Frequency",
              xlab = "Global Active Power (kilowatts)")
dev.copy(png, file = "plot1.png", width = 480, height = 480)
dev.off()

## Plot 2 ----
plot2 <- plot(power$Time, power$Global_active_power, type = "l",
              xlab = "", ylab = "Global Active Power (kilowatts)")
dev.copy(png, file = "plot2.png", width = 480, height = 480)
dev.off()

## Plot 3 ----
plot3 <- plot(power$Time, power$Sub_metering_1, type = "l",
              xlab = "", ylab = "Energy sub metering")
lines(power$Time, power$Sub_metering_2, col = "red")
lines(power$Time, power$Sub_metering_3, col = "blue")
legend("topright", col = c("black", "red", "blue"),
       c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = c(1, 1), lwd = c(1, 1))
dev.copy(png, file = "plot3.png", width = 480, height = 480)
dev.off()

## Plot 4 ----
# 2 x 2 panel of four plots written to a single file.
# (The original had a dangling "plot4 <-" before par(); removed.)
par(mfrow = c(2, 2))
## PLOT 1
plot(power$Time, power$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power")
## PLOT 2
plot(power$Time, power$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
## PLOT 3
plot(power$Time, power$Sub_metering_1, type = "l",
     xlab = "", ylab = "Energy sub metering")
lines(power$Time, power$Sub_metering_2, col = "red")
lines(power$Time, power$Sub_metering_3, col = "blue")
# bty = "n" removes the legend box; cex shrinks the text; trailing spaces in
# the labels make the legend render correctly in the small panel.
legend("topright", col = c("black", "red", "blue"),
       c("Sub_metering_1 ", "Sub_metering_2 ", "Sub_metering_3 "),
       lty = c(1, 1), bty = "n", cex = .5)
# PLOT 4
plot(power$Time, power$Global_reactive_power, type = "l",
     xlab = "datetime", ylab = "Global_reactive_power")
# OUTPUT
dev.copy(png, file = "plot4.png", width = 480, height = 480)
dev.off()
|
a249c95196148fc95c5cd03d567ba8b715f1d887
|
6379ae1e1875cc0e82d47163722f314fe9797ff6
|
/week2.R
|
32bcc354ea4c57cf565d4915a94865c1c312788b
|
[] |
no_license
|
aarjan/strategic-business-modeling
|
7e1c7b168ea34524d7c289c53596729049af8688
|
9fa0f86001d6441e34ecf7804ee8ef6d35ce52a5
|
refs/heads/master
| 2020-03-18T23:56:01.883265
| 2018-06-20T11:56:49
| 2018-06-20T11:56:49
| 135,440,224
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,259
|
r
|
week2.R
|
## week2.R -- linear regression of credit Rating on all other variables.
# NOTE(review): rm(list=ls()) wipes the user's entire workspace as a side
# effect and is discouraged in shared scripts -- consider removing.
rm(list=ls(all=TRUE))
# Let's load our dataset and call it data
data=read.table('data/DATA_3.01_CREDIT.csv',sep=',',header=TRUE) # The function read.table enables us to read flat files such as .csv files
# Now let's have a look at our variables and see some summary statistics
str(data) # The str() function shows the structure of your dataset and details the type of variables that it contains
summary(data) # The summary() function provides for each variable in your dataset the minimum, mean, maximum and quartiles
hist(data$Rating) # Produce a histogram of the credit scores
cor(data[,c(1:5,10)]) # Compute the correlation between all the numerical variables of the sample
linreg=lm(Rating~.,data=data) # Estimate a linear regression model of Rating as a function of everything else.
cor(linreg$fitted.values,data$Rating) # Computes the correlation between the fitted values and the actual ones
plot(data$Rating,linreg$fitted.values) # Plot the fitted values vs. the actual ones
summary(linreg) # Reports the results of the regression
plot(data$Balance,data$Rating) # Allows to visualize the relationship between Balance and Rating
plot(data$Income,data$Rating) # Allows to visualize the relationship between Income and Rating
|
777a0d034d1f2374cf84d5119c8b50bcae87a910
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/rem/examples/reciprocityStat.Rd.R
|
4c15c6b618e23cea1732363c888e3dae6ac315cc
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,549
|
r
|
reciprocityStat.Rd.R
|
# Extracted example code for rem::reciprocityStat.
library(rem)
### Name: reciprocityStat
### Title: Calculate reciprocity statistics
### Aliases: reciprocityStat reciprocity
### ** Examples
# create some data with 'sender', 'target' and a 'time'-variable
# (Note: Data used here are random events from the Correlates of War Project)
sender <- c('TUN', 'NIR', 'NIR', 'TUR', 'TUR', 'USA', 'URU',
'IRQ', 'MOR', 'BEL', 'EEC', 'USA', 'IRN', 'IRN',
'USA', 'AFG', 'ETH', 'USA', 'SAU', 'IRN', 'IRN',
'ROM', 'USA', 'USA', 'PAN', 'USA', 'USA', 'YEM',
'SYR', 'AFG', 'NAT', 'NAT', 'USA')
target <- c('BNG', 'ZAM', 'JAM', 'SAU', 'MOM', 'CHN', 'IRQ',
'AFG', 'AFG', 'EEC', 'BEL', 'ITA', 'RUS', 'UNK',
'IRN', 'RUS', 'AFG', 'ISR', 'ARB', 'USA', 'USA',
'USA', 'AFG', 'IRN', 'IRN', 'IRN', 'AFG', 'PAL',
'ARB', 'USA', 'EEC', 'BEL', 'PAK')
time <- c('800107', '800107', '800107', '800109', '800109',
'800109', '800111', '800111', '800111', '800113',
'800113', '800113', '800114', '800114', '800114',
'800116', '800116', '800116', '800119', '800119',
'800119', '800122', '800122', '800122', '800124',
'800125', '800125', '800127', '800127', '800127',
'800204', '800204', '800204')
# NOTE(review): sample() without set.seed() makes these two attribute
# vectors non-reproducible across runs.
type <- sample(c('cooperation', 'conflict'), 33,
replace = TRUE)
important <- sample(c('important', 'not important'), 33,
replace = TRUE)
# combine them into a data.frame
dt <- data.frame(sender, target, time, type, important)
# create event sequence and order the data
dt <- eventSequence(datevar = dt$time, dateformat = "%y%m%d",
data = dt, type = "continuous",
byTime = "daily", returnData = TRUE,
sortData = TRUE)
# create counting process data set (with null-events) - conditional logit setting
dts <- createRemDataset(dt, dt$sender, dt$target, dt$event.seq.cont,
eventAttribute = dt$type,
atEventTimesOnly = TRUE, untilEventOccurrs = TRUE,
returnInputData = TRUE)
## divide up the results: counting process data = 1, original data = 2
dtrem <- dts[[1]]
dt <- dts[[2]]
## merge all necessary event attribute variables back in
dtrem$type <- dt$type[match(dtrem$eventID, dt$eventID)]
dtrem$important <- dt$important[match(dtrem$eventID, dt$eventID)]
# manually sort the data set
dtrem <- dtrem[order(dtrem$eventTime), ]
# calculate reciprocity statistic
dtrem$recip <- reciprocityStat(data = dtrem,
time = dtrem$eventTime,
sender = dtrem$sender,
target = dtrem$target,
eventvar = dtrem$eventDummy,
halflife = 2)
# plot the reciprocity statistic over time, colored by event vs. null-event
# (the original comment said "sender-outdegree", which is not what is plotted)
library("ggplot2")
ggplot(dtrem, aes(eventTime, recip,
group = factor(eventDummy), color = factor(eventDummy)) ) +
geom_point()+ geom_smooth()
# calculate reciprocity statistic with typematch
# if a cooperated with b in the past, does
# b cooperate with a now?
dtrem$recip.typematch <- reciprocityStat(data = dtrem,
time = dtrem$eventTime,
sender = dtrem$sender,
target = dtrem$target,
eventvar = dtrem$eventDummy,
eventtypevar = dtrem$type,
eventtypevalue = 'valuematch',
halflife = 2)
# calculate reciprocity with valuemix on type
# (returnData = TRUE appends the new columns to dtrem itself)
dtrem <- reciprocityStat(data = dtrem,
time = dtrem$eventTime,
sender = dtrem$sender,
target = dtrem$target,
eventvar = dtrem$eventDummy,
eventtypevar = dtrem$type,
eventtypevalue = 'valuemix',
halflife = 2,
returnData = TRUE)
# calculate reciprocity and count important events only
dtrem$recip.filtered <- reciprocityStat(data = dtrem,
time = dtrem$eventTime,
sender = dtrem$sender,
target = dtrem$target,
eventvar = dtrem$eventDummy,
eventfiltervar = dtrem$important,
eventfiltervalue = 'important',
halflife = 2)
|
b559376ac94aeb5bcfb4e9f6ed5b5d18c119b071
|
0c8251dda435aa2a13f3bf1631641f4b595ba183
|
/R/tam.jml.R
|
09ba97a36925ac0ce9fe68ef7377d864e361196f
|
[] |
no_license
|
yaozeyang90/TAM
|
e7212ff14c364126ae32738ef590dc5a6df6d0f1
|
2f9cedef0a282460415eadeb62de220774066860
|
refs/heads/master
| 2020-03-11T22:43:27.522233
| 2018-04-17T09:26:36
| 2018-04-17T09:26:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,224
|
r
|
tam.jml.R
|
## File Name: tam.jml.R
## File Version: 9.27
# Joint maximum likelihood (JML) estimation front-end.
# Dispatches to one of two internal implementations (tam_jml_version1 /
# tam_jml_version2) based on the `version` argument. Version 1 is forced
# whenever theta.fixed is supplied, because only the version-1 call below
# forwards theta.fixed.
tam.jml <- function( resp , group = NULL , adj=.3 , disattenuate = FALSE ,
bias = TRUE, xsi.fixed=NULL , xsi.inits = NULL , theta.fixed = NULL ,
A=NULL , B=NULL , Q=NULL , ndim=1 ,
pweights = NULL , verbose = TRUE , control = list() , version = 2 )
{
# Capture the call so it can be stored on the result (for print/update).
CALL <- match.call()
#**** handle verbose argument
# Look up `verbose` in the literal user call: control$progress is set from
# the user-supplied value if present, else defaults to TRUE.
args_CALL <- as.list( sys.call() )
control$progress <- tam_args_CALL_search( args_CALL=args_CALL , variable="verbose" ,
default_value = TRUE )
#*******
# theta.fixed is only supported by the version-1 implementation.
if ( ! is.null(theta.fixed) ){
version <- 1
}
#**** version = 1
if (version == 1){
res <- tam_jml_version1( resp=resp , group = group , adj=adj ,
disattenuate = disattenuate ,
bias = bias, xsi.fixed=xsi.fixed , xsi.inits = xsi.inits ,
A=A , B=B , Q=Q , ndim=ndim , theta.fixed = theta.fixed ,
pweights = pweights , control = control )
}
#**** version = 2
# Note: theta.fixed is intentionally not passed here (unsupported).
if (version == 2){
res <- tam_jml_version2( resp=resp , group = group , adj=adj ,
disattenuate = disattenuate ,
bias = bias, xsi.fixed=xsi.fixed , xsi.inits = xsi.inits ,
A=A , B=B , Q=Q , ndim=ndim ,
pweights = pweights , control = control )
}
# Attach the captured call to the result list before returning it.
res$CALL <- CALL
return(res)
}
|
56007d1b6ea70d5d3d0ad75d3e2a8c120fa71f5b
|
2f227338295aaaa3caff4e211516820aee08b2ad
|
/man/gd_df_by.Rd
|
9c0f1721f8b8eba80eee8d1569c3e71389afe75e
|
[] |
no_license
|
acobos/gocDescriptives
|
b0e310f2eef2eca66e5743ab811609b3f6b11ad7
|
fb4b3ea7a7b1c3d4471126f3702d167cd5159df3
|
refs/heads/master
| 2021-09-12T07:03:24.822767
| 2018-04-15T08:55:46
| 2018-04-15T08:55:46
| 128,122,489
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,167
|
rd
|
gd_df_by.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gd_df_by.R
\name{gd_df_by}
\alias{gd_df_by}
\title{Describes a dataframe by groups}
\usage{
gd_df_by(df, by, ...)
}
\arguments{
\item{df}{\code{dataframe} to be described.}
\item{by}{Name of the variable defining groups. Should be \code{character} or
\code{factor}.}
\item{...}{Further arguments passed to function \code{gd_df}.}
}
\value{
A \code{dataframe} with columns \code{Variable}, \code{Key}, and one
additional column for each level/value of the \code{by} variable.
}
\description{
Describes a dataframe by groups, defined by the \code{by} variable.
}
\examples{
# Example data
set.seed(123)
Sex <- sample(c("Male", "Female"), 100, replace=TRUE)
Age <- floor(sample(50 + 10 * rnorm(100)))
Group <- sample(LETTERS[1:2], 100, replace=TRUE)
dat <- data.frame(Sex, Age, Group)
# Describe dataframe dat by Group
gd_df_by(dat, "Group")
# Same, using pipe
library(dplyr)
dat \%>\%
gd_df_by("Group")
# Introduce some missings and pass further arguments
dat$Sex[1:5] <- NA
gd_df_by(dat, "Group")
# Change the descriptor for missings
gd_df_by(dat, "Group", NA_label = "Unknown")
}
|
ab138f40c98d03a44e40782a523a8d52f89e4664
|
f9d49957d442694c7b8ad14c7a27b91db1032a35
|
/term-project/sample/code/03_02_OneSamplet.R
|
cf46167f1a0be0b36aaca4e40630580ffe47a2f6
|
[] |
no_license
|
tmussa1/r-programming
|
f92b1d095c14cd9709514c32dcddc3d2d19e3006
|
8ca41facb7e8d7abdc2425673b4543d0b35d4052
|
refs/heads/master
| 2023-01-23T21:47:29.837096
| 2020-12-16T18:23:07
| 2020-12-16T18:23:07
| 297,534,996
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,889
|
r
|
03_02_OneSamplet.R
|
# Title: Comparing one mean to a population:
# One-sample t-test
# File: 03_02_OneSamplet.R
# Project: R_EssT_2; R Essential Training, Part 2:
# Modeling Data
# INSTALL AND LOAD PACKAGES ################################
# Install pacman ("package manager") if needed
if (!require("pacman")) install.packages("pacman")
# pacman must already be installed; then load contributed
# packages (including pacman) with pacman
pacman::p_load(datasets, magrittr, pacman, tidyverse)
# datasets: for demonstration purposes
# magrittr: for pipes
# pacman: for loading/unloading packages
# tidyverse: for so many reasons
# LOAD AND PREPARE DATA ####################################
# Get info on "quakes" dataset
?quakes
# Get variable names
quakes %>% names()
# Save "mag" to "df" and print first few data points
df <- quakes %>%
pull(mag) %>% # Save mag as vector
glimpse() # Show beginning values
# EXPLORE DATA #############################################
# Histogram (with base graphics)
df %>% hist()
# Boxplot of values
df %>% boxplot(horizontal = T)
# Summary of magnitude
df %>% summary()
# T-TESTS ##################################################
# Use t-test for one-sample. By default, the sample mean is
# compared to a population mean of 0.
df %>% t.test()
# One-sided t-test with population mean = 4.6
df %>%
t.test(
alternative = "greater", # Use directional hypothesis
mu = 4.6 # Set population mean
)
# CLEAN UP #################################################
# Clear data
rm(list = ls()) # Removes all objects from environment
# Clear packages
detach("package:datasets", unload = T) # For base packages
p_unload(all) # Remove all contributed packages
# Clear plots
graphics.off() # Clears plots, closes all graphics devices
# Clear console
cat("\014") # Mimics ctrl+L
# Clear mind :)
|
dbfee92cb1006bc0ea1aa549d64eb0b44e82ea0f
|
007b929dbdb6ec3040d635ccbf7906f2d84a87b5
|
/htree/man/hrf.Rd
|
5279324620ca592ba8b94fc7d1ed598dc1bdd110
|
[] |
no_license
|
joxton/htree
|
7d8b85a0619121453e11c704eecafdc45ee6d30a
|
60b4f37ff41d6ddf303a432a3423b16c310ed712
|
refs/heads/main
| 2023-08-28T03:37:53.592237
| 2021-10-17T14:39:12
| 2021-10-17T14:39:12
| 418,086,957
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,993
|
rd
|
hrf.Rd
|
\name{hrf}
\alias{hrf}
\title{Random forest for longitudinal data}
\description{Fits a random forest of historical regression trees to longitudinal data.}
\usage{
hrf(x,
time=NULL,
id=NULL,
yindx,
ntrees = 100,
method="freq",
mtry=NULL,
se=FALSE,
B=100,
R=10,
nsamp=5,
historical=TRUE,
vh=NULL,
vc=NULL,
delta=NULL,
classify=FALSE,
control=list())
}
\arguments{
\item{x}{A data frame containing response and predictors}
\item{time}{A vector of observation times associated with rows of \code{x}.}
\item{id}{Subject identifier, if \code{NULL} observations are assumed independent}
\item{yindx}{Name of response variable, alt. its column number in \code{x}}
\item{ntrees}{Number of trees in ensemble}
\item{method}{Historical summary method, can be \code{freq}, \code{frac}, \code{mean0}, \code{freqw}, \code{fracw} and \code{mean0w} (see below).}
\item{mtry}{Number of predictors sampled at each split}
\item{se}{If \code{TRUE} then bootstrap standard errors are computed. Total number of trees fit for bootstrapping is \code{B*R}. }
\item{B}{Only used if \code{se=TRUE}, number of bootstrap samples, defaults to \code{100}.}
\item{R}{Only used if \code{se=TRUE}, forest size for each bootstrap sample, defaults to \code{R=10}.}
\item{nsamp}{Number of sampled \code{delta} values, see below}
\item{historical}{If \code{TRUE} then historical splitting is done, else only standard (ie concurrent predictor) splitting.}
\item{vh}{Optional vector of variable names to be used as historical predictors, can be a numeric vector giving column numbers of \code{x}.}
\item{vc}{Optional vector of variable names to be used as concurrent predictors, can be a numeric vector giving column numbers of \code{x}.}
\item{delta}{An optional vector of time-lags to be used (see below).}
\item{classify}{If \code{TRUE} then a classification tree is built (using gini-impurity node splitting).}
\item{control}{A list of control parameters (see below). All arguments, except those describing the data, can be set in \code{control}. Arguments in \code{control}
are used if both are given.}
}
\details{
The \code{hrf} function fits a random forest model to longitudinal data. Data is assumed to be of form:
\eqn{z_{ij}=(y_{ij},t_{ij},x_{ij})} for \eqn{i=1,..,n} and \eqn{j=1,..,n_i}, with \eqn{y_{ij}} being the response for the \eqn{i}-th subject
at the \eqn{j}-th observation time \eqn{t_{ij}}. The vector of predictors at time \eqn{t_{ij}} are \eqn{x_{ij}}. The number of observations can vary across
subjects, and the sampling in time can be irregular.
\code{hrf} estimates a model for the response \eqn{y_{ij}} using both \eqn{(t_{ij},x_{ij})} (the observations concurrent with \eqn{y_{ij}}) and all preceeding observations of the \eqn{i}-th subject up to (but not including) time \eqn{t_{ij}}. The model is fit using \code{historical} regression (alt. classification) trees. Here a predictor is one
of two types, either \code{concurrent} or \code{historical}. The concurrent predictors for \eqn{y_{ij}} are the elements of the vector (\eqn{(t_{ij},x_{ij})}), while a
historic predictor is the set of all preceeding values (i.e. prior to time \eqn{t_{ij}}) of a given element of \eqn{(y_{ij},t_{ij},x_{ij})}, for subject \eqn{i}. In
a historic regression tree, node splitting on a \code{concurrent} predictor follows the approach in standard regression (classification) trees. For \code{historical} predictors
the splitting is modified since, associated with each observed response \eqn{y_{ij}}, the number (and observation times) of observations of a \code{historical} predictor will vary according to
\eqn{i} and \eqn{j}. For these, the splitting is done by first transforming the preceeding values of a predictor using a \code{summary function}. This summary is
invertible, in the sense that knowledge of it is equivalent to knowledge of the covariate history. Letting \eqn{\bar{z}_{ijk}} denote the set of historical values of the
\eqn{k}-th element of \eqn{z_{ij}}, the summary function is denoted \eqn{s(\eta;\bar{z}_{ijk})} where \eqn{\eta} is the argument vector of the summary
function. Node splitting based on a historical predictor is done by solving \deqn{\mbox{argmin}\sum_{(ij)\in Node} (y_{ij}-\mu_L I(s(\eta;\bar{z}_{ijk})<c)-\mu_R I(s(\eta;\bar{z}_{ijk})\geq c))^2,} where the minimization is over the vector \eqn{(k,\mu,c,\eta)}. Each node of \code{historical} regression tree is split using the best split
among all splits of concurrent and historical predictors.
Different \code{summary functions} are available in \code{hrf}, specified by the argument \code{method}. If \code{method="freq"} the summary function summarizing a covariate history is:
\deqn{s(\eta;\bar{z}_{ijk})=\sum_{h: t_{ij}-\eta_1\leq t_{ih}<t_{ij}} I(z_{ihk}<\eta_2);} \code{method="frac"}:
\deqn{s(\eta;\bar{z}_{ijk})=\sum_{h: t_{ij}-\eta_1\leq t_{ih}<t_{ij}} I(z_{ihk}<\eta_2)/n_{ij}(\eta);} \code{method="mean0"}:
\deqn{s(\eta;\bar{z}_{ijk})=\sum_{h: t_{ij}-\eta_1\leq t_{ih}<t_{ij}} z_{ihk}/n_{ij}(\eta);} \code{method="freqw"}:
\deqn{s(\eta;\bar{z}_{ijk})=\sum_{h: t_{ij}-\eta_1\leq t_{ih}<t_{ij}-\eta_2} I(z_{ihk}<\eta_3);} \code{method="fracw"}:
\deqn{s(\eta;\bar{z}_{ijk})=\sum_{h: t_{ij}-\eta_1\leq t_{ih}<t_{ij}-\eta_2} I(z_{ihk}<\eta_3)/n_{ij}(\eta);} \code{method="mean0w"}:
\deqn{s(\eta;\bar{z}_{ijk})=\sum_{h: t_{ij}-\eta_1\leq t_{ih}<t_{ij}-\eta_2} z_{ihk}/n_{ij}(\eta).} Here \eqn{n_{ij}(\eta)} denotes the number of observations of subject
\eqn{i} in the
time window \eqn{[t_{ij}-\eta_1,t_{ij}-\eta_2)}. In the case \eqn{n_{ij}(\eta)=0}, the summary function is set to zero, i.e \eqn{s(\eta;\bar{z}_{ijk})=0}. The default is \code{method="freq"}. The possible values of \eqn{\eta_1} in the summary function can be set by the argument \code{delta}. If not supplied, the set of possible values of
\eqn{\eta_1} is
determined by the difference in time between within-subject successive observations in the data. When a split is attempted on a historical predictor, a sample of this set is
taken from which the best split is selected. The size of this set equals that of the \code{nsamp} argument. See below on \code{control} for further arguments governing the
historical splitting.
Setting \code{se=TRUE} performs standard error estimation. The number of bootstrap samples (sampling subjects with replacement) is determined by
\code{B}. For each bootstrap sample a random forest with \code{R} trees is built, which defaults to \code{R=10}. The bias induced by
using smaller bootstrap ensemble sizes is corrected for in the estimate. Using \code{se=TRUE} will influence summaries from
the fitted model, such as providing approximate confidence intervals for partial dependence plots (when running \code{partdep_hrf}), and give
standard errors for predictions when \code{predict_hrf} is used.
All arguments (except those describing the data \code{x}, \code{yindx},\code{time} and \code{id}) can be set in the \code{control} list. The arguments supplied
in \code{control} are used if both are supplied. So if \code{ntrees=300} and \code{control=list(ntrees=500)} then \code{500} trees are fit. Besides the arguments
described above, a number of other parameters can be set in control. These are: \code{nodesize} giving the minimum number of training observations in a terminal node;
\code{sample_fraction} giving the fraction of data sample to train each tree; \code{dtry} the number of sampled \code{delta} values used when splitting based on
a historical variable (note this is alternatively controlled by the argument \code{nsamp} above); \code{ndelta} the number of \code{delta} values to use if \code{delta} is not supplied, these are taken as the quantiles from the
distribution of observed \code{delta} values in the data; \code{qtry} the number of sampled values of \eqn{\eta_2} for \code{method=freq/frac}, \eqn{\eta_3} for
\code{method=freqw/fracw}; \code{quantiles} is a vector of probabilities, and is used when \code{method=frac} or \code{method=fracw}, ie when covariate histories are split on their windowed empirical distributions. Splitting is restricted to these quantiles.
}
\value{ Returns a \code{list} with elements, the most important being: \code{h} is a list returned by \code{parLapply} for fitting trees in parallel, \code{error} gives the OOB error (mse for regression, misclassification rate for classification), \code{control} a list containing control arguments, \code{boot}
gives bootstrapped model estimates (if \code{se=TRUE}), \code{x,id,time} give the training data. }
\references{
L. Breiman (2001). \dQuote{Random Forests,} \emph{Machine Learning} 45(1):5-32.
Zhang and Singer (2010) \dQuote{Recursive Partitioning and Applications} \emph{Springer}.
Sexton and Laake (2009) \dQuote{Standard errors for bagged and random forest estimators,} \emph{Computational Statistics and Data Analysis}.
}
\author{Joe Sexton \email{joesexton0@gmail.com}}
\seealso{ \code{\link{predict_hrf}}, \code{\link{partdep_hrf}},
\code{\link{varimp_hrf}}. }
\examples{
\dontrun{
# ----------------------------------------------------------------------------------------- ##
# Mother's stress on child illness:
# Investigate whether mother's stress is (Granger) causal for child illness
# 'hrf' model is fit using previous observations of mother's stress to predict
# child's illness at given time point, but not mother's stress at that time point
#
# Predictor variables are classified into "historical" and "concurrent"
#
# A predictor is "historical" if its prior realizations can be used to predict
# the outcome.
#
# A predictor is "concurrent" if its realization at the same timepoint as the outcome
# can be used to predict the outcome at that timepoint
#
# A predictor can be both "concurrent" and "historical", the status of the predictors
# can be set by the 'vh' and 'vc' arguments of 'hrf'.
# (if not set these are automatically determined)
#
# ------------------------------------------------------------------------------------------- ##
data(mscm)
mscm=as.data.frame(na.omit(mscm))
# -- set concurrent and historical predictors
historical_predictors=match(c("stress","illness"),names(mscm))
concurrent_predictors=which(names(mscm)!="stress")
control=list(vh=historical_predictors,vc=concurrent_predictors)
# -- fit historical random forest
# (NOTE: response is 0/1 so a regression tree is
# the same as a classification tree with Gini-index splitting)
ff=hrf(x=mscm,id=mscm$id,time=mscm$day,yindx="illness",control=control)
# out-of-bag error
plot(1:length(ff$error),ff$error,type="l",main="OOB error",xlab="forest size",ylab="mse")
# .. larger nodesize works slightly better
control$nodesize=20
ff=hrf(x=mscm,id=mscm$id,time=mscm$day,yindx="illness",control=control)
points(1:length(ff$error),ff$error,type="l",col="blue")
# -- variable importance table
vi=varimp_hrf(ff)
vi
# -- fit historical random forest with 'se=TRUE'
control$se=TRUE
ff=hrf(x=mscm,id=mscm$id,time=mscm$day,yindx="illness",control=control)
# -- partial dependence for top 4 predictors (with +/-2 SE estimates)
par(mfrow=c(2,2))
for(k in 1:4)
pd=partdep_hrf(ff,xindx=as.character(vi$Predictor[k]))
par(mfrow=c(1,1))
## -- Classification trees
## setting classify=TRUE builds classification tree (gini-impurity node splitting)
control$classify=TRUE
## ... standard error estimation not implemented .. turn off bootstrapping
control$se=FALSE
ff=hrf(x=mscm,id=mscm$id,time=mscm$day,yindx="illness",control=control)
# -- plot oob classification error
plot(1:length(ff$error),ff$error,type="l",xlab="forest size",ylab="oob classification error")
abline(mean(mscm$illness),0,lty=2) ## error of constant model
p=predict_hrf(ff)
## variable importance table (model error measured by gini-impurity)
vi=varimp_hrf(ff)
vi
# -------------------------------- #
# Data w/irregular observation times
# ------------------------------- #
data(cd4)
control=list(se=TRUE)
ff=hrf(x=cd4,id=cd4$id,time=cd4$time,yindx="count",control=control)
vi=varimp_hrf(ff)
# -- partial dependence for top 4 predictors (with +/-2 SE estimates)
par(mfrow=c(2,2))
for(k in 1:4)
pd=partdep_hrf(ff,xindx=as.character(vi$Predictor[k]))
par(mfrow=c(1,1))
plot(1:length(ff$error),ff$error,xlab="forest size",ylab="oob mse",type="l")
## by default, the number of delta values (parameter 'eta_1' above) is 20
## can set this using 'ndelta'
control$ndelta=50
control$se=FALSE # -- turning off bootstrapping ..
ff=hrf(x=cd4,id=cd4$id,time=cd4$time,yindx="count",control=control)
points(1:length(ff$error),ff$error,type="l",lty=2)
# the grid of delta values
ff$control$delta
# --------------------------------------- ##
# Boston Housing data (not longitudinal)
# --------------------------------------- ##
# library(htree)
library(mlbench)
library(randomForest)
data(BostonHousing)
dat=as.data.frame(na.omit(BostonHousing))
## omitting arguments time/id assumes rows are iid
control=list(ntrees=500,sample_fraction=.5,nodesize=1)
h=hrf(x=dat,yindx="medv",control=control)
## randomForest comparison
## (by default, randomForest samples with replacement, while hrf samples without)
r=randomForest(medv~.,data=dat,replace=F,sampsize=ceiling(.5*nrow(dat)),nodesize=1)
## plot oob-error for both
plot(1:length(r$mse),r$mse,type="l",ylim=c(min(r$mse,h$error),max(r$mse,h$error)),
main="BostonHousing",xlab="forest size",ylab="out-of-bag mse")
points(1:length(h$error),h$error,type="l",col="blue")
## -- variable importance table
vi=varimp_hrf(h)
vi
## -- partial dependence plots with approximate 95% C.I
control$se=TRUE
h=hrf(x=dat,yindx="medv",control=control)
par(mfrow=c(2,2))
for(k in 1:4)
pd=partdep_hrf(h,xindx=as.character(vi$Predictor[k]))
par(mfrow=c(1,1))
}
}
\keyword{nonparametric}
\keyword{tree}
\keyword{longitudinal}
|
ea47394f6429da64ff5d11f5518ca2387607ec24
|
c8f83302772325930ecab8f61d4f39c06e9e2e98
|
/Make_R0_Incursions.R
|
bad01f3c1d3ee97602cac5b2d08f72efed572565
|
[
"MIT"
] |
permissive
|
boydorr/TransmissionScale
|
5fda1bb6b2ae62f522cea0b2a9af1901f57456d4
|
566e1830254d57d262dfc5771ef564218981fb44
|
refs/heads/main
| 2023-04-11T21:47:34.835197
| 2022-04-21T14:55:56
| 2022-04-21T14:55:56
| 450,220,751
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,941
|
r
|
Make_R0_Incursions.R
|
# Make incursions to simulate multiple runs to compute R0, Rebecca Mancy, started 2021-08-10
# Note that the files required to run this script are large and are not provided.
rm(list=ls())
library(ggplot2)
library(dplyr)
library(tidyr) # library(sp)
library(sp)
# Paths / scenario identifiers for the Java simulation output
ff <- "java_output/"
end.in.days <- 5112  # length of the simulated period in days
f_LOWER_VAX <- "Round3_LOWER_VAX_1000"
f_R0_MULTI <- "Round3_R0_MULTI_1000"
f_R0_by_location <- "Round3_R0_by_location_1000"
n_runs <- 1000  # number of simulation runs to build incursion files for
# grd.1000: spatial grid of 1km cells; data columns 3+ hold dog population
# counts per time window (column names are the window start days)
load("data/grd_1000.rda")
# NOTE(review): ncol(grd.1000) — presumably equal to ncol(grd.1000@data); confirm
grd_pops <- grd.1000@data[, 3:ncol(grd.1000)]
sum_pops <- colSums(grd_pops); plot(1:168, sum_pops) # totals by time
month_bounds <- as.numeric(names(grd_pops))  # start day of each time window
sum_all <- sum(grd_pops)
# Sampling weights used below when drawing incursion times and locations:
tw_weight = sum_pops/sum_all # time-window weight
gc_weight = grd_pops[,1]/sum(grd_pops[,1]) # gridcell weight
xy_weight = rep(1,nrow(grd_pops))/nrow(grd_pops)  # uniform weight over grid cells
popID_levels <- grd.1000$popID
# For each LOWER_VAX run: replace every simulated case with an "incursion"
# whose time is drawn proportionally to population per time window and whose
# location is a random point in a grid cell drawn with weight gc_weight
# (proportional to the cell's population in the first time window).
# Output goes to the R0_MULTI incursions directory.
system.time({
for (i in 1:n_runs) {
# Read in output from the LOWER_VAX scenario
sim_output_LOWER_VAX <- read.csv(file = paste0(ff, f_LOWER_VAX, "/output_files/", f_LOWER_VAX, "_sim_cases_",sprintf("%03d",i),".csv"))
n_incursions <- nrow(sim_output_LOWER_VAX)
print(paste0("Run ", i, " with ", n_incursions, " cases"))
# Draw a gridcell and time window from grd.1000 by drawing proportional to grd_pops
time_window_start <- as.numeric(sample(names(sum_pops), size = n_incursions, replace = T, prob = tw_weight))
populations <- sample(grd.1000@data$popID, size = n_incursions, replace = T, prob = gc_weight)
# head(time_window_start); head(populations)
incursion_cases_x <- numeric(n_incursions)
incursion_cases_y <- numeric(n_incursions)
# Draw random date within each population from above
# (uniform within the 28-day window starting at time_window_start)
sample_days <- time_window_start + runif(n = n_incursions, min = 0, max = 28)
# Draw random location within each population from above
for (j in 1:n_incursions) {
sample_coords <- spsample(x = subset(grd.1000, grd.1000@data$popID == populations[j]), n = 1, type = "random", iter = 5)
incursion_cases_x[j] <- sample_coords@coords[1]
incursion_cases_y[j] <- sample_coords@coords[2]
}
# Replace new dates and coordinates into sim_output_LOWER_VAX
# (all other columns are blanked to -1, keeping only the case template shape)
incursion_list <- sim_output_LOWER_VAX
incursion_list[, 3:ncol(incursion_list)] <- -1
incursion_list$popID <- populations # incursion_cases$popID
incursion_list$x_coord <- incursion_cases_x # incursion_cases$x_coord
incursion_list$y_coord <- incursion_cases_y # incursion_cases$y_coord
incursion_list$dayInfectious <- sample_days # incursion_cases$dayInfectious
# Transform all cases to being recorded as INCURSION type
incursion_list$typeOfCase <- "INCURSION"
# Write out to file
print(paste0(ff, f_R0_MULTI, "/incursions/incursions_",sprintf("%03d",i),".csv"))
write.table(incursion_list, file = paste0(ff, f_R0_MULTI, "/incursions/incursions_",sprintf("%03d",i),".csv"),
append = FALSE, quote = FALSE, sep = ",", eol = "\n", na = "NA", dec = ".", row.names = FALSE, col.names = TRUE, fileEncoding = "")
}
})
###############################
# PLOT OLD INCURSIONS (with N/S gradient)
# PLOT NEW INCURSIONS (by pop density)
# Sanity-check plots: for each run, a histogram of infection days and a map of
# incursion points. NOTE(review): the PDF is written under f_R0_MULTI but the
# data read below come from f_R0_by_location (the f_R0_MULTI line is commented
# out) — confirm this is intentional.
end.in.days <- 5112
pdf(paste0(ff, f_R0_MULTI, "/incursions/incursionsTEST.pdf"))
par(mfrow = c(2,1), mar = c(2,2,1,1)) # set up plotting area
for (i in 1:n_runs) {
print(i)
# sim_incursions <- read.csv(file = paste0(ff, f_R0_MULTI, "/incursions/incursions_",sprintf("%03d",i),".csv"))
sim_incursions <- read.csv(file = paste0(ff, f_R0_by_location, "/incursions/incursions_",sprintf("%03d",i),".csv"))
sim_incursions <- filter(sim_incursions, dayInfectious < end.in.days)
sim_incursions$popID_Factor <- factor(sim_incursions$popID, levels = popID_levels)
# Count cases per grid cell (.drop = F keeps cells with zero cases)
inc_summary <- sim_incursions %>%
group_by(popID_Factor, .drop = F) %>%
summarise(nCases = length(caseID))
cases = sum(inc_summary$nCases)
# plots for the first 100 runs to sanity check some outputs!
if(i < 100) {
hist(sim_incursions$dayInfectious, breaks = seq(0, end.in.days + 30.5, 30.5),
main = paste0("Cases = ", cases))
plot(sim_incursions$x_coord, sim_incursions$y_coord, cex = 0.1, col = alpha("black", 0.4))
}
}
dev.off()
###############################
# now try to generate index cases by grid cell
# Same procedure as the first loop, with two differences: grid cells are drawn
# uniformly (xy_weight) rather than by population, and spsample uses iter = 8.
# Output goes to the R0_by_location incursions directory.
system.time({
for (i in 1:n_runs) {
#for (i in 297:302) {
# Read in output from the LOWER_VAX scenario
sim_output_LOWER_VAX <- read.csv(file = paste0(ff, f_LOWER_VAX, "/output_files/", f_LOWER_VAX, "_sim_cases_",sprintf("%03d",i),".csv"))
n_incursions <- nrow(sim_output_LOWER_VAX)
print(paste0("Run ", i, " with ", n_incursions, " cases"))
# Draw a gridcell and time window from grd.1000 by drawing proportional to grd_pops
time_window_start <- as.numeric(sample(names(sum_pops), size = n_incursions, replace = T, prob = tw_weight))
populations <- sample(grd.1000@data$popID, size = n_incursions, replace = T, prob = xy_weight)
# head(time_window_start); head(populations)
incursion_cases_x <- numeric(n_incursions)
incursion_cases_y <- numeric(n_incursions)
# Draw random date within each population from above
sample_days <- time_window_start + runif(n = n_incursions, min = 0, max = 28)
# Draw random location within each population from above
for (j in 1:n_incursions) {
sample_coords <- spsample(x = subset(grd.1000, grd.1000@data$popID == populations[j]), n = 1, type = "random", iter = 8)
incursion_cases_x[j] <- sample_coords@coords[1]
incursion_cases_y[j] <- sample_coords@coords[2]
}
# Replace new dates and coordinates into sim_output_LOWER_VAX
incursion_list <- sim_output_LOWER_VAX
incursion_list[, 3:ncol(incursion_list)] <- -1
incursion_list$popID <- populations # incursion_cases$popID
incursion_list$x_coord <- incursion_cases_x # incursion_cases$x_coord
incursion_list$y_coord <- incursion_cases_y # incursion_cases$y_coord
incursion_list$dayInfectious <- sample_days # incursion_cases$dayInfectious
# Transform all cases to being recorded as INCURSION type
incursion_list$typeOfCase <- "INCURSION"
# Write out to file
print(paste0(ff, f_R0_by_location, "/incursions/incursions_",sprintf("%03d",i),".csv"))
write.table(incursion_list, file = paste0(ff, f_R0_by_location, "/incursions/incursions_",sprintf("%03d",i),".csv"),
append = FALSE, quote = FALSE, sep = ",", eol = "\n", na = "NA", dec = ".", row.names = FALSE, col.names = TRUE, fileEncoding = "")
}
})
###############################
# PLOT OLD INCURSIONS (with N/S gradient)
# PLOT NEW INCURSIONS (by pop density)
# Sanity-check plots for the R0_by_location incursions (only runs 1:28 here).
end.in.days <- 5112
pdf(paste0(ff, f_R0_by_location, "/incursions/incursionsTEST.pdf"))
par(mfrow = c(2,1), mar = c(2,2,1,1)) # set up plotting area
for (i in 1:28) {
# for (i in 1:n_runs) {
print(i)
# sim_incursions <- read.csv(file = paste0(ff, f_R0_MULTI, "/incursions/incursions_",sprintf("%03d",i),".csv"))
sim_incursions <- read.csv(file = paste0(ff, f_R0_by_location, "/incursions/incursions_",sprintf("%03d",i),".csv"))
sim_incursions <- filter(sim_incursions, dayInfectious < end.in.days)
sim_incursions$popID_Factor <- factor(sim_incursions$popID, levels = popID_levels)
# Count cases per grid cell (.drop = F keeps empty cells)
inc_summary <- sim_incursions %>%
group_by(popID_Factor, .drop = F) %>%
summarise(nCases = length(caseID))
cases = sum(inc_summary$nCases)
# plots for the first 100 runs to sanity check some outputs!
if(i < 100) {
hist(sim_incursions$dayInfectious, breaks = seq(0, end.in.days + 30.5, 30.5),
main = paste0("Cases = ", cases))
plot(sim_incursions$x_coord, sim_incursions$y_coord, cex = 0.1, col = alpha("black", 0.4))
}
}
dev.off()
# Third pass: regenerate the R0_by_location incursion files (grid cells drawn
# uniformly via xy_weight, time windows weighted by population).
#
# BUG FIX: the original lines
#     incursion_list$x_coord <- # incursion_cases$x_coord
#     incursion_list$y_coord <- # incursion_cases$y_coord
# had no right-hand side, so R parsed them as the chained assignment
#     x_coord <- y_coord <- dayInfectious <- sample_days
# silently writing the infection day into both coordinate columns. The sampled
# coordinates are now assigned explicitly.
for (i in 1:n_runs) {
  # Read in output from the LOWER_VAX scenario (one case per row; each case
  # becomes one incursion)
  sim_output_LOWER_VAX <- read.csv(file = paste0(ff, f_LOWER_VAX, "/output_files/", f_LOWER_VAX,
                                                 "_sim_cases_", sprintf("%03d", i), ".csv"))
  n_incursions <- nrow(sim_output_LOWER_VAX)
  print(paste0("Run ", i, " with ", n_incursions, " cases"))
  # Draw a time window (population-weighted) and a grid cell (uniform) per case
  time_window_start <- as.numeric(sample(names(sum_pops), size = n_incursions, replace = TRUE, prob = tw_weight))
  populations <- sample(grd.1000@data$popID, size = n_incursions, replace = TRUE, prob = xy_weight)
  incursion_cases_x <- numeric(n_incursions)
  incursion_cases_y <- numeric(n_incursions)
  # Random day, uniform within the 28-day window starting at time_window_start
  sample_days <- time_window_start + runif(n = n_incursions, min = 0, max = 28)
  # Random point within each sampled grid cell
  for (j in seq_len(n_incursions)) {
    sample_coords <- spsample(x = subset(grd.1000, grd.1000@data$popID == populations[j]),
                              n = 1, type = "random", iter = 5)
    incursion_cases_x[j] <- sample_coords@coords[1]
    incursion_cases_y[j] <- sample_coords@coords[2]
  }
  # Build the incursion table from the case template, blanking all other columns
  incursion_list <- sim_output_LOWER_VAX
  incursion_list[, 3:ncol(incursion_list)] <- -1
  incursion_list$popID <- populations
  incursion_list$x_coord <- incursion_cases_x  # FIX: RHS was missing
  incursion_list$y_coord <- incursion_cases_y  # FIX: RHS was missing
  incursion_list$dayInfectious <- sample_days
  # Transform all cases to being recorded as INCURSION type
  incursion_list$typeOfCase <- "INCURSION"
  # Write out to file
  print(paste0(ff, f_R0_by_location, "/incursions/incursions_", sprintf("%03d", i), ".csv"))
  write.table(incursion_list,
              file = paste0(ff, f_R0_by_location, "/incursions/incursions_", sprintf("%03d", i), ".csv"),
              append = FALSE, quote = FALSE, sep = ",", eol = "\n", na = "NA", dec = ".",
              row.names = FALSE, col.names = TRUE, fileEncoding = "")
}
# TEST 238 lines - is this working?
|
ca108dbb315f871c0bd405cbff9d3d0a4526856b
|
9c17405743a5f066a2ff01ddff1e696704c8b9aa
|
/materials/labs/5_Friday/proteomics/proteomics.R
|
196ea6349812efb6eed53cfe8fc3103d3e6bd616
|
[] |
no_license
|
BioXiao/CSAMA2015
|
2a5510463012da4ce79cfb91c4fae7316cf3a5d2
|
1e3648514f2b6dd72b6c694b3183b64caf210206
|
refs/heads/master
| 2021-01-21T19:19:22.454977
| 2015-10-29T12:08:02
| 2015-10-29T12:08:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,456
|
r
|
proteomics.R
|
## Script purled from a knitr vignette (CSAMA proteomics lab); the
## "## ---- name ----" lines are the original chunk markers, and lines
## beginning with "## " inside chunks were eval=FALSE in the vignette.
## ----env0, message=FALSE, echo=FALSE, warning=FALSE----------------------
library("knitr")
opts_knit$set(error = FALSE)
library("BiocInstaller")
library("RforProteomics")
library("nloptr")
## ---- env, message=FALSE, echo=TRUE, warning=FALSE-----------------------
## Bioconductor proteomics packages used throughout the lab
library("mzR")
library("mzID")
library("MSnID")
library("MSnbase")
library("rpx")
library("MLInterfaces")
library("pRoloc")
library("pRolocdata")
library("MSGFplus")
library("rols")
library("hpar")
## ----r4pinstall, eval=FALSE----------------------------------------------
## library("BiocInstaller")
## biocLite("RforProteomics", dependencies = TRUE)
## ---- pk, echo=FALSE, warning=FALSE, cache=TRUE--------------------------
## Enumerate proteomics/MS packages available for the current Bioc version
biocv <- as.character(biocVersion())
pp <- proteomicsPackages(biocv)
msp <- massSpectrometryPackages(biocv)
msdp <- massSpectrometryDataPackages(biocv)
## ---- pp, eval=FALSE-----------------------------------------------------
## library("RforProteomics")
## pp <- proteomicsPackages()
## display(pp)
## ---- datatab, results='asis', echo=FALSE--------------------------------
## Table of MS file formats and the Bioconductor packages that read/write them
datatab <-
data.frame(Type = c("raw", "identification", "quantitation",
"peak lists", "other"),
Format = c("mzML, mzXML, netCDF, mzData",
"mzIdentML", "mzQuantML", "mgf", "mzTab"),
Package = c(
"[`mzR`](http://bioconductor.org/packages/release/bioc/html/mzR.html) (read)",
paste("[`mzR`](http://bioconductor.org/packages/release/bioc/html/mzR.html) (read) and",
"[`mzID`](http://bioconductor.org/packages/release/bioc/html/mzID.html) (read)"),
"",
"[`MSnbase`](http://bioconductor.org/packages/release/bioc/html/MSnbase.html) (read/write)",
"[`MSnbase`](http://bioconductor.org/packages/release/bioc/html/MSnbase.html) (read/write)"))
library("knitr")
kable(datatab)
## ---- rpx----------------------------------------------------------------
## Access ProteomeXchange repositories via the rpx package
library("rpx")
pxannounced()
## ---- pxd, cache=TRUE----------------------------------------------------
px <- PXDataset("PXD000001")
px
pxfiles(px)
## ---- pxvar--------------------------------------------------------------
pxtax(px)
pxurl(px)
pxref(px)
## ---- pxget, eval=FALSE--------------------------------------------------
## mzf <- pxget(px, pxfiles(px)[6])
## ----pxd1----------------------------------------------------------------
## Use the locally cached PXD000001 data package instead of downloading
library("PXD000001")
px1files()
mzf <- px1files()[5]
px1get(mzf)
## ---- rawms--------------------------------------------------------------
## Open the raw mzML file with mzR (low-level access to spectra)
library("mzR")
ms <- openMSfile(mzf)
ms
## ---- hd-----------------------------------------------------------------
hd <- header(ms)
dim(hd)
names(hd)
## ------------------------------------------------------------------------
hd[1000, ]
head(peaks(ms, 1000))
plot(peaks(ms, 1000), type = "h")
## ---- msmap--------------------------------------------------------------
## a set of spectra of interest: MS1 spectra eluted
## between 30 and 35 minutes retention time
ms1 <- which(hd$msLevel == 1)
rtsel <- hd$retentionTime[ms1] / 60 > 30 &
hd$retentionTime[ms1] / 60 < 35
## the map
M <- MSmap(ms, ms1[rtsel], 521, 523, .005, hd)
plot(M, aspect = 1, allTicks = FALSE)
plot3D(M)
## With some MS2 spectra
i <- ms1[which(rtsel)][1]
j <- ms1[which(rtsel)][2]
M2 <- MSmap(ms, i:j, 100, 1000, 1, hd)
plot3D(M2)
## ---- id, cache=TRUE-----------------------------------------------------
## Read identification results (mzIdentML) with mzID
library("mzID")
f <- dir(system.file("extdata", package = "RforProteomics"),
pattern = "mzid", full.names=TRUE)
basename(f)
id <- mzID(f)
id
## ----mzrvsid, eval = TRUE------------------------------------------------
## Same file read with mzR's openIDfile, for comparison
library("mzR")
f <- dir(system.file("extdata", package = "RforProteomics"),
pattern = "mzid", full.names=TRUE)
id1 <- openIDfile(f)
fid1 <- mzR::psms(id1)
head(fid1)
## ---- rtandem, eval=FALSE------------------------------------------------
## library("rTANDEM")
## ?rtandem
## library("shinyTANDEM")
## ?shinyTANDEM
## ----ex_getfas, eval=FALSE-----------------------------------------------
## fas <- pxget(px, pxfiles(px)[10])
## basename(fas)
## ----ex_getfaslocal------------------------------------------------------
## Fetch the FASTA database from the local data package
fas <- px1files()[1]
px1get(fas)
## ----ex_msgfplus, message=FALSE, cache=TRUE------------------------------
## Run an MS-GF+ database search on the raw file mzf
library("MSGFplus")
msgfpar <- msgfPar(database = fas,
instrument = 'HighRes',
tda = TRUE,
enzyme = 'Trypsin',
protocol = 'iTRAQ')
idres <- runMSGF(msgfpar, mzf, memory=1000)
idres
## identification file (needed below)
basename(mzID::files(idres)$id)
## ----msgfgui, eval=FALSE-------------------------------------------------
## library("MSGFgui")
## MSGFgui()
## ---- msnid--------------------------------------------------------------
## Post-process identifications with MSnID: FDR filtering of PSMs
library("MSnID")
msnid <- MSnID(".")
msnid <- read_mzIDs(msnid,
basename(mzID::files(idres)$id))
show(msnid)
## ----msnvars-------------------------------------------------------------
## Derive filtering variables: -log10 spectral E-value and |mass error| in ppm
msnid <- correct_peak_selection(msnid)
msnid$msmsScore <- -log10(msnid$`MS-GF:SpecEValue`)
msnid$absParentMassErrorPPM <- abs(mass_measurement_error(msnid))
## ----idplot, echo=FALSE--------------------------------------------------
## Score distributions of target vs decoy PSMs
x <- psms(msnid)$msmsScore
gr <- psms(msnid)$isDecoy
library("lattice")
densityplot(x,
group = gr,
auto.key=TRUE)
## ----msnidfilt-----------------------------------------------------------
## Manual filter thresholds, then evaluate the resulting FDR
filtObj <- MSnIDFilter(msnid)
filtObj$absParentMassErrorPPM <- list(comparison="<", threshold=5.0)
filtObj$msmsScore <- list(comparison=">", threshold=8.0)
filtObj
evaluate_filter(msnid, filtObj)
## ----filtopt-------------------------------------------------------------
## Optimise the thresholds by grid search for a 1% PSM-level FDR
filtObj.grid <- optimize_filter(filtObj, msnid, fdr.max=0.01,
method="Grid", level="PSM",
n.iter=50000)
filtObj.grid
evaluate_filter(msnid, filtObj.grid)
## ----applyfilt-----------------------------------------------------------
msnid <- apply_filter(msnid, filtObj.grid)
msnid
## ---- msnbase------------------------------------------------------------
## Read raw data into an MSnExp object with MSnbase
library("MSnbase")
rawFile <- dir(system.file(package = "MSnbase", dir = "extdata"),
full.name = TRUE, pattern = "mzXML$")
basename(rawFile)
msexp <- readMSData(rawFile, verbose = FALSE)
msexp
## ------------------------------------------------------------------------
## Subsetting: [ keeps an experiment, [[ extracts a single spectrum
length(msexp)
msexp[1:2]
msexp[[2]]
## ---- addid--------------------------------------------------------------
## Annotate the raw spectra with the identification results
fData(msexp)
## find path to a mzIdentML file
identFile <- dir(system.file(package = "MSnbase", dir = "extdata"),
full.name = TRUE, pattern = "dummyiTRAQ.mzid")
basename(identFile)
msexp <- addIdentificationData(msexp, identFile)
fData(msexp)
## ---- specplot-----------------------------------------------------------
msexp[[1]]
plot(msexp[[1]], full=TRUE)
## ---- specplot2----------------------------------------------------------
msexp[1:3]
plot(msexp[1:3], full=TRUE)
## ---- quanttab, echo=FALSE, results='asis'-------------------------------
## Overview table of quantitation strategies by MS level and labelling
qtb <- matrix(c("XIC", "Counting", "SILAC, 15N", "iTRAQ, TMT"),
nrow = 2, ncol = 2)
dimnames(qtb) <- list(
'MS level' = c("MS1", "MS2"),
'Quantitation' = c("Label-free", "Labelled"))
kable(qtb)
## ---- itraq4plot---------------------------------------------------------
plot(msexp[[1]], full=TRUE, reporters = iTRAQ4)
## ---- quantitraq---------------------------------------------------------
## iTRAQ 4-plex reporter-ion quantitation (trapezoidal integration)
msset <- quantify(msexp, method = "trap", reporters = iTRAQ4, verbose=FALSE)
exprs(msset)
processingData(msset)
## ---- lfms2--------------------------------------------------------------
## Label-free MS2 quantitation: spectral index (SIn) and NSAF
exprs(si <- quantify(msexp, method = "SIn"))
exprs(saf <- quantify(msexp, method = "NSAF"))
## ----mztab, cache=TRUE, eval = FALSE-------------------------------------
## mztf <- pxget(px, pxfiles(px)[2])
## (mzt <- readMzTabData(mztf, what = "PEP"))
## ----mztablocal----------------------------------------------------------
## Read peptide-level mzTab data from the local data package
mztf <- px1files()[3]
px1get(mztf)
(mzt <- readMzTabData(mztf, what = "PEP"))
## ---- readmsnset2--------------------------------------------------------
## Build an MSnSet from a plain csv (expression columns 7:10)
csv <- dir(system.file ("extdata" , package = "pRolocdata"),
full.names = TRUE, pattern = "pr800866n_si_004-rep1.csv")
getEcols(csv, split = ",")
ecols <- 7:10
res <- readMSnSet2(csv, ecols)
head(exprs(res))
head(fData(res))
## ---- pure---------------------------------------------------------------
## Correct for iTRAQ reporter isotopic impurities (4x4 purity matrix)
data(itraqdata)
qnt <- quantify(itraqdata, method = "trap",
reporters = iTRAQ4, verbose = FALSE)
impurities <- matrix(c(0.929,0.059,0.002,0.000,
0.020,0.923,0.056,0.001,
0.000,0.030,0.924,0.045,
0.000,0.001,0.040,0.923),
nrow=4, byrow = TRUE)
## or, using makeImpuritiesMatrix()
## impurities <- makeImpuritiesMatrix(4)
qnt.crct <- purityCorrect(qnt, impurities)
processingData(qnt.crct)
## ---- pureplot, echo=FALSE-----------------------------------------------
## Compare two expression sets channel by channel: a 2-by-2 grid of log-log
## scatter plots (one per column of x vs the same column of y) with the
## identity line and a grid overlaid. Graphical parameters are restored on
## exit. `main` is accepted for call compatibility but is not used.
plot0 <- function(x, y, main = "") {
  saved_par <- par(no.readonly = TRUE)
  on.exit(par(saved_par))
  par(mar = c(4, 4, 1, 1))
  par(mfrow = c(2, 2))
  x_labels <- sampleNames(x)
  y_labels <- sampleNames(y)
  for (channel in seq_len(ncol(x))) {
    plot(exprs(x)[, channel], exprs(y)[, channel], log = "xy",
         xlab = x_labels[channel], ylab = y_labels[channel])
    abline(0, 1)
    grid()
  }
}
## plot0(qnt, qnt.crct, main = "Before/after correction")
## ---- norm---------------------------------------------------------------
## Quantile-normalise the purity-corrected intensities
qnt.crct.nrm <- normalise(qnt.crct, "quantiles")
## ---- plotnorm, echo=FALSE-----------------------------------------------
## plot0(qnt, qnt.crct.nrm)
## ---- comb---------------------------------------------------------------
## arbitrary grouping
g <- factor(c(rep(1, 25), rep(2, 15), rep(3, 15)))
g
prt <- combineFeatures(qnt.crct.nrm, groupBy = g, fun = "sum")
processingData(prt)
## ----impute0-------------------------------------------------------------
## Introduce 10 random missing values to demonstrate filtering/imputation
set.seed(1)
qnt0 <- qnt
exprs(qnt0)[sample(prod(dim(qnt0)), 10)] <- NA
table(is.na(qnt0))
image(qnt0)
## ----filterNA------------------------------------------------------------
## remove features with missing values
qnt00 <- filterNA(qnt0)
dim(qnt00)
any(is.na(qnt00))
## ----impute--------------------------------------------------------------
## impute missing values using knn imputation
qnt.imp <- impute(qnt0, method = "knn")
dim(qnt.imp)
any(is.na(qnt.imp))
## ---- ml-----------------------------------------------------------------
## Supervised learning: 5-NN classification of protein localisation markers
library("MLInterfaces")
library("pRoloc")
library("pRolocdata")
data(dunkley2006)
traininds <- which(fData(dunkley2006)$markers != "unknown")
ans <- MLearn(markers ~ ., data = t(dunkley2006), knnI(k = 5), traininds)
ans
## ----clust---------------------------------------------------------------
## Unsupervised learning: k-means with 12 centres
kcl <- MLearn( ~ ., data = dunkley2006, kmeansI, centers = 12)
kcl
plot(kcl, exprs(dunkley2006))
## ----nont, echo=FALSE, cache=TRUE----------------------------------------
library("rols")
nont <- nrow(ontologies())
## ----rols----------------------------------------------------------------
## Query the Ontology Lookup Service for "ESI" in the MS ontology
library("rols")
olsQuery("ESI", "MS")
## ---- si, echo=FALSE-----------------------------------------------------
print(sessionInfo(), local = FALSE)
|
807f9ef210976b6ddc29887be2ad012c042b126b
|
d998fb34120769092d9e273e625fec4dea381704
|
/man/read_taxa.Rd
|
a3fb8009dcab5504e6fcd3da4e48816e373cec60
|
[] |
no_license
|
cran/benthos
|
56c3be3b8ece470823486c636243baad07d290b1
|
6615e8093e4d5bd925e7a33e47319952a044bc4f
|
refs/heads/master
| 2022-09-20T20:31:47.639967
| 2022-08-22T09:10:06
| 2022-08-22T09:10:06
| 111,669,759
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 858
|
rd
|
read_taxa.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/io.R
\name{read_taxa}
\alias{read_taxa}
\alias{get_taxa}
\alias{validate_taxa}
\title{Read and Validate Taxa Data}
\usage{
read_taxa(filename)
get_taxa()
validate_taxa(.data)
}
\arguments{
\item{filename}{name of taxa file}
\item{.data}{table in taxa-format}
}
\description{
This function reads files in the taxa format.
}
\details{
Taxa files have the following format:
\itemize{
\item{group} {taxonomic group}
\item{provided} {provided taxon name}
\item{accepted} {accepted taxon name}
\item{level} {taxonomic level}
}
Other columns are allowed, but silently ignored.
}
\section{Functions}{
\itemize{
\item \code{get_taxa()}: get default taxa list (TWN list extended with species Southern North Sea)
\item \code{validate_taxa()}: validator for taxa-format
}}
|
9b7d73955f281d6791a941edc4312e759c8f10de
|
80393834bd9ac5b564fc040fd5112af949891f54
|
/TodosAlgoritmosSemestre/euler.R
|
9f6ebcdb6f695220b748ba239a771c11343b4265
|
[] |
no_license
|
SantiagoRomeroPineda/AnalisisNumerico
|
59cc9a0fbefea1cbdc29fa4413b1337ecb2242bc
|
4250af68396dade545f0ee27d5155e6a63dc1e0a
|
refs/heads/master
| 2023-01-20T15:47:32.796976
| 2020-11-27T14:18:31
| 2020-11-27T14:18:31
| 285,291,665
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,115
|
r
|
euler.R
|
# PolynomF: polynomial interpolation (poly_calc); deSolve: ODE solvers (ode)
library(PolynomF)
require(deSolve)
rm(list=ls())
# Explicit (forward) Euler method for the IVP dy/dt = f(t, y), y(t0) = y0.
#
# Arguments:
#   f  - derivative function f(t, y)
#   t0 - initial time
#   y0 - initial value y(t0)
#   h  - step size (equally spaced points)
#   n  - total number of points, including the initial one
#
# Prints the (t, y) table, plots the approximate solution, and invisibly
# returns the table so callers can reuse it (the original returned nothing
# useful). Fixes: `for (i in 2:n)` looped over c(2, 1) when n == 1;
# seq_len(n)[-1] is empty in that case.
euler1 <- function(f, t0, y0, h, n) {
  t <- seq(t0, t0 + (n - 1) * h, by = h)  # n equally spaced time points
  y <- rep(NA_real_, times = n)
  y[1] <- y0
  for (i in seq_len(n)[-1]) y[i] <- y[i - 1] + h * f(t[i - 1], y[i - 1])
  res <- cbind(t, y)
  print(res) # print
  plot(t, y, pch = 19, col = "red", xlab = "ti", ylab = "solución",
       main = "Euler vs RK4")
  invisible(res)
}
options(digits = 15)
# Test problem: dy/dt = 0.7*y - t^2 + 1, y(1) = 1, solved by Euler on [1, 2]
f = function(t,y) (0.7*y)-(t^2)+1
euler1(f, 1, 1, 0.1, 10)
###
#install.packages("deSolve")######################################
# Same right-hand side in the signature deSolve expects
fp = function(t,y, parms){
s = (0.7*y)-(t^2)+1
return(list(s)) # ode() requires the output to be a list
}
# NOTE(review): the initial state c(1,1) has length 2 while fp returns a
# single derivative — confirm deSolve accepts this as intended
tis= seq(1,2,0.1)# using deSolve's ode()
sol = ode(c(1,1), tis, fp, parms=NULL, method = "rk4")# Runge-Kutta order 4 # output
tabla = cbind(tis, sol[,2] )
colnames(tabla) = c("ti", " Ti ")
tabla
# Plot the RK4 solution over the Euler plot for comparison
par(new=T)
plot(tis, sol[,2],
xlab = "ti",
ylab = "solucion", main = "Euler vs RK4")
# Interpolating polynomial through the RK4 points, drawn as a curve
a=poly_calc(tis,sol[,2])
curve(a,add=T)
abline(h=0,v=0,col="red")
print(a)
|
d34588d1c3edc99e54ae6e1a483a1a52528d10a2
|
91b827e48e510661d90d81d0155130b2703ae51d
|
/analysis/4-binary-effectmod/4g-PR-prev-adj-scoop.R
|
af318db6bfe1d4d20e2d72033c3110c5b0a031b3
|
[] |
no_license
|
LaKwong/WBB-STH-Kato-Katz
|
2f0cf1a906ced8badb35b18df72adbcf41013534
|
d4d97093fb4e04f6cb7bcf919e41c42ea25cff83
|
refs/heads/master
| 2020-09-02T05:37:17.925364
| 2019-04-24T19:43:26
| 2019-04-24T19:43:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,126
|
r
|
4g-PR-prev-adj-scoop.R
|
##############################################
# WASH Benefits Bangladesh STH Kato-Katz Study
# Primary outcome analysis
# STH adjusted analysis
# Binary STH outcomes
# Effect modification by whether household
# has a dedicated scoop for removing feces
# by Jade Benjamin-Chung
# jadebc@berkeley.edu
##############################################
rm(list=ls())
source(here::here("0-config.R"))
#----------------------------------------------
# load and pre-process analysis dataset
#----------------------------------------------
data = read.csv(sth_data_path,stringsAsFactors=TRUE)
d=preprocess.sth(data)
d=preprocess.adj.sth(d)
#----------------------------------------------
# create separate datasets for households with
# vs. without a dedicated scoop for removing feces
#----------------------------------------------
d1=d[d$scoop==1,]
d0=d[d$scoop==0,]
# roof and landphone excluded due to low prevalence
W=c("counter","birthord","month","hfiacat","aged","sex","momage","momheight","momedu",
"Nlt18","Ncomp","watmin","walls","floor",
"elec","asset_wardrobe","asset_table","asset_chair","asset_khat","asset_chouki",
"asset_tv","asset_refrig","asset_bike","asset_moto","asset_sewmach","asset_mobile")
dW1=d1[,c("block","tr","clusterid","sth","al","hw","tt",W)]
dW0=d0[,c("block","tr","clusterid","sth","al","hw","tt",W)]
#----------------------------------------------
# H1: Unadjusted prevalence ratios; each arm vs.
# control. PR, CI, P-value
#----------------------------------------------
#----------------------------------------------
# Has dedicated feces scoop
#----------------------------------------------
trlist=c("Water","Sanitation","Handwashing",
"WSH","Nutrition","Nutrition + WSH")
SL.library=c("SL.mean","SL.glm","SL.bayesglm","SL.gam","SL.glmnet")
est.al.h1.fec1=apply(matrix(trlist), 1,function(x) washb_tmle(Y=dW1$al,tr=dW1$tr,
pair=dW1$block, id=dW1$block,W=dW1[,W],
family="binomial",contrast=c("Control",x),Q.SL.library=SL.library,
g.SL.library=SL.library, pval=0.2, seed=12345, print=TRUE))
est.hw.h1.fec1=apply(matrix(trlist), 1,function(x) washb_tmle(Y=dW1$hw,tr=dW1$tr,
pair=dW1$block, id=dW1$block,W=dW1[,W],
family="binomial",contrast=c("Control",x),Q.SL.library=SL.library,
g.SL.library=SL.library, pval=0.2, seed=12345, print=TRUE))
est.tt.h1.fec1=apply(matrix(trlist), 1,function(x) washb_tmle(Y=dW1$tt,tr=dW1$tr,
pair=dW1$block, id=dW1$block,W=dW1[,W],
family="binomial",contrast=c("Control",x),Q.SL.library=SL.library,
g.SL.library=SL.library, pval=0.2, seed=12345, print=TRUE))
est.sth.h1.fec1=apply(matrix(trlist), 1,function(x) washb_tmle(Y=dW1$sth,tr=dW1$tr,
pair=dW1$block, id=dW1$block,W=dW1[,W],
family="binomial",contrast=c("Control",x),Q.SL.library=SL.library,
g.SL.library=SL.library, pval=0.2, seed=12345, print=TRUE))
al_rr_h1_fec1_j=format.tmle(est.al.h1.fec1,family="binomial")$rr
al_rd_h1_fec1_j=format.tmle(est.al.h1.fec1,family="binomial")$rd
hw_rr_h1_fec1_j=format.tmle(est.hw.h1.fec1,family="binomial")$rr
hw_rd_h1_fec1_j=format.tmle(est.hw.h1.fec1,family="binomial")$rd
tt_rr_h1_fec1_j=format.tmle(est.tt.h1.fec1,family="binomial")$rr
tt_rd_h1_fec1_j=format.tmle(est.tt.h1.fec1,family="binomial")$rd
sth_rr_h1_fec1_j=format.tmle(est.sth.h1.fec1,family="binomial")$rr
sth_rd_h1_fec1_j=format.tmle(est.sth.h1.fec1,family="binomial")$rd
rownames(al_rr_h1_fec1_j)=c("Water vs C","Sanitation vs C","Handwashing vs C",
"WSH vs C","Nutrition vs C","Nutrition + WSH vs C")
rownames(hw_rr_h1_fec1_j)=c("Water vs C","Sanitation vs C","Handwashing vs C",
"WSH vs C","Nutrition vs C","Nutrition + WSH vs C")
rownames(tt_rr_h1_fec1_j)=c("Water vs C","Sanitation vs C","Handwashing vs C",
"WSH vs C","Nutrition vs C","Nutrition + WSH vs C")
rownames(sth_rr_h1_fec1_j)=c("Water vs C","Sanitation vs C","Handwashing vs C",
"WSH vs C","Nutrition vs C","Nutrition + WSH vs C")
rownames(al_rd_h1_fec1_j)=c("Water vs C","Sanitation vs C","Handwashing vs C",
"WSH vs C","Nutrition vs C","Nutrition + WSH vs C")
rownames(hw_rd_h1_fec1_j)=c("Water vs C","Sanitation vs C","Handwashing vs C",
"WSH vs C","Nutrition vs C","Nutrition + WSH vs C")
rownames(tt_rd_h1_fec1_j)=c("Water vs C","Sanitation vs C","Handwashing vs C",
"WSH vs C","Nutrition vs C","Nutrition + WSH vs C")
rownames(sth_rd_h1_fec1_j)=c("Water vs C","Sanitation vs C","Handwashing vs C",
"WSH vs C","Nutrition vs C","Nutrition + WSH vs C")
#----------------------------------------------
# Doesn't have dedicated feces scoop
#----------------------------------------------
est.al.h1.fec0=apply(matrix(trlist), 1,function(x) washb_tmle(Y=dW0$al,tr=dW0$tr,
pair=dW0$block, id=dW0$block,W=dW0[,W],
family="binomial",contrast=c("Control",x),Q.SL.library=SL.library,
g.SL.library=SL.library, pval=0.2, seed=12345, print=TRUE))
est.hw.h1.fec0=apply(matrix(trlist), 1,function(x) washb_tmle(Y=dW0$hw,tr=dW0$tr,
pair=dW0$block, id=dW0$block,W=dW0[,W],
family="binomial",contrast=c("Control",x),Q.SL.library=SL.library,
g.SL.library=SL.library, pval=0.2, seed=12345, print=TRUE))
est.tt.h1.fec0=apply(matrix(trlist), 1,function(x) washb_tmle(Y=dW0$tt,tr=dW0$tr,
pair=dW0$block, id=dW0$block,W=dW0[,W],
family="binomial",contrast=c("Control",x),Q.SL.library=SL.library,
g.SL.library=SL.library, pval=0.2, seed=12345, print=TRUE))
est.sth.h1.fec0=apply(matrix(trlist), 1,function(x) washb_tmle(Y=dW0$sth,tr=dW0$tr,
pair=dW0$block, id=dW0$block,W=dW0[,W],
family="binomial",contrast=c("Control",x),Q.SL.library=SL.library,
g.SL.library=SL.library, pval=0.2, seed=12345, print=TRUE))
al_rr_h1_fec0_j=format.tmle(est.al.h1.fec0,family="binomial")$rr
al_rd_h1_fec0_j=format.tmle(est.al.h1.fec0,family="binomial")$rd
hw_rr_h1_fec0_j=format.tmle(est.hw.h1.fec0,family="binomial")$rr
hw_rd_h1_fec0_j=format.tmle(est.hw.h1.fec0,family="binomial")$rd
tt_rr_h1_fec0_j=format.tmle(est.tt.h1.fec0,family="binomial")$rr
tt_rd_h1_fec0_j=format.tmle(est.tt.h1.fec0,family="binomial")$rd
sth_rr_h1_fec0_j=format.tmle(est.sth.h1.fec0,family="binomial")$rr
sth_rd_h1_fec0_j=format.tmle(est.sth.h1.fec0,family="binomial")$rd
rownames(al_rr_h1_fec0_j)=c("Water vs C","Sanitation vs C","Handwashing vs C",
"WSH vs C","Nutrition vs C","Nutrition + WSH vs C")
rownames(hw_rr_h1_fec0_j)=c("Water vs C","Sanitation vs C","Handwashing vs C",
"WSH vs C","Nutrition vs C","Nutrition + WSH vs C")
rownames(tt_rr_h1_fec0_j)=c("Water vs C","Sanitation vs C","Handwashing vs C",
"WSH vs C","Nutrition vs C","Nutrition + WSH vs C")
rownames(sth_rr_h1_fec0_j)=c("Water vs C","Sanitation vs C","Handwashing vs C",
"WSH vs C","Nutrition vs C","Nutrition + WSH vs C")
rownames(al_rd_h1_fec0_j)=c("Water vs C","Sanitation vs C","Handwashing vs C",
"WSH vs C","Nutrition vs C","Nutrition + WSH vs C")
rownames(hw_rd_h1_fec0_j)=c("Water vs C","Sanitation vs C","Handwashing vs C",
"WSH vs C","Nutrition vs C","Nutrition + WSH vs C")
rownames(tt_rd_h1_fec0_j)=c("Water vs C","Sanitation vs C","Handwashing vs C",
"WSH vs C","Nutrition vs C","Nutrition + WSH vs C")
rownames(sth_rd_h1_fec0_j)=c("Water vs C","Sanitation vs C","Handwashing vs C",
"WSH vs C","Nutrition vs C","Nutrition + WSH vs C")
#----------------------------------------------
# save objects
#----------------------------------------------
save(al_rr_h1_fec1_j,hw_rr_h1_fec1_j,tt_rr_h1_fec1_j,sth_rr_h1_fec1_j,
al_rd_h1_fec1_j,hw_rd_h1_fec1_j,tt_rd_h1_fec1_j,sth_rd_h1_fec1_j,
al_rr_h1_fec0_j,hw_rr_h1_fec0_j,tt_rr_h1_fec0_j,sth_rr_h1_fec0_j,
al_rd_h1_fec0_j,hw_rd_h1_fec0_j,tt_rd_h1_fec0_j,sth_rd_h1_fec0_j,
file=paste0(save_data_path, "sth_pr_adj_scoop.RData"))
|
12445347df345d96289d49b351e3c2efc3c4c7ce
|
bff9a887ea03299db2217c5b7b2d249eab946a10
|
/Siganus_sutor.R
|
3630fb302883994babe773d455f4deeb3c8173d0
|
[] |
no_license
|
zanbi/https-github.com-merrillrudd-LIME_application
|
82a0efde5a0b74110d8e33715e2fcaa7bcf79ec8
|
0608e8d466ab63676f5cca278203afd04c286b3d
|
refs/heads/master
| 2022-04-16T16:43:25.864524
| 2018-03-29T19:25:08
| 2018-03-29T19:25:08
| 256,052,816
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 50,435
|
r
|
Siganus_sutor.R
|
rm(list=ls())
## load LIME package
devtools::install_github("kaskr/TMB_contrib_R/TMBhelper")
devtools::install_github("merrillrudd/LIME", build.vignettes=TRUE, dependencies=TRUE)
library(LIME)
## load LBSPR package
devtools::install_github("adrianhordyk/LBSPR", build.vignettes=TRUE, dependencies=TRUE)
library(LBSPR)
## load Rfishbase
library(rfishbase)
library(gplots)
###################################
## Directories
###################################
main_dir <- "C:\\merrill\\LIME_application"
R_dir <- file.path(main_dir, "R_functions")
funs <- list.files(R_dir)
ignore <- sapply(1:length(funs), function(x) source(file.path(R_dir, funs[x])))
region_dir <- file.path(main_dir, "kenyan_reef_fish")
stock_dir <- file.path(region_dir, "Siganus_sutor")
dir.create(stock_dir, showWarnings=FALSE)
data_dir <- file.path(region_dir, "data")
dir.create(data_dir, showWarnings=FALSE)
figs_dir <- file.path(stock_dir, "figures")
dir.create(figs_dir, showWarnings=FALSE)
######################################
## Species life history
######################################
plist <- readRDS(file.path(data_dir, "Siganus_sutor_life_history_annual.rds"))
plist_m <- readRDS(file.path(data_dir, "Siganus_sutor_life_history_monthly.rds"))
######################################
## Load data
######################################
LCraw <- readRDS(file.path(data_dir, "Siganus_sutor_LCraw.rds"))
LCm_raw <- readRDS(file.path(data_dir, "Siganus_sutor_LCraw_monthly.rds"))
LCweight <- readRDS(file.path(data_dir, "Siganus_sutor_LC_weighted.rds"))
LCm_weight <- readRDS(file.path(data_dir, "Siganus_sutor_LCmonthly_weighted.rds"))
LCm_bt <- readRDS(file.path(data_dir, "Siganus_sutor_LCmonthly_baskettrap.rds"))
LCm_highN <- readRDS(file.path(data_dir, "Siganus_sutor_LCmonthly_weighted_Nup.rds"))
LCy_bt <- readRDS(file.path(data_dir, "Siganus_sutor_LC_baskettrap.rds"))
LCy_up <- readRDS(file.path(data_dir, "Siganus_sutor_LC_weighted_Nup.rds"))
####################################
## run assessments
####################################
res_dir <- file.path(stock_dir, "results")
dir.create(res_dir, showWarnings=FALSE)
years_o <- as.numeric(rownames(LCraw))
years_t <- min(years_o):max(years_o)
months_o <- as.numeric(rownames(LCm_raw))
months_t <- min(months_o):max(months_o)
input_data_y <- list("years"=years_t, "LF"=LCweight)
input_data_m <- list("years"=months_t, "LF"=LCm_weight)
input_data_bt <- list("years"=months_t, "LF"=LCm_bt)
input_data_up <- list("years"=months_t, "LF"=LCm_highN)
input_data_y_bt <- list("years"=years_t, "LF"=LCy_bt)
input_data_y_up <- list("years"=years_t, "LF"=LCy_up)
############### weighted length comps ##########################
## annual LBPSR
out <- file.path(res_dir, "LBSPR_LCy")
dir.create(out, showWarnings=FALSE)
run <- run_LBSPR(modpath=out, lh=plist, species="x", input_data=input_data_y, rewrite=TRUE, simulation=FALSE)
lbspr_res <- readRDS(file.path(out, "LBSPR_results.rds"))
lbspr_res$SPR[length(lbspr_res$SPR)] - 1.96*sqrt(lbspr_res$SPR_Var[length(lbspr_res$SPR_Var)])
lbspr_res$SPR[length(lbspr_res$SPR)] + 1.96*sqrt(lbspr_res$SPR_Var[length(lbspr_res$SPR_Var)])
png(file.path(out, "LCfits.png"), width=16, height=10, res=200, units="in")
plot_LCfits(Inputs=input_data_y, Report=NULL, true_lc_years=rownames(input_data_y$LF), LBSPR=lbspr_res, ylim=c(0,0.2), ML50=plist$ML50, SL50=lbspr_res$SL50)
dev.off()
## annual LBPSR
out <- file.path(res_dir, "LBSPR_LCy_baskettrap")
dir.create(out, showWarnings=FALSE)
run <- run_LBSPR(modpath=out, lh=plist, species="x", input_data=input_data_y_bt, rewrite=TRUE, simulation=FALSE)
lbspr_res_bt <- readRDS(file.path(out, "LBSPR_results.rds"))
## annual LBPSR
out <- file.path(res_dir, "LBSPR_LCy_highN")
dir.create(out, showWarnings=FALSE)
run <- run_LBSPR(modpath=out, lh=plist, species="x", input_data=input_data_y_up, rewrite=TRUE, simulation=FALSE)
lbspr_res_highN <- readRDS(file.path(out, "LBSPR_results.rds"))
## annual LBPSR
out <- file.path(res_dir, "LBSPR_LCy_altM")
dir.create(out, showWarnings=FALSE)
plist_new <- plist
plist_new$M <- 1.49
run <- run_LBSPR(modpath=out, lh=plist_new, species="x", input_data=input_data_y, rewrite=TRUE, simulation=FALSE)
lbspr_res_altM <- readRDS(file.path(out, "LBSPR_results.rds"))
## annual LIME
out <- file.path(res_dir, "LCy")
dir.create(out, showWarnings=FALSE)
res <- run_LIME(modpath=out, lh=plist, input_data=input_data_y, est_sigma="log_sigma_R", data_avail="LC", itervec=NULL, rewrite=FALSE, simulation=FALSE, f_true=FALSE, C_opt=0, LFdist=1, param_adjust=c("SigmaF","SigmaR","SigmaC","SigmaI"), val_adjust=c(0.2,0.737,0.2,0.2), F_up=10, S_l_input=-1, fix_param=FALSE, theta_type=1, randomR=TRUE)
Report <- readRDS(file.path(out, "Report.rds"))
Sdreport <- readRDS(file.path(out, "Sdreport.rds"))
Inputs <- readRDS(file.path(out, "Inputs.rds"))
png(file.path(out, "output.png"), width=16, height=10, res=200, units="in")
plot_output(Inputs=Inputs, Report=Report, Sdreport=Sdreport, all_years=input_data_y$years, lc_years=rownames(input_data_y$LF), LBSPR=lbspr_res, true_years=years_t)
dev.off()
png(file.path(out, "LCfits.png"), width=16, height=10, res=200, units="in")
plot_LCfits(Inputs=Inputs$Data, Report=Report, true_lc_years=rownames(input_data_y$LF), LBSPR=lbspr_res, ylim=c(0,0.3))
dev.off()
### weighted length comp - counts only - all months
out <- file.path(res_dir, "LCm")
dir.create(out, showWarnings=FALSE)
res <- run_LIME(modpath=out, lh=plist_m, input_data=input_data_m, est_sigma=c("log_sigma_R"), data_avail="LC", itervec=NULL, rewrite=TRUE, simulation=FALSE, f_true=FALSE, C_opt=0, LFdist=1, param_adjust=c("SigmaF","SigmaR","SigmaC","SigmaI"), val_adjust=c(0.2,0.737,0.2,0.2), F_up=10, S_l_input=-1, fix_param=FALSE, theta_type=1, randomR=TRUE)
df <- readRDS(file.path(out, "check_convergence.rds"))
Report <- readRDS(file.path(out, "Report.rds"))
Sdreport <- readRDS(file.path(out, "Sdreport.rds"))
Inputs <- readRDS(file.path(out, "Inputs.rds"))
base_rep <- Report
F40 <- tryCatch(uniroot(calc_ref, lower=0, upper=200, ages=plist_m$ages, Mat_a=Report$Mat_a, W_a=Report$W_a, M=Report$M, S_a=Report$S_a, ref=0.4)$root, error=function(e) NA)
Report$F_t/F40
sd <- summary(Sdreport)[which(rownames(summary(Sdreport))=="lF_y"),]
sd[,2][which(is.na(sd[,2]))] <- 0
read_sdreport(sd, log=TRUE)[c(length(Report$F_y), length(Report$F_y)+1)]
sd <- summary(Sdreport)[which(rownames(summary(Sdreport))=="SPR_t"),]
sd[,2][which(is.na(sd[,2]))] <- 0
read_sdreport(sd, log=FALSE)[c(length(Report$SPR_t), length(Report$SPR_t)+1)]
pthresh <- pnorm(0.3, Report$SPR_t[length(Report$SPR_t)], sd[,2][length(Report$SPR_t)])
ptarget <- pnorm(0.45, Report$SPR_t[length(Report$SPR_t)], sd[,2][length(Report$SPR_t)]) - pnorm(0.35, Report$SPR_t[length(Report$SPR_t)], sd[,2][length(Report$SPR_t)])
pup <- 1-pnorm(0.40, Report$SPR_t[length(Report$SPR_t)], sd[,2][length(Report$SPR_t)])
png(file.path(out, "output.png"), width=16, height=10, res=200, units="in")
plot_output(Inputs=Inputs, Report=Report, Sdreport=Sdreport, all_years=input_data_m$years, lc_years=rownames(input_data_m$LF), LBSPR=lbspr_res, lh=plist_m, true_years=years_t)
dev.off()
png(file.path(out, "LCfits.png"), width=16, height=10, res=200, units="in")
plot_LCfits(Inputs=Inputs$Data, Report=Report, true_lc_years=rownames(input_data_m$LF), LBSPR=NULL, ylim=c(0,0.3))
dev.off()
### weighted length comp - counts only - all months - basket trap only
out <- file.path(res_dir, "LCm_baskettrap")
dir.create(out, showWarnings=FALSE)
res <- run_LIME(modpath=out, lh=plist_m, input_data=input_data_bt, est_sigma="log_sigma_R", data_avail="LC", itervec=NULL, rewrite=FALSE, simulation=FALSE, f_true=FALSE, C_opt=0, LFdist=1, param_adjust=c("SigmaF","SigmaR","SigmaC","SigmaI"), val_adjust=c(0.2,0.737,0.2,0.2), F_up=10, S_l_input=-1, fix_param=FALSE, theta_type=1, randomR=TRUE)
df <- readRDS(file.path(out, "check_convergence.rds"))
Report <- readRDS(file.path(out, "Report.rds"))
Sdreport <- readRDS(file.path(out, "Sdreport.rds"))
Inputs <- readRDS(file.path(out, "Inputs.rds"))
png(file.path(out, "output.png"), width=16, height=10, res=200, units="in")
plot_output(Inputs=Inputs, Report=Report, Sdreport=Sdreport, all_years=input_data_bt$years, lc_years=rownames(input_data_bt$LF), LBSPR=lbspr_res_bt, lh=plist_m, true_years=years_t)
dev.off()
png(file.path(out, "LCfits.png"), width=16, height=10, res=200, units="in")
plot_LCfits(Inputs=Inputs$Data, Report=Report, true_lc_years=rownames(input_data_bt$LF), LBSPR=NULL, ylim=c(0,0.3))
dev.off()
### weighted length comp - counts only - months with high sample size
out <- file.path(res_dir, "LCm_highN")
dir.create(out, showWarnings=FALSE)
res <- run_LIME(modpath=out, lh=plist_m, input_data=input_data_up, est_sigma="log_sigma_R", data_avail="LC", itervec=NULL, rewrite=FALSE, simulation=FALSE, f_true=FALSE, C_opt=0, LFdist=1, param_adjust=c("SigmaF","SigmaR","SigmaC","SigmaI"), val_adjust=c(0.2,0.737,0.2,0.2), F_up=10, S_l_input=-1, fix_param=FALSE, theta_type=1, randomR=TRUE)
df <- readRDS(file.path(out, "check_convergence.rds"))
Report <- readRDS(file.path(out, "Report.rds"))
Sdreport <- readRDS(file.path(out, "Sdreport.rds"))
Inputs <- readRDS(file.path(out, "Inputs.rds"))
png(file.path(out, "output.png"), width=16, height=10, res=200, units="in")
plot_output(Inputs=Inputs, Report=Report, Sdreport=Sdreport, all_years=input_data_up$years, lc_years=rownames(input_data_up$LF), LBSPR=lbspr_res_highN, lh=plist_m, true_years=years_t)
dev.off()
png(file.path(out, "LCfits.png"), width=16, height=10, res=200, units="in")
plot_LCfits(Inputs=Inputs$Data, Report=Report, true_lc_years=rownames(input_data_up$LF), LBSPR=NULL, ylim=c(0,0.3))
dev.off()
### weighted length comp - counts only - all months
out <- file.path(res_dir, "LCm_altM")
dir.create(out, showWarnings=FALSE)
res <- run_LIME(modpath=out, lh=plist_m, input_data=input_data_m, est_sigma="log_sigma_R", data_avail="LC", itervec=NULL, rewrite=FALSE, simulation=FALSE, f_true=FALSE, C_opt=0, LFdist=1, param_adjust=c("SigmaF","SigmaR","SigmaC","SigmaI", "M"), val_adjust=c(0.2,0.737,0.2,0.2, 1.49), F_up=10, S_l_input=-1, fix_param=FALSE, theta_type=1, randomR=TRUE)
df <- readRDS(file.path(out, "check_convergence.rds"))
Report <- readRDS(file.path(out, "Report.rds"))
Sdreport <- readRDS(file.path(out, "Sdreport.rds"))
Inputs <- readRDS(file.path(out, "Inputs.rds"))
F40 <- tryCatch(uniroot(calc_ref, lower=0, upper=200, ages=plist_m$ages, Mat_a=Report$Mat_a, W_a=Report$W_a, M=Report$M, S_a=Report$S_a, ref=0.4)$root, error=function(e) NA)
Report$F_t/F40
sd <- summary(Sdreport)[which(rownames(summary(Sdreport))=="lF_y"),]
sd[,2][which(is.na(sd[,2]))] <- 0
read_sdreport(sd, log=TRUE)[c(length(Report$F_y), length(Report$F_y)+1)]
sd <- summary(Sdreport)[which(rownames(summary(Sdreport))=="SPR_t"),]
sd[,2][which(is.na(sd[,2]))] <- 0
read_sdreport(sd, log=FALSE)[c(length(Report$SPR_t), length(Report$SPR_t)+1)]
pthresh <- pnorm(0.3, Report$SPR_t[length(Report$SPR_t)], sd[,2][length(Report$SPR_t)])
ptarget <- pnorm(0.45, Report$SPR_t[length(Report$SPR_t)], sd[,2][length(Report$SPR_t)]) - pnorm(0.35, Report$SPR_t[length(Report$SPR_t)], sd[,2][length(Report$SPR_t)])
png(file.path(out, "output.png"), width=16, height=10, res=200, units="in")
plot_output(Inputs=Inputs, Report=Report, Sdreport=Sdreport, all_years=input_data_m$years, lc_years=rownames(input_data_m$LF), LBSPR=lbspr_res_altM, lh=plist_m, true_years=years_t)
dev.off()
png(file.path(out, "LCfits.png"), width=16, height=10, res=200, units="in")
plot_LCfits(Inputs=Inputs$Data, Report=Report, true_lc_years=rownames(input_data_m$LF), LBSPR=NULL, ylim=c(0,0.3))
dev.off()
### weighted length comp - counts only - all months
out <- file.path(res_dir, "LCm_sigRstartlow")
dir.create(out, showWarnings=FALSE)
res <- run_LIME(modpath=out, lh=plist_m, input_data=input_data_m, est_sigma=c("log_sigma_R"), data_avail="LC", itervec=NULL, rewrite=TRUE, simulation=FALSE, f_true=FALSE, C_opt=0, LFdist=1, param_adjust=c("SigmaF","SigmaR","SigmaC","SigmaI"), val_adjust=c(0.2,0.3,0.2,0.2), F_up=10, S_l_input=-1, fix_param=FALSE, theta_type=1, randomR=TRUE)
df <- readRDS(file.path(out, "check_convergence.rds"))
Report <- readRDS(file.path(out, "Report.rds"))
Sdreport <- readRDS(file.path(out, "Sdreport.rds"))
Inputs <- readRDS(file.path(out, "Inputs.rds"))
png(file.path(out, "output.png"), width=16, height=10, res=200, units="in")
plot_output(Inputs=Inputs, Report=Report, Sdreport=Sdreport, all_years=input_data_m$years, lc_years=rownames(input_data_m$LF), LBSPR=NULL, lh=plist_m, true_years=years_t)
dev.off()
png(file.path(out, "LCfits.png"), width=16, height=10, res=200, units="in")
plot_LCfits(Inputs=Inputs$Data, Report=Report, true_lc_years=rownames(input_data_m$LF), LBSPR=NULL, ylim=c(0,0.3))
dev.off()
### weighted length comp - counts only - all months
out <- file.path(res_dir, "LCm_selexstartalt")
dir.create(out, showWarnings=FALSE)
plist_m_alt <- create_lh_list(vbk=plist$vbk, linf=plist$linf, lwa=plist$lwa, lwb=plist$lwb, S50=20, S95=25, selex_input="length", M50=plist$ML50, maturity_input="length", SigmaR=0.7, SigmaF=0.2, M=plist$M, F1=NULL, CVlen=0.1, nseasons=12, binwidth=1)
res <- run_LIME(modpath=out, lh=plist_m_alt, input_data=input_data_m, est_sigma=c("log_sigma_R"), data_avail="LC", itervec=NULL, rewrite=TRUE, simulation=FALSE, f_true=FALSE, C_opt=0, LFdist=1, param_adjust=c("SigmaF","SigmaR","SigmaC","SigmaI"), val_adjust=c(0.2,0.737,0.2,0.2), F_up=10, S_l_input=-1, fix_param=FALSE, theta_type=1, randomR=TRUE)
df <- readRDS(file.path(out, "check_convergence.rds"))
Report <- readRDS(file.path(out, "Report.rds"))
Sdreport <- readRDS(file.path(out, "Sdreport.rds"))
Inputs <- readRDS(file.path(out, "Inputs.rds"))
png(file.path(out, "output.png"), width=16, height=10, res=200, units="in")
plot_output(Inputs=Inputs, Report=Report, Sdreport=Sdreport, all_years=input_data_m$years, lc_years=rownames(input_data_m$LF), LBSPR=NULL, lh=plist_m, true_years=years_t)
dev.off()
png(file.path(out, "LCfits.png"), width=16, height=10, res=200, units="in")
plot_LCfits(Inputs=Inputs$Data, Report=Report, true_lc_years=rownames(input_data_m$LF), LBSPR=NULL, ylim=c(0,0.3))
dev.off()
## dome-shaped selex
plist_m_lowdome <- create_lh_list(vbk=plist$vbk, linf=plist$linf, t0=plist$t0, M=plist$M, lwa=plist$lwa, lwb=plist$lwb, S50=plist$SL50, S95=plist$SL95, M50=plist$ML50, M95=plist$ML95, selex_input="length", maturity_input="length", nseasons=12, binwidth=1, SigmaR=0.777, SigmaF=0.2, selex_type="dome", dome_sd=41)
plist_m_highdome <- create_lh_list(vbk=plist$vbk, linf=plist$linf, t0=plist$t0, M=plist$M, lwa=plist$lwa, lwb=plist$lwb, S50=plist$SL50, S95=plist$SL95, M50=plist$ML50, M95=plist$ML95, selex_input="length", maturity_input="length", nseasons=12, binwidth=1, SigmaR=0.777, SigmaF=0.2, selex_type="dome", dome_sd=22)
par(mfrow=c(1,1), mar=c(5,5,4,4))
plot(plist_m$S_l, type="o", lwd=2, pch=19, cex.axis=2, xlab="Length (cm)", ylab="Proportion", cex.lab=2)
lines(plist_m_lowdome$S_l, type="o", lwd=2, lty=2, col="steelblue", pch=19)
lines(plist_m_highdome$S_l, type="o", lwd=2, lty=3, col="tomato", pch=19)
## low-dome
out <- file.path(res_dir, "LCm_lowdome")
dir.create(out, showWarnings=FALSE)
res <- run_LIME(modpath=out, lh=plist_m, input_data=input_data_m, est_sigma=FALSE, data_avail="LC", itervec=NULL, rewrite=FALSE, simulation=FALSE, f_true=FALSE, C_opt=0, LFdist=1, param_adjust=c("SigmaF","SigmaR","SigmaC","SigmaI"), val_adjust=c(0.2,0.737,0.2,0.2), F_up=10, S_l_input=plist_m_lowdome$S_l, fix_param=FALSE, theta_type=1, randomR=TRUE)
df <- readRDS(file.path(out, "check_convergence.rds"))
Report <- readRDS(file.path(out, "Report.rds"))
Sdreport <- readRDS(file.path(out, "Sdreport.rds"))
Inputs <- readRDS(file.path(out, "Inputs.rds"))
png(file.path(out, "output.png"), width=16, height=10, res=200, units="in")
plot_output(Inputs=Inputs, Report=Report, Sdreport=Sdreport, all_years=input_data_m$years, lc_years=rownames(input_data_m$LF), LBSPR=lbspr_res, lh=plist_m, true_years=years_t)
dev.off()
png(file.path(out, "LCfits.png"), width=16, height=10, res=200, units="in")
plot_LCfits(Inputs=Inputs$Data, Report=Report, true_lc_years=rownames(input_data_m$LF), LBSPR=NULL, ylim=c(0,0.3))
dev.off()
## high-dome
out <- file.path(res_dir, "LCm_highdome")
dir.create(out, showWarnings=FALSE)
res <- run_LIME(modpath=out, lh=plist_m, input_data=input_data_m, est_sigma=FALSE, data_avail="LC", itervec=NULL, rewrite=FALSE, simulation=FALSE, f_true=FALSE, C_opt=0, LFdist=1, param_adjust=c("SigmaF","SigmaR","SigmaC","SigmaI"), val_adjust=c(0.2,0.737,0.2,0.2), F_up=10, S_l_input=plist_m_highdome$S_l, fix_param=FALSE, theta_type=1, randomR=TRUE)
df <- readRDS(file.path(out, "check_convergence.rds"))
Report <- readRDS(file.path(out, "Report.rds"))
Sdreport <- readRDS(file.path(out, "Sdreport.rds"))
Inputs <- readRDS(file.path(out, "Inputs.rds"))
png(file.path(out, "output.png"), width=16, height=10, res=200, units="in")
plot_output(Inputs=Inputs, Report=Report, Sdreport=Sdreport, all_years=input_data_up$years, lc_years=rownames(input_data_up$LF), LBSPR=NULL, lh=plist_m, true_years=years_t)
dev.off()
png(file.path(out, "LCfits.png"), width=16, height=10, res=200, units="in")
plot_LCfits(Inputs=Inputs$Data, Report=Report, true_lc_years=rownames(input_data_up$LF), LBSPR=NULL, ylim=c(0,0.3))
dev.off()
###########################
## likelihood profiles
###########################
## sigma R
out <- file.path(res_dir, "sens_SigmaR")
dir.create(out, showWarnings=FALSE)
sigR_vec <- seq(0.05, 0.95, length=10)
for(i in 1:length(sigR_vec)){
out2 <- file.path(out, i)
dir.create(out2, showWarnings=FALSE)
res <- run_LIME(modpath=out2, lh=plist_m, input_data=input_data_m, est_sigma=FALSE, data_avail="LC", itervec=NULL, rewrite=FALSE, simulation=FALSE, f_true=FALSE, C_opt=0, LFdist=1, param_adjust=c("SigmaF","SigmaR"), val_adjust=c(0.2,sigR_vec[i]), F_up=10, S_l_input=-1, fix_param=FALSE, theta_type=1, randomR=TRUE)
}
rep1 <- readRDS(file.path(res_dir, "LCm", "Report.rds"))
sigR_like <- rep(NA, length(sigR_vec))
col_conv <- rep("black", length(sigR_vec))
for(i in 1:length(sigR_vec)){
rep <- readRDS(file.path(out, i, "Report.rds"))
sdrep <- readRDS(file.path(out, i, "Sdreport.rds"))
sigR_like[i] <- rep$jnll
if(file.exists(file.path(out, i, "high_final_gradient.txt")) | all(is.na(sdrep))) col_conv[i] <- "gray"
}
png(file.path(figs_dir, "sigR_likeprof.png"), height=10, width=14, res=200, units="in")
plot(sigR_vec, sigR_like, pch=19, cex=2, xlab="Fixed SigmaR", ylab="NLL", col=col_conv)
points(rep1$sigma_R, rep1$jnll, pch=19, cex=2, col="blue")
dev.off()
## check fishbase and natural mortality toolfor life history distributions
proper_name <- "Siganus sutor"
genus <- strsplit(proper_name, " ")[[1]][1]
# growth2 <- popgrowth(species_list(Genus=genus))
# saveRDS(growth2, file.path(data_dir, "genus_info.rds"))
## linf
out <- file.path(res_dir, "sens_linf")
dir.create(out, showWarnings=FALSE)
growth2 <- readRDS(file.path(data_dir, "genus_info.rds"))
linf_genus <- growth2$Loo
linf_vec <- seq(min(linf_genus), max(linf_genus), length=10)
for(i in 1:length(linf_vec)){
out2 <- file.path(out, i)
dir.create(out2, showWarnings=FALSE)
res <- run_LIME(modpath=out2, lh=plist_m, input_data=input_data_m, est_sigma=FALSE, data_avail="LC", itervec=NULL, rewrite=TRUE, simulation=FALSE, f_true=FALSE, C_opt=0, LFdist=1, param_adjust=c("SigmaF","SigmaR","SigmaC","SigmaI", "linf"), val_adjust=c(0.2,0.737,0.2,0.2, linf_vec[i]), F_up=10, S_l_input=-1, fix_param=FALSE, theta_type=1, randomR=TRUE)
}
rep1 <- readRDS(file.path(res_dir, "LCm", "Report.rds"))
linf_like <- rep(NA, length(linf_vec))
col_conv <- rep("black", length(linf_vec))
for(i in 1:length(linf_vec)){
rep <- readRDS(file.path(out, i, "Report.rds"))
sdrep <- readRDS(file.path(out, i, "Sdreport.rds"))
linf_like[i] <- rep$jnll
if(file.exists(file.path(out, i, "high_final_gradient.txt")) | all(is.na(sdrep))) col_conv[i] <- "gray"
}
png(file.path(figs_dir, "linf_likeprof.png"), height=10, width=14, res=200, units="in")
plot(linf_vec, linf_like, pch=19, cex=2, xlab="Assumed Linf", ylab="NLL", col=col_conv)
points(plist_m$linf, rep1$jnll, pch=19, cex=2, col="blue")
dev.off()
## vbk
out <- file.path(res_dir, "sens_vbk")
dir.create(out, showWarnings=FALSE)
growth2 <- readRDS(file.path(data_dir, "genus_info.rds"))
vbk_genus <- growth2$K
vbk_vec <- seq(min(vbk_genus), max(vbk_genus), length=10)
for(i in 1:length(vbk_vec)){
out2 <- file.path(out, i)
dir.create(out2, showWarnings=FALSE)
res <- run_LIME(modpath=out2, lh=plist_m, input_data=input_data_m, est_sigma=FALSE, data_avail="LC", itervec=NULL, rewrite=TRUE, simulation=FALSE, f_true=FALSE, C_opt=0, LFdist=1, param_adjust=c("SigmaF","SigmaR","vbk"), val_adjust=c(0.2,0.737,vbk_vec[i]), F_up=10, S_l_input=-1, fix_param=FALSE, theta_type=1, randomR=TRUE)
}
rep1 <- readRDS(file.path(res_dir, "LCm", "Report.rds"))
vbk_like <- rep(NA, length(vbk_vec))
col_conv <- rep("black", length(vbk_vec))
for(i in 1:length(vbk_vec)){
rep <- readRDS(file.path(out, i, "Report.rds"))
sdrep <- readRDS(file.path(out, i, "Sdreport.rds"))
vbk_like[i] <- rep$jnll
if(file.exists(file.path(out, i, "high_final_gradient.txt")) | all(is.na(sdrep))) col_conv[i] <- "gray"
}
png(file.path(figs_dir, "vbk_likeprof.png"), height=10, width=14, res=200, units="in")
plot(vbk_vec, vbk_like, pch=19, cex=2, xlab="Assumed vbk", ylab="NLL", col=col_conv)
points(plist_m$vbk, rep1$jnll, pch=19, cex=2, col="blue")
dev.off()
## M
out <- file.path(res_dir, "sens_M")
dir.create(out, showWarnings=FALSE)
M_tool <- c(1.37, 1.28, 1.37, 1.35, 0.95, 0.53, 1.30, 1.39, 1.88, 1.65)
M_vec <- seq(min(M_tool), max(M_tool), length=10)
for(i in 1:length(M_vec)){
out2 <- file.path(out, i)
dir.create(out2, showWarnings=FALSE)
res <- run_LIME(modpath=out2, lh=plist_m, input_data=input_data_m, est_sigma=FALSE, data_avail="LC", itervec=NULL, rewrite=TRUE, simulation=FALSE, f_true=FALSE, C_opt=0, LFdist=1, param_adjust=c("SigmaF","SigmaR","SigmaC","SigmaI", "M"), val_adjust=c(0.2,0.737,0.2,0.2, M_vec[i]), F_up=10, S_l_input=-1, fix_param=FALSE, theta_type=1, randomR=TRUE)
}
rep1 <- readRDS(file.path(res_dir, "LCm", "Report.rds"))
M_like <- rep(NA, length(M_vec))
col_conv <- rep("black", length(M_vec))
for(i in 1:length(M_vec)){
inp <- readRDS(file.path(out, i, "Inputs.rds"))
rep <- readRDS(file.path(out, i, "Report.rds"))
sdrep <- readRDS(file.path(out, i, "Sdreport.rds"))
M_like[i] <- rep$jnll
if(file.exists(file.path(out, i, "high_final_gradient.txt")) | all(is.na(sdrep))) col_conv[i] <- "gray"
}
png(file.path(figs_dir, "M_likeprof.png"), height=10, width=14, res=200, units="in")
plot(M_vec, M_like, pch=19, cex=2, xlab="Assumed M", ylab="NLL", col=col_conv)
points(plist_m$M*12, rep1$jnll, pch=19, cex=2, col="blue")
dev.off()
## Composite figure: assumed-parameter distributions (panels a-d), likelihood
## profiles over each parameter (e-h), and the resulting SPR trajectories for
## low/high profiled values vs. the base case (i-l), for M, Linf, k, SigmaR.
## Orange / orangered mark the chosen "low" / "high" profile runs; blue is base.
png(file.path(figs_dir, "like_prof_compare.png"), height=10, width=15, units="in", res=200)
## custom layout: 4 density panels, 4 profile panels, one spacer row, 4 SPR panels
mat <- matrix(c(1,2,3,4,
1,2,3,4,
5,6,7,8,
5,6,7,8,
9,9,9,9,
10,11,12,13,
10,11,12,13), nrow=7, ncol=4, byrow=TRUE)
nf <- layout(mat)
layout.show(nf)
par(mar=c(0,4,0,0), omi=c(0.5,0.5,0.5,0.5))
rep1 <- readRDS(file.path(res_dir, "LCm", "Report.rds"))
growth2 <- readRDS(file.path(data_dir, "genus_info.rds"))
## using input AgeMax=4, Linf=36.2, vbk=0.87, Age at maturity(years)=1
## --- panel (a): distribution of candidate M values ---
M_tool <- c(1.37, 1.28, 1.37, 1.35, 0.95, 0.53, 1.30, 1.39, 1.88, 1.65)
M_toUse <- median(M_tool)
M_h <- hist(M_tool, plot=FALSE)
M_x <- seq(min(M_tool), max(M_tool), length=60)
M_y <- dnorm(M_x, mean=mean(M_tool), sd=sd(M_tool))
## rescale normal density to histogram-count scale, then normalize to sum to 1
M_y2 <- (M_y*diff(M_h$mids[1:2])*length(M_tool))/sum((M_y*diff(M_h$mids[1:2])*length(M_tool)))
plot(M_x, M_y2, col="black", lwd=2, type="l", xaxt="n", xlab="", ylab="", xaxs="i", yaxs="i", cex.axis=2)
rug(M_tool, ticksize=0.15, lwd=2)
# M_d <- rtnorm(1, mean=mean(M_tool), sd=sd(M_tool), lower=min(M_tool), upper=max(M_tool))
# abline(v=M_d, lty=2, col="red", lwd=2)
abline(v=plist$M, lwd=2, col="blue")
mtext(side=3, "M", cex=2.5, line=1)
mtext("Probability", side=2, line=3, cex=2)
print.letter("(a)", xy=c(0.9,0.95), cex=2)
## --- panel (b): distribution of Linf across the genus ---
linf_genus <- growth2$Loo
linf_h <- hist(linf_genus, plot=FALSE)
linf_x <- seq(min(linf_genus), max(linf_genus), length=60)
linf_y <- dnorm(linf_x, mean=mean(linf_genus), sd=sd(linf_genus))
linf_y2 <- linf_y*diff(linf_h$mids[1:2])*length(linf_genus)/sum(linf_y*diff(linf_h$mids[1:2])*length(linf_genus))
plot(linf_x, linf_y2, col="black", lwd=2, type="l", xaxt="n",xlab="", ylab="", xaxs="i", yaxs="i", cex.axis=2)
rug(linf_genus, ticksize=0.15, lwd=2)
# linf_d <- rtnorm(1, mean=mean(linf_genus), sd=sd(linf_genus), lower=min(linf_genus), upper=max(linf_genus))
# abline(v=linf_d, lty=2, col="red", lwd=2)
abline(v=plist$linf, lwd=2, col="blue")
mtext(side=3, "Linf", cex=2.5, line=1)
print.letter("(b)", xy=c(0.9,0.95), cex=2)
## --- panel (c): distribution of von Bertalanffy k across the genus ---
vbk_genus <- growth2$K
vbk_h <- hist(vbk_genus, plot=FALSE)
vbk_x <- seq(min(vbk_genus), max(vbk_genus), length=60)
vbk_y <- dnorm(vbk_x, mean=mean(vbk_genus), sd=sd(vbk_genus))
vbk_y2 <- vbk_y*diff(vbk_h$mids[1:2])*length(vbk_genus)/sum(vbk_y*diff(vbk_h$mids[1:2])*length(vbk_genus))
plot(vbk_x, vbk_y2, col="black", lwd=2, type="l", xaxt="n",xlab="", ylab="", xaxs="i", yaxs="i", cex.axis=2)
rug(vbk_genus, ticksize=0.15, lwd=2)
# vbk_d <- rtnorm(1, mean=mean(vbk_genus), sd=sd(vbk_genus), lower=min(vbk_genus), upper=max(vbk_genus))
# abline(v=vbk_d, lty=2, col="red", lwd=2)
abline(v=plist$vbk, lwd=2, col="blue")
mtext(side=3, "k", cex=2.5, line=1)
print.letter("(c)", xy=c(0.9,0.95), cex=2)
## --- panel (d): assumed SigmaR prior (normal, mean 0.737, sd 0.353) ---
sigR_x <- seq(0, 1.5, length=60)
sigR_y <- dnorm(sigR_x, mean=mean(0.737), sd=0.353)/sum(dnorm(sigR_x, mean=mean(0.737), sd=0.353))
plot(sigR_x, sigR_y, col="black", lwd=2, type="l", xaxt="n",xlab="", ylab="", xaxs="i", yaxs="i", cex.axis=2)
# sigR_d <- rtnorm(1, mean=mean(sigR_genus), sd=sd(sigR_genus), lower=min(sigR_genus), upper=max(sigR_genus))
# abline(v=sigR_d, lty=2, col="red", lwd=2)
abline(v=0.737, lwd=2, col="blue")
mtext(side=3, "SigmaR", cex=2.5, line=1)
print.letter("(d)", xy=c(0.9,0.95), cex=2)
## --- panel (e): M likelihood profile (re-read from the sens_M runs) ---
out <- file.path(res_dir, "sens_M")
dir.create(out, showWarnings=FALSE)
M_vec <- seq(min(M_tool), max(M_tool), length=10)
M_like <- rep(NA, length(M_vec))
col_conv <- rep("black", length(M_vec))
for(i in 1:length(M_vec)){
inp <- readRDS(file.path(out, i, "Inputs.rds"))
rep <- readRDS(file.path(out, i, "Report.rds"))
sdrep <- readRDS(file.path(out, i, "Sdreport.rds"))
M_like[i] <- rep$jnll
if(file.exists(file.path(out, i, "high_final_gradient.txt")) | all(is.na(sdrep))) col_conv[i] <- "gray"
}
## indices of the "low" and "high" runs highlighted in the SPR panels below
## NOTE(review): these indices are hand-picked -- confirm they still match the runs
i1 <- 1
i2 <- 8
col_conv[i1] <- "orange"
col_conv[i2] <- "orangered"
plot(x=M_vec, y=M_like, pch=19, cex=3, col=col_conv, xpd=NA, cex.axis=2, xlab="", ylab="", yaxt="n")
axis(2, cex.axis=2, at=pretty(c(min(M_like),max(M_like)))[-length(pretty(c(min(M_like),max(M_like))))])
points(plist_m$M*12, rep1$jnll, pch=19, cex=3, col="blue")
mtext(side=2, "NLL", cex=2, line=3)
print.letter("(e)", xy=c(0.9,0.95), cex=2)
### linf
## --- panel (f): Linf likelihood profile ---
out <- file.path(res_dir, "sens_linf")
dir.create(out, showWarnings=FALSE)
linf_genus <- growth2$Loo
linf_vec <- seq(min(linf_genus), max(linf_genus), length=10)
linf_like <- rep(NA, length(linf_vec))
col_conv_linf <- rep("black", length(linf_vec))
for(i in 1:length(linf_vec)){
rep <- readRDS(file.path(out, i, "Report.rds"))
sdrep <- readRDS(file.path(out, i, "Sdreport.rds"))
linf_like[i] <- rep$jnll
if(file.exists(file.path(out, i, "high_final_gradient.txt")) | all(is.na(sdrep))) col_conv_linf[i] <- "gray"
}
## "low" run = first converged value rather than a fixed index
i1 <- which(col_conv_linf=="black")[1]
i2 <- 8
col_conv_linf[i1] <- "orange"
col_conv_linf[i2] <- "orangered"
plot(x=linf_vec, y=linf_like, pch=19, cex=3, col=col_conv_linf, xpd=NA, cex.axis=2, xlab="", ylab="", yaxt="n")
axis(2, cex.axis=2, at=pretty(c(min(linf_like),max(linf_like)))[-length(pretty(c(min(linf_like),max(linf_like))))])
points(plist_m$linf, rep1$jnll, pch=19, cex=3, col="blue")
print.letter("(f)", xy=c(0.9,0.95), cex=2)
## vbk
## --- panel (g): k likelihood profile ---
out <- file.path(res_dir, "sens_vbk")
dir.create(out, showWarnings=FALSE)
vbk_genus <- growth2$K
vbk_vec <- seq(min(vbk_genus), max(vbk_genus), length=10)
vbk_like <- rep(NA, length(vbk_vec))
col_conv_vbk <- rep("black", length(vbk_vec))
for(i in 1:length(vbk_vec)){
rep <- readRDS(file.path(out, i, "Report.rds"))
sdrep <- readRDS(file.path(out, i, "Sdreport.rds"))
vbk_like[i] <- rep$jnll
if(file.exists(file.path(out, i, "high_final_gradient.txt")) | all(is.na(sdrep))) col_conv_vbk[i] <- "gray"
}
i1 <- 2
i2 <- 5
col_conv_vbk[i1] <- "orange"
col_conv_vbk[i2] <- "orangered"
plot(x=vbk_vec, y=vbk_like, pch=19, cex=3, col=col_conv_vbk, xpd=NA, cex.axis=2, xlab="", ylab="", yaxt="n")
axis(2, cex.axis=2, at=pretty(c(min(vbk_like),max(vbk_like)))[-length(pretty(c(min(vbk_like),max(vbk_like))))])
points(plist_m$vbk, rep1$jnll, pch=19, cex=3, col="blue")
print.letter("(g)", xy=c(0.9,0.95), cex=2)
## sigma R
## --- panel (h): SigmaR likelihood profile ---
out <- file.path(res_dir, "sens_SigmaR")
dir.create(out, showWarnings=FALSE)
sigR_vec <- seq(0.05, 0.95, length=10)
sigR_like <- rep(NA, length(sigR_vec))
col_conv_sigR <- rep("black", length(sigR_vec))
for(i in 1:length(sigR_vec)){
rep <- readRDS(file.path(out, i, "Report.rds"))
sdrep <- readRDS(file.path(out, i, "Sdreport.rds"))
sigR_like[i] <- rep$jnll
if(file.exists(file.path(out, i, "high_final_gradient.txt")) | all(is.na(sdrep))) col_conv_sigR[i] <- "gray"
}
i1 <- 4
i2 <- 7
col_conv_sigR[i1] <- "orange"
col_conv_sigR[i2] <- "orangered"
plot(x=sigR_vec, y=sigR_like, pch=19, cex=3, col=col_conv_sigR, xpd=NA, cex.axis=2, xlab="", ylab="", xlim=c(min(sigR_x), max(sigR_x)), yaxt="n")
axis(2, cex.axis=2, at=pretty(c(min(sigR_like),max(sigR_like)))[-length(pretty(c(min(sigR_like),max(sigR_like))))])
points(rep1$sigma_R, rep1$jnll, pch=19, cex=3, col="blue")
print.letter("(h)", xy=c(0.9,0.95), cex=2)
## panel 9 is the blank spacer row in the layout
plot(x=1,y=1,type="n", axes=F, ann=F)
### SPR
## --- panels (i)-(l): SPR trajectories for low (orange) / high (orangered)
## profiled values vs. base (blue); dashed line = SPR 40% reference ---
out <- file.path(res_dir, "sens_M")
i1 <- 1
i2 <- 8
replow <- readRDS(file.path(out, i1, "Report.rds"))
rephigh <- readRDS(file.path(out, i2, "Report.rds"))
plot(x=years_t, y=unique(rep1$SPR_t), col="blue", lwd=2, type="l", ylim=c(0,1.1), xlab="", ylab="", cex.axis=2)
lines(x=years_t, y=unique(replow$SPR_t), col="orange", lwd=2)
lines(x=years_t, y=unique(rephigh$SPR_t), col="orangered", lwd=2)
abline(h=0.4, lty=2)
mtext(side=2, "SPR", line=3, cex=2)
mtext(side=1, "Year", line=3, cex=2)
print.letter("(i)", xy=c(0.9,0.95), cex=2)
out <- file.path(res_dir, "sens_linf")
i1 <- which(col_conv_linf=="black")[1]
i2 <- 8
replow <- readRDS(file.path(out, i1, "Report.rds"))
rephigh <- readRDS(file.path(out, i2, "Report.rds"))
plot(x=years_t, y=unique(rep1$SPR_t), col="blue", lwd=2, type="l", ylim=c(0,1.1), xlab="", ylab="", cex.axis=2)
lines(x=years_t, y=unique(replow$SPR_t), col="orange", lwd=2)
lines(x=years_t, y=unique(rephigh$SPR_t), col="orangered", lwd=2)
abline(h=0.4, lty=2)
mtext(side=1, "Year", line=3, cex=2)
print.letter("(j)", xy=c(0.9,0.95), cex=2)
out <- file.path(res_dir, "sens_vbk")
i1 <- 2
i2 <- 5
replow <- readRDS(file.path(out, i1, "Report.rds"))
rephigh <- readRDS(file.path(out, i2, "Report.rds"))
plot(x=years_t, y=unique(rep1$SPR_t), col="blue", lwd=2, type="l", ylim=c(0,1.1), xlab="", ylab="", cex.axis=2)
lines(x=years_t, y=unique(replow$SPR_t), col="orange", lwd=2)
lines(x=years_t, y=unique(rephigh$SPR_t), col="orangered", lwd=2)
abline(h=0.4, lty=2)
mtext(side=1, "Year", line=3, cex=2)
print.letter("(k)", xy=c(0.9,0.95), cex=2)
out <- file.path(res_dir, "sens_SigmaR")
i1 <- 4
i2 <- 7
replow <- readRDS(file.path(out, i1, "Report.rds"))
rephigh <- readRDS(file.path(out, i2, "Report.rds"))
plot(x=years_t, y=unique(rep1$SPR_t), col="blue", lwd=2, type="l", ylim=c(0,1.1), xlab="", ylab="", cex.axis=2)
lines(x=years_t, y=unique(replow$SPR_t), col="orange", lwd=2)
lines(x=years_t, y=unique(rephigh$SPR_t), col="orangered", lwd=2)
abline(h=0.4, lty=2)
mtext(side=1, "Year", line=3, cex=2)
print.letter("(l)", xy=c(0.9,0.95), cex=2)
dev.off()
### model fits figure
## Compare LIME (blue, monthly model) and LBSPR (red, yearly model) fits to the
## observed length-frequency distributions, one panel row per observed year.
out <- file.path(res_dir, "LCm")
dir.create(out, showWarnings=FALSE)
df <- readRDS(file.path(out, "check_convergence.rds"))
Report <- readRDS(file.path(out, "Report.rds"))
Sdreport <- readRDS(file.path(out, "Sdreport.rds"))
Inputs <- readRDS(file.path(out, "Inputs.rds"))
base_rep <- Report
out <- file.path(res_dir, "LBSPR_LCy")
dir.create(out, showWarnings=FALSE)
lbspr_res <- readRDS(file.path(out, "LBSPR_results.rds"))
## monthly observed length frequencies
obs1 <- input_data_m$LF
all_mos <- 1:(length(years_t)*12)
## map each month index to its calendar year
mo_yrs <- as.vector(sapply(1:length(years_t), function(x) rep(years_t[x], 12)))
## for each observed year, pick the first observed month to display
choose_mo <- rep(NA, length(years_o))
for(i in 1:length(years_o)){
sub_mo <- months_o[which(months_o %in% all_mos[which(mo_yrs==years_o[i])])]
choose_mo[i] <- sub_mo[1]
}
## yearly observed length frequencies
obs2 <- input_data_y$LF
png(file.path(figs_dir, "CompareModelFits.png"), height=15, width=8, res=200, units="in")
par(mfcol=c(length(years_o),2), mar=c(0,0,0,0), omi=c(1,1,1,1))
## left column: LIME fit (monthly) to monthly LF proportions
for(i in 1:length(years_o)){
barplot(obs1[which(rownames(obs1)==choose_mo[i]),]/sum(obs1[which(rownames(obs1)==choose_mo[i]),]), xaxs="i", yaxs="i", xaxt="n", yaxt="n", ylim=c(0,0.2), col=gray(0.6), border=NA, space=0)
lines(base_rep$plb[choose_mo[i],], lwd=4, col="blue")
if(i==length(years_o)) axis(1, cex.axis=2)
axis(2, cex.axis=2, at=c(0,0.15), las=2)
## dashed vertical line at length of 50% maturity
abline(v=plist$ML50, lty=2)
box()
}
## right column: LBSPR fit (yearly) to yearly LF proportions
for(i in 1:length(years_o)){
barplot(obs2[which(rownames(obs2)==years_o[i]),]/sum(obs2[which(rownames(obs2)==years_o[i]),]), xaxs="i", yaxs="i", xaxt="n", yaxt="n", ylim=c(0,0.2), col=gray(0.6), border=NA, space=0)
lines(lbspr_res$pLF[,i], lwd=4, col="red")
if(i==length(years_o)) axis(1, cex.axis=2)
print.letter(years_o[i], xy=c(0.8,0.8), cex=3)
abline(v=plist$ML50, lty=2)
box()
}
mtext(side=1, "Length bin (cm)", cex=2, line=4, outer=TRUE)
mtext(side=2, "Proportions", cex=2, line=5, outer=TRUE)
dev.off()
# dev.new()
## aliases reused by all following comparison figures
all_years <- months_t
lc_years <- months_o
true_years <- years_t
## NOTE(review): "fig_dir" here vs "figs_dir" above -- confirm both are defined
## upstream, otherwise one of these png() calls will fail.
png(file.path(fig_dir, "BaseModelOutput.png"), height=10, width=18, units="in", res=200)
## NOTE(review): "LBSPR" object is not created in this chunk (only lbspr_res) --
## confirm it exists upstream.
plot_output(all_years=months_t, lc_years=months_o, true_years=years_t, Report=Report, Inputs=Inputs, Sdreport=Sdreport, LBSPR=LBSPR, lh=plist_m)
dev.off()
png(file.path(fig_dir, "BaseModelLCfits.png"), width=16, height=14, res=200, units="in")
plot_LCfits(Inputs=Inputs$Data, Report=Report, true_lc_years=NULL, LBSPR=NULL, ylim=c(0,0.3))
dev.off()
## compare models
## Three-panel comparison (F, recruitment, SPR) of model runs using different
## data subsets: all data (blue), basket trap only (orangered), high sampling
## months (orange). Shaded polygons are +/- SD envelopes from the SD reports.
png(file.path(fig_dir, "CompareModels.png"), height=8, width=15, res=200, units="in")
dir_list <- list()
dir_list[[1]] <- file.path(res_dir, "LCm_highN")
dir_list[[2]] <- file.path(res_dir, "LCm_baskettrap")
dir_list[[3]] <- file.path(res_dir, "LCm")
rep_list <- sdrep_list <- inp_list <- list()
for(i in 1:length(dir_list)){
rep_list[[i]] <- readRDS(file.path(dir_list[[i]], "Report.rds"))
sdrep_list[[i]] <- readRDS(file.path(dir_list[[i]], "Sdreport.rds"))
inp_list[[i]] <- readRDS(file.path(dir_list[[i]], "Inputs.rds"))
}
col_list <- list()
col_list[[1]] <- col2hex("orange")
col_list[[2]] <- col2hex("orangered")
col_list[[3]] <- "#0000AA"
## x-axis tick positions: ~5 year labels across the time series
by <- round(length(true_years)/5)
lab <- rev(seq(from=true_years[length(true_years)], to=min(true_years), by=-by))
ilab <- which(true_years %in% lab)
## ilab2 = last model time step within each labeled year (for monthly x-axes)
ilab2 <- sapply(1:length(ilab), function(x){
sub <- which(Inputs$Data$S_yrs %in% ilab[x])
return(sub[length(sub)])
})
par(mfrow=c(1,3), mar=c(4,5,1,1), omi=c(0.2,0.2,0.2,0.2))
## --- panel 1: fishing mortality, with F40/F30 reference lines ---
plot(x=1,y=1,type="n", xlim=c(1,length(true_years)), ylim=c(0,3.5), xaxs="i", yaxs="i", cex.axis=2, xlab="Year", ylab="Fishing mortality", cex.lab=2, xaxt="n")
for(i in 1:length(dir_list)){
Inputs <- inp_list[[i]]
Report <- rep_list[[i]]
Sdreport <- sdrep_list[[i]]
## x positions differ for yearly (n_s==1) vs seasonal/monthly (n_s>1) models
if(Inputs$Data$n_s==1){
xY <- seq_along(all_years)
xLC <- which(all_years %in% lc_years)
}
if(Inputs$Data$n_s>1){
xY <- 1:Inputs$Data$n_y
xLC <- unique(Inputs$Data$S_yrs[which(all_years %in% lc_years)])
}
## F reference points solved numerically; NA if root-finding fails
F40 <- tryCatch(uniroot(calc_ref, lower=0, upper=200, ages=plist_m$ages, Mat_a=Report$Mat_a, W_a=Report$W_a, M=Report$M, S_a=Report$S_a, ref=0.4)$root, error=function(e) NA)
F30 <- tryCatch(uniroot(calc_ref, lower=0, upper=200, ages=plist_m$ages, Mat_a=Report$Mat_a, W_a=Report$W_a, M=Report$M, S_a=Report$S_a, ref=0.3)$root, error=function(e) NA)
lines(x=xY, y=Report$F_y, lwd=2, col=col_list[[i]])
points(x=xLC, y=Report$F_y[xLC], col=col_list[[i]], pch=19, xpd=NA)
if(all(is.na(Sdreport))==FALSE){
sd <- summary(Sdreport)[which(rownames(summary(Sdreport))=="lF_y"),]
sd[,2][which(is.na(sd[,2]))] <- 0
polygon( y=read_sdreport(sd, log=TRUE), x=c(which(is.na(sd[,2])==FALSE), rev(which(is.na(sd[,2])==FALSE))), col=paste0(col_list[[i]],"35"), border=NA)
}
if(i==1) axis(1, cex.axis=2, at=ilab, labels=lab)
if(i==length(dir_list)){
abline(h=F40*Inputs$Data$n_s, lwd=2, lty=2)
abline(h=F30*Inputs$Data$n_s, lwd=2, lty=3)
}
if(i==1) legend("bottomleft", legend=c("All data", "Basket trap only", "High sampling months"), col=rev(unlist(col_list)), lty=1, lwd=4, cex=2, bty="n")
}
## --- panel 2: recruitment ---
plot(x=1,y=1,type="n", xlim=c(1,length(all_years)), ylim=c(0,2.5), xaxs="i", yaxs="i", cex.axis=2, xlab="Year", ylab="Recruitment", cex.lab=2, xaxt="n")
for(i in 1:length(dir_list)){
Inputs <- inp_list[[i]]
Report <- rep_list[[i]]
Sdreport <- sdrep_list[[i]]
lines(x=seq_along(all_years), y=Report$R_t, lwd=2, col=col_list[[i]])
points(x=which(all_years %in% lc_years), y=Report$R_t[which(all_years %in% lc_years)], col=col_list[[i]], pch=19, xpd=NA)
if(all(is.na(Sdreport))==FALSE){
sd <- summary(Sdreport)[which(rownames(summary(Sdreport))=="lR_t"),]
sd[,2][which(is.na(sd[,2]))] <- 0
polygon( y=read_sdreport(sd, log=TRUE), x=c(which(is.na(sd[,2])==FALSE), rev(which(is.na(sd[,2])==FALSE))), col=paste0(col_list[[i]],"35"), border=NA)
}
if(i==1) axis(1, cex.axis=2, at=ilab2, labels=lab)
}
## --- panel 3: SPR, with 0.4 / 0.3 reference lines ---
plot(x=1,y=1,type="n", xlim=c(1,length(all_years)), ylim=c(0,0.6), xaxs="i", yaxs="i", cex.axis=2, xlab="Year", ylab="SPR", cex.lab=2, xaxt="n")
for(i in 1:length(dir_list)){
Inputs <- inp_list[[i]]
Report <- rep_list[[i]]
Sdreport <- sdrep_list[[i]]
lines(x=seq_along(all_years), y=Report$SPR_t, lwd=2, col=col_list[[i]])
points(x=which(all_years %in% lc_years), y=Report$SPR_t[which(all_years%in%lc_years)], col=col_list[[i]], pch=19, xpd=NA)
if(all(is.na(Sdreport))==FALSE){
sd <- summary(Sdreport)[which(rownames(summary(Sdreport))=="SPR_t"),]
sd[,2][which(is.na(sd[,2]))] <- 0
polygon( y=read_sdreport(sd, log=FALSE), x=c(which(is.na(sd[,2])==FALSE), rev(which(is.na(sd[,2])==FALSE))), col=paste0(col_list[[i]],"35"), border=NA)
}
if(i==length(dir_list)){
abline(h=0.4, lwd=2, lty=2)
abline(h=0.3, lwd=2, lty=3)
}
if(i==1) axis(1, cex.axis=2, at=ilab2, labels=lab)
}
dev.off()
## compare selectivity models
## Four-panel comparison (selectivity-at-length, F, recruitment, SPR) of runs
## assuming logistic (blue), low-dome (orangered), and high-dome (orange)
## selectivity. Structure mirrors the CompareModels figure above.
png(file.path(fig_dir, "CompareSelex.png"), height=8, width=15, res=200, units="in")
dir_list <- list()
dir_list[[1]] <- file.path(res_dir, "LCm_highdome")
dir_list[[2]] <- file.path(res_dir, "LCm_lowdome")
dir_list[[3]] <- file.path(res_dir, "LCm")
rep_list <- sdrep_list <- inp_list <- list()
for(i in 1:length(dir_list)){
rep_list[[i]] <- readRDS(file.path(dir_list[[i]], "Report.rds"))
sdrep_list[[i]] <- readRDS(file.path(dir_list[[i]], "Sdreport.rds"))
inp_list[[i]] <- readRDS(file.path(dir_list[[i]], "Inputs.rds"))
}
col_list <- list()
col_list[[1]] <- col2hex("orange")
col_list[[2]] <- col2hex("orangered")
col_list[[3]] <- "#0000AA"
par(mfrow=c(2,2), mar=c(4,5,1,1), omi=c(0.2,0.2,0.2,0.2))
## --- panel 1: estimated selectivity-at-length under each assumption ---
plot(x=1,y=1,type="n", xlim=c(1, length(plist_m$S_l)), ylim=c(0,1.1), xaxs="i", yaxs="i", cex.axis=2, xlab="Length bin (cm)", ylab="Selectivity", cex.lab=2, xaxt="n", yaxt="n")
axis(2, cex.axis=2, at=seq(0,1,by=0.25))
for(i in 1:length(dir_list)){
Inputs <- inp_list[[i]]
Report <- rep_list[[i]]
Sdreport <- sdrep_list[[i]]
lines(x=1:length(Report$S_l), y=Report$S_l, lwd=4, col=col_list[[i]])
if(all(is.na(Sdreport))==FALSE){
sd <- summary(Sdreport)[which(rownames(summary(Sdreport))=="S_l"),]
sd[,2][which(is.na(sd[,2]))] <- 0
polygon( y=read_sdreport(sd, log=FALSE), x=c(which(is.na(sd[,2])==FALSE), rev(which(is.na(sd[,2])==FALSE))), col=paste0(col_list[[i]],"35"), border=NA)
}
if(i==1) axis(1, cex.axis=2, at=pretty(c(min(plist_m$highs),max(plist_m$highs))))
}
## --- panel 2: fishing mortality with F40/F30 reference lines ---
plot(x=1,y=1,type="n", xlim=c(1,length(true_years)), ylim=c(0,3.5), xaxs="i", yaxs="i", cex.axis=2, xlab="Year", ylab="Fishing mortality", cex.lab=2, xaxt="n")
for(i in 1:length(dir_list)){
Inputs <- inp_list[[i]]
Report <- rep_list[[i]]
Sdreport <- sdrep_list[[i]]
if(Inputs$Data$n_s==1){
xY <- seq_along(all_years)
xLC <- which(all_years %in% lc_years)
}
if(Inputs$Data$n_s>1){
xY <- 1:Inputs$Data$n_y
xLC <- unique(Inputs$Data$S_yrs[which(all_years %in% lc_years)])
}
F40 <- tryCatch(uniroot(calc_ref, lower=0, upper=200, ages=plist_m$ages, Mat_a=Report$Mat_a, W_a=Report$W_a, M=Report$M, S_a=Report$S_a, ref=0.4)$root, error=function(e) NA)
F30 <- tryCatch(uniroot(calc_ref, lower=0, upper=200, ages=plist_m$ages, Mat_a=Report$Mat_a, W_a=Report$W_a, M=Report$M, S_a=Report$S_a, ref=0.3)$root, error=function(e) NA)
lines(x=xY, y=Report$F_y, lwd=2, col=col_list[[i]])
points(x=xLC, y=Report$F_y[xLC], col=col_list[[i]], pch=19, xpd=NA)
if(all(is.na(Sdreport))==FALSE){
sd <- summary(Sdreport)[which(rownames(summary(Sdreport))=="lF_y"),]
sd[,2][which(is.na(sd[,2]))] <- 0
polygon( y=read_sdreport(sd, log=TRUE), x=c(which(is.na(sd[,2])==FALSE), rev(which(is.na(sd[,2])==FALSE))), col=paste0(col_list[[i]],"35"), border=NA)
}
if(i==1) axis(1, cex.axis=2, at=ilab, labels=lab)
if(i==length(dir_list)){
abline(h=F40*Inputs$Data$n_s, lwd=2, lty=2)
abline(h=F30*Inputs$Data$n_s, lwd=2, lty=3)
}
if(i==1) legend("topright", legend=c("Logistic", "Low dome", "High dome"), col=rev(unlist(col_list)), lty=1, lwd=4, cex=2, bty="n")
}
## --- panel 3: recruitment ---
plot(x=1,y=1,type="n", xlim=c(1,length(all_years)), ylim=c(0,2.5), xaxs="i", yaxs="i", cex.axis=2, xlab="Year", ylab="Recruitment", cex.lab=2, xaxt="n")
for(i in 1:length(dir_list)){
Inputs <- inp_list[[i]]
Report <- rep_list[[i]]
Sdreport <- sdrep_list[[i]]
lines(x=seq_along(all_years), y=Report$R_t, lwd=2, col=col_list[[i]])
points(x=which(all_years %in% lc_years), y=Report$R_t[which(all_years %in% lc_years)], col=col_list[[i]], pch=19, xpd=NA)
if(all(is.na(Sdreport))==FALSE){
sd <- summary(Sdreport)[which(rownames(summary(Sdreport))=="lR_t"),]
sd[,2][which(is.na(sd[,2]))] <- 0
polygon( y=read_sdreport(sd, log=TRUE), x=c(which(is.na(sd[,2])==FALSE), rev(which(is.na(sd[,2])==FALSE))), col=paste0(col_list[[i]],"35"), border=NA)
}
if(i==1) axis(1, cex.axis=2, at=ilab2, labels=lab)
}
## --- panel 4: SPR with 0.4 / 0.3 reference lines ---
plot(x=1,y=1,type="n", xlim=c(1,length(all_years)), ylim=c(0,0.6), xaxs="i", yaxs="i", cex.axis=2, xlab="Year", ylab="SPR", cex.lab=2, xaxt="n")
for(i in 1:length(dir_list)){
Inputs <- inp_list[[i]]
Report <- rep_list[[i]]
Sdreport <- sdrep_list[[i]]
lines(x=seq_along(all_years), y=Report$SPR_t, lwd=2, col=col_list[[i]])
points(x=which(all_years %in% lc_years), y=Report$SPR_t[which(all_years%in%lc_years)], col=col_list[[i]], pch=19, xpd=NA)
if(all(is.na(Sdreport))==FALSE){
sd <- summary(Sdreport)[which(rownames(summary(Sdreport))=="SPR_t"),]
sd[,2][which(is.na(sd[,2]))] <- 0
polygon( y=read_sdreport(sd, log=FALSE), x=c(which(is.na(sd[,2])==FALSE), rev(which(is.na(sd[,2])==FALSE))), col=paste0(col_list[[i]],"35"), border=NA)
}
if(i==length(dir_list)){
abline(h=0.4, lwd=2, lty=2)
abline(h=0.3, lwd=2, lty=3)
}
if(i==1) axis(1, cex.axis=2, at=ilab2, labels=lab)
}
dev.off()
## sensitivity to biological inputs
## Four-panel figure of SPR trajectories showing sensitivity to Linf, k, M, and
## ML50: low input (orange, run 1), base (blue), high input (orangered, run 10).
## Each panel re-reads the relevant sensitivity run directories.
png(file.path(fig_dir, "SensitivityBioInputs.png"), height=8, width=15, res=200, units="in")
## --- panel (top-left): asymptotic length ---
dir_list <- list()
dir_list[[1]] <- file.path(res_dir, "sens_linf",1)
dir_list[[2]] <- file.path(res_dir, "LCm")
dir_list[[3]] <- file.path(res_dir, "sens_linf",10)
rep_list <- sdrep_list <- inp_list <- list()
for(i in 1:length(dir_list)){
rep_list[[i]] <- readRDS(file.path(dir_list[[i]], "Report.rds"))
sdrep_list[[i]] <- readRDS(file.path(dir_list[[i]], "Sdreport.rds"))
inp_list[[i]] <- readRDS(file.path(dir_list[[i]], "Inputs.rds"))
}
col_list <- list()
col_list[[1]] <- col2hex("orange")
col_list[[2]] <- "#0000AA"
col_list[[3]] <- col2hex("orangered")
## shared x-axis year labels (same construction as the comparison figures)
by <- round(length(true_years)/5)
lab <- rev(seq(from=true_years[length(true_years)], to=min(true_years), by=-by))
ilab <- which(true_years %in% lab)
ilab2 <- sapply(1:length(ilab), function(x){
sub <- which(Inputs$Data$S_yrs %in% ilab[x])
return(sub[length(sub)])
})
par(mfrow=c(2,2), mar=c(0,0,0,0), omi=c(1,1,1,1))
plot(x=1,y=1,type="n", xlim=c(1,length(all_years)), ylim=c(0,1.2), xaxs="i", yaxs="i", cex.axis=2, xlab="", ylab="", cex.lab=2, xaxt="n", yaxt="n")
for(i in 1:length(dir_list)){
Inputs <- inp_list[[i]]
Report <- rep_list[[i]]
Sdreport <- sdrep_list[[i]]
lines(x=seq_along(all_years), y=Report$SPR_t, lwd=2, col=col_list[[i]])
points(x=which(all_years %in% lc_years), y=Report$SPR_t[which(all_years%in%lc_years)], col=col_list[[i]], pch=19, xpd=NA)
if(all(is.na(Sdreport))==FALSE){
sd <- summary(Sdreport)[which(rownames(summary(Sdreport))=="SPR_t"),]
sd[,2][which(is.na(sd[,2]))] <- 0
polygon( y=read_sdreport(sd, log=FALSE), x=c(which(is.na(sd[,2])==FALSE), rev(which(is.na(sd[,2])==FALSE))), col=paste0(col_list[[i]],"35"), border=NA)
}
if(i==length(dir_list)){
abline(h=0.4, lwd=2, lty=2)
abline(h=0.3, lwd=2, lty=3)
}
# if(i==1) axis(1, cex.axis=2, at=ilab2, labels=lab)
}
print.letter("Asymptotic length", xy=c(0.25,0.9), cex=2, font=2)
axis(2, seq(0,1,by=0.5), cex.axis=2)
## --- panel (top-right): von Bertalanffy k ---
dir_list <- list()
dir_list[[1]] <- file.path(res_dir, "sens_vbk",1)
dir_list[[2]] <- file.path(res_dir, "LCm")
dir_list[[3]] <- file.path(res_dir, "sens_vbk",10)
rep_list <- sdrep_list <- inp_list <- list()
for(i in 1:length(dir_list)){
rep_list[[i]] <- readRDS(file.path(dir_list[[i]], "Report.rds"))
sdrep_list[[i]] <- readRDS(file.path(dir_list[[i]], "Sdreport.rds"))
inp_list[[i]] <- readRDS(file.path(dir_list[[i]], "Inputs.rds"))
}
plot(x=1,y=1,type="n", xlim=c(1,length(all_years)), ylim=c(0,1.2), xaxs="i", yaxs="i", cex.axis=2, xlab="", ylab="", cex.lab=2, xaxt="n", yaxt="n")
for(i in 1:length(dir_list)){
Inputs <- inp_list[[i]]
Report <- rep_list[[i]]
Sdreport <- sdrep_list[[i]]
lines(x=seq_along(all_years), y=Report$SPR_t, lwd=2, col=col_list[[i]])
points(x=which(all_years %in% lc_years), y=Report$SPR_t[which(all_years%in%lc_years)], col=col_list[[i]], pch=19, xpd=NA)
if(all(is.na(Sdreport))==FALSE){
sd <- summary(Sdreport)[which(rownames(summary(Sdreport))=="SPR_t"),]
sd[,2][which(is.na(sd[,2]))] <- 0
polygon( y=read_sdreport(sd, log=FALSE), x=c(which(is.na(sd[,2])==FALSE), rev(which(is.na(sd[,2])==FALSE))), col=paste0(col_list[[i]],"35"), border=NA)
}
if(i==length(dir_list)){
abline(h=0.4, lwd=2, lty=2)
abline(h=0.3, lwd=2, lty=3)
}
# if(i==1) axis(1, cex.axis=2, at=ilab2, labels=lab)
}
print.letter("von Bertalanffy k", xy=c(0.25,0.9), cex=2, font=2)
## --- panel (bottom-left): natural mortality ---
dir_list <- list()
dir_list[[1]] <- file.path(res_dir, "sens_M",1)
dir_list[[2]] <- file.path(res_dir, "LCm")
dir_list[[3]] <- file.path(res_dir, "sens_M",10)
rep_list <- sdrep_list <- inp_list <- list()
for(i in 1:length(dir_list)){
rep_list[[i]] <- readRDS(file.path(dir_list[[i]], "Report.rds"))
sdrep_list[[i]] <- readRDS(file.path(dir_list[[i]], "Sdreport.rds"))
inp_list[[i]] <- readRDS(file.path(dir_list[[i]], "Inputs.rds"))
}
plot(x=1,y=1,type="n", xlim=c(1,length(all_years)), ylim=c(0,1.2), xaxs="i", yaxs="i", cex.axis=2, xlab="", ylab="", cex.lab=2, xaxt="n", yaxt="n")
for(i in 1:length(dir_list)){
Inputs <- inp_list[[i]]
Report <- rep_list[[i]]
Sdreport <- sdrep_list[[i]]
lines(x=seq_along(all_years), y=Report$SPR_t, lwd=2, col=col_list[[i]])
points(x=which(all_years %in% lc_years), y=Report$SPR_t[which(all_years%in%lc_years)], col=col_list[[i]], pch=19, xpd=NA)
if(all(is.na(Sdreport))==FALSE){
sd <- summary(Sdreport)[which(rownames(summary(Sdreport))=="SPR_t"),]
sd[,2][which(is.na(sd[,2]))] <- 0
polygon( y=read_sdreport(sd, log=FALSE), x=c(which(is.na(sd[,2])==FALSE), rev(which(is.na(sd[,2])==FALSE))), col=paste0(col_list[[i]],"35"), border=NA)
}
if(i==length(dir_list)){
abline(h=0.4, lwd=2, lty=2)
abline(h=0.3, lwd=2, lty=3)
}
if(i==1) axis(1, cex.axis=2, at=ilab2, labels=lab)
}
print.letter("Natural mortality", xy=c(0.25,0.9), cex=2, font=2)
axis(2, seq(0,1,by=0.5), cex.axis=2)
## --- panel (bottom-right): length at 50% maturity ---
dir_list <- list()
dir_list[[1]] <- file.path(res_dir, "sens_ML50",1)
dir_list[[2]] <- file.path(res_dir, "LCm")
dir_list[[3]] <- file.path(res_dir, "sens_ML50",10)
rep_list <- sdrep_list <- inp_list <- list()
for(i in 1:length(dir_list)){
rep_list[[i]] <- readRDS(file.path(dir_list[[i]], "Report.rds"))
sdrep_list[[i]] <- readRDS(file.path(dir_list[[i]], "Sdreport.rds"))
inp_list[[i]] <- readRDS(file.path(dir_list[[i]], "Inputs.rds"))
}
plot(x=1,y=1,type="n", xlim=c(1,length(all_years)), ylim=c(0,1.2), xaxs="i", yaxs="i", cex.axis=2, xlab="", ylab="", cex.lab=2, xaxt="n", yaxt="n")
for(i in 1:length(dir_list)){
Inputs <- inp_list[[i]]
Report <- rep_list[[i]]
Sdreport <- sdrep_list[[i]]
lines(x=seq_along(all_years), y=Report$SPR_t, lwd=2, col=col_list[[i]])
points(x=which(all_years %in% lc_years), y=Report$SPR_t[which(all_years%in%lc_years)], col=col_list[[i]], pch=19, xpd=NA)
if(all(is.na(Sdreport))==FALSE){
sd <- summary(Sdreport)[which(rownames(summary(Sdreport))=="SPR_t"),]
sd[,2][which(is.na(sd[,2]))] <- 0
polygon( y=read_sdreport(sd, log=FALSE), x=c(which(is.na(sd[,2])==FALSE), rev(which(is.na(sd[,2])==FALSE))), col=paste0(col_list[[i]],"35"), border=NA)
}
if(i==length(dir_list)){
abline(h=0.4, lwd=2, lty=2)
abline(h=0.3, lwd=2, lty=3)
}
if(i==1) axis(1, cex.axis=2, at=ilab2, labels=lab)
}
## NOTE(review): legend says "-25% / +25%" but runs 1 and 10 span the full
## profiled range -- confirm the label matches how sens_* runs were generated.
legend("topright", legend=c("-25%", "base", "+25%"), col=unlist(col_list), lwd=4, bty="n", cex=2, title="Input value")
print.letter("Length at 50% maturity", xy=c(0.25,0.9), cex=2, font=2)
mtext(side=1, "Year", cex=2, line=4, outer=TRUE)
mtext(side=2, "SPR", cex=2, line=4, outer=TRUE)
dev.off()
|
ad3f0c198c9c6a6ca52dd7cd62423cab8726b93d
|
7ca0ee6639ecad09598c5c58d058ccda00884f6b
|
/modules/about.R
|
dbcd95fb74839555eb7e8720f2c4da8165de1de5
|
[] |
no_license
|
FlowWest/Explore-SIT-Model
|
edcf404fa66be7c2d7284b992dded66a9bfdda9f
|
7f47fc2a5f9e3a0c5bbb03e11f2f5331354105aa
|
refs/heads/master
| 2021-06-20T21:59:30.842006
| 2017-07-25T21:29:37
| 2017-07-25T21:29:37
| 94,383,223
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,172
|
r
|
about.R
|
# UI half of the "about" Shiny module: a static page with the model diagram,
# author credit, and FlowWest attribution.
#
# @param id Module namespace id (passed to NS()).
# @return A tagList of Shiny UI elements.
aboutUI <- function(id) {
  # ns is unused because all content is static, but kept so the module
  # follows the standard Shiny-module signature and is ready for namespaced
  # inputs/outputs if any are added later.
  ns <- NS(id)
  tagList(
    fluidRow(
      column(width = 12, style = 'padding-left: 30px;',
             tags$h1('Fall Chinook Science Integration Team Model'),
             tags$br(),
             tags$img(src = 'model.png', width = '80%'),
             tags$br(),
             tags$br(),
             tags$h4('Primary Author', tags$a(href = 'mailto:jt.peterson@oregonstate.edu', 'James T. Peterson')),
             # fixed duplicated word: "Oregon Cooperative Cooperative" ->
             # "Oregon Cooperative" (continues as "Fish and Wildlife Research
             # Unit" on the next line)
             tags$h5('U.S. Geological Survey, Oregon Cooperative'),
             tags$h5('Fish and Wildlife Research Unit, Oregon State University'),
             tags$h5('Corvallis, Oregon 97331-3803'),
             tags$br())
    ),
    fluidRow(
      column(width = 12,
             tags$a(tags$img(src = 'TransLogoTreb.png', width = 200), href = 'http://www.flowwest.com/', target = '_blank',
                    style = 'display: inline-block;'),
             tags$h5('App created and maintained by', tags$a(href = 'mailto:sgill@flowwest.com', 'Sadie Gill', target = '_blank'),
                     style = 'display: inline-block; font-weight: 300;'))
    )
  )
}
# Server half of the "about" module. The tab is purely static, so no server
# logic is needed; the empty function keeps the callModule() wiring uniform.
about <- function(input, output, session) {
}
|
528c0723604855891c762929224043b4ad842923
|
2ed4494745086bc2fb1f018de1c0cc2a5db71f25
|
/R/app/server/helpers.R
|
ac5a4da23b9655b1714ef10c36cfc0990d73f72a
|
[] |
no_license
|
shi-xin/density_estimation
|
8e7868cb0bdca1a544f68efabe21178c8ab48ab5
|
86c4adee22df9b918e7b2514a3b84716185a72f6
|
refs/heads/master
| 2022-09-17T05:55:15.007284
| 2020-05-27T02:24:26
| 2020-05-27T02:24:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,380
|
r
|
helpers.R
|
source("server/helpers_mixture_modal.R")
# Initialize python ------------------------------------------------------------
# Point reticulate at the Python interpreter given by `path` and report the
# outcome via Shiny notifications.
# - Warns (non-fatally) if reticulate ends up binding a different interpreter
#   than the one requested.
# - On success, returns the directory of the interpreter actually found.
# - On error, shows an error notification and returns the notification id
#   (callers treat any failure path as "not configured").
# NOTE(review): reticulate:::.globals is an internal, unexported API and may
# break across reticulate versions -- confirm against the installed version.
use_python_custom = function(path) {
tryCatch({
reticulate::use_python(
python = path
)
# py_config() initializes the binding; warnings are expected noise here
suppressWarnings(invisible(reticulate::py_config()))
path_found = dirname(reticulate:::.globals$py_config$python)
if (path != path_found) {
showNotification(
paste(
stringi::stri_wrap(
paste(
"Python was found in", path_found, "which differs from your input."
),
width = 30),
collapse = "\n"
),
type = "warning",
duration = 7
)
}
showNotification(
paste("Python version:", reticulate:::.globals$py_config$version),
type = "message",
duration = 7
)
return(path_found)
},
error = function(c) {
# NOTE(review): "<\br>" in the message below looks like a typo for "<br/>";
# it will display literally since the text is not wrapped in HTML().
showNotification(
paste("The following error was thrown when trying to load Python<\br>",
c),
type = "error",
duration = 7
)
})
}
# Full Python setup for a user-supplied interpreter path (input$python_path),
# wrapped in a Shiny progress bar: bind the interpreter, verify required
# Python packages, then source the density utilities into the global env.
# Returns (invisibly) the directory of the Python interpreter that was bound.
init_python_custom <- function(input) {
withProgress(
message = "Getting Python ready",
expr = {
incProgress(0, detail = "Configuring Python...")
PYTHON_PATH <- use_python_custom(input$python_path)
incProgress(0.33, detail = "Checking packages...")
tryCatch({
# check_pkgs.py prints its findings; capture and surface them to the user
msg <- reticulate::py_capture_output(
reticulate::source_python("python/check_pkgs.py"))
showNotification(
HTML(gsub("\n","<br/>", msg)),
type = "message",
duration = 7)
},
error = function(c) {
showNotification(
HTML(paste("Not all required packages have been found<br/>", c)),
type = "error",
duration = 7
)
# bail out of the tryCatch handler; progress/return below still run
return(invisible(NULL))
})
incProgress(0.33, detail = "Loading functions...")
# expose the Python helpers as R functions in the global environment
reticulate::source_python("python/density_utils.py", envir = globalenv())
incProgress(0.34, detail = "Done!")
}
)
return(invisible(PYTHON_PATH))
}
# Python setup for the hosted (shinyapps-style) deployment: create/reuse a
# virtualenv on the server's system Python, install numpy/scipy, and source
# the density utilities. Returns TRUE on success, FALSE (with an error
# notification) on any failure.
# NOTE(review): the hard-coded /usr/bin/python3 and env name assume the
# deployment image -- confirm they match the hosting environment.
init_python_shiny <- function(input) {
tryCatch({
withProgress(
message = "Getting Python ready",
expr = {
incProgress(0, detail = "Creating virtual environment...")
reticulate::virtualenv_create(
envname = "python35_env",
python = "/usr/bin/python3")
incProgress(0.2, detail = "Installing packages...")
reticulate::virtualenv_install(
envname = "python35_env",
packages = c("numpy", "scipy"))
incProgress(0.2, detail = "Loading virtual environment...")
reticulate::use_virtualenv(
virtualenv = "python35_env",
required = TRUE)
incProgress(0.2, detail = "Loading functions...")
reticulate::source_python(
file = "python/density_utils.py",
envir = globalenv())
incProgress(0.2, detail = "Done!")
}
)
return(TRUE)
},
error = function(c){
showNotification(
HTML(paste(
stringi::stri_wrap(paste("Python could not be loaded.<br/>Error:", c),
width = 30), collapse = "<br/>")),
type = "error",
duration = 7
)
return(FALSE)
})
}
# Restart session --------------------------------------------------------------
# Placeholder for restarting the R session when running under RStudio.
# The actual restart calls are intentionally commented out, so this is a
# deliberate no-op; it returns NULL in every environment.
restart_r <- function() {
  if (tolower(.Platform$GUI) == "rstudio") {
    # rm(list = ls())
    # .rs.restartR()
  }
}
# Data wrangling functions -----------------------------------------------------
# Subset the benchmark data frame to the requested combinations.
#
# @param df Data frame with columns pdf, estimator, bw and size.
# @param .pdf,.estimator,.bw,.size Allowed values for each column.
# @return Rows of `df` whose four columns all fall in the given sets.
df_filter <- function(df, .pdf, .estimator, .bw, .size) {
  kept <- filter(
    df,
    pdf %in% .pdf,
    estimator %in% .estimator,
    bw %in% .bw,
    size %in% .size
  )
  kept
}
# Reorder (and relabel) the faceting columns of `df` according to the order
# the user selected them in the UI, so facets appear in selection order.
#
# Reads input$boxplots_facet_vars for the chosen facet variables and, for
# each, the matching input control (boxplots_bw / boxplots_estimator /
# boxplots_pdf) to get the level order. For "pdf", levels are additionally
# relabeled via the global lookup `pdf_facet_lbls`.
#
# @param df Data frame with bw / estimator / pdf columns.
# @param input Shiny `input` object.
# @return `df` with the selected columns converted to ordered factors;
#   unchanged if no facet variable is selected.
df_facet_order <- function(df, input) {
  cols <- input[["boxplots_facet_vars"]]
  # Nothing to do when no recognized facet variable was picked.
  if (!any(cols %in% c("bw", "estimator", "pdf"))) return(df)
  # Maps a column name to the input control holding its selected levels.
  cols_match <- c(
    "bw" = "boxplots_bw",
    "estimator" = "boxplots_estimator",
    "pdf" = "boxplots_pdf")
  for (col in cols) {
    lvls <- input[[cols_match[[col]]]]
    if (col == "pdf") {
      # Human-readable facet labels for pdf codes (global lookup table).
      lbls <- unname(pdf_facet_lbls[lvls])
    } else {
      lbls <- lvls
    }
    df[[col]] <- factor(
      x = df[[col]],
      levels = lvls,
      labels = lbls)
  }
  return(df)
}
# Replace extreme values of one column with NA, per group.
#
# Within each group defined by `.group_vars`, computes the `.quantile`
# quantile of `.trim_var` and sets values at or above it to NA. Column and
# group names are passed as strings and converted to symbols for tidy eval.
#
# @param df Data frame (or tibble).
# @param .group_vars Character vector of grouping column names.
# @param .trim_var Single column name (string) to trim.
# @param .quantile Upper quantile cutoff in [0, 1].
# @return The trimmed data frame. NOTE(review): the result is still grouped
#   (no ungroup()) — confirm downstream code expects/handles that.
df_trim <- function(df, .group_vars, .trim_var, .quantile) {
  .group_vars <- purrr::map(.group_vars, as.name)
  .trim_var <- as.name(.trim_var)
  df %>%
    group_by(!!!.group_vars) %>%
    mutate(
      upper_bound = quantile(!!.trim_var, .quantile, na.rm = TRUE)
    ) %>%
    mutate(
      # Values at or above the group's cutoff become NA.
      !!.trim_var := ifelse(!!.trim_var >= upper_bound, NA, !!.trim_var)
    ) %>%
    select(-upper_bound)
  }
# Plotting functions -----------------------------------------------------------
# TRUE when the boxplot inputs are unusable: some element of `args` is
# NULL/empty, or some element is NA.
#
# @param args List of input values to validate.
# @return Scalar logical.
check_boxplot_args <- function(args) {
  # vapply (not sapply) guarantees a logical vector even for an empty list,
  # and || short-circuits the scalar combination (`|` evaluated both sides).
  # length(x) == 0 also covers NULL elements.
  any(vapply(args, function(x) length(x) == 0, logical(1))) || anyNA(args)
}
# Base ggplot object for the boxplot view: sample size (as a factor) on x,
# also mapped to fill, and the chosen metric column on y.
#
# @param df Data frame containing a `size` column and the metric column.
# @param .metric Name of the metric column, as a string.
# @return A ggplot object with no layers yet.
initialize_plot <- function(df, .metric) {
  .metric <- as.name(.metric)
  ggplot(
    data = df,
    mapping = aes(x = factor(size), y = !!.metric, fill = factor(size))
  )
}
# Boxplot layer with semi-transparent grey outlier points.
#
# @param outlier.shape Point shape for outliers (default 19, solid circle).
# @return A geom_boxplot layer.
add_boxplot <- function(outlier.shape = 19) {
  outlier_col <- alpha(DARK_GRAY, 0.3)
  geom_boxplot(
    outlier.fill = outlier_col,
    outlier.color = outlier_col,
    outlier.shape = outlier.shape
  )
}
# Discrete viridis fill scale with configurable transparency.
custom_fill <- function(alpha = 0.6) {
  scale_fill_viridis_d(alpha = alpha)
}
# Grey base theme with vertical grid lines, panel border, legend and
# facet-strip background removed.
custom_theme <- function() {
  tweaks <- theme(
    panel.grid.minor.x = element_blank(),
    panel.grid.major.x = element_blank(),
    panel.border = element_blank(),
    legend.position = "none",
    strip.background = element_blank()
  )
  theme_gray() + tweaks
}
# y-axis scale for the benchmark plots.
#
# @param limits Optional y limits.
# @param prec Precision (power of ten) used to format time labels.
# @param log10 Use a log10 y axis instead of a linear one.
# @param time_flag When TRUE, format tick labels as times via get_labels().
# @return A ggplot scale object.
custom_scale <- function(limits = NULL, prec = 0.1, log10 = FALSE, time_flag = FALSE) {
  scale_args <- list(limits = limits, breaks = waiver())
  if (time_flag) {
    scale_args$labels <- get_labels(prec)
  }
  if (log10) {
    do.call(scale_y_log10, scale_args)
  } else {
    do.call(scale_y_continuous, scale_args)
  }
}
# Order of magnitude (power of ten) of the spread of x.
# Returns 1 when all finite values of x are equal (zero span).
precision <- function (x) {
  bounds <- range(x, na.rm = TRUE, finite = TRUE)
  width <- bounds[2] - bounds[1]
  if (width == 0) {
    return(1)
  }
  10 ^ floor(log10(width))
}
# Unit suffix matching a time precision (expected to be a power of ten):
# seconds for coarse, milliseconds for 1e-2/1e-3, microseconds below that.
get_suffix <- function(prec) {
  if (prec >= 0.1) return(" sec")
  if (prec %in% c(0.01, 0.001)) return(" ms")
  if (prec <= 0.0001) return(" \u03BCs")
}
# Multiplier converting seconds into the display unit implied by the
# precision: 1 for seconds, 1e3 for milliseconds, 1e6 for microseconds,
# and 1/prec as a generic fallback.
get_scale <- function(prec) {
  if (prec >= 0.1) {
    return(1)
  }
  if (prec %in% c(0.01, 0.001)) {
    return(1 / 1e-03)
  }
  if (prec <= 0.0001) {
    return(1 / 1e-06)
  }
  1 / prec
}
# Label accuracy (step size) in display units for a given precision:
# the number of digits grows as the precision gets finer, relative to the
# unit boundary chosen by get_scale()/get_suffix().
get_acc <- function(prec) {
  if (prec >= 0.1) {
    digits <- 2 - log10(prec)
  } else if (prec %in% c(0.01, 0.001)) {
    digits <- 2 - log10(prec / 1e-03)
  } else if (prec <= 0.0001) {
    digits <- 2 - log10(prec / 1e-06)
  } else {
    digits <- 2
  }
  1 / 10 ^ digits
}
# Formatter closure for axis tick labels: rescales second values into the
# unit implied by `prec` and appends the matching unit suffix.
get_labels <- function(prec) {
  scales::number_format(
    suffix = get_suffix(prec),
    scale = get_scale(prec),
    accuracy = get_acc(prec)
  )
}
# x <- c(rnorm(10, mean = 0.000200 , sd = 0.0001),
# rnorm(10, mean = 0.0000500 , sd = 0.00001),
# rnorm(10, mean = 0.000003, sd = 0.000001))
# prec <- precision(x)
# get_labels(prec)(x)
# # x
#
# x <- c(rnorm(10, mean = 0.05 , sd = 0.01),
# rnorm(10, mean = 0.004, sd = 0.001))
# prec <- precision(x)
# get_labels(prec)(x)
# x
#
# x <- c(rnorm(10, mean = 0.7, sd = 0.1),
# rnorm(10, mean = 0.05 , sd = 0.01),
# rnorm(10, mean = 0.004, sd = 0.001))
# prec <- precision(x)
# get_labels(prec)(x)
# x
#
# x <- rnorm(50, mean = 1 , sd = 0.01)
# prec <- precision(x)
# get_labels(prec)(x)
# x
#
# x <- rnorm(50, mean = 5 , sd = 1)
# prec <- precision(x)
# get_labels(prec)(x)
# x
# Build a facet_grid specification from the selected facet variables.
#
# With one variable, facets by column (". ~ var"); with two, the first is
# rows and the second is columns ("var1 ~ var2"). Labels come from the
# global `lbls_list` lookup. Returns NULL when no recognized variable is
# selected.
#
# @param ... Facet variable names. NOTE(review): as.list(...) only converts
#   the FIRST dots argument — callers apparently pass one vector/list;
#   confirm no caller relies on multiple dots arguments.
# @param free_y When TRUE, each facet row gets its own y scale.
# @param nrow Currently unused — TODO confirm whether it can be dropped.
# @return A facet_grid object, or NULL.
custom_facet <- function(..., free_y, nrow = 1) {
  args <- as.list(...)
  if (!any(args %in% c("bw", "estimator", "pdf"))) return(NULL)
  scales <- if (free_y) "free_y" else "fixed"
  if (length(args) == 1) {
    # Single variable: columns only.
    col_labs <- lbls_list[[args[[1]]]]
    form <- as.formula(paste(".", args[[1]], sep = "~"))
    facet_grid(form, scales = scales,
               labeller = labeller(.cols = col_labs))
  } else if (length(args) == 2) {
    # Two variables: rows ~ columns.
    row_labs <- lbls_list[[args[[1]]]]
    col_labs <- lbls_list[[args[[2]]]]
    form <- as.formula(paste(args[[1]], args[[2]], sep = "~"))
    facet_grid(form, scales = scales,
               labeller = labeller(.cols = col_labs, .rows = row_labs))
  }
}
# Bandwidth choices for the selected pdf kinds: "mixture" has its own set,
# everything else shares the classic set. Concatenates per-kind choices and
# drops duplicates while preserving first-seen order.
get_bw_choices <- function(x) {
  pick <- function(kind) {
    switch(kind, "mixture" = choices_bw_mixture, choices_bw_classic)
  }
  all_choices <- unlist(lapply(x, pick))
  all_choices[!duplicated(all_choices)]
}
# Union of the sample-size choices of every selected estimator:
# "sj" has a restricted set, everything else shares the default set.
get_size_choices <- function(x) {
  pick <- function(est) {
    switch(est, "sj" = choices_size_sj, choices_size_default)
  }
  Reduce(union, lapply(x, pick))
}
# Continuous variables arguments -----------------------------------------------
# Lookup tables for the supported continuous distributions, all keyed by the
# plain distribution name ("norm", "t", ... matching the d*/r* suffixes).

# UI labels for each distribution's parameters (order matters: it matches
# the argument order of the corresponding d*/r* functions).
cont_dist_labels <- list(
  norm = c("Mean", "Standard Deviation"),
  t = c("Degrees of freedom", "NCP"),
  gamma = c("Shape", "Scale"),
  exp = c("Rate"),
  beta = c("Shape 1", "Shape 2"),
  lnorm = c("Log mean", "Log SD"),
  weibull = c("Shape", "Scale"),
  unif = c("Lower bound", "Upper bound")
)
# Default parameter values shown in the UI.
cont_dist_values <- list(
  norm = 0:1,
  t = 1:0,
  gamma = c(1, 1),
  exp = 1,
  beta = c(1.5, 1.5),
  lnorm = 0:1,
  weibull = c(0.5, 1),
  unif = c(-1, 1)
)
# Lower bounds enforced by the numeric input widgets.
cont_dist_mins <- list(
  norm = c(-100, 0),
  t = 1:0,
  gamma = c(0, 0),
  exp = 0,
  beta = c(0, 0),
  lnorm = c(-5, 0),
  weibull = c(0, 0),
  unif = c(-100, -100)
)
# Stricter lower bounds used for validation (strictly positive where the
# widget allows 0 but the distribution does not, e.g. a zero SD or rate).
cont_dist_mins_check <- list(
  norm = c(-100, 0.001),
  t = 1:0,
  gamma = c(0, 0.001),
  exp = 0.001,
  beta = c(0.01, 0.01),
  lnorm = c(-5, 0.01),
  weibull = c(0.01, 0.01),
  unif = c(-100, -100)
)
# Upper bounds for both the widgets and validation.
cont_dist_maxs <- list(
  norm = c(100, 100),
  t = c(200, 50),
  gamma = c(100, 100),
  exp = 100,
  beta = c(100, 100),
  lnorm = c(100, 100),
  weibull = c(100, 100),
  unif = c(100, 100)
)
# Plotting bounds per distribution: a function of the parameter list that
# returns c(lower, upper), typically trimming to the central 99% via the
# quantile function (or a +/- 3 SD window for the normal).
cont_dist_bounds <- list(
  "norm" = function(.params) {
    width <- 3 * .params[[2]]
    c(.params[[1]] - width, .params[[1]] + width)
  },
  "t" = function(.params) {
    qt(c(0.005, 0.995), .params[[1]], .params[[2]])
  },
  "gamma" = function(.params) {
    c(0, qgamma(0.995, .params[[1]], .params[[2]]))
  },
  "exp" = function(.params) {
    c(0, qexp(0.995, .params[[1]]))
  },
  "beta" = function(.params) {
    c(0, 1)
  },
  "lnorm" = function(.params) {
    c(0, qlnorm(0.995, .params[[1]], .params[[2]]))
  },
  "weibull" = function(.params) {
    c(0, qweibull(0.995, .params[[1]], .params[[2]]))
  },
  "unif" = function(.params) {
    c(.params[[1]], .params[[2]])
  }
)
# TRUE when every argument has the same length.
# Called with no arguments it returns FALSE (unique() of nothing).
all_equal_length <- function(...) {
  lens <- vapply(list(...), length, 1L)
  length(unique(lens)) == 1
}
# Build a list of shiny numericInput widgets, one per parameter.
#
# Vectors `label`, `value`, `min`, `max` must have equal length (NAs are
# recycled to match when min/max are omitted). Input ids are
# "<prefix>_input1", "<prefix>_input2", ...
#
# @return The list of widgets, invisibly (purrr::iwalk returns its input
#   invisibly). NOTE(review): iwalk(out, `[[`) merely indexes each widget
#   for its side-effect-free `[[` — it looks like a no-op apart from the
#   invisible return; confirm the intent.
numeric_params <- function(label, value, min = NA, max = NA, step = 0.5, prefix) {
  # Recycle NA bounds to one per parameter.
  if (all(is.na(min))) min <- rep(NA, length(label))
  if (all(is.na(max))) max <- rep(NA, length(label))
  step <- rep(step, length(label))
  stopifnot(all_equal_length(label, value, min, max, step))
  ids <- paste0(prefix, "_input", seq_along(label))
  out <- purrr::pmap(list(ids, label, value, min, max, step), numericInput)
  purrr::iwalk(out, `[[`)
}
# Numeric inputs for the parameters of one fixed (non-mixture) distribution,
# with labels, defaults and bounds taken from the cont_dist_* tables.
distribution_parameters_cont <- function(distribution, prefix = "fixed_params") {
  widget_args <- list(
    label = cont_dist_labels[[distribution]],
    value = cont_dist_values[[distribution]],
    min = cont_dist_mins[[distribution]],
    max = cont_dist_maxs[[distribution]],
    prefix = prefix
  )
  do.call(numeric_params, widget_args)
}
# One third-width bootstrap column wrapping a full-width numericInput.
custom_column <- function(id, label, value, min, max, step) {
  widget <- numericInput(
    inputId = id,
    label = label,
    value = value,
    min = min,
    max = max,
    step = step,
    width = "100%"
  )
  column(width = 4, widget)
}
# Build one UI column (via custom_column) per mixture-component parameter.
#
# Same recycling/validation scheme as numeric_params; ids are
# "<prefix>_input1", "<prefix>_input2", ...
#
# @return The list of columns, invisibly (purrr::iwalk returns its input
#   invisibly). NOTE(review): as in numeric_params, iwalk(out, `[[`) looks
#   like a no-op apart from the invisible return — confirm the intent.
generate_mixture_ui <- function(label, value, min = NA, max = NA, step = NA, prefix) {
  if (all(is.na(min))) min <- rep(NA, length(label))
  if (all(is.na(max))) max <- rep(NA, length(label))
  if (all(is.na(step))) step <- rep(NA, length(label))
  stopifnot(all_equal_length(label, value, min, max, step))
  ids <- paste0(prefix, "_input", seq_along(label))
  out <- purrr::pmap(list(ids, label, value, min, max, step), custom_column)
  purrr::iwalk(out, `[[`)
}
# Render a fluidRow of numeric inputs for the first mixture component.
#
# BUG FIX: the cont_dist_* lookup tables are keyed by plain distribution
# names ("norm", "t", ...), not by generator names; the original "rnorm"
# key returned NULL from every table, so the generated UI was empty.
render_mixture_ui <- function() {
  distribution <- "norm"
  prefix <- "mixture1"
  renderUI({
    fluidRow(
      generate_mixture_ui(
        label = cont_dist_labels[[distribution]],
        value = cont_dist_values[[distribution]],
        min = cont_dist_mins[[distribution]],
        max = cont_dist_maxs[[distribution]],
        prefix = prefix
      )
    )
  })
}
# Plot functions ---------------------------------------------------------------
# Draw `size` random values from the named distribution by dispatching to
# its r* generator (e.g. "norm" -> rnorm) with the given parameter list.
component_rvs <- function(distribution, .params, size) {
  generator <- match.fun(paste0("r", distribution))
  do.call(generator, c(list(size), .params))
}
# Sample `size` draws from a mixture distribution: component i contributes
# round(wts[i] * size) draws from its own distribution. Weights default to
# equal and must sum to 1 (within tolerance).
mixture_rvs <- function(distributions, .params, size, wts = NULL) {
  n_comp <- length(distributions)
  if (is.null(wts)) {
    wts <- rep(1 / n_comp, n_comp)
  }
  stopifnot(length(.params) == n_comp, length(wts) == n_comp)
  stopifnot(all.equal(sum(wts), 1, tolerance = 0.011))
  component_sizes <- round(wts * size)
  draws <- purrr::pmap(list(distributions, .params, component_sizes), component_rvs)
  unlist(draws)
}
# Evaluate the named distribution's density on x_grid by dispatching to its
# d* function (e.g. "norm" -> dnorm) with the given parameter list.
component_pdf <- function(distribution, .params, x_grid) {
  density_fun <- match.fun(paste0("d", distribution))
  do.call(density_fun, c(list(x_grid), .params))
}
# Plotting bounds c(lower, upper) for one distribution, looked up in the
# cont_dist_bounds table of bound functions.
pdf_bounds <- function(distribution, .params) {
  cont_dist_bounds[[distribution]](.params)
}
# Evaluation grid for a mixture pdf: 250 equally spaced points spanning the
# union of all components' plotting bounds.
mixture_grid <- function(distributions, .params) {
  bounds <- unlist(purrr::map2(distributions, .params, pdf_bounds))
  seq(min(bounds), max(bounds), length.out = 250)
}
# True density of a mixture on a shared grid.
#
# Evaluates every component's pdf on the common grid, then combines them as
# a weighted sum (matrix of per-component columns times the weight vector).
# Infinite densities (possible at distribution boundaries, e.g. beta) are
# replaced by NA so downstream plotting/max() stays finite.
#
# @param distributions Character vector of component names ("norm", ...).
# @param .params List of per-component parameter lists (same length).
# @param wts Component weights summing to 1; defaults to equal weights.
# @return list(x = grid, pdf = mixture density on the grid).
mixture_pdf <- function(distributions, .params, wts = NULL) {
  if (is.null(wts)) wts <- rep(1 / length(distributions), length(distributions))
  stopifnot(length(distributions) == length(.params), length(.params) == length(wts))
  stopifnot(all.equal(sum(wts), 1, tolerance = 0.011))
  x_grid <- mixture_grid(distributions, .params)
  .l <- list(distributions, .params)
  pdf <- unlist(purrr::pmap(.l, component_pdf, x_grid = x_grid))
  # One column per component; weighted sum collapses to the mixture pdf.
  pdf <- as.vector(matrix(pdf, ncol = length(wts)) %*% wts)
  pdf[is.infinite(pdf)] <- NA # In some edge cases pdf is `Inf`
  return(list("x" = x_grid, "pdf" = pdf))
}
# Read the selected distribution name of each mixture component from the
# shiny input object (ids "mixture_component1" ... "mixture_componentN").
get_mixture_distributions <- function(input, mixture_n) {
  ids <- paste0("mixture_component", seq_len(mixture_n))
  vapply(ids, function(id) input[[id]], character(1), USE.NAMES = FALSE)
}
# Collect the parameter values of every mixture component from `input`.
#
# For component i, matches input names of the form
# "mixture_component<i>_input<digit>" and reads each matched value.
#
# @param input Shiny `input` (supports names() and [[-indexing).
# @param mixture_n Number of mixture components.
# @return List of numeric vectors, one per component, in input order.
get_mixture_params <- function(input, mixture_n) {
  # Starts with mixture_component1_input and ends with digit
  pattern <- paste0("(mixture_component", seq_len(mixture_n), "_input)(\\d$)")
  lapply(
    X = pattern,
    FUN = function(x) {
      .nms <- names(input)[grepl(x, names(input))]
      # Names are not sorted in `input`. Not very solid, but worked so far
      # NOTE(review): str_sort is lexicographic — with 10+ parameters
      # "input10" would sort before "input2"; the $-anchored single digit
      # in the pattern currently prevents that case.
      .nms <- stringr::str_sort(.nms)
      # Reactive values allow subsetting only one element
      out <- vector("numeric", length(.nms))
      for (i in seq_along(.nms)) {
        out[[i]] <- input[[.nms[[i]]]]
      }
      out
    }
  )
}
# Read the weight input of every mixture component
# (ids "mixture_component<i>_input_wt") as a numeric vector.
get_mixture_wts <- function(input, mixture_n) {
  ids <- paste0("mixture_component", seq_len(mixture_n), "_input_wt")
  vapply(ids, function(id) input[[id]], numeric(1), USE.NAMES = FALSE)
}
# Factory for a weight validator with memory.
#
# The returned function checks that the component weights sum to 1 (within
# 0.0011). Invalid or missing weights are replaced by equal weights. The
# closure remembers the last invalid weight vector (`wts_previous`, updated
# via <<-) so the warning notification is shown only once per distinct
# invalid input instead of on every reactive re-run.
check_mixture_wts_gen <- function() {
  wts_previous <- 1
  f <- function(wts, mixture_n) {
    if (any(is.na(wts))) {
      # Missing weights: silently fall back to equal weighting.
      wts <- round(rep(1 / mixture_n, mixture_n), 3)
    } else if (!((sum(wts) > 1 - 0.0011) && (sum(wts) < 1 + 0.0011))) {
      # Sum is off: warn once per new invalid vector, then equal-weight.
      changed <- length(wts) != length(wts_previous) | !all(wts == wts_previous)
      if (changed) {
        showNotification(
          ui = HTML(paste(c("Sum of weights is not equal to 1.",
                            "Weighting all components equally."),
                          collapse = "<br/>")),
          type = "warning"
        )
        wts_previous <<- wts
      }
      wts <- round(rep(1 / mixture_n, mixture_n), 3)
    }
    wts
  }
  return(f)
}
# Single shared validator instance used by the server code.
check_mixture_wts <- check_mixture_wts_gen()
# Raise a classed error condition so callers can handle specific failure
# kinds with tryCatch(<subclass> = ...). Extra named arguments become
# additional fields on the condition object.
stop_custom <- function(.subclass, message, call = NULL, ...) {
  cond <- structure(
    class = c(.subclass, "error", "condition"),
    list(
      message = message,
      call = call,
      ...
    )
  )
  stop(cond)
}
# Elementwise range check: TRUE at position i when the i-th parameter lies
# inside [mins[i], maxs[i]].
check_density_args_inner <- function(.params, mins, maxs) {
  idx <- seq_along(.params)
  vapply(
    idx,
    function(i) .params[[i]] >= mins[[i]] & .params[[i]] <= maxs[[i]],
    logical(1),
    USE.NAMES = FALSE
  )
}
# Validate all component parameters against the per-distribution bounds.
#
# Uses the strict lower bounds (cont_dist_mins_check) so values like a zero
# standard deviation are rejected even though the widget minimum is 0.
# Throws a classed "bad-parameters" error (via stop_custom) when any single
# parameter falls outside its allowed range; returns invisibly otherwise.
check_density_args <- function(distributions, .params) {
  mins <- purrr::map(distributions, .f = function(x) cont_dist_mins_check[[x]])
  maxs <- purrr::map(distributions, .f = function(x) cont_dist_maxs[[x]])
  lgls <- unlist(purrr::pmap(list(.params, mins, maxs), check_density_args_inner))
  if (any(!lgls)) {
    stop_custom(
      .subclass = "bad-parameters",
      message = paste(
        "At least one parameter value is not supported.",
        "Possible causes:",
        "* Very low/high values.",
        "* Value not in domain of parameter (i.e. negative SD).",
        sep = "\n")
    )
  }
  return(invisible(NULL))
}
# Gather everything needed to draw the density demo plot.
#
# From the UI state: builds the true pdf (single distribution or mixture),
# draws a random sample from it, and estimates the density of that sample
# with the selected estimator (estimate_density / estimate_density_em are
# Python functions sourced from python/density_utils.py).
#
# @param input Shiny `input` object.
# @return list(rvs, x_range, y_range, x_estimation, y_estimation,
#   x_true, y_true). y_range adds 16% headroom above the true pdf maximum.
# @raises "missing-component" when no mixture component is configured;
#   "bad-parameters" (via check_density_args) on out-of-range parameters.
get_density_params <- function(input) {
  if (input$density_dist_type == "mixture") {
    # The second parameter input of component 1 only exists once the user
    # has configured at least one component.
    if (is.null(input[["mixture_component1_input2"]])) {
      stop_custom(
        .subclass = "missing-component",
        message = "Specify at least one component for the mixture"
      )
    }
    mixture_n <- input$density_mixture_n
    distributions <- get_mixture_distributions(input, mixture_n)
    .params <- get_mixture_params(input, mixture_n)
    check_density_args(distributions, .params)
    wts <- get_mixture_wts(input, mixture_n)
    wts <- check_mixture_wts(wts, mixture_n)
    rvs <- mixture_rvs(distributions, .params, input$density_sample_size, wts)
    pdf <- mixture_pdf(distributions, .params, wts)
  } else {
    # Single fixed distribution: reuse the mixture machinery with one
    # component of weight 1. "exp" is the only single-parameter case.
    distribution <- input$density_fixed_distribution
    dist_params <- if (distribution == "exp") {
      list(input$fixed_params_input1)
    } else {
      list(input$fixed_params_input1, input$fixed_params_input2)
    }
    check_density_args(distribution, list(dist_params))
    rvs <- mixture_rvs(distribution, list(dist_params), input$density_sample_size, wts = 1)
    pdf <- mixture_pdf(distribution, list(dist_params), wts = 1)
  }
  x_true <- pdf$x
  y_true <- pdf$pdf
  x_range <- range(x_true)
  # 16% headroom so the legend does not overlap the curves.
  y_range <- c(0, 1.16 * max(y_true, na.rm = TRUE))
  if (input$density_estimator == "gaussian_kde") {
    estimation <- estimate_density(
      rvs,
      bw = input$density_bw_method,
      extend = input$density_extend_limits,
      bound_correction = input$density_bound_correction
    )
  } else if (input$density_estimator == "adaptive_kde") {
    estimation <- estimate_density(
      rvs,
      bw = input$density_bw_method,
      extend = input$density_extend_limits,
      bound_correction = input$density_bound_correction,
      adaptive = TRUE
    )
  } else {
    # Fallback: EM-based mixture estimation.
    estimation <- estimate_density_em(
      rvs,
      extend = input$density_extend_limits
    )
  }
  params = list("rvs" = rvs, "x_range" = x_range, "y_range" = y_range,
                "x_estimation" = estimation[[1]], "y_estimation" = estimation[[2]],
                "x_true" = x_true, "y_true" = y_true)
  return(params)
}
# Draw a probability-scaled histogram of the sample on fixed axis ranges so
# the density curves overlaid later share the same coordinate system.
# FIX: corrects the user-facing y-axis label typo ("Probabilty").
add_hist <- function(x, x_range, y_range, breaks_n = 40) {
  hist(x, breaks = breaks_n, prob = TRUE,
    main = NULL,
    xlab = "X",
    ylab = "Probability density function",
    xlim = x_range,
    ylim = y_range,
    col = LIGHT_BLUE
  )
}
# Overlay the true density (dashed black) and the estimate (solid dark red)
# on the current plot.
add_lines = function(x_estimation, y_estimation, x_true, y_true) {
  lines(x_true, y_true, col = "black", lty = "dashed", lwd = 4)
  lines(x_estimation, y_estimation, col = DARK_RED, lwd = 5)
}
# Build a zero-argument plotting closure from precomputed plot parameters.
#
# Capturing the params in a closure lets renderPlot() redraw without
# recomputing the sample/estimation.
#
# @param params List as returned by get_density_params().
# @return A function() that draws histogram + true/estimated curves + legend.
density_plot_generator <- function(params) {
  rvs = params[["rvs"]]
  x_range = params[["x_range"]]
  y_range = params[["y_range"]]
  x_estimation = params[["x_estimation"]]
  y_estimation = params[["y_estimation"]]
  x_true = params[["x_true"]]
  y_true = params[["y_true"]]
  f <- function() {
    add_hist(rvs, x_range, y_range)
    add_lines(x_estimation, y_estimation, x_true, y_true)
    legend("topright", legend = c("True density", "Estimation"),
           col = c("black", DARK_RED), lwd = 5, inset = 0.015)
  }
  return(f)
}
# Compute the density-demo plot closure while a loading spinner is shown.
# Any error from get_density_params() propagates to the caller (the spinner
# is hidden only on success).
get_density_plot <- function(input) {
  show_spinner()
  plot_closure <- density_plot_generator(get_density_params(input))
  hide_spinner()
  plot_closure
}
|
91379bfd78ffbfd42917200eee75042ee2a4b670
|
ba49eb475d4fcd6d61655270ec34fe829657494e
|
/man/dynamic_structure.Rd
|
f7a26ec5dd74a5cb07aaba9ed8e99f9a59c02a9b
|
[] |
no_license
|
cran/Bios2cor
|
f3b630684862d4f48e7137361e39094358336f63
|
50b2948bfdd3888e6593015247419ce1f5b58d8a
|
refs/heads/master
| 2022-07-31T18:21:28.244917
| 2022-07-08T08:25:23
| 2022-07-08T08:25:23
| 101,306,648
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,370
|
rd
|
dynamic_structure.Rd
|
\name{dynamic_structure}
\Rdversion{1.1}
\alias{dynamic_structure}
\title{
Creates the data structure for the analysis of side chain dihedral angles
}
\description{
Given a structure pdb file, a trajectory dcd file and frame indices, gathers information on side chain dihedral angles in a unique structure. This structure will be used in correlation/covariation methods aimed at analyzing side chain rotational motions during molecular dynamics simulations.
}
\usage{
dynamic_structure(pdb, trj, frames=NULL)
}
\arguments{
\item{pdb}{
Filepath of the pdb file
}
\item{trj}{
Filepath of trajectory file (dcd file)
}
\item{frames}{
Indicates the selected frames for the analysis, created with the \code{seq} function (Usage: frames <- seq(from, to, by)). Default is NULL (all the frames of the trajectory are taken into account).
}
}
\value{
Returns a list of class 'structure' with the following elements containing information on the sequence, structure, trajectory and side chain dihedral angles (values and names) of the protein during the simulation:
\item{pdb}{
an object of class 'pdb' created by the \code{read.pdb} function from the \code{bio3d} package
}
\item{dcd}{
A numeric matrix of xyz coordinates with a frame per row and a Cartesian coordinate per column. Created by the \code{read.dcd} function from the \code{bio3d} package
}
\item{xyz}{
A numeric matrix of xyz coordinates with a frame per row and a Cartesian coordinate per column. For each frame, the protein coordinates have been fitted on the pdb structure using the \code{fit.xyz} function from the \code{bio3d} package
}
\item{tor}{
A numeric matrix of side chain dihedral angles with a frame per row and a dihedral angle per column. Contains only side chain dihedral angles. Created by the \code{xyz2tor} function from the \code{bio3d} package
}
\item{tor.names}{
a character vector with the names of all side chain chi dihedral angles. They are written as "M.chiN" where M is the residue number and N the dihedral angle chi (chi1, chi2,...). Alanine and Glycine residues which do not have side chain dihedral angles are omitted.
}
\item{tor.resno}{
a character vector with the residue number M of all side chain chi dihedral angles.
}
\item{tor.angle}{
a character vector with the dihedral angle (chiN) of all side chain chi dihedral angles.
}
\item{nb_torsions}{
a numeric value indicating the total number of dihedral angles
}
\item{prot.seq}{
a character vector with the sequence of the protein
}
\item{nb_residues}{
a numeric value indicating the number of residues in the protein
}
\item{nb_frames}{
a numeric value indicating the total number of selected frames
}
\item{frames}{
a numeric vector indicating the selected frames
}
}
\author{
Bruck TADDESE, Antoine GARNIER and Marie CHABBERT
}
\examples{
#Reading pdb and dcd files
pdb <- system.file("rotamer/toy_coordinates.pdb", package= "Bios2cor")
trj <- system.file("rotamer/toy_dynamics.dcd", package= "Bios2cor")
#Creating dynamic_structure object for selected frames
wanted_frames <- seq(from = 1, to = 40, by = 2)
dynamic_structure <- dynamic_structure(pdb, trj, wanted_frames)
#Creating dynamic_structure object for all the frames
#dynamic_structure <- dynamic_structure(pdb, trj)
}
|
a51f0f5d8a4ba06329bd12007283d3724a3d4b85
|
5b722b3e230f078b59c232a0554c416150232bbf
|
/man/video-images.Rd
|
8c29ee31e5dce2c30aa3727c0669df09b9f9b5bc
|
[] |
no_license
|
jsta/glatos
|
3abdd25c67455cacf04cbb7a9ca53718745ab917
|
f1e27cf63da53b2ae4c46e4b47d338c1f558d004
|
refs/heads/master
| 2022-07-29T00:04:04.783120
| 2022-07-05T18:56:00
| 2022-07-05T18:56:00
| 220,477,927
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 541
|
rd
|
video-images.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/extdata.r
\name{video images}
\alias{video images}
\title{Video frames of walleye movements in Lake Huron}
\format{Folder contains 30 sequentially labeled .png image files}
\source{
\url{http://glatos.glos.us/home/project/HECWL}
}
\usage{
system.file("extdata", "frames", package="glatos")
}
\description{
Sequential images of walleye movements in Lake Huron
for testing functionality of ffmpeg function.
}
\section{Filename}{
frames
}
\author{
Todd Hayden
}
|
ce21640568a8e6ed86464e5545b60d781f089b00
|
18e0c41ddb514c981090eb4ab2e3f358d88dbd70
|
/plot6.R
|
712963a9fd2b51ecfbaa3c2ef4d38ddfe05936d4
|
[] |
no_license
|
andhdo/coursera_ds_exdata_project2
|
dd37d018b71ea773e58ecf74166878a98cec7644
|
13499a09a90d5e012944d1c769d184eb8e95ecfe
|
refs/heads/master
| 2021-01-23T20:12:56.663660
| 2014-08-21T11:29:20
| 2014-08-21T11:29:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,364
|
r
|
plot6.R
|
# see complete source code at: https://github.com/andhdo/coursera_ds_exdata_project2
#
# Plot 6: compare motor-vehicle (ON-ROAD) PM2.5 emissions 1999-2008 between
# Baltimore City (fips 24510) and Los Angeles County (fips 06037), written
# to <output_figures_folder>/plot6.png.
# FIX: scale_colour_discrete() takes `labels`, not `label` (the original
# relied on partial argument matching).
plot6 <- function() {
  # be sure to load the files and precomputations
  source("load_data.R")
  load_prerequisites()
  # load libraries
  library(ggplot2)
  # get the specific data to plot
  NEI <- load_data_nei()
  SCC <- load_data_scc()
  # Keep ON-ROAD records for the two counties, then sum emissions per
  # year and county.
  subsetNEI <- NEI[(NEI$fips %in% c("24510", "06037")) & (NEI$type == "ON-ROAD"), ]
  processed_ds <- aggregate(Emissions ~ year + fips, data = subsetNEI, sum)
  # Open the PNG device for the output figure.
  target_file <- paste0(output_figures_folder, "/", "plot6.png")
  png(filename = target_file, width = 480, height = 480, units = "px")
  # Line-plus-point plot, one colored series per county.
  g <- ggplot(processed_ds, aes(x = year, y = Emissions, group = fips, colour = fips))
  g <- g + geom_point() + geom_line()
  g <- g + xlab("Year") + ylab("PM") + ggtitle("Motor Emissions [1999..2008] / Baltimore & LosAngeles")
  g <- g + scale_colour_discrete(labels = c("LosAngeles", "Baltimore")) # change legend labels
  print(g)
  # Close the graphics device so the file is flushed.
  dev.off()
}
|
23443270a9274d2e1c5bbee2900f3463b7f1a26a
|
7c210bf1d85690915e99065a269336126914971e
|
/data.table/arun_ex4.R
|
769e50fe6f1d96404ade2b4245f2b0bd5c2047fe
|
[] |
no_license
|
ariasmiguel/tidyverse
|
4c5e1411fa87dc3cd221c999ef4fd20d9411fb4d
|
ea1b7b40e0e0e18797fc087ab1f14e86eab18163
|
refs/heads/master
| 2021-01-21T13:35:03.597505
| 2017-10-04T15:30:40
| 2017-10-04T15:30:40
| 102,133,802
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,455
|
r
|
arun_ex4.R
|
# -----------------------------------------------------------------
set.seed(20170703L)
DF1 = data.frame(id = sample(1:2, 9L, TRUE),
code = sample(letters[1:3], 9, TRUE),
valA = 1:9, valB = 10:18,
stringsAsFactors = FALSE)
DF2 = data.frame(id = c(3L, 1L, 1L, 2L, 3L),
code = c("b", "a", "c", "c", "d"),
mul = 5:1, stringsAsFactors = FALSE)
#create data tables
library(data.table)
DT1 <- as.data.table(DF1)
DT2 <- as.data.table(DF2)
#----------------------------------------------------------------------
# 1. In DT1, on those rows where id != 2, replace valA and valB with valA+1 and valB+1 respectively.
ans1 <- DT1[id != 2, `:=`(valA = valA+1L,
valB = valB+1L)]
ans1
# 2. On those rows where id == 2, replace valA with valB if valA is <= 7, else with valB^2.
ans2 <- DT1[id == 2, val := ifelse(valA <= 7L, valB, as.integer(valB^2))]
ans2
# 3. Create a new column `tmp` and assign `NA` to it by reference.
ans3 <- DT1[, temp := NA]
ans3
# 4. What’s the type (or class) of `tmp` column that we just created?
class(DT1[, temp])
class(DT1$temp)
# 5. Do DT1[, tmp := NULL] and observe the output.. What’s the difference compared to (3)?
ans3[, temp := NULL][]
# 6. Create a new column named “rank” which takes value 1 where code == “a”, 2 where code == “b” and 3 where code == “c”.
# Do it in as many different ways you could think of :-).
ans6 <- DT1[code == "a", rank := 1L][
code == "b", rank := 2L][
code == "c", rank := 3L]
ans6 <- DT1[, rank := as.integer(factor(code))]
tmp_dt <- data.table(keys=c("a", "b", "c"), vals =1:3)
ans6 <- DT1[tmp_dt, on=.(code=keys), rank := i.vals]
# 7. Let DT3 = DT2[!duplicated(code)]. Update both valA and valB columns with ‘valA*mul’ and ‘valB*mul’
# wherever DT3$code matches DT1$code.. What happens to those rows where there are no matches in DT1? Why?
DT3 = DT2[!duplicated(code)]
ans7 <- DT1[DT3, on=.(code), c("valA", "valB") := lapply(.SD, `*`, i.mul), .SDcols= valA:valB]
ans7
# 8. Add the column ‘mul’ from DT2 to DT1 by reference where DT2$id matches DT1$id.
# What happens to those values where DT2$id has the same value occurring more than once?
ans8 <- DT1[DT2, on=.(id), mul := i.mul]
ans8
# 9. Replace DT2$mul with NA where DT1$id, DT1$code matches DT2$id, DT2$code.
ans9 <- DT2[DT1, on=.(id, code), mul := NA]
ans9
|
30356faa57d0f6d409f39cae86aa140a5e5966e3
|
c9a347ea394f398128c19688085292f80b35993b
|
/Plot_prop_change.r
|
f71063ac74861ccbbc3f7bfe6726f4302e86874b
|
[] |
no_license
|
liuzh811/ForestDegradationWestAfrica
|
f1c780a09b3670588a8ab87e06bba1365d6b7b82
|
0e6e9a69281d7fb99d7218084c41cafe531a5917
|
refs/heads/master
| 2021-01-17T08:12:30.719691
| 2016-10-02T03:43:42
| 2016-10-02T03:43:42
| 40,625,029
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,054
|
r
|
Plot_prop_change.r
|
# For each vegetation index (TCW/EVI/NDWI/TCG/TCA), cross-tabulate land
# cover class with trend direction and plot the percentage of each class
# showing each trend, faceted by index x land cover.
# Encoding trick used throughout: cell value = lc_class * 100 + trend_code,
# so floor(value / 100) recovers the class and the remainder the trend.
clasnames <- c("Water",
               "RainForest",
               "Woody Savannas",
               "Savannas",
               "Grasslands",
               "Cropland/Natural Mosaic",
               "Others")
#after run calculating all indices and plot trend
# TCW: combine class and trend rasters, tabulate, derive per-class percents.
# NOTE(review): the hard-coded [-29,] drops one freq() row (presumably the
# NA row) in every block — verify it is row 29 for all five rasters.
lc_rc1 = lc_rc*100 + TCW.trd2.grd
lc_rc1.df = data.frame(freq(lc_rc1))[-29,]
lc_rc1.df$lc = floor(lc_rc1.df$value/100)
lc_rc1.df$trend = lc_rc1.df$value - lc_rc1.df$lc*100
lc_sum = aggregate(count~lc, data = lc_rc1.df, FUN = "sum")
lc_rc1.df$prop = 100*lc_rc1.df$count/rep(lc_sum$count, each = 4)
# EVI: same computation.
lc_rc2 = lc_rc*100 + EVI.trd2.grd
lc_rc2.df = data.frame(freq(lc_rc2))[-29,]
lc_rc2.df$lc = floor(lc_rc2.df$value/100)
lc_rc2.df$trend = lc_rc2.df$value - lc_rc2.df$lc*100
lc_sum = aggregate(count~lc, data = lc_rc2.df, FUN = "sum")
lc_rc2.df$prop = 100*lc_rc2.df$count/rep(lc_sum$count, each = 4)
# NDWI: same computation.
lc_rc3 = lc_rc*100 + NDWI.trd2.grd
lc_rc3.df = data.frame(freq(lc_rc3))[-29,]
lc_rc3.df$lc = floor(lc_rc3.df$value/100)
lc_rc3.df$trend = lc_rc3.df$value - lc_rc3.df$lc*100
lc_sum = aggregate(count~lc, data = lc_rc3.df, FUN = "sum")
lc_rc3.df$prop = 100*lc_rc3.df$count/rep(lc_sum$count, each = 4)
# TCG: same computation.
lc_rc4 = lc_rc*100 + TCG.trd2.grd
lc_rc4.df = data.frame(freq(lc_rc4))[-29,]
lc_rc4.df$lc = floor(lc_rc4.df$value/100)
lc_rc4.df$trend = lc_rc4.df$value - lc_rc4.df$lc*100
lc_sum = aggregate(count~lc, data = lc_rc4.df, FUN = "sum")
lc_rc4.df$prop = 100*lc_rc4.df$count/rep(lc_sum$count, each = 4)
# TCA: same computation.
lc_rc5 = lc_rc*100 + TCA.trd2.grd
lc_rc5.df = data.frame(freq(lc_rc5))[-29,]
lc_rc5.df$lc = floor(lc_rc5.df$value/100)
lc_rc5.df$trend = lc_rc5.df$value - lc_rc5.df$lc*100
lc_sum = aggregate(count~lc, data = lc_rc5.df, FUN = "sum")
lc_rc5.df$prop = 100*lc_rc5.df$count/rep(lc_sum$count, each = 4)
# Stack the five index tables into one long frame tagged by VI.
lc_rc.df = rbind(data.frame(lc_rc1.df, VI = rep("TCW", nrow(lc_rc1.df))),
                 data.frame(lc_rc2.df, VI = rep("EVI", nrow(lc_rc2.df))),
                 data.frame(lc_rc3.df, VI = rep("NDWI", nrow(lc_rc3.df))),
                 data.frame(lc_rc4.df, VI = rep("TCG", nrow(lc_rc4.df))),
                 data.frame(lc_rc5.df, VI = rep("TCA", nrow(lc_rc5.df))))
lc_rc.df = lc_rc.df[-which(lc_rc.df$lc == 1 | lc_rc.df$lc == 7| lc_rc.df$lc == 5), ] #Remove class 1: water; and 7: other class
lc_rc.df = lc_rc.df[-which(lc_rc.df$trend == 4), ] #remove Not Calculated classes
library(reshape2)
library(ggplot2)
# Long format for ggplot; recode trend/land-cover codes to readable labels.
lc_rc1.df.long = melt(lc_rc.df[,c("lc","trend","prop","VI")], id.vars=c("lc", "trend","VI"))
lc_rc1.df.long$trend = factor(lc_rc1.df.long$trend)
levels(lc_rc1.df.long$trend) <- c("Negative","Positive", "No Trend")
lc_rc1.df.long$lc = factor(lc_rc1.df.long$lc)
levels(lc_rc1.df.long$lc) <- clasnames[as.numeric(levels(lc_rc1.df.long$lc))]
# Bar chart: percentage per trend, faceted by index (rows) x class (cols).
ggplot(data=lc_rc1.df.long, aes(x=trend, y=value, fill=trend)) +
  facet_grid(VI ~ lc) +
  geom_bar(stat="identity", position=position_dodge(), colour="black") +
  xlab("") + ylab("Percentage of Land Cover") +
  theme(legend.position="none")
ggsave(".\\NBAR_results3\\trend_biomes.png", width = 10, height = 7.5, units = "in")
|
890e1d9dda93dc3830d0e4eb9c039fe51bc2f61b
|
86b335a36147f0895e82ba13dc122695d1fa155a
|
/clusterProfiler.R
|
4613cc65f22b422f5dfa8d91082612c0ec3a606e
|
[] |
no_license
|
Gig77/ccri-iamp21
|
65eaf17d2ce59683d944458ff62806232cf1a3c5
|
395bb6517c8cbf7c83d68d65869bf19021e6df93
|
refs/heads/master
| 2021-01-17T18:23:29.641089
| 2016-07-27T13:02:22
| 2016-07-27T13:02:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,986
|
r
|
clusterProfiler.R
|
# GO enrichment and GSEA of iAMP-vs-DS differential expression results
# (DESeq output) using clusterProfiler, with Ensembl->Entrez id mapping
# via biomaRt (GRCh37 / Ensembl v75).
# install
source("http://bioconductor.org/biocLite.R")
biocLite("clusterProfiler")
biocLite("org.Hs.eg.db")
library(clusterProfiler)
# example data
library(DOSE)
data(geneList)
# get genes IDs and their log2 fold changes
deseq <- read.delim("/mnt/projects/iamp/results/deseq/iAMP-vs-DS.tsv")
# merge Entrez gene ID
library(biomaRt)
mart <- useMart(biomart="ENSEMBL_MART_ENSEMBL", host="grch37.ensembl.org", path="/biomart/martservice" ,dataset="hsapiens_gene_ensembl") # GRCh37, v75
bm <- getBM(attributes=c("ensembl_gene_id", "entrezgene"), mart=mart)
deseq <- unique(merge(deseq, bm, by.x="id", by.y="ensembl_gene_id", all.x = TRUE))
# GO enrichment
library(GO.db)
# Differential = |log2FC| >= 2; background = all genes with nonzero mean.
genes.diff <-unique(deseq$entrezgene[!is.na(deseq$entrezgene) & abs(deseq$log2FoldChange) >= 2]) # differentially expressed genes
genes.bg <- unique(deseq$entrezgene[!is.na(deseq$entrezgene) & deseq$baseMean>0]) # all expresed genes
ego <- enrichGO(
  gene = genes.diff,
  universe = genes.bg,
  organism = "human",
  ont = "BP",
  pAdjustMethod = "BH",
  pvalueCutoff = 0.01,
  qvalueCutoff = 0.05,
  readable = TRUE
)
head(summary(ego))
# GO gene set enrichment
# Ranked gene list: log2FC, named by Entrez id, sorted decreasing (as GSEA
# requires).
valid <- !is.na(deseq$pvalue) & !is.na(deseq$log2FoldChange) & !is.na(deseq$entrezgene) & !duplicated(deseq$entrezgene)
genes <- deseq$log2FoldChange[valid]
names(genes) <- deseq$entrezgene[valid]
genes <- sort(genes, decreasing=TRUE)
# NOTE: clusterProfiler produces non-sensical results, because all p-values are the same (0.009999)?
# Also, if I inject 200 genes with identical BP GO term GO:0007507 with the two lines below, this gene set does not come out as significant
#gogenes <- getBM(attributes=c('entrezgene', 'go_id'), filters = 'go_id', values = 'GO:0007507', mart = mart)
#names(genes[1:200]) <- gogenes
# NOTE(review): nPerm = 100 is very low for GSEA p-values — may explain the
# identical p-values flagged above; consider raising it.
ego2 <- gseGO(
  geneList = genes,
  organism = "human",
  ont = "BP",
  nPerm = 100,
  minGSSize = 10,
  pvalueCutoff = 0.1,
  verbose = TRUE)
result <- summary(ego2)
head(result[order(result$pvalue),], n=20)
|
c74bf959e0bbcb4c5af577c8f8ad5366ef6c3c7b
|
ddd4c23a99df404114ca93254b9925edd941e5cc
|
/R/rm.R
|
19fda9c8a909a33504e51fcd267be4a688ecf91e
|
[] |
no_license
|
cran/subselect
|
80a3ecdc41983ce0cdb7a2fe3e9b843541872642
|
803cc11ee30c42ca4bc40a999ce4ebba5493e50c
|
refs/heads/master
| 2023-07-21T07:10:13.194980
| 2023-07-10T22:30:12
| 2023-07-11T07:34:43
| 17,700,176
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,418
|
r
|
rm.R
|
# Matrix (RM) correlation between a data set and its regression on subsets
# of its variables, computed from a variance-covariance (or correlation)
# matrix.
#
# @param mat Square covariance/correlation matrix. A non-square matrix is
#   assumed to be raw data and replaced by its correlation matrix (with a
#   warning).
# @param indices Integer variable indices: a vector (one subset), a matrix
#   (one subset per row), or a 3-d array (a matrix of subsets per slice).
# @return The RM coefficient(s), shaped like the subset structure.
rm.coef<-function(mat, indices)
{
  # error checking
  if (sum(!(as.integer(indices) == indices)) > 0) stop("\n The variable indices must be integers")
  if (!is.matrix(mat)) {
    stop("Data is missing or is not given in matrix form")}
  if (dim(mat)[1] != dim(mat)[2]) {
    mat<-cor(mat)
    warning("Data must be given as a covariance or correlation matrix. \n It has been assumed that you wanted the correlation matrix of the \n data matrix which was supplied.")
  }
  trace_of <- function(m) sum(diag(m))
  # RM for a single subset: sqrt( tr(S^2[k,k] S[k,k]^-1) / tr(S) ).
  rm_single <- function(subset_idx) {
    numerator <- trace_of((mat %*% mat)[subset_idx, subset_idx] %*% solve(mat[subset_idx, subset_idx]))
    sqrt(numerator / trace_of(mat))
  }
  ndim <- length(dim(indices))
  if (ndim > 2) {
    # 3-d array: one matrix of subsets per slice.
    apply(indices, 3, function(subsets) apply(subsets, 1, rm_single))
  } else if (ndim == 2) {
    # Matrix: one subset per row.
    apply(indices, 1, rm_single)
  } else {
    # Plain vector: a single subset.
    rm_single(indices)
  }
}
|
65a1d3af71b415e853618de9f778a6b1b977f5e3
|
aec6b08ba76e9edf6ae2ae410face966f624fd5e
|
/Module4.R
|
923954baa2912ce1edb14e25ba80f3dd7764534e
|
[] |
no_license
|
mahesh-ml/R-data-analysis
|
46d339d1dbad5fa5ca8ec2e89f0e6456dda89948
|
3bdd637f32a97e65f5c7b928c03dccf920b3873a
|
refs/heads/main
| 2023-01-25T02:04:53.374270
| 2020-12-06T17:42:38
| 2020-12-06T17:42:38
| 318,367,407
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,911
|
r
|
Module4.R
|
#Module 4 Problem 1 Divide 743 by 2 and obtain the result such that the output gives us a value without the decimal point.
a<- 743%/%2
a
#Module 4 Problem 2 Print a 3*4 array of three dimensions, which has the input of vectors sequencing from 13554:13590 and index the second row and third column element of third dimension of the array.
a <- matrix(data=c(13554:13590),nrow=3,ncol=4)
a
a[2,3:3]
#Module 4 Problem 3: What is the command to install a package in R and how do you invoke it?
install.packages('readxl')
library('readxl')
#Module 4 Problem 4: Create an if statement that prints the name of the team that won.
# Where Team A scored 678 (out of 700), Team B scored 666 (out of 700).
a_score <- 678/700
b_score <- 666/700
if (a_score>b_score) {
print('team a won')
}else {
print('team b won')
}
#Module 4 Problem 5: Check whether the given number is positive and divisible by 5 or not using conditional statements
#Given number: 468
given_number <- 468
if(given_number>0) {
print("given number is positive")
x(given_number)
} else {
print("given number is negative")
x(given_number)
}
x <-function(a) {
if(a %% 5 ==0) {
print("given number is divisible by 5")
} else {
print("given number is not divisible by 5")
}
}
#Module 4 Problem 6: calculate normalization
normalized_value <- function(a) {
return ((a - min(a)) / (max(a) - min(a)))
}
a <- c(33,434,242,434354,
545,54,56,56,4534,5342,24,5,65,65,767,
8,78,79,79,64,635,3,4,35,57,678,5,86,
86,457,546,34,345,34,3,4,65,6,57,347)
a
#Module 4 Problem 7: calculate normalization
normalized_value(a)
#Module 4 Problem 8: Achieve a Boxplot, Histogram and scatter plot on a given data ‘Q1’. Use any column/columns to get the respective outputs.
data <- read_csv(file.choose())
data
boxplot(data$workex)
hist(data$workex)
plot(data$gmat,data$workex,xlab = 'GMAT' , ylab='WORKEX')
|
bf4f161ce46fb83892082b534e4c455f384b5d07
|
cedbb22841ea152a733ccb9aff3ffa66560e5428
|
/2-1-2-reshaping-data.R
|
e31cdd5e96f3e96996d0a8ff54f012ad328f2d3d
|
[] |
no_license
|
andre-lie/course-6-data-wrangling
|
98a50aab3458ae1ef53cadd3f81c616308220f5e
|
825aa9b47e2ae08e560c9d5a9f23d66e8f6c2598
|
refs/heads/master
| 2023-01-21T09:11:49.995948
| 2020-11-28T06:14:28
| 2020-11-28T06:14:28
| 307,327,160
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,108
|
r
|
2-1-2-reshaping-data.R
|
# original wide data
library(tidyverse)
path <- system.file("extdata",package="dslabs")
filename <- file.path(path, "fertility-two-countries-example.csv")
wide_data <-read_csv(filename)
# tidy data from dslabs
library(dslabs)
data("gapminder")
tidy_data <- gapminder %>%
filter(country %in% c("South Korea","Germany")) %>%
select(country,year,fertility)
# gather wide data to make new tidy data
new_tidy_data <- wide_data %>%
gather(year,fertility, `1960`:`2015`)
head(new_tidy_data)
# gather all columns except country
new_tidy_data <- wide_data %>%
gather(year,fertility, -country)
# gather treats column names as characters by default
class(tidy_data$year)
class(new_tidy_data$year)
# convert gathered column names to numeric
new_tidy_data <- wide_data %>%
gather(year, fertility, - country, convert=TRUE)
class(new_tidy_data$year)
# ggplot works on new tidy data
new_tidy_data %>%
ggplot(aes(year,fertility,color=country)) +
geom_point()
# spread tidy data to generate wide data
new_wide_data <- new_tidy_data %>% spread(year, fertility)
select(new_wide_data,country, `1960`:`1967`)
|
d4f34e47dcca2a8a8b2146b086d608c7c2cf13eb
|
442f9770e53101c4461c9031dfd69d3dfa69a757
|
/man/plotMultiHist.Rd
|
57d1d3f968bd262aeaa4c8cecb806c3e91a465ef
|
[] |
no_license
|
cran/WGCNA
|
edaf87638c6cf0c9105dbb67637ebe059f598cb1
|
31f538c2f9d7d48f35f7098b4effe17331357d0d
|
refs/heads/master
| 2023-01-25T02:34:33.041279
| 2023-01-18T11:10:05
| 2023-01-18T11:10:05
| 17,694,095
| 48
| 54
| null | 2019-08-17T13:25:00
| 2014-03-13T03:47:40
|
R
|
UTF-8
|
R
| false
| false
| 1,649
|
rd
|
plotMultiHist.Rd
|
\name{plotMultiHist}
\alias{plotMultiHist}
\title{
Plot multiple histograms in a single plot
}
\description{
This function plots density or cumulative distribution function of multiple histograms in a single plot, using
lines.
}
\usage{
plotMultiHist(
data,
nBreaks = 100,
col = 1:length(data),
scaleBy = c("area", "max", "none"),
cumulative = FALSE,
...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{data}{
A list in which each component corresponds to a separate histogram and is a vector of values to be shown in
each histogram.
}
\item{nBreaks}{
Number of breaks in the combined plot.
}
\item{col}{
Color of the lines. Should be a vector of the same length as \code{data}.
}
\item{scaleBy}{
Method to make the different histograms comparable. The counts are scaled such that either the total area or
the maximum are the same for all histograms, or the histograms are shown without scaling.
}
\item{cumulative}{
Logical: should the cumulative distribution be shown instead of the density?
}
\item{\dots}{
Other graphical arguments.
}
}
\value{
Invisibly,
\item{x}{A list with one component per histogram (component of \code{data}), giving the bin midpoints}
\item{y}{A list with one component per histogram (component of \code{data}), giving the scaled bin counts}
}
\author{
Peter Langfelder
}
\note{
This function is still experimental and behavior may change in the future.
}
\seealso{
\code{\link{hist}}
}
\examples{
data = list(rnorm(1000), rnorm(10000) + 2);
plotMultiHist(data, xlab = "value", ylab = "scaled density")
}
\keyword{misc}% __ONLY ONE__ keyword per line
|
16e63a850a1cced72e9c30676e6e269d02abbf6c
|
7416b60a1a57cf2ae49d9d75ab773e03394b9256
|
/man/predsu_pred.Rd
|
c5f68c7ef913fe7c9bc864bf4792ae065f44281d
|
[] |
no_license
|
olssol/predSurv
|
b50532ec7ab8d0732892e5fb9f6226a8ffc9f504
|
ebb8b8303f5c1ac8f9f719b6d252ebfe4ed39ea1
|
refs/heads/master
| 2023-04-03T14:01:11.719289
| 2021-04-25T20:21:56
| 2021-04-25T20:21:56
| 361,344,870
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 345
|
rd
|
predsu_pred.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/predSurv_tools.R
\name{predsu_pred}
\alias{predsu_pred}
\title{Predict survival time}
\usage{
predsu_pred(prob_ints, condition_t = 0, option = c("upper", "lower"))
}
\description{
Predict survival time conditioning on censoring time based on interval
probabilities
}
|
89c2f05acdba21f0a82a43aff3e92d5864547853
|
b17f27d710cab5ce27f51c41307af2df6c349fec
|
/R/iProFun.eFDR.R
|
af440bb13aa0d36a3cfd440c9709f530aefaeb23
|
[] |
no_license
|
JiayiJi/iProFun-1
|
12a45cc2462408a0d046f0f8835bc4137b4345fb
|
e3d0d63cf4e024429abb8527232ddb8c6f7abda7
|
refs/heads/master
| 2023-04-24T03:51:34.990871
| 2021-05-02T20:24:58
| 2021-05-02T20:24:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,372
|
r
|
iProFun.eFDR.R
|
#' Summarize the association probabilities for an outcome.
#'
#' Summarize the association probabilities for an outcome across different association patterns.
#' The summation includes variables with missingness in some data types.
#' @export iProFun.sum.prob.1y
#' @param prob The posterior probabilieis of association patterns across multiple data types,
#' including association patterns with missing data in some of the data types. The output of iProFun.prob can be used here.
#' @param which.y The index for one outcome data type that the summation is for.
#' @param NoProbXIndex The index for the predictor data type(s) that are not considered
#' for calculating posterior probabilities of association patterns.
#' @return
#' \item{prob.all:}{The summarized posterior probabilities for all outcome variables in the data type of interest
#' (regardless of missingness in other data types).}
#'
#'
# function to get prob for 1y, with missing
iProFun.sum.prob.1y <- function (prob, which.y=1, NoProbXIndex=NULL) {
xlength=length(prob)
x.prob=setdiff(1:xlength, NoProbXIndex)
# calculate prob on 1y
prob.all <- vector("list", length(x.prob))
for (p in 1:length(x.prob)) {
k= x.prob[p]
Q_index=prob[[k]]$Config[,which.y]
prob.all[[p]]=apply(prob[[k]]$PostProb[,which(Q_index==1)], 1, sum)
config.miss=prob[[k]]$Config.miss
if (is.null(config.miss)==F) {
for (j in 1:length(config.miss)) {
#print(j)
Q_index1= config.miss[[j]][, colnames(config.miss[[j]])==as.character(which.y)]
prob.temp=prob[[k]]$PostProb.miss[[j]]
if (length(which(Q_index1==1))==1 ) {
prob.miss1=prob.temp[,which(Q_index1==1)]
prob.all[[p]][row.match(data.frame(prob[[k]]$xName.miss[[j]]), data.frame(prob[[k]]$xName_J))]=prob.miss1
}
if (length(which(Q_index1==1))>1 & nrow(prob.temp)>1) {
prob.miss1=apply(as.matrix(prob.temp[,which(Q_index1==1)]), 1, sum)
prob.all[[p]][row.match(data.frame(prob[[k]]$xName.miss[[j]]), data.frame(prob[[k]]$xName_J))]=prob.miss1
}
}
}# end (j), which fills the list of missing values
}
return(prob.all)
}
#' Fast estimation of FDR without permutation procedure for one data type
#'
#' @export estFDR
#' @param ProbPattern1y1x Posterior probabilities for associations between one predictor data type and one outcome data type.
#' @param grids grids specify the searching grids to find significant associations
#' @return
#' \item{estFDR:}{Estimated FDR values by different posterior probability cutoffs considered in grids}
estFDR<-function(ProbPattern1y1x, grids = seq(0.01, 0.99, by=0.01)) {
# estFDR= sum_i (1-Prob_i) 1(Prob_i>lambda) over No. Prob_i>lambda
estFDR=NULL
for (i in 1:length(grids) ) {
denominator= sum(ProbPattern1y1x>grids[i], na.rm=T)
numerator = sum ( (1-ProbPattern1y1x)*(ProbPattern1y1x>grids[i]), na.rm=T)
estFDR=c(estFDR, numerator/denominator)
}
return(estFDR)
}
#' iProFun eFDR assessment for one outcome data type.
#'
#'iProFun empirical false discovery rate (eFDR) assessment for one outcome data type.
#' @export iProFun.eFDR.1y
#' @param yList yList is a list of data matrix for outcomes.
#' @param xList xList is a list of data matrix for predictors.
#' @param covariates covariates is a list of data matrix for covariate.
#' @param pi1 pi1 is pre-specified prior of proportion of non-null statistics. It cane be a number in (0, 1) or a vector
#' of numbers with length of ylist.
#' @param var.ID var.ID gives the variable name (e.g. gene/protein name) to match different data types.
#' If IDs are not specified, the first columns will be considered as ID variable.
#' @param var.ID.additional var.ID.additional allows to output additional variable names from the input.
#' Often helpful if multiple rows (e.g. probes) are considered per gene to allow clear index of the rows.
#' @param permutate_number Number of permutation, default 10
#' @param reg.all The regression summary (unformatted) such as from iProFun.reg.
#' @param which.y The index for one outcome data type that the eFDR assessment is for.
#' @param NoProbXIndex NoProbXIndex allows users to provide the index for the predictor data type(s) that are not considered
#' for calculating posterior probabilities of association patterns.
#' @param fdr norminal false discover rate, default at 0.1
#' @param PostCut PostCut specifies minimal posterior probabilty cutoff to be considered as
#' significant, default as 0.75
#' @param filter filter is a vector with the same length of xList, taking values of 1, -1, 0 or NULL.
#' "NULL" is default imposes no filtering. 1" indicates that an association is considered for
#' significance only if its significant associations are positive across all outcome platforms. "-1" indicates
#' that an association is considered
#' for significance only if its significant associations are negative across all outcome platforms. "0" indicates
#' that an association is considered for significance only if its significant association across all outcome
#' platforms preserve consistent directions (either positive or negative).
#' @param grids Grids specify the searching grids for significant associations
#' @param seed seed allows users to externally assign seed to replicate results.
#' @return
#' \item{eFDR.grid:}{eFDR by the grid of posterior probability cutoffs.}
#' \item{fdr_cutPob:}{the cutoff values for pre-specified eFDR rate and the posterior probabilities for a pair of
#' data types based on permutation.}
#' \item{No.Identified.filter:}{the number of identified variables for each pair of data types.}
#' \item{No.Identified.no.filter:}{the number of identified variables for each pair of data types.}
#' \item{Gene_fdr:}{A table summarizing the posterior probabilities (PostProb), the eFDR (eFDR.no.filter),
#' the significance under different criteria (nominal FDR, PostProb cutoffs and filter) for each variable (e.g. gene)
#' under consideration.}
#'
iProFun.eFDR.1y= function(reg.all, which.y, yList, xList, covariates, pi1,
NoProbXIndex=NULL,filter=NULL,
permutate_number=10,
grids = seq(0.01, 0.99, by=0.01),
fdr = 0.1, PostCut=0.75,
var.ID=c("Gene_ID"),
var.ID.additional=NULL, seed=NULL) {
# NoProbXIndex=NULL;filter=c(1,0);
# permutate_number=1;
# grids = seq(0.01, 0.99, by=0.01);
# fdr = 0.1; PostCut=0.75;
# var.ID=c("Gene_ID");
# var.ID.additional=NULL; seed=NULL
xlength=length(xList)
x.prob=setdiff(1:xlength, NoProbXIndex)
xlength2=length(x.prob)
# Obtain posterior probabiliy from original data - including missing
sum=multi.omic.reg.summary(reg.out.list=reg.all, var.ID=var.ID)
prob=iProFun.prob(Reg.Sum=sum, NoProbXIndex=NoProbXIndex, pi1=pi1)
prob1y=iProFun.sum.prob.1y(prob=prob, which.y=which.y, NoProbXIndex=NoProbXIndex)
count_orig_grid=lapply(1: xlength2, function(p)
sapply(1:length(grids), function(f)
sum(prob1y[[p]]>grids[f], na.rm=T)))
# permutation
perm.reg.all=reg.all
permProb1y=vector("list", xlength2)
for (perm in 1: permutate_number) {
print(c("perm", perm))
if (is.null(seed)){seed.p=NULL}
if (is.null(seed)==F) {seed.p =as.integer((seed+perm)*978)}
ft1=iProFun.reg.1y(yList.1y=yList[[which.y]], xList=xList, covariates.1y=covariates[[which.y]], permutation=T,
var.ID=var.ID,
var.ID.additional=var.ID.additional, seed=seed.p)
perm.reg.all[[which.y]]=ft1
sum1=multi.omic.reg.summary(reg.out.list=perm.reg.all, var.ID=var.ID)
prob1=iProFun.prob(Reg.Sum=sum1, NoProbXIndex=NoProbXIndex, pi1=pi1)
perm1y=iProFun.sum.prob.1y(prob=prob1, which.y=which.y, NoProbXIndex=NoProbXIndex)
for (p in 1:xlength2) {
permProb1y[[p]]=cbind(permProb1y[[p]], perm1y[[p]])
}
if( perm%%10 ==0) {print(paste0("Completed Permutation ", perm-9, " to ", perm))}
}
# calculate the number of genes significant at each threshold level
count_perm_grid=lapply(1:xlength2, function(p)
sapply(1:length(grids), function(f) sum(permProb1y[[p]]>grids[f], na.rm=T)/permutate_number ))
# calculate the eFDR on a grid of prob
eFDR.grid=lapply(1:xlength2, function(p)
count_perm_grid[[p]]/count_orig_grid[[p]])
AboveCut=which(grids>=PostCut)
grid_PostCut= AboveCut[which.min(abs(AboveCut- PostCut))]
fdr_cut=lapply(eFDR.grid, function(f)
max(min(which(f<fdr), na.rm=T), grid_PostCut))
fdr_cutPob=lapply(1:xlength2, function(f) grids[fdr_cut[[f]]] )
No.Identified.no.filter=lapply(1:xlength2, function(f)
count_orig_grid[[f]]
[fdr_cut[[f]] ])
# calculate eFDR based on cutoff
eFDR.no.filter=vector("list", xlength2);
for (f in 1:xlength2) {
t=length(prob1y[[f]])
t2=rep(NA, t)
t2[which(is.na(prob1y[[f]])==F)]=sapply(which(is.na(prob1y[[f]])==F), function(g) tail(which(prob1y[[f]][g]>grids), n=1))
t2[sapply(t2, function(x) length(x)==0)] <- NA
t2=unlist(t2)
eFDR.no.filter[[f]]=eFDR.grid[[f]][t2]
eFDR.no.filter[[f]][which(is.na(prob1y[[f]])==F & is.na(t2))] =1
}
# add filter
x_filter_gene=vector("list", xlength2)
for (j in 1: xlength2){
k=x.prob[j]
betas_J=prob[[k]]$betas_J
if (is.null(filter[j]) ){ # no requirement
x_filter_gene[[j]]=seq(1, nrow(betas_J))
} else {
temp1=which(sapply(1:nrow(betas_J), function(f)
any(betas_J[f,][(eFDR.no.filter[[j]][f]<fdr)]<=0, na.rm=T))==F)
temp2=which(sapply(1:nrow(betas_J), function(f)
any(betas_J[f,][(eFDR.no.filter[[j]][f]<fdr)]>=0, na.rm=T))==F)
if (filter[j] == 1) {x_filter_gene[[j]]= temp1} # all positive beta among signifiant results
if (filter[j] == -1) { x_filter_gene[[j]]=temp2} # all negative beta among signifiant results
if (filter[j] == 0) {x_filter_gene[[j]] = sort( union(temp1, temp2))} # all positive or all negative
}
}
No.Identified.filter=lapply(1: xlength2, function(p)
sum(prob1y[[p]][x_filter_gene[[p]]]>fdr_cutPob[[p]], na.rm=T))
Gene_fdr=vector("list", xlength2);
for (j in 1:xlength2) {
k=x.prob[j]
# no filter
temp1=temp2=rep(0, nrow(prob[[k]]$xName_J))
temp1[is.na(prob1y[[j]])]=temp2[is.na(prob1y[[j]])]=NA
temp1[which(prob1y[[j]]>fdr_cutPob[[j]])]=1
# filter
sig=intersect(x_filter_gene[[j]], which(prob1y[[j]]>fdr_cutPob[[j]]))
temp2[sig]=1
Gene_fdr[[j]]=data.frame(xName=prob[[k]]$xName_J, PostProb=prob1y[[j]], eFDR.no.filter=eFDR.no.filter[[j]], sig.no.filter=temp1, sig.filter=temp2)
}
eFDR_result=list(eFDR.grid=eFDR.grid, fdr_cutPob=fdr_cutPob, No.Identified.filter=No.Identified.filter, No.Identified.no.filter=No.Identified.no.filter, Gene_fdr=Gene_fdr)
return(eFDR_result)
}
#' iProFun eFDR assessment based on permutation for multiple outcome data type.
#'
#' iProFun empirical false discovery rate (eFDR) assessment based on permutation for multiple outcome data type.
#' @export iProFun.eFDR
#' @param yList yList is a list of data matrix for outcomes.
#' @param xList xList is a list of data matrix for predictors.
#' @param covariates covariates is a list of data matrix for covariate.
#' @param pi1 pi1 is pre-specified prior of proportion of non-null statistics. It cane be a number in (0, 1) or a vector
#' of numbers with length of ylist.
#' @param var.ID var.ID gives the variable name (e.g. gene/protein name) to match different data types.
#' If IDs are not specified, the first columns will be considered as ID variable.
#' @param var.ID.additional var.ID.additional allows to output additional variable names from the input.
#' Often helpful if multiple rows (e.g. probes) are considered per gene to allow clear index of the rows.
#' @param permutate_number Number of permutation, default 10
#' @param reg.all The regression summary (unformatted) such as from iProFun.reg.
#' @param NoProbXIndex NoProbXIndex allows users to provide the index for the predictor data type(s) that are not considered
#' for calculating posterior probabilities of association patterns.
#' @param fdr norminal false discover rate, default at 0.1
#' @param PostCut PostCut specifies minimal posterior probabilty cutoff to be considered as
#' significant, default as 0.75
#' @param filter filter is a vector with the same length of xList, taking values of 1, -1, 0 or NULL.
#' "NULL" is default imposes no filtering. 1" indicates that an association is considered for
#' significance only if its significant associations are positive across all outcome platforms. "-1" indicates
#' that an association is considered
#' for significance only if its significant associations are negative across all outcome platforms. "0" indicates
#' that an association is considered for significance only if its significant association across all outcome
#' platforms preserve consistent directions (either positive or negative).
#' @param grids Grids specify the searching grids for significant associations
#' @param seed seed allows users to externally assign seed to replicate results.
#' @return
#' \item{eFDR.grid:}{eFDR by the grid of posterior probability cutoffs.}
#' \item{fdr_cutPob:}{the cutoff values for pre-specified eFDR rate and the posterior probabilities for a pair of
#' data types based on permutation.}
#' \item{No.Identified.filter:}{the number of identified variables for each pair of data types.}
#' \item{No.Identified.no.filter:}{the number of identified variables for each pair of data types.}
#' \item{Gene_fdr:}{A table summarizing the posterior probabilities (PostProb), the eFDR (eFDR.no.filter),
#' the significance under different criteria (nominal FDR, PostProb cutoffs and filter) for each variable (e.g. gene)
#' under consideration.}
iProFun.eFDR= function(reg.all, yList, xList, covariates, pi1,
NoProbXIndex=NULL,filter=NULL,
permutate_number=10,
grids = seq(0.01, 0.99, by=0.01),
fdr = 0.1, PostCut=0.75,
var.ID=c("Gene_ID"),
var.ID.additional=c("phospho_ID", "Hybridization", "chr"), seed=NULL) {
ylength=length(yList)
xlength=length(xList)
x.prob=setdiff(1:xlength, NoProbXIndex)
xlength2=length(x.prob)
eFDR.grid=fdr_cutPob= No.Identified.filter=No.Identified.no.filter=
PostProb=eFDR.no.filter=sig.no.filter=sig.filter=xNames=vector("list", xlength2);
for (q in 1:ylength) {
print(c("Outcome", q))
if (is.null(seed)){seed.qq=NULL}
if (is.null(seed)==F) {seed.qq =as.integer((seed+q)*4109)}
eFDR1=iProFun.eFDR.1y(reg.all=reg.all, which.y=q, yList=yList, xList=xList, covariates=covariates, pi1=pi1,
NoProbXIndex=NoProbXIndex,filter=filter,
permutate_number=permutate_number,
grids = grids,fdr =fdr, PostCut=PostCut,
var.ID=var.ID, var.ID.additional=var.ID.additional, seed=seed.qq)
for (p in 1:xlength2 ) {
eFDR.grid[[p]]= cbind(eFDR.grid[[p]], eFDR1$eFDR.grid[[p]]);
fdr_cutPob[[p]]= cbind(fdr_cutPob[[p]], eFDR1$fdr_cutPob[[p]]);
No.Identified.filter[[p]]= cbind(No.Identified.filter[[p]], eFDR1$No.Identified.filter[[p]]);
No.Identified.no.filter[[p]]= cbind(No.Identified.no.filter[[p]], eFDR1$No.Identified.no.filter[[p]]);
PostProb[[p]]= cbind(PostProb[[p]], eFDR1$Gene_fdr[[p]]$PostProb);
eFDR.no.filter[[p]]= cbind(eFDR.no.filter[[p]], eFDR1$Gene_fdr[[p]]$eFDR.no.filter);
sig.no.filter[[p]]= cbind(sig.no.filter[[p]], eFDR1$Gene_fdr[[p]]$sig.no.filter);
sig.filter[[p]]= cbind(sig.filter[[p]], eFDR1$Gene_fdr[[p]]$sig.filter);
rownames(eFDR.grid[[p]])=grids
xNames[[p]]=eFDR1$Gene_fdr[[p]][,setdiff(colnames(eFDR1$Gene_fdr[[p]]), c("PostProb", "eFDR.no.filter", "sig.no.filter", "sig.filter"))]
}
}
eFDR_result=list(eFDR.grid=eFDR.grid, fdr_cutPob=fdr_cutPob,
No.Identified.no.filter=No.Identified.no.filter,
No.Identified.filter=No.Identified.filter,
PostProb=PostProb,
eFDR.no.filter=eFDR.no.filter,
sig.no.filter=sig.no.filter,
sig.filter=sig.filter, xNames=xNames)
return(eFDR_result)
}
|
93d12c8ae1e72fbc99961b3f5d1c7fd7b75569c7
|
0d46d44af30f9ef892746f61ef24c75e38aa2c9e
|
/RInvest/stocks.R
|
df0b11e07d109815ad726b64635dcece86cd32e5
|
[] |
no_license
|
ntwo1980/Invest
|
e79acead286749d20ff8ebed2f24cce02c9cadee
|
56f8e70615277826ad48d4cfbdd6f28b12d7f3c4
|
refs/heads/master
| 2020-04-06T07:00:09.272009
| 2016-09-06T05:14:43
| 2016-09-06T05:14:43
| 65,250,554
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,109
|
r
|
stocks.R
|
source("utils.R")
load.stocks.data <- function(file) {
data <- read.table(file, header = T, sep = ",", as.is = T, fileEncoding = "utf-8",
colClasses = c("factor", "character", "Date", "numeric", "numeric", "numeric"),
col.names = c("Code", "Name", "Date", "PE", "DPE", "PB"))
data <- sqldf("select Date, Code, Name, PE, DPE, PB from data order by Date")
percentiles <- apply(data[, c("PE", "DPE", "PB")], 2, get.percentile, percentile.length = 240, only.last = T)
data <- transform(data,
PEM = median(get.data.year(PE, 1), na.rm = T),
DEPM = median(get.data.year(DPE, 1), na.rm = T),
PBM = median(get.data.year(PB, 1), na.rm = T),
ROE = round(PB / PE, 2),
PEP = percentiles[, 1],
DPEP = percentiles[, 2],
PBP = percentiles[, 3])
return(data)
}
calculate_spread <- function(x, y, beta) {
return(y - beta * x)
}
calculate_beta_and_level <- function(x, y) {
dx <- diff(x)
dy <- diff(y)
r <- prcomp( ~ dx + dy)
beta <- r$rotation[2, 1] / r$rotation[1, 1]
spread <- calculate_spread(x, y, beta)
names(spread) <- "spread"
level <- mean(spread, na.rm = T)
outL <- list()
outL$spread <- spread
outL$beta <- beta
outL$level <- level
return(outL)
}
calculate_buy_sell_signals <- function(spread, beta, level, lower_threshold, upper_threshold) {
buy_signals <- ifelse(spread <= level - lower_threshold, 1, 0)
sell_signals <- ifelse(spread >= level + upper_threshold, 1, 0)
output <- cbind(spread, buy_signals, sell_signals)
colnames(output) <- c("spread", "buy_signals", "sell_signals")
return(output)
}
folder <- paste(getwd(), "/zz/", sep = "")
files <- list.files(folder, pattern = "\\.csv$")
i <- 1
pb.df <- NULL
for (file in files) {
cat("processing ", file, "\r\n", sep = "")
name.postfix <- substr(file, 1, 6)
stocks.whole.name <- paste("stocks.whole", name.postfix, sep = "")
stocks.whole <- load.stocks.data(paste(folder, file, sep = ""))
assign(stocks.whole.name, stocks.whole, pos = .GlobalEnv)
if (i == 1) {
pb.df <- sqldf(paste("SELECT Date FROM [stocks.whole]", sep = ""))
}
pb.df <- sqldf(paste("SELECT df1.*, df2.PB AS s", name.postfix, " FROM [pb.df] df1 JOIN [stocks.whole] df2 USING(Date)", sep = ""))
i <- i + 1
}
rownames(pb.df) <- pb.df[[1]]
pb.df[[1]] <- NULL
pb.xts = as.xts(pb.df)
x <- pb.xts[, 7]
y <- pb.xts[, 8]
dF <- cbind(x, y)
names(dF) <- c("x", "y")
run_regression <- function(dF) {
return (coef(lm(y ~x -1 ,data = as.data.frame(dF))))
}
rolling_beta <- function(z, width) {
rollapply(z, width = width, FUN = run_regression, by.column = FALSE, align = "right")
}
betas <- rolling_beta(diff(dF), 10)
data <- merge(betas, dF)
data$spread <- data$y - lag(betas, 1) * data$x
returns <- diff(dF) / dF
return_beta <- rolling_beta(returns, 10)
data$spreadR <-diff(data$y) / data$y - return_beta * diff(data$x) / data$x
threshold <- sd(data$spread, na.rm = T)
|
74c398309fa117d1c069fc44dc67c74087db7f49
|
1df04d65fa9ea2eea223e4a7f10049cffac4f94f
|
/backup/CCode/SHM/SHM_CM.R
|
70265eeecc7c9bdefd636912e488c60ead181e4f
|
[] |
no_license
|
jhncl/Model-in-C
|
aa3b51071b609afdb2c129840fdade7ece80de95
|
0b42e3cd587acb651c40fd965ac01a04eaa992d4
|
refs/heads/master
| 2021-01-18T20:30:09.524459
| 2014-04-28T09:22:46
| 2014-04-28T09:22:46
| 2,867,397
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,662
|
r
|
SHM_CM.R
|
filename="M_SHM_A_FULL"
library(rjags)
library(qfa)
library(qfaBayes)
Control<-c("Adam_cdc13-1_SDLV2_REP1.txt","Adam_cdc13-1_SDLV2_REP2.txt","Adam_cdc13-1_SDLV2_REP3.txt","Adam_cdc13-1_SDLV2_REP4.txt")
DescripControl<-"ExptDescriptionCDC13.txt"
a<-rod.read(files=Control,inoctimes=DescripControl)
qfa.variables(a)
Screen<-unique(a$Screen.Name)
Treat<-27
MPlate<-(unique(a$MasterPlate.Number))
a<-funcREMOVE(a,Screen,Treat,MPlate)
Row<-paste(a$Row)
Col<-paste(a$Col)
for (i in 1:nrow(a)){
if (nchar(Row[i])<2){Row[i]=paste(0,Row[i],sep="")}
if (nchar(Col[i])<2){Col[i]=paste(0,Col[i],sep="")}
}
a$ID<-paste(a$Barcode,a$MasterPlate.Number,Row,Col,sep="")
ORFuni=unique(a$ORF)########
funcIDlist<-function(x){
a$ID[a$ORF==x]
}
funcStrip<-function(x,i){x[1:i]}
IDstrip=sapply(ORFuni,funcIDlist)
IDstrip=lapply(IDstrip,unique)
IDstrip=lapply(IDstrip,i=6,funcStrip)
IDstrip=unlist(IDstrip)
IDstrip=na.omit(IDstrip)
a<-a[a$ID%in%IDstrip,]
#########
a<-a[order(a$ORF,a$ID,a$Expt.Time), ]
Scaling=TRUE
IDuni<-unique(a$ID)
ORFuni=unique(a$ORF)########
gene<-unlist(lapply(ORFuni,funcGENE,data=a))
N<-length(ORFuni);M<-length(IDuni)
NoORF_a<-unlist(lapply(ORFuni,funcNoORF,data=a))#no of repeats each orf
NoTime_a<-c(0,unlist(lapply(IDuni,funcNoTime,data=a)))# 0+ no of
NoSum_a<-c(0,unlist(lapply(1:N,funcNoSum,NoORF_vec=NoORF_a)))
dimr<-max(NoORF_a);dimc<-max(NoTime_a)
y<-funcXY(a$Growth,M,N,NoTime_a,NoSum_a,dimr,dimc)
x<-funcXY(a$Expt.Time,M,N,NoTime_a,NoSum_a,dimr,dimc)
QFA.I<-list("NoORF"=c(NoORF_a),"NoTime"=c(NoTime_a)[-1],"NoSum"=c(NoSum_a),"N"=
N,"M"=M,"gene"=gene)
if (Scaling==TRUE){y<-funcSCALING(a,y)}
QFA.D<-list(y=y,x=x,ORFuni=ORFuni)
x[is.na(x)]=-999
y[is.na(y)]=-999
xx<-aperm(x,c(2,1,3))
yy<-aperm(y,c(2,1,3))
write.table(file="xdata.txt",c(xx))
write.table(file="ydata.txt",c(yy))
write.table(file="NoORFdata.txt",c(NoORF_a))
write.table(file="NoTIMEdata.txt",c(NoTime_a)[-1])
write.table(file="LMNmaxdata.txt",c(N,max(NoORF_a),max(NoTime_a),length(y),length(NoTime_a[-1])))
#################################################
#################################################
QFA.P<-list(
sigma_K=3, phi_K=0.5,
eta_K_o=0.5, psi_K_o=1,
sigma_r=-1, phi_r=0.1,
eta_r_o=1, psi_r_o=1,
eta_nu=-1, psi_nu=1,
K_mu=log(0.2192928), eta_K_p=1,
r_mu=log(2.5), eta_r_p=1,
nu_mu=log(31), eta_nu_p=1,
P_mu=log(0.0002), eta_P=1/0.01
)
write("
model {
for (i in 1:N){
for (j in 1:NoORF[i]){
for (l in 1:NoTime[(NoSum[i]+j)]){
y[j,l,i] ~ dnorm(y.hat[j,l,i], exp(nu_l[i]))
y.hat[j,l,i] <- (K_lm[(NoSum[i]+j)]*P*exp(r_lm[(NoSum[i]+j)]*x[j,l,i]))/(K_lm[(NoSum[i]+j)]+P*(exp(r_lm[(NoSum[i]+j)]*x[j,l,i])-1))
}
K_lm[(NoSum[i]+j)]<- exp(K_lm_L[(NoSum[i]+j)])
K_lm_L[(NoSum[i]+j)] ~ dnorm(K_o_l[i],exp(tau_K_l[i]))
r_lm[(NoSum[i]+j)]<- exp(min(3.5,r_lm_L[(NoSum[i]+j)]))
r_lm_L[(NoSum[i]+j)] ~ dnorm(r_o_l[i],exp(tau_r_l[i]))
}
K_o_l[i] ~ dnorm( K_p, exp(sigma_K_o) )
r_o_l[i] ~ dnorm( r_p, exp(sigma_r_o) )
nu_l[i] ~ dnorm(nu_p, exp(sigma_nu) )
tau_K_l[i]~dnorm(sigma_K,phi_K)
tau_r_l[i]~dnorm(sigma_r,phi_r)
}
K_p ~ dnorm(K_mu,eta_K_p)
r_p ~ dnorm(r_mu,eta_r_p)
nu_p ~ dnorm(nu_mu,eta_nu_p)
P<-exp(P_L)
P_L ~ dnorm(P_mu,eta_P)
sigma_nu~dnorm(eta_nu,psi_nu)
sigma_K_o ~ dnorm(eta_K_o,psi_K_o)
sigma_r_o ~ dnorm(eta_r_o,psi_r_o)
}
","model1.bug")
jags <- jags.model('model1.bug',
data = list('x' = QFA.D$x,
'y' = QFA.D$y,
'N' = QFA.I$N,
'NoTime' = QFA.I$NoTime,
'NoORF' = QFA.I$NoORF,
'NoSum' = QFA.I$NoSum,
'sigma_K'=QFA.P$sigma_K, 'phi_K'=QFA.P$phi_K,
'sigma_r'=QFA.P$sigma_r, 'phi_r'=QFA.P$phi_r,
'eta_K_o'=QFA.P$eta_K_o, 'psi_K_o'=QFA.P$psi_K_o,
'eta_r_o'=QFA.P$eta_r_o, 'psi_r_o'=QFA.P$psi_r_o,
'eta_nu'=QFA.P$eta_nu, 'psi_nu'=QFA.P$psi_nu,
'K_mu'=QFA.P$K_mu, 'eta_K_p'=QFA.P$eta_K_p,
'r_mu'=QFA.P$r_mu, 'eta_r_p'=QFA.P$eta_r_p,
'nu_mu'=QFA.P$nu_mu, 'eta_nu_p'=QFA.P$eta_nu_p,
'P_mu'=QFA.P$P_mu, 'eta_P'=QFA.P$eta_P
),
n.chains = 1,
n.adapt = 100)
samp<-coda.samples(jags,
c('K_lm_L', 'tau_K_l',
'K_o_l', 'sigma_K_o',
'K_p',
'P_L',
'r_lm_L', 'tau_r_l',
'r_o_l', 'sigma_r_o',
'r_p',
'nu_l', 'sigma_nu',
'nu_p'),
1000,thin=1)
save(samp,file=paste(filename,"_F0.R",sep=""))
update(jags,10000)
date()
samp<-coda.samples(jags,
c('K_lm_L', 'tau_K_l',
'K_o_l', 'sigma_K_o',
'K_p',
'P_L',
'r_lm_L', 'tau_r_l',
'r_o_l', 'sigma_r_o',
'r_p',
'nu_l', 'sigma_nu',
'nu_p'),
20000,thin=20)
save(samp,file=paste(filename,"_F1.R",sep=""))
# MCMC sampling of the hierarchical model parameters via rjags/coda.
# NOTE(review): `jags`, `filename`, and the reference draws `aa` used in the
# plotting section are defined earlier in the file (outside this excerpt).
samp<-coda.samples(jags,
             c('K_lm_L', 'tau_K_l',
               'K_o_l', 'sigma_K_o',
               'K_p',
               'P_L',
               'r_lm_L', 'tau_r_l',
               'r_o_l', 'sigma_r_o',
               'r_p',
               'nu_l', 'sigma_nu',
               'nu_p'),
             100000,thin=10)
# Persist the 100000-iteration (thinned by 10) sample object to disk.
save(samp,file=paste(filename,"_F2.R",sep=""))
# NOTE(review): stop() halts a non-interactive run here, which makes everything
# below dead code in batch mode; presumably the plot section is run by hand.
stop()
# Sizes driving the index arithmetic below: N (= L) per-lineage parameters and
# M per-(l,m) parameters. TODO confirm their meaning against the model file.
L=N=4294
M=17308
pdf(file="SHMtestplot_C_4294.pdf")
#K_lm[%i]
# For each K_lm element: overlay densities of the reference draws (aa, black)
# and the new draws (samp, red), annotating the title with a t-test p-value,
# then show both traces stacked (title = difference of the two means).
for (i in 1:M){
j=i
plot(density(aa[,j]),main=paste(colnames(samp)[i],t.test((aa[,j]),samp[,i])$p.value));lines(density(samp[,i]),col=2);
par(mfrow=c(2,1))
plot(c(aa[,j]),main=paste(mean(aa[,j])-mean(samp[,i])),type="l");plot(samp[,i],col=2,type="l");
par(mfrow=c(1,1))
}
dev.off()
# NOTE(review): the pdf device is closed here, yet plotting continues below and
# dev.off() is called again at the end of the section — the later plots go to
# the default device and the second dev.off() may fail; verify intent.
#tau_K_l[%i]
j=M+1
# NOTE(review): unlike the r_lm / tau_r_l loops further down, this loop (and
# the K_o_l loop) never increments j, so the same aa column is compared against
# every samp column in the range — possibly a missing `j=j+1`; confirm.
for (i in (2*M+3*N+8):(2*M+4*N+7)){
plot(density(aa[,j]),main=paste(colnames(samp)[i],t.test((aa[,j]),samp[,i])$p.value));lines(density(samp[,i]),col=2);
par(mfrow=c(2,1))
plot(c(aa[,j]),main=paste(mean(aa[,j])-mean(samp[,i])),type="l");plot(samp[,i],col=2,type="l");
par(mfrow=c(1,1))
}
#"K_o_l[%i]
j=M+N+1
for (i in (M+1):(M+N)){
plot(density(aa[,j]),main=paste(colnames(samp)[i],t.test((aa[,j]),samp[,i])$p.value));lines(density(samp[,i]),col=2);
par(mfrow=c(2,1))
plot(c(aa[,j]),main=paste(mean(aa[,j])-mean(samp[,i])),type="l");plot(samp[,i],col=2,type="l");
par(mfrow=c(1,1))
}
#sigma_K_o ");
# Scalar parameters: fixed i (column in samp) and fixed j (column in aa).
i=2*M+3*N+5
j=M+2*N+1
plot(density(aa[,j]),main=paste(colnames(samp)[i],t.test((aa[,j]),samp[,i])$p.value));lines(density(samp[,i]),col=2);
par(mfrow=c(2,1))
plot(c(aa[,j]),main=paste(mean(aa[,j])-mean(samp[,i])),type="l");plot(samp[,i],col=2,type="l");
par(mfrow=c(1,1))
#K_p ");
i=M+1+N
j=M+2*N+2
plot(density(aa[,j]),main=paste(colnames(samp)[i],t.test((aa[,j]),samp[,i])$p.value));lines(density(samp[,i]),col=2);
par(mfrow=c(2,1))
plot(c(aa[,j]),main=paste(mean(aa[,j])-mean(samp[,i])),type="l");plot(samp[,i],col=2,type="l");
par(mfrow=c(1,1))
#"P_l ");
i=(M+N+2)
j=M+2*N+3
plot(density(aa[,j]),main=paste(colnames(samp)[i],t.test((aa[,j]),samp[,i])$p.value));lines(density(samp[,i]),col=2);
par(mfrow=c(2,1))
plot(c(aa[,j]),main=paste(mean(aa[,j])-mean(samp[,i])),type="l");plot(samp[,i],col=2,type="l");
par(mfrow=c(1,1))
#r_lm[%i]
# Vector parameter block: here i and j advance together (j=j+1 in the body).
j=M+2*N+4
for (i in (M+2*N+4):(2*M+2*N+3)){
plot(density(aa[,j]),main=paste(colnames(samp)[i],t.test((aa[,j]),samp[,i])$p.value));lines(density(samp[,i]),col=2);
par(mfrow=c(2,1))
plot(c(aa[,j]),main=paste(mean(aa[,j])-mean(samp[,i])),type="l");plot(samp[,i],col=2,type="l");
par(mfrow=c(1,1))
j=j+1
}
#tau_r_l[%i] ",l);
j=2*M+2*N+4
for (i in (2*M+4*N+8):(2*M+5*N+7)){
plot(density(aa[,j]),main=paste(colnames(samp)[i],t.test((aa[,j]),samp[,i])$p.value));lines(density(samp[,i]),col=2);
par(mfrow=c(2,1))
plot(c(aa[,j]),main=paste(mean(aa[,j])-mean(samp[,i])),type="l");plot(samp[,i],col=2,type="l");
par(mfrow=c(1,1))
j=j+1
}
#r_o_l[%i] ",l);
j=2*M+3*N+4
for (i in (2*M+2*N+4):(2*M+3*N+3)){
plot(density(aa[,j]),main=paste(colnames(samp)[i],t.test((aa[,j]),samp[,i])$p.value));lines(density(samp[,i]),col=2);
par(mfrow=c(2,1))
plot(c(aa[,j]),main=paste(mean(aa[,j])-mean(samp[,i])),type="l");plot(samp[,i],col=2,type="l");
par(mfrow=c(1,1))j=j+1
}
#sigma_r_o ");
i=2*M+3*N+7
j=2*M+4*N+4
plot(density(aa[,j]),main=paste(colnames(samp)[i],t.test((aa[,j]),samp[,i])$p.value));lines(density(samp[,i]),col=2);
par(mfrow=c(2,1))
plot(c(aa[,j]),main=paste(mean(aa[,j])-mean(samp[,i])),type="l");plot(samp[,i],col=2,type="l");
par(mfrow=c(1,1))
#r_p ");
i=2*M+3*N+4
j=2*M+4*N+5
plot(density(aa[,j]),main=paste(colnames(samp)[i],t.test((aa[,j]),samp[,i])$p.value));lines(density(samp[,i]),col=2);
par(mfrow=c(2,1))
plot(c(aa[,j]),main=paste(mean(aa[,j])-mean(samp[,i])),type="l");plot(samp[,i],col=2,type="l");
par(mfrow=c(1,1))
#"nu_l[%i] ",l);
j=2*M+4*N+6
for (i in (M+N+3):(M+2*N+2)){
plot(density(aa[,j]),main=paste(colnames(samp)[i],t.test((aa[,j]),samp[,i])$p.value));lines(density(samp[,i]),col=2);
par(mfrow=c(2,1))
plot(c(aa[,j]),main=paste(mean(aa[,j])-mean(samp[,i])),type="l");plot(samp[,i],col=2,type="l");
par(mfrow=c(1,1))
j=j+1
}
#sigma_nu ");
i=2*M+3*N+6
j=2*M+5*N+6
plot(density(aa[,j]),main=paste(colnames(samp)[i],t.test((aa[,j]),samp[,i])$p.value));lines(density(samp[,i]),col=2);
par(mfrow=c(2,1))
plot(c(aa[,j]),main=paste(mean(aa[,j])-mean(samp[,i])),type="l");plot(samp[,i],col=2,type="l");
par(mfrow=c(1,1))
#nu_p ");
i=M+2*N+3
j=2*M+5*N+7
plot(density(aa[,j]),main=paste(colnames(samp)[i],t.test((aa[,j]),samp[,i])$p.value));lines(density(samp[,i]),col=2);
par(mfrow=c(2,1))
plot(c(aa[,j]),main=paste(mean(aa[,j])-mean(samp[,i])),type="l");plot(samp[,i],col=2,type="l");
par(mfrow=c(1,1))
dev.off()
i=M+N+3;j=i+N+1;
colnames(samp)[i];names(aa)[j]
|
d76440123c796da2763ecab73945d878c8793615
|
12ccdd95c02ca34822d0a3862a28bb17170955f5
|
/man/residuals.ccrm.Rd
|
78af565b0e09246f8e686c19ec0f149e5ab883b4
|
[] |
no_license
|
cran/iRegression
|
d156b210e062f4548d5cd1fa8fc64cb00832b4f1
|
4d2c16e000ed423412f980b5fe7ff27519216fe3
|
refs/heads/master
| 2021-01-19T01:41:43.944423
| 2016-07-18T20:09:24
| 2016-07-18T20:09:24
| 17,696,732
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 460
|
rd
|
residuals.ccrm.Rd
|
\name{residuals.ccrm}
\alias{residuals.ccrm}
\title{Extract Constrained Centre and Range Method Residuals}
\description{Returns the residuals from an object class \code{ccrm}.}
\usage{
\method{residuals}{ccrm}(object, ...)
}
\arguments{
\item{object}{ an object class \code{ccrm}. }
\item{\dots}{ other arguments. }
}
\value{ Residuals extracted from the object class \code{ccrm}. }
\seealso{
\code{\link{ccrm}}
}
\keyword{regression}
|
80b40eab6e3743e017743147cc8a06724fea0ce5
|
2c70811ab30b8450945af59f16253efc4e453a5b
|
/ProgrammingAssignment1/complete.R
|
d00e31af821cf0a938061031d6ddae489c1102da
|
[] |
no_license
|
manuelmunoz/datasciencecoursera
|
9dd6615e7980b2a983de46e7f1cc30b0cd3b910e
|
1caa0e9530c2c351accf31baa99ac022d5a7d8d7
|
refs/heads/master
| 2021-01-23T12:18:15.446667
| 2016-08-05T19:42:30
| 2016-08-05T19:42:30
| 64,493,102
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 587
|
r
|
complete.R
|
# Count, for each requested monitor file, the rows where both measurement
# columns (columns 2 and 3 of the CSV) are present, and return one row per
# requested file.
#
# directory: path of the folder holding the per-monitor CSV files.
# id:        integer vector of file indices into the sorted directory listing.
# Returns:   data.frame with columns id and nobs (number of complete cases).
complete <- function(directory, id = 1:332) {
  # FIX: the original hard-coded an absolute home directory and always listed
  # "specdata" regardless of `directory`, so it only worked on one machine.
  # Build the listing and the per-file paths from `directory` itself.
  ficheros <- dir(directory)
  # vapply instead of rbind() inside a loop: same result, no O(n^2) growth.
  nobs <- vapply(id, function(h) {
    datos <- read.csv(file.path(directory, ficheros[h]))
    # complete cases with respect to the two measurement columns
    sum(!is.na(datos[, 2]) & !is.na(datos[, 3]))
  }, integer(1))
  data.frame(id = id, nobs = nobs)
}
|
1c5ec38c2bd86ba2434d1c5ac02dc46923a4fe34
|
3fc582b5ab0a1d2f778fd2582ce7c0bb670aa11d
|
/man/ComputeCropCalendar.Rd
|
32a89f402490edc24a5e90d0aa29df731e3c5105
|
[] |
no_license
|
bluegulcy/aquacrop
|
43b56a8becb9244fe3028af853f3c19c99bdd5dd
|
50a1d815d04259e8792aba763fc53e0f896bf1a0
|
refs/heads/master
| 2020-09-02T10:55:40.510230
| 2019-10-08T10:03:12
| 2019-10-08T10:03:12
| 219,206,208
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 663
|
rd
|
ComputeCropCalendar.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ComputeCropCalendar.R
\name{ComputeCropCalendar}
\alias{ComputeCropCalendar}
\title{Compute additional parameters needed to define crop}
\usage{
ComputeCropCalendar(Crop, CropName, CropChoices, Weather, ClockStruct)
}
\arguments{
\item{Crop}{list}
\item{CropName}{list with crops names}
\item{CropChoices}{crops to be analysed}
\item{Weather}{dataset with weather data}
\item{ClockStruct}{crop calendar}
}
\value{
\code{Crop}.
}
\description{
Compute additional parameters needed to define crop
}
\examples{
ComputeCropCalendar(Crop, CropName, CropChoices, Weather, ClockStruct)
}
|
f606c7f9ad76ff64034b324e7cea901e0df3def3
|
e3204b0f913527581139df64c8189a91fdeb342c
|
/.Rproj.user/E300CC8A/sources/per/t/5DF6BD11-contents
|
bc4808766cd89d0e0a3857a311dead903863dbab
|
[] |
no_license
|
DaoudaTandiangDjiba/ExamEdacySalingPricePredictions
|
c23bc1c675bee0832198361f09ef5cde6865c9ea
|
078153e4ffc87dc422456d6bcce1edfb83c82390
|
refs/heads/master
| 2020-04-06T15:06:11.115065
| 2018-11-14T16:14:06
| 2018-11-14T16:14:06
| 157,565,802
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,088
|
5DF6BD11-contents
|
#
# Import packages
library(plyr)
library(tidyverse)
library(MASS)
library(car)
library(e1071)
library(caret)
library(cowplot)
library(caTools)
library(pROC)
library(ggcorrplot)
library(reshape)
library(scales)
library(xgboost)
library(gbm)
library(randomForest)
# Load the train and test dataset
# (train.csv carries the SalePrice label; test.csv does not — consistent with
# the 1460-train / 2919-total row indices used later in this script.)
train <- read.csv("train.csv", header=TRUE, stringsAsFactors = FALSE)
test <- read.csv("test.csv",header=TRUE, stringsAsFactors = FALSE)
# Check the variables
View(train)
View(test)
str(train)
str(test)
names(train)
names(test)
# Add a "SalePrice" variable to the test set to allow for combining data sets
summary(train$SalePrice)
# The "None" placeholder makes the combined SalePrice column character.
test.SalePrice <- data.frame(SalePrice = rep("None", nrow(test)), test[,])
# Combine data sets
# rbind.data.frame matches columns by name, so the different column order of
# test.SalePrice (SalePrice first) is harmless here.
data.combined <- rbind(train, test.SalePrice)
# NOTE(review): as.integer() on the character column turns every "None" (the
# test rows) into NA with a coercion warning; the NAs are later used to tell
# labelled and unlabelled rows apart.
data.combined$SalePrice <- as.integer(data.combined$SalePrice)
dim(data.combined)
summary(data.combined$SalePrice)
# Take a look to the saling price
summary(data.combined$SalePrice)
# Histogram of SalePrice over the labelled (non-NA) rows only.
ggplot(data.combined[!is.na(data.combined$SalePrice),], aes(x = SalePrice)) +
  geom_histogram(binwidth = 10000) +
  xlab("SalePrice") +
  ylab("Total Count") +
  scale_x_continuous(breaks= seq(0, 800000, by=100000), labels = comma)
# Check outliers
options(repr.plot.width =4, repr.plot.height = 4)
boxplot(data.combined$SalePrice)$out
# NOTE(review): a bare scale_x_continuous() not added to a plot has no effect.
scale_x_continuous(breaks= seq(0, 800000, by=100000), labels = comma)
# Missing data
# Per-column fraction of missing values across the combined data.
missing_data <- data.combined %>% summarise_all(funs(sum(is.na(.))/n()))
View(missing_data)
# Transform it as a dataframe with 2 columns
missing_data <- gather(missing_data, key = "variables", value = "percent_missing")
# visualize the missing data percent for each variables
# Plot dimension
options(repr.plot.width = 12, repr.plot.height = 8)
# Missing data barplot
Missingdata_barplot <- ggplot(missing_data, aes(x = reorder(variables, percent_missing), y = percent_missing)) +
  geom_bar(stat = "identity", fill = "red", aes(color = I('white')), size = 0.1)+
  xlab('variables')+
  coord_flip()+
  theme_bw()
Missingdata_barplot
nacolumn <- which(colSums(is.na(data.combined)) > 0)
sort(colSums(sapply(data.combined[nacolumn], is.na)), decreasing = TRUE)
names(nacolumn)
# 34 variables with NAs, NAs in SalePrice is from the test set.
# Imputing missing values
# Only Fence variable and PoolQC are needed for our Work
table(data.combined$Fence)
# The NAs value on the Fence means None
data.combined$Fence[is.na(data.combined$Fence)] <- "No Fence"
#The NAs value on the PoolQC means None
table(data.combined$PoolQC)
data.combined$PoolQC[is.na(data.combined$PoolQC)] <- "No Pool"
#======================================New Dataset=============================================================
# Keep a 10-column working subset (by position: YearBuilt, OverallQual,
# ExterCond, KitchenAbvGr, GarageCars, PoolQC, Fence, BedroomAbvGr,
# SaleCondition, SalePrice — TODO confirm positions against the raw csv).
data.combined_full <- data.combined[c(18,20,29,53,62,73,74,52,80,81)]
str(data.combined_full)
data.combined_full$ExterCond <- as.factor(data.combined_full$ExterCond)
data.combined_full$PoolQC <- as.factor(data.combined_full$PoolQC)
data.combined_full$Fence <- as.factor(data.combined_full$Fence)
data.combined_full$SaleCondition <- as.factor(data.combined_full$SaleCondition)
#====================================Distribution of variables=================================================
# Distribution of the Bedroom variable
ggplot(data.combined_full, aes(x = BedroomAbvGr )) +
  geom_bar() +
  xlab("Bedroom") +
  ylab("Total Count") +
  theme_bw()
# Distribution of the Fence variable
ggplot(data.combined_full, aes(x = Fence)) +
  geom_bar(fill = 'Blue') +
  xlab("Fence") +
  ylab("Total Count")+
  theme_bw()
# Distribution of the Sale condition varibale
ggplot(data.combined_full, aes(x = SaleCondition )) +
  geom_bar() +
  xlab("Sale condition") +
  ylab("Total Count") +
  theme_bw()
# Distribution of the Extern condition variable
ggplot(data.combined_full, aes(x = ExterCond )) +
  geom_bar() +
  xlab("Extern condition") +
  ylab("Total Count") +
  theme_bw()
# Distribution of the Pool quality variable
ggplot(data.combined_full, aes(x = PoolQC )) +
  geom_bar() +
  xlab("Pool quality") +
  ylab("Total Count") +
  theme_bw()
# =======================================Correlation with the SalePrice Variable====================================
options(repr.plot.width =6, repr.plot.height = 4)
data.combined_full$BedroomAbvGr <- as.integer(data.combined_full$BedroomAbvGr)
# Correlation matrix on the labelled rows (1:1460) only, rounded to 1 decimal.
correlation <- round(cor(data.combined_full[1:1460,][,c("YearBuilt","OverallQual","KitchenAbvGr", "GarageCars","BedroomAbvGr","SalePrice")]), 1)
ggcorrplot(correlation, title = "Correlation")+theme(plot.title = element_text(hjust = 0.5))
# Feature engineering
# Bin SalePrice into three price bands; the test rows have SalePrice == NA, so
# their SalePrice2 stays NA.
data.combined_full$SalePrice2[data.combined_full$SalePrice >=34900 & data.combined_full$SalePrice <=129975 ] <- 'Cheap'
data.combined_full$SalePrice2[data.combined_full$SalePrice > 129975 & data.combined_full$SalePrice <= 180921] <- 'Medium'
data.combined_full$SalePrice2[data.combined_full$SalePrice > 180921 & data.combined_full$SalePrice <= 755000] <- 'Expensive'
data.combined_full$SalePrice2 <- as.factor(data.combined_full$SalePrice2)
# Distribution of factor variables by Saleprice
ggplot(data.combined_full[1:1460,], aes(x = PoolQC, fill = SalePrice2)) +
  geom_bar() +
  facet_grid(~SalePrice2)+
  xlab("Pool quality") +
  ylab("Total Count") +
  theme_bw()
# NOTE(review): axis label says "Pool quality" but the variable is BedroomAbvGr.
ggplot(data.combined_full[1:1460,], aes(x = BedroomAbvGr, fill = SalePrice2 )) +
  geom_bar() +
  facet_grid(~SalePrice2)+
  xlab("Pool quality") +
  ylab("Total Count") +
  theme_bw()
ggplot(data.combined_full[1:1460,], aes(x = Fence, fill = SalePrice2 )) +
  geom_bar() +
  facet_grid(~SalePrice2)+
  xlab("Fence") +
  ylab("Total Count") +
  theme_bw()
# NOTE(review): axis label says "Fence" but the variable is SaleCondition.
ggplot(data.combined_full[1:1460,], aes(x = SaleCondition, fill = SalePrice2 )) +
  geom_bar() +
  facet_grid(~SalePrice2)+
  xlab("Fence") +
  ylab("Total Count") +
  theme_bw()
ggplot(data.combined_full[1:1460,], aes(x = ExterCond, fill = SalePrice2 )) +
  geom_bar() +
  facet_grid(~SalePrice2)+
  xlab("Extern condition") +
  ylab("Total Count") +
  theme_bw()
# Data Preparation
# Re-flag the test rows' SalePrice (NA after the as.integer() coercion above)
# as the literal string "None"; this silently converts the numeric column to
# character, which is why it is re-cast to a factor just below.
data.combined_full$SalePrice[is.na(data.combined$SalePrice)] <- "None"
# ====================================Exploratory modeling==============================================
# First Model: Random Forest
set.seed(2018)
data.combined_full$SalePrice <- as.factor(data.combined_full$SalePrice)
# FIX: train only on the labelled rows 1:1460. The original used [0:1461,],
# which (the 0 being dropped) also pulled in row 1461 — the first *test* row,
# whose SalePrice is the placeholder level "None".
RFModel <- randomForest(formula = SalePrice ~ OverallQual+YearBuilt+ExterCond+KitchenAbvGr+
                          GarageCars+PoolQC+Fence+BedroomAbvGr+SaleCondition,
                        data = data.combined_full[1:1460,],
                        ntree=500)
Prediction <- predict(RFModel, data.combined_full[1461:2919,])
Prediction2 <- Prediction
PreComp_RF <- Prediction2
# FIX: SalePrice is a plain vector, so it is indexed without a comma; the
# original `[1461:2919,]` raised "incorrect number of dimensions".
Compl_testRF <- data.combined_full$SalePrice[1461:2919]
(M_RF_test <- as.matrix(table(Compl_testRF, PreComp_RF)))
# NOTE(review): perf() and Test_set are not defined anywhere in this script
# (leftover from another project), so the call is disabled.
#perf(Test_set$Cible, Test_set$Pred.rf)
# Second Model: Gradient Boosting
# NOTE(review): SalePrice was converted to a factor above, but
# distribution = "gaussian" expects a numeric response — gbm() will reject a
# factor; confirm whether a numeric copy of SalePrice was intended here.
gbm.fit <- gbm(
  formula = SalePrice ~ OverallQual+YearBuilt+ExterCond+KitchenAbvGr+
    GarageCars+PoolQC+Fence+BedroomAbvGr+SaleCondition,
  distribution = "gaussian",
  data = data.combined_full[1:1460,],
  n.trees = 10000,
  interaction.depth = 1,
  shrinkage = 0.001,
  cv.folds = 5,
  n.cores = NULL, # will use all cores by default
  verbose = FALSE
  )
# FIX: in the original, data= and ntree= were accidentally placed *inside*
# formula(...), which errors with "unused arguments"; they belong to
# randomForest(). The stray `formula("y ~ a+b+c")` no-op was also removed.
model.rf <- randomForest(SalePrice ~ OverallQual+YearBuilt+ExterCond+KitchenAbvGr+GarageCars+PoolQC+Fence+
                           BedroomAbvGr+SaleCondition,
                         data = data.combined_full[1:1460,],
                         ntree = 150)
#Splitting the data
# NOTE(review): the block below references telco_final, which is not defined
# in this script (pasted from a telco-churn project) and would clobber the
# house-price `train`/`test` data frames; disabled pending confirmation.
#set.seed(123)
#indices = sample.split(telco_final$Churn, SplitRatio = 0.7)
#train = telco_final[indices,]
#test = telco_final[!(indices),]
# Take a look to the saling price
# NOTE(review): this whole section duplicates the EDA block near the top of
# the script (same summary/histogram/boxplot); it looks like a leftover copy.
summary(data.combined$SalePrice)
ggplot(data.combined[!is.na(data.combined$SalePrice),], aes(x = SalePrice)) +
  geom_histogram(binwidth = 10000) +
  xlab("SalePrice") +
  ylab("Total Count") +
  scale_x_continuous(breaks= seq(0, 800000, by=100000), labels = comma)
# Check outliers
options(repr.plot.width =4, repr.plot.height = 4)
boxplot(data.combined$SalePrice)$out
# NOTE(review): a bare scale_x_continuous() is not attached to any plot and
# has no effect on its own.
scale_x_continuous(breaks= seq(0, 800000, by=100000), labels = comma)
|
|
5932949187afe985118e9165879c12ae6ffc4386
|
287408d83dd37285f619ac76eb7607bc9a7f3928
|
/R/pcalc.R
|
f3e9ddee64a76304e3991f6eab7b3d635df2fb8e
|
[] |
no_license
|
victoraLab/pcReasy
|
fad747c80ba97d44fdbacd7775da19c74820a4d1
|
5a21263fa92ee9f92d24cf402d213b5fa61803f2
|
refs/heads/main
| 2023-07-08T10:14:22.227070
| 2021-08-17T01:24:45
| 2021-08-17T01:24:45
| 397,015,165
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,993
|
r
|
pcalc.R
|
#' Calculate PC reaction
#'
#' Returns an efficient protocol for a PC reaction setup.
#'
#' @param v The final volume desired for each PC reaction. Numeric.
#' @param rxn The number of total reactions. Numeric.
#' @param initialConc Data Frame containing the initial concentrations of each component.
#' @param finalConc Data Frame containing the final concentrations of each component.
#' @param pError Pipetting error to consider for each reaction.
#'
#' @export
pcalc <- function(v = 10, rxn = NULL, initialConc = NULL, finalConc = NULL, pError = 0){
  # Disable scientific notation while building/printing the result, and
  # restore the caller's option on exit instead of leaking a global change.
  old_opts <- options(scipen = 999)
  on.exit(options(old_opts), add = TRUE)
  # Built-in Q5 defaults when no concentrations are supplied.
  # NOTE(review): if the caller supplies concentrations but leaves rxn = NULL,
  # the sprintf() below yields a zero-length column name and errors; custom
  # concentrations should always be accompanied by rxn.
  if (length(initialConc) == 0) {
    finalConc <- data.frame(
      buffer = 1,
      dNTP = 200,
      FPrimer = 10,
      RPrimer = 10,
      DNA = 10,
      Q5Poly = 0.02,
      optional = 0,
      row.names = "Q5"
    )
    initialConc <- data.frame(
      buffer = 5,
      dNTP = 10000,
      FPrimer = 100,
      RPrimer = 100,
      DNA = 100,
      Q5Poly = 2,
      optional = 0,
      row.names = "Q5"
    )
    rxn <- 10
  }
  # Drop components whose concentration is zero (e.g. the "optional" slot).
  initialConc <- initialConc[, !initialConc == 0]
  finalConc <- finalConc[, !finalConc == 0]
  # Volume of each stock per reaction: v * C_final / C_initial.
  # FIX: the original built this via a model.frame(y ~ I(x / z)) detour plus
  # dplyr::bind_rows; the direct vectorised formula produces the same values
  # and removes the dplyr dependency.
  init_vals <- as.vector(t(initialConc))
  final_vals <- as.vector(t(finalConc))
  res <- data.frame(
    initialConc = init_vals,
    finalConc = final_vals,
    rxnVols = v * final_vals / init_vals,
    row.names = colnames(initialConc)
  )
  parc <- sum(res[["rxnVols"]])
  water <- v - parc
  if (parc > v) {
    stop(
      paste(
        sprintf("Total volume cannot be bigger than %s", v),
        sprintf("Volume is at %s", parc),
        sep = "\n"))
  }
  pError <- 1 + pError
  # Water tops the mix up to v; Total is the sanity-check sum.
  res["Water", ] <- c(0, 0, water)
  res["Total", ] <- c(0, 0, water + parc)
  # Per-component volume for the whole batch: scaled by the number of
  # reactions and inflated by the pipetting-error factor.
  res[, sprintf("rxnVols%s_pError%s", rxn, pError - 1)] <- (res[, "rxnVols"] * rxn) * pError
  #print(sprintf("The protocol for the reaction named: -- %s --", rownames(initialConc)))
  return(res)
}
|
ea670d04e2c0d27d6b2d400f431ddf1e53b07781
|
b9af64c8c92854e98da10e3ab316ba153e408dfc
|
/NGS/script_all.R
|
be9477ff4a8523e7464aaf87c26506f885ac4108
|
[] |
no_license
|
igemsoftware2021/iGEM_Simulation
|
ba27f07c0e17a55a5b51eb3c1b7d4ab2bf3d6326
|
431584687ddab68d85f73658dcfb19419ae534f1
|
refs/heads/main
| 2023-08-24T14:25:13.752231
| 2021-10-22T01:41:11
| 2021-10-22T01:41:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,075
|
r
|
script_all.R
|
library(ggplot2)
# Read every variant table whose filename matches the sample pattern and stack
# them into one data frame.
# FIX: the original grew six vectors with c() inside the loop (O(n^2));
# building one small data frame per file and binding once is O(n).
files = list.files(pattern = "A122_10")
per_file = lapply(files, function(f) {
  # read.csv2 with sep="\t": tab-separated with decimal commas.
  tmp = read.csv2(f, sep = "\t")
  data.frame(Sample = rep(substring(f, 1, 11), nrow(tmp)),  # sample id = first 11 chars of filename
             Position = tmp$Position,
             Ref = tmp$Ref,
             Mutation = tmp$VarAllele,
             Occurences = tmp$Reads2,            # reads supporting the variant
             Depth = tmp$Reads1 + tmp$Reads2)    # total reads at the position
})
data = do.call(rbind, per_file)
# Keep only single-nucleotide reference rows (drops indel-style entries).
data = data[data$Ref %in% c("A", "T", "C", "G"),]
#write.csv2(data, file = "donneees_ngs_variantcall.csv", sep = ";", dec = ",")
data$Sample = as.factor(data$Sample)
# C:G -> T:A candidates with minimum coverage: keep variants where the
# reference is C/G, the variant allele is T/A, and depth exceeds 20 reads.
gg = data.frame(data[((data$Ref %in% c('C', 'G')) & data$Mutation %in% c("T", "A") & data$Depth > 20),])
# Lookup table translating the 3-digit plasmid id embedded in the sample name
# to a human-readable mutator label.
mutators = c("T7", "AID-T7", "pmCDAI-T7", "rAPOBEC1-T7",
             "TadA*-T7", "CGG", "AID-CGG", "pmCDA1-CGG", "rAPOBEC1-CGG",
             "TadA*-CGG", "evoAPOBEC1-BE4max-CGG", "evo-CDA1-BE4max-CGG",
             "ABE8.20-m-CGG", "evoAPOBEC1-BE4max-T7", "evo-CDA1-BE4max-T7",
             "ABE8.20-m-T7", "pSEVA221", "pSEVA471")
id = c("100", "101", "102", "103", "104", "109", "110", "111", "112",
       "113", "114", "115", "116", "117", "118", "119", "221", "471")
trans = data.frame(id = id, mut = mutators)
# Row-by-row translation: characters 9-11 of the sample name hold the id.
# NOTE(review): an unknown id silently contributes nothing here, which would
# make `nom` shorter than nrow(gg) and fail at assignment; also 1:nrow(gg)
# misbehaves if gg is empty (prefer seq_len).
nom = c()
for (i in 1:nrow(gg)) {
  nom = c(nom, trans$mut[trans$id == substring(as.character(gg$Sample[i]), 9, 11)])
}
gg[["nom"]] = nom
# Per-mutator distribution of the variant allele fraction, ordered by the
# explicit limits= vector (T7 promoter constructs first, then CGG).
ggplot(gg) +
  aes(x = nom, y = Occurences/Depth) +
  ggtitle("C:G -> T:A") +
  xlab("Mutator") +
  ylab("variant read / total") +
  geom_jitter() +
  geom_boxplot(alpha = 0.5, outlier.size=0, outlier.shape=NA) +
  theme(axis.text.x = element_text(angle=45, hjust = 1)) +
  scale_x_discrete(name ="Mutator",
                   limits=c("pSEVA221", "T7", "AID-T7", "pmCDAI-T7", "rAPOBEC1-T7",
                            "TadA*-T7", "evoAPOBEC1-BE4max-T7", "evo-CDA1-BE4max-T7",
                            "ABE8.20-m-T7", "pSEVA471", "CGG", "AID-CGG", "pmCDA1-CGG", "rAPOBEC1-CGG",
                            "TadA*-CGG", "evoAPOBEC1-BE4max-CGG", "evo-CDA1-BE4max-CGG",
                            "ABE8.20-m-CGG"))
nom = c()
type = c()
for (i in 1:nrow(gg2)) {
nom = c(nom, trans$mut[trans$id == substring(as.character(gg2$Sample[i]), 9, 11)])
if (((data$Ref[i] %in% c('C', 'G')) & data$Mutation[i] %in% c("T", "A"))){
type = c(type, "C:G -> T:A")
}
else {
type = c(type, "A:T -> G:C")
}
}
gg2[["nom"]] = nom
gg2[["type"]] = type
ggplot(gg2) +
aes(x = nom, fill = type) +
ggtitle("total mutations") +
xlab("Mutator") +
ylab("mutations") +
geom_bar() +
theme(axis.text.x = element_text(angle=45, hjust = 1))
# Aggregate the variant counts per mutator and per transition class into a
# pooled proportion (sum of variant reads / sum of depth).
id = levels(as.factor(gg2$nom))
mut = c()
idd = c()
type = c()
for (i in id){
  mut = c(mut, sum(gg2$Occurences[gg2$type == "C:G -> T:A" & gg2$nom == i])/sum(gg2$Depth[gg2$type == "C:G -> T:A" & gg2$nom == i]))
  mut = c(mut, sum(gg2$Occurences[gg2$type == "A:T -> G:C" & gg2$nom == i])/sum(gg2$Depth[gg2$type == "A:T -> G:C" & gg2$nom == i]))
  type = c(type, "C:G -> T:A", "A:T -> G:C")
  idd = c(idd, i, i)
}
gg3 = data.frame(id =idd, mut = mut, type = type)
ggplot(gg3) +
  aes(x = id, y = mut, fill = type) +
  ggtitle("Mutations of each type") +
  xlab("Mutator") +
  ylab("Proportion") +
  geom_col() +
  theme(axis.text.x = element_text(angle=45, hjust = 1))
# Same box/jitter plot as before but restricted to the A:T -> G:C class.
ggplot(gg2[gg2$type == "A:T -> G:C",]) +
  aes(x = nom, y = Occurences/Depth) +
  ggtitle("A:T -> G:C") +
  xlab("Mutator") +
  ylab("variant read / total") +
  geom_jitter() +
  geom_boxplot(alpha = 0.5, outlier.size=0, outlier.shape=NA) +
  theme(axis.text.x = element_text(angle=45, hjust = 1)) +
  scale_x_discrete(name ="Mutator",
                   limits=c("pSEVA221", "T7", "AID-T7", "pmCDAI-T7", "rAPOBEC1-T7",
                            "TadA*-T7", "evoAPOBEC1-BE4max-T7", "evo-CDA1-BE4max-T7",
                            "ABE8.20-m-T7", "pSEVA471", "CGG", "AID-CGG", "pmCDA1-CGG", "rAPOBEC1-CGG",
                            "TadA*-CGG", "evoAPOBEC1-BE4max-CGG", "evo-CDA1-BE4max-CGG",
                            "ABE8.20-m-T7", "pSEVA471", "CGG") ) # NOTE: kept verbatim? see source
# mutations rate (validation)
# Per-mutator mutation rates split by reference base for the four classic T7
# constructs; alpha collects rates in the order defined by id/activ above.
gg4 = gg2[gg2$nom %in% c("AID-T7", "pmCDAI-T7", "rAPOBEC1-T7", "TadA*-T7"),]
id = c("AID-T7", "pmCDAI-T7", "rAPOBEC1-T7", "TadA*-T7", "AID-T7", "pmCDAI-T7", "rAPOBEC1-T7", "TadA*-T7")
activ = c("C -> T", "C -> T", "C -> T", "T -> C", "G -> A", "G -> A", "G -> A", "A -> G")
alpha = c()
for (i in c("AID-T7", "pmCDAI-T7", "rAPOBEC1-T7")){
  alpha = c(alpha, sum(gg4$Occurences[gg4$Ref == "C" & gg4$nom == i])/sum(gg4$Depth[gg4$Ref == "C" & gg4$nom == i]))
}
for (i in c("TadA*-T7")){
  alpha = c(alpha, sum(gg4$Occurences[gg4$Ref == "T" & gg4$nom == i])/sum(gg4$Depth[gg4$Ref == "T" & gg4$nom == i]))
}
for (i in c("AID-T7", "pmCDAI-T7", "rAPOBEC1-T7")){
  alpha = c(alpha, sum(gg4$Occurences[gg4$Ref == "G" & gg4$nom == i])/sum(gg4$Depth[gg4$Ref == "G" & gg4$nom == i]))
}
for (i in c("TadA*-T7")){
  alpha = c(alpha, sum(gg4$Occurences[gg4$Ref == "A" & gg4$nom == i])/sum(gg4$Depth[gg4$Ref == "A" & gg4$nom == i]))
}
#plot
gg4 = data.frame(Mutator = id, Activity = activ, Mutation_rate = alpha)
ggplot(gg4) +
  aes(x = Mutator, y = Mutation_rate, fill = Activity) +
  geom_col(position = "dodge") +
  xlab("Mutator") +
  ylab("Mutation rate") +
  theme(axis.text.x = element_text(angle=45, hjust = 1)) +
  scale_x_discrete(name ="Mutator",
                   limits=c("AID-T7", "pmCDAI-T7", "rAPOBEC1-T7",
                            "TadA*-T7"))
#graphe robustesse
# Literature estimates at generation g = 3 (presumably from the cited Spanish
# publication — TODO confirm source and the 1.23 / 0.0065 correction factors).
esti = c(0.00354, 0.0564, 0.00553, 0.00452, 0.00360, 0.00664, 0.00972, 0.000709) #estimated values (spanich publication)
gg4[["Estimation"]] = esti
ggplot(gg4) +
  aes(x = Estimation, y = Mutation_rate/1.23 - 0.0065) +
  xlab("Estimation at g = 3 (literature)") +
  ylab("Corrected estimation at g = 30") +
  geom_point() +
  geom_smooth(method = "lm") +
  geom_abline(slope = 1, intercept = 0, linetype="dashed", color = "red")
# wo
# Same comparison "without" the second row (pmCDAI-T7 C->T, the 0.0564
# outlier), hence the shorter esti vector and gg4[-2,].
esti = c(0.00354, 0.00553, 0.00452, 0.00360, 0.00664, 0.00972, 0.000709) #estimated values (spanich publication)
gg4 = gg4[-2,]
gg4[["Estimation"]] = esti
ggplot(gg4) +
  aes(x = Estimation, y = Mutation_rate) +
  ggtitle("Model accuracy") +
  xlab("Estimation at 3g (literature)") +
  ylab("Estimation at 30g (our NGS results)") +
  geom_point() +
  geom_smooth(method = "lm")
# other estimationslimits=c("pSEVA221", "T7", "AID-T7", "pmCDAI-T7", "rAPOBEC1-T7",
# Rates for the evolved T7 constructs, corrected with the same 1.23 / 0.0065
# factors and printed (not plotted).
gg4 = gg2[gg2$nom %in% c("evoAPOBEC1-BE4max-T7", "evo-CDA1-BE4max-T7",
                         "ABE8.20-m-T7"),]
id = c("evoAPOBEC1-BE4max-T7", "evo-CDA1-BE4max-T7","ABE8.20-m-T7",
       "evoAPOBEC1-BE4max-T7", "evo-CDA1-BE4max-T7","ABE8.20-m-T7")
activ = c("C -> T", "C -> T", "T -> C",
          "G -> A", "G -> A", "A -> G")
alpha = c()
for (i in c("evoAPOBEC1-BE4max-T7", "evo-CDA1-BE4max-T7")){
  alpha = c(alpha, sum(gg4$Occurences[gg4$Ref == "C" & gg4$nom == i])/sum(gg4$Depth[gg4$Ref == "C" & gg4$nom == i]))
}
for (i in c("ABE8.20-m-T7")){
  alpha = c(alpha, sum(gg4$Occurences[gg4$Ref == "T" & gg4$nom == i])/sum(gg4$Depth[gg4$Ref == "T" & gg4$nom == i]))
}
for (i in c("evoAPOBEC1-BE4max-T7", "evo-CDA1-BE4max-T7")){
  alpha = c(alpha, sum(gg4$Occurences[gg4$Ref == "G" & gg4$nom == i])/sum(gg4$Depth[gg4$Ref == "G" & gg4$nom == i]))
}
for (i in c("ABE8.20-m-T7")){
  alpha = c(alpha, sum(gg4$Occurences[gg4$Ref == "A" & gg4$nom == i])/sum(gg4$Depth[gg4$Ref == "A" & gg4$nom == i]))
}
alpha = alpha/1.23 -0.0065 # esti
alpha
# final
# Rates for all seven CGG constructs, again split by reference base; alpha
# collects 14 values in the order C/C/C/T/C/C/T then G/G/G/A/G/G/A.
gg4 = gg2[gg2$nom %in% c("AID-CGG", "pmCDA1-CGG", "rAPOBEC1-CGG", "TadA*-CGG",
                         "evoAPOBEC1-BE4max-CGG","evo-CDA1-BE4max-CGG","ABE8.20-m-CGG"),]
id = c("AID-CGG", "pmCDA1-CGG", "rAPOBEC1-CGG",
       "TadA*-CGG", "evoAPOBEC1-BE4max-CGG", "evo-CDA1-BE4max-CGG",
       "ABE8.20-m-CGG")
activ = c("C -> T", "C -> T","C -> T", "T -> C", "C -> T","C -> T", "T -> C",
          "G -> A", "G -> A", "G -> A","A -> G", "G -> A", "G -> A","A -> G")
alpha = c()
for (i in c("AID-CGG", "pmCDA1-CGG", "rAPOBEC1-CGG")){
  alpha = c(alpha, sum(gg4$Occurences[gg4$Ref == "C" & gg4$nom == i])/sum(gg4$Depth[gg4$Ref == "C" & gg4$nom == i]))
}
for (i in c("TadA*-CGG")){
  alpha = c(alpha, sum(gg4$Occurences[gg4$Ref == "T" & gg4$nom == i])/sum(gg4$Depth[gg4$Ref == "T" & gg4$nom == i]))
}
for (i in c("evoAPOBEC1-BE4max-CGG", "evo-CDA1-BE4max-CGG")){
  alpha = c(alpha, sum(gg4$Occurences[gg4$Ref == "C" & gg4$nom == i])/sum(gg4$Depth[gg4$Ref == "C" & gg4$nom == i]))
}
for (i in c("ABE8.20-m-CGG")){
  alpha = c(alpha, sum(gg4$Occurences[gg4$Ref == "T" & gg4$nom == i])/sum(gg4$Depth[gg4$Ref == "T" & gg4$nom == i]))
}
for (i in c("AID-CGG", "pmCDA1-CGG", "rAPOBEC1-CGG")){
  alpha = c(alpha, sum(gg4$Occurences[gg4$Ref == "G" & gg4$nom == i])/sum(gg4$Depth[gg4$Ref == "G" & gg4$nom == i]))
}
for (i in c("TadA*-CGG")){
  alpha = c(alpha, sum(gg4$Occurences[gg4$Ref == "A" & gg4$nom == i])/sum(gg4$Depth[gg4$Ref == "A" & gg4$nom == i]))
}
for (i in c("evoAPOBEC1-BE4max-CGG", "evo-CDA1-BE4max-CGG")){
  alpha = c(alpha, sum(gg4$Occurences[gg4$Ref == "G" & gg4$nom == i])/sum(gg4$Depth[gg4$Ref == "G" & gg4$nom == i]))
}
for (i in c("ABE8.20-m-CGG")){
  alpha = c(alpha, sum(gg4$Occurences[gg4$Ref == "A" & gg4$nom == i])/sum(gg4$Depth[gg4$Ref == "A" & gg4$nom == i]))
}
# the graph
# NOTE(review): Mutator and Activity below have 28 entries (T7 + CGG rows),
# but alpha holds only the 14 CGG values just computed — data.frame() silently
# recycles alpha twice, so the T7 half of this plot repeats the CGG rates.
# Confirm whether alpha was meant to accumulate across both sections.
gg0 = data.frame(Mutator = c("AID-T7", "pmCDAI-T7", "rAPOBEC1-T7", "TadA*-T7",
                             "AID-T7", "pmCDAI-T7", "rAPOBEC1-T7", "TadA*-T7",
                             "evoAPOBEC1-BE4max-T7", "evo-CDA1-BE4max-T7","ABE8.20-m-T7",
                             "evoAPOBEC1-BE4max-T7", "evo-CDA1-BE4max-T7","ABE8.20-m-T7",
                             "AID-CGG", "pmCDA1-CGG", "rAPOBEC1-CGG", "TadA*-CGG",
                             "evoAPOBEC1-BE4max-CGG","evo-CDA1-BE4max-CGG","ABE8.20-m-CGG",
                             "AID-CGG", "pmCDA1-CGG", "rAPOBEC1-CGG", "TadA*-CGG",
                             "evoAPOBEC1-BE4max-CGG","evo-CDA1-BE4max-CGG","ABE8.20-m-CGG"),
                 Activity = c("C -> T", "C -> T", "C -> T", "T -> C",
                              "G -> A", "G -> A", "G -> A", "A -> G",
                              "C -> T", "C -> T", "T -> C",
                              "G -> A", "G -> A", "A -> G",
                              "C -> T", "C -> T","C -> T", "T -> C", "C -> T","C -> T", "T -> C",
                              "G -> A", "G -> A", "G -> A","A -> G", "G -> A", "G -> A","A -> G"),
                 Mutation_rate = alpha)
ggplot(gg0) +
  aes(x = Mutator, y = Mutation_rate, fill = Activity) +
  geom_col() +
  xlab("Mutator") +
  ylab("Mutation rate") +
  theme(axis.text.x = element_text(angle=45, hjust = 1)) +
  scale_x_discrete(name ="Mutator",
                   limits=c("AID-T7", "pmCDAI-T7", "rAPOBEC1-T7",
                            "TadA*-T7", "evoAPOBEC1-BE4max-T7", "evo-CDA1-BE4max-T7",
                            "ABE8.20-m-T7", "", "AID-CGG", "pmCDA1-CGG", "rAPOBEC1-CGG",
                            "TadA*-CGG", "evoAPOBEC1-BE4max-CGG", "evo-CDA1-BE4max-CGG",
                            "ABE8.20-m-CGG"))
|
ded517cf1e61bd5aefe5fe388585813d434922df
|
c766cdf25b2c437cf66f3f833821ba5e061cb3e1
|
/man/sqlGetQuery.Rd
|
bfe82ccb2857cf7f098274e724e7d9ac343ca26d
|
[
"MIT"
] |
permissive
|
danielfm123/dftools
|
91692018cb67b5251c4fd8d5af6bc6548e3a4fc1
|
51baaac3219a7e94239bfa1c16c76aea4ed6f468
|
refs/heads/master
| 2021-08-06T01:41:39.523063
| 2021-07-14T18:19:25
| 2021-07-14T18:19:25
| 204,495,463
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,881
|
rd
|
sqlGetQuery.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sqlGetQuery.R
\name{sqlGetQuery}
\alias{sqlGetQuery}
\title{Gets result from sql query}
\usage{
sqlGetQuery(server_name, query, param=c(), dt=FALSE, key=c(), ...)
}
\arguments{
\item{server_name}{character, name of the DB server from sqlServers list.}
\item{query}{character, query structure with variant parameters between @'s, if they are.}
\item{param}{character vector with the value of the parameters that would be used in query.}
\item{dt}{boolean, If true, the results would be data table class.}
\item{key}{character vector, it contains the names of the columns to be used as keys if return a data table.}
\item{...}{inherit parameters used in dbGetQuery().}
}
\value{
A data frame or data table with query results.
}
\description{
Performs a query against a DB in a server from sqlServers and returns its result as a data frame or data table.
It calls dbGetQuery() from the DBI package, so the result set is always freed. It provides the option
to write general query templates and change their parameters easily, as in sqlGsub() from
this package.
}
\details{
It ends the connection inmediately after getting the results. sqlServer is
a list built-in sqlGetConn().
}
\examples{
sqlWriteTable("local",mtcars,"mtcars") #create mtcars table in local server
sqlListTables("local")
q1="SELECT * FROM mtcars WHERE mpg>20" #declare the query
sqlGetQuery("local",q1) #do a simple query
q2="SELECT @col@ FROM @table@ WHERE @condition@" #declare a mutable query
p1=c(table="mtcars",col="*",condition="hp>100")
p2=c(table="mtcars",col="wt",condition="wt>2.600") #and declare the parameters to use in the mutable query
sqlGetQuery("local",q2,p1)
sqlGetQuery("local",q2,p2,dt=T) # as data table
}
\seealso{
"sqlServerConn","setSqlServers()" and "sqlGsub()" documentation in toolkitEntel and "dbGetQuery()"
from DBI for more details.
}
|
c15adde00b3e2867573d214e1bb8f0d2635b34bd
|
5032968fceb435ad9d47df6885782cbf71439564
|
/PCA hierchial +k means scm.R
|
7b9363ac1dff80483c1886687e1e2bfde1a07e7e
|
[] |
no_license
|
surjithcm7/EXCEL-R-ASSIGNMENTS
|
5063f3f8434246de939a288163724067a95c3856
|
c948bba5f35c31f39bfdbcf229d963654fc4afa8
|
refs/heads/master
| 2021-03-19T00:47:02.103035
| 2020-03-13T16:46:50
| 2020-03-13T16:46:50
| 247,115,828
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,001
|
r
|
PCA hierchial +k means scm.R
|
# Wine PCA + hierarchical clustering.
# Reads an interactively chosen CSV whose first column is the class label.
library(readr)
data<-read.csv(file.choose())
View(data)
#since type column is irrelevant we can remove it
mydata<-(data[-1])
View(mydata)
# NOTE(review): attach() is risky (masks names on the search path) and nothing
# below appears to rely on attached columns; consider removing it.
attach(mydata)
cor(mydata)
#the correlation values are somewhere negative somewhere positive somewhere high
#which implies the columns are correlated each other
################## model pca ########################
PCA<-princomp(mydata, cor = TRUE, scores = TRUE, covmat = NULL)
summary(PCA)
#here we can see that the first 3 components give us 92% information about the data
loadings(PCA)
plot(PCA)
#here we can see that the first 5 componnts are much important.
#out of those 1st component is the most important one
biplot(PCA)
# Cumulative % of variance explained, to help choose the number of components.
plot(cumsum(PCA$sdev*PCA$sdev)*100/(sum(PCA$sdev*PCA$sdev)),type="b")
# Showing the increase of variance with considering principal components
# Which helps in choosing number of principal components
#since we have to cluster using only 3 components we take those 3 only
pca3<-PCA$scores[,1:3]
mydata2<-cbind(mydata,pca3)
View(mydata2)
#now we have to perform clustering on both the datasets.
#that is for before performing pca and after performing the pca.
# Normalizing the data
normdata<-scale(mydata[,1:(ncol(mydata))])
View(normdata)
# FIX: dist() only accepts method names it can prefix-match; the original
# misspelling "euclidian" is rejected with "invalid distance method".
d<-dist(normdata,method="euclidean")
fit<-hclust(d,method="complete")
plot(fit)
plot(fit,hang=-1)
#choose k=sqrt(n/2)=9
rect.hclust(fit,k=9,border="blue")
groups<-cutree(fit,k=10)
membership<-as.matrix(groups)
final<-data.frame(mydata,membership)
View(final)
# Move the membership column to the front before exporting.
final1<-final[,c(ncol(final),1:(ncol(final)-1))]
View(final1)
write.csv(final1, file="final wine hierarchial scm1.csv",row.names = F)
getwd()
aggregate(mydata,by=list(final1$membership),mean)
#now we perform clustering on the data on which we have performed PCA
# Cluster on the three principal-component score columns only (14:16).
normdata<-scale(mydata2[,14:(ncol(mydata2))])
View(normdata)
# FIX: same "euclidian" -> "euclidean" spelling correction as above.
d<-dist(normdata,method="euclidean")
fit<-hclust(d,method="complete")
plot(fit)
plot(fit,hang=-1)
#choose k=sqrt(n/2)=9
rect.hclust(fit,k=9,border="red")
groups<-cutree(fit,k=10)
membership<-as.matrix(groups)
final<-data.frame(mydata,membership)
View(final)
final1<-final[,c(ncol(final),1:(ncol(final)-1))]
View(final1)
write.csv(final1, file="final wine hierarchial with pca scm.csv",row.names = F)
getwd()
aggregate(mydata,by=list(final1$membership),mean)
#from this we can see that there is change in clustering before and after performing pca
#membership values are changed for many rows
############### performing k-means clustering ############################
## --- k-means on the original (pre-PCA) attributes ----------------------
normdata <- scale(mydata[, 1:(ncol(mydata))])
wss <- NULL
# quick look at an arbitrary 10-cluster solution
kmean1 <- kmeans(normdata, 10)
str(kmean1)
library(animation)
kmeans.ani(mydata, 9)
# scree plot: within-group sum of squares for k = 1..15
wss <- (nrow(normdata) - 1) * sum(apply(normdata, 2, var))
for (i in 2:15) wss[i] <- sum(kmeans(normdata, centers = i)$withinss)
plot(1:15, wss, type="b", xlab="Number of Clusters", ylab="Within groups sum of squares") # Look for an "elbow" in the scree plot #
title(sub = "K-Means Clustering Scree-Plot")
# total within-cluster SS for k = 2..15 (elbow selection)
twss <- NULL
for (i in 2:15) {
  twss <- c(twss, kmeans(normdata, i)$tot.withinss)
}
twss
plot(2:15, twss, type = "o")
kmean10 <- kmeans(normdata, 10)
str(kmean10)
kmean11 <- kmeans(normdata, 11)
str(kmean11)
# using the elbow curve the optimum no. of clusters is 11 (minimum twss)
final2 <- data.frame(mydata, kmean11$cluster)
ncol(mydata)
aggregate(mydata[, 1:13], by = list(kmean11$cluster), FUN = mean)
# BUG FIX: the original indexed `final` (the stale hierarchical-clustering
# result from the previous section) instead of `final2`; move the cluster
# column to the front of the k-means result.
final1 <- final2[, c(ncol(final2), 1:(ncol(final2) - 1))]
View(final1)
write.csv(final1, file="wine kmean scm.csv", row.names = FALSE)
getwd()

## --- k-means on the PCA scores -----------------------------------------
# BUG FIX: columns 14:ncol(mydata2) are the 3 PCA scores (13 original
# attributes + 3 components); the original started at column 11, which
# mixed three raw attributes into the "PCA" clustering and disagreed with
# the hierarchical-with-PCA section above.
normdata <- scale(mydata2[, 14:(ncol(mydata2))])
wss <- NULL
kmean1 <- kmeans(normdata, 9)
str(kmean1)
wss <- (nrow(normdata) - 1) * sum(apply(normdata, 2, var))
for (i in 2:15) wss[i] <- sum(kmeans(normdata, centers = i)$withinss)
plot(1:15, wss, type="b", xlab="Number of Clusters", ylab="Within groups sum of squares") # Look for an "elbow" in the scree plot #
title(sub = "K-Means Clustering Scree-Plot")
twss <- NULL
for (i in 2:15) {
  twss <- c(twss, kmeans(normdata, i)$tot.withinss)
}
twss
plot(2:15, twss, type = "o")
kmean3 <- kmeans(normdata, 3)
str(kmean3)
kmean8 <- kmeans(normdata, 8)
str(kmean8)
kmean13 <- kmeans(normdata, 13)
str(kmean13)
# using the elbow curve the optimum no. of clusters is 13 (minimum twss)
final2 <- data.frame(mydata, kmean13$cluster)
aggregate(mydata[, 1:13], by = list(kmean13$cluster), FUN = mean)
# BUG FIX: again index final2, not the stale `final`
final1 <- final2[, c(ncol(final2), 1:(ncol(final2) - 1))]
View(final1)
write.csv(final1, file="wine kmean with pca.csv", row.names = FALSE)
getwd()
# here we can see that the optimum no. of clusters using k-means differs
# for the two datasets: before PCA it was 11 and after PCA it is 13.
# Cluster membership also changes before vs after applying PCA, so PCA
# changes the grouping in both hierarchical and k-means clustering.
|
034d969a6469c90daf114ab5422b85eae47eac11
|
99dd753646dc29f1f34b9faf655e119c0cc5655d
|
/ui.R
|
2324d30114493e16e07763b8942becd98d800a35
|
[] |
no_license
|
hanyangunivleejoohyun/r-test
|
4914273d422195da8e50d83a96bf08eb66bccfed
|
76a91a0082afad22313b916601877c8b11e8003d
|
refs/heads/master
| 2020-05-23T21:50:52.097164
| 2017-03-20T06:22:03
| 2017-03-20T06:22:03
| 84,793,403
| 0
| 0
| null | 2017-03-14T00:43:56
| 2017-03-13T06:49:49
|
R
|
UTF-8
|
R
| false
| false
| 1,532
|
r
|
ui.R
|
# Shiny UI: word-cloud and word-context analysis for the text of Moby Dick.
# UI labels are in Korean; English glosses are given in the comments.
library(shiny)
shinyUI(fluidPage(
titlePanel("Wordcloud in Moby Dick example"),
# Sidebar UI: analysis inputs
sidebarLayout(
sidebarPanel(
# label: "Enter the word to analyse (example: dog)"
textInput("word", label = h3("분석할 단어를 입력해주세요(example : dog)"), value=""),
# label: "Enter how many words before/after to use for context analysis"
textInput("num", label = h3("문맥 분석할 앞,뒤 단어수를 입력해주세요."), value = "3"),
# label: "Font-size range used to represent word frequency"
sliderInput("scale", label="빈도수에 따라 표현될 글자 크기를 입력해주세요", min = 0.5,max = 7, value = c(0.5, 5)),
# label: "Minimum frequency for a word to appear in the word cloud"
# (label string deliberately spans two source lines)
numericInput("min_num", label = "워드 클라우드에 표현하고 싶은
최소 빈도수를 입력해주세요", value = 2,min=1),
# label: "Higher values rotate more words to vertical"
sliderInput("rot_per", label="값이 높을수록 수직으로 표현되는 단어의 수가 늘어납니다.", min = 0,max = 1, value =0.3),
# label: "Place words at random positions"
checkboxInput("random_order", label="단어 위치 임의 지정", value =F),
actionButton("goButton", "Go!")
),
mainPanel( # The main panel uses tabs so several outputs can be shown for one dataset.
# Current contents are placeholders and can be extended later if one dataset needs several visualisations.
tabsetPanel(type = "tabs",
tabPanel("Plot", plotOutput("plot")),
tabPanel("barplot", plotOutput("barplot")),
# table header: "Word frequencies obtained from the context analysis"
tabPanel("table",h3("문맥 분석 결과 도출된 단어 빈도수 입니다."), tableOutput("table"))
)
)
)
)
)
e0eb16a8646271ce60fc4d69d81d0ceca6084ee7
|
24194d2bd6986aeb089e5557158470194923d128
|
/man/winsd.Rd
|
25b0f790426e96746e587815f7d23bfe5494e0a4
|
[] |
no_license
|
shizidushu/fsrs
|
35e83a11a77c6110225748b90baf147f030b8ba7
|
32e9ff6e132c3eeee04f2ee9472735f9bdd2fd00
|
refs/heads/master
| 2021-01-25T14:16:43.047809
| 2018-08-03T03:51:24
| 2018-08-03T03:51:24
| 123,678,350
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 899
|
rd
|
winsd.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/summation.R
\name{winsd}
\alias{winsd}
\title{Winsorized sample standard deviation}
\usage{
winsd(x, trim = 0.2, na.rm = FALSE)
}
\arguments{
\item{x}{An \R object. Currently there are methods for
numeric/logical vectors and \link[=Dates]{date},
\link{date-time} and \link{time interval} objects. Complex vectors
are allowed for \code{trim = 0}, only.}
\item{trim}{the fraction (0 to 0.5) of observations to be
trimmed from each end of \code{x} before the mean is computed.
Values of trim outside that range are taken as the nearest endpoint.
}
\item{na.rm}{a logical value indicating whether \code{NA}
values should be stripped before the computation proceeds.}
}
\description{
Winsorized sample standard deviation
}
\examples{
x <- c(1, 2, 8, 9, 10, 16, 18, 22, 27, 29, 35, 42)
winsd(x)
}
|
2dc7c099eb3e52aabebc0e7c6aea21f4ea7c0b8c
|
dc5b0c038d56db8a2f777e76971df77eb9cb2955
|
/stl-async.R
|
3f76b0ec4e674a5ccd51c4340c91bcc66c9f2379
|
[] |
no_license
|
gianordoli/health-issues-server
|
ffe0c4d9b41c029240f61e2f8d9772b57464ccef
|
a279094da05131c9adcdc042c1ed889b4ef2c78d
|
refs/heads/master
| 2021-07-11T17:05:43.287268
| 2017-10-05T13:59:14
| 2017-10-05T13:59:14
| 105,484,834
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,167
|
r
|
stl-async.R
|
needs(stlplus)
vectorToTS <- function(data) {
print("vectorToTs")
ul <- unlist(strsplit(data,","))
dataMatrix <- matrix(ul, length(data), 2, T)
# Retrieving first and last months and weeks
firstDateRow <- head(dataMatrix[,c(1)], n=1)
firstDate <- strsplit(toString(firstDateRow), "-")
firstYear <- as.integer(firstDate[[1]][1])
firstMonth <- as.integer(firstDate[[1]][2])
lastDateRow <- tail(dataMatrix[,c(1)], n=1)
lastDate <- strsplit(toString(lastDateRow), "-")
lastYear <- as.integer(lastDate[[1]][1])
lastMonth <- as.integer(lastDate[[1]][2])
values <- dataMatrix[,c(2)]
# Convert data to time series; using only second column (values)
myTS <- ts(values, start=c(firstYear, firstMonth), end=c(lastYear, lastMonth), frequency=12)
return(myTS)
}
# --- Script entry point (evaluated per request) ---
# `input` is supplied by the caller, not defined in this file —
# presumably a list where input[[1]] is the vector of "date,value"
# strings and input[[2]] selects the STL component to return
# ("seasonal" or "trend"). TODO(review): confirm the caller's contract.
myTS <- vectorToTS(input[[1]])
type <- input[[2]]
# NOTE(review): `stlplus` is loaded at the top of this file, but the call
# below is stats::stl() — the stlplus dependency may be unnecessary.
mySTL <- stl(myTS, t.window = NULL, s.window="periodic", robust=TRUE)
# time.series holds the seasonal / trend / remainder components
mySTL.DF <- as.data.frame(mySTL$time.series)
# Format the requested component as "<name>: v1, v2, ..."
if(type == 'seasonal') {
response <- paste('seasonal:', toString(mySTL.DF$seasonal), collapse = "")
} else if(type == 'trend') {
response <- paste('trend:', toString(mySTL.DF$trend), collapse = "")
}
# Last expression = value returned to the caller.
# NOTE(review): `response` is undefined if `type` is neither value.
response
|
70ecc5bfa76a86b53a8d5d88d1924dcdf7a8f3c1
|
567a0e51b6b09ccb245fd18d429491950df6801e
|
/R/Sankey/Sankey diagrams.R
|
01188944883f752888016c6548b2e1c1294e93c2
|
[] |
no_license
|
samofashhurst/data-recipes
|
a6f4ab58ca64cf62d34d83562bce1cb21b6d9bc7
|
9f1eb30220736c7f6acc563f0a50ff230e0c14df
|
refs/heads/master
| 2021-10-23T12:04:12.196235
| 2021-10-13T04:30:23
| 2021-10-13T04:30:23
| 226,813,792
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,554
|
r
|
Sankey diagrams.R
|
# Build a Sankey diagram, e.g. for visualising common paths between pages
# during a browser session.
# install.packages("networkD3")
library(networkD3)

# Remember the caller's working directory, then switch to the script's own
# directory (restored at the end of the script).
initial_wd = getwd()
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))

# Names for the pages in the flow; the last node represents drop-off.
my_nodes <- c('Page 1','Page 2','Page 3','Drop-off')
# networkD3 expects zero-based node ids
nodes <- data.frame(node=c(0:(length(my_nodes)-1)),name=my_nodes)

# Load the page transition data: a matrix of frequencies where rows
# represent 'from' pages and columns represent 'to' pages.
temp = read.csv("sankey_prep.csv")
temp<-as.matrix(temp)
dimnames(temp)<-NULL

# Build the links table (source, target, value) directly from the matrix
# instead of growing a data frame row-by-row in a nested loop (which is
# O(n^2) because each append copies the whole frame). Row-major flattening
# via t(temp) reproduces the original row-then-column order; ids are
# zero-indexed because the JavaScript renderer requires it.
df <- data.frame(
  source = rep(seq_len(nrow(temp)) - 1L, each = ncol(temp)),
  target = rep(seq_len(ncol(temp)) - 1L, times = nrow(temp)),
  value  = as.vector(t(temp))
)

# Create Sankey diagram
n = networkD3::sankeyNetwork(Links = df, Nodes = nodes,
                             Source = 'source',
                             Target = 'target',
                             Value = 'value',
                             NodeID = 'name',
                             units = 'pageviews', fontSize = 14, nodeWidth = 20)

# Save plot as html, and render onscreen
saveNetwork(n, "sankey.html", selfcontained = TRUE)
n

# Put working directory back
setwd(initial_wd)
|
1ccc91bf40badd26afc78ccb0b0dbf95e683f8dd
|
069b5c0f325197dfbcc6584288a70ae7c51c8b81
|
/RegressionModel/Quiz1.R
|
97f94887b65fbf2700558de4a4ae8578feaaf08b
|
[] |
no_license
|
JohanLi1990/DataScienceCoursera
|
3db341c5a2074f9bc25121a699a0374026fdf233
|
99aa5574e530b6f9b9dd9bf208008e2c845625ae
|
refs/heads/master
| 2021-01-23T13:44:02.512726
| 2017-12-25T00:40:32
| 2017-12-25T00:40:32
| 38,518,965
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 839
|
r
|
Quiz1.R
|
# Regression Models — Quiz 1 worked answers.

## Q1: which mu minimises the weighted sum of squares sum(w * (x - mu)^2)?
x <- c(0.18, -1.54, 0.42, 0.95)
w <- c(2, 1, 3, 1)
for (mu in c(0.3, 1.077, 0.0025, 0.1471)) {
  print(sum(w * (x - mu)^2))
}

## Q2: regression through the origin (no intercept)
x <- c(0.8, 0.47, 0.51, 0.73, 0.36, 0.58, 0.57, 0.85, 0.44, 0.42)
y <- c(1.39, 0.72, 1.55, 0.48, 1.19, -1.59, 1.23, -0.65, 1.49, 0.05)
summary(lm(y ~ x - 1))

## Q3: mpg regressed on car weight in mtcars
data("mtcars")
summary(lm(mpg ~ wt, data = mtcars))

## Q4: answer = 1
## Q5: answer = 0.4 * 1.5 = 0.6

## Q6: z-score of the first observation
x <- c(8.58, 10.46, 9.01, 9.64, 8.86)
(x[1] - mean(x)) / sd(x) # -0.9718658

## Q7: intercept and slope of y on x
x <- c(0.8, 0.47, 0.51, 0.73, 0.36, 0.58, 0.57, 0.85, 0.44, 0.42)
y <- c(1.39, 0.72, 1.55, 0.48, 1.19, -1.59, 1.23, -0.65, 1.49, 0.05)
fits <- lm(y ~ x)
fits$coefficients

## Q9: sample mean of x
x <- c(0.8, 0.47, 0.51, 0.73, 0.36, 0.58, 0.57, 0.85, 0.44, 0.42)
mean(x)
|
00bf26efd8e220b21245cbba95fbaf4ca76f4b3f
|
bc248968b4e8e6da338695bf782ca6711dc8410a
|
/man/CosineDFV.Rd
|
ce6ecde222ff57c748b3b4fa2391f7bd223e87b6
|
[] |
no_license
|
PengNi/dSimer
|
75a1f87f2d3494c51b3aa76e75d09d52a01f3b99
|
e5c408e583c3759685fe0e41133916dbce49d265
|
refs/heads/master
| 2020-03-28T03:01:27.040649
| 2017-01-19T11:59:16
| 2017-01-19T11:59:16
| 49,204,477
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,432
|
rd
|
CosineDFV.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CosineDFV.R
\name{CosineDFV}
\alias{CosineDFV}
\title{calculate disease similarity by using feature vectors}
\usage{
CosineDFV(D1, D2, d2f, dcol = 2, fcol = 1, ccol = 3)
}
\arguments{
\item{D1}{a vector consists of disease ids/names}
\item{D2}{another vector consists of disease ids/names}
\item{d2f}{data.frame, contains term co-occurrences between features and diseases}
\item{dcol}{integer, disease column number in d2f}
\item{fcol}{integer, feature column number in d2f}
\item{ccol}{integer, co-occurrences column number in d2f}
}
\value{
a matrix of disease disease similarity which rownames and colnames
are the disease names
}
\description{
given two (lists of) disease names, this function will calculate cosine
similarity between these diseases' feature vectors.
}
\examples{
### this is a disease-symptom-cooccurrence sample, if you want to use
### the complete data, please use "data(d2s_hsdn)" command
data(d2s_hsdn_sample)
ds <- sample(unique(d2s_hsdn_sample[,2]), 10)
simmat <- CosineDFV(ds, ds, d2s_hsdn_sample)
}
\author{
Zhihui Fei, Peng Ni, Min Li
}
\references{
Zhou X Z, Menche J, Barabasi A L, et al. Human symptoms-disease
network[J]. Nature communications, 2014, 5.
Van Driel M A, Bruggeman J, Vriend G, et al. A text-mining analysis of the human
phenome[J]. European journal of human genetics, 2006, 14(5): 535-542.
}
|
5ff39c898110f9e569c8773f2dbf0f1567515d5a
|
6504f689aa3c557f7d11634ce0b09363a9e79bb4
|
/plot6.R
|
de24ad330ad0d752a68f17d84d3f592890fc2171
|
[] |
no_license
|
ryanbthomas/EDAProj2
|
d7c3f6c6462a08f7aae6b6231574d5e2e6aaf700
|
3cbf61edd8e131cc467e0d52a0f1ecc78d770db9
|
refs/heads/master
| 2022-02-02T02:03:05.393310
| 2015-06-19T00:08:14
| 2015-06-19T00:08:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 254
|
r
|
plot6.R
|
#plot6
# Compare emissions (scaled to thousands) by year for Los Angeles
# (fips "06037") vs Baltimore City (fips "24510") as a dodged bar chart.
# NOTE(review): `plot6_data` is not defined in this file — presumably a
# pre-aggregated data frame with columns year, Emissions and fips; confirm
# against the script that prepares it.
g <- ggplot(plot6_data) +
aes(x=year,y=Emissions/1000, fill=fips) +
geom_bar(stat="identity",position="dodge") +
scale_fill_discrete(name="City",breaks=c("06037","24510"), labels=c("Los Angeles","Baltimore City"))
# NOTE(review): the next line is truncated ("geom_p") and will not parse —
# the file appears to be cut off here; probably geom_point() or a similar
# layer was intended. Left byte-identical pending confirmation.
g <- g + geom_p
|
86e773fa25a96cada511772a88312690a8cf817a
|
2297b13b601747e5009339028dc7d6614208ec1d
|
/Model Ensembling/Boosting/iris_XGBoost.R
|
a8269184acb19591093e89b05a091ef8df218aad
|
[] |
no_license
|
prathmesh31/machine_learning_classwork
|
c3c649aed6519a3872ac10d806e84f3f1f6b7eb7
|
9e612a9bb57653aa6a7c17e23b4cb5277c0253d8
|
refs/heads/master
| 2020-03-24T03:37:34.664141
| 2018-07-26T10:55:59
| 2018-07-26T10:55:59
| 142,426,019
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 753
|
r
|
iris_XGBoost.R
|
# Multiclass classification of iris species with XGBoost (multi:softmax).
data("iris")
library(caret)
set.seed(1992)

# Stratified 70/30 split into training and validation sets
intrain <- createDataPartition(y = iris$Species, p = 0.7, list = FALSE)
training <- iris[intrain, ]
validation <- iris[-intrain, ]

library(xgboost)

# xgboost needs a numeric feature matrix (drop the Species column, #5)
# and zero-based integer class labels.
feature_matrix <- function(d) as.matrix(d[, -5])
species_label <- function(sp) {
  ifelse(sp == "setosa", 0, ifelse(sp == "versicolor", 1, 2))
}

mat_train <- feature_matrix(training)
mat_validation <- feature_matrix(validation)
lbl_spec <- species_label(training$Species)

# Train a 3-class softmax model for 9 boosting rounds
model.xgb <- xgboost(data = mat_train, label = lbl_spec, nrounds = 9,
                     num_class = 3, objective = "multi:softmax")

# Predict on the hold-out set and compare with the true labels
pred.xgb <- predict(model.xgb, newdata = mat_validation)
lbl_spec_val <- species_label(validation$Species)
confusionMatrix(as.factor(pred.xgb), as.factor(lbl_spec_val))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.