content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
install.packages("readxl")
install.packages("devtools")
install.packages("dpylr")
install.packages("tidyverse")
install.packages("ggplot2")
install.packages("kernlab")
install.packages('caret')
library("tidyverse")
library("caret")
library("readxl")
library("dplyr")
library("ggplot2")
library("tools")
library("kernlab")
# Examining the Data ------------------------------------------------------
CardioDS <- read_excel("CardiotocographyDataSet.xlsx")
str(CardioDS)
#Summaries of Attributes
summary(CardioDS$LBE)
summary(CardioDS$LB)
summary(CardioDS$AC)
summary(CardioDS$FM)
summary(CardioDS$UC)
summary(CardioDS$ASTV)
summary(CardioDS$MSTV)
summary(CardioDS$ALTV)
summary(CardioDS$mLTV)
summary(CardioDS$DL)
summary(CardioDS$DS)
summary(CardioDS$DP)
summary(CardioDS$DR)
#most reason file, oldest file
head(arrange(CardioDS, Date), 1)
tail(arrange(CardioDS, Date), 1)
range(CardioDS$Date)
# Common Trends -----------------------------------------------------------
hist(CardioDS$LB, main="Histogram for FHR Baseline Value (SisPorto)",
xlab="FHR baseline", ylab = "Frequency") #Histogram of Baseline Value
hist(CardioDS$AC, main="Histogram for Accelerations",
xlab="Accelerations", ylab = "Frequency") #Histogram of Accelerations
hist(CardioDS$FM, main="Histogram for Fetal Movement",
xlab="Fetal Movement", ylab = "Frequency") #Histogram of Fetal Movements
hist(CardioDS$UC, main="Histogram for Uterine Contractions",
xlab="Uterine Contractions", ylab = "Frequency") #Histogram of Uterine Contractions
hist(CardioDS$DS, main="Histogram for Severe Decelerations",
xlab="Severe Decelerations", ylab = "Frequency") #Histogram of Severe Decelerations
#SAMPLING ACCERATIONS - following population distribution?
sample_size = nrow(CardioDS)*.1
samples100 <- CardioDS[sample(nrow(CardioDS), sample_size, replace = TRUE), ]
hist(samples100$LB, main="Histogram for SAMPLE FHR Baseline Value (SisPorto)",
xlab="FHR baseline", ylab = "Frequency") #Histogram of Baseline Value
summary(samples100$LB)
#Attribution Correlations?
CardioDS$LBPS <- CardioDS$LB*60 #LB per Second
#scatter for Severe Decceration and Baseline Value
gscatter_DS_LBPS <- ggplot(CardioDS, aes(x=DS, y=LBPS, color=LBPS)) + geom_point() + geom_smooth(method="lm", se=FALSE)
gscatter_DS_LBPS <- gscatter_DS_LBPS + labs(title="Severe Deceleration and Baseline Value Correlation", x="Severe Decelerations", y = "Baseline Value")
gscatter_DS_LBPS # inspect scatter plot
#scatter for Acceration and Baseline Value
gscatter_AC_LBPS <- ggplot(CardioDS, aes(x=AC, y=LBPS, color=LBPS)) + geom_point() + geom_smooth(method="lm", se=FALSE)
gscatter_AC_LBPS <- gscatter_AC_LBPS + labs(title="Accelerations and Baseline Value Correlation", x="Accelerations", y = "Baseline Value")
gscatter_AC_LBPS # inspect scatter plot
#scatter for Severe Decceration and Uterine Activity
gscatter_DS_UC <- ggplot(CardioDS, aes(x=DS, y=UC, color=UC)) + geom_point() + geom_smooth(method="lm", se=FALSE)
gscatter_DS_UC <- gscatter_DS_UC + labs(title="Severe Deceleration and Uterine Activity Correlation", x="Severe Decelerations", y = "Uterine Activity")
gscatter_DS_UC # inspect scatter plot
#scatter for Accerations and Uterine Activity
gscatter_AC_UC <- ggplot(CardioDS, aes(x=AC, y=UC, color=UC)) + geom_point() + geom_smooth(method="lm", se=FALSE)
gscatter_AC_UC <- gscatter_AC_UC + labs(title="Accelerations and Uterine Activity Correlation", x="Accelerations", y = "Uterine Activity")
gscatter_AC_UC # inspect scatter plot
# Linear Model ------------------------------------------------------------
#Linear regression model
NSP_linearmodel <- lm(formula=NSP~LB, data=CardioDS) #LB and NPS
summary(NSP_linearmodel)
test1 <- lm(formula=NSP~AC, data=CardioDS) #AC and NPS
summary(test1)
test2 <- lm(formula=NSP~FM, data=CardioDS) #FM and NPS
summary(test2)
test3 <- lm(formula=NSP~UC, data=CardioDS) #UC and NPS
summary(test3)
#multiple regression
NSP_multiregression <- lm(formula=NSP~LB+AC+FM+UC, data=CardioDS)
summary(NSP_multiregression)
NSP_multiregression2 <- lm(formula=NSP~DL+DS+DP+DR, data=CardioDS)
summary(NSP_multiregression2)
# SVM Model ---------------------------------------------------------------
CardioDS$LBPS <- NULL
Train_List <- createDataPartition(y=CardioDS$NSP,p=.30,list=FALSE)
Train_Set <- CardioDS[Train_List,]
numrows <- c(1:nrow(CardioDS))
Test_List <- numrows[!numrows %in% Train_List]
Test_Set <- CardioDS[Test_List,]
NSP_svm_model <- ksvm(NSP ~ LBE+LB+AC+FM+UC+ASTV+MSTV+ALTV+MLTV+DL+DS+DP+DR+Width+Min+Max+Nmax+Nzeros+Mode+Mean+Median+Variance+Tendency, data=Train_Set,, type = "C-svc", cross=10)
NSP_svm_model
svm_trainpred <- predict(NSP_svm_model, Train_Set)
str(svm_trainpred)
train_pred_results <- table(svm_trainpred, Train_Set$NSP)
predtrain_totalCorrect <- train_pred_results[1,1] + train_pred_results[2,2]
predtrain_total <- nrow(Train_Set)
trainpred_svmaccuracy <- predtrain_totalCorrect/predtrain_total
trainpred_svmaccuracy
install.packages("e1071")
confusionMatrix(factor(svm_trainpred), factor(Train_Set$NSP))
|
/UCI Cardiotocography Prediction.R
|
no_license
|
madisontagg/Cardiotocography-Fetal-State-Prediction
|
R
| false
| false
| 5,087
|
r
|
install.packages("readxl")
install.packages("devtools")
install.packages("dpylr")
install.packages("tidyverse")
install.packages("ggplot2")
install.packages("kernlab")
install.packages('caret')
library("tidyverse")
library("caret")
library("readxl")
library("dplyr")
library("ggplot2")
library("tools")
library("kernlab")
# Examining the Data ------------------------------------------------------
CardioDS <- read_excel("CardiotocographyDataSet.xlsx")
str(CardioDS)
#Summaries of Attributes
summary(CardioDS$LBE)
summary(CardioDS$LB)
summary(CardioDS$AC)
summary(CardioDS$FM)
summary(CardioDS$UC)
summary(CardioDS$ASTV)
summary(CardioDS$MSTV)
summary(CardioDS$ALTV)
summary(CardioDS$mLTV)
summary(CardioDS$DL)
summary(CardioDS$DS)
summary(CardioDS$DP)
summary(CardioDS$DR)
#most reason file, oldest file
head(arrange(CardioDS, Date), 1)
tail(arrange(CardioDS, Date), 1)
range(CardioDS$Date)
# Common Trends -----------------------------------------------------------
hist(CardioDS$LB, main="Histogram for FHR Baseline Value (SisPorto)",
xlab="FHR baseline", ylab = "Frequency") #Histogram of Baseline Value
hist(CardioDS$AC, main="Histogram for Accelerations",
xlab="Accelerations", ylab = "Frequency") #Histogram of Accelerations
hist(CardioDS$FM, main="Histogram for Fetal Movement",
xlab="Fetal Movement", ylab = "Frequency") #Histogram of Fetal Movements
hist(CardioDS$UC, main="Histogram for Uterine Contractions",
xlab="Uterine Contractions", ylab = "Frequency") #Histogram of Uterine Contractions
hist(CardioDS$DS, main="Histogram for Severe Decelerations",
xlab="Severe Decelerations", ylab = "Frequency") #Histogram of Severe Decelerations
#SAMPLING ACCERATIONS - following population distribution?
sample_size = nrow(CardioDS)*.1
samples100 <- CardioDS[sample(nrow(CardioDS), sample_size, replace = TRUE), ]
hist(samples100$LB, main="Histogram for SAMPLE FHR Baseline Value (SisPorto)",
xlab="FHR baseline", ylab = "Frequency") #Histogram of Baseline Value
summary(samples100$LB)
#Attribution Correlations?
CardioDS$LBPS <- CardioDS$LB*60 #LB per Second
#scatter for Severe Decceration and Baseline Value
gscatter_DS_LBPS <- ggplot(CardioDS, aes(x=DS, y=LBPS, color=LBPS)) + geom_point() + geom_smooth(method="lm", se=FALSE)
gscatter_DS_LBPS <- gscatter_DS_LBPS + labs(title="Severe Deceleration and Baseline Value Correlation", x="Severe Decelerations", y = "Baseline Value")
gscatter_DS_LBPS # inspect scatter plot
#scatter for Acceration and Baseline Value
gscatter_AC_LBPS <- ggplot(CardioDS, aes(x=AC, y=LBPS, color=LBPS)) + geom_point() + geom_smooth(method="lm", se=FALSE)
gscatter_AC_LBPS <- gscatter_AC_LBPS + labs(title="Accelerations and Baseline Value Correlation", x="Accelerations", y = "Baseline Value")
gscatter_AC_LBPS # inspect scatter plot
#scatter for Severe Decceration and Uterine Activity
gscatter_DS_UC <- ggplot(CardioDS, aes(x=DS, y=UC, color=UC)) + geom_point() + geom_smooth(method="lm", se=FALSE)
gscatter_DS_UC <- gscatter_DS_UC + labs(title="Severe Deceleration and Uterine Activity Correlation", x="Severe Decelerations", y = "Uterine Activity")
gscatter_DS_UC # inspect scatter plot
#scatter for Accerations and Uterine Activity
gscatter_AC_UC <- ggplot(CardioDS, aes(x=AC, y=UC, color=UC)) + geom_point() + geom_smooth(method="lm", se=FALSE)
gscatter_AC_UC <- gscatter_AC_UC + labs(title="Accelerations and Uterine Activity Correlation", x="Accelerations", y = "Uterine Activity")
gscatter_AC_UC # inspect scatter plot
# Linear Model ------------------------------------------------------------
#Linear regression model
NSP_linearmodel <- lm(formula=NSP~LB, data=CardioDS) #LB and NPS
summary(NSP_linearmodel)
test1 <- lm(formula=NSP~AC, data=CardioDS) #AC and NPS
summary(test1)
test2 <- lm(formula=NSP~FM, data=CardioDS) #FM and NPS
summary(test2)
test3 <- lm(formula=NSP~UC, data=CardioDS) #UC and NPS
summary(test3)
#multiple regression
NSP_multiregression <- lm(formula=NSP~LB+AC+FM+UC, data=CardioDS)
summary(NSP_multiregression)
NSP_multiregression2 <- lm(formula=NSP~DL+DS+DP+DR, data=CardioDS)
summary(NSP_multiregression2)
# SVM Model ---------------------------------------------------------------
CardioDS$LBPS <- NULL
Train_List <- createDataPartition(y=CardioDS$NSP,p=.30,list=FALSE)
Train_Set <- CardioDS[Train_List,]
numrows <- c(1:nrow(CardioDS))
Test_List <- numrows[!numrows %in% Train_List]
Test_Set <- CardioDS[Test_List,]
NSP_svm_model <- ksvm(NSP ~ LBE+LB+AC+FM+UC+ASTV+MSTV+ALTV+MLTV+DL+DS+DP+DR+Width+Min+Max+Nmax+Nzeros+Mode+Mean+Median+Variance+Tendency, data=Train_Set,, type = "C-svc", cross=10)
NSP_svm_model
svm_trainpred <- predict(NSP_svm_model, Train_Set)
str(svm_trainpred)
train_pred_results <- table(svm_trainpred, Train_Set$NSP)
predtrain_totalCorrect <- train_pred_results[1,1] + train_pred_results[2,2]
predtrain_total <- nrow(Train_Set)
trainpred_svmaccuracy <- predtrain_totalCorrect/predtrain_total
trainpred_svmaccuracy
install.packages("e1071")
confusionMatrix(factor(svm_trainpred), factor(Train_Set$NSP))
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 1516039
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 1516039
c
c Input Parameter (command line, file):
c input filename QBFLIB/Miller-Marin/fpu/fpu-10Xh-error01-nonuniform-depth-21.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 567777
c no.of clauses 1516039
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 1516039
c
c QBFLIB/Miller-Marin/fpu/fpu-10Xh-error01-nonuniform-depth-21.qdimacs 567777 1516039 E1 [] 0 3102 564379 1516039 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Miller-Marin/fpu/fpu-10Xh-error01-nonuniform-depth-21/fpu-10Xh-error01-nonuniform-depth-21.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 693
|
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 1516039
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 1516039
c
c Input Parameter (command line, file):
c input filename QBFLIB/Miller-Marin/fpu/fpu-10Xh-error01-nonuniform-depth-21.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 567777
c no.of clauses 1516039
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 1516039
c
c QBFLIB/Miller-Marin/fpu/fpu-10Xh-error01-nonuniform-depth-21.qdimacs 567777 1516039 E1 [] 0 3102 564379 1516039 NONE
|
# Part 1: fit a probabilistic Guttman scaling model (sirt::prob.guttman) to
# item-response data and write person, item, and trait tables to CSV.
suppressPackageStartupMessages(library(ggplot2))
suppressPackageStartupMessages(library(plyr))
suppressPackageStartupMessages(library(dplyr))
suppressPackageStartupMessages(library(sirt))

# Usage: Rscript guttman.R <input.csv> [scores_out] [items_out] [trait_out]
args <- commandArgs(trailingOnly = TRUE)
if (length(args) == 0) {
  # fixed: message ended in ".n" - the intended newline escape was "\n"
  stop("At least one argument must be supplied (input file).\n", call. = FALSE)
}
# Default any missing output paths. The original only defaulted args[2] but
# then read args[3] and args[4] unconditionally, writing files named "NA".
defaults <- c(NA, "out.txt", "out2.txt", "out3.txt")
for (k in 2:4) {
  if (is.na(args[k])) {
    args[k] <- defaults[k]
  }
}
file_in <- args[1]
file_out1 <- args[2]
file_out2 <- args[3]
file_out3 <- args[4]

data <- read.csv(file_in)
numcol <- ncol(data)
# Items are assumed to start at column 7 (cols 1-6 = metadata) - TODO confirm.
# guess.equal/slip.equal = TRUE constrain guessing and slipping parameters to
# be equal across items (see ?prob.guttman).
gutt <- prob.guttman(data[, 7:numcol], guess.equal = TRUE, slip.equal = TRUE)
gutt
summary(gutt)
# Person scores, item parameters, and trait distribution from the fit.
guttscores <- gutt$person
guttitems <- gutt$item
gutttrait <- gutt$trait
write.csv(guttscores, file_out1, row.names = FALSE, quote = FALSE)
write.csv(guttitems, file_out2, row.names = FALSE, quote = FALSE)
write.csv(gutttrait, file_out3, row.names = FALSE, quote = FALSE)
|
/AIDD/ExToolset/scripts/guttman.R
|
no_license
|
RNAdetective/AIDD
|
R
| false
| false
| 1,016
|
r
|
# Part 1: fit a probabilistic Guttman scaling model (sirt::prob.guttman) to
# item-response data and write person, item, and trait tables to CSV.
suppressPackageStartupMessages(library(ggplot2))
suppressPackageStartupMessages(library(plyr))
suppressPackageStartupMessages(library(dplyr))
suppressPackageStartupMessages(library(sirt))

# Usage: Rscript guttman.R <input.csv> [scores_out] [items_out] [trait_out]
args <- commandArgs(trailingOnly = TRUE)
if (length(args) == 0) {
  # fixed: message ended in ".n" - the intended newline escape was "\n"
  stop("At least one argument must be supplied (input file).\n", call. = FALSE)
}
# Default any missing output paths. The original only defaulted args[2] but
# then read args[3] and args[4] unconditionally, writing files named "NA".
defaults <- c(NA, "out.txt", "out2.txt", "out3.txt")
for (k in 2:4) {
  if (is.na(args[k])) {
    args[k] <- defaults[k]
  }
}
file_in <- args[1]
file_out1 <- args[2]
file_out2 <- args[3]
file_out3 <- args[4]

data <- read.csv(file_in)
numcol <- ncol(data)
# Items are assumed to start at column 7 (cols 1-6 = metadata) - TODO confirm.
# guess.equal/slip.equal = TRUE constrain guessing and slipping parameters to
# be equal across items (see ?prob.guttman).
gutt <- prob.guttman(data[, 7:numcol], guess.equal = TRUE, slip.equal = TRUE)
gutt
summary(gutt)
# Person scores, item parameters, and trait distribution from the fit.
guttscores <- gutt$person
guttitems <- gutt$item
gutttrait <- gutt$trait
write.csv(guttscores, file_out1, row.names = FALSE, quote = FALSE)
write.csv(guttitems, file_out2, row.names = FALSE, quote = FALSE)
write.csv(gutttrait, file_out3, row.names = FALSE, quote = FALSE)
|
## Extract results of interest, write TAF output tables
## Before: data/summary_catch.csv   (fixed: header previously said "summary.csv")
## After:  output/sag_upload.xml, output/sag_info.csv, output/sag_fishdata.csv
library(icesTAF)
library(icesSAG)

mkdir("output")

# Read in the catch summary produced by the data step.
# Renamed from `summary` to stop shadowing base::summary().
catch_summary <- read.taf("data/summary_catch.csv")

# Build the SAG (Stock Assessment Graphs) upload objects.
sag_info <-
  stockInfo(StockCode = "san.sa.6",
            AssessmentYear = 2019,
            ContactPerson = "sarahlmillar@ices.dk",
            Purpose = "Advice")
sag_info$RecruitmentAge <- 0

sag_fishdata <-
  stockFishdata(Year = catch_summary$Year, Catches = catch_summary$Total)

# Serialize to SAG XML and write the upload file.
sag_upload <- createSAGxml(sag_info, sag_fishdata)
cat(sag_upload, file = "output/sag_upload.xml")

# Write out summary tables as CSV.
sag_info <- as.data.frame(sag_info)
write.taf(sag_info, dir = "output")
write.taf(sag_fishdata, dir = "output")
|
/output.R
|
no_license
|
ices-taf/2019_san.sa.6
|
R
| false
| false
| 864
|
r
|
## Extract results of interest, write TAF output tables
## Before: data/summary_catch.csv   (fixed: header previously said "summary.csv")
## After:  output/sag_upload.xml, output/sag_info.csv, output/sag_fishdata.csv
library(icesTAF)
library(icesSAG)

mkdir("output")

# Read in the catch summary produced by the data step.
# Renamed from `summary` to stop shadowing base::summary().
catch_summary <- read.taf("data/summary_catch.csv")

# Build the SAG (Stock Assessment Graphs) upload objects.
sag_info <-
  stockInfo(StockCode = "san.sa.6",
            AssessmentYear = 2019,
            ContactPerson = "sarahlmillar@ices.dk",
            Purpose = "Advice")
sag_info$RecruitmentAge <- 0

sag_fishdata <-
  stockFishdata(Year = catch_summary$Year, Catches = catch_summary$Total)

# Serialize to SAG XML and write the upload file.
sag_upload <- createSAGxml(sag_info, sag_fishdata)
cat(sag_upload, file = "output/sag_upload.xml")

# Write out summary tables as CSV.
sag_info <- as.data.frame(sag_info)
write.taf(sag_info, dir = "output")
write.taf(sag_fishdata, dir = "output")
|
# RiskControl: batch loan risk-assessment job for Wish merchants.
# Pulls order, merchant, and repayment data from MySQL, scores each applicant
# against eight pass/fail rules (store trust, refund rates, operating time,
# monthly-sales stability, chargebacks, overdue repayments, loanable amount,
# average sales), derives a loan quota and fee, and appends one row per user
# to the `result_records` table.
# @param status numeric flag; 1 => mark every applicant as needing observation.
# NOTE(review): database credentials are hard-coded below - move them to a
# config file or environment variables before this runs anywhere shared.
RiskControl=function(status){
#cache space
# memory.limit() is Windows-only; a warning elsewhere - TODO confirm target OS
memory.limit(10240000)
today<-Sys.Date()
#library#
library(DBI)
library(RMySQL)
library(lubridate)
library(reshape)
#get data#
# connect to MySQL (credentials hard-coded; see security note above)
con = dbConnect(
dbDriver("MySQL"),
#user = username,
#password= password,
#dbname = dbname,
#host = host
user = "root",
password= "123456",
dbname = "wf_wish",
#host = "192.168.1.2"
host = "cskk.f3322.net",
port = 85
)
# basic information: one row per order (data1) plus merchant-level stats (data2)
data1=dbGetQuery(con,'select user_id,order_id,merchant_id,payment_amount,order_date,current_expected_payment_eligibility_date,
is_refunded,refund_date,refund_time_diff,loan_operation,has_been_disbursed,is_store_currently_trusted,is_chargeback,
order_month,amount,loan_period from t_wish_order')
data2=dbGetQuery(con,'select merchant_id,admittance_operation,continuous_operation,avg_sales,avg_refund_time_rate,avg_refund_amount_rate from t_merchant_basic_info')
# loaned & repaid data; "yu_qi" appears to mean overdue (Chinese) - TODO confirm
data4=dbGetQuery(con,'select wish_user_id,num,principal,principal_real,is_yu_qi,yu_qi,end_date from t_repay_plan where is_valid=1')
data4$principal<-as.numeric(data4$principal)
data4$principal_real<-as.numeric(data4$principal_real)
data4$is_yu_qi<-as.numeric(data4$is_yu_qi)
data4$yu_qi<-as.numeric(data4$yu_qi)
data4$end_date<-as.Date(data4$end_date)
data4[is.na(data4)]<-0
# time-series handling: candidate repayment dates are the 1st and 15th of
# each month, generated 800 months ahead from 2017-03 and sorted ascending
all_return_date<-c(as.Date("2017-3-1")+months(1:800),as.Date("2017-3-15")+months(1:800))
all_return_date<-all_return_date[order(all_return_date)]
# scan for the first return date strictly after today; k-1 is the current window
k<-1
repeat{
k<-k+1
if (today>=all_return_date[k-1] & today<all_return_date[k]) break}
# row 1 = today (snapped below to its window start), rows 2-4 = next 3 return dates
return_date<-as.data.frame(c(today,all_return_date[c(k,k+1,k+2)]))
colnames(return_date)<-"expected_return_date"
k<-1
repeat{
k<-k+1
if (return_date$expected_return_date[1]>=all_return_date[k-1] & return_date$expected_return_date[1]<all_return_date[k]) break}
return_date$expected_return_date[1]<-all_return_date[k-1]
# each expected return date covers the 3 months of orders preceding it
return_date[,"starting_date"]<-return_date$expected_return_date-months(3)
n<-c()
n[1]=length(data1$user_id)
n[2]=length(data2$merchant_id)
# proceed only when both the order table and merchant table are non-empty
if (n[1] >0 & n[2] > 0){
data1$merchant_id<-as.character(data1$merchant_id)
data2$merchant_id<-as.character(data2$merchant_id)
alldata<-merge(data1,data2,by="merchant_id",all.x=TRUE)
# normalize column types after the join
alldata$order_id<-as.character(alldata$order_id)
alldata$user_id<-as.character(alldata$user_id)
alldata$merchant_id<-as.character(alldata$merchant_id)
alldata$payment_amount<-as.numeric(alldata$payment_amount)
alldata$order_date<-as.Date(alldata$order_date)
alldata$current_expected_payment_eligibility_date<-as.Date(alldata$current_expected_payment_eligibility_date)
alldata$is_refunded<-as.numeric(alldata$is_refunded)
alldata$refund_time_diff<-as.numeric(alldata$refund_time_diff)
alldata$loan_operation<-as.numeric(alldata$loan_operation)
alldata$has_been_disbursed<-as.numeric(alldata$has_been_disbursed)
alldata$is_store_currently_trusted<-as.numeric(alldata$is_store_currently_trusted)
alldata$is_chargeback<-as.numeric(alldata$is_chargeback)
alldata$amount<-as.numeric(alldata$amount)
alldata$order_month<-format(as.Date(alldata$order_date),"%Y/%m")
# loan_period = days between order date and expected payment eligibility
alldata$loan_period<-alldata$current_expected_payment_eligibility_date-alldata$order_date
alldata$loan_period<-as.numeric(alldata$loan_period)
alldata$admittance_operation<-as.numeric(alldata$admittance_operation)
alldata$continuous_operation<-as.numeric(alldata$continuous_operation)
alldata$avg_sales<-as.numeric(alldata$avg_sales)
alldata$avg_refund_time_rate<-as.numeric(alldata$avg_refund_time_rate)
alldata$avg_refund_amount_rate<-as.numeric(alldata$avg_refund_amount_rate)
alldata[,"order_year"]<-format(as.Date(alldata$order_date),"%Y")
alldata[is.na(alldata)]<-0
alldata<-alldata[!duplicated(alldata$order_id),]
# enumerate applicant user ids (one entry per distinct user with orders)
num_in<-aggregate(amount~user_id,data=alldata,length)
num_in<-num_in[!duplicated(num_in$user_id),]
required_user_id<-num_in$user_id
n1<-length(required_user_id)
if (n1 > 0){
# result[[i]] holds the 8 pass/fail rule outcomes for user i; the named
# vectors below accumulate per-user outputs for the final result table
result<-list(0)
credibility<-c()
refundSituation<-c()
operationTime<-c()
stability<-c()
chargeSituation<-c()
overdueSituation<-c()
withdrawalsSituation<-c()
paymentSituation<-c()
loaningMoney1<-c()
loaningMoney2<-c()
loaningRate1<-c()
feeSum1<-c()
hasBeenPassed<-c()
isObservationNeeded<-c()
partLoaningRate1<-c()
partLoaningRate2<-c()
for (i in 1:length(required_user_id)){
# all (deduplicated) orders for this user
data<-alldata[which(alldata$user_id %in% required_user_id[i]),]
data<-data[!duplicated(data$order_id),]
data$order_date<-as.Date(data$order_date)
n2<-length(data$user_id)
if (n2 <= 0){
# no orders: every score and amount defaults to zero
credibility[i]<-0
refundSituation[i]<-0
operationTime[i]<-0
stability[i]<-0
chargeSituation[i]<-0
overdueSituation[i]<-0
withdrawalsSituation[i]<-0
paymentSituation[i]<-0
loaningMoney1[i]<-0
loaningMoney2[i]<-0
loaningRate1[i]<-0
feeSum1[i]<-0
hasBeenPassed[i]<-0
partLoaningRate1[i]<-0
partLoaningRate2[i]<-0
}else if (n2 > 0){
result[[i]]<-c(0)
#Pass Standard#
# Rule 1 - credibility: store must be trusted as of its newest order
newest_date<-max(data$order_date)
is_store_currently_trusted<-data$is_store_currently_trusted[which(data$order_date %in% newest_date)]
is_store_currently_trusted[is.na(is_store_currently_trusted)]<-0
if (0 %in% is_store_currently_trusted) {
result[[i]][1]=0;credibility[i]=0
} else {result[[i]][1]=1;credibility[i]=1}
# Rule 2 - refund rate: both average refund rates must be <= 15%
data$avg_refund_amount_rate[is.na(data$avg_refund_amount_rate)]<-0
data$avg_refund_time_rate[is.na(data$avg_refund_time_rate)]<-0
if (data$avg_refund_amount_rate[1] > 0.15 || data$avg_refund_time_rate[1] > 0.15) {
result[[i]][2]=0;refundSituation[i]=0
} else {result[[i]][2]=1;refundSituation[i]=1}
# Rule 3 - operating time: >= 12 months since admittance, >= 6 continuous
admittance_operation<-max(data$admittance_operation)
admittance_operation[is.na(admittance_operation)]<-0
continuous_operation<-max(data$continuous_operation)
continuous_operation[is.na(continuous_operation)]<-0
if (admittance_operation < 12 || continuous_operation < 6) {
result[[i]][3]=0;operationTime[i]=0
} else {result[[i]][3]=1;operationTime[i]=1}
# Rule 4 - stability: every full month (excluding the current and earliest
# partial months) must contribute >= 5% of that year's sales
dfs<-melt(data,measure.vars="amount",id.vars=c("order_month","order_year","user_id"))
summary<-cast(dfs,order_month+order_year~.,sum,na.rm=T)
colnames(summary)<-c("order_month","order_year","sales_month")
summary_year<-aggregate(amount~order_year,data=data,sum)
colnames(summary_year)<-c("order_year","sales_year")
summary<-merge(summary,summary_year,by="order_year",all.x=T)
summary$sales_month<-as.numeric(summary$sales_month)
summary$sales_year<-as.numeric(summary$sales_year)
n7<-length(summary$sales_month)
if (n7 > 0) {
summary[,'rate']<-summary$sales_month/summary$sales_year
summary$rate[is.na(summary$rate)]<-0
summary$rate<-as.numeric(summary$rate)
today_month<-format(today,"%Y/%m")
earlest_month<-min(summary$order_month)
summary<-summary[which(summary$order_month != today_month & summary$order_month != earlest_month),]
n8<-length(summary$rate)
if (n8 > 0){
sta_length<-length(summary$rate[which(summary$rate >= 0.05)])
summary_length<-length(summary$rate)
sta_length[is.na(sta_length)]=0
summary_length[is.na(summary_length)]=0
if (sta_length < summary_length || sta_length == 0 || summary_length == 0){
result[[i]][4]=0;stability[i]=0
} else {result[[i]][4]=1;stability[i]=1}
}else {result[[i]][4]=1;stability[i]=1}
}else {result[[i]][4]=1;stability[i]=1}
# Rule 5 - chargebacks: any chargeback on any order fails the rule
data$is_chargeback[is.na(data$is_chargeback)]<-0
if (1 %in% data$is_chargeback){
result[[i]][5]=0;chargeSituation[i]=0
}else {result[[i]][5]=1;chargeSituation[i]=1}
# Rule 6 - overdue: the user's most recent repayment plan must not be overdue
borrowed_data<-data4[which(data4$wish_user_id %in% required_user_id[i]),]
n=length(borrowed_data$is_yu_qi)
n[is.na(n)]<-0
if (n == 0){
overdued_or_not<-0
} else if (n > 0){
last_borrowed_date<-max(borrowed_data$end_date)
overdued_or_not<-sum(borrowed_data$is_yu_qi[which(borrowed_data$end_date %in% last_borrowed_date)])
}
if (overdued_or_not > 0){
result[[i]][6]=0;overdueSituation[i]=0
}else {result[[i]][6]=1;overdueSituation[i]=1}
# Rule 8 - sales situation: average sales must be >= 1000
# (note: index [8] is filled before [7]; [7] is set further below)
data$avg_sales[is.na(data$avg_sales)]<-0
if (data$avg_sales[1] < 1000){
result[[i]][8]=0;paymentSituation[i]=0
} else {result[[i]][8]=1;paymentSituation[i]=1}
# return-rate ratios over loan-period bands (60+ days, 75-105 days)
return_rate60<-length(data$order_date[which(data$loan_period >= 60)])/length(data$order_date)
return_rate90<-length(data$order_date[which(data$loan_period >= 75 & data$loan_period <= 105)])/length(data$order_date)
return_rate75_90<-return_rate90/return_rate60
# refund-rate ratios within the same bands, refunded orders only
refund_rate75_90<-length(data$order_date[which(data$refund_time_diff>= 75 & data$refund_time_diff <= 105 & data$loan_period >= 75
& data$is_refunded == 1)])/length(data$order_date[which(data$loan_period >= 75)])
refund_rate60_90<-length(data$order_date[which(data$refund_time_diff>= 75 & data$refund_time_diff <= 105 & data$loan_period >= 60
& data$is_refunded == 1)])/length(data$order_date[which(data$loan_period >= 60)])
# candidate orders for lending: inside the 2-window date range, not refunded,
# not yet disbursed, and with no loan operation recorded
cid<-which(data$order_date >= return_date$starting_date[1] &
data$order_date < return_date$starting_date[3] & data$is_refunded == 0 &
data$has_been_disbursed == 0 & data$loan_operation == 0)
n3=length(cid)
n3[is.na(n3)]<-0
if (n3 <= 0){
loaningMoney<-0
loaningRate<-0
rate1<-0
rate2<-0
feeSum<-0
}else if (n3 > 0){
loan_data<-data[cid,]
loan_data<-loan_data[!duplicated(loan_data$order_id),]
# per-window lending rates, derived from refund/return history, capped at 0.9
rate1<-1-refund_rate75_90
rate2<-(1-refund_rate60_90)*return_rate75_90
if (rate1 >= 0.9){
rate1=0.9
}
# loanable money per window = window order total * window rate
sumPayment<-sum(loan_data$amount)
sumPayment[is.na(sumPayment)]<-0
loan_money_part1<-sum(loan_data$amount[which(loan_data$order_date >= return_date$starting_date[1]
& loan_data$order_date < return_date$starting_date[2])]) * rate1
loan_money_part2<-sum(loan_data$amount[which(loan_data$order_date >= return_date$starting_date[2]
& loan_data$order_date < return_date$starting_date[3])]) * rate2
loan_money_part1[is.na(loan_money_part1)]<-0
loan_money_part2[is.na(loan_money_part2)]<-0
loan_money_part1<-as.numeric(loan_money_part1)
loan_money_part2<-as.numeric(loan_money_part2)
# the second window's amount is capped by the first window's amount
if (loan_money_part2 > loan_money_part1){
loan_money_part2=loan_money_part1
} else if (loan_money_part2 <= loan_money_part1){
loan_money_part2=loan_money_part2
}
loaningMoneyOriginal<-loan_money_part1+loan_money_part2
loaningRateOriginal<-loaningMoneyOriginal/sumPayment
loaningRate<-loaningRateOriginal
loaningMoney<-loaningMoneyOriginal
loaningRateOriginal[is.na(loaningRateOriginal)]<-0
# overall loan-to-payment ratio capped at 80%
if (loaningRateOriginal > 0.8){
loaningRate<-0.8
loaningMoney<-sumPayment*0.8
}
#owed_money<-sum(borrowed_data$principal)-sum(borrowed_data$principal_real)
#loaningMoney<-loaningMoneyOriginal-owed_money
loaningMoney[is.na(loaningMoney)]<-0
loaningRate<-loaningMoney/sumPayment
# re-scale the per-window rates to the (possibly capped) total
rate<-loaningMoney/loaningMoneyOriginal
rate1<-loan_money_part1/sum(loan_data$amount[which(loan_data$order_date >= return_date$starting_date[1]
& loan_data$order_date < return_date$starting_date[2])])*rate
rate2<-loan_money_part2/sum(loan_data$amount[which(loan_data$order_date >= return_date$starting_date[2]
& loan_data$order_date < return_date$starting_date[3])])*rate
# fee = 1% of the loaned amount
feeSum<-loaningMoney*0.01
}
# sanitize all computed outputs (NA -> 0) before scoring rule 7
loaningMoney<-as.numeric(loaningMoney)
loaningMoney[is.na(loaningMoney)]<-0
loaningRate<-as.numeric(loaningRate)
loaningRate[is.na(loaningRate)]<-0
feeSum<-as.numeric(feeSum)
feeSum[is.na(feeSum)]<-0
rate1<-as.numeric(rate1)
rate1[is.na(rate1)]<-0
rate2<-as.numeric(rate2)
rate2[is.na(rate2)]<-0
# Rule 7 - loanable amount must reach at least 1000
loaningMoney[is.na(loaningMoney)]<-0
if (loaningMoney < 1000){
result[[i]][7]=0;withdrawalsSituation[i]=0
} else {result[[i]][7]=1;withdrawalsSituation[i]=1}
# pass overall only when every rule in result[[i]] scored 1
n=length(result[[i]][])
result[is.na(result)]<-0
n[is.na(n)]=0
if (sum(result[[i]][],na.rm=T) == n){
loaningMoney1[i]<-round(loaningMoney,4)
loaningMoney2[i]<-round(loaningMoney,4)
loaningRate1[i]<-round(loaningRate,4)
feeSum1[i]<-feeSum
hasBeenPassed[i]<-1
partLoaningRate1[i]<-round(rate1,4)
partLoaningRate2[i]<-round(rate2,4)
} else{
# failed applicants keep the computed quota for reference but get no loan
loaningMoney1[i]<-0
loaningMoney2[i]<-round(loaningMoney,4)
loaningRate1[i]<-round(loaningRate,4)
feeSum1[i]<-feeSum
hasBeenPassed[i]<-0
partLoaningRate1[i]<-0
partLoaningRate2[i]<-0
}
}
}
# observation flag is driven solely by the `status` argument (NA -> 1)
status<-as.numeric(status)
status[is.na(status)]<-1
if (status == 1){
isObservationNeeded<-rep(1,length(required_user_id))
} else {isObservationNeeded<-rep(0,length(required_user_id))}
# quota60/quota90 columns are reserved (always 0 here)
loaningMoney3<-rep(0,length(required_user_id))
loaningMoney4<-rep(0,length(required_user_id))
# assemble and persist one row per applicant
resultTable<-data.frame(required_user_id,hasBeenPassed,loaningMoney1,loaningMoney2,loaningMoney3,loaningMoney4,loaningRate1,
feeSum1,isObservationNeeded,credibility,refundSituation,operationTime,stability,chargeSituation,
overdueSituation,withdrawalsSituation,paymentSituation,partLoaningRate1,partLoaningRate2)
colnames(resultTable)<-c("user_id","has_been_passed","quota","quota_original","quota60","quota90","quota_rate","interest",
"is_observation_needed","credibility","refund_situation","operation_time","stability",
"charge_situation","overdue_situation","withdrawals_situation","payment_situation","part_loaning_rate1","part_loaning_rate2")
resultTable[,'update_time']<-Sys.time()
resultTable[is.na(resultTable)]<-0
dbGetQuery(con,"set names utf8")
con
dbWriteTable(con,"result_records",resultTable,row.names=F,append=T)
}
}
dbDisconnect(con)
}
|
/src/main/resources/RiskControl.R
|
permissive
|
shimaomao/fpd
|
R
| false
| false
| 16,516
|
r
|
RiskControl=function(status){
#cache space
memory.limit(10240000)
today<-Sys.Date()
#library#
library(DBI)
library(RMySQL)
library(lubridate)
library(reshape)
#get data#
#connet MySQL
con = dbConnect(
dbDriver("MySQL"),
#user = username,
#password= password,
#dbname = dbname,
#host = host
user = "root",
password= "123456",
dbname = "wf_wish",
#host = "192.168.1.2"
host = "cskk.f3322.net",
port = 85
)
#basic informations
data1=dbGetQuery(con,'select user_id,order_id,merchant_id,payment_amount,order_date,current_expected_payment_eligibility_date,
is_refunded,refund_date,refund_time_diff,loan_operation,has_been_disbursed,is_store_currently_trusted,is_chargeback,
order_month,amount,loan_period from t_wish_order')
data2=dbGetQuery(con,'select merchant_id,admittance_operation,continuous_operation,avg_sales,avg_refund_time_rate,avg_refund_amount_rate from t_merchant_basic_info')
#loaned & returned data
data4=dbGetQuery(con,'select wish_user_id,num,principal,principal_real,is_yu_qi,yu_qi,end_date from t_repay_plan where is_valid=1')
data4$principal<-as.numeric(data4$principal)
data4$principal_real<-as.numeric(data4$principal_real)
data4$is_yu_qi<-as.numeric(data4$is_yu_qi)
data4$yu_qi<-as.numeric(data4$yu_qi)
data4$end_date<-as.Date(data4$end_date)
data4[is.na(data4)]<-0
#time series dealing#
all_return_date<-c(as.Date("2017-3-1")+months(1:800),as.Date("2017-3-15")+months(1:800))
all_return_date<-all_return_date[order(all_return_date)]
#next 2 return days
k<-1
repeat{
k<-k+1
if (today>=all_return_date[k-1] & today<all_return_date[k]) break}
return_date<-as.data.frame(c(today,all_return_date[c(k,k+1,k+2)]))
colnames(return_date)<-"expected_return_date"
k<-1
repeat{
k<-k+1
if (return_date$expected_return_date[1]>=all_return_date[k-1] & return_date$expected_return_date[1]<all_return_date[k]) break}
return_date$expected_return_date[1]<-all_return_date[k-1]
return_date[,"starting_date"]<-return_date$expected_return_date-months(3)
n<-c()
n[1]=length(data1$user_id)
n[2]=length(data2$merchant_id)
if (n[1] >0 & n[2] > 0){
data1$merchant_id<-as.character(data1$merchant_id)
data2$merchant_id<-as.character(data2$merchant_id)
alldata<-merge(data1,data2,by="merchant_id",all.x=TRUE)
alldata$order_id<-as.character(alldata$order_id)
alldata$user_id<-as.character(alldata$user_id)
alldata$merchant_id<-as.character(alldata$merchant_id)
alldata$payment_amount<-as.numeric(alldata$payment_amount)
alldata$order_date<-as.Date(alldata$order_date)
alldata$current_expected_payment_eligibility_date<-as.Date(alldata$current_expected_payment_eligibility_date)
alldata$is_refunded<-as.numeric(alldata$is_refunded)
alldata$refund_time_diff<-as.numeric(alldata$refund_time_diff)
alldata$loan_operation<-as.numeric(alldata$loan_operation)
alldata$has_been_disbursed<-as.numeric(alldata$has_been_disbursed)
alldata$is_store_currently_trusted<-as.numeric(alldata$is_store_currently_trusted)
alldata$is_chargeback<-as.numeric(alldata$is_chargeback)
alldata$amount<-as.numeric(alldata$amount)
alldata$order_month<-format(as.Date(alldata$order_date),"%Y/%m")
alldata$loan_period<-alldata$current_expected_payment_eligibility_date-alldata$order_date
alldata$loan_period<-as.numeric(alldata$loan_period)
alldata$admittance_operation<-as.numeric(alldata$admittance_operation)
alldata$continuous_operation<-as.numeric(alldata$continuous_operation)
alldata$avg_sales<-as.numeric(alldata$avg_sales)
alldata$avg_refund_time_rate<-as.numeric(alldata$avg_refund_time_rate)
alldata$avg_refund_amount_rate<-as.numeric(alldata$avg_refund_amount_rate)
alldata[,"order_year"]<-format(as.Date(alldata$order_date),"%Y")
alldata[is.na(alldata)]<-0
alldata<-alldata[!duplicated(alldata$order_id),]
#apply users' summary
# One row per user with their order count; required_user_id is the list of
# users to score in the loop below.
num_in<-aggregate(amount~user_id,data=alldata,length)
num_in<-num_in[!duplicated(num_in$user_id),]
required_user_id<-num_in$user_id
n1<-length(required_user_id)
if (n1 > 0){
# Pre-create the per-user accumulators filled in by the scoring loop.
# result[[i]] holds the pass/fail indicator vector for user i.
result<-list(0)
credibility<-c()
refundSituation<-c()
operationTime<-c()
stability<-c()
chargeSituation<-c()
overdueSituation<-c()
withdrawalsSituation<-c()
paymentSituation<-c()
loaningMoney1<-c()
loaningMoney2<-c()
loaningRate1<-c()
feeSum1<-c()
hasBeenPassed<-c()
isObservationNeeded<-c()
partLoaningRate1<-c()
partLoaningRate2<-c()
# Per-user scoring loop: for each user, evaluate a set of pass/fail checks
# (credibility, refund rate, operating time, sales stability, chargebacks,
# overdue loans, sales volume, withdrawable amount) and compute the loanable
# amount and rates. All checks must pass for hasBeenPassed to be 1.
for (i in 1:length(required_user_id)){
data<-alldata[which(alldata$user_id %in% required_user_id[i]),]
data<-data[!duplicated(data$order_id),]
data$order_date<-as.Date(data$order_date)
n2<-length(data$user_id)
if (n2 <= 0){
# No orders for this user: zero out every output.
credibility[i]<-0
refundSituation[i]<-0
operationTime[i]<-0
stability[i]<-0
chargeSituation[i]<-0
overdueSituation[i]<-0
withdrawalsSituation[i]<-0
paymentSituation[i]<-0
loaningMoney1[i]<-0
loaningMoney2[i]<-0
loaningRate1[i]<-0
feeSum1[i]<-0
hasBeenPassed[i]<-0
partLoaningRate1[i]<-0
partLoaningRate2[i]<-0
}else if (n2 > 0){
result[[i]]<-c(0)
#Pass Standard#
#credibility
# Check 1: the store must be trusted as of its most recent order.
newest_date<-max(data$order_date)
is_store_currently_trusted<-data$is_store_currently_trusted[which(data$order_date %in% newest_date)]
is_store_currently_trusted[is.na(is_store_currently_trusted)]<-0
if (0 %in% is_store_currently_trusted) {
result[[i]][1]=0;credibility[i]=0
} else {result[[i]][1]=1;credibility[i]=1}
#Refund rate
# Check 2: average refund amount/time rates must both be <= 15%.
data$avg_refund_amount_rate[is.na(data$avg_refund_amount_rate)]<-0
data$avg_refund_time_rate[is.na(data$avg_refund_time_rate)]<-0
if (data$avg_refund_amount_rate[1] > 0.15 || data$avg_refund_time_rate[1] > 0.15) {
result[[i]][2]=0;refundSituation[i]=0
} else {result[[i]][2]=1;refundSituation[i]=1}
#Operating time
# Check 3: at least 12 months since admittance and 6 months continuous.
admittance_operation<-max(data$admittance_operation)
admittance_operation[is.na(admittance_operation)]<-0
continuous_operation<-max(data$continuous_operation)
continuous_operation[is.na(continuous_operation)]<-0
if (admittance_operation < 12 || continuous_operation < 6) {
result[[i]][3]=0;operationTime[i]=0
} else {result[[i]][3]=1;operationTime[i]=1}
#stability
# Check 4: monthly sales as a share of yearly sales must be >= 5% in every
# complete month (current and earliest months are excluded). Uses
# reshape's melt/cast -- assumes reshape is attached upstream.
dfs<-melt(data,measure.vars="amount",id.vars=c("order_month","order_year","user_id"))
summary<-cast(dfs,order_month+order_year~.,sum,na.rm=T)
colnames(summary)<-c("order_month","order_year","sales_month")
summary_year<-aggregate(amount~order_year,data=data,sum)
colnames(summary_year)<-c("order_year","sales_year")
summary<-merge(summary,summary_year,by="order_year",all.x=T)
summary$sales_month<-as.numeric(summary$sales_month)
summary$sales_year<-as.numeric(summary$sales_year)
n7<-length(summary$sales_month)
if (n7 > 0) {
summary[,'rate']<-summary$sales_month/summary$sales_year
summary$rate[is.na(summary$rate)]<-0
summary$rate<-as.numeric(summary$rate)
# 'today' must be defined before this chunk -- TODO confirm upstream.
today_month<-format(today,"%Y/%m")
earlest_month<-min(summary$order_month)
summary<-summary[which(summary$order_month != today_month & summary$order_month != earlest_month),]
n8<-length(summary$rate)
if (n8 > 0){
sta_length<-length(summary$rate[which(summary$rate >= 0.05)])
summary_length<-length(summary$rate)
sta_length[is.na(sta_length)]=0
summary_length[is.na(summary_length)]=0
if (sta_length < summary_length || sta_length == 0 || summary_length == 0){
result[[i]][4]=0;stability[i]=0
} else {result[[i]][4]=1;stability[i]=1}
}else {result[[i]][4]=1;stability[i]=1}
}else {result[[i]][4]=1;stability[i]=1}
#charge times
# Check 5: any chargeback fails the user.
data$is_chargeback[is.na(data$is_chargeback)]<-0
if (1 %in% data$is_chargeback){
result[[i]][5]=0;chargeSituation[i]=0
}else {result[[i]][5]=1;chargeSituation[i]=1}
#overdue
# Check 6: the user's most recent loan (from data4) must not be overdue.
# NOTE(review): this reuses the name 'n', clobbering the n[1]/n[2] row
# counts set before the loop -- harmless here because they are not read
# again, but fragile.
borrowed_data<-data4[which(data4$wish_user_id %in% required_user_id[i]),]
n=length(borrowed_data$is_yu_qi)
n[is.na(n)]<-0
if (n == 0){
overdued_or_not<-0
} else if (n > 0){
last_borrowed_date<-max(borrowed_data$end_date)
overdued_or_not<-sum(borrowed_data$is_yu_qi[which(borrowed_data$end_date %in% last_borrowed_date)])
}
if (overdued_or_not > 0){
result[[i]][6]=0;overdueSituation[i]=0
}else {result[[i]][6]=1;overdueSituation[i]=1}
#sales situation
# Check 8 (index 8 is filled before index 7): average sales >= 1000.
data$avg_sales[is.na(data$avg_sales)]<-0
if (data$avg_sales[1] < 1000){
result[[i]][8]=0;paymentSituation[i]=0
} else {result[[i]][8]=1;paymentSituation[i]=1}
#return_rate#
# Share of orders with long loan periods, used to discount part 2 below.
return_rate60<-length(data$order_date[which(data$loan_period >= 60)])/length(data$order_date)
return_rate90<-length(data$order_date[which(data$loan_period >= 75 & data$loan_period <= 105)])/length(data$order_date)
return_rate75_90<-return_rate90/return_rate60
#refund_rate#
refund_rate75_90<-length(data$order_date[which(data$refund_time_diff>= 75 & data$refund_time_diff <= 105 & data$loan_period >= 75
                                               & data$is_refunded == 1)])/length(data$order_date[which(data$loan_period >= 75)])
refund_rate60_90<-length(data$order_date[which(data$refund_time_diff>= 75 & data$refund_time_diff <= 105 & data$loan_period >= 60
                                               & data$is_refunded == 1)])/length(data$order_date[which(data$loan_period >= 60)])
# Loanable orders: inside the window, not refunded, not yet disbursed,
# not already part of a loan operation.
cid<-which(data$order_date >= return_date$starting_date[1] &
             data$order_date < return_date$starting_date[3] & data$is_refunded == 0 &
             data$has_been_disbursed == 0 & data$loan_operation == 0)
n3=length(cid)
n3[is.na(n3)]<-0
if (n3 <= 0){
loaningMoney<-0
loaningRate<-0
rate1<-0
rate2<-0
feeSum<-0
}else if (n3 > 0){
loan_data<-data[cid,]
loan_data<-loan_data[!duplicated(loan_data$order_id),]
#rate
# rate1/rate2: risk-discount factors for the two halves of the window;
# rate1 is capped at 0.9.
rate1<-1-refund_rate75_90
rate2<-(1-refund_rate60_90)*return_rate75_90
if (rate1 >= 0.9){
rate1=0.9
}
#money
sumPayment<-sum(loan_data$amount)
sumPayment[is.na(sumPayment)]<-0
loan_money_part1<-sum(loan_data$amount[which(loan_data$order_date >= return_date$starting_date[1]
                                             & loan_data$order_date < return_date$starting_date[2])]) * rate1
loan_money_part2<-sum(loan_data$amount[which(loan_data$order_date >= return_date$starting_date[2]
                                             & loan_data$order_date < return_date$starting_date[3])]) * rate2
loan_money_part1[is.na(loan_money_part1)]<-0
loan_money_part2[is.na(loan_money_part2)]<-0
loan_money_part1<-as.numeric(loan_money_part1)
loan_money_part2<-as.numeric(loan_money_part2)
# Part 2 may never exceed part 1 (the else branch is a no-op).
if (loan_money_part2 > loan_money_part1){
loan_money_part2=loan_money_part1
} else if (loan_money_part2 <= loan_money_part1){
loan_money_part2=loan_money_part2
}
loaningMoneyOriginal<-loan_money_part1+loan_money_part2
loaningRateOriginal<-loaningMoneyOriginal/sumPayment
loaningRate<-loaningRateOriginal
loaningMoney<-loaningMoneyOriginal
loaningRateOriginal[is.na(loaningRateOriginal)]<-0
# Cap total loanable amount at 80% of the payment sum.
if (loaningRateOriginal > 0.8){
loaningRate<-0.8
loaningMoney<-sumPayment*0.8
}
#owed_money<-sum(borrowed_data$principal)-sum(borrowed_data$principal_real)
#loaningMoney<-loaningMoneyOriginal-owed_money
loaningMoney[is.na(loaningMoney)]<-0
loaningRate<-loaningMoney/sumPayment
# Re-scale the per-part rates so they reflect any cap applied above.
rate<-loaningMoney/loaningMoneyOriginal
rate1<-loan_money_part1/sum(loan_data$amount[which(loan_data$order_date >= return_date$starting_date[1]
                                                   & loan_data$order_date < return_date$starting_date[2])])*rate
rate2<-loan_money_part2/sum(loan_data$amount[which(loan_data$order_date >= return_date$starting_date[2]
                                                   & loan_data$order_date < return_date$starting_date[3])])*rate
# Fee is 1% of the loaned amount.
feeSum<-loaningMoney*0.01
}
loaningMoney<-as.numeric(loaningMoney)
loaningMoney[is.na(loaningMoney)]<-0
loaningRate<-as.numeric(loaningRate)
loaningRate[is.na(loaningRate)]<-0
feeSum<-as.numeric(feeSum)
feeSum[is.na(feeSum)]<-0
rate1<-as.numeric(rate1)
rate1[is.na(rate1)]<-0
rate2<-as.numeric(rate2)
rate2[is.na(rate2)]<-0
#loaning situation
# Check 7: the withdrawable amount must be at least 1000.
loaningMoney[is.na(loaningMoney)]<-0
if (loaningMoney < 1000){
result[[i]][7]=0;withdrawalsSituation[i]=0
} else {result[[i]][7]=1;withdrawalsSituation[i]=1}
# The user passes only if every recorded indicator equals 1
# (sum of indicators == number of indicators).
n=length(result[[i]][])
result[is.na(result)]<-0
n[is.na(n)]=0
if (sum(result[[i]][],na.rm=T) == n){
loaningMoney1[i]<-round(loaningMoney,4)
loaningMoney2[i]<-round(loaningMoney,4)
loaningRate1[i]<-round(loaningRate,4)
feeSum1[i]<-feeSum
hasBeenPassed[i]<-1
partLoaningRate1[i]<-round(rate1,4)
partLoaningRate2[i]<-round(rate2,4)
} else{
# Failed users keep the computed quota only in the "original" column.
loaningMoney1[i]<-0
loaningMoney2[i]<-round(loaningMoney,4)
loaningRate1[i]<-round(loaningRate,4)
feeSum1[i]<-feeSum
hasBeenPassed[i]<-0
partLoaningRate1[i]<-0
partLoaningRate2[i]<-0
}
}
}
# Assemble the per-user result table and append it to the result_records
# table. 'status' and 'con' must be defined before this chunk -- TODO confirm.
status<-as.numeric(status)
status[is.na(status)]<-1
if (status == 1){
isObservationNeeded<-rep(1,length(required_user_id))
} else {isObservationNeeded<-rep(0,length(required_user_id))}
# quota60/quota90 placeholders, always 0 here.
loaningMoney3<-rep(0,length(required_user_id))
loaningMoney4<-rep(0,length(required_user_id))
resultTable<-data.frame(required_user_id,hasBeenPassed,loaningMoney1,loaningMoney2,loaningMoney3,loaningMoney4,loaningRate1,
                        feeSum1,isObservationNeeded,credibility,refundSituation,operationTime,stability,chargeSituation,
                        overdueSituation,withdrawalsSituation,paymentSituation,partLoaningRate1,partLoaningRate2)
colnames(resultTable)<-c("user_id","has_been_passed","quota","quota_original","quota60","quota90","quota_rate","interest",
                         "is_observation_needed","credibility","refund_situation","operation_time","stability",
                         "charge_situation","overdue_situation","withdrawals_situation","payment_situation","part_loaning_rate1","part_loaning_rate2")
resultTable[,'update_time']<-Sys.time()
resultTable[is.na(resultTable)]<-0
# Force UTF-8 on the connection, then append the rows.
dbGetQuery(con,"set names utf8")
con
dbWriteTable(con,"result_records",resultTable,row.names=F,append=T)
}
}
dbDisconnect(con)
}
|
# Subset vehicle emissions (vehicnei, built upstream) for Baltimore City
# (fips 24510) and Los Angeles County (fips 06037), label each subset, and
# plot their PM2.5 emissions per year side by side.
vehicbaltnei<-vehicnei[vehicnei$fips=="24510",]   # compare as character: fips codes are zero-padded strings
vehicbaltnei$city<-"Baltimore City"
vehicLAnei<-vehicnei[vehicnei$fips=="06037",]
vehicLAnei$city<-"Los Angeles County"
# FIX: the original bound the undefined objects vehiclesBaltimoreNEI /
# vehiclesLANEI; bind the two subsets created above instead.
bothNEI<-rbind(vehicbaltnei,vehicLAnei)
library(ggplot2)
plot6<-ggplot(bothNEI,aes(x=factor(year),y=(Emissions/1000),fill=city)) +
  geom_bar(aes(fill=year),stat="identity") +
  facet_grid(scales="free",space="free",.~city) +
  guides(fill=FALSE)+theme_bw() +
  labs(x="year",y=expression("PM total emission")) +
  labs(title=expression("PM vehicle emissions in Baltimore and LA 1999-2008"))
print(plot6)
|
/plot6.R
|
no_license
|
thayanlima/ExploratoryDataCourseProject2
|
R
| false
| false
| 582
|
r
|
# Subset vehicle emissions (vehicnei, built upstream) for Baltimore City
# (fips 24510) and Los Angeles County (fips 06037), label each subset, and
# plot their PM2.5 emissions per year side by side.
vehicbaltnei<-vehicnei[vehicnei$fips=="24510",]   # compare as character: fips codes are zero-padded strings
vehicbaltnei$city<-"Baltimore City"
vehicLAnei<-vehicnei[vehicnei$fips=="06037",]
vehicLAnei$city<-"Los Angeles County"
# FIX: the original bound the undefined objects vehiclesBaltimoreNEI /
# vehiclesLANEI; bind the two subsets created above instead.
bothNEI<-rbind(vehicbaltnei,vehicLAnei)
library(ggplot2)
plot6<-ggplot(bothNEI,aes(x=factor(year),y=(Emissions/1000),fill=city)) +
  geom_bar(aes(fill=year),stat="identity") +
  facet_grid(scales="free",space="free",.~city) +
  guides(fill=FALSE)+theme_bw() +
  labs(x="year",y=expression("PM total emission")) +
  labs(title=expression("PM vehicle emissions in Baltimore and LA 1999-2008"))
print(plot6)
|
library(shiny)
# Load the data on school usage and teacher logins
# into a dataframe, "schools".
load("./Data/schools.RData")
# Linear model predicting weekly usage hours from the number of teacher logins.
fit<-lm(hours ~ logins, data=schools)
shinyServer(function(input, output) {
  # Scatter plot of usage vs. logins: fitted line in red, the user's slider
  # choice and the corresponding predicted usage in blue.
  output$usagePlot <- renderPlot({
    new.school<-data.frame(logins=c(input$tlogins))
    plot(
      schools$logins,
      schools$hours,
      xlab = "number of teacher logins",
      ylab = "usage (hours)"
    )
    abline(fit, col="red")
    abline(v=input$tlogins, col="blue")
    abline(h=predict(fit, new.school), col="blue")
  })
  # FIX: register the text outputs once at server level. The original
  # assigned output$estimate / output$result inside renderPlot, re-creating
  # both observers on every replot.
  # Show the user what slider value they selected
  output$estimate<-renderText(
    paste(
      "You chose an estimate of",
      input$tlogins,
      "teacher logins."
    )
  )
  # Show the user the corresponding estimate of hours of usage
  output$result<-renderText({
    new.school<-data.frame(logins=c(input$tlogins))
    paste(
      "The estimated usage would be",
      round(predict(fit, new.school)),
      "."
    )
  })
})
|
/server.R
|
no_license
|
iargent/dataprodass
|
R
| false
| false
| 1,733
|
r
|
library(shiny)
# Load the data on school usage and teacher logins
# into a dataframe, "schools".
load("./Data/schools.RData")
# Linear model predicting weekly usage hours from the number of teacher logins.
fit<-lm(hours ~ logins, data=schools)
shinyServer(function(input, output) {
  # Scatter plot of usage vs. logins: fitted line in red, the user's slider
  # choice and the corresponding predicted usage in blue.
  output$usagePlot <- renderPlot({
    new.school<-data.frame(logins=c(input$tlogins))
    plot(
      schools$logins,
      schools$hours,
      xlab = "number of teacher logins",
      ylab = "usage (hours)"
    )
    abline(fit, col="red")
    abline(v=input$tlogins, col="blue")
    abline(h=predict(fit, new.school), col="blue")
  })
  # FIX: register the text outputs once at server level. The original
  # assigned output$estimate / output$result inside renderPlot, re-creating
  # both observers on every replot.
  # Show the user what slider value they selected
  output$estimate<-renderText(
    paste(
      "You chose an estimate of",
      input$tlogins,
      "teacher logins."
    )
  )
  # Show the user the corresponding estimate of hours of usage
  output$result<-renderText({
    new.school<-data.frame(logins=c(input$tlogins))
    paste(
      "The estimated usage would be",
      round(predict(fit, new.school)),
      "."
    )
  })
})
|
########## Image Recovery Example (NMF-based inpainting) ##########
## packages ##
library(tidyverse)
library(magrittr)
library(reticulate)
use_python('/usr/local/bin/python3.6')
library(keras)
library(Matrix)
library(NMF)
library(NNLM)
## load image ##
imagem <- image_load( path = '/home/vm-data-science/education/dados/scallet.jpg' ) %>%
  image_to_array(., data_format = "channels_first" )
# dimensions (channels x height x width because of channels_first)
imagem %>%
  dim
# channels: render each colour channel as a grayscale raster
imagem[1,,] %>%
  as.raster( max = max(imagem, na.rm = TRUE) ) %>%
  plot()
imagem[2,,] %>%
  as.raster( max = max(imagem, na.rm = TRUE) ) %>%
  plot()
imagem[3,,] %>%
  as.raster( max = max(imagem, na.rm = TRUE) ) %>%
  plot()
## insert missing values ##
camada <- 2                           # channel to corrupt
imagem_problema <- imagem[camada,,]
nr <- nrow( imagem_problema )         # number of rows
nc <- ncol( imagem_problema )         # number of columns
p <- 0.3                              # target proportion of NA's
ina <- is.na( unlist(imagem_problema) )  # positions of any pre-existing NA's
n2 <- floor( p*nr*nc ) - sum( ina )      # how many new NA's are still needed
# FIX: sample only from cells that are NOT already flagged. The original
# sampled from which(!is.na(ina)), which is every cell (ina is logical and
# never NA), so already-missing cells could be re-picked and fewer than
# p*nr*nc NA's inserted.
ina[ sample(which(!ina), n2) ] <- TRUE
# FIX: spell out nrow/ncol -- the original relied on partial argument
# matching (nr=/nc=).
imagem_problema[matrix(ina, nrow = nr, ncol = nc)] <- NA
imagem_problema %>%
  as.raster( max = max(imagem, na.rm = TRUE) ) %>%
  plot()
## Fit NMF models of increasing rank and inspect each reconstruction
for( i in 1:30 ){
  show( paste0("Etapa: ", i) )
  modelo <- nnmf(imagem_problema,
                 k = i,
                 method = 'scd',
                 loss = 'mse',
                 n.threads = 0,
                 max.iter = 1000 )
  imagem_recuperada <- modelo$W %*% modelo$H
  # FIX: plot()/title() are side-effect calls returning NULL; the original
  # piped that NULL onward, which drew correctly only by accident. Call
  # them sequentially instead.
  plot( as.raster(imagem_recuperada, max = max(imagem_recuperada, na.rm = TRUE)) )
  title( main = paste0('teste ', i) )
}
# Compare the original and corrupted images with the last reconstruction
par( mfrow=c(1,3) )
plot( as.raster(imagem[camada,,], max = max(imagem, na.rm = TRUE)) )
title( main = 'Original' )
plot( as.raster(imagem_problema, max = max(imagem_problema, na.rm = TRUE)) )
title( main = 'Problemas' )
plot( as.raster(imagem_recuperada, max = max(imagem_recuperada, na.rm = TRUE)) )
title( main = 'Recuperada' )
|
/R_scripts/exemplo_recuperacao_imagem.R
|
no_license
|
netoalcides/education
|
R
| false
| false
| 2,189
|
r
|
########## Image Recovery Example (NMF-based inpainting) ##########
## packages ##
library(tidyverse)
library(magrittr)
library(reticulate)
use_python('/usr/local/bin/python3.6')
library(keras)
library(Matrix)
library(NMF)
library(NNLM)
## load image ##
imagem <- image_load( path = '/home/vm-data-science/education/dados/scallet.jpg' ) %>%
  image_to_array(., data_format = "channels_first" )
# dimensions (channels x height x width because of channels_first)
imagem %>%
  dim
# channels: render each colour channel as a grayscale raster
imagem[1,,] %>%
  as.raster( max = max(imagem, na.rm = TRUE) ) %>%
  plot()
imagem[2,,] %>%
  as.raster( max = max(imagem, na.rm = TRUE) ) %>%
  plot()
imagem[3,,] %>%
  as.raster( max = max(imagem, na.rm = TRUE) ) %>%
  plot()
## insert missing values ##
camada <- 2                           # channel to corrupt
imagem_problema <- imagem[camada,,]
nr <- nrow( imagem_problema )         # number of rows
nc <- ncol( imagem_problema )         # number of columns
p <- 0.3                              # target proportion of NA's
ina <- is.na( unlist(imagem_problema) )  # positions of any pre-existing NA's
n2 <- floor( p*nr*nc ) - sum( ina )      # how many new NA's are still needed
# FIX: sample only from cells that are NOT already flagged. The original
# sampled from which(!is.na(ina)), which is every cell (ina is logical and
# never NA), so already-missing cells could be re-picked and fewer than
# p*nr*nc NA's inserted.
ina[ sample(which(!ina), n2) ] <- TRUE
# FIX: spell out nrow/ncol -- the original relied on partial argument
# matching (nr=/nc=).
imagem_problema[matrix(ina, nrow = nr, ncol = nc)] <- NA
imagem_problema %>%
  as.raster( max = max(imagem, na.rm = TRUE) ) %>%
  plot()
## Fit NMF models of increasing rank and inspect each reconstruction
for( i in 1:30 ){
  show( paste0("Etapa: ", i) )
  modelo <- nnmf(imagem_problema,
                 k = i,
                 method = 'scd',
                 loss = 'mse',
                 n.threads = 0,
                 max.iter = 1000 )
  imagem_recuperada <- modelo$W %*% modelo$H
  # FIX: plot()/title() are side-effect calls returning NULL; the original
  # piped that NULL onward, which drew correctly only by accident. Call
  # them sequentially instead.
  plot( as.raster(imagem_recuperada, max = max(imagem_recuperada, na.rm = TRUE)) )
  title( main = paste0('teste ', i) )
}
# Compare the original and corrupted images with the last reconstruction
par( mfrow=c(1,3) )
plot( as.raster(imagem[camada,,], max = max(imagem, na.rm = TRUE)) )
title( main = 'Original' )
plot( as.raster(imagem_problema, max = max(imagem_problema, na.rm = TRUE)) )
title( main = 'Problemas' )
plot( as.raster(imagem_recuperada, max = max(imagem_recuperada, na.rm = TRUE)) )
title( main = 'Recuperada' )
|
#!/usr/bin/env Rscript
# Author: Eva Linehan
# Date: October 2018
# Desc: This script calculates heights of trees from a given .csv file and outputs
# the result in the following format; "InputFileName_treeheights.csv"
#clear environments
rm(list=ls())
# The height is calculated by using the given distance of each tree from its base
# and angle to its top, using the trigonometric formula;
#
# height = distance * tan(radians)
args<-commandArgs(trailingOnly = TRUE) # Defines command line arguments as vector "args"
# By trailingOnly = TRUE, only input file in command line is called
Data<-read.csv(args[1]) # Read csv file; expects Angle.degrees and Distance.m columns
filename <- tools::file_path_sans_ext(args[1]) # establishes a file path without the file extension using tools package
filewithoutext <- basename(filename) # Stores the file name without the path
#Alternative to commandArgs = would extract any *.csv but difficult to alter extension for output
#directory <- "../Data/"
#filenames <- list.files(directory, pattern = "*.csv", full.names = TRUE)
#Data<-read.csv(filenames, header = TRUE)
#Data<- read.table(filenames, sep = ",", header = TRUE)
# Compute tree height from the angle to the tree top (in degrees) and the
# horizontal distance to its base, via height = distance * tan(radians).
# Vectorised over both arguments; prints each height as a side effect and
# returns the heights.
TreeHeight <- function(degrees, distance) {
  angle_rad <- (pi / 180) * degrees
  tree_height <- tan(angle_rad) * distance
  print(paste("Tree height is:", tree_height))
  return(tree_height)
}
# Apply TreeHeight to every row of the input (vectorised over both columns).
Height.m<-TreeHeight(Data$Angle.degrees,Data$Distance.m)
OutputData<- cbind(Data, Height.m) #adds tree height column to output file
new.file.name <- paste0("../Results/", filewithoutext, "_treeheights.csv")
# paste0 allows you to combine things without a separator automatically
# NOTE(review): row.names defaults to TRUE, so row numbers are written as
# the first column of the output.
write.csv(OutputData, file = new.file.name) # write row names
|
/Week3/Code/get_TreeHeight.R
|
no_license
|
EvalImperialforces/CMEECourseWork
|
R
| false
| false
| 1,637
|
r
|
#!/usr/bin/env Rscript
# Author: Eva Linehan
# Date: October 2018
# Desc: This script calculates heights of trees from a given .csv file and outputs
# the result in the following format; "InputFileName_treeheights.csv"
#clear environments
rm(list=ls())
# The height is calculated by using the given distance of each tree from its base
# and angle to its top, using the trigonometric formula;
#
# height = distance * tan(radians)
args<-commandArgs(trailingOnly = TRUE) # Defines command line arguments as vector "args"
# By trailingOnly = TRUE, only input file in command line is called
Data<-read.csv(args[1]) # Read csv file; expects Angle.degrees and Distance.m columns
filename <- tools::file_path_sans_ext(args[1]) # establishes a file path without the file extension using tools package
filewithoutext <- basename(filename) # Stores the file name without the path
#Alternative to commandArgs = would extract any *.csv but difficult to alter extension for output
#directory <- "../Data/"
#filenames <- list.files(directory, pattern = "*.csv", full.names = TRUE)
#Data<-read.csv(filenames, header = TRUE)
#Data<- read.table(filenames, sep = ",", header = TRUE)
# Compute tree height from the angle to the tree top (in degrees) and the
# horizontal distance to its base, via height = distance * tan(radians).
# Vectorised over both arguments; prints each height as a side effect and
# returns the heights.
TreeHeight <- function(degrees, distance) {
  angle_rad <- (pi / 180) * degrees
  tree_height <- tan(angle_rad) * distance
  print(paste("Tree height is:", tree_height))
  return(tree_height)
}
# Apply TreeHeight to every row of the input (vectorised over both columns).
Height.m<-TreeHeight(Data$Angle.degrees,Data$Distance.m)
OutputData<- cbind(Data, Height.m) #adds tree height column to output file
new.file.name <- paste0("../Results/", filewithoutext, "_treeheights.csv")
# paste0 allows you to combine things without a separator automatically
# NOTE(review): row.names defaults to TRUE, so row numbers are written as
# the first column of the output.
write.csv(OutputData, file = new.file.name) # write row names
|
#Ryan Batt
#23 April 2011
#What is POM made of?
#Given POM, what is a consumer made of?
#The purpose of this script is first calculate the constituent components of Ward POM from the summer of 2010.
#Next, I will determine the composition of a consumer.
#I begin with the simplifying assumption that POM is made of terrestrial (sedge and alder) and phytoplankton sources.
#I will also assume that the consumer is eating some combination of the following four sources: 1) Epi Phyto 2) Meta Phyto 3) Equal parts of Alder, Sedge, Tree 4) DOC
#Version 5:
#Intended to be the final version
#Does not do a massive simulation through possible source combinations
#Looks at 2 possible source combinations: 1 with the phytoplankton split into Epi and Meta and the macrophytes and periphyton grouped, and the other with the phytos grouped but periphyton in a group separate from the macrophytes
#Previous analyses had forgotten to remove the Watershield data point that was a "stem" (I think, anyway).
#I may need to treat the "Tree" variance difference in the future, because this is actually adding another layer of nesting within a "source"
#This version will use a larger number of chains and longer chain lengths, and will do cross-validation for the density plots
#The plots should look better overall
#There are several samples which will be automatically excluded from analysis:
#All the Meta B POC samples-- Meta B sampling was thwarted throughout the season by a massive patch of Chara
#The Hypo DOC sample-- it has an N signature that is quite different from the others
#The "Nija" sample b/c there was only 1 sample of it
#The watershield sample that was just a stem-- its deuterium was different from the "leaf" samples
#The DIC data has not been edited for these weird Meta B and Hypo E samples, but those values are not used in this at all
#Version 5.1:
#Commented out that bullshit with the terrestrial variance being copied for the pelagic epilimnion and pelagic metalimnion... zomg.
#Version 7.0: Changed the terrestrial end member to not include DOM (DOC). Also, I later changed the graphing of the phytoplankton posteriors to round to one less digit for carbon-- this is to only have 3 sig figs, and also to make sure the 13C peak for the epi didn't overlap with the estimate
#Version 8.0: Including a new data file for the isotopes, which now includes additional tree data. Averages for each species are taken from the Cascade database. For the trees, there are only one or two samples (if 2, it's just analytical replicates) per species for C/N, whereas there are a ton of dueterium samples typically. The sample number refers to the sample number for the C/N data.
#Found an error where the number of consumers in the ConsMix function was calculted as the number of columns, but it should have been the number of rows
#Version 8.1:
#Run with DOM as it's own "terrestrial" source
#Version 8.2:
#Run with the "terrestrial" source as Alder, Sedge, and DOM
#Version 8.3:
#I am reformatting the Figures according to the editorial marks that I received on 10-May-2012
#Vesion 0.0.0 (10-Jan-2013): I am starting the versioning of this program over for the analysis of isotope data post-Aquashade manipulation
#Made changes in several places so that the analysis would be year-specific
#Automated the placement of the version and the figure folder name into several strings, given their specification near the beginning of this script
#Option to specify # of iterations
#Version 0.1.0 (11-Jan-2013): The previous version "worked" fine with the new data, but I kept estimating benthic contribution to the zooplankton, which I don't believe is actually happening. In an effort to correct this, I am changing this script to allow for consumer-specific groupings of sources. I don't want to just remove the option for zooplankton (etc.) to eat periphyton, I just think that this would be a less likely diet choice if the other 3 options were more appropriate. Regardless, the idea is to have the option to tailor the resource groupings to the specific consumer being analyzed.
# Session setup and terrestrial end-member calculation for the Ward Lake
# isotope mixing-model analysis (WinBUGS via R2WinBUGS, run under WINE on Mac).
rm(list=ls())
graphics.off()
Version <- "v0.2.0"
FigureFolder <- paste("Figures_", Version, sep="")
YearMix <- 2010 #The year to use for consumers, POM, DOM, Water, Periphyton (everything except for terrestrial and macrophytes)
Iterations <- 2000
#Select the top 2 if on Snow Leopard, the bottom 2 if on Leopard, and the selection doesn't matter if on a PC
WINE="/Applications/Darwine/Wine.bundle/Contents/bin/wine"
WINEPATH="/Applications/Darwine/Wine.bundle/Contents/bin/winepath"
# WINEPATH="/opt/local/bin/winepath"
# WINE="/opt/local/bin/wine"
library(R2WinBUGS)
setwd("/Users/Battrd/Documents/School&Work/WiscResearch/Isotopes_2010Analysis")
source("ConsMix_v6.R")
setwd("/Users/Battrd/Documents/School&Work/WiscResearch/Data/IsotopeData2012")
DataRaw <- read.csv("WardIsotopes_2010&2012_09Jan2013.csv", header=TRUE)
# Drop known-bad samples (see version history above) and fish records.
Data <- subset(DataRaw, Taxon!="Nija" & !is.element(SampleID, c("O-0362", "V-0270", "P-1202", "P-1166", "O-0382", "P-1165", "P-1206", "P-1238", "P-1239", "P-1243", "Z-1110", "Z-1115", "Z-1195", "Z-1170", "O-0405")) & is.na(FishID)) # SampleID!="O-0362" & SampleID!="V-0270" & SampleID!="P-1202" & SampleID!="P-1166")
Months <- c("May", "June", "July", "August")
#Calculate the algal end member from POM
# NOTE(review): both branches currently assign the same source list, so the
# year switch is a no-op -- presumably a placeholder for year-specific sources.
if(YearMix==2010){
TSources <- c("Alder", "Sedge", "Tamarack", "Tree")#, "Tamarack") #c("Alder", "Sedge", "DOM")
}else{
TSources <- c("Alder", "Sedge", "Tamarack", "Tree")#, "Tamarack")
}
#Signature of the terrestrial source
# Per-source means (and a placeholder variance frame) for d13C, d15N and dD.
nTS <- length(TSources)
TMeans <- data.frame("d13C"=rep(NA,nTS),"d15N"=rep(NA,nTS),"dD"=rep(NA,nTS), row.names=TSources)
TVars <- data.frame("d13C"=rep(NA,nTS),"d15N"=rep(NA,nTS),"dD"=rep(NA,nTS), row.names=TSources)
#Td13C_aov
Td15NObs <- data.frame()
TdDObs <- data.frame() #matrix(ncol=nTS, dimnames=list(NULL,TSources))
for(i in 1:length(TSources)){
TMeans[i,] <- apply(subset(Data, Taxon==TSources[i], select=c("d13C","d15N","dD")),2,mean)
}
# Grand terrestrial mean across sources.
dCNH_Terr_Mu <- apply(TMeans, 2, mean)
dCNH_Terr_Var <- data.frame("d13C"=NA, "d15N"=NA, "dD"=NA)
# Terrestrial variance per isotope: if an ANOVA across taxa is significant
# (p <= 0.1), use between+within mean squares; otherwise within-taxon only.
if(nTS>1){
Temp_d13C_aov <- anova(lm(d13C ~ Taxon, data=subset(Data, is.element(Taxon, TSources), select=c("Taxon","d13C"))))
if(Temp_d13C_aov$Pr[1] <= 0.1){
dCNH_Terr_Var["d13C"] <- sum(Temp_d13C_aov$Mean)
}else{
dCNH_Terr_Var["d13C"] <- Temp_d13C_aov$Mean[2]
}
Temp_d15N_aov <- anova(lm(d15N ~ Taxon, data=subset(Data, is.element(Taxon, TSources), select=c("Taxon","d15N"))))
if(Temp_d15N_aov$Pr[1] <= 0.1){
dCNH_Terr_Var["d15N"] <- sum(Temp_d15N_aov$Mean)
}else{
dCNH_Terr_Var["d15N"] <- Temp_d15N_aov$Mean[2]
}
Temp_dD_aov <- anova(lm(dD ~ Taxon, data=subset(Data, is.element(Taxon, TSources), select=c("Taxon","dD"))))
if(Temp_dD_aov$Pr[1] <= 0.1){
dCNH_Terr_Var["dD"] <- sum(Temp_dD_aov$Mean)
}else{
dCNH_Terr_Var["dD"] <- Temp_dD_aov$Mean[2]
}
}else{
dCNH_Terr_Var <- apply(subset(Data, is.element(Taxon, TSources), select=c("d13C", "d15N", "dD")), 2, var)
}
#Define the Terr objects to be used in the POM Mixture portion of the BUGS model
#**************************************
T_dX <- as.numeric(dCNH_Terr_Mu)
T_dX_Var <- as.numeric(dCNH_Terr_Var)
#**************************************
for(YearMix in c(2010, 2012)){
# TODO The water will need to be defined by year. Either stored in a higher dimensional object, or have separate objects for each year.
Water_dD_Mu <- mean(subset(Data, Type=="Water" & Year==YearMix, select="dD")[,])
Water_dD_Var <- var(subset(Data, Type=="Water" & Year==YearMix, select="dD")[,])
#Calculate Epi phyto deuterium prior from water dD
dD_Water_Epi <- subset(Data, Type=="Water" & Habitat=="Epi" & Year==YearMix, select="dD")[,]
dD_Water_Adj <- mean(c(-152.8, -172.4))#Fractionation range reported in Solomon et al. 2011
dD_Phyto_Epi_Mu <- mean(dD_Water_Epi + dD_Water_Adj)
#From Solomon et al. 2011 Appendix A: alpha phyto-water = mean ± sd = 0.84 ± 0.008; qnorm(p=.025, mean=-231.945, sd=5); var=25 it should have been ~70.. ask Grace.
dD_Phyto_Epi_Var <- var(dD_Water_Epi) + 25#variance of water + fractionation = variance of Phyto
dD_Phyto_Epi_Shape <- dD_Phyto_Epi_Var*0.1#dD_Phyto_Var~dgamma(shape,rate); shape when rate==0.1
#Signature of the Epi POM mixture
dCNH_POM_Epi <- subset(Data, Type=="POM" & Habitat=="Epi" & Year==YearMix, select=c("d13C","d15N","dD"))
POM_dX_Epi_Obs <- matrix(data=c(dCNH_POM_Epi[,1], dCNH_POM_Epi[,2], dCNH_POM_Epi[,3]), ncol=3)
POM_dX_Epi_Var <- apply(dCNH_POM_Epi, 2, var)
nPOM_Epi <- length(POM_dX_Epi_Obs[,1])
#Same POM and phyto calcs for Meta
#Calculate Algal deuterium prior from water dD
dD_Water_Meta <- subset(Data, Type=="Water" & Habitat=="Meta" & Year==YearMix, select="dD")[,]
dD_Phyto_Meta_Mu <- mean(dD_Water_Meta + dD_Water_Adj)
#From Solomon et al. 2011 Appendix A: alpha phyto-water = mean ± sd = 0.84 ± 0.008; qnorm(p=.025, mean=-231.945, sd=5); var=25
dD_Phyto_Meta_Var <- var(dD_Water_Meta) + 25#variance of water + variance of fractionation = variance of Phyto
dD_Phyto_Meta_Shape <- dD_Phyto_Meta_Var*0.1#dD_Phyto_Var~dgamma(shape,rate); shape when rate==0.1
#Signature of the Meta POM mixture
dCNH_POM_Meta <- subset(Data, Type=="POM" & Habitat=="Meta" & Year==YearMix, select=c("d13C","d15N","dD"))
POM_dX_Meta_Obs <- matrix(data=c(dCNH_POM_Meta[,1], dCNH_POM_Meta[,2], dCNH_POM_Meta[,3]), ncol=3)
POM_dX_Meta_Var <- apply(dCNH_POM_Meta, 2, var)
nPOM_Meta <- length(POM_dX_Meta_Obs[,1])
#Run BUGS Part 1: Using POM, calculate the isotopic signatures of epilimnetic and metalimnetic phytoplankton
SupplyBUGS_pt1 <- list(T_dX, T_dX_Var, dD_Phyto_Epi_Mu, dD_Phyto_Epi_Shape, POM_dX_Epi_Obs, nPOM_Epi, dD_Phyto_Meta_Mu, dD_Phyto_Meta_Shape, POM_dX_Meta_Obs, nPOM_Meta)
names(SupplyBUGS_pt1) <- strsplit(c("T_dX, T_dX_Var, dD_Phyto_Epi_Mu, dD_Phyto_Epi_Shape, POM_dX_Epi_Obs, nPOM_Epi, dD_Phyto_Meta_Mu, dD_Phyto_Meta_Shape, POM_dX_Meta_Obs, nPOM_Meta"), split=", ")[[1]]
ParamBUGS_pt1 <- c("f", "P_dC_Epi", "P_dN_Epi", "P_dD_Epi", "P_dC_Epi_Var", "P_dN_Epi_Var", "P_dD_Epi_Var", "P_dC_Meta", "P_dN_Meta", "P_dD_Meta", "P_dC_Meta_Var", "P_dN_Meta_Var", "P_dD_Meta_Var", "residSd")
BUGSfile_pt1 <- "/Users/Battrd/Documents/School&Work/WiscResearch/Isotopes_2010Analysis/mix_Cons_Mixture_Ward2010_v2_pt1.bug"
if(.Platform$OS.type=="windows"){
bugsOut_pt1 <- bugs(SupplyBUGS_pt1, inits=NULL, ParamBUGS_pt1, BUGSfile_pt1, n.chains=8, n.iter=Iterations, program="winbugs", working.directory=NULL, debug=FALSE, clearWD=FALSE)
}else{
bugsOut_pt1 <- bugs(SupplyBUGS_pt1, inits=NULL, ParamBUGS_pt1, BUGSfile_pt1, n.chains=8, n.iter=Iterations, program="winbugs", working.directory=NULL, clearWD=TRUE, useWINE=TRUE, newWINE=TRUE, WINEPATH=WINEPATH, WINE=WINE, debug=FALSE)
}
#Extract and name relevant information concerning epilimnetic and metalimnetic phytoplankton
#**************************************
# Posterior means of the phytoplankton signatures and their variances, ordered
# (d13C, d15N, dD), for both the epilimnion and metalimnion.
P_dX_Epi <- c(bugsOut_pt1$mean$P_dC_Epi, bugsOut_pt1$mean$P_dN_Epi, bugsOut_pt1$mean$P_dD_Epi)
P_dX_Epi_Var <- c(bugsOut_pt1$mean$P_dC_Epi_Var, bugsOut_pt1$mean$P_dN_Epi_Var, bugsOut_pt1$mean$P_dD_Epi_Var)
P_dX_Meta <- c(bugsOut_pt1$mean$P_dC_Meta, bugsOut_pt1$mean$P_dN_Meta, bugsOut_pt1$mean$P_dD_Meta)
P_dX_Meta_Var <- c(bugsOut_pt1$mean$P_dC_Meta_Var, bugsOut_pt1$mean$P_dN_Meta_Var, bugsOut_pt1$mean$P_dD_Meta_Var)
# Simulate pseudo-observations of Epi phytoplankton so they can be pooled with
# the raw source data later: draw nPOM_Epi posterior samples per isotope,
# standardize each column, then rescale to the posterior mean/variance.
Sim_P_dX_Epi_Obs <- as.data.frame(matrix(data=rep(rnorm(n=nPOM_Epi),3), ncol=3, byrow=FALSE))
Sim_P_dX_Epi_Obs[,1] <- sample(bugsOut_pt1$sims.matrix[,"P_dC_Epi"], size=nPOM_Epi)
Sim_P_dX_Epi_Obs[,2] <- sample(bugsOut_pt1$sims.matrix[,"P_dN_Epi"], size=nPOM_Epi)
Sim_P_dX_Epi_Obs[,3] <- sample(bugsOut_pt1$sims.matrix[,"P_dD_Epi"], size=nPOM_Epi)
# Column-wise standardization: subtract each column's mean and divide by its
# sd (the means/sds are tiled into full-size data frames to avoid recycling).
Sim_P_dX_Epi_Obs <- (Sim_P_dX_Epi_Obs-as.data.frame(matrix(data=rep(apply(Sim_P_dX_Epi_Obs,2,mean), nPOM_Epi), ncol=3, byrow=TRUE)))/as.data.frame(matrix(data=rep(apply(Sim_P_dX_Epi_Obs,2,sd), nPOM_Epi), ncol=3, byrow=TRUE))
# Rescale the standardized draws to the posterior mean and variance.
Sim_P_dX_Epi_Obs[,1] <- Sim_P_dX_Epi_Obs[,1]*sqrt(P_dX_Epi_Var[1])+P_dX_Epi[1]
Sim_P_dX_Epi_Obs[,2] <- Sim_P_dX_Epi_Obs[,2]*sqrt(P_dX_Epi_Var[2])+P_dX_Epi[2]
Sim_P_dX_Epi_Obs[,3] <- Sim_P_dX_Epi_Obs[,3]*sqrt(P_dX_Epi_Var[3])+P_dX_Epi[3]
colnames(Sim_P_dX_Epi_Obs) <- c("d13C","d15N","dD")
Sim_P_dX_Epi_Obs <- cbind("Taxon"=rep("EpiPhyto",nPOM_Epi), Sim_P_dX_Epi_Obs)
# Simulate pseudo-observations of Meta phytoplankton, mirroring the Epi case:
# draw nPOM_Meta posterior samples per isotope, standardize each column, then
# rescale to the posterior mean and variance.
Sim_P_dX_Meta_Obs <- as.data.frame(matrix(data=rep(rnorm(n=nPOM_Meta),3), ncol=3, byrow=FALSE))
Sim_P_dX_Meta_Obs[,1] <- sample(bugsOut_pt1$sims.matrix[,"P_dC_Meta"], size=nPOM_Meta)
Sim_P_dX_Meta_Obs[,2] <- sample(bugsOut_pt1$sims.matrix[,"P_dN_Meta"], size=nPOM_Meta)
Sim_P_dX_Meta_Obs[,3] <- sample(bugsOut_pt1$sims.matrix[,"P_dD_Meta"], size=nPOM_Meta)
# Column-wise standardization (means/sds tiled to full size to avoid recycling).
Sim_P_dX_Meta_Obs <- (Sim_P_dX_Meta_Obs-as.data.frame(matrix(data=rep(apply(Sim_P_dX_Meta_Obs,2,mean), nPOM_Meta), ncol=3, byrow=TRUE)))/as.data.frame(matrix(data=rep(apply(Sim_P_dX_Meta_Obs,2,sd), nPOM_Meta), ncol=3, byrow=TRUE))
# NOTE(review): a second, redundant standardization line was removed here.
# After the line above each column already has mean 0 and sd 1 (so the removed
# line was numerically a no-op), and its `df - apply(df,2,mean)` form recycled
# the length-3 vector down rows rather than across columns. The Meta block now
# matches the Epi block exactly.
Sim_P_dX_Meta_Obs[,1] <- Sim_P_dX_Meta_Obs[,1]*sqrt(P_dX_Meta_Var[1])+P_dX_Meta[1]
Sim_P_dX_Meta_Obs[,2] <- Sim_P_dX_Meta_Obs[,2]*sqrt(P_dX_Meta_Var[2])+P_dX_Meta[2]
Sim_P_dX_Meta_Obs[,3] <- Sim_P_dX_Meta_Obs[,3]*sqrt(P_dX_Meta_Var[3])+P_dX_Meta[3]
colnames(Sim_P_dX_Meta_Obs) <- c("d13C","d15N","dD")
# Sim_P_dX_Meta_Obs <- cbind("Year"=YearMix, "Taxon"=rep("MetaPhyto",nPOM_Meta), Sim_P_dX_Meta_Obs)
Sim_P_dX_Meta_Obs <- cbind("Taxon"=rep("MetaPhyto",nPOM_Meta), Sim_P_dX_Meta_Obs)
#**************************************
#*****************************************************
#Begin for consumers and their respective sources
#*****************************************************
# Consumers analyzed this year, their trophic levels, and plot titles.
# Commented tails list additional taxa considered but currently excluded.
if(YearMix==2010){
Cons <- c("Calanoid", "Chaoborus", "Helisoma trivolvis") #, "PKS", "FHM", "YWP", "CMM", "BHD", "Mesocyclops", "DAC")
TL <- c(1, 2, 1) #, 2, 2, 3, 2.5, 2.5, 1.5, 1)
GraphTitle <- c("Skistodiaptomus oregonensis", "Chaoborus spp.", "Helisoma trivolvis") #, "Lepomis gibbosus", "Pimephales promelas", "Perca flavescens", "Umbra limi", "Ameiurus melas", "Mesocyclops spp.", "Phoxinus spp.")
}else{
Cons <- c("Calanoid", "Chaoborus", "Helisoma trivolvis") #, "PKS", "FHM", "CMM", "BHD", "Mesocyclops", "DAC")
TL <- c(1, 2, 1) #, 2, 2, 2.5, 2.5, 1.5, 1)
GraphTitle <- c("Skistodiaptomus oregonensis", "Chaoborus spp.", "Helisoma trivolvis") #, "Lepomis gibbosus", "Pimephales promelas", "Umbra limi", "Ameiurus melas", "Mesocyclops spp.", "Phoxinus spp.")
}
# Named groupings of individual taxa into candidate diet sources.
AllMacs <- c("Brasenia schreberi", "Chara", "Najas flexilis", "Nuphar variegata", "Nymphaea odorata", "Potamogeton amplifolius", "Potamogeton nodosus", "Potamogeton pusillus")
FloatMacs <- c("Brasenia schreberi", "Nuphar variegata", "Nymphaea odorata", "Potamogeton nodosus")
SubMacs <- c("Chara", "Najas flexilis", "Potamogeton amplifolius", "Potamogeton pusillus")
AllTerr <- c("Alder", "Sedge", "Tamarack", "Tree")
LocalTerr <- c("Alder", "Sedge", "Tamarack")
SourceOpts <- list("All Macrophytes"=AllMacs, "Floating Macrophytes"=FloatMacs, "Submersed Macrophytes"=SubMacs, "All Terrestrial"=AllTerr, "Local Terrestrial"=LocalTerr, "All Phytoplankton"=c("EpiPhyto", "MetaPhyto"), "Epi. Phytoplankton"="EpiPhyto", "Meta. Phytoplankton"="MetaPhyto", "DOM"="DOM", "Periphyton"="Periphyton")
# Per-consumer choices: two alternative 4-source groupings for each consumer.
ConsChoices <- list(
"Calanoid"=list(c("All Terrestrial", "Epi. Phytoplankton", "Periphyton", "DOM") , c("Local Terrestrial", "Epi. Phytoplankton", "Meta. Phytoplankton", "DOM")),
"Chaoborus"=list(c("All Terrestrial", "Epi. Phytoplankton", "Meta. Phytoplankton", "DOM") , c("Local Terrestrial", "Epi. Phytoplankton", "Meta. Phytoplankton", "DOM")),
"Helisoma trivolvis"=list(c("All Terrestrial", "All Macrophytes", "Periphyton", "DOM"), c("Local Terrestrial", "Floating Macrophytes", "Submersed Macrophytes", "Periphyton"))
)
# Basal sources: this-year primary producers/DOM (excluding POM), plus all
# macrophyte and terrestrial samples from any year. Parentheses added to make
# the &/| grouping explicit (identical to R's default precedence: & before |).
SourceData <- subset(Data, (Trophic==0 & Taxon!="POM" & Year==YearMix) | is.element(Type, c("Macrophyte", "Terrestrial")))
SourceTaxa <- as.character(unique(SourceData[,"Taxon"]))
# Per-taxon means and variances for each isotope (columns: d13C, d15N, dD).
Source_Means <- matrix(ncol=3, nrow=length(SourceTaxa), dimnames=list(SourceTaxa,NULL))
Source_Vars <- matrix(ncol=3, nrow=length(SourceTaxa), dimnames=list(SourceTaxa,NULL))
for(i in 1:length(SourceTaxa)){
Source_Means[i,] <- apply(subset(SourceData, Taxon==SourceTaxa[i], select=c("d13C","d15N","dD")), 2, mean)
Source_Vars[i,] <- apply(subset(SourceData, Taxon==SourceTaxa[i], select=c("d13C","d15N","dD")), 2, var)
}
# Append the BUGS-estimated phytoplankton end members to the source tables.
Source_Means <- rbind(Source_Means, "EpiPhyto"=P_dX_Epi, "MetaPhyto"=P_dX_Meta)
Source_Vars <- rbind(Source_Vars, "EpiPhyto"=P_dX_Epi_Var, "MetaPhyto"=P_dX_Meta_Var)
# nSrcs <- length(SourceNames[[f_Src]])
# Pool observed source data with the simulated phytoplankton pseudo-observations.
SourceData_dX_Obs <- SourceData[,c("Taxon","d13C","d15N","dD")]
SourceData_dX_Obs <- rbind(SourceData_dX_Obs, Sim_P_dX_Epi_Obs, Sim_P_dX_Meta_Obs)
# For each consumer, and for each of its two candidate source groupings,
# compute the mean and variance of each of the four grouped sources.
for(g_Cons in 1:length(Cons)){
TempoCons <- Cons[g_Cons]
SourceNames <- ConsChoices[[TempoCons]]
# SourceNames[[k]][j] = name (in SourceOpts) of the j-th source under grouping k.
FirstSources <- list(SourceOpts[[SourceNames[[1]][1]]], SourceOpts[[SourceNames[[2]][1]]])
SecondSources <- list(SourceOpts[[SourceNames[[1]][2]]], SourceOpts[[SourceNames[[2]][2]]])
ThirdSources <- list(SourceOpts[[SourceNames[[1]][3]]], SourceOpts[[SourceNames[[2]][3]]])
FourthSources <- list(SourceOpts[[SourceNames[[1]][4]]], SourceOpts[[SourceNames[[2]][4]]])
for(f_Src in 1:2){
Source1 <- FirstSources[[f_Src]]
Source2 <- SecondSources[[f_Src]]
Source3 <- ThirdSources[[f_Src]]
Source4 <- FourthSources[[f_Src]]
nSrcs <- length(SourceNames[[f_Src]])
# Build SourceI_Mean / SourceI_Var (I = 1..nSrcs) via assign()/get() so the
# four sources can be handled generically in one loop.
for(i in 1:nSrcs){
TempName_Source <- paste("Source", i, sep="")
TempName_Mean <- paste(paste("Source", paste(i, "_Mean", sep=""), sep=""))
TempName_Var <- paste(paste("Source", paste(i, "_Var", sep=""), sep=""))
# Multi-taxon source: mean of the member-taxon means; single taxon: its own mean.
if(length(get(TempName_Source))>1){assign(TempName_Mean, apply(Source_Means[get(TempName_Source),], 2, mean))}else{assign(TempName_Mean, Source_Means[get(TempName_Source),])}
if(length(get(TempName_Source))>1){
assign(TempName_Var, data.frame("d13C"=NA, "d15N"=NA, "dD"=NA))#This is to clear the temporary data frame at the beginning of each loop
# For multi-taxon sources, use a one-way ANOVA among member taxa: if taxa
# differ (p <= 0.1), inflate the variance by adding the between-taxon mean
# square to the within-taxon mean square; otherwise use the within MS only.
# NOTE(review): $Pr and $Mean rely on partial matching of the anova table's
# "Pr(>F)" and "Mean Sq" columns.
Temp_d13C_aov <- anova(lm(d13C ~ Taxon, data=subset(SourceData_dX_Obs, is.element(Taxon, get(TempName_Source)), select=c("Taxon","d13C"))))
if(Temp_d13C_aov$Pr[1] <= 0.1){
Temp_d13C_Var <- sum(Temp_d13C_aov$Mean)
}else{
Temp_d13C_Var <- Temp_d13C_aov$Mean[2]
}
Temp_d15N_aov <- anova(lm(d15N ~ Taxon, data=subset(SourceData_dX_Obs, is.element(Taxon, get(TempName_Source)), select=c("Taxon","d15N"))))
if(Temp_d15N_aov$Pr[1] <= 0.1){
Temp_d15N_Var <- sum(Temp_d15N_aov$Mean)
}else{
Temp_d15N_Var <- Temp_d15N_aov$Mean[2]
}
Temp_dD_aov <- anova(lm(dD ~ Taxon, data=subset(SourceData_dX_Obs, is.element(Taxon, get(TempName_Source)), select=c("Taxon","dD"))))
if(Temp_dD_aov$Pr[1] <= 0.1){
Temp_dD_Var <- sum(Temp_dD_aov$Mean)
}else{
Temp_dD_Var <- Temp_dD_aov$Mean[2]
}
assign(TempName_Var, c(Temp_d13C_Var, Temp_d15N_Var, Temp_dD_Var))
}else{
# Single-taxon source: use that taxon's own per-isotope variance.
assign(TempName_Var, Source_Vars[get(TempName_Source),])
}
}#Finish the loop that handles each source (1 through 4) one at a time for this particular set of sources for this consumer
#Then collect the source means and variances from the previous loop; the following could have been condensed into previous loop.
# Assemble the 3 x nSrcs matrices of source means/variances for ConsMix().
Srcs_dX_Ward <- c()
Srcs_dX_Var_Ward <- c()
for(i in 1:nSrcs){
TempName_Mean <- paste(paste("Source", paste(i, "_Mean", sep=""), sep=""))
TempName_Var <- paste(paste("Source", paste(i, "_Var", sep=""), sep=""))
Srcs_dX_Ward <- cbind(Srcs_dX_Ward, get(TempName_Mean))
Srcs_dX_Var_Ward <- cbind(Srcs_dX_Var_Ward, get(TempName_Var))
}
# TODO This is where I need to begin separating out the consumer resource use by week/ year. All I have to do is add 2 more levels to the loop (1 level for Year, 1 level for week [/ all weeks at once]), change the name of "Temp_BugOut" to reflect where the loop is in these new levels (just like it does for g_Cons and f_Src... actually, I might even want to remove f_Src, or just change it to 1:1 for now). Then I'll be creating a new object for each level I break the analysis down. So for each consumer I can have each year analyzed as a whole and on a per-sampling-week basis. Later, I can make plots similar to how I did before, except instead of having the 2 columns be for Grouping 1 and Grouping 2, I can have the 2 columns be for 2010 and 2012. Then, instead of having just one density line, I can have a 1 + W density lines, where W is the number of weeks sampled, and the extra line being the answer you would get if you pooled all the samples from that year together.
# Run the mixing model separately for each week this consumer was sampled.
ConsWeeks <- unique(subset(Data, Taxon==Cons[g_Cons] & Year==YearMix)[,"Week"])
for(WK in ConsWeeks){
ThisMonth <- Months[WK]
# Result object name encodes consumer, source grouping, and month.
Temp_BugOut <- paste("bugsOut_", Cons[g_Cons], "_SrcComb", f_Src, "_",ThisMonth, sep="")
# Temp_BugOut <- paste("bugsOut", paste(Cons[g_Cons], paste("SrcComb", f_Src, sep=""), sep="_"), sep="_")
Cons_Data <- subset(Data, Taxon==Cons[g_Cons] & Year==YearMix & Week==WK, select=c("Trophic","d13C","d15N","dD"))
Cons_dX_Obs <- matrix(data=c(Cons_Data[,2], Cons_Data[,3], Cons_Data[,4]), ncol=3)
ConsName <- as.character(subset(Data, Taxon==Cons[g_Cons] & Year==YearMix & Week==WK, select=Type)[1,1])#There should be a better way to do this...
# ConsMix() (sourced from ConsMix_v6.R) runs the consumer mixing model in BUGS.
assign(Temp_BugOut, ConsMix(Cons_dX_Obs=Cons_dX_Obs, TL=TL[g_Cons], Srcs_dX=Srcs_dX_Ward, Srcs_dX_Var=Srcs_dX_Var_Ward, Water_dD_Mu, Water_dD_Var, FractModel=TRUE, SrcNames=SourceNames[[f_Src]], ConsName=ConsName, GraphTitle=GraphTitle[g_Cons], WINE=WINE, WINEPATH= WINEPATH, nChains=8, ChainLength=Iterations, Plot=FALSE))
# Accumulate posterior diet fractions into one long data frame.
# NOTE(review): initialization fires only on WK==1 of the first consumer in
# 2010 -- if the first sampled week is not coded as 1, ResourceUse would be
# undefined when the rbind() branch runs. Confirm Week coding starts at 1.
if(g_Cons==1 & f_Src==1 & WK==1 & YearMix==2010){
ResourceUse <- data.frame("Year"=YearMix, "Month"=ThisMonth, "Consumer"=Cons[g_Cons], "Grouping"=f_Src, get(Temp_BugOut)$sims.matrix[,1:4])
}else{
TempoResourceUse <- data.frame("Year"=YearMix, "Month"=ThisMonth, "Consumer"=Cons[g_Cons], "Grouping"=f_Src, get(Temp_BugOut)$sims.matrix[,1:4])
ResourceUse <- rbind(ResourceUse, TempoResourceUse)
}
}
}#Finish loop the loop that handles the two sets of sources for this particular consumer
}#Finish loop that estimates resource use for each consumer under 2 scenarios of available resources/ grouping of resources
}#End Year loop
# --- Summary boxplots of posterior diet fractions by month and year ---
# Which of the two source groupings to plot (1 or 2).
GroupChoose <- 2
for(i in 1:length(Cons)){
# Posterior draws for this consumer under the chosen grouping.
ThisRU <- droplevels(subset(ResourceUse, Consumer==Cons[i] & Grouping==GroupChoose))
# NOTE(review): indexing ConsChoices by position assumes its order matches Cons.
ResourceNames <- ConsChoices[[i]][[GroupChoose]]
# Rep2010 <- dim(subset(unique(subset(ThisRU, select=c("Month", "Year"))), Year==2010))[1]
# Rep2012 <- dim(subset(unique(subset(ThisRU, select=c("Month", "Year"))), Year==2012))[1]
# Month*Year box labels/colors: months crossed with years, 2010 red, 2012 blue.
# (A dead pre-assignment of TheseMonths, overwritten below, was removed.)
MoChar <- as.character(sort(unique(ThisRU[,"Month"])))
YeNum <- as.numeric(sort(unique(ThisRU[,"Year"])))
TheseMonths <- as.character(expand.grid(MoChar, YeNum)[,1])
# NOTE(review): assumes both years contribute equally many months -- confirm.
RepYearCol <- length(TheseMonths)/2
Rep2010 <- RepYearCol
Rep2012 <- RepYearCol
# One panel per diet source; DietF.1. .. DietF.4. come from the sims.matrix
# column names after data.frame() mangling of "DietF[1]" etc.
dev.new(width=8, height=7)
par(mfrow=c(2,2), mar=c(2.5,4,1,1), oma=c(0,0,2,0))
boxplot(DietF.1.~Month*Year, data=ThisRU, col=c(rep("#FA807225",Rep2010), rep("#3A5FCD25",Rep2012)), border=c(rep("red",Rep2010), rep("blue",Rep2012)), names=TheseMonths, outline=FALSE, ylim=c(0,1))
mtext(ResourceNames[1], side=2, line=2.5)
boxplot(DietF.2.~Month*Year, data=ThisRU, col=c(rep("#FA807225",Rep2010), rep("#3A5FCD25",Rep2012)), border=c(rep("red",Rep2010), rep("blue",Rep2012)), names=TheseMonths, outline=FALSE, ylim=c(0,1))
mtext(ResourceNames[2], side=2, line=2.5)
boxplot(DietF.3.~Month*Year, data=ThisRU, col=c(rep("#FA807225",Rep2010), rep("#3A5FCD25",Rep2012)), border=c(rep("red",Rep2010), rep("blue",Rep2012)), names=TheseMonths, outline=FALSE, ylim=c(0,1))
mtext(ResourceNames[3], side=2, line=2.5)
boxplot(DietF.4.~Month*Year, data=ThisRU, col=c(rep("#FA807225",Rep2010), rep("#3A5FCD25",Rep2012)), border=c(rep("red",Rep2010), rep("blue",Rep2012)), names=TheseMonths, outline=FALSE, ylim=c(0,1))
mtext(ResourceNames[4], side=2, line=2.5)
mtext(Cons[i], side=3, line=0, outer=TRUE)
}
#
# # TODO Change plots --- but these don't go in the paper, so maybe leave alone for now
# graphics.off()
# GroupingTitles <- c(expression(underline(bold(Grouping~1))), expression(underline(bold(Grouping~2))))
# LegendTitle <- list(c("A", "B", "C", "D"), c("E", "F", "G", "H"))
# setwd(paste("/Users/battrd/Documents/School&Work/WiscResearch/Isotopes_2012Analysis/",FigureFolder,sep=""))
# for(g_Cons in 1:length(Cons)){
# # dev.new(height=7, width=3.5)
# pdf(file=paste(paste(gsub(" ", "_", GraphTitle[g_Cons]), "_", YearMix, "_", Version, sep=""), ".pdf", sep=""), height=7, width=3.5, pointsize=9)
# par(mfcol=c(4,2), family="Times", las=1, mar=c(2.1,2.1,1.1,1.1), oma=c(2,2,5,0), cex=1)
#
# for(f_Src in 1:2){
# # Temp_BugOut <- paste("bugsOut", paste(Cons[g_Cons], paste("SrcComb", f_Src, sep=""), sep="_"), sep="_")\
# paste("bugsOut_", Cons[g_Cons], "_SrcComb", f_Src, "_",ThisMonth, sep="")
#
# TempoCons <- Cons[g_Cons]
# SourceNames <- ConsChoices[[TempoCons]]
#
# #Plot the consumer diet
# plot.density(density(get(Temp_BugOut)$sims.matrix[,1], from=0, to=1, bw="nrd0"), xlab="", ylab="", main="", bty="l", xaxt="n", zero.line=FALSE)
# title(main=LegendTitle[[f_Src]][1], adj=1, line=-0.5)
# mtext(paste(SourceNames[[f_Src]][1], paste(round(get(Temp_BugOut)$mean[[1]][1]*100, 0), "%", sep=""), sep=", "), side=3, line=0.5, outer=FALSE, las=0, font=3, cex=0.75)
# mtext(GroupingTitles[f_Src], outer=FALSE, line=2.25, cex=0.85)
#
# plot.density(density(get(Temp_BugOut)$sims.matrix[,2], from=0, to=1, bw="nrd0"), main="", ylab="", xlab="", bty="l", xaxt="n", zero.line=FALSE)
# title(main=LegendTitle[[f_Src]][2], adj=1, line=-0.5)
# mtext(paste(SourceNames[[f_Src]][2], paste(round(get(Temp_BugOut)$mean[[1]][2]*100, 0), "%", sep=""), sep=", "), side=3, line=0.5, outer=FALSE, las=0, font=3, cex=0.75)
#
#
# plot.density(density(get(Temp_BugOut)$sims.matrix[,3], from=0, to=1, bw="nrd0"), main="", xlab="Percent Diet", ylab="", bty="l", xaxt="n", zero.line=FALSE)
# title(main=LegendTitle[[f_Src]][3], adj=1, line=-0.5)
# mtext(paste(SourceNames[[f_Src]][3], paste(round(get(Temp_BugOut)$mean[[1]][3]*100, 0), "%", sep=""), sep=", "), side=3, line=0.5, outer=FALSE, las=0, font=3, cex=0.75)
#
#
# plot.density(density(get(Temp_BugOut)$sims.matrix[,4], from=0, to=1, bw="nrd0"), main="", ylab="", xlab="Percent Diet", bty="l", xaxt="s", zero.line=FALSE)
# title(main=LegendTitle[[f_Src]][4], adj=1, line=-0.5)
# mtext(paste(SourceNames[[f_Src]][4], paste(round(get(Temp_BugOut)$mean[[1]][4]*100, 0), "%", sep=""), sep=", "), side=3, line=0.5, outer=FALSE, las=0, font=3, cex=0.75)
#
# #*************************
# }
# mtext("Fraction of Diet", side=1, line=0.5, font=2, outer=TRUE, cex=0.85)
# mtext("Density", side=2, line=0.5, font=2, las=0, outer=TRUE, cex=0.85)
# if(GraphTitle[g_Cons]!="Chaoborus spp."){mtext(GraphTitle[g_Cons], side=3, line=3, font=4, cex=1, outer=TRUE)}else{mtext(expression(bolditalic(Chaoborus)~bold(spp.)), side=3, line=3, cex=1, outer=TRUE)}
# # setwd(paste("/Users/battrd/Documents/School&Work/WiscResearch/Isotopes_2012Analysis/",FigureFolder,sep=""))
# # dev2bitmap(file=paste(paste(gsub(" ", "_", GraphTitle[g_Cons]), Version, sep=""), ".tif", sep=""), type="tiffgray",height=7, width=3.5, res=200, font="Times", method="pdf", pointsize=12)
# dev.off()
#
#
# }
# setwd("/Users/battrd/Documents/School&Work/WiscResearch/Isotopes_2012Analysis")
# save(list=c("DataRaw", "Sim_P_dX_Meta_Obs", "Sim_P_dX_Epi_Obs"), file=paste("Data+Phyto_NoTree_", YearMix, "_", Version, ".RData",sep=""))
#
# setwd(paste("/Users/battrd/Documents/School&Work/WiscResearch/Isotopes_2012Analysis/",FigureFolder,sep=""))
# # TODO Change the plot for Epi POM (Figure 1)
# #Plot the composition of POM
#
# PubCex=1 #(9/(12*0.83))
# PanelNameAdj <- c(0.25, 0.33, 0.55, 0.58)
#
# #Plot the composition of Epilimnetic POM
# LegendTitle <- list(c("A)", "B)", "C)", "D)"), c("E", "F", "G", "H")) #CHANGED added )'s
# # dev.new(height=3.5, width=3.5) #CHANGED I am changing the way the plot is saved-- now using pdf(), and then embedFonts to ensure that the fonts are embedded (uses GS)
# #Because these plots will be 2x2, the base cex is reduced by a factor of 0.83 (see ?par, mfrow). If the default point size is 12, a point size of 9 would be cex= 9/(12*0.83)
# pdf(file=paste("EpiPhyto_Post_", YearMix, "_", Version, ".pdf", sep=""), width=3.5, height=3.5, family="Times", pointsize=9)
# par(mfrow=c(2,2), las=1, mar=c(3,2.5,0.1,1), oma=c(0,0,0.2,0), cex=PubCex)
#
# TerrYLim <- range(density(bugsOut_pt1$sims.matrix[,"f[1]"], from=0, to=1)$y)*c(1, 1.15)
# plot.density(density(bugsOut_pt1$sims.matrix[,"f[1]"], from=0, to=1),xlab="", ylab="", main="", bty="l", xaxt="s", zero.line=FALSE, ylim=TerrYLim)
# title(main=LegendTitle[[1]][1], adj=0.025, line=-0.7, font.main=1, cex.main=PubCex) #CHANGED changed the adj from 1 to 0.1, added font.main=1, line from -0.5 to -1
# mtext("Terrestrial", side=3, line=-0.9, outer=FALSE, las=0, font=1, adj=PanelNameAdj[1], cex=PubCex) #CHANGED line from 0 to -1, deleted cex=0.85, changed font=3 to 1
# title(paste(round(bugsOut_pt1$mean[[1]][1]*100, 0), "%", sep=""), adj=0.1, line=-1.75, font.main=3, cex.main=PubCex) #CHANGED deleted cex.main=0.85,
#
# PdCYLim <- range(density(bugsOut_pt1$sims.matrix[,"P_dC_Epi"])$y)*c(1, 1.15)
# plot.density(density(bugsOut_pt1$sims.matrix[,"P_dC_Epi"]), main="", ylab="", xlab="", bty="l", xaxt="s", zero.line=FALSE, ylim=PdCYLim)
# title(main=LegendTitle[[1]][3], adj=0.025, line=-0.7, font.main=1, cex.main=PubCex)
# mtext(expression(Phytoplankton~phantom()^13*C), side=3, line=-1.1, outer=FALSE, las=0, font=1, adj=PanelNameAdj[3], cex=PubCex)
# title(paste(round(bugsOut_pt1$mean$P_dC_Epi, 1), "", sep=""), adj=0.1, line=-1.75, font.main=3, cex.main=PubCex)
#
# PhytYLim <- range(density(bugsOut_pt1$sims.matrix[,"f[2]"], from=0, to=1)$y)*c(1, 1.15)
# plot.density(density(bugsOut_pt1$sims.matrix[,"f[2]"], from=0, to=1), main="", xlab="", ylab="", bty="l", xaxt="s", zero.line=FALSE, ylim=PhytYLim)
# title(main=LegendTitle[[1]][2], adj=0.025, line=-0.7, font.main=1, cex.main=PubCex)
# mtext("Phytoplankton", side=3, line=-0.9, outer=FALSE, las=0, font=1, adj=PanelNameAdj[2], cex=PubCex)
# title(paste(round(bugsOut_pt1$mean[[1]][2]*100, 0), "%", sep=""), adj=0.1, line=-1.75, font.main=3, cex.main=PubCex)
# mtext("Fraction of POM", side=1, line=2, cex=PubCex, font=1, outer=FALSE)
#
# PdNYLim <- range(density(bugsOut_pt1$sims.matrix[,"P_dN_Epi"])$y)*c(1, 1.15)
# plot.density(density(bugsOut_pt1$sims.matrix[,"P_dN_Epi"]), main="", ylab="", xlab="", bty="l", xaxt="s", zero.line=FALSE, ylim=PdNYLim)
# title(main=LegendTitle[[1]][4], adj=0.025, line=-0.7, font.main=1, cex.main=PubCex)
# mtext(expression(Phytoplankton~phantom()^15*N), side=3, line=-1.1, outer=FALSE, las=0, font=1, adj=PanelNameAdj[4], cex=PubCex)
# title(paste(round(bugsOut_pt1$mean$P_dN_Epi, 2), "", sep=""), adj=0.1, line=-1.75, font.main=3, cex.main=PubCex)
# mtext("Isotopic signature", side=1, line=2, cex=PubCex, font=1, outer=FALSE)
#
# mtext("Density", side=2, line=-1, font=1, las=0, outer=TRUE, cex=PubCex)
#
# # setwd("/Users/battrd/Documents/School&Work/WiscResearch/Isotopes_2010Analysis/Figures_v8.3")
# # dev2bitmap(file="EpiPhyto_Post_v8.3.tif", type="tiffgray",height=3.5, width=3.5, res=200, font="Times", method="pdf", pointsize=12)
# dev.off()
# #*************************
#
# # TODO Change the plot for Meta POM (Figure 2)
#
# #Plot the composition of Metalimnetic POM
# LegendTitle <- list(c("A)", "B)", "C)", "D)"), c("E", "F", "G", "H")) #CHANGED added )'s
# # dev.new(height=3.5, width=3.5) #CHANGED I am changing the way the plot is saved-- now using pdf(), and then embedFonts to ensure that the fonts are embedded (uses GS)
# #Because these plots will be 2x2, the base cex is reduced by a factor of 0.83 (see ?par, mfrow). If the default point size is 12, a point size of 9 would be cex= 9/(12*0.83)
# pdf(file=paste("MetaPhyto_Post_", YearMix, "_", Version, ".pdf", sep=""), width=3.5, height=3.5, family="Times", pointsize=9)
# par(mfrow=c(2,2), las=1, mar=c(3,2.5,0.1,1), oma=c(0,0,0.2,0), cex=PubCex)
#
# TerrYLim <- range(density(bugsOut_pt1$sims.matrix[,"f[3]"], from=0, to=1)$y)*c(1, 1.15)
# plot.density(density(bugsOut_pt1$sims.matrix[,"f[3]"], from=0, to=1),xlab="", ylab="", main="", bty="l", xaxt="s", zero.line=FALSE, ylim=TerrYLim)
# title(main=LegendTitle[[1]][1], adj=0.025, line=-0.7, font.main=1, cex.main=PubCex) #CHANGED changed the adj from 1 to 0.1, added font.main=1, line from -0.5 to -1
# mtext("Terrestrial", side=3, line=-0.9, outer=FALSE, las=0, font=1, adj=PanelNameAdj[1], cex=PubCex) #CHANGED line from 0 to -1, deleted cex=0.85, changed font=3 to 1
# title(paste(round(bugsOut_pt1$mean[[1]][3]*100, 0), "%", sep=""), adj=0.1, line=-1.75, font.main=3, cex.main=PubCex) #CHANGED deleted cex.main=0.85,
#
# PdCYLim <- range(density(bugsOut_pt1$sims.matrix[,"P_dC_Meta"])$y)*c(1, 1.15)
# plot.density(density(bugsOut_pt1$sims.matrix[,"P_dC_Meta"]), main="", ylab="", xlab="", bty="l", xaxt="s", zero.line=FALSE, ylim=PdCYLim, xlim=c(-75 , 0))
# title(main=LegendTitle[[1]][3], adj=0.025, line=-0.7, font.main=1, cex.main=PubCex)
# mtext(expression(Phytoplankton~phantom()^13*C), side=3, line=-1.1, outer=FALSE, las=0, font=1, adj=PanelNameAdj[3], cex=PubCex)
# title(paste(round(bugsOut_pt1$mean$P_dC_Meta, 1), "", sep=""), adj=0.1, line=-1.75, font.main=3, cex.main=PubCex)
#
# PhytYLim <- range(density(bugsOut_pt1$sims.matrix[,"f[4]"], from=0, to=1)$y)*c(1, 1.15)
# plot.density(density(bugsOut_pt1$sims.matrix[,"f[4]"], from=0, to=1), main="", xlab="", ylab="", bty="l", xaxt="s", zero.line=FALSE, ylim=PhytYLim)
# title(main=LegendTitle[[1]][2], adj=0.025, line=-0.7, font.main=1, cex.main=PubCex)
# mtext("Phytoplankton", side=3, line=-0.9, outer=FALSE, las=0, font=1, adj=PanelNameAdj[2], cex=PubCex)
# title(paste(round(bugsOut_pt1$mean[[1]][4]*100, 0), "%", sep=""), adj=0.1, line=-1.75, font.main=3, cex.main=PubCex)
# mtext("Fraction of POM", side=1, line=2, cex=PubCex, font=1, outer=FALSE)
#
# PdNYLim <- range(density(bugsOut_pt1$sims.matrix[,"P_dN_Meta"])$y)*c(1, 1.15)
# plot.density(density(bugsOut_pt1$sims.matrix[,"P_dN_Meta"]), main="", ylab="", xlab="", bty="l", xaxt="s", zero.line=FALSE, ylim=PdNYLim)
# title(main=LegendTitle[[1]][4], adj=0.025, line=-0.7, font.main=1, cex.main=PubCex)
# mtext(expression(Phytoplankton~phantom()^15*N), side=3, line=-1.1, outer=FALSE, las=0, font=1, adj=PanelNameAdj[4], cex=PubCex)
# title(paste(round(bugsOut_pt1$mean$P_dN_Meta, 2), "", sep=""), adj=0.1, line=-1.75, font.main=3, cex.main=PubCex)
# mtext("Isotopic signature", side=1, line=2, cex=PubCex, font=1, outer=FALSE)
#
# mtext("Density", side=2, line=-1, font=1, las=0, outer=TRUE, cex=PubCex)
#
# # setwd("/Users/battrd/Documents/School&Work/WiscResearch/Isotopes_2010Analysis/Figures_v8.3")
# # dev2bitmap(file="MetaPhyto_Post_v8.3.tif", type="tiffgray",height=3.5, width=3.5, res=200, font="Times", method="pdf", pointsize=12)
# dev.off()
# #*************************
#
#
# setwd("/Users/battrd/Documents/School&Work/WiscResearch/Isotopes_2012Analysis")
# save(list=ls(), file=paste("AllObjs_Cons_Mixture_Ward2010&2012_", YearMix, "_", Version, ".RData", sep=""))
|
/oldScripts/Cons_Mixture_Ward2010&2012_v0.2.0.R
|
no_license
|
rBatt/DarkenedLake
|
R
| false
| false
| 35,554
|
r
|
#Ryan Batt
#23 April 2011
#What is POM made of?
#Given POM, what is a consumer made of?
#The purpose of this script is first calculate the constituent components of Ward POM from the summer of 2010.
#Next, I will determine the composition of a consumer.
#I begin with the simplifying assumption that POM is made of terrestrial (sedge and alder) and phytoplankton sources.
#I will also assume that the consumer is eating some combination of the following four sources: 1) Epi Phyto 2) Meta Phyto 3) Equal parts of Alder, Sedge, Tree 4) DOC
#Version 5:
#Intended to be the final version
#Does not do a massive simulation through possible source combinations
#Looks at 2 possible source combinations: 1 with the phytoplankton split into Epi and Meta and the macrophytes and periphyton grouped, and the other with the phytos grouped but periphyton in a group separate from the macrophytes
#Previous analyses had forgotten to remove the Watershield data point that was a "stem" (I think, anyway).
#I may need to treat the "Tree" variance difference in the future, because this is actually adding another layer of nesting within a "source"
#This version will use a larger number of chains and longer chain lengths, and will do cross-validation for the density plots
#The plots should look better overall
#There are several samples which will be automatically excluded from analysis:
#All the Meta B POC samples-- Meta B sampling was thwarted throughout the season by a massive patch of Chara
#The Hypo DOC sample-- it has an N signature that is quite different from the others
#The "Nija" sample b/c there was only 1 sample of it
#The watershield sample that was just a stem-- its deuterium was different from the "leaf" samples
#The DIC data has not been edited for these weird Meta B and Hypo E samples, but those values are not used in this at all
#Version 5.1:
#Commented out that bullshit with the terrestrial variance being copied for the pelagic epilimnion and pelagic metalimnion... zomg.
#Version 7.0: Changed the terrestrial end member to not include DOM (DOC). Also, I later changed the graphing of the phytoplankton posteriors to round to one less digit for carbon-- this is to only have 3 sig figs, and also to make sure the 13C peak for the epi didn't overlap with the estimate
#Version 8.0: Including a new data file for the isotopes, which now includes additional tree data. Averages for each species are taken from the Cascade database. For the trees, there are only one or two samples (if 2, it's just analytical replicates) per species for C/N, whereas there are a ton of deuterium samples typically. The sample number refers to the sample number for the C/N data.
#Found an error where the number of consumers in the ConsMix function was calculated as the number of columns, but it should have been the number of rows
#Version 8.1:
#Run with DOM as its own "terrestrial" source
#Version 8.2:
#Run with the "terrestrial" source as Alder, Sedge, and DOM
#Version 8.3:
#I am reformatting the Figures according to the editorial marks that I received on 10-May-2012
#Version 0.0.0 (10-Jan-2013): I am starting the versioning of this program over for the analysis of isotope data post-Aquashade manipulation
#Made changes in several places so that the analysis would be year-specific
#Automated the placement of the version and the figure folder name into several strings, given their specification near the beginning of this script
#Option to specify # of iterations
#Version 0.1.0 (11-Jan-2013): The previous version "worked" fine with the new data, but I kept estimating benthic contribution to the zooplankton, which I don't believe is actually happening. In an effort to correct this, I am changing this script to allow for consumer-specific groupings of sources. I don't want to just remove the option for zooplankton (etc.) to eat periphyton, I just think that this would be a less likely diet choice if the other 3 options were more appropriate. Regardless, the idea is to have the option to tailor the resource groupings to the specific consumer being analyzed.
# --- Script setup: configuration, libraries, and data load ---
# NOTE(review): rm(list=ls())/setwd() in scripts are discouraged in general,
# kept here to match the original workflow.
rm(list=ls())
graphics.off()
# Version string is embedded in output file and folder names below.
Version <- "v0.2.0"
FigureFolder <- paste("Figures_", Version, sep="")
YearMix <- 2010 #The year to use for consumers, POM, DOM, Water, Periphyton (everything except for terrestrial and macrophytes)
# MCMC chain length passed to bugs()/ConsMix().
Iterations <- 2000
#Select the top 2 if on Snow Leopard, the bottom 2 if on Leopard, and the selection doesn't matter if on a PC
WINE="/Applications/Darwine/Wine.bundle/Contents/bin/wine"
WINEPATH="/Applications/Darwine/Wine.bundle/Contents/bin/winepath"
# WINEPATH="/opt/local/bin/winepath"
# WINE="/opt/local/bin/wine"
library(R2WinBUGS)
setwd("/Users/Battrd/Documents/School&Work/WiscResearch/Isotopes_2010Analysis")
# Defines ConsMix(), the consumer mixing-model wrapper used later.
source("ConsMix_v6.R")
setwd("/Users/Battrd/Documents/School&Work/WiscResearch/Data/IsotopeData2012")
DataRaw <- read.csv("WardIsotopes_2010&2012_09Jan2013.csv", header=TRUE)
# Exclude known-bad samples (see header notes), the single "Nija" sample,
# and individual-fish rows (is.na(FishID)).
Data <- subset(DataRaw, Taxon!="Nija" & !is.element(SampleID, c("O-0362", "V-0270", "P-1202", "P-1166", "O-0382", "P-1165", "P-1206", "P-1238", "P-1239", "P-1243", "Z-1110", "Z-1115", "Z-1195", "Z-1170", "O-0405")) & is.na(FishID)) # SampleID!="O-0362" & SampleID!="V-0270" & SampleID!="P-1202" & SampleID!="P-1166")
# Month label for each sampling week (Week indexes into this vector).
Months <- c("May", "June", "July", "August")
#Calculate the algal end member from POM
# Taxa pooled into the "terrestrial" end member.
# NOTE(review): both branches are currently identical; the if/else is kept so
# the per-year source list can be changed independently (see commented tails).
if(YearMix==2010){
TSources <- c("Alder", "Sedge", "Tamarack", "Tree")#, "Tamarack") #c("Alder", "Sedge", "DOM")
}else{
TSources <- c("Alder", "Sedge", "Tamarack", "Tree")#, "Tamarack")
}
#Signature of the terrestrial source
nTS <- length(TSources)
# Per-taxon means/variances for each isotope, one row per terrestrial taxon.
TMeans <- data.frame("d13C"=rep(NA,nTS),"d15N"=rep(NA,nTS),"dD"=rep(NA,nTS), row.names=TSources)
TVars <- data.frame("d13C"=rep(NA,nTS),"d15N"=rep(NA,nTS),"dD"=rep(NA,nTS), row.names=TSources)
#Td13C_aov
# NOTE(review): Td15NObs/TdDObs are initialized here but not filled in this
# section -- confirm whether they are used later or are leftovers.
Td15NObs <- data.frame()
TdDObs <- data.frame() #matrix(ncol=nTS, dimnames=list(NULL,TSources))
for(i in 1:length(TSources)){
TMeans[i,] <- apply(subset(Data, Taxon==TSources[i], select=c("d13C","d15N","dD")),2,mean)
}
# Terrestrial end-member mean: mean of the per-taxon means (equal weighting).
dCNH_Terr_Mu <- apply(TMeans, 2, mean)
# Terrestrial end-member variance, one value per isotope. With multiple taxa,
# a one-way ANOVA among taxa decides whether to inflate the variance: if taxa
# differ (p <= 0.1), the between-taxon mean square is added to the within MS;
# otherwise only the within MS is used.
# NOTE(review): $Pr and $Mean rely on partial matching of the anova table's
# "Pr(>F)" and "Mean Sq" columns.
dCNH_Terr_Var <- data.frame("d13C"=NA, "d15N"=NA, "dD"=NA)
if(nTS>1){
Temp_d13C_aov <- anova(lm(d13C ~ Taxon, data=subset(Data, is.element(Taxon, TSources), select=c("Taxon","d13C"))))
if(Temp_d13C_aov$Pr[1] <= 0.1){
dCNH_Terr_Var["d13C"] <- sum(Temp_d13C_aov$Mean)
}else{
dCNH_Terr_Var["d13C"] <- Temp_d13C_aov$Mean[2]
}
Temp_d15N_aov <- anova(lm(d15N ~ Taxon, data=subset(Data, is.element(Taxon, TSources), select=c("Taxon","d15N"))))
if(Temp_d15N_aov$Pr[1] <= 0.1){
dCNH_Terr_Var["d15N"] <- sum(Temp_d15N_aov$Mean)
}else{
dCNH_Terr_Var["d15N"] <- Temp_d15N_aov$Mean[2]
}
Temp_dD_aov <- anova(lm(dD ~ Taxon, data=subset(Data, is.element(Taxon, TSources), select=c("Taxon","dD"))))
if(Temp_dD_aov$Pr[1] <= 0.1){
dCNH_Terr_Var["dD"] <- sum(Temp_dD_aov$Mean)
}else{
dCNH_Terr_Var["dD"] <- Temp_dD_aov$Mean[2]
}
}else{
# Single terrestrial taxon: just its sample variance per isotope.
dCNH_Terr_Var <- apply(subset(Data, is.element(Taxon, TSources), select=c("d13C", "d15N", "dD")), 2, var)
}
#Define the Terr objects to be used in the POM Mixture portion of the BUGS model
#**************************************
T_dX <- as.numeric(dCNH_Terr_Mu)
T_dX_Var <- as.numeric(dCNH_Terr_Var)
#**************************************
# Run the full mixing analysis once per study year. Stage 1 (BUGS "pt1")
# estimates epilimnetic and metalimnetic phytoplankton signatures from the POM
# mixture; the consumer section then estimates each consumer's diet under two
# alternative groupings of its candidate sources, per sampling week.
for(YearMix in c(2010, 2012)){
# TODO The water will need to be defined by year. Either stored in a higher dimensional object, or have separate objects for each year.
Water_dD_Mu <- mean(subset(Data, Type=="Water" & Year==YearMix, select="dD")[,])
Water_dD_Var <- var(subset(Data, Type=="Water" & Year==YearMix, select="dD")[,])
#Calculate Epi phyto deuterium prior from water dD
dD_Water_Epi <- subset(Data, Type=="Water" & Habitat=="Epi" & Year==YearMix, select="dD")[,]
dD_Water_Adj <- mean(c(-152.8, -172.4))#Fractionation range reported in Solomon et al. 2011
dD_Phyto_Epi_Mu <- mean(dD_Water_Epi + dD_Water_Adj)
#From Solomon et al. 2011 Appendix A: alpha phyto-water = mean ± sd = 0.84 ± 0.008; qnorm(p=.025, mean=-231.945, sd=5); var=25 it should have been ~70.. ask Grace.
dD_Phyto_Epi_Var <- var(dD_Water_Epi) + 25#variance of water + fractionation = variance of Phyto
dD_Phyto_Epi_Shape <- dD_Phyto_Epi_Var*0.1#dD_Phyto_Var~dgamma(shape,rate); shape when rate==0.1
#Signature of the Epi POM mixture
dCNH_POM_Epi <- subset(Data, Type=="POM" & Habitat=="Epi" & Year==YearMix, select=c("d13C","d15N","dD"))
POM_dX_Epi_Obs <- matrix(data=c(dCNH_POM_Epi[,1], dCNH_POM_Epi[,2], dCNH_POM_Epi[,3]), ncol=3)
POM_dX_Epi_Var <- apply(dCNH_POM_Epi, 2, var)
nPOM_Epi <- length(POM_dX_Epi_Obs[,1])
#Same POM and phyto calcs for Meta
#Calculate Algal deuterium prior from water dD
dD_Water_Meta <- subset(Data, Type=="Water" & Habitat=="Meta" & Year==YearMix, select="dD")[,]
dD_Phyto_Meta_Mu <- mean(dD_Water_Meta + dD_Water_Adj)
#From Solomon et al. 2011 Appendix A: alpha phyto-water = mean ± sd = 0.84 ± 0.008; qnorm(p=.025, mean=-231.945, sd=5); var=25
dD_Phyto_Meta_Var <- var(dD_Water_Meta) + 25#variance of water + variance of fractionation = variance of Phyto
dD_Phyto_Meta_Shape <- dD_Phyto_Meta_Var*0.1#dD_Phyto_Var~dgamma(shape,rate); shape when rate==0.1
#Signature of the Meta POM mixture
dCNH_POM_Meta <- subset(Data, Type=="POM" & Habitat=="Meta" & Year==YearMix, select=c("d13C","d15N","dD"))
POM_dX_Meta_Obs <- matrix(data=c(dCNH_POM_Meta[,1], dCNH_POM_Meta[,2], dCNH_POM_Meta[,3]), ncol=3)
POM_dX_Meta_Var <- apply(dCNH_POM_Meta, 2, var)
nPOM_Meta <- length(POM_dX_Meta_Obs[,1])
#Run BUGS Part 1: Using POM, calculate the isotopic signatures of epilimnetic and metalimnetic phytoplankton
SupplyBUGS_pt1 <- list(T_dX, T_dX_Var, dD_Phyto_Epi_Mu, dD_Phyto_Epi_Shape, POM_dX_Epi_Obs, nPOM_Epi, dD_Phyto_Meta_Mu, dD_Phyto_Meta_Shape, POM_dX_Meta_Obs, nPOM_Meta)
names(SupplyBUGS_pt1) <- strsplit(c("T_dX, T_dX_Var, dD_Phyto_Epi_Mu, dD_Phyto_Epi_Shape, POM_dX_Epi_Obs, nPOM_Epi, dD_Phyto_Meta_Mu, dD_Phyto_Meta_Shape, POM_dX_Meta_Obs, nPOM_Meta"), split=", ")[[1]]
ParamBUGS_pt1 <- c("f", "P_dC_Epi", "P_dN_Epi", "P_dD_Epi", "P_dC_Epi_Var", "P_dN_Epi_Var", "P_dD_Epi_Var", "P_dC_Meta", "P_dN_Meta", "P_dD_Meta", "P_dC_Meta_Var", "P_dN_Meta_Var", "P_dD_Meta_Var", "residSd")
BUGSfile_pt1 <- "/Users/Battrd/Documents/School&Work/WiscResearch/Isotopes_2010Analysis/mix_Cons_Mixture_Ward2010_v2_pt1.bug"
# On non-Windows platforms WinBUGS is run through WINE; WINE/WINEPATH are
# defined earlier in the script (outside this chunk).
if(.Platform$OS.type=="windows"){
bugsOut_pt1 <- bugs(SupplyBUGS_pt1, inits=NULL, ParamBUGS_pt1, BUGSfile_pt1, n.chains=8, n.iter=Iterations, program="winbugs", working.directory=NULL, debug=FALSE, clearWD=FALSE)
}else{
bugsOut_pt1 <- bugs(SupplyBUGS_pt1, inits=NULL, ParamBUGS_pt1, BUGSfile_pt1, n.chains=8, n.iter=Iterations, program="winbugs", working.directory=NULL, clearWD=TRUE, useWINE=TRUE, newWINE=TRUE, WINEPATH=WINEPATH, WINE=WINE, debug=FALSE)
}
#Extract and name relevant information concerning epilimnetic and metalimnetic phytoplankton
#**************************************
P_dX_Epi <- c(bugsOut_pt1$mean$P_dC_Epi, bugsOut_pt1$mean$P_dN_Epi, bugsOut_pt1$mean$P_dD_Epi)
P_dX_Epi_Var <- c(bugsOut_pt1$mean$P_dC_Epi_Var, bugsOut_pt1$mean$P_dN_Epi_Var, bugsOut_pt1$mean$P_dD_Epi_Var)
P_dX_Meta <- c(bugsOut_pt1$mean$P_dC_Meta, bugsOut_pt1$mean$P_dN_Meta, bugsOut_pt1$mean$P_dD_Meta)
P_dX_Meta_Var <- c(bugsOut_pt1$mean$P_dC_Meta_Var, bugsOut_pt1$mean$P_dN_Meta_Var, bugsOut_pt1$mean$P_dD_Meta_Var)
# Simulated phytoplankton "observations": sample the posterior, then
# standardize and rescale so the draws have exactly the posterior mean and
# variance for each isotope.
Sim_P_dX_Epi_Obs <- as.data.frame(matrix(data=rep(rnorm(n=nPOM_Epi),3), ncol=3, byrow=FALSE))
Sim_P_dX_Epi_Obs[,1] <- sample(bugsOut_pt1$sims.matrix[,"P_dC_Epi"], size=nPOM_Epi)
Sim_P_dX_Epi_Obs[,2] <- sample(bugsOut_pt1$sims.matrix[,"P_dN_Epi"], size=nPOM_Epi)
Sim_P_dX_Epi_Obs[,3] <- sample(bugsOut_pt1$sims.matrix[,"P_dD_Epi"], size=nPOM_Epi)
Sim_P_dX_Epi_Obs <- (Sim_P_dX_Epi_Obs-as.data.frame(matrix(data=rep(apply(Sim_P_dX_Epi_Obs,2,mean), nPOM_Epi), ncol=3, byrow=TRUE)))/as.data.frame(matrix(data=rep(apply(Sim_P_dX_Epi_Obs,2,sd), nPOM_Epi), ncol=3, byrow=TRUE))
Sim_P_dX_Epi_Obs[,1] <- Sim_P_dX_Epi_Obs[,1]*sqrt(P_dX_Epi_Var[1])+P_dX_Epi[1]
Sim_P_dX_Epi_Obs[,2] <- Sim_P_dX_Epi_Obs[,2]*sqrt(P_dX_Epi_Var[2])+P_dX_Epi[2]
Sim_P_dX_Epi_Obs[,3] <- Sim_P_dX_Epi_Obs[,3]*sqrt(P_dX_Epi_Var[3])+P_dX_Epi[3]
colnames(Sim_P_dX_Epi_Obs) <- c("d13C","d15N","dD")
Sim_P_dX_Epi_Obs <- cbind("Taxon"=rep("EpiPhyto",nPOM_Epi), Sim_P_dX_Epi_Obs)
Sim_P_dX_Meta_Obs <- as.data.frame(matrix(data=rep(rnorm(n=nPOM_Meta),3), ncol=3, byrow=FALSE))
Sim_P_dX_Meta_Obs[,1] <- sample(bugsOut_pt1$sims.matrix[,"P_dC_Meta"], size=nPOM_Meta)
Sim_P_dX_Meta_Obs[,2] <- sample(bugsOut_pt1$sims.matrix[,"P_dN_Meta"], size=nPOM_Meta)
Sim_P_dX_Meta_Obs[,3] <- sample(bugsOut_pt1$sims.matrix[,"P_dD_Meta"], size=nPOM_Meta)
Sim_P_dX_Meta_Obs <- (Sim_P_dX_Meta_Obs-as.data.frame(matrix(data=rep(apply(Sim_P_dX_Meta_Obs,2,mean), nPOM_Meta), ncol=3, byrow=TRUE)))/as.data.frame(matrix(data=rep(apply(Sim_P_dX_Meta_Obs,2,sd), nPOM_Meta), ncol=3, byrow=TRUE))
# NOTE(review): the next line standardizes a second time (the data were already
# centered/scaled just above, so this is roughly a no-op), and subtracting a
# length-3 vector from a data.frame recycles element-wise down columns rather
# than by column -- confirm whether this line is intentional; the Epi branch
# has no analogous line.
Sim_P_dX_Meta_Obs <- (Sim_P_dX_Meta_Obs-apply(Sim_P_dX_Meta_Obs,2,mean))/apply(Sim_P_dX_Meta_Obs,2,sd)
Sim_P_dX_Meta_Obs[,1] <- Sim_P_dX_Meta_Obs[,1]*sqrt(P_dX_Meta_Var[1])+P_dX_Meta[1]
Sim_P_dX_Meta_Obs[,2] <- Sim_P_dX_Meta_Obs[,2]*sqrt(P_dX_Meta_Var[2])+P_dX_Meta[2]
Sim_P_dX_Meta_Obs[,3] <- Sim_P_dX_Meta_Obs[,3]*sqrt(P_dX_Meta_Var[3])+P_dX_Meta[3]
colnames(Sim_P_dX_Meta_Obs) <- c("d13C","d15N","dD")
# Sim_P_dX_Meta_Obs <- cbind("Year"=YearMix, "Taxon"=rep("MetaPhyto",nPOM_Meta), Sim_P_dX_Meta_Obs)
Sim_P_dX_Meta_Obs <- cbind("Taxon"=rep("MetaPhyto",nPOM_Meta), Sim_P_dX_Meta_Obs)
#**************************************
#*****************************************************
#Begin for consumers and their respective sources
#*****************************************************
# Consumer lists differ slightly by year (YWP only sampled in 2010 per the
# commented-out entries); TL holds the assumed trophic level for each consumer.
if(YearMix==2010){
	Cons <- c("Calanoid", "Chaoborus", "Helisoma trivolvis") #, "PKS", "FHM", "YWP", "CMM", "BHD", "Mesocyclops", "DAC")
	TL <- c(1, 2, 1) #, 2, 2, 3, 2.5, 2.5, 1.5, 1)
	GraphTitle <- c("Skistodiaptomus oregonensis", "Chaoborus spp.", "Helisoma trivolvis") #, "Lepomis gibbosus", "Pimephales promelas", "Perca flavescens", "Umbra limi", "Ameiurus melas", "Mesocyclops spp.", "Phoxinus spp.")
}else{
	Cons <- c("Calanoid", "Chaoborus", "Helisoma trivolvis") #, "PKS", "FHM", "CMM", "BHD", "Mesocyclops", "DAC")
	TL <- c(1, 2, 1) #, 2, 2, 2.5, 2.5, 1.5, 1)
	GraphTitle <- c("Skistodiaptomus oregonensis", "Chaoborus spp.", "Helisoma trivolvis") #, "Lepomis gibbosus", "Pimephales promelas", "Umbra limi", "Ameiurus melas", "Mesocyclops spp.", "Phoxinus spp.")
}
# Named groupings of candidate diet sources; ConsChoices maps each consumer to
# two alternative 4-source scenarios (Grouping 1 and Grouping 2).
AllMacs <- c("Brasenia schreberi", "Chara", "Najas flexilis", "Nuphar variegata", "Nymphaea odorata", "Potamogeton amplifolius", "Potamogeton nodosus", "Potamogeton pusillus")
FloatMacs <- c("Brasenia schreberi", "Nuphar variegata", "Nymphaea odorata", "Potamogeton nodosus")
SubMacs <- c("Chara", "Najas flexilis", "Potamogeton amplifolius", "Potamogeton pusillus")
AllTerr <- c("Alder", "Sedge", "Tamarack", "Tree")
LocalTerr <- c("Alder", "Sedge", "Tamarack")
SourceOpts <- list("All Macrophytes"=AllMacs, "Floating Macrophytes"=FloatMacs, "Submersed Macrophytes"=SubMacs, "All Terrestrial"=AllTerr, "Local Terrestrial"=LocalTerr, "All Phytoplankton"=c("EpiPhyto", "MetaPhyto"), "Epi. Phytoplankton"="EpiPhyto", "Meta. Phytoplankton"="MetaPhyto", "DOM"="DOM", "Periphyton"="Periphyton")
ConsChoices <- list(
	"Calanoid"=list(c("All Terrestrial", "Epi. Phytoplankton", "Periphyton", "DOM") , c("Local Terrestrial", "Epi. Phytoplankton", "Meta. Phytoplankton", "DOM")),
	"Chaoborus"=list(c("All Terrestrial", "Epi. Phytoplankton", "Meta. Phytoplankton", "DOM") , c("Local Terrestrial", "Epi. Phytoplankton", "Meta. Phytoplankton", "DOM")),
	"Helisoma trivolvis"=list(c("All Terrestrial", "All Macrophytes", "Periphyton", "DOM"), c("Local Terrestrial", "Floating Macrophytes", "Submersed Macrophytes", "Periphyton"))
)
# Pool measured sources with the simulated phytoplankton observations.
SourceData <- subset(Data, Trophic==0 & Taxon!="POM" & Year==YearMix | is.element(Type, c("Macrophyte", "Terrestrial")))
SourceTaxa <- as.character(unique(SourceData[,"Taxon"]))
Source_Means <- matrix(ncol=3, nrow=length(SourceTaxa), dimnames=list(SourceTaxa,NULL))
Source_Vars <- matrix(ncol=3, nrow=length(SourceTaxa), dimnames=list(SourceTaxa,NULL))
for(i in 1:length(SourceTaxa)){
	Source_Means[i,] <- apply(subset(SourceData, Taxon==SourceTaxa[i], select=c("d13C","d15N","dD")), 2, mean)
	Source_Vars[i,] <- apply(subset(SourceData, Taxon==SourceTaxa[i], select=c("d13C","d15N","dD")), 2, var)
}
Source_Means <- rbind(Source_Means, "EpiPhyto"=P_dX_Epi, "MetaPhyto"=P_dX_Meta)
Source_Vars <- rbind(Source_Vars, "EpiPhyto"=P_dX_Epi_Var, "MetaPhyto"=P_dX_Meta_Var)
# nSrcs <- length(SourceNames[[f_Src]])
SourceData_dX_Obs <- SourceData[,c("Taxon","d13C","d15N","dD")]
SourceData_dX_Obs <- rbind(SourceData_dX_Obs, Sim_P_dX_Epi_Obs, Sim_P_dX_Meta_Obs)
# For each consumer, fit the diet mixing model under both source groupings.
# SourceN / SourceN_Mean / SourceN_Var (N = 1..4) are built dynamically with
# assign()/get(); multi-taxon sources get an ANOVA-pooled variance, exactly as
# for the terrestrial end-member above.
for(g_Cons in 1:length(Cons)){
	TempoCons <- Cons[g_Cons]
	SourceNames <- ConsChoices[[TempoCons]]
	FirstSources <- list(SourceOpts[[SourceNames[[1]][1]]], SourceOpts[[SourceNames[[2]][1]]])
	SecondSources <- list(SourceOpts[[SourceNames[[1]][2]]], SourceOpts[[SourceNames[[2]][2]]])
	ThirdSources <- list(SourceOpts[[SourceNames[[1]][3]]], SourceOpts[[SourceNames[[2]][3]]])
	FourthSources <- list(SourceOpts[[SourceNames[[1]][4]]], SourceOpts[[SourceNames[[2]][4]]])
	for(f_Src in 1:2){
		Source1 <- FirstSources[[f_Src]]
		Source2 <- SecondSources[[f_Src]]
		Source3 <- ThirdSources[[f_Src]]
		Source4 <- FourthSources[[f_Src]]
		nSrcs <- length(SourceNames[[f_Src]])
		for(i in 1:nSrcs){
			TempName_Source <- paste("Source", i, sep="")
			TempName_Mean <- paste(paste("Source", paste(i, "_Mean", sep=""), sep=""))
			TempName_Var <- paste(paste("Source", paste(i, "_Var", sep=""), sep=""))
			if(length(get(TempName_Source))>1){assign(TempName_Mean, apply(Source_Means[get(TempName_Source),], 2, mean))}else{assign(TempName_Mean, Source_Means[get(TempName_Source),])}
			if(length(get(TempName_Source))>1){
				assign(TempName_Var, data.frame("d13C"=NA, "d15N"=NA, "dD"=NA))#This is to clear the temporary data frame at the beginning of each loop
				Temp_d13C_aov <- anova(lm(d13C ~ Taxon, data=subset(SourceData_dX_Obs, is.element(Taxon, get(TempName_Source)), select=c("Taxon","d13C"))))
				if(Temp_d13C_aov$Pr[1] <= 0.1){
					Temp_d13C_Var <- sum(Temp_d13C_aov$Mean)
				}else{
					Temp_d13C_Var <- Temp_d13C_aov$Mean[2]
				}
				Temp_d15N_aov <- anova(lm(d15N ~ Taxon, data=subset(SourceData_dX_Obs, is.element(Taxon, get(TempName_Source)), select=c("Taxon","d15N"))))
				if(Temp_d15N_aov$Pr[1] <= 0.1){
					Temp_d15N_Var <- sum(Temp_d15N_aov$Mean)
				}else{
					Temp_d15N_Var <- Temp_d15N_aov$Mean[2]
				}
				Temp_dD_aov <- anova(lm(dD ~ Taxon, data=subset(SourceData_dX_Obs, is.element(Taxon, get(TempName_Source)), select=c("Taxon","dD"))))
				if(Temp_dD_aov$Pr[1] <= 0.1){
					Temp_dD_Var <- sum(Temp_dD_aov$Mean)
				}else{
					Temp_dD_Var <- Temp_dD_aov$Mean[2]
				}
				assign(TempName_Var, c(Temp_d13C_Var, Temp_d15N_Var, Temp_dD_Var))
			}else{
				assign(TempName_Var, Source_Vars[get(TempName_Source),])
			}
		}#Finish the loop that handles each source (1 through 4) one at a time for this particular set of sources for this consumer
		#Then collect the source means and variances from the previous loop; the following could have been condensed into previous loop.
		Srcs_dX_Ward <- c()
		Srcs_dX_Var_Ward <- c()
		for(i in 1:nSrcs){
			TempName_Mean <- paste(paste("Source", paste(i, "_Mean", sep=""), sep=""))
			TempName_Var <- paste(paste("Source", paste(i, "_Var", sep=""), sep=""))
			Srcs_dX_Ward <- cbind(Srcs_dX_Ward, get(TempName_Mean))
			Srcs_dX_Var_Ward <- cbind(Srcs_dX_Var_Ward, get(TempName_Var))
		}
		# TODO This is where I need to begin separating out the consumer resource use by week/ year. All I have to do is add 2 more levels to the loop (1 level for Year, 1 level for week [/ all weeks at once]), change the name of "Temp_BugOut" to reflect where the loop is in these new levels (just like it does for g_Cons and f_Src... actually, I might even want to remove f_Src, or just change it to 1:1 for now). Then I'll be creating a new object for each level I break the analysis down. So for each consumer I can have each year analyzed as a whole and on a per-sampling-week basis. Later, I can make plots similar to how I did before, except instead of having the 2 columns be for Grouping 1 and Grouping 2, I can have the 2 columns be for 2010 and 2012. Then, instead of having just one density line, I can have a 1 + W density lines, where W is the number of weeks sampled, and the extra line being the answer you would get if you pooled all the samples from that year together.
		# Fit the mixing model per sampling week; posteriors accumulate into
		# ResourceUse (Year/Month/Consumer/Grouping + 4 diet-fraction columns).
		# Months and ConsMix() are defined earlier in the script.
		ConsWeeks <- unique(subset(Data, Taxon==Cons[g_Cons] & Year==YearMix)[,"Week"])
		for(WK in ConsWeeks){
			ThisMonth <- Months[WK]
			Temp_BugOut <- paste("bugsOut_", Cons[g_Cons], "_SrcComb", f_Src, "_",ThisMonth, sep="")
			# Temp_BugOut <- paste("bugsOut", paste(Cons[g_Cons], paste("SrcComb", f_Src, sep=""), sep="_"), sep="_")
			Cons_Data <- subset(Data, Taxon==Cons[g_Cons] & Year==YearMix & Week==WK, select=c("Trophic","d13C","d15N","dD"))
			Cons_dX_Obs <- matrix(data=c(Cons_Data[,2], Cons_Data[,3], Cons_Data[,4]), ncol=3)
			ConsName <- as.character(subset(Data, Taxon==Cons[g_Cons] & Year==YearMix & Week==WK, select=Type)[1,1])#There should be a better way to do this...
			assign(Temp_BugOut, ConsMix(Cons_dX_Obs=Cons_dX_Obs, TL=TL[g_Cons], Srcs_dX=Srcs_dX_Ward, Srcs_dX_Var=Srcs_dX_Var_Ward, Water_dD_Mu, Water_dD_Var, FractModel=TRUE, SrcNames=SourceNames[[f_Src]], ConsName=ConsName, GraphTitle=GraphTitle[g_Cons], WINE=WINE, WINEPATH= WINEPATH, nChains=8, ChainLength=Iterations, Plot=FALSE))
			if(g_Cons==1 & f_Src==1 & WK==1 & YearMix==2010){
				ResourceUse <- data.frame("Year"=YearMix, "Month"=ThisMonth, "Consumer"=Cons[g_Cons], "Grouping"=f_Src, get(Temp_BugOut)$sims.matrix[,1:4])
			}else{
				TempoResourceUse <- data.frame("Year"=YearMix, "Month"=ThisMonth, "Consumer"=Cons[g_Cons], "Grouping"=f_Src, get(Temp_BugOut)$sims.matrix[,1:4])
				ResourceUse <- rbind(ResourceUse, TempoResourceUse)
			}
		}
	}#Finish loop the loop that handles the two sets of sources for this particular consumer
}#Finish loop that estimates resource use for each consumer under 2 scenarios of available resources/ grouping of resources
}#End Year loop
# Compare resource-use posteriors between years for each consumer under the
# chosen source-grouping scenario: one 2x2 figure per consumer, one panel per
# diet source, month-by-year boxplots (2010 in red, 2012 in blue).
GroupChoose <- 2
for(i in seq_along(Cons)){
	ThisRU <- droplevels(subset(ResourceUse, Consumer==Cons[i] & Grouping==GroupChoose))
	# Look the consumer up by name rather than by position so the mapping cannot
	# silently drift if Cons and ConsChoices are reordered.
	ResourceNames <- ConsChoices[[Cons[i]]][[GroupChoose]]
	# Axis labels: every month crossed with every year, in sorted order.
	# NOTE(review): the per-year replication counts below assume both years
	# sampled the same set of months -- confirm for consumers with uneven weeks.
	MoChar <- as.character(sort(unique(ThisRU[,"Month"])))
	YeNum <- as.numeric(sort(unique(ThisRU[,"Year"])))
	TheseMonths <- as.character(expand.grid(MoChar, YeNum)[,1])
	RepYearCol <- length(TheseMonths)/2
	Rep2010 <- RepYearCol
	Rep2012 <- RepYearCol
	BoxCols <- c(rep("#FA807225", Rep2010), rep("#3A5FCD25", Rep2012))
	BoxBorders <- c(rep("red", Rep2010), rep("blue", Rep2012))
	dev.new(width=8, height=7)
	par(mfrow=c(2,2), mar=c(2.5,4,1,1), oma=c(0,0,2,0))
	# One panel per diet fraction (columns DietF.1. ... DietF.4. of ResourceUse).
	for(j in 1:4){
		boxplot(as.formula(paste0("DietF.", j, ".~Month*Year")), data=ThisRU, col=BoxCols, border=BoxBorders, names=TheseMonths, outline=FALSE, ylim=c(0,1))
		mtext(ResourceNames[j], side=2, line=2.5)
	}
	mtext(Cons[i], side=3, line=0, outer=TRUE)
}
#
# # TODO Change plots --- but these don't go in the paper, so maybe leave alone for now
# graphics.off()
# GroupingTitles <- c(expression(underline(bold(Grouping~1))), expression(underline(bold(Grouping~2))))
# LegendTitle <- list(c("A", "B", "C", "D"), c("E", "F", "G", "H"))
# setwd(paste("/Users/battrd/Documents/School&Work/WiscResearch/Isotopes_2012Analysis/",FigureFolder,sep=""))
# for(g_Cons in 1:length(Cons)){
# # dev.new(height=7, width=3.5)
# pdf(file=paste(paste(gsub(" ", "_", GraphTitle[g_Cons]), "_", YearMix, "_", Version, sep=""), ".pdf", sep=""), height=7, width=3.5, pointsize=9)
# par(mfcol=c(4,2), family="Times", las=1, mar=c(2.1,2.1,1.1,1.1), oma=c(2,2,5,0), cex=1)
#
# for(f_Src in 1:2){
# # Temp_BugOut <- paste("bugsOut", paste(Cons[g_Cons], paste("SrcComb", f_Src, sep=""), sep="_"), sep="_")\
# paste("bugsOut_", Cons[g_Cons], "_SrcComb", f_Src, "_",ThisMonth, sep="")
#
# TempoCons <- Cons[g_Cons]
# SourceNames <- ConsChoices[[TempoCons]]
#
# #Plot the consumer diet
# plot.density(density(get(Temp_BugOut)$sims.matrix[,1], from=0, to=1, bw="nrd0"), xlab="", ylab="", main="", bty="l", xaxt="n", zero.line=FALSE)
# title(main=LegendTitle[[f_Src]][1], adj=1, line=-0.5)
# mtext(paste(SourceNames[[f_Src]][1], paste(round(get(Temp_BugOut)$mean[[1]][1]*100, 0), "%", sep=""), sep=", "), side=3, line=0.5, outer=FALSE, las=0, font=3, cex=0.75)
# mtext(GroupingTitles[f_Src], outer=FALSE, line=2.25, cex=0.85)
#
# plot.density(density(get(Temp_BugOut)$sims.matrix[,2], from=0, to=1, bw="nrd0"), main="", ylab="", xlab="", bty="l", xaxt="n", zero.line=FALSE)
# title(main=LegendTitle[[f_Src]][2], adj=1, line=-0.5)
# mtext(paste(SourceNames[[f_Src]][2], paste(round(get(Temp_BugOut)$mean[[1]][2]*100, 0), "%", sep=""), sep=", "), side=3, line=0.5, outer=FALSE, las=0, font=3, cex=0.75)
#
#
# plot.density(density(get(Temp_BugOut)$sims.matrix[,3], from=0, to=1, bw="nrd0"), main="", xlab="Percent Diet", ylab="", bty="l", xaxt="n", zero.line=FALSE)
# title(main=LegendTitle[[f_Src]][3], adj=1, line=-0.5)
# mtext(paste(SourceNames[[f_Src]][3], paste(round(get(Temp_BugOut)$mean[[1]][3]*100, 0), "%", sep=""), sep=", "), side=3, line=0.5, outer=FALSE, las=0, font=3, cex=0.75)
#
#
# plot.density(density(get(Temp_BugOut)$sims.matrix[,4], from=0, to=1, bw="nrd0"), main="", ylab="", xlab="Percent Diet", bty="l", xaxt="s", zero.line=FALSE)
# title(main=LegendTitle[[f_Src]][4], adj=1, line=-0.5)
# mtext(paste(SourceNames[[f_Src]][4], paste(round(get(Temp_BugOut)$mean[[1]][4]*100, 0), "%", sep=""), sep=", "), side=3, line=0.5, outer=FALSE, las=0, font=3, cex=0.75)
#
# #*************************
# }
# mtext("Fraction of Diet", side=1, line=0.5, font=2, outer=TRUE, cex=0.85)
# mtext("Density", side=2, line=0.5, font=2, las=0, outer=TRUE, cex=0.85)
# if(GraphTitle[g_Cons]!="Chaoborus spp."){mtext(GraphTitle[g_Cons], side=3, line=3, font=4, cex=1, outer=TRUE)}else{mtext(expression(bolditalic(Chaoborus)~bold(spp.)), side=3, line=3, cex=1, outer=TRUE)}
# # setwd(paste("/Users/battrd/Documents/School&Work/WiscResearch/Isotopes_2012Analysis/",FigureFolder,sep=""))
# # dev2bitmap(file=paste(paste(gsub(" ", "_", GraphTitle[g_Cons]), Version, sep=""), ".tif", sep=""), type="tiffgray",height=7, width=3.5, res=200, font="Times", method="pdf", pointsize=12)
# dev.off()
#
#
# }
# setwd("/Users/battrd/Documents/School&Work/WiscResearch/Isotopes_2012Analysis")
# save(list=c("DataRaw", "Sim_P_dX_Meta_Obs", "Sim_P_dX_Epi_Obs"), file=paste("Data+Phyto_NoTree_", YearMix, "_", Version, ".RData",sep=""))
#
# setwd(paste("/Users/battrd/Documents/School&Work/WiscResearch/Isotopes_2012Analysis/",FigureFolder,sep=""))
# # TODO Change the plot for Epi POM (Figure 1)
# #Plot the composition of POM
#
# PubCex=1 #(9/(12*0.83))
# PanelNameAdj <- c(0.25, 0.33, 0.55, 0.58)
#
# #Plot the composition of Epilimnetic POM
# LegendTitle <- list(c("A)", "B)", "C)", "D)"), c("E", "F", "G", "H")) #CHANGED added )'s
# # dev.new(height=3.5, width=3.5) #CHANGED I am changing the way the the plot is saved-- now using pdf(), and then embedFonts to ensure that the fonts are embedded (uses GS)
# #Because these plots will be 2x2, the base cex is reduced by a factor of 0.83 (see ?par, mfrow). If the default point size is 12, a point size of 9 would be cex= 9/(12*0.83)
# pdf(file=paste("EpiPhyto_Post_", YearMix, "_", Version, ".pdf", sep=""), width=3.5, height=3.5, family="Times", pointsize=9)
# par(mfrow=c(2,2), las=1, mar=c(3,2.5,0.1,1), oma=c(0,0,0.2,0), cex=PubCex)
#
# TerrYLim <- range(density(bugsOut_pt1$sims.matrix[,"f[1]"], from=0, to=1)$y)*c(1, 1.15)
# plot.density(density(bugsOut_pt1$sims.matrix[,"f[1]"], from=0, to=1),xlab="", ylab="", main="", bty="l", xaxt="s", zero.line=FALSE, ylim=TerrYLim)
# title(main=LegendTitle[[1]][1], adj=0.025, line=-0.7, font.main=1, cex.main=PubCex) #CHANGED changed the adj from 1 to 0.1, added font.main=1, line from -0.5 to -1
# mtext("Terrestrial", side=3, line=-0.9, outer=FALSE, las=0, font=1, adj=PanelNameAdj[1], cex=PubCex) #CHANGED line from 0 to -1, deleted cex=0.85, changed font=3 to 1
# title(paste(round(bugsOut_pt1$mean[[1]][1]*100, 0), "%", sep=""), adj=0.1, line=-1.75, font.main=3, cex.main=PubCex) #CHANGED deleted cex.main=0.85,
#
# PdCYLim <- range(density(bugsOut_pt1$sims.matrix[,"P_dC_Epi"])$y)*c(1, 1.15)
# plot.density(density(bugsOut_pt1$sims.matrix[,"P_dC_Epi"]), main="", ylab="", xlab="", bty="l", xaxt="s", zero.line=FALSE, ylim=PdCYLim)
# title(main=LegendTitle[[1]][3], adj=0.025, line=-0.7, font.main=1, cex.main=PubCex)
# mtext(expression(Phytoplankton~phantom()^13*C), side=3, line=-1.1, outer=FALSE, las=0, font=1, adj=PanelNameAdj[3], cex=PubCex)
# title(paste(round(bugsOut_pt1$mean$P_dC_Epi, 1), "", sep=""), adj=0.1, line=-1.75, font.main=3, cex.main=PubCex)
#
# PhytYLim <- range(density(bugsOut_pt1$sims.matrix[,"f[2]"], from=0, to=1)$y)*c(1, 1.15)
# plot.density(density(bugsOut_pt1$sims.matrix[,"f[2]"], from=0, to=1), main="", xlab="", ylab="", bty="l", xaxt="s", zero.line=FALSE, ylim=PhytYLim)
# title(main=LegendTitle[[1]][2], adj=0.025, line=-0.7, font.main=1, cex.main=PubCex)
# mtext("Phytoplankton", side=3, line=-0.9, outer=FALSE, las=0, font=1, adj=PanelNameAdj[2], cex=PubCex)
# title(paste(round(bugsOut_pt1$mean[[1]][2]*100, 0), "%", sep=""), adj=0.1, line=-1.75, font.main=3, cex.main=PubCex)
# mtext("Fraction of POM", side=1, line=2, cex=PubCex, font=1, outer=FALSE)
#
# PdNYLim <- range(density(bugsOut_pt1$sims.matrix[,"P_dN_Epi"])$y)*c(1, 1.15)
# plot.density(density(bugsOut_pt1$sims.matrix[,"P_dN_Epi"]), main="", ylab="", xlab="", bty="l", xaxt="s", zero.line=FALSE, ylim=PdNYLim)
# title(main=LegendTitle[[1]][4], adj=0.025, line=-0.7, font.main=1, cex.main=PubCex)
# mtext(expression(Phytoplankton~phantom()^15*N), side=3, line=-1.1, outer=FALSE, las=0, font=1, adj=PanelNameAdj[4], cex=PubCex)
# title(paste(round(bugsOut_pt1$mean$P_dN_Epi, 2), "", sep=""), adj=0.1, line=-1.75, font.main=3, cex.main=PubCex)
# mtext("Isotopic signature", side=1, line=2, cex=PubCex, font=1, outer=FALSE)
#
# mtext("Density", side=2, line=-1, font=1, las=0, outer=TRUE, cex=PubCex)
#
# # setwd("/Users/battrd/Documents/School&Work/WiscResearch/Isotopes_2010Analysis/Figures_v8.3")
# # dev2bitmap(file="EpiPhyto_Post_v8.3.tif", type="tiffgray",height=3.5, width=3.5, res=200, font="Times", method="pdf", pointsize=12)
# dev.off()
# #*************************
#
# # TODO Change the plot for Meta POM (Figure 2)
#
# #Plot the composition of Metalimnetic POM
# LegendTitle <- list(c("A)", "B)", "C)", "D)"), c("E", "F", "G", "H")) #CHANGED added )'s
# # dev.new(height=3.5, width=3.5) #CHANGED I am changing the way the the plot is saved-- now using pdf(), and then embedFonts to ensure that the fonts are embedded (uses GS)
# #Because these plots will be 2x2, the base cex is reduced by a factor of 0.83 (see ?par, mfrow). If the default point size is 12, a point size of 9 would be cex= 9/(12*0.83)
# pdf(file=paste("MetaPhyto_Post_", YearMix, "_", Version, ".pdf", sep=""), width=3.5, height=3.5, family="Times", pointsize=9)
# par(mfrow=c(2,2), las=1, mar=c(3,2.5,0.1,1), oma=c(0,0,0.2,0), cex=PubCex)
#
# TerrYLim <- range(density(bugsOut_pt1$sims.matrix[,"f[3]"], from=0, to=1)$y)*c(1, 1.15)
# plot.density(density(bugsOut_pt1$sims.matrix[,"f[3]"], from=0, to=1),xlab="", ylab="", main="", bty="l", xaxt="s", zero.line=FALSE, ylim=TerrYLim)
# title(main=LegendTitle[[1]][1], adj=0.025, line=-0.7, font.main=1, cex.main=PubCex) #CHANGED changed the adj from 1 to 0.1, added font.main=1, line from -0.5 to -1
# mtext("Terrestrial", side=3, line=-0.9, outer=FALSE, las=0, font=1, adj=PanelNameAdj[1], cex=PubCex) #CHANGED line from 0 to -1, deleted cex=0.85, changed font=3 to 1
# title(paste(round(bugsOut_pt1$mean[[1]][3]*100, 0), "%", sep=""), adj=0.1, line=-1.75, font.main=3, cex.main=PubCex) #CHANGED deleted cex.main=0.85,
#
# PdCYLim <- range(density(bugsOut_pt1$sims.matrix[,"P_dC_Meta"])$y)*c(1, 1.15)
# plot.density(density(bugsOut_pt1$sims.matrix[,"P_dC_Meta"]), main="", ylab="", xlab="", bty="l", xaxt="s", zero.line=FALSE, ylim=PdCYLim, xlim=c(-75 , 0))
# title(main=LegendTitle[[1]][3], adj=0.025, line=-0.7, font.main=1, cex.main=PubCex)
# mtext(expression(Phytoplankton~phantom()^13*C), side=3, line=-1.1, outer=FALSE, las=0, font=1, adj=PanelNameAdj[3], cex=PubCex)
# title(paste(round(bugsOut_pt1$mean$P_dC_Meta, 1), "", sep=""), adj=0.1, line=-1.75, font.main=3, cex.main=PubCex)
#
# PhytYLim <- range(density(bugsOut_pt1$sims.matrix[,"f[4]"], from=0, to=1)$y)*c(1, 1.15)
# plot.density(density(bugsOut_pt1$sims.matrix[,"f[4]"], from=0, to=1), main="", xlab="", ylab="", bty="l", xaxt="s", zero.line=FALSE, ylim=PhytYLim)
# title(main=LegendTitle[[1]][2], adj=0.025, line=-0.7, font.main=1, cex.main=PubCex)
# mtext("Phytoplankton", side=3, line=-0.9, outer=FALSE, las=0, font=1, adj=PanelNameAdj[2], cex=PubCex)
# title(paste(round(bugsOut_pt1$mean[[1]][4]*100, 0), "%", sep=""), adj=0.1, line=-1.75, font.main=3, cex.main=PubCex)
# mtext("Fraction of POM", side=1, line=2, cex=PubCex, font=1, outer=FALSE)
#
# PdNYLim <- range(density(bugsOut_pt1$sims.matrix[,"P_dN_Meta"])$y)*c(1, 1.15)
# plot.density(density(bugsOut_pt1$sims.matrix[,"P_dN_Meta"]), main="", ylab="", xlab="", bty="l", xaxt="s", zero.line=FALSE, ylim=PdNYLim)
# title(main=LegendTitle[[1]][4], adj=0.025, line=-0.7, font.main=1, cex.main=PubCex)
# mtext(expression(Phytoplankton~phantom()^15*N), side=3, line=-1.1, outer=FALSE, las=0, font=1, adj=PanelNameAdj[4], cex=PubCex)
# title(paste(round(bugsOut_pt1$mean$P_dN_Meta, 2), "", sep=""), adj=0.1, line=-1.75, font.main=3, cex.main=PubCex)
# mtext("Isotopic signature", side=1, line=2, cex=PubCex, font=1, outer=FALSE)
#
# mtext("Density", side=2, line=-1, font=1, las=0, outer=TRUE, cex=PubCex)
#
# # setwd("/Users/battrd/Documents/School&Work/WiscResearch/Isotopes_2010Analysis/Figures_v8.3")
# # dev2bitmap(file="MetaPhyto_Post_v8.3.tif", type="tiffgray",height=3.5, width=3.5, res=200, font="Times", method="pdf", pointsize=12)
# dev.off()
# #*************************
#
#
# setwd("/Users/battrd/Documents/School&Work/WiscResearch/Isotopes_2012Analysis")
# save(list=ls(), file=paste("AllObjs_Cons_Mixture_Ward2010&2012_", YearMix, "_", Version, ".RData", sep=""))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R
\name{plotLocalScalingExp}
\alias{plotLocalScalingExp}
\title{Plot local scaling exponents}
\usage{
plotLocalScalingExp(x, ...)
}
\arguments{
\item{x}{An object containing all the information needed for the estimate of
the chaotic invariant.}
\item{...}{Additional graphical parameters.}
}
\description{
Plots the local scaling exponents of the correlation sum or
the average Shannon information (when computing information dimension).
}
\references{
H. Kantz and T. Schreiber: Nonlinear Time Series Analysis
(Cambridge University Press)
}
\author{
Constantino A. Garcia
}
|
/man/plotLocalScalingExp.Rd
|
no_license
|
constantino-garcia/nonlinearTseries
|
R
| false
| true
| 667
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R
\name{plotLocalScalingExp}
\alias{plotLocalScalingExp}
\title{Plot local scaling exponents}
\usage{
plotLocalScalingExp(x, ...)
}
\arguments{
\item{x}{An object containing all the information needed for the estimate of
the chaotic invariant.}
\item{...}{Additional graphical parameters.}
}
\description{
Plots the local scaling exponents of the correlation sum or
the average Shannon information (when computing information dimension).
}
\references{
H. Kantz and T. Schreiber: Nonlinear Time Series Analysis
(Cambridge University Press)
}
\author{
Constantino A. Garcia
}
|
# Built-in R datasets: quick ways to inspect mtcars.
data('mtcars')
dim(mtcars)
fix(mtcars)
View(mtcars)
summary(mtcars)
help(mtcars)
# Loading data from XLS/XLSX files via the gdata package.
# Use TRUE rather than the reassignable shorthand T.
install.packages('gdata', dependencies = TRUE)
install.packages('gtools', dependencies = TRUE)
library('gdata')
# Path to the Excel workbook (relative to the working directory).
arquivo <- file.path('teste4.xlsx')
arquivo
# Inspect the workbook's sheets before reading.
sheetCount(arquivo)
sheetNames(arquivo)
# Sheets can be addressed by index or by name.
clientes <- read.xls('teste4.xlsx', verbose = TRUE, perl = 'perl', sheet = 1)
produtos <- read.xls('teste4.xlsx', verbose = TRUE, perl = 'perl', sheet = 'produtos')
enderecos <- read.xls('teste4.xlsx', verbose = TRUE, perl = 'perl', sheet = 3)
clientes
produtos
enderecos
View(enderecos)
clientes$nome
produtos$preco
enderecos$bairro
# XLS example bundled with the gdata package itself.
xlsfile <- file.path(path.package('gdata'), 'xls', 'iris.xls')
irisxls <- read.xls(xlsfile)
irisxls
dim(irisxls)
head(irisxls)
sheetCount(xlsfile)
|
/aula02/entrada_arquivo2.R
|
no_license
|
yorae39/IA-COTI
|
R
| false
| false
| 893
|
r
|
# Built-in R datasets: quick ways to inspect mtcars.
data('mtcars')
dim(mtcars)
fix(mtcars)
View(mtcars)
summary(mtcars)
help(mtcars)
# Loading data from XLS/XLSX files via the gdata package.
# Use TRUE rather than the reassignable shorthand T.
install.packages('gdata', dependencies = TRUE)
install.packages('gtools', dependencies = TRUE)
library('gdata')
# Path to the Excel workbook (relative to the working directory).
arquivo <- file.path('teste4.xlsx')
arquivo
# Inspect the workbook's sheets before reading.
sheetCount(arquivo)
sheetNames(arquivo)
# Sheets can be addressed by index or by name.
clientes <- read.xls('teste4.xlsx', verbose = TRUE, perl = 'perl', sheet = 1)
produtos <- read.xls('teste4.xlsx', verbose = TRUE, perl = 'perl', sheet = 'produtos')
enderecos <- read.xls('teste4.xlsx', verbose = TRUE, perl = 'perl', sheet = 3)
clientes
produtos
enderecos
View(enderecos)
clientes$nome
produtos$preco
enderecos$bairro
# XLS example bundled with the gdata package itself.
xlsfile <- file.path(path.package('gdata'), 'xls', 'iris.xls')
irisxls <- read.xls(xlsfile)
irisxls
dim(irisxls)
head(irisxls)
sheetCount(xlsfile)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/link_function.R
\name{func_link}
\alias{func_link}
\title{Link functions}
\usage{
func_link(link)
}
\arguments{
\item{link}{the link function}
}
\value{
A list of functions subject to a link function
}
\description{
Builds the collection of functions required to work with the specified link function.
}
|
/man/func_link.Rd
|
no_license
|
YuqiTian35/multipledls
|
R
| false
| true
| 369
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/link_function.R
\name{func_link}
\alias{func_link}
\title{Link functions}
\usage{
func_link(link)
}
\arguments{
\item{link}{the link function}
}
\value{
A list of functions subject to a link function
}
\description{
Builds the collection of functions required to work with the specified link function.
}
|
# Configure CRAN and Bioconductor mirrors.
# BUG FIX: the original wrote `options(repos<- c(...))` and
# `options("BioC_mirror"<- "...")`. Those are assignment expressions passed as
# unnamed arguments: they create stray variables and never set the options.
# The options must be passed as named arguments.
options(repos = c(CRAN = "https://mirrors.tuna.tsinghua.edu.cn/CRAN/"))
options(BioC_mirror = "https://mirrors.ustc.edu.cn/bioc/")
install.packages(c("devtools", "curl")) ##Installs devtools and the MCPcounter dependency 'curl'
library(devtools)
install_github("ebecht/MCPcounter", ref = "master", subdir = "Source")
library(MCPcounter)
# Estimate cell-population abundance from the expression matrix rnaExpr
# (defined elsewhere), with features identified by Ensembl gene IDs.
estimate <- MCPcounter.estimate(rnaExpr, featuresType = "ENSEMBL_ID")
write.csv(estimate, "C:\\Users\\admin\\Desktop\\estimate.csv")
|
/MCPcounter.R
|
no_license
|
addisonli1988/2021.5.31
|
R
| false
| false
| 498
|
r
|
# Configure CRAN and Bioconductor mirrors.
# BUG FIX: the original wrote `options(repos<- c(...))` and
# `options("BioC_mirror"<- "...")`. Those are assignment expressions passed as
# unnamed arguments: they create stray variables and never set the options.
# The options must be passed as named arguments.
options(repos = c(CRAN = "https://mirrors.tuna.tsinghua.edu.cn/CRAN/"))
options(BioC_mirror = "https://mirrors.ustc.edu.cn/bioc/")
install.packages(c("devtools", "curl")) ##Installs devtools and the MCPcounter dependency 'curl'
library(devtools)
install_github("ebecht/MCPcounter", ref = "master", subdir = "Source")
library(MCPcounter)
# Estimate cell-population abundance from the expression matrix rnaExpr
# (defined elsewhere), with features identified by Ensembl gene IDs.
estimate <- MCPcounter.estimate(rnaExpr, featuresType = "ENSEMBL_ID")
write.csv(estimate, "C:\\Users\\admin\\Desktop\\estimate.csv")
|
# Commute-mode (JWTR) analysis of ACS PUMS person records by Census region.
library("dplyr")
library("ggplot2")
#colsToKeep = c("ST", "JWMNP", "JWTR", "WKHP", "WKW", "JWAP", "JWDP", "PWGTP")
#pusa <- fread("D:/CU/4249_Data/Project_1/ss13pusa.csv", select = colsToKeep)
#pusb <- fread("D:/CU/4249_Data/Project_1/ss13pusb.csv", select = colsToKeep)
#WorkData <- rbind(pusa, pusb)
#rm(pusa, pusb)
#save(WorkData, file = "WorkData.RData")
load("WorkData.RData")
# State lookup table; column 4 holds a numeric Census region code (1-4),
# recoded here to a region name.
ST.name <- read.csv("statename.csv", header = FALSE)
ST.name[,4] <- ifelse(ST.name[,4] == 1, "Northeast", ST.name[,4])
ST.name[,4] <- ifelse(ST.name[,4] == 2, "Middle", ST.name[,4])
ST.name[,4] <- ifelse(ST.name[,4] == 3, "South", ST.name[,4])
ST.name[,4] <- ifelse(ST.name[,4] == 4, "West", ST.name[,4])
# Attach a Region label to every person record.
# NOTE(review): ST.name[ST, 4] indexes lookup rows by the numeric state code,
# which assumes row i of statename.csv corresponds to state code i -- verify.
Work <- mutate(WorkData, Region = ST.name[ST,4]) %>%
  na.omit() %>%
  group_by(Region)
# Means of transportation; label k+1 corresponds to JWTR code k (code 0 unused).
Means0 <- c(0, "Car", "Bus", "Streetcar", "Subway", "Railroad", "Ferryboat",
            "Taxicab", "Motorcycle", "Bicycle", "Walked", "Work at home", "other")
ggplot(Work, aes(JWTR, group = Region)) +
  geom_bar(aes(colour = Region, fill = Region), alpha = 0.7) +
  xlab("Means") + ylab("Count") + ggtitle("Means of transportation") +
  scale_x_continuous(breaks = seq(0, 12, 1), labels = Means0)
# Same plot with cars (JWTR == 1) removed. The remaining codes run 2-12, so
# drop the "Car" label and place breaks at c(0, 2:12).
# BUG FIX: the original axis stopped at break 11, leaving the "other" category
# (code 12) present in the data but without a tick or label.
Means1 <- Means0[-2]
Transport1 <- select(Work, JWTR, Region) %>%
  filter(JWTR != 1)
ggplot(Transport1, aes(JWTR, group = Region)) +
  geom_bar(aes(colour = Region, fill = Region), alpha = 0.7) +
  xlab("Means") + ylab("Count") + ggtitle("Means of transportation(Remove car)") +
  scale_x_continuous(breaks = c(0, seq(2, 12, 1)), labels = Means1)
# California (6) vs New York (36). Region is selected explicitly because the
# plot aesthetics need it; the original relied on dplyr silently retaining the
# grouping column.
Transport2 <- select(Work, ST, JWTR, Region) %>%
  filter(ST %in% c(6, 36))
ggplot(Transport2, aes(JWTR, group = ST)) +
  geom_bar(aes(colour = Region, fill = Region), alpha = 0.7) +
  xlab("Means") + ylab("Count") + ggtitle("Means of transportation(NY and CA)") +
  scale_x_continuous(breaks = seq(0, 12, 1), labels = Means0)
|
/lib/work.R
|
no_license
|
TZstatsADS/Spr2016-Proj1-Grp8-InteractiveGraphs
|
R
| false
| false
| 1,921
|
r
|
library("dplyr")
library("ggplot2")
# One-time preprocessing (kept commented for provenance): the raw ACS PUMS
# files were read with only the commuting-related columns, row-bound, and
# cached as WorkData.RData, which is loaded below.
#colsToKeep = c("ST", "JWMNP", "JWTR", "WKHP", "WKW", "JWAP", "JWDP", "PWGTP")
#pusa <- fread("D:/CU/4249_Data/Project_1/ss13pusa.csv", select = colsToKeep)
#pusb <- fread("D:/CU/4249_Data/Project_1/ss13pusb.csv", select = colsToKeep)
#WorkData <- rbind(pusa, pusb)
#rm(pusa, pusb)
#save(WorkData, file = "WorkData.RData")
load("WorkData.RData")
# statename.csv (no header): column 4 carries a census-region code 1-4,
# recoded below into readable region names.
ST.name =read.csv("statename.csv",header = FALSE)
ST.name[,4] <- ifelse(ST.name[,4] == 1, "Northeast", ST.name[,4])
ST.name[,4] <- ifelse(ST.name[,4] == 2, "Middle", ST.name[,4])
ST.name[,4] <- ifelse(ST.name[,4] == 3, "South", ST.name[,4])
ST.name[,4] <- ifelse(ST.name[,4] == 4, "West", ST.name[,4])
# Attach a Region label to every record (ST.name is indexed by the state
# code ST), drop incomplete rows, and group for the plots below.
Work <- mutate(WorkData, Region = ST.name[ST,4]) %>%
na.omit() %>%
group_by(Region)
#Means of transportation
# Axis labels for JWTR codes 0-12 (0 = code not reported).
Means0 <- c(0, "Car", "Bus", "Streetcar", "Subway", "Railroad", "Ferryboat",
"Taxicab", "Motorcycle", "Bicycle", "Walked", "Work at home", "other")
# Distribution of commuting mode by region (all modes).
ggplot(Work, aes(JWTR, group = Region)) +
geom_bar(aes(colour = Region, fill = Region), alpha = 0.7) +
xlab("Means") + ylab("Count") + ggtitle("Means of transportation") +
scale_x_continuous(breaks = seq(0, 12, 1), labels = Means0)
# Same plot with the dominant "Car" category (JWTR == 1) removed so the
# rarer modes become visible.
Means1 <- Means0[c(1:12)]
Transport1 <- select(Work, JWTR, Region) %>%
filter(JWTR != 1)
ggplot(Transport1, aes(JWTR, group = Region)) +
geom_bar(aes(colour = Region, fill = Region), alpha = 0.7) +
xlab("Means") + ylab("Count") + ggtitle("Means of transportation(Remove car)") +
scale_x_continuous(breaks = seq(0, 11, 1), labels = Means1)
# Compare California (ST == 6) and New York (ST == 36) only.
# NOTE(review): the aes() below references Region, which survives select()
# only because it is a grouping variable of Work — confirm this is intended.
Transport2 <- select(Work, ST, JWTR) %>%
filter(ST %in% c(6, 36))
ggplot(Transport2, aes(JWTR, group = ST)) +
geom_bar(aes(colour = Region, fill = Region), alpha = 0.7) +
xlab("Means") + ylab("Count") + ggtitle("Means of transportation(NY and CA)") +
scale_x_continuous(breaks = seq(0, 12, 1), labels = Means0)
|
#' Annotator.annotate
#'
#' Annotate a data table/frame with additional fields.
#'
#' @param records The data table or data frame to annotate.
#' @param fields The fields to add.
#' @param include_errors Set to TRUE to include errors in the output (default: FALSE).
#' @param raw Set to TRUE to return the raw response (default: FALSE).
#'
#' @examples \dontrun{
#' Annotator.annotate(records=tbl, fields=fields)
#' }
#'
#' @references
#' \url{https://docs.solvebio.com/}
#'
#' @export
Annotator.annotate <- function(records, fields, include_errors=FALSE, raw=FALSE) {
    # Both the records and the fields to add are mandatory.
    if (missing(records) || missing(fields)) {
        stop("A data table/frame and fields are both required.")
    }
    # Assemble the request payload for the annotation endpoint.
    payload <- list(
        records=records,
        fields=fields,
        include_errors=include_errors
    )
    resp <- .request('POST', path='v1/annotate', query=NULL, body=payload)
    # Return either the raw API response or just the annotated records.
    if (raw) {
        return(resp)
    }
    resp$results
}
#' Expression.evaluate
#'
#' Evaluate a SolveBio expression.
#'
#' @param expression The SolveBio expression string.
#' @param data_type The data type to cast the expression result (default: string).
#' @param is_list Set to TRUE if the result is expected to be a list (default: FALSE).
#' @param data Variables used in the expression (default: NULL).
#' @param raw Set to TRUE to return the raw response (default: FALSE).
#'
#' @examples \dontrun{
#' Expression.evaluate("1 + 1", data_type="integer", is_list=FALSE)
#' }
#'
#' @references
#' \url{https://docs.solvebio.com/}
#'
#' @export
Expression.evaluate <- function(expression, data_type="string", is_list=FALSE, data=NULL, raw=FALSE) {
    # The expression string itself is mandatory.
    if (missing(expression)) {
        stop("A SolveBio expression is required.")
    }
    # Assemble the request payload for the evaluation endpoint.
    payload <- list(
        expression=expression,
        data_type=data_type,
        is_list=is_list,
        data=data
    )
    resp <- .request('POST', path='v1/evaluate', query=NULL, body=payload)
    # Return either the raw API response or just the evaluated result.
    if (raw) {
        return(resp)
    }
    resp$result
}
|
/R/annotation.R
|
no_license
|
stevekm/solvebio-r
|
R
| false
| false
| 2,166
|
r
|
#' Annotator.annotate
#'
#' Annotate a data table/frame with additional fields.
#'
#' @param records The data table or data frame to annotate.
#' @param fields The fields to add.
#' @param include_errors Set to TRUE to include errors in the output (default: FALSE).
#' @param raw Set to TRUE to return the raw response (default: FALSE).
#'
#' @examples \dontrun{
#' Annotator.annotate(records=tbl, fields=fields)
#' }
#'
#' @references
#' \url{https://docs.solvebio.com/}
#'
#' @export
Annotator.annotate <- function(records, fields, include_errors=FALSE, raw=FALSE) {
    # Both the records and the fields to add are mandatory.
    if (missing(records) || missing(fields)) {
        stop("A data table/frame and fields are both required.")
    }
    # Assemble the request payload for the annotation endpoint.
    payload <- list(
        records=records,
        fields=fields,
        include_errors=include_errors
    )
    resp <- .request('POST', path='v1/annotate', query=NULL, body=payload)
    # Return either the raw API response or just the annotated records.
    if (raw) {
        return(resp)
    }
    resp$results
}
#' Expression.evaluate
#'
#' Evaluate a SolveBio expression.
#'
#' @param expression The SolveBio expression string.
#' @param data_type The data type to cast the expression result (default: string).
#' @param is_list Set to TRUE if the result is expected to be a list (default: FALSE).
#' @param data Variables used in the expression (default: NULL).
#' @param raw Set to TRUE to return the raw response (default: FALSE).
#'
#' @examples \dontrun{
#' Expression.evaluate("1 + 1", data_type="integer", is_list=FALSE)
#' }
#'
#' @references
#' \url{https://docs.solvebio.com/}
#'
#' @export
Expression.evaluate <- function(expression, data_type="string", is_list=FALSE, data=NULL, raw=FALSE) {
    # The expression string itself is mandatory.
    if (missing(expression)) {
        stop("A SolveBio expression is required.")
    }
    # Assemble the request payload for the evaluation endpoint.
    payload <- list(
        expression=expression,
        data_type=data_type,
        is_list=is_list,
        data=data
    )
    resp <- .request('POST', path='v1/evaluate', query=NULL, body=payload)
    # Return either the raw API response or just the evaluated result.
    if (raw) {
        return(resp)
    }
    resp$result
}
|
# Fuzz-regression replay: a single input captured by the libFuzzer/valgrind
# harness, rerun against the unexported C++ routine borrowr:::matchesToCor.
testlist <- list(x = structure(c(2.61830011167902e+122, 2.61823523897988e+122, 1.39804328609529e-76, 1.39804328609529e-76, 1.39804328609529e-76, 1.39804328609529e-76, 1.39804328609529e-76, 1.4119288904388e-76, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 3L)))
# Invoke the function with the recorded argument list.
result <- do.call(borrowr:::matchesToCor,testlist)
# Print the structure of the result (smoke check only, not a value check).
str(result)
|
/borrowr/inst/testfiles/matchesToCor/libFuzzer_matchesToCor/matchesToCor_valgrind_files/1609957930-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 316
|
r
|
# Fuzz-regression replay: a single input captured by the libFuzzer/valgrind
# harness, rerun against the unexported C++ routine borrowr:::matchesToCor.
testlist <- list(x = structure(c(2.61830011167902e+122, 2.61823523897988e+122, 1.39804328609529e-76, 1.39804328609529e-76, 1.39804328609529e-76, 1.39804328609529e-76, 1.39804328609529e-76, 1.4119288904388e-76, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 3L)))
# Invoke the function with the recorded argument list.
result <- do.call(borrowr:::matchesToCor,testlist)
# Print the structure of the result (smoke check only, not a value check).
str(result)
|
# exploratory data analytics
library(tidyverse)
library(echarts4r)
# load data
datacuaca_aus = readr::read_csv(file = "data/mentah/weather AUS.csv")
# General summary -------------------------
## Inspect the contents of the data
glimpse(datacuaca_aus)
summary(datacuaca_aus)
## Visualising distributions -------------------
## continuous variables ------------------------------
# Bar chart of record counts per weather station (flipped for readable labels).
datacuaca_aus %>%
ggplot(aes(x = Location)) +
geom_bar() +
coord_flip()
# Histogram of daily minimum temperature in 5-degree bins.
datacuaca_aus %>%
ggplot(aes(x = MinTemp)) +
geom_histogram(binwidth = 5)
# Tabulate the same 5-degree MinTemp bins as counts.
datacuaca_aus %>%
count(cut_width(MinTemp, 5))
|
/script1/latihan hari 5a.R
|
no_license
|
eppofahmi/hujanetc
|
R
| false
| false
| 581
|
r
|
# exploratory data analytics
library(tidyverse)
library(echarts4r)
# load data
datacuaca_aus = readr::read_csv(file = "data/mentah/weather AUS.csv")
# General summary -------------------------
## Inspect the contents of the data
glimpse(datacuaca_aus)
summary(datacuaca_aus)
## Visualising distributions -------------------
## continuous variables ------------------------------
# Bar chart of record counts per weather station (flipped for readable labels).
datacuaca_aus %>%
ggplot(aes(x = Location)) +
geom_bar() +
coord_flip()
# Histogram of daily minimum temperature in 5-degree bins.
datacuaca_aus %>%
ggplot(aes(x = MinTemp)) +
geom_histogram(binwidth = 5)
# Tabulate the same 5-degree MinTemp bins as counts.
datacuaca_aus %>%
count(cut_width(MinTemp, 5))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/codingSchemes_get_all.R
\name{codingSchemes_get_all}
\alias{codingSchemes_get_all}
\title{Convenience function to get a list of all available coding schemes}
\usage{
codingSchemes_get_all()
}
\value{
A list of all available coding schemes
}
\description{
Convenience function to get a list of all available coding schemes
}
\examples{
rock::codingSchemes_get_all();
}
|
/man/codingSchemes_get_all.Rd
|
no_license
|
cran/rock
|
R
| false
| true
| 463
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/codingSchemes_get_all.R
\name{codingSchemes_get_all}
\alias{codingSchemes_get_all}
\title{Convenience function to get a list of all available coding schemes}
\usage{
codingSchemes_get_all()
}
\value{
A list of all available coding schemes
}
\description{
Convenience function to get a list of all available coding schemes
}
\examples{
rock::codingSchemes_get_all();
}
|
# Draw grouped boxplots of the top differentially-abundant features for the
# two enrichment groups, one panel per group, into a PDF.
#
# Command-line arguments:
#   args[1]: abundance profile table (rows = features, columns = samples; header)
#   args[2]: sample-to-group table (no header; col 1 = sample, col 2 = group)
#   args[3]: test-result table (header; col 1 = feature, last col = enriched group)
#   args[4]: output PDF path
args <- commandArgs(T)
profile.all <- read.table(args[1], head = T, check.names = F)
group <- read.table(args[2], head = F, check.names = F)
pvalue <- read.table(args[3], head = T, check.names = F)
# FIX: coerce the enrichment column to a factor so levels() is well defined.
# Since R 4.0 read.table returns character columns (stringsAsFactors = FALSE),
# for which levels() is NULL and `enrich` would be empty.
pvalue[, ncol(pvalue)] <- as.factor(pvalue[, ncol(pvalue)])
enrich <- levels(pvalue[, ncol(pvalue)])
# Subset the profile to the features enriched in each of the two groups.
profile.a <- profile.all[as.vector(pvalue[which(pvalue[, ncol(pvalue)] == enrich[1]), 1]), ]
profile.b <- profile.all[as.vector(pvalue[which(pvalue[, ncol(pvalue)] == enrich[2]), 1]), ]
# Sample names belonging to each group.
group.a <- as.vector(group[which(group[,2] == enrich[1]),1])
group.b <- as.vector(group[which(group[,2] == enrich[2]),1])
# Rank features by their median abundance within their own group, keep top 20.
profile.a.sortByMedian <- profile.a[order(apply(profile.a[group.a], 1, median), decreasing = T), ]
profile.b.sortByMedian <- profile.b[order(apply(profile.b[group.b], 1, median), decreasing = T), ]
profile.a.sortByMedian.top20 <- profile.a.sortByMedian[1:min(20, nrow(profile.a.sortByMedian)), ]
profile.b.sortByMedian.top20 <- profile.b.sortByMedian[1:min(20, nrow(profile.b.sortByMedian)), ]
profile.a.forPlot <- profile.a.sortByMedian.top20
profile.b.forPlot <- profile.b.sortByMedian.top20
# Draw one boxplot per feature (row of Mat), split side-by-side by group.
#   Mat:  feature x sample abundance table
#   Grp:  group label per column of Mat (coerced to factor below)
#   col:  box colours, one per group (vector, or matrix with one column per group)
#   mean: overlay the per-feature group mean as a point
boxplot.forGroup <- function(Mat, Grp = as.factor(rep("A", nrow(Mat))), col, at = 1:nrow(Mat), width = 0.7, boxwex = 0.6 / length(levels(Grp)) , mean = TRUE, mean.pch = 3, mean.col = "red", mean.cex = 1, ylab = "Abundance", srt = 50, ...) {
  # FIX: coerce so levels() works when a character vector is supplied
  # (read.table no longer auto-factors since R 4.0).
  Grp <- as.factor(Grp)
  nBox <- length(levels(Grp))
  # Recycle a colour vector into one column of colours per group.
  if (is.vector(col)) col <- matrix(rep(col, nBox), ncol = nBox)
  # Horizontal offsets spreading the groups around each feature position.
  atRel <- seq(from = (boxwex - width) / 2, to = (width - boxwex) / 2, length.out = nBox)
  xlim <- range(at) + c(-0.5 * width, 0.5 * width)
  ylim <- range(Mat)
  for (i in 1:nBox){
    grp <- levels(Grp)[i]
    Mat.forPlot <- Mat[, which(Grp == grp)]
    if(i == 1) {
      # First group creates the plot; subsequent groups are added onto it.
      boxplot(t(Mat.forPlot), col = col[i, ], at = at + atRel[i], boxwex = boxwex, xaxt = "n", add = F, ylab = ylab, xlim = xlim, ylim = ylim, cex.lab = 1.8, ...)
    }else {
      boxplot(t(Mat.forPlot), col = col[i, ], at = at + atRel[i], boxwex = boxwex, xaxt = "n", add = T, ...)
    }
    # Overlay the per-feature mean of this group, if requested.
    Mat.forPlot.mean <- apply(Mat.forPlot, 1, mean)
    if(mean) points(y = Mat.forPlot.mean, x = at + atRel[i], col = mean.col, pch = mean.pch, cex = mean.cex)
  }
  # Feature names as rotated labels drawn below the plot region.
  axis(1, labels = F, at = at)
  text(labels = row.names(Mat), x = at, y = rep((min(Mat) - max(Mat)) / 10, length(at)), srt = srt, xpd = T, adj = 1, cex = 0.8)
  legend("topright", legend = levels(Grp), col = col[, 1], pch = 15, bty = "n", cex = 1.8)
}
pdf(args[4], width = 12, height = 14)
layout(c(1, 2))
par(mar = c(16, 8, 1, 1), xpd = T)
boxplot.forGroup(profile.a.forPlot, group[, 2], col = c("royalblue", "orange"), pch = 20, range = 0)
par(mar = c(16, 8, 1, 1), xpd = T)
# Close the device and bail out when group B contributed no plottable features.
if(is.na(min(profile.b.forPlot))){
  dev.off()
  stop("profile.b.forPlot is null")
}
boxplot.forGroup(profile.b.forPlot, group[, 2], col = c("royalblue", "orange"), pch = 20, range = 0)
dev.off()
|
/bin/12.cazy/diff_plot.R
|
no_license
|
ms201420201029/real_metagenome_pipeline
|
R
| false
| false
| 3,120
|
r
|
# Draw grouped boxplots of the top differentially-abundant features for the
# two enrichment groups, one panel per group, into a PDF.
#
# Command-line arguments:
#   args[1]: abundance profile table (rows = features, columns = samples; header)
#   args[2]: sample-to-group table (no header; col 1 = sample, col 2 = group)
#   args[3]: test-result table (header; col 1 = feature, last col = enriched group)
#   args[4]: output PDF path
args <- commandArgs(T)
profile.all <- read.table(args[1], head = T, check.names = F)
group <- read.table(args[2], head = F, check.names = F)
pvalue <- read.table(args[3], head = T, check.names = F)
# FIX: coerce the enrichment column to a factor so levels() is well defined.
# Since R 4.0 read.table returns character columns (stringsAsFactors = FALSE),
# for which levels() is NULL and `enrich` would be empty.
pvalue[, ncol(pvalue)] <- as.factor(pvalue[, ncol(pvalue)])
enrich <- levels(pvalue[, ncol(pvalue)])
# Subset the profile to the features enriched in each of the two groups.
profile.a <- profile.all[as.vector(pvalue[which(pvalue[, ncol(pvalue)] == enrich[1]), 1]), ]
profile.b <- profile.all[as.vector(pvalue[which(pvalue[, ncol(pvalue)] == enrich[2]), 1]), ]
# Sample names belonging to each group.
group.a <- as.vector(group[which(group[,2] == enrich[1]),1])
group.b <- as.vector(group[which(group[,2] == enrich[2]),1])
# Rank features by their median abundance within their own group, keep top 20.
profile.a.sortByMedian <- profile.a[order(apply(profile.a[group.a], 1, median), decreasing = T), ]
profile.b.sortByMedian <- profile.b[order(apply(profile.b[group.b], 1, median), decreasing = T), ]
profile.a.sortByMedian.top20 <- profile.a.sortByMedian[1:min(20, nrow(profile.a.sortByMedian)), ]
profile.b.sortByMedian.top20 <- profile.b.sortByMedian[1:min(20, nrow(profile.b.sortByMedian)), ]
profile.a.forPlot <- profile.a.sortByMedian.top20
profile.b.forPlot <- profile.b.sortByMedian.top20
# Draw one boxplot per feature (row of Mat), split side-by-side by group.
#   Mat:  feature x sample abundance table
#   Grp:  group label per column of Mat (coerced to factor below)
#   col:  box colours, one per group (vector, or matrix with one column per group)
#   mean: overlay the per-feature group mean as a point
boxplot.forGroup <- function(Mat, Grp = as.factor(rep("A", nrow(Mat))), col, at = 1:nrow(Mat), width = 0.7, boxwex = 0.6 / length(levels(Grp)) , mean = TRUE, mean.pch = 3, mean.col = "red", mean.cex = 1, ylab = "Abundance", srt = 50, ...) {
  # FIX: coerce so levels() works when a character vector is supplied
  # (read.table no longer auto-factors since R 4.0).
  Grp <- as.factor(Grp)
  nBox <- length(levels(Grp))
  # Recycle a colour vector into one column of colours per group.
  if (is.vector(col)) col <- matrix(rep(col, nBox), ncol = nBox)
  # Horizontal offsets spreading the groups around each feature position.
  atRel <- seq(from = (boxwex - width) / 2, to = (width - boxwex) / 2, length.out = nBox)
  xlim <- range(at) + c(-0.5 * width, 0.5 * width)
  ylim <- range(Mat)
  for (i in 1:nBox){
    grp <- levels(Grp)[i]
    Mat.forPlot <- Mat[, which(Grp == grp)]
    if(i == 1) {
      # First group creates the plot; subsequent groups are added onto it.
      boxplot(t(Mat.forPlot), col = col[i, ], at = at + atRel[i], boxwex = boxwex, xaxt = "n", add = F, ylab = ylab, xlim = xlim, ylim = ylim, cex.lab = 1.8, ...)
    }else {
      boxplot(t(Mat.forPlot), col = col[i, ], at = at + atRel[i], boxwex = boxwex, xaxt = "n", add = T, ...)
    }
    # Overlay the per-feature mean of this group, if requested.
    Mat.forPlot.mean <- apply(Mat.forPlot, 1, mean)
    if(mean) points(y = Mat.forPlot.mean, x = at + atRel[i], col = mean.col, pch = mean.pch, cex = mean.cex)
  }
  # Feature names as rotated labels drawn below the plot region.
  axis(1, labels = F, at = at)
  text(labels = row.names(Mat), x = at, y = rep((min(Mat) - max(Mat)) / 10, length(at)), srt = srt, xpd = T, adj = 1, cex = 0.8)
  legend("topright", legend = levels(Grp), col = col[, 1], pch = 15, bty = "n", cex = 1.8)
}
pdf(args[4], width = 12, height = 14)
layout(c(1, 2))
par(mar = c(16, 8, 1, 1), xpd = T)
boxplot.forGroup(profile.a.forPlot, group[, 2], col = c("royalblue", "orange"), pch = 20, range = 0)
par(mar = c(16, 8, 1, 1), xpd = T)
# Close the device and bail out when group B contributed no plottable features.
if(is.na(min(profile.b.forPlot))){
  dev.off()
  stop("profile.b.forPlot is null")
}
boxplot.forGroup(profile.b.forPlot, group[, 2], col = c("royalblue", "orange"), pch = 20, range = 0)
dev.off()
|
# Fuzz-regression replay: a single input captured by the AFL/valgrind harness,
# rerun against the unexported C++ routine epiphy:::costTotCPP.
testlist <- list(cost = structure(c(1.44888560957826e+135, 1.6249392498385e+65, 5.27956628994611e-134, 1.56839475268612e-251, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 5L)), flow = structure(c(3.80768289350145e+125, 8.58414828913381e+155, 3.37787969964034e+43, 2.83184518248624e-19, 7.49487861616974e+223, 8.52929466674086e+86, 2.51852491380534e-303, 3.12954510408264e-253, 2.45741775099414e-215, 6.59159492364721e+70, 2.33952815237705e+77, 3.1674929214459e+282, 1.0709591854537e+63, 7.43876613929257e+191, 8.31920980250172e+78, 1.26747339146319e+161, 5.68076251060303e-141, 9.98610641272026e+182, 232665383858.491, 3.75587249552337e-34, 8.67688084914444e+71, 2.85936996201565e+135, 5.49642980516022e+268, 854537881567133, 1.33507119962914e+95, 2.76994725819545e+63, 4.08029273738449e+275, 4.93486427894025e+289, 1.24604061502336e+294, 3.2125809174767e-185, 9.58716852715016e+39, 6.94657888227078e+275, 3.46330348083089e+199, 3.28318446108869e-286, 6.12239214969922e-296 ), .Dim = c(5L, 7L)))
# Invoke the function with the recorded argument list.
result <- do.call(epiphy:::costTotCPP,testlist)
# Print the structure of the result (smoke check only, not a value check).
str(result)
|
/epiphy/inst/testfiles/costTotCPP/AFL_costTotCPP/costTotCPP_valgrind_files/1615926912-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 1,101
|
r
|
# Fuzz-regression replay: a single input captured by the AFL/valgrind harness,
# rerun against the unexported C++ routine epiphy:::costTotCPP.
testlist <- list(cost = structure(c(1.44888560957826e+135, 1.6249392498385e+65, 5.27956628994611e-134, 1.56839475268612e-251, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 5L)), flow = structure(c(3.80768289350145e+125, 8.58414828913381e+155, 3.37787969964034e+43, 2.83184518248624e-19, 7.49487861616974e+223, 8.52929466674086e+86, 2.51852491380534e-303, 3.12954510408264e-253, 2.45741775099414e-215, 6.59159492364721e+70, 2.33952815237705e+77, 3.1674929214459e+282, 1.0709591854537e+63, 7.43876613929257e+191, 8.31920980250172e+78, 1.26747339146319e+161, 5.68076251060303e-141, 9.98610641272026e+182, 232665383858.491, 3.75587249552337e-34, 8.67688084914444e+71, 2.85936996201565e+135, 5.49642980516022e+268, 854537881567133, 1.33507119962914e+95, 2.76994725819545e+63, 4.08029273738449e+275, 4.93486427894025e+289, 1.24604061502336e+294, 3.2125809174767e-185, 9.58716852715016e+39, 6.94657888227078e+275, 3.46330348083089e+199, 3.28318446108869e-286, 6.12239214969922e-296 ), .Dim = c(5L, 7L)))
# Invoke the function with the recorded argument list.
result <- do.call(epiphy:::costTotCPP,testlist)
# Print the structure of the result (smoke check only, not a value check).
str(result)
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/MAGNAMWAR.R
\name{download_packages}
\alias{download_packages}
\title{Download Required Packages}
\usage{
download_packages()
}
\description{
Automatically downloads all the required packages for full analysis
}
|
/man/download_packages.Rd
|
no_license
|
chaston-lab/MAGNAMWAR
|
R
| false
| false
| 299
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/MAGNAMWAR.R
\name{download_packages}
\alias{download_packages}
\title{Download Required Packages}
\usage{
download_packages()
}
\description{
Automatically downloads all the required packages for full analysis
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getA.R
\name{getA}
\alias{getA}
\title{\code{getA} - Extracts the "a" parameter from occupancy model outputs.}
\usage{
getA(
indata = "../data/model_runs/",
keep,
REGION_IN_Q = "a",
group_name = "",
combined_output = TRUE,
max_year_model = NULL,
min_year_model = NULL,
write = FALSE,
minObs = NULL,
t0,
tn,
parallel = TRUE,
n.cores = NULL
)
}
\description{
"a" is the occupancy on the logit scale
Currently this only works for Regions. For the whole domain some recoding and calculation would be required.
This code has been copied from tempSampPost(). There is potential redundancy that could be streamlined at a later date
The data extracted this way are what we need for the bma method
}
|
/man/getA.Rd
|
no_license
|
EllieDyer/wrappeR
|
R
| false
| true
| 803
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getA.R
\name{getA}
\alias{getA}
\title{\code{getA} - Extracts the "a" parameter from occupancy model outputs.}
\usage{
getA(
indata = "../data/model_runs/",
keep,
REGION_IN_Q = "a",
group_name = "",
combined_output = TRUE,
max_year_model = NULL,
min_year_model = NULL,
write = FALSE,
minObs = NULL,
t0,
tn,
parallel = TRUE,
n.cores = NULL
)
}
\description{
"a" is the occupancy on the logit scale
Currently this only works for Regions. For the whole domain some recoding and calculation would be required.
This code has been copied from tempSampPost(). There is potential redundancy that could be streamlined at a later date
The data extracted this way are what we need for the bma method
}
|
# Part of the varbvs package, https://github.com/pcarbo/varbvs
#
# Copyright (C) 2012-2017, Peter Carbonetto
#
# This program is free software: you can redistribute it under the
# terms of the GNU General Public License; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANY; without even the implied warranty of
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Samples nr posterior estimates of the proportion of variance in Y
# explained by the Bayesian variable selection model fitted using a
# variational approximation. This function is only valid for the
# linear regression model (family = "gaussian") with an intercept.
#
# Arguments:
#   X   - n x p numeric matrix of variables (no missing values allowed).
#   fit - fitted model object of class "varbvs".
#   nr  - number of posterior draws of the PVE to return (default 1000).
# Returns a numeric vector of nr sampled PVE estimates.
tgen_probitpve <- function (X, fit, nr = 1000) {
  # Get the number of variables (p) and the number of hyperparameter
  # settings (ns).
  p <- ncol(X)
  ns <- length(fit$logw)
  # Check input X. (Scalar conditions, so use short-circuit &&.)
  if (!(is.matrix(X) && is.numeric(X) && sum(is.na(X)) == 0))
    stop("Input X must be a numeric matrix with no missing values.")
  if (nrow(fit$alpha) != p)
    stop("Inputs X and fit are not compatible.")
  # Check input "fit".
  if (!is(fit,"varbvs"))
    stop("Input argument \"fit\" must be an instance of class \"varbvs\".")
  if (fit$family != "gaussian")
    # FIX: message previously referred to "varbvspve", not this function.
    stop("tgen_probitpve is only implemented for family = \"gaussian\".")
  # Initialize storage for posterior estimates of the proportion of
  # variance explained.
  pve <- rep(0,nr)
  # For each sample, compute the proportion of variance explained.
  # seq_len() (rather than 1:nr) degrades gracefully when nr = 0.
  for (i in seq_len(nr)) {
    # Draw a hyperparameter setting from the posterior distribution.
    j <- sample(ns,1,prob = fit$w)
    # Sample the regression coefficients.
    b <- with(fit,mu[,j] + sqrt(s[,j]) * rnorm(p))
    b <- b * (runif(p) < fit$alpha[,j])
    # Compute the proportion of variance explained; var1() is the
    # package-internal variance helper.
    sz <- c(var1(X %*% b))
    pve[i] <- sz/(sz + fit$sigma[j])
    cat("in pve",i,"\t",pve[i],"\n")
  }
  return(pve)
}
|
/code/tgen_probitpve.R
|
no_license
|
vivid-/T-GEN
|
R
| false
| false
| 2,118
|
r
|
# Part of the varbvs package, https://github.com/pcarbo/varbvs
#
# Copyright (C) 2012-2017, Peter Carbonetto
#
# This program is free software: you can redistribute it under the
# terms of the GNU General Public License; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANY; without even the implied warranty of
# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# Samples nr posterior estimates of the proportion of variance in Y
# explained by the Bayesian variable selection model fitted using a
# variational approximation. This function is only valid for the
# linear regression model (family = "gaussian") with an intercept.
#
# Arguments:
#   X   - n x p numeric matrix of variables (no missing values allowed).
#   fit - fitted model object of class "varbvs".
#   nr  - number of posterior draws of the PVE to return (default 1000).
# Returns a numeric vector of nr sampled PVE estimates.
tgen_probitpve <- function (X, fit, nr = 1000) {
  # Get the number of variables (p) and the number of hyperparameter
  # settings (ns).
  p <- ncol(X)
  ns <- length(fit$logw)
  # Check input X. (Scalar conditions, so use short-circuit &&.)
  if (!(is.matrix(X) && is.numeric(X) && sum(is.na(X)) == 0))
    stop("Input X must be a numeric matrix with no missing values.")
  if (nrow(fit$alpha) != p)
    stop("Inputs X and fit are not compatible.")
  # Check input "fit".
  if (!is(fit,"varbvs"))
    stop("Input argument \"fit\" must be an instance of class \"varbvs\".")
  if (fit$family != "gaussian")
    # FIX: message previously referred to "varbvspve", not this function.
    stop("tgen_probitpve is only implemented for family = \"gaussian\".")
  # Initialize storage for posterior estimates of the proportion of
  # variance explained.
  pve <- rep(0,nr)
  # For each sample, compute the proportion of variance explained.
  # seq_len() (rather than 1:nr) degrades gracefully when nr = 0.
  for (i in seq_len(nr)) {
    # Draw a hyperparameter setting from the posterior distribution.
    j <- sample(ns,1,prob = fit$w)
    # Sample the regression coefficients.
    b <- with(fit,mu[,j] + sqrt(s[,j]) * rnorm(p))
    b <- b * (runif(p) < fit$alpha[,j])
    # Compute the proportion of variance explained; var1() is the
    # package-internal variance helper.
    sz <- c(var1(X %*% b))
    pve[i] <- sz/(sz + fit$sigma[j])
    cat("in pve",i,"\t",pve[i],"\n")
  }
  return(pve)
}
|
# Unit 4 - "Judge, Jury, and Classifier" Lecture
# VIDEO 4
# Read in the data
stevens = read.csv("stevens.csv")
str(stevens)
# Split the data: 70% train / 30% test, stratified on the outcome Reverse
library(caTools)
set.seed(3000)
spl = sample.split(stevens$Reverse, SplitRatio = 0.7)
Train = subset(stevens, spl==TRUE)
Test = subset(stevens, spl==FALSE)
# Install rpart library
install.packages("rpart")
library(rpart)
install.packages("rpart.plot")
library(rpart.plot)
# CART model
# The three calls below refit the tree with different minbucket values
# (25, 5, 100); only the LAST fit (minbucket=100) is kept and plotted.
StevensTree = rpart(Reverse ~ Circuit + Issue + Petitioner + Respondent + LowerCourt + Unconst, data = Train, method="class", minbucket=25)
StevensTree = rpart(Reverse ~ Circuit + Issue + Petitioner + Respondent + LowerCourt + Unconst, data = Train, method="class", minbucket=5)
StevensTree = rpart(Reverse ~ Circuit + Issue + Petitioner + Respondent + LowerCourt + Unconst, data = Train, method="class", minbucket=100)
prp(StevensTree)
# Make predictions
PredictCART = predict(StevensTree, newdata = Test, type = "class")
table(Test$Reverse, PredictCART)
# Test-set accuracy computed from the confusion matrix above
(41+71)/(41+36+22+71)
# ROC curve
library(ROCR)
PredictROC = predict(StevensTree, newdata = Test)
PredictROC
# Column 2 of the prediction matrix holds P(Reverse = 1); use it as the score.
pred = prediction(PredictROC[,2], Test$Reverse)
perf = performance(pred, "tpr", "fpr")
plot(perf)
# Area under the ROC curve
as.numeric(performance(pred, "auc")@y.values)
# VIDEO 5 - Random Forests
# Install randomForest package
install.packages("randomForest")
library(randomForest)
# Build random forest model
# This first call treats the numeric Reverse as a regression target and warns;
# the outcome is converted to a factor next and the model refit.
StevensForest = randomForest(Reverse ~ Circuit + Issue + Petitioner + Respondent + LowerCourt + Unconst, data = Train, ntree=200, nodesize=25 )
# Convert outcome to factor so randomForest performs classification
Train$Reverse = as.factor(Train$Reverse)
Test$Reverse = as.factor(Test$Reverse)
# Try again
StevensForest = randomForest(Reverse ~ Circuit + Issue + Petitioner + Respondent + LowerCourt + Unconst, data = Train, ntree=200, nodesize=25 )
# Make predictions
PredictForest = predict(StevensForest, newdata = Test)
table(Test$Reverse, PredictForest)
# Test-set accuracy of the random forest
(40+74)/(40+37+19+74)
# VIDEO 6
# Install cross-validation packages
install.packages("caret")
library(caret)
install.packages("e1071")
library(e1071)
# Define cross-validation experiment: 10-fold CV over cp values 0.01..0.50
numFolds = trainControl( method = "cv", number = 10 )
cpGrid = expand.grid( .cp = seq(0.01,0.5,0.01))
# Perform the cross validation
train(Reverse ~ Circuit + Issue + Petitioner + Respondent + LowerCourt + Unconst, data = Train, method = "rpart", trControl = numFolds, tuneGrid = cpGrid )
# Create a new CART model with cp = 0.18 chosen from the CV results above
StevensTreeCV = rpart(Reverse ~ Circuit + Issue + Petitioner + Respondent + LowerCourt + Unconst, data = Train, method="class", cp = 0.18)
prp(StevensTreeCV)
# Make predictions
PredictCV = predict(StevensTreeCV, newdata = Test, type = "class")
table(Test$Reverse, PredictCV)
# Test-set accuracy of the cross-validated tree
(59+64)/(59+18+29+64)
|
/Unit4_SupremeCourt.R
|
no_license
|
marlonglopes/RTests
|
R
| false
| false
| 2,722
|
r
|
# Unit 4 - "Judge, Jury, and Classifier" Lecture
# VIDEO 4
# Read in the data
stevens = read.csv("stevens.csv")
str(stevens)
# Split the data: 70% train / 30% test, stratified on the outcome Reverse
library(caTools)
set.seed(3000)
spl = sample.split(stevens$Reverse, SplitRatio = 0.7)
Train = subset(stevens, spl==TRUE)
Test = subset(stevens, spl==FALSE)
# Install rpart library
install.packages("rpart")
library(rpart)
install.packages("rpart.plot")
library(rpart.plot)
# CART model
# The three calls below refit the tree with different minbucket values
# (25, 5, 100); only the LAST fit (minbucket=100) is kept and plotted.
StevensTree = rpart(Reverse ~ Circuit + Issue + Petitioner + Respondent + LowerCourt + Unconst, data = Train, method="class", minbucket=25)
StevensTree = rpart(Reverse ~ Circuit + Issue + Petitioner + Respondent + LowerCourt + Unconst, data = Train, method="class", minbucket=5)
StevensTree = rpart(Reverse ~ Circuit + Issue + Petitioner + Respondent + LowerCourt + Unconst, data = Train, method="class", minbucket=100)
prp(StevensTree)
# Make predictions
PredictCART = predict(StevensTree, newdata = Test, type = "class")
table(Test$Reverse, PredictCART)
# Test-set accuracy computed from the confusion matrix above
(41+71)/(41+36+22+71)
# ROC curve
library(ROCR)
PredictROC = predict(StevensTree, newdata = Test)
PredictROC
# Column 2 of the prediction matrix holds P(Reverse = 1); use it as the score.
pred = prediction(PredictROC[,2], Test$Reverse)
perf = performance(pred, "tpr", "fpr")
plot(perf)
# Area under the ROC curve
as.numeric(performance(pred, "auc")@y.values)
# VIDEO 5 - Random Forests
# Install randomForest package
install.packages("randomForest")
library(randomForest)
# Build random forest model
# This first call treats the numeric Reverse as a regression target and warns;
# the outcome is converted to a factor next and the model refit.
StevensForest = randomForest(Reverse ~ Circuit + Issue + Petitioner + Respondent + LowerCourt + Unconst, data = Train, ntree=200, nodesize=25 )
# Convert outcome to factor so randomForest performs classification
Train$Reverse = as.factor(Train$Reverse)
Test$Reverse = as.factor(Test$Reverse)
# Try again
StevensForest = randomForest(Reverse ~ Circuit + Issue + Petitioner + Respondent + LowerCourt + Unconst, data = Train, ntree=200, nodesize=25 )
# Make predictions
PredictForest = predict(StevensForest, newdata = Test)
table(Test$Reverse, PredictForest)
# Test-set accuracy of the random forest
(40+74)/(40+37+19+74)
# VIDEO 6
# Install cross-validation packages
install.packages("caret")
library(caret)
install.packages("e1071")
library(e1071)
# Define cross-validation experiment: 10-fold CV over cp values 0.01..0.50
numFolds = trainControl( method = "cv", number = 10 )
cpGrid = expand.grid( .cp = seq(0.01,0.5,0.01))
# Perform the cross validation
train(Reverse ~ Circuit + Issue + Petitioner + Respondent + LowerCourt + Unconst, data = Train, method = "rpart", trControl = numFolds, tuneGrid = cpGrid )
# Create a new CART model with cp = 0.18 chosen from the CV results above
StevensTreeCV = rpart(Reverse ~ Circuit + Issue + Petitioner + Respondent + LowerCourt + Unconst, data = Train, method="class", cp = 0.18)
prp(StevensTreeCV)
# Make predictions
PredictCV = predict(StevensTreeCV, newdata = Test, type = "class")
table(Test$Reverse, PredictCV)
# Test-set accuracy of the cross-validated tree
(59+64)/(59+18+29+64)
|
library(shiny)
# UI piece of the module: a container div holding the module's own button
# and a text display of its local click count. All input/output IDs are
# namespaced with NS(id).
clicksUI <- function(id) {
ns <- shiny::NS(id)
div(id = "module_content",
style = "background-color: #c9d8f0; width: 200px; padding: 5px",
actionButton(ns('local_counter'), "I'm inside the module"),
textOutput(ns("local_clicks"))
)
}
# Server piece of the module. `local_clicks` is a reactiveVal owned by the
# outer server; the module pushes its button count into it so the parent
# can show a "global view" of the clicks.
clicksModule <- function(input, output, session, local_clicks) {
# Observer stored in session$userData so it can be referenced from outside
# the module (e.g. to destroy it when the module is removed).
session$userData$clicks_observer <- observeEvent(input$local_counter, {
print(paste("Clicked", input$local_counter))
local_clicks(input$local_counter)
}, ignoreNULL = FALSE, ignoreInit = TRUE)
output$local_clicks <- renderText({
ns <- session$ns
paste("Clicks (local view):", input$local_counter)
})
}
ui <- fluidPage(
shinyjs::useShinyjs(),
div(
style = "background-color: #ffebf3; width: 200px; padding: 5px",
actionButton('add_module', '', icon = icon('plus-circle')),
actionButton('remove_module', '', icon = icon('trash'), class = "disabled"),
textOutput("local_clicks_out")
),
# Placeholder container the module UI is inserted into / removed from.
tags$div(
id = "container"
)
)
server <- function(input, output, session) {
# Shared state written by the module, read by the global display below.
local_clicks <- reactiveVal(NULL)
output$local_clicks_out <- renderText({
clicks <- 0
module_clicks <- local_clicks()
if (!is.null(module_clicks)) {
clicks <- module_clicks
}
paste("Clicks (global view):", clicks)
})
# Dynamically insert the module UI and start its server logic.
observeEvent(input$add_module, {
insertUI(
selector = '#container',
where = "beforeEnd",
ui = clicksUI("my_module")
)
shinyjs::disable("add_module")
shinyjs::enable("remove_module")
callModule(clicksModule, "my_module", local_clicks)
})
# Remove the module UI and keep its last click count in local_clicks.
observeEvent(input$remove_module, {
removeUI(selector = "#module_content")
shinyjs::disable("remove_module")
shinyjs::enable("add_module")
local_clicks(input[["my_module-local_counter"]])
})
}
shinyApp(ui = ui, server = server)
|
/before.R
|
no_license
|
Appsilon/dynamic-shiny-modules
|
R
| false
| false
| 1,848
|
r
|
library(shiny)
# Module UI: one action button plus a text display of its click count.
# NOTE(review): the wrapper div id "module_content" is NOT namespaced via
# ns(); ids would collide with more than one module instance — confirm the
# app is intentionally limited to a single instance.
clicksUI <- function(id) {
ns <- shiny::NS(id)
div(id = "module_content",
style = "background-color: #c9d8f0; width: 200px; padding: 5px",
actionButton(ns('local_counter'), "I'm inside the module"),
textOutput(ns("local_clicks"))
)
}
# Module server: mirrors the local button count into the caller-supplied
# reactiveVal `local_clicks` and renders the module-local view of the count.
# NOTE(review): the observer is stashed in session$userData but never
# destroyed when the module UI is removed, so it keeps observing stale
# input — verify whether this leak is intentional here.
clicksModule <- function(input, output, session, local_clicks) {
session$userData$clicks_observer <- observeEvent(input$local_counter, {
print(paste("Clicked", input$local_counter))
local_clicks(input$local_counter)
}, ignoreNULL = FALSE, ignoreInit = TRUE)
output$local_clicks <- renderText({
ns <- session$ns
paste("Clicks (local view):", input$local_counter)
})
}
# Main UI: add/remove buttons and a global view of the module's count;
# #container is the insertion point for the dynamically added module UI.
ui <- fluidPage(
shinyjs::useShinyjs(),
div(
style = "background-color: #ffebf3; width: 200px; padding: 5px",
actionButton('add_module', '', icon = icon('plus-circle')),
actionButton('remove_module', '', icon = icon('trash'), class = "disabled"),
textOutput("local_clicks_out")
),
tags$div(
id = "container"
)
)
server <- function(input, output, session) {
# Shared state: written by the module, read by the global display below.
local_clicks <- reactiveVal(NULL)
output$local_clicks_out <- renderText({
clicks <- 0
module_clicks <- local_clicks()
if (!is.null(module_clicks)) {
clicks <- module_clicks
}
paste("Clicks (global view):", clicks)
})
# Insert one module instance into #container and start its server logic.
observeEvent(input$add_module, {
insertUI(
selector = '#container',
where = "beforeEnd",
ui = clicksUI("my_module")
)
shinyjs::disable("add_module")
shinyjs::enable("remove_module")
callModule(clicksModule, "my_module", local_clicks)
})
# Remove the module UI and flip the buttons back.
# NOTE(review): reads the module's input directly through its namespaced
# id ("my_module-local_counter"), bypassing the module boundary, and the
# module's observer is not cleaned up here.
observeEvent(input$remove_module, {
removeUI(selector = "#module_content")
shinyjs::disable("remove_module")
shinyjs::enable("add_module")
local_clicks(input[["my_module-local_counter"]])
})
}
shinyApp(ui = ui, server = server)
|
\name{pitch_value_contour}
\alias{pitch_value_contour}
\title{
Pitch Value Contour Plot
}
\description{
Constructs a pitch value contour plot
}
\usage{
pitch_value_contour(df,
L = seq(-0.2, 0.2, by = 0.01),
title = "Pitch Value",
NCOL = 2)
}
\arguments{
\item{df}{
data frame or list containing Statcast data with a PitchValue variable
}
\item{L}{
values of the contour lines
}
\item{title}{
title of the graph
}
\item{NCOL}{
number of columns in multipanel display
}
}
\value{
Constructs a contour plot of the estimated pitch value from the gam model fit
}
\author{
Jim Albert
}
|
/man/pitch_value_contour.Rd
|
no_license
|
bayesball/CalledStrike
|
R
| false
| false
| 689
|
rd
|
\name{pitch_value_contour}
\alias{pitch_value_contour}
\title{
Pitch Value Contour Plot
}
\description{
Constructs a pitch value contour plot
}
\usage{
pitch_value_contour(df,
L = seq(-0.2, 0.2, by = 0.01),
title = "Pitch Value",
NCOL = 2)
}
\arguments{
\item{df}{
data frame or list containing Statcast data with a PitchValue variable
}
\item{L}{
values of the contour lines
}
\item{title}{
title of the graph
}
\item{NCOL}{
number of columns in multipanel display
}
}
\value{
Constructs a contour plot of the estimated pitch value from the gam model fit
}
\author{
Jim Albert
}
|
#' Add dev_history.Rmd file that drives package development
#'
#' @param pkg Path where to save file
#' @param overwrite Whether to overwrite existing dev_history.Rmd file
#' @param open Logical. Whether to open file after creation
#' @param dev_dir Name of directory for development Rmarkdown files. Default to "dev".
#' @param name Name of the template file. See details.
#'
#' @details
#' Choose `name` among the different templates available:
#'
#' - "full": the full template with a reproducible package to inflate directly. Default.
#' - "minimal": Minimal template to start a new package when you already know {fusen}.
#' - "additional": Template for an additional vignette, thus additional functions.
#' - "teaching": Template with a reproducible package, simpler than "full", but everything to
#' teach the minimal structure of a package.
#'
#' @return
#' Create a dev_history.Rmd file and return its path
#' @export
#'
#' @examples
#' # Create a new project
#' tmpdir <- tempdir()
#' dummypackage <- file.path(tmpdir, "dummypackage")
#' dir.create(dummypackage)
#'
#' # Add
#' add_dev_history(pkg = dummypackage)
#'
#' # Delete dummy package
#' unlink(dummypackage, recursive = TRUE)
add_dev_history <- function(pkg = ".", overwrite = FALSE,
                            open = TRUE, dev_dir = "dev",
                            name = c("full", "minimal", "additional", "teaching")) {
  project_name <- basename(normalizePath(pkg))
  if (project_name != asciify_name(project_name, to_pkg = TRUE)) {
    stop("Please rename your project/directory with: ", asciify_name(project_name, to_pkg = TRUE),
         " as a package name should only contain letters, numbers and dots.")
  }
  # Work from the package directory; restore the caller's directory on exit.
  old <- setwd(pkg)
  on.exit(setwd(old))
  name <- match.arg(name)
  # Which template
  template <- system.file(paste0("dev-template-", name, ".Rmd"), package = "fusen")
  pkg <- normalizePath(pkg)
  if (!dir.exists(dev_dir)) {
    dir.create(dev_dir)
  }
  dev_path <- file.path(pkg, dev_dir, "dev_history.Rmd")
  # FIX: scalar condition uses short-circuit `&&` (was vectorized `&`) and
  # an explicit logical test instead of `overwrite == FALSE`.
  if (file.exists(dev_path) && !isTRUE(overwrite)) {
    # Do not clobber the existing file: pick the next numbered name instead.
    n <- length(list.files(dev_dir, pattern = "^dev_history.*[.]Rmd"))
    dev_path <- file.path(pkg, dev_dir, paste0("dev_history_", n + 1, ".Rmd"))
    message(
      "dev_history.Rmd already exists. New dev file is renamed '",
      basename(dev_path), "'. Use overwrite = TRUE, if you want to ",
      "overwrite the existing dev_history.Rmd file, or rename it."
    )
  }
  # Change lines asking for pkg name
  lines_template <- readLines(template)
  lines_template[grepl("<my_package_name>", lines_template)] <-
    gsub("<my_package_name>", basename(pkg),
         lines_template[grepl("<my_package_name>", lines_template)])
  cat(enc2utf8(lines_template), file = dev_path, sep = "\n")
  # .Rbuildignore: ignore the dev dir (and .here when there is no .Rproj)
  # usethis::use_build_ignore(dev_dir) # Cannot be used outside project
  if (length(list.files(pkg, pattern = "[.]Rproj")) == 0) {
    lines <- c(paste0("^", dev_dir, "$"), "^\\.here$")
  } else {
    lines <- c(paste0("^", dev_dir, "$"))
  }
  add_missing_lines(normalizePath(file.path(pkg, ".Rbuildignore"), mustWork = FALSE), lines)
  # Add a gitignore file in dev_dir for knitted/inflated artefacts
  add_missing_lines(normalizePath(file.path(dev_dir, ".gitignore"), mustWork = FALSE),
                    c("*.html", "*.R"))
  if (length(list.files(pkg, pattern = "[.]Rproj")) == 0) {
    here::set_here(pkg)
  }
  # FIX: `&&` for the scalar condition (was vectorized `&`).
  if (isTRUE(open) && interactive()) {
    usethis::edit_file(dev_path)
  }
  dev_path
}

#' Ensure given lines are present in a text file
#'
#' Keeps all existing content, appends only the lines not already present,
#' and creates the file when it does not exist yet.
#' @param path Path of the file to update
#' @param lines Character vector of lines that must be present
#' @noRd
add_missing_lines <- function(path, lines) {
  if (!file.exists(path)) {
    existing_lines <- ""
  } else {
    existing_lines <- readLines(path, warn = FALSE, encoding = "UTF-8")
  }
  new <- setdiff(lines, existing_lines)
  if (length(new) != 0) {
    cat(enc2utf8(c(existing_lines, new)), file = path, sep = "\n")
  }
}
#' Clean names for vignettes and package
#' @param name Character to clean
#' @param to_pkg Transform all non authorized characters to dots for packages, instead of dash
#' @noRd
asciify_name <- function(name, to_pkg = FALSE) {
  # Replace disallowed characters with dashes, then tidy dash/underscore runs
  # and strip any leading/trailing dash.
  out <- gsub("[^([:alnum:]*_*-*)*]", "-", name)
  out <- gsub("-_|_-", "-", out)
  out <- gsub("-+", "-", out)
  out <- gsub("^-|-$", "", out)
  if (isTRUE(to_pkg)) {
    # Package names: drop leading digits, then map every remaining
    # non-alphanumeric run to a single dot.
    out <- gsub("^[0-9]+", "", out)
    out <- gsub("[^a-zA-Z0-9]+", ".", out)
  } else {
    # File-friendly names: asciify, keeping underscores and dashes.
    # (same approach as usethis:::asciify())
    out <- gsub("[^a-zA-Z0-9_-]+", "-", out)
  }
  out
}
|
/R/add_dev_history.R
|
permissive
|
ALanguillaume/fusen
|
R
| false
| false
| 4,828
|
r
|
#' Add dev_history.Rmd file that drives package development
#'
#' @param pkg Path where to save file
#' @param overwrite Whether to overwrite existing dev_history.Rmd file
#' @param open Logical. Whether to open file after creation
#' @param dev_dir Name of directory for development Rmarkdown files. Default to "dev".
#' @param name Name of the template file. See details.
#'
#' @details
#' Choose `name` among the different templates available:
#'
#' - "full": the full template with a reproducible package to inflate directly. Default.
#' - "minimal": Minimal template to start a new package when you already know {fusen}.
#' - "additional": Template for an additional vignette, thus additional functions.
#' - "teaching": Template with a reproducible package, simpler than "full", but everything to
#' teach the minimal structure of a package.
#'
#' @return
#' Create a dev_history.Rmd file and return its path
#' @export
#'
#' @examples
#' # Create a new project
#' tmpdir <- tempdir()
#' dummypackage <- file.path(tmpdir, "dummypackage")
#' dir.create(dummypackage)
#'
#' # Add
#' add_dev_history(pkg = dummypackage)
#'
#' # Delete dummy package
#' unlink(dummypackage, recursive = TRUE)
add_dev_history <- function(pkg = ".", overwrite = FALSE,
                            open = TRUE, dev_dir = "dev",
                            name = c("full", "minimal", "additional", "teaching")) {
  project_name <- basename(normalizePath(pkg))
  if (project_name != asciify_name(project_name, to_pkg = TRUE)) {
    stop("Please rename your project/directory with: ", asciify_name(project_name, to_pkg = TRUE),
         " as a package name should only contain letters, numbers and dots.")
  }
  # Work from the package directory; restore the caller's directory on exit.
  old <- setwd(pkg)
  on.exit(setwd(old))
  name <- match.arg(name)
  # Which template
  template <- system.file(paste0("dev-template-", name, ".Rmd"), package = "fusen")
  pkg <- normalizePath(pkg)
  if (!dir.exists(dev_dir)) {
    dir.create(dev_dir)
  }
  dev_path <- file.path(pkg, dev_dir, "dev_history.Rmd")
  # FIX: scalar condition uses short-circuit `&&` (was vectorized `&`) and
  # an explicit logical test instead of `overwrite == FALSE`.
  if (file.exists(dev_path) && !isTRUE(overwrite)) {
    # Do not clobber the existing file: pick the next numbered name instead.
    n <- length(list.files(dev_dir, pattern = "^dev_history.*[.]Rmd"))
    dev_path <- file.path(pkg, dev_dir, paste0("dev_history_", n + 1, ".Rmd"))
    message(
      "dev_history.Rmd already exists. New dev file is renamed '",
      basename(dev_path), "'. Use overwrite = TRUE, if you want to ",
      "overwrite the existing dev_history.Rmd file, or rename it."
    )
  }
  # Change lines asking for pkg name
  lines_template <- readLines(template)
  lines_template[grepl("<my_package_name>", lines_template)] <-
    gsub("<my_package_name>", basename(pkg),
         lines_template[grepl("<my_package_name>", lines_template)])
  cat(enc2utf8(lines_template), file = dev_path, sep = "\n")
  # .Rbuildignore: ignore the dev dir (and .here when there is no .Rproj)
  # usethis::use_build_ignore(dev_dir) # Cannot be used outside project
  if (length(list.files(pkg, pattern = "[.]Rproj")) == 0) {
    lines <- c(paste0("^", dev_dir, "$"), "^\\.here$")
  } else {
    lines <- c(paste0("^", dev_dir, "$"))
  }
  add_missing_lines(normalizePath(file.path(pkg, ".Rbuildignore"), mustWork = FALSE), lines)
  # Add a gitignore file in dev_dir for knitted/inflated artefacts
  add_missing_lines(normalizePath(file.path(dev_dir, ".gitignore"), mustWork = FALSE),
                    c("*.html", "*.R"))
  if (length(list.files(pkg, pattern = "[.]Rproj")) == 0) {
    here::set_here(pkg)
  }
  # FIX: `&&` for the scalar condition (was vectorized `&`).
  if (isTRUE(open) && interactive()) {
    usethis::edit_file(dev_path)
  }
  dev_path
}

#' Ensure given lines are present in a text file
#'
#' Keeps all existing content, appends only the lines not already present,
#' and creates the file when it does not exist yet.
#' @param path Path of the file to update
#' @param lines Character vector of lines that must be present
#' @noRd
add_missing_lines <- function(path, lines) {
  if (!file.exists(path)) {
    existing_lines <- ""
  } else {
    existing_lines <- readLines(path, warn = FALSE, encoding = "UTF-8")
  }
  new <- setdiff(lines, existing_lines)
  if (length(new) != 0) {
    cat(enc2utf8(c(existing_lines, new)), file = path, sep = "\n")
  }
}
#' Clean names for vignettes and package
#' @param name Character to clean
#' @param to_pkg Transform all non authorized characters to dots for packages, instead of dash
#' @noRd
asciify_name <- function(name, to_pkg = FALSE) {
  # Replace disallowed characters with dashes, then tidy dash/underscore runs
  # and strip any leading/trailing dash.
  out <- gsub("[^([:alnum:]*_*-*)*]", "-", name)
  out <- gsub("-_|_-", "-", out)
  out <- gsub("-+", "-", out)
  out <- gsub("^-|-$", "", out)
  if (isTRUE(to_pkg)) {
    # Package names: drop leading digits, then map every remaining
    # non-alphanumeric run to a single dot.
    out <- gsub("^[0-9]+", "", out)
    out <- gsub("[^a-zA-Z0-9]+", ".", out)
  } else {
    # File-friendly names: asciify, keeping underscores and dashes.
    # (same approach as usethis:::asciify())
    out <- gsub("[^a-zA-Z0-9_-]+", "-", out)
  }
  out
}
|
# Support Vector Classifier (ISLR Chapter 9 lab)
# Simulate two slightly-separated classes in two dimensions.
set.seed(1)
x = matrix(rnorm(20*2), ncol = 2)
y = c(rep(-1,10), rep(1,10))
x[y==1, ]=x[y==1,]+1
plot(x, col = (3-y))
dat = data.frame(x = x, y = as.factor(y))
library(e1071)
# Linear support vector classifier with a large cost (narrow margin).
# FIX: the argument was misspelled 'sclae' and silently had no effect;
# 'scale = FALSE' keeps the variables on their original scale as intended.
svmfit = svm(y~., data=dat, kernel = "linear",
cost = 10, scale = FALSE)
plot(svmfit, dat)
svmfit$index
summary(svmfit)
# Smaller cost -> wider margin, more support vectors.
svmfit = svm(y~., data=dat, kernel = "linear", cost = 0.1, scale = FALSE)
plot(svmfit, dat)
svmfit$index
set.seed(1)
# Cross-validate over a range of cost values.
tune.out = tune(svm, y~., data=dat, kernel = "linear",
ranges = list(cost = c(0.001, 0.01, 0.1, 1, 5, 10, 100)))
summary(tune.out)
bestmod = tune.out$best.model
summary(bestmod)
# Generate a test set from the same distribution.
xtest = matrix(rnorm(20*2), ncol = 2)
ytest = sample(c(-1,1), 20, rep = TRUE)
xtest[ytest==1, ] = xtest[ytest==1,]+1
testdat = data.frame(x = xtest, y =as.factor(ytest))
ypred = predict(bestmod, testdat)
table(predict = ypred, truth = testdat$y)
svmfit = svm(y~., data = dat, kernel = "linear",
cost = 0.01, scale = FALSE)
ypred = predict(svmfit, testdat)
table(predict = ypred, truth = testdat$y)
# A (barely) linearly separable case.
x[y==1,] = x[y==1,] + 0.5
plot(x, col = (y+5)/2, pch = 19)
dat = data.frame(x = x, y = as.factor(y))
svmfit = svm(y~., data=dat, kernel = "linear", cost = 1e5)
summary(svmfit)
plot(svmfit, dat)
svmfit = svm(y~., data= dat, kernel = "linear", cost = 1)
summary(svmfit)
plot(svmfit, dat)
# Support Vector Machine (non-linear, radial kernel)
set.seed(1)
x = matrix(rnorm(200*2), ncol = 2)
x[1:100,] = x[1:100,] + 2
x[101:150,] = x[101:150,] -2
y = c(rep(1,150), rep(2,50))
dat = data.frame(x = x, y = as.factor(y))
plot(x, col=y)
train = sample(200, 100)
svmfit = svm(y~., data = dat[train,], kernel = "radial", gamma = 1, cost =1)
plot(svmfit, dat[train,])
summary(svmfit)
# A very large cost overfits the training data.
svmfit = svm(y~., data = dat[train,], kernel = "radial", gamma = 1, cost = 1e5)
plot(svmfit, dat[train,])
set.seed(1)
tune.out = tune(svm, y~., data=dat[train,], kernel = "radial",
ranges = list(cost = c(0.1,1,10,100,1000),
gamma = c(0.5, 1,2,3,4)))
summary(tune.out)
table(true = dat[-train, "y"], pred = predict(tune.out$best.model, newdata = dat[-train,]))
# ROC Curve
library(ROCR)
# Helper: plot an ROC curve (true positive rate vs false positive rate)
# from raw decision values and true labels.
rocplot = function(pred, truth, ...){
predob = prediction(pred, truth)
perf = performance(predob, "tpr", "fpr")
plot(perf,...)
}
svmfit.opt = svm(y~., data = dat[train,], kernel = "radial",
gamma = 2, cost = 1, decision.values = TRUE)
fitted = attributes(predict(svmfit.opt, dat[train,], decision.values = TRUE))$decision.values
par(mfrow = c(1,2))
rocplot(fitted, dat[train, "y"], main = "Training Data")
# FIX: 'decision.values = t' passed the transpose function, not a flag;
# TRUE is intended (T/TRUE elsewhere normalised to TRUE as well).
svmfit.flex = svm(y~., data = dat[train,], kernel = "radial",
gamma = 50, cost = 1, decision.values = TRUE)
fitted = attributes(predict(svmfit.flex, dat[train,], decision.values = TRUE))$decision.values
rocplot(fitted, dat[train, "y"], add=TRUE, col="red")
fitted = attributes(predict(svmfit.opt, dat[-train,], decision.values = TRUE))$decision.values
rocplot(fitted, dat[-train, "y"], main = "Test Data")
# FIX: the flexible model's curve on the "Test Data" panel must use the
# held-out rows (dat[-train,]); the original re-plotted training data.
fitted = attributes(predict(svmfit.flex, dat[-train,], decision.values = TRUE))$decision.values
rocplot(fitted, dat[-train, "y"], add=TRUE, col="red")
# SVM with more than two classes
set.seed(1)
x = rbind(x, matrix(rnorm(50*2), ncol = 2))
y = c(y, rep(0,50))
x[y==0,2] = x[y==0,2]+2
dat = data.frame(x = x, y=as.factor(y))
par(mfrow = c(1,1))
plot(x, col=(y+1))
svmfit = svm(y~., data = dat, kernel = "radial", cost =10, gamma =1)
plot(svmfit, dat)
# Application to gene Expression data (Khan dataset from ISLR)
library(ISLR)
names(Khan)
dim(Khan$xtrain)
dim(Khan$xtest)
length(Khan$ytrain)
length(Khan$ytest)
table(Khan$ytrain)
table(Khan$ytest)
dat = data.frame(x = Khan$xtrain, y=as.factor(Khan$ytrain))
out = svm(y~., data=dat, kernel = "linear", cost =10)
summary(out)
table(out$fitted, dat$y)
dat.te = data.frame(x=Khan$xtest, y=as.factor(Khan$ytest))
pred.te = predict(out, newdata = dat.te)
table(pred.te, dat.te$y)
|
/SVM.R
|
no_license
|
ajayarunachalam/Statistical_Learning
|
R
| false
| false
| 3,863
|
r
|
# Support Vector Classifier (ISLR Chapter 9 lab)
# Simulate two slightly-separated classes in two dimensions.
set.seed(1)
x = matrix(rnorm(20*2), ncol = 2)
y = c(rep(-1,10), rep(1,10))
x[y==1, ]=x[y==1,]+1
plot(x, col = (3-y))
dat = data.frame(x = x, y = as.factor(y))
library(e1071)
# Linear support vector classifier with a large cost (narrow margin).
# FIX: the argument was misspelled 'sclae' and silently had no effect;
# 'scale = FALSE' keeps the variables on their original scale as intended.
svmfit = svm(y~., data=dat, kernel = "linear",
cost = 10, scale = FALSE)
plot(svmfit, dat)
svmfit$index
summary(svmfit)
# Smaller cost -> wider margin, more support vectors.
svmfit = svm(y~., data=dat, kernel = "linear", cost = 0.1, scale = FALSE)
plot(svmfit, dat)
svmfit$index
set.seed(1)
# Cross-validate over a range of cost values.
tune.out = tune(svm, y~., data=dat, kernel = "linear",
ranges = list(cost = c(0.001, 0.01, 0.1, 1, 5, 10, 100)))
summary(tune.out)
bestmod = tune.out$best.model
summary(bestmod)
# Generate a test set from the same distribution.
xtest = matrix(rnorm(20*2), ncol = 2)
ytest = sample(c(-1,1), 20, rep = TRUE)
xtest[ytest==1, ] = xtest[ytest==1,]+1
testdat = data.frame(x = xtest, y =as.factor(ytest))
ypred = predict(bestmod, testdat)
table(predict = ypred, truth = testdat$y)
svmfit = svm(y~., data = dat, kernel = "linear",
cost = 0.01, scale = FALSE)
ypred = predict(svmfit, testdat)
table(predict = ypred, truth = testdat$y)
# A (barely) linearly separable case.
x[y==1,] = x[y==1,] + 0.5
plot(x, col = (y+5)/2, pch = 19)
dat = data.frame(x = x, y = as.factor(y))
svmfit = svm(y~., data=dat, kernel = "linear", cost = 1e5)
summary(svmfit)
plot(svmfit, dat)
svmfit = svm(y~., data= dat, kernel = "linear", cost = 1)
summary(svmfit)
plot(svmfit, dat)
# Support Vector Machine (non-linear, radial kernel)
set.seed(1)
x = matrix(rnorm(200*2), ncol = 2)
x[1:100,] = x[1:100,] + 2
x[101:150,] = x[101:150,] -2
y = c(rep(1,150), rep(2,50))
dat = data.frame(x = x, y = as.factor(y))
plot(x, col=y)
train = sample(200, 100)
svmfit = svm(y~., data = dat[train,], kernel = "radial", gamma = 1, cost =1)
plot(svmfit, dat[train,])
summary(svmfit)
# A very large cost overfits the training data.
svmfit = svm(y~., data = dat[train,], kernel = "radial", gamma = 1, cost = 1e5)
plot(svmfit, dat[train,])
set.seed(1)
tune.out = tune(svm, y~., data=dat[train,], kernel = "radial",
ranges = list(cost = c(0.1,1,10,100,1000),
gamma = c(0.5, 1,2,3,4)))
summary(tune.out)
table(true = dat[-train, "y"], pred = predict(tune.out$best.model, newdata = dat[-train,]))
# ROC Curve
library(ROCR)
# Helper: plot an ROC curve (true positive rate vs false positive rate)
# from raw decision values and true labels.
rocplot = function(pred, truth, ...){
predob = prediction(pred, truth)
perf = performance(predob, "tpr", "fpr")
plot(perf,...)
}
svmfit.opt = svm(y~., data = dat[train,], kernel = "radial",
gamma = 2, cost = 1, decision.values = TRUE)
fitted = attributes(predict(svmfit.opt, dat[train,], decision.values = TRUE))$decision.values
par(mfrow = c(1,2))
rocplot(fitted, dat[train, "y"], main = "Training Data")
# FIX: 'decision.values = t' passed the transpose function, not a flag;
# TRUE is intended (T/TRUE elsewhere normalised to TRUE as well).
svmfit.flex = svm(y~., data = dat[train,], kernel = "radial",
gamma = 50, cost = 1, decision.values = TRUE)
fitted = attributes(predict(svmfit.flex, dat[train,], decision.values = TRUE))$decision.values
rocplot(fitted, dat[train, "y"], add=TRUE, col="red")
fitted = attributes(predict(svmfit.opt, dat[-train,], decision.values = TRUE))$decision.values
rocplot(fitted, dat[-train, "y"], main = "Test Data")
# FIX: the flexible model's curve on the "Test Data" panel must use the
# held-out rows (dat[-train,]); the original re-plotted training data.
fitted = attributes(predict(svmfit.flex, dat[-train,], decision.values = TRUE))$decision.values
rocplot(fitted, dat[-train, "y"], add=TRUE, col="red")
# SVM with more than two classes
set.seed(1)
x = rbind(x, matrix(rnorm(50*2), ncol = 2))
y = c(y, rep(0,50))
x[y==0,2] = x[y==0,2]+2
dat = data.frame(x = x, y=as.factor(y))
par(mfrow = c(1,1))
plot(x, col=(y+1))
svmfit = svm(y~., data = dat, kernel = "radial", cost =10, gamma =1)
plot(svmfit, dat)
# Application to gene Expression data (Khan dataset from ISLR)
library(ISLR)
names(Khan)
dim(Khan$xtrain)
dim(Khan$xtest)
length(Khan$ytrain)
length(Khan$ytest)
table(Khan$ytrain)
table(Khan$ytest)
dat = data.frame(x = Khan$xtrain, y=as.factor(Khan$ytrain))
out = svm(y~., data=dat, kernel = "linear", cost =10)
summary(out)
table(out$fitted, dat$y)
dat.te = data.frame(x=Khan$xtest, y=as.factor(Khan$ytest))
pred.te = predict(out, newdata = dat.te)
table(pred.te, dat.te$y)
|
#Install
# Bootstrap the Bioconductor installer if it is missing, then install
# the EnhancedVolcano plotting package.
if (!requireNamespace('BiocManager', quietly = TRUE))
install.packages('BiocManager')
BiocManager::install('EnhancedVolcano')
#Load package
library(EnhancedVolcano)
#CONVERT logFC from Seurat output (natural log) to Log2 value
# NOTE(review): the conversion described above is not performed anywhere in
# this script; the CSV is assumed to already contain a 'log2FC' column — confirm.
#Read in CSV
DEdata<-read.csv("Z:/Documents/Grad School/Data/Sequencing Projects/Fetal Retina/7_seq_May2020/DE_results/051620_noLR_ActD_Dup_nonPR(otx2-)_no1085_noSHL_noOpsinOutlier_0-4res_rod1vscone0_DE_tTest.csv", head = TRUE, sep=",")
#convert symbols from Factor to Character
DEdata$symbol <- as.character(DEdata$symbol)
#basic volcano plot. Label has to be column identity not just name of column like X and Y
# Volcano plot of adjusted p-value vs log2 fold change, labelled by gene symbol.
EnhancedVolcano(DEdata,
lab = DEdata$symbol,
x = 'log2FC',
y = 'p_val_adj',
xlim = c(-5,4),
title='0.4 Rod Cluster 0 vs Cone Cluster 1 TTest')
#Save and output
|
/DE_enhancedVolcano_051920.R
|
no_license
|
whtns/ds_scripts
|
R
| false
| false
| 900
|
r
|
#Install
# Bootstrap the Bioconductor installer if it is missing, then install
# the EnhancedVolcano plotting package.
if (!requireNamespace('BiocManager', quietly = TRUE))
install.packages('BiocManager')
BiocManager::install('EnhancedVolcano')
#Load package
library(EnhancedVolcano)
#CONVERT logFC from Seurat output (natural log) to Log2 value
# NOTE(review): the conversion described above is not performed anywhere in
# this script; the CSV is assumed to already contain a 'log2FC' column — confirm.
#Read in CSV
DEdata<-read.csv("Z:/Documents/Grad School/Data/Sequencing Projects/Fetal Retina/7_seq_May2020/DE_results/051620_noLR_ActD_Dup_nonPR(otx2-)_no1085_noSHL_noOpsinOutlier_0-4res_rod1vscone0_DE_tTest.csv", head = TRUE, sep=",")
#convert symbols from Factor to Character
DEdata$symbol <- as.character(DEdata$symbol)
#basic volcano plot. Label has to be column identity not just name of column like X and Y
# Volcano plot of adjusted p-value vs log2 fold change, labelled by gene symbol.
EnhancedVolcano(DEdata,
lab = DEdata$symbol,
x = 'log2FC',
y = 'p_val_adj',
xlim = c(-5,4),
title='0.4 Rod Cluster 0 vs Cone Cluster 1 TTest')
#Save and output
|
library(wrapr)
### Name: apply_right.default
### Title: Default apply_right implementation.
### Aliases: apply_right.default
### ** Examples
# simulate a function pointer
# S3 method used when the right-hand side of wrapr's dot pipe %.>% is a
# list: apply the function stored in the list's $f slot to the value
# arriving from the left. The remaining parameters belong to the
# apply_right generic's signature and are unused here.
apply_right.list <- function(pipe_left_arg,
pipe_right_arg,
pipe_environment,
left_arg_name,
pipe_string,
right_arg_name) {
pipe_right_arg$f(pipe_left_arg)
}
# With f$f = sin, `2 %.>% f` computes sin(2).
f <- list(f=sin)
2 %.>% f
# Rebinding f$f changes what the "function pointer" calls.
f$f <- cos
2 %.>% f
|
/data/genthat_extracted_code/wrapr/examples/apply_right.default.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 532
|
r
|
library(wrapr)
### Name: apply_right.default
### Title: Default apply_right implementation.
### Aliases: apply_right.default
### ** Examples
# simulate a function pointer
# S3 method used when the right-hand side of wrapr's dot pipe %.>% is a
# list: apply the function stored in the list's $f slot to the value
# arriving from the left. The remaining parameters belong to the
# apply_right generic's signature and are unused here.
apply_right.list <- function(pipe_left_arg,
pipe_right_arg,
pipe_environment,
left_arg_name,
pipe_string,
right_arg_name) {
pipe_right_arg$f(pipe_left_arg)
}
# With f$f = sin, `2 %.>% f` computes sin(2).
f <- list(f=sin)
2 %.>% f
# Rebinding f$f changes what the "function pointer" calls.
f$f <- cos
2 %.>% f
|
library(humanleague)
### Name: qisi
### Title: QIS-IPF
### Aliases: qisi
### ** Examples
# Marginal tables: age (5 bands) x gender (2) and ethnicity (3) x gender (2).
ageByGender = array(c(1,2,5,3,4,3,4,5,1,2), dim=c(5,2))
ethnicityByGender = array(c(4,6,5,6,4,5), dim=c(3,2))
# Flat (uninformative) seed over the full 5 x 2 x 3 population array.
seed = array(rep(1,30), dim=c(5,2,3))
# QIS-IPF: synthesise a population consistent with both marginals; the
# index lists map each marginal's axes onto the seed's dimensions.
result = qisi(seed, list(c(1,2), c(3,2)), list(ageByGender, ethnicityByGender))
|
/data/genthat_extracted_code/humanleague/examples/qisi.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 324
|
r
|
library(humanleague)
### Name: qisi
### Title: QIS-IPF
### Aliases: qisi
### ** Examples
# Marginal tables: age (5 bands) x gender (2) and ethnicity (3) x gender (2).
ageByGender = array(c(1,2,5,3,4,3,4,5,1,2), dim=c(5,2))
ethnicityByGender = array(c(4,6,5,6,4,5), dim=c(3,2))
# Flat (uninformative) seed over the full 5 x 2 x 3 population array.
seed = array(rep(1,30), dim=c(5,2,3))
# QIS-IPF: synthesise a population consistent with both marginals; the
# index lists map each marginal's axes onto the seed's dimensions.
result = qisi(seed, list(c(1,2), c(3,2)), list(ageByGender, ethnicityByGender))
|
# S3 print method for 'aep' objects: shows the AEP table with a units
# header row, the capacity factor, and the call that produced the object.
print.aep <-
function(x, ...) {
### summarising aep object information
cat("\n\tAnnual energy production\n\n")
# Build a one-row "units" header from the unit attributes of the columns.
# NOTE(review): columns 1-2 carry their own units; every remaining column
# is assumed to share the unit of column 3 — confirm against aep() output.
tbl.units <- data.frame(t(names(x$aep)))
tbl.units[,] <- paste0("[", attr(x$aep[,3], "unit"), "]")
tbl.units[,1] <- paste0("[", attr(x$aep[,1], "unit"), "]")
tbl.units[,2] <- paste0("[", attr(x$aep[,2], "unit"), "]")
# Blank out zero cells so the printed table is easier to scan.
x$aep[x$aep==0] <- ""
obj <- as.data.frame(lapply(x$aep, as.character))
names(x$aep)[1] <- "wind speed"
names(tbl.units) <- names(obj) <- names(x$aep)
row.names(tbl.units) <- " "
# Upper-case all row labels except the last one, which is kept as-is.
row.names(obj) <- c(toupper(head(row.names(x$aep), -1)), tail(row.names(x$aep), 1))
print(rbind(tbl.units, obj), quote=FALSE)
cat("\ncapacity factor:", x$capacity, "\n")
# Echo the original aep() call with all of its arguments.
cat("\ncall: aep(profile=", attr(x, "call")$profile, ", pc=", attr(x, "call")$pc, ", hub.h=", attr(x, "call")$hub.h, ", rho=", attr(x, "call")$rho, ", avail=", attr(x, "call")$avail, ", bins=c(", paste(attr(x, "call")$bins, collapse=", "), "), sectoral=", attr(x, "call")$sectoral, ", digits=c(", paste(attr(x, "call")$digits, collapse=", "), "), print=", attr(x, "call")$print, ")\n\n", sep="")
}
|
/R/print.aep.R
|
no_license
|
paulponcet/bReeze
|
R
| false
| false
| 1,092
|
r
|
# S3 print method for 'aep' objects: shows the AEP table with a units
# header row, the capacity factor, and the call that produced the object.
print.aep <-
function(x, ...) {
### summarising aep object information
cat("\n\tAnnual energy production\n\n")
# Build a one-row "units" header from the unit attributes of the columns.
# NOTE(review): columns 1-2 carry their own units; every remaining column
# is assumed to share the unit of column 3 — confirm against aep() output.
tbl.units <- data.frame(t(names(x$aep)))
tbl.units[,] <- paste0("[", attr(x$aep[,3], "unit"), "]")
tbl.units[,1] <- paste0("[", attr(x$aep[,1], "unit"), "]")
tbl.units[,2] <- paste0("[", attr(x$aep[,2], "unit"), "]")
# Blank out zero cells so the printed table is easier to scan.
x$aep[x$aep==0] <- ""
obj <- as.data.frame(lapply(x$aep, as.character))
names(x$aep)[1] <- "wind speed"
names(tbl.units) <- names(obj) <- names(x$aep)
row.names(tbl.units) <- " "
# Upper-case all row labels except the last one, which is kept as-is.
row.names(obj) <- c(toupper(head(row.names(x$aep), -1)), tail(row.names(x$aep), 1))
print(rbind(tbl.units, obj), quote=FALSE)
cat("\ncapacity factor:", x$capacity, "\n")
# Echo the original aep() call with all of its arguments.
cat("\ncall: aep(profile=", attr(x, "call")$profile, ", pc=", attr(x, "call")$pc, ", hub.h=", attr(x, "call")$hub.h, ", rho=", attr(x, "call")$rho, ", avail=", attr(x, "call")$avail, ", bins=c(", paste(attr(x, "call")$bins, collapse=", "), "), sectoral=", attr(x, "call")$sectoral, ", digits=c(", paste(attr(x, "call")$digits, collapse=", "), "), print=", attr(x, "call")$print, ")\n\n", sep="")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/process_wilcox.R
\name{process_wilcox}
\alias{process_wilcox}
\title{Process the DAF analysis through a wilcoxon test}
\usage{
process_wilcox(data, ...)
}
\arguments{
\item{data}{the output of the \code{\link{build_DAF_data}} function}
\item{...}{additional parameters of the method}
}
\value{
the output of the wilcox test for each feature
}
\description{
Process the DAF analysis through a wilcoxon test
}
|
/man/process_wilcox.Rd
|
no_license
|
leonarDubois/metaDAF
|
R
| false
| true
| 506
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/process_wilcox.R
\name{process_wilcox}
\alias{process_wilcox}
\title{Process the DAF analysis through a wilcoxon test}
\usage{
process_wilcox(data, ...)
}
\arguments{
\item{data}{the output of the \code{\link{build_DAF_data}} function}
\item{...}{additional parameters of the method}
}
\value{
the output of the wilcox test for each feature
}
\description{
Process the DAF analysis through a wilcoxon test
}
|
# Kernel PCA
# Project the Social Network Ads features onto two kernel principal
# components (RBF kernel), then fit a logistic regression in that space.
# Importing the dataset
dataset = read.csv(paste(getwd(), '/datasets/Social_Network_Ads.csv', sep = ""))
dataset = dataset[, 3:5]
# Splitting the dataset into the Training set and Test set
# install.packages('caTools')
library(caTools)
set.seed(123)
split = sample.split(dataset$Purchased, SplitRatio = 0.75)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
# Feature Scaling
training_set[, 1:2] = scale(training_set[, 1:2])
test_set[, 1:2] = scale(test_set[, 1:2])
# Applying Kernel PCA: RBF kernel, keeping 2 components
# install.packages('kernlab')
library(kernlab)
kpca = kpca(~., data = training_set[-3], kernel = 'rbfdot', features = 2)
training_set_pca = as.data.frame(predict(kpca, training_set))
training_set_pca$Purchased = training_set$Purchased
test_set_pca = as.data.frame(predict(kpca, test_set))
test_set_pca$Purchased = test_set$Purchased
# Fitting Logistic Regression to the Training set
classifier = glm(formula = Purchased ~ .,
family = binomial,
data = training_set_pca)
# Predicting the Test set results
prob_pred = predict(classifier, type = 'response', newdata = test_set_pca[-3])
y_pred = ifelse(prob_pred > 0.5, 1, 0)
# Making the Confusion Matrix
cm = table(test_set_pca[, 3], y_pred)
# Visualising the Training set results
# FIX: install.packages() ran unconditionally on every execution here;
# commented out for consistency with the other install lines in this file.
# NOTE(review): ElemStatLearn is archived on CRAN — confirm how it is
# installed before running.
# install.packages('ElemStatLearn')
library(ElemStatLearn)
set = training_set_pca
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('V1', 'V2')
prob_set = predict(classifier, type = 'response', newdata = grid_set)
y_grid = ifelse(prob_set > 0.5, 1, 0)
plot(set[, -3],
main = 'Logistic Regression (Training set)',
xlab = 'PC1', ylab = 'PC2',
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
# Visualising the Test set results
# install.packages('ElemStatLearn')
library(ElemStatLearn)
set = test_set_pca
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('V1', 'V2')
prob_set = predict(classifier, type = 'response', newdata = grid_set)
y_grid = ifelse(prob_set > 0.5, 1, 0)
# FIX: the axes here are kernel principal components, not the raw
# Age/Estimated Salary features; labels corrected to match the training plot.
plot(set[, -3],
main = 'Logistic Regression (Test set)',
xlab = 'PC1', ylab = 'PC2',
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
|
/kernel_pca.R
|
no_license
|
prathameshbhirud/R-machine-learning-code-samples-with-sample-datasets
|
R
| false
| false
| 2,831
|
r
|
# Kernel PCA
# Project the Social Network Ads features onto two kernel principal
# components (RBF kernel), then fit a logistic regression in that space.
# Importing the dataset
dataset = read.csv(paste(getwd(), '/datasets/Social_Network_Ads.csv', sep = ""))
dataset = dataset[, 3:5]
# Splitting the dataset into the Training set and Test set
# install.packages('caTools')
library(caTools)
set.seed(123)
split = sample.split(dataset$Purchased, SplitRatio = 0.75)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
# Feature Scaling
training_set[, 1:2] = scale(training_set[, 1:2])
test_set[, 1:2] = scale(test_set[, 1:2])
# Applying Kernel PCA: RBF kernel, keeping 2 components
# install.packages('kernlab')
library(kernlab)
kpca = kpca(~., data = training_set[-3], kernel = 'rbfdot', features = 2)
training_set_pca = as.data.frame(predict(kpca, training_set))
training_set_pca$Purchased = training_set$Purchased
test_set_pca = as.data.frame(predict(kpca, test_set))
test_set_pca$Purchased = test_set$Purchased
# Fitting Logistic Regression to the Training set
classifier = glm(formula = Purchased ~ .,
family = binomial,
data = training_set_pca)
# Predicting the Test set results
prob_pred = predict(classifier, type = 'response', newdata = test_set_pca[-3])
y_pred = ifelse(prob_pred > 0.5, 1, 0)
# Making the Confusion Matrix
cm = table(test_set_pca[, 3], y_pred)
# Visualising the Training set results
# FIX: install.packages() ran unconditionally on every execution here;
# commented out for consistency with the other install lines in this file.
# NOTE(review): ElemStatLearn is archived on CRAN — confirm how it is
# installed before running.
# install.packages('ElemStatLearn')
library(ElemStatLearn)
set = training_set_pca
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('V1', 'V2')
prob_set = predict(classifier, type = 'response', newdata = grid_set)
y_grid = ifelse(prob_set > 0.5, 1, 0)
plot(set[, -3],
main = 'Logistic Regression (Training set)',
xlab = 'PC1', ylab = 'PC2',
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
# Visualising the Test set results
# install.packages('ElemStatLearn')
library(ElemStatLearn)
set = test_set_pca
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('V1', 'V2')
prob_set = predict(classifier, type = 'response', newdata = grid_set)
y_grid = ifelse(prob_set > 0.5, 1, 0)
# FIX: the axes here are kernel principal components, not the raw
# Age/Estimated Salary features; labels corrected to match the training plot.
plot(set[, -3],
main = 'Logistic Regression (Test set)',
xlab = 'PC1', ylab = 'PC2',
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
|
print("TO BE DONE MANUALLY")
print("Before running the scripts make sure you have <<sqldf>> package installed ")
print( "This package will be used while loading the dataset subset <<read.csv.sql>>")
# FIX: read.csv.sql() comes from the sqldf package, which was announced
# above but never attached; load it before use.
library(sqldf)
# Read only the two days of interest straight from the raw semicolon file.
data <- read.csv.sql("household_power_consumption.txt", sql = "SELECT * from file WHERE Date in ('1/2/2007', '2/2/2007')",sep = ";", header = TRUE)
#close()
library(plyr)
# NOTE(review): as.Date() is called on "d/m/Y H:M:S" strings without a
# format argument, so this likely fails or yields NA; the plot below uses
# the DateTime column instead — confirm Date_and_Time can be dropped.
Date_and_Time <- paste(data$Date, data$Time)
Date_and_Time <-as.Date(Date_and_Time)
data$Date_and_Time <- Date_and_Time
# The following column is the one to be used by all expected graphs
data$DateTime <- strptime(paste(data$Date, data$Time, sep=","), format="%d/%m/%Y,%H:%M:%S")
joursDeSemaines <- weekdays(data$DateTime)
############ GRAPHS CONSTRUCTION###############
## Plot 2
plot(data$DateTime,data$Global_active_power,type = "l",ylab = "Global Active Power(kilowatts)",xlab = "")
dev.copy(png,file = "plot2.png")
dev.off()
print("Please get plot2.png under your working directory")
|
/plot2.R
|
no_license
|
ndekwe/ExData_Plotting1
|
R
| false
| false
| 969
|
r
|
# Plot 2: Global active power over time for 1-2 Feb 2007, saved as plot2.png.
print("TO BE DONE MANUALLY")
print("Before running the scripts make sure you have <<sqldf>> package installed ")
print( "This package will be used while loading the dataset subset <<read.csv.sql>>")
# Load only the two days of interest straight from disk via sqldf's SQL filter
data <- read.csv.sql("household_power_consumption.txt", sql = "SELECT * from file WHERE Date in ('1/2/2007', '2/2/2007')",sep = ";", header = TRUE)
#close()
library(plyr)
Date_and_Time <- paste(data$Date, data$Time)
# Fixed: as.Date() needs an explicit format for "d/m/Y H:M:S" strings;
# the default tryFormats ("%Y-%m-%d", "%Y/%m/%d") cannot parse this layout.
Date_and_Time <- as.Date(Date_and_Time, format = "%d/%m/%Y %H:%M:%S")
data$Date_and_Time <- Date_and_Time
# The following column is the one to be used by all expected graphs
data$DateTime <- strptime(paste(data$Date, data$Time, sep=","), format="%d/%m/%Y,%H:%M:%S")
joursDeSemaines <- weekdays(data$DateTime)
############ GRAPHS CONSTRUCTION###############
## Plot 2
plot(data$DateTime,data$Global_active_power,type = "l",ylab = "Global Active Power(kilowatts)",xlab = "")
# Copy the on-screen plot to a PNG file, then close the PNG device
dev.copy(png,file = "plot2.png")
dev.off()
print("Please get plot2.png under your working directory")
|
#' Correlation Plot Function
#'
#' This function returns a correlation plot for all continuous numeric variables in a given year.
#'
#' @param year Takes a 4 digit year between 1950 and 2017
#' @param data Data frame of season statistics containing a `Year` column.
#'   Defaults to the package dataset `Seasons_Stats_NBA`, so existing calls
#'   are unchanged; passing another data frame generalizes the plot to any
#'   compatible statistics table.
#' @keywords NBA basketball correlation
#' @export
#' @examples
#' corr_plot(1987)
corr_plot <- function(year, data = Seasons_Stats_NBA){
  # Guard against vectorised or non-numeric input before filtering.
  stopifnot(is.numeric(year), length(year) == 1)
  corr_data <- dplyr::filter(data, `Year`==year)
  # Keep only double-typed (continuous) columns for the correlation matrix.
  corr_data <- purrr::keep(corr_data, is.double)
  # Pairwise Pearson correlations using only complete observations.
  corr_data <- cor(corr_data, method = "pearson", use = "complete.obs")
  corrplot::corrplot(corr_data, method="circle", tl.col="black")
}
|
/R/corr_plot.R
|
no_license
|
TheStreett/NBA.Search
|
R
| false
| false
| 554
|
r
|
#' Correlation Plot Function
#'
#' Draws a correlation plot of every continuous numeric variable in the NBA
#' season statistics for a single season.
#'
#' @param year Takes a 4 digit year between 1950 and 2017
#' @keywords NBA basketball correlation
#' @export
#' @examples
#' corr_plot(1987)
corr_plot <- function(year){
  # Rows for the requested season, continuous columns only.
  season_rows <- dplyr::filter(Seasons_Stats_NBA, `Year` == year)
  double_cols <- purrr::keep(season_rows, is.double)
  # Pearson correlations over complete observations, rendered as circles.
  corr_matrix <- cor(double_cols, method = "pearson", use = "complete.obs")
  corrplot::corrplot(corr_matrix, method = "circle", tl.col = "black")
}
|
##Name- Janki Patel
##CWID - 10457365
##subject - Knowledge discovery and data mining
##Class- CS513-A
##MidTerm_Exam
# Clear the workspace, then load the dataset chosen interactively by the user.
rm(list=ls())
ChooseFile<-file.choose()
Covid19<-read.csv(ChooseFile)
View(Covid19)
## Question 2(I): Summary
summary(Covid19)
## Question 2(II): Missing Values
MissingValuesCheck <- is.na(Covid19)                   # logical matrix of NA positions
MissingValues <- Covid19[!complete.cases(Covid19),]    # rows containing any NA
##Question 2(III): Generate Frequency Table of Infected vs Marital Status
frequency <- table(Covid19$Infected,Covid19$MaritalStatus)
print(frequency)
##Question 2(IV): Scatter plot of Age, Marital Status and MonthAtHospital
# Only close a graphics device if one is open; a bare dev.off() errors
# ("cannot shut down device 1") when no device exists yet.
if (dev.cur() > 1) dev.off()
pairs(Covid19[, c("Age", "MaritalStatus", "MonthAtHospital")], upper.panel = NULL)
title("Scatter Plot")
##Question 2(v): Box plot of Age, Marital Status and MonthAtHospital
boxplot(Covid19[, c("Age", "MaritalStatus", "MonthAtHospital")])
title("Box Plot")
##Question 2(VI): Replace missing values of cases with mean cases
# Fixed: the original indexed the whole data frame with a row-wise logical
# vector (Covid19[is.na(Covid19[,c("Cases")])] <- ...), which targets the
# wrong cells; impute within the Cases column only.
Covid19$Cases[is.na(Covid19$Cases)] <- mean(Covid19$Cases, na.rm = TRUE)
View(Covid19)
/kddAssignment/MidExam/Midterm_Exam__Qus_2.r
|
no_license
|
janki1997/KDD
|
R
| false
| false
| 1,038
|
r
|
##Name- Janki Patel
##CWID - 10457365
##subject - Knowledge discovery and data mining
##Class- CS513-A
##MidTerm_Exam
# Clear the workspace, then load the dataset chosen interactively by the user.
rm(list=ls())
ChooseFile<-file.choose()
Covid19<-read.csv(ChooseFile)
View(Covid19)
## Question 2(I): Summary
summary(Covid19)
## Question 2(II): Missing Values
MissingValuesCheck <- is.na(Covid19)                   # logical matrix of NA positions
MissingValues <- Covid19[!complete.cases(Covid19),]    # rows containing any NA
##Question 2(III): Generate Frequency Table of Infected vs Marital Status
frequency <- table(Covid19$Infected,Covid19$MaritalStatus)
print(frequency)
##Question 2(IV): Scatter plot of Age, Marital Status and MonthAtHospital
# Only close a graphics device if one is open; a bare dev.off() errors
# ("cannot shut down device 1") when no device exists yet.
if (dev.cur() > 1) dev.off()
pairs(Covid19[, c("Age", "MaritalStatus", "MonthAtHospital")], upper.panel = NULL)
title("Scatter Plot")
##Question 2(v): Box plot of Age, Marital Status and MonthAtHospital
boxplot(Covid19[, c("Age", "MaritalStatus", "MonthAtHospital")])
title("Box Plot")
##Question 2(VI): Replace missing values of cases with mean cases
# Fixed: the original indexed the whole data frame with a row-wise logical
# vector (Covid19[is.na(Covid19[,c("Cases")])] <- ...), which targets the
# wrong cells; impute within the Cases column only.
Covid19$Cases[is.na(Covid19$Cases)] <- mean(Covid19$Cases, na.rm = TRUE)
View(Covid19)
|
## ---------------------------------------------------------------------------
## Bivariate and partial (age/IQ-corrected) Pearson correlations between the
## impulsivity questionnaires (BIS-11 total, ASRS sum) and task-derived
## measures (xi and pickedD; short-horizon SH, long-horizon LH, and their
## mean). Formatted result strings are written to results_totalscales.doc.
## make_string() is a project-local formatter loaded below; presumably it
## turns a correlation result into a reporting string -- see make_string.R.
## ---------------------------------------------------------------------------
source('~/MFweb/data_analysis/10_stats/make_string.R')
library(car)
library(tidyverse)
library(ggpubr)
library(rstatix)
library(readxl)
library(lsr)
library(effectsize)
library(Hmisc)
library("PerformanceAnalytics")
library(ppcor)
dataMFweb <- read_excel("~/MFweb/data_analysis/10_stats/web_data_completed.xlsx")
# Take only subset: concatenate the ones we want
data_tmp_all <- subset(dataMFweb , select=c("User", "exclude", "age", "gender", "IQscore", "BIS11_TotalScore", "ASRS_Sum", "xi_SH", "xi_LH", "pickedD_SH", "pickedD_LH"))
# Drop participants flagged for exclusion (exclude == 1)
data_tmp <- subset(data_tmp_all, exclude!=1)
# Compute mean of the short-horizon (SH) and long-horizon (LH) variants
data_tmp$xi_mean = (data_tmp$xi_SH + data_tmp$xi_LH)/2
data_tmp$pickedD_mean = (data_tmp$pickedD_SH + data_tmp$pickedD_LH)/2
# Remove Nans
# data_ = data_tmp[complete.cases(data_tmp), ]
# Correlation matrix over the numeric columns of data_tmp
# (positions 3,5:13 -- gender at position 4 is skipped)
my_data <- data_tmp[, c(3,5,6,7,8,9,10,11,12,13)]
res <- cor(my_data, use = "complete.obs")
round(res, 2)
# significance: rcorr() gives the correlation matrix plus p-values
res2 <- rcorr(as.matrix(my_data))
res2
# matrix of pairwise scatterplots, histograms and correlations
chart.Correlation(my_data, histogram=TRUE, pch=19)
# correct for age and IQ
my_data = my_data[complete.cases(my_data), ] # remove nans
y.data=data.frame(my_data)
# BIS: for each task measure, run the partial correlation controlling for
# age and IQscore (pcor.test) and the plain bivariate Pearson correlation
# (cor.test); make_string() formats each result (resNp = partial,
# resNb = bivariate).
res=pcor.test(y.data$pickedD_mean,y.data$BIS11_TotalScore,y.data[,c("age","IQscore")])
res1p=make_string(res, 'BIS', 'D_mean', 'partial')
res <- cor.test(y.data$pickedD_mean, y.data$BIS11_TotalScore,method = "pearson")
res1b=make_string(res, 'BIS', 'D_mean', 'bivariate')
res=pcor.test(y.data$pickedD_SH,y.data$BIS11_TotalScore,y.data[,c("age","IQscore")])
res2p=make_string(res, 'BIS', 'D_SH', 'partial')
res <- cor.test(y.data$pickedD_SH, y.data$BIS11_TotalScore,method = "pearson")
res2b=make_string(res, 'BIS', 'D_SH', 'bivariate')
res=pcor.test(y.data$pickedD_LH,y.data$BIS11_TotalScore,y.data[,c("age","IQscore")])
res3p=make_string(res, 'BIS', 'D_LH', 'partial')
res <- cor.test(y.data$pickedD_LH, y.data$BIS11_TotalScore,method = "pearson")
res3b=make_string(res, 'BIS', 'D_LH', 'bivariate')
res=pcor.test(y.data$xi_mean,y.data$BIS11_TotalScore,y.data[,c("age","IQscore")])
res4p=make_string(res, 'BIS', 'xi_mean', 'partial')
res <- cor.test(y.data$xi_mean, y.data$BIS11_TotalScore,method = "pearson")
res4b=make_string(res, 'BIS', 'xi_mean', 'bivariate')
res=pcor.test(y.data$xi_SH,y.data$BIS11_TotalScore,y.data[,c("age","IQscore")])
res5p=make_string(res, 'BIS', 'xi_SH', 'partial')
res <- cor.test(y.data$xi_SH, y.data$BIS11_TotalScore,method = "pearson")
res5b=make_string(res, 'BIS', 'xi_SH', 'bivariate')
res=pcor.test(y.data$xi_LH,y.data$BIS11_TotalScore,y.data[,c("age","IQscore")])
res6p=make_string(res, 'BIS', 'xi_LH', 'partial')
res <- cor.test(y.data$xi_LH, y.data$BIS11_TotalScore,method = "pearson")
res6b=make_string(res, 'BIS', 'xi_LH', 'bivariate')
# ASRS: same pattern as the BIS section, against the ASRS total score
res=pcor.test(y.data$pickedD_mean,y.data$ASRS_Sum,y.data[,c("age","IQscore")])
res7p=make_string(res, 'ASRS', 'D_mean', 'partial')
res <- cor.test(y.data$pickedD_mean, y.data$ASRS_Sum, method = "pearson")
res7b=make_string(res, 'ASRS', 'D_mean', 'bivariate')
res=pcor.test(y.data$pickedD_SH,y.data$ASRS_Sum,y.data[,c("age","IQscore")])
res8p=make_string(res, 'ASRS', 'D_SH', 'partial')
res <- cor.test(y.data$pickedD_SH, y.data$ASRS_Sum, method = "pearson")
res8b=make_string(res, 'ASRS', 'D_SH', 'bivariate')
res=pcor.test(y.data$pickedD_LH,y.data$ASRS_Sum,y.data[,c("age","IQscore")])
res9p=make_string(res, 'ASRS', 'D_LH', 'partial')
res <- cor.test(y.data$pickedD_LH, y.data$ASRS_Sum, method = "pearson")
res9b=make_string(res, 'ASRS', 'D_LH', 'bivariate')
res=pcor.test(y.data$xi_mean,y.data$ASRS_Sum,y.data[,c("age","IQscore")])
res10p=make_string(res, 'ASRS', 'xi_mean', 'partial')
res <- cor.test(y.data$xi_mean, y.data$ASRS_Sum, method = "pearson")
res10b=make_string(res, 'ASRS', 'xi_mean', 'bivariate')
res=pcor.test(y.data$xi_SH,y.data$ASRS_Sum,y.data[,c("age","IQscore")])
res11p=make_string(res, 'ASRS', 'xi_SH', 'partial')
res <- cor.test(y.data$xi_SH, y.data$ASRS_Sum, method = "pearson")
res11b=make_string(res, 'ASRS', 'xi_SH', 'bivariate')
res=pcor.test(y.data$xi_LH,y.data$ASRS_Sum,y.data[,c("age","IQscore")])
res12p=make_string(res, 'ASRS', 'xi_LH', 'partial')
res <- cor.test(y.data$xi_LH, y.data$ASRS_Sum, method = "pearson")
res12b=make_string(res, 'ASRS', 'xi_LH', 'bivariate')
#output_txt1=c(res1b,'',res2b,'',res3b,'', res4b,'',res5b,'',res6b,'', res7b,'',res8b,'',res9b,'', res10b,'',res11b,'',res12b,'','',
#res1p,'',res2p,'',res3p,'', res4p,'',res5p,'',res6p,'', res7p,'',res8p,'',res9p,'', res10p,'',res11p,'',res12p,'')
# NOTE(review): only the mean-measure results (res1*, res4*, res7*, res10*)
# are reported below; the SH/LH-specific strings are computed but unused.
output_txt_BIS=c(res1b,'',res1p,'', res4b,'', res4p)
output_txt_ASRS=c(res7b,'',res7p,'', res10b,'', res10p)
#output_txt4=c(res1b,'',res4b,'',res7b,'', res10b,'','',
#res1p,'',res4p,'',res7p,'', res10p, '')
all_text = c(
'', '',
'BIS total score:','', output_txt_BIS,'','', '',
'ASRS total score:','', output_txt_ASRS
)
# Write the assembled report lines to the .doc output file
fileConn<-file("~/MFweb/data_analysis/10_stats/biv_part_corr/results_totalscales.doc")
writeLines(all_text, fileConn)
close(fileConn)
|
/10_stats/biv_part_corr/main_corr_biv_partial_totalscore.R
|
no_license
|
MagDub/MFweb-data_analysis
|
R
| false
| false
| 5,262
|
r
|
## ---------------------------------------------------------------------------
## Correlates impulsivity questionnaires (BIS-11 total, ASRS sum) with
## task-derived measures (xi, pickedD; SH = short horizon, LH = long horizon,
## plus their mean), both bivariately (cor.test) and partially while
## controlling for age and IQ (pcor.test). Formatted strings are written
## to results_totalscales.doc.
## make_string() is a project-local formatter loaded below; presumably it
## turns a correlation result into a reporting string -- see make_string.R.
## ---------------------------------------------------------------------------
source('~/MFweb/data_analysis/10_stats/make_string.R')
library(car)
library(tidyverse)
library(ggpubr)
library(rstatix)
library(readxl)
library(lsr)
library(effectsize)
library(Hmisc)
library("PerformanceAnalytics")
library(ppcor)
dataMFweb <- read_excel("~/MFweb/data_analysis/10_stats/web_data_completed.xlsx")
# Take only subset: concatenate the ones we want
data_tmp_all <- subset(dataMFweb , select=c("User", "exclude", "age", "gender", "IQscore", "BIS11_TotalScore", "ASRS_Sum", "xi_SH", "xi_LH", "pickedD_SH", "pickedD_LH"))
# Drop participants flagged for exclusion (exclude == 1)
data_tmp <- subset(data_tmp_all, exclude!=1)
# Compute mean of the short-horizon (SH) and long-horizon (LH) variants
data_tmp$xi_mean = (data_tmp$xi_SH + data_tmp$xi_LH)/2
data_tmp$pickedD_mean = (data_tmp$pickedD_SH + data_tmp$pickedD_LH)/2
# Remove Nans
# data_ = data_tmp[complete.cases(data_tmp), ]
# Correlation matrix over the numeric columns of data_tmp
# (positions 3,5:13 -- gender at position 4 is skipped)
my_data <- data_tmp[, c(3,5,6,7,8,9,10,11,12,13)]
res <- cor(my_data, use = "complete.obs")
round(res, 2)
# significance: rcorr() gives the correlation matrix plus p-values
res2 <- rcorr(as.matrix(my_data))
res2
# matrix of pairwise scatterplots, histograms and correlations
chart.Correlation(my_data, histogram=TRUE, pch=19)
# correct for age and IQ
my_data = my_data[complete.cases(my_data), ] # remove nans
y.data=data.frame(my_data)
# BIS: for each task measure, partial correlation controlling for age and
# IQscore, then the plain bivariate Pearson correlation; make_string()
# formats each result (resNp = partial, resNb = bivariate).
res=pcor.test(y.data$pickedD_mean,y.data$BIS11_TotalScore,y.data[,c("age","IQscore")])
res1p=make_string(res, 'BIS', 'D_mean', 'partial')
res <- cor.test(y.data$pickedD_mean, y.data$BIS11_TotalScore,method = "pearson")
res1b=make_string(res, 'BIS', 'D_mean', 'bivariate')
res=pcor.test(y.data$pickedD_SH,y.data$BIS11_TotalScore,y.data[,c("age","IQscore")])
res2p=make_string(res, 'BIS', 'D_SH', 'partial')
res <- cor.test(y.data$pickedD_SH, y.data$BIS11_TotalScore,method = "pearson")
res2b=make_string(res, 'BIS', 'D_SH', 'bivariate')
res=pcor.test(y.data$pickedD_LH,y.data$BIS11_TotalScore,y.data[,c("age","IQscore")])
res3p=make_string(res, 'BIS', 'D_LH', 'partial')
res <- cor.test(y.data$pickedD_LH, y.data$BIS11_TotalScore,method = "pearson")
res3b=make_string(res, 'BIS', 'D_LH', 'bivariate')
res=pcor.test(y.data$xi_mean,y.data$BIS11_TotalScore,y.data[,c("age","IQscore")])
res4p=make_string(res, 'BIS', 'xi_mean', 'partial')
res <- cor.test(y.data$xi_mean, y.data$BIS11_TotalScore,method = "pearson")
res4b=make_string(res, 'BIS', 'xi_mean', 'bivariate')
res=pcor.test(y.data$xi_SH,y.data$BIS11_TotalScore,y.data[,c("age","IQscore")])
res5p=make_string(res, 'BIS', 'xi_SH', 'partial')
res <- cor.test(y.data$xi_SH, y.data$BIS11_TotalScore,method = "pearson")
res5b=make_string(res, 'BIS', 'xi_SH', 'bivariate')
res=pcor.test(y.data$xi_LH,y.data$BIS11_TotalScore,y.data[,c("age","IQscore")])
res6p=make_string(res, 'BIS', 'xi_LH', 'partial')
res <- cor.test(y.data$xi_LH, y.data$BIS11_TotalScore,method = "pearson")
res6b=make_string(res, 'BIS', 'xi_LH', 'bivariate')
# ASRS: same pattern as the BIS section, against the ASRS total score
res=pcor.test(y.data$pickedD_mean,y.data$ASRS_Sum,y.data[,c("age","IQscore")])
res7p=make_string(res, 'ASRS', 'D_mean', 'partial')
res <- cor.test(y.data$pickedD_mean, y.data$ASRS_Sum, method = "pearson")
res7b=make_string(res, 'ASRS', 'D_mean', 'bivariate')
res=pcor.test(y.data$pickedD_SH,y.data$ASRS_Sum,y.data[,c("age","IQscore")])
res8p=make_string(res, 'ASRS', 'D_SH', 'partial')
res <- cor.test(y.data$pickedD_SH, y.data$ASRS_Sum, method = "pearson")
res8b=make_string(res, 'ASRS', 'D_SH', 'bivariate')
res=pcor.test(y.data$pickedD_LH,y.data$ASRS_Sum,y.data[,c("age","IQscore")])
res9p=make_string(res, 'ASRS', 'D_LH', 'partial')
res <- cor.test(y.data$pickedD_LH, y.data$ASRS_Sum, method = "pearson")
res9b=make_string(res, 'ASRS', 'D_LH', 'bivariate')
res=pcor.test(y.data$xi_mean,y.data$ASRS_Sum,y.data[,c("age","IQscore")])
res10p=make_string(res, 'ASRS', 'xi_mean', 'partial')
res <- cor.test(y.data$xi_mean, y.data$ASRS_Sum, method = "pearson")
res10b=make_string(res, 'ASRS', 'xi_mean', 'bivariate')
res=pcor.test(y.data$xi_SH,y.data$ASRS_Sum,y.data[,c("age","IQscore")])
res11p=make_string(res, 'ASRS', 'xi_SH', 'partial')
res <- cor.test(y.data$xi_SH, y.data$ASRS_Sum, method = "pearson")
res11b=make_string(res, 'ASRS', 'xi_SH', 'bivariate')
res=pcor.test(y.data$xi_LH,y.data$ASRS_Sum,y.data[,c("age","IQscore")])
res12p=make_string(res, 'ASRS', 'xi_LH', 'partial')
res <- cor.test(y.data$xi_LH, y.data$ASRS_Sum, method = "pearson")
res12b=make_string(res, 'ASRS', 'xi_LH', 'bivariate')
#output_txt1=c(res1b,'',res2b,'',res3b,'', res4b,'',res5b,'',res6b,'', res7b,'',res8b,'',res9b,'', res10b,'',res11b,'',res12b,'','',
#res1p,'',res2p,'',res3p,'', res4p,'',res5p,'',res6p,'', res7p,'',res8p,'',res9p,'', res10p,'',res11p,'',res12p,'')
# NOTE(review): only the mean-measure results (res1*, res4*, res7*, res10*)
# are reported below; the SH/LH-specific strings are computed but unused.
output_txt_BIS=c(res1b,'',res1p,'', res4b,'', res4p)
output_txt_ASRS=c(res7b,'',res7p,'', res10b,'', res10p)
#output_txt4=c(res1b,'',res4b,'',res7b,'', res10b,'','',
#res1p,'',res4p,'',res7p,'', res10p, '')
all_text = c(
'', '',
'BIS total score:','', output_txt_BIS,'','', '',
'ASRS total score:','', output_txt_ASRS
)
# Write the assembled report lines to the .doc output file
fileConn<-file("~/MFweb/data_analysis/10_stats/biv_part_corr/results_totalscales.doc")
writeLines(all_text, fileConn)
close(fileConn)
|
# ---------------------------------------------------------------------------
# Run Conefor (PC index) over every habitat patch network, once per time step:
#   t0: original distance files, with node-removal importance (-removal)
#   t1: adjusted distance files (distances_adj2_*), PC only
# Refactored from 28 copy-pasted shell() calls into one data-driven loop so
# the patch list and distance thresholds live in a single place.
# NOTE: shell() is Windows-only, matching the original script.
# ---------------------------------------------------------------------------
conefor_root <- "C:/Thesis_analysis/Development_corridors/conefor/run_1/inputs"
conefor_exe <- "conefor_1_0_86_bcc_x86.exe"
# Patch IDs paired with their -confProb distance threshold. Thresholds are
# kept as strings so the command line reproduces the original decimal text
# exactly (no numeric re-formatting).
patches <- data.frame(
  id = c("22678839_1", "22679545_1", "22680170_1", "22680659_1",
         "22680677_1", "22680689_1", "22680784_1", "22680902_1",
         "22680954_1", "22680996_1", "22681740_1", "22682609_1",
         "22682723_1", "22683165_2"),
  dist = c("1682.188314", "4227.416479", "23146.34595", "1214.121576",
           "1895.127218", "1311.523519", "216.899061", "1425.506622",
           "714.182781", "2076.057859", "613.862355", "3300.902964",
           "6002.160536", "1251.446509"),
  stringsAsFactors = FALSE
)
# Run Conefor for one time step. `step` is the input sub-directory (t0/t1),
# `con_prefix` the distance-file prefix, `extra_flags` e.g. " -removal" or "".
run_conefor_step <- function(step, con_prefix, extra_flags) {
  dir <- paste(conefor_root, step, sep = "/")
  setwd(dir)  # Conefor writes its output files to the current directory
  for (i in seq_len(nrow(patches))) {
    cmd <- sprintf(
      "%s/%s -nodeFile nodes_adj_%s.txt -conFile %s%s.txt -t dist notall -confProb %s 0.36788 -PC%s -prefix %s",
      dir, conefor_exe, patches$id[i], con_prefix, patches$id[i],
      patches$dist[i], extra_flags, patches$id[i])
    shell(cmd)
  }
}
# for t0
run_conefor_step("t0", "distances_", " -removal")
#for t1
run_conefor_step("t1", "distances_adj2_", "")
|
/connefor_run.r
|
no_license
|
Konstant1na/Development_corridors
|
R
| false
| false
| 7,069
|
r
|
# ---------------------------------------------------------------------------
# Run Conefor (PC index) over every habitat patch network, once per time step:
#   t0: original distance files, with node-removal importance (-removal)
#   t1: adjusted distance files (distances_adj2_*), PC only
# Refactored from 28 copy-pasted shell() calls into one data-driven loop so
# the patch list and distance thresholds live in a single place.
# NOTE: shell() is Windows-only, matching the original script.
# ---------------------------------------------------------------------------
conefor_root <- "C:/Thesis_analysis/Development_corridors/conefor/run_1/inputs"
conefor_exe <- "conefor_1_0_86_bcc_x86.exe"
# Patch IDs paired with their -confProb distance threshold. Thresholds are
# kept as strings so the command line reproduces the original decimal text
# exactly (no numeric re-formatting).
patches <- data.frame(
  id = c("22678839_1", "22679545_1", "22680170_1", "22680659_1",
         "22680677_1", "22680689_1", "22680784_1", "22680902_1",
         "22680954_1", "22680996_1", "22681740_1", "22682609_1",
         "22682723_1", "22683165_2"),
  dist = c("1682.188314", "4227.416479", "23146.34595", "1214.121576",
           "1895.127218", "1311.523519", "216.899061", "1425.506622",
           "714.182781", "2076.057859", "613.862355", "3300.902964",
           "6002.160536", "1251.446509"),
  stringsAsFactors = FALSE
)
# Run Conefor for one time step. `step` is the input sub-directory (t0/t1),
# `con_prefix` the distance-file prefix, `extra_flags` e.g. " -removal" or "".
run_conefor_step <- function(step, con_prefix, extra_flags) {
  dir <- paste(conefor_root, step, sep = "/")
  setwd(dir)  # Conefor writes its output files to the current directory
  for (i in seq_len(nrow(patches))) {
    cmd <- sprintf(
      "%s/%s -nodeFile nodes_adj_%s.txt -conFile %s%s.txt -t dist notall -confProb %s 0.36788 -PC%s -prefix %s",
      dir, conefor_exe, patches$id[i], con_prefix, patches$id[i],
      patches$dist[i], extra_flags, patches$id[i])
    shell(cmd)
  }
}
# for t0
run_conefor_step("t0", "distances_", " -removal")
#for t1
run_conefor_step("t1", "distances_adj2_", "")
|
### plotBrier.R ---
#----------------------------------------------------------------------
## author: Thomas Alexander Gerds
## created: Feb 23 2017 (11:07)
## Version:
## last-updated: Dec 6 2019 (11:18)
## By: Thomas Alexander Gerds
## Update #: 73
#----------------------------------------------------------------------
##
### Commentary:
##
### Change Log:
#----------------------------------------------------------------------
##
### Code:
##' Plot Brier score curves
##' @title Plot Brier curve
#' @param x Object obtained with \code{Score}
#' @param models Choice of models to plot
#' @param which Character. Either \code{"score"} to show AUC or
#' \code{"contrasts"} to show differences between AUC.
#' @param xlim Limits for x-axis
#' @param ylim Limits for y-axis
#' @param xlab Label for x-axis
#' @param ylab Label for y-axis
#' @param col line color
#' @param lwd line width
#' @param lty line style
#' @param cex point size
#' @param pch point style
#' @param type line type
#' @param axes Logical. If \code{TRUE} draw axes.
#' @param percent Logical. If \code{TRUE} scale y-axis in percent.
#' @param conf.int Logical. If \code{TRUE} draw confidence shadows.
#' @param legend Logical. If \code{TRUE} draw legend.
#' @param ... Used for additional control of the subroutines: plot,
#' axis, lines, legend. See \code{\link{SmartControl}}.
##' @examples
##' # survival
##' library(survival)
##' library(prodlim)
##' ds1=sampleData(40,outcome="survival")
##' ds2=sampleData(40,outcome="survival")
##' f1 <- coxph(Surv(time,event)~X1+X3+X5+X7+X9,data=ds1,x=TRUE)
##' f2 <- coxph(Surv(time,event)~X2+X4+X6+X8+X10,data=ds1,x=TRUE)
##' xscore <- Score(list(f1,f2),formula=Hist(time,event)~1,data=ds2,times=0:12,metrics="brier")
##' plotBrier(xscore)
#' @export
#'
#'
plotBrier <- function(x,
                      models,
                      which="score",
                      xlim,
                      ylim,
                      xlab,
                      ylab,
                      col,
                      lwd,
                      lty=1,
                      cex=1,
                      pch=1,
                      type="l",
                      axes=1L,
                      percent=1L,
                      conf.int=0L,
                      legend=1L,
                      ...){
    ## declare data.table NSE column names to silence R CMD check notes
    times=contrast=model=se=Brier=lower=upper=delta.Brier=reference=NULL
    ## colorblind-friendly palette (black-based variant)
    cbbPalette <- c("#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
    which <- tolower(which[1])
    ## copy() so the by-reference edits below do not modify the user's Score object
    pframe <- switch(which,
                     "score"={copy(x$Brier$score)},
                     "ipa"={copy(x$Brier$score)},
                     "contrasts"={copy(x$Brier$contrasts)},
                     {stop("argument 'which' has to be either 'score' for Brier, 'ipa' for IPA, or 'contrasts' for differences in Brier.")})
    if (length(pframe$times)<2) stop(paste("Need at least two time points for plotting time-dependent Brier. Object has only ",length(pframe$times),"times"))
    if (!missing(models)) pframe <- pframe[model %in% models]
    if (which=="score"){
        mm <- unique(pframe$model)
        if ("se"%in%names(pframe)){
            ## no confidence band where the standard error is missing or zero
            ## (repaired corrupted condition 'is.na(se)\u00d7==0' -- TODO confirm
            ## against upstream riskRegression source)
            pframe[is.na(se)|se==0,lower:=0]
            pframe[is.na(se)|se==0,upper:=0]
        }
    }else{
        if (which =="ipa"){
            ## the IPA of the null model is zero by construction: drop it
            mm <- unique(pframe[model!="Null model",model[1]])
            pframe <- pframe[model%in%mm]
            if ("se"%in%names(pframe)){
                pframe[is.na(se)|se==0,lower:=0]
                pframe[is.na(se)|se==0,upper:=0]
            }
        }else{
            ## contrasts: one curve per model-reference pair
            pframe[,contrast:=factor(paste(model,reference,sep=" - "))]
            mm <- unique(pframe$contrast)
            if ("se"%in%names(pframe)){
                pframe[is.na(se)|se==0,lower:=0]
                pframe[is.na(se)|se==0,upper:=0]
            }
        }
    }
    lenmm <- length(mm)
    if(missing(xlab)) xlab <- "Time"
    if(missing(ylab)) ylab <- switch(which,
                                     "score"="Brier score",
                                     "ipa"="Index of prediction accuracy",
                                     expression(paste(Delta, " Brier score")))
    ## recycle graphical parameters to one value per curve, named by curve
    if(missing(col)) col <- rep(cbbPalette,length.out=lenmm)
    names(col) <- mm
    if(missing(lwd)) lwd <- 2
    lwd <- rep(lwd,length.out=lenmm)
    names(lwd) <- mm
    pch <- rep(pch,length.out=lenmm)
    names(pch) <- mm
    type <- rep(type,length.out=lenmm)
    names(type) <- mm
    if(missing(lty)) lty <- 1 # fixed: previously tested missing(lwd)
    lty <- rep(lty,length.out=lenmm)
    names(lty) <- mm
    if (missing(xlim)) xlim <- pframe[,range(times)]
    if (missing(ylim)){
        if (which%in%c("score","ipa")) {
            ylim <- c(0,.3)
            if (which=="ipa"){
                ## fixed: na.rm=0 meant NA values propagated into the maximum
                ylim <- c(0,max(pframe$IPA,na.rm=TRUE))
            }
            axis2.DefaultArgs <- list(side=2,
                                      las=2,
                                      at=seq(0,ylim[2],ylim[2]/4),
                                      mgp=c(4,1,0))
        } else{
            ylim <- c(floor(10*min(pframe$lower))/10,ceiling(10*max(pframe$upper))/10)
            yat <- seq(ylim[1],ylim[2],0.05)
            ## round away floating point noise from seq(), e.g. 1.1e-16 instead of 0
            yat <- round(100*yat)/100
            axis2.DefaultArgs <- list(side=2,las=2,at=yat,mgp=c(4,1,0))
        }
    }else{
        axis2.DefaultArgs <- list(side=2,las=2,at=seq(ylim[1],ylim[2],abs(ylim[2]-ylim[1])/4),mgp=c(4,1,0))
    }
    lines.DefaultArgs <- list(pch=pch,type=type,cex=cex,lwd=lwd,col=col,lty=lty)
    axis1.DefaultArgs <- list(side=1,las=1,at=seq(0,xlim[2],xlim[2]/4))
    if (which%in%c("score","ipa")){
        legend.DefaultArgs <- list(legend=mm,lwd=lwd,col=col,lty=lty,cex=cex,bty="n",y.intersp=1.3,x="topleft")
        if (which=="ipa") legend.DefaultArgs$x="bottomleft"
    } else{
        legend.DefaultArgs <- list(legend=as.character(unique(pframe$contrast)),lwd=lwd,col=col,lty=lty,cex=cex,bty="n",y.intersp=1.3,x="topleft")
    }
    plot.DefaultArgs <- list(x=0,y=0,type = "n",ylim = ylim,xlim = xlim,ylab=ylab,xlab=xlab)
    ## merge user-supplied ... arguments with the defaults assembled above
    control <- prodlim::SmartControl(call= list(...),
                                     keys=c("plot","lines","legend","axis1","axis2"),
                                     ignore=NULL,
                                     ignore.case=TRUE,
                                     defaults=list("plot"=plot.DefaultArgs,
                                                   "lines"=lines.DefaultArgs,
                                                   "legend"=legend.DefaultArgs,
                                                   "axis1"=axis1.DefaultArgs,
                                                   "axis2"=axis2.DefaultArgs),
                                     forced=list("plot"=list(axes=FALSE),
                                                 "axis1"=list(side=1)),
                                     verbose=TRUE)
    if (which%in%c("score","ipa")){
        ## one Brier/IPA curve per model
        do.call("plot",control$plot)
        pframe[,{thisline <- control$lines # explicit key (was partial match 'control$line')
            thisline$col=thisline$col[[as.character(model[1])]]
            thisline$lwd=thisline$lwd[[as.character(model[1])]]
            thisline$lty=thisline$lty[[as.character(model[1])]]
            thisline$pch=thisline$pch[[as.character(model[1])]]
            thisline$type=thisline$type[[as.character(model[1])]]
            thisline$x=times
            if (which =="ipa"){
                thisline$y=IPA
            }else{
                thisline$y=Brier
            }
            do.call("lines",thisline)},by=model]
    }else{
        ## one difference curve per contrast
        do.call("plot",control$plot)
        pframe[,{thisline <- control$lines;
            thisline$col=thisline$col[[as.character(contrast[1])]];
            thisline$lwd=thisline$lwd[[as.character(contrast[1])]];
            thisline$lty=thisline$lty[[as.character(contrast[1])]];
            thisline$pch=thisline$pch[[as.character(contrast[1])]];
            thisline$type=thisline$type[[as.character(contrast[1])]];
            thisline$x=times;
            thisline$y=delta.Brier;
            do.call("lines",thisline)},by=contrast]
    }
    ## legend (skipped when legend=FALSE)
    if (!(is.logical(legend[[1]]) && legend[[1]]==FALSE)){
        do.call("legend",control$legend)
    }
    ## confidence shadows
    if (conf.int==TRUE){
        dimcol <- sapply(col,function(cc){prodlim::dimColor(cc)})
        names(dimcol) <- names(col)
        if (which=="score"){
            pframe[,polygon(x=c(times,rev(times)),y=c(lower,rev(upper)),col=dimcol[[as.character(model)]],border=NA),by=model]
        }else{
            pframe[,polygon(x=c(times,rev(times)),y=c(lower,rev(upper)),col=dimcol[[as.character(contrast)]],border=NA),by=contrast]
        }
    }
    if (axes){
        ## fixed: percent labels now respect the 'percent' argument
        if (percent) control$axis2$labels <- paste(100*control$axis2$at,"%")
        do.call("axis",control$axis1)
        do.call("axis",control$axis2)
    }
    invisible(pframe)
}
#----------------------------------------------------------------------
### plotBrier.R ends here
|
/R/plotBrier.R
|
no_license
|
LoSerigne/riskRegression
|
R
| false
| false
| 11,217
|
r
|
### plotBrier.R ---
#----------------------------------------------------------------------
## author: Thomas Alexander Gerds
## created: Feb 23 2017 (11:07)
## Version:
## last-updated: Dec 6 2019 (11:18)
## By: Thomas Alexander Gerds
## Update #: 73
#----------------------------------------------------------------------
##
### Commentary:
##
### Change Log:
#----------------------------------------------------------------------
##
### Code:
##' Plot Brier score curves
##' @title Plot Brier curve
#' @param x Object obtained with \code{Score}
#' @param models Choice of models to plot
#' @param which Character. Either \code{"score"} to show the Brier score or
#' \code{"contrasts"} to show differences between Brier scores.
#' @param xlim Limits for x-axis
#' @param ylim Limits for y-axis
#' @param xlab Label for x-axis
#' @param ylab Label for y-axis
#' @param col line color
#' @param lwd line width
#' @param lty line style
#' @param cex point size
#' @param pch point style
#' @param type line type
#' @param axes Logical. If \code{TRUE} draw axes.
#' @param percent Logical. If \code{TRUE} scale y-axis in percent.
#' @param conf.int Logical. If \code{TRUE} draw confidence shadows.
#' @param legend Logical. If \code{TRUE} draw legend.
#' @param ... Used for additional control of the subroutines: plot,
#' axis, lines, legend. See \code{\link{SmartControl}}.
##' @examples
##' # survival
##' library(survival)
##' library(prodlim)
##' ds1=sampleData(40,outcome="survival")
##' ds2=sampleData(40,outcome="survival")
##' f1 <- coxph(Surv(time,event)~X1+X3+X5+X7+X9,data=ds1,x=TRUE)
##' f2 <- coxph(Surv(time,event)~X2+X4+X6+X8+X10,data=ds1,x=TRUE)
##' xscore <- Score(list(f1,f2),formula=Hist(time,event)~1,data=ds2,times=0:12,metrics="brier")
##' plotBrier(xscore)
#' @export
#'
#'
plotBrier <- function(x,
                      models,
                      which="score",
                      xlim,
                      ylim,
                      xlab,
                      ylab,
                      col,
                      lwd,
                      lty=1,
                      cex=1,
                      pch=1,
                      type="l",
                      axes=1L,
                      percent=1L,
                      conf.int=0L,
                      legend=1L,
                      ...){
    ## declare data.table NSE column names to silence R CMD check notes
    times=contrast=model=se=Brier=lower=upper=delta.Brier=reference=NULL
    ## colorblind-friendly palette (black-based variant)
    cbbPalette <- c("#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7")
    which <- tolower(which[1])
    ## copy() so the by-reference edits below do not modify the user's Score object
    pframe <- switch(which,
                     "score"={copy(x$Brier$score)},
                     "ipa"={copy(x$Brier$score)},
                     "contrasts"={copy(x$Brier$contrasts)},
                     {stop("argument 'which' has to be either 'score' for Brier, 'ipa' for IPA, or 'contrasts' for differences in Brier.")})
    if (length(pframe$times)<2) stop(paste("Need at least two time points for plotting time-dependent Brier. Object has only ",length(pframe$times),"times"))
    if (!missing(models)) pframe <- pframe[model %in% models]
    if (which=="score"){
        mm <- unique(pframe$model)
        if ("se"%in%names(pframe)){
            ## no confidence band where the standard error is missing or zero
            ## (repaired corrupted condition 'is.na(se)\u00d7==0' -- TODO confirm
            ## against upstream riskRegression source)
            pframe[is.na(se)|se==0,lower:=0]
            pframe[is.na(se)|se==0,upper:=0]
        }
    }else{
        if (which =="ipa"){
            ## the IPA of the null model is zero by construction: drop it
            mm <- unique(pframe[model!="Null model",model[1]])
            pframe <- pframe[model%in%mm]
            if ("se"%in%names(pframe)){
                pframe[is.na(se)|se==0,lower:=0]
                pframe[is.na(se)|se==0,upper:=0]
            }
        }else{
            ## contrasts: one curve per model-reference pair
            pframe[,contrast:=factor(paste(model,reference,sep=" - "))]
            mm <- unique(pframe$contrast)
            if ("se"%in%names(pframe)){
                pframe[is.na(se)|se==0,lower:=0]
                pframe[is.na(se)|se==0,upper:=0]
            }
        }
    }
    lenmm <- length(mm)
    if(missing(xlab)) xlab <- "Time"
    if(missing(ylab)) ylab <- switch(which,
                                     "score"="Brier score",
                                     "ipa"="Index of prediction accuracy",
                                     expression(paste(Delta, " Brier score")))
    ## recycle graphical parameters to one value per curve, named by curve
    if(missing(col)) col <- rep(cbbPalette,length.out=lenmm)
    names(col) <- mm
    if(missing(lwd)) lwd <- 2
    lwd <- rep(lwd,length.out=lenmm)
    names(lwd) <- mm
    pch <- rep(pch,length.out=lenmm)
    names(pch) <- mm
    type <- rep(type,length.out=lenmm)
    names(type) <- mm
    if(missing(lty)) lty <- 1 # fixed: previously tested missing(lwd)
    lty <- rep(lty,length.out=lenmm)
    names(lty) <- mm
    if (missing(xlim)) xlim <- pframe[,range(times)]
    if (missing(ylim)){
        if (which%in%c("score","ipa")) {
            ylim <- c(0,.3)
            if (which=="ipa"){
                ## fixed: na.rm=0 meant NA values propagated into the maximum
                ylim <- c(0,max(pframe$IPA,na.rm=TRUE))
            }
            axis2.DefaultArgs <- list(side=2,
                                      las=2,
                                      at=seq(0,ylim[2],ylim[2]/4),
                                      mgp=c(4,1,0))
        } else{
            ylim <- c(floor(10*min(pframe$lower))/10,ceiling(10*max(pframe$upper))/10)
            yat <- seq(ylim[1],ylim[2],0.05)
            ## round away floating point noise from seq(), e.g. 1.1e-16 instead of 0
            yat <- round(100*yat)/100
            axis2.DefaultArgs <- list(side=2,las=2,at=yat,mgp=c(4,1,0))
        }
    }else{
        axis2.DefaultArgs <- list(side=2,las=2,at=seq(ylim[1],ylim[2],abs(ylim[2]-ylim[1])/4),mgp=c(4,1,0))
    }
    lines.DefaultArgs <- list(pch=pch,type=type,cex=cex,lwd=lwd,col=col,lty=lty)
    axis1.DefaultArgs <- list(side=1,las=1,at=seq(0,xlim[2],xlim[2]/4))
    if (which%in%c("score","ipa")){
        legend.DefaultArgs <- list(legend=mm,lwd=lwd,col=col,lty=lty,cex=cex,bty="n",y.intersp=1.3,x="topleft")
        if (which=="ipa") legend.DefaultArgs$x="bottomleft"
    } else{
        legend.DefaultArgs <- list(legend=as.character(unique(pframe$contrast)),lwd=lwd,col=col,lty=lty,cex=cex,bty="n",y.intersp=1.3,x="topleft")
    }
    plot.DefaultArgs <- list(x=0,y=0,type = "n",ylim = ylim,xlim = xlim,ylab=ylab,xlab=xlab)
    ## merge user-supplied ... arguments with the defaults assembled above
    control <- prodlim::SmartControl(call= list(...),
                                     keys=c("plot","lines","legend","axis1","axis2"),
                                     ignore=NULL,
                                     ignore.case=TRUE,
                                     defaults=list("plot"=plot.DefaultArgs,
                                                   "lines"=lines.DefaultArgs,
                                                   "legend"=legend.DefaultArgs,
                                                   "axis1"=axis1.DefaultArgs,
                                                   "axis2"=axis2.DefaultArgs),
                                     forced=list("plot"=list(axes=FALSE),
                                                 "axis1"=list(side=1)),
                                     verbose=TRUE)
    if (which%in%c("score","ipa")){
        ## one Brier/IPA curve per model
        do.call("plot",control$plot)
        pframe[,{thisline <- control$lines # explicit key (was partial match 'control$line')
            thisline$col=thisline$col[[as.character(model[1])]]
            thisline$lwd=thisline$lwd[[as.character(model[1])]]
            thisline$lty=thisline$lty[[as.character(model[1])]]
            thisline$pch=thisline$pch[[as.character(model[1])]]
            thisline$type=thisline$type[[as.character(model[1])]]
            thisline$x=times
            if (which =="ipa"){
                thisline$y=IPA
            }else{
                thisline$y=Brier
            }
            do.call("lines",thisline)},by=model]
    }else{
        ## one difference curve per contrast
        do.call("plot",control$plot)
        pframe[,{thisline <- control$lines;
            thisline$col=thisline$col[[as.character(contrast[1])]];
            thisline$lwd=thisline$lwd[[as.character(contrast[1])]];
            thisline$lty=thisline$lty[[as.character(contrast[1])]];
            thisline$pch=thisline$pch[[as.character(contrast[1])]];
            thisline$type=thisline$type[[as.character(contrast[1])]];
            thisline$x=times;
            thisline$y=delta.Brier;
            do.call("lines",thisline)},by=contrast]
    }
    ## legend (skipped when legend=FALSE)
    if (!(is.logical(legend[[1]]) && legend[[1]]==FALSE)){
        do.call("legend",control$legend)
    }
    ## confidence shadows
    if (conf.int==TRUE){
        dimcol <- sapply(col,function(cc){prodlim::dimColor(cc)})
        names(dimcol) <- names(col)
        if (which=="score"){
            pframe[,polygon(x=c(times,rev(times)),y=c(lower,rev(upper)),col=dimcol[[as.character(model)]],border=NA),by=model]
        }else{
            pframe[,polygon(x=c(times,rev(times)),y=c(lower,rev(upper)),col=dimcol[[as.character(contrast)]],border=NA),by=contrast]
        }
    }
    if (axes){
        ## fixed: percent labels now respect the 'percent' argument
        if (percent) control$axis2$labels <- paste(100*control$axis2$at,"%")
        do.call("axis",control$axis1)
        do.call("axis",control$axis2)
    }
    invisible(pframe)
}
#----------------------------------------------------------------------
### plotBrier.R ends here
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kmdte.R
\name{kmdte}
\alias{kmdte}
\title{Kaplan-Meier Distributional Treatment Effect}
\usage{
kmdte(out, delta, treat, ysup = NULL, xpscore, b = 1000, ci = c(0.9, 0.95,
0.99), standardize = TRUE, cores = 1)
}
\arguments{
\item{out}{vector containing the outcome of interest}
\item{delta}{vector containing the censoring indicator (1 if observed, 0 if censored)}
\item{treat}{vector containing the treatment indicator (1 if treated, 0 if control)}
\item{ysup}{scalar or vector of points for which
the distributional treatment effect is computed. If NULL,
all uncensored data points available are used.}
\item{xpscore}{matrix (or data frame) containing the covariates (and their
transformations) to be included in the propensity score estimation.
Propensity score estimation is based on Logit.}
\item{b}{The number of bootstrap replicates to be performed. Default is 1,000.}
\item{ci}{A scalar or vector with values in (0,1) containing the confidence level(s)
of the required interval(s). Default is a vector with
0.90, 0.95 and 0.99}
\item{standardize}{Default is TRUE, which normalizes propensity score weights to
sum to 1 within each treatment group.
Set to FALSE to return Horvitz-Thompson weights.}
\item{cores}{number of processors to be used during the bootstrap (default is 1).
If cores>1, the bootstrap is conducted using snow}
}
\value{
a list containing the distributional treatment effect estimate, dte,
and the bootstrapped \emph{ci} confidence
interval, l.dte (lower bound), and u.dte (upper bound).
}
\description{
\emph{kmdte} computes the Distributional Treatment Effect for possibly right-censored
outcomes. The estimator relies on the unconfoundedness assumption, and on
estimating the propensity score. For details of the estimation procedure, see
Sant'Anna (2016a), 'Program Evaluation with Right-Censored Data'.
}
|
/man/kmdte.Rd
|
no_license
|
pedrohcgs/kmte
|
R
| false
| true
| 1,953
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kmdte.R
\name{kmdte}
\alias{kmdte}
\title{Kaplan-Meier Distributional Treatment Effect}
\usage{
kmdte(out, delta, treat, ysup = NULL, xpscore, b = 1000, ci = c(0.9, 0.95,
0.99), standardize = TRUE, cores = 1)
}
\arguments{
\item{out}{vector containing the outcome of interest}
\item{delta}{vector containing the censoring indicator (1 if observed, 0 if censored)}
\item{treat}{vector containing the treatment indicator (1 if treated, 0 if control)}
\item{ysup}{scalar or vector of points for which
the distributional treatment effect is computed. If NULL,
all uncensored data points available are used.}
\item{xpscore}{matrix (or data frame) containing the covariates (and their
transformations) to be included in the propensity score estimation.
Propensity score estimation is based on Logit.}
\item{b}{The number of bootstrap replicates to be performed. Default is 1,000.}
\item{ci}{A scalar or vector with values in (0,1) containing the confidence level(s)
of the required interval(s). Default is a vector with
0.90, 0.95 and 0.99}
\item{standardize}{Default is TRUE, which normalizes propensity score weights to
sum to 1 within each treatment group.
Set to FALSE to return Horvitz-Thompson weights.}
\item{cores}{number of processors to be used during the bootstrap (default is 1).
If cores>1, the bootstrap is conducted using snow}
}
\value{
a list containing the distributional treatment effect estimate, dte,
and the bootstrapped \emph{ci} confidence
interval, l.dte (lower bound), and u.dte (upper bound).
}
\description{
\emph{kmdte} computes the Distributional Treatment Effect for possibly right-censored
outcomes. The estimator relies on the unconfoundedness assumption, and on
estimating the propensity score. For details of the estimation procedure, see
Sant'Anna (2016a), 'Program Evaluation with Right-Censored Data'.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mock.R
\name{mockup}
\alias{mockup}
\alias{tmp_package}
\alias{tmp_golem}
\alias{tmp_project}
\alias{tmp_ambiorix}
\alias{tmp_delete}
\title{Mock up}
\usage{
tmp_package()
tmp_golem()
tmp_project()
tmp_ambiorix()
tmp_delete(tmp)
}
\arguments{
\item{tmp}{A temp mock up project.}
}
\description{
Functions to mock up packages for tests
}
|
/man/mockup.Rd
|
permissive
|
DivadNojnarg/packer
|
R
| false
| true
| 419
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mock.R
\name{mockup}
\alias{mockup}
\alias{tmp_package}
\alias{tmp_golem}
\alias{tmp_project}
\alias{tmp_ambiorix}
\alias{tmp_delete}
\title{Mock up}
\usage{
tmp_package()
tmp_golem()
tmp_project()
tmp_ambiorix()
tmp_delete(tmp)
}
\arguments{
\item{tmp}{A temp mock up project.}
}
\description{
Functions to mock up packages for tests
}
|
# Blog-header graphic: draws an Einstein portrait as a single spiral whose
# line width encodes image darkness, shown at seven stages of completion
# (one facet per stage), each in a randomly sampled color.
# The helpers scale_image(), spiral_cartesian(), and project_image() are
# defined in Functions.R; readJPEG presumably comes from the jpeg package
# loaded there -- TODO confirm.
source("Functions.R")
#Image
# Spiral radius in coil units; the image is rescaled to size*2 pixels.
size = 50
einstein <- readJPEG("Einstein.jpg") %>%
  scale_image(size*2) %>%
  spiral_cartesian(spiral_radius = size, num_coils = 50,
                   chord_length = 2, rotation = 0) %>%
  project_image()
#Go-to color set
gg_colors <- sel_color <- c(
  "#9affd0", #Aqua
  "#ffb5f5", #Pink
  "#5384ff", #Blue
  "#ff9e53", #Orange
  #"#ffed89", #Yellow
  "#de89ff", #Purple
  "#00436b", #RT blue
  "#ff6141", #Red/Orange
  "#ff25ab" #Bright pink
)
# Cumulative point counts: each value keeps only the first ii spiral points,
# giving progressively more complete drawings; the last entry is the full image.
header_spiral <- c(300, 600, 900, 1200, 2000, 2900, nrow(einstein$projected_spiral)) %>%
  map2_df(
    sample(gg_colors, 7),
    function(ii, cc){
      dat <- einstein$projected_spiral %>%
        filter(row_number() <= ii) %>%
        mutate(spir_group = ii,
               fill = cc)
      return(dat)
    })
# One facet per completion stage; path width maps to the sampled grey value.
ggplot(header_spiral, aes(x=x, y=y, size = grey)) +
  geom_path(aes(color = fill)) +
  scale_size_continuous(range = c(0.1, 1.5))+
  scale_color_identity() +
  coord_fixed() +
  facet_grid(cols = vars(spir_group)) +
  theme_void() +
  theme(
    strip.text = element_blank(),
    legend.position = "none"
  )
|
/99_BlogHeader.R
|
no_license
|
ryantimpe/SpiralDrawings
|
R
| false
| false
| 1,117
|
r
|
# Blog-header graphic: draws an Einstein portrait as a single spiral whose
# line width encodes image darkness, shown at seven stages of completion
# (one facet per stage), each in a randomly sampled color.
# The helpers scale_image(), spiral_cartesian(), and project_image() are
# defined in Functions.R; readJPEG presumably comes from the jpeg package
# loaded there -- TODO confirm.
source("Functions.R")
#Image
# Spiral radius in coil units; the image is rescaled to size*2 pixels.
size = 50
einstein <- readJPEG("Einstein.jpg") %>%
  scale_image(size*2) %>%
  spiral_cartesian(spiral_radius = size, num_coils = 50,
                   chord_length = 2, rotation = 0) %>%
  project_image()
#Go-to color set
gg_colors <- sel_color <- c(
  "#9affd0", #Aqua
  "#ffb5f5", #Pink
  "#5384ff", #Blue
  "#ff9e53", #Orange
  #"#ffed89", #Yellow
  "#de89ff", #Purple
  "#00436b", #RT blue
  "#ff6141", #Red/Orange
  "#ff25ab" #Bright pink
)
# Cumulative point counts: each value keeps only the first ii spiral points,
# giving progressively more complete drawings; the last entry is the full image.
header_spiral <- c(300, 600, 900, 1200, 2000, 2900, nrow(einstein$projected_spiral)) %>%
  map2_df(
    sample(gg_colors, 7),
    function(ii, cc){
      dat <- einstein$projected_spiral %>%
        filter(row_number() <= ii) %>%
        mutate(spir_group = ii,
               fill = cc)
      return(dat)
    })
# One facet per completion stage; path width maps to the sampled grey value.
ggplot(header_spiral, aes(x=x, y=y, size = grey)) +
  geom_path(aes(color = fill)) +
  scale_size_continuous(range = c(0.1, 1.5))+
  scale_color_identity() +
  coord_fixed() +
  facet_grid(cols = vars(spir_group)) +
  theme_void() +
  theme(
    strip.text = element_blank(),
    legend.position = "none"
  )
#This document is shared across cobalt, WeightIt, and optweight
#Strings
word_list <- function(word.list = NULL, and.or = c("and", "or"), is.are = FALSE, quotes = FALSE) {
    # Collapse a character vector into an English list: "a", "a and b",
    # "a, b, and c". NA and empty strings are dropped. 'quotes' wraps each
    # item (2 = double quotes, 1 or TRUE = single quotes). 'is.are' appends
    # "is"/"are" as appropriate. The result carries a logical "plural"
    # attribute. Relies on match_arg(), defined elsewhere in this file.
    if (quotes) {
        q <- as.integer(quotes)
        if (q == 2) word.list <- vapply(word.list, function(w) paste0("\"", w, "\""), character(1L))
        else if (q == 1) word.list <- vapply(word.list, function(w) paste0("\'", w, "\'"), character(1L))
        else stop("'quotes' must be boolean, 1, or 2.")
    }
    # Drop missing and empty entries before counting.
    word.list <- word.list[!word.list %in% c(NA_character_, "")]
    n <- length(word.list)
    if (n == 0) {
        out <- ""
        attr(out, "plural") <- FALSE
    }
    else if (n == 1) {
        out <- word.list
        if (is.are) out <- paste(out, "is")
        attr(out, "plural") <- FALSE
    }
    else {
        and.or <- match_arg(and.or)
        if (n == 2) {
            out <- paste(word.list, collapse = paste0(" ", and.or, " "))
        }
        else {
            # Oxford comma before the final conjunction.
            out <- paste(paste(word.list[seq_len(n - 1)], collapse = ", "),
                         word.list[n], sep = paste0(", ", and.or, " "))
        }
        if (is.are) out <- paste(out, "are")
        attr(out, "plural") <- TRUE
    }
    out
}
firstup <- function(x) {
    # Capitalize the first letter of each element of x.
    sub("^(.)", "\\U\\1", x, perl = TRUE)
}
expand.grid_string <- function(..., collapse = "") {
    # All combinations of the supplied vectors, each combination pasted
    # into a single string separated by 'collapse'.
    grid <- expand.grid(...)
    apply(grid, 1L, function(row) paste(row, collapse = collapse))
}
num_to_superscript <- function(x) {
    # Convert the digits of x to Unicode superscript characters,
    # e.g. 12 -> "\u00B9\u00B2". Vectorized over x.
    nums <- setNames(c("\u2070",
                       "\u00B9",
                       "\u00B2",
                       "\u00B3",
                       "\u2074",
                       "\u2075",
                       "\u2076",
                       "\u2077",
                       "\u2078",
                       "\u2079"),
                     as.character(0:9))
    x <- as.character(x)
    splitx <- strsplit(x, "", fixed = TRUE)
    # vapply instead of sapply for a type-stable character result
    vapply(splitx, function(digits) paste0(nums[digits], collapse = ""), character(1L))
}
ordinal <- function(x) {
    # English ordinal representation of numbers: 1 -> "1st", 2 -> "2nd",
    # 11 -> "11th", -3 -> "-3rd". Vectorized by recursing on elements.
    # Relies on is_null(), defined elsewhere in this file.
    if (!is.numeric(x) || !is.vector(x) || is_null(x)) stop("'x' must be a numeric vector.")
    if (length(x) > 1) return(vapply(x, ordinal, character(1L)))
    x0 <- abs(x)
    # Fixed: 11, 12, 13 previously got "st"/"nd"/"rd"; teens always take "th".
    last.two <- x0 %% 100
    suffix <- {
        if (last.two %in% 11:13) "th"
        else switch(as.character(x0 %% 10),
                    "1" = "st",
                    "2" = "nd",
                    "3" = "rd",
                    "th")
    }
    out <- paste0(x0, suffix)
    if (sign(x) == -1) out <- paste0("-", out)
    out
}
round_df_char <- function(df, digits, pad = "0", na_vals = "") {
    # Format a data frame for printing: round numeric columns to 'digits',
    # pad decimal parts with 'pad' so entries align, replace NA with
    # 'na_vals' and non-finite values with "N/A". Returns an all-character
    # data frame with the original row/column names. Relies on
    # can_str2num() and str2num(), defined elsewhere in this file.
    if (NROW(df) == 0 || NCOL(df) == 0) return(df)
    if (!is.data.frame(df)) df <- as.data.frame.matrix(df, stringsAsFactors = FALSE)
    rn <- rownames(df)
    cn <- colnames(df)
    # Record non-finite entries, "negative zeros" (negative values that
    # round to 0), and NAs before any transformation destroys them.
    infs <- o.negs <- array(FALSE, dim = dim(df))
    nas <- is.na(df)
    nums <- vapply(df, is.numeric, logical(1))
    infs[,nums] <- vapply(which(nums), function(i) !nas[,i] & !is.finite(df[[i]]), logical(NROW(df)))
    # Character columns that hold numbers are converted so they get the
    # same rounding/padding treatment as numeric columns.
    for (i in which(!nums)) {
        if (can_str2num(df[[i]])) {
            df[[i]] <- str2num(df[[i]])
            nums[i] <- TRUE
        }
    }
    o.negs[,nums] <- !nas[,nums] & df[nums] < 0 & round(df[nums], digits) == 0
    df[nums] <- round(df[nums], digits = digits)
    for (i in which(nums)) {
        df[[i]] <- format(df[[i]], scientific = FALSE, justify = "none", trim = TRUE,
                          drop0trailing = !identical(as.character(pad), "0"))
        # With a non-"0" pad, right-pad the decimal part of every entry to
        # the column's maximum number of post-"." digits so values align.
        if (!identical(as.character(pad), "0") && any(grepl(".", df[[i]], fixed = TRUE))) {
            s <- strsplit(df[[i]], ".", fixed = TRUE)
            lengths <- lengths(s)
            digits.r.of.. <- rep(0, NROW(df))
            digits.r.of..[lengths > 1] <- nchar(vapply(s[lengths > 1], `[[`, character(1L), 2))
            max.dig <- max(digits.r.of..)
            # Entries with no decimal point first receive a "." (or the pad
            # character when pad is the empty string).
            dots <- ifelse(lengths > 1, "", if (as.character(pad) != "") "." else pad)
            pads <- vapply(max.dig - digits.r.of.., function(n) paste(rep(pad, n), collapse = ""), character(1L))
            df[[i]] <- paste0(df[[i]], dots, pads)
        }
    }
    # Restore the sign of negative values that rounded to zero.
    df[o.negs] <- paste0("-", df[o.negs])
    # Insert NA placeholders
    df[nas] <- na_vals
    df[infs] <- "N/A"
    if (length(rn) > 0) rownames(df) <- rn
    if (length(cn) > 0) names(df) <- cn
    return(df)
}
text_box_plot <- function(range.list, width = 12) {
    # Render a crude text "box plot": for each named range in 'range.list',
    # draw "|----|" spanning its extent on a common scale roughly 'width'
    # characters wide. Returns a data frame with columns Min, the drawing,
    # and Max (one row per range). Relies on make_df(), defined elsewhere
    # in this file.
    full.range <- range(unlist(range.list))
    # Scale factor mapping data units to character positions.
    ratio = diff(full.range)/(width+1)
    rescaled.range.list <- lapply(range.list, function(x) round(x/ratio))
    rescaled.full.range <- round(full.range/ratio)
    d <- make_df(c("Min", paste(rep(" ", width + 1), collapse = ""), "Max"),
                 names(range.list),
                 "character")
    d[["Min"]] <- vapply(range.list, function(x) x[1], numeric(1L))
    d[["Max"]] <- vapply(range.list, function(x) x[2], numeric(1L))
    for (i in seq_len(nrow(d))) {
        # Leading spaces up to this range's left edge on the common scale.
        spaces1 <- rescaled.range.list[[i]][1] - rescaled.full.range[1]
        #|
        # Dashes between the two "|" markers (at least 0).
        dashes <- max(c(0, diff(rescaled.range.list[[i]]) - 2))
        #|
        # Trailing spaces filling out the common scale.
        spaces2 <- max(c(0, diff(rescaled.full.range) - (spaces1 + 1 + dashes + 1)))
        d[i, 2] <- paste0(paste(rep(" ", spaces1), collapse = ""), "|", paste(rep("-", dashes), collapse = ""), "|", paste(rep(" ", spaces2), collapse = ""))
    }
    return(d)
}
equivalent.factors <- function(f1, f2) {
    # TRUE if f1 and f2 induce the same grouping of observations: they must
    # have equally many unique values, and their interaction must not create
    # any new groups. Relies on nunique() and paste.(), defined elsewhere
    # in this file.
    n1 <- nunique(f1)
    if (n1 != nunique(f2)) return(FALSE)
    n1 == nunique(paste.(f1, f2))
}
equivalent.factors2 <- function(f1, f2) {
    # Rank-based equivalence check: the two factors carry the same
    # information iff adding the second to a design matrix that already
    # contains an intercept and the first adds no rank.
    design <- cbind(1, as.numeric(f1), as.numeric(f2))
    qr(design)$rank == 2
}
paste. <- function(..., collapse = NULL) {
    # Like paste0() but with sep = "." (dot-separated concatenation).
    paste(..., sep = ".", collapse = collapse)
}
wrap <- function(s, nchar, ...) {
    # Wrap each string in 's' at 'nchar' characters, joining the wrapped
    # pieces with newlines. Returns a character vector parallel to 's'.
    vapply(s,
           function(one) paste(strwrap(one, width = nchar, ...), collapse = "\n"),
           character(1L))
}
strsplits <- function(x, splits, fixed = TRUE, ...) {
    # Like strsplit() but splits on each value in 'splits' in turn.
    # Only works for one string at a time (in x).
    for (split in splits) {
        # Fixed: the 'fixed' argument was previously hard-coded to TRUE,
        # silently ignoring fixed = FALSE (regex) requests.
        x <- unlist(strsplit(x, split, fixed = fixed, ...))
    }
    x[x != ""] # Remove empty values
}
c.factor <- function(..., recursive = TRUE) {
    # c() method for factors: unlist() combines factor elements and unions
    # their level sets.
    args <- list(...)
    unlist(args, recursive = recursive)
}
can_str2num <- function(x) {
    # TRUE if every non-NA element of x can be coerced to a number
    # (coercion warnings are suppressed).
    x.char <- as.character(x[!is.na(x)])
    converted <- suppressWarnings(as.numeric(x.char))
    !anyNA(converted)
}
str2num <- function(x) {
    # Coerce x to numeric, silencing coercion warnings; elements that were
    # already NA stay NA, and unconvertible elements become NA.
    out <- suppressWarnings(as.numeric(as.character(x)))
    out[is.na(x)] <- NA
    out
}
trim_string <- function(x, char = " ", symmetrical = TRUE, recursive = TRUE) {
    # Strip 'char' from the ends of each element of x. With symmetrical =
    # TRUE a character is only removed when it appears at BOTH ends;
    # otherwise leading and trailing occurrences are stripped independently.
    # With recursive = TRUE, stripping repeats until nothing more matches.
    starts <- startsWith(x, char)
    ends <- endsWith(x, char)
    if (symmetrical) {
        both <- starts & ends
        if (!any(both)) return(x)
        x[both] <- gsub('^.|.$', '', x[both])
    }
    else {
        if (!any(starts) && !any(ends)) return(x)
        if (any(starts)) x[starts] <- gsub('^.', '', x[starts])
        if (any(ends)) x[ends] <- gsub('.$', '', x[ends])
    }
    if (!recursive) return(x)
    trim_string(x, char, symmetrical, recursive)
}
#Numbers
check_if_zero <- function(x) {
    # Elementwise test for "numerically zero", using the same default
    # tolerance as all.equal(): sqrt(.Machine$double.eps).
    abs(x) < .Machine$double.eps^0.5
}
between <- function(x, range, inclusive = TRUE, na.action = FALSE) {
    # Elementwise test of whether x lies within 'range' (a length-2 numeric
    # vector, accepted in either order). NA elements of x map to 'na.action'.
    if (!all(is.numeric(x))) stop("'x' must be a numeric vector.", call. = FALSE)
    if (length(range) != 2) stop("'range' must be of length 2.", call. = FALSE)
    if (anyNA(range) || !is.numeric(range)) stop("'range' must contain numeric entries only.", call. = FALSE)
    range <- sort(range)
    if (anyNA(x) && (length(na.action) != 1 || !is.atomic(na.action))) {
        stop("'na.action' must be an atomic vector of length 1.", call. = FALSE)
    }
    inside <- {
        if (inclusive) x >= range[1] & x <= range[2]
        else x > range[1] & x < range[2]
    }
    ifelse(is.na(x), na.action, inside)
}
max_ <- function(..., na.rm = TRUE) {
    # max() that returns NA (instead of -Inf with a warning) when no finite
    # values are supplied.
    vals <- unlist(list(...))
    if (!any(is.finite(vals))) return(NA_real_)
    max(..., na.rm = na.rm)
}
min_ <- function(..., na.rm = TRUE) {
    # min() that returns NA (instead of Inf with a warning) when no finite
    # values are supplied.
    vals <- unlist(list(...))
    if (!any(is.finite(vals))) return(NA_real_)
    min(..., na.rm = na.rm)
}
check_if_int <- function(x) {
    # Elementwise test for integer-valued input: TRUE throughout for
    # integer vectors, a numerical-wholeness test for doubles, and FALSE
    # throughout otherwise. Relies on check_if_zero(), defined elsewhere
    # in this file.
    if (is.integer(x)) return(rep(TRUE, length(x)))
    if (is.numeric(x)) return(check_if_zero(x - round(x)))
    rep(FALSE, length(x))
}
#Statistics
binarize <- function(variable, zero = NULL, one = NULL) {
    # Convert a two-level variable (factor, character, logical, or numeric)
    # into a 0/1 integer vector, preserving names. 'zero'/'one' optionally
    # name the level that maps to 0 or 1; otherwise 0 itself (when present)
    # or the smallest value becomes 0. Relies on is_binary(), is_null(),
    # can_str2num(), and str2num(), defined elsewhere in this file.
    if (!is_binary(variable)) stop(paste0("Cannot binarize ", deparse1(substitute(variable)), ": more than two levels."))
    if (is.character(variable) || is.factor(variable)) {
        variable <- factor(variable, nmax = 2)
        unique.vals <- levels(variable)
    }
    else {
        unique.vals <- unique(variable, nmax = 2)
    }
    if (is_null(zero)) {
        if (is_null(one)) {
            # Neither mapping supplied: coerce the levels to numbers when
            # possible so the smaller value (or 0) can serve as zero.
            if (can_str2num(unique.vals)) {
                variable.numeric <- str2num(variable)
            }
            else {
                variable.numeric <- as.numeric(variable)
            }
            if (0 %in% variable.numeric) zero <- 0
            else zero <- min(variable.numeric, na.rm = TRUE)
            return(setNames(as.integer(variable.numeric != zero), names(variable)))
        }
        else {
            if (one %in% unique.vals) return(setNames(as.integer(variable == one), names(variable)))
            else stop("The argument to 'one' is not the name of a level of variable.", call. = FALSE)
        }
    }
    else {
        if (zero %in% unique.vals) return(setNames(as.integer(variable != zero), names(variable)))
        else stop("The argument to 'zero' is not the name of a level of variable.", call. = FALSE)
    }
}
ESS <- function(w) {
    # Kish's effective sample size of a weight vector: (sum w)^2 / sum(w^2).
    sum(w)^2/sum(w^2)
}
center <- function(x, at = NULL, na.rm = TRUE) {
    # Center the columns of x (numeric vector, matrix, or data.frame) at
    # 'at' (default: the column means). Returns an object of the same
    # shape as the input. Relies on is_null() and %nin%, defined elsewhere
    # in this file.
    if (is.data.frame(x)) {
        x <- as.matrix.data.frame(x)
        type <- "df"
    }
    else type <- NULL
    if (!is.numeric(x)) stop("'x' must be numeric.")
    if (is.array(x) && length(dim(x)) > 2) stop("'x' must be a numeric or matrix-like (not array).")
    if (!is.matrix(x)) {
        x <- matrix(x, ncol = 1)
        type <- "vec"
    }
    # Fixed: this previously overwrote type unconditionally with "matrix",
    # so a data.frame input was returned as a matrix instead of a data.frame.
    else if (is.null(type)) type <- "matrix"
    if (is_null(at)) at <- colMeans(x, na.rm = na.rm)
    else if (length(at) %nin% c(1, ncol(x))) stop("'at' is not the right length.")
    out <- x - matrix(at, byrow = TRUE, ncol = ncol(x), nrow = nrow(x))
    if (type == "df") out <- as.data.frame.matrix(out)
    else if (type == "vec") out <- drop(out)
    return(out)
}
w.m <- function(x, w = NULL, na.rm = TRUE) {
    # Weighted mean of x (unit weights when w is NULL). Weights aligned
    # with missing x values are set to NA so they drop out of the
    # denominator when na.rm = TRUE. Relies on is_null(), defined
    # elsewhere in this file.
    if (is_null(w)) w <- rep(1, length(x))
    if (anyNA(x)) w[is.na(x)] <- NA
    return(sum(x*w, na.rm=na.rm)/sum(w, na.rm=na.rm))
}
col.w.m <- function(mat, w = NULL, na.rm = TRUE) {
    # Weighted mean of each column of 'mat' (unit weights when w is NULL).
    # The denominator counts weights only where the entry is non-missing.
    # Relies on is_null(), defined elsewhere in this file.
    if (is_null(w)) w <- 1
    w.sum <- colSums(w*!is.na(mat))
    return(colSums(mat*w, na.rm = na.rm)/w.sum)
}
col.w.v <- function(mat, w = NULL, bin.vars = NULL, na.rm = TRUE) {
    # Weighted variance of each column of 'mat'. Columns flagged in
    # 'bin.vars' are treated as binary and get variance p*(1-p); the other
    # columns use the bias-corrected weighted variance
    # sum(w*(x - xbar_w)^2) / (1 - sum(w^2)) with weights normalized to
    # sum to 1. Relies on is_(), is_null(), center(), and mat_div(),
    # defined elsewhere in this file.
    if (!is.matrix(mat)) {
        if (is.data.frame(mat)) {
            if (any(vapply(mat, is_, logical(1L), types = c("factor", "character")))) {
                stop("'mat' must be a numeric matrix.")
            }
            else mat <- data.matrix(mat)
        }
        else if (is.numeric(mat)) {
            mat <- matrix(mat, ncol = 1)
        }
        else stop("'mat' must be a numeric matrix.")
    }
    if (is_null(bin.vars)) bin.vars <- rep(FALSE, ncol(mat))
    else if (length(bin.vars) != ncol(mat) || anyNA(as.logical(bin.vars))) {
        stop("'bin.vars' must be a logical vector with length equal to the number of columns of 'mat'.", call. = FALSE)
    }
    bin.var.present <- any(bin.vars)
    non.bin.vars.present <- any(!bin.vars)
    var <- setNames(numeric(ncol(mat)), colnames(mat))
    if (is_null(w)) {
        # Unweighted: classical n-1 variance / sample proportion variance.
        if (non.bin.vars.present) {
            den <- colSums(!is.na(mat[, !bin.vars, drop = FALSE])) - 1
            var[!bin.vars] <- colSums(center(mat[, !bin.vars, drop = FALSE])^2, na.rm = na.rm)/den
        }
        if (bin.var.present) {
            means <- colMeans(mat[, bin.vars, drop = FALSE], na.rm = na.rm)
            var[bin.vars] <- means * (1 - means)
        }
    }
    else if (na.rm && anyNA(mat)) {
        # Weighted with missing data: expand w to a matrix, NA-out weights
        # where the data are missing, then normalize column by column.
        w <- array(w, dim = dim(mat))
        w[is.na(mat)] <- NA
        s <- colSums(w, na.rm = na.rm)
        w <- mat_div(w, s)
        if (non.bin.vars.present) {
            x <- sqrt(w[, !bin.vars, drop = FALSE]) * center(mat[, !bin.vars, drop = FALSE],
                                                             at = colSums(w[, !bin.vars, drop = FALSE] * mat[, !bin.vars, drop = FALSE], na.rm = na.rm))
            var[!bin.vars] <- colSums(x*x, na.rm = na.rm)/(1 - colSums(w[, !bin.vars, drop = FALSE]^2, na.rm = na.rm))
        }
        if (bin.var.present) {
            means <- colSums(w[, bin.vars, drop = FALSE] * mat[, bin.vars, drop = FALSE], na.rm = na.rm)
            var[bin.vars] <- means * (1 - means)
        }
    }
    else {
        # Weighted, complete data. (Removed a dead 'if (is_null(w)) ...'
        # re-initialization: w cannot be NULL in this branch.)
        w <- w/sum(w)
        if (non.bin.vars.present) {
            x <- sqrt(w) * center(mat[, !bin.vars, drop = FALSE],
                                  at = colSums(w * mat[, !bin.vars, drop = FALSE], na.rm = na.rm))
            var[!bin.vars] <- colSums(x*x, na.rm = na.rm)/(1 - sum(w^2))
        }
        if (bin.var.present) {
            means <- colSums(w * mat[, bin.vars, drop = FALSE], na.rm = na.rm)
            var[bin.vars] <- means * (1 - means)
        }
    }
    return(var)
}
col.w.cov <- function(mat, y, w = NULL, na.rm = TRUE) {
    # Weighted covariance between each column of 'mat' and the vector 'y';
    # falls back to cov() when 'mat' is a vector and no weights are given.
    # Relies on is_null(), center(), and mat_div(), defined elsewhere in
    # this file.
    if (!is.matrix(mat)) {
        if (is_null(w)) return(cov(mat, y, use = if (na.rm) "pair" else "everything"))
        else mat <- matrix(mat, ncol = 1)
    }
    if (is_null(w)) {
        # Unweighted: propagate missingness between mat and y so any pair
        # with an NA is excluded from both numerator and denominator.
        y <- array(y, dim = dim(mat))
        if (anyNA(mat)) y[is.na(mat)] <- NA
        if (anyNA(y)) mat[is.na(y)] <- NA
        den <- colSums(!is.na(mat*y)) - 1
        cov <- colSums(center(mat, na.rm = na.rm)*center(y, na.rm = na.rm), na.rm = na.rm)/den
    }
    else if (na.rm && anyNA(mat)) {
        # Weighted with missing data: NA-out weights where the data are
        # missing, then normalize columnwise. (Removed unused local 'n'.)
        w <- array(w, dim = dim(mat))
        w[is.na(mat)] <- NA_real_
        s <- colSums(w, na.rm = na.rm)
        w <- mat_div(w, s)
        x <- w * center(mat, at = colSums(w * mat, na.rm = na.rm))
        cov <- colSums(x*y, na.rm = na.rm)/(1 - colSums(w^2, na.rm = na.rm))
    }
    else {
        # Weighted, complete data. (Removed unused local 'n'.)
        w <- w/sum(w)
        x <- w * center(mat, at = colSums(w * mat, na.rm = na.rm))
        cov <- colSums(x*y, na.rm = na.rm)/(1 - sum(w^2))
    }
    return(cov)
}
col.w.r <- function(mat, y, w = NULL, s.weights = NULL, bin.vars = NULL, na.rm = TRUE) {
    # Weighted correlation between each column of 'mat' and 'y'. With no
    # weights at all, falls back to cor(); otherwise computes
    # r = cov_w / (sd_w(mat) * sd_w(y)), with the standard deviations taken
    # under the sampling weights. Relies on is_null(), col.w.cov(), and
    # col.w.v(), defined elsewhere in this file.
    if (is_null(w) && is_null(s.weights)) {
        # Fixed: 'w' (always NULL here) was previously passed to cor() as a
        # stray positional argument, silently landing on 'method'.
        return(cor(mat, y, use = if (na.rm) "pair" else "everything"))
    }
    cov <- col.w.cov(mat, y = y, w = w, na.rm = na.rm)
    den <- sqrt(col.w.v(mat, w = s.weights, bin.vars = bin.vars, na.rm = na.rm)) *
        sqrt(col.w.v(y, w = s.weights, na.rm = na.rm))
    return(cov/den)
}
coef.of.var <- function(x, pop = TRUE) {
    # Coefficient of variation, sd(x)/mean(x); with pop = TRUE the
    # population (n-denominator) standard deviation is used, otherwise the
    # sample sd(). NAs are removed. Relies on mean_fast(), defined
    # elsewhere in this file.
    if (pop) sqrt(mean_fast((x-mean_fast(x, TRUE))^2, TRUE))/mean_fast(x, TRUE)
    else sd(x)/mean_fast(x, TRUE)
}
mean.abs.dev <- function(x) {
    # Mean absolute deviation of x around its mean, with NAs removed.
    # Relies on mean_fast(), defined elsewhere in this file.
    mean_fast(abs(x - mean_fast(x, TRUE)), TRUE)
}
rms <- function(x) {
    # Root mean square of x. Relies on mean_fast(), defined elsewhere in
    # this file.
    sqrt(mean_fast(x^2))
}
geom.mean <- function(y) {
    # Geometric mean of the values of y whose logarithm is finite (i.e.,
    # non-positive and non-finite values are dropped). Relies on
    # mean_fast(), defined elsewhere in this file.
    log.y <- log(y) # computed once (previously evaluated twice)
    exp(mean_fast(log.y[is.finite(log.y)], TRUE))
}
mat_div <- function(mat, vec) {
    # Divide each column j of 'mat' by vec[j]; vec is indexed by column
    # position and recycles if shorter than ncol(mat).
    mat/vec[col(mat)]
}
abs_ <- function(x, ratio = FALSE) {
    # Absolute value, or -- when ratio = TRUE -- the "absolute ratio"
    # pmax(x, 1/x), which treats a ratio and its reciprocal symmetrically.
    if (ratio) return(pmax(x, 1/x))
    abs(x)
}
mean_fast <- function(x, nas.possible = FALSE) {
    # Faster equivalent of mean(x, na.rm = TRUE). Set nas.possible = TRUE
    # when x may contain NAs; otherwise a plain sum()/length() is used.
    if (nas.possible && anyNA(x)) {
        return(sum(x, na.rm = TRUE) / sum(!is.na(x)))
    }
    sum(x) / length(x)
}
bw.nrd <- function(x) {
    # Scott-type bandwidth selector. Deliberately masks stats::bw.nrd,
    # which doesn't always work, by rescaling the more robust bw.nrd0.
    bw.nrd0(x)*1.06/.9
}
#Formulas
subbars <- function(term) {
    # Recursively replace the bar operators `|` and `||` in a formula or
    # call with `+`, so random-effects terms like (1 | g) become (1 + g)
    # and can be handled by standard model-matrix machinery (same idea as
    # lme4::subbars).
    if (is.name(term) || !is.language(term))
        return(term)
    if (length(term) == 2) {
        # Unary call (e.g. `~x` or `(x)`): recurse into its sole argument.
        term[[2]] <- subbars(term[[2]])
        return(term)
    }
    if (is.call(term) && (term[[1]] == as.name("|") || term[[1]] == as.name("||"))) {
        term[[1]] <- as.name("+")
    }
    for (j in 2:length(term)) term[[j]] <- subbars(term[[j]])
    return(term)
}
#treat/covs
#Parse a formula (and optional data) into the pieces cobalt needs:
#a data.frame of covariates as entered (reported.covs), the expanded
#model matrix (model.covs), the treatment vector (treat), and its name
#(treat.name). 'treat' may instead be supplied directly through '...'.
get.covs.and.treat.from.formula <- function(f, data = NULL, terms = FALSE, sep = "", ...) {
  A <- list(...)
  #Check if data exists
  if (is_not_null(data)) {
    if (is.data.frame(data)) {
      data.specified <- TRUE
    }
    else {
      warning("The argument supplied to data is not a data.frame object. This may causes errors or unexpected results.", call. = FALSE)
      data <- environment(f)
      data.specified <- FALSE
    }
  }
  else {
    data <- environment(f)
    data.specified <- FALSE
  }
  env <- environment(f)
  if (!is.formula(f)) stop("'f' must be a formula.")
  #TRUE when subbars() changed nothing (no "|" terms in the formula);
  #only then is a model matrix computed below. NOTE(review): relies on
  #the first argument's promise being forced before the assignment in
  #the second — confirm evaluation-order assumption.
  eval.model.matrx <- identical(f, f <- subbars(f))
  tryCatch(tt <- terms(f, data = data),
           error = function(e) {
             if (conditionMessage(e) == "'.' in formula and no 'data' argument") {
               stop("'.' is not allowed in formulas.", call. = FALSE)
             }
             else stop(conditionMessage(e), call. = FALSE)
           })
  #Check if response exists
  if (is.formula(tt, 2)) {
    resp.vars.mentioned <- as.character(tt)[2]
    #TRUE for response variables that cannot be evaluated in data/env
    resp.vars.failed <- vapply(resp.vars.mentioned, function(v) {
      test <- tryCatch(eval(parse(text=v), data, env), error = function(e) e)
      if (inherits(test, "simpleError")) {
        if (conditionMessage(test) == paste0("object '", v, "' not found")) return(TRUE)
        else stop(test)
      }
      else if (is_null(test)) return(TRUE)
      else return(FALSE)
    }, logical(1L))
    if (any(resp.vars.failed)) {
      if (is_null(A[["treat"]])) stop(paste0("The given response variable, \"", as.character(tt)[2], "\", is not a variable in ", word_list(c("data", "the global environment")[c(data.specified, TRUE)], "or"), "."), call. = FALSE)
      tt <- delete.response(tt)
    }
  }
  else resp.vars.failed <- TRUE
  #Use the evaluable response as the treatment, or fall back to a
  #directly supplied 'treat' argument
  if (any(!resp.vars.failed)) {
    treat.name <- resp.vars.mentioned[!resp.vars.failed][1]
    treat <- eval(parse(text=treat.name)[[1]], data, env)
  }
  else {
    treat <- A[["treat"]]
    treat.name <- NULL
  }
  #Check if RHS variables exist
  tt.covs <- delete.response(tt)
  rhs.vars.mentioned.lang <- attr(tt.covs, "variables")[-1]
  rhs.vars.mentioned <- vapply(rhs.vars.mentioned.lang, deparse1, character(1L))
  rhs.vars.failed <- vapply(rhs.vars.mentioned, function(v) {
    test <- tryCatch(eval(parse(text=v), data, env), error = function(e) e)
    if (inherits(test, "simpleError")) {
      if (conditionMessage(test) == paste0("object '", v, "' not found")) return(TRUE)
      else stop(test)
    }
    else if (is_null(test)) return(TRUE)
    else return(FALSE)
  }, logical(1L))
  if (any(rhs.vars.failed)) {
    stop(paste0(c("All variables in 'formula' must be variables in 'data' or objects in the global environment.\nMissing variables: ",
                  paste(rhs.vars.mentioned[rhs.vars.failed], collapse=", "))), call. = FALSE)
  }
  rhs.term.labels <- attr(tt.covs, "term.labels")
  rhs.term.orders <- attr(tt.covs, "order")
  #RHS variables that are themselves data.frame-like (data.frame, matrix,
  #or rms objects) get expanded column-by-column below
  rhs.df <- setNames(vapply(rhs.vars.mentioned, function(v) {
    is_(try(eval(parse(text=v)[[1]], data, env), silent = TRUE),
        c("data.frame", "matrix", "rms"))
  }, logical(1L)), rhs.vars.mentioned)
  rhs.term.labels.list <- setNames(as.list(rhs.term.labels), rhs.term.labels)
  if (any(rhs.df)) {
    if (any(rhs.vars.mentioned[rhs.df] %in% unlist(lapply(rhs.term.labels[rhs.term.orders > 1], function(x) strsplit(x, ":", fixed = TRUE))))) {
      stop("Interactions with data.frames are not allowed in the input formula.", call. = FALSE)
    }
    #Convert each data.frame-like term into a plain data.frame, prefixing
    #purely numeric column names with the object's name
    addl.dfs <- setNames(lapply(rhs.vars.mentioned[rhs.df], function(x) {
      df <- eval(parse(text=x)[[1]], data, env)
      if (is_(df, "rms")) {
        if (length(dim(df)) == 2L) class(df) <- "matrix"
        df <- setNames(as.data.frame(as.matrix(df)), attr(df, "colnames"))
      }
      else if (can_str2num(colnames(df))) colnames(df) <- paste(x, colnames(df), sep = sep)
      return(as.data.frame(df))
    }),
    rhs.vars.mentioned[rhs.df])
    #Splice the expanded column names into the term labels in place of
    #the original data.frame term
    for (i in rhs.term.labels[rhs.term.labels %in% rhs.vars.mentioned[rhs.df]]) {
      ind <- which(rhs.term.labels == i)
      rhs.term.labels <- append(rhs.term.labels[-ind],
                                values = names(addl.dfs[[i]]),
                                after = ind - 1)
      rhs.term.labels.list[[i]] <- names(addl.dfs[[i]])
    }
    if (data.specified) data <- do.call("cbind", unname(c(addl.dfs, list(data))))
    else data <- do.call("cbind", unname(addl.dfs))
  }
  if (is_null(rhs.term.labels)) {
    #Empty RHS: fall back to an intercept-only covariate set
    new.form <- as.formula("~ 1")
    tt.covs <- terms(new.form)
    covs <- data.frame(Intercept = rep(1, if (is_null(treat)) 1 else length(treat)))
    if (is_not_null(treat.name) && treat.name == "Intercept") {
      names(covs) <- "Intercept_"
    }
  }
  else {
    #Rebuild the formula from the (possibly expanded) term labels,
    #backtick-quoting columns that came from data.frame terms
    new.form.char <- paste("~", paste(vapply(names(rhs.term.labels.list), function(x) {
      if (x %in% rhs.vars.mentioned[rhs.df]) paste0("`", rhs.term.labels.list[[x]], "`", collapse = " + ")
      else rhs.term.labels.list[[x]]
      # try.form <- try(as.formula(paste("~", x)), silent = TRUE)
      # if (null_or_error(try.form) || (grepl("^", x, fixed = TRUE) && !startsWith(x, "I("))) {
      #   paste0("`", x, "`")
      # }
      # else x
    } , character(1L)), collapse = " + "))
    new.form <- as.formula(new.form.char)
    tt.covs <- terms(new.form)
    attr(tt.covs, "intercept") <- 0
    #Get model.frame, report error
    mf.covs <- quote(stats::model.frame(tt.covs, data,
                                        drop.unused.levels = TRUE,
                                        na.action = "na.pass"))
    tryCatch({covs <- eval(mf.covs)},
             error = function(e) {stop(conditionMessage(e), call. = FALSE)})
    if (is_not_null(treat.name) && treat.name %in% names(covs)) stop("The variable on the left side of the formula appears on the right side too.", call. = FALSE)
  }
  if (eval.model.matrx) {
    #Temporarily prefix factor levels with 'sep' so dummy-variable names
    #come out as name<sep>level in the model matrix
    if (s <- !identical(sep, "")) {
      if (!is.character(sep) || length(sep) > 1) stop("'sep' must be a string of length 1.", call. = FALSE)
      original.covs.levels <- make_list(names(covs))
      for (i in names(covs)) {
        if (is.character(covs[[i]])) covs[[i]] <- factor(covs[[i]])
        if (is.factor(covs[[i]])) {
          original.covs.levels[[i]] <- levels(covs[[i]])
          levels(covs[[i]]) <- paste0(sep, original.covs.levels[[i]])
        }
      }
    }
    #Get full model matrix with interactions too
    covs.matrix <- model.matrix(tt.covs, data = covs,
                                contrasts.arg = lapply(Filter(is.factor, covs),
                                                       contrasts, contrasts=FALSE))
    if (s) {
      #Restore the original factor levels in the reported covariates
      for (i in names(covs)) {
        if (is.factor(covs[[i]])) {
          levels(covs[[i]]) <- original.covs.levels[[i]]
        }
      }
    }
  }
  else {
    covs.matrix <- NULL
  }
  if (!terms) attr(covs, "terms") <- NULL
  return(list(reported.covs = covs,
              model.covs = covs.matrix,
              treat = treat,
              treat.name = treat.name))
}
#Classify 'treat' as "binary", "multinomial", or "continuous" and attach
#the result as the "treat.type" attribute. use.multi = TRUE forces
#multinomial handling even for two levels.
assign.treat.type <- function(treat, use.multi = FALSE) {
  #Returns treat with treat.type attribute
  nunique.treat <- nunique(treat)
  if (nunique.treat < 2) {
    stop("The treatment must have at least two unique values.", call. = FALSE)
  }
  else if (!use.multi && nunique.treat == 2) {
    treat.type <- "binary"
  }
  else if (use.multi || is_(treat, c("factor", "character"))) {
    treat.type <- "multinomial"
    if (!is_(treat, "processed.treat")) treat <- factor(treat)
  }
  else {
    treat.type <- "continuous"
  }
  attr(treat, "treat.type") <- treat.type
  return(treat)
}
#Retrieve the "treat.type" attribute set by assign.treat.type()
get.treat.type <- function(treat) {
  return(attr(treat, "treat.type"))
}
#TRUE if assign.treat.type() has already been applied to 'treat'
has.treat.type <- function(treat) {
  is_not_null(get.treat.type(treat))
}
#Input processing
#Normalize the 'bin.vars' specification (logical, numeric index, or
#column-name character vector) into a logical vector with one entry per
#column of 'mat'. When missing, binary columns are auto-detected.
process.bin.vars <- function(bin.vars, mat) {
  if (missing(bin.vars)) bin.vars <- is_binary_col(mat)
  else if (is_null(bin.vars)) bin.vars <- rep(FALSE, ncol(mat))
  else {
    if (is.logical(bin.vars)) {
      bin.vars[is.na(bin.vars)] <- FALSE
      if (length(bin.vars) != ncol(mat)) stop("If 'bin.vars' is logical, it must have length equal to the number of columns of 'mat'.")
    }
    else if (is.numeric(bin.vars)) {
      bin.vars <- bin.vars[!is.na(bin.vars) & bin.vars != 0]
      if (any(bin.vars < 0) && any(bin.vars > 0)) stop("Positive and negative indices cannot be mixed with 'bin.vars'.")
      if (any(abs(bin.vars) > ncol(mat))) stop("If 'bin.vars' is numeric, none of its values can exceed the number of columns of 'mat'.")
      #Negative indices mean "all but these": start from all-TRUE and
      #flip the named positions
      logical.bin.vars <- rep(any(bin.vars < 0), ncol(mat))
      logical.bin.vars[abs(bin.vars)] <- !logical.bin.vars[abs(bin.vars)]
      bin.vars <- logical.bin.vars
    }
    else if (is.character(bin.vars)) {
      bin.vars <- bin.vars[!is.na(bin.vars) & bin.vars != ""]
      if (is_null(colnames(mat))) stop("If 'bin.vars' is character, 'mat' must have column names.")
      if (any(bin.vars %nin% colnames(mat))) stop("If 'bin.vars' is character, all its values must be column names of 'mat'.")
      bin.vars <- colnames(mat) %in% bin.vars
    }
    else stop("'bin.vars' must be a logical, numeric, or character vector.")
  }
  return(bin.vars)
}
#Resolve the 's.weights' argument to a numeric vector of sampling
#weights. 's.weights' may be the weights themselves (numeric) or the
#name of a column of 'data'; NULL/empty passes through as NULL.
process.s.weights <- function(s.weights, data = NULL) {
  if (length(s.weights) == 0L) {
    return(NULL)
  }
  is.name.string <- is.character(s.weights) && length(s.weights) == 1
  if (!is.name.string && !is.numeric(s.weights)) {
    stop("The argument to 's.weights' must be a vector or data frame of sampling weights or the (quoted) names of the variable in 'data' that contains sampling weights.", call. = FALSE)
  }
  if (is.name.string) {
    if (length(data) == 0L) {
      stop("'s.weights' was specified as a string but there was no argument to 'data'.", call. = FALSE)
    }
    if (!s.weights %in% names(data)) {
      stop("The name supplied to 's.weights' is not the name of a variable in 'data'.", call. = FALSE)
    }
    s.weights <- data[[s.weights]]
  }
  s.weights
}
#Uniqueness
#Number of unique values in x; factors count their levels. NAs are
#dropped when na.rm = TRUE.
nunique <- function(x, nmax = NA, na.rm = TRUE) {
  if (is_null(x)) return(0)
  else {
    if (na.rm && anyNA(x)) x <- na.rem(x)
    if (is.factor(x)) return(nlevels(x))
    else return(length(unique(x, nmax = nmax)))
  }
}
#TRUE if x has more than n unique values; shortcuts (all_the_same, nmax)
#avoid a full unique() scan on long vectors.
nunique.gt <- function(x, n, na.rm = TRUE) {
  if (missing(n)) stop("'n' must be supplied.")
  if (n < 0) stop("'n' must be non-negative.")
  if (is_null(x)) FALSE
  else {
    if (n == 1) !all_the_same(x, na.rm)
    else if (length(x) < 2000) nunique(x, na.rm = na.rm) > n
    #unique(x, nmax = n) can error when more than n values exist, which
    #itself answers the question
    else tryCatch(nunique(x, nmax = n, na.rm = na.rm) > n, error = function(e) TRUE)
  }
}
#TRUE if all values of x are identical (doubles compared with tolerance).
#With na.rm = FALSE, any NA makes the result FALSE unless all are NA.
all_the_same <- function(x, na.rm = TRUE) {
  if (anyNA(x)) {
    x <- na.rem(x)
    if (!na.rm) return(is_null(x))
  }
  if (is.double(x)) check_if_zero(max(x) - min(x))
  else all(x == x[1])
}
#TRUE if x takes on exactly two distinct values
is_binary <- function(x, na.rm = TRUE) {
  if (na.rm && anyNA(x)) x <- na.rem(x)
  !all_the_same(x) && all_the_same(x[x != x[1]])
}
#Apply is_binary() to each column of a matrix-like object
is_binary_col <- function(dat, na.rm = TRUE) {
  if (length(dim(dat)) != 2) stop("is_binary_col cannot be used with objects that don't have 2 dimensions.")
  apply(dat, 2, is_binary)
}
#R Processing
#Create a list of length n; when n is a vector, its values become the
#element names.
make_list <- function(n) {
  if (length(n) == 1L && is.numeric(n)) {
    vector("list", as.integer(n))
  }
  else if (is_(n, "atomic")) {
    setNames(vector("list", length(n)), as.character(n))
  }
  else stop("'n' must be an integer(ish) scalar or an atomic variable.")
}
#Build an empty data.frame with the given columns/rows; 'ncol'/'nrow'
#may be counts or vectors of names. 'types' sets each column's storage
#type (NA leaves the numeric default, intended for factors).
make_df <- function(ncol, nrow = 0, types = "numeric") {
  if (length(ncol) == 1L && is.numeric(ncol)) {
    col_names <- NULL
    ncol <- as.integer(ncol)
  }
  else if (is_(ncol, "atomic")) {
    col_names <- as.character(ncol)
    ncol <- length(ncol)
  }
  if (length(nrow) == 1L && is.numeric(nrow)) {
    row_names <- NULL
    nrow <- as.integer(nrow)
  }
  else if (is_(nrow, "atomic")) {
    row_names <- as.character(nrow)
    nrow <- length(nrow)
  }
  df <- as.data.frame.matrix(matrix(NA_real_, nrow = nrow, ncol = ncol))
  colnames(df) <- col_names
  rownames(df) <- row_names
  if (is_not_null(types)) {
    if (length(types) %nin% c(1, ncol)) stop("'types' must be equal to the number of columns.")
    if (any(types %nin% c("numeric", "integer", "logical", "character", NA))) {
      stop("'types' must be an acceptable type. For factors, use NA.")
    }
    if (length(types) == 1) types <- rep(types, ncol)
    #get("character")(nrow) etc. creates an empty vector of that type
    for (i in seq_len(ncol)) if (!is.na(types)[i] && types[i] != "numeric") df[[i]] <- get(types[i])(nrow)
  }
  return(df)
}
#Vectorized multi-way ifelse: pairs of (test, yes) arguments followed by
#a single default 'no'; later pairs overwrite earlier ones where their
#test is TRUE.
ifelse_ <- function(...) {
  dotlen <- ...length()
  if (dotlen %% 2 == 0) stop("ifelse_ must have an odd number of arguments: pairs of test/yes, and one no.")
  out <- ...elt(dotlen)
  if (dotlen > 1) {
    if (!is_(out, "atomic")) stop("The last entry to ifelse_ must be atomic.")
    if (length(out) == 1) out <- rep(out, length(..1))
    n <- length(out)
    for (i in seq_len((dotlen - 1)/2)) {
      test <- ...elt(2*i - 1)
      yes <- ...elt(2*i)
      if (length(yes) == 1) yes <- rep(yes, n)
      if (length(yes) != n || length(test) != n) stop("All entries must have the same length.")
      if (!is.logical(test)) stop(paste("The", ordinal(2*i - 1), "entry to ifelse_ must be logical."))
      if (!is_(yes, "atomic")) stop(paste("The", ordinal(2*i), "entry to ifelse_ must be atomic."))
      pos <- which(test)
      out[pos] <- yes[pos]
    }
  }
  else {
    if (!is_(out, "atomic")) stop("The first entry to ifelse_ must be atomic.")
  }
  return(out)
}
#Flexible type test: TRUE if x is any of 'types', trying is_<type>, then
#is.<type>, then inherits(); "list" excludes data.frames. With
#stop = TRUE, raises an informative error instead of returning FALSE.
is_ <- function(x, types, stop = FALSE, arg.to = FALSE) {
  s1 <- deparse1(substitute(x))
  if (is_not_null(x)) {
    for (i in types) {
      if (i == "list") it.is <- is.list(x) && !is.data.frame(x)
      else if (is_not_null(get0(paste0("is_", i)))) {
        it.is <- get0(paste0("is_", i))(x)
      }
      else if (is_not_null(get0(paste.("is", i)))) {
        it.is <- get0(paste.("is", i))(x)
      }
      else it.is <- inherits(x, i)
      if (it.is) break
    }
  }
  else it.is <- FALSE
  if (stop) {
    if (!it.is) {
      s0 <- ifelse(arg.to, "The argument to ", "")
      s2 <- ifelse(any(types %in% c("factor", "character", "numeric", "logical")),
                   "vector", "")
      stop(paste0(s0, "'", s1, "' must be a ", word_list(types, and.or = "or"), " ", s2, "."), call. = FALSE)
    }
  }
  return(it.is)
}
#TRUE for NULL and any zero-length object
is_null <- function(x) {
  length(x) == 0L
}
#Negation of is_null()
is_not_null <- function(x) {
  length(x) > 0L
}
#Return the first non-NULL (non-empty) argument; falls back to the first
#extra argument, or to x1 when nothing usable was supplied.
if_null_then <- function(x1 = NULL, x2 = NULL, ...) {
  if (is_not_null(x1)) return(x1)
  if (is_not_null(x2)) return(x2)
  n.extra <- ...length()
  if (n.extra > 0) {
    for (k in seq_len(n.extra)) {
      candidate <- ...elt(k)
      if (is_not_null(candidate)) return(candidate)
    }
    return(..1)
  }
  x1
}
#Drop all NULL/empty elements from a list
clear_null <- function(x) {
  empties <- vapply(x, is_null, logical(1L))
  x[empties] <- NULL
  return(x)
}
#Strip attributes from 'x'. With all = TRUE every attribute is removed;
#otherwise structural attributes (names, class, dim, dimnames,
#row.names) are kept.
clear_attr <- function(x, all = FALSE) {
  if (all) {
    attributes(x) <- NULL
    return(x)
  }
  keep <- c("names", "class", "dim", "dimnames", "row.names")
  attr.names <- names(attributes(x))
  attributes(x)[!attr.names %in% keep] <- NULL
  return(x)
}
#Stop with a message asking the user to report a bug, naming the calling
#function.
probably.a.bug <- function() {
  caller <- paste(deparse1(sys.call(-1)), collapse = "\n")
  msg <- paste0("An error was produced and is likely a bug. Please let the maintainer know a bug was produced by the function\n",
                caller)
  stop(msg, call. = FALSE)
}
#Value-matching negation: TRUE where x is NOT found in 'table'
`%nin%` <- function(x, table) is.na(match(x, table, nomatch = NA_integer_))
`%pin%` <- function(x, table) {
  #Partial in. TRUE if x uniquely identifies values in table.
  !is.na(pmatch(x, table))
}
`%cin%` <- function(x, table) {
  #Partial in w/ charmatch. TRUE if x at all in table.
  !is.na(charmatch(x, table))
}
#TRUE if x is NULL/empty or the result of a failed try().
#Fix: use inherits() instead of any(class(x) == "try-error"), the
#idiomatic (and S4-safe) class test.
null_or_error <- function(x) {length(x) == 0L || inherits(x, "try-error")}
match_arg <- function(arg, choices, several.ok = FALSE) {
  #Replaces match.arg() but gives cleaner error message and processing
  #of arg.
  if (missing(arg))
    stop("No argument was supplied to match_arg.", call. = FALSE)
  arg.name <- deparse1(substitute(arg))
  #As in match.arg(): when 'choices' is missing, read the default
  #choices from the calling function's formal arguments
  if (missing(choices)) {
    formal.args <- formals(sys.function(sysP <- sys.parent()))
    choices <- eval(formal.args[[as.character(substitute(arg))]],
                    envir = sys.frame(sysP))
  }
  if (is.null(arg))
    return(choices[1L])
  else if (!is.character(arg))
    stop(paste0("The argument to '", arg.name, "' must be NULL or a character vector"), call. = FALSE)
  if (!several.ok) {
    #Supplying the full default vector selects its first element
    if (identical(arg, choices))
      return(arg[1L])
    if (length(arg) > 1L)
      stop(paste0("The argument to '", arg.name, "' must be of length 1"), call. = FALSE)
  }
  else if (is_null(arg))
    stop(paste0("The argument to '", arg.name, "' must be of length >= 1"), call. = FALSE)
  #Partial matching of the supplied value(s) against the choices
  i <- pmatch(arg, choices, nomatch = 0L, duplicates.ok = TRUE)
  if (all(i == 0L))
    stop(paste0("The argument to '", arg.name, "' should be ", if (length(choices) > 1) {if (several.ok) "at least one of " else "one of "} else "",
                word_list(choices, and.or = "or", quotes = 2), "."),
         call. = FALSE)
  i <- i[i > 0L]
  if (!several.ok && length(i) > 1)
    stop("There is more than one match in 'match_arg'")
  choices[i]
}
#Extract the final element of a vector or list
last <- function(x) {
  x[[length(x)]]
}
#Replace the final element of a vector or list
`last<-` <- function(x, value) {
  `[[<-`(x, length(x), value)
}
#Length helper: NROW() for matrix-like objects; with recursive = TRUE a
#list yields the per-element lengths.
len <- function(x, recursive = TRUE) {
  if (length(x) == 0L) {
    return(0L)
  }
  if (length(dim(x)) > 1) {
    return(NROW(x))
  }
  if (is.list(x) && recursive) {
    return(vapply(x, len, numeric(1L), recursive = FALSE))
  }
  length(x)
}
#A faster na.omit() for vectors (adds no attributes)
na.rem <- function(x) {
  x[!is.na(x)]
}
#For each column of a matrix-like object, TRUE if it contains any NA
anyNA_col <- function(x) {
  colSums(is.na(x)) > 0
}
#Verify that the given packages are installed; either error (default) or
#return FALSE (alternative = TRUE) when any are missing.
check.package <- function(package.name, alternative = FALSE) {
  packages.not.installed <- package.name[!vapply(package.name, requireNamespace, logical(1L),
                                                 quietly = TRUE)]
  if (is_not_null(packages.not.installed)) {
    if (alternative) return(FALSE)
    else {
      plural <- length(packages.not.installed) > 1
      stop(paste0("Package", if (plural) "s " else " ",
                  word_list(packages.not.installed, quotes = 1, is.are = TRUE),
                  " needed for this function to work. Please install ",
                  if (plural) "them" else "it","."),
           call. = FALSE)
    }
  }
  else return(invisible(TRUE))
}
#TRUE if the function 'fun' appears anywhere on the current call stack
check_if_call_from_fun <- function(fun) {
  # Check if called from within function f
  if (missing(fun) || !exists(deparse1(substitute(fun)), mode = "function")) return(FALSE)
  sp <- sys.parents()
  sys.funs <- lapply(sp, sys.function)
  for (x in sys.funs) {
    if (identical(fun, x)) return(TRUE)
  }
  FALSE
}
#Not used cobalt; replaced with rlang
#Test whether f is a formula; 'sides' (1 or 2) additionally requires a
#one- or two-sided formula.
is.formula <- function(f, sides = NULL) {
  #Replaced by rlang::is_formula
  #NOTE(review): "!" is accepted as a formula head here; the reason is
  #not evident from this file — confirm before removing.
  res <- inherits(f, "formula") && is.name(f[[1]]) && deparse1(f[[1]]) %in% c( '~', '!') &&
    length(f) >= 2
  if (is_not_null(sides) && is.numeric(sides) && sides %in% c(1,2)) {
    res <- res && length(f) == sides + 1
  }
  return(res)
}
#Backport of str2expression() for R < 3.6
if (getRversion() < 3.6) str2expression <- function(text) parse(text=text, keep.source=FALSE)
|
/R/SHARED.R
|
no_license
|
Zoe187419/cobalt
|
R
| false
| false
| 37,771
|
r
|
#This document is shared across cobalt, WeightIt, and optweight
#Strings
word_list <- function(word.list = NULL, and.or = c("and", "or"), is.are = FALSE, quotes = FALSE) {
  #When given a vector of strings, creates a string of the form "a and b"
  #or "a, b, and c"
  #If is.are, adds "is" or "are" appropriately
  #'quotes' = 1 wraps words in single quotes, 2 in double quotes.
  #The "plural" attribute records whether more than one word was joined.
  L <- length(word.list)
  if (quotes) {
    if (as.integer(quotes) == 2) word.list <- vapply(word.list, function(x) paste0("\"", x, "\""), character(1L))
    else if (as.integer(quotes) == 1) word.list <- vapply(word.list, function(x) paste0("\'", x, "\'"), character(1L))
    else stop("'quotes' must be boolean, 1, or 2.")
  }
  if (L == 0) {
    out <- ""
    attr(out, "plural") = FALSE
  }
  else {
    #Drop empty and missing entries before joining
    word.list <- word.list[!word.list %in% c(NA_character_, "")]
    L <- length(word.list)
    if (L == 0) {
      out <- ""
      attr(out, "plural") = FALSE
    }
    else if (L == 1) {
      out <- word.list
      if (is.are) out <- paste(out, "is")
      attr(out, "plural") = FALSE
    }
    else {
      and.or <- match_arg(and.or)
      if (L == 2) {
        out <- paste(word.list, collapse = paste0(" ", and.or," "))
      }
      else {
        #Serial (Oxford) comma before the final conjunction
        out <- paste(paste(word.list[seq_len(L-1)], collapse = ", "),
                     word.list[L], sep = paste0(", ", and.or," "))
      }
      if (is.are) out <- paste(out, "are")
      attr(out, "plural") = TRUE
    }
  }
  return(out)
}
#Capitalize the first letter of each element of a character vector
firstup <- function(x) {
  first <- toupper(substr(x, 1, 1))
  substr(x, 1, 1) <- first
  x
}
#All combinations of the inputs, pasted together element-wise
expand.grid_string <- function(..., collapse = "") {
  combos <- expand.grid(...)
  return(apply(combos, 1, paste, collapse = collapse))
}
#Convert number(s) to strings of Unicode superscript digits
#(e.g., 10 -> "¹⁰"). Vectorized over x.
num_to_superscript <- function(x) {
  #Map the digits 0-9 to their Unicode superscript code points
  nums <- setNames(c("\u2070",
                     "\u00B9",
                     "\u00B2",
                     "\u00B3",
                     "\u2074",
                     "\u2075",
                     "\u2076",
                     "\u2077",
                     "\u2078",
                     "\u2079"),
                   as.character(0:9))
  x <- as.character(x)
  splitx <- strsplit(x, "", fixed = TRUE)
  #vapply() rather than sapply() so the result is always character, even
  #for zero-length input (sapply would return list())
  supx <- vapply(splitx, function(y) paste0(nums[y], collapse = ""), character(1L))
  return(supx)
}
#English ordinal representation of numbers ("1st", "2nd", "11th", ...).
#Vectorized; negative numbers keep their sign.
ordinal <- function(x) {
  if (!is.numeric(x) || !is.vector(x) || length(x) == 0L) stop("'x' must be a numeric vector.")
  if (length(x) > 1) return(vapply(x, ordinal, character(1L)))
  x0 <- abs(x)
  x0.char <- as.character(x0)
  last.digit <- substring(x0.char, nchar(x0.char), nchar(x0.char))
  last.two <- substring(x0.char, max(1L, nchar(x0.char) - 1L), nchar(x0.char))
  #Bug fix: 11, 12, and 13 take "th", not "st"/"nd"/"rd" ("11st" before)
  suffix <- if (last.two %in% c("11", "12", "13")) "th"
  else switch(last.digit,
              "1" = "st",
              "2" = "nd",
              "3" = "rd",
              "th")
  out <- paste0(x0.char, suffix)
  if (sign(x) == -1) out <- paste0("-", out)
  return(out)
}
#Round the numeric columns of a data.frame and convert everything to
#character for printing; 'pad' fills short decimal tails so columns
#align, 'na_vals' replaces NAs, and non-finite values print as "N/A".
round_df_char <- function(df, digits, pad = "0", na_vals = "") {
  if (NROW(df) == 0 || NCOL(df) == 0) return(df)
  if (!is.data.frame(df)) df <- as.data.frame.matrix(df, stringsAsFactors = FALSE)
  rn <- rownames(df)
  cn <- colnames(df)
  infs <- o.negs <- array(FALSE, dim = dim(df))
  nas <- is.na(df)
  nums <- vapply(df, is.numeric, logical(1))
  infs[,nums] <- vapply(which(nums), function(i) !nas[,i] & !is.finite(df[[i]]), logical(NROW(df)))
  #Character columns that are really numbers get converted so they round
  #and align like the numeric ones
  for (i in which(!nums)) {
    if (can_str2num(df[[i]])) {
      df[[i]] <- str2num(df[[i]])
      nums[i] <- TRUE
    }
  }
  #Remember small negatives that round to zero so the "-" can be restored
  o.negs[,nums] <- !nas[,nums] & df[nums] < 0 & round(df[nums], digits) == 0
  df[nums] <- round(df[nums], digits = digits)
  for (i in which(nums)) {
    df[[i]] <- format(df[[i]], scientific = FALSE, justify = "none", trim = TRUE,
                      drop0trailing = !identical(as.character(pad), "0"))
    if (!identical(as.character(pad), "0") && any(grepl(".", df[[i]], fixed = TRUE))) {
      #Pad all entries to the column's maximum number of decimal digits
      s <- strsplit(df[[i]], ".", fixed = TRUE)
      lengths <- lengths(s)
      digits.r.of.. <- rep(0, NROW(df))
      digits.r.of..[lengths > 1] <- nchar(vapply(s[lengths > 1], `[[`, character(1L), 2))
      max.dig <- max(digits.r.of..)
      #Entries with no decimal point may need a "." before the padding
      dots <- ifelse(lengths > 1, "", if (as.character(pad) != "") "." else pad)
      pads <- vapply(max.dig - digits.r.of.., function(n) paste(rep(pad, n), collapse = ""), character(1L))
      df[[i]] <- paste0(df[[i]], dots, pads)
    }
  }
  df[o.negs] <- paste0("-", df[o.negs])
  # Insert NA placeholders
  df[nas] <- na_vals
  df[infs] <- "N/A"
  if (length(rn) > 0) rownames(df) <- rn
  if (length(cn) > 0) names(df) <- cn
  return(df)
}
#Render a list of numeric ranges as a crude text "box plot": a table
#with Min/Max columns and a |---| bar positioned within the overall
#range, 'width' characters wide.
text_box_plot <- function(range.list, width = 12) {
  full.range <- range(unlist(range.list))
  ratio = diff(full.range)/(width+1)
  rescaled.range.list <- lapply(range.list, function(x) round(x/ratio))
  rescaled.full.range <- round(full.range/ratio)
  d <- make_df(c("Min", paste(rep(" ", width + 1), collapse = ""), "Max"),
               names(range.list),
               "character")
  d[["Min"]] <- vapply(range.list, function(x) x[1], numeric(1L))
  d[["Max"]] <- vapply(range.list, function(x) x[2], numeric(1L))
  for (i in seq_len(nrow(d))) {
    spaces1 <- rescaled.range.list[[i]][1] - rescaled.full.range[1]
    #|
    dashes <- max(c(0, diff(rescaled.range.list[[i]]) - 2))
    #|
    spaces2 <- max(c(0, diff(rescaled.full.range) - (spaces1 + 1 + dashes + 1)))
    d[i, 2] <- paste0(paste(rep(" ", spaces1), collapse = ""), "|", paste(rep("-", dashes), collapse = ""), "|", paste(rep(" ", spaces2), collapse = ""))
  }
  return(d)
}
#TRUE if f1 and f2 define the same grouping of observations: equal
#numbers of unique values, and pairing them creates no new combinations
equivalent.factors <- function(f1, f2) {
  nu1 <- nunique(f1)
  nu2 <- nunique(f2)
  if (nu1 == nu2) {
    return(nu1 == nunique(paste.(f1, f2)))
  }
  else {
    return(FALSE)
  }
}
#Alternative check via the rank of [1, f1, f2]; NOTE(review): treats the
#numeric codes of the factors as meaningful — confirm intended use.
equivalent.factors2 <- function(f1, f2) {
  return(qr(cbind(1, as.numeric(f1), as.numeric(f2)))$rank == 2)
}
paste. <- function(..., collapse = NULL) {
  #Like paste0 but with sep = ".'
  paste(..., sep = ".", collapse = collapse)
}
#Wrap each string in 's' to at most 'nchar' characters per line, joining
#the wrapped pieces with newlines. Extra arguments go to strwrap().
wrap <- function(s, nchar, ...) {
  vapply(s, function(one.string) {
    pieces <- strwrap(one.string, width = nchar, ...)
    paste(pieces, collapse = "\n")
  }, character(1L))
}
#Like strsplit() but splits on each of several delimiters in turn.
#Only works for one string at a time (in x).
strsplits <- function(x, splits, fixed = TRUE, ...) {
  #Bug fix: 'fixed' was hard-coded to TRUE in the strsplit() call,
  #silently ignoring the argument; regex delimiters now work when
  #fixed = FALSE.
  for (split in splits) x <- unlist(strsplit(x, split, fixed = fixed, ...))
  return(x[x != ""]) # Remove empty values
}
#c() method for factors: combining through unlist() keeps the result a
#factor and merges the level sets
c.factor <- function(..., recursive=TRUE) {
  unlist(list(...), recursive = recursive)
}
#TRUE if every non-NA value of 'x' can be converted to a number
can_str2num <- function(x) {
  non.missing <- x[!is.na(x)]
  converted <- suppressWarnings(as.numeric(as.character(non.missing)))
  !anyNA(converted)
}
#Convert to numeric, keeping the original NAs as NA
str2num <- function(x) {
  missing.orig <- is.na(x)
  out <- suppressWarnings(as.numeric(as.character(x)))
  out[missing.orig] <- NA
  return(out)
}
#Strip 'char' from the ends of each element of x, repeatedly when
#recursive = TRUE. With symmetrical = TRUE an element is trimmed only
#when BOTH ends carry 'char'. Assumes 'char' is a single character —
#the one-character regexes below would misbehave otherwise (TODO
#confirm callers never pass longer strings).
trim_string <- function(x, char = " ", symmetrical = TRUE, recursive = TRUE) {
  sw <- startsWith(x, char)
  ew <- endsWith(x, char)
  if (symmetrical) {
    #"^.|.$" removes exactly one character from each end; safe because
    #the guards above ensure those characters equal 'char'
    if (any(sw & ew)) x[sw & ew] <- gsub('^.|.$', '', x[sw & ew])
    else return(x)
  }
  else {
    asw <- any(sw)
    aew <- any(ew)
    if (asw || aew) {
      if (asw) x[sw] <- gsub('^.', '', x[sw])
      if (aew) x[ew] <- gsub('.$', '', x[ew])
    }
    else return(x)
  }
  #Something was trimmed: recurse until nothing more can be removed
  if (recursive) {
    trim_string(x, char, symmetrical, recursive)
  }
  else return(x)
}
#Numbers
#TRUE where x is within floating-point tolerance of zero (all.equal's
#default tolerance)
check_if_zero <- function(x) {
  tolerance <- .Machine$double.eps^0.5
  abs(x) < tolerance
}
#Vectorized test for whether x lies within 'range'; NAs in x map to
#'na.action' (a length-1 atomic value)
between <- function(x, range, inclusive = TRUE, na.action = FALSE) {
  if (!all(is.numeric(x))) stop("'x' must be a numeric vector.", call. = FALSE)
  if (length(range) != 2) stop("'range' must be of length 2.", call. = FALSE)
  if (anyNA(range) || !is.numeric(range)) stop("'range' must contain numeric entries only.", call. = FALSE)
  if (range[2] < range[1]) range <- c(range[2], range[1])
  if (anyNA(x)) {
    if (length(na.action) != 1 || !is.atomic(na.action)) stop("'na.action' must be an atomic vector of length 1.", call. = FALSE)
  }
  inside <- if (inclusive) x >= range[1] & x <= range[2]
  else x > range[1] & x < range[2]
  ifelse(is.na(x), na.action, inside)
}
#max()/min() that return NA instead of warning or Inf when no finite
#values are present
max_ <- function(..., na.rm = TRUE) {
  if (any(is.finite(unlist(list(...))))) max(..., na.rm = na.rm)
  else NA_real_
}
min_ <- function(..., na.rm = TRUE) {
  if (any(is.finite(unlist(list(...))))) min(..., na.rm = na.rm)
  else NA_real_
}
#TRUE where x is integer-valued (integer storage, or a whole-number
#double up to floating-point tolerance)
check_if_int <- function(x) {
  if (is.integer(x)) rep(TRUE, length(x))
  else if (is.numeric(x)) check_if_zero(x - round(x))
  else rep(FALSE, length(x))
}
#Statistics
#Convert a two-valued variable to 0/1 integers. 'zero' or 'one' name the
#level mapped to 0 or 1; with neither given, numeric 0 (if present) or
#the smallest value becomes 0. Names of 'variable' are preserved.
binarize <- function(variable, zero = NULL, one = NULL) {
  if (!is_binary(variable)) stop(paste0("Cannot binarize ", deparse1(substitute(variable)), ": more than two levels."))
  if (is.character(variable) || is.factor(variable)) {
    variable <- factor(variable, nmax = 2)
    unique.vals <- levels(variable)
  }
  else {
    unique.vals <- unique(variable, nmax = 2)
  }
  if (is_null(zero)) {
    if (is_null(one)) {
      #No reference level given: work on a numeric version of the values
      if (can_str2num(unique.vals)) {
        variable.numeric <- str2num(variable)
      }
      else {
        variable.numeric <- as.numeric(variable)
      }
      if (0 %in% variable.numeric) zero <- 0
      else zero <- min(variable.numeric, na.rm = TRUE)
      return(setNames(as.integer(variable.numeric != zero), names(variable)))
    }
    else {
      if (one %in% unique.vals) return(setNames(as.integer(variable == one), names(variable)))
      else stop("The argument to 'one' is not the name of a level of variable.", call. = FALSE)
    }
  }
  else {
    if (zero %in% unique.vals) return(setNames(as.integer(variable != zero), names(variable)))
    else stop("The argument to 'zero' is not the name of a level of variable.", call. = FALSE)
  }
}
#Kish's effective sample size for a vector of weights
ESS <- function(w) {
  (sum(w)^2) / sum(w^2)
}
#Center the columns of x (numeric vector, matrix, or data.frame) at
#'at' (default: the column means). 'at' may be one value or one per
#column. The result has the same shape/class as the input.
center <- function(x, at = NULL, na.rm = TRUE) {
  if (is.data.frame(x)) {
    x <- as.matrix.data.frame(x)
    type <- "df"
  }
  else type <- NULL
  if (!is.numeric(x)) stop("'x' must be numeric.")
  if (is.array(x) && length(dim(x)) > 2) stop("'x' must be a numeric or matrix-like (not array).")
  if (!is.matrix(x)) {
    x <- matrix(x, ncol = 1)
    type <- "vec"
  }
  #Bug fix: the df case previously fell through to an unconditional
  #`type <- "matrix"`, so data.frame input came back as a matrix; keep
  #"df" when it was already set.
  else if (is.null(type)) type <- "matrix"
  if (length(at) == 0L) at <- colMeans(x, na.rm = na.rm)
  else if (!length(at) %in% c(1, ncol(x))) stop("'at' is not the right length.")
  out <- x - matrix(at, byrow = TRUE, ncol = ncol(x), nrow = nrow(x))
  if (type == "df") out <- as.data.frame.matrix(out)
  else if (type == "vec") out <- drop(out)
  return(out)
}
#Weighted mean of x; units with NA in x contribute to neither the
#numerator nor the denominator
w.m <- function(x, w = NULL, na.rm = TRUE) {
  if (length(w) == 0L) w <- rep(1, length(x))
  if (anyNA(x)) w[is.na(x)] <- NA
  sum(x * w, na.rm = na.rm) / sum(w, na.rm = na.rm)
}
#Weighted mean of each column of a matrix; weights attached to missing
#entries are dropped from that column's denominator
col.w.m <- function(mat, w = NULL, na.rm = TRUE) {
  if (length(w) == 0L) w <- 1
  w.sum <- colSums(w * !is.na(mat))
  colSums(mat * w, na.rm = na.rm) / w.sum
}
#Weighted variance of each column of 'mat'. Binary columns (flagged by
#'bin.vars') use the Bernoulli variance m*(1 - m); continuous columns
#use a weighted estimator with denominator 1 - sum(w^2) for weights
#normalized to sum to 1.
col.w.v <- function(mat, w = NULL, bin.vars = NULL, na.rm = TRUE) {
  if (!is.matrix(mat)) {
    if (is.data.frame(mat)) {
      if (any(vapply(mat, is_, logical(1L), types = c("factor", "character")))) {
        stop("'mat' must be a numeric matrix.")
      }
      else mat <- data.matrix(mat)
    }
    else if (is.numeric(mat)) {
      mat <- matrix(mat, ncol = 1)
    }
    else stop("'mat' must be a numeric matrix.")
  }
  if (is_null(bin.vars)) bin.vars <- rep(FALSE, ncol(mat))
  else if (length(bin.vars) != ncol(mat) || anyNA(as.logical(bin.vars))) {
    stop("'bin.vars' must be a logical vector with length equal to the number of columns of 'mat'.", call. = FALSE)
  }
  bin.var.present <- any(bin.vars)
  non.bin.vars.present <- any(!bin.vars)
  var <- setNames(numeric(ncol(mat)), colnames(mat))
  if (is_null(w)) {
    #Unweighted: ordinary sample variance with an (n - 1) denominator
    if (non.bin.vars.present) {
      den <- colSums(!is.na(mat[, !bin.vars, drop = FALSE])) - 1
      var[!bin.vars] <- colSums(center(mat[, !bin.vars, drop = FALSE])^2, na.rm = na.rm)/den
    }
    if (bin.var.present) {
      means <- colMeans(mat[, bin.vars, drop = FALSE], na.rm = na.rm)
      var[bin.vars] <- means * (1 - means)
    }
  }
  else if (na.rm && anyNA(mat)) {
    #Weighted with NAs: NA-out the weights on missing cells so each
    #column's weights renormalize over its observed entries only
    # n <- nrow(mat)
    w <- array(w, dim = dim(mat))
    w[is.na(mat)] <- NA
    s <- colSums(w, na.rm = na.rm)
    w <- mat_div(w, s)
    if (non.bin.vars.present) {
      x <- sqrt(w[, !bin.vars, drop = FALSE]) * center(mat[, !bin.vars, drop = FALSE],
                                                       at = colSums(w[, !bin.vars, drop = FALSE] * mat[, !bin.vars, drop = FALSE], na.rm = na.rm))
      var[!bin.vars] <- colSums(x*x, na.rm = na.rm)/(1 - colSums(w[, !bin.vars, drop = FALSE]^2, na.rm = na.rm))
    }
    if (bin.var.present) {
      means <- colSums(w[, bin.vars, drop = FALSE] * mat[, bin.vars, drop = FALSE], na.rm = na.rm)
      var[bin.vars] <- means * (1 - means)
    }
  }
  else {
    #Weighted, no NAs: a single weight vector normalized to sum to 1.
    #(The is_null(w) guard below is defensive; w is non-NULL on this
    #branch.)
    if (is_null(w)) w <- rep(1, nrow(mat))
    w <- w/sum(w)
    if (non.bin.vars.present) {
      x <- sqrt(w) * center(mat[, !bin.vars, drop = FALSE],
                            at = colSums(w * mat[, !bin.vars, drop = FALSE], na.rm = na.rm))
      var[!bin.vars] <- colSums(x*x, na.rm = na.rm)/(1 - sum(w^2))
    }
    if (bin.var.present) {
      means <- colSums(w * mat[, bin.vars, drop = FALSE], na.rm = na.rm)
      var[bin.vars] <- means * (1 - means)
    }
  }
  return(var)
}
#Weighted covariance between each column of 'mat' and 'y', mirroring the
#branch structure of col.w.v().
col.w.cov <- function(mat, y, w = NULL, na.rm = TRUE) {
  if (!is.matrix(mat)) {
    if (is_null(w)) return(cov(mat, y, use = if (na.rm) "pair" else "everything"))
    else mat <- matrix(mat, ncol = 1)
  }
  if (is_null(w)) {
    #Unweighted: cross the NA patterns of mat and y so both centerings
    #use the same complete pairs
    y <- array(y, dim = dim(mat))
    if (anyNA(mat)) y[is.na(mat)] <- NA
    if (anyNA(y)) mat[is.na(y)] <- NA
    den <- colSums(!is.na(mat*y)) - 1
    cov <- colSums(center(mat, na.rm = na.rm)*center(y, na.rm = na.rm), na.rm = na.rm)/den
  }
  else if (na.rm && anyNA(mat)) {
    n <- nrow(mat)
    w <- array(w, dim = dim(mat))
    w[is.na(mat)] <- NA_real_
    s <- colSums(w, na.rm = na.rm)
    w <- mat_div(w, s)
    #y is left uncentered: with column weights summing to 1 and mat
    #centered at its weighted mean, the cross-term with y's mean cancels
    x <- w * center(mat, at = colSums(w * mat, na.rm = na.rm))
    cov <- colSums(x*y, na.rm = na.rm)/(1 - colSums(w^2, na.rm = na.rm))
  }
  else {
    n <- nrow(mat)
    w <- w/sum(w)
    x <- w * center(mat, at = colSums(w * mat, na.rm = na.rm))
    cov <- colSums(x*y, na.rm = na.rm)/(1 - sum(w^2))
  }
  return(cov)
}
#Weighted correlation between each column of 'mat' and 'y'. With no
#weights of either kind, defers to stats::cor(); otherwise divides the
#weighted covariance by s.weights-weighted standard deviations.
col.w.r <- function(mat, y, w = NULL, s.weights = NULL, bin.vars = NULL, na.rm = TRUE) {
  if (is_null(w) && is_null(s.weights)) {
    #Bug fix: 'w' was previously passed positionally to cor(), where it
    #landed in the 'method' argument (harmless only because w is NULL on
    #this path); drop it and rely on cor()'s default method.
    return(cor(mat, y, use = if (na.rm) "pair" else "everything"))
  }
  cov <- col.w.cov(mat, y = y, w = w, na.rm = na.rm)
  den <- sqrt(col.w.v(mat, w = s.weights, bin.vars = bin.vars, na.rm = na.rm)) *
    sqrt(col.w.v(y, w = s.weights, na.rm = na.rm))
  return(cov/den)
}
#Coefficient of variation: SD over mean; pop = TRUE uses the population
#(n-denominator) SD. NAs are removed via mean_fast(., TRUE).
coef.of.var <- function(x, pop = TRUE) {
  if (pop) sqrt(mean_fast((x-mean_fast(x, TRUE))^2, TRUE))/mean_fast(x, TRUE)
  else sd(x)/mean_fast(x, TRUE)
}
#Mean absolute deviation around the mean (NAs removed)
mean.abs.dev <- function(x) {
  mean_fast(abs(x - mean_fast(x, TRUE)), TRUE)
}
#Root mean square (no NA handling: an NA propagates)
rms <- function(x) {
  sqrt(mean_fast(x^2))
}
#Geometric mean over the values with finite logs, i.e., zeros,
#negatives, NAs, and infinities are dropped before averaging
geom.mean <- function(y) {
  exp(mean_fast(log(y[is.finite(log(y))]), TRUE))
}
#Divide each column of 'mat' by the corresponding element of 'vec'
mat_div <- function(mat, vec) {
  mat/vec[col(mat)]
}
#Absolute value, or the "ratio distance" max(x, 1/x) when ratio = TRUE
abs_ <- function(x, ratio = FALSE) {
  if (ratio) pmax(x, 1/x)
  else (abs(x))
}
mean_fast <- function(x, nas.possible = FALSE) {
  #Equal to mean(x, na.rm = TRUE) but faster
  #Set no.nas = FALSE if it's possible there are NAs
  #NOTE(review): the note above appears inverted — per the code, set
  #nas.possible = TRUE when NAs may be present; with FALSE, an NA
  #propagates through sum().
  if (nas.possible && anyNA(x)) {
    s <- sum(x, na.rm = TRUE)
    n <- sum(!is.na(x))
    return(s/n)
  }
  s <- sum(x)
  n <- length(x)
  return(s/n)
}
bw.nrd <- function(x) {
  #R's bw.nrd doesn't always work, but bw.nrd0 does
  #Rescales bw.nrd0's .9 constant up to bw.nrd's 1.06
  bw.nrd0(x)*1.06/.9
}
#Formulas
#Recursively replace the random-effects bars "|" and "||" in a
#formula/term with "+" so grouping variables are treated as ordinary
#terms (same idea as lme4::subbars).
subbars <- function(term) {
  #Leaves (names) and non-language objects are returned unchanged
  if (is.name(term) || !is.language(term))
    return(term)
  #Unary calls (e.g., ~rhs): recurse into the single operand
  if (length(term) == 2) {
    term[[2]] <- subbars(term[[2]])
    return(term)
  }
  if (is.call(term) && (term[[1]] == as.name("|") || term[[1]] == as.name("||"))) {
    term[[1]] <- as.name("+")
  }
  for (j in 2:length(term)) term[[j]] <- subbars(term[[j]])
  return(term)
}
#treat/covs
#Parse a formula (plus optional data) into its components: returns
#list(reported.covs = model frame of the RHS, model.covs = expanded
#model matrix (NULL when the formula contained bars), treat = the LHS
#variable (or A[["treat"]] fallback), treat.name = its name or NULL).
#RHS entries that are data.frames/matrices are expanded column-by-column;
#'sep' separates variable names from factor levels in dummy names.
get.covs.and.treat.from.formula <- function(f, data = NULL, terms = FALSE, sep = "", ...) {
  A <- list(...)
  #Check if data exists
  if (is_not_null(data)) {
    if (is.data.frame(data)) {
      data.specified <- TRUE
    }
    else {
      warning("The argument supplied to data is not a data.frame object. This may causes errors or unexpected results.", call. = FALSE)
      data <- environment(f)
      data.specified <- FALSE
    }
  }
  else {
    data <- environment(f)
    data.specified <- FALSE
  }
  env <- environment(f)
  if (!is.formula(f)) stop("'f' must be a formula.")
  #TRUE when subbars() changed nothing (no | or || in the formula); only
  #then is it safe to build a model matrix later. Note the RHS assignment
  #replaces f in place -- relies on argument evaluation order.
  eval.model.matrx <- identical(f, f <- subbars(f))
  tryCatch(tt <- terms(f, data = data),
           error = function(e) {
             if (conditionMessage(e) == "'.' in formula and no 'data' argument") {
               stop("'.' is not allowed in formulas.", call. = FALSE)
             }
             else stop(conditionMessage(e), call. = FALSE)
           })
  #Check if response exists
  if (is.formula(tt, 2)) {
    resp.vars.mentioned <- as.character(tt)[2]
    #A response "fails" if it can't be evaluated in data or the formula env
    resp.vars.failed <- vapply(resp.vars.mentioned, function(v) {
      test <- tryCatch(eval(parse(text=v), data, env), error = function(e) e)
      if (inherits(test, "simpleError")) {
        if (conditionMessage(test) == paste0("object '", v, "' not found")) return(TRUE)
        else stop(test)
      }
      else if (is_null(test)) return(TRUE)
      else return(FALSE)
    }, logical(1L))
    if (any(resp.vars.failed)) {
      if (is_null(A[["treat"]])) stop(paste0("The given response variable, \"", as.character(tt)[2], "\", is not a variable in ", word_list(c("data", "the global environment")[c(data.specified, TRUE)], "or"), "."), call. = FALSE)
      tt <- delete.response(tt)
    }
  }
  else resp.vars.failed <- TRUE
  if (any(!resp.vars.failed)) {
    #Evaluate the LHS to get the treatment vector and remember its name
    treat.name <- resp.vars.mentioned[!resp.vars.failed][1]
    treat <- eval(parse(text=treat.name)[[1]], data, env)
  }
  else {
    #Fall back to a treatment supplied directly through ...
    treat <- A[["treat"]]
    treat.name <- NULL
  }
  #Check if RHS variables exist
  tt.covs <- delete.response(tt)
  rhs.vars.mentioned.lang <- attr(tt.covs, "variables")[-1]
  rhs.vars.mentioned <- vapply(rhs.vars.mentioned.lang, deparse1, character(1L))
  rhs.vars.failed <- vapply(rhs.vars.mentioned, function(v) {
    test <- tryCatch(eval(parse(text=v), data, env), error = function(e) e)
    if (inherits(test, "simpleError")) {
      if (conditionMessage(test) == paste0("object '", v, "' not found")) return(TRUE)
      else stop(test)
    }
    else if (is_null(test)) return(TRUE)
    else return(FALSE)
  }, logical(1L))
  if (any(rhs.vars.failed)) {
    stop(paste0(c("All variables in 'formula' must be variables in 'data' or objects in the global environment.\nMissing variables: ",
                  paste(rhs.vars.mentioned[rhs.vars.failed], collapse=", "))), call. = FALSE)
  }
  rhs.term.labels <- attr(tt.covs, "term.labels")
  rhs.term.orders <- attr(tt.covs, "order")
  #Flag RHS entries that are themselves data.frames/matrices/rms objects;
  #these get expanded column-by-column below
  rhs.df <- setNames(vapply(rhs.vars.mentioned, function(v) {
    is_(try(eval(parse(text=v)[[1]], data, env), silent = TRUE),
        c("data.frame", "matrix", "rms"))
  }, logical(1L)), rhs.vars.mentioned)
  rhs.term.labels.list <- setNames(as.list(rhs.term.labels), rhs.term.labels)
  if (any(rhs.df)) {
    #Interactions (order > 1 terms) involving a data.frame are ambiguous
    if (any(rhs.vars.mentioned[rhs.df] %in% unlist(lapply(rhs.term.labels[rhs.term.orders > 1], function(x) strsplit(x, ":", fixed = TRUE))))) {
      stop("Interactions with data.frames are not allowed in the input formula.", call. = FALSE)
    }
    #Convert each data.frame-like term to a plain data.frame of columns
    addl.dfs <- setNames(lapply(rhs.vars.mentioned[rhs.df], function(x) {
      df <- eval(parse(text=x)[[1]], data, env)
      if (is_(df, "rms")) {
        if (length(dim(df)) == 2L) class(df) <- "matrix"
        df <- setNames(as.data.frame(as.matrix(df)), attr(df, "colnames"))
      }
      #Numeric-looking column names get prefixed with the term name
      else if (can_str2num(colnames(df))) colnames(df) <- paste(x, colnames(df), sep = sep)
      return(as.data.frame(df))
    }),
    rhs.vars.mentioned[rhs.df])
    #Splice the expanded column names into the term labels in place
    for (i in rhs.term.labels[rhs.term.labels %in% rhs.vars.mentioned[rhs.df]]) {
      ind <- which(rhs.term.labels == i)
      rhs.term.labels <- append(rhs.term.labels[-ind],
                                values = names(addl.dfs[[i]]),
                                after = ind - 1)
      rhs.term.labels.list[[i]] <- names(addl.dfs[[i]])
    }
    if (data.specified) data <- do.call("cbind", unname(c(addl.dfs, list(data))))
    else data <- do.call("cbind", unname(addl.dfs))
  }
  if (is_null(rhs.term.labels)) {
    #Intercept-only formula: fabricate a constant column named Intercept
    new.form <- as.formula("~ 1")
    tt.covs <- terms(new.form)
    covs <- data.frame(Intercept = rep(1, if (is_null(treat)) 1 else length(treat)))
    if (is_not_null(treat.name) && treat.name == "Intercept") {
      names(covs) <- "Intercept_"
    }
  }
  else {
    #Rebuild the RHS formula, backtick-quoting expanded data.frame columns
    new.form.char <- paste("~", paste(vapply(names(rhs.term.labels.list), function(x) {
      if (x %in% rhs.vars.mentioned[rhs.df]) paste0("`", rhs.term.labels.list[[x]], "`", collapse = " + ")
      else rhs.term.labels.list[[x]]
      # try.form <- try(as.formula(paste("~", x)), silent = TRUE)
      # if (null_or_error(try.form) || (grepl("^", x, fixed = TRUE) && !startsWith(x, "I("))) {
      #   paste0("`", x, "`")
      # }
      # else x
    } , character(1L)), collapse = " + "))
    new.form <- as.formula(new.form.char)
    tt.covs <- terms(new.form)
    attr(tt.covs, "intercept") <- 0
    #Get model.frame, report error
    mf.covs <- quote(stats::model.frame(tt.covs, data,
                                        drop.unused.levels = TRUE,
                                        na.action = "na.pass"))
    tryCatch({covs <- eval(mf.covs)},
             error = function(e) {stop(conditionMessage(e), call. = FALSE)})
    if (is_not_null(treat.name) && treat.name %in% names(covs)) stop("The variable on the left side of the formula appears on the right side too.", call. = FALSE)
  }
  if (eval.model.matrx) {
    #Temporarily prefix factor levels with 'sep' so dummy-variable names
    #come out as "var<sep>level"; original levels are restored afterward.
    if (s <- !identical(sep, "")) {
      if (!is.character(sep) || length(sep) > 1) stop("'sep' must be a string of length 1.", call. = FALSE)
      original.covs.levels <- make_list(names(covs))
      for (i in names(covs)) {
        if (is.character(covs[[i]])) covs[[i]] <- factor(covs[[i]])
        if (is.factor(covs[[i]])) {
          original.covs.levels[[i]] <- levels(covs[[i]])
          levels(covs[[i]]) <- paste0(sep, original.covs.levels[[i]])
        }
      }
    }
    #Get full model matrix with interactions too
    covs.matrix <- model.matrix(tt.covs, data = covs,
                                contrasts.arg = lapply(Filter(is.factor, covs),
                                                       contrasts, contrasts=FALSE))
    if (s) {
      for (i in names(covs)) {
        if (is.factor(covs[[i]])) {
          levels(covs[[i]]) <- original.covs.levels[[i]]
        }
      }
    }
  }
  else {
    covs.matrix <- NULL
  }
  if (!terms) attr(covs, "terms") <- NULL
  return(list(reported.covs = covs,
              model.covs = covs.matrix,
              treat = treat,
              treat.name = treat.name))
}
#Classify the treatment variable and record the result in an attribute.
#Returns 'treat' with a treat.type attribute of "binary" (2 unique
#values), "multinomial" (factor/character or use.multi = TRUE; coerced
#to factor), or "continuous" (everything else).
assign.treat.type <- function(treat, use.multi = FALSE) {
  #Returns treat with treat.type attribute
  nunique.treat <- nunique(treat)
  if (nunique.treat < 2) {
    stop("The treatment must have at least two unique values.", call. = FALSE)
  }
  else if (!use.multi && nunique.treat == 2) {
    treat.type <- "binary"
  }
  else if (use.multi || is_(treat, c("factor", "character"))) {
    treat.type <- "multinomial"
    #Avoid re-factoring a treatment already processed upstream
    if (!is_(treat, "processed.treat")) treat <- factor(treat)
  }
  else {
    treat.type <- "continuous"
  }
  attr(treat, "treat.type") <- treat.type
  return(treat)
}
#Retrieve the treat.type attribute ("binary", "multinomial", or
#"continuous") set by assign.treat.type(); NULL if it was never set.
get.treat.type <- function(treat) {
  attr(treat, "treat.type")
}
#TRUE once assign.treat.type() has stamped a treat.type on 'treat'
has.treat.type <- function(treat) {
  is_not_null(get.treat.type(treat))
}
#Input processing
#Normalize the 'bin.vars' argument into a logical vector with one entry
#per column of 'mat'. Accepts logical (NAs -> FALSE), numeric (positive
#or negative column indices), or character (column names); when missing,
#binary columns are detected automatically via is_binary_col().
process.bin.vars <- function(bin.vars, mat) {
  if (missing(bin.vars)) bin.vars <- is_binary_col(mat)
  else if (is_null(bin.vars)) bin.vars <- rep(FALSE, ncol(mat))
  else {
    if (is.logical(bin.vars)) {
      bin.vars[is.na(bin.vars)] <- FALSE
      if (length(bin.vars) != ncol(mat)) stop("If 'bin.vars' is logical, it must have length equal to the number of columns of 'mat'.")
    }
    else if (is.numeric(bin.vars)) {
      bin.vars <- bin.vars[!is.na(bin.vars) & bin.vars != 0]
      if (any(bin.vars < 0) && any(bin.vars > 0)) stop("Positive and negative indices cannot be mixed with 'bin.vars'.")
      if (any(abs(bin.vars) > ncol(mat))) stop("If 'bin.vars' is numeric, none of its values can exceed the number of columns of 'mat'.")
      #Start from all-TRUE for negative (exclusion) indices or all-FALSE
      #for positive ones, then flip the referenced positions
      logical.bin.vars <- rep(any(bin.vars < 0), ncol(mat))
      logical.bin.vars[abs(bin.vars)] <- !logical.bin.vars[abs(bin.vars)]
      bin.vars <- logical.bin.vars
    }
    else if (is.character(bin.vars)) {
      bin.vars <- bin.vars[!is.na(bin.vars) & bin.vars != ""]
      if (is_null(colnames(mat))) stop("If 'bin.vars' is character, 'mat' must have column names.")
      if (any(bin.vars %nin% colnames(mat))) stop("If 'bin.vars' is character, all its values must be column names of 'mat'.")
      bin.vars <- colnames(mat) %in% bin.vars
    }
    else stop("'bin.vars' must be a logical, numeric, or character vector.")
  }
  return(bin.vars)
}
#Process the s.weights (sampling weights) argument: accept either a
#numeric vector of weights or the (quoted) name of a column in 'data';
#return the numeric weights, or NULL when none were supplied.
process.s.weights <- function(s.weights, data = NULL) {
  if (is_null(s.weights)) return(NULL)
  string.supplied <- is.character(s.weights) && length(s.weights) == 1
  if (!string.supplied && !is.numeric(s.weights)) {
    stop("The argument to 's.weights' must be a vector or data frame of sampling weights or the (quoted) names of the variable in 'data' that contains sampling weights.", call. = FALSE)
  }
  if (string.supplied) {
    #A string names a column of 'data' holding the weights
    if (is_null(data)) {
      stop("'s.weights' was specified as a string but there was no argument to 'data'.", call. = FALSE)
    }
    if (!(s.weights %in% names(data))) {
      stop("The name supplied to 's.weights' is not the name of a variable in 'data'.", call. = FALSE)
    }
    s.weights <- data[[s.weights]]
  }
  s.weights
}
#Uniqueness
#Number of unique values in x (0 for NULL). Factors report their number
#of levels; NAs are dropped first when na.rm = TRUE. 'nmax' caps the
#search in unique() for long vectors.
nunique <- function(x, nmax = NA, na.rm = TRUE) {
  if (is_null(x)) return(0)
  if (na.rm && anyNA(x)) x <- na.rem(x)
  if (is.factor(x)) return(nlevels(x))
  length(unique(x, nmax = nmax))
}
#TRUE if x has more than n unique values. Short-circuits the n = 1 case
#via all_the_same(); for long vectors it caps the unique() search at
#nmax = n, treating an nmax-exceeded error as proof of > n values.
nunique.gt <- function(x, n, na.rm = TRUE) {
  if (missing(n)) stop("'n' must be supplied.")
  if (n < 0) stop("'n' must be non-negative.")
  if (is_null(x)) FALSE
  else {
    if (n == 1) !all_the_same(x, na.rm)
    else if (length(x) < 2000) nunique(x, na.rm = na.rm) > n
    else tryCatch(nunique(x, nmax = n, na.rm = na.rm) > n, error = function(e) TRUE)
  }
}
#TRUE if all values of x are identical. With na.rm = FALSE, any NA makes
#the result FALSE unless x is entirely NA (empty after removal).
all_the_same <- function(x, na.rm = TRUE) {
  if (anyNA(x)) {
    x <- na.rem(x)
    if (!na.rm) return(is_null(x))
  }
  #For doubles, compare range to zero; check_if_zero is presumably a
  #tolerance-based comparison defined elsewhere -- confirm
  if (is.double(x)) check_if_zero(max(x) - min(x))
  else all(x == x[1])
}
#TRUE if x takes exactly two distinct values (after optionally dropping NAs)
is_binary <- function(x, na.rm = TRUE) {
  #Not constant overall, but constant once the first value is excluded
  if (na.rm && anyNA(x)) x <- na.rem(x)
  !all_the_same(x) && all_the_same(x[x != x[1]])
}
#Apply is_binary() to each column of a matrix or data frame
is_binary_col <- function(dat, na.rm = TRUE) {
  if (length(dim(dat)) != 2) stop("is_binary_col cannot be used with objects that don't have 2 dimensions.")
  apply(dat, 2, is_binary)
}
#R Processing
#Create a list, either of length n (when n is an integer-ish scalar) or
#named after the elements of an atomic vector n.
make_list <- function(n) {
  if (length(n) == 1L && is.numeric(n)) {
    return(vector("list", as.integer(n)))
  }
  if (is_(n, "atomic")) {
    return(setNames(vector("list", length(n)), as.character(n)))
  }
  stop("'n' must be an integer(ish) scalar or an atomic variable.")
}
#Construct an empty data frame scaffold. 'ncol'/'nrow' may be counts or
#atomic vectors supplying column/row names; 'types' gives column types
#("numeric" is the default; use NA to leave a column numeric, e.g. for
#factors to be filled in later).
make_df <- function(ncol, nrow = 0, types = "numeric") {
  if (length(ncol) == 1L && is.numeric(ncol)) {
    col_names <- NULL
    ncol <- as.integer(ncol)
  }
  else if (is_(ncol, "atomic")) {
    col_names <- as.character(ncol)
    ncol <- length(ncol)
  }
  if (length(nrow) == 1L && is.numeric(nrow)) {
    row_names <- NULL
    nrow <- as.integer(nrow)
  }
  else if (is_(nrow, "atomic")) {
    row_names <- as.character(nrow)
    nrow <- length(nrow)
  }
  #Start as an all-NA numeric matrix, then retype columns as requested
  df <- as.data.frame.matrix(matrix(NA_real_, nrow = nrow, ncol = ncol))
  colnames(df) <- col_names
  rownames(df) <- row_names
  if (is_not_null(types)) {
    if (length(types) %nin% c(1, ncol)) stop("'types' must be equal to the number of columns.")
    if (any(types %nin% c("numeric", "integer", "logical", "character", NA))) {
      stop("'types' must be an acceptable type. For factors, use NA.")
    }
    if (length(types) == 1) types <- rep(types, ncol)
    #e.g. get("character")(nrow) creates a typed column of length nrow
    for (i in seq_len(ncol)) if (!is.na(types)[i] && types[i] != "numeric") df[[i]] <- get(types[i])(nrow)
  }
  return(df)
}
#Vectorized multi-way ifelse: ifelse_(test1, yes1, test2, yes2, ..., no).
#Pairs are applied in order onto the default 'no', so for any element
#matched by several tests, the LAST matching pair wins.
ifelse_ <- function(...) {
  dotlen <- ...length()
  if (dotlen %% 2 == 0) stop("ifelse_ must have an odd number of arguments: pairs of test/yes, and one no.")
  #The final argument supplies the default output values
  out <- ...elt(dotlen)
  if (dotlen > 1) {
    if (!is_(out, "atomic")) stop("The last entry to ifelse_ must be atomic.")
    #Recycle a scalar default to the length of the first test
    if (length(out) == 1) out <- rep(out, length(..1))
    n <- length(out)
    for (i in seq_len((dotlen - 1)/2)) {
      test <- ...elt(2*i - 1)
      yes <- ...elt(2*i)
      if (length(yes) == 1) yes <- rep(yes, n)
      if (length(yes) != n || length(test) != n) stop("All entries must have the same length.")
      if (!is.logical(test)) stop(paste("The", ordinal(2*i - 1), "entry to ifelse_ must be logical."))
      if (!is_(yes, "atomic")) stop(paste("The", ordinal(2*i), "entry to ifelse_ must be atomic."))
      pos <- which(test)
      out[pos] <- yes[pos]
    }
  }
  else {
    if (!is_(out, "atomic")) stop("The first entry to ifelse_ must be atomic.")
  }
  return(out)
}
#Flexible type test: TRUE if x satisfies any of 'types', checked via a
#matching is_<type>() or is.<type>() predicate if one exists, otherwise
#via inherits(). With stop = TRUE, raises an informative error naming
#the caller's expression instead of returning FALSE.
is_ <- function(x, types, stop = FALSE, arg.to = FALSE) {
  #Capture the caller's expression for use in the error message
  s1 <- deparse1(substitute(x))
  if (is_not_null(x)) {
    for (i in types) {
      #"list" deliberately excludes data.frames (which are lists internally)
      if (i == "list") it.is <- is.list(x) && !is.data.frame(x)
      else if (is_not_null(get0(paste0("is_", i)))) {
        it.is <- get0(paste0("is_", i))(x)
      }
      #paste.() is presumably paste with a "." separator (defined
      #elsewhere), i.e. this looks up is.<type>() -- confirm
      else if (is_not_null(get0(paste.("is", i)))) {
        it.is <- get0(paste.("is", i))(x)
      }
      else it.is <- inherits(x, i)
      if (it.is) break
    }
  }
  else it.is <- FALSE
  if (stop) {
    if (!it.is) {
      s0 <- ifelse(arg.to, "The argument to ", "")
      s2 <- ifelse(any(types %in% c("factor", "character", "numeric", "logical")),
                   "vector", "")
      stop(paste0(s0, "'", s1, "' must be a ", word_list(types, and.or = "or"), " ", s2, "."), call. = FALSE)
    }
  }
  return(it.is)
}
#TRUE if x has length 0 (covers NULL, empty vectors, and empty lists)
is_null <- function(x) {
  length(x) == 0L
}
#Negation of is_null()
is_not_null <- function(x) {
  !is_null(x)
}
#Return the first non-null argument among x1, x2, then ...; when
#everything is null, falls back to ..1 if dots were supplied, else x1.
if_null_then <- function(x1 = NULL, x2 = NULL, ...) {
  if (is_not_null(x1)) return(x1)
  if (is_not_null(x2)) return(x2)
  for (k in seq_len(...length())) {
    if (is_not_null(...elt(k))) return(...elt(k))
  }
  if (...length() > 0) return(..1)
  x1
}
#Drop NULL (length-0) elements from a list
clear_null <- function(x) {
  is.empty <- vapply(x, is_null, logical(1L))
  x[is.empty] <- NULL
  return(x)
}
#Strip attributes from x. By default the structural attributes (names,
#class, dim, dimnames, row.names) are preserved; all = TRUE removes
#everything.
clear_attr <- function(x, all = FALSE) {
  if (all) {
    attributes(x) <- NULL
    return(x)
  }
  dont_clear <- c("names", "class", "dim", "dimnames", "row.names")
  attributes(x)[names(attributes(x)) %nin% dont_clear] <- NULL
  return(x)
}
#Raise an error identifying the calling function, for "unreachable"
#branches that indicate an internal bug rather than bad user input.
probably.a.bug <- function() {
  #sys.call(-1) captures the caller's call for inclusion in the message
  fun <- paste(deparse1(sys.call(-1)), collapse = "\n")
  stop(paste0("An error was produced and is likely a bug. Please let the maintainer know a bug was produced by the function\n",
              fun), call. = FALSE)
}
#Negation of %in%: TRUE where x has no match in table. Equivalent to the
#is.na(match(...)) form, including NA handling (an NA in x matches an NA
#in table; otherwise it counts as unmatched and returns TRUE).
`%nin%` <- function(x, table) {
  !(x %in% table)
}
`%pin%` <- function(x, table) {
  #Partial in via pmatch(): TRUE only when x uniquely (and possibly
  #partially) identifies a value in table; ambiguous prefixes give FALSE.
  !is.na(pmatch(x, table))
}
`%cin%` <- function(x, table) {
  #Partial in via charmatch(): TRUE when x matches at all, even if the
  #prefix is ambiguous (charmatch returns 0, not NA, for ambiguity).
  !is.na(charmatch(x, table))
}
#TRUE if x is null/empty or a try() error; for checking try() results.
#inherits() replaces the class(x) == "try-error" comparison (idiomatic
#S3 class test; behavior identical for try() results).
null_or_error <- function(x) {is_null(x) || inherits(x, "try-error")}
#Partial-match 'arg' against 'choices', like match.arg() but with
#cleaner error messages that name the offending argument.
match_arg <- function(arg, choices, several.ok = FALSE) {
  #Replaces match.arg() but gives cleaner error message and processing
  #of arg.
  if (missing(arg))
    stop("No argument was supplied to match_arg.", call. = FALSE)
  arg.name <- deparse1(substitute(arg))
  #When choices are omitted, recover them from the caller's formals
  if (missing(choices)) {
    formal.args <- formals(sys.function(sysP <- sys.parent()))
    choices <- eval(formal.args[[as.character(substitute(arg))]],
                    envir = sys.frame(sysP))
  }
  if (is.null(arg))
    return(choices[1L])
  else if (!is.character(arg))
    stop(paste0("The argument to '", arg.name, "' must be NULL or a character vector"), call. = FALSE)
  if (!several.ok) {
    #Supplying the full choices vector means "use the default"
    if (identical(arg, choices))
      return(arg[1L])
    if (length(arg) > 1L)
      stop(paste0("The argument to '", arg.name, "' must be of length 1"), call. = FALSE)
  }
  else if (is_null(arg))
    stop(paste0("The argument to '", arg.name, "' must be of length >= 1"), call. = FALSE)
  #Partial matching against the allowed choices
  i <- pmatch(arg, choices, nomatch = 0L, duplicates.ok = TRUE)
  if (all(i == 0L))
    stop(paste0("The argument to '", arg.name, "' should be ", if (length(choices) > 1) {if (several.ok) "at least one of " else "one of "} else "",
                word_list(choices, and.or = "or", quotes = 2), "."),
         call. = FALSE)
  i <- i[i > 0L]
  if (!several.ok && length(i) > 1)
    stop("There is more than one match in 'match_arg'")
  choices[i]
}
#Extract the final element of a vector or list
last <- function(x) {
  x[[length(x)]]
}
#Replace the final element of a vector or list (enables last(x) <- value)
`last<-` <- function(x, value) {
  x[[length(x)]] <- value
  x
}
#Generalized length: 0 for NULL, NROW() for 2-D objects, the elementwise
#lengths of a list (when recursive = TRUE), and length() otherwise.
len <- function(x, recursive = TRUE) {
  if (is_null(x)) 0L
  else if (length(dim(x)) > 1) NROW(x)
  else if (is.list(x) && recursive) vapply(x, len, numeric(1L), recursive = FALSE)
  else length(x)
}
na.rem <- function(x) {
  #A faster na.omit for vectors: drops NA entries without attaching
  #an na.action attribute
  keep <- !is.na(x)
  x[keep]
}
#TRUE for each column of x that contains at least one NA
anyNA_col <- function(x) {
  colSums(is.na(x)) > 0
}
#Verify that the given packages are installed. Missing packages trigger
#an informative error, or return FALSE when alternative = TRUE (so the
#caller can fall back to another implementation).
check.package <- function(package.name, alternative = FALSE) {
  packages.not.installed <- package.name[!vapply(package.name, requireNamespace, logical(1L),
                                                 quietly = TRUE)]
  if (is_not_null(packages.not.installed)) {
    if (alternative) return(FALSE)
    else {
      plural <- length(packages.not.installed) > 1
      stop(paste0("Package", if (plural) "s " else " ",
                  word_list(packages.not.installed, quotes = 1, is.are = TRUE),
                  " needed for this function to work. Please install ",
                  if (plural) "them" else "it","."),
           call. = FALSE)
    }
  }
  else return(invisible(TRUE))
}
#TRUE if the function 'fun' appears anywhere on the current call stack;
#FALSE when 'fun' is missing or is not a defined function.
check_if_call_from_fun <- function(fun) {
  # Check if called from within function f
  if (missing(fun) || !exists(deparse1(substitute(fun)), mode = "function")) return(FALSE)
  #Walk the stack, comparing each frame's function object to 'fun'
  sp <- sys.parents()
  sys.funs <- lapply(sp, sys.function)
  for (x in sys.funs) {
    if (identical(fun, x)) return(TRUE)
  }
  FALSE
}
#Not used cobalt; replaced with rlang
#TRUE if f is a formula; supplying sides = 1 or 2 additionally requires
#a one- or two-sided formula (length 2 or 3 respectively).
is.formula <- function(f, sides = NULL) {
  #Replaced by rlang::is_formula
  #NOTE(review): accepting '!' as the head operator looks odd for a
  #formula check -- presumably mirrors rlang's historical behavior; confirm.
  res <- inherits(f, "formula") && is.name(f[[1]]) && deparse1(f[[1]]) %in% c( '~', '!') &&
    length(f) >= 2
  if (is_not_null(sides) && is.numeric(sides) && sides %in% c(1,2)) {
    res <- res && length(f) == sides + 1
  }
  return(res)
}
#Backport: str2expression() was added in R 3.6; define a parse()-based
#fallback on older versions so the rest of the file can call it freely.
if (getRversion() < 3.6) str2expression <- function(text) parse(text=text, keep.source=FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ses_operations.R
\name{ses_create_configuration_set_tracking_options}
\alias{ses_create_configuration_set_tracking_options}
\title{Creates an association between a configuration set and a custom domain
for open and click event tracking}
\usage{
ses_create_configuration_set_tracking_options(ConfigurationSetName,
TrackingOptions)
}
\arguments{
\item{ConfigurationSetName}{[required] The name of the configuration set that the tracking options should be
associated with.}
\item{TrackingOptions}{[required]}
}
\description{
Creates an association between a configuration set and a custom domain
for open and click event tracking.
By default, images and links used for tracking open and click events are
hosted on domains operated by Amazon SES. You can configure a subdomain
of your own to handle these events. For information about using custom
domains, see the \href{https://docs.aws.amazon.com/ses/latest/DeveloperGuide/configure-custom-open-click-domains.html}{Amazon SES Developer Guide}.
}
\section{Request syntax}{
\preformatted{svc$create_configuration_set_tracking_options(
ConfigurationSetName = "string",
TrackingOptions = list(
CustomRedirectDomain = "string"
)
)
}
}
\keyword{internal}
|
/paws/man/ses_create_configuration_set_tracking_options.Rd
|
permissive
|
sanchezvivi/paws
|
R
| false
| true
| 1,290
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ses_operations.R
\name{ses_create_configuration_set_tracking_options}
\alias{ses_create_configuration_set_tracking_options}
\title{Creates an association between a configuration set and a custom domain
for open and click event tracking}
\usage{
ses_create_configuration_set_tracking_options(ConfigurationSetName,
TrackingOptions)
}
\arguments{
\item{ConfigurationSetName}{[required] The name of the configuration set that the tracking options should be
associated with.}
\item{TrackingOptions}{[required]}
}
\description{
Creates an association between a configuration set and a custom domain
for open and click event tracking.
By default, images and links used for tracking open and click events are
hosted on domains operated by Amazon SES. You can configure a subdomain
of your own to handle these events. For information about using custom
domains, see the \href{https://docs.aws.amazon.com/ses/latest/DeveloperGuide/configure-custom-open-click-domains.html}{Amazon SES Developer Guide}.
}
\section{Request syntax}{
\preformatted{svc$create_configuration_set_tracking_options(
ConfigurationSetName = "string",
TrackingOptions = list(
CustomRedirectDomain = "string"
)
)
}
}
\keyword{internal}
|
# Exercise 1: practice with basic R syntax
# Solution script: stores personal details, then computes the cost and
# affordability of a desired number of puppies on a $1,000 budget.
# Create a variable `hometown` that stores the city in which you were born
hometown <- "St. Louis"
# Assign your name to the variable `my_name`
my_name <- "Mike"
# Assign your height (in inches) to a variable `my_height`
my_height <- 73.5 # inches
# Create a variable `puppies` equal to the number of puppies you'd like to have
puppies <- 8
# Create a variable `puppy_price`, which is how much you think a puppy costs
puppy_price <- 250
# Create a variable `total_cost` that has the total cost of all of your puppies
total_cost <- puppies * puppy_price
# Create a boolean variable `too_expensive`, set to TRUE if the cost is greater
# than $1,000
too_expensive <- total_cost > 1000 # Bummer!
# Create a variable `max_puppies`, which is the number of puppies you can
# afford for $1,000
max_puppies <- 1000%/%puppy_price # %/% is "divide and ignore remainder"
|
/chapter-05-exercises/exercise-1/exercise.R
|
permissive
|
ITCuw/RLessons-Solutions
|
R
| false
| false
| 915
|
r
|
# Exercise 1: practice with basic R syntax
# Create a variable `hometown` that stores the city in which you were born
hometown <- "St. Louis"
# Assign your name to the variable `my_name`
my_name <- "Mike"
# Assign your height (in inches) to a variable `my_height`
my_height <- 73.5 # inches
# Create a variable `puppies` equal to the number of puppies you'd like to have
puppies <- 8
# Create a variable `puppy_price`, which is how much you think a puppy costs
puppy_price <- 250
# Create a variable `total_cost` that has the total cost of all of your puppies
total_cost <- puppies * puppy_price
# Create a boolean variable `too_expensive`, set to TRUE if the cost is greater
# than $1,000
too_expensive <- total_cost > 1000 # Bummer!
# Create a variable `max_puppies`, which is the number of puppies you can
# afford for $1,000
max_puppies <- 1000%/%puppy_price # %/% is "divide and ignore remainder"
|
#' Method summary for ViSigrid object.
#' @title Method \code{summary-ViSigrid}
#' @name summary-ViSigrid-method
#' @rdname summary-ViSigrid-methods
#' @aliases summary,ViSigrid-method
#' @exportMethod summary
#' @docType methods
#' @param object a ViSigrid.
#' @return list \itemize{
#' \item{ \strong{ punctuals} }{ summary of punctual actions (typeA=="p").}
#' \item{ \strong{ longs} }{ summary of long actions (typeA=="l"). }
#' }
#' @seealso \code{\linkS4class{ViSigrid}}, \code{\link{buildViSiGrid}},\code{\linkS4class{ViSibook}}.
#' and see \code{\link{plot-ViSigrid-method}} for examples.
#Summarize a ViSigrid object: extracts the stored informer statistics
#(q1/median/q3 or mean-sd/mean/mean+sd, three rows per action) for
#punctual ("p") and long ("l") actions, appending per-group labels and
#test p-value flags when two groups and tests are present.
#NOTE: the method body is wrapped in ( ) rather than { } -- it works
#because the if/else is a single expression.
setMethod( f = "summary" ,
           signature = "ViSigrid" ,
           definition = function(object ) (
             if ( is.null( methods::slot( object , "parameters")$informer )) {
               cat( "No informers No tests were made in the call \n ")
             }else{
               #Row labels: three summary rows per action, per the informer type
               cn = switch( methods::slot( object , "parameters")$informer , "median" = c( "q1","median","q3"), "mean" = c("mean-sd","mean","mean+sd" ) )
               #Punctual actions occupy the leading columns of the informers slot
               infpunctuals <- methods::slot( object , "informers")[ , seq( 1 , sum( methods::slot( methods::slot(object , "book" ) , "typeA" ) == "p" ) , 1 ) ]
               rownames(infpunctuals) = rep(cn , dim(infpunctuals)[1]/3)
               if (length( methods::slot(object , "group" ) ) > 0 ) {
                 #Two-group case: append test p-value flags and relabel rows by group
                 if (length( methods::slot( object , "testsP" ) ) > 0 ) {
                   infpunctuals <- rbind( infpunctuals , methods::slot( object , "testsP")[ seq( 1 , sum( methods::slot( methods::slot(object , "book" ) , "typeA" ) == "p" ) , 1 ) ] )
                 }
                 rownames( infpunctuals ) <- c( paste( rep("Gr" , 6) ,
                                                       c( rep( levels( methods::slot( object , "group" ) )[ 1 ] , 3 ) ,
                                                          rep( levels( methods::slot( object , "group" ) )[ 2 ] , 3 ) ) ,
                                                       rep(cn , dim( infpunctuals )[ 1 ] / 3 ) ) ,
                                                paste( switch( methods::slot( object , "parameters")$informer , "median" = "mood test", "mean" = "wilcoxon test" ) ," p.value < " , methods::slot( object , "parameters")$threshold.test) )
               }
               #Long actions follow the punctual columns in the informers slot
               inflong <- methods::slot( object , "informers")[ , seq( sum( methods::slot( methods::slot(object , "book" ) , "typeA" ) == "p" ) + 1 , sum( methods::slot( methods::slot(object , "book" ) , "typeA" ) == "p" ) + sum( methods::slot( methods::slot(object , "book" ) , "typeA" ) == "l" ), 1 ) ]
               rownames(inflong) = rep(cn , dim(inflong)[1] / 3 )
               if (length( methods::slot(object , "group" ) ) > 0 ) {
                 if (length( methods::slot( object , "testsP" ) ) > 0 ) {
                   inflong <- rbind( inflong , methods::slot( object , "testsP")[ seq( sum( methods::slot( methods::slot(object , "book" ) , "typeA" ) == "p" ) + 1, sum( methods::slot( methods::slot(object , "book" ) , "typeA" ) == "p" ) + sum( methods::slot( methods::slot(object , "book" ) , "typeA" ) == "l" ) , 1 ) ] )
                 }
                 rownames( inflong ) <- c( paste( rep("Gr" , 6) ,
                                                  c( rep( levels( methods::slot( object , "group" ) )[ 1 ] , 3 ) ,
                                                     rep( levels( methods::slot( object , "group" ) )[ 2 ] , 3 ) ) ,
                                                  rep(cn , dim( inflong )[ 1 ] / 3 ) ) ,
                                           paste( switch( methods::slot( object , "parameters")$informer ,
                                                          "median" = "mood test",
                                                          "mean" = "wilcoxon test" ),
                                                  " p.value < " ,
                                                  methods::slot( object , "parameters")$threshold.test))
               }
               return( list( punctuals = infpunctuals , longs = inflong ) )
             }
           )
)
|
/ViSiElse/R/summary.ViSigrid.r
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 4,208
|
r
|
#' Method summary for ViSigrid object.
#' @title Method \code{summary-ViSigrid}
#' @name summary-ViSigrid-method
#' @rdname summary-ViSigrid-methods
#' @aliases summary,ViSigrid-method
#' @exportMethod summary
#' @docType methods
#' @param object a ViSigrid.
#' @return list \itemize{
#' \item{ \strong{ punctuals} }{ summary of punctual actions (typeA=="p").}
#' \item{ \strong{ longs} }{ summary of long actions (typeA=="p"). }
#' }
#' @seealso \code{\linkS4class{ViSigrid}}, \code{\link{buildViSiGrid}},\code{\linkS4class{ViSibook}}.
#' and see \code{\link{plot-ViSigrid-method}} for examples.
setMethod( f = "summary" ,
signature = "ViSigrid" ,
definition = function(object ) (
if ( is.null( methods::slot( object , "parameters")$informer )) {
cat( "No informers No tests were made in the call \n ")
}else{
cn = switch( methods::slot( object , "parameters")$informer , "median" = c( "q1","median","q3"), "mean" = c("mean-sd","mean","mean+sd" ) )
infpunctuals <- methods::slot( object , "informers")[ , seq( 1 , sum( methods::slot( methods::slot(object , "book" ) , "typeA" ) == "p" ) , 1 ) ]
rownames(infpunctuals) = rep(cn , dim(infpunctuals)[1]/3)
if (length( methods::slot(object , "group" ) ) > 0 ) {
if (length( methods::slot( object , "testsP" ) ) > 0 ) {
infpunctuals <- rbind( infpunctuals , methods::slot( object , "testsP")[ seq( 1 , sum( methods::slot( methods::slot(object , "book" ) , "typeA" ) == "p" ) , 1 ) ] )
}
rownames( infpunctuals ) <- c( paste( rep("Gr" , 6) ,
c( rep( levels( methods::slot( object , "group" ) )[ 1 ] , 3 ) ,
rep( levels( methods::slot( object , "group" ) )[ 2 ] , 3 ) ) ,
rep(cn , dim( infpunctuals )[ 1 ] / 3 ) ) ,
paste( switch( methods::slot( object , "parameters")$informer , "median" = "mood test", "mean" = "wilcoxon test" ) ," p.value < " , methods::slot( object , "parameters")$threshold.test) )
}
inflong <- methods::slot( object , "informers")[ , seq( sum( methods::slot( methods::slot(object , "book" ) , "typeA" ) == "p" ) + 1 , sum( methods::slot( methods::slot(object , "book" ) , "typeA" ) == "p" ) + sum( methods::slot( methods::slot(object , "book" ) , "typeA" ) == "l" ), 1 ) ]
rownames(inflong) = rep(cn , dim(inflong)[1] / 3 )
if (length( methods::slot(object , "group" ) ) > 0 ) {
if (length( methods::slot( object , "testsP" ) ) > 0 ) {
inflong <- rbind( inflong , methods::slot( object , "testsP")[ seq( sum( methods::slot( methods::slot(object , "book" ) , "typeA" ) == "p" ) + 1, sum( methods::slot( methods::slot(object , "book" ) , "typeA" ) == "p" ) + sum( methods::slot( methods::slot(object , "book" ) , "typeA" ) == "l" ) , 1 ) ] )
}
rownames( inflong ) <- c( paste( rep("Gr" , 6) ,
c( rep( levels( methods::slot( object , "group" ) )[ 1 ] , 3 ) ,
rep( levels( methods::slot( object , "group" ) )[ 2 ] , 3 ) ) ,
rep(cn , dim( inflong )[ 1 ] / 3 ) ) ,
paste( switch( methods::slot( object , "parameters")$informer ,
"median" = "mood test",
"mean" = "wilcoxon test" ),
" p.value < " ,
methods::slot( object , "parameters")$threshold.test))
}
return( list( punctuals = infpunctuals , longs = inflong ) )
}
)
)
|
# Compare salt columns: for each value in cmp_d2$Dsalt1, find its first
# match in cmp_d2$Dsalt2 and record the Dsalt1 value at the matched row
# in Dsalt3 (NA when there is no match).
getwd()
setwd("C:/Users/avon/Documents/R/ndc")
cmp1 <- read.csv(file = "saltcmp1.csv", header = TRUE, na.strings=c("","NA"), stringsAsFactors = FALSE)
cmp_d2 <- read.csv(file = "dummy2.csv", header = TRUE, na.strings=c("","NA"), stringsAsFactors = FALSE)
cmp_d1 <- read.csv(file = "dummy.csv", header = TRUE, na.strings=c("","NA"), stringsAsFactors = FALSE)
cmp2 <- cmp1[1:15,]
a <- cmp1$Dsalt1
a
# Vectorized replacement for the original O(n^2) double loop: match()
# gives the index of the first Dsalt2 entry equal to each Dsalt1 entry
# (NA when none), and -- unlike the loop's `if (x == y)` -- does not
# error when the columns contain NA values (na.strings makes NAs likely).
match.idx <- match(cmp_d2$Dsalt1, cmp_d2$Dsalt2)
cmp_d2$Dsalt3 <- cmp_d2$Dsalt1[match.idx]
|
/drugup/dummycmp0.R
|
no_license
|
gvravi/healapp
|
R
| false
| false
| 754
|
r
|
getwd()
setwd("C:/Users/avon/Documents/R/ndc")
cmp1 <- read.csv(file = "saltcmp1.csv", header = TRUE, na.strings=c("","NA"), stringsAsFactors = FALSE)
cmp_d2 <- read.csv(file = "dummy2.csv", header = TRUE, na.strings=c("","NA"), stringsAsFactors = FALSE)
cmp_d1 <- read.csv(file = "dummy.csv", header = TRUE, na.strings=c("","NA"), stringsAsFactors = FALSE)
cmp2 <- cmp1[1:15,]
a <- cmp1$Dsalt1
a
column1 <- nrow(cmp_d2)
column2 <- nrow(cmp_d2)
for (i in 1:column1){
for (j in 1:column2){
if (cmp_d2$Dsalt1[i] == cmp_d2$Dsalt2[j])
{
cmp_d2$Dsalt3[i] <- cmp_d2$Dsalt1[j]
break
}
else{
cmp_d2$Dsalt3[i] <- NA
}
}
}
|
#'@title calcHMFormFactor
#'
#'@description Calculate form factor (1+k_1) from the Holtrop & Mennen method.
#'
#'@param maxDraft Maximum summer load line draft (vector of numericals, m)
#'@param lwl Waterline length (vector of numericals, m) (see \code{\link{calclwl}})
#'@param breadth Moulded breadth (vector of numericals, m)
#'@param maxDisplacement Maximum ship displacement (vector of numericals, m^3)
#'@param Cp Prismatic coefficient (vector of numericals, dimensionless) (see
#' \code{\link{calcCp}})
#'@param Cstern Afterbody form coefficient:
#'\itemize{\item V-shaped Hull = -10
#' \item U-Shaped Hull = 10
#' \item Normal Hull = 0 (default) }
#' Can supply either a vector of numericals, a single number, or rely on the default
#'@param lcb Longitudinal position of center of buoyancy (vector of numericals,
#' see \code{\link{calclcb}})
#'
#'@return \code{formFactor} (vector of numericals)
#'
#'@references
#'Holtrop, J. and Mennen, G. G. J. 1982. "An approximate power prediction
#'method." International Shipbuilding Progress 29.
#'
#'Holtrop, J. and Mennen, G. G. J. 1984. "A Statistical Re-Analysis of Resistance
#'and Propulsion Data'.
#'
#'@seealso \itemize{
#'\item \code{\link{calclwl}}
#'\item \code{\link{calcCp}}
#'\item \code{\link{calclcb}} }
#'
#'@family Holtrop-Mennen Calculations
#'
#'@examples
#' calcHMFormFactor(c(13.57,11.49),c(218.75, 209.25),c(32.25,32.20),c(80097,52382.04),c(0.81,0.67))
#' calcHMFormFactor(13.57,218.75,32.25,80097,0.81)
#'
#'@export
#Holtrop & Mennen form factor (1+k_1); see the roxygen block above for
#parameter definitions and references. The arithmetic grouping follows
#the published regression and must not be rearranged (float results).
calcHMFormFactor<-function(maxDraft,lwl,breadth,maxDisplacement,Cp,Cstern=0,lcb=0){
  formFactor<-
    0.93+0.487118*
    #c14: hull-form correction from the afterbody shape coefficient
    (1+0.011*Cstern)*
    ((breadth/lwl)^1.06806)*
    ((maxDraft/lwl)^0.46106)*
    ( #L/Lr: length-of-run ratio expressed through Cp and lcb
      (1/(1-Cp+(0.06*Cp*-lcb)/(4*Cp-1))
      )^0.121563)*
    ((lwl^3/maxDisplacement)^0.36486)*((1-Cp)^-0.604247)
  return(formFactor)
}
|
/ShipPowerModel/R/calcHMFormFactor.r
|
permissive
|
Misterfluff/Marine_Emissions_Tools
|
R
| false
| false
| 1,876
|
r
|
#'@title calcHMFormFactor
#'
#'@description Calculate form factor (1+k_1) from the Holtrop & Mennen method.
#'
#'@param maxDraft Maximum summer load line draft (vector of numericals, m)
#'@param lwl Waterline length (vector of numericals, m) (see \code{\link{calclwl}})
#'@param breadth Moulded breadth (vector of numericals, m)
#'@param maxDisplacement Maximum ship displacement (vector of numericals, m^3)
#'@param Cp Prismatic coefficient (vector of numericals, dimensionless) (see
#' \code{\link{calcCp}})
#'@param Cstern Afterbody form coefficient:
#'\itemize{\item V-shaped Hull = -10
#' \item U-Shaped Hull = 10
#' \item Normal Hull = 0 (default) }
#' Can supply either a vector of numericals, a single number, or rely on the default
#'@param lcb Longitudinal position of center of buoyancy (vector of numericals,
#' see \code{\link{calclcb}})
#'
#'@return \code{formFactor} (vector of numericals)
#'
#'@references
#'Holtrop, J. and Mennen, G. G. J. 1982. "An approximate power prediction
#'method." International Shipbuilding Progress 29.
#'
#'Holtrop, J. and Mennen, G. G. J. 1984. "A Statistical Re-Analysis of Resistance
#'and Propulsion Data'.
#'
#'@seealso \itemize{
#'\item \code{\link{calclwl}}
#'\item \code{\link{calcCp}}
#'\item \code{\link{calclcb}} }
#'
#'@family Holtrop-Mennen Calculations
#'
#'@examples
#' calcHMFormFactor(c(13.57,11.49),c(218.75, 209.25),c(32.25,32.20),c(80097,52382.04),c(0.81,0.67))
#' calcHMFormFactor(13.57,218.75,32.25,80097,0.81)
#'
#'@export
# Holtrop & Mennen form factor (1 + k_1); parameters are documented in the
# roxygen block above.  The whole regression is a single product expression;
# groupings and evaluation order are significant and kept exactly as written.
calcHMFormFactor<-function(maxDraft,lwl,breadth,maxDisplacement,Cp,Cstern=0,lcb=0){
formFactor<-
0.93+0.487118*
#c14: afterbody form term (Cstern = -10 V-hull, 0 normal, +10 U-hull)
(1+0.011*Cstern)*
((breadth/lwl)^1.06806)*
((maxDraft/lwl)^0.46106)*
( #L/Lr (length-of-run ratio); NOTE(review): lcb enters negated (0.06*Cp*-lcb) -- confirm this matches the calclcb sign convention
(1/(1-Cp+(0.06*Cp*-lcb)/(4*Cp-1))
)^0.121563)*
((lwl^3/maxDisplacement)^0.36486)*((1-Cp)^-0.604247)
return(formFactor)
}
|
# Set universal variables -----------------------
# NOTE(review): LEAGUE_SCHEMA must already be defined when this file is
# sourced; it is not created here.
API_PATH <- paste0("https://app.americansocceranalysis.com/api/v1/", LEAGUE_SCHEMA, "/")
# VIOLIN_MINUTES_CUTOFF <- 500
# VIOLIN_HEIGHT <- "450px"
# VIOLIN_WIDTH <- "96%"
# START_PLAYER <- NA # Dax
# Pitch dimensions used for plotting (presumably yards -- confirm against the
# field-drawing utilities).
FIELD_WIDTH <- 80
FIELD_LENGTH <- 115
# Timezone the upstream database stores timestamps in.
DATABASE_TIMEZONE <- "America/New_York"
# Categorical vocabularies used by the dashboard filters.
PATTERNS_OF_PLAY <- c("Corner", "Fastbreak", "Free kick", "Penalty", "Regular", "Set piece")
THIRDS_OF_FIELD <- c("Attacking", "Middle", "Defensive")
# Upper bounds for UI inputs (presumably slider maxima -- confirm in the UI code).
MAX_MINUTES <- 3000
MAX_SHOTS_TAKEN_FACED <- 125
MAX_KEY_PASSES <- 125
MAX_PASSES <- 2000
MLSPA_POSITIONS <- c("GK", "D", "M", "F")
# Utility functions -----------------------------
# Perform a GET against the ASA API and return the parsed JSON response.
#
# path:       base URL of the API (defaults to the global API_PATH).
# endpoint:   endpoint string appended to `path`.
# parameters: named list of query parameters.  NULL/NA entries are dropped,
#             vector values are comma-joined, and runs of whitespace are
#             percent-encoded as %20.
api_request <- function(path = API_PATH, endpoint, parameters = NULL) {
  parameters_array <- c()
  if (length(parameters) > 0) {
    # seq_along() instead of 1:length(): safe when parameters is empty.
    for (i in seq_along(parameters)) {
      tmp_name <- names(parameters[i])
      tmp_value <- parameters[[tmp_name]]
      # Skip parameters that are NULL or contain NA.
      if (all(!is.na(tmp_value)) && all(!is.null(tmp_value))) {
        if (length(tmp_value) > 1) {
          tmp_value <- gsub("\\s+", "%20", paste0(tmp_value, collapse = ","))
        } else {
          tmp_value <- gsub("\\s+", "%20", tmp_value)
        }
        parameters_array <- c(parameters_array, paste0(tmp_name, "=", tmp_value))
      }
    }
  }
  # Scalar condition: plain if/else rather than ifelse().
  query_string <- if (length(parameters_array) > 0) {
    paste0("?", paste0(parameters_array, collapse = "&"))
  } else {
    ""
  }
  # BUG FIX: the request previously used the global API_PATH, silently
  # ignoring the `path` argument.  `path` defaults to API_PATH, so existing
  # callers see identical behavior.
  return(fromJSON(content(GET(paste0(path, endpoint, query_string)),
                          as = "text", encoding = "UTF-8")))
}
# Source dashboard utils ------------------------
# Load every helper script under ../app/utils except the data-retrieval and
# reactive-values scripts (those are handled separately).
util_files <- list.files("../app/utils")
util_files <- util_files[!grepl("retrieve_data|reactive_values", util_files)]
utils <- paste0("../app/utils/", util_files)
lapply(utils, source)
|
/app/global.R
|
no_license
|
NlIceD/asa-shiny-app
|
R
| false
| false
| 1,900
|
r
|
# Set universal variables -----------------------
# NOTE(review): LEAGUE_SCHEMA is assumed to be defined before this file runs.
API_PATH <- paste0("https://app.americansocceranalysis.com/api/v1/", LEAGUE_SCHEMA, "/")
# VIOLIN_MINUTES_CUTOFF <- 500
# VIOLIN_HEIGHT <- "450px"
# VIOLIN_WIDTH <- "96%"
# START_PLAYER <- NA # Dax
# Pitch dimensions for plotting (units not stated here -- verify in the
# plotting utilities).
FIELD_WIDTH <- 80
FIELD_LENGTH <- 115
# Timezone of timestamps stored in the upstream database.
DATABASE_TIMEZONE <- "America/New_York"
# Fixed category lists consumed by dashboard filters.
PATTERNS_OF_PLAY <- c("Corner", "Fastbreak", "Free kick", "Penalty", "Regular", "Set piece")
THIRDS_OF_FIELD <- c("Attacking", "Middle", "Defensive")
# Upper bounds for numeric UI inputs.
MAX_MINUTES <- 3000
MAX_SHOTS_TAKEN_FACED <- 125
MAX_KEY_PASSES <- 125
MAX_PASSES <- 2000
MLSPA_POSITIONS <- c("GK", "D", "M", "F")
# Utility functions -----------------------------
# Perform a GET against the ASA API and return the parsed JSON response.
# `parameters` is a named list; NULL/NA entries are dropped, vector values are
# comma-joined, and whitespace is percent-encoded as %20.
# NOTE(review): the request below uses the global API_PATH and ignores the
# `path` argument; also note the scalar `&` and ifelse() -- `&&` and if/else
# would be the idiomatic forms here.
api_request <- function(path = API_PATH, endpoint, parameters = NULL) {
  parameters_array <- c()
  if (length(parameters) > 0) {
    for (i in 1:length(parameters)) {
      tmp_name <- names(parameters[i])
      tmp_value <- parameters[[tmp_name]]
      # Keep only parameters that are neither NULL nor contain NA.
      if (all(!is.na(tmp_value)) & all(!is.null(tmp_value))) {
        if (length(tmp_value) > 1) {
          tmp_value <- gsub("\\s+", "%20", paste0(tmp_value, collapse = ","))
        } else {
          tmp_value <- gsub("\\s+", "%20", tmp_value)
        }
        parameters_array <- c(parameters_array, paste0(tmp_name, "=", tmp_value))
      }
    }
  }
  # Build the "?a=1&b=2" query string, or "" when no parameters survived.
  parameters_array <- ifelse(length(parameters_array) > 0,
                             paste0("?", paste0(parameters_array, collapse = "&")),
                             "")
  return(fromJSON(content(GET(paste0(API_PATH, endpoint, parameters_array)),
                          as = "text", encoding = "UTF-8")))
}
# Source dashboard utils ------------------------
# Load every helper under ../app/utils except the data-retrieval and
# reactive-values scripts (sourced elsewhere).  list.files() is called twice;
# both calls see the same directory contents.
utils <- paste0("../app/utils/", list.files("../app/utils")[!grepl("retrieve_data|reactive_values", list.files("../app/utils"))])
lapply(utils, source)
|
# dplyr is needed for the %>% pipe used further down.
library(dplyr
)
# temp cleaning script
# Collect the full paths of all per-subset RDS files of the Berlin tree data.
tree_by_dists <- list.files("./analysis/data/raw_data/tree_splits/",
                            pattern = "berlin_trees_subset",
                            full.names = TRUE)
# # Charlottenburg -------------------------------------
#
# file_index <- grep("Charlottenburg",
# tree_by_dists)
#
#
# datacleanr::dcr_app(dframe = tree_by_dists[file_index])
#
#
# # Fhain-Xberg -------------------------------------
#
# file_index <- grep("Friedrichshain",
# tree_by_dists)
#
#
# datacleanr::dcr_app(dframe = tree_by_dists[file_index])
#
#
# # Lichtenberg -------------------------------------
#
# file_index <- grep("Lichtenberg",
# tree_by_dists)
#
#
# datacleanr::dcr_app(dframe = tree_by_dists[file_index])
#
#
# # Marzahn -------------------------------------
#
# file_index <- grep("Marzahn",
# tree_by_dists)
#
#
# datacleanr::dcr_app(dframe = tree_by_dists[file_index])
#
#
#
# # Mitte -------------------------------------
#
# file_index <- grep("Mitte",
# tree_by_dists)
#
#
# datacleanr::dcr_app(dframe = tree_by_dists[file_index])
#
#
#
# # Neuk -------------------------------------
#
# file_index <- grep("Neuk",
# tree_by_dists)
#
#
# datacleanr::dcr_app(dframe = tree_by_dists[file_index])
#
#
#
# # Pankow -------------------------------------
#
# file_index <- grep("Pankow",
# tree_by_dists)
#
#
# datacleanr::dcr_app(dframe = tree_by_dists[file_index])
#
#
# # Reinick -------------------------------------
#
# file_index <- grep("Reinick",
# tree_by_dists)
#
#
# datacleanr::dcr_app(dframe = tree_by_dists[file_index])
#
#
# # Spandau -------------------------------------
#
# file_index <- grep("Spandau",
# tree_by_dists)
#
#
# datacleanr::dcr_app(dframe = tree_by_dists[file_index])
#
#
# # Stegl -------------------------------------
#
# file_index <- grep("Stegl",
# tree_by_dists)
#
#
# datacleanr::dcr_app(dframe = tree_by_dists[file_index])
#
#
#
#
# # Tempelhof -------------------------------------
#
# file_index <- grep("Tempelhof",
# tree_by_dists)
#
#
# datacleanr::dcr_app(dframe = tree_by_dists[file_index])
#
#
#
#
# # Trep -------------------------------------
#
# file_index <- grep("Trep",
# tree_by_dists)
#
#
# datacleanr::dcr_app(dframe = tree_by_dists[file_index])
# BY GENUS ----------------------------------------------------------------
# temp cleaning script
tree_by_dists <- list.files("./analysis/data/raw_data/tree_splits/",
                            pattern = "berlin_trees_subset",
                            full.names = TRUE)
# grab genera: strip everything up to the last underscore and drop the .RDS
# extension, leaving the genus name encoded in each file name.
genera <- sub(pattern = "(.*_)(\\w+[.]RDS$)",
              replacement = "\\2",
              x = tree_by_dists,
              perl = FALSE) %>%
  fs::path_ext_remove()
# Launch the interactive cleaning app once per genus.  This replaces thirteen
# copy-pasted stanzas (Acer, Aesculus, ..., Tilia); each dcr_app() call blocks
# until that genus has been cleaned, exactly as before.
for (genus in c("Acer", "Aesculus", "Betula", "Carpinus", "Fraxinus",
                "Other", "Pinus", "Platanus", "Populus", "Prunus",
                "Quercus", "Robinia", "Tilia")) {
  print(genus)
  file_index <- grep(genus, tree_by_dists)
  datacleanr::dcr_app(dframe = tree_by_dists[file_index])
}
|
/R/temp_dcr_script.R
|
permissive
|
the-Hull/berlin.trees
|
R
| false
| false
| 5,721
|
r
|
# dplyr supplies the %>% pipe used below.
library(dplyr
)
# temp cleaning script
# Full paths of all per-subset RDS files of the Berlin tree data.
tree_by_dists <- list.files("./analysis/data/raw_data/tree_splits/",
                            pattern = "berlin_trees_subset",
                            full.names = TRUE)
# # Charlottenburg -------------------------------------
#
# file_index <- grep("Charlottenburg",
# tree_by_dists)
#
#
# datacleanr::dcr_app(dframe = tree_by_dists[file_index])
#
#
# # Fhain-Xberg -------------------------------------
#
# file_index <- grep("Friedrichshain",
# tree_by_dists)
#
#
# datacleanr::dcr_app(dframe = tree_by_dists[file_index])
#
#
# # Lichtenberg -------------------------------------
#
# file_index <- grep("Lichtenberg",
# tree_by_dists)
#
#
# datacleanr::dcr_app(dframe = tree_by_dists[file_index])
#
#
# # Marzahn -------------------------------------
#
# file_index <- grep("Marzahn",
# tree_by_dists)
#
#
# datacleanr::dcr_app(dframe = tree_by_dists[file_index])
#
#
#
# # Mitte -------------------------------------
#
# file_index <- grep("Mitte",
# tree_by_dists)
#
#
# datacleanr::dcr_app(dframe = tree_by_dists[file_index])
#
#
#
# # Neuk -------------------------------------
#
# file_index <- grep("Neuk",
# tree_by_dists)
#
#
# datacleanr::dcr_app(dframe = tree_by_dists[file_index])
#
#
#
# # Pankow -------------------------------------
#
# file_index <- grep("Pankow",
# tree_by_dists)
#
#
# datacleanr::dcr_app(dframe = tree_by_dists[file_index])
#
#
# # Reinick -------------------------------------
#
# file_index <- grep("Reinick",
# tree_by_dists)
#
#
# datacleanr::dcr_app(dframe = tree_by_dists[file_index])
#
#
# # Spandau -------------------------------------
#
# file_index <- grep("Spandau",
# tree_by_dists)
#
#
# datacleanr::dcr_app(dframe = tree_by_dists[file_index])
#
#
# # Stegl -------------------------------------
#
# file_index <- grep("Stegl",
# tree_by_dists)
#
#
# datacleanr::dcr_app(dframe = tree_by_dists[file_index])
#
#
#
#
# # Tempelhof -------------------------------------
#
# file_index <- grep("Tempelhof",
# tree_by_dists)
#
#
# datacleanr::dcr_app(dframe = tree_by_dists[file_index])
#
#
#
#
# # Trep -------------------------------------
#
# file_index <- grep("Trep",
# tree_by_dists)
#
#
# datacleanr::dcr_app(dframe = tree_by_dists[file_index])
# BY GENUS ----------------------------------------------------------------
# temp cleaning script
tree_by_dists <- list.files("./analysis/data/raw_data/tree_splits/",
                            pattern = "berlin_trees_subset",
                            full.names = TRUE)
# grab genera: keep the trailing "<Genus>.RDS" token, then drop the extension.
genera <- sub(pattern = "(.*_)(\\w+[.]RDS$)",
              replacement = "\\2",
              x = tree_by_dists,
              perl = FALSE) %>%
  fs::path_ext_remove()
# One interactive cleaning session per genus -- a single loop in place of the
# thirteen duplicated stanzas.  Each dcr_app() call blocks until done.
for (genus in c("Acer", "Aesculus", "Betula", "Carpinus", "Fraxinus",
                "Other", "Pinus", "Platanus", "Populus", "Prunus",
                "Quercus", "Robinia", "Tilia")) {
  print(genus)
  file_index <- grep(genus, tree_by_dists)
  datacleanr::dcr_app(dframe = tree_by_dists[file_index])
}
|
#graphing
# Interactively pick the three tab-separated input tables (first column is the
# row names); the expected file for each slot is noted in the trailing comment.
d <- read.table(file.choose(), header=T, sep="\t",quote='"', row.names=1) #measure1 Frm1DKdatdes.txt
d2<-read.table(file.choose(), header=T, sep="\t",quote='"', row.names=1)#measure 2 Frm2DKdatdes.txt
h <- read.table(file.choose(), header=T, sep="\t",quote='"', row.names=1) #measure harvest FrmHDKdatdes.txt
#plots of pop means from means table
head(PopMeansMh)
Hcont<-PopMeansMh[PopMeansMh$Trt=="control",]
plot(Hcont$Pop, Hcont$MassH)
title(main="Shoot mass at harvest", sub="Control treatment",
xlab="Population", ylab="mass(g)")
text(Hcont$Pop, Hcont$MassH, Hcont$Pop, cex=0.6, pos=4, col="red")
#plots of pop means from data, grouped by pop, trt
# BUG FIX: "gplot" is not a CRAN package, so library("gplot") errors and
# aborts the script here; the intended package is gplots.
library("gplots")
library("ggplot2")
str(h)
unique(h$Pop)
h$Pop<-factor(h$Pop, c("CA001","CA008","CA009","CA010", "US001", "US002","US003", "BG001","GR001","GR002","GR003","HU001","RO001", "RO005","RU008","TR001","UA004"))
print(levels(h$Pop))
png(filename="FrmassMeans.png", width=800, bg="white")
p <- ggplot(data=h, aes(Pop, Shoot.mass.gH, fill=Trt)) +
geom_boxplot()
plot(p)
dev.off()
png(filename="FrcrownMeans.png", width=800, bg="white")
p <- ggplot(data=h, aes(Pop, CrownDiam.mm, fill=Trt)) +
geom_boxplot()
plot(p)
dev.off()
str(d)
unique(d$Pop)
d$Pop<-factor(d$Pop, c("CA001","CA008","CA009","CA010", "US001", "US002","US003", "BG001","GR001","GR002","GR003","HU001","RO001", "RO005","RU008","TR001","UA004"))
print(levels(d$Pop))
png(filename="FrDlfMeans.png", width=800, bg="white")
p <- ggplot(data=d, aes(Pop, MaxLfLgth1)) +
geom_boxplot()
plot(p)
dev.off()
#barplot with se bars
#harvest control shoot mass
# Standard-error helper used throughout this script: sqrt(var / (n_obs - 1)),
# where n_obs counts the non-missing values.  (Note: divides by n - 1, not n.)
se <- function(x) {
  n_obs <- length(na.omit(x))
  sqrt(var(x, na.rm = TRUE) / (n_obs - 1))
}
Hcont2<-h[h$Trt=="control",]
tapply(Hcont2$Shoot.mass.gH, Hcont2$Pop,mean,na.rm=TRUE)
tapply(Hcont2$Shoot.mass.gH, Hcont2$Pop,se)
plt <- barplot(tapply(Hcont2$Shoot.mass.gH, Hcont2$Pop,mean,na.rm=TRUE), ylim=c(0, 30))
y.se <- tapply(Hcont2$Shoot.mass.gH, Hcont2$Pop,se)
y.mean <- tapply(Hcont2$Shoot.mass.gH, Hcont2$Pop, mean, na.rm=TRUE)
# y.mean + y.se
# max(y.mean + y.se)
# c(0, max(y.mean + y.se, na.rm=TRUE))
ylim <- c(0, max(y.mean + y.se, na.rm=TRUE))
png(filename="Frmassbar.png", width=800, bg="white")
x<- barplot(y.mean,ylim=ylim, main="Shoot mass at harvest, control", col="blue")
arrows(x, y.mean - y.se, x, y.mean + y.se,code=3, length=0.03, angle=90)
dev.off()
#axis(1, at=1:17, lab=Hcont$Pop)
#overall
tapply(h$Shoot.mass.gH, h$Pop,mean,na.rm=TRUE)
tapply(h$Shoot.mass.gH, h$Pop,se)
plt <- barplot(tapply(h$Shoot.mass.gH, h$Pop,mean,na.rm=TRUE), ylim=c(0, 30))
y.se <- tapply(h$Shoot.mass.gH, h$Pop,se)
y.mean <- tapply(h$Shoot.mass.gH, h$Pop, mean, na.rm=TRUE)
# y.mean + y.se
# max(y.mean + y.se)
c(0, max(y.mean + y.se, na.rm=TRUE))
ylim <- c(0, max(y.mean + y.se, na.rm=TRUE))
x<- barplot(y.mean,ylim=ylim, main="Shoot mass at harvest, control", col="blue", beside=TRUE)
arrows(x, y.mean - y.se, x, y.mean + y.se,code=3, length=0.03, angle=90)
#########
#summary
summary(d)
dpop<-as.data.frame(d)
dpop<-dpop[order(dpop$Origin, decreasing=FALSE),]
dpop$Pop <- factor(dpop$Pop, c("", "", "","", ""))
plot(dpop$Pop)
plot(sort(PopMeansM1$Latitude))
#axis(1, at=1:17, lab=as.vector(PopMeansM1$Pop))
plot(PopMeansM1$Latitude)
plot(PopMeansM1$Pop,PopMeansM1$Latitude,col=ifelse(PopMeansM1$Latitude==3,"red", "black"))
#col=ifelse(PopMeansM1$Origin=="inv", "red", "black")
plot(PopMeansM1$Latitude)
# > axis(1, at=1:17, lab=as.vector(PopMeansM1$Pop))
# > PopMeansM1$Origin<-factor(PopMeansM1$Origin)
# > PopMeansM1$col[PopMeansM1$Origin=="inv"]<-"red"
#PopMeansM1$col[PopMeansM1$Origin=="nat"]<-"black"
# > dotchart(PopMeansM1$Latitude, labels=PopMeansM1$Pop, groups=PopMeansM1$Origin, color=PopMeansM1$col)
# > dotchart(PopMeansM1$Latitude, labels=PopMeansM1$Pop, color=PopMeansM1$col)
# > dotchart(sort(PopMeansM1$Latitude), labels=PopMeansM1$Pop, color=PopMeansM1$col)
# > dotchart(order(PopMeansM1$Latitude), labels=PopMeansM1$Pop, color=PopMeansM1$col)
# summary(Frm1DKdatdes[Frm1DKdatdes$Origin=="nat"])
#
# source("http://bioconductor.org/biocLite.R")
# biocLite("psych")
# library(psych)
# describe.by(Frm1DKdatdes$LfCount1, Frm1DKdatdes$Origin)
#library(doBy)
#summaryBy(mpg + wt ~ cyl + vs, data = mtcars,FUN = function(x) { c(m = mean(x), s = sd(x)) } )
# produces mpg.m wt.m mpg.s wt.s for each
# combination of the levels of cyl and vs
tapply(Frm1DKcont$LfCount1, INDEX = Frm1DKcont$Origin, FUN = mean, na.rm=TRUE)
tapply(Frm1DKcont$LfCount1, Frm1DKcont$Origin, sd, na.rm = TRUE)
tapply(Frm1DKdatdes$LfCount1, INDEX = list(Frm1DKdatdes$Origin,Frm1DKdatdes$Trt),
FUN = mean, na.rm=TRUE)
# #barplots
barplot(agdatm1$x, main="Leaf Count- m 1",names.arg=paste(agdatm1$Group.1,agdatm1$Group.2),
col="blue", axis.lty=1, xlab="groups", ylab="lf count")
# aggregate data frame returning means
# for numeric variables
agdatm1 <-aggregate(Frm1DKdatdes$LfCount1, by=list(Frm1DKdatdes$Origin,Frm1DKdatdes$Trt) ,FUN=mean, na.rm=TRUE)
print(agdatm1)
#barplot with se bars
#harvest root crown
h <- FrmHDKdatdes
head(h)
h$group <- paste(h$Origin, h$Trt)
class(h$group)
h$group <- factor(h$group, levels=c("nat control","inv control","nat drought","inv drought"))
tapply(h$CrownDiam.mm, h$group,mean,na.rm=TRUE)
se <- function(x) sqrt(var(x, na.rm=TRUE)/(length(na.omit(x))-1))
tapply(h$CrownDiam.mm, h$group,se)
plt <- barplot(tapply(h$CrownDiam.mm, h$group,mean,na.rm=TRUE), ylim=c(0, 30))
plt
y.se <- tapply(h$CrownDiam.mm, h$group,se)
y.mean <- tapply(h$CrownDiam.mm, h$group, mean, na.rm=TRUE)
y.mean + y.se
c(0, max(y.mean + y.se))
ylim <- c(0, max(y.mean + y.se))
x<- barplot(y.mean,ylim=ylim, main="Root crown diameter at harvest", col="blue")
arrows(x, y.mean - y.se, x, y.mean + y.se,code=3, length=0.03, angle=90)
#m1 lf count
d <- Frm1DKdatdes
d$Origin<-factor(d$Origin, levels=c("nat","inv"))
tapply(d$LfCount1, d$Origin,mean,na.rm=TRUE)
se <- function(x) sqrt(var(x, na.rm=TRUE)/(length(na.omit(x))-1))
tapply(d$LfCount1, d$Origin,se)
barplot(tapply(d$LfCount1, d$Origin,mean,na.rm=TRUE), ylim=c(0, 10))
plt <- barplot(tapply(d$LfCount1, d$Origin,mean,na.rm=TRUE), ylim=c(0, 10))
plt
y.se <- tapply(d$LfCount1, d$Origin,se)
y.mean <- tapply(d$LfCount1, d$Origin, mean, na.rm=TRUE)
y.mean + y.se
c(0, max(y.mean + y.se))
ylim <- c(0, max(y.mean + y.se))
plt<- barplot(y.mean,ylim=ylim, main="Leaf No., week 5",cex.main=2.5,
col=1:length(unique(Frm2DKcont$Origin)),xlab="Range", ylab="Leaf number",
cex.lab=1.5)
arrows(plt, y.mean - y.se, plt, y.mean + y.se,code=3, length=0.03, angle=90)
#m2 lf width
Frm2DKcont$Origin<-factor(Frm2DKcont$Origin,levels=c("nat", "inv"))
tapply(Frm2DKcont$MaxLfWdth2, Frm2DKcont$Origin,mean,na.rm=TRUE)
se <- function(x) sqrt(var(x, na.rm=TRUE)/(length(na.omit(x))-1))
tapply(Frm2DKcont$MaxLfWdth2, Frm2DKcont$Origin,se)
barplot(tapply(Frm2DKcont$MaxLfWdth2, Frm2DKcont$Origin,mean,na.rm=TRUE), ylim=c(0, 10))
plt <- barplot(tapply(Frm2DKcont$MaxLfWdth2, Frm2DKcont$Origin,mean,na.rm=TRUE), ylim=c(0, 10))
plt
y.se <- tapply(Frm2DKcont$MaxLfWdth2, Frm2DKcont$Origin,se)
y.mean <- tapply(Frm2DKcont$MaxLfWdth2, Frm2DKcont$Origin, mean, na.rm=TRUE)
y.mean + y.se
c(0, max(y.mean + y.se))
ylim <- c(0, 5)
# Frm2DKcont$color[Frm2DKcont$Origin=="inv"]<-"red"
# Frm2DKcont$color[Frm2DKcont$Origin=="nat"]<-"black"
plt<- barplot(y.mean,ylim=ylim, main="Leaf width, week 8 ",
col=1:length(unique(Frm2DKcont$Origin)), xlab="Range", ylab="Leaf width (cm)",
cex.main=2.5,cex.lab=1.5)
arrows(plt, y.mean - y.se, plt, y.mean + y.se,code=3, length=0.03, angle=90)
#Grouped and colored dot plot
#Group and color data by genotype
Frm1DKdatdes<-Frm1DKdatdes[order(Frm1DKdatdes$Origin),]
Frm1DKdatdes$Origin<-factor(Frm1DKdatdes$Origin)
Frm1DKdatdes$color[Frm1DKdatdes$Origin=="inv"]<-"red"
Frm1DKdatdes$color[Frm1DKdatdes$Origin=="nat"]<-"black"
# Frm2datTag$color[Frm2datTag$Origin=="sk"]<-"blue"
#
par(mar=c(5,6,4,2)+0.1,mgp=c(7,1,0))
dotchart(Frm1DKdatdes$LfCount1, ylab="indiv", xlab="lf number",cex=.7,labels=row.names(Frm1DKdatdes),groups= Frm1DKdatdes$Origin,main="lf number by origin", gcolor="black", color=Frm1DKdatdes$color)
mtext("lf number", side=1,line=4)
# #lf length
#
# par(mar=c(5,6,4,2)+0.1,mgp=c(7,1,0))
# dotchart(Frm2datTag$lf.length, ylab="indiv", xlab="lf number",cex=.7,labels=row.names(Frm2datTag),groups= Frm2datTag$Origin,main="lf number by origin", gcolor="black", color=Frm2datTag$color)
# mtext("lf length", side=1,line=4)
#
# #lf width
# class(Frm2datTag$lf.width)
# Frm2datTag$lf.width<-as.numeric(Frm2datTag$lf.width)
# dotchart(Frm2datTag$lf.width, ylab="indiv", xlab="lf number",cex=.7,labels=row.names(Frm2datTag),groups= Frm2datTag$Origin,main="lf number by origin", gcolor="black", color=Frm2datTag$color)
# mtext("lf width", side=1,line=4)
#
# #rosette diameter
# class(Frm2datTag$rosette.diam)
# Frm2datTag$rosette.diam<-as.numeric(Frm2datTag$rosette.diam)
# dotchart(Frm2datTag$rosette.diam, ylab="indiv", xlab="lf number",cex=.7,labels=row.names(Frm2datTag),groups= Frm2datTag$Origin,main="lf number by origin", gcolor="black", color=Frm2datTag$color)
# mtext("rosette diam", side=1,line=4)
#
# #avg
# class()
# m2means<-as.data.frame(aggregate(Frm2Imp$lf.number, list(Frm2Imp$Origin) , mean))
# m2means$lf.number <- aggregate(Frm2Imp$lf.number, list(Frm2Imp$Origin) , mean)
# m2means$lf.width <- aggregate(Frm2Imp$lf.width, list(Frm2Imp$Origin) , mean)
# m2means$lf.length <- aggregate(Frm2Imp$lf.length, list(Frm2Imp$Origin) , mean)
# m2means$rosette.diam <- aggregate(Frm2Imp$rosette.diam, list(Frm2Imp$Origin) , mean)
# m2means
# #names(m2means) <- c('dnase.conc', 'dens.avg')
#
#
# plot(m2means$Group.1, m2means$x)
|
/draft_code_figures/Frgraphs.R
|
no_license
|
kgturner/FranceCG
|
R
| false
| false
| 9,691
|
r
|
#graphing
d <- read.table(file.choose(), header=T, sep="\t",quote='"', row.names=1) #measure1 Frm1DKdatdes.txt
d2<-read.table(file.choose(), header=T, sep="\t",quote='"', row.names=1)#measure 2 Frm2DKdatdes.txt
h <- read.table(file.choose(), header=T, sep="\t",quote='"', row.names=1) #measure harvest FrmHDKdatdes.txt
#plots of pop means from means table
head(PopMeansMh)
Hcont<-PopMeansMh[PopMeansMh$Trt=="control",]
plot(Hcont$Pop, Hcont$MassH)
title(main="Shoot mass at harvest", sub="Control treatment",
xlab="Population", ylab="mass(g)")
text(Hcont$Pop, Hcont$MassH, Hcont$Pop, cex=0.6, pos=4, col="red")
#plots of pop means from data, grouped by pop, trt
# BUG FIX: "gplot" does not exist on CRAN; loading it errors out and stops the
# script.  The author almost certainly meant gplots.
library("gplots")
library("ggplot2")
str(h)
unique(h$Pop)
h$Pop<-factor(h$Pop, c("CA001","CA008","CA009","CA010", "US001", "US002","US003", "BG001","GR001","GR002","GR003","HU001","RO001", "RO005","RU008","TR001","UA004"))
print(levels(h$Pop))
png(filename="FrmassMeans.png", width=800, bg="white")
p <- ggplot(data=h, aes(Pop, Shoot.mass.gH, fill=Trt)) +
geom_boxplot()
plot(p)
dev.off()
png(filename="FrcrownMeans.png", width=800, bg="white")
p <- ggplot(data=h, aes(Pop, CrownDiam.mm, fill=Trt)) +
geom_boxplot()
plot(p)
dev.off()
str(d)
unique(d$Pop)
d$Pop<-factor(d$Pop, c("CA001","CA008","CA009","CA010", "US001", "US002","US003", "BG001","GR001","GR002","GR003","HU001","RO001", "RO005","RU008","TR001","UA004"))
print(levels(d$Pop))
png(filename="FrDlfMeans.png", width=800, bg="white")
p <- ggplot(data=d, aes(Pop, MaxLfLgth1)) +
geom_boxplot()
plot(p)
dev.off()
#barplot with se bars
#harvest control shoot mass
# Standard-error helper: sqrt(var / (n - 1)) over non-missing values (note the
# n - 1 denominator rather than n).
se <- function(x) sqrt(var(x, na.rm=TRUE)/(length(na.omit(x))-1))
Hcont2<-h[h$Trt=="control",]
tapply(Hcont2$Shoot.mass.gH, Hcont2$Pop,mean,na.rm=TRUE)
tapply(Hcont2$Shoot.mass.gH, Hcont2$Pop,se)
plt <- barplot(tapply(Hcont2$Shoot.mass.gH, Hcont2$Pop,mean,na.rm=TRUE), ylim=c(0, 30))
y.se <- tapply(Hcont2$Shoot.mass.gH, Hcont2$Pop,se)
y.mean <- tapply(Hcont2$Shoot.mass.gH, Hcont2$Pop, mean, na.rm=TRUE)
# y.mean + y.se
# max(y.mean + y.se)
# c(0, max(y.mean + y.se, na.rm=TRUE))
ylim <- c(0, max(y.mean + y.se, na.rm=TRUE))
png(filename="Frmassbar.png", width=800, bg="white")
x<- barplot(y.mean,ylim=ylim, main="Shoot mass at harvest, control", col="blue")
arrows(x, y.mean - y.se, x, y.mean + y.se,code=3, length=0.03, angle=90)
dev.off()
#axis(1, at=1:17, lab=Hcont$Pop)
#overall
tapply(h$Shoot.mass.gH, h$Pop,mean,na.rm=TRUE)
tapply(h$Shoot.mass.gH, h$Pop,se)
plt <- barplot(tapply(h$Shoot.mass.gH, h$Pop,mean,na.rm=TRUE), ylim=c(0, 30))
y.se <- tapply(h$Shoot.mass.gH, h$Pop,se)
y.mean <- tapply(h$Shoot.mass.gH, h$Pop, mean, na.rm=TRUE)
# y.mean + y.se
# max(y.mean + y.se)
c(0, max(y.mean + y.se, na.rm=TRUE))
ylim <- c(0, max(y.mean + y.se, na.rm=TRUE))
x<- barplot(y.mean,ylim=ylim, main="Shoot mass at harvest, control", col="blue", beside=TRUE)
arrows(x, y.mean - y.se, x, y.mean + y.se,code=3, length=0.03, angle=90)
#########
#summary
summary(d)
dpop<-as.data.frame(d)
dpop<-dpop[order(dpop$Origin, decreasing=FALSE),]
dpop$Pop <- factor(dpop$Pop, c("", "", "","", ""))
plot(dpop$Pop)
plot(sort(PopMeansM1$Latitude))
#axis(1, at=1:17, lab=as.vector(PopMeansM1$Pop))
plot(PopMeansM1$Latitude)
plot(PopMeansM1$Pop,PopMeansM1$Latitude,col=ifelse(PopMeansM1$Latitude==3,"red", "black"))
#col=ifelse(PopMeansM1$Origin=="inv", "red", "black")
plot(PopMeansM1$Latitude)
# > axis(1, at=1:17, lab=as.vector(PopMeansM1$Pop))
# > PopMeansM1$Origin<-factor(PopMeansM1$Origin)
# > PopMeansM1$col[PopMeansM1$Origin=="inv"]<-"red"
#PopMeansM1$col[PopMeansM1$Origin=="nat"]<-"black"
# > dotchart(PopMeansM1$Latitude, labels=PopMeansM1$Pop, groups=PopMeansM1$Origin, color=PopMeansM1$col)
# > dotchart(PopMeansM1$Latitude, labels=PopMeansM1$Pop, color=PopMeansM1$col)
# > dotchart(sort(PopMeansM1$Latitude), labels=PopMeansM1$Pop, color=PopMeansM1$col)
# > dotchart(order(PopMeansM1$Latitude), labels=PopMeansM1$Pop, color=PopMeansM1$col)
# summary(Frm1DKdatdes[Frm1DKdatdes$Origin=="nat"])
#
# source("http://bioconductor.org/biocLite.R")
# biocLite("psych")
# library(psych)
# describe.by(Frm1DKdatdes$LfCount1, Frm1DKdatdes$Origin)
#library(doBy)
#summaryBy(mpg + wt ~ cyl + vs, data = mtcars,FUN = function(x) { c(m = mean(x), s = sd(x)) } )
# produces mpg.m wt.m mpg.s wt.s for each
# combination of the levels of cyl and vs
tapply(Frm1DKcont$LfCount1, INDEX = Frm1DKcont$Origin, FUN = mean, na.rm=TRUE)
tapply(Frm1DKcont$LfCount1, Frm1DKcont$Origin, sd, na.rm = TRUE)
tapply(Frm1DKdatdes$LfCount1, INDEX = list(Frm1DKdatdes$Origin,Frm1DKdatdes$Trt),
FUN = mean, na.rm=TRUE)
# #barplots
barplot(agdatm1$x, main="Leaf Count- m 1",names.arg=paste(agdatm1$Group.1,agdatm1$Group.2),
col="blue", axis.lty=1, xlab="groups", ylab="lf count")
# aggregate data frame returning means
# for numeric variables
agdatm1 <-aggregate(Frm1DKdatdes$LfCount1, by=list(Frm1DKdatdes$Origin,Frm1DKdatdes$Trt) ,FUN=mean, na.rm=TRUE)
print(agdatm1)
#barplot with se bars
#harvest root crown
h <- FrmHDKdatdes
head(h)
h$group <- paste(h$Origin, h$Trt)
class(h$group)
h$group <- factor(h$group, levels=c("nat control","inv control","nat drought","inv drought"))
tapply(h$CrownDiam.mm, h$group,mean,na.rm=TRUE)
se <- function(x) sqrt(var(x, na.rm=TRUE)/(length(na.omit(x))-1))
tapply(h$CrownDiam.mm, h$group,se)
plt <- barplot(tapply(h$CrownDiam.mm, h$group,mean,na.rm=TRUE), ylim=c(0, 30))
plt
y.se <- tapply(h$CrownDiam.mm, h$group,se)
y.mean <- tapply(h$CrownDiam.mm, h$group, mean, na.rm=TRUE)
y.mean + y.se
# --- Bar plots of per-Origin trait means with standard-error bars ----------
# NOTE(review): this chunk continues an interactive analysis script; y.mean,
# y.se and the data frames Frm1DKdatdes / Frm2DKcont are created earlier in
# the file and are assumed to hold per-Origin trait summaries -- confirm.
# Bare expressions (e.g. `c(0, ...)`, `plt`) only print when run interactively.
c(0, max(y.mean + y.se))
ylim <- c(0, max(y.mean + y.se))
x<- barplot(y.mean,ylim=ylim, main="Root crown diameter at harvest", col="blue")
# code=3 / angle=90 turns the arrows into error bars capped at both ends
arrows(x, y.mean - y.se, x, y.mean + y.se,code=3, length=0.03, angle=90)
#m1 lf count  (measurement 1: leaf count by Origin)
d <- Frm1DKdatdes
# put the native ("nat") level first so it is drawn as the left-hand bar
d$Origin<-factor(d$Origin, levels=c("nat","inv"))
tapply(d$LfCount1, d$Origin,mean,na.rm=TRUE)
# Standard-error helper.
# NOTE(review): divides the variance by (n - 1) rather than n; the usual
# standard error of the mean is sqrt(var(x)/n) -- confirm which is intended.
se <- function(x) sqrt(var(x, na.rm=TRUE)/(length(na.omit(x))-1))
tapply(d$LfCount1, d$Origin,se)
# Quick exploratory bar plot, then capture bar midpoints for the error bars
barplot(tapply(d$LfCount1, d$Origin,mean,na.rm=TRUE), ylim=c(0, 10))
plt <- barplot(tapply(d$LfCount1, d$Origin,mean,na.rm=TRUE), ylim=c(0, 10))
plt
y.se <- tapply(d$LfCount1, d$Origin,se)
y.mean <- tapply(d$LfCount1, d$Origin, mean, na.rm=TRUE)
y.mean + y.se
c(0, max(y.mean + y.se))
ylim <- c(0, max(y.mean + y.se))
# Final presentation plot; bar colours are recycled from the palette by the
# number of Origin levels in Frm2DKcont
plt<- barplot(y.mean,ylim=ylim, main="Leaf No., week 5",cex.main=2.5,
col=1:length(unique(Frm2DKcont$Origin)),xlab="Range", ylab="Leaf number",
cex.lab=1.5)
arrows(plt, y.mean - y.se, plt, y.mean + y.se,code=3, length=0.03, angle=90)
#m2 lf width  (measurement 2: maximum leaf width by Origin)
Frm2DKcont$Origin<-factor(Frm2DKcont$Origin,levels=c("nat", "inv"))
tapply(Frm2DKcont$MaxLfWdth2, Frm2DKcont$Origin,mean,na.rm=TRUE)
# redefined identically to the helper above
se <- function(x) sqrt(var(x, na.rm=TRUE)/(length(na.omit(x))-1))
tapply(Frm2DKcont$MaxLfWdth2, Frm2DKcont$Origin,se)
barplot(tapply(Frm2DKcont$MaxLfWdth2, Frm2DKcont$Origin,mean,na.rm=TRUE), ylim=c(0, 10))
plt <- barplot(tapply(Frm2DKcont$MaxLfWdth2, Frm2DKcont$Origin,mean,na.rm=TRUE), ylim=c(0, 10))
plt
y.se <- tapply(Frm2DKcont$MaxLfWdth2, Frm2DKcont$Origin,se)
y.mean <- tapply(Frm2DKcont$MaxLfWdth2, Frm2DKcont$Origin, mean, na.rm=TRUE)
y.mean + y.se
c(0, max(y.mean + y.se))
# y-axis limit fixed at 5 here (not derived from the data as above)
ylim <- c(0, 5)
# Frm2DKcont$color[Frm2DKcont$Origin=="inv"]<-"red"
# Frm2DKcont$color[Frm2DKcont$Origin=="nat"]<-"black"
plt<- barplot(y.mean,ylim=ylim, main="Leaf width, week 8 ",
col=1:length(unique(Frm2DKcont$Origin)), xlab="Range", ylab="Leaf width (cm)",
cex.main=2.5,cex.lab=1.5)
arrows(plt, y.mean - y.se, plt, y.mean + y.se,code=3, length=0.03, angle=90)
#Grouped and colored dot plot
#Group and color data by genotype
# Sort rows by Origin so the dot chart groups are contiguous, then colour
# individuals red (invasive) or black (native)
Frm1DKdatdes<-Frm1DKdatdes[order(Frm1DKdatdes$Origin),]
Frm1DKdatdes$Origin<-factor(Frm1DKdatdes$Origin)
Frm1DKdatdes$color[Frm1DKdatdes$Origin=="inv"]<-"red"
Frm1DKdatdes$color[Frm1DKdatdes$Origin=="nat"]<-"black"
# Frm2datTag$color[Frm2datTag$Origin=="sk"]<-"blue"
#
# Widen margins / push axis title out so long row labels fit
par(mar=c(5,6,4,2)+0.1,mgp=c(7,1,0))
dotchart(Frm1DKdatdes$LfCount1, ylab="indiv", xlab="lf number",cex=.7,labels=row.names(Frm1DKdatdes),groups= Frm1DKdatdes$Origin,main="lf number by origin", gcolor="black", color=Frm1DKdatdes$color)
mtext("lf number", side=1,line=4)
# #lf length
#
# par(mar=c(5,6,4,2)+0.1,mgp=c(7,1,0))
# dotchart(Frm2datTag$lf.length, ylab="indiv", xlab="lf number",cex=.7,labels=row.names(Frm2datTag),groups= Frm2datTag$Origin,main="lf number by origin", gcolor="black", color=Frm2datTag$color)
# mtext("lf length", side=1,line=4)
#
# #lf width
# class(Frm2datTag$lf.width)
# Frm2datTag$lf.width<-as.numeric(Frm2datTag$lf.width)
# dotchart(Frm2datTag$lf.width, ylab="indiv", xlab="lf number",cex=.7,labels=row.names(Frm2datTag),groups= Frm2datTag$Origin,main="lf number by origin", gcolor="black", color=Frm2datTag$color)
# mtext("lf width", side=1,line=4)
#
# #rosette diameter
# class(Frm2datTag$rosette.diam)
# Frm2datTag$rosette.diam<-as.numeric(Frm2datTag$rosette.diam)
# dotchart(Frm2datTag$rosette.diam, ylab="indiv", xlab="lf number",cex=.7,labels=row.names(Frm2datTag),groups= Frm2datTag$Origin,main="lf number by origin", gcolor="black", color=Frm2datTag$color)
# mtext("rosette diam", side=1,line=4)
#
# #avg
# class()
# m2means<-as.data.frame(aggregate(Frm2Imp$lf.number, list(Frm2Imp$Origin) , mean))
# m2means$lf.number <- aggregate(Frm2Imp$lf.number, list(Frm2Imp$Origin) , mean)
# m2means$lf.width <- aggregate(Frm2Imp$lf.width, list(Frm2Imp$Origin) , mean)
# m2means$lf.length <- aggregate(Frm2Imp$lf.length, list(Frm2Imp$Origin) , mean)
# m2means$rosette.diam <- aggregate(Frm2Imp$rosette.diam, list(Frm2Imp$Origin) , mean)
# m2means
# #names(m2means) <- c('dnase.conc', 'dens.avg')
#
#
# plot(m2means$Group.1, m2means$x)
|
## Examine how household energy usage varies over a 2-day period in February, 2007.
## This function creates graphs of Global Active Power, Voltage, Sub meter Power usage
## and Global reactive power over the two-day period 1/2/2007 and 2/2/2007.
##
## Side effects: downloads and extracts the raw data set into the working
## directory (skipped when the files are already present) and writes
## "plot4.png" (480 x 480 px). Takes no arguments; called for its side effects.
##
## Fixes vs. the original:
##  * portable relative paths instead of Windows-only ".\\" separators (on
##    Unix the backslash became part of the file name, so read.table could
##    never find the extracted .txt file);
##  * the ~20 MB download/extract is no longer repeated on every call;
##  * dev.off() is guaranteed via on.exit() even if plotting fails;
##  * the legend no longer draws pch point markers that the line plots lack.
plot4 <- function(){
    dataUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
    zipFile <- "household_power_consumption.zip"
    txtFile <- "household_power_consumption.txt"
    ## Download / extract the data only if it is not already available
    if (!file.exists(txtFile)) {
        if (!file.exists(zipFile)) {
            download.file(dataUrl, zipFile, mode = "wb")
        }
        unzip(zipFile)
    }
    ## Read the data; "?" encodes missing values in this data set
    data <- read.table(txtFile, sep = ";", header = TRUE,
                       colClasses = "character", na.strings = "?")
    ## Keep only the two days of interest
    useflags <- data$Date == "1/2/2007" | data$Date == "2/2/2007"
    febData <- data[useflags, ]
    ## Build the date-time axis from the Date and Time text columns
    datetime <- strptime(paste(febData$Date, febData$Time), "%d/%m/%Y %H:%M:%S")
    ## Construct a 2x2 panel of plots and save it to a PNG file with a width
    ## of 480 pixels and a height of 480 pixels.
    png(filename = "plot4.png", width = 480, height = 480)
    on.exit(dev.off(), add = TRUE)  # close the device even on error
    par(mfrow = c(2, 2))
    with(febData, {
        plot(datetime, as.numeric(Global_active_power), col = "black",
             type = "l", xlab = "", ylab = "Global Active Power")
        plot(datetime, as.numeric(Voltage), col = "black",
             type = "l", xlab = "datetime", ylab = "Voltage")
        plot(datetime, as.numeric(Sub_metering_1), col = "black",
             type = "l", xlab = "", ylab = "Energy sub metering")
        lines(datetime, as.numeric(Sub_metering_2), col = "red")
        lines(datetime, as.numeric(Sub_metering_3), col = "blue")
        ## lty-only legend: the series are drawn as plain lines
        legend("topright", col = c("black", "red", "blue"),
               legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
               lty = 1, cex = 0.7)
        plot(datetime, as.numeric(Global_reactive_power), col = "black",
             type = "l", xlab = "datetime", ylab = "Global_reactive_power")
    })
}
|
/plot4.R
|
no_license
|
pdxpro/ExData_Plotting1
|
R
| false
| false
| 1,940
|
r
|
## Examine how household energy usage varies over a 2-day period in February, 2007.
## This function creates graphs of Global Active Power, Voltage, Sub meter Power usage and Global reactive power over two day period
## for the dates 1/2/2007 and 2/2/2007
##
## Side effects: downloads and extracts the raw data set into the working
## directory on EVERY call, then writes "plot4.png" (480 x 480 px).
## Takes no arguments; the return value is that of dev.off().
##
## NOTE(review): the ".\\" prefix is a Windows path separator; on Unix-like
## systems the backslash becomes a literal character in the file name, so
## read.table below will not find the file extracted by unzip() -- confirm
## the target platform before reuse.
plot4 <- function(){
dataUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
## Read the data
download.file(dataUrl, ".\\household_power_consumption.zip", mode="wb")
## Extract compressed file
unzip(".\\household_power_consumption.zip")
## Read Data ("?" encodes missing values; all columns read as character and
## converted with as.numeric() at plot time)
data <- read.table(".\\household_power_consumption.txt", sep=";", header=TRUE, colClasses="character", na.strings="?")
# Filter the data of interest
#
useflags <- data$Date == "1/2/2007" | data$Date == "2/2/2007"
febData <- data[useflags,]
# Build the date-time axis from the Date and Time text columns
datetime = paste(febData$Date, febData$Time)
datetime <- strptime(datetime, "%d/%m/%Y %H:%M:%S")
## Construct the plot and save it to a PNG file with a width of 480 pixels
## and a height of 480 pixels.
png(filename = "plot4.png", width=480, height=480)
# 2x2 panel layout
par(mfrow = c(2, 2))
with(febData, {
plot(datetime, as.numeric(febData$Global_active_power), col= "black", type='l', xlab="", ylab="Global Active Power")
plot(datetime, as.numeric(febData$Voltage), col= "black", type='l', xlab="datetime", ylab="Voltage")
plot(datetime, as.numeric(febData$Sub_metering_1), col= "black", type='l', xlab="", ylab="Energy sub metering")
lines(datetime, as.numeric(febData$Sub_metering_2), col= "red", type='l')
lines(datetime, as.numeric(febData$Sub_metering_3), col= "blue", type='l')
# NOTE(review): pch=1 draws point markers in the legend although the series
# are plotted as plain lines -- confirm this is intentional.
legend("topright", pch=1, col=c("black","red","blue"), legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, cex = 0.7)
plot(datetime, as.numeric(febData$Global_reactive_power), col= "black", type='l', xlab="datetime", ylab="Global_reactive_power")
} )
dev.off()
}
|
# Replicated mode-jumping MCMC (EMJMCMC) model search on the protein activity
# data, following the first example of the BAS article by Clyde, Ghosh and
# Littman. Runs Niter independent searches and writes per-replicate marginal
# likelihoods and posterior inclusion probabilities to CSV files in the
# working directory.
# NOTE(review): rm(list = ls(all = TRUE)) wipes the caller's entire workspace
# as a side effect -- avoid when sourcing this script interactively.
rm(list = ls(all = TRUE))
# install the required packges if needed
#install.packages("INLA", repos="http://www.math.ntnu.no/inla/R/testing")
#install.packages("bigmemory")
#install.packages("snow")
#install.packages("Rmpi")
#install.packages("ade4")
#install.packages("sp")
#install.packages("BAS")
#install.packages("https://github.com/aliaksah/EMJMCMC2016/files/270429/EMJMCMC_1.2.tar.gz", repos = NULL, type="source")
#install.packages("RCurl")
#install.packages("hash")
library(hash)
library(RCurl)
#library(EMJMCMC)
library(sp)
library(INLA)
library(parallel)
library(bigmemory)
library(snow)
library(MASS)
library(ade4)
#library(copula)
library(compiler)
library(BAS)
require(stats)
#define your working directory, where the data files are stored
# NOTE(review): workdir is assigned but never used below (files are written
# to the current working directory) -- confirm intent.
workdir<-"/results"
#prepare data
# Column 89 holds the response Y; the other 88 columns are candidate
# covariates, renamed to the model-formula terms I(V1)..I(V88).
simx <- read.table(text = getURL("https://raw.githubusercontent.com/aliaksah/EMJMCMC2016/master/examples/Protein%20Activity%20Data/proteincen.txt"),sep = " ")
data.example <- as.data.frame(simx)
names(data.example)[89]="Y"
#fparam <- c("Const",colnames(data)[-1])
fparam.example <- colnames(data.example)[-89]
fobserved.example <- colnames(data.example)[89]
for(i in 1:length(fparam.example))
{
fparam.example[i]=paste("I(V",i,")",sep = "")
}
# create either a standard hash table (default for now)
hashStat <- hash()
# or the one based on bigmemory package N/B do not create both data objects simultaneously, since this can lead to unpredicted results
#dataframe for results; n/b +1 is required for the summary statistics
#statistics1 <- big.matrix(nrow = 2 ^(23)+1, ncol = 16,init = NA, type = "double")
#statistics <- describe(statistics1)
#dataframe for results; n/b +1 is required for the summary statistics
#hash.keys1 <- big.matrix(nrow = 2 ^(23)+1, ncol = 88,init = 0, type = "char")
#hash.keys <- describe(hash.keys1)
#create MySearch object with default parameters
# NOTE(review): EMJMCMC2016() and estimate.bas.lm.pen come from the EMJMCMC
# package, whose library() call is commented out above -- this script assumes
# the package is already attached.
mySearch = EMJMCMC2016()
# load functions as in BAS article by Clyde, Ghosh and Littman to reproduce their first example
mySearch$estimator = estimate.bas.lm.pen
mySearch$estimator.args = list(data = data.example,prior = 3, g = 96 ,n=96,pen = 0.1, p.max = 88)
mySearch$parallelize = lapply# if the hash provided by Decision Patterns is used parallel computing is not performed!?
#
# full enumeration is infeasible
# system.time(
#   FFF<-mySearch$full_selection(list(statid=6, totalit =32769, ub = 13600,mlikcur=-Inf,waiccur =100000))
# )
# # check that all models are enumerated during the full search procedure
# idn<-which(!is.na(statistics1[,1]))
# length(idn)
# hashStat
# define parameters of the search
mySearch$printable.opt=F
mySearch$max.cpu = as.integer(10)
mySearch$locstop.nd=FALSE
mySearch$max.cpu.glob = as.integer(10)
mySearch$max.N.glob=as.integer(20)
mySearch$min.N.glob=as.integer(5)
mySearch$max.N=as.integer(3)
mySearch$min.N=as.integer(1)
mySearch$recalc.margin = (500000)
# Relative frequencies of the five proposal kinds and the 5x7 neighbourhood
# structure used by the mode-jumping kernel (values taken as given).
distrib_of_proposals = c(76.91870,71.25264,87.68184,90.55921,17812.39852)
distrib_of_neighbourhoods=t(array(data = c(7.6651604,16.773326,14.541629,12.839445,12.964227,13.048343,7.165434,
0.9936905,15.942490,11.040131,3.200394,15.349051,15.466632,4.676458,
1.5184551,9.285762,6.125034,3.627547,13.343413,12.923767,5.318774,
14.5295380,1.521960,11.804457,5.070282,6.934380,10.578945,2.455602,
26.0826035,12.453729,14.340435,14.863495,10.028312,12.685017,13.806295),dim = c(7,5)))
mySearch$hash.length<-as.integer(20)
mySearch$double.hashing<-F
#Proceed for the predefined number of iterations
Niter <- 10
thining<-1
system.time({
# Preallocate per-replicate result containers
mliklist<-array(data = 0, dim = c(2^mySearch$hash.length, Niter))
vect <-array(data = 0,dim = c(length(fparam.example),Niter))
vect.mc <-array(data = 0,dim = c(length(fparam.example),Niter))
inits <-array(data = 0,dim = Niter)
freqs <-array(data = 100,dim = c(5,Niter))
freqs.p <-array(data = 100,dim = c(5,7,Niter))
masses <- array(data = 0,dim = Niter)
iterats <- array(data = 0,dim = c(2,Niter))
for(i in 1:Niter)
{
#statistics1 <- big.matrix(nrow = 2 ^(length(fparam.example))+1, ncol = 15,init = NA, type = "double")
#statistics <- describe(statistics1)
# Reset the model cache and the search object's global state so each
# replicate starts from scratch
hashStat <- hash()
mySearch$g.results[1,1]<--Inf
mySearch$g.results[1,2]<-1
mySearch$g.results[4,1]<-0
mySearch$g.results[4,2]<-0
mySearch$p.add = array(data = 0.5,dim = length(fparam.example))
#distrib_of_neighbourhoods=array(data = runif(n = 5*7,min = 0, max = 20),dim = c(5,7))
#distrib_of_proposals = runif(n = 5,min = 0, max = 100)
#distrib_of_proposals[5]=sum(distrib_of_proposals[1:4])*runif(n = 1,min = 50, max = 150)
print("BEGIN ITERATION!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
print(i)
# Seed depends on the replicate index -> reproducible but distinct starts
set.seed(10*i)
initsol=rbinom(n = length(fparam.example),size = 1,prob = 0.5)
inits[i] <- mySearch$bittodec(initsol)
freqs[,i]<- distrib_of_proposals
resm<-mySearch$modejumping_mcmc(list(varcur=NULL,statid=-1, distrib_of_proposals =distrib_of_proposals,distrib_of_neighbourhoods=distrib_of_neighbourhoods, eps = 0.000000000001, trit = 2^30, trest = 2^20, burnin = 100, max.time = 24*60*6, maxit = 2^20, print.freq = 1000
))
# Posterior inclusion probabilities (renormalised and raw MCMC versions)
# plus the captured model-space mass for this replicate
vect[,i]<-resm$bayes.results$p.post
vect.mc[,i]<-resm$p.post
masses[i]<-resm$bayes.results$s.mass
print(masses[i])
freqs.p[,,i] <- distrib_of_neighbourhoods
iterats[1,i]<-mySearch$g.results[4,1]
iterats[2,i]<-mySearch$g.results[4,2]
print("COMPLETE ITERATION!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! with")
print(iterats[2,i])
# NOTE(review): hash values are read with a stride of 3, so each cached
# model apparently stores a triplet with the negated log marginal
# likelihood in the second slot -- confirm against EMJMCMC internals.
lHash<-length(hashStat)
mliks <- -1*values(hashStat)[which((1:(lHash * 3)) %%3 == 2)]
mliklist[,i]<-mliks[1:2^mySearch$hash.length]
lHash<-length(hashStat)
mliks <- -1*values(hashStat)[which((1:(lHash * 3)) %%3 == 2)]
sum(exp(mliks))
# Box plot of the 100000 best marginal likelihoods found this replicate
smilks100000<-sort(mliks,decreasing = T)[1:100000]
boxplot(smilks100000,xaxt="n",ylab="log(Marginal Likelihood)",xlab="Replicates",horizontal=FALSE,pch=".",cex.lab=1.7,cex.axis=1.5,omd=c(0,0.7,0,0.7))
smilks100000[1:10]
# Persist this replicate's results (one value per line)
write(mliklist[,i], file = paste("mlikspen",i,".csv"),
ncolumns = 1,
append = FALSE, sep = " ")
write(vect[,i], file = paste("pppen",i,".rs.csv"),
ncolumns = 1,
append = FALSE, sep = " ")
write(vect.mc[,i], file = paste("pppen",i,".mc.csv"),
ncolumns = 1,
append = FALSE, sep = " ")
remove(hashStat)
#clear(hashStat)
#remove(hashStat)
#remove(statistics1)
#remove(statistics)
}
}
)
# Summary over replicates: captured model-space mass and search effort
print("model coverages")
mean(masses)
median(masses)
print("mean # of iterations")# even smaller on average than in BAS
mean(iterats[1,])
print("mean # of estimations")# even smaller on average than in BAS
mean(iterats[2,])
|
/examples/Protein Activity Data/Protein activity data pen.r
|
no_license
|
aliaksah/EMJMCMC2016
|
R
| false
| false
| 6,806
|
r
|
# Replicated mode-jumping MCMC (EMJMCMC) model search on the protein activity
# data; runs Niter independent searches and writes per-replicate marginal
# likelihoods and posterior inclusion probabilities to CSV files.
# NOTE(review): this script is an exact duplicate of a script that appears
# earlier in this concatenated file.
# NOTE(review): rm(list = ls(all = TRUE)) wipes the caller's entire workspace
# as a side effect -- avoid when sourcing interactively.
rm(list = ls(all = TRUE))
# install the required packges if needed
#install.packages("INLA", repos="http://www.math.ntnu.no/inla/R/testing")
#install.packages("bigmemory")
#install.packages("snow")
#install.packages("Rmpi")
#install.packages("ade4")
#install.packages("sp")
#install.packages("BAS")
#install.packages("https://github.com/aliaksah/EMJMCMC2016/files/270429/EMJMCMC_1.2.tar.gz", repos = NULL, type="source")
#install.packages("RCurl")
#install.packages("hash")
library(hash)
library(RCurl)
#library(EMJMCMC)
library(sp)
library(INLA)
library(parallel)
library(bigmemory)
library(snow)
library(MASS)
library(ade4)
#library(copula)
library(compiler)
library(BAS)
require(stats)
#define your working directory, where the data files are stored
workdir<-"/results"
#prepare data
# Column 89 holds the response Y; the other 88 columns are candidate
# covariates, renamed to the model-formula terms I(V1)..I(V88).
simx <- read.table(text = getURL("https://raw.githubusercontent.com/aliaksah/EMJMCMC2016/master/examples/Protein%20Activity%20Data/proteincen.txt"),sep = " ")
data.example <- as.data.frame(simx)
names(data.example)[89]="Y"
#fparam <- c("Const",colnames(data)[-1])
fparam.example <- colnames(data.example)[-89]
fobserved.example <- colnames(data.example)[89]
for(i in 1:length(fparam.example))
{
fparam.example[i]=paste("I(V",i,")",sep = "")
}
# create either a standard hash table (default for now)
hashStat <- hash()
# or the one based on bigmemory package N/B do not create both data objects simultaneously, since this can lead to unpredicted results
#dataframe for results; n/b +1 is required for the summary statistics
#statistics1 <- big.matrix(nrow = 2 ^(23)+1, ncol = 16,init = NA, type = "double")
#statistics <- describe(statistics1)
#dataframe for results; n/b +1 is required for the summary statistics
#hash.keys1 <- big.matrix(nrow = 2 ^(23)+1, ncol = 88,init = 0, type = "char")
#hash.keys <- describe(hash.keys1)
#create MySearch object with default parameters
# NOTE(review): EMJMCMC2016() and estimate.bas.lm.pen come from the EMJMCMC
# package, whose library() call is commented out above.
mySearch = EMJMCMC2016()
# load functions as in BAS article by Clyde, Ghosh and Littman to reproduce their first example
mySearch$estimator = estimate.bas.lm.pen
mySearch$estimator.args = list(data = data.example,prior = 3, g = 96 ,n=96,pen = 0.1, p.max = 88)
mySearch$parallelize = lapply# if the hash provided by Decision Patterns is used parallel computing is not performed!?
#
# full enumeration is infeasible
# system.time(
#   FFF<-mySearch$full_selection(list(statid=6, totalit =32769, ub = 13600,mlikcur=-Inf,waiccur =100000))
# )
# # check that all models are enumerated during the full search procedure
# idn<-which(!is.na(statistics1[,1]))
# length(idn)
# hashStat
# define parameters of the search
mySearch$printable.opt=F
mySearch$max.cpu = as.integer(10)
mySearch$locstop.nd=FALSE
mySearch$max.cpu.glob = as.integer(10)
mySearch$max.N.glob=as.integer(20)
mySearch$min.N.glob=as.integer(5)
mySearch$max.N=as.integer(3)
mySearch$min.N=as.integer(1)
mySearch$recalc.margin = (500000)
# Proposal-kind frequencies and 5x7 neighbourhood weights for the kernel
distrib_of_proposals = c(76.91870,71.25264,87.68184,90.55921,17812.39852)
distrib_of_neighbourhoods=t(array(data = c(7.6651604,16.773326,14.541629,12.839445,12.964227,13.048343,7.165434,
0.9936905,15.942490,11.040131,3.200394,15.349051,15.466632,4.676458,
1.5184551,9.285762,6.125034,3.627547,13.343413,12.923767,5.318774,
14.5295380,1.521960,11.804457,5.070282,6.934380,10.578945,2.455602,
26.0826035,12.453729,14.340435,14.863495,10.028312,12.685017,13.806295),dim = c(7,5)))
mySearch$hash.length<-as.integer(20)
mySearch$double.hashing<-F
#Proceed for the predefined number of iterations
Niter <- 10
thining<-1
system.time({
# Preallocate per-replicate result containers
mliklist<-array(data = 0, dim = c(2^mySearch$hash.length, Niter))
vect <-array(data = 0,dim = c(length(fparam.example),Niter))
vect.mc <-array(data = 0,dim = c(length(fparam.example),Niter))
inits <-array(data = 0,dim = Niter)
freqs <-array(data = 100,dim = c(5,Niter))
freqs.p <-array(data = 100,dim = c(5,7,Niter))
masses <- array(data = 0,dim = Niter)
iterats <- array(data = 0,dim = c(2,Niter))
for(i in 1:Niter)
{
#statistics1 <- big.matrix(nrow = 2 ^(length(fparam.example))+1, ncol = 15,init = NA, type = "double")
#statistics <- describe(statistics1)
# Reset cache and search state so each replicate starts from scratch
hashStat <- hash()
mySearch$g.results[1,1]<--Inf
mySearch$g.results[1,2]<-1
mySearch$g.results[4,1]<-0
mySearch$g.results[4,2]<-0
mySearch$p.add = array(data = 0.5,dim = length(fparam.example))
#distrib_of_neighbourhoods=array(data = runif(n = 5*7,min = 0, max = 20),dim = c(5,7))
#distrib_of_proposals = runif(n = 5,min = 0, max = 100)
#distrib_of_proposals[5]=sum(distrib_of_proposals[1:4])*runif(n = 1,min = 50, max = 150)
print("BEGIN ITERATION!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
print(i)
set.seed(10*i)
initsol=rbinom(n = length(fparam.example),size = 1,prob = 0.5)
inits[i] <- mySearch$bittodec(initsol)
freqs[,i]<- distrib_of_proposals
resm<-mySearch$modejumping_mcmc(list(varcur=NULL,statid=-1, distrib_of_proposals =distrib_of_proposals,distrib_of_neighbourhoods=distrib_of_neighbourhoods, eps = 0.000000000001, trit = 2^30, trest = 2^20, burnin = 100, max.time = 24*60*6, maxit = 2^20, print.freq = 1000
))
vect[,i]<-resm$bayes.results$p.post
vect.mc[,i]<-resm$p.post
masses[i]<-resm$bayes.results$s.mass
print(masses[i])
freqs.p[,,i] <- distrib_of_neighbourhoods
iterats[1,i]<-mySearch$g.results[4,1]
iterats[2,i]<-mySearch$g.results[4,2]
print("COMPLETE ITERATION!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! with")
print(iterats[2,i])
lHash<-length(hashStat)
mliks <- -1*values(hashStat)[which((1:(lHash * 3)) %%3 == 2)]
mliklist[,i]<-mliks[1:2^mySearch$hash.length]
lHash<-length(hashStat)
mliks <- -1*values(hashStat)[which((1:(lHash * 3)) %%3 == 2)]
sum(exp(mliks))
smilks100000<-sort(mliks,decreasing = T)[1:100000]
boxplot(smilks100000,xaxt="n",ylab="log(Marginal Likelihood)",xlab="Replicates",horizontal=FALSE,pch=".",cex.lab=1.7,cex.axis=1.5,omd=c(0,0.7,0,0.7))
smilks100000[1:10]
# Persist this replicate's results (one value per line)
write(mliklist[,i], file = paste("mlikspen",i,".csv"),
ncolumns = 1,
append = FALSE, sep = " ")
write(vect[,i], file = paste("pppen",i,".rs.csv"),
ncolumns = 1,
append = FALSE, sep = " ")
write(vect.mc[,i], file = paste("pppen",i,".mc.csv"),
ncolumns = 1,
append = FALSE, sep = " ")
remove(hashStat)
#clear(hashStat)
#remove(hashStat)
#remove(statistics1)
#remove(statistics)
}
}
)
# Summary over replicates: captured model-space mass and search effort
print("model coverages")
mean(masses)
median(masses)
print("mean # of iterations")# even smaller on average than in BAS
mean(iterats[1,])
print("mean # of estimations")# even smaller on average than in BAS
mean(iterats[2,])
|
# Packaged example for CreditRisk::calibrate.at1p -- calibrates the AT1P
# (Analytically Tractable First Passage) structural model to market CDS data.
library(CreditRisk)
### Name: calibrate.at1p
### Title: AT1P model calibration to market CDS data
### Aliases: calibrate.at1p
### ** Examples
# cdsdata is a data set shipped with the CreditRisk package (CDS par spreads,
# a zero-rate curve and maturities); V0 = 1 is presumably the normalised
# initial firm value -- see ?calibrate.at1p to confirm.
calibrate.at1p(V0 = 1, cdsrate = cdsdata$Par.spread, r = cdsdata$ED.Zero.Curve,
               t = cdsdata$Maturity)
|
/data/genthat_extracted_code/CreditRisk/examples/calibrate.at1p.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 252
|
r
|
# Packaged example for CreditRisk::calibrate.at1p -- calibrates the AT1P
# (Analytically Tractable First Passage) structural model to market CDS data.
# NOTE(review): duplicate of a script that appears earlier in this file.
library(CreditRisk)
### Name: calibrate.at1p
### Title: AT1P model calibration to market CDS data
### Aliases: calibrate.at1p
### ** Examples
# cdsdata is a data set shipped with the CreditRisk package (CDS par spreads,
# a zero-rate curve and maturities).
calibrate.at1p(V0 = 1, cdsrate = cdsdata$Par.spread, r = cdsdata$ED.Zero.Curve,
               t = cdsdata$Maturity)
|
# Score candidate variants for transcription-factor binding-site disruption
# with motifbreakR, using the HOCOMOCO motif collection, on 22 worker cores.
# Reads "allVariants.Rds" and writes the full result set to "allResults.Rds".
library(motifbreakR)
library(BSgenome.Hsapiens.UCSC.hg19)
# NOTE(review): allVariants.Rds is assumed to hold a motifbreakR-ready variant
# object (e.g. from snps.from.file / snps.from.rsid) -- confirm upstream step.
all.variants.df <- readRDS("allVariants.Rds")
results <- motifbreakR(snpList = all.variants.df,
                       filterp = TRUE,          # interpret `threshold` as a p-value cutoff
                       pwmList = hocomoco,      # HOCOMOCO PWMs bundled with motifbreakR
                       verbose = TRUE,
                       threshold = 1e-4,
                       method = "ic",           # information-content scoring
                       bkg = c(A=0.25, C=0.25, G=0.25, T=0.25),  # uniform background
                       BPPARAM = BiocParallel::MulticoreParam(workers=22))
saveRDS(results,"allResults.Rds")
|
/fine-mapping/motifbreakr.R
|
permissive
|
pwh124/open_chromatin
|
R
| false
| false
| 530
|
r
|
# Score candidate variants for transcription-factor binding-site disruption
# with motifbreakR (HOCOMOCO motifs, 22 worker cores). Reads
# "allVariants.Rds" and writes results to "allResults.Rds".
# NOTE(review): duplicate of a script that appears earlier in this file.
library(motifbreakR)
library(BSgenome.Hsapiens.UCSC.hg19)
all.variants.df <- readRDS("allVariants.Rds")
results <- motifbreakR(snpList = all.variants.df,
                       filterp = TRUE,
                       pwmList = hocomoco,
                       verbose = TRUE,
                       threshold = 1e-4,
                       method = "ic",
                       bkg = c(A=0.25, C=0.25, G=0.25, T=0.25),
                       BPPARAM = BiocParallel::MulticoreParam(workers=22))
saveRDS(results,"allResults.Rds")
|
# Basic R syntax reference: assignment forms, special values, type coercion
# and arithmetic. Intended to be stepped through interactively.
# Assign (four equivalent ways of binding the value 1 to `a`)
1->a
a<-1
a=1
assign("a", 1)
# Infinite: Inf
# We can operate normally with them
# Undefined: NaN
# Denotes a numeric number that is not a number: 0/0
# Missing: NA
# Denotes a not available value
# It is independent of the data type
# Empty: NULL
# Denotes an empty object, is skipped (i.e., removed) for Vectors
# Conversion
# 0
as.numeric(FALSE)
# 1
as.numeric(TRUE)
as.numeric("1")
# NA (with a coercion warning): "A" has no numeric interpretation
as.numeric("A")
# FALSE
as.logical(0)
as.logical("FALSE")
as.logical("F")
# TRUE
as.logical(1)
as.logical("TRUE")
as.logical("T")
# Type (double, character, logical, ...)
typeof(a)
# Mode (numeric, logical, character)
mode(a)
# Length
length(a)
# Sum
s<-1+1
# Subtraction
r<-2-1
# Product
p<-1*2
# Division
d<-4/2
|
/Basic/Basic.R
|
no_license
|
serbelga/Data_Science_R
|
R
| false
| false
| 723
|
r
|
# Basic R syntax reference: assignment forms, special values, type coercion
# and arithmetic. NOTE(review): duplicate of a script earlier in this file.
# Assign (four equivalent ways of binding the value 1 to `a`)
1->a
a<-1
a=1
assign("a", 1)
# Infinite: Inf
# We can operate normally with them
# Undefined: NaN
# Denotes a numeric number that is not a number: 0/0
# Missing: NA
# Denotes a not available value
# It is independent of the data type
# Empty: NULL
# Denotes an empty object, is skipped (i.e., removed) for Vectors
# Conversion
# 0
as.numeric(FALSE)
# 1
as.numeric(TRUE)
as.numeric("1")
# NA (with a coercion warning): "A" has no numeric interpretation
as.numeric("A")
# FALSE
as.logical(0)
as.logical("FALSE")
as.logical("F")
# TRUE
as.logical(1)
as.logical("TRUE")
as.logical("T")
# Type (double, character, logical, ...)
typeof(a)
# Mode (numeric, logical, character)
mode(a)
# Length
length(a)
# Sum
s<-1+1
# Subtraction
r<-2-1
# Product
p<-1*2
# Division
d<-4/2
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GSEA.R
\name{binomialtest.msig.enrch_deplet}
\alias{binomialtest.msig.enrch_deplet}
\title{binomialtest.msig.enrch_deplet}
\usage{
binomialtest.msig.enrch_deplet(mylist, All = All.genes, name,
thedatabase = db)
}
\description{
This function is an internal function calculating the significance
}
|
/man/binomialtest.msig.enrch_deplet.Rd
|
no_license
|
chenweng1991/EZsinglecell
|
R
| false
| true
| 376
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GSEA.R
\name{binomialtest.msig.enrch_deplet}
\alias{binomialtest.msig.enrch_deplet}
\title{binomialtest.msig.enrch_deplet}
\usage{
binomialtest.msig.enrch_deplet(mylist, All = All.genes, name,
thedatabase = db)
}
\description{
This function is an internal function calculating the significance
}
|
### Logistic regression - metadata ###
#Script to perform logistic regression test
#Steps:
#1.Metadata quality control: replace NA with median values and remove columns with only one level
#2.Convert taxonomy table into presence/absence taxonomy table (0,1)
#3.Model1: Logistic regression with all metadata
#4.Model2: Select significant factors in step 3 and create a new model with the new factors
#5.Test model1 and model2
#6.Output
#6.1: creates a new table for each combination and each factor
#6.2: creates a new table for each combination and each factor with significant corrected p.value (<0.05)
#Input files
#
#1.Metadata table
#2.Taxonomy table
#3.column_number: number of the column in the metadata that contains the category factor - numeric value
#Output files
#
#A file for each factor that has an effect on taxonomy variance
#Example metadata_table
#
#SID factor1 factor2 factor3
#Sample1 3.2 23 no
#Sample2 2.4 3 yes
#Sample3 10.3 5 yes
#Example taxonomy_table
#
#SID tax1 tax2 tax3
#Sample1 0.01 1.34 10.2
#Sample2 5.6 0.56 50.2
#Sample3 3.2 6.2 2.34
#Example output
#
#Taxonomy presence_cat1 absence_cat1 presence_cat2 absence_cat2 factor effect p-value corrected p-value model
#tax1 57 298 2 99 age 0.09 0.0001 0.002 model1
#tax2 125 230 10 91 age 1.3 0.003 0.01 model2
#tax3 335 20 69 32 age -0.79 0.00004 0.0009 model2
logistic_regression <- function(metadata_input, taxonomy_table, column_number) {
#Package needed
library (psych)
##Function to calculate nº of 0
nzsum <- function(x){
sum (x==0)
}
##Function to calculate nº of non-0
nsum <- function(x){
sum (x!=0)
}
#Function to create a table for multiple combinations
expand.grid.unique <- function(x, y, include.equals=FALSE){
x <- unique(x)
y <- unique(y)
g <- function(i){
z <- setdiff(y, x[seq_len(i-include.equals)])
if(length(z)) cbind(x[i], z, deparse.level=0)
}
do.call(rbind, lapply(seq_along(x), g))
}
# Remove NA values
# Convert categorical values to numeric
for (i in 1:ncol(metadata_input)) {
if (is.factor(metadata_input[,i]) & any(is.na(metadata_input[,i]))) {
metadata_input[,i] <- as.integer(metadata_input[,i])
}
}
# Replace NA values: median value
for (ii in 1:ncol(metadata_input)){
for (jj in 1:nrow(metadata_input)) {
if (is.na(metadata_input[jj,ii])){
x = describe(metadata_input[,ii])
a = x$median
metadata_input[jj,ii] = a
}
}
}
##Remove columns with only one level
metadata_input <- metadata_input[, sapply(metadata_input, function(col) length(unique(col))) > 1]
#Create presence/absence taxonomy table
p_a_table <- taxonomy_table
for (i in 1:ncol(taxonomy_table)) {
for (j in 1:nrow(taxonomy_table)) {
if (taxonomy_table[j,i]>0) {
p_a_table[j,i] = 1
}
}
}
#Multiple combinations
llista = unique(as.vector(metadata_input[,column_number]))
combination_list <- expand.grid.unique(llista,llista)
matrix_list <- list()
table_variables <- matrix(ncol = 2, nrow = ncol(p_a_table))
for (aa in 1:nrow(combination_list)){
new_metadata <- metadata_input[metadata_input[,1]==combination_list[aa,1] | metadata_input[,1] == combination_list[aa,2],]
##Remove columns with only one level
new_metadata1 <- new_metadata[, sapply(new_metadata, function(col) length(unique(col))) > 1]
#For each taxonomy
for (x in 1:ncol(p_a_table)) {
#Get column
name_column <- colnames(p_a_table)[x]
taxonomy <- subset(p_a_table, select = name_column)
#Create a table for the model. Merge taxonomy column with metadata
model_table <- merge(taxonomy, new_metadata1, by = "row.names" )
row.names(model_table) <- model_table[,1]
model_table <- model_table[,-1]
#Change taxonomy name
colnames(model_table)[1] <- "Taxonomy"
#Sort
#model_table <- model_table[ , order(names(model_table))]
#Calculate model
model <- glm(Taxonomy ~ . , family = binomial(link = "logit"), data = model_table)
##Calculate Anova
anova_test <- anova(model, test = "Chisq")
##Keep significative variables for model2
variables_model2 <- subset(anova_test, anova_test[,5]<0.05)
list_variables_model1 <- as.vector(c(colnames(model_table)))
list_variables_model1 <- paste(c(list_variables_model1), collapse=',' )
#If there are significative variables
if (nrow(variables_model2)>0) {
# Save names of the significative variables: create a new metadata table with these variables (model_table2)
matrix_variables_model2 <- as.data.frame(rownames(variables_model2))
rownames(matrix_variables_model2) <- matrix_variables_model2[,1]
t_model_table <- t(model_table)
list_variables_model2 <- as.vector(matrix_variables_model2$`rownames(variables_model2)`)
list_variables_model2 <- paste(c(list_variables_model2), collapse=', ' )
t_model_table2 <- merge(matrix_variables_model2, t_model_table, by = "row.names")
rownames(t_model_table2) <- t_model_table2[,1]
t_model_table2[1:2] <- NULL
model_table2 <- t(t_model_table2)
# Merge the new table with the taxonomy again (lost in the last merge)
model_table2 <- merge(taxonomy, model_table2, by = "row.names" )
row.names(model_table2) <- model_table2[,1]
model_table2 <- model_table2[,-1]
#Change taxonomy name
colnames(model_table2)[1] <- "Taxonomy"
# Change character to numeric
for (ii in 1:ncol(model_table2)) {
column_name2 <- colnames(model_table2)[ii]
for (jjj in 1:ncol(model_table)) {
column_name <- colnames(model_table)[jjj]
if (column_name == column_name2 & is.numeric(model_table[,jjj])) {
model_table2[,ii] <- as.numeric(as.character(model_table2[,ii]))
}
}
}
# Sort
#model_table2 <- model_table2[ , order(names(model_table2))]
## Calculate model2 with the new table: contains only the significative variables in anova test
model2 <- glm(Taxonomy ~ . , family = binomial(link="logit"), data = model_table2)
# Test the two models
anova_test <- anova(model, model2, test = "Chisq")
# Model 2 and model 1 are equal: save model 1 results
if (is.na(anova_test[2,5])) {
summary_table <- summary(model)
coefficients_table <- as.data.frame(summary_table$coefficients)
category1_samples <- subset(model_table, model_table[,2]==combination_list[aa,1])
nsum1 = nsum(category1_samples$Taxonomy)
nzsum1 = nzsum(category1_samples$Taxonomy)
category2_samples <- subset(model_table, model_table[,2]==combination_list[aa,2])
nsum2 = nsum(category2_samples$Taxonomy)
nzsum2 = nzsum(category2_samples$Taxonomy)
loop_matrix <- matrix(ncol = 9, nrow = nrow(coefficients_table))
colnames(loop_matrix) <- c("Taxonomy","presence_cat1", "absence_cat1", "presence_cat2", "absence_cat2", "Variable","effect","p_value", "Model")
a = 1
for (jj in 1:nrow(coefficients_table)) {
loop_matrix[a,1] = name_column
loop_matrix[a,2] = nsum1
loop_matrix[a,3] = nzsum1
loop_matrix[a,4] = nsum2
loop_matrix[a,5] = nzsum2
loop_matrix[a,6] = rownames(coefficients_table)[jj]
loop_matrix[a,7] = coefficients_table[jj,1]
loop_matrix[a,8] = coefficients_table[jj,4]
loop_matrix[a,9] = "model_1"
a <- a + 1
}
loop_matrix <- na.omit(loop_matrix)
loop_matrix1 <- loop_matrix
for (kk in 1:nrow(loop_matrix)) {
if (loop_matrix[kk,6]=="(Intercept)") {
loop_matrix1 <- loop_matrix[-kk,]
}
}
matrix_list[[x]] <- loop_matrix1
table_variables[x,1] = name_column
table_variables[x,2] = list_variables_model1
}
#Model 2 is not better than model 1: save model 1 results
else if (anova_test[2,5]<0.05) {
summary_table <- summary(model)
coefficients_table <- as.data.frame(summary_table$coefficients)
category1_samples <- subset(model_table, model_table[,2]==combination_list[aa,1])
nsum1 = nsum(category1_samples$Taxonomy)
nzsum1 = nzsum(category1_samples$Taxonomy)
category2_samples <- subset(model_table, model_table[,2]==combination_list[aa,2])
nsum2 = nsum(category2_samples$Taxonomy)
nzsum2 = nzsum(category2_samples$Taxonomy)
loop_matrix <- matrix(ncol = 9, nrow = nrow(coefficients_table))
colnames(loop_matrix) <- c("Taxonomy","presence_cat1", "absence_cat1", "presence_cat2", "absence_cat2", "Variable","effect","p_value", "Model")
a = 1
for (jj in 1:nrow(coefficients_table)) {
loop_matrix[a,1] = name_column
loop_matrix[a,2] = nsum1
loop_matrix[a,3] = nzsum1
loop_matrix[a,4] = nsum2
loop_matrix[a,5] = nzsum2
loop_matrix[a,6] = rownames(coefficients_table)[jj]
loop_matrix[a,7] = coefficients_table[jj,1]
loop_matrix[a,8] = coefficients_table[jj,4]
loop_matrix[a,9] = "model_1"
a <- a + 1
}
loop_matrix <- na.omit(loop_matrix)
loop_matrix1 <- loop_matrix
for (kk in 1:nrow(loop_matrix)) {
if (loop_matrix[kk,6]=="(Intercept)") {
loop_matrix1 <- loop_matrix[-kk,]
}
}
matrix_list[[x]] <- loop_matrix1
table_variables[x,1] = name_column
table_variables[x,2] = list_variables_model1
}
##Model 2 is better than model 1: save model 2 results
else {
summary_table2 <- summary(model2)
#Save coefficients results
coefficients_table <- as.data.frame(summary_table2$coefficients)
#Get number presence/absence of the taxonomy for each category
category1_samples <- subset(model_table, model_table[,2]==combination_list[aa,1])
nsum1 = nsum(category1_samples$Taxonomy)
nzsum1 = nzsum(category1_samples$Taxonomy)
category2_samples <- subset(model_table, model_table[,2]==combination_list[aa,2])
nsum2 = nsum(category2_samples$Taxonomy)
nzsum2 = nzsum(category2_samples$Taxonomy)
loop_matrix <- matrix(ncol = 9, nrow = nrow(coefficients_table))
colnames(loop_matrix) <- c("Taxonomy","presence_cat1", "absence_cat1", "presence_cat2", "absence_cat2", "Variable","effect","p_value", "Model")
a = 1
#Save in a new matrix:
for (jj in 1:nrow(coefficients_table)) {
loop_matrix[a,1] = name_column #name taxonomy
loop_matrix[a,2] = nsum1 #presence taxonomy in category1
loop_matrix[a,3] = nzsum1 #absence taxonomy in category1
loop_matrix[a,4] = nsum2 #presence taxonomy in category2
loop_matrix[a,5] = nzsum2 #absence taxonomy in category2
loop_matrix[a,6] = rownames(coefficients_table)[jj] #name of the variable
loop_matrix[a,7] = coefficients_table[jj,1] #effect value
loop_matrix[a,8] = coefficients_table[jj,4] #p_value
loop_matrix[a,9] = "model_2"
a <- a + 1
}
loop_matrix <- na.omit(loop_matrix) #remove empty rows (NA values)
loop_matrix1 <- loop_matrix
for (kk in 1:nrow(loop_matrix)) { #remove (Intercept) results
if (loop_matrix[kk,6]=="(Intercept)") {
loop_matrix1 <- loop_matrix[-kk,]
}
}
matrix_list[[x]] <- loop_matrix1 #Save the new matrix in a list of matrix
table_variables[x,1] = name_column
table_variables[x,2] = list_variables_model2
}
}
## If are not significative variables in anova test: keep model1 results
else {
summary_table <- summary(model)
coefficients_table <- as.data.frame(summary_table$coefficients)
category1_samples <- subset(model_table, model_table[,2]==combination_list[aa,1])
nsum1 = nsum(category1_samples$Taxonomy)
nzsum1 = nzsum(category1_samples$Taxonomy)
category2_samples <- subset(model_table, model_table[,2]==combination_list[aa,2])
nsum2 = nsum(category2_samples$Taxonomy)
nzsum2 = nzsum(category2_samples$Taxonomy)
loop_matrix <- matrix(ncol = 9, nrow = nrow(coefficients_table))
colnames(loop_matrix) <- c("Taxonomy","presence_cat1", "absence_cat1", "presence_cat2", "absence_cat2", "Variable","effect","p_value", "Model")
a = 1
for (jj in 1:nrow(coefficients_table)) {
loop_matrix[a,1] = name_column
loop_matrix[a,2] = nsum1
loop_matrix[a,3] = nzsum1
loop_matrix[a,4] = nsum2
loop_matrix[a,5] = nzsum2
loop_matrix[a,6] = rownames(coefficients_table)[jj]
loop_matrix[a,7] = coefficients_table[jj,1]
loop_matrix[a,8] = coefficients_table[jj,4]
loop_matrix[a,9] = "model_1"
a <- a + 1
}
loop_matrix <- na.omit(loop_matrix)
loop_matrix1 <- loop_matrix
for (kk in 1:nrow(loop_matrix)) {
if (loop_matrix[kk,6]=="(Intercept)") {
loop_matrix1 <- loop_matrix[-kk,]
}
}
matrix_list[[x]] <- loop_matrix1
table_variables[x,1] = name_column
table_variables[x,2] = list_variables_model1
}
#Save in the same matrix all the results
all_matrix <- as.data.frame(do.call(rbind, matrix_list))
colnames(table_variables) <- c("Taxonomy", "Variables")
write.table(table_variables, file = "./table_variables_logistic_regression.txt", sep = "\t", quote = F)
}
#Split by Variable
split_matrix <- split(all_matrix, all_matrix$Variable)
for (bb in 1:length(split_matrix)){
#Correct by p_values
matrix <- as.data.frame(split_matrix[bb])
p_value <- as.vector(matrix[,8])
corrected_pvalues <- p.adjust(p_value, method = "fdr")
#Add a new column with the new p_values
matrix <- cbind(matrix, corrected_pvalues)
name <- colnames(matrix)[6]
name <- paste(name, combination_list[aa,1], "vs", combination_list[aa,2], sep = "")
nc <- paste(name, ".txt", sep ="")
assign(nc, matrix)
final_name_matrix <- paste("./", nc, sep = "")
write.table(matrix, file = final_name_matrix, quote = F, sep = "\t")
## Filtering significant results
#Filter by the new p_values
filtered_matrix <- subset(matrix, matrix[,10]<0.05)
name2 <- colnames(filtered_matrix)[6]
name2 <- paste(name2, combination_list[aa,1],"vs",combination_list[aa,2], sep ="")
nc2 <- paste(name2, "_filtered.txt", sep ="")
assign(nc2, filtered_matrix)
final_name_matrix2 <- paste("./", nc2, sep = "")
#Not print empty tables
if (nrow(filtered_matrix)>0) {
write.table(filtered_matrix, file = final_name_matrix2, quote = F, sep = "\t" )
}
}
}
}
|
/Function Scripts/logistic_regression_function.R
|
no_license
|
pausura/Project_Intestinal_Microbiome
|
R
| false
| false
| 16,140
|
r
|
### Logistic regression - metadata ###
# Script to perform a logistic regression test of taxon presence/absence
# against metadata variables.
#
# Steps:
# 1. Metadata quality control: replace NA with median values and remove
#    columns with only one level
# 2. Convert the taxonomy table into a presence/absence (0/1) table
# 3. Model1: logistic regression with all metadata variables
# 4. Model2: refit using only the variables significant in step 3's ANOVA
# 5. Compare model1 and model2 and keep the favoured one
# 6. Output:
#    6.1 a table for each category combination and each factor
#    6.2 the same table filtered to FDR-corrected p-value < 0.05
#
# Arguments:
#   metadata_input - data.frame, samples x metadata (row names = sample IDs)
#   taxonomy_table - data.frame, samples x taxa abundances (row names = sample IDs)
#   column_number  - numeric index of the metadata column holding the category factor
#
# Side effects (files written to the working directory):
#   - table_variables_logistic_regression.txt: variables used per taxon
#   - <variable><cat1>vs<cat2>.txt per variable, plus a "_filtered" version
#     restricted to corrected p < 0.05 (empty tables are not written)
logistic_regression <- function(metadata_input, taxonomy_table, column_number) {
  # psych::describe() supplies the medians used for NA imputation
  library(psych)

  ## Number of zeros in a vector (taxon absences)
  nzsum <- function(x) {
    sum(x == 0)
  }
  ## Number of non-zeros in a vector (taxon presences)
  nsum <- function(x) {
    sum(x != 0)
  }
  ## All unique unordered pairs drawn from x and y
  expand.grid.unique <- function(x, y, include.equals = FALSE) {
    x <- unique(x)
    y <- unique(y)
    g <- function(i) {
      z <- setdiff(y, x[seq_len(i - include.equals)])
      if (length(z)) cbind(x[i], z, deparse.level = 0)
    }
    do.call(rbind, lapply(seq_along(x), g))
  }

  ## Turn a fitted-model summary into one result row per coefficient
  ## (intercept excluded), annotated with the presence/absence counts of the
  ## taxon in each of the two categories being compared.
  ## This replaces four nearly identical copy-pasted blocks in the original.
  build_result_matrix <- function(model_summary, model_table, cat1, cat2,
                                  name_column, model_label) {
    coefficients_table <- as.data.frame(model_summary$coefficients)
    category1_samples <- subset(model_table, model_table[, 2] == cat1)
    category2_samples <- subset(model_table, model_table[, 2] == cat2)
    loop_matrix <- matrix(ncol = 9, nrow = nrow(coefficients_table))
    colnames(loop_matrix) <- c("Taxonomy", "presence_cat1", "absence_cat1",
                               "presence_cat2", "absence_cat2", "Variable",
                               "effect", "p_value", "Model")
    for (jj in seq_len(nrow(coefficients_table))) {
      loop_matrix[jj, ] <- c(name_column,
                             nsum(category1_samples$Taxonomy),   # presence in cat1
                             nzsum(category1_samples$Taxonomy),  # absence in cat1
                             nsum(category2_samples$Taxonomy),   # presence in cat2
                             nzsum(category2_samples$Taxonomy),  # absence in cat2
                             rownames(coefficients_table)[jj],   # variable name
                             coefficients_table[jj, 1],          # effect size
                             coefficients_table[jj, 4],          # p-value
                             model_label)
    }
    loop_matrix <- na.omit(loop_matrix)  # drop rows that stayed empty (NA)
    # Remove the "(Intercept)" row. drop = FALSE keeps the matrix shape even
    # when a single row remains (the original `[-kk, ]` indexing could
    # collapse the result to a bare vector).
    loop_matrix[loop_matrix[, 6] != "(Intercept)", , drop = FALSE]
  }

  ## 1. Metadata quality control ----------------------------------------
  # Factor columns containing NA are recoded as integer codes so a median exists
  for (i in seq_len(ncol(metadata_input))) {
    if (is.factor(metadata_input[, i]) && any(is.na(metadata_input[, i]))) {
      metadata_input[, i] <- as.integer(metadata_input[, i])
    }
  }
  # Replace every NA with its column median
  for (ii in seq_len(ncol(metadata_input))) {
    for (jj in seq_len(nrow(metadata_input))) {
      if (is.na(metadata_input[jj, ii])) {
        metadata_input[jj, ii] <- describe(metadata_input[, ii])$median
      }
    }
  }
  # Remove columns with only one level (no information for the model)
  metadata_input <- metadata_input[, sapply(metadata_input, function(col) length(unique(col))) > 1]

  ## 2. Presence/absence (0/1) taxonomy table ---------------------------
  p_a_table <- taxonomy_table
  for (i in seq_len(ncol(taxonomy_table))) {
    for (j in seq_len(nrow(taxonomy_table))) {
      if (taxonomy_table[j, i] > 0) {
        p_a_table[j, i] <- 1
      }
    }
  }

  ## 3. Pairwise category combinations ----------------------------------
  llista <- unique(as.vector(metadata_input[, column_number]))
  combination_list <- expand.grid.unique(llista, llista)
  matrix_list <- list()
  table_variables <- matrix(ncol = 2, nrow = ncol(p_a_table))
  for (aa in seq_len(nrow(combination_list))) {
    # Keep only samples belonging to the two categories of this combination.
    # NOTE(review): the original filters on column 1 rather than column_number;
    # preserved as-is - confirm the category factor is in the first column.
    new_metadata <- metadata_input[metadata_input[, 1] == combination_list[aa, 1] |
                                     metadata_input[, 1] == combination_list[aa, 2], ]
    # Remove columns left with only one level after subsetting
    new_metadata1 <- new_metadata[, sapply(new_metadata, function(col) length(unique(col))) > 1]

    ## 4. Fit and compare the models for each taxon ---------------------
    for (x in seq_len(ncol(p_a_table))) {
      name_column <- colnames(p_a_table)[x]
      taxonomy <- subset(p_a_table, select = name_column)
      # Merge the taxon presence/absence column with the metadata by sample ID
      model_table <- merge(taxonomy, new_metadata1, by = "row.names")
      row.names(model_table) <- model_table[, 1]
      model_table <- model_table[, -1]
      colnames(model_table)[1] <- "Taxonomy"
      # Model1: all metadata variables
      model <- glm(Taxonomy ~ ., family = binomial(link = "logit"), data = model_table)
      # Sequential chi-squared ANOVA of model1
      anova_test <- anova(model, test = "Chisq")
      # Variables significant in the ANOVA feed model2
      variables_model2 <- subset(anova_test, anova_test[, 5] < 0.05)
      list_variables_model1 <- paste(colnames(model_table), collapse = ",")
      if (nrow(variables_model2) > 0) {
        # Build model_table2 containing only the significant variables.
        # The transpose/merge dance below selects rows of t(model_table)
        # whose names match the significant variables.
        matrix_variables_model2 <- as.data.frame(rownames(variables_model2))
        rownames(matrix_variables_model2) <- matrix_variables_model2[, 1]
        t_model_table <- t(model_table)
        list_variables_model2 <- paste(as.vector(matrix_variables_model2$`rownames(variables_model2)`), collapse = ", ")
        t_model_table2 <- merge(matrix_variables_model2, t_model_table, by = "row.names")
        rownames(t_model_table2) <- t_model_table2[, 1]
        t_model_table2[1:2] <- NULL
        model_table2 <- t(t_model_table2)
        # Merge the taxon column back in (lost in the transpose/merge above)
        model_table2 <- merge(taxonomy, model_table2, by = "row.names")
        row.names(model_table2) <- model_table2[, 1]
        model_table2 <- model_table2[, -1]
        colnames(model_table2)[1] <- "Taxonomy"
        # t() coerced everything to character; restore the numeric columns
        for (ii in seq_len(ncol(model_table2))) {
          column_name2 <- colnames(model_table2)[ii]
          for (jjj in seq_len(ncol(model_table))) {
            column_name <- colnames(model_table)[jjj]
            if (column_name == column_name2 && is.numeric(model_table[, jjj])) {
              model_table2[, ii] <- as.numeric(as.character(model_table2[, ii]))
            }
          }
        }
        # Model2: only the variables significant in the ANOVA
        model2 <- glm(Taxonomy ~ ., family = binomial(link = "logit"), data = model_table2)
        # Compare the nested models
        anova_test <- anova(model, model2, test = "Chisq")
        if (is.na(anova_test[2, 5]) || anova_test[2, 5] < 0.05) {
          # NA p-value: the two models are equivalent; p < 0.05: the
          # variables dropped from model1 mattered. Either way keep model1.
          matrix_list[[x]] <- build_result_matrix(summary(model), model_table,
                                                  combination_list[aa, 1],
                                                  combination_list[aa, 2],
                                                  name_column, "model_1")
          table_variables[x, 2] <- list_variables_model1
        } else {
          # Model2 explains the data as well with fewer variables: keep it
          matrix_list[[x]] <- build_result_matrix(summary(model2), model_table,
                                                  combination_list[aa, 1],
                                                  combination_list[aa, 2],
                                                  name_column, "model_2")
          table_variables[x, 2] <- list_variables_model2
        }
      } else {
        # No significant variables in the ANOVA: keep model1 results
        matrix_list[[x]] <- build_result_matrix(summary(model), model_table,
                                                combination_list[aa, 1],
                                                combination_list[aa, 2],
                                                name_column, "model_1")
        table_variables[x, 2] <- list_variables_model1
      }
      table_variables[x, 1] <- name_column
    }
    # Combine all per-taxon results. (Hoisted out of the taxon loop: the
    # original recomputed and rewrote this on every iteration with the same
    # final result.)
    all_matrix <- as.data.frame(do.call(rbind, matrix_list))
    colnames(table_variables) <- c("Taxonomy", "Variables")
    write.table(table_variables, file = "./table_variables_logistic_regression.txt",
                sep = "\t", quote = FALSE)

    ## 5. Per-variable FDR correction and output ------------------------
    split_matrix <- split(all_matrix, all_matrix$Variable)
    for (bb in seq_along(split_matrix)) {
      matrix <- as.data.frame(split_matrix[bb])
      # Correct p-values across taxa for this variable (Benjamini-Hochberg)
      corrected_pvalues <- p.adjust(as.vector(matrix[, 8]), method = "fdr")
      matrix <- cbind(matrix, corrected_pvalues)
      # Output name: <variable><cat1>vs<cat2>.txt
      name <- paste(colnames(matrix)[6], combination_list[aa, 1], "vs",
                    combination_list[aa, 2], sep = "")
      nc <- paste(name, ".txt", sep = "")
      assign(nc, matrix)
      write.table(matrix, file = paste("./", nc, sep = ""), quote = FALSE, sep = "\t")
      # Filtered output: only FDR-corrected p < 0.05
      filtered_matrix <- subset(matrix, matrix[, 10] < 0.05)
      name2 <- paste(colnames(filtered_matrix)[6], combination_list[aa, 1], "vs",
                     combination_list[aa, 2], sep = "")
      nc2 <- paste(name2, "_filtered.txt", sep = "")
      assign(nc2, filtered_matrix)
      # Do not write empty tables
      if (nrow(filtered_matrix) > 0) {
        write.table(filtered_matrix, file = paste("./", nc2, sep = ""),
                    quote = FALSE, sep = "\t")
      }
    }
  }
}
|
# run_analysis.R - builds a tidy data set from the UCI HAR (Human Activity
# Recognition) smartphone data, then writes per-subject/activity means.
# NOTE(review): all input paths are hard-coded absolute Windows paths, and the
# UCI files are space-delimited, so read.csv (comma-separated) presumably loads
# each file as a single column - confirm against the raw data.
#download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip","data.zip")
#unzip("data.zip")
trainData<-read.csv("C:/Users/Dawit/Documents/CourseProject/UCI HAR Dataset/train/subject_train.txt")
#Column bind all of the train data (subject IDs, measurements, activity labels)
trainData<-cbind(trainData,read.csv("C:/Users/Dawit/Documents/CourseProject/UCI HAR Dataset/train/X_train.txt"),read.csv("C:/Users/Dawit/Documents/CourseProject/UCI HAR Dataset/train/y_train.txt"))
#Change the names of the columns to make them readable
colnames(trainData)<-c("Subject","Measure","Activity")
#Read and bind the test data the same way
testData<-read.csv("C:/Users/Dawit/Documents/CourseProject/UCI HAR Dataset/test/subject_test.txt")
testData<-cbind(testData,read.csv("C:/Users/Dawit/Documents/CourseProject/UCI HAR Dataset/test/X_test.txt"),read.csv("C:/Users/Dawit/Documents/CourseProject/UCI HAR Dataset/test/y_test.txt"))
colnames(testData)<-c("Subject","Measure","Activity")
#Coerce the measurement column to numeric in both sets
testData[,2]<-as.numeric(testData[,2])
trainData[,2]<-as.numeric(trainData[,2])
#Merge (stack) the train and test datasets together
mergedData<-rbind(trainData,testData)
#Turn the activity variable into a factor variable
mergedData$Activity<-as.factor(mergedData$Activity)
#Replace the activity codes 1-6 with descriptive labels
mergedData$Activity<-factor(mergedData$Activity,levels=c(1,2,3,4,5,6),labels=c("WALKING","WALKING_UPSTAIRS","WALKING_DOWNSTAIRS","SITTING","STANDING","LAYING"))
#This is the merged dataset (part 1)
head(mergedData)
#PART 2: mean of each variable for every subject/activity pair
data<-aggregate(mergedData,by=list(mergedData$Subject,mergedData$Activity),mean,na.rm=T)
#Delete the columns that are no longer needed
data[,3]<-NULL
data[,4]<-NULL
#Change the variable names
colnames(data)<-c("SUBJECT","ACTIVITY","MEAN_MEASUREMENT")
#Write the tidy dataset for part 2
write.csv(data,"TidyData.csv")
|
/run_analysis.R
|
no_license
|
DawitHabtemariam/GettingData
|
R
| false
| false
| 1,743
|
r
|
# run_analysis.R - builds a tidy data set from the UCI HAR (Human Activity
# Recognition) smartphone data, then writes per-subject/activity means.
# NOTE(review): all input paths are hard-coded absolute Windows paths, and the
# UCI files are space-delimited, so read.csv (comma-separated) presumably loads
# each file as a single column - confirm against the raw data.
#download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip","data.zip")
#unzip("data.zip")
trainData<-read.csv("C:/Users/Dawit/Documents/CourseProject/UCI HAR Dataset/train/subject_train.txt")
#Column bind all of the train data (subject IDs, measurements, activity labels)
trainData<-cbind(trainData,read.csv("C:/Users/Dawit/Documents/CourseProject/UCI HAR Dataset/train/X_train.txt"),read.csv("C:/Users/Dawit/Documents/CourseProject/UCI HAR Dataset/train/y_train.txt"))
#Change the names of the columns to make them readable
colnames(trainData)<-c("Subject","Measure","Activity")
#Read and bind the test data the same way
testData<-read.csv("C:/Users/Dawit/Documents/CourseProject/UCI HAR Dataset/test/subject_test.txt")
testData<-cbind(testData,read.csv("C:/Users/Dawit/Documents/CourseProject/UCI HAR Dataset/test/X_test.txt"),read.csv("C:/Users/Dawit/Documents/CourseProject/UCI HAR Dataset/test/y_test.txt"))
colnames(testData)<-c("Subject","Measure","Activity")
#Coerce the measurement column to numeric in both sets
testData[,2]<-as.numeric(testData[,2])
trainData[,2]<-as.numeric(trainData[,2])
#Merge (stack) the train and test datasets together
mergedData<-rbind(trainData,testData)
#Turn the activity variable into a factor variable
mergedData$Activity<-as.factor(mergedData$Activity)
#Replace the activity codes 1-6 with descriptive labels
mergedData$Activity<-factor(mergedData$Activity,levels=c(1,2,3,4,5,6),labels=c("WALKING","WALKING_UPSTAIRS","WALKING_DOWNSTAIRS","SITTING","STANDING","LAYING"))
#This is the merged dataset (part 1)
head(mergedData)
#PART 2: mean of each variable for every subject/activity pair
data<-aggregate(mergedData,by=list(mergedData$Subject,mergedData$Activity),mean,na.rm=T)
#Delete the columns that are no longer needed
data[,3]<-NULL
data[,4]<-NULL
#Change the variable names
colnames(data)<-c("SUBJECT","ACTIVITY","MEAN_MEASUREMENT")
#Write the tidy dataset for part 2
write.csv(data,"TidyData.csv")
|
### Exploratory Data Analysis - Course Project 1 (Plot 2) by Evgeniy Paskin
### Downloads the UCI household power data, keeps two days of February 2007,
### and renders Global Active Power over time as a line chart in a PNG.
library(lubridate)

## Fetch the zipped data set and read the semicolon-separated table inside it
zip_name <- "exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", zip_name)
raw <- read.table(unz(zip_name, "household_power_consumption.txt"),
                  header = TRUE, sep = ";", stringsAsFactors = FALSE)
unlink(zip_name)  # remove the downloaded archive

## Parse dates/times and coerce the measurement columns to numeric
power <- raw
power$Date <- as.Date(power$Date, format = "%d/%m/%Y")
power$Time <- strptime(as.character(power$Time), "%H:%M:%S")
date(power$Time) <- power$Date  # lubridate: stamp the real date onto each time
measurement_cols <- c("Global_active_power", "Global_reactive_power", "Voltage",
                      "Global_intensity", "Sub_metering_1", "Sub_metering_2",
                      "Sub_metering_3")
for (col in measurement_cols) {
  power[[col]] <- as.numeric(power[[col]])
}

## Keep only the rows between 2007-02-01 and 2007-02-02 (inclusive)
from_date <- as.Date("2007-02-01")
to_date <- as.Date("2007-02-02")
two_days <- power[power$Date >= from_date & power$Date <= to_date, ]

## Render the line plot to a 480x480 PNG
png(filename = "plot2.png", height = 480, width = 480, bg = "transparent")
plot(two_days$Time, two_days$Global_active_power,
     type = "l",
     lty = 1,
     xlab = "Day of week",
     ylab = "Global Active Power (kilowatts)",
     main = "",
     cex.axis = 0.75,  # reducing label sizes
     cex.lab = 0.75,   # reducing label sizes
     cex.main = 0.8    # reducing label sizes
)
## Save the plot and close the device
dev.off()
|
/Plot2.R
|
no_license
|
EvgeniyPaskin/ExData_Plotting1
|
R
| false
| false
| 1,907
|
r
|
### Exploratory Data Assignment
### Peer Graded Assignment: Course Project 1 by Evgeniy Paskin
### Setting environment (lubridate provides the date<- replacement used below)
library(lubridate)
### Downloading and reading the zipped file; the table inside is
### semicolon-separated with a header row
FileName <- "exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",FileName)
DT <- read.table(unz(FileName, "household_power_consumption.txt"), header = TRUE, sep=";", stringsAsFactors = FALSE)
unlink(FileName) # remove the downloaded archive
### Cleaning data and fixing variable types:
### parse dates/times, then coerce each measurement column to numeric
DT1 <- DT
DT1$Date <- as.Date(DT1$Date, format="%d/%m/%Y" )
DT1$Time <- strptime(as.character(DT1$Time), "%H:%M:%S" )
# lubridate's date<- stamps the real calendar date onto each parsed time
date(DT1$Time) <- DT1$Date
DT1$Global_active_power <- as.numeric( DT1$Global_active_power)
DT1$Global_reactive_power <- as.numeric(DT1$Global_reactive_power)
DT1$Voltage <- as.numeric(DT1$Voltage)
DT1$Global_intensity <- as.numeric(DT1$Global_intensity)
DT1$Sub_metering_1 <- as.numeric(DT1$Sub_metering_1)
DT1$Sub_metering_2 <- as.numeric(DT1$Sub_metering_2)
DT1$Sub_metering_3 <- as.numeric(DT1$Sub_metering_3)
# Then subsetting the required dates between 2007-02-01 and 2007-02-02 (inclusive)
minDate <- as.Date(c("2007-02-01"))
maxDate <- as.Date(c("2007-02-02"))
DT2 <- subset(DT1, (DT1$Date>= minDate & DT1$Date<=maxDate) )
DATA <- DT2
### Plot 2: Global Active Power over time as a line chart
#Starting PNG device (480x480, transparent background)
png(filename="plot2.png", height=480, width=480, bg="transparent")
#Plotting data
plot( DATA$Time,DATA$Global_active_power,
type="l",
lty=1,
xlab = "Day of week",
ylab = "Global Active Power (kilowatts)",
main = "",
cex.axis = 0.75, #reducing label sizes
cex.lab = 0.75, #reducing label sizes
cex.main = 0.8 #reducing label sizes
)
# Saving the plot and closing device
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/user_defined_processes.R
\name{UserProcessCollection}
\alias{UserProcessCollection}
\title{User Defined Process Collection}
\description{
This object contains template functions from the users stored user defined processes (UDP), which can be reused in other process graphs.
}
\details{
This object is an unlocked R6 object, that allows us to add new functions to this object at runtime. It is structured in the same way
as the \code{\link[=ProcessCollection]{ProcessCollection()}} for predefined processes by the openEO back-end. A \code{\link[=UserProcessCollection]{UserProcessCollection()}} is usually created at
\code{\link[=user_processes]{user_processes()}}. If you have submitted new user defined processes to the back-end, make sure to call \code{\link[=user_processes]{user_processes()}} again
to fetch the latest status.
}
\section{Methods}{
\describe{
\item{\verb{$new(con = NULL)}}{The object creator, which takes an optional openEO connection.}
}
}
\section{Arguments}{
\describe{
\item{con}{optional - an active and authenticated Connection (optional) otherwise \code{\link[=active_connection]{active_connection()}}
is used.}
}
}
|
/man/UserProcessCollection.Rd
|
permissive
|
Open-EO/openeo-r-client
|
R
| false
| true
| 1,213
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/user_defined_processes.R
\name{UserProcessCollection}
\alias{UserProcessCollection}
\title{User Defined Process Collection}
\description{
This object contains template functions from the users stored user defined processes (UDP), which can be reused in other process graphs.
}
\details{
This object is an unlocked R6 object, that allows us to add new functions to this object at runtime. It is structured in the same way
as the \code{\link[=ProcessCollection]{ProcessCollection()}} for predefined processes by the openEO back-end. A \code{\link[=UserProcessCollection]{UserProcessCollection()}} is usually created at
\code{\link[=user_processes]{user_processes()}}. If you have submitted new user defined processes to the back-end, make sure to call \code{\link[=user_processes]{user_processes()}} again
to fetch the latest status.
}
\section{Methods}{
\describe{
\item{\verb{$new(con = NULL)}}{The object creator, which takes an optional openEO connection.}
}
}
\section{Arguments}{
\describe{
\item{con}{optional - an active and authenticated Connection (optional) otherwise \code{\link[=active_connection]{active_connection()}}
is used.}
}
}
|
## Segment normalized bin-level copy-number data for one TCGA GBM sample.
## Usage: Rscript 4-segmentBins.R <i>  (i = index into the input file list)
i <- as.numeric(commandArgs(trailingOnly = TRUE))
library(segmentr)
library(rCGH)
library(GenomicRanges)
### Build hg38 centromere ranges (rCGH codes X/Y as chromosomes 23/24)
hg38.centromeres <- rCGH::hg38
chr <- paste0("chr", hg38.centromeres$chrom)
chr[chr=="chr23"] <- "chrX"
chr[chr=="chr24"] <- "chrY"
# Pad each centromere by 100 bp on both sides
centromeres <- GRanges(seqnames = chr,
                       ranges = IRanges(start = hg38.centromeres$centromerStart - 100,
                                        end = hg38.centromeres$centromerEnd + 100))
inDir <- "/dcl01/scharpf1/data/gridcnp_analysis/tcga_gbm/jfkit/cases"
# list.files() takes a regular expression, not a shell glob: the original
# pattern "*log2norm.rds" had a dangling leading '*' and an unescaped '.'.
# Anchor the intended suffix instead.
files <- list.files(inDir, "log2norm\\.rds$")
file <- files[i]
bins <- readRDS(file.path(inDir, file))
# Removing flagged bins
bins <- bins[!bins$flag]
# Remove bins with variance above 95th percentile in normals
upper.var <- quantile(bins$var, .95)
bins <- bins[bins$var < upper.var]
# Fixed seed so the sdundo segmentation is reproducible
set.seed(123)
segments <- segmentr::segmentBins(bins, alpha = 0.01, undo.splits = "sdundo",
                                  undo.SD = 2, centromeres = centromeres)
# Segments supported by <= 3 probes are set aside and not fine-tuned
hits <- which(segments$n.probes <= 3)
if (length(hits) > 0) {
  lte3 <- segments[hits]
  segments <- segments[-hits]
} else {
  lte3 <- NULL
}
finetuned <- segmentr::finetune.segments(bins = bins, segments = segments,
                                         alpha = 0.01, centromeres = centromeres, undo.SD = 1)
# Re-attach the small segments that were excluded from fine-tuning
if (length(lte3) > 0) {
  finetuned <- sort(c(finetuned, lte3))
}
# Output name drops the "log2norm" infix; write next to other tumor segments
output.file <- gsub("log2norm", "", file)
segDir <- "/dcl01/scharpf1/data/gridcnp_analysis/tcga_gbm/jfkit/tumor_segments"
if (!dir.exists(segDir)) {
  dir.create(segDir, recursive = TRUE)
}
saveRDS(finetuned, file = file.path(segDir, output.file))
quit('no')
|
/tcga_gbm_scripts/jfkit-scripts/4-segmentBins.R
|
no_license
|
cancer-genomics/gridcnp_analysis
|
R
| false
| false
| 1,695
|
r
|
## Segment normalized bin-level copy-number data for one TCGA GBM sample.
## Usage: Rscript 4-segmentBins.R <i>  (i = index into the input file list)
i <- as.numeric(commandArgs(trailingOnly = TRUE))
library(segmentr)
library(rCGH)
library(GenomicRanges)
### get hg38 centromeres (rCGH codes X/Y as chromosomes 23/24);
### each centromere is padded by 100 bp on both sides
hg38.centromeres <- rCGH::hg38
chr <- paste0("chr", hg38.centromeres$chrom)
chr[chr=="chr23"] <- "chrX"
chr[chr=="chr24"] <- "chrY"
centromeres <- GRanges(seqnames = chr,
ranges = IRanges(start = hg38.centromeres$centromerStart - 100,
end = hg38.centromeres$centromerEnd + 100))
inDir <- "/dcl01/scharpf1/data/gridcnp_analysis/tcga_gbm/jfkit/cases"
# NOTE(review): "*log2norm.rds" is a shell glob, but list.files() expects a
# regular expression - consider "log2norm\\.rds$" instead.
files <- list.files(inDir, "*log2norm.rds")
file <- files[i]
bins <- readRDS(file.path(inDir, file))
# Removing flagged bins
bins <- bins[bins$flag == FALSE]
# Remove bins with variance above 95th percentile in normals
upper.var <- quantile(bins$var, .95)
bins <- bins[bins$var < upper.var]
# Fixed seed so the sdundo segmentation is reproducible
set.seed(123)
segments <- segmentr::segmentBins(bins,alpha = 0.01, undo.splits = "sdundo",
undo.SD = 2, centromeres = centromeres)
# Not fine-tuning segments with n.probes = 3: set them aside first
hits <- which(segments$n.probes <= 3)
if (length(hits) > 0) {
lte3 <- segments[hits]
segments <- segments[-hits]
} else {
lte3 <- NULL
}
finetuned <- segmentr::finetune.segments(bins = bins, segments = segments,
alpha = 0.01, centromeres = centromeres, undo.SD = 1)
# Re-attach the small segments that were excluded from fine-tuning
if (length(lte3) > 0) {
finetuned <- sort(c(finetuned, lte3))
}
# Output name drops the "log2norm" infix; write next to other tumor segments
output.file <- gsub("log2norm", "", file)
segDir <- "/dcl01/scharpf1/data/gridcnp_analysis/tcga_gbm/jfkit/tumor_segments"
if(!dir.exists(segDir)) {
dir.create(segDir, recursive = TRUE)
}
saveRDS(finetuned, file = file.path(segDir, output.file))
quit('no')
|
library(ggplot2)
ggplot(data = iris, aes(x = Petal.Width, y= Petal.Length, col=Species)) + geom_point()
ggsave("ggtest.png")
icon.glyphicon <- makeAwesomeIcon(icon = "flag", markerColor = "blue",
iconColor = "yellow",
squareMarker = TRUE)
icon.fa <- makeAwesomeIcon(icon = "flag", markerColor = "red",
iconColor = "black")
icon.ion <- makeAwesomeIcon(icon = "home", markerColor = "green")
# Marker + Label
leaflet() %>% addTiles() %>%
addAwesomeMarkers(
lng = -118.456554, lat = 34.078039,
label = "This is a label",
icon = icon.glyphicon)
leaflet() %>% addTiles() %>%
addAwesomeMarkers(
lng = -118.456554, lat = 34.078039,
label = "This is a label",
icon = icon.fa)
leaflet() %>% addTiles() %>%
addAwesomeMarkers(
lng = -118.456554, lat = 34.078039,
label = "This is a label",
icon = icon.ion)
leaflet() %>% addTiles() %>%
addAwesomeMarkers(
lng = -118.456554, lat = 34.078039,
label = "This is a static label",
labelOptions = labelOptions(noHide = T),
icon = icon.fa)
View(quakes)
str(quakes)
data = quakes[1:20,]
leaflet() %>% addTiles() %>%
addMarkers(data$long, data$lat, popup = paste("지진 강도 : ",as.character(data$mag)), label = as.character(data$mag))
getColor <- function(quakes) {
result <- sapply(quakes$mag, function(mag) {
if(mag <= 4) {
"green"
} else if(mag <= 5) {
"orange"
} else {
"red"
} })
return(result)
}
icons <- awesomeIcons(
icon = 'ios-close',
iconColor = 'black',
library = 'ion',
markerColor = getColor(data)
)
leaflet() %>% addTiles() %>%
addAwesomeMarkers(data$long, data$lat, icon=icons, label = as.character(data$mag))
#install.packages("RColorBrewer")
library(RColorBrewer)
# Print each ColorBrewer palette name followed by its 5-colour hex codes.
for (palette_name in c('YlGn', 'RdPu', 'PuRd', 'BrBG', 'RdBu', 'RdYlBu', 'Set3', 'Set1')) {
  print(palette_name)
  print(brewer.pal(n = 5, name = palette_name))
}
install.packages("KoNLP")
install.packages("rlang")
library(KoNLP)
useSystemDic()
useSejongDic()
useNIADic()
word_data <- readLines("c:/Rstudy/book/애국가(가사).txt")
word_data
useSejongDic()
word_data2 <- sapply(word_data, extractNoun, USE.NAMES = F)
word_data2
word_data3 <- extractNoun(word_data)
word_data3
add_words <- c("백두산", "남산", "철갑", "가을", "달")
buildDictionary(user_dic=data.frame(add_words, rep("ncn", length(add_words))), replace_usr_dic=T)
word_data3 <- extractNoun(word_data)
word_data3
undata <- unlist(word_data2)
undata
word_table <- table(undata)
word_table
undata2 <- Filter(function(x) {nchar(x) >= 2}, undata)
word_table2 <- table(undata2)
word_table2
final <- sort(word_table2, decreasing = T)
head(final, 10)
extractNoun("대한민국의 영토는 한반도와 그 부속도서로 한다")
SimplePos22("대한민국의 영토는 한반도와 그 부속도서로 한다")
SimplePos09("대한민국의 영토는 한반도와 그 부속도서로 한다")
install.packages("wordcloud")
library(wordcloud)
install.packages("wordcloud2")
library(wordcloud2)
(words <- read.csv("c:/Rstudy/data/wc.csv",stringsAsFactors = F))
head(words)
install.packages("wordcloud")
library(wordcloud)
windowsFonts(lett=windowsFont("휴먼옛체"))
wordcloud(words$keyword, words$freq,family="lett")
wordcloud(words$keyword, words$freq,
min.freq = 2,
random.order = FALSE,
rot.per = 0.1, scale = c(4, 1),
colors = rainbow(7))
wordcloud2(words)
wordcloud2(words,rotateRatio = 1)
wordcloud2(words,rotateRatio = 0.5)
wordcloud2(words,rotateRatio = 0)
wordcloud2(words, size=0.5,col="random-dark")
wordcloud2(words,size=0.5,col="random-dark", figPath="book/peace.png")
wordcloud2(words,size=0.7,col="random-light",backgroundColor = "black")
wordcloud2(data = demoFreq)
#install.packages("twitteR")
library(twitteR)
# Twitter API credentials.
# SECURITY FIX: the original script hard-coded the API key/secret and
# access token/secret here; committing secrets to source control leaks
# them permanently. Read them from environment variables instead (set
# TWITTER_API_KEY etc. in ~/.Renviron or the shell before running).
api_key <- Sys.getenv("TWITTER_API_KEY")
api_secret <- Sys.getenv("TWITTER_API_SECRET")
access_token <- Sys.getenv("TWITTER_ACCESS_TOKEN")
access_token_secret <- Sys.getenv("TWITTER_ACCESS_TOKEN_SECRET")
setup_twitter_oauth(api_key, api_secret, access_token, access_token_secret)
# Confirm that the OAuth credentials were stored.
key <- "수능"
key <- enc2utf8(key)
result <- searchTwitter(key, n=100)
DF <- twListToDF(result)
str(DF)
content <- DF$text
content <- gsub("[[:lower:][:upper:][:digit:][:punct:][:cntrl:]]", "", content)
content <- gsub("수능", "", content)
content
word <- extractNoun(content)
cdata <- unlist(word)
cdata
cdata <- Filter(function(x) {nchar(x) < 6 & nchar(x) >= 2} ,cdata)
wordcount <- table(cdata)
wordcount <- head(sort(wordcount, decreasing=T),30)
par(mar=c(1,1,1,1))
wordcloud(names(wordcount),freq=wordcount,scale=c(3,0.5),rot.per=0.35,min.freq=1,
random.order=F,random.color=T,colors=rainbow(20))
|
/day15_2.R
|
no_license
|
starkwon/kwon
|
R
| false
| false
| 5,012
|
r
|
library(ggplot2)
ggplot(data = iris, aes(x = Petal.Width, y= Petal.Length, col=Species)) + geom_point()
ggsave("ggtest.png")
icon.glyphicon <- makeAwesomeIcon(icon = "flag", markerColor = "blue",
iconColor = "yellow",
squareMarker = TRUE)
icon.fa <- makeAwesomeIcon(icon = "flag", markerColor = "red",
iconColor = "black")
icon.ion <- makeAwesomeIcon(icon = "home", markerColor = "green")
# Marker + Label
leaflet() %>% addTiles() %>%
addAwesomeMarkers(
lng = -118.456554, lat = 34.078039,
label = "This is a label",
icon = icon.glyphicon)
leaflet() %>% addTiles() %>%
addAwesomeMarkers(
lng = -118.456554, lat = 34.078039,
label = "This is a label",
icon = icon.fa)
leaflet() %>% addTiles() %>%
addAwesomeMarkers(
lng = -118.456554, lat = 34.078039,
label = "This is a label",
icon = icon.ion)
leaflet() %>% addTiles() %>%
addAwesomeMarkers(
lng = -118.456554, lat = 34.078039,
label = "This is a static label",
labelOptions = labelOptions(noHide = T),
icon = icon.fa)
View(quakes)
str(quakes)
data = quakes[1:20,]
leaflet() %>% addTiles() %>%
addMarkers(data$long, data$lat, popup = paste("지진 강도 : ",as.character(data$mag)), label = as.character(data$mag))
# Translate each earthquake magnitude in `quakes$mag` into a marker
# colour: "green" for mag <= 4, "orange" for 4 < mag <= 5, "red" above 5.
getColor <- function(quakes) {
  pick_colour <- function(m) {
    if (m > 5) return("red")
    if (m > 4) return("orange")
    "green"
  }
  sapply(quakes$mag, pick_colour)
}
icons <- awesomeIcons(
icon = 'ios-close',
iconColor = 'black',
library = 'ion',
markerColor = getColor(data)
)
leaflet() %>% addTiles() %>%
addAwesomeMarkers(data$long, data$lat, icon=icons, label = as.character(data$mag))
#install.packages("RColorBrewer")
library(RColorBrewer)
# Show the name and the 5-colour hex values of several ColorBrewer palettes.
for (palette_name in c('YlGn', 'RdPu', 'PuRd', 'BrBG', 'RdBu', 'RdYlBu', 'Set3', 'Set1')) {
  print(palette_name)
  print(brewer.pal(n = 5, name = palette_name))
}
install.packages("KoNLP")
install.packages("rlang")
library(KoNLP)
useSystemDic()
useSejongDic()
useNIADic()
word_data <- readLines("c:/Rstudy/book/애국가(가사).txt")
word_data
useSejongDic()
word_data2 <- sapply(word_data, extractNoun, USE.NAMES = F)
word_data2
word_data3 <- extractNoun(word_data)
word_data3
add_words <- c("백두산", "남산", "철갑", "가을", "달")
buildDictionary(user_dic=data.frame(add_words, rep("ncn", length(add_words))), replace_usr_dic=T)
word_data3 <- extractNoun(word_data)
word_data3
undata <- unlist(word_data2)
undata
word_table <- table(undata)
word_table
undata2 <- Filter(function(x) {nchar(x) >= 2}, undata)
word_table2 <- table(undata2)
word_table2
final <- sort(word_table2, decreasing = T)
head(final, 10)
extractNoun("대한민국의 영토는 한반도와 그 부속도서로 한다")
SimplePos22("대한민국의 영토는 한반도와 그 부속도서로 한다")
SimplePos09("대한민국의 영토는 한반도와 그 부속도서로 한다")
install.packages("wordcloud")
library(wordcloud)
install.packages("wordcloud2")
library(wordcloud2)
(words <- read.csv("c:/Rstudy/data/wc.csv",stringsAsFactors = F))
head(words)
install.packages("wordcloud")
library(wordcloud)
windowsFonts(lett=windowsFont("휴먼옛체"))
wordcloud(words$keyword, words$freq,family="lett")
wordcloud(words$keyword, words$freq,
min.freq = 2,
random.order = FALSE,
rot.per = 0.1, scale = c(4, 1),
colors = rainbow(7))
wordcloud2(words)
wordcloud2(words,rotateRatio = 1)
wordcloud2(words,rotateRatio = 0.5)
wordcloud2(words,rotateRatio = 0)
wordcloud2(words, size=0.5,col="random-dark")
wordcloud2(words,size=0.5,col="random-dark", figPath="book/peace.png")
wordcloud2(words,size=0.7,col="random-light",backgroundColor = "black")
wordcloud2(data = demoFreq)
#install.packages("twitteR")
library(twitteR)
# Twitter API credentials.
# SECURITY FIX: the original script hard-coded the API key/secret and
# access token/secret here; committing secrets to source control leaks
# them permanently. Read them from environment variables instead (set
# TWITTER_API_KEY etc. in ~/.Renviron or the shell before running).
api_key <- Sys.getenv("TWITTER_API_KEY")
api_secret <- Sys.getenv("TWITTER_API_SECRET")
access_token <- Sys.getenv("TWITTER_ACCESS_TOKEN")
access_token_secret <- Sys.getenv("TWITTER_ACCESS_TOKEN_SECRET")
setup_twitter_oauth(api_key, api_secret, access_token, access_token_secret)
# Confirm that the OAuth credentials were stored.
key <- "수능"
key <- enc2utf8(key)
result <- searchTwitter(key, n=100)
DF <- twListToDF(result)
str(DF)
content <- DF$text
content <- gsub("[[:lower:][:upper:][:digit:][:punct:][:cntrl:]]", "", content)
content <- gsub("수능", "", content)
content
word <- extractNoun(content)
cdata <- unlist(word)
cdata
cdata <- Filter(function(x) {nchar(x) < 6 & nchar(x) >= 2} ,cdata)
wordcount <- table(cdata)
wordcount <- head(sort(wordcount, decreasing=T),30)
par(mar=c(1,1,1,1))
wordcloud(names(wordcount),freq=wordcount,scale=c(3,0.5),rot.per=0.35,min.freq=1,
random.order=F,random.color=T,colors=rainbow(20))
|
# Modern data ----
`%>%` <- magrittr::`%>%`
## Load data ----
### Metadata ----
other_southern_hemisphere_metadata <-
"data-raw/GLOBAL/other_southern_hemisphere_SPH.xlsx" %>%
readxl::read_excel(sheet = 1) %>%
janitor::clean_names() %>%
dplyr::rename(age_BP = age_bp) %>%
dplyr::mutate(ID_SAMPLE = seq_along(entity_name))
### Pollen counts ----
other_southern_hemisphere_counts <-
"data-raw/GLOBAL/other_southern_hemisphere_SPH.xlsx" %>%
readxl::read_excel(sheet = 2, col_names = FALSE) %>%
magrittr::set_names(c(
"entity_name", "taxon_name", "taxon_count"
))
### Amalgamations ----
other_southern_hemisphere_taxa_amalgamation <-
"data-raw/GLOBAL/other_southern_hemisphere_SPH.xlsx" %>%
readxl::read_excel(sheet = 3) %>%
magrittr::set_names(c(
"taxon_name", "clean", "intermediate", "amalgamated"
)) %>%
dplyr::distinct() %>%
dplyr::mutate(clean = clean %>% stringr::str_squish(),
intermediate = intermediate %>% stringr::str_squish(),
amalgamated = amalgamated %>% stringr::str_squish())
### Combine counts and amalgamation ----
other_southern_hemisphere_taxa_counts_amalgamation <-
other_southern_hemisphere_counts %>%
dplyr::left_join(other_southern_hemisphere_taxa_amalgamation,
by = c("taxon_name")) %>%
dplyr::relocate(taxon_count, .after = amalgamated) %>%
dplyr::left_join(other_southern_hemisphere_metadata %>%
dplyr::select(entity_name, ID_SAMPLE),
by = "entity_name") %>%
dplyr::select(-entity_name, -taxon_name) %>%
dplyr::relocate(ID_SAMPLE, .before = 1)
### Additional taxonomic corrections (SPH - May 20th) ----
taxonomic_corrections <- "data-raw/GLOBAL/taxonomic_corrections.xlsx" %>%
readxl::read_excel(sheet = 1) %>%
purrr::map_df(stringr::str_squish)
other_southern_hemisphere_taxa_counts_amalgamation_rev <-
other_southern_hemisphere_taxa_counts_amalgamation %>%
dplyr::mutate(ID_COUNT = seq_along(ID_SAMPLE)) %>%
dplyr::left_join(taxonomic_corrections %>%
dplyr::filter(level %in% c("clean", "all")),
by = c("clean" = "original_taxon")) %>%
dplyr::mutate(clean = dplyr::coalesce(corrected_taxon_name,
clean)) %>%
dplyr::select(-corrected_taxon_name, -level) %>%
dplyr::left_join(taxonomic_corrections %>%
dplyr::filter(level %in% c("intermediate", "all")),
by = c("clean" = "original_taxon")) %>%
dplyr::mutate(intermediate = dplyr::coalesce(corrected_taxon_name,
intermediate)) %>%
dplyr::select(-corrected_taxon_name, -level) %>%
dplyr::left_join(taxonomic_corrections %>%
dplyr::filter(level %in% c("amalgamated", "all")),
by = c("clean" = "original_taxon")) %>%
dplyr::mutate(amalgamated = dplyr::coalesce(corrected_taxon_name,
amalgamated)) %>%
dplyr::select(-corrected_taxon_name, -level) %>%
dplyr::left_join(taxonomic_corrections %>%
dplyr::filter(level %in% c("all")),
by = c("clean" = "original_taxon")) %>%
dplyr::mutate(clean = dplyr::coalesce(corrected_taxon_name,
clean)) %>%
dplyr::select(-corrected_taxon_name, -level) %>%
dplyr::left_join(taxonomic_corrections %>%
dplyr::filter(level %in% c("all")),
by = c("intermediate" = "original_taxon")) %>%
dplyr::mutate(intermediate = dplyr::coalesce(corrected_taxon_name,
intermediate)) %>%
dplyr::select(-corrected_taxon_name, -level) %>%
dplyr::left_join(taxonomic_corrections %>%
dplyr::filter(level %in% c("all")),
by = c("amalgamated" = "original_taxon")) %>%
dplyr::mutate(amalgamated = dplyr::coalesce(corrected_taxon_name,
amalgamated)) %>%
dplyr::select(-corrected_taxon_name, -level)
other_southern_hemisphere_taxa_counts_amalgamation_rev %>%
dplyr::group_by(ID_COUNT) %>%
dplyr::mutate(n = dplyr::n()) %>%
dplyr::filter(n > 1)
waldo::compare(other_southern_hemisphere_taxa_counts_amalgamation %>%
dplyr::distinct(clean, intermediate, amalgamated),
other_southern_hemisphere_taxa_counts_amalgamation_rev %>%
dplyr::distinct(clean, intermediate, amalgamated),
max_diffs = Inf)
other_southern_hemisphere_taxa_counts_amalgamation <-
other_southern_hemisphere_taxa_counts_amalgamation_rev %>%
dplyr::filter(!is.na(taxon_count), taxon_count > 0) %>%
dplyr::select(-ID_COUNT)
other_southern_hemisphere_taxa_counts_amalgamation %>%
dplyr::filter(is.na(clean) | is.na(intermediate) | is.na(amalgamated))
## Find DOIs ----
other_southern_hemisphere_metadata_pubs <-
other_southern_hemisphere_metadata %>%
dplyr::distinct(publication) %>%
dplyr::arrange(publication) %>%
dplyr::mutate(DOI = publication %>%
stringr::str_extract_all("\\[DOI\\s*(.*?)\\s*\\](;|$)") %>%
purrr::map_chr(~.x %>%
stringr::str_remove_all("^\\[DOI:|\\]$") %>%
stringr::str_squish() %>%
stringr::str_c(collapse = ";\n"))
) %>%
dplyr::mutate(ID_PUB = seq_along(publication))
# other_southern_hemisphere_metadata_pubs %>%
# readr::write_excel_csv("data-raw/GLOBAL/other_southern_hemisphere_modern-references.csv")
### Load cleaned publications list ----
other_southern_hemisphere_clean_publications <-
"data-raw/GLOBAL/other_southern_hemisphere_modern-references_clean.csv" %>%
readr::read_csv() %>%
dplyr::select(-DOI)
# dplyr::mutate(ID_PUB = seq_along(publication))
## Append clean publications ----
other_southern_hemisphere_metadata_2 <-
other_southern_hemisphere_metadata %>%
dplyr::left_join(other_southern_hemisphere_metadata_pubs %>%
dplyr::select(-DOI),
by = "publication") %>%
dplyr::left_join(other_southern_hemisphere_clean_publications,
by = "ID_PUB") %>%
dplyr::select(-publication.x, -publication.y, -doi) %>%
dplyr::rename(doi = updated_DOI,
publication = updated_publication)
## Extract PNV/BIOME ----
other_southern_hemisphere_metadata_3 <-
other_southern_hemisphere_metadata_2 %>%
smpds::parallel_extract_biome(cpus = 5) %>%
# smpds::biome_name() %>%
dplyr::relocate(ID_BIOME, .after = doi) %>%
smpds::pb()
other_southern_hemisphere_metadata_3 %>%
smpds::plot_biome(xlim = range(.$longitude, na.rm = TRUE) * 1.1,
ylim = range(.$latitude, na.rm = TRUE) * 1.1)
## Create count tables ----
# Build one wide count table per taxonomic level: one row per sample,
# one column per taxon, missing sample/taxon combinations filled with 0.
# FIX (all three pipelines): `pivot_wider()` was given its id column
# positionally; that relies on the pre-1.2 tidyr signature
# `pivot_wider(data, id_cols, ...)` and errors on current releases, where
# the second positional argument lands in `...`. Name `id_cols` explicitly.
### Clean ----
other_southern_hemisphere_clean <-
  other_southern_hemisphere_taxa_counts_amalgamation %>%
  dplyr::select(-intermediate, -amalgamated) %>%
  dplyr::rename(taxon_name = clean) %>%
  # Sum duplicate sample/taxon rows, then drop the duplicated rows.
  dplyr::group_by(ID_SAMPLE, taxon_name) %>%
  dplyr::mutate(taxon_count = sum(taxon_count, na.rm = TRUE)) %>%
  dplyr::ungroup() %>%
  dplyr::distinct() %>%
  tidyr::pivot_wider(id_cols = ID_SAMPLE,
                     names_from = taxon_name,
                     values_from = taxon_count,
                     values_fill = 0,
                     names_sort = TRUE)
### Intermediate ----
other_southern_hemisphere_intermediate <-
  other_southern_hemisphere_taxa_counts_amalgamation %>%
  dplyr::select(-clean, -amalgamated) %>%
  dplyr::rename(taxon_name = intermediate) %>%
  dplyr::group_by(ID_SAMPLE, taxon_name) %>%
  dplyr::mutate(taxon_count = sum(taxon_count, na.rm = TRUE)) %>%
  dplyr::ungroup() %>%
  dplyr::distinct() %>%
  tidyr::pivot_wider(id_cols = ID_SAMPLE,
                     names_from = taxon_name,
                     values_from = taxon_count,
                     values_fill = 0,
                     names_sort = TRUE)
### Amalgamated ----
other_southern_hemisphere_amalgamated <-
  other_southern_hemisphere_taxa_counts_amalgamation %>%
  dplyr::select(-clean, -intermediate) %>%
  dplyr::rename(taxon_name = amalgamated) %>%
  dplyr::group_by(ID_SAMPLE, taxon_name) %>%
  dplyr::mutate(taxon_count = sum(taxon_count, na.rm = TRUE)) %>%
  dplyr::ungroup() %>%
  dplyr::distinct() %>%
  tidyr::pivot_wider(id_cols = ID_SAMPLE,
                     names_from = taxon_name,
                     values_from = taxon_count,
                     values_fill = 0,
                     names_sort = TRUE)
# Store subsets ----
southern_hemisphere_pollen <-
other_southern_hemisphere_metadata_3 %>%
dplyr::mutate(
clean = other_southern_hemisphere_clean %>%
dplyr::select(-c(ID_SAMPLE)),
intermediate = other_southern_hemisphere_intermediate %>%
dplyr::select(-c(ID_SAMPLE)),
amalgamated = other_southern_hemisphere_amalgamated %>%
dplyr::select(-c(ID_SAMPLE))
) %>%
dplyr::mutate(
basin_size_num = basin_size %>%
as.numeric() %>%
round(digits = 6) %>%
as.character(),
basin_size = dplyr::coalesce(
basin_size_num,
basin_size
),
basin_size = basin_size %>%
stringr::str_replace_all("unknown", "not known"),
entity_type = entity_type %>%
stringr::str_replace_all("unknown", "not known"),
site_type = site_type %>%
stringr::str_replace_all("unknown", "not known")
) %>%
dplyr::relocate(ID_SAMPLE, .before = clean) %>%
dplyr::mutate(source = "Southern Hemisphere pollen", .before = 1) %>%
dplyr::mutate(age_BP = as.character(age_BP)) %>%
dplyr::select(-basin_size_num)
usethis::use_data(southern_hemisphere_pollen, overwrite = TRUE, compress = "xz")
## Inspect enumerates ----
### basin_size -----
southern_hemisphere_pollen$basin_size %>%
unique() %>% sort()
### site_type ----
southern_hemisphere_pollen$site_type %>%
unique() %>% sort()
### entity_type ----
southern_hemisphere_pollen$entity_type %>%
unique() %>% sort()
# Export Excel workbook ----
wb <- openxlsx::createWorkbook()
openxlsx::addWorksheet(wb, "metadata")
openxlsx::writeData(wb, "metadata",
southern_hemisphere_pollen %>%
dplyr::select(site_name:ID_SAMPLE))
openxlsx::addWorksheet(wb, "clean")
openxlsx::writeData(wb, "clean",
southern_hemisphere_pollen %>%
dplyr::select(ID_SAMPLE, clean) %>%
tidyr::unnest(clean))
openxlsx::addWorksheet(wb, "intermediate")
openxlsx::writeData(wb, "intermediate",
southern_hemisphere_pollen %>%
dplyr::select(ID_SAMPLE, intermediate) %>%
tidyr::unnest(intermediate))
openxlsx::addWorksheet(wb, "amalgamated")
openxlsx::writeData(wb, "amalgamated",
southern_hemisphere_pollen %>%
dplyr::select(ID_SAMPLE, amalgamated) %>%
tidyr::unnest(amalgamated))
openxlsx::saveWorkbook(wb,
paste0("data-raw/GLOBAL/southern_hemisphere_pollen_",
Sys.Date(),
".xlsx"))
# Load climate reconstructions ----
climate_reconstructions <-
"data-raw/reconstructions/southern_hemisphere_pollen_climate_reconstructions_2022-04-29.csv" %>%
readr::read_csv()
# Load daily values for precipitation to compute MAP (mean annual precipitation)
climate_reconstructions_pre <-
"data-raw/reconstructions/southern_hemisphere_pollen_climate_reconstructions_pre_2022-04-29.csv" %>%
readr::read_csv() %>%
dplyr::rowwise() %>%
dplyr::mutate(map = sum(dplyr::c_across(T1:T365), na.rm = TRUE), .before = T1)
climate_reconstructions_2 <- climate_reconstructions %>%
dplyr::bind_cols(climate_reconstructions_pre %>%
dplyr::select(map))
climate_reconstructions_with_counts <-
southern_hemisphere_pollen %>%
# smpds::southern_hemisphere_pollen %>%
# dplyr::select(-c(mi:mtwa)) %>%
dplyr::bind_cols(
climate_reconstructions_2 %>%
dplyr::select(sn = site_name,
en = entity_name,
new_elevation = elevation,
mi:map)
) %>%
dplyr::relocate(mi:map, .before = clean) %>%
dplyr::mutate(elevation = dplyr::coalesce(elevation, new_elevation))
climate_reconstructions_with_counts %>%
dplyr::filter(site_name != sn | entity_name != en)
waldo::compare(smpds::southern_hemisphere_pollen,
climate_reconstructions_with_counts %>%
dplyr::select(-c(mi:map, sn, en, new_elevation))
)
southern_hemisphere_pollen <- climate_reconstructions_with_counts %>%
dplyr::select(-sn, -en, -new_elevation)
usethis::use_data(southern_hemisphere_pollen, overwrite = TRUE, compress = "xz")
waldo::compare(smpds::southern_hemisphere_pollen,
southern_hemisphere_pollen,
max_diffs = Inf)
climate_reconstructions_2 %>%
smpds::plot_climate_countour(
var = "mat",
xlim = range(.$longitude, na.rm = TRUE),
ylim = range(.$latitude, na.rm = TRUE)
)
climate_reconstructions_2 %>%
smpds::plot_climate(
var = "map",
xlim = range(.$longitude, na.rm = TRUE),
ylim = range(.$latitude, na.rm = TRUE)
)
rm(climate_reconstructions,
climate_reconstructions_2,
climate_reconstructions_pre,
climate_reconstructions_with_counts)
|
/data-raw/southern_hemisphere_pollen.R
|
permissive
|
special-uor/smpds
|
R
| false
| false
| 13,334
|
r
|
# Modern data ----
`%>%` <- magrittr::`%>%`
## Load data ----
### Metadata ----
other_southern_hemisphere_metadata <-
"data-raw/GLOBAL/other_southern_hemisphere_SPH.xlsx" %>%
readxl::read_excel(sheet = 1) %>%
janitor::clean_names() %>%
dplyr::rename(age_BP = age_bp) %>%
dplyr::mutate(ID_SAMPLE = seq_along(entity_name))
### Pollen counts ----
other_southern_hemisphere_counts <-
"data-raw/GLOBAL/other_southern_hemisphere_SPH.xlsx" %>%
readxl::read_excel(sheet = 2, col_names = FALSE) %>%
magrittr::set_names(c(
"entity_name", "taxon_name", "taxon_count"
))
### Amalgamations ----
other_southern_hemisphere_taxa_amalgamation <-
"data-raw/GLOBAL/other_southern_hemisphere_SPH.xlsx" %>%
readxl::read_excel(sheet = 3) %>%
magrittr::set_names(c(
"taxon_name", "clean", "intermediate", "amalgamated"
)) %>%
dplyr::distinct() %>%
dplyr::mutate(clean = clean %>% stringr::str_squish(),
intermediate = intermediate %>% stringr::str_squish(),
amalgamated = amalgamated %>% stringr::str_squish())
### Combine counts and amalgamation ----
other_southern_hemisphere_taxa_counts_amalgamation <-
other_southern_hemisphere_counts %>%
dplyr::left_join(other_southern_hemisphere_taxa_amalgamation,
by = c("taxon_name")) %>%
dplyr::relocate(taxon_count, .after = amalgamated) %>%
dplyr::left_join(other_southern_hemisphere_metadata %>%
dplyr::select(entity_name, ID_SAMPLE),
by = "entity_name") %>%
dplyr::select(-entity_name, -taxon_name) %>%
dplyr::relocate(ID_SAMPLE, .before = 1)
### Additional taxonomic corrections (SPH - May 20th) ----
taxonomic_corrections <- "data-raw/GLOBAL/taxonomic_corrections.xlsx" %>%
readxl::read_excel(sheet = 1) %>%
purrr::map_df(stringr::str_squish)
other_southern_hemisphere_taxa_counts_amalgamation_rev <-
other_southern_hemisphere_taxa_counts_amalgamation %>%
dplyr::mutate(ID_COUNT = seq_along(ID_SAMPLE)) %>%
dplyr::left_join(taxonomic_corrections %>%
dplyr::filter(level %in% c("clean", "all")),
by = c("clean" = "original_taxon")) %>%
dplyr::mutate(clean = dplyr::coalesce(corrected_taxon_name,
clean)) %>%
dplyr::select(-corrected_taxon_name, -level) %>%
dplyr::left_join(taxonomic_corrections %>%
dplyr::filter(level %in% c("intermediate", "all")),
by = c("clean" = "original_taxon")) %>%
dplyr::mutate(intermediate = dplyr::coalesce(corrected_taxon_name,
intermediate)) %>%
dplyr::select(-corrected_taxon_name, -level) %>%
dplyr::left_join(taxonomic_corrections %>%
dplyr::filter(level %in% c("amalgamated", "all")),
by = c("clean" = "original_taxon")) %>%
dplyr::mutate(amalgamated = dplyr::coalesce(corrected_taxon_name,
amalgamated)) %>%
dplyr::select(-corrected_taxon_name, -level) %>%
dplyr::left_join(taxonomic_corrections %>%
dplyr::filter(level %in% c("all")),
by = c("clean" = "original_taxon")) %>%
dplyr::mutate(clean = dplyr::coalesce(corrected_taxon_name,
clean)) %>%
dplyr::select(-corrected_taxon_name, -level) %>%
dplyr::left_join(taxonomic_corrections %>%
dplyr::filter(level %in% c("all")),
by = c("intermediate" = "original_taxon")) %>%
dplyr::mutate(intermediate = dplyr::coalesce(corrected_taxon_name,
intermediate)) %>%
dplyr::select(-corrected_taxon_name, -level) %>%
dplyr::left_join(taxonomic_corrections %>%
dplyr::filter(level %in% c("all")),
by = c("amalgamated" = "original_taxon")) %>%
dplyr::mutate(amalgamated = dplyr::coalesce(corrected_taxon_name,
amalgamated)) %>%
dplyr::select(-corrected_taxon_name, -level)
other_southern_hemisphere_taxa_counts_amalgamation_rev %>%
dplyr::group_by(ID_COUNT) %>%
dplyr::mutate(n = dplyr::n()) %>%
dplyr::filter(n > 1)
waldo::compare(other_southern_hemisphere_taxa_counts_amalgamation %>%
dplyr::distinct(clean, intermediate, amalgamated),
other_southern_hemisphere_taxa_counts_amalgamation_rev %>%
dplyr::distinct(clean, intermediate, amalgamated),
max_diffs = Inf)
other_southern_hemisphere_taxa_counts_amalgamation <-
other_southern_hemisphere_taxa_counts_amalgamation_rev %>%
dplyr::filter(!is.na(taxon_count), taxon_count > 0) %>%
dplyr::select(-ID_COUNT)
other_southern_hemisphere_taxa_counts_amalgamation %>%
dplyr::filter(is.na(clean) | is.na(intermediate) | is.na(amalgamated))
## Find DOIs ----
other_southern_hemisphere_metadata_pubs <-
other_southern_hemisphere_metadata %>%
dplyr::distinct(publication) %>%
dplyr::arrange(publication) %>%
dplyr::mutate(DOI = publication %>%
stringr::str_extract_all("\\[DOI\\s*(.*?)\\s*\\](;|$)") %>%
purrr::map_chr(~.x %>%
stringr::str_remove_all("^\\[DOI:|\\]$") %>%
stringr::str_squish() %>%
stringr::str_c(collapse = ";\n"))
) %>%
dplyr::mutate(ID_PUB = seq_along(publication))
# other_southern_hemisphere_metadata_pubs %>%
# readr::write_excel_csv("data-raw/GLOBAL/other_southern_hemisphere_modern-references.csv")
### Load cleaned publications list ----
other_southern_hemisphere_clean_publications <-
"data-raw/GLOBAL/other_southern_hemisphere_modern-references_clean.csv" %>%
readr::read_csv() %>%
dplyr::select(-DOI)
# dplyr::mutate(ID_PUB = seq_along(publication))
## Append clean publications ----
other_southern_hemisphere_metadata_2 <-
other_southern_hemisphere_metadata %>%
dplyr::left_join(other_southern_hemisphere_metadata_pubs %>%
dplyr::select(-DOI),
by = "publication") %>%
dplyr::left_join(other_southern_hemisphere_clean_publications,
by = "ID_PUB") %>%
dplyr::select(-publication.x, -publication.y, -doi) %>%
dplyr::rename(doi = updated_DOI,
publication = updated_publication)
## Extract PNV/BIOME ----
other_southern_hemisphere_metadata_3 <-
other_southern_hemisphere_metadata_2 %>%
smpds::parallel_extract_biome(cpus = 5) %>%
# smpds::biome_name() %>%
dplyr::relocate(ID_BIOME, .after = doi) %>%
smpds::pb()
other_southern_hemisphere_metadata_3 %>%
smpds::plot_biome(xlim = range(.$longitude, na.rm = TRUE) * 1.1,
ylim = range(.$latitude, na.rm = TRUE) * 1.1)
## Create count tables ----
# Build one wide count table per taxonomic level: one row per sample,
# one column per taxon, missing sample/taxon combinations filled with 0.
# FIX (all three pipelines): `pivot_wider()` was given its id column
# positionally; that relies on the pre-1.2 tidyr signature
# `pivot_wider(data, id_cols, ...)` and errors on current releases, where
# the second positional argument lands in `...`. Name `id_cols` explicitly.
### Clean ----
other_southern_hemisphere_clean <-
  other_southern_hemisphere_taxa_counts_amalgamation %>%
  dplyr::select(-intermediate, -amalgamated) %>%
  dplyr::rename(taxon_name = clean) %>%
  # Sum duplicate sample/taxon rows, then drop the duplicated rows.
  dplyr::group_by(ID_SAMPLE, taxon_name) %>%
  dplyr::mutate(taxon_count = sum(taxon_count, na.rm = TRUE)) %>%
  dplyr::ungroup() %>%
  dplyr::distinct() %>%
  tidyr::pivot_wider(id_cols = ID_SAMPLE,
                     names_from = taxon_name,
                     values_from = taxon_count,
                     values_fill = 0,
                     names_sort = TRUE)
### Intermediate ----
other_southern_hemisphere_intermediate <-
  other_southern_hemisphere_taxa_counts_amalgamation %>%
  dplyr::select(-clean, -amalgamated) %>%
  dplyr::rename(taxon_name = intermediate) %>%
  dplyr::group_by(ID_SAMPLE, taxon_name) %>%
  dplyr::mutate(taxon_count = sum(taxon_count, na.rm = TRUE)) %>%
  dplyr::ungroup() %>%
  dplyr::distinct() %>%
  tidyr::pivot_wider(id_cols = ID_SAMPLE,
                     names_from = taxon_name,
                     values_from = taxon_count,
                     values_fill = 0,
                     names_sort = TRUE)
### Amalgamated ----
other_southern_hemisphere_amalgamated <-
  other_southern_hemisphere_taxa_counts_amalgamation %>%
  dplyr::select(-clean, -intermediate) %>%
  dplyr::rename(taxon_name = amalgamated) %>%
  dplyr::group_by(ID_SAMPLE, taxon_name) %>%
  dplyr::mutate(taxon_count = sum(taxon_count, na.rm = TRUE)) %>%
  dplyr::ungroup() %>%
  dplyr::distinct() %>%
  tidyr::pivot_wider(id_cols = ID_SAMPLE,
                     names_from = taxon_name,
                     values_from = taxon_count,
                     values_fill = 0,
                     names_sort = TRUE)
# Store subsets ----
southern_hemisphere_pollen <-
other_southern_hemisphere_metadata_3 %>%
dplyr::mutate(
clean = other_southern_hemisphere_clean %>%
dplyr::select(-c(ID_SAMPLE)),
intermediate = other_southern_hemisphere_intermediate %>%
dplyr::select(-c(ID_SAMPLE)),
amalgamated = other_southern_hemisphere_amalgamated %>%
dplyr::select(-c(ID_SAMPLE))
) %>%
dplyr::mutate(
basin_size_num = basin_size %>%
as.numeric() %>%
round(digits = 6) %>%
as.character(),
basin_size = dplyr::coalesce(
basin_size_num,
basin_size
),
basin_size = basin_size %>%
stringr::str_replace_all("unknown", "not known"),
entity_type = entity_type %>%
stringr::str_replace_all("unknown", "not known"),
site_type = site_type %>%
stringr::str_replace_all("unknown", "not known")
) %>%
dplyr::relocate(ID_SAMPLE, .before = clean) %>%
dplyr::mutate(source = "Southern Hemisphere pollen", .before = 1) %>%
dplyr::mutate(age_BP = as.character(age_BP)) %>%
dplyr::select(-basin_size_num)
usethis::use_data(southern_hemisphere_pollen, overwrite = TRUE, compress = "xz")
## Inspect enumerates ----
### basin_size -----
southern_hemisphere_pollen$basin_size %>%
unique() %>% sort()
### site_type ----
southern_hemisphere_pollen$site_type %>%
unique() %>% sort()
### entity_type ----
southern_hemisphere_pollen$entity_type %>%
unique() %>% sort()
# Export Excel workbook ----
wb <- openxlsx::createWorkbook()
openxlsx::addWorksheet(wb, "metadata")
openxlsx::writeData(wb, "metadata",
southern_hemisphere_pollen %>%
dplyr::select(site_name:ID_SAMPLE))
openxlsx::addWorksheet(wb, "clean")
openxlsx::writeData(wb, "clean",
southern_hemisphere_pollen %>%
dplyr::select(ID_SAMPLE, clean) %>%
tidyr::unnest(clean))
openxlsx::addWorksheet(wb, "intermediate")
openxlsx::writeData(wb, "intermediate",
southern_hemisphere_pollen %>%
dplyr::select(ID_SAMPLE, intermediate) %>%
tidyr::unnest(intermediate))
openxlsx::addWorksheet(wb, "amalgamated")
openxlsx::writeData(wb, "amalgamated",
southern_hemisphere_pollen %>%
dplyr::select(ID_SAMPLE, amalgamated) %>%
tidyr::unnest(amalgamated))
openxlsx::saveWorkbook(wb,
paste0("data-raw/GLOBAL/southern_hemisphere_pollen_",
Sys.Date(),
".xlsx"))
# Load climate reconstructions ----
climate_reconstructions <-
"data-raw/reconstructions/southern_hemisphere_pollen_climate_reconstructions_2022-04-29.csv" %>%
readr::read_csv()
# Load daily values for precipitation to compute MAP (mean annual precipitation)
climate_reconstructions_pre <-
"data-raw/reconstructions/southern_hemisphere_pollen_climate_reconstructions_pre_2022-04-29.csv" %>%
readr::read_csv() %>%
dplyr::rowwise() %>%
dplyr::mutate(map = sum(dplyr::c_across(T1:T365), na.rm = TRUE), .before = T1)
climate_reconstructions_2 <- climate_reconstructions %>%
dplyr::bind_cols(climate_reconstructions_pre %>%
dplyr::select(map))
climate_reconstructions_with_counts <-
southern_hemisphere_pollen %>%
# smpds::southern_hemisphere_pollen %>%
# dplyr::select(-c(mi:mtwa)) %>%
dplyr::bind_cols(
climate_reconstructions_2 %>%
dplyr::select(sn = site_name,
en = entity_name,
new_elevation = elevation,
mi:map)
) %>%
dplyr::relocate(mi:map, .before = clean) %>%
dplyr::mutate(elevation = dplyr::coalesce(elevation, new_elevation))
climate_reconstructions_with_counts %>%
dplyr::filter(site_name != sn | entity_name != en)
waldo::compare(smpds::southern_hemisphere_pollen,
climate_reconstructions_with_counts %>%
dplyr::select(-c(mi:map, sn, en, new_elevation))
)
southern_hemisphere_pollen <- climate_reconstructions_with_counts %>%
dplyr::select(-sn, -en, -new_elevation)
usethis::use_data(southern_hemisphere_pollen, overwrite = TRUE, compress = "xz")
waldo::compare(smpds::southern_hemisphere_pollen,
southern_hemisphere_pollen,
max_diffs = Inf)
climate_reconstructions_2 %>%
smpds::plot_climate_countour(
var = "mat",
xlim = range(.$longitude, na.rm = TRUE),
ylim = range(.$latitude, na.rm = TRUE)
)
climate_reconstructions_2 %>%
smpds::plot_climate(
var = "map",
xlim = range(.$longitude, na.rm = TRUE),
ylim = range(.$latitude, na.rm = TRUE)
)
rm(climate_reconstructions,
climate_reconstructions_2,
climate_reconstructions_pre,
climate_reconstructions_with_counts)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{readFasta}
\alias{readFasta}
\title{Read FASTA File}
\usage{
readFasta(file, rownames = FALSE)
}
\arguments{
\item{file}{File name of FASTA input.}
\item{rownames}{Use the sequence annotation line in file (starts with
\code{'>'}) as the row names. Will fail if there are duplicate items.}
}
\value{
Data frame of each sequence in rows.
}
\description{
Read nucleotide sequence files in FASTA format
}
\details{
Sequence data in FASTA files are converted into data frame
suitable as input to \code{\link{bbl}}. If sequence lengths are different,
instances longer than those already read will be truncated. Empty sequences
are skipped.
}
\examples{
file <- tempfile('data')
write('>seq1', file)
write('atgcc', file, append=TRUE)
write('>seq2', file, append=TRUE)
write('gccaa', file, append=TRUE)
system(paste0('cat ',file))
x <- readFasta(file)
x
}
|
/bbl/man/readFasta.Rd
|
no_license
|
akhikolla/InformationHouse
|
R
| false
| true
| 941
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{readFasta}
\alias{readFasta}
\title{Read FASTA File}
\usage{
readFasta(file, rownames = FALSE)
}
\arguments{
\item{file}{File name of FASTA input.}
\item{rownames}{Use the sequence annotation line in file (starts with
\code{'>'}) as the row names. Will fail if there are duplicate items.}
}
\value{
Data frame of each sequence in rows.
}
\description{
Read nucleotide sequence files in FASTA format
}
\details{
Sequence data in FASTA files are converted into data frame
suitable as input to \code{\link{bbl}}. If sequence lengths are different,
instances longer than those already read will be truncated. Empty sequences
are skipped.
}
\examples{
file <- tempfile('data')
write('>seq1', file)
write('atgcc', file, append=TRUE)
write('>seq2', file, append=TRUE)
write('gccaa', file, append=TRUE)
system(paste0('cat ',file))
x <- readFasta(file)
x
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mfxi_lm.R
\name{mfxi.lm}
\alias{mfxi.lm}
\title{Run a Regression}
\usage{
mfxi.lm(formi, datai, vcv = "standard")
}
\arguments{
\item{formi}{regression formula}
\item{datai}{data for regression}
\item{vcv}{type of covariance correction}
}
\value{
summary table
}
\description{
Run a Regression
}
|
/PrettyR/man/mfxi.lm.Rd
|
permissive
|
Jadamso/PrettyR
|
R
| false
| true
| 376
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mfxi_lm.R
\name{mfxi.lm}
\alias{mfxi.lm}
\title{Run a Regression}
\usage{
mfxi.lm(formi, datai, vcv = "standard")
}
\arguments{
\item{formi}{regression formula}
\item{datai}{data for regression}
\item{vcv}{type of covariance correction}
}
\value{
summary table
}
\description{
Run a Regression
}
|
# EM_find.R -- "whiten"/sphere a 10-column matrix (~6000 rows) as a
# preprocessing step for end-member extraction: read the data, centre
# each column, then multiply by the inverse of the covariance matrix.

## 1. Get data ----
setwd("C:/Users/phug7649/Desktop/TXTBIN")
y <- as.matrix(read.table("whiten.txt", sep = ",", na.strings = "", header = TRUE))

## 2. Centre the columns and multiply by the inverse covariance matrix ----
# BUG FIX: the original `y - colMeans(y)` recycles the length-ncol(y)
# means vector DOWN the rows (column-major order), so each column was NOT
# centred by its own mean. sweep() subtracts the j-th mean from the j-th
# column as intended.
x <- sweep(y, 2, colMeans(y))
a <- cov(x)
aa <- solve(a)
#a <- cor(x)   # earlier experiment, kept for reference
# NOTE(review): true whitening multiplies by the inverse *square root* of
# the covariance (e.g. via eigen() or chol()); x %*% solve(cov(x)) is not
# the standard transform -- confirm which is wanted.
solution3 <- x %*% aa
plot(solution3, main = "solution3")
plot(y, main = "original data")
str(solution3)

# Store each column of the transformed matrix as S3PRIN_1 .. S3PRIN_n.
# BUG FIX: the original unbraced loop (its intended body commented out)
# assigned the loop *index* i to every S3PRIN_ variable rather than the
# corresponding column of solution3.
for (i in seq_len(ncol(solution3))) {
  assign(paste0("S3PRIN_", i), as.matrix(solution3[, i]))
}
head(y)
head(x)
|
/EM_find.R
|
no_license
|
p-hughes/Dirty_business
|
R
| false
| false
| 1,240
|
r
|
# This script is to obtain end members froom a matrix. The data will be in 10 columns, approximately 6000 rows.
# First order of business is to figure out how the hell to do it!! The first hint alex provided was to "spherify"
# the data. Papers document a procedure called "whitening" which is a reference to spectral colour (this process is
# almost exclusively used for spectral images). The whitening process seems to be one in which the data is read in
# in and the correlation matrix obtained, the means are removed and then multiplied by the inverse of the correlation
# matrix. Lots of confusion later, we will try to get the iddentity matrix
##Junk column
#c<-t(x)
#b<-(solve(a))
#d<-t(a)
#solution<-a%*%c
#solution2<-d%*%x
#g<-which.max(solution3)
##1. Get data
setwd("C:/Users/phug7649/Desktop/TXTBIN")
y<-as.matrix(read.table("whiten.txt", sep=",", na.strings="", header=TRUE))
##2. remove means and multiply by the inverse of the corrolation matrix.
x<-(y-colMeans(y))
a<-(cov(x))
aa<-solve(a)
#a<-(cor(x))
?cov
solution3<-x%*%aa
plot(solution3,main="solution3")
plot(y,main="original data")
str(solution3)
for (i in 1:10)
#sprin<- as.matrix(solution3 [,i])
assign(paste0('S3PRIN_', i), i)
head(y)
head(x)
|
# The following code allows for the analysis of 6 single cell RNAseq datasets of the human pancreas
# Information on these datasets can be found in the following locations:
# https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE81076
# https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE85241
# https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE86469
# https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE84133
# https://www.ebi.ac.uk/arrayexpress/experiments/E-MTAB-5061/
# This code was written by Fahd Qadir PhD. on 06/03/2020 email: mqadir@tulane.edu
# 1. installation and loading of packages
# Devtools
install.packages('devtools')
library(devtools)
# Seuratdata
devtools::install_github('satijalab/seurat-data')
# Seurat wrappers
devtools::install_github('satijalab/seurat-wrappers')
# Load packages
library(Seurat)
library(ggplot2)
library(patchwork)
library(SeuratData)
library(SeuratWrappers)
library(future)
# Set RAM to 50GB
# options(future.globals.maxSize = 40 * 1024^3)
# check the current active plan
# plan()
# change the current plan to access parallelization
# future::availableCores()
# future::availableWorkers()
# plan("multiprocess", workers = 15)
# plan()
# Loading of refrence datasets
#GSE81076 <- read.csv("C:/Users/mqadir/Box/Lab 2301/sCell Analysis Project/Refrence Human Pancreas/scRNAseq datasets/pancreas/GSE81076.csv", header = TRUE, sep = ",", row.names = 1)
GSE85241 <- read.csv("C:/Users/mqadir/Box/Lab 2301/sCell Analysis Project/Refrence Human Pancreas/scRNAseq datasets/pancreas/GSE85241.csv", header = TRUE, sep = ",", row.names = 1)
GSE86469 <- read.csv("C:/Users/mqadir/Box/Lab 2301/sCell Analysis Project/Refrence Human Pancreas/scRNAseq datasets/pancreas/GSE86469.csv", header = TRUE, sep = ",", row.names = 1)
GSE84133 <- read.csv("C:/Users/mqadir/Box/Lab 2301/sCell Analysis Project/Refrence Human Pancreas/scRNAseq datasets/pancreas/GSE84133.csv", header = TRUE, sep = ",", row.names = 1)
EMTAB5061 <- read.csv("C:/Users/mqadir/Box/Lab 2301/sCell Analysis Project/Refrence Human Pancreas/scRNAseq datasets/pancreas/EMTAB5061.csv", header = TRUE, sep = ",", row.names = 1)
GSE131886 <- read.csv("C:/Users/mqadir/Box/Lab 2301/sCell Analysis Project/Refrence Human Pancreas/scRNAseq datasets/pancreas/GSE131886.csv", header = TRUE, sep = ",", row.names = 1)
# Create Seurat objects
#GSE81076 <- CreateSeuratObject(counts = GSE81076, project = "SeuratProject", assay = "RNA", min.cells = 3, min.features = 200)
GSE85241 <- CreateSeuratObject(counts = GSE85241, project = "SeuratProject", assay = "RNA", min.cells = 3, min.features = 200)
GSE86469 <- CreateSeuratObject(counts = GSE86469, project = "SeuratProject", assay = "RNA", min.cells = 3, min.features = 200)
GSE84133 <- CreateSeuratObject(counts = GSE84133, project = "SeuratProject", assay = "RNA", min.cells = 3, min.features = 200)
EMTAB5061 <- CreateSeuratObject(counts = EMTAB5061, project = "SeuratProject", assay = "RNA", min.cells = 3, min.features = 200)
GSE131886 <- CreateSeuratObject(counts = GSE131886, project = "SeuratProject", assay = "RNA", min.cells = 3, min.features = 200)
# Load in Luca's data
adult_pancreas <- readRDS("C:/Users/mqadir/Box/Lab 2301/sCell Analysis Project/Refrence Human Pancreas/scRNAseq datasets/pancreas/adult_pancreas.rds")
chronic_pancreatitis <- readRDS("C:/Users/mqadir/Box/Lab 2301/sCell Analysis Project/Refrence Human Pancreas/scRNAseq datasets/pancreas/chronic_pancreatitis.rds")
neonatal_pancreas <- readRDS("C:/Users/mqadir/Box/Lab 2301/sCell Analysis Project/Refrence Human Pancreas/scRNAseq datasets/pancreas/neonatal_pancreas.rds")
# Sample specific Metadata addition
#GSE81076$sample <- "GSE81076"
GSE85241$sample <- "GSE85241"
GSE86469$sample <- "GSE86469"
GSE84133$sample <- "GSE84133"
EMTAB5061$sample <- "EMTAB5061"
GSE131886$sample <- "GSE131886"
adult_pancreas$sample <- "EGAS00001004653_adult"
chronic_pancreatitis$sample <- "EGAS00001004653_CP"
neonatal_pancreas$sample <- "EGAS00001004653_NP"
# Sex segregation specific Metadata addition
# For GSE85241
levels(GSE85241)
male <- c("D28.1", "D28.2", "D28.3", "D28.4", "D28.5", "D28.6", "D28.7", "D28.8",
"D29.1", "D29.2", "D29.3", "D29.4", "D29.5", "D29.6", "D29.7", "D29.8",
"D31.1", "D31.2", "D31.3", "D31.4", "D31.5", "D31.6", "D31.7", "D31.8")
female <- c("D30.1", "D30.2", "D30.3", "D30.4", "D30.5", "D30.6", "D30.7", "D30.8")
GSE85241@meta.data$sex[GSE85241@meta.data$orig.ident %in% male] <- "male"
GSE85241@meta.data$sex[GSE85241@meta.data$orig.ident %in% female] <- "female"
# For EMTAB5061
levels(EMTAB5061)
male <- c("AZ", "HP1502401", "HP1504101T2D", "HP1504901", "HP1507101", "HP1509101", "HP152301T2D")
female <- c("HP1506401", "HP1508501T2D", "HP1526901T2D")
EMTAB5061@meta.data$sex[EMTAB5061@meta.data$orig.ident %in% male] <- "male"
EMTAB5061@meta.data$sex[EMTAB5061@meta.data$orig.ident %in% female] <- "female"
# fOR GSE131886
levels(GSE131886)
male <- c("HPD3")
female <- c("HPD1", "HPD2")
GSE131886@meta.data$sex[GSE131886@meta.data$orig.ident %in% male] <- "male"
GSE131886@meta.data$sex[GSE131886@meta.data$orig.ident %in% female] <- "female"
# fOR GSE84133
levels(GSE84133)
male <- c("m1", "m3")
female <- c("f2", "f4")
GSE84133@meta.data$sex[GSE84133@meta.data$orig.ident %in% male] <- "male"
GSE84133@meta.data$sex[GSE84133@meta.data$orig.ident %in% female] <- "female"
# fOR GSE86469
levels(GSE86469)
male <- c("H1", "H2", "H3", "H4", "H6", "H7", "H8")
female <- c("H5", "H9", "H10", "H11", "H12", "H13")
GSE86469@meta.data$sex[GSE86469@meta.data$orig.ident %in% male] <- "male"
GSE86469@meta.data$sex[GSE86469@meta.data$orig.ident %in% female] <- "female"
# Ref-dataset specific Metadata addition
#GSE81076$ref <- "ref"
GSE85241$ref <- "ref"
GSE86469$ref <- "ref"
GSE84133$ref <- "ref"
EMTAB5061$ref <- "ref"
GSE131886$ref <- "ref"
adult_pancreas$ref <- "ref"
chronic_pancreatitis$ref <- "ref"
neonatal_pancreas$ref <- "ref"
#Subset out to only save male and female
Idents(pancreas.integrated) <- "sex"
pancreas.integrated <- subset(pancreas.integrated, idents = c("male", "female"))
# Create a list of datasets containing seurat objects
pancreas.list <- list(#"GSE81076" = GSE81076,
"GSE85241" =GSE85241, "GSE86469" = GSE86469,
"GSE84133" = GSE84133, "EMTAB5061" = EMTAB5061, "GSE131886" = GSE131886, "EGAS00001004653_adults" = adult_pancreas,
"EGAS00001004653_CP" = chronic_pancreatitis, "EGAS00001004653_NP" = neonatal_pancreas)
#,"panc_sex_cau_m1" = panc_sex_cau_m1, "panc_sex_cau_f1" = panc_sex_cau_f1)
pancreas.list
pancreas.list <- lapply(X = pancreas.list, FUN = function(x) {
x <- NormalizeData(x, verbose = TRUE)
x <- FindVariableFeatures(x, verbose = TRUE)
})
features <- SelectIntegrationFeatures(object.list = pancreas.list)
pancreas.list <- lapply(X = pancreas.list, FUN = function(x) {
x <- ScaleData(x, features = features, verbose = FALSE)
x <- RunPCA(x, features = features, verbose = FALSE)
})
anchors <- FindIntegrationAnchors(object.list = pancreas.list, reference = c(6, 7), reduction = "rpca",
dims = 1:50)
pancreas.integrated <- IntegrateData(anchorset = anchors, dims = 1:50)
pancreas.integrated <- ScaleData(pancreas.integrated, verbose = TRUE)
pancreas.integrated <- RunPCA(pancreas.integrated, verbose = TRUE)
pancreas.integrated <- RunUMAP(pancreas.integrated, dims = 1:50)
DimPlot(pancreas.integrated, group.by = "sample")
DimPlot(pancreas.integratedx, group.by = "sex")
# Remove NAs
pancreas.integratedx <- subset(pancreas.integrated, subset = sex != "NA")
pancreas.integrated <- pancreas.integratedx
# Normalize based on RNA
pancreas.integrated <- NormalizeData(pancreas.integrated, normalization.method = "LogNormalize", assay = "RNA", scale.factor = 1e4,
verbose = TRUE)
#Clustering
pancreas.integrated <- FindNeighbors(pancreas.integrated, dims = 1:30)
pancreas.integrated <- FindClusters(pancreas.integrated, resolution = 1.2)
# For UMAP visualization
DefaultAssay(object = pancreas.integrated) <- "RNA"
FeaturePlot(object = pancreas.integrated,
features = c("ADRB1"),
pt.size = 1,
cols = c("darkgrey", "red"),
min.cutoff = 0,
max.cutoff = 20,
order = TRUE)
# Visualization Clustering
plots <- DimPlot(pancreas.integrated, group.by = c("ref", "sample"))
plots & theme(legend.position = "right") & guides(color = guide_legend(nrow = 14, byrow = TRUE,
override.aes = list(size = 5)))
Idents(pancreas.integrated) <- "CellType"
DimPlot(pancreas.integrated, label = TRUE)
# Organize clusters
Idents(pancreas.integrated) <- "seurat_clusters"
plot <- DimPlot(pancreas.integrated, reduction = "umap")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Beta")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Alpha")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Delta")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Epsilon")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Gamma")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Ductal")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Acinar")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Ducto-Acinar")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Ducto-Endocrine")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Unclassified-Endocrine")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Bcells")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Macrophage")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Tcells")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Tuftcells")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Endothelial")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Quiescent stellate")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Activated stellate")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Schwann")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Mast")
levels(pancreas.integrated)
# Saving this information in the metadata slot
head(Idents(pancreas.integrated))
pancreas.integrated$CellType <- Idents(pancreas.integrated)
head(pancreas.integrated@meta.data)
# Run find variable features again running this is questionable, as only the var features from integrated data is useful
# But Seurat recommends re-running this
DefaultAssay(object = pancreas.integrated) <- "RNA"
pancreas.integrated <- FindVariableFeatures(pancreas.integrated, selection.method = "vst", nfeatures = 3000)
# Define an order of cluster identities remember after this step-
# cluster re-assignment occurs, which re-assigns clustering in my_levels
my_levels <- c("Beta", "Alpha", "Delta", "Gamma", "Epsilon",
"Ductal", "Acinar", "Quiescent stellate", "Activated stellate",
"Schwann", "Endothelial", "Macrophage", "Mast", "Tcells", "Bcells",
"Tuftcells")
head(pancreas.integrated@meta.data$CellType)
# Re-level object@meta.data this just orders the actual metadata slot, so when you pull its already ordered
pancreas.integrated@meta.data$CellType <- factor(x = pancreas.integrated@meta.data$CellType, levels = my_levels)
DimPlot(pancreas.integrated)
#Save Object
saveRDS(pancreas.integrated, "C:/Users/mqadir/Box/Lab 2301/sCell Analysis Project/Refrence Human Pancreas/scRNAseq datasets/Workspace/pancreas.integrated.rds")
pancreas.integrated <- readRDS("C:/Users/mqadir/Box/Lab 2301/sCell Analysis Project/Refrence Human Pancreas/scRNAseq datasets/Workspace/pancreas.integrated.rds")
# Subsetting Our cells out
sex <- subset(pancreas.integrated, subset = ref == "panc_sex")
DimPlot(sex)
# Check metadata
head(pancreas.integrated@meta.data)
table(pancreas.integrated$sample)
table(Idents(pancreas.integrated))
# Check activeidents
head(Idents(pancreas.integrated))
# Change active idents to CellType
Idents(pancreas.integrated) <- "sex"
# For UMAP visualization
DefaultAssay(object = pancreas.integrated) <- "RNA"
FeaturePlot(object = pancreas.integrated,
features = c("PGR"),
pt.size = 1,
cols = c("darkgrey", "red"),
min.cutoff = 0,
max.cutoff = 20,
order = FALSE)
# Visualize information
table(pancreas.integrated$sample)
DefaultAssay(object = pancreas.integrated) <- "RNA"
VlnPlot(pancreas.integrated, c("PGR"), group.by = "CellType", split.by = "sex", assay = "RNA", slot = "data", ncol = 1, pt.size = 1)
# Average expression of all cells within a cluster
males <- subset(pancreas.integrated, subset = (sex == "male"))
females <- subset(pancreas.integrated, subset = (sex == "female"))
Idents(female) <- "CellType"
Idents(males) <- "CellType"
cluster.averages.males <- AverageExpression(males)
cluster.averages.females <- AverageExpression(females)
head(cluster.averages.males[["RNA"]])
head(cluster.averages.females[["RNA"]])
cluster.averages.males[["RNA"]][c("PGR"),]
cluster.averages.females[["RNA"]][c("PGR"),]
# Issue 371
# Subset your cluster of interest for as an example I am subsetting a cluster called 'beta'
# The following creates a seurat object of only the cluster 'beta'
betacells <- subset(pancreas.integrated, subset = (CellType == c("Beta")) & (sex == "female") & (sample == "EGAS00001004653_CP"))
#betacells <- subset(pancreas.integrated, subset = (CellType == c("Beta")) & (sex == "female"))
betacells <- subset(pancreas.integrated, subset = (CellType == c("Alpha")) & (sample == "EGAS00001004653_CP"))
# Point your new cluster towards the object you will use to perform calculations.
# I like doing this because otherwise, you have to write lengths of redundant code
# Also I'm really lazy
ThisWayIsTotallyMentalButItWorks <- betacells
GOI1 <- 'ACE2' #you will have to name your first gene here, im choosing PDX1 as an example
GOI2 <- 'TMPRSS2' #you will have to name your first gene here, im choosing INS as an example
GOI1.cutoff <- .1
GOI2.cutoff <- .1
# Enjoy!
GOI1.cells <- length(which(FetchData(ThisWayIsTotallyMentalButItWorks, vars = GOI1) > GOI1.cutoff))
GOI2.cells <- length(which(FetchData(ThisWayIsTotallyMentalButItWorks, vars = GOI2) > GOI2.cutoff))
GOI1_GOI2.cells <- length(which(FetchData(ThisWayIsTotallyMentalButItWorks, vars = GOI2) > GOI2.cutoff & FetchData(ThisWayIsTotallyMentalButItWorks, vars = GOI1) > GOI1.cutoff))
all.cells.incluster <- table(ThisWayIsTotallyMentalButItWorks@active.ident)
GOI1.cells/all.cells.incluster*100 # Percentage of cells in Beta that express GOI1
GOI2.cells/all.cells.incluster*100 #Percentage of cells in Beta that express GOI2
GOI1_GOI2.cells/all.cells.incluster*100 #Percentage of cells in Beta that co-express GOI1 + GOI2
# Some cool code for total percentage (need to x100)
betacells <- subset(pancreas.integrated, subset = (sample == "EGAS00001004653_CP"))
PrctCellExpringGene <- function(object, genes, group.by = "all"){
if(group.by == "all"){
prct = unlist(lapply(genes,calc_helper, object=object))
result = data.frame(Markers = genes, Cell_proportion = prct)
return(result)
}
else{
list = SplitObject(object, group.by)
factors = names(list)
results = lapply(list, PrctCellExpringGene, genes=genes)
for(i in 1:length(factors)){
results[[i]]$Feature = factors[i]
}
combined = do.call("rbind", results)
return(combined)
}
}
calc_helper <- function(object,genes){
counts = object[['RNA']]@counts
ncells = ncol(counts)
if(genes %in% row.names(counts)){
sum(counts[genes,]>0)/ncells
}else{return(NA)}
}
PrctCellExpringGene(betacells, c("ACE2", "TMPRSS2"), group.by = "CellType")
calc_helper(pancreas.integrated, c("ACE2", "TMPRSS2"))
# Plotting one gene on a dimplot
betacells <- subset(pancreas.integrated, subset = (sex == "female"))
betacells <- subset(pancreas.integrated, subset = (sex == "female"))
FeaturePlot(object = betacells,
features = c("ACE2"),
pt.size = 1,
cols = c("darkgrey", "red"),
min.cutoff = 0,
max.cutoff = 3,
order = TRUE)
# Set cell identity to sample identity so that you can extraxt cell type information for plotting
Idents(object = pancreas.integrated) <- pancreas.integrated@meta.data$celltype
# How can I extract expression matrix for all beta cells
betacells <- subset(pancreas.integrated, idents = c("Beta"))
# Violin plot
DefaultAssay(object = betacells) <- "RNA"
VlnPlot(object = betacells, features = c("ACE2", "TMPRSS2"), group.by = "sample", slot = "data")
# How can I extract expression matrix for all beta cells
alphacells <- subset(pancreas.integrated, idents = c("alpha"))
# Violin plot
DefaultAssay(object = alphacells) <- "RNA"
Idents(pancreas.integrated) <- "sex"
VlnPlot(object = pancreas.integrated, features = c("XIST"), group.by = "sample", split.by = "sex", slot = "data")
# Set cell identity to sample identity
Idents(object = pancreas.integrated) <- pancreas.integrated@meta.data$celltype
# Find if SRD genes are differentially expressed
beta.integrated.markers <- FindAllMarkers(object = pancreas.integrated, slot = 'data', test.use = 'wilcox')
# How can I calculate the average expression of all cells within a cluster?
cluster.averages <- AverageExpression(pancreas.integrated, assay= "RNA", slot = "data")
head(cluster.averages[["RNA"]][c("ACE2", "TMPRSS2"), 1:14])
|
/Ref_panc.R
|
no_license
|
fmjlab/Pancreas_atlas_COVID19
|
R
| false
| false
| 17,978
|
r
|
# The following code allows for the analysis of 6 single cell RNAseq datasets of the human pancreas
# Information on these datasets can be found in the following locations:
# https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE81076
# https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE85241
# https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE86469
# https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE84133
# https://www.ebi.ac.uk/arrayexpress/experiments/E-MTAB-5061/
# This code was written by Fahd Qadir PhD. on 06/03/2020 email: mqadir@tulane.edu
# 1. installation and loading of packages
# Devtools
install.packages('devtools')
library(devtools)
# Seuratdata
devtools::install_github('satijalab/seurat-data')
# Seurat wrappers
devtools::install_github('satijalab/seurat-wrappers')
# Load packages
library(Seurat)
library(ggplot2)
library(patchwork)
library(SeuratData)
library(SeuratWrappers)
library(future)
# Set RAM to 50GB
# options(future.globals.maxSize = 40 * 1024^3)
# check the current active plan
# plan()
# change the current plan to access parallelization
# future::availableCores()
# future::availableWorkers()
# plan("multiprocess", workers = 15)
# plan()
# Loading of refrence datasets
#GSE81076 <- read.csv("C:/Users/mqadir/Box/Lab 2301/sCell Analysis Project/Refrence Human Pancreas/scRNAseq datasets/pancreas/GSE81076.csv", header = TRUE, sep = ",", row.names = 1)
GSE85241 <- read.csv("C:/Users/mqadir/Box/Lab 2301/sCell Analysis Project/Refrence Human Pancreas/scRNAseq datasets/pancreas/GSE85241.csv", header = TRUE, sep = ",", row.names = 1)
GSE86469 <- read.csv("C:/Users/mqadir/Box/Lab 2301/sCell Analysis Project/Refrence Human Pancreas/scRNAseq datasets/pancreas/GSE86469.csv", header = TRUE, sep = ",", row.names = 1)
GSE84133 <- read.csv("C:/Users/mqadir/Box/Lab 2301/sCell Analysis Project/Refrence Human Pancreas/scRNAseq datasets/pancreas/GSE84133.csv", header = TRUE, sep = ",", row.names = 1)
EMTAB5061 <- read.csv("C:/Users/mqadir/Box/Lab 2301/sCell Analysis Project/Refrence Human Pancreas/scRNAseq datasets/pancreas/EMTAB5061.csv", header = TRUE, sep = ",", row.names = 1)
GSE131886 <- read.csv("C:/Users/mqadir/Box/Lab 2301/sCell Analysis Project/Refrence Human Pancreas/scRNAseq datasets/pancreas/GSE131886.csv", header = TRUE, sep = ",", row.names = 1)
# Create Seurat objects
#GSE81076 <- CreateSeuratObject(counts = GSE81076, project = "SeuratProject", assay = "RNA", min.cells = 3, min.features = 200)
GSE85241 <- CreateSeuratObject(counts = GSE85241, project = "SeuratProject", assay = "RNA", min.cells = 3, min.features = 200)
GSE86469 <- CreateSeuratObject(counts = GSE86469, project = "SeuratProject", assay = "RNA", min.cells = 3, min.features = 200)
GSE84133 <- CreateSeuratObject(counts = GSE84133, project = "SeuratProject", assay = "RNA", min.cells = 3, min.features = 200)
EMTAB5061 <- CreateSeuratObject(counts = EMTAB5061, project = "SeuratProject", assay = "RNA", min.cells = 3, min.features = 200)
GSE131886 <- CreateSeuratObject(counts = GSE131886, project = "SeuratProject", assay = "RNA", min.cells = 3, min.features = 200)
# Load in Luca's data
adult_pancreas <- readRDS("C:/Users/mqadir/Box/Lab 2301/sCell Analysis Project/Refrence Human Pancreas/scRNAseq datasets/pancreas/adult_pancreas.rds")
chronic_pancreatitis <- readRDS("C:/Users/mqadir/Box/Lab 2301/sCell Analysis Project/Refrence Human Pancreas/scRNAseq datasets/pancreas/chronic_pancreatitis.rds")
neonatal_pancreas <- readRDS("C:/Users/mqadir/Box/Lab 2301/sCell Analysis Project/Refrence Human Pancreas/scRNAseq datasets/pancreas/neonatal_pancreas.rds")
# Sample specific Metadata addition
#GSE81076$sample <- "GSE81076"
GSE85241$sample <- "GSE85241"
GSE86469$sample <- "GSE86469"
GSE84133$sample <- "GSE84133"
EMTAB5061$sample <- "EMTAB5061"
GSE131886$sample <- "GSE131886"
adult_pancreas$sample <- "EGAS00001004653_adult"
chronic_pancreatitis$sample <- "EGAS00001004653_CP"
neonatal_pancreas$sample <- "EGAS00001004653_NP"
# Sex segregation specific Metadata addition
# For GSE85241
levels(GSE85241)
male <- c("D28.1", "D28.2", "D28.3", "D28.4", "D28.5", "D28.6", "D28.7", "D28.8",
"D29.1", "D29.2", "D29.3", "D29.4", "D29.5", "D29.6", "D29.7", "D29.8",
"D31.1", "D31.2", "D31.3", "D31.4", "D31.5", "D31.6", "D31.7", "D31.8")
female <- c("D30.1", "D30.2", "D30.3", "D30.4", "D30.5", "D30.6", "D30.7", "D30.8")
GSE85241@meta.data$sex[GSE85241@meta.data$orig.ident %in% male] <- "male"
GSE85241@meta.data$sex[GSE85241@meta.data$orig.ident %in% female] <- "female"
# For EMTAB5061
levels(EMTAB5061)
male <- c("AZ", "HP1502401", "HP1504101T2D", "HP1504901", "HP1507101", "HP1509101", "HP152301T2D")
female <- c("HP1506401", "HP1508501T2D", "HP1526901T2D")
EMTAB5061@meta.data$sex[EMTAB5061@meta.data$orig.ident %in% male] <- "male"
EMTAB5061@meta.data$sex[EMTAB5061@meta.data$orig.ident %in% female] <- "female"
# fOR GSE131886
levels(GSE131886)
male <- c("HPD3")
female <- c("HPD1", "HPD2")
GSE131886@meta.data$sex[GSE131886@meta.data$orig.ident %in% male] <- "male"
GSE131886@meta.data$sex[GSE131886@meta.data$orig.ident %in% female] <- "female"
# fOR GSE84133
levels(GSE84133)
male <- c("m1", "m3")
female <- c("f2", "f4")
GSE84133@meta.data$sex[GSE84133@meta.data$orig.ident %in% male] <- "male"
GSE84133@meta.data$sex[GSE84133@meta.data$orig.ident %in% female] <- "female"
# fOR GSE86469
levels(GSE86469)
male <- c("H1", "H2", "H3", "H4", "H6", "H7", "H8")
female <- c("H5", "H9", "H10", "H11", "H12", "H13")
GSE86469@meta.data$sex[GSE86469@meta.data$orig.ident %in% male] <- "male"
GSE86469@meta.data$sex[GSE86469@meta.data$orig.ident %in% female] <- "female"
# Ref-dataset specific Metadata addition
#GSE81076$ref <- "ref"
GSE85241$ref <- "ref"
GSE86469$ref <- "ref"
GSE84133$ref <- "ref"
EMTAB5061$ref <- "ref"
GSE131886$ref <- "ref"
adult_pancreas$ref <- "ref"
chronic_pancreatitis$ref <- "ref"
neonatal_pancreas$ref <- "ref"
#Subset out to only save male and female
Idents(pancreas.integrated) <- "sex"
pancreas.integrated <- subset(pancreas.integrated, idents = c("male", "female"))
# Create a list of datasets containing seurat objects
pancreas.list <- list(#"GSE81076" = GSE81076,
"GSE85241" =GSE85241, "GSE86469" = GSE86469,
"GSE84133" = GSE84133, "EMTAB5061" = EMTAB5061, "GSE131886" = GSE131886, "EGAS00001004653_adults" = adult_pancreas,
"EGAS00001004653_CP" = chronic_pancreatitis, "EGAS00001004653_NP" = neonatal_pancreas)
#,"panc_sex_cau_m1" = panc_sex_cau_m1, "panc_sex_cau_f1" = panc_sex_cau_f1)
pancreas.list
pancreas.list <- lapply(X = pancreas.list, FUN = function(x) {
x <- NormalizeData(x, verbose = TRUE)
x <- FindVariableFeatures(x, verbose = TRUE)
})
features <- SelectIntegrationFeatures(object.list = pancreas.list)
pancreas.list <- lapply(X = pancreas.list, FUN = function(x) {
x <- ScaleData(x, features = features, verbose = FALSE)
x <- RunPCA(x, features = features, verbose = FALSE)
})
anchors <- FindIntegrationAnchors(object.list = pancreas.list, reference = c(6, 7), reduction = "rpca",
dims = 1:50)
pancreas.integrated <- IntegrateData(anchorset = anchors, dims = 1:50)
pancreas.integrated <- ScaleData(pancreas.integrated, verbose = TRUE)
pancreas.integrated <- RunPCA(pancreas.integrated, verbose = TRUE)
pancreas.integrated <- RunUMAP(pancreas.integrated, dims = 1:50)
DimPlot(pancreas.integrated, group.by = "sample")
DimPlot(pancreas.integratedx, group.by = "sex")
# Remove NAs
pancreas.integratedx <- subset(pancreas.integrated, subset = sex != "NA")
pancreas.integrated <- pancreas.integratedx
# Normalize based on RNA
pancreas.integrated <- NormalizeData(pancreas.integrated, normalization.method = "LogNormalize", assay = "RNA", scale.factor = 1e4,
verbose = TRUE)
#Clustering
pancreas.integrated <- FindNeighbors(pancreas.integrated, dims = 1:30)
pancreas.integrated <- FindClusters(pancreas.integrated, resolution = 1.2)
# For UMAP visualization
DefaultAssay(object = pancreas.integrated) <- "RNA"
FeaturePlot(object = pancreas.integrated,
features = c("ADRB1"),
pt.size = 1,
cols = c("darkgrey", "red"),
min.cutoff = 0,
max.cutoff = 20,
order = TRUE)
# Visualization Clustering
plots <- DimPlot(pancreas.integrated, group.by = c("ref", "sample"))
plots & theme(legend.position = "right") & guides(color = guide_legend(nrow = 14, byrow = TRUE,
override.aes = list(size = 5)))
Idents(pancreas.integrated) <- "CellType"
DimPlot(pancreas.integrated, label = TRUE)
# Organize clusters
Idents(pancreas.integrated) <- "seurat_clusters"
plot <- DimPlot(pancreas.integrated, reduction = "umap")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Beta")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Alpha")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Delta")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Epsilon")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Gamma")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Ductal")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Acinar")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Ducto-Acinar")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Ducto-Endocrine")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Unclassified-Endocrine")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Bcells")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Macrophage")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Tcells")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Tuftcells")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Endothelial")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Quiescent stellate")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Activated stellate")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Schwann")
pancreas.integrated <- CellSelector(plot = plot, object = pancreas.integrated, ident = "Mast")
levels(pancreas.integrated)
# Saving this information in the metadata slot
head(Idents(pancreas.integrated))
pancreas.integrated$CellType <- Idents(pancreas.integrated)
head(pancreas.integrated@meta.data)
# Run find variable features again running this is questionable, as only the var features from integrated data is useful
# But Seurat recommends re-running this
DefaultAssay(object = pancreas.integrated) <- "RNA"
pancreas.integrated <- FindVariableFeatures(pancreas.integrated, selection.method = "vst", nfeatures = 3000)
# Define an order of cluster identities remember after this step-
# cluster re-assignment occurs, which re-assigns clustering in my_levels
my_levels <- c("Beta", "Alpha", "Delta", "Gamma", "Epsilon",
"Ductal", "Acinar", "Quiescent stellate", "Activated stellate",
"Schwann", "Endothelial", "Macrophage", "Mast", "Tcells", "Bcells",
"Tuftcells")
head(pancreas.integrated@meta.data$CellType)
# Re-level object@meta.data this just orders the actual metadata slot, so when you pull its already ordered
pancreas.integrated@meta.data$CellType <- factor(x = pancreas.integrated@meta.data$CellType, levels = my_levels)
# UMAP of the integrated pancreas object
DimPlot(pancreas.integrated)
# Save / reload the integrated object
saveRDS(pancreas.integrated, "C:/Users/mqadir/Box/Lab 2301/sCell Analysis Project/Refrence Human Pancreas/scRNAseq datasets/Workspace/pancreas.integrated.rds")
pancreas.integrated <- readRDS("C:/Users/mqadir/Box/Lab 2301/sCell Analysis Project/Refrence Human Pancreas/scRNAseq datasets/Workspace/pancreas.integrated.rds")
# Subset cells belonging to the "panc_sex" reference
sex <- subset(pancreas.integrated, subset = ref == "panc_sex")
DimPlot(sex)
# Check metadata
head(pancreas.integrated@meta.data)
table(pancreas.integrated$sample)
table(Idents(pancreas.integrated))
# Check active idents
head(Idents(pancreas.integrated))
# Change active idents to sex (the comment previously said "CellType", but the
# code sets "sex")
Idents(pancreas.integrated) <- "sex"
# For UMAP visualization, plot expression on the raw RNA assay
DefaultAssay(object = pancreas.integrated) <- "RNA"
FeaturePlot(object = pancreas.integrated,
            features = c("PGR"),
            pt.size = 1,
            cols = c("darkgrey", "red"),
            min.cutoff = 0,
            max.cutoff = 20,
            order = FALSE)
# Visualize information
table(pancreas.integrated$sample)
DefaultAssay(object = pancreas.integrated) <- "RNA"
VlnPlot(pancreas.integrated, c("PGR"), group.by = "CellType", split.by = "sex", assay = "RNA", slot = "data", ncol = 1, pt.size = 1)
# Average expression of all cells within a cluster, split by sex
males <- subset(pancreas.integrated, subset = (sex == "male"))
females <- subset(pancreas.integrated, subset = (sex == "female"))
# BUG FIX: was `Idents(female) <- "CellType"` -- no object named `female`
# exists; the subset above is named `females`.
Idents(females) <- "CellType"
Idents(males) <- "CellType"
cluster.averages.males <- AverageExpression(males)
cluster.averages.females <- AverageExpression(females)
head(cluster.averages.males[["RNA"]])
head(cluster.averages.females[["RNA"]])
cluster.averages.males[["RNA"]][c("PGR"), ]
cluster.averages.females[["RNA"]][c("PGR"), ]
# Issue 371
# Subset your cluster of interest; as an example I am subsetting a cluster
# called 'Beta'. The following creates a Seurat object of only that cluster.
# (== with a length-1 value; c("Beta") was redundant)
betacells <- subset(pancreas.integrated, subset = (CellType == "Beta") & (sex == "female") & (sample == "EGAS00001004653_CP"))
#betacells <- subset(pancreas.integrated, subset = (CellType == c("Beta")) & (sex == "female"))
# NOTE(review): this immediately overwrites the Beta subset above with an
# Alpha subset -- presumably intentional scratch-work; confirm before reuse.
betacells <- subset(pancreas.integrated, subset = (CellType == "Alpha") & (sample == "EGAS00001004653_CP"))
# Point your new cluster towards the object you will use to perform calculations.
# I like doing this because otherwise, you have to write lengths of redundant code
# Also I'm really lazy
ThisWayIsTotallyMentalButItWorks <- betacells
GOI1 <- 'ACE2'     # first gene of interest
GOI2 <- 'TMPRSS2'  # second gene of interest
GOI1.cutoff <- .1  # expression threshold above which a cell "expresses" GOI1
GOI2.cutoff <- .1  # expression threshold above which a cell "expresses" GOI2
# Count cells above threshold for each gene and for the co-expression case
GOI1.cells <- length(which(FetchData(ThisWayIsTotallyMentalButItWorks, vars = GOI1) > GOI1.cutoff))
GOI2.cells <- length(which(FetchData(ThisWayIsTotallyMentalButItWorks, vars = GOI2) > GOI2.cutoff))
GOI1_GOI2.cells <- length(which(FetchData(ThisWayIsTotallyMentalButItWorks, vars = GOI2) > GOI2.cutoff & FetchData(ThisWayIsTotallyMentalButItWorks, vars = GOI1) > GOI1.cutoff))
all.cells.incluster <- table(ThisWayIsTotallyMentalButItWorks@active.ident)
GOI1.cells / all.cells.incluster * 100      # % of cells in the subset expressing GOI1
GOI2.cells / all.cells.incluster * 100      # % of cells in the subset expressing GOI2
GOI1_GOI2.cells / all.cells.incluster * 100 # % of cells co-expressing GOI1 + GOI2
# Some cool code for total percentage (need to x100)
betacells <- subset(pancreas.integrated, subset = (sample == "EGAS00001004653_CP"))
# Proportion of cells expressing each gene (counts > 0), either over the whole
# object (group.by = "all") or separately per metadata group.
#
# object   -- Seurat object with an "RNA" assay
# genes    -- character vector of gene names
# group.by -- "all", or the name of a metadata column to split the object by
# Returns a data.frame with columns Markers and Cell_proportion (plus Feature,
# the group label, when grouped).
PrctCellExpringGene <- function(object, genes, group.by = "all"){
    if (group.by == "all") {
        prct = unlist(lapply(genes, calc_helper, object = object))
        result = data.frame(Markers = genes, Cell_proportion = prct)
        return(result)
    } else {
        # Split per group and recurse with the default group.by = "all".
        # (`split.by` named explicitly; also avoid shadowing base::list)
        obj.list = SplitObject(object, split.by = group.by)
        factors = names(obj.list)
        results = lapply(obj.list, PrctCellExpringGene, genes = genes)
        # seq_along() is safe when there are zero groups (1:length would not be)
        for (i in seq_along(factors)) {
            results[[i]]$Feature = factors[i]
        }
        combined = do.call("rbind", results)
        return(combined)
    }
}
# Fraction of cells with nonzero counts for the given gene(s).
# Returns a single proportion for one gene, or a named numeric vector when
# several genes are supplied (NA for genes absent from the count matrix).
# BUG FIX: the original `if (genes %in% ...)` used a vector condition when
# called with multiple genes (as done later in this script), which errors in
# R >= 4.2; vector input is now handled explicitly.
calc_helper <- function(object, genes){
    counts = object[['RNA']]@counts
    ncells = ncol(counts)
    if (length(genes) == 1) {
        # Original scalar behavior, preserved for backward compatibility
        if (genes %in% row.names(counts)) {
            sum(counts[genes, ] > 0) / ncells
        } else {
            return(NA)
        }
    } else {
        vapply(genes, function(g) {
            if (g %in% row.names(counts)) sum(counts[g, ] > 0) / ncells else NA_real_
        }, numeric(1))
    }
}
# Per-CellType expression proportions for ACE2 / TMPRSS2
PrctCellExpringGene(betacells, c("ACE2", "TMPRSS2"), group.by = "CellType")
calc_helper(pancreas.integrated, c("ACE2", "TMPRSS2"))
# Plotting one gene on a dimplot
betacells <- subset(pancreas.integrated, subset = (sex == "female"))
FeaturePlot(object = betacells,
            features = c("ACE2"),
            pt.size = 1,
            cols = c("darkgrey", "red"),
            min.cutoff = 0,
            max.cutoff = 3,
            order = TRUE)
# Set cell identity to cell-type identity so cluster subsets can be extracted.
# BUG FIX: the metadata column is called "CellType" everywhere else in this
# script; `$celltype` (lower case) would return NULL and wipe the idents.
Idents(object = pancreas.integrated) <- pancreas.integrated@meta.data$CellType
# Extract expression matrix for all beta cells
betacells <- subset(pancreas.integrated, idents = c("Beta"))
# Violin plot
DefaultAssay(object = betacells) <- "RNA"
VlnPlot(object = betacells, features = c("ACE2", "TMPRSS2"), group.by = "sample", slot = "data")
# Extract expression matrix for all alpha cells
# BUG FIX: ident label is capitalised "Alpha" elsewhere in this script
alphacells <- subset(pancreas.integrated, idents = c("Alpha"))
# Violin plot
DefaultAssay(object = alphacells) <- "RNA"
Idents(pancreas.integrated) <- "sex"
VlnPlot(object = pancreas.integrated, features = c("XIST"), group.by = "sample", split.by = "sex", slot = "data")
# Set cell identity back to cell-type identity
Idents(object = pancreas.integrated) <- pancreas.integrated@meta.data$CellType
# Find if SRD genes are differentially expressed
beta.integrated.markers <- FindAllMarkers(object = pancreas.integrated, slot = 'data', test.use = 'wilcox')
# Average expression of all cells within each cluster
cluster.averages <- AverageExpression(pancreas.integrated, assay = "RNA", slot = "data")
head(cluster.averages[["RNA"]][c("ACE2", "TMPRSS2"), 1:14])
|
# Computes additional molecular attributes for molecules.
#
# moldbase -- list of molecules; each molecule is a list with $atoms (each a
#             list carrying at least $nh hydrogen count and $ch formal charge)
#             and $bonds (each a list with $at1/$at2 atom indices and $bo
#             bond order; 4 encodes an aromatic bond).
# base     -- when TRUE, compute the base per-atom attributes below.
# Returns a copy of moldbase with the attributes added.
add_mol_attribs <- function(moldbase, base=TRUE) {
  # Adds to atoms:
  #   vd_ - vertex degree
  #   va_ - valence
  #   pi_ - the number of pi-electrons
  #   ar_ - aromaticity
  #   ne_ - vector of neighbours
  #   bo_ - vector of bond orders with neighbours
  add_base_mol_attribs <- function(mdb) {
    # seq_len() keeps the loops safe for an empty database / molecule
    # (1:0 would iterate twice).
    for (imol in seq_len(nmol)) {
      mol <- mdb[[imol]]
      natoms <- length(mol$atoms)
      nbonds <- length(mol$bonds)
      if (natoms > 0) {
        # Initialise attributes from atom-local data only
        for (iatom in seq_len(natoms)) {
          atom <- mol$atoms[[iatom]]
          atom$vd_ <- 0
          atom$va_ <- atom$nh + abs(atom$ch)
          atom$pi_ <- 0
          atom$ar_ <- FALSE
          atom$ne_ <- integer()
          atom$bo_ <- integer()
          mdb[[imol]]$atoms[[iatom]] <- atom
        }
      }
      if (nbonds > 0) {
        # Accumulate bond contributions into both endpoint atoms
        for (ibond in seq_len(nbonds)) {
          bond <- mdb[[imol]]$bonds[[ibond]]
          atom1 <- mdb[[imol]]$atoms[[bond$at1]]
          atom2 <- mdb[[imol]]$atoms[[bond$at2]]
          atom1$vd_ <- atom1$vd_ + 1
          atom2$vd_ <- atom2$vd_ + 1
          if (bond$bo < 4) {
            atom1$va_ <- atom1$va_ + bond$bo
            atom2$va_ <- atom2$va_ + bond$bo
            atom1$pi_ <- atom1$pi_ + bond$bo - 1
            atom2$pi_ <- atom2$pi_ + bond$bo - 1
          } else if (bond$bo == 4) {
            # Aromatic bond: each endpoint gets half-integral valence and
            # exactly one pi electron (set, not incremented)
            atom1$va_ <- atom1$va_ + 1.5
            atom2$va_ <- atom2$va_ + 1.5
            atom1$pi_ <- 1
            atom2$pi_ <- 1
            atom1$ar_ <- TRUE
            atom2$ar_ <- TRUE
          }
          atom1$ne_[atom1$vd_] <- bond$at2
          atom2$ne_[atom2$vd_] <- bond$at1
          atom1$bo_[atom1$vd_] <- bond$bo
          atom2$bo_[atom2$vd_] <- bond$bo
          mdb[[imol]]$atoms[[bond$at1]] <- atom1
          mdb[[imol]]$atoms[[bond$at2]] <- atom2
        }
        # Round the 4.5 aromatic-junction valence down to 4.
        # BUG FIX: the original read from the stale `mol` copy, assigned with
        # `<<-` (leaking `atom` into the global env), and never wrote the
        # corrected atom back into `mdb`, so the 4.5 -> 4 fix was lost.
        for (iatom in seq_len(natoms)) {
          atom <- mdb[[imol]]$atoms[[iatom]]
          if (atom$va_ == 4.5) {
            atom$va_ <- 4
            mdb[[imol]]$atoms[[iatom]] <- atom
          }
        }
      }
    }
    mdb
  }
  nmol <- length(moldbase)
  moldbase1 <- moldbase
  if (base) {
    moldbase1 <- add_base_mol_attribs(moldbase1)
  }
  moldbase1
}
|
/cinf-molattribs.R
|
no_license
|
Gvein/DiplomaKarpov2018
|
R
| false
| false
| 1,963
|
r
|
# Computes additional molecular attributes for molecules.
#
# moldbase -- list of molecules; each molecule is a list with $atoms (each a
#             list carrying at least $nh hydrogen count and $ch formal charge)
#             and $bonds (each a list with $at1/$at2 atom indices and $bo
#             bond order; 4 encodes an aromatic bond).
# base     -- when TRUE, compute the base per-atom attributes below.
# Returns a copy of moldbase with the attributes added.
add_mol_attribs <- function(moldbase, base=TRUE) {
  # Adds to atoms:
  #   vd_ - vertex degree
  #   va_ - valence
  #   pi_ - the number of pi-electrons
  #   ar_ - aromaticity
  #   ne_ - vector of neighbours
  #   bo_ - vector of bond orders with neighbours
  add_base_mol_attribs <- function(mdb) {
    # seq_len() keeps the loops safe for an empty database / molecule
    # (1:0 would iterate twice).
    for (imol in seq_len(nmol)) {
      mol <- mdb[[imol]]
      natoms <- length(mol$atoms)
      nbonds <- length(mol$bonds)
      if (natoms > 0) {
        # Initialise attributes from atom-local data only
        for (iatom in seq_len(natoms)) {
          atom <- mol$atoms[[iatom]]
          atom$vd_ <- 0
          atom$va_ <- atom$nh + abs(atom$ch)
          atom$pi_ <- 0
          atom$ar_ <- FALSE
          atom$ne_ <- integer()
          atom$bo_ <- integer()
          mdb[[imol]]$atoms[[iatom]] <- atom
        }
      }
      if (nbonds > 0) {
        # Accumulate bond contributions into both endpoint atoms
        for (ibond in seq_len(nbonds)) {
          bond <- mdb[[imol]]$bonds[[ibond]]
          atom1 <- mdb[[imol]]$atoms[[bond$at1]]
          atom2 <- mdb[[imol]]$atoms[[bond$at2]]
          atom1$vd_ <- atom1$vd_ + 1
          atom2$vd_ <- atom2$vd_ + 1
          if (bond$bo < 4) {
            atom1$va_ <- atom1$va_ + bond$bo
            atom2$va_ <- atom2$va_ + bond$bo
            atom1$pi_ <- atom1$pi_ + bond$bo - 1
            atom2$pi_ <- atom2$pi_ + bond$bo - 1
          } else if (bond$bo == 4) {
            # Aromatic bond: each endpoint gets half-integral valence and
            # exactly one pi electron (set, not incremented)
            atom1$va_ <- atom1$va_ + 1.5
            atom2$va_ <- atom2$va_ + 1.5
            atom1$pi_ <- 1
            atom2$pi_ <- 1
            atom1$ar_ <- TRUE
            atom2$ar_ <- TRUE
          }
          atom1$ne_[atom1$vd_] <- bond$at2
          atom2$ne_[atom2$vd_] <- bond$at1
          atom1$bo_[atom1$vd_] <- bond$bo
          atom2$bo_[atom2$vd_] <- bond$bo
          mdb[[imol]]$atoms[[bond$at1]] <- atom1
          mdb[[imol]]$atoms[[bond$at2]] <- atom2
        }
        # Round the 4.5 aromatic-junction valence down to 4.
        # BUG FIX: the original read from the stale `mol` copy, assigned with
        # `<<-` (leaking `atom` into the global env), and never wrote the
        # corrected atom back into `mdb`, so the 4.5 -> 4 fix was lost.
        for (iatom in seq_len(natoms)) {
          atom <- mdb[[imol]]$atoms[[iatom]]
          if (atom$va_ == 4.5) {
            atom$va_ <- 4
            mdb[[imol]]$atoms[[iatom]] <- atom
          }
        }
      }
    }
    mdb
  }
  nmol <- length(moldbase)
  moldbase1 <- moldbase
  if (base) {
    moldbase1 <- add_base_mol_attribs(moldbase1)
  }
  moldbase1
}
|
# xgboost implemented on 34 features selected using random forest; writes a
# Kaggle-style submission file.
# Saniya Ambavanekar
library(caret)
library(data.table)
library(xgboost)

# Load the balanced training data
xtrain_data <- fread("balanced_data_3.csv", stringsAsFactors = TRUE)
View(xtrain_data)
str(xtrain_data)

# 75/25 train/test split
smpl_size <- floor(0.75 * nrow(xtrain_data))
set.seed(123)
indx <- sample(seq_len(nrow(xtrain_data)), size = smpl_size)
xtrain <- as.data.frame(xtrain_data[indx, ])
xtest <- as.data.frame(xtrain_data[-indx, ])

# Removing id columns and saving the target
xtrain$V1 <- NULL
xtrain$id <- NULL
xtrain_target <- xtrain$target
xtrain$target <- NULL
xtrain$ps_car_11_cat <- NULL
# Same for the held-out test split
xtest$V1 <- NULL
xtest$id <- NULL
xtest_target <- xtest$target
xtest$target <- NULL
xtest$ps_car_11_cat <- NULL

# Names of the categorical and binary feature columns
colnames_cat <- grep("_cat", names(xtrain), value = TRUE)
colnames_bin <- grep("_bin", names(xtrain), value = TRUE)
colnames_cat
colnames_bin

# Convert factor-coded categorical/binary columns to numeric for xgboost
# (as.numeric(levels(f))[f] recovers the numeric codes stored as factor levels)
for (i in seq_along(colnames_cat)) {
  vec <- xtrain[, colnames_cat[i]]
  vec <- as.numeric(levels(vec))[vec]
  xtrain[, colnames_cat[i]] <- vec
}
for (i in seq_along(colnames_bin)) {
  vec <- xtrain[, colnames_bin[i]]
  vec <- as.numeric(levels(vec))[vec]
  xtrain[, colnames_bin[i]] <- vec
}
str(xtrain)

# Converting target to numeric 0/1
xtrain_target <- as.numeric(levels(xtrain_target))[xtrain_target]
xtest_target <- as.numeric(levels(xtest_target))[xtest_target]

# Applying xgboost: build DMatrix inputs
xtrain <- data.table(xtrain)
xtest <- data.table(xtest)
dtrain <- data.matrix(xtrain)
dtest <- data.matrix(xtest)
# Selecting appropriate params
params <- list(booster = "gbtree", objective = "binary:logistic", eta = 0.3,
               gamma = 5, max_depth = 6, min_child_weight = 1, subsample = 1, colsample_bytree = 1)
dtrain <- xgb.DMatrix(dtrain, label = xtrain_target)
dtest <- xgb.DMatrix(dtest, label = xtest_target)

# Cross-validation xgboost (5-fold)
xgbcv <- xgb.cv(params = params, data = dtrain, nrounds = 100, nfold = 5,
                showsd = TRUE, stratified = TRUE, print_every_n = 10,
                early_stopping_rounds = 20, maximize = FALSE)

# Implementing xgboost.
# BUG FIX: the argument name is early_stopping_rounds; the misspelled
# `early_stop_round` was silently ignored, so no early stopping happened.
xgb1 <- xgb.train(params = params, data = dtrain, nrounds = 500,
                  watchlist = list(val = dtest, train = dtrain), print_every_n = 10,
                  early_stopping_rounds = 10, maximize = FALSE, eval_metric = "error")

# Confusion matrix on the held-out split.
# caret::confusionMatrix expects factors with matching levels, not numerics.
xgbpred <- predict(xgb1, dtest)
xgbpred <- ifelse(xgbpred > 0.5, 1, 0)
xgbconf <- confusionMatrix(factor(xgbpred, levels = c(0, 1)),
                           factor(xtest_target, levels = c(0, 1)))
print(xgbconf)

# Predicting for the real test dataset
realtest <- fread("test_new.csv", stringsAsFactors = TRUE)
View(realtest)
str(realtest)
realtest <- as.data.frame(realtest)
realtest <- data.matrix(realtest)
realtest <- xgb.DMatrix(realtest)
xgbpred1 <- predict(xgb1, realtest)
xgbpred1
xgbpred2 <- round(xgbpred1, 3)

# Writing the submission file
testid <- read.csv("test_id.csv", header = TRUE)
View(testid)
final_xg <- cbind(testid$x, xgbpred1)
colnames(final_xg) <- c("id", "target")
write.csv(final_xg, "xgsubmission1.csv", row.names = FALSE)
|
/xgboost_code.R
|
no_license
|
DrRoad/Porto-Seguro-Safe-Driver-Prediction
|
R
| false
| false
| 3,068
|
r
|
# xgboost implemented on 34 features selected using random forest; writes a
# Kaggle-style submission file.
# Saniya Ambavanekar
library(caret)
library(data.table)
library(xgboost)

# Load the balanced training data
xtrain_data <- fread("balanced_data_3.csv", stringsAsFactors = TRUE)
View(xtrain_data)
str(xtrain_data)

# 75/25 train/test split
smpl_size <- floor(0.75 * nrow(xtrain_data))
set.seed(123)
indx <- sample(seq_len(nrow(xtrain_data)), size = smpl_size)
xtrain <- as.data.frame(xtrain_data[indx, ])
xtest <- as.data.frame(xtrain_data[-indx, ])

# Removing id columns and saving the target
xtrain$V1 <- NULL
xtrain$id <- NULL
xtrain_target <- xtrain$target
xtrain$target <- NULL
xtrain$ps_car_11_cat <- NULL
# Same for the held-out test split
xtest$V1 <- NULL
xtest$id <- NULL
xtest_target <- xtest$target
xtest$target <- NULL
xtest$ps_car_11_cat <- NULL

# Names of the categorical and binary feature columns
colnames_cat <- grep("_cat", names(xtrain), value = TRUE)
colnames_bin <- grep("_bin", names(xtrain), value = TRUE)
colnames_cat
colnames_bin

# Convert factor-coded categorical/binary columns to numeric for xgboost
# (as.numeric(levels(f))[f] recovers the numeric codes stored as factor levels)
for (i in seq_along(colnames_cat)) {
  vec <- xtrain[, colnames_cat[i]]
  vec <- as.numeric(levels(vec))[vec]
  xtrain[, colnames_cat[i]] <- vec
}
for (i in seq_along(colnames_bin)) {
  vec <- xtrain[, colnames_bin[i]]
  vec <- as.numeric(levels(vec))[vec]
  xtrain[, colnames_bin[i]] <- vec
}
str(xtrain)

# Converting target to numeric 0/1
xtrain_target <- as.numeric(levels(xtrain_target))[xtrain_target]
xtest_target <- as.numeric(levels(xtest_target))[xtest_target]

# Applying xgboost: build DMatrix inputs
xtrain <- data.table(xtrain)
xtest <- data.table(xtest)
dtrain <- data.matrix(xtrain)
dtest <- data.matrix(xtest)
# Selecting appropriate params
params <- list(booster = "gbtree", objective = "binary:logistic", eta = 0.3,
               gamma = 5, max_depth = 6, min_child_weight = 1, subsample = 1, colsample_bytree = 1)
dtrain <- xgb.DMatrix(dtrain, label = xtrain_target)
dtest <- xgb.DMatrix(dtest, label = xtest_target)

# Cross-validation xgboost (5-fold)
xgbcv <- xgb.cv(params = params, data = dtrain, nrounds = 100, nfold = 5,
                showsd = TRUE, stratified = TRUE, print_every_n = 10,
                early_stopping_rounds = 20, maximize = FALSE)

# Implementing xgboost.
# BUG FIX: the argument name is early_stopping_rounds; the misspelled
# `early_stop_round` was silently ignored, so no early stopping happened.
xgb1 <- xgb.train(params = params, data = dtrain, nrounds = 500,
                  watchlist = list(val = dtest, train = dtrain), print_every_n = 10,
                  early_stopping_rounds = 10, maximize = FALSE, eval_metric = "error")

# Confusion matrix on the held-out split.
# caret::confusionMatrix expects factors with matching levels, not numerics.
xgbpred <- predict(xgb1, dtest)
xgbpred <- ifelse(xgbpred > 0.5, 1, 0)
xgbconf <- confusionMatrix(factor(xgbpred, levels = c(0, 1)),
                           factor(xtest_target, levels = c(0, 1)))
print(xgbconf)

# Predicting for the real test dataset
realtest <- fread("test_new.csv", stringsAsFactors = TRUE)
View(realtest)
str(realtest)
realtest <- as.data.frame(realtest)
realtest <- data.matrix(realtest)
realtest <- xgb.DMatrix(realtest)
xgbpred1 <- predict(xgb1, realtest)
xgbpred1
xgbpred2 <- round(xgbpred1, 3)

# Writing the submission file
testid <- read.csv("test_id.csv", header = TRUE)
View(testid)
final_xg <- cbind(testid$x, xgbpred1)
colnames(final_xg) <- c("id", "target")
write.csv(final_xg, "xgsubmission1.csv", row.names = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eeptools-package.r
\docType{data}
\name{stulevel}
\alias{stulevel}
\title{A synthetic data set of K-12 student attributes.}
\format{A data frame with 2700 observations on the following 32 variables.
\describe{
\item{\code{X}}{a numeric vector}
\item{\code{school}}{a numeric vector}
\item{\code{stuid}}{a numeric vector}
\item{\code{grade}}{a numeric vector}
\item{\code{schid}}{a numeric vector}
\item{\code{dist}}{a numeric vector}
\item{\code{white}}{a numeric vector}
\item{\code{black}}{a numeric vector}
\item{\code{hisp}}{a numeric vector}
\item{\code{indian}}{a numeric vector}
\item{\code{asian}}{a numeric vector}
\item{\code{econ}}{a numeric vector}
\item{\code{female}}{a numeric vector}
\item{\code{ell}}{a numeric vector}
\item{\code{disab}}{a numeric vector}
\item{\code{sch_fay}}{a numeric vector}
\item{\code{dist_fay}}{a numeric vector}
\item{\code{luck}}{a numeric vector}
\item{\code{ability}}{a numeric vector}
\item{\code{measerr}}{a numeric vector}
\item{\code{teachq}}{a numeric vector}
\item{\code{year}}{a numeric vector}
\item{\code{attday}}{a numeric vector}
\item{\code{schoolscore}}{a numeric vector}
\item{\code{district}}{a numeric vector}
\item{\code{schoolhigh}}{a numeric vector}
\item{\code{schoolavg}}{a numeric vector}
\item{\code{schoollow}}{a numeric vector}
\item{\code{readSS}}{a numeric vector}
\item{\code{mathSS}}{a numeric vector}
\item{\code{proflvl}}{a factor with levels \code{advanced} \code{basic} \code{below basic} \code{proficient}}
\item{\code{race}}{a factor with levels \code{A} \code{B} \code{H} \code{I} \code{W}}
}}
\source{
The script to generate this synthetic dataset can be found and modified
at \url{https://github.com/jknowles/r_tutorial_ed}
}
\usage{
stulevel
}
\description{
A small dataset of synthetic data on K-12 students with 2700
observations. 1200 individual students are represented, nested within
4 districts and 2 schools.
}
\details{
This data is synthetically generated to reflect student test scores
and demographic attributes.
}
\examples{
data(stulevel)
head(stulevel)
}
\keyword{datasets}
|
/man/stulevel.Rd
|
no_license
|
nutterb/eeptools
|
R
| false
| true
| 2,188
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eeptools-package.r
\docType{data}
\name{stulevel}
\alias{stulevel}
\title{A synthetic data set of K-12 student attributes.}
\format{A data frame with 2700 observations on the following 32 variables.
\describe{
\item{\code{X}}{a numeric vector}
\item{\code{school}}{a numeric vector}
\item{\code{stuid}}{a numeric vector}
\item{\code{grade}}{a numeric vector}
\item{\code{schid}}{a numeric vector}
\item{\code{dist}}{a numeric vector}
\item{\code{white}}{a numeric vector}
\item{\code{black}}{a numeric vector}
\item{\code{hisp}}{a numeric vector}
\item{\code{indian}}{a numeric vector}
\item{\code{asian}}{a numeric vector}
\item{\code{econ}}{a numeric vector}
\item{\code{female}}{a numeric vector}
\item{\code{ell}}{a numeric vector}
\item{\code{disab}}{a numeric vector}
\item{\code{sch_fay}}{a numeric vector}
\item{\code{dist_fay}}{a numeric vector}
\item{\code{luck}}{a numeric vector}
\item{\code{ability}}{a numeric vector}
\item{\code{measerr}}{a numeric vector}
\item{\code{teachq}}{a numeric vector}
\item{\code{year}}{a numeric vector}
\item{\code{attday}}{a numeric vector}
\item{\code{schoolscore}}{a numeric vector}
\item{\code{district}}{a numeric vector}
\item{\code{schoolhigh}}{a numeric vector}
\item{\code{schoolavg}}{a numeric vector}
\item{\code{schoollow}}{a numeric vector}
\item{\code{readSS}}{a numeric vector}
\item{\code{mathSS}}{a numeric vector}
\item{\code{proflvl}}{a factor with levels \code{advanced} \code{basic} \code{below basic} \code{proficient}}
\item{\code{race}}{a factor with levels \code{A} \code{B} \code{H} \code{I} \code{W}}
}}
\source{
The script to generate this synthetic dataset can be found and modified
at \url{https://github.com/jknowles/r_tutorial_ed}
}
\usage{
stulevel
}
\description{
A small dataset of synthetic data on K-12 students with 2700
observations. 1200 individual students are represented, nested within
4 districts and 2 schools.
}
\details{
This data is synthetically generated to reflect student test scores
and demographic attributes.
}
\examples{
data(stulevel)
head(stulevel)
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qs.text.R
\name{qs.text.string}
\alias{qs.text.string}
\title{String Output}
\usage{
qs.text.string(qs, varname = "question.name",
question.text = "question.text")
}
\arguments{
\item{qs}{Questions data frame}
\item{varname}{Character vector referring to the question name column}
\item{question.text}{Character vector referring to the question text column}
}
\description{
This function behaves the same as qs.text, but it manipulates a character vector of format strings
}
|
/man/qs.text.string.Rd
|
no_license
|
Boshoffsmit/novaReport
|
R
| false
| true
| 559
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qs.text.R
\name{qs.text.string}
\alias{qs.text.string}
\title{String Output}
\usage{
qs.text.string(qs, varname = "question.name",
question.text = "question.text")
}
\arguments{
\item{qs}{Questions data frame}
\item{varname}{Character vector referring to the question name column}
\item{question.text}{Character vector referring to the question text column}
}
\description{
This function behaves the same as qs.text, but it manipulates a character vector of format strings
}
|
# Author: Robert J. Hijmans
# Date: November 2009, Jan 2016
# Version 1.0
# Licence GPL v3

# Coerce Raster cell values to integer (via as.integer).
# Small inputs are converted in memory; larger ones are streamed block by
# block with writeStart/writeValues/writeStop. Unless the caller passes a
# datatype in `...`, the output file is written as 'INT4S'.
setMethod('as.integer', signature(x='Raster'),
function(x, filename='', ...) {
# Output template with the same geometry: brick for multi-layer input
if (nlayers(x) > 1) {
out <- brick(x, values=FALSE)
} else {
out <- raster(x)
}
# Peek at `...` so datatype is not passed twice to the write functions
datatype <- list(...)$datatype
if (canProcessInMemory(x, 2)){
x <- getValues(x)
# `x[] <-` replaces values while keeping the vector/matrix shape
x[] <- as.integer(x)
out <- setValues(out, x)
if (filename != '') {
if (is.null(datatype)) {
# default integer storage type
out <- writeRaster(out, filename, datatype='INT4S', ...)
} else {
# caller-supplied datatype is already inside `...`
out <- writeRaster(out, filename, ...)
}
}
return(out)
} else {
# Too large for memory: stream to disk (temp file if none was given)
if (filename == '') {
filename <- rasterTmpFile()
}
if (is.null(datatype)) {
out <- writeStart(out, filename=filename, datatype='INT4S', ...)
} else {
out <- writeStart(out, filename=filename, ...)
}
# Process in row blocks, with a progress bar
tr <- blockSize(x)
pb <- pbCreate(tr$n, ...)
for (i in 1:tr$n) {
v <- as.integer( getValuesBlock(x, row=tr$row[i], nrows=tr$nrows[i] ) )
out <- writeValues(out, v, tr$row[i])
pbStep(pb, i)
}
pbClose(pb)
out <- writeStop(out)
return(out)
}
}
)
# Coerce Raster cell values to logical (via as.logical; stored numerically
# as 0/1). Mirrors the as.integer method above, but the default on-disk
# datatype is the smaller 'INT2S'.
setMethod('as.logical', signature(x='Raster'),
function(x, filename='', ...) {
# Output template with the same geometry: brick for multi-layer input
if (nlayers(x) > 1) {
out <- brick(x, values=FALSE)
} else {
out <- raster(x)
}
# Peek at `...` so datatype is not passed twice to the write functions
datatype <- list(...)$datatype
if (canProcessInMemory(x, 2)){
x <- getValues(x)
# `x[] <-` keeps the numeric container; logicals are coerced back to 0/1
x[] <- as.logical(x)
out <- setValues(out, x)
if (filename != '') {
if (is.null(datatype)) {
# default storage type for 0/1 values
out <- writeRaster(out, filename, datatype='INT2S', ...)
} else {
# caller-supplied datatype is already inside `...`
out <- writeRaster(out, filename, ...)
}
}
return(out)
} else {
# Too large for memory: stream to disk (temp file if none was given)
if (filename == '') {
filename <- rasterTmpFile()
}
if (is.null(datatype)) {
out <- writeStart(out, filename=filename, datatype='INT2S', ...)
} else {
out <- writeStart(out, filename=filename, ...)
}
# Process in row blocks, with a progress bar
tr <- blockSize(x)
pb <- pbCreate(tr$n, ...)
for (i in 1:tr$n) {
v <- as.logical ( getValuesBlock(x, row=tr$row[i], nrows=tr$nrows[i] ) )
out <- writeValues(out, v, tr$row[i])
pbStep(pb, i)
}
pbClose(pb)
out <- writeStop(out)
return(out)
}
}
)
|
/R/as.logical.R
|
no_license
|
cran/raster
|
R
| false
| false
| 2,253
|
r
|
# Author: Robert J. Hijmans
# Date: November 2009, Jan 2016
# Version 1.0
# Licence GPL v3

# Coerce Raster cell values to integer (via as.integer).
# Small inputs are converted in memory; larger ones are streamed block by
# block with writeStart/writeValues/writeStop. Unless the caller passes a
# datatype in `...`, the output file is written as 'INT4S'.
setMethod('as.integer', signature(x='Raster'),
function(x, filename='', ...) {
# Output template with the same geometry: brick for multi-layer input
if (nlayers(x) > 1) {
out <- brick(x, values=FALSE)
} else {
out <- raster(x)
}
# Peek at `...` so datatype is not passed twice to the write functions
datatype <- list(...)$datatype
if (canProcessInMemory(x, 2)){
x <- getValues(x)
# `x[] <-` replaces values while keeping the vector/matrix shape
x[] <- as.integer(x)
out <- setValues(out, x)
if (filename != '') {
if (is.null(datatype)) {
# default integer storage type
out <- writeRaster(out, filename, datatype='INT4S', ...)
} else {
# caller-supplied datatype is already inside `...`
out <- writeRaster(out, filename, ...)
}
}
return(out)
} else {
# Too large for memory: stream to disk (temp file if none was given)
if (filename == '') {
filename <- rasterTmpFile()
}
if (is.null(datatype)) {
out <- writeStart(out, filename=filename, datatype='INT4S', ...)
} else {
out <- writeStart(out, filename=filename, ...)
}
# Process in row blocks, with a progress bar
tr <- blockSize(x)
pb <- pbCreate(tr$n, ...)
for (i in 1:tr$n) {
v <- as.integer( getValuesBlock(x, row=tr$row[i], nrows=tr$nrows[i] ) )
out <- writeValues(out, v, tr$row[i])
pbStep(pb, i)
}
pbClose(pb)
out <- writeStop(out)
return(out)
}
}
)
# Coerce Raster cell values to logical (via as.logical; stored numerically
# as 0/1). Mirrors the as.integer method above, but the default on-disk
# datatype is the smaller 'INT2S'.
setMethod('as.logical', signature(x='Raster'),
function(x, filename='', ...) {
# Output template with the same geometry: brick for multi-layer input
if (nlayers(x) > 1) {
out <- brick(x, values=FALSE)
} else {
out <- raster(x)
}
# Peek at `...` so datatype is not passed twice to the write functions
datatype <- list(...)$datatype
if (canProcessInMemory(x, 2)){
x <- getValues(x)
# `x[] <-` keeps the numeric container; logicals are coerced back to 0/1
x[] <- as.logical(x)
out <- setValues(out, x)
if (filename != '') {
if (is.null(datatype)) {
# default storage type for 0/1 values
out <- writeRaster(out, filename, datatype='INT2S', ...)
} else {
# caller-supplied datatype is already inside `...`
out <- writeRaster(out, filename, ...)
}
}
return(out)
} else {
# Too large for memory: stream to disk (temp file if none was given)
if (filename == '') {
filename <- rasterTmpFile()
}
if (is.null(datatype)) {
out <- writeStart(out, filename=filename, datatype='INT2S', ...)
} else {
out <- writeStart(out, filename=filename, ...)
}
# Process in row blocks, with a progress bar
tr <- blockSize(x)
pb <- pbCreate(tr$n, ...)
for (i in 1:tr$n) {
v <- as.logical ( getValuesBlock(x, row=tr$row[i], nrows=tr$nrows[i] ) )
out <- writeValues(out, v, tr$row[i])
pbStep(pb, i)
}
pbClose(pb)
out <- writeStop(out)
return(out)
}
}
)
|
# server.R
library(dplyr)

# Load plotting helpers and raw data
source('./scripts/build_map.R')
source('./scripts/build_scatter.R')
electoral <- read.csv('./data/electoral_college.csv', stringsAsFactors = FALSE)
codes <- read.csv('./data/state_codes.csv', stringsAsFactors = FALSE)

# Attach state codes, then derive electoral votes per 100K residents
plot_data <- electoral %>%
  left_join(codes, by = "state") %>%
  mutate(ratio = votes / population * 100000)

# Start shinyServer
shinyServer(function(input, output) {
  # Choropleth map driven by the selected map variable
  output$map <- renderPlotly(build_map(plot_data, input$mapvar))
  # Scatter plot driven by the search input
  output$scatter <- renderPlotly(build_scatter(plot_data, input$search))
})
|
/exercise-5/server.R
|
permissive
|
engv/ch16-shiny
|
R
| false
| false
| 781
|
r
|
# server.R
library(dplyr)

# Load plotting helpers and raw data
source('./scripts/build_map.R')
source('./scripts/build_scatter.R')
electoral <- read.csv('./data/electoral_college.csv', stringsAsFactors = FALSE)
codes <- read.csv('./data/state_codes.csv', stringsAsFactors = FALSE)

# Attach state codes, then derive electoral votes per 100K residents
plot_data <- electoral %>%
  left_join(codes, by = "state") %>%
  mutate(ratio = votes / population * 100000)

# Start shinyServer
shinyServer(function(input, output) {
  # Choropleth map driven by the selected map variable
  output$map <- renderPlotly(build_map(plot_data, input$mapvar))
  # Scatter plot driven by the search input
  output$scatter <- renderPlotly(build_scatter(plot_data, input$search))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataDaf.R
\docType{data}
\name{myDafData}
\alias{myDafData}
\title{Sample DAF data frame}
\format{
A data frame containing polymorphic sites for selected (i) and neutral (0) classes at different DAF categories
}
\usage{
myDafData
}
\description{
Data frame containing polymorphism sample data
\itemize{
\item daf. derived allele frequency (DAF) categories
\item Pi. number of selected (i) polymorphic sites for each daf category
\item P0. number of neutral (0) polymorphic sites for each daf category
}
}
\keyword{SampleData}
|
/man/myDafData.Rd
|
no_license
|
BGD-UAB/iMKT
|
R
| false
| true
| 610
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataDaf.R
\docType{data}
\name{myDafData}
\alias{myDafData}
\title{Sample DAF data frame}
\format{
A data frame containing polymorphic sites for selected (i) and neutral (0) classes at different DAF categories
}
\usage{
myDafData
}
\description{
Data frame containing polymorphism sample data
\itemize{
\item daf. derived allele frequency (DAF) categories
\item Pi. number of selected (i) polymorphic sites for each daf category
\item P0. number of neutral (0) polymorphic sites for each daf category
}
}
\keyword{SampleData}
|
# featnames -----------
#' Get the feature labels from a dfm
#'
#' Get the features from a document-feature matrix, which are stored as the
#' column names of the \link{dfm} object.
#' @param x the dfm whose features will be extracted
#' @return character vector of the feature labels
#' @examples
#' inaugDfm <- dfm(data_corpus_inaugural, verbose = FALSE)
#'
#' # first 50 features (in original text order)
#' head(featnames(inaugDfm), 50)
#'
#' # first 50 features alphabetically
#' head(sort(featnames(inaugDfm)), 50)
#'
#' # contrast with descending total frequency order from topfeatures()
#' names(topfeatures(inaugDfm, 50))
#' @export
# S3 generic: dispatch on the class of x
featnames <- function(x) {
    UseMethod("featnames")
}

#' @export
#' @noRd
# NULL input yields NULL (so featnames(NULL) is safe in pipelines)
featnames.NULL <- function(x) {
    NULL
}
#' @export
#' @noRd
featnames.dfm <- function(x) {
    # normalise the input, then fall back to an empty character vector
    # when the dfm carries no column (feature) names
    x <- as.dfm(x)
    feats <- colnames(x)
    if (is.null(feats)) {
        character()
    } else {
        feats
    }
}
# docnames -----------
#' @noRd
#' @export
docnames.dfm <- function(x) {
    # Row names are the document names; generate defaults when absent.
    x <- as.dfm(x)
    if (is.null(rownames(x))) {
        # CONSISTENCY FIX: use the same configurable prefix as
        # as_dfm_constructor() instead of a hard-coded 'text'
        # (quanteda_options("base_docname") defaults to "text")
        paste0(quanteda_options("base_docname"), seq_len(ndoc(x)))
    } else {
        rownames(x)
    }
}

#' @noRd
#' @export
# NULL input yields NULL
docnames.NULL <- function(x) {
    NULL
}
# as.dfm -----------
#' Coercion and checking functions for dfm objects
#'
#' Convert an eligible input object into a dfm, or check whether an object is a
#' dfm. Current eligible inputs for coercion to a dfm are: \link{matrix},
#' (sparse) \link[Matrix]{Matrix}, \link[tm]{TermDocumentMatrix},
#' \link[tm]{DocumentTermMatrix}, \link{data.frame}, and other \link{dfm}
#' objects.
#' @param x a candidate object for checking or coercion to \link{dfm}
#' @return \code{as.dfm} converts an input object into a \link{dfm}. Row names
#' are used for docnames, and column names for featnames, of the resulting
#' dfm.
#' @seealso \code{\link{as.data.frame.dfm}}, \code{\link{as.matrix.dfm}},
#' \code{\link{convert}}
#' @export
# S3 generic: dispatch on the class of x
as.dfm <- function(x) {
    UseMethod("as.dfm")
}

#' @export
# Unsupported classes get an informative error rather than a cryptic one
as.dfm.default <- function(x) {
    stop(friendly_class_undefined_message(class(x), "as.dfm"))
}
#' @noRd
#' @method as.dfm dfm
#' @export
# Already a dfm: return unchanged
as.dfm.dfm <- function(x) {
    x
}

#' @noRd
#' @method as.dfm matrix
#' @export
as.dfm.matrix <- function(x) {
    as_dfm_constructor(x)
}

#' @noRd
#' @method as.dfm Matrix
#' @export
as.dfm.Matrix <- function(x) {
    as_dfm_constructor(x)
}

#' @noRd
#' @method as.dfm data.frame
#' @export
# rownames.force = TRUE keeps data.frame row names so they become docnames
as.dfm.data.frame <- function(x) {
    as_dfm_constructor(as.matrix(x, rownames.force = TRUE))
}

#' @noRd
#' @method as.dfm dfmSparse
#' @export
# Legacy dfmSparse objects: coerce via their underlying sparse matrix
as.dfm.dfmSparse <- function(x) {
    as.dfm(as(x, 'dgCMatrix'))
}

#' @noRd
#' @method as.dfm DocumentTermMatrix
#' @export
# tm DTM is already documents x terms: copy the triplet slots directly
as.dfm.DocumentTermMatrix <- function(x){
    as.dfm(
        sparseMatrix(i = x$i, j = x$j, x = x$v,
                     dimnames = list(rownames(x), colnames(x)))
    )
}

#' @noRd
#' @method as.dfm TermDocumentMatrix
#' @export
# tm TDM is terms x documents: swap i/j and the dimnames to transpose it
# into the documents x features orientation a dfm requires
as.dfm.TermDocumentMatrix <- function(x){
    as.dfm(
        sparseMatrix(i = x$j, j = x$i, x = x$v,
                     dimnames = list(colnames(x), rownames(x)))
    )
}
# Internal constructor: coerce a dense/sparse matrix into a "dfm" S4 object,
# filling in default doc/feature names where they are missing.
as_dfm_constructor <- function(x) {
    # Coerce to a sparse Matrix first; its dimnames argument is not working,
    # so dimension names are patched up afterwards.
    mat <- Matrix(x, sparse = TRUE)
    names(dimnames(mat)) <- c("docs", "features")
    nr <- nrow(mat)
    nc <- ncol(mat)
    if (nr > 0 && is.null(rownames(mat)))
        rownames(mat) <- paste0(quanteda_options("base_docname"), seq_len(nr))
    if (nc > 0 && is.null(colnames(mat)))
        colnames(mat) <- paste0(quanteda_options("base_featname"), seq_len(nc))
    new("dfm", mat, docvars = data.frame(row.names = rownames(mat)))
}
#' @rdname as.dfm
#' @return
#' \code{is.dfm} returns \code{TRUE} if and only if its argument is a \link{dfm}.
#' @export
# methods::is() is used (not inherits) so S4 inheritance is respected
is.dfm <- function(x) {
    is(x, "dfm")
    # "dfm" %in% class(x)
}
# topfeatures -----------
#' Identify the most frequent features in a dfm
#'
#' List the most (or least) frequently occurring features in a \link{dfm}, either
#' as a whole or separated by document.
#' @name topfeatures
#' @param x the object whose features will be returned
#' @param n how many top features should be returned
#' @param decreasing If \code{TRUE}, return the \code{n} most frequent features;
#' otherwise return the \code{n} least frequent features
#' @param scheme one of \code{count} for total feature frequency (within
#' \code{group} if applicable), or \code{docfreq} for the document frequencies
#' of features
#' @inheritParams groups
#' @return A named numeric vector of feature counts, where the names are the
#' feature labels, or a list of these if \code{groups} is given.
#' @examples
#' mydfm <- corpus_subset(data_corpus_inaugural, Year > 1980) %>%
#' dfm(remove_punct = TRUE)
#' mydfm_nostopw <- dfm_remove(mydfm, stopwords("english"))
#'
#' # most frequent features
#' topfeatures(mydfm)
#' topfeatures(mydfm_nostopw)
#'
#' # least frequent features
#' topfeatures(mydfm_nostopw, decreasing = FALSE)
#'
#' # top features of individual documents
#' topfeatures(mydfm_nostopw, n = 5, groups = docnames(mydfm_nostopw))
#'
#' # grouping by president last name
#' topfeatures(mydfm_nostopw, n = 5, groups = "President")
#'
#' # features by document frequencies
#' tail(topfeatures(mydfm, scheme = "docfreq", n = 200))
#' @export
# S3 generic: dispatch on the class of x
topfeatures <- function(x, n = 10, decreasing = TRUE,
                        scheme = c("count", "docfreq"), groups = NULL) {
    UseMethod("topfeatures")
}

#' @export
# Unsupported classes get an informative error rather than a cryptic one
topfeatures.default <- function(x, n = 10, decreasing = TRUE,
                                scheme = c("count", "docfreq"), groups = NULL) {
    stop(friendly_class_undefined_message(class(x), "topfeatures"))
}
#' @export
#' @noRd
#' @importFrom stats quantile
topfeatures.dfm <- function(x, n = 10, decreasing = TRUE,
                            scheme = c("count", "docfreq"), groups = NULL) {
    x <- as.dfm(x)
    # empty dfm: nothing to rank
    if (!nfeat(x) || !ndoc(x)) return(numeric())
    scheme <- match.arg(scheme)
    if (!is.null(groups)) {
        # collapse documents into groups and recurse per group
        rownames(x) <- generate_groups(x, groups)
        result <- list()
        for (i in unique(docnames(x))) {
            result[[i]] <- topfeatures(x[which(rownames(x) == i), ],
                                       n = n, scheme = scheme,
                                       decreasing = decreasing, groups = NULL)
        }
        return(result)
    }
    # clamp n to the number of features
    if (n > nfeat(x)) n <- nfeat(x)
    # BUG FIX: for n <= 0 the original `result[1:n]` evaluated result[c(1, 0)]
    # and returned one feature instead of none
    if (n < 1) return(numeric())
    if (scheme == "count") {
        wght <- colSums(x)
    } else if (scheme == "docfreq") {
        wght <- docfreq(x)
    }
    result <- sort(wght, decreasing)
    result[seq_len(n)]
}
# sparsity -----------
#' Compute the sparsity of a document-feature matrix
#'
#' Return the proportion of sparseness of a document-feature matrix, equal
#' to the proportion of cells that have zero counts.
#' @param x the document-feature matrix
#' @return a single numeric value between 0 and 1
#' @examples
#' inaug_dfm <- dfm(data_corpus_inaugural, verbose = FALSE)
#' sparsity(inaug_dfm)
#' sparsity(dfm_trim(inaug_dfm, min_termfreq = 5))
#' @export
sparsity <- function(x) {
    # S3 generic: dispatch on the class of x
    UseMethod("sparsity")
}
#' @export
sparsity.default <- function(x) {
    # informative error for classes that have no sparsity method
    stop(friendly_class_undefined_message(class(x), "sparsity"))
}
#' @export
sparsity.dfm <- function(x) {
    # sparsity = share of cells that hold no stored entry:
    # one minus the ratio of nonzero entries (the @x slot) to total cells
    n_nonzero <- length(x@x)
    n_cells <- prod(dim(x))
    1 - n_nonzero / n_cells
}
# Internal --------
#' Internal functions for dfm objects
#'
#' Internal function documentation for \link{dfm} objects.
#' @name dfm-internal
#' @keywords dfm internal
NULL
#' The \code{Compare} methods enable relational operators to be used with dfm.
#' Relational operations on a dfm with a numeric will return a
#' \link[Matrix]{lgCMatrix-class} object.
#' @rdname dfm-internal
#' @param e1 a \link{dfm}
#' @param e2 a numeric value to compare with values in a dfm
#' @export
#' @seealso \link{Comparison} operators
setMethod("Compare", c("dfm", "numeric"), function(e1, e2) {
    # strip the dfm class down to a plain dgCMatrix, re-apply the relational
    # operator that triggered dispatch (callGeneric), and coerce the result
    # to a logical sparse matrix
    as(callGeneric(as(e1, "dgCMatrix"), e2), "lgCMatrix")
})
|
/R/dfm-methods.R
|
no_license
|
tpaskhalis/quanteda
|
R
| false
| false
| 8,453
|
r
|
# featnames -----------
#' Get the feature labels from a dfm
#'
#' Get the features from a document-feature matrix, which are stored as the
#' column names of the \link{dfm} object.
#' @param x the dfm whose features will be extracted
#' @return character vector of the feature labels
#' @examples
#' inaugDfm <- dfm(data_corpus_inaugural, verbose = FALSE)
#'
#' # first 50 features (in original text order)
#' head(featnames(inaugDfm), 50)
#'
#' # first 50 features alphabetically
#' head(sort(featnames(inaugDfm)), 50)
#'
#' # contrast with descending total frequency order from topfeatures()
#' names(topfeatures(inaugDfm, 50))
#' @export
featnames <- function(x) {
    # S3 generic: dispatch on the class of x
    UseMethod("featnames")
}
#' @export
#' @noRd
featnames.NULL <- function(x) {
    # featnames of nothing is nothing
    NULL
}
#' @export
#' @noRd
featnames.dfm <- function(x) {
    # feature labels are stored as the column names of the dfm; a dfm
    # without column names yields an empty character vector
    x <- as.dfm(x)
    labels <- colnames(x)
    if (is.null(labels)) character() else labels
}
# docnames -----------
#' @noRd
#' @export
docnames.dfm <- function(x) {
    # document names are the row names of the dfm; when absent,
    # synthesize "text1", "text2", ... to match the number of documents
    x <- as.dfm(x)
    labels <- rownames(x)
    if (is.null(labels)) {
        labels <- paste0("text", seq_len(ndoc(x)))
    }
    labels
}
#' @noRd
#' @export
docnames.NULL <- function(x) {
    # docnames of nothing is nothing
    NULL
}
# as.dfm -----------
#' Coercion and checking functions for dfm objects
#'
#' Convert an eligible input object into a dfm, or check whether an object is a
#' dfm. Current eligible inputs for coercion to a dfm are: \link{matrix},
#' (sparse) \link[Matrix]{Matrix}, \link[tm]{TermDocumentMatrix},
#' \link[tm]{DocumentTermMatrix}, \link{data.frame}, and other \link{dfm}
#' objects.
#' @param x a candidate object for checking or coercion to \link{dfm}
#' @return \code{as.dfm} converts an input object into a \link{dfm}. Row names
#' are used for docnames, and column names for featnames, of the resulting
#' dfm.
#' @seealso \code{\link{as.data.frame.dfm}}, \code{\link{as.matrix.dfm}},
#' \code{\link{convert}}
#' @export
as.dfm <- function(x) {
    # S3 generic: dispatch on the class of x
    UseMethod("as.dfm")
}
#' @export
as.dfm.default <- function(x) {
    # informative error for classes with no as.dfm coercion method
    stop(friendly_class_undefined_message(class(x), "as.dfm"))
}
#' @noRd
#' @method as.dfm dfm
#' @export
as.dfm.dfm <- function(x) {
    # already a dfm: return unchanged
    x
}
#' @noRd
#' @method as.dfm matrix
#' @export
as.dfm.matrix <- function(x) {
    # dense matrix: hand off to the shared constructor
    as_dfm_constructor(x)
}
#' @noRd
#' @method as.dfm Matrix
#' @export
as.dfm.Matrix <- function(x) {
    # any Matrix-package matrix: hand off to the shared constructor
    as_dfm_constructor(x)
}
#' @noRd
#' @method as.dfm data.frame
#' @export
as.dfm.data.frame <- function(x) {
    # rownames.force = TRUE keeps row names so they become docnames
    as_dfm_constructor(as.matrix(x, rownames.force = TRUE))
}
#' @noRd
#' @method as.dfm dfmSparse
#' @export
as.dfm.dfmSparse <- function(x) {
    # coerce to a plain sparse matrix first, then rebuild as a dfm
    as.dfm(as(x, 'dgCMatrix'))
}
#' @noRd
#' @method as.dfm DocumentTermMatrix
#' @export
as.dfm.DocumentTermMatrix <- function(x) {
    # Build a docs x features sparse matrix from the simple-triplet slots.
    # dims is stated explicitly: without it sparseMatrix() infers the shape
    # from max(i)/max(j), so trailing all-zero documents or terms would be
    # dropped and the supplied dimnames would no longer match.
    as.dfm(
        sparseMatrix(i = x$i, j = x$j, x = x$v,
                     dims = c(nrow(x), ncol(x)),
                     dimnames = list(rownames(x), colnames(x)))
    )
}
#' @noRd
#' @method as.dfm TermDocumentMatrix
#' @export
as.dfm.TermDocumentMatrix <- function(x) {
    # A TDM is features x docs, so swap i/j to transpose into docs x features;
    # dims is swapped accordingly for the same trailing-zero reason as above.
    as.dfm(
        sparseMatrix(i = x$j, j = x$i, x = x$v,
                     dims = c(ncol(x), nrow(x)),
                     dimnames = list(colnames(x), rownames(x)))
    )
}
# Shared constructor: coerce any matrix-like input into the S4 "dfm" class,
# filling in default doc/feature names where the input supplied none.
as_dfm_constructor <- function(x) {
    # dimnames must be set after coercion; passing them to Matrix() here
    # does not take effect
    x <- Matrix(x, sparse = TRUE) # dimnames argument is not working
    names(dimnames(x)) <- c("docs", "features")
    # synthesize default names (e.g. text1, text2, ...) from package options
    if (nrow(x) > 0 && is.null(rownames(x)))
        rownames(x) <- paste0(quanteda_options("base_docname"), seq_len(nrow(x)))
    if (ncol(x) > 0 && is.null(colnames(x)))
        colnames(x) <- paste0(quanteda_options("base_featname"), seq_len(ncol(x)))
    # wrap in the S4 dfm class with an empty docvars data.frame
    new("dfm", x, docvars = data.frame(row.names = rownames(x)))
}
#' @rdname as.dfm
#' @return
#' \code{is.dfm} returns \code{TRUE} if and only if its argument is a \link{dfm}.
#' @export
is.dfm <- function(x) {
    # methods::is() also recognizes S4 subclasses of "dfm"
    is(x, "dfm")
    # "dfm" %in% class(x)
}
# topfeatures -----------
#' Identify the most frequent features in a dfm
#'
#' List the most (or least) frequently occurring features in a \link{dfm}, either
#' as a whole or separated by document.
#' @name topfeatures
#' @param x the object whose features will be returned
#' @param n how many top features should be returned
#' @param decreasing If \code{TRUE}, return the \code{n} most frequent features;
#' otherwise return the \code{n} least frequent features
#' @param scheme one of \code{count} for total feature frequency (within
#' \code{group} if applicable), or \code{docfreq} for the document frequencies
#' of features
#' @inheritParams groups
#' @return A named numeric vector of feature counts, where the names are the
#' feature labels, or a list of these if \code{groups} is given.
#' @examples
#' mydfm <- corpus_subset(data_corpus_inaugural, Year > 1980) %>%
#' dfm(remove_punct = TRUE)
#' mydfm_nostopw <- dfm_remove(mydfm, stopwords("english"))
#'
#' # most frequent features
#' topfeatures(mydfm)
#' topfeatures(mydfm_nostopw)
#'
#' # least frequent features
#' topfeatures(mydfm_nostopw, decreasing = FALSE)
#'
#' # top features of individual documents
#' topfeatures(mydfm_nostopw, n = 5, groups = docnames(mydfm_nostopw))
#'
#' # grouping by president last name
#' topfeatures(mydfm_nostopw, n = 5, groups = "President")
#'
#' # features by document frequencies
#' tail(topfeatures(mydfm, scheme = "docfreq", n = 200))
#' @export
topfeatures <- function(x, n = 10, decreasing = TRUE,
                        scheme = c("count", "docfreq"), groups = NULL) {
    # S3 generic: dispatch on the class of x
    UseMethod("topfeatures")
}
#' @export
topfeatures.default <- function(x, n = 10, decreasing = TRUE,
                                scheme = c("count", "docfreq"), groups = NULL) {
    # informative error for classes that have no topfeatures method
    stop(friendly_class_undefined_message(class(x), "topfeatures"))
}
#' @export
#' @noRd
#' @importFrom stats quantile
topfeatures.dfm <- function(x, n = 10, decreasing = TRUE,
                            scheme = c("count", "docfreq"), groups = NULL) {
    x <- as.dfm(x)
    # an empty dfm has nothing to rank
    if (!nfeat(x) || !ndoc(x)) return(numeric())
    scheme <- match.arg(scheme)
    if (!is.null(groups)) {
        # recurse once per group, restricted to the rows belonging to it;
        # returns a named list of per-group results
        rownames(x) <- generate_groups(x, groups)
        result <- list()
        for (i in unique(docnames(x))) {
            result[[i]] <- topfeatures(x[which(rownames(x) == i), ],
                                       n = n, scheme = scheme,
                                       decreasing = decreasing, groups = NULL)
        }
        return(result)
    }
    # clamp n so we never index past the number of features
    if (n > nfeat(x)) n <- nfeat(x)
    if (scheme == "count") {
        wght <- colSums(x)
    } else if (scheme == "docfreq") {
        wght <- docfreq(x)
    }
    result <- sort(wght, decreasing = decreasing)
    # seq_len() is safe when n == 0, unlike 1:n which would yield c(1, 0)
    result[seq_len(n)]
}
# sparsity -----------
#' Compute the sparsity of a document-feature matrix
#'
#' Return the proportion of sparseness of a document-feature matrix, equal
#' to the proportion of cells that have zero counts.
#' @param x the document-feature matrix
#' @return a single numeric value between 0 and 1
#' @examples
#' inaug_dfm <- dfm(data_corpus_inaugural, verbose = FALSE)
#' sparsity(inaug_dfm)
#' sparsity(dfm_trim(inaug_dfm, min_termfreq = 5))
#' @export
sparsity <- function(x) {
    # S3 generic: dispatch on the class of x
    UseMethod("sparsity")
}
#' @export
sparsity.default <- function(x) {
    # informative error for classes that have no sparsity method
    stop(friendly_class_undefined_message(class(x), "sparsity"))
}
#' @export
sparsity.dfm <- function(x) {
    # sparsity = share of cells that hold no stored entry:
    # one minus the ratio of nonzero entries (the @x slot) to total cells
    n_nonzero <- length(x@x)
    n_cells <- prod(dim(x))
    1 - n_nonzero / n_cells
}
# Internal --------
#' Internal functions for dfm objects
#'
#' Internal function documentation for \link{dfm} objects.
#' @name dfm-internal
#' @keywords dfm internal
NULL
#' The \code{Compare} methods enable relational operators to be used with dfm.
#' Relational operations on a dfm with a numeric will return a
#' \link[Matrix]{lgCMatrix-class} object.
#' @rdname dfm-internal
#' @param e1 a \link{dfm}
#' @param e2 a numeric value to compare with values in a dfm
#' @export
#' @seealso \link{Comparison} operators
setMethod("Compare", c("dfm", "numeric"), function(e1, e2) {
    # strip the dfm class down to a plain dgCMatrix, re-apply the relational
    # operator that triggered dispatch (callGeneric), and coerce the result
    # to a logical sparse matrix
    as(callGeneric(as(e1, "dgCMatrix"), e2), "lgCMatrix")
})
|
# Confidence intervals for engine displacement (Autko$displacement), n = 398.
# Bug fix: the original referred to a bare `sigma`, which resolves to the
# stats::sigma *function*, not the computed standard deviation, so every
# interval below errored; all uses now reference displacement_sigma.
# sample mean (displacement)
displacement_mu <- mean(Autko$displacement)
# sample standard deviation (displacement)
displacement_sigma <- sd(Autko$displacement)
# NORMAL CONFIDENCE INTERVALS FOR THE EXPECTED VALUE
# 90% confidence interval
displacement_przedz90norm <- round(displacement_mu + c(-1, 1) * displacement_sigma / sqrt(398) * qnorm(.95), 2)
# 95% confidence interval
displacement_przedz95norm <- round(displacement_mu + c(-1, 1) * displacement_sigma / sqrt(398) * qnorm(.975), 2)
# 99% confidence interval
displacement_przedz99norm <- round(displacement_mu + c(-1, 1) * displacement_sigma / sqrt(398) * qnorm(.995), 2)
# interpretation: for the sample of 398 cars, the population mean displacement
# lies within the given interval at the stated confidence level
# CONFIDENCE INTERVALS FOR THE VARIANCE (reported on the sd scale)
# NOTE(review): the textbook interval for the sd uses (n-1) * s^2 inside the
# square root; this script uses s * n as the original did — confirm intent.
# 90% confidence interval
displacement_przedz90war <- round(sqrt(displacement_sigma * 398 / qchisq(c(1 - .05, .05), 397)), 2)
# 95% confidence interval
displacement_przedz95war <- round(sqrt(displacement_sigma * 398 / qchisq(c(1 - .025, .025), 397)), 2)
# 99% confidence interval
displacement_przedz99war <- round(sqrt(displacement_sigma * 398 / qchisq(c(1 - .005, .005), 397)), 2)
# STUDENT-T CONFIDENCE INTERVALS FOR THE EXPECTED VALUE
# 90% confidence interval
displacement_przedz90t <- round(displacement_mu + c(-1, 1) * displacement_sigma / sqrt(398) * qt(.95, 397), 2)
# 95% confidence interval
displacement_przedz95t <- round(displacement_mu + c(-1, 1) * displacement_sigma / sqrt(398) * qt(.975, 397), 2)
# 99% confidence interval
displacement_przedz99t <- round(displacement_mu + c(-1, 1) * displacement_sigma / sqrt(398) * qt(.995, 397), 2)
|
/Projekt_MS/Zajęcki/MS/Przedziały_ufności_displacement.R
|
no_license
|
MacPiston/MS_Proj_2020
|
R
| false
| false
| 1,399
|
r
|
# Confidence intervals for engine displacement (Autko$displacement), n = 398.
# Bug fix: the original referred to a bare `sigma`, which resolves to the
# stats::sigma *function*, not the computed standard deviation, so every
# interval below errored; all uses now reference displacement_sigma.
# sample mean (displacement)
displacement_mu <- mean(Autko$displacement)
# sample standard deviation (displacement)
displacement_sigma <- sd(Autko$displacement)
# NORMAL CONFIDENCE INTERVALS FOR THE EXPECTED VALUE
# 90% confidence interval
displacement_przedz90norm <- round(displacement_mu + c(-1, 1) * displacement_sigma / sqrt(398) * qnorm(.95), 2)
# 95% confidence interval
displacement_przedz95norm <- round(displacement_mu + c(-1, 1) * displacement_sigma / sqrt(398) * qnorm(.975), 2)
# 99% confidence interval
displacement_przedz99norm <- round(displacement_mu + c(-1, 1) * displacement_sigma / sqrt(398) * qnorm(.995), 2)
# interpretation: for the sample of 398 cars, the population mean displacement
# lies within the given interval at the stated confidence level
# CONFIDENCE INTERVALS FOR THE VARIANCE (reported on the sd scale)
# NOTE(review): the textbook interval for the sd uses (n-1) * s^2 inside the
# square root; this script uses s * n as the original did — confirm intent.
# 90% confidence interval
displacement_przedz90war <- round(sqrt(displacement_sigma * 398 / qchisq(c(1 - .05, .05), 397)), 2)
# 95% confidence interval
displacement_przedz95war <- round(sqrt(displacement_sigma * 398 / qchisq(c(1 - .025, .025), 397)), 2)
# 99% confidence interval
displacement_przedz99war <- round(sqrt(displacement_sigma * 398 / qchisq(c(1 - .005, .005), 397)), 2)
# STUDENT-T CONFIDENCE INTERVALS FOR THE EXPECTED VALUE
# 90% confidence interval
displacement_przedz90t <- round(displacement_mu + c(-1, 1) * displacement_sigma / sqrt(398) * qt(.95, 397), 2)
# 95% confidence interval
displacement_przedz95t <- round(displacement_mu + c(-1, 1) * displacement_sigma / sqrt(398) * qt(.975, 397), 2)
# 99% confidence interval
displacement_przedz99t <- round(displacement_mu + c(-1, 1) * displacement_sigma / sqrt(398) * qt(.995, 397), 2)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/audio_aug_signal.R
\name{NoiseColor}
\alias{NoiseColor}
\title{Noise Color}
\usage{
NoiseColor(...)
}
\arguments{
\item{...}{parameters to pass}
}
\value{
module
}
\description{
Noise Color
}
|
/man/NoiseColor.Rd
|
permissive
|
han-tun/fastai
|
R
| false
| true
| 270
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/audio_aug_signal.R
\name{NoiseColor}
\alias{NoiseColor}
\title{Noise Color}
\usage{
NoiseColor(...)
}
\arguments{
\item{...}{parameters to pass}
}
\value{
module
}
\description{
Noise Color
}
|
#### Humac analysis pre vs. post fifth RT session
## Author: Kristian Lian
# Purpose: This script plots mean torque per supplement (both through intervention and pre vs. post) results from the ribose project,
# and analyses the data per test (isometric, isokinetic 60, isokinetic 240) in a linear model.
## Time-points
# D-1: Baseline, before any supplementation or training
# D4, D5, D8 and D9: Day 4, 5, 8 and 9 of the intervention, humac testing of the leg that performed
# RT the preceding day
# T3: Post testing leg #1 (leg that started the intervention). Leg #1 is tested four times at T3/T4:
# Test 1 leg 1: 1.5hrs after protein ingestion, 45min before RT (T3)
# Test 2 leg 1: 30min after RT (T3)
# Test 3 leg 1: 2hrs after RT (T3)
# Test 4 leg 1: ~23hrs after RT (T4)
# Test 1 serve as a post test for the 5 RT sessions and pre test before the sixth session, test 2,
# 3, and 4 serve as post test following sixth session
# T4 and 13 follow the same design for leg #2
## Data
# Date of testing
# Subject
# Test type: isok.60 (isokinetic 60), isok.240 (isokinetic 240), isom (isometric)
# Peak.torque: Highest peak torque from each test
# Leg: left or right leg
# Supplement: glucose or placebo
# Packages
library(readxl);library(tidyverse);library(nlme);library(lme4);library(broom);library(knitr);library(emmeans)
## Handling the data by creating a new factor called time from timepoint. This factor combines any observation at T1 and T2 to baseline, etc.
# The code also sorts the order of the factor time, from baseline to session 6, using time = factor(time, levels = c()), and sets placebo to be compared to
# glucose via supplement = factor(supplement, levels = c()). The acute variable is recoded as a factor so that it is possible to divide post-5th-session
# data from post-6th-session data
# Load the humac Excel sheet and derive a combined "time" factor from the raw
# timepoint/acute codes; the explicit factor levels fix the chronological
# ordering and make "placebo" the reference level for supplement contrasts.
humac <- read_excel("./data/tests/ribose.humac.xlsx", na = "NA") %>%
  mutate(time = if_else(timepoint == "D-1",
                        "baseline",
                        if_else(timepoint %in% c("D4", "D5"),
                                "test1",
                                if_else(timepoint %in% c("D8", "D9"),
                                        "test2",
                                        if_else(timepoint %in% c("T3", "T4") & acute %in% c("rest", "post30min", "post2h"),
                                                "test3",
                                                if_else(acute == "post23h", "test4", timepoint)))))) %>%
  mutate(time = factor(time, levels = c("baseline", "test1", "test2", "test3", "test4")),
         acute = factor(acute, levels = c("rest", "post30min", "post2h", "post23h")),
         supplement = factor(supplement, levels = c("placebo", "glucose"))) %>%
  print()
# Keep only the resting (pre-exercise) observations for the change analyses.
rest.dat <- humac %>%
  filter(acute == "rest" ) %>%
  print()
## Baseline analysis - comparison of the two legs
# A baseline analysis comparing peak torque for each exercise at baseline between the two legs via a paired t.test, and providing a summary of mean peak
# torque and sd
# Isometric
base.isom <- humac %>%
filter(time == "baseline",
test == "isom") %>%
select(subject, time, test, supplement, peak.torque) %>%
group_by(supplement) %>%
pivot_wider(names_from = supplement,
values_from = peak.torque) %>%
print()
isom.ttest <- t.test(base.isom$glucose, base.isom$placebo, paired = TRUE)
isom.summary <- humac %>%
filter(time == "baseline",
test == "isom") %>%
select(subject, time, test, supplement, peak.torque) %>%
group_by(supplement) %>%
mutate(m = mean(peak.torque),
s = sd(peak.torque)) %>%
print()
# Isok 60
base.60 <- humac %>%
filter(time == "baseline",
test == "isok.60") %>%
select(subject, time, test, supplement, peak.torque) %>%
group_by(supplement) %>%
pivot_wider(names_from = supplement,
values_from = peak.torque) %>%
print()
isok60.ttest <- t.test(base.60$glucose, base.60$placebo, paired = TRUE)
isok60.summary <- humac %>%
filter(time == "baseline",
test == "isok.60") %>%
select(subject, time, test, supplement, peak.torque) %>%
group_by(supplement) %>%
mutate(m = mean(peak.torque),
s = sd(peak.torque)) %>%
print()
# Isok 240
base.240 <- humac %>%
filter(time == "baseline",
test == "isok.240") %>%
select(subject, time, test, supplement, peak.torque) %>%
group_by(supplement) %>%
pivot_wider(names_from = supplement,
values_from = peak.torque) %>%
print()
isok240.ttest <- t.test(base.240$glucose, base.240$placebo, paired = TRUE)
isok240.summary <- humac %>%
filter(time == "baseline",
test == "isok.240") %>%
select(subject, time, test, supplement, peak.torque) %>%
group_by(supplement) %>%
mutate(m = mean(peak.torque),
s = sd(peak.torque)) %>%
print()
## Change-data
# The code beneath summarizes the mean values at each time, grouped by subject, time and supplement, creating a wider data set with observations of
# participants glucose measurements per time point.
# Then, mutate() is used to calculate change scores, where each timepoint is log-transformed and compared to baseline. baseline = baseline - mean(baseline,
# na.rm = TRUE) mean centers the baseline values. Subject, supplement, baseline and change scores are then selected and pivoted for modeling. The data set is
# filtered according to test exercise (isometric, isokinetic 60 or isokinetic 240)
# Isometric
isom.dat <- rest.dat %>%
filter(test == "isom") %>%
print()
change_dat <- isom.dat %>%
dplyr::select(subject, time, supplement, peak.torque) %>%
group_by(subject, time, supplement) %>%
summarise(peak.torque = mean(peak.torque, na.rm = TRUE)) %>%
pivot_wider(names_from = time,
values_from = peak.torque) %>%
ungroup() %>%
mutate(change.2 = log(test1)-log(baseline),
change.3 = log(test2)-log(baseline),
change.4 = log(test3)-log(baseline),
baseline = baseline - mean(baseline, na.rm = TRUE),
supplement = factor(supplement, levels = c("placebo", "glucose"))) %>%
select(subject, supplement, baseline, change.2, change.3, change.4) %>%
pivot_longer(names_to = "time",
values_to = "change",
cols = (change.2:change.4)) %>%
print()
# Isok.60
isok60.dat <- rest.dat %>%
filter(test == "isok.60") %>%
print()
change_dat2 <- isok60.dat %>%
dplyr::select(subject, time, supplement, peak.torque) %>%
group_by(subject, time, supplement) %>%
summarise(peak.torque = mean(peak.torque, na.rm = TRUE)) %>%
pivot_wider(names_from = time,
values_from = peak.torque) %>%
ungroup() %>%
mutate(change.2 = log(test1)-log(baseline),
change.3 = log(test2)-log(baseline),
change.4 = log(test3)-log(baseline),
baseline = baseline - mean(baseline, na.rm = TRUE),
supplement = factor(supplement, levels = c("placebo", "glucose"))) %>%
select(subject, supplement, baseline, change.2, change.3, change.4) %>%
pivot_longer(names_to = "time",
values_to = "change",
cols = (change.2:change.4)) %>%
print()
## Isok.240
isok240.dat <- rest.dat %>%
filter(test == "isok.240") %>%
print()
change_dat3 <- isok240.dat %>%
dplyr::select(subject, time, supplement, peak.torque) %>%
group_by(subject, time, supplement) %>%
summarise(peak.torque = mean(peak.torque, na.rm = TRUE)) %>%
pivot_wider(names_from = time,
values_from = peak.torque) %>%
ungroup() %>%
mutate(change.2 = log(test1)-log(baseline),
change.3 = log(test2)-log(baseline),
change.4 = log(test3)-log(baseline),
baseline = baseline - mean(baseline, na.rm = TRUE),
supplement = factor(supplement, levels = c("placebo", "glucose"))) %>%
select(subject, supplement, baseline, change.2, change.3, change.4) %>%
pivot_longer(names_to = "time",
values_to = "change",
cols = (change.2:change.4)) %>%
print()
## Linear mixed effects model
# This model tries to explain the change by time and supplement, accounting for potential differences in baseline values and that the same participants
# are measured at multiple time points.
# It produces results on both the time effect and the difference between the groups at any timepoint. We are interested in the difference between groups.
# Mean of all subjects
# The "0 +" drops the intercept so each time level gets its own coefficient;
# supplement:time estimates the supplement difference at each time point, and
# (1|subject) is a random intercept for repeated measures on the same subject.
# Isometric
m1 <- lmerTest::lmer(change ~ 0 + baseline + time + supplement:time + (1|subject),
                     data = change_dat)
plot(m1)
summary(m1)
# Isok.60
m2 <- lmerTest::lmer(change ~ 0 + baseline + time + supplement:time + (1|subject),
                     data = change_dat2)
plot(m2)
summary(m2)
# Isok.240
m3 <- lmerTest::lmer(change ~ 0 + baseline + time + supplement:time + (1|subject),
                     data = change_dat3)
plot(m3)
summary(m3)
## Fold-change estimated means
# Gets estimated means from the model, these are average increase at pre = 0 (the average pre value).
# These are log-fold change values (changeble with the mutate function)
# Isometric
confint.m1 <- confint(emmeans(m1, specs = ~"supplement|time")) %>%
data.frame()
# Isok.60
confint.m2 <- confint(emmeans(m2, specs = ~"supplement|time")) %>%
data.frame() %>%
print()
# Isok.240
confint.m3 <- confint(emmeans(m3, specs = ~"supplement|time")) %>%
data.frame()
## Emmeans figures
# Bug fix: `pos` is used in every position = argument below but was never
# defined anywhere in this script, so all three plots errored. Define the
# shared dodge offset once so error bars, lines, and points stay aligned.
pos <- position_dodge(width = 0.2)
# Isom
confint.m1 %>%
  data.frame() %>%
  add_row(supplement = "placebo", time = "change.1", emmean = 0, SE = 0, df = 0, lower.CL = 0, upper.CL = 0, .before =1) %>%
  add_row(supplement = "glucose", time = "change.1", emmean = 0, SE = 0, df = 0, lower.CL = 0, upper.CL = 0, .before =2) %>%
  ggplot(aes(time, emmean, group = supplement, fill = supplement)) +
  geom_errorbar(aes(ymin = lower.CL, ymax = upper.CL),
                position = pos,
                width = 0.2) +
  geom_line(position = pos) +
  geom_point(shape = 21, position = pos, size = 3) +
  scale_x_discrete(labels=c("change.1" = "Baseline", "change.2" = "Test 1", "change.3" = "Test 2",
                            "change.4" = "Test 3")) +
  labs(x = "", y = "Isometric \n(nm change)\n", fill = "Supplement") +
  theme_classic() +
  theme(axis.text.x = element_text(size=8))
# Isok 60
confint.m2 %>%
  data.frame() %>%
  add_row(supplement = "placebo", time = "change.1", emmean = 0, SE = 0, df = 0, lower.CL = 0, upper.CL = 0, .before =1) %>%
  add_row(supplement = "glucose", time = "change.1", emmean = 0, SE = 0, df = 0, lower.CL = 0, upper.CL = 0, .before =2) %>%
  ggplot(aes(time, emmean, group = supplement, fill = supplement)) +
  geom_errorbar(aes(ymin = lower.CL, ymax = upper.CL),
                position = pos,
                width = 0.2) +
  geom_line(position = pos) +
  geom_point(shape = 21, position = pos, size = 3) +
  scale_x_discrete(labels=c("change.1" = "Baseline", "change.2" = "Test 1", "change.3" = "Test 2",
                            "change.4" = "Test 3")) +
  labs(x = "", y = "Isokinetic 60 \n(nm change)\n", fill = "Supplement") +
  theme_classic() +
  theme(axis.text.x = element_text(size=8))
# Isok 240
confint.m3 %>%
  data.frame() %>%
  add_row(supplement = "placebo", time = "change.1", emmean = 0, SE = 0, df = 0, lower.CL = 0, upper.CL = 0, .before =1) %>%
  add_row(supplement = "glucose", time = "change.1", emmean = 0, SE = 0, df = 0, lower.CL = 0, upper.CL = 0, .before =2) %>%
  ggplot(aes(time, emmean, group = supplement, fill = supplement)) +
  geom_errorbar(aes(ymin = lower.CL, ymax = upper.CL),
                position = pos,
                width = 0.2) +
  geom_line(position = pos) +
  geom_point(shape = 21, position = pos, size = 3) +
  scale_x_discrete(labels=c("change.1" = "Baseline", "change.2" = "Test 1", "change.3" = "Test 2",
                            "change.4" = "Test 3")) +
  labs(x = "Time-Point", y = "Isokinetic 240 \n(nm change)\n", fill = "Supplement") +
  theme_classic() +
  theme(axis.text.x = element_text(size=8))
|
/R/Post5th.change.R
|
no_license
|
Kristianlian/master_degree
|
R
| false
| false
| 12,989
|
r
|
#### Humac analysis pre vs. post fifth RT session
## Author: Kristian Lian
# Purpose: This script plots mean torque per supplement (both through intervention and pre vs. post) results from the ribose project,
# and analyses the data per test (isometric, isokinetic 60, isokinetic 240) in a linear model.
## Time-points
# D-1: Baseline, before any supplementation or training
# D4, D5, D8 and D9: Day 4, 5, 8 and 9 of the intervention, humac testing of the leg that performed
# RT the preceding day
# T3: Post testing leg #1 (leg that started the intervention). Leg #1 is tested four times at T3/T4:
# Test 1 leg 1: 1.5hrs after protein ingestion, 45min before RT (T3)
# Test 2 leg 1: 30min after RT (T3)
# Test 3 leg 1: 2hrs after RT (T3)
# Test 4 leg 1: ~23hrs after RT (T4)
# Test 1 serve as a post test for the 5 RT sessions and pre test before the sixth session, test 2,
# 3, and 4 serve as post test following sixth session
# T4 and 13 follow the same design for leg #2
## Data
# Date of testing
# Subject
# Test type: isok.60 (isokinetic 60), isok.240 (isokinetic 240), isom (isometric)
# Peak.torque: Highest peak torque from each test
# Leg: left or right leg
# Supplement: glucose or placebo
# Packages
library(readxl);library(tidyverse);library(nlme);library(lme4);library(broom);library(knitr);library(emmeans)
## Handling the data by creating a new factor called time from timepoint. This factor combines any observation at T1 and T2 to baseline, etc.
# The code also sorts the order of the factor time, from baseline to session 6, using time = factor(time, levels = c()), and sets placebo to be compared to
# glucose via supplement = factor(supplement, levels = c()). The acute variable is recoded as a factor so that it is possible to divide post-5th-session
# data from post-6th-session data
# Load the humac Excel sheet and derive a combined "time" factor from the raw
# timepoint/acute codes; the explicit factor levels fix the chronological
# ordering and make "placebo" the reference level for supplement contrasts.
humac <- read_excel("./data/tests/ribose.humac.xlsx", na = "NA") %>%
  mutate(time = if_else(timepoint == "D-1",
                        "baseline",
                        if_else(timepoint %in% c("D4", "D5"),
                                "test1",
                                if_else(timepoint %in% c("D8", "D9"),
                                        "test2",
                                        if_else(timepoint %in% c("T3", "T4") & acute %in% c("rest", "post30min", "post2h"),
                                                "test3",
                                                if_else(acute == "post23h", "test4", timepoint)))))) %>%
  mutate(time = factor(time, levels = c("baseline", "test1", "test2", "test3", "test4")),
         acute = factor(acute, levels = c("rest", "post30min", "post2h", "post23h")),
         supplement = factor(supplement, levels = c("placebo", "glucose"))) %>%
  print()
# Keep only the resting (pre-exercise) observations for the change analyses.
rest.dat <- humac %>%
  filter(acute == "rest" ) %>%
  print()
## Baseline analysis - comparison of the two legs
# A baseline analysis comparing peak torque for each exercise at baseline between the two legs via a paired t.test, and providing a summary of mean peak
# torque and sd
# Isometric
base.isom <- humac %>%
filter(time == "baseline",
test == "isom") %>%
select(subject, time, test, supplement, peak.torque) %>%
group_by(supplement) %>%
pivot_wider(names_from = supplement,
values_from = peak.torque) %>%
print()
isom.ttest <- t.test(base.isom$glucose, base.isom$placebo, paired = TRUE)
isom.summary <- humac %>%
filter(time == "baseline",
test == "isom") %>%
select(subject, time, test, supplement, peak.torque) %>%
group_by(supplement) %>%
mutate(m = mean(peak.torque),
s = sd(peak.torque)) %>%
print()
# Isok 60
base.60 <- humac %>%
filter(time == "baseline",
test == "isok.60") %>%
select(subject, time, test, supplement, peak.torque) %>%
group_by(supplement) %>%
pivot_wider(names_from = supplement,
values_from = peak.torque) %>%
print()
isok60.ttest <- t.test(base.60$glucose, base.60$placebo, paired = TRUE)
isok60.summary <- humac %>%
filter(time == "baseline",
test == "isok.60") %>%
select(subject, time, test, supplement, peak.torque) %>%
group_by(supplement) %>%
mutate(m = mean(peak.torque),
s = sd(peak.torque)) %>%
print()
# Isok 240
base.240 <- humac %>%
filter(time == "baseline",
test == "isok.240") %>%
select(subject, time, test, supplement, peak.torque) %>%
group_by(supplement) %>%
pivot_wider(names_from = supplement,
values_from = peak.torque) %>%
print()
isok240.ttest <- t.test(base.240$glucose, base.240$placebo, paired = TRUE)
isok240.summary <- humac %>%
filter(time == "baseline",
test == "isok.240") %>%
select(subject, time, test, supplement, peak.torque) %>%
group_by(supplement) %>%
mutate(m = mean(peak.torque),
s = sd(peak.torque)) %>%
print()
## Change-data
# The code beneath summarizes the mean values at each time, grouped by subject, time and supplement, creating a wider data set with observations of
# participants glucose measurements per time point.
# Then, mutate() is used to calculate change scores, where each timepoint is log-transformed and compared to baseline. baseline = baseline - mean(baseline,
# na.rm = TRUE) mean centers the baseline values. Subject, supplement, baseline and change scores are then selected and pivoted for modeling. The data set is
# filtered according to test exercise (isometric, isokinetic 60 or isokinetic 240)
# Isometric
isom.dat <- rest.dat %>%
filter(test == "isom") %>%
print()
change_dat <- isom.dat %>%
dplyr::select(subject, time, supplement, peak.torque) %>%
group_by(subject, time, supplement) %>%
summarise(peak.torque = mean(peak.torque, na.rm = TRUE)) %>%
pivot_wider(names_from = time,
values_from = peak.torque) %>%
ungroup() %>%
mutate(change.2 = log(test1)-log(baseline),
change.3 = log(test2)-log(baseline),
change.4 = log(test3)-log(baseline),
baseline = baseline - mean(baseline, na.rm = TRUE),
supplement = factor(supplement, levels = c("placebo", "glucose"))) %>%
select(subject, supplement, baseline, change.2, change.3, change.4) %>%
pivot_longer(names_to = "time",
values_to = "change",
cols = (change.2:change.4)) %>%
print()
# Isok.60
isok60.dat <- rest.dat %>%
filter(test == "isok.60") %>%
print()
change_dat2 <- isok60.dat %>%
dplyr::select(subject, time, supplement, peak.torque) %>%
group_by(subject, time, supplement) %>%
summarise(peak.torque = mean(peak.torque, na.rm = TRUE)) %>%
pivot_wider(names_from = time,
values_from = peak.torque) %>%
ungroup() %>%
mutate(change.2 = log(test1)-log(baseline),
change.3 = log(test2)-log(baseline),
change.4 = log(test3)-log(baseline),
baseline = baseline - mean(baseline, na.rm = TRUE),
supplement = factor(supplement, levels = c("placebo", "glucose"))) %>%
select(subject, supplement, baseline, change.2, change.3, change.4) %>%
pivot_longer(names_to = "time",
values_to = "change",
cols = (change.2:change.4)) %>%
print()
## Isok.240
isok240.dat <- rest.dat %>%
filter(test == "isok.240") %>%
print()
change_dat3 <- isok240.dat %>%
dplyr::select(subject, time, supplement, peak.torque) %>%
group_by(subject, time, supplement) %>%
summarise(peak.torque = mean(peak.torque, na.rm = TRUE)) %>%
pivot_wider(names_from = time,
values_from = peak.torque) %>%
ungroup() %>%
mutate(change.2 = log(test1)-log(baseline),
change.3 = log(test2)-log(baseline),
change.4 = log(test3)-log(baseline),
baseline = baseline - mean(baseline, na.rm = TRUE),
supplement = factor(supplement, levels = c("placebo", "glucose"))) %>%
select(subject, supplement, baseline, change.2, change.3, change.4) %>%
pivot_longer(names_to = "time",
values_to = "change",
cols = (change.2:change.4)) %>%
print()
## Linear mixed effects model
# This model tries to explain the change by time and supplement, accounting for potential differences in baseline values and that the same participants
# are measured at multiple time points.
# It produces results on both the time effect and the difference between the groups at any timepoint. We are interested in the difference between groups.
# Mean of all subjects
# Isometric
m1 <- lmerTest::lmer(change ~ 0 + baseline + time + supplement:time + (1|subject),
data = change_dat)
plot(m1)
summary(m1)
# Isok.60
m2 <- lmerTest::lmer(change ~ 0 + baseline + time + supplement:time + (1|subject),
data = change_dat2)
plot(m2)
summary(m2)
# Isok.240
m3 <- lmerTest::lmer(change ~ 0 + baseline + time + supplement:time + (1|subject),
data = change_dat3)
plot(m3)
summary(m3)
## Fold-change estimated means
# Gets estimated means from the model, these are average increase at pre = 0 (the average pre value).
# These are log-fold change values (changeble with the mutate function)
# Isometric
confint.m1 <- confint(emmeans(m1, specs = ~"supplement|time")) %>%
data.frame()
# Isok.60
confint.m2 <- confint(emmeans(m2, specs = ~"supplement|time")) %>%
data.frame() %>%
print()
# Isok.240
confint.m3 <- confint(emmeans(m3, specs = ~"supplement|time")) %>%
data.frame()
## Emmeans figures
# Bug fix: `pos` is used in every position = argument below but was never
# defined anywhere in this script, so the plots errored. Define the shared
# dodge offset once so error bars, lines, and points stay aligned.
pos <- position_dodge(width = 0.2)
# Isom
confint.m1 %>%
  data.frame() %>%
  add_row(supplement = "placebo", time = "change.1", emmean = 0, SE = 0, df = 0, lower.CL = 0, upper.CL = 0, .before =1) %>%
  add_row(supplement = "glucose", time = "change.1", emmean = 0, SE = 0, df = 0, lower.CL = 0, upper.CL = 0, .before =2) %>%
  ggplot(aes(time, emmean, group = supplement, fill = supplement)) +
  geom_errorbar(aes(ymin = lower.CL, ymax = upper.CL),
                position = pos,
                width = 0.2) +
  geom_line(position = pos) +
  geom_point(shape = 21, position = pos, size = 3) +
  scale_x_discrete(labels=c("change.1" = "Baseline", "change.2" = "Test 1", "change.3" = "Test 2",
                            "change.4" = "Test 3")) +
  labs(x = "", y = "Isometric \n(nm change)\n", fill = "Supplement") +
  theme_classic() +
  theme(axis.text.x = element_text(size=8))
# Isok 60
confint.m2 %>%
  data.frame() %>%
  add_row(supplement = "placebo", time = "change.1", emmean = 0, SE = 0, df = 0, lower.CL = 0, upper.CL = 0, .before =1) %>%
  add_row(supplement = "glucose", time = "change.1", emmean = 0, SE = 0, df = 0, lower.CL = 0, upper.CL = 0, .before =2) %>%
  ggplot(aes(time, emmean, group = supplement, fill = supplement)) +
  geom_errorbar(aes(ymin = lower.CL, ymax = upper.CL),
                position = pos,
                width = 0.2) +
  geom_line(position = pos) +
  geom_point(shape = 21, position = pos, size = 3) +
  scale_x_discrete(labels=c("change.1" = "Baseline", "change.2" = "Test 1", "change.3" = "Test 2",
                            "change.4" = "Test 3")) +
  labs(x = "", y = "Isokinetic 60 \n(nm change)\n", fill = "Supplement") +
  theme_classic() +
  theme(axis.text.x = element_text(size=8))
# Isok 240
confint.m3 %>%
data.frame() %>%
add_row(supplement = "placebo", time = "change.1", emmean = 0, SE = 0, df = 0, lower.CL = 0, upper.CL = 0, .before =1) %>%
add_row(supplement = "glucose", time = "change.1", emmean = 0, SE = 0, df = 0, lower.CL = 0, upper.CL = 0, .before =2) %>%
ggplot(aes(time, emmean, group = supplement, fill = supplement)) +
geom_errorbar(aes(ymin = lower.CL, ymax = upper.CL),
position = pos,
width = 0.2) +
geom_line(position = pos) +
geom_point(shape = 21, position = pos, size = 3) +
scale_x_discrete(labels=c("change.1" = "Baseline", "change.2" = "Test 1", "change.3" = "Test 2",
"change.4" = "Test 3")) +
labs(x = "Time-Point", y = "Isokinetic 240 \n(nm change)\n", fill = "Supplement") +
theme_classic() +
theme(axis.text.x = element_text(size=8))
|
# This program reads the crypto data from Bitfinex
# NOTE(review): install.packages() runs on every execution of the script;
# consider guarding with requireNamespace() so repeated runs skip the install.
install.packages("Quandl")
install.packages("dygraphs")
library(xts)
library("Quandl")
library("dygraphs")
# Quandl.api_key('***************')
# Download price series: BTC as an xts time series, ETH and IOTA as data frames.
BTC <- Quandl("BITFINEX/BTCUSD",type="xts")
ETH <- Quandl("BITFINEX/ETHUSD")
IOTA <- Quandl("BITFINEX/IOTUSD")
# Second column of the ETH data, coerced to a matrix (unused downstream here).
test <- as.matrix(ETH[2])
# Plot
# Interactive dygraph of the first three BTC columns with a draggable range selector.
dygraph(BTC[,1:3]) %>%
dyRangeSelector()
|
/Crypto.R
|
no_license
|
mlacher/R_Crypto
|
R
| false
| false
| 388
|
r
|
# This program reads the crypto data from Bitfinex
# NOTE(review): install.packages() runs on every execution of the script;
# consider guarding with requireNamespace() so repeated runs skip the install.
install.packages("Quandl")
install.packages("dygraphs")
library(xts)
library("Quandl")
library("dygraphs")
# Quandl.api_key('***************')
# Download price series: BTC as an xts time series, ETH and IOTA as data frames.
BTC <- Quandl("BITFINEX/BTCUSD",type="xts")
ETH <- Quandl("BITFINEX/ETHUSD")
IOTA <- Quandl("BITFINEX/IOTUSD")
# Second column of the ETH data, coerced to a matrix (unused downstream here).
test <- as.matrix(ETH[2])
# Plot
# Interactive dygraph of the first three BTC columns with a draggable range selector.
dygraph(BTC[,1:3]) %>%
dyRangeSelector()
|
#' @title Median unbiased estimator
#' @description Calculates the median unbiased estimator of the true response
#' rate for Simon-like designs.
#' @details The median unbiased estimator is the response rate at which the
#' p-value equals 0.5 (\emph{Koyama and Chen, 2008}). The value is found by
#' numerical search, with a precision of 0.000001.
#' @param s Total number of successes.
#' @param n1 Stage 1 sample size.
#' @param r1 Stage 1 critical value (the trial stops at stage 1 when the number
#' of successes is at most \code{r1}).
#' @param n Total sample size.
#' @param p0 Response rate under the null hypothesis.
#' @return Estimate of the response rate.
#' @references Koyama, T. and Chen, H. Proper inference from Simon's two-stage designs.
#' \emph{Stat Med}, 2008, 27, 3145-3154.
#' @seealso \code{\link{pvaluek}}, \code{\link{pquantile}}, \code{\link{pm}},
#' \code{\link{pg}}, \code{\link{pu}} and \code{\link{pp}}.
#' @export
#' @examples
#' pk(21, 19, 4, 54, 0.2)
#' @author Arsenio Nhacolo
pk <- function(s, n1, r1, n, p0) {
  # The 0.5 quantile of the p-value function is the median unbiased estimate.
  pquantile(s, n1, r1, n, p0, 0.5)
}
|
/R/pk.r
|
no_license
|
arsenionhacolo/InferenceBEAGSD
|
R
| false
| false
| 1,099
|
r
|
#' @title Median unbiased estimator
#' @description Calculates the median unbiased estimator of the true response
#' rate for Simon-like designs.
#' @details The median unbiased estimator is the response rate at which the
#' p-value equals 0.5 (\emph{Koyama and Chen, 2008}). The value is found by
#' numerical search, with a precision of 0.000001.
#' @param s Total number of successes.
#' @param n1 Stage 1 sample size.
#' @param r1 Stage 1 critical value (the trial stops at stage 1 when the number
#' of successes is at most \code{r1}).
#' @param n Total sample size.
#' @param p0 Response rate under the null hypothesis.
#' @return Estimate of the response rate.
#' @references Koyama, T. and Chen, H. Proper inference from Simon's two-stage designs.
#' \emph{Stat Med}, 2008, 27, 3145-3154.
#' @seealso \code{\link{pvaluek}}, \code{\link{pquantile}}, \code{\link{pm}},
#' \code{\link{pg}}, \code{\link{pu}} and \code{\link{pp}}.
#' @export
#' @examples
#' pk(21, 19, 4, 54, 0.2)
#' @author Arsenio Nhacolo
pk <- function(s, n1, r1, n, p0) {
  # The 0.5 quantile of the p-value function is the median unbiased estimate.
  pquantile(s, n1, r1, n, p0, 0.5)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/genotyping.R
\name{genotype_inversions}
\alias{genotype_inversions}
\title{Bayesian genotyper for inversions}
\usage{
genotype_inversions(
WW_reads,
WC_reads,
regions,
background,
base_state,
sex = "female",
prior = c(0.33, 0.33, 0.33),
prior_male = c(0.5, 0.5)
)
}
\arguments{
\item{WW_reads}{A GRanges object (or GAlignmentPairs in the PE case) containing reads for a WW composite file. See read_regions().}
\item{WC_reads}{A GRanges object (or GAlignmentPairs in the PE case) containing reads for a WC composite file. See read_regions().}
\item{regions}{A GRanges object containing genomic intervals that are thought to be inversions.}
\item{background}{The fraction of background reads for the WW composite file. See WWCC_background().}
\item{base_state}{The strand state of the WW composite file: either "WW" (mostly + reads) or "CC" (mostly - reads).}
\item{sex}{Sex of sample to figure out sex chromosomes. Default "female".}
\item{prior}{Vector of three prior weights for inversion genotypes. For example, c("ref","het","hom") = c(0.9,0.05,0.05). Default c(0.33,0.33,0.33).}
\item{prior_male}{Vector of two prior weights for inversions on the male sex chromosomes. For example, c("ref", "inv") = c(0.9,0.1). Default c(0.5,0.5).}
}
\value{
A dataframe of the regions, where each region is matched with the most probable genotype and the corresponding posterior probability, as well as some read counts.
}
\description{
Given two Strand-seq composite files (WC and WW) and a list of intervals, this computes the highest posterior probability of the possible (phased) genotypes.
}
\details{
Step-by-step: It standardizes the priors so that they sum to 1, and sets prior probabilities for errors that may be present in the data. Then it chooses the binomial probabilities for
the Bayesian model. The function counts the forward and reverse reads in each inversion, and then genotypes them in the male case (details omitted: it's the same as the female case really).
The read counts and the probabilities can be combined to calculate binomial (log) likelihoods of the possible strand states of each inversion in the two composite files (e.g. WW or WC).
Given the strand-states we expect (accounting for the errors that may be present in the data), these likelihoods can be used to compute more (log) likelihoods: this time, for the
genotypes REF, HET(0|1), HET(1|0), and HOM. We convert these into regular posterior probabilities and choose the highest one, with the associated genotype.
Note that we can phase inversions only because the WC composite file already has phased reads. This means that we know a 0|1 inversion on chr1 is on the same homolog as all 0|1 inversions
on chr1 in the sample, and that all chr1 1|0 inversions are on the other homolog. However, we don't know whether a 0|1 inversion on chr1 and a 0|1 inversion chr2 came from the same parent.
0|1 inversions are distinguished from 1|0 inversions based on the strand switch in the WC composite file ( WC -> WW or WC -> CC).
}
|
/invertyper/man/genotype_inversions.Rd
|
no_license
|
mattssca/InvertypeR
|
R
| false
| true
| 3,100
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/genotyping.R
\name{genotype_inversions}
\alias{genotype_inversions}
\title{Bayesian genotyper for inversions}
\usage{
genotype_inversions(
WW_reads,
WC_reads,
regions,
background,
base_state,
sex = "female",
prior = c(0.33, 0.33, 0.33),
prior_male = c(0.5, 0.5)
)
}
\arguments{
\item{WW_reads}{A GRanges object (or GAlignmentPairs in the PE case) containing reads for a WW composite file. See read_regions().}
\item{WC_reads}{A GRanges object (or GAlignmentPairs in the PE case) containing reads for a WC composite file. See read_regions().}
\item{regions}{A GRanges object containing genomic intervals that are thought to be inversions.}
\item{background}{The fraction of background reads for the WW composite file. See WWCC_background().}
\item{base_state}{The strand state of the WW composite file: either "WW" (mostly + reads) or "CC" (mostly - reads).}
\item{sex}{Sex of sample to figure out sex chromosomes. Default "female".}
\item{prior}{Vector of three prior weights for inversion genotypes. For example, c("ref","het","hom") = c(0.9,0.05,0.05). Default c(0.33,0.33,0.33).}
\item{prior_male}{Vector of two prior weights for inversions on the male sex chromosomes. For example, c("ref", "inv") = c(0.9,0.1). Default c(0.5,0.5).}
}
\value{
A dataframe of the regions, where each region is matched with the most probable genotype and the corresponding posterior probability, as well as some read counts.
}
\description{
Given two Strand-seq composite files (WC and WW) and a list of intervals, this computes the highest posterior probability of the possible (phased) genotypes.
}
\details{
Step-by-step: It standardizes the priors so that they sum to 1, and sets prior probabilities for errors that may be present in the data. Then it chooses the binomial probabilities for
the Bayesian model. The function counts the forward and reverse reads in each inversion, and then genotypes them in the male case (details omitted: it's the same as the female case really).
The read counts and the probabilities can be combined to calculate binomial (log) likelihoods of the possible strand states of each inversion in the two composite files (e.g. WW or WC).
Given the strand-states we expect (accounting for the errors that may be present in the data), these likelihoods can be used to compute more (log) likelihoods: this time, for the
genotypes REF, HET(0|1), HET(1|0), and HOM. We convert these into regular posterior probabilities and choose the highest one, with the associated genotype.
Note that we can phase inversions only because the WC composite file already has phased reads. This means that we know a 0|1 inversion on chr1 is on the same homolog as all 0|1 inversions
on chr1 in the sample, and that all chr1 1|0 inversions are on the other homolog. However, we don't know whether a 0|1 inversion on chr1 and a 0|1 inversion chr2 came from the same parent.
0|1 inversions are distinguished from 1|0 inversions based on the strand switch in the WC composite file ( WC -> WW or WC -> CC).
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{ExteNET_2A}
\alias{ExteNET_2A}
\title{ExteNET, figure 2A}
\format{
A data frame of 2,840 observations and 3 variables:
\tabular{lll}{
\tab \code{time} \tab event time (in months) \cr
\tab \code{event} \tab DFS event indicator (\code{0}: no event, \code{1}: event) \cr
\tab \code{arm} \tab treatment arms (neratinib, placebo) \cr
}
}
\source{
Chan A, Delaloge S, Holmes FA, et al. Neratinib after
trastuzumab-based adjuvant therapy in patients with HER2-positive
breast cancer (ExteNET): a multicentre, randomised, double-blind,
placebo-controlled, phase 3 trial. Lancet Oncol 2016; 17: 367–77.
}
\usage{
ExteNET_2A
}
\description{
Kaplan-Meier digitized data from ExteNET, figure 2A (PMID 26874901). A reported sample size of 2,840 for a primary endpoint of iDFS in breast cancer.
}
\examples{
summary(ExteNET_2A)
kmplot(ExteNET_2A)
}
\keyword{datasets}
|
/man/ExteNET_2A.Rd
|
no_license
|
Owain-S/kmdata
|
R
| false
| true
| 963
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{ExteNET_2A}
\alias{ExteNET_2A}
\title{ExteNET, figure 2A}
\format{
A data frame of 2,840 observations and 3 variables:
\tabular{lll}{
\tab \code{time} \tab event time (in months) \cr
\tab \code{event} \tab DFS event indicator (\code{0}: no event, \code{1}: event) \cr
\tab \code{arm} \tab treatment arms (neratinib, placebo) \cr
}
}
\source{
Chan A, Delaloge S, Holmes FA, et al. Neratinib after
trastuzumab-based adjuvant therapy in patients with HER2-positive
breast cancer (ExteNET): a multicentre, randomised, double-blind,
placebo-controlled, phase 3 trial. Lancet Oncol 2016; 17: 367–77.
}
\usage{
ExteNET_2A
}
\description{
Kaplan-Meier digitized data from ExteNET, figure 2A (PMID 26874901). A reported sample size of 2,840 for a primary endpoint of iDFS in breast cancer.
}
\examples{
summary(ExteNET_2A)
kmplot(ExteNET_2A)
}
\keyword{datasets}
|
# Configure BatchJobs to submit jobs to Sun Grid Engine using the user's
# submission template file.
cluster.functions <- makeClusterFunctionsSGE("~/SGETemplate.tmpl")
|
/WholeGenomeAlignment/pipelines/BatchJobs.R
|
no_license
|
alexander-nash/CSC
|
R
| false
| false
| 68
|
r
|
# Configure BatchJobs to submit jobs to Sun Grid Engine using the user's
# submission template file.
cluster.functions <- makeClusterFunctionsSGE("~/SGETemplate.tmpl")
|
# Show the highest total-return rows of `x` to identify outliers/data errors.
#
# Args:
#   x: a data frame with (at least) columns `name` and `tret` (total return).
# Returns:
#   `x` without the `name` column, restricted to the rows ranked highest by
#   `tret`, sorted in descending order of `tret`.
# BUG FIX: the original file had a stray extra `}` after the function body,
# which made the file syntactically invalid; it has been removed.
clean_data <- function(x) {
  # Shows companies with highest total returns in descending order to
  # identify outliers/data errors.
  # NOTE(review): row_number(...) < 10 keeps the top 9 rows; use <= 10 if
  # exactly ten rows are intended.
  x %>%
    select(-name) %>%
    filter(row_number(desc(tret)) < 10) %>%
    arrange(desc(tret))
}
|
/clean_data.R
|
no_license
|
syl2/Final-Project-Code
|
R
| false
| false
| 266
|
r
|
# Show the highest total-return rows of `x` to identify outliers/data errors.
#
# Args:
#   x: a data frame with (at least) columns `name` and `tret` (total return).
# Returns:
#   `x` without the `name` column, restricted to the rows ranked highest by
#   `tret`, sorted in descending order of `tret`.
# BUG FIX: the original file had a stray extra `}` after the function body,
# which made the file syntactically invalid; it has been removed.
clean_data <- function(x) {
  # Shows companies with highest total returns in descending order to
  # identify outliers/data errors.
  # NOTE(review): row_number(...) < 10 keeps the top 9 rows; use <= 10 if
  # exactly ten rows are intended.
  x %>%
    select(-name) %>%
    filter(row_number(desc(tret)) < 10) %>%
    arrange(desc(tret))
}
|
#library(data.table)
#' Non-linearity screening for continuous predictors.
#'
#' For every eligible numeric x variable, fits an rms model (lrm, cph, or ols)
#' with a 3-knot restricted cubic spline term, rcs(x, 3), and collects the
#' anova p-values (overall variable, non-linear component) into a table.
#'
#' @param rawData data.frame holding the outcome and predictor columns.
#' @param outVars Outcome variable names. For modelType = "cph" each element
#'   must be a length-2 vector c(time, event) used to build Surv(time, event).
#'   When length(outVars) == length(xVars) the variables are paired
#'   one-to-one; otherwise every outcome is crossed with every predictor.
#' @param xVars Predictor variable names; non-numeric columns are skipped.
#' @param modelType One of "lrm", "cph", "ols" (rms model-fitting functions).
#' @param uniqueSampleSize Minimum number of distinct values a predictor must
#'   have to be tested (splines are pointless on near-categorical data).
#' @param returnKable If TRUE, return a knitr::kable of the table instead of
#'   the raw matrix (paired branch excepted; see NOTE below).
#' @return A character matrix (or kable) with one row per fitted model, or
#'   NULL when nothing was testable.
#' @export
#'
nonLinearTest <- function(rawData, outVars, xVars, modelType = "lrm", uniqueSampleSize = 6, returnKable = FALSE) {
  modelType <- match.arg(modelType, c("lrm", "cph", "ols"))
  modelFun <- get(modelType)
  resultOut <- NULL
  # BUG FIX: the original paired branch tested `class(x) == "numeric" | ...`,
  # which errors/warns inside if() when class() returns more than one element
  # and was inconsistent with the crossed branch. Both branches now use the
  # same scalar-safe predicate.
  isNumericCol <- function(x) "numeric" %in% class(x) || "integer" %in% class(x)
  if (length(outVars) == length(xVars)) { # One outVar paired with one xVar
    for (i in seq_along(outVars)) {
      outVarOne <- outVars[[i]]
      xVarOne <- xVars[i]
      if (isNumericCol(rawData[, xVarOne]) &&
          length(unique(rawData[, xVarOne])) >= uniqueSampleSize) {
        # cph needs a Surv(time, event) left-hand side.
        if (modelType == "cph") {
          formulaForModel <- as.formula(paste0("Surv(", outVarOne[1], ", ", outVarOne[2], ")", "~rcs(", xVarOne, ",3)"))
        } else {
          formulaForModel <- as.formula(paste0(outVarOne, "~rcs(", xVarOne, ",3)"))
        }
        modelResult <- modelFun(formulaForModel, data = rawData)
        modelResultAnova <- anova(modelResult)
        # NOTE(review): this branch stores the raw anova p-value column while
        # the crossed branch below formats it with showP() -- preserved as-is.
        resultOne <- c(paste(outVarOne, collapse = ","), xVarOne, paste0(as.expression(formulaForModel)), as.vector(modelResultAnova[, 3]))
        resultOut <- rbind(resultOut, resultOne)
      }
    }
  } else {
    for (outVarOne in outVars) { # Cross every outcome with every predictor
      for (xVarOne in xVars) {
        if (isNumericCol(rawData[, xVarOne]) &&
            length(unique(rawData[, xVarOne])) >= uniqueSampleSize) {
          if (modelType == "cph") {
            formulaForModel <- as.formula(paste0("Surv(", outVarOne[1], ", ", outVarOne[2], ")", "~rcs(", xVarOne, ",3)"))
          } else {
            formulaForModel <- as.formula(paste0(outVarOne, "~rcs(", xVarOne, ",3)"))
          }
          modelResult <- modelFun(formulaForModel, data = rawData)
          modelResultAnova <- anova(modelResult)
          resultOne <- c(paste(outVarOne, collapse = ","), xVarOne, paste0(as.expression(formulaForModel)), showP(modelResultAnova[1:3, "P"], 3, text = ""))
          resultOut <- rbind(resultOut, resultOne)
        }
      }
    }
  }
  if (!is.null(resultOut) && nrow(resultOut) > 0) {
    row.names(resultOut) <- NULL
    # Column names for P values come from the anova row labels of the last
    # fitted model (rows 2:3 are the non-linear and total chunks).
    colnames(resultOut) <- c("Outcome", "X", "Formula", "P (Variable)", paste0("P (", row.names(modelResultAnova)[2:3], ")"))
    if (returnKable) {
      kable(resultOut, caption = "Non-linear Test for continuous variables")
    } else {
      return(resultOut)
    }
  } else {
    return(resultOut)
  }
}
# Export p-values and coefficients/effects from an rms model result.
# varOne lists the variables of interest.
#' Extract p-values and effect estimates from a fitted rms model.
#'
#' Supports lrm (odds ratios), cph (hazard ratios) and ols (linear effects).
#' For each variable in \code{varOne} the function reports a p-value
#' (anova chunk test for rcs/polynomial terms when \code{reportAnovaP} is
#' TRUE; otherwise a t test for ols, or a Wald Z test for lrm/cph) together
#' with the one-unit effect and the 25%-to-75%-quantile effect for
#' continuous variables, or the level-vs-reference contrasts for factors.
#'
#' @param modelResult A fitted lrm, ols or cph object.
#' @param varOne Character vector of variable names of interest.
#' @param extractStats Optional names of entries of modelResult$stats to
#'   append to each output row.
#' @param reportAnovaP Use the anova chunk p-value for non-linear terms.
#' @return A data.frame with one row per extracted contrast.
#' @export
#'
exportModelResult=function(modelResult, varOne,extractStats=NULL,reportAnovaP=TRUE) {
  supportedModelTypes=c("lrm", "ols", "cph")
  # First supported class found on the model object decides the output labels.
  modelType=intersect(class(modelResult),supportedModelTypes)[1]
  # BUG FIX: intersect() returns character(0) when nothing matches, so
  # indexing with [1] yields NA_character_ (length 1) and the original
  # `length(modelType) == 0` guard could never fire. Test is.na() instead.
  if (is.na(modelType)) {
    stop("Can't find modelType. Now only supports ",paste(supportedModelTypes,collapse=";"))
  }
  modelResultOut=NULL
  for (i in seq_along(varOne)) {
    varOneToExtract <- varOne[i]
    varOneInd <- grep(varOneToExtract, names(modelResult$coefficients))
    varOneToExtractType=modelResult$Design$assume[which(modelResult$Design$name==varOneToExtract)]
    ######################
    # get p value
    ######################
    if (length(varOneInd) > 0) {
      if (reportAnovaP && (varOneToExtractType=="rcspline" | varOneToExtractType=="polynomial")) { #for continuous variables and with non-linear term only
        pValueOne=anova(modelResult)[varOneToExtract,"P"]
      } else {
        if (modelType=="ols") { #ols, linear regression
          pValueOne=summary.lm(modelResult)$coefficients[varOneInd,"Pr(>|t|)"]
        } else { #lrm or cph, Wald Z test to get the p value
          pValueOne <- (pnorm(abs(modelResult$coef / sqrt(diag(modelResult$var))), lower.tail = FALSE) * 2)[varOneInd]
        }
      }
      pValueOne <- showP(pValueOne, text = "", digits = 4)
    } else {
      warning(paste0("Can't find interested var name in model result: ", paste(varOneToExtract, collapse = ", ")))
      next
    }
    ######################
    # get coef/effect
    ######################
    if (varOneToExtractType=="rcspline") { # non-linear continuous term: may carry more than one p-value
      pValueOne <- paste(pValueOne, collapse = "; ")
    }
    # Reference value ("Adjust to") for this variable from the rms Design.
    varOneToExtractLimits=modelResult$Design$limits[,which(modelResult$Design$name==varOneToExtract),drop=FALSE]
    varOneRef=varOneToExtractLimits["Adjust to",]
    if (varOneToExtractType=="category") { # interested var is a factor
      summaryArgList <- list(quote(modelResult), varOneRef, est.all = FALSE)
      names(summaryArgList)[2] <- varOneToExtract
      modelResultSummary <- round(do.call(summary, summaryArgList), 3)
      varOneInd <- grep(varOneToExtract, row.names(modelResultSummary))
      if (modelType == "ols") { # one row per contrast, no odds ratio
        varOneOut <- data.frame(row.names(modelResultSummary)[varOneInd], pValueOne, matrix(modelResultSummary[varOneInd, c(4, 6, 7)], ncol = 3), matrix("", ncol = 6, nrow = length(varOneInd)), stringsAsFactors = FALSE)
      } else { # two rows per contrast, the second row carries the ratio scale
        varOneOut <- data.frame(row.names(modelResultSummary)[varOneInd], modelResultSummary[varOneInd, c(4)], pValueOne, matrix(modelResultSummary[varOneInd + 1, c(4, 6, 7)], ncol = 3), matrix("", ncol = 6, nrow = length(varOneInd)), stringsAsFactors = FALSE)
      }
    } else { # continuous: report both +1 unit and 25%-75% quantile effects
      # varOneRef is the adjust-to (median) value.
      summaryArgList <- list(quote(modelResult), c(varOneRef, varOneRef + 1), est.all = FALSE)
      names(summaryArgList)[2] <- varOneToExtract
      modelResultSummaryUnit <- round(do.call(summary, summaryArgList), 3) # effect of a one-unit change from the reference
      summaryArgList <- list(quote(modelResult), varOneToExtract, est.all = FALSE)
      modelResultSummary <- round(do.call(summary, summaryArgList), 3) # effect from 25% quantile to 75% quantile
      varOneInd <- grep(varOneToExtract, row.names(modelResultSummary))
      if (modelType == "ols") { # one row, no odds ratio
        varOneOut <- data.frame(row.names(modelResultSummary)[varOneInd], pValueOne, matrix(modelResultSummaryUnit[varOneInd , c(4, 6, 7)], ncol = 3), matrix(modelResultSummary[varOneInd, c(1, 2, 3, 4, 6, 7)], ncol = 6), stringsAsFactors = FALSE)
      } else {
        varOneOut <- data.frame(row.names(modelResultSummary)[varOneInd], modelResultSummaryUnit[varOneInd, c(4)], pValueOne, matrix(modelResultSummaryUnit[varOneInd + 1, c(4, 6, 7)], ncol = 3), matrix(modelResultSummary[varOneInd + 1, c(1, 2, 3, 4, 6, 7)], ncol = 6), stringsAsFactors = FALSE)
      }
    }
    if (modelType == "ols") { # linear regression: no ratio columns
      colnames(varOneOut) <- c(
        "InterestedVar", "P", "Effect (One Unit)", "Effect (Lower 95%)", "Effect (Upper 95%)",
        "Value (25% Quantile)", "Value (75% Quantile)", "Value Diff (75%-25%)", "Effect (Diff: 75%-25%)", "Effect (Diff, Lower 95%)", "Effect (Diff, Upper 95%)"
      )
    } else if (modelType == "cph") { # hazard ratio labels
      colnames(varOneOut) <- c(
        "InterestedVar", "Effect (One Unit)", "P", "Hazard Ratio (One Unit)", "HR (Lower 95%)", "HR (Upper 95%)",
        "Value (25% Quantile)", "Value (75% Quantile)", "Value Diff (75%-25%)", "Hazard Ratio (Diff: 75%-25%)", "HR (Diff, Lower 95%)", "HR (Diff, Upper 95%)"
      )
    } else { # lrm: odds ratio labels
      colnames(varOneOut) <- c(
        "InterestedVar", "Effect (One Unit)", "P", "Odds Ratio (One Unit)", "OR (Lower 95%)", "OR (Upper 95%)",
        "Value (25% Quantile)", "Value (75% Quantile)", "Value Diff (75%-25%)", "Odds Ratio (Diff: 75%-25%)", "OR (Diff, Lower 95%)", "OR (Diff, Upper 95%)"
      )
    }
    # Record the event level: when the outcome is a factor we need to know
    # which level was modelled as the event (1).
    if (modelType == "lrm") {
      outVarEvent=paste(paste0(rev(names(modelResult$freq)),"(",rev((modelResult$freq)),")"),collapse=" : ")
      varOneOut <- data.frame(Event=outVarEvent,varOneOut, stringsAsFactors = FALSE, check.names = FALSE)
    }
    varOneOut <- data.frame(Formula = paste0(modelType, " (", as.character(as.expression(modelResult$sformula)), ")"),varOneOut, stringsAsFactors = FALSE, check.names = FALSE)
    if (!is.null(extractStats)) {
      varOneOut <- c(varOneOut, round(modelResult$stats[extractStats], 3))
    }
    modelResultOut <- rbind(modelResultOut, varOneOut)
  }
  return(modelResultOut)
}
# Make a report table by fitting one regression model (lrm/cph/ols) per
# combination of outcome and interested variable, optionally adjusted by adjVars.
## outVars should be a list of c(time, event) pairs if doing a survival model
#' @export
#'
modelTable <- function(dataForModelAll, outVars, interestedVars, adjVars = NULL,
nonLinearVars = NULL, nonLinearFunName="rcs",nonLinearFunPar=3,
extractStats = NULL,modelType = "lrm", printModel = FALSE, printModelFigure = printModel,
returnKable = FALSE,returnModel = FALSE,uniqueSampleSize=5,
reportAnovaP=TRUE,adjto.cat='first') {
modelType <- match.arg(modelType, c("lrm", "cph", "ols"))
modelFun <- get(modelType)
modelResultAll <- NULL
modelAll <- list()
for (outVar in outVars) {
for (varOne in interestedVars) {
varForModelOne <- c(varOne, adjVars)
# Build the model formula; cph needs a Surv(time, event) left-hand side.
if (modelType == "cph") {
formulaForModel <- paste("Surv(", outVar[1], ", ", outVar[2], ")", "~", paste0(varForModelOne, collapse = " + "), " ")
} else {
formulaForModel <- paste(outVar, "~", paste0(varForModelOne, collapse = " + "), " ")
}
# Wrap requested variables in the non-linear term, e.g. " x " -> " rcs(x,3) ".
if (!is.null(nonLinearVars)) {
for (nonLinearVarOne in nonLinearVars) {
formulaForModel <- gsub(paste0(" ", nonLinearVarOne, " "), paste0(" ",nonLinearFunName,"(", nonLinearVarOne, ",",nonLinearFunPar,") "), formulaForModel)
}
}
formulaForModel <- as.formula(formulaForModel)
dataForModel <- dataForModelAll[, c(outVar, varForModelOne)]
for (temp in varForModelOne) { # change all numbers with only uniqueSampleSize values in dataForModel into factor
if (length(unique(na.omit(dataForModel[, temp]))) <= uniqueSampleSize) {
dataForModel[, temp] <- factor(dataForModel[, temp])
}
}
#browser()
# NOTE(review): `ddist` is assigned globally via <<-, presumably because rms
# resolves options(datadist = "ddist") by name in the global environment --
# confirm; this does clobber any existing global `ddist`.
ddist <<- datadist(dataForModel, n.unique = uniqueSampleSize,adjto.cat=adjto.cat)
options(datadist = "ddist")
# x=TRUE, y=TRUE keep the design matrix/response on the fit for later use.
modelResult <- modelFun(formulaForModel, data = dataForModel,x=TRUE,y=TRUE)
if (printModel) {
print(paste0("Model formula: ",as.character(as.expression(formulaForModel))))
print(modelResult)
}
if (printModelFigure) {
print(plot(Predict(modelResult),ylab=outVar))
}
if (returnModel) {
modelAll=c(modelAll,list(modelResult))
}
# extract result, may have many variables in varOne
modelResultOut=exportModelResult(modelResult,varOne,reportAnovaP = reportAnovaP)
modelResultAll=rbind(modelResultAll,modelResultOut)
}
}
row.names(modelResultAll) <- NULL
if (returnKable) {
temp <- apply(modelResultAll, 2, function(x) all(x == "")) # drop all-empty columns
print(kable(modelResultAll[, which(!temp)],caption ="Regression Model Result Summary"))
}
if (returnModel) {
return(modelAll)
} else {
return(modelResultAll)
}
}
#' Summarize an rms model result at two user-supplied values of a variable.
#'
#' Computes the effect (difference) between \code{varOneToExtractValues[1]}
#' and \code{varOneToExtractValues[2]} of \code{varOneToExtract} from a fitted
#' lrm/ols/cph model, with 95% confidence limits, labelled according to the
#' model type (effect / hazard ratio / odds ratio).
#' @export
#'
easierSummaryByValue=function(modelResult,varOneToExtract,varOneToExtractValues) {
# Detect the model type from the object's class; output labels depend on it.
# Anything that is neither lrm nor ols is assumed to be cph.
if ("lrm" %in% class(modelResult)) {
modelType="lrm"
} else if ("ols" %in% class(modelResult)) {
modelType="ols"
} else {
modelType="cph"
}
# Call rms summary() with the two values bound to the variable's argument name.
summaryArgList <- list(quote(modelResult), c((varOneToExtractValues[1]), (varOneToExtractValues[2])), est.all = FALSE)
names(summaryArgList)[2] <- varOneToExtract
#print(summaryArgList)
#browser()
modelResultSummary <- round(do.call(summary, summaryArgList), 3)
varOneInd <- grep(varOneToExtract, row.names(modelResultSummary))
#browser()
if (modelType == "ols") { #one row, no odds ratio
varOneOut <- data.frame(row.names(modelResultSummary)[varOneInd], matrix(modelResultSummary[varOneInd, c(1, 2, 3, 4, 6, 7)], ncol = 6), stringsAsFactors = FALSE)
} else {
# For lrm/cph the row after each coefficient row carries the ratio scale.
varOneOut <- data.frame(row.names(modelResultSummary)[varOneInd], matrix(modelResultSummary[varOneInd + 1, c(1, 2, 3, 4, 6, 7)], ncol = 6), stringsAsFactors = FALSE)
}
modelFormula=paste0(modelType,"(",as.character(as.expression(modelResult$sformula)),")")
varOneOut=c(modelFormula,varOneOut)
if (modelType == "ols") { #linear regression no odds ratio
names(varOneOut) <- c(
"Formula","InterestedVar", "Value 1", "Value 2", "Value Diff", "Effect (Diff)", "Effect (Diff, Lower 95%)", "Effect (Diff, Upper 95%)"
)
} else if (modelType == "cph") { #hazard ratio
names(varOneOut) <- c(
"Formula","InterestedVar", "Value 1", "Value 2", "Value Diff", "Hazard Ratio (Diff)", "HR (Diff, Lower 95%)", "HR (Diff, Upper 95%)"
)
} else {
names(varOneOut) <- c(
"Formula","InterestedVar", "Value 1", "Value 2", "Value Diff", "Odds Ratio (Diff)", "OR (Diff, Lower 95%)", "OR (Diff, Upper 95%)"
)
}
return(varOneOut)
}
|
/R/rmsEasierModel.R
|
no_license
|
slzhao/cqsR
|
R
| false
| false
| 13,792
|
r
|
#library(data.table)
#' Non-linearity screening for continuous predictors.
#'
#' For every eligible numeric x variable, fits an rms model (lrm, cph, or ols)
#' with a 3-knot restricted cubic spline term, rcs(x, 3), and collects the
#' anova p-values (overall variable, non-linear component) into a table.
#'
#' @param rawData data.frame holding the outcome and predictor columns.
#' @param outVars Outcome variable names. For modelType = "cph" each element
#'   must be a length-2 vector c(time, event) used to build Surv(time, event).
#'   When length(outVars) == length(xVars) the variables are paired
#'   one-to-one; otherwise every outcome is crossed with every predictor.
#' @param xVars Predictor variable names; non-numeric columns are skipped.
#' @param modelType One of "lrm", "cph", "ols" (rms model-fitting functions).
#' @param uniqueSampleSize Minimum number of distinct values a predictor must
#'   have to be tested (splines are pointless on near-categorical data).
#' @param returnKable If TRUE, return a knitr::kable of the table instead of
#'   the raw matrix (paired branch excepted; see NOTE below).
#' @return A character matrix (or kable) with one row per fitted model, or
#'   NULL when nothing was testable.
#' @export
#'
nonLinearTest <- function(rawData, outVars, xVars, modelType = "lrm", uniqueSampleSize = 6, returnKable = FALSE) {
  modelType <- match.arg(modelType, c("lrm", "cph", "ols"))
  modelFun <- get(modelType)
  resultOut <- NULL
  # BUG FIX: the original paired branch tested `class(x) == "numeric" | ...`,
  # which errors/warns inside if() when class() returns more than one element
  # and was inconsistent with the crossed branch. Both branches now use the
  # same scalar-safe predicate.
  isNumericCol <- function(x) "numeric" %in% class(x) || "integer" %in% class(x)
  if (length(outVars) == length(xVars)) { # One outVar paired with one xVar
    for (i in seq_along(outVars)) {
      outVarOne <- outVars[[i]]
      xVarOne <- xVars[i]
      if (isNumericCol(rawData[, xVarOne]) &&
          length(unique(rawData[, xVarOne])) >= uniqueSampleSize) {
        # cph needs a Surv(time, event) left-hand side.
        if (modelType == "cph") {
          formulaForModel <- as.formula(paste0("Surv(", outVarOne[1], ", ", outVarOne[2], ")", "~rcs(", xVarOne, ",3)"))
        } else {
          formulaForModel <- as.formula(paste0(outVarOne, "~rcs(", xVarOne, ",3)"))
        }
        modelResult <- modelFun(formulaForModel, data = rawData)
        modelResultAnova <- anova(modelResult)
        # NOTE(review): this branch stores the raw anova p-value column while
        # the crossed branch below formats it with showP() -- preserved as-is.
        resultOne <- c(paste(outVarOne, collapse = ","), xVarOne, paste0(as.expression(formulaForModel)), as.vector(modelResultAnova[, 3]))
        resultOut <- rbind(resultOut, resultOne)
      }
    }
  } else {
    for (outVarOne in outVars) { # Cross every outcome with every predictor
      for (xVarOne in xVars) {
        if (isNumericCol(rawData[, xVarOne]) &&
            length(unique(rawData[, xVarOne])) >= uniqueSampleSize) {
          if (modelType == "cph") {
            formulaForModel <- as.formula(paste0("Surv(", outVarOne[1], ", ", outVarOne[2], ")", "~rcs(", xVarOne, ",3)"))
          } else {
            formulaForModel <- as.formula(paste0(outVarOne, "~rcs(", xVarOne, ",3)"))
          }
          modelResult <- modelFun(formulaForModel, data = rawData)
          modelResultAnova <- anova(modelResult)
          resultOne <- c(paste(outVarOne, collapse = ","), xVarOne, paste0(as.expression(formulaForModel)), showP(modelResultAnova[1:3, "P"], 3, text = ""))
          resultOut <- rbind(resultOut, resultOne)
        }
      }
    }
  }
  if (!is.null(resultOut) && nrow(resultOut) > 0) {
    row.names(resultOut) <- NULL
    # Column names for P values come from the anova row labels of the last
    # fitted model (rows 2:3 are the non-linear and total chunks).
    colnames(resultOut) <- c("Outcome", "X", "Formula", "P (Variable)", paste0("P (", row.names(modelResultAnova)[2:3], ")"))
    if (returnKable) {
      kable(resultOut, caption = "Non-linear Test for continuous variables")
    } else {
      return(resultOut)
    }
  } else {
    return(resultOut)
  }
}
#export p and coef from modelResult
#varOne is interested Vars
#' @export
#'
exportModelResult=function(modelResult, varOne,extractStats=NULL,reportAnovaP=TRUE) {
supportedModelTypes=c("lrm", "ols", "cph")
modelType=intersect(class(modelResult),supportedModelTypes)[1]
if (length(modelType)==0) {
stop("Can't find modelType. Now only supports ",paste(supportedModelTypes,collapse=";"))
}
modelResultOut=NULL
######################
#get p value
######################
for (i in 1:length(varOne)) {
varOneToExtract <- varOne[i]
varOneInd <- grep(varOneToExtract, names(modelResult$coefficients))
varOneToExtractType=modelResult$Design$assume[which(modelResult$Design$name==varOneToExtract)]
if (length(varOneInd) > 0) {
if (reportAnovaP && (varOneToExtractType=="rcspline" | varOneToExtractType=="polynomial")) { #for continuous variables and with non-linear term only
pValueOne=anova(modelResult)[varOneToExtract,"P"]
} else {
if (modelType=="ols") { #ols, linear regression
pValueOne=summary.lm(modelResult)$coefficients[varOneInd,"Pr(>|t|)"]
} else { #lrm or cph, wald Z test to get p value
pValueOne <- (pnorm(abs(modelResult$coef / sqrt(diag(modelResult$var))), lower.tail = F) * 2)[varOneInd]
}
}
pValueOne <- showP(pValueOne, text = "", digits = 4)
} else {
warning(paste0("Can't find interested var name in model result: ", paste(varOneToExtract, collapse = ", ")))
next
}
######################
#get coef/effect
######################
##get data limits and type
#varLimitsTable=get(options("datadist")[[1]])[["limits"]][,varOneToExtract,drop=FALSE]
#modelResult$Design
if (varOneToExtractType=="rcspline") { #non linear effect for continuous variable. May have more than one p values
pValueOne <- paste(pValueOne, collapse = "; ")
}
varOneToExtractLimits=modelResult$Design$limits[,which(modelResult$Design$name==varOneToExtract),drop=FALSE]
varOneRef=varOneToExtractLimits["Adjust to",]
if (varOneToExtractType=="category") { # interested var is factor
summaryArgList <- list(quote(modelResult), varOneRef, est.all = FALSE)
names(summaryArgList)[2] <- varOneToExtract
modelResultSummary <- round(do.call(summary, summaryArgList), 3)
#browser()
varOneInd <- grep(varOneToExtract, row.names(modelResultSummary))
if (modelType == "ols") { #one row, no odds ratio
varOneOut <- data.frame(row.names(modelResultSummary)[varOneInd], pValueOne, matrix(modelResultSummary[varOneInd, c(4, 6, 7)], ncol = 3), matrix("", ncol = 6, nrow = length(varOneInd)), stringsAsFactors = FALSE)
} else { #two rows, second row odds ratio
varOneOut <- data.frame(row.names(modelResultSummary)[varOneInd], modelResultSummary[varOneInd, c(4)], pValueOne, matrix(modelResultSummary[varOneInd + 1, c(4, 6, 7)], ncol = 3), matrix("", ncol = 6, nrow = length(varOneInd)), stringsAsFactors = FALSE)
}
} else { # interested var is continous, need both +1 effect and 25%-75% quantile change effect
#varOneRef is median value
summaryArgList <- list(quote(modelResult), c(varOneRef, varOneRef + 1), est.all = FALSE)
names(summaryArgList)[2] <- varOneToExtract
#print(summaryArgList)
modelResultSummaryUnit <- round(do.call(summary, summaryArgList), 3) # Value of One Unit Change (from median+1 to median)
summaryArgList <- list(quote(modelResult), varOneToExtract, est.all = FALSE)
#print(summaryArgList)
modelResultSummary <- round(do.call(summary, summaryArgList), 3) # Value at 75% Quantile to 25% Quantile
# varOneOut=c(coefficientOne,pValueOne,modelResultSummaryUnit[2,c(4,6,7)],modelResultSummary[2,c(1,2,3,4,6,7)])
varOneInd <- grep(varOneToExtract, row.names(modelResultSummary))
#browser()
if (modelType == "ols") { #one row, no odds ratio
varOneOut <- data.frame(row.names(modelResultSummary)[varOneInd], pValueOne, matrix(modelResultSummaryUnit[varOneInd , c(4, 6, 7)], ncol = 3), matrix(modelResultSummary[varOneInd, c(1, 2, 3, 4, 6, 7)], ncol = 6), stringsAsFactors = FALSE)
} else {
varOneOut <- data.frame(row.names(modelResultSummary)[varOneInd], modelResultSummaryUnit[varOneInd, c(4)], pValueOne, matrix(modelResultSummaryUnit[varOneInd + 1, c(4, 6, 7)], ncol = 3), matrix(modelResultSummary[varOneInd + 1, c(1, 2, 3, 4, 6, 7)], ncol = 6), stringsAsFactors = FALSE)
}
}
if (modelType == "ols") { #linear regression no odds ratio
colnames(varOneOut) <- c(
"InterestedVar", "P", "Effect (One Unit)", "Effect (Lower 95%)", "Effect (Upper 95%)",
"Value (25% Quantile)", "Value (75% Quantile)", "Value Diff (75%-25%)", "Effect (Diff: 75%-25%)", "Effect (Diff, Lower 95%)", "Effect (Diff, Upper 95%)"
)
} else if (modelType == "cph") { #hazard ratio
colnames(varOneOut) <- c(
"InterestedVar", "Effect (One Unit)", "P", "Hazard Ratio (One Unit)", "HR (Lower 95%)", "HR (Upper 95%)",
"Value (25% Quantile)", "Value (75% Quantile)", "Value Diff (75%-25%)", "Hazard Ratio (Diff: 75%-25%)", "HR (Diff, Lower 95%)", "HR (Diff, Upper 95%)"
)
} else {
colnames(varOneOut) <- c(
"InterestedVar", "Effect (One Unit)", "P", "Odds Ratio (One Unit)", "OR (Lower 95%)", "OR (Upper 95%)",
"Value (25% Quantile)", "Value (75% Quantile)", "Value Diff (75%-25%)", "Odds Ratio (Diff: 75%-25%)", "OR (Diff, Lower 95%)", "OR (Diff, Upper 95%)"
)
}
#recored event level as sometimes event is factor and need to know which level is event (1)
if (modelType == "lrm") {
outVarEvent=paste(paste0(rev(names(modelResult$freq)),"(",rev((modelResult$freq)),")"),collapse=" : ")
varOneOut <- data.frame(Event=outVarEvent,varOneOut, stringsAsFactors = FALSE, check.names = FALSE)
}
varOneOut <- data.frame(Formula = paste0(modelType, " (", as.character(as.expression(modelResult$sformula)), ")"),varOneOut, stringsAsFactors = FALSE, check.names = FALSE)
if (!is.null(extractStats)) {
varOneOut <- c(varOneOut, round(modelResult$stats[extractStats], 3))
}
modelResultOut <- rbind(modelResultOut, varOneOut)
}
return(modelResultOut)
}
# Build a combined report table of regression results: fits one model (lrm,
# cph, or ols) for every combination of outcome variable and interested
# variable, optionally adjusted for a fixed covariate set, then stacks the
# effect rows extracted by exportModelResult().
## outVars should be list if doing survival model; each element is c(timeVar, eventVar)
#' @param dataForModelAll data.frame containing all outcome and predictor columns
#' @param interestedVars variables whose effects are extracted; one model per variable
#' @param adjVars covariates added to every model (default NULL = unadjusted)
#' @param nonLinearVars variables to wrap in a spline term, e.g. rcs(x, 3)
#' @param extractStats names of entries in the model's $stats to append to output
#' @param modelType one of "lrm" (logistic), "cph" (Cox), "ols" (linear)
#' @param uniqueSampleSize numeric columns with at most this many distinct
#'   values are converted to factors before fitting
#' @param adjto.cat passed to rms::datadist; reference level choice for factors
#' @export
#'
modelTable <- function(dataForModelAll, outVars, interestedVars, adjVars = NULL,
                       nonLinearVars = NULL, nonLinearFunName="rcs",nonLinearFunPar=3,
                       extractStats = NULL,modelType = "lrm", printModel = FALSE, printModelFigure = printModel,
                       returnKable = FALSE,returnModel = FALSE,uniqueSampleSize=5,
                       reportAnovaP=TRUE,adjto.cat='first') {
  # Resolve the rms fitting function (lrm, cph, or ols) by its name.
  modelType <- match.arg(modelType, c("lrm", "cph", "ols"))
  modelFun <- get(modelType)
  modelResultAll <- NULL
  modelAll <- list()
  # One model per (outcome, interested variable) combination.
  for (outVar in outVars) {
    for (varOne in interestedVars) {
      varForModelOne <- c(varOne, adjVars)
      # cph needs a Surv(time, event) response; lrm/ols take the outcome directly.
      if (modelType == "cph") {
        formulaForModel <- paste("Surv(", outVar[1], ", ", outVar[2], ")", "~", paste0(varForModelOne, collapse = " + "), " ")
      } else {
        formulaForModel <- paste(outVar, "~", paste0(varForModelOne, collapse = " + "), " ")
      }
      # Wrap each non-linear variable into the spline call, e.g. " rcs(x,3) ".
      # Matching depends on the spaces introduced by paste() above.
      if (!is.null(nonLinearVars)) {
        for (nonLinearVarOne in nonLinearVars) {
          formulaForModel <- gsub(paste0(" ", nonLinearVarOne, " "), paste0(" ",nonLinearFunName,"(", nonLinearVarOne, ",",nonLinearFunPar,") "), formulaForModel)
        }
      }
      formulaForModel <- as.formula(formulaForModel)
      dataForModel <- dataForModelAll[, c(outVar, varForModelOne)]
      for (temp in varForModelOne) { # change all numbers with only uniqueSampleSize values in dataForModel into factor
        if (length(unique(na.omit(dataForModel[, temp]))) <= uniqueSampleSize) {
          dataForModel[, temp] <- factor(dataForModel[, temp])
        }
      }
      #browser()
      # rms requires a datadist object visible from the global environment;
      # assigned with <<- and registered via options(datadist = ...).
      ddist <<- datadist(dataForModel, n.unique = uniqueSampleSize,adjto.cat=adjto.cat)
      options(datadist = "ddist")
      # x=TRUE, y=TRUE keep the design matrix/response for later anova/Predict.
      modelResult <- modelFun(formulaForModel, data = dataForModel,x=TRUE,y=TRUE)
      if (printModel) {
        print(paste0("Model formula: ",as.character(as.expression(formulaForModel))))
        print(modelResult)
      }
      if (printModelFigure) {
        print(plot(Predict(modelResult),ylab=outVar))
      }
      if (returnModel) {
        modelAll=c(modelAll,list(modelResult))
      }
      # extract result, may have many variables in varOne
      modelResultOut=exportModelResult(modelResult,varOne,reportAnovaP = reportAnovaP)
      modelResultAll=rbind(modelResultAll,modelResultOut)
    }
  }
  row.names(modelResultAll) <- NULL
  if (returnKable) {
    # Drop columns that are entirely empty strings before printing the kable.
    temp <- apply(modelResultAll, 2, function(x) all(x == "")) # remove spaces
    print(kable(modelResultAll[, which(!temp)],caption ="Regression Model Result Summary"))
  }
  if (returnModel) {
    return(modelAll)
  } else {
    return(modelResultAll)
  }
}
#' Summarise the modelled effect of moving one variable between two values.
#'
#' Accepts an rms fit (lrm, cph, or ols) and returns a named list holding the
#' model formula, the variable name, the two compared values, their difference,
#' and the estimated effect with 95% confidence limits (odds/hazard ratio
#' scale for lrm/cph, raw effect for ols).
#' @export
#'
easierSummaryByValue=function(modelResult,varOneToExtract,varOneToExtractValues) {
  # Identify which rms fitter produced this object.
  if (inherits(modelResult, "lrm")) {
    modelType <- "lrm"
  } else if (inherits(modelResult, "ols")) {
    modelType <- "ols"
  } else {
    modelType <- "cph"
  }

  # summary.rms expects the variable supplied as a named argument, so the
  # call is assembled as a list and dispatched through do.call().
  argList <- list(quote(modelResult),
                  c(varOneToExtractValues[1], varOneToExtractValues[2]),
                  est.all = FALSE)
  names(argList)[2] <- varOneToExtract
  smry <- round(do.call(summary, argList), 3)

  rowIdx <- grep(varOneToExtract, row.names(smry))
  # ols reports the effect on the variable's own row; lrm/cph place the
  # ratio (odds/hazard) on the row directly below it.
  effectRow <- if (modelType == "ols") rowIdx else rowIdx + 1
  varOneOut <- data.frame(row.names(smry)[rowIdx],
                          matrix(smry[effectRow, c(1, 2, 3, 4, 6, 7)], ncol = 6),
                          stringsAsFactors = FALSE)

  # Prepend a human-readable description of the fitted model.
  modelFormula <- paste0(modelType, "(", as.character(as.expression(modelResult$sformula)), ")")
  varOneOut <- c(modelFormula, varOneOut)

  # Label the effect columns according to the model family.
  effectNames <- switch(modelType,
    ols = c("Effect (Diff)", "Effect (Diff, Lower 95%)", "Effect (Diff, Upper 95%)"),
    cph = c("Hazard Ratio (Diff)", "HR (Diff, Lower 95%)", "HR (Diff, Upper 95%)"),
    lrm = c("Odds Ratio (Diff)", "OR (Diff, Lower 95%)", "OR (Diff, Upper 95%)")
  )
  names(varOneOut) <- c("Formula", "InterestedVar", "Value 1", "Value 2", "Value Diff", effectNames)
  varOneOut
}
|
#Teralytics 2018 Origin Destination
# QA/QC: compare the delivered CSV source file against the raw-upload SQL table.
# Set working directory to this script's folder and load shared helpers.
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
source("..\\..\\..\\Common_functions\\Loading_in_packages.R")
source("..\\..\\..\\Common_functions\\readSQL.R")
getwd()

# Read in source data. FIX: the data frame was previously named `source`,
# which shadowed base::source(); renamed to `src`.
src <- read.csv("R:/DPOE/Origin-Destination/Teralytics/Source/od_sandiegocounty_and_surroundings.csv", sep = '|', stringsAsFactors = FALSE)

# Pull the uploaded table back out of SQL Server.
channel <- odbcDriverConnect('driver={SQL Server}; server=sql2014a8; database=travel_data; trusted_connection=true')
sql_query <- 'SELECT * FROM [travel_data].[teralytics2018].[origin_destination]'
db <- sqlQuery(channel, sql_query, stringsAsFactors = FALSE)
odbcClose(channel)

# Inspect column names and data types on both sides.
colnames(src)
colnames(db)
str(src)
str(db)

# Normalize the SQL Month (a full date) to "YYYY-MM" to match the CSV.
db$Month <- format(as.Date(db$Month, format = "%Y-%m-%d"), "%Y-%m")

# Sort both tables by the same composite key so row-by-row comparison works.
ord_cols <- c("StartId", "EndId", "Month", "PartOfWeek", "HourOfDay",
              "TripPurpose", "Count", "InSanDiegoCounty")
src <- src[do.call(order, src[ord_cols]), ]
db <- db[do.call(order, db[ord_cols]), ]

# Drop the row names assigned by R so identical() compares only the data.
rownames(src) <- NULL
rownames(db) <- NULL

# Compare files. NOTE: `==` yields NA for NA cells, which makes all() return
# NA; all.equal()/identical() are the authoritative checks here.
all(src == db)            # check cell values only
all.equal(src, db)        # values + types; reports the conflicting columns
identical(src, db)        # strict: values, types, and attributes
which(src != db, arr.ind = TRUE)
# src[1,3]
# db[1,3]
|
/Origin-destination/Teralytics/QAQC/2018 Teralytics Origin Destination QAQC.R
|
no_license
|
SANDAG/DPOE
|
R
| false
| false
| 1,733
|
r
|
#Teralytics 2018 Origin Destination
# QA/QC comparison between the delivered CSV and the raw SQL upload.
# Point the working directory at this script and load the shared helpers.
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
source("..\\..\\..\\Common_functions\\Loading_in_packages.R")
source("..\\..\\..\\Common_functions\\readSQL.R")
getwd()

# Load the pipe-delimited source CSV.
csv_data <- read.csv(
  "R:/DPOE/Origin-Destination/Teralytics/Source/od_sandiegocounty_and_surroundings.csv",
  sep = '|', stringsAsFactors = FALSE
)

# Fetch the uploaded table from SQL Server.
channel <- odbcDriverConnect('driver={SQL Server}; server=sql2014a8; database=travel_data; trusted_connection=true')
sql_query <- 'SELECT * FROM [travel_data].[teralytics2018].[origin_destination]'
db <- sqlQuery(channel, sql_query, stringsAsFactors = FALSE)
odbcClose(channel)

# Eyeball the structure on both sides.
colnames(csv_data)
colnames(db)
str(csv_data)
str(db)

# Reduce the SQL date to year-month, matching the CSV representation.
db$Month <- format(as.Date(db$Month, format = "%Y-%m-%d"), "%Y-%m")

# Apply the same sort order to both tables before comparing.
csv_data <- csv_data[order(csv_data$StartId, csv_data$EndId, csv_data$Month,
                           csv_data$PartOfWeek, csv_data$HourOfDay,
                           csv_data$TripPurpose, csv_data$Count,
                           csv_data$InSanDiegoCounty), ]
db <- db[order(db$StartId, db$EndId, db$Month, db$PartOfWeek, db$HourOfDay,
               db$TripPurpose, db$Count, db$InSanDiegoCounty), ]

# Reset row names so identical() is not tripped up by R's row keys.
rownames(csv_data) <- NULL
rownames(db) <- NULL

# Run the comparisons.
all(csv_data == db)            # cell values only
all.equal(csv_data, db)        # values and data types; reports conflicts
identical(csv_data, db)        # strict equality of values and types
which(csv_data != db, arr.ind = TRUE)
# csv_data[1,3]
# db[1,3]
|
#' Add tooltips to a plot.
#'
#' @param vis Visualisation to add tooltips to.
#' @param html A function taking a single argument: a list holding the data of
#'   the mark currently under the mouse. It must return a string of HTML, or
#'   \code{NULL} to hide the tooltip for that element.
#' @param on Should tooltips appear on hover, or on click?
#' @export
#' @examples
#' \donttest{
#' all_values <- function(x) {
#'   if(is.null(x)) return(NULL)
#'   paste0(names(x), ": ", format(x), collapse = "<br />")
#' }
#'
#' base <- mtcars %>% ggvis(x = ~wt, y = ~mpg) %>%
#'   layer_points()
#' base %>% add_tooltip(all_values, "hover")
#' base %>% add_tooltip(all_values, "click")
#' }
add_tooltip <- function(vis, html, on = c("hover", "click")) {
  on <- match.arg(on)

  # Show handler: render the user-supplied HTML for the mark under the
  # pointer, or hide the tooltip when there is nothing to show.
  show_tooltip2 <- function(data, location, session, ...) {
    if (is.null(data)) {
      hide_tooltip(session)
      return()
    }
    content <- html(data)
    if (is.null(content)) {
      hide_tooltip(session)
      return()
    }
    # Offset by 5px so the tooltip does not sit directly under the cursor.
    show_tooltip(session, location$x + 5, location$y + 5, content)
  }
  # Hide handler used when the pointer leaves a mark (hover mode only).
  hide_tooltip2 <- function(session) hide_tooltip(session)

  # Register the handlers for the requested interaction mode.
  if (on == "click") {
    handle_click(vis, show_tooltip2)
  } else {
    handle_hover(vis, show_tooltip2, hide_tooltip2)
  }
}
#' Send a message to the client to show or hide a tooltip
#'
#' @param session A Shiny session object.
#' @param l Pixel location of left edge of tooltip (relative to page)
#' @param t Pixel location of top edge of tooltip (relative to page)
#' @param html HTML to display in the tooltip box.
#'
#' @export
show_tooltip <- function(session, l = 0, t = 0, html = "") {
  # Forward the page coordinates and content to the client-side handler.
  payload <- list(pagex = l, pagey = t, html = html)
  ggvis_message(session, "show_tooltip", payload)
}
#' @rdname show_tooltip
#' @export
hide_tooltip <- function(session) {
  # No payload needed: the client hides whatever tooltip is visible.
  ggvis_message(session, "hide_tooltip")
}
|
/R/interact_tooltip.R
|
no_license
|
jjallaire/ggvis
|
R
| false
| false
| 1,892
|
r
|
#' Add tooltips to a plot.
#'
#' @param vis Visualisation to add tooltips to.
#' @param html A function that takes a single argument as input. This argument
#'   will be a list containing the data in the mark currently under the
#'   mouse. It should return a string containing HTML or \code{NULL} to
#'   hide tooltip for the current element.
#' @param on Should tooltips appear on hover, or on click?
#' @export
#' @examples
#' \donttest{
#' all_values <- function(x) {
#'   if(is.null(x)) return(NULL)
#'   paste0(names(x), ": ", format(x), collapse = "<br />")
#' }
#'
#' base <- mtcars %>% ggvis(x = ~wt, y = ~mpg) %>%
#'   layer_points()
#' base %>% add_tooltip(all_values, "hover")
#' base %>% add_tooltip(all_values, "click")
#' }
add_tooltip <- function(vis, html, on = c("hover", "click")) {
  on <- match.arg(on)
  # Handler that renders the user-supplied HTML for the mark under the
  # pointer, or hides the tooltip when there is nothing to show.
  show_tooltip2 <- function(data, location, session, ...) {
    if (is.null(data)) {
      hide_tooltip(session)
      return()
    }
    # Local `html` (the rendered string) shadows the `html` function
    # argument from this line on — intentional but easy to misread.
    html <- html(data)
    if (is.null(html)) {
      hide_tooltip(session)
    } else {
      # Offset by 5px so the tooltip does not sit directly under the cursor.
      show_tooltip(session, location$x + 5, location$y + 5, html)
    }
  }
  # Hide handler used when the pointer leaves a mark (hover mode only).
  hide_tooltip2 <- function(session) {
    hide_tooltip(session)
  }
  # Register the handlers for the requested interaction mode.
  switch(on,
    click = handle_click(vis, show_tooltip2),
    hover = handle_hover(vis, show_tooltip2, hide_tooltip2)
  )
}
#' Send a message to the client to show or hide a tooltip
#'
#' @param session A Shiny session object.
#' @param l Pixel location of left edge of tooltip (relative to page)
#' @param t Pixel location of top edge of tooltip (relative to page)
#' @param html HTML to display in the tooltip box.
#'
#' @export
show_tooltip <- function(session, l = 0, t = 0, html = "") {
  # Forward the page coordinates and content to the client-side handler.
  ggvis_message(session, "show_tooltip",
    list(pagex = l, pagey = t, html = html))
}
#' @rdname show_tooltip
#' @export
hide_tooltip <- function(session) {
  # No payload needed: the client hides whatever tooltip is visible.
  ggvis_message(session, "hide_tooltip")
}
|
\name{azprocedure}
\alias{azprocedure}
\docType{data}
\title{azprocedure}
\description{
Data come from the 1991 Arizona cardiovascular patient files. A subset of the
fields was selected to model the differential length of stay for patients entering
the hospital to receive one of two standard cardiovascular procedures: CABG and PTCA.
CABG is the standard acronym for Coronary Artery Bypass Graft, where the flow of
blood in a diseased or blocked coronary artery or vein has been grafted to bypass
the diseased sections. PTCA, or Percutaneous Transluminal Coronary Angioplasty, is
a method of placing a balloon in a blocked coronary artery to open it to blood flow.
It is a much less severe method of treatment for those having coronary blockage, with
a corresponding reduction in risk.
}
\usage{data(azprocedure)}
\format{
A data frame with 3589 observations on the following 6 variables.
\describe{
\item{\code{los}}{length of hospital stay}
\item{\code{procedure}}{1=CABG;0=PTCA}
\item{\code{sex}}{1=Male; 0=female}
\item{\code{admit}}{1=Urgent/Emerg; 0=elective (type of admission)}
\item{\code{age75}}{1= Age>75; 0=Age<=75}
\item{\code{hospital}}{encrypted facility code (string)}
}
}
\details{
azprocedure is saved as a data frame.
Count models use los as response variable. 0 counts are structurally excluded
}
\source{
1991 Arizona Medpar data, cardiovascular patient files,
National Health Economics & Research Co.
}
\references{
Hilbe, Joseph M (2014), Modeling Count Data, Cambridge University Press
Hilbe, Joseph M (2007, 2011), Negative Binomial Regression, Cambridge University Press
Hilbe, Joseph M (2009), Logistic Regression Models, Chapman & Hall/CRC
}
\examples{
library(MASS)
library(msme)
data(azprocedure)
glmazp <- glm(los ~ procedure + sex + admit, family=poisson, data=azprocedure)
summary(glmazp)
exp(coef(glmazp))
nb2 <- nbinomial(los ~ procedure + sex + admit, data=azprocedure)
summary(nb2)
exp(coef(nb2))
glmaznb <- glm.nb(los ~ procedure + sex + admit, data=azprocedure)
summary(glmaznb)
exp(coef(glmaznb))
}
\keyword{datasets}
|
/man/azprocedure.Rd
|
no_license
|
cran/COUNT
|
R
| false
| false
| 2,164
|
rd
|
\name{azprocedure}
\alias{azprocedure}
\docType{data}
\title{azprocedure}
\description{
Data come from the 1991 Arizona cardiovascular patient files. A subset of the
fields was selected to model the differential length of stay for patients entering
the hospital to receive one of two standard cardiovascular procedures: CABG and PTCA.
CABG is the standard acronym for Coronary Artery Bypass Graft, where the flow of
blood in a diseased or blocked coronary artery or vein has been grafted to bypass
the diseased sections. PTCA, or Percutaneous Transluminal Coronary Angioplasty, is
a method of placing a balloon in a blocked coronary artery to open it to blood flow.
It is a much less severe method of treatment for those having coronary blockage, with
a corresponding reduction in risk.
}
\usage{data(azprocedure)}
\format{
A data frame with 3589 observations on the following 6 variables.
\describe{
\item{\code{los}}{length of hospital stay}
\item{\code{procedure}}{1=CABG;0=PTCA}
\item{\code{sex}}{1=Male; 0=female}
\item{\code{admit}}{1=Urgent/Emerg; 0=elective (type of admission)}
\item{\code{age75}}{1= Age>75; 0=Age<=75}
\item{\code{hospital}}{encrypted facility code (string)}
}
}
\details{
azprocedure is saved as a data frame.
Count models use los as response variable. 0 counts are structurally excluded
}
\source{
1991 Arizona Medpar data, cardiovascular patient files,
National Health Economics & Research Co.
}
\references{
Hilbe, Joseph M (2014), Modeling Count Data, Cambridge University Press
Hilbe, Joseph M (2007, 2011), Negative Binomial Regression, Cambridge University Press
Hilbe, Joseph M (2009), Logistic Regression Models, Chapman & Hall/CRC
}
\examples{
library(MASS)
library(msme)
data(azprocedure)
glmazp <- glm(los ~ procedure + sex + admit, family=poisson, data=azprocedure)
summary(glmazp)
exp(coef(glmazp))
nb2 <- nbinomial(los ~ procedure + sex + admit, data=azprocedure)
summary(nb2)
exp(coef(nb2))
glmaznb <- glm.nb(los ~ procedure + sex + admit, data=azprocedure)
summary(glmaznb)
exp(coef(glmaznb))
}
\keyword{datasets}
|
# Copyright 2019 Observational Health Data Sciences and Informatics
#
# This file is part of IUDClaimsStudy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Synthesize positive controls
#'
#' @details
#' This function will synthesize positve controls based on the negative controls. The simulated outcomes
#' will be added to the cohort table.
#'
#' @param connectionDetails An object of type \code{connectionDetails} as created using the
#' \code{\link[DatabaseConnector]{createConnectionDetails}} function in the
#' DatabaseConnector package.
#' @param cdmDatabaseSchema Schema name where your patient-level data in OMOP CDM format resides.
#' Note that for SQL Server, this should include both the database and
#' schema name, for example 'cdm_data.dbo'.
#' @param cohortDatabaseSchema Schema name where intermediate data can be stored. You will need to have
#' write priviliges in this schema. Note that for SQL Server, this should
#' include both the database and schema name, for example 'cdm_data.dbo'.
#' @param cohortTable The name of the table that will be created in the work database schema.
#' This table will hold the exposure and outcome cohorts used in this
#' study.
#' @param oracleTempSchema Should be used in Oracle to specify a schema where the user has write
#' priviliges for storing temporary tables.
#' @param outputFolder Name of local folder to place results; make sure to use forward slashes
#' (/)
#' @param maxCores How many parallel cores should be used? If more cores are made available
#' this can speed up the analyses.
#'
#' @export
synthesizePositiveControls <- function(connectionDetails,
                                       cdmDatabaseSchema,
                                       cohortDatabaseSchema,
                                       cohortTable = "cohort",
                                       oracleTempSchema,
                                       outputFolder,
                                       maxCores = 1) {
  # Working folder for the intermediate files written by
  # MethodEvaluation::injectSignals().
  synthesisFolder <- file.path(outputFolder, "positiveControlSynthesis")
  if (!file.exists(synthesisFolder))
    dir.create(synthesisFolder)
  synthesisSummaryFile <- file.path(outputFolder, "SynthesisSummary.csv")
  # Skip the expensive synthesis step if a summary file already exists.
  if (!file.exists(synthesisSummaryFile)) {
    pathToCsv <- system.file("settings", "NegativeControls.csv", package = "IUDClaimsStudy")
    negativeControls <- read.csv(pathToCsv)
    # One synthesis task per unique (exposure, outcome) pair.
    exposureOutcomePairs <- data.frame(exposureId = negativeControls$targetId,
                                       outcomeId = negativeControls$outcomeId)
    exposureOutcomePairs <- unique(exposureOutcomePairs)
    # Externally maintained argument settings for the injection call.
    pathToJson <- system.file("settings", "positiveControlSynthArgs.json", package = "IUDClaimsStudy")
    args <- ParallelLogger::loadSettingsFromJson(pathToJson)
    # Cap regularization threads at 10 regardless of available cores.
    args$control$threads <- min(c(10, maxCores))
    result <- MethodEvaluation::injectSignals(connectionDetails = connectionDetails,
                                              cdmDatabaseSchema = cdmDatabaseSchema,
                                              oracleTempSchema = oracleTempSchema,
                                              exposureDatabaseSchema = cohortDatabaseSchema,
                                              exposureTable = cohortTable,
                                              outcomeDatabaseSchema = cohortDatabaseSchema,
                                              outcomeTable = cohortTable,
                                              outputDatabaseSchema = cohortDatabaseSchema,
                                              outputTable = cohortTable,
                                              createOutputTable = FALSE,
                                              exposureOutcomePairs = exposureOutcomePairs,
                                              workFolder = synthesisFolder,
                                              modelThreads = max(1, round(maxCores/8)),
                                              generationThreads = min(6, maxCores),
                                              # External args start here
                                              outputIdOffset = args$outputIdOffset,
                                              firstExposureOnly = args$firstExposureOnly,
                                              firstOutcomeOnly = args$firstOutcomeOnly,
                                              removePeopleWithPriorOutcomes = args$removePeopleWithPriorOutcomes,
                                              modelType = args$modelType,
                                              washoutPeriod = args$washoutPeriod,
                                              riskWindowStart = args$riskWindowStart,
                                              riskWindowEnd = args$riskWindowEnd,
                                              addExposureDaysToEnd = args$addExposureDaysToEnd,
                                              effectSizes = args$effectSizes,
                                              precision = args$precision,
                                              prior = args$prior,
                                              control = args$control,
                                              maxSubjectsForModel = args$maxSubjectsForModel,
                                              minOutcomeCountForModel = args$minOutcomeCountForModel,
                                              minOutcomeCountForInjection = args$minOutcomeCountForInjection,
                                              covariateSettings = args$covariateSettings
                                              # External args stop here
    )
    write.csv(result, synthesisSummaryFile, row.names = FALSE)
  } else {
    # NOTE(review): `result` is loaded here but never used afterwards; the
    # code below re-reads synthesisSummaryFile into `synthesisSummary`.
    result <- read.csv(synthesisSummaryFile)
  }
  ParallelLogger::logTrace("Merging positive with negative controls ")
  pathToCsv <- system.file("settings", "NegativeControls.csv", package = "IUDClaimsStudy")
  negativeControls <- read.csv(pathToCsv)
  synthesisSummary <- read.csv(synthesisSummaryFile)
  # Align the key column name with the negative-control file before merging.
  synthesisSummary$targetId <- synthesisSummary$exposureId
  synthesisSummary <- merge(synthesisSummary, negativeControls)
  # Keep only rows where a non-null effect was actually injected.
  synthesisSummary <- synthesisSummary[synthesisSummary$trueEffectSize != 0, ]
  # NOTE(review): 'OutcomeName' (capital O) — confirm this column exists after
  # the merge; if the CSV column is 'outcomeName' this would yield NA labels.
  synthesisSummary$outcomeName <- paste0(synthesisSummary$OutcomeName, ", RR=", synthesisSummary$targetEffectSize)
  # Synthesized outcomes get new ids; remember the originals.
  synthesisSummary$oldOutcomeId <- synthesisSummary$outcomeId
  synthesisSummary$outcomeId <- synthesisSummary$newOutcomeId
  pathToCsv <- system.file("settings", "NegativeControls.csv", package = "IUDClaimsStudy")
  negativeControls <- read.csv(pathToCsv)
  # Negative controls have a true effect size of 1 by definition.
  negativeControls$targetEffectSize <- 1
  negativeControls$trueEffectSize <- 1
  negativeControls$trueEffectSizeFirstExposure <- 1
  negativeControls$oldOutcomeId <- negativeControls$outcomeId
  # Stack negative and synthesized positive controls into one reference file.
  allControls <- rbind(negativeControls, synthesisSummary[, names(negativeControls)])
  write.csv(allControls, file.path(outputFolder, "AllControls.csv"), row.names = FALSE)
}
|
/additionalEstimationPackage/IUDClaimsEstimation/R/SynthesizePositiveControls.R
|
permissive
|
cukarthik/IUDEHREstimationStudy
|
R
| false
| false
| 7,783
|
r
|
# Copyright 2019 Observational Health Data Sciences and Informatics
#
# This file is part of IUDClaimsStudy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Synthesize positive controls
#'
#' @details
#' This function will synthesize positve controls based on the negative controls. The simulated outcomes
#' will be added to the cohort table.
#'
#' @param connectionDetails An object of type \code{connectionDetails} as created using the
#' \code{\link[DatabaseConnector]{createConnectionDetails}} function in the
#' DatabaseConnector package.
#' @param cdmDatabaseSchema Schema name where your patient-level data in OMOP CDM format resides.
#' Note that for SQL Server, this should include both the database and
#' schema name, for example 'cdm_data.dbo'.
#' @param cohortDatabaseSchema Schema name where intermediate data can be stored. You will need to have
#' write priviliges in this schema. Note that for SQL Server, this should
#' include both the database and schema name, for example 'cdm_data.dbo'.
#' @param cohortTable The name of the table that will be created in the work database schema.
#' This table will hold the exposure and outcome cohorts used in this
#' study.
#' @param oracleTempSchema Should be used in Oracle to specify a schema where the user has write
#' priviliges for storing temporary tables.
#' @param outputFolder Name of local folder to place results; make sure to use forward slashes
#' (/)
#' @param maxCores How many parallel cores should be used? If more cores are made available
#' this can speed up the analyses.
#'
#' @export
synthesizePositiveControls <- function(connectionDetails,
                                       cdmDatabaseSchema,
                                       cohortDatabaseSchema,
                                       cohortTable = "cohort",
                                       oracleTempSchema,
                                       outputFolder,
                                       maxCores = 1) {
  # Working folder for the intermediate files written by
  # MethodEvaluation::injectSignals().
  synthesisFolder <- file.path(outputFolder, "positiveControlSynthesis")
  if (!file.exists(synthesisFolder))
    dir.create(synthesisFolder)
  synthesisSummaryFile <- file.path(outputFolder, "SynthesisSummary.csv")
  # Skip the expensive synthesis step if a summary file already exists.
  if (!file.exists(synthesisSummaryFile)) {
    pathToCsv <- system.file("settings", "NegativeControls.csv", package = "IUDClaimsStudy")
    negativeControls <- read.csv(pathToCsv)
    # One synthesis task per unique (exposure, outcome) pair.
    exposureOutcomePairs <- data.frame(exposureId = negativeControls$targetId,
                                       outcomeId = negativeControls$outcomeId)
    exposureOutcomePairs <- unique(exposureOutcomePairs)
    # Externally maintained argument settings for the injection call.
    pathToJson <- system.file("settings", "positiveControlSynthArgs.json", package = "IUDClaimsStudy")
    args <- ParallelLogger::loadSettingsFromJson(pathToJson)
    # Cap regularization threads at 10 regardless of available cores.
    args$control$threads <- min(c(10, maxCores))
    result <- MethodEvaluation::injectSignals(connectionDetails = connectionDetails,
                                              cdmDatabaseSchema = cdmDatabaseSchema,
                                              oracleTempSchema = oracleTempSchema,
                                              exposureDatabaseSchema = cohortDatabaseSchema,
                                              exposureTable = cohortTable,
                                              outcomeDatabaseSchema = cohortDatabaseSchema,
                                              outcomeTable = cohortTable,
                                              outputDatabaseSchema = cohortDatabaseSchema,
                                              outputTable = cohortTable,
                                              createOutputTable = FALSE,
                                              exposureOutcomePairs = exposureOutcomePairs,
                                              workFolder = synthesisFolder,
                                              modelThreads = max(1, round(maxCores/8)),
                                              generationThreads = min(6, maxCores),
                                              # External args start here
                                              outputIdOffset = args$outputIdOffset,
                                              firstExposureOnly = args$firstExposureOnly,
                                              firstOutcomeOnly = args$firstOutcomeOnly,
                                              removePeopleWithPriorOutcomes = args$removePeopleWithPriorOutcomes,
                                              modelType = args$modelType,
                                              washoutPeriod = args$washoutPeriod,
                                              riskWindowStart = args$riskWindowStart,
                                              riskWindowEnd = args$riskWindowEnd,
                                              addExposureDaysToEnd = args$addExposureDaysToEnd,
                                              effectSizes = args$effectSizes,
                                              precision = args$precision,
                                              prior = args$prior,
                                              control = args$control,
                                              maxSubjectsForModel = args$maxSubjectsForModel,
                                              minOutcomeCountForModel = args$minOutcomeCountForModel,
                                              minOutcomeCountForInjection = args$minOutcomeCountForInjection,
                                              covariateSettings = args$covariateSettings
                                              # External args stop here
    )
    write.csv(result, synthesisSummaryFile, row.names = FALSE)
  } else {
    # NOTE(review): `result` is loaded here but never used afterwards; the
    # code below re-reads synthesisSummaryFile into `synthesisSummary`.
    result <- read.csv(synthesisSummaryFile)
  }
  ParallelLogger::logTrace("Merging positive with negative controls ")
  pathToCsv <- system.file("settings", "NegativeControls.csv", package = "IUDClaimsStudy")
  negativeControls <- read.csv(pathToCsv)
  synthesisSummary <- read.csv(synthesisSummaryFile)
  # Align the key column name with the negative-control file before merging.
  synthesisSummary$targetId <- synthesisSummary$exposureId
  synthesisSummary <- merge(synthesisSummary, negativeControls)
  # Keep only rows where a non-null effect was actually injected.
  synthesisSummary <- synthesisSummary[synthesisSummary$trueEffectSize != 0, ]
  # NOTE(review): 'OutcomeName' (capital O) — confirm this column exists after
  # the merge; if the CSV column is 'outcomeName' this would yield NA labels.
  synthesisSummary$outcomeName <- paste0(synthesisSummary$OutcomeName, ", RR=", synthesisSummary$targetEffectSize)
  # Synthesized outcomes get new ids; remember the originals.
  synthesisSummary$oldOutcomeId <- synthesisSummary$outcomeId
  synthesisSummary$outcomeId <- synthesisSummary$newOutcomeId
  pathToCsv <- system.file("settings", "NegativeControls.csv", package = "IUDClaimsStudy")
  negativeControls <- read.csv(pathToCsv)
  # Negative controls have a true effect size of 1 by definition.
  negativeControls$targetEffectSize <- 1
  negativeControls$trueEffectSize <- 1
  negativeControls$trueEffectSizeFirstExposure <- 1
  negativeControls$oldOutcomeId <- negativeControls$outcomeId
  # Stack negative and synthesized positive controls into one reference file.
  allControls <- rbind(negativeControls, synthesisSummary[, names(negativeControls)])
  write.csv(allControls, file.path(outputFolder, "AllControls.csv"), row.names = FALSE)
}
|
#' LCTMtools: A package for computing a number of Latent Class Trajectory Model tools for a given hlme() object or SAS model.
#'
#' The LCTMtools package provides two categories of important functions:
#' LCTMtools (to test a model's adequacy) and LCTMcompare (to aid model selection).
#'
#' @section LCTMtools functions:
#' The LCTMtools functions are a selection of model adequacy tests for Latent Class Trajectory Models (LCTMs) which include the APPA (average posterior probability of assignment), the OCC (odds of correct classification), entropy, and relative entropy.
#'
#' @docType package
#' @name LCTMtools
NULL
|
/R/LCTMtools.R
|
no_license
|
hlennon/LCTMtools
|
R
| false
| false
| 617
|
r
|
#' LCTMtools: A package for computing a number of Latent Class Trajectory Model tools for a given hlme() object or SAS model.
#'
#' The LCTMtools package provides two categories of important functions:
#' LCTMtools (to test a model's adequacy) and LCTMcompare (to aid model selection).
#'
#' @section LCTMtools functions:
#' The LCTMtools functions are a selection of model adequacy tests for Latent Class Trajectory Models (LCTMs) which include the APPA (average posterior probability of assignment), the OCC (odds of correct classification), entropy, and relative entropy.
#'
#' @docType package
#' @name LCTMtools
NULL
|
# AFL-generated fuzz harness: invokes the internal C++ routine
# myTAI:::cpp_bootMatrix with extreme/degenerate double inputs to probe for
# memory errors (run under valgrind). The values are fuzzer output, not
# meaningful biological data.
testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 4.81092546418635e-304, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L)
# Call the fuzz target and print the structure of whatever it returns.
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
str(result)
|
/myTAI/inst/testfiles/cpp_bootMatrix/AFL_cpp_bootMatrix/cpp_bootMatrix_valgrind_files/1615765404-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 1,809
|
r
|
testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 4.81092546418635e-304, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875218e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L)
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
str(result)
|
library(dplyr)
library(tidyr)
pg <- src_postgres()
regex_results <- tbl(pg, sql("SELECT * FROM director_bio.regex_results"))
other_dirs <-
tbl(pg, sql("SELECT * FROM director_bio.other_directorships")) %>%
select(director_id, other_director_id, fy_end, other_start_date,
other_end_date, other_first_date, other_last_date,
other_directorships) %>%
filter(other_start_date < fy_end, other_first_date < other_end_date,
other_last_date > other_start_date)
tagging_url <- function(file_name) {
temp <- gsub("^edgar/data/", "http://hal.marder.io/highlight/",
file_name)
gsub("(\\d{10})-(\\d{2})-(\\d{6})\\.txt", "\\1\\2\\3", temp)
}
who_tagged <- tbl(pg, sql("
SELECT file_name, array_agg(DISTINCT username) AS tagged_by
FROM director_bio.raw_tagging_data
WHERE category='bio'
GROUP BY file_name"))
retaggable <-
regex_results %>%
semi_join(other_dirs) %>%
group_by(file_name, non_match) %>%
summarize(count = n()) %>%
inner_join(who_tagged) %>%
collect() %>%
mutate(non_match = tolower(substr(non_match,1,1))) %>%
spread(non_match, count, fill = 0) %>%
rename(non_match = t, match = f) %>%
mutate(total = non_match + match, prop = non_match/total)
to_retag <-
retaggable %>%
filter(total > 5, prop > 0.75) %>%
mutate(url = tagging_url(file_name))
library(readr)
write_csv(to_retag, path = "~/Google Drive/director_bio/to_retag.csv")
pg <- src_postgres()
merged_test <-
tbl(pg, sql("
SELECT *
FROM director_bio.test_data
INNER JOIN director_bio.regex_results
USING (director_id, other_director_id, fy_end)"))
retaggable %>%
inner_join(merged_test %>%
select(file_name, proposed_resolution) %>%
collect()) %>%
filter(proposed_resolution=="tag_bio") %>%
arrange(desc(prop))
|
/directorships/create_retag.R
|
no_license
|
iangow/director_bio
|
R
| false
| false
| 1,901
|
r
|
library(dplyr)
library(tidyr)
pg <- src_postgres()
regex_results <- tbl(pg, sql("SELECT * FROM director_bio.regex_results"))
other_dirs <-
tbl(pg, sql("SELECT * FROM director_bio.other_directorships")) %>%
select(director_id, other_director_id, fy_end, other_start_date,
other_end_date, other_first_date, other_last_date,
other_directorships) %>%
filter(other_start_date < fy_end, other_first_date < other_end_date,
other_last_date > other_start_date)
tagging_url <- function(file_name) {
temp <- gsub("^edgar/data/", "http://hal.marder.io/highlight/",
file_name)
gsub("(\\d{10})-(\\d{2})-(\\d{6})\\.txt", "\\1\\2\\3", temp)
}
who_tagged <- tbl(pg, sql("
SELECT file_name, array_agg(DISTINCT username) AS tagged_by
FROM director_bio.raw_tagging_data
WHERE category='bio'
GROUP BY file_name"))
retaggable <-
regex_results %>%
semi_join(other_dirs) %>%
group_by(file_name, non_match) %>%
summarize(count = n()) %>%
inner_join(who_tagged) %>%
collect() %>%
mutate(non_match = tolower(substr(non_match,1,1))) %>%
spread(non_match, count, fill = 0) %>%
rename(non_match = t, match = f) %>%
mutate(total = non_match + match, prop = non_match/total)
to_retag <-
retaggable %>%
filter(total > 5, prop > 0.75) %>%
mutate(url = tagging_url(file_name))
library(readr)
write_csv(to_retag, path = "~/Google Drive/director_bio/to_retag.csv")
pg <- src_postgres()
merged_test <-
tbl(pg, sql("
SELECT *
FROM director_bio.test_data
INNER JOIN director_bio.regex_results
USING (director_id, other_director_id, fy_end)"))
retaggable %>%
inner_join(merged_test %>%
select(file_name, proposed_resolution) %>%
collect()) %>%
filter(proposed_resolution=="tag_bio") %>%
arrange(desc(prop))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qPLEXanalyzer-package.R
\docType{data}
\name{human_anno}
\alias{human_anno}
\title{human_anno dataset}
\format{
An object of class \code{\link{data.frame}} consisting of UniProt
human protein annotation.
}
\description{
Uniprot Human protein annotation table.
}
\keyword{data}
\keyword{datasets}
|
/man/human_anno.Rd
|
no_license
|
crukci-bioinformatics/qPLEXanalyzer
|
R
| false
| true
| 374
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qPLEXanalyzer-package.R
\docType{data}
\name{human_anno}
\alias{human_anno}
\title{human_anno dataset}
\format{
An object of class \code{\link{data.frame}} consisting of UniProt
human protein annotation.
}
\description{
Uniprot Human protein annotation table.
}
\keyword{data}
\keyword{datasets}
|
library(rvest)
library(dplyr)
library(RCurl)
library(scales)
require(rgdal)
require(ggmap)
require(Cairo)
require(gpclib)
require(maptools)
require(reshape)
library(stringr)
library(ggplot2)
library(tidyr)
# burl <- "http://www.depdata.ct.gov/wildlife/sighting/bearsight.asp"
#bear_table <- burl %>% read_html() %>%
# html_nodes(xpath='/html/body/center/table/tbody/tr[2]/td/div/table/tbody/tr/td/div[3]/center/table') %>%
# html_table()
burl <- "https://docs.google.com/spreadsheets/d/1iFb5ndUvQqc9adJLsbqPSkZeoU7Fr3Qem7st0qX_6pY/pub?output=csv"
gurl <- getURL(burl)
bear_data <- read.csv(textConnection(gurl))
gpclibPermit()
gpclibPermitStatus()
towntracts <- readOGR(dsn="maps", layer="ctgeo")
towntracts_only <- towntracts
towntracts <- fortify(towntracts, region="NAME10")
colnames(bear_data) <- c("id", "sightings")
bears_total_map <- left_join(towntracts, bear_data)
dtm <- ggplot() +
geom_polygon(data = bears_total_map, aes(x=long, y=lat, group=group, fill=sightings), color = "black", size=0.2) +
coord_map() +
scale_fill_distiller(type="seq", trans="reverse", palette = "Blues", breaks=pretty_breaks(n=10)) +
theme_nothing(legend=TRUE) +
labs(title="Bear sightings by town in Connecticut | 3/15 - 3/16", fill="")
dtm
library(ctnamecleaner)
bear_data_pop <- ctpopulator(id, bear_data)
bear_data_pop$percapita <- round((bear_data_pop$sightings/bear_data_pop$pop2013)*1000, 2)
bear_data_pop$id <- str_to_title(bear_data_pop$id)
bears_percapita_map <- left_join(towntracts, bear_data_pop)
#bears_percapita_map <- merge(towntracts, bear_data_pop, by="id", all.x=TRUE)
dtm2 <- ggplot() +
geom_polygon(data = bears_percapita_map, aes(x=long, y=lat, group=group, fill=percapita), color = "black", size=0.2) +
coord_map() +
scale_fill_distiller(type="seq", trans="reverse", palette = "Blues", breaks=pretty_breaks(n=10)) +
theme_nothing(legend=TRUE) +
labs(title="Bear sightings per 1,0000 residents in CT | 3/15 - 3/16", fill="")
dtm2
## Bear historical
bh <- read.csv("data/bear_history.csv")
bh$Year <- factor(bh$Year)
levels(bh$Month)
bh$Month <- factor(bh$Month, levels=c("Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"), ordered=TRUE)
ggplot(data=bh, aes(x=Month, y=Sightings, colour=Year, group=Year)) +
geom_line()
head(bear_data_pop)
bear_map <- bear_data_pop[c("id", "sightings", "percapita")]
colnames(bear_map) <- c("town", "sightings", "rate")
# These functions are specifically for creating dataviz for TrendCT.org
# It won't work unless you have our specific package
trendmap(bear_map, headline="Bear sightings in Connecticut", subhead="Total and per 1,000 residents",
src="Department of Energy & Environmental Protection", byline="TrendCT.org", url_append="date", shape="towns", color="blues")
bh_t <- spread(bh, Year, Sightings)
trendchart(bh_t, headline = "Bear sightings over time", subhead = "", src = "Department of Energy & Environmental Protection",
byline = "TrendCT.org", type = "spline", xTitle = "", yTitle = "",
xSuffix = "", ySuffix = "", xPrefix = "", yPrefix = "", option = "")
|
/bears.R
|
no_license
|
trendct-data/bear-sightings
|
R
| false
| false
| 3,152
|
r
|
library(rvest)
library(dplyr)
library(RCurl)
library(scales)
require(rgdal)
require(ggmap)
require(Cairo)
require(gpclib)
require(maptools)
require(reshape)
library(stringr)
library(ggplot2)
library(tidyr)
# burl <- "http://www.depdata.ct.gov/wildlife/sighting/bearsight.asp"
#bear_table <- burl %>% read_html() %>%
# html_nodes(xpath='/html/body/center/table/tbody/tr[2]/td/div/table/tbody/tr/td/div[3]/center/table') %>%
# html_table()
burl <- "https://docs.google.com/spreadsheets/d/1iFb5ndUvQqc9adJLsbqPSkZeoU7Fr3Qem7st0qX_6pY/pub?output=csv"
gurl <- getURL(burl)
bear_data <- read.csv(textConnection(gurl))
gpclibPermit()
gpclibPermitStatus()
towntracts <- readOGR(dsn="maps", layer="ctgeo")
towntracts_only <- towntracts
towntracts <- fortify(towntracts, region="NAME10")
colnames(bear_data) <- c("id", "sightings")
bears_total_map <- left_join(towntracts, bear_data)
dtm <- ggplot() +
geom_polygon(data = bears_total_map, aes(x=long, y=lat, group=group, fill=sightings), color = "black", size=0.2) +
coord_map() +
scale_fill_distiller(type="seq", trans="reverse", palette = "Blues", breaks=pretty_breaks(n=10)) +
theme_nothing(legend=TRUE) +
labs(title="Bear sightings by town in Connecticut | 3/15 - 3/16", fill="")
dtm
library(ctnamecleaner)
bear_data_pop <- ctpopulator(id, bear_data)
bear_data_pop$percapita <- round((bear_data_pop$sightings/bear_data_pop$pop2013)*1000, 2)
bear_data_pop$id <- str_to_title(bear_data_pop$id)
bears_percapita_map <- left_join(towntracts, bear_data_pop)
#bears_percapita_map <- merge(towntracts, bear_data_pop, by="id", all.x=TRUE)
dtm2 <- ggplot() +
geom_polygon(data = bears_percapita_map, aes(x=long, y=lat, group=group, fill=percapita), color = "black", size=0.2) +
coord_map() +
scale_fill_distiller(type="seq", trans="reverse", palette = "Blues", breaks=pretty_breaks(n=10)) +
theme_nothing(legend=TRUE) +
labs(title="Bear sightings per 1,0000 residents in CT | 3/15 - 3/16", fill="")
dtm2
## Bear historical
bh <- read.csv("data/bear_history.csv")
bh$Year <- factor(bh$Year)
levels(bh$Month)
bh$Month <- factor(bh$Month, levels=c("Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec"), ordered=TRUE)
ggplot(data=bh, aes(x=Month, y=Sightings, colour=Year, group=Year)) +
geom_line()
head(bear_data_pop)
bear_map <- bear_data_pop[c("id", "sightings", "percapita")]
colnames(bear_map) <- c("town", "sightings", "rate")
# These functions are specifically for creating dataviz for TrendCT.org
# It won't work unless you have our specific package
trendmap(bear_map, headline="Bear sightings in Connecticut", subhead="Total and per 1,000 residents",
src="Department of Energy & Environmental Protection", byline="TrendCT.org", url_append="date", shape="towns", color="blues")
bh_t <- spread(bh, Year, Sightings)
trendchart(bh_t, headline = "Bear sightings over time", subhead = "", src = "Department of Energy & Environmental Protection",
byline = "TrendCT.org", type = "spline", xTitle = "", yTitle = "",
xSuffix = "", ySuffix = "", xPrefix = "", yPrefix = "", option = "")
|
library(ggplot2)
library(gridExtra)
library(FSA)
library(multcomp)
library(car)
library(fitdistrplus)
library(dplyr)
countries<-read.csv("Intro to Inferential Statistics//Countries.csv")
credit<-read.csv("Intro to Inferential Statistics//Credit.csv")
str(credit)
##################################################
################ Hypothesis testing ##############
##################################################
# mu=49 ?
summary(credit$age)
ggplot(credit)+geom_histogram(aes(age), bins=20)
t.test(x=credit$age, mu=49)
t.test(x=credit$age, mu=49, alternative = "greater")
t.test(x=credit$age, mu=49, alternative = "less", conf.level = 0.99)
####################################
########### Paired test ############
####################################
str(countries)
Summarize(data = countries, Unemployment ~ Region, digits = 2)
ggplot(countries)+geom_boxplot(aes(y=Unemployment, x=Region))
ggplot(countries)+geom_histogram(aes(Unemployment))+facet_wrap(~Region)
t.test(data = countries, Unemployment ~ Region) #Welch Test
t.test(data = countries, Unemployment ~ Region, var.equal=T) #Traditional T-test
Summarize(data = countries, Business.Freedom ~ Region, digits = 2)
ggplot(countries, aes(x = Region, y = Business.Freedom)) + geom_boxplot()
t.test(data = countries, Business.Freedom ~ Region, alternative = "less")
###################################
########### ANOVA #################
###################################
Summarize(income ~ agecat, data = credit, digits = 0)
ggplot(credit, aes(x = agecat, y = income)) + geom_boxplot()
ggplot(credit, aes(x = agecat))+geom_bar()
leveneTest(income ~ agecat, data = credit) #checking if variances are the same
anova<-aov(income ~ agecat, data = credit) #by default it takes equal variances
summary(anova)
oneway.test(income ~ agecat, data = credit, var.equal=FALSE) #takes into account different variance
#We see that they are different, but whch one of them?
TukeyHSD(anova)
pairwise.t.test(credit$income, credit$agecat)
filtered<-filter(credit, agecat=="25-34" | agecat=="18-24")
t.test(data = filtered, income ~ agecat)
###############################
###### Non-parametric tests ###
###############################
America <- countries[countries$Region == "America", ]
summary(America$Public.Debt.Perc.of.GDP)
ggplot(America, aes(x = Public.Debt.Perc.of.GDP)) +
geom_histogram(bins = 9)+
geom_vline(xintercept = 45, col = "red")
wilcox.test(America$Public.Debt.Perc.of.GDP, mu = 45,
alternative = "two.sided")
|
/Intro to Inferential Statistics/Inf_Stats_1.R
|
no_license
|
HermineGrigoryan/DS-Summer-School
|
R
| false
| false
| 2,512
|
r
|
library(ggplot2)
library(gridExtra)
library(FSA)
library(multcomp)
library(car)
library(fitdistrplus)
library(dplyr)
countries<-read.csv("Intro to Inferential Statistics//Countries.csv")
credit<-read.csv("Intro to Inferential Statistics//Credit.csv")
str(credit)
##################################################
################ Hypothesis testing ##############
##################################################
# mu=49 ?
summary(credit$age)
ggplot(credit)+geom_histogram(aes(age), bins=20)
t.test(x=credit$age, mu=49)
t.test(x=credit$age, mu=49, alternative = "greater")
t.test(x=credit$age, mu=49, alternative = "less", conf.level = 0.99)
####################################
########### Paired test ############
####################################
str(countries)
Summarize(data = countries, Unemployment ~ Region, digits = 2)
ggplot(countries)+geom_boxplot(aes(y=Unemployment, x=Region))
ggplot(countries)+geom_histogram(aes(Unemployment))+facet_wrap(~Region)
t.test(data = countries, Unemployment ~ Region) #Welch Test
t.test(data = countries, Unemployment ~ Region, var.equal=T) #Traditional T-test
Summarize(data = countries, Business.Freedom ~ Region, digits = 2)
ggplot(countries, aes(x = Region, y = Business.Freedom)) + geom_boxplot()
t.test(data = countries, Business.Freedom ~ Region, alternative = "less")
###################################
########### ANOVA #################
###################################
Summarize(income ~ agecat, data = credit, digits = 0)
ggplot(credit, aes(x = agecat, y = income)) + geom_boxplot()
ggplot(credit, aes(x = agecat))+geom_bar()
leveneTest(income ~ agecat, data = credit) #checking if variances are the same
anova<-aov(income ~ agecat, data = credit) #by default it takes equal variances
summary(anova)
oneway.test(income ~ agecat, data = credit, var.equal=FALSE) #takes into account different variance
#We see that they are different, but whch one of them?
TukeyHSD(anova)
pairwise.t.test(credit$income, credit$agecat)
filtered<-filter(credit, agecat=="25-34" | agecat=="18-24")
t.test(data = filtered, income ~ agecat)
###############################
###### Non-parametric tests ###
###############################
America <- countries[countries$Region == "America", ]
summary(America$Public.Debt.Perc.of.GDP)
ggplot(America, aes(x = Public.Debt.Perc.of.GDP)) +
geom_histogram(bins = 9)+
geom_vline(xintercept = 45, col = "red")
wilcox.test(America$Public.Debt.Perc.of.GDP, mu = 45,
alternative = "two.sided")
|
# Exercise 1: working with data frames (review)
# Install devtools package: allows installations from GitHub
install.packages("devtools")
# Install "fueleconomy" dataset from GitHub
devtools::install_github("hadley/fueleconomy")
# Use the `libary()` function to load the "fueleconomy" package
library(fueleconomy)
# You should now have access to the `vehicles` data frame
# You can use `View()` to inspect it
View(vehicles)
# Select the different manufacturers (makes) of the cars in this data set.
# Save this vector in a variable
manufacturers <- vehicles$make
# Use the `unique()` function to determine how many different car manufacturers
# are represented by the data set
length(unique(manufacturers))
# Filter the data set for vehicles manufactured in 1997
vehicles_1997 <- vehicles[vehicles$year == "1997", ]
# Arrange the 1997 cars by highway (`hwy`) gas milage
# Hint: use the `order()` function to get a vector of indices in order by value
# See also:
# https://www.r-bloggers.com/r-sorting-a-data-frame-by-the-contents-of-a-column/
# Mutate the 1997 cars data frame to add a column `average` that has the average
# gas milage (between city and highway mpg) for each car
vehicles_1997$average <- (vehicles_1997$hwy + vehicles_1997$cty)/2
# Filter the whole vehicles data set for 2-Wheel Drive vehicles that get more
# than 20 miles/gallon in the city.
# Save this new data frame in a variable.
vehicles_2wd <- vehicles[vehicles$drive == "2-Wheel Drive", ]
efficient_2wd <- vehicles_2wd[vehicles_2wd$cty > 20, ]
# Of the above vehicles, what is the vehicle ID of the vehicle with the worst
# hwy mpg?
# Hint: filter for the worst vehicle, then select its ID.
vehicles_2wd[vehicles_2wd$hwy == min(vehicles_2wd$hwy), "id"]
# Write a function that takes a `year_choice` and a `make_choice` as parameters,
# and returns the vehicle model that gets the most hwy miles/gallon of vehicles
# of that make in that year.
# You'll need to filter more (and do some selecting)!
most_miles <- function(year_choice, make_choice) {
selected_vehicles <- vehicles[vehicles$make == make_choice & vehicles$year == year_choice, ]
return(selected_vehicles[selected_vehicles$hwy == max(selected_vehicles$hwy), "model"])
}
# What was the most efficient Honda model of 1995?
|
/chapter-11-exercises/exercise-1/exercise.R
|
permissive
|
ronron0428/book-exercises
|
R
| false
| false
| 2,283
|
r
|
# Exercise 1: working with data frames (review)
# Install devtools package: allows installations from GitHub
install.packages("devtools")
# Install "fueleconomy" dataset from GitHub
devtools::install_github("hadley/fueleconomy")
# Use the `libary()` function to load the "fueleconomy" package
library(fueleconomy)
# You should now have access to the `vehicles` data frame
# You can use `View()` to inspect it
View(vehicles)
# Select the different manufacturers (makes) of the cars in this data set.
# Save this vector in a variable
manufacturers <- vehicles$make
# Use the `unique()` function to determine how many different car manufacturers
# are represented by the data set
length(unique(manufacturers))
# Filter the data set for vehicles manufactured in 1997
vehicles_1997 <- vehicles[vehicles$year == "1997", ]
# Arrange the 1997 cars by highway (`hwy`) gas milage
# Hint: use the `order()` function to get a vector of indices in order by value
# See also:
# https://www.r-bloggers.com/r-sorting-a-data-frame-by-the-contents-of-a-column/
# Mutate the 1997 cars data frame to add a column `average` that has the average
# gas milage (between city and highway mpg) for each car
vehicles_1997$average <- (vehicles_1997$hwy + vehicles_1997$cty)/2
# Filter the whole vehicles data set for 2-Wheel Drive vehicles that get more
# than 20 miles/gallon in the city.
# Save this new data frame in a variable.
vehicles_2wd <- vehicles[vehicles$drive == "2-Wheel Drive", ]
efficient_2wd <- vehicles_2wd[vehicles_2wd$cty > 20, ]
# Of the above vehicles, what is the vehicle ID of the vehicle with the worst
# hwy mpg?
# Hint: filter for the worst vehicle, then select its ID.
vehicles_2wd[vehicles_2wd$hwy == min(vehicles_2wd$hwy), "id"]
# Write a function that takes a `year_choice` and a `make_choice` as parameters,
# and returns the vehicle model that gets the most hwy miles/gallon of vehicles
# of that make in that year.
# You'll need to filter more (and do some selecting)!
most_miles <- function(year_choice, make_choice) {
selected_vehicles <- vehicles[vehicles$make == make_choice & vehicles$year == year_choice, ]
return(selected_vehicles[selected_vehicles$hwy == max(selected_vehicles$hwy), "model"])
}
# What was the most efficient Honda model of 1995?
|
###############################################################################
# Description: SuperLearner wrapper functions (SVM, polymars, nnet, and
#              glmnet lasso/ridge/elastic-net) with mean-fallback error
#              handling.
#
# Author: Linh Tran <tranlm@berkeley.edu>
# Date: Aug 27, 2015
###############################################################################
#' @export
SL.svm.LT = function (Y, X, newX, family, type.reg = "eps-regression", type.class = "C-classification", nu = 0.5, gamma = 0.1, ...) {
if (family$family == "binomial" & !all(Y %in% c(0,1))) {
fit.svm = try(svm(y = Y, x = X, nu = nu, type = type.reg, fitted = FALSE, gamma=gamma), silent=TRUE)
if(inherits(fit.svm, "try-error")) {
pred = rep(mean(Y), nrow(newX))
fit = list(object="Algorithm failed")
} else {
pred = bound(predict(fit.svm, newdata = newX), c(0,1))
fit = list(object = fit.svm)
}
}
else if (family$family == "binomial") {
newY = as.factor(Y)
fit.svm = try(svm(y = newY, x = X, nu = nu, type = type.class, fitted = FALSE, gamma=gamma, probability = TRUE), silent=TRUE)
if(inherits(fit.svm, "try-error")) {
pred = rep(mean(Y), nrow(newX))
fit = list(object="Algorithm failed")
} else {
pred = try(attr(predict(fit.svm, newdata = newX, probability = TRUE), "prob")[, "1"])
if(inherits(pred, "try-error")) {
pred = rep(mean(Y), nrow(newX))
}
fit = list(object = fit.svm)
}
}
out = list(pred = pred, fit = fit)
class(out$fit) = c("SL.svm")
return(out)
}
#' @export
SL.polymars.LT = function(Y, X, newX, family, obsWeights, cv=2, seed=1000, ...){
if (family$family == "binomial" & !all(Y %in% c(0,1))) {
fit.mars = try(polymars(Y, X, weights = obsWeights), silent=TRUE)
if(inherits(fit.mars, "try-error")) {
pred = rep(mean(Y), nrow(newX))
fit = list(object="Algorithm failed")
} else {
pred = bound(predict(fit.mars, x = newX), c(0,1))
fit = list(object = fit.mars)
}
}
else if (family$family == "binomial") {
newY = Y
fit.mars = try(polyclass(newY, X, cv = cv, weight = obsWeights, seed=seed), silent=TRUE)
if(inherits(fit.mars, "try-error")) {
pred = rep(mean(Y), nrow(newX))
fit = list(object="Algorithm failed")
} else {
pred = ppolyclass(cov = newX, fit = fit.mars)[, 2]
fit = list(fit = fit.mars)
}
}
out = list(pred = pred, fit = fit)
class(out$fit) = c("SL.polymars")
return(out)
}
#' @export
SL.nnet.LT = function (Y, X, newX, family, obsWeights, size = 2, maxit = 1000, ...) {
if (family$family == "binomial" & !all(Y %in% c(0,1))) {
fit.nnet = try(nnet(x = X, y = Y, size = size, trace = FALSE, maxit = maxit, linout = TRUE, weights = obsWeights), silent=TRUE)
if(inherits(fit.nnet, "try-error")) {
pred = rep(mean(Y), nrow(newX))
fit = list(object="Algorithm failed")
} else {
pred = bound(predict(fit.nnet, newdata = newX, type = "raw"), c(0,1))
fit = list(object = fit.nnet)
}
}
else if (family$family == "binomial") {
newY = Y
fit.nnet = try(nnet(x = X, y = newY, size = size, trace = FALSE, maxit = maxit, linout = FALSE, weights = obsWeights), silent=TRUE)
if(inherits(fit.nnet, "try-error")) {
pred = rep(mean(Y), nrow(newX))
fit = list(object="Algorithm failed")
} else {
pred = predict(fit.nnet, newdata = newX, type = "raw")
fit = list(object = fit.nnet)
}
}
out = list(pred = pred, fit = fit)
class(out$fit) = c("SL.nnet")
return(out)
}
#' @export
SL.lasso.LT = function(Y, X, newX, family, obsWeights, id, alpha = 1, nfolds = 4, nlambda = 100, useMin = TRUE, ...) {
# X must be a matrix, should we use model.matrix or as.matrix
if(!is.matrix(X)) {
X = model.matrix(~ -1 + ., X)
newX = model.matrix(~ -1 + ., newX)
}
# now use CV to find lambda
Y.matrix = cbind(1-Y,Y)
fitCV = try(cv.glmnet(x = X, y = Y.matrix, weights = obsWeights, lambda = NULL, type.measure = 'deviance', nfolds = nfolds, family = family$family, alpha = alpha, nlambda = nlambda), silent=TRUE)
if(inherits(fitCV, "try-error")) {
pred = rep(mean(Y), nrow(newX))
fit = list(object="Algorithm failed")
} else {
# two options for lambda, fitCV$lambda.min and fitCV$lambda.1se
pred = predict(fitCV$glmnet.fit, newx = newX, s = ifelse(useMin, fitCV$lambda.min, fitCV$lambda.1se), type = 'response')
fit = list(object = fitCV, useMin = useMin)
}
class(fit) = 'SL.glmnet'
out = list(pred = pred, fit = fit)
return(out)
}
#' @export
SL.ridge.LT = function (Y, X, newX, family, obsWeights, id, alpha = 0, nfolds = 4, nlambda = 100, useMin = TRUE, ...) {
# X must be a matrix, should we use model.matrix or as.matrix
if(!is.matrix(X)) {
X = model.matrix(~ -1 + ., X)
newX = model.matrix(~ -1 + ., newX)
}
# now use CV to find lambda
Y.matrix = cbind(1-Y,Y)
fitCV <- try(cv.glmnet(x = X, y = Y.matrix, weights = obsWeights, lambda = NULL, type.measure = "deviance", nfolds = nfolds, family = family$family, alpha = alpha, nlambda = nlambda), silent=TRUE)
if(inherits(fitCV, "try-error")) {
pred = rep(mean(Y), nrow(newX))
fit = list(object="Algorithm failed")
} else {
# two options for lambda, fitCV$lambda.min and fitCV$lambda.1se
pred <- predict(fitCV$glmnet.fit, newx = newX, s = ifelse(useMin, fitCV$lambda.min, fitCV$lambda.1se), type = "response")
fit <- list(object = fitCV, useMin = useMin)
}
class(fit) <- "SL.glmnet"
out <- list(pred = pred, fit = fit)
return(out)
}
#' @export
SL.glmnet.LT = function (Y, X, newX, family, obsWeights, id, alpha = 0.5, nfolds = 4, nlambda = 100, useMin = TRUE, ...) {
# X must be a matrix, should we use model.matrix or as.matrix
if(!is.matrix(X)) {
X = model.matrix(~ -1 + ., X)
newX = model.matrix(~ -1 + ., newX)
}
# now use CV to find lambda
Y.matrix = cbind(1-Y,Y)
fitCV <- try(cv.glmnet(x = X, y = Y.matrix, weights = obsWeights, lambda = NULL, type.measure = "deviance", nfolds = nfolds, family = family$family, alpha = alpha, nlambda = nlambda), silent=TRUE)
if(inherits(fitCV, "try-error")) {
pred = rep(mean(Y), nrow(newX))
fit = list(object="Algorithm failed")
} else {
# two options for lambda, fitCV$lambda.min and fitCV$lambda.1se
pred <- predict(fitCV$glmnet.fit, newx = newX, s = ifelse(useMin, fitCV$lambda.min, fitCV$lambda.1se), type = "response")
fit <- list(object = fitCV, useMin = useMin)
}
class(fit) <- "SL.glmnet"
out <- list(pred = pred, fit = fit)
return(out)
}
|
/R/SuperLearner.R
|
no_license
|
guhjy/lrecCompare
|
R
| false
| false
| 6,229
|
r
|
###############################################################################
# Description: SuperLearner wrapper functions (SVM, polymars, nnet, and
#              glmnet lasso/ridge/elastic-net) with mean-fallback error
#              handling.
#
# Author: Linh Tran <tranlm@berkeley.edu>
# Date: Aug 27, 2015
###############################################################################
#' @export
SL.svm.LT = function (Y, X, newX, family, type.reg = "eps-regression", type.class = "C-classification", nu = 0.5, gamma = 0.1, ...) {
if (family$family == "binomial" & !all(Y %in% c(0,1))) {
fit.svm = try(svm(y = Y, x = X, nu = nu, type = type.reg, fitted = FALSE, gamma=gamma), silent=TRUE)
if(inherits(fit.svm, "try-error")) {
pred = rep(mean(Y), nrow(newX))
fit = list(object="Algorithm failed")
} else {
pred = bound(predict(fit.svm, newdata = newX), c(0,1))
fit = list(object = fit.svm)
}
}
else if (family$family == "binomial") {
newY = as.factor(Y)
fit.svm = try(svm(y = newY, x = X, nu = nu, type = type.class, fitted = FALSE, gamma=gamma, probability = TRUE), silent=TRUE)
if(inherits(fit.svm, "try-error")) {
pred = rep(mean(Y), nrow(newX))
fit = list(object="Algorithm failed")
} else {
pred = try(attr(predict(fit.svm, newdata = newX, probability = TRUE), "prob")[, "1"])
if(inherits(pred, "try-error")) {
pred = rep(mean(Y), nrow(newX))
}
fit = list(object = fit.svm)
}
}
out = list(pred = pred, fit = fit)
class(out$fit) = c("SL.svm")
return(out)
}
#' @export
SL.polymars.LT = function(Y, X, newX, family, obsWeights, cv=2, seed=1000, ...){
if (family$family == "binomial" & !all(Y %in% c(0,1))) {
fit.mars = try(polymars(Y, X, weights = obsWeights), silent=TRUE)
if(inherits(fit.mars, "try-error")) {
pred = rep(mean(Y), nrow(newX))
fit = list(object="Algorithm failed")
} else {
pred = bound(predict(fit.mars, x = newX), c(0,1))
fit = list(object = fit.mars)
}
}
else if (family$family == "binomial") {
newY = Y
fit.mars = try(polyclass(newY, X, cv = cv, weight = obsWeights, seed=seed), silent=TRUE)
if(inherits(fit.mars, "try-error")) {
pred = rep(mean(Y), nrow(newX))
fit = list(object="Algorithm failed")
} else {
pred = ppolyclass(cov = newX, fit = fit.mars)[, 2]
fit = list(fit = fit.mars)
}
}
out = list(pred = pred, fit = fit)
class(out$fit) = c("SL.polymars")
return(out)
}
#' @export
SL.nnet.LT = function (Y, X, newX, family, obsWeights, size = 2, maxit = 1000, ...) {
if (family$family == "binomial" & !all(Y %in% c(0,1))) {
fit.nnet = try(nnet(x = X, y = Y, size = size, trace = FALSE, maxit = maxit, linout = TRUE, weights = obsWeights), silent=TRUE)
if(inherits(fit.nnet, "try-error")) {
pred = rep(mean(Y), nrow(newX))
fit = list(object="Algorithm failed")
} else {
pred = bound(predict(fit.nnet, newdata = newX, type = "raw"), c(0,1))
fit = list(object = fit.nnet)
}
}
else if (family$family == "binomial") {
newY = Y
fit.nnet = try(nnet(x = X, y = newY, size = size, trace = FALSE, maxit = maxit, linout = FALSE, weights = obsWeights), silent=TRUE)
if(inherits(fit.nnet, "try-error")) {
pred = rep(mean(Y), nrow(newX))
fit = list(object="Algorithm failed")
} else {
pred = predict(fit.nnet, newdata = newX, type = "raw")
fit = list(object = fit.nnet)
}
}
out = list(pred = pred, fit = fit)
class(out$fit) = c("SL.nnet")
return(out)
}
#' @export
SL.lasso.LT = function(Y, X, newX, family, obsWeights, id, alpha = 1, nfolds = 4, nlambda = 100, useMin = TRUE, ...) {
  # SuperLearner wrapper: lasso (alpha = 1) via cross-validated glmnet.
  #
  # Args:
  #   Y: outcome; a two-column (failure, success) response matrix is built
  #      so cv.glmnet accepts continuous Y in [0, 1] under "binomial".
  #   X, newX: training and prediction predictor sets.
  #   family: family object; family$family is forwarded to cv.glmnet.
  #   obsWeights: observation weights for cv.glmnet.
  #   id: cluster identifier (unused here; kept for wrapper compatibility).
  #   alpha: elastic-net mixing parameter; 1 = lasso.
  #   nfolds, nlambda: CV folds and lambda path length for cv.glmnet.
  #   useMin: if TRUE predict at lambda.min, otherwise at lambda.1se.
  #   ...: absorbed for SuperLearner wrapper signature compatibility.
  #
  # Returns: list(pred, fit) in the standard SuperLearner wrapper format.
  # If cv.glmnet fails, pred falls back to the marginal mean of Y.
  # glmnet requires a numeric matrix; expand factors with model.matrix
  # (no intercept column: glmnet adds its own).
  if (!is.matrix(X)) {
    X <- model.matrix(~ -1 + ., X)
    newX <- model.matrix(~ -1 + ., newX)
  }
  # Use CV over the lambda path to select the regularization strength.
  Y.matrix <- cbind(1 - Y, Y)
  fitCV <- try(cv.glmnet(x = X, y = Y.matrix, weights = obsWeights, lambda = NULL, type.measure = "deviance", nfolds = nfolds, family = family$family, alpha = alpha, nlambda = nlambda), silent = TRUE)
  if (inherits(fitCV, "try-error")) {
    pred <- rep(mean(Y), nrow(newX))
    fit <- list(object = "Algorithm failed")
  } else {
    # Scalar choice between lambda.min and lambda.1se: use if/else rather
    # than the vectorized ifelse(), which is a scalar-ifelse anti-pattern
    # and strips attributes.
    lambda <- if (useMin) fitCV$lambda.min else fitCV$lambda.1se
    pred <- predict(fitCV$glmnet.fit, newx = newX, s = lambda, type = "response")
    fit <- list(object = fitCV, useMin = useMin)
  }
  class(fit) <- "SL.glmnet"
  out <- list(pred = pred, fit = fit)
  return(out)
}
#' @export
SL.ridge.LT = function (Y, X, newX, family, obsWeights, id, alpha = 0, nfolds = 4, nlambda = 100, useMin = TRUE, ...) {
  # SuperLearner wrapper: ridge regression (alpha = 0) via cross-validated
  # glmnet.
  #
  # Args:
  #   Y: outcome; a two-column (failure, success) response matrix is built
  #      so cv.glmnet accepts continuous Y in [0, 1] under "binomial".
  #   X, newX: training and prediction predictor sets.
  #   family: family object; family$family is forwarded to cv.glmnet.
  #   obsWeights: observation weights for cv.glmnet.
  #   id: cluster identifier (unused here; kept for wrapper compatibility).
  #   alpha: elastic-net mixing parameter; 0 = ridge.
  #   nfolds, nlambda: CV folds and lambda path length for cv.glmnet.
  #   useMin: if TRUE predict at lambda.min, otherwise at lambda.1se.
  #   ...: absorbed for SuperLearner wrapper signature compatibility.
  #
  # Returns: list(pred, fit) in the standard SuperLearner wrapper format.
  # If cv.glmnet fails, pred falls back to the marginal mean of Y.
  # glmnet requires a numeric matrix; expand factors with model.matrix
  # (no intercept column: glmnet adds its own).
  if (!is.matrix(X)) {
    X <- model.matrix(~ -1 + ., X)
    newX <- model.matrix(~ -1 + ., newX)
  }
  # Use CV over the lambda path to select the regularization strength.
  Y.matrix <- cbind(1 - Y, Y)
  fitCV <- try(cv.glmnet(x = X, y = Y.matrix, weights = obsWeights, lambda = NULL, type.measure = "deviance", nfolds = nfolds, family = family$family, alpha = alpha, nlambda = nlambda), silent = TRUE)
  if (inherits(fitCV, "try-error")) {
    pred <- rep(mean(Y), nrow(newX))
    fit <- list(object = "Algorithm failed")
  } else {
    # Scalar choice between lambda.min and lambda.1se: use if/else rather
    # than the vectorized ifelse() (scalar-ifelse anti-pattern).
    lambda <- if (useMin) fitCV$lambda.min else fitCV$lambda.1se
    pred <- predict(fitCV$glmnet.fit, newx = newX, s = lambda, type = "response")
    fit <- list(object = fitCV, useMin = useMin)
  }
  class(fit) <- "SL.glmnet"
  out <- list(pred = pred, fit = fit)
  return(out)
}
#' @export
SL.glmnet.LT = function (Y, X, newX, family, obsWeights, id, alpha = 0.5, nfolds = 4, nlambda = 100, useMin = TRUE, ...) {
  # SuperLearner wrapper: elastic net (default alpha = 0.5) via
  # cross-validated glmnet.
  #
  # Args:
  #   Y: outcome; a two-column (failure, success) response matrix is built
  #      so cv.glmnet accepts continuous Y in [0, 1] under "binomial".
  #   X, newX: training and prediction predictor sets.
  #   family: family object; family$family is forwarded to cv.glmnet.
  #   obsWeights: observation weights for cv.glmnet.
  #   id: cluster identifier (unused here; kept for wrapper compatibility).
  #   alpha: elastic-net mixing parameter (0 = ridge, 1 = lasso).
  #   nfolds, nlambda: CV folds and lambda path length for cv.glmnet.
  #   useMin: if TRUE predict at lambda.min, otherwise at lambda.1se.
  #   ...: absorbed for SuperLearner wrapper signature compatibility.
  #
  # Returns: list(pred, fit) in the standard SuperLearner wrapper format.
  # If cv.glmnet fails, pred falls back to the marginal mean of Y.
  # glmnet requires a numeric matrix; expand factors with model.matrix
  # (no intercept column: glmnet adds its own).
  if (!is.matrix(X)) {
    X <- model.matrix(~ -1 + ., X)
    newX <- model.matrix(~ -1 + ., newX)
  }
  # Use CV over the lambda path to select the regularization strength.
  Y.matrix <- cbind(1 - Y, Y)
  fitCV <- try(cv.glmnet(x = X, y = Y.matrix, weights = obsWeights, lambda = NULL, type.measure = "deviance", nfolds = nfolds, family = family$family, alpha = alpha, nlambda = nlambda), silent = TRUE)
  if (inherits(fitCV, "try-error")) {
    pred <- rep(mean(Y), nrow(newX))
    fit <- list(object = "Algorithm failed")
  } else {
    # Scalar choice between lambda.min and lambda.1se: use if/else rather
    # than the vectorized ifelse() (scalar-ifelse anti-pattern).
    lambda <- if (useMin) fitCV$lambda.min else fitCV$lambda.1se
    pred <- predict(fitCV$glmnet.fit, newx = newX, s = lambda, type = "response")
    fit <- list(object = fitCV, useMin = useMin)
  }
  class(fit) <- "SL.glmnet"
  out <- list(pred = pred, fit = fit)
  return(out)
}
# NOTE(review): the lines below are dataset-viewer scraping residue, not R
# code; commented out so the file remains parseable.
# |
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.