content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
################ load necessary libraries ############################################################
library(igraph)
library(rNMF)
library(xgboost)
################ read input data #####################################################################
# Known miRNA-SM association list: one row per known association,
# column V1 = SM index, column V2 = miRNA index (see the V1/V2 usage in the LOOCV loop below).
knownMSA <- read.table(file = "./SM-miRNA/similar/SM-miRNA_Num_A_similar.csv", header = F,sep=",")
# miRNA functional similarity matrix -- presumably m x m with scores in [0, 1]; TODO confirm against the data file
similaritiesOfMiRNA <- as.matrix(read.table(file = "./SM-miRNA/similar/miRNA_smilarity_maritx.csv", header = F,sep=","))
# SM similarity matrix -- presumably s x s with scores in [0, 1]; TODO confirm against the data file
similaritiesOfSM <- as.matrix(read.table(file = "./SM-miRNA/similar/SM_similarity_matrix.csv", header = F,sep=","))
############### function to build training and testing data ##########################################
BuildTrainingAndTestingData <- function(MSA, similaritiesOfMiRNA, similaritiesOfSM, m, s, knownMSAIndices,
                                        negativeSampleIndices, positiveAndNegativeIndices,
                                        globalLoocvTestingIndices, localLoocvTestingIndices) {
  # Assembles the feature vectors and labels for one LOOCV fold.
  #
  # Arguments:
  #   MSA                        - m x s binary miRNA-SM association matrix (rows = miRNAs, cols = SMs)
  #   similaritiesOfMiRNA        - m x m miRNA functional similarity matrix (scores assumed in [0, 1])
  #   similaritiesOfSM           - s x s SM similarity matrix (scores assumed in [0, 1])
  #   m, s                       - number of miRNAs / SMs (generalizes the hard-coded 541/831/1372)
  #   knownMSAIndices            - row-major pair indices ((miRNA - 1) * s + SM) of known associations
  #   negativeSampleIndices      - row-major pair indices of the sampled negatives
  #   positiveAndNegativeIndices - concatenation of the two index sets above (training pairs)
  #   globalLoocvTestingIndices  - row-major pair indices of all unknown pairs (global LOOCV)
  #   localLoocvTestingIndices   - row-major pair indices of the negated miRNA's unknown pairs (local LOOCV)
  #
  # Returns a list with loocvTrainingFeatureVectors (labels in column 1),
  # globalLoocvTestingFeatureVectors and localLoocvTestingFeatureVectors.
  #
  # Bug fixes versus the original implementation:
  #   * the "associations between a SM and an miRNA's neighbors" count indexed MSA with a
  #     single subscript (MSA[neighbor] instead of MSA[neighbor, i]), counting unrelated pairs;
  #   * BuildFeatures indexed the two per-pair neighbor-association vectors by the miRNA
  #     (resp. SM) index alone instead of by the pair index.

  # helper: proportion of scores falling in the five bins
  # [0, 0.2), [0.2, 0.4), [0.4, 0.6), [0.6, 0.8), [0.8, 1]
  # (assumes scores <= 1, as the original else-if chain did)
  BinProportions <- function(scores, n) {
    tabulate(findInterval(scores, c(0.2, 0.4, 0.6, 0.8)) + 1L, nbins = 5) / n
  }

  ##############################
  ## Type 1 feature of miRNAs ##
  ##############################
  noOfObervationsOfMiRNA <- rowSums(MSA)                    # known associations per miRNA
  aveOfSimilaritiesOfMiRNA <- rowMeans(similaritiesOfMiRNA) # mean similarity per miRNA
  histMiRNA <- t(apply(similaritiesOfMiRNA, 1, BinProportions, n = m))
  feature1OfMiRNA <- cbind(noOfObervationsOfMiRNA, aveOfSimilaritiesOfMiRNA, histMiRNA)
  colnames(feature1OfMiRNA) <- c("noOfObervationsOfMiRNA", "aveOfSimilaritiesOfMiRNA", "hist1MiRNA",
                                 "hist2MiRNA", "hist3MiRNA", "hist4MiRNA", "hist5MiRNA")

  ################################
  ## Type 1 feature of SMs ##
  ################################
  noOfObervationsOfSM <- colSums(MSA)                 # known associations per SM
  aveOfSimilaritiesOfSM <- colMeans(similaritiesOfSM) # original averaged columns (matrix presumably symmetric)
  histSM <- t(apply(similaritiesOfSM, 1, BinProportions, n = s))
  feature1OfSM <- cbind(noOfObervationsOfSM, aveOfSimilaritiesOfSM, histSM)
  colnames(feature1OfSM) <- c("noOfObervationsOfSM", "aveOfSimilaritiesOfSM", "hist1SM",
                              "hist2SM", "hist3SM", "hist4SM", "hist5SM")

  # helper: neighborhood / graph-centrality (type 2) features, shared by miRNAs and SMs.
  # An entity's neighbors are those whose similarity is >= the overall mean similarity.
  BuildType2Features <- function(similarities, feature1, n) {
    meanSim <- mean(similarities)
    adjacency <- (similarities >= meanSim) * 1   # thresholded similarity graph
    numberOfNeighbors <- rowSums(adjacency) - 1  # self-similarity not counted
    knnSimilarities <- matrix(0, nrow = n, ncol = 10)
    averageOfFeature1 <- matrix(0, nrow = n, ncol = 7)
    weightedAverageOfFeature1 <- matrix(0, nrow = n, ncol = 7)
    for (i in seq_len(n)) {
      ord <- sort(similarities[i, ], decreasing = TRUE, index.return = TRUE)
      knnSimilarities[i, ] <- ord$x[2:11]  # 10 nearest neighbors; position 1 is self
      indices <- ord$ix[2:11]
      k <- numberOfNeighbors[i]
      if (k <= 0) next  # no neighbors: leave the zero rows
      if (k == 1) {
        averageOfFeature1[i, ] <- feature1[indices[1], ] / 10
        weightedAverageOfFeature1[i, ] <- feature1[indices[1], ] * knnSimilarities[i, 1] / 10
        next
      }
      if (k <= 10) {
        indices <- indices[1:k]  # more than 10 neighbors: keep only the top 10
      }
      picked <- feature1[indices, , drop = FALSE]
      # dividing by 10 keeps the average comparable for entities with < 10 neighbors
      averageOfFeature1[i, ] <- colSums(picked) / 10
      weightedAverageOfFeature1[i, ] <- colSums(picked * knnSimilarities[i, seq_along(indices)]) / 10
    }
    graph <- graph_from_adjacency_matrix(adjmatrix = adjacency, mode = "undirected",
                                         weighted = NULL, diag = TRUE)
    cbind(numberOfNeighbors, knnSimilarities, averageOfFeature1, weightedAverageOfFeature1,
          betweenness(graph, directed = FALSE, normalized = TRUE),
          closeness(graph, mode = "all"),
          eigen_centrality(graph, directed = FALSE)$vector,
          page_rank(graph, directed = FALSE)$vector)
  }

  ##############################
  ## Type 2 feature of miRNAs ##
  ##############################
  feature2OfMiRNA <- BuildType2Features(similaritiesOfMiRNA, feature1OfMiRNA, m)
  colnames(feature2OfMiRNA) <- c("numberOfNeighborsMiRNA", "knn1SimilarityMiRNA", "knn2SimilarityMiRNA", "knn3SimilarityMiRNA",
                                 "knn4SimilarityMiRNA", "knn5SimilarityMiRNA", "knn6SimilarityMiRNA", "knn7SimilarityMiRNA",
                                 "knn8SimilarityMiRNA", "knn9SimilarityMiRNA", "knn10SimilarityMiRNA", "aveNoObsMiRNA",
                                 "aveOfAveSimilarityMiRNA", "aveHist1MiRNA", "aveHist2MiRNA", "aveHist3MiRNA", "aveHist4MiRNA",
                                 "aveHist5MiRNA", "weightedAveNoObsMiRNA", "weightedAveOfAveSimilarityMiRNA", "weightedAveHist1MiRNA",
                                 "weightedAveHist2MiRNA", "weightedAveHist3MiRNA", "weightedAveHist4MiRNA", "weightedAveHist5MiRNA",
                                 "betweennessCentralityMiRNA", "closenessCentralityMiRNA", "eigenVectorCentralityMiRNA", "pageRankMiRNA")

  ################################
  ## Type 2 feature of SMs ##
  ################################
  feature2OfSM <- BuildType2Features(similaritiesOfSM, feature1OfSM, s)
  colnames(feature2OfSM) <- c("numberOfNeighborsSM", "knn1SimilaritySM", "knn2SimilaritySM", "knn3SimilaritySM",
                              "knn4SimilaritySM", "knn5SimilaritySM", "knn6SimilaritySM", "knn7SimilaritySM",
                              "knn8SimilaritySM", "knn9SimilaritySM", "knn10SimilaritySM", "aveNoObsSM",
                              "aveOfAveSimilaritySM", "aveHist1SM", "aveHist2SM", "aveHist3SM", "aveHist4SM",
                              "aveHist5SM", "weightedAveNoObsSM", "weightedAveOfAveSimilaritySM", "weightedAveHist1SM",
                              "weightedAveHist2SM", "weightedAveHist3SM", "weightedAveHist4SM", "weightedAveHist5SM",
                              "betweennessCentralitySM", "closenessCentralitySM", "eigenVectorCentralitySM", "pageRankSM")

  ###########################################
  ## Type 3 feature of miRNA-SM pairs ##
  ###########################################
  # non-negative matrix factorization; W is m x k, H is k x s. The column names below
  # assume k = 5 latent factors -- TODO confirm against the rnmf() default.
  set.seed(666)
  mfMSA <- rnmf(MSA, quiet = TRUE, showprogress = FALSE)
  latentVectorsMiRNA <- mfMSA$W
  latentVectorsSM <- mfMSA$H
  meanSimilarityMiRNA <- mean(similaritiesOfMiRNA)
  meanSimilaritySM <- mean(similaritiesOfSM)
  # number of associations between miRNA i and the neighbors of SM j, the (i, j)
  # association itself excluded; stored row-major at (i - 1) * s + j.
  # Matrix form of the original triple loop: sum_k MSA[i, k] * mask[j, k] - MSA[i, j].
  SMNeighborMask <- (similaritiesOfSM >= meanSimilaritySM) * 1
  numberOfSMNeighborAssociations <- as.vector(t(MSA %*% t(SMNeighborMask) - MSA))
  # number of associations between SM i and the neighbors of miRNA j, the (j, i)
  # association itself excluded; stored column-major at (i - 1) * m + j.
  # Bug fix: the original tested MSA[neighbor] (single subscript, i.e. a column-major
  # linear index) instead of MSA[neighbor, i].
  miRNANeighborMask <- (similaritiesOfMiRNA >= meanSimilarityMiRNA) * 1
  numberOfMiRNANeighborAssociations <- as.vector(miRNANeighborMask %*% MSA - MSA)
  # bipartite association graph; vertices 1..m are miRNAs, m+1..m+s are SMs
  # (generalized from the hard-coded 1:541 / 542:1372 vertex splits)
  MSAGraph <- graph_from_incidence_matrix(incidence = MSA, directed = FALSE, mode = "total")
  miRNAVertices <- seq_len(m)
  SMVertices <- m + seq_len(s)
  betweennessCentralityMSA <- betweenness(MSAGraph, directed = FALSE, normalized = TRUE)
  betweennessCentralityMiRNAInMSA <- betweennessCentralityMSA[miRNAVertices]
  betweennessCentralitySMInMSA <- betweennessCentralityMSA[SMVertices]
  closenessCentralityMSA <- closeness(MSAGraph, mode = "all")
  closenessCentralityMiRNAInMSA <- closenessCentralityMSA[miRNAVertices]
  closenessCentralitySMInMSA <- closenessCentralityMSA[SMVertices]
  eigenVectorCentralityMSA <- eigen_centrality(MSAGraph, directed = FALSE)$vector
  eigenVectorCentralityMiRNAInMSA <- eigenVectorCentralityMSA[miRNAVertices]
  eigenVectorCentralitySMInMSA <- eigenVectorCentralityMSA[SMVertices]
  pageRankMSA <- page_rank(MSAGraph, directed = FALSE)$vector
  pageRankMiRNAInMSA <- pageRankMSA[miRNAVertices]
  pageRankSMInMSA <- pageRankMSA[SMVertices]

  #########################################
  ## function to combine feature vectors ##
  #########################################
  # Maps a vector of row-major pair indices to the full feature matrix
  # (type 1 + type 2 features of both entities, plus the type 3 pair features).
  BuildFeatures <- function(pairIndices) {
    # decode the row-major pair index: idx = (miRNA - 1) * s + SM
    # (generalized from the hard-coded 831)
    miRNAIndices <- ceiling(pairIndices / s)
    SMIndices <- pairIndices - (miRNAIndices - 1) * s
    loocvFeature1MiRNA <- feature1OfMiRNA[miRNAIndices, ]
    loocvFeature2MiRNA <- feature2OfMiRNA[miRNAIndices, ]
    loocvFeature1SM <- feature1OfSM[SMIndices, ]
    loocvFeature2SM <- feature2OfSM[SMIndices, ]
    loocvFeature3 <- cbind(latentVectorsMiRNA[miRNAIndices, ], t(latentVectorsSM[, SMIndices]),
                           # bug fix: these two per-pair vectors were indexed by the miRNA
                           # (resp. SM) index alone, picking up unrelated pairs' counts
                           numberOfSMNeighborAssociations[pairIndices],
                           numberOfMiRNANeighborAssociations[(SMIndices - 1) * m + miRNAIndices],
                           betweennessCentralityMiRNAInMSA[miRNAIndices], closenessCentralityMiRNAInMSA[miRNAIndices],
                           eigenVectorCentralityMiRNAInMSA[miRNAIndices], pageRankMiRNAInMSA[miRNAIndices],
                           betweennessCentralitySMInMSA[SMIndices], closenessCentralitySMInMSA[SMIndices],
                           eigenVectorCentralitySMInMSA[SMIndices], pageRankSMInMSA[SMIndices])
    colnames(loocvFeature3) <- c("latentVectors1MiRNA", "latentVectors2MiRNA", "latentVectors3MiRNA", "latentVectors4MiRNA", "latentVectors5MiRNA", "latentVectors1SM",
                                 "latentVectors2SM", "latentVectors3SM", "latentVectors4SM", "latentVectors5SM", "numberOfSMNeighborAssociations",
                                 "numberOfMiRNANeighborAssociations", "betweennessCentralityMiRNAInMSA", "closenessCentralityMiRNAInMSA", "eigenVectorCentralityMiRNAInMSA",
                                 "pageRankMiRNAInMSA", "betweennessCentralitySMInMSA", "closenessCentralitySMInMSA", "eigenVectorCentralitySMInMSA",
                                 "pageRankSMInMSA")
    cbind(loocvFeature1MiRNA, loocvFeature1SM, loocvFeature2MiRNA, loocvFeature2SM, loocvFeature3)
  }

  # training labels: 1 for known associations, 0 for sampled negatives
  trainingLabels <- matrix(c(rep(1, length(knownMSAIndices)), rep(0, length(negativeSampleIndices))),
                           ncol = 1)
  colnames(trainingLabels) <- "labels"
  # training data (labels in column 1) and the two testing sets
  loocvTrainingFeatureVectors <- cbind(trainingLabels, BuildFeatures(positiveAndNegativeIndices))
  globalLoocvTestingFeatureVectors <- BuildFeatures(globalLoocvTestingIndices)
  localLoocvTestingFeatureVectors <- BuildFeatures(localLoocvTestingIndices)
  list("loocvTrainingFeatureVectors" = loocvTrainingFeatureVectors,
       "globalLoocvTestingFeatureVectors" = globalLoocvTestingFeatureVectors,
       "localLoocvTestingFeatureVectors" = localLoocvTestingFeatureVectors)
}
############### loops for global and local loocv #####################################################
# record the miRNA count, SM count and known MSA count
m <- 541 # 495 in the other dataset variant
s <- 831 # 383 in the other dataset variant
noOfKnownMSA <- nrow(knownMSA) # 664
# xgboost parameters; bug fix: the parameter is spelled `max_depth`, not `maxDepth`
# (the misspelled name was silently ignored, so the default depth 6 applied anyway)
parameters <- list(eta = 1, max_depth = 6, lambda = 1, gamma = 0)
# preallocate the rankings instead of growing with rbind() inside the loop
rankings <- matrix(0, nrow = noOfKnownMSA, ncol = 2)
colnames(rankings) <- c("globalRankings", "localRankings")
# loocv loops: each iteration hides ("negates") one known association and ranks it
# among all unknown pairs (global) / among the negated miRNA's unknown pairs (local)
for (negated in seq_len(noOfKnownMSA)) {
  # negated miRNA (column V2), SM (column V1) and the association's row-major pair index
  negatedMiRNA <- knownMSA$V2[negated]
  negatedSM <- knownMSA$V1[negated]
  negatedIndex <- (negatedMiRNA - 1) * s + negatedSM
  # build the m x s binary association matrix without the negated pair
  # (vectorized index assignment; replaces the original O(m * s) nested scan)
  loocvKnownMSA <- knownMSA[-negated, ]
  originalMSA <- matrix(0, nrow = m, ncol = s)
  originalMSA[cbind(loocvKnownMSA$V2, loocvKnownMSA$V1)] <- 1
  # known associations as row-major linear indices (t() flattens row by row)
  knownMSAIndices <- which(t(originalMSA) == 1, arr.ind = FALSE)
  allIndices <- 1 : (m * s)
  # randomly select noOfKnownMSA - 1 negative samples (generalized from the hard-coded 663);
  # the fixed seed reproduces the same negative sample in every fold, as in the original
  set.seed(666)
  negativeSampleIndices <- sample(allIndices[-knownMSAIndices], size = noOfKnownMSA - 1, replace = FALSE)
  # indices of the training pairs
  positiveAndNegativeIndices <- c(knownMSAIndices, negativeSampleIndices)
  # global testing set: every pair not known to be associated
  globalLoocvTestingIndices <- allIndices[-knownMSAIndices]
  negatedIndexInGlobalTesting <- which(globalLoocvTestingIndices == negatedIndex)
  # local testing set: the negated miRNA's unassociated SMs.
  # bug fix: encode these row-major ((miRNA - 1) * s + SM) to match the row-major
  # decoding inside BuildTrainingAndTestingData; the original used a column-major
  # formula ((SM - 1) * m + miRNA), so local testing features were built for wrong pairs
  unassociatedSMs <- which(originalMSA[negatedMiRNA, ] == 0)
  negatedIndexInLocalTesting <- which(unassociatedSMs == negatedSM)
  localLoocvTestingIndices <- (negatedMiRNA - 1) * s + unassociatedSMs
  # MSA preprocessing: the model is trained on the raw binary matrix
  # (an experimental similarity-based imputation step previously lived here as
  # commented-out code and has been removed)
  MSA <- originalMSA
  # build training and testing data
  trainingAndTestingData <- BuildTrainingAndTestingData(MSA, similaritiesOfMiRNA, similaritiesOfSM, m, s, knownMSAIndices,
                                                        negativeSampleIndices, positiveAndNegativeIndices,
                                                        globalLoocvTestingIndices, localLoocvTestingIndices)
  # fit xgboost (single boosting round, raw logit output)
  xgboostLoocv <- xgboost(data = trainingAndTestingData$loocvTrainingFeatureVectors[, -1], booster = "gbtree",
                          label = trainingAndTestingData$loocvTrainingFeatureVectors[, 1], params = parameters,
                          nthread = 2, nrounds = 1, objective = "binary:logitraw", verbose = 2)
  # score the testing sets
  predictedWeightsGlobal <- predict(xgboostLoocv, trainingAndTestingData$globalLoocvTestingFeatureVectors)
  predictedWeightsLocal <- predict(xgboostLoocv, trainingAndTestingData$localLoocvTestingFeatureVectors)
  # rank of the negated association among the candidates (1 = highest predicted score)
  globalRankingOfNegated <- which(sort.int(predictedWeightsGlobal, decreasing = TRUE, index.return = TRUE)$ix == negatedIndexInGlobalTesting)
  localRankingOfNegated <- which(sort.int(predictedWeightsLocal, decreasing = TRUE, index.return = TRUE)$ix == negatedIndexInLocalTesting)
  rankings[negated, ] <- c(globalRankingOfNegated, localRankingOfNegated)
}
# write rankings to disk (bug fix: the file extension was misspelled ".cvs")
write.table(rankings, file = "./global_and_local_loocv_rankings.csv", row.names = FALSE)
| /EGBMMDA_LOOCV.R | no_license | manoov/GBDT-in-SM-MiRNA-Association-Prediction | R | false | false | 29,005 | r | ################ load necessary libraries ############################################################
library(igraph)
library(rNMF)
library(xgboost)
################ read input data #####################################################################
# read known miRNA-SM association dataset
knownMSA <- read.table(file = "./SM-miRNA/similar/SM-miRNA_Num_A_similar.csv", header = F,sep=",")
# # read miRNA functional similarity matrix
similaritiesOfMiRNA <- as.matrix(read.table(file = "./SM-miRNA/similar/miRNA_smilarity_maritx.csv", header = F,sep=","))
# # read SM similarity matrix
similaritiesOfSM <- as.matrix(read.table(file = "./SM-miRNA/similar/SM_similarity_matrix.csv", header = F,sep=","))
############### function to build training and testing data ##########################################
BuildTrainingAndTestingData <- function(MSA, similaritiesOfMiRNA, similaritiesOfSM, m, s, knownMSAIndices,
negativeSampleIndices, positiveAndNegativeIndices, globalLoocvTestingIndices, localLoocvTestingIndices) {
##############################
## Type 1 feature of miRNAs ##
##############################
# number of observations in each row of MSA
noOfObervationsOfMiRNA <- matrix(rep(0, m), nrow = m, ncol = 1)
for(i in 1 : m) {
noOfObervationsOfMiRNA[i] <- sum(MSA[i, ])
}
# average of all similarity scores for each miRNA
aveOfSimilaritiesOfMiRNA <- matrix(rep(0, m), nrow = m, ncol = 1)
for(i in 1 : m) {
aveOfSimilaritiesOfMiRNA[i] <- mean(similaritiesOfMiRNA[i, ])
}
# histogram feature: cut [0, 1] into five bins and count the proportion of similarity scores that fall into each bin
hist1MiRNA <- matrix(rep(0, m), nrow = m, ncol = 1) # [0, 0.2)
hist2MiRNA <- matrix(rep(0, m), nrow = m, ncol = 1) # [0.2, 0.4)
hist3MiRNA <- matrix(rep(0, m), nrow = m, ncol = 1) # [0.4, 0.6)
hist4MiRNA <- matrix(rep(0, m), nrow = m, ncol = 1) # [0.6, 0.8)
hist5MiRNA <- matrix(rep(0, m), nrow = m, ncol = 1) # [0.8, 1]
for(i in 1: m) {
hist1Count = 0
hist2Count = 0
hist3Count = 0
hist4Count = 0
hist5Count = 0
for(j in 1 : m) {
if(similaritiesOfMiRNA[i, j] < 0.2) {
hist1Count = hist1Count + 1
} else if(similaritiesOfMiRNA[i, j] < 0.4) {
hist2Count = hist2Count + 1
} else if(similaritiesOfMiRNA[i, j] < 0.6) {
hist3Count = hist3Count + 1
} else if(similaritiesOfMiRNA[i, j] < 0.8) {
hist4Count = hist4Count + 1
} else if(similaritiesOfMiRNA[i, j] <= 1) {
hist5Count = hist5Count + 1
}
}
hist1MiRNA[i] <- hist1Count / m
hist2MiRNA[i] <- hist2Count / m
hist3MiRNA[i] <- hist3Count / m
hist4MiRNA[i] <- hist4Count / m
hist5MiRNA[i] <- hist5Count / m
}
# concatenation
feature1OfMiRNA <- cbind(noOfObervationsOfMiRNA, aveOfSimilaritiesOfMiRNA, hist1MiRNA,
hist2MiRNA, hist3MiRNA, hist4MiRNA, hist5MiRNA)
colnames(feature1OfMiRNA) <- c("noOfObervationsOfMiRNA", "aveOfSimilaritiesOfMiRNA", "hist1MiRNA",
"hist2MiRNA", "hist3MiRNA", "hist4MiRNA",
"hist5MiRNA")
################################
## Type 1 feature of SMs ##
################################
# number of observations in each column of MSA
noOfObervationsOfSM <- matrix(rep(0, s), nrow = s, ncol = 1)
for(i in 1 : s) {
noOfObervationsOfSM[i] <- sum(MSA[, i])
}
# average of all similarity scores for each SM
aveOfSimilaritiesOfSM <- matrix(rep(0, s), nrow = s, ncol = 1)
for(i in 1 : s) {
aveOfSimilaritiesOfSM[i] <- mean(similaritiesOfSM[, i])
}
# histogram feature: cut [0, 1] into five bins and count the proportion of similarity scores that fall into each bin
hist1SM <- matrix(rep(0, s), nrow = s, ncol = 1) # [0, 0.2)
hist2SM <- matrix(rep(0, s), nrow = s, ncol = 1) # [0.2, 0.4)
hist3SM <- matrix(rep(0, s), nrow = s, ncol = 1) # [0.4, 0.6)
hist4SM <- matrix(rep(0, s), nrow = s, ncol = 1) # [0.6, 0.8)
hist5SM <- matrix(rep(0, s), nrow = s, ncol = 1) # [0.8, 1]
for(i in 1: s) {
hist1Count = 0
hist2Count = 0
hist3Count = 0
hist4Count = 0
hist5Count = 0
for(j in 1 : s) {
if(similaritiesOfSM[i, j] < 0.2) {
hist1Count = hist1Count + 1
} else if(similaritiesOfSM[i, j] < 0.4) {
hist2Count = hist2Count + 1
} else if(similaritiesOfSM[i, j] < 0.6) {
hist3Count = hist3Count + 1
} else if(similaritiesOfSM[i, j] < 0.8) {
hist4Count = hist4Count + 1
} else if(similaritiesOfSM[i, j] <= 1) {
hist5Count = hist5Count + 1
}
}
hist1SM[i] <- hist1Count / s
hist2SM[i] <- hist2Count / s
hist3SM[i] <- hist3Count / s
hist4SM[i] <- hist4Count / s
hist5SM[i] <- hist5Count / s
}
# concatenation
feature1OfSM <- cbind(noOfObervationsOfSM, aveOfSimilaritiesOfSM, hist1SM,
hist2SM, hist3SM, hist4SM, hist5SM)
colnames(feature1OfSM) <- c("noOfObervationsOfSM", "aveOfSimilaritiesOfSM", "hist1SM",
"hist2SM", "hist3SM", "hist4SM",
"hist5SM")
##############################
## Type 2 feature of miRNAs ##
##############################
# number of neighbors of miRNAs and similarity values for 10 nearest neighbors
numberOfNeighborsMiRNA <- matrix(rep(0, m), nrow = m, ncol = 1)
similarities10KnnMiRNA <- matrix(rep(0, 10 * m), nrow = m, ncol = 10)
averageOfFeature1MiRNA <- matrix(rep(0, 7 * m), nrow = m, ncol = 7)
weightedAverageOfFeature1MiRNA <- matrix(rep(0, 7 * m), nrow = m, ncol = 7)
similarityGraphMiRNA <- matrix(rep(0, m * m), nrow = m, ncol = m)
meanSimilarityMiRNA <- mean(similaritiesOfMiRNA)
for(i in 1 : m) {
neighborCount = 0 - 1 # similarity between an miRNA and itself is not counted
for(j in 1 : m) {
if(similaritiesOfMiRNA[i, j] >= meanSimilarityMiRNA) {
neighborCount = neighborCount + 1
similarityGraphMiRNA[i, j] = 1
}
}
numberOfNeighborsMiRNA[i] <- neighborCount
similarities10KnnMiRNA[i, ] <- sort(similaritiesOfMiRNA[i, ], decreasing = T, index.return = T)$x[2:11]
indices <- sort(similaritiesOfMiRNA[i, ], decreasing = T, index.return = T)$ix[2:11]
if(neighborCount == 0) {
averageOfFeature1MiRNA[i, ] <- rep(0, 7)
weightedAverageOfFeature1MiRNA[i, ] <- rep(0, 7)
next
} else if(neighborCount == 1) {
averageOfFeature1MiRNA[i, ] <- feature1OfMiRNA[indices[1], ] / 10
weightedAverageOfFeature1MiRNA[i, ] <- feature1OfMiRNA[indices[1], ] * similarities10KnnMiRNA[i, ][1] / 10
next
} else if (neighborCount <= length(indices)) {
indices <- indices[1 : neighborCount]
}
averageOfFeature1MiRNA[i, ] <- apply(feature1OfMiRNA[indices, ], MARGIN = 2, FUN = function(x) sum(x) / 10) # divide by 10 to make the mean calculation fair for those miRNAs with less than 10 neighbors
weightedAverageOfFeature1MiRNA[i, ] <- apply(feature1OfMiRNA[indices, ], MARGIN = 2,
FUN = function(x) sum(x * similarities10KnnMiRNA[i, ][1 : length(indices)]) / 10)
}
# build miRNA similarity graph from the thresholded adjacency matrix
# (edge i-j iff similarity >= global mean; used to derive node-centrality features)
similarityIgraphMiRNA <- graph_from_adjacency_matrix(adjmatrix = similarityGraphMiRNA, mode = "undirected", weighted = NULL,
                                                     diag = T)
# graph-topology features for every miRNA node
betweennessCentralityMiRNA <- betweenness(similarityIgraphMiRNA, directed = F, normalized = T)
closenessCentralityMiRNA <- closeness(similarityIgraphMiRNA, mode = "all")
eigenVectorCentralityMiRNA <- eigen_centrality(similarityIgraphMiRNA, directed = F)$vector
pageRankMiRNA <- page.rank(similarityIgraphMiRNA, directed = F)$vector
# concatenation: type-2 miRNA feature matrix = 1 neighbor count + 10 KNN
# similarities + 7 averaged + 7 weighted-averaged type-1 features + 4
# centralities = 29 columns
feature2OfMiRNA <- cbind(numberOfNeighborsMiRNA, similarities10KnnMiRNA, averageOfFeature1MiRNA, weightedAverageOfFeature1MiRNA,
                         betweennessCentralityMiRNA, closenessCentralityMiRNA, eigenVectorCentralityMiRNA, pageRankMiRNA)
colnames(feature2OfMiRNA) <- c("numberOfNeighborsMiRNA", "knn1SimilarityMiRNA", "knn2SimilarityMiRNA", "knn3SimilarityMiRNA",
                               "knn4SimilarityMiRNA", "knn5SimilarityMiRNA", "knn6SimilarityMiRNA", "knn7SimilarityMiRNA",
                               "knn8SimilarityMiRNA", "knn9SimilarityMiRNA", "knn10SimilarityMiRNA", "aveNoObsMiRNA",
                               "aveOfAveSimilarityMiRNA", "aveHist1MiRNA", "aveHist2MiRNA", "aveHist3MiRNA", "aveHist4MiRNA",
                               "aveHist5MiRNA", "weightedAveNoObsMiRNA", "weightedAveOfAveSimilarityMiRNA", "weightedAveHist1MiRNA",
                               "weightedAveHist2MiRNA", "weightedAveHist3MiRNA", "weightedAveHist4MiRNA", "weightedAveHist5MiRNA",
                               "betweennessCentralityMiRNA", "closenessCentralityMiRNA", "eigenVectorCentralityMiRNA", "pageRankMiRNA")
################################
## Type 2 feature of SMs ##
################################
# number of neighbors of SMs and similarity values for 10 nearest neighbors
numberOfNeighborsSM <- matrix(rep(0, s), nrow = s, ncol = 1)
similarities10KnnSM <- matrix(rep(0, 10 * s), nrow = s, ncol = 10)
averageOfFeature1SM <- matrix(rep(0, 7 * s), nrow = s, ncol = 7)
weightedAverageOfFeature1SM <- matrix(rep(0, 7 * s), nrow = s, ncol = 7)
similarityGraphSM <- matrix(rep(0, s * s), nrow = s, ncol = s)
# an SM counts as a "neighbor" when its similarity is at least the global mean
meanSimilaritySM <- mean(similaritiesOfSM)
for(i in 1 : s) {
  neighborCount = 0 - 1 # similarity between a SM and itself is not counted
  for(j in 1 : s) {
    if(similaritiesOfSM[i, j] >= meanSimilaritySM) {
      neighborCount = neighborCount + 1
      similarityGraphSM[i, j] = 1
    }
  }
  numberOfNeighborsSM[i] <- neighborCount
  # top-10 most similar SMs; entry 1 of the sort is the SM itself, hence 2:11
  similarities10KnnSM[i, ] <- sort(similaritiesOfSM[i, ], decreasing = T, index.return = T)$x[2:11]
  indices <- sort(similaritiesOfSM[i, ], decreasing = T, index.return = T)$ix[2:11]
  if(neighborCount == 0) {
    averageOfFeature1SM[i, ] <- rep(0, 7)
    weightedAverageOfFeature1SM[i, ] <- rep(0, 7)
    next
  } else if(neighborCount == 1) {
    averageOfFeature1SM[i, ] <- feature1OfSM[indices[1], ] / 10
    weightedAverageOfFeature1SM[i, ] <- feature1OfSM[indices[1], ] * similarities10KnnSM[i, ][1] / 10
    next
  } else if (neighborCount <= length(indices)) {
    indices <- indices[1 : neighborCount] # keep only the actual neighbors (at most 10)
  }
  averageOfFeature1SM[i, ] <- apply(feature1OfSM[indices, ], MARGIN = 2, FUN = function(x) sum(x) / 10) # divide by 10 to make the mean calculation fair for those SMs with less than 10 neighbors
  weightedAverageOfFeature1SM[i, ] <- apply(feature1OfSM[indices, ], MARGIN = 2,
                                            FUN = function(x) sum(x * similarities10KnnSM[i, ][1 : length(indices)]) / 10)
}
# build SM similarity graph from the thresholded adjacency matrix
# FIX: removed the redundant `library(igraph)` call here — igraph is attached
# once at the top of the file; re-attaching inside a function is a needless
# side effect on the search path at every call
similarityIgraphSM <- graph_from_adjacency_matrix(adjmatrix = similarityGraphSM, mode = "undirected", weighted = NULL,
                                                  diag = T)
# graph-topology features for every SM node
betweennessCentralitySM <- betweenness(similarityIgraphSM, directed = F, normalized = T)
closenessCentralitySM <- closeness(similarityIgraphSM, mode = "all")
eigenVectorCentralitySM <- eigen_centrality(similarityIgraphSM, directed = F)$vector
pageRankSM <- page.rank(similarityIgraphSM, directed = F)$vector
# concatenation: 29 type-2 columns per SM, mirroring the miRNA layout above
feature2OfSM <- cbind(numberOfNeighborsSM, similarities10KnnSM, averageOfFeature1SM, weightedAverageOfFeature1SM,
                      betweennessCentralitySM, closenessCentralitySM, eigenVectorCentralitySM, pageRankSM)
colnames(feature2OfSM) <- c("numberOfNeighborsSM", "knn1SimilaritySM", "knn2SimilaritySM", "knn3SimilaritySM",
                            "knn4SimilaritySM", "knn5SimilaritySM", "knn6SimilaritySM", "knn7SimilaritySM",
                            "knn8SimilaritySM", "knn9SimilaritySM", "knn10SimilaritySM", "aveNoObsSM",
                            "aveOfAveSimilaritySM", "aveHist1SM", "aveHist2SM", "aveHist3SM", "aveHist4SM",
                            "aveHist5SM", "weightedAveNoObsSM", "weightedAveOfAveSimilaritySM", "weightedAveHist1SM",
                            "weightedAveHist2SM", "weightedAveHist3SM", "weightedAveHist4SM", "weightedAveHist5SM",
                            "betweennessCentralitySM", "closenessCentralitySM", "eigenVectorCentralitySM", "pageRankSM")
###########################################
## Type 3 feature of miRNA-SM pairs ##
###########################################
# matrix factorization
# robust NMF of the association matrix; W holds miRNA latent vectors, H holds
# SM latent vectors. NOTE(review): downstream column names assume 5 latent
# factors — confirm this matches rnmf's default rank.
set.seed(666)
mfMSA <- rnmf(MSA, quiet = T, showprogress = F)
latentVectorsMiRNA <- mfMSA$W
latentVectorsSM <- mfMSA$H
# number of associations between an miRNA and a SM's neighbors
numberOfSMNeighborAssociations <- c(rep(0, m * s))
for(i in 1 : m) {
  for(j in 1 : s) {
    # start at -1 when the pair itself is associated so it is not self-counted
    numberOfAssociations = ifelse(MSA[i, j] == 1, -1, 0)
    SMNeighbors = which(t(similaritiesOfSM[j, ]) >= meanSimilaritySM, arr.ind = F)
    for(k in 1 : length(SMNeighbors)) {
      if(MSA[i, SMNeighbors[k]] == 1) {
        numberOfAssociations = numberOfAssociations + 1
      }
    }
    # stored at the linear pair index (miRNA - 1) * s + SM
    numberOfSMNeighborAssociations[(i-1)*s+j] <- numberOfAssociations
  }
}
# number of associations between a SM and an miRNA's neighbors
numberOfMiRNANeighborAssociations <- c(rep(0, m * s))
for(i in 1 : s) {
  for(j in 1 : m) {
    # start at -1 when the pair itself is associated so it is not self-counted
    numberOfAssociations = ifelse(MSA[j, i] == 1, -1, 0)
    miRNANeighbors = which(t(similaritiesOfMiRNA[j, ]) >= meanSimilarityMiRNA, arr.ind = F)
    for(k in 1 : length(miRNANeighbors)) {
      # FIX: the column index was missing — `MSA[miRNANeighbors[k]]` did linear
      # indexing into column 1 only. Check the association between SM i and
      # each neighbor of miRNA j, mirroring the SM-neighbor loop above.
      if(MSA[miRNANeighbors[k], i] == 1) {
        numberOfAssociations = numberOfAssociations + 1
      }
    }
    # stored at the linear pair index (SM - 1) * m + miRNA
    numberOfMiRNANeighborAssociations[(i-1)*m+j] <- numberOfAssociations
  }
}
# build bipartite MSA graph; igraph orders vertices rows first (miRNAs 1..m),
# then columns (SMs m+1..m+s)
MSAGraph <- graph_from_incidence_matrix(incidence = MSA, directed = F, mode = "total")
miRNAVertices <- 1 : m          # generalized from hard-coded 1:541
smVertices <- (m + 1) : (m + s) # generalized from hard-coded 542:1372
betweennessCentralityMSA <- betweenness(MSAGraph, directed = F, normalized = T)
betweennessCentralityMiRNAInMSA <- betweennessCentralityMSA[miRNAVertices]
betweennessCentralitySMInMSA <- betweennessCentralityMSA[smVertices]
closenessCentralityMSA <- closeness(MSAGraph, mode = "all")
closenessCentralityMiRNAInMSA <- closenessCentralityMSA[miRNAVertices]
closenessCentralitySMInMSA <- closenessCentralityMSA[smVertices]
eigenVectorCentralityMSA <- eigen_centrality(MSAGraph, directed = F)$vector
eigenVectorCentralityMiRNAInMSA <- eigenVectorCentralityMSA[miRNAVertices]
eigenVectorCentralitySMInMSA <- eigenVectorCentralityMSA[smVertices]
pageRankMSA <- page.rank(MSAGraph, directed = F)$vector
pageRankMiRNAInMSA <- pageRankMSA[miRNAVertices]
pageRankSMInMSA <- pageRankMSA[smVertices]
#########################################
## function to combine feature vectors ##
#########################################
# Assemble the full feature matrix for a set of miRNA-SM pairs given their
# linear indices. Indices follow the which(t(MSA) == 1) convention used for
# knownMSAIndices, i.e. index = (miRNA - 1) * s + SM. Returns one row per
# pair: type-1 + type-2 features of the miRNA and the SM, plus type-3
# pair-level features. (Generalized: the SM count `s` replaces the previously
# hard-coded 831.)
BuildFeatures <- function(positiveAndNegativeIndices) {
  # decode linear pair index -> (miRNA, SM)
  positiveAndNegativeMiRNAIndices <- ifelse(positiveAndNegativeIndices %% s == 0, positiveAndNegativeIndices / s,
                                            as.integer(positiveAndNegativeIndices / s) + 1)
  positiveAndNegativeSMIndices <- ifelse(positiveAndNegativeIndices %% s == 0, s, positiveAndNegativeIndices %% s)
  # per-miRNA and per-SM feature rows for every selected pair
  loocvFeature1MiRNA <- feature1OfMiRNA[positiveAndNegativeMiRNAIndices, ]
  loocvFeature2MiRNA <- feature2OfMiRNA[positiveAndNegativeMiRNAIndices, ]
  loocvFeature1SM <- feature1OfSM[positiveAndNegativeSMIndices, ]
  loocvFeature2SM <- feature2OfSM[positiveAndNegativeSMIndices, ]
  # FIX: the two neighbor-association vectors are pair-level (length m * s)
  # and must be indexed by the pair index, not by the miRNA/SM index alone.
  # numberOfSMNeighborAssociations is stored at (miRNA - 1) * s + SM (the same
  # convention as positiveAndNegativeIndices) and
  # numberOfMiRNANeighborAssociations at (SM - 1) * m + miRNA.
  loocvFeature3 <- cbind(latentVectorsMiRNA[positiveAndNegativeMiRNAIndices, ], t(latentVectorsSM[, positiveAndNegativeSMIndices]),
                         numberOfSMNeighborAssociations[positiveAndNegativeIndices],
                         numberOfMiRNANeighborAssociations[(positiveAndNegativeSMIndices - 1) * m + positiveAndNegativeMiRNAIndices],
                         betweennessCentralityMiRNAInMSA[positiveAndNegativeMiRNAIndices], closenessCentralityMiRNAInMSA[positiveAndNegativeMiRNAIndices],
                         eigenVectorCentralityMiRNAInMSA[positiveAndNegativeMiRNAIndices], pageRankMiRNAInMSA[positiveAndNegativeMiRNAIndices],
                         betweennessCentralitySMInMSA[positiveAndNegativeSMIndices], closenessCentralitySMInMSA[positiveAndNegativeSMIndices],
                         eigenVectorCentralitySMInMSA[positiveAndNegativeSMIndices], pageRankSMInMSA[positiveAndNegativeSMIndices])
  colnames(loocvFeature3) <- c("latentVectors1MiRNA", "latentVectors2MiRNA", "latentVectors3MiRNA", "latentVectors4MiRNA", "latentVectors5MiRNA", "latentVectors1SM",
                               "latentVectors2SM", "latentVectors3SM", "latentVectors4SM", "latentVectors5SM", "numberOfSMNeighborAssociations",
                               "numberOfMiRNANeighborAssociations", "betweennessCentralityMiRNAInMSA", "closenessCentralityMiRNAInMSA", "eigenVectorCentralityMiRNAInMSA",
                               "pageRankMiRNAInMSA", "betweennessCentralitySMInMSA", "closenessCentralitySMInMSA", "eigenVectorCentralitySMInMSA",
                               "pageRankSMInMSA")
  loocvFeatureVectors <- cbind(loocvFeature1MiRNA, loocvFeature1SM, loocvFeature2MiRNA, loocvFeature2SM, loocvFeature3)
  return(loocvFeatureVectors)
}
# build training labels: 1 for known associations, 0 for the sampled negatives
trainingLabels <- matrix(c(rep(1, length(knownMSAIndices)), rep(0, length(negativeSampleIndices))), nrow = length(knownMSAIndices) +
                           length(negativeSampleIndices), ncol = 1)
colnames(trainingLabels) <- "labels"
# build loocv training data (first column = label, remaining columns = features)
loocvTrainingFeatureVectors <- cbind(trainingLabels, BuildFeatures(positiveAndNegativeIndices))
# build global loocv testing data (all pairs not known to be associated)
globalLoocvTestingFeatureVectors <- BuildFeatures(globalLoocvTestingIndices)
# build local loocv testing data (unassociated pairs of the negated miRNA only)
localLoocvTestingFeatureVectors <- BuildFeatures(localLoocvTestingIndices)
return(list("loocvTrainingFeatureVectors" = loocvTrainingFeatureVectors,
            "globalLoocvTestingFeatureVectors" = globalLoocvTestingFeatureVectors,
            "localLoocvTestingFeatureVectors" = localLoocvTestingFeatureVectors))
}
############### loops for global and local loocv #####################################################
# record the miRNA count, SM count and known MSA count
m <- 541 # 495
s <- 831 # 383
noOfKnownMSA <- nrow(knownMSA) # 664
# selection of xgboost parameters
# FIX: xgboost's parameter is named `max_depth`; the previous `maxDepth` was
# not a recognized name, so the default tree depth was silently used instead
parameters <- list(eta = 1, max_depth = 6, lambda = 1, gamma = 0)
# placeholder for loocv rankings: one row per negated association
rankings <- matrix(nrow = 0, ncol = 2)
colnames(rankings) <- c("globalRankings", "localRankings")
# loocv loops: hide one known association per iteration, retrain, and rank the
# hidden pair among the candidate (unassociated) pairs, globally and locally
for(negated in 1 : nrow(knownMSA)) {
  # find negated miRNA, SM and their association's index
  negatedMiRNA <- knownMSA$V2[negated]
  negatedSM <- knownMSA$V1[negated]
  negatedIndex <- (negatedMiRNA - 1) * s + negatedSM
  # build MSA matrix with the negated association left out
  loocvKnownMSA <- knownMSA[-negated, ]
  originalMSA <- matrix(data = rep(0, m * s), nrow = m, ncol = s)
  for(i in 1 : m) {
    negatedAssociations <- subset(loocvKnownMSA, V2 == i, select = V1)
    for(j in 1 : s) {
      if (j %in% negatedAssociations$V1) {
        originalMSA[i, j] <- 1
      }
    }
  }
  # randomly select 663 negative samples
  # linear indices follow the (miRNA - 1) * s + SM convention (via t())
  knownMSAIndices <- which(t(originalMSA) == 1, arr.ind = F)
  allIndices <- 1 : (m * s)
  set.seed(666)
  negativeSampleIndices <- sample(allIndices[-knownMSAIndices], size = 663, replace = F)
  # find indices for training data
  positiveAndNegativeIndices <- c(knownMSAIndices, negativeSampleIndices)
  # find indices for global and local testing data
  globalLoocvTestingIndices <- (1 : (m * s))[-knownMSAIndices]
  negatedIndexInGlobalTesting <- which(globalLoocvTestingIndices == negatedIndex)
  negatedIndexInLocalTesting <- which(which(originalMSA[negatedMiRNA,] == 0) == negatedSM)
  # FIX: encode local testing pairs with the same (miRNA - 1) * s + SM
  # convention that BuildFeatures decodes; the previous
  # (SM - 1) * m + negatedMiRNA encoding was decoded as entirely different
  # pairs, so local-LOOCV features belonged to the wrong miRNA-SM pairs
  localLoocvTestingIndices <- (negatedMiRNA - 1) * s + which(originalMSA[negatedMiRNA,] == 0)
  #negatedIndexInLocalTesting <- which(which(MSA[,negatedSM] == 0) == negatedMiRNA)
  #localLoocvTestingIndices <- (which(MSA[,negatedSM] == 0) - 1) * s + negatedSM
  #MSA preprocessing
  MSA<-originalMSA
# for(i in 1:m){
# for(j in 1:s){
# if(originalMSA[i,j]==0){
# ##find similar MiRNA by LINEs
# SimilarityDeacreaseOfMiRNA=sort.int(similaritiesOfMiRNA[i,],decreasing=T,index.return=T)
# count1=0 #count the top similar MiRNAs
# count2=0
# similarMiRNAIndex<-rep(0,3) #preset index of top3 similar miRNA
# similarMiRNASValue<-rep(0,3)#preset similarity value of top3 similar miRNA
# for(k in 1:m){ #find similar MiRNA by LINEs
# # flag=ifelse(SimilarityDeacreaseOfMiRNA$x[k]>0.5,1,-1)
# # while(flag==1&&count1<3){ #only find top3 similar MiRNAs with similarity value above 0.5
# if(originalMSA[k,j]==1&&count1<3){ #find top3 similar SM in all other MiRNAs
# count1=count1+1
# similarMiRNAIndex[count1]=SimilarityDeacreaseOfMiRNA$ix[k]
# similarMiRNASValue[count1]=SimilarityDeacreaseOfMiRNA$x[k]
# }
# else next
# }
# predictSVsepMiRNA<-rep(0,3)
# for(l in 1:3){
# if(is.nan(similarMiRNASValue[l])){similarMiRNASValue[l]=0}
# predictSVsepMiRNA[l]=(similarMiRNASValue[l]^2)/sum(similarMiRNASValue) # predict simi value=(sv1)*w1+(sv2)*w2+(sv3)*w3 P.S. wi=svi/sum[sv]
# if(is.nan(predictSVsepMiRNA[l])){predictSVsepMiRNA[l]=0}
# else next
# }
# predictSVMiRNA=sum(predictSVsepMiRNA)
# ##find similar SM by ROWs##
# SimilarityDeacreaseOfSM=sort.int(similaritiesOfSM[,j],decreasing=T,index.return=T)
# similarSMIndex<-rep(0,3) #preset index of top3 similar SM
# similarSMSValue<-rep(0,3)#preset similarity value of top3 similar SM
# for(k in 1:s){ #find similar SM by ROWs
# # flag=ifelse(SimilarityDeacreaseOfSM$x[k]>0.5,1,-1)
# # while(flag==1&&count2<3){ #only find top3 similar SM with similarity value above 0.5
# if(originalMSA[i,k]==1&&count2<3){ #find top3 similar SM in all other SMs
# count2=count2+1
# similarSMIndex[count2]=SimilarityDeacreaseOfSM$ix[k]
# similarSMSValue[count2]=SimilarityDeacreaseOfSM$x[k]
# }
# else next
# }
# #}
# predictSVsepSM<-rep(0,3)
# for(l in 1:3){
# if(is.nan(similarSMSValue[l])){similarSMSValue[l]=0}
# predictSVsepSM[l]=(similarSMSValue[l]^2)/sum(similarSMSValue) # predict simi value=(sv1)*w1+(sv2)*w2+(sv3)*w3 P.S. wi=svi/sum[sv]
# if(is.nan(predictSVsepSM[l])){predictSVsepSM[l]=0}
# else next
# }
# predictSVSM=sum(predictSVsepSM)
# MSA[i,j]=(predictSVMiRNA+predictSVSM)/2
# }
# else next
# }
# }
##########################
# MSA<-sMSA
# for(i in 1:m){
# for(j in 1:s){
# if(sMSA[i,j]==0){
# ##find similar MiRNA by LINEs
# SimilarityDeacreaseOfMiRNA=sort.int(similaritiesOfMiRNA[i,],decreasing=T,index.return=T)
# count1=0 #count the top similar MiRNAs
# count2=0
# similarMiRNAIndex<-rep(0,3) #preset index of top3 similar miRNA
# similarMiRNASValue<-rep(0,3)#preset similarity value of top3 similar miRNA
# for(k in 1:m){ #find similar MiRNA by LINEs
# # flag=ifelse(SimilarityDeacreaseOfMiRNA$x[k]>0.5,1,-1)
# # while(flag==1&&count1<3){ #only find top3 similar MiRNAs with similarity value above 0.5
# if(sMSA[k,j]==1&&count1<3){ #find top3 similar SM in all other MiRNAs
# count1=count1+1
# similarMiRNAIndex[count1]=SimilarityDeacreaseOfMiRNA$ix[k]
# similarMiRNASValue[count1]=SimilarityDeacreaseOfMiRNA$x[k]
# }
# else next
# }
# predictSVsepMiRNA<-rep(0,3)
# for(l in 1:3){
# if(is.nan(similarMiRNASValue[l])){similarMiRNASValue[l]=0}
# predictSVsepMiRNA[l]=(similarMiRNASValue[l]^2)/sum(similarMiRNASValue) # predict simi value=(sv1)*w1+(sv2)*w2+(sv3)*w3 P.S. wi=svi/sum[sv]
# if(is.nan(predictSVsepMiRNA[l])){predictSVsepMiRNA[l]=0}
# else next
# }
# predictSVMiRNA=sum(predictSVsepMiRNA)
# ##find similar SM by ROWs##
# SimilarityDeacreaseOfSM=sort.int(similaritiesOfSM[,j],decreasing=T,index.return=T)
# similarSMIndex<-rep(0,3) #preset index of top3 similar SM
# similarSMSValue<-rep(0,3)#preset similarity value of top3 similar SM
# for(k in 1:s){ #find similar SM by ROWs
# # flag=ifelse(SimilarityDeacreaseOfSM$x[k]>0.5,1,-1)
# # while(flag==1&&count2<3){ #only find top3 similar SM with similarity value above 0.5
# if(sMSA[i,k]==1&&count2<3){ #find top3 similar SM in all other SMs
# count1=count1+1
# similarSMIndex[count2]=SimilarityDeacreaseOfSM$ix[k]
# similarSMSValue[count2]=SimilarityDeacreaseOfSM$x[k]
# }
# else next
# }
# #}
# predictSVsepSM<-rep(0,3)
# for(l in 1:3){
# if(is.nan(similarSMSValue[l])){similarSMSValue[l]=0}
# predictSVsepSM[l]=(similarSMSValue[l]^2)/sum(similarSMSValue) # predict simi value=(sv1)*w1+(sv2)*w2+(sv3)*w3 P.S. wi=svi/sum[sv]
# if(is.nan(predictSVsepSM[l])){predictSVsepSM[l]=0}
# else next
# }
# predictSVSM=sum(predictSVsepSM)
# MSA[i,j]=(predictSVMiRNA+predictSVSM)/2
# }
# else next
# }
# }
##########################
  # build training and testing data
  trainingAndTestingData <- BuildTrainingAndTestingData(MSA, similaritiesOfMiRNA, similaritiesOfSM, m, s, knownMSAIndices,
                                                       negativeSampleIndices, positiveAndNegativeIndices, globalLoocvTestingIndices,
                                                       localLoocvTestingIndices)
  # fit xgboost: one boosting round, raw logit output (column 1 = labels)
  xgboostLoocv <- xgboost(data = trainingAndTestingData$loocvTrainingFeatureVectors[,-1], booster = "gbtree",
                          label = trainingAndTestingData$loocvTrainingFeatureVectors[,1], params = parameters, nthread = 2, nrounds = 1,
                          objective = "binary:logitraw",verbose=2)
  # prediction: association scores for every candidate pair
  predictedWeightsGlobal <- predict(xgboostLoocv, trainingAndTestingData$globalLoocvTestingFeatureVectors)
  predictedWeightsLocal <- predict(xgboostLoocv, trainingAndTestingData$localLoocvTestingFeatureVectors)
  # build rankings
  ########
  #average_ranking
  ########
  # globalscoreDicrease=sort.int(predictedWeightsGlobal, decreasing = T, index.return = T)
  # localscoreDicrease=sort.int(predictedWeightsLocal, decreasing = T, index.return = T)
  # globalRankingOfNegated <- which(globalscoreDicrease$ix == negatedIndexInGlobalTesting)
  # localRankingOfNegated <- which(localscoreDicrease$ix == negatedIndexInLocalTesting)
  # globalfinalrank <- mean(which(globalscoreDicrease$x==globalscoreDicrease$x[globalRankingOfNegated]))
  # localfinalrank <- mean(which(localscoreDicrease$x==localscoreDicrease$x[localRankingOfNegated]))
  # rankings <- rbind(rankings, c(globalfinalrank, localfinalrank))
  ############
  #accurate ranking
  ############
  # position of the hidden association when candidates are sorted by
  # decreasing predicted score (rank 1 = best possible recovery)
  globalRankingOfNegated <- which(sort.int(predictedWeightsGlobal, decreasing = T, index.return = T)$ix == negatedIndexInGlobalTesting)
  localRankingOfNegated <- which(sort.int(predictedWeightsLocal, decreasing = T, index.return = T)$ix == negatedIndexInLocalTesting)
  rankings <- rbind(rankings, c(globalRankingOfNegated, localRankingOfNegated))
}
# write rankings to disk
# FIX: file extension corrected from ".cvs" to ".csv"
write.table(rankings, file = "./global_and_local_loocv_rankings.csv", row.names = F)
|
#' Train PLS for train dataset by cross-validation
#'
#' Train PLS for train dataset by cross-validation. The preprocessing method will be optimized automatically.
#' However, the number of latent variables has to be determined manually. Planning to add variable reduction in the future.
#'
#' @param x predictor matrix
#' @param y prediction target vector
#' @param maxncomp maximum ncomp for calculation
#' @param cvsegments refer to mvrCv's segments argument
#' @param round number of digits to round the reported statistics to
#' @param ncomp `auto`,`manual` or `fixed`
#' @param fixedncomp fixed numerical value (required when `ncomp = "fixed"`)
#' @param threshold threshold for selecting ncomp
#' @param saveModel if `TRUE`, also return the list of fitted models
#' @param plotting if `TRUE`, plot diagnostics for the best model
#'
#' @import pls
# @import gridGraphics
# @import gridExtra
#' @importFrom grid viewport
# @import EEM
#'
#' @export
trainPLS_general <- function(x, y, maxncomp = 20, cvsegments = 10, round = 2,
                             ncomp = c("auto", "manual", "fixed"), fixedncomp = NULL,
                             threshold = 0.02, saveModel = FALSE, plotting = TRUE){
  ## set up
  x_varname <- substitute(x)  # captured for plot annotation only
  y_varname <- substitute(y)
  result_list <- list()
  model <- list()
  if (length(ncomp) == 3) ncomp <- "auto"  # no explicit choice -> default "auto"
  if (!is.matrix(x)) x <- as.matrix(x)
  if (!is.matrix(y)) y <- as.matrix(y)
  if (maxncomp > nrow(x)) maxncomp <- nrow(x) - 1  # cannot exceed n - 1 components
  # FIX: fail fast with an informative error. The previous code used `break`
  # inside calStats, which is invalid outside a loop and itself errors at
  # run time when ncomp = "fixed" without fixedncomp.
  if (identical(ncomp, "fixed") && is.null(fixedncomp)) {
    stop("'fixedncomp' must be supplied when ncomp = \"fixed\"", call. = FALSE)
  }
  ## creating a function to select ncomp and return statistical values from the model
  calStats <- function(model){
    ## selecting ncomp depending on each problem.
    if (ncomp == "auto"){
      ncomp <- find_ncomp2(model, threshold = threshold)
    } else if (ncomp == "fixed"){
      ncomp <- fixedncomp  # validated above
    } else {
      # "manual": show the RMSECV curve and ask the user interactively
      plot(model, ncomp = 1:maxncomp, plottype = "validation", type = "b", main = paste("Model", r), cex.lab = 1.3, ylab = "RMSECV", legendpos = "topright")
      cat("Model", r, ": ")
      ncomp <- as.numeric(readline("Select ncomp: "))
    }
    # one summary row: preprocessing label, number of variables, chosen ncomp,
    # and calibration / cross-validation statistics rounded to `round` digits
    localresult <- data.frame(preprocessing = pre,
                              nvar = dim(model$model[[2]])[2],
                              ncomp = ncomp,
                              R2C = round(getR2(model, ncomp = ncomp, estimate = "train", showprint = FALSE), round),
                              RMSEC = round(getRMSE(model, ncomp = ncomp, estimate = "train", showprint = FALSE), round),
                              R2CV = round(getR2(model, ncomp = ncomp, estimate = "CV", showprint = FALSE), round),
                              RMSECV = round(getRMSE(model, ncomp = ncomp, estimate = "CV", showprint = FALSE), round))
    return(localresult)
  }
  ## building models
  # model 1: mean-centering
  r <- 1 # row number
  pre <- "Mean-centering"
  model[[r]] <- plsr(y ~ x, ncomp = maxncomp, validation = "CV", method = "oscorespls", segments = cvsegments)
  result_list[[r]] <- calStats(model[[r]])
  # model 2: norm + mean-centering
  r <- 2 # row number
  pre <- "Norm + Mean-centering"
  model[[r]] <- plsr(y ~ normalize(x), ncomp = maxncomp, validation = "CV", method = "oscorespls", segments = cvsegments)
  result_list[[r]] <- calStats(model[[r]])
  # model 3: autoscale
  r <- 3 # row number
  pre <- "Autoscale"
  index <- which(colSums(x) == 0)
  if (length(index) == 0) x_nozero <- x else x_nozero <- x[,-index] # get rid of columns with sum = 0
  model[[r]] <- plsr(y ~ x_nozero, ncomp = maxncomp, validation = "CV", method = "oscorespls", segments = cvsegments, scale = TRUE)
  result_list[[r]] <- calStats(model[[r]])
  result <- do.call(rbind.data.frame, result_list)
  if (saveModel) output <- list(result = result, model_list = model) else {
    output <- result
  }
  # plot
  if (plotting){ ## error now. solve this later
    # find best model (lowest cross-validated RMSE)
    best_model_index <- which.min(result$RMSECV)
    best_model <- model[[best_model_index]]
    best_model_ncomp <- result$ncomp[best_model_index]
    # plot layout: a slim title panel above two diagnostic panels
    default_mar <- c(5, 4, 4, 2) + 0.1
    layout(matrix(c(1,2,3),3,1), heights = c(1,6,6))
    par(mar = c(0.5, 4.5, 0.5, 0.5))
    frame()
    title_text <- paste0("x: ", deparse(x_varname), " (nvar=", result$nvar[best_model_index], ") y: ",
                         deparse(y_varname), "\nPreprocessing: ",
                         result$preprocessing[best_model_index])
    mtext(title_text, side=3, outer=TRUE, line=-3)
    par(mar = default_mar)
    # 1st plot
    plot_ncomp(best_model, ncomp = best_model_ncomp, cex.lab = 1)
    # 2nd plot
    plsplot(best_model, ncomp = best_model_ncomp, estimate = "CV", cex.lab = 1)
    # reset layout
    layout(matrix(1))
  }
  return(output)
}
#' Train PLS for train dataset by cross-validation
#'
#' Train PLS for train dataset by cross-validation. The preprocessing method will be optimized automatically.
#' However, the number of latent variables has to be determined manually. Planning to add variable reduction in the future.
#'
#' @param x predictor matrix
#' @param y prediction target vector
#' @param maxncomp maximum ncomp for calculation
#' @param cvsegments refer to mvrCv's segments argument
#' @param round number of digits to round the reported statistics to
#' @param ncomp `auto`,`manual` or `fixed`
#' @param fixedncomp fixed numerical value (required when `ncomp = "fixed"`)
#' @param threshold threshold for selecting ncomp
#' @param saveModel if `TRUE`, also return the list of fitted models
#' @param plotting if `TRUE`, plot diagnostics for the best model
#'
#' @import pls
# @import gridGraphics
# @import gridExtra
#' @importFrom grid viewport
# @import EEM
#'
#' @export
trainPLS_general <- function(x, y, maxncomp = 20, cvsegments = 10, round = 2,
                             ncomp = c("auto", "manual", "fixed"), fixedncomp = NULL,
                             threshold = 0.02, saveModel = FALSE, plotting = TRUE){
  ## set up
  x_varname <- substitute(x)  # captured for plot annotation only
  y_varname <- substitute(y)
  result_list <- list()
  model <- list()
  if (length(ncomp) == 3) ncomp <- "auto"  # no explicit choice -> default "auto"
  if (!is.matrix(x)) x <- as.matrix(x)
  if (!is.matrix(y)) y <- as.matrix(y)
  if (maxncomp > nrow(x)) maxncomp <- nrow(x) - 1  # cannot exceed n - 1 components
  # FIX: fail fast with an informative error. The previous code used `break`
  # inside calStats, which is invalid outside a loop and itself errors at
  # run time when ncomp = "fixed" without fixedncomp.
  if (identical(ncomp, "fixed") && is.null(fixedncomp)) {
    stop("'fixedncomp' must be supplied when ncomp = \"fixed\"", call. = FALSE)
  }
  ## creating a function to select ncomp and return statistical values from the model
  calStats <- function(model){
    ## selecting ncomp depending on each problem.
    if (ncomp == "auto"){
      ncomp <- find_ncomp2(model, threshold = threshold)
    } else if (ncomp == "fixed"){
      ncomp <- fixedncomp  # validated above
    } else {
      # "manual": show the RMSECV curve and ask the user interactively
      plot(model, ncomp = 1:maxncomp, plottype = "validation", type = "b", main = paste("Model", r), cex.lab = 1.3, ylab = "RMSECV", legendpos = "topright")
      cat("Model", r, ": ")
      ncomp <- as.numeric(readline("Select ncomp: "))
    }
    # one summary row: preprocessing label, number of variables, chosen ncomp,
    # and calibration / cross-validation statistics rounded to `round` digits
    localresult <- data.frame(preprocessing = pre,
                              nvar = dim(model$model[[2]])[2],
                              ncomp = ncomp,
                              R2C = round(getR2(model, ncomp = ncomp, estimate = "train", showprint = FALSE), round),
                              RMSEC = round(getRMSE(model, ncomp = ncomp, estimate = "train", showprint = FALSE), round),
                              R2CV = round(getR2(model, ncomp = ncomp, estimate = "CV", showprint = FALSE), round),
                              RMSECV = round(getRMSE(model, ncomp = ncomp, estimate = "CV", showprint = FALSE), round))
    return(localresult)
  }
  ## building models
  # model 1: mean-centering
  r <- 1 # row number
  pre <- "Mean-centering"
  model[[r]] <- plsr(y ~ x, ncomp = maxncomp, validation = "CV", method = "oscorespls", segments = cvsegments)
  result_list[[r]] <- calStats(model[[r]])
  # model 2: norm + mean-centering
  r <- 2 # row number
  pre <- "Norm + Mean-centering"
  model[[r]] <- plsr(y ~ normalize(x), ncomp = maxncomp, validation = "CV", method = "oscorespls", segments = cvsegments)
  result_list[[r]] <- calStats(model[[r]])
  # model 3: autoscale
  r <- 3 # row number
  pre <- "Autoscale"
  index <- which(colSums(x) == 0)
  if (length(index) == 0) x_nozero <- x else x_nozero <- x[,-index] # get rid of columns with sum = 0
  model[[r]] <- plsr(y ~ x_nozero, ncomp = maxncomp, validation = "CV", method = "oscorespls", segments = cvsegments, scale = TRUE)
  result_list[[r]] <- calStats(model[[r]])
  result <- do.call(rbind.data.frame, result_list)
  if (saveModel) output <- list(result = result, model_list = model) else {
    output <- result
  }
  # plot
  if (plotting){ ## error now. solve this later
    # find best model (lowest cross-validated RMSE)
    best_model_index <- which.min(result$RMSECV)
    best_model <- model[[best_model_index]]
    best_model_ncomp <- result$ncomp[best_model_index]
    # plot layout: a slim title panel above two diagnostic panels
    default_mar <- c(5, 4, 4, 2) + 0.1
    layout(matrix(c(1,2,3),3,1), heights = c(1,6,6))
    par(mar = c(0.5, 4.5, 0.5, 0.5))
    frame()
    title_text <- paste0("x: ", deparse(x_varname), " (nvar=", result$nvar[best_model_index], ") y: ",
                         deparse(y_varname), "\nPreprocessing: ",
                         result$preprocessing[best_model_index])
    mtext(title_text, side=3, outer=TRUE, line=-3)
    par(mar = default_mar)
    # 1st plot
    plot_ncomp(best_model, ncomp = best_model_ncomp, cex.lab = 1)
    # 2nd plot
    plsplot(best_model, ncomp = best_model_ncomp, estimate = "CV", cex.lab = 1)
    # reset layout
    layout(matrix(1))
  }
  return(output)
}
## This is HISAT2 aligner
# Differential-expression pipeline for HISAT2/featureCounts output, run three
# ways (DESeq2, edgeR, limma-voom) over the same count matrix.
# Load all required packages
library("DESeq2")
library("edgeR")
library("limma")
library("sva")
library("data.table")
library("pheatmap")
library("RColorBrewer")
library("ggplot2")
library("affy")
library("ggfortify")
library("Hmisc")
library("corrplot")
library("reshape2")
library("geneplotter")
library("VennDiagram")
# Set working directory
# NOTE(review): a hard-coded absolute setwd() makes this script non-portable;
# consider relative paths or a project-root variable
setwd("C:/Users/Albert Doughan/Desktop/My Project/R Datasets/Hisat/")
dir = "C:/Users/Albert Doughan/Desktop/My Project/R Datasets/Hisat/"
# Import metadata (columns used later: SampleID, Condition)
metadatah = read.csv(file= "metadata.csv", header=T, sep = ",")
head(metadatah)
# Reading counts data from featureCounts
counts = read.csv(file = "hisat2_count.csv", header = T, sep = ",")
head(counts)
# Remove the Gene ID column
countdata <- counts[, -c(1)]
# Making "Geneid" column the rownames
rownames(countdata) <- counts[,1]
head(countdata)
# Check if the metadata and samples have the same names
# (all TRUE means the count columns line up with the metadata rows)
table(colnames(countdata)==metadatah$SampleID)
##########################################
# Running differential expression analysis with DESeq2
##########################################
# Create the DESeqDataSet object from Matrix of counts and metadata
dds <- DESeqDataSetFromMatrix(countData = round(countdata),
                              colData = metadatah,
                              design = ~Condition)
nrow(dds)
# Hidden batch effect detection and removal
dds <- DESeq(dds)
dat1 <- counts(dds, normalized=TRUE)
idx <- rowMeans(dat1) > 1  # drop near-empty rows before running SVA
dat1 <- dat1[idx,]
# full model (condition) vs. null model (intercept only) for surrogate variables
mod <- model.matrix(~ as.factor(Condition), colData(dds))
mod0 <- model.matrix(~ 1, colData(dds))
# Using the SVA package: estimate 2 surrogate variables
svseq <- svaseq(dat1, mod, mod0, n.sv = 2)
ddssva <- dds
ddssva$SV1 <- svseq$sv[,1]
ddssva$SV2 <- svseq$sv[,2]
design(ddssva) <- ~ SV1 + SV2 + Condition  # adjust the design for the SVs
# Visualize the batches: surrogate-variable values per condition group
par(mfrow=c(2,1),oma = c(0, 0, 2, 0))
stripchart(svseq$sv[,1] ~ dds$Condition,vertical=TRUE,main="SV1")
abline(h=0)
stripchart(svseq$sv[,2] ~ dds$Condition,vertical=TRUE,main="SV2")
abline(h=0)
# Remove Genes with low counts
# Here we perform a minimal pre-filtering to keep only rows that have more
# than 100 reads in total across all samples.
dds1 <- ddssva[rowSums(counts(ddssva)) > 100,]
nrow(dds1)
# Run DESeq function on the data to perform differential gene expression analysis
dds1 <- DESeq(dds1)
head(assay(dds1))
# Building out results table
res_table <- results(dds1)
summary(res_table)
# Working with alpha 0.05
res2 <- results(dds1, alpha=0.05)
summary(res2)
# How many adjusted p-values were less than 0.05?
sum(res2$padj < 0.05, na.rm=TRUE)
# We order our results table by the smallest p value:
res_small_p <- res2[order(res2$pvalue),]
# Select genes with adjusted (BH) p-value less than 0.05
res_sig <- subset(res_small_p, padj < 0.05)
dim(res_sig)
# Write final list to file
write.csv(as.data.frame(res_sig), "hisat_deseq_project.csv")
#####################
## DATA EXPLORATION
#####################
# Principal component analysis on the raw (pre-transformation) counts
PCAdata <- prcomp(t(assay(dds1)))
autoplot(PCAdata, data = metadatah, colour = "Condition",label = FALSE, size = 5)+
  theme_bw() +
  labs(colour="Condition")+
  theme(legend.title = element_text(size = 21),
        legend.text = element_text(size = 20),
        axis.title.x = element_text(size=24),
        axis.title.y = element_text(size=24),
        axis.text=element_text(size=21))
# Hierarchical clustering
clusters2 <- hclust(dist(t(assay(dds1))), method ="ward.D")
plot(clusters2, labels = FALSE)
# Density plot
# FIX: axis labels must be passed as named arguments. The previous
# xlab("Density")/ylab("Counts") called ggplot2's label constructors and
# passed the resulting objects positionally to a base-graphics function.
# Labels are also corrected: counts on the x axis, density on the y axis.
plotDensity(assay(dds1), col=1:24, lwd=2, lty=1, xlab="Counts", ylab="Density")
# Data normalization (variance-stabilizing transformation)
# NOTE(review): this assignment shadows DESeq2::vst() in the global
# environment; a different object name would be clearer
vst = vst(dds1, blind=FALSE)
# Visualize transformed data
par(mfrow=c(1,2))
plot(assay(dds1))
plot(assay(vst))
# Principal component analysis after normalization
PCAdata1 <- prcomp(t(assay(vst)))
autoplot(PCAdata1, data = metadatah, colour = "Condition",label = FALSE, size = 5)+
  theme_bw() +
  labs(colour="Condition")+
  theme(legend.title = element_text(size = 21),
        legend.text = element_text(size = 20),
        axis.title.x = element_text(size=24),
        axis.title.y = element_text(size=24),
        axis.text=element_text(size=21))
# Hierarchical clustering after normalization
clusters1 <- hclust(dist( t( assay(vst) ) ),method ="ward.D")
# FIX: argument name spelled out ("labels"); the previous "label" only worked
# through partial matching
plot(clusters1, labels = FALSE)
# Density plot after normalization
# FIX: named xlab/ylab arguments (same defect as the raw-data density plot)
plotDensity(assay(vst), lwd=2, lty=1, xlab="Counts", ylab="Density", main = "Density plot")
# Heatmap of sample-to-sample distances
sampleDists <- dist(t(assay(vst)))
sampleDistMatrix <- as.matrix( sampleDists )
rownames(sampleDistMatrix) <- paste(vst$SampleID, vst$Condition, sep="-" )
colnames(sampleDistMatrix) <- paste(dds1$SampleID, dds1$Condition, sep="-")
colors <- colorRampPalette(rev(brewer.pal(9, "Reds")))(255)
pheatmap(sampleDistMatrix,
         clustering_distance_rows=sampleDists,
         clustering_distance_cols=sampleDists,
         col=colors, main = "Sample Distance Matrix ")
# We can use plotCounts fxn to compare the normalized counts
# between treated and control groups for our top 6 genes
par(mfrow=c(2,3))
plotCounts(dds1, gene="ENSG00000198648", intgroup="Condition")
plotCounts(dds1, gene="ENSG00000152642", intgroup="Condition")
plotCounts(dds1, gene="ENSG00000154165", intgroup="Condition")
plotCounts(dds1, gene="ENSG00000196549", intgroup="Condition")
plotCounts(dds1, gene="ENSG00000113916", intgroup="Condition")
plotCounts(dds1, gene="ENSG00000163453", intgroup="Condition")
## Spearman's correlation plots
## Compute correlation matrix
resu <- cor(assay(vst), method = "spearman", use = "complete.obs")
round(resu, 2)
## compute the significance levels
# NOTE(review): `resa` is computed but never used downstream
resa = rcorr(assay(vst), type = "spearman")
# Heatmap of the Spearman sample-sample correlation matrix
col<- colorRampPalette(c("blue", "white", "red"))(100)
heatmap(x = resu, col = col, symm = TRUE)
## MA-plot of the DESeq2 results (significant genes highlighted)
plotMA(res_table, colSig = "red3", colNonSig = "gray32")
##########################################
# Running differential expression analysis with edgeR
##########################################
# Creating the edgeR gene list
y <- DGEList(counts=countdata,group=factor(metadatah$Condition))
dim(y)
# Remove low count genes
keep <- filterByExpr(y)
y <- y[keep, , keep.lib.sizes=FALSE]
dim(y)
# After filtering, it is a good idea to reset the library sizes:
y$samples$lib.size <- colSums(y$counts)
y$samples
# Normalizing the data (TMM normalization factors)
y <- calcNormFactors(y)
y$samples
# Plot MDS
# NOTE(review): the colour vector assumes exactly 50 + 50 samples — confirm
col=c(rep("black",50), rep("red",50))
plotMDS(y, labels = NULL, pch = 16, cex = 1, col = col)
legend("top", c("Normal","Disease"), pch = 16, col = c("black","red"))
# The Design matrix (no intercept; one column per group)
group = y$samples$group
design <- model.matrix(~0+group)
colnames(design) <- levels(group)
design
# Estimating the Dispersion
y <- estimateDisp(y, design, robust=TRUE)
y$common.dispersion
# Plot the dispersion
plotBCV(y)
# Model the quasi-likelihood dispersions of variation within model
fit <- glmQLFit(y, design, robust=TRUE)
head(fit$coefficients)
# Testing for differential expression
# Set the contrast to be tested: Disease vs Normal
DvsN <- makeContrasts(Disease-Normal, levels=design)
res <- glmQLFTest(fit, contrast=DvsN)
topTags(res)
# The total number of differentially expressed genes at FDR < 0.05 is:
is.de <- decideTestsDGE(res)
summary(is.de)
## Plot log-fold change against log-counts per million, with DE genes highlighted
plotMD(res, status = is.de, cex = .4)
abline(h=c(-1, 1), col="black")
# Order by FDR-corrected p-values
deg <- as.data.frame(topTags(res, n=Inf))
deg = as.data.frame(deg)
order_res <- deg[order(deg$FDR),]
dim(order_res)
# Select only genes with FDR-corrected p-values less than 0.05
sig_deg <- subset(order_res, FDR < 0.05)
dim(sig_deg)
# Write final list to file
write.csv(as.data.frame(sig_deg), "hisat_edger_project.csv")
##########################################
# Running differential expression analysis with limma+voom
##########################################
#Creating the gene list through edgeR
dge <- DGEList(countdata)
dim(dge)
# Transformations from the raw-scale
cpm <- cpm(dge)
lcpm <- cpm(dge, log=TRUE)
L <- mean(dge$samples$lib.size) * 1e-6
M <- median(dge$samples$lib.size) * 1e-6
c(L, M)
# Number of genes with 0 count in all samples.
# Compare against the actual number of samples rather than hard-coding 100,
# so the check stays correct if the cohort size changes.
table(rowSums(dge$counts == 0) == ncol(dge$counts))
# Removing genes that are lowly expressed
keep.exprs <- filterByExpr(dge, group=metadatah$Condition)
dge <- dge[keep.exprs,, keep.lib.sizes=FALSE]
dim(dge)
# Plot density before and after filtering
cpm1 <- cpm(dge)
lcpm1 <- cpm(dge, log=TRUE)
lcpm.cutoff <- log2(10/M + 2/L)
plotDensity(lcpm, main = "A. Raw data", xlab = "Log CPM")
abline(v=lcpm.cutoff, lty=3)
plotDensity(lcpm1, main = "B. Filtered data", xlab = "Log CPM")
abline(v=lcpm.cutoff, lty=3)
# Data normalization
dge1 <- calcNormFactors(dge, method = "TMM")
dge1$samples$norm.factors
# Box plots before and after normalization
nsamples <- ncol(dge1)
col <- brewer.pal(12, "Paired")
un <- cpm(dge, log=TRUE)
n <- cpm(dge1, log=TRUE)
boxplot(un, main = "A: Unnormalized data", col=col)
boxplot(n, main = "B: Normalized data", col=col)
# Unsupervised clustering
col=c(rep("blue",50), rep("red",50))
plotMDS(dge1, labels = NULL, pch = 16, cex = 1, col = col)
legend("top", c("Normal","Disease"), pch = 16, col = c("blue","red"))
# Create a design matrix
design <- cbind("1"=1,"1vs2"=rep(c(1,2), each = nrow(metadatah)/2))
# Running the limma voom function
v <- voom(dge1, design, plot=TRUE, normalize="quantile")
# After this, the usual limma pipelines for differential expression is be applied.
fit <- lmFit(v, design)
fit <- eBayes(fit)
res <- topTable(fit, coef=ncol(design),number=Inf)
summary(decideTests(fit))
res_pvalue <- as.data.frame(subset(res, adj.P.Val < 0.05))
dim(res_pvalue)
# We order our results table by the smallest p value:
order_res <- res_pvalue[order(res_pvalue$adj.P.Val),]
dim(order_res)
# Display the top 6 most significant genes
topTreat(fit, coef=1, n=6)
# Write final list to file
write.csv(as.data.frame(order_res), "hisat_limma_project.csv")
## All HISAT common
# Read the per-tool DEG lists and keep only genes found by all three methods.
des1 <- read.csv("hisat_deseq_project.csv", header = TRUE)
head(des1)
edg1 <- read.csv("hisat_edger_project.csv", header = TRUE)
head(edg1)
lim1 <- read.csv("hisat_limma_project.csv", header = TRUE)
head(lim1)
inter1 <- intersect(intersect(des1$X, edg1$X), lim1$X)
length(inter1)
write.csv(inter1, "hisat_common_project.csv")
## All Hisat common
x = list(des1$X, edg1$X, lim1$X)
myCol <- brewer.pal(3, "Pastel2")
# Chart
venn.diagram(
x = x,
category.names = c("DESeq2" , "edgeR " , "limma"),
filename = 'hisat_common.png',
output=TRUE,
# Output features
imagetype="png" ,
height = 480 ,
width = 480 ,
resolution = 300,
compression = "lzw",
# Circles
lwd = 2,
lty = 'blank',
fill = c("#999999", "#E69F00", "#56B4E9"),
# Numbers
cex = .6,
#fontface = "bold",
fontfamily = "sans",
# Set names
cat.cex = 0.6,
cat.fontface = "bold",
cat.default.pos = "outer",
cat.pos = c(-27, 27, 135),
cat.dist = c(0.055, 0.055, 0.085),
cat.fontfamily = "sans",
rotation = 1
)
| /all_hisat_project.R | no_license | luppo1/KNUST-masters-project | R | false | false | 11,379 | r | ## This is HISAT2 aligner
# Load all required packages
library("DESeq2")
library("edgeR")
library("limma")
library("sva")
library("data.table")
library("pheatmap")
library("RColorBrewer")
library("ggplot2")
library("affy")
library("ggfortify")
library("Hmisc")
library("corrplot")
library("reshape2")
library("geneplotter")
library("VennDiagram")
# Set working directory
setwd("C:/Users/Albert Doughan/Desktop/My Project/R Datasets/Hisat/")
dir = "C:/Users/Albert Doughan/Desktop/My Project/R Datasets/Hisat/"
# Import metadata
metadatah = read.csv(file= "metadata.csv", header=T, sep = ",")
head(metadatah)
# Reading counts data from featureCounts
counts = read.csv(file = "hisat2_count.csv", header = T, sep = ",")
head(counts)
# Remove the Gene ID column
countdata <- counts[, -c(1)]
# Making "Geneid" column the rownames
rownames(countdata) <- counts[,1]
head(countdata)
# Check if the metadata and samples have the same names
table(colnames(countdata)==metadatah$SampleID)
##########################################
# Running differential expression analysis with DESeq2
##########################################
# Create the DESeqDataSet object from Matrix of counts and metadata
dds <- DESeqDataSetFromMatrix(countData = round(countdata),
colData = metadatah,
design = ~Condition)
nrow(dds)
# Hidden batch effect detection and removal
dds <- DESeq(dds)
dat1 <- counts(dds, normalized=TRUE)
idx <- rowMeans(dat1) > 1
dat1 <- dat1[idx,]
mod <- model.matrix(~ as.factor(Condition), colData(dds))
mod0 <- model.matrix(~ 1, colData(dds))
# Using the SVA package
svseq <- svaseq(dat1, mod, mod0, n.sv = 2)
ddssva <- dds
ddssva$SV1 <- svseq$sv[,1]
ddssva$SV2 <- svseq$sv[,2]
design(ddssva) <- ~ SV1 + SV2 + Condition
# Visualize the batches
par(mfrow=c(2,1),oma = c(0, 0, 2, 0))
stripchart(svseq$sv[,1] ~ dds$Condition,vertical=TRUE,main="SV1")
abline(h=0)
stripchart(svseq$sv[,2] ~ dds$Condition,vertical=TRUE,main="SV2")
abline(h=0)
# Remove Genes with low counts
# Here we perform a minimal pre-filtering to keep only rows that have at least 10 reads total.
dds1 <- ddssva[rowSums(counts(ddssva)) > 100,]
nrow(dds1)
# Run DESeq function on the data to perform differential gene expression analysis
dds1 <- DESeq(dds1)
head(assay(dds1))
# Building out results table
res_table <- results(dds1)
summary(res_table)
# Working with alpha 0.05
res2 <- results(dds1, alpha=0.05)
summary(res2)
# How many adjusted p-values were less than 0.05?
sum(res2$padj < 0.05, na.rm=TRUE)
# We order our results table by the smallest p value:
res_small_p <- res2[order(res2$pvalue),]
# Select genes with p less than 0.05
res_sig <- subset(res_small_p, padj < 0.05)
dim(res_sig)
# Write final list to file
write.csv(as.data.frame(res_sig), "hisat_deseq_project.csv")
#####################
## DATA EXPLORATION
#####################
# Principal component analysis
PCAdata <- prcomp(t(assay(dds1)))
autoplot(PCAdata, data = metadatah, colour = "Condition",label = FALSE, size = 5)+
theme_bw() +
labs(colour="Condition")+
theme(legend.title = element_text(size = 21),
legend.text = element_text(size = 20),
axis.title.x = element_text(size=24),
axis.title.y = element_text(size=24),
axis.text=element_text(size=21))
# Hierarchical clustering
clusters2 <- hclust(dist(t(assay(dds1))), method ="ward.D")
plot(clusters2, labels = FALSE)
# Density plot
plotDensity(assay(dds1), col=1:24,lwd=2,lty=1,xlab("Density"),ylab("Counts"))
# Data normalization
vst = vst(dds1, blind=FALSE)
# Visualize transformed data
par(mfrow=c(1,2))
plot(assay(dds1))
plot(assay(vst))
# Principal component analysis after normalization
PCAdata1 <- prcomp(t(assay(vst)))
autoplot(PCAdata1, data = metadatah, colour = "Condition",label = FALSE, size = 5)+
theme_bw() +
labs(colour="Condition")+
theme(legend.title = element_text(size = 21),
legend.text = element_text(size = 20),
axis.title.x = element_text(size=24),
axis.title.y = element_text(size=24),
axis.text=element_text(size=21))
# Hierarchical clustering after normalization
clusters1 <- hclust(dist( t( assay(vst) ) ),method ="ward.D")
plot(clusters1, label = FALSE)
# Density plot after normalization
plotDensity(assay(vst), lwd=2,lty=1,xlab("Density"),ylab("Counts"), main = "Density plot")
# Heatmap of sample-to-sample distances
sampleDists <- dist(t(assay(vst)))
sampleDistMatrix <- as.matrix( sampleDists )
rownames(sampleDistMatrix) <- paste(vst$SampleID, vst$Condition, sep="-" )
colnames(sampleDistMatrix) <- paste(dds1$SampleID, dds1$Condition, sep="-")
colors <- colorRampPalette(rev(brewer.pal(9, "Reds")))(255)
pheatmap(sampleDistMatrix,
clustering_distance_rows=sampleDists,
clustering_distance_cols=sampleDists,
col=colors, main = "Sample Distance Matrix ")
# We can use plotCounts fxn to compare the normalized counts
#between treated and control groups for our top 6 genes
par(mfrow=c(2,3))
plotCounts(dds1, gene="ENSG00000198648", intgroup="Condition")
plotCounts(dds1, gene="ENSG00000152642", intgroup="Condition")
plotCounts(dds1, gene="ENSG00000154165", intgroup="Condition")
plotCounts(dds1, gene="ENSG00000196549", intgroup="Condition")
plotCounts(dds1, gene="ENSG00000113916", intgroup="Condition")
plotCounts(dds1, gene="ENSG00000163453", intgroup="Condition")
## Spearman's correlation plots
## Compute correlation matrix
resu <- cor(assay(vst), method = "spearman", use = "complete.obs")
round(resu, 2)
## compute the significance levels
resa = rcorr(assay(vst), type = "spearman")
# Heatmap of sample-to-sample distances
col<- colorRampPalette(c("blue", "white", "red"))(100)
heatmap(x = resu, col = col, symm = TRUE)
## MA-plot
plotMA(res_table, colSig = "red3", colNonSig = "gray32")
##########################################
# Running differential expression analysis with edgeR
##########################################
# Creating the edgeR gene list
y <- DGEList(counts=countdata,group=factor(metadatah$Condition))
dim(y)
# Remove low count genes
keep <- filterByExpr(y)
y <- y[keep, , keep.lib.sizes=FALSE]
dim(y)
# After filtering, it is a good idea to reset the library sizes:
y$samples$lib.size <- colSums(y$counts)
y$samples
# Normalizing the data
y <- calcNormFactors(y)
y$samples
# Plot MDS
col=c(rep("black",50), rep("red",50))
plotMDS(y, labels = NULL, pch = 16, cex = 1, col = col)
legend("top", c("Normal","Disease"), pch = 16, col = c("black","red"))
# The Design matrix
group = y$samples$group
design <- model.matrix(~0+group)
colnames(design) <- levels(group)
design
# Estimating the Dispersion
y <- estimateDisp(y, design, robust=TRUE)
y$common.dispersion
# Plot the dispersion
plotBCV(y)
# Model the quasi-likelihood dispersions of variation within model
fit <- glmQLFit(y, design, robust=TRUE)
head(fit$coefficients)
# Testing for differential expression
# Set the contrast to be tested
DvsN <- makeContrasts(Disease-Normal, levels=design)
res <- glmQLFTest(fit, contrast=DvsN)
topTags(res)
# The total number of differentially expressed genes at FDR< 0:05 is:
is.de <- decideTestsDGE(res)
summary(is.de)
## Plot log-fold change against log-counts per million, with DE genes highlighted
plotMD(res, status = is.de, cex = .4)
abline(h=c(-1, 1), col="black")
# Order by FDR-corrected p-values.
# topTags() already returns something data.frame-coercible; one as.data.frame()
# is enough (the previous second coercion was a no-op).
deg <- as.data.frame(topTags(res, n = Inf))
order_res <- deg[order(deg$FDR), ]
dim(order_res)
# Select only genes with FDR-corrected p-values less than 0.05
sig_deg <- subset(order_res, FDR < 0.05)
dim(sig_deg)
# Write final list to file (sig_deg is already a data frame)
write.csv(sig_deg, "hisat_edger_project.csv")
##########################################
# Running differential expression analysis with limma+voom
##########################################
#Creating the gene list through edgeR
dge <- DGEList(countdata)
dim(dge)
# Transformations from the raw-scale
cpm <- cpm(dge)
lcpm <- cpm(dge, log=TRUE)
L <- mean(dge$samples$lib.size) * 1e-6
M <- median(dge$samples$lib.size) * 1e-6
c(L, M)
# Number of genes with 0 count in all samples.
# Compare against the actual number of samples rather than hard-coding 100,
# so the check stays correct if the cohort size changes.
table(rowSums(dge$counts == 0) == ncol(dge$counts))
# Removing genes that are lowly expressed
keep.exprs <- filterByExpr(dge, group=metadatah$Condition)
dge <- dge[keep.exprs,, keep.lib.sizes=FALSE]
dim(dge)
# Plot density before and after filtering
cpm1 <- cpm(dge)
lcpm1 <- cpm(dge, log=TRUE)
lcpm.cutoff <- log2(10/M + 2/L)
plotDensity(lcpm, main = "A. Raw data", xlab = "Log CPM")
abline(v=lcpm.cutoff, lty=3)
plotDensity(lcpm1, main = "B. Filtered data", xlab = "Log CPM")
abline(v=lcpm.cutoff, lty=3)
# Data normalization
dge1 <- calcNormFactors(dge, method = "TMM")
dge1$samples$norm.factors
# Box plots before and after normalization
nsamples <- ncol(dge1)
col <- brewer.pal(12, "Paired")
un <- cpm(dge, log=TRUE)
n <- cpm(dge1, log=TRUE)
boxplot(un, main = "A: Unnormalized data", col=col)
boxplot(n, main = "B: Normalized data", col=col)
# Unsupervised clustering
col=c(rep("blue",50), rep("red",50))
plotMDS(dge1, labels = NULL, pch = 16, cex = 1, col = col)
legend("top", c("Normal","Disease"), pch = 16, col = c("blue","red"))
# Create a design matrix
design <- cbind("1"=1,"1vs2"=rep(c(1,2), each = nrow(metadatah)/2))
# Running the limma voom function
v <- voom(dge1, design, plot=TRUE, normalize="quantile")
# After this, the usual limma pipelines for differential expression is be applied.
fit <- lmFit(v, design)
fit <- eBayes(fit)
res <- topTable(fit, coef=ncol(design),number=Inf)
summary(decideTests(fit))
res_pvalue <- as.data.frame(subset(res, adj.P.Val < 0.05))
dim(res_pvalue)
# We order our results table by the smallest p value:
order_res <- res_pvalue[order(res_pvalue$adj.P.Val),]
dim(order_res)
# Display the top 6 most significant genes
topTreat(fit, coef=1, n=6)
# Write final list to file
write.csv(as.data.frame(order_res), "hisat_limma_project.csv")
## All HISAT common
# Read the per-tool DEG lists and keep only genes found by all three methods.
des1 <- read.csv("hisat_deseq_project.csv", header = TRUE)
head(des1)
edg1 <- read.csv("hisat_edger_project.csv", header = TRUE)
head(edg1)
lim1 <- read.csv("hisat_limma_project.csv", header = TRUE)
head(lim1)
inter1 <- intersect(intersect(des1$X, edg1$X), lim1$X)
length(inter1)
write.csv(inter1, "hisat_common_project.csv")
## All Hisat common
x = list(des1$X, edg1$X, lim1$X)
myCol <- brewer.pal(3, "Pastel2")
# Chart
venn.diagram(
x = x,
category.names = c("DESeq2" , "edgeR " , "limma"),
filename = 'hisat_common.png',
output=TRUE,
# Output features
imagetype="png" ,
height = 480 ,
width = 480 ,
resolution = 300,
compression = "lzw",
# Circles
lwd = 2,
lty = 'blank',
fill = c("#999999", "#E69F00", "#56B4E9"),
# Numbers
cex = .6,
#fontface = "bold",
fontfamily = "sans",
# Set names
cat.cex = 0.6,
cat.fontface = "bold",
cat.default.pos = "outer",
cat.pos = c(-27, 27, 135),
cat.dist = c(0.055, 0.055, 0.085),
cat.fontfamily = "sans",
rotation = 1
)
|
#' Create perpendicular lines
#'
#' Functions creates lines perpendicular to the mid-pool line (from \code{\link{create_mid_line}}).
#' [[1]] is full perp line, [[2]] is between the mid-pool line and the bed/water surfaces.
#'
#' @param midLine Output from \code{\link{create_mid_line}}
#' @param b1 SpatialLines object equivalent to water surface
#' @param b2 SpatialLines object equivalent to bed surface
#' @return List of full SpatialLines perpendicular lines and the extent that
#' intersect with surfaces.
#' @export
create_perp_line <- function(midLine, b1, b2) {
  # Generates lines perpendicular to the mid-channel line, extending towards
  # the banks (profiles), so the mean water depth can be computed along each.
  #
  # Returns list(perp, channel): full perpendiculars and the versions clipped
  # to the b1 (water) / b2 (bed) surfaces.

  # Helper: build a SpatialLines object (UTM) from a 2x2 coordinate matrix
  # whose columns are (x, y) and whose rows are the two endpoints.
  as_spatial_line <- function(coords) {
    sl <- sp::SpatialLines(list(sp::Lines(list(sp::Line(coords)), ID = "1")))
    sp::proj4string(sl) <- sp::CRS("+proj=utm")
    sl
  }
  xy <- data.frame(midLine[[5]])        # midpoints of the mid-pool line
  xy <- xy[stats::complete.cases(xy), ] # drop non-polygon-intersecting rows (NAs)
  l <- nrow(xy) - 1
  if (l < 1) {
    # Fewer than two usable midpoints: nothing to build.
    return(list(NA, NA))
  }
  # Local slope between consecutive midpoints (the /1 assumes unit x spacing
  # between midpoints -- preserved from the original implementation).
  for (i in seq_len(l)) {
    xy[i, 3] <- (xy[i, 2] - xy[i + 1, 2]) / 1
  }
  degrees <- atan(xy[, 3]) # angle of the local segment (radians, despite the name)
  t1o <- sin(degrees)      # x offset of a unit step perpendicular to the segment
  t1a <- cos(degrees)      # y offset of that unit step
  newxp <- xy[, 1] + t1o   # x, positive side
  newxn <- xy[, 1] - t1o   # x, negative side
  newyp <- xy[, 2] + t1a   # y, positive side
  newyn <- xy[, 2] - t1a   # y, negative side
  perp <- vector("list", length = l)
  channel <- vector("list", length = l)
  for (i in seq_len(l)) {
    # Column-major fill: col 1 = x coords, col 2 = y coords.
    perp[[i]] <- as_spatial_line(matrix(c(newxp[i], newxn[i], newyp[i], newyn[i]),
                                        nrow = 2, ncol = 2))
    p.intersect1 <- rgeos::gIntersection(perp[[i]], b1) # crossings with water surface
    p.intersect2 <- rgeos::gIntersection(perp[[i]], b2) # crossings with bed surface
    if (is.null(p.intersect1) || is.null(p.intersect2)) {
      # No intersection with one of the surfaces: fall back to the full width.
      channel[[i]] <- perp[[i]]
    } else {
      b1.p <- which.max(p.intersect1@coords[, 2]) # highest crossing on b1
      b2.p <- which.min(p.intersect2@coords[, 2]) # lowest crossing on b2
      clipped <- matrix(c(p.intersect1@coords[b1.p, 1], p.intersect2@coords[b2.p, 1],
                          p.intersect1@coords[b1.p, 2], p.intersect2@coords[b2.p, 2]),
                        nrow = 2, ncol = 2)
      channel[[i]] <- as_spatial_line(clipped)
    }
  }
  list(perp, channel)
}
| /R/create_perp_line.R | no_license | WillBooker/slopeFromImage | R | false | false | 2,638 | r | #' Create perpendicular lines
#'
#' Functions creates lines perpendicular to the mid-pool line (from \code{\link{create_mid_line}}).
#' [[1]] is full perp line, [[2]] is between the mid-pool line and the bed/water surfaces.
#'
#' @param midLine Output from \code{\link{create_mid_line}}
#' @param b1 SpatialLines object equivalent to water surface
#' @param b2 SpatialLines object equivalent to bed surface
#' @return List of full SpatialLines perpendicular lines and the extent that
#' intersect with surfaces.
#' @export
create_perp_line <- function(midLine, b1, b2) {
  #### Generates lines perpendicular to the mid-channel line, extending towards
  #### the banks (profiles), so the mean water depth can be computed along each.
  xy <- data.frame(midLine[[5]]) # midpoints of the mid-pool line
  xy <- xy[complete.cases(xy),] # remove NAs i.e. non-polygon intersecting values
  l <- length(xy[, 1]) - 1 # number of consecutive midpoint pairs
  if (l < 1) {
    # Fewer than two usable midpoints: nothing to build.
    return(list(NA, NA))
  }
  # Local slope between consecutive midpoints; the /1 assumes unit x spacing
  # between midpoints -- TODO confirm against how the mid-line is sampled.
  for (i in 1:l) {
    xy[i, 3] <- (xy[i, 2] - xy[i + 1, 2])/1
  }
  degrees <- atan(xy[, 3]) # angle of the local segment (radians, despite the name)
  t1o <- sin(degrees) # x offset of a unit step perpendicular to the segment
  t1a <- cos(degrees) # y offset of that unit step
  newxp <- xy[, 1] + t1o # x, positive side
  newxn <- xy[, 1] - t1o # x, negative side
  newyp <- xy[, 2] + t1a # y, positive side
  newyn <- xy[, 2] - t1a # y, negative side
  perp <- vector("list", length = l) # full perpendicular lines
  channel <- vector("list", length = l) # lines clipped to the b1/b2 surfaces
  for (i in 1:l) {
    p <- c(newxp[i], newxn[i], newyp[i], newyn[i]) # two endpoints, column-major (x then y)
    p <- matrix(p, nrow = 2, ncol = 2)
    perp[[i]] <- p
    perp[[i]] <- sp::Line(perp[[i]])
    # NOTE(review): `Lines` is unqualified while everything else uses sp:: --
    # this relies on sp being attached/imported by the package NAMESPACE.
    perp[[i]] <- sp::SpatialLines(list(Lines(list(perp[[i]]), ID = "1")))
    sp::proj4string(perp[[i]]) <- sp::CRS("+proj=utm")
    p.intersect1 <- rgeos::gIntersection(perp[[i]], b1) # crossings with water surface
    p.intersect2 <- rgeos::gIntersection(perp[[i]], b2) # crossings with bed surface
    # NOTE(review): `== "TRUE"` only works via logical->character coercion, and
    # `|` binds looser than `==`; effectively is.null(p1) || is.null(p2).
    if (is.null(p.intersect1)|is.null(p.intersect2) == "TRUE") { # if no intersection, just use full width
      channel[[i]] <- perp[[i]]
    } else {
      b1.p <- which.max(p.intersect1@coords[, 2]) # highest crossing on b1
      b2.p <- which.min(p.intersect2@coords[, 2]) # lowest crossing on b2
      c <- matrix(c(p.intersect1@coords[b1.p, 1], p.intersect2@coords[b2.p, 1],
                    max(p.intersect1@coords[b1.p, 2]), p.intersect2@coords[b2.p, 2]),
                  nrow = 2, ncol = 2)
      channel[[i]] <- c
      channel[[i]] <- sp::Line(channel[[i]])
      channel[[i]] <- sp::SpatialLines(list(Lines(list(channel[[i]]), ID = "1")))
      sp::proj4string(channel[[i]]) <- sp::CRS("+proj=utm")
    }
  }
  results <- list(perp, channel)
  return(results)
}
|
#' @title Catboost variable importance
#' @description
#' \Sexpr[results=rd, stage=render]{lifecycle::badge("experimental")}
#' @import tibble
#' @param object A catboost model
#' @return
#' A data frame class of \code{varimp, data.frame)}
#' @author Resul Akay
#'
#' @examples
#' \dontrun{
#' get_var_imp(fit)
#' }
#' @export
get_var_imp <- function(object){
  # Fail early with an informative message when catboost is unavailable,
  # instead of the opaque loadNamespace() error.
  if (!requireNamespace("catboost", quietly = TRUE)) {
    stop("Package 'catboost' is required for get_var_imp().", call. = FALSE)
  }
  # Feature importances come back with features in the rownames; move them
  # into a proper column.
  raw_imp <- catboost::catboost.get_feature_importance(object)
  varimp <- tibble::rownames_to_column(data.frame(raw_imp), "variables")
  colnames(varimp) <- c("variables", "varimp")
  class(varimp) <- c("varimp", "data.frame")
  return(varimp)
}
#' @title Catboost variable importance plot
#' @description
#' \Sexpr[results=rd, stage=render]{lifecycle::badge("experimental")}
#' @import ggplot2
#' @param varimp A dataframe class of varimp, e.g. from get_var_imp
#' @return
#' variable importance plot
#' @author Resul Akay
#'
#' @examples
#' \dontrun{
#' varimp <- get_var_imp(fit)
#'
#' plot.varimp(varimp)
#' }
#' @export
plot_varimp <- function(varimp){
  # Use the .data pronoun for both aesthetics: the bare name `varimp` is
  # ambiguous here because it is also the function argument (the whole
  # data frame), not just the column.
  ggplot(varimp, aes(y = .data$variables, x = .data$varimp)) + geom_col()
}
`%notin%` <- Negate(`%in%`)
| /R/tools.R | permissive | Akai01/MicEcon | R | false | false | 1,163 | r | #' @title Catboost variable importance
#' @description
#' \Sexpr[results=rd, stage=render]{lifecycle::badge("experimental")}
#' @import tibble
#' @param object A catboost model
#' @return
#' A data frame class of \code{varimp, data.frame)}
#' @author Resul Akay
#'
#' @examples
#' \dontrun{
#' get_var_imp(fit)
#' }
#' @export
get_var_imp <- function(object){
  # Pull catboost's importance scores and reshape them into a two-column
  # data frame (feature name, importance) classed as "varimp".
  cb <- loadNamespace(package = "catboost")
  imp_raw <- data.frame(cb$catboost.get_feature_importance(object))
  out <- tibble::rownames_to_column(imp_raw, "variables")
  colnames(out) <- c("variables", "varimp")
  class(out) <- c("varimp", "data.frame")
  out
}
#' @title Catboost variable importance plot
#' @description
#' \Sexpr[results=rd, stage=render]{lifecycle::badge("experimental")}
#' @import ggplot2
#' @param varimp A dataframe class of varimp, e.g. from get_var_imp
#' @return
#' variable importance plot
#' @author Resul Akay
#'
#' @examples
#' \dontrun{
#' varimp <- get_var_imp(fit)
#'
#' plot.varimp(varimp)
#' }
#' @export
plot_varimp <- function(varimp){
  # Horizontal bar chart: one bar per variable, length = importance score.
  ggplot(varimp, aes(x = varimp, y = .data$variables)) + geom_col()
}
`%notin%` <- Negate(`%in%`)
|
# multi-objective
#' @export
renderExampleRunPlot.MBOExampleRunMultiCrit = function(object, iter, densregion = TRUE,
  se.factor = 1, single.prop.point.plots = FALSE, xlim = NULL, ylim = NULL, point.size = 3,
  line.size = 1, trafo = NULL, colors = c("red", "blue", "green"), ...) {
  # Renders X-space and Y-space plots for one iteration of a multi-criteria
  # MBO example run. Returns either list(pl.set, pl.front) for a single
  # proposed point, or a list of such pairs (one per proposed point).
  # NOTE(review): densregion, se.factor, xlim, ylim, point.size, line.size and
  # trafo are accepted but not referenced in this body -- presumably consumed
  # via ... elsewhere or kept for S3-signature compatibility; confirm.
  # extract variables and some short names
  mbo.res = object$mbo.res
  opt.path = as.data.frame(mbo.res$opt.path)
  models = object$mbo.res$models[[iter]]
  # A single surrogate model is wrapped into a list so downstream code can
  # index models uniformly.
  models = if (inherits(models, "WrappedModel")) list(models) else models
  par.set = object$par.set
  control = object$control
  x.name = getParamIds(par.set, repeated = TRUE, with.nr = TRUE)
  y.name = control$y.name
  method = control$multicrit.method
  # get x space and y space data; the NSGA2 reference front/set is appended
  # after the opt.path rows so it can be addressed by row index below.
  data.y = as.data.frame(mbo.res$opt.path, include.x = FALSE, include.rest = FALSE)
  data.y = setRowNames(rbind(data.y, object$nsga2.paretofront), NULL)
  data.x = as.data.frame(mbo.res$opt.path, include.y = FALSE, include.rest = FALSE)
  data.x = setRowNames(rbind(data.x, object$nsga2.paretoset), NULL)
  idx = getIDX(opt.path, iter)
  # Rows beyond the opt.path length are the appended NSGA2 pareto front.
  idx.nsga2.paretofront = (getOptPathLength(mbo.res$opt.path) + 1):nrow(data.y)
  plots = list()
  if (control$propose.points == 1L || single.prop.point.plots) {
    # Render X Space Plot.
    pl.xspace = makeXPlot(data.x, idx, idx.nsga2.paretofront, method, x.name,
      control$infill.crit, models, control, par.set, opt.path, object$points.per.dim,
      iter, control$propose.points, object, colors)
    # Render Y Space Plot
    pl.yspace = makeYPlot(data.y, idx, idx.nsga2.paretofront, method, y.name,
      opt.path, control, iter, control$propose.points, object, colors)
    plots = list(pl.set = pl.xspace, pl.front = pl.yspace)
  } else {
    # Multiple proposed points: emit one plot pair per proposal, revealing the
    # earlier proposals as "seen" points step by step.
    idx.propose = idx
    for (propose.iter in seq_len(control$propose.points)) {
      # set idx - add propose.iter - 1 to seq points, propose is only
      # propose[propose.iter]. (idx$propose[0] is empty, so iteration 1 is a
      # no-op append.)
      idx.propose$seq = c(idx.propose$seq, idx$propose[propose.iter - 1])
      idx.propose$past = c(idx.propose$past, idx$propose[propose.iter - 1])
      idx.propose$proposed = idx$proposed[propose.iter]
      # Render X Space Plot. ParEGO has one scalarized model per proposal.
      if (method == "parego") {
        prop.models = models[propose.iter]
      } else {
        prop.models = models
      }
      pl.xspace = makeXPlot(data.x, idx.propose, idx.nsga2.paretofront, method,
        x.name, control$infill.crit, prop.models, control, par.set, opt.path,
        object$points.per.dim, iter, 1L, object, colors)
      # Render Y Space Plot
      pl.yspace = makeYPlot(data.y, idx.propose, idx.nsga2.paretofront, method,
        y.name, opt.path, control, iter, 1L, object, colors)
      plots[[propose.iter]] = list(pl.set = pl.xspace, pl.front = pl.yspace)
    }
  }
  return(plots)
}
# Build the X-space (parameter space) plot for one iteration. For "mspot" a
# named list of two plots is returned (one infill-criterion background per
# model); for "parego"/"dib" a single plot is returned.
# NOTE(review): if `method` is none of mspot/parego/dib the bare ggplot() is
# returned unchanged -- confirm that is intentional.
makeXPlot = function(data.x, idx, idx.nsga2.paretofront, method, x.name, crit.name,
  models, control, par.set, opt.path, points.per.dim, iter, propose.points, object, colors) {
  pl.xspace = ggplot()
  pl.xspace = pl.xspace + guides(colour = FALSE, shape = FALSE)
  gg.points.xspace = getPlotData(data.x, idx, idx.nsga2.paretofront, x.name)
  # first, fill background if possible. note: 2 different plots for mspot since
  # we have 2 infill crits, one per model
  if (method == "mspot") {
    data.crit1 = getInfillCritGrid(crit.name, points.per.dim, models[1],
      control, par.set, opt.path[idx$past, ])
    data.crit2 = getInfillCritGrid(crit.name, points.per.dim, models[2],
      control, par.set, opt.path[idx$past, ])
    crit1.plot = fillBackgroundWithInfillCrit(pl.xspace, data.crit1, x.name, crit.name) +
      ggtitle("XSpace - model 1")
    crit2.plot = fillBackgroundWithInfillCrit(pl.xspace, data.crit2, x.name, crit.name) +
      ggtitle("XSpace - model 2")
    pl.xspace = list(
      crit1 = createBasicSpacePlot(crit1.plot, gg.points.xspace, iter, object, x.name, 0.8, "x", colors),
      crit2 = createBasicSpacePlot(crit2.plot, gg.points.xspace, iter, object, x.name, 0.8, "x", colors)
    )
  }
  if (method %in% c("parego", "dib")) {
    # The infill-criterion background is only well-defined for a single
    # proposed point.
    if (propose.points == 1L) {
      data.crit = getInfillCritGrid(crit.name, points.per.dim, models,
        control, par.set, opt.path[idx$past, ], iter)
      pl.xspace = fillBackgroundWithInfillCrit(pl.xspace, data.crit, x.name, crit.name) +
        ggtitle("XSpace")
    }
    pl.xspace = createBasicSpacePlot(pl.xspace, gg.points.xspace, iter, object, x.name, 0.8, "x", colors)
  }
  return(pl.xspace)
}
# Build the Y-space (objective space / pareto front) plot for one iteration.
# For ParEGO with a single proposed point, the scalarization weight lines are
# overlaid as well.
makeYPlot = function(data.y, idx, idx.nsga2.paretofront, method, y.name, opt.path,
  control, iter, propose.points, object, colors) {
  gg.points.yspace = getPlotData(data.y, idx, idx.nsga2.paretofront, y.name)
  pl.yspace = ggplot()
  pl.yspace = createBasicSpacePlot(pl.yspace, gg.points.yspace, iter, object, y.name, 0.4, "y", colors)
  if (method == "parego" && propose.points == 1L)
    pl.yspace = addParegoWeightLines(pl.yspace, data.y, idx, opt.path, 1L, control$multicrit.parego.rho)
  pl.yspace = pl.yspace + ggtitle("YSpace")
  return(pl.yspace)
}
| /R/renderExampleRunPlotMultiCrit.R | no_license | hildafab/mlrMBO-MtLSMAC | R | false | false | 4,991 | r | # multi-objective
#' @export
renderExampleRunPlot.MBOExampleRunMultiCrit = function(object, iter, densregion = TRUE,
se.factor = 1, single.prop.point.plots = FALSE, xlim = NULL, ylim = NULL, point.size = 3,
line.size = 1, trafo = NULL, colors = c("red", "blue", "green"), ...) {
# extract variables and some short names
mbo.res = object$mbo.res
opt.path = as.data.frame(mbo.res$opt.path)
models = object$mbo.res$models[[iter]]
models = if (inherits(models, "WrappedModel")) list(models) else models
par.set = object$par.set
control = object$control
x.name = getParamIds(par.set, repeated = TRUE, with.nr = TRUE)
y.name = control$y.name
method = control$multicrit.method
# get x space and y space data
data.y = as.data.frame(mbo.res$opt.path, include.x = FALSE, include.rest = FALSE)
data.y = setRowNames(rbind(data.y, object$nsga2.paretofront), NULL)
data.x = as.data.frame(mbo.res$opt.path, include.y = FALSE, include.rest = FALSE)
data.x = setRowNames(rbind(data.x, object$nsga2.paretoset), NULL)
idx = getIDX(opt.path, iter)
idx.nsga2.paretofront = (getOptPathLength(mbo.res$opt.path) + 1):nrow(data.y)
plots = list()
if (control$propose.points == 1L || single.prop.point.plots) {
# Render X Space Plot.
pl.xspace = makeXPlot(data.x, idx, idx.nsga2.paretofront, method, x.name,
control$infill.crit, models, control, par.set, opt.path, object$points.per.dim,
iter, control$propose.points, object, colors)
# Render Y Space Plot
pl.yspace = makeYPlot(data.y, idx, idx.nsga2.paretofront, method, y.name,
opt.path, control, iter, control$propose.points, object, colors)
plots = list(pl.set = pl.xspace, pl.front = pl.yspace)
} else {
idx.propose = idx
for (propose.iter in seq_len(control$propose.points)) {
# set idx - add propose.iter - 1 to seq points, propose is only propose [propose.iter]
idx.propose$seq = c(idx.propose$seq, idx$propose[propose.iter - 1])
idx.propose$past = c(idx.propose$past, idx$propose[propose.iter - 1])
idx.propose$proposed = idx$proposed[propose.iter]
# Render X Space Plot.
if (method == "parego") {
prop.models = models[propose.iter]
} else {
prop.models = models
}
pl.xspace = makeXPlot(data.x, idx.propose, idx.nsga2.paretofront, method,
x.name, control$infill.crit, prop.models, control, par.set, opt.path,
object$points.per.dim, iter, 1L, object, colors)
# Render Y Space Plot
pl.yspace = makeYPlot(data.y, idx.propose, idx.nsga2.paretofront, method,
y.name, opt.path, control, iter, 1L, object, colors)
plots[[propose.iter]] = list(pl.set = pl.xspace, pl.front = pl.yspace)
}
}
return(plots)
}
makeXPlot = function(data.x, idx, idx.nsga2.paretofront, method, x.name, crit.name,
models, control, par.set, opt.path, points.per.dim, iter, propose.points, object, colors) {
pl.xspace = ggplot()
pl.xspace = pl.xspace + guides(colour = FALSE, shape = FALSE)
gg.points.xspace = getPlotData(data.x, idx, idx.nsga2.paretofront, x.name)
# first, fill background if possible. note: 2 different plots for mspot since
# we have 2 infill crits, one per model
if (method == "mspot") {
data.crit1 = getInfillCritGrid(crit.name, points.per.dim, models[1],
control, par.set, opt.path[idx$past, ])
data.crit2 = getInfillCritGrid(crit.name, points.per.dim, models[2],
control, par.set, opt.path[idx$past, ])
crit1.plot = fillBackgroundWithInfillCrit(pl.xspace, data.crit1, x.name, crit.name) +
ggtitle("XSpace - model 1")
crit2.plot = fillBackgroundWithInfillCrit(pl.xspace, data.crit2, x.name, crit.name) +
ggtitle("XSpace - model 2")
pl.xspace = list(
crit1 = createBasicSpacePlot(crit1.plot, gg.points.xspace, iter, object, x.name, 0.8, "x", colors),
crit2 = createBasicSpacePlot(crit2.plot, gg.points.xspace, iter, object, x.name, 0.8, "x", colors)
)
}
if (method %in% c("parego", "dib")) {
if (propose.points == 1L) {
data.crit = getInfillCritGrid(crit.name, points.per.dim, models,
control, par.set, opt.path[idx$past, ], iter)
pl.xspace = fillBackgroundWithInfillCrit(pl.xspace, data.crit, x.name, crit.name) +
ggtitle("XSpace")
}
pl.xspace = createBasicSpacePlot(pl.xspace, gg.points.xspace, iter, object, x.name, 0.8, "x", colors)
}
return(pl.xspace)
}
makeYPlot = function(data.y, idx, idx.nsga2.paretofront, method, y.name, opt.path,
control, iter, propose.points, object, colors) {
gg.points.yspace = getPlotData(data.y, idx, idx.nsga2.paretofront, y.name)
pl.yspace = ggplot()
pl.yspace = createBasicSpacePlot(pl.yspace, gg.points.yspace, iter, object, y.name, 0.4, "y", colors)
if (method == "parego" && propose.points == 1L)
pl.yspace = addParegoWeightLines(pl.yspace, data.y, idx, opt.path, 1L, control$multicrit.parego.rho)
pl.yspace = pl.yspace + ggtitle("YSpace")
return(pl.yspace)
}
|
#' @importFrom twitchr get_users
#' @importFrom dplyr select
#' @noRd
get_twitch_id <- function(user_name) {
  # Resolve a Twitch login name to the user's numeric id plus profile fields.
  # (The roxygen import previously declared dplyr::filter, which is never
  # used; the code calls dplyr::select.)
  user <- get_users(login = user_name)
  message(glue::glue("user_name: {user_name} - id: {x}", x = user$id))
  # Keep only the columns downstream code uses.
  res <- dplyr::select(user, id, description, profile_image_url)
  return(res)
}
parse_duration <- function(x,
                           time_unit = c("seconds", "minutes", "hours"),
                           dur_regex = "([0-9]{1,2}h)?([0-9]{1,2}m)?([0-9]{1,2}(\\.[0-9]{1,3})?s)?") {
  # Parse a Twitch-style duration string such as "1h30m10s" into a single
  # number expressed in `time_unit`.
  #
  # Capture groups of `dur_regex`: [1] hours ("1h"), [2] minutes ("30m"),
  # [3] seconds ("10s", possibly fractional), [4] the fractional tail.
  # All groups are optional; a component absent from the string (e.g. no "h"
  # part in "45m30s") contributes zero. The previous implementation let the
  # unmatched component become NA, which poisoned the whole sum.
  time_unit <- match.arg(time_unit)
  # First match only, same as str_match_all(...)[[1]][1, ] before.
  parts <- regmatches(x, regexec(dur_regex, x))[[1]]
  # Strip the trailing unit letter and convert; unmatched groups are "".
  comp <- suppressWarnings(as.numeric(sub("[hms]$", "", parts[c(2, 3, 4)])))
  comp[is.na(comp)] <- 0
  # Per-component multipliers converting (hours, minutes, seconds) to the
  # requested output unit.
  time_mult <- switch(
    time_unit,
    hours = c(1, (1/60), (1/3600)),
    minutes = c(60, 1, (1/60)),
    seconds = c(3600, 60, 1)
  )
  sum(comp * time_mult)
}
# https://stackoverflow.com/questions/27397332/find-round-the-time-to-nearest-half-an-hour-from-a-single-file
# Round a date-time to the nearest half hour, zeroing out the seconds.
# Out-of-range minute values (e.g. 60) are normalized by as.POSIXct().
round_time <- function(x) {
  lt <- as.POSIXlt(x)
  lt$min <- 30 * round(lt$min / 30)
  lt$sec <- lt$sec %/% 60
  as.POSIXct(lt)
}
# Shift a date to the same weekday in the next (or previous) week.
# When convert = FALSE the input is returned untouched, which lets callers
# apply this conditionally inside purrr::map2().
shift_week <- function(x, convert = TRUE, which = "next") {
  if (!convert) {
    return(x)
  } else {
    # get current day of week from supplied date
    x_weekday <- clock::as_year_month_weekday(x) %>% clock::get_day()
    # boundary = "advance" moves a date already on the target weekday a full
    # week in the `which` direction rather than leaving it in place.
    res <- clock::date_shift(x, target = clock::weekday(x_weekday), which = which, boundary = "advance")
    return(res)
  }
}
# Compute the end time of a stream from its start time and length in seconds,
# with the length rounded to whole hours before being added.
compute_end_clock <- function(start_clock, stream_length, precision = "hour") {
  # NOTE(review): duration_round() rounds to the *nearest* hour, not the floor
  # that the original comment described -- confirm which behavior is wanted.
  new_length <- clock::duration_round(clock::duration_seconds(stream_length), precision = precision)
  end_clock <- clock::add_hours(start_clock, new_length)
  return(end_clock)
}
# Parse a timestamp string in `orig_zone`, convert it to `new_zone`, and
# return it as naive time (optionally as character). The default format
# matches the RFC3339-style timestamps returned by the Twitch API.
time_parser <- function(x, orig_zone = "UTC", new_zone = "America/New_York", format = "%Y-%m-%dT%H:%M:%SZ", ambiguous = "earliest", convert_to_char = TRUE) {
  # was format = "%Y%m%dT%H%M%S" for ical
  x <- clock::date_time_parse(x, orig_zone, format = format, ambiguous = ambiguous)
  x_z <- clock::as_zoned_time(x)
  # change to the desired time zone, then drop the zone (naive time)
  x_final <- clock::zoned_time_set_zone(x_z, new_zone) %>% clock::as_naive_time()
  if (convert_to_char) {
    x_final <- as.character(x_final)
  }
  return(x_final)
}
# Fetch a broadcaster's weekly schedule from the Twitch Helix API.
# If the broadcaster has no published schedule, fall back to inferring one
# from the videos uploaded in the last week. Returns a data frame with
# columns start_time, start, end_time, end, title, category, recurrenceRule
# (shaped for a calendar widget), or NULL when nothing usable exists.
get_twitch_schedule <- function(id) {
  r <- httr::GET("https://api.twitch.tv/helix/schedule", query = list(broadcaster_id = id))
  status <- httr::status_code(r)
  if (status != 200) {
    # Fallback path: no published schedule, infer from recent videos.
    warning(glue::glue("User {id} does not have valid schedule data. Proceeding to infer a schedule based on videos uploaded (status code {status})"))
    r <- httr::GET("https://api.twitch.tv/helix/videos", query = list(user_id = id, period = "week"))
    status <- httr::status_code(r)
    if (status != 200) {
      warning(glue::glue("User {id} does not have any videos! Skipping ..."))
      return(NULL)
    } else {
      # Reference dates: same weekday one week ago, and the most recent Sunday
      # (start of the current calendar week).
      current_weekday <- clock::date_now("America/New_York") %>%
        clock::as_year_month_weekday() %>%
        clock::get_day()
      prev_week_date <- clock::date_now("America/New_York") %>%
        clock::date_shift(target = clock::weekday(current_weekday), which = "previous", boundary = "advance")
      current_sunday <- clock::date_now("America/New_York") %>%
        clock::date_shift(target = clock::weekday(clock::clock_weekdays$sunday), which = "previous")
      res <- httr::content(r, "parsed") %>%
        purrr::pluck("data") %>%
        tibble::tibble() %>%
        tidyr::unnest_wider(1)
      # Turn each video's creation time + duration into a start/end event,
      # rounded to the half hour, keeping only the past week's videos.
      res_int <- res %>%
        mutate(start = purrr::map(created_at, ~time_parser(.x, convert_to_char = FALSE))) %>%
        mutate(start = purrr::map(start, ~clock::as_date_time(.x, zone = "America/New_York"))) %>%
        mutate(duration2 = purrr::map_dbl(duration, ~parse_duration(.x, "seconds"))) %>%
        tidyr::unnest(cols = c(start)) %>%
        mutate(start = purrr::map(start, ~round_time(.x))) %>%
        mutate(end = purrr::map2(start, duration2, ~compute_end_clock(.x, .y))) %>%
        mutate(category = "time",
               recurrenceRule = "Every week",
               start_time = NA,
               end_time = NA) %>%
        tidyr::unnest(cols = c(start, end)) %>%
        filter(start > prev_week_date)
      if (nrow(res_int) < 1) {
        return(NULL)
      } else {
        # Events from before this calendar week are shifted forward one week
        # so everything lands in the current week.
        res_final <- res_int %>%
          mutate(before_week_ind = start < current_sunday) %>%
          mutate(start = purrr::map2(start, before_week_ind, ~shift_week(.x, .y))) %>%
          mutate(end = purrr::map2(end, before_week_ind, ~shift_week(.x, .y))) %>%
          tidyr::unnest(cols = c(start, end)) %>%
          mutate(start = as.character(start), end = as.character(end)) %>%
          dplyr::select(start_time, start, end_time, end, title, category, recurrenceRule)
      }
    }
  } else {
    # Happy path: parse the published schedule segments.
    res <- httr::content(r, "parsed") %>%
      purrr::pluck("data", "segments") %>%
      tibble::tibble() %>%
      tidyr::unnest_wider(1)
    res_int <- res %>%
      mutate(start = purrr::map(start_time, ~time_parser(.x, convert_to_char = FALSE)),
             end = purrr::map(end_time, ~time_parser(.x, convert_to_char = FALSE)),
             category = "time",
             recurrenceRule = "Every week") %>%
      dplyr::select(start_time, start, end_time, end, title, category, recurrenceRule)
    #tidyr::unnest(cols = c(start, end))
    # grab the first records of each unique stream and replicate them one week
    # back, so the calendar also shows the previous occurrence
    res_first <- res_int %>%
      dplyr::group_by(title) %>%
      dplyr::arrange(title, start) %>%
      dplyr::slice(1) %>%
      dplyr::ungroup() %>%
      mutate(start = purrr::map(start, ~clock::as_date_time(.x, zone = "America/New_York")),
             end = purrr::map(end, ~clock::as_date_time(.x, zone = "America/New_York"))) %>%
      mutate(start = purrr::map(start, ~shift_week(.x, which = "previous")),
             end = purrr::map(end, ~shift_week(.x, which = "previous"))) %>%
      mutate(start = purrr::map(start, ~clock::as_naive_time(.x)),
             end = purrr::map(end, ~clock::as_naive_time(.x)))
    # bind back together
    res_final <- dplyr::bind_rows(
      tidyr::unnest(res_first, c("start", "end")),
      tidyr::unnest(res_int, c("start", "end"))
    ) %>%
      mutate(start = as.character(start), end = as.character(end))
  }
  return(res_final)
}
# Resolve the id of the most recent video for a Twitch user, falling back
# to clips when the user has no uploads.
#
# `id` -- Twitch broadcaster/user id.
# Returns a single id string, or NA when neither videos nor clips exist.
get_twitch_videos <- function(id) {
  message(glue::glue("twitch id {id}"))
  # The video/clip id is the last path component of its URL; this helper
  # removes the duplication the two branches previously shared.
  first_id_from_urls <- function(df) {
    df %>%
      dplyr::mutate(video_id = purrr::map_chr(url, ~{
        parts <- stringr::str_split(.x, "/")[[1]]
        parts[length(parts)]
      })) %>%
      dplyr::slice(1) %>%
      dplyr::pull(video_id)
  }
  videos <- twitchr::get_videos(user_id = id, first = 100)
  if (is.null(videos)) {
    # try getting clips instead
    videos <- twitchr::get_all_clips(broadcaster_id = id)
    if (is.null(videos)) {
      warning(glue::glue("There are no videos for user {id}"))
      return(NA)
    }
    # Clip results come back as a flat data frame
    return(first_id_from_urls(videos))
  }
  # Video results carry their records under $data
  first_id_from_urls(videos$data)
}
#' Import calendar directly from server
#'
#' @param cal_slug string for URL slug of calendar.
#' @param cal_base_url base URL of the CalDAV server; when `NULL`, it is
#'   read from the golem config entry `cal_base_url`.
#'
#' @return data frame with calendar event contents
#' @export
#' @import caldav
#' @importFrom calendar ic_read ical
import_cal <- function(cal_slug = "wimpys-world-of-streamers", cal_base_url = NULL) {
  if (is.null(cal_base_url)) {
    cal_base_url <- get_golem_config("cal_base_url")
  }
  caldav_url <- glue::glue("{cal_base_url}/{cal_slug}")
  # Credentials come from the environment so they never live in code
  cal_data <-
    caldav::caldav_get_all_simple_auth(
      url = caldav_url,
      user = Sys.getenv("NEXTCLOUD_USER"),
      password = Sys.getenv("NEXTCLOUD_PASSWORD")
    )
  x <- cal_data$calendar
  # ic_read() only reads from disk, so round-trip the payload through a
  # temp file that withr cleans up automatically
  res <- withr::with_tempfile("tf", {
    cat(x, file = tf)
    ic_read(tf)
  })
  return(res)
}
#' @importFrom dplyr mutate select left_join filter case_when
#' @importFrom clock date_time_parse as_naive_time as_zoned_time zoned_time_set_zone
#' @importFrom purrr map map2
#' @noRd
process_cal <- function(raw_df) {
  # Normalise a raw ical data frame (one row per event, columns taken from
  # the VEVENT fields) into the event schema the calendar widget expects.
  #
  # DTSTART*/DTEND* column names may carry a ";TZID=<zone>" suffix, so they
  # are melted into long form to peel the timezone out of the column name,
  # then widened back to one DTSTART/DTEND pair per event uid.
  dt_df <- raw_df %>%
    mutate(uid = UID) %>%
    select(uid, starts_with("DTSTART"), starts_with("DTEND")) %>%
    tidyr::pivot_longer(!uid, names_to = c("dt_type", "timezone"), names_sep = ";", values_to = "time") %>%
    filter(!is.na(time)) %>%
    tidyr::pivot_wider(names_from = c(dt_type), values_from = time)
  # Parse each timestamp in its own source timezone via time_parser()
  dt_df2 <- dt_df %>%
    mutate(
      timezone = stringr::str_remove_all(timezone, "TZID="),
      start_clock = purrr::map2(DTSTART, timezone, ~time_parser(.x, .y)),
      #start = as.character(start_clock),
      end_clock = purrr::map2(DTEND, timezone, ~time_parser(.x, .y)),
      #end = as.character(end_clock)
    ) %>%
    select(uid, start_clock, end_clock, timezone) %>%
    tidyr::unnest(cols = c(start_clock, end_clock))
  # Map the raw RRULE onto the widget's recurrence labels; anything other
  # than weekly passes through untranslated
  rec_df <- raw_df %>%
    mutate(uid = UID) %>%
    mutate(recurrenceRule = case_when(
      stringr::str_detect(RRULE, "FREQ=WEEKLY") ~ "Every week",
      TRUE ~ RRULE
    )) %>%
    select(uid, recurrenceRule)
  # Join the parsed times and recurrence back onto the event metadata
  final_df <- raw_df %>%
    mutate(uid = UID) %>%
    select(uid, title = SUMMARY, location = LOCATION) %>%
    left_join(dt_df2, by = "uid") %>%
    left_join(rec_df, by = "uid") %>%
    mutate(raw = uid)
  return(final_df)
}
# Build the named list of timezone choices for the picker widget from a
# saved HTML dump of a timezone <select> element.
#
# `source_html` -- path to the HTML file to parse; defaults to the bundled
#   app asset (previously hard-coded inside the function body).
# Returns a named list: one entry per group label, each holding the
# character vector of timezone values under it.
process_raw_timezones <- function(source_html = "inst/app/www/timezones_raw.html") {
  # ALL credit goes to Tan for rescuing me yet again!
  y <- rvest::read_html(source_html) %>%
    rvest::html_element("form select") %>%
    rvest::html_children()
  timezone_res <- tibble::tibble(
    label = y %>% rvest::html_attr("label"),
    value = y %>% rvest::html_attr("value")
  ) %>%
    tidyr::fill(label) %>%            # options inherit the preceding group label
    dplyr::filter(!is.na(value)) %>%  # drop the label-only rows themselves
    dplyr::group_by(label) %>%
    dplyr::summarise(value = list(value)) %>%
    tibble::deframe()
  return(timezone_res)
} | /R/mod_cal_viewer_fct_helpers.R | permissive | pvictor/shinycal | R | false | false | 10,773 | r | #' @importFrom twitchr get_users
#' @importFrom dplyr filter
#' @noRd
get_twitch_id <- function(user_name) {
  # Look up a Twitch user's numeric id plus profile metadata by login name;
  # returns a one-row data frame: id, description, profile_image_url.
  # NOTE(review): the body uses dplyr::select, not filter — the
  # "importFrom dplyr filter" tag above looks stale; confirm before
  # touching the NAMESPACE directives.
  user <- get_users(login = user_name)
  # Log the resolved id (handy when looping over many user names)
  message(glue::glue("user_name: {user_name} - id: {x}", x = user$id))
  res <- dplyr::select(user, id, description, profile_image_url)
  return(res)
}
# Parse a Twitch-style duration string (e.g. "1h21m34s") into a number.
#
# Bug fixed: components absent from the string (e.g. no hour part in
# "45m12s") used to come through as NA and poison the sum, returning NA
# for any duration shorter than an hour. Missing parts now count as 0.
# The dead `dur_final` accumulator was removed, and the stringr/readr
# calls were replaced with base-R equivalents.
#
# `x`         -- duration string such as "1h2m3s", "45m12s" or "59s".
# `time_unit` -- unit of the returned value.
# `dur_regex` -- regex whose capture groups isolate the h / m / s pieces.
# Returns a single numeric duration expressed in `time_unit`.
parse_duration <- function(x,
                           time_unit = c("seconds", "minutes", "hours"),
                           dur_regex = "([0-9]{1,2}h)?([0-9]{1,2}m)?([0-9]{1,2}(\\.[0-9]{1,3})?s)?") {
  # process to reverse engineer the starting time of each video
  # clever regex found at https://stackoverflow.com/a/11293491
  # regexec() returns the full match plus one entry per capture group:
  # [2] the "hour" piece (ex "1h"), [3] the "minute" piece (ex "1m"),
  # [4] the "second" piece (ex "1.5s"); absent pieces come back as "".
  time_unit <- match.arg(time_unit)
  pieces <- regmatches(x, regexec(dur_regex, x))[[1]]
  # Strip the unit letters and coerce; "" becomes NA, which we treat as a
  # missing component (0) instead of letting it propagate through sum()
  dur_vec <- suppressWarnings(as.numeric(gsub("[^0-9.]", "", pieces[c(2, 3, 4)])))
  dur_vec[is.na(dur_vec)] <- 0
  names(dur_vec) <- c("hours", "minutes", "seconds")
  # Conversion factors from each component into the requested unit
  time_mult <- switch(
    time_unit,
    hours = c(1, (1/60), (1/3600)),
    minutes = c(60, 1, (1/60)),
    seconds = c(3600, 60, 1)
  )
  sum(dur_vec * time_mult)
}
# Round a date-time to the nearest half hour, discarding sub-minute detail.
# (adapted from https://stackoverflow.com/questions/27397332/find-round-the-time-to-nearest-half-an-hour-from-a-single-file)
round_time <- function(x) {
  lt <- as.POSIXlt(x)
  # Snap minutes to the closest multiple of 30; a resulting value of 60 is
  # normalised into the next hour by the POSIXct conversion below
  lt$min <- 30 * round(lt$min / 30)
  # Floor division leaves 0 for any sec < 60, i.e. drops the seconds
  lt$sec <- lt$sec %/% 60
  as.POSIXct(lt)
}
# Shift a date onto the same weekday of an adjacent week.
#
# When `convert` is FALSE the input is returned untouched (callers use it
# as a per-row on/off switch). Otherwise the date moves to the previous or
# next occurrence of its own weekday, i.e. exactly one week back/forward.
shift_week <- function(x, convert = TRUE, which = "next") {
  if (!convert) {
    return(x)
  }
  # The supplied date's own weekday number is the shift target
  target_day <- clock::get_day(clock::as_year_month_weekday(x))
  clock::date_shift(
    x,
    target = clock::weekday(target_day),
    which = which,
    boundary = "advance"
  )
}
# Derive a stream's end time from its start and its length in seconds.
#
# The length is rounded (duration_round — nearest, not floored, despite the
# name of the default precision) to whole `precision` units before adding.
# NOTE(review): add_hours() is handed a clock duration rather than a bare
# hour count — assumes clock accepts duration input here; also the call
# looks hour-specific even though `precision` is a parameter. Confirm.
compute_end_clock <- function(start_clock, stream_length, precision = "hour") {
  # round the stream length to the nearest whole `precision` unit
  new_length <- clock::duration_round(clock::duration_seconds(stream_length), precision = precision)
  end_clock <- clock::add_hours(start_clock, new_length)
  return(end_clock)
}
# Parse a timestamp string expressed in `orig_zone`, re-express it in
# `new_zone`, and return the naive (zone-free) time, optionally as character.
#
# The default `format` matches ISO-8601 "Z"-suffixed strings; ical-style
# input previously used format = "%Y%m%dT%H%M%S".
time_parser <- function(x, orig_zone = "UTC", new_zone = "America/New_York", format = "%Y-%m-%dT%H:%M:%SZ", ambiguous = "earliest", convert_to_char = TRUE) {
  parsed <- clock::date_time_parse(x, orig_zone, format = format, ambiguous = ambiguous)
  # Attach the zone, move to the target zone, then strip zone information
  rezoned <- clock::zoned_time_set_zone(clock::as_zoned_time(parsed), new_zone)
  out <- clock::as_naive_time(rezoned)
  if (convert_to_char) {
    out <- as.character(out)
  }
  out
}
# Fetch (or infer) a streamer's weekly schedule from the Twitch helix API.
#
# Primary path: the /schedule endpoint. When the user has no published
# schedule, fall back to the last week of uploaded videos and reconstruct
# a weekly schedule from their start times and durations. Returns NULL
# when neither source yields usable rows.
get_twitch_schedule <- function(id) {
  r <- httr::GET("https://api.twitch.tv/helix/schedule", query = list(broadcaster_id = id))
  status <- httr::status_code(r)
  if (status != 200) {
    # No published schedule: infer one from recent videos instead
    warning(glue::glue("User {id} does not have valid schedule data. Proceeding to infer a schedule based on videos uploaded (status code {status})"))
    r <- httr::GET("https://api.twitch.tv/helix/videos", query = list(user_id = id, period = "week"))
    status <- httr::status_code(r)
    if (status != 200) {
      warning(glue::glue("User {id} does not have any videos! Skipping ..."))
      return(NULL)
    } else {
      # Reference dates in the app's canonical America/New_York zone:
      # prev_week_date = today's weekday one week back (cutoff for videos),
      # current_sunday = most recent Sunday, i.e. start of this week.
      current_weekday <- clock::date_now("America/New_York") %>%
        clock::as_year_month_weekday() %>%
        clock::get_day()
      prev_week_date <- clock::date_now("America/New_York") %>%
        clock::date_shift(target = clock::weekday(current_weekday), which = "previous", boundary = "advance")
      current_sunday <- clock::date_now("America/New_York") %>%
        clock::date_shift(target = clock::weekday(clock::clock_weekdays$sunday), which = "previous")
      res <- httr::content(r, "parsed") %>%
        purrr::pluck("data") %>%
        tibble::tibble() %>%
        tidyr::unnest_wider(1)
      # One row per video: parse its creation time, round it to the half
      # hour, and derive an end time from the reported duration string;
      # keep only videos from the last week.
      res_int <- res %>%
        mutate(start = purrr::map(created_at, ~time_parser(.x, convert_to_char = FALSE))) %>%
        mutate(start = purrr::map(start, ~clock::as_date_time(.x, zone = "America/New_York"))) %>%
        mutate(duration2 = purrr::map_dbl(duration, ~parse_duration(.x, "seconds"))) %>%
        tidyr::unnest(cols = c(start)) %>%
        mutate(start = purrr::map(start, ~round_time(.x))) %>%
        mutate(end = purrr::map2(start, duration2, ~compute_end_clock(.x, .y))) %>%
        mutate(category = "time",
               recurrenceRule = "Every week",
               start_time = NA,
               end_time = NA) %>%
        tidyr::unnest(cols = c(start, end)) %>%
        filter(start > prev_week_date)
      if (nrow(res_int) < 1) {
        return(NULL)
      } else {
        # Events dated before this week are moved onto the current week
        # (shift_week's default which = "next" shifts forward) so the
        # inferred schedule lands on the current calendar.
        res_final <- res_int %>%
          mutate(before_week_ind = start < current_sunday) %>%
          mutate(start = purrr::map2(start, before_week_ind, ~shift_week(.x, .y))) %>%
          mutate(end = purrr::map2(end, before_week_ind, ~shift_week(.x, .y))) %>%
          tidyr::unnest(cols = c(start, end)) %>%
          mutate(start = as.character(start), end = as.character(end)) %>%
          dplyr::select(start_time, start, end_time, end, title, category, recurrenceRule)
      }
    }
  } else {
    # Published schedule available: use its segments directly
    res <- httr::content(r, "parsed") %>%
      purrr::pluck("data", "segments") %>%
      tibble::tibble() %>%
      tidyr::unnest_wider(1)
    res_int <- res %>%
      mutate(start = purrr::map(start_time, ~time_parser(.x, convert_to_char = FALSE)),
             end = purrr::map(end_time, ~time_parser(.x, convert_to_char = FALSE)),
             category = "time",
             recurrenceRule = "Every week") %>%
      dplyr::select(start_time, start, end_time, end, title, category, recurrenceRule)
    #tidyr::unnest(cols = c(start, end))
    # grab the first records of each unique stream
    res_first <- res_int %>%
      dplyr::group_by(title) %>%
      dplyr::arrange(title, start) %>%
      dplyr::slice(1) %>%
      dplyr::ungroup() %>%
      mutate(start = purrr::map(start, ~clock::as_date_time(.x, zone = "America/New_York")),
             end = purrr::map(end, ~clock::as_date_time(.x, zone = "America/New_York"))) %>%
      mutate(start = purrr::map(start, ~shift_week(.x, which = "previous")),
             end = purrr::map(end, ~shift_week(.x, which = "previous"))) %>%
      mutate(start = purrr::map(start, ~clock::as_naive_time(.x)),
             end = purrr::map(end, ~clock::as_naive_time(.x)))
    # bind back together: each stream's earliest segment is duplicated one
    # week earlier so the calendar shows a full rolling window
    res_final <- dplyr::bind_rows(
      tidyr::unnest(res_first, c("start", "end")),
      tidyr::unnest(res_int, c("start", "end"))
    ) %>%
      mutate(start = as.character(start), end = as.character(end))
  }
  return(res_final)
}
# Resolve the id of the most recent video for a Twitch user, falling back
# to clips when the user has no uploads.
#
# `id` -- Twitch broadcaster/user id.
# Returns a single id string, or NA when neither videos nor clips exist.
get_twitch_videos <- function(id) {
  message(glue::glue("twitch id {id}"))
  # The video/clip id is the last path component of its URL; this helper
  # removes the duplication the two branches previously shared.
  first_id_from_urls <- function(df) {
    df %>%
      dplyr::mutate(video_id = purrr::map_chr(url, ~{
        parts <- stringr::str_split(.x, "/")[[1]]
        parts[length(parts)]
      })) %>%
      dplyr::slice(1) %>%
      dplyr::pull(video_id)
  }
  videos <- twitchr::get_videos(user_id = id, first = 100)
  if (is.null(videos)) {
    # try getting clips instead
    videos <- twitchr::get_all_clips(broadcaster_id = id)
    if (is.null(videos)) {
      warning(glue::glue("There are no videos for user {id}"))
      return(NA)
    }
    # Clip results come back as a flat data frame
    return(first_id_from_urls(videos))
  }
  # Video results carry their records under $data
  first_id_from_urls(videos$data)
}
#' Import calendar directly from server
#'
#' @param cal_slug string for URL slug of calendar.
#' @param cal_base_url base URL of the CalDAV server; when `NULL`, it is
#'   read from the golem config entry `cal_base_url`.
#'
#' @return data frame with calendar event contents
#' @export
#' @import caldav
#' @importFrom calendar ic_read ical
import_cal <- function(cal_slug = "wimpys-world-of-streamers", cal_base_url = NULL) {
  if (is.null(cal_base_url)) {
    cal_base_url <- get_golem_config("cal_base_url")
  }
  caldav_url <- glue::glue("{cal_base_url}/{cal_slug}")
  # Credentials come from the environment so they never live in code
  cal_data <-
    caldav::caldav_get_all_simple_auth(
      url = caldav_url,
      user = Sys.getenv("NEXTCLOUD_USER"),
      password = Sys.getenv("NEXTCLOUD_PASSWORD")
    )
  x <- cal_data$calendar
  # ic_read() only reads from disk, so round-trip the payload through a
  # temp file that withr cleans up automatically
  res <- withr::with_tempfile("tf", {
    cat(x, file = tf)
    ic_read(tf)
  })
  return(res)
}
#' @importFrom dplyr mutate select left_join filter case_when
#' @importFrom clock date_time_parse as_naive_time as_zoned_time zoned_time_set_zone
#' @importFrom purrr map map2
#' @noRd
process_cal <- function(raw_df) {
  # Normalise a raw ical data frame (one row per event, columns taken from
  # the VEVENT fields) into the event schema the calendar widget expects.
  #
  # DTSTART*/DTEND* column names may carry a ";TZID=<zone>" suffix, so they
  # are melted into long form to peel the timezone out of the column name,
  # then widened back to one DTSTART/DTEND pair per event uid.
  dt_df <- raw_df %>%
    mutate(uid = UID) %>%
    select(uid, starts_with("DTSTART"), starts_with("DTEND")) %>%
    tidyr::pivot_longer(!uid, names_to = c("dt_type", "timezone"), names_sep = ";", values_to = "time") %>%
    filter(!is.na(time)) %>%
    tidyr::pivot_wider(names_from = c(dt_type), values_from = time)
  # Parse each timestamp in its own source timezone via time_parser()
  dt_df2 <- dt_df %>%
    mutate(
      timezone = stringr::str_remove_all(timezone, "TZID="),
      start_clock = purrr::map2(DTSTART, timezone, ~time_parser(.x, .y)),
      #start = as.character(start_clock),
      end_clock = purrr::map2(DTEND, timezone, ~time_parser(.x, .y)),
      #end = as.character(end_clock)
    ) %>%
    select(uid, start_clock, end_clock, timezone) %>%
    tidyr::unnest(cols = c(start_clock, end_clock))
  # Map the raw RRULE onto the widget's recurrence labels; anything other
  # than weekly passes through untranslated
  rec_df <- raw_df %>%
    mutate(uid = UID) %>%
    mutate(recurrenceRule = case_when(
      stringr::str_detect(RRULE, "FREQ=WEEKLY") ~ "Every week",
      TRUE ~ RRULE
    )) %>%
    select(uid, recurrenceRule)
  # Join the parsed times and recurrence back onto the event metadata
  final_df <- raw_df %>%
    mutate(uid = UID) %>%
    select(uid, title = SUMMARY, location = LOCATION) %>%
    left_join(dt_df2, by = "uid") %>%
    left_join(rec_df, by = "uid") %>%
    mutate(raw = uid)
  return(final_df)
}
# Build the named list of timezone choices for the picker widget from a
# saved HTML dump of a timezone <select> element.
#
# `source_html` -- path to the HTML file to parse; defaults to the bundled
#   app asset (previously hard-coded inside the function body).
# Returns a named list: one entry per group label, each holding the
# character vector of timezone values under it.
process_raw_timezones <- function(source_html = "inst/app/www/timezones_raw.html") {
  # ALL credit goes to Tan for rescuing me yet again!
  y <- rvest::read_html(source_html) %>%
    rvest::html_element("form select") %>%
    rvest::html_children()
  timezone_res <- tibble::tibble(
    label = y %>% rvest::html_attr("label"),
    value = y %>% rvest::html_attr("value")
  ) %>%
    tidyr::fill(label) %>%            # options inherit the preceding group label
    dplyr::filter(!is.na(value)) %>%  # drop the label-only rows themselves
    dplyr::group_by(label) %>%
    dplyr::summarise(value = list(value)) %>%
    tibble::deframe()
  return(timezone_res)
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{matn}
\alias{matn}
\alias{rmatn}
\alias{dmatn}
\title{Generate from and evaluate the density of the matrix normal distribution}
\usage{
rmatn(M, Q, P)
dmatn(X, M, Q, P, logd = FALSE)
}
\arguments{
\item{M}{\code{p * q} mean matrix}
\item{Q}{\code{q * q} covariance matrix}
\item{P}{\code{p * p} covariance matrix}
\item{X}{\code{p * q} matrix at which to evaluate the density}
\item{logd}{logical; if \code{TRUE} the logarithm of the density is returned}
}
\value{
For \code{rmatn}, a matrix \code{X}; for \code{dmatn}, the (logarithm of the) density evaluation.
}
\description{
Generate from and evaluate the density of the matrix normal distribution
}
\examples{
set.seed(100)
p <- 20
q <- 5
M <- matrix(rnorm(p*q), p, q)
P <- crossprod(matrix(rnorm(p*p), p, p))
Q <- crossprod(matrix(rnorm(q*q), q, q))
X <- rmatn(M, Q, P)
dmatn(X, M, Q, P, logd = TRUE)
}
\references{
Karlsson, S. (2013). Forecasting with Bayesian vector autoregressions.
In G. Elliott & T. Timmermann (Eds.), Handbook of economic forecasting,
volume 2B. North Holland, Elsevier.
}
| /man/matn.Rd | no_license | ankargren/fdr | R | false | true | 1,154 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{matn}
\alias{matn}
\alias{rmatn}
\alias{dmatn}
\title{Generate from and evaluate the density of the matrix normal distribution}
\usage{
rmatn(M, Q, P)
dmatn(X, M, Q, P, logd = FALSE)
}
\arguments{
\item{M}{\code{p * q} mean matrix}
\item{Q}{\code{q * q} covariance matrix}
\item{P}{\code{p * p} covariance matrix}
\item{X}{\code{p * q} matrix at which to evaluate the density}
\item{logd}{logical; if \code{TRUE} the logarithm of the density is returned}
}
\value{
For \code{rmatn}, a matrix \code{X}; for \code{dmatn}, the (logarithm of the) density evaluation.
}
\description{
Generate from and evaluate the density of the matrix normal distribution
}
\examples{
set.seed(100)
p <- 20
q <- 5
M <- matrix(rnorm(p*q), p, q)
P <- crossprod(matrix(rnorm(p*p), p, p))
Q <- crossprod(matrix(rnorm(q*q), q, q))
X <- rmatn(M, Q, P)
dmatn(X, M, Q, P, logd = TRUE)
}
\references{
Karlsson, S. (2013). Forecasting with Bayesian vector autoregressions.
In G. Elliott & T. Timmermann (Eds.), Handbook of economic forecasting,
volume 2B. North Holland, Elsevier.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots-palettesCDISC.R
\docType{data}
\name{shapePaletteNRIND}
\alias{shapePaletteNRIND}
\title{Shape palette for a standard CDISC Normal/Reference
Range Indicator.}
\format{
A named character vector with shape symbol for
typical Normal Reference Range Indicator variable:
\itemize{
\item{"LOW": }{filled down-pointing arrow (\code{25})}
\item{"NORMAL": }{filled circle (\code{21})}
\item{"HIGH": }{filled up-pointing arrow (\code{24})}
\item{"ABNORMAL": }{diamond (\code{18})}
\item{"UNKNOWN" or "NA": }{cross (\code{3})}
\item{"NA": }{cross (\code{3})}
}
}
\usage{
shapePaletteNRIND
}
\description{
These symbols should be supported in Windows and Linux.
}
\keyword{datasets}
| /package/clinUtils/man/shapePaletteNRIND.Rd | no_license | Lion666/clinUtils | R | false | true | 749 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots-palettesCDISC.R
\docType{data}
\name{shapePaletteNRIND}
\alias{shapePaletteNRIND}
\title{Shape palette for a standard CDISC Normal/Reference
Range Indicator.}
\format{
A named character vector with shape symbol for
typical Normal Reference Range Indicator variable:
\itemize{
\item{"LOW": }{filled down-pointing arrow (\code{25})}
\item{"NORMAL": }{filled circle (21)}
\item{"HIGH": }{filled up-pointing arrow (\code{24})}
\item{"ABNORMAL": }{diamond (\code{18})}
\item{"UNKNOWN" or 'NA': }{cross (\code{3})}
\item{"NA": }{cross (\code{3})}
}
}
\usage{
shapePaletteNRIND
}
\description{
These symbols should be supported in Windows and Linux.
}
\keyword{datasets}
|
library(EnviroPRA)
### Name: AIRboot
### Title: Inhalation of airborne chemicals by bootstrap
### Aliases: AIRboot
### Keywords: methods
### ** Examples
# Carcinogenic effects
# (renamed `c`/`b` to descriptive names: `c` shadows base::c())
conc_air <- rnorm(n = 10, mean = 0.2, sd = 0.05)  # air concentrations (CA)
body_wt <- rnorm(n = 100, mean = 20, sd = 5)      # body weights (BW)
AIRboot(n = 1000, CA = conc_air, IR = 25, ET = 24, EF = 300, ED = 24, BW = body_wt)
| /data/genthat_extracted_code/EnviroPRA/examples/AIRboot.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 337 | r | library(EnviroPRA)
### Name: AIRboot
### Title: Inhalation of airborne chemicals by bootstrap
### Aliases: AIRboot
### Keywords: methods
### ** Examples
# Carcinogenic effects
# (renamed `c`/`b` to descriptive names: `c` shadows base::c())
conc_air <- rnorm(n = 10, mean = 0.2, sd = 0.05)  # air concentrations (CA)
body_wt <- rnorm(n = 100, mean = 20, sd = 5)      # body weights (BW)
AIRboot(n = 1000, CA = conc_air, IR = 25, ET = 24, EF = 300, ED = 24, BW = body_wt)
|
library(parallel)
nworkers = 2
timeout = 600
cls = makeCluster(nworkers, "PSOCK")
workers = vector(nworkers, mode = "list")
# close() no-op for NULL entries in `workers` (slots with no connection),
# so the shutdown sweep lapply(workers, close) works without guards
close.NULL = function(...) NULL
# Establish a direct socket between two workers of the PSOCK cluster.
#
# Runs on every worker: the one whose global ID equals `server` listens on
# `port`, the one equal to `client` dials in after `sleep` seconds (giving
# the listener time to bind); all other workers do nothing. The resulting
# connection is stored in the worker's `workers` list via `<<-` —
# deliberate, since this function's environment is set to .GlobalEnv
# before being exported to the cluster.
connect = function(server, client, port, timeout, sleep = 0.1, ...) {
    if (ID == server) {
        # Listening end: blocks until the client connects or timeout expires
        con = socketConnection(port = port, server = TRUE, blocking = TRUE, open = "a+b", timeout = timeout, ...)
        workers[[client]] <<- con
    }
    if (ID == client) {
        # Give the server side a head start before dialing
        Sys.sleep(sleep)
        con = socketConnection(port = port, server = FALSE, blocking = TRUE, open = "a+b", timeout = timeout, ...)
        workers[[server]] <<- con
    }
    NULL
}
environment(connect) = environment(close.NULL) = .GlobalEnv
clusterExport(cls, c("workers", "connect", "close.NULL"), envir = environment())
clusterMap(cls, assign, "ID", seq(nworkers), MoreArgs = list(envir = .GlobalEnv))
socket_map = read.csv(text = "\n\"server\",\"client\",\"port\"\n1,2,33000\n")
by(socket_map, seq(nrow(socket_map)), function(x) {
clusterCall(cls, connect, x$server, x$client, x$port, timeout = timeout)
})
worker_code = c("if(ID != 1)\n stop(sprintf(\"Worker is attempting to execute wrong code.\nThis code is for 1, but manager assigned ID %s\", ID))\n\nx = 1\ny = 2\ntenmb <- unserialize(workers[[2]])\nout = sum(x, y, tenmb)\nwrite.table(out, \"script6.R.log\")", "if(ID != 2)\n stop(sprintf(\"Worker is attempting to execute wrong code.\nThis code is for 2, but manager assigned ID %s\", ID))\n\ntenmb = as.numeric(seq(10 * 2^20/8))\nserialize(tenmb, workers[[1]], xdr = FALSE)")
# Parse and evaluate a string of R code in the worker's global environment.
# Deliberate remote execution: `codestring` is supplied by the manager.
evalg <- function(codestring) {
  parsed <- parse(text = codestring)
  eval(parsed, .GlobalEnv)
  NULL
}
parLapply(cls, worker_code, evalg)
clusterEvalQ(cls, lapply(workers, close))
stopCluster(cls)
| /tests/generated/gen_script6.R | no_license | cran/makeParallel | R | false | false | 1,735 | r | library(parallel)
nworkers = 2
timeout = 600
cls = makeCluster(nworkers, "PSOCK")
workers = vector(nworkers, mode = "list")
# close() no-op for NULL entries in `workers` (slots with no connection),
# so the shutdown sweep lapply(workers, close) works without guards
close.NULL = function(...) NULL
# Establish a direct socket between two workers of the PSOCK cluster.
#
# Runs on every worker: the one whose global ID equals `server` listens on
# `port`, the one equal to `client` dials in after `sleep` seconds (giving
# the listener time to bind); all other workers do nothing. The resulting
# connection is stored in the worker's `workers` list via `<<-` —
# deliberate, since this function's environment is set to .GlobalEnv
# before being exported to the cluster.
connect = function(server, client, port, timeout, sleep = 0.1, ...) {
    if (ID == server) {
        # Listening end: blocks until the client connects or timeout expires
        con = socketConnection(port = port, server = TRUE, blocking = TRUE, open = "a+b", timeout = timeout, ...)
        workers[[client]] <<- con
    }
    if (ID == client) {
        # Give the server side a head start before dialing
        Sys.sleep(sleep)
        con = socketConnection(port = port, server = FALSE, blocking = TRUE, open = "a+b", timeout = timeout, ...)
        workers[[server]] <<- con
    }
    NULL
}
environment(connect) = environment(close.NULL) = .GlobalEnv
clusterExport(cls, c("workers", "connect", "close.NULL"), envir = environment())
clusterMap(cls, assign, "ID", seq(nworkers), MoreArgs = list(envir = .GlobalEnv))
socket_map = read.csv(text = "\n\"server\",\"client\",\"port\"\n1,2,33000\n")
by(socket_map, seq(nrow(socket_map)), function(x) {
clusterCall(cls, connect, x$server, x$client, x$port, timeout = timeout)
})
worker_code = c("if(ID != 1)\n stop(sprintf(\"Worker is attempting to execute wrong code.\nThis code is for 1, but manager assigned ID %s\", ID))\n\nx = 1\ny = 2\ntenmb <- unserialize(workers[[2]])\nout = sum(x, y, tenmb)\nwrite.table(out, \"script6.R.log\")", "if(ID != 2)\n stop(sprintf(\"Worker is attempting to execute wrong code.\nThis code is for 2, but manager assigned ID %s\", ID))\n\ntenmb = as.numeric(seq(10 * 2^20/8))\nserialize(tenmb, workers[[1]], xdr = FALSE)")
# Parse and evaluate a string of R code in the worker's global environment.
# Deliberate remote execution: `codestring` is supplied by the manager.
evalg <- function(codestring) {
  parsed <- parse(text = codestring)
  eval(parsed, .GlobalEnv)
  NULL
}
parLapply(cls, worker_code, evalg)
clusterEvalQ(cls, lapply(workers, close))
stopCluster(cls)
|
# Pacotes ------------------------------------------------------------------
library(tidymodels)
library(ISLR)
library(tidyverse)
library(modeldata)
library(pROC)
library(vip)
library(skimr)
library(naniar)
# PASSO 0) CARREGAR AS BASES -----------------------------------------------
data("credit_data")
help(credit_data)
glimpse(credit_data) # German Risk
credit_data %>% count(Status)
# PASSO 1) BASE TREINO/TESTE -----------------------------------------------
set.seed(1)
# NOTE: rsample::initial_split() takes `prop`, not `p`; the old `p = 0.75`
# was silently swallowed by `...` and only worked because the default
# proportion happens to be 0.75 as well.
credit_initial_split <- initial_split(credit_data, strata = "Status", prop = 0.75)
credit_train <- training(credit_initial_split)
credit_test <- testing(credit_initial_split)
# PASSO 2) EXPLORAR A BASE -------------------------------------------------
vis_miss(credit_data)
skim(credit_data)
GGally::ggpairs(credit_train %>% select_if((is.numeric)) %>% mutate_all(log))
credit_data %>%
filter(Assets > 100) %>%
dplyr::select(where(is.numeric), Status, Records) %>%
pivot_longer(-c(Status, Records)) %>%
ggplot(aes(x = Records, y = value, fill = Status)) +
geom_boxplot() +
facet_wrap(~name, scales = "free_y") +
scale_y_log10()
#
# GGally::ggpairs(credit_data %>% select(where(~!is.numeric(.))))
# PASSO 3) DATAPREP --------------------------------------------------------
credit_receita <- recipe(Status ~ ., data = credit_train) %>%
step_modeimpute(Home, Marital, Job) %>%
step_medianimpute(Debt) %>%
step_bagimpute(Income, Assets) %>%
step_zv(all_predictors()) %>%
step_normalize(all_numeric()) %>%
step_dummy(all_nominal(), -all_outcomes()) %>%
step_interact(~ starts_with("Seniority"):starts_with("Records")) %>%
step_interact(~ starts_with("Amount"):starts_with("Records"))
ok <-juice(prep(credit_receita))
# PASSO 4) MODELO ----------------------------------------------------------
# Definiรงรฃo de
# a) a f(x): logistc_reg()
# b) modo (natureza da var resp): classification
# c) hiperparametros que queremos tunar: penalty = tune()
# d) hiperparametros que nรฃo queremos tunar: mixture = 1 # LASSO
# e) o motor que queremos usar: glmnet
credit_lr_model <- logistic_reg(penalty = tune(), mixture = 1) %>%
set_mode("classification") %>%
set_engine("glmnet")
# workflow ----------------------------------------------------------------
credit_wf <- workflow() %>%
add_model(credit_lr_model) %>%
add_recipe(credit_receita)
# PASSO 5) TUNAGEM DE HIPERPARรMETROS --------------------------------------
# a) bases de reamostragem para validaรงรฃo: vfold_cv()
# b) (opcional) grade de parรขmetros: parameters() %>% update() %>% grid_regular()
# c) tune_grid()
# d) escolha das mรฉtricas (rmse, roc_auc, etc)
# d) collect_metrics() ou autoplot() para ver o resultado
credit_resamples <- vfold_cv(credit_train, v = 5)
credit_lr_tune_grid <- tune_grid(
credit_wf,
resamples = credit_resamples,
metrics = metric_set(
accuracy,
kap, # KAPPA
roc_auc,
precision,
recall,
f_meas,
mn_log_loss #binary cross entropy
)
)
# minha versรฃo do autoplot()
collect_metrics(credit_lr_tune_grid)
collect_metrics(credit_lr_tune_grid) %>%
filter(penalty < 00.01) %>%
ggplot(aes(x = penalty, y = mean)) +
geom_point() +
geom_errorbar(aes(ymin = mean - std_err, ymax = mean + std_err)) +
facet_wrap(~.metric, scales = "free") +
scale_x_log10()
# PASSO 6) DESEMPENHO DO MODELO FINAL ------------------------------------------
# a) extrai melhor modelo com select_best()
# b) finaliza o modelo inicial com finalize_model()
# c) ajusta o modelo final com todos os dados de treino (bases de validaรงรฃo jรก era)
credit_lr_best_params <- select_best(credit_lr_tune_grid, "roc_auc")
credit_wf <- credit_wf %>% finalize_workflow(credit_lr_best_params)
credit_lr_last_fit <- last_fit(
credit_wf,
credit_initial_split
)
collect_metrics(credit_lr_last_fit)
# Variรกveis importantes
credit_lr_last_fit_model <- credit_lr_last_fit$.workflow[[1]]$fit$fit
vip(credit_lr_last_fit_model)
# PASSO 7) GUARDA TUDO ---------------------------------------------------------
write_rds(credit_lr_last_fit, "credit_lr_last_fit.rds")
write_rds(credit_lr_last_fit_model, "credit_lr_last_fit_model.rds")
collect_metrics(credit_lr_last_fit)
credit_test_preds <- collect_predictions(credit_lr_last_fit)
# roc
credit_roc_curve <- credit_test_preds %>% roc_curve(Status, .pred_bad)
autoplot(credit_roc_curve)
# confusion matrix
credit_test_preds %>%
mutate(
Status_class = factor(if_else(.pred_bad > 0.6, "bad", "good"))
) %>%
conf_mat(Status, Status_class)
# grรกficos extras!
# risco por faixa de score
credit_test_preds %>%
mutate(
score = factor(ntile(.pred_bad, 10))
) %>%
count(score, Status) %>%
ggplot(aes(x = score, y = n, fill = Status)) +
geom_col(position = "fill") +
geom_label(aes(label = n), position = "fill") +
coord_flip()
# grรกfico sobre os da classe "bad"
percentis = 20
credit_test_preds %>%
mutate(
score = factor(ntile(.pred_bad, percentis))
) %>%
filter(Status == "bad") %>%
group_by(score) %>%
summarise(
n = n(),
media = mean(.pred_bad)
) %>%
mutate(p = n/sum(n)) %>%
ggplot(aes(x = p, y = score)) +
geom_col() +
geom_label(aes(label = scales::percent(p))) +
geom_vline(xintercept = 1/percentis, colour = "red", linetype = "dashed", size = 1)
# PASSO 7) MODELO FINAL -----------------------------------------------------
credit_final_lr_model <- credit_lr_model %>% fit(Status ~ ., credit_data)
write_rds(credit_final_lr_model, "credit_final_lr_model.rds")
# EXTRA - KS ##############################################
# https://pt.wikipedia.org/wiki/Teste_Kolmogorov-Smirnov
# Two-sample Kolmogorov-Smirnov statistic between the score distributions
# of the two outcome classes (vector interface matching yardstick's *_vec).
#
# `truth`    -- class labels; must contain exactly two distinct values
#               (previously >2 classes were silently truncated to the
#               first two encountered).
# `estimate` -- numeric scores, same length as `truth`.
# ks.test's ties warning is suppressed on purpose: predicted probabilities
# frequently repeat.
ks_vec <- function(truth, estimate) {
  truth_lvls <- unique(truth)
  stopifnot(length(truth_lvls) == 2)
  ks_test <- suppressWarnings(
    ks.test(estimate[truth %in% truth_lvls[1]], estimate[truth %in% truth_lvls[2]])
  )
  ks_test$statistic
}
comparacao_de_modelos <- collect_predictions(credit_lr_last_fit) %>%
summarise(
auc = roc_auc_vec(Status, .pred_bad),
acc = accuracy_vec(Status, .pred_class),
prc = precision_vec(Status, .pred_class),
rec = recall_vec(Status, .pred_class),
ks = ks_vec(Status, .pred_bad),
roc = list(roc(Status, .pred_bad))
)
# KS no ggplot2 -------
densidade_acumulada <- credit_test_preds %>%
ggplot(aes(x = .pred_bad, colour = Status)) +
stat_ecdf(size = 1) +
theme_minimal() +
labs(title = "Densidade Acumulada")
densidade <- credit_test_preds %>%
ggplot(aes(x = .pred_bad, colour = Status)) +
stat_density(size = 0.5, aes(fill = Status), alpha = 0.2 , position = "identity") +
theme_minimal() +
labs(title = "Densidade")
library(patchwork)
densidade / densidade_acumulada
# KS "na raรงa" ---------
ks_na_raca_df <- collect_predictions(credit_lr_last_fit) %>%
mutate(modelo = "Regressao Logistica",
pred_prob = .pred_bad) %>%
mutate(score_categ = cut_interval(pred_prob, 1000)) %>%
arrange(modelo, score_categ, Status) %>%
group_by(modelo, Status, score_categ) %>%
summarise(
n = n(),
pred_prob_mean = mean(pred_prob)
) %>%
mutate(
ecdf = cumsum(n)/sum(n)
)
ks_na_raca_df %>%
ggplot(aes(x = pred_prob_mean, y = ecdf, linetype = Status, colour = modelo)) +
geom_line(size = 1) +
theme_minimal()
# descobrindo onde acontece o mรกximo ------------
ks_na_raca_onde <- ks_na_raca_df %>%
select(-n, -score_categ) %>%
ungroup() %>%
complete(modelo, Status, pred_prob_mean) %>%
fill(ecdf) %>%
spread(Status, ecdf) %>%
group_by(modelo) %>%
na.omit() %>%
summarise(
ks = max(abs(bad- good)),
ks_onde = which.max(abs(bad- good)),
pred_prob_mean_onde = pred_prob_mean[ks_onde],
y_max = bad[ks_onde],
y_min = good[ks_onde]
)
ks_na_raca_df %>%
ggplot(aes(x = pred_prob_mean, y = ecdf, colour = modelo)) +
geom_line(size = 1, aes(linetype = Status)) +
geom_segment(data = ks_na_raca_onde, aes(x = pred_prob_mean_onde, xend = pred_prob_mean_onde, y = y_max, yend = y_min), size = 2, arrow = arrow(ends = "both")) +
theme_minimal()
# ignorar
vip_ok <- vi(credit_lr_last_fit_model) %>%
mutate(Variable = fct_reorder(Variable, abs(Importance))) %>%
ggplot(aes(x = abs(Importance), y = Variable, fill = Sign)) +
geom_col()
write_rds(vip_ok, "vip_ok.rds")
| /exemplos/06-regressao-logistica.R | no_license | rcbull/202006-intro-ml | R | false | false | 8,185 | r | # Pacotes ------------------------------------------------------------------
library(tidymodels)
library(ISLR)
library(tidyverse)
library(modeldata)
library(pROC)
library(vip)
library(skimr)
library(naniar)
# PASSO 0) CARREGAR AS BASES -----------------------------------------------
data("credit_data")
help(credit_data)
glimpse(credit_data) # German Risk
credit_data %>% count(Status)
# PASSO 1) BASE TREINO/TESTE -----------------------------------------------
set.seed(1)
# NOTE: rsample::initial_split() takes `prop`, not `p`; the old `p = 0.75`
# was silently swallowed by `...` and only worked because the default
# proportion happens to be 0.75 as well.
credit_initial_split <- initial_split(credit_data, strata = "Status", prop = 0.75)
credit_train <- training(credit_initial_split)
credit_test <- testing(credit_initial_split)
# PASSO 2) EXPLORAR A BASE -------------------------------------------------
vis_miss(credit_data)
skim(credit_data)
GGally::ggpairs(credit_train %>% select_if((is.numeric)) %>% mutate_all(log))
credit_data %>%
filter(Assets > 100) %>%
dplyr::select(where(is.numeric), Status, Records) %>%
pivot_longer(-c(Status, Records)) %>%
ggplot(aes(x = Records, y = value, fill = Status)) +
geom_boxplot() +
facet_wrap(~name, scales = "free_y") +
scale_y_log10()
#
# GGally::ggpairs(credit_data %>% select(where(~!is.numeric(.))))
# PASSO 3) DATAPREP --------------------------------------------------------
credit_receita <- recipe(Status ~ ., data = credit_train) %>%
step_modeimpute(Home, Marital, Job) %>%
step_medianimpute(Debt) %>%
step_bagimpute(Income, Assets) %>%
step_zv(all_predictors()) %>%
step_normalize(all_numeric()) %>%
step_dummy(all_nominal(), -all_outcomes()) %>%
step_interact(~ starts_with("Seniority"):starts_with("Records")) %>%
step_interact(~ starts_with("Amount"):starts_with("Records"))
ok <-juice(prep(credit_receita))
# STEP 4) MODEL ------------------------------------------------------------
# Definition of
# a) the f(x): logistic_reg()
# b) mode (nature of the response): classification
# c) hyperparameters we want to tune: penalty = tune()
# d) hyperparameters we do not want to tune: mixture = 1 # LASSO
# e) the engine we want to use: glmnet
credit_lr_model <- logistic_reg(penalty = tune(), mixture = 1) %>%
set_mode("classification") %>%
set_engine("glmnet")
# workflow: bundle the model spec with the preprocessing recipe ------------
credit_wf <- workflow() %>%
add_model(credit_lr_model) %>%
add_recipe(credit_receita)
# STEP 5) HYPERPARAMETER TUNING --------------------------------------------
# a) resampling sets for validation: vfold_cv()
# b) (optional) parameter grid: parameters() %>% update() %>% grid_regular()
# c) tune_grid()
# d) choice of metrics (rmse, roc_auc, etc)
# e) collect_metrics() or autoplot() to inspect the result
# 5-fold cross-validation on the training data.
credit_resamples <- vfold_cv(credit_train, v = 5)
credit_lr_tune_grid <- tune_grid(
credit_wf,
resamples = credit_resamples,
metrics = metric_set(
accuracy,
kap, # KAPPA
roc_auc,
precision,
recall,
f_meas,
mn_log_loss #binary cross entropy
)
)
# my version of autoplot(): mean +/- SE of each metric vs the penalty,
# restricted to penalty < 0.01 where the interesting region is.
collect_metrics(credit_lr_tune_grid)
collect_metrics(credit_lr_tune_grid) %>%
filter(penalty < 00.01) %>%
ggplot(aes(x = penalty, y = mean)) +
geom_point() +
geom_errorbar(aes(ymin = mean - std_err, ymax = mean + std_err)) +
facet_wrap(~.metric, scales = "free") +
scale_x_log10()
# STEP 6) FINAL MODEL PERFORMANCE ------------------------------------------
# a) extract the best hyperparameters with select_best()
# b) finalize the workflow with finalize_workflow()
# c) last_fit() refits on the full training set and scores the test set
# FIX: pass the metric by name; tune deprecated positional arguments after
# the first one in select_best().
credit_lr_best_params <- select_best(credit_lr_tune_grid, metric = "roc_auc")
credit_wf <- credit_wf %>% finalize_workflow(credit_lr_best_params)
credit_lr_last_fit <- last_fit(
credit_wf,
credit_initial_split
)
collect_metrics(credit_lr_last_fit)
# Important variables: pull the underlying glmnet fit out of the workflow.
credit_lr_last_fit_model <- credit_lr_last_fit$.workflow[[1]]$fit$fit
vip(credit_lr_last_fit_model)
# STEP 7) SAVE EVERYTHING --------------------------------------------------------
write_rds(credit_lr_last_fit, "credit_lr_last_fit.rds")
write_rds(credit_lr_last_fit_model, "credit_lr_last_fit_model.rds")
collect_metrics(credit_lr_last_fit)
# Test-set predictions produced by last_fit().
credit_test_preds <- collect_predictions(credit_lr_last_fit)
# ROC curve on the test set (yardstick), then plotted.
credit_roc_curve <- credit_test_preds %>% roc_curve(Status, .pred_bad)
autoplot(credit_roc_curve)
# Confusion matrix with a custom 0.6 cut-off on P(bad).
credit_test_preds %>%
mutate(
Status_class = factor(if_else(.pred_bad > 0.6, "bad", "good"))
) %>%
conf_mat(Status, Status_class)
# extra plots!
# risk by score band (deciles of predicted P(bad)), stacked to proportions
credit_test_preds %>%
mutate(
score = factor(ntile(.pred_bad, 10))
) %>%
count(score, Status) %>%
ggplot(aes(x = score, y = n, fill = Status)) +
geom_col(position = "fill") +
geom_label(aes(label = n), position = "fill") +
coord_flip()
# distribution of the "bad" class across score percentiles
percentis = 20
credit_test_preds %>%
mutate(
score = factor(ntile(.pred_bad, percentis))
) %>%
filter(Status == "bad") %>%
group_by(score) %>%
summarise(
n = n(),
media = mean(.pred_bad)
) %>%
mutate(p = n/sum(n)) %>%
ggplot(aes(x = p, y = score)) +
geom_col() +
geom_label(aes(label = scales::percent(p))) +
# reference line: share expected if "bad" were spread uniformly across bands
geom_vline(xintercept = 1/percentis, colour = "red", linetype = "dashed", size = 1)
# STEP 7) FINAL MODEL --------------------------------------------------------
# Refit on the complete data set for deployment.
# FIX: `credit_lr_model` still carries the `penalty = tune()` placeholder, so
# fitting it directly would error; fit the finalized workflow instead, which
# also applies the recipe (imputation etc.) that glmnet needs.
credit_final_lr_model <- credit_wf %>% fit(credit_data)
write_rds(credit_final_lr_model, "credit_final_lr_model.rds")
# EXTRA - KS ##############################################
# https://pt.wikipedia.org/wiki/Teste_Kolmogorov-Smirnov
# Two-sample Kolmogorov-Smirnov statistic between the score distributions of
# the two classes found in `truth` (tie warnings from ks.test are suppressed).
ks_vec <- function(truth, estimate) {
  classes <- unique(truth)
  scores_first <- estimate[truth %in% classes[1]]
  scores_second <- estimate[truth %in% classes[2]]
  ks_fit <- suppressWarnings(ks.test(scores_first, scores_second))
  ks_fit$statistic
}
# Single-row model scorecard on the test predictions: AUC, accuracy,
# precision, recall, KS, plus the pROC roc object kept in a list-column.
comparacao_de_modelos <- collect_predictions(credit_lr_last_fit) %>%
summarise(
auc = roc_auc_vec(Status, .pred_bad),
acc = accuracy_vec(Status, .pred_class),
prc = precision_vec(Status, .pred_class),
rec = recall_vec(Status, .pred_class),
ks = ks_vec(Status, .pred_bad),
roc = list(roc(Status, .pred_bad))
)
# KS in ggplot2: compare the score ECDFs of the two classes -------
densidade_acumulada <- credit_test_preds %>%
ggplot(aes(x = .pred_bad, colour = Status)) +
stat_ecdf(size = 1) +
theme_minimal() +
labs(title = "Densidade Acumulada")
densidade <- credit_test_preds %>%
ggplot(aes(x = .pred_bad, colour = Status)) +
stat_density(size = 0.5, aes(fill = Status), alpha = 0.2 , position = "identity") +
theme_minimal() +
labs(title = "Densidade")
library(patchwork)
# patchwork layout: density on top, cumulative density below.
densidade / densidade_acumulada
# KS "by hand" ---------
# Bin the scores into 1000 intervals and build a per-class ECDF.
# NOTE: after summarise() the result is still grouped by (modelo, Status)
# (dplyr peels off only the last grouping variable), so cumsum(n)/sum(n)
# is the within-class cumulative share, as intended.
ks_na_raca_df <- collect_predictions(credit_lr_last_fit) %>%
mutate(modelo = "Regressao Logistica",
pred_prob = .pred_bad) %>%
mutate(score_categ = cut_interval(pred_prob, 1000)) %>%
arrange(modelo, score_categ, Status) %>%
group_by(modelo, Status, score_categ) %>%
summarise(
n = n(),
pred_prob_mean = mean(pred_prob)
) %>%
mutate(
ecdf = cumsum(n)/sum(n)
)
ks_na_raca_df %>%
ggplot(aes(x = pred_prob_mean, y = ecdf, linetype = Status, colour = modelo)) +
geom_line(size = 1) +
theme_minimal()
# finding where the maximum gap (the KS statistic) occurs ------------
# Align the two ECDFs on a common score grid (complete + fill), then take
# the largest absolute difference between the bad and good curves.
ks_na_raca_onde <- ks_na_raca_df %>%
select(-n, -score_categ) %>%
ungroup() %>%
complete(modelo, Status, pred_prob_mean) %>%
fill(ecdf) %>%
spread(Status, ecdf) %>%
group_by(modelo) %>%
na.omit() %>%
summarise(
ks = max(abs(bad- good)),
ks_onde = which.max(abs(bad- good)),
pred_prob_mean_onde = pred_prob_mean[ks_onde],
y_max = bad[ks_onde],
y_min = good[ks_onde]
)
# Same ECDF plot, with a double-headed arrow marking the KS gap.
ks_na_raca_df %>%
ggplot(aes(x = pred_prob_mean, y = ecdf, colour = modelo)) +
geom_line(size = 1, aes(linetype = Status)) +
geom_segment(data = ks_na_raca_onde, aes(x = pred_prob_mean_onde, xend = pred_prob_mean_onde, y = y_max, yend = y_min), size = 2, arrow = arrow(ends = "both")) +
theme_minimal()
# scratch: signed variable-importance plot (saved to disk, not displayed)
vip_ok <- vi(credit_lr_last_fit_model) %>%
mutate(Variable = fct_reorder(Variable, abs(Importance))) %>%
ggplot(aes(x = abs(Importance), y = Variable, fill = Sign)) +
geom_col()
write_rds(vip_ok, "vip_ok.rds")
|
##Exploratory Data Analysis
##Course Project1
##Code for reading the data from file & plotting and producing plot3.png
library(graphics)
data<-read.table("household_power_consumption.txt", ##read the data
header = TRUE, sep=";",na.strings="?")
good<-((data$"Date"=="1/2/2007")|(data$"Date"=="2/2/2007")) ##select the dates given
qdata<-data[good,]
#Put the date and time in one variable and convert it to POSIXct.
#(POSIXct, not strptime's POSIXlt: list-based POSIXlt columns are an
#anti-pattern inside data frames and can misbehave in later operations.)
qdata$DateTime<-as.POSIXct(paste(qdata$Date,qdata$Time), format="%d/%m/%Y %H:%M:%S")
png(filename = "plot3.png", width = 480, height = 480, units = "px") ##launch png device
plot(qdata$DateTime, qdata$"Sub_metering_1",type="l",main="",xlab="",
ylab="Energy sub metering")
lines(qdata$DateTime,qdata$"Sub_metering_2",col="red")
lines(qdata$DateTime,qdata$"Sub_metering_3",col="blue")
legend("topright", col = c("black", "red", "blue"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
,lty=c(1,1,1))
dev.off() ##closing the graphics device
| /plot3.R | no_license | Andylain3n/ExData_Plotting1 | R | false | false | 1,024 | r | ##Eploratory Data Analysis
##Course Project1
##Code for reading the data from file & plotting and producing plot3.png
library(graphics)
data<-read.table("household_power_consumption.txt", ##read the data
header = TRUE, sep=";",na.strings="?")
good<-((data$"Date"=="1/2/2007")|(data$"Date"=="2/2/2007")) ##select the dates given
qdata<-data[good,]
#Putting the date and time in one variable and convert it into POSIXlt:
qdata$DateTime<-paste(qdata$Date,qdata$Time)
qdata$DateTime<-strptime(qdata$DateTime, "%d/%m/%Y %H:%M:%S")
png(filename = "plot3.png", width = 480, height = 480, units = "px") ##launch png device
plot(qdata$DateTime, qdata$"Sub_metering_1",type="l",main="",xlab="",
ylab="Energy sub metering")
lines(qdata$DateTime,qdata$"Sub_metering_2",col="red")
lines(qdata$DateTime,qdata$"Sub_metering_3",col="blue")
legend("topright", col = c("black", "red", "blue"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
,lty=c(1,1,1))
dev.off() ##closing the graphics device
|
# Convert log-odds (logits) to probabilities via the inverse-logit transform.
# FIX: stats::plogis() is the numerically stable equivalent of
# exp(x) / (1 + exp(x)); the hand-rolled ratio overflows to NaN (Inf/Inf)
# for logits above ~710, while plogis() correctly returns 1.
logit2prob <- function(logit){
  plogis(logit)
}
odds <- exp(logit)
prob <- odds / (1 + odds)
return(prob)
} |
# Build "locally aggregated structure" (LAS) advice networks per group from
# cognitive social structure (CSS) data, after imputing each respondent's
# own row from the observed (true) network.
library(readr)
library(magrittr)
library(similR)
#read in data of participant perceptions of advice ties among their other teammates
advice_css <- readRDS("data/networks_advice_css_jen.rds")
# True advice network, element "3" of the truth file
# (presumably wave/time 3 -- TODO confirm against the data dictionary).
truth <- readRDS("data/networks_truth.rds")[["3"]]$advice
# Unique group identifiers (advice_css is a list keyed by group).
groups_ids <- names(advice_css) %>%
unique
# Impute each participant's missing self-perception, then build the group LAS
LAS <- vector("list", length(advice_css))
names(LAS) <- groups_ids
for (g in groups_ids) {
# Adding i's perception from the true network data
for (i in names(advice_css[[g]]))
advice_css[[g]][[i]][i,] <- truth[[g]][i,]
# Creating the LAS matrix (similR::las with rule "i" -- see similR docs)
LAS[[g]] <- las(advice_css[[g]], rule = "i")
dimnames(LAS[[g]]) <- dimnames(advice_css[[g]][[1]])
}
# Known data-quality issue left unresolved here:
# Problem with the CSS network 31 B (999/99 placeholder codes, not 0/1)
# advice_css$`31B`
# 31A 31B 31C 31D
# 31A NA 1 1 1
# 31B 999 NA 999 999
# 31C 0 0 NA 0
# 31D 99 99 99 NA
saveRDS(LAS, "data/networks_advice_las.rds")
| /data/networks_advice_las.R | no_license | muriteams/css-and-ci | R | false | false | 927 | r | library(readr)
library(magrittr)
library(similR)
#read in data of participant perceptions of advice ties among their other teammates
advice_css <- readRDS("data/networks_advice_css_jen.rds")
truth <- readRDS("data/networks_truth.rds")[["3"]]$advice
groups_ids <- names(advice_css) %>%
unique
# Imputing missing preception
LAS <- vector("list", length(advice_css))
names(LAS) <- groups_ids
for (g in groups_ids) {
# Adding i's perception from the true network data
for (i in names(advice_css[[g]]))
advice_css[[g]][[i]][i,] <- truth[[g]][i,]
# Creating the LAS matrix
LAS[[g]] <- las(advice_css[[g]], rule = "i")
dimnames(LAS[[g]]) <- dimnames(advice_css[[g]][[1]])
}
# Problem with the CSS network 31 B
# advice_css$`31B`
# 31A 31B 31C 31D
# 31A NA 1 1 1
# 31B 999 NA 999 999
# 31C 0 0 NA 0
# 31D 99 99 99 NA
saveRDS(LAS, "data/networks_advice_las.rds")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gsva_plotting_functions.R
\name{plot_gsva_pca,ReactomeAnalysisResult-method}
\alias{plot_gsva_pca,ReactomeAnalysisResult-method}
\title{plot_gsva_pca - ReactomeAnalysisResult}
\usage{
\S4method{plot_gsva_pca}{ReactomeAnalysisResult}(object, pathway_ids = NULL, ...)
}
\arguments{
\item{object}{A \code{\link{ReactomeAnalysisResult}} object containing a ssGSEA result}
\item{pathway_ids}{A character vector of pathway ids. If set, only these pathways will be
used for the PCA analysis.}
\item{...}{Additional parameters are passed to \code{prcomp}}
}
\value{
A ggplot2 object representing the plot.
}
\description{
Runs a Principal Component analysis (using \code{prcomp}) on the samples
based on the pathway analysis results.
}
\examples{
# load the scRNA-seq example data
library(ReactomeGSA.data)
data(jerby_b_cells)
# perform the GSVA analysis
gsva_result <- analyse_sc_clusters(jerby_b_cells, verbose = FALSE)
}
| /man/plot_gsva_pca-ReactomeAnalysisResult-method.Rd | no_license | reactome/ReactomeGSA | R | false | true | 997 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gsva_plotting_functions.R
\name{plot_gsva_pca,ReactomeAnalysisResult-method}
\alias{plot_gsva_pca,ReactomeAnalysisResult-method}
\title{plot_gsva_pca - ReactomeAnalysisResult}
\usage{
\S4method{plot_gsva_pca}{ReactomeAnalysisResult}(object, pathway_ids = NULL, ...)
}
\arguments{
\item{object}{A \code{\link{ReactomeAnalysisResult}} object containing a ssGSEA result}
\item{pathway_ids}{A character vector of pathway ids. If set, only these pathways will be
used for the PCA analysis.}
\item{...}{Additional parameters are passed to \code{prcomp}}
}
\value{
A ggplot2 object representing the plot.
}
\description{
Runs a Principal Component analysis (using \code{prcomp}) on the samples
based on the pathway analysis results.
}
\examples{
# load the scRNA-seq example data
library(ReactomeGSA.data)
data(jerby_b_cells)
# perform the GSVA analysis
gsva_result <- analyse_sc_clusters(jerby_b_cells, verbose = FALSE)
}
|
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
s <- NULL
set <- function(y) {
x <<- y
s <<- NULL
}
#Return input matrix x
get <- function() x
#Get inverse of x and assign to variable s
setsolve <- function(solve) s <<- solve
#Return s
getsolve <- function() s
list(set = set, get = get, setsolve = setsolve, getsolve = getsolve)
}
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above. If the inverse has already been
## calculated (and the matrix has not changed), then cacheSolve should
## retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
#Get solved matrix otherwise NULL if it does not exist
s <-x$getsolve()
#Get original matrix
o <-x$get()
if(!is.null(s)) {
message("Getting cached data")
return(s)
}
## Return a matrix that is the inverse of 'x'
data <- x$get()
s <- solve(data)
x$setsolve(s)
s
}
| /cachematrix.R | no_license | msimanga/ProgrammingAssignment2 | R | false | false | 1,054 | r |
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
s <- NULL
set <- function(y) {
x <<- y
s <<- NULL
}
#Return input matrix x
get <- function() x
#Get inverse of x and assign to variable s
setsolve <- function(solve) s <<- solve
#Return s
getsolve <- function() s
list(set = set, get = get, setsolve = setsolve, getsolve = getsolve)
}
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above. If the inverse has already been
## calculated (and the matrix has not changed), then cacheSolve should
## retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
#Get solved matrix otherwise NULL if it does not exist
s <-x$getsolve()
#Get original matrix
o <-x$get()
if(!is.null(s)) {
message("Getting cached data")
return(s)
}
## Return a matrix that is the inverse of 'x'
data <- x$get()
s <- solve(data)
x$setsolve(s)
s
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{evaluate}
\alias{evaluate}
\title{Function for evaluating F1 by cell type,
Adapted from automated cell type identification benchmarking paper (Abdelaal et al. Genome Biology, 2019)}
\usage{
evaluate(true, predicted)
}
\arguments{
\item{true}{vector of true labels}
\item{predicted}{vector of predicted labels}
}
\description{
Function for evaluating F1 by cell type,
Adapted from automated cell type identification benchmarking paper (Abdelaal et al. Genome Biology, 2019)
}
| /man/evaluate.Rd | permissive | korsunskylab/symphony | R | false | true | 568 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{evaluate}
\alias{evaluate}
\title{Function for evaluating F1 by cell type,
Adapted from automated cell type identifiaction benchmarking paper (Abdelaal et al. Genome Biology, 2019)}
\usage{
evaluate(true, predicted)
}
\arguments{
\item{true}{vector of true labels}
\item{predicted}{vector of predicted labels}
}
\description{
Function for evaluating F1 by cell type,
Adapted from automated cell type identifiaction benchmarking paper (Abdelaal et al. Genome Biology, 2019)
}
|
# Probe the first rows so read.table can infer column classes, then force the
# first two columns (Date, Time) to character for explicit parsing below.
HouseHoldTable <- read.table("household_power_consumption.txt", sep=";",header=TRUE,nrows=10)
classes <- sapply(HouseHoldTable,class)
classes[1:2] <- 'character'
HouseHoldTable <- read.table(file = "household_power_consumption.txt", sep=";", header=TRUE, colClasses = classes, na.strings = '?')
# Keep only 2007-02-01 and 2007-02-02 in a single filtering pass
# (the original ran two consecutive subset() calls).
obs_dates <- as.Date(HouseHoldTable$Date, format = '%d/%m/%Y')
TestTable <- subset(HouseHoldTable, obs_dates >= as.Date('2007-02-01') & obs_dates <= as.Date('2007-02-02'))
# Combine date and time into one POSIXct timestamp for the x axis.
# (The original pre-assigned DateTime <- c(NA) first -- dead code, removed.)
TestTable$DateTime <- as.POSIXct(paste(TestTable$Date, TestTable$Time, sep='T'), format="%d/%m/%YT%H:%M:%S")
png(filename ="plot3.png", width = 480, height = 480, units = "px")
# Empty frame first, then one line per sub-metering channel.
plot(TestTable$DateTime,TestTable$Sub_metering_1,type='n',xlab = "", ylab = "Energy sub metering")
lines(TestTable$DateTime,TestTable$Sub_metering_1,col="black")
lines(TestTable$DateTime,TestTable$Sub_metering_2,col="red")
lines(TestTable$DateTime,TestTable$Sub_metering_3,col="blue")
legend("topright", lty=c(1,1),col=c("black","red","blue"),legend=c('Sub_metering_1','Sub_metering_2','Sub_metering_3'))
dev.off() | /plot3.R | no_license | phraniiac/ExData_Plotting1 | R | false | false | 1,167 | r | HouseHoldTable <- read.table("household_power_consumption.txt", sep=";",header=TRUE,nrows=10)
classes <- sapply(HouseHoldTable,class)
classes[1:2] <- 'character'
HouseHoldTable <- read.table(file = "household_power_consumption.txt", sep=";", header=TRUE, colClasses = classes, na.strings = '?')
TestTable <- subset(HouseHoldTable, as.Date(HouseHoldTable$Date, format = '%d/%m/%Y') >= as.Date('2007-02-01'))
TestTable <- subset(TestTable, as.Date(TestTable$Date, format = '%d/%m/%Y') <= as.Date('2007-02-02'))
TestTable$DateTime <- c(NA)
TestTable$DateTime <- paste(TestTable$Date,TestTable$Time,sep='T')
TestTable$DateTime <- as.POSIXct(TestTable$DateTime, format="%d/%m/%YT%H:%M:%S")
png(filename ="plot3.png", width = 480, height = 480, units = "px")
plot(TestTable$DateTime,TestTable$Sub_metering_1,type='n',xlab = "", ylab = "Energy sub metering")
lines(TestTable$DateTime,TestTable$Sub_metering_1,col="black")
lines(TestTable$DateTime,TestTable$Sub_metering_2,col="red")
lines(TestTable$DateTime,TestTable$Sub_metering_3,col="blue")
legend("topright", lty=c(1,1),col=c("black","red","blue"),legend=c('Sub_metering_1','Sub_metering_2','Sub_metering_3'))
dev.off() |
# USC DPS crime-log analysis: load the incident log, derive month/weekday,
# collapse incident types into broad groups, and bin occurrence times.
library(ggplot2) #ggplot, geom_bar
library(plyr) #count
library(ggthemes) #theme_wsj()
library(grid)
library(chron)
# NOTE(review): plyr is loaded twice (also above); harmless but redundant.
library(plyr)
# NOTE(review): setwd() inside a script is fragile; prefer running from the
# data directory or using relative/project paths.
#Run the R script in the same place as the data or set to relevant directory
setwd("/Users/patrick/Dropbox/Spring 2015/")
#Read data, make it a data frame, reorder the weekdays
crime <- read.csv("testcrime.csv")
# NOTE(review): read.csv already returns a data.frame; this call is a no-op.
crime <- data.frame(crime)
#crime$Weekdays <-factor(crime$Weekdays, levels = c("Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"))
#Change to Date format and get months, reorder months
crime$Month <- months(as.Date(crime$Occurred, "%m/%d/%Y"))
crime$Month <-factor(crime$Month, levels = c("January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"))
crime$Weekdays <- weekdays(as.Date(crime$Occurred, "%m/%d/%Y"), abbreviate = TRUE)
crime$Weekdays <-factor(crime$Weekdays, levels = c("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"))
#crime$TimeGroups <- factor(crime$TimeGroups, levels = c("3 AM - 6 AM", "6 AM - 9 AM", "9 AM - 12 PM", "12 PM - 3 PM", "3 PM - 6 PM", "6 PM - 9 PM", "9 PM - 12 AM", "12 AM - 3 AM"))
# Collapse raw Incident.Type values into five broad categories via substring
# matching (grepl); later assignments overwrite earlier ones.
crime$IncidentGroups[grepl("ADMINISTRATIVE",crime$Incident.Type ) | grepl("ALARM RESPONSE", crime$Incident.Type) | grepl("DISORDERLY CONDUCT", crime$Incident.Type)| grepl("DISTURBANCE", crime$Incident.Type)| grepl("EH&S", crime$Incident.Type)| grepl("FIRE", crime$Incident.Type)| grepl("HARASSMENT", crime$Incident.Type)| grepl("HEALTH & SAFETY", crime$Incident.Type)| grepl("OBSCENE ACTIVITY", crime$Incident.Type)| grepl("OFFICER STATUS", crime$Incident.Type)| grepl("SERVICE", crime$Incident.Type)| grepl("TRAFFIC", crime$Incident.Type)| grepl("TRESPASS", crime$Incident.Type)| grepl("VANDALISM", crime$Incident.Type)| grepl("VEHICLE CODE", crime$Incident.Type)] <- "ADMIN/SERVICE/COMPLAINT"
crime$IncidentGroups[grepl("BURGLARY", crime$Incident.Type) | grepl("BURGLARY-MOTOR VEHICLE", crime$Incident.Type) | grepl("BURGLARY-OTHER", crime$Incident.Type) | grepl("EXTORTION", crime$Incident.Type) | grepl("FRAUD", crime$Incident.Type) | grepl("IDENTITY THEFT", crime$Incident.Type) | grepl("PROPERTY", crime$Incident.Type) | grepl("ROBBERY", crime$Incident.Type) | grepl("Theft Petty-Plain", crime$Incident.Type) | grepl("THEFT-GRAND", crime$Incident.Type) | grepl("THEFT-GRAND AUTO", crime$Incident.Type) | grepl("THEFT-GRAND PERSON", crime$Incident.Type) | grepl("THEFT-MOTOR VEHICLE", crime$Incident.Type) | grepl("THEFT-PETTY", crime$Incident.Type) | grepl("THEFT-TRICK", crime$Incident.Type)] <- "THEFT"
crime$IncidentGroups[grepl("CHILD NEGLECT", crime$Incident.Type)] <- "NON-VIOLENT"
crime$IncidentGroups[grepl("NON-FORCIBLE SEX OFFENSE", crime$Incident.Type)] <- "SEXUAL ASSAULT"
crime$IncidentGroups[grepl("ALCOHO", crime$Incident.Type)| grepl("ALCOHOL", crime$Incident.Type) | grepl("NARCOTICS", crime$Incident.Type)] <- "SUBSTANCE ABUSE"
crime$IncidentGroups[grepl("ASSAULT", crime$Incident.Type) | grepl("BATTERY", crime$Incident.Type) | grepl("CRIMINAL THREATS", crime$Incident.Type) | grepl("DOMESTIC VIOLENCE", crime$Incident.Type) | grepl("FORCIBLE SEX OFFENSE", crime$Incident.Type) | grepl("INCIDENT", crime$Incident.Type) | grepl("LA MUNICIPAL CODE", crime$Incident.Type) | grepl("SUICIDE", crime$Incident.Type) | grepl("WARRANT", crime$Incident.Type) | grepl("WEAPONS", crime$Incident.Type) ] <- "VIOLENT ACT"
# Insert a line break into the long label so it fits on facet strips.
crime$IncidentGroups <- gsub("ADMIN/SERVICE/COMPLAINT", "ADMIN/SERVICE/\nCOMPLAINT", crime$IncidentGroups)
# Extract the 2-digit 24h hour ("00".."23") from the occurrence time.
crime$Hours <- strptime(crime$Time.Occurred, "%I:%M %p")
crime$Hours <- format(crime$Hours, "%H")
# NOTE(review): grepl() on bare numbers does substring matching on the hour
# strings (e.g. grepl(1, "10") is TRUE), so early lines mis-assign two-digit
# hours; the final result is still correct only because the exact two-digit
# patterns below overwrite earlier matches. Fragile -- consider
# cut(as.integer(crime$Hours), breaks = seq(0, 24, 3)) instead.
crime$TimeGroups[grepl(0, crime$Hours) | grepl(1, crime$Hours) | grepl(2, crime$Hours)] <- "12 AM - 3 AM"
crime$TimeGroups[grepl(3, crime$Hours) | grepl(4, crime$Hours) | grepl(5, crime$Hours)] <- "3 AM - 6 AM"
crime$TimeGroups[grepl(6, crime$Hours) | grepl(7, crime$Hours) | grepl(8, crime$Hours)] <- "6 AM - 9 AM"
crime$TimeGroups[grepl(9, crime$Hours) | grepl(10, crime$Hours) | grepl(11, crime$Hours)] <- "9 AM - 12 PM"
crime$TimeGroups[grepl(12, crime$Hours) | grepl(13, crime$Hours) | grepl(14, crime$Hours)] <- "12 PM - 3 PM"
crime$TimeGroups[grepl(15, crime$Hours) | grepl(16, crime$Hours) | grepl(17, crime$Hours)] <- "3 PM - 6 PM"
crime$TimeGroups[grepl(18, crime$Hours) | grepl(19, crime$Hours) | grepl(20, crime$Hours)] <- "6 PM - 9 PM"
crime$TimeGroups[grepl(21, crime$Hours) | grepl(22, crime$Hours) | grepl(23, crime$Hours)] <- "9 PM - 12 AM"
crime$TimeGroups <- factor(crime$TimeGroups, levels = c("3 AM - 6 AM", "6 AM - 9 AM", "9 AM - 12 PM", "12 PM - 3 PM", "3 PM - 6 PM", "6 PM - 9 PM", "9 PM - 12 AM", "12 AM - 3 AM"))
#Subset with no XXXs
# NOTE(review): every definition of crime_nox below is commented out, so the
# count() calls further down reference an undefined object and will error.
# The minimal fix is probably `crime_nox <- crime` (filtering disabled), but
# that depends on whether testcrime.csv has an AREA_COLOR column -- confirm.
#crime_nox <- crime[- grep("XXX", crime$AREA_COLOR),]
#crime_nox <- crime[- grep("XX", crime$AREA_COLOR),]
#crime_all <- count(crime_nox, vars=c("Incident.Type","Weekdays", "Month"))
#crime_all_sum <- ddply(crime_all,.(Weekdays,Incident.Type),summarize,sums=sum(freq))
#crime_nox <- subset(crime, crime$AREA_COLOR != XXX, select = c("IncidentGroups","Weekdays", "Month", "freq"))
#Subset Theft only
crime_theft <- count(crime_nox, vars=c("IncidentGroups", "Weekdays", "Month"))
crime_theft <- subset(crime_theft, crime_theft$IncidentGroups == "THEFT", select = c("IncidentGroups","Weekdays", "Month", "freq"))
# Total thefts per weekday (for the bar labels).
theft_sum <- ddply(crime_theft,.(Weekdays),summarize,sums=sum(freq))
#Subset Greek
#crime_greek <- count(crime_nox, vars=c("IncidentGroups","Weekdays", "Month"))
#crime_greek <- subset(crime_greek, crime_greek$AREA_COLOR == "K", select = c("Incident.Type","Weekdays", "Month", "freq", "AREA_COLOR") )
#crime_greek_sum <- ddply(crime_greek,.(Weekdays, IncidentGroups),summarize,sums=sum(freq))
#Frequency by Time Groupings
crime_time <- count(crime_nox, vars=c("IncidentGroups", "TimeGroups"))
#Frequency by Time and Location
#crime_timelocation <- count(crime_nox, vars=c("IncidentGroups", "TimeGroups", "AREA_COLOR"))
#Frequency and Day of the Week, Theft Only
##########################
#Graph bar plot x= Weekday y= freq of theft
final_theft <- ggplot(crime_theft)+geom_bar(aes(x=Weekdays, y=freq, label = freq), stat="identity", fill = "#01516c")
#Economist theme
# NOTE(review): panel.margin was renamed panel.spacing in ggplot2 >= 2.0;
# this warns/errors on modern ggplot2 -- TODO confirm target version.
final_theft + theme_economist(stata = TRUE) + scale_fill_economist( stata = TRUE ) + labs( title = "Thefts at USC by Weekday \n", x= "Weekday", y = "Frequency") +
scale_y_continuous(breaks =seq(0,100,5)) +
theme(axis.text.x=element_text(size = 10, angle = -90, hjust = 1, vjust = 0.5), axis.title.x=element_text(size=12),
axis.title.y=element_text(size=12, vjust = 1.5), panel.margin = unit(1.5, "lines"), plot.title = element_text(size = 14,hjust = 0.5))+
geom_text(data= theft_sum, aes(x=Weekdays, y=sums,label = theft_sum$sums), vjust= 0, size =4)
#Save as .png.
#Height and width measured in cm? not pixels
ggsave(file = "weekday_theft_jameson.png", width = 10, height = 10)
##########################
#Frequency and Day of the Week, All Crime
##########################
#Graph bar plot x= weekday y= freq of all crime
# NOTE(review): crime_all is only defined in a commented-out line above, so
# this plot cannot currently be built.
final_allcrime <- ggplot(crime_all)+geom_bar(aes(x=Weekdays, y=freq), stat="identity", fill = "#01516c")+facet_wrap(~IncidentGroups, nrow = 2)
#Economist theme
final_allcrime + theme_economist(stata = TRUE) + scale_fill_economist( stata = TRUE ) + labs( title = "Crimes at USC by Weekday \n", x= "Weekday", y = "Frequency") +
scale_y_continuous(breaks =seq(0,100,5)) +
theme(axis.text.x=element_text(size = 10, angle = -90, hjust = 1, vjust = 0.5), axis.title.x=element_text(size=12),
axis.title.y=element_text(size=12, vjust = 1.5), panel.margin = unit(1.5, "lines"), plot.title = element_text(size = 14,hjust = 0.5))
#+geom_text(data= crime_all_sum, aes(x=Weekdays, y=sums,label = crime_all_sum$sums), vjust= 0, size =4)
#Save as .png.
#Height and width measured in cm? not pixels
ggsave(file = "weekday_allcrime_jameson.png", width = 10, height = 10)
##########################
#Frequency and Time Groupings by Crime Groupings
##########################
#Graph bar plot x = time-of-day bin, y = frequency, faceted by crime group
# FIX: the column created by count() above is "TimeGroups"; the original
# referenced a non-existent TIME_GROUP column, so ggplot errored.
final_time <- ggplot(crime_time)+geom_bar(aes(x=TimeGroups, y=freq), stat="identity", fill = "#01516c")+facet_wrap(~IncidentGroups, ncol = 3)
#Economist theme
final_time + theme_economist(stata = TRUE) + scale_fill_economist( stata = TRUE ) + labs( title = "Crimes at USC by Time \n", x= "Time Groups", y = "Frequency") +
scale_y_continuous(breaks =seq(0,100,5)) +
theme(axis.text.x=element_text(size = 10, angle = -90, hjust = 1, vjust = 0.5), axis.title.x=element_text(size=12),
axis.title.y=element_text(size=12, vjust = 1.5), panel.margin = unit(1.5, "lines"), plot.title = element_text(size = 14,hjust = 0.5))
#Save as .png.
#Height and width measured in cm? not pixels
ggsave(file = "weekday_timegroups_full_jameson.png", width = 10, height = 10)
| /jameson_graphs.R | no_license | patrickvossler18/USC_DPS_Crime_Analysis | R | false | false | 8,785 | r | library(ggplot2) #ggplot, geom_bar
library(plyr) #count
library(ggthemes) #theme_wsj()
library(grid)
library(chron)
library(plyr)
#Run the R script in the same place as the data or set to relevant directory
setwd("/Users/patrick/Dropbox/Spring 2015/")
#Read data, make it a data frame, reorder the weekdays
crime <- read.csv("testcrime.csv")
crime <- data.frame(crime)
#crime$Weekdays <-factor(crime$Weekdays, levels = c("Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"))
#Change to Date format and get months, reorder months
crime$Month <- months(as.Date(crime$Occurred, "%m/%d/%Y"))
crime$Month <-factor(crime$Month, levels = c("January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"))
crime$Weekdays <- weekdays(as.Date(crime$Occurred, "%m/%d/%Y"), abbreviate = TRUE)
crime$Weekdays <-factor(crime$Weekdays, levels = c("Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"))
#crime$TimeGroups <- factor(crime$TimeGroups, levels = c("3 AM - 6 AM", "6 AM - 9 AM", "9 AM - 12 PM", "12 PM - 3 PM", "3 PM - 6 PM", "6 PM - 9 PM", "9 PM - 12 AM", "12 AM - 3 AM"))
crime$IncidentGroups[grepl("ADMINISTRATIVE",crime$Incident.Type ) | grepl("ALARM RESPONSE", crime$Incident.Type) | grepl("DISORDERLY CONDUCT", crime$Incident.Type)| grepl("DISTURBANCE", crime$Incident.Type)| grepl("EH&S", crime$Incident.Type)| grepl("FIRE", crime$Incident.Type)| grepl("HARASSMENT", crime$Incident.Type)| grepl("HEALTH & SAFETY", crime$Incident.Type)| grepl("OBSCENE ACTIVITY", crime$Incident.Type)| grepl("OFFICER STATUS", crime$Incident.Type)| grepl("SERVICE", crime$Incident.Type)| grepl("TRAFFIC", crime$Incident.Type)| grepl("TRESPASS", crime$Incident.Type)| grepl("VANDALISM", crime$Incident.Type)| grepl("VEHICLE CODE", crime$Incident.Type)] <- "ADMIN/SERVICE/COMPLAINT"
crime$IncidentGroups[grepl("BURGLARY", crime$Incident.Type) | grepl("BURGLARY-MOTOR VEHICLE", crime$Incident.Type) | grepl("BURGLARY-OTHER", crime$Incident.Type) | grepl("EXTORTION", crime$Incident.Type) | grepl("FRAUD", crime$Incident.Type) | grepl("IDENTITY THEFT", crime$Incident.Type) | grepl("PROPERTY", crime$Incident.Type) | grepl("ROBBERY", crime$Incident.Type) | grepl("Theft Petty-Plain", crime$Incident.Type) | grepl("THEFT-GRAND", crime$Incident.Type) | grepl("THEFT-GRAND AUTO", crime$Incident.Type) | grepl("THEFT-GRAND PERSON", crime$Incident.Type) | grepl("THEFT-MOTOR VEHICLE", crime$Incident.Type) | grepl("THEFT-PETTY", crime$Incident.Type) | grepl("THEFT-TRICK", crime$Incident.Type)] <- "THEFT"
crime$IncidentGroups[grepl("CHILD NEGLECT", crime$Incident.Type)] <- "NON-VIOLENT"
crime$IncidentGroups[grepl("NON-FORCIBLE SEX OFFENSE", crime$Incident.Type)] <- "SEXUAL ASSAULT"
crime$IncidentGroups[grepl("ALCOHO", crime$Incident.Type)| grepl("ALCOHOL", crime$Incident.Type) | grepl("NARCOTICS", crime$Incident.Type)] <- "SUBSTANCE ABUSE"
crime$IncidentGroups[grepl("ASSAULT", crime$Incident.Type) | grepl("BATTERY", crime$Incident.Type) | grepl("CRIMINAL THREATS", crime$Incident.Type) | grepl("DOMESTIC VIOLENCE", crime$Incident.Type) | grepl("FORCIBLE SEX OFFENSE", crime$Incident.Type) | grepl("INCIDENT", crime$Incident.Type) | grepl("LA MUNICIPAL CODE", crime$Incident.Type) | grepl("SUICIDE", crime$Incident.Type) | grepl("WARRANT", crime$Incident.Type) | grepl("WEAPONS", crime$Incident.Type) ] <- "VIOLENT ACT"
crime$IncidentGroups <- gsub("ADMIN/SERVICE/COMPLAINT", "ADMIN/SERVICE/\nCOMPLAINT", crime$IncidentGroups)
crime$Hours <- strptime(crime$Time.Occurred, "%I:%M %p")
crime$Hours <- format(crime$Hours, "%H")
crime$TimeGroups[grepl(0, crime$Hours) | grepl(1, crime$Hours) | grepl(2, crime$Hours)] <- "12 AM - 3 AM"
crime$TimeGroups[grepl(3, crime$Hours) | grepl(4, crime$Hours) | grepl(5, crime$Hours)] <- "3 AM - 6 AM"
crime$TimeGroups[grepl(6, crime$Hours) | grepl(7, crime$Hours) | grepl(8, crime$Hours)] <- "6 AM - 9 AM"
crime$TimeGroups[grepl(9, crime$Hours) | grepl(10, crime$Hours) | grepl(11, crime$Hours)] <- "9 AM - 12 PM"
crime$TimeGroups[grepl(12, crime$Hours) | grepl(13, crime$Hours) | grepl(14, crime$Hours)] <- "12 PM - 3 PM"
crime$TimeGroups[grepl(15, crime$Hours) | grepl(16, crime$Hours) | grepl(17, crime$Hours)] <- "3 PM - 6 PM"
crime$TimeGroups[grepl(18, crime$Hours) | grepl(19, crime$Hours) | grepl(20, crime$Hours)] <- "6 PM - 9 PM"
crime$TimeGroups[grepl(21, crime$Hours) | grepl(22, crime$Hours) | grepl(23, crime$Hours)] <- "9 PM - 12 AM"
crime$TimeGroups <- factor(crime$TimeGroups, levels = c("3 AM - 6 AM", "6 AM - 9 AM", "9 AM - 12 PM", "12 PM - 3 PM", "3 PM - 6 PM", "6 PM - 9 PM", "9 PM - 12 AM", "12 AM - 3 AM"))
#Subset with no XXXs
#crime_nox <- crime[- grep("XXX", crime$AREA_COLOR),]
#crime_nox <- crime[- grep("XX", crime$AREA_COLOR),]
#crime_all <- count(crime_nox, vars=c("Incident.Type","Weekdays", "Month"))
#crime_all_sum <- ddply(crime_all,.(Weekdays,Incident.Type),summarize,sums=sum(freq))
#crime_nox <- subset(crime, crime$AREA_COLOR != XXX, select = c("IncidentGroups","Weekdays", "Month", "freq"))
# NOTE(review): every line that creates `crime_nox` (and `crime_all`, used in
# a later section) is commented out above, so the count() calls below will
# fail with "object 'crime_nox' not found" unless crime_nox is created
# elsewhere -- confirm, and restore one of the definitions above if needed.
#Subset Theft only
# Frequency of each IncidentGroups/Weekdays/Month combination (plyr::count),
# then keep only thefts; theft_sum totals thefts per weekday for plot labels.
crime_theft <- count(crime_nox, vars=c("IncidentGroups", "Weekdays", "Month"))
crime_theft <- subset(crime_theft, crime_theft$IncidentGroups == "THEFT", select = c("IncidentGroups","Weekdays", "Month", "freq"))
theft_sum <- ddply(crime_theft,.(Weekdays),summarize,sums=sum(freq))
#Subset Greek
#crime_greek <- count(crime_nox, vars=c("IncidentGroups","Weekdays", "Month"))
#crime_greek <- subset(crime_greek, crime_greek$AREA_COLOR == "K", select = c("Incident.Type","Weekdays", "Month", "freq", "AREA_COLOR") )
#crime_greek_sum <- ddply(crime_greek,.(Weekdays, IncidentGroups),summarize,sums=sum(freq))
#Frequency by Time Groupings
crime_time <- count(crime_nox, vars=c("IncidentGroups", "TimeGroups"))
#Frequency by Time and Location
#crime_timelocation <- count(crime_nox, vars=c("IncidentGroups", "TimeGroups", "AREA_COLOR"))
#Frequency and Day of the Week, Theft Only
##########################
#Graph bar plot x= Weekday y= freq of theft
final_theft <- ggplot(crime_theft)+geom_bar(aes(x=Weekdays, y=freq, label = freq), stat="identity", fill = "#01516c")
#Economist theme; weekday totals from theft_sum are printed above the bars.
# FIX(review): geom_text() previously used `label = theft_sum$sums`, reaching
# into the data frame with `$` inside aes(); since data = theft_sum is already
# supplied, the column should be referenced bare (`sums`).  `panel.margin` is
# deprecated in ggplot2 >= 2.0 in favour of `panel.spacing` (same behaviour).
final_theft + theme_economist(stata = TRUE) + scale_fill_economist( stata = TRUE ) + labs( title = "Thefts at USC by Weekday \n", x= "Weekday", y = "Frequency") +
scale_y_continuous(breaks =seq(0,100,5)) +
theme(axis.text.x=element_text(size = 10, angle = -90, hjust = 1, vjust = 0.5), axis.title.x=element_text(size=12),
axis.title.y=element_text(size=12, vjust = 1.5), panel.spacing = unit(1.5, "lines"), plot.title = element_text(size = 14,hjust = 0.5))+
geom_text(data= theft_sum, aes(x=Weekdays, y=sums,label = sums), vjust= 0, size =4)
#Save as .png.
#Width and height are in inches (ggsave's default device units), not pixels.
ggsave(file = "weekday_theft_jameson.png", width = 10, height = 10)
##########################
#Frequency and Day of the Week, All Crime
##########################
#Graph bar plot x= weekday y= freq of all crime
# NOTE(review): `crime_all` is only created in a commented-out line near the
# top of this section, so this plot will fail with "object 'crime_all' not
# found" unless that definition is restored -- confirm.
final_allcrime <- ggplot(crime_all)+geom_bar(aes(x=Weekdays, y=freq), stat="identity", fill = "#01516c")+facet_wrap(~IncidentGroups, nrow = 2)
#Economist theme
final_allcrime + theme_economist(stata = TRUE) + scale_fill_economist( stata = TRUE ) + labs( title = "Crimes at USC by Weekday \n", x= "Weekday", y = "Frequency") +
scale_y_continuous(breaks =seq(0,100,5)) +
theme(axis.text.x=element_text(size = 10, angle = -90, hjust = 1, vjust = 0.5), axis.title.x=element_text(size=12),
axis.title.y=element_text(size=12, vjust = 1.5), panel.margin = unit(1.5, "lines"), plot.title = element_text(size = 14,hjust = 0.5))
#+geom_text(data= crime_all_sum, aes(x=Weekdays, y=sums,label = crime_all_sum$sums), vjust= 0, size =4)
#Save as .png.
#Width and height are in inches (ggsave's default device units), not pixels.
ggsave(file = "weekday_allcrime_jameson.png", width = 10, height = 10)
##########################
#Frequency and Time Groupings by Crime Groupings
##########################
#Graph bar plot x= Time Groupings y= Frequency by Crime groups
# FIX(review): the x aesthetic previously referenced TIME_GROUP, but the
# column produced by count(crime_nox, vars=c("IncidentGroups","TimeGroups"))
# is named TimeGroups, so the plot errored with "object 'TIME_GROUP' not found".
final_time <- ggplot(crime_time)+geom_bar(aes(x=TimeGroups, y=freq), stat="identity", fill = "#01516c")+facet_wrap(~IncidentGroups, ncol = 3)
#Economist theme
final_time + theme_economist(stata = TRUE) + scale_fill_economist( stata = TRUE ) + labs( title = "Crimes at USC by Time \n", x= "Time Groups", y = "Frequency") +
scale_y_continuous(breaks =seq(0,100,5)) +
theme(axis.text.x=element_text(size = 10, angle = -90, hjust = 1, vjust = 0.5), axis.title.x=element_text(size=12),
axis.title.y=element_text(size=12, vjust = 1.5), panel.margin = unit(1.5, "lines"), plot.title = element_text(size = 14,hjust = 0.5))
#Save as .png.
#Width and height are in inches (ggsave's default device units), not pixels.
ggsave(file = "weekday_timegroups_full_jameson.png", width = 10, height = 10)
|
\name{bl}
\alias{bl}
\title{
Analysis of broken line regression
}
\description{
The function performs analysis of broken line regression
}
\usage{
bl(data, model=1, alpha=0.05, xlab = "Explanatory Variable", ylab = "Response Variable",
position = 1, digits = 6, mean = TRUE, sd=FALSE, legend = TRUE, lty=2,
col="dark blue", pch=20, xlim="default.x",ylim="default.y", ...)
}
\arguments{
\item{data}{
data is a data.frame
The first column contain the treatments (explanatory variable) and the
second column the response variable
}
\item{model}{
model for analysis: 1=two linear; 2=linear plateau (LRP); 3= model 1 with blocks random; 4 = model 2 with blocks random
}
\item{alpha}{
significance level for confidence intervals of the estimated parameters
}
\item{xlab}{
name of explanatory variable
}
\item{ylab}{
name of response variable
}
\item{position}{
position of the equation in the graph:
top=1 (default)
bottomright=2
bottom=3
bottomleft=4
left=5
topleft=6
topright=7
right=8
center=9
}
\item{digits}{
number of digits (default=6)
}
\item{mean}{
mean=TRUE (plot mean of data)
mean=FALSE (plot all data)
}
\item{sd}{
sd=FALSE (plot without standard deviation)
sd=TRUE (plot with standard deviation)
}
\item{legend}{
legend=TRUE (plot legend)
legend=FALSE (not plot legend)
}
\item{lty}{
line type
}
\item{col}{
line color
}
\item{pch}{
point type
}
\item{xlim}{
limits for x
}
\item{ylim}{
limits for y
}
\item{...}{
others graphical parameters (see par)
}
}
\value{
Returns coefficients of the models, t test for coefficients, knot (break point), R squared, adjusted R squared, AIC, BIC, residuals and shapiro-wilk test for residuals.
}
\references{
KAPS, M. and LAMBERSON, W. R. Biostatistics for Animal Science: an introductory text. 2nd Edition. CABI Publishing, Wallingford, Oxfordshire, UK, 2009. 504p.
}
\author{
Emmanuel Arnhold <emmanuelarnhold@yahoo.com.br>
}
\seealso{
lm, ea1(easyanova package), er1
}
\examples{
# the growth of Zagorje turkeys (Kaps and Lamberson, 2009)
weight=c(44,66,100,150,265,370,455,605)
age=c(1,7,14,21,28,35,42,49)
data2=data.frame(age,weight)
# two linear
regplot(data2, model=5, start=c(25,6,10,20))
bl(data2, digits=2)
#linear plateau (LRP)
x=c(0,1,2,3,4,5,6)
y=c(1,2,3,6.1,5.9,6,6.1)
data=data.frame(x,y)
bl(data,model=2, lty=1, col=1, digits=2, position=8)
# effect of blocks
x=c(1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8)
y=c(4,12,9,20,16,25,21,31,28,42,33,46,33,46,34,44)
blocks=rep(c(1,2),8)
dat=data.frame(x,blocks,y)
bl(dat, 3)
bl(dat,4, sd=TRUE)
bl(dat,4, mean=FALSE)
}
| /man/bl.Rd | no_license | cran/easyreg | R | false | false | 2,579 | rd | \name{bl}
\alias{bl}
\title{
Analysis of broken line regression
}
\description{
The function performs analysis of broken line regression
}
\usage{
bl(data, model=1, alpha=0.05, xlab = "Explanatory Variable", ylab = "Response Variable",
position = 1, digits = 6, mean = TRUE, sd=FALSE, legend = TRUE, lty=2,
col="dark blue", pch=20, xlim="default.x",ylim="default.y", ...)
}
\arguments{
\item{data}{
data is a data.frame
The first column contain the treatments (explanatory variable) and the
second column the response variable
}
\item{model}{
model for analysis: 1=two linear; 2=linear plateau (LRP); 3= model 1 with blocks random; 4 = model 2 with blocks random
}
\item{alpha}{
significance level for confidence intervals of the estimated parameters
}
\item{xlab}{
name of explanatory variable
}
\item{ylab}{
name of response variable
}
\item{position}{
position of equation in the graph
top=1
bottomright=2
bottom=3
bottomleft=4
left=5
topleft=6 (default)
topright=7
right=8
center=9
}
\item{digits}{
number of digits (default=6)
}
\item{mean}{
mean=TRUE (plot mean of data)
mean=FALSE (plot all data)
}
\item{sd}{
sd=FALSE (plot without standard deviation)
sd=TRUE (plot with standard deviation)
}
\item{legend}{
legend=TRUE (plot legend)
legend=FALSE (not plot legend)
}
\item{lty}{
line type
}
\item{col}{
line color
}
\item{pch}{
point type
}
\item{xlim}{
limits for x
}
\item{ylim}{
limits for y
}
\item{...}{
others graphical parameters (see par)
}
}
\value{
Returns coefficients of the models, t test for coefficients, knot (break point), R squared, adjusted R squared, AIC, BIC, residuals and shapiro-wilk test for residuals.
}
\references{
KAPS, M. and LAMBERSON, W. R. Biostatistics for Animal Science: an introductory text. 2nd Edition. CABI Publishing, Wallingford, Oxfordshire, UK, 2009. 504p.
}
\author{
Emmanuel Arnhold <emmanuelarnhold@yahoo.com.br>
}
\seealso{
lm, ea1(easyanova package), er1
}
\examples{
# the growth of Zagorje turkeys (Kaps and Lamberson, 2009)
weight=c(44,66,100,150,265,370,455,605)
age=c(1,7,14,21,28,35,42,49)
data2=data.frame(age,weight)
# two linear
regplot(data2, model=5, start=c(25,6,10,20))
bl(data2, digits=2)
#linear and quadratic plateau
x=c(0,1,2,3,4,5,6)
y=c(1,2,3,6.1,5.9,6,6.1)
data=data.frame(x,y)
bl(data,model=2, lty=1, col=1, digits=2, position=8)
# effect of blocks
x=c(1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8)
y=c(4,12,9,20,16,25,21,31,28,42,33,46,33,46,34,44)
blocks=rep(c(1,2),8)
dat=data.frame(x,blocks,y)
bl(dat, 3)
bl(dat,4, sd=TRUE)
bl(dat,4, mean=FALSE)
}
|
# ifelse(condition, action if true, action if false)
# ifelse() is vectorised: the condition is tested element by element.
x<-c(6,10,9,5,20,7,16)
y<-ifelse(x%%2 == 0,x/2,x)
y
#explanation
#if x %% 2 == 0 (the value is even)
#the TRUE action x/2 is used, which
#halves the value, so 6 became 3
#otherwise (odd values) the FALSE
#action is just x, so the value
#is copied over unchanged
## see 9
#ex2
x<-c(2,6,15,10,20,14)
y<- ifelse(x<12,x*5,x*13)
#this means: if x < 12
# do x*5
#otherwise do x*13
y
#ex3
x<-c(25,-36,100,0,-1,49,4,-68)
y<-ifelse(x>=0,sqrt(x),sqrt(-x))
y
#this version warns ("NaNs produced") because ifelse()
#evaluates BOTH branches on the whole vector, so sqrt()
#still sees the negative values; to avoid the warning,
#take sqrt() only after the sign has been fixed:
y<-sqrt(ifelse(x>=0,x,-x))
y
#ex4
#1 if the square exceeds 100, otherwise 0
x<-c(15,6,10,30,4)
y<-ifelse(x^2>100,1,0)
y
| /R-Programming for Absolute Beginers-Udemy/24-The ifelse() Function.R | no_license | manasohara/R-Programming | R | false | false | 626 | r | #ifelse(condition,action if true,action if false)
x<-c(6,10,9,5,20,7,16)
y<-ifelse(x%%2 == 0,x/2,x)
y
#explanation
#if x%%2==0
#true condition given is x/2 which means
#divide the value by 2 so 6 became 3
#else x
#false condition is x only
#so the x value copied as it is
## see 9
#ex2
x<-c(2,6,15,10,20,14)
y<- ifelse(x<12,x*5,x*13)
#this means if x > 12
# do x*5
#if not then do x*3
y
#ex3
x<-c(25,-36,100,0,-1,49,4,-68)
y<-ifelse(x>=0,sqrt(x),sqrt(-x))
y
#we'll get Nan warning to remove
#we can do this
y<-sqrt(ifelse(x>=0,x,-x))
y
#ex4
x<-c(15,6,10,30,4)
y<-ifelse(x^2>100,1,0)
y
|
/decomp_2dim_cnn_keras_fin.R | no_license | eumhwa/decomp_research | R | false | false | 12,528 | r | ||
# Auto-generated fuzz/regression input for DLMtool::LBSPRgen (valgrind test
# case); the degenerate and extreme argument values are intentional.
testlist <- list(Beta = 0, CVLinf = -1.37672045511449e-268, FM = 1.09028143739796e-310, L50 = 0, L95 = 0, LenBins = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), LenMids = numeric(0), Linf = 0, MK = 0, Ml = numeric(0), Prob = structure(0, .Dim = c(1L, 1L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 537479424L, rLens = numeric(0))
result <- do.call(DLMtool::LBSPRgen,testlist)
# Print the structure of whatever LBSPRgen returns for this input.
str(result)
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result) |
# Shiny server for an mpg-prediction app: fits a glm via caret at start-up
# and serves predictions for user-selected predictor values.
library(shiny)
library(RCurl)   # NOTE(review): RCurl does not appear to be used in this file
library(caret)
# Read the training data and fit the model once when the app starts
# (shared across sessions rather than re-fit on every request).
mpg <- read.csv("mpg.csv")
modFit <- train(mpg ~ cyl + disp + horse + weight + accel + year + origin, method="glm", data=mpg)
shinyServer(
  function(input, output) {
    # Render the model's prediction for the predictor values currently
    # selected in the UI.  Reading the inputs inside renderPrint makes the
    # output reactive: it re-runs whenever any of them changes.
    output$prediction <- renderPrint({
      newcase <- data.frame(
        cyl    = input$cyl,
        disp   = input$disp,
        horse  = input$horse,
        weight = input$weight,
        accel  = input$accel,
        year   = input$year,
        origin = input$origin
      )
      predict(modFit, newcase)
    })
  }
)
) | /server.R | no_license | HaojunZhu/Coursera_Data_Products_Project | R | false | false | 610 | r | library(shiny)
library(RCurl)
library(caret)
mpg <- read.csv("mpg.csv")
modFit <- train(mpg ~ cyl + disp + horse + weight + accel + year + origin, method="glm", data=mpg)
shinyServer(
function(input, output) {
#pred = predict(modFit, data)
#output$prediction <- renderPrint ({as.string(pred)})
output$prediction <- renderPrint ({
cyl = input$cyl
disp = input$disp
horse = input$horse
weight = input$weight
accel = input$accel
year = input$year
origin = input$origin
predict(modFit,data.frame(cyl, disp, horse, weight, accel, year, origin))})
}
) |
# Load models and create ideal point plots and discrimination tables/plots
# Code for When National Unity Governments are neither National, United, nor Governments: The Case of Tunisia
# by Robert Kubinec
# NOTE(review): require() returns FALSE instead of erroring when a package is
# missing; library() is the safer choice for hard dependencies in a script.
require(idealstan)
require(bayesplot)
require(dplyr)
require(tidyr)
require(ggplot2)
require(lubridate)
require(stringr)
require(forcats)
require(xtable)
# Pre-fitted idealstan model objects (the "_vb" suffix indicates variational
# Bayes estimation):
#   arp_ar1 / arp_rw       -- party-level AR(1) and random-walk models
#   group2_ar1 / group2_rw -- two-group (Islamist vs Secularist) variants
arp_ar1 <- readRDS('data/estimate_all_ar1_vb.rds')
arp_rw <- readRDS('data/estimate_all_rw_vb.rds')
group2_ar1 <- readRDS('data/estimate_all_2groups_ar_vb.rds')
group2_rw <- readRDS('data/estimate_all_2groups_rw_vb.rds')
# basic descriptives
# Histogram of roll-call votes over time: one row per unique law/date pair so
# repeated ballots on the same law are not double counted.
all_data <- readRDS('data/combine_sessions.rds')
select(all_data,law_unique,law_date) %>%
  distinct %>%
  ggplot(aes(x=law_date)) + geom_histogram(fill='grey',
                                           colour=NA) +
  theme_minimal() + xlab('') + ylab('Number of Roll Call Votes') +
  theme(panel.grid=element_blank()) +
  # Dashed reference line at the signing of the Carthage Agreement.
  geom_vline(aes(xintercept=lubridate::ymd('2016-07-30')),
             linetype=2) +
  annotate(geom='text',x=ymd('2016-07-30'),y=450,label='Carthage Agreement') +
  ggtitle('Legislative Activity in the Tunisian Parliament')
ggsave('bill_density.png')
# test with linear model
# Per-date polarization: difference between the Islamist and Secularist
# medians of the posterior ideal points, merged back onto the vote-level data.
all_scores <- summary(group2_rw,aggregate=F) %>%
  group_by(Time_Point) %>%
  summarize(polar=median(Ideal_Points[Group=='Islamists']) - median(Ideal_Points[Group=='Secularists']),
            polar_high=quantile(Ideal_Points[Group=='Islamists'],.95) - quantile(Ideal_Points[Group=='Secularists'],.95),
            polar_low=quantile(Ideal_Points[Group=='Islamists'],.05) - quantile(Ideal_Points[Group=='Secularists'],.05)) %>%
  left_join(all_data,by=c(Time_Point='law_date'))
# NOTE(review): polar_high/polar_low are differences of group-wise quantiles,
# not a posterior interval of the difference itself -- confirm this is the
# intended uncertainty band for the ribbon below.
all_scores %>%
  distinct(polar,Time_Point,.keep_all = T) %>%
  ggplot(aes(y=polar,x=Time_Point)) +
  geom_line(linetype=2,size=1) +
  geom_ribbon(aes(ymin=polar_high,
                  ymax=polar_low),
              fill='grey80',
              alpha=0.5) +
  ylab('Difference Between Islamists and Secularists') +
  xlab('') +
  theme(panel.grid=element_blank(),
        panel.background = element_blank())
ggsave('diff_over_time.png')
# Simple OLS: does the day's polarization score predict how many votes
# occurred on that day?
all_scores_dist <- distinct(all_scores,polar,Time_Point,law_unique) %>%
  count(polar,Time_Point)
summary(lm(n~polar,data=all_scores_dist))
all_data <- mutate(all_data,
proj_finance=grepl(x=law_unique,
pattern='financement du projet') & grepl(x=law_unique,
pattern='accord'))
all_data %>% group_by(law_date) %>%
summarize(mean_proj=mean(proj_finance)) %>%
ggplot(aes(y=mean_proj,
x=law_date)) +
geom_col() +
scale_y_continuous(labels=scales::percent) +
theme(panel.grid = element_blank(),
panel.background = element_blank()) +
xlab('') +
ylab('Percentage of Votes') +
geom_vline(xintercept = ymd('2015-02-01'),linetype=2) +
annotate('text',
x=ymd('2013-12-01'),
y=.25,
label='2015 National Unity Government Formed')
ggsave('vote_finance.png')
# ARP AR1 ---------------------------------------------------
# need to reverse the scale because the ID strategy ends up reversing itself
# for compatibility with the RW model
# (hence the y-axis labels below run from Islamist at the top to Secular at
# the bottom, the mirror image of the RW plots further down)
id_plot_legis_dyn(arp_ar1,person_plot=F,use_ci = F,plot_text = F) +
  geom_vline(aes(xintercept=lubridate::ymd('2016-07-30')),
             linetype=2) +
  scale_y_continuous(labels=c('More\nIslamist','6','3','0.0','-3','-6','More\nSecular'),
                     breaks=c(10,6,3,0.0,-3,-6,-10)) +
  guides(colour='none') +
  facet_wrap(~group_id)
#ggtitle('Stationary Party Ideal Points for Tunisian National Representative Assembly (ARP)')
ggsave('party_over_time_ar1_vb.png')
# Covariate (Carthage Agreement) effect plot for all parties.
id_plot_cov(arp_ar1) +
  scale_x_continuous(labels=c('More\nIslamist','5','0.0','-5','More\nSecular'),
                     breaks=c(8,5,0.0,-5,-8))
# ggtitle('Effect of Carthage Agreement on Party-level Ideal Points',
#         subtitle = 'Based on Rollcall Vote Data from the Tunisian National Representative Assembly (ARP)')
ggsave('id_plot_cov_arp_all.png')
# Same covariate plot restricted to the Horra bloc coefficients.
id_plot_cov(arp_ar1,filter_cov = c('change:blocHorra','change'))
ggsave('id_plot_cov_arp_horra.png')
id_plot_legis_var(arp_ar1,person_labels = F)
ggsave('id_plot_var_arp_ar1.png')
# ARP rw ------------------------------------------------------------------
# Random-walk model: highlighted trajectories for the four major parties.
id_plot_legis_dyn(arp_rw,person_plot=F,use_ci = F,
                  highlight=c('Nahda','Nidaa Tounes','Front Populaire','Horra')) +
  geom_vline(aes(xintercept=lubridate::ymd('2016-07-30')),
             linetype=2) +
  scale_y_continuous(labels=c('More\nSecular','-1.5','0.0','1.5','More\nIslamist'),
                     breaks=c(-2.5,-1.5,0.0,1.5,2.5)) +
  guides(colour='none')
#ggtitle('Random-Walk Party Ideal Points for Tunisian National Representative Assembly (ARP)')
ggsave('party_over_time_rw_vb.png')
# Same model, faceted per party with credible intervals shown.
id_plot_legis_dyn(arp_rw,person_plot=F,use_ci = T,plot_text = F) +
  geom_vline(aes(xintercept=lubridate::ymd('2016-07-30')),
             linetype=2) +
  scale_y_continuous(labels=c('More\nSecular','-1.5','0.0','1.5','More\nIslamist'),
                     breaks=c(-2.5,-1.5,0.0,1.5,2.5)) +
  guides(colour='none') +
  facet_wrap(~group_id)
#ggtitle('Random-Walk Party Ideal Points for Tunisian National Representative Assembly (ARP)')
ggsave('party_over_time_panel_rw_vb.png')
id_plot_legis_var(arp_rw,person_labels = F)
ggsave('id_plot_var_arp_rw.png')
# 2 groups RW -------------------------------------------------------------
# Two-bloc random-walk trajectories with dashed/dotted reference lines for
# three political events, each labelled with annotate().
id_plot_legis_dyn(group2_rw,
                  group_color=F,person_plot=F,text_size_label=8) +
  geom_vline(aes(xintercept=lubridate::ymd('2016-07-30')),
             linetype=2) +
  geom_vline(aes(xintercept=lubridate::ymd('2014-10-26')),
             linetype=3) +
  geom_vline(aes(xintercept=lubridate::ymd('2013-10-23')),
             linetype=4) +
  annotate(geom='text',x=ymd('2016-07-30'),y=0.9,label=' Carthage Agreement') +
  annotate(geom='text',x=ymd('2014-12-2'),y=0.65,label='New Parliament\nSession') +
  annotate(geom='text',x=ymd('2013-10-23'),y=.7,label='Troika\nNegotiations') +
  scale_y_continuous(labels=c('More\nSecular','0.0','0.5','More\nIslamist'),
                     breaks=c(0.0,0.5,1.0,1.5),
                     limits=c(-0.25,1.5)) +
  scale_color_discrete(guide='none') +
  scale_x_date(date_breaks = '1 year',
               date_labels='%Y')
ggsave('party_over_time_2groups_1mo_rw.png')
# 2 groups AR1 ------------------------------------------------------------
# NOTE(review): the 2013-10-23 line is labelled 'Troika Negotiations' in the
# RW plot above but 'Anti-Islamist Protests' here -- confirm which is meant.
id_plot_legis_dyn(group2_ar1,
                  group_color=F,person_plot=F,text_size_label=8) +
  geom_vline(aes(xintercept=lubridate::ymd('2016-07-30')),
             linetype=2) +
  geom_vline(aes(xintercept=lubridate::ymd('2014-10-26')),
             linetype=3) +
  geom_vline(aes(xintercept=lubridate::ymd('2013-10-23')),
             linetype=4) +
  annotate(geom='text',x=ymd('2016-07-30'),y=0.2,label=' Carthage Agreement') +
  annotate(geom='text',x=ymd('2014-12-2'),y=1.8,label='New Parliament\nSession') +
  annotate(geom='text',x=ymd('2013-10-23'),y=.7,label='Anti-Islamist\nProtests') +
  scale_y_continuous(labels=c('More\nSecular','0.0','0.5','More\nIslamist'),
                     breaks=c(-0.5,0.0,1,2)) +
  scale_color_discrete(guide='none') +
  scale_x_date(date_breaks = '1 year',
               date_labels='%Y')
ggsave('party_over_time_2groups_1mo_ar.png')
# covariate plot
id_plot_cov(group2_ar1) +
  ggtitle('Effect of 2014 Election on Party-level Ideal Points',
          subtitle = 'Based on Rollcall Vote Data from 1st and 2nd Sessions of Tunisian Parliament') +
  scale_x_continuous(labels=c('More\nSecular','-0.25','0.0','0.25','More\nIslamist'),
                     breaks=c(-0.5,-0.25,0.0,0.25,0.5))
ggsave('id_plot_cov_2groups.png')
# calculate interaction for Islamists
# Posterior median and 90% interval of the 3rd coefficient plus its
# interaction (column 4), then of the 3rd coefficient alone; printed to the
# console rather than saved.
cov_iter <- rstan::extract(group2_ar1@stan_samples,'legis_x')
median(cov_iter[[1]][,3] + cov_iter[[1]][,4])
quantile(cov_iter[[1]][,3] + cov_iter[[1]][,4],.95)
quantile(cov_iter[[1]][,3] + cov_iter[[1]][,4],.05)
median(cov_iter[[1]][,3])
quantile(cov_iter[[1]][,3],.95)
quantile(cov_iter[[1]][,3],.05)
# bill discrim ------------------------------------------------------------
# pull out 2-group bill discrimination parameters
all_params <- summary(group2_rw,pars='items')
# Density of discrimination values by item type; bills farther from zero
# discriminate (polarize) more between the two blocs.
all_params %>%
  filter(grepl(pattern = 'Discrimination',x=`Item Type`)) %>%
  ggplot(aes(x=`Posterior Median`)) +
  geom_density(aes(fill=`Item Type`),colour=NA,alpha=0.5) +
  scale_fill_brewer(name='Parameter\nType') +
  theme(panel.background = element_blank(),
        panel.grid = element_blank()) +
  xlab('Ideal Point Scale') +
  scale_x_continuous(labels=c('More\nSecular','0.0','More\nIslamist'),
                     breaks=c(-2.5,0.0,2.5)) +
  ylab('Density') +
  ggtitle('Discrimination Values of Bills for Combined Tunisian Parliament',
          subtitle = 'Higher Distance from Zero Indicates Higher Bill Polarization')
ggsave('combined_discrim_density.png')
# Build a LaTeX table of the 25 most extreme non-inflated discrimination
# parameters at one pole of the scale.
# FIX(review): the top and bottom tables were two 10-line pipelines that
# differed only in sort direction; factored into a single helper so the
# filter and column mapping live in one place.
#
# params     -- output of summary(<idealstan fit>, pars='items')
# decreasing -- TRUE for the most Islamist-discriminating bills,
#               FALSE for the most Secularist-discriminating bills
# returns an xtable ready for print(type='latex', ...)
make_discrim_table <- function(params, decreasing = TRUE) {
  filtered <- params %>%
    filter(grepl(x=`Item Type`, pattern='Non-Inflated Discrimination'))
  ranked <- if (decreasing) {
    arrange(filtered, desc(`Posterior Median`))
  } else {
    arrange(filtered, `Posterior Median`)
  }
  ranked %>%
    slice(1:25) %>%
    select(Vote='Parameter',
           `Low Interval`="Low Posterior Interval",
           `Discrimination Score`="Posterior Median",
           `High Interval`="High Posterior Interval") %>%
    xtable(.)
}
all_out_top <- make_discrim_table(all_params, decreasing = TRUE)
print(all_out_top,type='latex',file='discrim_bill_combined_high.tex')
all_out_bottom <- make_discrim_table(all_params, decreasing = FALSE)
print(all_out_bottom,type='latex',file='discrim_bill_combined_low.tex')
# Calculate average discrimination of items over time
# Join the item summaries onto the vote matrix to recover each item's date.
bill_sum <- left_join(all_params,group2_ar1@score_data@score_matrix,by=c(Parameter='item_id'))
require(ggridges)
bill_plot <- bill_sum %>%
  distinct(Parameter,time_id,`Posterior Median`,`Item Type`) %>%
  mutate(Discrimination=`Posterior Median`) %>%
  filter(grepl(x=`Item Type`,
               pattern='Discrimination'))
# Ridgeline densities of bill discrimination, one ridge per time point.
ggplot(bill_plot,aes(x=Discrimination,
                     y=time_id,
                     group=time_id)) +
  geom_density_ridges(scale = 10, size = 0.25, rel_min_height = 0.03,
                      colour='white',
                      alpha=0.8,
                      aes(fill=`Item Type`)) +
  ylab('') +
  scale_fill_brewer(palette='Paired') +
  guides(fill='none') +
  scale_x_continuous(labels=c('More\nIslamist','2','0.0','-2','More\nSecular'),
                     breaks=c(4,2,0.0,-2,-4)) +
  xlab('Bill Discrimination Values') +
  theme_ridges(grid=FALSE) +
  ggtitle('Density of Tunisian Parliament Bill Discrimination Over Time') +
  facet_wrap(~`Item Type`) +
  theme(strip.background = element_blank())
ggsave('bill_plot_density.png')
# NOTE(review): avg_sq_discrim is the SQUARED discrimination (abs() of a
# square is redundant), yet the axis label says 'Absolute Discrimination' --
# confirm whether abs(Discrimination) or Discrimination^2 is intended.
bill_plot %>%
  mutate(avg_sq_discrim=abs(Discrimination^2)) %>%
  ggplot(aes(y=avg_sq_discrim,x=time_id)) +
  theme(panel.grid = element_blank(),
        panel.background = element_blank()) +
  ylab('Absolute Discrimination') +
  geom_point(alpha=0.2) +
  stat_summary(colour='red',fun.data='mean_cl_normal',size=.6) +
  ggtitle('Absolute Discrimination of Bills From Both Tunisian Parliaments',
          subtitle='Red Points Show Average Discrimination by Time Point') +
  facet_wrap(~`Item Type`) +
  xlab('') +
  theme(strip.background = element_blank())
ggsave('bill_plot_discrim_avg.png')
# pull out ARP bill discrimination parameters
# all_params <- summary(estimate_all_rw,pars='items')
# just_discrim <- filter(all_params,grepl(pattern = 'sigma_reg_free',x=parameters)) %>%
# mutate(abs_score=abs(posterior_median),
# index=as.numeric(str_extract(parameters,'[0-9]+'))) %>%
# arrange(desc(abs_score))
# group_ids <- select(estimate_all_rw@score_data@score_matrix,item_id) %>%
# mutate(index=as.numeric(item_id)) %>%
# distinct
#
# just_discrim <- left_join(just_discrim,group_ids,'index')
#
# all_out <- xtable(select(just_discrim,
# Vote='item_id',
# `Discrimination Score`="posterior_median",
# `Standard Deviation (Error)`="posterior_sd"))
# print(all_out,type='latex',file='discrim_bill.tex')
| /R_scripts/bawsala_plot.R | no_license | saudiwin/tunisia_parliament | R | false | false | 12,611 | r | # Load models and create ideal point plots and discrimination tables/plots
# Code for When National Unity Governments are neither National, United, nor Governments: The Case of Tunisia
# by Robert Kubinec
require(idealstan)
require(bayesplot)
require(dplyr)
require(tidyr)
require(ggplot2)
require(lubridate)
require(stringr)
require(forcats)
require(xtable)
arp_ar1 <- readRDS('data/estimate_all_ar1_vb.rds')
arp_rw <- readRDS('data/estimate_all_rw_vb.rds')
group2_ar1 <- readRDS('data/estimate_all_2groups_ar_vb.rds')
group2_rw <- readRDS('data/estimate_all_2groups_rw_vb.rds')
# basic descriptives
# move plot to bawsala plot
all_data <- readRDS('data/combine_sessions.rds')
select(all_data,law_unique,law_date) %>%
distinct %>%
ggplot(aes(x=law_date)) + geom_histogram(fill='grey',
colour=NA) +
theme_minimal() + xlab('') + ylab('Number of Roll Call Votes') +
theme(panel.grid=element_blank()) +
geom_vline(aes(xintercept=lubridate::ymd('2016-07-30')),
linetype=2) +
annotate(geom='text',x=ymd('2016-07-30'),y=450,label='Carthage Agreement') +
ggtitle('Legislative Activity in the Tunisian Parliament')
ggsave('bill_density.png')
# test with linear model
# need ideal point scores
all_scores <- summary(group2_rw,aggregate=F) %>%
group_by(Time_Point) %>%
summarize(polar=median(Ideal_Points[Group=='Islamists']) - median(Ideal_Points[Group=='Secularists']),
polar_high=quantile(Ideal_Points[Group=='Islamists'],.95) - quantile(Ideal_Points[Group=='Secularists'],.95),
polar_low=quantile(Ideal_Points[Group=='Islamists'],.05) - quantile(Ideal_Points[Group=='Secularists'],.05)) %>%
left_join(all_data,by=c(Time_Point='law_date'))
all_scores %>%
distinct(polar,Time_Point,.keep_all = T) %>%
ggplot(aes(y=polar,x=Time_Point)) +
geom_line(linetype=2,size=1) +
geom_ribbon(aes(ymin=polar_high,
ymax=polar_low),
fill='grey80',
alpha=0.5) +
ylab('Difference Between Islamists and Secularists') +
xlab('') +
theme(panel.grid=element_blank(),
panel.background = element_blank())
ggsave('diff_over_time.png')
all_scores_dist <- distinct(all_scores,polar,Time_Point,law_unique) %>%
count(polar,Time_Point)
summary(lm(n~polar,data=all_scores_dist))
# plot relative approval of project (aid) finance
all_data <- mutate(all_data,
proj_finance=grepl(x=law_unique,
pattern='financement du projet') & grepl(x=law_unique,
pattern='accord'))
all_data %>% group_by(law_date) %>%
summarize(mean_proj=mean(proj_finance)) %>%
ggplot(aes(y=mean_proj,
x=law_date)) +
geom_col() +
scale_y_continuous(labels=scales::percent) +
theme(panel.grid = element_blank(),
panel.background = element_blank()) +
xlab('') +
ylab('Percentage of Votes') +
geom_vline(xintercept = ymd('2015-02-01'),linetype=2) +
annotate('text',
x=ymd('2013-12-01'),
y=.25,
label='2015 National Unity Government Formed')
ggsave('vote_finance.png')
# ARP AR1 ---------------------------------------------------
# need to reverse the scale because the ID strategy ends up reversing itself
# for compatibility with the RW model
id_plot_legis_dyn(arp_ar1,person_plot=F,use_ci = F,plot_text = F) +
geom_vline(aes(xintercept=lubridate::ymd('2016-07-30')),
linetype=2) +
scale_y_continuous(labels=c('More\nIslamist','6','3','0.0','-3','-6','More\nSecular'),
breaks=c(10,6,3,0.0,-3,-6,-10)) +
guides(colour='none') +
facet_wrap(~group_id)
#ggtitle('Stationary Party Ideal Points for Tunisian National Representative Assembly (ARP)')
ggsave('party_over_time_ar1_vb.png')
id_plot_cov(arp_ar1) +
scale_x_continuous(labels=c('More\nIslamist','5','0.0','-5','More\nSecular'),
breaks=c(8,5,0.0,-5,-8))
# ggtitle('Effect of Carthage Agreement on Party-level Ideal Points',
# subtitle = 'Based on Rollcall Vote Data from the Tunisian National Representative Assembly (ARP)')
ggsave('id_plot_cov_arp_all.png')
id_plot_cov(arp_ar1,filter_cov = c('change:blocHorra','change'))
ggsave('id_plot_cov_arp_horra.png')
id_plot_legis_var(arp_ar1,person_labels = F)
ggsave('id_plot_var_arp_ar1.png')
# ARP rw ------------------------------------------------------------------
id_plot_legis_dyn(arp_rw,person_plot=F,use_ci = F,
highlight=c('Nahda','Nidaa Tounes','Front Populaire','Horra')) +
geom_vline(aes(xintercept=lubridate::ymd('2016-07-30')),
linetype=2) +
scale_y_continuous(labels=c('More\nSecular','-1.5','0.0','1.5','More\nIslamist'),
breaks=c(-2.5,-1.5,0.0,1.5,2.5)) +
guides(colour='none')
#ggtitle('Random-Walk Party Ideal Points for Tunisian National Representative Assembly (ARP)')
ggsave('party_over_time_rw_vb.png')
id_plot_legis_dyn(arp_rw,person_plot=F,use_ci = T,plot_text = F) +
geom_vline(aes(xintercept=lubridate::ymd('2016-07-30')),
linetype=2) +
scale_y_continuous(labels=c('More\nSecular','-1.5','0.0','1.5','More\nIslamist'),
breaks=c(-2.5,-1.5,0.0,1.5,2.5)) +
guides(colour='none') +
facet_wrap(~group_id)
#ggtitle('Random-Walk Party Ideal Points for Tunisian National Representative Assembly (ARP)')
ggsave('party_over_time_panel_rw_vb.png')
id_plot_legis_var(arp_rw,person_labels = F)
ggsave('id_plot_var_arp_rw.png')
# 2 groups RW -------------------------------------------------------------
id_plot_legis_dyn(group2_rw,
group_color=F,person_plot=F,text_size_label=8) +
geom_vline(aes(xintercept=lubridate::ymd('2016-07-30')),
linetype=2) +
geom_vline(aes(xintercept=lubridate::ymd('2014-10-26')),
linetype=3) +
geom_vline(aes(xintercept=lubridate::ymd('2013-10-23')),
linetype=4) +
annotate(geom='text',x=ymd('2016-07-30'),y=0.9,label=' Carthage Agreement') +
annotate(geom='text',x=ymd('2014-12-2'),y=0.65,label='New Parliament\nSession') +
annotate(geom='text',x=ymd('2013-10-23'),y=.7,label='Troika\nNegotiations') +
scale_y_continuous(labels=c('More\nSecular','0.0','0.5','More\nIslamist'),
breaks=c(0.0,0.5,1.0,1.5),
limits=c(-0.25,1.5)) +
scale_color_discrete(guide='none') +
scale_x_date(date_breaks = '1 year',
date_labels='%Y')
ggsave('party_over_time_2groups_1mo_rw.png')
# 2 groups AR1 ------------------------------------------------------------
id_plot_legis_dyn(group2_ar1,
group_color=F,person_plot=F,text_size_label=8) +
geom_vline(aes(xintercept=lubridate::ymd('2016-07-30')),
linetype=2) +
geom_vline(aes(xintercept=lubridate::ymd('2014-10-26')),
linetype=3) +
geom_vline(aes(xintercept=lubridate::ymd('2013-10-23')),
linetype=4) +
annotate(geom='text',x=ymd('2016-07-30'),y=0.2,label=' Carthage Agreement') +
annotate(geom='text',x=ymd('2014-12-2'),y=1.8,label='New Parliament\nSession') +
annotate(geom='text',x=ymd('2013-10-23'),y=.7,label='Anti-Islamist\nProtests') +
scale_y_continuous(labels=c('More\nSecular','0.0','0.5','More\nIslamist'),
breaks=c(-0.5,0.0,1,2)) +
scale_color_discrete(guide='none') +
scale_x_date(date_breaks = '1 year',
date_labels='%Y')
ggsave('party_over_time_2groups_1mo_ar.png')
# covariate plot
id_plot_cov(group2_ar1) +
ggtitle('Effect of 2014 Election on Party-level Ideal Points',
subtitle = 'Based on Rollcall Vote Data from 1st and 2nd Sessions of Tunisian Parliament') +
scale_x_continuous(labels=c('More\nSecular','-0.25','0.0','0.25','More\nIslamist'),
breaks=c(-0.5,-0.25,0.0,0.25,0.5))
ggsave('id_plot_cov_2groups.png')
# calculate interaction for Islamists
cov_iter <- rstan::extract(group2_ar1@stan_samples,'legis_x')
median(cov_iter[[1]][,3] + cov_iter[[1]][,4])
quantile(cov_iter[[1]][,3] + cov_iter[[1]][,4],.95)
quantile(cov_iter[[1]][,3] + cov_iter[[1]][,4],.05)
median(cov_iter[[1]][,3])
quantile(cov_iter[[1]][,3],.95)
quantile(cov_iter[[1]][,3],.05)
# bill discrim ------------------------------------------------------------
# pull out 2-group bill discrimination parameters
all_params <- summary(group2_rw,pars='items')
# plot item midpoints across categories
all_params %>%
filter(grepl(pattern = 'Discrimination',x=`Item Type`)) %>%
ggplot(aes(x=`Posterior Median`)) +
geom_density(aes(fill=`Item Type`),colour=NA,alpha=0.5) +
scale_fill_brewer(name='Parameter\nType') +
theme(panel.background = element_blank(),
panel.grid = element_blank()) +
xlab('Ideal Point Scale') +
scale_x_continuous(labels=c('More\nSecular','0.0','More\nIslamist'),
breaks=c(-2.5,0.0,2.5)) +
ylab('Density') +
ggtitle('Discrimination Values of Bills for Combined Tunisian Parliament',
subtitle = 'Higher Distance from Zero Indicates Higher Bill Polarization')
ggsave('combined_discrim_density.png')
all_out_top <- all_params %>%
filter(grepl(x=`Item Type`, pattern='Non-Inflated Discrimination')) %>%
arrange(desc(`Posterior Median`)) %>%
slice(1:25) %>%
select(Vote='Parameter',
`Low Interval`="Low Posterior Interval",
`Discrimination Score`="Posterior Median",
`High Interval`="High Posterior Interval") %>%
xtable(.)
print(all_out_top,type='latex',file='discrim_bill_combined_high.tex')
all_out_bottom <- all_params %>%
filter(grepl(x=`Item Type`, pattern='Non-Inflated Discrimination')) %>%
arrange(`Posterior Median`) %>%
slice(1:25) %>%
select(Vote='Parameter',
`Low Interval`="Low Posterior Interval",
`Discrimination Score`="Posterior Median",
`High Interval`="High Posterior Interval") %>%
xtable(.)
print(all_out_bottom,type='latex',file='discrim_bill_combined_low.tex')
# Calculate average discrimination of items over time
bill_sum <- left_join(all_params,group2_ar1@score_data@score_matrix,by=c(Parameter='item_id'))
require(ggridges)
bill_plot <- bill_sum %>%
distinct(Parameter,time_id,`Posterior Median`,`Item Type`) %>%
mutate(Discrimination=`Posterior Median`) %>%
filter(grepl(x=`Item Type`,
pattern='Discrimination'))
ggplot(bill_plot,aes(x=Discrimination,
y=time_id,
group=time_id)) +
geom_density_ridges(scale = 10, size = 0.25, rel_min_height = 0.03,
colour='white',
alpha=0.8,
aes(fill=`Item Type`)) +
ylab('') +
scale_fill_brewer(palette='Paired') +
guides(fill='none') +
scale_x_continuous(labels=c('More\nIslamist','2','0.0','-2','More\nSecular'),
breaks=c(4,2,0.0,-2,-4)) +
xlab('Bill Discrimination Values') +
theme_ridges(grid=FALSE) +
ggtitle('Density of Tunisian Parliament Bill Discrimination Over Time') +
facet_wrap(~`Item Type`) +
theme(strip.background = element_blank())
ggsave('bill_plot_density.png')
# Absolute discrimination by time point, with red per-time-point means.
# BUG FIX: the original computed abs(Discrimination^2) -- abs() is a no-op
# on a square, so the plot actually showed *squared* discrimination while
# the axis label and title describe *absolute* discrimination. Plot the
# absolute value so the figure matches its labels.
bill_plot %>%
  mutate(abs_discrim = abs(Discrimination)) %>%
  ggplot(aes(y = abs_discrim, x = time_id)) +
  theme(panel.grid = element_blank(),
        panel.background = element_blank()) +
  ylab('Absolute Discrimination') +
  geom_point(alpha = 0.2) +
  # mean_cl_normal gives the mean with a normal-approximation CI
  stat_summary(colour = 'red', fun.data = 'mean_cl_normal', size = .6) +
  ggtitle('Absolute Discrimination of Bills From Both Tunisian Parliaments',
          subtitle = 'Red Points Show Average Discrimination by Time Point') +
  facet_wrap(~`Item Type`) +
  xlab('') +
  theme(strip.background = element_blank())
ggsave('bill_plot_discrim_avg.png')
# pull out ARP bill discrimination parameters
# all_params <- summary(estimate_all_rw,pars='items')
# just_discrim <- filter(all_params,grepl(pattern = 'sigma_reg_free',x=parameters)) %>%
# mutate(abs_score=abs(posterior_median),
# index=as.numeric(str_extract(parameters,'[0-9]+'))) %>%
# arrange(desc(abs_score))
# group_ids <- select(estimate_all_rw@score_data@score_matrix,item_id) %>%
# mutate(index=as.numeric(item_id)) %>%
# distinct
#
# just_discrim <- left_join(just_discrim,group_ids,'index')
#
# all_out <- xtable(select(just_discrim,
# Vote='item_id',
# `Discrimination Score`="posterior_median",
# `Standard Deviation (Error)`="posterior_sd"))
# print(all_out,type='latex',file='discrim_bill.tex')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/enhance_manifesto_df.R
\name{enhance_uncoded_manifesto_}
\alias{enhance_uncoded_manifesto_}
\title{Enhance uncoded manifesto data frame}
\usage{
enhance_uncoded_manifesto_(u.df)
}
\arguments{
\item{u.df}{the 1-row manifesto dataframe recording the entire manifesto text in a unit-length character column 'text'}
}
\value{
The input \code{u.df} \code{\link[tibble]{tibble}} enhanced by column
'qs_nr' (running quasi-sentence counter),
'sent_nr' (running sentence counter),
'role' (indicator, here 'qs' for all rows), and
'bloc_nr'
}
\description{
\code{enhance_manifesto_df} helper
}
\note{
The function takes the unit-length character contained in column 'text' of the 1-row data frame,
and splits it into sentences.
Sentences are then numbered, and because the manifesto is uncoded, \code{qs_nr == sent_nr},
'role' is 'qs' and bloc_nr is 1 for all rows.
}
| /man/enhance_uncoded_manifesto_.Rd | no_license | haukelicht/manifestoEnhanceR | R | false | true | 970 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/enhance_manifesto_df.R
\name{enhance_uncoded_manifesto_}
\alias{enhance_uncoded_manifesto_}
\title{Enhance uncoded manifesto data frame}
\usage{
enhance_uncoded_manifesto_(u.df)
}
\arguments{
\item{u.df}{the 1-row manifesto dataframe recording the entire manifesto text in a unit-length character column 'text'}
}
\value{
The input \code{u.df} \code{\link[tibble]{tibble}} enhanced by column
'qs_nr' (running quasi-sentence counter),
'sent_nr' (running sentence counter),
'role' (indicator, here 'qs' for all rows), and
'bloc_nr'
}
\description{
\code{enhance_manifesto_df} helper
}
\note{
The function takes the unit-legnth character contained in column 'text' of the 1-row data frame,
and splits it into sentences.
Sentences are then numbered, and because the manifesto is uncoded, \code{qs_nr == sent_nr},
'role' is 'qs' and bloc_nr is 1 for all rows.
}
|
###ANOVA ana one by one RA up 0.005
rm(list = ls())
wdir <- "/var/data/19.Richa_new/"
setwd(wdir)
inputFolder <- "./3.data_for_figure/"
outputFolder <- "./2.stastics_analysis_result/"
source("script/Cbind.R")
library("dplyr")
library("tidyr")
library("plyr")
library("qpcR")
##aov_analysis and p.adjust function
# Tukey HSD test (control vs. one treatment genotype) for every feature
# (row) of `data`, with BH/FDR adjustment and a raw-scale fold change.
#
# Relies on the global data frame `map` (rownames = sample ids, column
# `Genotype`) to decide which columns of `data` belong to each group.
#
# Args:
#   control_name: genotype label of the control group in map$Genotype
#   sample_name:  genotype label of the treatment group
#   data:         feature x sample abundance table (columns = sample ids)
# Returns a data.frame with the TukeyHSD columns (diff, lwr, upr, p.adj)
# plus BH (FDR-adjusted p-value) and FC (mean(sample)/mean(control),
# computed on the raw, non-log scale).
aov_adjust<-function(control_name,sample_name,data){
  control_id<-rownames(map)[map$Genotype==control_name]
  sample_id<-rownames(map)[map$Genotype==sample_name]
  # log2(x+1) to stabilise variance before the ANOVA
  data1<-log2(data[,c(control_id,sample_id)]+1)
  group<-as.factor(c(rep(1,length(control_id)),rep(2,length(sample_id))))
  aov_file<-NULL
  # one two-group ANOVA + Tukey HSD per feature; keep the first contrast row
  for(i in seq_len(nrow(data1))){
    df<-data.frame(x=as.numeric(data1[i,]),y=group)
    aov.out<-TukeyHSD(aov(x~y,data=df))
    aov_file<-rbind(aov_file,aov.out$y[1,])
  }
  row.names(aov_file)<-row.names(data1)
  c_s_aov<-as.data.frame(aov_file)
  # BUG FIX: the original read c_s_aov$`p adj`, but as.data.frame() makes
  # matrix column names syntactic ("p adj" -> "p.adj"), so that lookup
  # returned NULL and the subsequent cbind() failed. Take the p-values
  # from the matrix, whose colnames are preserved, before adjusting.
  BH<-p.adjust(aov_file[,"p adj"],"fdr")
  c_s_aov<-cbind(c_s_aov,BH=BH)
  # fold change on the raw (non-log) scale
  fold<-apply(data[,sample_id],1,mean)/apply(data[,control_id],1,mean)
  c_s_aov<-cbind(c_s_aov,FC=fold)
  return(c_s_aov)
}
# Run aov_adjust() for one control genotype against each of several mutant
# genotypes and bind the per-mutant result columns side by side.
#
# NOTE(review): the first argument `aov_file` is overwritten before it is
# ever read, so -- thanks to R's lazy evaluation -- callers can (and in the
# driver code below actually do) pass names that are not defined anywhere
# without triggering an error. Do not "fix" the callers without keeping
# this in mind.
#
# Args:
#   aov_file:  unused (immediately reassigned); kept for call compatibility
#   s_control: control genotype label
#   s_mutant:  character vector of mutant genotype labels
#   data:      feature x sample table passed through to aov_adjust()
#   type:      "otu" appends the global `tax` annotation column; any other
#              value just restores the feature rownames
# Returns the combined data.frame (invisibly in the "otu" branch, since the
# assignment is the last evaluated expression there).
cmp_wt_mutant <- function(aov_file,s_control,s_mutant,data,type){
  aov_file<-data.frame()
  for (i in s_mutant)
  {
    aov_df<-aov_adjust(s_control,i,data)
    if( nrow(aov_file)==0)
    {aov_file<-aov_df}
    else
    {aov_file<-cbind(aov_file,aov_df)}
  }
  # 6 result columns per mutant: diff, lwr, upr, p adj, BH, FC
  colnames(aov_file)<-paste(rep(s_mutant,each=6),colnames(aov_file),sep="_")
  if(type=="otu"){
    # `tax` is a global taxonomy vector; its order is assumed to match the
    # rows of `data` -- TODO confirm
    aov_file<-data.frame(aov_file,tax)
  }
  else{
    rownames(aov_file)<-rownames(data)
    aov_file
  }
}
##read_tables(acm phylum family)
##read_table
argv <- commandArgs(T)
otu_table<-"otu_change_syringae.txt"
ACM_table<-"ACM_norm_tax_form_change_syringae.txt"
ACM_RA<-"ACM_RA_norm_tax_form.txt"
fam_RA<-"Family_RA_table.txt"
phy_RA<-"Phy_RA_table.txt"
otu_table<-read.delim(paste(inputFolder,otu_table, sep=""),header=T,row.names= 1,sep = "\t",stringsAsFactors = F)
acm_table<-read.table(paste(inputFolder,ACM_table, sep=""),header=T,row.names = 1,sep = "\t",stringsAsFactors = F)
#group_type <- argv[6]
##expermant design
map<-read.table(paste(inputFolder,"map.tsv",sep=""),header=T,row.names=1,sep = "\t",stringsAsFactors = F)
rownames(map) <- paste0("X",rownames(map))
map <- map[rownames(map[rownames(map)%in%colnames(otu_table),]),]
Rhizo <- rownames(map[grepl("rizho",map$Genotype),])
root <- rownames(map[grepl("root",map$Genotype),])
soil <- rownames(map[grepl("soil",map$Genotype),])
rhizo_wt <- unique(map[Rhizo,]$Genotype)[1]
rhizo_mutant<- unique(map[Rhizo,]$Genotype)[-1]
root_wt <- unique(map[root,]$Genotype)[1]
root_mutant<- unique(map[root,]$Genotype)[-1]
bs1 <- unique(map[soil,]$Genotype)[1]
bs2<- unique(map[soil,]$Genotype)[-1]
#--------ACM------------
pwm<-read.delim(paste(inputFolder,ACM_RA, sep=""),header=T,row.names=1,sep = "\t",stringsAsFactors = F)
data <- pwm[,-((ncol(pwm)-6):ncol(pwm))]
data_5<-apply(data,2,function(x){ifelse(x>=5,1,0)})
ACM_RA_5<-data[apply(data_5,1,sum)>=1,]
tax<-apply(pwm[rownames(ACM_RA_5),((ncol(pwm)-6):ncol(pwm))],1,paste0,collapse=";")
#--------Family-----------
fam_pwm<-read.delim(paste(inputFolder,fam_RA, sep=""),header=T,row.names=1,sep = "\t")
fam_data<-fam_pwm[,2:ncol(fam_pwm)]
fam_data_5<-apply(fam_data,2,function(x){ifelse(x>=5,1,0)})
fam_RA_5<-fam_data[apply(fam_data_5,1,sum)>=1,]
#-------phylum-----------
phy_pwm<-read.delim(paste(inputFolder,phy_RA, sep=""),header=T,row.names=1,sep = "\t")
phy_data<-phy_pwm
phy_data_5<-apply(phy_data,2,function(x){ifelse(x>=5,1,0)})
phy_RA_5<-phy_data[apply(phy_data_5,1,sum)>=1,]
#otu_wt_mutant
otu_rhizo_wt_mutant <- cmp_wt_mutant(rhizo_wt_mutant,rhizo_wt,rhizo_mutant,ACM_RA_5,"otu")
otu_root_wt_mutant <- cmp_wt_mutant(root_wt_mutant,root_wt,root_mutant,ACM_RA_5,"otu")
otu_bs1_bs2 <-cmp_wt_mutant(bs_wt_mutant,bs1,bs2,ACM_RA_5,"otu")
#family_wt_mutant
fam_rhizo_wt_mutant <- cmp_wt_mutant(rhizo_wt_mutant,rhizo_wt,rhizo_mutant,fam_RA_5,"fam")
fam_root_wt_mutant <- cmp_wt_mutant(root_wt_mutant,root_wt,root_mutant,fam_RA_5,"fam")
fam_bs1_bs2 <-cmp_wt_mutant(bs_wt_mutant,bs1,bs2,fam_RA_5,"fam")
##phylum_wt_mutant
phy_rhizo_wt_mutant <- cmp_wt_mutant(rhizo_wt_mutant,rhizo_wt,rhizo_mutant,phy_RA_5,"phy")
phy_root_wt_mutant <- cmp_wt_mutant(root_wt_mutant,root_wt,root_mutant,phy_RA_5,"phy")
phy_bs1_bs2 <-cmp_wt_mutant(bs_wt_mutant,bs1,bs2,phy_RA_5,"phy")
###ACM Fam phy RA>5
# Prepend a genotype row to each RA>5 table so the exported Excel sheets
# carry the experimental design.
# FIX: the original used `$Ge`, which only worked via R's partial matching
# of data-frame column names against `Genotype`; it would silently return
# NULL if another column starting with "Ge" were ever added. Spell the
# column name out.
ACM_RA_5_with_Desc<-rbind(OTU_id=as.vector(map[colnames(ACM_RA_5),]$Genotype),ACM_RA_5)
fam_RA_5_Desc<-rbind(OTU_id=as.vector(map[colnames(fam_RA_5),]$Genotype),fam_RA_5)
phy_RA_5_Desc<-rbind(OTU_id=as.vector(map[colnames(phy_RA_5),]$Genotype),phy_RA_5)
##write to excel
# Force garbage collection on both the R side and the JVM used by
# rJava/xlsx, to keep the Java heap from filling up while writing many
# large sheets in the export loop below.
jgc <- function()
{
  gc()
  .jcall("java/lang/System", method = "gc")
}
library("rJava")
library("xlsx")
set.seed(19790801)
n_sheets <- 40
options(java.parameters = "-Xmx8000m")
#df<-list(otu_table,acm_table,pwm,fam_pwm,phy_pwm,ACM_RA_5_with_geno,Fam_ACM_RA_5_geno,phy_ACM_RA_5_geno,rhizo_wt_mutant_with_tax,silique_wt_mutant_with_tax,root_wt_mutant_with_tax,stem_wt_mutant_with_tax,bs1_bs2_tax,
#family_rhizo_wt_mutant,family_silique_wt_mutant,family_root_wt_mutant,family_stem_wt_mutant,family_BS2_BS1,phy_rhizo_wt_mutant,phy_silique_wt_mutant,phy_root_wt_mutant,phy_stem_wt_mutant,phy_BS2_BS1)
wb<-createWorkbook()
sname <- c("ACM_table","ACM_RA","fam_RA","phy_RA","ACM_RA_5","fam_RA_5","phy_RA_5","otu_rhizo_wt_mutant_with_tax","otu_root_wt_mutant_with_tax",
"otu_bs1_bs2_tax","family_rhizo_wt_mutant","family_root_wt_mutant","family_bs2_bs1",
"phy_rhizo_wt_mutant","phy_root_wt_mutant","phy_bs2_bs1")
df<-list(acm_table,pwm,fam_pwm,phy_pwm,ACM_RA_5_with_Desc,fam_RA_5_Desc,phy_RA_5_Desc,otu_rhizo_wt_mutant,otu_root_wt_mutant,otu_bs1_bs2,
fam_rhizo_wt_mutant,fam_root_wt_mutant,fam_bs1_bs2,
phy_rhizo_wt_mutant,phy_root_wt_mutant,phy_bs1_bs2)
for (i in 1:16){
jgc()
sheet<-createSheet(wb,sheetName = as.character(sname[i]) )
addDataFrame(df[[i]],sheet,row.names = TRUE)
}
saveWorkbook(wb,paste(outputFolder,"stat_otus_family_phyrum_result_with_FC_and_RA_table_group1.xlsx",sep=""))
| /script/PL_R_script/2.ANOVA_ana_one_by_one_up_0.005.R | no_license | penglbio/16S | R | false | false | 6,109 | r | ###ANOVA ana one by one RA up 0.005
rm(list = ls())
wdir <- "/var/data/19.Richa_new/"
setwd(wdir)
inputFolder <- "./3.data_for_figure/"
outputFolder <- "./2.stastics_analysis_result/"
source("script/Cbind.R")
library("dplyr")
library("tidyr")
library("plyr")
library("qpcR")
##aov_analysis and p.adjust function
aov_adjust<-function(control_name,sample_name,data){
control_id<-rownames(map)[map$Genotype==control_name]
sample_id<-rownames(map)[map$Genotype==sample_name]
data[,c(control_id,sample_id)]
data1<-data[,c(control_id,sample_id)]
data1<-log2(data1+1)
dim(data1)
data[,2]
group<-as.factor( c(rep(1,length(control_id)),rep(2,length(sample_id))))
aov_file=NULL
for(i in 1:length(data1[,1])){a=data1[i,];df<-data.frame(x=as.numeric(a),y=group);aov.out<-TukeyHSD(aov(x~y,data=df));pvalue<-aov.out$y[1,];aov_file<-rbind(aov_file,pvalue)}
row.names(aov_file)<-row.names(data1)
c_s_aov<-as.data.frame(aov_file)
BH<-p.adjust(c_s_aov$`p adj`,"fdr")
c_s_aov<-cbind(c_s_aov,BH=BH)
fold<-apply(data[,sample_id],1,mean)/apply(data[,control_id],1,mean)
c_s_aov<-cbind(c_s_aov,FC=fold)
return(c_s_aov)
}
cmp_wt_mutant <- function(aov_file,s_control,s_mutant,data,type){
aov_file<-data.frame()
for (i in s_mutant)
{
aov_df<-aov_adjust(s_control,i,data)
if( nrow(aov_file)==0)
{aov_file<-aov_df}
else
{aov_file<-cbind(aov_file,aov_df)}
}
colnames(aov_file)<-paste(rep(s_mutant,each=6),colnames(aov_file),sep="_")
if(type=="otu"){
aov_file<-data.frame(aov_file,tax)
}
else{
rownames(aov_file)<-rownames(data)
aov_file
}
}
##read_tables(acm phylum family)
##read_table
argv <- commandArgs(T)
otu_table<-"otu_change_syringae.txt"
ACM_table<-"ACM_norm_tax_form_change_syringae.txt"
ACM_RA<-"ACM_RA_norm_tax_form.txt"
fam_RA<-"Family_RA_table.txt"
phy_RA<-"Phy_RA_table.txt"
otu_table<-read.delim(paste(inputFolder,otu_table, sep=""),header=T,row.names= 1,sep = "\t",stringsAsFactors = F)
acm_table<-read.table(paste(inputFolder,ACM_table, sep=""),header=T,row.names = 1,sep = "\t",stringsAsFactors = F)
#group_type <- argv[6]
##expermant design
map<-read.table(paste(inputFolder,"map.tsv",sep=""),header=T,row.names=1,sep = "\t",stringsAsFactors = F)
rownames(map) <- paste0("X",rownames(map))
map <- map[rownames(map[rownames(map)%in%colnames(otu_table),]),]
Rhizo <- rownames(map[grepl("rizho",map$Genotype),])
root <- rownames(map[grepl("root",map$Genotype),])
soil <- rownames(map[grepl("soil",map$Genotype),])
rhizo_wt <- unique(map[Rhizo,]$Genotype)[1]
rhizo_mutant<- unique(map[Rhizo,]$Genotype)[-1]
root_wt <- unique(map[root,]$Genotype)[1]
root_mutant<- unique(map[root,]$Genotype)[-1]
bs1 <- unique(map[soil,]$Genotype)[1]
bs2<- unique(map[soil,]$Genotype)[-1]
#--------ACM------------
pwm<-read.delim(paste(inputFolder,ACM_RA, sep=""),header=T,row.names=1,sep = "\t",stringsAsFactors = F)
data <- pwm[,-((ncol(pwm)-6):ncol(pwm))]
data_5<-apply(data,2,function(x){ifelse(x>=5,1,0)})
ACM_RA_5<-data[apply(data_5,1,sum)>=1,]
tax<-apply(pwm[rownames(ACM_RA_5),((ncol(pwm)-6):ncol(pwm))],1,paste0,collapse=";")
#--------Family-----------
fam_pwm<-read.delim(paste(inputFolder,fam_RA, sep=""),header=T,row.names=1,sep = "\t")
fam_data<-fam_pwm[,2:ncol(fam_pwm)]
fam_data_5<-apply(fam_data,2,function(x){ifelse(x>=5,1,0)})
fam_RA_5<-fam_data[apply(fam_data_5,1,sum)>=1,]
#-------phylum-----------
phy_pwm<-read.delim(paste(inputFolder,phy_RA, sep=""),header=T,row.names=1,sep = "\t")
phy_data<-phy_pwm
phy_data_5<-apply(phy_data,2,function(x){ifelse(x>=5,1,0)})
phy_RA_5<-phy_data[apply(phy_data_5,1,sum)>=1,]
#otu_wt_mutant
otu_rhizo_wt_mutant <- cmp_wt_mutant(rhizo_wt_mutant,rhizo_wt,rhizo_mutant,ACM_RA_5,"otu")
otu_root_wt_mutant <- cmp_wt_mutant(root_wt_mutant,root_wt,root_mutant,ACM_RA_5,"otu")
otu_bs1_bs2 <-cmp_wt_mutant(bs_wt_mutant,bs1,bs2,ACM_RA_5,"otu")
#family_wt_mutant
fam_rhizo_wt_mutant <- cmp_wt_mutant(rhizo_wt_mutant,rhizo_wt,rhizo_mutant,fam_RA_5,"fam")
fam_root_wt_mutant <- cmp_wt_mutant(root_wt_mutant,root_wt,root_mutant,fam_RA_5,"fam")
fam_bs1_bs2 <-cmp_wt_mutant(bs_wt_mutant,bs1,bs2,fam_RA_5,"fam")
##phylum_wt_mutant
phy_rhizo_wt_mutant <- cmp_wt_mutant(rhizo_wt_mutant,rhizo_wt,rhizo_mutant,phy_RA_5,"phy")
phy_root_wt_mutant <- cmp_wt_mutant(root_wt_mutant,root_wt,root_mutant,phy_RA_5,"phy")
phy_bs1_bs2 <-cmp_wt_mutant(bs_wt_mutant,bs1,bs2,phy_RA_5,"phy")
###ACM Fam phy RA>5
ACM_RA_5_with_Desc<-rbind(OTU_id=as.vector(map[colnames(ACM_RA_5),]$Ge),ACM_RA_5)
fam_RA_5_Desc<-rbind(OTU_id=as.vector(map[colnames(fam_RA_5),]$Genotype),fam_RA_5)
phy_RA_5_Desc<-rbind(OTU_id=as.vector(map[colnames(phy_RA_5),]$Genotype),phy_RA_5)
##write to excel
jgc <- function()
{
gc()
.jcall("java/lang/System", method = "gc")
}
library("rJava")
library("xlsx")
set.seed(19790801)
n_sheets <- 40
options(java.parameters = "-Xmx8000m")
#df<-list(otu_table,acm_table,pwm,fam_pwm,phy_pwm,ACM_RA_5_with_geno,Fam_ACM_RA_5_geno,phy_ACM_RA_5_geno,rhizo_wt_mutant_with_tax,silique_wt_mutant_with_tax,root_wt_mutant_with_tax,stem_wt_mutant_with_tax,bs1_bs2_tax,
#family_rhizo_wt_mutant,family_silique_wt_mutant,family_root_wt_mutant,family_stem_wt_mutant,family_BS2_BS1,phy_rhizo_wt_mutant,phy_silique_wt_mutant,phy_root_wt_mutant,phy_stem_wt_mutant,phy_BS2_BS1)
wb<-createWorkbook()
sname <- c("ACM_table","ACM_RA","fam_RA","phy_RA","ACM_RA_5","fam_RA_5","phy_RA_5","otu_rhizo_wt_mutant_with_tax","otu_root_wt_mutant_with_tax",
"otu_bs1_bs2_tax","family_rhizo_wt_mutant","family_root_wt_mutant","family_bs2_bs1",
"phy_rhizo_wt_mutant","phy_root_wt_mutant","phy_bs2_bs1")
df<-list(acm_table,pwm,fam_pwm,phy_pwm,ACM_RA_5_with_Desc,fam_RA_5_Desc,phy_RA_5_Desc,otu_rhizo_wt_mutant,otu_root_wt_mutant,otu_bs1_bs2,
fam_rhizo_wt_mutant,fam_root_wt_mutant,fam_bs1_bs2,
phy_rhizo_wt_mutant,phy_root_wt_mutant,phy_bs1_bs2)
for (i in 1:16){
jgc()
sheet<-createSheet(wb,sheetName = as.character(sname[i]) )
addDataFrame(df[[i]],sheet,row.names = TRUE)
}
saveWorkbook(wb,paste(outputFolder,"stat_otus_family_phyrum_result_with_FC_and_RA_table_group1.xlsx",sep=""))
|
#' Shiny dotplot
#'
#' Shiny application to perform dot-plot pacman-plot
#'
#' @param RStudio logical; if \code{TRUE}, open the app in the RStudio
#'   viewer pane instead of the default browser. Defaults to \code{FALSE}.
#' @examples \dontrun{Shiny_dot_plot()}
#' @author Simon Leonard - simon_leonard[a]hotmail.fr
Shiny_dot_plot <- function (RStudio = FALSE)
{
  Sys.setenv(R_MAX_NUM_DLLS = 180)
  # FIX: typo in the user-facing message ("Shniy" -> "Shiny")
  cat("Launching Shiny app...")
  if (RStudio) {
    # .rs.invokeShinyWindowViewer is RStudio-internal; presumably only
    # available when running inside RStudio -- TODO confirm guard
    options(shiny.launch.browser = .rs.invokeShinyWindowViewer)
  }
  shiny::runApp(system.file("app", package = "FlexDotPlot"))
}
| /R/Shiny_dot_plot.R | no_license | EikoSAIJOU334/FlexDotPlot | R | false | false | 438 | r | #' Shiny dotplot
#'
#' Shiny application to perform dot-plot pacman-plot
#'
#' @examples Shiny_dot_plot()
#' @author Simon Leonard - simon_leonard[a]hotmail.fr
Shiny_dot_plot <- function (RStudio = F)
{
Sys.setenv(R_MAX_NUM_DLLS = 180)
cat("Launching Shniy app...")
if (RStudio) {
options(shiny.launch.browser = .rs.invokeShinyWindowViewer)
}
shiny::runApp(system.file("app", package = "FlexDotPlot"))
}
|
# Simulate `i` trials-per-participant Beta(alpha, beta) data for N
# participants and summarise each simulated participant.
#
# Args:
#   i:       number of rows (trials blocks) per simulated observation
#   alpha:   first Beta shape parameter
#   beta:    second Beta shape parameter
#   N:       number of simulated observations
#   verbose: print `i` as a progress marker
# Returns an (N*i) x 3 matrix with columns mean, sd and obs (observation id).
doSim <- function(i, alpha = 1, beta = 1, N = 1000, verbose = TRUE)
{
    if (verbose) print(i)
    # 16 Beta draws per row, N*i rows in total
    draws <- rbeta(n = N * i * 16, shape1 = alpha, shape2 = beta)
    trials <- matrix(draws, N * i, 16)
    row_mean <- apply(trials, 1, mean, na.rm = TRUE)
    row_sd <- apply(trials, 1, sd, na.rm = TRUE)
    cbind(mean = row_mean, sd = row_sd, obs = rep(seq_len(N), each = i))
}
| /Michaela/funs3.R | no_license | ryanHutchings/RBeyondBasics | R | false | false | 370 | r |
doSim =
function(i, alpha = 1, beta = 1, N = 1000, verbose = TRUE)
{
if(verbose) print(i)
simData <- matrix(rbeta(n = N*i*16, shape1 = alpha, shape2 = beta), N*i, 16) # simulate data (16 trials per participant)
ans = t(apply(simData, 1, function(x) c(mean = mean(x, na.rm = TRUE), sd = sd(x, na.rm = TRUE))) )
cbind(ans, obs = rep(1:N, each = i))
}
|
diffSplice <- function(fit,geneid,exonid=NULL,robust=FALSE,verbose=TRUE)
# Test for splicing variants between conditions
# using linear model fit of exon data.
# Gordon Smyth and Charity Law
# Created 13 Dec 2013. Last modified 20 April 2017.
#
# Arguments:
#   fit     - MArrayLM linear model fit with one row per exon
#   geneid  - either a single column name into fit$genes, or a per-exon
#             vector of gene IDs
#   exonid  - optional exon IDs, same single-name/vector convention
#   robust  - use robust empirical Bayes moderation in squeezeVar()
#   verbose - print exon/gene count summaries
# Returns an MArrayLM object carrying exon-level t-statistics for
# differential exon usage, plus gene-level F-tests and Simes-adjusted
# p-values.
{
	# Make sure there is always an annotation frame
	exon.genes <- fit$genes
	if(is.null(exon.genes)) exon.genes <- data.frame(ExonID=1:nrow(fit))
	# Get ID columns for genes and exons
	if(length(geneid)==1) {
		genecolname <- as.character(geneid)
		geneid <- exon.genes[[genecolname]]
	} else {
		exon.genes$GeneID <- geneid
		genecolname <- "GeneID"
	}
	if(is.null(exonid)) {
		exoncolname <- NULL
	} else {
		if(length(exonid)==1) {
			exoncolname <- as.character(exonid)
			exonid <- exon.genes[[exoncolname]]
		} else {
			exon.genes$ExonID <- exonid
			exoncolname <- "ExonID"
		}
	}
	# Treat NA geneids as genes with one exon
	# (each NA gets a unique synthetic gene id, so it is later dropped by
	# the gene.nexons > 1 filter)
	if(anyNA(geneid)) {
		isna <- which(is.na(geneid))
		geneid[isna] <- paste0("NA",1:length(isna))
	}
	# Sort by geneid so that exons of each gene are contiguous
	if(is.null(exonid))
		o <- order(geneid)
	else
		o <- order(geneid,exonid)
	geneid <- geneid[o]
	exon.genes <- exon.genes[o,,drop=FALSE]
	exon.coefficients <- fit$coefficients[o,,drop=FALSE]
	exon.stdev.unscaled <- fit$stdev.unscaled[o,,drop=FALSE]
	exon.df.residual <- fit$df.residual[o]
	exon.s2 <- fit$sigma[o]^2
	# Count exons by gene and get genewise variances
	exon.stat <- cbind(1,exon.df.residual,exon.s2)
	gene.sum <- rowsum(exon.stat,geneid,reorder=FALSE)
	gene.nexons <- gene.sum[,1]
	gene.df.residual <- gene.sum[,2]
	# simple average of exon variances within each gene
	gene.s2 <- gene.sum[,3] / gene.sum[,1]
	if(verbose) {
		cat("Total number of exons: ", length(geneid), "\n")
		cat("Total number of genes: ", length(gene.nexons), "\n")
		cat("Number of genes with 1 exon: ", sum(gene.nexons==1), "\n")
		cat("Mean number of exons in a gene: ", round(mean(gene.nexons),0), "\n")
		cat("Max number of exons in a gene: ", max(gene.nexons), "\n")
	}
	# Posterior genewise variances (empirical Bayes shrinkage)
	squeeze <- squeezeVar(var=gene.s2, df=gene.df.residual, robust=robust)
	# Remove genes with only 1 exon (no within-gene contrast possible)
	gene.keep <- gene.nexons>1
	ngenes <- sum(gene.keep)
	if(ngenes==0) stop("No genes with more than one exon")
	exon.keep <- rep(gene.keep,gene.nexons)
	geneid <- geneid[exon.keep]
	exon.genes <- exon.genes[exon.keep,,drop=FALSE]
	exon.coefficients <- exon.coefficients[exon.keep,,drop=FALSE]
	exon.stdev.unscaled <- exon.stdev.unscaled[exon.keep,,drop=FALSE]
	exon.df.residual <- exon.df.residual[exon.keep]
	gene.nexons <- gene.nexons[gene.keep]
	gene.df.test <- gene.nexons-1
	gene.df.residual <- gene.df.residual[gene.keep]
	if(robust) squeeze$df.prior <- squeeze$df.prior[gene.keep]
	gene.df.total <- gene.df.residual+squeeze$df.prior
	gene.df.total <- pmin(gene.df.total,sum(gene.df.residual))
	gene.s2.post <- squeeze$var.post[gene.keep]
	# Genewise betas: precision-weighted average of exon coefficients
	u2 <- 1/exon.stdev.unscaled^2
	u2.rowsum <- rowsum(u2,geneid,reorder=FALSE)
	gene.betabar <- rowsum(exon.coefficients*u2,geneid,reorder=FALSE) / u2.rowsum
	# T-statistics for exon-level tests: each exon vs its gene's average
	g <- rep(1:ngenes,times=gene.nexons)
	exon.coefficients <- exon.coefficients-gene.betabar[g,,drop=FALSE]
	exon.t <- exon.coefficients / exon.stdev.unscaled / sqrt(gene.s2.post[g])
	gene.F <- rowsum(exon.t^2,geneid,reorder=FALSE) / gene.df.test
	# leverage correction: each exon contributed to its own gene average
	exon.1mleverage <- 1 - (u2 / u2.rowsum[g,,drop=FALSE])
	exon.coefficients <- exon.coefficients / exon.1mleverage
	exon.t <- exon.t / sqrt(exon.1mleverage)
	exon.p.value <- 2 * pt(abs(exon.t), df=gene.df.total[g], lower.tail=FALSE)
	gene.F.p.value <- pf(gene.F, df1=gene.df.test, df2=gene.df.total, lower.tail=FALSE)
	# Exon level output
	out <- new("MArrayLM",list())
	out$genes <- exon.genes
	out$genecolname <- genecolname
	out$exoncolname <- exoncolname
	out$coefficients <- exon.coefficients
	out$t <- exon.t
	out$p.value <- exon.p.value
	# Gene level output
	out$gene.df.prior <- squeeze$df.prior
	out$gene.df.residual <- gene.df.residual
	out$gene.df.total <- gene.df.total
	out$gene.s2 <- gene.s2[gene.keep]
	out$gene.s2.post <- gene.s2.post
	out$gene.F <- gene.F
	out$gene.F.p.value <- gene.F.p.value
	# Which columns of exon.genes contain gene level annotation?
	# (a column is gene-level if it is constant within every gene, i.e.
	# duplicated on every non-first exon)
	gene.lastexon <- cumsum(gene.nexons)
	gene.firstexon <- gene.lastexon-gene.nexons+1
	no <- logical(nrow(exon.genes))
	isdup <- vapply(exon.genes,duplicated,no)[-gene.firstexon,,drop=FALSE]
	isgenelevel <- apply(isdup,2,all)
	out$gene.genes <- exon.genes[gene.lastexon,isgenelevel, drop=FALSE]
	out$gene.genes$NExons <- gene.nexons
	out$gene.firstexon <- gene.firstexon
	out$gene.lastexon <- gene.lastexon
	# Simes adjustment of exon level p-values
	# penalty is rank/(nexons-1) within each gene, built without an
	# explicit per-gene loop via cumsum bookkeeping
	penalty <- rep_len(1L,length(g))
	penalty[gene.lastexon] <- 1L-gene.nexons
	penalty <- cumsum(penalty)[-gene.lastexon]
	penalty <- penalty / rep(gene.nexons-1L,gene.nexons-1L)
	g2 <- g[-gene.lastexon]
	out$gene.simes.p.value <- gene.F.p.value
	for (j in 1:ncol(fit)) {
		o <- order(g,exon.p.value[,j])
		p.adj <- pmin(exon.p.value[o,j][-gene.lastexon] / penalty, 1)
		o <- order(g2,p.adj)
		out$gene.simes.p.value[,j] <- p.adj[o][gene.firstexon-0L:(ngenes-1L)]
	}
	out
}
| /R/diffSplice.R | no_license | hdeberg/limma | R | false | false | 5,187 | r | diffSplice <- function(fit,geneid,exonid=NULL,robust=FALSE,verbose=TRUE)
# Test for splicing variants between conditions
# using linear model fit of exon data.
# Gordon Smyth and Charity Law
# Created 13 Dec 2013. Last modified 20 April 2017.
{
# Make sure there is always an annotation frame
exon.genes <- fit$genes
if(is.null(exon.genes)) exon.genes <- data.frame(ExonID=1:nrow(fit))
# Get ID columns for genes and exons
if(length(geneid)==1) {
genecolname <- as.character(geneid)
geneid <- exon.genes[[genecolname]]
} else {
exon.genes$GeneID <- geneid
genecolname <- "GeneID"
}
if(is.null(exonid)) {
exoncolname <- NULL
} else {
if(length(exonid)==1) {
exoncolname <- as.character(exonid)
exonid <- exon.genes[[exoncolname]]
} else {
exon.genes$ExonID <- exonid
exoncolname <- "ExonID"
}
}
# Treat NA geneids as genes with one exon
if(anyNA(geneid)) {
isna <- which(is.na(geneid))
geneid[isna] <- paste0("NA",1:length(isna))
}
# Sort by geneid
if(is.null(exonid))
o <- order(geneid)
else
o <- order(geneid,exonid)
geneid <- geneid[o]
exon.genes <- exon.genes[o,,drop=FALSE]
exon.coefficients <- fit$coefficients[o,,drop=FALSE]
exon.stdev.unscaled <- fit$stdev.unscaled[o,,drop=FALSE]
exon.df.residual <- fit$df.residual[o]
exon.s2 <- fit$sigma[o]^2
# Count exons by gene and get genewise variances
exon.stat <- cbind(1,exon.df.residual,exon.s2)
gene.sum <- rowsum(exon.stat,geneid,reorder=FALSE)
gene.nexons <- gene.sum[,1]
gene.df.residual <- gene.sum[,2]
gene.s2 <- gene.sum[,3] / gene.sum[,1]
if(verbose) {
cat("Total number of exons: ", length(geneid), "\n")
cat("Total number of genes: ", length(gene.nexons), "\n")
cat("Number of genes with 1 exon: ", sum(gene.nexons==1), "\n")
cat("Mean number of exons in a gene: ", round(mean(gene.nexons),0), "\n")
cat("Max number of exons in a gene: ", max(gene.nexons), "\n")
}
# Posterior genewise variances
squeeze <- squeezeVar(var=gene.s2, df=gene.df.residual, robust=robust)
# Remove genes with only 1 exon
gene.keep <- gene.nexons>1
ngenes <- sum(gene.keep)
if(ngenes==0) stop("No genes with more than one exon")
exon.keep <- rep(gene.keep,gene.nexons)
geneid <- geneid[exon.keep]
exon.genes <- exon.genes[exon.keep,,drop=FALSE]
exon.coefficients <- exon.coefficients[exon.keep,,drop=FALSE]
exon.stdev.unscaled <- exon.stdev.unscaled[exon.keep,,drop=FALSE]
exon.df.residual <- exon.df.residual[exon.keep]
gene.nexons <- gene.nexons[gene.keep]
gene.df.test <- gene.nexons-1
gene.df.residual <- gene.df.residual[gene.keep]
if(robust) squeeze$df.prior <- squeeze$df.prior[gene.keep]
gene.df.total <- gene.df.residual+squeeze$df.prior
gene.df.total <- pmin(gene.df.total,sum(gene.df.residual))
gene.s2.post <- squeeze$var.post[gene.keep]
# Genewise betas
u2 <- 1/exon.stdev.unscaled^2
u2.rowsum <- rowsum(u2,geneid,reorder=FALSE)
gene.betabar <- rowsum(exon.coefficients*u2,geneid,reorder=FALSE) / u2.rowsum
# T-statistics for exon-level tests
g <- rep(1:ngenes,times=gene.nexons)
exon.coefficients <- exon.coefficients-gene.betabar[g,,drop=FALSE]
exon.t <- exon.coefficients / exon.stdev.unscaled / sqrt(gene.s2.post[g])
gene.F <- rowsum(exon.t^2,geneid,reorder=FALSE) / gene.df.test
exon.1mleverage <- 1 - (u2 / u2.rowsum[g,,drop=FALSE])
exon.coefficients <- exon.coefficients / exon.1mleverage
exon.t <- exon.t / sqrt(exon.1mleverage)
exon.p.value <- 2 * pt(abs(exon.t), df=gene.df.total[g], lower.tail=FALSE)
gene.F.p.value <- pf(gene.F, df1=gene.df.test, df2=gene.df.total, lower.tail=FALSE)
# Exon level output
out <- new("MArrayLM",list())
out$genes <- exon.genes
out$genecolname <- genecolname
out$exoncolname <- exoncolname
out$coefficients <- exon.coefficients
out$t <- exon.t
out$p.value <- exon.p.value
# Gene level output
out$gene.df.prior <- squeeze$df.prior
out$gene.df.residual <- gene.df.residual
out$gene.df.total <- gene.df.total
out$gene.s2 <- gene.s2[gene.keep]
out$gene.s2.post <- gene.s2.post
out$gene.F <- gene.F
out$gene.F.p.value <- gene.F.p.value
# Which columns of exon.genes contain gene level annotation?
gene.lastexon <- cumsum(gene.nexons)
gene.firstexon <- gene.lastexon-gene.nexons+1
no <- logical(nrow(exon.genes))
isdup <- vapply(exon.genes,duplicated,no)[-gene.firstexon,,drop=FALSE]
isgenelevel <- apply(isdup,2,all)
out$gene.genes <- exon.genes[gene.lastexon,isgenelevel, drop=FALSE]
out$gene.genes$NExons <- gene.nexons
out$gene.firstexon <- gene.firstexon
out$gene.lastexon <- gene.lastexon
# Simes adjustment of exon level p-values
penalty <- rep_len(1L,length(g))
penalty[gene.lastexon] <- 1L-gene.nexons
penalty <- cumsum(penalty)[-gene.lastexon]
penalty <- penalty / rep(gene.nexons-1L,gene.nexons-1L)
g2 <- g[-gene.lastexon]
out$gene.simes.p.value <- gene.F.p.value
for (j in 1:ncol(fit)) {
o <- order(g,exon.p.value[,j])
p.adj <- pmin(exon.p.value[o,j][-gene.lastexon] / penalty, 1)
o <- order(g2,p.adj)
out$gene.simes.p.value[,j] <- p.adj[o][gene.firstexon-0L:(ngenes-1L)]
}
out
}
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{confirm}
\alias{confirm}
\alias{confirm.default}
\title{Confirm an Analysis}
\usage{
confirm(x, ...)
\method{confirm}{default}(x, ...)
}
\arguments{
\item{x}{the object to be confirmed}
\item{\dots}{additional arguments required for specific methods}
}
\value{
The object returned depends on the specific method.
}
\description{
Reviews/accepts the results of an analysis: method for "default" data.
}
\note{
The default method simply returns the object and issues a warning.
}
\keyword{manip}
| /man/confirm.Rd | permissive | oceanspace/smwrStats | R | false | false | 556 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{confirm}
\alias{confirm}
\alias{confirm.default}
\title{Confirm an Analysis}
\usage{
confirm(x, ...)
\method{confirm}{default}(x, ...)
}
\arguments{
\item{x}{the object to be confirmed}
\item{\dots}{additional arguments required for specific methods}
}
\value{
The object returned depends on the specific method.
}
\description{
Reviews/accepts the results of an analysis: method for "default" data.
}
\note{
The default method simply returns the object and issues a warning.
}
\keyword{manip}
|
# Daymet single-pixel API query: daily weather variables (day length,
# precipitation, shortwave radiation, snow-water equivalent, tmax, tmin,
# vapour pressure) for one lat/lon point, year 1980.
url <- "https://daymet.ornl.gov/single-pixel/api/data?lat=35.9621&lon=-84.2916&vars=dayl,prcp,srad,swe,tmax,tmin,vp&years=1980"
# NOTE(review): `extra` is only passed through to command-line download
# methods (e.g. method = "wget"); with the default method it is ignored --
# confirm this flag ever takes effect here.
download.file(url = url, destfile = "daymet/test", extra = "--content-disposition")
| /R_testing/downloadingDaymetData.R | no_license | cjcampbell/cavesNmines | R | false | false | 217 | r |
url <- "https://daymet.ornl.gov/single-pixel/api/data?lat=35.9621&lon=-84.2916&vars=dayl,prcp,srad,swe,tmax,tmin,vp&years=1980"
download.file(url = url, destfile = "daymet/test", extra = "--content-disposition")
|
# 6.2.1
ages <- c(25, 26, 55, 37, 21, 42)
affils <- c("R", "D", "D", "R", "U", "D")
tapply(ages, affils, mean)
d <- data.frame(list(gender = c("M", "M", "F", "M", "F", "F"),
ages = c(47, 59, 21, 32, 33, 24), income = c(55000, 88000,
32450, 76500, 123000, 45650)))
d$over25 <- ifelse(d$ages > 25, 1, 0)
tapply(d$income, list(d$gender, d$over25), mean)
# 6.2.2
split(d$income, list(d$gender, d$over25))
# Read a text file and map each distinct word to the vector of positions
# (1-based word indices) at which it occurs.
#
# Args:
#   tf: path to a whitespace-separated text file
# Returns a named list, one integer vector of positions per distinct word.
findworks <- function(tf){
  # read in the words from the files, into a vector of mode character
  txt <- scan(tf, "")
  # FIX: seq_along() instead of 1:length(txt) -- the latter yields c(1, 0)
  # for an empty file, which makes split() fail; seq_along() gives
  # integer(0) and an empty result instead
  words <- split(seq_along(txt), txt)
  return(words)
}
# 6.2.3
aba <- read.csv("abalone.data", header = TRUE)
by(aba, aba$Gender, function(m) lm(m[, 2] ~ m[, 3]))
# 6.3
u <- c(22, 8, 33, 6, 8, 29, -2)
fl <- list(c(5, 12, 13, 12, 13, 5, 13), c("a", "bc", "a", "a", "bc", "a", "a"))
tapply(u, fl, length)
# 6.3.2 subtable()
# Extract a sub-table of `tbl` restricted to the level names in `subnames`
# (a named list with one character vector per table dimension), returning
# a proper "table" object with the corresponding dimnames.
subtable <- function(tbl, subnames){
  # get array of cell counts in tbl
  tblarray <- unclass(tbl)
  # we'll get the subarray of cell counts corresponding to subnames by
  # calling do.call() on the "[" function; we need to build up a list
  # of arguments first
  dcargs <- list(tblarray)
  # BUG FIX: was length(sbunames), an undefined variable (typo), which made
  # every call error out; the number of dimensions comes from subnames
  ndims <- length(subnames)
  for (i in seq_len(ndims)){
    dcargs[[i+1]] <- subnames[[i]]
  }
  subarray <- do.call("[", dcargs)
  # now we'll build the new table, consisting of the subarray, the
  # numbers of levels in each dimension, and the dimnames() value, plus
  # the "table" class attribute
  dims <- lapply(subnames, length)
  subtbl <- array(subarray, dims, dimnames = subnames)
  class(subtbl) <- "table"
  return(subtbl)
}
#6.3.3
# finds the cells in table tbl with the k highest frequencies; handling
# of ties is unrefined
# Returns a data frame (one column per table dimension plus Freq), with at
# most k rows, ordered by decreasing frequency.
tabdom <- function(tbl, k){
  # create a data frame representation of tbl, adding a Freq column
  tbldf <- as.data.frame(tbl)
  # order the rows by descending frequency and keep the top k
  # (k was previously unused, so the function never did what the
  # comment above promises)
  freqord <- order(tbldf$Freq, decreasing = TRUE)
  dom <- tbldf[freqord, , drop = FALSE][seq_len(min(k, nrow(tbldf))), , drop = FALSE]
  return(dom)
}
| /ch6.R | no_license | quincy0319/art_of_r | R | false | false | 2,031 | r | # 6.2.1
ages <- c(25, 26, 55, 37, 21, 42)
affils <- c("R", "D", "D", "R", "U", "D")
tapply(ages, affils, mean)
d <- data.frame(list(gender = c("M", "M", "F", "M", "F", "F"),
ages = c(47, 59, 21, 32, 33, 24), income = c(55000, 88000,
32450, 76500, 123000, 45650)))
d$over25 <- ifelse(d$ages > 25, 1, 0)
tapply(d$income, list(d$gender, d$over25), mean)
# 6.2.2
split(d$income, list(d$gender, d$over25))
findworks <- function(tf) {
  # Read the whitespace-separated words of file `tf` into a character vector,
  # then map each distinct word to the vector of positions where it occurs.
  #
  # Returns a named list: one element per distinct word, containing the
  # (integer) indices at which that word appears in the file.
  txt <- scan(tf, "")
  # seq_along() handles an empty file correctly, unlike 1:length(txt),
  # which would yield c(1, 0) when length(txt) == 0
  words <- split(seq_along(txt), txt)
  return(words)
}
# 6.2.3
aba <- read.csv("abalone.data", header = TRUE)
by(aba, aba$Gender, function(m) lm(m[, 2] ~ m[, 3]))
# 6.3
u <- c(22, 8, 33, 6, 8, 29, -2)
fl <- list(c(5, 12, 13, 12, 13, 5, 13), c("a", "bc", "a", "a", "bc", "a", "a"))
tapply(u, fl, length)
# 6.3.2 subtable()
# Extract a sub-table of `tbl`. `subnames` is a named list giving, for each
# dimension of the table, the level names to retain. Returns an object of
# class "table" with the requested levels only.
subtable <- function(tbl, subnames){
  # get array of cell counts in tbl
  tblarray <- unclass(tbl)
  # we'll get the subarray of cell counts corresponding to subnames by
  # calling do.call() on the "[" function; we need to build up a list
  # of arguments first
  dcargs <- list(tblarray)
  # BUG FIX: was length(sbunames), an undefined object
  ndims <- length(subnames)  # number of dimensions
  for (i in seq_len(ndims)){
    dcargs[[i+1]] <- subnames[[i]]
  }
  subarray <- do.call("[", dcargs)
  # now we'll build the new table, consisting of the subarray, the
  # numbers of levels in each dimension, and the dimnames() value, plus
  # the "table" class attribute
  dims <- lapply(subnames, length)
  subtbl <- array(subarray, dims, dimnames = subnames)
  class(subtbl) <- "table"
  return(subtbl)
}
#6.3.3
# finds the cells in table tbl with the k highest frequencies; handling
# of ties is unrefined
# Returns a data frame (one column per table dimension plus Freq), with at
# most k rows, ordered by decreasing frequency.
tabdom <- function(tbl, k){
  # create a data frame representation of tbl, adding a Freq column
  tbldf <- as.data.frame(tbl)
  # order the rows by descending frequency and keep the top k
  # (k was previously unused, so the function never did what the
  # comment above promises)
  freqord <- order(tbldf$Freq, decreasing = TRUE)
  dom <- tbldf[freqord, , drop = FALSE][seq_len(min(k, nrow(tbldf))), , drop = FALSE]
  return(dom)
}
|
# Register a task callback that reruns set.seed(99) before every top-level
# task, so the random draws below are reproducible.
semente <- addTaskCallback(function(...) {set.seed(99);TRUE}) # create/run seed callback
#~~ build the (simulated) registration data frame
Cadastro <- data.frame(ID = sample(c(000:999), 100, replace = TRUE),
genero = sample(c("Masc", "Fem"), 100, replace = TRUE),
idade = rnorm(n = 100, mean = 25, sd = 5))
removeTaskCallback(semente) # stop the seed callback
| /script/tarefa_criar_data_frame.r | no_license | CaioMalaquias/tarefa_eletiva_polsci_ufpe | R | false | false | 369 | r | semente <- addTaskCallback(function(...) {set.seed(99);TRUE}) #cria/roda semente
#~~cria data frame de cadastro
Cadastro <- data.frame(ID = sample(c(000:999), 100, replace = TRUE),
genero = sample(c("Masc", "Fem"), 100, replace = TRUE),
idade = rnorm(n = 100, mean = 25, sd = 5))
removeTaskCallback(semente) # para semente
|
# Assemble USNM skull landmark data: read the distance records and the two
# landmark views, glue the per-skull configurations, match specimens against
# the distance database, and export the aligned set.
# NOTE(review): relies on project helpers (get.skull, find.miss.lm,
# glue.skulls, vis.seq) defined elsewhere in this repository.
require (shapes)
### distance database
data.dist = read.csv2 (file = 'GLMALL2.csv', header = TRUE, row.names = NULL)
### landmarks (two views, A and Z)
vis = scan ("vistas.csv", what = "")
lmA = vis[1:35]
lmA[4] = "NA"
lmZ = vis[36:length (vis)]
rm (vis)
### get skulls
USNM = get.skull (DIR = "USNMPRO", lmA = lmA, lmZ = lmZ)
dim (USNM$A)
### remove individuals with missing landmarks
### NOTE(review): original comment said "AMNH", but the data here is USNM
usnm.miss.A = apply (USNM$A, 3, find.miss.lm)
usnm.miss.Z = apply (USNM$Z, 3, find.miss.lm)
usnm.rem = usnm.miss.A & usnm.miss.Z
sum (usnm.rem)
### compare svd with ols
usnm.raw = glue.skulls (USNM$A[,,usnm.rem], USNM$Z[,,usnm.rem], soln = 'svd')
### load the distance data set
dimnames (usnm.raw)[[3]] %in% as.character (data.dist [,1] [data.dist$MUSEUM == 'USNM'])
usnm.data = data.dist [data.dist$MUSEUM == 'USNM',1:10]
elem1 = function (element) {return (element[1])}
# keep only the part of each ID before the first '.'
usnm.id = sapply (strsplit (as.character (usnm.data$ID), split = '.', fixed = TRUE), elem1)
usnm.data$ID = usnm.id
# keep only specimens present in both the landmark array and the records
usnm.match = usnm.id %in% dimnames (usnm.raw) [[3]]
usnm.data = usnm.data [usnm.match,]
usnm.match = dimnames (usnm.raw) [[3]] %in% usnm.id
usnm.raw = usnm.raw [,,usnm.match]
# align the array slices with the record order
usnm.order = match (usnm.data$ID, dimnames (usnm.raw) [[3]])
usnm.raw = usnm.raw [,,usnm.order]
dim (usnm.raw)
usnm.export = list ('data' = usnm.data,
'raw' = usnm.raw)
### visually check shapes
vis.seq (usnm.raw, start = 1)
save (usnm.export, file = 'N.USNM.RData')
| /old_code/mnrj.R | no_license | wgar84/Primaset | R | false | false | 1,409 | r | require (shapes)
### banco de dados
data.dist = read.csv2 (file = 'GLMALL2.csv', header = TRUE, row.names = NULL)
### landmarks
vis = scan ("vistas.csv", what = "")
lmA = vis[1:35]
lmA[4] = "NA"
lmZ = vis[36:length (vis)]
rm (vis)
### get skulls
USNM = get.skull (DIR = "USNMPRO", lmA = lmA, lmZ = lmZ)
dim (USNM$A)
### AMNH remove ind w/ missing lms
usnm.miss.A = apply (USNM$A, 3, find.miss.lm)
usnm.miss.Z = apply (USNM$Z, 3, find.miss.lm)
usnm.rem = usnm.miss.A & usnm.miss.Z
sum (usnm.rem)
### compare svd with ols
usnm.raw = glue.skulls (USNM$A[,,usnm.rem], USNM$Z[,,usnm.rem], soln = 'svd')
### carregar banco de dados de distรขncias
dimnames (usnm.raw)[[3]] %in% as.character (data.dist [,1] [data.dist$MUSEUM == 'USNM'])
usnm.data = data.dist [data.dist$MUSEUM == 'USNM',1:10]
elem1 = function (element) {return (element[1])}
usnm.id = sapply (strsplit (as.character (usnm.data$ID), split = '.', fixed = TRUE), elem1)
usnm.data$ID = usnm.id
usnm.match = usnm.id %in% dimnames (usnm.raw) [[3]]
usnm.data = usnm.data [usnm.match,]
usnm.match = dimnames (usnm.raw) [[3]] %in% usnm.id
usnm.raw = usnm.raw [,,usnm.match]
usnm.order = match (usnm.data$ID, dimnames (usnm.raw) [[3]])
usnm.raw = usnm.raw [,,usnm.order]
dim (usnm.raw)
usnm.export = list ('data' = usnm.data,
'raw' = usnm.raw)
### checando formas
vis.seq (usnm.raw, start = 1)
save (usnm.export, file = 'N.USNM.RData')
|
# The path to the currently-loaded project, if any.
# NULL when no project is currently loaded.
the$project_path <- NULL
# Flag indicating whether we're checking if the project is synchronized.
the$project_synchronized_check_running <- FALSE
#' Retrieve the active project
#'
#' Retrieve the path to the active project (if any).
#'
#' @param default The value to return when no project is
#' currently active. Defaults to `NULL`.
#'
#' @export
#'
#' @return The active project directory, as a length-one character vector.
#'
#' @examples
#' \dontrun{
#'
#' # get the currently-active renv project
#' renv::project()
#'
#' }
project <- function(default = NULL) {
  # Thin public wrapper over the internal accessor.
  renv_project_get(default)
}
renv_project_get <- function(default = NULL) {
  # Return the loaded project path, or `default` when none is loaded.
  path <- the$project_path
  if (is.null(path))
    default
  else
    path
}
# NOTE: RENV_PROJECT kept for backwards compatibility with RStudio
# Record `project` as the active project, both in internal package state and
# in the RENV_PROJECT environment variable so external tools can discover it.
renv_project_set <- function(project) {
  the$project_path <- project
  Sys.setenv(RENV_PROJECT = project)
}
# NOTE: 'RENV_PROJECT' kept for backwards compatibility with RStudio
# Forget the active project: reset internal state and unset the environment
# variable written by renv_project_set().
renv_project_clear <- function() {
  the$project_path <- NULL
  Sys.unsetenv("RENV_PROJECT")
}
renv_project_resolve <- function(project = NULL, default = getwd()) {
  # Prefer an explicitly-supplied project; otherwise fall back to the
  # active project (or `default` when none is loaded).
  if (is.null(project))
    project <- renv_project_get(default = default)
  renv_path_normalize(project)
}
renv_project_initialized <- function(project) {
  # A project counts as initialized when it already has a lockfile or a
  # project-local library on disk. `||` short-circuits, so the library
  # path is only computed when the lockfile is absent (as before).
  file.exists(renv_lockfile_path(project)) ||
    file.exists(renv_paths_library(project = project))
}
# Determine the type of the project at `path` ("package", a custom
# DESCRIPTION 'Type' value, or "unknown"), caching the result on the
# DESCRIPTION file's state via filebacked().
renv_project_type <- function(path) {
  # an empty path has nothing to inspect
  if (!nzchar(path))
    return("unknown")
  path <- renv_path_normalize(path)
  filebacked(
    context = "renv_project_type",
    path = file.path(path, "DESCRIPTION"),
    callback = renv_project_type_impl
  )
}
renv_project_type_impl <- function(path) {
  # Infer the project type from the DESCRIPTION file at `path`: an explicit
  # 'Type' field wins, a 'Package' field implies "package", and anything
  # absent or unreadable is "unknown".
  if (!file.exists(path))
    return("unknown")
  desc <- tryCatch(renv_dcf_read(path), error = identity)
  if (inherits(desc, "error"))
    return("unknown")
  if (!is.null(desc$Type))
    return(tolower(desc$Type))
  if (!is.null(desc$Package))
    return("package")
  "unknown"
}
# Build package records for the dependencies declared in the project's
# DESCRIPTION file: explicit Remotes entries win; otherwise packages are
# resolved by exact version requirement (pkg@version) or latest available.
# NOTE(review): the `fields` argument is not used anywhere in this body --
# confirm whether it is vestigial or should be forwarded.
renv_project_remotes <- function(project, fields = NULL) {
  descpath <- file.path(project, "DESCRIPTION")
  if (!file.exists(descpath))
    return(NULL)
  # first, parse remotes (if any)
  remotes <- renv_description_remotes(descpath)
  # next, find packages mentioned in the DESCRIPTION file
  deps <- renv_dependencies_discover_description(
    path = descpath,
    project = project
  )
  if (empty(deps))
    return(list())
  # split according to package
  specs <- split(deps, deps$Package)
  # drop ignored specs
  ignored <- renv_project_ignored_packages(project = project)
  specs <- specs[setdiff(names(specs), c("R", ignored))]
  # if any Roxygen fields are included,
  # infer a dependency on roxygen2 and devtools
  desc <- renv_description_read(descpath)
  if (any(grepl("^Roxygen", names(desc)))) {
    for (package in c("devtools", "roxygen2")) {
      if (!package %in% ignored) {
        specs[[package]] <-
          specs[[package]] %||%
          renv_dependencies_list(descpath, package, dev = TRUE)
      }
    }
  }
  # now, try to resolve the packages
  records <- enumerate(specs, function(package, spec) {
    # use remote if supplied
    if (!is.null(remotes[[package]]))
      return(remotes[[package]])
    # check for explicit version requirement
    explicit <- spec[spec$Require == "==", ]
    if (nrow(explicit) == 0)
      return(renv_remotes_resolve(package))
    version <- spec$Version[[1]]
    if (!nzchar(version))
      return(renv_remotes_resolve(package))
    entry <- paste(package, version, sep = "@")
    renv_remotes_resolve(entry)
  })
  # return records
  records
}
renv_project_ignored_packages <- function(project) {
  # without a project there can be no ignored packages
  if (is.null(project))
    return(character())
  # user-configured ignores, plus the self-ignore for package projects
  c(
    settings$ignored.packages(project = project),
    renv_project_ignored_packages_self(project)
  )
}
# Compute the package name to "self-ignore" for package projects: a package
# project shouldn't normally be treated as its own dependency, except in the
# cases enumerated below.
renv_project_ignored_packages_self <- function(project) {
  # only ignore self in package projects
  if (renv_project_type(project) != "package")
    return(NULL)
  # read current package
  desc <- renv_description_read(project)
  package <- desc[["Package"]]
  # respect user preference if set
  ignore <- getOption("renv.snapshot.ignore.self", default = NULL)
  if (identical(ignore, TRUE))
    return(package)
  else if (identical(ignore, FALSE))
    return(NULL)
  # don't ignore self in golem projects
  golem <- file.path(project, "inst/golem-config.yml")
  if (file.exists(golem))
    return(NULL)
  # hack for renv: don't depend on self
  if (identical(package, "renv"))
    return(NULL)
  # return the package name
  package
}
renv_project_id <- function(project) {
  # Return this project's persistent unique id, generating and persisting
  # one on first use.
  idpath <- renv_id_path(project = project)
  if (!file.exists(idpath))
    writeLines(renv_id_generate(), con = idpath)
  readLines(idpath, n = 1L, warn = FALSE)
}
# TODO: this gets really dicey once the user starts configuring where
# renv places its project-local state ...
# Walk upward from `path` (default: the working directory), returning the
# first parent directory containing an renv anchor file; error when the
# search exhausts without a match.
renv_project_find <- function(path = NULL) {
  path <- path %||% getwd()
  anchors <- c("renv.lock", "renv/activate.R")
  resolved <- renv_file_find(path, function(parent) {
    for (anchor in anchors)
      if (file.exists(file.path(parent, anchor)))
        return(parent)
  })
  if (is.null(resolved)) {
    fmt <- "couldn't resolve renv project associated with path %s"
    stopf(fmt, renv_path_pretty(path))
  }
  resolved
}
# Acquire a scoped filesystem lock on <project>/renv/lock, but only when
# locking is enabled and `project` is the currently-loaded project.
renv_project_lock <- function(project = NULL) {
  if (!config$locking.enabled())
    return()
  # only lock the active project; other projects are left alone
  path <- the$project_path
  if (!identical(project, path))
    return()
  project <- renv_project_resolve(project)
  path <- file.path(project, "renv/lock")
  ensure_parent_directory(path)
  # the lock is released automatically when the caller's frame exits
  renv_scope_lock(path, scope = parent.frame())
}
renv_project_loaded <- function(project) {
  # NULL never counts as loaded; otherwise compare against package state.
  if (is.null(project))
    return(FALSE)
  identical(project, the$project_path)
}
| /R/project.R | permissive | rstudio/renv | R | false | false | 6,145 | r |
# The path to the currently-loaded project, if any.
# NULL when no project is currently loaded.
the$project_path <- NULL
# Flag indicating whether we're checking if the project is synchronized.
the$project_synchronized_check_running <- FALSE
#' Retrieve the active project
#'
#' Retrieve the path to the active project (if any).
#'
#' @param default The value to return when no project is
#' currently active. Defaults to `NULL`.
#'
#' @export
#'
#' @return The active project directory, as a length-one character vector.
#'
#' @examples
#' \dontrun{
#'
#' # get the currently-active renv project
#' renv::project()
#'
#' }
project <- function(default = NULL) {
renv_project_get(default = default)
}
renv_project_get <- function(default = NULL) {
the$project_path %||% default
}
# NOTE: RENV_PROJECT kept for backwards compatibility with RStudio
renv_project_set <- function(project) {
the$project_path <- project
Sys.setenv(RENV_PROJECT = project)
}
# NOTE: 'RENV_PROJECT' kept for backwards compatibility with RStudio
renv_project_clear <- function() {
the$project_path <- NULL
Sys.unsetenv("RENV_PROJECT")
}
renv_project_resolve <- function(project = NULL, default = getwd()) {
project <- project %||% renv_project_get(default = default)
renv_path_normalize(project)
}
renv_project_initialized <- function(project) {
lockfile <- renv_lockfile_path(project)
if (file.exists(lockfile))
return(TRUE)
library <- renv_paths_library(project = project)
if (file.exists(library))
return(TRUE)
FALSE
}
renv_project_type <- function(path) {
if (!nzchar(path))
return("unknown")
path <- renv_path_normalize(path)
filebacked(
context = "renv_project_type",
path = file.path(path, "DESCRIPTION"),
callback = renv_project_type_impl
)
}
renv_project_type_impl <- function(path) {
if (!file.exists(path))
return("unknown")
desc <- tryCatch(
renv_dcf_read(path),
error = identity
)
if (inherits(desc, "error"))
return("unknown")
type <- desc$Type
if (!is.null(type))
return(tolower(type))
package <- desc$Package
if (!is.null(package))
return("package")
"unknown"
}
renv_project_remotes <- function(project, fields = NULL) {
descpath <- file.path(project, "DESCRIPTION")
if (!file.exists(descpath))
return(NULL)
# first, parse remotes (if any)
remotes <- renv_description_remotes(descpath)
# next, find packages mentioned in the DESCRIPTION file
deps <- renv_dependencies_discover_description(
path = descpath,
project = project
)
if (empty(deps))
return(list())
# split according to package
specs <- split(deps, deps$Package)
# drop ignored specs
ignored <- renv_project_ignored_packages(project = project)
specs <- specs[setdiff(names(specs), c("R", ignored))]
# if any Roxygen fields are included,
# infer a dependency on roxygen2 and devtools
desc <- renv_description_read(descpath)
if (any(grepl("^Roxygen", names(desc)))) {
for (package in c("devtools", "roxygen2")) {
if (!package %in% ignored) {
specs[[package]] <-
specs[[package]] %||%
renv_dependencies_list(descpath, package, dev = TRUE)
}
}
}
# now, try to resolve the packages
records <- enumerate(specs, function(package, spec) {
# use remote if supplied
if (!is.null(remotes[[package]]))
return(remotes[[package]])
# check for explicit version requirement
explicit <- spec[spec$Require == "==", ]
if (nrow(explicit) == 0)
return(renv_remotes_resolve(package))
version <- spec$Version[[1]]
if (!nzchar(version))
return(renv_remotes_resolve(package))
entry <- paste(package, version, sep = "@")
renv_remotes_resolve(entry)
})
# return records
records
}
renv_project_ignored_packages <- function(project) {
# if we don't have a project, nothing to do
if (is.null(project))
return(character())
# read base set of ignored packages
ignored <- c(
settings$ignored.packages(project = project),
renv_project_ignored_packages_self(project)
)
# return collected set of ignored packages
ignored
}
renv_project_ignored_packages_self <- function(project) {
# only ignore self in package projects
if (renv_project_type(project) != "package")
return(NULL)
# read current package
desc <- renv_description_read(project)
package <- desc[["Package"]]
# respect user preference if set
ignore <- getOption("renv.snapshot.ignore.self", default = NULL)
if (identical(ignore, TRUE))
return(package)
else if (identical(ignore, FALSE))
return(NULL)
# don't ignore self in golem projets
golem <- file.path(project, "inst/golem-config.yml")
if (file.exists(golem))
return(NULL)
# hack for renv: don't depend on self
if (identical(package, "renv"))
return(NULL)
# return the package name
package
}
renv_project_id <- function(project) {
idpath <- renv_id_path(project = project)
if (!file.exists(idpath)) {
id <- renv_id_generate()
writeLines(id, con = idpath)
}
readLines(idpath, n = 1L, warn = FALSE)
}
# TODO: this gets really dicey once the user starts configuring where
# renv places its project-local state ...
renv_project_find <- function(path = NULL) {
path <- path %||% getwd()
anchors <- c("renv.lock", "renv/activate.R")
resolved <- renv_file_find(path, function(parent) {
for (anchor in anchors)
if (file.exists(file.path(parent, anchor)))
return(parent)
})
if (is.null(resolved)) {
fmt <- "couldn't resolve renv project associated with path %s"
stopf(fmt, renv_path_pretty(path))
}
resolved
}
renv_project_lock <- function(project = NULL) {
if (!config$locking.enabled())
return()
path <- the$project_path
if (!identical(project, path))
return()
project <- renv_project_resolve(project)
path <- file.path(project, "renv/lock")
ensure_parent_directory(path)
renv_scope_lock(path, scope = parent.frame())
}
renv_project_loaded <- function(project) {
!is.null(project) && identical(project, the$project_path)
}
|
# Johns Hopkins Data Science Track
# Getting and Cleaning Data
#
# Course Project
# run_analysis.R performs the following steps:
# - Merges the training and the test sets to create one data set.
# - Extracts only the measurements on the mean and standard deviation for each
#   measurement (assumed to be features with mean() or std() in name).
# - Uses descriptive activity names to name the activities in the data set
# - Appropriately labels the data set with descriptive variable names.
# - From the data set in step 4, creates a second, independent tidy data set
#   with the average of each variable for each activity and each subject.
# script assumes that the raw data download has been unzipped into a directory
# named "UCI HAR Dataset" placed in the current working directory
library(reshape2)
# BUG FIX: dplyr was never loaded, yet rename()/mutate()/select()/%>%/
# group_by()/summarize() are all dplyr verbs -- the script failed without it
library(dplyr)
# load training data
trainSubject <- read.table("./UCI HAR Dataset/train/subject_train.txt")
trainX <- read.table("./UCI HAR Dataset/train/X_train.txt")
trainY <- read.table("./UCI HAR Dataset/train/Y_train.txt")
# load test data
testSubject <- read.table("./UCI HAR Dataset/test/subject_test.txt")
testX <- read.table("./UCI HAR Dataset/test/X_test.txt")
testY <- read.table("./UCI HAR Dataset/test/Y_test.txt")
# merge training data together, test data together, and concatenate
df <- rbind(cbind(trainSubject, trainY, trainX),
            cbind(testSubject, testY, testX))
# read feature file (V1 = column index, V2 = feature name)
features <- read.table("./UCI HAR Dataset/features.txt")
features <- rename(features, col = V1, feature = V2)
# extract feature names that include "mean()" or "std()"
# note: meanFreq() features are excluded
index <- grep("mean\\(\\)|std\\(\\)", features$feature)
features <- features[index, ]
# extract subject, activity, and selected features from data frame
df <- df[, c(1, 2, index + 2)]
# rename variables
colnames(df) <- c("subjectid", "activityid",
                  as.character(features$feature))
# load activity names
activities <- read.table("./UCI HAR Dataset/activity_labels.txt")
# add activity name to data frame and remove activity id
df <- mutate(df, activity = factor(activities$V2[df$activityid])) %>% select(-activityid)
# reshape data frame into long tidy dataset
df <- melt(df, id = c("subjectid", "activity"), measure.vars = features$feature)
# compute mean by subject and activity for each variable
groups <- group_by(df, subjectid, activity, variable)
tidy.df <- summarize(groups, varmean = mean(value))
# write tidy.df to txt file in current working directory
write.table(tidy.df,"./tidydf.txt",row.name=FALSE); | /run_analysis.R | no_license | cprovan/getting-cleaning-data | R | false | false | 2,546 | r | # Johns Hopkins Data Science Track
# Getting and Cleaning Data
#
# Course Project
# run_analysis.R performs the following steps:
# - Merges the training and the test sets to create one data set.
# - Extracts only the measurements on the mean and standard deviation for each
#   measurement (assumed to be features with mean() or std() in name).
# - Uses descriptive activity names to name the activities in the data set
# - Appropriately labels the data set with descriptive variable names.
# - From the data set in step 4, creates a second, independent tidy data set
#   with the average of each variable for each activity and each subject.
# script assumes that the raw data download has been unzipped into a directory
# named "UCI HAR Dataset" placed in the current working directory
library(reshape2)
# BUG FIX: dplyr was never loaded, yet rename()/mutate()/select()/%>%/
# group_by()/summarize() are all dplyr verbs -- the script failed without it
library(dplyr)
# load training data
trainSubject <- read.table("./UCI HAR Dataset/train/subject_train.txt")
trainX <- read.table("./UCI HAR Dataset/train/X_train.txt")
trainY <- read.table("./UCI HAR Dataset/train/Y_train.txt")
# load test data
testSubject <- read.table("./UCI HAR Dataset/test/subject_test.txt")
testX <- read.table("./UCI HAR Dataset/test/X_test.txt")
testY <- read.table("./UCI HAR Dataset/test/Y_test.txt")
# merge training data together, test data together, and concatenate
df <- rbind(cbind(trainSubject, trainY, trainX),
            cbind(testSubject, testY, testX))
# read feature file (V1 = column index, V2 = feature name)
features <- read.table("./UCI HAR Dataset/features.txt")
features <- rename(features, col = V1, feature = V2)
# extract feature names that include "mean()" or "std()"
# note: meanFreq() features are excluded
index <- grep("mean\\(\\)|std\\(\\)", features$feature)
features <- features[index, ]
# extract subject, activity, and selected features from data frame
df <- df[, c(1, 2, index + 2)]
# rename variables
colnames(df) <- c("subjectid", "activityid",
                  as.character(features$feature))
# load activity names
activities <- read.table("./UCI HAR Dataset/activity_labels.txt")
# add activity name to data frame and remove activity id
df <- mutate(df, activity = factor(activities$V2[df$activityid])) %>% select(-activityid)
# reshape data frame into long tidy dataset
df <- melt(df, id = c("subjectid", "activity"), measure.vars = features$feature)
# compute mean by subject and activity for each variable
groups <- group_by(df, subjectid, activity, variable)
tidy.df <- summarize(groups, varmean = mean(value))
# write tidy.df to txt file in current working directory
write.table(tidy.df,"./tidydf.txt",row.name=FALSE); |
testlist <- list(testX = c(191493125665849920, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), trainX = structure(c(1.78844646178735e+212, 1.93075223605916e+156, 121373.193669204, 1.26689771433298e+26, 3.18748534627155e+129, 8.54794497535107e-83, 2.61907806894971e-213, 1.5105425626729e+200, 6.51877713351675e+25, 4.40467528702727e-93, 7.6427933587945, 34208333744.1307, 1.6400690920442e-111, 3.9769673154778e-304, 4.76127371594362e-307, 8.63819952335095e+122, 1.18662128550178e-59, 1128.83285802937, 3.80478583615452e-72, 1.21321365773924e-195, 9.69744674150153e-268, 8.98899319496613e+272, 7.63669788330223e+285, 3.85830749537493e+266, 2.65348875902107e+136, 8.14965241967603e+92, 2.59677146539475e-173, 1.55228780425777e-91, 8.25550184376779e+105, 1.18572662524891e+134, 1.04113208597565e+183, 1.01971211553913e-259, 1.23680594512923e-165, 5.24757023065221e+62, 3.41816623041351e-96 ), .Dim = c(5L, 7L)))
result <- do.call(dann:::calc_distance_C,testlist)
str(result) | /dann/inst/testfiles/calc_distance_C/AFL_calc_distance_C/calc_distance_C_valgrind_files/1609868913-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 1,199 | r | testlist <- list(testX = c(191493125665849920, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), trainX = structure(c(1.78844646178735e+212, 1.93075223605916e+156, 121373.193669204, 1.26689771433298e+26, 3.18748534627155e+129, 8.54794497535107e-83, 2.61907806894971e-213, 1.5105425626729e+200, 6.51877713351675e+25, 4.40467528702727e-93, 7.6427933587945, 34208333744.1307, 1.6400690920442e-111, 3.9769673154778e-304, 4.76127371594362e-307, 8.63819952335095e+122, 1.18662128550178e-59, 1128.83285802937, 3.80478583615452e-72, 1.21321365773924e-195, 9.69744674150153e-268, 8.98899319496613e+272, 7.63669788330223e+285, 3.85830749537493e+266, 2.65348875902107e+136, 8.14965241967603e+92, 2.59677146539475e-173, 1.55228780425777e-91, 8.25550184376779e+105, 1.18572662524891e+134, 1.04113208597565e+183, 1.01971211553913e-259, 1.23680594512923e-165, 5.24757023065221e+62, 3.41816623041351e-96 ), .Dim = c(5L, 7L)))
result <- do.call(dann:::calc_distance_C,testlist)
str(result) |
# Two-dimensional QTL permutation scan for the F2 cross, parallelized over
# 16 CPUs. Results are saved to the shared analysis output directory.
library(qtl)
library(snowfall)
# load the prepared cross object (43 traits)
load("/share/malooflab/Ruijuan/F2/QTL_analysis/data/LG.f2.after.crossover.43traits.Rdata")
# augment the cross with imputed genotypes and genotype probabilities (1 cM grid)
LG.f2.after.crossover <- sim.geno(LG.f2.after.crossover,step=1,n.draws=32)
LG.f2.after.crossover <- calc.genoprob(LG.f2.after.crossover,step=1)
sfInit(parallel = TRUE, cpus = 16)
sfExport("LG.f2.after.crossover")
sfLibrary(qtl)
# run scantwo for all traits at once
# NOTE(review): pheno.col=37:38 scans only two phenotype columns, not all --
# the comment above appears inherited from a template; confirm.
set.seed(6)
system.time(
scantwo.perm.imp.18.6 <-
scantwo(LG.f2.after.crossover,pheno.col=37:38,method="hk",n.perm=10,n.cluster = 16)
)
sfStop()
# save output
save(scantwo.perm.imp.18.6, file = "/share/malooflab/Ruijuan/F2/QTL_analysis/output/scantwo/scantwo.perm.imp.18.6.Rdata")
| /F2/scantwo/scantwo_perm_18.6_new.R | no_license | leejimmy93/KIAT_cabernet | R | false | false | 680 | r | library(qtl)
library(snowfall)
load("/share/malooflab/Ruijuan/F2/QTL_analysis/data/LG.f2.after.crossover.43traits.Rdata")
LG.f2.after.crossover <- sim.geno(LG.f2.after.crossover,step=1,n.draws=32)
LG.f2.after.crossover <- calc.genoprob(LG.f2.after.crossover,step=1)
sfInit(parallel = TRUE, cpus = 16)
sfExport("LG.f2.after.crossover")
sfLibrary(qtl)
# run scantwo for all traits at once
set.seed(6)
system.time(
scantwo.perm.imp.18.6 <-
scantwo(LG.f2.after.crossover,pheno.col=37:38,method="hk",n.perm=10,n.cluster = 16)
)
sfStop()
# save output
save(scantwo.perm.imp.18.6, file = "/share/malooflab/Ruijuan/F2/QTL_analysis/output/scantwo/scantwo.perm.imp.18.6.Rdata")
|
\name{exp2d.rand}
\alias{exp2d.rand}
\title{ Random 2-d Exponential Data }
\description{
A Random subsample of \code{data(\link{exp2d})}, or
Latin Hypercube sampled data evaluated with \code{\link{exp2d.Z}}
}
\usage{exp2d.rand(n1 = 50, n2 = 30, lh = NULL, dopt = 1)}
\arguments{
\item{n1}{Number of samples from the first, interesting, quadrant}
\item{n2}{Number of samples from the other three, uninteresting, quadrants}
\item{lh}{If \code{!is.null(lh)} then Latin Hypercube (LH) sampling
(\code{\link{lhs}}) is used instead of subsampling from
\code{data(\link{exp2d})}; \code{lh} should be a single nonnegative
integer specifying the desired number of predictive locations,
\code{XX}; or, it should be a vector of length 4, specifying the
number of predictive locations desired from each of the four
quadrants (interesting quadrant first, then counter-clockwise)}
\item{dopt}{If \code{dopt >= 2} then d-optimal subsampling from LH
candidates of the multiple indicated by the value of
\code{dopt} will be used. This argument only
makes sense when \code{!is.null(lh)}}
}
\value{
Output is a \code{list} with entries:
\item{X}{2-d \code{data.frame} with \code{n1 + n2} input locations}
\item{Z}{Numeric vector describing the responses (with noise) at the
\code{X} input locations}
\item{Ztrue}{Numeric vector describing the true responses (without
noise) at the \code{X} input locations}
\item{XX}{2-d \code{data.frame} containing the remaining
\code{441 - (n1 + n2)} input locations}
\item{ZZ}{Numeric vector describing the responses (with noise) at
the \code{XX} predictive locations}
\item{ZZtrue}{Numeric vector describing the responses (without
noise) at the \code{XX} predictive locations}
}
\details{
When \code{is.null(lh)}, data is subsampled without replacement from
\code{data(\link{exp2d})}. Of the \code{n1 + n2 <= 441}
input/response pairs \code{X,Z}, \code{n1} are taken from the
first quadrant, i.e., where the response is interesting,
and the remaining \code{n2} are taken from the other three
quadrants. The remaining \code{441 - (n1 + n2)} are treated as
predictive locations
Otherwise, when \code{!is.null(lh)}, Latin Hypercube Sampling
(\code{\link{lhs}}) is used
If \code{dopt >= 2} then \code{n1*dopt} LH candidates are used
to get a D-optimal subsample of size \code{n1} from the
first (interesting) quadrant. Similarly \code{n2*dopt} in the
rest of the un-interesting region.
A total of \code{lh*dopt} candidates will be used for sequential D-optimal
subsampling for predictive locations \code{XX} in all four
quadrants assuming the already-sampled \code{X} locations will
be in the design.
In all three cases, the response is evaluated as
  \deqn{Z(X)=x_1 * \exp(-x_1^2-x_2^2).}{Z(X) = X1 * exp(-X1^2-X2^2),}
thus creating the outputs \code{Ztrue} and \code{ZZtrue}.
Zero-mean normal noise with \code{sd=0.001} is added to the
responses \code{Z} and \code{ZZ}
}
\author{
Robert B. Gramacy, \email{rbgramacy@chicagobooth.edu}, and
Matt Taddy, \email{taddy@chicagobooth.edu}
}
\references{
Gramacy, R. B. (2007). \emph{\pkg{tgp}: An \R Package for Bayesian Nonstationary, Semiparametric Nonlinear Regression and Design by Treed Gaussian Process Models.}
Journal of Statistical Software, \bold{19}(9).
\url{http://www.jstatsoft.org/v19/i09}
Gramacy, R. B., Lee, H. K. H. (2007).
\emph{Bayesian treed Gaussian process models with an application to computer modeling}
Journal of the American Statistical Association, \bold{to appear}.
Also available as ArXiv article 0710.4536
\url{http://arxiv.org/abs/0710.4536}
\url{http://bobby.gramacy.com/r_packages/tgp}
}
\seealso{\code{\link{lhs}}, \code{\link{exp2d}}, \code{\link{exp2d.Z}},
\code{\link{btgp}}, and other \code{b*} functions}
\examples{
## randomly subsampled data
## ------------------------
eds <- exp2d.rand()
# higher span = 0.5 required because the data is sparse
# and was generated randomly
eds.g <- interp.loess(eds$X[,1], eds$X[,2], eds$Z, span=0.5)
# perspective plot, and plot of the input (X & XX) locations
par(mfrow=c(1,2), bty="n")
persp(eds.g, main="loess surface", theta=-30, phi=20,
xlab="X[,1]", ylab="X[,2]", zlab="Z")
plot(eds$X, main="Randomly Subsampled Inputs")
points(eds$XX, pch=19, cex=0.5)
## Latin Hypercube sampled data
## ----------------------------
edlh <- exp2d.rand(lh=c(20, 15, 10, 5))
# higher span = 0.5 required because the data is sparse
# and was generated randomly
edlh.g <- interp.loess(edlh$X[,1], edlh$X[,2], edlh$Z, span=0.5)
# perspective plot, and plot of the input (X & XX) locations
par(mfrow=c(1,2), bty="n")
persp(edlh.g, main="loess surface", theta=-30, phi=20,
xlab="X[,1]", ylab="X[,2]", zlab="Z")
plot(edlh$X, main="Latin Hypercube Sampled Inputs")
points(edlh$XX, pch=19, cex=0.5)
# show the quadrants
abline(h=2, col=2, lty=2, lwd=2)
abline(v=2, col=2, lty=2, lwd=2)
\dontrun{
## D-optimal subsample with a factor of 10 (more) candidates
## ---------------------------------------------------------
edlhd <- exp2d.rand(lh=c(20, 15, 10, 5), dopt=10)
# higher span = 0.5 required because the data is sparse
# and was generated randomly
edlhd.g <- interp.loess(edlhd$X[,1], edlhd$X[,2], edlhd$Z, span=0.5)
# perspective plot, and plot of the input (X & XX) locations
par(mfrow=c(1,2), bty="n")
persp(edlhd.g, main="loess surface", theta=-30, phi=20,
xlab="X[,1]", ylab="X[,2]", zlab="Z")
plot(edlhd$X, main="D-optimally Sampled Inputs")
points(edlhd$XX, pch=19, cex=0.5)
# show the quadrants
abline(h=2, col=2, lty=2, lwd=2)
abline(v=2, col=2, lty=2, lwd=2)
}
}
\keyword{datasets}
\keyword{datagen}
| /man/exp2d.rand.Rd | no_license | stochtastic/tgp | R | false | false | 5,746 | rd |
\name{exp2d.rand}
\alias{exp2d.rand}
\title{ Random 2-d Exponential Data }
\description{
A Random subsample of \code{data(\link{exp2d})}, or
Latin Hypercube sampled data evaluated with \code{\link{exp2d.Z}}
}
\usage{exp2d.rand(n1 = 50, n2 = 30, lh = NULL, dopt = 1)}
\arguments{
\item{n1}{Number of samples from the first, interesting, quadrant}
\item{n2}{Number of samples from the other three, uninteresting, quadrants}
\item{lh}{If \code{!is.null(lh)} then Latin Hypercube (LH) sampling
(\code{\link{lhs}}) is used instead of subsampling from
\code{data(\link{exp2d})}; \code{lh} should be a single nonnegative
integer specifying the desired number of predictive locations,
\code{XX}; or, it should be a vector of length 4, specifying the
number of predictive locations desired from each of the four
quadrants (interesting quadrant first, then counter-clockwise)}
\item{dopt}{If \code{dopt >= 2} then d-optimal subsampling from LH
candidates of the multiple indicated by the value of
\code{dopt} will be used. This argument only
makes sense when \code{!is.null(lh)}}
}
\value{
Output is a \code{list} with entries:
\item{X}{2-d \code{data.frame} with \code{n1 + n2} input locations}
\item{Z}{Numeric vector describing the responses (with noise) at the
\code{X} input locations}
\item{Ztrue}{Numeric vector describing the true responses (without
noise) at the \code{X} input locations}
\item{XX}{2-d \code{data.frame} containing the remaining
\code{441 - (n1 + n2)} input locations}
\item{ZZ}{Numeric vector describing the responses (with noise) at
the \code{XX} predictive locations}
\item{ZZtrue}{Numeric vector describing the responses (without
noise) at the \code{XX} predictive locations}
}
\details{
When \code{is.null(lh)}, data is subsampled without replacement from
\code{data(\link{exp2d})}. Of the \code{n1 + n2 <= 441}
input/response pairs \code{X,Z}, there are \code{n1} are taken from the
first quadrant, i.e., where the response is interesting,
and the remaining \code{n2} are taken from the other three
quadrants. The remaining \code{441 - (n1 + n2)} are treated as
predictive locations.
Otherwise, when \code{!is.null(lh)}, Latin Hypercube Sampling
(\code{\link{lhs}}) is used
If \code{dopt >= 2} then \code{n1*dopt} LH candidates are used
to get a D-optimal subsample of size \code{n1} from the
first (interesting) quadrant. Similarly \code{n2*dopt} in the
rest of the un-interesting region.
A total of \code{lh*dopt} candidates will be used for sequential D-optimal
subsampling for predictive locations \code{XX} in all four
quadrants assuming the already-sampled \code{X} locations will
be in the design.
In all three cases, the response is evaluated as
\deqn{Z(X)=x_1 * \exp(-x_1^2-x_2^2).}{Z(X) = X1 * exp(-X1^2-X2^2),}
thus creating the outputs \code{Ztrue} and \code{ZZtrue}.
Zero-mean normal noise with \code{sd=0.001} is added to the
responses \code{Z} and \code{ZZ}.
}
\author{
Robert B. Gramacy, \email{rbgramacy@chicagobooth.edu}, and
Matt Taddy, \email{taddy@chicagobooth.edu}
}
\references{
Gramacy, R. B. (2007). \emph{\pkg{tgp}: An \R Package for Bayesian Nonstationary, Semiparametric Nonlinear Regression and Design by Treed Gaussian Process Models.}
Journal of Statistical Software, \bold{19}(9).
\url{http://www.jstatsoft.org/v19/i09}
Gramacy, R. B., Lee, H. K. H. (2007).
\emph{Bayesian treed Gaussian process models with an application to computer modeling}
Journal of the American Statistical Association, \bold{to appear}.
Also available as ArXiv article 0710.4536
\url{http://arxiv.org/abs/0710.4536}
\url{http://bobby.gramacy.com/r_packages/tgp}
}
\seealso{\code{\link{lhs}}, \code{\link{exp2d}}, \code{\link{exp2d.Z}},
\code{\link{btgp}}, and other \code{b*} functions}
\examples{
## randomly subsampled data
## ------------------------
eds <- exp2d.rand()
# higher span = 0.5 required because the data is sparse
# and was generated randomly
eds.g <- interp.loess(eds$X[,1], eds$X[,2], eds$Z, span=0.5)
# perspective plot, and plot of the input (X & XX) locations
par(mfrow=c(1,2), bty="n")
persp(eds.g, main="loess surface", theta=-30, phi=20,
xlab="X[,1]", ylab="X[,2]", zlab="Z")
plot(eds$X, main="Randomly Subsampled Inputs")
points(eds$XX, pch=19, cex=0.5)
## Latin Hypercube sampled data
## ----------------------------
edlh <- exp2d.rand(lh=c(20, 15, 10, 5))
# higher span = 0.5 required because the data is sparse
# and was generated randomly
edlh.g <- interp.loess(edlh$X[,1], edlh$X[,2], edlh$Z, span=0.5)
# perspective plot, and plot of the input (X & XX) locations
par(mfrow=c(1,2), bty="n")
persp(edlh.g, main="loess surface", theta=-30, phi=20,
xlab="X[,1]", ylab="X[,2]", zlab="Z")
plot(edlh$X, main="Latin Hypercube Sampled Inputs")
points(edlh$XX, pch=19, cex=0.5)
# show the quadrants
abline(h=2, col=2, lty=2, lwd=2)
abline(v=2, col=2, lty=2, lwd=2)
\dontrun{
## D-optimal subsample with a factor of 10 (more) candidates
## ---------------------------------------------------------
edlhd <- exp2d.rand(lh=c(20, 15, 10, 5), dopt=10)
# higher span = 0.5 required because the data is sparse
# and was generated randomly
edlhd.g <- interp.loess(edlhd$X[,1], edlhd$X[,2], edlhd$Z, span=0.5)
# perspective plot, and plot of the input (X & XX) locations
par(mfrow=c(1,2), bty="n")
persp(edlhd.g, main="loess surface", theta=-30, phi=20,
xlab="X[,1]", ylab="X[,2]", zlab="Z")
plot(edlhd$X, main="D-optimally Sampled Inputs")
points(edlhd$XX, pch=19, cex=0.5)
# show the quadrants
abline(h=2, col=2, lty=2, lwd=2)
abline(v=2, col=2, lty=2, lwd=2)
}
}
\keyword{datasets}
\keyword{datagen}
|
# #### Storing data ####
#
# #' @include analysis_object.R
# NULL
#
# #' Store data from a simulation
# #'
# #' The data from a simulation is typically stored in a \code{simulation_analysis_object} object, which is typically constructed by the \code{tidy.simulation_data} function. There are two methods for storing the data from an experiment: \code{store.csv} for storing the data in a \code{.csv} folder and \code{store.wd} for storing the \code{simulation_analysis_object} in the global environment.
# #'
# #' @usage store.csv( data, model_object, iterate, csv_name )
# #' @usage store.global( data, model_object, iterate, object_name )
# #'
# #' @param data A \code{simulation_analysis_object}.
# #' @param model_object A \code{model} object.
# #' @param iterate A numeric corresponding to which iteration of the condition list generated the data. Defaults to \code{NULL}. Either \code{iterate} or \code{csv_name}/\code{object_name} must be specified.
# #' @param object_name For \code{store.global}. A string specifying the variable label that the data will be stored as in the global environment. Defaults to \code{NULL}. See details.
# #' @param csv_name For \code{store.csv}. A character specifying the file name for the \code{.csv} file. Defaults to \code{NULL}. See details.
# #'
# #' @details
# #' When writing to a \code{.csv} file with \code{store.csv}, the \code{.csv} file will contain:
# #'
# #' 1) An \eqn{n x 2} matrix of strings with the first column listing the \code{control} objects required in the simulation and the second column containing strings that specify the parameters for the corresponding \code{control} object.
# #'
# #' 2) An \eqn{n x 2} matrix of the data in time-event format.
# #'
# #' By default, the name of the file will be "(model_object_name)_iterate_X.csv", where X is the iterate.
# #'
# #' When storing the data in the global environment with \code{store.global}, the \code{simulation_analysis_object} is stored directly in the global environment with the name specified by \code{object_name} or the default "(model_object_name)_iterate_X" if \code{object_name} is unspecified.
# #' In general, when conducting a large scale simulation, it is not advised to use \code{store.global} because you will probably not have enough RAM to store everything that you want.
# #'
# #' @seealso
# #' \code{\link{class.analysis_object}} for \code{simulation_analysis_object}.
# #'
# #' @rdname store_data
# #' @exportMethod store.csv
# #' @aliases store.csv
#
# conditions.write_string = function( conditions ){
# condition_params = sapply( conditions, helper.conditions.write_string )
# data.frame( Controls = names( conditions ), Parameters = condition_params )
# }
#
# helper.conditions.write_string = function( x ){
# c = paste( names( x ), as.character( x ), sep = ": " )
# d = paste( c, collapse = " | " )
# }
#
# store.obj_name = function( model_object, iterate, csv = F ){
# a = paste( class( model_object ), "iterate", iterate, sep = "_" )
# if ( csv ) paste( a, ".csv", sep = "" )
# else a
# }
#
# setGeneric( "store.csv", function( data, model_object, iterate = NULL, csv_name = NULL ) standardGeneric( "store.csv" ) )
#
# setMethod( "store.csv", signature( data = "simulation_analysis_object" ),
# function( data, model_object, iterate, csv_name )
# {
# if ( is.null( iterate ) & is.null(csv_name) ) stop( "One of 'iterate' or 'csv_name' must be specified" )
# if ( is.null( csv_name ) ){
# csv_name = store.obj_name( model_object, iterate, csv = T )
# }
# meta_datas = conditions.write_string( data@meta_data )
# write.table( meta_datas, csv_name, row.names = F, sep = "," )
# write.table( data@analysis_object, csv_name, row.names = F, append = T, sep = "," )
# }
# )
#
# #' @rdname store_data
# #' @exportMethod store.global
# #' @aliases store.global
#
# setGeneric( "store.global", function( data, model_object, iterate, object_name ) standardGeneric( "store.global" ) )
#
# setMethod( "store.global", signature( data = "simulation_analysis_object"),
# function( data, model_object, iterate = NULL, object_name = NULL ){
#
# if ( is.null( iterate ) & is.null(object_name) ) stop( "One of 'iterate' or 'object_name' must be specified" )
# if ( is.null( object_name ) ){
# object_name = store.obj_name( model_object, iterate )
# }
# assign( object_name, data, envir = globalenv() )
# }
# )
| /R/store_data__DEPRECATED.R | no_license | Don-Li/CAB | R | false | false | 4,455 | r | # #### Storing data ####
#
# #' @include analysis_object.R
# NULL
#
# #' Store data from a simulation
# #'
# #' The data from a simulation is typically stored in a \code{simulation_analysis_object} object, which is typically constructed by the \code{tidy.simulation_data} function. There are two methods for storing the data from an experiment: \code{store.csv} for storing the data in a \code{.csv} folder and \code{store.wd} for storing the \code{simulation_analysis_object} in the global environment.
# #'
# #' @usage store.csv( data, model_object, iterate, csv_name )
# #' @usage store.global( data, model_object, iterate, object_name )
# #'
# #' @param data A \code{simulation_analysis_object}.
# #' @param model_object A \code{model} object.
# #' @param iterate A numeric corresponding to which iteration of the condition list generated the data. Defaults to \code{NULL}. Either \code{iterate} or \code{csv_name}/\code{object_name} must be specified.
# #' @param object_name For \code{store.global}. A string specifying the variable label that the data will be stored as in the global environment. Defaults to \code{NULL}. See details.
# #' @param csv_name For \code{store.csv}. A character specifying the file name for the \code{.csv} file. Defaults to \code{NULL}. See details.
# #'
# #' @details
# #' When writing to a \code{.csv} file with \code{store.csv}, the \code{.csv} file will contain:
# #'
# #' 1) An \eqn{n x 2} matrix of strings with the first column listing the \code{control} objects required in the simulation and the second column containing strings that specify the parameters for the corresponding \code{control} object.
# #'
# #' 2) An \eqn{n x 2} matrix of the data in time-event format.
# #'
# #' By default, the name of the file will be "(model_object_name)_iterate_X.csv", where X is the iterate.
# #'
# #' When storing the data in the global environment with \code{store.global}, the \code{simulation_analysis_object} is stored directly in the global environment with the name specified by \code{object_name} or the default "(model_object_name)_iterate_X" if \code{object_name} is unspecified.
# #' In general, when conducting a large scale simulation, it is not advised to use \code{store.global} because you will probably not have enough RAM to store everything that you want.
# #'
# #' @seealso
# #' \code{\link{class.analysis_object}} for \code{simulation_analysis_object}.
# #'
# #' @rdname store_data
# #' @exportMethod store.csv
# #' @aliases store.csv
#
# conditions.write_string = function( conditions ){
# condition_params = sapply( conditions, helper.conditions.write_string )
# data.frame( Controls = names( conditions ), Parameters = condition_params )
# }
#
# helper.conditions.write_string = function( x ){
# c = paste( names( x ), as.character( x ), sep = ": " )
# d = paste( c, collapse = " | " )
# }
#
# store.obj_name = function( model_object, iterate, csv = F ){
# a = paste( class( model_object ), "iterate", iterate, sep = "_" )
# if ( csv ) paste( a, ".csv", sep = "" )
# else a
# }
#
# setGeneric( "store.csv", function( data, model_object, iterate = NULL, csv_name = NULL ) standardGeneric( "store.csv" ) )
#
# setMethod( "store.csv", signature( data = "simulation_analysis_object" ),
# function( data, model_object, iterate, csv_name )
# {
# if ( is.null( iterate ) & is.null(csv_name) ) stop( "One of 'iterate' or 'csv_name' must be specified" )
# if ( is.null( csv_name ) ){
# csv_name = store.obj_name( model_object, iterate, csv = T )
# }
# meta_datas = conditions.write_string( data@meta_data )
# write.table( meta_datas, csv_name, row.names = F, sep = "," )
# write.table( data@analysis_object, csv_name, row.names = F, append = T, sep = "," )
# }
# )
#
# #' @rdname store_data
# #' @exportMethod store.global
# #' @aliases store.global
#
# setGeneric( "store.global", function( data, model_object, iterate, object_name ) standardGeneric( "store.global" ) )
#
# setMethod( "store.global", signature( data = "simulation_analysis_object"),
# function( data, model_object, iterate = NULL, object_name = NULL ){
#
# if ( is.null( iterate ) & is.null(object_name) ) stop( "One of 'iterate' or 'object_name' must be specified" )
# if ( is.null( object_name ) ){
# object_name = store.obj_name( model_object, iterate )
# }
# assign( object_name, data, envir = globalenv() )
# }
# )
|
df=data.frame(delta=c(1,2,3), lambda=c(4,5,6), rho=c(7,8,9), phi=c(10,11,12), rmse=c(100,200,300))
checked_values=data.frame(delta=c(1,0), lambda=c(4,0), rho=c(7,0), phi=c(10,0), rmse=c(100,0))
df[
apply( # which rows of df are not present in checked_values
apply(df[1:length(coords)],1,function(crd){ # which coordinates in df do not match something in checked_values
!(
apply(checked_values,1,function(x){ # which in checked_values match the given coordinate
all(crd==x[1:length(crd)])
})
)
}),2,all),]
#x axis is df
# y axis is checked values
# to find which of df is not present
# we want to grab the
eval_function=hill_climber_eval
hc=MakeHillClimber(as.character(1), hill_climber_eval, step_size, F, bounds, human_data, model_params)
hc$hill_climb()
crd=coords
rmse(hc_dat[hc_dat$hc_id=="1",]$rmse)
plot(hc_dat[hc_dat$hc_id=="1" & hc_dat$chosen,]$round,
hc_dat[hc_dat$hc_id=="1" & hc_dat$chosen,]$rmse,
type="l")
library(ggplot2)
ggplot(hc_dat[hc_dat$chosen,],aes(x=round,y=rmse,group=hc_id, color=hc_id))+
geom_line()
# need to improve stochastic method so that odds are greater for lower rmse
# too much randomness
# maybe add in a parameter there?
# hill_find=ifelse(seek_maxima, which.max, which.min)(df$rmse)
df=data.frame(rmse=c(1,2,3,4,5,6,7,8,9,10)/sum(c(1,2,3,4,5,6,7,8,9,10)))
seek_maxima=F
hill_find=ifelse(seek_maxima, which.max, which.min)(df$rmse)
hill_find=replace(rep(F,dim(df)[1]),sample(1:length(df$rmse), 1, prob=sum(df$rmse)/(df$rmse)),T)
sum(df$rmse)/(df$rmse)
replace(rep(F,dim(df)[1]),sample(1:length(df$rmse), 1, prob=sum(df$rmse)/(df$rmse)),T)
#####
# 4-6-19
parameter_means=c(.5,.5,.5,.5)
parameter_sds=c(.1,.1,.1,.1)
choice_data=c(0.025, 0.100, 0.200, 0.250, 0.175, 0.075, 0.175)
n_sims=5
h_dat=human_data
# return(model_run(coords,
# model_params$psd,
# model_params$choice_prob_data,
# model_params$number_of_sims,
# human_data))
a=Make_EWA_model("1",parameter_means,parameter_sds,choice_data)
a$id
model_run(parameter_means,parameter_sds,choice_data,n_sims,human_data)
# Correlation of
# mean of all choices for all 4 players for all 20 rounds within 1 simulation
# Variance of choice for all 4 players for all 20 rounds within 1 simulation
mean_var_corr=
cor(
aggregate(model_data$own_choice, by=list(sim=model_data$sim), FUN=mean)$x,
aggregate(model_data$own_choice, by=list(sim=model_data$sim), FUN=var)$x
)
mean_var_corr=
cor(
aggregate(h_dat$choice, by=list(sim=h_dat$sim), FUN=mean)$x,
aggregate(h_dat$choice, by=list(sim=h_dat$sim), FUN=var)$x
)
data.frame(unique(select(mutate(group_by(model_data, round),
mean=mean(own_choice),
agent_type="model",
mean_var_corr=
cor(
aggregate(model_data$own_choice, by=list(sim=model_data$sim), FUN=mean)$x,
aggregate(model_data$own_choice, by=list(sim=model_data$sim), FUN=var)$x
)
),
agent_type,round,mean,mean_var_corr))
)
data.frame(unique(select(mutate(group_by(h_dat, round),
mean=mean(choice),
agent_type="human",
mean_var_corr=
cor(
aggregate(h_dat$choice, by=list(sim=h_dat$group), FUN=mean)$x,
aggregate(h_dat$choice, by=list(sim=h_dat$group), FUN=var)$x
)
),
agent_type,round,mean,mean_var_corr))
)
a=c(1,2,3,4)
(a-a)^2
euclid_dist <- function(a, b) {
  # Euclidean (L2) distance between two equal-length numeric vectors.
  d <- a - b
  sqrt(sum(d * d))
}
library(ggplot2)
# setwd("C:/Users/Kevin/Dropbox/minimum_effort_game/EWA_Model")
# hc_dat=readRDS("./data/2019-04-07_11-56-04_EWA-hc.rds")
setwd("E:/Libraries/r projects/MEG_EWA_model-master")
setwd("C:/Users/Kevin/Dropbox/minimum_effort_game/EWA_Model")
hc_dat=readRDS("./data/2019-04-11_01-23-43_EWA-hc.rds")
mean(hc_dat$lambda_sd)
# this will inspect the data
ggplot(hc_dat[hc_dat$chosen,], aes(x=round,y=mean_var_corr_abs_diff, group=hc_id, color=hc_id))+
geom_line()
# lines(hc_dat[hc_dat$chosen,]$round,hc_dat[hc_dat$chosen,]$mean_var_corr_abs_diff)
ggplot(hc_dat[hc_dat$chosen,], aes(x=rmse,y=mean_var_corr_abs_diff, group=hc_id, color=hc_id))+
geom_point()
hc_start=data.frame()
for(hc in unique(hc_dat$hc_id)){
hc_start=rbind(hc_start,
cbind(hc=hc,
hc_dat[hc_dat$chosen & hc_dat$hc_id==hc,][1,c(1,3,5,7)]
)
)
}
hc_dat$dist_from_start=
apply(hc_dat,1,function(x){
euclid_dist(
x[c(1,3,5,7)],
hc_start[x$hc_id==hc_start$hc,2:5]
)
}
)
ggplot(hc_dat[hc_dat$chosen,], aes(x=round,y=dist_from_start, group=hc_id, color=hc_id))+
geom_line()
probs=c(1,2,3)
n=-1
k=probs^(1/n)/sum(probs^(1/n))
pie(k)
| /scratchpad.r | no_license | koneill1994/MEG_EWA_model | R | false | false | 4,987 | r | df=data.frame(delta=c(1,2,3), lambda=c(4,5,6), rho=c(7,8,9), phi=c(10,11,12), rmse=c(100,200,300))
checked_values=data.frame(delta=c(1,0), lambda=c(4,0), rho=c(7,0), phi=c(10,0), rmse=c(100,0))
df[
apply( # which rows of df are not present in checked_values
apply(df[1:length(coords)],1,function(crd){ # which coordinates in df do not match something in checked_values
!(
apply(checked_values,1,function(x){ # which in checked_values match the given coordinate
all(crd==x[1:length(crd)])
})
)
}),2,all),]
#x axis is df
# y axis is checked values
# to find which of df is not present
# we want to grab the
eval_function=hill_climber_eval
hc=MakeHillClimber(as.character(1), hill_climber_eval, step_size, F, bounds, human_data, model_params)
hc$hill_climb()
crd=coords
rmse(hc_dat[hc_dat$hc_id=="1",]$rmse)
plot(hc_dat[hc_dat$hc_id=="1" & hc_dat$chosen,]$round,
hc_dat[hc_dat$hc_id=="1" & hc_dat$chosen,]$rmse,
type="l")
library(ggplot2)
ggplot(hc_dat[hc_dat$chosen,],aes(x=round,y=rmse,group=hc_id, color=hc_id))+
geom_line()
# need to improve stochastic method so that odds are greater for lower rmse
# too much randomness
# maybe add in a parameter there?
# hill_find=ifelse(seek_maxima, which.max, which.min)(df$rmse)
df=data.frame(rmse=c(1,2,3,4,5,6,7,8,9,10)/sum(c(1,2,3,4,5,6,7,8,9,10)))
seek_maxima=F
hill_find=ifelse(seek_maxima, which.max, which.min)(df$rmse)
hill_find=replace(rep(F,dim(df)[1]),sample(1:length(df$rmse), 1, prob=sum(df$rmse)/(df$rmse)),T)
sum(df$rmse)/(df$rmse)
replace(rep(F,dim(df)[1]),sample(1:length(df$rmse), 1, prob=sum(df$rmse)/(df$rmse)),T)
#####
# 4-6-19
parameter_means=c(.5,.5,.5,.5)
parameter_sds=c(.1,.1,.1,.1)
choice_data=c(0.025, 0.100, 0.200, 0.250, 0.175, 0.075, 0.175)
n_sims=5
h_dat=human_data
# return(model_run(coords,
# model_params$psd,
# model_params$choice_prob_data,
# model_params$number_of_sims,
# human_data))
a=Make_EWA_model("1",parameter_means,parameter_sds,choice_data)
a$id
model_run(parameter_means,parameter_sds,choice_data,n_sims,human_data)
# Correlation of
# mean of all choices for all 4 players for all 20 rounds within 1 simulation
# Variance of choice for all 4 players for all 20 rounds within 1 simulation
mean_var_corr=
cor(
aggregate(model_data$own_choice, by=list(sim=model_data$sim), FUN=mean)$x,
aggregate(model_data$own_choice, by=list(sim=model_data$sim), FUN=var)$x
)
mean_var_corr=
cor(
aggregate(h_dat$choice, by=list(sim=h_dat$sim), FUN=mean)$x,
aggregate(h_dat$choice, by=list(sim=h_dat$sim), FUN=var)$x
)
data.frame(unique(select(mutate(group_by(model_data, round),
mean=mean(own_choice),
agent_type="model",
mean_var_corr=
cor(
aggregate(model_data$own_choice, by=list(sim=model_data$sim), FUN=mean)$x,
aggregate(model_data$own_choice, by=list(sim=model_data$sim), FUN=var)$x
)
),
agent_type,round,mean,mean_var_corr))
)
data.frame(unique(select(mutate(group_by(h_dat, round),
mean=mean(choice),
agent_type="human",
mean_var_corr=
cor(
aggregate(h_dat$choice, by=list(sim=h_dat$group), FUN=mean)$x,
aggregate(h_dat$choice, by=list(sim=h_dat$group), FUN=var)$x
)
),
agent_type,round,mean,mean_var_corr))
)
a=c(1,2,3,4)
(a-a)^2
# Euclidean (L2) distance between two equal-length numeric vectors a and b.
euclid_dist=function(a,b){
  sqrt(sum((a-b)^2))
}
library(ggplot2)
# setwd("C:/Users/Kevin/Dropbox/minimum_effort_game/EWA_Model")
# hc_dat=readRDS("./data/2019-04-07_11-56-04_EWA-hc.rds")
setwd("E:/Libraries/r projects/MEG_EWA_model-master")
setwd("C:/Users/Kevin/Dropbox/minimum_effort_game/EWA_Model")
hc_dat=readRDS("./data/2019-04-11_01-23-43_EWA-hc.rds")
mean(hc_dat$lambda_sd)
# this will inspect the data
ggplot(hc_dat[hc_dat$chosen,], aes(x=round,y=mean_var_corr_abs_diff, group=hc_id, color=hc_id))+
geom_line()
# lines(hc_dat[hc_dat$chosen,]$round,hc_dat[hc_dat$chosen,]$mean_var_corr_abs_diff)
ggplot(hc_dat[hc_dat$chosen,], aes(x=rmse,y=mean_var_corr_abs_diff, group=hc_id, color=hc_id))+
geom_point()
hc_start=data.frame()
for(hc in unique(hc_dat$hc_id)){
hc_start=rbind(hc_start,
cbind(hc=hc,
hc_dat[hc_dat$chosen & hc_dat$hc_id==hc,][1,c(1,3,5,7)]
)
)
}
hc_dat$dist_from_start=
apply(hc_dat,1,function(x){
euclid_dist(
x[c(1,3,5,7)],
hc_start[x$hc_id==hc_start$hc,2:5]
)
}
)
ggplot(hc_dat[hc_dat$chosen,], aes(x=round,y=dist_from_start, group=hc_id, color=hc_id))+
geom_line()
probs=c(1,2,3)
n=-1
k=probs^(1/n)/sum(probs^(1/n))
pie(k)
|
#' ---
#' title: "Waterheating functions"
#' author: "Centre for Sustainable Energy"
#' date: "2015"
#' output: pdf_document
#' ---
#+ echo=FALSE
#Sources (makes available) the common functions required to make the stock.
source("common.R", chdir=T)
#'##Make and save waterheating data
#'
#'Create a .csv file using the function make.waterheating, which creates a dataframe
#'containing a complete set of populated variables for the water-heating.csv stock
#'file.
#'
#'The water-heating.csv file contains a set of information on the hot water heating
#'systems, including fuel type and efficencies. However, a significant majority of
#'dwellings will have their hot water provide by their space heating systems (e.g.
#'mains gas combi boilers or standard boilers with a water tank). In these instances,
#'much less information is required in the water-heating.csv and data that describes
#'the efficiency or fuel used by the water heating system will be taken from the
#'space heating system for that case.
#'
#'Otherwise the water heating data in 'Table 4a: Heating systems (space and water)'
#'in the SAP 2012 documentation is used to allocate heating system types and
#'efficiencies where a dwelling has separate space heating and water heating systems.
#'
#'@param shcs - the scottish survey data
#'
#'@param output - the path to the output file including the required file name and
#' the extension .csv
#'
#'@param path.to.input - the path to the folder where the survey data and lookup
#' tables are placed
#'
#'@param path.to.output - the path to the output folder
save.waterheating <- function(shcs, output, path.to.input, path.to.output) {
  # Build the complete water-heating dataframe, then write it to `output`.
  # Missing values are written as the literal string "NULL", which is the
  # convention used by the other stock .csv files.
  waterheating.data <- make.waterheating(shcs, path.to.input, path.to.output)
  write.csv(waterheating.data, file = output, row.names = FALSE, na = "NULL")
}
#'Make the dataframe that contains all the information required for the
#' waterheating.csv file
#'
#'The dataframe is constructed and returned (as it is the last thing in the function
#' that is assigned)
#'
#'@param shcs - the scottish survey data
#'
#'@param path.to.input - the path to the folder where the survey data and lookup
#' tables are placed
#'
#'@param path.to.output - the path to the output folder
make.waterheating <- function(shcs, path.to.input, path.to.output) {
  # Survey cases joined with the space-heating output and the water-heating
  # lookup table; every column below is derived from this single frame.
  wh <- create.waterheating(shcs, path.to.output, path.to.input)
  n.cases <- length(wh$uprn_new)

  # Efficiency, fuel and tariff descriptors. Where hot water is supplied by
  # the central heating system these helpers defer to the space-heating data.
  basicefficiency <- wh.basic.efficiency(wh$basicefficiency
                                         ,wh$spaceheatingbasicefficiency
                                         ,wh$withcentralheating)
  mainheatingfuel <- wh.main.heatingfuel(wh$M18
                                         ,wh$spaceheatingmainfuel
                                         ,wh$withcentralheating)
  chpfraction <- wh.chp.fraction(wh$M18, wh$withcentralheating)
  communitycharging <- wh.community.chargingusagebased(wh$M18
                                                       ,wh$withcentralheating)
  electrictariff <- electric.tariff(wh$L2, wh$M2)
  fluetype <- wh.flue.type(wh$fluetype)

  # The survey holds no data for these fields, so emit "NULL" throughout.
  summerefficiency <- rep("NULL", n.cases)
  winterefficiency <- rep("NULL", n.cases)
  installationyear <- rep("NULL", n.cases)

  # Hot water cylinder characteristics.
  factoryinsulated <- cylinder.factoryinsulated(wh$M30
                                                ,wh$waterheatingsystemtype
                                                ,wh$immersionheatertype
                                                ,wh$spaceheatingsystemtype)
  insulationthickness <- cylinder.insulationthickness(wh$M31
                                                      ,wh$waterheatingsystemtype
                                                      ,wh$immersionheatertype
                                                      ,wh$spaceheatingsystemtype)
  thermostatpresent <- cylinder.thermostatpresent(wh$M32
                                                  ,wh$waterheatingsystemtype
                                                  ,wh$immersionheatertype
                                                  ,wh$spaceheatingsystemtype)
  cylindervolume <- cylinder.volume(wh$M29
                                    ,wh$waterheatingsystemtype
                                    ,wh$immersionheatertype
                                    ,wh$spaceheatingsystemtype)

  # Solar hot water flags; the store columns depend on both the presence flag
  # and the cylinder volume derived above.
  solarpresent <- solar.hotwaterpresent(wh$D9)
  solarstoreincylinder <- solar.storeincylinder(solarpresent, cylindervolume)
  solarstorevolume <- solar.storevolume(solarpresent, cylindervolume)

  # Assemble the water-heating.csv layout (last expression is the return value).
  data.frame(aacode = wh$uprn_new
             ,basicefficiency = basicefficiency
             ,chpfraction = chpfraction
             ,communitychargingusagebased = communitycharging
             ,electrictariff = electrictariff
             ,fluetype = fluetype
             ,summerefficiency = summerefficiency
             ,winterefficiency = winterefficiency
             ,cylinderfactoryinsulated = factoryinsulated
             ,cylinderinsulationthickness = insulationthickness
             ,cylinderthermostatpresent = thermostatpresent
             ,cylindervolume = cylindervolume
             ,immersionheatertype = wh$immersionheatertype
             ,installationyear = installationyear
             ,mainheatingfuel = mainheatingfuel
             ,solarhotwaterpresent = solarpresent
             ,solarstoreincylinder = solarstoreincylinder
             ,solarstorevolume = solarstorevolume
             ,waterheatingsystemtype = wh$waterheatingsystemtype
             ,withcentralheating = with.centralheating(wh$withcentralheating)
  )
}
#'\pagebreak
#'
#'##Create waterheating
#'
#'The dataframe is created by importing the spaceheating file that was created
#' for the stock and matching that with waterheating systems from the waterheating
#' look up table, which has been created using a subset of information contained in
#' the 'Table 4a: Heating systems (space and water)' in the SAP 2012 documentation.
#'
#' 'lup-water-heating.csv' represents this information and is stored with the other
#' lookup table csv files for heating systems.
#'
#'@param shcs - the scottish survey data
#'
#'@param path.to.input - the path to the folder where the survey data and lookup
#' tables are placed
#'
#'@param path.to.output - the path to the output folder
create.waterheating <- function(shcs, path.to.output, path.to.input) {
  # Load the space-heating stock file produced earlier by spaceheating.R.
  sh <- read.csv(file.path(path.to.output, "space-heating.csv"), header = TRUE)

  # Keep only the columns needed here, renamed so that joining onto the
  # survey does not clobber survey variables. withcentralheating starts at 0
  # (i.e. a separate water heating system is assumed until shown otherwise).
  sh <- data.frame(uprn_new = sh$aacode
                   ,spaceheatingsystemtype = sh$spaceheatingsystemtype
                   ,spaceheatingbasicefficiency = sh$basicefficiency
                   ,spaceheatingmainfuel = sh$mainheatingfuel
                   ,withcentralheating = 0)
  sh <- join(sh, shcs, by = "uprn_new")

  # Hot water counts as supplied by the central heating when the survey says
  # "Mains heating" and the main system is not a room or storage heater...
  sh$withcentralheating[sh$M17 == "Mains heating" &
                          (sh$spaceheatingsystemtype != "storage_heater" &
                             sh$spaceheatingsystemtype != "room_heater")] <- 1
  # ...and always when the main system is a combi boiler or CPSU.
  sh$withcentralheating[sh$spaceheatingsystemtype == "combi" |
                          sh$spaceheatingsystemtype == "cpsu"] <- 1

  # Import the SAP table 4a water-heating lookup and force its
  # withcentralheating key to 0 so only cases with a separate water heating
  # system pick up lookup values from the join.
  wh.lookup <- read.csv(file.path(path.to.input, "lup-water-heating.csv")
                        , header = TRUE, na.strings = "")
  wh.lookup$withcentralheating <- 0
  matched <- join(sh, wh.lookup, by = c("M17", "M18", "withcentralheating"))
  return(matched)
}
#'
#'\pagebreak
#'
#'##Waterheating systems
#'
#'With central heating
#'
#'Values are mapped against true and false for the NHM, converting 1 to TRUE and 0 to
#' FALSE (as allocated above)
#'
#'@param withcentralheating - flag created during create.waterheating
#' value is 0 if there is a separate water heating system to the spaceheating system
with.centralheating <- function(withcentralheating) {
  # Translate the internal 0/1 flag into the "FALSE"/"TRUE" factor levels
  # expected by the NHM stock import.
  flag.levels <- c("0" = "FALSE", "1" = "TRUE")
  result <- as.factor(checked.revalue(as.factor(withcentralheating), flag.levels))
  return(result)
}
#'Main heating fuel (for water systems)
#'
#'The water heating fuel is determined from the information cotained in the water
#'heating fuel type variable in the SHCS unless the hot water is produced by the main
#' central heating.
#'
#' Note: community_heat is not a fuel type and it is assumed that for the significant
#' majority of community heating systems, mains_gas is the heating fuel.
#'
#'@param wh.fuel - the heating fuel type of waterheating which comes from the lookup
#' table
#'
#'@param sh.fuel - the heating fuel type from the spaceheating file
#'
#'@param ch - flag indicating if the waterheating is provided by the spaceheating
#' 1 indicates waterheating is provided by spaceheating
wh.main.heatingfuel <- function(wh.fuel, sh.fuel, ch){
  # Map the SHCS water-heating fuel answers onto NHM fuel names; answers with
  # no NHM equivalent (Biogas, Other, Not applicable, Unobtainable) become the
  # string "NULL". Communal heating is assumed to run on mains gas.
  # NOTE(review): sh.fuel is accepted but not used in this calculation; when
  # ch == 1 the fuel comes from the space heating system, so "" is emitted
  # here instead — confirm this is the convention the downstream import expects.
  #waterheating fuel column is changed to the correct values for the NHM.
  wh.fuel<-as.factor(checked.revalue(
    wh.fuel,
    c("Gas (mains)" = "mains_gas"
      ,"Bulk LPG" = "bulk_lpg"
      ,"Bottled gas" = "bottled_lpg"
      ,"Oil" = "oil"
      ,"House coal" = "house_coal"
      ,"Smokeless fuel" = "house_coal"
      ,"Antracite nuts and grain" = "house_coal"
      ,"Wood chips" = "biomass_woodchip"
      ,"Wood logs" = "biomass_wood"
      ,"Wood pellets" = "biomass_pellets"
      ,"Peak electric" = "electricity"
      ,"Off-peak electric" = "electricity"
      ,"Communal heating, no CHP" = "mains_gas"
      ,"Communal heating, with CHP" = "mains_gas"
      ,"Biogas" = "NULL"
      ,"Dual fuel" = "house_coal"
      ,"Other" = "NULL"
      ,"Not applicable" = "NULL"
      ,"Unobtainable" = "NULL"
    )))
  # Blank out cases whose hot water comes from the central heating system;
  # otherwise take the character value of the mapped factor level.
  #The two columns are then combined
  all.fuel <- ifelse(ch==1,"",levels(wh.fuel)[wh.fuel])
  return(all.fuel)
}
#'Basic efficiency
#'
#'Converts the percentage efficiency from the water heating lookup table to a
#'fraction; cases whose hot water comes from the central heating system get 0
#'(their efficiency is taken from the space heating system instead).
#'
#'@param wh.efficiency - the efficiency (in percent) of waterheating which comes
#' from the lookup table
#'
#'@param sh.efficiency - the efficiency of spaceheating (kept for interface
#' compatibility)
#'
#'@param ch - flag indicating if the waterheating is provided by the spaceheating
#' 1 indicates waterheating is provided by spaceheating
wh.basic.efficiency <- function(wh.efficiency, sh.efficiency, ch){
from.central <- ch == 1
return(ifelse(from.central, 0, wh.efficiency / 100))
}
#'Flue type
#'
#'Flue type of the waterheating system is validated and returned as a factor.
#'
#'@param flue - the flue type of the waterheating system from the waterheating lookup
#' table
#'
#'@return a factor whose levels have been checked against the expected values
wh.flue.type <- function(flue){
#BUG FIX: the revalued factor was previously discarded (the result of
#as.factor(checked.revalue(...)) was never assigned), so the input was returned
#unvalidated and unchanged. checked.revalue acts as a validity check here since
#the mapping is the identity.
flue <- as.factor(checked.revalue(
flue
,c("notapplicable" = "notapplicable"
,"openflue"="openflue"
)))
return(flue)
}
#'
#'\pagebreak
#'
#'##Cylinder information
#'
#'Cylinder factory insulated
#'
#'Maps the survey insulation description to "TRUE"/"FALSE"/"NULL" for the NHM
#'and checks the result for consistency with the heating system types (some
#'systems never have a cylinder, others always do)
#'
#'@param factory - M30 installation type for hot water cylinder from SHCS
#'
#'@param w.heating - water heating system type
#'
#'@param immersion - type of immersion heater (single/dual/null)
#'
#'@param s.heating - spaceheating system type
cylinder.factoryinsulated <- function(factory,w.heating,immersion,s.heating){
#checked.revalue errors if an unexpected survey answer appears
factory <- as.factor(checked.revalue(
factory,
c("Sprayed" = "TRUE"
,"Jacket" = "FALSE"
,"Encapsulated" = "TRUE"
,"Both" = "TRUE"
,"No Insulation" = "FALSE"
,"No hw storage" = "NULL"
,"Unobtainable" = "FALSE")))
# These system types should not have cylinders
factory[s.heating == "cpsu" | s.heating == "combi" | w.heating == "multipoint"
| w.heating == "singlepoint"] <- "NULL"
#These system types should all have cylinders,
#if no information assume no factory insulation
factory[factory == "NULL" & (w.heating == "back_boiler"
| w.heating == "standard_boiler"
| immersion == "dual_coil"
| immersion == "single_coil")] <- "FALSE"
return(factory)
}
#'Cylinder insulation thickness
#'
#'Cleans the survey's missing-value codes out of the insulation thickness and
#'checks the result for consistency with the heating system types.
#'
#'@param thickness - M31 cylinder insulation thickness from scottish survey
#' (888/999 are the survey's missing-value codes)
#'
#'@param w.heating - water heating system type
#'
#'@param immersion - type of immersion heater (single/dual/null)
#'
#'@param s.heating - spaceheating system type
cylinder.insulationthickness <- function(thickness,w.heating,immersion,s.heating){
#888: not applicable, 999: unobtainable - both become NA
thickness <- checked.renum(thickness,
data.frame(a = c(888,999), b = c(NA,NA)))
# These system types should not have cylinders
thickness[s.heating == "cpsu" | s.heating == "combi" | w.heating == "multipoint"
| w.heating == "singlepoint"] <- NA
#These system types should all have cylinders,
#if no information assume 0 insulation thickness
#(idiomatic is.na(thickness) replaces the original is.na(thickness) == "TRUE")
thickness[is.na(thickness) & (w.heating == "back_boiler"
| w.heating == "standard_boiler"
| immersion == "dual_coil"
| immersion == "single_coil")] <- 0
return(thickness)
}
#'Cylinder thermostat present
#'
#'Maps the survey thermostat answer to "TRUE"/"FALSE"/"NULL" for the NHM and
#'checks the result for consistency with the heating system types
#'
#'@param thermostat - M32 cylinder thermostat present from scottish survey
#'
#'@param w.heating - water heating system type
#'
#'@param immersion - type of immersion heater (single/dual/null)
#'
#'@param s.heating - spaceheating system type
cylinder.thermostatpresent <- function(thermostat,w.heating,immersion,s.heating){
#checked.revalue errors if an unexpected survey answer appears
thermostat <- as.factor(checked.revalue(
thermostat
,c("Yes" = "TRUE"
,"No" = "FALSE"
,"Not applicable" = "NULL"
,"Unobtainable" = "FALSE")))
# These system types should not have cylinders
thermostat[s.heating == "cpsu" | s.heating == "combi"
| w.heating == "multipoint" | w.heating == "singlepoint"] <- "NULL"
#These system types should all have cylinders, if no information assume
#no thermostat
thermostat[thermostat == "NULL" & (w.heating == "back_boiler"
| w.heating == "standard_boiler"
| immersion == "dual_coil"
| immersion == "single_coil")] <- "FALSE"
return(thermostat)
}
#'cylinder volume
#'
#'Maps the banded survey cylinder size onto a representative volume in litres,
#'using information contained in the RD SAP documentation on typical sizes of
#'cylinders, and checks the result for consistency with the heating system types.
#'
#'@param volume - M29 cylinder volume from SHCS
#'
#'@param w.heating - water heating system type
#'
#'@param immersion - type of immersion heater (single/dual/null)
#'
#'@param s.heating - spaceheating system type
cylinder.volume <- function(volume,w.heating,immersion,s.heating){
#Each survey band is mapped to a representative litre value (as a string level)
volume <-as.factor(checked.revalue(
volume
,c("Small (<90 L)" = "80" ##SAP 2009 table 2a value is 80
,"Normal (90-130 L)" = "110"
,"Medium (130-170 L)" = "140" ##SAP 2009 table 2a value is 140
,"Large (> 170 L)" = "210"
,"No hw storage" = "NULL"
,"Unobtainable" = "110" ##Assume normal size
)))
# These system types should not have cylinders
volume[s.heating == "cpsu" | s.heating == "combi" | w.heating == "multipoint"
| w.heating == "singlepoint"] <- "NULL"
#These system types should all have cylinders, if they do not then assume
#normal size
volume[volume == "NULL" & (w.heating == "back_boiler"
| w.heating == "standard_boiler"
| immersion == "dual_coil"
| immersion == "single_coil")] <- "110"
return(volume)
}
#'
#' \pagebreak
#'
#'##Community heating
#'
#'Set the chpfraction of chp heating systems
#'
#'When unknown, the default value for the fraction of heat obtained from a CHP
#'system is 0.35. Every non-CHP fuel maps to "NULL".
#'
#'@param wh.chp a vector containing the waterheating fuel type
#'
#'@param ch - flag indicating if the waterheating is provided by the spaceheating
#' 1 indicates waterheating is provided by spaceheating
wh.chp.fraction <- function(wh.chp,ch){
#Only "Communal heating, with CHP" carries a chp fraction; all other fuel
#descriptions map to "NULL". checked.revalue errors on unexpected values.
wh.chp <- as.factor(checked.revalue(
wh.chp,
c("Gas (mains)" = "NULL"
,"Bulk LPG" = "NULL"
,"Bottled gas" = "NULL"
,"Oil" = "NULL"
,"House coal" = "NULL"
,"Smokeless fuel" = "NULL"
,"Antracite nuts and grain" = "NULL"
,"Wood chips" = "NULL"
,"Wood logs" = "NULL"
,"Wood pellets" = "NULL"
,"Peak electric" = "NULL"
,"Off-peak electric" = "NULL"
,"Communal heating, no CHP" = "NULL"
,"Communal heating, with CHP" = "0.35"
,"Biogas" = "NULL"
,"Dual fuel" = "NULL"
, "Other" = "NULL"
,"Not applicable" = "NULL"
,"Unobtainable" = "NULL"
)))
#Cases served by the central heating system take their chp data from spaceheating
wh.chp[ch==1] <- "NULL"
return(wh.chp)
}
#'Community charging usage based
#'
#'This function sets 'communitychargingusagebased' to "true" if a communal system.
#'When unknown, it is assumed that charging for community systems is usage based.
#'
#'NOTE(review): the communal values here are the lowercase string "true" whereas
#'sibling functions emit "TRUE" - confirm the NHM import accepts the lowercase form.
#'
#'@param wh.communal a vector containing the waterheating fuel type from the survey
#' (M18; see make.waterheating)
#'
#'@param ch - flag indicating if the waterheating is provided by the spaceheating
#' 1 indicates waterheating is provided by spaceheating
wh.community.chargingusagebased<- function(wh.communal,ch){
#Only communal systems are usage-based; all other fuels map to "NULL"
wh.communal <- as.factor(checked.revalue(
wh.communal,
c("Gas (mains)" = "NULL"
,"Bulk LPG" = "NULL"
,"Bottled gas" = "NULL"
,"Oil" = "NULL"
,"House coal" = "NULL"
,"Smokeless fuel" = "NULL"
,"Antracite nuts and grain" = "NULL"
,"Wood chips" = "NULL"
,"Wood logs" = "NULL"
,"Wood pellets" = "NULL"
,"Peak electric" = "NULL"
,"Off-peak electric" = "NULL"
,"Communal heating, no CHP" = "true"
,"Communal heating, with CHP" = "true"
,"Biogas" = "NULL"
,"Dual fuel" = "NULL"
, "Other" = "NULL"
,"Not applicable" = "NULL"
,"Unobtainable" = "NULL"
)))
#Cases served by the central heating system take this value from spaceheating
wh.communal[ch==1] <- "NULL"
return(wh.communal)
}
#'
#' \pagebreak
#'
#'##Solar hot water heating
#'
#'Existence of solar hot water
#'
#'If an area bigger than zero of solar hot water panels exists assume that
#' case has solar hot water; zero, missing, or survey-coded values count as FALSE
#'
#' @param solar - D9 (% roof with solar installed) from the scottish survey
#'
#' @return a character vector of "TRUE"/"FALSE"
solar.hotwaterpresent <- function(solar){
#88: not applicable and 99: unobtainable
solar[solar == 88 | solar == 99] <- NA
#BUG FIX: the original only overwrote positive and NA entries, so a recorded
#0% area leaked through as the string "0" rather than "FALSE"; this also
#replaces the is.na(solar)=="TRUE" anti-pattern with plain is.na()
return(ifelse(!is.na(solar) & solar > 0, "TRUE", "FALSE"))
}
#'Solar stored in cylinder
#'
#'If solar hot water exists and there is a cylinder for hot water then it is assumed
#' that all hot water is stored in the same cylinder as no other information is
#' present in the scottish survey
#'
#'@param solar - the.solarhotwaterpresent from the solar.hotwaterpresent function
#'
#'@param volume - the volume of the hotwater cylinder ("NULL" when no cylinder)
solar.storeincylinder <- function(solar,volume){
has.solar <- solar == "TRUE"
has.cylinder <- volume != "NULL"
return(ifelse(has.solar & has.cylinder, "TRUE", "FALSE"))
}
#'Solar stored volume
#'
#'If solar hot water exists and there is a cylinder for hot water then it is assumed
#' that half the cylinder volume is used for storing hot water from the solar thermal
#' system. If there is no cylinder for the hot water system it is assumed that a
#' tank of volume 75 has been installed for the solar thermal system.
#'
#'@param solar - the.solarhotwaterpresent from the solar.hotwaterpresent function
#'
#'@param volume - the volume of the hotwater cylinder as a factor (level "NULL"
#' when there is no cylinder)
solar.storevolume <- function(solar,volume){
#levels(volume)[volume] decodes the factor to its label; as.numeric("NULL")
#becomes NA (with a warning) for the no-cylinder cases
solar <- ifelse(solar=="TRUE",as.numeric(levels(volume)[volume])/2,0)
#Idiomatic is.na() replaces the original is.na(solar)=="TRUE" comparison;
#no-cylinder solar systems are assigned a dedicated 75 litre store
solar[is.na(solar)] <- 75
return(solar)
}
| /scotland/waterheating.R | no_license | KieranIngram/national-household-model-stock-files-creator | R | false | false | 21,648 | r | #' ---
#' title: "Waterheating functions"
#' author: "Centre for Sustainable Energy"
#' date: "2015"
#' output: pdf_document
#' ---
#+ echo=FALSE
#Sources (makes available) the common functions required to make the stock.
source("common.R", chdir=T)
#'##Make and save waterheating data
#'
#'Create a .csv file using the function make.waterheating, which creates a dataframe
#'containing a complete set of populated variables for the water-heating.csv stock
#'file.
#'
#'The water-heating.csv file contains a set of information on the hot water heating
#'systems, including fuel type and efficencies. However, a significant majority of
#'dwellings will have their hot water provide by their space heating systems (e.g.
#'mains gas combi boilers or standard boilers with a water tank). In these instances,
#'much less information is required in the water-heating.csv and data that describes
#'the efficiency or fuel used by the water heating system will be taken from the
#'space heating system for that case.
#'
#'Otherwise the water heating data in 'Table 4a: Heating systems (space and water)'
#'in the SAP 2012 documentation is used to allocate heating system types and
#'efficiencies where a dwelling has separate space heating and water heating systems.
#'
#'@param shcs - the scottish survey data
#'
#'@param output - the path to the output file including the required file name and
#' the extension .csv
#'
#'@param path.to.input - the path to the folder where the survey data and lookup
#' tables are placed
#'
#'@param path.to.outputs - the path to the output folder
save.waterheating <- function(shcs, output, path.to.input, path.to.output) {
write.csv(make.waterheating(shcs,path.to.input,path.to.output)
, file=output, row.names=FALSE, na ="NULL")
}
#'Make the dataframe that contains all the information required for the
#' waterheating.csv file
#'
#'Each output column is produced by a dedicated helper function; the assembled
#'dataframe is returned (as it is the last thing in the function
#' that is assigned)
#'
#'@param shcs - the scottish survey data
#'
#'@param path.to.input - the path to the folder where the survey data and lookup
#' tables are placed
#'
#'@param path.to.output - the path to the output folder
make.waterheating <- function(shcs,path.to.input,path.to.output) {
#First the waterheating dataframe is created from information contained in the
#scottish survey, the spaceheating.csv and the waterheating look up table
#(note: create.waterheating takes the output path before the input path)
waterheating <- create.waterheating(shcs,path.to.output,path.to.input)
the.basicefficiency <- wh.basic.efficiency(waterheating$basicefficiency
                                           ,waterheating$spaceheatingbasicefficiency
                                           ,waterheating$withcentralheating)
the.chpfraction <- wh.chp.fraction(waterheating$M18
                                   ,waterheating$withcentralheating)
the.communitychargingusagebased <-
  wh.community.chargingusagebased(waterheating$M18,waterheating$withcentralheating)
the.electrictariff <- electric.tariff(waterheating$L2,waterheating$M2)
the.fluetype <- wh.flue.type(waterheating$fluetype)
#No information available about summer or winter efficiency waterheating
the.summerefficiency <- rep("NULL", length(waterheating$uprn_new))
the.winterefficiency <- rep("NULL", length(waterheating$uprn_new))
the.cylinderfactoryinsulated <-
  cylinder.factoryinsulated(waterheating$M30,waterheating$waterheatingsystemtype
                            ,waterheating$immersionheatertype
                            ,waterheating$spaceheatingsystemtype)
the.cylinderinsulationthickness <-
  cylinder.insulationthickness(waterheating$M31
                               ,waterheating$waterheatingsystemtype
                               ,waterheating$immersionheatertype
                               ,waterheating$spaceheatingsystemtype)
the.cylinderthermostatpresent <-
  cylinder.thermostatpresent(waterheating$M32
                             ,waterheating$waterheatingsystemtype
                             ,waterheating$immersionheatertype
                             ,waterheating$spaceheatingsystemtype)
the.cylindervolume <- cylinder.volume(waterheating$M29
                                      ,waterheating$waterheatingsystemtype
                                      ,waterheating$immersionheatertype
                                      ,waterheating$spaceheatingsystemtype)
#No information available about installation year of waterheating
the.installationyear <- rep("NULL", length(waterheating$uprn_new))
the.mainheatingfuel <- wh.main.heatingfuel(waterheating$M18
                                           ,waterheating$spaceheatingmainfuel
                                           ,waterheating$withcentralheating)
the.solarhotwaterpresent <- solar.hotwaterpresent(waterheating$D9)
the.solarstoreincylinder <- solar.storeincylinder(the.solarhotwaterpresent
                                                  ,the.cylindervolume)
the.solarstorevolume <- solar.storevolume(the.solarhotwaterpresent
                                          ,the.cylindervolume)
the.withcentralheating <- with.centralheating(waterheating$withcentralheating)
#Assemble the columns in the order expected by the water-heating.csv stock file
data.frame(aacode = waterheating$uprn_new
           ,basicefficiency = the.basicefficiency
           ,chpfraction = the.chpfraction
           ,communitychargingusagebased = the.communitychargingusagebased
           ,electrictariff = the.electrictariff
           ,fluetype = the.fluetype
           ,summerefficiency = the.summerefficiency
           ,winterefficiency = the.winterefficiency
           ,cylinderfactoryinsulated = the.cylinderfactoryinsulated
           ,cylinderinsulationthickness = the.cylinderinsulationthickness
           ,cylinderthermostatpresent = the.cylinderthermostatpresent
           ,cylindervolume = the.cylindervolume
           ,immersionheatertype = waterheating$immersionheatertype
           ,installationyear = the.installationyear
           ,mainheatingfuel = the.mainheatingfuel
           ,solarhotwaterpresent = the.solarhotwaterpresent
           ,solarstoreincylinder = the.solarstoreincylinder
           ,solarstorevolume = the.solarstorevolume
           ,waterheatingsystemtype = waterheating$waterheatingsystemtype
           ,withcentralheating = the.withcentralheating
           )
}
#'\pagebreak
#'
#'##Create waterheating
#'
#'The dataframe is created by importing the spaceheating file that was created
#' for the stock and matching that with waterheating systems from the waterheating
#' look up table, which has been created using a subset of information contained in
#' the 'Table 4a: Heating systems (space and water)' in the SAP 2012 documentation.
#'
#' 'lup-water-heating.csv' represents this information and is stored with the other
#' lookup table csv files for heating systems.
#'
#'NOTE(review): the argument order (shcs, path.to.output, path.to.input) differs
#'from the sibling functions - callers in this file already pass them in this order.
#'
#'@param shcs - the scottish survey data
#'
#'@param path.to.output - the path to the output folder (where space-heating.csv
#' has already been written)
#'
#'@param path.to.input - the path to the folder where the survey data and lookup
#' tables are placed
create.waterheating <- function(shcs,path.to.output,path.to.input){
#import the spaceheating data that was created by spaceheating.R
spaceheating <- read.csv(file.path(path.to.output,"space-heating.csv")
                         , header=TRUE)
#Rename the columns to allow the join onto the scottish survey without overwriting
#required variables
spaceheating <- data.frame(uprn_new = spaceheating$aacode
                           ,spaceheatingsystemtype =
                             spaceheating$spaceheatingsystemtype
                           ,spaceheatingbasicefficiency =
                             spaceheating$basicefficiency
                           ,spaceheatingmainfuel = spaceheating$mainheatingfuel
                           ,withcentralheating = 0)
#Join the spaceheating dataframe to the scottish survey
spaceheating <- join(spaceheating,shcs,by="uprn_new")
#If the waterheating is flagged as from mains heating and the main heating type
#is not a room heater or a storage heater then with central heating is set to 1.
spaceheating$withcentralheating[spaceheating$M17=="Mains heating" &
                                  (spaceheating$spaceheatingsystemtype != "storage_heater" &
                                     spaceheating$spaceheatingsystemtype != "room_heater")] <- 1
#If the main heating system is combi or cpsu it is assumed that waterheating comes
#from the main heating system and the withcentralheating is set to 1.
spaceheating$withcentralheating[spaceheating$spaceheatingsystemtype == "combi" |
                                  spaceheating$spaceheatingsystemtype == "cpsu"] <- 1
#The waterheating look up file is imported
waterheating <- read.csv(file.path(path.to.input,"lup-water-heating.csv")
                         , header=TRUE,na.strings = "")
#Ensures that only stock where withcentralheating is false
#have waterheating matched
waterheating$withcentralheating <- 0
matched.waterheating <- join(spaceheating,waterheating
                             ,by=c("M17","M18","withcentralheating"))
return(matched.waterheating)
}
#'
#'\pagebreak
#'
#'##Waterheating systems
#'
#'With central heating
#'
#'Values are mapped against true and false for the NHM, converting 1 to TRUE and 0 to
#' FALSE (as allocated above)
#'
#'@param withcentralheating - flag created during create.waterheating
#' value is 0 if there is a separate water heating system to the spaceheating system
with.centralheating <- function(withcentralheating){
withcentralheating <- as.factor(checked.revalue(
as.factor(withcentralheating)
,c("0" = "FALSE"
,"1" = "TRUE"
)))
return(withcentralheating)
}
#'Main heating fuel (for water systems)
#'
#'The water heating fuel is determined from the information contained in the water
#'heating fuel type variable in the SHCS unless the hot water is produced by the main
#' central heating.
#'
#' Note: community_heat is not a fuel type and it is assumed that for the significant
#' majority of community heating systems, mains_gas is the heating fuel.
#'
#'@param wh.fuel - the heating fuel type of waterheating which comes from the lookup
#' table
#'
#'@param sh.fuel - the heating fuel type from the spaceheating file
#'
#'@param ch - flag indicating if the waterheating is provided by the spaceheating
#' 1 indicates waterheating is provided by spaceheating
wh.main.heatingfuel <- function(wh.fuel, sh.fuel, ch){
#waterheating fuel column is changed to the correct values for the NHM.
wh.fuel<-as.factor(checked.revalue(
wh.fuel,
c("Gas (mains)" = "mains_gas"
,"Bulk LPG" = "bulk_lpg"
,"Bottled gas" = "bottled_lpg"
,"Oil" = "oil"
,"House coal" = "house_coal"
,"Smokeless fuel" = "house_coal"
,"Antracite nuts and grain" = "house_coal"
,"Wood chips" = "biomass_woodchip"
,"Wood logs" = "biomass_wood"
,"Wood pellets" = "biomass_pellets"
,"Peak electric" = "electricity"
,"Off-peak electric" = "electricity"
,"Communal heating, no CHP" = "mains_gas"
,"Communal heating, with CHP" = "mains_gas"
,"Biogas" = "NULL"
,"Dual fuel" = "house_coal"
,"Other" = "NULL"
,"Not applicable" = "NULL"
,"Unobtainable" = "NULL"
)))
#The two columns are then combined
all.fuel <- ifelse(ch==1,"",levels(wh.fuel)[wh.fuel])
return(all.fuel)
}
#'Basic efficiency
#'
#'Basic efficiency is made from the basic efficiency of waterheating unless the
#' heating comes from central heating
#'
#'@param wh.efficiency - the efficiency of waterheating which comes from the lookup
#' table
#'
#'@param sh.efficiency - the efficiency of spaceheating which comes from spaceheating
#' file created by the spaceheating.R script
#'
#'@param ch - flag indicating if the waterheating is provided by the spaceheating
#' 1 indicates waterheating is provided by spaceheating
wh.basic.efficiency <- function(wh.efficiency, sh.efficiency, ch){
all.efficiency <- ifelse(ch==1,0,(wh.efficiency/100))
return(all.efficiency)
}
#'Flue type
#'
#'Flue type of the waterheating system is mapped to the correct values
#'
#'@param flue - the flue type of the waterheating system from the waterheating lookup
#' table
wh.flue.type <- function(flue){
#BUG FIX: the revalued factor was previously discarded (the result of
#as.factor(checked.revalue(...)) was never assigned), so the input was returned
#unvalidated and unchanged. checked.revalue acts as a validity check here.
flue <- as.factor(checked.revalue(
flue
,c("notapplicable" = "notapplicable"
,"openflue"="openflue"
)))
return(flue)
}
#'
#'\pagebreak
#'
#'##Cylinder information
#'
#'Cylinder factory insulated
#'
#'Values are mapped against true and false for the NHM and checked for consistency
#'
#'@param factory - M30 installation type for hot water cylinder from SHCS
#'
#'@param w.heating - water heating system type
#'
#'@param immersion - type of immersion heater (single/duel/null)
#'
#'@param s.heating - spaceheating system type
cylinder.factoryinsulated <- function(factory,w.heating,immersion,s.heating){
factory <- as.factor(checked.revalue(
factory,
c("Sprayed" = "TRUE"
,"Jacket" = "FALSE"
,"Encapsulated" = "TRUE"
,"Both" = "TRUE"
,"No Insulation" = "FALSE"
,"No hw storage" = "NULL"
,"Unobtainable" = "FALSE")))
# These system types should not have cylinders
factory[s.heating == "cpsu" | s.heating == "combi" | w.heating == "multipoint"
| w.heating == "singlepoint"] <- "NULL"
#These system types should all have cylinders,
#if no information assume no factory insulation
factory[factory == "NULL" & (w.heating == "back_boiler"
| w.heating == "standard_boiler"
| immersion == "dual_coil"
| immersion == "single_coil")] <- "FALSE"
return(factory)
}
#'Cylinder insulation thickness
#'
#'Values are mapped against true and false for the NHM and checked for consistency
#'
#'@param thickness - M31 cylinder insulation thickness from scottish survey
#'
#'@param w.heating - water heating system type
#'
#'@param immersion - type of immersion heater (single/duel/null)
#'
#'@param s.heating - spaceheating system type
cylinder.insulationthickness <- function(thickness,w.heating,immersion,s.heating){
#888: not applicable, 999: unobtainable - both become NA
thickness <- checked.renum(thickness,
data.frame(a = c(888,999), b = c(NA,NA)))
# These system types should not have cylinders
thickness[s.heating == "cpsu" | s.heating == "combi" | w.heating == "multipoint"
| w.heating == "singlepoint"] <- NA
#These system types should all have cylinders,
#if no information assume 0 insulation thickness
#(idiomatic is.na(thickness) replaces the original is.na(thickness) == "TRUE")
thickness[is.na(thickness) & (w.heating == "back_boiler"
| w.heating == "standard_boiler"
| immersion == "dual_coil"
| immersion == "single_coil")] <- 0
return(thickness)
}
#'Cylinder thermostat present
#'
#'Values are mapped against true and false for the NHM and checked for consistency
#'
#'@param thermostat - M32 cylinder thermostat present from scottish survey
#'
#'@param w.heating - water heating system type
#'
#'@param immersion - type of immersion heater (single/duel/null)
#'
#'@param s.heating - spaceheating system type
cylinder.thermostatpresent <- function(thermostat,w.heating,immersion,s.heating){
thermostat <- as.factor(checked.revalue(
thermostat
,c("Yes" = "TRUE"
,"No" = "FALSE"
,"Not applicable" = "NULL"
,"Unobtainable" = "FALSE")))
# These system types should not have cylinders
thermostat[s.heating == "cpsu" | s.heating == "combi"
| w.heating == "multipoint" | w.heating == "singlepoint"] <- "NULL"
#These system types should all have cylinders, if no information assume
#no thermostat
thermostat[thermostat == "NULL" & (w.heating == "back_boiler"
| w.heating == "standard_boiler"
| immersion == "dual_coil"
| immersion == "single_coil")] <- "FALSE"
return(thermostat)
}
#'cylinder volume
#'
#'Values are mapped against true and false for the NHM and checked for consistency,
#'using information contained in the RD SAP documentation on typical sizes of
#'cylinders.
#'
#'@param volume - M29 cylinder volume from SHCS
#'
#'@param w.heating - water heating system type
#'
#'@param immersion - type of immersion heater (single/duel/null)
#'
#'@param s.heating - spaceheating system type
cylinder.volume <- function(volume,w.heating,immersion,s.heating){
volume <-as.factor(checked.revalue(
volume
,c("Small (<90 L)" = "80" ##SAP 2009 table 2a value is 80
,"Normal (90-130 L)" = "110"
,"Medium (130-170 L)" = "140" ##SAP 2009 table 2a value is 140
,"Large (> 170 L)" = "210"
,"No hw storage" = "NULL"
,"Unobtainable" = "110" ##Assume normal size
)))
# These system types should not have cylinders
volume[s.heating == "cpsu" | s.heating == "combi" | w.heating == "multipoint"
| w.heating == "singlepoint"] <- "NULL"
#These system types should all have cylinders, if they do not then assume
#normal size
volume[volume == "NULL" & (w.heating == "back_boiler"
| w.heating == "standard_boiler"
| immersion == "dual_coil"
| immersion == "single_coil")] <- "110"
return(volume)
}
#'
#' \pagebreak
#'
#'##Community heating
#'
#'Set the chpfraction of chp heating systems
#'
#'When unknown, the default value for the fraction of heat obtained from a CHP system
#'is 0.35.
#'
#'@param wh.chp a vector containing the waterheating fuel type
#'
#'@param ch - flag indicating if the waterheating is provided by the spaceheating
#' 1 indicates waterheating is provided by spaceheating
wh.chp.fraction <- function(wh.chp,ch){
wh.chp <- as.factor(checked.revalue(
wh.chp,
c("Gas (mains)" = "NULL"
,"Bulk LPG" = "NULL"
,"Bottled gas" = "NULL"
,"Oil" = "NULL"
,"House coal" = "NULL"
,"Smokeless fuel" = "NULL"
,"Antracite nuts and grain" = "NULL"
,"Wood chips" = "NULL"
,"Wood logs" = "NULL"
,"Wood pellets" = "NULL"
,"Peak electric" = "NULL"
,"Off-peak electric" = "NULL"
,"Communal heating, no CHP" = "NULL"
,"Communal heating, with CHP" = "0.35"
,"Biogas" = "NULL"
,"Dual fuel" = "NULL"
, "Other" = "NULL"
,"Not applicable" = "NULL"
,"Unobtainable" = "NULL"
)))
wh.chp[ch==1] <- "NULL"
return(wh.chp)
}
#'Community charging usage based
#'
#'This function sets 'communitychargingusagebased' to TRUE if a communal system. When
#'unknown, it is assumed that charging for community systems is usage based.
#'#'
#'@param wh.communal a vector containing the spaceheating fuel type from the survey
#'
#'@param ch - flag indicating if the waterheating is provided by the spaceheating
#' 1 indicates waterheating is provided by spaceheating
wh.community.chargingusagebased<- function(wh.communal,ch){
wh.communal <- as.factor(checked.revalue(
wh.communal,
c("Gas (mains)" = "NULL"
,"Bulk LPG" = "NULL"
,"Bottled gas" = "NULL"
,"Oil" = "NULL"
,"House coal" = "NULL"
,"Smokeless fuel" = "NULL"
,"Antracite nuts and grain" = "NULL"
,"Wood chips" = "NULL"
,"Wood logs" = "NULL"
,"Wood pellets" = "NULL"
,"Peak electric" = "NULL"
,"Off-peak electric" = "NULL"
,"Communal heating, no CHP" = "true"
,"Communal heating, with CHP" = "true"
,"Biogas" = "NULL"
,"Dual fuel" = "NULL"
, "Other" = "NULL"
,"Not applicable" = "NULL"
,"Unobtainable" = "NULL"
)))
wh.communal[ch==1] <- "NULL"
return(wh.communal)
}
#'
#' \pagebreak
#'
#'##Solar hot water heating
#'
#'Existence of solar hot water
#'
#'If an area bigger than zero of solar hot water panels exist assume that
#' case has solar hot water
#'
#' @param solar - D9 (% roof with solar installed) from the scottish survey
solar.hotwaterpresent <- function(solar){
#88: not applicable and 99: unobtainable
solar[solar == 88 | solar == 99] <- NA
#BUG FIX: a recorded 0% panel area previously leaked through as the string "0";
#now every non-positive or missing value maps to "FALSE" (also replaces the
#is.na(solar)=="TRUE" anti-pattern with plain is.na())
return(ifelse(!is.na(solar) & solar > 0, "TRUE", "FALSE"))
}
#'Solar stored in cylinder
#'
#'If solar hot water exists and there is a cylinder for hot water then it is assumed
#' that all hot water is stored in the same cylinder as no other information is
#' present in the scottish survey
#'
#'@param solar - the.solarhotwaterpresent from the solar.howaterpresent function
#'
#'@param volume - the volume of the hotwater cyclinder
solar.storeincylinder <- function(solar,volume){
solar <- ifelse(solar=="TRUE" & volume != "NULL","TRUE","FALSE")
return(solar)
}
#'Solar stored volume
#'
#'If solar hot water exists and there is a cylinder for hot water then it is assumed
#' that half the cylinder volume is used for storing hot water from the solar thermal
#' system. If there is no cylinder for the hot water system it is assumed that a
#' tank of volume 75 has been installed for the solar thermal system.
#'
#'@param solar - the.solarhotwaterpresent from the solar.hotwaterpresent function
#'
#'@param volume - the volume of the hotwater cylinder
solar.storevolume <- function(solar,volume){
#Half the cylinder volume when solar is present; as.numeric("NULL") becomes NA
#(with a warning) for the no-cylinder cases
solar <- ifelse(solar=="TRUE",as.numeric(levels(volume)[volume])/2,0)
#Idiomatic is.na() (was is.na(solar)=="TRUE"); no-cylinder solar systems are
#assigned a dedicated 75 litre store
solar[is.na(solar)] <- 75
return(solar)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/assert-is-code.R, R/is-code.R
\name{assert_is_debugged}
\alias{assert_is_debugged}
\alias{is_debugged}
\title{Is the input function being debugged?}
\usage{
assert_is_debugged(x, severity = getOption("assertive.severity", "stop"))
is_debugged(x, .xname = get_name_in_parent(x))
}
\arguments{
\item{x}{Input to check.}
\item{severity}{How severe should the consequences of the assertion be?
Either \code{"stop"}, \code{"warning"}, \code{"message"}, or \code{"none"}.}
\item{.xname}{Not intended to be used directly.}
}
\value{
\code{is_debugged} wraps \code{\link[base]{isdebugged}}, providing
more information on failure. \code{assert_is_debugged} returns nothing but
throws an error if \code{is_debugged} returns \code{FALSE}.
}
\description{
Checks to see if the input function is being debugged (that is, whether
\code{debug} has been called on it and not yet undone).
}
\seealso{
\code{\link[base]{isdebugged}}.
}
| /man/is_debugged.Rd | no_license | cran/assertive.code | R | false | true | 938 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/assert-is-code.R, R/is-code.R
\name{assert_is_debugged}
\alias{assert_is_debugged}
\alias{is_debugged}
\title{Is the input function being debugged?}
\usage{
assert_is_debugged(x, severity = getOption("assertive.severity", "stop"))
is_debugged(x, .xname = get_name_in_parent(x))
}
\arguments{
\item{x}{Input to check.}
\item{severity}{How severe should the consequences of the assertion be?
Either \code{"stop"}, \code{"warning"}, \code{"message"}, or \code{"none"}.}
\item{.xname}{Not intended to be used directly.}
}
\value{
\code{is_debugged} wraps \code{\link[base]{isdebugged}}, providing
more information on failure. \code{assert_is_debugged} returns nothing but
throws an error if \code{is_debugged} returns \code{FALSE}.
}
\description{
Checks to see if the input function is being debugged (that is, whether
\code{debug} has been called on it and not yet undone).
}
\seealso{
\code{\link[base]{isdebugged}}.
}
|
# run_analysis.R - builds a tidy summary of the UCI HAR smartphone dataset:
# merges the test/train sets, keeps the mean/std measurements, attaches
# activity names, and averages each variable per subject and activity.
# Set the correct working directory
#setwd("./DataScience/GettingAndCleaningData/CourseProject")
# Loading library
library(dplyr)
# Reading all the data files into variables
yTest <- read.table("./Data/UCI HAR Dataset/test/y_test.txt")
xTest <- read.table("./Data/UCI HAR Dataset/test/X_test.txt")
subjectTest <- read.table("./Data/UCI HAR Dataset/test/subject_test.txt")
yTrain <- read.table("./Data/UCI HAR Dataset/train/y_train.txt")
xTrain <- read.table("./Data/UCI HAR Dataset/train/X_train.txt")
subjectTrain <- read.table("./Data/UCI HAR Dataset/train/subject_train.txt")
ActivityLabel <- read.table("./Data/UCI HAR Dataset/activity_labels.txt")
Features <- read.table("./Data/UCI HAR Dataset/features.txt")
# Give columns more meaningful names
yTest <- dplyr::rename(yTest, ActivityId = V1)
yTrain <- dplyr::rename(yTrain, ActivityId = V1)
subjectTest <- dplyr::rename(subjectTest, SubjectId =V1)
subjectTrain <- dplyr::rename(subjectTrain, SubjectId =V1)
ActivityLabel <- dplyr::rename(ActivityLabel, ActivityId=V1, ActivityName=V2)
Features <- dplyr::rename(Features, FeatureId =V1, FeatureName = V2)
# Clean up the feature names: replace dashes and commas with periods, drop
# spaces and parentheses
Features$FeatureName <- gsub("-", ".", Features$FeatureName)
Features$FeatureName <- gsub(" ", "", Features$FeatureName)
Features$FeatureName <- gsub(",", ".", Features$FeatureName)
#Features$FeatureName <- gsub("\\()", ".", Features$FeatureName)
Features$FeatureName <- gsub("\\(", "", Features$FeatureName)
Features$FeatureName <- gsub("\\)", "", Features$FeatureName)
# Finally, convert the feature names to lower case
Features$FeatureName <- tolower(Features$FeatureName)
# Merge activity list with activity names
yTestNew = merge(yTest, ActivityLabel, by.x="ActivityId", by.y="ActivityId", all="TRUE")
yTrainNew = merge(yTrain, ActivityLabel, by.x="ActivityId", by.y="ActivityId", all="TRUE")
# Apply the updated feature names as the column names in the measurement file
colnames(xTest) <- Features[, 2]
colnames(xTrain) <- Features[, 2]
# Add static column TestType to Activity list. That way, upon merge, TestType will show up at the front columns.
yTestNew$TestType="Test"
yTrainNew$TestType="Train"
# Bind subject, activity and measurement columns side by side (column-wise)
TestDataSet <- cbind(subjectTest, yTestNew)
TestDataSet <- cbind(TestDataSet, xTest)
TrainDataSet <- cbind(subjectTrain, yTrainNew)
TrainDataSet <- cbind(TrainDataSet, xTrain)
# Stack the Test and Train data sets on top of each other (row-wise)
MasterDataSet <- rbind(TestDataSet, TrainDataSet)
# Extract mean/std columns (step 2) together with the two id columns
GrepPattern <-"mean|std"
MasterColNames <- names(MasterDataSet)
MasterColIndex <- grep(GrepPattern,MasterColNames )
MeanAndStdOutput <- cbind(MasterDataSet[, c(1,2)], MasterDataSet[,MasterColIndex])
# Time to take mean and group by SubjectId and ActivityId
TidyDataSet <- aggregate(MeanAndStdOutput[, c(3:ncol(MeanAndStdOutput))], list(MeanAndStdOutput$SubjectId, MeanAndStdOutput$ActivityId), mean)
# Rename the SubjectId and ActivityId back (aggregate names them Group.1/Group.2)
TidyDataSet <- dplyr::rename(TidyDataSet, SubjectId=Group.1)
TidyDataSet <- dplyr::rename(TidyDataSet, ActivityId=Group.2)
# Output to file
write.table(file = "activitydata.txt", x = TidyDataSet, row.names = FALSE)
| /run_analysis.R | no_license | Alucarrd/GettingAndCleaningData | R | false | false | 3,287 | r |
# Set the correct working directory
#setwd("./DataScience/GettingAndCleaningData/CourseProject")
# Loading library
library(dplyr)
# Reading all the data files into variables
yTest <- read.table("./Data/UCI HAR Dataset/test/y_test.txt")
xTest <- read.table("./Data/UCI HAR Dataset/test/X_test.txt")
subjectTest <- read.table("./Data/UCI HAR Dataset/test/subject_test.txt")
yTrain <- read.table("./Data/UCI HAR Dataset/train/y_train.txt")
xTrain <- read.table("./Data/UCI HAR Dataset/train/X_train.txt")
subjectTrain <- read.table("./Data/UCI HAR Dataset/train/subject_train.txt")
ActivityLabel <- read.table("./Data/UCI HAR Dataset/activity_labels.txt")
Features <- read.table("./Data/UCI HAR Dataset/features.txt")
# Give columns more meaningful names
yTest <- dplyr::rename(yTest, ActivityId = V1)
yTrain <- dplyr::rename(yTrain, ActivityId = V1)
subjectTest <- dplyr::rename(subjectTest, SubjectId = V1)
subjectTrain <- dplyr::rename(subjectTrain, SubjectId = V1)
ActivityLabel <- dplyr::rename(ActivityLabel, ActivityId = V1, ActivityName = V2)
Features <- dplyr::rename(Features, FeatureId = V1, FeatureName = V2)
# Clean up the feature names: replace dash and comma with a period, strip
# spaces and parentheses, then lower-case everything.
Features$FeatureName <- gsub("-", ".", Features$FeatureName)
Features$FeatureName <- gsub(" ", "", Features$FeatureName)
Features$FeatureName <- gsub(",", ".", Features$FeatureName)
Features$FeatureName <- gsub("\\(", "", Features$FeatureName)
Features$FeatureName <- gsub("\\)", "", Features$FeatureName)
# Finally, convert the feature names to lower case
Features$FeatureName <- tolower(Features$FeatureName)
# Attach the activity name to each observation.
# BUG FIX: the original used merge(..., all = "TRUE"). Besides passing a
# character where a logical is expected, merge() re-sorts the result by the
# key column, which silently misaligns the activity rows with the
# subject/measurement rows that are cbind()-ed below. A match()-based
# lookup keeps the original row order intact.
yTestNew <- yTest
yTestNew$ActivityName <- ActivityLabel$ActivityName[match(yTest$ActivityId, ActivityLabel$ActivityId)]
yTrainNew <- yTrain
yTrainNew$ActivityName <- ActivityLabel$ActivityName[match(yTrain$ActivityId, ActivityLabel$ActivityId)]
# Apply the cleaned feature names as the column names in the measurement files
colnames(xTest) <- Features[, 2]
colnames(xTrain) <- Features[, 2]
# Add static column TestType so the origin of each row survives the rbind below.
yTestNew$TestType <- "Test"
yTrainNew$TestType <- "Train"
# Combine subject, activity and measurement columns side by side (horizontally)
TestDataSet <- cbind(subjectTest, yTestNew, xTest)
TrainDataSet <- cbind(subjectTrain, yTrainNew, xTrain)
# Stack the test and training rows (vertically) into one master data set
MasterDataSet <- rbind(TestDataSet, TrainDataSet)
# Extract mean/std columns as Step 2 requested
# (note: this pattern also keeps meanFreq columns)
GrepPattern <- "mean|std"
MasterColIndex <- grep(GrepPattern, names(MasterDataSet))
MeanAndStdOutput <- cbind(MasterDataSet[, c(1, 2)], MasterDataSet[, MasterColIndex])
# Take the mean of every retained measurement, grouped by SubjectId and ActivityId
TidyDataSet <- aggregate(MeanAndStdOutput[, 3:ncol(MeanAndStdOutput)],
                         list(MeanAndStdOutput$SubjectId, MeanAndStdOutput$ActivityId),
                         mean)
# Rename the SubjectId and ActivityId back
TidyDataSet <- dplyr::rename(TidyDataSet, SubjectId = Group.1, ActivityId = Group.2)
# Output to file
write.table(file = "activitydata.txt", x = TidyDataSet, row.names = FALSE)
|
## The functions listed here computes the inverse of an invertible matrix
## and caches the result. This avoids re-computation of the inverse on
## subsequent calls saving system resources.
## example:
## test <- makeCacheMatrix(matrix(c(4,2,7,6),2,2))
## cacheSolve(test)
## On the first call the console will show the computed result, but on
## subsequent calls of cacheSolve(test) the console will print the message
## 'getting cached data' and display the result from the cache.
## makeCacheMatrix function takes a matrix as an input and creates a special list
## containing functions to set the matrix, get the matrix, cache the inverse of
## the matrix, retrieve the cached inverse.
## Construct a cache-aware wrapper around a matrix.
##
## Returns a list of four accessor closures sharing one environment:
##   set(y)          -- replace the stored matrix and drop any cached inverse
##   get()           -- return the stored matrix
##   setInverse(inv) -- store a computed inverse in the cache
##   getInverse()    -- return the cached inverse, or NULL if not yet cached
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL  # cached inverse; NULL means "not computed yet"
  list(
    set = function(y) {
      x <<- y
      cached <<- NULL  # invalidate the cache whenever the matrix changes
    },
    get = function() x,
    setInverse = function(inverse) cached <<- inverse,
    getInverse = function() cached
  )
}
## cacheSolve function takes the special list returned by the makeCacheMatrix function
## and return the inverse of a matrix. On the first call, it computes the inverse and
## sets the result into the cache and on subsequent calls, fetches the result from the cache.
## cacheSolve returns the inverse of the special "matrix" created by
## makeCacheMatrix. On the first call it computes the inverse with solve(),
## stores it in the cache and returns it; subsequent calls return the
## cached value without recomputing. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
        ## Return a matrix that is the inverse of 'x'
        inv <- x$getInverse()
        if(!is.null(inv)) {
                message("getting cached data")
                return(inv)
        }
        data <- x$get()
        # BUG FIX: forward '...' to solve() -- the original signature accepted
        # extra arguments (e.g. tol) but silently dropped them.
        inv <- solve(data, ...)
        x$setInverse(inv)
        inv
}
| /cachematrix.R | no_license | parthakom/ProgrammingAssignment2 | R | false | false | 1,682 | r | ## The functions listed here computes the inverse of an invertible matrix
## and caches the result. This avoids re-computation of the inverse on
## subsequent calls saving system resources.
## example:
## test <- makeCacheMatrix(matrix(c(4,2,7,6),2,2))
## cacheSolve(test)
## On the first call the console will show the computed result, but on
## subsequent calls of cacheSolve(test) the console will print the message
## 'getting cached data' and display the result from the cache.
## makeCacheMatrix function takes a matrix as an input and creates a special list
## containing functions to set the matrix, get the matrix, cache the inverse of
## the matrix, retrieve the cached inverse.
## Construct a cache-aware wrapper around a matrix.
##
## Returns a list of four accessor closures sharing one environment:
##   set(y)          -- replace the stored matrix and drop any cached inverse
##   get()           -- return the stored matrix
##   setInverse(inv) -- store a computed inverse in the cache
##   getInverse()    -- return the cached inverse, or NULL if not yet cached
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL  # cached inverse; NULL means "not computed yet"
  list(
    set = function(y) {
      x <<- y
      cached <<- NULL  # invalidate the cache whenever the matrix changes
    },
    get = function() x,
    setInverse = function(inverse) cached <<- inverse,
    getInverse = function() cached
  )
}
## cacheSolve function takes the special list returned by the makeCacheMatrix function
## and return the inverse of a matrix. On the first call, it computes the inverse and
## sets the result into the cache and on subsequent calls, fetches the result from the cache.
## cacheSolve returns the inverse of the special "matrix" created by
## makeCacheMatrix. On the first call it computes the inverse with solve(),
## stores it in the cache and returns it; subsequent calls return the
## cached value without recomputing. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
        ## Return a matrix that is the inverse of 'x'
        inv <- x$getInverse()
        if(!is.null(inv)) {
                message("getting cached data")
                return(inv)
        }
        data <- x$get()
        # BUG FIX: forward '...' to solve() -- the original signature accepted
        # extra arguments (e.g. tol) but silently dropped them.
        inv <- solve(data, ...)
        x$setInverse(inv)
        inv
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/getcounts.R
\name{refineRegions}
\alias{refineRegions}
\title{Refine regions}
\usage{
refineRegions(regions, binsize)
}
\arguments{
\item{regions}{GRanges object containing the genomic regions of interest.}
\item{binsize}{The size of each bin in basepairs.}
}
\value{
A GRanges object with the refined regions.
}
\description{
Refine a GRanges object to make sure that it is compatible with a
binning scheme of a given binsize. There is more than one way of doing it.
In the way it is done here, the start coordinates and the end coordinates
of the provided regions will become respectively the next number of the form
\code{binsize*k + 1} and the previous number of the form \code{binsize*k},
so that the refined regions will always be contained in the original ones.
}
| /man/refineRegions.Rd | no_license | SamBuckberry/epicseg | R | false | false | 859 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/getcounts.R
\name{refineRegions}
\alias{refineRegions}
\title{Refine regions}
\usage{
refineRegions(regions, binsize)
}
\arguments{
\item{regions}{GRanges object containing the genomic regions of interest.}
\item{binsize}{The size of each bin in basepairs.}
}
\value{
A GRanges object with the refined regions.
}
\description{
Refine a GRanges object to make sure that it is compatible with a
binning scheme of a given binsize. There is more than one way of doing it.
In the way it is done here, the start coordinates and the end coordinates
of the provided regions will become respectively the next number of the form
\code{binsize*k + 1} and the previous number of the form \code{binsize*k},
so that the refined regions will always be contained in the original ones.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/umx_build_high_level_models.R
\name{umxFactorScores}
\alias{umxFactorScores}
\title{Return factor scores from a model as an easily consumable dataframe.}
\usage{
umxFactorScores(model, type = c("ML", "WeightedML", "Regression"),
minManifests = NA)
}
\arguments{
\item{model}{The model to generate scores from.}
\item{type}{The method used to compute the score ('ML', 'WeightedML', or 'Regression').}
\item{minManifests}{The least number of variables required to return a score for a participant (Default = NA).}
}
\value{
\itemize{
\item dataframe of scores.
}
}
\description{
umxFactorScores takes a model, and computes factors scores using the selected method (one
of 'ML', 'WeightedML', or 'Regression')
It is a simple wrapper around mxFactorScores. For missing data, you must specify the least number of
variables allowed for a score (subjects with fewer than minManifests will return a score of NA).
}
\examples{
m1 = umxEFA(mtcars, factors = 2)
x = umxFactorScores(m1, type = c('Regression'), minManifests = 3)
\dontrun{
m1 = umxEFA(mtcars, factors = 1)
x = umxFactorScores(m1, type = c('Regression'), minManifests = 3)
x
}
}
\references{
\itemize{
\item \url{https://github.com/tbates/umx}, \url{https://tbates.github.io}
}
}
\seealso{
\itemize{
\item \code{\link[=mxFactorScores]{mxFactorScores()}}
}
Other Reporting Functions: \code{\link{FishersMethod}},
\code{\link{loadings.MxModel}},
\code{\link{tmx_is.identified}}, \code{\link{tmx_show}},
\code{\link{umxAPA}}, \code{\link{umxGetParameters}},
\code{\link{umxParameters}}, \code{\link{umxReduce}},
\code{\link{umxWeightedAIC}}, \code{\link{umx_APA_pval}},
\code{\link{umx_aggregate}}, \code{\link{umx_names}},
\code{\link{umx_print}}, \code{\link{umx_time}},
\code{\link{umx}}
}
\concept{Reporting Functions}
| /man/umxFactorScores.Rd | no_license | jpritikin/umx | R | false | true | 1,871 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/umx_build_high_level_models.R
\name{umxFactorScores}
\alias{umxFactorScores}
\title{Return factor scores from a model as an easily consumable dataframe.}
\usage{
umxFactorScores(model, type = c("ML", "WeightedML", "Regression"),
minManifests = NA)
}
\arguments{
\item{model}{The model to generate scores from.}
\item{type}{The method used to compute the score ('ML', 'WeightedML', or 'Regression').}
\item{minManifests}{The least number of variables required to return a score for a participant (Default = NA).}
}
\value{
\itemize{
\item dataframe of scores.
}
}
\description{
umxFactorScores takes a model, and computes factors scores using the selected method (one
of 'ML', 'WeightedML', or 'Regression')
It is a simple wrapper around mxFactorScores. For missing data, you must specify the least number of
variables allowed for a score (subjects with fewer than minManifests will return a score of NA).
}
\examples{
m1 = umxEFA(mtcars, factors = 2)
x = umxFactorScores(m1, type = c('Regression'), minManifests = 3)
\dontrun{
m1 = umxEFA(mtcars, factors = 1)
x = umxFactorScores(m1, type = c('Regression'), minManifests = 3)
x
}
}
\references{
\itemize{
\item \url{https://github.com/tbates/umx}, \url{https://tbates.github.io}
}
}
\seealso{
\itemize{
\item \code{\link[=mxFactorScores]{mxFactorScores()}}
}
Other Reporting Functions: \code{\link{FishersMethod}},
\code{\link{loadings.MxModel}},
\code{\link{tmx_is.identified}}, \code{\link{tmx_show}},
\code{\link{umxAPA}}, \code{\link{umxGetParameters}},
\code{\link{umxParameters}}, \code{\link{umxReduce}},
\code{\link{umxWeightedAIC}}, \code{\link{umx_APA_pval}},
\code{\link{umx_aggregate}}, \code{\link{umx_names}},
\code{\link{umx_print}}, \code{\link{umx_time}},
\code{\link{umx}}
}
\concept{Reporting Functions}
|
prin("This file was created within Rstudio")
prin("And now it lives on GitHub")
| /datasciencecoursera.R | no_license | Cybala/datasciencecoursera | R | false | false | 81 | r | prin("This file was created within Rstudio")
prin("And now it lives on GitHub")
|
library(ggplot2)
# Initial plot
# Back-to-back bar chart: the y range spans negative and positive values and
# the tick labels use abs(), so one DIRECTION level appears to carry negative
# DISP_NORM values mirroring the other around y = 0 -- TODO confirm.
# NOTE(review): `dat` is not created in this script; it must already exist in
# the workspace. Also, the x aesthetic is ANGLE_CLASS but the x label reads
# "small RNA length [nt]" -- verify which is intended.
d <- ggplot(dat, aes(x=ANGLE_CLASS, y=DISP_NORM, fill=DIRECTION)) +
geom_bar(stat="identity", position="identity") +
scale_y_continuous(
limits=c(-380000, 90000), # lowered the min limit slightly
breaks=c(90000,0,-90000,-180000,-270000,-350000),
labels=abs , # note the use of abs: tick labels show magnitudes, not signs
expand=c(0,0)) + # use expand so axis start exactly at limits
scale_x_continuous(
limits=c(16,30), # added x-axis limits (min is < your min break)
breaks=seq(18,30,by=2),
labels=seq(18,30,by=2) ,
expand=c(0,0)) +
xlab("small RNA length [nt]") +
ylab("normalized small RNA counts") +
scale_fill_manual(values = c("red", "blue"))
# Strip the default axes/grid so custom axis segments can be drawn below
d <- d + theme_bw() +
theme(axis.line = element_blank(), # remove both axis lines
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border=element_blank())
# Add in segments for the axis - allow a gap at the corner
d +
geom_segment(x=17,xend=30,y=-380000,yend=-380000) + # x-axis
geom_segment(x=16,xend=16,y=-350000,yend=90000) # y-axis | /Untitled.R | no_license | Victor-Alfred/Scripts_R_analysis | R | false | false | 1,093 | r | library(ggplot2)
# Initial plot
# Back-to-back bar chart: the y range spans negative and positive values and
# the tick labels use abs(), so one DIRECTION level appears to carry negative
# DISP_NORM values mirroring the other around y = 0 -- TODO confirm.
# NOTE(review): `dat` is not created in this script; it must already exist in
# the workspace. Also, the x aesthetic is ANGLE_CLASS but the x label reads
# "small RNA length [nt]" -- verify which is intended.
d <- ggplot(dat, aes(x=ANGLE_CLASS, y=DISP_NORM, fill=DIRECTION)) +
geom_bar(stat="identity", position="identity") +
scale_y_continuous(
limits=c(-380000, 90000), # lowered the min limit slightly
breaks=c(90000,0,-90000,-180000,-270000,-350000),
labels=abs , # note the use of abs: tick labels show magnitudes, not signs
expand=c(0,0)) + # use expand so axis start exactly at limits
scale_x_continuous(
limits=c(16,30), # added x-axis limits (min is < your min break)
breaks=seq(18,30,by=2),
labels=seq(18,30,by=2) ,
expand=c(0,0)) +
xlab("small RNA length [nt]") +
ylab("normalized small RNA counts") +
scale_fill_manual(values = c("red", "blue"))
# Strip the default axes/grid so custom axis segments can be drawn below
d <- d + theme_bw() +
theme(axis.line = element_blank(), # remove both axis lines
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border=element_blank())
# Add in segments for the axis - allow a gap at the corner
d +
geom_segment(x=17,xend=30,y=-380000,yend=-380000) + # x-axis
geom_segment(x=16,xend=16,y=-350000,yend=90000) # y-axis
geom_segment(x=16,xend=16,y=-350000,yend=90000) # y-axis |
library(shiny)
library(dplyr)
library(ggplot2)
# Olympic medals data; read relative to the app directory
summer <- read.csv("../data/summer.csv", stringsAsFactors = FALSE)
# NOTE(review): sort_summer is computed but never used in this file --
# confirm whether ui.R or the global environment depends on it before removing.
sort_summer <- summer %>%
group_by(Gender, Year, Country) %>%
count(Gender)
# Define server logic required to draw a histogram
shinyServer(function(input, output) {
output$Plot <- renderPlot({
#test <- input$country_namedfdf
# Medal counts per Gender/Year for the country chosen in the UI
# (input$country_name); count() on grouped data yields a column named `n`.
summer_country <- summer %>% group_by(Gender, Year, Country) %>% filter(Country == input$country_name) %>% count()
colnames(summer_country)[colnames(summer_country) == 'n'] <- 'Medals'
ggplot(summer_country, aes(x = Year, y = Medals)) +
geom_line(aes(colour = Gender)) +
ggtitle("Medals Won by Men and Women in Each Country", subtitle="This is a comparison of the amount of medals won by men and women in the country of the user's choosing.")
})
})
| /Shannon/server.R | no_license | NickHytrek/info201-Final-Project | R | false | false | 838 | r |
library(shiny)
library(dplyr)
library(ggplot2)
# Olympic medals data; read relative to the app directory
summer <- read.csv("../data/summer.csv", stringsAsFactors = FALSE)
# NOTE(review): sort_summer is computed but never used in this file --
# confirm whether ui.R or the global environment depends on it before removing.
sort_summer <- summer %>%
group_by(Gender, Year, Country) %>%
count(Gender)
# Define server logic required to draw a histogram
shinyServer(function(input, output) {
output$Plot <- renderPlot({
#test <- input$country_namedfdf
# Medal counts per Gender/Year for the country chosen in the UI
# (input$country_name); count() on grouped data yields a column named `n`.
summer_country <- summer %>% group_by(Gender, Year, Country) %>% filter(Country == input$country_name) %>% count()
colnames(summer_country)[colnames(summer_country) == 'n'] <- 'Medals'
ggplot(summer_country, aes(x = Year, y = Medals)) +
geom_line(aes(colour = Gender)) +
ggtitle("Medals Won by Men and Women in Each Country", subtitle="This is a comparison of the amount of medals won by men and women in the country of the user's choosing.")
})
})
|
##########################
## Script params ######
##########################
# dirs: named list collecting the paths used below (plus two entries that are
# not directories: the Mallet binary and its input file)
dirs <- list()
# technically not a dir, but for convenience: mallet binary
# NOTE(review): the next two assignments have no right-hand side before the
# comment, so R parses the three lines as ONE chained assignment:
#   dirs$mallet <- dirs$data <- dirs$output <- file.path(dirs$data, "workdir_part2")
# These are placeholders -- fill in the real paths before running.
dirs$mallet <- # path to Mallet binary (the binary itself, not just the folder containing it)
dirs$data <- # path to the working directory containing data
dirs$output <- file.path(dirs$data, "workdir_part2")
if (!dir.exists(dirs$output)){
dir.create(dirs$output)
}
# mallet
# also not a dir, but the input data file for mallet
dirs$topic.trainer.input <- "sotu_baseline.mallet"
# set list of topic numbers to iterate over
n.topics <- c(3, 5, 10, 25, 50, 100, 200, 300)
# number of chains for each topic
n.chains <- 10
# number of Gibbs sampler iterations for each chain
n.iters <- 500
# Seeds to replicate mallet.results. Should be a vector with length = n.chains,
# each element specifying the seed for a chain. 0 will correspond to
# random (mallet will use the clock)
mallet.seed <- c(1,20,67,89,45,980,567,340,267,109)
#######################
## Main #########
#######################
setwd(dirs$data)
source("functions.R") # load functions to control Mallet and plot results
setwd(dirs$output)
# Run the topic model for every candidate number of topics and collect the
# per-chain log-likelihoods into one data frame.
# BUG FIX: pass `dirs` as the first argument -- run.mallet() takes `dirs`
# first everywhere else in this script (see the explicit `dirs = dirs` call
# below); omitting it here shifted every positional argument by one.
results <- ldply(n.topics, function(n){
run.mallet(dirs, n.topics = n, n.chains, n.iters, mallet.seed)
})
results$n.topics <- as.factor(results$n.topics)
# set position for error bars in the plot
dodge <- position_dodge(0.2)
plot.data <- results %>%
tidyr::gather(key = chain, value = LL, -n.topics) %>% # convert to long format
group_by(n.topics) %>% # for each number of topics...
summarize(meanLL = mean(LL), # calculate mean LL
se_min = meanLL - sd(LL),
se_max = meanLL + sd(LL)) # and standard deviation
LL.plot <- ggplot(plot.data) + # plot the values
geom_bar(aes(x = n.topics, y = meanLL), stat = "identity", position = dodge) +
geom_errorbar(aes(x = n.topics, ymin = se_min, ymax = se_max), position = dodge)+
ggtitle(paste("Mean Log-likelihood values averaged over
",
n.chains, "iterations of each model")) +
coord_cartesian(ylim = c(-8, -9)) +
scale_y_reverse()
# ============================
# Compare different models
# Fix the number of topics and re-run the model on inputs produced with
# different preprocessing pipelines, keeping the baseline for comparison.
n.topics <- 25
n.chains <- 10
n.iters <- 500
mallet.seed <- c(1,20,67,89,45,980,567,340,267,109)
input.data <- c("sotu_ner", "sotu_np", "sotu_bigrams","sotu_ner_cleaned", "sotu_np_cleaned")
models <- list()
for (d in input.data){
dirs[["topic.trainer.input"]] <- paste0(d,".mallet")
dirs[["output"]] <- file.path(dirs$data, "workdir_part2", d)
if (!dir.exists(dirs$output)){
dir.create(dirs$output)
}
setwd(dirs$output)
models[[d]] <- run.mallet(dirs, n.topics, n.chains, n.iters, mallet.seed)
}
# Reuse the 25-topic rows of the baseline grid search computed above
models$sotu_baseline <- results[results$n.topics == n.topics,]
models.results <- ldply(models)
models.results$n.topics <- NULL
colnames(models.results)[1] <- "model"
models.plot.data <- models.results %>%
tidyr::gather(key = chain, value = LL, -model) %>% # convert to long format
group_by(model) %>% # for each model...
summarize(meanLL = mean(LL), # calculate mean LL
se_min = meanLL - sd(LL),
se_max = meanLL + sd(LL)) # and standard deviation
models.LL.plot <- ggplot(models.plot.data) + # plot the values
geom_bar(aes(x = model, y = meanLL), stat = "identity", position = dodge) +
geom_errorbar(aes(x = model, ymin = se_min, ymax = se_max), position = dodge)+
ggtitle(paste("Mean Log-likelihood values for models with
different preprocessing")) +
coord_cartesian(ylim = c(-8.5, -11)) +
scale_y_reverse()
# ===========================
# Look at the topic composition for different models
# Figure out which run gave us the best log likelihood and then
# use the seed from that run
best.seeds <- models.results %>%
tidyr::gather(key = iter, value = LL, -model) %>%
group_by(model) %>%
filter(LL == max(LL))
# NOTE(review): this assumes as.numeric(iter) yields the chain index. If
# `iter` is a factor, as.numeric() returns level codes, not label values --
# correct only when the gathered column names are 1..n in order. Confirm.
best.seeds$seed <- mallet.seed[as.numeric(best.seeds$iter)]
# convert factor to numeric
best.seeds$iter <- as.numeric(best.seeds$iter)
# Re-run each model once with its best seed, this time writing Mallet's
# diagnostic output files to disk (produce.output = TRUE).
for (m in best.seeds$model){
dirs[["output"]] <- file.path(dirs$data, "workdir_part2", m)
dirs[["topic.trainer.input"]] <- paste0(m,".mallet")
setwd(dirs$output)
run.mallet(dirs = dirs,
n.topics = n.topics,
n.chains = 1,
n.iters = n.iters,
mallet.seed = best.seeds[best.seeds$model == m,"seed"][[1]],
produce.output = TRUE)
}
# Parse each model's diagnostics XML and plot its top topic words.
models.words.plots <- list()
for (m in best.seeds$model){
setwd(file.path(dirs$data, "workdir_part2", m))
models.words.data <- parse.xml(paste("diagnostics", n.topics,"xml", sep = "."))
models.words.plots[[m]] <- plot.topics(models.words.data, size.multiplier = 45)
}
## Save results of the runs
# BUG FIX: the original read setwd(filepath(dirs$data), "workdir_part2"):
# `filepath` is not a function (file.path was meant) and the closing
# parenthesis was misplaced, passing "workdir_part2" to setwd() as a
# second argument.
setwd(file.path(dirs$data, "workdir_part2"))
save(results, LL.plot,
models, models.results, models.LL.plot,
models.words.plots,
file = paste0("sotu_results_", n.topics, "topics.Rdata")) | /sotu.R | no_license | petershan1119/convote_sotu_topics | R | false | false | 4,931 | r | ##########################
## Script params ######
##########################
# dirs: named list collecting the paths used below (plus two entries that are
# not directories: the Mallet binary and its input file)
dirs <- list()
# technically not a dir, but for convenience: mallet binary
# NOTE(review): the next two assignments have no right-hand side before the
# comment, so R parses the three lines as ONE chained assignment:
#   dirs$mallet <- dirs$data <- dirs$output <- file.path(dirs$data, "workdir_part2")
# These are placeholders -- fill in the real paths before running.
dirs$mallet <- # path to Mallet binary (the binary itself, not just the folder containing it)
dirs$data <- # path to the working directory containing data
dirs$output <- file.path(dirs$data, "workdir_part2")
if (!dir.exists(dirs$output)){
dir.create(dirs$output)
}
# mallet
# also not a dir, but the input data file for mallet
dirs$topic.trainer.input <- "sotu_baseline.mallet"
# set list of topic numbers to iterate over
n.topics <- c(3, 5, 10, 25, 50, 100, 200, 300)
# number of chains for each topic
n.chains <- 10
# number of Gibbs sampler iterations for each chain
n.iters <- 500
# Seeds to replicate mallet.results. Should be a vector with length = n.chains,
# each element specifying the seed for a chain. 0 will correspond to
# random (mallet will use the clock)
mallet.seed <- c(1,20,67,89,45,980,567,340,267,109)
#######################
## Main #########
#######################
setwd(dirs$data)
source("functions.R") # load functions to control Mallet and plot results
setwd(dirs$output)
# Run the topic model for every candidate number of topics and collect the
# per-chain log-likelihoods into one data frame.
# BUG FIX: pass `dirs` as the first argument -- run.mallet() takes `dirs`
# first everywhere else in this script (see the explicit `dirs = dirs` call
# below); omitting it here shifted every positional argument by one.
results <- ldply(n.topics, function(n){
run.mallet(dirs, n.topics = n, n.chains, n.iters, mallet.seed)
})
results$n.topics <- as.factor(results$n.topics)
# set position for error bars in the plot
dodge <- position_dodge(0.2)
plot.data <- results %>%
tidyr::gather(key = chain, value = LL, -n.topics) %>% # convert to long format
group_by(n.topics) %>% # for each number of topics...
summarize(meanLL = mean(LL), # calculate mean LL
se_min = meanLL - sd(LL),
se_max = meanLL + sd(LL)) # and standard deviation
LL.plot <- ggplot(plot.data) + # plot the values
geom_bar(aes(x = n.topics, y = meanLL), stat = "identity", position = dodge) +
geom_errorbar(aes(x = n.topics, ymin = se_min, ymax = se_max), position = dodge)+
ggtitle(paste("Mean Log-likelihood values averaged over
",
n.chains, "iterations of each model")) +
coord_cartesian(ylim = c(-8, -9)) +
scale_y_reverse()
# ============================
# Compare different models
# Fix the number of topics and re-run the model on inputs produced with
# different preprocessing pipelines, keeping the baseline for comparison.
n.topics <- 25
n.chains <- 10
n.iters <- 500
mallet.seed <- c(1,20,67,89,45,980,567,340,267,109)
input.data <- c("sotu_ner", "sotu_np", "sotu_bigrams","sotu_ner_cleaned", "sotu_np_cleaned")
models <- list()
for (d in input.data){
dirs[["topic.trainer.input"]] <- paste0(d,".mallet")
dirs[["output"]] <- file.path(dirs$data, "workdir_part2", d)
if (!dir.exists(dirs$output)){
dir.create(dirs$output)
}
setwd(dirs$output)
models[[d]] <- run.mallet(dirs, n.topics, n.chains, n.iters, mallet.seed)
}
# Reuse the 25-topic rows of the baseline grid search computed above
models$sotu_baseline <- results[results$n.topics == n.topics,]
models.results <- ldply(models)
models.results$n.topics <- NULL
colnames(models.results)[1] <- "model"
models.plot.data <- models.results %>%
tidyr::gather(key = chain, value = LL, -model) %>% # convert to long format
group_by(model) %>% # for each model...
summarize(meanLL = mean(LL), # calculate mean LL
se_min = meanLL - sd(LL),
se_max = meanLL + sd(LL)) # and standard deviation
models.LL.plot <- ggplot(models.plot.data) + # plot the values
geom_bar(aes(x = model, y = meanLL), stat = "identity", position = dodge) +
geom_errorbar(aes(x = model, ymin = se_min, ymax = se_max), position = dodge)+
ggtitle(paste("Mean Log-likelihood values for models with
different preprocessing")) +
coord_cartesian(ylim = c(-8.5, -11)) +
scale_y_reverse()
# ===========================
# Look at the topic composition for different models
# Figure out which run gave us the best log likelihood and then
# use the seed from that run
best.seeds <- models.results %>%
tidyr::gather(key = iter, value = LL, -model) %>%
group_by(model) %>%
filter(LL == max(LL))
# NOTE(review): this assumes as.numeric(iter) yields the chain index. If
# `iter` is a factor, as.numeric() returns level codes, not label values --
# correct only when the gathered column names are 1..n in order. Confirm.
best.seeds$seed <- mallet.seed[as.numeric(best.seeds$iter)]
# convert factor to numeric
best.seeds$iter <- as.numeric(best.seeds$iter)
# Re-run each model once with its best seed, this time writing Mallet's
# diagnostic output files to disk (produce.output = TRUE).
for (m in best.seeds$model){
dirs[["output"]] <- file.path(dirs$data, "workdir_part2", m)
dirs[["topic.trainer.input"]] <- paste0(m,".mallet")
setwd(dirs$output)
run.mallet(dirs = dirs,
n.topics = n.topics,
n.chains = 1,
n.iters = n.iters,
mallet.seed = best.seeds[best.seeds$model == m,"seed"][[1]],
produce.output = TRUE)
}
# Parse each model's diagnostics XML and plot its top topic words.
models.words.plots <- list()
for (m in best.seeds$model){
setwd(file.path(dirs$data, "workdir_part2", m))
models.words.data <- parse.xml(paste("diagnostics", n.topics,"xml", sep = "."))
models.words.plots[[m]] <- plot.topics(models.words.data, size.multiplier = 45)
}
## Save results of the runs
# BUG FIX: the original read setwd(filepath(dirs$data), "workdir_part2"):
# `filepath` is not a function (file.path was meant) and the closing
# parenthesis was misplaced, passing "workdir_part2" to setwd() as a
# second argument.
setwd(file.path(dirs$data, "workdir_part2"))
save(results, LL.plot,
models, models.results, models.LL.plot,
models.words.plots,
file = paste0("sotu_results_", n.topics, "topics.Rdata"))
library(castor)
### Name: simulate_rou_model
### Title: Simulate a reflected Ornstein-Uhlenbeck model for continuous
### trait evolution.
### Aliases: simulate_rou_model
### Keywords: OU model random
### ** Examples
# generate a random tree (growth stops at 10000 tips)
tree = generate_random_tree(list(birth_rate_intercept=1),max_tips=10000)$tree
# simulate evolution of a continuous trait whose value is always >=1
# (reflected OU with reflection point 1)
tip_states = simulate_rou_model(tree, reflection_point=1, spread=2, decay_rate=0.1)$tip_states
# plot histogram of simulated tip states (density scale via prob=TRUE)
hist(tip_states, breaks=20, xlab="state", main="Trait probability distribution", prob=TRUE)
| /data/genthat_extracted_code/castor/examples/simulate_rou_model.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 627 | r | library(castor)
### Name: simulate_rou_model
### Title: Simulate a reflected Ornstein-Uhlenbeck model for continuous
### trait evolution.
### Aliases: simulate_rou_model
### Keywords: OU model random
### ** Examples
# generate a random tree (growth stops at 10000 tips)
tree = generate_random_tree(list(birth_rate_intercept=1),max_tips=10000)$tree
# simulate evolution of a continuous trait whose value is always >=1
# (reflected OU with reflection point 1)
tip_states = simulate_rou_model(tree, reflection_point=1, spread=2, decay_rate=0.1)$tip_states
# plot histogram of simulated tip states (density scale via prob=TRUE)
hist(tip_states, breaks=20, xlab="state", main="Trait probability distribution", prob=TRUE)
|
library("xcms")
library("msdata")
library("MSnbase")
library("CAMERA")
library("readMzXmlData")
# Set directory
# NOTE(review): hard-coded, user-specific Windows path -- adjust before running.
setwd("C:/Users/Simon Ollivier/Desktop/working directory")
input_dir <- 'C:/Users/Simon Ollivier/Desktop/working directory'
# Process every mzXML file found in the working directory
input_files <- list.files(input_dir, pattern = "[.]mzXML")
# Detection parameters
for (i in 1:length(input_files)){
file <- readMzXmlFile(input_files[[i]],removeMetaData = FALSE)
# For Thermo F. Q-Exactive
if(grepl('FTMS',file[[1]]$metaData$msInstrument$msMassAnalyzer)==TRUE){
p <- MassifquantParam(ppm = 15, peakwidth = c(15, 150), snthresh = 2,
prefilter = c(3, 100), mzCenterFun = "wMean", integrate = 1L,
mzdiff = -0.001, fitgauss = FALSE, noise = 0,
verboseColumns = FALSE, criticalValue = 1.125,
consecMissedLimit = 50, unions = 1, checkBack = 0,
withWave = FALSE)
}
## For Bruker MaXis 4G
if(grepl('Quadrupole TOF',file[[1]]$metaData$msInstrument$msMassAnalyzer)==TRUE){
p <- MassifquantParam(ppm = 15, peakwidth = c(15, 150), snthresh = 2,
prefilter = c(5, 2000), mzCenterFun = "wMean", integrate = 1L,
mzdiff = -0.001, fitgauss = FALSE, noise = 0,
verboseColumns = FALSE, criticalValue = 1.125,
consecMissedLimit = 50, unions = 1, checkBack = 0,
withWave = FALSE)
}
register(SerialParam())
# Generates Peak Lists
monfichier <- readMSData(input_files[[i]], msLevel. = 1, mode = "onDisk")
monfichier2 <- findChromPeaks(monfichier, param = p, return.type = "xcmsSet")
monfichier3 <- xsAnnotate(monfichier2)
#monfichier4 <- groupFWHM(monfichier3, perfwhm = 100) ## Chromatographic parameter
monfichier5 <- findIsotopes (monfichier3, mzabs = 0.01)
#monfichier6 <- groupCorr(monfichier5, cor_eic_th = 0.75) ## Chromatographic parameter
#monfichier7 <- findAdducts(monfichier6, polarity="positive") ## Only considers ESI ## Only considers LC data whereas DIMS includes more [M+Na]+
peaklist <- getPeaklist(monfichier5)
# Creates a deisotoped peak list with isotope cluster analysis
## First step : Generating an appropriate matrix
basenames <- c("is_monoiso", "estim_nC", "estim_nN", "estim_nO", "estim_nS", "estim_nCl", "estim_nBr")
isopeaklist_gen <- matrix(ncol=length(basenames))
colnames(isopeaklist_gen) <- c(basenames)
## Second step : Pasting the data from the original peaklist in our matrix
data.matrix(peaklist)
mat.list <- list(peaklist,isopeaklist_gen)
df.list <- lapply(mat.list, as.data.frame)
cat.df <- function(d1,d2) {d1[names(d2)] <- d2; d1}
as_one.df <- Reduce(cat.df, df.list)
isopeaklist_mat <- data.matrix(as_one.df)
isopeaklist <- isopeaklist_mat[do.call("order", as.data.frame(isopeaklist_mat)),]
## Third step : Isotope clustering
print("Calculating isotopic contributions...")
#Setting polarity for electron mass correction
if(grepl('+',file[[1]]$metaData$polarity)==TRUE){
correction <- +0.000549
}
if(grepl('-',file[[1]]$metaData$polarity)==TRUE){
correction <- -0.000549
}
#Setting parameters
for (p in (1:nrow(isopeaklist))){
for (q in (1:nrow(isopeaklist))){
mz_M <- as.numeric(isopeaklist[p,"mz"])
mz_M1 <- as.numeric(isopeaklist[q,"mz"])
into_M <- as.numeric(isopeaklist[p,"into"])
into_M1 <- as.numeric(isopeaklist[q,"into"])
# Set ppm cut-off
ppm_value <- 3
deiso_ppm <- 1e-6 * mz_M * ppm_value
# Set maximum intensity for isotope contribution depanding on m/z (see Table)
## Correction of electron mass
target <- mz_M+correction
## Setting upper limit for the estimated number of elements depending on m/z
if((target <= 200)==TRUE){
Cmax <- 15
Nmax <- 8
Omax <- 7
Smax <- 6
Clmax <- 4
Brmax <- 2
}
if(((target>200)==TRUE)&((target<=400)==TRUE)){
Cmax <- 30
Nmax <- 10
Omax <- 14
Smax <- 12
Clmax <- 7
Brmax <- 4
}
if(((target>400)==TRUE)&((target<=600)==TRUE)){
Cmax <- 42
Nmax <- 13
Omax <- 21
Smax <- 12
Clmax <- 8
Brmax <- 6
}
if(((target>600)==TRUE)&((target<=800)==TRUE)){
Cmax <- 56
Nmax <- 16
Omax <- 25
Smax <- 20
Clmax <- 10
Brmax <- 8
}
if(((target>800)==TRUE)&((target<=1000)==TRUE)){
Cmax <- 66
Nmax <- 25
Omax <- 37
Smax <- 20
Clmax <- 11
Brmax <- 8
}
if(((target>1000)==TRUE)&((target<=1500)==TRUE)){
Cmax <- 100
Nmax <- 26
Omax <- 44
Smax <- 20
Clmax <- 11
Brmax <- 8
}
## Implementing isotopic abundance
percent_M1 <- (into_M1/into_M)*100
est_nC <- percent_M1/1.10800
est_nO <- percent_M1/0.20004
est_nN <- percent_M1/0.36630
est_nS33 <- percent_M1/0.74869
est_nS34 <- percent_M1/4.19599
est_nCl <- percent_M1/24.23530
est_nBr <- percent_M1/49.31400
# Set relative intensity threshold for research of isotopic cluster
bp_into <- max(as.numeric(isopeaklist[p,"into"]))
q_thresh <- 0.01/100*bp_into
## If peak intensity is over detection limit assign value > detected max
if(is.na(into_M)==TRUE){isopeaklist[p,"into"]=1e10}
if(is.na(into_M1)==TRUE){isopeaklist[q,"into"]=1e10}
# Isotope counter
########################################################################################################################
# WARNING: DO NOT ATTEMPT TO IMPLEMENT AN 'IFELSE' LOOP!!! THE COMPARISON WITH THE OTHER PEAKS OF THE MATRIX WOULD
# RETURN AN ESTIMATED VALUE OF 0!!
########################################################################################################################
## Carbon
if (((mz_M + 1.003355 - deiso_ppm <= mz_M1)==TRUE)&((mz_M1 <= mz_M + 1.003355 + deiso_ppm)==TRUE)) {
if(((into_M1 >= q_thresh)==TRUE)&((into_M >= q_thresh)==TRUE)&((est_nC <= Cmax)==TRUE)) {
isopeaklist[q,"is_monoiso"] = "13C"
isopeaklist[p,"estim_nC"] = round(est_nC)
}
}
## Oxygen
if (((mz_M + 2.004245 - deiso_ppm <= mz_M1)==TRUE)&((mz_M1 <= mz_M + 2.004245 + deiso_ppm)==TRUE)) {
if(((into_M1 >= q_thresh)==TRUE)&((into_M >= q_thresh)==TRUE)&((est_nO <= Omax)==TRUE)) {
isopeaklist[q,"is_monoiso"] = "18O"
isopeaklist[p,"estim_nO"] = round(est_nO)
}
}
## Nitrogen
if (((mz_M + 0.997035 - deiso_ppm <= mz_M1)==TRUE)&((mz_M1 <= mz_M + 0.997035 + deiso_ppm)==TRUE)) {
if(((into_M1 >= q_thresh)==TRUE)&((into_M >= q_thresh)==TRUE)&((est_nN <= Nmax)==TRUE)) {
isopeaklist[q,"is_monoiso"] = "15N"
isopeaklist[p,"estim_nN"] = round(est_nN)
}
}
## Sulphur
### 34S
if (((mz_M + 1.995796 - deiso_ppm <= mz_M1)==TRUE)&((mz_M1 <= mz_M + 1.995796 + deiso_ppm)==TRUE)) {
if(((into_M1 >= q_thresh)==TRUE)&((into_M >= q_thresh)==TRUE)&((est_nS34 <= Smax)==TRUE)) {
isopeaklist[q,"is_monoiso"] = "34S"
isopeaklist[p,"estim_nS"] = round(est_nS34)
}
}
### 33S (might be detected as well if the number of S atoms is high, but is least adapted for calculation of estim_nS)
if (((mz_M + 0.999388 - deiso_ppm <= mz_M1)==TRUE)&((mz_M1 <= mz_M + 0.999388 + deiso_ppm)==TRUE)) {
if(((into_M1 >= q_thresh)==TRUE)&((into_M >= q_thresh)==TRUE)&((est_nS33 <= Smax)==TRUE)) {
isopeaklist[q,"is_monoiso"] = "33S"
}
}
## Halogens
#### Bromine (unlock code if necessary)
# if (((mz_M + 1.997952 - deiso_ppm <= mz_M1)==TRUE)&((mz_M1 <= mz_M + 1.997952 + deiso_ppm)==TRUE)) {
# if(((into_M1 >= q_thresh)==TRUE)&((into_M >= q_thresh)==TRUE)&((est_nBr <= Brmax)==TRUE)) {
# isopeaklist[q,"is_monoiso"] = "81Br"
# isopeaklist[p,"estim_nBr"] = round(est_nBr)
# }
# }
### Chlorine
if (((mz_M + 1.997050 - deiso_ppm <= mz_M1)==TRUE)&((mz_M1 <= mz_M + 1.997050 + deiso_ppm)==TRUE)) {
if(((into_M1 >= q_thresh)==TRUE)&((into_M >= q_thresh)==TRUE)&((est_nCl <= Clmax)==TRUE)) {
isopeaklist[q,"is_monoiso"] = "37Cl"
isopeaklist[p,"estim_nCl"] = round(est_nCl)
}
}}}
print("Calculated isotopic contributions!")
for(x in (1:nrow(isopeaklist))){
if(is.na(isopeaklist[x,"is_monoiso"]==TRUE)==TRUE){isopeaklist[x,"is_monoiso"]=1}
}
## Creates a CSV export file (unlock to print peaklist w/ identified isotopic clusters)
# CSVfile <- paste(substr(input_files[[i]],1,nchar(input_files[[i]])-6), "_isopeaklist.CSV",sep ="")
# write.csv(isopeaklist, file = CSVfile)
## Fourth step : Deisotoping (i.e. 'is_monoiso' stays NA)
deisopeaklist <- subset(isopeaklist, isopeaklist[,"is_monoiso"] == 1)
# Apply a lock-mass calibration on the resulting peak list w/ higher accuracy than constructor software
# for Bruker maXis (lock on palmitic acid C16H32O2)
## Instrument
if(grepl('Quadrupole TOF',file[[1]]$metaData$msInstrument$msMassAnalyzer)==TRUE){
##Polarity: choose the lock-mass reference ion of palmitic acid (C16H32O2).
## NOTE: fixed = TRUE is required -- '+' is a regex repetition operator, so
## grepl('+', ...) without it raises "invalid regular expression" at runtime.
if (grepl('+', file[[1]]$metaData$polarity, fixed = TRUE)) {
lock.ref=257.247507
}
if (grepl('-', file[[1]]$metaData$polarity, fixed = TRUE)) {
lock.ref=255.232955
}
a<-which(abs(as.numeric(deisopeaklist[,"mz"])-lock.ref)==min(abs(as.numeric(deisopeaklist[,"mz"])-lock.ref)))
lock.mes<-as.numeric(deisopeaklist[a,"mz"])
lock.into<-as.numeric(deisopeaklist[a,"into"])
c <- lock.ref/lock.mes
for(z in 1:nrow(deisopeaklist)){
mz.mes <- as.numeric(deisopeaklist[z,"mz"])
mz.into <- as.numeric(deisopeaklist[z,"into"])
deisopeaklist[z,"mz"] = c*mz.mes
}
}
## Creates a CSV export file
CSVfile <- paste(substr(input_files[[i]],1,nchar(input_files[[i]])-6), "_deisopeaklist.CSV",sep ="")
write.csv(deisopeaklist, file = CSVfile)
}
| /1- IsoCalc.r | no_license | siollivier/directacquisitionproject | R | false | false | 10,268 | r | library("xcms")
library("msdata")
library("MSnbase")
library("CAMERA")
library("readMzXmlData")
# Set directory
setwd("C:/Users/Simon Ollivier/Desktop/working directory")
input_dir <- 'C:/Users/Simon Ollivier/Desktop/working directory'
input_files <- list.files(input_dir, pattern = "[.]mzXML")
# Detection parameters
for (i in 1:length(input_files)){
file <- readMzXmlFile(input_files[[i]],removeMetaData = FALSE)
# For Thermo F. Q-Exactive
if(grepl('FTMS',file[[1]]$metaData$msInstrument$msMassAnalyzer)==TRUE){
p <- MassifquantParam(ppm = 15, peakwidth = c(15, 150), snthresh = 2,
prefilter = c(3, 100), mzCenterFun = "wMean", integrate = 1L,
mzdiff = -0.001, fitgauss = FALSE, noise = 0,
verboseColumns = FALSE, criticalValue = 1.125,
consecMissedLimit = 50, unions = 1, checkBack = 0,
withWave = FALSE)
}
## For Bruker MaXis 4G
if(grepl('Quadrupole TOF',file[[1]]$metaData$msInstrument$msMassAnalyzer)==TRUE){
p <- MassifquantParam(ppm = 15, peakwidth = c(15, 150), snthresh = 2,
prefilter = c(5, 2000), mzCenterFun = "wMean", integrate = 1L,
mzdiff = -0.001, fitgauss = FALSE, noise = 0,
verboseColumns = FALSE, criticalValue = 1.125,
consecMissedLimit = 50, unions = 1, checkBack = 0,
withWave = FALSE)
}
register(SerialParam())
# Generates Peak Lists
monfichier <- readMSData(input_files[[i]], msLevel. = 1, mode = "onDisk")
monfichier2 <- findChromPeaks(monfichier, param = p, return.type = "xcmsSet")
monfichier3 <- xsAnnotate(monfichier2)
#monfichier4 <- groupFWHM(monfichier3, perfwhm = 100) ## Chromatographic parameter
monfichier5 <- findIsotopes (monfichier3, mzabs = 0.01)
#monfichier6 <- groupCorr(monfichier5, cor_eic_th = 0.75) ## Chromatographic parameter
#monfichier7 <- findAdducts(monfichier6, polarity="positive") ## Only considers ESI ## Only considers LC data whereas DIMS includes more [M+Na]+
peaklist <- getPeaklist(monfichier5)
# Creates a deisotoped peak list with isotope cluster analysis
## First step : Generating an appropriate matrix
basenames <- c("is_monoiso", "estim_nC", "estim_nN", "estim_nO", "estim_nS", "estim_nCl", "estim_nBr")
isopeaklist_gen <- matrix(ncol=length(basenames))
colnames(isopeaklist_gen) <- c(basenames)
## Second step : Pasting the data from the original peaklist in our matrix
data.matrix(peaklist)
mat.list <- list(peaklist,isopeaklist_gen)
df.list <- lapply(mat.list, as.data.frame)
cat.df <- function(d1,d2) {d1[names(d2)] <- d2; d1}
as_one.df <- Reduce(cat.df, df.list)
isopeaklist_mat <- data.matrix(as_one.df)
isopeaklist <- isopeaklist_mat[do.call("order", as.data.frame(isopeaklist_mat)),]
## Third step : Isotope clustering
print("Calculating isotopic contributions...")
# Setting polarity for electron mass correction.
# NOTE: fixed = TRUE is required -- '+' is a regex repetition operator, so
# grepl('+', ...) without it raises "invalid regular expression" at runtime.
if (grepl('+', file[[1]]$metaData$polarity, fixed = TRUE)) {
correction <- +0.000549
}
if (grepl('-', file[[1]]$metaData$polarity, fixed = TRUE)) {
correction <- -0.000549
}
#Setting parameters
for (p in (1:nrow(isopeaklist))){
for (q in (1:nrow(isopeaklist))){
mz_M <- as.numeric(isopeaklist[p,"mz"])
mz_M1 <- as.numeric(isopeaklist[q,"mz"])
into_M <- as.numeric(isopeaklist[p,"into"])
into_M1 <- as.numeric(isopeaklist[q,"into"])
# Set ppm cut-off for matching candidate isotope peaks to the parent m/z
ppm_value <- 3
# absolute m/z tolerance corresponding to ppm_value at this mass
deiso_ppm <- 1e-6 * mz_M * ppm_value
# Set maximum element counts for isotope contribution depending on m/z (see Table)
## Correction of electron mass (sign was set earlier from the polarity)
target <- mz_M+correction
## Setting upper limit for the estimated number of elements depending on m/z
if((target <= 200)==TRUE){
Cmax <- 15
Nmax <- 8
Omax <- 7
Smax <- 6
Clmax <- 4
Brmax <- 2
}
if(((target>200)==TRUE)&((target<=400)==TRUE)){
Cmax <- 30
Nmax <- 10
Omax <- 14
Smax <- 12
Clmax <- 7
Brmax <- 4
}
if(((target>400)==TRUE)&((target<=600)==TRUE)){
Cmax <- 42
Nmax <- 13
Omax <- 21
Smax <- 12
Clmax <- 8
Brmax <- 6
}
if(((target>600)==TRUE)&((target<=800)==TRUE)){
Cmax <- 56
Nmax <- 16
Omax <- 25
Smax <- 20
Clmax <- 10
Brmax <- 8
}
if(((target>800)==TRUE)&((target<=1000)==TRUE)){
Cmax <- 66
Nmax <- 25
Omax <- 37
Smax <- 20
Clmax <- 11
Brmax <- 8
}
if(((target>1000)==TRUE)&((target<=1500)==TRUE)){
Cmax <- 100
Nmax <- 26
Omax <- 44
Smax <- 20
Clmax <- 11
Brmax <- 8
}
## Implementing isotopic abundance
percent_M1 <- (into_M1/into_M)*100
est_nC <- percent_M1/1.10800
est_nO <- percent_M1/0.20004
est_nN <- percent_M1/0.36630
est_nS33 <- percent_M1/0.74869
est_nS34 <- percent_M1/4.19599
est_nCl <- percent_M1/24.23530
est_nBr <- percent_M1/49.31400
# Set relative intensity threshold for research of isotopic cluster
bp_into <- max(as.numeric(isopeaklist[p,"into"]))
q_thresh <- 0.01/100*bp_into
## If peak intensity is over detection limit assign value > detected max
if(is.na(into_M)==TRUE){isopeaklist[p,"into"]=1e10}
if(is.na(into_M1)==TRUE){isopeaklist[q,"into"]=1e10}
# Isotope counter
########################################################################################################################
# WARNING: DO NOT ATTEMPT TO IMPLEMENT AN 'IFELSE' LOOP!!! THE COMPARISON WITH THE OTHER PEAKS OF THE MATRIX WOULD
# RETURN AN ESTIMATED VALUE OF 0!!
########################################################################################################################
## Carbon
if (((mz_M + 1.003355 - deiso_ppm <= mz_M1)==TRUE)&((mz_M1 <= mz_M + 1.003355 + deiso_ppm)==TRUE)) {
if(((into_M1 >= q_thresh)==TRUE)&((into_M >= q_thresh)==TRUE)&((est_nC <= Cmax)==TRUE)) {
isopeaklist[q,"is_monoiso"] = "13C"
isopeaklist[p,"estim_nC"] = round(est_nC)
}
}
## Oxygen
if (((mz_M + 2.004245 - deiso_ppm <= mz_M1)==TRUE)&((mz_M1 <= mz_M + 2.004245 + deiso_ppm)==TRUE)) {
if(((into_M1 >= q_thresh)==TRUE)&((into_M >= q_thresh)==TRUE)&((est_nO <= Omax)==TRUE)) {
isopeaklist[q,"is_monoiso"] = "18O"
isopeaklist[p,"estim_nO"] = round(est_nO)
}
}
## Nitrogen
if (((mz_M + 0.997035 - deiso_ppm <= mz_M1)==TRUE)&((mz_M1 <= mz_M + 0.997035 + deiso_ppm)==TRUE)) {
if(((into_M1 >= q_thresh)==TRUE)&((into_M >= q_thresh)==TRUE)&((est_nN <= Nmax)==TRUE)) {
isopeaklist[q,"is_monoiso"] = "15N"
isopeaklist[p,"estim_nN"] = round(est_nN)
}
}
## Sulphur
### 34S
if (((mz_M + 1.995796 - deiso_ppm <= mz_M1)==TRUE)&((mz_M1 <= mz_M + 1.995796 + deiso_ppm)==TRUE)) {
if(((into_M1 >= q_thresh)==TRUE)&((into_M >= q_thresh)==TRUE)&((est_nS34 <= Smax)==TRUE)) {
isopeaklist[q,"is_monoiso"] = "34S"
isopeaklist[p,"estim_nS"] = round(est_nS34)
}
}
### 33S (might be detected as well if the number of S atoms is high, but is least adapted for calculation of estim_nS)
if (((mz_M + 0.999388 - deiso_ppm <= mz_M1)==TRUE)&((mz_M1 <= mz_M + 0.999388 + deiso_ppm)==TRUE)) {
if(((into_M1 >= q_thresh)==TRUE)&((into_M >= q_thresh)==TRUE)&((est_nS33 <= Smax)==TRUE)) {
isopeaklist[q,"is_monoiso"] = "33S"
}
}
## Halogens
#### Bromine (unlock code if necessary)
# if (((mz_M + 1.997952 - deiso_ppm <= mz_M1)==TRUE)&((mz_M1 <= mz_M + 1.997952 + deiso_ppm)==TRUE)) {
# if(((into_M1 >= q_thresh)==TRUE)&((into_M >= q_thresh)==TRUE)&((est_nBr <= Brmax)==TRUE)) {
# isopeaklist[q,"is_monoiso"] = "81Br"
# isopeaklist[p,"estim_nBr"] = round(est_nBr)
# }
# }
### Chlorine
if (((mz_M + 1.997050 - deiso_ppm <= mz_M1)==TRUE)&((mz_M1 <= mz_M + 1.997050 + deiso_ppm)==TRUE)) {
if(((into_M1 >= q_thresh)==TRUE)&((into_M >= q_thresh)==TRUE)&((est_nCl <= Clmax)==TRUE)) {
isopeaklist[q,"is_monoiso"] = "37Cl"
isopeaklist[p,"estim_nCl"] = round(est_nCl)
}
}}}
print("Calculated isotopic contributions!")
for(x in (1:nrow(isopeaklist))){
if(is.na(isopeaklist[x,"is_monoiso"]==TRUE)==TRUE){isopeaklist[x,"is_monoiso"]=1}
}
## Creates a CSV export file (unlock to print peaklist w/ identified isotopic clusters)
# CSVfile <- paste(substr(input_files[[i]],1,nchar(input_files[[i]])-6), "_isopeaklist.CSV",sep ="")
# write.csv(isopeaklist, file = CSVfile)
## Fourth step : Deisotoping (i.e. 'is_monoiso' stays NA)
deisopeaklist <- subset(isopeaklist, isopeaklist[,"is_monoiso"] == 1)
# Apply a lock-mass calibration on the resulting peak list w/ higher accuracy than constructor software
# for Bruker maXis (lock on palmitic acid C16H32O2)
## Instrument
if(grepl('Quadrupole TOF',file[[1]]$metaData$msInstrument$msMassAnalyzer)==TRUE){
##Polarity: choose the lock-mass reference ion of palmitic acid (C16H32O2).
## NOTE: fixed = TRUE is required -- '+' is a regex repetition operator, so
## grepl('+', ...) without it raises "invalid regular expression" at runtime.
if (grepl('+', file[[1]]$metaData$polarity, fixed = TRUE)) {
lock.ref=257.247507
}
if (grepl('-', file[[1]]$metaData$polarity, fixed = TRUE)) {
lock.ref=255.232955
}
a<-which(abs(as.numeric(deisopeaklist[,"mz"])-lock.ref)==min(abs(as.numeric(deisopeaklist[,"mz"])-lock.ref)))
lock.mes<-as.numeric(deisopeaklist[a,"mz"])
lock.into<-as.numeric(deisopeaklist[a,"into"])
c <- lock.ref/lock.mes
for(z in 1:nrow(deisopeaklist)){
mz.mes <- as.numeric(deisopeaklist[z,"mz"])
mz.into <- as.numeric(deisopeaklist[z,"into"])
deisopeaklist[z,"mz"] = c*mz.mes
}
}
## Creates a CSV export file
CSVfile <- paste(substr(input_files[[i]],1,nchar(input_files[[i]])-6), "_deisopeaklist.CSV",sep ="")
write.csv(deisopeaklist, file = CSVfile)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{icd9cm_intent_mech_regex}
\alias{icd9cm_intent_mech_regex}
\title{icd9 intent and mechanism.}
\format{Data frame}
\usage{
icd9cm_intent_mech_regex
}
\description{
Dataset of 84 rows and 4 variables.
}
\examples{
}
\keyword{reference}
| /man/icd9cm_intent_mech_regex.Rd | no_license | injuryepi/injuryepi | R | false | true | 358 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{icd9cm_intent_mech_regex}
\alias{icd9cm_intent_mech_regex}
\title{icd9 intent and mechanism.}
\format{Data frame}
\usage{
icd9cm_intent_mech_regex
}
\description{
Dataset of 84 rows and 4 variables.
}
\examples{
}
\keyword{reference}
|
pollutantmean <- function(directory, pollutant, id = 1:332) {
files <- paste(directory, "/", list.files(path = directory), sep = "")
df <- bind_rows(lapply(files, read_csv))
return(colMeans(df[df$ID %in% id, pollutant], na.rm = TRUE))
} | /pollutantmean.R | no_license | jeffc00/datasciencecoursera | R | false | false | 246 | r | pollutantmean <- function(directory, pollutant, id = 1:332) {
files <- paste(directory, "/", list.files(path = directory), sep = "")
df <- bind_rows(lapply(files, read_csv))
return(colMeans(df[df$ID %in% id, pollutant], na.rm = TRUE))
} |
%
% Copyright 2007-2020 by the individuals mentioned in the source code history
%
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
\name{MxData-class}
\alias{MxData-class}
\alias{MxData}
\alias{$,MxData-method}
\alias{$<-,MxData-method}
\alias{MxNonNullData-class}
\alias{print,MxNonNullData-method}
\alias{show,MxNonNullData-method}
\title{MxData Class}
\description{
MxData is an S4 class. An MxData object is a \link[=Named-entity]{named entity}.
New instances of this class can be created using the function \link{mxData}.
MxData is an S4 class union. An MxData object is either \link{NULL} or a
MxNonNullData object.
}
\details{
The MxNonNullData class has the following slots:
\tabular{rcl}{
\tab \tab \cr
name \tab - \tab The name of the object \cr
observed \tab - \tab Either a matrix or a data frame \cr
vector \tab - \tab A vector for means, or NA if missing \cr
type \tab - \tab Either 'raw', 'cov', or 'cor' \cr
numObs \tab - \tab The number of observations \cr
}
The 'name' slot is the name of the MxData object. %Use of MxData objects in other functions in the \link{OpenMx} library may require reference by name.
The \sQuote{observed} slot is used to contain data, either as a matrix or as a data frame. Use of the data in this slot by other functions depends on the value of the 'type' slot. When 'type' is equal to 'cov' or 'cor', the data input into the 'matrix' slot should be a symmetric matrix or data frame.
The 'vector' slot is used to contain a vector of numeric values, which is used as a vector of means for MxData objects with 'type' equal to 'cov' or 'cor'. This slot may be used in estimation using the \link{mxFitFunctionML} function.
The 'type' slot may take one of the following supported values:
\describe{
\item{raw}{The contents of the \sQuote{observed} slot are treated as raw data. Missing values are permitted and must be designated as the system missing value. The 'vector' and 'numObs' slots cannot be specified, as the 'vector' argument is not relevant and the 'numObs' argument is automatically populated with the number of rows in the data. Data of this type may use the \link{mxFitFunctionML} function as its fit function in MxModel objects, which can deal with covariance estimation under full-information maximum likelihood.}
\item{cov}{The contents of the \sQuote{observed} slot are treated as a covariance matrix. The 'vector' argument is not required, but may be included for estimations involving means. The 'numObs' slot is required. Data of this type may use fit functions such as the \link{mxFitFunctionML}, depending on the specified model.}
\item{cor}{The contents of the \sQuote{observed} slot are treated as a correlation matrix. The 'vector' argument is not required, but may be included for estimations involving means. The 'numObs' slot is required. Data of this type may use fit functions such as the \link{mxFitFunctionML}, depending on the specified model.}
}
The 'numObs' slot describes the number of observations in the data. If 'type' equals 'raw', then 'numObs' is automatically populated as the number of rows in the matrix or data frame in the \sQuote{observed} slot. If 'type' equals 'cov' or 'cor', then this slot must be input using the 'numObs' argument in the \link{mxData} function when the MxData argument is created.
MxData objects may not be included in \link{MxAlgebra} objects or use the \link{mxFitFunctionAlgebra} function. If these capabilities are desired, data should be appropriately input or transformed using the \link{mxMatrix} and \link{mxAlgebra} functions.
While column names are stored in the \sQuote{observed} slot of MxData objects, these names are not recognized as variable names in \link[=MxPath-class]{MxPath} objects. Variable names must be specified using the 'manifestVars' argument of the \link{mxModel} function prior to use in \link[=MxPath-class]{MxPath} objects.
The mxData function does not currently place restrictions on the size, shape, or symmetry of matrices input into the \sQuote{observed} argument. While it is possible to specify MxData objects as covariance or correlation matrices that do not have the properties commonly associated with these matrices, failure to correctly specify these matrices will likely lead to problems in model estimation.
}
\references{
The OpenMx User's guide can be found at \url{https://openmx.ssri.psu.edu/documentation/}.
}
\seealso{
\link{mxData} for creating MxData objects, \link{matrix} and \link{data.frame} for objects which may be entered as arguments in the 'matrix' slot. More information about the OpenMx package may be found \link[=OpenMx]{here}.
}
| /man/MxData-class.Rd | no_license | mirkoruks/OpenMx | R | false | false | 5,183 | rd | %
% Copyright 2007-2020 by the individuals mentioned in the source code history
%
% Licensed under the Apache License, Version 2.0 (the "License");
% you may not use this file except in compliance with the License.
% You may obtain a copy of the License at
%
% http://www.apache.org/licenses/LICENSE-2.0
%
% Unless required by applicable law or agreed to in writing, software
% distributed under the License is distributed on an "AS IS" BASIS,
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% See the License for the specific language governing permissions and
% limitations under the License.
\name{MxData-class}
\alias{MxData-class}
\alias{MxData}
\alias{$,MxData-method}
\alias{$<-,MxData-method}
\alias{MxNonNullData-class}
\alias{print,MxNonNullData-method}
\alias{show,MxNonNullData-method}
\title{MxData Class}
\description{
MxData is an S4 class. An MxData object is a \link[=Named-entity]{named entity}.
New instances of this class can be created using the function \link{mxData}.
MxData is an S4 class union. An MxData object is either \link{NULL} or a
MxNonNullData object.
}
\details{
The MxNonNullData class has the following slots:
\tabular{rcl}{
\tab \tab \cr
name \tab - \tab The name of the object \cr
observed \tab - \tab Either a matrix or a data frame \cr
vector \tab - \tab A vector for means, or NA if missing \cr
type \tab - \tab Either 'raw', 'cov', or 'cor' \cr
numObs \tab - \tab The number of observations \cr
}
The 'name' slot is the name of the MxData object. %Use of MxData objects in other functions in the \link{OpenMx} library may require reference by name.
The \sQuote{observed} slot is used to contain data, either as a matrix or as a data frame. Use of the data in this slot by other functions depends on the value of the 'type' slot. When 'type' is equal to 'cov' or 'cor', the data input into the 'matrix' slot should be a symmetric matrix or data frame.
The 'vector' slot is used to contain a vector of numeric values, which is used as a vector of means for MxData objects with 'type' equal to 'cov' or 'cor'. This slot may be used in estimation using the \link{mxFitFunctionML} function.
The 'type' slot may take one of the following supported values:
\describe{
\item{raw}{The contents of the \sQuote{observed} slot are treated as raw data. Missing values are permitted and must be designated as the system missing value. The 'vector' and 'numObs' slots cannot be specified, as the 'vector' argument is not relevant and the 'numObs' argument is automatically populated with the number of rows in the data. Data of this type may use the \link{mxFitFunctionML} function as its fit function in MxModel objects, which can deal with covariance estimation under full-information maximum likelihood.}
\item{cov}{The contents of the \sQuote{observed} slot are treated as a covariance matrix. The 'vector' argument is not required, but may be included for estimations involving means. The 'numObs' slot is required. Data of this type may use fit functions such as the \link{mxFitFunctionML}, depending on the specified model.}
\item{cor}{The contents of the \sQuote{observed} slot are treated as a correlation matrix. The 'vector' argument is not required, but may be included for estimations involving means. The 'numObs' slot is required. Data of this type may use fit functions such as the \link{mxFitFunctionML}, depending on the specified model.}
}
The 'numObs' slot describes the number of observations in the data. If 'type' equals 'raw', then 'numObs' is automatically populated as the number of rows in the matrix or data frame in the \sQuote{observed} slot. If 'type' equals 'cov' or 'cor', then this slot must be input using the 'numObs' argument in the \link{mxData} function when the MxData argument is created.
MxData objects may not be included in \link{MxAlgebra} objects or use the \link{mxFitFunctionAlgebra} function. If these capabilities are desired, data should be appropriately input or transformed using the \link{mxMatrix} and \link{mxAlgebra} functions.
While column names are stored in the \sQuote{observed} slot of MxData objects, these names are not recognized as variable names in \link[=MxPath-class]{MxPath} objects. Variable names must be specified using the 'manifestVars' argument of the \link{mxModel} function prior to use in \link[=MxPath-class]{MxPath} objects.
The mxData function does not currently place restrictions on the size, shape, or symmetry of matrices input into the \sQuote{observed} argument. While it is possible to specify MxData objects as covariance or correlation matrices that do not have the properties commonly associated with these matrices, failure to correctly specify these matrices will likely lead to problems in model estimation.
}
\references{
The OpenMx User's guide can be found at \url{https://openmx.ssri.psu.edu/documentation/}.
}
\seealso{
\link{mxData} for creating MxData objects, \link{matrix} and \link{data.frame} for objects which may be entered as arguments in the 'matrix' slot. More information about the OpenMx package may be found \link[=OpenMx]{here}.
}
|
# get the number of parameters of the bayesian network.
nparams = function(x, data, effective = FALSE, debug = FALSE) {

  # x can be either a network structure ("bn") or a fitted network ("bn.fit").
  check.bn.or.fit(x)
  # sanitize the logical flags.
  check.logical(debug)
  check.logical(effective)

  if (!is(x, "bn")) {

    # a fitted network carries its own parameters, so the data are not used.
    if (!missing(data))
      warning("unused argument data.")

    return(nparams.fitted(x, effective = effective, debug = debug))

  }#THEN

  # for a network structure, parameters must be counted from the data.
  check.data(data)
  # the network must be consistent with the data it is paired with.
  check.bn.vs.data(x, data)

  # the number of parameters is not well defined for partially directed graphs.
  if (is.pdag(x$arcs, names(x$nodes)))
    stop("the graph is only partially directed.")

  if (effective) {

    # fit the network, then count only the non-zero parameters.
    fitted = bn.fit(x, data)
    return(nparams.fitted(fitted, effective = TRUE, debug = debug))

  }#THEN
  else {

    return(nparams.backend(x, data = data, debug = debug))

  }#ELSE

}#NPARAMS
# get the number of tests/scores used in structure learning.
ntests = function(x) {

  # make sure x is an object of class bn.
  check.bn(x)

  # the count is stored by the learning algorithm in the bn object itself.
  return(x$learning$ntests)

}#NTESTS
# structural hamming distance between a learned network and a reference one.
shd = function(learned, true, wlbl = FALSE, debug = FALSE) {

  # both arguments must be objects of class bn.
  check.bn(learned)
  check.bn(true)
  # sanitize the logical flags.
  check.logical(debug)
  check.logical(wlbl)

  # the two networks must be defined over the same set of nodes.
  match.bn(learned, true)

  return(structural.hamming.distance(learned = learned, true = true,
           wlbl = wlbl, debug = debug))

}#SHD
# Hamming distance between the skeletons of two networks.
hamming = function(learned, true, debug = FALSE) {

  # both arguments must be objects of class bn.
  check.bn(learned)
  check.bn(true)
  # sanitize the debug flag.
  check.logical(debug)

  # the two networks must be defined over the same set of nodes.
  match.bn(learned, true)

  return(hamming.distance(learned = learned, true = true, debug = debug))

}#HAMMING
# get the whitelist used by the learning algorithm.
whitelist = function(x) {

  # make sure x is an object of class bn.
  check.bn(x)

  wl = x$learning$whitelist

  # return an empty, correctly shaped matrix when no whitelist was used, so
  # that callers always get a two-column "from"/"to" character matrix.
  if (is.null(wl))
    wl = matrix(character(0), nrow = 0, ncol = 2,
           dimnames = list(NULL, c("from", "to")))

  return(wl)

}#WHITELIST
# get the blacklist used by the learning algorithm.
blacklist = function(x) {

  # make sure x is an object of class bn.
  check.bn(x)

  bl = x$learning$blacklist

  # return an empty, correctly shaped matrix when no blacklist was used, so
  # that callers always get a two-column "from"/"to" character matrix.
  if (is.null(bl))
    bl = matrix(character(0), nrow = 0, ncol = 2,
           dimnames = list(NULL, c("from", "to")))

  return(bl)

}#BLACKLIST
# reconstruct the equivalence class (CPDAG) of a network.
cpdag = function(x, moral = TRUE, wlbl = FALSE, debug = FALSE) {

  # make sure x is an object of class bn.
  check.bn(x)
  # sanitize the logical flags.
  check.logical(moral)
  check.logical(debug)
  check.logical(wlbl)

  # refuse cyclic graphs, so that the input is guaranteed to be a valid DAG.
  if (!is.acyclic(x$arcs, names(x$nodes), directed = TRUE))
    stop("the specified network contains cycles.")

  return(cpdag.backend(x = x, moral = moral, fix = FALSE, wlbl = wlbl,
           debug = debug))

}#CPDAG
# construct a consistent DAG extension of a PDAG.
cextend = function(x, strict = TRUE, debug = FALSE) {

  # make sure x is an object of class bn.
  check.bn(x)
  # sanitize the logical flags.
  check.logical(debug)
  check.logical(strict)

  # refuse cyclic graphs, so that the extension can actually be a DAG.
  if (!is.acyclic(x$arcs, names(x$nodes), directed = TRUE))
    stop("the specified network contains cycles.")

  extension = cpdag.extension(x = x, debug = debug)

  # if any arc is still undirected, no consistent extension was found;
  # fail or just warn depending on the value of strict.
  if (is.pdag(extension$arcs, names(extension$nodes))) {

    if (strict)
      stop("no consistent extension of ", deparse(substitute(x)),
        " is possible.")
    else
      warning("no consistent extension of ", deparse(substitute(x)),
        " is possible.")

  }#THEN

  return(extension)

}#CEXTEND
# report the v-structures in the network.
vstructs = function(x, arcs = FALSE, moral = TRUE, debug = FALSE) {

  # make sure x is an object of class bn.
  check.bn(x)
  # sanitize the logical flags.
  check.logical(arcs)
  check.logical(moral)
  check.logical(debug)

  return(vstructures(x = x, arcs = arcs, moral = moral, debug = debug))

}#VSTRUCTS
# derive the moral graph of a network.
moral = function(x, debug = FALSE) {

  # make sure x is an object of class bn.
  check.bn(x)
  # sanitize the debug flag.
  check.logical(debug)

  return(dag2ug.backend(x = x, moral = TRUE, debug = debug))

}#MORAL
# mutilated network used in likelihood weighting.
mutilated = function(x, evidence) {

  # x can be either a network structure ("bn") or a fitted network ("bn.fit").
  check.bn.or.fit(x)
  # sanitize the evidence against the network.
  evidence = check.mutilated.evidence(evidence, graph = x)

  # dispatch to the backend matching the class of x.
  if (is(x, "bn"))
    return(mutilated.backend.bn(x, evidence))
  else
    return(mutilated.backend.fitted(x, evidence))

}#MUTILATED
# test d-separation between two nodes, given a conditioning set.
dsep = function(bn, x, y, z) {

  # bn can be either a network structure ("bn") or a fitted network ("bn.fit").
  check.bn.or.fit(bn)
  # x and y must be single, valid nodes.
  check.nodes(x, graph = bn, max.nodes = 1)
  check.nodes(y, graph = bn, max.nodes = 1)
  # z is an optional (possibly empty) conditioning set.
  if (missing(z))
    z = c()
  else
    check.nodes(z, graph = bn, min.nodes = 0)

  # work on the network structure, discarding the parameters if needed.
  if (is(bn, "bn.fit"))
    bn = bn.net(bn)
  # a partially directed graph is taken to be a CPDAG and extended to a DAG.
  if (!is.dag(bn$arcs, names(bn$nodes)))
    bn = cpdag.extension(bn)

  return(dseparation(bn = bn, x = x, y = y, z = z))

}#DSEP
| /R/frontend-bn.R | no_license | josealberto-arcos-sanchez/bnlearn | R | false | false | 5,316 | r |
# get the number of parameters of the bayesian network.
# x may be a network structure ("bn") or a fitted network ("bn.fit"); the
# 'data' argument is required (and used) only in the former case.
nparams = function(x, data, effective = FALSE, debug = FALSE) {
# check x's class.
check.bn.or.fit(x)
# check debug and effective.
check.logical(debug)
check.logical(effective)
if (is(x, "bn")) {
# check the data are there.
check.data(data)
# check the network against the data.
check.bn.vs.data(x, data)
# the number of parameters is unknown for partially directed graphs.
if (is.pdag(x$arcs, names(x$nodes)))
stop("the graph is only partially directed.")
if (effective) {
# fit the network to compute the number of non-zero parameters.
x = bn.fit(x, data)
return(nparams.fitted(x, effective = TRUE, debug = debug))
}#THEN
else {
return(nparams.backend(x, data = data, debug = debug))
}#ELSE
}#THEN
else {
# fitted networks already carry their parameters, so 'data' is unused.
if (!missing(data))
warning("unused argument data.")
return(nparams.fitted(x, effective = effective, debug = debug))
}#ELSE
}#NPARAMS
# get the number of tests/scores used in structure learning.
ntests = function(x) {
  # check x's class.
  check.bn(x)
  # the counter is stored in the learning metadata of the bn object.
  x[["learning"]][["ntests"]]
}#NTESTS
# structural hamming distance.
# learned, true: two "bn" objects over the same node set.
# wlbl: forwarded to the backend; presumably enables special handling of
# whitelisted/blacklisted arcs -- confirm in structural.hamming.distance().
shd = function(learned, true, wlbl = FALSE, debug = FALSE) {
# check learned's and true's class.
check.bn(learned)
check.bn(true)
# check debug and wlbl.
check.logical(debug)
check.logical(wlbl)
# the two networks must have the same node set.
match.bn(learned, true)
structural.hamming.distance(learned = learned, true = true, wlbl = wlbl,
debug = debug)
}#SHD
# Hamming distance.
# learned, true: two "bn" objects over the same node set; the actual metric
# is computed by hamming.distance() -- presumably on the graphs' arc sets,
# confirm in the backend.
hamming = function(learned, true, debug = FALSE) {
# check learned's and true's class.
check.bn(learned)
check.bn(true)
# check debug.
check.logical(debug)
# the two networks must have the same node set.
match.bn(learned, true)
hamming.distance(learned = learned, true = true, debug = debug)
}#HAMMING
# get the whitelist used by the learning algorithm.
whitelist = function(x) {
  # check x's class.
  check.bn(x)
  wl = x$learning$whitelist
  # fall back to an empty, correctly-shaped arc matrix when no whitelist was
  # used, so callers can always treat the result as a two-column matrix.
  if (is.null(wl))
    wl = matrix(character(0), nrow = 0, ncol = 2,
          dimnames = list(NULL, c("from", "to")))
  return(wl)
}#WHITELIST
# get the blacklist used by the learning algorithm.
blacklist = function(x) {
  # check x's class.
  check.bn(x)
  bl = x$learning$blacklist
  # fall back to an empty, correctly-shaped arc matrix when no blacklist was
  # used, so callers can always treat the result as a two-column matrix.
  if (is.null(bl))
    bl = matrix(character(0), nrow = 0, ncol = 2,
          dimnames = list(NULL, c("from", "to")))
  return(bl)
}#BLACKLIST
# reconstruct the equivalence class of a network.
# moral, wlbl: forwarded to the backend; moral presumably controls how
# unshielded colliders are detected -- confirm in cpdag.backend().
cpdag = function(x, moral = TRUE, wlbl = FALSE, debug = FALSE) {
# check x's class.
check.bn(x)
# check moral, debug and wlbl.
check.logical(moral)
check.logical(debug)
check.logical(wlbl)
# check whether the graph is acyclic, to be sure to return a DAG.
if (!is.acyclic(x$arcs, names(x$nodes), directed = TRUE))
stop("the specified network contains cycles.")
cpdag.backend(x = x, moral = moral, fix = FALSE, wlbl = wlbl, debug = debug)
}#CPDAG
# construct a consistent DAG extension of a PDAG.
# If no consistent extension exists, either stop (strict = TRUE) or warn
# and return the partially directed graph produced by cpdag.extension().
cextend = function(x, strict = TRUE, debug = FALSE) {

  # capture the caller's expression up front for diagnostic messages.
  x.label = deparse(substitute(x))

  # check x's class.
  check.bn(x)
  # check debug.
  check.logical(debug)
  # check strict.
  check.logical(strict)
  # check whether the graph is acyclic, to be sure to return a DAG.
  if (!is.acyclic(x$arcs, names(x$nodes), directed = TRUE))
    stop("the specified network contains cycles.")

  # 'ext' (not 'cpdag') avoids shadowing the cpdag() function defined above.
  ext = cpdag.extension(x = x, debug = debug)

  # if the graph is still only partially directed, the extension failed;
  # fail hard or just warn depending on 'strict'.
  if (is.pdag(ext$arcs, names(ext$nodes))) {

    if (strict)
      stop("no consistent extension of ", x.label, " is possible.")
    else
      warning("no consistent extension of ", x.label, " is possible.")

  }#THEN

  return(ext)

}#CEXTEND
# report v-structures in the network.
# arcs: forwarded to the backend; presumably returns the arcs making up the
# v-structures instead of node triplets when TRUE -- confirm in
# vstructures().  moral: also forwarded to the backend.
vstructs = function(x, arcs = FALSE, moral = TRUE, debug = FALSE) {
# check x's class.
check.bn(x)
# check debug, moral and arcs.
check.logical(arcs)
check.logical(moral)
check.logical(debug)
vstructures(x = x, arcs = arcs, moral = moral, debug = debug)
}#VSTRUCTS
# construct the moral graph of a network (undirected, with parents married).
moral = function(x, debug = FALSE) {
# check x's class.
check.bn(x)
# check debug.
check.logical(debug)
# convert the DAG to an undirected graph; moral = TRUE links co-parents.
dag2ug.backend(x = x, moral = TRUE, debug = debug)
}#MORAL
# mutilated network used in likelihood weighting.
mutilated = function(x, evidence) {
  # check x's class.
  check.bn.or.fit(x)
  # validate the evidence against the network.
  evidence = check.mutilated.evidence(evidence, graph = x)
  # dispatch on the representation: network structure vs. fitted network.
  if (is(x, "bn")) {
    mutilated.backend.bn(x, evidence)
  }#THEN
  else {
    mutilated.backend.fitted(x, evidence)
  }#ELSE
}#MUTILATED
# test d-separation.
# bn: a "bn" or "bn.fit" object; x, y: single node labels; z: optional set
# of conditioning nodes (defaults to the empty set when missing).
dsep = function(bn, x, y, z) {
# check bn's class.
check.bn.or.fit(bn)
# check the sets of nodes.
check.nodes(x, graph = bn, max.nodes = 1)
check.nodes(y, graph = bn, max.nodes = 1)
if (missing(z))
z = c()
else
check.nodes(z, graph = bn, min.nodes = 0)
# go back to the network structure if needed.
if (is(bn, "bn.fit"))
bn = bn.net(bn)
# if the graph is not directed, take it as a CPDAG and extend it to a DAG
# before running the d-separation test.
if (!is.dag(bn$arcs, names(bn$nodes)))
bn = cpdag.extension(bn)
dseparation(bn = bn, x = x, y = y, z = z)
}#DSEP
|
#' dcem_star_train: Part of DCEM package.
#'
#' Implements the improved EM* ([1], [2]) algorithm. EM* avoids revisiting all but high
#' expressive data via structure based data segregation thus resulting in significant speed gain.
#' It calls the \code{\link{dcem_star_cluster_uv}} routine internally (univariate data) and
#' \code{\link{dcem_star_cluster_mv}} for (multivariate data).
#'
#' @param data (dataframe): The dataframe containing the data. See \code{\link{trim_data}} for
#' cleaning the data.
#'
#' @param iteration_count (numeric): The number of iterations for which the algorithm should run, if the
#' convergence is not achieved then the algorithm stops and exit. \strong{Default: 200}.
#'
#' @param num_clusters (numeric): The number of clusters. Default: \strong{2}
#'
#' @param seed_meu (matrix): The user specified set of meu to use as initial centroids. Default: \strong{None}
#'
#' @param seeding (string): The initialization scheme ('rand', 'improved'). Default: \strong{rand}
#'
#' @return
#' A list of objects. This list contains parameters associated with the Gaussian(s)
#' (posterior probabilities, meu, sigma and priors). The
#' parameters can be accessed as follows where sample_out is the list containing
#' the output:
#'
#'\enumerate{
#' \item (1) Posterior Probabilities: \strong{sample_out$prob}
#' A matrix of posterior-probabilities.
#'
#' \item (2) Meu(s): \strong{sample_out$meu}
#'
#' For multivariate data: It is a matrix of meu(s). Each row in
#' the matrix corresponds to one mean.
#'
#' For univariate data: It is a vector of meu(s). Each element of the vector
#' corresponds to one meu.
#'
#' \item (3) Co-variance matrices: \strong{sample_out$sigma}
#'
#' For multivariate data: List of co-variance matrices.
#'
#' Standard-deviation: \strong{sample_out$sigma}
#'
#' For univariate data: Vector of standard deviation.
#'
#' \item (4) Priors: \strong{sample_out$prior}
#' A vector of priors.
#'
#' \item (5) Membership: \strong{sample_out$membership}: A dataframe of
#' cluster membership for data. Columns numbers are data indices and values
#' are the assigned clusters.
#' }
#'
#' @usage
#' dcem_star_train(data, iteration_count, num_clusters, seed_meu, seeding)
#'
#' @references
#' Parichit Sharma, Hasan Kurban, Mehmet Dalkilic DCEM: An R package for clustering big data via
#' data-centric modification of Expectation Maximization, SoftwareX, 17, 100944 URL
#' https://doi.org/10.1016/j.softx.2021.100944
#'
#' @examples
#'# Simulating a mixture of univariate samples from three distributions
#'# with mean as 20, 70 and 100 and standard deviation as 10, 100 and 40 respectively.
#' sample_uv_data = as.data.frame(c(rnorm(100, 20, 5), rnorm(70, 70, 1), rnorm(50, 100, 2)))
#'
#'# Randomly shuffle the samples.
#' sample_uv_data = as.data.frame(sample_uv_data[sample(nrow(sample_uv_data)),])
#'
#'# Calling the dcem_star_train() function on the simulated data with iteration count of 1000
#'# and random seeding respectively.
#' sample_uv_out = dcem_star_train(sample_uv_data, num_clusters = 3, iteration_count = 100)
#'
#'# Simulating a mixture of multivariate samples from 2 gaussian distributions.
#' sample_mv_data = as.data.frame(rbind(MASS::mvrnorm(n=2, rep(2,5), Sigma = diag(5)),
#' MASS::mvrnorm(n=5, rep(14,5), Sigma = diag(5))))
#'
#'# Calling the dcem_star_train() function on the simulated data with iteration count of 100 and
#'# random seeding method respectively.
#' sample_mv_out = dcem_star_train(sample_mv_data, iteration_count = 100, num_clusters=2)
#'
#'# Access the output
#' sample_mv_out$meu
#' sample_mv_out$sigma
#' sample_mv_out$prior
#' sample_mv_out$prob
#' print(sample_mv_out$membership)
#'
#' @export
# Entry point for the EM* clustering algorithm (see roxygen block above for
# the full contract).  Resolves defaults, cleans the input, then dispatches
# to the univariate or multivariate clustering routine.
dcem_star_train <-
  function(data,
           iteration_count,
           num_clusters, seed_meu, seeding) {
    # -- Resolve optional arguments ---------------------------------------
    if (missing(iteration_count)) {
      iteration_count = 200
      print("Using default value for iteration count = 200.")
    }
    else {
      print(paste("Specified iterations = ", iteration_count))
    }
    if (missing(num_clusters)) {
      num_clusters = 2
      print("Using default value for number of clusters = 2.")
    }
    else {
      print(paste("Specified number of clusters = ", num_clusters))
    }
    if (missing(seeding) || seeding == "rand") {
      seeding = "rand"
      print("Using the random initialisation scheme.")
    }
    else {
      print("Using the improved Kmeans++ initialisation scheme.")
    }
    # -- Clean the input ---------------------------------------------------
    # Coerce every column to numeric, then drop rows containing missing
    # values.  BUGFIX: the previous code did `data[is.na(data)] <- NULL`,
    # which is an error on a matrix whenever NAs are present (NULL is a
    # zero-length replacement), so missing data was never actually removed.
    data <- apply(data, 2, as.numeric)
    data <- data[stats::complete.cases(data), , drop = FALSE]
    # Safe copy the data for operations.
    test_data <- as.matrix(data)
    num_data <- nrow(test_data)
    valid_columns <- ncol(test_data)
    # Variable to store the output.
    emstar_out = list()
    # -- Multivariate data: initialise meu/sigma/priors, then run EM* -----
    if (valid_columns >= 2) {
      if (missing(seed_meu)) {
        if (seeding == "rand") {
          meu = meu_mv(test_data, num_clusters)
        }
        else if (seeding == "improved") {
          meu = meu_mv_impr(test_data, num_clusters)
        }
      }
      else {
        # User-supplied centroids take precedence over the seeding scheme.
        meu <- seed_meu
      }
      sigma = sigma_mv(num_clusters, valid_columns)
      priors = get_priors(num_clusters)
      emstar_out = dcem_star_cluster_mv(
        test_data,
        meu,
        sigma,
        priors,
        num_clusters,
        iteration_count,
        num_data)
    }
    # -- Univariate data ---------------------------------------------------
    if (valid_columns < 2) {
      # BUGFIX: seed_meu used to be silently ignored for univariate data,
      # even though the documentation promises user-specified centroids;
      # honour it here, mirroring the multivariate branch.
      if (missing(seed_meu)) {
        if (seeding == "rand") {
          meu = meu_uv(test_data, num_clusters)
        }
        else if (seeding == "improved") {
          meu = meu_uv_impr(test_data, num_clusters)
        }
      }
      else {
        meu <- seed_meu
      }
      sigma = sigma_uv(test_data, num_clusters)
      priors = get_priors(num_clusters)
      emstar_out = dcem_star_cluster_uv(
        test_data,
        meu,
        sigma,
        priors,
        num_clusters,
        num_data,
        iteration_count)
    }
    return(emstar_out)
  }
| /R/dcem_star_train.R | no_license | parichit/DCEM | R | false | false | 6,204 | r | #' dcem_star_train: Part of DCEM package.
#'
#' Implements the improved EM* ([1], [2]) algorithm. EM* avoids revisiting all but high
#' expressive data via structure based data segregation thus resulting in significant speed gain.
#' It calls the \code{\link{dcem_star_cluster_uv}} routine internally (univariate data) and
#' \code{\link{dcem_star_cluster_mv}} for (multivariate data).
#'
#' @param data (dataframe): The dataframe containing the data. See \code{\link{trim_data}} for
#' cleaning the data.
#'
#' @param iteration_count (numeric): The number of iterations for which the algorithm should run, if the
#' convergence is not achieved then the algorithm stops and exit. \strong{Default: 200}.
#'
#' @param num_clusters (numeric): The number of clusters. Default: \strong{2}
#'
#' @param seed_meu (matrix): The user specified set of meu to use as initial centroids. Default: \strong{None}
#'
#' @param seeding (string): The initialization scheme ('rand', 'improved'). Default: \strong{rand}
#'
#' @return
#' A list of objects. This list contains parameters associated with the Gaussian(s)
#' (posterior probabilities, meu, sigma and priors). The
#' parameters can be accessed as follows where sample_out is the list containing
#' the output:
#'
#'\enumerate{
#' \item (1) Posterior Probabilities: \strong{sample_out$prob}
#' A matrix of posterior-probabilities.
#'
#' \item (2) Meu(s): \strong{sample_out$meu}
#'
#' For multivariate data: It is a matrix of meu(s). Each row in
#' the matrix corresponds to one mean.
#'
#' For univariate data: It is a vector of meu(s). Each element of the vector
#' corresponds to one meu.
#'
#' \item (3) Co-variance matrices: \strong{sample_out$sigma}
#'
#' For multivariate data: List of co-variance matrices.
#'
#' Standard-deviation: \strong{sample_out$sigma}
#'
#' For univariate data: Vector of standard deviation.
#'
#' \item (4) Priors: \strong{sample_out$prior}
#' A vector of priors.
#'
#' \item (5) Membership: \strong{sample_out$membership}: A dataframe of
#' cluster membership for data. Columns numbers are data indices and values
#' are the assigned clusters.
#' }
#'
#' @usage
#' dcem_star_train(data, iteration_count, num_clusters, seed_meu, seeding)
#'
#' @references
#' Parichit Sharma, Hasan Kurban, Mehmet Dalkilic DCEM: An R package for clustering big data via
#' data-centric modification of Expectation Maximization, SoftwareX, 17, 100944 URL
#' https://doi.org/10.1016/j.softx.2021.100944
#'
#' @examples
#'# Simulating a mixture of univariate samples from three distributions
#'# with mean as 20, 70 and 100 and standard deviation as 10, 100 and 40 respectively.
#' sample_uv_data = as.data.frame(c(rnorm(100, 20, 5), rnorm(70, 70, 1), rnorm(50, 100, 2)))
#'
#'# Randomly shuffle the samples.
#' sample_uv_data = as.data.frame(sample_uv_data[sample(nrow(sample_uv_data)),])
#'
#'# Calling the dcem_star_train() function on the simulated data with iteration count of 1000
#'# and random seeding respectively.
#' sample_uv_out = dcem_star_train(sample_uv_data, num_clusters = 3, iteration_count = 100)
#'
#'# Simulating a mixture of multivariate samples from 2 gaussian distributions.
#' sample_mv_data = as.data.frame(rbind(MASS::mvrnorm(n=2, rep(2,5), Sigma = diag(5)),
#' MASS::mvrnorm(n=5, rep(14,5), Sigma = diag(5))))
#'
#'# Calling the dcem_star_train() function on the simulated data with iteration count of 100 and
#'# random seeding method respectively.
#' sample_mv_out = dcem_star_train(sample_mv_data, iteration_count = 100, num_clusters=2)
#'
#'# Access the output
#' sample_mv_out$meu
#' sample_mv_out$sigma
#' sample_mv_out$prior
#' sample_mv_out$prob
#' print(sample_mv_out$membership)
#'
#' @export
# Entry point for the EM* clustering algorithm (see roxygen block above for
# the full contract).  Resolves defaults, cleans the input, then dispatches
# to the univariate or multivariate clustering routine.
dcem_star_train <-
  function(data,
           iteration_count,
           num_clusters, seed_meu, seeding) {
    # -- Resolve optional arguments ---------------------------------------
    if (missing(iteration_count)) {
      iteration_count = 200
      print("Using default value for iteration count = 200.")
    }
    else {
      print(paste("Specified iterations = ", iteration_count))
    }
    if (missing(num_clusters)) {
      num_clusters = 2
      print("Using default value for number of clusters = 2.")
    }
    else {
      print(paste("Specified number of clusters = ", num_clusters))
    }
    if (missing(seeding) || seeding == "rand") {
      seeding = "rand"
      print("Using the random initialisation scheme.")
    }
    else {
      print("Using the improved Kmeans++ initialisation scheme.")
    }
    # -- Clean the input ---------------------------------------------------
    # Coerce every column to numeric, then drop rows containing missing
    # values.  BUGFIX: the previous code did `data[is.na(data)] <- NULL`,
    # which is an error on a matrix whenever NAs are present (NULL is a
    # zero-length replacement), so missing data was never actually removed.
    data <- apply(data, 2, as.numeric)
    data <- data[stats::complete.cases(data), , drop = FALSE]
    # Safe copy the data for operations.
    test_data <- as.matrix(data)
    num_data <- nrow(test_data)
    valid_columns <- ncol(test_data)
    # Variable to store the output.
    emstar_out = list()
    # -- Multivariate data: initialise meu/sigma/priors, then run EM* -----
    if (valid_columns >= 2) {
      if (missing(seed_meu)) {
        if (seeding == "rand") {
          meu = meu_mv(test_data, num_clusters)
        }
        else if (seeding == "improved") {
          meu = meu_mv_impr(test_data, num_clusters)
        }
      }
      else {
        # User-supplied centroids take precedence over the seeding scheme.
        meu <- seed_meu
      }
      sigma = sigma_mv(num_clusters, valid_columns)
      priors = get_priors(num_clusters)
      emstar_out = dcem_star_cluster_mv(
        test_data,
        meu,
        sigma,
        priors,
        num_clusters,
        iteration_count,
        num_data)
    }
    # -- Univariate data ---------------------------------------------------
    if (valid_columns < 2) {
      # BUGFIX: seed_meu used to be silently ignored for univariate data,
      # even though the documentation promises user-specified centroids;
      # honour it here, mirroring the multivariate branch.
      if (missing(seed_meu)) {
        if (seeding == "rand") {
          meu = meu_uv(test_data, num_clusters)
        }
        else if (seeding == "improved") {
          meu = meu_uv_impr(test_data, num_clusters)
        }
      }
      else {
        meu <- seed_meu
      }
      sigma = sigma_uv(test_data, num_clusters)
      priors = get_priors(num_clusters)
      emstar_out = dcem_star_cluster_uv(
        test_data,
        meu,
        sigma,
        priors,
        num_clusters,
        num_data,
        iteration_count)
    }
    return(emstar_out)
  }
|
# Auto-generated fuzzing fixture: exercises valuer::calc_account() with a
# fixed argument list whose 'spot' vector mixes zeros, NaN, a subnormal and
# a huge-magnitude negative value to probe numerical edge cases.
testlist <- list(barrier = 0, ben = numeric(0), fee = 0, penalty = numeric(0), spot = c(0, 0, NaN, 5.87747738120058e-39, -4.20646884065946e-296, 8.88824096868403e-321, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
# Invoke the fuzz target with the fixture arguments.
result <- do.call(valuer::calc_account,testlist)
str(result) | /valuer/inst/testfiles/calc_account/libFuzzer_calc_account/calc_account_valgrind_files/1616985837-test.R | no_license | akhikolla/updatedatatype-list4 | R | false | false | 502 | r | testlist <- list(barrier = 0, ben = numeric(0), fee = 0, penalty = numeric(0), spot = c(0, 0, NaN, 5.87747738120058e-39, -4.20646884065946e-296, 8.88824096868403e-321, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(valuer::calc_account,testlist)
str(result) |
# Present value under three interest conventions, selected by 'num':
#   "satu" -- simple interest:          a / (1 + i * t)
#   "dua"  -- compounded m times/year:  a / (1 + i / m)^(m * t)
#   "tiga" -- continuous compounding:   a / exp(i * t)
# The chosen value is printed and returned; an unrecognised 'num' yields
# NULL invisibly (default switch() behaviour).
pilihPV <- function(num, a, i, t, m) {
  switch(num,
    satu = {
      pv.simple <- a / (1 + i * t)
      print(pv.simple)
    },
    dua = {
      pv.compound <- a / ((1 + (i / m))^(m * t))
      print(pv.compound)
    },
    tiga = {
      pv.continuous <- a / exp(i * t)
      print(pv.continuous)
    }
  )
}
| /present value bunga.R | no_license | 16611037/Salman-Fadhilurrrohman | R | false | false | 341 | r | pilihPV <- function(num, a, i, t, m)
switch(num,
satu = {
tunggal = a/(1+i*t)
print(tunggal)
},
dua = {
nominal = a/((1+(i/m))^(m*t))
print(nominal)
},
tiga = {
kontinu = a/exp(i*t)
print(kontinu)
}
)
|
###########################################################################/**
# @set "class=AffymetrixCelFile"
# @RdocMethod createFrom
#
# @title "Creates a CEL file using another as a template"
#
# \description{
# @get "title".
# }
#
# @synopsis
#
# \arguments{
# \item{filename, path}{The filename and path of to the CEL
# file to be created.}
# \item{version}{The file-format version of the CEL file to be created.}
# \item{methods}{If \code{"copy"}, the new file is created as a copy of the
# template file. If \code{"create"}, the new file is created from
# scratch from the template file.}
# \item{clear}{If @TRUE, the fields of the CEL file are cleared (zeroed),
# otherwise they contain the same information as the source file.}
# \item{defValue}{A @numeric value that cleared/allocated elements have.}
# \item{...}{Not used.}
# \item{verbose}{See "R.utils::Verbose".}
# }
#
# \value{
# Returns a @see "AffymetrixCelFile" reference to the new CEL file.
# }
#
# @author "HB"
#
# \seealso{
# @seeclass
# }
#
# @keyword IO
# @keyword programming
#*/###########################################################################
setMethodS3("createFrom", "AffymetrixCelFile", function(this, filename, path=NULL, overwrite=FALSE, skip=!overwrite, version=c("4", "3"), methods=c("copy", "create"), clear=FALSE, defValue=0, ..., verbose=FALSE) {
# Creates a new CEL file at 'pathname' from this template file, either by
# copying the template ("copy") or by building it from scratch ("create"),
# and returns an AffymetrixCelFile for it.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Validate arguments
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Argument 'filename' and 'path':
pathname <- Arguments$getWritablePathname(filename, path=path)
# Rename lower-case *.cel to *.CEL, if that is the case. Old versions
# of the package generated lower-case CEL files. /HB 2007-08-09
if (regexpr("[.]cel$", pathname) != -1) {
pathname <- AffymetrixFile$renameToUpperCaseExt(pathname)
}
pathname <- Arguments$getWritablePathname(pathname,
mustNotExist=(!overwrite && !skip))
# Argument 'version':
version <- match.arg(version)
# Argument 'methods':
if (!all(methods %in% c("copy", "create"))) {
throw("Unknown value of argument 'methods': ",
paste(methods, collapse=", "))
}
# Argument 'defValue':
defValue <- Arguments$getNumeric(defValue)
# Argument 'verbose':
verbose <- Arguments$getVerbose(verbose)
if (verbose) {
pushState(verbose)
on.exit(popState(verbose))
}
# Get the CDF of the template CEL file
cdf <- getCdf(this)
verbose && enter(verbose, "Creating CEL file")
verbose && cat(verbose, "Chip type: ", getChipType(cdf))
verbose && cat(verbose, "Pathname: ", pathname)
# Nothing to do?
# If 'skip' is set and the target already exists, reuse it -- but only
# after verifying its file-format version matches the requested one.
if (skip && isFile(pathname)) {
verbose && cat(verbose, "Returning already existing file.")
res <- newInstance(this, pathname)
ver <- getHeader(res)$version
if (ver != version) {
# NOTE(review): "Cannot not retrieve" below is a double negative; fixing
# it would change a user-visible error message -- confirm before editing.
throw("Cannot not retrieve CEL file of version ", version,
". The existing CEL file has version ", ver, ": ", pathname)
}
setCdf(res, cdf)
verbose && exit(verbose)
return(res)
}
# First create/copy to a temporary file, then rename
# (atomic-write pattern: a crash never leaves a partial file at 'pathname').
pathnameT <- pushTemporaryFile(pathname, verbose=verbose)
# Try each requested method in order; the first one that succeeds breaks
# out of the loop, and per-method failure messages accumulate in 'msgs'.
msgs <- list()
res <- NULL
for (method in methods) {
verbose && enter(verbose, "Method '", method, "'")
if (method == "copy") {
# Check version of template CEL file
ver <- getHeader(this)$version
if (ver != version) {
msgs[[method]] <- paste("Cannot create CEL file of version ", version,
" (", pathname, "). Template CEL file is of version ",
ver, ": ", getPathname(this), sep="")
verbose && cat(verbose, msgs[[method]])
verbose && exit(verbose)
next
}
# 1. Create a temporary file
res <- copyTo(this, filename=pathnameT, path=NULL, copy.mode=FALSE,
verbose=less(verbose))
# verbose && cat(verbose, "Temporary file:")
# verbose && print(verbose, res)
# 2. Update the temporary file
if (clear) {
clearData(res, ..., value=defValue, .forSure=TRUE,
verbose=less(verbose))
}
# 3. Rename the temporary file
renameTo(res, filename=pathname, verbose=less(verbose))
# Break out of the methods loop
verbose && exit(verbose)
break
}
if (method == "create") {
# Creating from scratch is only implemented for the binary (v4) format.
if (version != "4") {
msgs[[method]] <- paste(
"Can only create binary CEL files (version 4) from scratch, ",
"not files of version ", version, ": ", pathname, sep="")
verbose && cat(verbose, msgs[[method]])
verbose && exit(verbose)
next
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Setting up CEL header
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
fullname <- getFullName(this)
celHeader <- .cdfHeaderToCelHeader(getHeader(cdf), sampleName=fullname)
# Add some extra information about what the CEL file is for
# NOTE(review): "Descripion" is misspelled; the key is written into the
# CEL header parameters, so renaming it changes the output files --
# confirm downstream consumers before fixing.
params <- c(Descripion=sprintf("This CEL file contains data saved by the aroma.affymetrix v%s package.", getVersion(aroma.affymetrix)))
parameters <- gsub(" ", "_", params)
names(parameters) <- names(params)
parameters <- paste(names(parameters), parameters, sep=":")
parameters <- paste(parameters, collapse=";")
parameters <- paste(celHeader$parameters, parameters, "", sep=";")
parameters <- gsub(";;", ";", parameters)
parameters <- gsub(";$", "", parameters)
celHeader$parameters <- parameters
# Not needed anymore
params <- parameters <- NULL
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Creating empty CEL file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# 1. Create a temporary file
verbose && enter(verbose, "Creating an empty temporary CEL file")
.createCel(pathnameT, header=celHeader, overwrite=overwrite, ...,
verbose=less(verbose))
# Not needed anymore
celHeader <- NULL
verbose && exit(verbose)
# 2. Update the temporary file
if (clear) {
if (defValue != 0) {
res <- newInstance(this, pathnameT)
clearData(res, ..., value=defValue, .forSure=TRUE,
verbose=less(verbose))
# Not needed anymore
res <- NULL
}
} else {
verbose && enter(verbose, "Copying CEL data")
# Copy probe data in chunks of 1e6 cells to bound peak memory usage.
cells <- seq_len(nbrOfCells(this))
lapplyInChunks(cells, function(cells) {
verbose && enter(verbose, "Reading subset of data from source CEL file")
data <- .readCel(getPathname(this), indices=cells, readIntensities=TRUE, readStdvs=TRUE, readPixels=TRUE)
verbose && str(verbose, data, level=-50)
verbose && printf(verbose, "RAM: %s\n", hsize(object.size(data), digits = 2L, standard = "IEC"), level=-40)
verbose && exit(verbose)
gc <- gc()
verbose && enter(verbose, "Writing data to new CEL file")
.updateCel(pathnameT, indices=cells, intensities=data)
verbose && exit(verbose)
# Not needed anymore
data <- NULL
gc <- gc()
verbose && print(verbose, gc)
}, chunkSize=1e6, verbose=verbose)
# Not needed anymore
cells <- NULL
verbose && exit(verbose)
}
# 3. Rename the temporary file
popTemporaryFile(pathnameT, verbose=verbose)
res <- newInstance(this, pathname)
# Break out of the methods loop
verbose && exit(verbose)
break
}
verbose && exit(verbose)
} # for (method ...)
# Every requested method failed: report all collected messages at once.
if (is.null(res)) {
msgs <- unlist(msgs)
msgs <- paste(names(msgs), msgs, sep=": ")
msgs <- paste(msgs, collapse="; ")
msg <- paste("Failed to create CEL file. Error messages: ", msgs)
verbose && cat(verbose, "Error: ", msg)
throw(msg)
}
# Make sure the CDF is carried down
setCdf(res, cdf)
verbose && print(verbose, res)
verbose && exit(verbose)
res
}, protected=TRUE)
| /R/AffymetrixCelFile.createFrom.R | no_license | HenrikBengtsson/aroma.affymetrix | R | false | false | 8,348 | r | ###########################################################################/**
# @set "class=AffymetrixCelFile"
# @RdocMethod createFrom
#
# @title "Creates a CEL file using another as a template"
#
# \description{
# @get "title".
# }
#
# @synopsis
#
# \arguments{
# \item{filename, path}{The filename and path of to the CEL
# file to be created.}
# \item{version}{The file-format version of the CEL file to be created.}
# \item{methods}{If \code{"copy"}, the new file is created as a copy of the
# template file. If \code{"create"}, the new file is created from
# scratch from the template file.}
# \item{clear}{If @TRUE, the fields of the CEL file are cleared (zeroed),
# otherwise they contain the same information as the source file.}
# \item{defValue}{A @numeric value that cleared/allocated elements have.}
# \item{...}{Not used.}
# \item{verbose}{See "R.utils::Verbose".}
# }
#
# \value{
# Returns a @see "AffymetrixCelFile" reference to the new CEL file.
# }
#
# @author "HB"
#
# \seealso{
# @seeclass
# }
#
# @keyword IO
# @keyword programming
#*/###########################################################################
setMethodS3("createFrom", "AffymetrixCelFile", function(this, filename, path=NULL, overwrite=FALSE, skip=!overwrite, version=c("4", "3"), methods=c("copy", "create"), clear=FALSE, defValue=0, ..., verbose=FALSE) {
# Creates a new CEL file at 'pathname' from this template file, either by
# copying the template ("copy") or by building it from scratch ("create"),
# and returns an AffymetrixCelFile for it.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Validate arguments
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Argument 'filename' and 'path':
pathname <- Arguments$getWritablePathname(filename, path=path)
# Rename lower-case *.cel to *.CEL, if that is the case. Old versions
# of the package generated lower-case CEL files. /HB 2007-08-09
if (regexpr("[.]cel$", pathname) != -1) {
pathname <- AffymetrixFile$renameToUpperCaseExt(pathname)
}
pathname <- Arguments$getWritablePathname(pathname,
mustNotExist=(!overwrite && !skip))
# Argument 'version':
version <- match.arg(version)
# Argument 'methods':
if (!all(methods %in% c("copy", "create"))) {
throw("Unknown value of argument 'methods': ",
paste(methods, collapse=", "))
}
# Argument 'defValue':
defValue <- Arguments$getNumeric(defValue)
# Argument 'verbose':
verbose <- Arguments$getVerbose(verbose)
if (verbose) {
pushState(verbose)
on.exit(popState(verbose))
}
# Get the CDF of the template CEL file
cdf <- getCdf(this)
verbose && enter(verbose, "Creating CEL file")
verbose && cat(verbose, "Chip type: ", getChipType(cdf))
verbose && cat(verbose, "Pathname: ", pathname)
# Nothing to do?
# If 'skip' is set and the target already exists, reuse it -- but only
# after verifying its file-format version matches the requested one.
if (skip && isFile(pathname)) {
verbose && cat(verbose, "Returning already existing file.")
res <- newInstance(this, pathname)
ver <- getHeader(res)$version
if (ver != version) {
# NOTE(review): "Cannot not retrieve" below is a double negative; fixing
# it would change a user-visible error message -- confirm before editing.
throw("Cannot not retrieve CEL file of version ", version,
". The existing CEL file has version ", ver, ": ", pathname)
}
setCdf(res, cdf)
verbose && exit(verbose)
return(res)
}
# First create/copy to a temporary file, then rename
# (atomic-write pattern: a crash never leaves a partial file at 'pathname').
pathnameT <- pushTemporaryFile(pathname, verbose=verbose)
# Try each requested method in order; the first one that succeeds breaks
# out of the loop, and per-method failure messages accumulate in 'msgs'.
msgs <- list()
res <- NULL
for (method in methods) {
verbose && enter(verbose, "Method '", method, "'")
if (method == "copy") {
# Check version of template CEL file
ver <- getHeader(this)$version
if (ver != version) {
msgs[[method]] <- paste("Cannot create CEL file of version ", version,
" (", pathname, "). Template CEL file is of version ",
ver, ": ", getPathname(this), sep="")
verbose && cat(verbose, msgs[[method]])
verbose && exit(verbose)
next
}
# 1. Create a temporary file
res <- copyTo(this, filename=pathnameT, path=NULL, copy.mode=FALSE,
verbose=less(verbose))
# verbose && cat(verbose, "Temporary file:")
# verbose && print(verbose, res)
# 2. Update the temporary file
if (clear) {
clearData(res, ..., value=defValue, .forSure=TRUE,
verbose=less(verbose))
}
# 3. Rename the temporary file
renameTo(res, filename=pathname, verbose=less(verbose))
# Break out of the methods loop
verbose && exit(verbose)
break
}
if (method == "create") {
# Creating from scratch is only implemented for the binary (v4) format.
if (version != "4") {
msgs[[method]] <- paste(
"Can only create binary CEL files (version 4) from scratch, ",
"not files of version ", version, ": ", pathname, sep="")
verbose && cat(verbose, msgs[[method]])
verbose && exit(verbose)
next
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Setting up CEL header
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
fullname <- getFullName(this)
celHeader <- .cdfHeaderToCelHeader(getHeader(cdf), sampleName=fullname)
# Add some extra information about what the CEL file is for
# NOTE(review): "Descripion" is misspelled; the key is written into the
# CEL header parameters, so renaming it changes the output files --
# confirm downstream consumers before fixing.
params <- c(Descripion=sprintf("This CEL file contains data saved by the aroma.affymetrix v%s package.", getVersion(aroma.affymetrix)))
parameters <- gsub(" ", "_", params)
names(parameters) <- names(params)
parameters <- paste(names(parameters), parameters, sep=":")
parameters <- paste(parameters, collapse=";")
parameters <- paste(celHeader$parameters, parameters, "", sep=";")
parameters <- gsub(";;", ";", parameters)
parameters <- gsub(";$", "", parameters)
celHeader$parameters <- parameters
# Not needed anymore
params <- parameters <- NULL
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Creating empty CEL file
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# 1. Create a temporary file
verbose && enter(verbose, "Creating an empty temporary CEL file")
.createCel(pathnameT, header=celHeader, overwrite=overwrite, ...,
verbose=less(verbose))
# Not needed anymore
celHeader <- NULL
verbose && exit(verbose)
# 2. Update the temporary file
if (clear) {
if (defValue != 0) {
res <- newInstance(this, pathnameT)
clearData(res, ..., value=defValue, .forSure=TRUE,
verbose=less(verbose))
# Not needed anymore
res <- NULL
}
} else {
verbose && enter(verbose, "Copying CEL data")
# Copy probe data in chunks of 1e6 cells to bound peak memory usage.
cells <- seq_len(nbrOfCells(this))
lapplyInChunks(cells, function(cells) {
verbose && enter(verbose, "Reading subset of data from source CEL file")
data <- .readCel(getPathname(this), indices=cells, readIntensities=TRUE, readStdvs=TRUE, readPixels=TRUE)
verbose && str(verbose, data, level=-50)
verbose && printf(verbose, "RAM: %s\n", hsize(object.size(data), digits = 2L, standard = "IEC"), level=-40)
verbose && exit(verbose)
gc <- gc()
verbose && enter(verbose, "Writing data to new CEL file")
.updateCel(pathnameT, indices=cells, intensities=data)
verbose && exit(verbose)
# Not needed anymore
data <- NULL
gc <- gc()
verbose && print(verbose, gc)
}, chunkSize=1e6, verbose=verbose)
# Not needed anymore
cells <- NULL
verbose && exit(verbose)
}
# 3. Rename the temporary file
popTemporaryFile(pathnameT, verbose=verbose)
res <- newInstance(this, pathname)
# Break out of the methods loop
verbose && exit(verbose)
break
}
verbose && exit(verbose)
} # for (method ...)
# Every requested method failed: report all collected messages at once.
if (is.null(res)) {
msgs <- unlist(msgs)
msgs <- paste(names(msgs), msgs, sep=": ")
msgs <- paste(msgs, collapse="; ")
msg <- paste("Failed to create CEL file. Error messages: ", msgs)
verbose && cat(verbose, "Error: ", msg)
throw(msg)
}
# Make sure the CDF is carried down
setCdf(res, cdf)
verbose && print(verbose, res)
verbose && exit(verbose)
res
}, protected=TRUE)
|
# fig2_kar.R -- Figure 2 (Kraskov estimator): information-theoretic coupling
# measures for a linearly coupled bivariate system, plotted against the
# coupling coefficient.
# Requires the estimator implementations sourced below, and the simulated
# series `x` and `y` loaded from linear2.RData.  The active code indexes both
# as matrices with one row per coupling coefficient (assumes at least 11 rows
# and 100000 columns -- TODO confirm against the simulation script; the
# commented-out variants treat `y` as a plain vector instead).
rm(list=ls())
#library("Hmisc")
source("TE_function_kar.R")    # TE():  transfer entropy (Kraskov) -- only used in the commented-out variants
#source("TE_mean.R")
#source("TE_sd.R")
source("MI_function_kar.R")    # MI():  mutual information (Kraskov)
source("CMI_function_kar.R")   # CMI(): conditional mutual information (Kraskov)
load("../../linear2.RData")    # provides the simulated series `x` and `y`
library(doParallel)
#cl<-makeCluster(detectCores())
cl<-5                          # fixed number of parallel workers (instead of detectCores())
registerDoParallel(cl)
################# LONG time series ######################
# For each of the 11 coupling coefficients i, estimate over the last ~5000
# time steps, with a one-step lag between target X_t and sources Y_{t-1},
# X_{t-1}:
#   A = CMI(Y_{t-1}; X_t | X_{t-1})   (transfer-entropy-like term)
#   B = MI(Y_{t-1}; X_t)
#   C = MI(X_{t-1}; X_t)              (self-predictability of X)
# Rows of result4 correspond to coupling coefficients, columns to (A, B, C).
result4<-foreach(i = 1:11,.combine='rbind')%dopar%{
#A<-TE(y[95000:100000],x[i,95000:100000])
A<-CMI(y[i,94999:99999],x[i,95000:100000],x[i,94999:99999])
B<-MI(y[i,94999:99999],x[i,95000:100000])
C<-MI(x[i,94999:99999],x[i,95000:100000])
return(c(A,B,C))}
################### 1000 time units ############
#result3<-foreach(i = 1:11,.combine='rbind')%dopar%{
#A<-TE(y[99000:100000],x[i,99000:100000])
#B<-MI(y[98999:99999],x[i,99000:100000])
#C<-MI(x[94999:99999],x[i,95000:100000])
#return(c(A,B,C))}
######################### 500 time units ###########
#result2<-foreach(i = 1:11,.combine='rbind')%dopar%{
#A<-TE(y[99500:100000],x[i,99500:100000])
#B<-MI(y[99499:99999],x[i,99500:100000])
##C<-MI(x[94999:99999],x[i,95000:100000])
#return(c(A,B))}
######################### 200 time units #############
#result1<-foreach(i = 1:11,.combine='rbind')%dopar%{
#A<-TE(y[99800:100000],x[i,99800:100000])
#B<-MI(y[99799:99999],x[i,99800:100000])
#return(c(A,B))}
####### Derived quantities ######
NS<-result4[,1]-result4[,2]    # Net Synergy: CMI minus the pairwise MI I(Xt;Yt-1)
IXY<-result4[,1]+result4[,3]   # I(Xt;{Yt-1,Xt-1}) = I(Xt;Xt-1) + I(Xt;Yt-1|Xt-1) (chain rule)
############################################################
cp<-seq(0,1,0.1)               # coupling coefficients 0, 0.1, ..., 1 (one per row of result4)
# Plot only the first 9 coupling values (cp in [0, 0.8]); curve colors match
# the legend at the bottom of the script.
plot(cp[1:9],IXY[1:9],type="l",col="red",lwd=2.5,ylim=c(0,0.4),lty=1,pch=18,xlab="coupling coefficient",ylab="Information exchange (nats)",cex.lab=1.2,cex.axis=1.5,main="Kraskov")
#lines(cp,result3[,1],type="l",col="red",lwd=2.5)
#lines(cp,result2[,1],type="l",col="red",lwd=2.5)
#lines(cp,result1[,1],type="l",col="red",lwd=2.5)
lines(cp[1:9],result4[1:9,2],type="l",col="blue",lwd=2.5)
lines(cp[1:9],result4[1:9,3],type="l",col="green",lwd=2.5)
lines(cp[1:9],NS[1:9],type="l",col="black",lwd=2.5)
#lines(cp,result2[,2],type="l",col="blue",lwd=2.5)
#lines(cp,result1[,2],type="l",col="blue",lwd=2.5)
# lt / pc / colors configure the commented-out multi-sample-size legend below;
# they are unused by the active legend call.
lt<-c(1,2,3,4,5,1,2,3,4,5)
pc<-c(18,23,20,21,22,18,23,20,21,22)
colors<-c("red","red","red","red","red",rep("white",5),"blue","blue","blue","blue","blue")
#legend("top",legend=c("n=5000","n=3000","n=1000","n=500","n=200",rep(" ",5),"n=5000","n=3000","n=1000","n=500","n=200"),ncol=3,col=colors,pch=pc,bty="n",cex=1.4,horiz=F,lty=lt,lwd=c(rep(1.4,5),rep(0,5),rep(1.4,5)),,text.width=c(0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.2))
legend("topleft",legend=c("I(Xt;{Yt-1,Xt-1})","I(Xt;Yt-1)","I(Xt;Xt-1)","Net Synergy"),col=c("red","blue","green","black"),lty=1,lwd=1.4,bty="n",cex=1.4)
| /KRASKOV/FIG2/fig2_kar.R | no_license | pothapakulapraveen/ESD | R | false | false | 2,650 | r | rm(list=ls())
#library("Hmisc")
source("TE_function_kar.R")
#source("TE_mean.R")
#source("TE_sd.R")
source("MI_function_kar.R")
source("CMI_function_kar.R")
load("../../linear2.RData")
library(doParallel)
#cl<-makeCluster(detectCores())
cl<-5
registerDoParallel(cl)
################# LONG time series ######################
result4<-foreach(i = 1:11,.combine='rbind')%dopar%{
#A<-TE(y[95000:100000],x[i,95000:100000])
A<-CMI(y[i,94999:99999],x[i,95000:100000],x[i,94999:99999])
B<-MI(y[i,94999:99999],x[i,95000:100000])
C<-MI(x[i,94999:99999],x[i,95000:100000])
return(c(A,B,C))}
################### 1000 time units ############
#result3<-foreach(i = 1:11,.combine='rbind')%dopar%{
#A<-TE(y[99000:100000],x[i,99000:100000])
#B<-MI(y[98999:99999],x[i,99000:100000])
#C<-MI(x[94999:99999],x[i,95000:100000])
#return(c(A,B,C))}
######################### 500 time units ###########
#result2<-foreach(i = 1:11,.combine='rbind')%dopar%{
#A<-TE(y[99500:100000],x[i,99500:100000])
#B<-MI(y[99499:99999],x[i,99500:100000])
##C<-MI(x[94999:99999],x[i,95000:100000])
#return(c(A,B))}
######################### 200 time units #############
#result1<-foreach(i = 1:11,.combine='rbind')%dopar%{
#A<-TE(y[99800:100000],x[i,99800:100000])
#B<-MI(y[99799:99999],x[i,99800:100000])
#return(c(A,B))}
####### 2 standard deviations ######
NS<-result4[,1]-result4[,2]
IXY<-result4[,1]+result4[,3]
############################################################
cp<-seq(0,1,0.1)
plot(cp[1:9],IXY[1:9],type="l",col="red",lwd=2.5,ylim=c(0,0.4),lty=1,pch=18,xlab="coupling coefficient",ylab="Information exchange (nats)",cex.lab=1.2,cex.axis=1.5,main="Kraskov")
#lines(cp,result3[,1],type="l",col="red",lwd=2.5)
#lines(cp,result2[,1],type="l",col="red",lwd=2.5)
#lines(cp,result1[,1],type="l",col="red",lwd=2.5)
lines(cp[1:9],result4[1:9,2],type="l",col="blue",lwd=2.5)
lines(cp[1:9],result4[1:9,3],type="l",col="green",lwd=2.5)
lines(cp[1:9],NS[1:9],type="l",col="black",lwd=2.5)
#lines(cp,result2[,2],type="l",col="blue",lwd=2.5)
#lines(cp,result1[,2],type="l",col="blue",lwd=2.5)
lt<-c(1,2,3,4,5,1,2,3,4,5)
pc<-c(18,23,20,21,22,18,23,20,21,22)
colors<-c("red","red","red","red","red",rep("white",5),"blue","blue","blue","blue","blue")
#legend("top",legend=c("n=5000","n=3000","n=1000","n=500","n=200",rep(" ",5),"n=5000","n=3000","n=1000","n=500","n=200"),ncol=3,col=colors,pch=pc,bty="n",cex=1.4,horiz=F,lty=lt,lwd=c(rep(1.4,5),rep(0,5),rep(1.4,5)),,text.width=c(0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.2))
legend("topleft",legend=c("I(Xt;{Yt-1,Xt-1})","I(Xt;Yt-1)","I(Xt;Xt-1)","Net Synergy"),col=c("red","blue","green","black"),lty=1,lwd=1.4,bty="n",cex=1.4)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/roi.R
\name{neuprint_ROI_mesh}
\alias{neuprint_ROI_mesh}
\title{Import a region of interest as a mesh}
\usage{
neuprint_ROI_mesh(roi, dataset = NULL, conn = NULL, ...)
}
\arguments{
\item{roi}{region of interest for a dataset}
\item{dataset}{optional, a dataset you want to query. If NULL, the default specified by your R environ file is used. See \code{neuprint_login} for details.}
\item{conn}{optional, a neuprintr connection object, which also specifies the neuPrint server see \code{?neuprint_login}.
If NULL, your defaults set in your R.profile or R.environ are used.}
\item{...}{methods passed to \code{neuprint_login}}
}
\description{
Import a region of interest as a mesh
}
| /man/neuprint_ROI_mesh.Rd | no_license | mmc46/neuprintr | R | false | true | 764 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/roi.R
\name{neuprint_ROI_mesh}
\alias{neuprint_ROI_mesh}
\title{Import a region of interest as a mesh}
\usage{
neuprint_ROI_mesh(roi, dataset = NULL, conn = NULL, ...)
}
\arguments{
\item{roi}{region of interest for a dataset}
\item{dataset}{optional, a dataset you want to query. If NULL, the default specified by your R environ file is used. See \code{neuprint_login} for details.}
\item{conn}{optional, a neuprintr connection object, which also specifies the neuPrint server see \code{?neuprint_login}.
If NULL, your defaults set in your R.profile or R.environ are used.}
\item{...}{methods passed to \code{neuprint_login}}
}
\description{
Import a region of interest as a mesh
}
|
\name{iBATProbit}
\alias{iBATProbit}
\title{
Main - Probit selection prior
}
\description{
Perform MCMC iterations of the model described in the reference.
}
\usage{
iBATProbit(Y, X, distance, disfix, intercept=1, xi,
R=-1, tran, mu, sigma=((rgamma(4,1,1))^(-0.5)),
cmu=1/1000000, c=10, delta=3, d, alpha0=2.32,
alpha1=1, deltak=c(-1,0,0.58,1), tauk=c(1,1,1,2),
upp_bounds=c(-0.1, 0.1, 0.73, Inf),
low_bounds=c(-Inf, -0.1, 0.1, 0.73),
alpha_IG=c(1,1,1,1), beta_IG=c(1,1,1,1),
low_IG=c(0.41,0.41,0.41,1), a=c(1,1,1,1),
niter=500000, burnin=200000, Cout=1000,
phi=0.5, pR=0.4, selectioncgh=-1, pXI=0.6)
}
\arguments{
\item{Y}{
Matrix of gene expression data
}
\item{X}{
Matrix of CGH data
}
\item{distance}{
Vector of distance between CGH probes
}
\item{disfix}{
Length of the chromosome under investigation
}
\item{intercept}{
If set to one an intercept is included in the regression model
}
\item{xi}{
Initialized matrix of latent states
}
\item{R}{
Initialized association matrix in a vector form. Default set to -1, that automatically creates a vector with all the positions set to zero
}
\item{tran}{
Initialized transition matrix
}
\item{mu}{
Initialized state specific mean vector
}
\item{sigma}{
Initialized state specific standard deviation vector
}
\item{cmu}{
Parameter that controls the variance of the prior on the intercept
}
\item{c}{
Parameter that determines the shrinkage in the model
}
\item{delta}{
Parameter of the Inverse-Gamma prior on the error variance
}
\item{d}{
Parameter of the Inverse-Gamma prior on the error variance
}
\item{alpha0}{
Baseline intercept of the selection prior
}
\item{alpha1}{
Parameter that regulates the strength of the spatially informed dependence
}
\item{deltak}{
Vector of mean of the prior on the state specific mean
}
\item{tauk}{
Vector of sd of the prior on the state specific mean
}
\item{upp_bounds}{
Vector of upper bounds of the prior on the state specific mean
}
\item{low_bounds}{
Vector of lower bounds of the prior on the state specific mean
}
\item{alpha_IG}{
Parameter of the prior on the state specific standard deviation
}
\item{beta_IG}{
Parameter of the prior on the state specific standard deviation
}
\item{low_IG}{
Truncation of the prior on the state specific standard deviation
}
\item{a}{
Vector of parameters of the prior on the transition matrix
}
\item{niter}{
Number of Monte Carlo Markov Chain iteration
}
\item{burnin}{
Burn-in
}
\item{Cout}{
Print the number of iterations ran every Cout iterations
}
\item{phi}{
Probability of an A/D step
}
\item{pR}{
Parameter of the distribution used to select the rows to be updated at every MCMC iteration
}
\item{selectioncgh}{
Number of samples not in neutral state in order to consider a CGH as a potential candidate for association with gene expression. Default set to -1, which automatically sets it to 10\% of the samples
}
\item{pXI}{
Parameter of the distribution used to select the rows to be updated at every MCMC iteration
}
}
\value{
The output consists of an R list composed by 4*niter+3 objects, where niter is the number of MCMC iterations. The first niter objects of the list are vectors, each containing the positions of the association matrix set to one, at the corresponding MCMC iteration. Each of the following niter objects of the list are the transition matrices at the corresponding MCMC iteration, while the third and the fourth set of niter objects are the vectors of state specific mean and state specific variance, respectively. The last three objects of the list consist of three matrices counting the number of times the corresponding latent state has been set to 1,3 and 4, respectively.
}
\references{
Cassese A, Guindani M, Vannucci M. A Bayesian integrative model for genetical genomics with spatially informed variable selection. Cancer Informatics.
}
\author{
Alberto Cassese
}
\examples{
\dontrun{
data(TCGA_lung)
Y <- TCGA_lung$Affy
X <- TCGA_lung$aCGH
distance <- TCGA_lung$distance
disfix <- 199446827
xi <- InitXi(X)
tran <- Tran(xi)
mu <- InitMu()
d=0.2587288
Y <- Center(Y)
res <- iBATProbit(Y=Y,X=X,distance=distance,disfix=disfix,xi=xi,tran=tran,mu=mu,d=d)
summRes <- Inference(res,G=dim(Y)[[2]],M=dim(X)[[2]],niter=niter,burnin=bi,threshold=0.5)
}
}
\keyword{Main}
| /iBATCGH/man/iBATProbit.Rd | no_license | akhikolla/TestedPackages-NoIssues | R | false | false | 4,292 | rd | \name{iBATProbit}
\alias{iBATProbit}
\title{
Main - Probit selection prior
}
\description{
Perform MCMC iterations of the model described in the reference.
}
\usage{
iBATProbit(Y, X, distance, disfix, intercept=1, xi,
R=-1, tran, mu, sigma=((rgamma(4,1,1))^(-0.5)),
cmu=1/1000000, c=10, delta=3, d, alpha0=2.32,
alpha1=1, deltak=c(-1,0,0.58,1), tauk=c(1,1,1,2),
upp_bounds=c(-0.1, 0.1, 0.73, Inf),
low_bounds=c(-Inf, -0.1, 0.1, 0.73),
alpha_IG=c(1,1,1,1), beta_IG=c(1,1,1,1),
low_IG=c(0.41,0.41,0.41,1), a=c(1,1,1,1),
niter=500000, burnin=200000, Cout=1000,
phi=0.5, pR=0.4, selectioncgh=-1, pXI=0.6)
}
\arguments{
\item{Y}{
Matrix of gene expression data
}
\item{X}{
Matrix of CGH data
}
\item{distance}{
Vector of distance between CGH probes
}
\item{disfix}{
Length of the chromosome under investigation
}
\item{intercept}{
If set to one an intercept is included in the regression model
}
\item{xi}{
Initialized matrix of latent states
}
\item{R}{
Initialized association matrix in a vector form. Default set to -1, that automatically creates a vector with all the positions set to zero
}
\item{tran}{
Initialized transition matrix
}
\item{mu}{
Initialized state specific mean vector
}
\item{sigma}{
Initialized state specific standard deviation vector
}
\item{cmu}{
Parameter that controls the variance of the prior on the intercept
}
\item{c}{
Parameter that determines the shrinkage in the model
}
\item{delta}{
Parameter of the Inverse-Gamma prior on the error variance
}
\item{d}{
Parameter of the Inverse-Gamma prior on the error variance
}
\item{alpha0}{
Baseline intercept of the selection prior
}
\item{alpha1}{
Parameter that regulates the strength of the spatially informed dependence
}
\item{deltak}{
Vector of mean of the prior on the state specific mean
}
\item{tauk}{
Vector of sd of the prior on the state specific mean
}
\item{upp_bounds}{
Vector of upper bounds of the prior on the state specific mean
}
\item{low_bounds}{
Vector of lower bounds of the prior on the state specific mean
}
\item{alpha_IG}{
Parameter of the prior on the state specific standard deviation
}
\item{beta_IG}{
Parameter of the prior on the state specific standard deviation
}
\item{low_IG}{
Truncation of the prior on the state specific standard deviation
}
\item{a}{
Vector of parameters of the prior on the transition matrix
}
\item{niter}{
Number of Monte Carlo Markov Chain iteration
}
\item{burnin}{
Burn-in
}
\item{Cout}{
Print the number of iterations ran every Cout iterations
}
\item{phi}{
Probability of an A/D step
}
\item{pR}{
Parameter of the distribution used to select the rows to be updated at every MCMC iteration
}
\item{selectioncgh}{
Number of samples not in neutral state in order to consider a CGH as a potential candidate for association with gene expression. Default set to -1, which automatically sets it to 10\% of the samples
}
\item{pXI}{
Parameter of the distribution used to select the rows to be updated at every MCMC iteration
}
}
\value{
The output consists of an R list composed by 4*niter+3 objects, where niter is the number of MCMC iterations. The first niter objects of the list are vectors, each containing the positions of the association matrix set to one, at the corresponding MCMC iteration. Each of the following niter objects of the list are the transition matrices at the corresponding MCMC iteration, while the third and the fourth set of niter objects are the vectors of state specific mean and state specific variance, respectively. The last three objects of the list consist of three matrices counting the number of times the corresponding latent state has been set to 1,3 and 4, respectively.
}
\references{
Cassese A, Guindani M, Vannucci M. A Bayesian integrative model for genetical genomics with spatially informed variable selection. Cancer Informatics.
}
\author{
Alberto Cassese
}
\examples{
\dontrun{
data(TCGA_lung)
Y <- TCGA_lung$Affy
X <- TCGA_lung$aCGH
distance <- TCGA_lung$distance
disfix <- 199446827
xi <- InitXi(X)
tran <- Tran(xi)
mu <- InitMu()
d=0.2587288
Y <- Center(Y)
res <- iBATProbit(Y=Y,X=X,distance=distance,disfix=disfix,xi=xi,tran=tran,mu=mu,d=d)
summRes <- Inference(res,G=dim(Y)[[2]],M=dim(X)[[2]],niter=niter,burnin=bi,threshold=0.5)
}
}
\keyword{Main}
|
\name{Medline-class}
\docType{class}
\alias{Medline}
\alias{Medline-class}
\title{Class \code{"Medline"}}
\description{
Class for Medline citation of query to PubMed.
}
\section{Objects from the Class}{
Objects can be created by calls of the form \code{new("Medline", ...)}.
}
\section{Slots}{
Unless stated otherwise, each of the components is a vector of the given field where each element corresponds to a PubMed article.
\describe{
\item{\code{Query}:}{}
\item{\code{PMID}:}{}
\item{\code{YearAccepted}:}{}
\item{\code{MonthAccepted}:}{}
\item{\code{DayAccepted}:}{}
\item{\code{HourAccepted}:}{}
\item{\code{MinuteAccepted}:}{}
\item{\code{YearReceived}:}{}
\item{\code{MonthReceived}:}{}
\item{\code{DayReceived}:}{}
\item{\code{HourReceived}:}{}
\item{\code{MinuteReceived}:}{}
\item{\code{YearEpublish}:}{}
\item{\code{MonthEpublish}:}{}
\item{\code{DayEpublish}:}{}
\item{\code{HourEpublish}:}{}
\item{\code{MinuteEpublish}:}{}
\item{\code{YearPpublish}:}{}
\item{\code{MonthPpublish}:}{}
\item{\code{DayPpublish}:}{}
\item{\code{HourPpublish}:}{}
\item{\code{MinutePpublish}:}{}
\item{\code{YearPmc}:}{}
\item{\code{MonthPmc}:}{}
\item{\code{DayPmc}:}{}
\item{\code{HourPmc}:}{}
\item{\code{MinutePmc}:}{}
\item{\code{YearPubmed}:}{}
\item{\code{MonthPubmed}:}{}
\item{\code{DayPubmed}:}{}
\item{\code{HourPubmed}:}{}
\item{\code{MinutePubmed}:}{}
\item{\code{Author}:}{ list of data frames giving \code{LastName}, \code{ForeName}, \code{Initials}, and \code{order} of authors by PubMed article.}
\item{\code{ISSN}:}{}
\item{\code{Title}:}{}
\item{\code{ArticleTitle}:}{}
\item{\code{ELocationID}:}{}
\item{\code{AbstractText}:}{}
\item{\code{Affiliation}:}{}
\item{\code{Language}:}{}
\item{\code{PublicationType}:}{}
\item{\code{MedlineTA}:}{}
\item{\code{NlmUniqueID}:}{}
\item{\code{ISSNLinking}:}{}
\item{\code{PublicationStatus}:}{}
\item{\code{ArticleId}:}{}
\item{\code{Volume}:}{}
\item{\code{Issue}:}{}
\item{\code{ISOAbbreviation}:}{}
\item{\code{MedlinePgn}:}{}
\item{\code{CopyrightInformation}:}{}
\item{\code{Country}:}{}
\item{\code{GrantID}:}{}
\item{\code{Acronym}:}{}
\item{\code{Agency}:}{}
\item{\code{RegistryNumber}:}{}
\item{\code{RefSource}:}{}
\item{\code{CollectiveName}:}{}
\item{\code{Mesh}:}{ list of data frames giving \code{Heading} and \code{Type} of MeSH term or \code{NA} if no terms are in MEDLINE record}
}
}
\section{Methods}{
In addition to \code{print} and \code{show} methods, each slot of the \code{Medline} class has a corresponding extractor of the same name.
\describe{
\item{print}{\code{signature(x = "Medline", ...)}: ...}
\item{show}{\code{signature(object = "Medline")}: ...}
\item{Query}{\code{signature(object = "Medline")}: ...}
\item{PMID}{\code{signature(object = "Medline")}: ...}
\item{YearAccepted}{\code{signature(object = "Medline")}: ...}
\item{MonthAccepted}{\code{signature(object = "Medline")}: ...}
\item{DayAccepted}{\code{signature(object = "Medline")}: ...}
\item{HourAccepted}{\code{signature(object = "Medline")}: ...}
\item{MinuteAccepted}{\code{signature(object = "Medline")}: ...}
\item{YearReceived}{\code{signature(object = "Medline")}: ...}
\item{MonthReceived}{\code{signature(object = "Medline")}: ...}
\item{DayReceived}{\code{signature(object = "Medline")}: ...}
\item{HourReceived}{\code{signature(object = "Medline")}: ...}
\item{MinuteReceived}{\code{signature(object = "Medline")}: ...}
\item{YearEpublish}{\code{signature(object = "Medline")}: ...}
\item{MonthEpublish}{\code{signature(object = "Medline")}: ...}
\item{DayEpublish}{\code{signature(object = "Medline")}: ...}
\item{HourEpublish}{\code{signature(object = "Medline")}: ...}
\item{MinuteEpublish}{\code{signature(object = "Medline")}: ...}
\item{YearPpublish}{\code{signature(object = "Medline")}: ...}
\item{MonthPpublish}{\code{signature(object = "Medline")}: ...}
\item{DayPpublish}{\code{signature(object = "Medline")}: ...}
\item{HourPpublish}{\code{signature(object = "Medline")}: ...}
\item{MinutePpublish}{\code{signature(object = "Medline")}: ...}
\item{YearPmc}{\code{signature(object = "Medline")}: ...}
\item{MonthPmc}{\code{signature(object = "Medline")}: ...}
\item{DayPmc}{\code{signature(object = "Medline")}: ...}
\item{HourPmc}{\code{signature(object = "Medline")}: ...}
\item{MinutePmc}{\code{signature(object = "Medline")}: ...}
\item{YearPubmed}{\code{signature(object = "Medline")}: ...}
\item{MonthPubmed}{\code{signature(object = "Medline")}: ...}
\item{DayPubmed}{\code{signature(object = "Medline")}: ...}
\item{HourPubmed}{\code{signature(object = "Medline")}: ...}
\item{MinutePubmed}{\code{signature(object = "Medline")}: ...}
\item{Author}{\code{signature(object = "Medline")}: ...}
\item{ISSN}{\code{signature(object = "Medline")}: ...}
\item{Title}{\code{signature(object = "Medline")}: ...}
\item{ArticleTitle}{\code{signature(object = "Medline")}: ...}
\item{ELocationID}{\code{signature(object = "Medline")}: ...}
\item{AbstractText}{\code{signature(object = "Medline")}: ...}
\item{Affiliation}{\code{signature(object = "Medline")}: ...}
\item{Language}{\code{signature(object = "Medline")}: ...}
\item{PublicationType}{\code{signature(object = "Medline")}: ...}
\item{MedlineTA}{\code{signature(object = "Medline")}: ...}
\item{NlmUniqueID}{\code{signature(object = "Medline")}: ...}
\item{ISSNLinking}{\code{signature(object = "Medline")}: ...}
\item{PublicationStatus}{\code{signature(object = "Medline")}: ...}
\item{ArticleId}{\code{signature(object = "Medline")}: ...}
\item{Volume}{\code{signature(object = "Medline")}: ...}
\item{Issue}{\code{signature(object = "Medline")}: ...}
\item{ISOAbbreviation}{\code{signature(object = "Medline")}: ...}
\item{MedlinePgn}{\code{signature(object = "Medline")}: ...}
\item{CopyrightInformation}{\code{signature(object = "Medline")}: ...}
\item{Country}{\code{signature(object = "Medline")}: ...}
\item{GrantID}{\code{signature(object = "Medline")}: ...}
\item{Acronym}{\code{signature(object = "Medline")}: ...}
\item{Agency}{\code{signature(object = "Medline")}: ...}
\item{RegistryNumber}{\code{signature(object = "Medline")}: ...}
\item{RefSource}{\code{signature(object = "Medline")}: ...}
\item{CollectiveName}{\code{signature(object = "Medline")}: ...}
\item{Mesh}{\code{signature(object = "Medline")}: ...}
}
}
\author{
Stephanie Kovalchik
}
\keyword{classes}
| /man/Medline.Rd | no_license | parisni/RISmed | R | false | false | 7,621 | rd | \name{Medline-class}
\docType{class}
\alias{Medline}
\alias{Medline-class}
\title{Class \code{"Medline"}}
\description{
Class for Medline citation of query to PubMed.
}
\section{Objects from the Class}{
Objects can be created by calls of the form \code{new("Medline", ...)}.
}
\section{Slots}{
Unless stated otherwise, each of the components is a vector of the given field where each element corresponds to a PubMed article.
\describe{
\item{\code{Query}:}{}
\item{\code{PMID}:}{}
\item{\code{YearAccepted}:}{}
\item{\code{MonthAccepted}:}{}
\item{\code{DayAccepted}:}{}
\item{\code{HourAccepted}:}{}
\item{\code{MinuteAccepted}:}{}
\item{\code{YearReceived}:}{}
\item{\code{MonthReceived}:}{}
\item{\code{DayReceived}:}{}
\item{\code{HourReceived}:}{}
\item{\code{MinuteReceived}:}{}
\item{\code{YearEpublish}:}{}
\item{\code{MonthEpublish}:}{}
\item{\code{DayEpublish}:}{}
\item{\code{HourEpublish}:}{}
\item{\code{MinuteEpublish}:}{}
\item{\code{YearPpublish}:}{}
\item{\code{MonthPpublish}:}{}
\item{\code{DayPpublish}:}{}
\item{\code{HourPpublish}:}{}
\item{\code{MinutePpublish}:}{}
\item{\code{YearPmc}:}{}
\item{\code{MonthPmc}:}{}
\item{\code{DayPmc}:}{}
\item{\code{HourPmc}:}{}
\item{\code{MinutePmc}:}{}
\item{\code{YearPubmed}:}{}
\item{\code{MonthPubmed}:}{}
\item{\code{DayPubmed}:}{}
\item{\code{HourPubmed}:}{}
\item{\code{MinutePubmed}:}{}
\item{\code{Author}:}{ list of data frames giving \code{LastName}, \code{ForeName}, \code{Initials}, and \code{order} of authors by PubMed article.}
\item{\code{ISSN}:}{}
\item{\code{Title}:}{}
\item{\code{ArticleTitle}:}{}
\item{\code{ELocationID}:}{}
\item{\code{AbstractText}:}{}
\item{\code{Affiliation}:}{}
\item{\code{Language}:}{}
\item{\code{PublicationType}:}{}
\item{\code{MedlineTA}:}{}
\item{\code{NlmUniqueID}:}{}
\item{\code{ISSNLinking}:}{}
\item{\code{PublicationStatus}:}{}
\item{\code{ArticleId}:}{}
\item{\code{Volume}:}{}
\item{\code{Issue}:}{}
\item{\code{ISOAbbreviation}:}{}
\item{\code{MedlinePgn}:}{}
\item{\code{CopyrightInformation}:}{}
\item{\code{Country}:}{}
\item{\code{GrantID}:}{}
\item{\code{Acronym}:}{}
\item{\code{Agency}:}{}
\item{\code{RegistryNumber}:}{}
\item{\code{RefSource}:}{}
\item{\code{CollectiveName}:}{}
\item{\code{Mesh}:}{ list of data frames giving \code{Heading} and \code{Type} of MeSH term or \code{NA} if no terms are in MEDLINE record}
}
}
\section{Methods}{
In addition to \code{print} and \code{show} methods, each slot of the \code{Medline} class has a corresponding extractor of the same name.
\describe{
\item{print}{\code{signature(x = "Medline", ...)}: ...}
\item{show}{\code{signature(object = "Medline")}: ...}
\item{Query}{\code{signature(object = "Medline")}: ...}
\item{PMID}{\code{signature(object = "Medline")}: ...}
\item{YearAccepted}{\code{signature(object = "Medline")}: ...}
\item{MonthAccepted}{\code{signature(object = "Medline")}: ...}
\item{DayAccepted}{\code{signature(object = "Medline")}: ...}
\item{HourAccepted}{\code{signature(object = "Medline")}: ...}
\item{MinuteAccepted}{\code{signature(object = "Medline")}: ...}
\item{YearReceived}{\code{signature(object = "Medline")}: ...}
\item{MonthReceived}{\code{signature(object = "Medline")}: ...}
\item{DayReceived}{\code{signature(object = "Medline")}: ...}
\item{HourReceived}{\code{signature(object = "Medline")}: ...}
\item{MinuteReceived}{\code{signature(object = "Medline")}: ...}
\item{YearEpublish}{\code{signature(object = "Medline")}: ...}
\item{MonthEpublish}{\code{signature(object = "Medline")}: ...}
\item{DayEpublish}{\code{signature(object = "Medline")}: ...}
\item{HourEpublish}{\code{signature(object = "Medline")}: ...}
\item{MinuteEpublish}{\code{signature(object = "Medline")}: ...}
\item{YearPpublish}{\code{signature(object = "Medline")}: ...}
\item{MonthPpublish}{\code{signature(object = "Medline")}: ...}
\item{DayPpublish}{\code{signature(object = "Medline")}: ...}
\item{HourPpublish}{\code{signature(object = "Medline")}: ...}
\item{MinutePpublish}{\code{signature(object = "Medline")}: ...}
\item{YearPmc}{\code{signature(object = "Medline")}: ...}
\item{MonthPmc}{\code{signature(object = "Medline")}: ...}
\item{DayPmc}{\code{signature(object = "Medline")}: ...}
\item{HourPmc}{\code{signature(object = "Medline")}: ...}
\item{MinutePmc}{\code{signature(object = "Medline")}: ...}
\item{YearPubmed}{\code{signature(object = "Medline")}: ...}
\item{MonthPubmed}{\code{signature(object = "Medline")}: ...}
\item{DayPubmed}{\code{signature(object = "Medline")}: ...}
\item{HourPubmed}{\code{signature(object = "Medline")}: ...}
\item{MinutePubmed}{\code{signature(object = "Medline")}: ...}
\item{Author}{\code{signature(object = "Medline")}: ...}
\item{ISSN}{\code{signature(object = "Medline")}: ...}
\item{Title}{\code{signature(object = "Medline")}: ...}
\item{ArticleTitle}{\code{signature(object = "Medline")}: ...}
\item{ELocationID}{\code{signature(object = "Medline")}: ...}
\item{AbstractText}{\code{signature(object = "Medline")}: ...}
\item{Affiliation}{\code{signature(object = "Medline")}: ...}
\item{Language}{\code{signature(object = "Medline")}: ...}
\item{PublicationType}{\code{signature(object = "Medline")}: ...}
\item{MedlineTA}{\code{signature(object = "Medline")}: ...}
\item{NlmUniqueID}{\code{signature(object = "Medline")}: ...}
\item{ISSNLinking}{\code{signature(object = "Medline")}: ...}
\item{PublicationStatus}{\code{signature(object = "Medline")}: ...}
\item{ArticleId}{\code{signature(object = "Medline")}: ...}
\item{Volume}{\code{signature(object = "Medline")}: ...}
\item{Issue}{\code{signature(object = "Medline")}: ...}
\item{ISOAbbreviation}{\code{signature(object = "Medline")}: ...}
\item{MedlinePgn}{\code{signature(object = "Medline")}: ...}
\item{CopyrightInformation}{\code{signature(object = "Medline")}: ...}
\item{Country}{\code{signature(object = "Medline")}: ...}
\item{GrantID}{\code{signature(object = "Medline")}: ...}
\item{Acronym}{\code{signature(object = "Medline")}: ...}
\item{Agency}{\code{signature(object = "Medline")}: ...}
\item{RegistryNumber}{\code{signature(object = "Medline")}: ...}
\item{RefSource}{\code{signature(object = "Medline")}: ...}
\item{CollectiveName}{\code{signature(object = "Medline")}: ...}
\item{Mesh}{\code{signature(object = "Medline")}: ...}
}
}
\author{
Stephanie Kovalchik
}
\keyword{classes}
|
# K Nearest Neighbor classification of the Social Network Ads dataset:
# predict `Purchased` (0/1) from Age and EstimatedSalary, then visualise the
# decision boundary on a dense grid over the two (scaled) features.
# Requires Social_Network_Ads.csv in the working directory and the third-party
# packages caTools, class, and ElemStatLearn.
# Importing the dataset
dataset = read.csv('Social_Network_Ads.csv')
dataset = dataset[3:5]   # keep only Age, EstimatedSalary, Purchased
# Encoding the target feature as factor
dataset$Purchased = factor(dataset$Purchased, levels = c(0, 1))
# Splitting the dataset into the Training set and Test set
# install.packages('caTools')
library(caTools)
set.seed(123)   # fixed seed so the train/test split is reproducible
split = sample.split(dataset$Purchased, SplitRatio = 0.75)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
# Feature Scaling -- K-NN is distance based, so both features must be on a
# comparable scale.  NOTE(review): the test set is scaled with its own
# mean/sd rather than the training set's -- confirm this is intended.
training_set[-3] = scale(training_set[-3])
test_set[-3] = scale(test_set[-3])
# Fitting K-NN to the Training set and Predicting the Test set results
# (knn() has no separate fit step: it classifies `test` directly from the
# labelled training points, here with k = 5 neighbours).
#install.packages('class')
library(class)
y_pred = knn(train = training_set[, 1:2],
             test = test_set[, 1:2],
             cl = training_set[, 3],
             k = 5)
# Making the Confusion Matrix (actual class vs. prediction on the test set)
cm = table(test_set[, 3], y_pred)
# Visualising the Training set results: classify every point of a 0.01-step
# grid over the feature range, colour the background by predicted class
# (green = 1, red = 0), and overlay the actual observations.
library(ElemStatLearn)
set = training_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('Age', 'EstimatedSalary')
y_grid = knn(train = training_set[, 1:2],
             test = grid_set[, 1:2],
             cl = training_set[, 3],
             k = 5)
plot(set[, -3],
     main = 'K-NN (Training set)',
     xlab = 'Age', ylab = 'Estimated Salary',
     xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
# Visualising the Test set results (same grid construction as above, but the
# observations overlaid are the held-out test points; the classifier still
# uses the training set as its reference points).
library(ElemStatLearn)
set = test_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('Age', 'EstimatedSalary')
y_grid = knn(train = training_set[, 1:2],
             test = grid_set[, 1:2],
             cl = training_set[, 3],
             k = 5)
plot(set[, -3], main = 'K-NN (Test set)',
     xlab = 'Age', ylab = 'Estimated Salary',
     xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3')) | /Part 3 - Classification/Section 15 - K-Nearest Neighbors (K-NN)/k_nearest_neighbor.R | no_license | asrabon/Machine-Learning-A-Z | R | false | false | 2,444 | r | # K Nearest Neighbor
# Importing the dataset
dataset <- read.csv('Social_Network_Ads.csv')
dataset <- dataset[3:5]
# Encoding the target feature as factor
dataset$Purchased <- factor(dataset$Purchased, levels = c(0, 1))
# Splitting the dataset into the Training set and Test set
# install.packages('caTools')
library(caTools)
set.seed(123)
split <- sample.split(dataset$Purchased, SplitRatio = 0.75)
training_set <- subset(dataset, split == TRUE)
test_set <- subset(dataset, split == FALSE)
# Feature Scaling -- k-NN is distance based, so features must share a scale
training_set[-3] <- scale(training_set[-3])
test_set[-3] <- scale(test_set[-3])
# Fitting K-NN to the Training set and Predicting the Test set results
# install.packages('class')
library(class)
y_pred <- knn(train = training_set[, 1:2],
              test = test_set[, 1:2],
              cl = training_set[, 3],
              k = 5)
# Making the Confusion Matrix
cm <- table(test_set[, 3], y_pred)
# Visualising decision regions
library(ElemStatLearn)
# Plot the k-NN decision regions over a fine grid and overlay the
# observations in `set`.  The classifier is always trained on training_set;
# only the points drawn on top differ between the two plots below.
#   set        - data frame with Age, EstimatedSalary and the class column
#   plot_title - main title for the plot
plot_knn_regions <- function(set, plot_title) {
  # 0.01-step grid extending one unit beyond the observed range
  X1 <- seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
  X2 <- seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
  grid_set <- expand.grid(X1, X2)
  colnames(grid_set) <- c('Age', 'EstimatedSalary')
  # Predict a class for every grid point
  y_grid <- knn(train = training_set[, 1:2],
                test = grid_set[, 1:2],
                cl = training_set[, 3],
                k = 5)
  plot(set[, -3],
       main = plot_title,
       xlab = 'Age', ylab = 'Estimated Salary',
       xlim = range(X1), ylim = range(X2))
  # Decision boundary, tinted background, then the actual points
  contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
  points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
  points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
}
# Visualising the Training set results
plot_knn_regions(training_set, 'K-NN (Training set)')
# Visualising the Test set results
plot_knn_regions(test_set, 'K-NN (Test set)')
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rand.R
\name{rand}
\alias{rand}
\title{Rand index between two partitions}
\usage{
rand(P, Q, symmetric = TRUE, adj = TRUE)
}
\arguments{
\item{P}{a factor, e.g., the first partition.}
\item{Q}{a factor, e.g., the second partition.}
\item{symmetric}{a boolean. If 'FALSE' the asymmetrical Rand index is
calculated.}
\item{adj}{a boolean. If 'TRUE' the corrected index is calculated.}
}
\description{
Returns the Rand index, the corrected Rand index or the asymmetrical Rand
index. The asymmetrical Rand index (corrected or not) measures the
inclusion of a partition 'P' into a partition 'Q', with the number of
clusters in 'P' greater than the number of clusters in 'Q'.
}
\author{
Marie Chavent <marie.chavent@u-bordeaux.fr>, Vanessa Kuentz, Amaury Labenne,
Benoit Liquet, Jerome Saracco
}
\seealso{
\code{\link{stability}}
}
\keyword{cluster}
\keyword{multivariate}
| /man/rand.Rd | no_license | praveenarunachalam/ClustOfVar | R | false | true | 951 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rand.R
\name{rand}
\alias{rand}
\title{Rand index between two partitions}
\usage{
rand(P, Q, symmetric = TRUE, adj = TRUE)
}
\arguments{
\item{P}{a factor, e.g., the first partition.}
\item{Q}{a factor, e.g., the second partition.}
\item{symmetric}{a boolean. If 'FALSE' the asymmetrical Rand index is
calculated.}
\item{adj}{a boolean. If 'TRUE' the corrected index is calculated.}
}
\description{
Returns the Rand index, the corrected Rand index or the asymmetrical Rand
index. The asymmetrical Rand index (corrected or not) measures the
inclusion of a partition 'P' into a partition 'Q', with the number of
clusters in 'P' greater than the number of clusters in 'Q'.
}
\author{
Marie Chavent <marie.chavent@u-bordeaux.fr>, Vanessa Kuentz, Amaury Labenne,
Benoit Liquet, Jerome Saracco
}
\seealso{
\code{\link{stability}}
}
\keyword{cluster}
\keyword{multivariate}
|
# imports
library(tidyverse)
library(janitor)
library(lubridate)
library(maps)
library(stringr)
# processing
# KY_gathered.csv is wide: one row per county, one column per month.
raw <- read_csv("KY_gathered.csv")
# Reshape to long (county x month), attach state identifiers, and join
# county FIPS codes from maps::county.fips.
output <- raw %>%
  pivot_longer(cols = !starts_with("county"), names_to = "date", values_to = "claims") %>%
  mutate(
    # date info: parse "1- <month><year>" and shift to the last day of the month
    date = ceiling_date(dym(paste("1-", date)), "month") - 1,
    week = NA,  # monthly series: no week component
    month = month(date),
    year = year(date),
    # set general info
    state = "Kentucky",
    # BUG FIX: Kentucky's state FIPS code is 21 (22 is Louisiana).
    state_fips = 21,
    state_short = "KY",
    # maps::county.fips keys look like "kentucky,adair"; the original
    # case_when(TRUE ~ ...) wrapper was a no-op and has been removed.
    polyname = paste("kentucky,", tolower(county), sep = "")
  ) %>%
  # Join with FIPS
  left_join(county.fips, by = "polyname") %>%
  rename(
    county_fips = fips
  ) %>%
  select(
    state, state_fips, state_short, county, county_fips,
    date, week, month, year, claims
  ) %>%
  arrange(week)
# output
write.csv(output, file = "KY_compiled.csv", row.names = FALSE)
| /initial_claims/KY/compile_KY.R | no_license | andyzwang/coronavirus-unemployment | R | false | false | 951 | r | # imports
library(tidyverse)
library(janitor)
library(lubridate)
library(maps)
library(stringr)
# processing
# KY_gathered.csv is wide: one row per county, one column per month.
raw <- read_csv("KY_gathered.csv")
# Reshape to long (county x month), attach state identifiers, and join
# county FIPS codes from maps::county.fips.
output <- raw %>%
  pivot_longer(cols = !starts_with("county"), names_to = "date", values_to = "claims") %>%
  mutate(
    # date info: parse "1- <month><year>" and shift to the last day of the month
    date = ceiling_date(dym(paste("1-", date)), "month") - 1,
    week = NA,  # monthly series: no week component
    month = month(date),
    year = year(date),
    # set general info
    state = "Kentucky",
    # BUG FIX: Kentucky's state FIPS code is 21 (22 is Louisiana).
    state_fips = 21,
    state_short = "KY",
    # maps::county.fips keys look like "kentucky,adair"; the original
    # case_when(TRUE ~ ...) wrapper was a no-op and has been removed.
    polyname = paste("kentucky,", tolower(county), sep = "")
  ) %>%
  # Join with FIPS
  left_join(county.fips, by = "polyname") %>%
  rename(
    county_fips = fips
  ) %>%
  select(
    state, state_fips, state_short, county, county_fips,
    date, week, month, year, claims
  ) %>%
  arrange(week)
# output
write.csv(output, file = "KY_compiled.csv", row.names = FALSE)
|
# Score-type test for a difference in spread between the two groups coded
# 0/1 in memSubjects, using absolute deviations from each group's median.
# Returns c(pvalue, stat); stat is negated when var(group 1) < var(group 0).
myiAWvar.BF <- function(value, memSubjects) {
  ## Validate the group indicator: exactly the two values 0 and 1.
  grp <- sort(unique(memSubjects))
  if (length(grp) != 2) {
    stop("memSubjects must take 2 and only 2 values\n")
  }
  if (!identical(grp, c(0, 1))) {
    stop("memSubjects must only take values 0 or 1\n")
  }
  if (length(value) != length(memSubjects)) {
    stop("value must have the same length as memSubjects\n")
  }

  ## Split observations by group membership.
  idx.case <- which(memSubjects == 1)
  idx.ctrl <- which(memSubjects == 0)
  v.case <- value[idx.case]
  v.ctrl <- value[idx.ctrl]

  ## Group variances only decide the sign of the reported statistic below.
  var.case <- var(v.case, na.rm = TRUE)
  var.ctrl <- var(v.ctrl, na.rm = TRUE)

  ## Scores: absolute deviations from the own group's median.
  z <- rep(NA_real_, length(value))
  z[idx.case] <- abs(v.case - median(v.case, na.rm = TRUE))
  z[idx.ctrl] <- abs(v.ctrl - median(v.ctrl, na.rm = TRUE))

  ## Statistic T2 = U2^2 / Var(U2), referred to a chi-square with 1 df.
  p.case <- mean(memSubjects, na.rm = TRUE)
  U2 <- sum((memSubjects - p.case) * z, na.rm = TRUE)
  varU2 <- p.case * (1 - p.case) * sum((z - mean(z, na.rm = TRUE))^2, na.rm = TRUE)
  T2 <- U2^2 / varU2
  pval <- 1 - pchisq(T2, df = 1)

  ## Negative statistic flags var(group 1) < var(group 0).
  if (var.case < var.ctrl) {
    T2 <- -T2
  }

  res <- c(pval, T2)
  names(res) <- c("pvalue", "stat")
  res
}
| /R/myiAWvar.BF.R | no_license | cran/MMDvariance | R | false | false | 1,366 | r | myiAWvar.BF = function (value, memSubjects)
{
u.memSubjects = sort(unique(memSubjects))
if (length(u.memSubjects) != 2) {
stop("memSubjects must take 2 and only 2 values\n")
}
if (!identical(u.memSubjects, c(0, 1))) {
stop("memSubjects must only take values 0 or 1\n")
}
if (length(value) != length(memSubjects)) {
stop("value must have the same length as memSubjects\n")
}
pos1 = which(memSubjects == 1)
pos0 = which(memSubjects == 0)
value1 = value[pos1]
value0 = value[pos0]
var1 = var(value1, na.rm=TRUE)
var0 = var(value0, na.rm=TRUE)
m.value1 = median(value1, na.rm = TRUE)
m.value0 = median(value0, na.rm = TRUE)
value1.2 = abs(value1 - m.value1)
value0.2 = abs(value0 - m.value0)
z = rep(NA, length(value))
z[pos1] = value1.2
z[pos0] = value0.2
memSubjectsbar = mean(memSubjects, na.rm = TRUE)
U2 = sum((memSubjects - memSubjectsbar) * z, na.rm = TRUE)
zbar = mean(z, na.rm = TRUE)
varU2 = memSubjectsbar * (1 - memSubjectsbar) * sum((z - zbar)^2, na.rm = TRUE)
T2 = U2^2/varU2
pval = 1 - pchisq(T2, df = 1)
#res = list(U2 = U2, varU2 = varU2, stat = T2, pval = pval,
# z = z, zbar = zbar)
if(var1 < var0)
{
T2 = -T2
}
res = c(pval, T2)
names(res) = c("pvalue", "stat")
return(res)
}
|
\name{gantt_wrap}
\alias{gantt_wrap}
\title{Gantt Plot}
\usage{
gantt_wrap(dataframe, plot.var, facet.vars = NULL,
fill.var = NULL, title = NULL,
ylab = as.character(plot.var),
xlab = "duration.default", rev.factor = TRUE,
transform = FALSE, ncol = NULL, minor.line.freq = NULL,
major.line.freq = NULL, sig.dig.line.freq = 1,
hms.scale = NULL, scale = NULL, space = NULL, size = 3,
rm.horiz.lines = FALSE, x.ticks = TRUE, y.ticks = TRUE,
legend.position = NULL, bar.color = NULL,
border.color = NULL, border.size = 2,
border.width = 0.1, constrain = TRUE)
}
\arguments{
\item{dataframe}{A data frame with plotting variable(s)
and a column of start and end times.}
\item{plot.var}{A factor plotting variable (y axis).}
\item{facet.vars}{An optional single vector or list of 1
or 2 to facet by.}
\item{fill.var}{An optional variable to fill the code
strips by.}
\item{title}{An optional title for the plot.}
\item{ylab}{An optional y label.}
\item{xlab}{An optional x label.}
\item{rev.factor}{logical. If \code{TRUE} reverse the
current plotting order so the first element in the
plotting variable's levels is plotted on top.}
\item{ncol}{if an integer value is passed to this
\code{\link[qdap]{gantt_wrap}} uses
\code{\link[ggplot2]{facet_wrap}} rather than
\code{\link[ggplot2]{facet_grid}}.}
\item{transform}{logical. If \code{TRUE} the repeated
facets will be transformed from stacked to side by side.}
\item{minor.line.freq}{A numeric value for frequency of
minor grid lines.}
\item{major.line.freq}{A numeric value for frequency of
major grid lines.}
\item{sig.dig.line.freq}{An internal rounding factor for
minor and major line frequencies. Generally, the default value of 1
suffices; for a larger range of the x scale it may need to be set
to -2.}
\item{hms.scale}{logical. If \code{TRUE} converts scale
to h:m:s format. Default \code{NULL} attempts to detect
if object is a cm_time2long object}
\item{scale}{Should scales be fixed (\code{"fixed"}, the
default), free (\code{"free"}), or free in one dimension
(\code{"free_x"}, \code{"free_y"})}
\item{space}{If \code{"fixed"}, the default, all panels
have the same size. If \code{"free_y"} their height will
be proportional to the length of the y scale; if
\code{"free_x"} their width will be proportional to the
length of the x scale; or if \code{"free"} both height
and width will vary. This setting has no effect unless
the appropriate scales also vary.}
\item{size}{The width of the plot bars.}
\item{rm.horiz.lines}{logical. If \code{TRUE} the
horizontal lines will be removed.}
\item{x.ticks}{logical. If \code{TRUE} the x ticks will
be displayed.}
\item{y.ticks}{logical. If \code{TRUE} the y ticks will
be displayed.}
\item{legend.position}{The position of legends.
(\code{"left"}, \code{"right"}, \code{"bottom"},
\code{"top"}, or two-element numeric vector).}
\item{bar.color}{Optional color to constrain all bars.}
\item{border.color}{The color to plot border around Gantt
bars (default is \code{NULL}).}
\item{border.size}{An integer value for the size to plot
borders around Gantt bars. Controls length (width also
controlled if not specified).}
\item{border.width}{Controls border width around Gantt
bars. Use a numeric value in addition to border size if
plot borders appear disproportional.}
\item{constrain}{logical. If \code{TRUE} the Gantt bars
touch the edge of the graph.}
}
\value{
Returns a Gantt style visualization. Invisibly returns
the ggplot2 list object.
}
\description{
A \href{http://docs.ggplot2.org/current/}{ggplot2}
wrapper that produces a Gantt plot.
}
\note{
For non repeated measures data/plotting use
\code{\link[qdap]{gantt}}; for repeated measures data
output use \code{\link[qdap]{gantt_rep}}; and for a
convenient wrapper that takes text and generates plots
use \code{\link[qdap]{gantt_plot}}.
}
\examples{
\dontrun{
dat <- gantt(mraja1$dialogue, list(mraja1$fam.aff, mraja1$sex),
units = "sentences", plot.colors = 'black', sums = TRUE,
col.sep = "_")$gantt.df
htruncdf(dat)
gantt_wrap(dat, fam.aff_sex, title = "Gantt Plot")
dat$codes <- sample(LETTERS[1:3], nrow(dat), TRUE)
gantt_wrap(dat, fam.aff_sex, fill.var = "codes",
legend.position = "bottom")
dat2 <- with(rajSPLIT, gantt_rep(act, dialogue,
list(fam.aff, sex), units = "words", col.sep = "_"))
htruncdf(dat2)
x <- gantt_wrap(dat2, fam.aff_sex, facet.vars = "act",
title = "Repeated Measures Gantt Plot")
library(ggplot2); library(scales); library(RColorBrewer)
x + scale_color_manual(values=rep("black",
length(levels(dat2$fam.aff_sex))))
}
}
\author{
Andrie de Vries and Tyler Rinker
<tyler.rinker@gmail.com>.
}
\references{
Clark, W. & Gantt, H. (1922) The Gantt chart, a working
tool of management. New York, Ronald Press.
}
\seealso{
\code{\link[qdap]{gantt}},
\code{\link[qdap]{gantt_plot}},
\code{\link[qdap]{gantt_rep}},
\code{\link[ggplot2]{facet_grid}},
\code{\link[ggplot2]{facet_wrap}}
}
\keyword{Gantt}
| /man/gantt_wrap.Rd | no_license | abresler/qdap | R | false | false | 5,118 | rd | \name{gantt_wrap}
\alias{gantt_wrap}
\title{Gantt Plot}
\usage{
gantt_wrap(dataframe, plot.var, facet.vars = NULL,
fill.var = NULL, title = NULL,
ylab = as.character(plot.var),
xlab = "duration.default", rev.factor = TRUE,
transform = FALSE, ncol = NULL, minor.line.freq = NULL,
major.line.freq = NULL, sig.dig.line.freq = 1,
hms.scale = NULL, scale = NULL, space = NULL, size = 3,
rm.horiz.lines = FALSE, x.ticks = TRUE, y.ticks = TRUE,
legend.position = NULL, bar.color = NULL,
border.color = NULL, border.size = 2,
border.width = 0.1, constrain = TRUE)
}
\arguments{
\item{dataframe}{A data frame with plotting variable(s)
and a column of start and end times.}
\item{plot.var}{A factor plotting variable (y axis).}
\item{facet.vars}{An optional single vector or list of 1
or 2 to facet by.}
\item{fill.var}{An optional variable to fill the code
strips by.}
\item{title}{An optional title for the plot.}
\item{ylab}{An optional y label.}
\item{xlab}{An optional x label.}
\item{rev.factor}{logical. If \code{TRUE} reverse the
current plotting order so the first element in the
plotting variable's levels is plotted on top.}
\item{ncol}{if an integer value is passed to this
\code{\link[qdap]{gantt_wrap}} uses
\code{\link[ggplot2]{facet_wrap}} rather than
\code{\link[ggplot2]{facet_grid}}.}
\item{transform}{logical. If \code{TRUE} the repeated
facets will be transformed from stacked to side by side.}
\item{minor.line.freq}{A numeric value for frequency of
minor grid lines.}
\item{major.line.freq}{A numeric value for frequency of
major grid lines.}
\item{sig.dig.line.freq}{An internal rounding factor for
minor and major line frequencies. Generally, the default value of 1
suffices; for a larger range of the x scale it may need to be set
to -2.}
\item{hms.scale}{logical. If \code{TRUE} converts scale
to h:m:s format. Default \code{NULL} attempts to detect
if object is a cm_time2long object}
\item{scale}{Should scales be fixed (\code{"fixed"}, the
default), free (\code{"free"}), or free in one dimension
(\code{"free_x"}, \code{"free_y"})}
\item{space}{If \code{"fixed"}, the default, all panels
have the same size. If \code{"free_y"} their height will
be proportional to the length of the y scale; if
\code{"free_x"} their width will be proportional to the
length of the x scale; or if \code{"free"} both height
and width will vary. This setting has no effect unless
the appropriate scales also vary.}
\item{size}{The width of the plot bars.}
\item{rm.horiz.lines}{logical. If \code{TRUE} the
horizontal lines will be removed.}
\item{x.ticks}{logical. If \code{TRUE} the x ticks will
be displayed.}
\item{y.ticks}{logical. If \code{TRUE} the y ticks will
be displayed.}
\item{legend.position}{The position of legends.
(\code{"left"}, \code{"right"}, \code{"bottom"},
\code{"top"}, or two-element numeric vector).}
\item{bar.color}{Optional color to constrain all bars.}
\item{border.color}{The color to plot border around Gantt
bars (default is \code{NULL}).}
\item{border.size}{An integer value for the size to plot
borders around Gantt bars. Controls length (width also
controlled if not specified).}
\item{border.width}{Controls border width around Gantt
bars. Use a numeric value in addition to border size if
plot borders appear disproportional.}
\item{constrain}{logical. If \code{TRUE} the Gantt bars
touch the edge of the graph.}
}
\value{
Returns a Gantt style visualization. Invisibly returns
the ggplot2 list object.
}
\description{
A \href{http://docs.ggplot2.org/current/}{ggplot2}
wrapper that produces a Gantt plot.
}
\note{
For non repeated measures data/plotting use
\code{\link[qdap]{gantt}}; for repeated measures data
output use \code{\link[qdap]{gantt_rep}}; and for a
convenient wrapper that takes text and generates plots
use \code{\link[qdap]{gantt_plot}}.
}
\examples{
\dontrun{
dat <- gantt(mraja1$dialogue, list(mraja1$fam.aff, mraja1$sex),
units = "sentences", plot.colors = 'black', sums = TRUE,
col.sep = "_")$gantt.df
htruncdf(dat)
gantt_wrap(dat, fam.aff_sex, title = "Gantt Plot")
dat$codes <- sample(LETTERS[1:3], nrow(dat), TRUE)
gantt_wrap(dat, fam.aff_sex, fill.var = "codes",
legend.position = "bottom")
dat2 <- with(rajSPLIT, gantt_rep(act, dialogue,
list(fam.aff, sex), units = "words", col.sep = "_"))
htruncdf(dat2)
x <- gantt_wrap(dat2, fam.aff_sex, facet.vars = "act",
title = "Repeated Measures Gantt Plot")
library(ggplot2); library(scales); library(RColorBrewer)
x + scale_color_manual(values=rep("black",
length(levels(dat2$fam.aff_sex))))
}
}
\author{
Andrie de Vries and Tyler Rinker
<tyler.rinker@gmail.com>.
}
\references{
Clark, W. & Gantt, H. (1922) The Gantt chart, a working
tool of management. New York, Ronald Press.
}
\seealso{
\code{\link[qdap]{gantt}},
\code{\link[qdap]{gantt_plot}},
\code{\link[qdap]{gantt_rep}},
\code{\link[ggplot2]{facet_grid}},
\code{\link[ggplot2]{facet_wrap}}
}
\keyword{Gantt}
|
# this function creates a special matrix object that can cache its inverse.
#
# function set : # set function to set the value of matrix
# function get : # get to return the value of matrix
# function setinverse : # set the inverse of the matrix
# function getinverse : # get the inverse of the matrix which is stored in parameter m
# Build a matrix wrapper that can cache its inverse.  Returns a list of four
# accessor functions:
#   set(y)          replace the stored matrix and drop any stale cached inverse
#   get()           return the stored matrix
#   setinverse(inv) store a computed inverse in the cache
#   getinverse()    return the cached inverse (NULL if not computed yet)
makeCacheMatrix <- function(x = matrix()) {
  # Cached inverse; NULL until cacheSolve() stores a value.
  inv_cache <- NULL

  set <- function(y) {
    x <<- y
    inv_cache <<- NULL  # the old inverse no longer matches the new matrix
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    inv_cache <<- inverse
  }
  getinverse <- function() {
    inv_cache
  }

  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
# This function computes the inverse of the special "matrix" returned by
# makeCacheMatrix above. If the inverse has already been calculated (and the
# matrix has not changed), cacheSolve retrieves the inverse from the cache.
# Return the inverse of a cache-matrix built by makeCacheMatrix().
#   x   - object returned by makeCacheMatrix()
#   ... - forwarded to solve()
# The cached inverse is reused when available; otherwise the inverse is
# computed, stored in the cache, and returned.
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # First call (or cache invalidated by set()): invert and remember it.
  inverted <- solve(x$get(), ...)
  x$setinverse(inverted)
  inverted
}
| /cachematrix.R | no_license | fhadinezhad/ProgrammingAssignment2 | R | false | false | 1,298 | r |
# this function creates a special matrix object that can cache its inverse.
#
# function set : # set function to set the value of matrix
# function get : # get to return the value of matrix
# function setinverse : # set the inverse of the matrix
# function getinverse : # get the inverse of the matrix which is stored in parameter m
# Build a matrix wrapper that can cache its inverse.  Returns a list of four
# accessor functions:
#   set(y)          replace the stored matrix and clear the cached inverse
#   get()           return the stored matrix
#   setinverse(inv) store a computed inverse in the cache
#   getinverse()    return the cached inverse (NULL if not computed yet)
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y
    m <<- NULL  # invalidate the cache when the matrix changes
  }
  get <- function() x
  setinverse <- function(inverse) m <<- inverse
  getinverse <- function() m
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
# This function computes the inverse of the special "matrix" returned by makeCacheMatrix above. If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should retrieve the inverse from the cache.
# Return the inverse of a cache-matrix built by makeCacheMatrix().
# If the inverse is already cached it is returned immediately; otherwise it
# is computed with solve(), stored in the cache, and returned.
cacheSolve <- function(x, ...) {
  m <- x$getinverse()
  if(!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  m <- solve(data, ...)# invert the matrix; ... is forwarded to solve()
  x$setinverse(m)
  m # return the last statement which is the inverse matrix
}
|
library(solrad)
### Name: Transmittance
### Title: Atmospheric Transmittance
### Aliases: Transmittance
### Keywords: Atmospheric Transmittance
### ** Examples
#Calculating atmospheric transmittance coefficient for two consecutive days on 45 degree
# latitude and 10 degree longitude and at 100 m altitude.
# DOY is a fractional day-of-year sequence sampled every 0.05 day.
DOY <- seq(0, 2, .05)
tb <- Transmittance(DOY, Lat = 45, Lon=10, SLon=10, DS=0, Elevation = 100)
#Note: only the difference between Lon and SLon matters not each value
plot(DOY, tb)
| /data/genthat_extracted_code/solrad/examples/Transmittance.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 502 | r | library(solrad)
### Name: Transmittance
### Title: Atmospheric Transmittance
### Aliases: Transmittance
### Keywords: Atmospheric Transmittance
### ** Examples
#Calculating atmospheric transmittance coefficient for two consecutive days on 45 degree
# latitude and 10 degree longitude and at 100 m altitude.
DOY <- seq(0, 2, .05)
tb <- Transmittance(DOY, Lat = 45, Lon=10, SLon=10, DS=0, Elevation = 100)
#Note: only the difference between Lon and SLon matters not each value
plot(DOY, tb)
|
# Download necessary files directly to the appopriate folder
# Small files go to data/raw-data/small/
# while larger files (>100M) go to data/raw-data/big/
library(tidyverse)
library(jsonlite)
library(tigris)
library(sf)
# Cache tigris downloads and return sf objects rather than sp.
# Use TRUE, not the abbreviation T (T can be reassigned).
options(use_tigris_cache = TRUE,
        tigris_class = "sf")
# Create output directory to place big raw-data in.
dir.create("data/raw-data/big", showWarnings = FALSE)
#----Download Census tracts from Census FTP site ------------------------------
# Census tract geographic files
# Tracts simplify to 1:500k,
# Census tract (2010)
# Download and unpack the 2018 cartographic-boundary tract shapefile for one
# state (two-digit FIPS code string), leaving the extracted files in
# data/raw-data/big and removing the temporary zip archive.
download_by_state <- function(state) {
  #Note we use GENZ2018, or the Cartographic boundary files
  download.file(url = str_glue("https://www2.census.gov/geo/tiger/GENZ2018/shp/cb_2018_{state}_tract_500k.zip"),
                destfile = str_glue("data/raw-data/big/{state}.zip"),
                # zip archives are binary: without mode = "wb" the download
                # is corrupted on Windows, where the default is text mode
                mode = "wb")
  unzip(zipfile = str_glue("data/raw-data/big/{state}.zip"),
        exdir = "data/raw-data/big")
  file.remove(str_glue("data/raw-data/big/{state}.zip"))
}
# Pull the list of state FIPS codes from the 2010 Census API; the result is
# a character matrix whose second column holds the codes.
state_fips <- fromJSON("https://api.census.gov/data/2010/dec/sf1?get=NAME&for=state:*")
# Drop the first row (the "state" header entry), keeping only the codes.
state_fips <- state_fips[, 2][c(2:length(state_fips[, 2]))]
# Fetch tract shapefiles for every state.
dl <- state_fips %>% map(download_by_state)
#----Download LODES data-----------------------------------
# Downloaded from the Urban Institute Data Catalog
# All jobs, RAC
download.file(url = "https://ui-spark-data-public.s3.amazonaws.com/lodes/summarized-files/Tract_level/all_jobs_excluding_fed_jobs/rac_all_tract_level.csv",
              destfile = "data/raw-data/big/rac_all.csv")
# All jobs, RAC, >=$40,000 per year
download.file(url = "https://urban-data-catalog.s3.amazonaws.com/drupal-root-live/2020/03/30/rac_se03_tract.csv",
              destfile = "data/raw-data/big/rac_se03.csv")
#----Download Unemployment data from BLS and WA------------
# BLS CES Data (plain text, so the default download mode is fine)
download.file(url = "https://download.bls.gov/pub/time.series/ce/ce.data.0.AllCESSeries",
              destfile = "data/raw-data/big/ces_all.txt")
# BLS QCEW Data for Washington (SAE does not have all industries for WA)
download.file(url = "https://data.bls.gov/cew/data/files/2019/csv/2019_qtrly_by_area.zip",
              destfile = "data/raw-data/big/wa_qcew.zip",
              # zip is binary: mode = "wb" prevents corruption on Windows
              mode = "wb")
unzip("data/raw-data/big/wa_qcew.zip",
      files = c("2019.q1-q3.by_area/2019.q1-q3 53000 Washington -- Statewide.csv"),
      exdir = "data/raw-data/big")
file.remove("data/raw-data/big/wa_qcew.zip")
# Flatten the extracted file into data/raw-data/big and drop the directory.
file.rename(from = "data/raw-data/big/2019.q1-q3.by_area/2019.q1-q3 53000 Washington -- Statewide.csv",
            to = "data/raw-data/big/wa_qcew.csv")
unlink("data/raw-data/big/2019.q1-q3.by_area", recursive = TRUE)
# WA state unemployment estimates, most recent
download.file(url = "https://esdorchardstorage.blob.core.windows.net/esdwa/Default/ESDWAGOV/labor-market-info/Libraries/Regional-reports/UI-Claims-Karen/2020 claims/UI claims week 14_2020.xlsx",
              destfile = "data/raw-data/big/UI claims week 14_2020.xlsx",
              #download file in binary mode, if you don't, xlsx file is corrupted
              mode="wb")
#----Download cbsas, counties, and states from tigris------------
# cb = TRUE requests the generalized cartographic-boundary files
# (TRUE, not the reassignable abbreviation T).
my_cbsas <- core_based_statistical_areas(cb = TRUE)
my_counties <- counties(cb = TRUE)
my_states <- states(cb = TRUE)
# Reproject an sf layer to WGS84 (EPSG:4326) and write it to filepath,
# skipping files that already exist from a previous run.
#   name     - sf object to write
#   filepath - destination path (format inferred from the extension)
clean_and_write_sf <- function(name, filepath) {
  if (file.exists(filepath)) {
    return(invisible(NULL))
  }
  reprojected <- st_transform(name, 4326)
  st_write(reprojected, filepath, delete_dsn = TRUE)
}
# Ensure the processed-output directory exists, then write each reprojected
# boundary layer as GeoJSON (no-ops when the files already exist).
dir.create("data/processed-data/s3_final", showWarnings = FALSE)
clean_and_write_sf(my_cbsas, "data/raw-data/big/cbsas.geojson")
clean_and_write_sf(my_counties, "data/raw-data/big/counties.geojson")
clean_and_write_sf(my_states, "data/raw-data/big/states.geojson")
| /scripts/1-download-data.R | no_license | brittany-durkin/covid-neighborhood-job-analysis | R | false | false | 3,659 | r | # Download necessary files directly to the appopriate folder
# Small files go to data/raw-data/small/
# while larger files (>100M) go to data/raw-data/big/
library(tidyverse)
library(jsonlite)
library(tigris)
library(sf)
options(use_tigris_cache = T,
tigris_class = "sf")
# Create output directory to place big raw-data in.
dir.create("data/raw-data/big", showWarnings = FALSE)
#----Download Census tracts from Census FTP site ------------------------------
# Census tract geographic files
# Tracts simplify to 1:500k,
# Census tract (2010)
# Download and unpack the 2018 cartographic-boundary tract shapefile for one
# state (two-digit FIPS code string), leaving the extracted files in
# data/raw-data/big and removing the temporary zip archive.
download_by_state <- function(state) {
  #Note we use GENZ2018, or the Cartographic boundary files
  download.file(url = str_glue("https://www2.census.gov/geo/tiger/GENZ2018/shp/cb_2018_{state}_tract_500k.zip"),
                destfile = str_glue("data/raw-data/big/{state}.zip"),
                # zip archives are binary: without mode = "wb" the download
                # is corrupted on Windows, where the default is text mode
                mode = "wb")
  unzip(zipfile = str_glue("data/raw-data/big/{state}.zip"),
        exdir = "data/raw-data/big")
  file.remove(str_glue("data/raw-data/big/{state}.zip"))
}
state_fips <- fromJSON("https://api.census.gov/data/2010/dec/sf1?get=NAME&for=state:*")
state_fips <- state_fips[, 2][c(2:length(state_fips[, 2]))]
dl <- state_fips %>% map(download_by_state)
#----Download LODES data-----------------------------------
# Downloaded from the Urban Institute Data Catalog
# All jobs, RAC
download.file(url = "https://ui-spark-data-public.s3.amazonaws.com/lodes/summarized-files/Tract_level/all_jobs_excluding_fed_jobs/rac_all_tract_level.csv",
destfile = "data/raw-data/big/rac_all.csv")
# All jobs, RAC, >=$40,000 per year
download.file(url = "https://urban-data-catalog.s3.amazonaws.com/drupal-root-live/2020/03/30/rac_se03_tract.csv",
destfile = "data/raw-data/big/rac_se03.csv")
#----Download Unemployment data from BLS and WA------------
# BLS CES Data
download.file(url = "https://download.bls.gov/pub/time.series/ce/ce.data.0.AllCESSeries",
destfile = "data/raw-data/big/ces_all.txt")
# BLS QCEW Data for Washington (SAE does not have all industries for WA)
download.file(url = "https://data.bls.gov/cew/data/files/2019/csv/2019_qtrly_by_area.zip",
destfile = "data/raw-data/big/wa_qcew.zip")
unzip("data/raw-data/big/wa_qcew.zip",
files = c("2019.q1-q3.by_area/2019.q1-q3 53000 Washington -- Statewide.csv"),
exdir = "data/raw-data/big")
file.remove("data/raw-data/big/wa_qcew.zip")
file.rename(from = "data/raw-data/big/2019.q1-q3.by_area/2019.q1-q3 53000 Washington -- Statewide.csv",
to = "data/raw-data/big/wa_qcew.csv")
unlink("data/raw-data/big/2019.q1-q3.by_area", recursive = TRUE)
# WA state unemployment estimates, most recent
download.file(url = "https://esdorchardstorage.blob.core.windows.net/esdwa/Default/ESDWAGOV/labor-market-info/Libraries/Regional-reports/UI-Claims-Karen/2020 claims/UI claims week 14_2020.xlsx",
destfile = "data/raw-data/big/UI claims week 14_2020.xlsx",
#download file in binary mode, if you don't, xlsx file is corrupted
mode="wb")
#----Download cbsas, counties, and states from tigris------------
my_cbsas<-core_based_statistical_areas(cb = T)
my_counties <- counties(cb = T)
my_states <- states(cb = T)
clean_and_write_sf <- function(name, filepath) {
if(!file.exists(filepath)){
name %>%
st_transform(4326) %>%
st_write(., filepath, delete_dsn = TRUE)
}
}
dir.create("data/processed-data/s3_final", showWarnings = FALSE)
clean_and_write_sf(my_cbsas, "data/raw-data/big/cbsas.geojson")
clean_and_write_sf(my_counties, "data/raw-data/big/counties.geojson")
clean_and_write_sf(my_states, "data/raw-data/big/states.geojson")
|
#' The Minimum Ellipsoid Problem
#'
#'\code{minelips} creates input for sqlp to solve the minimum ellipsoid problem -
#'given a set of n points, find the minimum volume ellipsoid that contains all the points
#'
#'@details
#' for a set of points (x1,...,xn) determines the ellipse of minimum volume that contains all points.
#' Mathematical and implementation details can be found in the vignette
#'
#' @param V An nxp matrix consisting of the points to be contained in the ellipsoid
#'
#' @return
#' \item{X}{A list containing the solution matrix to the primal problem}
#' \item{y}{A list containing the solution vector to the dual problem}
#' \item{Z}{A list containing the solution matrix to the dual problem}
#' \item{pobj}{The achieved value of the primary objective function}
#' \item{dobj}{The achieved value of the dual objective function}
#'
#' @examples
#' data(Vminelips)
#'
#' #Not Run
#' #out <- minelips(Vminelips)
#'
#' @export
minelips <- function(V){
  # ---- Error checking ---------------------------------------------------
  stopifnot(is.matrix(V), is.numeric(V))
  if(ncol(V) > nrow(V)){
    warning("Point Configuration has higher dimension than number of points.")
  }

  # ---- Problem dimensions ----------------------------------------------
  # After the transpose the points sit in the columns: p = dimension of the
  # space, m = number of points, N = order of the first SDP block.
  V <- t(V)
  p <- nrow(V)
  m <- ncol(V)
  N <- (p+1)*m

  # ---- SDP block structure (see the vignette for the formulation) -------
  blk <- matrix(list(),2,2)
  At <- matrix(list(),2,1)
  C <- matrix(list(),2,1)
  parbarrier <- matrix(list(),2,1)
  OPTIONS <- list()
  blk[[1,1]] <- "s"
  blk[[1,2]] <- (p+1)*matrix(1,1,m)
  blk[[2,1]] <- "s"
  blk[[2,2]] <- p

  # One constraint per entry of the symmetric p x p shape matrix
  # (p diagonal + p(p-1)/2 off-diagonal) plus p centre variables.
  count <- 0
  Ftmp <- matrix(list(),2,2*p+p*(p-1)/2)

  # Diagonal entries of the shape matrix.
  for(j in seq_len(p)){
    s1 <- V[j,]
    i1 <- seq(j, (p+1)*(m-1)+j, p+1)
    j1 <- seq(p+1,(p+1)*m,p+1)
    tmp <- Matrix(0, N, N,sparse=TRUE)
    for(i in seq_along(i1)){
      tmp[i1[i],j1[i]] <- s1[i]
    }
    tmp <- tmp + t(tmp)
    count <- count + 1
    Ftmp[[1,count]] <- -tmp
    Ftmp[[2,count]] <- Matrix(0,p,p,sparse=TRUE)
    Ftmp[[2,count]][j,j] <- -1
  }

  # Off-diagonal entries: these exist only when p >= 2.  BUG FIX: the
  # original unconditional `for (j in 2:p)` evaluated to c(2, 1) when
  # p == 1 and crashed with a subscript-out-of-bounds error.
  if(p >= 2){
    for(j in 2:p){
      for(k in 1:(j-1)){
        s1 <- V[k,]
        i1 <- seq(j,(p+1)*(m-1)+j,p+1)
        j1 <- seq(p+1,(p+1)*m,p+1)
        s2 <- V[j,]
        i2 <- seq(k,(p+1)*(m-1)+k,p+1)
        j2 <- seq(p+1,(p+1)*m,p+1)
        tmp1 <- Matrix(0,N,N,sparse=TRUE)
        tmp2 <- Matrix(0,N,N,sparse=TRUE)
        for(i in seq_along(i1)){
          tmp1[i1[i],j1[i]] <- s1[i]
        }
        for(i in seq_along(i2)){
          tmp2[i2[i],j2[i]] <- s2[i]
        }
        tmp <- tmp1 + tmp2
        tmp <- tmp + t(tmp)
        count <- count + 1
        Ftmp[[1,count]] <- -tmp
        Ftmp[[2,count]] <- Matrix(0,p,p,sparse=TRUE)
        Ftmp[[2,count]][j,k] <- -1
        Ftmp[[2,count]][k,j] <- -1
      }
    }
  }

  # Centre-of-ellipsoid variables, one per dimension.
  for(j in seq_len(p)){
    s1 <- rep(1,m)
    i1 <- seq(j,(p+1)*(m-1)+j,p+1)
    j1 <- seq(p+1,(p+1)*m,p+1)
    tmp <- Matrix(0,N,N,sparse=TRUE)
    for(i in seq_along(i1)){
      tmp[i1[i],j1[i]] <- s1[i]
    }
    tmp <- tmp + t(tmp)
    count <- count + 1
    Ftmp[[1,count]] <- -tmp
    Ftmp[[2,count]] <- Matrix(0,p,p,sparse=TRUE)
  }

  # ---- Assemble the data in SDPT3 form and solve ------------------------
  At <- svec(blk,Ftmp,rep(1,2))
  C[[1,1]] <- Diagonal(N,1)
  C[[2,1]] <- matrix(0,p,p)
  b <- matrix(0,p*(p+1)/2+p,1)
  parbarrier[[1,1]] <- 0
  # Barrier parameter on the second (shape-matrix) block; see sqlp_base.
  parbarrier[[2,1]] <- 1
  OPTIONS$parbarrier <- parbarrier
  out <- sqlp_base(blk=blk, At=At, b=b, C=C, OPTIONS = OPTIONS)
  # Flatten the 1-column solution lists before returning.
  dim(out$X) <- NULL
  dim(out$Z) <- NULL
  return(out)
}
#'
#'\code{minelips} creates input for sqlp to solve the minimum ellipsoid problem -
#'given a set of n points, find the minimum volume ellipsoid that contains all the points
#'
#'@details
#' for a set of points (x1,...,xn) determines the ellipse of minimum volume that contains all points.
#' Mathematical and implementation details can be found in the vignette
#'
#' @param V An nxp matrix consisting of the points to be contained in the ellipsoid
#'
#' @return
#' \item{X}{A list containing the solution matrix to the primal problem}
#' \item{y}{A list containing the solution vector to the dual problem}
#' \item{Z}{A list containing the solution matrix to the dual problem}
#' \item{pobj}{The achieved value of the primary objective function}
#' \item{dobj}{The achieved value of the dual objective function}
#'
#' @examples
#' data(Vminelips)
#'
#' #Not Run
#' #out <- minelips(Vminelips)
#'
#' @export
minelips <- function(V) {
  # Build and solve the SDP whose solution is the minimum-volume ellipsoid
  # containing the rows of V.  V is an n x p matrix of points; the problem
  # data are assembled in SDPT3 cell-array form (blk/At/C) and handed to
  # sqlp_base().
  # Error checking
  stopifnot(is.matrix(V), is.numeric(V))
  if (ncol(V) > nrow(V)) {
    warning("Point Configuration has higher dimension than number of points.")
  }
  # Define variables
  V <- t(V)            # columns are now the m points, rows the p coordinates
  p <- nrow(V)         # dimension of the ambient space
  m <- ncol(V)         # number of points
  N <- (p + 1) * m     # total size of the first (block-diagonal) SDP variable
  blk <- matrix(list(), 2, 2)
  At <- matrix(list(), 2, 1)
  C <- matrix(list(), 2, 1)
  parbarrier <- matrix(list(), 2, 1)
  OPTIONS <- list()
  # Two semidefinite blocks: m sub-blocks of size (p+1), plus one p x p block.
  blk[[1, 1]] <- "s"
  blk[[1, 2]] <- (p + 1) * matrix(1, 1, m)
  blk[[2, 1]] <- "s"
  blk[[2, 2]] <- p
  count <- 0
  # One constraint per diagonal entry (p), per off-diagonal pair
  # (p*(p-1)/2), and per centre coordinate (p).
  Ftmp <- matrix(list(), 2, 2 * p + p * (p - 1) / 2)
  # Constraints tied to the diagonal entries of the shape matrix.
  for (j in 1:p) {
    s1 <- V[j, ]
    i1 <- seq(j, (p + 1) * (m - 1) + j, p + 1)
    j1 <- seq(p + 1, (p + 1) * m, p + 1)
    tmp <- Matrix(0, N, N, sparse = TRUE)
    for (i in seq_along(i1)) {
      tmp[i1[i], j1[i]] <- s1[i]
    }
    tmp <- tmp + t(tmp)
    count <- count + 1
    Ftmp[[1, count]] <- -tmp
    Ftmp[[2, count]] <- Matrix(0, p, p, sparse = TRUE)
    Ftmp[[2, count]][j, j] <- -1
  }
  # Constraints tied to the off-diagonal entries.  Guarded so that the
  # one-dimensional case (p == 1) does not iterate over 2:1 == c(2, 1),
  # which would index V[2, ] out of bounds.
  if (p > 1) {
    for (j in 2:p) {
      for (k in 1:(j - 1)) {
        s1 <- V[k, ]
        i1 <- seq(j, (p + 1) * (m - 1) + j, p + 1)
        j1 <- seq(p + 1, (p + 1) * m, p + 1)
        s2 <- V[j, ]
        i2 <- seq(k, (p + 1) * (m - 1) + k, p + 1)
        j2 <- seq(p + 1, (p + 1) * m, p + 1)
        tmp1 <- Matrix(0, N, N, sparse = TRUE)
        tmp2 <- Matrix(0, N, N, sparse = TRUE)
        for (i in seq_along(i1)) {
          tmp1[i1[i], j1[i]] <- s1[i]
        }
        for (i in seq_along(i2)) {
          tmp2[i2[i], j2[i]] <- s2[i]
        }
        tmp <- tmp1 + tmp2
        tmp <- tmp + t(tmp)
        count <- count + 1
        Ftmp[[1, count]] <- -tmp
        Ftmp[[2, count]] <- Matrix(0, p, p, sparse = TRUE)
        Ftmp[[2, count]][j, k] <- -1
        Ftmp[[2, count]][k, j] <- -1
      }
    }
  }
  # Constraints tied to the centre of the ellipsoid (one per coordinate).
  for (j in 1:p) {
    s1 <- rep(1, m)
    i1 <- seq(j, (p + 1) * (m - 1) + j, p + 1)
    j1 <- seq(p + 1, (p + 1) * m, p + 1)
    tmp <- Matrix(0, N, N, sparse = TRUE)
    for (i in seq_along(i1)) {
      tmp[i1[i], j1[i]] <- s1[i]
    }
    tmp <- tmp + t(tmp)
    count <- count + 1
    Ftmp[[1, count]] <- -tmp
    Ftmp[[2, count]] <- Matrix(0, p, p, sparse = TRUE)
  }
  # Vectorize the constraint matrices and set up objective / barrier data.
  At <- svec(blk, Ftmp, rep(1, 2))
  C[[1, 1]] <- Diagonal(N, 1)
  C[[2, 1]] <- matrix(0, p, p)
  b <- matrix(0, p * (p + 1) / 2 + p, 1)  # one rhs entry per constraint
  parbarrier[[1, 1]] <- 0
  parbarrier[[2, 1]] <- 1                 # log-det barrier on the p x p block
  OPTIONS$parbarrier <- parbarrier
  out <- sqlp_base(blk = blk, At = At, b = b, C = C, OPTIONS = OPTIONS)
  # Strip dimensions so X and Z come back as plain lists, not 1-col matrices.
  dim(out$X) <- NULL
  dim(out$Z) <- NULL
  return(out)
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trackdata.class.R
\name{plot.trackdata}
\alias{plot.trackdata}
\title{Produces time-series plots from trackdata}
\usage{
\method{plot}{trackdata}(x, timestart = NULL, xlim = NULL, ylim = NULL,
labels = NULL, col = TRUE, lty = FALSE, type = "p", pch = NULL,
contig = TRUE, ...)
}
\arguments{
\item{x}{A trackdata object.}
\item{timestart}{A single valued numeric vector for setting the time at
which the trackdata should start. Defaults to NULL which means that the
start time is taken from start(trackdata), i.e. the time at which the
trackdata object starts.}
\item{xlim}{A numeric vector of two values for specifying the time interval
over which the trackdata is to be plotted. Defaults to NULL which means
that the trackdata object is plotted between the start time of the
first segment and the end time of the last segment.}
\item{ylim}{Specify a yaxis range.}
\item{labels}{A character vector the same length as the number of segments
in the trackdata object. Each label is plotted at side = 3 at the
temporal midpoint of each segment in the trackdata object. Defaults
to NULL (plot no labels). Labels will only be plotted if xlim=NULL.}
\item{col}{A single element logical vector. Defaults to T to plot each
label type in a different colour}
\item{lty}{A single element logical vector. Defaults to F. If TRUE, plot
each label type in a different linetype}
\item{type}{Specify the type of plot. See \link{plot} for the various
possibilities}
\item{pch}{The symbol types to be used for plotting. Should be specified as
a numeric vector of the same length as there are unique label classes}
\item{contig}{A single valued logical vector T or F. If T, then all the
segments of the trackdata object are assumed to be temporally contiguous,
i.e. the boundaries of the segments are abutting in time and the start time
of segment[j,] is the end time of segment[j-1,]. In this case, all the
segments of the trackdata object are plotted on the same plot as a function
of time. An example of a contiguous trackdata object is coutts.sam. contig
= FALSE is when a trackdata object is non-contiguous e.g. all "i:" vowels
in a database. An example of a non-contiguous trackdata object is
vowlax.fdat. If contig=F then each segment of the trackdata object is
plotted separately.}
\item{...}{the same graphical parameters can be supplied to this function
as for plot e.g type="l", lty=2 etc.}
}
\description{
The function produces a plot as a function of time for a single segment or
multiple plots as a function of time for several segments.
}
\details{
The function plots a single segment of trackdata as a function of time. If
the segment contains multiple tracks, then these will be overlaid. If there
are several temporally non-contiguous segments in the trackdata object,
each segment is plotted in a different panel by specifying contig=F. This
function is not suitable for overlaying trackdata from more than one
segment on the same plot as a function of time: for this use dplot().
}
\examples{
# a single segment of trackdata (F1) plotted as a function of time.
plot(vowlax.fdat[1,1])
# as above, but limits are set for the time axis.
plot(vowlax.fdat[1,1], xlim=c(880, 920))
# the start-time of the x-axis is set to 0 ms, plot F1 and F3, lineplot
plot(vowlax.fdat[1,c(1,3)], timestart=0, type="l")
# plot F1-F4, same colour, same plotting symbol, between 900
# and 920 ms, type is line and points plot, different linetype per track, no box
plot(vowlax.fdat[1,], col="blue", pch=20, xlim=c(900, 920), type="b", lty=TRUE, bty="n")
# F1 and F2 of six vowels with labels, separate windows
par(mfrow=c(2,3))
plot(vowlax.fdat[1:6,1:2], contig=FALSE, labels=vowlax.l[1:6], ylab="F1 and F2",
xlab="Time (ms)", type="b", ylim=c(300, 2400))
# As above, timestart set to zero, colour set to blue, different plotting
# symbols for the two tracks
plot(vowlax.fdat[1:6,1:2], contig=FALSE, labels=vowlax.l[1:6], ylab="F1 and F2",
xlab="Time (ms)", type="b", col="blue", pch=c(1,2), ylim=c(300, 2400), timestart=0)
# RMS energy for the utterance 'just relax said Coutts'
plot(coutts.rms, type="l")
# as above a different colour
plot(coutts.rms, type="l", col="pink")
# as above, linetype 2, double line thickness, no box, times reset to 0 ms
plot(coutts.rms, type="l", col="pink", lty=2, lwd=2, bty="n", timestart=0)
# as above but plotted as non-contiguous segments, i.e one segment per panel
par(mfrow=c(2,3))
plot(coutts.rms, type="l", col="pink", lty=2, lwd=2, bty="n", timestart=0, contig=FALSE)
# plot with labels
labels = label(coutts)
par(mfrow=c(1,1))
plot(coutts.rms, labels=labels, type="l", bty="n")
# as above, double line-thickness, green, line type 3, no box,
# time start 0 ms with x and y axis labels
plot(coutts.rms, labels=labels, type="l", lwd=2,
col="green", lty=3, bty="n", timestart=0, xlab="Time (ms)", ylab="Amplitude")
# as above with a different plotting symbol for the points
par(mfrow=c(2,3))
plot(coutts.rms, labels=labels, type="b", lwd=2, col="green",
timestart=0, bty="n", contig=FALSE, pch=20)
}
\seealso{
\code{\link{plot}}, \code{\link{dplot}}
}
\author{
Jonathan Harrington
}
\keyword{dplot}
| /man/plot.trackdata.Rd | no_license | NPoe/emuR | R | false | true | 5,277 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trackdata.class.R
\name{plot.trackdata}
\alias{plot.trackdata}
\title{Produces time-series plots from trackdata}
\usage{
\method{plot}{trackdata}(x, timestart = NULL, xlim = NULL, ylim = NULL,
labels = NULL, col = TRUE, lty = FALSE, type = "p", pch = NULL,
contig = TRUE, ...)
}
\arguments{
\item{x}{A trackdata object.}
\item{timestart}{A single valued numeric vector for setting the time at
which the trackdata should start. Defaults to NULL which means that the
start time is taken from start(trackdata), i.e. the time at which the
trackdata object starts.}
\item{xlim}{A numeric vector of two values for specifying the time interval
over which the trackdata is to be plotted. Defaults to NULL which means
that the trackdata object is plotted between the start time of the
first segment and the end time of the last segment.}
\item{ylim}{Specify a yaxis range.}
\item{labels}{A character vector the same length as the number of segments
in the trackdata object. Each label is plotted at side = 3 at the
temporal midpoint of each segment in the trackdata object. Defaults
to NULL (plot no labels). Labels will only be plotted if xlim=NULL.}
\item{col}{A single element logical vector. Defaults to T to plot each
label type in a different colour}
\item{lty}{A single element logical vector. Defaults to F. If TRUE, plot
each label type in a different linetype}
\item{type}{Specify the type of plot. See \link{plot} for the various
possibilities}
\item{pch}{The symbol types to be used for plotting. Should be specified as
a numeric vector of the same length as there are unique label classes}
\item{contig}{A single valued logical vector T or F. If T, then all the
segments of the trackdata object are assumed to be temporally contiguous,
i.e. the boundaries of the segments are abutting in time and the start time
of segment[j,] is the end time of segment[j-1,]. In this case, all the
segments of the trackdata object are plotted on the same plot as a function
of time. An example of a contiguous trackdata object is coutts.sam. contig
= FALSE is when a trackdata object is non-contiguous e.g. all "i:" vowels
in a database. An example of a non-contiguous trackdata object is
vowlax.fdat. If contig=F then each segment of the trackdata object is
plotted separately.}
\item{...}{the same graphical parameters can be supplied to this function
as for plot e.g type="l", lty=2 etc.}
}
\description{
The function produces a plot as a function of time for a single segment or
multiple plots as a function of time for several segments.
}
\details{
The function plots a single segment of trackdata as a function of time. If
the segment contains multiple tracks, then these will be overlaid. If there
are several temporally non-contiguous segments in the trackdata object,
each segment is plotted in a different panel by specifying contig=F. This
function is not suitable for overlaying trackdata from more than one
segment on the same plot as a function of time: for this use dplot().
}
\examples{
# a single segment of trackdata (F1) plotted as a function of time.
plot(vowlax.fdat[1,1])
# as above, but limits are set for the time axis.
plot(vowlax.fdat[1,1], xlim=c(880, 920))
# the start-time of the x-axis is set to 0 ms, plot F1 and F3, lineplot
plot(vowlax.fdat[1,c(1,3)], timestart=0, type="l")
# plot F1-F4, same colour, same plotting symbol, between 900
# and 920 ms, type is line and points plot, different linetype per track, no box
plot(vowlax.fdat[1,], col="blue", pch=20, xlim=c(900, 920), type="b", lty=TRUE, bty="n")
# F1 and F2 of six vowels with labels, separate windows
par(mfrow=c(2,3))
plot(vowlax.fdat[1:6,1:2], contig=FALSE, labels=vowlax.l[1:6], ylab="F1 and F2",
xlab="Time (ms)", type="b", ylim=c(300, 2400))
# As above, timestart set to zero, colour set to blue, different plotting
# symbols for the two tracks
plot(vowlax.fdat[1:6,1:2], contig=FALSE, labels=vowlax.l[1:6], ylab="F1 and F2",
xlab="Time (ms)", type="b", col="blue", pch=c(1,2), ylim=c(300, 2400), timestart=0)
# RMS energy for the utterance 'just relax said Coutts'
plot(coutts.rms, type="l")
# as above a different colour
plot(coutts.rms, type="l", col="pink")
# as above, linetype 2, double line thickness, no box, times reset to 0 ms
plot(coutts.rms, type="l", col="pink", lty=2, lwd=2, bty="n", timestart=0)
# as above but plotted as non-contiguous segments, i.e one segment per panel
par(mfrow=c(2,3))
plot(coutts.rms, type="l", col="pink", lty=2, lwd=2, bty="n", timestart=0, contig=FALSE)
# plot with labels
labels = label(coutts)
par(mfrow=c(1,1))
plot(coutts.rms, labels=labels, type="l", bty="n")
# as above, double line-thickness, green, line type 3, no box,
# time start 0 ms with x and y axis labels
plot(coutts.rms, labels=labels, type="l", lwd=2,
col="green", lty=3, bty="n", timestart=0, xlab="Time (ms)", ylab="Amplitude")
# as above with a different plotting symbol for the points
par(mfrow=c(2,3))
plot(coutts.rms, labels=labels, type="b", lwd=2, col="green",
timestart=0, bty="n", contig=FALSE, pch=20)
}
\seealso{
\code{\link{plot}}, \code{\link{dplot}}
}
\author{
Jonathan Harrington
}
\keyword{dplot}
|
library(MODIStsp)
library(raster)
library(velox)
library(mgcv)
library(rgeos)
library(sp)
library(rgdal)
library(tmap)
library(data.table)
library(scales)
library(quantreg)
library(FlexParamCurve)
library(signal)
library(zoo)
library(rasterVis)
library(FRutils)
library(phenex)
#MODIStsp()
# Indices of the n largest (max = TRUE) or smallest (max = FALSE) values
# of x within the window [beg, end].  Indices are relative to the full
# vector, best value first.
findminmax <- function(x, n = 1, beg = 1, end = length(x), max = TRUE) {
  stopifnot(n <= length(beg:end))
  # Rank the whole vector; negating x makes rank 1 the largest value.
  r <- rank(if (max) -x else x)
  # Order the window slice and translate window positions back to
  # absolute indices.  (The previous match()-against-the-whole-vector
  # approach could return an index outside [beg, end] when a value in
  # the window tied with one outside it, and returned the same index
  # twice for ties inside the window.)
  as.integer(beg) - 1L + order(r[beg:end])[1:n]
}
# For a named vector whose names are dates ("YYYY-MM-DD"): within each
# contiguous run of elements whose month-day part ("MM-DD", characters
# 6-10 of the name) lies inside [beg, end], return the absolute indices
# of the n largest (max = TRUE) or smallest (max = FALSE) values.
# Returns one integer vector per run, as a list.
findminmax <- function(x, n = 1, beg = "06-01", end = "11-01", max = TRUE) {
  stopifnot(!is.null(names(x)))
  d <- substr(names(x), 6, 10)          # "MM-DD" part of each name
  bloc <- d >= beg & d <= end           # inside the seasonal window?
  run <- rle(bloc)
  ends <- cumsum(run$lengths)
  # Run i starts one past the end of run i-1.  (The previous version
  # computed starts as c(1, head(ends, -1)) and therefore began every
  # run after the first one element too early.)
  starts <- ends - run$lengths + 1L
  windows <- Map(":", starts[run$values], ends[run$values])
  r <- rank(if (max) -x else x)
  lapply(windows, function(i) {
    # Best n positions inside this window, mapped back to absolute
    # indices (robust to rank ties with values outside the window).
    i[order(r[i])[1:n]]
  })
}
##### load RData #####
# Load every .RData time-series file in the MODIStsp output folder.  Each
# file defines an object named `raster_ts`; copy it into a numbered
# variable (r1, r2, ...) so successive loads do not clobber each other.
path<-"C:/Users/rouf1703/Documents/UdeS/Consultation/L-ARenaud/MODIS/VI_16Days_250m_v6/Time_Series/RData/"
l<-list.files(path)
for(i in seq_along(l)){
load(paste0(path,l[i]))
assign(paste0("r",i),raster_ts)
}
# Drop the last loaded copy; only the r1..rN variables are used below.
rm(raster_ts)
rv<-r5 #r5 is the initial raster used in plot ndvi3
rd<-r3
re<-r4
r<-rv
rc<-SpatialPointsDataFrame(coordinates(r),proj4string=CRS(proj4string(r)),data.frame(id=seq_len(dim(r)[1]*dim(r)[2])))
#v<-velox(r[[1:100]])
#writeRaster(rv[[1]],"C:/Users/rouf1703/Documents/UdeS/Consultation/L-ARenaud/Doc/ram_ndvi.tif")
#writeRaster(re,"C:/Users/rouf1703/Documents/UdeS/Consultation/L-ARenaud/Doc/ram_evi.tif")
#writeRaster(rd,"C:/Users/rouf1703/Documents/UdeS/Consultation/L-ARenaud/Doc/ram_doy.tif")
ram<-readOGR("C:/Users/rouf1703/Documents/UdeS/Consultation/L-ARenaud/Doc",layer="ram")
plot(ram)
ram<-gBuffer(ram,width=-0.01)
allr<-gBuffer(ram,width=0.25)
plot(ram,add=TRUE)
o<-over(rc,allr)
erv_raw<-extract(r,allr)
erd_raw<-extract(rd,allr)
#ee<-do.call("cbind",lapply(strsplit(v$extract(ram,fun=function(i){paste(i,collapse="_")}),"_"),as.integer))
erv<-erv_raw[[1]]
erd<-erd_raw[[1]]
# Lookup table: for each year 1980-2017, the full sequence of calendar
# dates in that year, so a (year, day-of-year) pair can be converted to a
# Date by day[[as.character(year)]][doy].
day<-lapply(1980:2017,function(i){
seq.Date(as.Date(paste0(i,"-01-01")),as.Date(paste0(i,"-12-31")),by=1)
})
names(day)<-1980:2017
julp<-sapply(strsplit(dimnames(erv)[[2]],"_"),function(i){
as.integer(i[4])
})
datep<-as.Date(sapply(strsplit(dimnames(erv)[[2]],"_"),function(i){
day[[as.character(i[3])]][as.integer(i[4])]
}),origin="1970-01-01")
ind<-sapply(strsplit(dimnames(erv)[[2]],"_"),function(i){
i[2]
})
sat<-substr(sapply(strsplit(dimnames(erv)[[2]],"_"),function(i){
i[1]
}),1,3)
id<-rep(as.integer(names(o)[!is.na(o)][1:nrow(erv)]),each=ncol(erv))
##### raw images and models ####
d<-as.data.table(data.frame(id=id,x=rep(1:ncol(erv),nrow(erv)),datep=datep,julp=julp,ind=ind,sat=sat,year=as.integer(substr(datep,1,4)),y=as.vector(t(erv)),jul=as.vector(t(erd))),date)
d$y<-d$y/10000
d<-d[!is.na(d$jul) & !is.na(d$y),]
d<-d[,.(id,y,ind,sat,year,julp,jul,np=.N),by=.(datep)]
d<-d[,.(id,y,ind,sat,year,julp,np,median=quantile(y,0.5,na.rm=TRUE),mean=mean(y,na.rm=TRUE),n=.N),by=.(datep,jul)]
d<-as.data.frame(d)
d$date<-as.Date(sapply(1:nrow(d),function(i){
k<-ifelse(d$jul[i]<d$julp[i],1,0)
day[[as.character(d$year[i]+k)]][d$jul[i]]
}),origin="1970-01-01")
d<-d[d$date>="2002-11-20",]
d$datex<-as.numeric(d$date)
d<-d[order(d$id,d$datep,d$jul),]
###### PIXEL #####
years<-sort(unique(d$year))
years<-years[years>2002 & years<2017]
ids<-unique(d$id)
ans<-NULL
l<-vector(mode="list",length(ids))
for(i in seq_along(ids)){
#dd<-d[d$id==ids[i] & d$year%in%years,]
dd<-d[d$id==ids[i],]
comp<-seq(min(dd$datex),max(dd$datex),by=1)
#ndvi<-rep(NA,length(comp))
#ndvi[match(dd$datex,comp)]<-dd$y
#ndvi<-na.spline(ndvi)
s0<-sgolayfilt(dd$y,p=3,n=41,m=0)
s1<-sgolayfilt(dd$y,p=3,n=41,m=1)
s2<-sgolayfilt(dd$y,p=3,n=41,m=2)
s3<-sgolayfilt(dd$y,p=3,n=41,m=3)
invisible(peak<-lapply(seq_along(years),function(i){
year<-years[i]
### up
#k<-which(dd$datep>=paste0(year-1,"-11-20") & dd$datep<=paste0(year,"-10-16"))
#ddd<-dd[k,]
#lo1<-list(Asym=0,xmid=10000,scal=2,c=-0.0)
#up1<-list(Asym=1,xmid=18000,scal=30,c=0.3)
#m1<-nls(y~Asym/(1+exp((xmid-datex)/scal))+c,data=ddd,start=list(Asym=0.5,xmid=quantile(ddd$datex,0.5,na.rm=TRUE),scal=3,c=0.2),control=list(minFactor=1e-12,maxiter=500),lower=lo1,upper=up1,algorithm="port")
#se<-seq(min(ddd$datex),max(ddd$datex),by=1)
#lines(se,predict(m1,data.frame(datex=se)),col=alpha("green4",0.75),lwd=3)
k2<-which(dd$date>=paste0(year,"-02-01") & dd$date<=paste0(year,"-10-01"))
gu<-dd$datex[findminmax(s1,beg=min(k2),end=max(k2),max=TRUE)]
k2<-which(dd$date>=paste0(year,"-08-01") & dd$date<=paste0(year,"-12-31"))
gd<-dd$datex[findminmax(s1,beg=min(k2),end=max(k2),max=FALSE)]
c(gu,gd)
}))
transp<-0.001
trans<-0.01
if(F){
#plot(dd$datex,dd$y,ylim=c(-0.2,1),col=alpha("black",transp),type="n",xaxt="n")
axis.Date(1,at=seq(min(dd$date,na.rm=TRUE),max(dd$date,na.rm=TRUE),by="2 month"), format="%Y-%m-%d",las=2,cex.axis=0.5)
points(dd$datex,dd$y,ylim=c(-0.2,1),col=alpha("black",transp))
abline(0,0)
lines(dd$datex,s0,col=alpha("black",trans))
lines(dd$datex,s1,col=alpha("black",trans))
#points(dd$datex,s1,col=alpha(colo.scale(s1,rev(c("green4","brown"))),trans),pch=1,cex=0.1)
lines(dd$datex,s2*5,col=alpha("red",transp))
lines(dd$datex,s3*50,col=alpha("blue",transp))
invisible(sapply(peak,function(j){
lines(rep(j[1],2),c(0,1),col=alpha("green4",trans),pch=16,lwd=1)
}))
invisible(sapply(peak,function(j){
lines(rep(j[2],2),c(0,1),col=alpha("brown",trans),pch=16,lwd=1)
}))
}
ans<-c(ans,mean(as.integer(format(as.Date(sapply(peak,"[",2),origin="1970-01-01"),"%j"))))
l[[i]]<-sapply(peak,"[",2)
}
rans<-r[[1]]
rans[ids]<-ans
rl<-lapply(seq_along(years),function(i){
r<-r[[1]]
ans<-sapply(l,"[",i)
ans<-as.integer(format(as.Date(ans,origin="1970-01-01"),"%j"))
r[ids]<-ans
r
})
R<-do.call("stack",rl)
names(R)<-years
levelplot(R,col.regions=rev(terrain.colors(101)),cuts=100)+layer(sp.polygons(ram))
dat<-substr(seq(as.Date("2007-01-01"),as.Date("2077-12-31"),by=1)[round(cellStats(R,mean))],6,10)
jul<-cellStats(R,mean)
centered_jul<-cellStats(R,mean)-mean(cellStats(R,mean))
sd_jul<-cellStats(R,sd)
x<-data.frame(year=gsub("X","",names(jul)),date=dat,jul=jul,centered_jul=centered_jul,sd_jul=sd_jul,stringsAsFactors=FALSE)
#fwrite(x,"C:/Users/rouf1703/Documents/UdeS/Consultation/L-ARenaud/Doc/greendown.csv",row.names=FALSE,sep=";")
tmap_mode("view")
tm_shape(rans)+tm_raster(alpha=0.9,n=10,palette=rev(terrain.colors(10)))+tm_shape(ram)+tm_borders(lwd=5)+tm_layout(basemaps = c("Esri.WorldImagery","HERE.hybridDay"))
#### png #####
#png("C:/Users/rouf1703/Documents/UdeS/Consultation/L-ARenaud/Doc/ndvi4.png",width=22,height=10,units="in",res=300)
par(mar=c(7,4,4,4))
plot(d$date,d$y,col=gray(0,0.1),xaxt="n",xlab="Date",ylab="NDVI",type="n",xlim=c(min(d$date)-300,max(d$date)))
years<-seq_along(unique(d$year))
invisible(lapply(years,function(i){
val<-as.numeric(day[[as.character(unique(d$year)[i])]])
rect(xleft=val-0.5,ybottom=-2000,xright=val+0.5,ytop=12000,col=alpha("grey20",seq_along(val)/length(val)/4),border=NA,xpd=FALSE)
}))
axis.Date(1, at=seq(min(d$date,na.rm=TRUE),max(d$date,na.rm=TRUE),by="1 month"), format="%Y-%m-%d",las=2,cex.axis=0.5)
points(d$date,d$y,col=gray(0,0.025))
d2<-unique(d[,c("date","mean","sat","n","np")])
points(d2$date,d2$mean,col=alpha("green4",d2$n/d2$np),pch=ifelse(d2$sat=="MOD",16,17),cex=1)
sa<-d$sat%in%c("MYD","MOD")
m1<-gam(y~s(datex,k=100),data=d[sa,])
m2<-loess(y~datex,data=d[sa,],span=0.03)
sx<-seq(min(d$datex,na.rm=TRUE),max(d$datex,na.rm=TRUE),by=1)
p1<-predict(m1,data.frame(datex=sx),type="response")
p2<-predict(m2,data.frame(datex=sx),type="response")
lines(sx,p1,col=alpha("blue",0.35),lwd=4)
lines(sx,p2,col=alpha("red",0.35),lwd=4)
peak<-NULL
invisible(peak<-lapply(years[-length(years)],function(i){
year<-unique(d$year)[i]
### up
dd<-d[d$datep>=paste0(year-1,"-12-01") & d$datep<=paste0(year,"-09-30"),]
lo1<-list(Asym=0,xmid=12000,scal=0.5,c=0.1)
up1<-list(Asym=1,xmid=18000,scal=50,c=0.4)
m1<-nls(y~Asym/(1+exp((xmid-datex)/scal))+c,data=dd,start=list(Asym=0.5,xmid=quantile(dd$datex,0.5,na.rm=TRUE),scal=10,c=0.2),control=list(minFactor=1e-12,maxiter=50),lower=lo1,upper=up1,algorithm="port")
se<-seq(min(dd$datex),max(dd$datex),by=1)
se<-se[-c(1:20,(length(se)-19):length(se))]
lines(se,predict(m1,data.frame(datex=se)),col=alpha("green4",0.85),lwd=4)
# bounds
if(i==1){
se2<-seq(min(dd$datex)-70,max(dd$datex)+70,by=1)-400
c2<-mean(d$y[d$jul%in%c(330:365,1:60)])
Asym2<-mean(d$y[d$jul%in%170:270])-c2
with(lo1,lines(se2,Asym2/(1+exp((coef(m1)["xmid"]-400-se2)/scal))+c2,col=alpha("green4",0.85),lwd=4))
with(up1,lines(se2,Asym2/(1+exp((coef(m1)["xmid"]-400-se2)/scal))+c2,col=alpha("green4",0.85),lwd=4))
}
co<-as.list(coef(m1))
peak<-c(peak,co$xmid)
### optimums
l<-logistic_optimum(alpha=co$Asym,beta=-co$xmid/co$scal,gamma=1/co$scal)
l<-unique(unlist(l))
invisible(lapply(l,function(j){
lines(rep(j,2),c(-0.3,-0.2),col="green4",lwd=1)
}))
### steepness makes it difficult for convergence...
dd<-d[d$datep>=paste0(year,"-07-01") & d$datep<=paste0(year+1,"-03-01"),]
lo2<-c(Asym=-1,xmid=12000,scal=0.1,c=co$Asym+co$c)
up2<-c(Asym=0,xmid=18000,scal=50,c=co$Asym+co$c)
m2<-tryCatch(nls(y~Asym/(1+exp((xmid-datex)/scal))+c,data=dd,start=list(Asym=-co$Asym,xmid=quantile(dd$datex,0.5,na.rm=TRUE),scal=0.1,c=co$Asym+co$c),control=list(minFactor=1e-12,maxiter=50),lower=lo2,upper=up2,algorithm="port"),error=function(j){TRUE})
if(!isTRUE(m2)){
se<-seq(min(dd$datex),max(dd$datex),by=1)
se<-se[-c(1:20,(length(se)-19):length(se))]
lines(se,predict(m2,data.frame(datex=se)),col=alpha("green4",0.85),lwd=4)
}
return(peak)}))
legend("topright",title="NDVI",pch=c(1,16,17,NA,NA,NA),lwd=c(NA,NA,NA,4,4,4),col=c(gray(0,0.3),alpha("green4",0.5),alpha("green4",0.5),alpha("blue",0.35),alpha("red",0.35),"green4"),legend=c("Value in a 250m pixel","Moy. Aqua sat.","Moy. Terra sat.","GAM","LOESS","Double logistic"),bty="n",inset=c(0.05,0))
#dev.off()
### tmap ##########################
gu<-as.integer(format(as.Date(unlist(peak),origin="1970-01-01"),"%j"))
gu<-gu-mean(gu)
hist(gu)
###
fun<-function(){
plot(ram,add=TRUE)
}
plot(r[[1:10]],addfun=fun)
#### visualisation prediction (dynamic) ######
tmap_mode("view")
tm_shape(r[["MYD13Q1_NDVI_2009_233"]])+tm_raster(alpha=0.9,n=10,palette=rev(terrain.colors(10)))+tm_shape(ram)+tm_borders(lwd=5)+tm_layout(basemaps = c("Esri.WorldImagery","HERE.hybridDay"))+tm_shape(rc[!is.na(o),])+tm_text("id")
tm_shape(v[[1]])+tm_raster(alpha=0.9,n=10,palette=rev(terrain.colors(10)))+tm_shape(ram)+tm_borders(lwd=5)+tm_layout(basemaps = c("Esri.WorldImagery","HERE.hybridDay"))
tm_shape(rans)+tm_raster(alpha=0.9,n=10,palette=rev(terrain.colors(10)))+tm_shape(ram)+tm_borders(lwd=5)+tm_layout(basemaps = c("Esri.WorldImagery","HERE.hybridDay"))
##### Logistic ######
# Asym
### alpha beta gamma
# Plot the logistic curve alpha/(1+exp(-beta-gamma*x))+c together with
# its first four analytic derivatives over the grid x.  Used to visualise
# where green-up / green-down landmarks (derivative extrema) fall.
# Side effect only: draws a plot; returns nothing useful.
logistic<-function(x,alpha=1,beta=1,gamma=1,c=0){
# 0th derivative: the logistic itself (c is a vertical offset).
d0<-function(alpha,beta,gamma,c){
alpha/(1+exp(-beta-gamma*x))+c
}
# 1st derivative (slope); the offset c drops out of all derivatives.
d1<-function(alpha,beta,gamma,c){
alpha*gamma*exp(-beta-gamma*x)*(1+exp(-beta-gamma*x))^(-2)
}
# 2nd derivative (curvature).
d2<-function(alpha,beta,gamma,c){
alpha*gamma^2*exp(-beta-gamma*x)*(exp(-beta-gamma*x)-1)*(1+exp(-beta-gamma*x))^(-3)
}
# 3rd derivative.
d3<-function(alpha,beta,gamma,c){
alpha*gamma^3*exp(-beta-gamma*x)*(1-4*exp(-beta-gamma*x)+exp(-beta-gamma*x)^2)*(1+exp(-beta-gamma*x))^(-4)
}
# 4th derivative.
d4<-function(alpha,beta,gamma,c){
alpha*gamma^4*exp(-beta-gamma*x)*(-1+(11*exp(-beta-gamma*x))-(11*(exp(-beta-gamma*x)^2))+exp(-beta-gamma*x)^3)*(1+exp(-beta-gamma*x))^(-5)
}
# Evaluate all five curves on the grid.
y0<-d0(alpha,beta,gamma,c)
y1<-d1(alpha,beta,gamma,c)
y2<-d2(alpha,beta,gamma,c)
y3<-d3(alpha,beta,gamma,c)
y4<-d4(alpha,beta,gamma,c)
# Grey ramp: darkest for the logistic, lighter for higher derivatives.
col<-gray((0:4)/5)
# Empty frame sized to hold every curve, then overlay each one.
plot(x,y0,ylim=range(c(y0,y1,y2,y3,y4)),type="n")
lines(x,y0,lwd=4,col=col[1])
lines(x,y1,lwd=2,col=col[2])
lines(x,y2,lwd=2,col=col[3])
lines(x,y3,lwd=2,col=col[4])
lines(x,y4,lwd=2,col=col[5])
legend("right",lwd=c(4,2,2,2,2),col=col,legend=c("logistic",paste("derivative",1:4)))
}
logistic(seq(-10,20,by=0.01),alpha=2,beta=1/5,gamma=0.8)
logistic_optimum <- function(alpha = 1, beta = 1, gamma = 1, c = 0) {
  # x-locations of the extrema of the 1st, 2nd and 3rd derivatives of the
  # logistic alpha/(1+exp(-beta-gamma*x))+c.  Only beta and gamma shift
  # and scale the x axis, so alpha and c do not enter the results.
  shifted <- function(k) -(log(k) + beta) / gamma  # solve exp(-beta-gamma*x) = k
  midpoint <- -beta / gamma                        # inflection point
  list(
    midpoint,
    shifted(c(2 + sqrt(3), 2 - sqrt(3))),
    c(shifted(5 + 2 * sqrt(6)), midpoint, shifted(5 - 2 * sqrt(6)))
  )
}
l<-logistic_optimum(seq(-10,20,by=0.01),alpha=2,beta=1/5,gamma=0.8)
l<-unique(unlist(l))
lapply(l,function(i){
lines(rep(i,2),c(-1000,1000),lty=2)
})
#### Savitsky-Golay filtering ####
m<-ts(t(erv)/10000,frequency=723)
x<-runif(10)
x
findminmax(x,beg=7,end=10)
par(mar=c(4,3,3,0.5))
plot(0,0,xlim=c(1,nrow(m)),ylim=c(-0.2,1),type="n")
abline(0,0)
for(i in 1:ncol(m)){
s<-sample(ncol(m),1)
m2<-m[,s]
m2<-m[,i]
m2<-na.spline(m2)
points(sgolayfilt(m2),col=gray(0,0.02))
n<-23
s0<-sgolayfilt(m2,p=3,n=n,m=0)
s1<-sgolayfilt(m2,p=3,n=n,m=1)
s2<-sgolayfilt(m2,p=3,n=n,m=2)
s3<-sgolayfilt(m2,p=3,n=n,m=3)
trans<-0.03
lines(s0,col=alpha("black",trans))
lines(s1*2,lty=2,col=alpha("red",trans))
lines(s2*4,lty=3,col=alpha("blue",trans))
lines(s3*4,lty=3,col=alpha("green4",trans))
rc$id[!is.na(o)][s]
se<-seq(1,length(s1),by=48)
invisible(lapply(se,function(x){
k<-findminmax(s1,beg=x,end=x+48)
lines(rep(k,2),c(0,1),lty=2,col=alpha("red",0.03))
}))
}
#### NDVI quantiles ##################
x<-subset(r,1:dim(r)[[3]])
v<-calc(x,function(i){quantile(i,probs=c(0.05,0.95),na.rm=TRUE)})
levelplot(v,col.regions=rev(terrain.colors(101)),cuts=100)
| /L-ARenauld/MODIStsp.R | no_license | frousseu/UdeS | R | false | false | 13,829 | r |
library(MODIStsp)
library(raster)
library(velox)
library(mgcv)
library(rgeos)
library(sp)
library(rgdal)
library(tmap)
library(data.table)
library(scales)
library(quantreg)
library(FlexParamCurve)
library(signal)
library(zoo)
library(rasterVis)
library(FRutils)
library(phenex)
#MODIStsp()
# Indices of the n largest (max = TRUE) or smallest (max = FALSE) values
# of x within the window [beg, end].  Indices are relative to the full
# vector, best value first.
findminmax <- function(x, n = 1, beg = 1, end = length(x), max = TRUE) {
  stopifnot(n <= length(beg:end))
  # Rank the whole vector; negating x makes rank 1 the largest value.
  r <- rank(if (max) -x else x)
  # Order the window slice and translate window positions back to
  # absolute indices.  (The previous match()-against-the-whole-vector
  # approach could return an index outside [beg, end] when a value in
  # the window tied with one outside it, and returned the same index
  # twice for ties inside the window.)
  as.integer(beg) - 1L + order(r[beg:end])[1:n]
}
# For a named vector whose names are dates ("YYYY-MM-DD"): within each
# contiguous run of elements whose month-day part ("MM-DD", characters
# 6-10 of the name) lies inside [beg, end], return the absolute indices
# of the n largest (max = TRUE) or smallest (max = FALSE) values.
# Returns one integer vector per run, as a list.
findminmax <- function(x, n = 1, beg = "06-01", end = "11-01", max = TRUE) {
  stopifnot(!is.null(names(x)))
  d <- substr(names(x), 6, 10)          # "MM-DD" part of each name
  bloc <- d >= beg & d <= end           # inside the seasonal window?
  run <- rle(bloc)
  ends <- cumsum(run$lengths)
  # Run i starts one past the end of run i-1.  (The previous version
  # computed starts as c(1, head(ends, -1)) and therefore began every
  # run after the first one element too early.)
  starts <- ends - run$lengths + 1L
  windows <- Map(":", starts[run$values], ends[run$values])
  r <- rank(if (max) -x else x)
  lapply(windows, function(i) {
    # Best n positions inside this window, mapped back to absolute
    # indices (robust to rank ties with values outside the window).
    i[order(r[i])[1:n]]
  })
}
##### load RData #####
path<-"C:/Users/rouf1703/Documents/UdeS/Consultation/L-ARenaud/MODIS/VI_16Days_250m_v6/Time_Series/RData/"
l<-list.files(path)
for(i in seq_along(l)){
load(paste0(path,l[i]))
assign(paste0("r",i),raster_ts)
}
rm(raster_ts)
rv<-r5 #r5 is the initial raster used in plot ndvi3
rd<-r3
re<-r4
r<-rv
rc<-SpatialPointsDataFrame(coordinates(r),proj4string=CRS(proj4string(r)),data.frame(id=seq_len(dim(r)[1]*dim(r)[2])))
#v<-velox(r[[1:100]])
#writeRaster(rv[[1]],"C:/Users/rouf1703/Documents/UdeS/Consultation/L-ARenaud/Doc/ram_ndvi.tif")
#writeRaster(re,"C:/Users/rouf1703/Documents/UdeS/Consultation/L-ARenaud/Doc/ram_evi.tif")
#writeRaster(rd,"C:/Users/rouf1703/Documents/UdeS/Consultation/L-ARenaud/Doc/ram_doy.tif")
ram<-readOGR("C:/Users/rouf1703/Documents/UdeS/Consultation/L-ARenaud/Doc",layer="ram")
plot(ram)
ram<-gBuffer(ram,width=-0.01)
allr<-gBuffer(ram,width=0.25)
plot(ram,add=TRUE)
o<-over(rc,allr)
erv_raw<-extract(r,allr)
erd_raw<-extract(rd,allr)
#ee<-do.call("cbind",lapply(strsplit(v$extract(ram,fun=function(i){paste(i,collapse="_")}),"_"),as.integer))
erv<-erv_raw[[1]]
erd<-erd_raw[[1]]
day<-lapply(1980:2017,function(i){
seq.Date(as.Date(paste0(i,"-01-01")),as.Date(paste0(i,"-12-31")),by=1)
})
names(day)<-1980:2017
julp<-sapply(strsplit(dimnames(erv)[[2]],"_"),function(i){
as.integer(i[4])
})
datep<-as.Date(sapply(strsplit(dimnames(erv)[[2]],"_"),function(i){
day[[as.character(i[3])]][as.integer(i[4])]
}),origin="1970-01-01")
ind<-sapply(strsplit(dimnames(erv)[[2]],"_"),function(i){
i[2]
})
sat<-substr(sapply(strsplit(dimnames(erv)[[2]],"_"),function(i){
i[1]
}),1,3)
id<-rep(as.integer(names(o)[!is.na(o)][1:nrow(erv)]),each=ncol(erv))
##### raw images and models ####
d<-as.data.table(data.frame(id=id,x=rep(1:ncol(erv),nrow(erv)),datep=datep,julp=julp,ind=ind,sat=sat,year=as.integer(substr(datep,1,4)),y=as.vector(t(erv)),jul=as.vector(t(erd))),date)
d$y<-d$y/10000
d<-d[!is.na(d$jul) & !is.na(d$y),]
d<-d[,.(id,y,ind,sat,year,julp,jul,np=.N),by=.(datep)]
d<-d[,.(id,y,ind,sat,year,julp,np,median=quantile(y,0.5,na.rm=TRUE),mean=mean(y,na.rm=TRUE),n=.N),by=.(datep,jul)]
d<-as.data.frame(d)
d$date<-as.Date(sapply(1:nrow(d),function(i){
k<-ifelse(d$jul[i]<d$julp[i],1,0)
day[[as.character(d$year[i]+k)]][d$jul[i]]
}),origin="1970-01-01")
d<-d[d$date>="2002-11-20",]
d$datex<-as.numeric(d$date)
d<-d[order(d$id,d$datep,d$jul),]
###### PIXEL #####
years<-sort(unique(d$year))
years<-years[years>2002 & years<2017]
ids<-unique(d$id)
ans<-NULL
l<-vector(mode="list",length(ids))
for(i in seq_along(ids)){
#dd<-d[d$id==ids[i] & d$year%in%years,]
dd<-d[d$id==ids[i],]
comp<-seq(min(dd$datex),max(dd$datex),by=1)
#ndvi<-rep(NA,length(comp))
#ndvi[match(dd$datex,comp)]<-dd$y
#ndvi<-na.spline(ndvi)
s0<-sgolayfilt(dd$y,p=3,n=41,m=0)
s1<-sgolayfilt(dd$y,p=3,n=41,m=1)
s2<-sgolayfilt(dd$y,p=3,n=41,m=2)
s3<-sgolayfilt(dd$y,p=3,n=41,m=3)
invisible(peak<-lapply(seq_along(years),function(i){
year<-years[i]
### up
#k<-which(dd$datep>=paste0(year-1,"-11-20") & dd$datep<=paste0(year,"-10-16"))
#ddd<-dd[k,]
#lo1<-list(Asym=0,xmid=10000,scal=2,c=-0.0)
#up1<-list(Asym=1,xmid=18000,scal=30,c=0.3)
#m1<-nls(y~Asym/(1+exp((xmid-datex)/scal))+c,data=ddd,start=list(Asym=0.5,xmid=quantile(ddd$datex,0.5,na.rm=TRUE),scal=3,c=0.2),control=list(minFactor=1e-12,maxiter=500),lower=lo1,upper=up1,algorithm="port")
#se<-seq(min(ddd$datex),max(ddd$datex),by=1)
#lines(se,predict(m1,data.frame(datex=se)),col=alpha("green4",0.75),lwd=3)
k2<-which(dd$date>=paste0(year,"-02-01") & dd$date<=paste0(year,"-10-01"))
gu<-dd$datex[findminmax(s1,beg=min(k2),end=max(k2),max=TRUE)]
k2<-which(dd$date>=paste0(year,"-08-01") & dd$date<=paste0(year,"-12-31"))
gd<-dd$datex[findminmax(s1,beg=min(k2),end=max(k2),max=FALSE)]
c(gu,gd)
}))
transp<-0.001
trans<-0.01
if(F){
#plot(dd$datex,dd$y,ylim=c(-0.2,1),col=alpha("black",transp),type="n",xaxt="n")
axis.Date(1,at=seq(min(dd$date,na.rm=TRUE),max(dd$date,na.rm=TRUE),by="2 month"), format="%Y-%m-%d",las=2,cex.axis=0.5)
points(dd$datex,dd$y,ylim=c(-0.2,1),col=alpha("black",transp))
abline(0,0)
lines(dd$datex,s0,col=alpha("black",trans))
lines(dd$datex,s1,col=alpha("black",trans))
#points(dd$datex,s1,col=alpha(colo.scale(s1,rev(c("green4","brown"))),trans),pch=1,cex=0.1)
lines(dd$datex,s2*5,col=alpha("red",transp))
lines(dd$datex,s3*50,col=alpha("blue",transp))
invisible(sapply(peak,function(j){
lines(rep(j[1],2),c(0,1),col=alpha("green4",trans),pch=16,lwd=1)
}))
invisible(sapply(peak,function(j){
lines(rep(j[2],2),c(0,1),col=alpha("brown",trans),pch=16,lwd=1)
}))
}
ans<-c(ans,mean(as.integer(format(as.Date(sapply(peak,"[",2),origin="1970-01-01"),"%j"))))
l[[i]]<-sapply(peak,"[",2)
}
rans<-r[[1]]
rans[ids]<-ans
rl<-lapply(seq_along(years),function(i){
r<-r[[1]]
ans<-sapply(l,"[",i)
ans<-as.integer(format(as.Date(ans,origin="1970-01-01"),"%j"))
r[ids]<-ans
r
})
R<-do.call("stack",rl)
names(R)<-years
levelplot(R,col.regions=rev(terrain.colors(101)),cuts=100)+layer(sp.polygons(ram))
dat<-substr(seq(as.Date("2007-01-01"),as.Date("2077-12-31"),by=1)[round(cellStats(R,mean))],6,10)
jul<-cellStats(R,mean)
centered_jul<-cellStats(R,mean)-mean(cellStats(R,mean))
sd_jul<-cellStats(R,sd)
x<-data.frame(year=gsub("X","",names(jul)),date=dat,jul=jul,centered_jul=centered_jul,sd_jul=sd_jul,stringsAsFactors=FALSE)
#fwrite(x,"C:/Users/rouf1703/Documents/UdeS/Consultation/L-ARenaud/Doc/greendown.csv",row.names=FALSE,sep=";")
tmap_mode("view")
tm_shape(rans)+tm_raster(alpha=0.9,n=10,palette=rev(terrain.colors(10)))+tm_shape(ram)+tm_borders(lwd=5)+tm_layout(basemaps = c("Esri.WorldImagery","HERE.hybridDay"))
#### png #####
#png("C:/Users/rouf1703/Documents/UdeS/Consultation/L-ARenaud/Doc/ndvi4.png",width=22,height=10,units="in",res=300)
par(mar=c(7,4,4,4))
plot(d$date,d$y,col=gray(0,0.1),xaxt="n",xlab="Date",ylab="NDVI",type="n",xlim=c(min(d$date)-300,max(d$date)))
years<-seq_along(unique(d$year))
invisible(lapply(years,function(i){
val<-as.numeric(day[[as.character(unique(d$year)[i])]])
rect(xleft=val-0.5,ybottom=-2000,xright=val+0.5,ytop=12000,col=alpha("grey20",seq_along(val)/length(val)/4),border=NA,xpd=FALSE)
}))
axis.Date(1, at=seq(min(d$date,na.rm=TRUE),max(d$date,na.rm=TRUE),by="1 month"), format="%Y-%m-%d",las=2,cex.axis=0.5)
points(d$date,d$y,col=gray(0,0.025))
d2<-unique(d[,c("date","mean","sat","n","np")])
points(d2$date,d2$mean,col=alpha("green4",d2$n/d2$np),pch=ifelse(d2$sat=="MOD",16,17),cex=1)
sa<-d$sat%in%c("MYD","MOD")
m1<-gam(y~s(datex,k=100),data=d[sa,])
m2<-loess(y~datex,data=d[sa,],span=0.03)
sx<-seq(min(d$datex,na.rm=TRUE),max(d$datex,na.rm=TRUE),by=1)
p1<-predict(m1,data.frame(datex=sx),type="response")
p2<-predict(m2,data.frame(datex=sx),type="response")
lines(sx,p1,col=alpha("blue",0.35),lwd=4)
lines(sx,p2,col=alpha("red",0.35),lwd=4)
peak<-NULL
invisible(peak<-lapply(years[-length(years)],function(i){
year<-unique(d$year)[i]
### up
dd<-d[d$datep>=paste0(year-1,"-12-01") & d$datep<=paste0(year,"-09-30"),]
lo1<-list(Asym=0,xmid=12000,scal=0.5,c=0.1)
up1<-list(Asym=1,xmid=18000,scal=50,c=0.4)
m1<-nls(y~Asym/(1+exp((xmid-datex)/scal))+c,data=dd,start=list(Asym=0.5,xmid=quantile(dd$datex,0.5,na.rm=TRUE),scal=10,c=0.2),control=list(minFactor=1e-12,maxiter=50),lower=lo1,upper=up1,algorithm="port")
se<-seq(min(dd$datex),max(dd$datex),by=1)
se<-se[-c(1:20,(length(se)-19):length(se))]
lines(se,predict(m1,data.frame(datex=se)),col=alpha("green4",0.85),lwd=4)
# bounds
if(i==1){
se2<-seq(min(dd$datex)-70,max(dd$datex)+70,by=1)-400
c2<-mean(d$y[d$jul%in%c(330:365,1:60)])
Asym2<-mean(d$y[d$jul%in%170:270])-c2
with(lo1,lines(se2,Asym2/(1+exp((coef(m1)["xmid"]-400-se2)/scal))+c2,col=alpha("green4",0.85),lwd=4))
with(up1,lines(se2,Asym2/(1+exp((coef(m1)["xmid"]-400-se2)/scal))+c2,col=alpha("green4",0.85),lwd=4))
}
co<-as.list(coef(m1))
peak<-c(peak,co$xmid)
### optimums
l<-logistic_optimum(alpha=co$Asym,beta=-co$xmid/co$scal,gamma=1/co$scal)
l<-unique(unlist(l))
invisible(lapply(l,function(j){
lines(rep(j,2),c(-0.3,-0.2),col="green4",lwd=1)
}))
### steepness makes it difficult for convergence...
dd<-d[d$datep>=paste0(year,"-07-01") & d$datep<=paste0(year+1,"-03-01"),]
lo2<-c(Asym=-1,xmid=12000,scal=0.1,c=co$Asym+co$c)
up2<-c(Asym=0,xmid=18000,scal=50,c=co$Asym+co$c)
m2<-tryCatch(nls(y~Asym/(1+exp((xmid-datex)/scal))+c,data=dd,start=list(Asym=-co$Asym,xmid=quantile(dd$datex,0.5,na.rm=TRUE),scal=0.1,c=co$Asym+co$c),control=list(minFactor=1e-12,maxiter=50),lower=lo2,upper=up2,algorithm="port"),error=function(j){TRUE})
if(!isTRUE(m2)){
se<-seq(min(dd$datex),max(dd$datex),by=1)
se<-se[-c(1:20,(length(se)-19):length(se))]
lines(se,predict(m2,data.frame(datex=se)),col=alpha("green4",0.85),lwd=4)
}
return(peak)}))
legend("topright",title="NDVI",pch=c(1,16,17,NA,NA,NA),lwd=c(NA,NA,NA,4,4,4),col=c(gray(0,0.3),alpha("green4",0.5),alpha("green4",0.5),alpha("blue",0.35),alpha("red",0.35),"green4"),legend=c("Value in a 250m pixel","Moy. Aqua sat.","Moy. Terra sat.","GAM","LOESS","Double logistic"),bty="n",inset=c(0.05,0))
#dev.off()
### tmap ##########################
gu<-as.integer(format(as.Date(unlist(peak),origin="1970-01-01"),"%j"))
gu<-gu-mean(gu)
hist(gu)
###
fun<-function(){
plot(ram,add=TRUE)
}
plot(r[[1:10]],addfun=fun)
#### visualisation prediction (dynamic) ######
tmap_mode("view")
tm_shape(r[["MYD13Q1_NDVI_2009_233"]])+tm_raster(alpha=0.9,n=10,palette=rev(terrain.colors(10)))+tm_shape(ram)+tm_borders(lwd=5)+tm_layout(basemaps = c("Esri.WorldImagery","HERE.hybridDay"))+tm_shape(rc[!is.na(o),])+tm_text("id")
tm_shape(v[[1]])+tm_raster(alpha=0.9,n=10,palette=rev(terrain.colors(10)))+tm_shape(ram)+tm_borders(lwd=5)+tm_layout(basemaps = c("Esri.WorldImagery","HERE.hybridDay"))
tm_shape(rans)+tm_raster(alpha=0.9,n=10,palette=rev(terrain.colors(10)))+tm_shape(ram)+tm_borders(lwd=5)+tm_layout(basemaps = c("Esri.WorldImagery","HERE.hybridDay"))
##### Logistic ######
# Asym
### alpha beta gamma
# Plot the logistic curve f(x) = alpha/(1+exp(-beta-gamma*x)) + c together
# with its first four analytic derivatives, each in a progressively lighter
# grey, plus a legend.  Purely a visual-inspection helper; the last value
# (the result of legend()) is returned invisibly.
# Note: the inner d0..d4 helpers close over `x` from the enclosing call,
# so their own argument lists carry only the curve parameters.
logistic<-function(x,alpha=1,beta=1,gamma=1,c=0){
# the logistic itself (the only term shifted by the offset c)
d0<-function(alpha,beta,gamma,c){
alpha/(1+exp(-beta-gamma*x))+c
}
# first derivative (bell-shaped; c drops out of all derivatives)
d1<-function(alpha,beta,gamma,c){
alpha*gamma*exp(-beta-gamma*x)*(1+exp(-beta-gamma*x))^(-2)
}
# second derivative
d2<-function(alpha,beta,gamma,c){
alpha*gamma^2*exp(-beta-gamma*x)*(exp(-beta-gamma*x)-1)*(1+exp(-beta-gamma*x))^(-3)
}
# third derivative
d3<-function(alpha,beta,gamma,c){
alpha*gamma^3*exp(-beta-gamma*x)*(1-4*exp(-beta-gamma*x)+exp(-beta-gamma*x)^2)*(1+exp(-beta-gamma*x))^(-4)
}
# fourth derivative
d4<-function(alpha,beta,gamma,c){
alpha*gamma^4*exp(-beta-gamma*x)*(-1+(11*exp(-beta-gamma*x))-(11*(exp(-beta-gamma*x)^2))+exp(-beta-gamma*x)^3)*(1+exp(-beta-gamma*x))^(-5)
}
# evaluate all five curves over x
y0<-d0(alpha,beta,gamma,c)
y1<-d1(alpha,beta,gamma,c)
y2<-d2(alpha,beta,gamma,c)
y3<-d3(alpha,beta,gamma,c)
y4<-d4(alpha,beta,gamma,c)
# grey ramp: darkest for the logistic, lighter for higher derivatives
col<-gray((0:4)/5)
# empty plot sized to hold every curve, then overlay them
plot(x,y0,ylim=range(c(y0,y1,y2,y3,y4)),type="n")
lines(x,y0,lwd=4,col=col[1])
lines(x,y1,lwd=2,col=col[2])
lines(x,y2,lwd=2,col=col[3])
lines(x,y3,lwd=2,col=col[4])
lines(x,y4,lwd=2,col=col[5])
legend("right",lwd=c(4,2,2,2,2),col=col,legend=c("logistic",paste("derivative",1:4)))
}
logistic(seq(-10,20,by=0.01),alpha=2,beta=1/5,gamma=0.8)
# Locations (x values) of the extrema of the 1st, 2nd and 3rd derivatives of
# the logistic curve alpha/(1+exp(-beta-gamma*x)) + c.  The vertical
# parameters alpha and c only rescale/shift the curve, so the extrema
# locations depend on beta and gamma alone; both stay in the signature for
# interface compatibility.  Returns a list of length 3: one vector of
# extremum locations per derivative order.
logistic_optimum <- function(alpha = 1, beta = 1, gamma = 1, c = 0) {
  inflection <- -beta / gamma
  # x such that exp(-beta - gamma*x) == k, i.e. a root of the derivative's
  # polynomial in exp(-beta - gamma*x)
  root_at <- function(k) -(log(k) + beta) / gamma
  list(
    inflection,
    c(root_at(2 + sqrt(3)), root_at(2 - sqrt(3))),
    c(root_at(5 + 2 * sqrt(6)), inflection, root_at(5 - 2 * sqrt(6)))
  )
}
# Mark the derivative-extrema locations of the example curve plotted above
# as dashed vertical lines.  logistic_optimum() takes no x vector (the
# extrema locations depend only on beta and gamma), so the seq(-10,20,...)
# previously passed as the first positional argument was silently absorbed
# by the unused `c` parameter; it has been dropped with no change in result.
l<-logistic_optimum(alpha=2,beta=1/5,gamma=0.8)
l<-unique(unlist(l))
# invisible() suppresses the printed list of lines() return values,
# matching the style used elsewhere in this script
invisible(lapply(l,function(i){
lines(rep(i,2),c(-1000,1000),lty=2)
}))
#### Savitsky-Golay filtering ####
# Smooth each pixel's NDVI time series with Savitzky-Golay filters and
# overlay the smoothed series and derivatives for every column of `erv`.
# NOTE(review): relies on objects defined earlier in the file / session:
# `erv` (raw series matrix), `findminmax`, `rc`, `o`, and the signal
# (sgolayfilt), zoo (na.spline) and scales (alpha) packages.
m<-ts(t(erv)/10000,frequency=723)
# quick sanity check of findminmax on random data (result printed, not used)
x<-runif(10)
x
findminmax(x,beg=7,end=10)
par(mar=c(4,3,3,0.5))
# empty canvas spanning all time points, NDVI in [-0.2, 1]
plot(0,0,xlim=c(1,nrow(m)),ylim=c(-0.2,1),type="n")
abline(0,0)
for(i in 1:ncol(m)){
# NOTE(review): `s` picks a random column but the next assignment of m2 is
# immediately overwritten with column i, so m[,s] is dead; `s` is only used
# below to index rc$id.  Confirm whether random sampling was intended.
s<-sample(ncol(m),1)
m2<-m[,s]
m2<-m[,i]
# interpolate missing values before filtering
m2<-na.spline(m2)
points(sgolayfilt(m2),col=gray(0,0.02))
# window length for the Savitzky-Golay filters (must be odd)
n<-23
# cubic SG fits: smoothed series (m=0) and 1st-3rd derivatives (m=1..3)
s0<-sgolayfilt(m2,p=3,n=n,m=0)
s1<-sgolayfilt(m2,p=3,n=n,m=1)
s2<-sgolayfilt(m2,p=3,n=n,m=2)
s3<-sgolayfilt(m2,p=3,n=n,m=3)
trans<-0.03
# derivatives rescaled (x2, x4) only to make them visible on the NDVI axis
lines(s0,col=alpha("black",trans))
lines(s1*2,lty=2,col=alpha("red",trans))
lines(s2*4,lty=3,col=alpha("blue",trans))
lines(s3*4,lty=3,col=alpha("green4",trans))
# NOTE(review): value printed/discarded - id of the randomly sampled pixel
rc$id[!is.na(o)][s]
# mark the extremum of the first derivative within each 48-step window
se<-seq(1,length(s1),by=48)
invisible(lapply(se,function(x){
k<-findminmax(s1,beg=x,end=x+48)
lines(rep(k,2),c(0,1),lty=2,col=alpha("red",0.03))
}))
}
#### NDVI quantiles ##################
x<-subset(r,1:dim(r)[[3]])
v<-calc(x,function(i){quantile(i,probs=c(0.05,0.95),na.rm=TRUE)})
levelplot(v,col.regions=rev(terrain.colors(101)),cuts=100)
|
# Kriging smoother for a sequence of 87 images (512x512, comma-separated
# text files).  Each image is read, smoothed with a tapered-covariance
# kriging model (fields::mKrig), and the fitted surface written to
# ImagesHatTS/ under the current working directory for later use in Matlab.
# NOTE(review): require() is used for loading; library() would fail loudly
# if a package is missing instead of returning FALSE.
require(sp)
require(gstat)
require(fields)
require(spam64) #?large_matrix
##############################################################
##############################################################
##############################################################
# here we apply the kriking method for all the images and save
# them in a folder that will be used later with Matlab
for(i in 1:87){
# reading the image
# NOTE(review): hard-coded absolute input path; confirm before reuse.
wd <- getwd()
w <- scan(file = sprintf(
"/home/rodney/Documents/Images_TimeSeries_files/ImagesTStxt/image-%i.txt",
i), sep = ",")
# reshape the flat scan into a 512x512 matrix; the transpose restores
# the original row/column orientation of the image file
tmp <- matrix(w, ncol = 512)
I <- t(tmp)
rm(w); rm(tmp)
# assuming the pixels are in the
# grid (i,j) \in \mathbb{Z}^d
coord <- cbind(x = rep(1:512, 512), y = rep(1:512, each = 512))
#fullData <- as.data.frame(cbind(coord, w))
# idx/idy allow subsampling of the grid; with by = 1 the full
# resolution is kept
idx <- seq(1, 512, by = 1)
idy <- seq(1, 512, by = 1)
Is <- I[idx, idy]
ws <- as.numeric(Is)
# map the (possibly subsampled) pixel positions back to rows of `coord`
vecIndex <- matrix(1:(nrow(I)*ncol(I)), nrow = nrow(I), ncol = ncol(I))[idx, idy]
coords <- coord[vecIndex,]
# fit the kriging model: exponential covariance tapered with a Wendland
# function so the covariance matrix stays sparse enough to factor
I.hat <- mKrig(y = ws, x = coords,
cov.function = "stationary.taper.cov",
lambda = 0.005/0.01, # nugget/variance
Covariance = "Exponential",
theta = 10, # Spatial dependence
Taper = "Wendland",
Taper.args = list(theta = 4, # approx neighborhood
k = 2,
dimension = 2),
chol.args = list(pivot = TRUE,
memory = list(nnzR= 900000)))
# fitted values at the observation locations, back in image layout
w.hat <- as.numeric(predict(I.hat))
# Distance matrix is relatively dense at theta = 4
Is.hat <- matrix(w.hat, ncol = ncol(Is))
# free the large intermediates before the next iteration
rm(I.hat); rm(w.hat); rm(ws); rm(vecIndex)
rm(I); rm(coord); rm(coords)
# saving the treated image
write.table(Is.hat,sprintf("%s/ImagesHatTS/imagehs-%i.txt",wd,i),
row.names = FALSE, col.names = FALSE)
}
| /Functions/ImageProcessing.R | no_license | rodneyfv/Image_TimeSeries_MDDM | R | false | false | 1,952 | r | require(sp)
require(gstat)
require(fields)
require(spam64) #?large_matrix
##############################################################
##############################################################
##############################################################
# here we apply the kriking method for all the images and save
# them in a folder that will be used later with Matlab
for(i in 1:87){
# reading the image
wd <- getwd()
w <- scan(file = sprintf(
"/home/rodney/Documents/Images_TimeSeries_files/ImagesTStxt/image-%i.txt",
i), sep = ",")
tmp <- matrix(w, ncol = 512)
I <- t(tmp)
rm(w); rm(tmp)
# assuming the pixels are in the
# grid (i,j) \in \mathbb{Z}^d
coord <- cbind(x = rep(1:512, 512), y = rep(1:512, each = 512))
#fullData <- as.data.frame(cbind(coord, w))
idx <- seq(1, 512, by = 1)
idy <- seq(1, 512, by = 1)
Is <- I[idx, idy]
ws <- as.numeric(Is)
vecIndex <- matrix(1:(nrow(I)*ncol(I)), nrow = nrow(I), ncol = ncol(I))[idx, idy]
coords <- coord[vecIndex,]
I.hat <- mKrig(y = ws, x = coords,
cov.function = "stationary.taper.cov",
lambda = 0.005/0.01, # nugget/variance
Covariance = "Exponential",
theta = 10, # Spatial dependence
Taper = "Wendland",
Taper.args = list(theta = 4, # approx neighborhood
k = 2,
dimension = 2),
chol.args = list(pivot = TRUE,
memory = list(nnzR= 900000)))
w.hat <- as.numeric(predict(I.hat))
# Distance matrix is relatively dense at theta = 4
Is.hat <- matrix(w.hat, ncol = ncol(Is))
rm(I.hat); rm(w.hat); rm(ws); rm(vecIndex)
rm(I); rm(coord); rm(coords)
# saving the treated image
write.table(Is.hat,sprintf("%s/ImagesHatTS/imagehs-%i.txt",wd,i),
row.names = FALSE, col.names = FALSE)
}
|
library(tidyverse)
murders <- read_csv("data/murders.csv")
murders <- murders %>% mutate(region = factor(region), rate = total / population *10^5)
save(murders, file ="rda/murders.rda") | /wrangle-data.R | no_license | chandu30193/murders | R | false | false | 185 | r | library(tidyverse)
murders <- read_csv("data/murders.csv")
murders <- murders %>% mutate(region = factor(region), rate = total / population *10^5)
save(murders, file ="rda/murders.rda") |
# make_tree.R: build a neighbour-joining tree from the distance between
# rows of a data matrix and save the plot as Results/tree.png.
# NOTE(review): rm(list=ls()) and the relative setwd() mutate the caller's
# session; confirm this script is only ever run standalone.
rm(list=ls())
setwd("Results/")
library(ape)
#load matrix (first column is assumed to hold the row labels)
Matrix <- read.csv("matrix.csv", header = T)
#neighbour-joining tree from pairwise gene distances (ape::dist.gene, ape::nj)
stree = nj(dist.gene(Matrix))
#change tip labels to characters in the first column of the matrix
stree$tip.label <- as.character(Matrix[,1])
#open png file (800 x 500 px)
png("tree.png",800,500)
#plot tree
plot(stree)
#save to file by closing the png device
dev.off()
| /Visualisation/Code/make_tree.R | no_license | VibhutiNandel/Gene_Distribution | R | false | false | 352 | r | rm(list=ls())
setwd("Results/")
library(ape)
#load matrix
Matrix <- read.csv("matrix.csv", header = T)
#neighbour-joining
stree = nj(dist.gene(Matrix))
#change tip labels to characters in the first column of the matrix
stree$tip.label <- as.character(Matrix[,1])
#open png file
png("tree.png",800,500)
#plot tree
plot(stree)
#save to file
dev.off()
|
#'LogitUCB algorithm
#'
#'Control data in visitor_reward with \code{\link{BanditRewardControl}}
#'Stop if something is wrong.
#' \itemize{ At each iteration
#' \item Calculates the arm probabilities according to a logit regression of context in dt dataframe
#' \item Choose the arm with the maximum upper bound (with alpha parameter)
#' \item Receives a reward in visitor_reward for the arm and associated iteration
#' \item Updates the results matrix S.
#' }
#'Returns the calculation time.
#'Review the estimated, actual coefficient for each arm.
#'See also \code{\link{ReturnRealTheta}},
#'Require \code{\link{tic}} and \code{\link{toc}} from \code{\link{tictoc}} library
#'
#'@param dt Dataframe of integer or numeric values
#'@param visitor_reward Dataframe of integer or numeric values
#'@param K Integer value (optional)
#'@param alpha Numeric value (optional)
#'
#'@return
#' \itemize{ List of element:
#' \item choice: choices of UCB,
#' \item proba: probability of the chosen arms,
#' \item time: time of cumputation,
#' \item theta_hat: coefficients estimated of each arm
#' \item theta: real coefficients of each arm
#' }
#'
#'@examples
#'size.tot = 1000
#'set.seed(4649) # this makes the example exactly reproducible
#'x1 = runif(size.tot, min=0, max=10) # you have 4, largely uncorrelated predictors
#'x2 = runif(size.tot, min=0, max=10)
#'x3 = runif(size.tot, min=0, max=10)
#'x4 = runif(size.tot, min=0, max=10)
#'dt = cbind(x1,x2,x3,x4)
#'#arm reward
#'arm_1 <- as.vector(c(-1,9,-8,4))
#'K1 = 1/(1+exp(- crossprod(t(dt),arm_1))) # inverse logit transform of linear predictor
#'K1 = vapply(K1, function(x) rbinom(1, 1, x), as.integer(1L))
#'arm_2 <- as.vector(c(-1,2,1,0))
#'K2 = 1/(1+exp(- crossprod(t(dt),arm_2))) # inverse logit transform of linear predictor
#'K2 = vapply(K2, function(x) rbinom(1, 1, x), as.integer(1L))
#'arm_3 <- as.vector(c(-1,-5,1,10))
#'K3 = 1/(1+exp(- crossprod(t(dt),arm_3)))
#'K3 = vapply(K3, function(x) rbinom(1, 1, x), as.integer(1L))
#'visitor_reward <- data.frame(K1,K2,K3)
#'dt <- as.data.frame(dt)
#'LOGITUCB(dt,visitor_reward)
#'@import tictoc
#'@export
#LOGITUCB
# LinUCB-style contextual bandit: per arm, a ridge-regression estimate of
# the linear predictor is kept (A = X'X + I, b = X'y) and at each visitor
# the arm maximising logit^-1(theta_hat'x) + alpha*sqrt(x'A^-1 x) is played.
# See the roxygen block above for the full contract.
LOGITUCB <- function(dt, visitor_reward, alpha=1, K = ncol(visitor_reward)) {
#control data (stop early on malformed input)
DataControlK(visitor_reward, K=K)
DataControlContextReward(dt,visitor_reward)
#data formating
visitor_reward <- as.matrix(visitor_reward)
#Context matrix D: one row of n_f features per visitor
D <- as.matrix(dt)
n <- nrow(dt)
n_f <- ncol(D)
#Keep the past choices, rewards and win probabilities (one slot per visitor)
choices = list(rep.int(0,n))
rewards = list(rep.int(0,n))
proba = list(rep.int(0,n))
#estimated coefficients: one row per arm, one column per feature
th_hat = array(0, c(K, n_f))
colnames(th_hat) <- colnames(dt)
rownames(th_hat) <- colnames(visitor_reward)
#ridge-regression accumulators: b[j,] = X_j' y_j, A[,,j] = X_j' X_j + I
b <- matrix(0,K, n_f)
A <- array(0, c(n_f,n_f,K))
#temporary per-arm upper-confidence scores
p = list(rep.int(0,K))
#time keeper (tictoc)
tic()
#initialization: A starts as the identity (the ridge penalty)
for (j in 1:K) {
A[,,j]= diag(n_f)
}
for (i in 1:n) {
x_i = D[i,]
for (j in 1:K) {
# NOTE(review): A is inverted from scratch for every visitor/arm pair;
# a rank-one (Sherman-Morrison) update would avoid the repeated solve().
A_inv = solve(A[,,j])
th_hat[j,] = A_inv %*% b[j,]
ta = t(x_i) %*% A_inv %*% x_i
a_upper_ci = alpha * sqrt(ta) # upper part of variance interval
a_mean = th_hat[j,] %*% x_i # current estimate of mean
proba_mean = 1/(1+exp(-a_mean)) # inverse logit transform of linear predictor
p[j] = proba_mean + a_upper_ci # top CI
}
# choose the arm with the highest upper confidence bound
choices[i] = which.max(p)
#save probability (the winning arm's UCB score)
proba[i] = max(unlist(p))
# observe the reward of the chosen arm for this visitor
rewards[i] = visitor_reward[i,as.integer(choices[i])]
# update only the chosen arm's regression accumulators
A[,,as.integer(choices[i])] = A[,,as.integer(choices[i])] + x_i %*% t(x_i)
b[as.integer(choices[i]),] = b[as.integer(choices[i]),] + x_i * as.numeric(rewards[i])
}
time <- toc()
#return real theta from a ridge regression on the full data (for comparison)
th <- ReturnRealTheta(dt=dt,visitor_reward=visitor_reward,option="logit")
#return choices, probabilities, estimated and reference coefficients, timing
return (list('proba' = unlist(proba),'theta_hat'=th_hat,'theta'=th,'choice'=unlist(choices),'time'=(time$toc - time$tic)))
}
| /R/LOGITUCB.R | no_license | manuclaeys/bandit4abtest | R | false | false | 4,178 | r | #'LogitUCB algorithm
#'
#'Control data in visitor_reward with \code{\link{BanditRewardControl}}
#'Stop if something is wrong.
#' \itemize{ At each iteration
#' \item Calculates the arm probabilities according to a logit regression of context in dt dataframe
#' \item Choose the arm with the maximum upper bound (with alpha parameter)
#' \item Receives a reward in visitor_reward for the arm and associated iteration
#' \item Updates the results matrix S.
#' }
#'Returns the calculation time.
#'Review the estimated, actual coefficient for each arm.
#'See also \code{\link{ReturnRealTheta}},
#'Require \code{\link{tic}} and \code{\link{toc}} from \code{\link{tictoc}} library
#'
#'@param dt Dataframe of integer or numeric values
#'@param visitor_reward Dataframe of integer or numeric values
#'@param K Integer value (optional)
#'@param alpha Numeric value (optional)
#'
#'@return
#' \itemize{ List of element:
#' \item choice: choices of UCB,
#' \item proba: probability of the chosen arms,
#' \item time: time of cumputation,
#' \item theta_hat: coefficients estimated of each arm
#' \item theta: real coefficients of each arm
#' }
#'
#'@examples
#'size.tot = 1000
#'set.seed(4649) # this makes the example exactly reproducible
#'x1 = runif(size.tot, min=0, max=10) # you have 4, largely uncorrelated predictors
#'x2 = runif(size.tot, min=0, max=10)
#'x3 = runif(size.tot, min=0, max=10)
#'x4 = runif(size.tot, min=0, max=10)
#'dt = cbind(x1,x2,x3,x4)
#'#arm reward
#'arm_1 <- as.vector(c(-1,9,-8,4))
#'K1 = 1/(1+exp(- crossprod(t(dt),arm_1))) # inverse logit transform of linear predictor
#'K1 = vapply(K1, function(x) rbinom(1, 1, x), as.integer(1L))
#'arm_2 <- as.vector(c(-1,2,1,0))
#'K2 = 1/(1+exp(- crossprod(t(dt),arm_2))) # inverse logit transform of linear predictor
#'K2 = vapply(K2, function(x) rbinom(1, 1, x), as.integer(1L))
#'arm_3 <- as.vector(c(-1,-5,1,10))
#'K3 = 1/(1+exp(- crossprod(t(dt),arm_3)))
#'K3 = vapply(K3, function(x) rbinom(1, 1, x), as.integer(1L))
#'visitor_reward <- data.frame(K1,K2,K3)
#'dt <- as.data.frame(dt)
#'LOGITUCB(dt,visitor_reward)
#'@import tictoc
#'@export
#LOGITUCB
LOGITUCB <- function(dt, visitor_reward, alpha=1, K = ncol(visitor_reward)) {
#control data
DataControlK(visitor_reward, K=K)
DataControlContextReward(dt,visitor_reward)
#data formating
visitor_reward <- as.matrix(visitor_reward)
#Context matrix
D <- as.matrix(dt)
n <- nrow(dt)
n_f <- ncol(D)
#Keep the past choice for regression
choices = list(rep.int(0,n))
rewards = list(rep.int(0,n))
proba = list(rep.int(0,n))
#parameters to modelize
th_hat = array(0, c(K, n_f))
colnames(th_hat) <- colnames(dt)
rownames(th_hat) <- colnames(visitor_reward)
#regression variable
b <- matrix(0,K, n_f)
A <- array(0, c(n_f,n_f,K))
#tempory variable
p = list(rep.int(0,K))
#time keeper
tic()
#initialization
for (j in 1:K) {
A[,,j]= diag(n_f)
}
for (i in 1:n) {
x_i = D[i,]
for (j in 1:K) {
A_inv = solve(A[,,j])
th_hat[j,] = A_inv %*% b[j,]
ta = t(x_i) %*% A_inv %*% x_i
a_upper_ci = alpha * sqrt(ta) # upper part of variance interval
a_mean = th_hat[j,] %*% x_i # current estimate of mean
proba_mean = 1/(1+exp(-a_mean)) # inverse logit transform of linear predictor
p[j] = proba_mean + a_upper_ci # top CI
}
# choose the highest,
choices[i] = which.max(p)
#save probability
proba[i] = max(unlist(p))
# see what kind of result we get
rewards[i] = visitor_reward[i,as.integer(choices[i])]
# update the input vector
A[,,as.integer(choices[i])] = A[,,as.integer(choices[i])] + x_i %*% t(x_i)
b[as.integer(choices[i]),] = b[as.integer(choices[i]),] + x_i * as.numeric(rewards[i])
}
time <- toc()
#return real theta from a rigide regression
th <- ReturnRealTheta(dt=dt,visitor_reward=visitor_reward,option="logit")
#return data , models, groups and results
return (list('proba' = unlist(proba),'theta_hat'=th_hat,'theta'=th,'choice'=unlist(choices),'time'=(time$toc - time$tic)))
}
|
################################################################################
# File: DEG_Plotting_Functions.R #
# Author: Adam Faranda #
# Created: Sept 24, 2019 #
# Purpose: Plotting functions for analyzing overlap #
# #
################################################################################
# Setup Workspace
rm(list=ls()) # Clear any pre-existing environment data
options(echo=F)
library('openxlsx')
library('dplyr')
library('reshape2')
library('GEOquery')
library('limma')
wd<-getwd()
source('Overlap_Comparison_Functions.R')
source('DEG_Overlap_ETL_Functions.R')
gene_id_target = "symbol"
# Standardized Column Headers for pairwise result table
pr_cols<-c(
"contrast_label", "gene_id", "group_1_avg", "group_2_avg","logFC",
"p_value", "fdr"
)
# Standardized Column Headers for measurement table
ms_cols<-c(
"sample_label", "gene_id", "measurement_value", "meas_type"
)
| /DEG_Plotting_Functions.R | no_license | afaranda/DEG_Overlap_Analysis_System | R | false | false | 1,159 | r | ################################################################################
# File: DEG_Plotting_Functions.R #
# Author: Adam Faranda #
# Created: Sept 24, 2019 #
# Purpose: Plotting functions for analyzing overlap #
# #
################################################################################
# Setup Workspace
rm(list=ls()) # Clear any pre-existing environment data
options(echo=F)
library('openxlsx')
library('dplyr')
library('reshape2')
library('GEOquery')
library('limma')
wd<-getwd()
source('Overlap_Comparison_Functions.R')
source('DEG_Overlap_ETL_Functions.R')
gene_id_target = "symbol"
# Standardized Column Headers for pairwise result table
pr_cols<-c(
"contrast_label", "gene_id", "group_1_avg", "group_2_avg","logFC",
"p_value", "fdr"
)
# Standardized Column Headers for measurement table
ms_cols<-c(
"sample_label", "gene_id", "measurement_value", "meas_type"
)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write_missing_values.R
\name{write_missing_values}
\alias{write_missing_values}
\title{Construct table of missing values codes and their meaning}
\usage{
write_missing_values(storage, dataObject, field, MVC)
}
\arguments{
\item{storage}{(charcter) write_missing_values stores output in a tibble or dataframe. The
object should be emtpy and consist of three columns (attributeName
(character), code (character), and definition (charcater)). The user should
create this object prior to running the function and pass the unquoted name
of the object to write_missing_values via the storage parameter.}
\item{dataObject}{(character) The unqouted name of the data entity that will be examined for
missing values.}
\item{field}{(character) The quoted name of the field within the data entity that will be
examined for missing values.}
\item{MVC}{(optional)
(character) If relevant, the quoted name of a missing value code other than
NA or NaN that should be documented.}
}
\value{
A tibble documenting the presence of NA, NaN, or a user-specified
missing value code for the field of a tibble or dataframe.
}
\description{
The write_missing_values function examines a field of a data entity
(tibble or dataframe) for missing values and records any codes found.
}
\details{
The write_missing_values function checks the given field of a data entity
for NA, NaN, or a user-specified missing value code (MVC). For each code
present, a row documenting the attribute name, the code, and its meaning
is appended to the user-supplied storage tibble (with attributeName, code,
and definition columns), producing the missing-value-code metadata used in
the EML record for that entity.
}
\note{
The write_missing_values function is intended primarily as a helper to
other functions in the capeml ecosystem so is not meant to be called
directly (but can be).
}
\examples{
\dontrun{
missing_value_frame <- tibble::tibble(
attributeName = as.character(),
code = as.character(),
definition = as.character()
)
write_missing_values(
storage = missingValueFrame,
dataObject = captures,
field = "weight",
MVC = "X"
)
# write_missing_values can be run on a single field (as above) of a data
# object but the most common use case, and how it is applied in the capeml
# ecosystem, is to loop over all fields in a data entity (sensu with purrr
# below).
purrr::map_df(
.x = colnames(data_entity),
.f = capeml::write_missing_values,
storage = missing_value_frame,
dataObject = data_entity,
MVC = "X"
)
}
}
| /man/write_missing_values.Rd | no_license | CAPLTER/capeml | R | false | true | 2,873 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/write_missing_values.R
\name{write_missing_values}
\alias{write_missing_values}
\title{Construct table of missing values codes and their meaning}
\usage{
write_missing_values(storage, dataObject, field, MVC)
}
\arguments{
\item{storage}{(charcter) write_missing_values stores output in a tibble or dataframe. The
object should be emtpy and consist of three columns (attributeName
(character), code (character), and definition (charcater)). The user should
create this object prior to running the function and pass the unquoted name
of the object to write_missing_values via the storage parameter.}
\item{dataObject}{(character) The unqouted name of the data entity that will be examined for
missing values.}
\item{field}{(character) The quoted name of the field within the data entity that will be
examined for missing values.}
\item{MVC}{(optional)
(character) If relevant, the quoted name of a missing value code other than
NA or NaN that should be documented.}
}
\value{
A tibble documenting the presence of NA, NaN, or a user-specified
missing value code for the field of a tibble or dataframe.
}
\description{
The write_missing_values function reads an entity's attribute
details from a "entity name"_attrs.yaml or "entity name"_attrs.csv file in
the working directory - the details of which are incorporated into the EML
metadata for that entity. If present in the working directory, factor
metadata in a "entity name"_factors.yaml or "entity name"_factors.csv are
read and incorporated into attribute metadata.
}
\details{
The write_missing_values function reads an entity's attribute
details from a "entity name"_attrs.yaml or "entity name"_attrs.csv file in
the working directory - the details of which are incorporated into the EML
metadata for that entity. If present in the working directory, factor
metadata in a "entity name"_factors.yaml or "entity name"_factors.csv are
read and incorporated into attribute metadata.
}
\note{
The write_missing_values function is intended primarily as a helper to
other functions in the capeml ecosystem so is not meant to be called
directly (but can be).
}
\examples{
\dontrun{
missing_value_frame <- tibble::tibble(
attributeName = as.character(),
code = as.character(),
definition = as.character()
)
write_missing_values(
storage = missingValueFrame,
dataObject = captures,
field = "weight",
MVC = "X"
)
# write_missing_values can be run on a single field (as above) of a data
# object but the most common use case, and how it is applied in the capeml
# ecosystem, is to loop over all fields in a data entity (sensu with purrr
# below).
purrr::map_df(
.x = colnames(data_entity),
.f = capeml::write_missing_values,
storage = missing_value_frame,
dataObject = data_entity,
MVC = "X"
)
}
}
|
#' Retrieve the current supported type names in Athena
#'
#' @return A character vector of Athena type names, derived from the
#'   constants exposed by the Simba Athena JDBC driver class.
#' @export
#' @examples
#' athena_supported_types()
athena_supported_types <- function() {
  # The driver's AthenaTypes Java class exposes one field per supported
  # type, named AJ_<TYPE>_TYPE_NAME.  List those field names via rJava and
  # strip the prefix/suffix to recover the bare type names.
  gsub(
    "^AJ_|_TYPE_NAME$", "",
    grep(
      "^AJ_",
      names(rJava::J("com.simba.athena.athena.AthenaTypes")),
      value=TRUE
    )
  )
}
#'
#' @export
#' @examples
#' athena_supported_types()
athena_supported_types <- function() {
gsub(
"^AJ_|_TYPE_NAME$", "",
grep(
"^AJ_",
names(rJava::J("com.simba.athena.athena.AthenaTypes")),
value=TRUE
)
)
} |
#
# Exploratory Data Analysis
# Project #1
#
# 2015-10-09
# jptanguay
#
#
# plot1.r: histogram of Global Active Power, saved as plot1.png
#
#############################
#
#############################
# set the working directory to the correct path
# and load the common script that loads and prepares data (defines `dat2`)
# NOTE(review): hard-coded absolute path makes this script non-portable.
setwd("C:/Users/jptanguay/Documents/coursera/ExploratoryDataAnalysis/project1-ver2")
source(file = "project1_common.r")

#############################
# plot #1
#############################
par(mfrow = c(1, 1))
# `col` is now spelled out: the original `c = "orangered3"` only worked
# through R's partial argument matching against hist()'s `col` argument.
hist(dat2$Global_active_power,
     xlab = "Global Active Power (kilowatts)",
     col = "orangered3",
     main = "Global Active Power")

# copy the screen device to a PNG file, then close that device
dev.copy(png, filename = "plot1.png")
dev.off()
| /plot1.r | no_license | jptanguay/ExData_Plotting1 | R | false | false | 633 | r |
#
# Exploratory Data Analysis
# Project #1
#
# 2015-10-09
# jptanguay
#
#
# plot1.r
#
#############################
#
#############################
# set the working directory to the correct path
# and load the common script that loads and prepares data
setwd("C:/Users/jptanguay/Documents/coursera/ExploratoryDataAnalysis/project1-ver2")
source(file="project1_common.r")
#############################
# plot #1
#############################
par(mfrow=c(1,1))
hist(dat2$Global_active_power, xlab = "Global Active Power (kilowatts)", c="orangered3", main="Global Active Power")
dev.copy(png,filename="plot1.png");
dev.off ();
|
# Let's make some fake data to demonstrate the time interveal merging function
library('tidyverse')
library('lubridate')
set.seed(1)
# make some fake_ids and start_times
ids = 1:10
hours = sample(0:23, size = 10, replace = TRUE)
minutes = sample(0:60, size = 10, replace = TRUE)
start_times = paste0(
'2020-11-03 ',
hours, ':', minutes,':00'
) %>%
ymd_hms()
periods_data = tibble(
id = ids,
start_times = start_times
)
make_ts = function(start_time){
stop = 0
size_param = sample(
x = c(1, 2, 3),
size = 1
)
# initialize_data frame:
df = tibble(
ts_start = start_time,
ts_end = start_time + size_param*minutes(sample(1:10, 1))
)
#browser()
while(stop == 0){
size_param = (size_param*sample(x = c(0.5, 1, 2),size = 1)) %>%
ceiling()
df = df %>%
bind_rows(
tibble(
ts_start = last(df$ts_end) + minutes(size_param*sample((-5):10, 1)),
ts_end = ts_start + size_param*minutes(sample(1:10, 1))
)
)
if(runif(1) > 0.97){
stop = 1
}
}
df
}
periods_data = periods_data %>%
nest_by(id, start_times) %>%
mutate(
repeated_vals = list(make_ts(start_times))
) %>%
unnest(c(data, repeated_vals)) %>%
ungroup() %>%
select(-start_times) %>%
arrange(id, ts_start, ts_end)
usethis::use_data(periods_data, overwrite = TRUE)
| /inst/make_interval_data.R | no_license | svenhalvorson/SvenR | R | false | false | 1,357 | r | # Let's make some fake data to demonstrate the time interveal merging function
library('tidyverse')
library('lubridate')
set.seed(1)
# make some fake_ids and start_times
ids = 1:10
hours = sample(0:23, size = 10, replace = TRUE)
minutes = sample(0:60, size = 10, replace = TRUE)
start_times = paste0(
'2020-11-03 ',
hours, ':', minutes,':00'
) %>%
ymd_hms()
periods_data = tibble(
id = ids,
start_times = start_times
)
make_ts = function(start_time){
stop = 0
size_param = sample(
x = c(1, 2, 3),
size = 1
)
# initialize_data frame:
df = tibble(
ts_start = start_time,
ts_end = start_time + size_param*minutes(sample(1:10, 1))
)
#browser()
while(stop == 0){
size_param = (size_param*sample(x = c(0.5, 1, 2),size = 1)) %>%
ceiling()
df = df %>%
bind_rows(
tibble(
ts_start = last(df$ts_end) + minutes(size_param*sample((-5):10, 1)),
ts_end = ts_start + size_param*minutes(sample(1:10, 1))
)
)
if(runif(1) > 0.97){
stop = 1
}
}
df
}
periods_data = periods_data %>%
nest_by(id, start_times) %>%
mutate(
repeated_vals = list(make_ts(start_times))
) %>%
unnest(c(data, repeated_vals)) %>%
ungroup() %>%
select(-start_times) %>%
arrange(id, ts_start, ts_end)
usethis::use_data(periods_data, overwrite = TRUE)
|
# A boxplot() clone: identical argument handling and drawing (via bxp()),
# but per-group summaries come from the external helper myboxplot.stats()
# (defined elsewhere) instead of grDevices::boxplot.stats().
# Returns the usual bxp list (stats, n, conf, out, group, names), invisibly
# when plot = TRUE.
myboxplot=function (x, ..., range = 1.5, width = NULL, varwidth = FALSE, 
    notch = FALSE, outline = TRUE, names, boxwex = 0.8, plot = TRUE, 
    border = par("fg"), col = NULL, log = "", pars = NULL, horizontal = FALSE, 
    add = FALSE, at = NULL) 
{
    # Split the call: named arguments are graphics pars, unnamed arguments
    # (plus x, or the elements of x when it is a list) are the data groups.
    args <- list(x, ...)
    namedargs <- if (!is.null(attributes(args)$names)) 
        attributes(args)$names != ""
    else rep(FALSE, length.out = length(args))
    pars <- c(args[namedargs], pars)
    groups <- if (is.list(x)) 
        x
    else args[!namedargs]
    if (0 == (n <- length(groups))) 
        stop("invalid first argument")
    if (length(class(groups))) 
        groups <- unclass(groups)
    # Group labels: explicit `names` argument wins, then any existing names,
    # otherwise 1..n.
    if (!missing(names)) 
        attr(groups, "names") <- names
    else {
        if (is.null(attr(groups, "names"))) 
            attr(groups, "names") <- 1:n
        names <- attr(groups, "names")
    }
    # Replace each group's raw data with its boxplot statistics.
    for (i in 1:n) groups[i] <- list(myboxplot.stats(groups[[i]], 
        range))
    # Assemble the matrices bxp() expects: one column per group.
    # NOTE(review): nr=/nc= rely on partial argument matching of matrix();
    # spell out nrow=/ncol= when next touching this code.
    stats <- matrix(0, nr = 5, nc = n)
    conf <- matrix(0, nr = 2, nc = n)
    ng <- out <- group <- numeric(0)
    ct <- 1
    for (i in groups) {
        stats[, ct] <- i$stats
        conf[, ct] <- i$conf
        ng <- c(ng, i$n)
        if ((lo <- length(i$out))) {
            # Collect outliers together with the index of their group.
            out <- c(out, i$out)
            group <- c(group, rep.int(ct, lo))
        }
        ct <- ct + 1
    }
    z <- list(stats = stats, n = ng, conf = conf, out = out, 
        group = group, names = names)
    if (plot) {
        bxp(z, width, varwidth = varwidth, notch = notch, boxwex = boxwex, 
            border = border, col = col, log = log, pars = pars, 
            outline = outline, horizontal = horizontal, add = add, 
            at = at)
        invisible(z)
    }
    else z
} | /r/myboxplot.r | no_license | denistanwh/Energy | R | false | false | 1,730 | r | myboxplot=function (x, ..., range = 1.5, width = NULL, varwidth = FALSE,
notch = FALSE, outline = TRUE, names, boxwex = 0.8, plot = TRUE,
border = par("fg"), col = NULL, log = "", pars = NULL, horizontal = FALSE,
add = FALSE, at = NULL)
{
args <- list(x, ...)
namedargs <- if (!is.null(attributes(args)$names))
attributes(args)$names != ""
else rep(FALSE, length.out = length(args))
pars <- c(args[namedargs], pars)
groups <- if (is.list(x))
x
else args[!namedargs]
if (0 == (n <- length(groups)))
stop("invalid first argument")
if (length(class(groups)))
groups <- unclass(groups)
if (!missing(names))
attr(groups, "names") <- names
else {
if (is.null(attr(groups, "names")))
attr(groups, "names") <- 1:n
names <- attr(groups, "names")
}
for (i in 1:n) groups[i] <- list(myboxplot.stats(groups[[i]],
range))
stats <- matrix(0, nr = 5, nc = n)
conf <- matrix(0, nr = 2, nc = n)
ng <- out <- group <- numeric(0)
ct <- 1
for (i in groups) {
stats[, ct] <- i$stats
conf[, ct] <- i$conf
ng <- c(ng, i$n)
if ((lo <- length(i$out))) {
out <- c(out, i$out)
group <- c(group, rep.int(ct, lo))
}
ct <- ct + 1
}
z <- list(stats = stats, n = ng, conf = conf, out = out,
group = group, names = names)
if (plot) {
bxp(z, width, varwidth = varwidth, notch = notch, boxwex = boxwex,
border = border, col = col, log = log, pars = pars,
outline = outline, horizontal = horizontal, add = add,
at = at)
invisible(z)
}
else z
} |
# Shiny UI for the hazard-function demo app: an editable R snippet on the
# left (shinyAce editor, pre-filled with a log-normal hazard curve) and the
# resulting plot on the right. The editor's default text is runtime content
# shown to and evaluated for the user — do not reformat it.
# NOTE(review): add_theme(), add_css() and the "hazoutput"/"hazace"/"hazeval"
# ids are wired up elsewhere in this package (server side).
ui = fluidPage(theme = add_theme(getShinyOption('theme')),
               add_css(),
               sidebarLayout(
                 sidebarPanel(width = 5,
                              shinyAce::aceEditor(fontSize = 16,
                                                  wordWrap = T,
                                                  outputId = "hazace",
                                                  mode = "r",
                                                  theme = "github",
                                                  autoComplete = 'live',
                                                  height = '500px',
                                                  value =
"par(family = 'serif',mar = c(4,6,2,1))
curve(
dlnorm(x, meanlog = log(1.25), sdlog = 1)/
(1-plnorm(x, meanlog = log(1.25), sdlog = 1)),
xlab = 'Time, t',
ylab = expression(h(t)[Log-Normal]),
ylim = c(0,1),
xlim = c(0,6),
lwd = 3,
lty = 4,
col = 5,
cex.lab = 1.5,
cex.axis = 1.5,
las = 1)"),
                              # re-evaluates the editor contents server-side
                              actionButton("hazeval", "Evaluate")),
                 # right panel: the rendered hazard plot
                 mainPanel(plotOutput("hazoutput", height = '600px'), width = 7)))
| /inst/apps/haz_ace/ui.R | no_license | Auburngrads/teachingApps | R | false | false | 889 | r | ui = fluidPage(theme = add_theme(getShinyOption('theme')),
add_css(),
sidebarLayout(
sidebarPanel(width = 5,
shinyAce::aceEditor(fontSize = 16,
wordWrap = T,
outputId = "hazace",
mode = "r",
theme = "github",
autoComplete = 'live',
height = '500px',
value =
"par(family = 'serif',mar = c(4,6,2,1))
curve(
dlnorm(x, meanlog = log(1.25), sdlog = 1)/
(1-plnorm(x, meanlog = log(1.25), sdlog = 1)),
xlab = 'Time, t',
ylab = expression(h(t)[Log-Normal]),
ylim = c(0,1),
xlim = c(0,6),
lwd = 3,
lty = 4,
col = 5,
cex.lab = 1.5,
cex.axis = 1.5,
las = 1)"),
actionButton("hazeval", "Evaluate")),
mainPanel(plotOutput("hazoutput", height = '600px'), width = 7)))
|
\name{lmrdpoints}
\alias{lmrdpoints}
\alias{lmrdlines}
\title{Add points or lines to an L-moment ratio diagram}
\description{
\code{lmrdpoints} adds points,
and \code{lmrdlines} adds connected line segments,
to an \eqn{L}-moment ratio diagram.
}
\usage{
lmrdpoints(x, y=NULL, type="p", ...)
lmrdlines(x, y=NULL, type="l", ...)
}
\arguments{
\item{x}{Numeric vector of \eqn{L}-skewness values.}
\item{y}{Numeric vector of \eqn{L}-kurtosis values.
May be omitted: see \dQuote{Details} below.}
\item{type}{Character indicating the type of plotting.
Can be any valid value for the \code{type} argument
of \code{plot.default}.}
\item{...}{Further arguments (graphics parameters),
passed to \code{points} or \code{lines}.}
}
\details{
The functions \code{lmrdpoints} and \code{lmrdlines} are equivalent to
\code{points} and \code{lines} respectively,
except that if argument \code{y} is omitted, \code{x} is assumed to be
an object that contains both \eqn{L}-skewness and \eqn{L}-kurtosis values.
As in \code{lmrd}, it can be a vector with elements named
\code{"t_3"} and \code{"t_4"} (or \code{"tau_3"} and \code{"tau_4"}),
a matrix or data frame with columns named
\code{"t_3"} and \code{"t_4"} (or \code{"tau_3"} and \code{"tau_4"}),
or an object of class \code{"regdata"} (as defined in package \pkg{lmomRFA}).
}
%\value{}
%\references{}
\author{J. R. M. Hosking \email{jrmhosking@gmail.com}}
%\note{}
\seealso{\code{\link{lmrd}}, \code{\link{points}}, \code{\link{lines}}.}
\examples{
# Plot L-moment ratio diagram of Wind from the airquality data set
data(airquality)
lmrd(samlmu(airquality$Wind), xlim=c(-0.2, 0.2))
# Sample L-moments of each month's data
( lmom.monthly <- with(airquality,
t(sapply(5:9, function(mo) samlmu(Wind[Month==mo])))) )
# Add the monthly values to the plot
lmrdpoints(lmom.monthly, pch=19, col="blue")
# Draw an L-moment ratio diagram and add a line for the
# Weibull distribution
lmrd(xaxs="i", yaxs="i", las=1)
weimom <- sapply( seq(0, 0.9, by=0.01),
function(tau3) lmrwei(pelwei(c(0,1,tau3)), nmom=4) )
lmrdlines(t(weimom), col='darkgreen', lwd=2)
}
\keyword{hplot}
| /man/lmrdpoints.Rd | no_license | cran/lmom | R | false | false | 2,223 | rd | \name{lmrdpoints}
\alias{lmrdpoints}
\alias{lmrdlines}
\title{Add points or lines to an L-moment ratio diagram}
\description{
\code{lmrdpoints} adds points,
and \code{lmrdlines} adds connected line segments,
to an \eqn{L}-moment ratio diagram.
}
\usage{
lmrdpoints(x, y=NULL, type="p", ...)
lmrdlines(x, y=NULL, type="l", ...)
}
\arguments{
\item{x}{Numeric vector of \eqn{L}-skewness values.}
\item{y}{Numeric vector of \eqn{L}-kurtosis values.
May be omitted: see \dQuote{Details} below.}
\item{type}{Character indicating the type of plotting.
Can be any valid value for the \code{type} argument
of \code{plot.default}.}
\item{...}{Further arguments (graphics parameters),
passed to \code{points} or \code{lines}.}
}
\details{
The functions \code{lmrdpoints} and \code{lmrdlines} are equivalent to
\code{points} and \code{lines} respectively,
except that if argument \code{y} is omitted, \code{x} is assumed to be
an object that contains both \eqn{L}-skewness and \eqn{L}-kurtosis values.
As in \code{lmrd}, it can be a vector with elements named
\code{"t_3"} and \code{"t_4"} (or \code{"tau_3"} and \code{"tau_4"}),
a matrix or data frame with columns named
\code{"t_3"} and \code{"t_4"} (or \code{"tau_3"} and \code{"tau_4"}),
or an object of class \code{"regdata"} (as defined in package \pkg{lmomRFA}).
}
%\value{}
%\references{}
\author{J. R. M. Hosking \email{jrmhosking@gmail.com}}
%\note{}
\seealso{\code{\link{lmrd}}, \code{\link{points}}, \code{\link{lines}}.}
\examples{
# Plot L-moment ratio diagram of Wind from the airquality data set
data(airquality)
lmrd(samlmu(airquality$Wind), xlim=c(-0.2, 0.2))
# Sample L-moments of each month's data
( lmom.monthly <- with(airquality,
t(sapply(5:9, function(mo) samlmu(Wind[Month==mo])))) )
# Add the monthly values to the plot
lmrdpoints(lmom.monthly, pch=19, col="blue")
# Draw an L-moment ratio diagram and add a line for the
# Weibull distribution
lmrd(xaxs="i", yaxs="i", las=1)
weimom <- sapply( seq(0, 0.9, by=0.01),
function(tau3) lmrwei(pelwei(c(0,1,tau3)), nmom=4) )
lmrdlines(t(weimom), col='darkgreen', lwd=2)
}
\keyword{hplot}
|
# Auto-extracted usage example for SDraw::extended.gcd.
library(SDraw)
### Name: extended.gcd
### Title: Extended Greatest Common Denominator (GCD) algorithm.
### Aliases: extended.gcd
### ** Examples
# Run the extended Euclidean algorithm element-wise on two integer vectors.
x <- extended.gcd( c(16,27,27,46), c(9,16,9,240) )
# Check: by Bezout's identity, a*t + b*s should equal gcd(a, b) in each row
# (coefficient column naming follows extended.gcd's return value — confirm
# against the SDraw documentation).
cbind(x$a*x$t + x$b*x$s, x$gcd)
| /data/genthat_extracted_code/SDraw/examples/extended.gcd.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 251 | r | library(SDraw)
### Name: extended.gcd
### Title: Extended Greatest Common Denominator (GCD) algorithm.
### Aliases: extended.gcd
### ** Examples
x <- extended.gcd( c(16,27,27,46), c(9,16,9,240) )
# Check
cbind(x$a*x$t + x$b*x$s, x$gcd)
|
#!/usr/bin/Rscript --vanilla
# Compiles all .Rmd files in the _R directory into .md files in the _posts
# directory, but only when the input file is newer than the output file.
# Run ./knitpages.R to update all knitr files that need updating.
# The script is adapted from dgrtwo.github.com.

# Knit a single .Rmd input into a Jekyll-flavoured .md file.
#
# input:       path to the .Rmd source file
# outfile:     path of the .md file to write
# figsfolder:  directory prefix where generated figures are stored
# cachefolder: directory for the knitr cache
# base.url:    URL prefix used for figure links (defaults to site root)
KnitPost <- function(input, outfile, figsfolder, cachefolder, base.url = "/") {
  # this function is a modified version of an example here:
  # http://jfisher-usgs.github.com/r/2012/07/03/knitr-jekyll/
  library(knitr)
  opts_knit$set(base.url = base.url)
  # per-post figure and cache subdirectories, named after the input file
  # (escape the dot: ".Rmd$" as a regex would also match e.g. "xRmd")
  slug <- sub("\\.Rmd$", "", basename(input))
  fig.path <- paste0(figsfolder, slug, "/")
  cache.path <- file.path(cachefolder, slug, "/")
  opts_chunk$set(fig.path = fig.path)
  opts_chunk$set(cache.path = cache.path)
  opts_chunk$set(fig.cap = "center")
  render_jekyll()
  knit(input, outfile, envir = parent.frame())
}

# Knit every .Rmd file in `infolder` whose .md output in `outfolder` is
# missing or older than the source file.
knit_folder <- function(infolder, outfolder, figsfolder, cachefolder) {
  # list.files(pattern=) takes a regular expression, not a glob
  for (infile in list.files(infolder, pattern = "\\.Rmd$", full.names = TRUE)) {
    print(infile)
    outfile <- paste0(outfolder, "/", sub("\\.Rmd$", ".md", basename(infile)))
    print(outfile)
    # knit only if the output is missing or stale; || short-circuits so the
    # mtime comparison (NA for a missing file) is skipped when the output
    # does not exist yet
    if (!file.exists(outfile) ||
        file.info(infile)$mtime > file.info(outfile)$mtime) {
      KnitPost(infile, outfile, figsfolder, cachefolder)
    }
  }
}

knit_folder("_R", "_posts", "figs/", "_caches/")
#knit_folder("_R/drafts", "_drafts", "figs/drafts/")
| /_scripts/knitpages.R | no_license | chvlyl/chvlyl.github.io | R | false | false | 1,655 | r | #!/usr/bin/Rscript --vanilla
# compiles all .Rmd files in _R directory into .md files in Pages directory,
# if the input file is older than the output file.
# run ./knitpages.R to update all knitr files that need to be updated.
# the script is from dgrtwo.github.com
KnitPost <- function(input, outfile, figsfolder, cachefolder, base.url="/") {
# this function is a modified version of an example here:
# http://jfisher-usgs.github.com/r/2012/07/03/knitr-jekyll/
require(knitr);
opts_knit$set(base.url = base.url)
fig.path <- paste0(figsfolder, sub(".Rmd$", "", basename(input)), "/")
cache.path <- file.path(cachefolder, sub(".Rmd$", "", basename(input)), "/")
opts_chunk$set(fig.path = fig.path)
opts_chunk$set(cache.path = cache.path)
opts_chunk$set(fig.cap = "center")
render_jekyll()
knit(input, outfile, envir = parent.frame())
}
knit_folder <- function(infolder, outfolder, figsfolder, cachefolder) {
for (infile in list.files(infolder, pattern = "*.Rmd", full.names = TRUE)) {
pattern = "\\d\\d\\d\\d\\-\\d\\d\\-\\d\\d\\-"
print(infile)
# folder = ifelse(grepl(pattern, infile), outfolder, "pages")
outfile = paste0(outfolder, "/", sub(".Rmd$", ".md", basename(infile)))
print(outfile)
# knit only if the input file is the last one modified
if (!file.exists(outfile) |
file.info(infile)$mtime > file.info(outfile)$mtime) {
KnitPost(infile, outfile, figsfolder, cachefolder)
}
}
}
knit_folder("_R", "_posts", "figs/", "_caches/")
#knit_folder("_R/drafts", "_drafts", "figs/drafts/")
|
# Exercise 3: Data Frame Practice with `dplyr`.
# Use a different approach to accomplish the same tasks as exercise-1

# Install devtools package: allows installations from GitHub
install.packages('devtools')
install.packages('dplyr')

# Install "fueleconomy" package from GitHub
devtools::install_github("hadley/fueleconomy")

# Load the fueleconomy data and the dplyr verbs
library(fueleconomy)
library(dplyr)

# Which Acura model has the best hwy MPG in 2015? (without method chaining)
filtered.acura <- filter(vehicles, make == 'Acura')
filtered.year <- filter(filtered.acura, year == 2015)
filtered.max <- filter(filtered.year, hwy == max(hwy))
best.acura.2015 <- select(filtered.max, model)

# Which Acura model has the best hwy MPG in 2015? (nesting functions)
best.acura.2015.nest <- select(filter(filter(vehicles, make == 'Acura', year == 2015), hwy == max(hwy)), model)

# Which Acura model has the best hwy MPG in 2015? (pipe operator)
best.acura.2015.pipe <- filter(vehicles, make == 'Acura', year == 2015) %>%
  filter(hwy == max(hwy)) %>%
  select(model)

### Bonus ###
# Write 3 functions, one for each approach, then time how long it takes to
# perform each one 1000 times.
# BUG FIX: the original used `for (i in 1000)`, which iterates over the
# single value 1000 and therefore ran the body exactly once; seq_len(1000)
# actually repeats the work 1000 times.

# Time 1000 runs of the step-by-step (no chaining) approach.
# Returns the elapsed time as a difftime.
WithoutChaining <- function(car.make, car.year){
  start.time <- Sys.time()
  for (i in seq_len(1000)) {
    filtered.make <- filter(vehicles, make == car.make)
    filtered.year <- filter(filtered.make, year == car.year)
    filtered.max <- filter(filtered.year, hwy == max(hwy))
    best.model <- select(filtered.max, model)
  }
  end.time <- Sys.time()
  return(end.time - start.time)
}

# Time 1000 runs of the nested-function approach. Returns elapsed time.
NestedFunction <- function(car.make, car.year){
  start.time <- Sys.time()
  for (i in seq_len(1000)) {
    select(filter(filter(vehicles, make == car.make, year == car.year), hwy == max(hwy)), model)
  }
  end.time <- Sys.time()
  return(end.time - start.time)
}

# Time 1000 runs of the pipe-operator approach. Returns elapsed time.
PipeOperator <- function(car.make, car.year){
  start.time <- Sys.time()
  for (i in seq_len(1000)) {
    filter(vehicles, make == car.make, year == car.year) %>%
      filter(hwy == max(hwy)) %>%
      select(model)
  }
  end.time <- Sys.time()
  return(end.time - start.time)
}

time.without.chaining <- WithoutChaining('Acura', 1995)
time.nested.function <- NestedFunction('Acura', 1995)
time.pipe.operator <- PipeOperator('Acura', 1995)
| /exercise-3/exercise.R | permissive | BrydenR/m9-dplyr | R | false | false | 2,186 | r | # Exercise 3: Data Frame Practice with `dplyr`.
# Use a different appraoch to accomplish the same tasks as exercise-1
# Install devtools package: allows installations from GitHub
install.packages('devtools')
install.packages('dplyr')
# Install "fueleconomy" package from GitHub
devtools::install_github("hadley/fueleconomy")
# Require/library the fueleconomy package
library(fueleconomy)
library(dplyr)
# Which Accura model has the best hwy MPG in 2015? (without method chaining)
filtered.acura = filter(vehicles, make=='Acura')
filtered.year = filter(filtered.acura, year==2015)
filtered.max = filter(filtered.year, hwy==max(hwy))
best.acura.2015 = select(filtered.max, model)
# Which Accura model has the best hwy MPG in 2015? (nesting functions)
best.acura.2015.nest = select(filter(filter(vehicles, make=='Acura', year==2015), hwy==max(hwy)), model)
# Which Accura model has the best hwy MPG in 2015? (pipe operator)
best.acura.2015.pipe = filter(vehicles, make=='Acura', year==2015) %>% filter(hwy==max(hwy)) %>% select(model)
### Bonus ###
# Write 3 functions, one for each approach. Then,
# Test how long it takes to perform each one 1000 times
WithoutChaining <- function(car.make, car.year){
start.time = Sys.time()
for(i in 1000){
filtered.acura = filter(vehicles, make==car.make)
filtered.year = filter(filtered.acura, year==car.year)
filtered.max = filter(filtered.year, hwy==max(hwy))
best.acura.2015 = select(filtered.max, model)
}
end.time = Sys.time()
return(end.time-start.time)
}
NestedFunction <- function(car.make, car.year){
start.time = Sys.time()
for(i in 1000){
select(filter(filter(vehicles, make==car.make, year==car.year), hwy==max(hwy)), model)
}
end.time = Sys.time()
return(end.time-start.time)
}
PipeOperator <- function(car.make, car.year){
start.time = Sys.time()
for(i in 1000){
filter(vehicles, make==car.make, year==car.year) %>% filter(hwy==max(hwy)) %>% select(model)
}
end.time = Sys.time()
return(end.time-start.time)
}
time.without.chaining = WithoutChaining('Acura', 1995)
time.nested.function = NestedFunction('Acura', 1995)
time.pipe.operator = PipeOperator('Acura', 1995)
|
# Figure script: GSEA of KEGG level-3 pathways over time, split by gender.
# Produces combined bar-chart + heatmap figures per gender.
library(ggplot2)
library(scales)
library(patchwork)
# sample metadata (read as plain data frame; used for its Gender column)
meta.file <- '/home/drizzle_zhang/microbiome/result/meta_sample.out.txt'
df.meta <- read.delim(meta.file, stringsAsFactors = FALSE)
# KEGG level-3 pathway table; its first row carries the pathway names,
# which become the column names and the master pathway list
file.KEGG.L3 <- '/home/drizzle_zhang/microbiome/result/9.PICRUSt/origin_data/KO_KEGG_L3.txt'
df.db.KEGG <- read.delim(file.KEGG.L3, row.names = 1, header = F,
                         stringsAsFactors = F)
names.KEGG.L3 <- as.character(df.db.KEGG[1,])
names(df.db.KEGG) <- names.KEGG.L3
df.db.KEGG <- df.db.KEGG[-1,]
# multiple-testing cutoff label used in the per-timepoint result paths
type.cutoff <- 'fdr'
# dose groups compared (alternative kept for reference)
# vec.dose <- c(0, 1, 2, 3)
vec.dose <- c(0, 1)
# time points to include; alternative selections kept for reference
# series.time <- unique(df.meta$Time)
# mod <- 'all'
series.time <- c(-1, 1, 5, 9, 17, 21, 25, 29, 33, 41, 49, 60, 68, 84)
mod <- 'sel'
# series.time <- c(-1, 1, 5, 9, 17, 21, 25, 29, 33, 37, 41, 45, 49)
# mod <- 'old'
############################# GSEA
level <- 'L3'
# ---- male branch ----
gender <- 'male'
df.meta.gender <- df.meta[df.meta$Gender == gender, ]
path.plot.male <- paste0('/home/drizzle_zhang/microbiome/result/9.PICRUSt/heatmap_',
                         gender, '_', type.cutoff)
if (!file.exists(path.plot.male)) {
  dir.create(path.plot.male)
}
path.out <- '/home/drizzle_zhang/microbiome/result/Figs/'
# Collect the per-timepoint GSEA result files into one wide table:
# one row per pathway, one column per time point, value = signed -log10(p)
# (sign taken from the enrichment score's sign).
df.GSEA <- data.frame(ID = names.KEGG.L3)
for (sub.time in series.time) {
  file.GSEA <- paste0(path.plot.male, "/GSEA_", level, "_", sub.time, "_",
                      paste0(as.character(vec.dose), collapse = ''),
                      ".txt")
  # file.GSEA <- paste0(path.plot.male, "/GSEA_", level, "_", sub.time,
  #                     paste0(as.character(vec.dose), collapse = ''),
  #                     ".txt")
  sub.GSEA <- read.delim(file.GSEA, row.names = 1)
  sub.GSEA$logPval <- -log10(sub.GSEA$pvalue) *
    (sub.GSEA$enrichmentScore / abs(sub.GSEA$enrichmentScore))
  # sub.GSEA$order <- rank(sub.GSEA$logPval)
  sub.GSEA <- sub.GSEA[, c("Description", "logPval")]
  names(sub.GSEA) <- c("ID", sub.time)
  df.GSEA <- merge(df.GSEA, sub.GSEA, by = 'ID', all = T)
}
row.names(df.GSEA) <- df.GSEA$ID
df.GSEA$ID <- NULL
# pathways absent from a time point's file get a neutral score of 0
df.GSEA[is.na(df.GSEA)] <- 0
# keep the raw signed scores for the heatmap; replace df.GSEA by
# per-timepoint ranks for the KS test below
df.GSEA.score <- df.GSEA
df.GSEA <- as.data.frame(apply(df.GSEA.score, 2, rank))
# Build a long table of (pathway, time, rank) triples restricted to the
# time window of interest (days 1..41), then test each pathway's rank
# distribution against the pooled distribution with one-sided KS tests:
# 'greater' = values shifted low (labelled control-enriched),
# 'less' = values shifted high (labelled treatment-enriched).
df.sort <- data.frame(stringsAsFactors = F)
for (row in row.names(df.GSEA)) {
  for (col in as.numeric(names(df.GSEA))) {
    if ((col > 0) & (col < 42)) {
      df.sort <- rbind(df.sort, data.frame(pathway = row, time = col,
                                           value = df.GSEA[row, as.character(col)],
                                           stringsAsFactors = F))
    }
  }
}
df.sort$ID <- paste(df.sort$pathway, df.sort$time, sep = '_')
df.sort <- df.sort[order(df.sort$value),]
sort.value <- df.sort$value
df.ks <- data.frame(stringsAsFactors = F)
for (pathway in names.KEGG.L3) {
  sub.sort <- df.sort[df.sort$pathway == pathway, 'value']
  enrich.control <- ks.test(sub.sort, sort.value, alternative = 'greater')
  enrich.treat <- ks.test(sub.sort, sort.value, alternative = 'less')
  df.ks <- rbind(df.ks, data.frame(pathway = pathway,
                                   pvalue.control = enrich.control$p.value,
                                   pvalue.treat = enrich.treat$p.value))
}
# FDR-adjusted q-values (computed but the filters below use raw p-values)
df.ks$qvalue.control <- p.adjust(df.ks$pvalue.control, method = 'fdr')
df.ks$qvalue.treat <- p.adjust(df.ks$pvalue.treat, method = 'fdr')
# Select pathways to display: keep those significant in either direction
# (raw p-value thresholds; earlier threshold variants kept below), then
# drop two pathways explicitly excluded from the figure.
df.ks.male <- df.ks
# df.ks.male.filter <- df.ks.male[
#     df.ks.male$qvalue.control < 0.05 | df.ks.male$qvalue.treat < 0.05,]
df.ks.male.filter <- df.ks.male[
  df.ks.male$pvalue.control < 0.012 | df.ks.male$pvalue.treat < 0.012,]
# df.ks.male.filter <- df.ks.male[
#     df.ks.male$pvalue.control < 0.1 | df.ks.male$pvalue.treat < 0.03,]
# df.ks.male.filter <- df.ks.male[
#     df.ks.male$pvalue.control < 0.01 | df.ks.male$pvalue.treat < 0.01,]
df.ks.male.filter <-
  df.ks.male.filter[!(df.ks.male.filter$pathway %in%
                        c('ABC transporters', 'Lipopolysaccharide biosynthesis')),]
# Signed significance per pathway: whichever one-sided p is stronger wins;
# negative = control direction, positive = treatment direction.
log10Pval <- c()
for (i in row.names(df.ks.male.filter)) {
  pvalue.control <- -log10(df.ks.male.filter[i, 'pvalue.control'])
  pvalue.treat <- -log10(df.ks.male.filter[i, 'pvalue.treat'])
  if (pvalue.control > pvalue.treat) {
    log10Pval <- c(log10Pval, -pvalue.control)
  } else {
    log10Pval <- c(log10Pval, pvalue.treat)
  }
}
df.ks.male.filter$log10Pval <- log10Pval
df.ks.male.filter <- df.ks.male.filter[
  order(df.ks.male.filter$log10Pval, decreasing = T), ]
# Bar colour encodes the direction of enrichment.
vec.color <- c()
for (pval in df.ks.male.filter$log10Pval) {
  if (pval < 0) {
    vec.color <- c(vec.color, 'Enrich in Control')
  } else {
    vec.color <- c(vec.color, 'Enrich in Treatment')
  }
}
df.ks.male.filter$color <- factor(vec.color,
                                  levels = c('Enrich in Treatment', 'Enrich in Control'))
# Horizontal bar chart of signed -log10(p) per selected pathway.
# NOTE(review): the axis label says "adj P-value" but log10Pval above is
# computed from the raw p-values — confirm which is intended.
plot.male <-
  ggplot(data = df.ks.male.filter, aes(x = reorder(pathway, X = log10Pval),
                                       y = log10Pval, fill = color)) +
  geom_bar(stat = 'identity') +
  labs(x = 'Pathway', y = expression(paste("-log"[10], "(adj", italic("P"), "-value)")),
       fill = '') +
  scale_fill_manual(values = c(muted("red"), muted("blue"))) +
  coord_flip() +
  theme_bw() +
  theme(panel.background = element_rect(color = 'black', size = 1.5,
                                        fill = 'transparent'),
        panel.grid.major.x = element_blank(),
        panel.grid.minor = element_blank(),
        panel.grid.major.y = element_line(colour = "gray", size = 0.1,
                                          linetype = 2),
        axis.text.x = element_text(size = 9, color = "black", family = 'Arial'),
        axis.text.y = element_text(size = 10, color = "black", family = 'Arial'))
# ggsave(filename = paste0("/Male_Combine_Sum_GSEA_", mod, '_',
#                          paste0(as.character(vec.dose), collapse = ''), ".png"),
#        path = path.out, plot = plot.male,
#        height = 15, width = 20, units = 'cm')
# Heatmap companion: raw signed GSEA scores (pathway x time), with rows
# ordered to match the bar chart.
df.GSEA <- df.GSEA.score
df.GSEA.male <- df.GSEA[as.character(df.ks.male.filter$pathway),]
# reshape the wide score matrix to long (pathway, time, score) for geom_tile
df.heatmap.male <- data.frame(stringsAsFactors = F)
for (pathway in row.names(df.GSEA.male)) {
  for (sub.time in names(df.GSEA.male)) {
    df.heatmap.male <-
      rbind(df.heatmap.male,
            data.frame(pathway = pathway, time = sub.time,
                       score = df.GSEA.male[pathway, sub.time],
                       stringsAsFactors = F))
  }
}
# ordered factors pin rows/columns to the intended figure order
df.heatmap.male$pathway <-
  factor(df.heatmap.male$pathway,
         levels = rev(as.character(df.ks.male.filter$pathway)), ordered = T)
df.heatmap.male$time <-
  factor(df.heatmap.male$time,
         levels = as.character(series.time), ordered = T)
plot.heatmap.male <-
  ggplot(data = df.heatmap.male,
         aes(x = time, y = pathway, fill = score)) +
  geom_tile() +
  scale_fill_gradient2(low = muted("blue"), high = muted("red"), mid = "#F5F5F5") +
  labs(x = 'Time', y = 'Pathway', fill = 'Enrichment Score') +
  theme_bw() +
  theme(
    panel.border = element_blank(),
    panel.background = element_rect(color = 'transparent', size = 0,
                                    fill = 'transparent'),
    panel.grid = element_blank(),
    axis.ticks = element_blank(),
    # pathway labels are provided by the adjacent bar chart, so hide them
    axis.title.y = element_blank(),
    axis.text.y = element_blank(),
    axis.text.x = element_text(size = 9, color = "black", family = 'Arial'),
    legend.text = element_text(size = 9)
  )
# ggsave(filename = paste0("/Male_Combine_Heatmap_GSEA_", mod, '_',
#                          paste0(as.character(vec.dose), collapse = ''), ".png"),
#        path = path.out, plot = plot.heatmap.male,
#        height = 15, width = 20, units = 'cm')
# combine bars + heatmap side by side (patchwork) with a shared legend
plot.final.male <- plot.male + plot.heatmap.male + plot_layout(widths = c(1, 1.6),
                                                               guides = 'collect')
ggsave(plot = plot.final.male, path = path.out,
       filename = paste0("/Male_GSEA_",
                         paste0(as.character(vec.dose), collapse = ''), ".png"),
       height = 11, width = 25, units = 'cm')
# ---- female branch ---- (same pipeline as the male branch, with a
# different time-point selection and thresholds)
gender <- 'female'
df.meta.gender <- df.meta[df.meta$Gender == gender, ]
path.plot.female <- paste0('/home/drizzle_zhang/microbiome/result/9.PICRUSt/heatmap_',
                           gender, '_', type.cutoff)
# per-timepoint GSEA results -> wide signed -log10(p) table
df.GSEA <- data.frame(ID = names.KEGG.L3)
for (sub.time in series.time) {
  file.GSEA <- paste0(path.plot.female, "/GSEA_", level, "_", sub.time, "_",
                      paste0(as.character(vec.dose), collapse = ''),
                      ".txt")
  sub.GSEA <- read.delim(file.GSEA, row.names = 1)
  sub.GSEA$logPval <- -log10(sub.GSEA$pvalue) *
    (sub.GSEA$enrichmentScore / abs(sub.GSEA$enrichmentScore))
  sub.GSEA <- sub.GSEA[, c("Description", "logPval")]
  names(sub.GSEA) <- c("ID", sub.time)
  df.GSEA <- merge(df.GSEA, sub.GSEA, by = 'ID', all = T)
}
row.names(df.GSEA) <- df.GSEA$ID
df.GSEA$ID <- NULL
df.GSEA[is.na(df.GSEA)] <- 0
# Long table restricted to day 1, day 25, and days 33-84, then one-sided KS
# tests per pathway as in the male branch.
# NOTE(review): unlike the male branch, the values here are the signed
# scores themselves, not per-timepoint ranks — confirm this is intended.
df.sort <- data.frame(stringsAsFactors = F)
for (row in row.names(df.GSEA)) {
  for (col in as.numeric(names(df.GSEA))) {
    if ((col == 1) | (col == 25) | ((col >= 33) & (col < 85))) {
      # if ((col == 1) | ((col >= 1) & (col < 85))) {
      df.sort <- rbind(df.sort, data.frame(pathway = row, time = col,
                                           value = df.GSEA[row, as.character(col)],
                                           stringsAsFactors = F))
    }
  }
}
df.sort$ID <- paste(df.sort$pathway, df.sort$time, sep = '_')
df.sort <- df.sort[order(df.sort$value),]
sort.value <- df.sort$value
df.ks <- data.frame(stringsAsFactors = F)
for (pathway in names.KEGG.L3) {
  sub.sort <- df.sort[df.sort$pathway == pathway, 'value']
  enrich.control <- ks.test(sub.sort, sort.value, alternative = 'greater')
  enrich.treat <- ks.test(sub.sort, sort.value, alternative = 'less')
  df.ks <- rbind(df.ks, data.frame(pathway = pathway,
                                   pvalue.control = enrich.control$p.value,
                                   pvalue.treat = enrich.treat$p.value))
}
df.ks$qvalue.control <- p.adjust(df.ks$pvalue.control, method = 'fdr')
df.ks$qvalue.treat <- p.adjust(df.ks$pvalue.treat, method = 'fdr')
# Pathway selection for the female figure: a loose raw-p screen, drop
# 'ABC transporters', then keep a hand-picked pathway list. Earlier
# threshold variants kept below for reference.
df.ks.female <- df.ks
# df.ks.female.filter <- df.ks.female[
#     df.ks.female$qvalue.control < 0.15 | df.ks.female$qvalue.treat < 0.4,]
# df.ks.female.filter <- df.ks.female[
#     df.ks.female$pvalue.control < 0.035 | df.ks.female$pvalue.treat < 0.043,]
# df.ks.female.filter <- df.ks.female[
#     df.ks.female$pvalue.control < 0.05 | df.ks.female$pvalue.treat < 0.07,]
# df.ks.female.filter <- df.ks.female[
#     df.ks.female$pvalue.control < 0.01 | df.ks.female$pvalue.treat < 0.003,]
df.ks.female.filter <- df.ks.female[
  df.ks.female$pvalue.control < 0.2 | df.ks.female$pvalue.treat < 0.1,]
df.ks.female.filter <- df.ks.female.filter[
  df.ks.female.filter$pathway != 'ABC transporters',]
sel.pathway <- c('Lipopolysaccharide biosynthesis proteins',
                 'Fatty acid biosynthesis', 'Nitrogen metabolism',
                 'Peptidases', 'Phosphotransferase system (PTS)',
                 'Benzoate degradation',
                 'Tryptophan metabolism', 'Starch and sucrose metabolism',
                 'Peroxisome', 'Galactose metabolism', 'Lysine biosynthesis',
                 'Terpenoid backbone biosynthesis')
df.ks.female.filter <- df.ks.female.filter[df.ks.female.filter$pathway %in% sel.pathway,]
# signed significance per pathway (negative = control direction)
log10Pval <- c()
for (i in row.names(df.ks.female.filter)) {
  pvalue.control <- -log10(df.ks.female.filter[i, 'pvalue.control'])
  pvalue.treat <- -log10(df.ks.female.filter[i, 'pvalue.treat'])
  if (pvalue.control > pvalue.treat) {
    log10Pval <- c(log10Pval, -pvalue.control)
  } else {
    log10Pval <- c(log10Pval, pvalue.treat)
  }
}
df.ks.female.filter$log10Pval <- log10Pval
df.ks.female.filter <- df.ks.female.filter[
  order(df.ks.female.filter$log10Pval, decreasing = T), ]
# bar colour encodes the direction of enrichment
vec.color <- c()
for (pval in df.ks.female.filter$log10Pval) {
  if (pval > 0) {
    vec.color <- c(vec.color, 'Enrich in Treatment')
  } else {
    vec.color <- c(vec.color, 'Enrich in Control')
  }
}
df.ks.female.filter$color <- factor(vec.color,
                                    levels = c('Enrich in Treatment', 'Enrich in Control'))
# Horizontal bar chart of signed -log10(p) per selected pathway (female).
# NOTE(review): axis label says "adj P-value" but raw p-values are plotted.
plot.female <-
  ggplot(data = df.ks.female.filter, aes(x = reorder(pathway, X = log10Pval),
                                         y = log10Pval, fill = color)) +
  geom_bar(stat = 'identity') +
  labs(x = 'Pathway', y = expression(paste("-log"[10], "(adj", italic("P"), "-value)")),
       fill = '') +
  scale_fill_manual(values = c(muted("red"), muted("blue"))) +
  coord_flip() +
  theme_bw() +
  theme(panel.background = element_rect(color = 'black', size = 1.5,
                                        fill = 'transparent'),
        panel.grid.major.x = element_blank(),
        panel.grid.minor = element_blank(),
        panel.grid.major.y = element_line(colour = "gray", size = 0.1,
                                          linetype = 2),
        axis.text.x = element_text(size = 9, color = "black", family = 'Arial'),
        axis.text.y = element_text(size = 10, color = "black", family = 'Arial'))
# ggsave(filename = paste0("/Female_Combine_Sum_GSEA_", mod, '_',
#                          paste0(as.character(vec.dose), collapse = ''), ".png"),
#        path = path.out, plot = plot.female,
#        height = 15, width = 20, units = 'cm')
# Heatmap companion: signed scores (pathway x time), rows matching the bars.
df.GSEA.female <- df.GSEA[as.character(df.ks.female.filter$pathway),]
# reshape wide -> long for geom_tile
df.heatmap.female <- data.frame(stringsAsFactors = F)
for (pathway in row.names(df.GSEA.female)) {
  for (sub.time in names(df.GSEA.female)) {
    df.heatmap.female <-
      rbind(df.heatmap.female,
            data.frame(pathway = pathway, time = sub.time,
                       score = df.GSEA.female[pathway, sub.time],
                       stringsAsFactors = F))
  }
}
# ordered factors pin rows/columns to the intended figure order
df.heatmap.female$pathway <-
  factor(df.heatmap.female$pathway,
         levels = rev(as.character(df.ks.female.filter$pathway)), ordered = T)
df.heatmap.female$time <-
  factor(df.heatmap.female$time,
         levels = as.character(series.time), ordered = T)
plot.heatmap.female <-
  ggplot(data = df.heatmap.female,
         aes(x = time, y = pathway, fill = score)) +
  geom_tile() +
  scale_fill_gradient2(low = muted("blue"), high = muted("red"), mid = "#F5F5F5") +
  labs(x = 'Time', y = 'Pathway', fill = 'Enrichment Score') +
  theme_bw() +
  theme(
    panel.border = element_blank(),
    panel.background = element_rect(color = 'transparent', size = 1.5,
                                    fill = 'transparent'),
    panel.grid = element_blank(),
    axis.ticks = element_blank(),
    # pathway labels come from the adjacent bar chart, so hide them here
    axis.title.y = element_blank(),
    axis.text.y = element_blank(),
    axis.text.x = element_text(size = 9, color = "black", family = 'Arial'),
    legend.text = element_text(size = 9))
# ggsave(filename = paste0("/Female_Combine_Heatmap_GSEA_", mod, '_',
#                          paste0(as.character(vec.dose), collapse = ''), ".png"),
#        path = path.out, plot = plot.heatmap.female,
#        height = 15, width = 20, units = 'cm')
# combine bars + heatmap side by side and save the final figure
plot.final.female <- plot.female + plot.heatmap.female +
  plot_layout(widths = c(1, 1.6), guides = 'collect')
ggsave(plot = plot.final.female, path = path.out,
       filename = paste0("/Female_GSEA_",
                         paste0(as.character(vec.dose), collapse = ''), ".png"),
       height = 8, width = 25, units = 'cm')
| /microbiome/figures/supp5_KEGG.R | no_license | Drizzle-Zhang/bioinformatics | R | false | false | 15,886 | r | library(ggplot2)
library(scales)
library(patchwork)
# meta file
meta.file <- '/home/drizzle_zhang/microbiome/result/meta_sample.out.txt'
df.meta <- read.delim(meta.file, stringsAsFactors = FALSE)
# KEGG
file.KEGG.L3 <- '/home/drizzle_zhang/microbiome/result/9.PICRUSt/origin_data/KO_KEGG_L3.txt'
df.db.KEGG <- read.delim(file.KEGG.L3, row.names = 1, header = F,
stringsAsFactors = F)
names.KEGG.L3 <- as.character(df.db.KEGG[1,])
names(df.db.KEGG) <- names.KEGG.L3
df.db.KEGG <- df.db.KEGG[-1,]
# cutoff
type.cutoff <- 'fdr'
# dose
# vec.dose <- c(0, 1, 2, 3)
vec.dose <- c(0, 1)
# time series
# series.time <- unique(df.meta$Time)
# mod <- 'all'
series.time <- c(-1, 1, 5, 9, 17, 21, 25, 29, 33, 41, 49, 60, 68, 84)
mod <- 'sel'
# series.time <- c(-1, 1, 5, 9, 17, 21, 25, 29, 33, 37, 41, 45, 49)
# mod <- 'old'
############################# GSEA
# Assemble the per-timepoint GSEA results for male samples into one
# pathway x timepoint matrix of signed -log10 p-values, then rank them.
level <- 'L3'
# male
gender <- 'male'
df.meta.gender <- df.meta[df.meta$Gender == gender, ]
path.plot.male <- paste0('/home/drizzle_zhang/microbiome/result/9.PICRUSt/heatmap_',
gender, '_', type.cutoff)
if (!file.exists(path.plot.male)) {
dir.create(path.plot.male)
}
path.out <- '/home/drizzle_zhang/microbiome/result/Figs/'
# Seed with the full pathway list so every pathway keeps a row even when a
# timepoint's GSEA output omits it (outer merge with all = T below).
df.GSEA <- data.frame(ID = names.KEGG.L3)
for (sub.time in series.time) {
file.GSEA <- paste0(path.plot.male, "/GSEA_", level, "_", sub.time, "_",
paste0(as.character(vec.dose), collapse = ''),
".txt")
# file.GSEA <- paste0(path.plot.male, "/GSEA_", level, "_", sub.time,
# paste0(as.character(vec.dose), collapse = ''),
# ".txt")
sub.GSEA <- read.delim(file.GSEA, row.names = 1)
# Signed significance score: -log10(p) carrying the sign of the GSEA
# enrichment score (later code labels positive as treatment-enriched,
# negative as control-enriched).
sub.GSEA$logPval <- -log10(sub.GSEA$pvalue) *
(sub.GSEA$enrichmentScore / abs(sub.GSEA$enrichmentScore))
# sub.GSEA$order <- rank(sub.GSEA$logPval)
sub.GSEA <- sub.GSEA[, c("Description", "logPval")]
names(sub.GSEA) <- c("ID", sub.time)
df.GSEA <- merge(df.GSEA, sub.GSEA, by = 'ID', all = T)
}
row.names(df.GSEA) <- df.GSEA$ID
df.GSEA$ID <- NULL
# Pathways absent from a timepoint's GSEA output get a neutral score of 0.
df.GSEA[is.na(df.GSEA)] <- 0
# Keep raw signed scores for the heatmap; replace df.GSEA by per-timepoint
# ranks, which feed the rank-based KS enrichment test below.
df.GSEA.score <- df.GSEA
df.GSEA <- as.data.frame(apply(df.GSEA.score, 2, rank))
# sort
# Build a long-format table: one (pathway, time, rank-value) row per pathway
# and retained timepoint. Only times in the open interval (0, 42) enter the
# male analysis window — TODO confirm the intended cutoff (the female branch
# below uses a different time selection).
df.sort <- data.frame(stringsAsFactors = F)
for (row in row.names(df.GSEA)) {
for (col in as.numeric(names(df.GSEA))) {
if ((col > 0) & (col < 42)) {
df.sort <- rbind(df.sort, data.frame(pathway = row, time = col,
value = df.GSEA[row, as.character(col)],
stringsAsFactors = F))
}
}
}
df.sort$ID <- paste(df.sort$pathway, df.sort$time, sep = '_')
df.sort <- df.sort[order(df.sort$value),]
# Pooled rank values of all pathways; the reference sample for ks.test.
sort.value <- df.sort$value
# One-sided two-sample KS tests per pathway against the pooled distribution:
# 'greater' (CDF above reference, i.e. values shifted low) is treated as
# control-enriched, 'less' (values shifted high) as treatment-enriched.
df.ks <- data.frame(stringsAsFactors = F)
for (pathway in names.KEGG.L3) {
sub.sort <- df.sort[df.sort$pathway == pathway, 'value']
enrich.control <- ks.test(sub.sort, sort.value, alternative = 'greater')
enrich.treat <- ks.test(sub.sort, sort.value, alternative = 'less')
df.ks <- rbind(df.ks, data.frame(pathway = pathway,
pvalue.control = enrich.control$p.value,
pvalue.treat = enrich.treat$p.value))
}
# Benjamini-Hochberg (FDR) adjusted q-values, separately per direction.
df.ks$qvalue.control <- p.adjust(df.ks$pvalue.control, method = 'fdr')
df.ks$qvalue.treat <- p.adjust(df.ks$pvalue.treat, method = 'fdr')
# use ks score to plot
df.ks.male <- df.ks
# df.ks.male.filter <- df.ks.male[
# df.ks.male$qvalue.control < 0.05 | df.ks.male$qvalue.treat < 0.05,]
# Keep pathways with raw p < 0.012 in either direction (earlier/alternative
# thresholds kept commented below).
df.ks.male.filter <- df.ks.male[
df.ks.male$pvalue.control < 0.012 | df.ks.male$pvalue.treat < 0.012,]
# df.ks.male.filter <- df.ks.male[
# df.ks.male$pvalue.control < 0.1 | df.ks.male$pvalue.treat < 0.03,]
# df.ks.male.filter <- df.ks.male[
# df.ks.male$pvalue.control < 0.01 | df.ks.male$pvalue.treat < 0.01,]
# Manually excluded pathways — NOTE(review): reason not visible here; confirm
# whether these are reported elsewhere.
df.ks.male.filter <-
df.ks.male.filter[!(df.ks.male.filter$pathway %in%
c('ABC transporters', 'Lipopolysaccharide biosynthesis')),]
# Collapse the two one-sided KS p-values into a single signed score per
# pathway: magnitude is -log10 of the stronger (smaller) p-value; the sign is
# negative when the control direction dominates, positive for treatment.
# Vectorized with ifelse(); the original grew vectors element-by-element
# inside loops (an O(n^2) append anti-pattern), with identical results.
pc <- -log10(df.ks.male.filter$pvalue.control)
pt <- -log10(df.ks.male.filter$pvalue.treat)
df.ks.male.filter$log10Pval <- ifelse(pc > pt, -pc, pt)
# Order pathways from most treatment-enriched (largest positive) down to
# most control-enriched (most negative).
df.ks.male.filter <- df.ks.male.filter[
  order(df.ks.male.filter$log10Pval, decreasing = T), ]
# Direction label for the bar fill; explicit factor levels fix legend order.
df.ks.male.filter$color <- factor(
  ifelse(df.ks.male.filter$log10Pval < 0,
         'Enrich in Control', 'Enrich in Treatment'),
  levels = c('Enrich in Treatment', 'Enrich in Control'))
plot.male <-
ggplot(data = df.ks.male.filter, aes(x = reorder(pathway, X = log10Pval),
y = log10Pval, fill = color)) +
geom_bar(stat = 'identity') +
labs(x = 'Pathway', y = expression(paste("-log"[10], "(adj", italic("P"), "-value)")),
fill = '') +
scale_fill_manual(values = c(muted("red"), muted("blue"))) +
coord_flip() +
theme_bw() +
theme(panel.background = element_rect(color = 'black', size = 1.5,
fill = 'transparent'),
panel.grid.major.x = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major.y = element_line(colour = "gray", size = 0.1,
linetype = 2),
axis.text.x = element_text(size = 9, color = "black", family = 'Arial'),
axis.text.y = element_text(size = 10, color = "black", family = 'Arial'))
# ggsave(filename = paste0("/Male_Combine_Sum_GSEA_", mod, '_',
# paste0(as.character(vec.dose), collapse = ''), ".png"),
# path = path.out, plot = plot.male,
# height = 15, width = 20, units = 'cm')
# heatmap
df.GSEA <- df.GSEA.score
df.GSEA.male <- df.GSEA[as.character(df.ks.male.filter$pathway),]
df.heatmap.male <- data.frame(stringsAsFactors = F)
for (pathway in row.names(df.GSEA.male)) {
for (sub.time in names(df.GSEA.male)) {
df.heatmap.male <-
rbind(df.heatmap.male,
data.frame(pathway = pathway, time = sub.time,
score = df.GSEA.male[pathway, sub.time],
stringsAsFactors = F))
}
}
df.heatmap.male$pathway <-
factor(df.heatmap.male$pathway,
levels = rev(as.character(df.ks.male.filter$pathway)), ordered = T)
df.heatmap.male$time <-
factor(df.heatmap.male$time,
levels = as.character(series.time), ordered = T)
plot.heatmap.male <-
ggplot(data = df.heatmap.male,
aes(x = time, y = pathway, fill = score)) +
geom_tile() +
scale_fill_gradient2(low = muted("blue"), high = muted("red"), mid = "#F5F5F5") +
labs(x = 'Time', y = 'Pathway', fill = 'Enrichment Score') +
theme_bw() +
theme(
panel.border = element_blank(),
panel.background = element_rect(color = 'transparent', size = 0,
fill = 'transparent'),
panel.grid = element_blank(),
axis.ticks = element_blank(),
axis.title.y = element_blank(),
axis.text.y = element_blank(),
axis.text.x = element_text(size = 9, color = "black", family = 'Arial'),
legend.text = element_text(size = 9)
)
# ggsave(filename = paste0("/Male_Combine_Heatmap_GSEA_", mod, '_',
# paste0(as.character(vec.dose), collapse = ''), ".png"),
# path = path.out, plot = plot.heatmap.male,
# height = 15, width = 20, units = 'cm')
plot.final.male <- plot.male + plot.heatmap.male + plot_layout(widths = c(1, 1.6),
guides = 'collect')
ggsave(plot = plot.final.male, path = path.out,
filename = paste0("/Male_GSEA_",
paste0(as.character(vec.dose), collapse = ''), ".png"),
height = 11, width = 25, units = 'cm')
# female
gender <- 'female'
df.meta.gender <- df.meta[df.meta$Gender == gender, ]
path.plot.female <- paste0('/home/drizzle_zhang/microbiome/result/9.PICRUSt/heatmap_',
gender, '_', type.cutoff)
df.GSEA <- data.frame(ID = names.KEGG.L3)
for (sub.time in series.time) {
file.GSEA <- paste0(path.plot.female, "/GSEA_", level, "_", sub.time, "_",
paste0(as.character(vec.dose), collapse = ''),
".txt")
sub.GSEA <- read.delim(file.GSEA, row.names = 1)
sub.GSEA$logPval <- -log10(sub.GSEA$pvalue) *
(sub.GSEA$enrichmentScore / abs(sub.GSEA$enrichmentScore))
sub.GSEA <- sub.GSEA[, c("Description", "logPval")]
names(sub.GSEA) <- c("ID", sub.time)
df.GSEA <- merge(df.GSEA, sub.GSEA, by = 'ID', all = T)
}
row.names(df.GSEA) <- df.GSEA$ID
df.GSEA$ID <- NULL
df.GSEA[is.na(df.GSEA)] <- 0
# sort
df.sort <- data.frame(stringsAsFactors = F)
for (row in row.names(df.GSEA)) {
for (col in as.numeric(names(df.GSEA))) {
if ((col == 1) | (col == 25) | ((col >= 33) & (col < 85))) {
# if ((col == 1) | ((col >= 1) & (col < 85))) {
df.sort <- rbind(df.sort, data.frame(pathway = row, time = col,
value = df.GSEA[row, as.character(col)],
stringsAsFactors = F))
}
}
}
df.sort$ID <- paste(df.sort$pathway, df.sort$time, sep = '_')
df.sort <- df.sort[order(df.sort$value),]
sort.value <- df.sort$value
df.ks <- data.frame(stringsAsFactors = F)
for (pathway in names.KEGG.L3) {
sub.sort <- df.sort[df.sort$pathway == pathway, 'value']
enrich.control <- ks.test(sub.sort, sort.value, alternative = 'greater')
enrich.treat <- ks.test(sub.sort, sort.value, alternative = 'less')
df.ks <- rbind(df.ks, data.frame(pathway = pathway,
pvalue.control = enrich.control$p.value,
pvalue.treat = enrich.treat$p.value))
}
df.ks$qvalue.control <- p.adjust(df.ks$pvalue.control, method = 'fdr')
df.ks$qvalue.treat <- p.adjust(df.ks$pvalue.treat, method = 'fdr')
# use ks score to plot
df.ks.female <- df.ks
# df.ks.female.filter <- df.ks.female[
# df.ks.female$qvalue.control < 0.15 | df.ks.female$qvalue.treat < 0.4,]
# df.ks.female.filter <- df.ks.female[
# df.ks.female$pvalue.control < 0.035 | df.ks.female$pvalue.treat < 0.043,]
# df.ks.female.filter <- df.ks.female[
# df.ks.female$pvalue.control < 0.05 | df.ks.female$pvalue.treat < 0.07,]
# df.ks.female.filter <- df.ks.female[
# df.ks.female$pvalue.control < 0.01 | df.ks.female$pvalue.treat < 0.003,]
df.ks.female.filter <- df.ks.female[
df.ks.female$pvalue.control < 0.2 | df.ks.female$pvalue.treat < 0.1,]
df.ks.female.filter <- df.ks.female.filter[
df.ks.female.filter$pathway != 'ABC transporters',]
sel.pathway <- c('Lipopolysaccharide biosynthesis proteins',
'Fatty acid biosynthesis', 'Nitrogen metabolism',
'Peptidases', 'Phosphotransferase system (PTS)',
'Benzoate degradation',
'Tryptophan metabolism', 'Starch and sucrose metabolism',
'Peroxisome', 'Galactose metabolism', 'Lysine biosynthesis',
'Terpenoid backbone biosynthesis')
df.ks.female.filter <- df.ks.female.filter[df.ks.female.filter$pathway %in% sel.pathway,]
log10Pval <- c()
for (i in row.names(df.ks.female.filter)) {
pvalue.control <- -log10(df.ks.female.filter[i, 'pvalue.control'])
pvalue.treat <- -log10(df.ks.female.filter[i, 'pvalue.treat'])
if (pvalue.control > pvalue.treat) {
log10Pval <- c(log10Pval, -pvalue.control)
} else {
log10Pval <- c(log10Pval, pvalue.treat)
}
}
df.ks.female.filter$log10Pval <- log10Pval
df.ks.female.filter <- df.ks.female.filter[
order(df.ks.female.filter$log10Pval, decreasing = T), ]
vec.color <- c()
for (pval in df.ks.female.filter$log10Pval) {
if (pval > 0) {
vec.color <- c(vec.color, 'Enrich in Treatment')
} else {
vec.color <- c(vec.color, 'Enrich in Control')
}
}
df.ks.female.filter$color <- factor(vec.color,
levels = c('Enrich in Treatment', 'Enrich in Control'))
plot.female <-
ggplot(data = df.ks.female.filter, aes(x = reorder(pathway, X = log10Pval),
y = log10Pval, fill = color)) +
geom_bar(stat = 'identity') +
labs(x = 'Pathway', y = expression(paste("-log"[10], "(adj", italic("P"), "-value)")),
fill = '') +
scale_fill_manual(values = c(muted("red"), muted("blue"))) +
coord_flip() +
theme_bw() +
theme(panel.background = element_rect(color = 'black', size = 1.5,
fill = 'transparent'),
panel.grid.major.x = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major.y = element_line(colour = "gray", size = 0.1,
linetype = 2),
axis.text.x = element_text(size = 9, color = "black", family = 'Arial'),
axis.text.y = element_text(size = 10, color = "black", family = 'Arial'))
# ggsave(filename = paste0("/Female_Combine_Sum_GSEA_", mod, '_',
# paste0(as.character(vec.dose), collapse = ''), ".png"),
# path = path.out, plot = plot.female,
# height = 15, width = 20, units = 'cm')
# heatmap
df.GSEA.female <- df.GSEA[as.character(df.ks.female.filter$pathway),]
df.heatmap.female <- data.frame(stringsAsFactors = F)
for (pathway in row.names(df.GSEA.female)) {
for (sub.time in names(df.GSEA.female)) {
df.heatmap.female <-
rbind(df.heatmap.female,
data.frame(pathway = pathway, time = sub.time,
score = df.GSEA.female[pathway, sub.time],
stringsAsFactors = F))
}
}
df.heatmap.female$pathway <-
factor(df.heatmap.female$pathway,
levels = rev(as.character(df.ks.female.filter$pathway)), ordered = T)
df.heatmap.female$time <-
factor(df.heatmap.female$time,
levels = as.character(series.time), ordered = T)
plot.heatmap.female <-
ggplot(data = df.heatmap.female,
aes(x = time, y = pathway, fill = score)) +
geom_tile() +
scale_fill_gradient2(low = muted("blue"), high = muted("red"), mid = "#F5F5F5") +
labs(x = 'Time', y = 'Pathway', fill = 'Enrichment Score') +
theme_bw() +
theme(
panel.border = element_blank(),
panel.background = element_rect(color = 'transparent', size = 1.5,
fill = 'transparent'),
panel.grid = element_blank(),
axis.ticks = element_blank(),
axis.title.y = element_blank(),
axis.text.y = element_blank(),
axis.text.x = element_text(size = 9, color = "black", family = 'Arial'),
legend.text = element_text(size = 9))
# ggsave(filename = paste0("/Female_Combine_Heatmap_GSEA_", mod, '_',
# paste0(as.character(vec.dose), collapse = ''), ".png"),
# path = path.out, plot = plot.heatmap.female,
# height = 15, width = 20, units = 'cm')
plot.final.female <- plot.female + plot.heatmap.female +
plot_layout(widths = c(1, 1.6), guides = 'collect')
ggsave(plot = plot.final.female, path = path.out,
filename = paste0("/Female_GSEA_",
paste0(as.character(vec.dose), collapse = ''), ".png"),
height = 8, width = 25, units = 'cm')
|
testcoef.rrenv.apweights <- function(m, L, R, A) {
  # Wald-type chi-squared test of H0: L %*% beta %*% R = A for a fitted
  # reduced-rank envelope model `m`.
  # User needs to supply L, R and A as matrices.
  #
  # m: fitted model list containing Gamma, beta (r x p), covMatrix (the
  #    asymptotic covariance of vec(beta), (r*p) x (r*p)) and sample size n.
  # L: contrast matrix with r columns.
  # R: contrast matrix with p rows.
  # A: hypothesized value, dimension nrow(L) x ncol(R).
  # Returns a list with chisqStatistic, dof, pValue and covMatrix (the
  # estimated covariance of vec(L %*% beta %*% R)).
  if (is.null(m$Gamma)) stop("beta is a zero matrix, no test is interesting.")
  a <- dim(m$beta)
  r <- a[1]
  p <- a[2]
  if (ncol(L) != r) stop("The size of L is not supported")
  if (nrow(R) != p) stop("The size of R is not supported")
  # Scalar short-circuit `||` instead of vectorized `|`: the condition is a
  # single logical, and `||` skips the column check when the row check fails.
  if (nrow(L) != nrow(A) || ncol(R) != ncol(A)) stop("The size of A is not supported")
  # Delta method: vec(L beta R) = kronecker(t(R), L) %*% vec(beta), so its
  # covariance is kron %*% covMatrix %*% t(kron), scaled by 1/n.
  tmp1 <- kronecker(t(R), L)
  Sigma <- tmp1 %*% tcrossprod(m$covMatrix, tmp1) / m$n
  # Quadratic form vec(L beta R - A)' Sigma^{-1} vec(L beta R - A); the
  # inverse comes from the Cholesky factor of the (positive definite) Sigma.
  tmp2 <- matrix(c(L %*% m$beta %*% R - A), nrow = 1)
  chisqStatistic <- tmp2 %*% tcrossprod(chol2inv(chol(Sigma)), tmp2)
  dof <- nrow(L) * ncol(R)
  pValue <- stats::pchisq(chisqStatistic, dof, lower.tail = F)
  covMatrix <- Sigma
  return(list(chisqStatistic = chisqStatistic, dof = dof, pValue = pValue, covMatrix = covMatrix))
}
| /R/testcoef.rrenv.apweights.R | no_license | cran/Renvlp | R | false | false | 890 | r | testcoef.rrenv.apweights <- function(m, L, R, A) {
# User needs to supply L, R and A as matrices.
if (is.null(m$Gamma)) stop("beta is a zero matrix, no test is interesting.")
a <- dim(m$beta)
r <- a[1]
p <- a[2]
if (ncol(L) != r) stop("The size of L is not supported")
if (nrow(R) != p) stop("The size of R is not supported")
if (nrow(L) != nrow(A) | ncol(R) != ncol(A)) stop("The size of A is not supported")
tmp1 <- kronecker(t(R), L)
Sigma <- tmp1 %*% tcrossprod(m$covMatrix, tmp1) / m$n
tmp2 <- matrix(c(L %*% m$beta %*% R - A), nrow = 1)
chisqStatistic <- tmp2 %*% tcrossprod(chol2inv(chol(Sigma)), tmp2)
dof <- nrow(L) * ncol(R)
pValue <- stats::pchisq(chisqStatistic, dof, lower.tail = F)
covMatrix <- Sigma
return(list(chisqStatistic = chisqStatistic, dof = dof, pValue = pValue, covMatrix = covMatrix))
}
|
rm(list=ls())
library(ggplot2)
## CONFIG OPTIONS
setwd('~/research/lexicase/gptp-2019-subsampled-lexicase/output')
RM_CMP_STR_LENS = T
# Load in shared data
source('../tools/shared.r')
# Load data, convert string 'True'/'False' to R booleans
data = read.csv('effort_data.csv')
data$solution_found = data$solution_found == 'True'
# Filter out runs with no results
data = data[data$solution_found,]
if(RM_CMP_STR_LENS){
data = data[data$problem != 'compare-string-lengths',]
}
# Grab all configuration variables present
problems = as.character(unique(data$problem))
sizes = as.character(unique(data$num_tests))
treatments = as.character(unique(data$treatment))
# Create and fill data frame to show how many successful runs were present in each configuration
count_df = data.frame(data=matrix(nrow=0, ncol=4))
for(prob in problems){
for(size in sizes){
for(trt in treatments){
trt_df = data[data$problem == prob & data$num_tests == size & data$treatment == trt,]
count_df = rbind(count_df, c(prob, size, trt, sum(trt_df$solution_found)), stringsAsFactors=F)
}
}
}
colnames(count_df) = c('problem', 'num_tests', 'treatment', 'solution_count')
# We only have full runs at 0% subsampling and subsampled at 10% subsampling
# So filter accordingly to remove any unneccesary zeros!
count_df = count_df[count_df$treatment == 'full' | count_df$num_tests != '100',]
count_df = count_df[count_df$treatment != 'full' | count_df$num_tests == '100',]
# Check to make sure enough runs finished!
min_count = min(count_df$solution_count)
if(min_count < 25){
print(paste0('Error! Not enough runs! Wanted 25, min = ', min_count))
}
# No we need to filter down the main data set so all treatments in a problem x size combo
# have the same number of solution counts
filtered_data = data.frame(data = matrix(nrow = 0, ncol = ncol(data)))
desired_count = 25
for(prob in problems){
for(size in sizes){
for(trt in treatments){
trt_df = data[data$problem == prob & data$num_tests == size & data$treatment == trt,]
if(nrow(trt_df) > 0){
trt_df = trt_df[order(trt_df$evals),]
filtered_data = rbind(filtered_data, trt_df[1:desired_count, ])
}
}
}
}
# We ended up rerunning this experiment to only show the 10% level, so we let's modify our legend
trt_lookup[['downsampled']] = 'Down-sampled (10%)'
trt_lookup[['cohort']] = 'Cohort (10%)'
trt_lookup[['truncated']] = 'Truncated (10%)'
# Give each row prettier names for the configuration variables
filtered_data$prob_name = 0
filtered_data$trt_name = 0
filtered_data$size_name = 0
filtered_data$dil_name = 0
for(prob in unique(filtered_data$problem)){
for(trt in unique(filtered_data$treatment)){
for(size in unique(filtered_data$num_tests)){
for(dil in unique(filtered_data$dilution)){
if(nrow(filtered_data[filtered_data$problem == prob & filtered_data$treatment == trt & filtered_data$num_tests == size & filtered_data$dilution == dil,]) > 0){
#cat(prob, ' ', trt, ' ', size, ' ', dil, '\n')
filtered_data[filtered_data$problem == prob & filtered_data$treatment == trt & filtered_data$num_tests == size & filtered_data$dilution == dil,]$prob_name = prob_lookup[[prob]]
filtered_data[filtered_data$problem == prob & filtered_data$treatment == trt & filtered_data$num_tests == size & filtered_data$dilution == dil,]$trt_name = trt_lookup[[toString(trt)]]
filtered_data[filtered_data$problem == prob & filtered_data$treatment == trt & filtered_data$num_tests == size & filtered_data$dilution == dil,]$size_name = size_lookup[[toString(size)]]
filtered_data[filtered_data$problem == prob & filtered_data$treatment == trt & filtered_data$num_tests == size & filtered_data$dilution == dil,]$dil_name = dil_lookup[[dil]]
}
}
}
}
}
# Change full to "Standard Lexicase"
filtered_data[filtered_data$treatment == 'full',]$trt_name = 'Standard'
# And modify the order to match
#trt_levels = c(trt_lookup[['truncated']], trt_lookup[['cohort']], trt_lookup[['downsampled']], 'Standard')
trt_levels = c('Standard', trt_lookup[['downsampled']], trt_lookup[['cohort']], trt_lookup[['truncated']])
# Set our color order
#color_vec = c(truncated_color, cohort_color, downsampled_color, full_color)
color_vec = c(full_color, downsampled_color, cohort_color, truncated_color)
# Turn those names into factors
filtered_data$size_name = as.factor(filtered_data$size_name)
filtered_data$dil_name = as.factor(filtered_data$dil_name)
filtered_data$trt_name = as.factor(filtered_data$trt_name)
filtered_data$prob_name = as.factor(filtered_data$prob_name)
# (gg)Plot!
x_scale_size = 0.8
ggplot(filtered_data, aes(x = 0, y = evals, fill=factor(trt_name, levels=trt_levels))) +
geom_boxplot(position = position_dodge(1.5), width=1) +
coord_cartesian(clip = 'off', ylim = c(3*10^5, 10^8)) +
facet_grid(cols = vars(factor(prob_name, levels=prob_levels))) +
ggtitle('Computational Effort') +
scale_y_log10() +
scale_x_continuous(limits = c(-x_scale_size, x_scale_size)) +
scale_fill_manual(values = color_vec) +
ylab('Number of Evaluations') +
ggtitle('Computational Effort') +
guides(fill=guide_legend(title="Lexicase Selection Variant", reverse=F, title.theme = element_text(size = 18))) +
theme(plot.title = element_text(size=20, hjust = 0.5)) +
theme(strip.text = element_text(size=18, face = 'bold')) + # For the facet labels
theme(axis.title = element_text(size=18)) +
theme(axis.text = element_text(size=18)) +
theme(legend.text = element_text(size=18), legend.position="bottom") +
theme(axis.ticks.x= element_blank()) +
theme(axis.title.x = element_blank()) +
theme(axis.text.x = element_blank()) +
theme(panel.grid.minor.x = element_blank()) +
theme(panel.grid.major.x = element_blank()) +
ggsave('./plots/computational_effort.pdf', units = 'in', width = 14, height = 4)
# Run the stats.
# Per problem: Kruskal-Wallis across all treatments; if significant,
# Mann-Whitney (unpaired Wilcoxon) comparisons of standard ('full') lexicase
# vs. each subsampled treatment, Holm-corrected within the problem.
stats_df = data.frame(data = matrix(nrow = 0, ncol = 5))
colnames(stats_df) = c('problem', 'treatment', 'kruskal_p_value', 'p_value', 'p_value_adj')
for (prob in unique(filtered_data$problem)) {
  prob_name = prob_lookup[[prob]]
  cat('Problem: ', prob_name, '\n')
  kruskal_res = kruskal.test(evals ~ treatment,
                             data = filtered_data[filtered_data$problem == prob,])
  cat('p-value:', kruskal_res$p.value, '\n\n')
  if (kruskal_res$p.value < 0.05) {
    ctrl_data = filtered_data[filtered_data$problem == prob &
                                filtered_data$treatment == 'full',]
    for (trt in setdiff(treatments, c('full'))) {
      trt_data = filtered_data[filtered_data$problem == prob &
                                 filtered_data$treatment == trt,]
      wilcox_res = wilcox.test(ctrl_data$evals, trt_data$evals, paired = F)
      stats_df[nrow(stats_df) + 1,] = c(prob, trt, kruskal_res$p.value,
                                        wilcox_res$p.value, 0)
    }
    # Holm correction applied only across this problem's comparisons.
    stats_df$p_value = as.numeric(stats_df$p_value)
    stats_df[stats_df$problem == prob,]$p_value_adj =
      p.adjust(stats_df[stats_df$problem == prob,]$p_value, method = 'holm')
  } else {
    # BUG FIX: the original wrote `trt` here, but `trt` is not defined in
    # this branch — it silently reused a stale value from a previous loop.
    # Record NA for treatment (no pairwise test was run), and real NA values
    # instead of the string 'NA' so the as.numeric() calls below do not warn.
    stats_df[nrow(stats_df) + 1,] = c(prob, NA, kruskal_res$p.value, NA, NA)
  }
}
stats_df$kruskal_p_value = as.numeric(stats_df$kruskal_p_value)
stats_df$p_value = as.numeric(stats_df$p_value)
stats_df$p_value_adj = as.numeric(stats_df$p_value_adj)
# NA p_value_adj (no pairwise test) yields NA here, as before.
stats_df$significant_at_0_05 = stats_df$p_value_adj <= 0.05
print(stats_df)
write.csv(stats_df, './stats/effort_stats.csv')
# Format stats such that significance stars can be added to the plot
# Create mock stats for the full treatments (so we can be general about how we plot)
for(prob in unique(filtered_data$problem)){
stats_df[nrow(stats_df) + 1, ] = c(prob, 'full', 0, 1, 1, F)
}
# Add some formatting data
stats_df$trt_name = ''
stats_df$prob_name = ''
stats_df$sig_str = ' '
stats_df$median = 0
stats_df$significant_at_0_05 = as.logical(stats_df$significant_at_0_05)
for(row in 1:nrow(stats_df)){
stats_df[row,]$trt_name = trt_lookup[[stats_df[row,]$treatment]]
stats_df[row,]$prob_name = prob_lookup[[stats_df[row,]$problem]]
if(stats_df[row,]$significant_at_0_05){
stats_df[row,]$sig_str = '*'
}
stats_df[row,]$median = median(filtered_data[filtered_data$problem == stats_df[row,]$problem & filtered_data$treatment == stats_df[row,]$treatment,]$evals)
}
# (gg)Plot with significance stars!
x_scale_size = 0.8
ggplot(filtered_data, aes(x = 0, y = evals, fill=factor(trt_name, levels=trt_levels))) +
geom_boxplot(position = position_dodge(1.5), width=1) +
coord_cartesian(clip = 'off', ylim = c(3*10^5, 10^8)) +
geom_text(data = stats_df, aes(x = 0, y = 1.5*10^5, label = sig_str), position = position_dodge(1.5), size = (5/14) * 30) +
facet_grid(cols = vars(factor(prob_name, levels=prob_levels))) +
ggtitle('Computational Effort') +
scale_y_log10() +
scale_x_continuous(limits = c(-x_scale_size, x_scale_size)) +
scale_fill_manual(values = color_vec) +
ylab('Number of Evaluations') +
ggtitle('Computational Effort') +
guides(fill=guide_legend(title="Lexicase Selection Variant", reverse=F, title.theme = element_text(size = 18))) +
theme(plot.title = element_text(size=20, hjust = 0.5)) +
theme(strip.text = element_text(size=18, face = 'bold')) + # For the facet labels
theme(axis.title = element_text(size=18)) +
theme(axis.text = element_text(size=18)) +
theme(legend.text = element_text(size=18), legend.position="bottom") +
theme(axis.ticks.x= element_blank()) +
theme(axis.title.x = element_blank()) +
theme(axis.text.x = element_blank()) +
theme(panel.grid.minor.x = element_blank()) +
theme(panel.grid.major.x = element_blank()) +
ggsave('./plots/computational_effort_stats.pdf', units = 'in', width = 14, height = 4)
print('Finished!') | /tools/effort_analysis.r | no_license | FergusonAJ/gptp-2019-subsampled-lexicase | R | false | false | 9,891 | r | rm(list=ls())
library(ggplot2)
## CONFIG OPTIONS
setwd('~/research/lexicase/gptp-2019-subsampled-lexicase/output')
RM_CMP_STR_LENS = T
# Load in shared data
source('../tools/shared.r')
# Load data, convert string 'True'/'False' to R booleans
data = read.csv('effort_data.csv')
data$solution_found = data$solution_found == 'True'
# Filter out runs with no results
data = data[data$solution_found,]
if(RM_CMP_STR_LENS){
data = data[data$problem != 'compare-string-lengths',]
}
# Grab all configuration variables present
problems = as.character(unique(data$problem))
sizes = as.character(unique(data$num_tests))
treatments = as.character(unique(data$treatment))
# Create and fill data frame to show how many successful runs were present in each configuration
count_df = data.frame(data=matrix(nrow=0, ncol=4))
for(prob in problems){
for(size in sizes){
for(trt in treatments){
trt_df = data[data$problem == prob & data$num_tests == size & data$treatment == trt,]
count_df = rbind(count_df, c(prob, size, trt, sum(trt_df$solution_found)), stringsAsFactors=F)
}
}
}
colnames(count_df) = c('problem', 'num_tests', 'treatment', 'solution_count')
# We only have full runs at 0% subsampling and subsampled at 10% subsampling
# So filter accordingly to remove any unneccesary zeros!
count_df = count_df[count_df$treatment == 'full' | count_df$num_tests != '100',]
count_df = count_df[count_df$treatment != 'full' | count_df$num_tests == '100',]
# Check to make sure enough runs finished!
min_count = min(count_df$solution_count)
if(min_count < 25){
print(paste0('Error! Not enough runs! Wanted 25, min = ', min_count))
}
# No we need to filter down the main data set so all treatments in a problem x size combo
# have the same number of solution counts
filtered_data = data.frame(data = matrix(nrow = 0, ncol = ncol(data)))
desired_count = 25
for(prob in problems){
for(size in sizes){
for(trt in treatments){
trt_df = data[data$problem == prob & data$num_tests == size & data$treatment == trt,]
if(nrow(trt_df) > 0){
trt_df = trt_df[order(trt_df$evals),]
filtered_data = rbind(filtered_data, trt_df[1:desired_count, ])
}
}
}
}
# We ended up rerunning this experiment to only show the 10% level, so we let's modify our legend
trt_lookup[['downsampled']] = 'Down-sampled (10%)'
trt_lookup[['cohort']] = 'Cohort (10%)'
trt_lookup[['truncated']] = 'Truncated (10%)'
# Give each row prettier names for the configuration variables
filtered_data$prob_name = 0
filtered_data$trt_name = 0
filtered_data$size_name = 0
filtered_data$dil_name = 0
for(prob in unique(filtered_data$problem)){
for(trt in unique(filtered_data$treatment)){
for(size in unique(filtered_data$num_tests)){
for(dil in unique(filtered_data$dilution)){
if(nrow(filtered_data[filtered_data$problem == prob & filtered_data$treatment == trt & filtered_data$num_tests == size & filtered_data$dilution == dil,]) > 0){
#cat(prob, ' ', trt, ' ', size, ' ', dil, '\n')
filtered_data[filtered_data$problem == prob & filtered_data$treatment == trt & filtered_data$num_tests == size & filtered_data$dilution == dil,]$prob_name = prob_lookup[[prob]]
filtered_data[filtered_data$problem == prob & filtered_data$treatment == trt & filtered_data$num_tests == size & filtered_data$dilution == dil,]$trt_name = trt_lookup[[toString(trt)]]
filtered_data[filtered_data$problem == prob & filtered_data$treatment == trt & filtered_data$num_tests == size & filtered_data$dilution == dil,]$size_name = size_lookup[[toString(size)]]
filtered_data[filtered_data$problem == prob & filtered_data$treatment == trt & filtered_data$num_tests == size & filtered_data$dilution == dil,]$dil_name = dil_lookup[[dil]]
}
}
}
}
}
# Change full to "Standard Lexicase"
filtered_data[filtered_data$treatment == 'full',]$trt_name = 'Standard'
# And modify the order to match
#trt_levels = c(trt_lookup[['truncated']], trt_lookup[['cohort']], trt_lookup[['downsampled']], 'Standard')
trt_levels = c('Standard', trt_lookup[['downsampled']], trt_lookup[['cohort']], trt_lookup[['truncated']])
# Set our color order
#color_vec = c(truncated_color, cohort_color, downsampled_color, full_color)
color_vec = c(full_color, downsampled_color, cohort_color, truncated_color)
# Turn those names into factors
filtered_data$size_name = as.factor(filtered_data$size_name)
filtered_data$dil_name = as.factor(filtered_data$dil_name)
filtered_data$trt_name = as.factor(filtered_data$trt_name)
filtered_data$prob_name = as.factor(filtered_data$prob_name)
# (gg)Plot!
x_scale_size = 0.8
ggplot(filtered_data, aes(x = 0, y = evals, fill=factor(trt_name, levels=trt_levels))) +
geom_boxplot(position = position_dodge(1.5), width=1) +
coord_cartesian(clip = 'off', ylim = c(3*10^5, 10^8)) +
facet_grid(cols = vars(factor(prob_name, levels=prob_levels))) +
ggtitle('Computational Effort') +
scale_y_log10() +
scale_x_continuous(limits = c(-x_scale_size, x_scale_size)) +
scale_fill_manual(values = color_vec) +
ylab('Number of Evaluations') +
ggtitle('Computational Effort') +
guides(fill=guide_legend(title="Lexicase Selection Variant", reverse=F, title.theme = element_text(size = 18))) +
theme(plot.title = element_text(size=20, hjust = 0.5)) +
theme(strip.text = element_text(size=18, face = 'bold')) + # For the facet labels
theme(axis.title = element_text(size=18)) +
theme(axis.text = element_text(size=18)) +
theme(legend.text = element_text(size=18), legend.position="bottom") +
theme(axis.ticks.x= element_blank()) +
theme(axis.title.x = element_blank()) +
theme(axis.text.x = element_blank()) +
theme(panel.grid.minor.x = element_blank()) +
theme(panel.grid.major.x = element_blank()) +
ggsave('./plots/computational_effort.pdf', units = 'in', width = 14, height = 4)
# Run the stats
# Kruskal-wallis to test for any effect across all treatments (hint: they all have one)
# Then do a Mann-Whitney comparison (i.e., unpaired Wilcox) between standard and each treatment
# Holm correction is used for multiple comparisons
stats_df = data.frame(data = matrix(nrow = 0, ncol = 5))
colnames(stats_df) = c('problem', 'treatment', 'kruskal_p_value', 'p_value', 'p_value_adj')
for(prob in unique(filtered_data$problem)){
prob_name = prob_lookup[[prob]]
cat('Problem: ', prob_name, '\n')
kruskal_res = kruskal.test(evals ~ treatment, data = filtered_data[filtered_data$problem == prob,])
cat('p-value:', kruskal_res$p.value, '\n\n')
if(kruskal_res$p.value < 0.05){
ctrl_data = filtered_data[filtered_data$problem == prob & filtered_data$treatment == 'full',]
for(trt in setdiff(treatments, c('full'))){
trt_data = filtered_data[filtered_data$problem == prob & filtered_data$treatment == trt,]
wilcox_res = wilcox.test(ctrl_data$evals, trt_data$evals, paired=F)
stats_df[nrow(stats_df) + 1,] = c(prob, trt, kruskal_res$p.value, wilcox_res$p.value, 0)
}
stats_df$p_value = as.numeric(stats_df$p_value)
stats_df[stats_df$problem == prob,]$p_value_adj = p.adjust(stats_df[stats_df$problem == prob,]$p_value, method = 'holm')
}
else{
stats_df[nrow(stats_df) + 1,] = c(prob, trt, kruskal_res$p.value, 'NA', 'NA')
}
}
stats_df$kruskal_p_value = as.numeric(stats_df$kruskal_p_value)
stats_df$p_value = as.numeric(stats_df$p_value)
stats_df$p_value_adj = as.numeric(stats_df$p_value_adj)
stats_df$significant_at_0_05 = stats_df$p_value_adj <= 0.05
print(stats_df)
write.csv(stats_df, './stats/effort_stats.csv')
# Format stats such that significance stars can be added to the plot
# Create mock stats for the full treatments (so we can be general about how we plot)
for(prob in unique(filtered_data$problem)){
stats_df[nrow(stats_df) + 1, ] = c(prob, 'full', 0, 1, 1, F)
}
# Add some formatting data
stats_df$trt_name = ''
stats_df$prob_name = ''
stats_df$sig_str = ' '
stats_df$median = 0
stats_df$significant_at_0_05 = as.logical(stats_df$significant_at_0_05)
for(row in 1:nrow(stats_df)){
stats_df[row,]$trt_name = trt_lookup[[stats_df[row,]$treatment]]
stats_df[row,]$prob_name = prob_lookup[[stats_df[row,]$problem]]
if(stats_df[row,]$significant_at_0_05){
stats_df[row,]$sig_str = '*'
}
stats_df[row,]$median = median(filtered_data[filtered_data$problem == stats_df[row,]$problem & filtered_data$treatment == stats_df[row,]$treatment,]$evals)
}
# (gg)Plot with significance stars!
x_scale_size = 0.8
ggplot(filtered_data, aes(x = 0, y = evals, fill=factor(trt_name, levels=trt_levels))) +
geom_boxplot(position = position_dodge(1.5), width=1) +
coord_cartesian(clip = 'off', ylim = c(3*10^5, 10^8)) +
geom_text(data = stats_df, aes(x = 0, y = 1.5*10^5, label = sig_str), position = position_dodge(1.5), size = (5/14) * 30) +
facet_grid(cols = vars(factor(prob_name, levels=prob_levels))) +
ggtitle('Computational Effort') +
scale_y_log10() +
scale_x_continuous(limits = c(-x_scale_size, x_scale_size)) +
scale_fill_manual(values = color_vec) +
ylab('Number of Evaluations') +
ggtitle('Computational Effort') +
guides(fill=guide_legend(title="Lexicase Selection Variant", reverse=F, title.theme = element_text(size = 18))) +
theme(plot.title = element_text(size=20, hjust = 0.5)) +
theme(strip.text = element_text(size=18, face = 'bold')) + # For the facet labels
theme(axis.title = element_text(size=18)) +
theme(axis.text = element_text(size=18)) +
theme(legend.text = element_text(size=18), legend.position="bottom") +
theme(axis.ticks.x= element_blank()) +
theme(axis.title.x = element_blank()) +
theme(axis.text.x = element_blank()) +
theme(panel.grid.minor.x = element_blank()) +
theme(panel.grid.major.x = element_blank()) +
ggsave('./plots/computational_effort_stats.pdf', units = 'in', width = 14, height = 4)
print('Finished!') |
rm(list = ls())
url <- "https://dapi.kakao.com/v3/search/book"
query <- ""
# ํ๊ธ๋ก ๋ณํ
query <- URLencode(iconv(query, to='UTF-8'))
#์ฌ์ดํธ url %s ?์ธ์ ๋๊ธฐ๊ธฐ # ?์ธ์ ๋๊ธฐ๊ธฐ
query_str <- sprintf("%s?target=title&query=%s", url, query)
query_str
ls()
# ์นด์นด์ค ์ธ์ฆํค ๋ฃ๊ธฐ
#################################################################
kakao_api_key <-"์ฌ๋ฌ๋ถ์ ์ธ์ฆํค๋ก ๋ฐ๊ฟ์ฃผ์ธ์"
################################################################
auth_key_no <- sprintf("KakaoAK %s",kakao_api_key) # ์๋ ์๋ณธ ๋ณด๊ณ ๋ณต์ฌ
# -H "Authorization: KakaoAK {REST_API_KEY}" # ๋์๋ฌธ์ ์ฃผ์!
#์ฌ์ดํธ ์์ฒญ
#install.packages("httr")
library(httr)
resp <-GET(query_str, add_headers("Authorization"=auth_key_no))
resp
# ํด๋์ค ํ์ธ
class(resp)
#[1] "response"
# ๋ฌธ์๋ก ๊ฐ์ ๋ณํ
resp_char <- as.character(resp)
#json
library(jsonlite)
d = fromJSON(resp_char)
class(d)
df <- data.frame(d)
# list๋ฅผ ๋งคํธ๋ฆญ์ค๋ก ๋ณํ
txt_1 <- as.matrix(df)
write.csv(txt_1,"kakao_api.csv", row.names = F)
########################### ๋ถ๋ฌ์ฌ๋ ์๋ฌ (๊ดํธ ๋ฐ ์ฌ๋ฌ ์ด๋ฆ ๋ค์์นธ์ )
# ์นด์นด์ค ์ฑ
# ์ด๋ฆ ๊ฐ๊ณ ์ค๊ธฐ  (stray prose line commented out so the script parses; mojibake Korean, roughly "fetch by name")
a<-read.csv("kakao_api.csv")
View(a)
View(df)
# ์๋ฌ ๋จ................  (stray prose line commented out so the script parses; mojibake Korean, roughly "an error occurs")
############################
#Error in read.table(file = file, header = header, sep = sep, quote = quote, :
# ์ด์ ๊ฐ์๊ฐ ์ด์ ์ด๋ฆ๋ค๋ณด๋ค ๋ง์ต๋๋ค
View(df)
df[1,1]
df[1,2]
df[2,1]
library(stringr)
# Flatten every cell of df to a single string and replace commas with colons
# so the CSV written below keeps the right number of columns.
# BUG FIX: the original inner loop used `1:col(df)` — col() returns a MATRIX
# of column indices, so `1:col(df)` degenerated to 1:1 (with a warning) and
# only the first column was ever cleaned. Use seq_len(ncol(df)) instead.
for (r in seq_len(nrow(df))) {          # over rows
  for (c in seq_len(ncol(df))) {        # over columns
    df[r, c] <- paste(df[r, c], collapse = " ")       # collapse multi-valued cells into one string
    df[r, c] <- str_replace_all(df[r, c], "," , ":")  # "," -> ":" to protect the CSV delimiter
  }
}
View(df)
###################
df2 <- as.matrix(df)
write.csv(df2, "kakao_api_final.csv", row.names = F)
getwd()
| /day14/5_7_kakao_api_book_query.R | no_license | euka96/R | R | false | false | 1,980 | r |
rm(list = ls())
url <- "https://dapi.kakao.com/v3/search/book"
query <- ""
# ํ๊ธ๋ก ๋ณํ
query <- URLencode(iconv(query, to='UTF-8'))
#์ฌ์ดํธ url %s ?์ธ์ ๋๊ธฐ๊ธฐ # ?์ธ์ ๋๊ธฐ๊ธฐ
query_str <- sprintf("%s?target=title&query=%s", url, query)
query_str
ls()
# ์นด์นด์ค ์ธ์ฆํค ๋ฃ๊ธฐ
#################################################################
kakao_api_key <-"์ฌ๋ฌ๋ถ์ ์ธ์ฆํค๋ก ๋ฐ๊ฟ์ฃผ์ธ์"
################################################################
auth_key_no <- sprintf("KakaoAK %s",kakao_api_key) # ์๋ ์๋ณธ ๋ณด๊ณ ๋ณต์ฌ
# -H "Authorization: KakaoAK {REST_API_KEY}" # ๋์๋ฌธ์ ์ฃผ์!
#์ฌ์ดํธ ์์ฒญ
#install.packages("httr")
library(httr)
resp <-GET(query_str, add_headers("Authorization"=auth_key_no))
resp
# ํด๋์ค ํ์ธ
class(resp)
#[1] "response"
# ๋ฌธ์๋ก ๊ฐ์ ๋ณํ
resp_char <- as.character(resp)
#json
library(jsonlite)
d = fromJSON(resp_char)
class(d)
df <- data.frame(d)
# list๋ฅผ ๋งคํธ๋ฆญ์ค๋ก ๋ณํ
txt_1 <- as.matrix(df)
write.csv(txt_1,"kakao_api.csv", row.names = F)
########################### ๋ถ๋ฌ์ฌ๋ ์๋ฌ (๊ดํธ ๋ฐ ์ฌ๋ฌ ์ด๋ฆ ๋ค์์นธ์ )
# ์นด์นด์ค ์ฑ
# ์ด๋ฆ ๊ฐ๊ณ ์ค๊ธฐ  (stray prose line commented out so the script parses; mojibake Korean, roughly "fetch by name")
a<-read.csv("kakao_api.csv")
View(a)
View(df)
# ์๋ฌ ๋จ................  (stray prose line commented out so the script parses; mojibake Korean, roughly "an error occurs")
############################
#Error in read.table(file = file, header = header, sep = sep, quote = quote, :
# ์ด์ ๊ฐ์๊ฐ ์ด์ ์ด๋ฆ๋ค๋ณด๋ค ๋ง์ต๋๋ค
View(df)
df[1,1]
df[1,2]
df[2,1]
library(stringr)
# Flatten every cell of df to a single string and replace commas with colons
# so the CSV written below keeps the right number of columns.
# BUG FIX: the original inner loop used `1:col(df)` — col() returns a MATRIX
# of column indices, so `1:col(df)` degenerated to 1:1 (with a warning) and
# only the first column was ever cleaned. Use seq_len(ncol(df)) instead.
for (r in seq_len(nrow(df))) {          # over rows
  for (c in seq_len(ncol(df))) {        # over columns
    df[r, c] <- paste(df[r, c], collapse = " ")       # collapse multi-valued cells into one string
    df[r, c] <- str_replace_all(df[r, c], "," , ":")  # "," -> ":" to protect the CSV delimiter
  }
}
View(df)
###################
df2 <- as.matrix(df)
write.csv(df2, "kakao_api_final.csv", row.names = F)
getwd()
|
library(dplyr)
library(ggplot2)
library(ggsignif)
library(data.table)
library(Rcmdr)
gastric <- CXXC5_Expression_Values[1:12,]
normal <- CXXC5_Expression_Values[13:27,]
normal_values <- c(normal$Average)
gastric_values <- c(gastric$Average)
n_avg <- mean(normal_values)
n_sd <- sd(normal_values)
n_var <- var(normal_values)
n_sem <- sqrt(n_var)/sqrt(length(normal_values))
n_lower <- n_avg - n_sem
n_upper <- n_avg + n_sem
g_avg <- mean(gastric_values)
g_sd <- sd(gastric_values)
g_var <- var(gastric_values)
g_sem <- sqrt(g_var)/sqrt(length(gastric_values))
g_lower <- g_avg - g_sem
g_upper <- g_avg + g_sem
lowers <- c(n_lower, g_lower)
uppers <- c(n_upper, g_upper)
expression_value <- c(n_avg, g_avg)
norm <- c("Normal")
norm <- rep(norm, length(normal_values))
gast <- c("Gastric Tumor")
gast <- rep(gast, length(gastric_values))
group <- c("Normal", "Gastric Tumor")
df = data.frame(group, expression_value)
df_errorbars = data.frame(group, expression_value, lowers, uppers)
p_value <- tibble(
x = c("Normal", "Normal", "Gastric Tumor", "Gastric Tumor"),
y = c(3200, 3200, 3200, 3200)
)
# Bar plot of mean CXXC5 expression per group with SEM error bars and a
# significance marker.
# BUG FIX: the original mapped ymin = uppers and ymax = lowers (swapped);
# error bars must span from the lower to the upper SEM bound.
# Also, `width` is a geom_errorbar parameter, not an aesthetic, so it is
# passed outside aes() (inside aes() it triggers an "unknown aesthetic"
# warning in current ggplot2).
g <- ggplot(df, aes(x = group, y = expression_value)) +
  geom_bar(stat = "identity") +
  annotate("text", x = 1.5, y = 3230, label = "*", size = 7) +
  coord_cartesian(ylim = c(0, 3500)) +
  geom_errorbar(
    data = df_errorbars,
    mapping = aes(x = group, ymin = lowers, ymax = uppers),
    width = 0.3
  ) +
  geom_line(data = p_value, aes(x = x, y = y, group = 1)) +
  labs(y = "Mean CXXC5 Expression Level", x = "Group")
g
u <- t.test(normal_values, gastric_values)
u
ggsave("cxxc5_gse19826.png")
| /gastric_3_gse19826/Figure1.R | no_license | ryan96db/cxxc5_rotation_project | R | false | false | 1,614 | r | library(dplyr)
library(ggplot2)
library(ggsignif)
library(data.table)
library(Rcmdr)
gastric <- CXXC5_Expression_Values[1:12,]
normal <- CXXC5_Expression_Values[13:27,]
normal_values <- c(normal$Average)
gastric_values <- c(gastric$Average)
n_avg <- mean(normal_values)
n_sd <- sd(normal_values)
n_var <- var(normal_values)
n_sem <- sqrt(n_var)/sqrt(length(normal_values))
n_lower <- n_avg - n_sem
n_upper <- n_avg + n_sem
g_avg <- mean(gastric_values)
g_sd <- sd(gastric_values)
g_var <- var(gastric_values)
g_sem <- sqrt(g_var)/sqrt(length(gastric_values))
g_lower <- g_avg - g_sem
g_upper <- g_avg + g_sem
lowers <- c(n_lower, g_lower)
uppers <- c(n_upper, g_upper)
expression_value <- c(n_avg, g_avg)
norm <- c("Normal")
norm <- rep(norm, length(normal_values))
gast <- c("Gastric Tumor")
gast <- rep(gast, length(gastric_values))
group <- c("Normal", "Gastric Tumor")
df = data.frame(group, expression_value)
df_errorbars = data.frame(group, expression_value, lowers, uppers)
p_value <- tibble(
x = c("Normal", "Normal", "Gastric Tumor", "Gastric Tumor"),
y = c(3200, 3200, 3200, 3200)
)
# Bar plot of mean CXXC5 expression per group with SEM error bars and a
# significance marker.
# BUG FIX: the original mapped ymin = uppers and ymax = lowers (swapped);
# error bars must span from the lower to the upper SEM bound.
# Also, `width` is a geom_errorbar parameter, not an aesthetic, so it is
# passed outside aes() (inside aes() it triggers an "unknown aesthetic"
# warning in current ggplot2).
g <- ggplot(df, aes(x = group, y = expression_value)) +
  geom_bar(stat = "identity") +
  annotate("text", x = 1.5, y = 3230, label = "*", size = 7) +
  coord_cartesian(ylim = c(0, 3500)) +
  geom_errorbar(
    data = df_errorbars,
    mapping = aes(x = group, ymin = lowers, ymax = uppers),
    width = 0.3
  ) +
  geom_line(data = p_value, aes(x = x, y = y, group = 1)) +
  labs(y = "Mean CXXC5 Expression Level", x = "Group")
g
u <- t.test(normal_values, gastric_values)
u
ggsave("cxxc5_gse19826.png")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/example.R
\name{add_numbers}
\alias{add_numbers}
\title{Add numbers}
\usage{
add_numbers(x, y)
}
\arguments{
\item{x}{a number to be added.}
\item{y}{another number}
}
\value{
number
}
\description{
Add numbers
}
\examples{
add_numbers(3, 4)
}
| /man/add_numbers.Rd | permissive | murraycadzow/exampleRpkg | R | false | true | 323 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/example.R
\name{add_numbers}
\alias{add_numbers}
\title{Add numbers}
\usage{
add_numbers(x, y)
}
\arguments{
\item{x}{a number to be added.}
\item{y}{another number}
}
\value{
number
}
\description{
Add numbers
}
\examples{
add_numbers(3, 4)
}
|
#######################################################
## Parm_Estm.R ##
## PSYCH 7695, Spring 2017 ##
## Maximum Likelihood Estimation (Myung, JMP, 2003) ##
## By Yun Tang, Psychology, OSU ##
## ##
## Main Program ##
## Code Written on 12/18/2009 ##
## ##
## Modified by Joonsuk Park on Jan 28 2015 ##
## Modified by Jay Myung in Feb 2017 ##
## Modified by Woo-Young Ahn in March 2018 ##
#######################################################
# Loading the (minus) log-likelihood functions
# Please modify the path according to the actual location of the file "MLE_LSE.R"
# e.g., setwd("/Users/youngahn/this-course/")
rm(list=ls()) # clear workspace
graphics.off() # close all figures
set.seed(08826) # set a seed number for replication
source("MLE_LSE.R") # source MLE_LSE.R code
##########################
## General Setup ##
## Data and Parameters ##
##########################
n_total <- 50 # sample size
t_int <- c(0.5, 1, 2, 4, 8, 12, 16, 18) # time interval values
n_corr <- c(44, 34, 27, 26, 19, 17, 20, 11) # number of correct responses
p_corr <- n_corr/n_total # proportion correct
# Generate random uniform numbers between 0 and 1 to use as initials for the optim procedure
param1_init <- runif(1)
param2_init <- runif(2)
param3_init <- runif(3)
param_pow1_low <- c(0); param_pow1_up <- c(3);
param_pow2_low <- c(0, 0); param_pow2_up <- c(1, 3); # lower and upper bounds of POW2 model (0<a<1, 0<b<3)
param_exp1_low <- c(0); param_exp1_up <- c(3);
param_exp2_low <- c(0, 0); param_exp2_up <- c(1, 3); # lower and upper bounds of EXP2 model (0<a<1, 0<b<3)
param_expow_low <- c(0, 0, -Inf); param_expow_up <- c(1, Inf, 3);
param_hyp1_low <- c(0); param_hyp1_up <- c(1);
param_hyp2_low <- c(0, 0); param_hyp2_up <- c(1, 1);
##########################
## MLE ##
##########################
# Call the general-purpose optimization routine: box-constrained
# quasi-Newton (L-BFGS-B) minimization of each model's minus log-likelihood.
# CONSISTENCY FIX: the EXP1 fit previously reused param_pow1_low as its lower
# bound; both equal c(0), so behavior is unchanged, but the EXP1-specific
# bound is used here so each model references its own constraints.
mle_model_pow1  <- optim(param1_init, mle_pow1,  method="L-BFGS-B", lower=param_pow1_low,  upper=param_pow1_up,  int=t_int, n=n_total, x=n_corr)
mle_model_pow2  <- optim(param2_init, mle_pow2,  method="L-BFGS-B", lower=param_pow2_low,  upper=param_pow2_up,  int=t_int, n=n_total, x=n_corr)
mle_model_exp1  <- optim(param1_init, mle_exp1,  method="L-BFGS-B", lower=param_exp1_low,  upper=param_exp1_up,  int=t_int, n=n_total, x=n_corr)
mle_model_exp2  <- optim(param2_init, mle_exp2,  method="L-BFGS-B", lower=param_exp2_low,  upper=param_exp2_up,  int=t_int, n=n_total, x=n_corr)
mle_model_expow <- optim(param3_init, mle_expow, method="L-BFGS-B", lower=param_expow_low, upper=param_expow_up, int=t_int, n=n_total, x=n_corr)
mle_model_hyp1  <- optim(param1_init, mle_hyp1,  method="L-BFGS-B", lower=param_hyp1_low,  upper=param_hyp1_up,  int=t_int, n=n_total, x=n_corr)
mle_model_hyp2  <- optim(param2_init, mle_hyp2,  method="L-BFGS-B", lower=param_hyp2_low,  upper=param_hyp2_up,  int=t_int, n=n_total, x=n_corr)
# Try many different inits to escape from the local maxima
for (i in 1:100) {
# Re-generate random inits. Is it the best way to do this?
param1_init <- runif(1); param2_init <- runif(2); param3_init <- runif(3);
# Do the MLE again
temp_pow1 <- optim(param1_init, mle_pow1, method="L-BFGS-B", lower=param_pow1_low, upper=param_pow1_up, int=t_int, n=n_total, x=n_corr)
temp_pow2 <- optim(param2_init, mle_pow2, method="L-BFGS-B", lower=param_pow2_low, upper=param_pow2_up, int=t_int, n=n_total, x=n_corr)
temp_exp1 <- optim(param1_init, mle_exp1, method="L-BFGS-B", lower=param_pow1_low, upper=param_exp1_up, int=t_int, n=n_total, x=n_corr)
temp_exp2 <- optim(param2_init, mle_exp2, method="L-BFGS-B", lower=param_exp2_low, upper=param_exp2_up, int=t_int, n=n_total, x=n_corr)
temp_expow <- optim(param3_init, mle_expow, method="L-BFGS-B", lower=param_expow_low, upper=param_expow_up, int=t_int, n=n_total, x=n_corr)
temp_hyp1 <- optim(param1_init, mle_hyp1, method="L-BFGS-B", lower=param_hyp1_low, upper=param_hyp1_up, int=t_int, n=n_total, x=n_corr)
temp_hyp2 <- optim(param2_init, mle_hyp2, method="L-BFGS-B", lower=param_hyp2_low, upper=param_hyp2_up, int=t_int, n=n_total, x=n_corr)
# Replace the results if the latest optimization yields better result
if(temp_pow1$value < mle_model_pow1$value) mle_model_pow1 <- temp_pow1
if(temp_pow2$value < mle_model_pow2$value) mle_model_pow2 <- temp_pow2
if(temp_exp1$value < mle_model_exp1$value) mle_model_exp1 <- temp_exp1
if(temp_exp2$value < mle_model_exp2$value) mle_model_exp2 <- temp_exp2
if(temp_expow$value < mle_model_expow$value) mle_model_expow <- temp_expow
if(temp_hyp1$value < mle_model_hyp1$value) mle_model_hyp1 <- temp_hyp1
if(temp_hyp2$value < mle_model_hyp2$value) mle_model_hyp2 <- temp_hyp2
}
# Save the MLE parameter estimates
parm_pow1 <- mle_model_pow1$par
parm_pow2 <- mle_model_pow2$par
parm_exp1 <- mle_model_exp1$par
parm_exp2 <- mle_model_exp2$par
parm_expow <- mle_model_expow$par
parm_hyp1 <- mle_model_hyp1$par
parm_hyp2 <- mle_model_hyp2$par
# MLE predictions
int <- t_int
p_prd_pow1 <- (1+int)^(-parm_pow1[1])
p_prd_pow2 <- parm_pow2[1]*(t_int+1)^(-parm_pow2[2])
p_prd_exp1 <- exp((-parm_exp1[1])*int)
p_prd_exp2 <- parm_exp2[1]*exp(-parm_exp2[2]*t_int)
p_prd_expow <- parm_expow[1]*exp((-parm_expow[2])*int)*(1+int)^(-parm_expow[3])
p_prd_hyp1 <- 1/(1+parm_hyp1[1]*int)
p_prd_hyp2 <- parm_hyp2[1]/(1+parm_hyp2[2]*int)
# Proportion of the explained variances for each model
r2_pow1 = 1-sum((p_corr-p_prd_pow1)^2)/sum((p_corr-mean(p_corr))^2)
r2_pow2 = 1-sum((p_corr-p_prd_pow2)^2)/sum((p_corr-mean(p_corr))^2)
r2_exp1 = 1-sum((p_corr-p_prd_exp1)^2)/sum((p_corr-mean(p_corr))^2)
r2_exp2 = 1-sum((p_corr-p_prd_exp2)^2)/sum((p_corr-mean(p_corr))^2)
r2_expow = 1-sum((p_corr-p_prd_expow)^2)/sum((p_corr-mean(p_corr))^2)
r2_hyp1 = 1-sum((p_corr-p_prd_hyp1)^2)/sum((p_corr-mean(p_corr))^2)
r2_hyp2 = 1-sum((p_corr-p_prd_hyp2)^2)/sum((p_corr-mean(p_corr))^2)
# Generate summary
minus_loglik_MLE = round(c(mle_model_pow1$value, mle_model_pow2$value, mle_model_exp1$value, mle_model_exp2$value, mle_model_expow$value, mle_model_hyp1$value,
mle_model_hyp2$value), 3)
r2_mle <- round(c(r2_pow1, r2_pow2, r2_exp1, r2_exp2, r2_expow, r2_hyp1, r2_hyp2), 3)
names = c("POW1", "POW2", "EXP1", "EXP2", "EXPOW", "HYP1", "HYP2")
pars_mle <- round(cbind(c(mle_model_pow1$par, NA,NA), c(mle_model_pow2$par,NA), c(mle_model_exp1$par,NA,NA),
c(mle_model_exp2$par,NA), mle_model_expow$par,
c(mle_model_hyp1$par, NA, NA), c(mle_model_hyp2$par,NA)),3)
dimnames(pars_mle) = list(c('par1', 'par2', 'par3'),c('POW1', 'POW2', 'EXP1', 'EXP2', 'EXPOW', 'HYP1', 'HYP2'))
mle_summary = data.frame(Models = names, loglik = - minus_loglik_MLE, r2 = r2_mle)
# Plot the MLE results
x <- seq(0,20, 0.05)
p_pow1 <- (1+x)^(-parm_pow1[1])
p_pow2 <- parm_pow2[1]*(x+1)^(-parm_pow2[2])
p_exp1 <- exp((-parm_exp1[1])*x)
p_exp2 <- parm_exp2[1]*exp(-parm_exp2[2]*x)
p_expow <- parm_expow[1]*exp((-parm_expow[2])*x)*(1+x)^(-parm_expow[3])
p_hyp1 <- 1/(1+parm_hyp1[1]*x)
p_hyp2 <- parm_hyp2[1]/(1+parm_hyp2[2]*x)
graph_p <- data.frame(x, p_pow1, p_pow2, p_exp1, p_exp2, p_expow, p_hyp1, p_hyp2)
library(ggplot2)
library(reshape2)
melted=melt(graph_p,id.vars="x")
p1 <- ggplot()+
geom_line(data=melted, aes(x=x,y=value, colour=variable), size=1.2)+
geom_point(aes(t_int,p_corr), size=4)+
labs(title="MLE results",x="Time t", y="Proportion Correct", colour="Model")+
theme(panel.background=element_rect(fill='white', colour='black'))+
theme(text=element_text(size=16,family="serif"))+
theme(plot.title=element_text(hjust=0.5))
p1
# print maximized likehood values
print('- MLE results ------------')
print(mle_summary,4)
# print bet-fit parameter values
print('- Best-fit parameters --------')
print(pars_mle,4)
############
#LSE
############
# sse optimization
lse_model_pow1 <- optim(param1_init, lse_pow1, method="L-BFGS-B", lower=param_pow1_low, upper=param_pow1_up, int=t_int, n=n_total, x=n_corr)
lse_model_pow2 <- optim(param2_init, lse_pow2, method="L-BFGS-B", lower=param_pow2_low, upper=param_pow2_up, int=t_int, n=n_total, x=n_corr)
lse_model_exp1 <- optim(param1_init, lse_exp1, method="L-BFGS-B", lower=param_pow1_low, upper=param_exp1_up, int=t_int, n=n_total, x=n_corr)
lse_model_exp2 <- optim(param2_init, lse_exp2, method="L-BFGS-B", lower=param_exp2_low, upper=param_exp2_up, int=t_int, n=n_total, x=n_corr)
lse_model_expow <- optim(param3_init, lse_expow, method="L-BFGS-B", lower=param_expow_low, upper=param_expow_up, int=t_int, n=n_total, x=n_corr)
lse_model_hyp1 <- optim(param1_init, lse_hyp1, method="L-BFGS-B", lower=param_hyp1_low, upper=param_hyp1_up, int=t_int, n=n_total, x=n_corr)
lse_model_hyp2 <- optim(param2_init, lse_hyp2, method="L-BFGS-B", lower=param_hyp2_low, upper=param_hyp2_up, int=t_int, n=n_total, x=n_corr)
# Try many different inits to escape from the local maxima
for (i in 1:100) {
# Re-generate random inits. Is it the best way to do this?
param1_init <- runif(1); param2_init <- runif(2); param3_init <- runif(3);
# Do the LSE again
temp_pow1 <- optim(param1_init, lse_pow1, method="L-BFGS-B", lower=param_pow1_low, upper=param_pow1_up, int=t_int, n=n_total, x=n_corr)
temp_pow2 <- optim(param2_init, lse_pow2, method="L-BFGS-B", lower=param_pow2_low, upper=param_pow2_up, int=t_int, n=n_total, x=n_corr)
temp_exp1 <- optim(param1_init, lse_exp1, method="L-BFGS-B", lower=param_pow1_low, upper=param_exp1_up, int=t_int, n=n_total, x=n_corr)
temp_exp2 <- optim(param2_init, lse_exp2, method="L-BFGS-B", lower=param_exp2_low, upper=param_exp2_up, int=t_int, n=n_total, x=n_corr)
temp_expow <- optim(param3_init, lse_expow, method="L-BFGS-B", lower=param_expow_low, upper=param_expow_up, int=t_int, n=n_total, x=n_corr)
temp_hyp1 <- optim(param1_init, lse_hyp1, method="L-BFGS-B", lower=param_hyp1_low, upper=param_hyp1_up, int=t_int, n=n_total, x=n_corr)
temp_hyp2 <- optim(param2_init, lse_hyp2, method="L-BFGS-B", lower=param_hyp2_low, upper=param_hyp2_up, int=t_int, n=n_total, x=n_corr)
# Replace the results if the latest optimization yields better result
if(temp_pow1$value < lse_model_pow1$value) lse_model_pow1 <- temp_pow1
if(temp_pow2$value < lse_model_pow2$value) lse_model_pow2 <- temp_pow2
if(temp_exp1$value < lse_model_exp1$value) lse_model_exp1 <- temp_exp1
if(temp_exp2$value < lse_model_exp2$value) lse_model_exp2 <- temp_exp2
if(temp_expow$value < lse_model_expow$value) lse_model_expow <- temp_expow
if(temp_hyp1$value < lse_model_hyp1$value) lse_model_hyp1 <- temp_hyp1
if(temp_hyp2$value < lse_model_hyp2$value) lse_model_hyp2 <- temp_hyp2
}
# Save the LSE parameter estimates
lse_parm_pow1 <- lse_model_pow1$par
lse_parm_pow2 <- lse_model_pow2$par
lse_parm_exp1 <- lse_model_exp1$par
lse_parm_exp2 <- lse_model_exp2$par
lse_parm_expow <- lse_model_expow$par
lse_parm_hyp1 <- lse_model_hyp1$par
lse_parm_hyp2 <- lse_model_hyp2$par
# LSE predictions
int <- t_int
lse_p_prd_pow1 <- (1+int)^(-lse_parm_pow1[1])
lse_p_prd_pow2 <- lse_parm_pow2[1]*(t_int+1)^(-lse_parm_pow2[2])
lse_p_prd_exp1 <- exp((-lse_parm_exp1[1])*int)
lse_p_prd_exp2 <- lse_parm_exp2[1]*exp(-lse_parm_exp2[2]*t_int)
lse_p_prd_expow <- lse_parm_expow[1]*exp((-lse_parm_expow[2])*int)*(1+int)^(-lse_parm_expow[3])
lse_p_prd_hyp1 <- 1/(1+lse_parm_hyp1[1]*int)
lse_p_prd_hyp2 <- lse_parm_hyp2[1]/(1+lse_parm_hyp2[2]*int)
# Proportion of the explained variances for each model
lse_r2_pow1 = 1-sum((p_corr-lse_p_prd_pow1)^2)/sum((p_corr-mean(p_corr))^2)
lse_r2_pow2 = 1-sum((p_corr-lse_p_prd_pow2)^2)/sum((p_corr-mean(p_corr))^2)
lse_r2_exp1 = 1-sum((p_corr-lse_p_prd_exp1)^2)/sum((p_corr-mean(p_corr))^2)
lse_r2_exp2 = 1-sum((p_corr-lse_p_prd_exp2)^2)/sum((p_corr-mean(p_corr))^2)
lse_r2_expow = 1-sum((p_corr-lse_p_prd_expow)^2)/sum((p_corr-mean(p_corr))^2)
lse_r2_hyp1 = 1-sum((p_corr-lse_p_prd_hyp1)^2)/sum((p_corr-mean(p_corr))^2)
lse_r2_hyp2 = 1-sum((p_corr-lse_p_prd_hyp2)^2)/sum((p_corr-mean(p_corr))^2)
# Generate summary
LSE = round(c(lse_model_pow1$value, lse_model_pow2$value, lse_model_exp1$value, lse_model_exp2$value, lse_model_expow$value, lse_model_hyp1$value,
lse_model_hyp2$value), 3)
r2_lse <- round(c(lse_r2_pow1, lse_r2_pow2, lse_r2_exp1, lse_r2_exp2, lse_r2_expow, lse_r2_hyp1, lse_r2_hyp2), 3)
names = c("POW1", "POW2", "EXP1", "EXP2", "EXPOW", "HYP1", "HYP2")
pars_lse <- round(cbind(c(lse_model_pow1$par, NA,NA), c(lse_model_pow2$par,NA), c(lse_model_exp1$par,NA,NA),
c(lse_model_exp2$par,NA), lse_model_expow$par,
c(lse_model_hyp1$par, NA, NA), c(lse_model_hyp2$par,NA)),3)
dimnames(pars_lse) = list(c('par1', 'par2', 'par3'),c('POW1', 'POW2', 'EXP1', 'EXP2', 'EXPOW', 'HYP1', 'HYP2'))
lse_summary = data.frame(Models = names, sse = LSE, r2 = r2_lse)
# Plot the LSE results
# Smooth LSE-fitted retention curves over t in [0, 20] for plotting.
x <- seq(0, 20, 0.05)
lse_p_pow1  <- (1 + x)^(-lse_parm_pow1[1])
lse_p_pow2  <- lse_parm_pow2[1] * (x + 1)^(-lse_parm_pow2[2])
lse_p_exp1  <- exp((-lse_parm_exp1[1]) * x)
lse_p_exp2  <- lse_parm_exp2[1] * exp(-lse_parm_exp2[2] * x)
# BUG FIX: the power-law exponent must be negated, matching the EXPOW model
# definition used for the MLE curves (p_expow) and the LSE predictions
# (lse_p_prd_expow) earlier in this script; the original omitted the minus
# sign here, producing an increasing instead of decaying curve.
lse_p_expow <- lse_parm_expow[1] * exp((-lse_parm_expow[2]) * x) * (1 + x)^(-lse_parm_expow[3])
lse_p_hyp1  <- 1 / (1 + lse_parm_hyp1[1] * x)
lse_p_hyp2  <- lse_parm_hyp2[1] / (1 + lse_parm_hyp2[2] * x)
lse_graph_p <- data.frame(x, lse_p_pow1, lse_p_pow2, lse_p_exp1, lse_p_exp2, lse_p_expow, lse_p_hyp1, lse_p_hyp2)
lse_melted=melt(lse_graph_p,id.vars="x")
p2 <- ggplot()+
geom_line(data=lse_melted, aes(x=x,y=value, colour=variable), size=1.2)+
geom_point(aes(t_int,p_corr), size=4)+
labs(title="LSE results",x="Time t", y="Proportion Correct", colour="Model")+
theme(panel.background=element_rect(fill='white', colour='black'))+
theme(text=element_text(size=16,family="serif"))+
theme(plot.title=element_text(hjust=0.5))
p2
# print maximized likehood values
print('- LSE results ------------')
print(lse_summary,4)
# print bet-fit parameter values
print('- Best-fit parameters --------')
print(pars_lse,4)
##multiplot function by "Cookbook for R"
# Arrange several ggplot objects on one page ("Cookbook for R" multiplot).
#
# Arguments:
#   ...      ggplot objects passed individually.
#   plotlist optional list of additional ggplot objects.
#   file     unused; retained for backward compatibility with callers.
#   cols     number of columns when no explicit layout is given.
#   layout   optional matrix of plot indices; cell value i places plot i at
#            that grid position and overrides `cols`.
multiplot <- function(..., plotlist = NULL, file, cols = 1, layout = NULL) {
  library(grid)
  # Merge plots given positionally with those supplied via plotlist.
  all_plots <- c(list(...), plotlist)
  n_plots <- length(all_plots)
  # Without an explicit layout, fill a cols-wide grid row count derived
  # from the number of plots.
  if (is.null(layout)) {
    n_rows <- ceiling(n_plots / cols)
    layout <- matrix(seq_len(cols * n_rows), ncol = cols, nrow = n_rows)
  }
  if (n_plots == 1) {
    print(all_plots[[1]])
    return(invisible(NULL))
  }
  # Start a new page whose viewport grid mirrors the layout matrix.
  grid.newpage()
  pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
  # Render each plot in the grid cell(s) whose layout value equals its index.
  for (i in seq_len(n_plots)) {
    cell <- as.data.frame(which(layout == i, arr.ind = TRUE))
    print(all_plots[[i]],
          vp = viewport(layout.pos.row = cell$row,
                        layout.pos.col = cell$col))
  }
}
#best fit curves, MLE parameter table, R2, discussion.
png(filename="graph.png", width=1000, height=600)
multiplot(p1, p2, cols=2)
dev.off()
pars <- array(NA, c(3,ncol(pars_mle),2), dimnames=list(c("par1", "par2", "par3"), names, c("MLE","LSE")));
pars[,,1]<-pars_mle
pars[,,2]<-pars_lse
pars
r2 <- array(NA, c(1,ncol(pars_mle),2), dimnames=list(c("r2"),names,c("MLE","LSE")))
r2[,,1] <- r2_mle
r2[,,2] <- r2_lse
r2
| /Parm_Estm.R | no_license | mindy2801/Computational_Modeling | R | false | false | 16,116 | r | #######################################################
## Parm_Estm.R ##
## PSYCH 7695, Spring 2017 ##
## Maximum Likelihood Estimation (Myung, JMP, 2003) ##
## By Yun Tang, Psychology, OSU ##
## ##
## Main Program ##
## Code Written on 12/18/2009 ##
## ##
## Modified by Joonsuk Park on Jan 28 2015 ##
## Modified by Jay Myung in Feb 2017 ##
## Modified by Woo-Young Ahn in March 2018 ##
#######################################################
# Loading the (minus) log-likelihood functions
# Please modify the path according to the actual location of the file "MLE_LSE.R"
# e.g., setwd("/Users/youngahn/this-course/")
rm(list=ls()) # clear workspace
graphics.off() # close all figures
set.seed(08826) # set a seed number for replication
source("MLE_LSE.R") # source MLE_LSE.R code
##########################
## General Setup ##
## Data and Parameters ##
##########################
n_total <- 50 # sample size
t_int <- c(0.5, 1, 2, 4, 8, 12, 16, 18) # time interval values
n_corr <- c(44, 34, 27, 26, 19, 17, 20, 11) # number of correct responses
p_corr <- n_corr/n_total # proportion correct
# Generate random uniform numbers between 0 and 1 to use as initials for the optim procedure
param1_init <- runif(1)
param2_init <- runif(2)
param3_init <- runif(3)
param_pow1_low <- c(0); param_pow1_up <- c(3);
param_pow2_low <- c(0, 0); param_pow2_up <- c(1, 3); # lower and upper bounds of POW2 model (0<a<1, 0<b<3)
param_exp1_low <- c(0); param_exp1_up <- c(3);
param_exp2_low <- c(0, 0); param_exp2_up <- c(1, 3); # lower and upper bounds of EXP2 model (0<a<1, 0<b<3)
param_expow_low <- c(0, 0, -Inf); param_expow_up <- c(1, Inf, 3);
param_hyp1_low <- c(0); param_hyp1_up <- c(1);
param_hyp2_low <- c(0, 0); param_hyp2_up <- c(1, 1);
##########################
## MLE ##
##########################
# Call general purpose optimization rountine
mle_model_pow1 <- optim(param1_init, mle_pow1, method="L-BFGS-B", lower=param_pow1_low, upper=param_pow1_up, int=t_int, n=n_total, x=n_corr)
mle_model_pow2 <- optim(param2_init, mle_pow2, method="L-BFGS-B", lower=param_pow2_low, upper=param_pow2_up, int=t_int, n=n_total, x=n_corr)
mle_model_exp1 <- optim(param1_init, mle_exp1, method="L-BFGS-B", lower=param_pow1_low, upper=param_exp1_up, int=t_int, n=n_total, x=n_corr)
mle_model_exp2 <- optim(param2_init, mle_exp2, method="L-BFGS-B", lower=param_exp2_low, upper=param_exp2_up, int=t_int, n=n_total, x=n_corr)
mle_model_expow <- optim(param3_init, mle_expow, method="L-BFGS-B", lower=param_expow_low, upper=param_expow_up, int=t_int, n=n_total, x=n_corr)
mle_model_hyp1 <- optim(param1_init, mle_hyp1, method="L-BFGS-B", lower=param_hyp1_low, upper=param_hyp1_up, int=t_int, n=n_total, x=n_corr)
mle_model_hyp2 <- optim(param2_init, mle_hyp2, method="L-BFGS-B", lower=param_hyp2_low, upper=param_hyp2_up, int=t_int, n=n_total, x=n_corr)
# Try many different inits to escape from the local maxima
for (i in 1:100) {
# Re-generate random inits. Is it the best way to do this?
param1_init <- runif(1); param2_init <- runif(2); param3_init <- runif(3);
# Do the MLE again
temp_pow1 <- optim(param1_init, mle_pow1, method="L-BFGS-B", lower=param_pow1_low, upper=param_pow1_up, int=t_int, n=n_total, x=n_corr)
temp_pow2 <- optim(param2_init, mle_pow2, method="L-BFGS-B", lower=param_pow2_low, upper=param_pow2_up, int=t_int, n=n_total, x=n_corr)
temp_exp1 <- optim(param1_init, mle_exp1, method="L-BFGS-B", lower=param_pow1_low, upper=param_exp1_up, int=t_int, n=n_total, x=n_corr)
temp_exp2 <- optim(param2_init, mle_exp2, method="L-BFGS-B", lower=param_exp2_low, upper=param_exp2_up, int=t_int, n=n_total, x=n_corr)
temp_expow <- optim(param3_init, mle_expow, method="L-BFGS-B", lower=param_expow_low, upper=param_expow_up, int=t_int, n=n_total, x=n_corr)
temp_hyp1 <- optim(param1_init, mle_hyp1, method="L-BFGS-B", lower=param_hyp1_low, upper=param_hyp1_up, int=t_int, n=n_total, x=n_corr)
temp_hyp2 <- optim(param2_init, mle_hyp2, method="L-BFGS-B", lower=param_hyp2_low, upper=param_hyp2_up, int=t_int, n=n_total, x=n_corr)
# Replace the results if the latest optimization yields better result
if(temp_pow1$value < mle_model_pow1$value) mle_model_pow1 <- temp_pow1
if(temp_pow2$value < mle_model_pow2$value) mle_model_pow2 <- temp_pow2
if(temp_exp1$value < mle_model_exp1$value) mle_model_exp1 <- temp_exp1
if(temp_exp2$value < mle_model_exp2$value) mle_model_exp2 <- temp_exp2
if(temp_expow$value < mle_model_expow$value) mle_model_expow <- temp_expow
if(temp_hyp1$value < mle_model_hyp1$value) mle_model_hyp1 <- temp_hyp1
if(temp_hyp2$value < mle_model_hyp2$value) mle_model_hyp2 <- temp_hyp2
}
# Save the MLE parameter estimates
parm_pow1 <- mle_model_pow1$par
parm_pow2 <- mle_model_pow2$par
parm_exp1 <- mle_model_exp1$par
parm_exp2 <- mle_model_exp2$par
parm_expow <- mle_model_expow$par
parm_hyp1 <- mle_model_hyp1$par
parm_hyp2 <- mle_model_hyp2$par
# MLE predictions
int <- t_int
p_prd_pow1 <- (1+int)^(-parm_pow1[1])
p_prd_pow2 <- parm_pow2[1]*(t_int+1)^(-parm_pow2[2])
p_prd_exp1 <- exp((-parm_exp1[1])*int)
p_prd_exp2 <- parm_exp2[1]*exp(-parm_exp2[2]*t_int)
p_prd_expow <- parm_expow[1]*exp((-parm_expow[2])*int)*(1+int)^(-parm_expow[3])
p_prd_hyp1 <- 1/(1+parm_hyp1[1]*int)
p_prd_hyp2 <- parm_hyp2[1]/(1+parm_hyp2[2]*int)
# Proportion of the explained variances for each model
r2_pow1 = 1-sum((p_corr-p_prd_pow1)^2)/sum((p_corr-mean(p_corr))^2)
r2_pow2 = 1-sum((p_corr-p_prd_pow2)^2)/sum((p_corr-mean(p_corr))^2)
r2_exp1 = 1-sum((p_corr-p_prd_exp1)^2)/sum((p_corr-mean(p_corr))^2)
r2_exp2 = 1-sum((p_corr-p_prd_exp2)^2)/sum((p_corr-mean(p_corr))^2)
r2_expow = 1-sum((p_corr-p_prd_expow)^2)/sum((p_corr-mean(p_corr))^2)
r2_hyp1 = 1-sum((p_corr-p_prd_hyp1)^2)/sum((p_corr-mean(p_corr))^2)
r2_hyp2 = 1-sum((p_corr-p_prd_hyp2)^2)/sum((p_corr-mean(p_corr))^2)
# Generate summary
minus_loglik_MLE = round(c(mle_model_pow1$value, mle_model_pow2$value, mle_model_exp1$value, mle_model_exp2$value, mle_model_expow$value, mle_model_hyp1$value,
mle_model_hyp2$value), 3)
r2_mle <- round(c(r2_pow1, r2_pow2, r2_exp1, r2_exp2, r2_expow, r2_hyp1, r2_hyp2), 3)
names = c("POW1", "POW2", "EXP1", "EXP2", "EXPOW", "HYP1", "HYP2")
pars_mle <- round(cbind(c(mle_model_pow1$par, NA,NA), c(mle_model_pow2$par,NA), c(mle_model_exp1$par,NA,NA),
c(mle_model_exp2$par,NA), mle_model_expow$par,
c(mle_model_hyp1$par, NA, NA), c(mle_model_hyp2$par,NA)),3)
dimnames(pars_mle) = list(c('par1', 'par2', 'par3'),c('POW1', 'POW2', 'EXP1', 'EXP2', 'EXPOW', 'HYP1', 'HYP2'))
mle_summary = data.frame(Models = names, loglik = - minus_loglik_MLE, r2 = r2_mle)
# Plot the MLE results
x <- seq(0,20, 0.05)
p_pow1 <- (1+x)^(-parm_pow1[1])
p_pow2 <- parm_pow2[1]*(x+1)^(-parm_pow2[2])
p_exp1 <- exp((-parm_exp1[1])*x)
p_exp2 <- parm_exp2[1]*exp(-parm_exp2[2]*x)
p_expow <- parm_expow[1]*exp((-parm_expow[2])*x)*(1+x)^(-parm_expow[3])
p_hyp1 <- 1/(1+parm_hyp1[1]*x)
p_hyp2 <- parm_hyp2[1]/(1+parm_hyp2[2]*x)
graph_p <- data.frame(x, p_pow1, p_pow2, p_exp1, p_exp2, p_expow, p_hyp1, p_hyp2)
library(ggplot2)
library(reshape2)
melted=melt(graph_p,id.vars="x")
p1 <- ggplot()+
geom_line(data=melted, aes(x=x,y=value, colour=variable), size=1.2)+
geom_point(aes(t_int,p_corr), size=4)+
labs(title="MLE results",x="Time t", y="Proportion Correct", colour="Model")+
theme(panel.background=element_rect(fill='white', colour='black'))+
theme(text=element_text(size=16,family="serif"))+
theme(plot.title=element_text(hjust=0.5))
p1
# print maximized likehood values
print('- MLE results ------------')
print(mle_summary,4)
# print bet-fit parameter values
print('- Best-fit parameters --------')
print(pars_mle,4)
############
#LSE
############
# sse optimization
lse_model_pow1 <- optim(param1_init, lse_pow1, method="L-BFGS-B", lower=param_pow1_low, upper=param_pow1_up, int=t_int, n=n_total, x=n_corr)
lse_model_pow2 <- optim(param2_init, lse_pow2, method="L-BFGS-B", lower=param_pow2_low, upper=param_pow2_up, int=t_int, n=n_total, x=n_corr)
lse_model_exp1 <- optim(param1_init, lse_exp1, method="L-BFGS-B", lower=param_pow1_low, upper=param_exp1_up, int=t_int, n=n_total, x=n_corr)
lse_model_exp2 <- optim(param2_init, lse_exp2, method="L-BFGS-B", lower=param_exp2_low, upper=param_exp2_up, int=t_int, n=n_total, x=n_corr)
lse_model_expow <- optim(param3_init, lse_expow, method="L-BFGS-B", lower=param_expow_low, upper=param_expow_up, int=t_int, n=n_total, x=n_corr)
lse_model_hyp1 <- optim(param1_init, lse_hyp1, method="L-BFGS-B", lower=param_hyp1_low, upper=param_hyp1_up, int=t_int, n=n_total, x=n_corr)
lse_model_hyp2 <- optim(param2_init, lse_hyp2, method="L-BFGS-B", lower=param_hyp2_low, upper=param_hyp2_up, int=t_int, n=n_total, x=n_corr)
# Try many different inits to escape from the local maxima
for (i in 1:100) {
# Re-generate random inits. Is it the best way to do this?
param1_init <- runif(1); param2_init <- runif(2); param3_init <- runif(3);
# Do the LSE again
temp_pow1 <- optim(param1_init, lse_pow1, method="L-BFGS-B", lower=param_pow1_low, upper=param_pow1_up, int=t_int, n=n_total, x=n_corr)
temp_pow2 <- optim(param2_init, lse_pow2, method="L-BFGS-B", lower=param_pow2_low, upper=param_pow2_up, int=t_int, n=n_total, x=n_corr)
temp_exp1 <- optim(param1_init, lse_exp1, method="L-BFGS-B", lower=param_pow1_low, upper=param_exp1_up, int=t_int, n=n_total, x=n_corr)
temp_exp2 <- optim(param2_init, lse_exp2, method="L-BFGS-B", lower=param_exp2_low, upper=param_exp2_up, int=t_int, n=n_total, x=n_corr)
temp_expow <- optim(param3_init, lse_expow, method="L-BFGS-B", lower=param_expow_low, upper=param_expow_up, int=t_int, n=n_total, x=n_corr)
temp_hyp1 <- optim(param1_init, lse_hyp1, method="L-BFGS-B", lower=param_hyp1_low, upper=param_hyp1_up, int=t_int, n=n_total, x=n_corr)
temp_hyp2 <- optim(param2_init, lse_hyp2, method="L-BFGS-B", lower=param_hyp2_low, upper=param_hyp2_up, int=t_int, n=n_total, x=n_corr)
# Replace the results if the latest optimization yields better result
if(temp_pow1$value < lse_model_pow1$value) lse_model_pow1 <- temp_pow1
if(temp_pow2$value < lse_model_pow2$value) lse_model_pow2 <- temp_pow2
if(temp_exp1$value < lse_model_exp1$value) lse_model_exp1 <- temp_exp1
if(temp_exp2$value < lse_model_exp2$value) lse_model_exp2 <- temp_exp2
if(temp_expow$value < lse_model_expow$value) lse_model_expow <- temp_expow
if(temp_hyp1$value < lse_model_hyp1$value) lse_model_hyp1 <- temp_hyp1
if(temp_hyp2$value < lse_model_hyp2$value) lse_model_hyp2 <- temp_hyp2
}
# Extract the best-fit LSE parameter vectors for each retention model.
lse_parm_pow1 <- lse_model_pow1$par
lse_parm_pow2 <- lse_model_pow2$par
lse_parm_exp1 <- lse_model_exp1$par
lse_parm_exp2 <- lse_model_exp2$par
lse_parm_expow <- lse_model_expow$par
lse_parm_hyp1 <- lse_model_hyp1$par
lse_parm_hyp2 <- lse_model_hyp2$par
# Evaluate each fitted model at the observed retention intervals
# (`int` aliases t_int, kept for compatibility with later code).
int <- t_int
lse_p_prd_pow1 <- (1 + int)^(-lse_parm_pow1[1])
lse_p_prd_pow2 <- lse_parm_pow2[1] * (1 + int)^(-lse_parm_pow2[2])
lse_p_prd_exp1 <- exp(-lse_parm_exp1[1] * int)
lse_p_prd_exp2 <- lse_parm_exp2[1] * exp(-lse_parm_exp2[2] * int)
lse_p_prd_expow <- lse_parm_expow[1] * exp(-lse_parm_expow[2] * int) * (1 + int)^(-lse_parm_expow[3])
lse_p_prd_hyp1 <- 1 / (1 + lse_parm_hyp1[1] * int)
lse_p_prd_hyp2 <- lse_parm_hyp2[1] / (1 + lse_parm_hyp2[2] * int)
# Proportion of variance explained (R^2) by each model's LSE fit,
# relative to the total sum of squares of the observed proportions.
ss_tot <- sum((p_corr - mean(p_corr))^2)
lse_r2_pow1 <- 1 - sum((p_corr - lse_p_prd_pow1)^2) / ss_tot
lse_r2_pow2 <- 1 - sum((p_corr - lse_p_prd_pow2)^2) / ss_tot
lse_r2_exp1 <- 1 - sum((p_corr - lse_p_prd_exp1)^2) / ss_tot
lse_r2_exp2 <- 1 - sum((p_corr - lse_p_prd_exp2)^2) / ss_tot
lse_r2_expow <- 1 - sum((p_corr - lse_p_prd_expow)^2) / ss_tot
lse_r2_hyp1 <- 1 - sum((p_corr - lse_p_prd_hyp1)^2) / ss_tot
lse_r2_hyp2 <- 1 - sum((p_corr - lse_p_prd_hyp2)^2) / ss_tot
# Collect the minimized SSE, R^2, and parameter estimates across models.
LSE <- round(c(lse_model_pow1$value, lse_model_pow2$value, lse_model_exp1$value,
               lse_model_exp2$value, lse_model_expow$value, lse_model_hyp1$value,
               lse_model_hyp2$value), 3)
r2_lse <- round(c(lse_r2_pow1, lse_r2_pow2, lse_r2_exp1, lse_r2_exp2,
                  lse_r2_expow, lse_r2_hyp1, lse_r2_hyp2), 3)
names <- c("POW1", "POW2", "EXP1", "EXP2", "EXPOW", "HYP1", "HYP2")
# Parameter table: up to 3 parameters per model, padded with NA.
pars_lse <- round(cbind(c(lse_model_pow1$par, NA, NA),
                        c(lse_model_pow2$par, NA),
                        c(lse_model_exp1$par, NA, NA),
                        c(lse_model_exp2$par, NA),
                        lse_model_expow$par,
                        c(lse_model_hyp1$par, NA, NA),
                        c(lse_model_hyp2$par, NA)), 3)
dimnames(pars_lse) <- list(c('par1', 'par2', 'par3'),
                           c('POW1', 'POW2', 'EXP1', 'EXP2', 'EXPOW', 'HYP1', 'HYP2'))
lse_summary <- data.frame(Models = names, sse = LSE, r2 = r2_lse)
# Evaluate each fitted LSE retention curve on a fine time grid for plotting.
x <- seq(0, 20, 0.05)
lse_p_pow1 <- (1 + x)^(-lse_parm_pow1[1])
lse_p_pow2 <- lse_parm_pow2[1] * (x + 1)^(-lse_parm_pow2[2])
lse_p_exp1 <- exp(-lse_parm_exp1[1] * x)
lse_p_exp2 <- lse_parm_exp2[1] * exp(-lse_parm_exp2[2] * x)
# BUG FIX: the (1 + x) exponent was positive here but negative in the
# prediction step (lse_p_prd_expow), so the plotted EXPOW curve did not
# match the fitted model a*exp(-b*t)*(1+t)^(-c). Negate it for consistency.
lse_p_expow <- lse_parm_expow[1] * exp(-lse_parm_expow[2] * x) * (1 + x)^(-lse_parm_expow[3])
lse_p_hyp1 <- 1 / (1 + lse_parm_hyp1[1] * x)
lse_p_hyp2 <- lse_parm_hyp2[1] / (1 + lse_parm_hyp2[2] * x)
# Reshape to long format for ggplot: one (x, model, value) row per point.
lse_graph_p <- data.frame(x, lse_p_pow1, lse_p_pow2, lse_p_exp1, lse_p_exp2, lse_p_expow, lse_p_hyp1, lse_p_hyp2)
lse_melted <- melt(lse_graph_p, id.vars = "x")
# Overlay the fitted LSE curves on the observed proportion-correct data.
p2 <- ggplot() +
  geom_line(data = lse_melted, aes(x = x, y = value, colour = variable), size = 1.2) +
  geom_point(aes(t_int, p_corr), size = 4) +
  labs(title = "LSE results", x = "Time t", y = "Proportion Correct", colour = "Model") +
  theme(panel.background = element_rect(fill = 'white', colour = 'black'),
        text = element_text(size = 16, family = "serif"),
        plot.title = element_text(hjust = 0.5))
p2
# Print the minimized sum-of-squares and R^2 for each model...
print('- LSE results ------------')
print(lse_summary, 4)
# ...followed by the best-fit parameter estimates.
print('- Best-fit parameters --------')
print(pars_lse, 4)
## multiplot() -- from "Cookbook for R".
## Draw several ggplot objects on a single page.
##   ...      : ggplot objects, given positionally
##   plotlist : additional ggplot objects, given as a list
##   file     : unused (kept for interface compatibility)
##   cols     : number of columns when no explicit layout is supplied
##   layout   : optional matrix; entry k marks the cell(s) of plot k
##              (overrides cols when present)
multiplot <- function(..., plotlist=NULL, file, cols=1, layout=NULL) {
  library(grid)
  # Merge positional plots with those passed via `plotlist`.
  plots <- c(list(...), plotlist)
  n_plots <- length(plots)
  # No layout supplied: fill a cols-wide grid, one plot per cell.
  if (is.null(layout)) {
    n_rows <- ceiling(n_plots / cols)
    layout <- matrix(seq(1, cols * n_rows), ncol = cols, nrow = n_rows)
  }
  if (n_plots == 1) {
    # Single plot: nothing to arrange, just draw it.
    print(plots[[1]])
  } else {
    # Fresh page with one viewport cell per layout slot.
    grid.newpage()
    pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
    # Draw each plot in the cell(s) whose layout entry equals its index.
    for (k in seq_len(n_plots)) {
      slot <- as.data.frame(which(layout == k, arr.ind = TRUE))
      print(plots[[k]], vp = viewport(layout.pos.row = slot$row,
                                      layout.pos.col = slot$col))
    }
  }
}
# Save the MLE (p1) and LSE (p2) fit plots side by side, then collect the
# parameter estimates and explained variances from both methods into
# labelled 3-D arrays: (parameter/r2) x model x {MLE, LSE}.
png(filename = "graph.png", width = 1000, height = 600)
multiplot(p1, p2, cols = 2)
dev.off()
# Parameter estimates: up to 3 parameters per model, NA-padded.
pars <- array(NA, dim = c(3, ncol(pars_mle), 2),
              dimnames = list(c("par1", "par2", "par3"), names, c("MLE", "LSE")))
pars[, , "MLE"] <- pars_mle
pars[, , "LSE"] <- pars_lse
pars
# Explained-variance proportions (R^2) per model and method.
r2 <- array(NA, dim = c(1, ncol(pars_mle), 2),
            dimnames = list("r2", names, c("MLE", "LSE")))
r2[, , "MLE"] <- r2_mle
r2[, , "LSE"] <- r2_lse
r2
# NOTE(review): the following lines are non-code extraction residue
# (dataset-viewer boilerplate) that was appended to the script; commented
# out so the file parses.
# |
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.