blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cb2c4621a550a564df0031ba29eddebf12f2c6dd | 774a6a6bf1fdde190e634fb9aefda06bff522a5a | /SampleProportionDifferenceConfidenceInterval.R | 333d138c58d455842e7acea93dae70be588412b5 | [] | no_license | david-kochar/SampleProportions | 2590b1f8b964b15c476656bbdc3e61696960882d | 406fe15dcb3eb818f8687f2e639e372a79519bd8 | refs/heads/master | 2021-08-23T05:48:19.953762 | 2017-12-03T18:33:59 | 2017-12-03T18:33:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,073 | r | SampleProportionDifferenceConfidenceInterval.R | #In early October 2013, a Gallup poll asked "Do you think
#there should or should not be a law that would ban the
#possession of handguns, except by the police and other
#authorized persons?" Also, Coursera students were polled
#and the results are as follows:
# Successes Sample Size P Hat
#US 257 1028 0.25
#Coursera 59 83 0.71
#How do Coursera students and the American public at large
#compare with respect to their views on laws banning possession
#of handguns?
# Two-sample z confidence interval for the difference in proportions
# (Coursera sample minus Gallup US sample) on the handgun-ban question.
# NOTE(review): rm(list = ls()) wipes the user's whole workspace when the
# script is sourced -- conventionally avoided in shared scripts.
rm ( list = ls ( all = TRUE ) ) #clear variables
n1 <- 1028 #US sample size
n2 <- 83 #Coursera sample size
phat1 <- 257/1028 #sample proportion of US
nonphat1 <- 1 - phat1  # complement of the US proportion
phat2 <- 59/83  # sample proportion of Coursera students
nonphat2 <- 1 - phat2  # complement of the Coursera proportion
phatdiff <- phat2 - phat1  # point estimate of the difference in proportions
z <- 1.96 #Z-score for 95% Confidence Level
# SE of the difference: sqrt(p1*(1-p1)/n1 + p2*(1-p2)/n2)
se <- sqrt ( ( (phat1 * nonphat1) / n1) + (phat2 * nonphat2) / n2 ) #calculate standard error
#Confidence interval (95% Confidence Level) formula: ( Phat Coursera - Phat US ) +/- z * SE
CI <- c( phatdiff - ( z * se ), phatdiff + ( z * se ) ) #Calculate confidence interval
CI |
a40737e14bd4de524c5d0bb068943541dc681600 | cdbb38bcc56f263b0b78111e8e685d2edfd23bbc | /server.R | 1451031846173e96c2dc69efd0e85ee40f8b7dc9 | [] | no_license | Phippsy/uk-property-sales | 3e61d25b417eb8dc10aba9bf6f37224e78b24a8f | 35badb97dec9b50ae1ebad5bf7718fca99f9f1d6 | refs/heads/master | 2021-01-10T16:02:25.022855 | 2016-02-07T14:54:02 | 2016-02-07T14:54:02 | 51,240,792 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,957 | r | server.R | library(shiny)
library(scales)
library(bigrquery)
library(ggplot2)
library(dplyr)
library(lubridate)
## Replace extreme values of x with NA (keeping the vector's length).
## Values outside [q(1e-6) - 1.5*IQR, q(0.98) + 1.5*IQR] are treated as
## outliers.  Extra arguments in ... are forwarded to quantile().
remove_outliers <- function(x, na.rm = TRUE, ...) {
  cutoffs <- quantile(x, probs=c(.000001, 0.98), na.rm = na.rm, ...)
  fence <- 1.5 * IQR(x, na.rm = na.rm)
  lower <- cutoffs[1] - fence
  upper <- cutoffs[2] + fence
  out <- x
  # A single masked assignment replaces the original's two passes;
  # NA entries of x yield NA in the mask and are left untouched.
  out[x < lower | x > upper] <- NA
  out
}
# BigQuery project id passed to query_exec() inside the server function.
project<-"cloud-storage-test-1205"
# server.R
# Discourage scientific notation so prices print/label as plain numbers.
options(scipen=5)
# Shiny backend for the UK property-sales explorer.  Pulls Land Registry
# price-paid rows for a postcode prefix from BigQuery, filters by date
# window and (optionally) street, then renders plots, a results table and
# a CSV download.  Relies on the globals defined above: `project` and
# remove_outliers().
shinyServer(
  function(input, output) {
    # Getting the data: one BigQuery fetch per click of the Submit button.
    dataMain1 <- eventReactive(input$submit, {
      # Post-2014 sales live in the smaller 2014_2015 table; otherwise
      # query the full ppcomplete table.
      tableName <- ifelse(input$datePicker[1] >= as.Date("2014-01-01"), "2014_2015", "ppcomplete")
      sql <- paste0("SELECT price, dateOfTransfer AS date, PAON AS address1, SAON AS address2, Street, town_city, County, postcode FROM [govdata.", tableName, "] WHERE REGEXP_MATCH(postcode, r'^",
                    toupper(input$postcodeInput),
                    "') LIMIT 10000000")
      results <- query_exec(sql, project = project)
      results$date <- as.POSIXct(results$date)
      results$postcode <- as.factor(results$postcode)
      # Keep only the sales inside the requested date window.
      results <- filter(results,
                        date > as.POSIXct(input$datePicker[1]),
                        date < as.POSIXct(input$datePicker[2]))
      # FIX: the original wrote length(input$streetInput > 0) -- the length
      # of a comparison result.  That is equivalent only by accident
      # (length(x > 0) == length(x)); test the input's length directly.
      if (length(input$streetInput) > 0) {
        results <- filter(results, grepl(input$streetInput, Street, ignore.case = TRUE))
      }
      results
    })
    # Data after the optional outlier filter (checkbox input$removeOutliers).
    dataMain <- reactive({
      data <- dataMain1()
      if (input$removeOutliers) {
        data$price <- remove_outliers(data$price)
      }
      data
    })
    # Per-day aggregates feeding the average-price plot.
    plotData <- reactive({
      data2 <- dataMain() %>% group_by(date) %>% summarise(avgprice = mean(price), countsale = length(price) ) %>% as.data.frame()
      data2
    })
    # Average price plot
    output$avgPrice <- renderPlot({
      data <- plotData()
      town <- dataMain()$town_city[1]
      plot <- ggplot(data, aes(x=date, y=avgprice)) +
        geom_line() +
        ggtitle(paste("Average property sale value in", input$postcodeInput, "(", town, ")")) +
        xlab("Date") +
        ylab("Average price paid (per day), £") + scale_y_continuous(labels=comma)
      if (input$showTrend) {
        # Optional loess trend overlay.
        plot <- plot +
          geom_line(stat="smooth", method="loess",
                    size = 1,
                    colour = "#0072B2",
                    alpha = 0.75)
      }
      plot
    })
    # Sale scatter: one point per individual transaction.
    output$saleScatter <- renderPlot({
      data <- dataMain()
      town <- dataMain()$town_city[1]
      plot <- ggplot(data, aes(x=date, y=price)) +
        geom_point(aes(alpha=0.5), size=0.5) +
        scale_y_continuous(labels = comma) +
        ggtitle(paste("Scatterplot of individual sales in", input$postcodeInput, "(", town, ")")) +
        xlab("Date") +
        ylab("Sale price") + theme(legend.position="none")
      if (input$showTrend) {
        plot <- plot +
          geom_line(stat="smooth", method="loess",
                    size = 1,
                    colour = "#0072B2",
                    alpha = 0.75)
      }
      plot
    })
    # Sale boxplot of the price distribution (flipped horizontal).
    output$saleBoxplot <- renderPlot({
      data <- dataMain()
      town <- dataMain()$town_city[1]
      plot <- ggplot(data, aes(x=1, y= price)) + geom_boxplot() +
        scale_y_continuous(labels = comma) +
        ggtitle(paste("Boxplot of sale prices")) +
        ylab("Sale price") + coord_flip()
      plot
    })
    # Sortable results table, newest sales first.
    output$resultsTable <- renderDataTable({
      tableData <- dataMain()
      tableData <- select(tableData, date, address1, address2, Street, postcode, price)
      tableData <- arrange(tableData, desc(date))
      tableData
    }, options = list(pageLength = 10))
    # CSV export of the (filtered) data set.
    output$downloadData <- downloadHandler(
      filename = function() { paste0("Property Sales-", input$postcodeInput, ".csv") },
      content = function(file) {
        write.csv(dataMain(), file)
      }) # end downloadData
  })
|
349db66941a52ed232620fb703600078dfaa581c | cb66ae3bf5bd2422e70df574340e0d5f5388eb8e | /jacknife_bootstrap_gini.R | e69c4bb301667b59d03f5302dc19822cccea10fb | [] | no_license | jvoorheis/MSA_Ineq | 779f28947f243495d4c28b6841b56d2c51dc97e6 | 3dbec52e82d0ae86d6d88c6550aadba4b43cb81a | refs/heads/master | 2016-08-02T22:44:29.331869 | 2013-12-28T07:50:20 | 2013-12-28T07:50:20 | 11,228,792 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,540 | r | jacknife_bootstrap_gini.R | #Testing script to generate Gini coefficients, Lorenz Curves, etc. using both Jackknife and bootstrapped standard errors
library(reldist)
library(boot)
library(ineq)
library(GB2)
library(doMC)
library(parallel)
source("/media/john/Shared Linux_Windows Files/MSA Level Inequality/Code/functions.r")
load("/media/john/Shared Linux_Windows Files/MSA Level Inequality/Data/CPS_topcode_hh1.rda")
# Subset the CPS household data to the New York MSA and to 2005; fit a
# GB2 distribution to the national cell-mean incomes for topcode imputation.
NewYork<-subset(CPS.work.hh, CPS.work.hh$MSA=="New York-Northern New Jersey-Long Island")
temp.year<-subset(CPS.work.hh, CPS.work.hh$year==2005)
bottom_cutoff<-quantile(temp.year$sqrt_equivinc, probs=0.3)
temp.year<-subset(temp.year, temp.year$sqrt_equivinc>bottom_cutoff)
Natl_fit<-ml.gb2(temp.year$cellmean_equivinc)
temp.MSA<-subset(NewYork, NewYork$year==2005)
# Preallocate result holders: 100 multiple-imputation replicates,
# 19 Lorenz-curve ordinates (at population shares 1/20 ... 19/20).
gini_try_MSA<-c(rep(0, 100))
gini_var_MSA<-c(rep(0, 100))
lorenz_ords<-matrix(0,19,100)
lorenz_var<-matrix(0,19,100)
ptm <- proc.time()
# For each replicate: re-impute topcoded incomes, then compute the Gini
# and Lorenz ordinates plus bootstrap variances.
for (k in 1:100){
  temp.data.replace<-vapply(temp.MSA$topcoded_equivinc, FUN=topcode_sub, Natl_fit, FUN.VALUE=0.0)
  temp.data.final<-temp.data.replace+temp.MSA$bottom_equivinc
  temp.lorenz<-Lc(temp.data.final)
  gini_try_MSA[k]<-gini(temp.data.final)
  gini_var_MSA[k]<-var(boot(temp.data.final, gini, 100)$t)
  for (i in 1:19){
    lorenz_ords[i,k]<-temp.lorenz$L[as.integer((i/20)*length(temp.data.final))]
    lorenz_var[i,k]<-var(boot(temp.data.final, lorenz_point, 100, ordinate=(i/20))$t)
  }
}
proc.time() - ptm  # timing of the loop above
# Build the matrix of 100 "virtual income" imputations (one per column).
virtual_inc<-matrix(0, length(temp.MSA$topcoded_equivinc), 100)
for (k in 1:100){
  temp.data.replace<-vapply(temp.MSA$topcoded_equivinc, FUN=topcode_sub, Natl_fit, FUN.VALUE=0.0)
  virtual_inc[,k]<-temp.data.replace+temp.MSA$bottom_equivinc
}
gini_MSA<-apply(virtual_inc, 2, gini)
# Compare the cost of bootstrap vs jackknife variance estimates.
ptm<-proc.time()
gini_var<-apply(virtual_inc, 2, function(x) var(boot(x, gini, 100)$t))
proc.time()-ptm
ptm<-proc.time()
# NOTE(review): jackknife_gini is defined further down this script; run
# top-to-bottom this line errors with "could not find function".
gini_var1<-apply(virtual_inc, 2, jackknife_gini)
proc.time()-ptm
#' Jackknife variance estimate of the Gini coefficient.
#'
#' Recomputes the Gini index on each of the n leave-one-out subsamples and
#' applies the standard jackknife variance formula
#' ((n - 1) / n) * sum((G_i - mean(G))^2).
#'
#' @param income Numeric vector of incomes (length >= 2).
#' @return The jackknife variance of gini(income).
jackknife_gini <- function(income) {
  n <- length(income)
  # Compute each leave-one-out Gini directly.  The original materialised
  # two n x n working matrices (O(n^2) memory) just to drop one element
  # per column, which is needlessly slow and memory-hungry for the large
  # samples this script feeds in.
  G_nk <- vapply(seq_len(n), function(i) gini(income[-i]), numeric(1))
  Gbar <- mean(G_nk)
  ((n - 1) / n) * sum((G_nk - Gbar)^2)
}
# Timing and sanity checks for the jackknife estimator.
ptm<-proc.time()
jackknife_gini(temp.data.final)
proc.time()-ptm
# Quick hand check on a tiny vector with one large outlier.
jackknife_gini(c(1,2,2,3,4,99))
#' Ordinate of the Lorenz curve at a given population share.
#'
#' @param income Numeric vector of incomes.
#' @param weights Frequencies forwarded to ineq::Lc() as `n`.  Defaults to
#'   equal weights so the function can be called with income alone; the
#'   original had no default, so lorenz_point(x) errored.
#' @param ordinate Population share in (0, 1] at which to read the curve.
#' @return L(ordinate), the cumulative income share at that population share.
#' @note When used as the statistic in boot(), boot's default stype = "i"
#'   passes resample *indices* as the second argument, which are then
#'   treated as frequencies here -- TODO(review): confirm this is intended
#'   (stype = "f" would supply true frequencies).
lorenz_point <- function(income, weights = rep(1, length(income)), ordinate = 0.5) {
  n <- length(income)
  L_temp <- Lc(income, n = weights)
  return(L_temp$L[as.integer(ordinate * n)])
}
lorenz_point(temp.data.final)  # sanity check of the helper above
# NOTE(review): boot() with no arguments errors; this looks like a leftover
# scratch line.
boot()
# ---- 2012: impute incomes, then Lorenz ordinates + bootstrap variances ----
temp.MSA<-subset(NewYork, NewYork$year==2012)
gini_try_MSA<-c(rep(0, 100))
gini_var_MSA<-c(rep(0, 100))
lorenz_ords<-matrix(0,19,100)
lorenz_var<-matrix(0,19,100)
virtual_inc<-matrix(0, length(temp.MSA$topcoded_equivinc), 100)
for (k in 1:100){
  temp.data.replace<-vapply(temp.MSA$topcoded_equivinc, FUN=topcode_sub, Natl_fit, FUN.VALUE=0.0)
  virtual_inc[,k]<-temp.data.replace+temp.MSA$bottom_equivinc
}
x<-apply(virtual_inc, 2, lorenz_point, c(rep(1, length(virtual_inc[,1]))))
for (i in 1:19){
  lorenz_ords[i,]<-apply(virtual_inc, 2, lorenz_point, c(rep(1, length(virtual_inc[,1]))), ordinate=(i/20))
  lorenz_var[i,]<-apply(virtual_inc, 2, function(x) var(boot(x, lorenz_point, 500, ordinate=(i/20))$t))
}
# Combine the 100 imputations: Rubin-style total variance = between-imputation
# variance (1/99 * sum of squares) + mean within-imputation bootstrap variance.
lorenz_mean2012<-c(rep(0,19))
lorenz_variance2012<-c(rep(0,19))
for (i in 1:19){
  lorenz_mean2012[i]<-mean(lorenz_ords[i,])
}
for (i in 1:19){
  lorenz_variance2012[i]<-(1/99)*sum((lorenz_ords[i,]-lorenz_mean2012[i])^2)+mean(lorenz_var[i,])
}
# ---- 1995: same pipeline as the 2012 block above ----
temp.MSA<-subset(NewYork, NewYork$year==1995)
gini_try_MSA<-c(rep(0, 100))
gini_var_MSA<-c(rep(0, 100))
lorenz_ords<-matrix(0,19,100)
lorenz_var<-matrix(0,19,100)
virtual_inc<-matrix(0, length(temp.MSA$topcoded_equivinc), 100)
for (k in 1:100){
  temp.data.replace<-vapply(temp.MSA$topcoded_equivinc, FUN=topcode_sub, Natl_fit, FUN.VALUE=0.0)
  virtual_inc[,k]<-temp.data.replace+temp.MSA$bottom_equivinc
}
x<-apply(virtual_inc, 2, lorenz_point, c(rep(1, length(virtual_inc[,1]))))
for (i in 1:19){
  lorenz_ords[i,]<-apply(virtual_inc, 2, lorenz_point, c(rep(1, length(virtual_inc[,1]))), ordinate=(i/20))
  lorenz_var[i,]<-apply(virtual_inc, 2, function(x) var(boot(x, lorenz_point, 500, ordinate=(i/20))$t))
}
lorenz_mean1995<-c(rep(0,19))
lorenz_variance1995<-c(rep(0,19))
for (i in 1:19){
  lorenz_mean1995[i]<-mean(lorenz_ords[i,])
}
for (i in 1:19){
  lorenz_variance1995[i]<-(1/99)*sum((lorenz_ords[i,]-lorenz_mean1995[i])^2)+mean(lorenz_var[i,])
}
# Pointwise z-type test statistic for the 2012 vs 1995 Lorenz ordinates.
test_stat<-(lorenz_mean2012 - lorenz_mean1995)/sqrt(lorenz_variance2012+lorenz_variance1995)
test_stat |
e87496311b945495e19d9d6298491bfe4a9e6253 | 406813a89e145c36c73f7abb07957a7720dc8bed | /run_analysis.R | 8a1bc496a6a48952f851cda22401faf7a3cb3041 | [] | no_license | samu224/Getting_and_cleaning_data_course_project | d9fd1b800bf085a01b06f8642874506a516267cf | b915b1a29206e0479344a428226091eb47fc5256 | refs/heads/master | 2020-08-27T03:30:37.917969 | 2014-06-22T18:37:45 | 2014-06-22T18:37:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,753 | r | run_analysis.R |
# Getting & Cleaning Data course project: builds a tidy data set of mean
# and std features from the UCI HAR data, averaged per subject/activity,
# and writes it to tidy_data.txt.
directory <- "UCI HAR Dataset"
require(reshape2)
## 1 - merge the training and test set data to create one data set
## get the dataset in test and train sets
test_set <- read.table(paste("./", directory, "/test/X_test.txt", sep=""));
train_set <- read.table(paste("./", directory, "/train/X_train.txt", sep=""))
## retrieves the activity labels
activity_labels <- read.table(paste("./", directory, "/activity_labels.txt", sep=""))
## get the test and train subject labels (one row per observation)
train_labels <- read.table(paste("./", directory, "/train/subject_train.txt", sep=""))
test_labels <- read.table(paste("./", directory, "/test/subject_test.txt", sep=""))
## read the test and training y labels (activity ids)
y_train <- read.table(paste("./", directory, "/train/y_train.txt", sep=""))
y_test <- read.table(paste("./", directory, "/test/y_test.txt", sep=""))
## merge y test and training activity labels
## NOTE(review): merge() sorts the result by the key column V1 by default,
## which can reorder rows relative to the X_* files before the cbind()
## below -- verify alignment (merge(..., sort = FALSE) still does not
## guarantee original order).
y_train_labels <- merge(y_train,activity_labels,by="V1")
y_test_labels <- merge(y_test,activity_labels,by="V1")
## merge the test and training data and the respective labels together
train_set <- cbind(train_labels,y_train_labels,train_set)
test_set <- cbind(test_labels,y_test_labels,test_set)
## merge the test and training data together
complete_data_set <- rbind(train_set,test_set)
## 2 - grabs the mean and standard deviation for the data set passed in
features <- read.table(paste("./", directory, "/features.txt", sep=""))
## get the rows where the name is mean and std
mean_std_rows <- subset(features, grepl("(mean\\(\\)|std\\(\\))", features$V2) )
## add the column headers to the data set
colnames(complete_data_set) <- c("Subject","Activity_Id","Activity",as.vector(features[,2]))
## extract the data from the merged data where the column names are mean OR std
mean_col <- grep("mean()", colnames(complete_data_set), fixed=TRUE)
std_col <- grep("std()", colnames(complete_data_set), fixed=TRUE)
## add mean and std columns into single vector
measurements_vect <- c(mean_col, std_col)
## sort the vector so columns keep their original order
measurements_vect <- sort(measurements_vect)
cleaned_data_set <- complete_data_set[,c(1,2,3,measurements_vect)]
## 3-5: melt to long form, then average each variable per subject/activity
melted_data <- melt(cleaned_data_set, id=c("Subject","Activity_Id","Activity"))
tidy_data <- dcast(melted_data, formula = Subject + Activity_Id + Activity ~ variable, mean)
## replace column names with something that makes more sense
col_names <- colnames(tidy_data)
col_names <- gsub("-mean()","Mean",col_names,fixed=TRUE)
col_names <- gsub("-std()","Standard Deviation",col_names,fixed=TRUE)
col_names <- gsub("BodyBody","Body",col_names,fixed=TRUE)
## put back in the tidy column names
colnames(tidy_data) <- col_names
## write the output into a file (tab-separated despite the .txt name)
write.table(tidy_data, file="./tidy_data.txt", sep="\t", row.names=FALSE)
|
f2ce57d4a43ec96af601e7b84257f6877198f99f | f3bec6b324c71301e3b51e029b14501d1d42982f | /lecture05/lecture05.1.R | c095add8a1c845e96893312dc57f061e612b23dd | [] | no_license | shabopbop/bggn213_WI19_classwork | a5c6d0c589a214e3a5680f62be0d8bfaf84e71aa | 83d1c863136e8d7a77c165b29104d3d3ad56f7db | refs/heads/master | 2020-04-22T18:51:13.362826 | 2019-03-13T22:05:37 | 2019-03-13T22:05:37 | 170,589,578 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 748 | r | lecture05.1.R | fc<- read.table("bimm143_05_rstats/feature_counts.txt", header=TRUE,
sep="\t")
# Horizontal barplot of feature counts; wide left margin for the long labels.
par(mar= c(5.1, 11.0, 4.1, 2.1))
barplot(fc$Count, horiz = TRUE, ylab= "",
        names.arg = fc$Feature, main= "babaganush", las=1, col=1:10)
# Male/female counts barplot with rotated x labels and alternating colours.
mfc<- read.table("bimm143_05_rstats/male_female_counts.txt", header=TRUE,
                 sep="\t")
par(mar=c(8,5,4,4))
barplot(mfc$Count, names.arg= mfc$Sample, las=2, ylab= "People",
        main = "Those who have met Cathulhu", col=c("red2", "purple2"))
# Expression scatter: colour points by their up/down/unchanging State factor.
ud<- read.delim("bimm143_05_rstats/up_down_expression.txt")
nrow(ud)
table(ud$State)
palette(c("purple", "gray", "orange"))
plot(ud$Condition1, ud$Condition2, col=ud$State,
     xlab="Amount eaten", ylab= "The Gainz", main= "HULKING OUT")
|
9d49d562f7cddb2907a6a7850babf04dba64bd32 | 2d057b0e0ac2b4bc04e9146a75d1b1048ec6ea89 | /Linear_IsoGP_functions.R | ad7ec031f384e9f79c3cf2f4c3d72d71761ab9fa | [
"MIT"
] | permissive | olga-egorova/CSP_multifidelity_modelling | a8bae95aa799d7d0d8fda2d2605aa7c0efb59348 | bfb5f9352fe1d177f5c7fb122cf086cffa1c56e0 | refs/heads/master | 2022-11-30T04:35:30.165586 | 2020-08-20T15:50:08 | 2020-08-20T15:50:08 | 289,027,630 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,700 | r | Linear_IsoGP_functions.R | ####################################################################################
### File containing functions for the Bayesian GP modelling for prediction: with a fixed part
####################################################################################
##### Functions for kernel and posteriors (marginal and integrated out)
## Isomorphic kernel
## Isotropic Gaussian kernel over all structures.
## Depends on file-level globals: D_all (full distance matrix),
## training_idx, tau2 (nugget) and n_training.
K_iso_gaussian = function(phi) {
  ## single phi: K_ij = exp(-d_ij^2 / phi)
  return (as.matrix(exp(-(D_all^2)/phi)))
}
K_all = K_iso_gaussian # by default - Gaussian kernel
# Kernel restricted to the training points.
K = function(phi) {
  return (K_all(phi)[training_idx, training_idx])
}
# Training covariance with nugget: S = K + tau2 * I.
S = function(phi) {
  return (K(phi) + diag(tau2, nrow = n_training))
}
##########################################################
## Posterior hyper-parameters (Yiolanda, pp 19-21, (2.20))
##########################################################
## F_ contains model terms (intercept and linear term). n x k matrix.
## These helpers read the globals linear_training, dt_energy, training_idx,
## beta_0, R, alpha and gamma set elsewhere in the file.
F_ = function(n_training) {
  return (matrix(c(rep(1, n_training), linear_training), byrow = FALSE, ncol = 2))
}
# sigma2 ~ IG(alpha_, gamma_): posterior shape and rate.
alpha_ = function (n_training) { return (alpha + 0.5*n_training) }
# Posterior rate; note `gamma` here is the numeric prior hyperparameter,
# shadowing base::gamma inside this expression.
gamma_ = function(phi) {
  y_ = as.numeric(dt_energy[training_idx, energy]) - F_(n_training)%*%beta_0
  return (gamma +
          0.5*(matrix(y_, nrow = 1)%*%solve(S(phi)+F_(n_training)%*%R%*%t(F_(n_training)))
          %*%matrix(y_, ncol = 1)))
}
# Inverse of the training covariance (recomputed on every call).
S_inv = function(phi) {
  return (solve(S(phi)))
}
# Posterior covariance of the regression coefficients beta.
Sigma_ = function(phi) {
  return (solve(t(F_(n_training))%*%S_inv(phi)%*%F_(n_training) + solve(R)))
}
# Posterior mean of beta.
mu_ = function(phi) {
  y = as.numeric(dt_energy[training_idx, energy])
  return (Sigma_(phi)%*%(t(F_(n_training))%*%S_inv(phi)%*%y + solve(R)%*%beta_0))
}
####################################
## Posterior distribution for phi
####################################
## Log marginal posterior of phi under a uniform prior, evaluated via SVD
## for numerical stability.
## NOTE(review): the svd.eps parameter is declared but never used in the
## body -- presumably intended to threshold small singular values.
phi_p_Uniform = function(phi, svd.eps = 10^(-5)) { ## using SVD, optimising on log scale
  A = S(phi) + F_(n_training)%*%R%*%t(F_(n_training))
  svd_A = svd(A)
  # Pseudo-inverse from the SVD factors.
  A_inv = svd_A$v%*%(diag(1./svd_A$d))%*%t(svd_A$u)
  y_ = as.numeric(dt_energy[training_idx, energy]) - F_(n_training)%*%beta_0
  gg = gamma + 0.5*(matrix(y_, nrow = 1)%*%A_inv%*%matrix(y_, ncol = 1))
  # -0.5*log|A| - alpha_ * log(gamma_) up to an additive constant.
  return (-0.5*sum(log(svd_A$d)) - alpha_(n_training)*log(gg))
}
phi_p = phi_p_Uniform # by default - Uniform prior
## Univariate phi optimisation over a fixed bracket.
phi_iso_optim = function(phi_min = 10^(-5), phi_max = 10^10) {
  phi_opt = optimise(phi_p, lower = phi_min, upper = phi_max, maximum = TRUE)
  return (list(phi_post = phi_opt$maximum, phi_value = phi_opt$objective))
}
## Optimisation of phi.
## Multi-start maximisation of the log posterior of phi: run L-BFGS-B from
## n_start random starting points and collect every optimum found.
##
## Returns a list with m_phi_post (one row per successful start),
## m_phi_value (corresponding objective values) and err_start (starting
## points for which optim() failed; NULL if none).
phi_optim = function(phi_low = 10^(-10), n_start = 300) {
  m_phi_start = matrix(runif(n_start, min = 10^(-5), max = 10^3), ncol = 1)
  phi_dim = ncol(m_phi_start)
  m_phi_post = m_phi_value = err_start = NULL
  for (i in seq_len(n_start)) {
    phi_start = m_phi_start[i,]
    ## Multivariate optimisation; a failed optim() run yields NULL.
    phi_opt = tryCatch(
      optim(par = phi_start, fn = phi_p, control=list(fnscale=-1),
            lower = rep(phi_low, phi_dim),
            method = "L-BFGS-B"), # SANN BFGS
      error = function(e) NULL
    )
    if (is.null(phi_opt)) {
      # Record the failing start.  The original assigned err_start *inside*
      # the error handler (a handler-local copy, so it was always NULL on
      # return) and passed `silent = TRUE`, which is a try() argument that
      # tryCatch() silently ignores.
      err_start = rbind(err_start, phi_start)
    } else {
      m_phi_post = rbind(m_phi_post, phi_opt$par)
      m_phi_value = rbind(m_phi_value, phi_opt$value)
    }
  }
  return (list(m_phi_post = m_phi_post, m_phi_value = m_phi_value, err_start = err_start))
}
## Posterior mode: the starting point that achieved the largest posterior
## value among the multi-start results returned by phi_optim().
phi_post_value = function(phi_opt) { return (phi_opt$m_phi_post[which.max(phi_opt$m_phi_value),]) }
########################################
## Predictive posterior distribution
## (these helpers read the globals test_idx, training_idx, n_test,
## n_training, tau2, linear_test and dt_energy)
########################################
## GP kernel between the test and training sets
k = function(phi) {
  return (K_all(phi)[test_idx, training_idx])
}
## y_pred ~ t_(2alpha) (1,a_ + b_y*mu_, gamma_/alpha_ * Sigma_y)
# Smoother matrix mapping training responses to test predictions.
a_ = function(phi) {
  return (k(phi)%*%S_inv(phi))
}
# Regression-term correction for the predictive mean.
b_t = function(phi) {
  return (matrix(c(rep(1, n_test), linear_test), byrow = FALSE, ncol=2) - a_(phi)%*%F_(n_training))
}
## Posterior predictive mean
mu_y = function(phi) {
  y = as.numeric(dt_energy[training_idx, energy])
  return (a_(phi)%*%y + b_t(phi)%*%mu_(phi))
}
# Scale matrix of the predictive t distribution (before the gamma_/alpha_
# factor applied below).
Sigma_y = function(phi) {
  return (diag(1 + tau2, n_test) - a_(phi)%*%t(k(phi)) +
          b_t(phi)%*%Sigma_(phi)%*%t(b_t(phi))
  )
}
y_pred_mean = function(phi_post) {
  return (mu_y(phi_post))
}
# Variance-covariance matrix of posterior predictive, for the lower level
y_pred_var_low = function(phi_post) {
  return ((Sigma_y(phi_post))*as.numeric(gamma_(phi_post))/alpha_(n_training))
}
# Pointwise predictive standard deviations (lower level).
y_pred_sd = function(phi_post) {
  return (sqrt(diag(Sigma_y(phi_post))*as.numeric(gamma_(phi_post))/alpha_(n_training)))
}
# Variance-covariance matrix of posterior predictive, for the higher level;
# scales the low-level variance by the (squared) slope coefficient.
y_pred_var_high = function(phi_post, low_variance) {
  return (((mu_(phi_post)[2])^2 + as.matrix(Sigma_(phi_post))[2,2]*as.numeric(gamma_(phi_post))/alpha_(n_training))*low_variance
          + y_pred_var_low(phi_post))
}
# SDs for high level predictive variances
y_pred_sd_high = function(y_pred_var) {
  return (as.numeric(sqrt(diag(y_pred_var))))
}
# True responses at the test points.
y_true = function(test_idx) {
  return (dt_energy[test_idx, energy])
}
## Sampling predicted responses from the marginal scaled-t predictive.
n_sample = 10^3
y_pred_sample = function(n_sample = 10^3, phi_post) {
  mm = matrix(rt.scaled(n = n_sample*n_test, df = 2*alpha_(n_training),
                        mean = as.vector(y_pred_mean(phi_post)),
                        sd = y_pred_sd(phi_post)),
              nrow = n_test, byrow = FALSE)
  return (mm)
}
#############################
## Choosing the training set
#############################
## Draw a simple random training design: n_training distinct row indices
## of C_scaled, returned in increasing order.
training_set_random = function(n_training, C_scaled) {
  n_candidates = nrow(C_scaled)
  chosen = sample.int(n_candidates, size = n_training, replace = FALSE)
  sort(chosen)
}
## Maximin design: scaling the sets of coordinates
## Maximin design helper: rescale each coordinate set so that its largest
## row norm equals one.  Returns the list with every matrix divided by the
## maximum Euclidean (L2) norm over its rows.
get_coord_scaling = function(C_all) {
  lapply(C_all, function(coords) {
    row_norms = apply(coords, 1, norm, type = "2")
    coords / max(as.numeric(row_norms))
  })
}
## Maximin design: stacking all the coordinates
## Maximin design helper: column-bind every scaled coordinate block into
## one wide matrix (n_s rows).
full_coord_matrix = function(C_all_scaled) {
  stacked = do.call(cbind, C_all_scaled)
  as.matrix(stacked)
}
## C_matrix has n_s rows. Returns sorted training_idx.
## Uses maximin.cand.upd() from an external package sourced elsewhere.
training_set_maximin = function(n_training, C_scaled) {
  mm_design = maximin.cand.upd(n = n_training, Xcand = C_scaled, Tmax = nrow(C_scaled))
  #cat(mm_design$Treached)
  return (sort(as.numeric(mm_design$inds)))
}
# Test set = all structures not in the training set (reads the global
# n_structures).
test_set = function(training_idx) {
  return (sort(setdiff(1:n_structures, training_idx)))
}
# Training responses; `energy` is a column selected with data.table syntax.
y_train = function(dt_energy, training_idx) {
  return (as.numeric(dt_energy[training_idx, energy]))
}
##########################################
### Obtain distance matrices
##########################################
## Not taking into account the symmetry.
## Pairwise Euclidean distance matrix for each coordinate set in C_all.
## Molecular symmetry is NOT taken into account here.  Returns an unnamed
## list, one full (dense, symmetric) matrix per coordinate set.
get_distance_matrices = function(C_all) {
  unname(lapply(C_all, function(coords) as.matrix(dist(coords))))
}
## Taking into account the symmetry of the molecule. C_all_scaled(!)
## Distance matrices intended to respect the symmetry of the oxalic acid
## molecule.  With the atom-reordering branch commented out (d2 / min(d1,d2)
## below), this currently reduces to the plain Euclidean distance matrix
## per coordinate set.  Expects C_all to already be scaled.
sym_oxalic_distance_matrices = function(C_all) {
  n_dim = length(C_all)
  D_all = vector("list", n_dim)
  for (k in 1:n_dim) {
    ## Atom reordering in case of symmetry of the molecule (disabled):
    # ne = ncol(C_all[[k]])/n_atoms
    # atom_reorder = c((2*ne+1):(4*ne), 1:(2*ne), (5*ne+1):(6*ne),
    #                  (4*ne+1):(5*ne), (7*ne+1):(8*ne), (6*ne+1):(7*ne))
    # Derive the point count from the data itself; the original read the
    # undeclared global n_structures, breaking if it disagreed with C_all.
    n_pts = nrow(C_all[[k]])
    D = matrix(0, ncol = n_pts, nrow = n_pts)
    for (s1 in seq_len(n_pts)) {
      # Only the upper triangle: the symmetric assignment below fills the
      # rest (the original recomputed every pair twice).
      for (s2 in s1:n_pts) {
        d1 = sqrt(sum((C_all[[k]][s1,] - C_all[[k]][s2,])^2))
        #d2 = sqrt(sum((C_all[[k]][s1,] - C_all[[k]][s2, atom_reorder])^2))
        D[s1,s2] = D[s2,s1] = d1 #min(d1,d2)
      }
    }
    D_all[[k]] = D
  }
  return (D_all)
}
##########################################
### Output functions: plotting, MAE, RMSE
##########################################
## Some plotting
### generate data frame with predicted, true values, SD of the predictions
### and min distance to the training set (reads the globals dt_energy,
### test_idx, training_idx, Dist_scaled).
gen_df_pred = function(phi_post) {
  df_pred = data.frame("y_true" = as.numeric(dt_energy[test_idx, energy]),
                       "y_pred" = y_pred_mean(phi_post),
                       "y_sd" = y_pred_sd(phi_post),
                       "min_dist" = apply(Dist_scaled[test_idx, training_idx], 1, min))
  return (df_pred)
}
# As above, but also with an indicator of whether the structure is a
# minimum-energy structure (the last n_minima test rows are assumed minima).
# NOTE(review): this variant uses D_all for min_dist while gen_df_pred uses
# Dist_scaled -- confirm the inconsistency is intentional.  The final
# assignment is the function's (invisibly returned) value.
gen_df_pred_perturbed = function(phi_post) {
  df_pred = data.frame("y_true" = as.numeric(dt_energy[test_idx, energy]),
                       "y_pred" = y_pred_mean(phi_post),
                       "y_sd" = y_pred_sd(phi_post),
                       "min_dist" = apply(D_all[test_idx, training_idx], 1, min),
                       "minima" = c(rep(0, n_test - n_minima), rep(1, n_minima)))
}
## Plot helpers: each takes the data frame built by gen_df_pred() and
## returns a ggplot object (not printed here; print at the call site).
## prediction means and true values
gen_pred_true_plot = function(df_pred) {
  gg = ggplot(data = df_pred, aes(x = y_true)) +
    geom_point(aes(y = y_pred, colour = "y_pred"), pch = 4) +
    geom_point(aes(y = y_true, colour = "y_true"), pch = 4) +
    ggtitle("Predicted responses") +
    ylab("Predicted and true responses") + xlab("True response") +
    theme_minimal()
  return (gg)
}
## prediction means, true values and training points (black triangles)
gen_pred_true_train_plot = function(df_pred, y_training) {
  gg = ggplot() +
    geom_point(aes(x = df_pred$y_true, y = df_pred$y_true, colour = "y_true"), pch = 4) +
    geom_point(aes(x = df_pred$y_true, y = df_pred$y_pred, colour = "y_pred"), pch = 4) +
    geom_point(aes(x = y_training, y = y_training), colour = "black", pch = 17) +
    ggtitle("Predicted responses") +
    ylab("Predicted and true responses") +
    theme_minimal()
  return (gg)
}
## prediction means and prediction variance (SD) on a secondary axis
gen_pred_sd_plot = function(df_pred) {
  gg = ggplot(data = df_pred, aes(x = y_true)) +
    geom_line(aes(y = y_pred, colour = "y_pred")) +
    geom_line(aes(y = y_sd, colour = "y_sd")) +
    scale_y_continuous(sec.axis = sec_axis(~./1, name = "y_sd")) +
    ggtitle("Predicted responses and prediction SD") +
    theme_minimal()
  return (gg)
}
## prediction variance (SD) against min distance to the training set
gen_sd_dist_plot = function(df_pred) {
  gg = ggplot(data = df_pred, aes(x = min_dist)) +
    geom_line(aes(y = y_sd, colour = "y_sd")) +
    # Typo fix in the user-facing title: "distane" -> "distance".
    ggtitle("Prediction SD and min distance to the training set") +
    theme_minimal()
  return (gg)
}
# Mean absolute error (MAE) between observed and predicted responses.
get_pred_mae = function(y_true, y_pred) {
  abs_errors = abs(y_true - y_pred)
  mean(abs_errors)
}
# Root mean squared error (RMSE)
get_pred_rmse = function(y_true, y_pred) {
return (sqrt(mean((y_true - y_pred)^2)))
} |
dfbe7e5fbcdecfefded0f84e575d82236b595f71 | 2d6bbc82fb96f81e6c836619814078538474aa09 | /inst/application/app.R | 5bc1aca5884fde21d59b615106d47fafe0ccad67 | [
"MIT"
] | permissive | rsquaredacademy/pkginfo | b3967a9993b0ee0383be1951413c5a3652373db5 | 8a716283eaf53b07130cc5a943cc048db3919ce6 | refs/heads/master | 2023-08-30T17:01:54.900072 | 2023-05-31T14:53:33 | 2023-05-31T14:53:33 | 150,448,657 | 5 | 2 | null | 2018-10-02T11:47:08 | 2018-09-26T15:28:03 | HTML | UTF-8 | R | false | false | 25,363 | r | app.R | library(magrittr)
ui <- shinydashboard::dashboardPage(skin = "blue",
shinydashboard::dashboardHeader(title = "pkginfo"),
shinydashboard::dashboardSidebar(
shinydashboard::sidebarMenu(
id = "tabs",
shinydashboard::menuItem("Welcome", tabName = "welcome", icon = shiny::icon("th")),
shinydashboard::menuItem("Overview", tabName = "basic_info", icon = shiny::icon("th")),
shinydashboard::menuItem("Downloads", tabName = "downloads", icon = shiny::icon("th")),
shinydashboard::menuItem("Indicators", tabName = "build_status", icon = shiny::icon("th")),
shinydashboard::menuItem("CRAN Check", tabName = "cran_check", icon = shiny::icon("th")),
shinydashboard::menuItem("Issues", tabName = "issues", icon = shiny::icon("th")),
shinydashboard::menuItem("Releases", tabName = "releases", icon = shiny::icon("th")),
shinydashboard::menuItem("Branches", tabName = "branches", icon = shiny::icon("th")),
shinydashboard::menuItem("Dependencies", tabName = "deps", icon = shiny::icon("th")),
shinydashboard::menuItem("Pull Requests", tabName = "pr", icon = shiny::icon("th")),
shinydashboard::menuItem("Stack Overflow", tabName = "so", icon = shiny::icon("th")),
shinydashboard::menuItem("Exit", tabName = "exit", icon = shiny::icon("power-off"))
)
),
shinydashboard::dashboardBody(
shinydashboard::tabItems(
shinydashboard::tabItem(tabName = "welcome",
shiny::fluidRow(
shiny::column(12, align = 'center',
shiny::br(),
shiny::textInput("repo_name", "Package/Repo Name", value = NULL),
shiny::actionButton("check_package_name", "Verify Package Name"),
shiny::br(),
shiny::br()
)
),
shiny::fluidRow(
shiny::uiOutput("package_check")
)
),
shinydashboard::tabItem(tabName = "basic_info",
shiny::fluidRow(
shiny::column(12, align = 'center',
shiny::h2("Overview")
)
),
shiny::br(),
shiny::br(),
shiny::fluidRow(
shiny::uiOutput('out_basic_title') %>%
shinycssloaders::withSpinner(),
shiny::uiOutput('out_basic_desc') %>%
shinycssloaders::withSpinner(),
shiny::uiOutput('out_basic_version') %>%
shinycssloaders::withSpinner(),
shiny::uiOutput('out_basic_pub') %>%
shinycssloaders::withSpinner(),
shiny::uiOutput('out_basic_maintainer') %>%
shinycssloaders::withSpinner(),
shiny::uiOutput('out_basic_maintainer_email') %>%
shinycssloaders::withSpinner(),
shiny::uiOutput('out_basic_cran') %>%
shinycssloaders::withSpinner(),
shiny::uiOutput('out_basic_bugs') %>%
shinycssloaders::withSpinner(),
shiny::uiOutput('out_basic_github') %>%
shinycssloaders::withSpinner(),
shiny::uiOutput('out_basic_website') %>%
shinycssloaders::withSpinner()
)
),
shinydashboard::tabItem(tabName = "downloads",
shiny::fluidRow(
shiny::column(12, align = 'center',
shiny::h2("CRAN Downloads"),
shiny::br(),
shiny::tableOutput("cran_downloads") %>%
shinycssloaders::withSpinner()
)
),
shiny::fluidRow(
shiny::column(6, align = 'right',
shiny::dateInput("start_date", "From")
),
shiny::column(6, align = 'left',
shiny::dateInput("end_date", "To")
)
),
shiny::fluidRow(
column(2),
column(8, align = 'center',
plotOutput("downloads_plot") %>%
shinycssloaders::withSpinner()
),
column(2)
),
shiny::br(),
shiny::br(),
shiny::br()
),
shinydashboard::tabItem(tabName = "build_status",
shiny::fluidRow(
shiny::column(12, align = 'center',
shiny::h2("Indicators")
)
),
shiny::fluidRow(
shinycssloaders::withSpinner(shinydashboard::infoBoxOutput("coverageBox"))
),
shiny::fluidRow(
shinycssloaders::withSpinner(shinydashboard::valueBoxOutput("starsBox")),
shinycssloaders::withSpinner(shinydashboard::valueBoxOutput("forksBox")),
shinycssloaders::withSpinner(shinydashboard::valueBoxOutput("issuesBox"))
),
shiny::fluidRow(
shinycssloaders::withSpinner(shinydashboard::valueBoxOutput("importsBox")),
shinycssloaders::withSpinner(shinydashboard::valueBoxOutput("suggestsBox")),
shinycssloaders::withSpinner(shinydashboard::valueBoxOutput("prBox"))
),
shiny::fluidRow(
shinycssloaders::withSpinner(shinydashboard::valueBoxOutput("branchesBox")),
shinycssloaders::withSpinner(shinydashboard::valueBoxOutput("releasesBox")),
shinycssloaders::withSpinner(shinydashboard::valueBoxOutput("versionBox"))
)
),
shinydashboard::tabItem(tabName = "cran_check",
shiny::fluidRow(
shiny::column(12, align = 'center',
shiny::h2("CRAN Check Results"),
shiny::br(),
shiny::tableOutput("cran_check_results_table") %>%
shinycssloaders::withSpinner()
)
)
),
shinydashboard::tabItem(tabName = "issues",
shiny::fluidRow(
shiny::column(12, align = 'center',
shiny::h2("Open Issues"),
shiny::br(),
shiny::tableOutput("gh_issues") %>%
shinycssloaders::withSpinner()
)
)
),
shinydashboard::tabItem(tabName = "releases",
shiny::fluidRow(
shiny::column(12, align = 'center',
shiny::h2("Releases"),
shiny::br(),
shiny::tableOutput("gh_releases") %>%
shinycssloaders::withSpinner()
)
)
),
shinydashboard::tabItem(tabName = "branches",
shiny::fluidRow(
shiny::column(12, align = 'center',
shiny::h2("GitHub Branches"),
shiny::br(),
shiny::tableOutput("gh_branches") %>%
shinycssloaders::withSpinner()
)
)
),
shinydashboard::tabItem(tabName = "deps",
shiny::fluidRow(
shiny::column(12, align = 'center',
shiny::h2("Dependencies")
)
),
shiny::fluidRow(
shiny::column(6, align = 'center',
shiny::tableOutput("cran_imports") %>%
shinycssloaders::withSpinner()
),
shiny::column(6, align = 'center',
shiny::tableOutput("cran_suggests") %>%
shinycssloaders::withSpinner()
)
)
),
shinydashboard::tabItem(tabName = "pr",
shiny::fluidRow(
shiny::column(12, align = 'center',
shiny::h2("Open Pull Requests"),
shiny::br(),
shiny::tableOutput("gh_prs") %>%
shinycssloaders::withSpinner()
)
)
),
shinydashboard::tabItem(tabName = "so",
shiny::fluidRow(
shiny::column(12, align = 'center',
shiny::h2("Stack Overflow"),
shiny::br(),
shiny::tableOutput("gh_so") %>%
shinycssloaders::withSpinner()
)
)
),
shinydashboard::tabItem(tabName = "exit",
shiny::fluidRow(shiny::column(12, align = 'center',
shiny::h2("Thank you for using", shiny::strong("pkginfo"), "!"))),
shiny::fluidRow(shiny::column(12, align = 'center',
shiny::actionButton("exit_button", "Exit App")))
)
)
)
)
# Shiny server for the pkginfo dashboard: looks up CRAN / GitHub /
# Stack Overflow metadata for the package typed into `input$repo_name`
# and renders it across the dashboard tabs defined in `ui`.
server <- function(input, output, session) {

  # TRUE when the entered name is a real CRAN package.
  valid_package <- shiny::eventReactive(input$check_package_name, {
    pkginfo:::check_package_name(input$repo_name)
  })

  # Second-stage inputs (GitHub owner lookup + retrieve button); rendered
  # only once the package name validates, otherwise an error message.
  output$package_check <- shiny::renderUI({
    if (valid_package()) {
      shiny::column(12, align = 'center',
        shiny::textInput("user_name", "GitHub Owner/Org", value = NULL),
        shiny::br(),
        shiny::actionButton(inputId = "check_repo_name", label = "Find User/Org"),
        shiny::actionButton("retrieve_info", "Retrieve Info"),
        shiny::br(),
        shiny::br(),
        shiny::column(3),
        shiny::column(6, align = 'center',
          shiny::h4("Click on the Find User/Org button if you do not know the GitHub username or
            organization name. The app will find it if the package has a GitHub repository.")
        ),
        shiny::column(3)
      )
    } else {
      shiny::column(12, align = 'center',
        shiny::h5("You have entered an invalid package name.")
      )
    }
  })

  # CRAN DESCRIPTION data for the package; everything below derives from this.
  pkg_details <-
    shiny::eventReactive(input$retrieve_info, {
      pkginfo::get_pkg_details(input$repo_name)
    })

  # BugReports URL from DESCRIPTION, or "NA" when absent.
  bug_url <- shiny::eventReactive(input$retrieve_info, {
    get_bugs_url <-
      pkg_details() %>%
      pkginfo::get_pkg_urls() %>%
      dplyr::filter(website == "Bugs") %>%
      nrow()
    if (get_bugs_url != 1) {
      "NA"
    } else {
      pkg_details() %>%
        pkginfo::get_pkg_urls() %>%
        dplyr::filter(website == "Bugs") %>%
        dplyr::pull(urls)
    }
  })

  # Anchor text for the bugs link ("Link" when a URL exists).
  bugs_url_link <- shiny::eventReactive(input$retrieve_info, {
    if (bug_url() != "NA") {
      "Link"
    } else {
      "NA"
    }
  })

  # GitHub repository URL from DESCRIPTION, or "NA" when absent.
  github_url <- shiny::eventReactive(input$retrieve_info, {
    get_github_url <-
      pkg_details() %>%
      pkginfo::get_pkg_urls() %>%
      dplyr::filter(website == "GitHub") %>%
      nrow()
    if (get_github_url != 1) {
      "NA"
    } else {
      pkg_details() %>%
        pkginfo::get_pkg_urls() %>%
        dplyr::filter(website == "GitHub") %>%
        dplyr::pull(urls)
    }
  })

  github_url_link <- shiny::eventReactive(input$retrieve_info, {
    if (github_url() != "NA") {
      "Link"
    } else {
      "NA"
    }
  })

  # Any other package website (e.g. pkgdown docs), or "NA" when absent.
  website_url <- shiny::eventReactive(input$retrieve_info, {
    get_docs_url <-
      pkg_details() %>%
      pkginfo::get_pkg_urls() %>%
      dplyr::filter(website == "Others") %>%
      nrow()
    if (get_docs_url != 1) {
      "NA"
    } else {
      pkg_details() %>%
        pkginfo::get_pkg_urls() %>%
        dplyr::filter(website == "Others") %>%
        dplyr::pull(urls)
    }
  })

  website_url_link <- shiny::eventReactive(input$retrieve_info, {
    if (website_url() != "NA") {
      "Link"
    } else {
      "NA"
    }
  })

  # Lay out one labelled row of the Basic Info tab: a 1-column label
  # next to a 5-column value, centered by empty 3-column gutters.
  # (Replaces nine copies of the identical fluidRow scaffolding.)
  basic_info_row <- function(label, value) {
    shiny::fluidRow(
      shiny::column(3),
      shiny::column(1, align = 'left',
        shiny::h5(label)
      ),
      shiny::column(5, align = 'left',
        value
      ),
      shiny::column(3)
    )
  }

  # NOTE(review): these rows are keyed to `input$repo_name` (they fire while
  # the user is still typing), not to the Retrieve Info button like the
  # reactives above — confirm this is intentional.
  basic_info_title <- shiny::eventReactive(input$repo_name, {
    basic_info_row('Title ', shiny::h5(pkginfo::get_pkg_title(pkg_details())))
  })
  basic_info_desc <- shiny::eventReactive(input$repo_name, {
    basic_info_row('Description ', shiny::h5(pkginfo::get_pkg_desc(pkg_details())))
  })
  basic_info_version <- shiny::eventReactive(input$repo_name, {
    basic_info_row('Version ', shiny::h5(pkginfo::get_pkg_version(pkg_details())))
  })
  basic_info_pub <- shiny::eventReactive(input$repo_name, {
    basic_info_row('Published ', shiny::h5(pkginfo::get_pkg_publish_date(pkg_details())))
  })
  # Renamed from basic_info_maintainter (typo) — internal name only.
  basic_info_maintainer <- shiny::eventReactive(input$repo_name, {
    basic_info_row('Maintainer ', shiny::h5(pkginfo::get_pkg_maintainer(pkg_details())[[1]]))
  })
  basic_info_maintainer_email <- shiny::eventReactive(input$repo_name, {
    basic_info_row('Email ', shiny::h5(pkginfo::get_pkg_maintainer(pkg_details())[[2]]))
  })
  basic_info_cran <- shiny::eventReactive(input$repo_name, {
    basic_info_row('CRAN ',
      shiny::h5(shiny::tagList("", shiny::a("Link",
        href=paste0("https://CRAN.R-project.org/package=", input$repo_name),
        target="_blank"))))
  })
  basic_info_bug <- shiny::eventReactive(input$repo_name, {
    basic_info_row('Bugs ',
      shiny::h5(shiny::tagList("", shiny::a(bugs_url_link(), href=bug_url(),
        target="_blank"))))
  })
  basic_info_github <- shiny::eventReactive(input$repo_name, {
    basic_info_row('GitHub ',
      shiny::h5(shiny::tagList("", shiny::a(github_url_link(), href=github_url(),
        target="_blank"))))
  })
  basic_info_website <- shiny::eventReactive(input$repo_name, {
    basic_info_row('Website ',
      shiny::h5(shiny::tagList("", shiny::a(website_url_link(), href=website_url(),
        target="_blank"))))
  })

  # Basic Info tab outputs.
  output$out_basic_title <- shiny::renderUI({
    basic_info_title()
  })
  output$out_basic_desc <- shiny::renderUI({
    basic_info_desc()
  })
  output$out_basic_version <- shiny::renderUI({
    basic_info_version()
  })
  output$out_basic_pub <- shiny::renderUI({
    basic_info_pub()
  })
  output$out_basic_maintainer <- shiny::renderUI({
    basic_info_maintainer()
  })
  output$out_basic_maintainer_email <- shiny::renderUI({
    basic_info_maintainer_email()
  })
  output$out_basic_cran <- shiny::renderUI({
    basic_info_cran()
  })
  output$out_basic_bugs <- shiny::renderUI({
    basic_info_bug()
  })
  output$out_basic_github <- shiny::renderUI({
    basic_info_github()
  })
  output$out_basic_website <- shiny::renderUI({
    basic_info_website()
  })

  # Look up the GitHub owner for the package; "NA" when none is found.
  update_repo <- shiny::eventReactive(input$check_repo_name, {
    repo_name <- pkginfo::get_gh_username(input$repo_name)
    if (is.null(repo_name)) {
      "NA"
    } else {
      repo_name
    }
  })

  # Push the discovered owner back into the user_name text box.
  shiny::observe({
    shiny::updateTextInput(
      session,
      inputId = "user_name",
      value = update_repo()
    )
  })

  # On Retrieve Info: jump to the Basic Info tab and seed the download-plot
  # date range with the last full week (today-8 .. today-2).
  shiny::observeEvent(input$retrieve_info, {
    shinydashboard::updateTabItems(session, "tabs", "basic_info")
  })
  shiny::observeEvent(input$retrieve_info, {
    shiny::updateDateInput(
      session,
      inputId = "start_date",
      value = lubridate::today() - 8
    )
  })
  shiny::observeEvent(input$retrieve_info, {
    shiny::updateDateInput(
      session,
      inputId = "end_date",
      value = lubridate::today() - 2
    )
  })

  # Daily CRAN downloads over the selected date range.
  compute_downloads <- shiny::reactive({
    cranlogs::cran_downloads(input$repo_name, from = input$start_date,
                             to = input$end_date) %>%
      dplyr::select(date, count) %>%
      ggplot2::ggplot() +
      ggplot2::geom_line(ggplot2::aes(x = date, y = count), color = 'red') +
      ggplot2::xlab("Date") + ggplot2::ylab("Downloads") +
      ggplot2::ggtitle("CRAN Downloads")
  })
  output$downloads_plot <- shiny::renderPlot({
    compute_downloads()
  })
  output$cran_downloads <- shiny::renderTable({
    pkginfo::get_pkg_downloads(input$repo_name) %>%
      dplyr::rename(Latest = latest, `Last Week` = last_week,
                    `Last Month` = last_month, Total = total)
  })

  # # indicators: travis status (disabled)
  # travis_status <- shiny::eventReactive(input$retrieve_info, {
  #   if (input$user_name == "NA") {
  #     out <- NA
  #   } else {
  #     out <- pkginfo::get_status_travis(input$repo_name, input$user_name)
  #   }
  #   return(out)
  # })
  # output$travisBox <- shinydashboard::renderInfoBox({
  #   shinydashboard::infoBox(
  #     "Travis", travis_status(),
  #     icon = shiny::icon("list"),
  #     color = "purple"
  #   )
  # })
  # # indicators: appveyor status (disabled)
  # appveyor_status <- shiny::eventReactive(input$retrieve_info, {
  #   if (input$user_name == "NA") {
  #     out <- NA
  #   } else {
  #     out <- pkginfo::get_status_appveyor(input$repo_name, input$user_name)
  #   }
  #   return(out)
  # })
  # output$appveyorBox <- shinydashboard::renderInfoBox({
  #   shinydashboard::infoBox(
  #     "Appveyor", appveyor_status(),
  #     icon = shiny::icon("list"),
  #     color = "purple"
  #   )
  # })

  # indicators: code coverage
  code_status <- shiny::eventReactive(input$retrieve_info, {
    if (input$user_name == "NA") {
      NA
    } else {
      pkginfo::get_code_coverage(input$repo_name, input$user_name)
    }
  })
  output$coverageBox <- shinydashboard::renderInfoBox({
    shinydashboard::infoBox(
      "Coverage", code_status(),
      icon = shiny::icon("list"),
      color = "purple"
    )
  })

  # GitHub repo statistics, fetched ONCE per Retrieve Info click.
  # (The original called get_gh_stats() separately for stars, forks and
  # issues — three identical API hits.)
  gh_stats <- shiny::eventReactive(input$retrieve_info, {
    if (input$user_name == "NA") {
      NULL
    } else {
      pkginfo::get_gh_stats(input$repo_name, input$user_name)
    }
  })
  github_stars <- shiny::reactive({
    if (is.null(gh_stats())) NA else gh_stats()$stars
  })
  github_forks <- shiny::reactive({
    if (is.null(gh_stats())) NA else gh_stats()$forks
  })
  github_issues <- shiny::reactive({
    if (is.null(gh_stats())) NA else gh_stats()$issues
  })
  output$starsBox <- shinydashboard::renderValueBox({
    shinydashboard::valueBox(
      github_stars(), "Stars", icon = shiny::icon("list"),
      color = "purple"
    )
  })
  output$forksBox <- shinydashboard::renderValueBox({
    shinydashboard::valueBox(
      github_forks(), "Forks", icon = shiny::icon("list"),
      color = "purple"
    )
  })
  output$issuesBox <- shinydashboard::renderValueBox({
    shinydashboard::valueBox(
      github_issues(), "Issues", icon = shiny::icon("list"),
      color = "purple"
    )
  })

  # indicators: GitHub releases (count)
  github_releases <- shiny::eventReactive(input$retrieve_info, {
    if (input$user_name == "NA") {
      NA
    } else {
      pkginfo::get_gh_releases(input$repo_name, input$user_name) %>%
        nrow() %>%
        as.character()
    }
  })
  output$releasesBox <- shinydashboard::renderValueBox({
    shinydashboard::valueBox(
      github_releases(), "Releases", icon = shiny::icon("list"),
      color = "purple"
    )
  })

  # indicators: GitHub branches (count)
  github_branches <- shiny::eventReactive(input$retrieve_info, {
    if (input$user_name == "NA") {
      NA
    } else {
      pkginfo::get_gh_branches(input$repo_name, input$user_name) %>%
        nrow() %>%
        as.character()
    }
  })
  output$branchesBox <- shinydashboard::renderValueBox({
    shinydashboard::valueBox(
      github_branches(), "Branches", icon = shiny::icon("list"),
      color = "purple"
    )
  })

  # indicators: GitHub pull requests (count)
  github_prs <- shiny::eventReactive(input$retrieve_info, {
    if (input$user_name == "NA") {
      NA
    } else {
      pkginfo::get_gh_pr(input$repo_name, input$user_name) %>%
        nrow() %>%
        as.character()
    }
  })
  output$prBox <- shinydashboard::renderValueBox({
    shinydashboard::valueBox(
      github_prs(), "Pull Requests", icon = shiny::icon("list"),
      color = "purple"
    )
  })

  # indicators: imports / suggests / R version from DESCRIPTION
  imports <- shiny::reactive({
    pkg_details() %>%
      pkginfo::get_pkg_imports()
  })
  output$importsBox <- shinydashboard::renderValueBox({
    shinydashboard::valueBox(
      as.character(length(imports())), "Imports", icon = shiny::icon("list"),
      color = "purple"
    )
  })
  suggests <- shiny::reactive({
    pkg_details() %>%
      pkginfo::get_pkg_suggests()
  })
  output$suggestsBox <- shinydashboard::renderValueBox({
    shinydashboard::valueBox(
      as.character(length(suggests())), "Suggests", icon = shiny::icon("list"),
      color = "purple"
    )
  })
  output$versionBox <- shinydashboard::renderValueBox({
    shinydashboard::valueBox(
      pkginfo::get_pkg_r_dep(pkg_details()), "R Version",
      icon = shiny::icon("list"),
      color = "purple"
    )
  })

  # CRAN check results table. (Removed an unused `prep_url <- itable$URL`
  # leftover from copy-pasting the linked tables below.)
  cr_check <- shiny::eventReactive(input$retrieve_info, {
    pkginfo::get_pkg_cran_check_results(input$repo_name) %>%
      dplyr::rename(OS = os, R = r, Status = status) %>%
      knitr::kable("html", escape = FALSE) %>%
      kableExtra::kable_styling(bootstrap_options = c("hover", "condensed"), full_width = FALSE)
  })
  output$cran_check_results_table <- shiny::renderPrint({
    cr_check()
  })

  # Open GitHub issues, with per-issue links.
  github_issues_list <- shiny::eventReactive(input$retrieve_info, {
    if (input$user_name == "NA") {
      "There is no GitHub repository associated with this R package."
    } else {
      itable <-
        pkginfo::get_gh_issues(input$repo_name, input$user_name) %>%
        dplyr::rename(Date = date, Number = number, Author = author, Title = title)
      prep_url <-
        paste0("https://github.com/", input$user_name, "/", input$repo_name,
               "/issues/", itable$Number)
      itable %>%
        dplyr::mutate(Link = kableExtra::cell_spec("Link", "html", link = prep_url)) %>%
        knitr::kable("html", escape = FALSE) %>%
        kableExtra::kable_styling(bootstrap_options = c("hover", "condensed"), full_width = FALSE)
    }
  })
  output$gh_issues <- shiny::renderPrint({
    github_issues_list()
  })

  # GitHub releases, with per-tag links.
  github_releases_list <- shiny::eventReactive(input$retrieve_info, {
    if (input$user_name == "NA") {
      "There is no GitHub repository associated with this R package."
    } else {
      itable <-
        pkginfo::get_gh_releases(input$repo_name, input$user_name) %>%
        dplyr::rename(Tag = tag, Date = date, Title = title, Prerelease = prerelease)
      prep_url <-
        paste0("https://github.com/", input$user_name, "/", input$repo_name,
               "/releases/tag/", itable$Tag)
      itable %>%
        dplyr::mutate(Link = kableExtra::cell_spec("Link", "html", link = prep_url)) %>%
        knitr::kable("html", escape = FALSE) %>%
        kableExtra::kable_styling(bootstrap_options = c("hover", "condensed"), full_width = FALSE)
    }
  })
  output$gh_releases <- shiny::renderPrint({
    github_releases_list()
  })

  # GitHub branches, with per-branch links.
  github_branches_list <- shiny::eventReactive(input$retrieve_info, {
    if (input$user_name == "NA") {
      "There is no GitHub repository associated with this R package."
    } else {
      itable <-
        pkginfo::get_gh_branches(input$repo_name, input$user_name) %>%
        dplyr::rename(Branch = branches)
      # BUG FIX: the column is renamed to `Branch` above, but the original
      # built the URLs from `itable$Branches` (NULL), so every branch link
      # was broken.
      prep_url <-
        paste0("https://github.com/", input$user_name, "/", input$repo_name,
               "/tree/", itable$Branch)
      itable %>%
        dplyr::mutate(Link = kableExtra::cell_spec("Link", "html", link = prep_url)) %>%
        knitr::kable("html", escape = FALSE) %>%
        kableExtra::kable_styling(bootstrap_options = c("hover", "condensed"), full_width = FALSE)
    }
  })
  output$gh_branches <- shiny::renderPrint({
    github_branches_list()
  })

  # Open pull requests, with per-PR links.
  github_prs_list <- shiny::eventReactive(input$retrieve_info, {
    if (input$user_name == "NA") {
      "There is no GitHub repository associated with this R package."
    } else {
      itable <-
        pkginfo::get_gh_pr(input$repo_name, input$user_name) %>%
        dplyr::rename(Number = number, Date = date, Title = title, Status = status)
      prep_url <-
        paste0("https://github.com/", input$user_name, "/", input$repo_name,
               "/pull/", itable$Number)
      itable %>%
        dplyr::mutate(Link = kableExtra::cell_spec("Link", "html", link = prep_url)) %>%
        knitr::kable("html", escape = FALSE) %>%
        kableExtra::kable_styling(bootstrap_options = c("hover", "condensed"), full_width = FALSE)
    }
  })
  output$gh_prs <- shiny::renderPrint({
    github_prs_list()
  })

  # Stack Overflow questions tagged with the package.
  # NOTE(review): get_so_questions() only needs the package name, so gating
  # this on a missing GitHub user (and the GitHub-flavoured message text)
  # looks unintentional — confirm.
  github_so_list <- shiny::eventReactive(input$retrieve_info, {
    if (input$user_name == "NA") {
      "There are no questions associated with this R package on Stack Overflow."
    } else {
      itable <-
        pkginfo::get_so_questions(input$repo_name) %>%
        dplyr::rename(Date = date, Title = title, Owner = owner,
                      Answered = answered, Views = views, qlink = link)
      itable %>%
        dplyr::mutate(Link = kableExtra::cell_spec("Link", "html", link = itable$qlink)) %>%
        dplyr::select(-qlink) %>%
        knitr::kable("html", escape = FALSE) %>%
        kableExtra::kable_styling(bootstrap_options = c("hover", "condensed"), full_width = FALSE)
    }
  })
  output$gh_so <- shiny::renderPrint({
    github_so_list()
  })

  # Linked imports / suggests tables.
  # NOTE(review): `target = "_blank"` is not a documented knitr::kable()
  # argument — it is silently passed through `...`; if new-tab links are
  # wanted, cell_spec(new_tab = TRUE) is the supported route. Confirm.
  output$cran_imports <- shiny::renderPrint({
    imports() %>%
      tibble::tibble() %>%
      magrittr::set_colnames("packages") %>%
      dplyr::mutate(Imports = kableExtra::cell_spec(packages, "html",
        link = paste0("https://CRAN.R-project.org/package=", packages))
      ) %>%
      dplyr::select(Imports) %>%
      knitr::kable("html", escape = FALSE, target = "_blank") %>%
      kableExtra::kable_styling(bootstrap_options = c("hover", "condensed"), full_width = FALSE)
    }
  )
  output$cran_suggests <- shiny::renderPrint({
    suggests() %>%
      tibble::tibble() %>%
      magrittr::set_colnames("packages") %>%
      dplyr::mutate(Suggests = kableExtra::cell_spec(packages, "html",
        link = paste0("https://CRAN.R-project.org/package=", packages))
      ) %>%
      dplyr::select(Suggests) %>%
      knitr::kable("html", escape = FALSE, target = "_blank") %>%
      kableExtra::kable_styling(bootstrap_options = c("hover", "condensed"), full_width = FALSE)
    }
  )

  # Exit button stops the app cleanly.
  shiny::observeEvent(input$exit_button, {
    shiny::stopApp()
  })
}
shiny::shinyApp(ui, server)
|
998c4af8205b4cb2a4a79436d08a1e1b4e4d2d2f | 04419dc48831a321789f1ce169e5a983da4d4141 | /R/ResidualOutliers.R | 0632173defda2df438561a4bbddb807d34f35120 | [] | no_license | gwd999/RemixAutoML | fcfe309c845b6677c91dbf45c0f59ec2bf46a102 | 51130a874629d78b037f73eb01d3ec0b9cc8baa3 | refs/heads/master | 2020-06-23T02:41:48.446408 | 2019-07-23T17:25:20 | 2019-07-23T17:25:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,442 | r | ResidualOutliers.R | #' ResidualOutliers is an automated time series outlier detection function
#'
#' ResidualOutliers is an automated time series outlier detection function that utilizes tsoutliers and auto.arima. It looks for five types of outliers: "AO" Additive outlier - a singular extreme outlier that surrounding values aren't affected by; "IO" Innovational outlier - Initial outlier with subsequent anomalous values; "LS" Level shift - An initial outlier with subsequent observations being shifted by some constant on average; "TC" Transient change - initial outlier with lingering effects that dissipate exponentially over time; "SLS" Seasonal level shift - similar to level shift but on a seasonal scale.
#'
#' @author Adrian Antico
#' @family Unsupervised Learning
#' @param data the source residuals data.table
#' @param DateColName The name of your data column to use in reference to the target variable
#' @param TargetColName The name of your target variable column
#' @param PredictedColName The name of your predicted value column. If you supply this, you will run anomaly detection of the difference between the target variable and your predicted value. If you leave PredictedColName NULL then you will run anomaly detection over the target variable.
#' @param TimeUnit The time unit of your date column: hour, day, week, month, quarter, year
#' @param maxN the largest lag or moving average (seasonal too) values for the arima fit
#' @param tstat the t-stat value for tsoutliers
#' @examples
#' data <- data.table::data.table(DateTime = as.Date(Sys.time()),
#' Target = as.numeric(stats::filter(rnorm(1000,
#' mean = 50,
#' sd = 20),
#' filter=rep(1,10),
#' circular=TRUE)))
#' data[, temp := seq(1:1000)][, DateTime := DateTime - temp][, temp := NULL]
#' data <- data[order(DateTime)]
#' data[, Predicted := as.numeric(stats::filter(rnorm(1000,
#' mean = 50,
#' sd = 20),
#' filter=rep(1,10),
#' circular=TRUE))]
#' stuff <- ResidualOutliers(data = data,
#' DateColName = "DateTime",
#' TargetColName = "Target",
#' PredictedColName = NULL,
#' TimeUnit = "day",
#' maxN = 5,
#' tstat = 4)
#' data <- stuff[[1]]
#' model <- stuff[[2]]
#' outliers <- data[type != "<NA>"]
#' @return A named list containing FullData = original data.table with outliers data and ARIMA_MODEL = the arima model.
#' @export
ResidualOutliers <- function(data,
                             DateColName = "DateTime",
                             TargetColName = "Target",
                             PredictedColName = NULL,
                             TimeUnit = "day",
                             maxN = 5,
                             tstat = 2) {
  # Map the time unit onto a ts() frequency. Fail fast on unsupported units:
  # the original only warn()ed, leaving `freq` undefined and producing a
  # cryptic "object 'freq' not found" error at stats::ts() below.
  freq <- switch(tolower(TimeUnit),
                 hour = 24,
                 day = 365,
                 week = 52,
                 month = 12,
                 quarter = 4,
                 year = 1,
                 stop("TimeUnit must be one of: hour, day, week, month, quarter, year",
                      call. = FALSE))
  # Ensure data is a data.table, sorted by date.
  if (!data.table::is.data.table(data)) {
    data <- data.table::as.data.table(data)
  }
  data.table::setorderv(x = data, cols = DateColName, order = 1)
  # Residuals = target - predicted when a prediction column is supplied,
  # otherwise anomaly detection runs on the raw target.
  if (!is.null(PredictedColName)) {
    data[, Residuals := get(TargetColName) - get(PredictedColName)]
  } else {
    data[, Residuals := get(TargetColName)]
  }
  keep <- c(DateColName, "Residuals")
  temp <- data[, ..keep]
  MinVal <- min(data[[TargetColName]], na.rm = TRUE)
  # Convert to a time series object.
  tsData <- stats::ts(temp,
                      start = temp[, min(as.POSIXct(get(DateColName)))][[1]],
                      frequency = freq)
  # Fit the auto arima. Box-Cox transform / bias adjustment only when the
  # target is strictly positive (as in the original's two-branch version);
  # the call is otherwise identical for both cases, so it is made once.
  fit <- tryCatch(
    forecast::auto.arima(
      y = tsData[, "Residuals"],
      max.p = maxN,
      max.q = maxN,
      max.P = maxN,
      max.Q = maxN,
      max.d = 1,
      max.D = 1,
      ic = "bic",
      lambda = MinVal > 0,
      biasadj = MinVal > 0,
      stepwise = TRUE
    ),
    # The original swallowed the error and returned the string "empty",
    # which then crashed tsoutliers::coefs2poly() with a confusing message.
    # Surface the real failure instead.
    error = function(e) {
      stop("forecast::auto.arima() failed to fit the residual series: ",
           conditionMessage(e), call. = FALSE)
    }
  )
  # Store the arima parameters and residuals.
  pars <- tsoutliers::coefs2poly(fit)
  resid <- cbind(tsData, stats::residuals(fit))
  # Locate the five outlier types at the requested t-stat threshold.
  x <- data.table::as.data.table(tsoutliers::locate.outliers(
    resid = resid[, 3],
    pars = pars,
    cval = tstat,
    types = c("AO", "TC", "LS", "IO", "SLS")
  ))
  # Merge outlier flags back to the source data by row index.
  residDT <- data.table::as.data.table(resid)
  z <- cbind(data, residDT)
  z[, ind := seq_len(.N)]
  data.table::setnames(z,
                       names(z)[c((ncol(z) - 3):(ncol(z) - 1))],
                       c("ObsNum", "Preds", "ARIMA_Residuals"))
  z[, ObsNum := NULL]
  data <- merge(z, x, by = "ind", all.x = TRUE)
  # Drop bookkeeping columns; normalize the "<NA>" outlier-type string to NA.
  data[, ':=' (ind = NULL, coefhat = NULL)]
  data[type == "<NA>", type := NA]
  return(list(FullData = data, ARIMA_MODEL = fit))
}
|
e98ff1b43591634d67dd1da985b7cb156b703a5b | d21c2a13b845de156f1b63c2bc7070dd3cf08872 | /man/radioMatHeader.Rd | 6b23c28d8d1076e15b1105b048130c1b472f9c5a | [
"MIT"
] | permissive | nklepeis/shinysurveys | bdab20401cac7839f55984983705de6f060b6614 | 5706c798dd41e5eb6bd008639172faba211488a1 | refs/heads/main | 2023-07-09T15:43:49.789151 | 2021-08-20T01:25:20 | 2021-08-20T01:25:20 | 397,759,626 | 0 | 0 | NOASSERTION | 2021-08-18T23:26:06 | 2021-08-18T23:26:06 | null | UTF-8 | R | false | true | 508 | rd | radioMatHeader.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/input_radioMatrixInput.R
\name{radioMatHeader}
\alias{radioMatHeader}
\title{Create the radio matrix input's header}
\usage{
radioMatHeader(.choices, .required)
}
\arguments{
\item{.choices}{Possible choices}
\item{.required}{Logical: TRUE/FALSE should a required asterisk be placed on the matrix question}
}
\value{
Header for the table (matrix input)
}
\description{
Create the radio matrix input's header
}
\keyword{internal}
|
bc2fc2b9ecf9a4bead787cd23b6fb7497a08e824 | a49c9e0fadc13481f4312a3f787b29cabbd8998f | /cachematrix.R | 32870167fab1cff7559abbd1cc153f6418436a07 | [] | no_license | Dooozy/test | 6560161b374d045690f1d135bbe3be5a8e633805 | 61a08db5473dae98b97aba34d4bbd207ec8988ec | refs/heads/main | 2023-03-01T23:15:04.580301 | 2021-02-10T01:38:10 | 2021-02-10T01:38:10 | 337,585,961 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,321 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## functions do
#Below are two functions that are used to create a special object that stores a
#"matrix" object and cache's its inverse matrix.
## Write a short comment describing this function
# This function creates a special "matrix" object that can cache its inverse.
# Create a special "matrix" object that can cache its inverse.
# Returns a list of four accessors: set/get the matrix, setinv/getinv
# the cached inverse (computed lazily by cacheSolve()).
makeCacheMatrix <- function(x = matrix()) {
# Cached inverse; NULL until cacheSolve() stores it.
inv <- NULL
# Replace the stored matrix and invalidate the cache.
# BUG FIX: the original assigned `inverse <<- NULL`, which created a stray
# `inverse` variable in the global environment and left the STALE cached
# inverse in place after set() was called.
set <- function(y){
x <<- y
inv <<- NULL
}
get <- function()x
setinv <- function(inverse) inv <<- inverse
getinv <- function() inv
list(set = set, get = get, setinv = setinv,
getinv = getinv)
}
## Write a short comment describing this function
#This function computes the inverse of the special "matrix" created by
# makeCacheMatrix above. It first checks to see if the inverse has already been
#calculated. If so, it gets the inverse from the cache and skips the
#computation. Otherwise, it calculates the inverse of the data and sets the
#value of the inverse in the cache via the setinv function.
# Return the inverse of the special "matrix" `x` (made by makeCacheMatrix),
# computing it at most once: a cached result is reused, otherwise the
# inverse is computed with solve(), stored via x$setinv(), and returned.
# Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
cached <- x$getinv()
if (!is.null(cached)) {
message("getting cached data")
return(cached)
}
result <- solve(x$get(), ...)
x$setinv(result)
result
}
|
b1d22525313b2b1345b69cfe0a26e48e07432f3c | b8bfe27d3c2ac7837c5d67ff09da9127ba8bbb0b | /mason_herit_sm.R | 68b27abd3eebe26a65add5e86977a5e039bd961f | [] | no_license | mason-kulbaba/geum-aster | 85d1ab1bb7bb90354f8c27c3656513f523d0097a | 87e052b3c1a5f231e863c2f4e24b4f272c938f71 | refs/heads/master | 2023-07-23T04:41:45.407318 | 2023-07-18T17:48:27 | 2023-07-18T17:48:27 | 183,484,654 | 0 | 3 | null | 2019-07-08T20:19:12 | 2019-04-25T17:58:11 | R | UTF-8 | R | false | false | 16,481 | r | mason_herit_sm.R | setwd("C:/Users/Mason Kulbaba/Dropbox/git/geum-aster/")
# Load field data for the Geum heritability analysis.
# NOTE(review): absolute Windows path makes this script non-portable; confirm
# relative paths (or here::here()) are acceptable.
dat<- read.csv("C:/Users/Mason Kulbaba/Dropbox/git/geum-aster/final_data.csv")
# MASS provides fitdistr() below (loaded a second time before that call).
library(MASS)
# Keep only plants that germinated for the post-germination traits.
dat2<- subset(dat, Germination.Y.N== 1)
# combine 2016 + 2017 sm to reflect how fitness was estimated in 2017
dat2$sm.2b<- dat2$sm + dat2$sm.2
# Total seed production across all three years (2016 + 2017 + 2018).
dat2$tot.seeds<- dat2$sm + dat2$sm.2 + dat2$sm.3
# Split germinated plants by source region.
dat.gla<- subset(dat2, Region=="GL_alvar")
dat.mba<- subset(dat2, Region=="MB_alvar")
dat.pr<- subset(dat2, Region=="Prairie")
#dists for p->Germ
# Compare candidate error distributions for planting-to-first-flower days
# (GL alvar data); the lowest AIC below indicates the best fit.
library(MASS)
norm<- fitdistr(dat.gla$Planting.to.DTFF.2017, "normal")
poi<- fitdistr(dat.gla$Planting.to.DTFF.2017, "Poisson")
negb<- fitdistr(dat.gla$Planting.to.DTFF.2017, "negative binomial")
AIC(norm, poi, negb)
#install.packages("lme4")
library(lme4)
#make block a factor
dat.gla$Block.ID<- as.factor(dat.gla$Block.ID)
dat.mba$Block.ID<- as.factor(dat.mba$Block.ID)
dat.pr$Block.ID<- as.factor(dat.pr$Block.ID)
############################
# Start with Germination
# Germination data keeps ALL planted seeds (not just germinants), by region.
datG.gla<- subset(dat, Region=="GL_alvar")
datG.mba<- subset(dat, Region=="MB_alvar")
datG.pr<- subset(dat, Region=="Prairie")
datG.gla$Block.ID<- as.factor(datG.gla$Block.ID)
datG.mba$Block.ID<- as.factor(datG.mba$Block.ID)
datG.pr$Block.ID<- as.factor(datG.pr$Block.ID)
##########################################################
# Number of tot.seeds: GL, MB, and prairie
# For each region: random-intercept models of total seed number with
# Family.Unique (maternal line) as the genetic random term, fit under
# Gaussian (lmer), Poisson, and negative-binomial errors; AIC below is used
# to choose the error distribution for the heritability extraction.
# NOTE(review): `maxiter` is not a documented glmer()/glmer.nb() argument —
# confirm it has any effect (optimizer settings normally go via glmerControl()).
#models for : GL_alvar
gla_tot.seeds.1<- lmer(tot.seeds ~ Block.ID + Population + (1| Family.Unique),
data=dat.gla)
gla_tot.seeds.2<- glmer(tot.seeds ~ Block.ID + Population + (1| Family.Unique),
data=dat.gla, family="poisson", maxiter=500)
gla_tot.seeds.3<- glmer.nb(tot.seeds ~ Block.ID + Population + (1| Family.Unique),
data=dat.gla, maxiter=500)
summary(gla_tot.seeds.1)
summary(gla_tot.seeds.2)
summary(gla_tot.seeds.3)
AIC(gla_tot.seeds.1)
AIC(gla_tot.seeds.2)
AIC(gla_tot.seeds.3)
#models for tot.seeds: MB_alvar
mba_tot.seeds.1<- lmer(tot.seeds ~ Block.ID + Population + (1| Family.Unique),
data=dat.mba)
mba_tot.seeds.2<- glmer(tot.seeds ~ Block.ID + Population + (1| Family.Unique),
data=dat.mba, family="poisson", maxiter=500)
mba_tot.seeds.3<- glmer.nb(tot.seeds ~ Block.ID + Population + (1| Family.Unique),
data=dat.mba, maxiter=500)
summary(mba_tot.seeds.1)
summary(mba_tot.seeds.2)
summary(mba_tot.seeds.3)
AIC(mba_tot.seeds.1)
AIC(mba_tot.seeds.2)
AIC(mba_tot.seeds.3)
#models for tot.seeds: Prairie
pr_tot.seeds.1<- lmer(tot.seeds ~ Block.ID + Population + (1| Family.Unique),
data=dat.pr)
pr_tot.seeds.2<- glmer(tot.seeds ~ Block.ID + Population + (1| Family.Unique),
data=dat.pr, family="poisson", maxiter=500)
pr_tot.seeds.3<- glmer.nb(tot.seeds ~ Block.ID + Population + (1| Family.Unique),
data=dat.pr, maxiter=500)
summary(pr_tot.seeds.1)
summary(pr_tot.seeds.2)
summary(pr_tot.seeds.3)
AIC(pr_tot.seeds.1)
AIC(pr_tot.seeds.2)
AIC(pr_tot.seeds.3)
#extract family-level variance in total seed number and compute h2 = Va / Vp
#for each region. The negative-binomial fits (*.3) are used for all three
#regions.
# NOTE(review): the back-transformations below (logit for GL/MB, 10^x for
# Prairie) are unusual for a variance component from a log-link count model —
# confirm the intended observed-scale conversion (e.g. QGglmm /
# de Villemereuil et al. 2016) before reporting these h2 values.
#GL_alvar (negative-binomial model gla_tot.seeds.3)
glavars.tot.seeds <- as.data.frame(VarCorr(gla_tot.seeds.3))
glavars.tot.seeds
print(VarCorr(gla_tot.seeds.3), comp = "Variance")
va.gla.tot.seeds <- glavars.tot.seeds[1,4]
va.gla.tot.seeds
#convert to response scale -
Va.gla.tot.seeds<- log(va.gla.tot.seeds/(1-va.gla.tot.seeds))
Va.gla.tot.seeds
#remove NAs for calc of var
days.tot.seeds <- dat.gla$tot.seeds[!is.na(dat.gla$tot.seeds)]
vp.gla.tot.seeds<- var(days.tot.seeds)
vp.gla.tot.seeds
h2.gla.tot.seeds<- Va.gla.tot.seeds/vp.gla.tot.seeds
h2.gla.tot.seeds #0
#MB_alvar (negative-binomial model mba_tot.seeds.3)
mbavars.tot.seeds <- as.data.frame(VarCorr(mba_tot.seeds.3))
mbavars.tot.seeds
print(VarCorr(mba_tot.seeds.3), comp = "Variance")
va.mba.tot.seeds <- mbavars.tot.seeds[1,4]
va.mba.tot.seeds
#convert to response scale -
Va.mba.tot.seeds<- log(va.mba.tot.seeds/(1-va.mba.tot.seeds))
Va.mba.tot.seeds
#remove NAs for calc of var
days.tot.seeds <- dat.mba$tot.seeds[!is.na(dat.mba$tot.seeds)]
vp.mba.tot.seeds<- var(days.tot.seeds)
vp.mba.tot.seeds
h2.mba.tot.seeds<- Va.mba.tot.seeds/vp.mba.tot.seeds
h2.mba.tot.seeds #0
#Prairie (negative-binomial model pr_tot.seeds.3)
prvars.tot.seeds <- as.data.frame(VarCorr(pr_tot.seeds.3))
prvars.tot.seeds
print(VarCorr(pr_tot.seeds.3), comp = "Variance")
# BUG FIX: the original indexed mbavars.tot.seeds here (copy-paste from the
# MB_alvar section), so the Prairie h2 was computed from the MB variance.
va.pr.tot.seeds <- prvars.tot.seeds[1,4]
va.pr.tot.seeds
#convert to response scale - antilog
Va.pr.tot.seeds<- 10^(va.pr.tot.seeds)
Va.pr.tot.seeds
#remove NAs for calc of var
days.tot.seeds <- dat.pr$tot.seeds[!is.na(dat.pr$tot.seeds)]
vp.pr.tot.seeds<- var(days.tot.seeds)
vp.pr.tot.seeds
h2.pr.tot.seeds<- Va.pr.tot.seeds/vp.pr.tot.seeds
h2.pr.tot.seeds #0
##################################################################################
# 2016 Seed Mass: sm
# For each seed zone (GL alvar, MB alvar, Prairie) fit three candidate error
# distributions to seed mass -- Gaussian (lmer), Poisson (glmer) and negative
# binomial (glmer.nb) -- with Block and Population as fixed effects and Family
# as a random intercept, then compare the fits by AIC.
# NOTE(review): maxiter is not a formal argument of glmer()/glmer.nb(); it is
# absorbed by '...' and most likely ignored. Use control = glmerControl(...)
# if an iteration cap is really intended -- TODO confirm.
#models for : GL_alvar
gla_sm.1<- lmer(sm ~ Block.ID + Population + (1| Family.Unique),
data=dat.gla)
gla_sm.2<- glmer(sm ~ Block.ID + Population + (1| Family.Unique),
data=dat.gla, family="poisson", maxiter=500)
gla_sm.3<- glmer.nb(sm ~ Block.ID + Population + (1| Family.Unique),
data=dat.gla, maxiter=500)
summary(gla_sm.1)
summary(gla_sm.2)
summary(gla_sm.3)
AIC(gla_sm.1)
AIC(gla_sm.2)
AIC(gla_sm.3)
#models for germination: MB_alvar
mba_sm.1<- lmer(sm ~ Block.ID + Population + (1| Family.Unique),
data=dat.mba)
mba_sm.2<- glmer(sm ~ Block.ID + Population + (1| Family.Unique),
data=dat.mba, family="poisson", maxiter=500)
mba_sm.3<- glmer.nb(sm ~ Block.ID + Population + (1| Family.Unique),
data=dat.mba, maxiter=500)
summary(mba_sm.1)
summary(mba_sm.2)
summary(mba_sm.3)
AIC(mba_sm.1)
AIC(mba_sm.2)
AIC(mba_sm.3)
#models for germination: Prairie
pr_sm.1<- lmer(sm ~ Block.ID + Population + (1| Family.Unique),
data=dat.pr)
pr_sm.2<- glmer(sm ~ Block.ID + Population + (1| Family.Unique),
data=dat.pr, family="poisson", maxiter=500)
pr_sm.3<- glmer.nb(sm ~ Block.ID + Population + (1| Family.Unique),
data=dat.pr, maxiter=500)
summary(pr_sm.1)
summary(pr_sm.2)
summary(pr_sm.3)
AIC(pr_sm.1)
AIC(pr_sm.2)
AIC(pr_sm.3)
# Extract family-level variance in seed mass and compute a heritability-style
# ratio h2 = (family variance on the response scale) / (phenotypic variance).
# NOTE(review): for half-sib families Va is usually taken as 4 x the family
# variance -- confirm whether omitting that multiplier is intentional.
#estract family-level variance in seed mass
#GL_alvar
#extract variance component - gaussian dist
glavars.sm <- as.data.frame(VarCorr(gla_sm.1))
glavars.sm
print(VarCorr(gla_sm.1), comp = "Variance")
# [1,4] is the variance (vcov) of the first (Family) random effect
va.gla.sm <-glavars.sm[1,4]
va.gla.sm
#convert to response scale - identity
Va.gla.sm<- va.gla.sm
Va.gla.sm
#remove NAs for calc of var
days.sm <- dat.gla$sm[!is.na(dat.gla$sm)]
vp.gla.sm<- var(days.sm)
vp.gla.sm
h2.gla.sm<- Va.gla.sm/vp.gla.sm
h2.gla.sm #0.012
#MB_alvar - poisson
mbavars.sm <- as.data.frame(VarCorr(mba_sm.1))
mbavars.sm
print(VarCorr(mba_sm.1), comp = "Variance")
va.mba.sm <-mbavars.sm[1,4]
va.mba.sm
#convert to response scale -
# NOTE(review): log10 of a variance component from a Gaussian fit -- this is
# inconsistent with the identity / 10^ transforms used for the other zones
# and years; confirm the intended back-transform.
Va.mba.sm<- log10(va.mba.sm)
Va.mba.sm
#remove NAs for calc of var
days.sm <- dat.mba$sm[!is.na(dat.mba$sm)]
vp.mba.sm<- var(days.sm)
vp.mba.sm
h2.mba.sm<- Va.mba.sm/vp.mba.sm
h2.mba.sm #0
#Prairie - Poisson
prvars.sm <- as.data.frame(VarCorr(pr_sm.1))
prvars.sm
print(VarCorr(pr_sm.1), comp = "Variance")
va.pr.sm <-prvars.sm[1,4]
va.pr.sm
#convert to response scale - identity
Va.pr.sm<- va.pr.sm
Va.pr.sm
#remove NAs for calc of var
days.sm <- dat.pr$sm[!is.na(dat.pr$sm)]
vp.pr.sm<- var(days.sm)
vp.pr.sm
h2.pr.sm<- Va.pr.sm/vp.pr.sm
h2.pr.sm#0
##################################################################################
# 2017 Seed Mass: sm.2
# Same three-distribution model comparison as the 2016 section, per seed zone.
# NOTE(review): maxiter is not a formal glmer()/glmer.nb() argument; it is
# absorbed by '...'. Use control = glmerControl(...) if a cap is intended.
#models for : GL_alvar
gla_sm.2.1<- lmer(sm.2 ~ Block.ID + Population + (1| Family.Unique),
data=dat.gla)
gla_sm.2.2<- glmer(sm.2 ~ Block.ID + Population + (1| Family.Unique),
data=dat.gla, family="poisson", maxiter=500)
gla_sm.2.3<- glmer.nb(sm.2 ~ Block.ID + Population + (1| Family.Unique),
data=dat.gla, maxiter=500)
summary(gla_sm.2.1)
summary(gla_sm.2.2)
summary(gla_sm.2.3)
AIC(gla_sm.2.1)
AIC(gla_sm.2.2)
AIC(gla_sm.2.3)
#models for germination: MB_alvar
mba_sm.2.1<- lmer(sm.2 ~ Block.ID + Population + (1| Family.Unique),
data=dat.mba)
mba_sm.2.2<- glmer(sm.2 ~ Block.ID + Population + (1| Family.Unique),
data=dat.mba, family="poisson", maxiter=500)
mba_sm.2.3<- glmer.nb(sm.2 ~ Block.ID + Population + (1| Family.Unique),
data=dat.mba, maxiter=500)
summary(mba_sm.2.1)
summary(mba_sm.2.2)
summary(mba_sm.2.3)
AIC(mba_sm.2.1)
AIC(mba_sm.2.2)
AIC(mba_sm.2.3)
#models for germination: Prairie
pr_sm.2.1<- lmer(sm.2 ~ Block.ID + Population + (1| Family.Unique),
data=dat.pr)
pr_sm.2.2<- glmer(sm.2 ~ Block.ID + Population + (1| Family.Unique),
data=dat.pr, family="poisson", maxiter=500)
pr_sm.2.3<- glmer.nb(sm.2 ~ Block.ID + Population + (1| Family.Unique),
data=dat.pr, maxiter=500)
summary(pr_sm.2.1)
summary(pr_sm.2.2)
summary(pr_sm.2.3)
AIC(pr_sm.2.1)
AIC(pr_sm.2.2)
AIC(pr_sm.2.3)
# Extract family-level variance in seed mass; h2 = Va / Vp as in 2016.
#estract family-level variance in seed mass
#GL_alvar
#extract variance component - negative binomial fit
glavars.sm.2 <- as.data.frame(VarCorr(gla_sm.2.3))
glavars.sm.2
print(VarCorr(gla_sm.2.3), comp = "Variance")
va.gla.sm.2 <-glavars.sm.2[1,4]
va.gla.sm.2
#convert to response scale -
Va.gla.sm.2<- 10^va.gla.sm.2
Va.gla.sm.2
#remove NAs for calc of var
days.sm.2 <- dat.gla$sm.2[!is.na(dat.gla$sm.2)]
vp.gla.sm.2<- var(days.sm.2)
vp.gla.sm.2
h2.gla.sm.2<- Va.gla.sm.2/vp.gla.sm.2
h2.gla.sm.2 #0
#MB_alvar
# NOTE(review): the variance is extracted from the Gaussian fit (mba_sm.2.1)
# while every other zone/year uses the negative-binomial fit (.3). The print
# below previously showed mba_sm.2.3, i.e. a model *different* from the one
# actually extracted; it now matches the extracted model. Confirm which model
# (.1 vs .3) is intended, based on the AIC comparison above.
mbavars.sm.2 <- as.data.frame(VarCorr(mba_sm.2.1))
mbavars.sm.2
print(VarCorr(mba_sm.2.1), comp = "Variance")
va.mba.sm.2 <-mbavars.sm.2[1,4]
va.mba.sm.2
#convert to response scale -
# NOTE(review): natural log here, versus 10^ / identity elsewhere -- confirm.
Va.mba.sm.2<- log(va.mba.sm.2)
Va.mba.sm.2
#remove NAs for calc of var
days.sm.2 <- dat.mba$sm.2[!is.na(dat.mba$sm.2)]
vp.mba.sm.2<- var(days.sm.2)
vp.mba.sm.2
h2.mba.sm.2<- Va.mba.sm.2/vp.mba.sm.2
h2.mba.sm.2 #0
#Prairie - negative binomial
prvars.sm.2 <- as.data.frame(VarCorr(pr_sm.2.3))
prvars.sm.2
# BUGFIX: was print(VarCorr(pr_tot.sm.2.3), ...) -- pr_tot.sm.2.3 is never
# defined anywhere in this script and the call errored at runtime.
print(VarCorr(pr_sm.2.3), comp = "Variance")
va.pr.sm.2 <-prvars.sm.2[1,4]
va.pr.sm.2
#convert to response scale - identity
Va.pr.sm.2<- va.pr.sm.2
Va.pr.sm.2
#remove NAs for calc of var
days.sm.2 <- dat.pr$sm.2[!is.na(dat.pr$sm.2)]
vp.pr.sm.2<- var(days.sm.2)
vp.pr.sm.2
h2.pr.sm.2<- Va.pr.sm.2/vp.pr.sm.2
h2.pr.sm.2#0
##################################################################################
# 2018 Seed Mass: sm.3
# Same three-distribution model comparison as the 2016/2017 sections.
# NOTE(review): maxiter is not a formal glmer()/glmer.nb() argument; it is
# absorbed by '...'. Use control = glmerControl(...) if a cap is intended.
#models for : GL_alvar
gla_sm.3.1<- lmer(sm.3 ~ Block.ID + Population + (1| Family.Unique),
data=dat.gla)
gla_sm.3.2<- glmer(sm.3 ~ Block.ID + Population + (1| Family.Unique),
data=dat.gla, family="poisson", maxiter=500)
gla_sm.3.3<- glmer.nb(sm.3 ~ Block.ID + Population + (1| Family.Unique),
data=dat.gla, maxiter=500)
summary(gla_sm.3.1)
summary(gla_sm.3.2)
summary(gla_sm.3.3)
AIC(gla_sm.3.1)
AIC(gla_sm.3.2)
AIC(gla_sm.3.3)
#models for germination: MB_alvar
mba_sm.3.1<- lmer(sm.3 ~ Block.ID + Population + (1| Family.Unique),
data=dat.mba)
mba_sm.3.2<- glmer(sm.3 ~ Block.ID + Population + (1| Family.Unique),
data=dat.mba, family="poisson", maxiter=500)
mba_sm.3.3<- glmer.nb(sm.3 ~ Block.ID + Population + (1| Family.Unique),
data=dat.mba, maxiter=500)
summary(mba_sm.3.1)
summary(mba_sm.3.2)
summary(mba_sm.3.3)
AIC(mba_sm.3.1)
AIC(mba_sm.3.2)
AIC(mba_sm.3.3)
#models for germination: Prairie
pr_sm.3.1<- lmer(sm.3 ~ Block.ID + Population + (1| Family.Unique),
data=dat.pr)
pr_sm.3.2<- glmer(sm.3 ~ Block.ID + Population + (1| Family.Unique),
data=dat.pr, family="poisson", maxiter=500)
pr_sm.3.3<- glmer.nb(sm.3 ~ Block.ID + Population + (1| Family.Unique),
data=dat.pr, maxiter=500)
summary(pr_sm.3.1)
summary(pr_sm.3.2)
summary(pr_sm.3.3)
AIC(pr_sm.3.1)
AIC(pr_sm.3.2)
AIC(pr_sm.3.3)
# Extract family-level variance in seed mass; h2 = Va / Vp as in 2016.
#estract family-level variance in seed mass
#GL_alvar
#extract variance component - negative binomial fit
glavars.sm.3 <- as.data.frame(VarCorr(gla_sm.3.3))
glavars.sm.3
print(VarCorr(gla_sm.3.3), comp = "Variance")
va.gla.sm.3 <-glavars.sm.3[1,4]
va.gla.sm.3
#convert to response scale -
Va.gla.sm.3<- 10^va.gla.sm.3
Va.gla.sm.3
#remove NAs for calc of var
days.sm.3 <- dat.gla$sm.3[!is.na(dat.gla$sm.3)]
vp.gla.sm.3<- var(days.sm.3)
vp.gla.sm.3
h2.gla.sm.3<- Va.gla.sm.3/vp.gla.sm.3
h2.gla.sm.3 #
#MB_alvar - negative binomial
mbavars.sm.3 <- as.data.frame(VarCorr(mba_sm.3.3))
mbavars.sm.3
print(VarCorr(mba_sm.3.3), comp = "Variance")
va.mba.sm.3 <-mbavars.sm.3[1,4]
va.mba.sm.3
#convert to response scale -
Va.mba.sm.3<- 10^(va.mba.sm.3)
Va.mba.sm.3
#remove NAs for calc of var
days.sm.3 <- dat.mba$sm.3[!is.na(dat.mba$sm.3)]
vp.mba.sm.3<- var(days.sm.3)
vp.mba.sm.3
h2.mba.sm.3<- Va.mba.sm.3/vp.mba.sm.3
h2.mba.sm.3 #0
#Prairie - negative binomial
prvars.sm.3 <- as.data.frame(VarCorr(pr_sm.3.3))
prvars.sm.3
print(VarCorr(pr_sm.3.3), comp = "Variance")
# BUGFIX: was va.pr.sm.3 <- mbavars.sm.3[1,4] -- a copy-paste error that used
# the MB_alvar variance for the Prairie zone; prvars.sm.3 (created two lines
# above) was never used. The Prairie h2 below previously reflected MB_alvar.
va.pr.sm.3 <-prvars.sm.3[1,4]
va.pr.sm.3
#convert to response scale -
Va.pr.sm.3<-10^ va.pr.sm.3
Va.pr.sm.3
#remove NAs for calc of var
days.sm.3 <- dat.pr$sm.3[!is.na(dat.pr$sm.3)]
vp.pr.sm.3<- var(days.sm.3)
vp.pr.sm.3
h2.pr.sm.3<- Va.pr.sm.3/vp.pr.sm.3
h2.pr.sm.3#0
##################################################################################
# 2017 Seed Mass: sm.2b
# NOTE: sm.2b = sm + sm.2. This better reflets how fitness was estiamted
# Same three-distribution model comparison as the other seed-mass sections,
# applied to the cumulative 2016+2017 seed mass.
# NOTE(review): maxiter is not a formal glmer()/glmer.nb() argument; it is
# absorbed by '...'. Use control = glmerControl(...) if a cap is intended.
#models for : GL_alvar
gla_sm.2b.1<- lmer(sm.2b ~ Block.ID + Population + (1| Family.Unique),
data=dat.gla)
gla_sm.2b.2<- glmer(sm.2b ~ Block.ID + Population + (1| Family.Unique),
data=dat.gla, family="poisson", maxiter=500)
gla_sm.2b.3<- glmer.nb(sm.2b ~ Block.ID + Population + (1| Family.Unique),
data=dat.gla, maxiter=500)
summary(gla_sm.2b.1)
summary(gla_sm.2b.2)
summary(gla_sm.2b.3)
AIC(gla_sm.2b.1)
AIC(gla_sm.2b.2)
AIC(gla_sm.2b.3)
#models for germination: MB_alvar
mba_sm.2b.1<- lmer(sm.2b ~ Block.ID + Population + (1| Family.Unique),
data=dat.mba)
mba_sm.2b.2<- glmer(sm.2b ~ Block.ID + Population + (1| Family.Unique),
data=dat.mba, family="poisson", maxiter=500)
mba_sm.2b.3<- glmer.nb(sm.2b ~ Block.ID + Population + (1| Family.Unique),
data=dat.mba, maxiter=500)
summary(mba_sm.2b.1)
summary(mba_sm.2b.2)
summary(mba_sm.2b.3)
AIC(mba_sm.2b.1)
AIC(mba_sm.2b.2)
AIC(mba_sm.2b.3)
#models for germination: Prairie
pr_sm.2b.1<- lmer(sm.2b ~ Block.ID + Population + (1| Family.Unique),
data=dat.pr)
pr_sm.2b.2<- glmer(sm.2b ~ Block.ID + Population + (1| Family.Unique),
data=dat.pr, family="poisson", maxiter=500)
pr_sm.2b.3<- glmer.nb(sm.2b ~ Block.ID + Population + (1| Family.Unique),
data=dat.pr, maxiter=500)
summary(pr_sm.2b.1)
summary(pr_sm.2b.2)
summary(pr_sm.2b.3)
AIC(pr_sm.2b.1)
AIC(pr_sm.2b.2)
AIC(pr_sm.2b.3)
# Extract family-level variance (negative-binomial fits, .3) and compute
# h2 = Va / Vp; all three zones use the 10^ back-transform here.
#estract family-level variance in seed mass
#GL_alvar
#extract variance component - gaussian dist
glavars.sm.2b <- as.data.frame(VarCorr(gla_sm.2b.3))
glavars.sm.2b
print(VarCorr(gla_sm.2b.3), comp = "Variance")
va.gla.sm.2b <-glavars.sm.2b[1,4]
va.gla.sm.2b
#convert to response scale -
Va.gla.sm.2b<- 10^(va.gla.sm.2b)
Va.gla.sm.2b
#remove NAs for calc of var
days.sm.2b <- dat.gla$sm.2b[!is.na(dat.gla$sm.2b)]
vp.gla.sm.2b<- var(days.sm.2b)
vp.gla.sm.2b
h2.gla.sm.2b<- Va.gla.sm.2b/vp.gla.sm.2b
h2.gla.sm.2b #0
#MB_alvar -
mbavars.sm.2b <- as.data.frame(VarCorr(mba_sm.2b.3))
mbavars.sm.2b
print(VarCorr(mba_sm.2b.3), comp = "Variance")
va.mba.sm.2b <-mbavars.sm.2b[1,4]
va.mba.sm.2b
#convert to response scale -
Va.mba.sm.2b<- 10^(va.mba.sm.2b)
Va.mba.sm.2b
#remove NAs for calc of var
days.sm.2b <- dat.mba$sm.2b[!is.na(dat.mba$sm.2b)]
vp.mba.sm.2b<- var(days.sm.2b)
vp.mba.sm.2b
h2.mba.sm.2b<- Va.mba.sm.2b/vp.mba.sm.2b
h2.mba.sm.2b #0.001 (0.0006)
#Prairie - Poisson
prvars.sm.2b <- as.data.frame(VarCorr(pr_sm.2b.3))
prvars.sm.2b
print(VarCorr(pr_sm.2b.3), comp = "Variance")
va.pr.sm.2b <-prvars.sm.2b[1,4]
va.pr.sm.2b
#convert to response scale - identity
Va.pr.sm.2b<-10^( va.pr.sm.2b)
Va.pr.sm.2b
#remove NAs for calc of var
days.sm.2b <- dat.pr$sm.2b[!is.na(dat.pr$sm.2b)]
vp.pr.sm.2b<- var(days.sm.2b)
vp.pr.sm.2b
h2.pr.sm.2b<- Va.pr.sm.2b/vp.pr.sm.2b
h2.pr.sm.2b#0.00
|
6a34e278de0d3bb18482f190dc783352c09bcf8f | ad2d14020197b8979031e12e5a7dfb5a5a895a70 | /Loan_Case_Study.R | ec81e6b8d1266e854285a867daee7e14581d84a9 | [] | no_license | shankhadeep-ghosal/Risk-Analytics-Loan-Approval-EDA | 684fe986961dab3f6b76691783a604597f43b758 | 9d0b18a347c3e27902532f94553a934c988b4068 | refs/heads/master | 2020-03-22T17:51:35.938977 | 2018-07-10T11:18:59 | 2018-07-10T11:18:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 23,934 | r | Loan_Case_Study.R | # Load the data
# Read the Lending Club loan data; keep a pristine copy for reference since
# loan_df is mutated heavily during cleaning below.
loan_df <- read.csv('loan/loan.csv',stringsAsFactors = FALSE)
loan_df_copy <- read.csv('loan/loan.csv',stringsAsFactors = FALSE)
View(loan_df)
# Loading the required libraries.
# Install a package only when it is missing: the original script called
# install.packages() unconditionally on every run, which requires network
# access and re-downloads every package each time the script is sourced.
required_pkgs <- c("tidyr", "dplyr", "stringr", "lubridate", "ggplot2",
                   "sqldf", "gridExtra", "corrplot", "PerformanceAnalytics")
for (pkg in required_pkgs) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg)
  }
  library(pkg, character.only = TRUE)
}
# Structure of the dataset
glimpse(loan_df)
# No. of counts of data
row_counts <- nrow(loan_df) # no. of rows = 39717
# 15% of the row count -- threshold for dropping columns with too many NAs
Percentage_15 <- row_counts*15/100
ncol(loan_df) # no. of cols = 111
# There are many columns with only a single unique value- e.g. NA, 0, F etc. Lets us drop
# all such columns as they will not be useful for EDA or visualisation
col_unique_1 <- as.vector(which(sapply(loan_df, function(x) length(unique(x))==1)))
# As a next step we should also drop columns which only have 0 or NA's as their unique values
col_0orNA <- as.vector(which(sapply(loan_df, function(x) length(unique(x))==2 & sum(unique(x) %in% c(0,NA))==2)))
# Columns where more than 15% of values are NA
colnames_gt_15_na <- c((colnames(loan_df)[colSums(is.na(loan_df)) > Percentage_15]))
# Drop these columns
loan_df <- loan_df[,-c(col_unique_1,col_0orNA)]
loan_df <- loan_df[,!(colnames(loan_df) %in% colnames_gt_15_na)]
ncol(loan_df)
View(loan_df)
# col wise data cleaning
# 1-10 cols
glimpse(loan_df[,1:10])
n_distinct(loan_df$id) # 39717 distinct_id is there
n_distinct(loan_df$member_id) # 39717 distinct_member_id is there
# Loan amount range is 500 to 35000
max_loan_amt <- max(loan_df$loan_amnt, na.rm = TRUE)
min_loan_amt <- min(loan_df$loan_amnt, na.rm = TRUE)
# Loan_df$term data cleaning and term_year new derived column
# term looks like " 36 months"; take the numeric part and convert to years
loan_df <- loan_df %>% mutate(term_year = as.numeric(substr(loan_df$term,0,3))/12)
sqldf("select term,term_year,count(*) from loan_df group by term,term_year")
# Loan_df$int_rate data cleaning -- strip the trailing '%' and make numeric
loan_df$int_rate <- as.numeric(gsub("%","",loan_df$int_rate))
#11-20 cols
glimpse(loan_df[,11:20])
# emp_title, desc and URL is irrelevent in this analysis so dropping the column
loan_df <- loan_df[ , !(names(loan_df) %in% c('emp_title','url','desc'))]
# emp_length data cleaning
# NOTE(review): "[year|years]" is a regex character class, so this strips the
# individual characters y, e, a, r, |, s wherever they occur ("10+ years" ->
# "10+ ", "n/a" -> "n/"). Downstream code filters on emp_length != 'n/', so it
# depends on this exact behaviour -- confirm before changing the pattern.
loan_df$emp_length <- gsub("[year|years]","",loan_df$emp_length)
# issue_d data cleaning and fetch the year
# issue_d looks like "Dec-11"; take the 2-digit year and add 2000
loan_df$issue_d_year <- as.numeric(substr(loan_df$issue_d,5,nchar(loan_df$issue_d)))+2000
#21-30 cols
glimpse(loan_df[,21:30])
# zip_code,addr_state can be deleted
# earliest_cr_line data cleaning , set all the date 1st of that month
?parse_date_time()
loan_df <- loan_df %>%
mutate(earliest_cr_line = paste("01",earliest_cr_line,sep="-"),
earliest_cr_line = parse_date_time(earliest_cr_line, orders = c("dmy"), locale = "eng"))
#31-40 cols
glimpse(loan_df[,31:40])
# revol_util data cleaning -- strip '%' and make numeric
loan_df$revol_util <- as.numeric(gsub("%","",loan_df$revol_util))
#41-46 cols
glimpse(loan_df[,41:46])
#There will be some warnings because of the blank values
loan_df <- loan_df %>% mutate(last_pymnt_d = paste("01",last_pymnt_d,sep="-"),
last_pymnt_d = parse_date_time(last_pymnt_d, orders = c("dmy"), locale = "eng"),
next_pymnt_d = paste("01",next_pymnt_d,sep="-"),
next_pymnt_d = parse_date_time(next_pymnt_d, orders = c("dmy"), locale = "eng"),
last_credit_pull_d = paste("01",last_credit_pull_d,sep="-"),
last_credit_pull_d = parse_date_time(last_credit_pull_d, orders = c("dmy"), locale = "eng"))
# Outliers checking
# For a given continuous variable, outliers are those observations that lie outside 1.5 * IQR,
# where IQR, the 'Inter Quartile Range' is the difference between 75th and 25th quartiles.
# Look at the points outside the whiskers in below box plot.

# outlier_detection: visualise a numeric column before/after outlier removal
# and return the upper whisker limit.
#   data : a data frame
#   var  : unquoted column name within `data` (captured via substitute())
# Side effects: draws a 2x2 panel (box plot + histogram, with and without
# outliers) and cat()s a short summary.
# Returns: numeric upper limit, Q3 + 1.5 * IQR.
outlier_detection <- function(data, var){
  var <- eval(substitute(var), eval(data))
  na_1 <- sum(is.na(var))
  old_par <- par(mfrow = c(2, 2))
  on.exit(par(old_par), add = TRUE)   # restore graphics state when we leave
  boxplot(var, horizontal = TRUE)
  hist(var, main = "With Outliers")
  outlier <- boxplot.stats(var)$out
  # na.rm so a column that already contains NAs does not make quantile() error
  upper_limit <- as.numeric(quantile(var, na.rm = TRUE)[4] + 1.5*IQR(var, na.rm = TRUE))
  # blank out the outliers (both tails) for the "after" plots only
  var <- ifelse(var %in% outlier, NA, var)
  boxplot(var, horizontal = TRUE)
  hist(var, main = "Without Outliers")
  title("Outlier Check", outer=TRUE)
  na_2 <- sum(is.na(var))
  cat("Outliers identified:", na_2 - na_1, " ")
  cat("Proportion of outliers:", round((na_2 - na_1) / sum(!is.na(var))*100, 1), "%")
  return(upper_limit)
}
# Compute the upper whisker limit for each continuous variable of interest
upper_limit_loan_amnt <- outlier_detection(loan_df,loan_amnt)
upper_limit_annul_inc <- outlier_detection(loan_df,annual_inc)
upper_limit_funded_amnt_inv <- outlier_detection(loan_df,funded_amnt_inv)
# Replacing any values over upper_limit with NA
loan_df$loan_amnt <- ifelse(loan_df$loan_amnt >= upper_limit_loan_amnt,NA,loan_df$loan_amnt)
loan_df$annual_inc <- ifelse(loan_df$annual_inc >= upper_limit_annul_inc,NA,loan_df$annual_inc)
loan_df$funded_amnt_inv <- ifelse(loan_df$funded_amnt_inv >= upper_limit_funded_amnt_inv,NA,loan_df$funded_amnt_inv)
# Taking a copy of the current loan data frame (pre-filter snapshot)
loan_df_2 <- loan_df
# As loan_status="Current" will not serving our purpose we are omitting it for further analysis
loan_df <- loan_df %>% filter(loan_status != "Current")
# Univariate / segmented bar charts G1-G9: each categorical variable vs
# loan_status (Charged Off vs Fully Paid).
# 1.. Home Ownership distribution,fill = loan_status
sqldf("select count(id),home_ownership,loan_status from loan_df where trim(home_ownership)='NONE' group by home_ownership,loan_status")
# Set back ground theme
theme_set(theme_bw())
options(scipen = 999)
par(mfrow=c(1,1))
G1 <- ggplot(data = loan_df %>%
group_by(home_ownership,loan_status)) +
geom_bar(aes(x=home_ownership,fill=loan_status),color='black',position="stack") +
labs(title="G1-Home Ownership & Loan Status",x="Home Ownership",y="Count",fill="Loan Status: ")+
theme(axis.text.x = element_text(angle = 50, hjust = 1))+
scale_x_discrete(limits=c("RENT","MORTGAGE","OWN","OTHER","NONE"))+
theme(legend.position="top")+
theme_minimal()+
scale_fill_manual(values=c('#999999','#E69F00'))
# 2..Verfication status distribution
G2 <- ggplot(data = loan_df %>%
group_by(verification_status,loan_status)) +
geom_bar(aes(x=verification_status,fill=loan_status),color='black',position="stack") +
labs(title="G1-verification_status & Loan Status",x="Verification Status",y="Count",fill="Loan Status: ")+
theme(axis.text.x = element_text(angle = 50, hjust = 1))+
theme_minimal()+
theme(legend.position="top")+
scale_fill_manual(values=c('#999999','#E69F00'))
# 3..Public Derogatory Record
G3 <- ggplot(data = loan_df %>%
group_by(pub_rec,loan_status))+
geom_bar(aes(x=pub_rec,fill=loan_status),color='black',position="stack") +
labs(title="G3-Public dergatory tecord & Loan Status",x="Public Record",y="Count",fill="Loan Status: ")+
theme(axis.text.x = element_text(angle = 50, hjust = 1))+
theme_minimal()+
theme(legend.position="top")+
scale_fill_manual(values=c('#999999','#E69F00'))
# 4..Public Derogatory bankruptcies Record
G4 <- ggplot(data = loan_df %>%
group_by(pub_rec_bankruptcies,loan_status))+
geom_bar(aes(x=pub_rec_bankruptcies,fill=loan_status),color='black',position="stack") +
labs(title="G4-Public dergatory record bankruptcies & Loan Status",x="Public Record",y="Count",fill="Loan Status: ")+
theme(axis.text.x = element_text(angle = 50, hjust = 1))+
theme_minimal()+
theme(legend.position="top")+
scale_fill_manual(values=c('#999999','#E69F00'))
# 5..Term of the loan and loan Status
G5 <- ggplot(data = loan_df %>%
group_by(term_year,loan_status))+
geom_bar(aes(x=as.factor(term_year),fill=loan_status),color='black',position="dodge") +
labs(title="G5-Loan term & Loan Status",x="Loan term in Years",y="Count",fill="Loan Status: ")+
theme_minimal()+
theme(legend.position="top")+
scale_fill_manual(values=c('#999999','#E69F00'))
#6..Annual income and loan Status
G6 <- ggplot(data = loan_df)+
geom_histogram(aes(x = annual_inc,fill = loan_status),color = "black") +
xlim(0, 150000)+
theme_minimal()+
theme(legend.position="top")+
labs(title="G6-Annual Income & Loan Status",x="Annual Income in Rupees",y="Count",fill="Loan Status: ")+
scale_fill_manual(values=c('#999999','#E69F00'))
# Show G1-G6 on one 2x3 panel
grid.arrange(G1,G2,G3,G4,G5,G6,nrow=2,ncol=3)
#7..Sub Grade and loan Status
G7 <- ggplot(data = loan_df)+
geom_bar(aes(x = sub_grade, fill = loan_status))+
labs(title="G7-Sub Grade & Loan Status",x="Sub Grade",y="Count",fill="Loan Status: ")+
theme_minimal()+
theme(axis.text.x = element_text(angle = 90, hjust = 1))+
theme(legend.position="top")+
scale_fill_brewer()
#scale_fill_manual(values=c('#999999','#E69F00'))
#8..Grade and loan Status
G8 <- ggplot(data = loan_df)+
geom_bar(aes(x = grade,fill = loan_status))+
labs(title = "G8-Grade & Loan Status",x="Sub Grade",y="Count",fill="Loan Status: ")+
theme_minimal()+
theme(legend.position = "top")+
scale_fill_brewer()
grid.arrange(G7,G8,nrow=1,ncol=2)
#9.. Purpose and loan status
G9 <- ggplot(data = loan_df)+
geom_bar(aes(x = purpose,fill = loan_status))+
labs(title = "G9-Purpose & Loan Status",x="Purpose",y="Count",fill="Loan Status: ")+
theme_minimal()+
theme(axis.text.x = element_text(angle = 90, hjust = 1))+
theme(legend.position = "top")+
scale_fill_manual(values=c('#999999','#E69F00'))
#..10 revol_util analysis
# Check NA values in revol_util: (main data)
length(which(is.na(loan_df$revol_util))) #50 NAs
# median & mean of this revol_util
mean(loan_df$revol_util,na.rm=T)
# 48.70278
median(loan_df$revol_util,na.rm=T)
# 49.1
# based on the mean and median value we can say that data is evenly distributed
# We are going consider median for replacement.
loan_df[which(is.na(loan_df$revol_util)),'revol_util'] <- median(loan_df$revol_util,na.rm=TRUE)
# G10: histogram of revolving credit utilisation, split by loan status
G10 <- ggplot(data = loan_df,aes(x = revol_util,fill = loan_status))+
geom_histogram(col='black',bins=21)+
labs(title = "G10-Revol Util & Loan Status",x="Revol util",y="Count",fill="Loan Status: ")+
theme_minimal()+
theme(legend.position = "top")+
scale_fill_manual(values=c('#999999','#E69F00'))
# G10_1: density of revol_util with the median marked
G10_1 <- ggplot(loan_df, aes(x=revol_util)) +
geom_density(fill='red',alpha = 0.5)+
geom_vline(aes(xintercept=median(revol_util)),
color="blue", linetype="dashed", size=1)+
theme_minimal()
grid.arrange(G10,G10_1,nrow=2,ncol=1)
# G11: histogram of the loan amount, split by loan status.
# BUGFIX: the labs() were copy-pasted from G8 ("G8-Grade & Loan Status" /
# x="Sub Grade") even though this plot shows loan_amnt; corrected below.
G11 <- ggplot(data = loan_df %>%
group_by(loan_amnt,loan_status))+
geom_histogram(aes(x = loan_amnt,fill = loan_status),col = 'black')+
labs(title = "G11-Loan Amount & Loan Status",x="Loan Amount",y="Count",fill="Loan Status: ")+
theme_minimal()+
theme(legend.position = "top")+
scale_fill_brewer()
#Loan_amnt_range derived matrics
loan_amnt_range <- NULL  # placeholder comment removed; derived column below
loan_df$loan_amnt_range <- (gsub("]",")",cut(loan_df$loan_amnt,60,dig.lab = 10)))
# BUGFIX: this line previously repeated grid.arrange(G10,G10_1,...) -- the
# exact call already made above -- and G11 was never displayed at all.
grid.arrange(G11,nrow=1,ncol=1)
#..12 addr_state -- loan status split by borrower's state
G12 <- ggplot(data = loan_df %>%
group_by(addr_state,loan_status)) +
geom_bar(aes(x=addr_state,fill=loan_status), position="dodge") +
theme_minimal()+
theme(axis.text.x = element_text(angle = 90, hjust = 1))+
theme(legend.position="top")+
scale_fill_manual(values=c('#999999','#E69F00'))+
labs(title="G12 - State - Address State",x="State",y="Count",fill="Loan Status")
# 13..delinq_2yrs -- number of delinquencies in the last 2 years
G13 <- ggplot(loan_df %>%
group_by(delinq_2yrs,loan_status))+
geom_bar(aes(x=delinq_2yrs,fill=loan_status),position="dodge") +
# scale_y_continuous(breaks=seq(0,1,0.1)) +
# scale_x_continuous(breaks=seq(0,12,1)) +
theme_minimal()+
theme(legend.position="top")+
scale_fill_manual(values=c('#999999','#E69F00'))+
labs(title="G13 - Delinquent in last 2 years Vs\nLoan Status",x="Number of delinquent",y="Percentage",fill="Loan Status")
#14..Employee experience
# The filter on 'n/' is the mangled 'n/a' produced by the emp_length gsub above
G14 <- ggplot(data = loan_df %>%
filter(emp_length!='n/') %>%
group_by(emp_length,loan_status)) +
geom_bar(aes(x=as.factor(emp_length),fill=loan_status), position="dodge") +
#scale_x_discrete(c("<1","1","2","3","4","6","7","8","9","10+"))+
theme_minimal()+
theme(axis.text.x = element_text(angle = 90, hjust = 1))+
theme(legend.position="top")+
scale_fill_manual(values=c('#999999','#E69F00'))+
labs(title="G14 - State - Employee Experience",x="State",y="Count",fill="Loan Status")
grid.arrange(G12,G13,G14,nrow=1,ncol=3)
# Variables that effect the charge off Vs Fully paid loans
# 1. home_ownership
# 2. verification_status
# 3. public bankruptcy records
# 4. term
# 5. sub_grade
# 6. purpose
# 7. revol_util_range
# 8. loan_amnt_range
# Correlation heat map of the continuous drivers plus an ordinal encoding of
# sub_grade.
effective_cols= c("loan_amnt", "int_rate", "installment", "sub_grade", "annual_inc","dti","revol_util","delinq_2yrs")
# NOTE(review): one_of() is superseded by all_of()/any_of() in current dplyr.
correlation <- select(loan_df, one_of(effective_cols))
correlation <- correlation[complete.cases(correlation), ]
# Convert the character types to numeric types #
correlation$subgrade_n <- as.numeric(as.factor(correlation$sub_grade))
# Remove the character
correlation <- select(correlation, -sub_grade)
C_plot <- cor(correlation)
par(mfrow=c(1,1))
col <- colorRampPalette(c("#BB4444", "#EE9988", "#FFFFFF", "#77AADD", "#4477AA"))
corrplot(C_plot, method = "color",
col=col(200),
title = "Correlation Map of Subgrade & Factors",
type = "upper",
order = "FPC",
number.cex = 1,
tl.cex = 0.9,
addCoef.col = "black",
bg="white")
# Bivariate views G15-G19: how grade/sub-grade relates to revolving
# utilisation, loan amount and DTI.
# 15.. Grades and revol_util relation #
G15 <- ggplot(loan_df) +
geom_boxplot(aes(x=sub_grade,y=revol_util,fill=grade)) +
geom_line(data = (loan_df %>% group_by(sub_grade) %>% summarize(avg=mean(revol_util,na.rm=TRUE))),aes(x=sub_grade,y=avg,group=1)) +
scale_y_continuous(breaks=seq(0,100,5)) +
theme_minimal()+
theme(axis.text.x = element_text(angle = 90, hjust = 1))+
theme(legend.position="top")+
scale_fill_brewer()+
labs(title="G15 - Grade Vs Revolving Utilization",x="Sub Grade",y="Revolving Utilization(%)",fill="Grade")
# There seems to a relationship between the grade of the loan and the revolving credit utilization
# However, this isnt consistent. So it is better if we consider revol_util as a separate variable
# 16.. Grades on loan amount #
# Note : the line signifies the mean of the dataset
G16 <- ggplot(loan_df) +
geom_boxplot(aes(x=sub_grade,y=loan_amnt,fill=grade)) +
geom_line(data = (loan_df %>%
group_by(sub_grade) %>%
summarize(avg=mean(loan_amnt,na.rm=TRUE))),
aes(x=sub_grade,y=avg,group=1)) +
scale_y_continuous(breaks=seq(0,30000,5000)) +
scale_fill_manual(values=c('#B7C8B6','#838B83','#71C671','#699864','#00CD00','#228B22','#003300'))+
theme_minimal()+
theme(axis.text.x = element_text(angle = 90, hjust = 1))+
theme(legend.position="top")+
labs(title="G16 - Grades Vs Loan Amount",x="Sub Grades",y="Loan Amount",fill="Grade")
# Inference : It seems that Grade/sub grade is related to loan_amnt, but there seem
# to be exceptions to it. It is better to keep loan_amnt separate for analysis.
# Inference:
# Variables that effect the charge off Vs Fully paid loans
# 1. home_ownership
# 2. verification_status
# 3. public bankruptcy records
# 4. term
# 5. sub_grade
# 6. purpose
# 7. revol_util_range
# 8. loan_amnt_range
# Build a separate column for sub_grade for the numerical value #
# e.g. "B3" -> "3" (second character of the sub-grade label)
loan_df$sub_grade_2 <- sapply(loan_df$sub_grade,function(x) str_split(x,"")[[1]][2])
# 17.. Relation between DTI and GRADES
# Note : the line signifies the mean DTI in that sub_grade
G17 <- ggplot(loan_df) +
geom_boxplot(aes(x=sub_grade,y=dti,fill=grade)) +
geom_line(data=(loan_df %>%
group_by(sub_grade) %>%
summarize(avg_dti=mean(dti,na.rm=TRUE))),
aes(x=sub_grade,y=avg_dti,group=1)) +
scale_y_continuous(breaks=seq(0,32,1)) +
labs(title="G17 - Grades Vs DTI",x="Sub Grade",y="DTI",fill="Grade")
# Since median & median both seems to justify the distribution in a just manner,
# lets look it from a different perspective
G18 <- ggplot(data = loan_df %>%
group_by(grade,sub_grade_2) %>%
summarize(median_dti = median(dti,na.rm=TRUE)),aes(x=grade,y=sub_grade_2,value=median_dti)) +
geom_tile(aes(fill=median_dti)) +
geom_text(aes(label=median_dti),col="white") +
labs(title="G18 - Grades vs DTI(Median)",x="Grade",y="Sub Grade",fill="Median DTI")
grid.arrange(G16,G17,G18,nrow=1,ncol=3)
# DTI vs Grades vs Loan Status
G19 <- ggplot(data = loan_df %>%
group_by(grade,sub_grade_2,loan_status) %>%
summarize(median_dti = median(dti,na.rm=TRUE)),aes(x=grade,y=sub_grade_2,value=median_dti)) +
geom_tile(aes(fill=median_dti)) +
geom_text(aes(label=median_dti),col="white") +
labs(title="G19 - DTI Vs Grades Vs Loan Status",x="Grade",y="Sub Grade",fill="Median DTI") +
facet_wrap(~loan_status)
grid.arrange(G16,G17,G18,G19,nrow=2,ncol=2)
# Inference : There is a direct relationship between the DTI and the Grade, therefore,
# between dti and grades, grades can be considered as a valid reflection for DTI data as well.
#### IMPORTANT ####
# Grade = G3 seems is having oulier in the loan allocation. There is a relationship between these
# Grade level and the number of Charged off loans.
# Grade Vs Sub_Grade Vs Median DTI (tile) Vs Percentage Charged Off #
# G20 layers three geom_text calls on one tile map: per-cell charged-off %,
# fully-paid % and total loan count.
G20 <- ggplot() +
geom_tile(data = loan_df %>%
group_by(grade,sub_grade_2) %>%
summarize(median_dti = median(dti,na.rm=TRUE)),aes(x=grade,y=sub_grade_2,fill=median_dti)) +
geom_text(data = (loan_df %>%
group_by(grade,sub_grade_2,loan_status) %>%
summarize(count=length(id)) %>%
mutate(ratio=paste("Charged Off =",round(count/sum(count),4)*100,"%")) %>%
filter(loan_status=="Charged Off")),
aes(x=grade,y=sub_grade_2,label=ratio),col="black") +
geom_text(data = (loan_df %>%
group_by(grade,sub_grade_2,loan_status) %>%
summarize(count=length(id)) %>%
mutate(ratio=paste("Fully Paid =",round(count/sum(count),4)*100,"%")) %>%
filter(loan_status=="Fully Paid")),
aes(x=grade,y=sub_grade_2,label=ratio),col="black",vjust=-1.2) +
geom_text(data = (loan_df %>%
group_by(grade,sub_grade_2) %>%
summarize(count=length(id)) %>%
mutate(count_2=paste("Total = ",count))),
aes(x=grade,y=sub_grade_2,label=count_2),col="black",vjust=-2.4) +
labs(title="G20 - Grade vs Sub Grade vs Median DTI\nWith percentage Charged off for each\nSub Grade",
x="Grade",y="Sub Grade",fill="Median DTI",label="Percentage of Charged Off Loans")+
scale_fill_gradientn(colours = terrain.colors(10))
# G3 Grade level is a clear risk for LC #
# Influence of Grades on Interest Rate
# Note : the line signifies the mean interest rate in that sub_grade
G21 <- ggplot(loan_df) +
geom_boxplot(aes(x=sub_grade,y=int_rate,fill=grade)) +
geom_line(data=(loan_df %>%
group_by(sub_grade) %>%
summarize(avg_dti=mean(int_rate,na.rm=TRUE))),
aes(x=sub_grade,y=avg_dti,group=1)) +
scale_y_continuous(breaks=seq(0,25,1)) +
scale_fill_manual(values=c('#B7C8B6','#838B83','#71C671','#699864','#00CD00','#228B22','#003300'))+
theme_minimal()+
theme(axis.text.x = element_text(angle = 90, hjust = 1))+
theme(legend.position="top")+
labs(title="G21 - Grades vs Interest Rate",x="Sub Grade",y="Interest Rate",fill="Grade")
# G22: median interest rate per grade x sub-grade cell
G22 <- ggplot(data = loan_df %>%
group_by(grade,sub_grade_2) %>%
summarize(med=median(int_rate)),
aes(x=grade,y=sub_grade_2,value=med)) +
geom_tile(aes(fill=med)) +
geom_text(aes(label=med),col="Black") +
theme(legend.position="top")+
scale_fill_gradientn(colours = terrain.colors(7))+
labs(title="G22 - Grade vs Sub_Grade vs Interest Rate",x="Grade",y="Sub Grade",fill="Median\nInterest Rate")
# G23: same tile map faceted by loan status
G23 <- ggplot(data = loan_df %>%
group_by(grade,sub_grade_2,loan_status) %>%
summarize(med=median(int_rate)),
aes(x=grade,y=sub_grade_2,value=med)) +
geom_tile(aes(fill=med)) +
geom_text(aes(label=med),col="black") +
facet_wrap(~loan_status) +
theme(legend.position="top")+
scale_fill_gradientn(colours = terrain.colors(7))+
labs(title="G23 - Grade vs Sub_Grade vs Interest Rate vs Loan Status",x="Grade",y="Sub Grade",fill="Median\nInterest Rate")
grid.arrange(G21,G22,G23,nrow=1,ncol=3)
#Inference : There seems to be direct relationship between Interest Rate and the Grade, therefore,
# between interest rate and grades, grades can be considered as a valid reflection of interest
# data as well.
# Influence of grade on verification status
G24 <- ggplot(loan_df %>%
group_by(verification_status,sub_grade) %>%
summarize(count=length(id))) +
geom_col(aes(x=verification_status,y=count,fill=sub_grade),position="fill") +
scale_y_continuous(breaks=seq(0,1,0.1)) +
labs(title="G24 - Verification Status vs Grade",x="Verification Status",y="Percentage",fill="Grade")
# Influence of grade on home ownership
G25 <- ggplot(loan_df %>%
group_by(home_ownership,sub_grade) %>%
summarize(count=length(id))) +
geom_col(aes(x=home_ownership,y=count,fill=sub_grade),position="fill") +
scale_y_continuous(breaks=seq(0,1,0.1)) +
labs(title="G23 - Verification Status Vs Grade",x="Verification Status",y="Percentage",fill="Grade")
grid.arrange(G24,G25,nrow=1,ncol=2)
# Inference : There is relationship between home_ownership and verification status, hence
# keeping them as separate influential variables
# Inference:
# Variables that effect the charge off Vs Fully paid loans
# 1. sub_grade
# 2. purpose
# 3. term
# 4. home_ownership
# 5. verification_status
# 6. public bankruptcy records
# 7. revol_util_range
# 8. loan_amnt_range
### Final EDA Conclusion Graph - for Influencing Variables #
grid.arrange(G7,G8,G6,G2,G3,G5,G9,G10,nrow=2,ncol=4)
|
d10628fa1ed1091e591b2fe61774b888163d0922 | be2f7f17e0ec4f82fe63bb493ca8c4f5c108ed6c | /web_scraping.R | 0281f90ea1200cb9d7946453597c3ff27388587d | [] | no_license | Tantatorn-dev/r-practice | f2cd6e183e1bad62a2303285017ee8a8afde35f3 | 4508dc83742282f71476fdbb7f8bed63b7aed96f | refs/heads/master | 2023-01-21T08:56:45.908520 | 2020-11-15T06:58:48 | 2020-11-15T06:58:48 | 276,140,752 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 175 | r | web_scraping.R | url <- "http://biostat.jhsph.edu/~jleek/contact.html"
# Read the page source; readLines() opens and closes its own connection when
# given a character URL, so no explicit close() is needed.
# BUGFIX: the original called close(url) on a plain character vector, which
# errors with "invalid connection"; also switched '=' to the idiomatic '<-'.
htmlCode <- readLines(url)
# Character counts of selected lines of the fetched HTML
nchar(htmlCode[10])
nchar(htmlCode[20])
nchar(htmlCode[30])
nchar(htmlCode[100])
|
52852d20f426eecab59ee5d4b8c30ab6f167f756 | 1c6ae31af2699919446d7aa6cdaa4d177f30601b | /code/postprocess_monolix_bootstrap+PPC.R | 421b5b6f3cc5fa7ad51d9c3f61883f7b6cbf73f8 | [
"LicenseRef-scancode-warranty-disclaimer",
"CC-BY-4.0"
] | permissive | deanbot1/immunomem | 7946d53a1050c81c9426b5187ff4ef16a1ef74bd | b134a342f458a4b374bfd0bb54be4c50b802cafd | refs/heads/main | 2023-05-01T12:19:06.176332 | 2021-05-19T21:10:42 | 2021-05-19T21:10:42 | 368,999,975 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,077 | r | postprocess_monolix_bootstrap+PPC.R | # plot distribution of stuff with CI's
# NOTE(review): rm(list=ls()) wipes the caller's workspace when this script is
# sourced -- a known anti-pattern, kept here because the script relies on a
# clean environment.
rm(list=ls())
library(ggplot2)
library(matrixStats)
library(survival)
library(data.table)
#library(survminer)
dinm <- 365.25/12 # number of days in a month
# bootstrp: bootstrap the across-patient CDF of time-to-endpoint-loss from
# Monolix simulated individual parameters.
#   projname : Monolix run folder name under ../monolix/
#   Mnum     : model number (1 = exponential decay from intercept y0;
#              2 = model with production kin and delay Tin)
#   LOS      : limit-of-significance threshold for the endpoint
#   endpoint : label stored in the output data frame
#   CIwidth  : width of the bootstrap confidence band (default 0.95)
#   Nboot    : number of bootstrap resamples
#   tsim     : time grid (months) on which the CDF is evaluated
# Returns a data frame with columns tsim/lower/median/upper plus endpoint and
# type labels, suitable for rbind-ing across endpoints.
# NOTE(review): the replicate-picking index assumes the exported file stores
# all `rep` replicates of each id consecutively -- confirm against the Monolix
# export format.
bootstrp <- function(projname, Mnum = 1, LOS, endpoint, CIwidth = 0.95,
                     Nboot = 5000, tsim = seq(0, 36, by = .1)){
  #projname='run015_PSV-neut-titer_M01-base';Mnum=1;LOS=20;endpoint='PSV Neutralizing Titer'
  # named 'sims' rather than 'T' so base R's TRUE alias is not masked
  sims <- read.csv(paste0('../monolix/', projname,
                          '/IndividualParameters/simulatedIndividualParameters.txt'))
  # TTel: time (in months, via the script-level dinm constant) at which the
  # simulated trajectory crosses LOS, per model form
  if (Mnum == 2){sims$TTel <- (sims$Tin + log(sims$kin*sims$Tin/LOS)/sims$kel)/dinm}
  if (Mnum == 1){sims$TTel <- (-log(LOS/sims$y0)/sims$kel)/dinm}
  sims$Thalf <- (log(2)/sims$kel)/dinm
  sims$endpoint <- endpoint
  Nrep <- max(sims$rep)
  uPAT <- unique(sims$id)
  Npat <- length(uPAT)
  # one bootstrap draw: pick one random replicate per patient and evaluate the
  # empirical CDF of TTel on the tsim grid
  bootfun <- function(Nrep, Npat){
    booti <- sample.int(Nrep, size = Npat, replace = TRUE) # which replicate to take for each patient
    bootbase <- seq(0, Nrep*Npat - 1, by = Nrep)
    sampi <- bootbase + booti
    ysamp <- sims$TTel[sampi]
    ec <- ecdf(ysamp)
    return(ec(tsim))
  }
  foop <- replicate(Nboot, bootfun(Nrep, Npat))
  # pointwise quantiles of the bootstrapped CDFs (matrixStats::rowQuantiles)
  QQ <- rowQuantiles(foop, probs = c((1 - CIwidth)/2, 0.5, 1 - (1 - CIwidth)/2))
  Tout <- data.frame(tsim, lower = QQ[,1], median = QQ[,2], upper = QQ[,3])
  Tout$endpoint <- paste(endpoint, '>', LOS)
  Tout$type <- 'model'
  return(Tout)
}
# Model-based bootstrap curves for the two endpoints: pseudovirus (PSV)
# neutralizing titer (LOS = 20) and spike-specific CD4+ T cells
# (LOS = 0.03 %), both under the single-exponential model (Mnum = 1).
boot.PSV <- bootstrp(projname='run015_PSV-neut-titer_M01-base',Mnum=1,LOS=20,endpoint='PSV Neutralizing Titer')
boot.Tcells <- bootstrp(projname='run013_CD4-spike-T_M01-base',Mnum=1,LOS=0.03,endpoint='Spike-specific CD4+ Tcells (%)')
# Stacked for faceted plotting further down.
Tbig <- rbind(boot.Tcells,boot.PSV)
# Month marks used by the (currently commented-out) point labels.
months <- c(6,12,18,24)
surfun <- function(csvname, endpoint, LOS, IDname, Nboot = 2500, CIwidth = 0.95) {
  # Observed (Kaplan-Meier) counterpart of bootstrp(): per-subject time
  # until the measured endpoint first drops to or below LOS, with
  # bootstrap confidence bands on 1 - S(t).
  #
  # csvname  : input CSV with columns DV (measurement, '.' = missing),
  #            Days.PSO (days post symptom onset) and an ID column
  # endpoint : label written into the output 'endpoint' column
  # LOS      : limit-of-sensitivity threshold
  # IDname   : name of the subject-ID column in the CSV
  # Nboot    : number of bootstrap resamples of subjects
  # CIwidth  : two-sided coverage of the bootstrap interval
  #
  # Returns a data.frame with columns tsim (months), lower, median,
  # upper, endpoint, type ('observed').  Requires survival::survfit,
  # data.table::nafill, matrixStats::rowQuantiles and the global `dinm`.

  dat <- read.csv(csvname, na = ".")
  dat <- dat[!is.na(dat$DV), ]
  uID <- unique(dat[[IDname]])

  # Time-to-negativity per subject: first Days.PSO with DV <= LOS;
  # subjects that never reach LOS are right-censored (CEN = 1) at their
  # last observation.  Vectors are preallocated instead of grown with
  # c() (the original also looped over 1:length(uID), which iterates
  # over c(1, 0) when uID is empty).
  TTN <- numeric(length(uID))
  CEN <- numeric(length(uID))
  for (j in seq_along(uID)) {
    Tlit <- dat[is.element(dat[[IDname]], uID[j]), ]
    if (nrow(Tlit) == 0) {
      warning("No rows for subject ", uID[j], call. = FALSE)
    }
    igood <- which(Tlit$DV <= LOS)
    if (length(igood) > 0) {
      TTN[j] <- Tlit$Days.PSO[min(igood)]
      CEN[j] <- 0
    } else {
      TTN[j] <- max(Tlit$Days.PSO)
      CEN[j] <- 1
    }
  }
  Tout <- data.frame(uID, TTN, CEN)
  Tout$endpoint <- endpoint

  tsim <- seq(0, max(Tout$TTN), by = 1)  # daily grid up to the last event/censoring

  # One bootstrap draw: resample subjects with replacement, refit the
  # Kaplan-Meier curve and place S(t) on the daily grid.
  # NOTE(review): ssim[ss$time + 1] assumes event times are integer
  # days starting at 0 -- confirm Days.PSO is integer-valued.
  bootsfun <- function(Npat) {
    booti <- sample.int(Npat, size = Npat, replace = TRUE)
    Ttest <- Tout[booti, ]
    ss <- survfit(Surv(TTN, 1 - CEN) ~ 1, data = Ttest)
    ssim <- rep(NA, length(tsim))
    ssim[ss$time + 1] <- ss$surv
    nafill(ssim, "locf")  # carry the last survival estimate forward between events
  }

  Npat <- length(uID)
  boot_mat <- replicate(Nboot, bootsfun(Npat))
  # Quantiles of 1 - S(t): the proportion of the population at/below LOS.
  QQ <- rowQuantiles(1 - boot_mat,
                     probs = c((1 - CIwidth) / 2, 0.5, 1 - (1 - CIwidth) / 2))
  tsim <- tsim / dinm  # days -> months
  Tsurv <- data.frame(tsim,
                      lower = QQ[, 1],
                      median = QQ[, 2],
                      upper = QQ[, 3])
  Tsurv$endpoint <- paste(endpoint, ">", LOS)
  Tsurv$type <- "observed"
  Tsurv
}
sur.PSV <- surfun('../monolix/PSV-neut-titer.csv','PSV Neutralizing Titer',LOS=20,IDname='Donor.ID')
sur.Tcells <- surfun('../monolix/CD4-spike-T.csv','Spike-specific CD4+ Tcells (%)',LOS=0.03,IDname='UID')
sur.both <- rbind(sur.PSV,sur.Tcells)
bigdat <- rbind(sur.both,Tbig)
bigdat <- bigdat[bigdat$endpoint=='PSV Neutralizing Titer > 20',]
plo<-ggplot(data=bigdat) + geom_line(aes(x=tsim,y=median,color=type),size=1) +
geom_ribbon(aes(x=tsim,ymin=lower,ymax=upper,fill=type),alpha=0.25) +
coord_cartesian(xlim=c(0,12))+ scale_x_continuous(breaks=seq(0,18,by=2)) +
scale_color_discrete() +
scale_y_reverse(labels=function(x){100-100*x}) +
xlab('Months post symptom onset') +
ylab(paste0('% of population above LOS')) +
theme_bw() + theme(legend.position=c(0.9,0.9), legend.title = element_blank()) + #stat_ecdf(data=dat.both,aes(x=TTel,color=endpoint),size=2)
#geom_label(data=Tbig[is.element(Tbig$tsim,months),],aes(x=tsim,y=median,color=endpoint,label=paste0(100-100*signif(median,2),'%'))) +
facet_grid(rows=vars(endpoint))
print(plo)
# ggplot(data=Tbig) + +
#
# geom_line(data=sur.both,aes(x=tsim/dinm,y=1-median,color=endpoint),size=1) +
# geom_ribbon(data=sur.both,aes(x=tsim/dinm,ymax=1-lower,ymin=1-upper,fill=endpoint),alpha=0.25) +
# coord_cartesian(xlim=c(0,30))+ scale_x_continuous(breaks=c(6,12,18,24)) +
# scale_color_discrete() +
# scale_y_reverse(labels=function(x){100-100*x}) +
# xlab('Months post symptom onset') +
# ylab(paste0('% of population above LOS')) +
# theme_bw() + theme(legend.position='none') + #stat_ecdf(data=dat.both,aes(x=TTel,color=endpoint),size=2)
# geom_label(data=Tbig[is.element(Tbig$tsim,months),],aes(x=tsim,y=median,color=endpoint,label=paste0(100-100*signif(median,2),'%'))) +
# facet_grid(rows=vars(endpoint)) |
9d7f8c5891acebc1cc576e64ac3c4c06f2795b17 | ad47b0bfeedc17e90297fe60f2b2cdc012dc045e | /breakpoint_graph/SV_local_CN_segment_wgs_raw_somatic.R | 92f10f89b8226c03962396734479fc999136b42b | [] | no_license | dmcblab/InfoGenomeR | 7acb2f5077ff3ec08a7da7fc25d51d720f1a97e3 | 86cc05bf731679b1c8e55becc16690ec8be8a204 | refs/heads/master | 2023-04-12T01:36:04.631965 | 2022-05-27T05:43:54 | 2022-05-27T05:43:54 | 269,232,281 | 13 | 6 | null | null | null | null | UTF-8 | R | false | false | 10,526 | r | SV_local_CN_segment_wgs_raw_somatic.R |
#germ=read.table("../CNV.output_germ", header=T,stringsAsFactors=F);
#germ_break=list();
#break_thres=10000;
#for(i in 1:23){
# if(i!=23){
# germ_prefix=i;
# }else{
# germ_prefix="X"
# }
# germ_break[[i]]=c(germ[germ$chrom==germ_prefix,2],germ[germ$chrom==germ_prefix,3]);
#}
args <- commandArgs(TRUE)
#args[7]="/DASstorage6/leeyh/MCF7/fastq/SRR3384112_rg_sorted_dup_removed.bam"
if(args[12]=="0"){
chr_prefix="";
}else{
chr_prefix="chr";
}
min_seg_length=1e3;
min_log_diff=0;
min_marker=0;
bicseq_bin_size=100;
library(DNAcopy)
germ_remove <- function(x) {
  # Merge away segments that start at a germline breakpoint.
  #
  # x: data.frame of CNV segments with (by column position)
  #    1 = chromosome label, possibly suffixed as "<chr>.<range>",
  #    2 = start, 3 = end, 4 = marker/bin count, 7 = log2 copy-ratio.
  # Relies on the globals `germ_break` (list of breakpoint positions per
  # chromosome, index 23 = "X") and `break_thres` (matching distance).
  # NOTE(review): both globals are only defined in the commented-out
  # header of this script, and the only call site is also commented out,
  # so this function is currently dead code.
  j <- 2
  while (j <= nrow(x)) {
    # Chromosome index of row j ("X" is stored as index 23).
    chr_lab <- strsplit(x[j, 1], "\\.")[[1]][1]
    if (chr_lab != "X") {
      chr_index <- as.numeric(as.character(chr_lab))
    } else {
      chr_index <- 23
    }
    if (length(which(abs(germ_break[[chr_index]] - x[j, 2]) < break_thres)) > 0) {
      # Row j starts at a germline break: fold it into row j-1.
      # The log-ratio is averaged weighted by segment length, marker
      # counts are summed, and the merged segment ends where row j ended.
      len_prev <- x[j - 1, 3] - x[j - 1, 2] + 1
      len_cur  <- x[j, 3] - x[j, 2] + 1
      x[j - 1, 7] <- (len_prev * x[j - 1, 7] + len_cur * x[j, 7]) / (len_prev + len_cur)
      x[j - 1, 4] <- x[j - 1, 4] + x[j, 4]
      x[j - 1, 3] <- x[j, 3]
      # Drop row j directly.  The original shift-then-truncate
      # (x[j:(nrow(x)-1),] <- x[(j+1):nrow(x),]) built a *descending*
      # index sequence when j was the last row, overwriting the freshly
      # merged row j-1 with the unmerged row j.
      x <- x[-j, ]
    } else {
      # No germline break here; advance.  (j is not advanced after a
      # merge so the new row j is also tested against row j-1.)
      j <- j + 1
    }
  }
  x
}
my.merge <- function(x) {
  # Collapse a segment table: drop segments shorter than the global
  # `min_seg_length` and merge adjacent segments whose log2-ratio
  # (column 7) differs (in absolute value) by less than the global
  # `min_log_diff`.  Column layout (by position): 2 = start, 3 = end,
  # 4 = marker count, 7 = log2 copy-ratio.  Rows are assumed ordered.
  # NOTE(review): with this script's current min_log_diff = 0 the merge
  # branch can never fire (the strict two-sided test is empty), so only
  # the short-segment filtering is active -- confirm that is intended.
  i <- 1
  j <- 2
  while (j <= nrow(x)) {
    if (x[i, 3] - x[i, 2] < min_seg_length) {
      x <- x[-i, ]  # left segment too short: drop it
    } else if (x[j, 3] - x[j, 2] < min_seg_length) {
      x <- x[-j, ]  # right segment too short: drop it
    } else {
      if (x[i, 7] - x[j, 7] < min_log_diff && x[i, 7] - x[j, 7] > -min_log_diff) {
        # Similar log-ratios: merge segment j into segment i.  The
        # length-weighted mean is computed *before* extending segment i
        # so each segment is weighted by its own length; the original
        # updated x[i, 3] first, which weighted segment i by the merged
        # length (inconsistent with germ_remove() above, which orders
        # the updates correctly).
        len_i <- x[i, 3] - x[i, 2]
        len_j <- x[j, 3] - x[j, 2]
        x[i, 7] <- (len_i * x[i, 7] + len_j * x[j, 7]) / (len_i + len_j)
        x[i, 4] <- x[i, 4] + x[j, 4]
        x[i, 3] <- x[j, 3]
        x <- x[-j, ]
      } else {
        i <- i + 1
        j <- j + 1
      }
    }
  }
  # NOTE(review): a trailing segment shorter than min_seg_length can
  # survive when it becomes the only remaining row -- confirm intended.
  x
}
#table_output=data.frame()
SV=read.table(args[1], sep="\t")
bicseq_config=data.frame();
for(i in 1:23){
if(i!=23){
iindex=i;
}else{
iindex="X";
}
brp=data.frame(br=integer(0),ori=integer(0), stringsAsFactors=F);
SVindex=1;
for(j in 1:nrow(SV)){
if(SV[j,2]==iindex){
brp[SVindex,1]=SV[j,3];
brp[SVindex,2]=as.integer(strsplit(as.character(SV[j,6]),"to")[[1]][1]);
SVindex=SVindex+1;
}
if(SV[j,4]==iindex){
brp[SVindex,1]=SV[j,5];
brp[SVindex,2]=as.integer(strsplit(as.character(SV[j,6]),"to")[[1]][2]);
SVindex=SVindex+1;
}
}
t=read.table(paste(args[3],iindex,".norm.bin",sep=""), header=T);
normal=read.table(paste(args[8],iindex,".norm.bin",sep=""), header=T);
brp=rbind(c(t[1,1],5),brp,c(t[nrow(t),2],3));
brp=brp[order(brp[1],-brp[2]),]
#FINAL=data.frame(ID=character(), chrom=character(), loc.start=numeric(), loc.end=numeric(), num.mark=numeric(), seg.mean=numeric(), bstat=numeric(), pval=numeric(), lcl=numeric(), ucl=numeric(), "brp[l - 1, 1]"=numeric(), "brp[l, 1]"=numeric(), stringsAsFactors=F)
# bicseq_config=data.frame();
for(l in 2:nrow(brp)){
tindex1=0;nindex1=0;
tindex2=0;nindex2=0;
tindex_t=which(brp[l-1,1]<=t[,1] & t[,2]<=brp[l,1]);
nindex_t=which(brp[l-1,1]<=normal[,1] & normal[,2]<=brp[l,1]);
if(length(tindex_t)!=0){
tindex1=min(tindex_t);
tindex2=max(tindex_t);
}
if(length(nindex_t)!=0){
nindex1=min(nindex_t);
nindex2=max(nindex_t);
}
if(brp[l-1,2]==5){
brp_adjusted1=brp[l-1,1]
}else{
brp_adjusted1=brp[l-1,1]+1
}
if(brp[l,2]==5){
brp_adjusted2=brp[l,1]-1
}else{
brp_adjusted2=brp[l,1]
}
if(brp_adjusted1<=brp_adjusted2){
if(tindex1<tindex2 && nindex1<nindex2){
cn_table=t[tindex1:tindex2,]
write.table(cn_table,paste(args[9],iindex,".norm.bin.",brp_adjusted1,"-",brp_adjusted2,sep=""),quote=F, sep="\t", col.names=T, row.names=F)
cn_table=normal[nindex1:nindex2,]
write.table(cn_table,paste(args[10],iindex,".norm.bin.",brp_adjusted1,"-",brp_adjusted2,sep=""),quote=F, sep="\t", col.names=T, row.names=F)
bicseq_config=rbind(bicseq_config,data.frame(chromName=paste(iindex,".",brp_adjusted1,"-",brp_adjusted2,sep=""),binFileNorm.Case=paste(args[9],iindex,".norm.bin.",brp_adjusted1,"-",brp_adjusted2,sep=""),binFileNorm.Control=paste(args[10],iindex,".norm.bin.",brp_adjusted1,"-",brp_adjusted2,sep="")))
}
}
}
}
write.table(bicseq_config, "configFile", quote=F, sep="\t", col.names=T, row.names=F);
#system(paste("perl",args[5],"--lambda",args[6],"--tmp",args[4],"--strict","--fig CNV.png","configFile","CNV.output","--noscale",sep=" "));
#system(paste("perl",args[5],"--tmp",args[4],"--fig CNV.png","configFile","CNV.output","--nrm","--noscale","--control",sep=" "));
system(paste("perl",args[5],"--tmp",args[4], "--lambda",args[6],"--fig CNV.png","configFile","CNV.output","--nrm","--noscale","--control",sep=" "));
system("cat CNV.output | awk '{print $1\"\t\"$2\"\t\"$3\"\t\"$4\"\t\"$5\"\t\"$6\"\t\"$9}' > CNV.output.somatic_format");
#system(paste("perl",args[5],"--lambda",args[6],"--tmp",args[4],"--strict","--fig CNV.png","configFile","CNV.output",sep=" "));
segs2=read.table("CNV.output.somatic_format", header=T, stringsAsFactors=F);
names(segs2)=c("X1","X2","X3","X4","X5","X6","X7");
FINAL=segs2[-(1:nrow(segs2)),]
for(i in 1:23){
if(i!=23){
iindex=i;
}else{
iindex="X";
}
brp=data.frame(br=integer(0),ori=integer(0), stringsAsFactors=F);
SVindex=1;
for(j in 1:nrow(SV)){
if(SV[j,2]==iindex){
brp[SVindex,1]=SV[j,3];
brp[SVindex,2]=as.integer(strsplit(as.character(SV[j,6]),"to")[[1]][1]);
SVindex=SVindex+1;
}
if(SV[j,4]==iindex){
brp[SVindex,1]=SV[j,5];
brp[SVindex,2]=as.integer(strsplit(as.character(SV[j,6]),"to")[[1]][2]);
SVindex=SVindex+1;
}
}
t=read.table(paste(args[3],iindex,".norm.bin",sep=""), header=T);
normal=read.table(paste(args[8],iindex,".norm.bin",sep=""), header=T);
brp=rbind(c(t[1,1],5),brp,c(t[nrow(t),2],3));
brp=brp[order(brp[1],-brp[2]),]
for(l in 2:nrow(brp)){
tindex1=0;nindex1=0;
tindex2=0;nindex2=0;
tindex_t=which(brp[l-1,1]<=t[,1] & t[,2]<=brp[l,1]);
nindex_t=which(brp[l-1,1]<=normal[,1] & normal[,2]<=brp[l,1]);
if(length(tindex_t)!=0){
tindex1=min(tindex_t);
tindex2=max(tindex_t);
}
if(length(nindex_t)!=0){
nindex1=min(nindex_t);
nindex2=max(nindex_t);
}
if(brp[l-1,2]==5){
brp_adjusted1=brp[l-1,1]
}else{
brp_adjusted1=brp[l-1,1]+1
}
if(brp[l,2]==5){
brp_adjusted2=brp[l,1]-1
}else{
brp_adjusted2=brp[l,1]
}
if(brp_adjusted1<=brp_adjusted2){
if(tindex1<tindex2 && nindex1<nindex2){
segs2_i=which(segs2[,1]==paste(iindex,".",brp_adjusted1,"-",brp_adjusted2,sep=""));
if(length(segs2_i)!=0){
segs2_u=segs2[min(segs2_i):max(segs2_i),];
segs2_u=my.merge(segs2_u);
# segs2_u=germ_remove(segs2_u);
segs2_u[1,2]<-brp_adjusted1;
segs2_u[nrow(segs2_u),3]<-brp_adjusted2;
print(segs2_u);
FINAL=rbind(FINAL,segs2_u);
}else{
NAframe=data.frame(matrix(c(as.character(iindex),brp_adjusted1,brp_adjusted2,rep(NA,times=4)),ncol=7), stringsAsFactors=F);
FINAL=rbind(FINAL,NAframe)
}
} else {
NAframe=data.frame(matrix(c(as.character(iindex),brp_adjusted1,brp_adjusted2,rep(NA,times=4)),ncol=7), stringsAsFactors=F);
FINAL=rbind(FINAL,NAframe)
}
}
}
}
for(FINAL_i in 1:nrow(FINAL)){
FINAL[FINAL_i,1]=strsplit(FINAL[FINAL_i,1],"\\.")[[1]][1];
}
#print("debug1");
FINAL=data.frame(ID="Sample.1", chrom=FINAL[,1], loc.start=FINAL[,2], loc.end=FINAL[,3], num.mark=FINAL[,4], seg.mean=FINAL[,7], bstat=NA, pval=NA, lcl=NA, ucl=NA,"brp[l - 1, 1]"=FINAL[,2],"brp[l, 1]"=FINAL[,3])
FINAL=FINAL[order(FINAL[,2],FINAL[,3]),];
#print("debug2");
names(FINAL)<-c("ID","chrom","loc.start","loc.end","num.mark","seg.mean","bstat","pval","lcl","ucl","brp[l - 1, 1]","brp[l, 1]")
############# marker <15#################
for(FINAL_i in 1:nrow(FINAL)){
if(is.na(FINAL[FINAL_i,5])!=T&&as.integer(FINAL[FINAL_i,5])<min_marker){
FINAL[FINAL_i,6]="NA"
FINAL[FINAL_i,5]="NA"
}
}
write.table(FINAL,"copy_numbers",sep="\t", quote=F, row.names=F, col.names=F)
#table_output<-rbind(table_output,FINAL);
#write.table(table_output, "SNP6_level2.txt.copynumber.filtered.segmean", sep="\t", quote=F, row.names=F, col.names=F)
t=read.table("copy_numbers")
t$V2=factor(t$V2, levels=c(1:22,"X"))
t=t[order(t$V2,t$V3),]
write.table(t,"copy_numbers",sep="\t", quote=F, row.names=F, col.names=F)
segmean_output=read.table("copy_numbers");
cnv_output=read.table("CNV.output.somatic_format", header=T,stringsAsFactors=F)
#global_norm=cnv_output[1,7]-log(cnv_output[1,5]/cnv_output[1,6],base=2);
new_output=segmean_output[0,];
for(j in 1:23){
if(j!=23){
iindex=j;
}else{
iindex="X";
}
t=read.table(paste(args[3],iindex,".norm.bin",sep=""), header=T);
normal=read.table(paste(args[8],iindex,".norm.bin",sep=""), header=T);
S=segmean_output[segmean_output$V2==iindex,]
# S$V13=0;
for(i in 1:nrow(S)){
if(is.na(S[i,6])){
# a=t[(S$V3[i]<t$start & t$start<S$V4[i]) | (S$V3[i]< t$end & t$end<S$V4[i]) | (t$start < S$V3[i] & S$V4[i] < t$end ),]
# if(nrow(a)!=0){
S[i,5]=-ceiling((S$V4[i]-S$V3[i])/bicseq_bin_size);
S[i,6]=NA;
}
}
new_output=rbind(new_output, S);
}
write.table(new_output,"copy_numbers",sep="\t", quote=F, row.names=F, col.names=F)
|
982da3981d08675eea5e67130a3f546462a0ebd1 | a22898091937663673802193cd64b938697e7e06 | /_Common/Attic/HAWG data overviews.r | af6f989b2c36cadf9bfcc538135e7a3dc8fe96f8 | [] | no_license | ices-eg/wg_HAWG | 3e6443ecf8b81d48be34a4199625032cd9731f34 | 0b78290317fa3a07ca0efaf66861870e6726ea60 | refs/heads/master | 2023-04-02T12:30:12.871750 | 2023-03-22T09:28:59 | 2023-03-22T09:28:59 | 37,528,676 | 0 | 6 | null | 2017-03-30T09:55:55 | 2015-06-16T12:27:56 | R | UTF-8 | R | false | false | 14,978 | r | HAWG data overviews.r | ################################################################################
# HAWG data overviews
#
# Generating overviews of several stocks assessed within HAWG
#
# 18/03/2018 coding during HAWG 2018
# 20/03/2018 added all weight in the catch; added the crayola plots; Note stock trends now
# via the SAG download
# 21/03/2019 Updated during HAWG 2019 (MP)
# 25/03/2020 Updated for HAWG 2020 (MP)
################################################################################
rm(list=ls());
# R.version
# find_rtools()
# library(devtools)
# library(pkgbuild)
# Installing the stockassessment package is tricky. Better done in R directly than in RStudio
# (because there the RTools cannot be found)
# install.packages("Matrix")
# install.packages("ellipse")
# install.packages("TMB")
# devtools::install_github("fishfollower/SAM/stockassessment", ref="components", dependencies=FALSE)
library(stockassessment)
# libraries
library(tidyverse)
library(lubridate)
library(RColorBrewer)
library(directlabels)
library(ggmisc) # e.g. for crayola
library(icesSAG)
# source("_Common/crayola.r")
# use token
options(icesSAG.use_token = TRUE)
# Load utils code
source("../mptools/r/my_utils.r")
# ===================================================================================
# Load datasets
# ===================================================================================
# Load NSAS data
load("//community.ices.dk@SSL/DavWWWRoot/ExpertGroups/HAWG/2020 Meeting Docs/06. Data/her.27.3a47d/NSH_HAWG2020_sf.Rdata")
NSH.df <- as.data.frame(NSH) %>% mutate(stock="her.27.3a47d")
NSH.tun.df <- as.data.frame(NSH.tun) %>% mutate(stock="her.27.3a47d")
NSH.sag.df <- as.data.frame(getSAG(stock = "her.27.3a47d", year = 2020)) %>% tolower()
# Irish Sea herring ---------------------------------------------------
load("//community.ices.dk@SSL/DavWWWRoot/ExpertGroups/HAWG/2020 Meeting Docs/06. Data/her.27.nirs/SAM/results/ISH_assessment 2020.RData")
ISH.df <- as.data.frame(ISH) %>% mutate(stock="her.27.nirs")
ISH.tun.df <- as.data.frame(ISH.tun) %>% mutate(stock="her.27.nirs")
# Load Celtic Sea herring ------------------------------------
mypath <- "//community.ices.dk@SSL/DavWWWRoot/ExpertGroups/HAWG/2020 Meeting docs/06. Data/her.27.irls/data/"
CSH.df <- readFLStock(file.path(mypath, 'index.txt')) %>% as.data.frame() %>% mutate(stock="her.27.irls")
CSH.tun.df <- readFLIndices(file.path(mypath, "fleet.txt")) %>% as.data.frame() %>% mutate(stock="her.27.irls")
# Load WBSS data -------------------------------------------
WBSS.dir <- "//community.ices.dk@SSL/DavWWWRoot/ExpertGroups/HAWG/2020 Meeting docs/06. Data/her.27.2024/data/"
WBSS.df <- readFLStock(file.path(WBSS.dir, 'data/index.txt')) %>% as.data.frame() %>% mutate(stock="her.27.20-24")
WBSS.tun.df <- readFLIndices(file.path(WBSS.dir, "data/survey.dat")) %>% as.data.frame() %>% mutate(stock="her.27.20-24")
NSH.sag.df <- getSAG(stock = "her.27.20-24", year = 2020)
data.frame(stockassessment::ssbtable( stockassessment::fitfromweb("WBSS_HAWG_2020_sf")))
# Load 6a-7bc data -----------------------------------------------------
load("//community.ices.dk@SSL/DavWWWRoot/ExpertGroups/HAWG/2019 Meeting Docs/06. Data/6a7bc/Final_2017_VIaHerring.Rdata")
MSH.stock.n <-
slot(MSH,"stock.n") %>%
as.data.frame() %>%
dplyr::select(year, age, number = data) %>%
filter(number != -1) %>%
mutate(stock = "MSH")
MSH.canum <-
slot(MSH,"catch.n") %>%
as.data.frame() %>%
dplyr::select(year, age, number = data) %>%
filter(number != -1) %>%
mutate(stock = "MSH")
MSH.weca <-
slot(MSH,"catch.wt") %>%
as.data.frame() %>%
dplyr::select(year, age, weight = data) %>%
filter(weight != -1) %>%
mutate(stock = "MSH")
MSH.west <-
slot(MSH,"stock.wt") %>%
as.data.frame() %>%
dplyr::select(year, age, weight = data) %>%
filter(weight != -1) %>%
mutate(stock = "MSH")
MSH.HERAS <-
slot(MSH.tun[[1]],"index") %>%
as.data.frame() %>%
dplyr::select(year, age, index = data) %>%
filter(index != -1) %>%
mutate(stock = "MSH",
survey = "MSHAS")
# North Sea sprat ---------------------------------------------------
# spr.path <- "//community.ices.dk@SSL/DavWWWRoot/ExpertGroups/HAWG/2018 Meeting docs1/05. Data/Celtic Sea/"
spr.path <- "D:/HAWG/2019/06. Data/SPR-3a4"
startyear <- 1974
NSsprat.canum <-
read.table(file=file.path(spr.path, "canum.in"),
header=FALSE, skip=1) %>%
setNames(c("age0","age1","age2","age3")) %>%
mutate(year = startyear + floor((row_number()-1)/4),
season = (row_number() %% 4),
season = ifelse(season == 0, 4, season)) %>%
gather(key=age, value=number, age0:age3) %>%
mutate(age = an(gsub("age", "", age))) %>%
group_by(year, age) %>%
summarize(number= sum(number, na.rm=TRUE)) %>%
ungroup() %>%
mutate(stock = "SPR34")
NSsprat.weca <-
read.table(file=file.path(spr.path, "weca.in"),
header=FALSE, skip=1) %>%
setNames(c("age0","age1","age2","age3")) %>%
mutate(year = startyear + floor((row_number()-1)/4),
season = (row_number() %% 4),
season = ifelse(season == 0, 4, season)) %>%
gather(key=age, value=weight, age0:age3) %>%
mutate(age = an(gsub("age", "", age))) %>%
left_join(NSsprat.canum, by=c("year","age")) %>%
group_by(year, age) %>%
summarize(weight= weighted.mean(weight, number, na.rm=TRUE)) %>%
ungroup() %>%
mutate(stock = "SPR34")
# ===================================================================================
# Combine all the data
# ===================================================================================
canum <-
bind_rows(
NSH.canum,
MSH.canum,
WBSS.canum,
CSH.canum,
ISH.canum,
NSsprat.canum
) %>%
mutate(stock = factor(stock, levels=c("CSH","ISH","MSH","NSH","WBSS","SPR34")))
weca <-
bind_rows(
NSH.weca,
MSH.weca,
WBSS.weca,
CSH.weca,
ISH.weca,
NSsprat.weca
) %>%
filter(!is.na(weight), !weight <= 0) %>%
mutate(stock = factor(stock, levels=c("CSH","ISH","MSH","NSH","WBSS","SPR34")))
west <-
bind_rows(
NSH.west,
# MSH.west,
WBSS.west,
CSH.west
# ISH.west
) %>%
filter(!is.na(weight), !weight <= 0)
# ===================================================================================
# Plot the crayola of catch at age or stock at age
# ===================================================================================
canum %>%
filter(stock %in% c("WBSS")) %>%
# filter(stock %in% c("NSH","WoS")) %>%
# filter(stock %in% c("CSH","ISH")) %>%
# filter(stock %in% c("NSH", "MSH")) %>%
filter(year >= 1980) %>%
filter(age %in% 0:9) %>%
# first calculate the proportions at age
group_by(stock, year) %>%
mutate(number = number/sum(number, na.rm=TRUE)) %>%
group_by(stock, year, age) %>%
summarise(value = sum(number, na.rm=TRUE)) %>%
group_by(stock, age) %>%
mutate(value = value/mean(value, na.rm=TRUE)) %>%
mutate(yc = year - age) %>%
data.frame() %>%
ggplot() +
theme_bw() +
theme(legend.position = "none") +
theme(axis.text.y = element_blank()) +
theme(panel.border = element_rect(colour="black" , size=0.1)) +
theme(axis.ticks.y = element_blank() ) +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1, size=10)) +
theme(panel.spacing = unit(0.2, "lines")) +
geom_col(aes(year, value, fill = factor(yc))) +
scale_fill_crayola() +
labs(x = NULL, y = NULL, title="Herring relative stock at age") +
facet_grid(age ~ stock, scale = "free_y", switch = "y")
# in a loop
for (i in 1:length(levels(canum$stock))) {
t <-
canum %>%
complete(year, age, stock) %>%
filter(stock %in% levels(canum$stock)[i]) %>%
filter(year >= 1980) %>%
filter(age %in% 1:8) %>%
group_by(stock, year, age) %>%
summarise(value = sum(number, na.rm=TRUE)) %>%
group_by(stock, age) %>%
mutate(value = value/mean(value, na.rm=TRUE)) %>%
mutate(yc = year - age) %>%
data.frame()
assign(
paste("p",levels(canum$stock)[i],sep=""),
ggplot(t) +
theme_bw() +
theme(legend.position = "none") +
theme(axis.text.y = element_blank()) +
theme(panel.border = element_rect(colour="black" , size=0.1)) +
theme(axis.ticks.y = element_blank() ) +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1, size=10)) +
theme(panel.spacing = unit(0.2, "lines")) +
{if (i != 1) theme(strip.background.y = element_blank()) } +
{if (i != 1) theme(strip.text.y = element_blank()) } +
theme(plot.margin=unit(c(0,0,0,0),"mm")) +
geom_col(aes(year, value, fill = factor(yc))) +
scale_fill_crayola() +
labs(x = NULL, y = NULL, title=NULL) +
facet_grid(age ~ stock, scale = "free_y", switch = "y")
) # end of assign
}
cowplot::plot_grid(plotlist=mget(paste("p", levels(canum$stock), sep="")),
ncol=length(unique(canum$stock)), scale=0.99, align="hv",
rel_widths = c(1.0, rep(1.0,length(levels(canum$stock)))) )
# ===================================================================================
# Plot the crayola of acoustic survey
# ===================================================================================
NSH.HERAS %>%
# bind_rows(NSH.HERAS, MSH.HERAS) %>%
group_by(stock, survey, year, age) %>%
filter(year >= 1990) %>%
filter(age %in% 1:8) %>%
summarise(value = sum(index, na.rm=TRUE)) %>%
group_by(stock, survey, age) %>%
mutate(value = value/mean(value, na.rm=TRUE)) %>%
mutate(yc = year - age) %>%
data.frame() %>%
ggplot() +
theme_bw() +
theme(legend.position = "none") +
theme(axis.text.y = element_blank()) +
theme(panel.border = element_rect(colour="black" , size=0.1)) +
theme(axis.ticks.y = element_blank() ) +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1, size=10)) +
theme(panel.spacing = unit(0.2, "lines")) +
geom_col(aes(year, value, fill = factor(yc))) +
scale_fill_crayola() +
labs(x = NULL, y = NULL, title="Herring acoustic survey relative index at age") +
facet_grid(age ~ survey, scale = "free_y", switch = "y")
# integrated plots
bind_rows(NSH.HERAS, MSH.MSHAS2) %>%
group_by(stock, survey, year, age) %>%
filter(year >= 2000) %>%
filter(age %in% 1:8) %>%
summarise(value = sum(index, na.rm=TRUE)) %>%
group_by(stock, survey, age) %>%
mutate(value = value/mean(value, na.rm=TRUE)) %>%
mutate(yc = year - age) %>%
data.frame() %>%
ggplot() +
theme_bw() +
# theme(legend.position = "none") +
theme(axis.text.y = element_blank()) +
theme(panel.border = element_rect(colour="black" , size=0.1)) +
theme(axis.ticks.y = element_blank() ) +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1, size=10)) +
theme(panel.spacing = unit(0.2, "lines")) +
geom_bar(aes(year, value, fill = factor(survey)), stat="identity", position=position_dodge()) +
scale_fill_crayola() +
labs(x = NULL, y = NULL, title="Herring acoustic survey relative index at age") +
facet_grid(age ~ ., scale = "free_y", switch = "y")
# ===================================================================================
# weight in the stock
# ===================================================================================
bind_rows(NSH.stock.n, MSH.stock.n, ISH.stock.n) %>%
setNames(gsub("number","stock.n", names(.))) %>%
left_join(bind_rows(NSH.west, MSH.west, ISH.west), by=c("year","age","stock")) %>%
filter(year >= 1960) %>%
group_by(stock, year) %>%
summarize(weightw = weighted.mean(weight, w=stock.n, na.rm=TRUE),
weight = mean(weight, na.rm=TRUE)) %>%
group_by(stock) %>%
mutate(rel_weight = weight / mean(weight, na.rm=TRUE),
rel_weightw = weightw / mean(weightw, na.rm=TRUE)) %>%
ggplot(aes(year,rel_weight, group=stock)) +
theme_bw() +
geom_point(aes(colour=stock)) +
geom_smooth(aes(colour=stock), size=1, method="loess", se=FALSE, span=0.3)
bind_rows(NSH.stock.n, MSH.stock.n, ISH.stock.n) %>%
setNames(gsub("number","stock.n", names(.))) %>%
left_join(bind_rows(NSH.west, MSH.west, ISH.west), by=c("year","age","stock")) %>%
filter(year >= 1960) %>%
filter(age == 4) %>%
ggplot(aes(year,weight, group=stock)) +
theme_bw() +
geom_point(aes(colour=stock)) +
geom_smooth(aes(colour=stock), size=1, method="loess", se=FALSE, span=0.3)
# ===================================================================================
# weight in the catch
# ===================================================================================
for (i in 1:length(levels(weca$stock))) {
t <-
weca %>%
complete(year, age, stock) %>%
filter(stock %in% levels(weca$stock)[i]) %>%
filter(year >= 1980) %>%
filter(age %in% 1:8)
assign(
paste("p",levels(weca$stock)[i],sep=""),
ggplot(t, aes(year,weight, group=stock)) +
theme_bw() +
theme(legend.position = "none") +
# theme(axis.text.y = element_blank()) +
# theme(axis.ticks.y = element_blank() ) +
theme(panel.border = element_rect(colour="black" , size=0.1)) +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1, size=10)) +
theme(panel.spacing = unit(0.1, "lines")) +
{if (i != 6) theme(strip.background.y = element_blank()) } +
{if (i != 6) theme(strip.text.y = element_blank()) } +
{if (i != 6) theme(plot.margin = unit( c(0,0.5,0,0), units = "lines")) else theme(plot.margin=unit(c(0,0,0,0),"lines"))} +
geom_point(aes(colour=stock)) +
geom_smooth(aes(colour=stock, fill=stock), size=1, method="loess", span=0.5, se=TRUE, alpha=0.3) +
labs(x=NULL, y=NULL, title=NULL) +
facet_grid(age ~ stock, scale = "free_y")
) # end of assign
}
cowplot::plot_grid(plotlist=mget(paste("p", levels(weca$stock), sep="")),
ncol=length(unique(weca$stock)), scale=0.99, align="hv",
rel_widths = c(1.0, rep(1.0,length(levels(weca$stock)))) )
weca %>%
complete(year, age, stock) %>%
filter(stock %in% c("NSH")) %>%
filter(year >= 1980) %>%
filter(age %in% 1:8) %>%
ggplot(aes(year,weight, group=stock)) +
theme_bw() +
theme(legend.position = "none") +
# theme(axis.text.y = element_blank()) +
# theme(axis.ticks.y = element_blank() ) +
theme(panel.border = element_rect(colour="black" , size=0.1)) +
theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1, size=10)) +
theme(panel.spacing = unit(0.1, "lines")) +
geom_point(aes(colour=stock)) +
geom_smooth(aes(colour=stock, fill=stock), size=1, method="loess", span=0.5, se=TRUE, alpha=0.3) +
labs(x=NULL, y=NULL, title=NULL) +
facet_wrap(. ~ age, scale = "free_y")
|
c1a2edbfd3a18f2e34e8cd750d3a2cbdc50e42c3 | 0844c2cc35bfeec674c60a1507eb83c427310653 | /Programmes&algorithme/Data_cleaning.R | b580fe279c765b3ae4fa50285f7a64c2f644bcb3 | [] | no_license | pbazie/kaggle | 7751755fc5b00a91007c5a30fdc703c510a3048b | 8f350a8205996429ffc3b4307a244677cbf47966 | refs/heads/master | 2020-03-29T17:38:56.070834 | 2018-09-24T15:39:42 | 2018-09-24T15:39:42 | 150,174,782 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 317 | r | Data_cleaning.R | #############################################################
#
# Part : Data cleaning
#
# Autor : Yacouba.K
############################################################

# Load the per-region Google-queries extract for Alsace (CSV with a
# header row, comma-separated).
# NOTE(review): hard-coded absolute Windows path -- this only runs on
# the author's machine; prefer a relative path built with file.path().
exemple <- read.csv(
  "F:/Semestre 3/Challenge Kaggle & Big Data/Projet_Kaggle/Data de base/RequetesGoogleParRegion/Alsace.csv",
  header = TRUE,  # spell out TRUE: T is a reassignable alias
  sep = ","
)
|
79f757a233fb1bb74a87df8e62ddc55b84ad9625 | 0569ada9d9b0149504affd9a4e4a803dc0d9e727 | /prep_data_profiles.R | e0b07869b53eeb6717907d92e2db6f42ea6f687a | [] | no_license | mrblasco/herox | 47230b31d5d91580e15248894af00d86b0a74044 | 304111729db0a9ccb8550c00ff8bd02cf4f04875 | refs/heads/master | 2021-01-22T19:26:09.984931 | 2017-05-12T00:26:55 | 2017-05-12T00:26:55 | 85,201,130 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,623 | r | prep_data_profiles.R | rm(list=ls())
# Load profiles data from Qualtrics: take the most recent "HeroX" export,
# keep the two Qualtrics header rows aside, and retain only completed
# responses.
# NOTE(review): `full=T` relies on partial argument matching
# (full -> full.names) and on the T alias for TRUE -- fragile.
input <- list.files("Data/Profiles", full=T, pattern="HeroX")
read.csv(tail(input, 1), stringsAsFactors=FALSE) -> dat
questions <- unlist(droplevels(dat[1, ]))
questions.info <- dat[2, ]
dat <- subset(dat, Finished=='True')
# Extra data
# Mapping from picture identifiers to hosted image URLs, plus the two
# fixed answer scales used when emitting Qualtrics questions below.
# NOTE(review): `strings=FALSE` reaches stringsAsFactors only via
# partial argument matching inside read.table -- confirm it is honoured.
graphics <- read.csv("Data/Profiles/qualtrics_pictures_urls.txt", strings=FALSE)
choices.attr <- c("Extremely attractive", "Moderately attractive", "Slightly attractive", "Neither attractive nor unattractive", "Slightly unattractive", "Moderately unattractive", "Extremely unattractive")
choices.rm <- c("Strongly agree", "Agree", "Somewhat agree", "Neither agree nor disagree", "Somewhat disagree", "Disagree", "Strongly disagree")
# Master dataset
# Most recent master user export; per-competition outcome columns start
# right after Last.Login.
input <- list.files("Data", full=T, pattern="Users")
read.csv(tail(input, 1), stringsAsFactors=FALSE) -> users
emails <- users$Email
bio <- users$Bio
last.col <- which(colnames(users)=='Last.Login')
# Print helper for one profile (used with apply() at the bottom).
# Emit one Qualtrics "advanced format" question block for a single HeroX
# profile.  x is one row of the Qualtrics export `dat` (passed by
# apply(), so it is a character vector indexed by column position):
#   x[9]  = picture identifier, x[17] = email address,
#   x[22] = motivations answer, x[23] = short bio answer.
# Writes markup to stdout; relies on the globals graphics, emails,
# users, last.col, bio and choices.rm defined above.
# NOTE(review): the dotted name makes this an S3 print method for class
# "profile" as a side effect -- presumably unintended; it is only ever
# called directly via apply().
print.profile <- function(x) {
	cat("\n\n[[Question:MC:SingleAnswer:Vertical]]\n")
	# Match picture with urls
	index <- grep(x[9], graphics$picture)
	if (length(index)==1)
		cat(sprintf("<img src=\"%s\"><br>\n", graphics$url[index]))
	# Match name by email with master
	index <- which(tolower(emails)==tolower(as.character(x[17])))
	cat(sprintf("Name: %s<br>", users$First.name[index]))
	# AWARDS
	# Columns after Last.Login hold one outcome code per competition:
	# "4" = won an award, "3" = submitted without winning,
	# "N" = did not take part.
	if (length(index)>0) {
		y <- t(users[index, (last.col+1):ncol(users)])
		rows <- which(y!="N")
		won <- sum(y=="4")
		subm <- sum(y=="3") + won
		cat(sprintf("<h4>Awards</h4>\n<p>%s awards received (out of %s competitions with submissions)</p>\n", won, subm))
		sapply(rows, function(x) {
			if(y[x]=='4') cat("<p>Won an award in "
			, rownames(y)[x],"</p>\n")
			if(y[x]=='3') cat("<p>Participated with submissions in ", rownames(y)[x],"</p>\n")})
	}
	cat(sprintf("<h4>Motivations to be on HeroX</h4>\n%s<br>\n", x[22]))
	# Fall back to the bio from the master user table when the survey
	# answer is empty.
	# NOTE(review): `index` here is the email match above; if no email
	# matched, bio[index] is empty and x[23] stays blank -- confirm.
	if (x[23]=='') x[23] <- bio[index]
	cat(sprintf("<h4>Short Bio</h4>\n%s<br>\n", x[23]))
	# cat("\n[[Choices]]\n")
	# for (x in choices) cat(x, sep='\n')
	# cat("\n\n[[Question:MC:SingleAnswer:Vertical]]\n")
	cat("<hr>")
	cat("<h4>Do you agree or disagree on the following: 'This profile can be viewed as a role model for other members of HeroX'</h4>")
	cat("\n[[Choices]]\n")
	# One radio choice per agreement level (choices.rm defined above).
	for (x in choices.rm) cat(x, sep='\n')
}
# Join survey respondents (email in Q11_1) back to the master user table
# and export the recruited-profile roster to the desktop.
index <- match(tolower(as.character(dat$Q11_1)), tolower(emails))
x <- cbind(users[index, c("First.name", "Last.name")], email=dat$Q11_1, Motives=dat$Q3, Bio=dat$Q4)
write.csv(x, file="~/Desktop/recruited_profiles.csv", row.names=FALSE)
# Print all profiles
# Emit the Qualtrics "advanced format" header, then one question block
# per respondent row; the rightward assignment only captures apply()'s
# (unused) return value to suppress console echo.
cat("[[AdvancedFormat]]\n\n")
apply(dat, 1, print.profile) -> x
|
a3b266efadab20ec638e5a1f62117d41a9ee031a | 249f56515cf8686948e7cfdf65bc735c37ddea4f | /raw_data/Sandbox/hotMarkupSum.r | 5a2ea0cc9202ed5143f9408ca273048713be0ddd | [] | no_license | EconomiCurtis/hotellingMarkupsAnalysis | 54b2ae8025cedb07386bf51f18e89cb7c569dd20 | 720d57685b2d4c3395d1aee5dc1135eb78fdef5d | refs/heads/master | 2020-04-06T04:42:50.179751 | 2018-06-15T18:03:03 | 2018-06-15T18:03:03 | 82,885,579 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,781 | r | hotMarkupSum.r | # libraries
library(readr)
library(dplyr)
library(ggplot2)
# load data
hotMarkup_2p_01 <- read_csv("~/Dropbox/SSEL/hotelling_marketups/hotellingMarkupsAnalysis/raw_data/20170209 2p 01 hotellingmarkup stacked.csv")
hotMarkup_4p_01 <- read_csv("~/Dropbox/SSEL/hotelling_marketups/hotellingMarkupsAnalysis/raw_data/20170209 4p 01 hotellingmarkup stacked.csv")
# 2 player game
names(hotMarkup_2p_01)
unique(hotMarkup_2p_01$session.code)
hotMarkup_2p_01 %>%
group_by(session.code,player.period_number,player.transport_cost) %>%
summarise(
mean_profit = mean(player.round_payoff * 100)
) %>%
View
# Mean profit (payoff x 100) per session and transport cost for the
# 2-player treatment, plotted against transport cost.
# NOTE(review): the original grouped by player.transport_cost *twice*
# (group_by(session.code, player.transport_cost, player.transport_cost));
# the duplicate is removed here.  The parallel 4-player plot below
# groups by player.period_number instead -- confirm whether that was
# the intended third grouping variable here as well.
ggplot(
  hotMarkup_2p_01 %>%
    group_by(session.code, player.transport_cost) %>%
    summarise(
      mean_profit = mean(player.round_payoff * 100)
    ) %>%
    filter(
      !is.na(mean_profit)
    )
) +
  geom_point(
    aes(
      x = player.transport_cost,
      y = mean_profit,
      color = paste(session.code, player.transport_cost)
    )
  )
# 4 player game
names(hotMarkup_4p_01)
unique(hotMarkup_4p_01$session.code)
hotMarkup_4p_01 %>%
group_by(session.code,player.period_number,player.transport_cost) %>%
summarise(
mean_profit = mean(player.round_payoff * 100)
) %>%
View
hotMarkup_4p_01 %>%
group_by(session.code,player.period_number,player.transport_cost) %>%
filter(subsession.round_number == 20) %>%
summarise(
mean_profit = mean(player.round_payoff * 100)
) %>%
View
ggplot(
hotMarkup_4p_01 %>%
group_by(session.code,player.transport_cost,player.period_number) %>%
summarise(
mean_profit = mean(player.round_payoff * 100)
) %>%
filter(
!is.na(mean_profit)
)
) +
geom_point(
aes(
x = player.transport_cost,
y = mean_profit,
color = player.period_number)
)
|
316cb0a230c3c09e89508462e54786a5e4069436 | 020c2ff9ba9e7b0e53826e10275b9ed161433561 | /man/data_genotype_count_compatibility.Rd | d96a8c5e2eb3aebd3710b2671ff26867f5568a69 | [] | no_license | aimeertaylor/FreqEstimationModel | 9d2e27eeba9adf8ecb719cfcccc72498a1ee864f | a0b48873752bd656d5ea838d45370e54a2782f76 | refs/heads/master | 2021-12-23T03:18:57.597770 | 2016-11-18T18:33:42 | 2016-11-18T18:33:42 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 390 | rd | data_genotype_count_compatibility.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_genotype_count_compatibility.R
\name{data_genotype_count_compatibility}
\alias{data_genotype_count_compatibility}
\title{Data genotype count compatibility}
\usage{
data_genotype_count_compatibility(comp_genotypes, genotype_counts, raw_data)
}
\description{
Data and genotype compatibility check function
}
|
db36713a46d1f2c6eed62947283a1daba6c71ef0 | 6c53e9e2b85206e9e8cf33f47b85cbc2c2e2139e | /cachematrix.R | dfb014a2b90236782f4b247a394bf209a4669e79 | [] | no_license | jijojosen/ProgrammingAssignment2 | 3c751f457dc47f13277375fa6089f1b60c694bad | afe9fcdbe821cb0a28a040cf5ac01275afef272f | refs/heads/master | 2021-01-15T22:47:14.930092 | 2017-08-14T09:38:31 | 2017-08-14T09:38:31 | 99,914,624 | 0 | 0 | null | 2017-08-10T11:11:56 | 2017-08-10T11:11:56 | null | UTF-8 | R | false | false | 1,528 | r | cachematrix.R | ## This file contains 2 functions
## The first function creates a special "matrix" object that can cache its inverse.
## The second function computes the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been calculated
## then the cachesolve will retrieve the inverse from the cache.
## The function to cache the inverse
## Create a matrix wrapper that can cache its inverse.
##
## Returns a list of four accessor closures sharing the enclosed state:
##   set(y)        -- replace the stored matrix and clear the cache
##   get()         -- return the stored matrix
##   setInverse(i) -- store a computed inverse in the cache
##   getInverse()  -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL  # memoised inverse; NULL means "not computed yet"

  list(
    set = function(y) {
      # Replacing the matrix invalidates any previously cached inverse.
      x <<- y
      cached <<- NULL
    },
    get = function() x,
    setInverse = function(Inverse) cached <<- Inverse,
    getInverse = function() cached
  )
}
## Return the inverse of the special "matrix" built by makeCacheMatrix().
##
## On the first call the inverse is computed with solve() and stored in
## the object's cache; subsequent calls return the cached value directly.
##
## x   : a list produced by makeCacheMatrix()
## ... : extra arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()

  if (is.null(cached)) {
    ## Cache miss: compute the inverse once and remember it.
    result <- solve(x$get(), ...)
    x$setInverse(result)
    return(result)
  }

  ## Cache hit: reuse the previously computed inverse.
  message("getting cached data")
  cached
}
|
7489247dcc9aee0b11b06e27db5b09a0682f0c01 | 300ffcf3c9426898771b5de769afdd9a1efcdc90 | /R/p0023.r | 9aaff5bfde18f6188edb9d99d273a7f1ce43b33d | [] | no_license | kohske/peuleR | bd684367a3f669b3df4002253e0936a1820b712f | e5226fc728132e8960854b8c85f56d0ef38bc767 | refs/heads/master | 2016-08-08T03:25:37.765414 | 2016-01-21T02:31:21 | 2016-01-21T02:31:29 | 3,796,316 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 390 | r | p0023.r | # http://projecteuler.net/problem=23
# should be optimized - brute-force divisor sums over 1..28123
library(pracma)
# f(n): TRUE when n is "abundant", i.e. the sum of its proper divisors
# exceeds n.  factorize(n) gives the prime factors with multiplicity;
# every 0/1 selection over that multiset, multiplied out, enumerates all
# divisors (empty selection -> 1, full selection -> n), and unique()
# removes duplicates created by repeated primes.
f <- function(n) {
	z <- factorize(n)
	r <- unique(apply(z * t(do.call("expand.grid", rep(list(0:1), length(z)))), 2, function(x) prod(x[x>0])))
	# Proper divisors only: exclude n itself from the sum.
	sum(r[r!=n]) > n
}
# All abundant numbers up to 28123 (every integer above that bound is
# known to be expressible as a sum of two abundant numbers).
b <- which(Vectorize(f)(seq(28123)))
# Sum the integers that CANNOT be written as a sum of two abundant
# numbers; outer(b, b, "+") enumerates every such pairwise sum.
a <- sum(seq(28123)[!seq(28123) %in% sort(c(outer(b, b, "+")))])
print(a)
# Print the answer and also copy it to the macOS clipboard.
cat(a, file = pipe('pbcopy'))
|
db13a131c5de32071085bbc8343719fe256e4bb8 | a0aa511780814e67695203dec46022eda1c75467 | /man/mod.Rd | 182b102e7b8abd70083b0da0925686b0d74152bb | [] | no_license | leonpheng/xptdefine | ab031216806bbb22e8fbf7b5b56ac5fcfba726a3 | 62388d9927436fac5df9a05305bf6bffc2124e56 | refs/heads/master | 2020-04-05T02:29:19.060784 | 2019-11-19T15:39:29 | 2019-11-19T15:39:29 | 114,052,833 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 225 | rd | mod.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package.R
\name{mod}
\alias{mod}
\title{mod}
\usage{
mod(filename, var)
}
\description{
Internally used helper function.
}
\examples{
mod()
}
\keyword{mod}
|
01d9904262182a186036d2b17d22e137aca9c206 | c5baacf45414864179c18c4878af5464e103ece8 | /Lab17/Lab17_Keras_deep_learning.R | 675bdf1660726611d7d23c4ac2041dc4bbce6ded | [] | no_license | VladimirShleyev/Method_R_doc | ed1cbbd9b59cc1cec445e87a9e5696f665f83065 | 85aa7c64e3816108f0e84a0ff1efa11cc8e37d3b | refs/heads/master | 2023-07-16T00:29:57.871114 | 2021-09-03T12:13:19 | 2021-09-03T12:13:19 | 286,023,236 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,607 | r | Lab17_Keras_deep_learning.R | renv::init() # инициализация виртуального окружения
renv::install("class", "caret", "lattice", "e1071", "mailR") # install packages from CRAN
renv::snapshot() # snapshot the package versions used in this virtual environment
# The snapshot is pinned in the .lock file so the environment can be restored later.
# renv::restore() # command to roll back to the last known-good set of packages
# -------------------
# Lab exercise no. 17:
# Introduction to convolutional networks. Keras.
library(keras)
# MNIST handwritten-digit data: x are image arrays, y are integer labels 0-9.
mnist <- dataset_mnist()
train_images <- mnist$train$x
train_labels <- mnist$train$y
test_images <- mnist$test$x
test_labels <- mnist$test$y
str(train_images)
str(train_labels)
# Fully connected classifier: a 512-unit ReLU hidden layer feeding a
# 10-way softmax over the digit classes (note: despite the lab title,
# this particular model is dense, not convolutional).
network <- keras_model_sequential()%>%
  layer_dense(units = 512, activation = "relu", input_shape = c(28*28))%>%
  layer_dense(units = 10, activation = "softmax")
network%>% compile(
  optimizer = "rmsprop",
  loss = "categorical_crossentropy",
  metrics = c("accuracy")
)
# Flatten each 28x28 image into a 784-vector and rescale pixels to [0, 1].
train_images <- array_reshape(train_images, c(60000, 28*28))
train_images <- train_images/255
test_images <- array_reshape(test_images, c(10000, 28*28))
test_images <- test_images/255
# One-hot encode the integer labels for categorical_crossentropy.
train_labels <- to_categorical(train_labels)
test_labels <- to_categorical(test_labels)
# Train for 5 epochs, then report loss/accuracy on the held-out test set.
network%>%fit(train_images, train_labels, epochs =5, batch_size = 128)
metrics <- network%>% evaluate(test_images, test_labels)
metrics
e64459b3155da331bd6967344b37b476d465b0bf | 251d09f73d7a10acbe02828b412566b4fd46d0d1 | /Hate_Crimes_in_US/ui.R | 0066ac82b25c8d86575f6c98a3c9c757633cb9d4 | [] | no_license | foyaza/Factors-Influence-Hate-Crimes | ad12455d8fd4840c031391917e6142f895fd063d | 733092d988b34df3c91c0bbd20ca0f4d21743248 | refs/heads/master | 2020-12-03T20:46:20.744730 | 2020-01-18T03:50:05 | 2020-01-18T03:50:05 | 231,481,868 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,322 | r | ui.R | shinyUI(
  # Dashboard layout: header + sidebar navigation + three tab panels.
  dashboardPage(
    skin = "purple",
    header = dashboardHeader(title = 'Hate Crimes in US'),
    # Sidebar: one menu item per tab; tabName values must match the
    # tabItem() names used in dashboardBody() below.
    sidebar = dashboardSidebar(
      sidebarMenu(
        menuItem("Overview", tabName = "Overview", icon = icon("eye")),
        menuItem("Trends", tabName = "Trends", icon = icon("poll")),
        menuItem("Key Factors", tabName = "Key_Factors", icon = icon("list-alt"))
      )
    ),
    body = dashboardBody(
      tabItems(
        # Landing tab: static description of the app.
        tabItem(tabName = "Overview",
                h2("Hate Crimes in The US"),
                h4("This App allows users to look at the trends on Hate Crimes in the US from the year 2009 to 2018
                   and also shows a snapshot of hate crimes from a dataset from 2016 and visualizes the relashionship
                   among hate crimes and other key factors")
        ),
        # Trends tab: per-state time series ("line" plotly output) plus a
        # sortable data table.  `States` is presumably defined in
        # global.R -- TODO confirm.
        tabItem(tabName = "Trends",
                selectInput("State", label = "State:", choices = States,
                            selected = 'Total'),
                "Data Source:" ,
                div(a(href = "https://ucr.fbi.gov/hate-crime", img(src="fbi.jpeg", width = 100), target = "_blank")),
                fluidRow(
                  br(),
                  plotlyOutput("line", width = 800)),
                fluidRow(
                  box(width = 12, status = 'primary',
                      'Click on column name to sort.',
                      dataTableOutput("table")))
        ),
        # Key Factors tab: scatter plot of hate crimes against a selected
        # socio-economic factor.  `choices` is presumably defined in
        # global.R -- TODO confirm.
        tabItem(tabName = "Key_Factors",
                selectInput("factors_choice", label = "Factors", choices = choices,
                            selected = 'Median Household Income'),
                "Data Source:" ,
                div(a(href = "https://data.fivethirtyeight.com/", img(src="five.png", width = 100), target = "_blank")),
                fluidRow(
                  br(),
                  plotlyOutput("scatter", width = 800))
        )))))
|
30d03dd7f34da05ea36bbfb702bdf52fce5828e1 | f1d778c92ada96287ea912028e118fcc24467038 | /man-roxygen/CohortStagingTable.R | a7e70b7f770e6f73eaf4f57520317e977772e1fc | [
"Apache-2.0"
] | permissive | ohdsi-studies/ScyllaCharacterization | 82a9d6811d2e03d439b7bf474916c55aa2c64428 | 42ccae88acb708ee345635f6db21e3bb6aab3e20 | refs/heads/master | 2023-04-18T12:28:47.201548 | 2021-04-22T15:09:20 | 2021-04-22T15:09:20 | 284,043,640 | 2 | 1 | Apache-2.0 | 2021-04-22T15:09:21 | 2020-07-31T13:31:28 | R | UTF-8 | R | false | false | 409 | r | CohortStagingTable.R | #' @param cohortDatabaseSchema Schema name where your cohort table resides. Note that for SQL Server,
#' this should include both the database and schema name, for example
#' 'scratch.dbo'.
#' @param cohortStagingTable Name of the staging cohort table containing the cohorts before performing
#' any sub-grouping
|
1a834fe94a21fcff24f252defdb2cf09d0a6e964 | c3979af4d5e88510fc6bc204d15a89999173c78f | /man/infmort.Rd | de66aee471a49192c2269759e269f84a920482fa | [] | no_license | cran/faraway | a82ac66b6e3696ce4b3d3959c61e7136d2ef7aa9 | fd738166e58ee12f02afe35029b4e5e7ebff58d1 | refs/heads/master | 2022-08-31T20:44:43.259095 | 2022-08-23T13:50:02 | 2022-08-23T13:50:02 | 17,695,973 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 764 | rd | infmort.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/faraway-package.R
\docType{data}
\name{infmort}
\alias{infmort}
\title{Infant mortality according to income and region}
\format{
This data frame contains the following columns: \describe{
\item{region}{ Region of the world, Africa, Europe, Asia or the
Americas } \item{income}{ Per capita annual income in dollars }
\item{mortality}{ Infant mortality in deaths per 1000 births }
\item{oil}{ Does the country export oil or not? } }
}
\source{
Unknown
}
\description{
The \code{infmort} data frame has 105 rows and 4 columns. The infant
mortality in regions of the world may be related to per capita income and
whether oil is exported. The dataset is not recent.
}
\keyword{datasets}
|
9f1dc2284e24f0f10116799aab31a3aad33e406c | cbc50d31df806961b751467ed734debddb745e53 | /man/occitanie.Rd | db43d4743b49c36971cc7c9801a100e9e76d038e | [] | no_license | npp2016/linemap | 94d7e3b8a51c6a5129df82c27f5fede7389c9a43 | e814642237a70f93d2e166678b3818645e37013f | refs/heads/master | 2021-08-06T14:58:24.629909 | 2017-11-06T09:46:10 | 2017-11-06T09:46:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 348 | rd | occitanie.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package.R
\docType{data}
\name{occitanie}
\alias{occitanie}
\title{Occitanie Region}
\format{sf}
\source{
Extract from GEOFLA® 2016 v2.2 Communes France Métropolitaine - \url{http://professionnels.ign.fr/geofla}
}
\description{
Delineations of the Occitanie Region.
}
|
8cbf840550e16a3c8f945aa10a2f7aca630afb38 | c9bf542a7b473848a7b5106454194588044524b4 | /man/MEDIPS.plotSeqCoverage.Rd | 39d68a006ddbd189d106890f2dc864911aa4ff6c | [] | no_license | chavez-lab/MEDIPS | 0e2225510b2274eea0c47c1610db4382503fcd58 | d96d613fdc202af0abe95db9b1c07cbabd6bbc40 | refs/heads/master | 2021-06-16T12:33:12.729930 | 2021-02-17T00:32:22 | 2021-02-17T00:32:22 | 141,762,762 | 3 | 7 | null | 2021-02-16T18:55:26 | 2018-07-20T22:19:08 | R | UTF-8 | R | false | false | 1,641 | rd | MEDIPS.plotSeqCoverage.Rd | \name{MEDIPS.plotSeqCoverage}
\alias{MEDIPS.plotSeqCoverage}
\title{
Function plots the results of the MEDIPS.seqCoverage function.
}
\description{
The results of the sequence pattern coverage analysis will be visualized in two possible ways.
}
\usage{
MEDIPS.plotSeqCoverage(seqCoverageObj=NULL, main=NULL, type="pie", cov.level = c(0,1,2,3,4,5), t="Inf")
}
\arguments{
\item{seqCoverageObj}{
The coverage results object returned by the MEDIPS.seqCoverage function.
}
\item{main}{
The title of the coverage plot.
}
\item{type}{
there are two types of visualization. The pie chart (default) illustrates the fraction of CpGs covered by the given reads at different coverage level (see also the parameter cov.level).
As an alternative, a histogram over all coverage level can be ploted ("hist").
}
\item{cov.level}{
The pie chart illustrates the fraction of CpGs covered by the given reads according to their coverage level.
The visualized coverage levels can be adjusted by the cov.level parameter.
}
\item{t}{
specifies the maximal coverage depth to be plotted, if type="hist"
}
}
\value{
The sequence pattern coverage plot will be visualized.
}
\author{
Lukas Chavez
}
\examples{
library(MEDIPSData)
library(BSgenome.Hsapiens.UCSC.hg19)
bam.file.hESCs.Rep1.MeDIP = system.file("extdata", "hESCs.MeDIP.Rep1.chr22.bam", package="MEDIPSData")
cr=MEDIPS.seqCoverage(file=bam.file.hESCs.Rep1.MeDIP, pattern="CG", BSgenome="BSgenome.Hsapiens.UCSC.hg19", chr.select="chr22", extend=250, shift=0, uniq=1e-3, paired=FALSE)
MEDIPS.plotSeqCoverage(seqCoverageObj=cr, main="Sequence pattern coverage", type="pie", cov.level = c(0,1,2,3,4,5))
}
|
548d3d992f387be11e5759c76f3a6a86127af8d3 | 1af0dd9f58c2471088b14ffc9fac27769975345e | /man/compareClusterAbundancesPaired.Rd | 6199f643c69e220eceae771c9c60c6f3ef382dc2 | [] | no_license | dyohanne/RepAn | 6f65effee98aa83e02bf0bd14a1127ea3685ea16 | 4febd250282cb3ebdc2b006f09ad6427381f69a2 | refs/heads/master | 2023-01-22T19:55:54.793264 | 2023-01-09T09:12:51 | 2023-01-09T09:12:51 | 142,448,425 | 1 | 2 | null | null | null | null | UTF-8 | R | false | true | 388 | rd | compareClusterAbundancesPaired.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RepDaAnalysisFns.R
\name{compareClusterAbundancesPaired}
\alias{compareClusterAbundancesPaired}
\title{compare cluster abundances for paired cases}
\usage{
compareClusterAbundancesPaired(sam1, sam2, clusMatchTable, s1cls, s2cls)
}
\description{
compare cluster abundances for paired cases
}
\keyword{internal}
|
edb0be692dec651f30d0df7bf86aacd4b4808281 | af76514bf6b025034f3eafd71726af293edd155e | /run_analysis.R | a4e08b132805c9981d194f45fb26a25cb2bdc6bb | [] | no_license | Palvin10/Week-4-Assignment | 6926755d5a272e6abdc71f4b4f7c3c7dba41c2dc | 315698bf5f5a4e3566fef5cf973a60ccf8b24aec | refs/heads/master | 2020-03-20T19:52:35.451504 | 2018-06-17T16:52:30 | 2018-06-17T16:52:30 | 137,658,468 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,874 | r | run_analysis.R |
## Here are the data for the project:
## https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
## You should create one R script called run_analysis.R that does the following.
## 1. Merge the training and the test sets to create one data set.
## 2. Extracts only the measurements on the mean and standard deviation for each measurement.
## 3. Uses descriptive activity names to name the activities in the data set
## 4. Appropriately labels the data set with descriptive variable names.
## 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
library(dplyr)
library(data.table)
library(readr)
## 1. Download the data files into the current working directory and unpack.
path <- getwd()
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(url, file.path(path, "dataFiles.zip"))
unzip(zipfile = "dataFiles.zip")
# Load the activity labels + features
activity_labels <- fread(file.path(path, "UCI HAR Dataset/activity_labels.txt") ## activity code (1-6) -> activity name lookup
                         , col.names = c("class_labels", "activity_name"))
features <- fread(file.path(path, "UCI HAR Dataset/features.txt") ## one row per measurement column of X_train/X_test
                  , col.names = c("index", "feature_names"))
## Keep only mean() and std() measurements; the literal "()" in the regex
## excludes meanFreq()/angle(...) variables.
features_interested <- grep("(mean|std)\\(\\)", features[, feature_names])
measurements <- features[features_interested, feature_names] ## names of the selected measurement columns
measurements <- gsub('[()]', '', measurements) ## strip "()" from the names
# Load the train datasets ## bind subjects, activity and measurements in train datasets together.
train <- fread(file.path(path, "UCI HAR Dataset/train/X_train.txt"))[, features_interested, with = FALSE] ## keep only the mean/std columns (with=FALSE: column positions)
data.table::setnames(train, colnames(train), measurements) ## apply the cleaned feature names
train_activities <- fread(file.path(path, "UCI HAR Dataset/train/Y_train.txt")
                          , col.names = c("Activity"))
train_subjects <- fread(file.path(path, "UCI HAR Dataset/train/subject_train.txt")
                        , col.names = c("SubjectNum"))
train <- cbind(train_subjects, train_activities, train) ## column bind subjects and activities in training set
# Load the test datasets ## bind subjects, activity and measurements in test datasets together.
test <- fread(file.path(path, "UCI HAR Dataset/test/X_test.txt"))[, features_interested, with = FALSE]
data.table::setnames(test, colnames(test), measurements)
test_activities <- fread(file.path(path, "UCI HAR Dataset/test/Y_test.txt")
                         , col.names = c("Activity"))
test_subjects <- fread(file.path(path, "UCI HAR Dataset/test/subject_test.txt")
                       , col.names = c("SubjectNum"))
test <- cbind(test_subjects, test_activities, test)
# merge train and test datasets (rows: train first, then test)
combined_data <- rbind(train, test)
# Add labels. Converting the codes 1-6 to descriptive activity labels.
combined_data[["Activity"]] <- factor(combined_data[, Activity]
                                      , levels = activity_labels[["class_labels"]]
                                      , labels = activity_labels[["activity_name"]])
combined_data[["SubjectNum"]] <- as.factor(combined_data[, SubjectNum])
## Melt to long form, then cast back taking the mean of every variable per
## (subject, activity) pair -- the required tidy summary data set.
combined_data <- reshape2::melt(data = combined_data, id = c("SubjectNum", "Activity"))
combined_data <- reshape2::dcast(data = combined_data, SubjectNum + Activity ~ variable, fun.aggregate = mean) ##reshape based on data, subject number and activity
data.table::fwrite(x = combined_data, file = "tidy.txt", quote = FALSE)
|
a50e8c46774bafe532e4e85e76a9ebf0d1cb0520 | de47db6a8a358c999904444361c8c10ce7a2aa45 | /scripts/workflow.treering.R | ed740974fcbe71280b8ddacd652267cda62d9974 | [
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | davidjpmoore/pecan | 0d42d0983203ad0791ef4668862726a4fc47ec97 | 73ac58ad36764fbf8c44290bb8f2cd4b741e73b8 | refs/heads/master | 2021-01-15T17:56:19.229136 | 2015-09-01T15:11:40 | 2015-09-01T15:11:40 | 41,746,837 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,538 | r | workflow.treering.R | #--------------------------------------------------------------------------------#
# functions used to write STATUS used by history
#
# Each workflow stage appends to <settings$outdir>/STATUS: status.start()
# writes the stage name plus a start timestamp (no newline), and
# status.end() completes the line with an end timestamp and a final state
# ("DONE" by default), terminated by a newline.  Both rely on the global
# `settings` list being defined before they are called.
#--------------------------------------------------------------------------------#
# Global error hook: any uncaught error records "ERROR" for the stage
# whose status.start() line is currently open.
options(warn = 1, keep.source = TRUE, error = quote({
  status.end("ERROR")
}))
# Begin a stage: append "<name>\t<timestamp>" to the STATUS file.
status.start <- function(name) {
  cat(paste(name, format(Sys.time(), "%F %T"), sep="\t"), file=file.path(settings$outdir, "STATUS"), append=TRUE)
}
# Finish a stage: append the end timestamp and final state to the STATUS file.
status.end <- function(status="DONE") {
  cat(paste("", format(Sys.time(), "%F %T"), status, "\n", sep="\t"), file=file.path(settings$outdir, "STATUS"), append=TRUE)
}
#---------------- Load libraries. -----------------------------------------------------------------#
require(PEcAn.all)
library(PEcAn.assim.sequential)
library(PEcAn.visualization)
#--------------------------------------------------------------------------------------------------#
#
# One-time setup for a new analysis directory (kept here for reference):
# dir.create("~/demo.sda")
# clean.settings("~/demo.pda/demo.xml","~/demo.sda/demo.xml")
#---------------- Load PEcAn settings file. -------------------------------------------------------#
# Open and read in settings file for PEcAn run.
settings <- read.settings("~/demo.sda/demo.xml")
#--------------------------------------------------------------------------------------------------#
#---------------- Load plot and tree ring data. -------------------------------------------------------#
status.start("LOAD DATA")
## Read tree data
trees <- read.csv("/home/carya/Camp2014/ForestPlots/treecores2014.csv")
## Read tree ring data
rings <- Read_Tuscon("/home/carya/Camp2014/ForestPlots/Tucson/")
## Match observations & format for JAGS
combined <- matchInventoryRings(trees,rings,extractor="Tag",nyears=36,coredOnly=FALSE)
data <- buildJAGSdata_InventoryRings(combined)
status.end()
#---------------- Fit the tree ring fusion model. -----------------------------------------------------#
status.start("TREE RING MODEL")
## Tree Ring model
n.iter = 3000
jags.out = InventoryGrowthFusion(data,n.iter=n.iter)
save(trees,rings,combined,data,jags.out,
     file=file.path(settings$outdir,"treering.Rdata"))
## Write model diagnostics to a PDF in the output directory.
pdf(file.path(settings$outdir,"treering.Diagnostics.pdf"))
InventoryGrowthFusionDiagnostics(jags.out,combined)
dev.off()
status.end()
#-------------- Allometry Model -------------------------------#
status.start("ALLOMETRY")
library(PEcAn.allometry)
con <- db.open(settings$database$bety)
pft.data = list()
## Look up the species (FIA spcd + acronym) belonging to each PFT in BETY.
for(ipft in 1:length(settings$pfts)){ ## loop over PFTs
  pft_name = settings$pfts[[ipft]]$name
  query <- paste0("SELECT s.spcd,",'s."Symbol"'," as acronym from pfts as p join pfts_species on p.id = pfts_species.pft_id join species as s on pfts_species.specie_id = s.id where p.name like '%",pft_name,"%'")
  pft.data[[pft_name]] <- db.query(query, con)
}
allom.stats = AllomAve(pft.data,outdir = settings$outdir,ngibbs=n.iter/10)
save(allom.stats,file=file.path(settings$outdir,"allom.stats.Rdata"))
status.end()
#-------------- Convert tree-level growth & diameter to stand-level NPP & AGB -------------------------------#
status.start("PLOT2AGB")
out = as.matrix(jags.out)
## Columns named x[...] hold the latent diameter states from the JAGS fit.
sel = grep('x[',colnames(out),fixed=TRUE)
state = plot2AGB(combined,out[,sel],settings$outdir,allom.stats,unit.conv=0.01)
## Summarise NPP for the first plot into per-year mean/sd for assimilation.
obs = data.frame(mean = apply(state$NPP[1,,],2,mean,na.rm=TRUE),
                 sd = apply(state$NPP[1,,],2,sd,na.rm=TRUE))
status.end()
#---------------- Build Initial Conditions ----------------------------------------------------------------------#
status.start("IC")
ne = as.numeric(settings$assim.sequential$n.ensemble)
IC = sample.IC.SIPNET(ne,state)
status.end()
#---------------- Load Priors ----------------------------------------------------------------------#
status.start("PRIORS")
prior = sample.parameters(ne,settings,con)
status.end()
#--------------- Assimilation -------------------------------------------------------#
status.start("MCMC")
sda.enkf(settings,IC,prior,obs)
status.end()
#--------------------------------------------------------------------------------------------------#
### PEcAn workflow run complete
status.start("FINISHED")
## Record the finish time in the BETY workflows table; an id of 'NA'
## means the run is not registered in the database.
if (settings$workflow$id != 'NA') {
  query.base(paste("UPDATE workflows SET finished_at=NOW() WHERE id=", settings$workflow$id, "AND finished_at IS NULL"),con)
}
status.end()
db.close(con)
## close any open database connections
for(i in dbListConnections(PostgreSQL())) db.close(i)
print("---------- PEcAn Workflow Complete ----------")
#--------------------------------------------------------------------------------------------------#
|
48471efdb9ef41fd0c2323db528e5408e754931a | 0c3e94ac074efbe60d2c14669d97fcb4fb1e506d | /data.scripts/transitSkimAnalysis/visualization/All accessible TAPS.R | 6b967e8bf121cda21a1401977bafe0850115982a | [] | no_license | nsriram13/travel-model-two | 04aa027924a385734303a6c76dc23940decf8b1c | e5812f2ab1278f85a10f3a92051cf0395a466738 | refs/heads/master | 2020-04-08T20:14:56.773420 | 2015-03-20T23:56:32 | 2015-03-20T23:56:32 | 32,354,779 | 1 | 0 | null | 2015-03-16T21:42:12 | 2015-03-16T21:42:12 | null | UTF-8 | R | false | false | 3,439 | r | All accessible TAPS.R | #' Script for visualizing set of accessible TAPs in MTC TM2 transit network
#'
#' The useR has to specify the following parameters
#' @param period Skim time period to use ['EA','AM','MD','PM','EV']
#' @param TAP_BEING_QUERIED tap id for which we want to plot the accessible set of taps
#' @param TAP_NAME name of the tap to give the user a general idea of the origin tap area. For pretty plotting
#'
#' The script produces a map showing TAPs colored based on whether or not they are accessible from TAP_BEING_QUERIED
#' Green - TAP is accessible; Red - TAP is not accessible; Blue - TAP_BEING_QUERIED
#'
#' @date: 2013-11-07
#' @author: sn, narayanamoorthys AT pbworld DOT com
library(maptools)
library(sp)
library(dplyr)
library(ggplot2)
library(RColorBrewer)
library(rgdal)
library(ggmap)
# Specify input parameters
PERIOD = 'AM'
TAP_BEING_QUERIED = 487
TAP_NAME = "Near Trans Bay Terminal" # Just for making the plot title nice
# Shell out to a Python helper that reads the skim CSVs with pandas
# (fastest option available); it is expected to produce the per-set CSVs
# under ./plot_csv/ that are read below -- Windows-specific Python path.
cmd <- "C:\\Python27\\python.exe quickSkimRead.py @@TAP@@ @@PERIOD@@"
cmd <- gsub("@@TAP@@",TAP_BEING_QUERIED,cmd)
cmd <- gsub("@@PERIOD@@",PERIOD,cmd)
system(cmd, intern=FALSE, show.output.on.console=TRUE)
# Create a blank theme for plotting the map (no axis text or titles)
blankMapTheme <- theme_grey()
blankMapTheme$axis.text <- element_blank()
blankMapTheme$axis.title <- element_blank()
# Read in the TAP node shapefile and unproject to WGS84 lon/lat
taps <- readOGR("shapefiles","mtc_tap_nodes")
if (is.projected(taps)) taps <- spTransform(taps, CRS("+proj=longlat +ellps=WGS84"))
# Read in skim data (one CSV per skim set) to get accessible TAPs
skimSet1 = read.csv(gsub("@@TAP@@", TAP_BEING_QUERIED, gsub("@@PERIOD@@",PERIOD, gsub("@@SET@@","SET1","./plot_csv/ts_plot_@@SET@@_@@PERIOD@@_@@TAP@@.csv"))))
skimSet2 = read.csv(gsub("@@TAP@@", TAP_BEING_QUERIED, gsub("@@PERIOD@@",PERIOD, gsub("@@SET@@","SET2","./plot_csv/ts_plot_@@SET@@_@@PERIOD@@_@@TAP@@.csv"))))
skimSet3 = read.csv(gsub("@@TAP@@", TAP_BEING_QUERIED, gsub("@@PERIOD@@",PERIOD, gsub("@@SET@@","SET3","./plot_csv/ts_plot_@@SET@@_@@PERIOD@@_@@TAP@@.csv"))))
# A destination TAP counts as accessible if it appears in any set's output.
zone_list = unique(c(skimSet1$DTAP,skimSet2$DTAP,skimSet3$DTAP))
# Create plot layers: split TAPs by accessibility and isolate the origin TAP.
accessible_taps = taps[taps@data$TAPSEQ %in% zone_list,]
accessible_taps.df <- as.data.frame(accessible_taps@coords)
inaccessible_taps = taps[!(taps@data$TAPSEQ %in% zone_list),]
inaccessible_taps.df <- as.data.frame(inaccessible_taps@coords)
query_tap = taps[taps@data$TAPSEQ %in% c(TAP_BEING_QUERIED),]
query_tap.df <- as.data.frame(query_tap@coords)
# Fetch a Google basemap centered on the mean TAP coordinate (Bay Area)
mtc_baseMap = get_googlemap(center = c(lon = mean(taps@coords[,"coords.x1"]), lat = mean(taps@coords[,"coords.x2"]))
                            ,zoom = 8
                            ,scale = 4
                            ,maptype="roadmap"
                            ,size = c(640, 640))
# Plot
# Assemble the final map: inaccessible TAPs in red, accessible TAPs in
# green, and the queried origin TAP in blue, drawn last (and larger) so
# it sits on top of the other layers.
p1 <- ggmap(mtc_baseMap) +
  geom_point(data = inaccessible_taps.df, aes(x = coords.x1, y = coords.x2),
             inherit.aes = FALSE, fill = "red", pch = 21, size = 1) +
  geom_point(data = accessible_taps.df, aes(x = coords.x1, y = coords.x2),
             inherit.aes = FALSE, fill = "Lawn Green", pch = 21, size = 1) +
  # Bug fix: the original call passed `size` twice (size=4 and size=1);
  # size=4 -- the emphasis size for the queried TAP -- is the intended one.
  geom_point(data = query_tap.df, aes(x = coords.x1, y = coords.x2),
             inherit.aes = FALSE, fill = "#377eb8", pch = 21, size = 4) +
  blankMapTheme +
  labs(title = paste(TAP_NAME, "All Sets :",PERIOD, TAP_BEING_QUERIED,sep=" "))
# Save the map as a PNG named after the queried TAP / period.
ggsave(file = paste(TAP_NAME, " All Sets ",PERIOD, TAP_BEING_QUERIED,".png",sep=""),
       width = 10, height = 10, dpi = 1080)
|
b97e219c32ecdd46a490e06b925ccb7024dbb7de | 573df0a5a6f7e36281085aa97e37cec0603ae2e6 | /R/fts2kmh.r | 296bc83f3162350472276a88d90f6502ab6f08c5 | [
"MIT"
] | permissive | alfcrisci/rBiometeo | 3fe1bc89bf76e8f24512bae5e21cdc5b5953bb53 | 1fe0113d017372393de2ced18b884f356c76049b | refs/heads/master | 2021-06-11T06:04:50.066548 | 2021-04-07T14:00:36 | 2021-04-07T14:00:36 | 65,311,485 | 2 | 2 | null | null | null | null | UTF-8 | R | false | false | 630 | r | fts2kmh.r | #' fts2kmh
#'
#' Conversion speed in feet per second to kilometers per hour.
#'
#' @param fts numeric Speed in feet per second.
#' @return kilometers per hour
#'
#'
#' @author Istituto per la Bioeconomia CNR Firenze Italy Alfonso Crisci \email{alfonso.crisci@@ibe.cnr.it}
#' @keywords fts2kmh
#'
#' @export
#'
#'
#'
#'
# Delegates the conversion to a JavaScript fts2kmh() function evaluated in
# a V8 context; `ct` is assumed to be initialised elsewhere in the package
# (e.g. at load time) with that JS implementation -- TODO confirm.
# The JS side returns 9999 as an error sentinel, which is mapped to NA.
fts2kmh=function(fts) {
	ct$assign("fts", as.array(fts))
	ct$eval("var res=[]; for(var i=0, len=fts.length; i < len; i++){ res[i]=fts2kmh(fts[i])};")
	res=ct$get("res")
	return(ifelse(res==9999,NA,res))
}
|
e30494d713eefa94acb53ec2a60de9d8b6852806 | c84f4694c07cd32674abee7235e1d31353e225a3 | /Distributions/eachGraphs/hyperbolicSecantDistribution.R | d33183eed2c5a13e1f6a1ed4af5d49c29fd57a74 | [] | no_license | praster1/Note_SurvivalAnalysis | c1e2fee2865432fa4be89a4000c20e38ef1bef3e | db50363874c69fea6d4d47a24409c387c5276f84 | refs/heads/master | 2020-03-26T23:00:24.345471 | 2018-12-04T10:34:49 | 2018-12-04T10:34:49 | 145,502,914 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,731 | r | hyperbolicSecantDistribution.R | # https://www.rdocumentation.org/packages/VaRES/versions/1.0/topics/secant
source("colorPalette.R")   # provides colorPalette() used by the plotting helper
require(pracma)            # numeric utilities (e.g. sech) used in this script
##### hyperbolicsecant Distribution
### parameters: location values (alpha) and scale values (beta) to plot
alpha = c(-1, -0.75, -0.5, -0.25, 0, 0.25, 0.5, 0.75, 1)
beta = c(0.25, 0.5, 0.75, 1, 2, 4, 8)
### input variable: common evaluation grid for all curves
x = seq(-10, 10, length.out = 1000)
### Life distribution (density)
# Density of the (location-scale) hyperbolic secant life distribution,
# parameterised to match p/s/hhyperbolicsecant below:
#   f(x) = 1/(beta*pi) * sech((x - alpha)/beta)
# which is the derivative of F(x) = (2/pi) * atan(exp((x - alpha)/beta)).
#
# x     : numeric vector of evaluation points
# alpha : location parameter
# beta  : scale parameter (must be > 0)
#
# Uses base R 1/cosh(u) in place of pracma::sech(u) -- identical by
# definition -- so this function no longer depends on pracma.
dhyperbolicsecant = function(x, alpha = 1, beta = 1)
{
  fx = (1 / (beta * pi)) / cosh((x - alpha) / beta)
  return(fx)
}
### Random number generator
# Draws n samples by discretising the density on a 1e6-point grid over
# [min, max] and sampling grid points with probability proportional to
# the (min-max rescaled) density -- an approximate, truncated sampler
# rather than an exact inverse-CDF draw.
#
# n          : number of draws
# min, max   : truncation bounds of the support grid
# alpha, beta: location / scale parameters passed to dhyperbolicsecant
rhyperbolicsecant = function (n, min=-10, max=10, alpha = 1, beta = 1)
{
	# Rescale a vector to [0, 1]; sample() renormalises prob to sum 1.
	normalization = function(x) { (x-min(x))/(max(x)-min(x)); }
	xseq = seq(min, max, length=1000000)
	res = sample(xseq, size=n, prob=normalization(dhyperbolicsecant(xseq, alpha = alpha, beta = beta)), replace=TRUE)
	return(res)
}
### Cumulative distribution function
# F(x) = 1 - S(x), delegating to the survival function defined below.
phyperbolicsecant = function(x, alpha = 1, beta = 1)
{
  return(1 - shyperbolicsecant(x, alpha, beta))
}
### Survival function
# S(x) = 1 - F(x) = 1 - (2/pi) * atan(exp((x - alpha)/beta))
#
# x     : numeric vector of evaluation points
# alpha : location parameter
# beta  : scale parameter (must be > 0)
#
# Fix: the default was `beta = 0`, which makes (x - alpha)/beta infinite
# and degenerates S into a step function; every other function in this
# file defaults to beta = 1, so the same default is used here.
shyperbolicsecant = function (x, alpha = 1, beta = 1)
{
  fx = 1 - (2/pi) * atan(exp((x - alpha)/beta))
  return(fx)
}
### Hazard function
# h(x) = f(x) / S(x), built from the density and survival functions above.
#
# Fix: default changed from beta = 0 (degenerate -- divides by zero inside
# the underlying density/survival functions) to beta = 1, matching the
# defaults of dhyperbolicsecant and the other functions in this file.
hhyperbolicsecant = function (x, alpha = 1, beta = 1)
{
  fx = dhyperbolicsecant(x, alpha, beta) / shyperbolicsecant(x, alpha, beta)
  return(fx)
}
##### Plot
# Draw one panel per alpha value; within each panel, one curve per beta
# value of the requested function, plus a legend mapping colours to the
# (alpha, beta) pairs.
#
# x     : numeric vector of evaluation points
# alpha : vector of location parameters (one panel each)
# beta  : vector of scale parameters (one curve each)
# xlim, ylim : axis limits passed to plot()
# func  : name of the curve function -- one of "dhyperbolicsecant",
#         "phyperbolicsecant", "shyperbolicsecant", "hhyperbolicsecant"
#
# Refactored from four nearly identical copy-pasted branches into a
# single loop: the function object and the panel title are both looked
# up once from `func`.
plot.hyperbolicsecant_seq = function(x, alpha = 1, beta = 0, xlim=c(0, 10), ylim=c(0, 5), func="dhyperbolicsecant")
{
  fn = match.fun(func)
  main_title = switch(func,
    dhyperbolicsecant = "Life Distribution Function",
    phyperbolicsecant = "Cumulative Distribution Function",
    shyperbolicsecant = "Survival Function",
    hhyperbolicsecant = "Hazard Function")

  color = colorPalette(300)
  color_counter = 1

  for (i in seq_along(alpha))       ### one panel per alpha value
  {
    color_counter_init = color_counter
    legend_name = NULL

    # Empty frame (type = 'n'); all curves are added with lines() below
    # so they share the same axis limits.
    plot(x, fn(x, alpha = alpha[1], beta = beta[1]), xlim = xlim, ylim = ylim,
         col = color[1], lwd = 2, type = 'n', main = main_title)

    for (j in seq_along(beta))      ### one curve per beta value
    {
      lines(x, fn(x, alpha = alpha[i], beta = beta[j]), col = color[color_counter], lwd = 2)
      color_counter = color_counter + 1
      legend_name = c(legend_name, paste("alpha = ", alpha[i], " / beta = ", beta[j], sep = ""))
    }

    legend('right', bty = 'n', lwd = 2,
           col = color[color_counter_init:(color_counter - 1)], legend = legend_name)
  }
}
## Render a 3x3 grid of panels (one per alpha value -- alpha has 9
## entries) for each of the four views of the distribution.
par(mfrow = c(3, 3))
plot.hyperbolicsecant_seq(x, alpha, beta, xlim=c(min(x), max(x)), ylim=c(0, 5), func="dhyperbolicsecant")
par(mfrow = c(3, 3))
plot.hyperbolicsecant_seq(x, alpha, beta, xlim=c(min(x), max(x)), ylim=c(0, 1), func="phyperbolicsecant")
par(mfrow = c(3, 3))
plot.hyperbolicsecant_seq(x, alpha, beta, xlim=c(min(x), max(x)), ylim=c(0, 1), func="shyperbolicsecant")
par(mfrow = c(3, 3))
plot.hyperbolicsecant_seq(x, alpha, beta, xlim=c(min(x), max(x)), ylim=c(0, 10), func="hhyperbolicsecant")
|
2a400ae92e0f324556b43e95623bce3c03b75643 | 8b7d2ca3ccd28b64ce9caef1dea281d2951004fb | /R/aveytoolkit_runLimma.R | ef77a94763f51cd7d8ce869ca3c9581203771d05 | [] | no_license | stefanavey/aveytoolkit | 4a1275e7071d2d7a4e836d3d94514b673cb92739 | 739f0016fbe868af9d11cc1200c20e2a12441db6 | refs/heads/master | 2021-02-13T17:59:46.686828 | 2020-03-04T19:09:37 | 2020-03-04T19:09:37 | 244,718,968 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,364 | r | aveytoolkit_runLimma.R | ##' runLimma
##'
##' A wrapper around limma functions to perform a basic analysis on the given expression matrix
##'
##' @param eset the expression matrix (not expression set object)
##' @param labels the labels for each column of the eset
##' @param contrasts Vector of contrasts to make
##' @param block vector or factor specifying a blocking variable on the arrays. Has length equal to the number of arrays. Must be ‘NULL’ if ‘ndups>2’. (Not extensively tested, use with caution)
##' @param covariates data frame of covariates (of same length as labels) to include in the model. Use this if there are paired samples, etc.
##' @param min.fold.change Minimum log2 fold change to be differentially expressed. Default is 1.
##' @param min.intensity Minimum log2 intensity (at any time) to be differentially expressed. Default is 4.
##' @param p.cutoff FDR corrected cutoff for significant differential expression. Default is 0.05.
##' @param fitOnly If true, will return fit2, rather than the matrix of significant genes. Default is FALSE.
##' @param robust passed to \code{eBayes}
##' @param ... additional arguments passed to \code{lmFit}
##' @return depends on \code{fitOnly}
##' @details Generally, an expression matrix is made up of rows of genes (or any other features) and columns of samples. The matrix has data for multiple classes (which are denoted with the 'labels' parameter) and the classes are compared using the vector of contrasts. Block can be used for biological (or technical) replicates or for separate subjects (in which case it will determine the inter-subject correlation). See \code{?duplicateCorrelation} for more information.
##' ## Example:
##' If you have a m X 10 matrix 'eset', with 5 samples of class A and 5 of class B,
##' you could compare class A to class B using the following code:
##'
##' results = runLimma(eset, c('A','A','A','A','A','B','B','B','B','B'), "B-A")
##'
##' This will return to you a matrix with columns for each comparison and rows for each gene.
##' The value in each cells will either be -1, 0, or 1, depending on whether the gene is
##' significantly higher in B, not significant, or significantly higher in A, respectively.
##' If you want information on p-values and fold changes, set "fitOnly=T", and you can access
##' the fit object to get the information.
##'
##' For other comparisons, you can look at the LIMMA user guide: limmaUsersGuide()
##' @author Christopher Bolen, Stefan Avey
##' @keywords aveytoolkit
##' @seealso \code{\link{limma}}
##' @export
##' @examples
##' \dontrun{
##' ## Load in example data from colonCA package (install if necessary)
##' ## source("http://bioconductor.org/biocLite.R")
##' ## biocLite("colonCA")
##' library(colonCA)
##' ## Look at head of data
##' head(pData(colonCA))
##' labels <- pData(colonCA)$class # t and n for tumor and normal
##' ## Data are paired (-1 and 1 come from same subject)
##' pair <- factor(abs(pData(colonCA)$samp))
##' covars <- data.frame(Pairing=as.character(pair))
##' deRes <- runLimma(eset=exprs(colonCA), labels=as.character(labels), contrasts="t-n",
##' covariates=covars, fitOnly=TRUE)
##' topTable(deRes)
##' ## Or just do tests in the function to get -1, 0, 1 for DE status of each probe
##' testRes <- runLimma(eset=exprs(colonCA), labels=as.character(labels), contrasts="t-n",
##' covariates=data.frame(Pairing=as.character(pair)), fitOnly=FALSE)
##' head(testRes)
##' }
runLimma <- function (eset, labels, contrasts, block = NULL, covariates = NULL,
                      min.fold.change = 1, min.intensity = 4, p.cutoff = 0.05,
                      fitOnly = FALSE, robust = FALSE, ...)
{
    if (!require(limma)) {
        stop("runLimma requires the Bioconductor limma package.\n",
             "Please install with get_bioc(\"limma\")")
    }
    ## Drop features whose (log2) intensity never reaches min.intensity in
    ## any sample; drop = FALSE keeps the matrix shape even if one row remains.
    tooLow <- eset < min.intensity
    drop <- apply(tooLow, 1, all)
    eset <- eset[!drop, , drop = FALSE]
    labels <- as.factor(as.vector(labels))
    if (is.null(covariates)) {
        ## Cell-means design: one indicator column per class label.
        design <- model.matrix(~-1 + labels)
        colnames(design) <- levels(labels)
    }
    else {
        if (!is.data.frame(covariates)) {
            stop("Covariates must be a data frame")
        }
        ## One model term per covariate column, every covariate as a factor.
        f <- paste(c("~ 0 + labels", colnames(covariates)), collapse = "+")
        covariates[] <- lapply(covariates, as.factor)
        ## Design-matrix column names: the label levels followed by
        ## "cov.<i>.<level>" for each non-reference covariate level.
        cns <- c(levels(labels),
                 unlist(lapply(seq_along(covariates), function(i) {
                     paste("cov", i, levels(covariates[[i]])[-1], sep = ".")
                 })))
        ## Evaluate the formula against an explicit data frame rather than
        ## attach()/detach(), which mutates the global search path and is
        ## unsafe inside package code.
        design <- model.matrix(formula(f),
                               data = data.frame(labels = labels, covariates,
                                                 check.names = FALSE))
        colnames(design) <- cns
    }
    ## With a blocking factor, estimate the consensus intra-block correlation
    ## and pass it to lmFit (see ?duplicateCorrelation).
    cor <- NULL
    if (!is.null(block)) {
        block <- as.factor(block)
        corfit <- duplicateCorrelation(eset, design = design,
                                       block = block)
        cor <- corfit$consensus.correlation
    }
    fit <- lmFit(eset, design = design, block = block, correlation = cor, ...)
    rownames(fit$coefficients) <- rownames(eset)
    rownames(fit$stdev.unscaled) <- rownames(eset)
    contrast.matrix <- makeContrasts(contrasts = contrasts, levels = design)
    fit2 <- contrasts.fit(fit, contrast.matrix)
    fit2 <- eBayes(fit2, robust = robust)
    if (fitOnly) {
        return(fit2)
    }
    ## -1/0/+1 per feature and contrast at the requested FDR (BH-adjusted
    ## p.cutoff) and log2 fold-change cutoffs.
    decideTests(fit2, adjust.method = "BH", p.value = p.cutoff,
                lfc = min.fold.change)
}
|
c0a144ae00901090a74e2952cfdc82cdb0fe5e41 | 71bdb3ebd08a35492b3166ed5d06bd372aeea681 | /simulation.R | 414435ebb569a63e32ba1ea872988abe4bca8cc5 | [
"MIT"
] | permissive | dpopadic/learnResR | 8a365b013f5105020e4c4ad42dc01ed7624b9be7 | 739c125d2e7e534fe2dfea5bc52dd127d9e6680b | refs/heads/master | 2022-01-19T04:20:31.827642 | 2022-01-12T15:12:32 | 2022-01-12T15:12:32 | 191,589,277 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 622 | r | simulation.R | # SIMULATIONS IN R
# ----------------
# 1 SOLVING CALCULUS PROBLEMS
# ---------------------------
# Let's say we want to integrate x^2 over interval [0, 1]. Using monte-carlo simulation,
# one can throw darts at the curve to count the number falling below the curve. The challenge
# is to vectorise the simulation.
# non-vectorised: draw one (u1, u2) point per loop iteration and count how
# often it lands below the curve y = x^2; the hit fraction estimates the
# integral (true value: 1/3).
mc <- function(n) {
  hits <- 0
  for (i in seq_len(n)) {
    u1 <- runif(1)
    u2 <- runif(1)
    if (u1^2 > u2)
      hits <- hits + 1
  }
  return(hits / n)
}
system.time(mc(n = 1000000))
# vectorised: draw all n points at once; one logical comparison of two runif
# vectors replaces the whole loop, which is dramatically faster.
mc <- function(n) sum(runif(n)^2 > runif(n)) / n
system.time(mc(n = 1000000))
|
909e3a8ad191299abd8d212442fc348ba9adb191 | f9055f2e316129ec21665a10200fda9ad856f975 | /man/tf_morlet.Rd | 68b04455186c9c18e1c8fd98bf8b92e9750ef4ec | [
"MIT"
] | permissive | bnicenboim/eegUtils | 879b4a8173acc29de17b1dc0ca7ec8453dbc2522 | 09eb5fcc983d24d058bfa4575d5e8a2537fcfe21 | refs/heads/master | 2020-03-11T03:16:30.714963 | 2018-05-03T18:53:24 | 2018-05-03T18:53:24 | 129,742,381 | 0 | 0 | null | 2018-04-16T12:47:20 | 2018-04-16T12:47:19 | null | UTF-8 | R | false | true | 801 | rd | tf_morlet.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/psd.R
\name{tf_morlet}
\alias{tf_morlet}
\alias{tf_morlet.eeg_epochs}
\title{Time-frequency analysis}
\usage{
tf_morlet(data, ...)
\method{tf_morlet}{eeg_epochs}(data, foi, n_freq, n_cycles, ...)
}
\arguments{
\item{data}{EEG data to be TF transformed}
\item{...}{Further parameters of the timefreq transformation}
\item{foi}{Frequencies of interest. Scalar or character vector of the lowest and highest frequency to resolve.}
\item{n_freq}{Number of frequencies to be resolved.}
\item{n_cycles}{Number of cycles at each frequency.}
}
\description{
Morlet wavelet time-frequency analysis.
}
\section{Methods (by class)}{
\itemize{
\item \code{eeg_epochs}: Time-frequency decomposition of \code{eeg_epochs} object.
}}
|
55c6a801c79bceff0af4e6defc722a4166b50db3 | 62d99d35137feaef5ac4bd44ee92007dfe4749b8 | /plot1.R | 992cf08a932b7c208dda0944c2d6d56ce7a640bf | [] | no_license | dcotes/ExData_Plotting1 | afa450bf3169cfbfa39c3c216d93616688602fc7 | c412d841b5688e16188d34e219fa8a5214633ec0 | refs/heads/master | 2021-01-18T08:57:14.240091 | 2015-01-11T19:14:14 | 2015-01-11T19:14:14 | 22,801,460 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 575 | r | plot1.R |
## Plot 1: histogram of household Global Active Power, saved as Plot1.png.
data <- read.table("power.txt", header = T, sep = ";", na.strings = "?") ##Reads in data; "?" marks missing values
data$Date <- strptime(paste(data$Date,data$Time), "%d/%m/%Y %H:%M:%S") ##Combines date and time into a proper date-time class
datasub <- data[66637:69516,] ##Subsets the target two-day interval by hard-coded row positions -- assumes the file's row order; verify against the Date column
png("Plot1.png", width = 480, height = 480) ##Set plot device (480x480 px)
hist(datasub$Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", col = "red") ##Create Plot
dev.off() ## turn off plot device
|
7ddeffb19154670fe285b235e9f3217ec718c63e | 4476502e4fed662b9d761c83e352c4aed3f2a1c2 | /GIT_NOTE/02_Rscript_workplace/chap12/chap12_Regression from lecture.R | 993aaf0bfebf66871bc879619a1a9d9256c718a5 | [] | no_license | yeon4032/STUDY | 7772ef57ed7f1d5ccc13e0a679dbfab9589982f3 | d7ccfa509c68960f7b196705b172e267678ef593 | refs/heads/main | 2023-07-31T18:34:52.573979 | 2021-09-16T07:45:57 | 2021-09-16T07:45:57 | 407,009,836 | 0 | 0 | null | null | null | null | UHC | R | false | false | 10,369 | r | chap12_Regression from lecture.R | # chap12_Regression
######################################################
# Regression Analysis
######################################################
# - Analyzes how a particular variable (independent / explanatory) affects
#   another variable (dependent / response).
###################################
## 1. Simple regression analysis
###################################
# - One independent variable and one dependent variable.
# Build a simple linear regression model.
# Form: lm(formula = y ~ x, data)
setwd("C:/ITWILL/2_Rwork/data")
product <- read.csv("product.csv", header=TRUE)
head(product) # familiarity, adequacy, satisfaction (5-point interval scale)
str(product) # 'data.frame': 264 obs. of 3 variables:
y = product$'제품_만족도' # dependent variable (product satisfaction)
x = product$'제품_적절성' # independent variable (product adequacy)
df <- data.frame(x, y)
head(df) # x y
# Fit the regression model.
result.lm <- lm(formula=y ~ x, data=df)
result.lm # regression coefficients
#Coefficients:
# (Intercept) = intercept (b),  x = slope (a)
# 0.7789 0.7393
# Regression equation: y = a*x + b
a = 0.7393 # slope
b = 0.7789 # intercept
X = 4
y = X*a + b #+ error term
y # 3.7361
Y = 3
# Error (residual) for this observation.
err <- Y - y
err # -0.7361
# The fitted lm object has 12 components.
names(result.lm)
# "coefficients" : regression coefficients
# "residuals" : errors (residuals)
# "fitted.values" : fitted values from the regression equation
result.lm$coefficients
# 0.7788583 0.7392762
result.lm$residuals[1] # -0.735963
mean(result.lm$residuals) # -5.617479e-17 = 0 (residuals average to zero)
result.lm$fitted.values[1] # 3.735963
# Model prediction: predicted y for an unseen x value.
y_pred <- predict(result.lm, data.frame(x=5))
# (2) Inspect the regression results.
summary(result.lm)
# <Analysis steps>
# 1. Overall model significance: F-test p-value < 0.05
# 2. Explanatory power: R-squared (the closer to 1, the better)
# 3. Significance of each X variable: t-test p-value; '*' marks strength
# (3) Visualize the simple regression.
# Scatter plot of x vs y.
plot(formula=y ~ x, data=df)
# Fit the regression model.
result.lm <- lm(formula=y ~ x, data=df)
result.lm
# Add the regression line.
abline(result.lm, col='red')
# Exam score and IQ.
score_iq <- read.csv('score_iq.csv')
head(score_iq)
cor(score_iq[2:3]) # 0.882220
plot(score_iq$iq, score_iq$score) # (x, y)
model <- lm(score ~ iq, data = score_iq) # (y ~ x)
model
abline(model, col='red')
summary(model)
0.882220^2 # 0.7783121 (R-squared = squared correlation)
range(score_iq$score) # 65 90
###################################
## 2. Multiple regression analysis
###################################
# - Effect of several independent variables on one dependent variable.
# Hypothesis: product adequacy (x1) and familiarity (x2) have a positive
# effect on product satisfaction (y).
product <- read.csv("product.csv", header=TRUE)
head(product) # familiarity, adequacy, satisfaction (5-point interval scale)
# 1) adequacy + familiarity -> satisfaction
y = product$'제품_만족도' # dependent variable
x1 = product$'제품_친밀도' # independent variable 1 (familiarity)
x2 = product$'제품_적절성' # independent variable 2 (adequacy)
df <- data.frame(x1, x2, y)
result.lm <- lm(formula=y ~ x1 + x2, data=df)
result.lm <- lm(formula=y ~ ., data=df) # . : all variables except y
# Check the coefficients.
result.lm
#(Intercept)=b x1=a1 x2=a2
# 0.66731 0.09593 0.68522
# Regression equation: y = a1*X1 + a2*X2 + b
head(df, 1)
X1 = 3
X2 = 4
Y = 3
a1 = 0.09593
a2 = 0.68522
b = 0.66731
y = X1*a1 + X2*a2 + b
print(y) # 3.69598
#y = (X %*% a) + b # matrix multiplication form
X <- matrix(data = c(3, 4), nrow = 1, ncol = 2)
X
a <- matrix(data = c(0.09593, 0.68522), nrow = 2, ncol = 1)
a
dim(X) # 1 2
dim(a) # 2 1
y = (X %*% a) + b
y # 3.69598
dim(y) # 1 1
names(result.lm)
result.lm$fitted.values[1] # 3.69598
# Multiple regression results.
summary(result.lm)
# 2) Using the prestige dataset of 102 occupations.
#install.packages("car") # needed for the multicollinearity checks below
library(car)
Prestige # dataset provided by the car package
class(Prestige) # "data.frame"
names(Prestige) # 6 variables
row.names(Prestige) # row names: the 102 occupation titles
# 102 occupations: education level, income, proportion of women,
# prestige score, census code, and occupation type.
str(Prestige)
Cor <- cor(Prestige[1:4])
Cor['income',] # 0.5775802 1.0000000 -0.4410593 0.7149057
# Dependent variable : income
# Independent variables : education + women + prestige
model <- lm(income ~ education + women + prestige, data = Prestige)
model
summary(model)
#Coefficients:
#Estimate Std. Error t value Pr(>|t|)
#(Intercept) -253.850 1086.157 -0.234 0.816
#education 177.199 187.632 0.944 0.347 -> not significant
#women -50.896 8.556 -5.948 4.19e-08 *** -> negative effect
#prestige 141.435 29.910 4.729 7.58e-06 *** -> positive effect
# Adjusted R-squared: 0.6323
# Visualize pairwise relations with regression lines.
install.packages('psych')
library(psych)
newdata <- Prestige[c(2,1,3:4)] # put income first
head(newdata)
# stars: significance of the correlations, lm: regression line,
# ci: confidence band around the regression line
pairs.panels(newdata, stars = TRUE, lm = TRUE, ci = TRUE)
# Correlation ellipses: the narrower the ellipse, the stronger the correlation.
###################################
## 3. Variable selection (slide 24)
###################################
# Forward selection : start from the intercept-only model, add one variable at a time.
# Backward elimination : start from the full model, remove one variable at a time.
# Stepwise : combination of both.
str(Prestige)
newdata <- Prestige[-6] # drop 'type'
dim(newdata) # 102 5
model <- lm(income ~ ., data = newdata)
model
library(MASS)
step <- stepAIC(model, direction = 'both')
# A lower AIC indicates a more adequate model.
#Step: AIC=1604.96
#income ~ women + prestige (the final model in the step table)
new_model <- lm(income ~ women + prestige, data = newdata)
new_model
summary(new_model)
# Adjusted R-squared: 0.6327
# Curse of dimensionality: too many features lead to overfitting
# - high accuracy on the training data, low accuracy on new data.
###################################
# 4. Multicollinearity and machine learning
###################################
# - Strong correlation among independent variables makes the regression
#   results unreliable.
# - Example: using both birth date and age as predictors.
# - Remedy: drop one of the strongly correlated predictors.
# (1) Check for multicollinearity.
library(car)
names(iris)
fit <- lm(formula=Sepal.Length ~ Sepal.Width+Petal.Length+Petal.Width, data=iris)
vif(fit)
sqrt(vif(fit))>2 # sqrt(VIF) of 2 or more suggests a multicollinearity problem
# (2) Correlations among the iris variables.
cor(iris[,-5]) # correlations among the predictors (Species excluded)
# Predictors may be highly correlated with each other -> drop the offending
# variable and refit (here: Petal.Width).
###############################
## Machine learning: train/test split
###############################
dim(iris) # 150 5
# (1) Split into training and test sets.
x <- sample(nrow(iris), 0.7*nrow(iris)) # sample 70% of the rows
x # row indices
train <- iris[x, ] # training set
test <- iris[-x, ] # test set
dim(train) # 105 5
dim(test) # 45 5
names(test)
# (2) Fit the model after removing Petal.Width.
result.lm <- lm(formula=Sepal.Length ~ Sepal.Width + Petal.Length, data=train)
result.lm
summary(result.lm)
# (3) Model predictions on the test set.
y_pred <- predict(result.lm, test) # test supplies Sepal.Width + Petal.Length
length(y_pred) # 45
y_true <- test$Sepal.Length # observed values (ground truth)
# (4) Evaluate the model.
# 1) MSE = Mean Square Error
# MSE = mean(err ^ 2)
err <- y_pred - y_true
MSE = mean(err ^ 2)
# Squaring removes the sign and penalizes large errors.
MSE # 0.1361803 -> the closer to 0, the better
# 2) RMSE : Root MSE
RMSE <- sqrt(MSE)
RMSE # 0.369026
# 3) Correlation between observed and predicted values.
cor(y_true, y_pred) # 0.9015176
# 4) real value vs pred value
plot(y_pred, col='red', pch = 18, type='o')
par(new = T) # overlay the next plot on the current one
plot(y_true, col='blue', pch = 19, type='o',
     axes = FALSE, ann = FALSE)
# Legend
legend('topleft', legend = c('predict value', 'real value'),
       col = c('red', 'blue'), pch = c(18, 19), lty=1)
##########################################
## 5. Residual checks and model diagnostics
##########################################
# 1. Variable modeling
# 2. Fit the regression model
# 3. Residual diagnostics
#  1) homoscedasticity of the residuals
#  2) normality of the residuals
#  3) independence of the residuals (autocorrelation)
# 4. Multicollinearity check
# 5. Refit / evaluate the model
names(iris)
# 1. Variable modeling : y: Sepal.Length <- x: Sepal.Width, Petal.Length, Petal.Width
formula = Sepal.Length ~ Sepal.Width + Petal.Length + Petal.Width
# 2. Fit the regression model.
model <- lm(formula = formula, data=iris)
model
names(model) # 12 components
res <- model$residuals # residuals (errors)
res
# 3. Residual diagnostics.
plot(model)
#Hit <Return> to see next plot: residuals vs fitted -> should be randomly scattered with no pattern (a parabolic shape indicates a poor fit)
#Hit <Return> to see next plot: Normal Q-Q -> points on the diagonal indicate normally distributed residuals
#Hit <Return> to see next plot: scale vs location -> should be evenly spread around the center
#Hit <Return> to see next plot: residuals vs leverage -> should be evenly spread around the center
# (1) Homoscedasticity check -> if violated: add data & transform variables (selection, normalization).
plot(model, which = 1)
methods('plot') # list the objects plot() provides methods for
# (2) Normality of the residuals -> if violated: add data.
attributes(model) # coefficients, residuals, fitted.values, ...
res <- residuals(model) # extract the residuals
shapiro.test(res) # normality test - p-value = 0.9349 >= 0.05
# H0: the residuals do not differ from a normal distribution.
# Visualize normality.
hist(res, freq = F)
qqnorm(res)
# (4) Independence of the residuals (Durbin-Watson autocorrelation test) -> if violated: nonlinear model & variable transformation.
install.packages('lmtest')
library(lmtest) # autocorrelation diagnostics package
dwtest(model) # Durbin-Watson statistic (acceptable range 2~4 per the lecture)
# DW = 2.0604, p-value = 0.6013
# 4. Multicollinearity check.
library(car)
sqrt(vif(model)) > 2 # TRUE
# 5. Refit / evaluate the model (Petal.Width removed).
formula = Sepal.Length ~ Sepal.Width + Petal.Length
model <- lm(formula = formula, data=iris)
summary(model) # model evaluation
|
e9e119cc24cc56f1ec44c4f9b6b65952e5b50d19 | 4f768967ddca2e6771d7790cc14fcf1fc276817e | /R/CalcRes.fun.R | ce7d02d6b9e2312f9502884e6255a90c72d65aac | [] | no_license | statwonk/NHPoisson | 34f7cb777d018c8fca470e34191bd8b6079e0275 | 438eafac6dc7f018d20eba4d96347cede87960e0 | refs/heads/master | 2021-01-14T10:27:10.345102 | 2016-04-13T02:45:29 | 2016-04-13T02:45:29 | 56,107,917 | 0 | 1 | null | 2016-04-13T02:22:57 | 2016-04-13T00:29:42 | R | UTF-8 | R | false | false | 1,976 | r | CalcRes.fun.R | CalcRes.fun <-
function(mlePP, lint, h=NULL, typeRes=NULL)
{
## Compute raw and scaled residuals of a fitted point process ('mlePP'
## object, NHPoisson package) over overlapping windows of length 'lint'.
## In each window the empirical occurrence rate is compared with the mean
## fitted intensity; 'h' weights the scaled residuals (default
## 1/sqrt(lambda), labelled 'Pearson').
n<-length(mlePP@lambdafit)
## NOTE(review): 't' and 'tit' are extracted here but not used below.
t<-mlePP@t
tit<-mlePP@tit
## Default weight function: Pearson scaling.
if (is.null(h))
{
h<-1/mlePP@lambdafit**0.5
typeRes<-'Pearson'
}
if (is.null(typeRes)) stop('Please indicate argument typeRes')
## Observations flagged 0 in inddat are excluded from every window mean.
inddat<-mlePP@inddat
inddat[inddat==0]<-NA
posE<-mlePP@posE
## Weighted (by h) and raw fitted intensities, restricted to valid points.
lambdafit<-mlePP@lambdafit*h*inddat
lambdafitR<-mlePP@lambdafit*inddat
## Event indicators: weighted by h (indice) and raw 0/1 (indiceR).
indice<-rep(0,n)
indice[posE]<-1*h[posE]
indice<-indice*inddat
indiceR<-rep(0,n)
indiceR[posE]<-1
indiceR<-indiceR*inddat
## First window is [1, lint]; its summaries are stored at the window
## midpoint posmed, with NA padding before it.
iini<-1
ifin<-lint
posmed<-floor(lint/2)+1
emplambda<-NULL
emplambda[1:(posmed-1)]<-NA
emplambda[posmed]<-mean(indice[iini:ifin], na.rm=T)
sumalfit <- NULL
sumalfit[1:(posmed - 1)] <- NA
sumalfit[posmed] <- mean(lambdafit[iini:ifin], na.rm=T)
emplambdaR<-NULL
emplambdaR[1:(posmed-1)]<-NA
emplambdaR[posmed]<-mean(indiceR[iini:ifin], na.rm=T)
sumalfitR <- NULL
sumalfitR[1:(posmed - 1)] <- NA
sumalfitR[posmed] <- mean(lambdafitR[iini:ifin], na.rm=T)
## lintV records the number of valid observations in each window.
lintV<- NULL
lintV[1:(posmed - 1)] <- NA
lintV[posmed] <- sum(inddat[iini:ifin],na.rm=T)
## Slide the window one position at a time until it reaches the series end.
j <- posmed+1
while((ifin < n))
{
iini<-iini+1
ifin<-ifin+1
emplambda[j]<-mean(indice[iini:ifin], na.rm=T)
sumalfit[j] <- mean(lambdafit[iini:ifin], na.rm=T)
emplambdaR[j]<-mean(indiceR[iini:ifin], na.rm=T)
sumalfitR[j] <- mean(lambdafitR[iini:ifin], na.rm=T)
lintV[j]<-sum(inddat[iini:ifin], na.rm=TRUE)
j<-j+1
}
## Pad the tail positions (no complete window centred there) with NA.
sumalfit[j:n] <- NA
emplambda[j:n]<-NA
sumalfitR[j:n] <- NA
emplambdaR[j:n]<-NA
lintV[j:n]<-NA
## Residuals: empirical minus fitted (scaled and raw versions).
ScaRes<-emplambda-sumalfit
RawRes<-emplambdaR-sumalfitR
return(list(RawRes=RawRes,ScaRes=list(ScaRes=ScaRes,typeRes=typeRes),
emplambda=emplambdaR,fittedlambda=sumalfitR,lintV=lintV,lint=lint,
typeI='Overlapping',h=h,mlePP=mlePP))
}
|
91be86154a5a824c7042fd0f4f7b860c84ebff1c | 63a737f2dbbf63b52759a6f3d7814b80330050f8 | /tests/testthat/test-mock-api.R | 2582281dca3345ecef195d14641d22e7f0db7422 | [] | no_license | byapparov/httptest | 19769ef336d2b1bb84f528d9f80c8c69902ecbd1 | ca07f470c4b2ee4c6d00693c991ea609e0570bf6 | refs/heads/master | 2021-01-09T06:52:29.236322 | 2017-03-15T07:53:14 | 2017-03-15T07:53:14 | 81,112,714 | 0 | 0 | null | 2017-03-16T00:04:27 | 2017-02-06T17:26:23 | R | UTF-8 | R | false | false | 3,528 | r | test-mock-api.R | context("Mock API")
## Request-mocking tests: inside with_mock_API(), httr verbs are intercepted
## and answered from fixture files on disk, so no real network is touched.
public({
    with_mock_API({
        ## A GET on a path resolves to "<path>.json" on disk.
        test_that("Can load an object and file extension is added", {
            a <- GET("api/")
            expect_identical(content(a), list(value="api/object1/"))
            b <- GET(content(a)$value)
            expect_identical(content(b), list(object=TRUE))
        })
        test_that("GET with query", {
            obj <- GET("api/object1/", query=list(a=1))
            expect_json_equivalent(content(obj),
                list(query=list(a=1), mocked="yes"))
        })
        ## Requests with no matching fixture raise an error naming the URL.
        test_that("GET files that don't exist errors", {
            expect_GET(GET("api/NOTAFILE/"), "api/NOTAFILE/")
            expect_GET(GET("api/NOTAFILE/", query=list(a=1)),
                "api/NOTAFILE/?a=1")
        })
        ## Non-GET verbs read from a method-suffixed fixture file.
        test_that("POST method reads from correct file", {
            b <- POST("api/object1", body = "", content_type_json(),
                add_headers(Accept = "application/json",
                    "Content-Type" = "application/json"))
            expect_identical(content(b), list(method="POST"))
        })
        test_that("Other verbs error too", {
            expect_PUT(PUT("api/"), "api/")
            expect_PATCH(PATCH("api/"), "api/")
            expect_POST(POST("api/"), "api/")
            expect_DELETE(DELETE("api/"), "api/")
        })
        ## download.file() is mocked by copying the fixture to the target.
        test_that("File download copies the file", {
            f <- tempfile()
            dl <- download.file("api.json", f)
            expect_equal(dl, 0)
            expect_identical(readLines(f), readLines("api.json"))
        })
        test_that("File download if file doesn't exist", {
            f2 <- tempfile()
            expect_error(dl <- download.file("NOTAFILE", f2),
                "DOWNLOAD NOTAFILE")
        })
        ## Full http(s):// URLs map to host-based fixture paths.
        test_that("mock API with http:// URL, not file path", {
            expect_GET(GET("http://httpbin.org/get"),
                "http://httpbin.org/get",
                "(httpbin.org/get.json)")
            expect_GET(GET("https://httpbin.org/get"),
                "https://httpbin.org/get",
                "(httpbin.org/get.json)")
            expect_identical(content(GET("http://example.com/get")),
                list(loaded=TRUE))
        })
        test_that("Mocking a GET with more function args (path, auth)", {
            expect_identical(content(GET("http://example.com",
                path="/get",
                add_headers("Content-Type"="application/json"),
                authenticate("d", "d"))),
                list(loaded=TRUE))
        })
    })
})
context("Mock URL")
## buildMockURL() maps a request (URL, query string, HTTP method) to the
## fixture file path: queries become a short hash suffix (e.g. "-a3679d")
## and non-GET methods append the verb name.
test_that("Path to the fake file is correct", {
    # GET (default) method
    file <- buildMockURL("http://www.test.com/api/call")
    expect <- "www.test.com/api/call.json"
    expect_identical(file, expect, label = "Get method without query string")
    # GET method with query in URL
    file <- buildMockURL("http://www.test.com/api/call?q=1")
    expect <- "www.test.com/api/call-a3679d.json"
    expect_identical(file, expect, label = "Get method with query string")
    # POST method
    file <- buildMockURL("http://www.test.com/api/call", method = "POST")
    expect <- "www.test.com/api/call-POST.json"
    expect_identical(file, expect, "POST method without query string")
    # POST method with query in URL
    file <- buildMockURL("http://www.test.com/api/call?q=1", method = "POST")
    expect <- "www.test.com/api/call-a3679d-POST.json"
    expect_identical(file, expect, "POST method with query string")
})
|
af847a441ec3e6cb8849cf3a4bc7d70c6fe1cd10 | 7665057b28fb224108b09ce4231981c472de38e3 | /man/filter_signal.Rd | dd26554f8449d2de9b79231e6f25a900de954e11 | [] | no_license | cbroeckl/RAMClustR | e60e01a0764d9cb1b690a5f5f73f3ecc35c40a32 | e0ca67df1f993a89be825fcbf3bfa6eaabbadfef | refs/heads/master | 2023-08-03T10:26:16.494752 | 2023-06-21T14:24:06 | 2023-06-21T14:24:06 | 13,795,865 | 10 | 13 | null | 2023-08-01T01:42:17 | 2013-10-23T07:04:07 | R | UTF-8 | R | false | true | 601 | rd | filter_signal.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rc.feature.filter.blanks.R
\name{filter_signal}
\alias{filter_signal}
\title{filter_signal}
\usage{
filter_signal(ms.qc.mean, ms.blank.mean, sn)
}
\arguments{
\item{ms.qc.mean}{ms qc mean signal intensities}
\item{ms.blank.mean}{ms blank mean signal intensities}
\item{sn}{numeric defines the ratio for 'signal'. i.e. sn = 3 indicates that signal intensity must be 3 fold higher in sample than in blanks, on average, to be retained.}
}
\value{
The union of features whose mean sample (QC) signal is at least \code{sn}-fold larger than the mean blank signal.
}
\description{
filter signal
}
|
313a0e8a65773c714d333589021adbc2958e499b | 9db98428a50bdcaab6ad807c35f5a24963a9d82a | /R/packages.R | 52f28583d9788ee60ff11db38f0988cc6c4a1f73 | [] | no_license | simroy16/drakeProj | 08e0654b80433b37b9fdfb4bccf4a09955fb3754 | 97e09dfc00443f7983b495b7ad94fddc43a64bd5 | refs/heads/master | 2020-09-25T02:56:42.156678 | 2019-12-04T16:23:43 | 2019-12-04T16:23:43 | 225,902,140 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 165 | r | packages.R | library(drake)
## Package dependencies for the drake workflow.
library(ggplot2)
library(stringr)
library(fishualize)  # colour palettes
library(dplyr)
library(taxize)      # taxonomic name resolution
library(tidyr)
library(readr)
library(tidyverse)   # NOTE(review): tidyverse already attaches ggplot2, dplyr,
                     # stringr, tidyr, readr and forcats, so several of the
                     # calls in this file are redundant (harmless, but could
                     # be pruned).
library(forcats)
9aa5a769be2e5c8eca95636166d23e5728c6aa7a | 6b534cd4d569376f82d042319950461363b0c509 | /man/plot-cfPressure-missing-method.Rd | 9e54540af31588d1b83842cc250f625e7bddbf5d | [] | no_license | ropensci/clifro | 65acc77df1921263345b2e612b2aaa9655dc2c0b | 46c72d399ee3d6069cfffa87a4452f85794bdef9 | refs/heads/master | 2023-05-23T18:35:17.839044 | 2023-03-09T06:42:07 | 2023-03-09T06:42:07 | 19,559,638 | 26 | 12 | null | 2018-06-28T08:52:29 | 2014-05-08T03:43:50 | R | UTF-8 | R | false | true | 2,159 | rd | plot-cfPressure-missing-method.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cfData-plotMethods.R
\name{plot,cfPressure,missing-method}
\alias{plot,cfPressure,missing-method}
\alias{plot.cfPressure}
\title{Plot Mean Sea Level Atmospheric Pressure}
\usage{
\S4method{plot}{cfPressure,missing}(
x,
y,
ggtheme = c("grey", "gray", "bw", "linedraw", "light", "minimal", "classic"),
scales = c("fixed", "free_x", "free_y", "free"),
n_col = 1,
...
)
}
\arguments{
\item{x}{a cfPressure object.}
\item{y}{missing.}
\item{ggtheme}{character string (partially) matching the
\code{\link[ggplot2]{ggtheme}} to be used for plotting, see
'Theme Selection' below.}
\item{scales}{character string partially matching the \code{scales} argument
in the \code{link[ggplot2]{facet_wrap}} function.}
\item{n_col}{the number of columns of plots (default 1).}
\item{...}{further arguments passed to \code{\link[ggplot2]{theme}}.}
}
\description{
Plot the MSL atmospheric pressure through time.
}
\examples{
\dontrun{
# Retrieve public hourly atmospheric pressure data for the last 30 days at
# Reefton Ews station
# Subtract 30 days from today's date to get the start date
last_month = paste(as.character(Sys.Date() - 30), 0)
reefton_pressure = cf_query(cf_user(), cf_datatype(7, 1, 1), cf_station(),
start_date = last_month)
class(reefton_pressure) # cfPressure object
# Plot the atmospheric pressure data using the defaults
plot(reefton_pressure)
# Enlarge the text and add the observations as points
library(ggplot2) # for element_text() and geom_point()
plot(reefton_pressure, ggtheme = "bw", text = element_text(size = 16)) +
geom_point(size = 3, shape = 1)
# Save the plot as a png to the current working directory
library(ggplot2) # for ggsave()
ggsave("my_pressure_plot.png")
}
}
\seealso{
\code{\link{plot,cfDataList,missing-method}} for general
information on default plotting of \code{cfData} and \code{cfDataList}
objects, and the links within. See \code{\link{cf_query}} for creating
\code{cfPressure} objects.
Refer to \code{\link[ggplot2]{theme}} for more possible arguments to pass
to these methods.
}
|
ee656a7e4bc7dc30c79e31a32f9288b3eea56ea6 | 2a7862d20fc9a6819e849907bf0e76733ac270ef | /1.processing.data.R | 30e235bb06c84a771f4ccaf4479bdd4f58310732 | [] | no_license | LucasMS/HMA-LMA-SMP2017 | da5090cd82ba2372b505d56232b6ae0ab90d159d | 12fe422eda6244ecc065831a09bf6dfba4178913 | refs/heads/master | 2020-04-07T08:56:21.509665 | 2018-11-23T13:31:44 | 2018-11-23T13:31:44 | 158,233,921 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,781 | r | 1.processing.data.R |
## Split the rarefied taxa tables (hl.v6) into "train" (known microbial
## abundance status) and "predict" (unknown status) sets, and write the
## metadata plus the phylum-, class- and OTU-level tables to CSV.
load("../../../Data/v6/3.b.Make.final/hl.v6.r.taxa.RData")

## Names of the abundance tables available in hl.r.taxa (kept for reference).
datas <- c("smp.r.p", "smp.r.c", "smp.r.o", "smp.r.f", "smp.r.g", "smp.r.s", "smp.r")

meta <- hl.r.taxa$meta.r
meta <- meta[, c("sample_name", "host_scientific_name", "Status", "Region")]
## Rename to fit the column names expected by the old code.
colnames(meta) <- c("SampleID", "Species", "MicAbundance", "Region")
rownames(meta) <- meta$SampleID

## Separate train and predict samples.
## NOTE(review): this assumes missing status is stored as the literal string
## "NA" rather than a true NA value -- verify against the metadata source.
meta.train <- meta[meta$MicAbundance != "NA", ]
meta.predict <- meta[meta$MicAbundance == "NA", ]

## Write the metadata splits.
write.csv(meta.predict, "./Data/01metadata.Predict.csv", row.names = TRUE)
write.csv(meta.train, "./Data/01metadata.Train.csv", row.names = TRUE)

## Subset one abundance table to the train/predict samples (in metadata
## order), verify the alignment, and write both subsets to CSV.  This
## replaces three near-identical copy-pasted blocks, one of which shadowed
## base::c() by assigning to a variable named "c".
write.train.predict <- function(abund, prefix) {
  abund.train <- abund[match(meta.train$SampleID, rownames(abund)), , drop = FALSE]
  abund.predict <- abund[match(meta.predict$SampleID, rownames(abund)), , drop = FALSE]
  ## Fail loudly on misalignment; the original merely printed TRUE/FALSE
  ## and continued regardless.
  stopifnot(identical(rownames(abund.train), as.character(meta.train$SampleID)),
            identical(rownames(abund.predict), as.character(meta.predict$SampleID)))
  write.csv(abund.train, paste0("./Data/", prefix, ".train.csv"), row.names = TRUE)
  write.csv(abund.predict, paste0("./Data/", prefix, ".predict.csv"), row.names = TRUE)
}

write.train.predict(hl.r.taxa$smp.r.p, "01phylum")  # phylum level
write.train.predict(hl.r.taxa$smp.r.c, "02class")   # class level
write.train.predict(hl.r.taxa$smp.r, "03OTU")       # OTU level
|
9e504ef6ec2a6b291c01c5e7f833d0d924a042cf | c90dac176024e17fc2f984c2e5bf3390ee08235b | /Chapter 5/5.4.6.r | 2eb249e28c15907cdcbc7b1365515b3bb4e1468b | [] | no_license | HariharasudhanAS/ISLR-Exercises | 4317851a5c6fafe0f63f4f0be3b6363ea5bb9593 | 0b6066ce81c19cefeb582703f3b3a7f3148d5af3 | refs/heads/master | 2020-03-31T00:26:17.718212 | 2018-12-11T11:05:53 | 2018-12-11T11:05:53 | 151,739,548 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 279 | r | 5.4.6.r | set.seed(1)
# Fit the full logistic regression of default on income and balance
# (Default data; assumes the ISLR package is loaded) and read the
# formula-based standard errors from summary().
glm.fit = glm(default~income+balance, data=Default, family=binomial)
summary(glm.fit)
# Statistic function for boot(): refit the model on the bootstrap sample
# selected by 'index' and return the coefficient estimates.
boot.fn = function(data, index){
  coefficients(glm(default~income+balance, data=data, family=binomial, subset=index))
}
# 1000 bootstrap replicates of the coefficients (requires the boot package).
boot(Default,boot.fn,1000)
# The results are very similar: bootstrap SEs closely match the glm SEs.
704d7ec4447e7920cbe5e6610f1fc888d0e25519 | 89d219d3dfa744543c6ba1c1b3a99e4dcabb1442 | /man/op-disjunction-functional.Rd | 249f69c7f6a14e856290cda81d77bd7c052f2d41 | [] | no_license | pteetor/tutils | e2eb5d2fba238cbfe37bf3c16b90df9fa76004bb | fe9b936d8981f5cb9b275850908ef08adeffef4e | refs/heads/master | 2022-06-17T11:34:30.590173 | 2022-06-14T02:02:15 | 2022-06-14T02:02:15 | 77,761,145 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 682 | rd | op-disjunction-functional.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/operators.R
\name{op-disjunction-functional}
\alias{op-disjunction-functional}
\alias{\%or\%}
\title{Combine two predicates}
\usage{
p1 \%or\% p2
}
\arguments{
\item{p1}{A predicate of one argument}
\item{p2}{A predicate of one argument}
}
\value{
Returns a \emph{function}, not
a value. (Be careful.)
}
\description{
Given two predicates, this operator combines them
into one new predicate. The new predicate calls
the given predicates, applying "||" to the results.
}
\details{
}
\examples{
(is.null \%or\% is.numeric)(NULL)
(is.null \%or\% is.numeric)(pi)
(is.null \%or\% is.numeric)("foo")
}
|
33b1c0cafbe6b59f02d457fb69485b2cb5248f44 | 29585dff702209dd446c0ab52ceea046c58e384e | /DJL/R/dm.sf.R | f6e4d802d9e7639c8bae0c9a4b826fde0a729c93 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,675 | r | dm.sf.R | dm.sf <-
function(xdata,ydata,rts,g,w=NULL,se=0,sg="ssm",date=NULL){
## Directional DEA model: for each DMU k an LP is built over the decision
## variables (lambda[1:n], mu[1:n], efficiency, x-slacks[1:m], y-slacks[1:s])
## and solved with lpSolveAPI; a second stage then resolves alternate optima.
# Initial checks
if(is.na(match(rts,c("crs","vrs","irs","drs")))){stop('rts must be "crs", "vrs", "irs", or "drs".')}
if(is.na(match(se,c(0,1)))){stop('se must be either 0 or 1.')}
if(is.na(match(sg,c("ssm","max","min")))){stop('sg must be "ssm", "max", or "min".')}
# Load library
# library(lpSolveAPI)
# Parameters
xdata<-as.matrix(xdata);ydata<-as.matrix(ydata);g<-as.matrix(g);if(!is.null(date))date<-as.matrix(date) # format input data as matrix
n<-nrow(xdata); m<-ncol(xdata); s<-ncol(ydata) # n DMUs, m inputs, s outputs
if(is.null(w)) w<-matrix(c(0),ncol=s) else w<-as.matrix(w) # default: no flagged outputs
# Result holders: one row per DMU
results.efficiency<-matrix(rep(NA,n),nrow=n,ncol=1)
results.lambda<-matrix(rep(NA,n^2),nrow=n,ncol=n)
results.mu<-matrix(rep(NA,n^2),nrow=n,ncol=n)
results.xslack<-matrix(rep(NA,n*m),nrow=n,ncol=m)
results.yslack<-matrix(rep(NA,n*s),nrow=n,ncol=s)
for (k in 1:n){
# Declare LP: columns are lambda (1:n), mu (n+1..2n), efficiency (2n+1),
# then the input and output slacks
lp.sf<-make.lp(0,n+n+1+m+s) # lambda+mu+efficiency+xslack+yslack
# Objective: maximize the efficiency variable (lp_solve minimizes, hence -1)
set.objfn(lp.sf,c(-1),indices=c(n+n+1))
# Returns-to-scale constraint on sum(lambda) + sum(mu)
if(rts=="vrs"){add.constraint(lp.sf,c(rep(1,n*2)),indices=c(1:(n*2)),"=",1)}
if(rts=="crs"){set.constr.type(lp.sf,0,1)}
if(rts=="irs"){add.constraint(lp.sf,c(rep(1,n*2)),indices=c(1:(n*2)),">=",1)}
if(rts=="drs"){add.constraint(lp.sf,c(rep(1,n*2)),indices=c(1:(n*2)),"<=",1)}
# Mu: forced to zero unless rts is vrs/irs and some output is flagged in w
if(rts=="crs"||rts=="drs"||sum(w)==0){add.constraint(lp.sf,c(rep(1,n)),indices=c((n+1):(n+n)),"=",0)}
# Input constraints: reference input + efficiency*g + slack = observed input
for(i in 1:m){add.constraint(lp.sf,c(xdata[,i],xdata[,i],g[k,i],1),indices=c(1:n,(n+1):(n+n),n+n+1,n+n+1+i),"=",xdata[k,i])}
# Output constraints: outputs flagged in w are held as an equality with the
# corresponding slack fixed at zero
for(r in 1:s){
if(w[1,r]==1){
add.constraint(lp.sf,c(ydata[,r],ydata[,r],g[k,m+r]),indices=c(1:n,(n+1):(n+n),n+n+1),"=",ydata[k,r])
add.constraint(lp.sf,c(1),indices=c(n+n+1+m+r),"=",0)
}else{add.constraint(lp.sf,c(ydata[,r],-g[k,m+r],-1),indices=c(1:n,n+n+1,n+n+1+m+r),"=",ydata[k,r])}
if(se==1){add.constraint(lp.sf,c(ydata[,r],-1),indices=c(1:n,n+n+1+m+r),">=",0)}
}
# PPS for Super-efficiency: exclude DMU k from its own reference set
if(se==1){add.constraint(lp.sf,c(1,1),indices=c(k,n+k),"=",0)}
# Bounds: the efficiency variable is free, everything else non-negative
set.bounds(lp.sf,lower=c(rep(0,n+n),-Inf,rep(0,m+s)))
# Solve
solve.lpExtPtr(lp.sf)
# Optimal efficiency (sign flipped back because -efficiency was minimized)
results.efficiency[k]<--1*get.objective(lp.sf)
# First-stage solution values
temp.p<-get.variables(lp.sf)
results.lambda[k,]<-temp.p[1:n]
results.mu[k,]<-temp.p[(n+1):(n+n)]
results.xslack[k,]<-temp.p[(n+n+2):(n+n+1+m)]
results.yslack[k,]<-temp.p[(n+n+1+m+1):(n+n+1+m+s)]
# Stage II: fix the optimal efficiency and re-optimize a secondary goal.
# NOTE(review): exists("sg") is always TRUE here because sg is a formal
# argument with a default value, so this stage always runs.
if(exists("sg")){
# Link previous solutions
add.constraint(lp.sf,c(1),indices=c(n+n+1),"=",results.efficiency[k])
# sg = "max"/"min": maximize/minimize the date-weighted sum of lambda and mu
if(sg=="max"){set.objfn(lp.sf,c(-date[1:n],-date[1:n]),indices=c(1:n,(n+1):(n+n)))}
if(sg=="min"){set.objfn(lp.sf,c(date[1:n],date[1:n]),indices=c(1:n,(n+1):(n+n)))}
# sg = "ssm": slack-sum maximization
if(sg=="ssm"){set.objfn(lp.sf,c(rep(-1,m+s)),indices=c((n+n+2):(n+n+1+m+s)))}
# solve
solve.lpExtPtr(lp.sf)
# get the second-stage results
temp.s<-get.variables(lp.sf)
results.lambda[k,]<-temp.s[1:n]
results.mu[k,]<-temp.s[(n+1):(n+n)]
results.xslack[k,]<-temp.s[(n+n+2):(n+n+1+m)]
results.yslack[k,]<-temp.s[(n+n+1+m+1):(n+n+1+m+s)]
}
}
results<-list(eff=results.efficiency,lambda=results.lambda,mu=results.mu,xslack=results.xslack,yslack=results.yslack)
return(results)
}
|
17dfc3e3fd8568ad7fa911642fc525071e110834 | 60a568d1a18f5a71f1b0e82425ffdeee35da3024 | /man/getAssetClasses.Rd | cfc543491c510c1eecb9442cccbed495b93f2e4f | [] | no_license | beatnaut/remaputils | 9a7d035c58d91e9f241e7ac2aa92c6d5868f46f3 | 58d68ba45ff3a1a00f3ca15a343f5e26302b13d8 | refs/heads/master | 2023-09-04T06:50:17.069462 | 2023-09-01T07:30:18 | 2023-09-01T07:30:18 | 145,116,777 | 0 | 1 | null | 2023-09-07T19:17:40 | 2018-08-17T12:16:23 | R | UTF-8 | R | false | true | 374 | rd | getAssetClasses.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/import.R
\name{getAssetClasses}
\alias{getAssetClasses}
\title{Get the asset class data frame with the full names appended}
\usage{
getAssetClasses(session)
}
\arguments{
\item{session}{The rdecaf session.}
}
\value{
A data frame with the asset classes.
}
\description{
Retrieve the asset class data frame from the given rdecaf session, with the full asset class names appended.
}
|
4ea0f04e0085926abc562a54a6927e152f0c9309 | c009f093f7e5c0f164061a29841d35dbcd313a74 | /man/print.CustomLayout.Rd | 7275ea289a45ebac789dbdc6f364a6987748a7ae | [] | no_license | davidgohel/customLayout | 5ccb2ebe34358346428000af56f6b4f5a35a9877 | a680d677c0728af538247aa9525082090770b124 | refs/heads/master | 2020-09-03T14:08:51.631710 | 2019-11-23T17:42:40 | 2019-11-23T17:42:40 | 219,481,116 | 1 | 0 | null | 2019-11-04T11:03:00 | 2019-11-04T11:03:00 | null | UTF-8 | R | false | true | 682 | rd | print.CustomLayout.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Layout.R
\name{print.CustomLayout}
\alias{print.CustomLayout}
\title{Print a CustomLayout object.}
\usage{
\method{print}{CustomLayout}(x, ...)
}
\arguments{
\item{x}{object of class CustomLayout.}
\item{...}{optional arguments to print or plot methods. Not used here.}
}
\description{
Print a CustomLayout object.
}
\examples{
lay <- lay_new(matrix(1:4,nc=2),widths=c(3,2),heights=c(2,1))
lay2 <- lay_new(matrix(1:3))
cl <- lay_bind_col(lay,lay2, widths=c(3,1))
print(cl)
cl2 <- lay_bind_col(cl,cl, c(2,1))
print(cl2)
cl3 <- lay_bind_row(cl,cl, c(20,1))
print(cl3)
}
\seealso{
lay_new lay_show
}
|
1fc3c86f05999156e17771b5fd63972d003b9cfd | 2e8e31440e761ca5643ec924ff36c487c1add164 | /R/functions_for_processing.R | 6092cb0ffe29d33ff3e8d3831484448152555eaa | [] | no_license | guhjy/optweight | f57fd5cc10f78ed27c2941c1bb19a7637f2186fb | dac2223e86213de169629a9631d80ff59587b73f | refs/heads/master | 2020-04-09T10:59:27.808261 | 2018-11-06T20:13:39 | 2018-11-06T20:13:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,616 | r | functions_for_processing.R | get.covs.and.treat.from.formula <- function(f, data = NULL, env = .GlobalEnv, ...) {
A <- list(...)
tt <- terms(f, data = data)
#Check if data exists
if (is_not_null(data) && is.data.frame(data)) {
data.specified <- TRUE
}
else data.specified <- FALSE
#Check if response exists
if (is.formula(tt, 2)) {
resp.vars.mentioned <- as.character(tt)[2]
resp.vars.failed <- vapply(resp.vars.mentioned, function(v) {
is_null_or_error(try(eval(parse(text = v), c(data, env)), silent = TRUE))
}, logical(1L))
if (any(resp.vars.failed)) {
if (is_null(A[["treat"]])) stop(paste0("The given response variable, \"", as.character(tt)[2], "\", is not a variable in ", word.list(c("data", "the global environment")[c(data.specified, TRUE)], "or"), "."), call. = FALSE)
tt <- delete.response(tt)
}
}
else resp.vars.failed <- TRUE
if (any(!resp.vars.failed)) {
treat.name <- resp.vars.mentioned[!resp.vars.failed][1]
tt.treat <- terms(as.formula(paste0(treat.name, " ~ 1")))
mf.treat <- quote(stats::model.frame(tt.treat, data,
drop.unused.levels = TRUE,
na.action = "na.pass"))
tryCatch({mf.treat <- eval(mf.treat, c(data, env))},
error = function(e) {stop(conditionMessage(e), call. = FALSE)})
treat <- model.response(mf.treat)
}
else {
treat <- A[["treat"]]
treat.name <- NULL
}
#Check if RHS variables exist
tt.covs <- delete.response(tt)
rhs.vars.mentioned.lang <- attr(tt.covs, "variables")[-1]
rhs.vars.mentioned <- vapply(rhs.vars.mentioned.lang, deparse, character(1L))
rhs.vars.failed <- vapply(rhs.vars.mentioned.lang, function(v) {
is_null_or_error(try(eval(v, c(data, env)), silent = TRUE))
}, logical(1L))
if (any(rhs.vars.failed)) {
stop(paste0(c("All variables in formula must be variables in data or objects in the global environment.\nMissing variables: ",
paste(rhs.vars.mentioned[rhs.vars.failed], collapse=", "))), call. = FALSE)
}
rhs.term.labels <- attr(tt.covs, "term.labels")
rhs.term.orders <- attr(tt.covs, "order")
rhs.df <- vapply(rhs.vars.mentioned.lang, function(v) {
is.data.frame(try(eval(v, c(data, env)), silent = TRUE))
}, logical(1L))
if (any(rhs.df)) {
if (any(rhs.vars.mentioned[rhs.df] %in% unlist(sapply(rhs.term.labels[rhs.term.orders > 1], function(x) strsplit(x, ":", fixed = TRUE))))) {
stop("Interactions with data.frames are not allowed in the input formula.", call. = FALSE)
}
addl.dfs <- setNames(lapply(rhs.vars.mentioned.lang[rhs.df], function(x) {eval(x, env)}),
rhs.vars.mentioned[rhs.df])
for (i in rhs.term.labels[rhs.term.labels %in% rhs.vars.mentioned[rhs.df]]) {
ind <- which(rhs.term.labels == i)
rhs.term.labels <- append(rhs.term.labels[-ind],
values = names(addl.dfs[[i]]),
after = ind - 1)
}
new.form <- as.formula(paste("~", paste(rhs.term.labels, collapse = " + ")))
tt.covs <- terms(new.form)
if (is_not_null(data)) data <- do.call("cbind", unname(c(addl.dfs, list(data))))
else data <- do.call("cbind", unname(addl.dfs))
}
#Get model.frame, report error
mf.covs <- quote(stats::model.frame(tt.covs, data,
drop.unused.levels = TRUE,
na.action = "na.pass"))
tryCatch({covs <- eval(mf.covs, c(data, env))},
error = function(e) {stop(conditionMessage(e), call. = FALSE)})
if (is_not_null(treat.name) && treat.name %in% names(covs)) stop("The variable on the left side of the formula appears on the right side too.", call. = FALSE)
if (is_null(rhs.vars.mentioned)) {
covs <- data.frame(Intercept = rep(1, if (is_null(treat)) 1 else length(treat)))
}
else attr(tt.covs, "intercept") <- 0
covs.levels <- setNames(vector("list", ncol(covs)), names(covs))
for (i in names(covs)) {
if (is.character(covs[[i]])) covs[[i]] <- factor(covs[[i]])
if (is.factor(covs[[i]])) {
covs.levels[[i]] <- levels(covs[[i]])
levels(covs[[i]]) <- paste0("_", covs.levels[[i]])
}
}
#Get full model matrix with interactions too
covs.matrix <- model.matrix(tt.covs, data = covs,
contrasts.arg = lapply(Filter(is.factor, covs),
contrasts, contrasts=FALSE))
for (i in names(covs)) {
if (is.factor(covs[[i]])) {
levels(covs[[i]]) <- covs.levels[[i]]
}
}
#attr(covs, "terms") <- NULL
return(list(reported.covs = covs,
model.covs = covs.matrix,
treat = treat,
treat.name = treat.name))
}
get.treat.type <- function(treat) {
#Returns treat with treat.type attribute
nunique.treat <- nunique(treat)
if (nunique.treat == 2) {
treat.type <- "binary"
}
else if (nunique.treat < 2) {
stop("The treatment must have at least two unique values.", call. = FALSE)
}
else if (is.factor(treat) || is.character(treat)) {
treat.type <- "multinomial"
treat <- factor(treat)
}
else {
treat.type <- "continuous"
}
attr(treat, "treat.type") <- treat.type
return(treat)
}
process.focal.and.estimand <- function(focal, estimand, targets, treat, treat.type) {
if ((is_null(targets) || all(is.na(targets))) && is_not_null(estimand)) {
if (!(length(estimand) == 1 && is.character(estimand))) {
stop("estimand must be a character vector of length 1.", call. = FALSE)
}
estimand_ <- toupper(estimand)[[1]]
#Allowable estimands
AE <- list(binary = c("ATT", "ATC", "ATE"),
multinomial = c("ATT", "ATE"),
continuous = "ATE")
if (estimand_ %nin% AE[[treat.type]]) {
stop(paste0("\"", estimand, "\" is not an allowable estimand with ", treat.type, " treatments. Only ", word.list(AE[[treat.type]], quotes = TRUE, and.or = "and", is.are = TRUE),
" allowed."), call. = FALSE)
}
reported.estimand <- estimand_
}
else {
if (is_not_null(estimand)) warning("targets are not NULL; ignoring estimand.", call. = FALSE, immediate. = TRUE)
estimand <- NULL
reported.estimand <- "targets"
estimand_ <- NULL
}
#Check focal
if (treat.type %in% c("binary", "multinomial")) {
if (is_null(estimand)) {
if (is_not_null(focal)) {
warning(paste("Only estimand = \"ATT\" is compatible with focal. Ignoring focal."), call. = FALSE)
focal <- NULL
}
}
else if (estimand_ == "ATT") {
if (is_null(focal)) {
if (treat.type == "multinomial") {
stop("When estimand = \"ATT\" for multinomial treatments, an argument must be supplied to focal.", call. = FALSE)
}
}
else if (length(focal) > 1L || !is.atomic(focal) || !any(unique(treat) == focal)) {
stop("The argument supplied to focal must be the name of a level of treat.", call. = FALSE)
}
}
else {
if (is_not_null(focal)) {
warning(paste(estimand_, "is not compatible with focal. Ignoring focal."), call. = FALSE)
focal <- NULL
}
}
}
#Get focal, estimand, and reported estimand
if (isTRUE(treat.type == "binary")) {
unique.treat <- unique(treat, nmax = 2)
unique.treat.bin <- unique(binarize(treat), nmax = 2)
if (is_not_null(estimand)) {
if (estimand_ == "ATT") {
if (is_null(focal)) {
focal <- unique.treat[unique.treat.bin == 1]
}
else if (focal == unique.treat[unique.treat.bin == 0]){
reported.estimand <- "ATC"
}
}
else if (estimand_ == "ATC") {
focal <- unique.treat[unique.treat.bin == 0]
estimand_ <- "ATT"
}
}
}
return(list(focal = focal,
estimand = estimand_,
reported.estimand = reported.estimand))
}
process.s.weights <- function(s.weights, data = NULL) {
#Process s.weights
if (is_not_null(s.weights)) {
if (!(is.character(s.weights) && length(s.weights) == 1) && !is.numeric(s.weights)) {
stop("The argument to s.weights must be a vector or data frame of sampling weights or the (quoted) names of variables in data that contain sampling weights.", call. = FALSE)
}
if (is.character(s.weights) && length(s.weights)==1) {
if (is_null(data)) {
stop("s.weights was specified as a string but there was no argument to data.", call. = FALSE)
}
else if (s.weights %in% names(data)) {
s.weights <- data[[s.weights]]
}
else stop("The name supplied to s.weights is not the name of a variable in data.", call. = FALSE)
}
}
return(s.weights)
}
nunique <- function(x, nmax = NA, na.rm = TRUE) {
if (is_null(x)) return(0)
else {
if (na.rm) x <- x[!is.na(x)]
if (is.factor(x)) return(nlevels(x))
else return(length(unique(x, nmax = nmax)))
}
}
nunique.gt <- function(x, n, na.rm = TRUE) {
if (missing(n)) stop("n must be supplied.")
if (n < 0) stop("n must be non-negative.")
if (is_null(x)) FALSE
else {
if (na.rm) x <- x[!is.na(x)]
if (n == 1 && is.numeric(x)) !check_if_zero(max(x) - min(x))
else if (length(x) < 2000) nunique(x) > n
else tryCatch(nunique(x, nmax = n) > n, error = function(e) TRUE)
}
}
is_binary <- function(x) !nunique.gt(x, 2)
all_the_same <- function(x) !nunique.gt(x, 1)
check_if_zero <- function(x) {
# this is the default tolerance used in all.equal
tolerance <- .Machine$double.eps^0.5
# If the absolute deviation between the number and zero is less than
# the tolerance of the floating point arithmetic, then return TRUE.
# This means, to me, that I can treat the number as 0 rather than
# -3.20469e-16 or some such.
abs(x - 0) < tolerance
}
is_null <- function(x) length(x) == 0L
is_not_null <- function(x) !is_null(x)
is_null_or_error <- function(x) {is_null(x) || class(x) == "try-error"}
binarize <- function(variable) {
nas <- is.na(variable)
if (!is_binary(variable[!nas])) stop(paste0("Cannot binarize ", deparse(substitute(variable)), ": more than two levels."))
if (is.character(variable)) variable <- factor(variable)
variable.numeric <- as.numeric(variable)
if (!is.na(match(0, unique(variable.numeric)))) zero <- 0
else zero <- min(unique(variable.numeric), na.rm = TRUE)
newvar <- setNames(ifelse(!nas & variable.numeric==zero, 0, 1), names(variable))
newvar[nas] <- NA
return(newvar)
}
col.w.m <- function(mat, w = NULL, na.rm = TRUE) {
if (is_null(w)) {
w <- 1
w.sum <- apply(mat, 2, function(x) sum(!is.na(x)))
}
else {
w.sum <- apply(mat, 2, function(x) sum(w, na.rm = na.rm))
}
return(colSums(mat*w, na.rm = na.rm)/w.sum)
}
w.cov.scale <- function(w) {
(sum(w, na.rm = TRUE)^2 - sum(w^2, na.rm = TRUE)) / sum(w, na.rm = TRUE)
}
col.w.v <- function(mat, w = NULL, na.rm = TRUE) {
if (is_null(w)) {
w <- rep(1, nrow(mat))
}
return(colSums(t((t(mat) - col.w.m(mat, w, na.rm = na.rm))^2) * w, na.rm = na.rm) / w.cov.scale(w))
}
`%nin%` <- function(x, table) is.na(match(x, table, nomatch = NA_integer_))
is.formula <- function(f, sides = NULL) {
res <- is.name(f[[1]]) && deparse(f[[1]]) %in% c( '~', '!') &&
length(f) >= 2
if (is_not_null(sides) && is.numeric(sides) && sides %in% c(1,2)) {
res <- res && length(f) == sides + 1
}
return(res)
}
word.list <- function(word.list = NULL, and.or = c("and", "or"), is.are = FALSE, quotes = FALSE) {
#When given a vector of strings, creates a string of the form "a and b"
#or "a, b, and c"
#If is.are, adds "is" or "are" appropriately
L <- length(word.list)
if (quotes) word.list <- vapply(word.list, function(x) paste0("\"", x, "\""), character(1L))
if (L == 0) {
out <- ""
attr(out, "plural") = FALSE
}
else {
word.list <- word.list[!word.list %in% c(NA, "")]
L <- length(word.list)
if (L == 0) {
out <- ""
attr(out, "plural") = FALSE
}
else if (L == 1) {
out <- word.list
if (is.are) out <- paste(out, "is")
attr(out, "plural") = FALSE
}
else {
and.or <- match.arg(and.or)
if (L == 2) {
out <- paste(word.list, collapse = paste0(" ", and.or," "))
}
else {
out <- paste(paste(word.list[seq_len(L-1)], collapse = ", "),
word.list[L], sep = paste0(", ", and.or," "))
}
if (is.are) out <- paste(out, "are")
attr(out, "plural") = TRUE
}
}
return(out)
}
round_df_char <- function(df, digits, pad = "0", na_vals = "") {
nas <- is.na(df)
if (!is.data.frame(df)) df <- as.data.frame.matrix(df, stringsAsFactors = FALSE)
rn <- rownames(df)
cn <- colnames(df)
df <- as.data.frame(lapply(df, function(col) {
if (suppressWarnings(all(!is.na(as.numeric(as.character(col)))))) {
as.numeric(as.character(col))
} else {
col
}
}), stringsAsFactors = FALSE)
nums <- vapply(df, is.numeric, FUN.VALUE = logical(1))
o.negs <- sapply(1:ncol(df), function(x) if (nums[x]) df[[x]] < 0 else rep(FALSE, length(df[[x]])))
df[nums] <- round(df[nums], digits = digits)
df[nas] <- ""
df <- as.data.frame(lapply(df, format, scientific = FALSE, justify = "none"), stringsAsFactors = FALSE)
for (i in which(nums)) {
if (any(grepl(".", df[[i]], fixed = TRUE))) {
s <- strsplit(df[[i]], ".", fixed = TRUE)
lengths <- lengths(s)
digits.r.of.. <- vapply(seq_along(s), function(x) {
if (lengths[x] > 1) nchar(s[[x]][lengths[x]])
else 0 }, numeric(1L))
df[[i]] <- sapply(seq_along(df[[i]]), function(x) {
if (df[[i]][x] == "") ""
else if (lengths[x] <= 1) {
paste0(c(df[[i]][x], rep(".", pad == 0), rep(pad, max(digits.r.of..) - digits.r.of..[x] + as.numeric(pad != 0))),
collapse = "")
}
else paste0(c(df[[i]][x], rep(pad, max(digits.r.of..) - digits.r.of..[x])),
collapse = "")
})
}
}
df[o.negs & df == 0] <- paste0("-", df[o.negs & df == 0])
# Insert NA placeholders
df[nas] <- na_vals
if (length(rn) > 0) rownames(df) <- rn
if (length(cn) > 0) names(df) <- cn
return(df)
}
text.box.plot <- function(range.list, width = 12) {
full.range <- range(unlist(range.list))
ratio = diff(full.range)/(width+1)
rescaled.range.list <- lapply(range.list, function(x) round(x/ratio))
rescaled.full.range <- round(full.range/ratio)
d <- as.data.frame(matrix(NA_character_, ncol = 3, nrow = length(range.list),
dimnames = list(names(range.list), c("Min", paste(rep(" ", width + 1), collapse = ""), "Max"))),
stringsAsFactors = FALSE)
d[,"Min"] <- vapply(range.list, function(x) x[1], numeric(1L))
d[,"Max"] <- vapply(range.list, function(x) x[2], numeric(1L))
for (i in seq_len(nrow(d))) {
spaces1 <- rescaled.range.list[[i]][1] - rescaled.full.range[1]
#|
dashes <- max(0, diff(rescaled.range.list[[i]]) - 2)
#|
spaces2 <- max(0, diff(rescaled.full.range) - (spaces1 + 1 + dashes + 1))
d[i, 2] <- paste0(paste(rep(" ", spaces1), collapse = ""), "|", paste(rep("-", dashes), collapse = ""), "|", paste(rep(" ", spaces2), collapse = ""))
}
return(d)
}
ESS <- function(w) {
(sum(w)^2)/sum(w^2)
}
mean.abs.dev <- function(x) {
mean(abs(x - mean(x)))
}
coef.of.var <- function(x, pop = TRUE) {
if (pop) sqrt(mean((x-mean(x))^2))/mean(x)
else sd(x)/mean(x)
}
#To pass CRAN checks:
utils::globalVariables(c("covs", "dual", "treat", "constraint"))
|
5220ed70fd653f74fb58f571656604ede3ae5a53 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/BayesVarSel/examples/plot.Bvs.Rd.R | ad36f3e99630c44395d9b45690ab5e6cfed825ac | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 822 | r | plot.Bvs.Rd.R | library(BayesVarSel)
### Name: plot.Bvs
### Title: A function for plotting summaries of an object of class 'Bvs'
### Aliases: plot.Bvs
### ** Examples
#Analysis of Crime Data
#load data
data(UScrime)
#Default arguments are Robust prior for the regression parameters
#and constant prior over the model space
#Here we keep the 1000 most probable models a posteriori:
crime.Bvs<- Bvs(formula= y ~ ., data=UScrime, n.keep=1000)
#A look at the results:
crime.Bvs
summary(crime.Bvs)
#A plot with the posterior probabilities of the dimension of the
#true model:
plot(crime.Bvs, option="dimension")
#An image plot of the joint inclusion probabilities:
plot(crime.Bvs, option="joint")
#Two image plots of the conditional inclusion probabilities:
plot(crime.Bvs, option="conditional")
plot(crime.Bvs, option="not")
|
1f984d2e74b948f069703ebfa2d352c28cdd2333 | b39174e13af676055c2cdf3ce8429beeb4c4498d | /MOFAtools/man/loadModel.Rd | 8a3adbb971b20ba990a1e2288ddd3e7015d89191 | [] | no_license | vd4mmind/MOFA | fec5fe2157211d46e02df37c7407648c44dd9901 | 5b2a4e535aa400856d758142e27406508f915cb2 | refs/heads/master | 2020-03-21T03:55:06.513807 | 2018-06-18T15:37:26 | 2018-06-18T15:37:26 | 138,080,898 | 1 | 0 | null | 2018-06-20T20:10:41 | 2018-06-20T20:10:40 | null | UTF-8 | R | false | true | 826 | rd | loadModel.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/loadModel.R
\name{loadModel}
\alias{loadModel}
\title{loading a trained MOFA model}
\usage{
loadModel(file, object = NULL, sortFactors = T, r2_threshold = NULL)
}
\arguments{
\item{file}{an hdf5 file saved by the MOFA python framework.}
\item{object}{either NULL (default) or an an existing untrained MOFA object. If NULL, the \code{\link{MOFAmodel}} object is created from the scratch.}
\item{sortFactors}{boolean indicating whether factors should be sorted by variance explained (default is TRUE)}
}
\value{
a \code{\link{MOFAmodel}} model.
}
\description{
Method to load a trained MOFA model. \cr
The training of MOFA is done using a Python framework, and the model output is saved as an .hdf5 file, which has to be loaded in the R package.
}
|
dd5be82a2aa5498f5e0210baf1f7537a7b73dbec | 4b6e4a6a79c8a0e673d522164589d48d128859e4 | /SourceData.R | b81914ae40452e8d00b012bd71b178c7b172bb05 | [] | no_license | gankahub/ExploratoryDataAnalysisAssignment2 | d365cbae7d71ec45b4378c150f44e9683641e41c | 8b9eac7065ef1bcf81168b0c5934cbaecdb9fb2d | refs/heads/master | 2016-09-05T14:27:57.001828 | 2015-06-21T22:00:36 | 2015-06-21T22:00:36 | 37,791,201 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 464 | r | SourceData.R | #creating data directory
if(!file.exists("data")){
dir.create("data")
}
##downloading file
fileurl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(fileurl,destfile = "./exdata.zip",method="curl")
##unzipping file
unzip("./exdata.zip",exdir = "./data")
#Importing SCC PM25 file
NEI <- readRDS("./data/summarySCC_PM25.rds")
#Importing Source_Classification_Code.rds
SCC <- readRDS("./data/Source_Classification_Code.rds")
|
853a0af6f6093ce144921f87c75e040fe25a34b4 | 6e2decc6ba4c804cd5e93416bbc4e43fdf00fef1 | /R_code/semiparam_transform_model_rp_noadj.R | 11269dc41934260d5ffe2f24f6d3fa671a26e99a | [] | no_license | didiwu345/WV-IC | 42688a1dd66944d15f654de4488a77bcf01c675a | 8fd2e8c87c275176015d2b6040588684a5dcb4f6 | refs/heads/main | 2023-04-01T01:16:11.867484 | 2021-04-12T15:31:58 | 2021-04-12T15:31:58 | 352,167,354 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,964 | r | semiparam_transform_model_rp_noadj.R |
suppressMessages(library(MASS))
suppressMessages(library(statmod))
suppressMessages(library(nlme))
#suppressMessages(library(survival))
#suppressMessages(library(Rcpp))
#suppressMessages(library(RcppArmadillo))
Gfunc = function(x, r){
if(r==0){ x }
else if(r>0){ log(1+r*x)/r }
}
###################### Function for semi-param transformation model fit ######################
DentalEM_rp_noadj = function(bound_time, order_bound_times, n.l, r_alpha, maxiter){
#-----------------------------------------------------------#
# bound_time: left, right bound times
# order_bound_times: ordered baseline hazard jump time pts
# n.l: number of Gauss-Laguerre quadrature points
# r_alpha: semi-parametric transformation parameter
# maxiter: maximum numer of iter
#-----------------------------------------------------------#
n = nrow(bound_time)
# Censor rate
left.censor.rate = sum(bound_time[,1]==0)/n
right.censor.rate = sum(bound_time[,2]==Inf)/n
#---------------------- Initial Values -----------------------#
L = length(order_bound_times)
lambda.ini = rep(1/L, L)
lambda_old = rep(0, length(order_bound_times))
lambda_new = lambda.ini
#-------------------- Gaussian Quadrature --------------------#
#Gauss-Laguerre: 1-D --> Alpha
GLQ = gauss.quad(n.l, kind="laguerre")
node_l = as.matrix(GLQ$nodes)
weight_l = as.matrix(GLQ$weights)
#----------------------- Density Alpha ----------------------#
#Density for alpha; not including exp{-x} part
density_Alpha = function(x, r){ (1/r)^(1/r) * x^(1/r-1) / factorial(1/r-1) }
#-------------------- Apply EM Algorithm --------------------#
### Initial values
iter = 0
### collect all loglik values
loglik_all = NULL
message(" Now: Obtaining the unpenalized nonparametric MLE")
### Start iteration
while (iter < maxiter){
iter = iter + 1
lambda_old = lambda_new
# plot(order_bound_times, cumsum(lambda_new))
# lines(order_bound_times, order_bound_times/2)
Rij.star = apply(bound_time,1,function(x){ max(x[x<Inf]) })
alpha.all = r_alpha*node_l[,1]
density.alpha.all = density_Alpha(alpha.all,r_alpha)
exp.term = rep(1,n) # length n array
### index of jump time points between LR bounds
jumptidx_in_LR = (outer(order_bound_times,bound_time[,1],'>') & outer(order_bound_times,bound_time[,2],'<='))+0 # len(order_bound_time) by n matrix
sum.lambda.exp.left = rowSums(exp.term * t(outer(order_bound_times,bound_time[,1],'<=')*lambda_old)) #length n array
sum.lambda.exp.right = rowSums(exp.term * t(outer(order_bound_times,bound_time[,2],'<=')*lambda_old)) #length n array
sum.lambda.alpha.exp.between = matrix(colSums(lambda_old*jumptidx_in_LR)*exp.term, ncol=1) %*% matrix(alpha.all,nrow=1) #n by n.l matrix
density.AB.rfs.i = matrix(nrow=n, ncol=n.l) # n x n.l matrix
for(i in 1:n){
if(bound_time[i,2] != Inf){ density.AB.rfs.i[i,] = exp(-sum.lambda.exp.left[i]*alpha.all) - exp(-sum.lambda.exp.right[i]*alpha.all) }
else if(bound_time[i,2] == Inf){ density.AB.rfs.i[i,] = exp(-sum.lambda.exp.left[i]*alpha.all) }
}
density.all = t(density.alpha.all * t(density.AB.rfs.i))
int.all.i = rep(NA, n) # length n array
for(i in 1:n){
if(bound_time[i,2] != Inf){ int.all.i[i] = exp(-Gfunc(sum.lambda.exp.left[i], r_alpha)) - exp(-Gfunc(sum.lambda.exp.right[i], r_alpha)) }
else if(bound_time[i,2] == Inf){ int.all.i[i] = exp(-Gfunc(sum.lambda.exp.left[i], r_alpha)) }
}
### logLikelihood
loglik_i = log(int.all.i)
loglik = sum(loglik_i)
loglik_all = c(loglik_all, loglik)
### E[alpha * exp{}]
E.alpha.exp = c(((matrix(exp.term,ncol=1) %*% matrix(alpha.all,nrow=1)) * density.all) %*% weight_l) / int.all.i # length n array
### E[C_ijt]
E.Cijt = matrix(nrow=n, ncol=length(order_bound_times)) # n by len(order_bound_time) matrix
for(i in 1:n){
if(bound_time[i,2]==Inf){ E.Cijt[i,] = rep(0, length(order_bound_times)) }
if(bound_time[i,2]!=Inf){
E.Cijt[i,] = c(t(weight_l*density.all[i,]) %*% ((1/(1-exp(-sum.lambda.alpha.exp.between[i,]))) * (matrix(alpha.all,ncol=1) %*% matrix(exp.term[i]*lambda_old*jumptidx_in_LR[,i],nrow=1)))) / int.all.i[i]
}
}
####################################
### Lambda Update
####################################
### Lambda (jump size) Update
lambda.new.num = sapply(1:length(order_bound_times), function(x){ sum(E.Cijt[,x] * (order_bound_times[x] <= Rij.star)) })
lambda.new.denom = sapply(1:length(order_bound_times), function(x){ sum(E.alpha.exp * (order_bound_times[x] <= Rij.star)) })
lambda_new = lambda.new.num / lambda.new.denom
################################################
### Stopping Criteria
################################################
error.lambda = max(abs(c(lambda_new - lambda_old)))
if(max(error.lambda)<=0.0001){ break }
# print(sprintf("iter# %d", iter))
# print(sprintf("lambda change: %0.6f", error.lambda))
# print(sprintf("loglik: %0.6f", loglik))
} # end of while loop #
###################### Output results ######################
result_all = list()
result_all[['iter']] = iter
result_all[['lambda_est']] = lambda_new
result_all[['order_bt']] = order_bound_times
result_all[['loglik_vec']] = loglik_i
result_all[['lcr']] = left.censor.rate
result_all[['rcr']] = right.censor.rate
return(result_all)
} #end of DentalEM_rp_noadj function#
###################### Main function ######################
semipar_trans_fit_rp_noadj = function(bound_time, n.l, r_alpha, maxiter){
#-----------------------------------------------------------
# bound_time: left and right bound time
# n.l: Gauss Laguerre quadrature point number
# r_alpha: semi-parametrix transformation parameter
# maxiter: maximum EM iteration allowed
#-----------------------------------------------------------
order_bound_times = sort(unique(c(bound_time[c(bound_time)!=0 & c(bound_time)!=Inf])))
###################### Fit semi-param transformation model ######################
fit_bl = DentalEM_rp_noadj(bound_time, order_bound_times, n.l, r_alpha, maxiter)
loglik_bl = fit_bl$loglik_vec #logliklihood vector (each represent one subject)
lambda_est = fit_bl$lambda_est #baseline hazard estimate
order_bt = fit_bl$order_bt #ordered baseline hazard jump time
#lcr = fit_bl$lcr #left censor rate
#rcr = fit_bl$rcr #right censor rate
#iternum = fit_bl$iter #iteration before algorithm stops
###################### Output results ######################
output = list()
output[['order_bt']] = c(order_bt)
output[['lambda_est']] = c(lambda_est)
output[['loglik']] = sum(loglik_bl)
return(output)
} #end of output function#
|
03f2437a7273d11afcaacab609cf36a5a0fad905 | 35b6b7ce84d38d33cc326cf0fcd791bba63d47be | /man/publish_mimebundle.Rd | 499c684c242b00961e466a06586f01688557e15d | [] | no_license | ramnathv/IRdisplay | d71bef2e45b3a3978573e6df7b659b1ac375292f | 6565f7460ea6da27e84460ed92abb32c8796f60f | refs/heads/master | 2020-03-31T05:14:15.181841 | 2018-09-24T08:22:34 | 2018-09-24T08:22:34 | 151,938,420 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 985 | rd | publish_mimebundle.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/display.r
\name{publish_mimebundle}
\alias{publish_mimebundle}
\title{Display data by mimetype, with optional alternative representations.}
\usage{
publish_mimebundle(data, metadata = NULL)
}
\arguments{
\item{data}{A named list mapping mimetypes to content (\code{\link[base]{character}} or \code{\link[base]{raw} vectors})}
\item{metadata}{A named list mapping mimetypes to named lists of metadata, e.g. \code{list('image/png' = list(width = 5))}}
}
\description{
Calls the function stored as option value of \code{jupyter.base_display_func}. (see: \link{IRdisplay-options})
}
\examples{
\dontrun{## (Run inside of an IRkernel)
publish_mimebundle(list('text/html' = '<h1>Hi!</h1>'))
publish_mimebundle(
list('image/svg+xml' = '<svg xmlns="http://www.w3.org/2000/svg"><circle r="100"/></svg>'),
list('image/svg+xml' = list(width = 100, height = 100)))}
}
\seealso{
\code{\link{prepare_mimebundle}}
}
|
9e70ad1cc44c021bdaa7e270d058a8160cec3463 | ee8dd63922e47711a5911d282472e6784c5d67c0 | /R/find-centers-kpp.R | 2c5601cc35163b132ab1561b52fde68b441ee403 | [
"MIT"
] | permissive | atusy/qntmap | 07ff96149b4d8fb5ee2386b0892d524d1f55aa34 | 5b6a349ac12b600daad7e806e22982e514150b86 | refs/heads/master | 2021-06-04T06:01:19.809161 | 2021-04-06T13:54:15 | 2021-04-06T13:54:15 | 97,662,265 | 2 | 0 | MIT | 2021-04-06T13:54:15 | 2017-07-19T02:07:09 | R | UTF-8 | R | false | false | 1,316 | r | find-centers-kpp.R | #' Generate initial centroids for clustering randomly
#'
#' @param x
#' An object which can be coerced to [`matrix`],
#' typically [`matrix`] itself or [`data.frame`].
#' @param k A number of clusters.
#' @param given Given centers. Not yet implemented.
#'
#' @importFrom matrixStats colSums2
#'
#'
find_centers_kpp <- function(x, k, given = NULL) {
# check parameters
if (!is.null(given) && nrow(x) < k)
stop("Number of given centroids must be smaller than k")
if (k < 2L) stop("k must be a numeric >= 2")
# transform parameters
x <- as.matrix(x)
x_trans <- t(x)
# calculation
n <- nrow(x) # number of data points
n_seq <- seq(n)
centers <- numeric(k) # IDs of centers
distances <- matrix(numeric(n * (k - 1L)), ncol = k - 1L)
# distances[i, j]: The distance between x[i,] and x[centers[j],]
pr <- rep(1L, n) # probability for sampling centers
for (i in seq(k - 1L)) {
centers[i] <- sample.int(n, 1L, prob = pr) # Pick up the ith center
distances[, i] <- colSums2((x_trans - x[centers[i], ])^2)
# Compute (the square of) distances to the center
pr <- distances[cbind(n_seq, max.col(-distances[, 1:i, drop = FALSE]))]
# Compute probaiblity for the next sampling
}
centers[k] <- sample.int(n, 1L, prob = pr)
data.frame(phase = centers, x[centers, ])
}
|
a45005e4031d54bfd41e68429ae00a4c8324a94f | 67b0547199b3bbc37434d3db37275e5ce013b960 | /purrr.r | 6addd6aaa3cf06c5299cd389126b85b83664d059 | [] | no_license | thefactmachine/purrr_experiments | 20ce4f729a7f6066df7a86c742d44de8367f3307 | 3184eb40afc9a462cb415e889136ad67e05b5bc1 | refs/heads/master | 2021-01-18T03:51:31.785712 | 2017-03-24T04:42:41 | 2017-03-24T04:42:41 | 85,783,107 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,436 | r | purrr.r | library(purrr)
# https://blog.rstudio.org/2016/01/06/purrr-0-2-0/
df <- data.frame(
a = 1L,
b = 1.5,
y = Sys.time(),
z = ordered(1)
)
df[1:4] %>% sapply(class) %>% str()
# clear the decks
rm(list=ls())
options(stringsAsFactors=FALSE)
library(dplyr)
library(lubridate)
library(tidyr)
library(purrr)
# turn off scientific notation.
options(scipen = 999)
# https://blog.rstudio.org/2016/01/06/purrr-0-2-0/
df <- data.frame(
a = 1L,
b = 1.5,
y = Sys.time(),
z = ordered(1)
)
# these return different data types.
# sapply() tries to guess the appropriate data type
# vapply() takes an extra parameter which lets the programmer specify the data type
df[1:4] %>% sapply(class)
df[1:2] %>% sapply(class)
df[3:4] %>% sapply(class)
# purrr has multiple functions, one for each common type of output:
# map_lgl(), map_int(), map_dbl(), map_chr(), and map_df()
# these produce the output .... OR ...throw and error....
x <- list(1, 3, 5)
y <- list(2, 4, 6)
# what is c here...?????
map2(x, y, c)
# using a formula -- result is a list
map2(x, y, ~.x + 2 + .y)
map2_dbl(x, y, `+`)
a <- list(1, 10, 100)
b <- list(1, 2, 3)
# returns a vector -- the formula needs to be .x & .y
# these are passed by position.....
purrr::map2_dbl(a, b, ~ .x + .y)
# returns a list
map2(x, y, ~ .x + .y)
# flatten lists...
lst_ex <- list(1L, 2:3, 4L)
# this returns a list ... previously we had a list of 3 (with element 3 containing two elements)
# now we have a list of 4 with each element containing a single element.
# return type is a list
purrr::flatten(lst_ex)
purrr::flatten_int(lst_ex)
# list basics....
a <- list(a = 1:3, b = "a string", c = pi, d = list(-1, -5))
# extracts a vector - gets the primative data type
a[["a"]]
# extracts a list - gets the containing data type
a["a"] %>% class()
xx <- a["a"]
# extracts a vector
xx$a
# extracts two lists
a[c("a", "b")]
# same as above....
a[1:2]
# from here: http://r4ds.had.co.nz/lists.html
# from here: http://r4ds.had.co.nz/lists.html
df_test <- data.frame(a = rnorm(5), b = rnorm(5), c = c("mark", "fred", "bill", "john", "sam"))
df_test[["a"]] %>% class()
df_test$a %>% class()
df_test[[c("a")]] %>% class()
# ==== function to recieve a function an compute an aggregate on a data.frame
fn_col_summary <- function(df, fun) {
out <- vector("numeric", length(df))
# produces a sequence of: 1,2,3 for 3 column data frame
for (i in seq_along(df)) {
out[i] <- fun(df[[i]])
} # for
return(out)
} # function
df_test_numbers <- data.frame(a = rnorm(10), b = rnorm(10), c = rnorm(10))
fn_col_summary(df_test_numbers, mean)
fn_col_summary(df_test_numbers, sum)
# MAP FUNCTIONS....
# map() returns a list.
# map_lgl() returns a logical vector.
# map_int() returns a integer vector.
# map_dbl() returns a double vector.
# map_chr() returns a character vector.
# map_df() returns a data frame.
x <- list(1, 2, 3)
purrr::map_int(x, length)
# the second argument .f, the function to apply,
# can be a formula, a character vector, or an integer vector
# Any arguments after will be passed on....
map_dbl(x, mean, trim = 0.5)
# splits the data frame up into 3 data.frames
list_car_test <- mtcars %>% split(.$cyl)
models <- mtcars %>%
split(.$cyl) %>%
map(function(df) lm(mpg ~ wt, data = df))
# the above can be written using a one-side formula.
models <- mtcars %>%
split(.$cyl) %>%
map(~lm(mpg ~ wt, data = .))
# if you want to extract the R Squared.
# we run summary on the models and extract
# the component called r.squared.
models %>%
map(summary) %>%
map_dbl(~.$r.squared)
# this just does what the above does...
mtcars %>% split(.$cyl) %>%
map(~lm(mpg ~ wt, data = .)) %>%
map(summary) %>%
map_dbl(~.$r.squared)
# we can also use a numeric vector
# to extract things by position
x <- list(list(1, 2, 3), list(4, 5, 6), list(7, 8, 9))
x %>% map_dbl(2)
# notes about the apply functions....
# lapply() is basically identical to map():
# it iterates over a list and returns a list.
# sapply() is a wrapper around lapply() that tries to simplify
# the result and return a vector. vapply() is like sapply()
# but takes a parameter that defines (and enforces) the return type.
# map_df(x, f) is effectively the same as do.call("rbind", lapply(x, f))
lst_x <- list(c(1, 2, 3), c(4, 5, 6), c(7, 8, 9))
fn_square <- function(x) {x * x}
# rbind the per-element results into one matrix.
do.call("rbind", lapply(lst_x, sqrt))
# One row of coefficients (intercept, slope) per cylinder group.
mtcars %>%
split(.$cyl) %>%
map(~ lm(mpg ~ wt, data = .x)) %>%
map_df(~ as.data.frame(t(as.matrix(coef(.)))))
# (if you also want to preserve the variable names see
# the broom package)
library(jsonlite)
# from here: http://r4ds.had.co.nz/lists.html
# simplifyVector = FALSE keeps the raw nested-list structure.
lst_issues <- jsonlite::fromJSON("issues.json", simplifyVector = FALSE)
# http://r4ds.had.co.nz/lists.html
# 30 issues
length(lst_issues)
# find out the structure of each of the 30 elements.
str(lst_issues[[1]])
lst_first_element <- lst_issues[[1]]
# pulls out an integer vector of issue ids, one per issue.
lst_issues %>% map_int("id")
# map returns a list.
# structure of the dataset: lst_issues contains 30 elements.
# each of these elements contains 22 elements. Of these 22 elements, 1
# element is a list with the name of "user". This has 17 elements.
lst_users <- lst_issues %>% map("user")
length(lst_users) == 30
# INDEXING DEEPLY INTO THE HIERARCHY....
# so here is the fun part....we pass in a character vector
# with the names of the lists as they are in the hierarchy..
lst_issues %>% map_chr(c("user", "login"))
lst_issues %>% map_int(c("user", "id"))
# UPTO HERE....
# REMOVING A LEVEL OF A HIERARCHY
# http://r4ds.had.co.nz/lists.html
lst_users %>% map_chr("login")
# gets the login from the sublist user
lst_issues %>% map_chr(c("user", "login"))
# BUG FIX: the original called length(issues), but no object named `issues`
# exists in this script — the parsed JSON list is named `lst_issues`.
length(lst_issues)
# =======================================
# https://github.com/hadley/purrr
# using base R "split()"
lst_split <- split(mtcars, mtcars$cyl)
names(lst_split)
# Same per-group model fit as above, kept in a named list.
lst_result <- mtcars %>% split(.$cyl) %>% map(~ lm(mpg ~ wt, data = .))
by_species <- iris %>% group_by(Species)
data(diamonds, package = "ggplot2")
# apply the as.character() function only to columns that are factors
diamonds %>% map_if(is.factor, as.character) %>% str()
# scale only the named coordinate columns, leaving the rest untouched
vct_to_scale <- c("x", "y", "z")
new_diamonds <- diamonds %>% map_at(vct_to_scale, scale)
# this is quite incredible...! (slice_rows/by_slice: deprecated purrr API
# that maps over row groups)
cc <- mtcars %>% purrr::slice_rows("cyl") %>% by_slice(map, ~ .x / sum(.x))
# some good stuff here:
# http://www.machinegurning.com/rstats/map_df/
|
3a3765787f6c23b231cce5dbfb1d2e881cc506a1 | 812e9c9df8f276ba93c4f9c79e97fb5430f5ed60 | /RCode/power_analysis_cluster.R | 652beeb46e944d8b053696665c8e2d2098b04e72 | [
"MIT"
] | permissive | janhove/janhove.github.io | d090a22fa90776926301b9cb5b318475c2010cb5 | 44bb9fe9c8070ecda5cec049e9805da71063326f | refs/heads/master | 2023-08-21T18:52:38.584816 | 2023-08-06T09:11:17 | 2023-08-06T09:11:17 | 22,544,198 | 7 | 5 | null | 2020-05-27T09:56:52 | 2014-08-02T10:41:02 | HTML | UTF-8 | R | false | false | 15,515 | r | power_analysis_cluster.R | ################################################################################
# Power and Type-I error simulation for different approaches #
# to analysing cluster-randomised experiments #
# #
# Jan Vanhove (jan.vanhove@unifr.ch) #
# Last change: October 28, 2019 #
################################################################################
# Simulation function
#
# Simulates cluster-randomised experiments and analyses each simulated data
# set with five different strategies, recording the p-value, the estimated
# treatment effect, its standard error, and the 95% confidence bounds for
# each strategy.
#
# Args:
#   n_sim:            number of simulated data sets.
#   n_per_class:      vector of cluster (class) sizes; it is reshuffled
#                     across conditions for every simulation run.
#   ICC:              intra-class correlation used when generating the data.
#   effect:           true treatment effect.
#   reliability_post: reliability of the posttest (outcome) measure.
#   reliability_pre:  reliability of the pretest (covariate) measure.
#
# Returns:
#   A data.frame with one row per simulation and, for each of the five
#   analyses, columns pvals_*, estimates_*, stderrs_*, cilo_* and cihi_*.
#
# The five analyses, in column order:
#   1. cluster_ignore:   cluster means, covariate ignored
#   2. multi_ignore:     multilevel model, covariate ignored (Satterthwaite df)
#   3. cluster_residual: cluster means of covariate-adjusted residuals
#   4. cluster_mean:     cluster means with the cluster-mean covariate
#   5. multi_covariate:  multilevel model including the covariate (Satterthwaite df)
compute_clustered_power <- function(n_sim = 10000, n_per_class, ICC, effect, reliability_post, reliability_pre) {
  require("lme4")
  require("tidyverse")
  require("lmerTest")
  require("cannonball")
  # Analysis-strategy names, used once for all five column-name vectors below.
  methods <- c("cluster_ignore", "multi_ignore", "cluster_residual",
               "cluster_mean", "multi_covariate")
  # One matrix per statistic: rows = simulations, columns = analyses.
  pvals <- matrix(nrow = n_sim, ncol = 5)
  estimates <- matrix(nrow = n_sim, ncol = 5)
  stderrs <- matrix(nrow = n_sim, ncol = 5)
  cilo <- matrix(nrow = n_sim, ncol = 5)
  cihi <- matrix(nrow = n_sim, ncol = 5)
  for (i in seq_len(n_sim)) {
    # Generate data; rerandomise the class sizes to the conditions
    d <- clustered_data(n_per_class = sample(n_per_class),
                        ICC = ICC,
                        rho_prepost = NULL,
                        effect = effect,
                        reliability_post = reliability_post,
                        reliability_pre = reliability_pre)
    # Residualise the outcome on the pretest (ignoring the clustering).
    d$Residual <- resid(lm(outcome ~ pretest, data = d))
    # Summarise per cluster
    d_per_class <- d %>%
      group_by(class, condition) %>%
      summarise(mean_outcome = mean(outcome),
                mean_residual = mean(Residual),
                mean_pretest = mean(pretest))
    # First analysis: ignore covariate in cluster-mean analysis
    cluster_ignore <- lm(mean_outcome ~ condition, data = d_per_class)
    pvals[i, 1] <- summary(cluster_ignore)$coefficients[2, 4]
    estimates[i, 1] <- coef(cluster_ignore)[[2]]
    stderrs[i, 1] <- summary(cluster_ignore)$coefficients[2, 2]
    cilo[i, 1] <- confint(cluster_ignore)[2, 1]
    cihi[i, 1] <- confint(cluster_ignore)[2, 2]
    # Second analysis: ignore covariate in multilevel model (Satterthwaite)
    multi_ignore <- lmer(outcome ~ condition + (1|class), data = d)
    pvals[i, 2] <- summary(multi_ignore)$coefficients[2, 5]
    estimates[i, 2] <- fixef(multi_ignore)[[2]]
    stderrs[i, 2] <- summary(multi_ignore)$coefficients[2, 2]
    cilo[i, 2] <- estimates[i, 2] + qt(0.025, df = summary(multi_ignore)$coefficients[2, 3]) * stderrs[i, 2]
    cihi[i, 2] <- estimates[i, 2] + qt(0.975, df = summary(multi_ignore)$coefficients[2, 3]) * stderrs[i, 2]
    # Third analysis: analyse mean residual
    cluster_residual <- lm(mean_residual ~ condition, data = d_per_class)
    pvals[i, 3] <- summary(cluster_residual)$coefficients[2, 4]
    estimates[i, 3] <- coef(cluster_residual)[[2]]
    stderrs[i, 3] <- summary(cluster_residual)$coefficients[2, 2]
    cilo[i, 3] <- confint(cluster_residual)[2, 1]
    cihi[i, 3] <- confint(cluster_residual)[2, 2]
    # Fourth analysis: mean outcome with mean covariate
    cluster_mean <- lm(mean_outcome ~ condition + mean_pretest, data = d_per_class)
    pvals[i, 4] <- summary(cluster_mean)$coefficients[2, 4]
    estimates[i, 4] <- coef(cluster_mean)[[2]]
    stderrs[i, 4] <- summary(cluster_mean)$coefficients[2, 2]
    cilo[i, 4] <- confint(cluster_mean)[2, 1]
    cihi[i, 4] <- confint(cluster_mean)[2, 2]
    # Fifth analysis: include covariate in multilevel model (Satterthwaite)
    multi_covariate <- lmer(outcome ~ condition + pretest + (1|class), data = d)
    pvals[i, 5] <- summary(multi_covariate)$coefficients[2, 5]
    estimates[i, 5] <- fixef(multi_covariate)[[2]]
    stderrs[i, 5] <- summary(multi_covariate)$coefficients[2, 2]
    # BUG FIX: the original computed these bounds from estimates[i, 2] and
    # stderrs[i, 2] (the covariate-free multilevel model) — a copy-paste
    # error. The interval must use the fifth analysis' own estimate and SE.
    cilo[i, 5] <- estimates[i, 5] + qt(0.025, df = summary(multi_covariate)$coefficients[2, 3]) * stderrs[i, 5]
    cihi[i, 5] <- estimates[i, 5] + qt(0.975, df = summary(multi_covariate)$coefficients[2, 3]) * stderrs[i, 5]
  }
  # Assemble the output: convert each matrix to a data frame and label its
  # columns with the statistic and the analysis-strategy name.
  pvals <- as.data.frame(pvals)
  estimates <- as.data.frame(estimates)
  stderrs <- as.data.frame(stderrs)
  cilo <- as.data.frame(cilo)
  cihi <- as.data.frame(cihi)
  colnames(pvals) <- paste0("pvals_", methods)
  colnames(estimates) <- paste0("estimates_", methods)
  colnames(stderrs) <- paste0("stderrs_", methods)
  colnames(cilo) <- paste0("cilo_", methods)
  colnames(cihi) <- paste0("cihi_", methods)
  output_simulation <- cbind(pvals, estimates, stderrs, cilo, cihi)
  output_simulation
}
#-------------------------------------------------------------------------------
# Now run the simulations corresponding to scenarios 1 through 4.
# Scenario grid: cluster-size balance x covariate strength (expected
# pretest-posttest correlation) x true effect, at a higher ICC.
# reliability_pre is back-solved as expected_correlation^2 / reliability_post
# so that the pretest covariate has the desired expected correlation with
# the outcome.
# HIGHER ICC
# Roughly equal sample sizes, strong covariate, no effect
set.seed(0815)
n_per_class_balanced <- sample(15:25, size = 14, replace = TRUE) # little variability in cluster sizes
ICC <- 0.17
effect <- 0
reliability_post <- 1
expected_correlation <- 0.7
reliability_pre <- expected_correlation^2 / reliability_post
sim1 <- compute_clustered_power(n_per_class = n_per_class_balanced, ICC = ICC, effect = effect,
reliability_post = reliability_post, reliability_pre = reliability_pre)
# Roughly equal sample sizes, weak covariate, no effect
expected_correlation <- 0.3
reliability_pre <- expected_correlation^2 / reliability_post
sim2 <- compute_clustered_power(n_per_class = n_per_class_balanced, ICC = ICC, effect = effect,
reliability_post = reliability_post, reliability_pre = reliability_pre)
# Roughly equal sample sizes, strong covariate, some effect
effect <- 0.4
expected_correlation <- 0.7
reliability_pre <- expected_correlation^2 / reliability_post
sim3 <- compute_clustered_power(n_per_class = n_per_class_balanced, ICC = ICC, effect = effect,
reliability_post = reliability_post, reliability_pre = reliability_pre)
# Roughly equal sample sizes, weak covariate, some effect
expected_correlation <- 0.3
reliability_pre <- expected_correlation^2 / reliability_post
sim4 <- compute_clustered_power(n_per_class = n_per_class_balanced, ICC = ICC, effect = effect,
reliability_post = reliability_post, reliability_pre = reliability_pre)
# Wildly different sample sizes, strong covariate, no effect
# (cluster sizes 2, 4, 8, ..., 1024; note seq(1:10) is simply 1:10)
n_per_class_different <- 2^seq(1:10)
effect <- 0
expected_correlation <- 0.7
reliability_pre <- expected_correlation^2 / reliability_post
sim5 <- compute_clustered_power(n_per_class = n_per_class_different, ICC = ICC, effect = effect,
reliability_post = reliability_post, reliability_pre = reliability_pre)
# Wildly different sample sizes, weak covariate, no effect
expected_correlation <- 0.3
reliability_pre <- expected_correlation^2 / reliability_post
sim6 <- compute_clustered_power(n_per_class = n_per_class_different, ICC = ICC, effect = effect,
reliability_post = reliability_post, reliability_pre = reliability_pre)
# Wildly different sample sizes, strong covariate, some effect
effect <- 0.4
expected_correlation <- 0.7
reliability_pre <- expected_correlation^2 / reliability_post
sim7 <- compute_clustered_power(n_per_class = n_per_class_different, ICC = ICC, effect = effect,
reliability_post = reliability_post, reliability_pre = reliability_pre)
# Wildly different sample sizes, weak covariate, some effect
expected_correlation <- 0.3
reliability_pre <- expected_correlation^2 / reliability_post
sim8 <- compute_clustered_power(n_per_class = n_per_class_different, ICC = ICC, effect = effect,
reliability_post = reliability_post, reliability_pre = reliability_pre)
# LOWER ICC
# Same eight scenarios as above, repeated with a lower intra-class correlation.
# Roughly equal sample sizes, strong covariate, no effect
ICC <- 0.03
effect <- 0
expected_correlation <- 0.7
reliability_pre <- expected_correlation^2 / reliability_post
sim9 <- compute_clustered_power(n_per_class = n_per_class_balanced, ICC = ICC, effect = effect,
reliability_post = reliability_post, reliability_pre = reliability_pre)
# Roughly equal sample sizes, weak covariate, no effect
expected_correlation <- 0.3
reliability_pre <- expected_correlation^2 / reliability_post
sim10 <- compute_clustered_power(n_per_class = n_per_class_balanced, ICC = ICC, effect = effect,
reliability_post = reliability_post, reliability_pre = reliability_pre)
# Roughly equal sample sizes, strong covariate, some effect
effect <- 0.4
expected_correlation <- 0.7
reliability_pre <- expected_correlation^2 / reliability_post
sim11 <- compute_clustered_power(n_per_class = n_per_class_balanced, ICC = ICC, effect = effect,
reliability_post = reliability_post, reliability_pre = reliability_pre)
# Roughly equal sample sizes, weak covariate, some effect
expected_correlation <- 0.3
reliability_pre <- expected_correlation^2 / reliability_post
sim12 <- compute_clustered_power(n_per_class = n_per_class_balanced, ICC = ICC, effect = effect,
reliability_post = reliability_post, reliability_pre = reliability_pre)
# Wildly different sample sizes, strong covariate, no effect
effect <- 0
expected_correlation <- 0.7
reliability_pre <- expected_correlation^2 / reliability_post
sim13 <- compute_clustered_power(n_per_class = n_per_class_different, ICC = ICC, effect = effect,
reliability_post = reliability_post, reliability_pre = reliability_pre)
# Wildly different sample sizes, weak covariate, no effect
expected_correlation <- 0.3
reliability_pre <- expected_correlation^2 / reliability_post
sim14 <- compute_clustered_power(n_per_class = n_per_class_different, ICC = ICC, effect = effect,
reliability_post = reliability_post, reliability_pre = reliability_pre)
# Wildly different sample sizes, strong covariate, some effect
effect <- 0.4
expected_correlation <- 0.7
reliability_pre <- expected_correlation^2 / reliability_post
sim15 <- compute_clustered_power(n_per_class = n_per_class_different, ICC = ICC, effect = effect,
reliability_post = reliability_post, reliability_pre = reliability_pre)
# Wildly different sample sizes, weak covariate, some effect
expected_correlation <- 0.3
reliability_pre <- expected_correlation^2 / reliability_post
sim16 <- compute_clustered_power(n_per_class = n_per_class_different, ICC = ICC, effect = effect,
reliability_post = reliability_post, reliability_pre = reliability_pre)
# save(sim1, sim2, sim3, sim4, sim5, sim6, sim7, sim8,
#      sim9, sim10, sim11, sim12, sim13, sim14, sim15, sim16,
#      file = "power_simulations.RData")
# Annotate each simulation's results with the scenario parameters that
# produced it, then stack all sixteen into one long data frame.
# The original repeated an identical as_tibble()/mutate() block sixteen
# times; the scenario grid below encodes the same metadata in simulation
# order 1..16:
#   cluster_size: "similar" for sims 1-4 and 9-12, "different" for 5-8 and 13-16
#   effect:       0, 0, 0.4, 0.4 (repeating)
#   ICC:          0.17 for sims 1-8, 0.03 for sims 9-16
#   rho:          0.7, 0.3 (alternating)
sim_list <- list(sim1, sim2, sim3, sim4, sim5, sim6, sim7, sim8,
                 sim9, sim10, sim11, sim12, sim13, sim14, sim15, sim16)
scenario_grid <- data.frame(
  cluster_size = rep(rep(c("similar cluster sizes", "different cluster sizes"), each = 4), times = 2),
  effect = rep(c(0, 0, 0.4, 0.4), times = 4),
  ICC = rep(c(0.17, 0.03), each = 8),
  rho = rep(c(0.7, 0.3), times = 8),
  stringsAsFactors = FALSE
)
simulations <- bind_rows(lapply(seq_along(sim_list), function(i) {
  as_tibble(sim_list[[i]]) %>%
    mutate(cluster_size = scenario_grid$cluster_size[i],
           effect = scenario_grid$effect[i],
           ICC = scenario_grid$ICC[i],
           rho = scenario_grid$rho[i])
}))
write_csv(simulations, "cluster_power_simulations.csv")
#-------------------------------------------------------------------------------
# Additional simulations: fewer clusters (six instead of fourteen).
set.seed(0815)
n_small <- sample(15:25, size = 6, replace = TRUE) # little variability in cluster sizes
ICC <- 0.17
effect <- 0
reliability_post <- 1
expected_correlation <- 0.7
reliability_pre <- expected_correlation^2 / reliability_post
# BUG FIX: the original passed n_per_class_balanced (the 14-cluster vector)
# here, so the "only six clusters" scenario was never actually simulated;
# use the freshly sampled six-cluster vector n_small instead.
sim_small <- compute_clustered_power(n_per_class = n_small, ICC = ICC, effect = effect,
                                     reliability_post = reliability_post, reliability_pre = reliability_pre)
sim_small <- as_tibble(sim_small) %>%
  mutate(cluster_size = "similar cluster sizes, but only six clusters",
         effect = 0,
         ICC = 0.17,
         rho = 0.7)
# Same six-cluster scenario, now with a true effect of 0.4.
n_small <- sample(15:25, size = 6, replace = TRUE) # little variability in cluster sizes
ICC <- 0.17
effect <- 0.4
reliability_post <- 1
expected_correlation <- 0.7
reliability_pre <- expected_correlation^2 / reliability_post
sim_small_eff <- compute_clustered_power(n_per_class = n_small, ICC = ICC, effect = effect,
                                         reliability_post = reliability_post, reliability_pre = reliability_pre)
sim_small_eff <- as_tibble(sim_small_eff) %>%
  mutate(cluster_size = "similar cluster sizes, but only six clusters",
         effect = 0.4,
         ICC = 0.17,
         rho = 0.7)
sim_small <- sim_small %>%
  bind_rows(sim_small_eff)
write_csv(sim_small, "cluster_power_simulations_few_clusters.csv")
af0bdd0ae3d2e51add7c6111561d8c682f982846 | 7cb02aa5f54e66d5dc1b309e810f3ca5cdcee895 | /PhenoForecast/man/createInits_forecast.Rd | 3d1b72bf992d8ef2118f559e1e81b124e7a1efa7 | [
"MIT"
] | permissive | victorfeagins/NEFI_pheno | 940c5a5f5cc8f06c456867cec3665458b9db1045 | ad22dc2bf24bfb1ed8275e196bf4812d15a3cbbb | refs/heads/master | 2023-05-14T06:29:38.563240 | 2021-03-04T23:13:02 | 2021-03-04T23:13:02 | 375,135,836 | 0 | 0 | MIT | 2021-06-08T20:25:32 | 2021-06-08T20:25:31 | null | UTF-8 | R | false | true | 366 | rd | createInits_forecast.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/createInits_forecast.R
\name{createInits_forecast}
\alias{createInits_forecast}
\title{Create inits based on previous out.burn}
\usage{
createInits_forecast(out.burn, variableNames)
}
\arguments{
\item{out.burn}{Output of a previous (burn-in) model run from which the new initial values are derived.}
\item{variableNames}{Character vector of variable names for which initial values should be created.}
}
\value{
Initial values based on \code{out.burn} for the variables named in \code{variableNames}.
}
\description{
Create inits based on previous out.burn
}
|
ef50327d9b5ad91d53a8adf83f3c90b2df0b6b8d | 6b1cb96496ec20452510216aaed1c5f3d0468e43 | /picay_creek_map.R | e9fdbf6f382851959098250d99d4a7aa88bd59b0 | [] | no_license | sebankston/san_ysidro_map | bec49d538c0c7470fb49c22f3647415522f77580 | 1c28cd92a4d14dc067124865a57a9199d159f1d4 | refs/heads/master | 2020-04-07T03:21:12.249054 | 2019-01-05T00:20:25 | 2019-01-05T00:20:25 | 158,012,848 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,415 | r | picay_creek_map.R | library(tidyverse)
library(sf)
library(leaflet)
library(leaflet.extras)
library(googlesheets)
library(janitor)
library(htmlwidgets)
# List the Google Sheets visible to the authenticated account and look for
# the Picay inventory sheet by title.
all_sheets <- as_tibble(gs_ls())
all_sheets$sheet_title %>% str_subset("Picay")
# Register the inventory sheet, list its worksheets, and read Sheet1 with
# normalised (snake_case) column names.
pcy_all <- gs_title("Picay Inventory")
gs_ws_ls(pcy_all)
pcy_data <- gs_read(ss = pcy_all, ws = "Sheet1") %>% clean_names()
# Split the barriers by type: NR = non-road, R = road, and road barriers
# flagged for a second pass.
pcy_nr <- pcy_data %>% filter(barrier_type == "NR")
pcy_r <- pcy_data %>% filter(barrier_type == "R")
pcy_r2 <- pcy_data %>% filter(barrier_type == "R" & x2nd_pass == "Y")
# Build the interactive map: two base layers plus toggleable marker groups.
leaflet() %>%
addProviderTiles("Esri.WorldStreetMap", group = "Street Map") %>%
addProviderTiles("Esri.WorldImagery",group = "World Imagery") %>%
addCircleMarkers(data = pcy_r2, label = pcy_r2$barrier_id, popup = ~paste0(pcy_r2$barrier_id, "<br/>", pcy_r2$pad_id), color = "red", group = "Detailed Required", radius = 5, fillOpacity = .5) %>%
# NOTE(review): the non-road layer is disabled, but the layers control below
# still lists its "Non-Road Barrier" group (it will toggle nothing). The
# original commented line also referenced rnc_nr$pad_id — presumably a typo
# for pcy_nr$pad_id, corrected here.
# addCircleMarkers(data = pcy_nr, label = pcy_nr$barrier_id, popup = ~paste0(pcy_nr$barrier_id, "<br/>", pcy_nr$pad_id), color = "dodgerblue", group = "Non-Road Barrier", radius = 3, fillOpacity = 1) %>%
addCircleMarkers(data = pcy_r, label = pcy_r$barrier_id, popup = ~paste0(pcy_r$barrier_id, "<br/>", pcy_r$pad_id), color = "green", group = "Road Barrier", radius = 3, fillOpacity = 1) %>%
addLayersControl(baseGroups = c("Street Map", "World Imagery"), overlayGroups = c("Non-Road Barrier", "Road Barrier", "Detailed Required"))
8e6b879b99ff6eda0b926ff9ae34b0e7c229676f | 06584f5c1d0092830e67150ccb0d67b4a5851e24 | /dev/dev.R | e8fa1d3c145c35edf9bb1a512b8f7d31ad4abd93 | [
"MIT"
] | permissive | davidruvolo51/random-api | 4d193ca0881e36ae3a2c5f78573bfca2003c687f | d84d4c7c38f36cdd0bea4b95cb7cf459bd7a4bb2 | refs/heads/main | 2023-08-02T05:48:27.138687 | 2021-10-02T11:49:37 | 2021-10-02T11:49:37 | 401,309,113 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 495 | r | dev.R | #'////////////////////////////////////////////////////////////////////////////
#' FILE: dev.R
#' AUTHOR: David Ruvolo
#' CREATED: 2021-09-01
#' MODIFIED: 2021-09-01
#' PURPOSE: workspace management
#' STATUS: in.progress
#' PACKAGES: NA
#' COMMENTS: Interactive helper script; run the lines below as needed.
#'////////////////////////////////////////////////////////////////////////////
# Report differences between the lockfile and installed packages, then
# record the current library state in the lockfile.
renv::status()
renv::snapshot()
#' Configuration and Authentication
#' Occasionally, PAT may need to be reset
credentials::set_github_pat()
286225b259885cb30626760d367080ba38622ed8 | fb25129d8e0c03dbc4837276a25040a8ceb2ad39 | /run_analysis.R | 8e0eff6bb5f51a481b40090c930cfcea863ab874 | [] | no_license | suryade/gettingandcleaningdata | 6cb9606b67baa4a3f3a18ae80681d5d4110de3d5 | cb8ac11cb7cff5aca490d4b16374f60a4d8f6627 | refs/heads/master | 2021-01-10T03:05:17.018421 | 2015-10-22T22:30:57 | 2015-10-22T22:30:57 | 44,775,912 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,159 | r | run_analysis.R | ## This section loads up all the packages that will be required for the run_analysis script
## to run properly.
require(data.table)
require(plyr)
## Merge the train and test data sets into one data frame, attach descriptive
## activity names, and clean up the column names. Satisfies requirements 1, 3
## and 4 of the assignment.
##
## Args:
##   directory: path (with trailing slash) to the UCI HAR Dataset folder, i.e.
##              the folder containing features.txt, activity_labels.txt and
##              the test/ and train/ subfolders.
##
## Returns:
##   A data frame with one row per observation: the subject id, all feature
##   measurements (with descriptive column names), and the activity name.
mergeAllData <- function(directory) {
  ## ---- Test data ----
  subject_test <- fread(paste0(directory, "test/subject_test.txt"))
  names(subject_test) <- c("subject")
  X_test <- fread(paste0(directory, "test/X_test.txt"))
  ## features.txt maps the feature index (V1) to the feature name (V2);
  ## the name column labels the measurement columns of both X data sets.
  features <- fread(paste0(directory, "features.txt"))
  features_col_names <- features$V2
  names(X_test) <- features_col_names
  Y_test <- fread(paste0(directory, "test/y_test.txt"))
  ## Attach descriptive activity names to the numeric activity codes, then
  ## keep only the name column for the final merge.
  activity_names <- fread(paste0(directory, "activity_labels.txt"))
  Y_test <- merge(Y_test, activity_names, by.x = 'V1', by.y = 'V1')
  Y_test <- subset(Y_test, select = c(V2))
  names(Y_test) <- c("activityName")
  merged_test_data <- cbind(subject_test, X_test, Y_test)
  ## ---- Train data (same steps; feature/activity tables are reused) ----
  subject_train <- fread(paste0(directory, "train/subject_train.txt"))
  names(subject_train) <- c("subject")
  X_train <- fread(paste0(directory, "train/X_train.txt"))
  names(X_train) <- features_col_names
  Y_train <- fread(paste0(directory, "train/y_train.txt"))
  Y_train <- merge(Y_train, activity_names, by.x = 'V1', by.y = 'V1')
  Y_train <- subset(Y_train, select = c(V2))
  names(Y_train) <- c("activityName")
  merged_train_data <- cbind(subject_train, X_train, Y_train)
  ## Stack the test and train observations.
  merged_all_data <- rbind(merged_test_data, merged_train_data)
  ## Expand the abbreviated feature names into descriptive ones.
  ## gsub() is vectorised, so each substitution is applied to the whole name
  ## vector at once (the original looped over 1:length(colNames)).
  colNames <- names(merged_all_data)
  colNames <- gsub("\\()", "", colNames)
  colNames <- gsub("-std$", "StndrddDev", colNames)
  colNames <- gsub("-mean", "Mean", colNames)
  colNames <- gsub("^(t)", "time", colNames)
  colNames <- gsub("^(f)", "frequency", colNames)
  colNames <- gsub("([Gg]ravity)", "Gravity", colNames)
  colNames <- gsub("([Bb]ody[Bb]ody|[Bb]ody)", "Body", colNames)
  colNames <- gsub("[Gg]yro", "Gyroscope", colNames)
  colNames <- gsub("AccMag", "AccMagnitude", colNames)
  colNames <- gsub("([Bb]odyaccjerkmag)", "BodyAccJerkMagnitude", colNames)
  colNames <- gsub("JerkMag", "JerkMagnitude", colNames)
  colNames <- gsub("GyroMag", "GyroscopeMagnitude", colNames)
  colnames(merged_all_data) <- colNames
  return(merged_all_data)
}
## Extract the mean and standard-deviation measurement columns from the
## merged data set, together with the activity name. This satisfies
## requirement 2 of the assignment.
##
## Args:
##   merged_data: data frame produced by mergeAllData(); must contain an
##                "activityName" column plus measurement columns whose names
##                include "mean"/"Mean" or "StndrddDev".
##
## Returns:
##   A data frame whose first column is activityName, followed by all mean
##   columns and then all standard-deviation columns.
extractMeanStdData <- function(merged_data) {
  all_names <- names(merged_data)
  ## Case-insensitive matching picks up both "mean" and "Mean" spellings.
  mean_cols <- grep("mean", all_names, ignore.case = TRUE, perl = TRUE, value = TRUE)
  std_cols <- grep("StndrddDev", all_names, ignore.case = TRUE, perl = TRUE, value = TRUE)
  ## Assemble: activity name first, then the means, then the standard
  ## deviations (same column order as before).
  result <- cbind(merged_data["activityName"],
                  merged_data[mean_cols],
                  merged_data[std_cols])
  result
}
## Create a second, independent tidy data set with the average of each
## feature for each activity and each subject, and write it to
## "averages.txt". This satisfies requirement 5 of the assignment.
##
## Args:
##   merged_data: data frame from mergeAllData(): a "subject" column, the
##                feature measurement columns, and an "activityName" column.
##
## Side effects:
##   Writes the averaged data set to "averages.txt" in the working directory.
tidyDataFrame <- function(merged_data) {
  ## BUG FIX: the original averaged hard-coded columns 1:562, which (a)
  ## breaks if the number of feature columns ever changes and (b) includes
  ## the "subject" column itself, duplicating it in the output next to the
  ## grouping column. Average only the feature columns instead.
  feature_cols <- setdiff(names(merged_data), c("subject", "activityName"))
  avg_df <- ddply(merged_data, .(subject, activityName),
                  function(piece) colMeans(piece[, feature_cols, drop = FALSE]))
  ## Write out the tidy data ("row.names" spelled out; the original relied on
  ## partial matching of "row.name").
  write.table(avg_df, file = "averages.txt", row.names = FALSE)
}
b54afb3273329c6a22c20210cf1e15d717806543 | f499cfb079da7190894aed672b5be240137cb04d | /plot4.R | 7847470d33f652a429e39b54377cc9e6ba018900 | [] | no_license | ArchanaSrinivas/ExData_Plotting1 | 9234720157fb10092c1415aad8d739bd6eb3e1fd | 8e814720700d9a9c40b6b7ddb8ad7aae2c7252a6 | refs/heads/master | 2020-12-28T23:16:13.574008 | 2015-01-11T20:11:29 | 2015-01-11T20:11:29 | 29,037,634 | 0 | 0 | null | 2015-01-09T22:11:50 | 2015-01-09T22:11:49 | null | UTF-8 | R | false | false | 1,903 | r | plot4.R | ##The following code to be run only if to create data frame for the first time - reusing dataset is better if it exists
---------------------------------------------------------------------------------------------------------------
##download the zip folder
##Extract file from zip folder into working directory
##Read Entire File
power <- read.table("household_power_consumption.txt",sep=";", header = TRUE, na.strings = "?")
##Subset required data from file
RequiredPower <- subset(power, power$Date == '1/2/2007' | power$Date == '2/2/2007')
##Setting Datatype as Date
RequiredPower$Date <- as.Date(RequiredPower$Date, "%d/%m/%Y")
##Setting Datatype as Time
RequiredPower$Time <- as.POSIXlt(paste(RequiredPower$Date, RequiredPower$Time), format="%H:%M:%S")
----------------------------------------------------------------------------------------------------------------
#Creating a png output with required sizing
png("plot4.png", width = 480, height = 480)
##Setting the stage (4 by 4 graph and spacing)
par(mfrow = c(2,2), mar=c(4,4,2,1), oma=c(0,0,2,0))
#Creating Graph 1
plot(RequiredPower$Time,RequiredPower$Global_active_power , type = "l",xlab= "",ylab = "Global Active Power (kilowatts)")
#Creating Graph 2
plot(RequiredPower$Time,RequiredPower$Voltage , type = "l",xlab= "datetime",ylab = "Voltage")
#Creating Graph 3
plot(RequiredPower$Time, RequiredPower$Sub_metering_1 , type = "l" , xlab= "", ylab = "Energy sub metering")
lines(RequiredPower$Time, RequiredPower$Sub_metering_2, col="red")
lines(RequiredPower$Time, RequiredPower$Sub_metering_3, col="blue")
legend("topright", col = c("black","red", "blue"), bty = "n", pch = "", lwd=1, legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
#Creating Graph 4
plot(RequiredPower$Time,RequiredPower$Global_reactive_power , type = "l", xlab= "datetime",ylab = "Global_reactive_power")
#Finish the image
dev.off()
|
04e3cffaea5b74fabd261c8282791cc32a5b660d | df7ad23a78e32abf5ff7c3d63899e2083103e469 | /server.R | 3439ec8061f6fa8c7e31ddd5745299d63fa4f848 | [] | no_license | rstudio/ca-predict-covid | 98f9b899e9456bbe9c133d206d48ba3908ddcc96 | 85cd2343b6835ef1a1de440a0e926293968eca7c | refs/heads/master | 2022-11-13T07:33:01.626550 | 2020-06-24T20:14:45 | 2020-06-24T20:14:45 | 274,699,556 | 0 | 0 | null | 2020-06-24T16:27:09 | 2020-06-24T15:09:43 | HTML | UTF-8 | R | false | false | 77,515 | r | server.R | # Developed by California COVID Modeling Team
#
# John Pugliese, PhD.
# California Department of Public Health
#
# Jason Vargo, PhD.
# California Department of Public Health
#
# Beta Version : Released 6/22/2020
#
library(shiny)
# Define server logic required to draw a histogram
server <- function(input, output, session) {
#### Carousel Navigation ####
#
# shinyjs::onclick("nowcast_img", updateTabsetPanel(session, inputId="navbar", selected= "Nowcasts"))
# shinyjs::onclick("forecast_img", updateTabsetPanel(session, inputId="navbar", selected= "Forecasts"))
# shinyjs::onclick("epimodel_img", updateTabsetPanel(session, inputId="navbar", selected= "Scenarios"))
#
#### File Uploads ####
# Download handler serving a blank county-upload CSV template: one row per
# day from the earliest observed date through 30 days from today, with the
# metric columns left NA for the user to fill in.
output$c_template <- downloadHandler(
  # A plain string suffices here (the original wrapped it in a no-op paste()).
  filename = function() { "county_upload_template.csv" },
  content = function(file) {
    dlm <- data.frame(date = seq(as.Date(min(covid$Most.Recent.Date)), as.Date(Sys.Date() + 30), "day"),
                      county = input$county_upload,
                      c_reff = NA,
                      c_hosp = NA,
                      c_cumdeaths = NA)
    write.table(dlm, file, row.names = F, col.names = T, quote = F, na = "NA", sep = ",")
  })
options(shiny.maxRequestSize = 9*1024^2)
# Reactive holding the uploaded CSV: a placeholder data frame when nothing
# has been uploaded (or the upload cannot be parsed), otherwise the parsed
# file contents.
content_file <- reactive({
  inFile <- input$file1
  if (is.null(inFile)) {
    df <- data.frame(Result = c("No data has been uploaded"))
  } else {
    # BUG FIX: the original built the fallback data frame in `finally`,
    # whose value tryCatch discards — so on a read error `df` became the
    # return value of sendSweetAlert(), not the fallback. Alert the user and
    # return the fallback from the error handler instead.
    df <- tryCatch(
      readr::read_csv(inFile$datapath),
      error = function(e) {
        sendSweetAlert(session, title = "Whoa!", text = "The uploaded file does not look like the template.", type = "error")
        data.frame(Result = c("No data has been uploaded"))
      })
  }
  return(df)
})
#names(df) <- c("date","county","c_reff","c_hosp","c_cumdeaths")
#
# Here, we are going to store the entire data.frame in a reactiveValue.
# rV holds the uploaded file plus validation state:
#   content_file        - the uploaded (or placeholder) data frame
#   approve             - TRUE once the upload passes all structural checks
#   reff / hosp / death - TRUE when the corresponding metric column has data
#   county              - geography name from the upload ("California" default)
rV <- reactiveValues( content_file = NULL,
approve = NULL,
reff = NULL,
hosp = NULL,
death = NULL,
county = NULL)
# Reset the stored upload and all validation flags whenever a new file
# (or the placeholder) is produced by content_file().
observe({ rV$content_file <- content_file()
rV$approve <- FALSE
rV$reff <- FALSE
rV$hosp <- FALSE
rV$death <- FALSE
rV$county <- "California"
})
observe({
df <- rV$content_file
if (ncol(df) > 1 ) {
data_types <- c()
#Check for only one geography
if ( length(unique(df$county)) == 1) {
geo_set <- TRUE
rV$county <- paste0(unique(df$county))
} else { geo_set <- FALSE}
#Check data types
if (length(na.omit(df[,1])) > 0) {
date_result <-tryCatch({ c(data_types,sapply(na.omit(df[1]), function(x) !all(is.na(as.Date(as.character(x),tryFormats = c("%Y-%m-%d", "%m/%d/%Y") )))) ) },
error = function(e) { sendSweetAlert(session, title = "Whoa!", text = "Check your date column, I'm having trouble reading it.", type = "error") },
finally = { FALSE })
data_types <- c(data_types,date_result)
if (TRUE %in% date_result) {
df$date <- as.Date(as.character(df$date),tryFormats = c("%Y-%m-%d", "%m/%d/%Y") )
rV$content_file <- df
}
}
if (length(na.omit(df[,2])) > 0) {
data_types <- c(data_types, TRUE %in% c( sapply(na.omit(df[2]), function(x) !all(is.na(as.character(x))) ) ) )
}
if (length(na.omit(df[,3])) > 0) {
data_types <- c(data_types, TRUE %in% c( sapply(na.omit(df[3]), function(x) !all(is.na(as.numeric(x))) ) ) )
rV$reff <- TRUE
}
if (length(na.omit(df[,4])) > 0) {
data_types <- c(data_types, TRUE %in% c( sapply(na.omit(df[4]), function(x) !all(is.na(as.numeric(x))) ) ) )
rV$hosp <- TRUE
}
if (length(na.omit(df[,5])) > 0) {
data_types <- c(data_types, TRUE %in% c( sapply(na.omit(df[5]), function(x) !all(is.na(as.numeric(x))) ) ) )
rV$death <- TRUE
}
#Check if the number of data values is at least as long as the number of values
date_cover <- c()
date_num <- length(na.omit(df[,1]) )
if (length(na.omit(df[,2])) > 0) {
date_cover <- c(date_cover, date_num >= length(na.omit(df[,2])) )
}
if (length(na.omit(df[,3])) > 0) {
date_cover <- c(date_cover, date_num >= length(na.omit(df[,3])) )
}
if (length(na.omit(df[,4])) > 0) {
date_cover <- c(date_cover, date_num >= length(na.omit(df[,4])) )
}
if (length(na.omit(df[,5])) > 0) {
date_cover <- c(date_cover, date_num >= length(na.omit(df[,5])) )
}
}
if (ncol(content_file()) > 1 ) {
if (geo_set == FALSE) {
sendSweetAlert(session, title = "Oops!", text = "CalCAT can only accept one geography at this time.", type = "error")
}
if (TRUE %in% all(data_types) & TRUE %in% all(date_cover) ) {
rV$approve <- TRUE
return(NULL)
} else {
if (FALSE %in% all(data_types)) {
sendSweetAlert(session, title = "Oops!", text = "One of your data columns is not the correct type.", type = "error")
rV$approve <- FALSE
} else {
sendSweetAlert(session, title = "Oops!", text = "You have more outputs than dates.", type = "error")
rV$approve <- FALSE
}
}
}
})
  # Expose the validation flag to the UI (conditionalPanels); keep it computed
  # even while hidden so dependent panels react immediately.
  output$datacheck <- reactive({ rV$approve })
  outputOptions(output, 'datacheck', suspendWhenHidden = FALSE)
  # Preview table of the uploaded file.
  output$contents <- renderDataTable({
    df <- rV$content_file
    DT::datatable(df,
                  rownames = FALSE,
                  # switch() on a logical: TRUE -> friendly labels; FALSE
                  # coerces to 0 -> NULL, so the placeholder frame keeps its
                  # own column name.
                  colnames = switch(ncol(df) > 1, c("Date","Geo","Reff","Hosp.","Cum. Deaths"),"" ),
                  class = 'cell-border stripe',
                  options = list(pageLength = 15,
                                 searching = FALSE,
                                 lengthChange = FALSE)
    )
  })
### Nowcasts of R Effective ####
#Data Prep
  # Statewide R-effective nowcasts: merge the per-model daily series into one
  # frame (column order after merge: 1 rt.live, 2 COVIDActNow, 3 EpiForecasts,
  # 4 covid19-projections [yu], 5 UCLA, 6 ICL[, 7 upload]) and add an ensemble
  # mean that deliberately excludes UCLA (column 5).
  rt.ts <- reactive({
    # ICL: splice the constant-mobility forecast onto the fitted history.
    icl_rt_f <- icl %>% select(date, constant_mobility_mean_time_varying_reproduction_number_R.t.) %>% rename(mean_rt = constant_mobility_mean_time_varying_reproduction_number_R.t.)
    icl_rt <- icl_model %>% select(date, mean_time_varying_reproduction_number_R.t.) %>% rename(mean_rt = mean_time_varying_reproduction_number_R.t.)
    icl_rt <- rbind(icl_rt, icl_rt_f)
    fu <- filter(yu, !is.na(r_values_mean))
    # One xts series per source so merge() aligns them on date.
    rt.rt.xts <- xts(rt_live[,4], rt_live$date)
    can.rt.xts <- xts(can.state.observed[,8],can.state.observed$date)
    epifc.rt.xts <- xts(epi_forecast[which(epi_forecast$type == "nowcast"),4],
                        epi_forecast[which(epi_forecast$type == "nowcast"),]$date)
    yu.xts <- xts(fu[,19],fu$date)
    ucla.rt.xts <- xts(ucla_state[,2],ucla_state$date)
    ucla.rt.xts <- ucla.rt.xts[paste0("/",Sys.Date()-1)]  # xts range subset: drop future-dated UCLA rows
    icl.rt.xts <- xts(icl_rt[,2], icl_rt$date)
    # Add the uploaded series only for an approved statewide Reff upload.
    if ( input$approve_upload == TRUE & rV$county == "California" & rV$reff == TRUE) {
      u.df <- as.data.table(rV$content_file)
      u.df <- u.df %>% select(date, c_reff) %>% filter(date <= Sys.Date())
      u.rt.xts <- xts(u.df[,2], u.df$date)
      colnames(u.rt.xts) <- c("Upload.Reff")
      df <- merge(rt.rt.xts, can.rt.xts,epifc.rt.xts, yu.xts, ucla.rt.xts, icl.rt.xts, u.rt.xts)
      # Ensemble over cols 1:4 and 6 (skip UCLA); include the upload (7) only
      # when the user opted into the ensemble.
      if (input$include_ensemble == TRUE) {
        df$mean.rt <- rowMeans(df[,c(1:4,6,7)], na.rm = TRUE)
      } else {
        df$mean.rt <- rowMeans(df[,c(1:4,6)], na.rm = TRUE)
      }
      # rowMeans over all-NA rows yields NaN; replace with NA. The character
      # NA forces a coercion that is undone column-by-column just below.
      df[is.nan(as.numeric(df))] <- NA_character_
      df <- as.data.table(df) %>% as.data.frame()
      df[,2:9] <- sapply(df[,2:9], function(x) as.numeric(as.character(x)) )
    } else {
      df <- merge(rt.rt.xts, can.rt.xts,epifc.rt.xts, yu.xts, ucla.rt.xts, icl.rt.xts)
      df$mean.rt <- rowMeans(df[,c(1:4,6)], na.rm = TRUE)
      df[is.nan(as.numeric(df))] <- NA_character_
      df <- as.data.table(df) %>% as.data.frame()
      df[,2:8] <- sapply(df[,2:8], function(x) as.numeric(as.character(x)) )
    }
    return(df)
  })
#Value Boxes
  # Value box: the ensemble Reff from two days ago, with a plain-language
  # caption and a traffic-light color.
  # NOTE(review): the caption bands (1.4/1.1/0.9) and the color bands
  # (1.3/1.2/1.0) are different - confirm this divergence is intentional.
  output$mean.rt.box <- renderValueBox({
    cdt <- Sys.Date()-2  # latest date expected to have a settled ensemble value
    current.rt <- round(rt.ts()[which(rt.ts()$index == cdt),"mean.rt"], digits = 2)
    valueBox(current.rt, subtitle = paste0(ifelse(current.rt >= 1.4,
                                                  "Spread of COVID-19 is very likely increasing",
                                                  ifelse(current.rt < 1.4 & current.rt >= 1.1,
                                                         "Spread of COVID-19 may be increasing",
                                                         ifelse(current.rt < 1.1 & current.rt >= 0.9,
                                                                "Spread of COVID-19 is likely stable",
                                                                "Spread of COVID-19 is likely decreasing"
                                                         )
                                                  )
    )
    ),
    color = paste0(ifelse(current.rt >= 1.3,
                          "red",
                          ifelse(current.rt < 1.3 & current.rt >= 1.2,
                                 "orange",
                                 ifelse(current.rt < 1.2 & current.rt >= 1,
                                        "yellow",
                                        "green"
                                 )
                          )
    )
    )
    ) #End valueBox
  })
  # Pop-up explainer for the Reff value box (static HTML content).
  observeEvent(input$Rt_explain, {
    sendSweetAlert(
      session = session,
      title = "What does a Reff of this size mean?",
      text = HTML("<p>If the R effective is greater than 1, COVID-19 will spread <b>exponentially</b>. If R effective is less than 1, COVID-19
                            will spread more slowly and cases will decline. The higher the value of R effective, the faster an epidemic will progress.
                            The following graph illustrates the change in growth as R effective increases.</p>
                            <img src='reff_cuml_infection.jpg' alt='Infections increase faster with larger values of R effective' width='450px'/>
                            <p><a href='https://www.cebm.net/covid-19/when-will-it-be-over-an-introduction-to-viral-reproduction-numbers-r0-and-re/' target='_blank'>Source: CEBM</a></p>"
      ),
      html = TRUE,
      type = NULL
    )
  })
  # Value box: lowest and highest model estimates (UCLA, col 6, excluded) on
  # the most recent pre-yesterday date, labeled with the source model names.
  output$hilo_rt.box <- renderUI({
    df <- rt.ts()
    df <- df %>% filter(index < Sys.Date()-1) %>% slice(n())  # exactly one row
    rt.min <- as.numeric( apply(df[,c(2:5,7)], 1, function(i) min(i, na.rm = TRUE)) )
    rt.max <- as.numeric( apply(df[,c(2:5,7)], 1, function(i) max(i, na.rm = TRUE)) )
    # match() against the one-row frame finds which column holds the extreme
    # value; this relies on df having exactly one row (each column length 1).
    name.min <- switch(as.character(colnames(df)[match(apply(df[,c(2:5,7)], 1, function(i) min(i, na.rm = TRUE)),df)]),
                       "rt.rt.xts" = "rt.live",
                       "can.rt.xts" = "COVIDActNow",
                       "epifc.rt.xts" = "EpiForecasts",
                       "yu.xts" = "covid19-projections.com",
                       "ucla.rt.xts" = "UCLA",
                       "icl.rt.xts" = "ICL")
    name.max<- switch(as.character(colnames(df)[match(apply(df[,c(2:5,7)], 1, function(i) max(i, na.rm = TRUE)),df)]),
                      "rt.rt.xts" = "rt.live",
                      "can.rt.xts" = "COVIDActNow",
                      "epifc.rt.xts" = "EpiForecasts",
                      "yu.xts" = "covid19-projections.com",
                      "ucla.rt.xts" = "UCLA",
                      "icl.rt.xts" = "ICL")
    tagList(valueBox( paste0( round(rt.min,digits = 2)," - ", round(rt.max,digits = 2)) , paste0(name.min," - ",name.max), color = "navy", width = 12) )
  })
#Graph
  # Statewide Reff chart over the last ~80 days; the memoized renderer is
  # keyed on the filtered data and the upload state.
  output$rt.plot <- renderPlotly({
    rt.plot.memoized(
      rt.ts() %>% filter(index < Sys.Date()-1 & index > Sys.Date() -80),
      input$approve_upload,
      rV$county,
      rV$reff
    )
  })
#Downloadable file of Statewide Reff Values
output$dlRt <- downloadHandler(
filename = function() { paste("R_eff_Nowcasts_",Sys.Date(),'.csv', sep='') },
content = function(file) {
# Title
t <- c(paste("R-Effective Model and Ensemble Time Series", sep = ""),rep("",ncol(rt.ts())-1))
#Subtitle
tt <- c(paste("COVID Assessment Tool - Downloaded on",Sys.Date(), sep = " "),rep("",ncol(rt.ts())-1))
#Column labels
if ( input$approve_upload == TRUE & rV$county == "California" & rV$reff == TRUE) {
l <- c("Date","rt.live","COVIDActNow","EpiForecasts","covid19-projections.com","ICL","Upload","Mean Reff")
} else {
l <- c("Date","rt.live","COVIDActNow","EpiForecasts","covid19-projections.com","ICL","Mean Reff")
}
df <- rt.ts() %>% select(-6,) %>% filter(index < Sys.Date() & index > Sys.Date() -80)
#df <- rt.ts()[,c(1:5,7,8)] %>% filter(index < Sys.Date() & index > Sys.Date() -80)
df[,2:ncol(df)] <- lapply(df[,2:ncol(df)],function(x) round(x,2))
# df[is.na(df)] <- 0
df[] <- lapply(df, as.character)
#Source
s <- c("Please see the Technical Notes tab of the application for data sources.",rep("",ncol(rt.ts())-1))
p <- c("Prepared by: California Department of Public Health - COVID Modeling Team",rep("",ncol(rt.ts())-1))
dlm <- rbind(t, tt, l, df, s, p)
write.table(dlm, file, row.names = F, col.names = F, quote = F, na= "NA", sep = ",")
})
#### County Rt Nowcasts ####
#Data Prep
  # Per-county Reff series: COVIDActNow always, plus covid19-projections and
  # UCLA when they cover the county, plus an approved matching upload. A mean
  # column is added only when more than one model is present.
  county.rt <- reactive({
    # progress <- Progress$new()
    # Make sure it closes when we exit this reactive, even if there's an error
    # on.exit(progress$close())
    # progress$set(message = "Gathering R Effective Nowcasts", value = 0)
    c <- names(canfipslist[match(input$select.county.rt,canfipslist)])  # county name
    cnty <- input$select.county.rt                                      # county FIPS
    # progress$inc(3/4)
    # out <- lapply(cnty[1], function(x) get_can_cnty(x))
    out <- filter(can.county.observed, fips == cnty)
    # cnty.rt <- do.call("rbind",out)
    cnty.rt <- out %>% select(date,RtIndicator) %>% as.data.frame() #,RtIndicatorCI90
    cnty.rt$date <- as.Date(cnty.rt$date)
    # progress$inc(1/4)
    df <- xts(cnty.rt[,-1],cnty.rt$date)
    if (c %in% unique(yu.cnty$subregion) ) { cnty.yu <- yu.cnty %>% filter(subregion == c) %>% select(date, r_values_mean)
    yu.xts <- xts(cnty.yu[,-1],cnty.yu$date)
    names(yu.xts) <- c("yu.xts")
    df <- merge(df,yu.xts)
    }
    if (c %in% unique(ucla_cnty_rt$county) ) { cnty.ucla <- ucla_cnty_rt %>% filter(county == c) %>% select(date, Rt)
    ucla.xts <- xts(cnty.ucla[,-1],cnty.ucla$date)
    names(ucla.xts) <- c("ucla.xts")
    df <- merge(df,ucla.xts)
    }
    if ( input$approve_upload == TRUE & rV$county == c & rV$reff == TRUE) {
      u.df <- as.data.table(rV$content_file)
      u.df <- u.df %>% select(date, c_reff) %>% filter(date <= Sys.Date())
      u.rt.xts <- xts(u.df[,2], u.df$date)
      colnames(u.rt.xts) <- c("Upload.Reff")
      df <- merge(df, u.rt.xts)
      if (input$include_ensemble == TRUE) {
        if (ncol(df) > 1) {df$mean.proj <- rowMeans(df[,1:ncol(df)], na.rm = TRUE)}
      } else {
        # NOTE(review): 1:ncol(df)-1 parses as 0:(ncol(df)-1); the 0 index is
        # dropped, so this selects all but the last (upload) column - it works,
        # but 1:(ncol(df)-1) would state the intent. Verify before changing.
        if ( (ncol(df)-1) > 1) {df$mean.proj <- rowMeans(df[,1:ncol(df)-1], na.rm = TRUE)}
      }
    } else {
      if (ncol(df) > 1) {df$mean.proj <- rowMeans(df[,1:ncol(df)], na.rm = TRUE)}
    }
    df <- as.data.table(df) %>% as.data.frame() %>% filter(index < Sys.Date()-1)
    return(df)
  })
#Value Boxes
  # Value box: county Reff. Uses the ensemble mean when more than one model
  # covers the county, otherwise the single model column (column 2).
  # NOTE(review): the lookback differs by model count (4 vs 5 days) - confirm.
  output$cnty.mean.rt.box <- renderValueBox({
    if (ncol(county.rt()) > 2) { cdt <- Sys.Date()-4 } else { cdt <- Sys.Date()-5}
    if (ncol(county.rt()) > 2) { current.rt <- round( county.rt()[which( county.rt()$index == cdt),"mean.proj"], digits = 2)
    } else { current.rt <- round( county.rt()[which( county.rt()$index == cdt),2], digits = 2) }
    valueBox(current.rt, subtitle = paste0(ifelse(current.rt >= 1.4,
                                                  "Spread of COVID-19 is very likely increasing",
                                                  ifelse(current.rt < 1.4 & current.rt >= 1.1,
                                                         "Spread of COVID-19 may be increasing",
                                                         ifelse(current.rt < 1.1 & current.rt >= 0.9,
                                                                "Spread of COVID-19 is likely stable",
                                                                "Spread of COVID-19 is likely decreasing"
                                                         )
                                                  )
    )
    ),
    color = paste0(ifelse(current.rt >= 1.3,
                          "red",
                          ifelse(current.rt < 1.3 & current.rt >= 1.2,
                                 "orange",
                                 ifelse(current.rt < 1.2 & current.rt >= 1,
                                        "yellow",
                                        "green"
                                 )
                          )
    )
    )
    ) #End valueBox
  })
#Graph
  # County Reff chart; memoized on the county series and upload state.
  output$county.rt.plot <- renderPlotly({
    county.rt.plot.memoized(
      county.rt(),
      input$select.county.rt,
      input$approve_upload,
      rV$county,
      rV$reff
    )
  })
#Download file of individual COUNTY Reff Values
output$dlRt.indv.cnty <- downloadHandler(
filename = function() { paste("Rt_Nowcasts_",names(canfipslist[match(input$select.county.rt,canfipslist)]),"_",Sys.Date(),'.csv', sep='') },
content = function(file) {
c <- names(canfipslist[match(input$select.county.rt,canfipslist)])
# Title
t <- c(paste("R-Effective County Model Time Series for ",c, sep = ""),rep("",ncol(county.rt())))
#Subtitle
tt <- c(paste("COVID Assessment Tool - Downloaded on",Sys.Date(), sep = " "),rep("",ncol(county.rt())))
df <- county.rt() %>% as.data.frame()
if ( ncol(df) > 2 ) { df[,2:ncol(df)] <- lapply(df[,2:ncol(df)],function(x) round(x,2)) } else { df[,2] <- round(df[,2],2) }
df[is.na(df)] <- 0
df[] <- lapply(df, as.character)
#Column labels
l <- c("Date","COVIDActNow")
if ( c %in% unique(yu.cnty$subregion) ) { l <- c(l, c("covid19-projections.com")) }
if ( c %in% unique(ucla_cnty_rt$county) ) { l <- c(l, c("UCLA")) }
if ( input$approve_upload == TRUE & rV$county == c & rV$reff == TRUE) { l <- c(l, c("Upload")) }
if ( length(l) > 2 ) { l <- c(l, c("Mean Reff") ) }
#Source
s <- c("Please see the Technical Notes tab of the application for data sources.",rep("",ncol(county.rt())))
p <- c("Prepared by: California Department of Public Health - COVID Modeling Team",rep("",ncol(county.rt())))
dlm <- rbind(t, tt, l, df, s, p)
write.table(dlm, file, row.names = F, col.names = F, quote = F, na= "NA", sep = ",")
})
#### Rt Dot Plot ####
#Data Prep
  # 7-day mean Reff per county from COVIDActNow, with approximate lower/upper
  # bounds built from the reported CI90 half-widths. Computed once per session
  # (not reactive). The max_date column is derived but not used later in this
  # pipeline.
  cnty.7.day.rt <- data.table(can.county.observed) %>%
    .[, max_date := max(date, na.rm = T), by = .(county)] %>%
    .[date > Sys.Date()-7, .(Rt.m = mean(RtIndicator, na.rm = T),
                             ll = mean(RtIndicator - RtIndicatorCI90, na.rm=T),
                             ul = mean(RtIndicator + RtIndicatorCI90, na.rm=T)), by = .(county)] %>% na.omit()
# cnty.7.day.rt <- reactive({
#
# cnty.can <- can.county.observed %>% filter(date <= Sys.Date()-1,
# date > Sys.Date()-8) %>%
# select(county, date, RtIndicator) %>%
# mutate(date = as.Date(date)) %>%
# rename(Rt = RtIndicator) %>%
# as.data.frame()
# cnty.yu <- yu.cnty %>% filter(date <= Sys.Date()-1,
# date > Sys.Date()-8) %>%
# select(subregion, date, r_values_mean) %>%
# rename(county = subregion,
# Rt = r_values_mean )
# cnty.ucla <- ucla_cnty_rt %>% filter(date <= Sys.Date()-1,
# date > Sys.Date()-8) %>%
# select(county, date, Rt)
#
# df <- rbind(cnty.can,cnty.yu,cnty.ucla) %>%
# arrange(county,date) %>%
# group_by(county) %>%
# summarise(Rt.m = mean(Rt, na.rm = T),
# Rt.sd = sd(Rt, na.rm = T) ) %>%
# na.omit() %>%
# mutate(ll = Rt.m - 1.95*Rt.sd,
# ul = Rt.m + 1.95*Rt.sd)
# return(df)
#
# })
#
#Graph
  # Dot plot of the 7-day average county Reff values (memoized renderer).
  output$rt.dot.plot <- renderPlotly({
    rt.dot.plot.memoized(cnty.7.day.rt)
  })
#Download file of ALL COUNTY 7-day average Reff Values
output$dlRt.cnty <- downloadHandler(
filename = function() { paste("Rt_Nowcasts_7DayAvg_Counties",Sys.Date(),'.csv', sep='') },
content = function(file) {
# Title
t <- c(paste("R-Effective 7 Day Averages for Counties", sep = ""),"","","","")
#Subtitle
tt <- c(paste("COVID Assessment Tool - Downloaded on",Sys.Date(), sep = " "),"","","","")
df <- cnty.7.day.rt %>% as.data.frame()
if ( ncol(df) > 2 ) { df[,2:ncol(df)] <- lapply(df[,2:ncol(df)],function(x) round(x,2)) } else { df[,2] <- round(df[,2],2) }
df[is.na(df)] <- 0
df[] <- lapply(df, as.character)
#Column labels
l <- c("County","COVIDActNow - 7 Day Avg", "LL", "UL")
#Source
s <- c("Please see the Technical Notes tab of the application.","","","","")
p <- c("Prepared by: California Department of Public Health - COVID Modeling Team","","","","")
u <- c("Source: COVIDActNow - https://blog.covidactnow.org/modeling-metrics-critical-to-reopen-safely/","","","","")
dlm <- rbind(t, tt, l, df, s, p, u)
write.table(dlm, file, row.names = F, col.names = F, quote = F, na= "NA", sep = ",")
})
#### Reff County Map ####
  # Per-county ensemble Reff for the map: join the three county models (plus
  # an approved upload), average across models per county/day, then average
  # the last 7 days into a single value per county.
  cnty.map.rt <- reactive({
    can.rt <- can.county.observed %>% select(date, county, RtIndicator) %>% mutate(date = as.Date(date)) %>% as.data.frame()
    yu.rt <- yu.cnty %>% select(date, subregion, r_values_mean) %>% rename(county = subregion)
    ucla.rt <- ucla_cnty_rt %>% select(date, county, Rt)
    df <- full_join(can.rt, yu.rt, by = c("county" = "county", "date" = "date"))
    df <- full_join(df, ucla.rt, by = c("county" = "county", "date" = "date"))
    # Include the upload only when approved, opted into the ensemble, and the
    # upload geography is a known county.
    if ( input$approve_upload == TRUE & input$include_ensemble == TRUE &
         rV$county %in% names(fipslist) & rV$reff == TRUE) {
      u.df <- as.data.table(rV$content_file)
      u.df <- u.df %>% select(date, county, c_reff) %>% filter(date <= Sys.Date())
      df <- full_join(df, u.df, by = c("county" = "county", "date" = "date"))
    }
    df$mean.proj <- rowMeans(df[,3:ncol(df)], na.rm = TRUE)  # cols 3+ are model values
    df <- df %>% filter(date < Sys.Date() & date > Sys.Date()-7)
    df <- df %>% group_by(county) %>% summarise(mean.proj = mean(mean.proj, na.rm = T))
    df <- df %>% select(county, mean.proj) %>% mutate(mean.proj = round(mean.proj,digits=2))
    return(df)
  })
  # Choropleth of the latest per-county ensemble Reff (memoized renderer).
  output$reff_county_map <- renderggiraph({
    reff_county_map_memoized(cnty.map.rt())
  })
#Download file of ALL COUNTY 7-day average Reff Values
output$dlRt.cnty.map <- downloadHandler(
filename = function() { paste("Rt_Nowcasts_County_Map",Sys.Date(),'.csv', sep='') },
content = function(file) {
df <- cnty.map.rt()
# Title
t <- c(paste("Latest R-Effective for Counties", sep = ""),rep("",ncol(df)))
#Subtitle
tt <- c(paste("COVID Assessment Tool - Downloaded on",Sys.Date(), sep = " "),rep("",ncol(df)))
if ( ncol(df) > 2 ) { df[,2:ncol(df)] <- lapply(df[,2:ncol(df)],function(x) round(x,2)) } else { df[,2] <- round(df[,2],2) }
df[is.na(df)] <- 0
df[] <- lapply(df, as.character)
#Column labels
l <- c("County","R-Effective - Latest Ensemble")
#Source
s <- c("Please see the Technical Notes tab of the application.",rep("",ncol(df)))
p <- c("Prepared by: California Department of Public Health - COVID Modeling Team",rep("",ncol(df)))
u <- c("Source: COVIDActNow - https://blog.covidactnow.org/modeling-metrics-critical-to-reopen-safely/",rep("",ncol(df)))
v <- c("Source: UCLA ML Lab - https://covid19.uclaml.org/",rep("",ncol(df)))
w <- c("Source: COVID-19 Projections - https://covid19-projections.com/",rep("",ncol(df)))
dlm <- rbind(t, tt, l, df, s, p, u, v, w)
write.table(dlm, file, row.names = F, col.names = F, quote = F, na= "NA", sep = ",")
})
#### Hospitalization Projections ####
#Data Prep
  # Statewide hospital-census actuals plus five model forecasts (CAN, IHME,
  # MOBS, MIT, JHU) and an optional upload, merged on date. mean.proj is the
  # across-model mean (upload included only with the ensemble opt-in) and is
  # blanked on dates where actuals exist.
  hosp.proj.ts <- reactive({
    min_hosp <- min(covid$Most.Recent.Date)  # earliest actuals date; trims model history
    hosp <- covid %>% select(Most.Recent.Date,COVID.19.Positive.Patients) %>% filter(covid$County.Name == "California") %>% as.data.frame()
    can.hosp.proj <- can.state.observed %>% select(date, hospitalBedsRequired) %>% filter(min_hosp <= date & date <= JHU_inf_end_dt)
    IHME.hosp.proj <- IHME %>% select(date, allbed_mean) %>% filter(min_hosp <= date & date <= JHU_inf_end_dt)
    mobs.hosp.proj <- mobs %>% select(date,hospitalBedsRequired) %>% filter(min_hosp <= date & date <= JHU_inf_end_dt)
    mit.hosp.proj <- mit %>% select(11,7) %>% filter(min_hosp <= date & date <= JHU_inf_end_dt)
    jhu.hosp.proj <- JHU_aws %>% filter(intervention == "Inference" & county == "California") %>% select("date","hosp_occup_mean") %>% filter(min_hosp <= date & date <= JHU_inf_end_dt)
    # One xts series per source so merge() aligns them on date.
    covid.xts <- xts(hosp[,-1],hosp$Most.Recent.Date)
    can.proj.xts <- xts(can.hosp.proj[,-1],can.hosp.proj$date)
    ihme.proj.xts <- xts(IHME.hosp.proj[,-1],IHME.hosp.proj$date)
    mobs.proj.xts <- xts(mobs.hosp.proj[,-1],mobs.hosp.proj$date)
    mit.proj.xts <- xts(mit.hosp.proj[,-1],mit.hosp.proj$date)
    jhu.proj.xts <- xts(jhu.hosp.proj[,-1],jhu.hosp.proj$date)
    if ( input$approve_upload == TRUE & rV$county == "California" & rV$hosp == TRUE) {
      u.df <- as.data.table(rV$content_file)
      u.df <- u.df %>% select(date, c_hosp) %>% filter(min_hosp <= date & date <= JHU_inf_end_dt)
      u.rt.xts <- xts(u.df[,2], u.df$date)
      colnames(u.rt.xts) <- c("Upload.Hosp")
      df <- merge(covid.xts,can.proj.xts,ihme.proj.xts,mobs.proj.xts,mit.proj.xts,jhu.proj.xts,u.rt.xts)
      # Columns 2:6 are the five models; column 7 is the upload.
      if (input$include_ensemble == TRUE) {
        df$mean.proj <- rowMeans(df[,2:7], na.rm = TRUE)
      } else {
        df$mean.proj <- rowMeans(df[,2:6], na.rm = TRUE)
      }
    } else {
      df <- merge(covid.xts,can.proj.xts,ihme.proj.xts,mobs.proj.xts,mit.proj.xts,jhu.proj.xts)
      df$mean.proj <- rowMeans(df[,2:6], na.rm = TRUE)
    }
    df$mean.proj <- ifelse(!is.na(df$covid.xts), NA, df$mean.proj)  # no ensemble where actuals exist
    df <- as.data.table(df) %>% as.data.frame()
    # Styling hints consumed by the plot: solid/Est. for actuals, dash/Proj. otherwise.
    df$period <- ifelse(!is.na(df$covid.xts), "solid", "dash")
    df$type <- ifelse(!is.na(df$covid.xts), "Est.", "Proj.")
    return(df)
  })
#Value Boxes
output$actual.hosp.box <- renderValueBox({
cdt <- max(covid$Most.Recent.Date)
current.hosp <- as.character(covid[which(covid$Most.Recent.Date == cdt & covid$County.Name == "California"),5])
valueBox( format(as.numeric(current.hosp), big.mark = ","), paste0("Actuals: ",cdt), color = "black")
})
  # Value box: mean hospitalization forecast, labeled with the 30-day horizon.
  # NOTE(review): slice(n()) takes the final row of the full series, which may
  # lie beyond the 30-day date used in the caption - verify the intent.
  output$mean.proj.hosp.box <- renderUI({
    df <- hosp.proj.ts()
    cdt <- max( df[which(df$index <= Sys.Date() + 30),]$index )  # caption date
    mean.proj <- hosp.proj.ts() %>% slice(n()) %>% select("mean.proj")
    valueBox( format(round(mean.proj, digits = 0), big.mark = ","), paste0("Mean Forecast through ", cdt), color = "blue", width = 12)
  })
#Graphs
  # Statewide hospitalization chart (memoized renderer).
  output$hosp.proj.plot <- renderPlotly({
    hosp.proj.plot.memoized(
      hosp.proj.ts(),
      input$approve_upload,
      rV$county,
      rV$hosp
    )
  })
#Download file of Statewide Hospitalization Forecasts
output$dlhosp <- downloadHandler(
filename = function() { paste("Hospital_Forecasts_",Sys.Date(),'.csv', sep='') },
content = function(file) {
df <- hosp.proj.ts() %>% select(-type, -period) %>% as.data.frame()
# Title
t <- c(paste("Statewide Hospitalization Forecasts", sep = ""),rep("",ncol(df)) )
#Subtitle
tt <- c(paste("COVID Assessment Tool - Downloaded on",Sys.Date(), sep = " "),rep("",ncol(df)))
#Column labels
if ( input$approve_upload == TRUE & rV$county == "California" & rV$hosp == TRUE) {
l <- c("Date","Actuals", "COVIDActNow","IHME","MOBS","MIT","JHU","Upload", "Mean")
} else {
l <- c("Date","Actuals", "COVIDActNow","IHME","MOBS","MIT","JHU","Mean")
}
df[,2:ncol(df)] <- lapply(df[,2:ncol(df)],function(x) round(x,2))
df[is.na(df)] <- 0
df[] <- lapply(df, as.character)
#Source
s <- c("Please see the Technical Notes tab of the application for data sources.",rep("",ncol(df)))
p <- c("Prepared by: California Department of Public Health - COVID Modeling Team",rep("",ncol(df)))
dlm <- rbind(t, tt, l, df, s, p)
write.table(dlm, file, row.names = F, col.names = F, quote = F, na= "NA", sep = ",")
})
#### County Hospitalization Projections ####
fc.cnty.beds <- reactive({
c <- names(fipslist[match(input$select.county.hosp,fipslist)])
if (c %in% cnty.beds[,1] == TRUE) {
beds <- c(cnty.beds[which(cnty.beds$COUNTY == c),9])
} else {
beds <- c(NA)
}
})
  # County hospitalization series: actuals plus UCLA (history + prediction,
  # de-overlapped) and JHU, plus COVIDActNow where it covers the county, plus
  # an approved matching upload. Adds the mean.proj ensemble and plot styling
  # columns, mirroring hosp.proj.ts().
  hosp.proj.cnty.ts <- reactive({
    # progress <- Progress$new()
    # Make sure it closes when we exit this reactive, even if there's an error
    # on.exit(progress$close())
    # progress$set(message = "Gathering Hospitalization Forecasts", value = 0)
    cnty <- input$select.county.hosp                               # county FIPS
    #c <- names(canfipslist[match(input$select.county.hosp,canfipslist)])
    c <- names(fipslist[match(input$select.county.hosp,fipslist)]) # county name
    min_hosp <- min(covid$Most.Recent.Date)  # earliest actuals date
    hosp <- covid %>% select(Most.Recent.Date,COVID.19.Positive.Patients) %>% filter(covid$County.Name == c) %>% as.data.frame()
    # UCLA history and prediction overlap in dates; keep predictions strictly
    # after the last "history" date.
    max_ucla <- ucla_cnty %>% filter(output == "hosp",type %in% c("history"), county == c) %>% select(date) %>% summarize(max(date)) %>% c()
    ucla.hist <- ucla_cnty %>% filter(output == "hosp",
                                      type %in% c("history"),
                                      county == c,
                                      min_hosp <= date) %>%
      select(date,value) %>% as.data.frame()
    ucla.pred <- ucla_cnty %>% filter(output == "hosp",
                                      type %in% c("pred"),
                                      county == c,
                                      max_ucla < date, #filter out overlapping dates from forecasts
                                      date <= JHU_inf_end_dt) %>%
      select(date,value) %>% as.data.frame()
    ucla.hosp <- rbind(ucla.hist,ucla.pred)
    jhu.hosp.proj <- JHU_aws %>% filter(intervention == "Inference" & county == c) %>% select("date","hosp_occup_mean") %>% filter(min_hosp <= date & date <= JHU_inf_end_dt)
    covid.xts <- xts(hosp[,-1],hosp$Most.Recent.Date)
    ucla.proj.xts <- xts(ucla.hosp[,-1],ucla.hosp$date)
    jhu.proj.xts <- xts(jhu.hosp.proj[,-1],jhu.hosp.proj$date)
    df <- merge(covid.xts,ucla.proj.xts,jhu.proj.xts )
    if (input$select.county.hosp %in% canfipslist) {
      # progress$inc(3/4)
      out <- filter(can.county.observed, fips == cnty)
      cnty.hosp <- out %>% select(date,hospitalBedsRequired) %>% as.data.frame()
      # progress$inc(1/4)
      can.hosp.proj <- cnty.hosp %>% select(date, hospitalBedsRequired) %>% filter(min_hosp <= date & date <= JHU_inf_end_dt)
      can.proj.xts <- xts(can.hosp.proj[,-1],can.hosp.proj$date)
      df <- merge(df,can.proj.xts)
    }
    ##User Uploaded Data
    if ( input$approve_upload == TRUE & rV$county == c & rV$hosp == TRUE) {
      u.df <- as.data.table(rV$content_file)
      u.df <- u.df %>% select(date, c_hosp) %>% filter(min_hosp <= date & date <= JHU_inf_end_dt)
      u.rt.xts <- xts(u.df[,2], u.df$date)
      colnames(u.rt.xts) <- c("Upload.Hosp")
      df <- merge(df,u.rt.xts)
      if (input$include_ensemble == TRUE) {
        df$mean.proj <- rowMeans(df[,2:ncol(df)], na.rm = TRUE)
      } else {
        # NOTE(review): 2:ncol(df)-1 parses as 1:(ncol(df)-1), which INCLUDES
        # the actuals column 1 in the mean while excluding the upload - verify
        # whether 2:(ncol(df)-1) was intended.
        df$mean.proj <- rowMeans(df[,2:ncol(df)-1], na.rm = TRUE)
      }
    } else {
      df$mean.proj <- rowMeans(df[,2:ncol(df)], na.rm = TRUE)
    }
    ##
    df$mean.proj <- ifelse(!is.na(df$covid.xts), NA, df$mean.proj)  # no ensemble where actuals exist
    df <- as.data.table(df) %>% as.data.frame()
    # Styling hints consumed by the plot.
    df$period <- ifelse(!is.na(df$covid.xts), "solid", "dash")
    df$type <- ifelse(!is.na(df$covid.xts), "Est.", "Proj.")
    return(df)
  })
#Value Boxes
output$actual.cnty.hosp.box <- renderValueBox({
c <- names(fipslist[match(input$select.county.hosp,fipslist)])
cdt <- max(covid$Most.Recent.Date)
current.hosp <- as.character(covid[which(covid$Most.Recent.Date == cdt & covid$County.Name == c),5])
valueBox( paste0(format(as.numeric(current.hosp), big.mark = ","),"/",
format(as.numeric(fc.cnty.beds()), big.mark = ",") ),
paste0("Actuals / Total Beds : ",cdt),
color = "black")
})
  # Value box: mean county hospitalization forecast, labeled with the 30-day
  # horizon. NOTE(review): slice(n()) takes the final row of the full series,
  # which may lie beyond the caption date - verify.
  output$mean.cnty.proj.hosp.box <- renderValueBox({
    df <- hosp.proj.cnty.ts()
    cdt <- max( df[which(df$index <= Sys.Date() + 30),]$index )  # caption date
    mean.proj <- hosp.proj.cnty.ts() %>% slice(n()) %>% select("mean.proj")
    valueBox( format(round(mean.proj, digits = 0), big.mark = ","),
              paste0("Mean Forecast through ", cdt), color = "blue")
  })
#Graph
  # County hospitalization chart (memoized renderer).
  output$county.hosp.plot <- renderPlotly({
    county.hosp.plot.memoized(
      input$select.county.hosp,
      hosp.proj.cnty.ts(),
      input$approve_upload,
      rV$county,
      rV$hosp
    )
  })
#Download file of COUNTY Hospitalization Forecasts
output$dlhosp.cnty <- downloadHandler(
filename = function() { paste("Hospital_Forecasts_for_",names(fipslist[match(input$select.county.hosp,fipslist)]),Sys.Date(),'.csv', sep='') },
content = function(file) {
c <- names(canfipslist[match(input$select.county.hosp,canfipslist)])
# Title
t <- c(paste("Hospitalization Forecasts for ",c, sep = ""),rep("",ncol(hosp.proj.cnty.ts())-1))
#Subtitle
tt <- c(paste("COVID Assessment Tool - Downloaded on",Sys.Date(), sep = " "),rep("",ncol(hosp.proj.cnty.ts())-1))
#Data Prep
df <- hosp.proj.cnty.ts() %>% select(-c(period, type)) %>% rename(date = index) %>% as.data.frame()
df[is.na(df)] <- 0
df[] <- lapply(df, as.character)
#Column labels
l <- c("Date","Hospitalizations")
if ( "ucla.proj.xts" %in% names(hosp.proj.cnty.ts()) ) { l <- c(l, c("UCLA")) }
if ( "jhu.proj.xts" %in% names(hosp.proj.cnty.ts()) ) { l <- c(l, c("JHU")) }
if ( "can.proj.xts" %in% names(hosp.proj.cnty.ts()) ) { l <- c(l, c("COVIDActNow")) }
if ( input$approve_upload == TRUE & rV$county == c & rV$hosp == TRUE) { l <- c(l, c("Uploaded")) }
if ( length(l) > 2 ) { l <- c(l, c("Mean") ) }
#Source
s <- c("Please see the Technical Notes tab of the application for data sources.",rep("",ncol(hosp.proj.cnty.ts())-1))
p <- c("Prepared by: California Department of Public Health - COVID Modeling Team",rep("",ncol(hosp.proj.cnty.ts())-1))
dlm <- rbind(t, tt, l, df, s, p)
write.table(dlm, file, row.names = F, col.names = F, quote = F, na= "NA", sep = ",")
})
#### Statewide Cumulative Deaths Projections ####
#Data Prep
  # Statewide cumulative-death forecasts from the Reich Lab Forecast Hub, with
  # CDPH actuals appended as a pseudo-model ("Actuals"), then pivoted to one
  # column per model team.
  cdeath.ca <- reactive({
    reich_test <- reich_lab %>% unique() %>% as.data.frame()
    cdeaths_test <- covid %>% select(Most.Recent.Date,Total.Count.Deaths) %>%
      filter(covid$County.Name == "California") %>%
      mutate(model_team = 'Actuals') %>%
      rename(model_team = model_team,
             target_end_date = Most.Recent.Date,
             pointNA = Total.Count.Deaths
      ) %>%
      select(model_team, pointNA, target_end_date) %>%
      as.data.frame()
    reich_test <- rbind(reich_test,cdeaths_test)
    # One row per team/date, then spread teams to columns; the assignment's
    # value is what the reactive returns.
    reich_test <- reich_test %>% distinct(model_team, target_end_date, .keep_all = TRUE) %>% spread(model_team, pointNA)
  })
#Value Boxes
output$actual.cdeath.box <- renderValueBox({
cdt <- max(covid$Most.Recent.Date)
current.deaths <- as.character(covid[which(covid$Most.Recent.Date == cdt & covid$County.Name == "California"),4])
valueBox( format(as.numeric(current.deaths), big.mark = ","), paste0("Actuals: ",cdt), color = "black")
})
  # Value box: the COVIDhub ensemble's last available cumulative-death forecast.
  output$mean.proj.cdeaths.box <- renderUI({
    ensemble <- cdeath.ca() %>% select(target_end_date,COVIDhub.ensemble) %>% filter(!is.na(COVIDhub.ensemble))
    cdt.ens <- max(ensemble$target_end_date)
    mean.proj <- ensemble %>% slice(n()) %>% select(2)  # last forecast value
    valueBox( format(round(mean.proj, digits = 0), big.mark = ","), paste0("COVIDhub Ensemble Forecast through ", cdt.ens), color = "blue", width = 12)
  })
#Graphs
  # Statewide cumulative-death forecast chart (memoized renderer).
  output$cdeath.proj.plot <- renderPlotly({
    cdeath.proj.plot.memoized(cdeath.ca())
  })
#Download file of Statewide Cumulative Deaths Forecasts
  # Download the statewide cumulative-death forecast table (one column per
  # model team) as CSV. Title/footer rows are padded to ncol-1 extra fields,
  # which matches the width of cdeath.ca().
  output$dlDeath <- downloadHandler(
    filename = function() { paste("Cumulative_Deaths_Forecasts_",Sys.Date(),'.csv', sep='') },
    content = function(file) {
      # Title
      t <- c(paste("Statewide Cumulative Deaths Forecasts", sep = ""),rep("",ncol(cdeath.ca())-1) )
      #Subtitle
      tt <- c(paste("COVID Assessment Tool - Downloaded on",Sys.Date(), sep = " "),rep("",ncol(cdeath.ca())-1))
      #Column labels: taken straight from the spread model-team columns
      l <- names(cdeath.ca())
      df <- cdeath.ca() %>% as.data.frame()
      #df[,2:ncol(df)] <- lapply(df[,2:ncol(df)],function(x) round(x,2))
      df[is.na(df)] <- 0
      df[] <- lapply(df, as.character)
      #Source
      s <- c("Please see the Technical Notes tab of the application for data sources.",rep("",ncol(cdeath.ca())-1))
      p <- c("Prepared by: California Department of Public Health - COVID Modeling Team",rep("",ncol(cdeath.ca())-1))
      dlm <- rbind(t, tt, l, df, s, p)
      write.table(dlm, file, row.names = F, col.names = F, quote = F, na= "NA", sep = ",")
    })
#### County Cumulative Death Projections ####
#Data prep
county.deaths <- reactive({
# progress <- Progress$new()
# Make sure it closes when we exit this reactive, even if there's an error
# on.exit(progress$close())
# progress$set(message = "Gathering Death Forecast Data", value = 0)
fips <- input$select.county.death
cnty <- names(fipslist[match(input$select.county.death,fipslist)])
#Used to filter model estimates that occur prior to actuals
min_death <- min(covid$Most.Recent.Date)
#UCLA Time series overalp in terms of date; therefore we need to find the max date for the "history" of estimates of actuals
max_ucla <- ucla_cnty %>% filter(output == "deaths",type %in% c("history"), county == cnty) %>% select(date) %>% summarize(max(date)) %>% c()
death <- covid %>% select(Most.Recent.Date,Total.Count.Deaths) %>% filter(covid$County.Name == cnty) %>% as.data.frame()
ucla.hist <- ucla_cnty %>% filter(output == "deaths",
type %in% c("history"),
county == cnty,
min_death <= date) %>%
select(date,value) %>% as.data.frame()
ucla.pred <- ucla_cnty %>% filter(output == "deaths",
type %in% c("pred"),
county == cnty,
max_ucla < date, #filter out overlapping dates from forecasts
date <= Sys.Date() + 30) %>%
select(date,value) %>% as.data.frame()
ucla.death <- rbind(ucla.hist,ucla.pred)
jhu.death <- JHU_aws %>% filter(intervention == "Inference" & county == cnty) %>% select("date","cum_deaths_mean") %>% filter(min_death <= date & date <= Sys.Date() + 30)
covid.xts <- xts(death[,-1],death$Most.Recent.Date)
ucla.proj.xts <- xts(ucla.death[,-1],ucla.death$date)
jhu.proj.xts <- xts(jhu.death[,-1],jhu.death$date)
df <- merge(covid.xts,ucla.proj.xts,jhu.proj.xts )
if (input$select.county.death %in% canfipslist) {
# progress$inc(3/4)
out <- filter(can.county.observed, county == cnty)
can.death <- out %>% select(date,cumulativeDeaths) %>%
filter(min_death <= date & date <= Sys.Date() + 30) %>%
rename(CovidActNow = cumulativeDeaths) %>% as.data.frame()
# progress$inc(1/4)
can.proj.xts <- xts(can.death[,-1],can.death$date)
df <- merge(df,can.proj.xts)
}
##User Uploaded Data
if ( input$approve_upload == TRUE & rV$county == cnty & rV$death == TRUE) {
u.df <- as.data.table(rV$content_file)
u.df <- u.df %>% select(date, c_cumdeaths) %>% filter(min_hosp <= date & date <= JHU_inf_end_dt)
u.rt.xts <- xts(u.df[,2], u.df$date)
colnames(u.rt.xts) <- c("Upload.Death")
df <- merge(df,u.rt.xts)
if (input$include_ensemble == TRUE) {
df$mean.proj <- rowMeans(df[,2:ncol(df)], na.rm = TRUE)
} else {
df$mean.proj <- rowMeans(df[,2:ncol(df)-1], na.rm = TRUE)
}
} else {
df$mean.proj <- rowMeans(df[,2:ncol(df)], na.rm = TRUE)
}
##
df$mean.proj <- ifelse(!is.na(df$covid.xts), NA, df$mean.proj)
df <- as.data.table(df) %>% as.data.frame()
df$period <- ifelse(!is.na(df$covid.xts), "solid", "dash")
df$type <- ifelse(!is.na(df$covid.xts), "Est.", "Proj.")
return(df)
})
#Value Boxes
# Value box: most recently reported cumulative death count for the county
# chosen in input$select.county.death. Reads the observed `covid` table only.
output$actual.cnty.death.box <- renderValueBox({
  c <- names(fipslist[match(input$select.county.death,fipslist)])  # county display name (NOTE: shadows base::c in this scope)
  cdt <- max(covid$Most.Recent.Date)  # latest reporting date across all counties
  # Column 4 of `covid` holds the cumulative death count for that county/date row
  current.deaths <- as.character(covid[which(covid$Most.Recent.Date == cdt & covid$County.Name == c),4])
  valueBox( paste0(format(as.numeric(current.deaths), big.mark = ",") ),
            paste0("Actual Deaths : ",cdt),
            color = "black")
})
# Value box: ensemble-mean projected deaths at the last date of the
# county.deaths() forecast window (30 days out from today).
output$mean.cnty.proj.death.box <- renderValueBox({
  df <- county.deaths()
  cdt <- max( df$index )  # final date of the forecast horizon
  mean.proj <- df %>% slice(n()) %>% select("mean.proj")  # ensemble mean on the last row
  valueBox( format(round(mean.proj, digits = 0), big.mark = ","),
            paste0("30-Day Forecast through ", cdt), color = "blue")
})
#Graph
# Interactive county death-forecast plot; all rendering is delegated to the
# memoized helper so repeated selections of the same inputs reuse cached work.
output$county.death.plot <- renderPlotly({
  county.death.plot.memoized(
    input$select.county.death,  # selected county FIPS/value
    county.deaths(),            # assembled observed + forecast data frame
    input$approve_upload,       # whether user-uploaded data was approved
    rV$county,                  # county the upload belongs to
    rV$death                    # whether the upload contains death data
  )
})
#Download file of COUNTY Total Death Forecasts
# Download handler: CSV of cumulative death forecasts for the selected county.
# The file is built row-wise (title, subtitle, column labels, data, source,
# attribution) and written headerless via write.table so the banner rows
# appear above the column labels.
output$dlDeath.cnty <- downloadHandler(
  filename = function() { paste("Cumulative_Death_Forecasts_for_",names(canfipslist[match(input$select.county.death,canfipslist)]),Sys.Date(),'.csv', sep='') },
  content = function(file) {
    # NOTE(review): the county name is resolved via canfipslist here, but the
    # value boxes above use fipslist — confirm canfipslist covers every
    # selectable county, otherwise this yields NA in the title/filename.
    c <- names(canfipslist[match(input$select.county.death,canfipslist)])
    # Title
    t <- c(paste("Cumulative Death Forecasts for ",c, sep = ""),rep("",ncol(county.deaths())-1))
    #Subtitle
    tt <- c(paste("COVID Assessment Tool - Downloaded on",Sys.Date(), sep = " "),rep("",ncol(county.deaths())-1))
    df <- county.deaths() %>% select(-c(period, type)) %>% rename(date = index) %>% as.data.frame()
    df[is.na(df)] <- 0                  # blank cells become 0 in the export
    df[] <- lapply(df, as.character)    # uniform character columns for rbind with banner rows
    # Column labels: built conditionally to mirror whichever model columns
    # county.deaths() actually produced for this county.
    l <- c("Date","Total Deaths")
    if ( "ucla.proj.xts" %in% names(county.deaths()) ) { l <- c(l, c("UCLA")) }
    if ( "jhu.proj.xts" %in% names(county.deaths()) ) { l <- c(l, c("JHU")) }
    if ( "can.proj.xts" %in% names(county.deaths()) ) { l <- c(l, c("COVIDActNow")) }
    if ( input$approve_upload == TRUE & rV$county == c & rV$death == TRUE) { l <- c(l, c("Uploaded")) }
    if ( length(l) > 2 ) { l <- c(l, c("Mean") ) }
    #Source
    s <- c("Please see the Technical Notes tab of the application for data sources.",rep("",ncol(county.deaths())-1))
    p <- c("Prepared by: California Department of Public Health - COVID Modeling Team",rep("",ncol(county.deaths())-1))
    dlm <- rbind(t, tt, l, df, s, p)
    write.table(dlm, file, row.names = F, col.names = F, quote = F, na= "NA", sep = ",")
  })
#### Long-term Epi-Models ####
# Renders one HTML paragraph per selected scenario: "<name>: <description>",
# looked up from the `scenarios` table (column 2 = display name, column 4 =
# description, matched on scenarios$colvar).
output$model.descrip.ts <- renderUI({
  UKKC <- as.character(input$include_JHU_UKKC)
  model_descrip_list <- lapply(UKKC, function(i) { HTML(paste("<p>",as.character(scenarios[match(i, scenarios$colvar),2]),": ",
                                                              as.character(scenarios[match(i, scenarios$colvar),4]),"</p>")) })
  do.call(tagList, model_descrip_list)  # combine the <p> tags into one UI element
})
#### Daily Estimates #####
# Scenario picker: the available choice groups depend on the selected
# geography (California gets the full set incl. RAND and IHME; counties get
# a reduced list). Groups are labeled by model release date or "Real-time".
output$physical.select <- renderUI({
  pickerInput(
    inputId = "include_JHU_UKKC", "Select Scenario",
    # NOTE(review): several list groups below share the name "Real-time";
    # duplicated group labels in pickerInput may merge or shadow each other —
    # confirm this renders as intended.
    choices = switch(input$county_ts,
                     "California" = list("5/28/2020" = modellist[c(8:11)],
                                         "4/23/2020" = modellist[c(4:7)],
                                         "Real-time" = randlist,
                                         "Real-time" = otherlist[1:2],
                                         "Real-time" = otherlist[3] ),
                     list("5/28/2020" = modellist[c(8:11)],
                          "Real-time" = modellist[c(4:7)],
                          "Real-time" = otherlist[1:2] )
    ),
    selected = c("strictDistancingNow",
                 "weakDistancingNow",
                 "IHME_sts",
                 "rand.3.1",
                 "rand.2.2",
                 "rand.1.3"),
    options = list(`actions-box` = TRUE, noneSelectedText = "Select Scenario"),
    #inline = TRUE,
    multiple = TRUE,
    # Same style applied to every choice row (14 entries max)
    choicesOpt = list( style = rep(("color: black; background: white; font-weight: bold;"),14))
  )
})
# Dropdown for the observed ("actuals") series to overlay; the default
# follows the metric crosswalk (1 = hospitalized, 2 = ICU, 3 = deaths).
output$epi_covid_select <- renderUI({
  selectInput("select_COVID",
              "Select Actuals:",
              COVIDvar,
              selected = switch(input$selected_crosswalk,
                                "1" = "COVID.19.Positive.Patients",
                                "2" = "ICU.COVID.19.Positive.Patients",
                                "3" = "Total.Count.Deaths")
  )
})
# Reactive wrapper around the memoized scenario-xts builder: recomputes only
# when the selected geography or metric crosswalk changes.
state.model.xts <- reactive(
  state_model_xts_memoized(
    input$county_ts,
    input$selected_crosswalk
  )
)
# Reactive: total licensed hospital beds for the selected county.
# Returns NA when the county is absent from the cnty.beds lookup table.
# Fixes over the original: local variable no longer shadows base::c,
# redundant `== TRUE` comparisons removed, and the result is the final
# expression instead of the value of an assignment.
total.cnty.beds <- reactive({
  county <- input$county_ts
  if (county %in% cnty.beds[, 1]) {
    # Column 9 of cnty.beds holds the bed count used as the capacity line
    cnty.beds[which(cnty.beds$COUNTY == county), 9]
  } else {
    NA
  }
})
#Regex patterns for JHU scenarios
# jhu.no   — matches a bare JHU scenario column name (no .M/.L/.H suffix)
# jhu.M    — matches the median (".M") column of a JHU scenario
# jhu.lh   — matches low/high (".L"/".H") interval columns (one spelling variant)
# jhu.lh.b — matches low/high interval columns (alternate spelling variant)
# rand.no  — matches a RAND scenario column name (e.g. "rand.3.1")
jhu.no <- "UK.\\w+.\\d+_\\d+|.\\w+_\\w{4,}"
jhu.M <- "UK.\\w+.\\d+_\\d+.M|.\\w+_\\w{4,}.M"
jhu.lh <- "UK.\\w+.\\d[w].\\w+.[LH]|.\\w+_\\w{4,}.[LH]"
jhu.lh.b <- "UK.\\w+.\\d+_\\d+.[LH]|.\\w+_\\w{4,}.[LH]"
rand.no <- "\\w\\.\\d\\.\\d"
# Interactive dygraph of long-term scenario projections vs. observed actuals.
# Step 1 builds `chbx`, the vector of column names to pull from the scenario
# xts (state.model.xts()), from the user's scenario/interval selections.
# Step 2 layers each selected series onto the dygraph with model-specific
# labels, interval bands, and a capacity reference line.
output$physical.graph <- renderDygraph({
  df <- state.model.xts()
  dtrange <- paste(as.character(input$dateRange_ts), collapse = "/")  # NOTE(review): computed but never used below
  chbx <- c()
  #### Actuals: include the observed series when the toggle is on
  if ( input$actuals == TRUE) {chbx <- c(chbx,c(input$select_COVID)) }
  UKKC <- as.character(input$include_JHU_UKKC)
  # JHU scenarios: bare columns in median display mode, otherwise ".M" columns
  if ( TRUE %in% grepl(jhu.no, UKKC) & input$physical.mmd == "M" ) {
    JHU_list <- UKKC[grep(jhu.no,UKKC)]
    chbx <- c(chbx, c(JHU_list) )
  } else {
    JHU_list <- UKKC[grep(jhu.no,UKKC)]
    chbx <- c(chbx, c( as.character(lapply(seq_along(JHU_list), function(i) { paste0(as.character( JHU_list[[i]] ),".M" ) } ) ) ) )
  }
  # JHU interquartile band columns (".L"/".H") when the IQR toggle is on
  if (TRUE %in% grepl(jhu.no, UKKC) & input$physical.iqr == TRUE) {
    JHU_list <- UKKC[grep(jhu.no,UKKC)]
    chbx <- c(chbx, c( as.character(lapply(seq_along(JHU_list), function(i) {paste0(as.character( JHU_list[[i]] ),".L" ) } )) ),
              c( as.character(lapply(seq_along(JHU_list), function(i) {paste0(as.character( JHU_list[[i]] ),".H" ) } )) ) )
  }
  # IHME model is only available at the state level
  if ( TRUE %in% grepl("IHME_sts", UKKC ) & input$county_ts == "California" ) {
    chbx <- chbx %>% c("IHME_sts")
  }
  if ( TRUE %in% grepl("IHME_sts", UKKC ) & input$IHME.iqr == TRUE & input$county_ts == "California") {
    IHME <- "IHME_sts"
    chbx <- c(chbx, c( as.character(lapply(seq_along(IHME), function(i) {paste0(as.character( IHME[[i]] ),".L") } )) ),
              c( as.character(lapply(seq_along(IHME), function(i) {paste0(as.character( IHME[[i]] ),".H") } )) )
    )
  }
  # RAND scenarios: state level only
  if ( TRUE %in% grepl(rand.no, UKKC ) & input$county_ts == "California" ) {
    RAND_list <- UKKC[grep(rand.no,UKKC)]
    chbx <- c(chbx, c( RAND_list ) )
  }
  if ( TRUE %in% grepl(rand.no, UKKC ) & input$RAND.iqr == TRUE & input$county_ts == "California") {
    RAND_list <- UKKC[grep(rand.no,UKKC)]
    chbx <- c(chbx, c( as.character(lapply(seq_along(RAND_list), function(i) {paste0(as.character( RAND_list[[i]] ),".L") } )) ),
              c( as.character(lapply(seq_along(RAND_list), function(i) {paste0(as.character( RAND_list[[i]] ),".H") } )) )
    )
  }
  # CovidActNow scenarios: only for counties CAN covers
  if ( TRUE %in% grepl("weakDistancingNow|strictDistancingNow",UKKC) &
       input$county_ts %in% can_counties == TRUE ) {
    can <- UKKC[grep("weakDistancingNow|strictDistancingNow",UKKC)]
    chbx <- chbx %>% c(can)
  }
  df <- df[,c(chbx)]  # keep only the selected columns
  # JS formatter: thousands separators on axis labels and hover values
  FUNC_JSFormatNumber <- "function(x) {return x.toString().replace(/(\\d)(?=(\\d{3})+(?!\\d))/g, '$1,')}"
  d <- dygraph(df, main = switch(input$selected_crosswalk,
                                 "1" = paste0(input$county_ts," COVID Hospitalizations"),
                                 "2" = paste0(input$county_ts," COVID ICU Patients"),
                                 "3" = paste0(input$county_ts," COVID Cumulative Deaths")
  ))
  # JHU series WITH interval columns present: draw [low, mid, high] bands
  if ( TRUE %in% grepl(jhu.lh, chbx) | TRUE %in% grepl(jhu.lh.b, chbx) ) {
    if ( input$physical.mmd == "M") {
      chbx.M <- chbx[grep(jhu.no,chbx)]
      chbx.M <- unique(str_remove(chbx.M, "\\.[LH]"))
      for (scenario in chbx.M) {
        d <- d %>% dySeries(c( paste0(scenario,".L"),paste0(scenario),paste0(scenario,".H")), label = names(modellist[match(scenario,modellist)]), fillGraph = FALSE)
      }
    } else {
      chbx.M <- chbx[grep(jhu.M,chbx)]
      chbx.M <- str_remove(chbx.M, ".M")
      for (scenario in chbx.M) {
        d <- d %>% dySeries(c( paste0(scenario,".L"),paste0(scenario,".M"),paste0(scenario,".H")), label = names(modellist[match(scenario,modellist)]), fillGraph = FALSE)
      }
    }
    # No intervals
  } else {
    if ( input$physical.mmd == "M") {
      chbx.M <- chbx[grep(jhu.no,chbx)]
      for (scenario in chbx.M) {
        d <- d %>% dySeries(paste0(scenario), label = names(modellist[match(scenario,modellist)]), fillGraph = FALSE)
      }
    } else {
      chbx.M <- chbx[grep(jhu.M,chbx)]
      chbx.M <- str_remove(chbx.M, ".M")
      for (scenario in chbx.M) {
        d <- d %>% dySeries(paste0(scenario,".M"), label = names(modellist[match(scenario,modellist)]), fillGraph = FALSE)
      }
    }
  }
  # IHME: banded when interval columns exist, plain line otherwise
  if ( TRUE %in% grepl("IHME_sts.[LH]", chbx) ){
    if ( "IHME_sts.L" %in% c(chbx) ) {d <- d %>% dySeries(c("IHME_sts.L","IHME_sts","IHME_sts.H"), label = 'IHME Model', fillGraph = FALSE) }
  } else {
    if ( "IHME_sts" %in% c(chbx) ) {d <- d %>% dySeries("IHME_sts", label = 'IHME Model', fillGraph = FALSE) }
  }
  if ( "weakDistancingNow" %in% c(chbx) ) {d <- d %>% dySeries("weakDistancingNow", label = 'CAN: Delay/Distancing', fillGraph = FALSE) }
  if ( "strictDistancingNow" %in% c(chbx) ) {d <- d %>% dySeries("strictDistancingNow", label = 'CAN: Shelter in Place', fillGraph = FALSE) }
  # RAND: banded when interval columns exist, plain lines otherwise
  if ( TRUE %in% grepl("\\w\\.\\d\\.\\d.[LH]", chbx) ){
    chbx.R <- chbx[grep(rand.no,chbx)]
    chbx.R <- unique(str_remove(chbx.R, "\\.[LH]"))
    for (scenario in chbx.R) {
      d <- d %>% dySeries(c( paste0(scenario,".L"),paste0(scenario),paste0(scenario,".H")), label = names(modellist[match(scenario,modellist)]), fillGraph = FALSE)
    }
  } else {
    chbx.R <- chbx[grep(rand.no,chbx)]
    for (scenario in chbx.R) {
      d <- d %>% dySeries(paste0(scenario), label = names(modellist[match(scenario,modellist)]), fillGraph = FALSE)
    }
  }
  # Observed actuals drawn as distinct black point shapes
  if ( "Total.Count.Deaths" %in% c(chbx) ) {d <- d %>% dySeries("Total.Count.Deaths", label = "Total Deaths", fillGraph= FALSE, drawPoints = TRUE, pointSize = 5, pointShape = "square", color = "black") }
  if ( "COVID.19.Positive.Patients" %in% c(chbx) ) {d <- d %>% dySeries("COVID.19.Positive.Patients", label = "Patients Positive for COVID-19", fillGraph= FALSE, drawPoints = TRUE, pointSize = 5, pointShape = "diamond", color = "black") }
  if ( "ICU.COVID.19.Positive.Patients" %in% c(chbx) ) {d <- d %>% dySeries("ICU.COVID.19.Positive.Patients", label = "ICU Patients Positive for COVID-19", fillGraph= FALSE, drawPoints = TRUE, pointSize = 5, pointShape = "hexagon", color = "black") }
  #### Capacity reference line: statewide surge target, or county licensed beds
  if ( input$selected_crosswalk == "1" & input$county_ts == "California") {
    d <- d %>% dyLimit(50000, label = "Phase 1 Surge Capacity", labelLoc = c("left"), color = "black", strokePattern = "dashed")
  } else {
    if ( input$selected_crosswalk == "1" & !is.na(total.cnty.beds()) == TRUE ) { d <- d %>% dyLimit(total.cnty.beds(), label = "Total Licensed Beds", labelLoc = c("left"), color = "black", strokePattern = "dashed") }
  }
  # Global display options: formatting, highlight, "Today" marker, range selector
  d <- d %>% dyOptions(digitsAfterDecimal=0, strokeWidth = 3, connectSeparatedPoints = TRUE, drawGrid = FALSE) %>%
    dyAxis("y", axisLabelFormatter=JS(FUNC_JSFormatNumber), valueFormatter=JS(FUNC_JSFormatNumber)) %>%
    dyHighlight(highlightSeriesOpts = list(strokeWidth = 4)) %>%
    dyEvent(Sys.Date(), "Today", labelLoc = "top") %>%
    dyLegend(show = "always",
             labelsDiv = "legendDivID2",
             hideOnMouseOut = TRUE) %>%
    dyRangeSelector(height = 30, dateWindow = c((Sys.Date() - 30), as.Date("2020-09-30")) )
})
#### Static Daily Estimates ####
# Static (cached) ggplot version of the scenario graph, suitable for export.
# The cache key covers every input that affects the plot. Column selection
# (`chbx`) mirrors output$physical.graph; the drawing then layers capacity
# lines, scenario lines/ribbons, and observed-actuals points.
output$physical.graph.static <- renderCachedPlot(
  cacheKeyExpr = list(
    input$physical.graph_date_window,
    input$actuals,
    input$select_COVID,
    input$include_JHU_UKKC,
    input$physical.mmd,
    input$physical.iqr,
    input$county_ts,
    input$IHME.iqr,
    input$RAND.iqr,
    input$selected_crosswalk,
    input$drop_hline
  ),
  {
    # Window the scenario xts to the date range shown in the interactive dygraph
    df <- state.model.xts()[ paste0( as.Date(input$physical.graph_date_window[[1]]),"/",as.Date(input$physical.graph_date_window[[2]]) ) ]
    #dtrange <- paste(as.character(input$dateRange_ts), collapse = "/")
    chbx <- c()
    #### Uncontrolled + Actuals
    #if ( input$overlay_uncontrolled == TRUE ) { chbx <- chbx %>% c("No_Intervention") }
    if ( input$actuals == TRUE) {chbx <- c(chbx,c(input$select_COVID)) }
    UKKC <- as.character(input$include_JHU_UKKC)
    # JHU scenarios: bare columns in median display mode, otherwise ".M" columns
    if ( TRUE %in% grepl(jhu.no, UKKC) & input$physical.mmd == "M" ) {
      JHU_list <- UKKC[grep(jhu.no,UKKC)]
      chbx <- c(chbx, c(JHU_list) )
    } else {
      JHU_list <- UKKC[grep(jhu.no,UKKC)]
      chbx <- c(chbx, c( as.character(lapply(seq_along(JHU_list), function(i) { paste0(as.character( JHU_list[[i]] ),".M" ) } ) ) ) )
    }
    # JHU interval columns when the IQR toggle is on
    if (TRUE %in% grepl(jhu.no, UKKC) & input$physical.iqr == TRUE) {
      JHU_list <- UKKC[grep(jhu.no,UKKC)]
      chbx <- c(chbx, c( as.character(lapply(seq_along(JHU_list), function(i) {paste0(as.character( JHU_list[[i]] ),".L" ) } )) ),
                c( as.character(lapply(seq_along(JHU_list), function(i) {paste0(as.character( JHU_list[[i]] ),".H" ) } )) ) )
    }
    # IHME model is state-level only
    if ( TRUE %in% grepl("IHME_sts", UKKC ) & input$county_ts == "California" ) {
      chbx <- chbx %>% c("IHME_sts")
    }
    if ( TRUE %in% grepl("IHME_sts", UKKC ) & input$IHME.iqr == TRUE & input$county_ts == "California") {
      IHME <- "IHME_sts"
      chbx <- c(chbx, c( as.character(lapply(seq_along(IHME), function(i) {paste0(as.character( IHME[[i]] ),".L") } )) ),
                c( as.character(lapply(seq_along(IHME), function(i) {paste0(as.character( IHME[[i]] ),".H") } )) )
      )
    }
    # CovidActNow scenarios: only for counties CAN covers
    if ( TRUE %in% grepl("weakDistancingNow|strictDistancingNow",UKKC) &
         input$county_ts %in% can_counties == TRUE ) {
      can <- UKKC[grep("weakDistancingNow|strictDistancingNow",UKKC)]
      chbx <- chbx %>% c(can)
    }
    # RAND scenarios: state level only
    if ( TRUE %in% grepl(rand.no, UKKC ) & input$county_ts == "California" ) {
      RAND_list <- UKKC[grep(rand.no,UKKC)]
      chbx <- c(chbx, c( RAND_list ) )
    }
    if ( TRUE %in% grepl(rand.no, UKKC ) & input$RAND.iqr == TRUE & input$county_ts == "California") {
      RAND_list <- UKKC[grep(rand.no,UKKC)]
      chbx <- c(chbx, c( as.character(lapply(seq_along(RAND_list), function(i) {paste0(as.character( RAND_list[[i]] ),".L") } )) ),
                c( as.character(lapply(seq_along(RAND_list), function(i) {paste0(as.character( RAND_list[[i]] ),".H") } )) )
      )
    }
    df <- df[,c(chbx)]  # keep only the selected columns
    # nl <- as.numeric(match("No_Intervention",names(df)))
    # maxy <- suppressWarnings( max(df[,-as.numeric(nl)], na.rm=TRUE) +
    #                             ( max(df[,-as.numeric(nl)], na.rm=TRUE) * 0.05)
    # )
    # Fixed legend-name -> color mapping shared by all series below
    colors <- c("No Intervention"= "black",
                "IHME Model" = "#023858",
                "CAN: Shelter in Place" = "#c994c7",
                "CAN: Delay/Distancing" = "#dd1c77",
                'JHU: NPIs 30-40% Effective' = "#d7301f",
                'JHU: NPIs 40-50% Effective' = "#238b45",
                'JHU: NPIs 50-60% Effective' = "#4d004b",
                'JHU: NPIs 60-70% Effective' = "#67001f",
                "JHU: Continuing Lockdown" = "#d7301f",
                'JHU: Slow-paced Reopening' = "#238b45",
                'JHU: Moderate-paced Reopening' = "#4d004b",
                'JHU: Fast-paced Reopening' = "#67001f",
                'RAND: Lift Shelter-in-Place Now' = "#023858",
                'RAND: Reopen Non-essential Businesses in 2 weeks' = "#3690c0",
                'RAND: Reopen Bars/Restaurants/Large events in 1 month' = "#a6bddb",
                #"Total Confirmed Cases" = "red",
                "Total Deaths" = "black",
                "Patients Positive for COVID-19" = "black",
                "ICU Patients Positive for COVID-19" = "black"
                #"Positive + Suspected Patients" = "green",
                #"Positive + Suspected ICU" = "blue"
    )
    #test_colors <- c("Continued_Lockdown" = "#d7301f")
    p <- ggplot()
    # Capacity reference line: statewide surge target, or county licensed beds
    if (input$selected_crosswalk == "1" & input$drop_hline == TRUE & input$county_ts == "California") {
      p <- p + geom_line(df, mapping = aes(x= Index, y = 50000), color = "black", linetype = "dashed") +
        geom_text(aes(x = as.Date(input$physical.graph_date_window[[1]]), y= 50000,
                      label = "Phase 1 Surge Capacity"),
                  hjust = -0.1,
                  vjust = -0.3)
    } else {
      if ( input$selected_crosswalk == "1" & !is.na(total.cnty.beds()) == TRUE ) {
        p <- p + geom_line(df, mapping = aes(x= Index, y = total.cnty.beds()), color = "black", linetype = "dashed") +
          geom_text(aes(x = as.Date(input$physical.graph_date_window[[1]]), y= total.cnty.beds(),
                        label = "Total Licensed Beds"),
                    hjust = -0.1,
                    vjust = -0.3)
      }
    }
    #if ( "No_Intervention" %in% c(chbx) ) { p <- p + geom_line(df, mapping = aes(x = Index, y = No_Intervention), color = "black", size = 1.5, linetype = "dashed") }
    ### JHU Scenarios: line for the median, ribbon for the L/H interval
    if ( TRUE %in% grepl(jhu.no, chbx)) {
      chbx.M <- chbx[grep(jhu.no,chbx)]
      chbx.M <- unique(str_remove(chbx.M, "\\.[MLH]"))
      for (scenario in chbx.M) {
        # Scenario's legend color, looked up via its display name (NOTE: `c` shadows base::c inside this loop)
        c <- as.character(colors[match(names(modellist[match(scenario,modellist)]),names(colors))])
        if ( scenario %in% c(chbx) ) { p <- p + geom_line(df, mapping = aes_string(x="Index", y=scenario, color = shQuote(names(modellist[match(scenario,modellist)])) ), size = 1.5, linetype = "solid") }
        if ( paste0(scenario,".M") %in% c(chbx) ) { p <- p + geom_line(df, mapping = aes_string(x="Index", y=paste0(scenario,".M"), color = shQuote(names(modellist[match(scenario,modellist)])) ), size = 1.5, linetype = "solid") }
        if ( paste0(scenario,".L") %in% c(chbx) ) { p <- p + geom_ribbon(df, mapping = aes_string(x ="Index", ymin = paste0(scenario,".L"), ymax = paste0(scenario,".H") ), fill=c, color = c, alpha = 0.2) }
      }
    }
    ### Other Models/Scenarios
    if ( "IHME_sts" %in% c(chbx) ) { p <- p + geom_line(df, mapping = aes(x=Index, y=IHME_sts, color = "IHME Model"), size = 1.5, linetype = "solid") }
    if ( "IHME_sts.L" %in% c(chbx) ) { p <- p + geom_ribbon(df, mapping = aes(x = Index, ymin = IHME_sts.L, ymax = IHME_sts.H), fill="#a6bddb", color = "#a6bddb", alpha = 0.2) }
    if ( "strictDistancingNow" %in% c(chbx) ) { p <- p + geom_line(df, mapping = aes(x=Index, y=strictDistancingNow, color = "CAN: Shelter in Place"), size = 1.5 ) }
    if ( "weakDistancingNow" %in% c(chbx) ) { p <- p + geom_line(df, mapping = aes(x=Index, y=weakDistancingNow, color = "CAN: Delay/Distancing" ), size = 1.5 ) }
    ### RAND Models
    if ( TRUE %in% grepl(rand.no, chbx)) {
      chbx.M <- chbx[grep(rand.no,chbx)]
      chbx.M <- unique(str_remove(chbx.M, "\\.[LH]"))
      for (scenario in chbx.M) {
        c <- as.character(colors[match(names(modellist[match(scenario,modellist)]),names(colors))])
        if ( scenario %in% c(chbx) ) { p <- p + geom_line(df, mapping = aes_string(x="Index", y=scenario, color = shQuote(names(modellist[match(scenario,modellist)])) ), size = 1.5, linetype = "solid") }
        if ( paste0(scenario,".L") %in% c(chbx) ) { p <- p + geom_ribbon(df, mapping = aes_string(x ="Index", ymin = paste0(scenario,".L"), ymax = paste0(scenario,".H") ), fill=c, color = c, alpha = 0.2) }
      }
    }
    ### Actuals drawn as black point layers
    if ( "Total.Count.Deaths" %in% c(chbx) ) {p <- p + geom_point(df, mapping = aes(x = Index, y = Total.Count.Deaths, color = "Total Deaths"), shape = 15, fill = "black", size = 3 ) }
    if ( "COVID.19.Positive.Patients" %in% c(chbx) ) {p <- p + geom_point(df, mapping = aes(x = Index, y = COVID.19.Positive.Patients, color = "Patients Positive for COVID-19"), shape = 23, fill = "black", size = 3 ) }
    if ( "ICU.COVID.19.Positive.Patients" %in% c(chbx) ) {p <- p + geom_point(df, mapping = aes(x = Index, y = ICU.COVID.19.Positive.Patients, color = "ICU Patients Positive for COVID-19"), shape = 19, fill = "black", size = 3 ) }
    # if ( input$overlay_uncontrolled == TRUE ) {
    #   p <- p + scale_y_continuous(labels = scales::comma, limits = c(0, as.numeric(maxy)) )
    # } else {
    p <- p + scale_y_continuous(labels = scales::comma)
    #}
    # Axis labels, title, and theme styling
    p <- p + labs(x = "Date",
                  y = switch(input$selected_crosswalk,
                             "1" = "Hospital Bed Occupancy",
                             "2" = "ICU Bed Occupancy",
                             "3" = "Cumulative Deaths"),
                  color = "Legend") + scale_color_manual(values = colors) +
      ggtitle(switch(input$selected_crosswalk,
                     "1" = paste0(input$county_ts," COVID Hospitalizations"),
                     "2" = paste0(input$county_ts," COVID ICU Patients"),
                     "3" = paste0(input$county_ts," COVID Cumulative Deaths")
      )) +
      theme(plot.title = element_text(size = 18, face = "bold"),
            axis.title = element_text(face = "bold", size = 18, colour = "black"),
            axis.text.x = element_text(face = "bold", color = "black", size = 18),
            axis.text.y = element_text(face = "bold", color = "black", size = 18),
            axis.line = element_line(color = "black", size = 1, linetype = "solid"),
            panel.grid.major = element_blank(),
            panel.grid.minor = element_blank(),
            panel.border = element_blank(),
            panel.background = element_blank(),
            legend.text=element_text(size=14),
            legend.position = "bottom"
      )
    return(p)
  }
)
## download Static figure data
# Reactive: the data frame behind the static scenario plot, windowed to the
# date range currently shown in the interactive dygraph; feeds the
# dlScenario download handler.
#
# Fixes over the original: the JHU column regex is now the shared `jhu.no`
# constant (the original re-stated the literal three times, inviting drift),
# and the suffix construction uses vectorized paste0 instead of
# as.character(lapply(...)). Column order of the result is unchanged.
# NOTE(review): unlike output$physical.graph.static, this block does not add
# RAND columns, and the CAN condition keys off the crosswalk rather than
# can_counties — confirm that asymmetry is intended.
static.plot.data <- reactive({
  df <- state.model.xts()[ paste0( as.Date(input$physical.graph_date_window[[1]]),"/",as.Date(input$physical.graph_date_window[[2]]) ) ]
  chbx <- c()
  #### Actuals: include the observed series when the toggle is on
  if (input$actuals == TRUE) { chbx <- c(chbx, input$select_COVID) }
  UKKC <- as.character(input$include_JHU_UKKC)
  # JHU scenarios: bare columns in median display mode, otherwise ".M" columns
  JHU_list <- UKKC[grep(jhu.no, UKKC)]
  if (length(JHU_list) > 0 && input$physical.mmd == "M") {
    chbx <- c(chbx, JHU_list)
  } else {
    chbx <- c(chbx, paste0(JHU_list, ".M"))
  }
  # JHU interval columns when the IQR toggle is on
  if (length(JHU_list) > 0 && input$physical.iqr == TRUE) {
    chbx <- c(chbx, paste0(JHU_list, ".L"), paste0(JHU_list, ".H"))
  }
  # IHME model is state-level only
  if (any(grepl("IHME_sts", UKKC)) && input$county_ts == "California") {
    chbx <- c(chbx, "IHME_sts")
  }
  if (any(grepl("IHME_sts", UKKC)) && input$IHME.iqr == TRUE && input$county_ts == "California") {
    chbx <- c(chbx, "IHME_sts.L", "IHME_sts.H")
  }
  # CovidActNow scenarios (skipped for the ICU crosswalk)
  if (any(grepl("weakDistancingNow|strictDistancingNow", UKKC)) && input$selected_crosswalk != "2") {
    chbx <- c(chbx, UKKC[grep("weakDistancingNow|strictDistancingNow", UKKC)])
  }
  # Attach a Date column matching the windowed rows (assumes one row per day)
  df <- df[, c(chbx)] %>% data.frame() %>%
    mutate(Date = seq(as.Date(input$physical.graph_date_window[[1]]),
                      as.Date(input$physical.graph_date_window[[2]]), by = "day"))
  df
})
# Download handler: CSV export of the static scenario data for the selected
# geography, with banner rows (title, subtitle), column labels, data, and
# source/attribution rows written headerless via write.table.
output$dlScenario <- downloadHandler(
  filename = function () {
    paste0("COVID_Scenarios_",input$county_ts,".csv")
  },
  content = function(file) {
    # Title
    t <- c(paste("Long-term COVID Scenarios for ",input$county_ts, sep = ""),rep("",ncol(static.plot.data())-1))
    #Subtitle
    tt <- c(paste("COVID Assessment Tool - Downloaded on",Sys.Date(), sep = " "),rep("",ncol(static.plot.data())-1))
    #Column labels (raw column names of the exported frame)
    l <- names(static.plot.data())
    df <- static.plot.data()
    df[is.na(df)] <- 0               # blank cells become 0 in the export
    df[] <- lapply(df, as.character) # uniform character columns for rbind with banner rows
    #Source
    s <- c("Please see the Technical Notes tab of the application for data sources.",rep("",ncol(static.plot.data())-1))
    p <- c("Prepared by: California Department of Public Health - COVID Modeling Team",rep("",ncol(static.plot.data())-1))
    dlm <- rbind(t, tt, l, df, s, p)
    write.table(dlm, file, row.names = F, col.names = F, quote = F, na= "NA", sep = ",")
    #write.csv(df, file, row.names = F)
  }
)
} # End Server
|
aed6b16ecb84b980b33d6dd1387010b0c90d49d3 | feee549a212d7d2e24d5686249b1cc0cba26832f | /gcd/run_analysis.R | 88b1c38bba0eaea268f4c53c531567c74afa2011 | [] | no_license | fsmontenegro/datasciencecoursera | 057feff2b9032d9e25db9b9058e8d6a785e76338 | c862b360269df04e1dfcff84ed44f65fa7c8fcba | refs/heads/master | 2016-08-07T14:18:46.228591 | 2015-07-26T20:42:53 | 2015-07-26T20:42:53 | 29,143,338 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,362 | r | run_analysis.R | ## run_analysis.R
## JHU's Data Specialization - Getting and Cleaning Data - Project Assignment
## Mar 22, 2015
## Fernando Montenegro (fsmontenegro@gmail.com)
##
## This script loads the UCI HAR Dataset and generates a new tidy dataset as
## per the project instructions: merge test/train sets, keep mean/std
## measurements, label activities, and average each variable per subject
## per activity.
# Load dplyr package
library(dplyr)
curdir <- getwd()
# Set directory. It assumes the original .zip file was expanded in local directory.
# NOTE(review): if any read below errors, the working directory is not restored.
setwd("UCI HAR Dataset/")
# load activity labels (exists() guards let the script be re-run without re-reading)
if(!exists("activity_labels")) {
  activity_labels <-read.table(file="activity_labels.txt")
}
# load features which will become column names
if (!exists("features")) {
  features <- read.table(file="features.txt")
}
# load subject data (test and train)
if (!exists("test_subjects")) {
  test_subjects<-read.table(file = "test/subject_test.txt")
}
if (!exists("train_subjects")) {
  train_subjects<-read.table(file = "train/subject_train.txt")
}
# load measurements (test and train)
if (!exists("test_x")) {
  test_x<-read.table(file = "test/X_test.txt")
}
if (!exists("test_y")) {
  test_y<-read.table(file = "test/y_test.txt")
}
if (!exists("train_x")) {
  train_x<-read.table(file = "train/X_train.txt")
}
if (!exists("train_y")) {
  train_y<-read.table(file = "train/y_train.txt")
}
# Create temporary test dataframe (subject, activity, measurements) and set column names
test_df<-data.frame(test_subjects,test_y,test_x,stringsAsFactors = TRUE)
colnames(test_df)<-c("Subject","Activity",as.vector(features[,2]))
# Create temporary train dataframe and set column names
train_df<-data.frame(train_subjects,train_y,train_x,stringsAsFactors = TRUE)
colnames(train_df)<-c("Subject","Activity",as.vector(features[,2]))
# Merge test and train dataframes (row-wise; columns are identical)
df<-rbind(test_df,train_df)
# Adjust activity names on data frame as per labels (integer code -> descriptive name)
df$Activity<-activity_labels[df$Activity,2]
# Select columns of interest as per project instructions.
# NOTE(review): "mean|std" also matches meanFreq() variables — confirm those
# are meant to be included.
cols<-as.vector(features[grep("mean|std",features[,2]),2])
# Reduce main data frame to columns of interest
df<-df[,c("Subject","Activity",cols)]
# Generate new summary data frame with means for variables per subject per activity
newdf<-summarise_each(group_by(df,Subject,Activity),funs="mean")
# back to original directory
setwd(curdir)
# Write new dataset to disk
write.table(newdf,file = "tidydataset.txt", row.names = FALSE)
# return the new dataset
print(as.data.frame(newdf))
b22baa5f709478ab166f5c6294a4358024f51a91 | 27afc60193cec4647201cdc2967351c7a6738fff | /EDA-DR-MDS.R | 22308cc56e0d6e6d4d0c0de10739f4de7e1cc180 | [] | no_license | hyginn/BCB410-DataScience | ad7a04906b901e1a0984b4283fb54dcfdc94597b | eee2dcaa11d789e8e3b0b0791cfb163054074af4 | refs/heads/master | 2021-08-23T23:57:59.736964 | 2017-12-07T05:15:20 | 2017-12-07T05:15:20 | 105,763,164 | 1 | 2 | null | 2017-12-07T05:15:21 | 2017-10-04T12:00:59 | R | UTF-8 | R | false | false | 10,425 | r | EDA-DR-MDS.R | #EDA-DR-MDS.R
#
# Purpose: A Bioinformatics Course:
# R code accompanying the EDA-DR-MDS unit.
#
# Version: 1
#
# Date: 2017 12 06
# Author: Ricardo Ramnarine (ricardo.ramnarine@mail.utoronto.ca)
#
# Versions:
# 0.1 (Initial - skeleton template)
# 0.2 (Added section for dependencies of unit)
# 0.3 (Added section 2: Multidimensional Scaling)
# 0.4 (Added section 9: Multidimensional Scaling with Biological relevance)
# 0.5 (Added section 2.2: Nonmetric Multidimensional Scaling)
# 0.6 (Added section 9: Multidimensional Scaling with Biological relevance)
# 1 (Updated sections according to feedback received)
#
#
#
#
# == DO NOT SIMPLY source() THIS FILE! =======================================
#
# If there are portions you don't understand, use R's help system, Google for an
# answer, or ask your instructor. Don't continue if you don't understand what's
# going on. That's not how it works ...
#
# ==============================================================================
# = 1 Dependencies
#
# NOTE(review): three defects in the original dependency block are fixed here:
#   (1) biocLite() was called (for Biobase/parallel) BEFORE biocLite.R was
#       sourced, so those installs failed on a fresh session — the source()
#       call is now moved ahead of its first use;
#   (2) MASS was installed via devtools::install_github("MASS"); MASS is a
#       CRAN package, so install.packages() is correct;
#   (3) a duplicated ggplot2 block attached the misspelled package "ggplot2l"
#       and has been removed.
if (!require(ggplot2, quietly=TRUE)) {
  install.packages("ggplot2")
  library(ggplot2)
}
if (!require(devtools, quietly=TRUE)) {
  install.packages("devtools")
  library(devtools)
}
if (!require(MASS, quietly=TRUE)) {
  install.packages("MASS")  # MASS is on CRAN, not GitHub
  library(MASS)
}
if(!require(scatterplot3d, quietly=TRUE)) {
  install.packages("scatterplot3d")
}
# Source the Bioconductor installer BEFORE using biocLite() below
source("https://bioconductor.org/biocLite.R")
if (!require(Biobase, quietly=TRUE)) {
  biocLite("Biobase")
  library(Biobase)
}
if (!require(parallel, quietly=TRUE)) {
  biocLite("parallel")
  library(parallel)
}
#Load Libraries (no-ops if already attached above)
library("scatterplot3d")
library(MASS)
#Load Data
load("./data/GSE3635.RData")
# = 1.1 Multidimensional Scaling (MDS) Dimension Reduction techniques
# Read this section on the wiki page and then come back and work through the example.
#####################################################
# = 2 Multidimensional Scaling
# = 2.1 Classical/Metric Multidimensional Scaling
#Let's create a matrix that satisfies the euclidean distance matrix so we can apply classical MDS.
#(zeros on the diagonal, equal distance 1 between every pair of points)
set.seed(50)
euclidean_distance_matrix <- matrix(1, nrow = 4, ncol = 4)
euclidean_distance_matrix[row(euclidean_distance_matrix) == col(euclidean_distance_matrix)] <- 0
euclidean_distance_matrix
#Let's calculate the distance matrix
euclidean_distance <- dist(euclidean_distance_matrix)
euclidean_distance
#The Following example is adapted from Lecture 8: Multidimensional scaling by Sungkyu Jung, a Professor at the University of Pittsburgh
#which is cited here http://steipe.biochemistry.utoronto.ca/abc/students/index.php/EDA-DR-MDS#Sources
#Great, now let's apply metric multidimensional scaling to this.
#R has a built in function called cmdscale that applies Classical MDS;
#k = 3 asks for a 3-dimensional embedding, eig = TRUE returns eigenvalues too.
euclidean <- cmdscale(euclidean_distance,eig=TRUE, k=3)
euclidean
# The three embedding coordinates of each of the four points
x <- euclidean$points[,1]
y <- euclidean$points[,2]
z <- euclidean$points[,3]
#scatterplot3d is from library scatterplot3D
euclidean_plot <- scatterplot3d(x=x, y=y, z=z, color="blue", main = "Euclidean Distance Matrix MDS")
# Convert each 3-D point into 2-D plot coordinates so edges can be drawn
p1 <- euclidean_plot$xyz.convert(x[1],y[1],z[1])
p2 <- euclidean_plot$xyz.convert(x[2],y[2],z[2])
p3 <- euclidean_plot$xyz.convert(x[3],y[3],z[3])
p4 <- euclidean_plot$xyz.convert(x[4],y[4],z[4])
#Can you guess what shape it's making?
#(All six pairwise distances are equal, so the points are equidistant.)
segments(p1$x,p1$y,p2$x,p2$y,lwd=2,col=2)
segments(p1$x,p1$y,p3$x,p3$y,lwd=2,col=2)
segments(p1$x,p1$y,p4$x,p4$y,lwd=2,col=2)
segments(p2$x,p2$y,p3$x,p3$y,lwd=2,col=2)
segments(p2$x,p2$y,p4$x,p4$y,lwd=2,col=2)
segments(p3$x,p3$y,p4$x,p4$y,lwd=2,col=2)
#We made a tetrahedron!
# 2.2 = Nonmetric Multidimensional Scaling
#Let's use the same data Boris used in his DR-PCA example. The data "crabs" contains morphological
#measurements of the species "Leptograpsus variegatus" from Fremantle, W. Australia.
#The columns contain [species, sex, index, FL, RW, CL, CW, and BD] in that order which represent:
#species := species type. "B" for Blue, "O" for Orange
#sex := gender. "M" for Male, "F" for Female
#index := 1 <= index <= 50, the index of the particular crab within each of the
#         four groups (B + F, B + M, O + F, O + M)
#FL := frontal lobe size
#RW := rear width
#CL := carapace length
#CW := carapace width
#BD := body depth
#For more information type ?crabs in the console.
set.seed(100)
#crabs is from library MASS
data(crabs)
measurements <- crabs
#have a look at our data called measurements
measurements
#Let's create a distance matrix using the built in function dist().
#Notice we remove the species and sex columns (non-numeric labels).
dist_mat<- dist(measurements[c(-1,-2)])
#sanity check that dist_mat is of type "dist", a distance structure
class(dist_mat)
#Look at the data in dist_mat.
dist_mat
#The input was not itself a euclidean distance matrix (it wasn't a symmetrical
#matrix with zero diagonal), so we will apply nonmetric MDS.
#isoMDS is a function that comes with package MASS.
#It performs Kruskal's non-metric MDS on a distance structure.
#Parameter k is the desired dimension for the solution.
result <- isoMDS(dist_mat, k=2)
result
#isoMDS reports stress as a percentage; below 5 (i.e. < 5%) is good or better.
#Notice the stress value of ~0.78 — well under 5%.
#Don't remember how to interpret the result?
#Refer to the wiki section "How MDS Works".
#Let's create useful labels, i.e. factors that characterize which group each
#crab belongs to (ex. B&F, B&M, O&F, O&M)
result_frame <- as.data.frame(result)
#remove third column (the scalar stress value repeated on every row)
result_frame <- result_frame[-3]
descriptions <- paste(measurements[,1], measurements[,2], sep="&")
#let's create some factors so we can categorize variables in the graph
Groups <- factor(descriptions)
#sanity check
Groups
#plot the 2-D embedding using ggplot, colored/shaped by group
ggplot(result_frame, aes(x=points.1, y=points.2, group=Groups)) +
  geom_point(aes(shape=Groups, color=Groups))
#Notice how crabs from the same group tend to cluster closely together.
#This suggests they share more characteristics between each other than other crabs.
#9 = Multidimensional Scaling with Biological relevance
#Let's now try to apply MDS techniques to our Yeast cycle data (GSE3635).
set.seed(30)
#Step 1: extract the expression matrix and keep only the features whose
#systematic names start with "YAR" (genes on chromosome I, right arm).
GSE <- exprs(GSE3635)
V2 <- substr(featureNames(GSE3635), 0, 3) == 'YAR'
V2
YAR <- GSE[V2,]
YAR
#sanity check
dim(YAR)
class(YAR)
#calculate distance matrix between the YAR expression profiles
YAR_DIST <- dist(YAR)
#sanity check
YAR_DIST
#The input was not itself a euclidean distance matrix, so we apply
#Kruskal's non-metric MDS on the distance structure.
#Parameter k is the desired dimension for the solution.
result_2D <- isoMDS(YAR_DIST, k=2)
result_2D
#our stress value is ~11.9% which is too high. Thus, to get more accurate results,
#we must increase the number of dimensions.
#Of course, visualizing and understanding higher than a 3 dimensional plot isn't realistic.
#Since visualizing 7 dimensions isn't very helpful, let's first look at the 2D
#results anyway, then try 3D out of curiosity.
YAR_2D <- as.data.frame(result_2D)
# NOTE(review): this data frame also carries the scalar `stress` column from
# isoMDS; it is harmless here because only points.1/points.2 are plotted.
YAR_2D
#create factor with only features that start with YAR from GSE3635
fact <- featureNames(GSE3635)[substr(featureNames(GSE3635), 0, 3) == 'YAR']
#sanity check
fact
#plot it
ggplot(YAR_2D, aes(x=points.1, y=points.2, group=fact)) +
  geom_point()
#From the plot above, we can't really say much because it's cargo cult. The data isn't reliable with a high stress value.
#Let's also plot it in 3D
result_3D <- isoMDS(YAR_DIST, k=3)
result_3D
x_3 <- result_3D$points[,1]
y_3 <- result_3D$points[,2]
z_3 <- result_3D$points[,3]
mystery <- scatterplot3d(x=x_3, y=y_3, z=z_3, color="blue", main = "YAR MDS")
# NOTE(review): the line below is leftover from the crabs example (section 2.2)
# and its result is unused here — candidate for removal.
descriptions <- paste(measurements[,1], measurements[,2], sep="&")
#again, can't say much about the data based on these results since a high stress value adds more distortion.
# = 10 Tasks to do
# = 10.1 First task: Mystery Matrix
#Given the Matrix mystery_matrix below, run an appropriate MDS algorithm.
mystery_matrix <- matrix(1, nrow = 4, ncol = 4)
mystery_matrix[row(mystery_matrix) == col(mystery_matrix)] <- 0
mystery_matrix[row(mystery_matrix) == col(mystery_matrix)+1] <- 3
mystery_matrix[row(mystery_matrix)+1 == col(mystery_matrix)] <- 3
mystery_matrix[row(mystery_matrix)+2 == col(mystery_matrix)] <- 5
mystery_matrix[row(mystery_matrix) == col(mystery_matrix)+2] <- 5
mystery_matrix
# 10.2 Task 2: The Plot Thickens...
#Plot the results of your mystery matrix
# 10.3 Task 3: Interpret
# Interpret the results of your mystery matrix
# = 10.4 Task 4: Stressing out? Read more to find out one great way to reduce stress.
#For the biological data, figure out what dimension would give us a "good" stress value.
# = 99 Task solutions
# = 99.1 First task: Mystery Matrix
# Hopefully you noticed this matrix was a Euclidean Distance matrix, so we will use cmdscale.
mystery_matrix <- dist(mystery_matrix)
mystery_matrix
mystery <- cmdscale(mystery_matrix,eig=TRUE, k=3)
mystery
# = 99.2 Task 2: The Plot Thickens...
#In fact, this matrix is suspiciously similar the example I used...
x_3 <- mystery$points[,1]
y_3 <- mystery$points[,2]
z_3 <- mystery$points[,3]
mystery <- scatterplot3d(x=x_3, y=y_3, z=z_3, color="blue", main = "Mystery Matrix MDS")
p1 <- mystery$xyz.convert(x_3[1],y_3[1],z_3[1])
p2 <- mystery$xyz.convert(x_3[2],y_3[2],z_3[2])
p3 <- mystery$xyz.convert(x_3[3],y_3[3],z_3[3])
p4 <- mystery$xyz.convert(x_3[4],y_3[4],z_3[4])
segments(p1$x,p1$y,p2$x,p2$y,lwd=2,col=2)
segments(p1$x,p1$y,p3$x,p3$y,lwd=2,col=2)
segments(p1$x,p1$y,p4$x,p4$y,lwd=2,col=2)
segments(p2$x,p2$y,p3$x,p3$y,lwd=2,col=2)
segments(p2$x,p2$y,p4$x,p4$y,lwd=2,col=2)
segments(p3$x,p3$y,p4$x,p4$y,lwd=2,col=2)
#10.3 Task 3: Interpret
#We made a tetrahedron again! Notice the tetrahedron has shifted its position from my original example,
#but the shape is the same! The orientation of the points don't matter because the axes have
#no significance.
# = 99.4 Task 4: Stressing out? Read more to find out one great way to reduce stress.
#A good stress value is under 5%. Let's check k=6.
temp <- isoMDS(YAR_DIST, k=6)
temp
#no good, but it turns out that 7 works.
good_result <- isoMDS(YAR_DIST, k=7)
good_result
# [END]
|
9a829d7836c42d7f204a9f76b192b0eacbaa97c1 | 38e22dcf20dd6e9b2cd745c1871c318b238c27ca | /R/nonexportedSpdepFuns.R | 4610b2977e4973203e1dd6785f1f236b9e225531 | [] | no_license | cran/splm | 5f6d21bf534ea13dd96e371a41f8f2e698cb2ba0 | 62b73c011ed69da35c92d4cf713feeb8871bb9d1 | refs/heads/master | 2023-08-02T12:21:20.641346 | 2023-07-20T16:00:02 | 2023-07-20T17:31:05 | 17,700,055 | 10 | 9 | null | 2017-12-05T04:27:11 | 2014-03-13T06:31:08 | R | UTF-8 | R | false | false | 7,321 | r | nonexportedSpdepFuns.R | ## from spdep_0.5-74, copies of non-exported functions
# Decide whether a listw object's weights can be symmetrised ("simmed").
# Returns FALSE when the neighbour list itself is asymmetric; for symmetric
# neighbours, "general"-mode weights are simmable only when the supplied
# glist was flagged symmetric, and all other modes are simmable (TRUE).
can.be.simmed <- function(listw) {
    nb_symmetric <- is.symmetric.nb(listw$neighbours, FALSE)
    if (!nb_symmetric) {
        return(nb_symmetric)
    }
    if (attr(listw$weights, "mode") == "general") {
        return(attr(listw$weights, "glistsym"))
    }
    nb_symmetric
}
# Prepare the environment `env` for one of the supported log-determinant
# (Jacobian) computation/approximation methods, dispatching to the matching
# *_setup() helper, and return the interval of coefficient values to search.
#
# method   - name of the set-up routine to dispatch on (see switch() below).
# env      - environment holding "listw", "verbose", "can.sim", ... which the
#            *_setup() helpers populate with method-specific state.
# con      - list of control parameters (Imult, super, spamPivot, MC_p, MC_m,
#            cheb_q, SE_method, nrho, ...); only the fields relevant to the
#            chosen method are read.
# pre_eig  - optional pre-computed eigenvalues ("eigen" method only).
# trs      - optional pre-computed traces ("moments" method only).
# interval - optional search interval; when NULL each branch installs its
#            method-specific default.
# which    - which weights object to set up (forwarded to the helpers).
#
# Several branches require symmetric (or symmetrisable) weights and stop()
# otherwise; "MC" and the SE_* grid methods with SE_method == "MC" require
# row-standardised ("W") weights.
jacobianSetup <- function (method, env, con, pre_eig = NULL, trs = NULL, interval = NULL,
    which = 1)
{
    switch(method, eigen = {
        # Exact method based on the eigenvalues of the weights matrix; the
        # default interval is the (open) reciprocal eigenvalue range.
        if (get("verbose", envir = env)) cat("neighbourhood matrix eigenvalues\n")
        if (is.null(pre_eig)) {
            eigen_setup(env, which = which)
        } else {
            eigen_pre_setup(env, pre_eig = pre_eig, which = which)
        }
        er <- get("eig.range", envir = env)
        if (is.null(interval)) interval <- c(er[1] + .Machine$double.eps,
            er[2] - .Machine$double.eps)
    }, Matrix = {
        # Sparse Cholesky via the Matrix package; needs symmetric weights.
        if (get("listw", envir = env)$style %in% c("W", "S") &&
            !get("can.sim", envir = env)) stop("Matrix method requires symmetric weights")
        if (get("listw", envir = env)$style %in% c("B", "C",
            "U") && !(is.symmetric.glist(get("listw", envir = env)$neighbours,
            get("listw", envir = env)$weights))) stop("Matrix method requires symmetric weights")
        if (get("verbose", envir = env)) cat("sparse matrix Cholesky decomposition\n")
        Imult <- con$Imult
        if (is.null(interval)) {
            if (get("listw", envir = env)$style == "B") {
                # Binary weights: enlarge Imult from the row sums and use a
                # narrower default search interval.
                Imult <- ceiling((2/3) * max(sapply(get("listw",
                    envir = env)$weights, sum)))
                interval <- c(-0.5, +0.25)
            } else interval <- c(-1, 0.999)
        }
        if (is.null(con$super)) con$super <- as.logical(NA)
        Matrix_setup(env, Imult, con$super, which = which)
    }, Matrix_J = {
        # Variant of the Matrix method (no Imult handling, super defaults to
        # FALSE); same symmetry requirements.
        if (get("listw", envir = env)$style %in% c("W", "S") &&
            !get("can.sim", envir = env)) stop("Matrix method requires symmetric weights")
        if (get("listw", envir = env)$style %in% c("B", "C",
            "U") && !(is.symmetric.glist(get("listw", envir = env)$neighbours,
            get("listw", envir = env)$weights))) stop("Matrix method requires symmetric weights")
        if (get("verbose", envir = env)) cat("sparse matrix Cholesky decomposition\n")
        if (is.null(interval)) {
            if (get("listw", envir = env)$style == "B") {
                interval <- c(-0.5, +0.25)
            } else interval <- c(-1, 0.999)
        }
        if (is.null(con$super)) con$super <- FALSE
        Matrix_J_setup(env, super = con$super, which = which)
    }, spam = {
        # Sparse Cholesky via the spam package; needs symmetric weights.
        ##if (!require(spam)) stop("spam not available") # spam is imported
        if (get("listw", envir = env)$style %in% c("W", "S") &&
            !get("can.sim", envir = env)) stop("spam method requires symmetric weights")
        if (get("listw", envir = env)$style %in% c("B", "C",
            "U") && !(is.symmetric.glist(get("listw", envir = env)$neighbours,
            get("listw", envir = env)$weights))) stop("spam method requires symmetric weights")
        if (get("verbose", envir = env)) cat("sparse matrix Cholesky decomposition\n")
        spam_setup(env, pivot = con$spamPivot, which = which)
        if (is.null(interval)) interval <- c(-1, 0.999)
    }, spam_update = {
        # spam variant that updates a factorisation at a starting coefficient.
        ##if (!require(spam)) stop("spam not available") # idem
        if (get("listw", envir = env)$style %in% c("W", "S") &&
            !get("can.sim", envir = env)) stop("spam method requires symmetric weights")
        if (get("listw", envir = env)$style %in% c("B", "C",
            "U") && !(is.symmetric.glist(get("listw", envir = env)$neighbours,
            get("listw", envir = env)$weights))) stop("spam method requires symmetric weights")
        if (get("verbose", envir = env)) cat("sparse matrix Cholesky decomposition\n")
        spam_update_setup(env, in_coef = con$in_coef, pivot = con$spamPivot,
            which = which)
        if (is.null(interval)) interval <- c(-1, 0.999)
    }, Chebyshev = {
        # Chebyshev-polynomial approximation of order con$cheb_q; needs
        # symmetric weights.
        if (get("listw", envir = env)$style %in% c("W", "S") &&
            !get("can.sim", envir = env)) stop("Chebyshev method requires symmetric weights")
        if (get("listw", envir = env)$style %in% c("B", "C",
            "U") && !(is.symmetric.glist(get("listw", envir = env)$neighbours,
            get("listw", envir = env)$weights))) stop("Chebyshev method requires symmetric weights")
        if (get("verbose", envir = env)) cat("sparse matrix Chebyshev approximation\n")
        cheb_setup(env, q = con$cheb_q, which = which)
        if (is.null(interval)) interval <- c(-1, 0.999)
    }, MC = {
        # Monte Carlo approximation; only valid for row-standardised weights.
        if (!get("listw", envir = env)$style %in% c("W")) stop("MC method requires row-standardised weights")
        if (get("verbose", envir = env)) cat("sparse matrix Monte Carlo approximation\n")
        mcdet_setup(env, p = con$MC_p, m = con$MC_m, which = which)
        if (is.null(interval)) interval <- c(-1, 0.999)
    }, LU = {
        # Sparse LU decomposition.
        if (get("verbose", envir = env)) cat("sparse matrix LU decomposition\n")
        LU_setup(env, which = which)
        if (is.null(interval)) interval <- c(-1, 0.999)
    }, LU_prepermutate = {
        # LU with a pre-computed permutation (order/coef from `con`).
        if (get("verbose", envir = env)) cat("sparse matrix LU decomposition\n")
        LU_prepermutate_setup(env, coef = con$in_coef, order = con$LU_order,
            which = which)
        if (is.null(interval)) interval <- c(-1, 0.999)
    }, moments = {
        # Smirnov/Anselin (2009) trace-based approximation, optionally using
        # pre-computed traces `trs`.
        if (get("verbose", envir = env)) cat("Smirnov/Anselin (2009) trace approximation\n")
        moments_setup(env, trs = trs, m = con$MC_m, p = con$MC_p,
            type = con$type, correct = con$correct, trunc = con$trunc,
            which = which)
        if (is.null(interval)) interval <- c(-1, 0.999)
    }, SE_classic = {
        # Spatial Econometrics toolbox style: log-determinants on a classic
        # grid of coefficient values, interpolated at evaluation time.
        if (get("verbose", envir = env)) cat("SE toolbox classic grid\n")
        if (is.null(interval)) interval <- c(-1, 0.999)
        if (con$SE_method == "MC" && !get("listw", envir = env)$style %in%
            c("W")) stop("MC method requires row-standardised weights")
        SE_classic_setup(env, SE_method = con$SE_method, p = con$MC_p,
            m = con$MC_m, nrho = con$nrho, interpn = con$interpn,
            interval = interval, SElndet = con$SElndet, which = which)
    }, SE_whichMin = {
        # SE toolbox grid, picking the nearest grid value (which.min).
        if (get("verbose", envir = env)) cat("SE toolbox which.min grid\n")
        if (is.null(interval)) interval <- c(-1, 0.999)
        if (con$SE_method == "MC" && !get("listw", envir = env)$style %in%
            c("W")) stop("MC method requires row-standardised weights")
        SE_whichMin_setup(env, SE_method = con$SE_method, p = con$MC_p,
            m = con$MC_m, nrho = con$nrho, interpn = con$interpn,
            interval = interval, SElndet = con$SElndet, which = which)
    }, SE_interp = {
        # SE toolbox grid with interpolation (note: the verbose message is
        # shared with SE_whichMin in the original source).
        if (get("verbose", envir = env)) cat("SE toolbox which.min grid\n")
        if (is.null(interval)) interval <- c(-1, 0.999)
        if (con$SE_method == "MC" && !get("listw", envir = env)$style %in%
            c("W")) stop("MC method requires row-standardised weights")
        SE_interp_setup(env, SE_method = con$SE_method, p = con$MC_p,
            m = con$MC_m, nrho = con$nrho, interval = interval,
            which = which)
    }, stop("...\n\nUnknown method\n"))
    interval
}
|
7e259a44c13e08e3b96c4efb63ae36138ecbfe6a | 9fab87354090fa1938a89a81eef82019dd7ad8a0 | /assignments/assignment_1/corr.R | 4b4ebb331a129a6d51a515ab8b6ae6d71e8a39fa | [] | no_license | mwoitek/r_programming_johns_hopkins_2 | e53db114b3e1a383a9baf1524b367079e537a3d5 | f4268b9559e8a67e9d39045aa459c6d458d7be19 | refs/heads/master | 2023-02-13T09:50:43.125499 | 2021-01-19T19:13:25 | 2021-01-19T19:13:25 | 327,976,849 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 888 | r | corr.R | corr <- function(directory, threshold = 0) {
# "directory" is a character vector of length 1. It stores the location of
# the csv files.
# "threshold" is a numeric vector of length 1. It stores the number of
# complete observations required to compute the correlation between nitrate
# and sulfate measurements. The default value of "threshold" is 0.
# This function returns a numeric vector containing the correlations.
corrs <- vector(mode = "numeric", length = 0)
for (monitor in 1:332) {
file_name <- paste(sprintf("%.3d", monitor), "csv", sep = ".")
dat1mon <- read.csv(file.path(directory, file_name))
cobs <- dat1mon[complete.cases(dat1mon), ]
if (nrow(cobs) >= threshold) {
correlation <- cor(cobs["nitrate"], cobs["sulfate"])
corrs <- c(corrs, correlation)
}
}
corrs
}
|
94668167c202efa0de6ed394e7573834b8d211c8 | 2a8b63ffbe4c3237c14fefd16bc2efb557e23b31 | /R/bq_groups.R | 1637cd95032afeba929db1e7eb6b9704ca521221 | [] | no_license | davidrubinger/briqr | 4a9f2670e7b67868ed668f222a03fa7630e95ae1 | 2e27316fbb7f2bda8eb3ed1e0a6f8096c21a7b0f | refs/heads/master | 2020-05-02T21:59:31.816652 | 2019-07-27T17:13:09 | 2019-07-27T17:13:09 | 178,237,580 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 991 | r | bq_groups.R | #' Briq user groups
#'
#' List all the 'Briq' user groups of your organization
#'
#' @param organization Name of your Briq organization
#' @param api_token Briq API token
#'
#' @return Returns a tibble of Briq user groups of your organization
#' @export
#' @examples
#' \dontrun{
#' # Read organization and api_token from .Renviron file
#' bq_groups()
#'
#' # Manually enter in organization and api_token
#' bq_groups(organization = "My Org", api_token = "xYz123")
#' }
bq_groups <- function (organization = Sys.getenv("organization_name"),
                       api_token = Sys.getenv("briq_api_token")) {
  # Fail fast when either the organization name or the API token is missing.
  if (organization == "") stop("organization is an empty string")
  if (api_token == "") stop("api_token is an empty string")

  # Briq's API expects basic auth with the token as the user name and a
  # blank password.
  endpoint <- paste0("https://www.givebriq.com/v0/organizations/",
                     organization, "/groups")
  credentials <- httr::authenticate(user = api_token, password = "")
  resp <- httr::GET(url = endpoint, config = credentials)

  # Convert the HTTP response into a tibble (package-internal helper).
  resp_to_tbl(resp)
}
|
a4a99f21306cf4a6a9ae3277dc1e97ac204c55c2 | 4f2ee7e68e9261fee8a99e6fb087afb456f7965b | /inst/doc/diagis.R | 29a01b010abd451da00b37deabe405266ad6eaf2 | [] | no_license | cran/diagis | 9859ff2b99267e5c5b582313bd4998c352fefc13 | 8da65154d85c51419469b28c27088ba24db61b5f | refs/heads/master | 2022-01-01T01:31:36.106340 | 2021-11-29T15:10:02 | 2021-11-29T15:10:02 | 72,307,787 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 763 | r | diagis.R | ## ----setup, include=FALSE-----------------------------------------------------
# Generated from the diagis vignette (knitr chunk extraction); the
# "## ----name----" marker comments delimit the original code chunks.
knitr::opts_chunk$set(echo = TRUE)
## ----finite_var---------------------------------------------------------------
# Importance sampling for a gamma(2, 1) target using a gamma(1, 0.75)
# proposal; the chunk name indicates the weights have finite variance, so
# the weighted mean behaves well.
library("diagis")
set.seed(1)
x <- rgamma(10000, 1, 0.75)
w <- dgamma(x, 2, 1) / dgamma(x, 1, 0.75)
plot(w)
weighted_mean(x, w)
## ----infinite_var-------------------------------------------------------------
# Same target with a gamma(1, 2) proposal; per the chunk name the weights
# have infinite variance, so the estimate is unreliable.
set.seed(1)
x_bad <- rgamma(10000, 1, 2)
w_bad <- dgamma(x_bad, 2, 1) / dgamma(x_bad, 1, 2)
plot(w_bad)
weighted_mean(x_bad, w_bad)
## ----weight_plot--------------------------------------------------------------
# Diagnostic plot of the well-behaved weights.
weight_plot(w)
## ----weight_plot_bad----------------------------------------------------------
# Diagnostic plot of the heavy-tailed weights.
weight_plot(w_bad)
|
b88f549cb1c5a9ae194a846eeaf98b9b83c27bce | 50379b138496f26b30ee2f3d8efeb6ac4774baf4 | /R/block_sizes.R | 65906fedc6250e73e02cc43532f840e0dc482120 | [] | no_license | russHyde/bobbins | 3e822ac8ac0a980c32c486f5a51efe243d31754c | 4df0b7f607b32330a2ce7614bb23694db4e5768a | refs/heads/master | 2020-07-25T08:28:16.742941 | 2019-09-13T11:00:01 | 2019-09-13T11:00:01 | 208,230,377 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 427 | r | block_sizes.R | #'
block_sizes <- function(files, mode = c("tokens", "lines")) {
  # For each code block found in `files`, return a data frame holding both
  # the number of (non-trivial) tokens and the number of (non-comment /
  # non-whitespace) lines of code, rather than choosing between the two.
  # NOTE(review): `mode` is currently unused -- the function always reports
  # token counts; confirm whether per-line counts are still planned.
  # Relies on dupree's internal preprocessing; min_block_size = 0 keeps
  # every block.
  tokens <- dupree:::preprocess_code_blocks(files, min_block_size = 0)
  tokens@blocks[, c("file", "block", "start_line", "block_size")] %>%
    dplyr::mutate(n_tokens = block_size)
}
|
a1e006da1489b5c2529b59dcb9899e336ce16fa7 | 4163b2ff26b31e00b8d73da528d5c881d94108e3 | /R/SEQUOIA.R | 1b8461ea8b68bf48e16a9b0fd26111102d5bd235 | [] | no_license | VB6Hobbyst7/R_SEQUOIA | 6cfdb9586036efe7bac44857aee0eed8515d1e85 | def0603d5bb699a3cfc04ea9efd91c190d1c2a9d | refs/heads/master | 2023-04-26T08:45:06.117938 | 2021-05-27T15:21:47 | 2021-05-27T15:21:47 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,179 | r | SEQUOIA.R | SEQUOIA <- function(enrg=FALSE) {
  # Load every required package, installing any that are missing first.
  packages <- c("devtools", "prettydoc",
                "data.table", "dplyr", "plyr", "foreign",
                "gdalUtils", "lwgeom", "osmdata", "R.utils",
                "raster", "RCurl", "readxl", "rlang", "rlist",
                "rvest", "sf", "smoothr", "stringr", "tcltk",
                "tidyverse", "openxlsx", "XML", "xml2", "units",
                "measurements", "nngeo", "sp", "elevatr", "svGUI", "installr",
                "ClimClass", "geosphere", "plotly", "stars")
  package.check <- lapply(
    packages,
    FUN = function(x) {
      if (!require(x, character.only = TRUE)) {
        install.packages(x, dependencies = TRUE)
        library(x, character.only = TRUE)
      }
    }
  )
  vers <- packageVersion("SEQUOIA")
  # Push previous output off-screen with blank lines, then greet the user
  # (all user-facing messages are intentionally in French).
  message("\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n ")
  message(paste0("SEQUOIA ", vers, " est prêt ! \n"))
  # Top-level menu: data downloads / point-mapping workflow / mapping tools.
  form <- c("Téléchargements de données",
            "Aide à la création d'une cartographie ponctuelle",
            "Outils cartographiques")
  RES1 <- select.list(form,
                      multiple = FALSE,
                      title = paste0("SEQUOIA ",
                                     vers,
                                     " : Prenez de la hauteur sur votre forêt !"),
                      graphics = T)
  if (!length(RES1)){stop("Aucune sélection effectuée > Traitement annulé \n")}
  if ("Téléchargements de données" %in% RES1) { # --- SEQUOIA data downloads ---
    message("Téléchargements de données")
    form <- c("1 Données INPN/INSEE ",
              "2 Données IGN ")
    RES3 <- select.list(form,
                        multiple = T,
                        title = "Quelles données voulez-vous ?",
                        graphics = T)
    if (!length(RES3)){stop("Aucune sélection effectuée > Traitement annulé \n")}
    if ("1 Données INPN/INSEE " %in% RES3) {
      message("1 Données INPN/INSEE")
      # Ask for the folder holding the .Rdata archives, then refresh the
      # INSEE and INPN snapshots there.
      repRdata <- tk_choose.dir(default = getwd(),
                                caption = "Choisir le répertoire des archives .Rdata")
      if (!length(repRdata)){stop("Traitement annulé","\n")}
      SEQUOIA:::INSEEtoRDATA(repRdata)
      SEQUOIA:::INPNtoRDATA(repRdata)
    }
    if ("2 Données IGN " %in% RES3) {
      message("2 Données IGN")
      SEQUOIA:::LOADfromIGN(F)
    }
  } # --- End of SEQUOIA data downloads ---
  if ("Aide à la création d'une cartographie ponctuelle" %in% RES1) { # --- Point-mapping workflow ---
    form <- c("1 Conversion .html vers .xlsx",
              "2 Création PARCA",
              "3 Création du fond vectoriel",
              "4 Création UA",
              "5 Conversion ROAD vers ROUTE",
              "6 Finalisation UA")
    RES2 <- select.list(form,
                        multiple = T,
                        title = "Aide à la création d'une cartographie ponctuelle",
                        graphics = T)
    if (!length(RES2)){stop("Aucune sélection effectuée > Traitement annulé \n")}
    if ("1 Conversion .html vers .xlsx" %in% RES2) {
      message("1 Conversion .html vers .xlsx")
      # NOTE(review): `repRdata` may have been set by the downloads step in
      # the same call; FALSE is passed when it does not exist.
      SEQUOIA:::HTMLtoXLSX(F, if(exists("repRdata")){repRdata}else{FALSE})
    }
    if ("2 Création PARCA" %in% RES2) {
      message("2 Création PARCA")
      SEQUOIA:::XLSXtoPARCA(F)
    }
    if ("3 Création du fond vectoriel" %in% RES2) {
      message("3 Création du fond vectoriel")
      # NOTE(review): `repPARCA` / `CODE` are looked up in the calling
      # environment -- presumably set by earlier steps; confirm.
      SEQUOIA:::CAGEF(if(exists("repPARCA")){repPARCA}else{FALSE},
                      if(exists("CODE")){CODE}else{1})
    }
    if ("4 Création UA" %in% RES2) {
      message("4 Création UA")
      SEQUOIA:::PARCAtoUA(F)
    }
    if ("5 Conversion ROAD vers ROUTE" %in% RES2) {
      message("5 Conversion ROAD vers ROUTE")
      SEQUOIA:::ROADtoROUTE(F)
    }
    if ("6 Finalisation UA" %in% RES2) {
      message("6 Finalisation UA")
      # NOTE(review): `Filters` appears unused -- the filter matrix is built
      # inline in the tk_choose.files() call below.
      Filters <- matrix(c("Shapefile", "*.shp"),1, 2, byrow = TRUE)
      repUA <- tk_choose.files(caption = "Choisir le fichier .shp des unités d'analyses (UA)",
                               filter = matrix(c("ESRI Shapefile", ".shp"), 1, 2, byrow = TRUE))
      if (!length(repUA)){stop("Traitement annulé \n")}
      SEQUOIA:::UAtoUA(repUA)
      # NOTE(review): `Erreurs` is not defined in this function -- presumably
      # a global set by UAtoUA(); confirm.
      if(Erreurs=="OK"){
        SEQUOIA:::UAtoSSPF(repUA)}
    }
  } # --- End of point-mapping workflow ---
  if ("Outils cartographiques" %in% RES1) { # --- Mapping tools ---
    form <- c("MNT sur shapefile",
              "Zonnage environnementaux",
              "MH sur shapefile",
              "AAC sur shapefile",
              "Création d'une fiche Climatologique",
              "Géologie sur shapefile",
              "BD Foret sur shapefile")
    RES3 <- select.list(form,
                        multiple = T,
                        title = "Outils cartographiques",
                        graphics = T)
    if (!length(RES3)){stop("Aucune sélection effectuée > Traitement annulé \n")}
    if ("MNT sur shapefile" %in% RES3) {
      if(!exists("repPARCA")) {repPARCA <- F}
      # NOTE(review): `NAME` is not defined in this function -- presumably a
      # global; confirm before relying on this branch.
      SEQUOIA:::MNTonSHP(repPARCA, NAME)
    }
    if ("MH sur shapefile" %in% RES3) {
      SEQUOIA:::MHonSHP(F)
    }
    if ("AAC sur shapefile" %in% RES3) {
      SEQUOIA:::AAConSHP(F)
    }
    if ("Zonnage environnementaux" %in% RES3) {
      if(!exists("repRdata")) {repRdata <- F}
      SEQUOIA:::INPNonSHP(F, repRdata)
    }
    if ("Création d'une fiche Climatologique" %in% RES3) {
      if(!exists("repPARCA")) {repPARCA <- F}
      SEQUOIA:::CLIMonSHP(repPARCA)
    }
    if ("Géologie sur shapefile" %in% RES3) {
      if(!exists("repPARCA")) {repPARCA <- F}
      SEQUOIA:::GEOLonSHP(F)
    }
    if ("BD Foret sur shapefile" %in% RES3) {
      if(!exists("repPARCA")) {repPARCA <- F}
      SEQUOIA:::BDFORETonSHP(F)
    }
  }
}
|
f45d898c3a5324f42300e4d564868c9c23c1c824 | d59b1245f1868090e259e8286feca5b61ebdcff3 | /Question 9/9.r | 31bf92a835a81a1a27043fa0fd6b50fb5e8bb4ef | [] | no_license | sawood14012/dalab | 1818100af639e8487be3c1a583e189dc1c53cb95 | fa62039430e6a4ca38eb63560ebfbe555dcc4fc0 | refs/heads/master | 2020-11-28T14:18:37.391371 | 2019-12-24T01:01:17 | 2019-12-24T01:01:17 | 229,845,448 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,348 | r | 9.r | library(digest)
library(bit)
# Reference class implementing a classic Bloom filter on top of a bit::bit
# vector, with digest::digest (seeded murmur32) providing the hash family.
BloomFilter <- setRefClass(
  "BloomFilter",
  fields = list(
    .fp_prob = "numeric",    # target false-positive probability
    .size = "integer",       # number of bits in the filter
    .hash_size = "integer",  # number of hash functions applied per item
    .bit_array = "ANY"       # bit::bit vector backing the filter
  ),
  methods = list(
    initialize = function(num_items, fp_prob)
    {
      # Size the filter for the expected number of items and the desired
      # false-positive rate, then allocate the underlying bit vector.
      .fp_prob <<- fp_prob
      .size <<- get_size(num_items, fp_prob)
      .hash_size <<- get_hash_size(.size, num_items)
      .bit_array <<- bit(.size)
    },
    get_size = function(n, p)
    {
      # Optimal bit count: m = -n * ln(p) / (ln 2)^2.
      m <- -(n * log(p)) / (log(2) ^ 2)
      return(as.integer(m))
    },
    get_hash_size = function(m, n)
    {
      # Optimal number of hash functions: k = (m / n) * ln 2.
      k <- (m / n) * log(2)
      return(as.integer(k))
    },
    get_hash = function(item, seed)
    {
      # Hash the item with murmur32; each seed acts as an independent hash
      # function.  The hex digest is parsed as a number, then mapped onto a
      # bit position.
      hash_digest <- digest(
        object = item,
        serialize = FALSE,
        seed = seed,
        algo = "murmur32"
      )
      hash_value <- as.numeric(paste("0x", hash_digest, sep = ""))
      # Bug fix: `hash_value %% .size` lies in 0 .. .size - 1, but R vectors
      # are 1-indexed.  An index of 0 made add() silently drop the bit and
      # made check() fail on a zero-length subset, so shift to 1 .. .size.
      return((hash_value %% .size) + 1)
    },
    add = function(item)
    {
      # Set every bit position produced by the k seeded hashes.
      for (i in seq_len(.hash_size))
      {
        .bit_array[get_hash(item, i)] <<- TRUE
      }
    },
    check = function(item)
    {
      # The item is possibly present only if ALL k bits are set; a single
      # unset bit proves it was never added (Bloom filters have no false
      # negatives).
      for (i in seq_len(.hash_size))
      {
        if (.bit_array[get_hash(item, i)] == FALSE)
          return(FALSE)
      }
      return(TRUE)
    }
  )
)
# Demo: a filter sized for 20 items at a 5 % false-positive rate.
bloom = BloomFilter$new(20, 0.05)
# Inspect the derived parameters: bit count and number of hash functions.
bloom$.size
bloom$.hash_size
# Insert one item, then query one that was never added.
bloom$add('harry')
bloom$check('barry')
bloom$check('harry') |
617c347a2fa191a9cff434847f279e0ed9e5a0eb | 71c3209f5f4c060876ad25ab8bc93fda0c46df95 | /inst/examples/titanic_example.R | 1775aa647e9f03043a8965579c27281556faedf1 | [] | no_license | timelyportfolio/SearchTree | cc67818ac86bdf8f1e080fbeb1c2d606a2cc5da3 | f15e28b4289bb575364cff829bc5b4944cea7c60 | refs/heads/master | 2020-06-15T07:46:11.398241 | 2016-12-05T15:16:25 | 2016-12-05T15:16:48 | 75,312,550 | 1 | 1 | null | 2016-12-03T03:37:24 | 2016-12-01T16:45:57 | JavaScript | UTF-8 | R | false | false | 325 | r | titanic_example.R | library(SearchTree)
# see if search tree is working as an htmlwidget
tit_d3 <- list(
root = SearchTree:::df2tree(as.data.frame(Titanic)),
layout = 'collapse'
)
ggtree(tit_d3)
library(d3r)
ggtree(
list(
root = d3r::d3_nest(
as.data.frame(Titanic)
),
layout = "collapse"
),
value = "colname"
)
|
ddd593f2a7899a5723b6fbd75ccf2a5e8c53a2cd | 14e19c36dba435df0a75158e14f7d00a0fa1096d | /R/tntTracks-compositeTrack.R | 7c49bb78d78653fdc1834f5b316a952a35c7b4e0 | [] | no_license | Marlin-Na/TnT | 2fc227de7cae7266886104d6009b0b36add6f2b2 | 386cf9fc3c53ab861c954b73b47b3c83be35ea89 | refs/heads/master | 2021-01-11T20:36:04.356306 | 2020-01-30T07:08:50 | 2020-01-30T07:08:50 | 79,152,363 | 16 | 3 | null | null | null | null | UTF-8 | R | false | false | 7,913 | r | tntTracks-compositeTrack.R |
#' @include tntTracks-compilation.R
setClass("CompositeTrackData", contains = c("list", "TrackData"))
setClass("CompositeTrack", contains = "TnTTrack", slots = c(Data = "CompositeTrackData"))
setValidity("CompositeTrackData",
function (object) {
if (!all(vapply(object, is, logical(1L), class2 = "RangeBasedTrack")))
return("All components of CompositeTrack should be RangeBasedTrack")
return(TRUE)
}
)
CompositeTrackData <- function (tracklist) {
new("CompositeTrackData", tracklist)
}
#' Composite Track
#'
#' Two or more arbitrary tracks can be used to create a composite track, by which
#' different features can be shown in the same track.
#'
#' @name composite-track
#' @aliases merge-track merge,TnTTrack,TnTTrack-method
#' @param x,y,... Track constructed with \link{track-constructors} or composite track.
#'
#' @return
#' Returns a "CompositeTrack" object.
#'
#' @seealso \url{http://tnt.marlin.pub/articles/examples/track-CompositeTrack.html}
#' @export
#' @examples
#' gr <- GRanges("chr1", IRanges(c(11000, 20000, 60000), width = 2000))
#' gpos <- GRanges("chr1", IRanges(c(12000, 21000, 61000), width = 1), value = c(1, 2, 3))
#' btrack <- BlockTrack(gr, label = "Block Track", tooltip = as.data.frame(gr), color = "lightblue4")
#' ptrack <- PinTrack(gpos, label = "Pin Track", tooltip = as.data.frame(gpos), background = "beige")
#'
#' ctrack <- merge(btrack, ptrack)
#' \dontrun{
#' TnTBoard(ctrack)
#' }
setMethod("merge", signature = c(x = "TnTTrack", y = "TnTTrack"),
function (x, y, ...) {
tracklist <- list(x, y, ...)
merge_tracklist(tracklist)
}
)
#' @rdname composite-track
setMethod("merge", signature = c(x = "TnTTrack", y = "missing"),
function (x, y, ...) {
tracklist <- list(x, ...)
merge_tracklist(tracklist)
}
)
merge_tracklist <- function (tracklist) {
for (i in seq_along(tracklist))
if (!(is(tracklist[[i]], "RangeBasedTrack") || is(tracklist[[i]], "CompositeTrack")))
stop("All tracks have to inherit either 'RangeBasedTrack' or 'CompositeTrack'")
tracklist <- as.list(tracklist)
which.comp <- vapply(tracklist, is, logical(1L), class2 = "CompositeTrack")
tracklist[which.comp] <- lapply(tracklist[which.comp], trackData)
tracklist <- c(tracklist, recursive = TRUE, use.names = FALSE)
tracklist <- .consolidateSeqinfo(tracklist)
.merge_tracklist <- function (tracklist) {
spec <- .mergeSpec(tracklist)
ans <- new("CompositeTrack", Data = CompositeTrackData(tracklist))
trackSpec(ans, which = names(spec)) <- spec
ans
}
.mergeSpec <- function (tracklist) {
labels <- unname(unlist(lapply(tracklist, trackSpec, which = "label")))
heights <- unname(unlist(lapply(tracklist, trackSpec, which = "height")))
backgrounds <- unname(unlist(lapply(tracklist, trackSpec, which = "background")))
stopifnot(is.atomic(labels), is.atomic(heights), is.atomic(backgrounds))
f <- function(x, w = c("label", "height", "background")) {
w <- match.arg(w)
if (!length(x))
return(NULL)
if (length(x) == 1L)
return(x)
if (w == "label")
return(paste(paste(x[-length(x)], collapse = ", "), x[length(x)], sep = " and "))
if (w == "height")
return(na.fail(max(na.omit(x))))
if (w == "background")
return(x[1])
stop("<internal> Unmatched argument")
}
list(
label = f(labels, "label"),
height = f(heights, "height"),
background = f(backgrounds, "background")
)
}
.merge_tracklist(tracklist)
}
.mkref <- function (l)
paste0("subtrack", seq_along(l))
setMethod("compileTrackData", signature = "CompositeTrackData",
function (trackData) {
li.t <- as.list(trackData)
li.retriever <- lapply(li.t,
function (t) {
cd <- compileTrackData(trackData(t))
stopifnot(
length(cd) == 2,
identical(names(cd), c("tnt.board.track.data.sync", "retriever"))
)
cd[[2]]
}
)
jc.retriever <- {
jc.init <- jc(tnr.composite_data_retriever = ma())
jc.add <- {
li.add <- mapply(ref = .mkref(li.t), func = li.retriever,
USE.NAMES = FALSE, SIMPLIFY = FALSE,
function (ref, func)
jc(add = ma(ref, func))
)
do.call(c, unname(li.add))
}
jc.end <- jc(done = ma())
c(jc.init, jc.add, jc.end)
}
jc(tnt.board.track.data.sync = ma(),
retriever = jc.retriever)
}
)
setMethod("wakeupTrack", signature = c(track = "CompositeTrack"),
function (track) {
li.track <- lapply(trackData(track), wakeupTrack)
li.disply <- lapply(li.track, function (t) asJC(t@Display))
refs <- .mkref(li.track)
l.init <- list(tnt.board.track.feature.composite = ma())
l.add <- {
l.add <- mapply(ref = refs, jc.dis = li.disply,
USE.NAMES = FALSE, SIMPLIFY = FALSE,
function (ref, jc.dis) {
list(add = ma(ref, jc.dis))
}
)
do.call(c, unname(l.add))
}
trackData(track) <- CompositeTrackData(li.track)
track@Display <- c(l.init, l.add)
track
}
)
#' @rdname seqinfo
setMethod("seqinfo", signature = "CompositeTrack",
function (x) .mergeSeqinfo(trackData(x))
)
#' @rdname seqinfo
setReplaceMethod("seqinfo", signature = c(x = "CompositeTrack"),
function (x, new2old, pruning.mode, value) {
## We need to make sure the sub-tracks have the same seqinfo, otherwise
## functions like `seqlevels<-` will not work correctly.
trackData(x) <- .consolidateSeqinfo(trackData(x))
li.tracks <- trackData(x)
for (i in seq_along(li.tracks))
seqinfo(li.tracks[[i]], new2old, pruning.mode) <- value
trackData(x) <- li.tracks
x
}
)
#' @rdname seqinfo
## The sequence levels in use by a composite track are the union of the
## levels actually used by each of its sub-tracks.
setMethod("seqlevelsInUse", signature = c(x = "CompositeTrack"),
    function (x) {
        used <- lapply(trackData(x), seqlevelsInUse)
        unique(unlist(used))
    }
)
#### range Methods ========
## Compute the overall genomic range of one or more tracks by extracting a
## GRanges from each track and delegating to GenomicRanges::range().
## Supported classes: RangeBasedTrack (ranges taken directly from its track
## data) and CompositeTrack (ranges pooled across its sub-tracks); anything
## else is an error.
.range.track <- function (x, ..., with.revmap = FALSE, ignore.strand=FALSE, na.rm=FALSE) {
    li.tracks <- list(x, ...)
    ## Concatenate a list of range-based objects into one GRanges,
    ## dropping names so c() does not complain about mismatches.
    joingr <- function (li.gr) {
        li.gr <- lapply(unname(li.gr), granges)
        do.call(c, li.gr)
    }
    li.gr <- lapply(unname(li.tracks), function (track) {
        if (is(track, "RangeBasedTrack"))
            return(granges(trackData(track)))
        if (is(track, "CompositeTrack")) {
            ## Two levels of trackData: composite -> sub-tracks -> data.
            lgr <- lapply(trackData(track), trackData)
            return(joingr(lgr))
        }
        ## NOTE(review): class(track) can have length > 1, in which case
        ## sprintf() vectorises and stop() concatenates the messages.
        stop(sprintf("Class %s is not supported.", class(track)))
    })
    ## Wrapper so the extra arguments are passed by name alongside the
    ## GRanges objects collected above.
    inner_call <- function (...) {
        range(..., with.revmap = with.revmap, ignore.strand = ignore.strand, na.rm = na.rm)
    }
    do.call(inner_call, unname(li.gr))
}
#' Range of Tracks
#'
#' Compute the overall genomic range covered by one or more tracks.
#'
#' @param x A TnTTrack object.
#' @param ...,with.revmap,ignore.strand,na.rm
#'     Passed to \code{\link[GenomicRanges]{range,GenomicRanges-method}}.
#' @aliases range,RangeBasedTrack-method
#' @return Returns a GRanges object.
#' @name range-TnTTrack
## Both track classes share the same implementation (.range.track above).
setMethod("range", signature = c(x = "RangeBasedTrack"), .range.track)
#' @rdname range-TnTTrack
setMethod("range", signature = c(x = "CompositeTrack"), .range.track)
|
ce8de555b8b577034fc23f0df37876856617de20 | f0d35b6ea0ebe9517537ecdf921bb442f1fd7550 | /ColonelHouNote/src/main/java/com/hn/opensource/shell/tool/当前脚本名称.RD | 7a2ac1ef47a4a36e91a3eb2943827688ec2b0fdd | [] | no_license | jiangsy163/ColonelHouNote | 01191a63d51542b09ef23e9662896e8407211119 | 6173c265c82b7b0197846cf621ecebab44073ef6 | refs/heads/master | 2021-01-14T12:47:41.091704 | 2015-11-19T08:40:09 | 2015-11-26T08:41:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 42 | rd | 当前脚本名称.RD | execFileName=$0
echo ${execFileName#*/}
|
cc1efd9d0eb26056bb7b5ff426a7585cca8806e5 | bcf1de9b0b559e201cfa4c7783c8eb634d83fadd | /R/rwx2.R | cce3ef5a4ae8bdf4e1f9dcc65881568ebffa61e6 | [] | no_license | einarhjorleifsson/mac | 05861ad4a0040fc640bbbfb24f4e6c6567eac238 | 0e0edc7a74e2d0149fafd4e6ca073ad6e87e0914 | refs/heads/master | 2021-01-18T14:15:02.820160 | 2017-06-30T11:08:16 | 2017-06-30T11:08:16 | 21,002,266 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,616 | r | rwx2.R | # Amended fishvise functions
#' @title Reads the stock in numbers and fishing mortality
#'
#' @description Reads fishing mortality and stock in numbers by year and age
#' from \code{sam}.
#'
#' @export
#'
#' @return Returns a \code{data.frame} containing the following items:
#'
#' @param x Object from read.fit
#' @param Scale A numerical value. If 1 (default) returns the stock in numbers
#' as in the input file.
#'
read.rbya <- function(x,Scale=1) {
minAge <- min(x$res[,3])
maxAge <- max(x$res[,3][x$res[,3]<98.5])
noN <- maxAge - minAge+1
noFleet <- max(x$res[,2])
N <- exp(x$stateEst[,1:noN])/Scale
colnames(N) <- c(minAge:maxAge)
rownames(N) <- x$years
N <- melt(N,factorsAsStrings = FALSE)
mort <- exp(x$stateEst[,-c(1:noN)])[,x$keys$keyLogFsta[1,]]
colnames(mort) <- c(minAge:maxAge)
rownames(mort) <- x$years
mort <- melt(mort,factorsAsStrings = FALSE)
res <- cbind(N,mort[,3])
names(res) <- c("year","age","n","f")
return(res)
}
#' @title Reads the stock summary values
#'
#' @description Reads the stock summary numbers by year from \code{sam}.
#'
#'
#' @export
#'
#' @return A \code{data.frame} with one row per year and columns: four
#' \code{f*} columns (fbar statistics, unscaled), four \code{ssb*} and four
#' \code{tsb*} columns (divided by \code{Scale}), plus \code{r} (recruitment),
#' \code{yield} (catch; \code{NA} for the final year) and \code{year}.
#'
#' @param x Object from function \code{read.fit}.
#' @param range Boolean Not used
#' @param Scale A value
read.rby <- function(x,range=FALSE,Scale=1) {
  ## Assumes fbar, ssb and tsb each contribute 4 columns (estimate plus
  ## uncertainty bounds, presumably) — confirm against read.fit's output.
  rby <- cbind(x$fbar,x$ssb,x$tsb)
  ## Scale only ssb/tsb (columns 5:12); fbar (columns 1:4) is a rate and is
  ## deliberately left unscaled.
  rby[,5:12] <- rby[,5:12]/Scale
  colnames(rby) <- paste(c(rep("f",4),rep("ssb",4),rep("tsb",4)),colnames(rby),sep="")
  rby <- data.frame(rby)
  # add recruitment
  rby$r <- x$R[,1]/Scale
  # add yield (log scale in x; padded with NA because the final assessment
  # year has no observed catch)
  rby$yield <- c(exp(x$logCatch[,1]),NA) / Scale
  rby$year <- x$years
  rownames(rby) <- NULL
  return(rby)
}
#' @title read.lowestoft
#'
#' @description Read a Lowestoft-format (VPA suite) data file: year range on
#' line 3, age range on line 4, data table from line 6 onward.
#'
#' @export
#'
#' @param filename Filename
#' @param val.name Character; name given to the value column when the result
#'   is returned in long format (ignored for \code{"List"} and \code{"Wide"}).
#' @param Format Character. \code{"List"} returns \code{list(y, a, tab)};
#'   \code{"Wide"} returns the year x age table; any other value (by
#'   convention \code{"Long"}) returns a long-format data frame with columns
#'   \code{year}, \code{age} and \code{val.name}.
#'
read.lowestoft <- function(filename, val.name,Format="List")
{
  ## Line 3: year range (first, last); line 4: age range (first, last).
  y <- scan(filename, skip = 2, nlines = 1, quiet = TRUE)
  a <- scan(filename, skip = 3, nlines = 1, quiet = TRUE)
  tab <- read.delim(filename, header = FALSE, sep = "", skip = 5)
  names(tab) <- c(a[1]:a[2])
  rownames(tab) <- c(y[1]:y[2])
  if(Format == "List") return(list(y = y, a = a, tab = tab))
  if(Format == "Wide") return(tab)
  ## Fall-through: long format (used by read.ibya with Format = "Long").
  tab$year <- as.integer(rownames(tab))
  tab <- melt(tab,id.vars="year",factorsAsStrings = FALSE)
  names(tab) <- c("year","age",val.name)
  tab$age <- as.integer(as.character(tab$age))
  return(tab)
}
#' @title read.ibya
#'
#' @description Read the standard set of Lowestoft input files (catch
#' numbers, weights, maturity, natural mortality and proportions of F/M
#' before spawning) from a directory and join them by year and age.
#'
#' @export
#'
#' @param path Character containing the path to the directory
#' @param Scale A value by which observed catch numbers are divided
#'
read.ibya <- function(path,Scale=1) {
  ## Read one Lowestoft file from `path` in long (year/age/value) format.
  read_long <- function(file, val.name) {
    read.lowestoft(file.path(path, file), val.name = val.name, Format = "Long")
  }
  ## Observed catch-at-age, scaled.
  res <- read_long("cn.dat", "oC")
  res$oC <- res$oC / Scale
  ## Remaining inputs, joined onto the catch table in this fixed order so the
  ## resulting column order matches the original implementation.
  others <- list(read_long("sw.dat", "sW"),
                 read_long("cw.dat", "cW"),
                 read_long("mo.dat", "mat"),
                 read_long("nm.dat", "m"),
                 read_long("pf.dat", "pf"),
                 read_long("pm.dat", "pm"))
  Reduce(function(left, right) join(left, right, by = c("year", "age")),
         others, res)
}
|
b7950ac0f53974e234a7dba6da2e3ea13298df08 | 9871d45970de0cae95c10e6d69da1b626cae0337 | /functions.R | 6f607b6f4d8a9faaf98437a438c7656c63d56b01 | [] | no_license | jbarrientos/RepData_PeerAssessment1 | a37b940ba3cd1f367f8866a643da445b58154613 | 16087321fb15cc80b39ab60ed4a0525e44ee08f1 | refs/heads/master | 2020-05-23T10:29:53.150009 | 2017-03-13T06:50:52 | 2017-03-13T06:50:52 | 84,762,162 | 0 | 0 | null | 2017-03-12T22:29:09 | 2017-03-12T22:29:09 | null | UTF-8 | R | false | false | 278 | r | functions.R | fill.na <- function(ds, dsNA){
means <- tapply(ds$steps, ds$interval, mean)
for(i in 1:288){
## Update with the mean
dsNA[dsNA$interval == as.integer(names(means[i])),]$steps <- means[i]
}
return (dsNA)
} |
a792be7250ecf0b11974248848ce21e1b72e1437 | a47ce30f5112b01d5ab3e790a1b51c910f3cf1c3 | /B_analysts_sources_github/bcaffo/exactLoglinTest/death.R | f0da4ab4c116d56bfe720e0320dec0e7798c2c37 | [] | no_license | Irbis3/crantasticScrapper | 6b6d7596344115343cfd934d3902b85fbfdd7295 | 7ec91721565ae7c9e2d0e098598ed86e29375567 | refs/heads/master | 2020-03-09T04:03:51.955742 | 2018-04-16T09:41:39 | 2018-04-16T09:41:39 | 128,578,890 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 197 | r | death.R | dat <- read.table("death.dat")
names(dat) <- c("BLACKD", "WHITVIC", "V4CPTY", "V5DPTY",
"PTDTHRC", "SESF1", "POOLADEH", "POOLGBF",
"POOLMCG", "POOMABEF", "OTHAGG")
|
421517d4d5ed4d8a381060d80fab8cfda2cd9285 | d5c7866164c46d1663ee33060ec0465110f92572 | /tests/testthat/test-db.R | b87c0a6917c9a458bee057fd20336d47a082acb0 | [
"MIT"
] | permissive | vimc/orderly | c57562c73ae75320fbccda42aa20beb2d9f73834 | 788ce5c7bcba928acc2cda826d83077648c82d71 | refs/heads/master | 2023-07-25T05:27:47.997733 | 2023-06-21T10:45:49 | 2023-06-21T10:45:49 | 92,043,843 | 119 | 10 | NOASSERTION | 2023-07-13T12:48:18 | 2017-05-22T10:58:56 | R | UTF-8 | R | false | false | 19,054 | r | test-db.R | context("db")
## orderly_db() must reject unknown connection types ("source"/"destination"
## are the valid ones) with an informative error naming the bad type.
test_that("invalid db type", {
  expect_error(orderly_db("xxx", "example"),
               "Invalid db type 'xxx'")
})
test_that("custom fields", {
path <- tempfile()
orderly_init(path)
file_copy("example_config.yml", file.path(path, "orderly_config.yml"),
overwrite = TRUE)
con <- orderly_db("destination", path)
on.exit(DBI::dbDisconnect(con))
expect_true(DBI::dbExistsTable(con, "orderly_schema"))
config <- orderly_config(path)
expect_error(report_db_init(con, config, TRUE),
"Table 'orderly_schema' already exists")
DBI::dbExecute(con, "DELETE FROM custom_fields WHERE id = 'author'")
expect_error(report_db_init(con, config, FALSE),
"custom fields 'author' not present in existing database")
unlockBinding(quote(fields), config)
config$fields <- NULL
expect_error(report_db_init(con, config, FALSE),
"custom fields 'requester', 'comments' in database")
})
test_that("rebuild empty database", {
skip_on_cran_windows()
path <- tempfile()
orderly_init(path)
file_copy("example_config.yml", file.path(path, "orderly_config.yml"),
overwrite = TRUE)
orderly_rebuild(path)
con <- orderly_db("destination", path)
on.exit(DBI::dbDisconnect(con))
expect_true(DBI::dbExistsTable(con, "orderly_schema"))
})
test_that("rebuild nonempty database", {
skip_on_cran_windows()
path <- test_prepare_orderly_example("minimal")
id <- orderly_run("example", root = path, echo = FALSE)
orderly_commit(id, root = path)
file.remove(file.path(path, "orderly.sqlite"))
orderly_rebuild(path)
orderly_rebuild(path)
con <- orderly_db("destination", path)
on.exit(DBI::dbDisconnect(con))
expect_equal(nrow(DBI::dbReadTable(con, "report_version")), 1)
})
test_that("no transient db", {
config <- list(destination = list(
driver = c("RSQLite", "SQLite"),
args = list(dbname = ":memory:")),
root = tempdir())
expect_error(orderly_db_args(config$destination, config = config),
"Cannot use a transient SQLite database with orderly")
})
test_that("db includes parameters", {
skip_on_cran_windows()
path <- test_prepare_orderly_example("demo")
id <- orderly_run("other", parameters = list(nmin = 0.1), root = path,
echo = FALSE)
orderly_commit(id, root = path)
con <- orderly_db("destination", root = path)
d <- DBI::dbReadTable(con, "parameters")
DBI::dbDisconnect(con)
expect_equal(d, data_frame(id = 1,
report_version = id,
name = "nmin",
type = "number",
value = "0.1"))
})
test_that("different parameter types are stored correctly", {
skip_on_cran_windows()
path <- test_prepare_orderly_example("parameters", testing = TRUE)
id <- orderly_run("example", parameters = list(a = 1, b = TRUE, c = "one"),
root = path, echo = FALSE)
orderly_commit(id, root = path)
con <- orderly_db("destination", root = path)
d <- DBI::dbReadTable(con, "parameters")
DBI::dbDisconnect(con)
expect_equal(d, data_frame(id = 1:3,
report_version = id,
name = c("a", "b", "c"),
type = c("number", "boolean", "text"),
value = c("1", "true", "one")))
})
test_that("avoid unserialisable parameters", {
t <- Sys.Date()
expect_error(report_db_parameter_type(t), "Unsupported parameter type")
expect_error(report_db_parameter_serialise(t), "Unsupported parameter type")
})
test_that("dialects", {
skip_on_cran() # likely platform dependent
s <- report_db_schema_read(NULL, "sqlite")
p <- report_db_schema_read(NULL, "postgres")
expect_false(isTRUE(all.equal(s, p)))
path <- test_prepare_orderly_example("minimal")
config <- orderly_config_$new(path)
con <- DBI::dbConnect(RSQLite::SQLite(), ":memory:")
on.exit(DBI::dbDisconnect(con))
expect_error(report_db_init_create(con, config, "postgres"),
"syntax error")
expect_silent(report_db_init_create(con, config, "sqlite"))
expect_equal(report_db_dialect(con), "sqlite")
expect_equal(report_db_dialect(structure(TRUE, class = "PqConnection")),
"postgres")
expect_error(report_db_dialect(structure(TRUE, class = "other")),
"Can't determine SQL dialect")
})
test_that("sources are listed in db", {
skip_on_cran_windows()
path <- test_prepare_orderly_example("demo")
id <- orderly_run("other", root = path, parameters = list(nmin = 0),
echo = FALSE)
orderly_commit(id, root = path)
con <- orderly_db("destination", root = path)
on.exit(DBI::dbDisconnect(con))
p <- path_orderly_run_rds(file.path(path, "archive", "other", id))
info <- readRDS(p)$meta$file_info_inputs
h <- hash_files(file.path(path, "archive", "other", id, "functions.R"), FALSE)
expect_equal(info$filename[info$file_purpose == "source"], "functions.R")
expect_equal(info$file_hash[info$file_purpose == "source"], h)
d <- DBI::dbGetQuery(
con, "SELECT * from file_input WHERE report_version = $1", id)
expect_false("resource" %in% d$file_purpose)
expect_true("source" %in% d$file_purpose)
})
test_that("backup", {
skip_on_cran_windows()
path <- create_orderly_demo()
expect_message(
orderly_backup(path),
"orderly.sqlite => backup/db/orderly.sqlite",
fixed = TRUE)
dest <- path_db_backup(path, "orderly.sqlite")
expect_true(file.exists(dest))
dat_orig <- with_sqlite(file.path(path, "orderly.sqlite"), function(con)
DBI::dbReadTable(con, "report_version"))
dat_backup <- with_sqlite(dest, function(con)
DBI::dbReadTable(con, "report_version"))
expect_equal(dat_orig, dat_backup)
})
test_that("db includes custom fields", {
skip_on_cran_windows()
path <- test_prepare_orderly_example("demo")
id <- orderly_run("minimal", root = path, echo = FALSE)
orderly_commit(id, root = path)
con <- orderly_db("destination", root = path)
on.exit(DBI::dbDisconnect(con))
d <- DBI::dbReadTable(con, "report_version_custom_fields")
expect_equal(d$report_version, rep(id, 3))
v <- c("requester", "author", "comment")
expect_setequal(d$key, v)
expect_equal(d$value[match(v, d$key)],
c("Funder McFunderface",
"Researcher McResearcherface",
"This is a comment"))
})
test_that("db includes file information", {
skip_on_cran_windows()
path <- test_prepare_orderly_example("demo")
id <- orderly_run("multifile-artefact", root = path, echo = FALSE)
p <- orderly_commit(id, root = path)
h1 <- hash_files(
file.path(path, "src", "multifile-artefact", "orderly.yml"), FALSE)
h2 <- hash_files(
file.path(path, "src", "multifile-artefact", "script.R"), FALSE)
con <- orderly_db("destination", root = path)
on.exit(DBI::dbDisconnect(con))
file_input <- DBI::dbReadTable(con, "file_input")
expect_equal(
file_input,
data_frame(id = 1:2,
report_version = id,
file_hash = c(h1, h2),
filename = c("orderly.yml", "script.R"),
file_purpose = c("orderly_yml", "script")))
info <- readRDS(path_orderly_run_rds(p))$meta$file_info_artefacts
artefact_hash <- info$file_hash
## Artefacts:
file_artefact <- DBI::dbReadTable(con, "file_artefact")
expect_equal(
file_artefact,
data_frame(id = 1:2,
artefact = 1,
file_hash = artefact_hash,
filename = c("mygraph.png", "mygraph.pdf")))
report_version_artefact <- DBI::dbReadTable(con, "report_version_artefact")
expect_equal(
report_version_artefact,
data_frame(id = 1,
report_version = id,
format = "staticgraph",
description = "A graph of things",
order = 1))
filenames <- c("orderly.yml", "script.R", "mygraph.png", "mygraph.pdf")
file <- DBI::dbReadTable(con, "file")
expect_equal(file,
data_frame(hash = c(h1, h2,
artefact_hash),
size = file_size(file.path(p, filenames))))
})
test_that("connect to database instances", {
path <- test_prepare_orderly_example("minimal")
p <- file.path(path, "orderly_config.yml")
writeLines(c(
"database:",
" source:",
" driver: RSQLite::SQLite",
" args:",
" dbname: source.sqlite",
" instances:",
" staging:",
" dbname: staging.sqlite",
" production:",
" dbname: production.sqlite"),
p)
f <- function(x) {
basename(x$source@dbname)
}
expect_equal(
f(orderly_db("source", root = path)),
"staging.sqlite")
expect_equal(
f(orderly_db("source", root = path, instance = "staging")),
"staging.sqlite")
expect_equal(
f(orderly_db("source", root = path, instance = "production")),
"production.sqlite")
})
test_that("db instance select", {
config_db <- list(
x = list(
driver = c("RSQLite", "SQLite"),
args = list(name = "a"),
instances = list(
a = list(name = "a"),
b = list(name = "b"))),
y = list(
driver = c("RSQLite", "SQLite"),
args = list(name = "y")))
config_db_a <- modifyList(config_db, list(x = list(instance = "a")))
config_db_b <- modifyList(config_db, list(x = list(args = list(name = "b"),
instance = "b")))
## The happy paths:
expect_identical(db_instance_select(NULL, config_db), config_db_a)
expect_equal(db_instance_select("a", config_db), config_db_a)
expect_equal(db_instance_select("b", config_db), config_db_b)
expect_equal(db_instance_select(c(x = "a"), config_db), config_db_a)
expect_equal(db_instance_select(c(x = "b"), config_db), config_db_b)
expect_error(db_instance_select("c", config_db),
"Invalid instance 'c' for database 'x'")
expect_error(db_instance_select(c(x = "c"), config_db),
"Invalid instance: 'c' for 'x'")
expect_error(db_instance_select(c(z = "a"), config_db),
"Invalid database name 'z' in provided instance")
})
test_that("db instance select with two instanced databases", {
config_db <- list(
x = list(
driver = c("RSQLite", "SQLite"),
args = list(name = "b"),
instances = list(
b = list(name = "b"),
a = list(name = "a"))),
y = list(
driver = c("RSQLite", "SQLite"),
args = list(name = "c"),
instances = list(
c = list(name = "c"),
a = list(name = "a"))))
config_db_aa <- modifyList(config_db,
list(x = list(args = list(name = "a"),
instance = "a"),
y = list(args = list(name = "a"),
instance = "a")))
config_db_bc <- modifyList(config_db, list(x = list(instance = "b"),
y = list(instance = "c")))
config_db_ac <- modifyList(config_db,
list(x = list(args = list(name = "a"),
instance = "a"),
y = list(args = list(name = "c"),
instance = "c")))
## The happy paths:
expect_identical(db_instance_select(NULL, config_db), config_db_bc)
expect_equal(db_instance_select("a", config_db), config_db_aa)
expect_equal(db_instance_select(c(x = "a", y = "a"), config_db),
config_db_aa)
expect_equal(db_instance_select(c(x = "b", y = "c"), config_db),
config_db_bc)
expect_equal(db_instance_select(c(x = "a"), config_db), config_db_ac)
## Some error paths:
expect_error(db_instance_select("f", config_db),
"Invalid instance 'f' for databases 'x', 'y'")
expect_error(db_instance_select(c(x = "f", y = "g"), config_db),
"Invalid instances: 'f' for 'x', 'g' for 'y'")
expect_error(db_instance_select(c(z = "a"), config_db),
"Invalid database name 'z' in provided instance")
})
test_that("db instance select rejects instance when no dbs support it", {
config_db <- list(
x = list(
driver = c("RSQLite", "SQLite"),
args = list(name = "a")),
y = list(
driver = c("RSQLite", "SQLite"),
args = list(name = "b")))
expect_identical(db_instance_select(NULL, config_db), config_db)
expect_error(db_instance_select("a", config_db),
"Can't specify 'instance' with no databases supporting it")
})
test_that("Create and verify tags on startup", {
root <- test_prepare_orderly_example("minimal")
append_lines(c("tags:", " - tag1", " - tag2"),
file.path(root, "orderly_config.yml"))
con <- orderly_db("destination", root = root)
expect_equal(DBI::dbReadTable(con, "tag"),
data_frame(id = c("tag1", "tag2")))
DBI::dbDisconnect(con)
append_lines(" - tag3", file.path(root, "orderly_config.yml"))
expect_error(
orderly_db("destination", root = root),
"tags have changed: rebuild with orderly::orderly_rebuild()",
fixed = TRUE)
orderly_rebuild(root)
con <- orderly_db("destination", root = root)
expect_equal(DBI::dbReadTable(con, "tag"),
data_frame(id = c("tag1", "tag2", "tag3")))
DBI::dbDisconnect(con)
})
test_that("Add tags to db", {
root <- test_prepare_orderly_example("minimal")
append_lines(c("tags:", " - tag1", " - tag2"),
file.path(root, "orderly_config.yml"))
append_lines(c("tags:", " - tag1"),
file.path(root, "src", "example", "orderly.yml"))
id <- orderly_run("example", root = root, echo = FALSE)
p <- orderly_commit(id, root = root)
con <- orderly_db("destination", root)
on.exit(DBI::dbDisconnect(con))
expect_equal(
DBI::dbReadTable(con, "report_version_tag"),
data_frame(id = 1, report_version = id, tag = "tag1"))
})
test_that("add batch info to db", {
path <- test_prepare_orderly_example("parameters", testing = TRUE)
params <- data_frame(
a = c("one", "two", "three"),
b = c(1, 2, 3)
)
batch_id <- ids::random_id()
mockery::stub(orderly_batch, "ids::random_id", batch_id)
output <- orderly_batch("example", parameters = params,
root = path, echo = FALSE)
p <- lapply(output$id, function(id) {
orderly_commit(id, root = path)
})
con <- orderly_db("destination", path)
on.exit(DBI::dbDisconnect(con))
expect_equal(
DBI::dbReadTable(con, "report_batch"),
data_frame(id = batch_id))
expect_equal(
DBI::dbReadTable(con, "report_version_batch"),
data_frame(report_version = output$id, report_batch = rep(batch_id, 3)))
})
## Regression test for vimc-3652
test_that("trailing slash in report name is tolerated", {
path <- test_prepare_orderly_example("minimal")
id <- orderly_run("src/example/", root = path, echo = FALSE)
expect_error(orderly_commit(id, root = path), NA)
})
test_that("db includes elapsed time", {
skip_on_cran_windows()
path <- test_prepare_orderly_example("minimal")
id <- orderly_run("example", root = path, echo = FALSE)
p <- orderly_commit(id, root = path)
con <- orderly_db("destination", root = path)
on.exit(DBI::dbDisconnect(con))
d <- DBI::dbReadTable(con, "report_version")
expect_true(d$elapsed > 0)
expect_equal(d$elapsed,
readRDS(path_orderly_run_rds(p))$meta$elapsed)
})
test_that("rebuild nonempty database with backup", {
skip_on_cran_windows()
path <- test_prepare_orderly_example("minimal")
id <- orderly_run("example", root = path, echo = FALSE)
orderly_commit(id, root = path)
con <- orderly_db("destination", path)
DBI::dbExecute(con, "UPDATE report_version SET published = 1")
DBI::dbDisconnect(con)
orderly_rebuild(path)
files <- dir(file.path(path, "backup/db"))
expect_equal(length(files), 1)
expect_match(files, "^orderly\\.sqlite\\.[0-9]{8}-[0-9]{6}$")
con1 <- orderly_db("destination", path)
con2 <- DBI::dbConnect(RSQLite::SQLite(),
dbname = file.path(path, "backup/db", files))
expect_equal(
DBI::dbReadTable(con1, "report_version")$published, 0)
expect_equal(
DBI::dbReadTable(con2, "report_version")$published, 1)
DBI::dbDisconnect(con1)
DBI::dbDisconnect(con2)
})
test_that("db write collision", {
skip_on_cran()
path <- test_prepare_orderly_example("minimal")
id1 <- orderly_run("example", root = path, echo = FALSE)
id2 <- orderly_run("example", root = path, echo = FALSE)
orderly_commit(id1, root = path)
con <- orderly_db("destination", root = path)
on.exit(DBI::dbDisconnect(con))
DBI::dbExecute(con, "BEGIN IMMEDIATE")
DBI::dbExecute(con, "DELETE FROM file_artefact")
elapsed <- system.time(
testthat::expect_error(
orderly_commit(id2, root = path, timeout = 5),
"database is locked"))
expect_true(elapsed["elapsed"] > 5)
DBI::dbRollback(con)
p <- orderly_commit(id2, root = path)
ids <- DBI::dbGetQuery(con, "SELECT id from report_version")$id
expect_equal(length(ids), 2)
expect_setequal(ids, c(id1, id2))
})
test_that("db includes instance", {
skip_on_cran_windows()
path <- test_prepare_orderly_example("minimal")
p <- file.path(path, "orderly_config.yml")
writeLines(c(
"database:",
" source:",
" driver: RSQLite::SQLite",
" instances:",
" default:",
" dbname: source.sqlite",
" alternative:",
" dbname: alternative.sqlite"),
p)
file.copy(file.path(path, "source.sqlite"),
file.path(path, "alternative.sqlite"))
id1 <- orderly_run("example", root = path, echo = FALSE)
id2 <- orderly_run("example", root = path, echo = FALSE,
instance = "default")
id3 <- orderly_run("example", root = path, echo = FALSE,
instance = "alternative")
orderly_commit(id1, root = path)
orderly_commit(id2, root = path)
orderly_commit(id3, root = path)
con <- orderly_db("destination", root = path)
d <- DBI::dbReadTable(con, "report_version_instance")
DBI::dbDisconnect(con)
expect_equal(d,
data_frame(id = c(1, 2, 3),
report_version = c(id1, id2, id3),
type = rep("source", 3),
instance = c("default", "default", "alternative")))
})
test_that("Can cope when all fields are optional", {
path <- test_prepare_orderly_example("minimal")
append_lines(
c("fields:",
" requester:",
" required: false",
" author:",
" required: false"),
file.path(path, "orderly_config.yml"))
id <- orderly_run("example", root = path, echo = FALSE)
orderly_commit(id, root = path)
db <- orderly_db("destination", root = path)
expect_equal(nrow(DBI::dbReadTable(db, "report_version_custom_fields")), 0)
})
|
31cf127fa5e8b1f67fde896ac02026b706afdb51 | 082ae6acef6cfafb85ccc8c98c67c670ffd934ee | /R/htmlrmd.R | 4dcc0eac0cf092ab76fae30ceb100f1ffe38944d | [
"MIT"
] | permissive | AnirudhHimself/ragtag | 9825b3905e79a14e0b08f1f6051c4f38923dd86f | 6b0eda50ad886860e0bd653f8c516a7d53e101a7 | refs/heads/master | 2023-02-05T20:48:25.921055 | 2020-12-25T21:36:54 | 2020-12-25T21:36:54 | 267,073,259 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,115 | r | htmlrmd.R | #' Personal Rmd Temlate. It only converts to HTML right now.
#'
#' It essentially uses the base rmarkdown html_document function
#' but adds in additional CSS and and a footer template file.
#' It essentially uses the base rmarkdown html_document function,
#' but adds in additional CSS and and a footer template file.
#' I haven't added much to this footer template so it's more of a
#' skeleton for later.
#' @import rmarkdown
#' @param number_sections Should sections be numbered?
#' @param ... additional arguments sent to \@code{html_document}
#' @export
#'
ragtag_html = function(number_sections = FALSE, ...) {
# locations of resource files in the package
pkg_resource = function(...) {
system.file(..., package = "ragtag")
}
css = pkg_resource("rmarkdown/templates/ragtagHtml/resources/styles.css")
# call the base html_document function
rmarkdown::html_document(
toc = TRUE,
toc_float = TRUE,
toc_depth = 2,
toc_collapsed = FALSE,
df_print = "paged",
theme = "lumen",
code_folding = "hide",
css = css,
number_sections = number_sections,
...
)
}
|
72c2f46bd360a65c3a1577209fe74011ecb6e247 | 6ca19435cf789e314304c77546b03c049e1c004a | /Maghav Airline Code.R | 47c32617877b7ecd90d56c5862934689415c3c6b | [] | no_license | maghavgoyal/Airline-pricing-analysis | 2e330cb526dd4f8c97a52846d6938e5f3f7c4ab8 | 12b4d7b283bb92b5beba94aa40f2ea4766b5592f | refs/heads/master | 2020-06-24T09:49:06.128394 | 2017-07-11T19:44:48 | 2017-07-11T19:44:48 | 96,930,304 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,248 | r | Maghav Airline Code.R | #Analysis of Airline Ticket Pricing
#Name: Maghav Goyal
#EMail: maghavgoyal@gmail.com
#College: DTU
# Set the working directory that contains the input data.
# NOTE(review): this is an absolute, machine-specific path; the script is not
# portable across machines — consider running it from the data directory.
setwd("C:/Users/ANIL GOYAL/Desktop/New folder (2)/data sets")
# Read the data into a data frame. The original call wrapped the file name in
# a no-op paste(..., sep = "") and carried a stray trailing comma; read.csv
# takes the file name directly.
air.df <- read.csv("AIRLINE.csv")
# View the data frame interactively.
View(air.df)
#Summarizing the data
summary(air.df)
# Visualizing using plots
boxplot(air.df$PRICE_PREMIUM~air.df$AIRLINE)
boxplot(air.df$PRICE_ECONOMY~air.df$AIRLINE)
boxplot(air.df$PRICE_PREMIUM~air.df$MONTH)
boxplot(air.df$PRICE_ECONOMY~air.df$MONTH)
#Visualizing with PRICE_RELATIVE as Y vs Categorical variables
boxplot(air.df$PRICE_RELATIVE~air.df$MONTH)
boxplot(air.df$PRICE_RELATIVE~air.df$INTERNATIONAL) # Huge differnce is observed
boxplot(air.df$PRICE_RELATIVE~air.df$AIRCRAFT)
boxplot(air.df$PRICE_RELATIVE~air.df$QUALITY) # As quality increases, relative price increases
#Converting other variables in categories and then visualizing bivariately
x0<-factor(air.df$FLIGHT_DURATION) #No pattern
boxplot(air.df$PRICE_RELATIVE~x0)
x<-factor(air.df$SEATS_ECONOMY)
boxplot(air.df$PRICE_RELATIVE~x) # For seats >180, the realtive price is mostly good.
x1<-factor(air.df$SEATS_PREMIUM)
boxplot(air.df$PRICE_RELATIVE~x1) #No definite pattern
x3<-factor(air.df$PITCH_ECONOMY)
boxplot(air.df$PRICE_RELATIVE~x3) # as pitch increases, the relative price decreases
x2<-factor(air.df$PITCH_PREMIUM)
boxplot(air.df$PRICE_RELATIVE~x2) # as pitch increases, the relative price increases
boxplot(air.df$PRICE_RELATIVE~x2+x3) # No definite pattern
x4<-factor(air.df$WIDTH_ECONOMY) # No inference
boxplot(air.df$PRICE_RELATIVE~x4)
x5<-factor(air.df$WIDTH_PREMIUM) #Width increases, Relative price increases
boxplot(air.df$PRICE_RELATIVE~x5)
boxplot(air.df$PRICE_RELATIVE~x4+x5) #Increases gradually
x6<-factor(air.df$N) # No definite pattern
boxplot(air.df$PRICE_RELATIVE~x6)
x7<-factor(air.df$LAMBDA) #No definite pattern
boxplot(air.df$PRICE_RELATIVE~x7)
#Scatterplot for non-categorical variables
library(car)
scatterplot(air.df$PRICE_RELATIVE,air.df$FLIGHT_DURATION) # No pattern
scatterplot(air.df$PRICE_RELATIVE,air.df$SEATS_ECONOMY) # As seats increase, the relative price decreases
#Pattern obtained by scatterplot vs (variable) and boxplot vs factor(variable) gives the same inferences
#VISUALIZING VIA GGVIS
library(ggvis)
ggvis(~PRICE_ECONOMY,~PRICE_PREMIUM,fill=~PRICE_RELATIVE,data=air.df)
ggvis(~PRICE_ECONOMY,~PRICE_PREMIUM,fill=~AIRLINE,data=air.df)
ggvis(~PRICE_PREMIUM,~WIDTH_PREMIUM,fill=~PRICE_RELATIVE,data=air.df)
#Interaction between the price quantities
ggvis(~PRICE_ECONOMY,~PRICE_RELATIVE,fill=~PRICE_PREMIUM,data=air.df)
#CORRGRAM
library(corrgram)
corrgram(air.df, order=TRUE, lower.panel=panel.shade,upper.panel=panel.pie, text.panel=panel.txt)
corrgram(air.df, order=NULL, lower.panel=panel.shade,
upper.panel=NULL, text.panel=panel.txt)
#CORRELATIONS
cor(air.df[,-c(1)]) #Removing the aphabetical column
#CHECKING CORRELATIONS FOR PRICE QUANTITIES
y<-air.df[,c(2,3,6,7,8,9,10,11,12,13,14,15,16,17)]
y1<-air.df[,c(12,13,14)]
cor(y,y1)
# Correlation matrix (NOTE: the original label said "Variance-Covariance",
# but cor() computes correlations; use cov() for a covariance matrix).
cor(air.df[,-c(1)]) #Removing the alphabetical column
#APPROACH 1
#PRICE_RELATIVE SHOULD BE TAKEN AS Y
fit<-lm(PRICE_RELATIVE~.,data=air.df)
summary(fit)
#HYPOTHESIS
# Mean of the RELATIVE_PRICE should not be equal to zero.
#USING THE GLM COMMAND AND CHECKING AIC VALUE FOR EACH EXCLUSION OF VARIABLE
fit<-glm(PRICE_RELATIVE~.-WIDTH_PREMIUM,data=air.df)
summary(fit) # AIC VALUE DECREASES SO IT CAN BE REMOVED
fit<-glm(PRICE_RELATIVE~.-WIDTH_PREMIUM-PITCH_ECONOMY,data=air.df)
summary(fit) # AIC VALUE REMAINS THE SAME SO IT CANNOT BE REMOVED
fit<-glm(PRICE_RELATIVE~.-WIDTH_PREMIUM-PITCH_PREMIUM,data=air.df)
summary(fit) # AIC VALUE REMAINS THE SAME SO IT CANNOT BE REMOVED
fit<-glm(PRICE_RELATIVE~.-WIDTH_PREMIUM-WIDTH_ECONOMY,data=air.df)
summary(fit) # AIC VALUE INCREASES SO IT CANnOT BE REMOVED
fit<-glm(PRICE_RELATIVE~.-WIDTH_PREMIUM-MONTH,data=air.df)
summary(fit) # AIC VALUE DECREASES SO IT CAN BE REMOVED
## Backward-elimination exploration of GLM/LM fits for PRICE_RELATIVE on
## air.df. Candidate predictors are dropped one at a time from the model and
## the AIC reported by summary(fit) is compared by hand after each fit.
fit<-glm(PRICE_RELATIVE~.-WIDTH_PREMIUM-INTERNATIONAL,data=air.df)
summary(fit) # AIC VALUE DECREASES SO IT CAN BE REMOVED
fit<-glm(PRICE_RELATIVE~.-WIDTH_PREMIUM-INTERNATIONAL-MONTH-AIRCRAFT,data=air.df)
summary(fit) # AIC VALUE REMAINS THE SAME AND IT CANNOT BE REMOVED
fit<-glm(PRICE_RELATIVE~.-WIDTH_PREMIUM-INTERNATIONAL-MONTH-SEATS_ECONOMY,data=air.df)
summary(fit) # AIC VALUE REMAINS THE SAME AND IT CANNOT BE REMOVED
fit<-glm(PRICE_RELATIVE~.-WIDTH_PREMIUM-INTERNATIONAL-MONTH-SEATS_PREMIUM,data=air.df)
summary(fit) # AIC VALUE REMAINS THE SAME AND IT CANNOT BE REMOVED
fit<-glm(PRICE_RELATIVE~.-WIDTH_PREMIUM-INTERNATIONAL-MONTH-LAMBDA,data=air.df)
summary(fit) # AIC VALUE INCREASES AND SO CANNOT BE REMOVED
## NOTE(review): this fit uses lm() while its neighbours use glm();
## summary.lm() does not report an AIC -- confirm the intended comparison.
fit<-lm(PRICE_RELATIVE~.-WIDTH_PREMIUM-INTERNATIONAL-MONTH-AIRLINE,data=air.df)
summary(fit) # AIC VALUE INCREASES AND SO IT CANNOT BE REMOVED
fit<-glm(PRICE_RELATIVE~.-WIDTH_PREMIUM-INTERNATIONAL-MONTH-QUALITY,data=air.df)
summary(fit) # AIC VALUE REMAINS THE SAME AND SO CANNOT BE REMOVED
# AIC VALUE OBTAINED IS MINIMUM FOR THIS MODEL TILL NOW
fit<-lm(PRICE_RELATIVE~.-INTERNATIONAL-MONTH-WIDTH_PREMIUM,data=air.df)
summary(fit)
# ADDING INTERACTION TERMS
fit<-glm(PRICE_RELATIVE~.-INTERNATIONAL-MONTH-WIDTH_PREMIUM+WIDTH_PREMIUM*WIDTH_ECONOMY,data=air.df)
summary(fit) # AIC INCREASES SO IT SHOULD NOT BE ADDED
fit<-glm(PRICE_RELATIVE~.-INTERNATIONAL-MONTH-WIDTH_PREMIUM+PRICE_PREMIUM*PRICE_ECONOMY,data=air.df)
summary(fit) # AIC DECREASES SO IT SHOULD BE ADDED
# INTUITIVELY, WIDTH_PREMIUM SHOULD NOT BE REMOVED, ALSO BECAUSE CORRELATION IS HIGH
fit<-glm(PRICE_RELATIVE~.-INTERNATIONAL-MONTH+PRICE_PREMIUM*PRICE_ECONOMY,data=air.df)
summary(fit)
# Check the predicted values from this model.
predict(fit)
# As the p-value of the model is 2.2e-16, which is much lower than the
# significance level of 0.05, we can safely reject the null hypothesis that
# the coefficient is zero.
# APPROACH 2: model each price separately.
fit<-lm(PRICE_PREMIUM~.,data=air.df)
summary(fit)
fit<-lm(PRICE_ECONOMY~.,data=air.df)
summary(fit)
# Null hypothesis: the mean of PRICE_ECONOMY is greater than or equal to the
# mean of PRICE_PREMIUM.
# Alternative hypothesis: the mean of PRICE_PREMIUM is higher than the mean
# of PRICE_ECONOMY.
t.test(air.df$PRICE_ECONOMY,air.df$PRICE_PREMIUM,alternative = "less")
# Linear regression with the difference of the prices as the response Y.
Temp<-air.df$PRICE_PREMIUM- air.df$PRICE_ECONOMY
# R-squared equals 1 and the residual error shrinks to ~zero because Temp is
# an exact linear combination of predictors already present in air.df.
fit<-lm(Temp~.-INTERNATIONAL-MONTH+PRICE_PREMIUM*PRICE_ECONOMY,data=air.df)
summary(fit)
# AIC value is reduced to -2260.
fit<-glm(Temp~.-INTERNATIONAL-MONTH+PRICE_PREMIUM*PRICE_ECONOMY,data=air.df)
summary(fit)
# PREDICTION OF VALUES
predict(fit)
# The summary tells us it is of the trivial a-b = a-b type,
# HENCE MODEL NOT CONSIDERED
|
dc64bf7e7928b7f5db3c3255abefc04aa67cf4a5 | 72f7da392743b54817109487ec7f0f533829b335 | /man/onco_enrich.Rd | 1f4dab3556e53e4651f20b6f1fc9d8427139fddd | [
"MIT"
] | permissive | shijianasdf/oncoEnrichR | ee4891218a73a4ac2e65e61dfd785371b8e1eedc | bdd913da6dbb6f6d3276e033a059fdae93ea3b7b | refs/heads/master | 2023-08-29T09:17:42.081801 | 2021-10-12T18:36:40 | 2021-10-12T18:36:40 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 6,445 | rd | onco_enrich.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/onco_enrichr.R
\name{onco_enrich}
\alias{onco_enrich}
\title{Interrogate a gene list for cancer relevance}
\usage{
onco_enrich(
query,
query_id_type = "symbol",
ignore_id_err = TRUE,
html_floating_toc = T,
html_report_theme = "default",
project_title = "Project title",
project_owner = "Project owner",
project_description = "Project description",
bgset = NULL,
bgset_id_type = "symbol",
bgset_description = "All protein-coding genes",
p_value_cutoff_enrichment = 0.05,
p_value_adjustment_method = "BH",
q_value_cutoff_enrichment = 0.2,
min_geneset_size = 10,
max_geneset_size = 500,
min_subcellcomp_confidence = 1,
subcellcomp_show_cytosol = FALSE,
min_confidence_reg_interaction = "D",
simplify_go = TRUE,
ppi_add_nodes = 50,
ppi_score_threshold = 900,
show_ppi = TRUE,
show_drugs_in_ppi = TRUE,
show_disease = TRUE,
show_top_diseases_only = TRUE,
show_cancer_hallmarks = TRUE,
show_drug = TRUE,
show_enrichment = TRUE,
show_tcga_aberration = TRUE,
show_tcga_coexpression = TRUE,
show_cell_tissue = FALSE,
show_ligand_receptor = TRUE,
show_regulatory_interactions = TRUE,
show_unknown_function = TRUE,
show_prognostic_cancer_assoc = TRUE,
show_subcell_comp = TRUE,
show_crispr_lof = TRUE,
show_complex = TRUE,
...
)
}
\arguments{
\item{query}{character vector with gene/query identifiers}
\item{query_id_type}{character indicating source of query (one of "uniprot_acc", "symbol",
"entrezgene", or "ensembl_gene","ensembl_mrna","refseq_mrna","ensembl_protein","refseq_protein")}
\item{ignore_id_err}{logical indicating if analysis should continue when unknown query identifiers are encountered}
\item{html_floating_toc}{logical - float the table of contents to the left of the main document content (HTML report). The floating table of contents will always be visible even when the document is scrolled}
\item{html_report_theme}{Bootswatch theme for HTML report (any of "bootstrap","cerulean","cosmo","default","flatly","journal","lumen","paper","sandstone","simplex","spacelab","united","yeti")}
\item{project_title}{project title (title of report)}
\item{project_owner}{name of project owner}
\item{project_description}{project background information}
\item{bgset}{character vector with gene identifiers, used as reference/background for enrichment/over-representation analysis}
\item{bgset_id_type}{character indicating source of background ("uniprot_acc","symbol","entrezgene","ensembl_gene_id")}
\item{bgset_description}{character indicating type of background (e.g. "All lipid-binding proteins (n = 200)")}
\item{p_value_cutoff_enrichment}{cutoff p-value for enrichment/over-representation analysis}
\item{p_value_adjustment_method}{one of "holm", "hochberg", "hommel", "bonferroni", "BH", "BY", "fdr", "none"}
\item{q_value_cutoff_enrichment}{cutoff q-value for enrichment analysis}
\item{min_geneset_size}{minimal size of geneset annotated by term for testing in enrichment/over-representation analysis}
\item{max_geneset_size}{maximal size of geneset annotated by term for testing in enrichment/over-representation analysis}
\item{min_subcellcomp_confidence}{minimum confidence level of subcellular compartment annotations (range from 1 to 6, 6 is strongest)}
\item{subcellcomp_show_cytosol}{logical indicating if subcellular heatmap should show highlight proteins located in the cytosol or not}
\item{min_confidence_reg_interaction}{minimum confidence level for regulatory interactions (TF-target) retrieved from DoRothEA ('A','B','C', or 'D')}
\item{simplify_go}{remove highly similar GO terms in results from GO enrichment/over-representation analysis}
\item{ppi_add_nodes}{number of nodes to add to target set when computing the protein-protein interaction network (STRING)}
\item{ppi_score_threshold}{minimum score (0-1000) for retrieval of protein-protein interactions (STRING)}
\item{show_ppi}{logical indicating if report should contain protein-protein interaction data (STRING)}
\item{show_drugs_in_ppi}{logical indicating if targeted drugs (> phase 3) should be displayed in protein-protein interaction network (Open Targets Platform)}
\item{show_disease}{logical indicating if report should contain disease associations (Open Targets Platform, association_score >= 0.4)}
\item{show_top_diseases_only}{logical indicating if report should contain top (15) disease associations only (Open Targets Platform)}
\item{show_cancer_hallmarks}{logical indicating if report should contain annotations/evidence of cancer hallmarks per query gene (COSMIC/Open Targets Platform)}
\item{show_enrichment}{logical indicating if report should contain functional enrichment/over-representation analysis (MSigDB, GO, KEGG, REACTOME, NetPath, WikiPathways)}
\item{show_tcga_aberration}{logical indicating if report should contain TCGA aberration plots (amplifications/deletions)}
\item{show_tcga_coexpression}{logical indicating if report should contain TCGA co-expression data (RNAseq) of query set with oncogenes/tumor suppressor genes}
\item{show_cell_tissue}{logical indicating if report should contain tissue-specificity and single cell-type specificity assessments (Human Protein Atlas)
of target genes, using data from the Human Protein Atlas}
\item{show_ligand_receptor}{logical indicating if report should contain ligand-receptor interactions (CellChatDB)}
\item{show_regulatory_interactions}{logical indicating if report should contain data on transcription factor (TF) - target interactions relevant for the query set (DoRothEA)}
\item{show_unknown_function}{logical indicating if report should highlight target genes with unknown or poorly defined functions (GO/Uniprot KB/NCBI)}
\item{show_prognostic_cancer_assoc}{logical indicating if mRNA-based (single-gene) prognostic associations to cancer types should be listed (Human Protein Atlas/TCGA)}
\item{show_subcell_comp}{logical indicating if report should provide subcellular compartment annotations (ComPPI)}
\item{show_crispr_lof}{logical indicating if report should provide fitness scores and target priority scores from CRISPR/Cas9 loss-of-fitness screens (Project Score)}
\item{show_complex}{logical indicating if report should provide target memberships in known protein complexes (ComplexPortal/Compleat/PDB/CORUM)}
}
\description{
Interrogate a gene list for cancer relevance
}
|
7c1f74c0c75b1721c4e38a5e66725aa8a07330d2 | 514de18f0822291a0a57d41264d5e0e33fc44378 | /RCode/work1119.r | 3801eecc74c20baae7b90b303d252b80c3a63bd6 | [] | no_license | eburas77/BikeshareWork | 1b2a05b8454d497e2f69e993de9d06e35063bc7c | df275536a3f8ff2d87ad9719d1caeb5e0854b863 | refs/heads/master | 2021-01-10T12:29:58.770835 | 2015-11-10T22:35:28 | 2015-11-10T22:35:28 | 45,944,631 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,290 | r | work1119.r | # Aggregate over all combinations of day / subscription type / hour / start station.
# Use the df Q2 from work1111.Rdata to do this.
## Count rides per (day, subscription type, hour, start terminal).
## aggregate() with FUN = length counts rows per combination; the 'date'
## column only serves as a dummy variable to count over.
MyDF = aggregate(date ~ day*subscription.type*hour*start.terminal, data = Q2, FUN = length)
names(MyDF)[5] <- "count" # the aggregated "date" column really contains a count
MyDF$hour <- as.numeric(MyDF$hour)
table(MyDF$day, MyDF$subscription.type)
############ Multiple Clustering and Plotting
# Import xml data and turn into data frame
# Need to install package XML first
library("XML")
## NOTE(review): fetches the live station feed over HTTP at run time.
mystations <- xmlToDataFrame(doc = "http://www.capitalbikeshare.com/data/stations/bikeStations.xml")
# Convert latitudes and longitudes into numbers
mystations$long <- as.numeric(as.character(mystations$long))
mystations$lat <- as.numeric(as.character(mystations$lat))
# Make data frame of counts per combination of hour and station
# Set number of clusters
nclust = 4
######### Prepare data frame MyDF
# >>>>>> Can use subsets of Q2 to modify this
# >>>>>> For example, delete rides that start or end at stations with low overall activity
# >>>>>> Or use only rides on weekdays or weekends or only by subscribers etc.
MyDF = aggregate(date ~ hour*start.terminal, data = Q2, FUN = length)
names(MyDF)[3] <- "count" # the aggregated "date" column really contains a count
MyDF$hour <- as.numeric(MyDF$hour)
# Make a data frame for cluster memberships.
# One column contains terminal numbers,
# the other nclust columns contain membership probabilities.
# Also make names for the columns ("c1" .. "c<nclust>").
clustermem <- aggregate(count ~ start.terminal, data = MyDF, FUN = length)
clustermem[,3:(2+nclust)] <- 0
cnames <- c()
for (j in 1:nclust){
cnames <- c(cnames, paste("c",j,sep =""))
}
clustermem <- clustermem[,-2] # removes extraneous column 2
names(clustermem) <- c("start.terminal",cnames)
# Make a data frame for Poisson rates.
# One column contains hours (0 .. 23), the others lambdas (one for each cluster).
lambdas <- aggregate(count ~ hour, data = MyDF, FUN = length)
lambdas[,3:(2+nclust)] <- 0
lambdas <- lambdas[,-2]
names(lambdas) <- c("hour",cnames)
# The following code makes multiple cluster plots
# Clusters are plotted in black - red - green - blue
# depending on overall activity level
plotcount = 20 # set the number of plots to be made
plottitle = "bikeplot.pdf" # change as needed
pdf(file = plottitle)
## Each pass re-runs the EM fit from a fresh random initialization and adds
## one page (a map of clustered stations) to the PDF.
## NOTE(review): the loop index 'k' is reused by inner M-step loops below;
## R's for() reassigns 'k' from its own sequence on each outer iteration so
## the outer loop still advances correctly, but renaming would be clearer.
for (k in 1:plotcount){
## Uniform mixture weights and random rates as the EM starting point.
mypi <- rep(1,nclust)/nclust
lambdas[,2:(1+nclust)] <- 50*runif(nclust*24)
########### Now run EM, 20 iterations
for (j in 1:20){
######### E Step: membership probabilities per station given current rates.
## clusterprob() is defined elsewhere; its $cprobs slot is used as the
## per-cluster membership probabilities of the station.
for (station in clustermem$start.terminal){
MyDF0 <- MyDF[MyDF$start.terminal == station,] # Extract all counts for this station (24 rows)
MyDF0 <- MyDF0[order(MyDF0$hour), ] # Order by hour
result <- clusterprob(MyDF0$count,as.matrix(lambdas[,2:(1+nclust)]), mypi)
clustermem[clustermem$start.terminal == station, 2:(1+nclust)] <- result$cprobs
}
########## M Step: update mixture weights and per-cluster hourly rates.
mypi = colSums(as.matrix(clustermem[,2:(1+nclust)]))
mypi <- mypi/sum(mypi)
MyDFaux <- MyDF
MyDFaux <- merge(MyDFaux, clustermem, by = "start.terminal")
## Weight each station's hourly count by its cluster-membership probability.
for (k in 4:(3+nclust)){
MyDFaux[,k] <- MyDFaux[,k]*MyDFaux$count
}
## New lambda estimates: membership-weighted counts per hour, normalized by
## the total membership mass of each cluster.
## NOTE(review): eval(parse(...)) builds the formula from the column name;
## reformulate() or indexing with MyDFaux[[cnames[k]]] would be safer.
for (k in 1:nclust){
lam1 <- aggregate(eval(parse(text=cnames[k])) ~ hour, data = MyDFaux, FUN = sum)
lambdas[,k+1] <- lam1[,2]/sum(clustermem[,k+1])
}
####### End of M Step
}
# Summarize cluster membership
clustersummary <- clustermem
clustersummary[,2:(1+nclust)] <- round(clustersummary[,2:(1+nclust)])
clustersummary$cluster <- NA
## Assign each station the cluster whose (rounded) membership equals 1.
for (j in 1:nclust){
select <- clustersummary[,j+1] == 1
clustersummary$cluster[select] <- names(clustersummary)[j+1]
}
## Drop the nclust membership columns (column 2 shifts left on each delete).
for (j in 1:nclust){
clustersummary[,2] <- NULL
}
# Now clustersummary has two columns, one with terminal numbers, one with cluster membership
# Make data frame with only terminal numbers, longitudes, and latitudes
plotstations <- data.frame(start.terminal = mystations$terminalName, long = mystations$long, lat = mystations$lat)
# Make sure terminal numbers are no longer factors
plotstations$start.terminal <- as.numeric(as.character(plotstations$start.terminal))
clustersummary$start.terminal <- as.numeric(as.character(clustersummary$start.terminal))
# Merge
clustersummary <- merge(clustersummary, plotstations, by = "start.terminal")
# Now cluster summary has terminal number, latitude, longitude, and cluster info in its columns
# Plot, using a different color for each cluster, depending on overall activity = mean of lambdas
plotcolors = (1+nclust) - rank(colMeans(lambdas[,2:(1+nclust)]))
plot(clustersummary$long, clustersummary$lat, xlab = "Longitude",ylab = "Latitude", pch = 46,
main = paste("Clusters of DC Bike Stations") )
for (j in 1:nclust){
select <- clustersummary$cluster == cnames[j]
points(clustersummary$long[select], clustersummary$lat[select], lwd = 2,col = plotcolors[j])
}
grid(col = 1)
}
dev.off()
|
c9f047a73377792997578bba4ca17cb02dc50abb | 05c855aedb2f08484d6b76ebbd1b50b5212ef8c6 | /lib/predict_score1.R | 5d466db88a8b377dbb8f75f6652deb0eaba9d0a0 | [] | no_license | Wanting-Cui/Collaborative-Filtering | 80e12b3ca5a9bcb59b52c99f263cca13a5e2496b | 01eebd86fed06fcc9c537e1348f3b0ce0f9f76d2 | refs/heads/master | 2020-04-14T15:30:18.980070 | 2019-01-03T05:53:56 | 2019-01-03T05:53:56 | 163,928,996 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,076 | r | predict_score1.R | predict.score.movie <- function(train, test, weight,
par = list(threshold = 0.3,n = 10),
run.threshold = FALSE,
run.bestn = FALSE){
rownames(weight) <- rownames(train)
avg <- rowMeans(train, na.rm = T)
train_c <- train - avg
train_c[is.na(train_c)] <- 0
item <- colnames(test)
ind2 <- match(item, colnames(train))
mat <- matrix(0, ncol = ncol(test), nrow = nrow(test))
for (a in 1:nrow(test)){
nei <- select_neighbor(userid = rownames(test)[a], weight_mat = weight,
para = list(threshold = threshold,n = n),
run.bestn = run.bestn, run.threshold = run.threshold)
if (sum(is.na(nei)) != 0 || length(nei) == 0) {
mat[a, ] <- rep(0,ncol(test))
next
}
ind <- match(nei, rownames(weight))
w <- weight[a, ind]
k <- sum(w)
v <- data.matrix(train_c[ind, ind2])
mat[a, ] <- (1/k)*(w %*% v)
}
mat_final <- (avg[1:nrow(test)]) %*% t(rep(1, ncol(test))) + mat
return(mat_final)
}
|
e4448372e0f52b4cae8566a9ea2b733510f48b8f | 74e6fe0e281e7e9bf0a4b62baad95edbe29f8f46 | /man/mycltu.Rd | d5f54bb23aa91a6244f1745d0743fef09526f1c6 | [] | no_license | Jackschwarz58/OU-Math4753-Fall | 6f7e4885ac1f67c42c9d4121f43d0e0d412e6c89 | 35710af1c06e9744e129d89685cc2229016b86bc | refs/heads/master | 2023-01-13T04:00:34.169729 | 2020-11-19T01:07:09 | 2020-11-19T01:07:09 | 314,062,465 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,022 | rd | mycltu.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mycltu.R
\name{mycltu}
\alias{mycltu}
\title{mycltu(n, iter, a, b)}
\usage{
mycltu(n, iter, a, b)
}
\arguments{
\item{n}{The desired sample size}
\item{iter}{The number of iterations to run to compute the random sample}
\item{a}{The a value used to calculate the mean and variance for the theoretical curve}
\item{b}{The b value used to calculate the mean and variance for the theoretical curve}
}
\value{
A histogram with the sample mean, curve made from the sample distribution computed, and a
theoretical normal curve.
}
\description{
Uniform Central Limit Theorem Function.
}
\details{
This function illustrates the Central Limit Theorem by repeatedly drawing uniform
random samples of size n and computing their sample means. The resulting sampling
distribution is displayed as a histogram, overlaid with a density curve of the sample
means and the theoretical normal curve, and is returned from the function.
}
\examples{
mycltu(3, 10000, 0, 10)
mycltu(5, 10000, 0, 10)
mycltu(1, 10000, 0, 10)
}
|
ad1371c513b3a08c4b3f85a293aced7470f6a0bd | eaf6ef22efe32208656c29483c9f504ce08c643d | /R/get.obj.R | 725661af3ce79d5adb1bfc2019c3c6d653e2f700 | [] | no_license | leekgroup/Set | 6d1fdbbbc5d9fe2dfb3d829458702feb26ef9496 | 3e0ab08aa5c1c41f8256f9bacb883a5afdfe939f | refs/heads/master | 2021-01-20T04:33:17.659425 | 2015-04-28T00:13:49 | 2015-04-28T00:13:49 | 4,983,637 | 1 | 1 | null | 2015-04-28T00:13:51 | 2012-07-11T02:51:08 | R | UTF-8 | R | false | false | 183 | r | get.obj.R | get.obj <-
get.obj <- function(t, k, n, q) {
  ## Weighted two-group objective:
  ##   (1 - k) * sum(n - q over t == 1) / sum(n over t == 1)
  ## +     k  * sum(q     over t == 0) / sum(n over t == 0)
  grp1 <- t == 1
  grp0 <- t == 0
  part1 <- (1 - k) * sum((n - q)[grp1]) / sum(n[grp1])
  part0 <- k * sum(q[grp0]) / sum(n[grp0])
  part1 + part0
}
|
6a8aef5cddc26f4af5d3ff334d23448bcd66ab1b | 2e5bcb3c8028ea4bd4735c4856fef7d6e46b5a89 | /R/SnpPlm.R | cc65a64836528e0ffe765af64c8cb09b0de04502 | [] | no_license | HenrikBengtsson/aroma.affymetrix | a185d1ef3fb2d9ee233845c0ae04736542bb277d | b6bf76f3bb49474428d0bf5b627f5a17101fd2ed | refs/heads/master | 2023-04-09T13:18:19.693935 | 2022-07-18T10:52:06 | 2022-07-18T10:52:06 | 20,847,056 | 9 | 4 | null | 2018-04-06T22:26:33 | 2014-06-15T03:10:59 | R | UTF-8 | R | false | false | 2,590 | r | SnpPlm.R | ###########################################################################/**
# @RdocClass SnpPlm
#
# @title "The SnpPlm interface class"
#
# \description{
# @classhierarchy
#
# An @see "R.oo::Interface" implementing methods special for
# @see "ProbeLevelModel"s specific to SNP arrays.
# }
#
# @synopsis
#
# \arguments{
# \item{...}{Not used.}
# }
#
# \section{Methods}{
# @allmethods "public"
# }
#
# \section{Requirements}{
# Classes inheriting from this @see "R.oo::Interface" must provide the
# following fields:
# \itemize{
# \item{mergeStrands}{A @logical value indicating if strands should be
# merged or not.}
# }
# }
#
# @examples "../incl/SnpPlm.Rex"
#
# @author "HB"
#*/###########################################################################
## Constructor: SnpPlm is a pure R.oo Interface with no fields of its own;
## per the class documentation above, implementing classes must provide the
## 'mergeStrands' field that the methods below read.
setConstructorS3("SnpPlm", function(...) {
  extend(Interface(), "SnpPlm")
})
setMethodS3("getParameters", "SnpPlm", function(this, ...) {
  ## Append this interface's 'mergeStrands' flag to the parameter list
  ## collected by the next method in the class hierarchy.
  res <- NextMethod("getParameters")
  res[["mergeStrands"]] <- this$mergeStrands
  res
}, protected=TRUE)
## Get the CDF-structured cell indices for the units of this PLM, optionally
## with the per-unit strand groups merged (when this$mergeStrands is TRUE).
setMethodS3("getCellIndices", "SnpPlm", function(this, ..., verbose=FALSE) {
  requireNamespace("affxparser") || throw("Package not loaded: affxparser")
  cdfMergeStrands <- affxparser::cdfMergeStrands
  # Argument 'verbose':
  verbose <- Arguments$getVerbose(verbose)
  verbose && enter(verbose, "Identifying cell indices for a SnpPlm")
  ## Cell indices as defined by the next method in the class hierarchy.
  cells <- NextMethod("getCellIndices", verbose=verbose)
  # Merge strands?
  if (this$mergeStrands) {
    verbose && enter(verbose, "Merging strands")
    ## affxparser::cdfMergeStrands joins the strand groups of each unit.
    cells <- .applyCdfGroups(cells, cdfMergeStrands)
    verbose && exit(verbose)
  }
  verbose && exit(verbose)
  cells
})
setMethodS3("getChipEffectSetClass", "SnpPlm", function(this, ...) {
  ## The chip-effect container class used for SNP PLMs.
  return(SnpChipEffectSet)
}, private=TRUE)
setMethodS3("getChipEffectSet", "SnpPlm", function(this, ...) {
  ## Retrieve the chip-effect set from the next method, then propagate this
  ## PLM's strand-merging policy onto it before returning it.
  set <- NextMethod("getChipEffectSet")
  setMergeStrands(set, this$mergeStrands)
  set
})
setMethodS3("getProbeAffinityFile", "SnpPlm", function(this, ..., .class=SnpProbeAffinityFile) {
  ## Fetch the probe-affinity file via the next method, then propagate this
  ## PLM's strand-merging policy onto it before returning it.
  affinities <- NextMethod("getProbeAffinityFile", .class=.class)
  setMergeStrands(affinities, this$mergeStrands)
  affinities
})
setMethodS3("getMergeStrands", "SnpPlm", function(this, ...) {
  ## Accessor for the strand-merging flag of this PLM.
  return(this$mergeStrands)
})
setMethodS3("setMergeStrands", "SnpPlm", function(this, status, ...) {
  ## Set the strand-merging policy of this PLM and propagate it to the
  ## associated chip-effect set and probe-affinity file so all three agree.
  ##
  ## Args:
  ##   status: a logical; validated via R.oo's Arguments$getLogical().
  ##
  ## Returns (invisibly): the previous 'mergeStrands' status.

  # Argument 'status':
  status <- Arguments$getLogical(status)

  ## BUG FIX: the previous status must be read with getMergeStrands();
  ## getCombineAlleles() belongs to the CnPlm interface and is not defined
  ## for plain SnpPlm objects, so the old code failed (or returned the wrong
  ## flag) here.
  oldStatus <- getMergeStrands(this)

  ces <- getChipEffectSet(this)
  setMergeStrands(ces, status, ...)

  paf <- getProbeAffinityFile(this)
  setMergeStrands(paf, status, ...)

  this$mergeStrands <- status

  invisible(oldStatus)
})
|
152651c3258abfc60e4ae7a49cb85dc05dae283a | b6aba9b09f3fd4672c17c0311f5ee3eafdb29968 | /Codes/DE_Profiling/run_DESeq.R | 92f9bb39387d997129d72e599a6aa836425013b7 | [] | no_license | tambonis/GA_RNA_Seq | fc31d0e0674b334adfdf727555149aa2a42b7585 | 4ddb2e45e5d37f6e4437c1f3234b05346f0fc03b | refs/heads/master | 2022-03-01T13:03:41.041835 | 2019-11-05T13:23:45 | 2019-11-05T13:23:45 | 105,914,262 | 1 | 0 | null | 2017-10-09T12:05:04 | 2017-10-05T16:34:12 | null | UTF-8 | R | false | false | 726 | r | run_DESeq.R | runDESeq <- function(count.dat, conditions){
if(! packageDescription("DESeq")$Version == "1.22.1"){
stop("Wrong version of DESeq package. This script requires DESeq-1.22.1")
}
require("DESeq")
kSamples <- colnames(count.dat)
targets <- data.frame(kSamples, Factor=conditions)
## Initialize new DESeq object
cds <- newCountDataSet(count.dat, targets$Factor)
## estimate size factor
cds <- estimateSizeFactors(cds)
## estimate dispersion parameters (as described in paper)
cds <- estimateDispersions(cds, method= "per-condition", fitType='local')
## differential expression
res <- nbinomTest(cds, "condA", "condB")
return(list(cds=cds, de=res))
}
|
4d102e14eec715d6b05c21312771660a04b7c120 | 0c1dfbb49e4055c2fb0223461cd0501463d7fdcf | /DumbellChart.R | c6f7c3be6f0a5dc3d10b0caca59c13cfe7fa63ad | [
"Apache-2.0"
] | permissive | amrrs/Unique-Plots-in-R | 2f077377828d2ef8a6c2344ffa90a3c84f5af6c5 | b9dc487cdd3a0f4c4479c08c5ef905d46407bbad | refs/heads/master | 2021-06-18T18:10:10.683619 | 2017-07-13T15:56:16 | 2017-07-13T15:56:16 | 109,792,845 | 0 | 1 | null | 2017-11-07T05:54:24 | 2017-11-07T05:54:24 | null | UTF-8 | R | false | false | 1,332 | r | DumbellChart.R | #Dumbell Chart
# Dumbbell plots are a great tool to visualize the relative positions of two
# points per category, e.g. growth and decline between two years.
require(ggplot2)
## NOTE(review): installing a package from inside a script is a side effect;
## consider moving this install_github() call into setup documentation.
devtools::install_github("hrbrmstr/ggalt")
require(ggalt)
theme_set(theme_classic())
# Loading the dataset (read directly from GitHub at run time).
health<-read.csv("https://raw.githubusercontent.com/anishsingh20/datasets/master/health.csv")
## Fix the factor levels to the file order so the dumbbells keep that order.
health$Area <- factor(health$Area, levels=as.character(health$Area))
# ordering of the dumbells
# Making the plot: one dumbbell per Area from pct_2013 to pct_2014 (percent).
plot<-ggplot(aes(x=pct_2013*100,xend=pct_2014*100,y=Area , group = Area),data = health) +
geom_dumbbell(colour_x="#2D54ED",
size_x=2.5,
color="#8299F3",
size_xend = 2.5,
colour_xend="#48D5EA",
size=1.5) +
labs(x="Percentage",
y="City",
title="Dumbell Plot for Positional Changes between 2 points",
subtitle="Percent Change: 2013 vs 2014") +
theme(plot.title = element_text(hjust=0.5, face="bold"),
plot.background=element_rect(fill="#f7f7f7"),
panel.background=element_rect(fill="#f7f7f7"),
panel.grid.minor=element_blank(),
panel.grid.major.y=element_blank(),
panel.grid.major.x=element_line(),
axis.ticks=element_blank(),
legend.position="top",
panel.border=element_blank())
|
f6f49e3da85c2cd565d0612e77925eefda8c61b0 | b728b6f9a8cdb0ef468caffaa36b8d3c7e11ea08 | /files/plot2.R | 69c54f5153da3f3a8f781a7e2a093a5082c4d94c | [] | no_license | soparkar/ExData_Plotting1 | 5f5cd763c6b7bcb6960b82824923f238c69af046 | 7a1287e6790d7873b93df8c8f725b207fa6f5a54 | refs/heads/master | 2021-01-24T23:35:56.299977 | 2014-11-08T06:42:23 | 2014-11-08T06:42:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 160 | r | plot2.R | # initialize using clean_dat.R file
## Load and prepare the power-consumption data; clean_data.R is expected to
## define the 'datetime' and 'power' vectors used below (TODO confirm).
source("clean_data.R")
# Plot global active power over time as a line chart.
plot(datetime, power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
a052134a922304fad5df0cd3959977e2568e4563 | b5e65100e2c9eb3efd6bc49ca9bb309411fb908d | /ui.R | f6a7f5a05db83df3e1ef397f4820a4a00783f2fa | [] | no_license | wangrenfeng0/wangrenfeng0.github.io | e0f15c9696898fd4fb1a08d4763bc83b14508637 | 66adeb34a32a7f918cbb9f7f8821b6c3e687f786 | refs/heads/main | 2023-01-06T17:10:43.333325 | 2020-11-07T20:46:39 | 2020-11-07T20:46:39 | 309,583,337 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,147 | r | ui.R | library(shiny)
library(tidyr)
library(ggplot2)
library(usmap)
library(magrittr)
library(dplyr)
## Shiny UI for the beer-data explorer.
## Sidebar: measure (ABV/IBU), state picker, plot-type and regression-line
## controls. Main panel: four plot slots filled by the matching server code.
## NOTE(review): read.csv('Breweries.csv') below runs once, when this UI
## object is built, and needs Breweries.csv in the app's working directory.
ui <- fluidPage(
titlePanel("Beer Data"),
sidebarLayout(
sidebarPanel(
selectInput("select", label = h3("ABV OR IBU"),
choices = c("ABV", "IBU"),
selected = 'ABV'),
## NOTE(review): default 'All' does not match the 'ALL' choice -- confirm.
selectInput('states',label =h3('States Selection'),
choices=c('ALL',read.csv('Breweries.csv',header=T)$State),
selected='All'),
radioButtons('plotType', label=h3('Histogram or Boxplot'),
choices=c('Histogram','Boxplot'),
selected='Histogram'),
checkboxInput('showLM','Show regression Line',value=T),
hr(),
fluidRow(column(3, verbatimTextOutput("value")))
),
mainPanel(
plotOutput(outputId = "distPlot1"),
plotOutput(outputId = 'distPlot2'),
plotOutput(outputId = 'distPlot3'),
plotOutput(outputId = 'distPlot4')
)
)
)
#shinyApp(ui, server)
697b9b4494222521a2e9c1705b0335930a6c5682 | 38373485330e50b09d27ea265ee0535b368f0579 | /code/grouping/grouping.R | 483325e2767b8182ce989d61891d64d0184a337d | [] | no_license | s81320/vis | 5300e346349acd568cd7ff4ad06751960aeb42b8 | b96755388ebdbd50c42d145e9e6fc26b2c1c45c4 | refs/heads/master | 2022-11-18T03:34:05.794807 | 2020-07-21T17:25:05 | 2020-07-21T17:25:05 | 270,222,860 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,245 | r | grouping.R | ## work with correlation based igraph and corrplot
## to find groups in the field positions and skills
## our-data.RDS is the data set I guess we all work with
# library
## NOTE(review): installing packages and calling setwd() inside a script is
## fragile -- consider removing install.packages()/setwd() and documenting
## the required packages and input path instead.
install.packages("igraph")
library(igraph)
setwd("~/Desktop/grouping-R")
soc.data <- readRDS("our-data.RDS")
#######
### field positions
### grouping on the basis of (almost) perfect correlation
### should show 10 groups.
names(soc.data[,c(20:45)])
## Adjacency from the correlation matrix: keep near-perfect correlations only.
mat<-cor(soc.data[,c(20:45)])
mat[mat<0.999] <- 0
network <- graph_from_adjacency_matrix( mat, weighted=T, mode="undirected", diag=F)
plot(network)
#### for fewer groups, this should show 4 groups (lower cutoff).
names(soc.data[,c(20:45)])
mat<-cor(soc.data[,c(20:45)])
mat[mat<0.95] <- 0
network <- graph_from_adjacency_matrix( mat, weighted=T, mode="undirected", diag=F)
plot(network)
#######
### plots the correlation matrix
# install.packages("corrplot")
library(corrplot)
mat<-cor(soc.data[,c(20:45)])
corrplot(mat, method="circle", order="FPC", tl.col="black")
### how to interpret this? At least 3 groups ... maybe more
#######
### skills
names(soc.data)
names(soc.data[,c(46:79)])
## Same approach for the skill columns, with a looser correlation cutoff.
mat<-cor(soc.data[,c(46:79)])
mat[mat<0.75] <- 0
network <- graph_from_adjacency_matrix( mat, weighted=T, mode="undirected", diag=F)
plot(network)
|
ec3f1661679fffbd319be30296de107a99819e99 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/SHELF/examples/SHELF-package.Rd.R | 2f3db905d0b2739cb64aeb9396f68a54e413bb43 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,697 | r | SHELF-package.Rd.R | library(SHELF)
### Name: SHELF-package
### Title: Tools to Support the Sheffield Elicitation Framework
### Aliases: SHELF-package SHELF
### ** Examples
## Not run:
##D ## 1) Elicit judgements from two experts individually
##D # Expert A states P(X<30)=0.25, P(X<40)=0.5, P(X<50)=0.75
##D # Expert B states P(X<20)=0.25, P(X<25)=0.5, P(X<35)=0.75
##D # Both experts state 0<X<100.
##D
##D ## 2) Fit distributions to each expert's judgements
##D v <- matrix(c(30, 40, 50, 20, 25, 35), 3, 2)
##D p <- c(0.25, 0.5, 0.75)
##D myfit <- fitdist(vals = v, probs = p, lower = 0, upper = 100)
##D
##D ## 3) Plot the fitted distributions, including a linear pool
##D plotfit(myfit, lp = T)
##D
##D ## 4) Now elicit a single 'consensus' distribution from the two experts
##D # Suppose they agree P(X<25)=0.25, P(X<30)=0.5, P(X<40)=0.75
##D v <-c(25, 30, 40)
##D p <-c(0.25, 0.5, 0.75)
##D myfit <- fitdist(vals = v, probs = p, lower = 0, upper = 100)
##D
##D ## 5) Plot the fitted density, and report some feedback, such as the
##D # fitted 5th and 95th percentiles
##D plotfit(myfit, ql = 0.05, qu = 0.95)
##D feedback(myfit, quantiles = c(0.05, 0.95))
##D
##D ## Can also use interactive plotting
##D v <- matrix(c(30, 40, 50, 20, 25, 35), 3, 2)
##D p <- c(0.25, 0.5, 0.75)
##D myfit <- fitdist(vals = v, probs = p, lower = 0, upper = 100)
##D # plot each distribution
##D plotfit(myfit, int = TRUE)
##D
##D ## plot the distribution for one expert only
##D plotfit(myfit, int = TRUE, ex = 1)
##D
##D ## Enter judgements in interactive mode
##D elicit()
##D
##D ## Enter judgements using the roulette method
##D roulette(lower = 0, upper = 100, nbins = 10, gridheight = 10)
## End(Not run)
|
f92588a2782c5423e89ff48a6df9ecce5b40ce5d | 9bd311ddca0e60ee4007a806c19e5e764c2c8558 | /scripts/segmentDynamics.R | 62657a81863066c9be082a69d5e92a22f17302b9 | [] | no_license | raim/segmenTools | 8b34fc62c4e10d2ffa4423cc972367b9def2c689 | 7d356916b09a0cc019baf152de5dbf778130c0a1 | refs/heads/master | 2023-08-18T15:50:03.200348 | 2023-08-09T14:27:16 | 2023-08-09T14:27:16 | 77,826,031 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 33,502 | r | segmentDynamics.R | #!/usr/bin/env Rscript
## COLLECTING SEGMENT READ DISTRIBUTIONS AND CLUSTERING OF SEGMENT TIME SERIES
## load segments and time-series, incl. phase/pval
## * analyze phase-dist of adjacent segments, tandem
## select significantly different segments
## * rm all coding segments and calculate overlap, Jaccard measure and
## p-vals for remaining segments vs. CUT/SUT/XUT
## * rm all CUT/SUT/XUT segments and analyze additional
## * compare to ARS
## * compare to introns
## * analyze genes non-consistent with clustering: oscillating n/r, vs.
## wrong or none-oscillating major cluster genes; C vs. D in last cycle
## nicer timestamp
## Timestamp helper: current local time formatted as "YYYYMMDD HH:MM:SS".
## NOTE(review): the name masks stats::time(); kept for backward compatibility.
time <- function() {
  strftime(Sys.time(), "%Y%m%d %H:%M:%S")
}
## segment utils
library("segmenTier") # for processTimeseries - TODO : unify coor2index!
library(segmenTools)
#segtools <- "~/programs/segmenTools/"
#source(file.path(segtools,"R/segmenTools.R")) # for segment analysis
#source(file.path(segtools,"R/coor2index.R")) # coor2index
#suppressPackageStartupMessages(library("stringr")) # for 0-padded filenames
suppressPackageStartupMessages(library(optparse))
### OPTIONS
option_list <- list(
make_option(c("-i", "--infile"), type="character", default="",
help="chromosome coordinates of primary segments as produced by clusterSegments.R but without header ('allsegs.csv')"),
make_option(c("--chrfile"), type="character", default="",
help="chromosome index file, providing a sorted list of chromosomes and their lengths in column 3 [default %default]"),
##chrfile = $YEASTDAT/chromosomes/sequenceIndex_R64-1-1_20110208.csv
make_option(c("--datafile"), type="character", default="",
help="full data set, RData that contains the time series in matrix 'ts' and its genomic coordinates in matrix 'coor' [default %default]"),
## SEGMENT SETTINGS
make_option(c("--fuse.segs"), action="store_true", default=FALSE,
help="use FUSE tag from clusterSegments to fuse adjacent segments"),
make_option(c("--typecol"), type="character", default="type",
help="name of the column with segment types"),
make_option(c("--stypes"), type="character", default="",
help="sub-set of segments in column 'type', option --typecol, use --typecol ALL and --stypes ALL to avoid splitting into types (unless `ALL' is an actual colum name)"),
make_option(c("--idcol"), type="character", default="ID",
help="name of the column with unique segment names"),
## OSCILLATION SETTINGS
make_option("--pval.thresh", default=1,
help="phases above this thresh will be set to NA [default %default]"),
make_option("--phase.weight", action="store_true", default=FALSE,
help="use weight for calculating average phases of segments: 1-p.value [default %default]"),
make_option("--pval.thresh.sig", default=1,
help="pvals above will be counted as non-significant; optionally used in segment averaging (option --filter.reads), and segment filtering before clustering (options --with.rain or --with.permutation, and --cl.filter) [default %default]"),
make_option("--read.rng", type="character", default="",
help="range of time-points for total read-count, Fourier and rain calculations (but not used for clustering!), comma-separated list of integers"),
make_option("--period", default=24,
help="period for `rain' oscillation stats [default %default]"),
make_option("--deltat", default=2,
help="sampling interval for `rain' oscillation stats [default %default]"),
## SEGMENT AVERAGING
make_option(c("--endcut"), type="integer", default=0,
help="fraction at segment ends that will not be considered for average time series"),
make_option("--avgk", type="integer", default=0,
help="integer width of running median smoothing window, using stats::runmed; must be odd"),
make_option(c("--mean.ratio"), action="store_true", default=FALSE,
help="take mean ratio of each read time-series before calculating segment average"),
make_option(c("--filter.reads"), action="store_true", default=FALSE,
help="use only reads with oscillation p.value < pval.thresh.sig for segment average caculation"),
## SEGMENT TIME-SERIES PROCESSING
make_option(c("--trafo"), type="character", default="raw",
help="time-series transformation function, R base functions like 'log', and 'ash' for asinh is available [default %default]"),
make_option(c("--use.snr"), action="store_true", default=FALSE,
help="do SNR scaling of amplitudes [default %default]"),
make_option(c("--dc.trafo"), type="character", default="raw",
help="DC component transformation function, see --trafo [default %default]"),
make_option("--perm", type="integer", default=0,
help="number of permutations used to calculate p-values for all DFT components"),
make_option("--smooth.time", type="integer", default=1, # best for clustering was 3
help="span of the moving average for smoothing of individual read time-series"),
make_option(c("--dePCR"), action="store_true", default=FALSE,
help="simple PCR amplification model to calculate back from read-cont to original molecule number, assuming minimal signal ~ 1 molecule"),
make_option(c("--countfile"), type="character", default="raw",
help="file providing the total read-count per time point which had been used to normalize read-counts to RKPM"),
## SEGMENT CLUSTERING
make_option(c("--missing"), action="store_true", default=FALSE,
help="only calculate missing clusterings; useful if SGE jobs were not successful, to only calculate the missing"),
make_option(c("--dft.range"), type="character", default="2,3,4,5,6,7",
help="DFT components to use for clustering, comma-separated [default %default]"),
make_option(c("--cl.filter"), type="character", default="unsig.p",
help="type of segment filter for clustering [default %default]"),
make_option(c("--with.rain"), type="character", default="",
help="path for prior calculation of `rain' p-values for segment filtering, p < pval.thresh.sig [default %default]"),
make_option(c("--with.permutation"), type="character", default="",
help="path for prior calculation of `permutation' p-values for segment filtering, p < pval.thresh.sig [default %default]"),
## make_option("--smooth.time.plot", type="integer", default=3, # so far best!
## help="as smooth.time but only for plotting clusters not used of analysis"),
## FLOWCLUST PARAMETERS
make_option("--ncpu", type="integer", default=1,
help="number of available cores for flowClust"),
## make_option("--seed", type="integer", default=1,
## help="seed for the random number generator before calling flowclust to get stable clustering results (TODO: set.seed, does it work for flowclust?)"),
make_option(c("--K"), type="character", default="12:20",
help="number of clusters to use in flowClust, comma-separated list of integers and colon-separated ranges [default %default]"),
make_option(c("--fixedK"), type="integer", default=0,
help="fixed number of clusters to select in flowClust, flowMerge will start from there [default %default]"),
make_option("--B", type="integer", default=500,
help="maximal number of EM iterations of flowClust"),
make_option("--tol", default=1e-5,
help="tolerance for EM convergence in flowClust"),
make_option("--lambda", default=1,
help="intial Box-Cox transformation parameter in flowClust"),
make_option("--nu", default=4,
help="degrees of freedom used for the t distribution in flowClust; Inf for pure Gaussian"),
make_option("--nu.est", type="integer", default=0,
help="A numeric indicating whether ‘nu’ is to be estimated or not; 0: no, 1: non-specific, 2: cluster-specific estimation of nu"),
make_option("--trans", type="integer", default=1,
help="A numeric indicating whether the Box-Cox transformation
parameter is estimated from the data; 0: no, 1: non-specific, 2: cluster-specific estim. of lambda"), ## TODO: try 2
make_option("--randomStart", type="integer", default=1,
help="number of kmeans initializations"),
make_option(c("--merge"), action="store_true", default=FALSE,
help="use flowMerge to merge best BIC clustering"),
make_option(c("--recluster"), action="store_true", default=FALSE,
help="use k-means to re-cluster best BIC clustering"),
## OUTPUT OPTIONS
make_option(c("--jobs"), type="character", default="distribution,timeseries,fourier,rain,clustering",
help=",-separated list of rseults to save as csv: distributions,timeseries,fourier,clustering; default is to save all, clustering only if specified by separate option --cluster, and fourier results will contain p-values only of --perm was specified and >1"),
make_option(c("--out.name"), type="character", default="",
help="file name prefix of summary file"),
make_option(c("--out.path"), type="character", default=".",
help="directory path for output data (figures, csv files)"),
make_option(c("-v", "--verb"), type="integer", default=1,
help="0: silent, 1: main messages, 2: warning messages"),
make_option(c("--fig.type"), type="character", default="png",
help="figure type, png or pdf [default %default]"),
make_option(c("--save.rdata"), action="store_true", default=FALSE,
help="save complete analysis as RData file (big!)"))
## get command line options
opt <- parse_args(OptionParser(option_list=option_list))
## process comma-separated list arguments
## `lst.args` maps option names to the storage mode their parsed values
## should end up in; each such option arrives from optparse as a single
## string like "2,3,4" or "12:20".
lst.args <- c(dft.range="numeric",
              read.rng="numeric",
              stypes="character",
              jobs="character",
              K="numeric")
## For every listed option: split on ",", expand "a:b" spans into full
## integer sequences (numeric options only), then write the flattened
## result back into `opt` coerced to the declared mode.
for ( i in 1:length(lst.args) ) {
    idx <- which(names(opt)==names(lst.args)[i])
    ## get individual values
    tmp <- as.list(unlist(strsplit(opt[[idx]], ",")))
    ## expand ranges
    if ( lst.args[i]=="numeric" & length(tmp)>0 )
        for ( j in 1:length(tmp) ) { # only for numeric modes
            tmp2 <- unlist(strsplit(tmp[[j]], ":"))
            if ( length(tmp2)>1 ) {
                tmp2 <- as.numeric(tmp2)
                tmp[[j]] <- tmp2[1]:tmp2[2]
            }
        }
    if ( length(tmp)>0 )
        opt[[idx]] <- unlist(tmp)
    ## coerce to the declared mode; when splitting yielded nothing (empty
    ## default string), the original value is kept and only its mode changes
    mode(opt[[idx]]) <- lst.args[i]
}
## promote options to main environment and print all arguments
if ( opt$verb>0 )
cat(paste("SETTINGS:\n"))
for ( i in 1:length(opt) ) {
if ( opt$verb>0 )
cat(paste(names(opt)[i], "\t", #typeof(opt[[i]]),
paste(opt[[i]],collapse=","), "\n",sep=""))
arg <- names(opt)[i]
assign(arg, opt[[arg]])
}
if ( verb>0 )
cat(paste("\n"))
## LOADING PACKAGES FOR REQUESTED JOBS
if ( "clustering" %in% jobs ) {
suppressPackageStartupMessages(library("flowClust"))
suppressPackageStartupMessages(library("flowMerge"))
}
if ( "rain" %in% jobs )
suppressPackageStartupMessages(library("rain"))
### START
## generate locally in cwd
dir.create(out.path, showWarnings = FALSE)
if ( ncpu>1 ) # load parallel for flowClust
suppressPackageStartupMessages(library("parallel"))
### LOAD DATA
## load chromosome index
cf <- read.table(chrfile,sep="\t",header=FALSE)
chrS <- c(0,cumsum(cf[,ncol(cf)])) ## index of chr/pos = chrS[chr] + pos
## load time-series and oscillation data: coor, ts, osc
if ( verb>0 )
cat(paste("Loading data\t",time(),"\n"))
load(datafile)
## set missing read range to all time points:
## an empty or NA-containing --read.rng falls back to every column of `ts`.
## Scalar `||` (short-circuiting) is the idiomatic operator for a
## single-condition `if`; seq_len() replaces 1:n (equivalent here since
## ts always has at least one column, and safe if it ever does not).
if ( length(read.rng) == 0 || any(is.na(read.rng)) )
    read.rng <- seq_len(ncol(ts))
## oscillation parameters (for first two cycles only)
phase <- osc[,"phase" ]
pval <- osc[,"rpval"]
phase[pval >= c(min(1,pval.thresh))] <- NA # =1 rm 360° artefact in osci set
logit <- function(p) log(p/(1-p))
lpvl <- logit(pval)
lpvl[is.infinite(lpvl)] <- NA
## PHASE WEIGHTS by p-values
## TODO: use weights? seems to have little effect
wght <- rep(1,length(pval)) # phase.weight default equiv. to no weight
if ( phase.weight ) wght <- 1-pval
##if ( phase.weight=="log" ) wght <- -log2(pval) # TODO: fix infinite weights!
## LOAD SEGMENTS
if ( verb>0 )
cat(paste("Loading segments\t",time(),"\n"))
segs <- read.table(infile,sep="\t",header=TRUE, comment.char="",
stringsAsFactors=FALSE)
## add type ALL column,
## allows to pass ALL to cmdline option --stypes to avoid typesplitting
if ( stypes[1]=="ALL" & !"ALL"%in%colnames(segs) ) {
segs <- cbind.data.frame(segs, all="all")
stypes <- "all"
typecol <- "all"
}
## reduce to requested segment types
if ( stypes[1]=="" )
stypes <- sort(unique(as.character(segs[,typecol])))
segs <- segs[as.character(segs[,typecol])%in%stypes,]
if ( fuse.segs ) {
fuse <- segs[2:nrow(segs),"fuse"]==1
cat(paste("NOTE: FUSING", sum(fuse), "SEGMENTS, from segment types:\n",
paste(unique(segs[fuse,typecol]),collapse="; "),"\n"))
fsegs <- segs[c(TRUE,!fuse),]
fsegs[,"end"] <- segs[c(!fuse,TRUE),"end"]
segs <- fsegs
}
## replace genome coordinates by continuous index
segs <- coor2index(segs,chrS)
## split by type
lst <- split(segs,segs[,typecol])
if ( length(stypes)>0 )
lst <- lst[names(lst)%in%stypes]
sgtypes <- names(lst)
segnum <- unlist(lapply(lst,nrow))
## CALCULATE OSCI PARAMETERS FOR SEGMENTS
## cluster number of max BIC clustering
## will be used in segmentation analysis together with results from
## segmentLengths and testSegments
if ( "clustering" %in% jobs ) {
clnum <- matrix(NA, length(sgtypes),ncol=4)
rownames(clnum) <- sgtypes
colnames(clnum) <- c("K","BIC","NUMCL", "TOT")
}
for ( type in sgtypes ) {
if ( !exists("type", mode="character") )
type <- sgtypes[1] ## NOTE: DEVEL HELPER - NOT REQUIRED
if ( verb>0 )
cat(paste(type, "\t",time(),"\n"))
fname <- gsub(":",".",type) # FOR FILENAMES
sgs <- lst[[type]]
## READ-COUNT DISTRIBUTIONS OF SEGMENTS
## pvs required for filtering before clustering
phs <- pvs <- rds <- NULL
if ( any(c("distribution","clustering") %in% jobs) ) {
if ( verb>0 )
cat(paste("segment read statistics\t",time(),"\n"))
##file.name <- file.path(out.path,paste(fname,"_dynamics",sep=""))
##if ( file.exists(file.name) & !redo) {
## cat(paste("\talready done\n")
##}
## add phase distribution, weighted by p-values!
phs <- t(apply(sgs,1,function(x)
phaseDist(phase[x["start"]:x["end"]],
w=wght[x["start"]:x["end"]])))
## comparing sg0002_raw_ash_icor_463 with sg0002_raw_ash_icor_464
## from D:dft1-7.dcash.snr_T:raw_K:12_S:icor_E:3_M:75_nui:3
test.circ.test <- FALSE
if (test.circ.test) {
idx1 <- grep("sg0002_raw_ash_icor_463",sgs[,idcol],value=F)
idx2 <- grep("sg0002_raw_ash_icor_464",sgs[,idcol],value=F)
x1 <- circular(phase[sgs[idx1,"start"]:sgs[idx1,"end"]],
type="angles",units="degrees")
x2 <- circular(phase[sgs[idx2,"start"]:sgs[idx2,"end"]],
type="angles",units="degrees")
plot(x1);points(x2,col=2)
watson.two.test(x1,x2)
}
## raw pvalue distribution
## NOTE only p.signif is used, fraction of signif. oscill. reads!
pvs <- t(apply(sgs,1,function(x)
pvalDist(pval[x["start"]:x["end"]],pval.thresh.sig)))
## total range of expression
## NOTE: ONLY TAKING FIRST 19/20 TIMEPOINTS TO SKIP SHIFT IN THE END?
rds <- t(apply(sgs,1,function(x)
readDist(c(ts[x["start"]:x["end"],read.rng]))))
## write out phase, pval and read-count distributions
## convert back to chromosome coordinates
sgdst <- data.frame(ID=sgs[,idcol],rds,phs,pvs)
file.name <- file.path(out.path,paste(fname,"_dynamics",sep=""))
write.table(sgdst,file=paste(file.name,".csv",sep=""),quote=FALSE,
sep="\t",col.names=TRUE,row.names=FALSE)
}
if ( !any(c("rain","timeseries","fourier","clustering") %in% jobs) )
next
## AVERAGE SEGMENT TIME-SERIES
## required for all following options
## NOTE: 20161231 - all smoothing attempts failed, and the
## simple mean of each nucleotide works best
## TODO: rm extrema (take only center 85%)?
## runmed (avgk>1) before global mean?
## why doesnt median work (clustering fails)?
## try to filter for most significant oscillators?
if ( verb>0 )
cat(paste("segment time series\t",time(),"\n"))
## Average the read time-series over one segment.
## x: one row of `sgs` as a named character vector (supplied by apply),
##    with "start"/"end" giving the segment range in continuous
##    (chromosome-concatenated) coordinates.
## Uses enclosing-scope objects: ts, pval, filter.reads, pval.thresh.sig,
## endcut, avgk, mean.ratio and segmenTools::segmentAverage.
## Returns the per-timepoint segment average.
sgavg <- function(x) {
    rng <- as.numeric(x["start"]):as.numeric(x["end"])
    ## drop = FALSE keeps a 1-row matrix for single-position segments
    rds <- ts[rng, , drop = FALSE]
    if ( filter.reads ) {
        ## keep only positions (rows) with a significant oscillation
        ## p-value. BUGFIX: the previous `rds[pval[rng] < pval.thresh.sig]`
        ## subset a matrix with a single logical vector, which flattens it
        ## column-major into a plain vector (recycling the mask) instead of
        ## selecting rows; which() also drops NA p-values explicitly.
        rds <- rds[which(pval[rng] < pval.thresh.sig), , drop = FALSE]
    }
    ## average over positions for each time point
    segmentAverage(rds,
                   avg="mean", endcut=endcut, k=avgk, endrule="median",
                   mean.ratio=mean.ratio)
}
## assume PCR amplfication and get original number
avg <- t(apply(sgs,1,sgavg))
## re-scale data by assumping a simple PCR model
## x(n) = x(0) * 2^n, with n PCR amplification cycles
if ( dePCR ) {
## multiply by the total read-count, assuming data
## was divided by this factor
cnt <- read.delim(countfile,
header=FALSE, sep=" ")
tot <- t(t(avg)*cnt[,2])
## estimate number of PCR cycles n
## n = log2(x(n)/x(0))
## assuming minimal signal is from x(0)=1 molecule
n <- log2(min(c(tot[tot>0])))
## TODO: this is negative; likely requires re-scaling of RPKM
## x(0) = x(n) * 2^-n
avg <- tot*2^-n
}
## write out average timeseries
if ( "timeseries" %in% jobs ) {
sgts <- data.frame(ID=sgs[,idcol], avg)
file.name <- file.path(out.path,paste(fname,"_timeseries",sep=""))
write.table(sgts,file=paste(file.name,".csv",sep=""),quote=FALSE,
sep="\t",col.names=TRUE,row.names=FALSE)
}
## get DFT, use time series processing from segmenTier
## this will be written out; re-calculated after filtering
## below for clustering
## NOTE: using read.rng here as well (above for total read count)
dft <- NULL
##if ( any(c("fourier","clustering") %in% jobs) ) {
if ( any(c("fourier") %in% jobs) ) {
if ( verb>0 )
cat(paste("fourier transform\t",time(),"\n",sep=""))
if ( perm>0 & verb>0 )
cat(paste("permutations\t", perm,"\n",sep=""))
tset <- processTimeseries(avg[,read.rng],na2zero=TRUE, use.fft=TRUE,
smooth.time=smooth.time,
trafo=trafo, perm=perm,
dft.range=dft.range, dc.trafo=dc.trafo,
use.snr=use.snr,low.thresh=-Inf, verb=verb)
## TODO: FIX AMPLITUDE IF SNR WAS NOT USED
if ( !use.snr & dc.trafo=="raw" )
tset$dft <- tset$dft/length(read.rng)
## write out phase, pval and DFT from segment averages
dft <- tset$dft
if ( perm>0 ) {
pvl <- tset$pvalues
colnames(pvl) <- paste(colnames(pvl),"p",sep="_")
dft <- data.frame(dft,pvl)
}
sgdft <- data.frame(ID=sgs[,idcol], dft)
file.name <- file.path(out.path,paste(fname,"_fourier",sep=""))
write.table(sgdft,file=paste(file.name,".csv",sep=""),quote=FALSE,
sep="\t",col.names=TRUE,row.names=FALSE)
}
## use rain
## TODO: establish exact period from DO, then use rain
## use only first 20 time-points here as well
if ( any(c("rain") %in% jobs) ) {
if ( verb>0 )
cat(paste("rain osci stastistics\t",time(),"\n"))
rn <- rain(t(avg[,read.rng]), period=period, deltat=deltat)
sgrain <- data.frame(ID=sgs[,idcol], rn)
file.name <- file.path(out.path,paste(fname,"_rain",sep=""))
write.table(sgrain,file=paste(file.name,".csv",sep=""),quote=FALSE,
sep="\t",col.names=TRUE,row.names=FALSE)
}
if ( !"clustering" %in% jobs ) next
if ( verb>0 )
cat(paste("segment clustering\t",time(),"\n"))
## CLUSTER DFT OF AVERAGE TIME-SERIES
## TODO: instead, collect DFT cluster centers of segments
## and use these as centers, or cluster these instead?
## OR: trust cluster-segment and take only those reads that
## were in major clusters
## TODO: use permutation results as filter; optionally load
## permutation from previous run!
## TODO: move back to unsig! was the most sensible!
## FILTERS
## NOTE: 20170118
## currently only `unsig' works well in filtering segment mass in
## exponent>1; however, this is based on prior pvalue of read-counts
## while it may be nicer to have a filter based on segment averages
##nosig <- dft[,"X2_p"] >= 0.05; nosig[is.na(nosig)] <- TRUE # NO SIG @PERIOD
#### nonsig - NO SIGNIFICANT PVALUE IN ANY FOURIER COMPONENT!
##nonsig <- !apply(tset$pvalues,1,function(x) any(x< .05))
##nonsig[is.na(nonsig)] <- TRUE
##lowex <- rds[,"r.0"]>.9 # MINIMAL FRACTION OF READ COUNTS>0
##len <- sgs[,"end"]-sgs[,"start"]+1
##short <- len < 100 # LONGER THEN 150
## use prior RAIN calculation as filter
unsigr <- rep(FALSE, nrow(avg))
if ( with.rain!="" ) {
sgrain <- read.delim(file.path(with.rain,paste0(fname,"_rain.csv")),
row.names=1,stringsAsFactors=FALSE)[as.character(sgs[,idcol]),]
## FILTER
unsigr <- sgrain[,"pVal"] >= pval.thresh.sig
## plot
## TODO: this is used in paper; make fit for supplementary material
file.name <- file.path(out.path,paste0(fname,"_filter_rain"))
plotdev(file.name,width=4,height=4,type=fig.type,res=300)
rpcdf <- ecdf(sgrain[,"pVal"])
plot(rpcdf,xlab="rain p-value")
points(pval.thresh.sig,rpcdf(pval.thresh.sig))
points(.995,rpcdf(.995))
legend("top",legend=c(paste0(sum(!unsigr), " oscillate")))
dev.off()
#plot(sgrain[,"pVal"],pvs[,1]) # NOTE slight correlation!
}
## use prior permutation calculation as filter
unsigp <- rep(FALSE, nrow(avg))
if ( with.permutation!="" ) {
sgdft <- read.delim(file.path(with.permutation,
paste0(fname,"_fourier.csv")),
row.names=1,stringsAsFactors=FALSE)[as.character(sgs[,idcol]),]
## FILTER
sgdft[is.na(sgdft[,"X2_p"]),"X2_p"] <- 1
unsigp <- sgdft[,"X2_p"] >= pval.thresh.sig
## plot
file.name <- file.path(out.path,paste0(fname,"_filter_permutation"))
plotdev(file.name,width=4,height=4,type=fig.type,res=300)
ppcdf <- ecdf(sgdft[,"X2_p"]) ## TODO: this must be argument
plot(ppcdf,xlab="permutation p-value")
points(pval.thresh.sig,ppcdf(pval.thresh.sig))
legend("top",legend=c(paste0(sum(!unsigp), " oscillate")))
dev.off()
}
## FILTER: minimal expressed time-points per segment
mintpt <- 12
npoints <- rowSums(avg>0)
fewpoints <- npoints <= mintpt
## plot fewpoints
file.name <- file.path(out.path,paste0(fname,"_filter_numpoints"))
plotdev(file.name,width=4,height=4,type=fig.type,res=300)
npcdf <- ecdf(npoints)
plot(npcdf,xlab="number of expressed timepoints")
points(mintpt,npcdf(mintpt),cex=2)
legend("top",legend=c(paste0(sum(!fewpoints), " expressed")))
dev.off()
## FILTER: no single significant read-count (< pval.thresh.sig)
unsig <- pvs[,"p.signif"] == 0
pcdf <- ecdf(pvs[,"p.signif"])
file.name <- file.path(out.path,paste0(fname,"_filter_signifraction"))
plotdev(file.name,width=4,height=4,type=fig.type,res=300)
plot(pcdf,xlab="fraction of significant reads")
points(.01,pcdf(.01))
dev.off()
## FILTER: total expresssion vs. rain
minexp <- -.5
tot <- log(rds[,"r.mean"])
lowex <- tot<minexp
## plot total
file.name <- file.path(out.path,paste0(fname,"_filter_total"))
plotdev(file.name,width=4,height=4,type=fig.type,res=300)
tcdf <- ecdf(tot)
plot(tcdf,xlab="mean read-count")
points(minexp,tcdf(minexp))
legend("right",legend=c(paste0(sum(!lowex), " expressed")))
dev.off()
## SHORT
minlen <- 90
len <- sgs[,"end"]-sgs[,"start"]+1
short <- len < minlen # LONGER THEN 150
## plot lengths
file.name <- file.path(out.path,paste0(fname,"_filter_length"))
plotdev(file.name,width=4,height=4,type=fig.type,res=300)
lcdf <- ecdf(len)
plot(lcdf,xlab="segment length")
points(minlen,lcdf(minlen))
legend("right",legend=c(paste0(sum(!short), " long")))
dev.off()
## filter combination
noise <- lowex | short | fewpoints
## 20190425 - align with segmentStatistics.R
##noise <- pvs >= noisep & low
## unsigr <- sgrain[,"pVal"] >= pval.thresh.sig
noise <- unsigr & lowex
## SELECT FILTER
filters <- cbind(lowex=lowex, fewpoints=fewpoints, short=short,
unsig=unsig, unsig.r=unsigr, unsig.p=unsigp,
noise=noise)
rmvals <- filters[,cl.filter]
dat <- avg
dat[rmvals,] <- 0 # set to zero, will be removed in processTimeseries
## TODO: lowly expressed AND non-signficant oscillation LOOKS good!!
##table(lowex,unsigr)
##rmvals <- lowex&unsigr
if ( verb>0 ) {
cat(paste("clustered segments\t",sum(!rmvals),"\n"))
cat(paste("noise segments\t",sum(rmvals),"\n"))
}
tset <- processTimeseries(dat,na2zero=TRUE,use.fft=TRUE,
smooth.time=smooth.time, trafo=trafo,
perm=0, dft.range=dft.range, dc.trafo=dc.trafo,
use.snr=use.snr,low.thresh=-Inf)
## cluster by flowClust
testnew <- TRUE ## ALLOWS MULTIPLE EQUAL K!
if ( testnew ) {
fcset <- clusterTimeseries2(tset, K=K, method="flowClust",
parameters=c(B=B,tol=tol,lambda=lambda,
nu=nu,nu.est=nu.est,
trans=trans, randomStart=0))
} else {
fcset <- flowclusterTimeseries(tset, ncpu=ncpu, K=K, selected=fixedK,
B=B, tol=tol, lambda=lambda, merge=merge,
nu=nu, nu.est=nu.est, trans=trans)
}
## save all as RData
## temporary; until below is fixed
#if ( save.rdata )
# save(sgs, rds, phs, pvs, dft, tset, fcset, #sgcls,
# file=file.path(out.path,paste0(fname,".RData",sep="")))
## get BIC and best clustering
selected <- selected(fcset) # cluster number of max BIC
cls <- fcset$clusters[,selected] # clusters at max BIC
# Error in fcset$clusters[, selected] : subscript out of bounds
bic <- fcset$bic # BIC
icl <- fcset$icl # ICL
max.cli <- fcset$max.cli # cluster number at max ICL
max.clb <- fcset$max.clb # cluster number at max BIC
max.bic <- max(bic,na.rm=TRUE) # max BIC
max.icl <- max(icl, na.rm=T) # max ICL
## store cluster number, max BIC, numbers of clustered and total segments
usedk <- fcset$usedk[selected] # NOTE:
clnum[type,] <- c(K=usedk, BIC=max.bic,
NUMCL=sum(!tset$rm.vals), TOT=nrow(avg))
## and add selected global clustering to segment table
## write out clusters
sgcls <- data.frame(ID=sgs[,idcol],sgCL=cls)
## flowMerge
mselected <- NULL
if ( merge ) {
fcset <- mergeCluster(tset, fcset, selected=selected(fcset))
mselected <- fcset$merged # cluster number of merged clustering
if ( !is.null(mselected) ) {
mcls <- fcset$clusters[,mselected] # clusters of merged clustering
mrg.cl <- fcset$merged.K # cluster number of merged clustering
## add to data frame
sgcls <- cbind.data.frame(sgcls,mCL=mcls)
}
}
## re-cluster with kmeans
if ( recluster ) {
fcset <- reCluster(tset, fcset, select=FALSE)
rselected <- fcset$reclustered
sgcls <- cbind.data.frame(sgcls, rCL=fcset$clusters[,rselected])
}
file.name <- file.path(out.path,paste(fname,"_clusters",sep=""))
write.table(sgcls,file=paste(file.name,".csv",sep=""),quote=FALSE,
sep="\t",col.names=TRUE,row.names=FALSE)
## RESORT CLUSTERING
## TODO: sort by phase and re-cluster; use vector phs
## of average segment phases!
## cls.phase <- sapply(cls.srt, function(x)
## phaseDist(phase[cls==x],w=1-rp[cls==x,"pVal"]))
## cset$sorting[[bestKcol]] <- cls.srt
## ## re-color
## cset <- colorClusters(cset)
## save all as RData
if ( save.rdata )
save(sgs, rds, phs, pvs, dft, tset, fcset, sgcls, fname,
file=file.path(out.path,paste0(fname,".RData",sep="")))
## PLOT CLUSTERING
if ( verb>0 )
cat(paste("\tplotting time series clustering\t",time(),"\n"))
## re-do tset without removing unsignificant!
## "plot-tset"
pset <- processTimeseries(avg,na2zero=TRUE,use.fft=TRUE,
smooth.time=smooth.time, trafo=trafo,
perm=0, dft.range=dft.range, dc.trafo=dc.trafo,
use.snr=use.snr,low.thresh=-Inf)
## plot BIC
file.name <- file.path(out.path,paste(fname,"_BIC",sep=""))
plotdev(file.name,width=4,height=4,type=fig.type,res=300)
par(mai=c(.7,.7,0.1,0.1),mgp=c(1.5,.5,0),tcl=-.3)
plotBIC(fcset, norm=TRUE)
dev.off()
## plot DFT
file.name <- file.path(out.path,paste(fname,"_DFT",sep=""))
plotdev(file.name,type=fig.type,res=300,
width=round(length(dft.range)),height=4)
par(mfcol=c(2,round(length(dft.range)/2)),
mai=c(.5,.5,0.1,0),mgp=c(1.5,.5,0),tcl=-.3)
plotDFT(tset, fcset, cycles=dft.range, pch=1, cex=.5)
dev.off()
## plot best BIC
file.name <- file.path(out.path,paste0(fname,"_osc_",selected))
plotdev(file.name,width=4,height=9,type=fig.type,res=300)
plotClusters(pset,fcset,k=selected,norm="meanzero")
dev.off()
## plot merged
if ( merge & !is.null(mselected) ) {
file.name <- file.path(out.path,paste0(fname, "_osc_",mselected))
plotdev(file.name,width=4,height=9,type=fig.type,res=300)
plotClusters(pset,fcset,k=mselected,norm="meanzero")
dev.off()
## plot DFT
file.name <- file.path(out.path,paste0(fname,"_DFT_",mselected))
plotdev(file.name,type=fig.type,res=300,
width=round(length(dft.range)),height=4)
par(mfcol=c(2,round(length(dft.range)/2)),
mai=c(.5,.5,0.1,0),mgp=c(1.5,.5,0),tcl=-.3)
tmp<-fcset;tmp$selected <- mselected
plotDFT(tset, tmp, cycles=dft.range, pch=1, cex=.5)
dev.off()
}
if ( recluster ) {
file.name <- file.path(out.path,paste(fname, "_osc_",rselected,sep=""))
plotdev(file.name,width=4,height=9,type=fig.type,res=300)
plotClusters(pset,fcset,k=rselected,norm="meanzero")
dev.off()
## plot DFT
file.name <- file.path(out.path,paste0(fname,"_DFT_",rselected))
plotdev(file.name,type=fig.type,res=300,
width=round(length(dft.range)),height=4)
par(mfcol=c(2,round(length(dft.range)/2)),
mai=c(.5,.5,0.1,0),mgp=c(1.5,.5,0),tcl=-.3)
tmp<-fcset;tmp$selected <- rselected
plotDFT(tset, tmp, cycles=dft.range, pch=1, cex=.5)
dev.off()
}
}
## write out summary for analysis over segmentation types!
if ( "clustering" %in% jobs ) {
if ( verb>0 )
cat(paste("saving clustering results\t",time(),"\n"))
clnum <- cbind(ID=rownames(clnum), clnum)
if ( out.name == "" ) {
file.name <- "clustering"
} else {
file.name <- paste(out.name,"clustering",sep="_")
}
file.name <- file.path(out.path,file.name)
write.table(clnum,file=paste(file.name,".csv",sep=""),quote=FALSE,
sep="\t",col.names=TRUE,row.names=FALSE)
}
if ( verb>0 )
cat(paste("DONE\t",time(),"\n"))
if ( !interactive() ) quit(save="no")
## TODO (but elsewhere) :
## PLOT PHASE DIST etc.
## plot phase dist (for diff. weights?),
## read-dist and pval-dist
### TODO - CLASSIFY OVERLAPS
### TODO - CHROMOSOME PLOTS
## get overlaps with cltranscripts and clgenes as in testSegments.R
## classify into upstream - covers - downstream of cluster genes
## calculate enrichment (Jaccard?) per cluster
## potentially: use super-clusters
## plot next to time-course
### MANUAL
##
load("all_20160525/K16_k1_icor3_filt_osc_K14m7.RData")
## segments with 4 peaks!
## mostly D/cd, either full ORF or downstream, sometimes upstream
soi <- seg[which(seg[,"mCL"] == 5),]
|
ccc39879b8b224605fa5b8022bd0bfbcc3421366 | c755955ceae00ad0c4f28c4cbdcae76c40f9e389 | /MEDGOLD_ExtractGrid.R | f23a6fc3f8656ca07d8cf87068c4196630d8d567 | [] | no_license | sandrocalmanti/med-gold | 3b8debae3ebd480775ac439065c688a1acbdac43 | 9577477dc6bd1d2a6a77441fb4871d32ef2f3196 | refs/heads/master | 2020-06-22T21:13:56.681948 | 2019-07-24T13:44:41 | 2019-07-24T13:44:41 | 198,371,245 | 1 | 0 | null | 2019-07-23T07:41:54 | 2019-07-23T06:57:43 | null | UTF-8 | R | false | false | 1,074 | r | MEDGOLD_ExtractGrid.R | #Clean all
## MED-GOLD: load bias-corrected ECMWF seasonal hindcasts of daily minimum
## 2 m temperature over the Italian MED-GOLD domain via CSTools::CST_Load.
## NOTE(review): wiping the workspace is tolerable in a standalone batch
## script but hostile when sourced into an interactive session
rm(list=ls())
library(s2dverification)
library(SpecsVerification)
library(abind)
library(multiApply)
library(easyVerification)
library(ncdf4)
library(CSTools)
library(zeallot)
library(lubridate)
## wall-clock start; no corresponding end/report is visible in this file
start_time <- Sys.time()
## file-name token for the variable vs. its NetCDF metadata
fi_var_name <- 'tmin2m'
nc_var_name <- 'mn2t24'
nc_var_longname <- 'Minimum temperature at 2 metres in the last 24 hours'
nc_var_unit <- 'K'
nc_domain_fname <- 'it-medgold'
## hindcast start dates: 1 November of each year 1988-2018, as "YYYYMMDD"
dayst <- '01'
monst <- '11'
yrst <- 1988
yren <- 2018
sdates <- paste0(as.character(seq(yrst,yren)),monst,dayst)
## experiment descriptor for CST_Load; the $VAR_NAME$/$EXP_NAME$/... tokens
## in `path` are placeholders expanded by the loader for each request
exp <- list(
  list(
    name = 'ecmf',
    path = file.path('/home/sandro/DATA/SEASONAL/ECMF/BC/',
                     '$VAR_NAME$_$EXP_NAME$_$START_DATE$_$SUFFIX$.nc'),
    nc_var_name = nc_var_name,
    suffix = nc_domain_fname,
    var_min = '-300'
  )
)
## load daily fields on a lon/lat grid, experiment data only (obs=NULL),
## reading with 8 parallel processes
exp_cst <- CST_Load(var=fi_var_name,
                    exp=exp,obs=NULL,
                    sdates=sdates,
                    storefreq='daily',
                    output='lonlat',
                    nprocs=8)
## Linear congruential generator (LCG): x_{k+1} = (a * x_k + c) mod m.
## Starting from seed `x`, iterates until the state returns to the seed
## (cycle closed) or more than `lim` iterations have run, collecting each
## new state reduced modulo `rang`.
## Returns the numeric vector of collected values. The first element is
## the RAW seed, not reduced mod rang -- kept for backward compatibility
## (harmless for the seeds < rang used in this script).
## NOTE(review): parameter `c` shadows base::c inside the body; it is kept
## unchanged for interface compatibility, and no c() call is made here.
rand <- function(x, m, a, c, rang, lim) {
  seed <- x
  ## preallocate instead of growing with c() in a loop (O(n^2) copies):
  ## at most lim + 1 iterations each append one value, plus the seed
  out <- numeric(lim + 2)
  out[1] <- x
  n <- 1
  k <- 0
  repeat {
    k <- k + 1
    x <- (a * x + c) %% m
    n <- n + 1
    out[n] <- x %% rang
    if (x == seed || k > lim) break
  }
  out[seq_len(n)]
}
## --- LCG samples: five generators with increasing modulus / run length ---
n1=rand(1,7,2,4,7,3)
n2=rand(1,125,6,17,125,126)
n3=rand(1,(2^15)-1,1664525,1013904223,1000,30000)
n4=rand(1,(2^31)-1,48271,0,10000,20000)
n5=rand(1,(2^32)-1,253801,14519,10000,50000)
## empirical distribution of each LCG stream
hist(n1)
hist(n2)
hist(n3)
hist(n4)
hist(n5)
## sample means vs. the theoretical midpoint max/2 of a uniform sample
mw = c(mean(n1),mean(n2),mean(n3),mean(n4),mean(n5))
mwe = c(max(n1)/2,max(n2)/2,max(n3)/2,max(n4)/2,max(n5)/2)
print(mw)
print(mwe)
## sample variances of the LCG streams
disp = c(var(n1),var(n2),var(n3),var(n4),var(n5))
print(disp)
## --- reference: R's built-in uniform RNG with comparable sizes/ranges ---
m1=runif(3,0,6)
m2=runif(125,0,125)
m3=runif(2^15-1,0,1000)
m4=runif(2^18-1,0,10000)
#m5=runif(2^32-1,0,10000)
hist(m1)
hist(m2)
hist(m3)
hist(m4)
#hist(m5)
## scatter of runif values in generation order (should show no structure)
plot(m1)
plot(m2)
plot(m3)
plot(m4)
#plot(m5)
## means, midpoints and variances of the runif reference samples
mw2=c(mean(m1),mean(m2),mean(m3),mean(m4))
mwe2 = c(max(m1)/2,max(m2)/2,max(m3)/2,max(m4)/2)
print(mw2)
print(mwe2)
disp2=c(var(m1), var(m2), var(m3),var(m4))
print(disp2)
## scatter of the LCG streams in generation order -- lattice structure of
## a poor LCG shows up here as visible stripes/patterns
plot(n1)
plot(n2)
plot(n3)
plot(n4)
plot(n5)
5f3c2adc09ff9c3bdc283bacd09ecf348c5bd1c6 | 600aaf4836ff6a6a9bed6849e62ccbb5be9659e3 | /R/myfirstfun.R | ca4a780f280e0cd195228515613ca2333f3625e8 | [] | no_license | Tim-OB/MATH4753OBRIEN | df480d265c48a4babbc7a0c19140513a4fb744c9 | da5a43d264c7771ba6616f5a06bf34c3957c188c | refs/heads/master | 2023-03-29T22:04:32.094990 | 2021-04-13T03:29:14 | 2021-04-13T03:29:14 | 334,214,140 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 209 | r | myfirstfun.R | #' @title My first R function
#'
#' @param x A vector of quantitative data
#'
#' @return The squared value of vector x
#' @export
#'
#' @examples
#' y <- 1:10; myfirstfun(y)
myfirstfun <- function(x){
x^2
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.