blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
81911fd3b3dbf7fc3921a6ba04244658f95e9a74 | 036b4f37d12348e272f3fd56863077e525e92ad6 | /R Code/HP3 Code/ZZ_Probably won't use or Old Code/ZI INLA Models.R | df15c79c80463576f7088839cd7b2753742b9a7c | [] | no_license | gfalbery/Helmneth | 575d02bcbee41162d7d72a5942b5086670a0e710 | 3d9ebbf047e981986bdb7867dc38ecc0f3954c9e | refs/heads/master | 2020-04-24T05:24:46.123145 | 2020-01-04T20:38:50 | 2020-01-04T20:38:50 | 171,734,478 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,015 | r | ZI INLA Models.R | # ZI INLA Models
source("R Code/00_Master Code.R")
library(INLA)
inla.setOption(num.threads = 8)
lapply(1:3, function(i){
if(i==1) {
saveRDS(inla(data = FinalHostMatrix, # Doesn't fit
Virus ~ Space + Phylo2 + Space:Phylo2 + MinCites + DomDom,
control.compute = list(dic = TRUE),
family = "nbinomial"), file = paste0("ZI INLA Model",i,".Rdata"))}
if(i==2) {
saveRDS(inla(data = FinalHostMatrix, # Doesn't fit
Virus ~ Space + Phylo2 + Space:Phylo2 + MinCites + DomDom,
control.compute = list(dic = TRUE),
family = "zeroinflatednbinomial1"), file = paste0("ZI INLA Model",i,".Rdata"))}
if(i==3) {
saveRDS(inla(data = FinalHostMatrix, # Doesn't fit
Virus ~ Space + Phylo2 + Space:Phylo2 + MinCites + DomDom,
control.compute = list(dic = TRUE),
family = "zeroinflatednbinomial2"), file = paste0("ZI INLA Model",i,".Rdata"))}
}) |
1d87d11dae16c4d86ce7b515be2cfc8d53ae94d9 | c78d91985497e8ae7db564802ebb42eff5af5160 | /R/grob-descent.R | dd76104616fa3bcf2753807d8801673950ffc941 | [
"MIT"
] | permissive | nemochina2008/gridtext | c0fb166e480f770bce0359d4e5ef5c3361f143ce | 72efc1d287bfd953c49ce133168cab4bb22e5408 | refs/heads/master | 2020-06-14T07:19:24.680077 | 2019-07-02T21:06:53 | 2019-07-02T21:06:53 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,636 | r | grob-descent.R | #' Calculate descent and other text properties, either from grob or from graphical
#' parameters
#' @param grob The grob for which we want information
#' @export
grob_descent <- function(grob) {
  # Compute the descent in points, then wrap it in a grid unit object.
  descent_pt <- grob_descent_pt(grob)
  unit(descent_pt, "pt")
}
#' @rdname grob_descent
#' @export
grob_descent_pt <- function(grob) {
  # Only text grobs carry a font-dependent descent; everything else reports 0.
  if (!inherits(grob, "text")) {
    return(0)
  }
  font_details_pt(grob$gp)$descent
}
#' Calculates generic font height and descent from given graphical parameters
#'
#' @param gp Graphical parameters
#' @export
font_details_pt <- function(gp = gpar()) {
  # Thin wrapper over the memoising lookup; results are cached per
  # fontfamily/fontface/fontsize combination in `font_env` (see below).
  lookup_font_details(gp)
}
# environment to cache font details so we don't have to recalculate over and over
font_env <- new.env(parent = emptyenv())
font_env$font_details <- list()
# Memoised font metrics lookup: resolve the effective font settings (falling
# back to the current gpar defaults), and compute height/descent only on a
# cache miss.  Measuring fonts requires laying out text on a device, so this
# caching avoids repeated expensive calls for the same font.
lookup_font_details <- function(gp) {
  fontfamily <- gp$fontfamily %||% grid::get.gpar("fontfamily")$fontfamily
  fontface <- gp$fontface %||% grid::get.gpar("fontface")$fontface
  fontsize <- gp$fontsize %||% grid::get.gpar("fontsize")$fontsize
  # Separator prevents key collisions between adjacent fields
  # (e.g. family "A" / face "1" vs family "A1" / face "").
  key <- paste(fontfamily, fontface, fontsize, sep = "\r")
  details <- font_env$font_details[[key]]
  if (is.null(details)) {
    details <- calc_font_details(gp)
    # Insert directly by key instead of rebuilding the whole cache list.
    font_env$font_details[[key]] <- details
  }
  details
}
# Measure generic font height and descent (in pt) for the given graphical
# parameters by laying out a probe string containing full ascenders and
# descenders ("gjpqyQ") in a temporary viewport.
calc_font_details <- function(gp) {
  pushViewport(viewport(gp = gp)) # change viewport to get correct font settings
  # Guarantee the viewport is popped even if measurement fails part-way;
  # otherwise an error here would leave the viewport stack corrupted.
  on.exit(popViewport(), add = TRUE)
  grob <- textGrob(label = "gjpqyQ")
  height <- convertHeight(heightDetails(grob), "pt", valueOnly = TRUE)
  descent <- convertHeight(descentDetails(grob), "pt", valueOnly = TRUE)
  list(height_pt = height, descent_pt = descent)
}
|
2989349306a7470488bb792f342d9221b43775b4 | 40962c524801fb9738e3b450dbb8129bb54924e1 | /DAY - 8/Assignment/Q4 - Pnorm_SurvivalOfPhones.R | 17f79da385ed7144343180557e1b605ff333314a | [] | no_license | klmsathish/R_Programming | 628febe334d5d388c3dc51560d53f223585a0843 | 93450028134d4a9834740922ff55737276f62961 | refs/heads/master | 2023-01-14T12:08:59.068741 | 2020-11-15T13:23:31 | 2020-11-15T13:23:31 | 309,288,498 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 453 | r | Q4 - Pnorm_SurvivalOfPhones.R | #A company ships 5000 cell phones.
#They are expected to last an average of 10,000 hours before needing repair;
#with a standard deviation of 500 hours.
#Assume the survival time of the phones are normally distributed.
#If a phone is randomly selected to be tracked for repairs find the expected number that needs repair,
#a) after 11,000 hours
cat("Expected NUmber of Phones to be repaired = ",
pnorm(11000,mean=10000,sd=500,lower.tail=FALSE))
|
3901e85a26809ed943db8e74f7eb65c4ebf5e85f | cfe7f33002bde9401d63234999a440da8ff238b9 | /FolderLevel2/scrip.R | 0643e9cb59d386a69ec095aadf20e1bf69ac106a | [] | no_license | dandinoar/MBC | 337c2a89804c5dadbc2673dffe00c3683c89d9ee | 9f743f08bbd7b4f52e8f1939def15b9ec1876ae9 | refs/heads/master | 2020-03-30T19:48:43.990621 | 2018-10-04T12:00:29 | 2018-10-04T12:00:29 | 151,559,556 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 50 | r | scrip.R | print("hello Git")
#My second commit (all folder)
|
56cbad8e75ccfe2de29195fd49449f9a46fd3f1a | 7f7600833234db5020cc355c95f1976c5fd628ee | /01_association_metrics/metacell/metacells_from_leiden.R | bd9a8111254947c1b6f30de5350cf60f51fde431 | [
"BSD-2-Clause"
] | permissive | shuang1330/single_cell | 2f3b9979dde007cb31e9bcaecb8493f6c02514cd | 983596997f6f4d66a82b2953f8c3301ebc7828dd | refs/heads/master | 2023-04-14T13:33:05.748097 | 2022-10-12T09:17:10 | 2022-10-12T09:17:10 | 298,008,060 | 3 | 1 | BSD-2-Clause | 2022-04-19T11:22:57 | 2020-09-23T15:08:13 | Jupyter Notebook | UTF-8 | R | false | false | 4,205 | r | metacells_from_leiden.R | # ------------------------------------------------------------------------------
# Implement own method to generate metacells based on leiden clustering
# Run leiden clustering separatley for each donor (run on Oelen v3, Monocytes)
# and use group cells that are part of the same cluster
# ------------------------------------------------------------------------------
library(Seurat)
#Load complete seurat object
seurat<-readRDS("../../seurat_objects/1M_v3_mediumQC_ctd_rnanormed_demuxids_20201106.rds")
DefaultAssay(seurat)<-"SCT" #3000 most variable genes already identified
#Filter for monocytes
seurat<-seurat[,seurat$cell_type_lowerres=="monocyte"]
#Resolution for leiden clusters
leidenRes<-100
print(paste("Leiden resolution:",leidenRes))
type<-"SCT" #choose RNA or SCT
print(paste("Normalization:",type))
#Files with overall annotation and metacell matrix
annot_mc_all<-NULL
annot_mc_major_all<-NULL
metacellBulk_all<-NULL
#Iterate over all samples
samples<-levels(seurat$assignment)
for(donor in samples){
print(paste("Processing donor:",donor))
#Filter for the donor
seurat_donor<-seurat[,seurat$assignment==donor]
#Calculate PCA
seurat_donor<-RunPCA(seurat_donor, verbose=FALSE)
#Generate kNN graph and leidern clustering
seurat_donor <- FindNeighbors(seurat_donor, dims = 1:20)
seurat_donor <- FindClusters(seurat_donor, resolution = leidenRes,
algorithm = 4, #4=Leiden
group.singletons=FALSE)
#don't assign all singletons to the nearest cluster
#Save metacell - cell annotation
annot_mc<-data.frame(cluster=Idents(seurat_donor),
metacell=paste0("mc_",Idents(seurat_donor),"_",donor),
sample=donor,
cell=names(Idents(seurat_donor)),
row.names=NULL)
annot_mc_all<-rbind(annot_mc_all,annot_mc)
#Create pseudobulk
#all(colnames(seurat_donor)==annot_mc$cell)
if(type=="RNA"){
metacellBulk <- t(apply(as.matrix(seurat_donor@assays$RNA@counts), 1, tapply,
as.factor(annot_mc$cluster),
mean, na.rm=T))
} else if (type=="SCT"){
metacellBulk <- t(apply(as.matrix(seurat_donor@assays$SCT@counts), 1, tapply,
as.factor(annot_mc$cluster),
mean, na.rm=T))
} else {
stop(paste("Matrix type",type,"not known! Only RNA or SCT!"))
}
colnames(metacellBulk)<-paste0("mc_",1:ncol(metacellBulk),"_",donor)
metacellBulk_all<-cbind(metacellBulk_all,metacellBulk)
#Get majority annotation
meta.data<-seurat_donor@meta.data
meta.data$cell<-rownames(meta.data)
meta.data<-merge(meta.data,annot_mc,
by.x="cell",by.y="cell")
# Annotate each meta-cell to the most frequent condition
timepoint.mc<-sapply(colnames(metacellBulk),
function(id) names(which.max(table(
meta.data$timepoint[
meta.data$metacell==id]))))
#Save majority annotation
annot_mc_major<-data.frame(metacell=names(timepoint.mc),
condition=unlist(timepoint.mc),
sample=donor,
row.names=NULL)
annot_mc_major_all<-rbind(annot_mc_major_all,annot_mc_major)
}
if(type=="RNA"){
#Save per cell annotation
write.table(annot_mc_all,file="annotations_metacell_leiden_perCell.tsv",sep="\t")
write.table(annot_mc_major_all,file="annotations_mc_leiden_tp.tsv",sep="\t")
#Save peudobulk counts
saveRDS(metacellBulk_all, file="metacell_leiden.RDS")
} else if(type=="SCT"){
write.table(annot_mc_all,file=paste0("annotations_metacell_leiden_SCT_perCell_",
leidenRes,".tsv"),
sep="\t")
write.table(annot_mc_major_all,file=paste0("annotations_mc_leiden_SCT_tp_",
leidenRes,".tsv"),
sep="\t")
#Save peudobulk counts
saveRDS(metacellBulk_all, file=paste0("metacell_leiden_SCT_",
leidenRes,".RDS"))
}
|
150a28e054b2da9bb766b07ff00ce2edeeccc848 | caa16c4b880b8061723314098056cc104447831b | /Tarea 2/Tarea 2/Punto 4.R | f6fa0f69c5d5ed11237a1f4c347a4de267a40d48 | [] | no_license | nacevedo/StatisticalLearning | 9e9244862402bdf7c9285977f3a40a5c2fe01c06 | ba7c28c7404517c360685dd8b5de51d463dd6146 | refs/heads/master | 2020-07-17T04:26:05.868721 | 2019-12-09T19:09:58 | 2019-12-09T19:09:58 | 205,942,692 | 0 | 0 | null | 2019-12-08T21:08:40 | 2019-09-02T22:00:29 | R | UTF-8 | R | false | false | 5,925 | r | Punto 4.R | #You can try to modify the denominator inside the cosine to change the complexity of f*
# f*: the true regression function used to generate the simulated data --
# a cosine oscillation (period controlled by the 0.15 divisor) whose
# amplitude grows as x^0.2 / x^(-0.45) = x^0.65.
f <- function(x) {
  2 + x^0.2 * cos(x / 0.15) / x^(-0.45)
}
plot(f,0,5)
# Simulate N noisy observations of f* on [0, 5]:
#   y_i = f(x_i) + eps_i,  eps_i ~ N(0, sigma^2)
#Points simulation: you change n and sigma
N=400
sigma=1.2
x=runif(N,0,5);x=sort(x) #For convenience, the input x is sorted
y=rep(0,times=N) # preallocate the response vector
for(i in 1:N){
  y[i]=f(x[i])+rnorm(1,0,sigma) # signal plus Gaussian noise
}
plot(x,y) # scatter of the simulated data
points(x,f(x),type="l",col=2,lwd=2) # overlay the true curve in red
# Build the local-averaging (hat) matrix for a kernel smoother: entry [m, j]
# is the normalised weight of training point x_train[j] when predicting at
# x_test[m].  Raw weights are (1 - d^2 / hh)^2 within a window of half-width
# hh around the prediction point and 0 outside; each row is rescaled to sum
# to 1.
# NOTE(review): the squared distance is divided by hh (not hh^2), as in the
# original code -- confirm this is the intended kernel parameterisation.
L <- function(hh, x_test, x_train) {
  n_test <- length(x_test)
  n_train <- length(x_train)
  W <- matrix(0, nrow = n_test, ncol = n_train)
  for (m in seq_len(n_test)) {
    d <- x_test[m] - x_train
    in_window <- ifelse(abs(d) > hh, 0, 1)
    raw <- (1 - d^2 / hh)^2 * in_window
    W[m, ] <- raw / sum(raw)
  }
  W
}
xCV <-x[sample(length(x))]
yCV <-y[sample(length(y))]
data = cbind(xCV, yCV)
# 5 Fold CV
# Partir
folds <- cut(seq(1,nrow(data)),breaks=5,labels=FALSE)
MSE_final= vector()
h=seq(.1,5,by=.1)
for(k in 1:length(h)){
MSE = vector()
hh = h[k]
for(i in 1:5){
test_i <- which(folds==i,arr.ind=TRUE)
test <- data[test_i, ]
x_test = test[,1]
y_test = test[,2]
train <- data[-test_i, ]
x_train = train[,1]
y_train = train[,2]
N = length(x_test)
N2 = length(x_train)
mat=matrix(rep(0,times=N*N2), ncol=N2)
for(m in 1:N){
cond = 0
zz=rep(x_test[m],times=N2)
for(j in 1:N2){
cond[j] = ifelse(abs(x_test[m]-x_train[j]) > hh, 0, 1)
}
bottom=sum((1-(zz-x_train)^2/hh)^2*cond)
mat[m,]=(1-(zz-x_train)^2/hh)^2*cond/bottom
}
ff = mat%*%y_train
MSE[i] = mean((ff-y_test)^2)
}
MSE_final[k] = mean(MSE)
}
plot(h, MSE_final, type = 'l', xlab = 'h', ylab = 'MSE', main = '5 - Fold CV')
h[which.min(MSE_final)]
min(MSE_final)
### 10 Fold CV
# Partir
folds <- cut(seq(1,nrow(data)),breaks=10,labels=FALSE)
MSE_final10 = vector()
for(k in 1:length(h)){
MSE = vector()
hh = h[k]
for(i in 1:10){
test_i <- which(folds==i,arr.ind=TRUE)
test <- data[test_i, ]
x_test = test[,1]
y_test = test[,2]
train <- data[-test_i, ]
x_train = train[,1]
y_train = train[,2]
N = length(x_test)
N2 = length(x_train)
mat=matrix(rep(0,times=N*N2), ncol=N2)
for(m in 1:N){
cond = 0
zz=rep(x_test[m],times=N2)
for(j in 1:N2){
cond[j] = ifelse(abs(x_test[m]-x_train[j]) > hh, 0, 1)
}
bottom=sum((1-(zz-x_train)^2/hh)^2*cond)
mat[m,]=(1-(zz-x_train)^2/hh)^2*cond/bottom
}
ff = mat%*%y_train
MSE[i] = mean((ff-y_test)^2)
}
MSE_final10[k] = mean(MSE)
}
plot(h, MSE_final10, type = 'l', xlab = 'h', ylab = 'MSE', main = '10 - Fold CV')
h[which.min(MSE_final10)]
min(MSE_final10)
# Comparacion
plot(h, MSE_final, type = 'l', xlab = 'h', ylab = 'MSE', main = 'Cross Validation', col = 4)
lines(h, MSE_final10, type = 'l', xlab = 'h', ylab = 'MSE', col = 2)
legend('topright',legend = c('5 - Fold CV', '10 - Fold CV'),col=c("red", "blue"),lty=1, cex=0.8)
#### LOOCV ####
folds <- cut(seq(1,nrow(data)),breaks=400,labels=FALSE)
MSE_final400 = vector()
for(k in 1:length(h)){
MSE = vector()
hh = h[k]
for(i in 1:400){
test_i <- which(folds==i,arr.ind=TRUE)
test <- data[test_i, ]
x_test = test[1]
y_test = test[2]
train <- data[-test_i, ]
x_train = train[,1]
y_train = train[,2]
N = length(x_test)
N2 = length(x_train)
mat=matrix(rep(0,times=N*N2), ncol=N2)
for(m in 1:N){
cond = 0
zz=rep(x_test[m],times=N2)
for(j in 1:N2){
cond[j] = ifelse(abs(x_test[m]-x_train[j]) > hh, 0, 1)
}
bottom=sum((1-(zz-x_train)^2/hh)^2*cond)
mat[m,]=(1-(zz-x_train)^2/hh)^2*cond/bottom
}
ff = mat%*%y_train
MSE[i] = mean((ff-y_test)^2)
}
MSE_final400[k] = mean(MSE)
}
plot(h, MSE_final400, type = 'l', xlab = 'h', ylab = 'MSE', main = '10 - Fold CV')
h[which.min(MSE_final400)]
min(MSE_final400)
# Comparacion
plot(h, MSE_final, type = 'l', xlab = 'h', ylab = 'MSE', main = 'Cross Validation', col = 4)
lines(h, MSE_final10, type = 'l', xlab = 'h', ylab = 'MSE', col = 2)
lines(h, MSE_final400, type = 'l', xlab = 'h', ylab = 'MSE', col = 9)
legend('topright',legend = c('5 - Fold CV', '10 - Fold CV', 'LOOCV'),col=c("red", "blue","black"),lty=1, cex=0.8)
plot(h, MSE_final, type = 'l', xlab = 'h', ylab = 'MSE', main = '10 - Fold CV')
h[which.min(MSE_final)]
min(MSE_final)
### IC ###
B = 500
ybar_boot = c(rep(0,times=B)) # Creacion de vector vacio
hh = 3.7
N = 400
for(i in 1:B){
sample = sample(seq(1:N), N, replace = T)
x_train = x[sample]
y_train = y[sample]
x_test = 3.8
NN = length(x_test)
N2 = length(x_train)
mat=matrix(rep(0,times=NN*N2), ncol=N2)
for(m in 1:NN){
cond = 0
zz=rep(x_test[m],times=N2)
for(j in 1:N2){
cond[j] = ifelse(abs(x_test[m]-x_train[j]) > hh, 0, 1)
}
bottom=sum((1-(zz-x_train)^2/hh)^2*cond)
mat[m,]=(1-(zz-x_train)^2/hh)^2*cond/bottom
}
ybar_boot[i] = (mat%*%y_train)
}
plot(density(ybar_boot)) # Plot de medidas obtenidas con el bootstrap
c(quantile(ybar_boot,0.025),quantile(ybar_boot,0.975))
#### Bootstrap ###
B = 500
ybar_boot = c(rep(0,times=B)) # Creacion de vector vacio
for(i in 1:B){
sample = sample(seq(1:N), N, replace = T)
x_train = x[sample]
y_train = y[sample]
x_test = c(3, 4)
NN = length(x_test)
N2 = length(x_train)
mat=matrix(rep(0,times=NN*N2), ncol=N2)
for(m in 1:NN){
cond = 0
zz=rep(x_test[m],times=N2)
for(j in 1:N2){
cond[j] = ifelse(abs(x_test[m]-x_train[j]) > hh, 0, 1)
}
bottom=sum((1-(zz-x_train)^2/hh)^2*cond)
mat[m,]=(1-(zz-x_train)^2/hh)^2*cond/bottom
}
ybar_boot[i] = (mat%*%y_train)[1]-(mat%*%y_train)[2]
}
plot(density(ybar_boot)) # Plot de medidas obtenidas con el bootstrap
c(quantile(ybar_boot,0.025),quantile(ybar_boot,0.975))
|
707c382f7c9fabe420d469fa540db6140804af08 | 5613034868dd4d8c2280927eee4003947f15da59 | /tests/testthat/test-double_ml_iivm_tuning.R | d88f7474e4eabc3aefe0ff42b2559cc4e462812b | [] | no_license | FrederikBornemann/doubleml-for-r | b6ab57421bedd2971c82f4f7c1265a7406184999 | a10e38d5e70eacf85742976c5e92ca03abcf41c3 | refs/heads/master | 2023-07-05T00:55:40.351004 | 2021-08-13T07:12:09 | 2021-08-13T07:12:09 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,729 | r | test-double_ml_iivm_tuning.R | context("Unit tests for tuning of IIVM")
requireNamespace("lgr")
logger = lgr::get_logger("bbotk")
logger$set_threshold("warn")
lgr::get_logger("mlr3")$set_threshold("warn")
tune_settings = list(
rsmp_tune = rsmp("cv", folds = 3),
measure = list(
"ml_m" = "classif.ce",
"ml_g" = "regr.mse",
"ml_r" = "classif.ce"),
terminator = mlr3tuning::trm("evals", n_evals = 5),
algorithm = tnr("random_search"))
learner = "rpart"
on_cran = !identical(Sys.getenv("NOT_CRAN"), "true")
if (on_cran) {
test_cases = expand.grid(
learner_list = learner,
dml_procedure = "dml2",
score = "LATE",
AT = c(TRUE),
NT = c(TRUE),
n_rep = c(1),
tune_on_folds = FALSE,
stringsAsFactors = FALSE)
} else {
test_cases = expand.grid(
learner_list = learner,
dml_procedure = c("dml1", "dml2"),
score = "LATE",
AT = c(TRUE, FALSE),
NT = c(TRUE, FALSE),
n_rep = c(1, 3),
tune_on_folds = c(FALSE, TRUE),
stringsAsFactors = FALSE)
}
test_cases["test_name"] = apply(test_cases, 1, paste, collapse = "_")
patrick::with_parameters_test_that("Unit tests for tuning of IIVM:",
.cases = test_cases, {
n_rep_boot = 498
n_folds = 2
set.seed(3141)
double_mliivm_obj_tuned = DoubleMLIIVM$new(
data = data_iivm$dml_data,
n_folds = n_folds,
ml_g = "regr.rpart",
ml_m = "classif.rpart",
ml_r = "classif.rpart",
subgroups = list(
always_takers = AT,
never_takers = NT),
dml_procedure = dml_procedure,
score = score,
n_rep = n_rep)
param_grid = list(
"ml_m" = paradox::ParamSet$new(list(
paradox::ParamDbl$new("cp", lower = 0.01, upper = 0.02),
paradox::ParamInt$new("minsplit", lower = 1, upper = 2))),
"ml_g" = paradox::ParamSet$new(list(
paradox::ParamDbl$new("cp", lower = 0.01, upper = 0.02),
paradox::ParamInt$new("minsplit", lower = 1, upper = 2))),
"ml_r" = paradox::ParamSet$new(list(
paradox::ParamDbl$new("cp", lower = 0.01, upper = 0.02),
paradox::ParamInt$new("minsplit", lower = 1, upper = 2))))
double_mliivm_obj_tuned$tune(param_set = param_grid, tune_on_folds = tune_on_folds, tune_settings = tune_settings)
double_mliivm_obj_tuned$fit()
theta_obj_tuned = double_mliivm_obj_tuned$coef
se_obj_tuned = double_mliivm_obj_tuned$se
# bootstrap
# double_mlirm_obj_tuned$bootstrap(method = 'normal', n_rep = n_rep_boot)
# boot_theta_obj_tuned = double_mlirm_obj_tuned$boot_coef
# restrictions to test
# Functional (tbd) vs OOP implementation (handling randomness in param selection!?)
expect_is(theta_obj_tuned, "numeric")
expect_is(se_obj_tuned, "numeric")
}
)
|
fb385b1431183fb24aebbe585a47a4574370fc75 | c3fced9fa3881b8d07000adfb5bebe4213eaa4a4 | /ANALYSIS/DATA/Exclude Outliers Regression.R | 5db423757f12fdc5dcd1026e0dc2479631aa57ae | [] | no_license | rafael-schuetz/Pareto | ea9c06cb588113bbdf6a3b5da27a2d2a22f37dc8 | 74c414268d429373b83ccfb27bf222ae25b97c32 | refs/heads/master | 2022-04-13T11:36:56.587595 | 2020-04-08T18:31:48 | 2020-04-08T18:31:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,297 | r | Exclude Outliers Regression.R | #https://www.r-bloggers.com/how-to-remove-outliers-in-r/
#remove outliers
#mealsNo
mergedData_mealsNoexcludedOutliers <- quantile(mergedData$mealsNo, probs=c(.25, .75), na.rm = TRUE)
iqr_mealsNo <- IQR(mergedData$mealsNo, na.rm = TRUE)
up_mealsNo <- mergedData_mealsNoexcludedOutliers[2]+1.5*iqr_mealsNo # Upper Range
low_mealsNo<- mergedData_mealsNoexcludedOutliers[1]-1.5*iqr_mealsNo # Lower Range
outlier_meals_ID <- filter(mergedData, mergedData$mealsNo <= low_mealsNo | mergedData$mealsNo >= up_mealsNo) %>%
dplyr::select(id) %>% unique()
outlier_meals_ID <- outlier_meals_ID[["id"]]
mealsNoOutliers <- mergedData %>%
dplyr::filter(!(id %in% outlier_meals_ID))
#tripsNo
mergedData_tripsNoexcludedOutliers <- quantile(mergedData$tripsNo, probs=c(.25, .75), na.rm = TRUE)
iqr_tripsNo <- IQR(mergedData$tripsNo, na.rm = TRUE)
up_tripsNo <- mergedData_tripsNoexcludedOutliers[2]+1.5*iqr_tripsNo # Upper Range
low_tripsNo<- mergedData_tripsNoexcludedOutliers[1]-1.5*iqr_tripsNo # Lower Range
outlier_trips_ID <- filter(mergedData, mergedData$tripsNo <= low_tripsNo | mergedData$tripsNo >= up_tripsNo) %>%
dplyr::select(id) %>% unique()
outlier_trips_ID <- outlier_trips_ID[["id"]]
tripsNoOutliers <- mergedData %>%
filter(!(mergedData$id %in% outlier_trips_ID)) |
2032150d1b394e0ab76a6f1aba6dbdd510a27539 | 2099a2b0f63f250e09f7cd7350ca45d212e2d364 | /ACL-Dataset/Summary_rnd/P14-2061.xhtml.A.R | aafafbb6441a7bdfd7d8b83fba4e31577e0bb67e | [] | no_license | Angela7126/SLNSumEval | 3548301645264f9656b67dc807aec93b636778ef | b9e7157a735555861d2baf6c182e807e732a9dd6 | refs/heads/master | 2023-04-20T06:41:01.728968 | 2021-05-12T03:40:11 | 2021-05-12T03:40:11 | 366,429,744 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,119 | r | P14-2061.xhtml.A.R | <html>
<head>
<meta name="TextLength" content="SENT_NUM:7, WORD_NUM:122">
</head>
<body bgcolor="white">
<a href="#0" id="0">While language identification in signed languages is yet to be studied, significant progress has been recorded for written and spoken languages.</a>
<a href="#1" id="1">For visual data, normalization corresponds to local brightness and contrast normalization.</a>
<a href="#2" id="2">These variations coupled with lighting conditions may introduce noise.</a>
<a href="#3" id="3">First, to remove any non-signing signals that remain constant within videos of a single sign language but that are different across sign languages.</a>
<a href="#4" id="4">Our best average accuracy (84.03%) is obtained using 500 K-means features which are extracted over four frames (taken at a step of 2).</a>
<a href="#5" id="5">Classification algorithms are used with their default settings and the classification strategy is one-vs.-rest .</a>
<a href="#6" id="6">Given the learned features, the feature mapping functions and a set of labeled training videos, we extract features as follows:</a>
</body>
</html> |
950bec810f5d0547b9c8d397073d835aa17e274f | 1abf8398ec048750d230f77b5467c0d3cf508349 | /R/ProcessUsingCHIPpeakAnno.R | 386f6c17f1870dc4ffab21713d95a8db1354d6b7 | [] | no_license | bioinformatics-gao/ChipSeq | 97e8453cb74663bd2b4f35e44846311ca962850d | dde9e5a4f82142657f22d281cb10509715c0ef78 | refs/heads/master | 2021-01-12T00:03:09.798091 | 2017-01-11T17:32:19 | 2017-01-11T17:32:19 | 78,662,917 | 1 | 0 | null | 2017-01-11T17:35:08 | 2017-01-11T17:35:08 | null | UTF-8 | R | false | false | 13,382 | r | ProcessUsingCHIPpeakAnno.R | #' ProcessUsingCHIPpeakAnno
#'
#' @param input.file.dir
#' @param input.file.pattern
#' @param output.file.dir
#'
#' @return
#' @export
#'
#' @examples
#' input.file.dir="/media/H_driver/2016/Yang/MACS/MACS/"
#' input.file.pattern="*.bed"
#' output.file.dir="/media/H_driver/2016/Yang/MACS/MACS/"
#'
#' ProcessUsingCHIPpeakAnno(input.file.dir,input.file.pattern,output.file.dir)
#'
ProcessUsingCHIPpeakAnno <- function(input.file.dir,input.file.pattern,output.file.dir) {
library(ChIPpeakAnno)
dir.name=input.file.dir
input.file.pattern=input.file.pattern
dir.name=reformatPath(dir.name)
output.dir.name=reformatPath(output.file.dir)
dir.create(output.dir.name)
file.name=paste0(dir.name,dir(dir.name,recursive = TRUE,pattern=input.file.pattern))
file.name.2<-as.list(file.name)
names(file.name.2)=sapply(strsplit(file.name,split="\\/"),"[[",8)
print(file.name.2)
file.name.3<-file.name.2
sample.name<-sapply(strsplit(names(file.name.3),split="_peaks_"),"[[",1)
names(file.name.3)=sample.name
file.name.4 <-file.name.3[-1]
re.out<-lapply(file.name.4,function(u){
re=toGRanges(u,format="BED")
#colnames(re)=c("Count","GeneName")
re
})
head(re.out[[1]])
re.out.L<-lapply(re.out,function(u){
re=length(u)
#colnames(re)=c("Count","GeneName")
re
})
annoData <- toGRanges(EnsDb.Mmusculus.v75, feature="gene")
annoData[1:2]
binOverFeature(overlaps, annotationData=annoData,
radius=5000, nbins=20, FUN=length, errFun=0,
ylab="count",
main="Distribution of aggregated peak numbers around TSS")
ol <- findOverlapsOfPeaks(re.out[c(2,4,1)])
overlaps<-ol$peaklist$`11_2470IUPUI_WT_BM_SMC1_peaks.bed///13_2470IUPUI_WT_BM_Rad21_peaks.bed///10_WT_BM_ASXL1_peaks.bed`
re<-makeVennDiagram(re.out[c(2,4,1)],NameOfPeaks=c("SMC1A", "RAD21","ASXL1"),totalTest=35000)
#fisher exact test
UseFisher <- function(temp.ct,index.A,index.B,totalN) {
total.peaks=totalN
A=sum(temp.ct[which(temp.ct[,index.A]==1&temp.ct[,index.B]==1),4])
B=sum(temp.ct[which(temp.ct[,index.A]==1&temp.ct[,index.B]==0),4])
C=sum(temp.ct[which(temp.ct[,index.A]==0&temp.ct[,index.B]==1),4])
D=total.peaks-(A+B+C)
ctb<-matrix(c(A,B,C,D),nrow = 2,dimnames =list(c("In", "Not"),c("In", "Not")))
#re<-fisher.test(ctb)
print(ctb)
re.fisher<-fisher.test(ctb, alternative='greater')[c("p.value","estimate")]
re.fisher
}
temp.ct<-ol$venn_cnt
#A vs B
index.A<-grep("SMC1",colnames(temp.ct))
index.B<-grep("Rad21",colnames(temp.ct))
tempRe<-UseFisher(temp.ct,index.A,index.B,35000)
pVal.fisher.AB=tempRe$p.value
OR.fisher.AB=tempRe$estimate
#A vs C
index.A<-grep("SMC1",colnames(temp.ct))
index.B<-grep("ASXL1",colnames(temp.ct))
tempRe<-UseFisher(temp.ct,index.A,index.B,35000)
pVal.fisher.AC=tempRe$p.value
OR.fisher.AC=tempRe$estimate
#B vs C
index.A<-grep("Rad21",colnames(temp.ct))
index.B<-grep("ASXL1",colnames(temp.ct))
tempRe<-UseFisher(temp.ct,index.A,index.B,35000)
pVal.fisher.BC=tempRe$p.value
OR.fisher.BC=tempRe$estimate
pVal.fisher.AB
OR.fisher.AB
pVal.fisher.AC
OR.fisher.AC
pVal.fisher.BC
OR.fisher.BC
library(TxDb.Hsapiens.UCSC.hg19.knownGene)
library(TxDb.Hsapiens.UCSC.mm9.knownGene)
aCR<-assignChromosomeRegion(overlaps, nucleotideLevel=FALSE,
precedence=c("Promoters", "immediateDownstream",
"fiveUTRs", "threeUTRs",
"Exons", "Introns"),
TxDb=TxDb.Mmusculus.UCSC.mm9.knownGene)
barplot(aCR$percentage, las=3)
pie1(aCR$percentage,las=3)
dc<-annoGR(TxDb.Mmusculus.UCSC.mm9.knownGene)
seqinfo(dc)
seqlevels(dc)
seqinfo(overlaps)
seqlevels(overlaps)
#GRCm38/mm10
dd.GRCm39.mm10<-toGRanges(EnsDb.Mmusculus.v75)
seqinfo(dd.GRCm39.mm10)
seqlevels(dd.GRCm39.mm10)
seqlevels(dd.GRCm39.mm10,force=TRUE) <- c("chr1","chr10","chr11","chr12","chr13",
"chr14","chr15","chr16","chr17","chr18","chr19","chr2",
"chr3","chr4","chr5","chr6","chr7","chr8","chr9","chrX","chrY")
seqinfo(overlaps)<-seqinfo(dd.GRCm39.mm10)
seqinfo(overlaps)
overlaps.trimmed<-trim(overlaps, use.names=TRUE)
library(EnsDb.Mmusculus.v79)
#GRCm38/mm10
dd<-toGRanges(EnsDb.Mmusculus.v79)
seqinfo(dd)
library(ensembldb)
library(GenomeInfoDb)
seqlevelsStyle(overlaps.trimmed) <- seqlevelsStyle(dd.GRCm39.mm10)
overlaps.anno<-annoPeaks(overlaps.trimmed,dd.GRCm39.mm10)
library(org.Mm.eg.db)
overlaps.anno.with.entrez.id <- addGeneIDs(overlaps.anno,"org.Mm.eg.db",IDs2Add = "entrez_id")
write.csv(as.data.frame(unname(overlaps.anno.with.entrez.id)), paste0(out.dir.name,"other_anno.csv"))
pie1(table(overlaps.anno.with.entrez.id$insideFeature))
library("DBI")
over <- getEnrichedGO(overlaps.anno.with.entrez.id, orgAnn="org.Mm.eg.db",
maxP=.05, minGOterm=10,
multiAdjMethod="BH", condense=TRUE)
# over.gene.symbol <- getEnrichedGO(overlaps.anno.with.entrez.id, orgAnn="org.Mm.eg.db",
# feature_id_type="gene_symbol",
# maxP=.05, minGOterm=10,
# multiAdjMethod="BH",condense=TRUE)
head(over[["bp"]][, -c(3, 10)])
library(org.Hs.eg.db)
e2s = toTable(org.Mm.egSYMBOL)
tempDS=over$bp
tempDS2<-data.frame(apply(tempDS,1,function(u,e2c){
#print(u[1])
x=u[11]
tempId<-unlist(strsplit(as.character(x),split=";"))
index<-which(!is.na(match(e2s[,1],tempId)))
index<-match(tempId,e2s[,1])
geneS<-paste(e2s[index,2], collapse=";")
geneS
#print(geneS)
},e2c))
tempDS3<-cbind(tempDS,tempDS2)
colnames(tempDS3)[12]="GeneSymbol"
Draw4GO <- function(tempDS3) {
x=tempDS3
y=x[order(x$pvalue,decreasing = TRUE),]
z=y[1:10,c(1,3,4,5,6,10)]
Function<-z$Definition
negative_log10p=-log10(z$pvalue)
library(ggplot2)
#ggplot(z, aes(x=z$go.id, y=negative_log10p,fill=factor(z$go.id)))+geom_bar(stat="identity")+geom_hline(yintercept = -log10(0.05))+coord_flip()
ggplot(z, aes(go.id,pvalue, fill = go.id)) +
geom_bar(stat="identity")+ scale_x_discrete(labels=z$count.InDataset, limits=factor(z$go.id))+ scale_fill_discrete(breaks = z$go.id,
name="GO term")+theme(legend.text = element_text(colour="black", size = 11, face = "bold"))+
theme(axis.text.x = element_text(angle = 90, hjust = 1))+ggtitle("GO enrichment analysis")+labs(x="Gene Counts",y="-log p-value")
}
write.table(tempDS3,file=paste0(out.dir.name,"BP_.txt"),row.names = FALSE,quote=FALSE,sep="\t")
#anno <- annoGR(EnsDb.Hsapiens.v79)
ree<-annotatePeakInBatch(overlaps,
AnnotationData=dc,
output="nearestBiDirectionalPromoters",
bindingRegion=c(-2000, 500))
ree2 <- addGeneIDs(ree,
"org.Mm.eg.db",
IDs2Add = "entrez_id")
re<-makeVennDiagram(re.out[c(2,4,1)],NameOfPeaks=c("SMC1A", "RAD21","ASXL1"),totalTest=35000)
library(BSgenome.Mmusculus.UCSC.mm9)
upseqs<-Views(Mmusculus,overlaps)
overlaps.trimmed<-trim(overlaps, use.names=TRUE)
mm9.S<-Seqinfo(genome="mm9")
seqinfo(overlaps,force=TRUE) <- Seqinfo(genome="mm9")
seqlevels(mm9.S) <- c("chr1","chr10","chr11","chr12","chr13",
"chr14","chr15","chr16","chr17","chr18","chr19","chr2",
"chr3","chr4","chr5","chr6","chr7","chr8","chr9","chrX","chrY")
seqinfo(overlaps) <- mm9.S
goodGR <- trim(overlaps)
overlaps.trimmed<-goodGR
seq<-getAllPeakSequence(overlaps.trimmed,genome=Mmusculus)
seqs.mm9 <- getSeq(Mmusculus,overlaps.trimmed)
write2FASTA(seq, paste0(out.dir.name,"WT_triple.fa"))
## We can also try simulation data
seq.sim.motif <- list(c("t", "g", "c", "a", "t", "g"),
c("g", "c", "a", "t", "g", "c"))
set.seed(1)
seq.sim <- sapply(sample(c(2, 1, 0), 1000, replace=TRUE, prob=c(0.07, 0.1, 0.83)),
function(x){
s <- sample(c("a", "c", "g", "t"),
sample(100:1000, 1), replace=TRUE)
if(x>0){
si <- sample.int(length(s), 1)
if(si>length(s)-6) si <- length(s)-6
s[si:(si+5)] <- seq.sim.motif[[x]]
}
paste(s, collapse="")
})
os <- oligoSummary(seq, oligoLength=6, MarkovOrder=3,
quickMotif=TRUE)
zscore <- sort(os$zscore, decreasing=TRUE)
h <- hist(zscore, breaks=100, main="Histogram of Z-score")
text(zscore[1:2], rep(5, 2),
labels=names(zscore[1:2]), adj=0, srt=90)
pfms <- mapply(function(.ele, id)
new("pfm", mat=.ele, name=paste("SAMPLE motif", id)),
os$motifs, 1:length(os$motifs))
motifStack(pfms[[1]])
motifStack(pfms[[2]])
motifStack(pfms[[3]])
motifStack(pfms[[4]])
}
# input.file.dir="/media/H_driver/2016/Danny/Danny_chip_PeakCall/"
# input.file.pattern="*macs142_peaks.bed"
# output.file.dir="/media/H_driver/2016/Danny/Danny_chip_PeakCall/"
#
# AnnotatePeak(input.file.dir,input.file.pattern,8,output.file.dir,genome="Mm")
# AnnotatePeak(input.file.dir,input.file.pattern,7,output.file.dir,genome="Hs")
# Annotate ChIP-seq peak BED files with nearby genes via ChIPpeakAnno.
#
# Arguments (no validation is performed):
#   input.file.dir     directory scanned recursively for peak files
#   input.file.pattern pattern passed to dir() to select peak files
#   index.file         which "/"-separated path component names each sample
#   output.file.dir    directory under which a timestamped results folder
#                      is created
#   genome             "Mm" (mouse, EnsDb.Mmusculus.v75) or
#                      "Hs" (human, EnsDb.Hsapiens.v75)
#
# Side effects: creates "AnalysisResults_at_<timestamp>/" and writes one
# tab-separated "*annotation.txt" per annotated peak set.  The function is
# called for its side effects; its return value is incidental.
AnnotatePeak <- function(input.file.dir,input.file.pattern,index.file,output.file.dir,genome) {
# NOTE(review): library() inside a function attaches packages globally;
# requireNamespace() + :: would be cleaner for reusable code.
library(ChIPpeakAnno)
dir.name=input.file.dir
# NOTE(review): self-assignment below is a no-op.
input.file.pattern=input.file.pattern
# reformatPath() is defined elsewhere in this package; presumably it
# normalizes the trailing slash -- confirm against its definition.
dir.name=reformatPath(dir.name)
output.dir.name=reformatPath(output.file.dir)
#print(output.dir.name)
# Build a filesystem-safe timestamp (":" and " " replaced with "-") and
# create a fresh output directory for this run.
temp=Sys.time()
temp1=gsub(":","-",Sys.time())
temp2=gsub(" ","-",temp1)
temp3=paste0(output.dir.name,"AnalysisResults_at_",temp2)
dir.create(temp3)
# Collect all matching peak files; name each list entry by the requested
# path component (index.file).
file.name=paste0(dir.name,dir(dir.name,recursive = TRUE,pattern=input.file.pattern))
file.name.2<-as.list(file.name)
names(file.name.2)=sapply(strsplit(file.name,split="\\/"),"[[",index.file)
print(file.name.2)
file.name.3<-file.name.2
#sample.name<-sapply(strsplit(names(file.name.3),split="_peaks_"),"[[",1)
#names(file.name.3)=sample.name
# NOTE(review): the first file found is dropped here -- confirm this is
# intentional rather than a leftover from one specific data set.
file.name.4 <-file.name.3[-1]
# Read every remaining BED file into a GRanges object.
re.out<-lapply(file.name.4,function(u){
re=toGRanges(u,format="BED")
#colnames(re)=c("Count","GeneName")
re
})
head(re.out[[1]])
# Peak counts per file (computed here but unused in the "Mm" branch).
re.out.L<-lapply(re.out,function(u){
re=length(u)
#colnames(re)=c("Count","GeneName")
re
})
if(genome=="Mm"){
# Mouse branch: overlap three peak sets, plot the aggregated peak
# distribution around TSS, then annotate the common peaks.
# NOTE(review): the indices c(2,4,1) and the peaklist name below are
# hard-coded for one specific experiment; this branch will break on
# other inputs.
annoData <- toGRanges(EnsDb.Mmusculus.v75, feature="gene")
ol <- findOverlapsOfPeaks(re.out[c(2,4,1)])
overlaps<-ol$peaklist$`11_2470IUPUI_WT_BM_SMC1_peaks.bed///13_2470IUPUI_WT_BM_Rad21_peaks.bed///10_WT_BM_ASXL1_peaks.bed`
binOverFeature(overlaps, annotationData=annoData,
               radius=5000, nbins=20, FUN=length, errFun=0,
               ylab="count",
               main="Distribution of aggregated peak numbers around TSS")
overlaps.trimmed<-trim(overlaps, use.names=TRUE)
# NOTE(review): EnsDb.Mmusculus.v79 is attached but v75 is used below.
library(EnsDb.Mmusculus.v79)
dd.GRCm39.mm10<-toGRanges(EnsDb.Mmusculus.v75)
#seqinfo(dd.GRCm39.mm10)
#seqlevels(dd.GRCm39.mm10)
# Restrict annotation to the standard chromosomes so its seqinfo() can
# be copied onto the peak set.
seqlevels(dd.GRCm39.mm10,force=TRUE) <- c("chr1","chr10","chr11","chr12","chr13",
                                          "chr14","chr15","chr16","chr17","chr18","chr19","chr2",
                                          "chr3","chr4","chr5","chr6","chr7","chr8","chr9","chrX","chrY")
seqinfo(overlaps)<-seqinfo(dd.GRCm39.mm10)
#GRCm38/mm10
#dd<-toGRanges(EnsDb.Mmusculus.v79)
#seqinfo(dd)
#library(ensembldb)
#library(GenomeInfoDb)
# Harmonize chromosome naming style ("chr1" vs "1") before annotating.
seqlevelsStyle(overlaps.trimmed) <- seqlevelsStyle(dd.GRCm39.mm10)
overlaps.anno<-annoPeaks(overlaps.trimmed,dd.GRCm39.mm10)
write.table(overlaps.anno,file=paste0(temp3,"/","annotation.txt"),row.names = FALSE,quote=FALSE,sep="\t")
}else if(genome=="Hs"){
# Human branch: annotate every peak file independently and write one
# annotation table per sample.
library(EnsDb.Hsapiens.v75)
#annoData<-toGRanges(EnsDb.Hsapiens.v75, feature="gene")
dd.hs<-toGRanges(EnsDb.Hsapiens.v75)
print(seqinfo(dd.hs))
print(seqlevels(dd.hs))
#print(seqlevels(dd.hs)[,1])
#print(seqlevels(re.out[[1]])[,1])
# seqlevels(dd.hs,force=TRUE) <- c("chr1","chr10","chr11","chr12","chr13",
#                                         "chr14","chr15","chr16","chr17","chr18","chr19","chr2",
#                                        "chr3","chr4","chr5","chr6","chr7","chr8","chr9","chrX","chrY")
#temp4=
re.out.L<-lapply(1:length(re.out),function(u,re.out,dd.hs){
x=re.out[[u]]
x_name=names(re.out)[u]
# Match the annotation's seqlevels to this sample before copying its
# seqinfo onto the peaks.
seqlevels(dd.hs,force=TRUE)<-seqinfo(x)@seqnames
#print(seqinfo(re.out.trimmed))
#print(seqlevels(re.out.trimmed))
seqinfo(x)<-seqinfo(dd.hs)
#GRCm38/mm10
#dd<-toGRanges(EnsDb.Mmusculus.v79)
#seqinfo(dd)
#library(ensembldb)
#library(GenomeInfoDb)
seqlevelsStyle(x) <- seqlevelsStyle(dd.hs)
re.out.trimmed<-trim(x, use.names=TRUE)
overlaps.anno<-annoPeaks(re.out.trimmed,dd.hs)
write.table(overlaps.anno,file=paste0(temp3,"/",x_name,"_annotation.txt"),row.names = FALSE,quote=FALSE,sep="\t")
},re.out,dd.hs)
}
}
|
6042ce1f9aab4df7960059dcc72e294b06c47a11 | 903c20050d9ef156481dc6ef0aefdb38d5b1d131 | /scratch/inverse-bias-problem/2015-02-06-scratch-3.R | b076b6b5af384586a6e56db5c0e9a29c0aa7df00 | [] | no_license | wes-brooks/thesis | 27f5fd55f7d608b41f5e99bfa358839b46b396c6 | a4969f903a8bf8f81e4fca67faa7efb4f6243d3b | refs/heads/master | 2021-05-30T05:39:19.943867 | 2015-02-25T23:40:45 | 2015-02-25T23:40:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 495 | r | 2015-02-06-scratch-3.R | f0.smooth = vector()
# Local kernel smoothing of f0, f1, f2 (and f0.) at each point tt[i].
# f0/f1/f2/f0. and tt are defined earlier in this scratch script (not
# visible here); f0.smooth is initialized just above.
f1.smooth = vector()
f2.smooth = vector()
f0..smooth = vector()
# NOTE(review): the accumulators are grown with c() inside the loop and
# the bound is hard-coded to 100 -- seq_along(tt) with preallocated
# vectors would be safer.
for (i in 1:100) {
    # h is loop-invariant; hoisting it above the loop would be cleaner.
    h = 0.858
    # Epanechnikov kernel weights centred at tt[i] with bandwidth h.
    dist = abs(tt-tt[i])
    wt = lagr:::epanechnikov(dist, h)
    X.loc = as.matrix(tt - tt[i])
    # Intercepts of the locally weighted linear fits = smoothed values
    # at tt[i].
    f0.smooth = c(f0.smooth, lm(f0(tt)~X.loc, weights=wt)$coef[1])
    f1.smooth = c(f1.smooth, lm(f1(tt)~X.loc, weights=wt)$coef[1])
    f2.smooth = c(f2.smooth, lm(f2(tt)~X.loc, weights=wt)$coef[1])
    # f0. is smoothed with a local-constant (intercept-only) fit --
    # presumably deliberate; TODO confirm.
    f0..smooth = c(f0..smooth, lm(f0.(tt)~1, weights=wt)$coef[1])
}
bd2f0b7ba283ced6a77e22406d86f81f46c1516b | 18b5b5ea60ef362374e8ed60e651a2cffc4e221c | /man/addJsHtml2Latest.Rd | 9c3328d62d2d55189d25d6a52c57c8a572f6f91b | [
"MIT"
] | permissive | tpemartin/webtemplate | a6bfc468e8d3d64f7ca042676a6d247198e4c5b0 | 061eff584dbc0e8659cb1ca6a00119e4afe76658 | refs/heads/master | 2022-12-30T17:43:34.926760 | 2020-10-10T13:20:39 | 2020-10-10T13:20:39 | 292,805,709 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 505 | rd | addJsHtml2Latest.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/webservice.R
\name{addJsHtml2Latest}
\alias{addJsHtml2Latest}
\title{Add JS html to the end of body to the latest modified html}
\usage{
addJsHtml2Latest(jsfile, path = ".")
}
\arguments{
\item{jsfile}{A character. The file path of your html js file}
\item{path}{A character. The path to your latest modified html file}
}
\value{
}
\description{
Add JS html to the end of body to the latest modified html
}
\examples{
none
}
|
24c3485c6af8024f320bc7b1afec6bd0060217e7 | 1aed09b0f36d78e3727ead5dd3ec90229a9be0b3 | /man/convert_image_references.Rd | 6050d3aa45a285b04749dce29f1f3d7b6964197d | [
"MIT"
] | permissive | best-practice-and-impact/govspeakr | 9ad3e6a09014a43cf132e1ab59d1716a392016c5 | 1503fa53eacbded63d33a38903068cc8b4588589 | refs/heads/master | 2023-04-30T04:15:22.168910 | 2023-04-13T12:36:15 | 2023-04-13T12:36:15 | 218,343,310 | 6 | 5 | MIT | 2021-09-02T14:20:28 | 2019-10-29T17:21:40 | R | UTF-8 | R | false | true | 608 | rd | convert_image_references.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utility_functions.R
\name{convert_image_references}
\alias{convert_image_references}
\title{Convert markdown image references to govspeak}
\usage{
convert_image_references(image_references, md_file, images_folder)
}
\arguments{
\item{image_references}{dataframe of image file names and associated govdown
reference.}
\item{md_file}{string with markdown file text}
\item{images_folder}{string; folder containing images for *.md file}
}
\description{
Convert markdown image references to govspeak format (!!n)
}
|
2fd8979c3ebaed3f57620480fd2c10ea1d9f7146 | 1b2340e55dc23107bd549a63e8aa0cf7e08ab1d5 | /CTRP.R | a763ee91dc77b16cbf7c4ea0e587bba7865fd819 | [] | no_license | caichufan/HW05 | 8cb1fe8d0728c619c8ca56a85bb985aca4a9b6ff | 3760cf389748da35267717be86d365f8e37a8f54 | refs/heads/master | 2022-11-28T00:49:58.762659 | 2020-08-05T05:48:39 | 2020-08-05T05:48:39 | 284,823,174 | 0 | 0 | null | 2020-08-03T22:49:11 | 2020-08-03T22:49:11 | null | UTF-8 | R | false | false | 2,226 | r | CTRP.R | library(dplyr)
# --- Load the CTRP tables -------------------------------------------------
# AUC_1/AUC_2 are two halves of the drug-sensitivity table (area under the
# dose-response curve per experiment and compound).
AUC_1 <- read.csv("AUC_1.csv", header = TRUE, stringsAsFactors = FALSE)
AUC_2 <- read.csv("AUC_2.csv", header = TRUE, stringsAsFactors = FALSE)
cancercellline <- read.csv("cancer_cell_line_info.csv", header = TRUE, stringsAsFactors = FALSE)
compound <- read.csv("compound_info.csv", header = TRUE, stringsAsFactors = FALSE)
experiment <- read.csv("Experiment_info.csv", header = TRUE, stringsAsFactors = FALSE)
# Rename the experiment key so it matches the AUC tables for joining.
colnames(experiment)[colnames(experiment) == "expt_id"] <- c("experiment_id")
AUC <- rbind(AUC_1, AUC_2)
# First, join AUC and experiment by "experiment_id".
AUC_experiment <- inner_join(AUC, experiment, by = "experiment_id")
# Then, join AUC_experiment to the cell-line table by "master_ccl_id".
AUC_experiment_ccl <- inner_join(AUC_experiment, cancercellline, by = "master_ccl_id")
# Finally, add the compound information by "master_cpd_id".
fulllist <- inner_join(AUC_experiment_ccl, compound, by = "master_cpd_id")
# Q1: Which cancer type has the lowest AUC value for the compound "vorinostat"?
vorinostat <- subset(fulllist, fulllist$cpd_name == "vorinostat")
minimum <- min(vorinostat$area_under_curve)
lowest_AUC <- vorinostat[vorinostat$area_under_curve == minimum,]
print(lowest_AUC$cancer_type)
#"upper_aerodigestive_tract"
# Q2: For cell line 22RV1, which compound has the lowest AUC value?
RV1 <- subset(fulllist, fulllist$ccl_name == "22RV1")
min(RV1$area_under_curve)
lowest_AUC_RV1 <- RV1[RV1$area_under_curve == min(RV1$area_under_curve),]
print(lowest_AUC_RV1$cpd_name)
#"leptomycin B"
# Q3: Among the compounds that target EGFR, which has (on average) the
# lowest AUC value in breast cancer cell lines?
EGFR <- subset(fulllist, grepl( "EGFR", fulllist$gene_symbol_of_protein_target, ignore.case = TRUE))
EGFR <- subset(EGFR, EGFR$cancer_type == "breast")
compound_list_EGFR <- as.data.frame(table(EGFR$cpd_name))
#I found 11 not 10 compounds targeting EGFR??
EGFR_AUC_cp <- EGFR[,c("area_under_curve", "cpd_name")]
# Mean AUC per compound across the breast cell lines.
EGFR_AUC_cp <- aggregate(EGFR_AUC_cp$area_under_curve ~ EGFR_AUC_cp$cpd_name, EGFR_AUC_cp, mean)
colnames(EGFR_AUC_cp) <- c("compound", "AUC")
# NOTE(review): this shadows base::min for the rest of the session;
# a different variable name would be safer.
min <- EGFR_AUC_cp[EGFR_AUC_cp$AUC == min(EGFR_AUC_cp$AUC),]
print(min$compound)
#"afatinib"
|
1625544cdac1846a57e774766d03c5b2a40411de | 89c46bb14c4a3206ee4524bb20fbee51ebe270ac | /tests/testthat.R | 43750ae95cfb1832d1540d47a7ede654ce98b396 | [
"MIT"
] | permissive | gadenbuie/sprinkles | 7e0c6664e19bb78c73da8862952b55f98969371e | 640d865f1c6f29657de024776d01a3aa829abdb3 | refs/heads/main | 2023-02-17T06:09:20.712162 | 2021-01-18T14:06:04 | 2021-01-18T14:06:04 | 330,547,563 | 13 | 1 | null | null | null | null | UTF-8 | R | false | false | 62 | r | testthat.R | library(testthat)
library(sprinkles)
# Run every test file under tests/testthat/ for the sprinkles package.
test_check("sprinkles")
|
b3e630fa732ba54aac86ce6925f57c490d851932 | 2e5bcb3c8028ea4bd4735c4856fef7d6e46b5a89 | /man/nbrOfArrays.AffymetrixCelSet.Rd | f0fd854320094aeda17e0b56b3dd5a5bf16c24aa | [] | no_license | HenrikBengtsson/aroma.affymetrix | a185d1ef3fb2d9ee233845c0ae04736542bb277d | b6bf76f3bb49474428d0bf5b627f5a17101fd2ed | refs/heads/master | 2023-04-09T13:18:19.693935 | 2022-07-18T10:52:06 | 2022-07-18T10:52:06 | 20,847,056 | 9 | 4 | null | 2018-04-06T22:26:33 | 2014-06-15T03:10:59 | R | UTF-8 | R | false | false | 898 | rd | nbrOfArrays.AffymetrixCelSet.Rd | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% AffymetrixCelSet.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{nbrOfArrays.AffymetrixCelSet}
\alias{nbrOfArrays.AffymetrixCelSet}
\alias{AffymetrixCelSet.nbrOfArrays}
\alias{nbrOfArrays,AffymetrixCelSet-method}
\title{Gets the number of arrays in the file set}
\description{
Gets the number of arrays in the file set.
This is just a wrapper for \code{length()}.
}
\usage{
\method{nbrOfArrays}{AffymetrixCelSet}(this, ...)
}
\arguments{
\item{...}{Not used.}
}
\value{
Returns an \code{\link[base]{integer}}.
}
\author{Henrik Bengtsson}
\seealso{
For more information see \code{\link{AffymetrixCelSet}}.
}
\keyword{internal}
\keyword{methods}
|
aec767d51e1e23bf1eaa8ae9302c713972044492 | 9ec684f6599db154ed1f79b5154c5e98c4d27b0c | /Voter_Data_Scrape.R | eae99e1adb56e583ff74dc9620c4a968080209c9 | [] | no_license | STA440-Team4/Case_Study_4 | 3138ba78f0809b5a8191b20d2188fa0c0ffdf0db | fce8c5012d468cafaa29a17c3b9b5f971007d9b8 | refs/heads/master | 2020-04-01T12:28:34.679343 | 2018-10-16T03:02:24 | 2018-10-16T03:02:24 | 153,209,364 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 515 | r | Voter_Data_Scrape.R | # Creating a Tidy Data Frame of Voter Data
# Bootstrap dependencies.
# NOTE(review): installing inside a require() guard happens at run time;
# for reproducible scripts, pin/declare dependencies up front instead.
if (!require("dplyr")) install.packages("dplyr")
library(dplyr)
if (!require("rvest")) install.packages("rvest")
library(rvest)
# Help for scraping and downloading zip files comes from
# https://stackoverflow.com/questions/46838858/r-web-scrapping-and-downloading-multiple-zip-files-and-save-the-files-without
# NC State Board of Elections data listing page.
url = "https://dl.ncsbe.gov/?prefix=data/"
page = read_html(url)
# All anchor tags on the page.  NOTE(review): find_nodes is never used below.
find_nodes = html_nodes(page, "a")
# NOTE(review): grep()-ing the deparsed nodes for "a" looks like work in
# progress -- the intent is presumably to collect links to .zip files;
# verify `zip` before relying on it.
zip <- grep("a", html_nodes(page, "a.zip"), value = TRUE)
|
24a46d9f619e58ea1a1a0fa10437e093d544f646 | f23deafb3417c19cd19a86e3355d6717e327cd5b | /SalesPlot.R | 709c32847b4b81f140ac98e844054f13747a7e51 | [] | no_license | jamielaiwang/Clif-Bar-Weekly-Sales-Forecasting | 281a5e78b63abcd8acda4b8aaa177980fb65ada8 | cfa4ffa85fd0461045b8a5e713866e76b7b932be | refs/heads/master | 2020-04-21T15:05:22.650375 | 2019-02-21T18:26:25 | 2019-02-21T18:26:25 | 169,657,360 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,416 | r | SalesPlot.R | require(plyr)
require(reshape)
library(ggplot2)
library(zoo)
library(lubridate)
library(scales)
library(forecast)
library(fpp2)
#TODO: set data, submit and r paths to correct directories, if needed
paths = list(data='./DATA/', # ./ is relative path
             submit='./Submissions/',
             r='../R/')
# Loads the training data with correct classes
#cls <- c('factor', 'factor','Date', 'numeric', 'logical')
#train <- read.csv(paste0(paths$data, 'Cliftrainclean.csv'),
#                  colClasses=cls)
# Store 67 to 72 must be deleted.
train <- read.csv(paste0(paths$data, "Cliftrainclean.csv"),header = TRUE)
# Coerce column types: dates come in as "%m/%d/%Y" strings.
train$Date<-as.Date(as.character(train$Date),format="%m/%d/%Y")
train$Store<-as.factor(train$Store)
train$Dept<-as.factor(train$Dept)
train.dates <- unique(train$Date)
num.train.dates <- length(train.dates)
all.stores <- unique(train$Store)
num.stores <- length(all.stores)
# Complete Date x Store grid so every store has a row for every week,
# even where sales are missing.
train.frame <- data.frame(Date=rep(train.dates, num.stores),
                          Store=rep(all.stores, each=num.train.dates))
# Dept 1 sales joined onto the grid (plyr::join), then reshaped wide
# (reshape::cast) to one column per store.
tr.1 <- join(train.frame,
             train[train$Dept==1, c('Store','Date','Weekly_Sales')])
tr.1 <- cast(tr.1, Date ~ Store)
tr.date<-tr.1[,1]
tr.sales<-tr.1[,2:ncol(tr.1)]
# Prefix numeric store ids so columns read "Store2", "Store3", ...
colnames(tr.sales)<-paste("Store",colnames(tr.sales),sep="")
head(tr.sales) #dim(tr.sales)=129*84
# tr.x = dates + one sales column per store; used by all plots below.
tr.x<-cbind(tr.date,tr.sales)
colnames(tr.x)[1] <- "Date"
head(tr.x[,1:6])
# --- Plot 1: all Albertsons (ALB) divisions -------------------------------
# One geom_line() per division.  The `value`/`variable` names in the
# top-level aes() are never evaluated because every layer overrides y and
# colour -- presumably a melt() template that was never finished; confirm.
png(file='ALBSales.png', width=700, height=320)
ALB.df= data.frame(Dates=tr.date, ALB_ACME=tr.x$Store2, ALB_Denver=tr.x$Store3
                   ,ALB_Eastern=tr.x$Store4,ALB_Houston=tr.x$Store5,ALB_Intermountain=tr.x$Store6
                   ,ALB_Jewel=tr.x$Store7,ALB_Portland=tr.x$Store9
                   ,ALB_Seattle=tr.x$Store10,ALB_Shaws=tr.x$Store11
                   ,ALB_Southern=tr.x$Store13,ALB_Southwest=tr.x$Store14)
ggplot(data = ALB.df, aes(x=Dates, y=value, color=variable ) ) +
  ylab('Weekly Sales of ALB supermartkets') +
  geom_line(aes(y=ALB_ACME , col='ALB_ACME'), size=1, alpha=.5) +
  geom_line(aes(y=ALB_Denver, col='ALB_Denver'), size=1, alpha=.5) +
  geom_line(aes(y=ALB_Eastern , col='ALB_Eastern'), size=1, alpha=.5) +
  geom_line(aes(y=ALB_Houston, col='ALB_Houston'), size=1, alpha=.5) +
  geom_line(aes(y=ALB_Intermountain , col='ALB_Intermountian'), size=1, alpha=.5) +
  geom_line(aes(y=ALB_Jewel, col='ALB_Jewel'), size=1, alpha=.5) +
  geom_line(aes(y=ALB_Portland, col='ALB_Portland'), size=1, alpha=.5) +
  geom_line(aes(y=ALB_Seattle, col='ALB_Seattle'), size=1, alpha=.5) +
  geom_line(aes(y=ALB_Shaws, col='ALB_Shaws'), size=1, alpha=.5) +
  geom_line(aes(y=ALB_Southern, col='ALB_Southern'), size=1, alpha=.5) +
  geom_line(aes(y=ALB_Southwest, col='ALB_Southwest'), size=1, alpha=.5) +
  theme(legend.position=c(.1,.85))
dev.off()
# --- Plot 2: northern ALB divisions (Portland, Seattle) -------------------
png(file='ALBNorthSales.png', width=700, height=320)
ALBN.df= data.frame(Dates=tr.date, ALB_Portland=tr.x$Store9
                    ,ALB_Seattle=tr.x$Store10)
# NOTE(review): the Month column is computed but never used below.
ALBN.df$Month <- as.Date(cut(ALBN.df$Dates,
                             breaks = "month"))
ggplot(data = ALBN.df, aes(x=Dates, y=value, color=variable ) ) +
  ylab('Weekly Sales of North ALB supermartkets') +
  geom_line(aes(y=ALB_Portland, col='ALB_Portland'), size=1, alpha=.5) +
  geom_line(aes(y=ALB_Seattle, col='ALB_Seattle'), size=1, alpha=.5) +
  scale_x_date(labels = date_format("%Y-%m"), breaks = '2 months') +
  theme(legend.position=c(.1,.85))
dev.off()
# --- Plot 3: eastern and southern ALB divisions ---------------------------
png(file='ALBEastSouthSales.png', width=700, height=320)
ALBS.df= data.frame(Dates=tr.date ,ALB_Eastern=tr.x$Store4,ALB_Houston=tr.x$Store5
                    ,ALB_Southern=tr.x$Store13,ALB_Southwest=tr.x$Store14)
ggplot(data = ALBS.df, aes(x=Dates, y=value, color=variable ) ) +
  ylab('Weekly Sales of Eastern and Southern ALB supermartkets') +
  geom_line(aes(y=ALB_Eastern, col='ALB_Eastern'), size=1, alpha=.5) +
  geom_line(aes(y=ALB_Houston, col='ALB_Houston'), size=1, alpha=.5) +
  geom_line(aes(y=ALB_Southern, col='ALB_Southern'), size=1, alpha=.5) +
  geom_line(aes(y=ALB_Southwest, col='ALB_Southwest'), size=1, alpha=.5) +
  scale_x_date(labels = date_format("%Y-%m"), breaks = '2 months') +
  theme(legend.position=c(.1,.85))
dev.off()
# --- Plot 4: Walmart Neighborhood Market regions --------------------------
png(file='WalmartNHMSales.png', width=700, height=320)
Walmart.df= data.frame(Dates=tr.date, WM_NHM_Atlantic=tr.x$Store78, WM_NHM_NorthCentral=tr.x$Store79
                       ,WM_NHM_Northeast=tr.x$Store80,WM_NHM_SouthCentral=tr.x$Store81
                       ,WM_NHM_Southeast=tr.x$Store82,WM_NHM_West=tr.x$Store83)
ggplot(data = Walmart.df, aes(x=Dates, y=value, color=variable)) +
  ylab('Weekly Sales of Walmart Neighborhood Markets') +
  geom_line(aes(y=WM_NHM_Atlantic , col='WM_NHM_Atlantic'), size=1, alpha=.5) +
  geom_line(aes(y=WM_NHM_NorthCentral, col='WM_NHM_NorthCentral'), size=1, alpha=.5) +
  geom_line(aes(y=WM_NHM_Northeast , col='WM_NHM_Northeast'), size=1, alpha=.5) +
  geom_line(aes(y=WM_NHM_SouthCentral, col='WM_NHM_SouthCentral'), size=1, alpha=.5) +
  geom_line(aes(y=WM_NHM_Southeast , col='WM_NHM_Southeast'), size=1, alpha=.5) +
  geom_line(aes(y=WM_NHM_West, col='WM_NHM_West'), size=1, alpha=.5) +
  scale_x_date(labels = date_format("%Y-%m"), breaks = '2 months') +
  theme(legend.position=c(.1,.85))
dev.off()
# --- Plot 5: Kroger divisions ---------------------------------------------
# FIX(review): the original built Kroger.df but then plotted Walmart.df and
# referenced a nonexistent WM_NHM_ATLN column (copy-paste from the Walmart
# section), which errors at run time.  It also copy-pasted a stray
# WM_NHM_West column into Kroger.df.  This version plots the Kroger data.
png(file='KrogerSales.png', width=700, height=320)
Kroger.df= data.frame(Dates=tr.date, Kroger_Atlanta=tr.x$Store33, Kroger_Central=tr.x$Store34
                      ,Kroger_Cincinnati=tr.x$Store35,Kroger_Columbus=tr.x$Store36
                      ,Kroger_Dallas=tr.x$Store38)
ggplot(data = Kroger.df, aes(x=Dates, y=value, color=variable)) +
  ylab('Weekly Sales of Kroger Divisions') +
  geom_line(aes(y=Kroger_Atlanta , col='Kroger_Atlanta'), size=1, alpha=.5) +
  geom_line(aes(y=Kroger_Central, col='Kroger_Central'), size=1, alpha=.5) +
  geom_line(aes(y=Kroger_Cincinnati , col='Kroger_Cincinnati'), size=1, alpha=.5) +
  geom_line(aes(y=Kroger_Columbus, col='Kroger_Columbus'), size=1, alpha=.5) +
  geom_line(aes(y=Kroger_Dallas , col='Kroger_Dallas'), size=1, alpha=.5) +
  scale_x_date(labels = date_format("%Y-%m"), breaks = '2 months') +
  theme(legend.position=c(.1,.85))
dev.off()
### We can use ets() to detect seasonality ###
# Fit an ETS model to each store's weekly-sales series and record the
# selected model form (ets()$method, e.g. "ETS(M,N,N)").
ets.seasonality.vector<-character()
for (i in 1:ncol(tr.sales))
{ets.model1<-ets(tr.sales[,i])
ets.method<-ets.model1$method
ets.seasonality.vector[i]<-ets.method
}
## Give the name of seasonality.vector
names(ets.seasonality.vector)<-names(tr.sales)
ets.seasonality.vector
## which(ets.seasonality.vector=="ETS(M,Ad,N)") is store 56.
## Store 56 is ROUNDY'S TOTAL CENSUS TRADING AREA
# Tally how many stores picked each ETS form.
ets.report<-table(ets.seasonality.vector)
ets.report
# Write a small report to a text file.
# NOTE(review): the bare `ets.report` inside the sink relies on top-level
# auto-printing; wrap it in print() so it is also captured when this
# script is source()d.
sink('ETSDetectSeasonality.txt')
cat("\n")
cat("=============================\n")
cat("\n")
ets.report
cat("=============================\n")
cat("\n")
cat("\n")
sink()
|
a697728d2c9dd5c389fe9bf66458bb69d5dbc7c4 | 7ed11e971db591445c3ac9c6a14d87057cd34bf5 | /4 - B - Simulacion AR.R | 5aa0072eebf2f466d87b86a549308e3d60583b9b | [] | no_license | jeguns/EP6035 | 80ddf7b2f68f6d5071647d243b1c2345fdd5b31e | b6faf81a317da39b89a7439e5b18c1f7ead4f8fa | refs/heads/master | 2023-06-14T04:45:37.423504 | 2021-07-08T01:55:32 | 2021-07-08T01:55:32 | 355,602,552 | 2 | 3 | null | null | null | null | UTF-8 | R | false | false | 3,171 | r | 4 - B - Simulacion AR.R |
library(TSA)
library(ggfortify)
# Simulate AR processes with arima.sim(): plot each series, run
# stationarity tests, and inspect the ACF/PACF (an AR(p) process shows
# geometric ACF decay and a PACF cutoff at lag p).
# AR(1) with phi = 0.85 ----------------------------------------------------
# ARIMA(1,0,0) with phi = 0.85
phi   = 0.85
sigma = 2
set.seed(78)
arima.sim(model=list(ar=c(phi)), n=150, sd = sigma) -> Y.AR1A # AR(1) with phi = 0.85
#arima.sim(list(order = c(1,0,0), ar= phi), n = 150) -> Y.AR1A # AR(1) with phi = 0.85
Y.AR1A %>% 
  autoplot(type="l") + 
  geom_hline(yintercept = 0)
# Stationarity tests: KPSS (H0: stationary), ADF and Phillips-Perron
# (H0: unit root), with short/long lag-truncation variants.
Y.AR1A %>% aTSA::stationary.test(method = "kpss",lag.short=T)
Y.AR1A %>% aTSA::stationary.test(method = "kpss",lag.short=F)
Y.AR1A %>% aTSA::stationary.test(method = "adf")
Y.AR1A %>% aTSA::stationary.test(method = "pp",lag.short=F)
Y.AR1A %>% aTSA::stationary.test(method = "pp",lag.short=T)
Y.AR1A %>% BoxCox.lambda()
# Tests for ARCH effects (conditional heteroscedasticity).
Y.AR1A %>% archTest()
Y.AR1A %>% McLeod.Li.test(y=.)
Y.AR1A %>% 
  TSA::acf(type="correlation", lag = 50, plot = TRUE, main = "Función de autocorrelación")
Y.AR1A %>% 
  TSA::acf(type="partial", lag = 50, plot = TRUE, main = "Función de autocorrelación parcial")
Y.AR1A %>% 
  TSA::acf(type="partial", lag = 50, plot = FALSE, main = "Función de autocorrelación parcial")
# OLS of Y_t on Y_{t-1}: the slope estimates phi.
lm(Y.AR1A~zlag(Y.AR1A))
# AR(1) with phi = 0.55 ----------------------------------------------------
phi   = 0.55
sigma = sqrt(6)
set.seed(6545)
arima.sim(model=list(ar=c(phi)), n=150, sd = sigma) -> Y.AR1B # AR(1) with phi = 0.55
Y.AR1B %>% 
  autoplot(type="l") + 
  geom_hline(yintercept = 0)
Y.AR1B %>% 
  TSA::acf(type="correlation", lag = 50, plot = TRUE, main = "Función de autocorrelación")
Y.AR1B %>% 
  TSA::acf(type="partial", lag = 50, plot = TRUE, main = "Función de autocorrelación parcial")
# AR(1) with phi = -0.1 ----------------------------------------------------
phi   = -0.1
sigma = sqrt(2)
set.seed(7488)
arima.sim(model=list(ar=c(phi)), n=150, sd = sigma) -> Y.AR1C # AR(1) with phi = -0.1
Y.AR1C %>% 
  autoplot(type="l") + 
  geom_hline(yintercept = 0)
Y.AR1C %>% 
  TSA::acf(type="correlation", lag = 50, plot = TRUE, main = "Función de autocorrelación")
Y.AR1C %>% 
  TSA::acf(type="partial", lag = 50, plot = TRUE, main = "Función de autocorrelación parcial")
# AR(1) with phi = -0.89 ---------------------------------------------------
phi   = -0.89
sigma = sqrt(8)
set.seed(7488)
arima.sim(model=list(ar=c(phi)), n=150, sd = sigma) -> Y.AR1C # AR(1) with phi = -0.89
Y.AR1C %>% 
  autoplot(type="l") + 
  geom_hline(yintercept = 0)
Y.AR1C %>% 
  TSA::acf(type="correlation", lag = 50, plot = TRUE, main = "Función de autocorrelación")
Y.AR1C %>% 
  TSA::acf(type="partial", lag = 50, plot = TRUE, main = "Función de autocorrelación parcial")
# AR(2) with phi1 = 0.65 and phi2 = 0.25 -----------------------------------
phi   = c(0.65,0.25)
sigma = sqrt(2)
set.seed(15345)
arima.sim(model=list(ar=phi), n=150, sd = sigma) -> Y.AR2A
Y.AR2A %>% 
  autoplot(type="l") + 
  geom_hline(yintercept = 0)
Y.AR2A %>% 
  TSA::acf(type="correlation", lag = 50, plot = TRUE, main = "Función de autocorrelación")
Y.AR2A %>% 
  TSA::acf(type="partial", lag = 50, plot = TRUE, main = "Función de autocorrelación parcial")
# NOTE(review): ts.sim is never defined in this script -- the two lines
# below will fail unless it exists in the workspace; confirm intent.
ts.plot(ts.sim)
ts.sim %>% Arima(order=c(1,1,0)) %>% forecast(h=3)
|
dfbbe21af27b93fa9ebf1f16fcbadcd747c5a603 | f60d5f0968dd503b0dca29282013c50bb3ac8acf | /UI.R | 700446db86efa6559f2662f2e9550d00c9705cd9 | [] | no_license | GonzaloMoreno/Develop-Data-Product-Project | 65b431d4b143d2c9815cc20f9b1d7c391013e991 | fd92706a200f96e5cd3f7da3eeab0bbab40203ea | refs/heads/master | 2021-01-19T15:33:40.320731 | 2015-05-20T02:51:57 | 2015-05-20T02:51:57 | 35,921,523 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,400 | r | UI.R | library(shiny)
# Classic two-panel Shiny UI.  Input/output ids ("Entrada1", "salida1", ...)
# must match the corresponding server.R.  The Spanish UI labels are
# user-facing strings and are left untouched.
shinyUI(pageWithSidebar( ## shinyUI = user interface; pageWithSidebar = page with a sidebar layout
  headerPanel("Ejemplo2 entrada numérica Gonzalo Moreno"),
  # Left panel: a numeric input, a city checkbox group, and a date picker.
  sidebarPanel(
    h3("letra tamaño h3"),
    numericInput("Entrada1", "Entrada númerica etiqueta 1", 0,min=0, max=10, step=1),
    checkboxGroupInput("Entrada2", "caja de chequeo", c("BOGOTA"="BOGOTA",
                                                        "VILLAVICENCIO"= "VILLAVICENCIO",
                                                        "VALLEDUPAR"= "VALLEDUPAR",
                                                        "BUCARAMANGA"="BUCARAMANGA",
                                                        "YOPAL"="YOPAL",
                                                        "IBAGUE"="IBAGUE",
                                                        "DORADA"="DORADA",
                                                        "FLORENCIA"="FLORENCIA")),
    dateInput("fechaingreso", "fecha:")
  ),
  # Main panel: headings plus the text/plot outputs filled in by the server.
  mainPanel(
    h2("Titulo principal, letra tamaño h2"),
    h2("Usted ingreso la variable numérica, letra tamaño h4"),
    verbatimTextOutput("salida1"),
    ##verbatimTextOutput("salida2"),
    plotOutput("salida2"),
    verbatimTextOutput("salida3")
  )
)
)
adc62b51bfd5cd24e4d66d019c81bc59328bd7c9 | 0d1685a2218c0c37bfc700fcb8008dda69625ede | /man/EpivizChart-class.Rd | 3324992ae5369cf5ce06dd3e09b1ba1029f68bd9 | [] | no_license | epiviz/epivizrChart | 75f41609bd6d82517e374126102a8c32c0c7a060 | 445ac18b7da77581616e0b94785336c53c40c046 | refs/heads/master | 2021-11-26T01:26:00.082587 | 2021-07-30T07:35:15 | 2021-07-30T07:35:15 | 89,830,859 | 3 | 1 | null | 2021-01-22T13:06:22 | 2017-04-30T05:15:47 | HTML | UTF-8 | R | false | true | 1,757 | rd | EpivizChart-class.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EpivizChart-class.R
\docType{class}
\name{EpivizChart-class}
\alias{EpivizChart-class}
\alias{EpivizChart}
\title{Data container for an Epiviz chart component.}
\description{
Data container for an Epiviz chart component.
}
\section{Fields}{
\describe{
\item{\code{data}}{(list) Values of an epiviz chart's data attribute.}
\item{\code{colors}}{(character) Epiviz chart's colors attribute.}
\item{\code{settings}}{(list) Epiviz chart's settings attribute.}
\item{\code{parent}}{An object of class \code{\link{EpivizEnvironment}} where chart is appended.}
}}
\section{Methods}{
\describe{
\item{\code{get_attributes()}}{Get attributes for rendering chart}
\item{\code{get_available_settings()}}{Get available settings}
\item{\code{get_colors()}}{Get chart colors}
\item{\code{get_data()}}{Get chart data}
\item{\code{get_parent()}}{Get parent}
\item{\code{get_settings()}}{Get chart settings}
\item{\code{navigate(chr, start, end)}}{Navigate chart to a genomic location
\describe{
\item{chr}{Chromosome}
\item{start}{Start location}
\item{end}{End location}
}}
\item{\code{render_component(shiny = FALSE)}}{Render to html}
\item{\code{revisualize(chart_type)}}{Revisualize chart as the given chart type
\describe{
\item{chart_type}{The type of chart to be visualized
(BlocksTrack, HeatmapPlot, LinePlot, LineTrack, ScatterPlot,
StackedLinePlot, StackedLineTrack)}
}}
\item{\code{set_colors(colors)}}{Set chart colors}
\item{\code{set_data(data)}}{Set chart data}
\item{\code{set_settings(settings)}}{Modify current settings
\describe{
\item{settings}{List of new settings.
Call get_available_settings for settings available to modify.
}
}}
}}
|
550a381467cb1b27ee12448de7b0fcc883834cb7 | 4f038917144f89bcdb949346c92ae90782ab6f72 | /R/google_sheets.R | ef1d7bf2714185fe394c0b561f56fda863330364 | [
"MIT"
] | permissive | isabella232/rauth0 | a4df725b2eb1f0f351140e878e419ff44d9e862c | 07972e009ed95e1d7980de32ba8ef54b6ff97c5f | refs/heads/master | 2023-06-09T14:00:41.898109 | 2021-07-01T08:58:08 | 2021-07-01T08:58:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 934 | r | google_sheets.R |
#' DWH Google sheets connection
#'
#' This function loads a token from Google to connect to Google drive,
#' to read/write spreadsheets with package `googlesheets`
#'
#' @param google_token The token used to connect to google's api, a JSON file has to be indicated in this parameter
#'
#' @examples
#'
#' library(googlesheets)
#' dwh_gsheets()
#' x = gs_key('<YOUR-GSHEET-KEY>')
#'
#' sheets = gs_ws_ls(x)
#' sheet = gs_read(x, ws='mytab')
#'
#' @importFrom jsonlite fromJSON
#' @importFrom googlesheets gs_auth
#' @importFrom httr oauth_service_token oauth_endpoints
#' @export
dwh_gsheets <- function(google_token="~/.dwh_google_token.json"){
  # Build a Google service-account OAuth token from the JSON credentials
  # file, scoped for both the Sheets feed and Drive access.
  token <- oauth_service_token(
    oauth_endpoints("google"),
    jsonlite::fromJSON(google_token),
    paste(c("https://spreadsheets.google.com/feeds",
            "https://www.googleapis.com/auth/drive"),
          collapse = " ")
  )
  # Register the token with googlesheets for subsequent gs_* calls.
  # cache = FALSE avoids writing a .httr-oauth file to disk; spelled out
  # as FALSE rather than the reassignable shorthand F.
  gs_auth(token = token, cache = FALSE)
}
|
88b7569caa2f95cd8a5ff766a1044a081e9f4357 | 3266fda1d5144f83f2d7a2243cbc59347e70ce6c | /man/floor_date.Rd | bfbe84b9adcf5809182efd7060c73a9e3fbeff3b | [] | no_license | gvanzin/lubridate | 04b8d4ce11914c4385c100af3cbe854feab6a62c | de6e93866695704ec8bd10a6f61353befeee0818 | refs/heads/master | 2020-12-27T01:46:08.818299 | 2015-04-24T10:19:50 | 2015-04-24T10:19:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,460 | rd | floor_date.Rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/round.r
\name{floor_date}
\alias{floor_date}
\title{Round date-times down.}
\usage{
floor_date(x, unit = c("second", "minute", "hour", "day", "week", "month",
"year", "quarter"))
}
\arguments{
\item{x}{a vector of date-time objects}
\item{unit}{a character string specifying the time unit to be rounded to. Should be one of
"second", "minute", "hour", "day", "week", "month", "quarter", or "year."}
}
\value{
x with the appropriate units floored
}
\description{
\code{floor_date} takes a date-time object and rounds it down to the nearest integer
value of the specified time unit. Users can specify whether to round down to
the nearest second, minute, hour, day, week, month, quarter, or year.
}
\details{
By convention the boundary for a month is the first second of the month. Thus
\code{floor_date(ymd("2000-03-01"), "month")} gives "2000-03-01 UTC".
}
\examples{
x <- as.POSIXct("2009-08-03 12:01:59.23")
floor_date(x, "second")
# "2009-08-03 12:01:59 CDT"
floor_date(x, "minute")
# "2009-08-03 12:01:00 CDT"
floor_date(x, "hour")
# "2009-08-03 12:00:00 CDT"
floor_date(x, "day")
# "2009-08-03 CDT"
floor_date(x, "week")
# "2009-08-02 CDT"
floor_date(x, "month")
# "2009-08-01 CDT"
floor_date(x, "quarter")
# "2009-07-01 CDT"
floor_date(x, "year")
# "2009-01-01 CST"
}
\seealso{
\code{\link{ceiling_date}}, \code{\link{round_date}}
}
\keyword{chron}
\keyword{manip}
|
9115f6592fa24978f9d7701ea08612c77e96708d | 178369bc640e256e8eaeb74fc4932db340395bbb | /Divvy_generate_coefficients_for_app_predictions.R | 3c59306666d277d6a21963d4d89235eee5de5ace | [] | no_license | m5loco/Divvy | 6ce329bf33e7a5748807f941234746c4216053bc | 53bb1397c92b474d43e719e19ab54abb4b3a36f7 | refs/heads/master | 2022-08-26T21:20:51.640333 | 2022-08-04T00:22:21 | 2022-08-04T00:22:21 | 241,747,324 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,585 | r | Divvy_generate_coefficients_for_app_predictions.R | source("c:/users/Mark/Documents/School/MSDS 498/fcst/base_fcst.R")
library("plyr")
library("dplyr")
# Append one row of model coefficients (plus the station id) to a running
# per-station coefficient table.
#
# Args:
#   station_id: identifier stored alongside the coefficients.
#   coefs:      named numeric vector (e.g. glm()$coefficients); each name
#               becomes a column (data.frame() sanitizes names such as
#               "(Intercept)").
#   df:         accumulator data frame, or NULL on the first call.
#
# Returns the accumulator with one extra row.  plyr::rbind.fill() pads
# missing columns with NA when stations selected different predictors.
build_coef_df <- function(station_id, coefs, df)
{
  # Build the single-row frame once; the original duplicated these two
  # statements in both branches of the if/else.
  new_df <- data.frame(as.list(coefs))
  new_df$station_id <- station_id
  if (is.null(df))
  {
    new_df
  }
  else
  {
    rbind.fill(df, new_df)
  }
}
# Fit a per-station Poisson GLM of trip counts and accumulate the fitted
# coefficients into one table for the prediction app.
# Relies on objects created by base_fcst.R (sourced above):
# top_stations_df, top_df, model_vars; stepAIC() comes from MASS --
# presumably attached by base_fcst.R, confirm.
stations<-top_stations_df$from_station_id
mdl_coeff<-NULL
for (station_id in stations)
{
  print(station_id)
  # Training rows for this station, restricted to the modelling variables.
  station_train_df<-top_df[top_df$station_id == station_id,]
  # For testing....
  # station_train_df<-subset(station_train_df[station_train_df$start_year<2019,],select=model_vars)
  station_train_df<-subset(station_train_df,select=model_vars)
  ###Full LM Model
  full.lm <- lm(count_i ~ . ,data=station_train_df)
  ###Forward Selection Model (AIC) between intercept-only and full model
  lower.lm <- lm(count_i ~ 1,data=station_train_df)
  forward.lm <- stepAIC(object=lower.lm,scope=list(upper=full.lm,lower=lower.lm),direction=c('forward'),trace=0);
  ###GLM Models - Use coefficients selected by forward selection
  glm_vars<-rownames(summary(forward.lm)$coefficients)
  glm_vars<-glm_vars[!glm_vars=='(Intercept)']
  ###Poisson regression on the selected predictors
  poisson.glm <- glm(count_i ~ ., family="poisson"(link="log"), data=subset(station_train_df,select=c("count_i",glm_vars)))
  mdl_coeff<-build_coef_df(station_id,poisson.glm$coefficients,mdl_coeff)
  # Rewritten every iteration so a partial file survives a mid-run crash.
  write.csv(mdl_coeff,"c:/users/Mark/Documents/School/MSDS 498/fcst/2020_top_station_coefficients.csv",row.names=FALSE)
}
|
c6c92701bddac24e6bd66c7e13fc122ac7795f7a | 8491183d56c8fc70ac58f8af10626875845b9dee | /Week 7/Week7-HigherOrderAndQualitative.R | 25e4a99e21ca3e47efcc10ae7ee5b0159ef01efd | [] | no_license | yifeitung/HUDM_5126 | 7f0a48088f4df801fb5c998aef3863bda3b207b9 | 6c51d67b613f0ea98ae0e15f2b87247a36400574 | refs/heads/master | 2023-02-07T18:09:55.512673 | 2020-12-28T19:30:34 | 2020-12-28T19:30:34 | 307,020,222 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,885 | r | Week7-HigherOrderAndQualitative.R | # Get data from Week7-bluegills.txt
# Bluegill example: compare linear vs quadratic fits of fish length on age.
# NOTE(review): header = T uses the reassignable shorthand; TRUE is safer.
data <- read.table(file.choose(), header = T)
# Look at data and attach
# NOTE(review): attach() puts the data frame on the search path; with()
# or data= arguments are the safer modern idiom.
head(data)
attach(data)
# Scatterplot
plot(age, length)
# Fit quadratic model:
age2 = age^2
reg2 = lm(length ~ age + age2)
summary(reg2)
# Plot the fitted parabola
agevalues = seq(1,6, len = 100)
predictedlengths <- predict(reg2, list(age=agevalues, age2=agevalues^2))
lines(agevalues, predictedlengths, col = "red")
# Compare to linear model
reg1 = lm(length ~ age)
summary(reg1)
abline(reg1)
# Residual plot
plot(age, resid(reg1))
abline(h=0)
# Which one is better?  Nested-model F test (linear vs quadratic).
anova(reg1, reg2)
# Conclusion: quadratic term is significant!
# Or look at p-value for age2 in reg2 which shows quadratic term is significant
# Report R^2 when using linear; R^2 when using quadratic and partial R^2 when x^2 is added:
# R-squared (quadratic) = 0.8011
# R-squared(linear) = 0.7349
# R-squared(quadratic|linear) = SSRegr(quadratic|linear)/SSE(linear) = 2972.1/11892.8 = 0.2499
# Alternative methods for fitting:
# Evaluating expressions directly inside the formula:
reg2b <- lm(length ~ age + I(age^2))
summary(reg2b)
# Fitting polynomial of a specific degree:
reg2c <- lm(length ~ poly(age, degree = 2, raw = T))
summary(reg2c)
# Note the use of raw = T, otherwise R fits orthogonal polynomials
# Do we need a cubic term?
reg3 <- lm(length ~ poly(age,3, raw = T))
summary(reg3)
# no
# Exercise: p. 300
data2 <- read.table("http://users.stat.ufl.edu/~rrandles/sta4210/Rclassnotes/data/textdatasets/KutnerData/Chapter%20%208%20Data%20Sets/CH08TA01.txt", header = FALSE)
data2
names(data2)[1]<-paste("Y")
names(data2)[2]<-paste("X1")
names(data2)[3]<-paste("X2")
attach(data2)
x1 = (X1-mean(X1))/0.4
x2 = (X2-mean(X2))/10
x12 = x1^2
x22 = x2^2
# Check correlations:
cor(X1, X1^2)
cor(X2, X2^2)
cor(x1, x12)
cor(x2, x22)
# Interaction term
x1x2 = x1*x2
model1 = lm(Y ~ x1+x2+x12+x22+x1x2)
summary(model1)
model2 = lm(Y ~ x1+x2)
anova(model2,model1)
# Since partial F test p-value = 0.5527
# we can't reject H0
# That is, no need of quadratic terms
# Exercise on p. 316
data3 <- read.table("http://users.stat.ufl.edu/~rrandles/sta4210/Rclassnotes/data/textdatasets/KutnerData/Chapter%20%208%20Data%20Sets/CH08TA02.txt", header = FALSE)
data3
names(data3)[1]<-paste("Y")
# Y = months elapsed for adoption of innovation
names(data3)[2]<-paste("X1")
# X1 = size of firm (millions)
names(data3)[3]<-paste("X2")
# X2 = 1 for stock firm, X2 = 0 for mutual fund
attach(data3)
# Regular linear multiple regression model:
regdummy = lm(Y ~ X1 + X2)
summary(regdummy)
# Fitted function:
# Yhat = 33.87 - 0.1*X1 + 8.06*X2
# Since p-value for X2 (dummy variable) is 3.74e-05 < 0.05
# there is a significant difference between stock and mutual fund firms
# The gap between the two types of companies is 8.055 months (stock higher than mutual funds)
# Plot:
# install.packages("ggplot2")
library(ggplot2)
plot <- ggplot(data = data3, aes(x = X1, y = Y, colour = factor(X2)))
plot + stat_smooth(method=lm, se = F) + geom_point()
# Write down the two separate regression equations:
# Mutual fund: y = 33.87 -0.1*x1
# Stock: y = 33.87 - 0.1*x1 + 8.06 = 41.93 - 0.1*x1
# p. 327
# Let's try interaction between dummy var and numerical var.
# (that is, nonparallel lines)
regdummy.Interaction = lm(Y ~ X1*X2)
summary(regdummy.Interaction)
# Call:
# lm(formula = Y ~ X1 * X2)
#
# Residuals:
# Min 1Q Median 3Q Max
# -5.7144 -1.7064 -0.4557 1.9311 6.3259
#
# Coefficients:
# Estimate Std. Error t value Pr(>|t|)
# (Intercept) 33.8383695 2.4406498 13.864 2.47e-10 ***
# X1 -0.1015306 0.0130525 -7.779 7.97e-07 ***
# X2 8.1312501 3.6540517 2.225 0.0408 *
# X1:X2 -0.0004171 0.0183312 -0.023 0.9821
# ---
# Signif. codes: 0 *** 0.001 ** 0.01 * 0.05 . 0.1 1
#
# Residual standard error: 3.32 on 16 degrees of freedom
# Multiple R-squared: 0.8951, Adjusted R-squared: 0.8754
# F-statistic: 45.49 on 3 and 16 DF, p-value: 4.675e-08
# Conclusion: Since interaction p-value = 0.9821 > 0.05
# the interaction effect is not significant
# That is, model with parallel lines is enough
# Challenge question: Test Ho: beta2 = 0 within the last model and also within the model without interaction.
# Are the conclusions the same? Does the conclusion have the same implication?
# Conclusions are the same: x2 is a significant predictor
# However, implications are different.
# Rule: when using interaction term always include the individual terms as well
# Exercise:
# Use the data from here: http://users.stat.ufl.edu/~rrandles/sta4210/Rclassnotes/data/textdatasets/KutnerData/Appendix%20C%20Data%20Sets/APPENC03.txt
# V1 = id
# V2 = Market share (%)
# V3 = Price ($)
# V4 = index of the amount of advertising
# V5 = 1 if discount in price and 0 o/w
# V6 = 1 if package promotion and 0 o/w
# V7 = month
# V8 = year
data4 <- read.table("http://users.stat.ufl.edu/~rrandles/sta4210/Rclassnotes/data/textdatasets/KutnerData/Appendix%20C%20Data%20Sets/APPENC03.txt", header = FALSE)
# We want a regression model to predict Market share by Price (X1), Discount (X2), and Promotion (X3)
names(data4)[2]<-paste("Y")
names(data4)[3]<-paste("X1")
names(data4)[5]<-paste("X2")
names(data4)[6]<-paste("X3")
attach(data4)
data4
# Task 1
# Predict the market share based on price and one or both dummy variables.
# Fit the following models:
# 1) Price only
reg1 <- lm(Y~X1)
# 2) Price and Discount
reg2 <- lm(Y~X1+X2)
# 3) Price, Discount and Promotion
reg3 <- lm(Y~X1+X2+X3)
# 4) Price, Discount, Promotion and all interactions between them
reg4 <- lm(Y~X1*X2+X1*X3)
# Choose which model is the best fit by adjusted R-square criterion?
# reg1: 0.007168
# reg2: 0.6376
# reg3: 0.679
# reg4: 0.664
# Task 2:
# In the best model obtain the residual plot and check the assumptions
# The best model is the the model with Price, Discount and Promotion
# Obtained fitted values
y_hat <- predict(reg3)
e <- residuals(reg3)
mydata <-cbind(data4, y_hat, e)
mydata
# Residuals against Y_hat
library(latex2exp)
library(ggplot2)
g1 <- ggplot(mydata, aes(x = y_hat, y = e))+geom_point(color = "black") + xlab("Fitted")+
ylab("Residual")+ggtitle(TeX("(a) Residual Plot against $\\widehat{Y}$"))+
geom_hline(yintercept = 0, linetype = "dashed", color="red")
g1
# Task 3:
# Recall:
# X2 = 1 if discount in price and 0 o/w
# X3 = 1 if package promotion and 0 o/w
# Write down the 4 separate regression equations for each combination
# of discount and promotion
# That is, we need the regression equation for each of these:
# No discount, no promotion: y=3.18527-0.35269*X1
# No discount with promotion:y=3.18527-0.35269*X1+0.11803=3.3033-0.35269*X1
# Discount but no promotion: y=3.18527-0.35269*X1+0.39914=3.58441-0.35269*X1
# Discount and promotion:y=3.18527-0.35269*X1+0.39914+0.11803=3.70244-0.35269*X1
|
78c580bb4df1a7b905145463956e938120ec8188 | 9e92deab552da35f83af28a557bed128ccfa2706 | /app.R | e1cda3225c50ca79744688a7bb7c7cb83a324da5 | [] | no_license | adammaikai/cnv-heatmap | e73763c88eec49bbfb73798d0ebe5712ef3b4ec4 | d4daafb749a1d934fbb5af73bf16cb9253458a44 | refs/heads/master | 2022-11-11T11:43:00.352428 | 2020-06-26T19:40:47 | 2020-06-26T19:40:47 | 261,002,052 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,691 | r | app.R | # CNV Analysis App
source("src/CNVPlottingFunctions.R")
load("data/CNVPlottingPrep.RData")
# Define UI for data upload app ----
ui <- fluidPage(
title = "CNV Heatmap",
sidebarLayout(
sidebarPanel(
radioButtons(inputId = 'genome', label = 'Select Genome',
c("GRCh37/hg19", "GRCh38/hg38", "GRCm38/mm10"), selected=character(0)),
# Input: Select a .seg file ----
fileInput("segFile", "Upload .seg File",
multiple = FALSE,
accept = c(".seg", ".txt")),
hr(),
# Input: Select a metadata file ----
fileInput("metaFile", "Upload a Metadata File",
multiple = FALSE,
accept = c(".txt", ".csv", "tsv")),
radioButtons(inputId = 'sep', label = 'Metadata Separator',
c(Tab='\t', Comma=',', Semicolon=';'),
'\t'),
radioButtons("condMeta", "Annotation column", choices=character(0), selected=character(0)),
hr(),
actionButton(inputId = "plot_now", label = "Plot Heatmap"),
downloadButton('downloadPlot',"Save Plot"),
hr()
),
mainPanel(
tabsetPanel(
tabPanel("CNV Heatmap", id="plottab", plotOutput("Plot"))
),
HTML(" ", paste0(
'<span data-display-if="',
'$('html').attr('class')=='shiny-busy'',
'">',
'<i class="fa fa-spinner fa-pulse fa-fw" style="color:orange"></i>',
'</span>'
)),
hr(),
tabsetPanel(
id = 'dataset',
tabPanel("Segments File", id="segtab", DT::dataTableOutput("seg")),
tabPanel("Metadata", id="metatab", DT::dataTableOutput("meta"))
)
)
)
)
# Define server logic to read selected file ----
server <- function(input, output, session) {
uploadseg <- eventReactive(input$segFile$datapath, {
if(is.null(input$genome)){
stop(safeError("Please select a reference genome!"))
}
# input$segFile will be NULL initially. After the user selects and uploads a file
req(input$segFile)
if(input$genome == "GRCh37/hg19"){
coords <- chromCoordsHg19
} else if(input$genome == "GRCh38/hg38"){
coords <- chromCoordsHg38
} else {
coords <- chromCoordsMm10
}
tryCatch({
seg <- readSeg(file.path = input$segFile$datapath, chromCoords = coords)},
error = function(e) {stop(safeError(e))})
list(seg=seg, coords=coords)
})
uploadmeta <- eventReactive(input$metaFile$datapath, {
# input$segFile will be NULL initially. After the user selects and uploads a file
req(input$metaFile)
tryCatch({
meta <- readMeta(input$metaFile$datapath, sep=input$sep)},
error = function(e) {stop(safeError(e))})
meta
})
output$seg <- DT::renderDataTable({
seg <- uploadseg()$seg
DT::datatable(seg)
})
output$meta <- DT::renderDataTable({
meta <- uploadmeta()
DT::datatable(meta)
})
myPlot <- eventReactive(input$plot_now,{
segUp <- uploadseg()
seg <- segUp$seg
coords <- segUp$coords
if(!is.null(input$metaFile$datapath)){
meta <- uploadmeta()
column <- input$condMeta
} else {
meta <- NULL
column <- NULL}
print(coords)
coordMat <- do.call(rbind, lapply(coords$chr,
function(i) {chr.i <- subset(coords, chr==i);
return(data.frame(row=seq(chr.i$start, chr.i$cumsum, by=100000), chr=i))}))
cnvMat <- populateCNVMatrix(chromCoords = coords,
coordMat = coordMat,
segDf = seg)
annos <- makeHeatmapAnnotations(cnvMat = cnvMat,
chromCoords = coords,
coordMat = coordMat,
metadata = meta,
column_anno = column)
plotCNVHeatmap(cnvMat = cnvMat, annos = annos)
})
observeEvent(input$metaFile, {
meta <- uploadmeta()
columns <- names(meta)[2:ncol(meta)]
updateRadioButtons(session, "condMeta",
label = "Metadata Annotation",
choices = c("None", columns),
selected = columns[1])
})
output$Plot <-renderPlot({
myPlot()
})
output$downloadPlot <- downloadHandler(
filename = function() {
paste0("CNV_heatmap_", input$condMeta, "_", gsub("-", "", Sys.Date()), sep=".pdf")
},
content = function (file) {
pdf(file)
print(myPlot())
dev.off()
}
)
}
# Create Shiny app ----
shinyApp(ui, server) |
6594af2aa56cd2c276682fae00e70fdd4122aaa3 | 2e74c7339c63385172629eaa84680a85a4731ee9 | /envir_lead/04_crosswalk.R | f079b133594abe0633fac5cab9facee4688469e8 | [] | no_license | zhusui/ihme-modeling | 04545182d0359adacd22984cb11c584c86e889c2 | dfd2fe2a23bd4a0799b49881cb9785f5c0512db3 | refs/heads/master | 2021-01-20T12:30:52.254363 | 2016-10-11T00:33:36 | 2016-10-11T00:33:36 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,060 | r | 04_crosswalk.R | #----HEADER----------------------------------------------------------------------------------------------------------------------
# Purpose: Run a linear model to inform the prior of ST-GPR for blood lead
#********************************************************************************************************************************
#----CONFIG----------------------------------------------------------------------------------------------------------------------
# set control flow arguments
run.interactively <- FALSE
# load packages
library(plyr)
library(foreign)
library(splines)
library(boot)
library(reshape2)
library(data.table)
library(stats)
library(lme4)
library(ggplot2)
#********************************************************************************************************************************
#----PREP------------------------------------------------------------------------------------------------------------------------
# Read in your model data if you are working interactively
# If running from the central st-gpr, the data will already be read in from the database
if (Sys.info()["sysname"] == "Windows") {
}
#********************************************************************************************************************************
#----MODEL------------------------------------------------------------------------------------------------------------------------
## Linear model
# without RFX
mod <- lm(data ~ as.factor(age_group_id) + lt_urban:as.factor(super_region_id) + geometric_mean + outphase_smooth + ln_LDI_pc,
data=df,
na.action=na.omit)
## Crosswalk your non-gold standard datapoints
# First store the relevant coefficients
coefficients <- as.data.table(coef(summary(mod)), keep.rownames = T)
#********************************************************************************************************************************
#----CROSSWALKING DATA-------------------------------------------------------------------------------------------------------------
# Save your raw data/variance for reference
df[, "raw_data" := copy(data)]
df[, "raw_variance" := copy(variance)]
# We will first crosswalk our data based on the results of the regression
# Then, adjust the variance of datapoints, given that our adjusted data is less certain subject to the variance of the regression
# To do this you will use the formula: Var(Ax + By) = a^2 * Var(x) + b^2 * Var(y) + 2ab * covariance(x,y)
# (https://en.wikipedia.org/wiki/Variance)
# In our case, the variables are as follows:
# A: geometric_mean
# x: beta for geometric_mean
# B: lt_urban (logit of data urbanicity) - lt_prop_urban (logit of country urbanicity)
# y: beta for urbanicity in a given super_region
# Adjust datapoints to geometric mean
# note that if the variable = 1, that means data is arithmetic (non standard)
# therefore, we want to crosswalk these points down to the geometric mean values
gm_coeff <- as.numeric(coefficients[rn == "geometric_mean", 'Estimate', with=F])
df[, "data" := data - (geometric_mean * gm_coeff)]
# Now adjust the variance for points crosswalked to geometric mean
gm_se <- as.numeric(coefficients[rn == "geometric_mean", 'Std. Error', with=F])
df[, variance := variance + (geometric_mean^2 * gm_se^2)]
# Adjust data urbanicity to the national average
# here, the variable lt_urban represents the urbanicity of the datapoint (in logit)
# whereas the variable lt_prop_urban represents the national average urbanicity (inlogit)
# we want to crosswalk these points as if they are nationally representative
# in order to do this we multiply the beta on urbanicity in that super region by the difference in percent urbanicity between study and national
# ex, if study is 0 (rural) and country is 50% urban, we are multiplying the coefficent by -0.5
# Finally, we will use the above formula to adjust the variance using regression uncertainty
for (this.super.region in unique(df$super_region_id)) {
cat("Adjusting points in super region #", this.super.region, "\n"); flush.console()
# First, we will adjust the data based on urbanicity
urban_coeff <- as.numeric(coefficients[rn == paste0("lt_urban:as.factor(super_region_id)",this.super.region), "Estimate", with=F])
df[super_region_id == this.super.region,
"data" := data - ((lt_urban-lt_prop_urban) * urban_coeff)]
# Now adjust the variance for points crosswalked to urbanicity in a given superregion
# here we will also take into account the covariance between urbanicity and geometric mean
urban_se <- as.numeric(coefficients[rn == paste0("lt_urban:as.factor(super_region_id)",this.super.region), "Std. Error", with=F])
covariance <- vcov(mod)[paste0("lt_urban:as.factor(super_region_id)",this.super.region),"geometric_mean"]
df[super_region_id == this.super.region,
variance := variance + ((lt_urban-lt_prop_urban)^2 * urban_se^2) + (2*(geometric_mean*(lt_urban-lt_prop_urban)) * covariance)]
}
# First reset all study level covariates to predict as the gold standard
# Also save the originals for comparison
df[, geometric_mean_og := geometric_mean]
df[, geometric_mean := 0]
df[, lt_urban_og := lt_urban]
df[, lt_urban := lt_prop_urban] # decided to use logit transform on this cov
# Save df with all crosswalk result variables for examination
write.csv(df, paste0(run_root, "/crosswalk_results.csv"), row.names=FALSE, na="")
# Clean up dataset for input to ST-GPR
df <- df[, c("ihme_loc_id",
"location_id",
"year_id",
"age_group_id",
"sex_id",
"data",
"standard_deviation",
"variance",
"sample_size",
"ln_LDI_pc",
"lt_urban",
"outphase_smooth",
"super_region_id",
"region_id",
"me_name",
"nid",
"age_start",
"train"),
with=F]
|
450978091ee593fe053e9f3d2167cfdab9391271 | cf62c1acac62cc4bf102447f0fc4285977782612 | /man/bfgs_gcv.ubre.Rd | 9c781d6329ba27b6fe750dd8852d5bbc3b79ddd5 | [] | no_license | cran/scam | d6d00a723a0462416a79178038d61cd30fbe793e | cc080df129f3611eadf93cef7259324def3f83dd | refs/heads/master | 2023-04-30T11:29:17.031975 | 2023-04-14T09:00:08 | 2023-04-14T09:00:08 | 17,699,475 | 4 | 6 | null | null | null | null | UTF-8 | R | false | false | 4,358 | rd | bfgs_gcv.ubre.Rd | \name{bfgs_gcv.ubre}
\alias{bfgs_gcv.ubre}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Multiple Smoothing Parameter Estimation by GCV/UBRE}
\description{
Function to efficiently estimate smoothing parameters of SCAM by GCV/UBRE score optimization.
The procedure is outer to the model fitting by the Newton-Raphson method.
The function uses the BFGS method where the Hessian matrix is updated iteratively at each step.
Backtracking is included to satisfy the sufficient decrease condition.
The function is not normally called directly, but rather service routines for \code{\link{scam}}.
}
\usage{
bfgs_gcv.ubre(fn=gcv.ubre_grad, rho, ini.fd=TRUE, G, env,
n.pen=length(rho), typx=rep(1,n.pen), typf=1, control)
%- maybe also 'usage' for other objects documented here.
}
\arguments{
\item{fn}{GCV/UBRE Function which returs the GCV/UBRE value and its derivative wrt log smoothing parameter.}
\item{rho}{log of the initial values of the smoothing parameters.}
\item{ini.fd}{If TRUE, a finite difference to the Hessian is used to find the initial
inverse Hessian, otherwise the initial
inverse Hessian is a diagonal matrix `100*I'.}
\item{G}{A list of items needed to fit a SCAM.}
% \item{gamma}{An ad hoc parametrer of the GCV/UBRE score.}
\item{env}{Get the enviroment for the model coefficients, their derivatives and the smoothing parameter.}
\item{n.pen}{Smoothing parameter dimension.}
\item{typx}{A vector whose component is a positive scalar specifying the typical magnitude of sp.}
\item{typf}{A positive scalar estimating the magnitude of the gcv near the minimum.}
\item{control}{Control option list as returned by \code{\link{scam.control}}.}
}
% \details{
%% ~~ If necessary, more details than the description above ~~
% }
\value{A list is returned with the following items:
\item{gcv.ubre}{The optimal value of GCV/UBRE.}
\item{rho}{The best value of the log smoothing parameter.}
\item{dgcv.ubre}{The gradient of the GCV/UBRE.}
\item{iterations}{The number of iterations taken until convergence.}
\item{conv.bfgs}{Convergence information indicating why the BFGS terminated (given below).}
\item{termcode}{ An integer code indicating why the optimization process
terminated.
1: relative gradient is close to zero, current iterate
probably is a solution.
2: scaled distance between last two steps less than `steptol', current iterate
probably is a local minimizer, but it's possible that the algorithm is making very slow progress,
or `steptol' is too large.
3: last global step failed to locate a point lower than
estimate. Either estimate is an approximate
local minimum of the function or \code{steptol} is too
small.
4: iteration limit exceeded.
5: five consecutive steps of length \code{maxNstep} have been taken, it's possible that `maxstep' is too small.
}
\item{object}{A list of elements returned by the fitting procedure \code{scam.fit} for an optimal value of the smoothing parameter.}
\item{dgcv.ubre.check}{If \code{check.analytical=TRUE} this is the finite-difference approximation of the gradient calculated by
\code{\link{gcv.ubre_grad}}, otherwise \code{NULL}.}
\item{check.grad}{If \code{check.analytical=TRUE} this is the relative difference (in %) between the analytical
and finite differenced derivatives calculated by \code{\link{gcv.ubre_grad}}, otherwise \code{NULL}.}
}
\references{
Pya, N. and Wood, S.N. (2015) Shape constrained additive models. Statistics and Computing, 25(3), 543-559
Pya, N. (2010) Additive models with shape constraints. PhD thesis. University of Bath. Department of Mathematical Sciences
Wood, S.N. (2011) Fast stable restricted maximum likelihood and marginal likelihood estimation of semiparametric generalized
linear models. Journal of the Royal Statistical Society: Series B. 73(1): 1--34
}
\author{
Natalya Pya <nat.pya@gmail.com>
}
\seealso{
\code{\link{scam}}
}
\keyword{models}
\keyword{smooth}
\keyword{regression}
|
ac260b6e0d343af74346f8428212e3dbd02bd353 | 6e5efc0b6b6b37c735c1c773531c41b51675eb10 | /man/GetQEA.pathNames.Rd | 789e7f2a34609c74b4cff17f656a04ebc52a7126 | [
"GPL-2.0-or-later"
] | permissive | xia-lab/MetaboAnalystR | 09aa09c9e57d7da7d73679f5a515eb68c4158e89 | 9edbbd1e2edda3e0796b65adf440ad827abb7beb | refs/heads/master | 2023-08-10T06:08:56.194564 | 2023-08-01T15:13:15 | 2023-08-01T15:13:15 | 109,994,826 | 268 | 165 | MIT | 2023-03-02T16:33:42 | 2017-11-08T15:38:12 | R | UTF-8 | R | false | true | 380 | rd | GetQEA.pathNames.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/enrich_path_stats.R
\name{GetQEA.pathNames}
\alias{GetQEA.pathNames}
\title{Export pathway names from QEA analysis}
\usage{
GetQEA.pathNames(mSetObj = NA)
}
\arguments{
\item{mSetObj}{Input the name of the created mSetObj (see InitDataObjects)}
}
\description{
Export pathway names from QEA analysis
}
|
c9fef07c04702d7942d45af98a8356b1dab8bce0 | c1f773bf23a84ea9af94812a1a6d35004c378d64 | /man/inla.sens.Rd | b01a6f32b46f33bbb72efe21bc91d41866da3719 | [] | no_license | inbo/INLA | faab93d1f3e7f70251ef05148a40c69b6ac13561 | 0fa332471d2e19548cc0f63e36873e31dbd685be | refs/heads/master | 2020-04-11T08:59:50.842976 | 2019-12-02T12:38:52 | 2019-12-02T12:38:52 | 51,505,516 | 3 | 2 | null | null | null | null | UTF-8 | R | false | false | 4,262 | rd | inla.sens.Rd | \name{inla.sens}
\alias{inla.sens}
\title{
Testing code for sensitivity
}
\description{
TODO: Write a description
}
\usage{
inla.sens(inlaObj)
}
\arguments{
\item{inlaObj}{
The result from a run of \code{inla}.
}
}
%\details{
%% ~~ If necessary, more details than the description above ~~
%}
\value{
TODO: This is an EXPERIMENTAL function!
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
%\references{
%% ~put references to the literature/web site here ~
%}
\author{
Geir-Arne Fuglstad \email{geirarne.fuglstad@gmail.com}
}
%\note{
%% ~~further notes~~
%}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{inla}}
}
\examples{
## Case 1: Simple linear regression on simulated data
# Number of observations
nObs = 100
# Measurement noise
sdNoise = 0.1
# Coefficients
mu = 2
beta = 1
# Covariate
x = runif(nObs)
# Generate data
y = mu + beta*x + rnorm(nObs)*sdNoise
# Make some data unobserved
nUnObs = 20
y[(nObs-nUnObs+1):nObs] = NA
# Fit the model
mod = inla(y ~ x,
data = list(x = x, y = y))
# Calculate sensitivites
inla.sens(mod)
## Case 2: Time series
# Length of time-series
nObs = 100
# Measurement noise
sdNoise = 0.1
# Autoregressive process
rho = 0.6
sdProc = 0.1
arP = matrix(0, nrow = nObs, ncol = 1)
for(i in 2:nObs)
arP[i] = rho*arP[i-1] + rnorm(1)*sdProc
tIdx = 1:nObs
# Coefficients
mu = 2
# Generate data
y = mu + arP + rnorm(nObs)*sdNoise
# Make some data unobserved
nUnObs = 20
y[(nObs-nUnObs+1):nObs] = NA
idx = 1:nObs
# Run INLA
mod = inla(y ~ f(tIdx, model = "ar1"),
data = list(y = y, tIdx = tIdx),
control.inla = list(reordering = "metis"))
# Calculate sensitivities
inla.sens(mod)
## Case 3: Epil dataset
data(Epil)
my.center = function(x) (x - mean(x))
# make centered covariates
Epil$CTrt = my.center(Epil$Trt)
Epil$ClBase4 = my.center(log(Epil$Base/4))
Epil$CV4 = my.center(Epil$V4)
Epil$ClAge = my.center(log(Epil$Age))
Epil$CBT = my.center(Epil$Trt*Epil$ClBase4)
# Define the model
formula = y ~ ClBase4 + CTrt + CBT+ ClAge + CV4 +
f(Ind, model="iid") + f(rand,model="iid")
mod = inla(formula,family="poisson", data = Epil)
# Calculate sensitivities
inla.sens(mod)
## Case 4: Spatial data
# Number of observations
nObs = 100
# Measurement noise
sdNoise = 0.2
# Spatial process
sdProc = 1.0
rho0 = 0.2
# Coefficients
beta0 = 1
beta1 = 2
# Generate spatial data + measurement noise
loc = cbind(runif(nObs), runif(nObs))
dd = as.matrix(dist(loc))
Sig = sdProc^2*inla.matern.cov(nu = 1, kappa = sqrt(8)/rho0, dd, corr = TRUE)
L = t(chol(Sig))
u = L%*%rnorm(nObs) + rnorm(nObs)*sdNoise
# Generate Covariate
x = runif(nObs)-0.5
# Combine to observations
y = beta0 + beta1*x + u
# Number of unobserved
nUnObs = 2
y[1:nUnObs] = NA
# Mesh
mesh = inla.mesh.2d(loc, max.edge = 0.05, cutoff = 0.05)
# Make SPDE object
spde = inla.spde2.matern(mesh)
spde2 = inla.spde2.matern(mesh, constr = TRUE)
# Make A matrix
A = inla.spde.make.A(mesh, loc)
# Stack
X = cbind(1, x)
stk = inla.stack(data = list(y = y), A = list(A, 1),
effects = list(field = 1:spde$n.spde,
X = X))
# Run INLA
mod1 = inla(y ~ -1 + X + f(field, model = spde),
data = inla.stack.data(stk),
control.predictor = list(A = inla.stack.A(stk)),
control.family = list(prior = "pcprec",
param = c(3, 0.05)))
mod2 = inla(y ~ -1 + X + f(field, model = spde2),
data = inla.stack.data(stk),
control.predictor = list(A = inla.stack.A(stk)),
control.family = list(prior = "pcprec",
param = c(3, 0.05)))
# Calculate sensitivities
res1 = inla.sens(mod1)
res2 = inla.sens(mod2)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%%\keyword{ ~kwd1 }
|
67549182c589600bcb6beac2cf5a5c3a17751f70 | d6a81538d1a2726b5e76d176e6c67bb8b9a52200 | /R/data_interface.R | f2aa5729f0709721ce4c040ff101cd1283b62cb7 | [] | no_license | cran/prcbench | ace339c428112e35040f0df4beb2ff2475bc6a58 | 30a94dc12d194829feb49ca0b820a55c8858bcd0 | refs/heads/master | 2023-03-18T17:07:03.819425 | 2023-03-12T15:10:09 | 2023-03-12T15:10:09 | 49,258,146 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,617 | r | data_interface.R | #' Create a list of test datasets
#'
#' The \code{create_testset} function creates test datasets either for
#' benchmarking or curve evaluation.
#'
#' @param test_type A single string to specify the type of dataset generated by
#' this function.
#'
#' \describe{
#' \item{"bench"}{Create test datasets for benchmarking}
#' \item{"curve"}{Create test datasets for curve evaluation}
#' }
#'
#' @param set_names A character vector to specify the names of test
#' datasets.
#'
#' \enumerate{
#'
#' \item For benchmarking (\code{test_type = "bench"})
#'
#' This function uses a naming convention for randomly generated data for
#' benchmarking. The format is a prefix ('i' or 'b') followed by the number of
#' dataset. The prefix 'i' indicates a balanced dataset, whereas 'b'
#' indicates an imbalanced dataset. The number can be used with a suffix 'k'
#' or 'm', indicating respectively 1000 or 1 million.
#'
#' Below are some examples.
#' \describe{
#' \item{"b100"}{A balanced data set with 50 positives and 50
#' negatives.}
#' \item{"b10k"}{A balanced data set with 5000 positives and 5000
#' negatives.}
#' \item{"b1m"}{A balanced data set with 500,000 positives and 500,000
#' negatives.}
#' \item{"i100"}{An imbalanced data set with 25 positives and 75
#' negatives.}
#' }
#'
#' The function returns a list of \code{\link{TestDataB}} objects.
#'
#' \item For curve evaluation (\code{test_type = "curve"})
#'
#' The following three predefined datasets can be specified for curve
#' evaluation.
#'
#' \tabular{lll}{
#' \strong{set name}
#' \tab \strong{\code{S3} object}
#' \tab \strong{data source} \cr
#'
#' c1 or C1 \tab \code{\link{TestDataC}} \tab \code{\link{C1DATA}} \cr
#' c2 or C2 \tab \code{\link{TestDataC}} \tab \code{\link{C2DATA}} \cr
#' c3 or C3 \tab \code{\link{TestDataC}} \tab \code{\link{C3DATA}} \cr
#' c4 or C4 \tab \code{\link{TestDataC}} \tab \code{\link{C4DATA}}
#' }
#'
#' The function returns a list of \code{\link{TestDataC}} objects.
#' }
#'
#' @return A list of \code{R6} test dataset objects.
#'
#' @seealso \code{\link{run_benchmark}} and \code{\link{run_evalcurve}} require
#' the list of the datasets generated by this function.
#' \code{\link{TestDataB}} for benchmarking test data.
#' \code{\link{TestDataC}}, \code{\link{C1DATA}}, \code{\link{C2DATA}},
#' \code{\link{C3DATA}}, and \code{\link{C4DATA}} for curve evaluation
#' test data.
#' \code{\link{create_usrdata}} for creating a user-defined test set.
#'
#' @examples
#' ## Create a balanced data set with 50 positives and 50 negatives
#' tset1 <- create_testset("bench", "b100")
#' tset1
#'
#' ## Create an imbalanced data set with 25 positives and 75 negatives
#' tset2 <- create_testset("bench", "i100")
#' tset2
#'
#' ## Create P1 dataset
#' tset3 <- create_testset("curve", "c1")
#' tset3
#'
#' ## Create P1 dataset
#' tset4 <- create_testset("curve", c("c1", "c2"))
#' tset4
#'
#' @export
create_testset <- function(test_type, set_names = NULL) {
  # Validate and normalise the arguments (expands partial test_type,
  # lower-cases set_names, or stops with an informative error)
  args <- .validate_create_testset_args(test_type, set_names)

  # Pick the per-dataset constructor for the requested test type
  make_one <- switch(args$test_type,
    bench = .create_benchtest,
    curve = .create_curvetest
  )

  # Build one dataset object per name and label the list by those names
  dsets <- lapply(args$set_names, make_one)
  names(dsets) <- args$set_names

  dsets
}
#
# Create a random sample dataset for benchmarking
#
# sname follows the naming convention: prefix 'i' (imbalanced, 25%
# positives), 'b' (balanced, 50% positives) or 'r' (random positive
# ratio), followed by the total size, optionally suffixed with
# 'k' (x1000) or 'm' (x1,000,000), e.g. "b10k", "i100", "r1m".
# When sname is NULL, np/nn and the sampling functions are used as given.
# Returns a TestDataB object.
#
.create_benchtest <- function(sname = NULL, np = 10, pfunc = NULL, nn = 10,
                              nfunc = NULL) {
  # Calculate np and nn when sname is specified
  if (!is.null(sname)) {
    lsname <- tolower(sname)
    # Strip the prefix/suffix letters to leave the numeric part.
    # NOTE: use "[ibrkm]" (a character class), not "[i|b|r|k|m]" --
    # inside a bracket class '|' is a literal character, not alternation,
    # so the old pattern also silently stripped '|' from invalid names.
    tot <- suppressWarnings(as.numeric(gsub("[ibrkm]", "", lsname)))
    if (is.na(tot)) {
      # Fail with a clear message instead of the cryptic
      # "missing value where TRUE/FALSE needed" from `if (NA < 2)`
      stop("Invalid set_names. Check the naming convention.", call. = FALSE)
    }
    if (grepl("k$", lsname)) {
      tot <- tot * 1000
    } else if (grepl("m$", lsname)) {
      tot <- tot * 1000 * 1000
    }
    if (tot < 2) {
      stop("Invalid set_names. Data set size must be >1.",
        call. = FALSE
      )
    }

    # The prefix determines the ratio of positives to total
    if (grepl("^i", lsname)) {
      posratio <- 0.25
    } else if (grepl("^b", lsname)) {
      posratio <- 0.5
    } else if (grepl("^r", lsname)) {
      posratio <- stats::runif(1)
    } else {
      stop("Invalid set_names. Check the naming convention.", call. = FALSE)
    }
    np <- round(tot * posratio)
    nn <- tot - np
  }

  # Default positive-score distribution: Beta(1, 1), i.e. uniform on [0, 1]
  if (is.null(pfunc)) {
    pfunc <- function(n) stats::rbeta(n, shape1 = 1, shape2 = 1)
  }

  # Default negative-score distribution: Beta(1, 4), skewed toward 0
  if (is.null(nfunc)) {
    nfunc <- function(n) stats::rbeta(n, shape1 = 1, shape2 = 4)
  }

  # Create scores and labels (1 = positive, 0 = negative)
  scores <- c(pfunc(np), nfunc(nn))
  labels <- c(rep(1, np), rep(0, nn))

  # Create a TestDataB object
  TestDataB$new(scores, labels, as.character(sname))
}
#
# Get a test dataset with pre-calculated values
#
.create_curvetest <- function(sname) {
  # Look up the bundled pre-calculated dataset for this name
  # (c1-c4 are the only predefined curve-evaluation datasets)
  pdata <- switch(tolower(sname),
    c1 = prcbench::C1DATA,
    c2 = prcbench::C2DATA,
    c3 = prcbench::C3DATA,
    c4 = prcbench::C4DATA,
    stop("Invalid dataset name", call. = FALSE)
  )

  # Wrap scores/labels in a TestDataC object and attach the
  # pre-calculated base points and text positions used for plotting
  ds <- TestDataC$new(pdata$scores, pdata$labels, sname)
  ds$set_basepoints_x(pdata$bp_x)
  ds$set_basepoints_y(pdata$bp_y)
  ds$set_textpos_x(pdata$tp_x)
  ds$set_textpos_y(pdata$tp_y)
  ds$set_textpos_x2(pdata$tp_x2)
  ds$set_textpos_y2(pdata$tp_y2)

  ds
}
#
# Validate arguments for create_testset() and return updated arguments
#
# Expands a partial test_type match to "bench"/"curve", lower-cases
# set_names and checks them against the naming convention of the
# chosen test type. Stops with an informative error on invalid input.
#
.validate_create_testset_args <- function(test_type, set_names) {
  # test_type must be a single string; partial matches are accepted
  assertthat::assert_that(assertthat::is.string(test_type))
  if (!is.na(pmatch(test_type, "bench"))) {
    test_type <- "bench"
  } else if (!is.na(pmatch(test_type, "curve"))) {
    test_type <- "curve"
  } else {
    stop("Invalid test_type. It must be either 'bench' or 'curve'.",
      call. = FALSE
    )
  }

  if (!is.null(set_names)) {
    set_names <- tolower(set_names)
    if (test_type == "bench") {
      # Each bench name must reduce to a number once the prefix
      # ('i', 'b' or 'r') and size suffix ('k' or 'm') are removed.
      # NOTE: "[ibrkm]" is a proper character class. The previous
      # pattern "[i|b|k|m]" matched a literal '|' and, crucially,
      # omitted 'r', so it rejected the 'r'-prefixed (random ratio)
      # names that .create_benchtest supports.
      for (sname in set_names) {
        assertthat::assert_that(assertthat::is.string(sname))
        cnum <- gsub("[ibrkm]", "", sname)
        if (suppressWarnings(is.na(as.numeric(cnum)))) {
          stop("Invalid set_names. Check the naming convention",
            call. = FALSE
          )
        }
      }
    } else if (test_type == "curve") {
      # Curve evaluation only ships four predefined datasets
      c_set_names <- c("c1", "c2", "c3", "c4")
      if (length(setdiff(set_names, c_set_names)) != 0) {
        stop("Invalid set_names. Valid set_names are 'c1', 'c2', 'c3' or 'c4'.",
          call. = FALSE
        )
      }
    }
  }

  list(test_type = test_type, set_names = set_names)
}
#' Create a user-defined test dataset
#'
#' The \code{create_usrdata} function creates various types of test datasets.
#'
#' @param test_type A single string to specify the type of dataset generated by
#' this function.
#'
#' \describe{
#' \item{"bench"}{Create a test dataset for benchmarking}
#' \item{"curve"}{Create a test dataset for curve evaluation}
#' }
#'
#' @param scores A numeric vector to set scores.
#'
#' @param labels A numeric vector to set labels.
#'
#' @param tsname A single string to specify the name of the dataset.
#'
#' @param base_x A numeric vector to set pre-calculated recall values for
#' curve evaluation.
#'
#' @param base_y A numeric vector to set pre-calculated precision values for
#' curve evaluation.
#'
#' @param text_x A single numeric value to set the x position for displaying
#' the test result in a plot
#'
#' @param text_y A single numeric value to set the y position for displaying
#' the test result in a plot
#'
#' @param text_x2 A single numeric value to set the x position for displaying
#' the test result (group into categories) in a plot
#'
#' @param text_y2 A single numeric value to set the y position for displaying
#' the test result (group into categories) in a plot
#'
#' @return A list of \code{R6} test dataset objects.
#'
#' @seealso \code{\link{create_testset}} for creating a predefined test set.
#' \code{\link{TestDataB}} for benchmarking test data.
#' \code{\link{TestDataC}} for curve evaluation test data.
#'
#' @examples
#' ## Create a test dataset for benchmarking
#' testset2 <- create_usrdata("bench",
#' scores = c(0.1, 0.2), labels = c(1, 0),
#' tsname = "m1"
#' )
#' testset2
#'
#' ## Create a test dataset for curve evaluation
#' testset <- create_usrdata("curve",
#' scores = c(0.1, 0.2), labels = c(1, 0),
#' base_x = c(0, 1.0), base_y = c(0, 0.5)
#' )
#' testset
#'
#' @export
create_usrdata <- function(test_type, scores = NULL, labels = NULL,
                           tsname = NULL, base_x = NULL, base_y = NULL,
                           text_x = NULL, text_y = NULL,
                           text_x2 = text_x, text_y2 = text_y) {
  # Check and canonicalize every argument before building anything.
  args <- .validate_create_usrdata(
    test_type, scores, labels, tsname,
    base_x, base_y, text_x, text_y,
    text_x2, text_y2
  )

  if (args$test_type == "bench") {
    # Benchmarking datasets carry only scores, labels and a name.
    ds <- TestDataB$new(args$scores, args$labels, args$tsname)
  } else if (args$test_type == "curve") {
    # Curve-evaluation datasets additionally carry pre-calculated base
    # points; the text positions are optional and set only when supplied.
    ds <- TestDataC$new(args$scores, args$labels, args$tsname)
    ds$set_basepoints_x(args$base_x)
    ds$set_basepoints_y(args$base_y)
    if (!is.null(args$text_x)) ds$set_textpos_x(args$text_x)
    if (!is.null(args$text_y)) ds$set_textpos_y(args$text_y)
    if (!is.null(args$text_x2)) ds$set_textpos_x2(args$text_x2)
    if (!is.null(args$text_y2)) ds$set_textpos_y2(args$text_y2)
  }

  # Return a one-element list named after the dataset.
  dsets <- list(ds)
  names(dsets) <- args$tsname
  dsets
}
#
# Validate arguments and return updated arguments
#
.validate_create_usrdata <- function(test_type, scores, labels, tsname, base_x,
                                     base_y, text_x, text_y, text_x2, text_y2) {
  # Resolve the (possibly abbreviated) test_type to its canonical name.
  assertthat::assert_that(assertthat::is.string(test_type))
  if (!is.na(pmatch(test_type, "bench"))) {
    test_type <- "bench"
  } else if (!is.na(pmatch(test_type, "curve"))) {
    test_type <- "curve"
  } else {
    stop("Invalid test_type. It must be either 'bench' or 'curve'.",
      call. = FALSE
    )
  }

  # Scores and labels: two parallel vectors with exactly two label levels.
  assertthat::assert_that(is.numeric(scores))
  assertthat::assert_that(length(scores) > 1)
  assertthat::assert_that(is.numeric(labels) || is.factor(labels))
  assertthat::assert_that(length(labels) > 1)
  assertthat::assert_that(length(unique(labels)) == 2)
  assertthat::assert_that(length(scores) == length(labels))

  # Fall back to a default dataset name when none was given.
  if (is.null(tsname)) {
    tsname <- "usr"
  }
  assertthat::assert_that(assertthat::is.string(tsname))

  # Curve datasets additionally carry base points and optional text
  # positions, all constrained to [0, 1]. c() silently drops NULLs, so
  # only the positions that were actually supplied are checked.
  if (test_type == "curve") {
    assertthat::assert_that(is.numeric(base_x))
    assertthat::assert_that(all(base_x >= 0.0) && all(base_x <= 1.0))
    assertthat::assert_that(is.numeric(base_y))
    assertthat::assert_that(all(base_y >= 0.0) && all(base_y <= 1.0))
    assertthat::assert_that(length(base_x) == length(base_y))
    for (p in c(text_x, text_y, text_x2, text_y2)) {
      if (!is.null(p)) {
        assertthat::assert_that(assertthat::is.number(p))
        assertthat::assert_that(p >= 0.0 && p <= 1.0)
      }
    }
  }

  list(
    test_type = test_type, scores = scores, labels = labels, tsname = tsname,
    base_x = base_x, base_y = base_y, text_x = text_x, text_y = text_y,
    text_x2 = text_x2, text_y2 = text_y2
  )
}
|
d0fba6ee79f1a5f12339e14137500f203e7f3d9a | 91621f72b96e03e7c2064cf43733e8507b0e4deb | /R/package/inst/tests/test-ddfmanager.R | 1eb3d7808dbe548d1b76bf3f298c07db9bc2a4ae | [
"Apache-2.0"
] | permissive | williamdengnewyork/DDF | 5e08455db3ab1f307aee063955b9d4d30ad81e5a | e4e68315dcec1ed8b287bf1ee73baa88e7e41eba | refs/heads/master | 2020-12-03T02:28:37.777033 | 2016-06-08T08:23:51 | 2016-06-08T08:23:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 611 | r | test-ddfmanager.R | library(testthat)
library(ddf)
context("DDFManager")
# Integration test: requires a working Hive/Spark backend reachable by the
# ddf package, since it creates and populates a real Hive table.
test_that("sql2ddf works", {
  # Write mtcars as a space-delimited file so Hive's LOAD DATA can read it.
  # NOTE(review): prefer TRUE/FALSE over T/F, and a tempfile over /tmp.
  write.table(mtcars, "/tmp/mtcars", row.names=F, col.names=F)
  dm <- DDFManager()
  # Point the Hive metastore warehouse at a scratch location, then
  # (re)create the mtcars table and load the file into it.
  sql(dm, 'set hive.metastore.warehouse.dir=/tmp/hive/warehouse')
  sql(dm, "drop table if exists mtcars")
  sql(dm, "CREATE TABLE mtcars (mpg double, cyl int, disp double, hp int, drat double, wt double, qesc double, vs int, am int, gear int, carb string) ROW FORMAT DELIMITED FIELDS TERMINATED BY ' '")
  sql(dm, "LOAD DATA LOCAL INPATH '/tmp/mtcars' INTO TABLE mtcars")
  # A SQL query against the table should come back as a DDF object.
  # NOTE(review): expect_is() is deprecated in testthat 3e.
  ddf <- sql2ddf(dm, "select * from mtcars")
  expect_is(ddf, "DDF")
})
56c976aa36ad83747ce4a0f342af9cf3f2340271 | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed_and_cleaned/11367_0/rinput.R | aa77dcef122cb03e78444e9c7e1a7be2fb0154e2 | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 137 | r | rinput.R | library(ape)
# Read the Newick tree, remove its root, and write the unrooted version out.
phylo_tree <- read.tree("11367_0.txt")
write.tree(unroot(phylo_tree), file = "11367_0_unrooted.txt")
a5d9369a6faf1ad3326f801e8544c95be2bb26d4 | 4668f8ca41d00c2af993dad99b95743b71ecfaaa | /cachematrix.R | af8a749a55dac10afd4f4617470ab06e47de059b | [] | no_license | dalmaciopereyra/ProgrammingAssignment2 | 89b88559b34912c3c8048c7aa275b67b7b3aa24e | 8481e89a643536e25a342861de338ea94a08d42a | refs/heads/master | 2020-11-24T17:06:50.686178 | 2019-12-15T23:24:34 | 2019-12-15T23:24:34 | 228,263,820 | 0 | 0 | null | 2019-12-15T22:46:26 | 2019-12-15T22:46:25 | null | UTF-8 | R | false | false | 1,126 | r | cachematrix.R | ## This function calculates the inverse of a matrix
## makeCacheMatrix creates a special "matrix" object that caches its inverse;
## it returns a list of functions to set and get the matrix or its inverse
makeCacheMatrix <- function(x = matrix()) {
  # Cache for the matrix inverse; NULL means "not yet computed".
  inv <- NULL
  # Return the stored matrix.
  get <- function() x
  # Replace the stored matrix and invalidate the cached inverse.
  set <- function(y) {
    x <<- y
    # Bug fix: the original used `inv <- NULL`, a *local* assignment, so a
    # stale inverse survived a call to set(). `<<-` resets the cache in the
    # enclosing environment.
    inv <<- NULL
  }
  # Get / set the cached inverse.
  getinv <- function() inv
  setinv <- function(inverse) inv <<- inverse
  # Return the accessor functions for use by cacheSolve().
  list(get = get, set = set, getinv = getinv, setinv = setinv)
}
## cacheSolve checks if the inverse of a matrix has been calculated, if it has it returns the inverse
## otherwise it calculates the inverse of the matrix
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x' (a makeCacheMatrix object),
  ## reusing the cached inverse when one is available.
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("inverse cached")
    return(cached)
  }
  # No cached value: compute the inverse, store it, and return it.
  computed <- solve(x$get(), ...)
  x$setinv(computed)
  computed
}
|
b011660e2f5feb6852f6401f7ef82e0bb3123775 | 54380f007be35fbf5af5a3c198ffbc596d1a3e9b | /man/save_csv_carl.Rd | 82acb8d6f01b7269da0e579058c54651f7350c09 | [] | no_license | cfbeuchel/CarlHelpR | bf47bd7d2e56a13f69a8d66047e451b42cb13dbb | bbbeb168a6d7f378a6ec75e3e93915b26faa29fb | refs/heads/master | 2022-03-21T12:48:55.218400 | 2019-12-20T10:40:14 | 2019-12-20T10:40:14 | 114,129,646 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,387 | rd | save_csv_carl.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/save_csv_carl.R
\name{save_csv_carl}
\alias{save_csv_carl}
\title{Custom save to .csv function}
\usage{
save_csv_carl(file = NA, file_name = NA, subfolder = NA,
create_subfolder = F, sep = ",", quote = F)
}
\arguments{
\item{file}{The data.table or data.frame to be saved.}
\item{file_name}{Character string that specifies the name the saved file should have.
The date of creation and the .csv ending are added automatically.}
\item{subfolder}{A character string without "/" giving the subfolder the file shall be saved in.}
\item{create_subfolder}{Given a subfolder, setting this to TRUE will create a new directory with the name given in subfolder and will stop if set to FALSE}
\item{sep}{the field separator string. Values within each row of x are separated by this string.}
\item{quote}{a logical value (TRUE or FALSE) or a numeric vector. If TRUE, any character or factor columns will be surrounded by double quotes. If a numeric vector, its elements are taken as the indices of columns to quote. In both cases, row and column names are quoted if they are written. If FALSE, nothing is quoted.}
}
\description{
This function saves data.tables or data.frames as .csv in the root working directory or a specified subfolder.
Additionally the current date is automatically included in the file name.
}
|
1d6aa4a2f9121345b3b551fbdc6e556a65a68981 | dac4a8f2b14dbb92dd07e9ca9642410ae407a2f2 | /R/TransMx.R | 5f14b7c5e6053863710e4a12decf8686e24d4646 | [] | no_license | dstgithub/GrpString | 0710f0b5d1e8a90ee1e94e5a2f6facb19bc48c97 | 45b4da9cc59c71ddb8b53d7b6753665b7ff960fe | refs/heads/master | 2021-01-12T03:26:45.555515 | 2017-11-15T21:40:25 | 2017-11-15T21:40:25 | 78,210,123 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,196 | r | TransMx.R | #' @export
TransMx <- function(strings.vec, indiv = FALSE){
  # TransMx(): build first-order character-transition matrices for a vector
  # of strings. Each string is decomposed into its adjacent character pairs
  # ("transitions"); counts are accumulated over all strings into a square
  # matrix whose rows are the 'From' character and columns the 'To' character.
  #
  # Args:
  #   strings.vec: character (or coercible) vector; strings shorter than 2
  #                characters are dropped before processing.
  #   indiv:       if TRUE, additionally writes one tab-delimited matrix file
  #                per string ("<input name>_<NN>mx.txt") to the working dir.
  #
  # Returns a list of three elements:
  #   Transition_Matrix            - raw counts plus a "From/To" label column
  #   Transition_Normalized_Matrix - counts / grand total, rounded to 4 digits
  #   Transition_Organized         - data frame (transition, count), sorted by
  #                                  descending count.
  ##### Prepare: get the name of the input string vector
  strings.vec_name <- deparse(substitute(strings.vec))
  ##### 0. get all unique characters of all strings
  # 0.1 remove strings with fewer than 2 characters
  stringsx.pos <- which(nchar(strings.vec) >= 2)
  stringsx.vec <- strings.vec[stringsx.pos]
  num_strings <- length(stringsx.vec)
  # 0.2 make sure stringsx.vec is characters
  stringsx.vec <- as.character(stringsx.vec)
  # 0.3 split string into single characters
  str.sp.list <- lapply(stringsx.vec, function(x) strsplit(x, ''))
  # 0.4 vector of unique characters from above, sorted
  str.spu <- sort(unique(unlist(str.sp.list)))
  # 0.5 number of unique characters
  num.str.spu <- length(str.spu)
  # 0.6 number sequence from 1 to num.str.spu
  str.spun <- 1:num.str.spu
  ###### Level 2 function: all transitions in a string ######
  # Returns a num.str.spu x num.str.spu data frame of transition counts for
  # one string (rows = 'From' character index, columns = 'To' character).
  TransLoca <- function(str){
    ##### 1. split str to all transitions, in a vector
    ### Level 3 function: split a string to all transitions ###
    Split2v <- function(s){ # vector version
      len <- nchar(s)-1
      ss.vec <- vector(length = len)
      for (i in 1:len){
        ss.vec[i] <- (substring(s,i,i+1))
      }
      return(ss.vec)
    } # end of level 3 function Split2v
    ### 1.2 apply level 3 split2v function
    str.trans <- Split2v(str)
    # 1.2 number of each transition, in a vector
    # name of each element in vector is transition
    trans.table <- table(str.trans)
    # 1.3 number of all transitions
    # it is equal to sum(trans.table)
    # NOTE(review): num.trans is assigned but never used afterwards.
    num.trans <- nchar(str) - 1
    ##### 2. convert each of the transitions to a pair of numbers
    ### Level 3 function: convert transition to a pair of numbers ###
    # each pair represents a row number and a column number for a df
    TransPos <- function(single.trans){
      # 2.1.1 transition itself
      trans <- names(single.trans)
      # 2.1.2 split transition to two characters, store in vector
      trans.char <- unlist(strsplit(trans, ''))
      # 2.1.3 replace characters to digits
      # digits in str.spun correspond to characters in str.spu
      trans.loca <- as.numeric(plyr::mapvalues(trans.char, str.spu, str.spun,
                                               warn_missing = FALSE))
      return(trans.loca)
    } # end of level 3 function TransPos
    ### 2.2 apply function TransPos to all transitions in the string
    transLoca.list <- lapply(1:length(trans.table), function(i) TransPos(trans.table[i]))
    ##### 3. assign numbers of transitions to data frame
    # if a transition does not occur, the number is 0
    # 3.1 initialize df
    trans.df <- data.frame(matrix(0, ncol = num.str.spu, nrow = num.str.spu))
    # 3.2 assign numbers of transitions
    # it seems that the for loop is the only choice(?)
    for(i in 1:length(trans.table)){
      trans.df[transLoca.list[[i]][1], transLoca.list[[i]][2]] <- trans.table[i]
    }
    # 3.3 second characters (i.e., 'To') of transitions
    colnames(trans.df) <- str.spu
    return(trans.df)
  } # end of level 2 function TransLoca
  ##### 4. numbers of transitions in all strings
  ### 4.1 apply level 2 function TransLoca to get a list of transition matrix
  trans.df.list <- lapply(stringsx.vec, TransLoca)
  # 4.2 add all the df in the list together
  # do.call in previous versions can only be used for 2 dfs in a list
  trans.df.sum.df <- Reduce('+', trans.df.list)
  # 4.3 first characters (i.e., 'From') of transitions
  trans.df.sum1.df <- noquote(cbind(str.spu, trans.df.sum.df))
  colnames(trans.df.sum1.df)[1] <- "From/To"
  ### 4.4 get normalized numbers of transitions
  # 4.4.1 grand total number of all transitions in all strings
  trans.total <- sum(trans.df.sum.df)
  # 4.4.2 normalized transition numbers in all strings
  trans.df.sum_norm.df <- trans.df.sum.df / trans.total
  # 4.4.3 round to 4 decimals if not 0
  # (the result becomes a character matrix: non-zero entries are rounded
  # numbers, zero entries are the string "0")
  trans.df.sum_norm.df <- apply(trans.df.sum_norm.df, 1:2, function(x){
    ifelse (x > 0, round(x, digits = 4), "0")
  })
  # 4.4.4 first characters (i.e., 'From') of transitions
  trans.df.sum_norm1.df <- noquote(cbind(str.spu, trans.df.sum_norm.df))
  colnames(trans.df.sum_norm1.df)[1] <- "From/To"
  ##### 5. transition numbers of each transition in all strings
  # 5.1 numbers of all transitions
  trans_num.vec <- trans.df.sum.df[trans.df.sum.df > 0]
  # 5.2 store in a df
  trans_num.df <- data.frame(matrix(0, ncol = 2, nrow = length(trans_num.vec)))
  trans_num.df[,2] <- trans_num.vec
  # 5.3 positions of transitions in trans.df.sum.df
  trans_pos.mx <- which(trans.df.sum.df > 0, arr.ind=T)
  # 5.4 convert positions represented by digits to characters
  trans_pos.lett.mx <- plyr::mapvalues(trans_pos.mx, str.spun, str.spu, warn_missing = FALSE)
  # 5.5 paste characters to form transitions
  trans_pos.2lett.vec <- apply(trans_pos.lett.mx, 1, function(x) paste(x, collapse = ""))
  # 5.6 store in the df
  trans_num.df[,1] <- trans_pos.2lett.vec
  # 5.7 sort by descent order and then assign col names and remove (current row names)
  trans_num.df <- trans_num.df[with(trans_num.df, order(-trans_num.df[,2])),]
  colnames(trans_num.df) <- c("transition", "number_of_transition")
  rownames(trans_num.df) <- NULL
  ##### 6. put the three df above in a list
  trans.out.df.list <- list(Transition_Matrix = trans.df.sum1.df,
                            Transition_Normalized_Matrix = trans.df.sum_norm1.df,
                            Transition_Organized = trans_num.df)
  ##### 7. optional: output individual matrix
  if(indiv == TRUE){
    # 7.1 add a column for starting positions ('From') of transitions to each df
    trans.df1.list <- lapply(trans.df.list, function(x){
      x <- noquote(cbind(str.spu, x))
      colnames(x)[1] <- "From/To"
      return(x)
    })
    # 7.2 prepare out files names
    # stringsx.pos contains the positions of strings in original vec with at least 2 characters
    num_strings.c <- sprintf("%02d", stringsx.pos)
    # 7.3 out file names (based on the input vec), one for each string
    out.file.names <- paste0(strings.vec_name, "_", num_strings.c, "mx", ".txt")
    # 7.4 Write output transition matrix file for each string
    lapply(1:num_strings, function(i){
      utils::write.table(trans.df1.list[[i]], sep = "\t", row.names = FALSE,
                         col.names = TRUE, file = out.file.names[i])
    })
  }
  ##### end of 'if' optional
  ##### return the list containing the 3 df
  return(trans.out.df.list)
}
|
27c61f1b0e9f9ca17f9fdb35cf53758574619e2a | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/imp4p/examples/fast_apply_nb_not_na.Rd.R | 80f6ae224e8221f1ea95ac6758c1498c050f5ad6 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 379 | r | fast_apply_nb_not_na.Rd.R | library(imp4p)
### Name: fast_apply_nb_not_na
### Title: Function similar to the function
### 'apply(X,dim,function(x)sum(!is.na(x)))'.
### Aliases: fast_apply_nb_not_na
### Keywords: Cpp wrapper function
### ** Examples
## The function is currently defined as
##function (X, dim)
##{
## .Call("imp4p_fast_apply_nb_not_na", PACKAGE = "imp4p", X,
## dim)
## }
|
738c15d7b470e19c1a6d77828f1e44466a0255d8 | 9cc4eba7237b76735fec01bed8a7cc22b2cc5eb1 | /anova.r | 0953df5854c34df0a499d81e5eab4e89ef312a33 | [] | no_license | lkuszal/RScribbles | 1f1b9c9f8b3a52be86771cd86887c5b69af80e0d | ffbb668c0052469c74cfdbec2b6f8bc2b11a1884 | refs/heads/main | 2023-03-14T07:22:55.283861 | 2021-03-10T18:27:11 | 2021-03-10T18:27:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 432 | r | anova.r | d1=c(5.6, 6.2, 6.7, 5.8, 5.8)
# One-way ANOVA sums of squares for three treatment groups, computed by hand.
# d1 is (re)defined here so the script is self-contained; the accumulator
# loops of the original are replaced by vectorized sums and `=` by `<-`.
d1 <- c(5.6, 6.2, 6.7, 5.8, 5.8)
d2 <- c(5.8, 4.8, 6.1, 5.4, 5.7)
d3 <- c(4.2, 4.8, 5.2, 5.1, 4.8, 4.3)
d0 <- c(d1, d2, d3) # pooled sample

# Grand mean and per-group means.
s0 <- mean(d0)
s1 <- mean(d1)
s2 <- mean(d2)
s3 <- mean(d3)

# SST: total sum of squares -- squared deviations of every observation
# from the grand mean.
sst <- sum((d0 - s0)^2)
print(sst)

# SSA: between-group (treatment) sum of squares, weighted by group size.
ssa <- length(d1) * (s0 - s1)^2 + length(d2) * (s0 - s2)^2 +
  length(d3) * (s0 - s3)^2
print(ssa)

# SSE: within-group (error) sum of squares; by construction SST = SSA + SSE.
sse <- sum((d1 - s1)^2) + sum((d2 - s2)^2) + sum((d3 - s3)^2)
print(sse)
|
27b70d6f4e933f8d2b0ce940d4b3f889351f6f5a | 4ac9ca1c41d9de8ffda4512cc697e0d9424fadc7 | /R/errorHandler.R | cab5df8b0e60aa33e3da87fff2d5a91b9c62f55e | [] | no_license | johndharrison/dockerAPI | 8b6717c028857568baf7b2d2d14cd23a94df2a9a | f45e8bfeb383865d83ef365bbc0ef0e666f09f35 | refs/heads/master | 2020-05-25T14:18:24.038852 | 2016-08-08T16:25:20 | 2016-08-08T16:25:20 | 28,023,667 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,576 | r | errorHandler.R | setOldClass("response")
#' CLASS errorHandler
#'
#' errorHandler class stores an httr response and checks its status code,
#' stopping with an informative message on failure.
#'
#' errorHandler is a generator object.
#'
#' @import jsonlite
#' @import whisker
#' @import httr
#' @export errorHandler
#' @exportClass errorHandler
#' @aliases errorHandler
errorHandler <- setRefClass("errorHandler",
                            fields = list(
                              # Most recent httr response object (the S3 class
                              # is registered for S4 via setOldClass above).
                              response = "response"),
                            methods = list(
                              # Initialize with an optional response; when none is
                              # given, store an empty list classed as "response" so
                              # the field always holds a valid object.
                              initialize = function(response = NULL, ...){
                                if(is.null(response)){
                                  response <<- `class<-`(list(), "response")
                                }
                                callSuper(...)
                              },
                              # Store `appresponse` and stop unless its status code is
                              # listed in `pass`. Codes found in errors$status_code stop
                              # with the matching errors$message; any other failing code
                              # stops with the captured response body itself.
                              # NOTE(review): the `warnings` argument is accepted but
                              # never used in this method.
                              checkResponse = function(appresponse, pass = c(), warnings = data.frame(), errors = c()){
                                response <<- appresponse
                                if(!appresponse$status_code %in% pass){
                                  if(!appresponse$status_code %in% errors$status_code){
                                    base::stop(capture.output(cat(content(appresponse))), call. = FALSE)
                                  }else{
                                    base::stop(errors$message[errors$status_code == appresponse$status_code], call. = FALSE)
                                  }
                                }
                              })
)
d5f2a873b78f546beba20bd62e45ccd88c2ae0d0 | c765806f595d9dd3a745dfb0868776693ac388d0 | /first_term/pbl01_2/rfiles/first.R | 0936a7d87b631644e7016b5eac52c9b1264b619f | [] | no_license | ducksfrogs/AIQuest | f5ec7c75d85f5256ecb6a7c15c994d7cc2eeafa1 | 54454e75ff240a8fdbb1cbf0fd699b3cec41bb1c | refs/heads/main | 2023-02-21T15:23:39.853357 | 2021-01-26T23:17:35 | 2021-01-26T23:17:35 | 304,774,841 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 485 | r | first.R | data("iris")
# Exploratory script: scatter plots of the iris data and a simple
# father/son height regression from the UsingR package.
str(iris)
summary(iris)

library(ggplot2)
# Bug fixes: the `+` must END a line (a line starting with `+ ...` is
# evaluated as its own expression and fails), and the colour mapping
# belongs inside aes(), otherwise `Species` is not found.
ggplot(iris, aes(x = Sepal.Length, y = Petal.Width)) +
  geom_point(aes(color = Species))

diamonds
iris

library(tidyverse)
iris_tbl <- as_tibble(iris)
ggplot(iris_tbl, aes(x = Sepal.Length, y = Petal.Width)) +
  geom_point(aes(color = Species))

# UsingR must be attached BEFORE its father.son dataset can be loaded;
# install.packages() is one-time interactive setup, not script code.
# install.packages("UsingR")
library(UsingR)
data(father.son)
father.son
plot(father.son)
fs <- father.son

# Formula variables are looked up in `data`, so fs$ prefixes are redundant.
glm(fheight ~ sheight, data = fs)
lm(fheight ~ sheight, data = fs)
|
068a298b9464f6c198194591df5bef1006de68db | 1946498b789b9feb4c70a38b4ab5cde4c5697bc5 | /tests/testthat/test-data.R | 441e2ec3a6873431a0fc10e1d25d9d1585eb1b21 | [] | no_license | aivachine/project | 76feaa3819a44e4e9c053a2afc2bbb632ce55adc | edd2ea38d8e6c3711b52041759404f3c521676b5 | refs/heads/master | 2020-12-11T01:45:47.878999 | 2016-05-22T22:36:25 | 2016-05-22T22:36:25 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,762 | r | test-data.R | context("Data management")
# All tests run inside a throw-away "project-test" directory under tempdir(),
# initialised once with prInit(). The test_that blocks below depend on this
# setup and on each other's filesystem/global-env side effects running in
# order. NOTE(review): setwd() is never restored afterwards.
dir <- tempdir()
setwd(dir)
if (dir.exists("project-test")) unlink("project-test", recursive = TRUE)
dir.create("project-test")
setwd("project-test")
prInit()
# prSave() should write data/<name>.rda and prLoad() should restore the
# object into the global environment with a status message.
# NOTE(review): expect_equivalent() is deprecated in testthat 3e.
test_that("One can save and load objects", {
  assign("x", rnorm(100), envir = .GlobalEnv)
  y <- x
  prSave("x")
  expect_true(file.exists("data/x.rda"))
  rm(x, envir = .GlobalEnv)
  expect_output(prLoad("x"), "Numeric vector 'x' has been loaded")
  expect_true(exists("x"))
  expect_equivalent(x, y)
})
# Subdirectories can be given either inline in the name ("dir2/x2") or via
# the explicit `subdir` argument; both spellings must round-trip.
test_that("One can save and load objects in subdirectories", {
  assign("x2", rnorm(100), envir = .GlobalEnv)
  y <- x2
  prSave("dir2/x2")
  expect_true(file.exists("data/dir2/x2.rda"))
  rm(x2, envir = .GlobalEnv)
  expect_output(prLoad("dir2/x2"), "Numeric vector 'x2' has been loaded")
  expect_true(exists("x2"))
  expect_equivalent(x2, y)
  assign("x3", rnorm(100), envir = .GlobalEnv)
  y <- x3
  prSave("x3", subdir = "dir3")
  expect_true(file.exists("data/dir3/x3.rda"))
  rm(x3, envir = .GlobalEnv)
  expect_output(prLoad("x3", subdir = "dir3"), "Numeric vector 'x3' has been loaded")
  expect_true(exists("x3"))
  expect_equivalent(x3, y)
})
# prMoveData() relocates the .rda file into the target subdirectory.
test_that("One can move data files", {
  assign("x4", rnorm(100), envir = .GlobalEnv)
  prSave("x4")
  prMoveData("x4", "dir4")
  expect_false(file.exists("data/x4.rda"))
  expect_true(file.exists("data/dir4/x4.rda"))
})
# prDeleteData() removes the saved file from disk.
test_that("One can delete data files", {
  assign("x5", rnorm(100), envir = .GlobalEnv)
  prSave("x5")
  prDeleteData("x5")
  expect_false(file.exists("data/x5.rda"))
})
# Saving or moving onto an existing data file must raise an error rather
# than silently overwrite it.
test_that("One cannot overwrite a data file", {
  assign("x6", rnorm(100), envir = .GlobalEnv)
  prSave("x6")
  prSave("dir6/x6")
  expect_error(prSave("x6"))
  expect_error(prMoveData("x6", "dir6"))
})
7510b19dfe2c10d156fbc9f8b7478b9686a7bc61 | 494655c2d37cca13398fba6fa5fe4752d25319e5 | /r4.r | 92566e738f729f531655022756b0266cc3a7fa62 | [] | no_license | truptikamble/BE-PROGRAMS | 7f6f163de2f41dffb62d4ad0698a1a1134e0abce | 9093f0e7954c92bd97913c0dd1b0f5d2e70c7a71 | refs/heads/master | 2020-04-01T20:11:54.971603 | 2018-10-29T13:18:49 | 2018-10-29T13:18:49 | 153,593,007 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 515 | r | r4.r | data = read.csv(file.choose())
# Find the day with the heaviest 2018 snowfall in the interactively chosen
# CSV (read into `data` above); column 1 is the date, column 5 the snowfall.
# NOTE(review): `data` shadows base::data(); consider renaming it where the
# CSV is read. `=` assignments replaced with idiomatic `<-` throughout.
data <- data[, c(1, 5)]
names(data)
data$date <- as.Date(data$date)

# Add numeric year/month/day columns so rows can be filtered by date parts.
df <- data.frame(
  date = data$date,
  snowfall = data$snowfall,
  year = as.numeric(format(data$date, format = "%Y")),
  month = as.numeric(format(data$date, format = "%m")),
  day = as.numeric(format(data$date, format = "%d"))
)
head(df, n = 21)

# Row with the maximum snowfall recorded in 2018, and its weekday.
maxSubset <- subset(df, year == 2018)
maxSnow <- which.max(maxSubset$snowfall)
maxSubset[maxSnow, ]
weekdays(maxSubset[maxSnow, 1])
|
a1c4b3cd5f65c5423c53024b0e269e88c6919a9b | 17cb64908b89c854c304e10d5a88e621728d9f37 | /R/summary_popsumm.R | e188707ae2009200da8667a2056a7a5a203db68e | [] | no_license | EvoNetHIV/Herbeck-et-al-Vaccine-2018 | 494387deba8f63902a03b4ba4de68eddce540c63 | bf0ee209f8924c9ad4865a2dadfe11f7b256f845 | refs/heads/master | 2022-12-09T23:42:17.631572 | 2020-09-01T19:07:32 | 2020-09-01T19:07:32 | 69,389,080 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 10,537 | r | summary_popsumm.R | #' @export
summary_popsumm<-function(dat,at){
  # summary_popsumm(): record per-timestep summary statistics into
  # dat$popsumm. Runs only every dat$param$popsumm_frequency timesteps
  # (and at at==1); otherwise returns dat unchanged. It first builds a
  # large set of local index vectors and counts, then evaluates each
  # function in dat$popsumm_fxns inside THIS frame (via environment<-)
  # so those functions can read the locals directly.
  #
  # Args:
  #   dat: simulation state list (pop, param, popsumm, popsumm_fxns, attr,
  #        nw or el, discord_coital_df, ...).
  #   at:  current timestep (integer).
  # Returns: dat, with dat$popsumm[[stat]][popsumm_index] filled in.
  #if summary stats calculations doesn't occur at this timesetp,
  #based on popsumm_frequency value, then leave function
  if( (at%%dat$param$popsumm_frequency!=0) & (at!=1)){return(dat)}
  #time_index is a time vector based on current value
  #of "at" and parameter value "popsumm_frequency"
  if(at==1)
    time_index <- 1
  else if(at==dat$param$popsumm_frequency)
    time_index <- (at-dat$param$popsumm_frequency+2):at
  else
    time_index<- (at-dat$param$popsumm_frequency+1):at
  #"popsumm_index" is an index for the "popsumm" vectors
  #based on value of "at" and paramter "popsumm_frequency"
  if(at==1)
    popsumm_index <- 1
  else
    if(dat$param$popsumm_frequency==1)
      popsumm_index <- at
    else
      popsumm_index <- (at/dat$param$popsumm_frequency)+1
  #logical vectors and indices helpful to calculate summary stats
  inf_index <- dat$pop$Status == 1
  total_inf <- length(which(inf_index))
  sus_index <- dat$pop$Status == 0
  alive_index <- inf_index | sus_index
  total_alive <- length(which(alive_index))
  treated_index <- dat$pop$treated == 1 & inf_index
  not_treated_index <- dat$pop$treated == 0 & inf_index
  treated_undetectable <- treated_index & dat$pop$V<dat$param$vl_undetectable
  treated_agents <- which(treated_index)
  not_treated_agents <- which(not_treated_index)
  under30_index <- dat$pop$age < 30
  agents30to50_index <- dat$pop$age >= 30 & dat$pop$age < 50 # Testing need to change 31 back to 50
  over50_index <- dat$pop$age >= 50
  agents_under30 <- which(under30_index)
  agents_30to50 <- which(agents30to50_index)
  agents_over50 <- which(over50_index)
  # Infections whose Time_Inf falls inside the current reporting window,
  # broken out by the virus' vaccine/drug sensitivity flags.
  new_infections <- is.element(dat$pop$Time_Inf, time_index)
  new_infections_count <- length(which(is.element(dat$pop$Time_Inf, time_index)))
  new_infections_virus_vacc_sens_count <- length(which(is.element(dat$pop$Time_Inf, time_index)&
                                                         dat$pop$virus_sens_vacc==1))
  new_infections_virus_vacc_notsens_count <- length(which(is.element(dat$pop$Time_Inf, time_index)&
                                                            dat$pop$virus_sens_vacc==0))
  new_infections_virus_drug_sens_count <- length(which(is.element(dat$pop$Time_Inf, time_index)&
                                                         dat$pop$virus_sens_drug==1))
  new_infections_virus_drug_part_res_count <- length(which(is.element(dat$pop$Time_Inf, time_index)&
                                                             dat$pop$virus_part_res_drug==1))
  new_infections_virus_drug_3_plus_res_count <- length(which(is.element(dat$pop$Time_Inf, time_index) &
                                                               dat$pop$virus_3_plus_drug_muts==1))
  # NOTE(review): this "1 drug mut" count reuses virus_3_plus_drug_muts,
  # not a one-mutation flag -- verify intent.
  new_infections_virus_1_drug_muts <- length(which(is.element(dat$pop$Time_Inf, time_index) &
                                                     dat$pop$virus_3_plus_drug_muts==1))
  # NOTE(review): ifelse() is vectorized and returns a result shaped like its
  # condition; with a scalar condition only the first element of the donor
  # vector is kept -- verify intent when new_infections_count > 1.
  donor_time_inf <- ifelse(new_infections_count>0,
                           dat$pop$Donors_Total_Time_Inf_At_Trans[new_infections],
                           NA)
  donor_acute_count <- ifelse(!is.na(donor_time_inf),
                              length(which(donor_time_inf<=dat$param$t_acute)),
                              NA)
  new_births <- is.element(dat$pop$arrival_time, time_index)
  cd4_aids <- dat$pop$CD4 == 4
  new_diagnoses <- dat$pop$diag_status == 1 & is.element(dat$pop$diag_time,time_index)
  acute_phase_vec <- (at-dat$pop$Time_Inf)<dat$param$t_acute
  acute_phase <- !is.na(acute_phase_vec) & acute_phase_vec==T
  percent_virus_sensitive <- round(100*(length(which(dat$pop$virus_sens_vacc==1 & inf_index))/length(which(inf_index))))
  percentVaccinated <- round(100*(length(which(dat$pop$vaccinated == 1 & alive_index))/total_alive))
  #deaths (status -2 = AIDS death, -1 = non-AIDS death, per the tests below)
  just_died <- is.element(dat$pop$Time_Death,time_index)
  died_aids <- dat$pop$Status == -2 & just_died
  died_aids_mean_age <- mean(dat$pop$age[died_aids])
  died_non_aids <- dat$pop$Status == -1 & just_died
  died_non_aids_inf <- died_non_aids & !is.na(dat$pop$V)
  died_non_aids_sus <- died_non_aids & is.na(dat$pop$V)
  aged_out <- (dat$pop$age>=dat$param$max_age) & just_died
  #browser()
  #network statistics
  # some of these can't be computed if we are in edgelist mode
  # so need to create a network from the edgelist
  if(!is.null(dat[['nw']])){
    nw <- dat[['nw']]
  } else {
    # NOTE(review): netattrs is assigned but never used afterwards.
    nw_summary <- NULL
    number_edges <- nrow(dat$el)
    network_size <- attr(dat$el,'n')
    total_nodes <- NULL
    netattrs<-attributes(dat$el)
    nw <- as.network.matrix(dat$el, matrix.type='edgelist',
                            # TODO: ASSUMING THESE HAVE BEEN HARDCODED UPSTREAM
                            directed = FALSE,
                            bipartite = FALSE,
                            loops = FALSE
    )
  }
  nw_summary <- summary(nw~degree(0:1) + concurrent, at = at)
  number_edges <- network.edgecount(nw)
  network_size <- network.size(nw)
  total_nodes <- sum(nw_summary[1]+nw_summary[2]+nw_summary[3]) # This depends on nw_summary which I blanked out above
  #viral load values
  log10_vl_values <- log10(dat$pop$V[which(inf_index)]+dat$param$AbsoluteCut)
  spvl_untreated_values <- (
    dat$pop$LogSetPoint[which(inf_index & not_treated_index)])
  # todo: may be a faster way to calculate degree
  edges_by_agent <- unname(summary(nw ~ sociality(base = 0),at=at)) #use dat$attr$id for index on dat$pop
  edges_untreated <- edges_by_agent[dat$attr$id %in% not_treated_agents ]
  edges_treated <- edges_by_agent[dat$attr$id %in% treated_agents]
  edges_under30 <- edges_by_agent[dat$attr$id %in% agents_under30]
  edges_30to50 <- edges_by_agent[dat$attr$id %in% agents_30to50]
  edges_over50 <- edges_by_agent[dat$attr$id %in% agents_over50]
  #aim3 mutations
  # Counts of drug-resistance mutations among infected agents with viral
  # load above the undetectable threshold ("mutationsN" = at least N,
  # "mutationsNexact" = exactly N, "...all" = over all agents).
  inf_undetect_ix <- (dat$pop$Status==1 & dat$pop$V> dat$param$vl_undetectable)
  no_inf_undect <- length(which(inf_undetect_ix))
  mutations0 <- length(which(inf_undetect_ix & dat$pop$aim3_no_muts==0))
  mutations1 <- length(which(inf_undetect_ix & dat$pop$aim3_no_muts>=1))
  mutations2 <- length(which(inf_undetect_ix & dat$pop$aim3_no_muts>=2))
  mutations3 <- length(which(inf_undetect_ix & dat$pop$aim3_no_muts>=3))
  mutations4 <- length(which(inf_undetect_ix & dat$pop$aim3_no_muts>=4))
  mutations5 <- length(which(inf_undetect_ix & dat$pop$aim3_no_muts>=5))
  mutations1exact <- length(which(inf_undetect_ix & dat$pop$aim3_no_muts==1))
  mutations2exact <- length(which(inf_undetect_ix & dat$pop$aim3_no_muts==2))
  mutations3exact <- length(which(inf_undetect_ix & dat$pop$aim3_no_muts==3))
  mutations4exact <- length(which(inf_undetect_ix & dat$pop$aim3_no_muts==4))
  mutations3plus_long <- length(which(inf_index & dat$pop$aim3_muations_long>=3))
  mutations4plus_long <- length(which(inf_index & dat$pop$aim3_muations_long>=4))
  mutations5_long <- length(which(inf_index & dat$pop$aim3_muations_long==5))
  mutations0all <- length(which( dat$pop$aim3_no_muts==0))
  mutations1all <- length(which( dat$pop$aim3_no_muts==1))
  mutations2all <- length(which( dat$pop$aim3_no_muts==2))
  mutations3all <- length(which(dat$pop$aim3_no_muts==3))
  mutations4all <- length(which(dat$pop$aim3_no_muts==4))
  mutations5all <- length(which(dat$pop$aim3_no_muts==5))
  mutations1plusall <- length(which( dat$pop$aim3_no_muts>=1))
  mutations2plusall <- length(which( dat$pop$aim3_no_muts>=2))
  mutations3plusall <- length(which(dat$pop$aim3_no_muts>=3))
  mutations4plusall <- length(which(dat$pop$aim3_no_muts>=4))
  #coital acts
  # Per-couple act counts come from the discordant-couple data frame when it
  # exists this timestep; otherwise the summary values default to 0/NA.
  if(!is.null(dat$discord_coital_df)){
    number_coit_acts <- sum(tapply(dat$discord_coital_df$act_id_couple,
                                   dat$discord_coital_df$couple_id,
                                   max))
    acts_iev <- length(which(dat$discord_coital_df$iev==1))/2
    percent_iev <- (acts_iev / number_coit_acts)
    transmission_opps_condom_percent <- (length(which(dat$discord_coital_df$condom==1)) /
                                           nrow(dat$discord_coital_df) )
    trans_probs_mean <- mean(dat$discord_coital_df$trans_probs)
  }else{
    number_coit_acts <- 0
    percent_iev <- NA
    percent_condom <- NA
    trans_probs_mean <- NA
    transmission_opps_condom_percent <- NA
  }
  #actual calculation of summary stats based on indices and vectors from above
  # and functions for each variable in "popsumm_fxns"
  popsumm_vars=names(dat$popsumm)
  for(ii in 1:length(popsumm_vars)){
    temp_var<-popsumm_vars[ii]
    environment(dat$popsumm_fxns[[ii]])<-environment()
    dat$popsumm[[temp_var]][popsumm_index] <- dat$popsumm_fxns[[ii]]()
  }
  #calculation of generic attribute stats
  #what percent of alive agents are in each category
  #stat: generic_att_percent_cat_xx (xx=1,..,total number of attributes)
  #what percent of alive agents are infected in each category
  #stat: generic_att_percent_inf_cat_xx
  #stats for generic attribute values need to be treated separately
  #as the number of attributes may vary between model scenarios
  #note: objects below need to be renamed for clarity
  temp_length <- length(dat$param$generic_nodal_att_values)
  if(temp_length>1){
    #how many alive agents in each category
    temp_table=table(dat$pop$att1[alive_index])
    #how many alive and infected agents in each category
    temp_table2=table(dat$pop$att1[inf_index])
    #total agents
    sum_temp_table=sum(temp_table)
    #this vector makes sure categories from tables above are
    #arranged in ascending order (necessary if zero agents in a particular
    #category, which would mean they are missing in tables above
    temp_match=match(names(temp_table),1:temp_length)
    for(zz in 1:length(temp_match)){
      namevec <- paste("generic_att_percent_cat_",temp_match[zz],sep="")
      dat$popsumm[[namevec]][popsumm_index]=temp_table[zz]/sum_temp_table
    }
    for(zz in 1:temp_length){
      namevec2 <- paste("generic_att_percent_inf_cat_",zz,sep="")
      ix1<- which(names(temp_table)==zz)
      ix2<- which(names(temp_table2)==zz)
      if(length(ix2)>0){
        val<- temp_table2[ix2]/temp_table[ix1]
      }else{val<-0}
      dat$popsumm[[namevec2]][popsumm_index] <- val
    }
  }
  # end of calculating summary stats for generic attributes
  return(dat)
}
|
2c99f425893fba50ecafc546e7a5a71e69aa142a | 739c351026e02598e006a2655a1978a8cbd72ac0 | /R/elnet.R | faf563c0f06bd498c6d2c016e7d6c020db6061dc | [
"MIT"
] | permissive | tidymodels/butcher | a30c3b6a627d89ce2755da9de92df97ded8bba7f | 7e162c9f23fc55a63fac6f132c24038419a94416 | refs/heads/main | 2023-04-28T15:39:45.260168 | 2023-04-26T21:06:10 | 2023-04-26T21:06:10 | 190,640,087 | 125 | 19 | NOASSERTION | 2023-08-23T02:34:18 | 2019-06-06T19:45:18 | R | UTF-8 | R | false | false | 973 | r | elnet.R | #' Axing an elnet.
#'
#' elnet objects are created from the \pkg{glmnet} package, leveraged
#' to fit generalized linear models via penalized maximum likelihood.
#'
#' @inheritParams butcher
#'
#' @return Axed model object.
#'
#' @examplesIf rlang::is_installed("glmnet")
#' # Load libraries
#' library(parsnip)
#' library(rsample)
#'
#' # Load data
#' split <- initial_split(mtcars, prop = 9/10)
#' car_train <- training(split)
#'
#' # Create model and fit
#' elnet_fit <- linear_reg(mixture = 0, penalty = 0.1) %>%
#' set_engine("glmnet") %>%
#' fit_xy(x = car_train[, 2:11], y = car_train[, 1, drop = FALSE])
#'
#' out <- butcher(elnet_fit, verbose = TRUE)
#'
#' @name axe-elnet
NULL
#' Remove the call.
#'
#' @rdname axe-elnet
#' @export
axe_call.elnet <- function(x, verbose = FALSE, ...) {
  # Keep the untouched object so butcher can report what was removed.
  original <- x
  # Swap the stored call for a tiny placeholder to shrink the object;
  # this disables call-dependent methods such as print() and summary().
  axed <- exchange(x, "call", call("dummy_call"))
  add_butcher_attributes(axed, original,
                         disabled = c("print()", "summary()"),
                         verbose = verbose)
}
|
99a88db47ccfeb452d97d549add45950e35b4ffd | 6ce53cb961f1d96634f4d633433bdc74fb69ced0 | /landscape/landscape_plot.r | b1b5e949844ba150ceaf0bf82c755bfd1105040c | [] | no_license | lvmt/R_releated_Code | fb936364205137b984a6f218f746156f4e051af7 | df0863f37b3b771df934068f962329825c163fa4 | refs/heads/master | 2023-03-17T15:19:25.552072 | 2021-03-02T06:10:56 | 2021-03-02T06:10:56 | 257,591,855 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,074 | r | landscape_plot.r | '''突变位点landscape图
# Mutation-site landscape (oncoprint) plot.
# Input: the table produced by landscape_file.py.
# NOTE: the original script wrapped these notes in Python-style ''' strings,
# which do not parse in R; they have been converted to comments here.

library(ComplexHeatmap)

# NOTE(review): working directory and input file are hard-coded; parameterize
# before running on another machine.
setwd("C:/Users/dell/desktop")
infile = read.table("landscape_filter_gene.plot.xls", sep="\t",header=TRUE, row.names = 1)
# oncoPrint() expects empty strings, not NA, for "no alteration" cells.
infile[is.na(infile)] = ""
#rownames(infile) =infile[,1]
#infile1 =infile[,2:32]

# Fill color for each alteration type.
col = c("cds_indel" = "lightblue", "frameshift" = "yellow", "missense" = "orange",
        'nonsense'='green', 'misstart'='purple', 'splice'='black', 'stop_gain'='red',
        'stop_loss'='blue')

# Drawing function for each alteration type.  The list names must match the
# type strings split out by get_type(); the original names "stopgain" and
# "stoploss" could never match the "stop_gain"/"stop_loss" types used in
# `col` and in the legend, so they are renamed here.
alter_fun = list(
  background = function(x, y, w, h) {
    grid.rect(x, y, w-unit(1.0, "mm"), h-unit(1.0, "mm"), gp = gpar(fill = "#CCCCCC", col = NA))
  },
  cds_indel = function(x, y, w, h) {
    grid.rect(x, y, w-unit(0.1, "mm"), h-unit(3.0, "mm"), gp = gpar(fill = col["cds_indel"], col = NA))
  },
  frameshift = function(x, y, w, h) {
    grid.rect(x, y, w-unit(0.2, "mm"), h-unit(0.2, "mm"), gp = gpar(fill = col["frameshift"], col = NA))
  },
  missense = function(x, y, w, h) {
    grid.rect(x, y, w-unit(0.3, "mm"), h-unit(0.4, "mm"), gp = gpar(fill = col["missense"], col = NA))
  },
  misstart = function(x, y, w, h) {
    grid.rect(x, y, w-unit(0.4, "mm"), h-unit(0.4, "mm"), gp = gpar(fill = col["misstart"], col = NA))
  },
  nonsense = function(x, y, w, h) {
    grid.rect(x, y, w-unit(0.5, "mm"), h-unit(0.6, "mm"), gp = gpar(fill = col["nonsense"], col = NA))
  },
  stop_gain = function(x, y, w, h) {
    grid.rect(x, y, w-unit(0.6, "mm"), h-unit(0.8, "mm"), gp = gpar(fill = col["stop_gain"], col = NA))
  },
  stop_loss = function(x, y, w, h) {
    grid.rect(x, y, w-unit(0.7, "mm"), h-unit(1.5, "mm"), gp = gpar(fill = col["stop_loss"], col = NA))
  },
  splice = function(x, y, w, h) {
    grid.rect(x, y, w-unit(1.5, "mm"), h-unit(1.5, "mm"), gp = gpar(fill = col["splice"], col = NA))
  }
)

# Cells may carry several alteration types separated by ";".
oncoPrint(infile, get_type = function(x) strsplit(x, ";")[[1]],
          alter_fun = alter_fun, col = col,
          #row_order = NULL, # would order rows by alteration percentage
          # pct_gp = gpar(col="white",fontsize = 0.01), row_names_gp = gpar(fontsize = 12),row_names_side = "left",
          # column_title = "",column_title_gp=gpar(fontsize=10),show_row_barplot =FALSE ,
          # show_column_names = TRUE,show_heatmap_legend=T,
          # column_names_gp=gpar(fontsize = 12),
          remove_empty_columns = TRUE,
          show_pct = TRUE,
          pct_gp = gpar(fontsize = 5),
          row_names_gp = gpar(fontsize = 8),
          #show_row_barplot = FALSE,
          #row_names_side = "left",
          column_title = "Landscape of RA_ILD",
          show_column_names = TRUE,show_heatmap_legend=T,
          column_names_gp=gpar(fontsize = 6),
          # "Alterations" (typo "Alternations" fixed)
          heatmap_legend_param = list(title = "Alterations",
                                      at = c("cds_indel", "frameshift", "missense", "misstart", "nonsense", "stop_gain", "stop_loss", "splice"),
                                      labels = c("cds_indel", "frameshift", "missense", "misstart", "nonsense", "stop_gain", "stop_loss", "splice")))
|
ead800733071c37c2a555315b83f85f68a2adfea | 29585dff702209dd446c0ab52ceea046c58e384e | /r4ss/R/r4ss_logo.R | 7426270da10d78d03e7ac3bab84de56195775e14 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 550 | r | r4ss_logo.R | #' Make a simple logo for r4ss organization on GitHub
#'
#' I was tired of the automatically generated symbol
#' that appeared by default.
#'
#' @author Ian Taylor
#'
# Render the r4ss organization logo: the letters "R4SS" placed around a
# circle, written to r4ss_logo.png in the current working directory.
# Called for its side effect; returns the result of dev.off() invisibly.
r4ss_logo <- function(){
  png("r4ss_logo.png", res = 300, units = "in", width = 3, height = 3)
  par(mar = rep(0, 4))
  plot(0, 0, type = "n", axes = FALSE, xlab = "", ylab = "")
  radius <- 0.5
  # Draw letters from last to first so overlaps match the original render.
  for (letter.index in 4:1) {
    angle <- pi + pi/4 - letter.index * pi/2
    text(radius * cos(angle), 1.2 * radius * sin(angle),
         substring("R4SS", letter.index, letter.index),
         font = 4, cex = 12,
         col = rich.colors.short(5, alpha = 1)[letter.index + 1])
  }
  dev.off()
}
|
16e9821888683d4281348005760695a94da8607a | 2c1d0d7c8d8381bcbc2251a1e4a40f98b1a2bc86 | /bin/common.R | 88ad56df8f9c24be29a2fb945c423527418c81db | [] | no_license | fredhutchio/argo-navis | 5ac77853a2befd9522579db1a13676a2f1e2f7dd | 885426f360c89a11626f0c4a44a225ab59960ff1 | refs/heads/master | 2021-01-13T01:26:07.195365 | 2015-10-12T22:05:23 | 2015-10-12T22:05:23 | 31,085,035 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,941 | r | common.R | "Mostly deme coloring specific codez"
library(RColorBrewer)
# Read a deme -> color mapping from a CSV file with columns `deme` and
# `color`.  Returns a character vector of colors named by deme.
read.color.spec <- function(filename) {
  spec <- read.csv(filename, stringsAsFactors = FALSE)
  setNames(spec$color, spec$deme)
}
# Assign one RColorBrewer palette color to each distinct deme.
# Demes are de-duplicated and sorted so the assignment is deterministic.
# (Parameter spelled `pallette` for backward compatibility with callers.)
brewify.colors <- function(demes, pallette = "RdBu") {
  deme.levels <- sort(unique(demes))
  setNames(brewer.pal(length(deme.levels), pallette), deme.levels)
}
# Resolve deme colors from parsed command-line arguments: an explicit CSV
# color spec (args$color_spec) wins; otherwise build colors from a Brewer
# palette (args$brewer) over the demes listed in args$demes.
colors.from.args <- function(args) {
  if (!is.null(args$color_spec)) {
    read.color.spec(args$color_spec)
  } else if (!is.null(args$brewer)) {
    deme.names <- read.csv(args$demes, stringsAsFactors = FALSE)$deme
    brewify.colors(deme.names, pallette = args$brewer)
  } else {
    stop("You must specify either --brewer or --color-spec")
  }
}
# Pair a data frame with the subset of deme colors that actually occur in it.
#
#   df:    data frame containing a deme label column.
#   label: name of the column holding the deme labels.
#   args:  parsed command-line arguments, forwarded to colors.from.args().
#
# Returns a list with `data` (the data frame, unmodified) and `colors`
# (named color vector restricted to the demes present, in sorted order).
factorify.deme <- function(df, label = 'label', args = list()) {
  # Removed: a no-op `df <- df` self-assignment and a commented-out BEAST
  # hack that remapped integer labels to species names (fix upstream if it
  # ever resurfaces).
  colors <- colors.from.args(args)
  keep.colors <- colors[as.character(sort(unique(df[, label])))]
  list(data = df, colors = keep.colors)
}
# Helpers for parsing migration-statistic names of the form
# "mig_<from>_<to>" into their components and pretty labels.
mig.regex <- "mig_(.+)_(.+)"

# Substitute the given capture-group template into each stat name.
mig.sub <- function(stats.names, template) {
  gsub(mig.regex, template, stats.names)
}

# Source deme of each migration stat name.
comp.from <- function(stats.names) {
  mig.sub(stats.names, "\\1")
}

# Destination deme of each migration stat name.
comp.to <- function(stats.names) {
  mig.sub(stats.names, "\\2")
}

# Human-readable "from -> to" label.
pretty.mig <- function(stats.names) {
  mig.sub(stats.names, "\\1 -> \\2")
}

# Add from/to/migration columns derived from df$statistic, plus a copy of
# df$subset under `subset.name` so ggplot's dynamic resolution doesn't break.
explode.mig <- function(df) {
  df$from        <- comp.from(df$statistic)
  df$migration   <- pretty.mig(df$statistic)
  df$to          <- comp.to(df$statistic)
  df$subset.name <- df$subset
  df
}
|
0ec72b2d7119b265ccff0804bc740bdc1501bfc5 | dcee394d4fd8ba93a4e4880963ebdccd72fad310 | /R code/week6.R | d559623487ba25b35fa8a5a9e317eb9407a2ee93 | [] | no_license | issactoast/kindstat | ad6f2173eeed209e1475ec0630e0792868f89664 | e36502278b561ddd978ed12791fef408e78cfea7 | refs/heads/master | 2022-04-25T01:08:37.339317 | 2020-04-19T19:57:02 | 2020-04-19T19:57:02 | 257,089,204 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,374 | r | week6.R | # 직선의 방정식
# Equation of a line:
#   a: intercept
#   b: slope
#   y = a + b * x
# (The two annotation lines above were bare code in the original, which
# errors at run time; converted to comments.)

plot(0, 0, type = "n",
     xlim = c(0, 10),
     ylim = c(0, 10))
abline(h = 0)
abline(v = 0)

# Observed data points (e.g. house size x vs. price y)
points(x = c(2, 4, 6, 8),
       y = c(1, 6, 4, 8))

abline(a = 2, b = 0.5)              # candidate line 1
abline(a = 1, b = 0.8, col = "red") # candidate line 2 (a friend's line)
# Least-squares line found via optim() below
abline(a = 0.0005164763,
       b = 0.9499521173, col = "blue")

# Prediction for my house of size 5
5 * 0.9499521173 + 0.0005164763
# -> 4.750277  (pasted console echo in the original; now a comment)

# RSS = sum((y_i - y_i_hat)^2)
# Line 1: RSS = 13
y_i_hat <- c(2, 4, 6, 8) * 0.5 + 2
y_i_hat
y_i <- c(1, 6, 4, 8)
sum((y_i - y_i_hat)^2)

# Line 2: RSS = 9.4
y_i_hat <- c(2, 4, 6, 8) * 0.8 + 1
y_i_hat

# Residual sum of squares for a line, par = c(intercept, slope)
RSS <- function(par){
  intercept <- par[1]
  slope <- par[2]
  y_i_hat <- c(2, 4, 6, 8) * slope + intercept
  y_i <- c(1, 6, 4, 8)
  sum((y_i - y_i_hat)^2)
}

RSS(c(2, 0.5))
RSS(c(2, 0.6))
RSS(c(1.2, 0.6))

# Minimize RSS over (intercept, slope)
result <- optim(par = c(2, 0.5), fn = RSS)
result$par

library(rgl)

# Two-argument version of RSS for surface plotting
RSS <- function(intercept, slope){
  y_i_hat <- c(2, 4, 6, 8) * slope + intercept
  y_i <- c(1, 6, 4, 8)
  sum((y_i - y_i_hat)^2)
}

# Vectorize so persp3d() can evaluate it on a grid
rss <- Vectorize(RSS)

# (duplicate library(rgl) removed)
open3d()
persp3d(rss,
        xlim = c(-5, 5),
        ylim = c(-2, 2),
        zlim = c(6, 15),
        n = 100)
# Mark the least-squares solution on the RSS surface
rgl.spheres(x = 0.0005164763,
            y = 0.9499521173,
            z = RSS(0.0005164763, 0.9499521173),
            r = 0.05, color = 'red')
clear3d()
|
d7473dd2e355e6ba706ccb9ff8576fa8a991df1c | 109734b597c2d760725a1a050174a5d11b3c1a9b | /R/dist2dpath.R | 65010b4569e7b973f21805c2bc1d30a78b556d36 | [] | no_license | rubak/spatstat | c293e16b17cfeba3e1a24cd971b313c47ad89906 | 93e54a8fd8276c9a17123466638c271a8690d12c | refs/heads/master | 2020-12-07T00:54:32.178710 | 2020-11-06T22:51:20 | 2020-11-06T22:51:20 | 44,497,738 | 2 | 0 | null | 2020-11-06T22:51:21 | 2015-10-18T21:40:26 | R | UTF-8 | R | false | false | 2,155 | r | dist2dpath.R | #
# dist2dpath.R
#
# $Revision: 1.10 $ $Date: 2017/06/05 10:31:58 $
#
# dist2dpath compute shortest path distances
#
# Compute shortest-path distances between all pairs of vertices, given a
# symmetric matrix of edge lengths between adjacent vertices.
#
#   dist   -- square symmetric numeric matrix; Inf marks non-adjacent
#             pairs, and the diagonal must be 0.
#   method -- "C" (compiled spatstat routine, default) or "interpreted"
#             (pure-R relaxation; mainly for cross-checking).
#
# Returns a matrix of the same size holding shortest-path distances
# (Inf where a pair of vertices is unreachable).
dist2dpath <- function(dist, method="C") {
  ## given a matrix of distances between adjacent vertices
  ## (value = Inf if not adjacent)
  ## compute the matrix of shortest path distances
  stopifnot(is.matrix(dist) && isSymmetric(dist))
  stopifnot(all(diag(dist) == 0))
  # Finite entries are the actual edge lengths.
  findist <- dist[is.finite(dist)]
  if(any(findist < 0))
    stop("Some distances are negative")
  ##
  n <- nrow(dist)
  if(n <= 1L) return(dist)
  # cols[r, c] == c; used below to expand a row of `dist` to a full matrix.
  cols <- col(dist)
  ##
  # Convergence tolerance: machine epsilon, inflated relative to the
  # shortest positive edge so numerical wobble cannot stall convergence.
  tol <- .Machine$double.eps
  posdist <- findist[findist > 0]
  if(length(posdist) > 0) {
    shortest <- min(posdist)
    tol2 <- shortest/max(n,1024)
    tol <- max(tol, tol2)
  }
  ##
  switch(method,
         interpreted={
           # Iterative relaxation: repeatedly shorten every path through
           # each intermediate vertex j until no entry improves by > tol.
           dpathnew <- dpath <- dist
           changed <- TRUE
           while(changed) {
             for(j in 1:n)
               # dist[j,][cols] expands row j so entry (r, c) is dist[j, c];
               # new dpath[r, j] = min over c of dpath[r, c] + dist[c, j].
               dpathnew[,j] <- apply(dpath + dist[j,][cols], 1L, min)
             unequal <- (dpathnew != dpath)
             changed <- any(unequal) & any(abs(dpathnew-dpath)[unequal] > tol)
             dpath <- dpathnew
           }
         },
         C={
           # Compiled routine: -1 encodes non-adjacency on input and
           # unreachability on output.
           adj <- is.finite(dist)
           diag(adj) <- TRUE
           d <- dist
           d[!adj] <- -1
           z <- .C("Ddist2dpath",
                   nv=as.integer(n),
                   d=as.double(d),
                   adj=as.integer(adj),
                   dpath=as.double(numeric(n*n)),
                   tol=as.double(tol),
                   niter=as.integer(integer(1L)),
                   status=as.integer(integer(1L)),
                   PACKAGE = "spatstat")
           # status == -1 signals the C iteration hit its limit.
           if(z$status == -1L)
             warning(paste("C algorithm did not converge to tolerance", tol,
                           "after", z$niter, "iterations",
                           "on", n, "vertices and",
                           sum(adj) - n, "edges"))
           dpath <- matrix(z$dpath, n, n)
           ## value=-1 implies unreachable
           dpath[dpath < 0] <- Inf
         },
         stop(paste("Unrecognised method", sQuote(method))))
  return(dpath)
}
|
967d4a80ce083c9d64f125cd74276b1f9edfbe87 | 685adb82b0ef76319c7d0e5fe4cb9aabae82367a | /man/modelAvg.Rd | 2c2cc8612a8fac235adcfa94875c4628225ef9d2 | [] | no_license | scarpino/binequality | 6474cc7a520b414dd622437582fe1d8e8fcbc3b7 | c810d3e5f066bfa8e1b67edbe8b06bc289f380b0 | refs/heads/master | 2021-01-19T02:19:01.663501 | 2018-11-05T14:04:13 | 2018-11-05T14:04:13 | 37,625,137 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 705 | rd | modelAvg.Rd | \name{modelAvg}
\alias{modelAvg}
\title{
A function to calculate model averages
}
\description{
This function calculates model averaged statistics using AIC and BIC.
}
\usage{
modelAvg(fitList, ID, nonCon = TRUE)
}
\arguments{
\item{fitList}{
a (non-empty) list of fitted distributions
}
\item{ID}{
a (non-empty) string of the ID column name.
}
\item{nonCon}{
an optional logical, where nonCon == TRUE excludes models that failed to converge and nonCon == FALSE includes them.
}
}
\details{
Calculates model averaged statistics using BIC and AIC as weights.
}
\value{
Returns a list with aic and bic values, aic and bic averages, and the aic and bic weights.
}
\examples{
#not run, internal function
} |
770b304dbb1fe2bdcdcf9cd00b2de2a1884cb1e1 | e535cafb3cdb408a577ccfaa2496dc8bba889e2e | /codes/vis-fit.R | fefdc3a6451a9d613a187b525caa2a7b041c4a29 | [] | no_license | richardli/cincinnati-overdose | fe1bbc6ea2afb5a63edaecfc500b5b7d08e06b42 | 39b64a725f61aac72039bf4e6962e47f07afe6fb | refs/heads/master | 2020-06-01T19:56:43.065393 | 2019-09-26T15:41:48 | 2019-09-26T15:41:48 | 190,908,738 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 33,990 | r | vis-fit.R | #---------------------------
# Plot of fixed effects
#---------------------------
library(ggrepel)
library(scales)
library(gridExtra)
library(xtable)
fixed <- data.frame(fit0$summary.fixed)
fixed$index <- 1:dim(fixed)[1]
race <- c(6:12)
fixed$label <- factor(rownames(fixed), levels=rownames(fixed))
fixed$label <- better_names(fixed$label)
covariates <- data[, match(rownames(fixed), colnames(data))]
covariates <- covariates[data$time ==1, ]
covtab <- t(apply(covariates, 2, function(x){
return(c(mean(x, na.rm = T),
sd(x, na.rm = T),
quantile(x, c(0, 1), na.rm=T)))
}))
colnames(covtab) <- c("mean", "sd", "min", "max")
covtab <- data.frame(Covariate = better_names(rownames(covtab)),
Mean = covtab[,1],
SD = covtab[,2],
Source = "ACS 2013–2017")
covtab$Mean[rownames(covtab) == "crime_rate"] <- mean(data$crime_rate, na.rm=T)
covtab$SD[rownames(covtab) == "crime_rate"] <- sd(data$crime_rate, na.rm=T)
sub <- Y[Y$region == 1, ]
covtab$Mean[rownames(covtab) == "temperature"] <- mean(sub$temperature, na.rm=T)
covtab$SD[rownames(covtab) == "temperature"] <- sd(sub$temperature, na.rm=T)
covtab$Mean[rownames(covtab) == "precipitation"] <- mean(sub$precipitation, na.rm=T)
covtab$SD[rownames(covtab) == "precipitation"] <- sd(sub$precipitation, na.rm=T)
print(xtable(covtab, digits = 3), include.rownames=F)
fixed <- fixed[-1, ]
if(VIS){
# Cochrane data from the 'rmeta'-package
metrics <- data.frame(
mean = fixed$mean,
median = fixed$mean,
lower = fixed$X0.025quant,
upper = fixed$X0.975quant,
rrmean = exp(fixed$mean),
rrmedian = exp(fixed$mean),
rrlower = exp(fixed$X0.025quant),
rrupper = exp(fixed$X0.975quant))
tab <- data.frame(label = fixed$label,metrics[, 1:4])
for(i in 2:5) tab[, i] <- sprintf("%.3f",tab[, i])
tab <- as.matrix(tab)
# only work with for numbers < 10
for(i in 1:dim(tab)[1]){
dot1 <- lapply(strsplit(as.character(tab[i, 2]), ''), function(x) which(x == '.'))[[1]]
if(dot1 < 3) tab[i, 2] <- paste0(" ", tab[i, 2])
}
# only work with for numbers < 10
tocharacter <- function(x){
dot1 <- lapply(strsplit(as.character(x[1]), ''), function(x) which(x == '.'))[[1]]
dot2 <- lapply(strsplit(as.character(x[2]), ''), function(x) which(x == '.'))[[1]]
pad1 <- pad2 <- ""
if(dot1 < 3) pad1 <- rep(" ", 3-dot1)
if(dot2 < 3) pad2 <- rep(" ", 3-dot2)
paste0("(", pad1, x[1], ", ", pad2, x[2], ")")
}
tab[, 4] <- apply(tab[, c(4, 5)], 1, tocharacter)
tab <- as.matrix(rbind(c("Fixed effect", " Mean", "95% Posterior CI"), tab[, c(1, 2, 4)]))
tab <- list(tab[,1], tab[, 2], tab[, 3])
special <- which(tab[[1]] == "Temperature (Fahrenheit)")
tab[[1]][special] <- "Temperature (F)"
met <- metrics[, c(1, 3, 4)]
  # Draw a forest plot (point estimates with 95% CIs) plus an aligned,
  # monospaced text table (label / mean / CI) in the left margin.
  #
  #   met      -- matrix/data frame whose first three columns are mean,
  #               lower, upper; one row per effect, plotted top to bottom.
  #   tab      -- list of three character vectors (labels, formatted means,
  #               formatted CIs); element 1 of each is the header row.
  #   mid      -- null value used to decide bold highlighting (0 on the
  #               log-RR scale, 1 on the RR scale).
  #   breaks   -- approximate number of x-axis breaks.
  #   leftsep  -- horizontal gap between the text table and the plot area.
  #   leftmar  -- left plot margin (cm) reserved for the text table.
  #   leftskip -- horizontal offset of the label column from the CI column.
  #   log      -- if TRUE, use a log-scaled x axis.
  #   xlab     -- x-axis title.
  #
  # Returns a ggplot object.
  ggforest <- function(met, tab, mid = 0, breaks = 5, leftsep = 0.5, leftmar = 15, leftskip = 17, log = FALSE, xlab = ""){
    require(scales)
    met <- data.frame(met)
    colnames(met)[1:3] <- c("mean", "lower", "upper")
    # Rows are plotted from the top (largest y) downward.
    met$y <- dim(met)[1] : 1
    mx <- max(met$upper - mid, mid - met$lower)
    # Table rows: labels, formatted means and CIs as character columns.
    gtab <- data.frame(lab = array(as.character(unlist(tab[[1]]))),
                       mean = as.character(tab[[2]]),
                       ci = as.character(tab[[3]]), stringsAsFactors = FALSE)
    gtab$mean[1] <- paste0("  ", gtab$mean[1])
    # Column widths (in characters) for monospace alignment.
    tmp1 <- max(nchar(gtab$lab)) + 6
    tmp2 <- max(nchar(gtab$mean)) + 4
    tmp3 <- max(nchar(gtab$ci)) + 2
    gtab$text <- ""
    # Right-pad mean and CI cells with spaces to the common column widths.
    for(i in 1:dim(gtab)[1]) {
      # gtab$text[i] <- paste0(gtab$text[i], paste(
      # rep(" ", tmp1 - nchar(gtab$lab[i])), collapse = ""))
      gtab$text[i] <- paste0(gtab$text[i], paste(c(gtab$mean[i],
        rep(" ", tmp2 - nchar(gtab$mean[i]))), collapse = ""))
      gtab$text[i] <- paste0(gtab$text[i], paste(c(gtab$ci[i],
        rep(" ", tmp3 - nchar(gtab$ci[i]))), collapse = ""))
    }
    # Table has one extra row (the header) above the plotted effects.
    gtab$y <- (dim(met)[1] + 1) : 1
    gtab$x <- min(met$lower) - leftsep
    # Base plot: dashed null line at x = 0, CIs, and point estimates.
    g <- ggplot(data = met) + geom_segment(x = 0, xend = 0, y = 0, yend = dim(met)[1] + 0.5, color = 'gray30', linetype = "dashed") + geom_errorbarh(aes(xmin = lower, xmax = upper, x = mean, y = y), color = "#56106EFF", height = 0.1, size = 0.5) + geom_point(aes(x = mean, y = y), size = 0.8, color = "#BB3754FF")+ theme_void() + xlab(xlab)
    if(log){
      g <- g + scale_x_continuous(trans = "log", breaks= trans_breaks("log", "exp", n = breaks), labels=function(x) sprintf("%.1f", x))
    }else{
      g <- g + scale_x_continuous(breaks= trans_breaks(identity, identity, n = breaks))
    }
    # Wide left margin makes room for the text table drawn via annotate().
    g <- g + theme(plot.margin=unit(c(1,1,1,leftmar),"cm"),
                   axis.text.x = element_text(vjust = -1),
                   axis.title = element_text(vjust = -1.2,face="bold"),
                   axis.text.y = element_blank(),
                   axis.ticks.x = element_line(size = .5, color = "black"),
                   axis.ticks.length = unit(.15, "cm"),
                   axis.line.x = element_line(size = .5, color = "black")
                   )
    # clip = "off" lets the annotations render outside the panel.
    g <- g + coord_cartesian(xlim = range(met[, 1:3]), clip = "off")
    # Bold (fontface 2) the rows whose CI excludes the null value `mid`.
    highlight <- c(ifelse((met$upper - mid) * (met$lower - mid) > 0, 2, 1))
    # Mean/CI column: data rows, then the header row slightly larger.
    g <- g + annotate("text", x = gtab$x[-1], y = gtab$y[-1], label = gtab$text[-1], size = 3, hjust = 1, fontface = highlight, family="Courier")
    g <- g + annotate("text", x = gtab$x[1], y = gtab$y[1], label = gtab$text[1], size = 3.4, hjust = 1, fontface =2, family="Courier")
    # x-position of the label column (shifted in log space when log = TRUE).
    if(log){
      xx <- exp(log(gtab$x[1]) - leftskip)
    }else{
      xx <- gtab$x[1] - leftskip
    }
    # Label column: data rows, then the header row slightly larger.
    g <- g + annotate("text", x = xx, y = gtab$y[-1], label = gtab$lab[-1], size = 3, hjust = 0, fontface = highlight, family="Courier")
    g <- g + annotate("text", x = xx, y = gtab$y[1], label = gtab$lab[1], size = 3.4, hjust = 0, fontface =2, family="Courier")
    # Horizontal rule separating the header row from the data rows.
    if(log){
      g <- g + geom_segment(x = log(xx), xend = log(gtab$x[1]), y = gtab$y[1] - 0.5, yend = gtab$y[1] - 0.5, size = 0.2, color = "gray50")
    }else{
      g <- g + geom_segment(x = xx, xend = gtab$x[1], y = gtab$y[1] - 0.5, yend = gtab$y[1] - 0.5, size = 0.2, color = "gray50")
    }
    return(g)
  }
out <- ggforest(met = met, tab = tab, mid = 0, breaks = 5, leftsep = 0, leftmar = 15.5, leftskip = 18.5, xlab = "Log Relative Risk")
ggsave(plot = out, filename = paste0("../figures/", prefix, "fixed_effects", postfix, "-v.pdf"), width = 10.5, height = 9)
ggsave(plot = out, filename = paste0("../figures/", prefix, "fixed_effects", postfix, "-v.tiff"), width = 10.5, height = 9)
tocharacter2 <- function(x){
# dot1 <- lapply(strsplit(as.character(x[1]), ''), function(x) which(x == '.'))[[1]]
dot2 <- lapply(strsplit(as.character(x[2]), ''), function(x) which(x == '.'))[[1]]
pad1 <- pad2 <- ""
if(dot2 < 4) pad2 <- paste(rep(" ", 4-dot2), collapse="")
paste0("(", x[1], ", ", pad2, x[2], ")")
}
tab <- data.frame(label = fixed$label,metrics[, 5:8])
for(i in 2:5) tab[, i] <- sprintf("%.3f",tab[, i])
tab <- as.matrix(tab)
# only work with for numbers < 10
for(i in 1:dim(tab)[1]){
dot1 <- lapply(strsplit(as.character(tab[i, 2]), ''), function(x) which(x == '.'))[[1]]
if(dot1 < 3) tab[i, 2] <- paste0(" ", tab[i, 2])
}
tab[, 4] <- apply(tab[, c(4, 5)], 1, tocharacter2)
tab <- as.matrix(rbind(c("Fixed effect", "exp(Mean)", "95% Posterior CI"), tab[, c(1, 2, 4)]))
tab <- list(tab[,1], tab[, 2], tab[, 3])
special <- which(tab[[1]] == "Temperature (Fahrenheit)")
tab[[1]][[special]] <- "Temperature (F)"
met <- metrics[, c(1, 3, 4) + 4]
out <- ggforest(met = met, tab = tab, mid = 1, breaks = 5, leftsep = 0, leftmar = 15.5, leftskip = 19.5, log = TRUE, xlab = "Relative Risk")
ggsave(plot = out, filename = paste0("../figures/", prefix, "fixed_effects", postfix, "-v-rr.pdf"), width = 10.5, height = 9)
ggsave(plot = out, filename = paste0("../figures/", prefix, "fixed_effects", postfix, "-v-rr.tiff"), width = 10.5, height = 9)
}
#-------------------------------
# Plot of random effects
#-------------------------------
space1 <- data.frame(fit0$summary.random[["region"]][1:N, ])
space1$region <- colnames(mat)[1:N]
time1 <- data.frame(fit0$summary.random[["time"]])
time2 <- data.frame(fit0$summary.random[["time.iid"]])
time1$effect <- "Structured"
time2$effect <- "IID"
time <- rbind(time1, time2)
time$effect <- factor(time$effect, levels = c("Structured", "IID"))
time$Date <- Y$Date[match(time$ID, Y$time)]
#-------------------------------
# Plot of spatial and temporal random effects
#-------------------------------
if(VIS){
time1 <- data.frame(fit0$summary.random[["time"]])
time1$Date <- Y$Date[match(time1$ID, Y$time)]
time1$effect <- "Temporal Random Effects"
time1$spike <- "1"
time1$spike[c(14, 20, 36)] <- "2"
g1 <- mapPlot2(data = space1, geo = geo, variables = c("mean"), labels = c("Spatial effect"), by.data = "region", by.geo = byGEO, removetab=TRUE) + map.theme + coord_map() + scale_fill_distiller("Spatial Effect", palette = "RdBu") + theme(panel.border = element_blank()) + theme(strip.background = element_blank(), strip.text.x = element_text( vjust = -0.1, size = 16)) + theme(legend.position = c(0.15, 0.72), legend.key.size = unit(.85,"line"), plot.margin=margin(t=-1,b=-1,l=0,r=0, unit="cm"), legend.title = element_text(size = 11))
g2 <- ggplot(time1, aes(x = Date, y = `mean`, ymin = `X0.025quant`, ymax = `X0.975quant`, group = effect, shape = spike)) + geom_point(color = "darkred", size=2) + geom_errorbar(width = .1, color = "darkred", size = .3) + line.theme + ylab("Temporal Effect") + ggtitle("") + xlab(NULL) + scale_x_date(breaks = "6 month", labels = date_format("%b %Y"), lim = c(min(Y$Date), max(Y$Date)))+ scale_shape_manual(values = c(19, 17), guide = FALSE)+ theme(panel.border = element_blank(), plot.margin=margin(t=1, b=1, l=0, r=1, unit="cm"))
out <- grid.arrange(grobs = list(g1, g2), ncol = 2, widths = c(1, 1.15))
ggsave(plot = out, filename = paste0("../figures/", prefix, "space_and_time_effects", postfix, ".pdf"), width = 10, height = 4)
ggsave(plot = out, filename = paste0("../figures/", prefix, "space_and_time_effects", postfix, ".tiff"), width = 10, height = 4)
space1$rr <- exp(space1$mean)
time1$rr <- exp(time1$mean)
time1$`X0.025quantrr` <- exp(time1$X0.025quant)
time1$`X0.975quantrr` <- exp(time1$X0.975quant)
breaks <- exp(seq(min(space1$mean), max(space1$mean), length.out = 5))
g1 <- mapPlot2(data = space1, geo = geo, variables = c("rr"), labels = c("Relative Risk"), by.data = "region", by.geo = byGEO, removetab=TRUE) + map.theme + coord_map() + scale_fill_distiller("Relative Risk", palette = "RdBu", trans = "log", breaks = breaks, label = function(x){options( digits = 2);format( x, scientific = FALSE)}) + theme(panel.border = element_blank()) + theme(strip.background = element_blank(), strip.text.x = element_text( vjust = -0.1, size = 16)) + theme(legend.position = c(0.15, 0.72), legend.key.size = unit(.85,"line"), plot.margin=margin(t=-1,b=-1,l=0,r=0, unit="cm"), legend.title = element_text(size = 9))
g2 <- ggplot(time1, aes(x = Date, y = rr, ymin = `X0.025quantrr`, ymax = `X0.975quantrr`, group = effect, shape = spike)) + geom_point(color = "darkred", size=2) + geom_errorbar(width = .1, color = "darkred", size = .3) + line.theme + ylab("Relative Risk") + ggtitle("") + xlab(NULL) + scale_x_date(breaks = "6 month", labels = date_format("%b %Y"), lim = c(min(Y$Date), max(Y$Date)))+ scale_shape_manual(values = c(19, 17), guide = FALSE)+ theme(panel.border = element_blank(), plot.margin=margin(t=1, b=1, l=0, r=1, unit="cm"))
out <- grid.arrange(grobs = list(g1, g2), ncol = 2, widths = c(1, 1.15))
ggsave(plot = out, filename = paste0("../figures/", prefix, "space_and_time_effects", postfix, "-rr.pdf"), width = 10, height = 4)
ggsave(plot = out, filename = paste0("../figures/", prefix, "space_and_time_effects", postfix, "-rr.tiff"), width = 10, height = 4)
time_12month <- time1
time_12month$month <- format(time_12month$Date, "%m")
time_12month$year <- format(time_12month$Date, "%Y")
time_12month_mean <- aggregate(data = data.frame(time_12month), mean~month, FUN = mean)
g3 <- ggplot(time_12month_mean, aes(x = month, y = mean)) + geom_line(group = 1, color = "black", size = 1.5, linetype = "dashed") + geom_point(color = "black", size=2) + line.theme + ylab("Temporal Effect") + ggtitle("") + xlab("Month") + geom_point(data = time_12month, aes(x = month, y = mean, color = year, group = year), position = position_dodge(0.2)) + geom_errorbar(data = time_12month, aes(x = month, y = mean, color = year, group = year, ymin = `X0.025quant`, ymax = `X0.975quant`), width = 0.2, position = position_dodge(0.2)) + geom_line(data = time_12month, aes(x = month, y = mean, color = year, group = year), position = position_dodge(0.2), alpha = 0.8)+ scale_colour_brewer(palette = "Set1")
ggsave(plot = g3, filename = paste0("../figures/seasonality", postfix, ".pdf"), width = 10, height = 4)
## Violin graph
library(ggridges)
samp <- NULL
for(i in 1:dim(fit0$summary.random[["time"]])[1]){
tmp <- inla.rmarginal(1e4, fit0$marginals.random[["time"]][[i]])
samp <- rbind(samp, data.frame(sample = tmp, month = format(time_12month$Date, "%b")[i], month.num = as.numeric(format(time_12month$Date, "%m")[i]), year = time_12month$year[i]))
}
# samp$sample <- exp(samp$sample)
samp$month <- factor(samp$month, levels = rev(samp$month[match(1:12, samp$month.num)]))
g <- ggplot(samp, aes(x =sample, y = month)) + geom_density_ridges2(quantile_lines = TRUE, quantiles = 2, fill = "darkblue", alpha = 0.5, scale = 0.9) + xlab("Temporal Log Relative Risks") + line.theme + ylab("Month")
ggsave(plot = g, filename = paste0("../figures/seasonality2", postfix, ".pdf"), width = 8, height = 9)
}
#-------------------------------
# Plot of spatial and temporal variance
#-------------------------------
if(VIS){
dens <- NULL
for(i in 1:length(fit0$marginals.hyperpar)){
name <- names(fit0$marginals.hyperpar)[i]
if(strsplit(name, " ")[[1]][1] == "Precision"){
tmp <- inla.tmarginal(function(x)1/x, fit0$marginals.hyperpar[[i]])
name <- gsub("Precision", "Standard deviation", name)
}else{
tmp <- (fit0$marginals.hyperpar[[i]])
}
tmp <- data.frame(tmp)
tmp$name <- name
dens <- rbind(dens, tmp)
}
ordered <- unique(dens$name)
last <- which(ordered == "Standard deviation for regiontime")
ordered <- c(as.character(ordered[-last]), as.character(ordered[last]))
dens$name <- factor(dens$name, levels = ordered)
pdf(paste0("../figures/", prefix, "posterior-var", postfix, ".pdf"), width = 9, height = 4.5)
g <- ggplot(dens, aes(x = x, y = y)) + geom_line() + facet_wrap(~name, scales = 'free') + xlab("") + ylab("density") + theme_bw()
print(g)
dev.off()
}
#-------------------------------------
# Plot of interaction
# by space/time, add on main space-time
#--------------------------------------
if("regiontime" %in% names(fit0$summary.random)){
st <- data.frame(fit0$summary.random[["regiontime"]])
st$regiontime <- 1:dim(st)[1]
st <- merge(st, regiontime, by = "regiontime")
}else if("region.int" %in% names(fit0$summary.random)){
st <- data.frame(fit0$summary.random[["region.int"]])
st$region <- st$ID
st$time <- rep(1:T, each = dim(mat)[1])
}else{
st <- data.frame(fit0$summary.random[["time.int"]])
st$time <- st$ID
st$region <- rep(1:dim(mat)[1], each =T)
}
st$regionname <- colnames(mat)[st$region]
# if(VIS){
# pdf(paste0("../figures/", prefix, "spacetime_effects", postfix, ".pdf"), width = 12, height = 10)
# g <- mapPlot2(data = st, geo = geo, variables = c("time"), values = c("mean"), by.data = "regionname", by.geo = byGEO, is.long = TRUE) + scale_fill_viridis_c("effect")+ map.theme + coord_map()
# print(g)
# dev.off()
# pdf(paste0("../figures/", prefix, "spacetime_effects2", postfix, ".pdf"), width = 9, height = 5)
# g <- ggplot(st, aes(x = time, y = `mean`, color = regionname, group = regionname)) + geom_line(alpha = 0.3) + theme_bw() + scale_colour_manual(values=colorRampPalette(brewer.pal(name="Set1", n = 8)[-6])(311), guide=FALSE) + ylab("Relative Risk")
# # g + geom_line(data=subset(st, regionname %in% st$regionname[which(st$mean > 0.7)]), aes(x = time, y = `mean`, color = regionname, group = regionname))
# print(g)
# dev.off()
# }
nsim <- 1e5
marg.s <- matrix(NA, nsim, N)
marg.t <- matrix(NA, nsim, T)
st2 <- data.frame(region = rep(1:N, T),
time = rep(1:T, each = N),
mean = NA, med = NA, low = NA, high = NA)
for(i in 1:dim(marg.s)[2]){
marg.s[, i] <- inla.rmarginal(nsim, fit0$marginals.random[["region"]][[i]])
}
for(i in 1:dim(marg.t)[2]){
marg.t[, i] <- inla.rmarginal(nsim, fit0$marginals.random[["time"]][[i]]) + inla.rmarginal(nsim, fit0$marginals.random[["time.iid"]][[i]])
}
if("regiontime" %in% names(fit0$summary.random)){
counter <- 1
for(j in 1:dim(marg.t)[2]){
for(i in 1:dim(marg.s)[2]){
# make sure to not count the added regions
k <- regiontime$regiontime[intersect(which(regiontime$time == j), which(regiontime$region == i))]
which <- which(fit0$summary.random[["regiontime"]]$ID == k)
tmp <- inla.rmarginal(nsim, fit0$marginals.random[["regiontime"]][[which]])
# tmp <- tmp + marg.s[, i] + marg.t[, j]
st2[counter, "mean"] <- mean(tmp)
st2[counter, "med"] <- median(tmp)
st2[counter, "low"] <- quantile(tmp, .025)
st2[counter, "high"] <- quantile(tmp, .975)
counter <- counter + 1
}
}
}else{
counter <- 1
for(j in 1:dim(marg.t)[2]){
for(i in 1:dim(marg.s)[2]){
# make sure to not count the added regions
which <- which(fit0$summary.random[["region.int"]]$ID == i)[j]
tmp <- inla.rmarginal(nsim, fit0$marginals.random[["region.int"]][[which]])
# tmp <- tmp + marg.s[, i] + marg.t[, j]
st2[counter, "mean"] <- mean(tmp)
st2[counter, "med"] <- median(tmp)
st2[counter, "low"] <- quantile(tmp, .025)
st2[counter, "high"] <- quantile(tmp, .975)
counter <- counter + 1
}
}
}
st2$regionname <- colnames(mat)[st2$region]
# if(VIS){
# pdf(paste0("../figures/", prefix, "random_effects", postfix, ".pdf"), width = 14, height = 10)
# g <- mapPlot2(data = st2, geo = geo, variables = c("time"), values = c("mean"), by.data = "regionname", by.geo = byGEO, is.long = TRUE) + scale_fill_viridis_c("effect")+ map.theme + coord_map() + theme(legend.position = c(0.9, 0.05), legend.direction="horizontal")
# print(g)
# dev.off()
# pdf(paste0("../figures/", prefix, "random_effects2", postfix, ".pdf"), width = 9, height = 5)
# g <- ggplot(st2, aes(x = time, y = mean, color = regionname, group = regionname)) + geom_line() + theme_bw() + scale_colour_manual(values=colorRampPalette(brewer.pal(name="Set1", n = 8)[-6])(N), guide=FALSE) + ylab("Total random effects")
# print(g)
# dev.off()
# }
st3 <- st2
# insig <- which(st2$low * st2$high< 0)
# st3[insig, "mean"] <- NA
st3$date <- data$Date[match(st3$time, data$time)]
st3$date <- format(st3$date, "%b %Y")
tmp <- unique(st3$date)
st3$date <- factor(st3$date, levels = tmp)
st3sub <- subset(st3, time %% 5 == 4)
# st3sub <- subset(st3sub, time > min(st3sub$time))
st3sub$rr <- exp(st3sub$mean)
if(VIS){
max <- max(abs(min(st3sub$mean)), max(st3sub$mean))
breaks <- exp(seq(-max, max, length.out = 5))
breaks[1] <- breaks[1] + 1e-5
breaks[length(breaks)] <- breaks[length(breaks)] - 1e-5
g <- mapPlot2(data = st3sub, geo = geo, variables = c("date"), values = c("rr"),
by.data = "regionname", by.geo = byGEO, is.long = TRUE, border = "gray80", ncol=4) +
scale_fill_distiller("Relative Risk", palette = "RdBu", trans = "log", breaks = breaks, lim = exp(c(-max, max)), label = function(x){options( digits = 2);format(x, scientific = FALSE)}) +
map.theme + coord_map() +
theme(legend.position = "bottom",
legend.title = element_text(size=6, vjust = 1),
legend.text = element_text(size=4),
legend.key.width = unit(1, "cm"),
legend.key.height = unit(.15, "cm"))+
theme(panel.border = element_blank()) + theme(strip.background = element_blank(), strip.text.x = element_text(vjust = -0.1, size = 6, hjust = .7), panel.spacing = unit(0, "lines"))
ggsave(plot=g, filename=paste0("../figures/", prefix, "random_effects_sub_sig", postfix, ".pdf"), width = 8/1.5, height = 4.5/1.5)
ggsave(plot=g, filename=paste0("../figures/", prefix, "random_effects_sub_sig", postfix, ".tiff"), width = 8/1.5, height = 4.5/1.5)
g <- mapPlot2(data = st3, geo = geo, variables = c("date"), values = c("mean"),
by.data = "regionname", by.geo = byGEO, is.long = TRUE, border = "gray80") +
scale_fill_distiller("Independent Space-Time Effect", palette = "RdBu", na.value = "white", limits = c(-1,1)*max(abs(st3sub$mean)))+
map.theme + coord_map() +
theme(legend.position = "bottom",
legend.title = element_text(size=12, vjust = 1),
legend.key.width = unit(2, "cm"),
legend.key.height = unit(.5, "cm"))+
theme(panel.border = element_blank()) + theme(strip.background = element_blank(), strip.text.x = element_text(vjust = -0.1, size = 12, hjust = .7), panel.spacing = unit(0, "lines"))
ggsave(plot=g, filename=paste0("../figures/", prefix, "random_effects_sig", postfix, ".pdf"), width = 12, height = 9)
ggsave(plot=g, filename=paste0("../figures/", prefix, "random_effects_sig", postfix, ".tiff"), width = 12, height = 9)
}
if(VIS){
#-------------------------------
# Plot of prediction
#-------------------------------
data2 <- data
data2$pred <- fit0$summary.fitted.values[1:dim(data)[1], "mean"]
data2$var <- fit0$summary.fitted.values[1:dim(data)[1], "sd"]^2
data2$low <- fit0$summary.fitted.values[1:dim(data)[1], "0.025quant"]
data2$high <- fit0$summary.fitted.values[1:dim(data)[1], "0.975quant"]
data2 <- data2[, c("region", "Date", "regionname", "counts", "pred", "low", "high", "var", "population", "Year", "time")]
total <- aggregate(counts~regionname, data2, FUN = sum)
data2$regionname <- factor(data2$regionname, levels = as.character(total$regionname[order(total$counts, decreasing = TRUE)]))
pdf(paste0("../figures/", prefix, "prediction-vs-truth", postfix, ".pdf"), width = 12, height = 16)
pages <- 6
if(N <= 50) pages <- 1
for(i in 1:pages){
subset <- 50 * (i-1) + (1 : 50)
subset <- levels(data2$regionname)[subset[subset <= N]]
g <- ggplot(subset(data2, regionname %in% subset), aes(x = Date, group = regionname)) + geom_point(aes(y = counts), color = "black", size = .5) + geom_point(aes(y = pred), color = "red", size = .5, alpha = .5)+ geom_errorbar(aes(ymin = low, ymax = high), color = "red", size = .5, alpha = .5) + line.theme + facet_wrap(~regionname, scale = "free", ncol = 5) + ylab("Posterior")+
theme(panel.border = element_blank()) + theme(strip.background = element_blank(), strip.text.x = element_text(vjust = -0.1, size = 12, hjust = .7), panel.spacing = unit(0, "lines"))
print(g)
}
dev.off()
data2$time.index <- data2$time
data2$time <- format(data2$Date, "%b %Y")
tmp <- unique(data2$time)
data2$time <- factor(data2$time, levels = tmp)
data2sub <- subset(data2, time.index %% 5 == 4)
# data2sub <- subset(data2sub, time.index > min(data2sub$time.index))
data2sub$time <- factor(data2sub$time, levels=unique(data2sub$time))
pdf(paste0("../figures/", prefix, "prediction-map", postfix, ".pdf"), width = 12*1.5, height = 9*1.5)
g <- mapPlot2(data = data2, geo = geo, variables = c("time"), values = c("pred"), by.data = "regionname", by.geo = byGEO, is.long = TRUE, border = "gray70") + scale_fill_viridis_c("posterior means of smoothed counts", option = "A")+ map.theme + coord_map() + theme(legend.position = "bottom",
legend.title = element_text(size=16, vjust = 1),
legend.key.width = unit(2, "cm"),
legend.key.height = unit(.5, "cm"))+
theme(panel.border = element_blank()) + theme(strip.background = element_blank(), strip.text.x = element_text(vjust = -0.1, size = 16, hjust = .7), panel.spacing = unit(0, "lines"))
print(g)
dev.off()
pdf(paste0("../figures/", prefix, "prediction-map2", postfix, ".pdf"), width = 12*1.5, height = 9*1.5)
g <- g + scale_fill_viridis_c("posterior means of smoothed counts", option = "A", direction = -1)
print(g)
dev.off()
g <- mapPlot2(data = data2sub, geo = geo, variables = c("time"), values = c("pred"), by.data = "regionname", by.geo = byGEO, is.long = TRUE, ncol = 4, border = "gray60") + scale_fill_viridis_c("posterior means of smoothed counts", option = "A", trans='sqrt', breaks = c(1, 2, 3, 5, 10, 20))+ map.theme + coord_map() + theme(legend.position = "bottom",
legend.title = element_text(size=6, vjust = 1),
legend.text = element_text(size=4),
legend.key.width = unit(1, "cm"),
legend.key.height = unit(.15, "cm"))+
theme(panel.border = element_blank()) + theme(strip.background = element_blank(), strip.text.x = element_text(vjust = -0.1, size = 6, hjust = .7), panel.spacing = unit(0, "lines"))
ggsave(plot=g, filename=paste0("../figures/", prefix, "prediction-subset-map", postfix, ".pdf"), width = 8/1.5, height = 4.5/1.5)
ggsave(plot=g, filename=paste0("../figures/", prefix, "prediction-subset-map", postfix, ".tiff"), width = 8/1.5, height = 4.5/1.5)
g <- g + scale_fill_viridis_c("posterior means of smoothed counts", option = "A", trans='sqrt', breaks = c(1, 2, 3, 5, 10, 20), direction = -1)
ggsave(plot=g, filename=paste0("../figures/", prefix, "prediction-subset-map2", postfix, ".pdf"), width = 8/1.5, height = 4.5/1.5)
ggsave(plot=g, filename=paste0("../figures/", prefix, "prediction-subset-map2", postfix, ".tiff"), width = 8/1.5, height = 4.5/1.5)
# data2$pred[data2$high < 1] <- NA
# data2sub$pred[data2sub$high < 1] <- NA
# pdf(paste0("../figures/", prefix, "prediction-map-r0", postfix, ".pdf"), width = 12, height = 9)
# g <- mapPlot2(data = data2, geo = geo, variables = c("time"), values = c("pred"), by.data = "regionname", by.geo = byGEO, is.long = TRUE, border = "gray80") + scale_fill_viridis_c("posterior means of smoothed counts",na.value="white")+ map.theme + coord_map() + theme(legend.position = "bottom",
# legend.title = element_text(size=12),
# legend.key.width = unit(2, "cm"),
# legend.key.height = unit(.5, "cm"))
# print(g)
# dev.off()
# pdf(paste0("../figures/", prefix, "prediction-subset-map-r0", postfix, ".pdf"), width = 12, height = 4.7)
# g <- mapPlot2(data = data2sub, geo = geo, variables = c("time"), values = c("pred"), by.data = "regionname", by.geo = byGEO, is.long = TRUE, border = "gray80", ncol=7) + scale_fill_viridis_c("posterior means of smoothed counts",na.value="white")+ map.theme + coord_map() + theme(legend.position = "bottom",
# legend.title = element_text(size=12),
# legend.key.width = unit(2, "cm"),
# legend.key.height = unit(.4, "cm"))
# print(g)
# dev.off()
# data2$pred[data2$high < 3] <- NA
# data2sub$pred[data2sub$high < 3] <- NA
# pdf(paste0("../figures/", prefix, "prediction-map-r2", postfix, ".pdf"), width = 12, height = 9)
# g <- mapPlot2(data = data2, geo = geo, variables = c("time"), values = c("pred"), by.data = "regionname", by.geo = byGEO, is.long = TRUE) + scale_fill_viridis_c("posterior means of smoothed counts",na.value="white")+ map.theme + coord_map() + theme(legend.position = "bottom",
# legend.title = element_text(size=12),
# legend.key.width = unit(2, "cm"),
# legend.key.height = unit(.5, "cm"))
# print(g)
# dev.off()
# pdf(paste0("../figures/", prefix, "prediction-subset-map-r2", postfix, ".pdf"), width = 12, height = 4.7)
# g <- mapPlot2(data = data2sub, geo = geo, variables = c("time"), values = c("pred"), by.data = "regionname", by.geo = byGEO, is.long = TRUE, ncol=7) + scale_fill_viridis_c("posterior means of smoothed counts",na.value="white")+ map.theme + coord_map() + theme(legend.position = "bottom",
# legend.title = element_text(size=12),
# legend.key.width = unit(2, "cm"),
# legend.key.height = unit(.4, "cm"))
# print(g)
# dev.off()
# data2$counts[data2$counts < 1] <- NA
# data2sub$counts[data2sub$counts < 1] <- NA
# pdf(paste0("../figures/", prefix, "prediction-map-r0-observed", postfix, ".pdf"), width = 12, height = 9)
# g <- mapPlot2(data = data2, geo = geo, variables = c("time"), values = c("counts"), by.data = "regionname", by.geo = byGEO, is.long = TRUE, border = "gray80") + scale_fill_viridis_c("observed counts",na.value="white")+ map.theme + coord_map() + theme(legend.position = "bottom",
# legend.title = element_text(size=12),
# legend.key.width = unit(2, "cm"),
# legend.key.height = unit(.5, "cm"))
# print(g)
# dev.off()
# pdf(paste0("../figures/", prefix, "prediction-map-r0-observed", postfix, ".pdf"), width = 12, height = 4.7)
# g <- mapPlot2(data = data2sub, geo = geo, variables = c("time"), values = c("counts"), by.data = "regionname", by.geo = byGEO, is.long = TRUE, border = "gray80", ncol=7) + scale_fill_viridis_c("observed counts",na.value="white")+ map.theme + coord_map() + theme(legend.position = "bottom",
# legend.title = element_text(size=12),
# legend.key.width = unit(2, "cm"),
# legend.key.height = unit(.4, "cm"))
# print(g)
# dev.off()
if(byGEO == "GEOID" && FALSE){
data2$SNA <- geobg$table[match(as.character(data2[, byDATA]), geobg$table$bg), 1]
data2$groups = data2[, byDATA]
pdf("../figures/", prefix, "neighborhoods-1.pdf", width = 10, height = 3)
for(ii in 1:length(unique(data2$SNA))){
toplot <- unique(data2$SNA)[ii]
g1 <- ggplot(subset(data2, SNA %in% toplot), aes(x = Date, y = pred, group = groups, color = SNA, ymin = low, ymax=high)) + geom_point(aes(y = counts), color = "gray30")+ geom_line(alpha=.5) + geom_errorbar(alpha=.3) + scale_colour_manual(values=colorRampPalette(brewer.pal(name="Set1", n = 8)[-6])(length(unique(data2$SNA))), guide=FALSE) + line.theme + ggtitle(" ") + ylab(" ")+ xlab("Month") + facet_wrap(~regionname, nrow = 1) + ggtitle(c(as.character(toplot)))
print(g1)
}
dev.off()
}else{
data2$SNA <- data2[, byDATA]
}
# data2$risk <- data2$counts/data2$population
# data2$risk.smooth <- data2$pred/data2$population
# data2$risk.low <- log(data2$low/data2$population)
# data2$risk.high <- log(data2$high/data2$population)
# # range <- range(c(data2$risk, data2$risk.smooth))
# ggplot(data2, aes(x = counts, y = pred, color = Date), alpha = 0.05) + geom_point() + geom_abline(intercept = 0, slope = 1) + facet_wrap(~Date)
# pdf(paste0("../figures/", prefix, "prediction-vs-truth-map", postfix, ".pdf"), width = 12, height = 16)
# data2$Datetext <- as.character(data2$Date)
# g <- mapPlot(data = subset(data2, Year == 2017), geo = geo, variables = c("Datetext"), values = c("pred"), by.data = "regionname", by.geo = "GEOID", is.long = TRUE) + scale_fill_viridis_c("counts")+ map.theme + coord_map() + theme(legend.position = c(0.9, 0.05), legend.direction="horizontal")
# g0 <- mapPlot(data = subset(data2, Year == 2017), geo = geo, variables = c("Datetext"), values = c("counts"), by.data = "regionname", by.geo = "GEOID", is.long = TRUE) + scale_fill_viridis_c("counts")+ map.theme + coord_map() + theme(legend.position = c(0.9, 0.05), legend.direction="horizontal")
# dev.off()
# data2$logrisk <- log(data2$risk)
# data2$logrisk.smooth <- log(data2$risk.smooth)
# data2$sd.naive <- sqrt(data2$counts / data2$population^2)
# data2$sd.smooth <- sqrt(data2$var / data2$population^2)
# range <- range(c(data2$sd.naive, data2$sd.smooth))
# pdf(paste0("../figures/", prefix, "SD.pdf", width = 7, height = 5)
# g <- ggplot(data2, aes(x = sd.naive, y = sd.smooth, size = population)) + geom_point(alpha = 0.2, color = "black") + geom_abline(intercept = 0, slope = 1) + theme_bw() + xlim(range) + ylim(range) + xlab("Naive SE") + ylab("Posterior SD")
# print(g)
# dev.off()
# g1 <- ggplot(data2, aes(x = risk.smooth, y = sd.smooth, size = population)) + geom_point(alpha = 0.2, color = "gray10") + geom_abline(intercept = 0, slope = 1) + theme_bw() + ylim(range)
# g2 <- ggplot(data2, aes(x = risk, y = sd.naive, size = population)) + geom_point(alpha = 0.2, color = "gray10") + geom_abline(intercept = 0, slope = 1) + theme_bw() + ylim(range)
# grid.arrange(g1,g2, ncol = 2)
# if(byGEO == "GEOID"){
# pdf(paste0("../figures/", prefix, "neighborhoods-1.pdf", width = 10, height = 3)
# for(ii in 1:length(unique(data2$SNA))){
# toplot <- unique(data2$SNA)[ii]
# g1 <- ggplot(subset(data2, SNA %in% toplot), aes(x = Date, y = logrisk.smooth, group = group, color = SNA, ymin = risk.low, ymax=risk.high)) + geom_point(aes(y = logrisk), color = "gray30")+ geom_line(alpha=.5) + geom_errorbar(alpha=.3) + scale_colour_manual(values=colorRampPalette(brewer.pal(name="Set1", n = 8)[-6])(length(unique(data2$SNA))), guide=FALSE) + line.theme + ggtitle(" ") + ylab(" ")+ xlab("Month") + facet_wrap(~regionname, nrow = 1) + ggtitle(c(as.character(toplot)))
# print(g1)
# }
# dev.off()
# pdf(paste0("../figures/", prefix, "neighborhoods-2.pdf", width = 10, height = 3)
# for(ii in 1:length(unique(data2$SNA))){
# toplot <- unique(data2$SNA)[ii]
# total <- aggregate(counts~regionname+SNA+Date, data2, FUN = sum)
# ordered <- as.character(format(sort(unique(total$Date)), "%b %Y"))
# total$Date <-as.character(format(total$Date, "%b %Y"))
# total$Date <- factor(total$Date, ordered)
# total$risk <- total$count / data2$population[match(total$regionname, data2$regionname)]
# total$logrisk <- log(total$risk)
# # total$logrisk[total$SNA %in% toplot == FALSE] <- NA
# geo1 <- subset(geo, GEOID %in% total$regionname[total$SNA %in% toplot])
# geo2 <- fortify(geo1, region = "GEOID")
# geo2 <- by(geo2, geo2$id, function(x) {Polygon(x[c('long', 'lat')])@labpt})
# centroids <- setNames(do.call("rbind.data.frame", geo2), c('long', 'lat'))
# centroids$label <- names(geo2)
# g2 <- mapPlot(data = total, geo = geo1, values = c("logrisk"), variables = "Date", by.data = "regionname", by.geo = "GEOID" ,is.long = TRUE)+ggplot2::facet_wrap(~variable, nrow = 3) + scale_fill_viridis_c("log total incidents per resident")+ map.theme + coord_map() + theme(legend.position="bottom") + ggtitle(c(as.character(toplot)))#+ geom_label_repel(data = centroids, aes(x = long, y = lat, label = label), force = 100, alpha = 0.5, size = 0.3)
# print(g2)
# }
# dev.off()
}
|
de993e99061f43988ac4c5dee2801291a62d4956 | 316aa351ff9f4b242fd79cf388aecd5bd5ad4f21 | /R/globals.R | fe3def572b7e460a0109e503ee234c3bcab0701a | [] | no_license | jmhewitt/dsdive | 5d24b6f9d3e39e9d8fc33c12b6172126a3769dd0 | bb55540452917d0c81e039e9ed6fb79f9b233131 | refs/heads/master | 2022-09-23T06:18:55.498742 | 2020-05-28T19:36:01 | 2020-05-28T19:36:01 | 224,232,874 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 397 | r | globals.R | #' @useDynLib dsdive, .registration = TRUE
# Register the variable names that are referenced via non-standard
# evaluation elsewhere in the package, so that R CMD check does not
# report them as undefined global variables.
globalVariables(c(
  "ind", "depth.bin", "t.start", "t.end", "depth.min", "depth.max",
  "stages", "depth.mid", "local_n", "local_ids", "beta1", "beta2",
  "alpha1", "alpha2", "alpha3", "pi.designs", "lambda.designs",
  "tstep", "depth.bins", "delta", "dsobs.list", "t.stages", "myinfo"
))
097f18d60641b677f0310d15ea2a3c73c9861b2d | 735f948ce890b407ef90c5406387285d02e942c2 | /rankhospital.R | 53bce87749e8b0af9a36ecf405f76cd5dd2db76c | [] | no_license | LL8054/Week-4 | 03f97710b4ca77d5ff23458bd9ea385c842916ab | 2bcc53c8684cafc8fd2d0dbb166a600b52b6402f | refs/heads/master | 2021-01-01T05:51:30.133813 | 2014-09-26T04:26:58 | 2014-09-26T04:26:58 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,671 | r | rankhospital.R | ## Ranks hospitals in the requested state according to the inputted outcome
## and returns the requested rank.
## Rank hospitals within a state by 30-day mortality for the given outcome
## and return the name of the hospital holding the requested rank.
##
## state: two-letter state abbreviation (column 7 of the data file)
## outcome: one of "heart attack", "heart failure", "pneumonia"
## num: integer rank, or "best" / "worst"; NA is returned when the rank
##      exceeds the number of hospitals with a reported rate
rankhospital <- function(state, outcome, num = "best") {
  ## Read the outcomes file; "Not Available" entries become NA.
  data <- read.csv("outcome-of-care-measures.csv", na.strings = "Not Available",
                   colClasses = "character")
  ## Validate the state abbreviation.
  if (is.na(match(state, unique(data[, 7])))) {
    stop("invalid state")
  }
  ## Validate the outcome and map it to its 30-day mortality-rate column.
  outcome_col <- switch(outcome,
                        "heart attack" = 11,
                        "heart failure" = 17,
                        "pneumonia" = 23)
  if (is.null(outcome_col)) {
    stop("invalid outcome")
  }
  ## Keep only this state's hospitals that actually report a rate.
  ## (Previously NA rates were ranked too, so "worst" or a high rank could
  ## return a hospital with no data.)
  master <- data[data[, 7] == state, ]
  master[, outcome_col] <- as.numeric(master[, outcome_col])
  master <- master[!is.na(master[, outcome_col]), ]
  ## Order by rate, breaking ties alphabetically by hospital name (column 2).
  master_sorted <- master[order(master[, outcome_col], master[, 2]), ]
  ## Resolve "best"/"worst"; ranks outside the table give NA.
  if (identical(num, "best")) num <- 1
  if (identical(num, "worst")) num <- nrow(master_sorted)
  num <- suppressWarnings(as.numeric(num))
  if (is.na(num) || num < 1 || num > nrow(master_sorted)) {
    return(NA)
  }
  ## Always return (never just print()) the name, so the result can be
  ## assigned; the original "worst" branch only printed it.
  master_sorted[num, 2]
}
61e309fe2153f78399c84d3cae1f826cd11aebad | 77c584d0ece8f1a55ab70826d8ff88e599d6d80a | /R/readrm.r | 02b4570e681219e4428920c8fec669bf4ef29563 | [] | no_license | cran/rmutil | 696f8c192accdeceb0ba30ee397d3548eda17a77 | 07d0956d37bd75500f8385036bb123a16b459510 | refs/heads/master | 2022-11-05T17:34:44.072693 | 2022-10-27T03:32:35 | 2022-10-27T03:32:35 | 17,722,019 | 3 | 1 | null | 2019-07-05T16:13:23 | 2014-03-13T18:48:48 | R | UTF-8 | R | false | false | 12,734 | r | readrm.r | #
# rmutil : A Library of Special Functions for Repeated Measurements
# Copyright (C) 1998, 1999, 2000, 2001 J.K. Lindsey
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public Licence as published by
# the Free Software Foundation; either version 2 of the Licence, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public Licence for more details.
#
# You should have received a copy of the GNU General Public Licence
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# SYNOPSIS
#
# read.list(file="", skip=0, nlines=2, order=NULL)
# read.surv(file="", skip=0, nlines=1, cumulative=T, all=T)
# read.rep(file, header=TRUE, skip=0, col.names=NULL,
# response, id=NULL, times=NULL, censor=NULL,
# totals=NULL, weights=NULL, nest=NULL, delta=NULL,
# coordinates=NULL, type=NULL, ccov=NULL, tvcov=NULL, na.rm=T)
#
# DESCRIPTION
#
# Utility functions for reading repeated measurements data
### read unbalanced repeated measurements from a file into a list
###
read.list <- function(file = "", skip = 0, nlines = 2, order = NULL) {
  ## Read successive groups of 'nlines' lines from 'file' until end of file.
  ## Each group becomes one matrix (one column per line of the group);
  ## 'order', if supplied, permutes those columns.
  ## The list of matrices is returned invisibly.
  if (!is.null(order)) {
    ## 'order' must index every one of the nlines columns exactly
    if (length(order) != nlines) {
      stop("order must have length", nlines, "\n")
    } else if (any(range(order) != c(1, nlines))) {
      stop("order must have values in (", c(1, nlines), ")\n")
    }
  }
  out <- list()
  repeat {
    ## scan() reopens the file on every pass; 'skip' walks through it in
    ## nlines-sized steps until nothing remains to be read
    vals <- scan(file, skip = skip, nlines = nlines, quiet = TRUE)
    if (length(vals) == 0) break
    skip <- skip + nlines
    grp <- matrix(vals, ncol = nlines)
    if (!is.null(order)) grp <- grp[, order]
    out[[length(out) + 1]] <- grp
  }
  invisible(out)
}
### read unbalanced event history data from a file into a list
###
read.surv <- function(file = "", skip = 0, nlines = 1, cumulative = TRUE,
                      all = TRUE) {
  ## Read one record of 'nlines' lines per iteration until end of file.
  ## Invisibly returns a two-element list: a list of time vectors (one per
  ## record) and a vector holding each record's final censoring indicator.
  times <- list()
  cens <- NULL
  repeat {
    vals <- scan(file, skip = skip, nlines = nlines, quiet = TRUE)
    if (length(vals) == 0) break
    skip <- skip + nlines
    if (all) {
      ## times alternate with censoring indicators on the line: keep times
      tt <- matrix(vals, ncol = 2, byrow = TRUE)[, 1]
    } else {
      ## every value but the last is a time
      tt <- vals[1:(length(vals) - 1)]
    }
    ## convert cumulative times to times between events
    if (cumulative) tt <- c(tt[1], diff(tt))
    times[[length(times) + 1]] <- tt
    ## only the censoring indicator of the record's last time is retained
    cens <- c(cens, vals[length(vals)])
  }
  invisible(list(times, cens))
}
### read a rectangular data set from a file and create a repeated data object
###
read.rep <- function(file, header=TRUE, skip=0, sep = "",
	na.strings="NA", response, id=NULL, times=NULL, censor=NULL,
	totals=NULL, weights=NULL, nest=NULL, delta=NULL,
	coordinates=NULL, type=NULL, ccov=NULL, tvcov=NULL, na.rm=TRUE){
#
# read a rectangular data set with read.table() and build a "repeated"
# object: a response component (class "response") plus optional
# time-constant ("tccov") and time-varying ("tvcov") covariate components
#
# file, header, skip, sep, na.strings: passed to read.table()
# response: name(s) of the response column(s) (required, numeric)
# id: name of the column identifying individuals
# times: name of the column of observation times
# censor, totals: one column name per response column
# weights, nest: single column names
# delta: measurement precision - a positive scalar, one value per
#	response, or one column name per response
# coordinates: names of 2 or 3 columns of spatial coordinates
# type: measurement type of each response (default "unknown")
# ccov, tvcov: names of time-constant and time-varying covariates
# na.rm: if TRUE, remove records containing missing values
#
if(missing(response)||!is.character(response))
	stop("name(s) of response variables must be supplied")
if(missing(file)||!is.character(file))
	stop("a file name must be supplied")
dataframe <- read.table(file,header=header,skip=skip,
	na.strings=na.strings,sep=sep)
#
# find response information and construct object
#
cn <- colnames(dataframe)
nc <- match(response,cn)
if(any(is.na(nc)))stop(paste("response",response[is.na(nc)],"not found"))
if(!is.numeric(z <- as.matrix(dataframe[,nc,drop=FALSE])))
	stop("response must be numeric")
z <- list(response=list(y=z,nobs=NULL,times=NULL,nest=NULL,coordinates=NULL,
	censor=NULL,n=NULL,wt=NULL,delta=NULL,units=NULL,type=NULL),
	ccov=NULL,tvcov=NULL)
class(z) <- "repeated"
class(z$response) <- "response"
tobs <- dim(z$response$y)[1]
nrcol <- dim(z$response$y)[2]
# check or initialize the measurement type of each response column
if(is.null(type))z$response$type <- rep("unknown",nrcol)
else if(length(type)!=nrcol)stop("a type must be supplied for each response")
else {
	for(i in 1:length(type))
		z$response$type[i] <- match.arg(type[i],
			c("nominal","ordinal","discrete","duration","continuous","unknown"))
	if(any(is.na(z$response$type)))
		z$response$type[is.na(z$response$type)] <- "unknown"}
# rna flags the records with no missing value in any variable used
rna <- rep(TRUE,tobs)
for(i in 1:nrcol)rna <- rna&!is.na(z$response$y[,i])
# determine the number of observations per individual
if(is.null(id))
	z$response$nobs <- if(is.null(times)) rep(1,tobs) else tobs
else {
	if(!is.character(id)||length(id)>1)
		stop("id must be the name of one variable")
	nc <- match(id,cn)
	if(is.na(nc))stop("id not found")
	id <- as.vector(dataframe[,nc])
	if(is.character(id)||is.factor(id))id <- as.numeric(as.factor(id))
	else if(any(diff(id)!=0&diff(id)!=1,na.rm=TRUE))
		warning("id not consecutively numbered")
	nobs <- table(id)
	z$response$nobs <- as.vector(nobs[match(unique(id),names(nobs))])}
# each individual's records must be contiguous in the file
if(any(z$response$nobs!=1)&&length(z$response$nobs)>1)for(i in unique(id)){
	if(any(diff((1:tobs)[id==i])>1,na.rm=TRUE))
		stop(paste("observations for individual",i,"not together in table"))}
# nesting structure within individuals
if(!is.null(nest)){
	if(all(z$response$nobs==1))
		stop("these are not repeated measurements - nest not valid")
	if(!is.character(nest)||length(nest)>1)
		stop("nest must be the name of one variable")
	nc <- match(nest,cn)
	if(is.na(nc))stop("nest not found")
	z$response$nest <- as.vector(dataframe[,nc])
	if(is.character(z$response$nest))
		z$response$nest <- as.numeric(as.factor(z$response$nest))
	else if(!is.numeric(z$response$nest))stop("nest must be numeric")
	rna <- rna&!is.na(z$response$nest)}
# observation times
if(!is.null(times)){
	if(!is.character(times)||length(times)>1)
		stop("times must be the name of one variable")
	nc <- match(times,cn)
	if(is.na(nc))stop("times not found")
	z$response$times <- as.vector(dataframe[,nc])
	if(!is.numeric(z$response$times))stop("times must be numeric")
	rna <- rna&!is.na(z$response$times)}
# times must be nondecreasing and nest consecutively numbered within each
# individual
if(!is.null(times)||!is.null(nest))for(i in unique(id)){
	if(!is.null(times)&&any(diff(z$response$times[id==i])<0,na.rm=TRUE))
		stop(paste("negative time step for individual",i))
	if(!is.null(nest)&&any(diff(z$response$nest[id==i])!=0&
		diff(z$response$nest[id==i])!=1,na.rm=TRUE))
		stop(paste("nest for individual",i,"not consecutively numbered"))}
# censoring indicators (-1, 0, 1); their presence marks durations
if(!is.null(censor)){
	if(!is.character(censor)||length(censor)!=nrcol)
		stop("censor must have one name per response variable")
	nc <- match(censor,cn)
	if(any(is.na(nc)))stop("censor",censor[is.na(nc)],"not found")
	z$response$censor <- as.matrix(dataframe[,nc,drop=FALSE])
	if(!is.numeric(z$response$censor))stop("censor must be numeric")
	if(any(z$response$censor!=1&z$response$censor!=0&
		z$response$censor!=-1,na.rm=TRUE))
		stop("censor can only have values, -1, 0, 1")
	for(i in 1:nrcol)if(!all(is.na(z$response$censor[,i]))){
		rna <- rna&!is.na(z$response$censor[,i])
		if(z$response$type[i]=="unknown")
			z$response$type[i] <- "duration"}}
# binomial totals; their presence marks nominal responses
if(!is.null(totals)){
	if(!is.character(totals)||length(totals)!=nrcol)
		stop("totals must have one name per response variable")
	nc <- match(totals,cn)
	if(any(is.na(nc)))stop("totals",totals[is.na(nc)],"not found")
	z$response$n <- as.matrix(dataframe[,nc,drop=FALSE])
	if(!is.numeric(z$response$n))stop("totals must be numeric")
	if(any(z$response$y<0|z$response$n<z$response$y,na.rm=TRUE))
		stop("responses must lie between 0 and totals")
	for(i in 1:nrcol)if(!all(is.na(z$response$n[,i]))){
		rna <- rna&!is.na(z$response$n[,i])
		if(z$response$type[i]=="unknown")
			z$response$type[i] <- "nominal"}}
# measurement precision; its presence marks continuous responses
if(!is.null(delta)){
	if(is.numeric(delta)){
		if(length(delta)==1)
			z$response$delta <- matrix(delta,ncol=nrcol,nrow=tobs)
		else if(length(delta)==nrcol)
			z$response$delta <- matrix(rep(delta,tobs),ncol=nrcol,nrow=tobs,byrow=TRUE)
		else stop("delta must contain one value for each response")
		if(any(z$response$type=="unknown"))for(i in 1:nrcol)
			if(z$response$type[i]=="unknown")
				z$response$type[i] <- "continuous"}
	else {
		if(!is.character(delta)||length(delta)!=nrcol)
			stop("delta must have one name per response variable")
		nc <- match(delta,cn)
		if(any(is.na(nc)))stop("delta",delta[is.na(nc)],"not found")
		z$response$delta <- as.matrix(dataframe[,nc,drop=FALSE])
		if(!is.numeric(z$response$delta))stop("delta must be numeric")
		# the positivity check applies to delta itself (previously it
		# mistakenly tested the response values z$response$y)
		if(any(z$response$delta<=0,na.rm=TRUE))
			stop("delta must be strictly positive")
		for(i in 1:nrcol)if(!all(is.na(z$response$delta[,i]))){
			rna <- rna&!is.na(z$response$delta[,i])
			if(z$response$type[i]=="unknown")
				z$response$type[i] <- "continuous"}}}
# observation weights
if(!is.null(weights)){
	# the length check applies to weights itself (previously it
	# mistakenly tested length(times), so a vector of weight names
	# slipped through)
	if(!is.character(weights)||length(weights)>1)
		stop("weights must be the name of one variable")
	nc <- match(weights,cn)
	if(is.na(nc))stop("weights not found")
	z$response$wt <- as.vector(dataframe[,nc])
	if(!is.numeric(z$response$wt))stop("weights must be numeric")
	rna <- rna&!is.na(z$response$wt)}
# spatial coordinates
if(!is.null(coordinates)){
	if(!is.character(coordinates)||(length(coordinates)!=2&&
		length(coordinates)!=3))
		stop("coordinates must be the name of 2 or 3 variables")
	nc <- match(coordinates,cn)
	if(any(is.na(nc)))
		stop("coordinates",coordinates[is.na(nc)],"not found")
	z$response$coordinates <- as.matrix(dataframe[,nc,drop=FALSE])
	if(!is.numeric(z$response$coordinates))
		stop("coordinates must be numeric")
	for(i in 1:length(coordinates))
		rna <- rna&!is.na(z$response$coordinates[,i])}
#
# find time-varying covariates
#
if(!is.null(tvcov)){
	if(all(z$response$nobs==1))
		stop("these are not repeated measurements - tvcov not valid")
	z$tvcov <- list(tvcov=NULL,nobs=z$response$nobs)
	class(z$tvcov) <- "tvcov"
	nc <- match(tvcov,cn)
	if(any(is.na(nc)))stop("tvcov",tvcov[is.na(nc)],"not found")
	z$tvcov$tvcov <- dataframe[,nc,drop=FALSE]
	for(i in 1:length(tvcov))rna <- rna&!is.na(z$tvcov$tvcov[,i])
# if no factor variables present, return a matrix anyway
	fac <- FALSE
	for(i in 1:dim(z$tvcov$tvcov)[2])
		if(!is.vector(z$tvcov$tvcov[,i],mode="numeric")){
			fac <- TRUE
			break}
	if(!fac)z$tvcov$tvcov <- as.matrix(z$tvcov$tvcov)}
#
# find time-constant covariates
#
if(!is.null(ccov)){
	z$ccov <- list(ccov=NULL)
	class(z$ccov) <- "tccov"
	nc <- match(ccov,cn)
	if(any(is.na(nc)))stop("ccov",ccov[is.na(nc)],"not found")
	z$ccov$ccov <- dataframe[,nc,drop=FALSE]
	# each time-constant covariate may take only one value per individual
	for(i in unique(id))for(j in 1:length(ccov))
		if(sum(!is.na(unique(z$ccov$ccov[id==i,j])))>1)
			stop(paste("ccov",ccov[j],"for individual",i,"not constant"))
	for(i in 1:length(ccov))rna <- rna&!is.na(z$ccov$ccov[,i])
	# keep one row per individual (the first record of each)
	j <- c(0,cumsum(z$response$nobs)[-length(z$response$nobs)])+1
	z$ccov$ccov <- z$ccov$ccov[j,,drop=FALSE]
# if no factor variables present, return a matrix anyway
	fac <- FALSE
	for(i in 1:dim(z$ccov$ccov)[2])
		if(!is.vector(z$ccov$ccov[,i],mode="numeric")){
			fac <- TRUE
			break}
	if(!fac)z$ccov$ccov <- as.matrix(z$ccov$ccov)}
#
# remove NAs
#
if(na.rm&&any(!rna)){
# remove NAs from variables associated with response
	z$response$y <- z$response$y[rna,,drop=FALSE]
	if(!is.null(z$response$times))z$response$times <- z$response$times[rna]
	if(!is.null(z$response$nest))z$response$nest <- z$response$nest[rna]
	if(!is.null(z$response$coordinates))
		z$response$coordinates <- z$response$coordinates[rna,]
	if(!is.null(z$response$n))z$response$n <- z$response$n[rna,,drop=FALSE]
	if(!is.null(z$response$censor)){
		z$response$censor <- z$response$censor[rna,,drop=FALSE]
		if(all(z$response$censor==1))z$response$censor <- NULL}
	if(!is.null(z$response$delta)&&length(z$response$delta)>1)
		z$response$delta <- z$response$delta[rna,,drop=FALSE]
	if(!is.null(z$tvcov))z$tvcov$tvcov <- z$tvcov$tvcov[rna,,drop=FALSE]
# correct nobs
	tmp <- NULL
	j <- c(0,cumsum(z$response$nobs))
	for(i in 1:length(z$response$nobs)){
		tmp <- c(tmp,sum(rna[(j[i]+1):j[i+1]]))
		if(tmp[i]==0)
			warning(paste("Individual",i,"has no observations"))}
	z$response$nobs <- tmp[tmp>0]
# remove NAs from ccov
	if(!is.null(z$ccov)){
		z$ccov$ccov <- z$ccov$ccov[tmp>0,,drop=FALSE]
		for(i in 1:dim(z$ccov$ccov)[2])
			if(length(unique(z$ccov$ccov[,i]))==1)
				warning(paste("covariate",colnames(z$ccov$ccov)[i],"has only one value\n"))}
# remove NAs from tvcov
	if(!is.null(z$tvcov)){
		z$tvcov$nobs <- z$response$nobs
		for(i in 1:dim(z$tvcov$tvcov)[2])
			if(length(unique(z$tvcov$tvcov[,i]))==1)
				warning(paste("covariate",colnames(z$tvcov$tvcov)[i],"has only one value\n"))}}
if(!na.rm&&any(!rna))z$NAs <- !rna
#
# if independent observations, reset nobs
#
if(all(z$response$nobs==1))z$response$nobs <- 1
z}
|
2bcb3621c922a219715829be2f24eaa3ac3104ba | 4406e1ee6cdd13839904586c951d3f3b48464843 | /inst/doc/r181missFunction.r | e55c74475bf2aefca2badf48304455eddb108819 | [] | no_license | cran/erer | 3ffc4f2572a3fb054737935919dc9e100af921fd | 6a9996e01e89dc7c6a5fac6375701e18c25fb403 | refs/heads/master | 2022-05-01T02:02:51.406564 | 2022-04-18T14:44:30 | 2022-04-18T14:44:30 | 17,695,848 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 120 | r | r181missFunction.r | miss <- function(x) {
# intermediate value: twice the input
y <- x * 2
# 'lost' uses the free variable 'w', which is defined nowhere in this file;
# by lexical scoping R searches lost()'s environment, then miss()'s, then
# the global environment, so calling miss() fails with
# "object 'w' not found" unless 'w' exists globally -- presumably a
# deliberate demonstration of a missing-variable error (file name:
# r181missFunction.r).
lost <- function(y) {z <- y + w; return(z)}
m <- lost(y)
# when 'w' is found, the value returned is 2 * x + w + 100
return(m + 100)
}
16587d09dd03f0700f69c5a9d140db4706f2482d | f3aa5a88dc2433d0ff8b4c9d1052e4562661242a | /R/AllGenerics.R | e6a2fb29f6053d2a62a2177769de5a0dfa9e9a24 | [] | no_license | drisso/RUVSeq | 22c558d8d3fadbc8ac5e96dd635743da211d8a12 | f560e84eb493f0c2d72279612f9e4ca99493b434 | refs/heads/master | 2022-11-29T00:31:53.759192 | 2022-11-22T10:09:10 | 2022-11-22T10:09:10 | 141,323,453 | 13 | 3 | null | null | null | null | UTF-8 | R | false | false | 634 | r | AllGenerics.R | setGeneric(
name = "RUVg",
def = function(x, cIdx, k, drop=0, center=TRUE, round=TRUE, epsilon=1, tolerance=1e-8, isLog=FALSE) {
standardGeneric("RUVg")
}
)
## S4 generic for RUVs.  Only the generic is declared here; concrete
## methods are registered elsewhere with setMethod().
setGeneric("RUVs",
  function(x, cIdx, k, scIdx, round=TRUE, epsilon=1, tolerance=1e-8,
           isLog=FALSE) {
    standardGeneric("RUVs")
  })
## S4 generic for RUVr.  Only the generic is declared here; concrete
## methods are registered elsewhere with setMethod().
setGeneric("RUVr",
  function(x, cIdx, k, residuals, center=TRUE, round=TRUE, epsilon=1,
           tolerance=1e-8, isLog=FALSE) {
    standardGeneric("RUVr")
  })
|
3ed7def9969fc015e5dac4b31db18fccf53daabc | 38b622cce50d55aab89a0f0c5a40dbb8cba2a42c | /assignment1.R | 05ab9d2c874f8bb4af08c0fb68f87fafcebaee51 | [] | no_license | jkbasley/ExData_Plotting1 | e297545c903d10a4ad6fd0d152179b7b9f1f2a2d | 67a4b433560f055c2b1230e8936ebefda5dc6bd0 | refs/heads/master | 2021-01-12T08:16:47.499444 | 2016-12-19T03:53:23 | 2016-12-19T03:53:23 | 76,528,375 | 0 | 0 | null | 2016-12-15T05:55:41 | 2016-12-15T05:55:41 | null | UTF-8 | R | false | false | 630 | r | assignment1.R | ## set Working Directory to access File
## Working directory for the course data files (unchanged from original).
setwd("D:/Users/jbasley/Documents/R Programming/Exploratory Data Analysis")
library(lubridate)  # attached by the original script; not otherwise used here
## Read the raw household power data; "?" encodes missing measurements.
raw <- read.table(
  "exdata-data-household_power_consumption/household_power_consumption.txt",
  header = TRUE, sep = ";", na.strings = "?"
)
## Parse the Date column, then keep only the two days of interest
## (the character bounds are coerced to Date by Ops.Date).
raw$Date <- as.Date(raw$Date, format = "%d/%m/%Y")
in_window <- raw$Date >= "2007/02/01" & raw$Date <= "2007/02/02"
feb_data <- raw[in_window, ]
## Round-trip Time through strptime()/format() to normalise it to an
## "HH:MM:SS" character column.
feb_data$Time <- format(strptime(as.character(feb_data$Time), format = "%H:%M:%S"),
                        "%H:%M:%S")
## Drop intermediates so only feb_data remains in the workspace,
## mirroring the original script's rm(data).
rm(raw, in_window)
|
4b97a92cd38fc385e50e4987326b1c00232eaf56 | a614ccb2e893de94293328f794c702d8377810f5 | /climatechange/global.R | 9c7980c070be65ea0ade1b95636dae6482583437 | [] | no_license | zylstraa/climatechangeapp | 0f0f3d44f12e5a4fa5e4eb9d8a0fe55c36190972 | 75ea7e61d6626f52d117cb8f2f6c2d7140601e7f | refs/heads/main | 2023-02-22T17:48:50.011503 | 2021-01-29T00:56:41 | 2021-01-29T00:56:41 | 327,152,831 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,649 | r | global.R | library(shiny)
library(tidyverse)
library(dismo)
library(plotly)
library(rnaturalearth)
library(rgeos)
library(sf)
# ---- Raw data ingest (all CSVs live under data/) ----
# Annual & monthly global temperature anomalies
# (https://datahub.io/core/global-temp#data)
global_temp <- read_csv('data/globaltempannual.csv')
global_tempmonth <- read_csv('data/global_monthly_tempanom.csv')
# Atmospheric CO2 concentration (https://datahub.io/core/co2-ppm)
CO2 <- read_csv('data/co2airlevels.csv')
# National fossil-fuel CO2 emissions
# (https://datahub.io/core/co2-fossil-by-nation#data)
ff_co2 <- read_csv('data/fossilfuelco2bycountry.csv')
# Forest area by country (World Bank indicator AG.LND.FRST.K2)
forest_area <- read_csv('data/forest_area.csv')
# Country -> continent lookup
# (https://datahub.io/JohnSnowLabs/country-and-continent-codes-list)
countrycodes <- read_csv('data/countrycodes.csv')
color1 <- c('#677c40','#788b55','#89996a','#9aa87f','#abb695','#bbc5aa', '#eef0e9')
color2 <- c('#101a17','#182d27','#1e4237','18383a','1b5356','1b7073',
'#245749', '#296e5b','#2e856e','355b35',
'3b613b', '416740', '476d46', '4d734c', '537952', '598058','#569985','#78ad9c',
'158e93','00adb3','53bbbf','7cc9cc',
'e0f1f2','c0e4e5','9fd6d8',
'#9ac1b4','#bbd6cc', '#ddeae5','#eef0e9', '#E7E8E9')
color3 <- c('c7e9eb','9aa87f','eef0e9')
# Sea-level rise, EPA/CSIRO series
# (https://datahub.io/core/sea-level-rise#resource-epa-sea-level)
sealevel <- read_csv('data/epa-sea-level.csv')
# Share of electricity from hydro (World Bank EG.ELC.HYRO.ZS) and from
# non-hydro renewables (EG.ELC.RNWX.ZS)
hydro <- read_csv('data/hydro.csv')
nonhydro <- read_csv('data/nonhydro.csv')
# Counts of threatened species per country (World Bank EN.*.THRD.NO)
fish <- read_csv('data/fish.csv')
mammal <- read_csv('data/mammal.csv')
bird <- read_csv('data/bird.csv')
plant <- read_csv('data/plant.csv')
#Natural disasters source: https://public.emdat.be/data (containing:
# Volcanic activity
# Ash fall
# Lahar
# Pyroclastic flow
# Lava flow
#
# Mass movement (dry)
# Rockfall
# Landslide
# Avalanche
# Subsidence
#
# Earthquake
# Ground movement
# Tsunami
#
# Meteorological
#
# Extreme temperature
# Cold wave
# Heat wave
# Severe winter conditions
# Fog
#
# Storm
# Convective storm
# Extra-tropical storm
# Tropical cyclone
#
#
# Wave action
# Rogue wave
# Seiche
#
# Landslide
# Avalanche
# Landslide
# Subsidence
# Rockfall
# Mudslide
#
# Flood
# Coastal flood
# Flash flood
# Wildfire
# Forest fire
# Land fire (Brush, Bush, Pasture)
# Glacial lake outburst
#
# Drought
# Famine
#Effects: Storms & Natural Disasters https://public.emdat.be/data
disaster <- read_csv('data/naturaldisasters.csv')
# ---- CAUSES: fossil-fuel CO2 emissions ----
# Bunker fuels are reported outside the national Total; fold them into a
# combined per-country total, Total_ff.
ff_co2 <- ff_co2 %>%
  rename('Bunker.fuel'='Bunker fuels (Not in Total)')
ff_co2 <- ff_co2 %>%
  mutate(Total_ff = Total+Bunker.fuel)
# Worldwide emissions per year since 1900.
global_ff <-
  ff_co2 %>%
  filter(Year>=1900) %>%
  group_by(Year) %>%
  summarise(Total = sum(Total_ff))
# ---- CAUSES: atmospheric CO2 trend ----
# Keep only the Trend column (renamed CO2_levels) and relabel dates "Mon YYYY".
CO2 <- CO2 %>%
  select(Date,Trend) %>%
  rename('CO2_levels'='Trend')
CO2$Date <- as.POSIXct(CO2$Date, format="%Y/%m/%d")
CO2$Date <- format(CO2$Date, format="%b %Y")
# Factor levels are set to the column's own (file) order so plots keep it.
CO2$Date <- factor(CO2$Date, levels = CO2[["Date"]])
# Axis tick positions (one February per 5 years) and their display labels.
datevals <- c('Feb 1960','Feb 1965','Feb 1970','Feb 1975','Feb 1980','Feb 1985','Feb 1990','Feb 1995','Feb 2000','Feb 2005','Feb 2010','Feb 2015')
showvals <- c('1960', '1965', '1970', '1975', '1980','1985','1990','1995','2000','2005','2010','2015')
# Donut-chart data for 2014: lump small emitters together.
# NOTE(review): 181333 / 40232 are hard-coded Total_ff cut-offs whose labels
# say they correspond to 2% / 1% contributions -- confirm (and recompute) if
# the underlying data are ever refreshed.
donut1 <-
  ff_co2 %>%
  filter(Year==2014) %>%
  arrange(desc(Total_ff)) %>%
  head(100) %>%
  select(Country,Total_ff) %>%
  mutate(Country= case_when(
    Total_ff <= 181333 ~'Countries with less than 2% contribution',
    Total_ff > 181333 ~Country
  )
  )
# Second ring: within the <=2% group, lump emitters below the 1% cut-off.
donut2 <-
  ff_co2 %>%
  filter(Year==2014) %>%
  arrange(desc(Total_ff)) %>%
  head(100) %>%
  select(Country,Total_ff) %>%
  filter(Total_ff <= 181333) %>%
  mutate(Country= case_when(
    Total_ff <= 40232 ~'Countries with less than 1% contribution',
    Total_ff > 40232 ~Country
  )
  )
# ---- CAUSES: deforestation ----
# World Bank forest-area columns are bare years ("1990", ...), which are not
# syntactic R names; prefix every column with "Y" so Y1990:Y2016 can be used
# as a tidy-select range below.
forest_area <- forest_area %>%
  dplyr::rename_all(function(x) paste0("Y", x)) %>%
  rename('CODE'='YCountry Code','Country'='YCountry Name')
# Keep only the continent-lookup columns, keyed by the 3-letter code.
countrycodes <- countrycodes %>%
  rename('CODE'='Three_Letter_Country_Code') %>%
  select('Continent_Name','Country_Name','CODE')
# Forest area summed per continent per year, reshaped to one column per
# continent.  NAs are zeroed before summing.
forest_bycont <- right_join(countrycodes,forest_area, by='CODE') %>%
  group_by(Continent_Name) %>%
  mutate_if(is.numeric,replace_na,0) %>%
  summarise_if(is.numeric, sum) %>%  # funs() is deprecated; pass the function
  head(-1) %>%                       # drop the final row of the summary
  pivot_longer(cols=Y1990:Y2016, names_to='Year',values_to='km_forest') %>%
  pivot_wider(names_from=Continent_Name,values_from=km_forest) %>%
  rename('North_America'='North America','South_America'='South America')
# Strip the "Y" prefix back off by extracting the 4-digit year.
# NOTE(review): str_extract_all() yields a *list* column; if downstream code
# needs a plain character/numeric Year, str_extract() would be the fix.
numbers <- '([0-9]{4})'
forest_bycont$Year <- str_extract_all(forest_bycont$Year,numbers)
# ---- EFFECTS ----
# Sea level: drop the NOAA series and give the remaining columns
# syntactic names.
sealevel <-
  sealevel %>%
  select(-`NOAA Adjusted Sea Level`) %>%
  rename('Upper_Error'='Upper Error Bound', 'Lower_Error' = 'Lower Error Bound', 'Avg_level_rise'='CSIRO Adjusted Sea Level')
# Floods per year since 1960.  IMPORTANT: this must run before `disaster`
# is overwritten below, because it needs the full (pre-1990) record.
flooding <- disaster %>%
  rename('Disaster_type'='Disaster Type') %>%
  filter(Disaster_type=='Flood') %>%
  count(Year) %>%
  filter(Year >= 1960)
# Disasters per country per year from 1990 on; the duplicated() pass keeps
# one row per distinct (Year, Country, ISO, count) combination.
# NOTE(review): the result is left grouped by Year (no ungroup()).
disaster <- disaster %>%
  filter(Year >= 1990) %>%
  select(Year,Country,ISO) %>%
  group_by(Year) %>%
  add_count(Country,name='Number_disasters')
disaster <- disaster[!duplicated(disaster), ]
# Harmonise the four threatened-species tables to Country/ISO/Species.
fish <- fish %>%
  rename('Country'='Country Name','ISO'='Country Code','Species'='Fish Species')
mammal <- mammal %>%
  rename('Country'='Country Name','ISO'='Country Code','Species'='Mammal')
bird <- bird %>%
  rename('Country'='Country Name','ISO'='Country Code','Species'='Bird')
plant <- plant %>%
  rename('Country'='Country Name','ISO'='Country Code','Species'='Plants')
# ---- ACTION: 2015 electricity mix ----
# Keep the 2015 share for hydro and non-hydro renewables, derive the
# non-renewable remainder, and drop rows with missing shares.
hydro <- hydro %>%
  rename('hydro'='2015','Country'='Country Name') %>%
  select(Country,hydro)
nonhydro <- nonhydro %>%
  rename('nonhydro'='2015','Country'='Country Name','ISO'='Country Code') %>%
  select(Country,nonhydro)
energy <- full_join(hydro,nonhydro,by='Country') %>%
  mutate('nonrenewable'=100-(hydro+nonhydro)) %>%
  drop_na()
#Global temperature focus:
# global_tempmonth <-
# global_tempmonth %>%
# group_by(Date) %>%
# summarise(Anomoly_Avg=mean(Mean))
#
# tempplot <- global_tempmonth %>%
# ggplot(aes(x=Date, y=Anomoly_Avg))+geom_line()
#Overall 1,324,449 square kilometers of forest were lost from 1990-2016
#Effects: have a dropdown where they can take a look at each individual issue, Michael idea: Value box, on average on whatever year is chosen, on average in x year, sealevel rose x amount
#What's in the value box can vary based on whatever you're looking at
# Temperature: average the monthly anomaly means per Date.
# NOTE(review): "Anomoly" is misspelled, but renaming the column would break
# any ui/server code that refers to it -- left as-is.
global_tempmonth <-
  global_tempmonth %>%
  group_by(Date) %>%
  summarise(Anomoly_Avg=mean(Mean))
#Action: renewable energy:
# nope <- c('Fragile and conflict affected situations','Central Europe and the Baltics','East Asia & Pacific (excluding high income)','Early-demographic dividend',
# 'East Asia & Pacific','Europe & Central Asia (excluding high income)','Europe & Central Asia','Euro area','European Union','Fragile and conflict affected situations',
# 'High income','Heavily indebted poor countries (HIPC)','IBRD only','IDA & IBRD total','IDA total','IDA blend','IDA only','Latin America & Caribbean (excluding high income)',
# 'Latin America & Caribbean','Least developed countries: UN classification','Low income','Lower middle income','Low & middle income','Late-demographic dividend',
# 'Middle East & North Africa','Middle income','Middle East & North Africa (excluding high income)','North America','OECD members','Other small states','Pre-demographic dividend',
# 'Post-demographic dividend', 'South Asia','Sub-Saharan Africa (excluding high income)','Sub-Saharan Africa','Small states','East Asia & Pacific (IDA & IBRD countries)',
# 'Europe & Central Asia (IDA & IBRD countries)','Latin America & the Caribbean (IDA & IBRD countries)','Middle East & North Africa (IDA & IBRD countries)',
# 'South Asia (IDA & IBRD)','Sub-Saharan Africa (IDA & IBRD countries)','Upper middle income')
|
d901b15c2bd2324f3ae0b5f9de76c3c5568d79d3 | 148215869261003c8d516d13d98a2418b938b398 | /simple linear regression assignment-4/emp_data.R | e78d5ab8918a7f03dd20748c66d52ec7a46d01e4 | [] | no_license | honeyjayachandran/SLREGRESSION | 92b78008507df1675bdb4708a8e57bb5a08b4984 | fc2b5dcb93bd52d62a36a96a3e80c3e638e4b7a1 | refs/heads/main | 2022-12-31T02:33:17.578231 | 2020-10-21T03:55:19 | 2020-10-21T03:55:19 | 305,902,301 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,480 | r | emp_data.R | # Load emp_data.csv dataset
# ---- Load data and fit a simple linear regression ----
library(readr)
# Hard-coded local path; adjust when running elsewhere.
emp_data <- read.csv("C:/Users/Sony/Downloads/simple linear regression assignment-4/emp_data.csv")
View(emp_data)
# Exploratory data analysis
summary(emp_data)
# Scatter plot of the raw relationship
plot(emp_data$Salary_hike, emp_data$Churn_out_rate) # plot(X,Y)
?plot
# NOTE(review): attach() is generally discouraged (masking risk); the rest
# of the script still uses fully qualified emp_data$... anyway.
attach(emp_data)
# Correlation coefficient (r)
cor(emp_data$Salary_hike, emp_data$Churn_out_rate) # cor(X,Y)=-0.9117216
# Simple linear regression model: churn rate on salary hike
reg <- lm(emp_data$Churn_out_rate ~ emp_data$Salary_hike) # lm(Y ~ X)
summary(reg)
pred <- predict(reg) #multiple r sqred=0.8312
# Residual diagnostics (values printed at the console when run interactively)
reg$residuals
sum(reg$residuals)
mean(reg$residuals)
sqrt(sum(reg$residuals^2)/nrow(emp_data)) #RMSE=3.997528
sqrt(mean(reg$residuals^2))
confint(reg,level=0.95)
predict(reg,interval="predict")
# ggplot: points plus the fitted regression line (pred from above)
library(ggplot2)
?ggplot2
ggplot(data = emp_data, aes(x = emp_data$Salary_hike, y = emp_data$Churn_out_rate)) +
  geom_point(color='blue') +
  geom_line(color='red',data = emp_data, aes(x=emp_data$Salary_hike, y=pred))
?ggplot2
########################
# A simple ggplot code for directly showing the line
# ggplot(emp_data,aes(emp_data$Salary_hike,emp_data$Churn_out_rate))+stat_summary(fun.data=mean_cl_normal) + geom_smooth(method='lm')
####################
# ---- Logarithmic model: Y ~ log(X) ----
# x = log(emp_data$Salary_hike); y = emp_data$Churn_out_rate
plot(log(emp_data$Salary_hike), emp_data$Churn_out_rate)
cor(log(emp_data$Salary_hike), emp_data$Churn_out_rate) #corr=-0.9212077
reg_log <- lm(emp_data$Churn_out_rate ~ log(emp_data$Salary_hike)) # lm(Y ~ X)
summary(reg_log) #multiple rsqred=0.8486
predict(reg_log)
reg_log$residuals
sqrt(sum(reg_log$residuals^2)/nrow(emp_data)) #RMSE=3.786004
confint(reg_log,level=0.95)
predict(reg_log,interval="confidence")
######################
# ---- Exponential model: log(Y) ~ X ----
# x = emp_data$Salary_hike and y = log(emp_data$Churn_out_rate)
plot(emp_data$Salary_hike, log(emp_data$Churn_out_rate))
cor(emp_data$Salary_hike, log(emp_data$Churn_out_rate)) #cor=-0.9346361
reg_exp <- lm(log(emp_data$Churn_out_rate) ~ emp_data$Salary_hike) #lm(log(Y) ~ X)
summary(reg_exp) #multiple r sqred=0.8735
reg_exp$residuals
# This RMSE is on the *log* scale; the one below is on the original scale.
sqrt(mean(reg_exp$residuals^2))
# Back-transform fitted values to the original scale before computing error.
logemp <- predict(reg_exp)
emp <- exp(logemp)
error = emp_data$Churn_out_rate - emp
error
sqrt(sum(error^2)/nrow(emp_data)) #RMSE=3.541549
confint(reg_exp,level=0.95)
predict(reg_exp,interval="confidence")
##############################
# ---- Quadratic model: log(Y) ~ X + X^2 ----
plot(emp_data$Salary_hike, emp_data$Churn_out_rate)
plot(emp_data$Salary_hike*emp_data$Salary_hike, emp_data$Churn_out_rate)
cor(emp_data$Salary_hike*emp_data$Salary_hike, emp_data$Churn_out_rate) #corr=-0.9962051
plot(emp_data$Salary_hike*emp_data$Salary_hike, log(emp_data$Churn_out_rate))
cor(emp_data$Salary_hike, log(emp_data$Churn_out_rate))
cor(emp_data$Salary_hike*emp_data$Salary_hike, log(emp_data$Churn_out_rate)) #cro=-0.925803
# lm(Y ~ X + I(X*X) +...+ I(X*X*X...))
reg2degree <- lm(log(emp_data$Churn_out_rate) ~ emp_data$Salary_hike + I(emp_data$Salary_hike*emp_data$Salary_hike))
summary(reg2degree)
# Back-transform fitted values from the log scale.
logpol <- predict(reg2degree)
expy <- exp(logpol)
# BUG FIX: the original read `emp_data$emp_data$Churn_out_rate`, which is
# NULL (no such column), so `err` became numeric(0) and the RMSE silently
# evaluated to 0 instead of the true error.
err <- emp_data$Churn_out_rate - expy
sqrt(sum(err^2)/nrow(emp_data)) #RMSE
confint(reg2degree,level=0.95)
predict(reg2degree,interval="confidence")
# visualization
ggplot(data = emp_data, aes(x = emp_data$Salary_hike + I(emp_data$Salary_hike^2), y = log(emp_data$Churn_out_rate))) +
  geom_point(color='blue') +
  geom_line(color='red',data = emp_data, aes(x=emp_data$Salary_hike+I(emp_data$Salary_hike^2), y=logpol))
##############################
# ---- Cubic model: log(Y) ~ X + X^2 + X^3 ----
# NOTE(review): unlike the earlier models, no RMSE/CI step follows this fit.
reg3degree<-lm(log(emp_data$Churn_out_rate)~emp_data$Salary_hike + I(emp_data$Salary_hike*emp_data$Salary_hike) + I(emp_data$Salary_hike*emp_data$Salary_hike*emp_data$Salary_hike))
summary(reg3degree)
# Back-transform fitted values from the log scale.
logpol3 <- predict(reg3degree)
expy3 <- exp(logpol3)
# visualization
ggplot(data = emp_data, aes(x = emp_data$Salary_hike + I(emp_data$Salary_hike^2) + I(emp_data$Salary_hike^3), y = emp_data$Churn_out_rate)) +
  geom_point(color='blue') +
  geom_line(color='red',data = emp_data, aes(x=emp_data$Salary_hike+I(emp_data$Salary_hike^2)+I(emp_data$Salary_hike^3), y=expy3))
################################
|
8ff72709e10962d361c539aeed07cd23390643d7 | f123dbf9d574c04bd0e4599807c7f6c397d075b8 | /infToUn.R | f66a7a52f36578ced82917aeac395306949fe095 | [] | no_license | miniesta4/RScripts | 55b777555e0064861fa20d8be4a11bab628a8f42 | b6133b5535562dc0ee5eec04a93377a7f488df9f | refs/heads/master | 2021-01-25T09:53:45.213296 | 2019-09-05T09:58:16 | 2019-09-05T09:58:16 | 31,956,265 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 826 | r | infToUn.R | infToUn <- function(){
  ## Load xlsx writer
  library(xlsx)
  ## Read the two fixed input files as all-character data frames
  ## (colClasses = "character" preserves codes with leading zeros).
  partes <- read.csv2("../files/20150415-PartesSubc.csv", colClasses = "character")
  facturas <- read.csv2("../files/20150415-FacturasSubc.csv", colClasses = "character")
  ## Align column names so the two tables can be stacked.
  names(facturas) <- names(partes)
  ## Combine rows
  pyf <- rbind(partes, facturas)
  ## Split the combined table into one data frame per entity code.
  pyfs <- split(pyf, pyf$CO_ENTIDAD)
  ## Timestamped output directory (NOTE(review): `t` shadows base::t here).
  t <- gsub("[ :-]", "", Sys.time())
  d <- paste0("../files/output/", t)
  dir.create(d)
  ## One output file per entity, named after column 2 of its first row.
  #lapply(pyfs, function(x) write.csv2(x, file = paste0(d, "/", x[1,2], ".csv"),
  #                         row.names = FALSE, quote = FALSE))
  lapply(pyfs, function(x) write.xlsx2(x, file = paste0(d, "/", x[1,2], ".xlsx"),
         sheetName = "Datos", row.names = FALSE, quote = FALSE))
}
cf7aa465fd31749dbaa2be8f971c849c74eaf287 | f6eee4f24213e1c25b933198455884039e977855 | /man/get_timing_signal_1d.Rd | d20b967efa733b365c36b650b0dc9e60d84d9c0e | [] | no_license | ifrit98/transformR | 444b413a71b17561c088df41153a6a2f99d8bfa7 | ec251fada5ee818e498c23363166c2af99fca47f | refs/heads/master | 2020-09-12T05:37:50.034115 | 2019-11-25T03:21:44 | 2019-11-25T03:21:44 | 222,327,773 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,475 | rd | get_timing_signal_1d.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/positional-encodings.R
\name{get_timing_signal_1d}
\alias{get_timing_signal_1d}
\title{Gets a timing signal for a given length and number of channels}
\usage{
get_timing_signal_1d(
length,
channels,
min_timescale = 1,
max_timescale = 10000,
start_index = 0
)
}
\arguments{
\item{length}{scalar, length of timing signal sequence.}
\item{channels}{scalar, size of timing embeddings to create.
The number of different timescales is equal to channels / 2.}
\item{min_timescale}{A float; the smallest timescale in the geometric sequence.}
\item{max_timescale}{A float; the largest timescale in the geometric sequence.}
\item{start_index}{Index of the first position.}
}
\description{
Gets a bunch of sinusoids of different frequencies.
Each channel of the input Tensor is incremented by a sinusoid of a different
frequency and phase.
This allows attention to learn to use absolute and relative positions.
Timing signals should be added to some precursors of both the query and the
memory inputs to attention.
The use of relative position is possible because sin(x+y) and cos(x+y) can
be expressed in terms of y, sin(x) and cos(x).
In particular, we use a geometric sequence of timescales starting with
min_timescale and ending with max_timescale. The number of different
timescales is equal to channels / 2. For each timescale, we
generate the two sinusoidal signals sin(timestep/timescale) and
cos(timestep/timescale). All of these sinusoids are concatenated in
the channels dimension.
}
|
48850ab5c7423d32fced0a199f5c64bf06bbe4a2 | a4683b136132a03b900eaf4f80164e74e15f013b | /man/octave.reader.Rd | 4358f8fc2c0be3d162436adabba0bd7f3fb723b3 | [] | no_license | johnmcdonnell/ProjectTemplate | 5be21714cb14d6e3229ed717d3732f29a59e17ad | dc848ceda38d036b4ff16bfcef1d550ec6fb84ce | refs/heads/master | 2021-01-17T07:38:02.346283 | 2012-04-14T18:49:33 | 2012-04-14T18:49:33 | 4,026,892 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 644 | rd | octave.reader.Rd | \name{octave.reader}
\alias{octave.reader}
\title{Read an Octave file with a .m file extension.}
\usage{
octave.reader(data.file, filename, variable.name)
}
\arguments{
\item{data.file}{The name of the data file to be read.}
\item{filename}{The path to the data set to be loaded.}
\item{variable.name}{The name to be assigned to in the
global environment.}
}
\value{
No value is returned; this function is called for its
side effects.
}
\description{
This function will load the specified Octave file into
memory.
}
\examples{
library('ProjectTemplate')
\dontrun{octave.reader('example.m', 'data/example.m', 'example')}
}
|
87bc6162a853aa4282393fcc1724e33e21b96387 | e6d065cf95d5c034b02df73579ed67ff8e802a5c | /tests/testthat/test-utils.R | b47fa4c86edaf8f0153391390c1ac0d52f56c5e5 | [
"MIT"
] | permissive | tylerlittlefield/mtg | 9e868f699535cc9047a8b17255b1aa130a8a03ff | e319a08ce292770bf867ca2f9d92b8c433aed8ee | refs/heads/master | 2023-06-08T09:42:21.272899 | 2019-01-11T21:49:23 | 2019-01-11T21:49:23 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 94 | r | test-utils.R | context("test-utils")
# Placeholder spec: asserts a tautology so the testthat harness has at
# least one passing test; no package code is exercised yet.
test_that("utils works", {
  # dummy test
  expect_equal(2 * 2, 4)
})
|
24363be4df8001aae37f8f79af54f918b3748844 | 283617b8337193c0c99cb09cc4eab7790f3fbd8f | /Plot4.R | 1a3300191f311d0ab979171d9294a38799cb921f | [] | no_license | Gabegao/ExData_Plotting1 | 7b1513031c98ebf485e7767d4bc655bb255075f3 | 1451806fa2483f68403e086705d367bee6e1ba1b | refs/heads/master | 2021-01-19T04:32:31.825443 | 2016-01-09T06:11:12 | 2016-01-09T06:11:12 | 49,243,509 | 0 | 0 | null | 2016-01-08T02:15:50 | 2016-01-08T02:15:49 | null | UTF-8 | R | false | false | 1,361 | r | Plot4.R | library(data.table)
## Read the raw file; both "NA" and "?" mark missing values.
data<-fread("household_power_consumption.txt", sep=";", header=TRUE, na.strings=c("NA","?"))
## Convert the Date column from character to "Date" class.
data$Date<- as.Date(data$Date, "%d/%m/%Y")
## Keep only the observations between 2007-02-01 and 2007-02-02 (inclusive).
data<-subset(data, {Date >= "2007-02-01" & Date <= "2007-02-02"})
## Combine Date and Time into a single POSIXlt datetime for the x axis.
temp<- strptime(paste(data$Date, data$Time), "%Y-%m-%d %H:%M:%S")
## Open the png device; everything up to dev.off() draws into Plot4.png.
png(filename = "Plot4.png")
par(mfrow=c(2,2))
## Top-left: global active power over time.
plot(temp, data$Global_active_power, type="l", col="black", xlab="", ylab="Global active power")
## Top-right: voltage over time.
plot(temp, data$Voltage, type="l", col="black", xlab="datetime", ylab="Voltage")
## Bottom-left: the three sub-metering series overlaid, with a legend.
plot(temp, data$Sub_metering_1, type="l", col="black", xlab="", ylab="Energy sub metering")
lines(temp, data$Sub_metering_2, col="red")
lines(temp, data$Sub_metering_3, col="blue")
legend("topright", lty=c(1,1,1), col=c("black","red","blue"), legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
## Bottom-right: global reactive power over time.
plot(temp, data$Global_reactive_power, type="l", col="black", xlab="datetime", ylab="Global reactive power")
## Close the device so the file is written.
dev.off()
|
f7a0a7de1770761a83e6f05c86a21cd2827db3c9 | 601b4921fe99514fc8400468a44c0fb4a7b298a3 | /R/DA_FDN2M2.R | 30fe96bc08c5f43caa2bbcf6f2de53b1bd76ff15 | [] | no_license | njuwill/SPPcomb | 20742148386ea5770f73088aaa80f427c8118ae0 | e0625b2219f3c8cf17175e8e5bcc068f5022c912 | refs/heads/master | 2021-05-01T23:17:05.107294 | 2018-02-09T21:51:04 | 2018-02-09T21:51:04 | 120,931,787 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,825 | r | DA_FDN2M2.R | #' Data Analysis of (N2,M2)
#'
#' The main function to solve the estimating equations constructed by (N2,M2). Since there is just one case data, no selection bias needed.
#'
#' @param realdata_covariates a list contains the following data matrics:
#' CASEZhat_2, CASEZhat_22, CONTZhat_2, CONTZhat_22
#' @param realdata_alpha a list contains the following data matrics:
#' prob_case_22,prob_cont_2, pwt_cont_2
#' @param beta0 an initial parameter for solver "nleqslv".
#' @param p number of parameters.
#' @return A list of estimator and its standard deviation.
#'
#' @export
#'
#' @details The function solves estimating equation based on (N2,M2), see Huang(2014).
#'
#' @examples
#' #p <- 8
#' #beta0=c(-5.4163,0.7790,-0.1289,0.2773,-0.5510,0.1568,0.4353,-0.6895)
#' #DA_FDN2M2(realdata_covariates,realdata_alpha,p=p,beta0=beta0)
#'
#' @references
#' Huang, H., Ma, X., Waagepetersen, R., Holford, T.R. , Wang, R., Risch, H., Mueller, L. & Guan, Y. (2014). A New Estimation Approach for Combining Epidemiological Data From Multiple Sources, Journal of the American Statistical Association, 109:505, 11-23.
DA_FDN2M2<- function(realdata_covariates,realdata_alpha,p,beta0) {
  # Example starting values kept from the original for reference:
  # beta0=c(-5.556564, 0.9106478, -0.05683087, 0.318503 ,-0.4461375 ,0.2002597 ,0.4306426, -0.6645185);
  # beta0=c(-5.4163,0.7790,-0.1289,0.2773,-0.5510,0.1568,0.4353,-0.6895);
  # subset_4 <- 1:(p-2)
  # Inner evaluator: given beta, returns list(f, J, V) where f is the
  # weighted estimating function t(J) %*% solve(V) %*% Q.  When J and V are
  # passed empty (c()), they are recomputed at this beta; otherwise the
  # supplied (fixed) J and V are reused -- this is what the fallback branch
  # below relies on.
  DA_FDN2M2_inside<- function(beta,CASEZhat_2,CASEZhat_22,CONTZhat_2,CONTZhat_22,
                              prob_case_22,prob_cont_2,p,J,V,pwt_cont_2){
    # Per-observation weights for cases and controls (beta is 1 x p, so
    # Z %*% t(beta) gives the linear predictor per row).
    W_case_4=prob_case_22/(prob_case_22+exp(as.matrix(CASEZhat_22)%*%t(beta)))*exp((as.matrix(CASEZhat_22)-as.matrix(CASEZhat_2))%*%t(beta));
    W_cont_4=exp(as.matrix(CONTZhat_2)%*%t(beta))/(prob_cont_2+exp(as.matrix(CONTZhat_22)%*%t(beta)))*prob_cont_2/pwt_cont_2;
    # Weighted column sums of the covariates (the kronecker() call just
    # replicates the weight vector across the p columns).
    Q_4_cont=colSums(as.matrix(CONTZhat_22)*kronecker(as.matrix(W_cont_4),t(as.matrix(rep(1,p)))));
    Q_4_case=colSums(as.matrix(CASEZhat_22)*kronecker(as.matrix(W_case_4),t(as.matrix(rep(1,p)))));
    Q_4=Q_4_case-Q_4_cont;
    dim(Q_4)=c(p,1);
    Q=Q_4
    # Recompute the p x p Jacobian J and variance V only when not supplied.
    if (length(J)+length(V)==0){
      J_4=matrix(0,p,p);
      for (i in 1:p){
        for (j in 1:p){
          ## J_4[i,j]=sum(CASEZhat_22[,subset_4[i]]*CASEZhat_2[,j]*W_case_4);
          J_4[i,j]=-sum(CASEZhat_22[,i]*(CASEZhat_2[,j]-CASEZhat_22[,j])*W_case_4)-sum(CONTZhat_22[,i]*(CONTZhat_2[,j])*W_cont_4);
        }
      }
      VQQ_44=matrix(0,p,p);
      for (i in 1:p){
        for (j in 1:p){
          VQQ_44[i,j]=sum(CASEZhat_22[,i]*CASEZhat_22[,j]*W_case_4^2)+sum(CONTZhat_22[,i]*CONTZhat_22[,j]*W_cont_4^2);
        }
      }
      V=VQQ_44
      J=J_4
    }
    f=t(J)%*%solve(V)%*%Q;
    return(list(f,J,V))
  }
  # Unpack sampling probabilities / weights.
  prob_cont_2 <- realdata_alpha$prob_cont_2;
  prob_case_22 <- realdata_alpha$prob_case_22;
  pwt_cont_2 <- realdata_alpha$pwt_cont_2;
  # Unpack covariate matrices.
  CASEZhat_2 <- realdata_covariates$CASEZhat_2;
  CASEZhat_22 <- realdata_covariates$CASEZhat_22;
  CONTZhat_2 <- realdata_covariates$CONTZhat_2;
  CONTZhat_22 <- realdata_covariates$CONTZhat_22;
  # The inner evaluator expects beta as a 1 x p row vector.
  dim(beta0) <- c(1,p);
  # Objective for the root-finder: the estimating function at beta, with
  # J and V recomputed at each evaluation.
  f <- function(beta) {
    Y <- DA_FDN2M2_inside(beta,CASEZhat_2,CASEZhat_22,CONTZhat_2,CONTZhat_22,
                          prob_case_22,prob_cont_2,p,c(),c(),pwt_cont_2);
    Y[[1]]
  }
  reg <- nleqslv::nleqslv(beta0,f,method="Newton");
  betahat <- reg[[1]];
  betahat
  fval <- reg[[2]];
  flag <- reg[[3]];
  # If Newton did not converge (termination code != 1), retry with J and V
  # frozen at beta0, which gives a smoother objective.
  if (flag !=1 ){
    reg_tmp <- DA_FDN2M2_inside(beta0,CASEZhat_2,CASEZhat_22,CONTZhat_2,CONTZhat_22,
                                prob_case_22,prob_cont_2,p,c(),c(),pwt_cont_2);
    f <- reg_tmp[[1]];
    J_u <- reg_tmp[[2]];
    V_u <- reg_tmp[[3]];
    f2 <- function(beta){
      Y2 <- DA_FDN2M2_inside(beta,CASEZhat_2,CASEZhat_22,CONTZhat_2,CONTZhat_22,
                             prob_case_22,prob_cont_2,p,J_u,V_u,pwt_cont_2);
      Y2[[1]]
    }
    reg_update <- nleqslv::nleqslv(beta0,f2,method="Newton");
    betahat <- reg_update[[1]];
    fval_update <- reg_update[[2]];
    flag_update <- reg_update[[3]];
  }
  # Plug the estimate back in to get J and V for the sandwich-type
  # standard errors sqrt(diag(J^-1 V J^-T)).
  beta1 <- betahat;
  dim(beta1) <- c(1,p);
  plugin <- DA_FDN2M2_inside(beta1,CASEZhat_2,CASEZhat_22,CONTZhat_2,CONTZhat_22,
                             prob_case_22,prob_cont_2,p,c(),c(),pwt_cont_2);
  J <- plugin[[2]];
  V <- plugin[[3]];
  # std <- sqrt(t(abs(diag(solve(t(J)%*%solve(V)%*%J)))));
  std <- sqrt(t(abs(diag((solve(J)%*%V%*%solve(t(J)))))));
  return(list(est=beta1, std=std))
}
|
75025fac5b29944c272f0899c8677c0cc0e9070d | 21abf7d633fc8e7ac023496696fd54c241f11726 | /R/hubway-package.r | 00877745f0a5d597f0fcbb092dbec08c6cdb7fb8 | [] | no_license | hrbrmstr/hubway | d3f3233802c5ec9a25c4f6d8042948191c108581 | cd8082fcab1bc2423bbe3fc801103cc5b56fd06e | refs/heads/master | 2021-01-19T02:30:38.655457 | 2016-07-14T19:33:10 | 2016-07-14T19:33:10 | 63,364,008 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 267 | r | hubway-package.r | #' Tools to Work with the 'Hubway' 'API'
#' @name hubway
#' @docType package
#' @author Bob Rudis (@@hrbrmstr)
#' @importFrom dplyr filter left_join
#' @importFrom httr GET content stop_for_status
#' @importFrom tibble as_tibble
#' @importFrom jsonlite fromJSON
NULL
|
ec8828ee6e858da23f3a6bd73bdcab3110abddd4 | 29c921acdfa314d915965c785ca04cbc681ce2e4 | /R/read_gluc_all.R | 4bd37a6ee08a651b7ce5503bd8a83bddfcddc865 | [] | no_license | muschellij2/gluc | 6a9b18b1c8d020ae7f21a4d1de4d60a573b263f4 | 3fdc469701b31411b5cddbee13027dcb77654871 | refs/heads/master | 2021-01-19T01:18:48.697687 | 2020-02-27T18:15:26 | 2020-02-27T18:15:26 | 87,238,467 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 793 | r | read_gluc_all.R | #' @title Read All Glucose Sheets from file
#' @description A global function to read the different types of glucose
#' data from an xlsx
#'
#' @param path paths of xlsx files
#' @param type type of file to read, defaults to all
#' @param verbose print diagnostic messages
#' @param ... additional argument to send to the read function related to
#' \code{type}
#'
#' @return A list of \code{tbl} data.frame for each type.
#' @export
# Read every requested sheet type from an xlsx file, delegating the actual
# parsing to read_gluc(); returns a named list, one element per type.
read_gluc_all <- function(path,
                          type = c("Abbott", "Dexcom",
                                   "AbbottRaw", "DexcomRaw"),
                          verbose = TRUE,
                          ...) {
  # several.ok = TRUE: the caller may ask for any subset of the choices
  # (the full default vector means "read all of them").
  type <- match.arg(type, several.ok = TRUE)
  read_one <- function(sheet_type) {
    if (verbose) message("Reading in ", sheet_type, " sheets")
    read_gluc(path = path, type = sheet_type, ...)
  }
  out <- lapply(type, read_one)
  names(out) <- type
  out
}
8ef20939ed97239ac32d6686e6c3ed872e012d4e | 7635cad41c9014b4f403c8634761fcb08a3ba28a | /man/momentS.Rd | 06f1fc6c4ea76de831fde532e1876ca890a74318 | [] | no_license | CFWP/rags2ridges | 4cf70eac020ac5141f6cafd7398bb335643d6606 | 04027a52492eee0e4aa3ac2e5e508c5c4c266c80 | refs/heads/master | 2022-05-26T08:35:46.286445 | 2022-04-30T08:02:09 | 2022-04-30T08:02:09 | 29,291,369 | 5 | 3 | null | 2017-09-04T14:59:35 | 2015-01-15T10:00:07 | R | UTF-8 | R | false | true | 1,674 | rd | momentS.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rags2ridges.R
\name{momentS}
\alias{momentS}
\title{Moments of the sample covariance matrix.}
\usage{
momentS(Sigma, shape, moment = 1)
}
\arguments{
\item{Sigma}{Positive-definite \code{matrix}, the scale parameter
\eqn{\mathbf{\Sigma}} of the Wishart distribution.}
\item{shape}{A \code{numeric}, the shape parameter \eqn{\nu} of the Wishart
distribution. Should exceed the number of variates (number of rows or
columns of \code{Sigma}).}
\item{moment}{An \code{integer}. Should be in the set \eqn{\{-4, -3, -2, -1,
0, 1, 2, 3, 4\}} (only those are explicitly specified in Lesac, Massam,
2004).}
}
\value{
The \eqn{r}-th moment of a sample covariance matrix:
\eqn{E(\mathbf{S}^r)}.
}
\description{
Calculates the moments of the sample covariance matrix. It assumes that the
summands (the outer products of the samples' random data vector) that
constitute the sample covariance matrix follow a Wishart-distribution with
scale parameter \eqn{\mathbf{\Sigma}} and shape parameter \eqn{\nu}. The
latter is equal to the number of summands in the sample covariance estimate.
}
\examples{
# create scale parameter
Sigma <- matrix(c(1, 0.5, 0, 0.5, 1, 0, 0, 0, 1), byrow=TRUE, ncol=3)
# evaluate expectation of the square of a sample covariance matrix
# that is assumed to Wishart-distributed random variable with the
# above scale parameter Sigma and shape parameter equal to 40.
momentS(Sigma, 40, 2)
}
\references{
Lesac, G., Massam, H. (2004), "All invariant moments of the
Wishart distribution", \emph{Scandinavian Journal of Statistics}, 31(2),
295-318.
}
\author{
Wessel N. van Wieringen.
}
|
f947b93c19678d60a19a02db1b6e5d27f57723b9 | 2d35e1f947c0ba5e45153ddc0ddd8caded605125 | /app.R | e5466c090a9b74bf726cede5ec914b46dfbc128a | [
"MIT"
] | permissive | jjvhab/UHD_examineR | 62b1ec7f0f5b3926df07aa7866e17c13de31afb7 | 5dc8e72882b2dfe7ca3c2a74296f4e514b1f612a | refs/heads/master | 2020-07-13T15:55:57.496366 | 2019-11-14T13:34:28 | 2019-11-14T13:34:28 | 205,110,175 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,162 | r | app.R | # UHD examineR v1.0.2 by V.Haburaj
#
# Load and inspect hyperspectral recordings captured with a
# Cubert UHD258 snapshot camera. The imported raster file is
# plotted as RGB image. The raster can be manipulated
# (spatial and spectral filters). By selecting an area of
# interest in the plotted raster, a plot of the mean spectrum
# is generated (+/- sd). The selected spectrum can be saved
# to a data.frame, which can be exported as CSV file. The
# manipulated raster can be saved to a TIF file.
#
# by Vincent Haburaj, vincent.haburaj@fu-berlin.de
# ------------------------
# Load Packages ----------
# ------------------------
# Attach all required packages.  require() both tests for and attaches a
# package, so the original per-package `else library(pkg)` call was
# redundant; a single loop keeps the attach order and the "package X not
# found!" messages identical while removing the repetition.
required_pkgs <- c(
  "shiny", "shinydashboard", "shinyjs", "shinyWidgets",
  "DT", "raster", "velox", "ggplot2", "hsdar"
)
for (pkg in required_pkgs) {
  if (!require(pkg, character.only = TRUE)) {
    message(sprintf("package %s not found!", pkg))
  }
}
# ------------------------
# Define UI --------------
# ------------------------
# jscode for closing the app: a tiny JavaScript snippet registered with
# shinyjs (via extendShinyjs below) so the server can close the browser tab.
jscode <- "shinyjs.closeWindow = function() { window.close(); }"
ui = dashboardPage(
  # define skin:
  skin = 'black',
  # define header:
  dashboardHeader(
    title = "UHD examineR",
    # add close button: triggers input$close, handled in server()
    tags$li(actionButton("close", "",
                         icon = icon('power-off'),
                         style="color: #fff; background-color: #FC091B; border-color: #FC091B"),
            class='dropdown')
  ),
  # make sidebar with control widgets:
  dashboardSidebar(
    # needed for close button:
    useShinyjs(),
    extendShinyjs(text = jscode, functions = c("closeWindow")),
    # specify input file: the uploaded raster (41-band UHD recording)
    fileInput(inputId = 'file',
              label = 'Select raster:'),
    # choose image rotation angle (degrees, applied in imgInput()):
    selectInput('sliderROT', label='rotate image clockwise by:',
                choices = c(0,90,180,270),
                selected=0)
  ),
  # make main body:
  dashboardBody(
    # ROW 1: raster display plus filter controls
    fluidRow(
      # image plotting area; brushing a region (id "rasPlot_brush", read
      # in server) selects the area whose spectrum is plotted below:
      box(
        status = 'primary',
        title = "Raster",
        solidHeader = TRUE,
        plotOutput("rast", brush = brushOpts(
          id = "rasPlot_brush",
          stroke = 'cyan',
          fill='red'),
          width = '100%',
          height = '600px')
      ),
      column(
        width=6,
        # process raster data:
        tabBox(
          width = NULL,
          title = 'Filter raster',
          # SPATIAL FILTER (focal mean/median over a square window):
          tabPanel('Spatial filter',
                   # optionally apply spatial filter:
                   checkboxInput("checkboxSpatialFilter",
                                 label = "Spatial filter",
                                 value = FALSE),
                   # select spatial filter:
                   selectInput("selectedSpatialFilter",
                               label = "spatial filter",
                               c("median","mean")),
                   # choose Median window size (pixels; odd values only):
                   selectInput('sliderMED', label='spatial filter window size',
                               choices = c(3,5,7,9,11,13,15,17,19,21),
                               selected=3)
          ),
          # SPECTRAL FILTER (per-pixel smoothing along the 41 bands):
          tabPanel('Spectral filter',
                   # optionally apply spectral filter:
                   checkboxInput("checkboxSG",
                                 label = "Savitzky-Golay (2nd order)",
                                 value = FALSE),
                   # choose SG filter width:
                   selectInput('sliderSG', label='filter width',
                               choices = c(3,5,7,9,11,13,15),
                               selected=5)
          )
        ),
        box(
          width = NULL,
          title = 'Save filtered raster data',
          # download processed raster:
          downloadButton("downloadRaster", "Save processed raster to TIF"
          )
        ),
        box(
          width=NULL,
          # optionally limit the spectral range shown in the plots:
          checkboxInput("checkbox01",
                        label = "Plot: clip data to 470-830 nm",
                        value = TRUE)
        )
      )
    ),
    # ROW 2: spectrum plots plus the table of saved spectra
    fluidRow(
      # graph plotting area:
      tabBox(
        title = "VIS-NIR reflectance",
        tabPanel('Selected values', plotOutput("TSplot")),
        tabPanel('All saved values', plotOutput("combiPlot"))
      ),
      column(
        width = 6,
        # controls to choose, label, add and save selected spectrum:
        box(
          width = NULL,
          title = 'Save selected values to table',
          selectInput('selectedValues', 'Select Values:',
                      choices = c('Mean',
                                  'Standard Deviation')),
          textInput('colName', 'Column Name:'),
          actionButton('addValues', 'Add Values', icon=icon('plus')),
          downloadButton("downloadData", "Save table to CSV")
        ),
        # table plotting area:
        box(
          width=NULL,
          DT::dataTableOutput('table')
        )
      )
    )
  )
)
# set maximum fileInput size to 500 MB (the shiny default upload limit of
# 5 MB is far too small for hyperspectral rasters):
options(shiny.maxRequestSize=500*1024^2)
# ------------------------
# Define server logic ----
# ------------------------
# Server logic.  Reactive pipeline:
#   imgInput() -> veloxInput() -> {output$rast, value*/selected* reactives}
# Brushing the raster plot drives the spectrum plot and the saved table.
server <- function(input, output) {
  # Stop button: close the browser window (via the JS registered in the
  # UI) and terminate the R process.
  observeEvent(input$close, {
    js$closeWindow()
    stopApp()
  })
  # Spatial (focal) filter applied to all 41 bands via velox.  The window
  # size comes from input$sliderMED; `w` is unused (kept for interface
  # stability of existing callers).
  multiFocal <- function(x, w) {
    if(is.character(x)) {
      x <- brick(x)
    }
    # The function to be applied to each individual layer
    if(input$selectedSpatialFilter=="mean"){
      # MEAN:
      vx <- velox(x)
      vx2 <- vx$copy()
      vx2$meanFocal(weights=matrix(1,
                                   as.numeric(input$sliderMED),
                                   as.numeric(input$sliderMED)),
                    bands=c(1:41))
      r <- vx2$as.RasterStack()
      return(r)
    } else {
      # MEDIAN:
      vx <- velox(x)
      vx2 <- vx$copy()
      vx2$medianFocal(wrow=as.numeric(input$sliderMED),
                      wcol=as.numeric(input$sliderMED),
                      bands=c(1:41))
      r <- vx2$as.RasterStack()
      return(r)
    }
  }
  # Load the uploaded raster (must have exactly 41 bands), rotate it as
  # requested and optionally apply the spatial filter.
  imgInput <- reactive({
    if (nlayers(stack(input$file$datapath)) != 41) {
      stop('input raster must have 41 bands.')
    } else {
      if (input$sliderROT == 0) {
        cl <- stack(input$file$datapath)
      }
      if (input$sliderROT == 90) {
        cl <- t(flip(stack(input$file$datapath), 2))
      }
      if (input$sliderROT == 180) {
        cl <- flip(flip(stack(input$file$datapath),1),2)
      }
      if (input$sliderROT == 270) {
        cl <- t(flip(stack(input$file$datapath),1))
      }
    }
    # and apply spatial filter if selected:
    if (input$checkboxSpatialFilter) {
      multiFocal(cl)
    } else cl
  })
  # Transform to velox for faster extraction; optionally smooth each
  # pixel's spectrum with a 2nd-order Savitzky-Golay filter (hsdar).
  veloxInput <- reactive({
    if (input$checkboxSG) {
      # Savitzky-Golay-Filter:
      cl <- imgInput()
      m <- as.matrix(values(cl))
      wl <- seq(450,850,10)
      # create speclib object:
      spectral_data <- speclib(m, wl)
      idSpeclib(spectral_data) <- as.character(wl)
      # filter:
      SG <- smoothSpeclib(spectral_data, method = "sgolay", p = 2, n = as.numeric(input$sliderSG))
      # as raster:
      imgSG <- cl
      values(imgSG) <- spectra(SG)
      velox::velox(imgSG)
    } else velox::velox(imgInput())
  })
  # RGB preview of the raster (bands 22/13/6 mapped to R/G/B).
  output$rast <- renderPlot({
    cl <- imgInput()
    plotRGB(cl, r=22,g=13,b=6,scale=65536, stretch='hist', asp=1)
  })
  # Extent (xmin, xmax, ymin, ymax) of the brushed region.
  Coords <- reactive({
    req(input$rasPlot_brush$xmin)
    c(input$rasPlot_brush$xmin, input$rasPlot_brush$xmax,
      input$rasPlot_brush$ymin, input$rasPlot_brush$ymax)
  })
  # Per-band mean reflectance of the selected region (length-41 vector).
  valueMean <- eventReactive(input$rasPlot_brush$xmin,{
    vl <- veloxInput()
    as.vector(vl$extract(as(raster::extent(Coords()), 'SpatialPolygons') ,fun=mean))
  })
  # Per-band standard deviation of the selected region.
  valueSD <- eventReactive(input$rasPlot_brush$xmin,{
    vl <- veloxInput()
    as.vector(vl$extract(as(raster::extent(Coords()), 'SpatialPolygons') ,fun=sd))
  })
  # Wavelength labels for the 41 bands (450-850 nm in 10 nm steps).
  waveL <- eventReactive(input$rasPlot_brush$xmin,{
    w <- seq(450,850,10)
    w
  })
  # Spectrum plot (mean +/- sd) of the currently brushed region.
  output$TSplot <- renderPlot(res = 100, {
    # Values > 1 indicate raw 16-bit counts, so rescale to [0, 1].
    # Bug fix: the condition was `valueMean() > 1`, a length-41 logical
    # vector, which is an error as an `if` condition in R >= 4.2; any()
    # reduces it to a single logical.
    if(any(valueMean() > 1)){
      df <- data.frame(
        m = valueMean()/65536,
        ms = valueSD()/65536,
        wl = waveL()
      )
    } else df <- data.frame(
      m = valueMean(),
      ms = valueSD(),
      wl = waveL()
    )
    # optionally limit plot data to 470-830 nm (rows 3 to 39):
    if (input$checkbox01) {
      df <- df[3:39,]
    }
    # call:
    ggplot(data=df, aes(x=wl,y=m))+
      geom_line(col='black', lwd=1.5) +
      geom_line(aes(x=wl, y=m+ms), linetype='dashed', color='gray50') +
      geom_line(aes(x=wl, y=m-ms), linetype='dashed', color='gray50') +
      xlab('wavelength [nm]') +
      ylab('reflectance [0,1]') +
      ggtitle('Mean and SD of the region selected')+
      theme_classic() +
      theme(panel.grid.major = element_line(colour = 'grey90'),
            panel.grid.minor = element_line(colour = 'grey90', linetype = 2))
  })
  # Plot every mean spectrum saved to the table (sd columns are dropped).
  output$combiPlot <- renderPlot(res=100, {
    df <- storedData$df
    if(any(grepl('_sd', colnames(df)))){
      df <- df[, -grep("_sd", colnames(df))]
    }
    df <- reshape2::melt(df, id='wl')
    # call:
    ggplot(data=df, aes(x=wl,y=value,col=variable))+
      geom_line() +
      xlab('wavelength [nm]') +
      ylab('reflectance [0,1]') +
      theme_classic() +
      theme(panel.grid.major = element_line(colour = 'grey90'),
            panel.grid.minor = element_line(colour = 'grey90', linetype = 2),
            legend.title = element_blank())
  })
  # Mean spectrum of the selection, clipped to 470-830 nm and rescaled to
  # [0, 1] when the values are raw counts.
  selectedMean <- reactive({
    tmp <- valueMean()
    # Bug fix: `tmp > 1` is vector-valued; see note in output$TSplot.
    if (any(tmp > 1)) {
      data.frame(
        m = tmp[3:39]/65536
      )
    } else data.frame(
      m = tmp[3:39]
    )
  })
  # Standard deviation of the selection, clipped and rescaled likewise.
  selectedSD <- reactive({
    tmp <- valueSD()
    # Bug fix: `tmp > 1` is vector-valued; see note in output$TSplot.
    if (any(tmp > 1)) {
      data.frame(
        m = tmp[3:39]/65536
      )
    } else data.frame(
      m = tmp[3:39]
    )
  })
  # Table of saved spectra, seeded with the 470-830 nm wavelength column.
  storedData <- reactiveValues(
    df = data.frame(
      wl = seq(470,830,10)
    )
  )
  # Append the currently selected mean or sd spectrum as a new, labelled
  # column of the table.
  observeEvent(input$addValues, {
    if (input$selectedValues == 'Mean') {
      newData <- selectedMean()
    } else newData <- selectedSD()
    storedData$df$newData <- round(newData[,1],5)
    # and label them with the user-supplied name plus a _mean/_sd suffix:
    if (input$selectedValues == 'Mean') {
      names(storedData$df)[names(storedData$df) == 'newData'] <- paste(input$colName,'_mean', sep='')
    } else names(storedData$df)[names(storedData$df) == 'newData'] <- paste(input$colName,'_sd', sep='')
  })
  # Render the table of saved spectra.
  output$table <- DT::renderDataTable({
    df <- storedData$df
    datatable(df,
              extensions = c("FixedColumns", "FixedHeader", 'Scroller'),
              options = list(scroller=TRUE,
                             scrollX=TRUE,
                             rownames=FALSE,
                             searching =FALSE,
                             autoWidth=TRUE,
                             scrollY = "400px",
                             fixedHeader = TRUE))
  })
  # Download the table of saved spectra as CSV.
  output$downloadData <- downloadHandler(
    filename = 'selected_spectral_data.csv',
    content = function(file) {
      write.csv(storedData$df, file, row.names = FALSE)}
  )
  # Download the (filtered) raster as a TIF file.
  output$downloadRaster <- downloadHandler(
    filename = 'processed_raster_data.tif',
    content = function(file) {
      raster::writeRaster(veloxInput()$as.RasterStack(), file)
    }
  )
}
# ------------------------
# Run the app ------------
# ------------------------
shinyApp(ui = ui, server = server)  # launch the dashboard defined above
# ------------------------
# References -------------
# ------------------------
# citation('shiny')
# Winston Chang, Joe Cheng, JJ Allaire, Yihui Xie and Jonathan
# McPherson (2018). shiny: Web Application Framework for R. R
# package version 1.1.0. https://CRAN.R-project.org/package=shiny
#
# citation('shinydashboard')
# Winston Chang and Barbara Borges Ribeiro (2018). shinydashboard:
# Create Dashboards with 'Shiny'. R package version 0.7.1.
# https://CRAN.R-project.org/package=shinydashboard
#
# citation('shinyjs')
# Dean Attali (2018). shinyjs: Easily Improve the User Experience
# of Your Shiny Apps in Seconds. R package version 1.0.
# https://CRAN.R-project.org/package=shinyjs
#
# citation('shinyWidgets')
# Victor Perrier, Fanny Meyer and David Granjon (2019).
# shinyWidgets: Custom Inputs Widgets for Shiny. R package version
# 0.4.8. https://CRAN.R-project.org/package=shinyWidgets
#
# citation('DT')
# Yihui Xie, Joe Cheng and Xianying Tan (2019). DT: A Wrapper of
# the JavaScript Library 'DataTables'. R package version 0.6.
# https://CRAN.R-project.org/package=DT
# citation('raster')
# Robert J. Hijmans (2019). raster: Geographic Data Analysis and
# Modeling. R package version 2.9-23.
# https://CRAN.R-project.org/package=raster
#
# citation('velox')
#Philipp Hunziker (2018). velox: Fast Raster Manipulation and
# Extraction. R package version 0.2.0.9002.
#
# citation('ggplot2')
# H. Wickham. ggplot2: Elegant Graphics for Data Analysis.
# Springer-Verlag New York, 2016.
#
# citation('hsdar')
# Lukas W. Lehnert, Hanna Meyer, Joerg Bendix (2018). hsdar:
# Manage, analyse and simulate hyperspectral data in R. R package
# version 0.7.1.
|
e1ddff197f574dd00adb613e367cad6ab593c48a | 4dced39355fb06b47320044cb55a8af13f4a6f1a | /plot2.R | 928df0cd3d5c1eace1ac662c12b5e3b4203663ed | [] | no_license | Rabun0510/ExData_Plotting1 | c025194e9072b5933a6dd25014b83404dd9e60f8 | 76e83190cc23444503d792fc7db849edb3db2903 | refs/heads/master | 2020-12-28T21:05:47.627045 | 2015-04-08T19:56:27 | 2015-04-08T19:56:27 | 33,628,820 | 0 | 0 | null | 2015-04-08T19:53:45 | 2015-04-08T19:53:44 | null | UTF-8 | R | false | false | 1,071 | r | plot2.R | # Exdata-013 project 1 plot 2
# Read the raw household power-consumption data; "?" marks missing values.
power_raw <- read.table(
  "./data/household_power_consumption.txt",
  header = TRUE, sep = ";", na.strings = "?", stringsAsFactors = FALSE
)
# Prepend a DateTime column combining Date and Time into one POSIXlt value.
power_data <- cbind(
  "DateTime" = strptime(
    paste(power_raw$Date, power_raw$Time),
    format = "%d/%m/%Y %H:%M:%S"
  ),
  power_raw
)
# Keep observations from the two target days, dropping unparsable stamps.
in_window <- (power_data$DateTime >= as.POSIXlt("2007-02-01")) &
  (power_data$DateTime <= as.POSIXlt("2007-02-03"))
feb_days <- power_data[in_window, ]
feb_days <- feb_days[!is.na(feb_days$DateTime), ]
# Draw the Global Active Power time series (empty frame, then the line).
par(mfrow = c(1, 1))
plot(feb_days$DateTime, feb_days$Global_active_power, type = "n",
     xlab = "", ylab = "Global Active Power (kilowatts)")
lines(feb_days$DateTime, feb_days$Global_active_power,
      type = "l", lwd = 1, lty = "solid")
# Copy the on-screen plot into a 480x480 PNG file and close the device.
dev.copy(png, file = "plot2.png", width = 480, height = 480)
dev.off()
ae971cccf9c6211eaa09b18749bb6f4002feb333 | 3f0ab525141b98db333e50e8dd8e344f3fb0704c | /Code/R Processing Code/Tracker_Updater.R | 100ca24d10a723add094a7fbbcf1696f402a496f | [] | no_license | anwarsk/IU-Bus-Transit | 09c2c4280a5d4cce246855b6a1be10abf1674cb4 | 6bed90828b47657f503bac6832f57fab3045c920 | refs/heads/master | 2021-01-17T16:50:22.506538 | 2016-01-21T23:57:33 | 2016-01-21T23:57:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,746 | r | Tracker_Updater.R |
# Cluster per-stop travel-time observations for route "X" and write the
# observations plus per-cluster summaries to disk.
dataTracker <- read.csv("D:/Bus_Data/Tracker_X.csv")
stopData <- read.csv("D:/Bus_Data/RouteToStopMap.csv")
stopData <- subset.data.frame(stopData,stopData$Route == "X")
# Attach the destination stop's ID/index to each tracker record.
dT <- merge(dataTracker, stopData[, c("Stop_ID", "StopIndex", "Name")], by.x = "ToStop_Name", by.y = "Name")
dT$StopCombo <- paste(dT$FromStop_ID, dT$Stop_ID, sep="_")
# Mode 0 = dwell at a stop, Mode 1 = travel between two different stops.
dT$Mode[dT$FromStop_ID == dT$Stop_ID] <- 0
dT$Mode[dT$FromStop_ID != dT$Stop_ID] <- 1
# Stops present in the route map but absent from the tracker data:
setdiff(unique(stopData$Name), unique(dT$ToStop_Name))
library(stringr)
library(dplyr)
# Write the header row for the combined output file.  The rows appended
# inside the loop carry every dT column plus the three columns added
# below, so the header is built from those names explicitly.
# (Bug fix: the original used t(colnames(out)) here, referencing `out`
# before it was ever defined, which errors on a fresh session.)
out_header <- c(colnames(dT), "clusterno", "MeanTime", "TimeDiffer")
write.table(file=paste("D:/Bus_Data/Out_X.csv"), x=t(out_header), row.names = FALSE, col.names = FALSE, quote = FALSE, sep= ",", append = TRUE)
for (i in unique(dT$StopCombo))
{
  d<-subset(dT,dT$StopCombo==i)
  rcount <- nrow(d)
  # Only stop pairs with enough observations are clustered.
  if(rcount > 100)
  {
    # NOTE(review): kmeans() is not seeded, so cluster assignments (and
    # hence the chosen representative time) vary between runs.
    fit<- kmeans(d$Time,4)
    out<-cbind(d,clusterno=fit$cluster)
    # Per-cluster mean, sum and size (d/ds/d1 shadow earlier names).
    d <- aggregate(out$Time, by= list(out$clusterno), FUN = mean)
    ds <- aggregate(out$Time, by= list(out$clusterno), FUN = sum)
    d1 <- count(out, clusterno)
    d2 <- cbind(d1, d, ds)
    d2 <- d2[order(d2$n, decreasing = TRUE),]
    # Representative time = mean of the largest cluster; when the second
    # cluster is nearly as large (within 25%), average the two means.
    meanValue <- d2[1,4]
    f1 <- d2[1,2]
    f2 <- d2[2,2]
    diff <- f1*0.25
    if((f1-f2)< diff)
    {
      meanValue <- (meanValue + d2[2,4])/2
    }
    out$MeanTime <- meanValue
    out$TimeDiffer <- (meanValue - out$Time)
    # Per-stop-pair detail and summary files, plus the combined append.
    write.csv(file=paste("D:/Bus_Data/E/Out_X_",".csv",sep=i), x=out, row.names = FALSE)
    write.csv(file=paste("D:/Bus_Data/E/Out_X_","_Summary.csv",sep=i), x=d2, row.names = FALSE)
    write.table(file=paste("D:/Bus_Data/Out_X.csv"), x=out, row.names = FALSE, col.names = FALSE, quote = FALSE, sep= ",", append = TRUE)
  }
}
|
0d69a59e494ede711337baf395604051a8b028be | a500013b7a3733f72d747082e10801e98567097a | /archived_script/figure_response_time_fascet.R | ae2b7a91f0c2615c9c77cc03a0b6b6553cdaf78e | [] | no_license | robcrystalornelas/impacts_systematic_review | 4ff913c79e3f7b14d6ba79f6cc4f9e612fe68c29 | 9ed0f457f72bad9fb7de420bb7a9744dd9ada667 | refs/heads/master | 2022-03-18T14:40:31.648205 | 2019-11-20T00:19:12 | 2019-11-20T00:19:12 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,726 | r | figure_response_time_fascet.R | ## READ IN DATA ####
source("~/Desktop/Impacts Systematic Review/scripts/impacts_systematic_review/clean_raw_data.R")
## LOAD PACKAGES ####
library(dplyr)
library(ggplot2)
library(ggthemes)
## ORGANIZE DATA ####
response_time_data <- select(raw_data, publicationyear, ecosystem,invasivespeciestaxa,firstyeardetected,firstyearatsite,yearbegins,yearends,yearbinned)
head(response_time_data)
# Create new column to show response time (yearbegins - firstyeardetected)
# first, make all columns numerical
response_time_data$firstyeardetected <- as.character(response_time_data$firstyeardetected)
response_time_data$firstyeardetected <- as.numeric(response_time_data$firstyeardetected)
response_time_data$yearbegins <- as.character(response_time_data$yearbegins)
response_time_data$yearbegins <- as.numeric(response_time_data$yearbegins)
# now add new column with response time variable
response_time_data <- mutate(response_time_data, responsetime = yearbegins - firstyeardetected)
head(response_time_data)
# bin response time data
response_time_data$responsetimebinned <- cut(response_time_data$responsetime, breaks = c(0,1,5,10,500), labels = c("Rapid (0-1 years)","1.1-5 years","5.1-10 years","Slow (10.1+)"))
response_time_data
# NAs, so remove those
response_time_data_cc <- select(response_time_data, ecosystem,publicationyear,responsetimebinned)
head(response_time_data_cc)
response_time_data_cc <- response_time_data_cc[complete.cases(response_time_data_cc$responsetimebinned), ]
head(response_time_data_cc)
dim(response_time_data_cc)
## MAKE FIGURES ####
ggplot(impact_and_taxa) +
geom_bar(aes(x= impacttype, stat="bin", fill = invasivespeciestaxa)) +
facet_wrap(~invasivespeciestaxa) +
scale_fill_manual(values = colorRampPalette(solarized_pal()(8))(16)) +
theme(axis.text.x = element_text(angle = 60, hjust = 1))
gg <- ggplot(response_time_data_cc) +
geom_bar(aes(x = responsetimebinned, stat = "bin", fill = ecosystem)) +
facet_wrap(~ecosystem)
gg
gg <- gg + scale_fill_manual(values = colorRampPalette(ptol_pal()(8))(16))
gg
gg <- gg + ylab("Frequency")
gg <- gg + xlab("Response Time (year species detected - year study begins)")
gg <- gg + theme(axis.text.x = element_text(angle = 60, hjust = 1))
gg <- gg + theme(legend.position="none")
gg <- gg + guides(fill = FALSE)
gg
gg <- gg + theme(axis.text=element_text(size=12), # Change tick mark label size
axis.title=element_text(size=14,face="bold"),
axis.text.x = element_text(angle = 45, hjust = 1),
strip.text = element_text(size=12)) # Change fascet title size
gg
pdf(file="~/Desktop/Impacts Systematic Review/figures/responsetime_barplot_fascet_by_ecosystem.pdf")
gg
dev.off()
dev.off()
|
36dd29e3e33f632602514911f519d1707e2186b0 | 070a7acc8806d202f518ad262d6ed0bbc7c4a09e | /70_crawling/RSelenium_google_news.R | b2ea028007df00ea409f30729bfcca6388b12b64 | [] | no_license | alphamega89/R_edu | 1136224d99b1d07444c832ca1df7add781a2adb3 | 8fc59126d215cf513a27e0bd379b0a4ec4b36028 | refs/heads/master | 2020-06-03T17:25:08.761252 | 2019-07-04T06:32:10 | 2019-07-04T06:32:10 | 191,666,670 | 0 | 0 | null | 2019-06-13T01:10:55 | 2019-06-13T01:10:55 | null | UTF-8 | R | false | false | 1,565 | r | RSelenium_google_news.R | options(stringsAsFactors = FALSE)
# Scrape one page of Google News search results via a Selenium-driven
# Chrome session: collect pager links, headlines, sources and dates.
library("rvest")
library("RSelenium")

# Target results page (placeholder URL).
url <- "google_news_url"

# Start the Selenium server/browser on a randomly chosen port so that
# repeated runs are less likely to collide on the same port.
port_no <- sample(2000:8000, size = 1)
driver_bundle <- rsDriver(port = port_no, browser = "chrome", chromever = "74.0.3729.6")
browser_client <- driver_bundle$client
browser_client$navigate(url)

# Parse the fully rendered page source with rvest.
page_source <- browser_client$getPageSource()
page_dom <- read_html(page_source[[1]])

# Pagination links, made absolute against the Google host.
pager_hrefs <- html_attr(
  html_nodes(page_dom, xpath = "//a[@aria-label and @class='fl']"),
  "href"
)
page_links <- paste0("https://www.google.com", pager_hrefs)

# Headline text; commas are replaced with spaces.
title <- gsub(
  pattern = ",", replacement = " ",
  x = html_text(html_nodes(page_dom, xpath = "//a[@class='l lLrAF' and @href]"))
)

# Publishing outlet (first span of each result's sub-line).
ref <- gsub(
  pattern = ",", replacement = " ",
  x = html_text(html_nodes(page_dom, xpath = "//div[@class='slp']/span[1]"))
)

# Publication date (third span): dot-separated text is rewritten to a
# dash-separated form so as.Date() can parse it.
raw_dates <- html_text(html_nodes(page_dom, xpath = "//div[@class='slp']/span[3]"))
raw_dates <- gsub(pattern = "\\. ", replacement = "-", x = raw_dates)
raw_dates <- gsub(pattern = "\\.", replacement = "", x = raw_dates)
time <- as.Date(raw_dates)

# (Dead, commented-out CSS-selector variants of these extractions were
# removed from the original script.)

# Assemble the scraped fields into one table and preview it.
news_sub <- data.frame(no = 1:length(title),
                       date = time,
                       reference = ref,
                       title = title)
head(news_sub)
|
a352ce068f11c8ff22076f380c688f53b8bb1bb1 | d37601daca4cf2f6856136c8e96105083313c2f9 | /R_12_VectOps_Prac.R | 9f85935ff959da61a123f00c044daf93ea162e0c | [] | no_license | ShivKumarBS/R_Udemy | ad0c73d2d8fb85fa7300d5a40521e942a5b48749 | b04bd902523093ae663701e9fa078777652cfbb9 | refs/heads/master | 2021-07-12T22:44:32.759458 | 2017-10-16T12:51:23 | 2017-10-16T12:51:23 | 107,125,729 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 145 | r | R_12_VectOps_Prac.R | a <- c(4,5,5,12,1,53,56,6,74,0)
# Vector arithmetic practice: element-wise operations between the vector
# `a` (defined on the preceding line) and `b` below.
b <- c(5, 2, 6, 3, 6, 7, 3, 6, 7, 8)

# Both vectors hold ten elements, so no recycling occurs below.
length(a)
length(b)

# Element-wise arithmetic and comparisons.
c <- a + b  # sums
d <- a - b  # differences
e <- a > b  # TRUE where a exceeds b
f <- a < b  # TRUE where a is below b
g <- a / b  # quotients

# Echo each object (auto-printed when evaluated at top level).
a
b
c
d
e
f
g
d9762aa63c13dfb98fd427b34d16b4e624a115c3 | ea49e39c5ab2913fc372e3d0bc3292e4ae235d69 | /man/flexdashboard-package.Rd | 7ad6f982b2d425bc7b3bfd0756d31f7a2fcfef8a | [] | no_license | zerononee/flexdashboard | 5ba1af4fce665ede679e0b463406dbde7059d328 | bb9bd147faa94022cfee1aa07cf7abc7f56922fa | refs/heads/master | 2023-08-13T04:44:07.692026 | 2021-10-14T14:27:59 | 2021-10-14T14:27:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,476 | rd | flexdashboard-package.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package.R
\docType{package}
\name{flexdashboard-package}
\alias{flexdashboard}
\alias{flexdashboard-package}
\title{flexdashboard: Interactive dashboards for R}
\description{
Create interactive dashboards using \pkg{rmarkdown}.
}
\details{
\itemize{
\item{Use R Markdown to publish a group of related data visualizations as a dashboard.}
\item{Ideal for publishing interactive JavaScript visualizations based on htmlwidgets (also works with standard base, lattice, and grid graphics).}
\item{Flexible and easy to specify layouts. Charts are intelligently re-sized to fill the browser and adapted for display on mobile devices.}
\item{Optionally use Shiny to drive visualizations dynamically.}
}
See the flexdashboard website for additional documentation:
\href{http://rmarkdown.rstudio.com/flexdashboard/}{http://rmarkdown.rstudio.com/flexdashboard/}
}
\seealso{
Useful links:
\itemize{
\item \url{https://pkgs.rstudio.com/flexdashboard/}
\item \url{https://github.com/rstudio/flexdashboard/}
\item Report bugs at \url{https://github.com/rstudio/flexdashboard/issues}
}
}
\author{
\strong{Maintainer}: Carson Sievert \email{carson@rstudio.com} (\href{https://orcid.org/0000-0002-4958-2844}{ORCID})
Authors:
\itemize{
\item Richard Iannone \email{rich@rstudio.com} (\href{https://orcid.org/0000-0003-3925-190X}{ORCID})
\item JJ Allaire \email{jj@rstudio.com}
\item Barbara Borges \email{barb.b.ribeiro@gmail.com}
}
Other contributors:
\itemize{
\item RStudio [copyright holder]
\item Keen IO (Dashboard CSS) [contributor, copyright holder]
\item Abdullah Almsaeed (Dashboard CSS) [contributor, copyright holder]
\item Jonas Mosbech (StickyTableHeaders) [contributor, copyright holder]
\item Noel Bossart (Featherlight) [contributor, copyright holder]
\item Lea Verou (Prism) [contributor, copyright holder]
\item Dmitry Baranovskiy (Raphael.js) [contributor, copyright holder]
\item Sencha Labs (Raphael.js) [contributor, copyright holder]
\item Bojan Djuricic (JustGage) [contributor, copyright holder]
\item Tomas Sardyha (Sly) [contributor, copyright holder]
\item Bryan Lewis (Examples) [contributor, copyright holder]
\item Joshua Kunst (Examples) [contributor, copyright holder]
\item Ryan Hafen (Examples) [contributor, copyright holder]
\item Bob Rudis (Examples) [contributor, copyright holder]
\item Joe Cheng (Examples) [contributor]
}
}
|
df825aaaa288d9a8c88e15cc79a517d9023c40c3 | 9fda055e87c7bdce1b194d17500a828a5946abe9 | /pipelines/06_add_dr/add_dr_COMBAT.R | d23a66a85832aa37a87dd841fb33f91463dc373b | [
"MIT"
] | permissive | haniffalab/Single-cell-RNAseq-data-analysis-bundle | 8d9ccaf43415c7b160fbe4aee04d57a3d7af91e1 | 5de2e3e2d174c4252f374629fe4780072eaf2f84 | refs/heads/master | 2023-05-24T00:26:54.327013 | 2022-05-04T13:51:08 | 2022-05-04T13:51:08 | 181,858,413 | 12 | 12 | null | null | null | null | UTF-8 | R | false | false | 6,262 | r | add_dr_COMBAT.R | args = commandArgs(trailingOnly=T)
# Reassemble the command-line arguments: the caller passes a single
# ";"-separated string (possibly split across argv entries).
args <- paste(args, collapse = "")
args <- unlist(strsplit(args, ";"))
# Mapping from positional arguments to variable names.  This string is
# both parsed for validation below and eval()'d to perform the
# assignments, so its content must stay in sync with the pipeline's
# expected invocation.
arguments.list = "
seurat.addr.arg = args[1]
do.normalize.arg = args[2]
add.PCA.arg = args[3]
add.TSNE.arg = args[4]
add.UMAP.arg = args[5]
add.FDG.arg = args[6]
save.dr.arg = args[7]
"
expected_arguments <- unlist(strsplit(arguments.list, "\n"))
expected_arguments <- expected_arguments[!(expected_arguments == "")]
if (length(args) != length(expected_arguments)) {
  error.msg <- sprintf('This pipeline requires %s parameters', as.character(length(expected_arguments)))
  expected_arguments <- paste(unlist(lapply(strsplit(expected_arguments, ".arg"), "[", 1)), collapse = "\n")
  # Bug fix: the original called sprintf() with a '%s' placeholder but no
  # value argument, which itself errors ("too few arguments") and hides
  # the intended message; error.msg was computed but never used.
  stop(sprintf('%s:\n%s', error.msg, expected_arguments))
}
# NOTE(review): eval(parse(...)) on constructed strings is fragile, but
# it is the established contract of these pipeline scripts, so it stays.
eval(parse(text = arguments.list))
for (n in seq_along(expected_arguments)) {
  argument <- expected_arguments[n]
  argument <- gsub(pattern = " ", replacement = "", x = argument)
  argument.name <- unlist(strsplit(argument, "="))[1]
  variable.name <- gsub(pattern = ".arg", replacement = "", argument.name)
  argument.content <- eval(parse(text = argument.name))
  # Each *.arg value is itself an R expression (e.g. `do.normalize = T`)
  # that assigns the final variable when evaluated.
  eval(parse(text = argument.content))
  if (!exists(variable.name)) {
    stop(sprintf("Argument %s not passed. Stopping ... ", variable.name))
  }
}
# create required folders for output and work material.  Folder name is
# <pipeline dir without its numeric prefix>_<dataset name>_<timestamp>.
output_folder = gsub(pattern="^\\d+_", replacement="", x=basename(getwd()))
output_folder = paste(output_folder, seurat.addr, sep = "_")
# Build a compact timestamp from Sys.time() by stripping separators and
# the century digits.
# NOTE(review): only a " BST" timezone label is stripped, so on machines
# in other timezones the label may survive into the folder name - verify.
c.time = Sys.time()
c.time = gsub(pattern=" BST", replacement="", x=c.time)
c.time = gsub(pattern=":", replacement="", x=c.time)
c.time = gsub(pattern=" ", replacement="", x=c.time)
c.time = gsub(pattern="-", replacement="", x=c.time)
c.time = substr(x=c.time, start=3, stop=nchar(c.time))
output_folder = paste(output_folder, c.time, sep = "_")
output_folder = file.path("../../output", output_folder)
dir.create(output_folder)
# The input Seurat object lives under ../../data.
seurat.addr = file.path("../../data", seurat.addr)
# Shared helpers; presumably defines RunUMAP/runFDG and the tool_addr /
# python.addr paths used below - confirm against bunddle_utils.R.
source("../../tools/bunddle_utils.R")
library(Seurat)
library("sva")
library(plyr)
library(dplyr)
library(reshape)
#######################################################################################################
# load data
print("loading data ... ")
seurat.obj = readRDS(seurat.addr)
print("Data loaded.")
# Optionally log-normalise the data, remove the donor (fetal.ids) batch
# effect from the variable genes with ComBat (run externally in Python),
# then rescale the object.
if(do.normalize){
  print("Normalizing data ... ")
  seurat.obj = NormalizeData(object = seurat.obj, normalization.method = "LogNormalize", scale.factor = 10000)
  print("Applying COMBAT ...")
  # Batch correction is run on the variable genes only.
  expression.data = as.matrix(seurat.obj@data[seurat.obj@var.genes, ])
  pheno.data = data.frame(sample = names(seurat.obj@ident),
                          batch = seurat.obj@meta.data$fetal.ids,
                          stages = seurat.obj@meta.data$stages)
  batch = as.numeric(pheno.data$batch)
  pheno.data$batch = batch
  # NOTE(review): mod is built but never passed anywhere (ComBat runs in
  # the external Python script) and is removed below; it looks vestigial.
  mod = model.matrix(~as.factor(stages), data=pheno.data)
  # Encode "+"/"-" in CD45 cell names before the CSV round-trip; they are
  # decoded again after reading the corrected matrix back (below).
  colnames(expression.data) = gsub(pattern="CD45[+]", replacement="CD45Pos", x=colnames(expression.data))
  colnames(expression.data) = gsub(pattern="CD45[-]", replacement="CD45Neg", x=colnames(expression.data))
  write.csv(expression.data, file.path(output_folder, "data.csv"), row.names = T)
  batch = data.frame(Batch = batch)
  rownames(batch) = colnames(expression.data)
  write.csv(batch, file.path(output_folder, "batch.csv"))
  # ComBat itself runs in an external Python script over the CSVs above
  # and writes combat.csv into the same folder.
  command = sprintf("%s combat.py %s", python.addr, output_folder)
  system(command, wait = T)
  rm(mod, expression.data)
  combat_data = read.csv(file.path(output_folder, "combat.csv"), sep = ",", row.names = 1)
  print("COMBAT data loaded.")
  colnames(combat_data) = gsub(pattern="CD45Pos", replacement="CD45+", x=colnames(combat_data))
  colnames(combat_data) = gsub(pattern="CD45Neg", replacement="CD45-", x = colnames(combat_data))
  combat_data = as(as.matrix(combat_data), "dgCMatrix")
  # Rebuild a full matrix of uncorrected + corrected genes in the
  # original gene order.
  genes.not = rownames(seurat.obj@data)[!(rownames(seurat.obj@data) %in% rownames(combat_data))]
  all.expression = seurat.obj@data[genes.not, ]
  all.expression = rbind(all.expression, combat_data)
  all.expression = all.expression[rownames(seurat.obj@data), ]
  # NOTE(review): all.expression is assembled above but never used - the
  # object keeps only the ComBat-corrected variable genes.  Verify whether
  # `seurat.obj@data = all.expression` was intended here.
  seurat.obj@data = combat_data
  print("Scaling data ... ")
  seurat.obj = ScaleData(object=seurat.obj)
}
# Optionally compute each requested dimensionality reduction.
if(add.PCA){
  print("Performing PCA")
  seurat.obj = RunPCA(object = seurat.obj, pc.genes = seurat.obj@var.genes, do.print = FALSE)
}
if (add.TSNE){
  print("Performing tSNE")
  # Fixed seed so the t-SNE layout is reproducible across runs.
  seurat.obj = RunTSNE(object=seurat.obj, dims.use=1:20, seed.use=42, do.fast=TRUE)
}
if (add.UMAP){
  # run umap on the PCA cell embeddings.  RunUMAP is not defined in this
  # file - presumably sourced from bunddle_utils.R; it takes external
  # tool/python paths.
  print("running UMAP")
  umap.coordinates = RunUMAP(pca.df=seurat.obj@dr$pca@cell.embeddings, tool_addr=tool_addr, python.addr=python.addr)
  rownames(umap.coordinates) = names(seurat.obj@ident)
  seurat.obj = SetDimReduction(object=seurat.obj, reduction.type="umap", slot="cell.embeddings", new.data=as.matrix(umap.coordinates))
  seurat.obj = SetDimReduction(object=seurat.obj, reduction.type="umap", slot="key", new.data="umap")
}
if (add.FDG){
  # run force-directed graph on the SNN built from the first 20 PCs
  # (runFDG is a helper, same provenance as RunUMAP above).
  print("Running force directed graph")
  seurat.obj = BuildSNN(object=seurat.obj, reduction.type="pca", dims.use=1:20, plot.SNN=F, force.recalc=T)
  fdg_coordinates = runFDG(pca.df=seurat.obj@dr$pca@cell.embeddings, snn=seurat.obj@snn, iterations=2000, tool_addr=tool_addr, python.addr=python.addr)
  seurat.obj = SetDimReduction(object=seurat.obj, reduction.type="fdg", slot="cell.embeddings", new.data=as.matrix(fdg_coordinates))
  seurat.obj = SetDimReduction(object=seurat.obj, reduction.type="fdg", slot = "key", new.data = "fdg")
}
# Persist the updated object back to its original location.
print("Saving Seurat object")
saveRDS(seurat.obj, seurat.addr)
# Either export every embedding plus the metadata to CSV, or discard the
# work folder created at the top of the script.
if(save.dr){
  CellNames = as.vector(names(seurat.obj@ident))
  tSNEdata = seurat.obj@dr$tsne@cell.embeddings
  UMAPdata = seurat.obj@dr$umap@cell.embeddings
  FDGdata = seurat.obj@dr$fdg@cell.embeddings
  PCAdata = seurat.obj@dr$pca@cell.embeddings
  colnames(tSNEdata) = c("tSNEx", "tSNEy")
  colnames(UMAPdata) = c("UMAPx", "UMAPy")
  colnames(FDGdata) = c("FDGx", "FDGy")
  dr_md_df = data.frame(CellNames = CellNames)
  dr_md_df = cbind(dr_md_df, tSNEdata, UMAPdata, FDGdata, PCAdata, seurat.obj@meta.data)
  save.to = file.path(output_folder, "dr_and_metadata.csv")
  write.csv(dr_md_df, save.to)
}else{
  unlink(output_folder, recursive=T, force=T)
}
# Remove the stray plot file R creates when run non-interactively.
file.remove("Rplots.pdf")
print("Ended beautifully ... ")
|
c955dd09d7235107369f56961bc0ffe93a58b44c | e6fe0284ac73cb98b1ffd4cd4831bd65d43bd546 | /FraminghamHeartStudy(Logistic Regression).R | 048a57e4d690af4c31a3a4a0462c468d883ccc51 | [] | no_license | zahidmak/dataAnalysis | 08df3707647e6aa08721d0827f288e980028f90d | 88814f11ba41f07275be26d1330c0b86219a5bc3 | refs/heads/master | 2021-04-30T22:37:07.599997 | 2016-10-17T02:25:25 | 2016-10-17T02:25:25 | 71,092,579 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 570 | r | FraminghamHeartStudy(Logistic Regression).R | framingham=read.csv("C:/Users/Zahid/Downloads/framingham.csv")
str(framingham)
library("caTools")
set.seed(1000)
split=sample.split(framingham$TenYearCHD, SplitRatio=0.65)
train=subset(framingham, split==TRUE)
test=subset(framingham, split==FALSE)
framinghamLog=glm(TenYearCHD ~ .,data=train, family=binomial)
summary(framinghamLog)
predictTest=predict(framinghamLog, type="response", newdata=test)
table(test$TenYearCHD, predictTest>0.5)#Confusion Metrix
library("ROCR")
ROCRpred=prediction(predictTest, test$TenYearCHD)
as.numeric(performance(ROCRpred,"auc")@y.values) |
37c181841475df08ee1934ed73fd35ca1c8a3606 | ffce1eae6403512dd96e15d50adb19ad79e800e7 | /man/rct.Rd | 90f6194875f33aae8d90d289b0e297d55191a491 | [] | no_license | willdebras/rctheatre | 0ec1141c262a3ba38a4e7d9ea3adca1b5ecad140 | 7bdb8523b99ff77c545804058577ca42191c658a | refs/heads/master | 2020-06-17T09:39:28.089170 | 2019-07-09T21:05:31 | 2019-07-09T21:05:31 | 195,884,107 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,991 | rd | rct.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rct.R
\name{rct}
\alias{rct}
\title{rct function}
\usage{
rct(cases, type = "square", border = "#1239DC", xlim = c(0, 1),
ylim = c(0, 1), fill = NULL, xlab = NULL, ylab = "",
lab.cex = 1, seed = NULL, plot.new = TRUE, label = FALSE,
lab.col = "#1239DC", draw.plot = TRUE, ...)
}
\arguments{
\item{cases}{single number or vector giving the number of seats to shade. If a vector is supplied, the values indicate how many seats of each colour to shade. The sum of this vector gives the total number of seats shaded}
\item{type}{the floor plan to be used. Current options are "square", "theatre" (the original Rifkin and Bouwer floor plan), "stadium" and "bigsquare"}
\item{border}{the color for the outlines of the floor plan}
\item{xlim}{range of x axis. Note that the theate sits in the unit square with bottom corner (0, 0) and top corner (1, 1)}
\item{ylim}{range of y axis}
\item{fill}{vector of colours for shading seats, defaults to grey}
\item{xlab}{text label to appear below floor plan. Defaults to "x cases in n"}
\item{lab.cex}{character expansion factor (see 'par') to specify size of text labels (if any) on the floor plan}
\item{seed}{specify the starting seed value for the random number generator. Setting this makes it possible to reproduce exactly the same shaded seats on successive calls of rct}
\item{plot.new}{if FALSE, the theatre is drawn over the top of an existing plot}
\item{label}{if TRUE, any text labels for the specified floor plan will be displayed}
\item{lab.col}{colour used for any text labels}
\item{draw.plot}{if this is FALSE, the RCT is not drawn and instead a data frame is returned showing the seats that would have been shaded and the colours that would have been used}
\item{...}{any additional parameters are passed through to the plot call that sets up the chart}
}
\value{
returns a risk characterization theatre plot
}
\description{
rct function
}
|
3978b8a1f99feb0fa89d171e5a9bca1d80166a02 | 6bb3dc3d43ee66307c1ed6f5940141be0e481936 | /R/man/predict_model.easy_glinternet.Rd | 8dfd77050737051b322d46570f42f8cd03d04610 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | CCS-Lab/easyml | 7f41db72122f5de23f75381bcc03e13d65536c1b | c334d1174ee5795734f900d2cfa2836b76834bc9 | refs/heads/master | 2023-06-12T07:09:22.847009 | 2023-06-02T05:27:51 | 2023-06-02T05:27:51 | 71,721,801 | 40 | 19 | NOASSERTION | 2022-06-21T21:11:33 | 2016-10-23T18:38:28 | R | UTF-8 | R | false | true | 659 | rd | predict_model.easy_glinternet.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/glinternet.R
\name{predict_model.easy_glinternet}
\alias{predict_model.easy_glinternet}
\title{Predict values for a penalized regression model with interactions.}
\usage{
\method{predict_model}{easy_glinternet}(object, newx = NULL)
}
\arguments{
\item{object}{A list of class \code{easy_glinternet}.}
\item{newx}{A data.frame, the new data to use for predictions.}
}
\value{
A vector, the predicted values using the new data.
}
\description{
This function wraps the procedure for predicting values from
a glinternet model and makes it accessible
to the easyml core framework.
}
|
63d87f382abb3a1662aa51a550a8e841f5d15f43 | 6682b2abe94c8ddc3bc6d8d0d82f86182026b554 | /data_cleanup.r | d940b7d26bf8dd919e7337e5471dc6b06f98c8a5 | [
"CC-BY-4.0"
] | permissive | ikurecic/pds2020_lockdowns | 75663097f6858f7b2776885d4649a4ddf711df99 | e836c7a56c9fd3fff14dbc1e1aa5de95b1f421c6 | refs/heads/main | 2023-04-16T07:58:15.260122 | 2022-08-05T09:29:16 | 2022-08-05T09:29:16 | 521,168,591 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,652 | r | data_cleanup.r | library(tidyverse)
# deaths are cumulative
deaths_by_date <- "https://github.com/CSSEGISandData/COVID-19/blob/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_US.csv?raw=true" %>%
read_csv() %>%
rename(State=Province_State) %>%
gather('1/22/20':last_col(), key="date", value="deaths") %>%
group_by(State, date) %>%
summarise(deaths = sum(deaths)) %>%
mutate(date = as.Date(date, "%m/%d/%y"))
unemployment <- "r539cy.xls.csv" %>%
read_csv(col_types = cols_only(State=col_guess(),
"Filed week ended"=col_date("%m/%d/%Y"),
"Initial Claims"=col_guess())) %>%
rename(date=`Filed week ended`, unemployment_claims=`Initial Claims`)
deaths_and_unemployment <- left_join(deaths_by_date, unemployment) %>%
fill(unemployment_claims) %>%
arrange(date, .by_group=TRUE)
policy <- "test3.csv" %>%
read_csv(col_types = cols("Stay at Home Announced"=col_date("%d-%b-%y"),
"Policy Begin"=col_date("%d-%b-%y"),
"Policy End"=col_date("%d-%b-%y")))
all_data <- right_join(deaths_and_unemployment, policy) %>%
mutate(under_lockdown = date >= `Policy Begin` & date < `Policy End`) %>%
mutate(deaths_in_day=deaths-lag(deaths)) %>%
mutate(deaths_relative_difference=deaths_in_day/lag(deaths_in_day)) %>%
write_csv("all_data.csv")
all_data %>%
select(State,date,deaths,deaths_in_day,deaths_relative_difference,unemployment_claims,Party,Region,Population_Density,Percent_of_GDP,GDP_per_Capita,under_lockdown) %>%
write_csv("all_useful_data.csv")
|
268c687c95257f84641eaff64d8156454bfbf845 | 7ec2982f972a6e2874106da57d0318a06b806390 | /Coffee_RR.R | 192dffe2ccd65f5ebf2482db72b06fec08cbfb1d | [
"MIT"
] | permissive | jamesjin63/UkBio-Bank-BC | 65d7d21151b5cacddbfcaaa32658467dab592857 | fe097bc396b0600b5d550c6c1ccc11777ec14b9c | refs/heads/main | 2023-08-30T09:45:41.876473 | 2021-11-06T08:44:31 | 2021-11-06T08:44:31 | 425,195,928 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,959 | r | Coffee_RR.R | library(tidyverse)
library(doParallel)
rm(list=ls())
load("~/Desktop/PhDwork/Bricks/Lina/UKB-drink/bd_drink.Rdata")
load("~/Desktop/PhDwork/Bricks/Lina/UKB-drink/df_coffee_tea.Rdata")
load("~/Desktop/PhDwork/Bricks/Lina/UKB-drink/bd_enroll.Rdata")
load("~/Desktop/PhDwork/Bricks/Lina/UKB-drink/bd_followup.Rdata")
# x=bd_register %>%
# select(f.eid,f.34.0.0,f.21022.0.0,
# f.40006.0.0,f.40006.1.0,f.40006.2.0,f.40006.3.0,
# f.40006.4.0,f.40006.5.0,f.40006.7.0,f.40006.8.0,
# f.40006.9.0,f.40006.10.0,f.40006.11.0,f.40006.12.0,
# f.40006.13.0,f.40006.14.0,f.40006.15.0,f.40006.16.0) %>%
# drop_na(7)
#
# x=bd_register %>%
# select(f.eid,f.34.0.0,f.21022.0.0,
# f.40008.0.0,f.40008.1.0,f.40008.2.0,f.40008.3.0,
# f.40008.4.0,f.40008.5.0,f.40008.6.0,f.40008.7.0,
# f.40008.8.0,f.40008.9.0,f.40008.10.0,f.40008.11.0,
# f.40008.12.0,f.40008.13.0,f.40008.14.0,f.40008.15.0,f.40008.16.0,
# ) %>%
# drop_na(5)
####################################
### 筛选病人
####################################
death=bd_death %>% select(f.eid,f.40000.0.0) %>%
set_names("f.eid","death_time")
Coffee=df %>% select(f.eid,Coffee)
x=bd_register %>% ## 这里的诊断时间,按照第一次算
select(f.eid,f.34.0.0,f.21022.0.0,f.40005.0.0,f.40006.0.0) %>%
as_tibble() %>% left_join(bd_enroll) %>% left_join(bd_followup) %>%
left_join(.,death) %>% left_join(.,Coffee) %>%
select(f.eid,f.34.0.0,f.40005.0.0,f.40006.0.0,f.53.0.0,f.191.0.0,death_time,Coffee) %>%
set_names("f.eid","born_year","dignosde_time","ICD10","enter_time",
"followup_time","death_time","Coffee")
### 1.排除入组前有癌症的病人: 诊断时间< 入组时间
xa= x %>% filter(dignosde_time<enter_time)
### 2.排除 失访的病人(失访时间<诊断时间)
xb= x %>% filter(followup_time<dignosde_time)
### 3.排除 死亡的病人 (死亡时间<诊断时间)
xc= x %>% filter(death_time<dignosde_time)
### 4.排除 没有喝咖啡记录的人
xd1= x %>% drop_na(Coffee)
xd= x %>% filter(!f.eid %in% xd1$f.eid)
### 5.随访时间不足一年 (入组不到一年,就发生癌症,死亡,或失访,其中任意一个)
xe= x %>% filter(followup_time<(enter_time+365) |
(dignosde_time<(enter_time+365) & dignosde_time>(enter_time) )|
death_time<(enter_time+365) )
####################################
### 符合病人
####################################
##### 随访出现癌症的
df_cancer= x %>% filter(!f.eid %in% c(xa$f.eid,xb$f.eid,xc$f.eid,xd$f.eid,xe$f.eid)) %>%
drop_na(dignosde_time) %>%
mutate(cancer=substr(ICD10,1,3)) %>%
mutate(BC=ifelse(cancer=="C50","Yes","No"),
Coffee=ifelse(Coffee=="No","No","Yes"))
##### 随访未出现癌症的
df_cancer_non= x %>% filter(!f.eid %in% c(xa$f.eid,xb$f.eid,xc$f.eid,xd$f.eid,xe$f.eid)) %>%
filter(!f.eid %in% c(df_cancer$f.eid)) %>%
mutate(cancer=substr(ICD10,1,3)) %>%
mutate(BC=ifelse(cancer=="C50","Yes","No"),
Coffee=ifelse(Coffee=="No","No","Yes"))
### 乳腺癌比较
dfall=rbind(df_cancer_non,df_cancer) #%>% filter(enter_time>as.Date("2010-01-01"))
write.csv(dfall,"BC_cancer_coffee_tea.csv")
##### 随访出现癌症的
df_Coffee= dfall %>%
filter(Coffee=="Yes")
table(df_Coffee$BC)
##### 随访未出现癌症的
df_Coffee_non= dfall %>%
filter(Coffee=="No")
table(df_Coffee_non$BC)
library(epitools)
RRtable<-matrix(c(345,22970,152,11169),nrow = 2, ncol = 2)
RRtable
riskratio.wald(RRtable)
#write.csv(dfall,"Coffe_BC.csv")
################################1################################1
######### covariate:
################################1################################1
# For multivariable analysis:
# 1. Demographic variables: age, race/ethnicity, education
# 2. Lifestyle variables: smoking history pack-years of smoking, alcohol intake, total energy intake,
# physical activity, BMI, sleep duration
# 3. Reproductive variables: family history of BC, oral contraceptive use ever history of menopausal
# HT use, age at menarche, age at menopause, age at first full-term birth, number of live birth
################################1
dfbase1=bd_register %>%
select(f.eid,f.21022.0.0,f.21001.0.0,f.20116.0.0,
f.20414.0.0,f.1160.0.0,f.20110.0.0
) %>%
set_names("f.eid","Age","BMI","smoking_status",
"alcohol_intake","sleep_duration","BC_history")
################################2
dfbase2=bd_death %>%
select(f.eid,f.845.0.0,f.2724.0.0,f.2814.0.0
) %>%
set_names("f.eid","Age_education","Had_menopause","HRT")
################################3
dfbase3=bd_followup%>%
select(f.eid,f.2714.0.0,f.2734.0.0,f.2754.0.0,f.21000.0.0) %>%
set_names("f.eid","age_menarch",
"Number_of_live_births",
"Age_first_live_birth",
"Ethnics")
################################ all
dfbase=left_join(dfbase1,dfbase2) %>% left_join(.,dfbase3)
dfbase=left_join(dfbase1,dfbase2) %>% left_join(.,dfbase3) %>%
as_tibble() %>%
filter(!Age_first_live_birth %in% c(-4,-3)) %>%
filter(!Number_of_live_births %in% c(-4,-3)) %>%
filter(!age_menarch %in% c(-4,-3,-1)) %>%
filter(!Age_education%in% c(-4,-3)) %>%
filter(!sleep_duration%in% c(-4,-3)) %>%
mutate(sleep_duration=cut(sleep_duration,breaks=c(-Inf,5,7,9,Inf),
labels=c("<=5","6-7","8-9",">=10"))) %>%
mutate(sleep_duration=as.factor(sleep_duration)) %>%
mutate(BC_history=as.factor(ifelse(BC_history=="Breast cancer","Yes","No"))) %>%
mutate(age_menarch=as.factor(cut(age_menarch,breaks=c(-Inf,11,14,Inf),
labels=c("<=11","12-13",">=14")))) %>%
mutate(smoking_status=as.factor(smoking_status),
alcohol_intake=as.factor(alcohol_intake),
Had_menopause=as.factor(Had_menopause),
HRT=as.factor(HRT),
Ethnics=as.factor(Ethnics))
|
7413d93138861e8773f1690a278cd410f9d69a6c | db78542ec83aa66cb8a543a94463bb99c58151e7 | /01 BASICS/307 Matrix Selections.r | e6eb6adab84b22e589167a55411360edd6e83ea4 | [] | no_license | chunhuayu/R-Learning | 59ee2567fb910c5124492da84603069ee5b9e2f1 | 36ede3bb562dca07029a8411e230b970e69f22e5 | refs/heads/master | 2020-05-09T14:59:05.094442 | 2019-06-29T03:06:52 | 2019-06-29T03:06:52 | 181,216,574 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,953 | r | 307 Matrix Selections.r | ### Selection of matrix elements
# Similar to vectors, you can use the square brackets [ ] to select one or multiple elements from a matrix.
# Whereas vectors have one dimension, matrices have two dimensions.
# You should therefore use a comma to separate the rows you want to select from the columns. For example:
# my_matrix[1,2] selects the element at the first row and second column.
# my_matrix[1:3,2:4] results in a matrix with the data on the rows 1, 2, 3 and columns 2, 3, 4.
# If you want to select all elements of a row or a column, no number is needed before or after the comma, respectively:
# my_matrix[,1] selects all elements of the first column.
# my_matrix[1,] selects all elements of the first row.
# Back to Star Wars with this newly acquired knowledge! As in the previous exercise, all_wars_matrix is already available in your workspace.
### INSTRUCTIONS
# Select the non-US revenue for all movies (the entire second column of all_wars_matrix), store the result as non_us_all.
# Use mean() on non_us_all to calculate the average non-US revenue for all movies. Simply print out the result.
# This time, select the non-US revenue for the first two movies in all_wars_matrix. Store the result as non_us_some.
# Use mean() again to print out the average of the values in non_us_some.
> # all_wars_matrix is available in your workspace
> all_wars_matrix
US non-US
A New Hope 461.0 314.4
The Empire Strikes Back 290.5 247.9
Return of the Jedi 309.3 165.8
The Phantom Menace 474.5 552.5
Attack of the Clones 310.7 338.7
Revenge of the Sith 380.3 468.5
>
> # Select the non-US revenue for all movies
> non_us_all <- all_wars_matrix[,2]
>
> # Average non-US revenue
> mean(non_us_all)
[1] 347.9667
>
> # Select the non-US revenue for first two movies
> non_us_some <- non_us_all[1:2]
>
> # Average non-US revenue for first two movies
> mean(non_us_some)
[1] 281.15
>
|
5501895778916e56774141ff2db44600776ad967 | e666d8e06b29ce5bb4e4b878517c748bfa65099d | /man/graph.mult.scaling.Rd | e923064b91e98e9bfad85ca98fd74daf3748fef1 | [] | no_license | cran/statGraph | fd264831034dc287dbc9dca79c6c787e4d730026 | 3d2f65b73696edf483d0bf3dae8223915df2b8bd | refs/heads/master | 2021-07-09T12:10:21.174424 | 2021-05-19T06:00:06 | 2021-05-19T06:00:06 | 89,013,842 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,947 | rd | graph.mult.scaling.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/statGraph.R
\name{graph.mult.scaling}
\alias{graph.mult.scaling}
\title{Multidimensional scaling of graphs}
\usage{
graph.mult.scaling(
G,
plot = TRUE,
bandwidth = "Silverman",
type = "n",
main = "",
...
)
}
\arguments{
\item{G}{a list of undirected graphs (igraph type) or their adjacency
matrices. The adjacency matrix of an unweighted graph contains only 0s and
1s, while the weighted graph may have nonnegative real values that correspond
to the weights of the edges.}
\item{plot}{logical. If TRUE (default) the points chosen to represent the
Jensen-Shannon divergence between graphs are plotted.}
\item{bandwidth}{string showing which criterion is used to choose the
bandwidth during the spectral density estimation. Choose between the
following criteria: "Silverman" (default), "Sturges", "bcv", "ucv" and "SJ".
"bcv" is an abbreviation of biased cross-validation, while "ucv" means
unbiased cross-validation. "SJ" implements the methods of Sheather & Jones
(1991) to select the bandwidth using pilot estimation of derivatives.}
\item{type}{what type of plot should be drawn. The defaut value is '"n"',
which indicates that the points will not be plotted (i. e. only the labels
of the graphs will be plotted).}
\item{main}{title of the plot (default value is "").}
\item{...}{additional plotting parameters. See 'plot' function from the
'graphics' package for the complete list.}
}
\value{
A matrix in which each column corresponds to a coordinate and each
row corresponds to a graph (point). Then, each row gives the coordinates of
the points chosen to represent the Jensen-Shannon divergence between graphs.
}
\description{
\code{graph.mult.scaling} performs multidimensional scaling of graphs. It
takes the Jensen-Shannon divergence between graphs (JS) and uses the
'cmdscale' function from the 'stats' package to obtain a set of points such
that the distances between the points are similar to JS.
}
\examples{
set.seed(1)
G <- list()
for (i in 1:5) {
G[[i]] <- igraph::sample_gnp(50, 0.5)
}
for (i in 6:10) {
G[[i]] <- igraph::sample_smallworld(1, 50, 8, 0.2)
}
for (i in 11:15) {
G[[i]] <- igraph::sample_pa(50, power = 1, directed = FALSE)
}
graph.mult.scaling(G)
}
\references{
Takahashi, D. Y., Sato, J. R., Ferreira, C. E. and Fujita A. (2012)
Discriminating Different Classes of Biological Networks by Analyzing the
Graph Spectra Distribution. _PLoS ONE_, *7*, e49949.
doi:10.1371/journal.pone.0049949.
Silverman, B. W. (1986) _Density Estimation_. London: Chapman and Hall.
Sturges, H. A. The Choice of a Class Interval. _J. Am. Statist. Assoc._,
*21*, 65-66.
Sheather, S. J. and Jones, M. C. (1991). A reliable data-based bandwidth
selection method for kernel density estimation.
_Journal of the Royal Statistical Society series B_, 53, 683-690.
http://www.jstor.org/stable/2345597.
}
\keyword{multidimensional_scaling}
|
6e93faa069842ea8d21f69e57de044cbbfc64dab | d20145e080798319e0028a4c9d8851f619285c29 | /ch05/R/soccer.R | 5866fb18d4fbac396abbcb1e6d615d16d22f9bf6 | [] | no_license | friendly/VCDR | da441b8306969fd6469a3bbc6a4475e028c61e29 | 818156ee0a26943a656ae915d924773c87654cec | refs/heads/master | 2023-02-18T08:40:28.634897 | 2014-12-05T13:24:55 | 2014-12-05T13:24:55 | 16,708,002 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 591 | r | soccer.R | data("UKSoccer", package="vcd")
names(dimnames(UKSoccer)) <- paste(names(dimnames(UKSoccer)), "Goals")
chisq.test(UKSoccer)
CMHtest(UKSoccer)
mosaic(UKSoccer, shade=TRUE)
set.seed(1234)
mosaic(UKSoccer, gp=shading_max, labeling=labeling_residuals, digits=2)
BL1995 <- xtabs(~ HomeGoals + AwayGoals, data=Bundesliga,
subset = Year == 1995)
mosaic(BL1995, shade=TRUE, gp=shading_max)
# Bundesliga, all years with 306 games
BL <- xtabs(~ HomeGoals + AwayGoals, data=Bundesliga,
subset = Year > 1964)
# keep just 0:6 goals
BL <- BL[1:7, 1:7]
BL
mosaic(BL, shade=TRUE, gp=shading_max)
|
beeec0f3a8bb8c7c9a886cc1c53f59daca5b9de1 | 330780a5bba7ff8d032a34339508d8e8a0b189fa | /cachematrix.R | 97a601d4bfd2aec5377221fe9d5188c925359233 | [] | no_license | rajkumar0101/ProgrammingAssignment2 | 48101ef1cbe7b1c13547f79d554a50b4f71283d6 | 81a787e557e2ce6392a9631942bb95cbf2ad48a1 | refs/heads/master | 2022-11-28T10:53:17.253408 | 2020-06-29T03:01:21 | 2020-06-29T03:01:21 | 274,742,135 | 0 | 0 | null | 2020-06-24T18:32:15 | 2020-06-24T18:32:14 | null | UTF-8 | R | false | false | 1,162 | r | cachematrix.R | ## This script provides two functions makeCacheMatrix() and cacheSolve()
## that cache the inverse of a matrix.
## The first function, makeCacheMatrix() creates a special "matrix",
## which is really a list containing a function to
## 1. set the value of the matrix
## 2. get the value of the matrix
## 3. set the value of the inverse
## 4. get the value of the inverse
makeCacheMatrix <- function(x = matrix()) {
k <- NULL
set <- function(y) {
x <<- y
k <<- NULL
}
get <- function() x
setinverse <- function(inverse) k <<- inverse
getinverse <- function() k
list(set = set,
get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## cacheSolve() computes the inverse of the special "matrix" returned by makeCacheMatrix
## above
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
k <- x$getinverse()
if (!is.null(k)) {
message("getting cached data")
return(k)
}
data <- x$get()
k <- solve(data, ...)
x$setinverse(k)
k
}
## Testing the function
mat <- matrix(1:4, 2, 2)
mcm <- makeCacheMatrix(mat)
mcm$get()
mcm$getinverse()
cacheSolve(mcm)
|
648059baad24a32059da306ceb37e86051b1f79e | 1db3390483611dad623d984fc1d18c277af3ed4e | /man/hpcc.rollup.Rd | 4981475ba6c2dc40b80a76fed6da9c7e983d4977 | [] | no_license | Saulus/rHpcc | 7105a535b4b62c736625c74175114ea61e7aa30c | 5fef5811fe0a63555a66e30c05bb4ffef46ad7ce | refs/heads/master | 2021-01-14T14:10:50.315049 | 2014-11-24T09:17:54 | 2014-11-24T09:17:54 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,512 | rd | hpcc.rollup.Rd | \name{hpcc.rollup}
\alias{hpcc.rollup}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
title
}
\description{
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
hpcc.rollup(recordset, condition = NULL, transfunc, GROUPED = FALSE, out.dataframe, fieldList = NULL, local = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{recordset}{
%% ~~Describe \code{recordset} here~~
}
\item{condition}{
%% ~~Describe \code{condition} here~~
}
\item{transfunc}{
%% ~~Describe \code{transfunc} here~~
}
\item{GROUPED}{
%% ~~Describe \code{GROUPED} here~~
}
\item{out.dataframe}{
%% ~~Describe \code{out.dataframe} here~~
}
\item{fieldList}{
%% ~~Describe \code{fieldList} here~~
}
\item{local}{
%% ~~Describe \code{local} here~~
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (recordset, condition = NULL, transfunc, GROUPED = FALSE,
out.dataframe, fieldList = NULL, local = FALSE)
{
if (missing(dataframe)) {
stop("no dataframe")
}
out.dataframe <- .hpcc.get.name()
code <- sprintf("\%s := ROLLUP(\%s", out.dataframe, recordset)
a <- TRUE
if (is.not.null(condition)) {
code <- sprintf("\%s,\%s", code, condition)
a <- FALSE
}
else if (GROUPED) {
code <- sprintf("\%s,GROUP")
a <- FALSE
}
code <- sprintf("\%s,\%s(LEFT,RIGHT)", code, transfunc)
if (a & is.not.null(fieldList)) {
code <- sprintf("\%s,\%s", code, fieldList)
}
if (local) {
code <- sprintf("\%s,LOCAL", code)
}
code <- sprintf("\%s)", code)
.hpcc.submit(code)
return(out.dataframe)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
cea3dfed89ecb27832578a861d80e4fe258a881d | 1721a042328b7035f6a6c401d9d8ba3edc538dce | /4_LI_Subset_acStrat/DFA_LocalInfo_subsetAcstrat.R | 8a5a779d90bb3d0b2626fd5afbb1ff1d1af8f051 | [] | no_license | ftheunissen/Woodpecker | dbfd1bb15212574b8083543ef465c51f768954f2 | 0521927adfbf73ecdd4dcf4e3484ae949a1858b7 | refs/heads/master | 2022-10-16T10:59:57.410738 | 2020-06-17T16:53:08 | 2020-06-17T16:53:08 | 273,028,370 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 18,900 | r | DFA_LocalInfo_subsetAcstrat.R | library(MASS)
library(car)
library(ggplot2)
library(FactoMineR)
library(dplyr)
library(lme4)
setwd("../4_LI_Subset_acStrat/")
rm(list=ls())#empty the workspace
# Read the z-scored data
data=read.table(file="AcousticsFINAL_Zscored.csv", header=T, sep=",",dec=".")
(nrow(data)) # Number of measurements
(ncol(data)) # Number of features considered (acoustical + metadata)
# PCA calculation
resPCA <- PCA (data, quali.sup=1:16, ncp=22, scale.unit = T) # quali.sup= categorical supplementary variables (here species, genus...) not used in PCA calculation; ncp= nb of PC to report (initially, should be equal to the number of variables used in PCA)
resPCA$eig # get percentages of variance explained by each PC and eigenvalues (make sure data are z-scored before this)
resPCA$var$cor # get loading scores of each acoustic variable onto PCs
resPCA$ind$coord # get PC scores for each 'individual' (i.e. here: each drumming file) in the dataset
# Note that 6 componnents explain 75% of the variance; 10 would give 90%.
PCALoadings <- resPCA$var$cor[,1:6] #based on the PCs for which eigenvalues are > 1
# write.csv(PCALoadings, "PCALoadings.csv")
# adding PCs (with eigenval > 1) to dataframe
data$PC1<-resPCA$ind$coord[,1]
data$PC2<-resPCA$ind$coord[,2]
data$PC3<-resPCA$ind$coord[,3]
data$PC4<-resPCA$ind$coord[,4]
data$PC5<-resPCA$ind$coord[,5]
data$PC6<-resPCA$ind$coord[,6]
output_LIsubsetAcStrat <- matrix(0, nrow = 100, ncol = 8) # 8 columns,1 for overall classif %age, 1 for each acoustic strategy + 1 for full dataset; 1 row by iteration
colnames(output_LIsubsetAcStrat) <- c("Classification %age", "MeanLI_Overall","MeanLI_AcStrat1", "MeanLI_AcStrat2", "MeanLI_AcStrat3", "MeanLI_AcStrat4", "MeanLI_AcStrat5", "MeanLI_AcStrat6")
for (iteration in 1:100){
rm(list=setdiff(ls(), c("data","iteration","output_LIsubsetAcStrat","resPCA","PCALoadings")))
################### Run cross-validated DFA based on all files available (ranging from 3 to 30 files per species)
# Edit for this part: need subset of same nb of species per acoustic strategies
AcStrat1 <- droplevels(data[data$AcousticClust==1,])
AcStrat2 <- droplevels(data[data$AcousticClust==2,])
AcStrat3 <- droplevels(data[data$AcousticClust==3,])
AcStrat4 <- droplevels(data[data$AcousticClust==4,])
AcStrat5 <- droplevels(data[data$AcousticClust==5,])
AcStrat6 <- droplevels(data[data$AcousticClust==6,])
randAcStrat1 <- sample(length(levels(AcStrat1$Species)),5, replace = F)
subAcStrat1 <- droplevels(AcStrat1[AcStrat1$Species %in% levels(AcStrat1$Species)[randAcStrat1],])
randAcStrat2 <- sample(length(levels(AcStrat2$Species)),5, replace = F)
subAcStrat2 <- droplevels(AcStrat2[AcStrat2$Species %in% levels(AcStrat2$Species)[randAcStrat2],])
randAcStrat3 <- sample(length(levels(AcStrat3$Species)),5, replace = F)
subAcStrat3 <- droplevels(AcStrat3[AcStrat3$Species %in% levels(AcStrat3$Species)[randAcStrat3],])
randAcStrat4 <- sample(length(levels(AcStrat4$Species)),5, replace = F)
subAcStrat4 <- droplevels(AcStrat4[AcStrat4$Species %in% levels(AcStrat4$Species)[randAcStrat4],])
randAcStrat5 <- sample(length(levels(AcStrat5$Species)),5, replace = F)
subAcStrat5 <- droplevels(AcStrat5[AcStrat5$Species %in% levels(AcStrat5$Species)[randAcStrat5],])
randAcStrat6 <- sample(length(levels(AcStrat6$Species)),5, replace = F)
subAcStrat6 <- droplevels(AcStrat6[AcStrat6$Species %in% levels(AcStrat6$Species)[randAcStrat6],])
subdata <- rbind(subAcStrat1,subAcStrat2,subAcStrat3,subAcStrat4,subAcStrat5,subAcStrat6)
#subdata2 <- subdata[order(as.character(subdata$Species)),]
#subdata2 <- subdata[order(row.names(subdata$Species)),]
## DFA with ALL data and CV
ndata=nrow(subdata)
species.names = levels(subdata$Species) #'Species' is the variable by which we want to discriminate drums
nbirds=length(species.names)
# There are nbirds = 92 species.
resLDA=lda(Species~PC1+PC2+PC3+PC4+PC5+PC6, prior=rep(1,nbirds)/nbirds, data=subdata, CV=T) # LDA computation. Important for information calculation: equal prior probabilities
tab <- table(subdata$Species, resLDA$class)
tab # Count table
sum(tab[row(tab) == col(tab)]) / sum(tab) #correct classification of drums on the classification matrix's diagonal
# Probability table
prob.tab <- tab/sum(tab)
# Conditional Probability table or Confusion Matrix using posterior probabilities
# Rows are actual species and columns the probability of being assigned to another species
# As these are conditional probabilities - the sum of each row sums to 1.
cprob.tab.post=matrix(0,nrow=nbirds,ncol=nbirds)
colnames(cprob.tab.post) <- colnames(prob.tab)
rownames(cprob.tab.post) <- rownames(prob.tab)
# Note colnames(prob.tab) == rownames(prob.tab) == species.names
for (i in 1:nbirds) {
for (j in 1:nbirds) {
# Select the rows that correspond to a specific species
ind.good <- (subdata$Species == species.names[i])
cprob.tab.post[i,j] <- mean(resLDA$posterior[ind.good,j])
}
}
# mean classification %age
(pcc <- mean(diag(cprob.tab.post)))
output_LIsubsetAcStrat[iteration,1] <- pcc
# classification %age at chance level
(chance=1/nbirds)
# ----- Define a function for plotting a matrix -----found here: http://www.phaget4.org/R/image_matrix.html #
myImagePlot <- function(x, ...){
min <- min(x)
max <- max(x)
yLabels <- rownames(x)
xLabels <- colnames(x)
title <-c()
# check for additional function arguments
if( length(list(...)) ){
Lst <- list(...)
if( !is.null(Lst$zlim) ){
min <- Lst$zlim[1]
max <- Lst$zlim[2]
}
if( !is.null(Lst$yLabels) ){
yLabels <- c(Lst$yLabels)
}
if( !is.null(Lst$xLabels) ){
xLabels <- c(Lst$xLabels)
}
if( !is.null(Lst$title) ){
title <- Lst$title
}
}
# check for null values
if( is.null(xLabels) ){
xLabels <- c(1:ncol(x))
}
if( is.null(yLabels) ){
yLabels <- c(1:nrow(x))
}
# Red, green and blue range from 0 to 1 (to get white background; if all 3 ranging from 1 to 0 then black background)
ColorRamp <- rgb( seq(1,0,length=7), # Red
seq(1,0,length=7), # Green
seq(1,1,length=7)) # Blue
ColorLevels <- seq(min, max, length=length(ColorRamp))
#### example if only want greyscale:
# ColorRamp <- rgb( seq(1,0,length=20), # Red
# seq(1,0,length=20), # Green
# seq(1,0,length=20)) # Blue
# ColorLevels <- seq(min, max, length=length(ColorRamp))
#### example if only want redscale:
# ColorRamp <- rgb( seq(1,1,length=20), # Red
# seq(1,0,length=20), # Green
# seq(1,0,length=20)) # Blue
# ColorLevels <- seq(min, max, length=length(ColorRamp))
# Reverse Y axis
# reverse <- nrow(x) : 1
# yLabels <- yLabels[reverse]
# x <- x[reverse,]
# Data Map
par(mar = c(5,5,5,2), cex=0.7, las = 2)
image(1:length(xLabels), 1:length(yLabels), t(x), col=ColorRamp, xlab="",
ylab="", axes=FALSE, zlim=c(min,max))
if( !is.null(title) ){
title(main=title,line = 3, cex.main=2)
}
axis(BELOW<-1, at=1:length(xLabels), labels=xLabels, cex.axis=0.7)
axis(LEFT <-2, at=1:length(yLabels), labels=yLabels, las= HORIZONTAL<-1,
cex.axis=0.7)
}
#pdf("Confusion_matrix_DFA_CondProb.pdf")
#image(1:nbirds, 1:nbirds, -cprob.tab.post, xlim = c(1,nbirds), ylim=c(1,nbirds), col = terrain.colors(18), xlab='Predicted Species', ylab = 'Acutal Species')
# Draw the posterior confusion matrix, with the overall correct-classification %age in the title
myImagePlot(cprob.tab.post,title=sprintf('Posterior Confusion Matrix %.1f %%', pcc*100.0)) # Calling function just defined on validating dataset
#dev.off()
##################################
# Calculating correct classification by pairs
### example with a pair BD (species B and species D)
### D is wrongly put N% of the time into B, compared to its own classification (D%)
### B is wrongly put NN% of teh time into D, compared to its own classification (B%)
### A way to calculate correct classification is here defined by measuring the distance between own and paired classifications:
### Correct classification within the pair = [(D% - N%) + (B% - NN%)]/2 *100 (multiplying by 100 to put it into 'percentage')
### if D always wrongly placed into B (and then B% = -1) and B always wrongly placed into D (thus D%=-1), correct classif = -100
### if D and B always well classified (thus D% and B% are both = 1), then correct classif = 100
# Upper triangle holds the pairwise scores; lower triangle (incl. diagonal) is NA
newMat2 <- matrix(0, nrow = nbirds, ncol = nbirds)
for (X in 1:nbirds){ # First go through rows
  for (Y in 1:nbirds){ # then go through columns
    if (X >= Y){newMat2[X,Y] <- NA}
    else {newMat2[X,Y] <- mean(c(cprob.tab.post[X,X]-cprob.tab.post[X,Y], cprob.tab.post[Y,Y]-cprob.tab.post[Y,X]))*100}
  }
}
rownames(newMat2) <- rownames(cprob.tab.post) # changed val_matrix into cprob.tab.post
colnames(newMat2) <- colnames(cprob.tab.post) # changed val_matrix into cprob.tab.post
##################################### The following section is used to retrieve lda scores to later reconstruct ancestral states
# first this requires running the lda without CV to get LDs
resLDA_noCV=lda(Species~PC1+PC2+PC3+PC4+PC5+PC6, prior=rep(1,nbirds)/nbirds, data=subdata, CV=F) # LDA computation
# Get loadings of acoustic variables onto linear discriminants by multiplying two matrixes: scalings of PC1-PC6 onto the linear
# discriminants, and scalings of acoustic variables onto Principal Components
LD_loadings <- t(resLDA_noCV$scaling) %*% t(resPCA$var$cor[,1:6]) #express LDs as a function of acoustic variables
# write.csv(resLDA_noCV$scaling, "LD_Loadings_PCs.csv")
# write.csv(t(LD_loadings), "LD_Loadings_AcousticVars.csv")
# Create variables to stores LD scores
subdata$LD1 <- 0
subdata$LD2 <- 0
subdata$LD3 <- 0
subdata$LD4 <- 0
subdata$LD5 <- 0
subdata$LD6 <- 0
# Could be done with matrix multiply but more explicit like this
# NOTE(review): columns 17:38 are assumed to hold the acoustic variables in the
# same order as the columns of LD_loadings -- confirm against the data import
for (drum in 1:length(subdata[,1])) {
  subdata$LD1[drum] <- sum(subdata[drum, 17:38]*LD_loadings[1,])
  subdata$LD2[drum] <- sum(subdata[drum, 17:38]*LD_loadings[2,])
  subdata$LD3[drum] <- sum(subdata[drum, 17:38]*LD_loadings[3,])
  subdata$LD4[drum] <- sum(subdata[drum, 17:38]*LD_loadings[4,])
  subdata$LD5[drum] <- sum(subdata[drum, 17:38]*LD_loadings[5,])
  subdata$LD6[drum] <- sum(subdata[drum, 17:38]*LD_loadings[6,])
}
##################################### End of linear discriminants computation
# Calculation of local information
# Go from conditional probability to joint probability distribution
# (rows assumed equiprobable, i.e. uniform prior over the nbirds species)
normval_matrix <- cprob.tab.post/nbirds
# Then retrieve the size of the matrix
matrixdimensions <- dim(normval_matrix)
#preparing matrix to store Localinfo
localInfoMatrix<-matrix(0,nrow = nbirds, ncol=1)
# Per-species (local) mutual information in bits:
# sum over columns of p(j|i) * log2( p(j|i) / p(j) ), skipping zero cells
# (nbirds*normval = p(j|i); colSums(normval)[kk] = p(j))
for(k in 1:matrixdimensions[1]){ # for each line
  infoLoc = 0
  for (kk in 1:matrixdimensions[2]){ #for each column
    if (normval_matrix[k,kk] !=0){
      infoLoc <- infoLoc + nbirds*normval_matrix[k,kk]*(log2((nbirds*normval_matrix[k,kk])/(colSums(normval_matrix)[kk])))
    }
  }
  localInfoMatrix[k,1] = infoLoc
}
rownames(localInfoMatrix) <- rownames(normval_matrix)
localInfoMatrix
#####################################
(GlobalInfo <- mean(localInfoMatrix))
(mean(localInfoMatrix))
cat(c('global information from the classification matrix =',GlobalInfo,'bits'))
cat("Careful if the result is close to the ceiling value (here",log2(nbirds),"bits) ; result to take with caution ; similarly if the nb of analyzed signals is low.")
#####################################
# insert local info values into full dataset (careful, one value per species so need to apply same value to all drums within a species)
# NOTE(review): this walk assumes subdata is sorted by Species in the same
# order as the rows of localInfoMatrix -- confirm upstream sorting
subdata$LI <- 0
subdata$LI[1] = localInfoMatrix[1]
l=1 #initiate counter
for (i in 2:length(subdata$Species))
{ if (subdata$Species[i]==subdata$Species[i-1]) {
  subdata$LI[i]=localInfoMatrix[l]
  }
  else {
    l=l+1
    subdata$LI[i]=localInfoMatrix[l]}
}
# similarly, retrieve specific classification %ages and insert them into full dataset
# (diagonal of the confusion matrix = per-species correct-classification rate)
classif_species = 0
for (i in 1:nbirds){
  for (j in 1:nbirds){
    if (i == j){classif_species [i] <- cprob.tab.post[i,i]}
  }
}
subdata$SP_classif <- 0
subdata$SP_classif[1] = classif_species[1]
l=1 #initiate counter
for (i in 2:length(subdata$Species))
{ if (subdata$Species[i]==subdata$Species[i-1]) {subdata$SP_classif[i]=classif_species[l] }
  else {
    l=l+1
    subdata$SP_classif[i]=classif_species[l] }
}
# Also need to produce a species-specific medians table for further analyses (reconstructions, HierarchicalClustering, etc)
# initiate matrix (nrows = nb of species + 1 (header); ncol = Nb of variables + 1 (speciesName))
SpMatrix <- data.frame(matrix(0, nrow = nbirds, ncol = length(subdata[1,])))
firstlineofeachspecies <- subdata[ !duplicated(subdata$Species), ] # Retrieves 1st line of each species --> Do not use for numeric values (only factors)
SpMatrix[,1:16] <- firstlineofeachspecies[1:16]
colnames(SpMatrix)[1:16] <- colnames(subdata[,1:16])
# start a loop going through the variables of interest in the dataset (i.e. not the explanatory variables)
for (i in 17:length(subdata[1,])){
  # grouping the dataset by species levels, extract descriptives values for each variable
  # (with 'i' the column number of the variable of interest)
  Test <-
    subdata %>%
    group_by(Species) %>%
    summarise_at(.vars = names(.)[i],funs(max, min, mean, median, sd))
  # only the per-species median is kept in the summary table
  colnames(SpMatrix)[i] <- paste("median_", names(subdata[i]), sep='')
  SpMatrix[, i] <- Test$median
}
# Plot the species-specific normalized local information to show their distribution relative to ceiling
# (normalization: divide by log2(nbirds), the theoretical maximum in bits)
# pdf('SpeciesInfo_normed.pdf',width = 17, height = 7)
ggplot(SpMatrix, aes(x = Species_FullName, y= median_LI/log2(nbirds))) +
  geom_point(cex=2) +
  theme( panel.grid.minor = element_blank(),
         panel.background = element_blank(), axis.line = element_line(colour = "black")) +
  xlab('') +
  ylab('\nNormalized Mutual Information (bits)\n') +
  theme(axis.text.x=element_text(angle=50, size=11, hjust=1, face ="italic"),
        axis.text.y=element_text(angle=90, size=15, hjust=0.5),
        axis.title.y = element_text(colour="grey20",size=20,angle=90,hjust=.5,face="plain"))+
  geom_hline(yintercept=mean(localInfoMatrix/log2(nbirds)),color=2, lty=2)+
  geom_hline(yintercept=1,color=2, lty=1)
# dev.off()
mean(localInfoMatrix/log2(nbirds)) # to put in the MS
sd(localInfoMatrix/log2(nbirds)) # to put in the MS
##### Running stats comparing the mean Local information between acoustic strategies
### according stats to the graph (Anova, comparing between group means)
SpMatrix$AcousticClust <- as.factor(SpMatrix$AcousticClust)
SpMatrix$AcousticClust2 <- car::recode(SpMatrix$AcousticClust,"'1'='AC';'2'='DK';'3'='SF'; '4'='SS'; '5'='RS'; '6'='IS'") # car::recode written as is because conflicting with dplyr::recode
res.aov <- aov(median_LI/log2(nbirds) ~ AcousticClust2, data=SpMatrix)
summary(res.aov)
TukeyHSD(res.aov)
#### control lines before plotting
subdata1 <- SpMatrix[SpMatrix$AcousticClust==1,]
subdata2 <- SpMatrix[SpMatrix$AcousticClust==2,]
subdata3 <- SpMatrix[SpMatrix$AcousticClust==3,]
subdata4 <- SpMatrix[SpMatrix$AcousticClust==4,]
subdata5 <- SpMatrix[SpMatrix$AcousticClust==5,]
subdata6 <- SpMatrix[SpMatrix$AcousticClust==6,]
mean(SpMatrix$median_LI)/log2(nbirds)
mean(subdata1$median_LI)/log2(nbirds)
mean(subdata2$median_LI)/log2(nbirds)
mean(subdata3$median_LI)/log2(nbirds)
mean(subdata4$median_LI)/log2(nbirds)
mean(subdata5$median_LI)/log2(nbirds)
mean(subdata6$median_LI)/log2(nbirds)
# store overall (col 2) and per-strategy (cols 3-8) normalized LI for this iteration
output_LIsubsetAcStrat[iteration,2] <- mean(SpMatrix$median_LI)/log2(nbirds)
output_LIsubsetAcStrat[iteration,3] <- mean(subdata1$median_LI)/log2(nbirds)
output_LIsubsetAcStrat[iteration,4] <- mean(subdata2$median_LI)/log2(nbirds)
output_LIsubsetAcStrat[iteration,5] <- mean(subdata3$median_LI)/log2(nbirds)
output_LIsubsetAcStrat[iteration,6] <- mean(subdata4$median_LI)/log2(nbirds)
output_LIsubsetAcStrat[iteration,7] <- mean(subdata5$median_LI)/log2(nbirds)
output_LIsubsetAcStrat[iteration,8] <- mean(subdata6$median_LI)/log2(nbirds)
} # end of main loop selecting 5 species per acoustic strategy 100 times
# Persist the per-iteration overall and per-strategy normalized LI values
write.csv(output_LIsubsetAcStrat, 'LI_byAcStrat_equalsizes.csv', row.names=F)
# testing whether the mean LI value are different across acoustic strategy when equal species sample considered (5 species maximum)
# thus 5 species randomly selected per acoustic strategy, after which calculation of information has been computed 100 times (script hereabove)
library(lsmeans)
data4stats=read.table(file="LI_byAcStrat_equalsizes_rearranged.csv", header=T, sep=",",dec=".") # template from 100 simulations. Adjust according to new simulations
output_LIsubsetAcStrat=read.table(file="LI_byAcStrat_equalsizes.csv", header=T, sep=",",dec=".")
# One-way ANOVA of mean normalized LI across drumming strategies + Tukey post-hoc
res.anova <- aov(Mean_LI ~ Strategy, data=data4stats)
summary(res.anova)
TukeyHSD(res.anova)
# checkup lines (columns 3-8 hold the per-strategy values written above)
mean(output_LIsubsetAcStrat[,3]) # Strategy AC
mean(output_LIsubsetAcStrat[,4]) # Strategy DK
mean(output_LIsubsetAcStrat[,5]) # Strategy SF
mean(output_LIsubsetAcStrat[,6]) # Strategy SS
mean(output_LIsubsetAcStrat[,7]) # Strategy RS
mean(output_LIsubsetAcStrat[,8]) # Strategy IS
### Plot the acoustic strategies' mean local information and order graph based on above control lines
# Create function to get mean ± SE (otherwise boxplots only use quartiles and we want the graph to
# match the stats, hence why plotting mean±SE with max/min, rather than median±Q25/Q75 with min/max)
# Five-number summary for ggplot2::stat_summary(): whiskers at min/max, box at
# mean +/- standard error, middle line at the mean.
MinMeanSEMMax <- function(x) {
  m <- mean(x)
  se <- sd(x) / sqrt(length(x))
  stats::setNames(c(min(x), m - se, m, m + se, max(x)),
                  c("ymin", "lower", "middle", "upper", "ymax"))
}
# pdf("LI_byAcStrategies_equalSizes.pdf")
# Boxplots of normalized LI (%) per drumming type, ordered by group mean;
# boxes show mean +/- SE (via MinMeanSEMMax) with whiskers at min/max
ggplot(data = data4stats, aes(reorder(factor(data4stats$Strategy), data4stats$Mean_LI*100, FUN=mean), data4stats$Mean_LI*100, fill = factor(data4stats$Strategy))) +
  stat_summary(fun.data=MinMeanSEMMax, geom="errorbar", position=position_dodge(1), width = .3, lwd=0.8) +
  stat_summary(fun.data=MinMeanSEMMax, geom="boxplot", position=position_dodge(1)) +
  scale_fill_manual(values=c(1:6)) +
  theme_bw() +
  ylab('Normalized Mutual Information (%) \n with equal number of species \n') +
  ylim(10, 100) +
  xlab('\nDrumming type') +
  scale_x_discrete(labels=c("SF\n(n = 5)", "SS\n(n = 5)", "DK\n(n = 5)","AC\n(n = 5)", "RS\n(n = 5)", "IS\n(n = 5)")) +
  theme(axis.line = element_line(colour = "black"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank(),
        axis.text.x=element_text(angle=45, size=15, vjust=0.5, face ="bold.italic"),
        axis.text.y=element_text(angle=90, size=15, hjust =0.5, vjust=1),
        axis.title.x = element_text(colour="grey20",size=20,angle=0,hjust=.5,face="plain"),
        axis.title.y = element_text(colour="grey20",size=20,angle=90,hjust=.5,face="plain"),
        legend.position="none")
# dev.off()
|
fb268da9f1ad079ac08848904d14c89d1344e857 | c04bba07f70461390f14c86512a9ca5433feb74d | /server/rexplorer_api.R | fabe48aee0cf3d1aa1d2048f69cb948b914119ab | [] | no_license | nickmplay/RExplorer | def62ac27b9f8982fa253456e731ab3a810c0b06 | 2793c0f3c85624370b5e5cc85036bc5a784126d2 | refs/heads/main | 2023-04-17T21:13:45.260075 | 2021-04-25T18:53:58 | 2021-04-25T18:53:58 | 365,001,252 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,450 | r | rexplorer_api.R | # rexplorer_api.R
#' @filter cors
cors <- function(req, res) {
  # CORS support: every response advertises a wildcard origin. Preflight
  # (OPTIONS) requests are answered here with status 200 and an empty body;
  # all other requests continue down the plumber router.
  res$setHeader("Access-Control-Allow-Origin", "*")
  if (req$REQUEST_METHOD != "OPTIONS") {
    return(plumber::forward())
  }
  res$setHeader("Access-Control-Allow-Methods", "*")
  res$setHeader("Access-Control-Allow-Headers", req$HTTP_ACCESS_CONTROL_REQUEST_HEADERS)
  res$status <- 200
  list()
}
# List the immediate contents of a directory.
#   path - URL-encoded directory path (query parameter)
# Returns list(err = ...) when nothing is found at the path, otherwise
# list(dir, subDirs, files) with subdirectories and plain files separated.
#* @get /dirR
dirR <- function(path){
  # URI decode
  path <- URLdecode(path)
  # run local query: immediate subdirectories, and the full listing (files + dirs)
  subd_list <- list.dirs(path, recursive = F, full.names = F)
  subd_files <- list.files(path)
  # `&&` (scalar, short-circuiting) replaces the elementwise `&` in this
  # scalar condition. NOTE: an existing-but-empty directory is
  # indistinguishable from a missing one here and is also reported as not
  # found (original behaviour, kept).
  if(length(subd_list) == 0 && length(subd_files) == 0){
    return(list(err = "Directory not found"))
  }
  if(length(subd_list) == 0){
    return(list(dir = path, subDirs = c(), files = subd_files))
  }
  # list.files() also reports directories, so drop the subdirectory names;
  # reuses subd_files instead of calling list.files() a second time
  list(dir = path, subDirs = subd_list, files = setdiff(subd_files, subd_list))
}
# Windows-only endpoint: open <path>\<fname> with its default application via
# the cmd shell ("cd <path> & "<fname>""). Relies on shell(), which only
# exists on Windows R, and on backslash-separated paths.
#   path  - directory containing the file
#   fname - file name to open
#* @get /openPath
openPath <- function(path, fname){
  # create shell command: make sure the directory ends with a single backslash.
  # Fixed off-by-one: the original compared the last TWO characters to "\",
  # which can never match a one-character string, so a backslash was appended
  # even when the path already ended with one.
  path_length <- nchar(path)
  if(substr(path, path_length, path_length) != "\\"){
    path <- paste0(path, "\\")
  }
  #print(paste(path, fname, sep = " - "))
  shell_cmd <- paste0("cd ", path, " & ", '"', fname, '"')
  # run shell asynchronously so the API does not block on the opened program
  shell(shell_cmd, wait = F)
  return(list(dir = path, open = "ok"))
}
# Serve the static front-end in ../templates at the web root
#* @assets ../templates /
list()
# Example query for manual testing:
# http://127.0.0.1:8000/dirR?path=C:/Users/nickm/Documents/2_Work
90519895f565ce2ff6fd532b7d3f316f2af6f231 | 5edf3ebc52f12c8b7ed4dbc1aa5f97a8e8929605 | /tests/json_validation.R | 73e170f95ac5bc8c04d78d3b9359ef50c414d053 | [] | no_license | lukaszbrzozowski/CaseStudies2019S | 15507fa459f195d485dd8a6cef944a4c073a92b6 | 2e840b9ddcc2ba1784c8aba7f8d2e85f5e503232 | refs/heads/master | 2020-04-24T04:22:28.141582 | 2019-06-12T17:23:17 | 2019-06-12T17:23:17 | 171,700,054 | 1 | 0 | null | 2019-02-20T15:39:02 | 2019-02-20T15:39:02 | null | UTF-8 | R | false | false | 3,432 | r | json_validation.R | library(testthat)
library(jsonlite)
library(readr)
# Every .json file in the repository must parse as syntactically valid JSON
json_files <- list.files(pattern = "*.json$", recursive = TRUE)
test_that("validate jsons", {
  for(json in json_files){
    json_to_validate <- read_file(json)
    is_json_valid <- validate(json_to_validate)
    if(!is_json_valid) print(json)  # name the offending file before failing
    expect_true(is_json_valid)
  }
})
# Verify that every entry of the 'variables' list in a parsed dataset JSON is
# stored under the same key as its own "name" field.
#   json_to_validate - parsed JSON (nested list); variables live under [[1]]
#   json             - path of the file being checked, printed on failure
# Returns TRUE when all names agree (vacuously TRUE when there are no
# variables), FALSE otherwise.
check_names_of_variables <- function(json_to_validate, json){
  vars <- json_to_validate[[1]][["variables"]]
  # vapply + isTRUE keeps the accumulator a single logical even when a "name"
  # field is missing: the original `res & (NULL == var)` produced logical(0),
  # which then errored in the if() below instead of reporting the file
  res <- all(vapply(names(vars),
                    function(v) isTRUE(vars[[v]][["name"]] == v),
                    logical(1)))
  if(res == FALSE) print(json)
  res
}
# Every dataset.json must carry exactly the agreed metadata fields, in order,
# and each variable entry must be keyed by its own 'name' field
dataset_json_files <- list.files(pattern = "dataset.json$", recursive = TRUE)
test_that("validate data set jsons", {
  for(json in dataset_json_files){
    # simplifyVector = FALSE keeps the nested-list shape check_names_of_variables expects
    json_to_validate <- fromJSON(json, simplifyVector = FALSE)
    expect_equal(names(json_to_validate[[1]]), c("id", "added_by", "date", "name", "source", "url", "number_of_features",
                                            "number_of_instances", "number_of_missing_values",
                                            "number_of_instances_with_missing_values", "variables" ))
    expect_true(check_names_of_variables(json_to_validate, json))
  }
})
# Every task.json must carry exactly the agreed columns, in order
task_json_files <- list.files(pattern = "task.json$", recursive = TRUE)
test_that("validate task jsons", {
  for(json in task_json_files){
    json_to_validate <- fromJSON(json)
    # print the offending file before the expectation fails
    if(any(colnames(json_to_validate) != c("id", "added_by", "date", "dataset_id", "type", "target"))){
      print(json)
    }
    expect_equal(colnames(json_to_validate), c("id", "added_by", "date", "dataset_id", "type", "target"))
  }
})
# Every model.json must carry exactly the agreed columns, in order
model_json_files <- list.files(pattern = "model.json$", recursive = TRUE)
test_that("validate model jsons", {
  for(json in model_json_files){
    json_to_validate <- fromJSON(json)
    # print the contributor and file before the expectation fails
    if(any(colnames(json_to_validate) != c("id", "added_by", "date", "library", "model_name", "task_id", "dataset_id", "parameters", "preprocessing"))){
      print(paste(json_to_validate$added_by, json, sep = " "))
    }
    expect_equal(colnames(json_to_validate), c("id", "added_by", "date", "library", "model_name", "task_id", "dataset_id", "parameters", "preprocessing"))
  }
})
# Every audit.json must carry the agreed columns, and its performance block
# must list the measures required for its task type (regression vs classification)
audit_json_files <- list.files(pattern = "audit.json$", recursive = TRUE)
test_that("validate audit jsons", {
  for(json in audit_json_files){
    json_to_validate <- fromJSON(json)
    # print the contributor and file before the expectation fails
    if(any(colnames(json_to_validate) != c("id", "date", "added_by", "model_id", "task_id", "dataset_id", "performance"))){
      print(paste(json_to_validate$added_by, json, sep =" "))
    }
    # regression audits must report mse/rmse/mae/r2
    if(grepl("regression_", json_to_validate$task_id)){
      measures <- names(json_to_validate$performance)
      check_measures <- all(c("mse", "rmse", "mae", "r2") %in% measures)
      expect_true(check_measures)
      if(check_measures==FALSE) print(paste(json_to_validate$added_by, json, sep = " "))
    }
    # classification audits must report acc/auc/specificity/recall/precision/f1
    if(grepl("classification_", json_to_validate$task_id)){
      measures <- names(json_to_validate$performance)
      check_measures <- all(c("acc", "auc", "specificity", "recall", "precision", "f1") %in% measures)
      expect_true(check_measures)
      if(check_measures==FALSE) print(paste(json_to_validate$added_by, json, sep = " "))
    }
    expect_equal(colnames(json_to_validate), c("id", "date", "added_by", "model_id", "task_id", "dataset_id", "performance"))
  }
})
|
04bba9d245ac1a5db473233410aab5d3f470a8cd | f89f384319eb44162ef6033d6ee2715e2cdd04be | /tests/testthat.R | 155e75e3a90082a753ce708588999a8a6cbcb04e | [
"MIT"
] | permissive | Sage-Bionetworks/climbr | 8d3ed1a931950bdae866a1527c1b7ba842b65807 | c16735ba20733037c650db5d450f70b184d438f9 | refs/heads/master | 2021-07-23T17:30:46.002594 | 2020-04-13T22:22:37 | 2020-04-13T22:22:37 | 135,639,698 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 56 | r | testthat.R | library(testthat)
library(climbr)
# Standard testthat entry point: runs every test under tests/testthat/ for the
# climbr package (invoked by R CMD check / devtools::test()).
test_check("climbr")
|
2b1bf31cd02cf01389fecb565a322cb6c7078ed2 | 4ace82a4ef70dc973744392cfea7fd3e7bbf2a68 | /man/redis_logger.Rd | ad96b978332d49e14d5d98c686be19e00408e2f5 | [] | no_license | mlkornexl/easyRedis | dd183c6b64933d370e392428c558cf61af611d8e | 67846581911fa06fe6d38aeaf0d9e80c21f16612 | refs/heads/master | 2021-09-13T18:40:00.782660 | 2018-05-03T07:09:10 | 2018-05-03T07:09:10 | 106,254,110 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,985 | rd | redis_logger.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/logger.R
\name{redis_logger}
\alias{redis_logger}
\alias{layout.redis}
\alias{appender.redis}
\alias{redis_logger}
\title{Logging to Redis Database}
\usage{
layout.redis(ApplicationId, ApplicationInstanceId)
appender.redis(channel, logToConsole = FALSE)
redis_logger(ApplicationId, ApplicationInstanceId,
channel = redis_options("logChannel"), logToConsole = FALSE)
}
\arguments{
\item{ApplicationId}{string, unique ID of the logging application}
\item{ApplicationInstanceId}{string, unique ID of the single instance of
the logging application}
\item{channel}{Redis channel to publish logging messages}
\item{logToConsole}{logical, if \code{TRUE} logging message is written to
console too}
}
\value{
\code{layout.redis} returns a function with arguments \code{level},
\code{msg}, and additional arguments \code{...}, see Layout section and
\code{\link[futile.logger]{flog.layout}} for details.
\code{appender.redis} returns a function with one argument
\code{line} that publishes the logging message to the Redis channel
\code{channel} and optionally writes it to the console
(\code{\link[base]{stdout}()}).
}
\description{
The functions define a logging environment for writing log messages to
the Redis database using \pkg{futile.logger}'s logging facilities.
}
\section{Functions}{
\itemize{
\item \code{layout.redis}: defines logging message's layout, see Layout
section for details
\item \code{appender.redis}: defines a logging appender to publish logging
messages to a Redis data base channel \code{channel}
\item \code{redis_logger}: defines a new logger \code{redis} and sets
layout and appender to the return values of \code{layout.redis()} and
\code{appender.redis()}, respectively.
}}
\section{Layout}{
The logging message is a JSON string with two elements at the base level
\describe{
\item{\code{Metadata}}{contains \code{ApplicationId} and
\code{ApplicationInstanceId}}
\item{\code{LoggingDocument}}{contains a list of message specific
elements}
}
Both meta data \code{ApplicationId} and \code{ApplicationInstanceId}
must be supplied. If missing the respective \link[=redis_options]{redis
options} will be used.
The \code{level} argument in the returned function will control the
layout of the \code{LoggingDocument} list. Additional arguments
\code{...} either set list elements of \code{LoggingDocument} directly
(if present) or will be coerced to a "details" JSON-string.
}
\seealso{
\code{\link[futile.logger]{flog.layout}} for managing layouts in
\pkg{futile.logger} package
\code{\link[futile.logger]{flog.appender}} for managing logging
appenders in \pkg{futile.logger} package
\code{\link[futile.logger]{flog.logger}} for managing loggers in
\pkg{futile.logger} package
}
|
694f51c859360a62bdc87db135f2f7834eaa5d5d | 184f8a96fac73dd74276879bd59a46e0dcc7efe9 | /SimulateEpidemic.R | f81a98f65ab799d333ec720bb5886213ef911db3 | [
"MIT"
] | permissive | FistOfHit/DocNet | 6cee527e371796fdf44a64f3cb7c78e200c35ebe | 4080793cd02223b8fe6250a7fba01a8ae1996a50 | refs/heads/master | 2021-04-03T06:23:38.884062 | 2018-03-12T11:53:23 | 2018-03-12T11:53:23 | 124,815,407 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,203 | r | SimulateEpidemic.R | # Script to simulate an "epidemic" of information spreading across the
# docnet network.
# Initialise variables and storage
sim_time = 500 # number of simulated time steps per epidemic run
num_init_infec = 20 # size of each initial infected seed set
# Row 1: node ids 1..242; row 2: count of runs in which each node ended infected
infec_prone = matrix(0, 2, 242)
infec_prone[1, ] = c(1:242)
# Row indices of the final time step in the long-format results table
# (assumes simEpi's results stack 242 node rows per time step -- TODO confirm)
indexes = 242*sim_time - c(241:0)
# Finding random nodes with AdoptionDate <= 5
# NOTE(review): temp_network is assumed to be an igraph-style object defined
# elsewhere (V() accessor) -- confirm against the calling script
early_docs = V(temp_network)$nodeAdoptionDate <= 5
rand_choices = matrix(0, 10, 242)
for (i in 1:10) {
  rand_choices[i, ] = sample(242, 242) # a random permutation of all 242 nodes
}
initial_inf = matrix(0, 10, num_init_infec)
# Creating the matrix of 10 initial conditions
# (walk each permutation and keep the first early-adopter nodes encountered)
for (i in 1:10) {
  k = 1
  for (j in 1:242) {
    choice = rand_choices[i, j]
    if (early_docs[choice] == TRUE) {
      initial_inf[i, k] = choice
      k = k + 1
    }
    if (k >= num_init_infec) break
  }
}
# Simulating epidemic spread
# 10 initial conditions x 10 replicate runs each; tally final infection status
# of every node at the last time step into row 2 of infec_prone
for (i in 1:10) {
  for (j in 1:10) {
    epi_sim = simEpi(temp_network, init_infected = initial_inf[i, ],
                     inf.prob = 0.5, max.time = sim_time)
    those_infected = epi_sim$results[indexes, 2]
    infec_prone[2, ] = ifelse(those_infected == TRUE,
                              infec_prone[2, ] + 1, infec_prone[2, ])
  }
}
819527d6438b7afd99d0a2330106cde1698f7c63 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/IsoSpecR/tests/test-numerical-stability.R | c634185e52537092f4ab903f7811f87f0a1fa64d | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 871 | r | test-numerical-stability.R | context("Numerical stability")
# Closed-form counts: an element with k isotopes and n atoms contributes
# choose(n + k - 1, n) configurations; totals multiply across elements
# (C and N have 2 isotopes, O has 3, S has 4).
test_that("IsoSpecR calculates correctly full isotopic distributions.", {
  expect_equal(nrow(IsoSpecify(c(C=100, H=202), 1)), 101*203)
  expect_equal(nrow(IsoSpecify(c(C=100, H=202, S=5), 1)), 101*203*choose(4+5-1, 5))
  expect_equal(nrow(IsoSpecify(c(O=100, N=10, S=6), 1)), choose(100+3-1,100)*11*choose(4+6-1, 6))
})
# Compare IsoSpecR's isotopologue probabilities against reference results
# computed with the enviPat package (objects `mols` and `envipat` stored in
# the fixture file loaded below).
test_that("IsoSpecR provides results similar to envipat.", {
  load("envipat.Rd")
  isospec = lapply(mols, IsoSpecify, stopCondition=1)
  expect_equal(sapply(isospec, nrow), sapply(envipat, nrow))
  isospec = lapply(mols, IsoSpecify, stopCondition=1)
  for(i in 1:3){
    # normalise reference abundances to probabilities, then compare the
    # sorted distributions (IsoSpecify's column 2 holds log-probabilities)
    envi_1_probs = envipat[[i]][,"abundance"] / sum(envipat[[i]][,"abundance"])
    envi_1_probs = sort(envi_1_probs)
    isos_1_probs = sort(exp(isospec[[i]][,2]))
    # expect_that(..., is_true()) is deprecated since testthat 2.0
    expect_true(max(abs(isos_1_probs - envi_1_probs)) < 1.0e-10)
  }
})
de1f07cb2b5863b89053a6d7699f26f58bad8fd2 | 66dfe4a46f879dcca53b1f828b68e3ecfa5c988f | /R/build_pre.R | e0e9e8600d5643086effd50fd876d67df6d66be5 | [
"CC-BY-4.0"
] | permissive | christiandudel/covid_age | b19313fe0284e967a233b2cdc2f64a0958ee34a4 | ea504f6080a82a727abca2184257d2737ee2dd2e | refs/heads/master | 2021-06-12T19:59:57.830679 | 2020-07-08T10:43:37 | 2020-07-08T10:43:37 | 254,386,370 | 0 | 1 | null | 2020-04-09T13:59:03 | 2020-04-09T13:59:03 | null | UTF-8 | R | false | false | 1,274 | r | build_pre.R |
library(here)
change_here <- function(new_path){
new_root <- here:::.root_env
new_root$f <- function(...){file.path(new_path, ...)}
assignInNamespace(".root_env", new_root, ns = "here")
}
change_here("C:/Users/riffe/Documents/covid_age")
startup::startup()
repo <- git2r::repository(here())
#init()
git2r::pull(repo,credentials = creds)
source(here("R","build.R"))
schedule_this <- FALSE
if (schedule_this){
library(taskscheduleR)
taskscheduler_create(taskname = "COVerAGE-DB-automatic-daily-build",
rscript = "C:/Users/riffe/Documents/covid_age/R/build_pre.R",
schedule = "DAILY",
starttime = "02:00",
startdate = format(Sys.Date() + 1, "%d/%m/%Y"))
}
#taskscheduleR::taskscheduler_delete("COVerAGE-DB-automatic-daily-build")
test_schedule_build <- FALSE
if (test_schedule_build){
library(taskscheduleR)
taskscheduler_create(taskname = "COVerAGE-DB-automatic-build-test",
rscript = "C:/Users/riffe/Documents/covid_age/R/build_pre.R",
schedule = "ONCE",
starttime = format(Sys.time() + 61, "%H:%M"))
}
#taskscheduleR::taskscheduler_delete("COVerAGE-DB-automatic-build-test")
|
05a2ac093f94eb9dec5dcbc6fa607a9b28eadce9 | 7948b2516f63ea703e944d6a4666418e6cb16690 | /ui.R | 31a366a0e0482a716546b2d8724cd72f00d0c8cd | [] | no_license | ClaymoreAlex/BMI-Calculator | 224c11560755dcead0deb03a01309fa3d20ef20f | 2a166c53f2f4d6a5f3bb3520cfa9fc9c3a6c49de | refs/heads/master | 2021-01-01T05:36:35.125630 | 2015-09-20T02:26:34 | 2015-09-20T02:26:34 | 42,797,007 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 919 | r | ui.R | library(shiny)
library(shiny)
# Define UI for application that determines BMI
# (inputs: height in feet + inches and weight in pounds; outputs "outBMI" and
# "outstat" are rendered by the matching server script)
shinyUI(pageWithSidebar
(
  # Application title
  headerPanel("Body Mass Index Calculator"),
  sidebarPanel(
    # Fixed user-facing typo: "based upon you height" -> "based upon your height"
    h4('This application determines your body mass index and corresponding weight status category based upon your height and weight.'),
    h4(' '),
    h4('Enter your height in feet and inches and your weight in pounds.'),
    h4(' '),
    numericInput('inFeet','Height - Feet', 5, min = 4, max = 6),
    numericInput('inInches','Height - Inches', 0, min = 0, max = 11),
    numericInput('inWeight','Weight - Pounds', 120, min = 80, max = 400),
    submitButton('Calculate BMI')
  ),
  mainPanel(
    h4('Based on your height and weight, your body mass index is'),
    verbatimTextOutput("outBMI"),
    h4('and your weight status category is'),
    verbatimTextOutput("outstat")
  )
))
60715b60897b1a31eb32177a684120fca5afb66c | b1d40c1ea78f35f2912b6300ee6e8cdae10bac3c | /Shiny_Pokemon.R | 18a4c2cf4352e294811abd6eb521c2b81e4ac7c3 | [] | no_license | seanwoww/bios611_project1 | 613efa1682e9849bbb45872a9b199bffc112258e | 36def6b27d19943b1ad1b425e88969b31ae3d5ca | refs/heads/master | 2023-01-20T10:42:01.238212 | 2020-11-17T15:05:48 | 2020-11-17T15:05:48 | 290,349,304 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,146 | r | Shiny_Pokemon.R | library(shiny)
# Shiny app: interactive 2-D t-SNE embedding of Pokemon base stats, coloured
# by primary type. Usage: Rscript Shiny_Pokemon.R <port>
library(shiny)
library(tidyverse)
library(plotly)
args <- commandArgs(trailingOnly = TRUE)
port <- as.numeric(args[[1]])
library(Rtsne)
# Tidy Pokemon data; the six base stats drive the embedding
pt <- read_csv("derived_data/pokemon_tidy.csv")
py <- pt %>% group_by(type1) %>% select(.,attack, defense, sp_attack, sp_defense, speed, hp,name)
fitt <- Rtsne(py %>% select(.,attack, defense, sp_attack, sp_defense, speed, hp),dims =2, check_duplicates = FALSE)
# Build the scatter once at startup (hover text shows the Pokemon name)
fig <- plot_ly(fitt$Y %>% as.data.frame() %>% as_tibble() %>% mutate(label=py$type1) %>% mutate(name= py$name), x = ~V1 , y = ~V2, color = ~label, text=~name, hoverinfo = 'text', type = 'scatter', mode = 'markers')
ui <- basicPage(
  titlePanel("Pokemon TSNE"),
  mainPanel(
    plotlyOutput("graph")
  ))
server <- function(input, output) {
  # Reuse the precomputed figure: the data never change while the app runs,
  # so there is no need to rebuild the identical plot on every render
  # (the original duplicated the whole plot_ly() call here).
  output$graph <- renderPlotly({ fig })
}
print(sprintf("Starting shiny on port %d", port))
shinyApp(ui = ui, server = server, options = list(port=port, host="0.0.0.0"))
f3ff3c310462e43dd836bf8c52a6b2607a5d7b8f | f57bdf4e211ad7984fb5005883ae4d116c8f2e92 | /R/relatednessCoeff.R | 692ba5104b20617ea4e98107931dd04722f9485a | [] | no_license | cran/paramlink | 70d2e3776063411628bd5b5663a408737886d040 | 8af1f6544e37d20717829b5fbf7286fd303c68cc | refs/heads/master | 2022-05-22T10:39:49.492616 | 2022-04-15T07:10:02 | 2022-04-15T07:10:02 | 17,698,214 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,792 | r | relatednessCoeff.R | #' Relatedness coefficients
#'
#' Computes inbreeding coefficients for all pedigree members, and Jacquard's
#' condensed identity coefficients for any pair of members. These are simple
#' wrappers for functions in other packages or external programs.
#'
#' Both \code{inbreeding} and \code{kinship_coefs} are thin wrappers of
#' \code{\link[kinship2]{kinship}}. \code{jacquard2}, executes an external
#' call to the C program \code{IdCoefs} (Abney, 2009). For this to
#' function, \code{IdCoefs} must be installed on the computer (see link in the
#' References section below) and the executable placed in a folder included in
#' the PATH variable. The \code{jacquard2} wrapper works by writing the
#' necessary files to disk and calling \code{IdCoefs} via \code{\link{system}}.
#'
#' @param x a \code{\link{linkdat}} object.
#' @param ids a integer vector of length 2.
#' @param verbose a logical, indicating if messages from IdCoefs should be
#' printed.
#' @param cleanup a logical: If TRUE, the pedfile and sample file created for
#' the IdCoefs run are deleted automatically.
#' @return For \code{inbreeding}, a numerical vector with the inbreeding
#' coefficients, with names according to the ID labels \code{x$orig.ids}.\cr
#' For \code{kinship_coefs}, either a single numeric (if \code{ids} is a pair
#' of pedigree members) or the whole kinship matrix, with \code{x$orig.ids} as
#' dimnames.\cr For \code{jacquard} and \code{jacquard2}, a numerical vector of
#' length 9 (in the standard order of Jacquard's condensed identity
#' coefficients).
#' @seealso \code{\link[kinship2]{kinship}}
#' @references The \code{IdCoefs} program: Abney, Mark (2009). A graphical
#' algorithm for fast computation of identity coefficients and generalized
#' kinship coefficients. Bioinformatics, 25, 1561-1563.
#' \url{http://home.uchicago.edu/~abney/abney_web/Software.html}
#'
#' @examples
#'
#' # Offspring of first cousins
#' x = cousinsPed(1, child=TRUE)
#' inb = inbreeding(x)
#' stopifnot(inb[9] == 1/16)
#'
#' # if ID labels are not 1:9, care must be taken in extracting correct elements.
#' set.seed(1357)
#' y = relabel(x, sample(1:9))
#' child = leaves(y)
#' inbreeding(y)[child] #wrong
#' inb = inbreeding(y)[as.character(child)] #correct
#' inb
#' # the inbreeding coeff of the child equals the kinship coeff of parents
#' kin = kinship_coefs(y, parents(y, child))
#' stopifnot(inb==kin, inb==1/16)
#'
#' @name relatednessCoeff
NULL
#' @rdname relatednessCoeff
#' @export
inbreeding = function(x) {
    # An individual's inbreeding coefficient equals the kinship coefficient of
    # its parents (cf. the examples above), so compute the full kinship matrix
    # once and look up the parental entries. Founders are non-inbred (0).
    ped = x$pedigree
    kinmat = kinship2::kinship(id = ped[, "ID"], dadid = ped[, "FID"], momid = ped[, "MID"])
    coefs = numeric()
    coefs[x$founders] = 0
    for (i in x$nonfounders)
        coefs[i] = kinmat[ped[i, "FID"], ped[i, "MID"]]
    names(coefs) = x$orig.ids
    coefs
}
#' @rdname relatednessCoeff
#' @export
kinship_coefs = function(x, ids = NULL) {
    # Validate the requested pair (when given) before doing any work
    if (!is.null(ids))
        assert_that(length(ids) == 2, all(ids %in% x$orig.ids))
    ped = x$pedigree
    km = kinship2::kinship(id = ped[, "ID"], dadid = ped[, "FID"], momid = ped[, "MID"])
    # label both dimensions with the original IDs for name-based lookup
    dimnames(km) = rep(list(x$orig.ids), 2)
    # whole matrix when no pair is requested, single coefficient otherwise
    if (is.null(ids))
        return(km)
    km[as.character(ids[1]), as.character(ids[2])]
}
#' @rdname relatednessCoeff
#' @export
jacquard = function(x, ids) {
    # Deprecated stub: the 'identity'-package implementation (kept below as
    # commented-out code for reference) was retired; callers should migrate
    # to ribd::identityCoefs().
    message("This function is no longer available. Use `ribd::identityCoefs()` instead")
    #if (!requireNamespace("identity", quietly = TRUE))
    #    stop("Package 'identity' must be install for this function to work.", call. = FALSE)
    #assert_that(length(ids) == 2, all(ids %in% x$orig.ids))
    #idsi = .internalID(x, ids)
    #ped = x$pedigree[, 1:3]
    #identity::identity.coefs(idsi, ped)[2, 3:11]
}
#' @rdname relatednessCoeff
#' @export
jacquard2 = function(x, ids, verbose = FALSE, cleanup = TRUE) {
    # Compute the 9 condensed identity coefficients for a pair of pedigree
    # members by shelling out to the external `idcoefs` program (Abney 2009).
    # Requires `idcoefs` to be on the system PATH.
    assert_that(length(ids) == 2, all(ids %in% x$orig.ids))
    x = .reorder_parents_before_children(x)
    ped = relabel(x$pedigree, x$orig.ids)[, 1:3]  # relabel if orig.ids are 1..N (in some order)
    # Register cleanup up front so the temp files are removed even if the
    # external call or the output parsing fails (the original only cleaned
    # up on the happy path). TRUE/FALSE spelled out instead of T/F.
    if (cleanup)
        on.exit(unlink(dir(pattern = "__paramlink2idcoefs__")), add = TRUE)
    write.table(ped, file = "__paramlink2idcoefs__.ped", quote = FALSE, row.names = FALSE,
        col.names = FALSE)
    write.table(ids, file = "__paramlink2idcoefs__.sample", quote = FALSE, row.names = FALSE,
        col.names = FALSE)
    command = "idcoefs -p __paramlink2idcoefs__.ped -s __paramlink2idcoefs__.sample -o __paramlink2idcoefs__.output"
    run = suppressWarnings(system(command, intern = TRUE))
    if (verbose)
        print(run)
    res = read.table("__paramlink2idcoefs__.output", as.is = TRUE)
    # Row 2 holds the requested pair; columns 3:11 are the 9 coefficients.
    as.numeric(res[2, 3:11])
}
|
5959e691d8fd04083faa51a663df51590d2332d7 | 0350ddd55d5bf5b6922a27fbd06e1aa9e83d8af1 | /Machine Learning A-Z - Hands-On Python & R Data Science/Regression/Random Forest Regression/random_forest_regression.R | 678e434e0b1518203349d0b0ed640ad9f8d9bfee | [] | no_license | jacobskr/ML_Python | 7a06b14c71a38b6fc148dd745a91abeb8dae833d | a1e18b7e2c9fc1f18e088cc73f0408ac746a98ef | refs/heads/master | 2020-04-26T00:40:34.719269 | 2019-05-05T02:59:31 | 2019-05-05T02:59:31 | 173,185,834 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 952 | r | random_forest_regression.R | # Random Forest Regression
# Importing the dataset.
# file.path() builds a portable path ("Data/Position_Salaries.csv"); R also
# accepts forward slashes on Windows, whereas the original "Data\\..." form
# broke on Unix-like systems.
dataset <- read.csv(file.path("Data", "Position_Salaries.csv"))
dataset <- dataset[2:3]  # keep only the Level and Salary columns

# Fitting the Random Forest Regression model to the dataset
library(randomForest)
set.seed(1234)  # make the forest reproducible
regressor <- randomForest(x = dataset[1],      # x as a data frame (that is what [] does)
                          y = dataset$Salary,  # y as a vector (that is what $ does)
                          ntree = 500)

# Predicting a new result for level 6.5
y_pred <- predict(regressor, data.frame(Level = 6.5))

# Visualising the Random Forest Regression results on a fine grid
# (0.01 steps) so the stepwise predictions are rendered smoothly.
library(ggplot2)
x_grid <- seq(min(dataset$Level), max(dataset$Level), 0.01)
ggplot() +
  geom_point(aes(x = dataset$Level, y = dataset$Salary),
             colour = 'red') +
  geom_line(aes(x = x_grid,
                y = predict(regressor, newdata = data.frame(Level = x_grid))),
            colour = 'blue') +
  ggtitle('Truth or Bluff') +
  xlab('Level') +
  ylab('Salary')
|
46e640285b19c17ff83643548db9dbd0d127c188 | aa9ab3be63d4388c12019d25f0c925677d22fd3e | /IPL-Match Prediction/MissingPlayerInfo.R | 306aef151b7b8ffee75c37c877f6760d36ca7c5c | [] | no_license | SujathaSubramanian/Projects | da05a1b20d06073605dacf07e869a949e308ac4d | d09fa21c5332c96c0bbfb6b5513d2e26883ae838 | refs/heads/master | 2020-06-11T19:24:48.949136 | 2017-04-21T10:11:31 | 2017-04-21T10:11:31 | 75,627,992 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,253 | r | MissingPlayerInfo.R | trim <- function( x ) {
gsub("(^[[:space:]]+|[[:space:]]+$)", "", x)
}
# Load scraped player data and the past IPL squad lists, then fetch career
# stats for squad members that are missing from PlayersData.
PlayersData <- read.xlsx("PlayersData.xlsx", 1, stringsAsFactors = FALSE)
str(PlayersData)
unique(PlayersData$PlayerName)

players <- read.xlsx("PastTeamData.xls", sheetName = "IPL Team Data")
# Remove the withdrawn players (and coaches) from the player list
players <- players[!trim(players$Role) %in% c("coach", "withdrawn player"), ]

# Squad members that have no row in PlayersData yet
Names <- players[!players$Name %in% unique(PlayersData$PlayerName), "Name"]

# Ad-hoc spot checks of individual players
PlayersData[PlayersData$PlayerName == "Shiv Chanderpaul", ]
unique(PlayersData$PlayerName)
subset(PlayersData, PlayerName == "Kane Richardson")

# Short-form names of the missing players, as used by the stats source
PlayerNames <- c("KV Sharma", "S Chanderpaul", "U Chand", "TG Southee", "K Cooper",
                 "BRM Taylor", "TA Boult", "KS Williamson", "LR Shukla",
                 "Gurkeerat Singh", "Sean Abbott", "Adam Milne")

# BUG FIX: the original loop bound was `length(MissingPlayers)` but
# `MissingPlayers` is never defined anywhere in this script, so the loop
# errored immediately; the loop actually indexes PlayerNames, so iterate
# over that. Results are collected in a list and bound once at the end
# instead of growing AllPlayerInfo with rbind() inside the loop.
info_list <- list()
for (i in seq_along(PlayerNames)) {
    PlayerInfo.table <- OverallPlayer.Info(PlayerNames[i])
    # OverallPlayer.Info() appears to return 0 when no data is found --
    # TODO confirm; identical() avoids the elementwise `!= 0` comparison,
    # which errors inside if() when a multi-row table is returned.
    if (!identical(PlayerInfo.table, 0)) {
        info_list[[length(info_list) + 1]] <- PlayerInfo.table
    }
}
AllPlayerInfo <- do.call(rbind, info_list)

# More ad-hoc inspection of other data sets loaded in the session
PastTeamData[PastTeamData$Year == "2014", ]
IPLMatchResults[IPLMatchResults$Year == "2008", ]

data <- read.xlsx("PlayersData.xlsx", 1, startRow = 1, endRow = 10)
head(PlayersData)
|
4af673ddf408cdcf756fe56f4929f4cebc443b7b | 4aa506e3fbbbac480a1a91e97f69e886eb273148 | /ScoutSujyot_Inclass2.r | 8b1425014f830483dcb767cace4525d8b96f46d7 | [] | no_license | msujyot/CMDA | dd44e46a76ce07d34cdc8d2b27522710d72eb905 | b4c23dfb70b503aa48285d21179daa52669be56e | refs/heads/master | 2016-09-05T12:19:17.361821 | 2014-09-22T20:59:54 | 2014-09-22T20:59:54 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,233 | r | ScoutSujyot_Inclass2.r | # In class assignment #2
# Scout (Mugdha) Sujyot
getwd()
setwd("C:\\Users\\Scout\\Desktop\\School\\CS 3654\\CMDA")
#The health insurance customer data
load('exampleData.rData')
#Summary statistics
summary(custdata) #for the entire data frame
summary(custdata$is.employed) # We have 325 NA's in the "is employed" field and we need
# to resolve those.
summary(custdata$income) # We have a negative minimum income (-8700!) which is
# illogical and we need to re-examine the data.
summary(custdata$age) # Our max age is 146.7 which seems unlikely. This is
# probably a data entry error and should be verified.
summary(custdata$num.vehicles) #We have 56 NA's for number of vehicles. These might just
#be 0's but should be checked.
summary(custdata$housing.type) #We have 56 NA's for housing type also.
summary(custdata$recent.move) #And also for recent.move. Perhaps there are the same
#records that are causing issues?
#Loading cars data
uciCar <- read.table(
'http://www.win-vector.com/dfiles/car.data.csv',
sep=',',
header=T
)
#Examine the loaded data
summary(uciCar) # This data is very well-behaved with no NAs indicating
# missing entries
load('credit.RData')
summary(d)
# We have to reshape the data to make it easier to work with
# Lookup table decoding the UCI German-credit categorical codes (A11, A30,
# A40, ...) into human-readable labels; applied column-by-column to `d`
# immediately below.
mapping <- list('A11'='... < 0 DM',
                'A12'='0 <= ... < 200 DM',
                'A13'='... >= 200 DM / salary assignments for at least 1 year',
                'A14'='no checking account',
                'A30'='no credits taken/all credits paid back duly',
                'A31'='all credits at this bank paid back duly',
                'A32'='existing credits paid back duly till now',
                'A33'='delay in paying off in the past',
                'A34'='critical account/other credits existing (not at this bank)',
                'A40'='car (new)',
                'A41'='car (used)',
                'A42'='furniture/equipment',
                'A43'='radio/television',
                'A44'='domestic appliances',
                'A45'='repairs',
                'A46'='education',
                'A47'='(vacation - does not exist?)',
                'A48'='retraining',
                'A49'='business',
                'A410'='others',
                'A61'='... < 100 DM',
                'A62'='100 <= ... < 500 DM',
                'A63'='500 <= ... < 1000 DM',
                'A64'='.. >= 1000 DM',
                'A65'='unknown/ no savings account',
                'A71'='unemployed',
                'A72'='... < 1 year',
                'A73'='1 <= ... < 4 years',
                'A74'='4 <= ... < 7 years',
                'A75'='.. >= 7 years',
                'A91'='male : divorced/separated',
                'A92'='female : divorced/separated/married',
                'A93'='male : single',
                'A94'='male : married/widowed',
                'A95'='female : single',
                'A101'='none',
                'A102'='co-applicant',
                'A103'='guarantor',
                'A121'='real estate',
                'A122'='if not A121 : building society savings agreement/life insurance',
                'A123'='if not A121/A122 : car or other, not in attribute 6',
                'A124'='unknown / no property',
                'A141'='bank',
                'A142'='stores',
                'A143'='none',
                'A151'='rent',
                'A152'='own',
                'A153'='for free',
                'A171'='unemployed/ unskilled - non-resident',
                'A172'='unskilled - resident',
                'A173'='skilled employee / official',
                'A174'='management/ self-employed/highly qualified employee/ officer',
                'A191'='none',
                'A192'='yes, registered under the customers name',
                'A201'='yes',
                'A202'='no')
# Recode every character column of `d` using the mapping above, turning the
# opaque A-codes into readable factor levels.
# seq_len() is safe when there are zero columns (1:n would yield c(1, 0)),
# and is.character() is more robust than comparing class() to a string.
num_variables <- ncol(d)
for (i in seq_len(num_variables)) {
  if (is.character(d[, i])) {
    d[, i] <- as.factor(as.character(mapping[d[, i]]))
  }
}
# Although we know that 92 men are married/widowed, 548 are single, and 50
# are divorced or separated, for the women we only know that 310 were
# married/separated/divorced and we don't know how many, if any, are single.
summary(d$Personal.status.and.sex)
# Out of all the loans, 907 did not have coapplicants or guarantors, 41 had
# coapplicants and 52 had guarantors. This indicates that it is easier to
# get a loan in Germany than in the States.
summary(d$'Other.debtors/guarantors')

# Install hexbin only when it is missing, instead of unconditionally
# reinstalling on every run of the script.
if (!requireNamespace('hexbin', quietly = TRUE)) install.packages('hexbin')
library(hexbin)
library(ggplot2)

load('exampleData.rData')

# Keep only customers with plausible ages and positive incomes.
custdata2 <- subset(custdata,
                    (custdata$age > 0 & custdata$age < 100
                     & custdata$income > 0))
names(custdata2)

# Hexbin plot: resembles a scatter plot in that it indicates density, but
# by binning the data this information shows up more clearly.
hexbinplot(custdata2$age ~ custdata2$income)

# Scatter plot comparing Income vs Number of Vehicles. There doesn't really
# seem to be any correlation between the two.
ggplot(custdata2, aes(x = income, y = num.vehicles)) +
  geom_point() +
  ylim(0, 6) +
  theme_bw() +
  ggtitle("Income vs Number of Vehicles")

# Side-by-side bar chart comparing Income < 30K vs Recent Move. There
# doesn't really seem to be any correlation between the two, either.
# BUG FIX: the original continued the ggplot chain with a leading "+" on a
# new line, which ended the first statement early (plot without theme/title)
# and made the "+ theme_bw() ..." line fail; "+" must end the previous line.
ggplot(custdata) +
  geom_bar(aes(x = recent.move, fill = income.lt.30K), position = "dodge") +
  theme_bw() +
  ggtitle("Income < 30K vs Recent Move")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.