blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
161831a51138d9c84a98756ae3e5d0063d6feaa6 | 85147cd2189fdb0735a41e90a25799d594fbedfc | /Research Paper_Soichi/code/T3.R | cd5f3be4d13768106e43bc2601940d9832f512ea | [] | no_license | skajikaji/hello-world | 7870f523fc35711327acd91a8b1119229b41e787 | d0a7cff6e45556d953db1536f32652ccce89c952 | refs/heads/master | 2021-01-17T11:57:05.140970 | 2017-03-11T22:37:38 | 2017-03-11T22:37:38 | 84,053,205 | 0 | 0 | null | 2017-03-10T17:17:57 | 2017-03-06T09:15:28 | null | UTF-8 | R | false | false | 2,027 | r | T3.R | #T3. C1
# Table 3: regressions of democracy (Polity IV score) on lagged democracy and
# lagged log GDP per capita using plm. Requires the Five/One/Ten/Twenty panel
# data frames and the clse() clustered-standard-error helper to be loaded.
#T3. C1: pooled OLS, five-year panel
Pooled_pols_p = plm(polity4 ~ lag(polity4)+lag(lrgdpch)+factor(year), data = Five, subset = sample == "1", index = c("code","year"), model = "pooling")
summary(Pooled_pols_p)
#T3. C2: country and year fixed effects, five-year panel
FE_fols_p_2 = plm(polity4 ~ lag(polity4)+lag(lrgdpch)+factor(country)+factor(year), data = Five, subset = sample == "1",index = c("country","year"), model = "within", effect = "individual")
summary(FE_fols_p_2)
#T3. C5: fixed effects without the lagged democracy term, five-year panel
# NOTE(review): index uses c("code","year") here but c("country","year")
# elsewhere -- confirm "code" and "country" identify the same panel units.
FE_folswod_p = plm(polity4 ~ lag(lrgdpch)+factor(year)+factor(country), data = Five, subset = sample =="1", index = c("code","year"), model = "within", effect = "individual")
summary(FE_folswod_p)
#T3. C6: five lags of both regressors, annual panel (One)
# NOTE(review): this model is estimated but not included in the stargazer
# table below -- confirm whether it is reported elsewhere.
FE_fols_p_6 = plm(polity4 ~ lag(polity4, 1:5)+lag(lrgdpch, 1:5)+factor(country)+factor(year), data = One, subset = sample == "1",index = c("country","year"), model = "within", effect = "individual")
summary(FE_fols_p_6)
#T3. C7: ten-year panel (comment previously mislabeled as T2)
FE_fols_p_7 = plm(polity4 ~ lag(polity4)+lag(lrgdpch)+factor(country)+factor(year), data = Ten, subset = sample == "1",index = c("country","year"), model = "within", effect = "individual")
summary(FE_fols_p_7)
#T3. C9: twenty-year panel (comment previously mislabeled as T2)
FE_fols_p_9 = plm(polity4 ~ lag(polity4)+lag(lrgdpch)+factor(country)+factor(year), data = Twenty, subset = sample == "1",index = c("country","year"), model = "within", effect = "individual")
summary(FE_fols_p_9)
library(stargazer)
# Export Table 3 as HTML with clustered standard errors from clse(); country,
# year, and constant terms are omitted from the display.
stargazer(Pooled_pols_p, FE_fols_p_2, FE_folswod_p, FE_fols_p_7, FE_fols_p_9, type="html", style = "aer",
se=list(clse(Pooled_pols_p),clse(FE_fols_p_2),clse(FE_folswod_p), clse(FE_fols_p_7),clse(FE_fols_p_9)),
column.labels = c("Five-year data", "Ten-year data", "Twenty-year data"), column.separate = c(3,1,1),
no.space = TRUE, dep.var.labels="Dependent variable is democracy", omit = c("factor", "Constant"), omit.stat = c("f"),
covariate.labels=c("Democracy_{t-1}", "Log GDP per capita_{t-1}"),
object.names = TRUE,
title="Table 3-Fixed Effects Results Using Polity Measure of Democracy", align=TRUE, out = "Table 3.htm")
|
33f75d8dd6e8bcbec872f7f10176c19b62d5c7fb | fecfc421df96d4db88764e7d7d6d1c543723f52b | /R_scripts/aquadiva_final_90days_beta_diversity.R | dc775756d1f1cc7a96d0bc6b92083d309b84fe98 | [
"MIT"
] | permissive | pikatech/selection_shapes | fe4ddb63de6ed788581413fda89e1091495dbc82 | f0605bc4b4f617d99b1dd92b69feda00e620b868 | refs/heads/master | 2021-07-20T22:21:11.654160 | 2020-08-30T20:14:12 | 2020-08-30T20:14:12 | 211,339,284 | 2 | 0 | null | 2020-08-30T20:14:13 | 2019-09-27T14:37:15 | R | UTF-8 | R | false | false | 4,524 | r | aquadiva_final_90days_beta_diversity.R | dataPath <- "/home/tech/Dokumente/AquaDiva/manuscript/data"
# ---- Setup: working directory, libraries, palettes, and rarefied data ----
setwd(dataPath)
# Load libraries
library(phyloseq)
library(Biostrings)
library(reshape2)
library(vegan)
library(ape) # to read tree/fasta file
library(readr)
library(gridExtra)
library(ggplot2)
library(ggalluvial)
library(NbClust)
library(ggrepel)
# some settings
# One color per microbial cluster (mCluster 1..6), reused across all figures.
set_colors_mcluster <- c("#8CFF8C", "cyan", "#FBC58C", "#DFE07F", "#D0A5fE", "#008DD2")
# One color per well; Well H2 entries are commented out (not in this dataset).
set_colors_well <- c(
"green1", "green4" , # Well H1
# "#A2A980", "wheat", # Well H2
"cyan2", "cyan4", # well H3
"#008DD2","gold","lightgoldenrod4",# well H4
"#FBC58C", "mediumpurple3", "plum" # Well H5
)
# Load data
# Loads the phyloseq object `data_rarefy` (rarefied 16S counts) into the
# global environment; all downstream analyses use this object.
load(file.path(dataPath, "data_rarefy.phyloseq"))
#########################################################################################
################################ hierarchical clustering ##############################
#########################################################################################
set.seed(123)
# Calculate the dissimilarity between samples using Bray-Curtis distance
d1 <- phyloseq::distance(data_rarefy, "bray")
# To find the optimal number of clusters (silhouette criterion, Ward linkage)
res.nb <- NbClust(diss=as.dist(d1), distance=NULL,
min.nc = 2, max.nc = 10,
method = "ward.D2", index ="silhouette")
res.nb # print the results
nc = res.nb$Best.nc[1] # best number of clusters
# Plot
hc1 <- hclust(d1, method="ward.D2")
clusnc = cutree(hc1, nc)
clusnc # check the clusters
# replace mCluster numbers, because we will set H41 to an extra cluster (mCluster 6) and keep the number of other clusters
# Three-way relabel via the temporary value 10: 3 -> 6, 5 -> 3, 6 -> 5.
# The order of these four assignments matters; do not reorder them.
clusnc[clusnc == 3] <- 10
clusnc[clusnc == 5] <- 3
clusnc[clusnc == 6] <- 5
clusnc[clusnc == 10] <- 6
clusnc
### Now plot the dendrogram and color the tips (mClusters) with different colors
par(mar=c(3, 0, 0, 0), xpd=TRUE) # bottom,left, top, right
plot(as.phylo(hc1), tip.color = set_colors_mcluster[clusnc],direction = "downwards",
cex = 1)
# Draw a legend anywhere on the current device by temporarily switching to a
# full-figure, zero-margin coordinate system. All arguments are forwarded
# unchanged to legend(); graphical parameters are restored on exit.
add_legend <- function(...) {
  prev.par <- par(fig = c(0, 1, 0, 1), oma = c(0, 0, 0, 0),
                  mar = c(0, 0, 0, 0), new = TRUE)
  on.exit(par(prev.par))
  # Blank canvas covering the whole device: no box, no axes.
  plot(0, 0, type = 'n', bty = 'n', xaxt = 'n', yaxt = 'n')
  legend(...)
}
# Add the mCluster legend below the dendrogram and save the figure as PDF.
add_legend("bottom", legend=c("mCluster 1","mCluster 2","mCluster 3","mCluster 4","mCluster 5","mCluster 6"),
fill=set_colors_mcluster ,inset=.02,
horiz=TRUE, cex=1.1)
dev.copy(pdf, "hierarchical_clustering_per_sample.pdf", width = 14, height = 5)
dev.off()
##################################################################
# PCoA and PERMANOVA using Bray-Curtis distance #
##################################################################
set.seed(123)
# make a data frame from the sample_data
sampledf <- data.frame(sample_data(data_rarefy))
# Adonis test (Number of permutations: 999)
# NOTE(review): vegan::adonis is deprecated in current vegan releases in
# favor of adonis2 (which returns a plain table, not $aov.tab) -- confirm
# the installed vegan version before rerunning.
ht_mcluster <- adonis(d1 ~ mCluster, data = sampledf) # ***
# This output tells us that our adonis test is significant,
# so we can reject the null hypothesis that the mCluster groups have the same centroid.
ht_mcluster
# Export the PERMANOVA results
write.table(data.frame(ht_mcluster$aov.tab), "permanova_mcluster_norm.csv", sep=";", dec=",", col.names=NA)
# Ordination (PCoA)
set.seed(123)
ordu = ordinate(data_rarefy, method = "PCoA", distance = "bray")
# PCoA scatter plot: points colored by well, normal-confidence ellipses
# filled by mCluster, axis labels annotated with percent variance explained,
# and the PERMANOVA p-value / R-squared printed inside the panel.
p_pcoa <- plot_ordination(physeq = data_rarefy, ordination = ordu) +
theme_bw() +
theme(strip.text = element_text(size = 14, colour = "black", angle = 0),
legend.title = element_text(size = 14, colour = "black", face="bold", angle = 0),
axis.title=element_text(size=14), axis.text=element_text(size=14), legend.text=element_text(size=14)) +
scale_color_manual(values=set_colors_well, name="Well") +
stat_ellipse(geom = "polygon", type="norm", alpha=0.4, aes(fill=mCluster)) +
geom_point(size=3, aes(color = Well)) +
ggtitle("PCoA") +
xlab(paste0("PCoA 1 (", round(ordu$values[1,2]*100,1),"%)")) + ylab(paste0("PCoA 2 (", round(ordu$values[2,2]*100,1),"%)")) +
scale_fill_manual(values=set_colors_mcluster) +
annotate("label", label = paste0("adonis(Bray-Curtis distance ~ mCluster)\n", "p < ", ht_mcluster$aov.tab$"Pr(>F)"[1], ", R-squared = ",
round(ht_mcluster$aov.tab$R2[1],2)), x = -0.45, y = -0.32, size = 4.5,
colour = "black", hjust = 0) +
guides(color = guide_legend(override.aes = list(fill=NA)),
fill= guide_legend(override.aes = list(linetype = 0)))
p_pcoa
dev.copy(pdf, "pcoa_rarefied_data.pdf", width = 8, height = 6)
dev.off()
|
cb690e9bcd750d0bdb5b99d10ecce69093ec6e4f | cc04267cffec0786f121c2fdc27433335f0f0720 | /man/crawl-package.Rd | f1b2d5a8a77d0070e7ad79eb933735075e923082 | [] | no_license | mudit2013/crawl | 2502ec0eba1f51bdbddf2f7638c980bad084ec2d | 4d20e3b7a181c98832765c59a891ceb7ca1e3b73 | refs/heads/master | 2021-01-18T08:50:22.104226 | 2015-08-20T01:38:13 | 2015-08-20T01:38:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,053 | rd | crawl-package.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/crawl-package.R
\docType{package}
\name{crawl-package}
\alias{crawl}
\alias{crawl-package}
\title{Fit Continuous-Time Correlated Random Walk Models to Animal Movement Data}
\description{
The (C)orrelated (RA)ndom (W)alk (L)ibrary (I know it is not an R library,
but, "crawp" did not sound as good) of R functions was designed for fitting
continuous-time correlated random walk (CTCRW) models with time indexed
covariates. The model is fit using the Kalman-Filter on a state space
version of the continuous-time stochastic movement process.
}
\details{
\tabular{ll}{ Package: \tab crawl\cr Type: \tab Package\cr Version: \tab
1.5\cr Date: \tab Aug 19, 2015\cr License: \tab Unlimited \cr LazyLoad: \tab
yes\cr }
}
\author{
Devin S. Johnson
Maintainer: Devin S. Johnson <devin.johnson@noaa.gov>
}
\references{
Johnson, D., J. London, M. -A. Lea, and J. Durban (2008)
Continuous-time correlated random walk model for animal telemetry data.
Ecology 89(5) 1208-1215.
}
|
cdcb7742a5db8c2d063dd4dd1deeb2f7efa17342 | 72fe4dae0e6d42a8de38165cc86a3f328327778b | /MiSeq_16S/differential_abundance.R | 361dee032c3ac3c57b8c2b1929721d8a9aca446a | [] | no_license | cwarden45/metagenomics_templates | 7558c38926552049103b42f35d0b0e0f2978e7b5 | 45f45dd1ad3388169b514ff3d13484ba35371aa5 | refs/heads/master | 2021-01-12T14:16:58.039335 | 2018-12-17T21:06:12 | 2018-12-17T21:06:12 | 70,007,310 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 36,706 | r | differential_abundance.R | avgGroupExpression = function (geneExpr, groups) {
# Mean of geneExpr within each level of `groups`; returns a named vector
# with one per-group average (names are the group levels, via tapply).
avg.expr = tapply(geneExpr, groups, mean)
return(avg.expr)
}#end def avgGroupExpression
# Convert an abundance ratio to a signed fold-change: ratios >= 1 are
# reported unchanged, ratios < 1 as the negative reciprocal (so a ratio
# of 0.5 becomes -2). Expects a single numeric value.
ratio2fc = function(value)
{
  if (value < 1) {
    return(-1 / value)
  }
  value
}#end def ratio2fc
# Pearson correlation between one feature's abundances (`arr`) and a
# continuous independent variable (`indep.var`). Returns NA when there
# are fewer than 3 non-missing values or the abundances have zero
# (or undefined) variance.
#
# Fix: the original computed sd(arr) without na.rm, so any NA in `arr`
# made the if() condition NA and aborted the apply() call, and cor()'s
# default use="everything" returned NA even when >= 3 values were
# present. NA-free inputs behave exactly as before.
calc.gene.cor = function(arr, indep.var)
{
  na.count = length(arr[!is.na(arr)])
  arr.sd = sd(arr, na.rm = TRUE)
  if ((na.count >= 3) && !is.na(arr.sd) && (arr.sd != 0)) {
    # complete.obs drops pairs with a missing value in either vector.
    gene.cor.coef = cor(arr, indep.var, use = "complete.obs")
  } else {
    gene.cor.coef = NA
  }
  return(gene.cor.coef)
}#end def calc.gene.cor
# Number of NA (including NaN) entries in `arr`; 0 for empty input.
count.na.values = function(arr)
{
  sum(is.na(arr))
}#end def count.na.values
# Shorten a ";"-delimited taxonomy assignment (e.g. "Bacteria;...;Genus")
# to its final field. `kept.characters` is retained for backward
# compatibility with older truncation-based callers but is unused.
reformat.label = function(name, kept.characters=20){
  parts = strsplit(name, split = ";")[[1]]
  parts[length(parts)]
}#end def reformat.label
# P-value from a linear model fit to one feature's abundances:
# - var1 only:            abundance ~ var1,               p for var1 (row 2)
# - var1 + var2:          abundance ~ var1 + var2,        p for var1 (row 2)
# - var1 + var2 + var3:   abundance ~ var2*var3,          p for the fourth
#   coefficient row (the var2:var3 interaction; var1 is ignored in this
#   branch, matching the original call convention).
gene.lm = function(arr, var1, var2=c(), var3=c())
{
  response = as.numeric(arr)
  if (length(var2) == 0) {
    coef.row = 2
    fit = lm(response ~ var1)
  } else if (length(var3) == 0) {
    coef.row = 2
    fit = lm(response ~ var1 + var2)
  } else {
    coef.row = 4
    fit = lm(response ~ var2*var3 + var2 + var3)
  }
  summary(fit)$coefficients[coef.row, 4]
}#end def gene.lm
# P-value from a one-way / two-way ANOVA on one feature's abundances:
# - var1 only:            abundance ~ var1,            p for var1 (first row)
# - var1 + var2:          abundance ~ var1 + var2,     p for var1 (first row)
# - var1 + var2 + var3:   abundance ~ var2*var3,       p for the third term
#   (the var2:var3 interaction; var1 is ignored in this branch, matching
#   the original call convention).
gene.aov = function(arr, var1, var2=c(), var3=c())
{
  response = as.numeric(arr)
  if (length(var2) == 0) {
    term.index = 1
    result = summary(aov(response ~ var1))
  } else if (length(var3) == 0) {
    term.index = 1
    result = summary(aov(response ~ var1 + var2))
  } else {
    term.index = 3
    result = summary(aov(response ~ var2*var3 + var2 + var3))
  }
  result[[1]][['Pr(>F)']][term.index]
}#end def gene.aov
# ---- Setup: libraries, analysis parameters, and sample metadata ----
library(gplots)
# Large fixed palette so group colors stay stable across plots; falls back
# to the full colors() list after the named choices are exhausted.
fixed.color.palatte = c("green","orange","purple","cyan","pink","maroon","yellow","grey","red","blue","black","darkgreen","thistle1","tan","orchid1",colors())
# parameters.txt is a two-column (Parameter, Value) tab-delimited file
# driving the whole comparison; each setting is looked up by name below.
param.table = read.table("parameters.txt", header=T, sep="\t")
comp.name=as.character(param.table$Value[param.table$Parameter == "comp_name"])
min.abundance= as.numeric(as.character(param.table$Value[param.table$Parameter == "abundance_cutoff"]))
fc.rounding.factor= as.numeric(as.character(param.table$Value[param.table$Parameter == "rounding_factor"]))
fc.cutoff= as.numeric(as.character(param.table$Value[param.table$Parameter == "fc_cutoff"]))
pvalue.cutoff = as.numeric(as.character(param.table$Value[param.table$Parameter == "pvalue_cutoff"]))
fdr.cutoff = as.numeric(as.character(param.table$Value[param.table$Parameter == "fdr_cutoff"]))
pvalue.cutoff2 = as.numeric(as.character(param.table$Value[param.table$Parameter == "sec_pvalue_cutoff"]))
fdr.cutoff2 = as.numeric(as.character(param.table$Value[param.table$Parameter == "sec_fdr_cutoff"]))
# deg_groups / plot_groups are comma-separated lists of metadata columns.
deg.groups = unlist(strsplit(as.character(param.table$Value[param.table$Parameter == "deg_groups"]), split=","))
plot.groups = unlist(strsplit(as.character(param.table$Value[param.table$Parameter == "plot_groups"]), split=","))
# trt.group(2) may be a group level or the literal string "continuous".
trt.group = as.character(param.table$Value[param.table$Parameter == "treatment_group"])
trt.group2 = as.character(param.table$Value[param.table$Parameter == "secondary_trt"])
# interaction is one of "no"/"none", "model", or "filter-overlap".
interaction.flag = as.character(param.table$Value[param.table$Parameter == "interaction"])
interaction.flag[interaction.flag == "none"]="no"
pvalue.method = as.character(param.table$Value[param.table$Parameter == "pvalue_method"])
fdr.method = as.character(param.table$Value[param.table$Parameter == "fdr_method"])
output.folder = as.character(param.table$Value[param.table$Parameter == "Raw_Code_PC"])
user.folder = as.character(param.table$Value[param.table$Parameter == "Result_Folder"])
sample.description.file = as.character(param.table$Value[param.table$Parameter == "sample_description_file"])
counts.file = as.character(param.table$Value[param.table$Parameter == "counts_file"])
abundance.file= as.character(param.table$Value[param.table$Parameter == "abundance_file"])
setwd(output.folder)
sample.description.table = read.table(sample.description.file, sep="\t", header=T)
longID = sample.description.table$sampleID
sample.label = sample.description.table$userID
# deg.meta keeps only samples with non-missing values for every DEG group
# column (used later for the replicate-count check).
deg.group.table = sample.description.table[,deg.groups]
if (length(deg.groups) == 1){
deg.meta = sample.description.table[!is.na(deg.group.table),]
} else {
deg.grp.na.counts = apply(deg.group.table, 1, count.na.values)
deg.meta = sample.description.table[deg.grp.na.counts == 0,]
}
# ---- Build abundance and count matrices (rows = taxonomy assignments,
# ---- columns = samples); missing cells become 0.
ab.table = read.table(abundance.file, head=T, sep="\t")
assignments = as.character(ab.table$Assignment)
ab.mat= ab.table[,match(sample.label,names(ab.table))]
ab.mat[is.na(ab.mat)] = 0
ab.mat = matrix(as.numeric(unlist(ab.mat)), ncol=length(sample.label))
rownames(ab.mat) = as.character(assignments)
colnames(ab.mat) = as.character(sample.label)
counts.table = read.table(counts.file, head=T, sep="\t")
counts = counts.table[,match(sample.label,names(counts.table))]
counts[is.na(counts)] = 0
counts = matrix(as.numeric(unlist(counts)), ncol=length(sample.label))
rownames(counts) = as.character(assignments)
colnames(counts) = as.character(sample.label)
print(dim(ab.mat))
# Keep only assignments whose maximum abundance across all samples exceeds
# the configured cutoff; dimensions are printed before and after filtering.
ab.max = apply(ab.mat, 1, max, na.rm = T)
assignments = assignments[ab.max > min.abundance]
ab.mat = ab.mat[match(assignments, rownames(ab.mat)),]
counts = counts[match(assignments, rownames(counts)),]
print(dim(ab.mat))
# Add a small pseudo-abundance so later fold-change ratios never divide by 0.
ab.mat = ab.mat + fc.rounding.factor
# ---- Per-group average abundance for the plotting variable(s). With two
# ---- plot groups and interaction enabled, groups are "var1:var2" pairs.
if(length(plot.groups) == 1){
print("Averaging Abundance for One Variable (for plot.groups)")
grp = sample.description.table[,plot.groups]
} else if ((length(plot.groups) == 2)&(interaction.flag == "no")){
print("Averaging Abundance for First Variable (for plot.groups)")
grp = sample.description.table[,plot.groups[1]]
} else if (length(plot.groups) == 2){
print("Averaging Abundance for Interaction Variable (for plot.groups)")
grp = paste(sample.description.table[,plot.groups[1]],sample.description.table[,plot.groups[2]],sep=":")
} else {
stop("Code only compatible with 2 variables (with or without a 3rd interaction variable")
}
groupIDs = as.character(levels(as.factor(grp)))
average.ab = data.frame(t(apply(ab.mat, 1, avgGroupExpression, groups = grp)))
# With a single group the apply/t combination collapses the matrix the
# wrong way, so transpose back.
if(length(groupIDs) == 1){
average.ab = t(average.ab)
} else {
average.ab = average.ab
}
colnames(average.ab) = paste("avg.abundance", sub("-",".",groupIDs), sep=".")
#removed undefined group IDs (so, you can visualize samples that aren't in your comparison)
# ---- Restrict to samples with defined DEG-group values, then re-apply the
# ---- abundance filter on that subset. For "filter-overlap" the data are
# ---- additionally split by the secondary treatment into prim.*/sec.* sets.
if(length(deg.groups) == 1){
var1 = sample.description.table[,deg.groups]
deg.counts = counts[,!is.na(var1)]
deg.ab = ab.mat[,!is.na(var1)]
ab.max = apply(deg.ab, 1, max, na.rm = T)
assignments = assignments[ab.max > min.abundance]
ab.mat = ab.mat[match(assignments, rownames(ab.mat)),]
counts = counts[match(assignments, rownames(counts)),]
deg.counts = deg.counts[match(assignments, rownames(deg.counts)),]
deg.ab = deg.ab[match(assignments, rownames(deg.ab)),]
average.ab = average.ab[match(assignments, rownames(average.ab)),]
print(dim(ab.mat))
var1 = var1[!is.na(var1)]
} else if (length(deg.groups) == 2){
if(interaction.flag == "filter-overlap"){
var1 = sample.description.table[,deg.groups[1]]
var2 = sample.description.table[,deg.groups[2]]
deg.samples = !is.na(var1)&!is.na(var2)
deg.counts = counts[,deg.samples]
deg.ab = ab.mat[,deg.samples]
ab.max = apply(deg.ab, 1, max, na.rm = T)
assignments = assignments[ab.max > min.abundance]
ab.mat = ab.mat[match(assignments, rownames(ab.mat)),]
counts = counts[match(assignments, rownames(counts)),]
deg.counts = deg.counts[match(assignments, rownames(deg.counts)),]
deg.ab = deg.ab[match(assignments, rownames(deg.ab)),]
average.ab = average.ab[match(assignments, rownames(average.ab)),]
print(dim(ab.mat))
var1 = var1[deg.samples]
var2 = var2[deg.samples]
# Split by secondary treatment: "prim" = samples at trt.group2,
# "sec" = all other secondary-group samples.
prim.deg.grp = var1[var2 == trt.group2]
prim.counts = counts[,var2 == trt.group2]
prim.ab = ab.mat[,var2 == trt.group2]
sec.deg.grp = var1[var2 != trt.group2]
sec.counts = counts[,var2 != trt.group2]
sec.ab = ab.mat[,var2 != trt.group2]
} else{
var1 = sample.description.table[,deg.groups[1]]
var2 = sample.description.table[,deg.groups[2]]
deg.samples = !is.na(var1)&!is.na(var2)
deg.counts = counts[,deg.samples]
deg.ab = ab.mat[,deg.samples]
ab.max = apply(deg.ab, 1, max, na.rm = T)
assignments = assignments[ab.max > min.abundance]
ab.mat = ab.mat[match(assignments, rownames(ab.mat)),]
counts = counts[match(assignments, rownames(counts)),]
deg.counts = deg.counts[match(assignments, rownames(deg.counts)),]
deg.ab = deg.ab[match(assignments, rownames(deg.ab)),]
average.ab = average.ab[match(assignments, rownames(average.ab)),]
print(dim(ab.mat))
var1 = var1[deg.samples]
var2 = var2[deg.samples]
}
} else {
stop("Code currently doesn't support more than 2 group model for DEG (with or without interaction)")
}
# ---- Define the contrast grouping used for the fold-change / correlation
# ---- summaries; with two DEG groups plus interaction it is "var1:var2".
if(length(deg.groups) == 1){
print("Averaging Abundance for One Variable (for deg.groups)")
contrast.grp = var1
} else if ((length(deg.groups) == 2)&(interaction.flag == "no")){
print("Averaging Abundance for First Variable (for deg.groups)")
contrast.grp = var1
} else if (length(deg.groups) == 2){
print("Averaging Abundance for Interaction Variable (for deg.groups)")
contrast.grp = paste(var1,var2,sep=":")
} else {
stop("Code only compatible with 2 variables (with or without a 3rd interaction variable")
}
# Continuous primary variable: per-assignment Pearson correlation.
# Categorical: per-group average abundance (groupIDs is reused below).
if (trt.group == "continuous"){
contrast.grp = as.numeric(contrast.grp)
gene.cor = apply(deg.ab, 1, calc.gene.cor, indep.var=contrast.grp)
fc.table = data.frame(cor=gene.cor)
} else {
groupIDs = as.character(levels(as.factor(contrast.grp)))
contrast.ab = data.frame(t(apply(deg.ab, 1, avgGroupExpression, groups = contrast.grp)))
colnames(contrast.ab) = paste("avg.abundance", sub("-",".",groupIDs), sep=".")
}#end else
# ---- Build fc.table: fold-changes and/or correlations depending on the
# ---- interaction mode and whether each treatment variable is continuous.
if((interaction.flag == "no") & (trt.group != "continuous")){
print("Calculating fold-change for primary variable")
trt.expr = contrast.ab[,paste("avg.abundance", sub("-",".",trt.group), sep=".")]
cntl.expr = contrast.ab[,paste("avg.abundance", sub("-",".",groupIDs[groupIDs != trt.group]), sep=".")]
fc = trt.expr / cntl.expr
fc = round(sapply(fc, ratio2fc), digits=2)
fc.table = data.frame(fold.change=fc)
} else if ((interaction.flag == "model")|(interaction.flag == "filter-overlap")){
# Both variables continuous: report a correlation for each.
if ((trt.group == "continuous")&(trt.group2 == "continuous")){
print("Calculating correlation for secondary variable")
sec.contrast.grp = as.numeric(var2)
gene.cor2 = apply(deg.ab, 1, calc.gene.cor, indep.var=sec.contrast.grp)
fc.table = data.frame(prim.cor=gene.cor, sec.cor = gene.cor2)
} else if (trt.group == "continuous"){
print("Fold-change / correlation cutoff not used for mixed variable analysis")
print("NOTE: 'Up-Regulated' R output refers to assignments that vary with FDR and p-value cutoffs")
print("However, fold-change / correlation values for each separate variable are still provided")
sec.groupIDs = var2
sec.groups = as.character(levels(as.factor(sec.groupIDs)))
sec.contrast.ab = data.frame(t(apply(deg.ab, 1, avgGroupExpression, groups = sec.groupIDs)))
# NOTE(review): this uses groupIDs, not sec.groups, to name the columns;
# in the continuous-primary branch groupIDs may not match (or exist) --
# confirm whether sec.groups was intended here.
colnames(sec.contrast.ab) = paste("avg.abundance", sub("-",".",groupIDs), sep=".")
sec.trt.expr = sec.contrast.ab[,paste("avg.abundance", sub("-",".",trt.group2), sep=".")]
sec.cntl.expr = sec.contrast.ab[,paste("avg.abundance", sub("-",".",sec.groups[sec.groups != trt.group2]), sep=".")]
sec.fc = sec.trt.expr / sec.cntl.expr
sec.fc = round(sapply(sec.fc, ratio2fc), digits=2)
fc.table = data.frame(prim.cor=gene.cor, sec.fc = sec.fc)
} else if (trt.group2 == "continuous"){
print("Fold-change / correlation cutoff not used for mixed variable analysis")
print("NOTE: 'Up-Regulated' R output refers to assignments that vary with FDR and p-value cutoffs")
print("However, fold-change / correlation values for each separate variable are still provided")
prim.groupIDs = var1
prim.groups = as.character(levels(as.factor(prim.groupIDs)))
prim.contrast.ab = data.frame(t(apply(deg.ab, 1, avgGroupExpression, groups = prim.groupIDs)))
colnames(prim.contrast.ab) = paste("avg.abundance", sub("-",".",prim.groups), sep=".")
prim.trt = trt.group
prim.cntl = prim.groups[prim.groups != trt.group]
prim.trt.expr = prim.contrast.ab[,paste("avg.abundance", sub("-",".",prim.trt), sep=".")]
prim.cntl.expr = prim.contrast.ab[,paste("avg.abundance", sub("-",".",prim.cntl), sep=".")]
prim.fc = prim.trt.expr / prim.cntl.expr
prim.fc = round(sapply(prim.fc, ratio2fc), digits=2)
sec.contrast.grp = as.numeric(var2)
gene.cor2 = apply(deg.ab, 1, calc.gene.cor, indep.var=sec.contrast.grp)
fc.table = data.frame(prim.fc=prim.fc, sec.cor = gene.cor2)
} else {
# Both categorical: one fold-change per subset of the secondary variable.
print("Calculating fold-change table for primary variables (within subsets of secondary variable)")
prim.groups = paste(var1,var2,sep=":")
prim.trt = paste(trt.group,trt.group2,sep=":")
prim.cntl = paste(prim.groups[prim.groups != trt.group],trt.group2,sep=":")
prim.trt.expr = contrast.ab[,paste("avg.abundance", sub("-",".",prim.trt), sep=".")]
prim.cntl.expr = contrast.ab[,paste("avg.abundance", sub("-",".",prim.cntl), sep=".")]
prim.fc = prim.trt.expr / prim.cntl.expr
prim.fc = round(sapply(prim.fc, ratio2fc), digits=2)
sec.groups = as.character(levels(as.factor(sample.description.table[,deg.groups[2]])))
sec.trt = paste(trt.group, sec.groups[sec.groups != trt.group2], sep=":")
sec.cntl = paste(prim.groups[prim.groups != trt.group], sec.groups[sec.groups != trt.group2], sep=":")
sec.trt.expr = contrast.ab[,paste("avg.abundance", sub("-",".",sec.trt), sep=".")]
sec.cntl.expr = contrast.ab[,paste("avg.abundance", sub("-",".",sec.cntl), sep=".")]
sec.fc = sec.trt.expr / sec.cntl.expr
sec.fc = round(sapply(sec.fc, ratio2fc), digits=2)
#don't provide in table, but use for direction assignment
overall.fc = prim.fc - sec.fc
fc.table = data.frame(fc1 = prim.fc, fc2=sec.fc)
colnames(fc.table) = c(paste("fold.change",trt.group,":",trt.group2,sep="."),
paste("fold.change",trt.group,":",sec.groups[sec.groups != trt.group2], sep="."))
}#end else
}else if(trt.group == "continuous"){
print("Skipping fold-change calculation for continuous variable")
}else{
stop("interaction must be \"no\", \"model\", or \"filter-overlap\"")
}#end else
# ---- Replicate check: p-values are only calculated (rep.check stays 1)
# ---- when every categorical DEG group has at least 2 samples per level.
rep.check = 1
for (i in 1:length(deg.groups)){
deg.group = deg.groups[i]
# Only categorical variables need per-level replicates; continuous
# variables are skipped by the trt.group checks below.
if((i == 1) & (trt.group != "continuous")){
deg.group.values = as.character(deg.meta[,deg.group])
min.reps = min(table(deg.group.values))
if (min.reps < 2){
rep.check=0
print("There are not at least 2 samples per-group in order to calculate p-value.")
print("In the future, please make sure you at least have duplicate samples.")
}#end if (min.reps < 2)
} else if ((i == 2) & (trt.group2 != "continuous")){
deg.group.values = as.character(deg.meta[,deg.group])
min.reps = min(table(deg.group.values))
if (min.reps < 2){
rep.check=0
print("There are not at least 2 samples per-group in order to calculate p-value.")
print("In the future, please make sure you at least have duplicate samples.")
}#end if (min.reps < 2)
} else if (i > 2){
stop("Workflow currently doesn't support use of more than 2 variables")
}
}#end for (deg.group in deg.groups)
if(rep.check == 1){
#start p-value calculation
if (pvalue.method == "metagenomeSeq"){
library(metagenomeSeq)
if ((length(deg.groups) == 2)&(interaction.flag == "filter-overlap")){
print("metagenomeSeq, Two-Step Analysis")
stop("Need to add code!")
} else {
if (length(deg.groups) == 1){
print("metagenomeSeq with 1 variable")
if (trt.group == "continuous"){
var1 = as.numeric(var1)
}
pheno = data.frame(var1=var1)
rownames(pheno) = colnames(deg.counts)
features = data.frame(genus = assignments)
rownames(features) = rownames(deg.counts)
mrObj = newMRexperiment(deg.counts, phenoData=AnnotatedDataFrame(pheno), featureData=AnnotatedDataFrame(features))
scalePercentile = cumNormStatFast(mrObj)
mrObj = cumNorm(mrObj, p = scalePercentile)
design = model.matrix(~var1)
fit = fitFeatureModel(mrObj,design)
test.pvalue = as.numeric(fit$pvalues)
} else if ((length(deg.groups) == 2)&(interaction.flag == "no")){
print("metagenomeSeq with 2 variables")
if (trt.group == "continuous"){
var1 = as.numeric(var1)
} else{
var1 = as.factor(as.character(var1))
}
if (trt.group2 == "continuous"){
var2 = as.numeric(var2)
} else{
var2 = as.factor(as.character(var2))
}
pheno = data.frame(var1=var1, var2=var2)
rownames(pheno) = colnames(deg.counts)
features = data.frame(genus = assignments)
rownames(features) = rownames(deg.counts)
mrObj = newMRexperiment(deg.counts, phenoData=AnnotatedDataFrame(pheno), featureData=AnnotatedDataFrame(features))
scalePercentile = cumNormStatFast(mrObj)
mrObj = cumNorm(mrObj, p = scalePercentile)
design = model.matrix(~var1+var2)
#get error message if trying to use fitFeatureModel
settings = zigControl(maxit = 10, verbose = TRUE)
fit = fitZig(obj = mrObj, mod = design, useCSSoffset = FALSE,
control = settings)
pvalue.mat = fit$eb$p.value
test.pvalue = as.numeric(pvalue.mat[,2])
} else if ((length(deg.groups) == 2)&(interaction.flag == "model")){
print("metagenomeSeq with 2 variables plus interaction")
stop("Need to add code!")
}
}#end else
} else if (pvalue.method == "limma-ab"){
library(limma)
if ((length(deg.groups) == 2)&(interaction.flag == "filter-overlap")){
print("limma, Two-Step Analysis")
if (trt.group == "continuous"){
prim.deg.grp = as.numeric(prim.deg.grp)
}
design = model.matrix(~prim.deg.grp)
fit = lmFit(prim.ab, design)
fit = eBayes(fit)
pvalue.mat = data.frame(fit$p.value)
prim.pvalue = pvalue.mat[,2]
if (trt.group2 == "continuous"){
sec.deg.grp = as.numeric(sec.deg.grp)
}
design = model.matrix(~sec.deg.grp)
fit = lmFit(sec.ab,design)
fit = eBayes(fit)
pvalue.mat = data.frame(fit$p.value)
sec.pvalue = pvalue.mat[,2]
} else {
if (length(deg.groups) == 1){
print("limma with 1 variable")
if (trt.group == "continuous"){
var1 = as.numeric(var1)
}
design = model.matrix(~var1)
fit = lmFit(deg.ab,design)
fit = eBayes(fit)
pvalue.mat = data.frame(fit$p.value)
test.pvalue = pvalue.mat[,2]
} else if ((length(deg.groups) == 2)&(interaction.flag == "no")){
print("limma with 2 variables")
if (trt.group == "continuous"){
var1 = as.numeric(var1)
} else{
var1 = as.factor(as.character(var1))
}
if (trt.group2 == "continuous"){
var2 = as.numeric(var2)
} else{
var2 = as.factor(as.character(var2))
}
design = model.matrix(~var1 + var2)
fit = lmFit(deg.ab,design)
fit = eBayes(fit)
pvalue.mat = data.frame(fit$p.value)
test.pvalue = pvalue.mat[,2]
} else if ((length(deg.groups) == 2)&(interaction.flag == "model")){
print("limma with 2 variables plus interaction")
if (trt.group == "continuous"){
var1 = as.numeric(var1)
}
if (trt.group2 == "continuous"){
var2 = as.numeric(var2)
}
design = model.matrix(~var1*var2 + var1 + var2)
fit = lmFit(deg.ab,design)
fit = eBayes(fit)
pvalue.mat = data.frame(fit$p.value)
test.pvalue = pvalue.mat[,4]
}
}#end else
} else if (pvalue.method == "limma-counts"){
library(edgeR)
library(limma)
if ((length(deg.groups) == 2)&(interaction.flag == "filter-overlap")){
print("limma-voom, Two-Step Analysis")
if (trt.group == "continuous"){
prim.deg.grp = as.numeric(prim.deg.grp)
}
y <- DGEList(counts=prim.counts, genes=assignments)
png(paste(comp.name,"prim_voom_plot.png",sep="_"))
design <- model.matrix(~prim.deg.grp)
v <- voom(y,design,plot=TRUE)
dev.off()
fit <- lmFit(v,design)
fit <- eBayes(fit)
pvalue.mat = data.frame(fit$p.value)
prim.pvalue = pvalue.mat[,2]
if (trt.group2 == "continuous"){
sec.deg.grp = as.numeric(sec.deg.grp)
}
y <- DGEList(counts=sec.counts, genes=assignments)
design <- model.matrix(~sec.deg.grp)
png(paste(comp.name,"sec_voom_plot.png",sep="_"))
v <- voom(y,design,plot=TRUE)
dev.off()
fit <- lmFit(v,design)
fit <- eBayes(fit)
pvalue.mat = data.frame(fit$p.value)
sec.pvalue = pvalue.mat[,2]
} else {
y <- DGEList(counts=deg.counts, genes=assignments)
if (length(deg.groups) == 1){
print("limma-voom with 1 variable")
if (trt.group == "continuous"){
var1 = as.numeric(var1)
}
design <- model.matrix(~var1)
png(paste(comp.name,"voom_plot.png",sep="_"))
v <- voom(y,design,plot=TRUE)
dev.off()
fit <- lmFit(v,design)
fit <- eBayes(fit)
pvalue.mat = data.frame(fit$p.value)
test.pvalue = pvalue.mat[,2]
} else if ((length(deg.groups) == 2)&(interaction.flag == "no")){
print("limma-voom with 2 variables")
if (trt.group == "continuous"){
var1 = as.numeric(var1)
} else{
var1 = as.factor(as.character(var1))
}
if (trt.group2 == "continuous"){
var2 = as.numeric(var2)
} else{
var2 = as.factor(as.character(var2))
}
design <- model.matrix(~var1 + var2)
png(paste(comp.name,"voom_plot.png",sep="_"))
v <- voom(y,design,plot=TRUE)
dev.off()
fit <- lmFit(v,design)
fit <- eBayes(fit)
pvalue.mat = data.frame(fit$p.value)
test.pvalue = pvalue.mat[,2]
} else if ((length(deg.groups) == 2)&(interaction.flag == "model")){
print("limma-voom with 2 variables plus interaction")
if (trt.group == "continuous"){
var1 = as.numeric(var1)
}
if (trt.group2 == "continuous"){
var2 = as.numeric(var2)
}
design <- model.matrix(~var1*var2 + var1 + var2)
png(paste(comp.name,"voom_plot.png",sep="_"))
v <- voom(y,design,plot=TRUE)
dev.off()
fit <- lmFit(v,design)
fit <- eBayes(fit)
pvalue.mat = data.frame(fit$p.value)
test.pvalue = pvalue.mat[,4]
}
}#end else
} else if (pvalue.method == "lm-ab"){
# Per-taxon linear regression on abundance values (gene.lm helper, row-wise).
if ((length(deg.groups) == 2)&(interaction.flag == "filter-overlap")){
print("Abundance linear regression, Two-Step Analysis")
# Step 1 tests the primary variable; step 2 tests the secondary variable.
if (trt.group == "continuous"){
prim.deg.grp = as.numeric(prim.deg.grp)
}
prim.pvalue = apply(prim.ab, 1, gene.lm, var1=prim.deg.grp)
if (trt.group2 == "continuous"){
sec.deg.grp = as.numeric(sec.deg.grp)
}
sec.pvalue = apply(sec.ab, 1, gene.lm, var1=sec.deg.grp)
} else {
if (length(deg.groups) == 1){
print("Abundance linear regression with 1 variable")
if (trt.group == "continuous"){
var1 = as.numeric(var1)
}
test.pvalue = apply(deg.ab, 1, gene.lm, var1=var1)
} else if ((length(deg.groups) == 2)&(interaction.flag == "no")){
print("Abundance linear regression with 2 variables")
if (trt.group == "continuous"){
var1 = as.numeric(var1)
}
if (trt.group2 == "continuous"){
var2 = as.numeric(var2)
}
# Additive two-variable regression applied to every row of the matrix.
test.pvalue = apply(deg.ab, 1, gene.lm, var1=var1, var2=var2)
} else if ((length(deg.groups) == 2)&(interaction.flag == "model")){
print("Abundance linear regression with 2 variables plus interaction")
# var3 encodes the var1:var2 interaction as a single combined factor.
var3 = as.factor(paste(var1,var2,sep=":"))
if (trt.group == "continuous"){
var1 = as.numeric(var1)
}
# BUG FIX: this condition previously re-tested trt.group (copy-paste error),
# so a continuous *secondary* variable was never converted to numeric. The
# parallel ANOVA-ab interaction branch correctly checks trt.group2.
if (trt.group2 == "continuous"){
var2 = as.numeric(var2)
}
# Test the interaction term (var3) while adjusting for both main effects.
test.pvalue = apply(deg.ab, 1, gene.lm, var1=var3, var2=var1, var3=var2)
}
}#end else
} else if (pvalue.method == "ANOVA-ab"){
# Per-taxon ANOVA on abundance values (gene.aov helper, row-wise).
if ((length(deg.groups) == 2)&(interaction.flag == "filter-overlap")){
print("Abundance ANOVA, Two-Step Analysis")
if (trt.group == "continuous"){
prim.deg.grp = as.numeric(prim.deg.grp)
}
prim.pvalue = apply(prim.ab, 1, gene.aov, var1=prim.deg.grp)
if (trt.group2 == "continuous"){
sec.deg.grp = as.numeric(sec.deg.grp)
}
sec.pvalue = apply(sec.ab, 1, gene.aov, var1=sec.deg.grp)
} else {
if (length(deg.groups) == 1){
print("Abundance ANOVA with 1 variable")
if (trt.group == "continuous"){
var1 = as.numeric(var1)
}
test.pvalue = apply(deg.ab, 1, gene.aov, var1=var1)
} else if ((length(deg.groups) == 2)&(interaction.flag == "no")){
print("Abundance ANOVA with 2 variables")
if (trt.group == "continuous"){
var1 = as.numeric(var1)
}
if (trt.group2 == "continuous"){
var2 = as.numeric(var2)
}
test.pvalue = apply(deg.ab, 1, gene.aov, var1=var1, var2=var2)
} else if ((length(deg.groups) == 2)&(interaction.flag == "model")){
print("Abundance ANOVA with 2 variables plus interaction")
# var3 encodes the var1:var2 interaction as a single combined factor.
var3 = as.factor(paste(var1,var2,sep=":"))
if (trt.group == "continuous"){
var1 = as.numeric(var1)
}
if (trt.group2 == "continuous"){
var2 = as.numeric(var2)
}
test.pvalue = apply(deg.ab, 1, gene.aov, var1=var3, var2=var1, var3=var2)
}
}#end else
} else{
stop("pvalue_method must be \"limma-ab\", \"limma-counts\",\"metagenomeSeq\", \"ANOVA-ab\", or \"lm-ab\"")
}
} else{
# No replicates available: statistical testing is skipped and all p-values
# are set to 1 so downstream filtering falls back on fold change only.
test.pvalue = rep(1,times=length(assignments))
prim.pvalue = rep(1,times=length(assignments))
sec.pvalue = rep(1,times=length(assignments))
}#end else
# Labels used for the per-taxon status calls: a continuous treatment
# variable gets direction-of-trend labels, while a categorical one gets
# "<group> Up" / "<group> Down" labels.
if (trt.group == "continuous") {
  upID <- "Increased Abundance"
  downID <- "Decreased Abundance"
} else {
  upID <- paste0(trt.group, " Up")
  downID <- paste0(trt.group, " Down")
}
# Single-model path (no interaction term): correct p-values for multiple
# testing, then assign a differential-abundance status per taxon.
if (interaction.flag == "no"){
if (fdr.method == "BH"){
fdr = p.adjust(test.pvalue, "fdr")
} else if (fdr.method == "q-value"){
library(qvalue)
qobj <- qvalue(p = test.pvalue)
fdr = qobj$qvalue
png(paste(pvalue.method,"_qvalue_plot.png",sep=""))
qHist = hist(qobj)
print(qHist)
dev.off()
} else if (fdr.method == "q-lfdr"){
# Local FDR variant from the qvalue package.
library(qvalue)
qobj <- qvalue(p = test.pvalue)
fdr = qobj$lfdr
png(paste(pvalue.method,"_qvalue_plot.png",sep=""))
qHist = hist(qobj)
print(qHist)
dev.off()
} else {
stop("fdr_method must be \"BH\", \"q-value\", or \"q-lfdr\"")
}
status = rep("No Change", times=length(fdr))
# Continuous treatments are called by correlation sign; categorical ones by
# fold-change sign. Both also require passing the p-value and FDR cutoffs.
if (trt.group == "continuous"){
status[(gene.cor >= 0) & (test.pvalue <= pvalue.cutoff) & (fdr <= fdr.cutoff)] = upID
status[(gene.cor <= 0) & (test.pvalue <= pvalue.cutoff) & (fdr <= fdr.cutoff)] = downID
pvalue.table = data.frame(p.value = test.pvalue, FDR = fdr)
} else{
status[(fc >= fc.cutoff) & (test.pvalue <= pvalue.cutoff) & (fdr <= fdr.cutoff)] = upID
status[(fc <= -fc.cutoff) & (test.pvalue <= pvalue.cutoff) & (fdr <= fdr.cutoff)] = downID
pvalue.table = data.frame(p.value = test.pvalue, FDR = fdr)
}#end else
} else{
# Interaction paths. NOTE(review): trt.group is overwritten with prim.trt
# here, so all later comparisons of trt.group refer to the primary treatment.
trt.group = prim.trt
if(interaction.flag == "model"){
if (fdr.method == "BH"){
fdr = p.adjust(test.pvalue, "fdr")
} else if (fdr.method == "q-value"){
library(qvalue)
qobj <- qvalue(p = test.pvalue)
fdr = qobj$qvalue
png(paste(pvalue.method,"_qvalue_plot.png",sep=""))
qHist = hist(qobj)
print(qHist)
dev.off()
} else if (fdr.method == "q-lfdr"){
library(qvalue)
qobj <- qvalue(p = test.pvalue)
fdr = qobj$lfdr
png(paste(pvalue.method,"_qvalue_plot.png",sep=""))
qHist = hist(qobj)
print(qHist)
dev.off()
} else {
stop("fdr_method must be \"BH\", \"q-value\", or \"q-lfdr\"")
}
status = rep("No Change", times=length(fdr))
# A direction can only be assigned when both variables share a type; for a
# mixed continuous/categorical pair, the call is simply "Variable Abundance".
if ((trt.group == "continuous")&(trt.group2 == "continuous")){
status[(gene.cor.int >= 0) & (test.pvalue <= pvalue.cutoff) & (fdr <= fdr.cutoff)] = upID
status[(gene.cor.int <= 0) & (test.pvalue <= pvalue.cutoff) & (fdr <= fdr.cutoff)] = downID
pvalue.table = data.frame(p.value = test.pvalue, FDR = fdr)
} else if ((trt.group != "continuous")&(trt.group2 != "continuous")){
status[(overall.fc >= fc.cutoff) & (test.pvalue <= pvalue.cutoff) & (fdr <= fdr.cutoff)] = upID
status[(overall.fc <= -fc.cutoff) & (test.pvalue <= pvalue.cutoff) & (fdr <= fdr.cutoff)] = downID
pvalue.table = data.frame(p.value = test.pvalue, FDR = fdr)
} else {
upID = "Variable Abundance"
status[(test.pvalue <= pvalue.cutoff) & (fdr <= fdr.cutoff)] = upID
pvalue.table = data.frame(p.value = test.pvalue, FDR = fdr)
}#end else
} else if (interaction.flag == "filter-overlap"){
# Two-step filtering: taxa significant for the primary variable (pass 1)
# are retained only if they are NOT also significant for the secondary
# variable (pass 2); overlapping hits are filtered out at the end.
if (fdr.method == "BH"){
fdr = p.adjust(prim.pvalue, "fdr")
} else if (fdr.method == "q-value"){
library(qvalue)
qobj <- qvalue(p = prim.pvalue)
fdr = qobj$qvalue
png(paste(pvalue.method,"_qvalue_plot.png",sep=""))
qHist = hist(qobj)
print(qHist)
dev.off()
} else if (fdr.method == "q-lfdr"){
library(qvalue)
qobj <- qvalue(p = prim.pvalue)
fdr = qobj$lfdr
png(paste(pvalue.method,"_qvalue_plot.png",sep=""))
qHist = hist(qobj)
print(qHist)
dev.off()
} else {
stop("fdr_method must be \"BH\", \"q-value\", or \"q-lfdr\"")
}
pass1.status = rep("No Change", times=length(fdr))
if (trt.group == "continuous"){
pass1.status[(gene.cor >= 0) & (prim.pvalue <= pvalue.cutoff) & (fdr <= fdr.cutoff)] = paste(trt.group," Up",sep="")
pass1.status[(gene.cor <= 0) & (prim.pvalue <= pvalue.cutoff) & (fdr <= fdr.cutoff)] = paste(trt.group," Down",sep="")
} else{
pass1.status[(prim.fc >= fc.cutoff) & (prim.pvalue <= pvalue.cutoff) & (fdr <= fdr.cutoff)] = paste(trt.group," Up",sep="")
pass1.status[(prim.fc <= -fc.cutoff) & (prim.pvalue <= pvalue.cutoff) & (fdr <= fdr.cutoff)] = paste(trt.group," Down",sep="")
}#end else
print(paste("Primary Up-Regulated: ",length(pass1.status[pass1.status == paste(trt.group," Up",sep="")]),sep=""))
print(paste("Primary Down-Regulated: ",length(pass1.status[pass1.status == paste(trt.group," Down",sep="")]),sep=""))
# Repeat the FDR correction for the secondary-variable p-values.
if (fdr.method == "BH"){
sec.fdr = p.adjust(sec.pvalue, "fdr")
} else if (fdr.method == "q-value"){
library(qvalue)
qobj <- qvalue(p = sec.pvalue)
sec.fdr = qobj$qvalue
png(paste(pvalue.method,"_qvalue_plot.png",sep=""))
qHist = hist(qobj)
print(qHist)
dev.off()
} else if (fdr.method == "q-lfdr"){
library(qvalue)
qobj <- qvalue(p = sec.pvalue)
sec.fdr = qobj$lfdr
png(paste(pvalue.method,"_qvalue_plot.png",sep=""))
qHist = hist(qobj)
print(qHist)
dev.off()
} else {
stop("fdr_method must be \"BH\", \"q-value\", or \"q-lfdr\"")
}
pass2.status = rep("No Change", times=length(fdr))
# NOTE(review): pass 2 uses the secondary cutoffs (pvalue.cutoff2 /
# fdr.cutoff2) throughout, but the fold-change branch compares against the
# primary fc.cutoff -- confirm this asymmetry is intentional.
if (trt.group2 == "continuous"){
pass2.status[(gene.cor2 >= 0) & (sec.pvalue <= pvalue.cutoff2) & (sec.fdr <= fdr.cutoff2)] = paste(trt.group," Up",sep="")
pass2.status[(gene.cor2 <= 0) & (sec.pvalue <= pvalue.cutoff2) & (sec.fdr <= fdr.cutoff2)] = paste(trt.group," Down",sep="")
} else{
pass2.status[(sec.fc >= fc.cutoff) & (sec.pvalue <= pvalue.cutoff2) & (sec.fdr <= fdr.cutoff2)] = paste(trt.group," Up",sep="")
pass2.status[(sec.fc <= -fc.cutoff) & (sec.pvalue <= pvalue.cutoff2) & (sec.fdr <= fdr.cutoff2)] = paste(trt.group," Down",sep="")
}#end else
print(paste("Secondary Up-Regulated: ",length(pass2.status[pass2.status == paste(trt.group," Up",sep="")]),sep=""))
print(paste("Secondary Down-Regulated: ",length(pass2.status[pass2.status == paste(trt.group," Down",sep="")]),sep=""))
pvalue.table = data.frame(prim.pvalue = prim.pvalue, prim.FDR = fdr,
sec.pvalue=sec.pvalue, sec.fdr=sec.fdr)
status = rep("No Change", times=length(fdr))
# Final call: significant in pass 1 AND unchanged in pass 2.
status[(pass1.status == paste(trt.group," Up",sep="")) & (pass2.status == "No Change")] = upID
status[(pass1.status == paste(trt.group," Down",sep="")) & (pass2.status == "No Change")] = downID
} else{
stop("interaction must be \"no\", \"model\", or \"filter-overlap\"")
}#end else
}#end else
print(paste("Up-Regulated: ",length(status[status == upID]),sep=""))
print(paste("Down-Regulated: ",length(status[status == downID]),sep=""))
# Tag the output file names so two-step filtered results are distinguishable.
if (interaction.flag == "filter-overlap"){
pvalue.method = paste(pvalue.method,"two-step_filtered",sep="_")
}
# Only include the p-value columns when replicates were available.
if(rep.check == 1){
deg.table = data.frame(genus = assignments, average.ab, fc.table, pvalue.table, status = status)
} else {
deg.table = data.frame(genus = assignments, average.ab, fc.table, status = status)
}#end else
deg.file = paste(comp.name,"_",pvalue.method,"_differential_abundance_fdr_",fdr.cutoff,"_pval_",pvalue.cutoff,".txt",sep="")
# Colons are not safe in file names on all platforms; replace with dots.
deg.file = gsub(":",".",deg.file)
write.table(deg.table, file=deg.file, row.names=F, quote=F, sep="\t")
final.deg.file = paste(user.folder,"/",comp.name,"_differential_abundance_stats.txt",sep="")
write.table(deg.table, file=final.deg.file, row.names=F, quote=F, sep="\t")
# Keep only the taxa with a differential-abundance call for the heatmaps.
temp.ab = ab.mat
temp.ab = temp.ab[status != "No Change", ]
deg.assignments = assignments[status != "No Change"]
# Heatmaps require at least two called taxa to cluster.
if(length(deg.assignments) > 1){
if(length(plot.groups) > 1){
# Two annotation tracks: heatmap.3 supports multiple row side-color bars.
source("heatmap.3.R")
grp1 = as.character(sample.description.table[,plot.groups[1]])
grp2 = as.character(sample.description.table[,plot.groups[2]])
group.levels = c(levels(as.factor(grp1)),levels(as.factor(grp2)))
color.palette <- fixed.color.palatte[1:length(group.levels)]
labelColors1 = rep("black",times=length(sample.label))
for (i in 1:length(group.levels)){
labelColors1[grp1 == as.character(group.levels[i])] = color.palette[i]
}#end for (i in 1:length(group.levels))
labelColors2 = rep("black",times=length(sample.label))
for (i in 1:length(group.levels)){
labelColors2[grp2 == as.character(group.levels[i])] = color.palette[i]
}#end for (i in 1:length(group.levels))
# Samples become rows, taxa become columns; taxon labels are suppressed
# when there are too many to be legible.
temp.ab = t(temp.ab)
if(length(deg.assignments) < 25){
colnames(temp.ab) = deg.assignments
} else {
colnames(temp.ab) = rep("", length(deg.assignments))
}
rownames(temp.ab) = sample.label
column_annotation <- as.matrix(deg.assignments)
colnames(column_annotation) <- c("")
row_annotation <- data.frame(label1 = labelColors1, label2 = labelColors2)
row_annotation = as.matrix(t(row_annotation))
rownames(row_annotation) <- c(plot.groups)
heatmap.file <- paste(comp.name,"_",pvalue.method,"_differential_abundance_fdr_",fdr.cutoff,"_pval_",pvalue.cutoff,".png",sep="")
heatmap.file = gsub(":",".",heatmap.file)
png(file = heatmap.file)
heatmap.3(temp.ab, col=colorpanel(33, low="blue", mid="black", high="red"), density.info="none", key=TRUE,
RowSideColors=row_annotation, trace="none", margins = c(20,20),RowSideColorsSize=4, dendrogram="both")
legend("topright", legend=group.levels,
col=color.palette,
pch=15, cex=0.7)
dev.off()
if(interaction.flag != "no"){
# Also plot per-group fold changes for the called taxa, clipped to +/-10
# so extreme values do not dominate the color scale.
temp.fc.table = as.matrix(fc.table)
if (((trt.group == "continuous") & (trt.group2 == "continuous")) | ((trt.group != "continuous") & (trt.group2 != "continuous"))){
temp.fc.table = temp.fc.table[,-ncol(temp.fc.table)]
}
temp.fc.table = temp.fc.table[status != "No Change", ]
if(length(deg.assignments) < 25){
rownames(temp.fc.table) = deg.assignments
} else {
rownames(temp.fc.table) = rep("",times=length(deg.assignments))
}
colnames(temp.fc.table) = gsub(".:.",":",gsub("fold.change.","",colnames(temp.fc.table)))
temp.fc.table[temp.fc.table < -10] = -10
temp.fc.table[temp.fc.table > 10] = 10
heatmap.file <- paste("fold_change_",comp.name,"_",pvalue.method,"_differential_abundance_fdr_",fdr.cutoff,"_pval_",pvalue.cutoff,".png",sep="")
heatmap.file = gsub(":",".",heatmap.file)
png(file = heatmap.file)
heatmap.2(temp.fc.table, col=colorpanel(33, low="blue", mid="black", high="red"), density.info="none", key=TRUE,
trace="none", margins = c(20,20), cexCol=1.5)
dev.off()
}#end if(interaction.flag != "no")
} else {
# Single annotation track: plain heatmap.2 is sufficient.
group.levels = levels(as.factor(sample.description.table[,plot.groups]))
color.palette <- fixed.color.palatte[1:length(group.levels)]
labelColors = rep("black",times=length(sample.label))
# NOTE(review): this loop indexes `grp`, which is not defined in this
# branch (the two-group branch uses grp1/grp2); it presumably comes from
# earlier in the script -- confirm it matches plot.groups here.
for (i in 1:length(group.levels)){
labelColors[grp == as.character(group.levels[i])] = color.palette[i]
}#end for (i in 1:length(group.levels))
temp.ab = t(temp.ab)
if(length(deg.assignments) < 25){
colnames(temp.ab) = deg.assignments
} else {
colnames(temp.ab) = rep("", length(deg.assignments))
}
rownames(temp.ab) = sample.label
heatmap.file = paste(comp.name,"_",pvalue.method,"_differential_abundance_fdr_",fdr.cutoff,"_pval_",pvalue.cutoff,".png",sep="")
heatmap.file = gsub(":",".",heatmap.file)
png(file = heatmap.file)
heatmap.2(temp.ab, col=colorpanel(33, low="blue", mid="black", high="red"), density.info="none", key=TRUE,
RowSideColors=labelColors, trace="none", margins = c(20,20))
legend("topright", group.levels, col=color.palette, pch=15, cex=0.8)
dev.off()
}#end else
}#end if(length(deg.assignments) > 1) |
d7e86af755596ef2d354194e189222d8326003ae | 347fd6c3f8e4c21403c90ee4287e96657d6b09ca | /tests/testthat.R | cf5a16059b4af4e49d6a1aa26831f82f2e17e75e | [] | no_license | okayaa/MTSYS | 80c3db015c35bf2ef1ce47273ab41bd63468e30f | c3dae3fbafda23de591572d1fa5dcb5622812df3 | refs/heads/master | 2021-06-06T10:14:27.992722 | 2021-03-11T13:38:14 | 2021-03-11T13:38:14 | 98,603,229 | 9 | 0 | null | null | null | null | UTF-8 | R | false | false | 54 | r | testthat.R | library(testthat)
library(MTSYS)
# Discover and run all testthat tests in tests/testthat for the MTSYS package.
test_check("MTSYS")
|
d81ddd0bb168f7722d8f6479ad0ea94e7bc5d55a | 308be353979c3bcbe279d9ca282729ad5a014f77 | /heart dieases(SVM).R | e8fa6de7fbfeeaaf14281b4d5c72fd536ce505a7 | [] | no_license | i-ravi/R-ML | 5ee7cb4181bdd7eb61f633bdacf9cc46d858d4fc | cdb980f06932180c75d4970afc548fc5cdc24f33 | refs/heads/master | 2022-04-02T08:15:12.722600 | 2020-01-02T21:43:12 | 2020-01-02T21:43:12 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 658 | r | heart dieases(SVM).R | library(caret)
# Load the heart-disease data; the file has no header row, so the columns
# are named V1..V15 and V15 is the outcome to predict.
heart <- read.csv("./datafiles/Heart.csv", sep = ",", header = FALSE)
library(caTools)
heart <- na.omit(heart)
# Make the random train/test split reproducible.
set.seed(123)
# BUG FIX: sample.split() expects the outcome *vector* (so the split is
# stratified on the class labels), not the whole data frame.
split <- sample.split(heart$V15, SplitRatio = 0.8)
training <- subset(heart, split == TRUE)
testing <- subset(heart, split == FALSE)
anyNA(heart)
summary(heart)
# 10-fold cross-validation repeated 3 times for hyperparameter tuning.
trctrl <- trainControl(method = "repeatedcv", number = 10, repeats = 3)
# BUG FIX: the model must be fit on the training subset only. The original
# trained on the full `heart` data and then evaluated on `testing`, which is
# a subset of `heart` -- that leaks the test rows into training and inflates
# the reported accuracy.
svmLinear <- train(V15 ~ ., data = training, method = "svmLinear",
                   trControl = trctrl,
                   preProcess = c("center", "scale"), tuneLength = 10)
pred <- predict(svmLinear, newdata = testing)
print(confusionMatrix(table(pred, testing$V15)))
|
33e9c4918975245825614209118146e6ab0ec9d7 | 3190820fe0ce8a4ea87701cc4bc7b5467b210d26 | /man/update_job.Rd | eac9c43f52aa413e85d2e48db3f4411a3f4585ee | [
"Apache-2.0"
] | permissive | jonathom/openeo-r-client | 17ebed91a27b31089aa203c7e96206f0ae6fab0a | fccbe70cf53c3d5b4dfabeba28aac5faab04f6c0 | refs/heads/master | 2023-03-19T05:13:36.709711 | 2020-09-30T14:35:37 | 2020-09-30T14:35:37 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,782 | rd | update_job.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/jobs.R
\name{update_job}
\alias{update_job}
\title{Modifies a job with given parameters}
\usage{
update_job(
id,
title = NULL,
description = NULL,
process = NULL,
plan = NULL,
budget = NULL,
con = NULL,
...
)
}
\arguments{
\item{id}{the job id of a created job}
\item{title}{update title for the job}
\item{description}{update description}
\item{process}{A \code{\link{Graph}}, a function returning a \code{\link{ProcessNode}} as an endpoint, the \code{\link{ProcessNode}}
will return the results or a self defined \code{\link{Process}}}
\item{plan}{replaces plan with the set value}
\item{budget}{replaces or sets the credits that can be spent at maximum}
\item{con}{connected and authenticated openeo client (optional) otherwise \code{\link{active_connection}}
is used.}
\item{...}{additional parameters passed to toJSON (like 'digits')}
}
\description{
The function will modify a stored job with the given parameters. The dot parameters will contain all the values
that shall be replaced or removed. Shows a message on success or failure.
}
\details{
The '...' operator shall contain all the values that are to be replaced in the job. There are some reserved
keys.
'process_graph' will replace the process graph with a newly defined one; therefore, the process graph needs to be a Graph object.
'format' will change the desired output format.
All other parameters will be assumed to be special output parameters. Remember, you don't need to specify a process graph or graph_id,
e.g. if you just want to update the output format.
To leave parameters unchanged, simply don't mention them. If you want to delete some, then set them to NA.
}
|
a8d29c2d7ab10175d373f929fc53d9053aaf32f5 | 4087ca8ad27acbf38c080aa7218762fe0e3e0f0f | /8_calculating_pop_risk_M1_PCR_A.R | 120f99e56b4b54f6d04c1f086bfc044e2840af26 | [] | no_license | Hope-Simpson/modelling-BU-distribution | 1ac877c841a0d5e5bb671abecefdd563443e4c2e | 41d621d3f3e110e71edc9e2f688abe09e2d29fb8 | refs/heads/main | 2023-01-31T02:40:13.153447 | 2020-12-12T09:31:07 | 2020-12-12T09:31:07 | 320,776,124 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,210 | r | 8_calculating_pop_risk_M1_PCR_A.R | ### Calulcating total population living in areas predicted suitable for BU, MU, and Both, with weighted mean and upper and lower bounds
gc()
# NOTE(review): rm(list = ls()) and .rs.restartR() are interactive/RStudio
# conveniences. .rs.restartR() restarts the R session, so when this file is
# sourced non-interactively the lines below may not run in the same session.
rm(list = ls())
.rs.restartR()
# 1. Set up workspace directories to be used during this projects
path_wd <- "C:/Users/Hope/Dropbox (LSoHaTM)/BU_Modelling"
path_data <- paste(path_wd, "/Tables", sep = "")
path_shapefiles <- paste(path_wd, "/Shapefiles", sep = "")
path_scripts <- paste(path_wd, "/final_scripts", sep = "")
path_raster <- paste(path_wd, "/Raster", sep = "")
path_model_PCR <- paste(path_wd, "/Outputs/BU_confirmed_cases", sep = "")
path_model_env <- paste(path_wd, "/Outputs/MU_environmental", sep = "")
setwd(path_wd)
# 2. Load the packages that we need for this project
# ipak() installs any missing packages and then attaches all of them.
source(paste(path_scripts,"ipak.R", sep = "/"))
# List of packages
packages <- c("tidyverse","reshape2","stats","readxl","foreign",
"sf","dismo","raster","rgdal","proj4","ggplot2",
"RStoolbox")
ipak(packages)
## 3 Load the African map with country boundaries
AFRO_map <- file.path(path_shapefiles,"salb.shp")
AFRO_map <- st_read(AFRO_map)
st_crs(AFRO_map)
# plot(AFRO_map$geometry)
## 3.1 Load the predicted environmental suitability for BU
# Weighted-mean occurrence plus lower/upper bound surfaces (PCR-confirmed model).
setwd(path_model_PCR)
raster.files <- list.files(path_model_PCR,
pattern="*tif$", full.names=TRUE)
raster.files
occ_model2_PCR <- raster(("BU_Occurrence.tif"))
LB_model2_PCR <- raster(("BU_Occurrence_LB.tif"))
UB_model2_PCR <- raster(("BU_Occurrence_UB.tif"))
# Reference coordinate system taken from the BU occurrence raster.
PCS <- crs(occ_model2_PCR)
## 3.3 Load the predicted occurrence for env MU
setwd(path_model_env)
raster.files <- list.files(path_model_env,
pattern="*tif$", full.names=TRUE)
raster.files
occ_model2_env <- raster(("MU1_Occurrence.tif"))
LB_model2_env <- raster(("MU1_Occurrence_LB.tif"))
UB_model2_env <- raster(("MU1_Occurrence_UB.tif"))
# Match the MU raster extent to the BU raster and clamp negatives to zero.
occ_model2_env <- crop(extend(occ_model2_env, occ_model2_PCR),occ_model2_PCR)
occ_model2_env[occ_model2_env < 0] <- 0
# plot(UB_model2_env)
## 3.4 Load the continuous population density for 2020 (WorldPop project)
setwd(path_raster)
raster.files <- list.files(path_raster,
pattern="*tif$", full.names=TRUE)
PopDens2020 <- raster(("PopDens2020adj.tif"))
# Reproject population density into the same CRS and clamp negatives to zero.
PopDens2020 <- projectRaster(PopDens2020, crs = PCS)
PopDens2020[PopDens2020<0] <- 0
#####################
## 4.2 Combine MU occurrence and BU occurrence surface
## Sum up both surface OCC
# first categorise 3 levels
# 2 = BU only, 1= MU only, 3 = both
occ_model2_PCR[occ_model2_PCR == 1] <- 2
occ_model2_PCR[is.na(occ_model2_PCR)] <- 0
MU.BU.Occ <- occ_model2_PCR + occ_model2_env
setwd(path_model_PCR)
plot(MU.BU.Occ)
writeRaster(MU.BU.Occ, filename = "BU_MU_Occ2.tif", format= "GTiff", overwrite=TRUE)
## Reclassify to (0,1), so that 0. Absence or only 1 present; 2. = BU & MU
MU.BU.Occ[MU.BU.Occ < 3] <- 0
MU.BU.Occ[MU.BU.Occ == 3] <- 1
# plot(MU.BU.Occ)
# plot(AFRO_map$geometry, add = TRUE)
# do the same for LB and UB
# Bound surfaces are 0/1, so a sum of 2 means both models agree on presence.
MU.BU.LB <- LB_model2_PCR + LB_model2_env
MU.BU.LB[MU.BU.LB < 2] <- 0
MU.BU.LB[MU.BU.LB == 2] <- 1
MU.BU.UB <- UB_model2_PCR + UB_model2_env
MU.BU.UB[MU.BU.UB < 2] <- 0
MU.BU.UB[MU.BU.UB == 2] <- 1
setwd(path_model_PCR)
raster.files <- list.files(path_model_PCR,
pattern="*tif$", full.names=TRUE)
raster.files
# Reload the original 0/1 BU occurrence surface: the recode above (1 -> 2,
# NA -> 0) was only needed to build the combined category raster.
occ_model2_PCR <- raster(("BU_Occurrence.tif"))
# Extract Population estimates for each category
## Population density for BU
## Align both raster dataset: crop, extend and resample processing
PopDens2020 <- crop(extend(PopDens2020, occ_model2_PCR), occ_model2_PCR)
PopDens2020 <- raster::resample(PopDens2020, occ_model2_PCR, method = "ngb")
# generate raster showing population in each occurence cell
BU.PopDens2020r <- PopDens2020 * occ_model2_PCR
MU.PopDens2020r <- PopDens2020 * occ_model2_env
MU.BU.PopDens2020r <- PopDens2020 * MU.BU.Occ
# 7. Estimate total population by country across Africa
## 7.1 Total Country Population
# Zonal sums of population density by country polygon; AFRO.tb keeps the
# country attributes (columns 3, 7, 10 are the identifying fields).
AFRO_map <- st_transform(AFRO_map, crs = 3857)
AFRO.tb <- AFRO_map %>%
st_set_geometry(NULL)
Total.Pop2020 <- raster::extract(PopDens2020, AFRO_map, fun=sum, na.rm=TRUE, df=TRUE)
Total.Pop2020 <- as.data.frame(Total.Pop2020)
names(AFRO.tb)
Total.Pop2020 <- cbind(AFRO.tb[,c(3,7,10)],Total.Pop2020)
colnames(Total.Pop2020)[5] <- c("TotalPop2020")
## 7.2 Extract population in areas of BU presence at national level
BU.Pop2020e <- raster::extract(BU.PopDens2020r, AFRO_map, fun=sum, na.rm=TRUE, df=TRUE)
BU.Pop2020 <- as.data.frame(BU.Pop2020e)
BU.Pop2020 <- cbind(AFRO.tb[,c(3,7,10)],BU.Pop2020)
colnames(BU.Pop2020)[5] <- c("BUPop2020")
## 7.3 Extract population in areas of MU presence
MU.Pop2020e <- raster::extract(MU.PopDens2020r, AFRO_map, fun=sum, na.rm=TRUE, df=TRUE)
MU.Pop2020 <- as.data.frame(MU.Pop2020e)
MU.Pop2020 <- cbind(AFRO.tb[,c(3,7,10)],MU.Pop2020)
colnames(MU.Pop2020)[5] <- c("MU.Pop2020")
head(BU.Pop2020)
## 7.3 Extract population in areas of MU + BU presence
MU.BU.Pop2020e <- raster::extract(MU.BU.PopDens2020r, AFRO_map, fun=sum, na.rm=TRUE, df=TRUE)
MU.BU.Pop2020 <- as.data.frame(MU.BU.Pop2020e)
MU.BU.Pop2020 <- cbind(AFRO.tb[,c(3,7,10)],MU.BU.Pop2020)
colnames(MU.BU.Pop2020)[5] <- c("MU.BU.Pop2020")
head(MU.BU.Pop2020)
## 7.5 Assemble datasets and export as a CSV table
Assemble.Pop2020 <- cbind(Total.Pop2020, BU.Pop2020[5], MU.Pop2020[5], MU.BU.Pop2020[5] )
## 7.6 Finally, estimate total population affected and credible intervals
# Population-at-risk rasters for the lower bound (LB), upper bound (UB) and
# weighted mean (WM) of each model, plus their combination.
BU.Pop2020.LBr <- PopDens2020 * LB_model2_PCR
BU.Pop2020.UBr <- PopDens2020 * UB_model2_PCR
BU.Pop2020.WMr <- PopDens2020 * occ_model2_PCR
MU.Pop2020.LBr <- PopDens2020 * LB_model2_env
MU.Pop2020.UBr <- PopDens2020 * UB_model2_env
MU.Pop2020.WMr <- PopDens2020 * occ_model2_env
MU.BU.Pop2020.LBr <- PopDens2020 * MU.BU.LB
MU.BU.Pop2020.UBr <- PopDens2020 * MU.BU.UB
MU.BU.Pop2020.WMr <- PopDens2020 * MU.BU.Occ
## Extract lower bound population living in at-risk areas by country
BU.Pop2020.LBe <- raster::extract(BU.Pop2020.LBr, AFRO_map, fun=sum, na.rm=TRUE, df=TRUE)
BU.Pop2020.LB <- as.data.frame(BU.Pop2020.LBe)
# lower bound area
# Summing a 0/1 raster counts presence cells; converted to km2 below (x25,
# each 5x5 km cell).
BU.area.LBe <- raster::extract(LB_model2_PCR, AFRO_map, fun=sum, na.rm=TRUE, df=TRUE)
BU.area.LB <- as.data.frame(BU.area.LBe)
head(BU.Pop2020.LB)
head(BU.Pop2020.LBe)
head(BU.Pop2020.LB)
BU.Pop2020.LB <- cbind(AFRO.tb[,c(3,7,10)], BU.Pop2020.LB, BU.area.LB[2])
colnames(BU.Pop2020.LB)[5] <- c("BUPop2020_LB")
colnames(BU.Pop2020.LB)[c(6)] <- c("BUarea_LB")
BU.Pop2020.LB$BUarea_LB <- BU.Pop2020.LB$BUarea_LB * 25
########
# MU lower bound pop
MU.Pop2020.LBe <- raster::extract(MU.Pop2020.LBr, AFRO_map, fun=sum, na.rm=TRUE, df=TRUE)
MU.Pop2020.LB <- as.data.frame(MU.Pop2020.LBe)
# MU lower bound pop area
MU.area.LBe <- raster::extract(LB_model2_env, AFRO_map, fun=sum, na.rm=TRUE, df=TRUE)
MU.area.LB <- as.data.frame(MU.area.LBe)
MU.Pop2020.LB <- cbind(AFRO.tb[,c(3,7,10)], MU.Pop2020.LB, MU.area.LB[2])
colnames(MU.Pop2020.LB)[5] <- c("MUPop2020_LB")
colnames(MU.Pop2020.LB)[6] <- c("MUarea_LB")
MU.Pop2020.LB$MUarea_LB <- MU.Pop2020.LB$MUarea_LB * 25
#### combined
## Lower Bound population
MU.BU.Pop2020.LBe <- raster::extract(MU.BU.Pop2020.LBr, AFRO_map, fun=sum, na.rm=TRUE, df=TRUE)
MU.BU.Pop2020.LB <- as.data.frame(MU.BU.Pop2020.LBe)
# lower bound area
MU.BU.area.LBe <- raster::extract(MU.BU.LB, AFRO_map, fun=sum, na.rm=TRUE, df=TRUE)
MU.BU.area.LB <- as.data.frame(MU.BU.area.LBe)
MU.BU.Pop2020.LB <- cbind(AFRO.tb[,c(3,7,10)], MU.BU.Pop2020.LB, MU.BU.area.LB[2])
colnames(MU.BU.Pop2020.LB)[5] <- c("MU.BUPop2020_LB")
colnames(MU.BU.Pop2020.LB)[c(6)] <- c("MU.BUarea_LB")
MU.BU.Pop2020.LB$MU.BUarea_LB <- MU.BU.Pop2020.LB$MU.BUarea_LB * 25
## Upper Bound
BU.Pop2020.UBe <- raster::extract(BU.Pop2020.UBr, AFRO_map, fun=sum, na.rm=TRUE, df=TRUE)
BU.Pop2020.UB <- as.data.frame(BU.Pop2020.UBe)
BU.area.UBe <- raster::extract(UB_model2_PCR, AFRO_map, fun=sum, na.rm=TRUE, df=TRUE)
BU.area.UB <- as.data.frame(BU.area.UBe)
BU.Pop2020.UB <- cbind(AFRO.tb[,c(3,7,10)],BU.Pop2020.UB, BU.area.UB[2])
colnames(BU.Pop2020.UB)[5] <- c("BUPop2020_UB")
colnames(BU.Pop2020.UB)[6] <- c("BUarea_UB")
head(BU.Pop2020)
# multiply each cell by 25 to get area in sqkm
BU.Pop2020.UB$BUarea_UB <- BU.Pop2020.UB$BUarea_UB * 25
### Upper bound population
MU.Pop2020.UBe <- raster::extract(MU.Pop2020.UBr, AFRO_map, fun=sum, na.rm=TRUE, df=TRUE)
MU.Pop2020.UB <- as.data.frame(MU.Pop2020.UBe)
MU.area.UBe <- raster::extract(UB_model2_env, AFRO_map, fun=sum, na.rm=TRUE, df=TRUE)
MU.area.UB <- as.data.frame(MU.area.UBe)
MU.Pop2020.UB <- cbind(AFRO.tb[,c(3,7,10)],MU.Pop2020.UB, MU.area.UB[2])
colnames(MU.Pop2020.UB)[5] <- c("MUPop2020_UB")
colnames(MU.Pop2020.UB)[6] <- c("MUarea_UB")
MU.Pop2020.UB$MUarea_UB <- MU.Pop2020.UB$MUarea_UB * 25
### mu bu
MU.BU.Pop2020.UBe <- raster::extract(MU.BU.Pop2020.UBr, AFRO_map, fun=sum, na.rm=TRUE, df=TRUE)
MU.BU.Pop2020.UB <- as.data.frame(MU.BU.Pop2020.UBe)
MU.BU.area.UBe <- raster::extract(MU.BU.UB, AFRO_map, fun=sum, na.rm=TRUE, df=TRUE)
MU.BU.area.UB <- as.data.frame(MU.BU.area.UBe)
MU.BU.Pop2020.UB <- cbind(AFRO.tb[,c(3,7,10)],MU.BU.Pop2020.UB, MU.BU.area.UB[2])
colnames(MU.BU.Pop2020.UB)[5] <- c("MU.BUPop2020_UB")
colnames(MU.BU.Pop2020.UB)[6] <- c("MU.BUarea_UB")
# multiply each cell by 25 to get area in sqkm
MU.BU.Pop2020.UB$MU.BUarea_UB <- MU.BU.Pop2020.UB$MU.BUarea_UB * 25
## Weighted Mean
BU.Pop2020.WMe <- raster::extract(BU.Pop2020.WMr, AFRO_map, fun=sum, na.rm=TRUE, df=TRUE)
BU.Pop2020.WM <- as.data.frame(BU.Pop2020.WMe)
BU.area.WMe <- raster::extract(occ_model2_PCR, AFRO_map, fun=sum, na.rm=TRUE, df=TRUE)
BU.area.WM <- as.data.frame(BU.area.WMe)
head(BU.area.WM)
BU.Pop2020.WM <- cbind(AFRO.tb[,c(3,7,10)],BU.Pop2020.WM, BU.area.WM[2])
colnames(BU.Pop2020.WM)[5] <- c("BUPop2020WM")
colnames(BU.Pop2020.WM)[6] <- c("BUarea_WM")
BU.Pop2020.WM$BUarea_WM <- BU.Pop2020.WM$BUarea_WM * 25
#### MU
MU.Pop2020.WMe <- raster::extract(MU.Pop2020.WMr, AFRO_map, fun=sum, na.rm=TRUE, df=TRUE)
MU.Pop2020.WM <- as.data.frame(MU.Pop2020.WMe)
MU.area.WMe <- raster::extract(occ_model2_env, AFRO_map, fun=sum, na.rm=TRUE, df=TRUE)
MU.area.WM <- as.data.frame(MU.area.WMe)
MU.Pop2020.WM <- cbind(AFRO.tb[,c(3,7,10)],MU.Pop2020.WM, MU.area.WM[2])
colnames(MU.Pop2020.WM)[5] <- c("MUPop2020")
colnames(MU.Pop2020.WM)[6] <- c("MUarea_WM")
MU.Pop2020.WM$MUarea_WM <- MU.Pop2020.WM$MUarea_WM * 25
## combined
MU.BU.Pop2020.WMe <- raster::extract(MU.BU.Pop2020.WMr, AFRO_map, fun=sum, na.rm=TRUE, df=TRUE)
MU.BU.Pop2020.WM <- as.data.frame(MU.BU.Pop2020.WMe)
MU.BU.area.WMe <- raster::extract(MU.BU.Occ, AFRO_map, fun=sum, na.rm=TRUE, df=TRUE)
MU.BU.area.WM <- as.data.frame(MU.BU.area.WMe)
MU.BU.Pop2020.WM <- cbind(AFRO.tb[,c(3,7,10)],MU.BU.Pop2020.WM, MU.BU.area.WM[2])
colnames(MU.BU.Pop2020.WM)[5] <- c("MU.BU.Pop2020")
colnames(MU.BU.Pop2020.WM)[6] <- c("MU.BU.area_WM")
MU.BU.Pop2020.WM$MU.BU.area_WM <- MU.BU.Pop2020.WM$MU.BU.area_WM * 25
## Assemble tables
Assemble.BU.Pop2020 <- cbind(Total.Pop2020, BU.Pop2020.WM[c(5,6)], BU.Pop2020.LB[c(5,6)], BU.Pop2020.UB[c(5,6)])
Assemble.MU.Pop2020 <- cbind(Total.Pop2020, MU.Pop2020.WM[c(5,6)], MU.Pop2020.LB[c(5,6)], MU.Pop2020.UB[c(5,6)])
Assemble.MU.BU.Pop2020 <- cbind(Total.Pop2020, MU.BU.Pop2020.WM[c(5,6)], MU.BU.Pop2020.LB[c(5,6)], MU.BU.Pop2020.UB[c(5,6)])
# BUG FIX: head(Assemble.MU.Pop2020) was originally called *before* the
# object was created, which raises "object not found"; inspect it afterwards.
head(Assemble.MU.Pop2020)
head(Assemble.BU.Pop2020)
# 8. Export CSV tables with final results
setwd(path_data)
write.csv(Assemble.Pop2020, file = "Assemble_Pop2020_PCRA_oct.csv", row.names = FALSE, na="")
write.csv(Assemble.BU.Pop2020, file = "Assemble_BU_Pop2020_PCRA_oct.csv", row.names = FALSE, na="")
write.csv(Assemble.MU.Pop2020, file = "Assemble.MU.Pop2020_PCRA_oct.csv", row.names = FALSE, na="")
write.csv(Assemble.MU.BU.Pop2020, file = "Assemble.MU.BU.Pop2020_PCRA_oct.csv", row.names = FALSE, na="")
# Confirm where the CSVs were written.
getwd()
# Cell-count frequency tables converted to area (cell count x cell area).
BU_area <- freq(occ_model2_PCR, useNA='no')
apc <- prod(res(occ_model2_PCR))
BU_area <- cbind(BU_area, area=BU_area[,2] * apc)
BU_areaLB <- freq(LB_model2_PCR, useNA='no')
apc <- prod(res(LB_model2_PCR))
BU_areaLB <- cbind(BU_areaLB, area=BU_areaLB[,2] * apc)
BU_areaUB <- freq(UB_model2_PCR, useNA='no')
apc <- prod(res(UB_model2_PCR))
BU_areaUB <- cbind(BU_areaUB, area=BU_areaUB[,2] * apc)
##################### MU (environmental model) areas
MU_area <- freq(occ_model2_env, useNA='no')
apc <- prod(res(occ_model2_env))
MU_area <- cbind(MU_area, area=MU_area[,2] * apc)
# BUG FIX: this second LB/UB section originally re-ran the identical BU
# (PCR-model) computations a second time, overwriting BU_areaLB/BU_areaUB
# with the same values. Given the MU section header, the intent was clearly
# the environmental-model bounds, so compute MU_areaLB/MU_areaUB here.
MU_areaLB <- freq(LB_model2_env, useNA='no')
apc <- prod(res(LB_model2_env))
MU_areaLB <- cbind(MU_areaLB, area=MU_areaLB[,2] * apc)
MU_areaUB <- freq(UB_model2_env, useNA='no')
apc <- prod(res(UB_model2_env))
MU_areaUB <- cbind(MU_areaUB, area=MU_areaUB[,2] * apc)
################################
# Clean up workspace of unnecessary objects and save everything this script created.
gc()
# BUG FIX (idiom): the original used paste0(path, sep = "/", file); paste0()
# has no `sep` argument, so the "/" was only concatenated by accident (it is
# absorbed into `...`). file.path() builds the same path explicitly.
save.image(file.path(path_model_PCR, "Population_Estimates.RData"))
|
643443f107c23a210911c46ae35cba865de88229 | e62690b3dd9335476e72adb9e4b97b4cafbcafa9 | /RandomForest.R | 112abee7a66017f7e105ee7a97ac38a08518005d | [
"MIT"
] | permissive | nkafr/the-analytics-edge-kaggle | 76ff464b29994be273d5ffc16c97fbf4aed6f69a | 9eb87e969d2f24d008a9c8e3f934efff421245db | refs/heads/master | 2021-01-20T18:16:00.635300 | 2016-07-16T20:52:15 | 2016-07-16T20:52:15 | 63,443,304 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,277 | r | RandomForest.R | #install.packages("randomForest")
library(randomForest)
#install.packages("caret")
library(caret)
#tune random forest to find the best parameters (better approach)
# 5-fold cross-validation over mtry, using the survey questions with the
# strongest association with Party as predictors.
rf_model<-train(Party ~YOB+ Income+ EducationLevel+ HouseholdStatus+ Q109244+ Q115611+ Q98197 +Q113181 +Q98869 +Q101163 + Q99480 +Q105840 +Q120379 + Q116881+ +Q106272+ Q120472 + Q115899
+ Q102089 +Q110740 + Q119851 +Q121699 +Q115195 + Q106042 +Q118232 + Q100680 + Q118892 +Q107869, data=train ,method="rf",trControl=trainControl(method="cv",number=5),
prox=TRUE,allowParallel=TRUE)
print(rf_model)
print(rf_model$finalModel)
#alternatively use tune.rf
# tuneRF searches mtry directly on the predictor matrix (column 7 assumed to
# be the response) using OOB error.
tune.rf <- tuneRF(train[,-7],train[,7], improve=0.05, stepFactor=0.5)
#Execute random forest algorith using the most important features (in terms of p-values))
forest = randomForest(Party ~YOB+ Income+ EducationLevel+ HouseholdStatus+ Q109244+ Q115611+ Q98197 +Q113181 +Q98869 +Q101163 + Q99480 +Q105840 +Q120379 + Q116881+ +Q106272+ Q120472 + Q115899 + Q102089 +Q110740 + Q119851 +Q121699 +Q115195 + Q106042 +Q118232 + Q100680 + Q118892 +Q107869 , ntree=800, nodesize=30, data=train)
#A metric we can look at is related to "impurity", which measures how homogenous each bucket or leaf of the tree is.
#In each tree in the forest, whenever we select a variable and perform a split, the impurity is decreased.
#Therefore, one way to measure the importance of a variable is to average the reduction in impurity,
#taken over all the times that variable is selected for splitting in all of the trees in the forest
varImpPlot(forest) #or forest$importance[order(-forest$importance), , drop = FALSE] or importance(forest)
#forest$importance conatains the same of varImpPlot(forest) but unordered
#Another metric that we can look at is the number of times, aggregated over all of the trees in the random forest model,
#that a certain variable is selected for a split
vu = varUsed(forest, count=TRUE)
vusorted = sort(vu, decreasing = FALSE, index.return = TRUE)
dotchart(vusorted$x, names(forest$forest$xlevels[vusorted$ix]))
# Predict on the held-out test set and write the Kaggle submission file.
PredTestLabels= predict(forest, newdata=test)
MySubmission = data.frame(USER_ID = test$USER_ID, Predictions= PredTestLabels)
write.csv(MySubmission, "SubmissionSimpleLog.csv", row.names=FALSE) |
303aceb5dfda56f15d9329eeb3fdc0180aa9e451 | fd2b200255e17e62a97a2af5549f1e42252b14f6 | /R/param_file.R | a57af77add8f38c9c97afa8ddf606c5730481a44 | [] | no_license | AQLT/rjwsacruncher | fa151aad89c616c3bed95c9c57044257ff2ee871 | fe786b38c72f1ae8926f9a6f41f11ced17072caf | refs/heads/master | 2023-06-23T05:34:48.709367 | 2023-06-07T21:48:05 | 2023-06-07T21:48:05 | 172,219,271 | 3 | 2 | null | 2021-09-10T07:12:07 | 2019-02-23T13:46:02 | R | UTF-8 | R | false | false | 4,949 | r | param_file.R | #' Create paramater file for the 'JWSACruncher'
#'
#' To run, the 'JWSACruncher' needs a parameter file, and \code{create_param_file} allows one to create it.
#'
#' @param dir_file_param Path to the directory that will contains the parameter file "parameters.param".
#' @param bundle Maximum size for a group of series (in output). By default \code{bundle = 10000}.
#' @param csv_layout Layout of the CSV files (series only). By default \code{csv_layout = "list"}. Other options: \code{csv_layout = "vtable"} (vertical table) or \code{csv_layout = "htable"} (horizontal table).
#' @param csv_separator the field separator string used in the CSV file. By default \code{csv_separator = ";"}.
#' @param ndecs Number of decimals used in the output. By default \code{ndec = 6}.
#' @param policy refreshing policy of the processing. By default \code{policy = "parameters"} (re-estimation of the coefficients of the reg-ARIMA model, see details).
#' @param output Full path of the output folder. By default (\code{output = NULL}) a folder is created in the path to the workspace (\[workspace\]/Output).
#' @param matrix_item character containing the items of the matrix output (see the 'JDemetra+' manual for more information). By default, the items defined in the option \code{getOption("default_matrix_item")} are used (option initialized by the default output of the 'JWSACruncher' 2.2.2).
#' @param tsmatrix_series character containing the names of the times series to export (see the 'JDemetra+' manual for more information). By default, the items defined in the option \code{getOption("default_tsmatrix_series")} are used (option initialized by the default output of the 'JWSACruncher' 2.2.2).
#' @param paths_path The paths used for relative addresses (see the "Demetra Paths" of the graphical interface of 'JDemetra+').
#'
#' @details When the 'JWSACruncher' is launched, the data is refreshed with a specific policy that is defined by the parameter \code{policy}. The available options are:
#' \itemize{
#' \item \code{policy = "current"}: all the estimations are fixed;
#' \item \code{policy = "fixedparameters"} or \code{policy = "fixed"}: re-estimation of the coefficients of the regression variables (but not the ARIMA coefficients);
#' \item \code{policy = "parameters"} (the default): \code{policy = "fixedparameters"} + re-estimation of ARIMA coefficients;
#' \item \code{policy = "lastoutliers"}: \code{policy = "parameters"} + re-identification of last outliers (on the last year);
#' \item \code{policy = "outliers"}: \code{policy = "lastoutliers"} + re-identification of all outliers;
#' \item \code{policy = "stochastic"}: \code{policy = "outliers"} + re-identification of ARIMA orders;
#' \item \code{policy = "complete"} or \code{policy = "concurrent"}: the model is completely re-identified and re-estimated.
#' }
#'
#' @return Path to the parameter file.
#' @seealso \code{\link{cruncher_and_param}}.
#' @encoding UTF-8
#' @export
create_param_file <- function(dir_file_param, bundle = 10000, csv_layout = "list", csv_separator = ";",
                              ndecs = 6, policy = "parameters", output = NULL,
                              matrix_item = getOption("default_matrix_item"),
                              tsmatrix_series = getOption("default_tsmatrix_series"),
                              paths_path = NULL){
  # Assemble the XML parameter file ("parameters.param") consumed by the
  # 'JWSACruncher' and write it into `dir_file_param`.
  # Returns (invisibly) the full path of the file that was written.
  if (missing(dir_file_param)) {
    stop("The parameter dir_file_param is missing")
  }
  # Emit a "<tag>" section holding one "<item_tag>" line per value;
  # NULL input means the whole section is omitted.
  wrap_section <- function(tag, item_tag, values) {
    if (is.null(values)) {
      return(NULL)
    }
    c(paste0("    <", tag, ">"),
      paste0("        <", item_tag, ">", values, "</", item_tag, ">"),
      paste0("    </", tag, ">"))
  }
  xml_header <- "<?xml version=\"1.0\" encoding=\"UTF-8\" standalone=\"yes\"?>"
  config_open <- sprintf("<wsaConfig bundle=\"%s\" csvlayout=\"%s\" csvseparator=\"%s\" ndecs=\"%s\">",
                         bundle, csv_layout, csv_separator, ndecs)
  policy_tag <- paste0("    <policy>", policy, "</policy>")
  output_tag <- NULL
  if (!is.null(output)) {
    # Resolve to an absolute path, as the original workspace may move.
    output_tag <- paste0("    <output>", normalizePath(output), "</output>")
  }
  if (!is.null(paths_path)) {
    paths_path <- normalizePath(paths_path)
  }
  file_lines <- c(xml_header,
                  config_open,
                  policy_tag,
                  output_tag,
                  wrap_section("matrix", "item", matrix_item),
                  wrap_section("tsmatrix", "series", tsmatrix_series),
                  wrap_section("paths", "path", paths_path),
                  "</wsaConfig>")
  target_file <- paste0(dir_file_param, "/parameters.param")
  writeLines(file_lines, con = target_file)
  invisible(target_file)
}
|
365ec6833518218c9785f483e80d8140a0231f6c | b9c790aeada1217f887b87b771d16792a4ec5491 | /R/cluster-library.R | 1a924bd7a9c662cc414cd26b71efdea41233dfbb | [] | no_license | kendonB/multidplyr | 3388365276edaf5fef6dc47b48c46ae42ef4aafd | 6c47fa98a2534f295006348317a10d1c6f0541cb | refs/heads/master | 2021-01-18T03:44:21.786493 | 2019-12-10T00:39:11 | 2019-12-10T00:39:11 | 55,572,372 | 0 | 0 | null | 2016-04-06T03:14:06 | 2016-04-06T03:14:05 | null | UTF-8 | R | false | false | 532 | r | cluster-library.R | #' Attach a library on each node.
#'
#' Also attaches the library in the current session.
#'
#' @inheritParams objman
#' @param packages A character vector of package names
#' @export
#' @examples
#' cl <- create_cluster(2)
#' cl %>% cluster_eval(search())
#'
#' cl %>%
#'   cluster_library("magrittr") %>%
#'   cluster_eval(search())
cluster_library <- function(cluster, packages) {
  # `library()` only accepts a single package name, so attach each requested
  # package in turn -- first in the current session, then on every node.
  for (pkg in packages) {
    library(pkg, character.only = TRUE)
    cluster_call(.cl = cluster, library, package = pkg, character.only = TRUE)
  }
  invisible(cluster)
}
|
2caa47277d31b674986e58f13e17191befa2a4a7 | 3f55966dda24d22a1e9a7d1da1368a33fa36aca5 | /R/tally.R | eff2eab18a23bb5d0f782cb1019eef02a9faf714 | [] | no_license | juliangehring/Rariant | 87c33885401d5c12bb47b569a485f8c02678dcfb | dc6380d5334224deb6c4044adb77bed34cb79835 | refs/heads/master | 2021-01-24T21:12:13.776404 | 2016-01-29T01:41:40 | 2016-01-29T01:41:40 | 18,237,057 | 0 | 1 | null | 2019-10-07T11:40:15 | 2014-03-29T08:00:55 | R | UTF-8 | R | false | false | 3,863 | r | tally.R | comparativeMismatch <- function(test_counts, control_counts, strand = c("both", "plus", "minus"), consensus, roi) {
  ## NOTE(review): strand selection is disabled here; the inputs are expected
  ## to be pre-collapsed count matrices (one row per position of 'roi') --
  ## confirm with the callers.
  #strand = match.arg(strand)
  #test_counts = selectStrand(test_tally, strand)
  #control_counts = selectStrand(control_tally, strand)
  ## total read depth per position in each sample
  testDepth = seqDepth(test_counts)
  controlDepth = seqDepth(control_counts)
  test_base = callConsensus(test_counts) ## needed?
  control_base = callConsensus(control_counts)
  ## define the consensus sequence: taken from the supplied reference
  ## ('consensus') if given, otherwise called from the control counts
  if(!missing(consensus)) {
    ref_base = suppressWarnings(ds2int(getSeq(consensus, roi)[[1]]))
    ref_base[ref_base == 5] = NA ## 'N' -> NA
  } else {
    ref_base = control_base
  }
  ## mismatch counts: per position, reads that do not support the reference
  controlMismatch = mismatchCount(control_counts, ref_base, controlDepth)
  controlMismatch[is.na(controlMismatch)] = 0 ## TODO
  testMismatch = mismatchCount(test_counts, ref_base, testDepth)
  testMismatch[is.na(testMismatch)] = 0
  n = nrow(test_counts)
  ## depth of the consensus allele in test
  refDepth = test_counts[mat2ind(ref_base, n)]
  refDepth[is.na(refDepth)] = 0
  ## zero out the consensus counts in test so that the strongest remaining
  ## base per row becomes the alternative allele call below
  idx_consensus = mat2ind(ref_base, n)
  test_counts[idx_consensus] = 0 ## [cbind(i, j)] as alternative
  ## call alt alleles and counts
  test_alt_base = callConsensus(test_counts)
  altDepth = test_counts[mat2ind(test_alt_base, n)]
  ## TODO: make this more efficient
  ## one row per position of 'roi': mismatch/depth statistics plus the
  ## called reference and alternative bases in both samples
  dx = data.frame(
    chr = seqchar(roi), pos = start(roi):end(roi),
    testMismatch = testMismatch, controlMismatch = controlMismatch,
    testDepth = testDepth, controlDepth = controlDepth,
    testRef = ind2base(test_base), testAlt = ind2base(test_alt_base),
    controlRef = ind2base(control_base), ref = ind2base(ref_base),
    refDepth = refDepth, altDepth = altDepth
  )
  return(dx)
}
selectStrand <- function(x, strand = c("both", "plus", "minus"), idx = 1:ncol(x)) {
  # Reduce a position x column x strand count array to a matrix: sum both
  # strand slices, or pick one. The result keeps matrix shape even when a
  # single column is selected.
  if (strand == "both") {
    res <- x[, idx, 1] + x[, idx, 2]
  } else if (strand == "plus") {
    res <- x[, idx, 1]
  } else if (strand == "minus") {
    res <- x[, idx, 2]
  }
  dim(res) <- c(nrow(x), length(idx))
  res
}
seqDepth <- function(x) {
  # Total read depth per position: the integer row sums of a count matrix.
  as.integer(rowSums(x))
}
callConsensus <- function(counts, verbose = FALSE) {
  # Column index of the row-wise maximum of `counts`; NA where the maximum
  # is tied (detected by comparing first-tie and last-tie resolutions).
  first_hit <- max.col(counts, ties.method = "first")
  last_hit <- max.col(counts, ties.method = "last")
  ambiguous <- first_hit != last_hit
  if (verbose && any(ambiguous)) {
    warning(sprintf("%d calls are ambiguous.", sum(ambiguous)))
  }
  first_hit[ambiguous] <- NA
  first_hit
}
mismatchCount <- function(counts, consensus, depth = rowSums(counts)) {
  # Reads per row that do not support the consensus base: total depth minus
  # the count of the consensus allele (NA consensus yields NA).
  consensus_cells <- cbind(seq_len(nrow(counts)), consensus)
  depth - counts[consensus_cells]
}
dna_bases <- function() {
  # Character vector of the DNA base symbols, each element named after itself.
  bases <- c("A", "C", "G", "T", "N")
  names(bases) <- bases
  bases
}
tallyBamRegion <- function(bam, region, minBase = 0, minMap = 0, maxDepth = 1e4) {
  ## Tally per-position nucleotide counts for one genomic region of a BAM
  ## file via pileup (presumably Rsamtools -- confirm imports). Returns a
  ## width(region) x 4 integer matrix, columns named after the first four
  ## levels of the pileup 'nucleotide' factor. 'minBase'/'minMap' are base
  ## and mapping quality cutoffs; 'maxDepth' caps the pileup depth.
  if(length(region) > 1)
    stop("Region must be of length 1.")
  ## params: collapse strands, keep per-nucleotide counts, drop deletions
  pp = PileupParam(
      max_depth = maxDepth,
      min_base_quality = minBase,
      min_mapq = minMap,
      min_nucleotide_depth = 0,
      min_minor_allele_depth = 0,
      distinguish_strands = FALSE,
      distinguish_nucleotides = TRUE,
      ignore_query_Ns = FALSE,
      include_deletions = FALSE)
  sb = ScanBamParam(which = region)
  t2 = pileup(bam, scanBamParam = sb, pileupParam = pp)
  ## to matrix: one row per position of the region, zero-filled
  m = matrix(0L, width(region), 4)
  colnames(m) = levels(t2$nucleotide)[1:4]
  idx_col = as.integer(t2$nucleotide)
  ## drop rows for nucleotide levels beyond the first four (non-standard)
  if(any(idx_col > 4)) {
    #warning("Skipping non-standard bases")
    t2 = t2[idx_col <= 4, ]
    idx_col = idx_col[idx_col <= 4]
  }
  ## scatter counts into (position offset within region, base) cells
  m[cbind(t2$pos-start(region)+1, idx_col)] = t2$count
  return(m)
}
|
6a53686784be5c6fc080bf52dbd031100b21839e | f00c94397f76bb4ad6f71cf0f7a6a7cdbb7df2aa | /R/data.R | 9b016826cdcf9903a2e71b20933b9f06df6dc392 | [
"MIT"
] | permissive | thackl/gggenomes | 15aedd2fa5ca983e99caf331269ddca41ce1d0ef | f90efabffe83fab0f212a1ba29d7ac2e3f2f915c | refs/heads/master | 2023-08-21T09:53:04.756717 | 2023-06-16T09:09:18 | 2023-06-16T09:09:18 | 121,570,177 | 466 | 65 | MIT | 2023-08-02T09:57:06 | 2018-02-14T22:46:25 | R | UTF-8 | R | false | false | 8,320 | r | data.R | ## Documentations for bundled example data (/data/*.rda)
#' Sequence index of 6 EMALE genomes (endogenous virophages)
#'
#' A data set containing the sequence information on 6 endogenous virophages
#' found in the genomes of the marine protist *Cafeteria burkhardae*.
#'
#' @format A data frame with 6 rows and 4 columns
#' \describe{
#' \item{file_id}{name of the file the data was read from}
#' \item{seq_id}{sequence identifier}
#' \item{seq_desc}{sequence description}
#' \item{length}{length of the sequence}
#' }
#' @source
#' * Publication: \url{http://dx.doi.org/10.1101/2020.11.30.404863}
#' * Raw data: \url{https://github.com/thackl/cb-emales}
#' * Derived & bundled data: `ex("emales/emales.fna")`
"emale_seqs"
#' Gene annotations of 6 EMALE genomes (endogenous virophages)
#'
#' A data set containing gene feature annotations for 6 endogenous virophages
#' found in the genomes of the marine protist *Cafeteria burkhardae*.
#'
#' @format A data frame with 143 rows and 17 columns
#' \describe{
#' \item{file_id}{name of the file the data was read from}
#' \item{seq_id}{identifier of the sequence the feature appears on}
#' \item{start}{start of the feature on the sequence}
#' \item{end}{end of the feature on the sequence}
#' \item{strand}{reading orientation relative to sequence (+ or -)}
#' \item{type}{feature type (CDS, mRNA, gene, ...)}
#' \item{feat_id}{unique identifier of the feature}
#' \item{introns}{a list column with internal intron start/end positions}
#' \item{parent_ids}{a list column with parent IDs - feat_id's of parent features}
#' \item{source}{source of the annotation}
#' \item{score}{score of the annotation}
#' \item{phase}{For "CDS" features indicates where the next codon begins relative to the 5' start}
#' \item{width}{width of the feature}
#' \item{gc_content}{relative GC-content of the feature}
#' \item{name}{name of the feature}
#' \item{Note}{}
#'   \item{geom_id}{an identifier indicating which features should be plotted as one item (usually the CDS and mRNA of the same gene)}
#' }
#' @source
#' * Publication: \url{http://dx.doi.org/10.1101/2020.11.30.404863}
#' * Raw data: \url{https://github.com/thackl/cb-emales}
#' * Derived & bundled data: `ex("emales/emales.gff")`
"emale_genes"
#' Terminal inverted repeats of 6 EMALE genomes
#'
#' @format A data frame with 3 rows and 14 columns
#' \describe{
#' \item{file_id}{name of the file the data was read from}
#' \item{seq_id}{identifier of the sequence the feature appears on}
#' \item{start}{start of the feature on the sequence}
#' \item{end}{end of the feature on the sequence}
#' \item{strand}{reading orientation relative to sequence (+ or -)}
#' \item{type}{feature type (CDS, mRNA, gene, ...)}
#' \item{feat_id}{unique identifier of the feature}
#' \item{introns}{a list column with internal intron start/end positions}
#' \item{parent_ids}{a list column with parent IDs - feat_id's of parent features}
#' \item{source}{source of the annotation}
#' \item{score}{score of the annotation}
#' \item{phase}{For "CDS" features indicates where the next codon begins relative to the 5' start}
#' \item{name}{name of the feature}
#'   \item{geom_id}{an identifier indicating which features should be plotted as one item (usually the CDS and mRNA of the same gene)}
#' }
#' @source
#' * Publication: \url{http://dx.doi.org/10.1101/2020.11.30.404863}
#' * Raw data: \url{https://github.com/thackl/cb-emales}
#' * Derived & bundled data: `ex("emales/emales-tirs.gff")`
"emale_tirs"
#' Integrated Ngaro retrotransposons of 6 EMALE genomes
#'
#' @format A data frame with 3 rows and 14 columns
#' \describe{
#' \item{file_id}{name of the file the data was read from}
#' \item{seq_id}{identifier of the sequence the feature appears on}
#' \item{start}{start of the feature on the sequence}
#' \item{end}{end of the feature on the sequence}
#' \item{strand}{orientation of the feature relative to the sequence (+ or -)}
#' \item{type}{feature type (CDS, mRNA, gene, ...)}
#' \item{feat_id}{unique identifier of the feature}
#' \item{introns}{a list column with internal intron start/end positions}
#' \item{parent_ids}{a list column with parent IDs - feat_id's of parent features}
#' \item{source}{source of the annotation}
#' \item{score}{score of the annotation}
#' \item{phase}{For "CDS" features indicates where the next codon begins relative to the 5' start}
#' \item{name}{name of the feature}
#'   \item{geom_id}{an identifier indicating which features should be plotted as one item (usually the CDS and mRNA of the same gene)}
#' }
#' @source
#' * Publication: \url{http://dx.doi.org/10.1101/2020.11.30.404863}
#' * Raw data: \url{https://github.com/thackl/cb-emales}
#' * Derived & bundled data: `ex("emales/emales-ngaros.gff")`
"emale_ngaros"
#' Relative GC-content along 6 EMALE genomes
#'
#' One row per 50 bp window.
#'
#' @format A data frame with 2856 rows and 6 columns
#' \describe{
#' \item{file_id}{name of the file the data was read from}
#' \item{seq_id}{identifier of the sequence the feature appears on}
#' \item{start}{start of the feature on the sequence}
#' \item{end}{end of the feature on the sequence}
#' \item{name}{name of the feature}
#' \item{score}{relative GC-content of the window}
#' }
#' @source
#' * Derived & bundled data: `ex("emales/emales-gc.bed")`
"emale_gc"
#' All-versus-all whole genome alignments of 6 EMALE genomes
#'
#' One row per alignment block. Alignments were computed with minimap2.
#'
#' @format A data frame with 125 rows and 23 columns
#' \describe{
#' \item{file_id}{name of the file the data was read from}
#' \item{seq_id}{identifier of the sequence the feature appears on}
#' \item{length}{length of the sequence}
#' \item{start}{start of the feature on the sequence}
#' \item{end}{end of the feature on the sequence}
#' \item{strand}{orientation of the feature relative to the sequence (+ or -)}
#' \item{seq_id2}{identifier of the sequence the feature appears on}
#' \item{length2}{length of the sequence}
#' \item{start2}{start of the feature on the sequence}
#' \item{end2}{end of the feature on the sequence}
#' \item{...}{see \url{https://github.com/lh3/miniasm/blob/master/PAF.md} for additional columns}
#' }
#' @source
#' * Derived & bundled data: `ex("emales/emales.paf")`
"emale_ava"
#' All-versus-all alignments 6 EMALE proteomes
#'
#' One row per alignment. Alignments were computed with mmseqs2 (blast-like).
#'
#' @format A data frame with 827 rows and 13 columns
#' \describe{
#' \item{file_id}{name of the file the data was read from}
#' \item{feat_id}{identifier of the first feature in the alignment}
#' \item{feat_id2}{identifier of the second feature in the alignment}
#' \item{pident, ...}{see \url{https://github.com/seqan/lambda/wiki/BLAST-Output-Formats} for BLAST-tabular format columns}
#' }
#' @source
#' * Derived & bundled data: `ex("emales/emales-prot-ava.o6")`
"emale_prot_ava"
#' Alignments of 6 EMALE proteomes against Uniref50
#'
#' One row per alignment. Alignments were computed with mmseqs2 (blast-like), and
#' filtered by evalue (<1e-20).
#'
#' @format A data frame with 509 rows and 16 columns
#' \describe{
#' \item{file_id}{name of the file the data was read from}
#' \item{feat_id}{identifier of the first feature in the alignment}
#' \item{feat_id2}{identifier of the second feature in the alignment}
#' \item{pident, ...}{see \url{https://github.com/seqan/lambda/wiki/BLAST-Output-Formats} for BLAST-tabular format columns}
#' \item{seq_head}{full sequence header of the emale protein}
#' \item{seq_head2}{full sequence header of the Uniref50 protein}
#' \item{taxname}{one of the 4 most abundant taxonomic names among the Uniref50 hits or NA}
#' }
#' @source
#' * Derived & bundled data: `ex("emales/emales-prot-uniref50.tsv")`
"emale_prot_uniref50"
#' Clusters of orthologs of 6 EMALE proteomes
#'
#' One row per feature. Clusters are based on manual curation.
#'
#' @format A data frame with 48 rows and 3 columns
#' \describe{
#' \item{cluster_id}{identifier of the cluster}
#' \item{feat_id}{identifer of the gene}
#' \item{cluster_size}{number of features in the cluster}
#' }
#' @source
#' * Derived & bundled data: `ex("emales/emales-cogs.tsv")`
"emale_cogs"
|
1576497e47e1077c97602ac3d32090c76542130e | 6e1ed64a0769a0776a01f2db6c42510477acc13f | /man/split_bins.Rd | 53ab59c887bced6025570f19e4d2cce8239c8733 | [] | no_license | cran/creditmodel | f9e008df63f25cdaefebe79a82aaf9aad3598c8c | a4f07950173e6e0f6c6565a08fc564ec2075cb36 | refs/heads/master | 2022-01-19T11:26:07.485897 | 2022-01-07T10:32:41 | 2022-01-07T10:32:41 | 183,883,743 | 4 | 2 | null | null | null | null | UTF-8 | R | false | true | 947 | rd | split_bins.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/variable_binning.R
\name{split_bins}
\alias{split_bins}
\title{split_bins}
\usage{
split_bins(
dat,
x,
breaks = NULL,
bins_no = TRUE,
as_factor = FALSE,
labels = NULL,
use_NA = TRUE,
char_free = FALSE
)
}
\arguments{
\item{dat}{A data.frame with independent variables.}
\item{x}{The name of an independent variable.}
\item{breaks}{Breaks for binning.}
\item{bins_no}{Number the generated bins. Default is TRUE.}
\item{as_factor}{Whether to convert to factor type.}
\item{labels}{Labels of bins.}
\item{use_NA}{Whether to process NAs.}
\item{char_free}{Logical, if TRUE, characters are not split.}
}
\value{
A data.frame with Bined x.
}
\description{
\code{split_bins} is for binning using breaks.
}
\examples{
bins = split_bins(dat = UCICreditCard,
x = "PAY_AMT1", breaks = NULL, bins_no = TRUE)
}
|
ae4d9be285419e94c2bbdc9a1dc93df418a12f5f | 5d090e950f10f89b2e3df2acfd9a2aad4a5c91b1 | /Mod14/Projeto/06-CreateModel.R | 9149423ad06dc1c988640e309c658fb0679f04b7 | [] | no_license | mneresc/formacao_r_azure_machine_learning | db7542ed24a9577bfbd6763f92734e38c6066202 | 147237e1fd4ab26d3aa6d5d25ba4392cd79206b7 | refs/heads/main | 2023-05-12T08:35:26.965443 | 2021-05-19T00:25:58 | 2021-05-19T00:25:58 | 334,229,847 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,602 | r | 06-CreateModel.R | # Cria um modelo preditivo usando randomForest
# Este código foi criado para executar tanto no Azure, quanto no RStudio.
# Para executar no Azure, altere o valor da variavel Azure para TRUE.
# Se o valor for FALSE, o codigo será executado no RStudio
# Obs: Caso tenha problemas com a acentuação, consulte este link:
# https://support.rstudio.com/hc/en-us/articles/200532197-Character-Encoding
# Configurando o diretório de trabalho
# Coloque entre aspas o diretório de trabalho que você está usando no seu computador
# Não use diretórios com espaço no nome
# setwd("C:/FCD/BigDataRAzure/Cap14/Projeto")
# getwd()
# Função para tratar as datas
set.asPOSIXct <- function(inFrame) {
  # Rebuild a POSIXct timestamp from the numeric `dteday` column (seconds
  # since the epoch) combined with the integer `hr` hour-of-day column.
  # NOTE(review): assumes `dteday` falls on local midnight, so that
  # as.character() yields just the date part -- confirm for other timezones.
  day_stamp <- as.POSIXct(as.integer(inFrame$dteday), origin = "1970-01-01")
  stamp_txt <- paste0(as.character(day_stamp), " ", as.character(inFrame$hr), ":00:00")
  as.POSIXct(strptime(stamp_txt, "%Y-%m-%d %H:%M:%S"))
}
char.toPOSIXct <- function(inFrame) {
  # Parse a POSIXct timestamp from a character `dteday` date ("YYYY-MM-DD")
  # combined with the integer `hr` hour-of-day column.
  stamp_txt <- paste0(inFrame$dteday, " ", as.character(inFrame$hr), ":00:00")
  as.POSIXct(strptime(stamp_txt, "%Y-%m-%d %H:%M:%S"))
}
# Flag controlling how this script runs: TRUE when executing inside
# Azure ML, FALSE when running locally in RStudio.
Azure <- FALSE
if(Azure){
  # On Azure ML the input arrives as `dataset`; rebuild its timestamp column.
  dataset$dteday <- set.asPOSIXct(dataset)
}else{
  # Locally the data is expected in the pre-loaded `bikes` data frame.
  bikes <- bikes
}
# NOTE(review): require() only warns if randomForest is missing; library()
# would fail loudly here.
require(randomForest)
# Fit a random forest regressor of rental counts (`cnt`) on the transformed
# work-hour feature, timestamp, temperature and humidity.
model <- randomForest(cnt ~ xformWorkHr + dteday + temp + hum, 
                      data = bikes, # change the data object name to "dataset" if working in Azure ML
                      ntree = 40, 
                      nodesize = 5)
print(model)
|
4a838d7f26cc83f4652b6701692785b090e97d88 | b2544bed289a662be10de6d6794149bb61cf08db | /man/plot.MassBalance.Rd | c325612a4834d1b7012c679707bf208c74a5af63 | [] | no_license | jhndouglass/RRQP | 2ca5b72916f7a145e0e0a82e87e87dacc8344a3b | 39aaadfc1ff81ef343ef929bb68a740b3e7e8dea | refs/heads/master | 2020-12-24T15:31:31.026996 | 2015-02-16T14:50:30 | 2015-02-16T14:50:30 | 30,872,626 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 342 | rd | plot.MassBalance.Rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/plot_mass_balance.R
\name{plot.MassBalance}
\alias{plot.MassBalance}
\title{Plot MassBalance object}
\usage{
\method{plot}{MassBalance}(x, ...)
}
\arguments{
\item{stuff}{}
}
\value{
stuff
}
\description{
stuff
}
\details{
stuff
}
\examples{
stuff <- 1
}
|
8c15105ccd1469a37bb67c9da6a8d12572bb1b28 | 94b08a46749642378b1711bc592b34ebcc7bcc73 | /packageDir/man/StudyMetaData-class.Rd | 26a1089fa4bb78a714695cf49715535ad5e9df70 | [] | no_license | stochasticTreat/pathLayerDistributionVersion2 | 857f1a19d0c8ea80c99a605bdbe9544d58c49075 | 487b7e8d7e25f8841652251070ba97ed7c8b8376 | refs/heads/master | 2016-09-05T11:55:05.946434 | 2015-02-17T21:01:23 | 2015-02-17T21:01:23 | 21,082,634 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 911 | rd | StudyMetaData-class.Rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/InitiateDataStructures.R
\docType{class}
\name{StudyMetaData-class}
\alias{StudyMetaData-class}
\title{StudyMetaData}
\description{
Object containing crucial study meta data.
}
\section{Slots}{
\describe{
\item{\code{paths}}{Path_Detail reference class object}
\item{\code{settings}}{A list of settings list objects. One slot in list is generally provided for each arm.}
\item{\code{studyName}}{String. The name of the study and name of the file folder used to save the study.}
\item{\code{geneIdentifierType}}{String. The name of the type of gene identifiers used in the study.}
\item{\code{GeneIdentifierLookup}}{Deprecated. A slot for a set of official, approved gene identifiers and mappings for unofficial gene identifiers.}

\item{\code{RootFile}}{Character data. The path to the folder where the study is saved.}
}}
|
f6202cc57bb62baac99d643e1cbe8567d1b64e8b | b3bb9332adaec9b5b692c538fc116275a368cb23 | /man/countries.Rd | e4d61340b5b06ba7630786607daefdd617f50755 | [] | no_license | pedrobf777/betfaiR-1 | 0504dafc37d4c5f988a0ea57c4801478f4e93ad1 | 7282cc1ad7caffba1eddf60fcf9bfb4d70adfc46 | refs/heads/master | 2021-10-24T20:59:49.421884 | 2019-03-28T21:29:46 | 2019-03-28T21:29:46 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 872 | rd | countries.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R
\name{countries}
\alias{countries}
\title{countries method}
\arguments{
\item{filter}{list to select desired markets, see \link{marketFilter},
or visit \href{https://api.developer.betfair.com/services/webapps/docs/display/1smk3cen4v3lu3yomq5qye0ni/Betting+Type+Definitions#BettingTypeDefinitions-MarketFilter}{developer.betfair.com},
for the different options.}
}
\value{
a dataframe of countries with markets selected by the \code{filter}
parameter, with the following variables:
\itemize{
\item{\code{countryCode} the ISO-2 code for the event, ISO-2 codes are
available on \href{https://en.wikipedia.org/wiki/ISO_3166-1_alpha-2}{wiki}}
\item{\code{marketCount} number of markets associated with this competition}
}
}
\description{
countries associated with markets
}
|
103b37b1efbb45cd9ff7dab11423b6a41327dfe7 | 29585dff702209dd446c0ab52ceea046c58e384e | /photobiology/R/spct.operators.r | 4ba443f5c74958f0c2a3d8b97728e9a115e3a200 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 55,412 | r | spct.operators.r | # Private function which takes the operator as its third argument
# The operator must enclosed in backticks to be recognized as such
#
# This avoids the repetition of very similar code for each operator
# as in the older versions.
oper.e.generic_spct <- function(e1, e2, oper) {
if (is.object_spct(e1) || is.object_spct(e2)) {
stop("Operators are not defined for object_spct objects")
}
if (is.logical(e1)) {
e1 <- as.integer(e1)
}
if (is.logical(e2)) {
e2 <- as.integer(e2)
}
if (is.waveband(e1)) {
e_temp <- e2
e2 <- e1
e1 <- e_temp
}
if (is.numeric(e1)) {
class1 <- "numeric"
} else {
class1 <- class_spct(e1)[1]
}
if (is.numeric(e2)) {
class2 <- "numeric"
} else if (is.waveband(e2)) {
class2 <- "waveband"
} else {
class2 <- class_spct(e2)[1]
}
if (class1 == "raw_spct") {
if (is.numeric(e2)) {
z <- e1
z[["counts"]] <- oper(z[["counts"]], e2)
return(z)
} else if (class2 == "raw_spct") {
z <- oper_spectra(e1$w.length, e2$w.length,
e1$counts, e2$counts,
bin.oper=oper, trim="intersection")
names(z)[2] <- "counts"
setRawSpct(z, strict.range = FALSE)
return(z)
} else {
warning("operation between 'raw_spct' and ", class(e2)[1],
" objects not implemented")
}
} else if (class1 == "cps_spct") {
if (is.numeric(e2)) {
z <- e1
z[["cps"]] <- oper(z[["cps"]], e2)
return(z)
} else if (class2 == "cps_spct") {
z <- oper_spectra(e1$w.length, e2$w.length,
e1$cps, e2$cps,
bin.oper=oper, trim="intersection")
names(z)[2] <- "cps"
setCpsSpct(z, strict.range = FALSE)
return(z)
} else {
warning("operation between 'cps_spct' and ", class(e2)[1],
" objects not implemented")
}
} else if (class1 == "source_spct") {
q2e(e1, action = "add", byref = TRUE)
if (is.waveband(e2)) {
if (!identical(oper, `*`)) {
warning("Only '*' is allowed between source_spct and waveband objects")
return(NA)
}
if (is_effective(e1) && !is_effective(e2)) {
bwswf.used <- getBSWFUsed(e1)
} else if (!is_effective(e1) && is_effective(e2)) {
bswf.used <- labels(e2)[["name"]]
} else if (is_effective(e1) && is_effective(e2)) {
bswf.used <- paste(getBSWFUsed(e1), "*", labels(e2)[["name"]])
} else if (!is_effective(e1) && !is_effective(e2)) {
bswf.used <- "none"
} else {
stop("Failed assertion! BUG IN PACKAGE CODE")
}
e1 <- trim_spct(e1, low.limit=min(e2), high.limit=max(e2) - 1e-12,
verbose=FALSE,
use.hinges = getOption("photobiology.use.hinges",
default=NULL))
mult <-
calc_multipliers(w.length=e1$w.length, w.band=e2, unit.out="energy",
unit.in="energy",
use.cached.mult = getOption("photobiology.use.cached.mult",
default = FALSE))
return(source_spct(w.length=e1$w.length,
s.e.irrad = e1$s.e.irrad * mult,
time.unit=getTimeUnit(e1),
bswf.used=bswf.used,
strict.range = FALSE))
}
if (is.numeric(e2)) {
z <- e1
if (exists("s.q.irrad", z, inherits = FALSE)) {
z[["s.q.irrad"]] <- NULL
}
z[["s.e.irrad"]] <- oper(z[["s.e.irrad"]], e2)
check_spct(z)
return(z)
} else if (class2 == "source_spct") {
q2e(e2, action = "replace", byref = TRUE)
if (is_effective(e1) || is_effective(e2)) {
if (getBSWFUsed(e1) != getBSWFUsed(e2)) {
warning("One or both operands are effective spectral irradiances")
bswf.used <- paste(getBSWFUsed(e1), getBSWFUsed(e2))
} else {
bswf.used <- getBSWFUsed(e1)
}
} else {
bswf.used <- "none"
}
z <- oper_spectra(e1$w.length, e2$w.length,
e1$s.e.irrad, e2$s.e.irrad,
bin.oper=oper, trim="intersection")
names(z)[2] <- "s.e.irrad"
setSourceSpct(z, time.unit=getTimeUnit(e1), bswf.used = bswf.used,
strict.range = FALSE)
return(z)
} else if (class2 == "filter_spct") {
filter.quantity <- getOption("photobiology.filter.qty", default="transmittance")
if (filter.quantity=="transmittance") {
if (!identical(oper, `*`) && !identical(oper, `/`)) {
warning("Only '*' and '/' are allowed between source_spct and filter_spct objects")
return(NA)
}
A2T(e2, action = "replace", byref = TRUE)
z <- oper_spectra(e1$w.length, e2$w.length,
e1$s.e.irrad, e2$Tfr,
bin.oper=oper, trim="intersection")
names(z)[2] <- "s.e.irrad"
setSourceSpct(z, time.unit=getTimeUnit(e1), bswf.used = getBSWFUsed(e1),
strict.range = FALSE)
} else if (filter.quantity=="absorbance") {
if (!identical(oper, `*`) && !identical(oper, `/`)) {
warning("Only '*' and '/' are allowed between source_spct and filter_spct objects")
return(NA)
}
T2A(e2, action = "add", byref = TRUE)
z <- oper_spectra(e1$w.length, e2$w.length,
e1$s.e.irrad, e2$A,
bin.oper=oper, trim="intersection")
names(z)[2] <- "s.e.response"
setResponseSpct(z, time.unit=getTimeUnit(e1))
}
return(z)
} else if (class2 == "reflector_spct") {
if (!identical(oper, `*`) && !identical(oper, `/`)) {
warning("Only '*' and '/' are allowed between source_spct and reflector_spct objects")
return(NA)
}
z <- oper_spectra(e1$w.length, e2$w.length,
e1$s.e.irrad, e2$s.e.irrad,
bin.oper=oper, trim="intersection")
names(z)[2] <- "s.e.irrad"
setSourceSpct(z, time.unit=getTimeUnit(e1), bswf.used = getBSWFUsed(e1),
strict.range = FALSE)
return(z)
} else if (class2 == "response_spct") {
q2e(e2, action = "replace", byref = TRUE)
if (!identical(oper, `*`) && !identical(oper, `/`)) {
warning("Only '*' and '/' are allowed between source_spct and response_spct objects")
return(NA)
}
z <- oper_spectra(e1$w.length, e2$w.length,
e1$s.e.irrad, e2$s.e.response,
bin.oper=oper, trim="intersection")
names(z)[2] <- "s.e.response"
setResponseSpct(z, time.unit=getTimeUnit(e1))
return(z)
} else if (class2 == "chroma_spct") {
if (!identical(oper, `*`) && !identical(oper, `/`)) {
warning("Only '*' and '/' are allowed between source_spct and chroma_spct objects")
return(NA)
}
x <- oper_spectra(e1$w.length, e2$w.length,
e1$s.e.irrad, e2$x,
bin.oper=oper, trim="intersection")
y <- oper_spectra(e1$w.length, e2$w.length,
e1$s.e.irrad, e2$y,
bin.oper=oper, trim="intersection")
z <- oper_spectra(e1$w.length, e2$w.length,
e1$s.e.irrad, e2$z,
bin.oper=oper, trim="intersection")
z <- dplyr::data_frame(w.length=x$w.length, x=x[["s.irrad"]],
y=y[["s.irrad"]], z=z[["s.irrad"]])
setChromaSpct(z)
return(z)
} else { # this traps also e2 == "generic_spct"
warning("Operations involving generic_spct are undefined and always return NA")
return(NA)
}
} else if (class1 == "filter_spct") {
filter.quantity <- getOption("photobiology.filter.qty",
default = "transmittance")
if (filter.quantity == "transmittance") {
e1 <- A2T(e1)
if (is.numeric(e2)) {
z <- e1
if (exists("A", z, inherits = FALSE)) {
z[["A"]] <- NULL
}
z[["Tfr"]] <- oper(z[["Tfr"]], e2)
check_spct(z)
return(z)
}
else if (class2 == "source_spct") {
q2e(e2, action = "add", byref = TRUE)
if (!identical(oper, `*`) && !identical(oper, `/`)) {
warning("Only '*' and '/' are allowed between filter_spct and source_spct objects")
return(NA)
}
z <- oper_spectra(e1$w.length, e2$w.length,
e1$Tfr, e2$s.e.irrad,
bin.oper=oper, trim="intersection")
names(z)[2] <- "s.e.irrad"
setSourceSpct(z, time.unit=getTimeUnit(e2), bswf.used = getBSWFUsed(e2),
strict.range = FALSE)
} else if (class2 == "filter_spct") {
e2 <- A2T(e2)
if (!identical(oper, `*`) && !identical(oper, `/`)) {
warning("Only '*' and '/' are allowed for two filter_spct objects (transmittance)")
return(NA)
}
z <- oper_spectra(e1$w.length, e2$w.length,
e1$Tfr, e2$Tfr,
bin.oper=oper, trim="intersection")
names(z)[2] <- "Tfr"
setFilterSpct(z, strict.range = FALSE)
return(z)
} else { # this traps optically illegal operations
warning("The operation attempted in undefined according to Optics laws or the input is malformed")
return(NA)
}
} else if (filter.quantity=="absorbance") {
T2A(e1, action = "add", byref = TRUE)
if (is.numeric(e2)) {
z <- e1
if (exists("Tfr", z, inherits = FALSE)) {
z[["Tfr"]] <- NULL
}
z[["A"]] <- oper(z[["A"]], e2)
check_spct(z)
return(z)
} else if (class2 == "source_spct") {
q2e(e2, action = "add", byref = TRUE)
if (!identical(oper, `*`) && !identical(oper, `/`)) {
warning("Only '*' and '/' are allowed between filter_spct and source_spct objects")
}
z <- oper_spectra(e1$w.length, e2$w.length,
e1$A, e2$s.e.irrad,
bin.oper=oper, trim="intersection")
names(z)[2] <- "s.e.response"
setResponseSpct(z, time.unit=getTimeUnit(e2))
return(z)
} else if (class2 == "filter_spct") {
T2A(e2, action = "add", byref = TRUE)
if (!identical(oper, `+`) && !identical(oper, `-`)) {
warning("Only '+' and '-' are allowed for two filter_spct objects (absorbance)")
return(NA)
}
z <- oper_spectra(e1$w.length, e2$w.length,
e1$A, e2$A,
bin.oper=oper, trim="intersection")
names(z)[2] <- "A"
setFilterSpct(z, strict.range = FALSE)
return(z)
} else { # this traps optically illegal operations
warning("The operation attempted in undefined according to Optics laws or the input is malformed")
return(NA)
}
}
} else if (class1 == "reflector_spct") {
if (is.numeric(e2)) {
z <- e1
z[["Rfr"]] <- oper(z[["Rfr"]], e2)
check_spct(z)
return(z)
} else if (class2 == "reflector_spct") {
z <- oper_spectra(e1$w.length, e2$w.length,
e1$Rfr, e2$Rfr,
bin.oper=oper, trim="intersection")
names(z)[2] <- "Rfr"
setReflectorSpct(z, strict.range = FALSE)
return(z)
} else if (class2 == "source_spct") {
if (!identical(oper, `*`) && !identical(oper, `/`)) {
warning("Only '*' and '/' are allowed between reflector_spct and source_spct objects")
return(NA)
}
z <- oper_spectra(e1$w.length, e2$w.length,
e1$Rfr, e2$s.e.irrad,
bin.oper=oper, trim="intersection")
names(z)[2] <- "s.e.irrad"
setSourceSpct(z, time.unit=getTimeUnit(e2), bswf.used = getBSWFUsed(e2),
strict.range = FALSE)
return(z)
} else { # this traps optically illegal operations
warning("The operation attempted in undefined according to Optics laws or the input is malformed")
return(NA)
}
} else if (class1 == "response_spct") {
q2e(e1, action = "replace", byref = TRUE)
if (is.numeric(e2)) {
z <- e1
z[["s.e.response"]] <- oper(z[["s.e.response"]], e2)
check_spct(z)
return(z)
} else if (class2 == "response_spct") {
q2e(e2, action = "replace", byref = TRUE)
z <- oper_spectra(e1$w.length, e2$w.length,
e1$s.e.response, e2$s.e.response,
bin.oper=oper, trim="intersection")
names(z)[2] <- "s.e.response"
setResponseSpct(z, time.unit=getTimeUnit(e1))
return(z)
} else if (class2 == "source_spct") {
if (!identical(oper, `*`) && !identical(oper, `/`)) {
warning("Only '*' and '/' are allowed between response_spct and source_spct objects")
return(NA)
}
q2e(e2, action = "replace", byref = TRUE)
z <- oper_spectra(e1$w.length, e2$w.length,
e1$s.e.response, e2$s.e.irrad,
bin.oper=oper, trim="intersection")
names(z)[2] <- "s.e.response"
setResponseSpct(z, time.unit=getTimeUnit(e2))
return(z)
} else { # this traps optically illegal operations
warning("The operation attempted in undefined according to Optics laws or the input is malformed")
return(NA)
}
} else if (class1 == "chroma_spct") {
if (is.numeric(e2)) {
if (length(e2) == 3 && names(e2) == c("x", "y", "z")) {
return(chroma_spct(w.length = e1$w.length,
x = oper(e1$x, e2["x"]),
y = oper(e1$y, e2["y"]),
z = oper(e1$z, e2["z"])))
} else {
return(chroma_spct(w.length = e1$w.length,
x = oper(e1$x, e2),
y = oper(e1$y, e2),
z = oper(e1$z, e2)))
}
} else if (class2 == "chroma_spct") {
x <- oper_spectra(e1$w.length, e2$w.length,
e1$x, e2$x,
bin.oper=oper, trim="intersection")
y <- oper_spectra(e1$w.length, e2$w.length,
e1$y, e2$y,
bin.oper=oper, trim="intersection")
z <- oper_spectra(e1$w.length, e2$w.length,
e1$z, e2$z,
bin.oper=oper, trim="intersection")
return(chroma_spct(w.length=x$w.length,
x=x[["s.irrad"]], y=y[["s.irrad"]], z=z[["s.irrad"]]))
} else if (class2 == "source_spct") {
q2e(e2, action = "replace", byref = TRUE)
x <- oper_spectra(e1$w.length, e2$w.length,
e1$x, e2$s.e.irrad,
bin.oper=oper, trim="intersection")
y <- oper_spectra(e1$w.length, e2$w.length,
e1$y, e2$s.e.irrad,
bin.oper=oper, trim="intersection")
z <- oper_spectra(e1$w.length, e2$w.length,
e1$z, e2$s.e.irrad,
bin.oper=oper, trim="intersection")
return(chroma_spct(w.length=x$w.length,
x=x[["s.irrad"]], y=y[["s.irrad"]], z=z[["s.irrad"]]))
}
} else if (is.numeric(e1)) {
if (class2 == "raw_spct") {
z <- e2
z[["counts"]] <- oper(e1, z[["counts"]])
return(z)
} else if (class2 == "cps_spct") {
z <- e2
z[["cps"]] <- oper(e1, z[["cps"]])
return(z)
} else if (class2 == "source_spct") {
q2e(e2, action = "replace", byref = TRUE)
z <- e2
z[["s.e.irrad"]] <- oper(e1, z[["s.e.irrad"]])
return(z)
} else if (class2 == "filter_spct") {
filter.quantity <- getOption("photobiology.filter.qty",
default="transmittance")
if (filter.quantity=="transmittance") {
A2T(e2, action = "add", byref = TRUE)
return(filter_spct(w.length=e2$w.length, Tfr=oper(e1, e2$Tfr),
strict.range = FALSE))
} else if (filter.quantity=="absorbance") {
T2A(e2, action = "add", byref = TRUE)
return(filter_spct(w.length=e2$w.length, A=oper(e1, e2$A),
strict.range = FALSE))
} else {
stop("Assertion failed: bug in code!")
}
} else if (class2 == "reflector_spct") {
return(reflector_spct(w.length=e2$w.length, Rfr=oper(e1, e2$Rfr),
strict.range = FALSE))
} else if (class2 == "response_spct") {
q2e(e2, action = "replace", byref = TRUE)
return(response_spct(w.length=e2$w.length,
s.e.response=oper(e1, e2$s.e.response)))
} else if (class2 == "chroma_spct") {
if (length(e2) == 3 && names(e2) == c("x", "y", "z")) {
return(chroma_spct(w.length = e2$w.length,
x = oper(e1["x"], e2$x),
y = oper(e1["y"], e2$y),
z = oper(e1["z"], e2$z)))
} else {
return(chroma_spct(w.length = e2$w.length,
x = oper(e1, e2$x),
y = oper(e1, e2$y),
z = oper(e1, e2$z)))
}
}
} else {
warning("The operation attempted in undefined according to Optics laws or the input is malformed")
return(NA)
}
}
# Private function which takes the operator as its third argument
# The operator must be enclosed in backticks to be recognized as such
#
# This avoids the repetition of very similar code for each operator
# as in the older versions.
#
# Photon-based counterpart of oper.e.generic_spct: operands are first
# converted to photon-based quantities (s.q.irrad, s.q.response) before
# the binary operator 'oper' is applied.
oper.q.generic_spct <- function(e1, e2, oper) {
  # object_spct objects hold several optical quantities at once, so a
  # single binary operator is ambiguous for them
  if (is.object_spct(e1) || is.object_spct(e2)) {
    stop("Operators are not defined for object_spct objects")
  }
  # logical operands behave as 0/1 integers, as in base R arithmetic
  if (is.logical(e1)) {
    e1 <- as.integer(e1)
  }
  if (is.logical(e2)) {
    e2 <- as.integer(e2)
  }
  # normalize operand order: a waveband, if present, is always e2
  if (is.waveband(e1)) {
    e_temp <- e2
    e2 <- e1
    e1 <- e_temp
  }
  if (is.numeric(e1)) {
    class1 <- "numeric"
  } else {
    class1 <- class_spct(e1)[1]
  }
  if (is.numeric(e2)) {
    class2 <- "numeric"
  } else if (is.waveband(e2)) {
    class2 <- "waveband"
  } else {
    class2 <- class_spct(e2)[1]
  }
  if (class1 == "raw_spct") {
    if (is.numeric(e2)) {
      z <- e1
      z[["counts"]] <- oper(z[["counts"]], e2)
      return(z)
    } else if (class2 == "raw_spct") {
      z <- oper_spectra(e1$w.length, e2$w.length,
                        e1$counts, e2$counts,
                        bin.oper=oper, trim="intersection")
      names(z)[2] <- "counts"
      setRawSpct(z, strict.range = FALSE)
      return(z)
    } else {
      warning("operation between 'raw_spct' and ", class(e2)[1],
              " objects not implemented")
    }
  } else if (class1 == "cps_spct") {
    if (is.numeric(e2)) {
      z <- e1
      z[["cps"]] <- oper(z[["cps"]], e2)
      return(z)
    } else if (class2 == "cps_spct") {
      z <- oper_spectra(e1$w.length, e2$w.length,
                        e1$cps, e2$cps,
                        bin.oper=oper, trim="intersection")
      names(z)[2] <- "cps"
      setCpsSpct(z, strict.range = FALSE)
      return(z)
    } else {
      warning("operation between 'cps_spct' and ", class(e2)[1],
              " objects not implemented")
    }
  } else if (class1 == "source_spct") {
    e2q(e1, action = "replace", byref = TRUE)
    if (is.waveband(e2)) {
      # only weighting (multiplication) by a waveband is meaningful
      if (!identical(oper, `*`)) {
        warning("The operation attempted is undefined according to Optics laws or the input is malformed")
        return(NA)
      }
      if (is_effective(e1) && !is_effective(e2)) {
        # bug fix: was assigned to misspelled 'bwswf.used', leaving
        # 'bswf.used' undefined when only e1 carries a BSWF
        bswf.used <- getBSWFUsed(e1)
      } else if (!is_effective(e1) && is_effective(e2)) {
        bswf.used <- labels(e2)[["name"]]
      } else if (is_effective(e1) && is_effective(e2)) {
        bswf.used <- paste(getBSWFUsed(e1), "*", labels(e2)[["name"]])
      } else if (!is_effective(e1) && !is_effective(e2)) {
        bswf.used <- "none"
      } else {
        stop("Failed assertion! BUG IN PACKAGE CODE")
      }
      e1 <- trim_spct(e1,
                      low.limit = min(e2),
                      high.limit = max(e2) - 1e-12,
                      verbose=FALSE,
                      use.hinges = getOption("photobiology.use.hinges",
                                             default=NULL))
      mult <- calc_multipliers(w.length=e1$w.length,
                               w.band=e2,
                               unit.out="photon",
                               unit.in="photon",
                               use.cached.mult = getOption("photobiology.use.cached.mult",
                                                           default = FALSE))
      return(source_spct(w.length=e1$w.length, s.q.irrad = e1$s.q.irrad * mult,
                         bswf.used = bswf.used, time.unit=getTimeUnit(e1),
                         strict.range = FALSE))
    }
    if (is.numeric(e2)) {
      z <- e1
      z[["s.q.irrad"]] <- oper(z[["s.q.irrad"]], e2)
      return(z)
    } else if (class2 == "source_spct") {
      e2q(e2, action = "replace", byref = TRUE)
      if (getTimeUnit(e1) != getTimeUnit(e2)) {
        warning("operands have different value for 'time.unit' attribute")
        return(NA)
      }
      if (is_effective(e1) || is_effective(e2)) {
        if (getBSWFUsed(e1) != getBSWFUsed(e2)) {
          warning("One or both operands are effective spectral irradiances")
          bswf.used <- paste(getBSWFUsed(e1), getBSWFUsed(e2))
        } else {
          bswf.used <- getBSWFUsed(e1)
        }
      } else {
        bswf.used <- "none"
      }
      z <- oper_spectra(e1$w.length, e2$w.length,
                        e1$s.q.irrad, e2$s.q.irrad,
                        bin.oper=oper, trim="intersection")
      names(z)[2] <- "s.q.irrad"
      setSourceSpct(z, time.unit = getTimeUnit(e1), bswf.used = bswf.used,
                    strict.range = FALSE)
      return(z)
    } else if (class2 == "filter_spct") {
      filter.quantity <- getOption("photobiology.filter.qty", default="transmittance")
      if (filter.quantity=="transmittance") {
        if (!identical(oper, `*`) && !identical(oper, `/`)) {
          warning("Only '*' and '/' are allowed between source_spct and filter_spct objects")
          return(NA)
        }
        A2T(e2, action = "add", byref = TRUE)
        z <- oper_spectra(e1$w.length, e2$w.length,
                          e1$s.q.irrad, e2$Tfr,
                          bin.oper=oper, trim="intersection")
        names(z)[2] <- "s.q.irrad"
        setSourceSpct(z, time.unit = getTimeUnit(e1), bswf.used = getBSWFUsed(e1),
                      strict.range = FALSE)
      } else if (filter.quantity=="absorbance") {
        if (!identical(oper, `*`)) return(NA)
        T2A(e2, action = "add", byref = TRUE)
        z <- oper_spectra(e1$w.length, e2$w.length,
                          e1$s.q.irrad, e2$A,
                          bin.oper=oper, trim="intersection")
        names(z)[2] <- "s.q.response"
        setResponseSpct(z)
      }
      return(z)
    } else if (class2 == "reflector_spct") {
      if (!identical(oper, `*`)) return(NA)
      # bug fix: reflector_spct objects carry 'Rfr', not 's.q.irrad';
      # the old code passed a NULL column to oper_spectra()
      z <- oper_spectra(e1$w.length, e2$w.length,
                        e1$s.q.irrad, e2$Rfr,
                        bin.oper=oper, trim="intersection")
      names(z)[2] <- "s.q.irrad"
      setSourceSpct(z, time.unit = getTimeUnit(e1), bswf.used = getBSWFUsed(e1),
                    strict.range = FALSE)
      return(z)
    } else if (class2 == "response_spct") {
      if (!identical(oper, `*`)) return(NA)
      e2q(e2, action = "replace", byref = TRUE)
      z <- oper_spectra(e1$w.length, e2$w.length,
                        e1$s.q.irrad, e2$s.q.response,
                        bin.oper=oper, trim="intersection")
      names(z)[2] <- "s.q.response"
      setResponseSpct(z)
      return(z)
    } else if (class2 == "chroma_spct") {
      if (!identical(oper, `*`)) return(NA)
      x <- oper_spectra(e1$w.length, e2$w.length,
                        e1$s.q.irrad, e2$x,
                        bin.oper=oper, trim="intersection")
      y <- oper_spectra(e1$w.length, e2$w.length,
                        e1$s.q.irrad, e2$y,
                        bin.oper=oper, trim="intersection")
      z <- oper_spectra(e1$w.length, e2$w.length,
                        e1$s.q.irrad, e2$z,
                        bin.oper=oper, trim="intersection")
      z <- dplyr::data_frame(w.length=x$w.length,
                             x=x[["s.irrad"]], y=y[["s.irrad"]], z=z[["s.irrad"]])
      setChromaSpct(z)
      return(z)
    } else { # this traps also e2 == "generic_spct"
      return(NA)
    }
  } else if (class1 == "filter_spct") {
    filter.quantity <- getOption("photobiology.filter.qty", default="transmittance")
    if (filter.quantity=="transmittance") {
      e1 <- A2T(e1)
      if (is.numeric(e2)) {
        z <- e1
        if (exists("A", z, inherits = FALSE)) {
          z[["A"]] <- NULL
        }
        z[["Tfr"]] <- oper(z[["Tfr"]], e2)
        return(z)
      } else if (class2 == "source_spct") {
        e2q(e2, action = "replace", byref = TRUE)
        if (!identical(oper, `*`)) return(NA)
        z <- oper_spectra(e1$w.length, e2$w.length,
                          e1$Tfr, e2$s.q.irrad,
                          bin.oper=oper, trim="intersection")
        names(z)[2] <- "s.q.irrad"
        setSourceSpct(z, time.unit = getTimeUnit(e2), bswf.used = getBSWFUsed(e2),
                      strict.range = FALSE)
      } else if (class2 == "filter_spct") {
        e2 <- A2T(e2)
        if (!identical(oper, `*`)) return(NA)
        z <- oper_spectra(e1$w.length, e2$w.length,
                          e1$Tfr, e2$Tfr,
                          bin.oper=oper, trim="intersection")
        names(z)[2] <- "Tfr"
        setFilterSpct(z, strict.range = FALSE)
        return(z)
      } else { # this traps optically illegal operations
        warning("The operation attempted is undefined according to Optics laws or the input is malformed")
        return(NA)
      }
    } else if (filter.quantity=="absorbance") {
      T2A(e1, action = "add", byref = TRUE)
      if (is.numeric(e2)) {
        # bug fix: 'z' was used below without ever being initialized
        z <- e1
        if (exists("Tfr", z, inherits = FALSE)) {
          z[["Tfr"]] <- NULL
        }
        z[["A"]] <- oper(z[["A"]], e2)
        return(z)
      } else if (class2 == "source_spct") {
        e2q(e2, action = "add", byref = TRUE)
        if (!identical(oper, `*`)) return(NA)
        z <- oper_spectra(e1$w.length, e2$w.length,
                          e1$A, e2$s.q.irrad,
                          bin.oper=oper, trim="intersection")
        names(z)[2] <- "s.q.response"
        setResponseSpct(z)
        return(z)
      } else if (class2 == "filter_spct") {
        T2A(e2, action = "add", byref = TRUE)
        # bug fix: '||' made this condition always TRUE, so every
        # operation on two absorbances was rejected; absorbances add
        if (!identical(oper, `+`) && !identical(oper, `-`)) {
          warning("Only '+' and '-' are allowed for two filter_spct objects (absorbance)")
          return(NA)
        }
        z <- oper_spectra(e1$w.length, e2$w.length,
                          e1$A, e2$A,
                          bin.oper=oper, trim="intersection")
        names(z)[2] <- "A"
        setFilterSpct(z, strict.range = FALSE)
        return(z)
      } else { # this traps optically illegal operations
        warning("The operation attempted is undefined according to Optics laws or the input is malformed")
        return(NA)
      }
    }
  } else if (class1 == "reflector_spct") {
    if (is.numeric(e2)) {
      z <- e1
      z[["Rfr"]] <- oper(z[["Rfr"]], e2)
      return(z)
    } else if (class2 == "reflector_spct") {
      z <- oper_spectra(e1$w.length, e2$w.length,
                        e1$Rfr, e2$Rfr,
                        bin.oper=oper, trim="intersection")
      names(z)[2] <- "Rfr"
      setReflectorSpct(z, strict.range = FALSE)
      return(z)
    } else if (class2 == "source_spct") {
      if (!identical(oper, `*`)) return(NA)
      e2q(e2, action = "replace", byref = TRUE)
      z <- oper_spectra(e1$w.length, e2$w.length,
                        e1$Rfr, e2$s.q.irrad,
                        bin.oper=oper, trim="intersection")
      names(z)[2] <- "s.q.irrad"
      setSourceSpct(z, time.unit = getTimeUnit(e2), bswf.used = getBSWFUsed(e2),
                    strict.range = FALSE)
      return(z)
    } else { # this traps optically illegal operations
      warning("The operation attempted is undefined according to Optics laws or the input is malformed")
      return(NA)
    }
  } else if (class1 == "response_spct") {
    e2q(e1, action = "replace", byref = TRUE)
    if (is.numeric(e2)) {
      z <- e1
      if (exists("s.e.response", z, inherits = FALSE)) {
        z[["s.e.response"]] <- NULL
      }
      z[["s.q.response"]] <- oper(z[["s.q.response"]], e2)
      return(z)
    } else if (class2 == "response_spct") {
      e2q(e2, action = "replace", byref = TRUE)
      z <- oper_spectra(e1$w.length, e2$w.length,
                        e1$s.q.response, e2$s.q.response,
                        bin.oper=oper, trim="intersection")
      names(z)[2] <- "s.q.response"
      setResponseSpct(z, time.unit=getTimeUnit(e1))
      return(z)
    } else if (class2 == "source_spct") {
      if (!identical(oper, `*`)) return(NA)
      e2q(e2, action = "replace", byref = TRUE)
      z <- oper_spectra(e1$w.length, e2$w.length,
                        e1$s.q.response, e2$s.q.irrad,
                        bin.oper=oper, trim="intersection")
      names(z)[2] <- "s.q.response"
      setResponseSpct(z)
      return(z)
    } else { # this traps optically illegal operations
      warning("The operation attempted is undefined according to Optics laws or the input is malformed")
      return(NA)
    }
  } else if (class1 == "chroma_spct") {
    if (is.numeric(e2)) {
      # bug fix: 'names(e2) == c(...)' is a length-3 logical, which is an
      # error inside if(); wrap in all()
      if (length(e2) == 3 && all(names(e2) == c("x", "y", "z"))) {
        return(chroma_spct(w.length = e1$w.length,
                           x = oper(e1$x, e2["x"]),
                           y = oper(e1$y, e2["y"]),
                           z = oper(e1$z, e2["z"])))
      } else {
        return(chroma_spct(w.length = e1$w.length,
                           x = oper(e1$x, e2),
                           y = oper(e1$y, e2),
                           z = oper(e1$z, e2)))
      }
    } else if (class2 == "chroma_spct") {
      x <- oper_spectra(e1$w.length, e2$w.length, e1$x, e2$x, bin.oper=oper, trim="intersection")
      y <- oper_spectra(e1$w.length, e2$w.length, e1$y, e2$y, bin.oper=oper, trim="intersection")
      z <- oper_spectra(e1$w.length, e2$w.length, e1$z, e2$z, bin.oper=oper, trim="intersection")
      return(chroma_spct(w.length=x$w.length, x=x[["s.irrad"]], y=y[["s.irrad"]], z=z[["s.irrad"]]))
    } else if (class2 == "source_spct") {
      e2q(e2, action = "replace", byref = TRUE)
      x <- oper_spectra(e1$w.length, e2$w.length, e1$x, e2$s.q.irrad, bin.oper=oper, trim="intersection")
      y <- oper_spectra(e1$w.length, e2$w.length, e1$y, e2$s.q.irrad, bin.oper=oper, trim="intersection")
      z <- oper_spectra(e1$w.length, e2$w.length, e1$z, e2$s.q.irrad, bin.oper=oper, trim="intersection")
      return(chroma_spct(w.length=x$w.length, x=x[["s.irrad"]], y=y[["s.irrad"]], z=z[["s.irrad"]]))
    }
  } else if (is.numeric(e1)) {
    if (class2 == "raw_spct") {
      # added for consistency with the energy-based dispatcher: raw
      # detector counts do not depend on the radiation unit in use
      z <- e2
      z[["counts"]] <- oper(e1, z[["counts"]])
      return(z)
    } else if (class2 == "cps_spct") {
      z <- e2
      z[["cps"]] <- oper(e1, z[["cps"]])
      return(z)
    } else if (class2 == "source_spct") {
      e2q(e2, action = "replace", byref = TRUE)
      z <- e2
      z[["s.q.irrad"]] <- oper(e1, z[["s.q.irrad"]])
      return(z)
    } else if (class2 == "filter_spct") {
      filter.quantity <- getOption("photobiology.filter.qty", default="transmittance")
      if (filter.quantity=="transmittance") {
        A2T(e2, action = "add", byref = TRUE)
        return(filter_spct(w.length=e2$w.length, Tfr=oper(e1, e2$Tfr), strict.range = FALSE))
      } else if (filter.quantity=="absorbance") {
        T2A(e2, action = "add", byref = TRUE)
        return(filter_spct(w.length=e2$w.length, A=oper(e1, e2$A), strict.range = FALSE))
      } else {
        stop("Assertion failed: bug in code!")
      }
    } else if (class2 == "reflector_spct") {
      return(reflector_spct(w.length=e2$w.length, Rfr=oper(e1, e2$Rfr), strict.range = FALSE))
    } else if (class2 == "response_spct") {
      e2q(e2, action = "replace", byref = TRUE)
      return(response_spct(w.length=e2$w.length, s.q.response=oper(e1, e2$s.q.response)))
    } else if (class2 == "chroma_spct") {
      # bug fix: the length/names test must apply to the numeric operand
      # 'e1', not to the chroma_spct 'e2', and needs all() (see above)
      if (length(e1) == 3 && all(names(e1) == c("x", "y", "z"))) {
        return(chroma_spct(w.length = e2$w.length,
                           x = oper(e1["x"], e2$x),
                           y = oper(e1["y"], e2$y),
                           z = oper(e1["z"], e2$z)))
      } else {
        return(chroma_spct(w.length = e2$w.length,
                           x = oper(e1, e2$x),
                           y = oper(e1, e2$y),
                           z = oper(e1, e2$z)))
      }
    }
  } else {
    warning("The operation attempted is undefined according to Optics laws or the input is malformed")
    return(NA)
  }
}
# multiplication ----------------------------------------------------------
#' Arithmetic Operators
#'
#' Multiplication operator for spectra. Dispatches to the energy-based or
#' photon-based implementation according to the radiation unit in use.
#'
#' @param e1 an object of class "generic_spct"
#' @param e2 an object of class "generic_spct"
#' @name times-.generic_spct
#' @export
#' @family math operators and functions
#'
'*.generic_spct' <- function(e1, e2) {
  unit <- getOption("photobiology.radiation.unit", default = "energy")
  if (unit == "energy") {
    oper.e.generic_spct(e1, e2, `*`)
  } else if (unit %in% c("photon", "quantum")) {
    oper.q.generic_spct(e1, e2, `*`)
  } else {
    NA
  }
}
# division ----------------------------------------------------------------
#' Arithmetic Operators
#'
#' Division operator for generic spectra. Dispatches to the energy-based or
#' photon-based implementation according to the radiation unit in use.
#'
#' @param e1 an object of class "generic_spct"
#' @param e2 an object of class "generic_spct"
#' @name slash-.generic_spct
#' @export
#' @family math operators and functions
#'
'/.generic_spct' <- function(e1, e2) {
  unit <- getOption("photobiology.radiation.unit", default = "energy")
  if (unit == "energy") {
    oper.e.generic_spct(e1, e2, `/`)
  } else if (unit %in% c("photon", "quantum")) {
    oper.q.generic_spct(e1, e2, `/`)
  } else {
    NA
  }
}
#' Arithmetic Operators
#'
#' Integer-division operator for generic spectra. Dispatches to the
#' energy-based or photon-based implementation according to the radiation
#' unit in use.
#'
#' @param e1 an object of class "generic_spct"
#' @param e2 an object of class "generic_spct"
#' @name div-.generic_spct
#' @export
#' @family math operators and functions
#'
'%/%.generic_spct' <- function(e1, e2) {
  unit <- getOption("photobiology.radiation.unit", default = "energy")
  if (unit == "energy") {
    oper.e.generic_spct(e1, e2, `%/%`)
  } else if (unit %in% c("photon", "quantum")) {
    oper.q.generic_spct(e1, e2, `%/%`)
  } else {
    NA
  }
}
#' Arithmetic Operators
#'
#' Remainder (modulo) operator for generic spectra. Dispatches to the
#' energy-based or photon-based implementation according to the radiation
#' unit in use.
#'
#' @param e1 an object of class "generic_spct"
#' @param e2 an object of class "generic_spct"
#' @name mod-.generic_spct
#' @export
#' @family math operators and functions
#'
'%%.generic_spct' <- function(e1, e2) {
  unit <- getOption("photobiology.radiation.unit", default = "energy")
  if (unit == "energy") {
    oper.e.generic_spct(e1, e2, `%%`)
  } else if (unit %in% c("photon", "quantum")) {
    oper.q.generic_spct(e1, e2, `%%`)
  } else {
    NA
  }
}
# Sum ---------------------------------------------------------------
#' Arithmetic Operators
#'
#' Addition operator for generic spectra. A missing second operand is
#' treated as unary plus. Dispatches to the energy-based or photon-based
#' implementation according to the radiation unit in use.
#'
#' @param e1 an object of class "generic_spct"
#' @param e2 an object of class "generic_spct"
#' @name plus-.generic_spct
#' @export
#' @family math operators and functions
#'
'+.generic_spct' <- function(e1, e2 = NULL) {
  unit <- getOption("photobiology.radiation.unit", default = "energy")
  if (is.null(e2)) {
    # unary plus: equivalent to adding zero
    e2 <- 0.0
  }
  if (unit == "energy") {
    oper.e.generic_spct(e1, e2, `+`)
  } else if (unit %in% c("photon", "quantum")) {
    oper.q.generic_spct(e1, e2, `+`)
  } else {
    NA
  }
}
# Minus -------------------------------------------------------------------
#' Arithmetic Operators
#'
#' Subtraction operator for generic spectra. A missing second operand is
#' treated as unary minus. Dispatches to the energy-based or photon-based
#' implementation according to the radiation unit in use.
#'
#' @param e1 an object of class "generic_spct"
#' @param e2 an object of class "generic_spct"
#' @name minus-.generic_spct
#' @export
#' @family math operators and functions
#'
'-.generic_spct' <- function(e1, e2 = NULL) {
  unit <- getOption("photobiology.radiation.unit", default = "energy")
  if (is.null(e2)) {
    # unary minus: compute 0 - e1
    e2 <- e1
    e1 <- 0.0
  }
  if (unit == "energy") {
    oper.e.generic_spct(e1, e2, `-`)
  } else if (unit %in% c("photon", "quantum")) {
    oper.q.generic_spct(e1, e2, `-`)
  } else {
    NA
  }
}
# other operators ---------------------------------------------------------------------
#' Arithmetic Operators
#'
#' Power operator for spectra. Dispatches to the energy-based or
#' photon-based implementation according to the radiation unit in use.
#'
#' @param e1 an object of class "generic_spct"
#' @param e2 a numeric vector. possibly of length one.
#' @export
#' @family math operators and functions
#'
'^.generic_spct' <- function(e1, e2) {
  unit <- getOption("photobiology.radiation.unit", default = "energy")
  if (unit == "energy") {
    oper.e.generic_spct(e1, e2, `^`)
  } else if (unit == "photon" || unit == "quantum") {
    oper.q.generic_spct(e1, e2, `^`)
  } else {
    NA
  }
}
# math functions ----------------------------------------------------------
#' Math function dispatcher for spectra
#'
#' Function that dispatches the function supplied as argument using different variables depending
#' on the class of the spectrum argument.
#'
#' @param x an object of class "generic_spct"
#' @param .fun an R function with signature function(x, ...)
#' @param ... additional arguments passed to .fun
#'
#' @keywords internal
#'
f_dispatcher_spct <- function(x, .fun, ...) {
  if (is.raw_spct(x)) {
    z <- x
    z[["counts"]] <- .fun(z[["counts"]], ...)
    check_spct(z)
    return(z)
  } else if (is.cps_spct(x)) {
    z <- x
    z[["cps"]] <- .fun(z[["cps"]], ...)
    check_spct(z)
    return(z)
  } else if (is.filter_spct(x)) {
    # apply .fun to Tfr or A depending on the filter quantity in use
    filter.qty <- getOption("photobiology.filter.qty", default="transmittance")
    if (filter.qty == "transmittance") {
      z <- A2T(x, action = "replace", byref = FALSE)
      z[["Tfr"]] <- .fun(z[["Tfr"]], ...)
    } else if (filter.qty == "absorbance") {
      z <- T2A(x, action = "replace", byref = FALSE)
      z[["A"]] <- .fun(z[["A"]], ...)
    } else {
      stop("Unrecognized 'filter.qty': ", filter.qty)
    }
    check_spct(z)
    return(z)
  } else if (is.reflector_spct(x)) {
    z <- x
    z[["Rfr"]] <- .fun(z[["Rfr"]], ...)
    check_spct(z)
    return(z)
  } else if (is.source_spct(x)) {
    # apply .fun to energy or photon irradiance depending on unit in use
    unit <- getOption("photobiology.radiation.unit", default = "energy")
    if (unit == "energy") {
      z <- q2e(x, action = "replace", byref = FALSE)
      z[["s.e.irrad"]] <- .fun(z[["s.e.irrad"]], ...)
      check_spct(z)
      return(z)
    } else if (unit == "photon" || unit == "quantum") {
      z <- e2q(x, action = "replace", byref = FALSE)
      z[["s.q.irrad"]] <- .fun(z[["s.q.irrad"]], ...)
      check_spct(z)
      return(z)
    } else {
      return(source_mspct())
    }
  } else if (is.response_spct(x)) {
    unit <- getOption("photobiology.radiation.unit", default = "energy")
    if (unit == "energy") {
      z <- q2e(x, action = "replace", byref = FALSE)
      z[["s.e.response"]] <- .fun(z[["s.e.response"]], ...)
      # consistency fix: this was the only branch skipping validation
      check_spct(z)
      return(z)
    } else if (unit == "photon" || unit == "quantum") {
      z <- e2q(x, action = "replace", byref = FALSE)
      z[["s.q.response"]] <- .fun(z[["s.q.response"]], ...)
      check_spct(z)
      return(z)
    } else {
      # consistency fix: previously fell through returning NULL invisibly;
      # mirror the source_spct branch by returning an empty collection
      return(response_mspct())
    }
  } else if (is.chroma_spct(x)) {
    z <- x
    z[["x"]] <- .fun(z[["x"]], ...)
    z[["y"]] <- .fun(z[["y"]], ...)
    z[["z"]] <- .fun(z[["z"]], ...)
    check_spct(z)
    return(z)
  } else {
    warning("Function not implemented for ", class(x)[1], " objects.")
    return(NA)
  }
}
#' Logarithms and Exponentials
#'
#' Logarithms and Exponentials for Spectra. The functions are applied to the
#' spectral data, not the wavelengths. The quantity in the spectrum to which the
#' function is applied depends on the class of \code{x} and the current value of
#' output options
#'
#' @name log
#'
#' @param x an object of class "generic_spct"
#' @param base a positive number: the base with respect to which logarithms are
#'   computed. Defaults to e=exp(1).
#' @export
#' @family math operators and functions
#'
log.generic_spct <- function(x, base = exp(1)) {
  f_dispatcher_spct(x, .fun = log, base = base)
}
#' @rdname log
#'
#' @export
#'
log2.generic_spct <- function(x) {
  # base-2 logarithm of the spectral quantity
  f_dispatcher_spct(x, .fun = log, base = 2)
}
#' @rdname log
#'
#' @export
#'
log10.generic_spct <- function(x) {
  # base-10 logarithm of the spectral quantity
  f_dispatcher_spct(x, .fun = log, base = 10)
}
#' @rdname log
#'
#' @export
#'
exp.generic_spct <- function(x) {
  f_dispatcher_spct(x, .fun = exp)
}
#' Miscellaneous Mathematical Functions
#'
#' \code{abs(x)} computes the absolute value of \code{x}, \code{sqrt(x)}
#' computes the (principal) square root of \code{x}. The functions are applied
#' to the spectral data, not the wavelengths. The quantity in the spectrum to
#' which the function is applied depends on the class of \code{x} and the current
#' value of output options.
#'
#' @name MathFun
#'
#' @param x an object of class "generic_spct"
#' @export
#' @family math operators and functions
#'
sqrt.generic_spct <- function(x) {
  f_dispatcher_spct(x, .fun = sqrt)
}
#' @rdname MathFun
#'
#' @export
#'
abs.generic_spct <- function(x) {
  f_dispatcher_spct(x, .fun = abs)
}
#' Sign
#'
#' \code{sign} returns a vector with the signs of the corresponding elements of
#' x (the sign of a real number is 1, 0, or -1 if the number is positive, zero,
#' or negative, respectively).
#'
#' @name sign
#'
#' @param x an object of class "generic_spct"
#' @export
#' @family math operators and functions
#'
sign.generic_spct <- function(x) {
  f_dispatcher_spct(x, .fun = sign)
}
#' @title
#' Rounding of Numbers
#'
#' @description
#' \code{ceiling} takes a single numeric argument x and returns a numeric vector
#' containing the smallest integers not less than the corresponding elements of
#' x. \\
#' \code{floor} takes a single numeric argument x and returns a numeric vector
#' containing the largest integers not greater than the corresponding elements
#' of x. \\
#' \code{trunc} takes a single numeric argument x and returns a numeric vector
#' containing the integers formed by truncating the values in x toward 0. \\
#' \code{round} rounds the values in its first argument to the specified number of
#' decimal places (default 0). \\
#' \code{signif} rounds the values in its first argument to the specified number of
#' significant digits. \\
#' The functions are applied to the spectral data, not the wavelengths. The
#' quantity in the spectrum to which the function is applied depends on the class
#' of \code{x} and the current value of output options.
#'
#' @param x an object of class "generic_spct" or a derived class.
#' @param digits integer indicating the number of decimal places (round) or
#'   significant digits (signif) to be used. Negative values are allowed (see
#'   ‘Details’).
#'
#' @name round
#' @export
#' @family math operators and functions
#'
round.generic_spct <- function(x, digits = 0) {
  # bug fix: previously dispatched 'sign' instead of 'round', replacing
  # the spectral data by their signs and silently ignoring 'digits'
  f_dispatcher_spct(x, round, digits = digits)
}
#' @rdname round
#'
#' @export
#'
signif.generic_spct <- function(x, digits = 6) {
  f_dispatcher_spct(x, .fun = signif, digits = digits)
}
#' @rdname round
#'
#' @export
#'
ceiling.generic_spct <- function(x) {
  f_dispatcher_spct(x, .fun = ceiling)
}
#' @rdname round
#'
#' @export
#'
floor.generic_spct <- function(x) {
  f_dispatcher_spct(x, .fun = floor)
}
#' @rdname round
#'
#' @param ... arguments to be passed to methods.
#'
#' @export
#'
trunc.generic_spct <- function(x, ...) {
  f_dispatcher_spct(x, .fun = trunc, ...)
}
#' Trigonometric Functions
#'
#' Trigonometric functions for object of \code{generic_spct} and derived
#' classes. \\
#' The functions are applied to the spectral data, not the wavelengths. The
#' quantity in the spectrum to which the function is applied depends on the class
#' of \code{x} and the current value of output options.
#'
#' @name Trig
#'
#' @param x an object of class "generic_spct" or a derived class.
#'
#' @export
#'
cos.generic_spct <- function(x) {
  f_dispatcher_spct(x, .fun = cos)
}
#' @rdname Trig
#'
#' @export
#'
sin.generic_spct <- function(x) {
  f_dispatcher_spct(x, .fun = sin)
}
#' @rdname Trig
#'
#' @export
#'
tan.generic_spct <- function(x) {
  f_dispatcher_spct(x, .fun = tan)
}
#' @rdname Trig
#'
#' @export
#'
acos.generic_spct <- function(x) {
  f_dispatcher_spct(x, .fun = acos)
}
#' @rdname Trig
#'
#' @export
#'
asin.generic_spct <- function(x) {
  f_dispatcher_spct(x, .fun = asin)
}
#' @rdname Trig
#'
#' @export
#'
atan.generic_spct <- function(x) {
  f_dispatcher_spct(x, .fun = atan)
}
#' @rdname Trig
#'
#' @export
#'
cospi.generic_spct <- function(x) {
  f_dispatcher_spct(x, cospi)
}
#' @rdname Trig
#'
#' @export
#'
sinpi.generic_spct <- function(x) {
  f_dispatcher_spct(x, .fun = sinpi)
}
#' @rdname Trig
#'
#' @export
#'
tanpi.generic_spct <- function(x) {
  f_dispatcher_spct(x, .fun = tanpi)
}
# transmittance and absorbance --------------------------------------------
# A2T ---------------------------------------------------------------------
#' Convert absorbance into transmittance
#'
#' Function that converts absorbance (A) into transmittance as a fraction of
#' one, using Tfr = 10^-A.
#'
#' @param x an R object
#' @param action a character string, "add" to keep the A column after the
#'   conversion or "replace" to drop it
#' @param byref logical indicating if new object will be created by reference or by copy of x
#' @param ... not used in current version
#'
#' @export A2T
#' @family quantity conversion functions
#'
A2T <- function(x, action, byref, ...) UseMethod("A2T")
#' @describeIn A2T Default method for generic function
#'
#' @export
#'
A2T.default <- function(x, action=NULL, byref=FALSE, ...) {
return(10^-x)
}
#' @describeIn A2T Method for filter spectra
#'
#' @export
#'
A2T.filter_spct <- function(x, action="add", byref=FALSE, ...) {
if (byref) {
name <- substitute(x)
}
if (exists("Tfr", x, inherits=FALSE)) {
NULL
} else if (exists("A", x, inherits=FALSE)) {
x[["Tfr"]] <- 10^-x[["A"]]
} else {
x[["Tfr"]] <- NA
}
if (action=="replace" && exists("A", x, inherits=FALSE)) {
x[["A"]] <- NULL
}
if (byref && is.name(name)) { # this is a temporary safe net
name <- as.character(name)
assign(name, x, parent.frame(), inherits = TRUE)
}
return(x)
}
#' @describeIn A2T Method for collections of filter spectra
#'
#' @export
#'
A2T.filter_mspct <- function(x, action = "add", byref = FALSE, ...) {
msmsply(x, A2T, action = action, byref = byref, ...)
}
# T2A ---------------------------------------------------------------------
#' Convert transmittance into absorbance.
#'
#' Function that coverts transmittance into absorbance (fraction).
#'
#' @param x an R object
#' @param action character Allowed values "replace" and "add"
#' @param byref logical indicating if new object will be created by reference or by copy of x
#' @param ... not used in current version
#'
#' @export T2A
#' @family quantity conversion functions
#'
T2A <- function(x, action, byref, ...) UseMethod("T2A")
#' @describeIn T2A Default method for generic function
#'
#' @export
#'
T2A.default <- function(x, action=NULL, byref=FALSE, ...) {
if (any(x < 0)) {
Tfr.zero <- getOption("photobiology.Tfr.zero", default = 1e-10)
warning("Replacing zeros by", Tfr.zero)
x <- ifelse(x <= 0, Tfr.zero, x)
}
return(-log10(x))
}
#' @describeIn T2A Method for filter spectra
#'
#' @export
#'
T2A.filter_spct <- function(x, action="add", byref=FALSE, ...) {
if (byref) {
name <- substitute(x)
}
if (exists("A", x, inherits=FALSE)) {
NULL
} else if (exists("Tfr", x, inherits=FALSE)) {
x[["A"]] <- -log10(x[["Tfr"]])
} else {
x[["A"]] <- NA
}
if (action=="replace" && exists("Tfr", x, inherits=FALSE)) {
x[["Tfr"]] <- NULL
}
if (byref && is.name(name)) { # this is a temporary safe net
name <- as.character(name)
assign(name, x, parent.frame(), inherits = TRUE)
}
if (any(is.infinite(x$A))) {
warning("'Inf' absorbance values generated as some Tfr values were equal to zero!")
}
return(x)
}
#' @describeIn T2A Method for collections of filter spectra
#'
#' @export
#'
T2A.filter_mspct <- function(x, action = "add", byref = FALSE, ...) {
msmsply(x, T2A, action = action, byref = byref, ...)
}
# energy - photon and photon - energy conversions -------------------------
# energy to photon ---------------------------------------------------------------------
#' Convert energy-based quantities into photon-based quantities.
#'
#' Function that coverts spectral energy irradiance into spectral photon irradiance (molar).
#'
#' @param x an R object
#' @param action a character string
#' @param byref logical indicating if new object will be created by reference or by copy of x
#' @param ... not used in current version
#'
#' @export e2q
#' @family quantity conversion functions
#'
e2q <- function(x, action, byref, ...) UseMethod("e2q")
#' @describeIn e2q Default method
#'
#' @export
#'
e2q.default <- function(x, action="add", byref=FALSE, ...) {
return(NA)
}
#' @describeIn e2q Method for spectral irradiance
#'
#' @export
#'
e2q.source_spct <- function(x, action="add", byref=FALSE, ...) {
if (byref) {
name <- substitute(x)
}
if (exists("s.q.irrad", x, inherits=FALSE)) {
NULL
} else if (exists("s.e.irrad", x, inherits=FALSE)) {
x[["s.q.irrad"]] <- x[["s.e.irrad"]] * e2qmol_multipliers(x[["w.length"]])
} else {
x[["s.q.irrad"]] <- NA
}
if (action=="replace" && exists("s.e.irrad", x, inherits=FALSE)) {
x[["s.e.irrad"]] <- NULL
}
if (byref && is.name(name)) { # this is a temporary safe net
name <- as.character(name)
assign(name, x, parent.frame(), inherits = TRUE)
}
return(x)
}
#' @describeIn e2q Method for spectral responsiveness
#'
#' @export
#'
e2q.response_spct <- function(x, action="add", byref=FALSE, ...) {
if (byref) {
name <- substitute(x)
}
if (exists("s.q.response", x, inherits=FALSE)) {
NULL
} else if (exists("s.e.response", x, inherits=FALSE)) {
x[["s.q.response"]] <- x[["s.e.response"]] / e2qmol_multipliers(x[["w.length"]])
} else {
x[["s.q.response"]] <- NA
}
if (action=="replace" && exists("s.e.response", x, inherits=FALSE)) {
x[["s.e.response"]] <- NULL
}
if (byref && is.name(name)) { # this is a temporary safe net
name <- as.character(name)
assign(name, x, parent.frame(), inherits = TRUE)
}
return(x)
}
#' @describeIn e2q Method for collections of (light) source spectra
#'
#' @export
#'
e2q.source_mspct <- function(x, action = "add", byref = FALSE, ...) {
msmsply(x, e2q, action = action, byref = byref, ...)
}
#' @describeIn e2q Method for for collections of response spectra
#'
#' @export
#'
e2q.response_mspct <- function(x, action = "add", byref = FALSE, ...) {
msmsply(x, e2q, action = action, byref = byref, ...)
}
# photon to energy ---------------------------------------------------------------------
#' Convert photon-based quantities into energy-based quantities
#'
#' Function that coverts spectral photon irradiance (molar) into spectral energy irradiance.
#'
#' @param x an R object
#' @param action a character string
#' @param byref logical indicating if new object will be created by reference or by copy of x
#' @param ... not used in current version
#'
#' @export q2e
#' @family quantity conversion functions
#'
q2e <- function(x, action, byref, ...) UseMethod("q2e")
#' @describeIn q2e Default method
#'
#' @export
#'
q2e.default <- function(x, action="add", byref=FALSE, ...) {
return(NA)
}
#' @describeIn q2e Method for spectral irradiance
#'
#' @export
#'
q2e.source_spct <- function(x, action="add", byref=FALSE, ...) {
if (byref) {
name <- substitute(x)
}
if (exists("s.e.irrad", x, inherits=FALSE)) {
NULL
} else if (exists("s.q.irrad", x, inherits=FALSE)) {
x[["s.e.irrad"]] <- x[["s.q.irrad"]] / e2qmol_multipliers(x[["w.length"]])
} else {
x[["s.e.irrad"]] <- NA
}
if (action=="replace" && exists("s.q.irrad", x, inherits=FALSE)) {
x[["s.q.irrad"]] <- NULL
}
if (byref && is.name(name)) { # this is a temporary safe net
name <- as.character(name)
assign(name, x, parent.frame(), inherits = TRUE)
}
return(x)
}
#' @describeIn q2e Method for spectral responsiveness
#'
#' @export
#'
q2e.response_spct <- function(x, action="add", byref=FALSE, ...) {
if (byref) {
name <- substitute(x)
}
if (exists("s.e.response", x, inherits=FALSE)) {
NULL
} else if (exists("s.q.response", x, inherits=FALSE)) {
x[["s.e.response"]] <- x[["s.q.response"]] * e2qmol_multipliers(x[["w.length"]])
} else {
x[["s.e.response"]] <- NA
}
if (action=="replace" && exists("s.q.response", x, inherits=FALSE)) {
x[["s.q.response"]] <- NULL
}
if (byref && is.name(name)) { # this is a temporary safe net
name <- as.character(name)
assign(name, x, parent.frame(), inherits = TRUE)
}
return(x)
}
#' @describeIn q2e Method for collections of (light) source spectra
#'
#' @export
#'
q2e.source_mspct <- function(x, action = "add", byref = FALSE, ...) {
msmsply(x, q2e, action = action, byref = byref, ...)
}
#' @describeIn q2e Method for collections of response spectra
#'
#' @export
#'
q2e.response_mspct <- function(x, action = "add", byref = FALSE, ...) {
msmsply(x, q2e, action = action, byref = byref, ...)
}
|
a84c51322ca070fcf8aaa488aa6e766bfb387bb3 | dd8132404e8c7b028cb13cba904c50aace01c6a7 | /swt/src/spc/se.u/source/overlay.r | 71d1df0df8afe7ae96ce6a440c7e38321144f118 | [] | no_license | arnoldrobbins/gt-swt | d0784d058fab9b8b587f850aeccede0305d5b2f8 | 2922b9d14b396ccd8947d0a9a535a368bec1d6ae | refs/heads/master | 2020-07-29T09:41:19.362530 | 2019-10-04T11:36:01 | 2019-10-04T11:36:01 | 209,741,739 | 15 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,310 | r | overlay.r | # overlay --- let user edit lines directly
# overlay --- let the user edit buffer lines directly ("overlay" editing).
# Each line in the range [Line1, Line2] is placed on the screen's command
# row and handed to getcmd for in-place editing.  Cursor-movement
# terminators (CURSOR_UP/DOWN/SAME) move between lines without leaving
# overlay mode; a FUNNY terminator aborts the whole loop.  A changed line
# replaces the original in the buffer, preserving its mark name.
# status (out): OK on success, ERR if injecting a line failed.
   subroutine overlay (status)
   integer status
   include SE_COMMON
   character termination, savtxt (MAXLINE), empty (2)
   integer lng, kname, vcol, lcurln, scurln
   integer inject, nextln, equal
   pointer indx
   pointer getind, gettxt
# empty: a line consisting of just a newline, used to seed an empty buffer
   data empty / NEWLINE, EOS /
   status = OK
# an empty buffer has nothing to overlay; inject one blank line first
   if (Line1 == 0) {
      Curln = 0
      status = inject (empty)
      if (status == ERR)
         return
      First_affected = 1
      Line1 = 1
      Line2 += 1
      }
# visit each line in the requested range
   for (lcurln = Line1; lcurln <= Line2; lcurln += 1) {
      Curln = lcurln
      vcol = Overlay_col
      repeat {
# bring the current line into view and edit it on the command row
         call adjust_window (Curln, Curln)
         call updscreen
         Cmdrow = Curln - Topln + Toprow
         indx = gettxt (Curln)
         lng = Lineleng (indx)
         if (Txt (lng - 1) == NEWLINE)   # clobber newline, if there
            lng -= 1
# vcol <= 0 means "start editing at end of line"
         if (vcol <= 0)
            vcol = lng
# pad the line with blanks out to the starting column
         while (lng < vcol) {
            Txt (lng) = ' 'c
            lng += 1
            }
         Txt (lng) = NEWLINE
         Txt (lng + 1) = EOS
         call move$ (Txt, savtxt, lng + 1)   # make a copy of the line
# hand the line to the user; termination reports how editing ended
         call getcmd (Txt, Firstcol, vcol, termination)
         if (First_affected > Curln)
            First_affected = Curln
         if (termination == FUNNY)   # user aborted overlay mode
            break 2
         if (equal (Txt, savtxt) == NO) {   # was the line changed?
# replace the old line with the edited text, keeping its mark name
            kname = Markname (indx)
            call delete (Curln, Curln, status)
            scurln = Curln
            if (status == OK)
               status = inject (Txt)
            if (status == ERR)
               break 2
            indx = getind (nextln (scurln))
            Markname (indx) = kname
            }
# interpret cursor-movement terminators, wrapping at the buffer ends
         select (termination)
            when (CURSOR_UP)
               if (Curln > 1)
                  Curln -= 1
               else
                  Curln = Lastln
            when (CURSOR_DOWN)
               if (Curln < Lastln)
                  Curln += 1
               else
                  Curln = min (1, Lastln)
            when (CURSOR_SAME)
               vcol = 1   # re-edit the same line from column 1
         } until (termination ~= CURSOR_UP
                  && termination ~= CURSOR_DOWN
                  && termination ~= CURSOR_SAME)
      }
# restore the command row to its normal position at the bottom
   Cmdrow = Nrows - 1
   return
   end
|
9bbed38cad1a1e839c77e0d113c270f3c5488484 | a1738539620913a8cf50d51517fcd9df5c79fbd6 | /man/NOAA_plot.Rd | caee9b326684614bb451671fb831da0b072f50a2 | [] | no_license | Lijiuqi/DLMtool | aa5ec72fd83436ebd95ebb6e80f61053603c901c | bfafc37c100680b5757f03ae22c5d4062829258b | refs/heads/master | 2021-05-05T22:07:48.346019 | 2018-01-02T22:22:45 | 2018-01-02T22:22:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 785 | rd | NOAA_plot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MSEObj.r
\name{NOAA_plot}
\alias{NOAA_plot}
\title{National Oceanographic and Atmospheric Administration default plot 1}
\usage{
NOAA_plot(MSEobj,nam=NA,type=NA,panel=T)
}
\arguments{
\item{MSEobj}{An object of class MSE}
\item{nam}{Title of plot}
\item{type}{Plots full range of data if NA. Plots a subset that meet
thresholds if not NA.}
\item{panel}{Should a two panel plot be made or should plots be made in
sequence.}
}
\value{
A table of performance metrics.
}
\description{
A preliminary plotting function that returns trade-off plots and a
performance table for total yield, variability in yield, probability of
overfishing, and the likelihood of biomass dropping below 50 per cent of BMSY
}
\author{
T. Carruthers
}
|
484642dc4ec1de878c869478192c7e28669bb71f | 4d0db2930fb990437606ee9f2e2e7ab69ead14bd | /R/coalgen.default.R | 226b078c8ba38d0a2e3bb9b15f4fd3b12f4a89ab | [] | no_license | mmlopezu/coalsieve | b8b73af142c51b7f2e986d5211c74c5b7539f280 | 9c024dbef57364b7c170dc93243ae9cdc6ef25ad | refs/heads/master | 2021-01-18T03:31:13.975276 | 2012-05-17T16:20:06 | 2012-05-17T16:20:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 108 | r | coalgen.default.R | coalgen.default <-
function(sample,...)
{
sample<-as.matrix(sample)
out<-1
class(out)<-"coalgen"
out
}
|
216aac66a6914e367b63c004f40fb9192be9a3e9 | 43eae47269ee5a073218dcda8eed82e3e18c6312 | /man/tableS3table1.Rd | 13b1c6cd996f22e6967c1c941fa67110670d6fc6 | [] | no_license | wlandau/fbseqStudies | 4e494d25165130f95a983ee64d751ba3fb24bd3d | 0169ac5a00d457a261401f926b37e08587eace64 | refs/heads/main | 2021-01-19T04:39:24.890203 | 2017-10-21T02:36:07 | 2017-10-21T02:36:07 | 45,758,088 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 416 | rd | tableS3table1.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{tableS3table1}
\alias{tableS3table1}
\title{tableS3table1}
\description{
a data frame object. Contains the "Table 1" section of Supplementary Table S3
of the Paschold et al. (2012) study (http://www.ncbi.nlm.nih.gov/pubmed/23086286).
}
\author{
Paschold et al. (http://www.ncbi.nlm.nih.gov/pubmed/23086286)
}
|
b703261e0fe68e57255675fefab4b0954c35f878 | 2f0947cf2542bbeb5439706072e74ea65c9e2e35 | /temp_func.R | 91e756a1ad7307907b286c7e5882f24c881149bc | [] | no_license | willbmisled/lakes_database | 90ae61bb1beecfbdc819d90bcf3661701b719188 | cf0759248edddad80af59dd28cb495b1140a64b0 | refs/heads/master | 2020-04-05T11:47:25.487638 | 2018-07-26T14:59:59 | 2018-07-26T14:59:59 | 81,237,106 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,818 | r | temp_func.R |
# formatData: reshape one raw NLA-2012 "wide" data file into a tidy table,
# driven by a hand-edited data-structure ("struc") file that maps each raw
# DATA_COLUMN to a (column_name, parameter) pair or flags it for deletion.
#   x - file name of the raw NLA csv (looked up under data/raw_data/)
#   y - file name of the modified data-structure csv (under data/workfiles/)
# Returns a list of two data frames: [[1]] the reformatted data, [[2]] the
# structure table that drove the reformatting.
# NOTE(review): the tryCatch() fallback relies on read.csv() signalling a
# *warning* when the first path is missing; a hard error would not trigger
# the alternate '../data/...' path -- confirm this is the intended behavior.
formatData<-function(x='nla2012_waterchem_wide.csv',y='waterchem.csv'){
  #read the raw data; trycatch will look for the data in two directories
  a<-tryCatch(read.csv(paste('data/raw_data/',x,sep=''),sep=',',na.strings = c(""," ","NA","N/A")),
              warning = function(e) read.csv(paste('../data/raw_data/',x,sep=''),sep=',',na.strings = c(""," ","NA","N/A")))
  #get the modified data structure (same two-directory fallback)
  struc<-tryCatch(read.csv(paste('data/workfiles/',y,sep=''),sep=',',na.strings = c(""," ","NA","N/A")),
                  warning = function(e) read.csv(paste('../data/workfiles/',y,sep=''),sep=',',na.strings = c(""," ","NA","N/A")))
  # classify the raw columns: 'keeps' are id columns carried through as-is,
  # 'gathers' are measurement columns to be stacked long, 'deletes' dropped.
  # NOTE(review): 'gathers' excludes rows with column_name=='delete' while
  # 'deletes' selects rows with parameter=='delete'; verify the structure
  # files really use both conventions.
  keeps<-filter(struc,parameter=='column_name')%>%select(DATA_COLUMN)%>% unlist(use.names = FALSE)
  gathers<-filter(struc,parameter!='column_name',column_name!='delete')%>%select(DATA_COLUMN)%>%distinct()%>% unlist(use.names = FALSE)
  deletes<-filter(struc,parameter=='delete')%>%select(DATA_COLUMN)%>% unlist(use.names = FALSE)
  # stack the measurement columns into long format (DATA_COLUMN/value pairs)
  wc<-gather(a,DATA_COLUMN,value,one_of(gathers))
  # deconstruct the COLUMN_NAME (parameter):
  # df "struc" has the new row and column names
  # delete unwanted columns
  a<-left_join(wc,struc)%>%select(-one_of(deletes))
  # rebuild a wide table: one block of columns per distinct 'column_name',
  # joined back onto the distinct kept id columns
  v<-distinct(a,column_name)%>% unlist(use.names = FALSE)
  ch<-select(wc,one_of(keeps)) %>% distinct()
  for(i in c(1:length(v))){
    w<-filter(a,column_name==v[i])%>%
      select(UID,parameter,value)
    names(w)[3]<-v[i]  # name the value column after its column_name
    ch<-left_join(ch,w)
  }
  #output
  return(list(ch,struc))
}

# Build the water-chemistry and chlorophyll-a tables from their raw files
q<-formatData('nla2012_waterchem_wide.csv','waterchem.csv')
waterchem<-q[[1]]
struc_waterchem<-q[[2]]
q<-formatData('nla2012_chla_wide.csv','chla.csv')
chla_new<-q[[1]]
struc_chla<-q[[2]]
|
d43a30172a22c48ad3786648e9023d456e3a7d1c | 57854e2a3731cb1216b2df25a0804a91f68cacf3 | /tests/testthat/test-api.R | 76fb1d62dae58d505420a98c54efa46a1994568d | [] | no_license | persephonet/rcrunch | 9f826d6217de343ba47cdfcfecbd76ee4b1ad696 | 1de10f8161767da1cf510eb8c866c2006fe36339 | refs/heads/master | 2020-04-05T08:17:00.968846 | 2017-03-21T23:25:06 | 2017-03-21T23:25:06 | 50,125,918 | 1 | 0 | null | 2017-02-10T23:23:34 | 2016-01-21T17:56:57 | R | UTF-8 | R | false | false | 2,116 | r | test-api.R | context("API calling")
test_that("Deprecated endpoints tell user to upgrade", {
fake410 <- fakeResponse("http://crunch.io/410", status_code=410)
expect_error(handleAPIresponse(fake410),
paste("The API resource at http://crunch.io/410 has moved permanently.",
"Please upgrade crunch to the latest version."))
})
with_mock_HTTP({
test_that("crunch.debug does not print if disabled", {
expect_POST(
expect_output(crPOST("api/", body='{"value":1}'),
NA),
"api/",
'{"value":1}')
})
test_that("crunch.debug logging if enabled", {
with(temp.option(crunch.debug=TRUE), {
expect_POST(
expect_output(crPOST("api/", body='{"value":1}'),
'\n {"value":1} \n',
fixed=TRUE),
"api/",
'{"value":1}')
## Use testthat:: so that it doesn't print ds. Check for log printing
testthat::expect_output(ds <- loadDataset("test ds"),
NA)
})
})
})
if (run.integration.tests) {
test_that("Request headers", {
skip_on_jenkins("Don't fail the build if httpbin is down")
r <- try(crGET("http://httpbin.org/gzip"))
expect_true(r$gzipped)
expect_true(grepl("gzip", r$headers[["Accept-Encoding"]]))
expect_true(grepl("rcrunch", r$headers[["User-Agent"]]))
})
test_that("crunchUserAgent", {
expect_true(grepl("rcrunch", crunchUserAgent()))
expect_true(grepl("libcurl", crunchUserAgent()))
expect_error(crunchUserAgent("anotherpackage/3.1.4"),
NA)
expect_true(grepl("anotherpackage", crunchUserAgent("anotherpackage")))
})
with_test_authentication({
test_that("API root can be fetched", {
expect_true(is.shojiObject(getAPIRoot()))
})
})
test_that("API calls throw an error if user is not authenticated", {
logout()
expect_error(getAPIRoot(),
"You are not authenticated. Please `login\\(\\)` and try again.")
})
}
|
b7afc45e11c97eb93f1d3326e79f985f48c1f64c | fc85028bf61541953d619dbd5454f9931148d550 | /man/Nadults.Rd | 3485d8ad8ded74bf644829218fdcd1794f6b3d4d | [] | no_license | carlopacioni/vortexR | f26e14f957291caac6ce60ff3631adf79f5ee07b | e5a0efd8a018139f451e4c33e6e9b4e3e2eb1fad | refs/heads/master | 2022-05-19T02:50:25.985186 | 2022-05-03T01:24:34 | 2022-05-03T01:24:34 | 31,725,120 | 7 | 2 | null | 2020-07-03T05:04:38 | 2015-03-05T17:01:01 | R | UTF-8 | R | false | true | 2,117 | rd | Nadults.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_analysis.R
\name{Nadults}
\alias{Nadults}
\title{Calculate the harmonic mean of the total number of adults}
\usage{
Nadults(
data,
scenarios = "all",
npops_noMeta = 1,
appendMeta = FALSE,
gen = 1,
yr0 = 1,
yrt = 2,
save2disk = TRUE,
fname = "Nadults",
dir_out = "DataAnalysis"
)
}
\arguments{
\item{data}{The second element (census_means) of the output from \code{collate_yr}}
\item{scenarios}{A vector of scenario names for which Ne needs to be
calculated, default: 'all'}
\item{npops_noMeta}{The total number of populations excluding the metapopulation,
default: 1}
\item{appendMeta}{Whether to calculate data for the metapopulation,
default: FALSE}
\item{gen}{The generation time express in years}
\item{yr0}{The time window to be considered (first and last year
respectively)}
\item{yrt}{The time window to be considered (first and last year
respectively)}
\item{save2disk}{Whether to save the output to disk, default: TRUE}
\item{fname}{The name of the files in which to save the output, default: 'Nadults'}
\item{dir_out}{The local path to store the output. Default: DataAnalysis}
}
\value{
A \code{data.table} (\code{data.frame} if \code{\link[data.table]{data.table}} is not
loaded) with Nb values
}
\description{
\code{Nadults} calculates, for several scenarios, the harmonic mean of the total
number of adults between \code{yr0} and \code{yrt}. These values can be used to
calculate Ne/N ratios where relevant.
}
\details{
\code{yrt} is adjusted by subtracting the number of years of the generation
time (rounded to the nearest integer). In this way the user can provide the
same \code{yr0,yrt} and \code{gen} to \code{Nadults} and \code{Ne} and these
values are adjusted internally to correctly calculate the Ne/N ratios where
relevant. If this behaviour is not desired, use \code{gen=0}.
}
\examples{
# Using Pacioni et al. example data. See ?pac.yr for more details.
data(pac.yr)
NadultAll <- Nadults(data=pac.yr[[2]], scenarios='all', gen=2.54, yr0=50,
yrt=120, save2disk=FALSE)
}
|
c671e0e2e5c534712de2ebc7d981eebbfbe286a7 | 753e07657928cf5c073943285425b76072eb9943 | /run_analysis.R | ace92560d95f5bf381c58eddc27a19fecd0441eb | [] | no_license | Nikhil1005/Getting-and-CLeaning-Data--Course-Project | 017b92a621d5b3d78cf366b0e4df0b8456bd16e9 | fcbcee5f2867a2fcfd3b73bfb0a605f69cc97fdc | refs/heads/master | 2020-05-23T09:45:17.659993 | 2017-03-15T10:49:57 | 2017-03-15T10:49:57 | 84,760,825 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,174 | r | run_analysis.R | #download file
if(!file.exists("./data"))
{dir.create("./data")}
fileurl<- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileurl,destfile = "./data/Datasets.zip")
#unzip files
unzip(zipfile = "./data/Datasets.zip",exdir = "./data")
#read files
x_train<-read.table('./data/UCI HAR Dataset/train/X_train.txt')
Y_train<-read.table('./data/UCI HAR Dataset/train/Y_train.txt')
subject_train<- read.table('./data/UCI HAR Dataset/train/subject_train.txt')
x_test<-read.table('./data/UCI HAR Dataset/test/X_test.txt')
Y_test<-read.table('./data/UCI HAR Dataset/test/Y_test.txt')
subject_test<- read.table('./data/UCI HAR Dataset/test/subject_test.txt')
features<- read.table('./data/UCI HAR Dataset/features.txt')
activity_labels<- read.table('./data/UCI HAR Dataset/activity_labels.txt')
# give column names
colnames(x_train) <- features[,2]
colnames(Y_train) <- "activityID"
colnames(subject_train) <-"subjectID"
colnames(x_test) <- features[,2]
colnames(Y_test) <- "activityID"
colnames(subject_test) <-"subjectID"
colnames(activity_labels) <-c("activityID", "activity_type")
#merge in one test and train dataset
merge_train <- cbind(Y_train,subject_train,x_train)
merge_test <-cbind(Y_test, subject_test,x_test)
complete_data<-rbind(merge_test,merge_train)
colnames<-colnames(complete_data)
#extract only mean and standard deviation
col_mean_std<- (grepl("activityID", colnames)| grepl("subjectID",colnames) |
grepl("mean..", colnames) | grepl("std..", colnames))
mean_std <- complete_data[,col_mean_std==TRUE]
#bringing in name of activity
mean_std_w_act_name<- merge(mean_std,activity_labels, by = 'activityID')
#apprioprately naming the data set
colnames<-colnames(mean_std_w_act_name)
for(i in 1:length(colnames))
{
colnames[i] = gsub("\\()","",colnames[i])
colnames[i] = gsub("-std$","StdDev",colnames[i])
colnames[i] = gsub("-mean","Mean",colnames[i])
colnames[i] = gsub("^(t)","time",colnames[i])
colnames[i] = gsub("^(f)","freq",colnames[i])
colnames[i] = gsub("([Gg]ravity)","Gravity",colnames[i])
colnames[i] = gsub("([Bb]ody[Bb]ody|[Bb]ody)","Body",colnames[i])
colnames[i] = gsub("[Gg]yro","Gyro",colnames[i])
colnames[i] = gsub("AccMag","AccMagnitude",colnames[i])
colnames[i] = gsub("([Bb]odyaccjerkmag)","BodyAccJerkMagnitude",colnames[i])
colnames[i] = gsub("JerkMag","JerkMagnitude",colnames[i])
colnames[i] = gsub("GyroMag","GyroMagnitude",colnames[i])
};
colnames(mean_std_w_act_name) <- colnames
mean_std_w_act_name<- mean_std_w_act_name[,names(mean_std_w_act_name)!= "activity_type"]
new_data <- aggregate(mean_std_w_act_name,
by = list(mean_std_w_act_name$subjectID,mean_std_w_act_name$activityID),
mean)
new_data<-merge(new_data, activity_labels , by = "activityID")
#Storing output to new data set and exporting it
new_data<- new_data[,names(new_data)!= "Group.1" ]
new_data<- new_data[,names(new_data)!= "Group.2" ]
write.table(new_data,"final_data.txt", row.names = FALSE, sep = "\t") |
bf9ffa51b4d31589d366c2925192c9eafd8ca50a | af91cf7b1b311ab545a9595cd34160463992718b | /PubFigures.R | d7c322707ec83dfa6778a21e6d7e5280b03ee9f7 | [] | no_license | jrmihalj/PostdocAnalyses | 3c3c59e8a480a44122d1cf623b3567fe8c52a807 | eb0c898c1abbd7cb368171cfef8b4954033794e4 | refs/heads/master | 2021-01-01T05:53:20.898061 | 2015-09-02T18:12:34 | 2015-09-02T18:12:34 | 40,129,730 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 832 | r | PubFigures.R | # Publication quality figures
#####################
# Figure 1, version 1
#####################
library(gridExtra)
quartz(height=8, width=8, dpi=300)
grid.arrange(klip.plot1b, tmb.plot1b, love.plot1b, nm.plot1b, nrow=2)
quartz(height=8, width=8, dpi=300)
plot.new()
legend(locator(1), legend=c("0 Day", "3 Day"), pch=c(1,17), pt.cex=c(1.15,1.15),
lty=c(1,1), col=c("gray","black"), lwd=c(2,2), bty="n")
graphics.off()
#####################
# Figure 1, version 2
#####################
library(gridExtra)
quartz(height=8, width=8, dpi=300)
grid.arrange(klip.plot2b, tmb.plot2b, love.plot2b, nm.plot2b, nrow=2)
quartz(height=8, width=8, dpi=300)
plot.new()
legend(locator(1), legend=c("0 Day", "3 Day"), pch=c(19,17), pt.cex=c(1.15,1.15),
lty=c(1,1), col=c("gray","black"), lwd=c(2,2), bty="n")
graphics.off()
|
9fc63f515bc108aa7bd428c512358d14002c055a | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/chillR/R/VIP.R | daacb191679a7a159a123c13eef34db872ffe978 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,393 | r | VIP.R | #' Calculate VIP scores for PLS regression
#'
#' This function calculates the Variable Importance in the Projection statistic
#' for the Partial Least Squares regression. It is used in the PLS function.
#' Executing it in isolation will probably not be useful to most users.
#'
#' This is required to produce the VIP scores for the PLS procedure.
#'
#' @param object an mvr object, as produced by the pls procedure or a range of
#' other functions
#' @return data frame with as many columns as independent variables are input
#' into the PLS analysis. The number of columns corresponds to the number of
#' latent components selected for the analysis. Values in the data frame are
#' the VIP values corresponding to each variable for the respective component.
#' @author Eike Luedeling, but the function was mainly copied from
#' http://mevik.net/work/software/pls.html; the reference given there is listed
#' below
#' @references the function is mostly identical to the one provided on
#' http://mevik.net/work/software/pls.html.
#'
#' Here is the reference given there:
#'
#' Chong, Il-Gyo & Jun, Chi-Hyuck, 2005, Performance of some variable selection
#' methods when multicollinearity is present, Chemometrics and Intelligent
#' Laboratory Systems 78, 103-112
#'
#' This reference refers to the chillR package:
#'
#' Luedeling E, Kunz A and Blanke M, 2013. Identification of chilling and heat
#' requirements of cherry trees - a statistical approach. International Journal
#' of Biometeorology 57,679-689.
#' @keywords utility
#' @examples
#'
#' PLS_results<-PLS_pheno(
#' weather_data=KA_weather,
#' split_month=6, #last month in same year
#' bio_data=KA_bloom,return.all=TRUE)
#'
#' #return.all makes the function return the whole PLS object - needed for next line to work
#'
#' VIP(PLS_results$PLS_output)
#'
#'
#' @export VIP
VIP <- function(object) {
  # Guard clauses: the decomposition below is only valid for the
  # orthogonal-scores PLS algorithm with a single response variable.
  if (object$method != "oscorespls") {
    stop("Only implemented for orthogonal scores algorithm. Refit with 'method = \"oscorespls\"'")
  }
  if (nrow(object$Yloadings) > 1) {
    stop("Only implemented for single-response models")
  }
  # Variance in Y explained by each latent component.
  explained <- c(object$Yloadings)^2 * colSums(object$scores^2)
  w <- object$loading.weights
  # Squared loading weights, scaled per component by the explained variance
  # over the squared weight norm of that component.
  weighted <- sweep(w^2, 2, explained / colSums(w^2), "*")
  # VIP score of each predictor, cumulated over components.
  sqrt(nrow(weighted) * apply(weighted, 1, cumsum) / cumsum(explained))
}
cc6649419ccbf2d6ab8f929b17ee42e8457505ae | afe99ef55dbea4b9207c313beea27e09ea3025e9 | /known_dend_49H5ID.R | 7204ac0485907bfbacee391dd20a92a58e5aa5f9 | [] | no_license | bhlabwii/wolf_howlID | 915f51c6d7635be4d779af4554e644e96d9c5dab | 20c6f07b0d63ea534024d17f143f1e0651d132eb | refs/heads/main | 2023-03-13T10:05:33.754573 | 2021-03-03T08:22:28 | 2021-03-03T08:22:28 | 344,052,683 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,159 | r | known_dend_49H5ID.R | setwd ("D:/Wolf_Project/Howl_recognise_fresh20200821_1133/Analysis/R/Dendogram_known")
#Required Packages
#install.packages("dendextend")
#install.packages("circlize")
# load package cluster
library(cluster, quietly = TRUE)
library(readxl)
library(dendextend)
library(colorspace) # get nice colors
library(circlize)
#Reading excel file
wolf49H <- read_excel("49H5ID_LDScroe.xlsx")
str(wolf49H)
wolf49Hx <- wolf49H [,10:13]
wolf49Hx
wolf49H.rn <- data.frame (wolf49H$howl_3ID.Filename, wolf49Hx$x.LD1, wolf49Hx$x.LD2, row.names = TRUE )
wolf49H.rn
str(wolf49H.rn)
agn2 <- agnes(wolf49H.rn, metric = "manhattan", stand = TRUE)
dend <- as.dendrogram(agn2)
dend <- as.dendrogram(agn2)
# order it the closest we can to the order of the observations:
dend <- rotate(dend, 1:0)
# Color the branches based on the hight:
dend <- color_branches(dend, h= 2.2) #, groupLabels=different Howling Individual)
# reduce the size of the labels:
# dend <- assign_values_to_leaves_nodePar(dend, 0.5, "lab.cex")
dend <- set(dend, "labels_cex", .5)
# And plot:
par(mar=c(1,1,1,1), mgp=c(1, 1, 1))
circlize_dendrogram(dend, lebels=TRUE, row.names= TRUE, labels_track_height = 0.4)
dend
#writetable
clustnumber<- cutree(dend, k=5)
dendogram_50known_howl <-data.frame(wolf49H$howl_3ID.Individual, clustnumber)
write.csv(dendogram_50known_howl, "49h5ID_dend_results.csv")
#########################
agn3 <- agnes(wolf49H.rn, metric = "manhattan", stand = TRUE)
dend2 <- as.dendrogram(agn3)
# order it the closest we can to the order of the observations:
dend2 <- rotate(dend2, 1:0)
# Color the branches based on the hight:
dend2 <- color_branches(dend2, h= 2.2) #, groupLabels=different Howling Individual)
# reduce the size of the labels:
# dend <- assign_values_to_leaves_nodePar(dend, 0.5, "lab.cex")
dend2 <- set(dend2, "labels_cex", .5)
#colour highlighted row
highlight<- row.names(wolf49H.rn)[20]
highlight
dend2 <- color_labels(dend2, labels = highlight , col = 2)
# And plot:
par(mar=c(1,1,1,1), mgp=c(1, 1, 1))
circlize_dendrogram(dend2, lebels=TRUE, row.names= TRUE, labels_track_height = 0.4)
|
71be1dd4955c5f40cae21db69372c85ada15ce8b | 304a3e196011c93132b6114d08816a5e0b6ac406 | /man/WECCAsc.Rd | 138fcc904da82598623c188630ad10e54982cc5c | [] | no_license | tgac-vumc/WECCA | 15c78527785502f62e295bb1cfdae22411002118 | 3103e1b6bfa7235ec833246e4efbf9ff79894373 | refs/heads/master | 2020-12-31T04:41:16.480074 | 2015-11-30T15:13:28 | 2015-11-30T15:13:28 | 43,244,049 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,853 | rd | WECCAsc.Rd | \name{WECCAsc}
\alias{WECCAsc}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Weighted clustering of array CGH data (soft calls). }
\description{
Weighted distance matrix calculation, followed by dendrogram construction for regioned call probability array CGH data.
}
\usage{
WECCAsc(cghdata.regioned, dist.measure = "KLdiv", linkage = "ward", weight.type = "all.equal")
}
\arguments{
\item{cghdata.regioned}{ A \code{list}-object as returned by the function \code{regioning.sc}. }
\item{dist.measure}{ The distance measure to be used. This is either \code{"KLdiv"} (the symmetric Kullback-Leibler divergence) or \code{"ordinal"} (distance between the cumulative call probability distributions). }
\item{linkage}{ The linkage method to be used, like \code{"single"}, \code{"average"}, \code{"complete"}, \code{"ward"}. }
\item{weight.type}{ Region weighting to be used in the calculation of the distance, either \code{"all.equal"} or \code{"heterogeneity"}. }
}
\details{
The distance between the call probability profiles of all sample pairs are calculated using special distance measures suitable for this data type.
The distance between the call probability signature of two regions is either defined as the symmetric Kullback-Leibler divergence or as the absolute distance between their cumulative call probability distributions.
The distance between two call probability profiles is defined as a weighted average of the distances between individual regions.
Region weighing currently implemented: no-weighing (all regions contribute equally to the overall distance), or heterogeneity weighing (weights are proportional to their Shannon's entropy).
Once the distance matrix has been calculated, this is submitted to the \code{hclust} function, which clusters the samples hierarchically, returning a dendrogram.
}
\value{
An object of class \code{hclust} which describes the tree produced by the clustering process. See \code{hclust} for a list of its components.
}
\references{
Van Wieringen, W.N., Van de Wiel, M.A., Ylstra, B. (2008), "Weighted clustering of called aCGH data", \emph{Biostatistics}, 9(3), 484-500.
Smeets, S.J., Brakenhoff, R.H., Ylstra, B., Van Wieringen, W.N., Van de Wiel, M.A., Leemans, C.R., Braakhuis, B.J.M. (2009), "Genetic classification of oral and oropharyngeal carcinomas identifies subgroups with a different prognosis", \emph{Cellular Oncology}, 39, 291--300.
}
\author{ Wessel N. van Wieringen: \email{w.vanwieringen@vumc.nl} }
\seealso{ \code{dist}, \code{hclust}, \code{KLdiv}, \code{regioning} }
\examples{
# generate object of cghCall-class
data(WiltingCalled)
# make region data (soft and hard calls)
WiltingRegioned <- regioning(WiltingCalled)
# clustering with soft.calls
dendrogram <- WECCAsc(WiltingRegioned)
}
|
b8f1bde909b529312f9f21c7f935a045734d08df | a52b2b00885ffd5a1e93599e02dfe6875f3a2dad | /ptv_stock/create_subset_df.R | 1915b4167f93cacf667a018754c929ac127436c6 | [] | no_license | victortso/R | 623148a1cfc68af537a01220063a18b4a58cac69 | 318c1c0041def7ae3a021cac86e6d50cd9b1cc4e | refs/heads/master | 2021-04-09T16:04:32.427135 | 2018-04-30T05:17:31 | 2018-04-30T05:17:31 | 120,529,248 | 0 | 0 | null | 2018-02-06T22:26:45 | 2018-02-06T22:08:02 | null | UTF-8 | R | false | false | 1,306 | r | create_subset_df.R | #
# FILE: create_subset_df.R
#
# 4/14/2018 tso New File
# 4/29/2018 tso Create subset data frame (30, 60, 90 days) out from the larger set of data frame
#
#------ Requirement# 1
# Given n, the number of months to be used to compute the SD of the last n
# months, create a subset data frame containing the first n calendar months
# (in order of appearance) of `df`, then append per-row Range (high - low),
# the overall SD of Range, and Range - SD.
#
# Args:
#   df : data frame of daily rows; must have a `timestamp` column formatted
#        "YYYY-MM-DD..." plus `high` and `low` among its first 7 columns.
#   n  : number of months to keep; stops early if fewer months are present.
#
# Returns: columns 1:7 of `df` plus Range, SD and Range_SD_range.
#
# Changes from the original: removed dead pre-loop computation and the
# leftover DEBUG block; replaced `1:n` with seq_len(n) (the original looped
# over c(1, 0) when n == 0); replaced the dplyr mutate pipeline with
# equivalent base-R column assignments (same columns, same order).
create_subset_df <- function(df, n) {
  # Month number from characters 6-7 of the timestamp string.
  # https://stats.stackexchange.com/questions/12980/subset-data-by-month-in-r
  df$month <- as.numeric(substr(df$timestamp, 6, 7))
  df_s <- data.frame()
  for (i in seq_len(n)) {
    m <- df$month[1]                  # next month still present in df
    first <- subset(df, month == m)   # all rows of that month
    if (nrow(first) == 0) {
      # Fewer than n months available: nothing left to consume.
      break
    }
    df <- subset(df, month != m)      # drop the consumed month
    df_s <- rbind(df_s, first)
  }
  df_s <- df_s[, c(1:7)]              # drop the helper `month` column
  # Daily trading range.
  df_s$Range <- df_s$high - df_s$low
  # Standard deviation of the range over the kept window (constant column).
  sd_range <- sd(df_s$Range)
  df_s$SD <- sd_range
  # Range relative to its SD.
  df_s$Range_SD_range <- df_s$Range - sd_range
  return(df_s)
}
7edff6d7ca30547e3aa378f06e9a7d1a7e38a589 | d5400fe94d97262d3fcdad066faad5febbbedbd5 | /TeleFunc/man/totcount.Rd | d3b38583d7f78b8ee3cadc0177f6d032c16d75d3 | [] | no_license | ConnorFWhite/TeleFunc | 195c5e26065cbc58aa290fa1639b7e317c87de7c | a538536bb08a6d7ae6cca72e56436d482f3b7821 | refs/heads/master | 2023-02-07T16:14:47.620076 | 2023-02-03T19:53:54 | 2023-02-03T19:53:54 | 90,874,903 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 935 | rd | totcount.Rd | \name{totcount}
\alias{totcount}
\title{
Summarize a Transition Matrix
}
\description{
Summarize a transition matrix to determine the number of times each state occurred, and the number of edges, i.e. the number of times the state changed.
}
\usage{
totcount(Trans)
}
\arguments{
\item{Trans}{
n x n transition matrix
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
returns a list with two levels
\item{StateCount }{the number of times each state occurs}
\item{TransCount}{ the number of times each transition occurs; the diagonal, which would represent a transition from a state to itself, is 0}
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Connor F. White
}
\note{
%% ~~further notes~~
}
\seealso{
transMat
}
\examples{
dat<-sample(c("A","B","C","D"),100,replace=TRUE)
x<-transMat(dat, States=c("A","B","C","D"))
counts<-totcount(x)
counts$StateCount
counts$TransCount
}
|
fbda60bf1c0f5dbfc6572454caab2000600c0fa7 | 65ecd584076be5478ed0b80c5f8b1ba133ae1681 | /R Programming - Assignment 3/rprog%2Fdata%2FProgAssignment3-data/rankHospital.R | aa5c901f7e6e3e0cabfc0266176daa83a0730958 | [] | no_license | aeisenstadt/datasciencecoursera | fe9b495bf7bf2a145fbd6ce941c27f227810ff71 | d603f9061ec149682c301c09b3d80a995bf9bf03 | refs/heads/master | 2020-12-07T02:39:36.744471 | 2017-11-22T15:01:04 | 2017-11-22T15:01:04 | 95,497,274 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,436 | r | rankHospital.R | ## R Programming Assignment 3 file
## rankhospital function: returns a character vector with the name
## of the hospital that has the ranking specified by the num argument,
## for the given state and outcome ("best", "worst", or a numeric rank).
## Reads "outcome-of-care-measures.csv" from the working directory; ties
## in mortality are broken alphabetically by hospital name (column 2).
rankhospital <- function(state, outcome, num = "best") {
        ## Read outcome data (all columns as character; the mortality
        ## column is coerced with as.numeric() when ranking below)
        data <- read.csv("outcome-of-care-measures.csv",
                         colClasses = "character")
        ## Check that state and outcome are valid.
        ## Columns 11 / 17 / 23 hold the 30-day mortality rates for
        ## heart attack / heart failure / pneumonia respectively.
        outcome_col <- numeric()
        if (outcome == "heart attack") {
                outcome_col <- 11
        } else if (outcome == "heart failure"){
                outcome_col <- 17
        } else if (outcome == "pneumonia"){
                outcome_col <- 23
        } else {
                stop("invalid outcome")
        }
        ## NOTE(review): grep() does partial/regex matching, so a state
        ## string that is a substring of another column-7 value would
        ## pass this check; an exact test (state %in% data[, 7]) may be
        ## what is intended -- confirm.
        state_check <- grep(state, data[,7])
        if (length(state_check) == 0) stop("invalid state")
        ## Return hospital name in that state with the given rank
        ## 30-day death rate
        divide <- split(data, data$State)
        byState <- data.frame(divide[grep(state, names(divide))])
        ## Order by mortality (as.numeric() turns "Not Available" into
        ## NA, which order() places last), then by hospital name (col 2)
        ## to break ties.
        rankedframe <- byState[ order(as.numeric(byState[,outcome_col]), byState[,2]), ]
        if (num == "best") {return(rankedframe[1, 2])}
        ## NOTE(review): which.max() is applied to a character column
        ## here (colClasses = "character"); confirm this yields the
        ## intended "worst" hospital rather than erroring or comparing
        ## lexicographically.
        else if (num == "worst") {
                worst <- which.max(rankedframe[, outcome_col])
                final <- rankedframe[worst,2]
                return(final)}
        ## Numeric rank: an out-of-range rank yields NA.
        rankedframe[num, 2]
}
620e9dc2729ac00ebd95e6aea154690412754698 | 559713216d4fe05838b1450981d8f6a2bd838135 | /profiling/5.cj_new_AGPv2/5.C.2_run_phase_parent.R | 2a8d2ffa3e9804734653bdfb20c5aba61a9ca499 | [] | no_license | yangjl/phasing | 6ac18f067c86d225d7351dfb427b6ae56713ce1b | 99a03af55171dac29b51961acb6a044ea237cb3a | refs/heads/master | 2020-04-06T06:58:38.234815 | 2016-06-10T00:40:36 | 2016-06-10T00:40:36 | 38,838,697 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 834 | r | 5.C.2_run_phase_parent.R | ### Jinliang Yang
### Nov 1st, 2015
### Phase parents from GBS data. Designed to run as a cluster array job:
### the first command-line argument selects which input file (one row of
### pp_files.csv) this job processes.
## get command line args
options(echo=TRUE) # if you want see commands in output file
args <- commandArgs(trailingOnly = TRUE)
JOBID <- as.numeric(as.character(args[1]))
print(JOBID)
##############
library("imputeR")
## Table of input .RData files, one per job
df <- read.csv("largedata/pp_files.csv")
df$file <- as.character(df$file)
## load() restores the saved object(s) into the global environment; the
## call below assumes it creates `obj` (a GBS.array) -- TODO confirm
## against how the .RData files were written.
o <- load(df$file[JOBID])
#perr <- read.csv("cache/teo_parents_errmx.csv")
kerr <- read.csv("cache/teo_kids_errmx.csv")
## Genotype-error probabilities from the kids' error matrix.
## NOTE(review): `probs` is not referenced again in this script --
## confirm whether phase_parent() reads it from the global environment
## or it is vestigial.
probs <- error_probs(mx=kerr, merr=1)
# phasing
#obj@pedigree <- subset(obj@pedigree, p1==p2)
phase <- phase_parent(GBS.array=obj, win_length=10, join_length=10, verbose=TRUE, OR=log(3))
# compute error rate
#outfile <- gsub("/obs/", "/pp/", df$file[JOBID])
## Output CSV alongside the input: same path with RData -> csv
outfile <- gsub("RData", "csv", df$file[JOBID])
write.table(phase, outfile, sep=",", row.names=FALSE, quote=FALSE)
46083b55c19089c9d2b881fa92c9ec08740a8c8c | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/eeptools/examples/max_mis.Rd.R | 3405ef8bdccd4ff3719846bbe375ecccceea84e5 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 285 | r | max_mis.Rd.R | library(eeptools)
### Name: max_mis
### Title: A function to safely take the maximum of a vector that could
### include only NAs.
### Aliases: max_mis
### ** Examples
max(c(7,NA,3,2,0),na.rm=TRUE)
max_mis(c(7,NA,3,2,0))
max(c(NA,NA,NA,NA),na.rm=TRUE)
max_mis(c(NA,NA,NA,NA))
|
ec8d7cd203b3065ee6f7048372cd1df15fa1a703 | 7a7ca2e3741fa6fb4c2f8d7a37bd1008c850bfbc | /R_GW_Maps.R | 1515514711fba1072c6cb60de259ddb82cf61b30 | [] | no_license | futurikidis21/Crime-Rates-London-Geographically-Weighted-Model | 83b04f1b5f3fcde0f662ee84146d0edbbf173ea0 | 3f02e0d8f0845b3451f17a1b6e405f7cccafa923 | refs/heads/master | 2021-07-02T01:35:30.301350 | 2017-09-21T17:53:39 | 2017-09-21T17:53:39 | 104,266,923 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,188 | r | R_GW_Maps.R | install.packages("rgdal")
# For spatial data handling
library(rgdal)
install.packages("spdep")
library(spdep)
install.packages("rgeos")
library(rgeos)
# For charting
install.packages("tmap")
library(tmap)
install.packages("cartogram")
library(cartogram)
install.packages("ggplot2")
library(ggplot2)
install.packages("gridExtra")
library(gridExtra)
install.packages("GGally")
library(GGally)
# For data munging
install.packages("dplyr")
library(dplyr)
# For spatial stats
install.packages("GWmodel")
library(GWmodel)
install.packages("spdep")
library(spdep)
# For regression functions
install.packages("car")
library(car)
percentage<- read.csv("db(N).csv")
percentage$id <- as.character(percentage$id)
gb_boundaries <- readOGR(dsn = "London-wards", layer = "Wardsv2")
gb_boundaries@data$WD11_CMWD1 <- as.character(gb_boundaries@data$WD11_CMWD1)
gb_boundaries@data <- inner_join(gb_boundaries@data, percentage, by=c("WD11_CMWD1" = "id"))
gb_boundaries@data[is.na(gb_boundaries@data)]<-0
#Geographically Weighted model and mapping...change range to get maps
gw_ss <- gwss(gb_boundaries,var=c(colnames(gb_boundaries@data[45]),colnames(gb_boundaries@data[16:36])),kernel = "bisquare", adaptive = TRUE, bw = 50, quantile = TRUE)
tm_shape(gw_ss$SDF,unit.size=10000000000000) +
tm_fill(col=colnames(gw_ss$SDF@data[,c(573,574,585,586,587,588,589,590,591,593)]), title="Spearman Correlation Coefficient", style="cont",palette="Spectral", size=2,scale=2) +
tm_borders(col="#bdbdbd", lwd=0.01) +
tm_facets(free.scales = FALSE,scale.factor = 10) +
tm_layout(
panel.labels=c('Crimes vs Population','Crimes vs Population Density','Crime vs Bad Health','Crime vs Higher Social Status','Crime vs Unemployed','Crime vs Economically Active','Crime vs Unemployed','Crime vs Long Term Unemployed','Crime vs Unqualified','Crime vs House Price'),
panel.label.size=0.5,
panel.label.height =1.5,
frame=FALSE,
title.snap.to.legend=FALSE,
title.size=1,
title.position = c("left", "top"),
inner.margins = c(0,0,0.15,0),
legend.title.size=0.7,
legend.text.size=0.6,
legend.outside=TRUE)
tmap_mode("plot")
|
2ce228dff08b807794d536279c76120ae3e8bab2 | e20e28ae2820b90251878c97b4975b313d5658a1 | /An Introduction to Statistical Learning with Applications in R/1. Basic Concept/Basic Concepts.r | 8dc72377e5c1eb7cd4f33e1f305c451b70f23f05 | [] | no_license | Manjunath7717/Machine-Learning-Algorithms | 535fa8a4d39872169e3981657a500e33277e6348 | b8fbaf901530454e28a0588cccaba61bf0983225 | refs/heads/master | 2022-12-06T11:32:19.428628 | 2020-08-27T15:45:59 | 2020-08-27T15:45:59 | 263,555,888 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,423 | r | Basic Concepts.r | x <- c(1,3,2,5)
x
length (x)
x = c(1,6,2)
x
length (x)
y= c(1,4,3)
x+y
x=matrix(data=c(1,2,3,4), nrow=2, ncol=2)
x
x=matrix(c(1,2,3,4) ,2,2)
sqrt(x)
matrix(c(1,2,3,4),2,2,byrow=TRUE)
A=matrix(1:16,4,4)
A
x=rnorm(50)
y=x+rnorm(50,mean=50,sd=.1)
cor(x,y)
set.seed (3)
y=rnorm(1000)
mean(y)
var(y)
sqrt(var(y))
sd(y)
x=rnorm(100)
y=rnorm(100)
plot(x,y)
plot(x,y,xlab="this is the x-axis",ylab="this is the y-axis",main="Plot of X vs Y")
pdf("Figure.pdf")
plot(x,y,col="red")
dev.off()
x=seq(1,10)
x
x=1:10
x
x=seq(-pi,pi,length=50)
x
y=x
f=outer(x,y,function (x,y)cos(y)/(1+x^2))
contour (x,y,f)
contour(x,y,f,nlevels=45,add=T)
fa=(f-t(f))/2
contour(x,y,fa,nlevels=15)
image(x,y,fa)
persp(x,y,fa)
persp(x,y,fa,theta=30)
persp(x,y,fa,theta=30,phi=20)
persp(x,y,fa,theta=30,phi=70)
persp(x,y,fa,theta=30,phi=40)
Auto=read.table("Auto.data.txt")
Auto=read.csv("Auto.csv",header=T,na.strings ="?")
Auto
dim(Auto)
Auto=na.omit(Auto)
dim(Auto)
names(Auto)
attach(Auto)
plot(cylinders, mpg)
plot(Auto$cylinders , Auto$mpg )
attach(Auto)
cylinders=as.factor(cylinders)
Auto
#plot(cylinders ,mpg )
plot(cylinders,mpg, col="red",horizontal=T,ylab =" MPG ", xlab="cylinders")
plot(cylinders,mpg, col="blue",varwidth=F,ylab =" MPG ", xlab="cylinders")
hist(mpg)
hist(mpg,col=2)
hist(mpg,col=2,breaks=15)
pairs(Auto)
plot(horsepower ,mpg)
#identify(horsepower,mpg,name)
summary(Auto)
summary(mpg)
|
75eed5ffe2db30e852827965993f5e5b1b45cd04 | 3660dc3609b4475230efba029ff7ae2e8c888773 | /transposon_annotation/r_scripts/aster_comm_dat_rank_abund_plot.R | 291717548fa68d6e14d072342596a8657dd61e55 | [
"MIT"
] | permissive | sestaton/sesbio | 16b98d14fe1d74e1bb37196eb6d2c625862e6430 | b222b3f9bf7f836c6987bfbe60965f2e7a4d88ec | refs/heads/master | 2022-11-12T17:06:14.124743 | 2022-10-26T00:26:22 | 2022-10-26T00:26:22 | 13,496,065 | 15 | 6 | null | null | null | null | UTF-8 | R | false | false | 162 | r | aster_comm_dat_rank_abund_plot.R | library(vegan)
superfam_bp <- read.table("all_superfams_bp_tab_ex.txt",sep="\t",row.names=1,header=T)
superfam_bp_rad <- radfit(superfam_bp)
plot(superfam_bp_rad) |
137c560f7c52e1474b4348bb4567a24bfd6d0e24 | fa458927b2c8baee881318dc64d4cb786905950e | /revdep/check.R | 94a83eee97465335734461de7c003d5cdd976f20 | [] | no_license | nathanvan/genderizeR | eb4627cdd28d4b7b2e09781c4393ce14b27a37ae | 7b66bb2d3ef54d6e88c15b387a23688d7e5f5131 | refs/heads/master | 2021-01-13T06:08:27.031339 | 2015-10-14T18:54:48 | 2015-10-14T18:54:48 | 44,268,976 | 0 | 0 | null | 2015-10-14T18:54:48 | 2015-10-14T18:47:14 | R | UTF-8 | R | false | false | 125 | r | check.R | library("devtools")
res <- revdep_check(libpath = "e:/R_bufor/")
revdep_check_save_summary(res)
revdep_check_save_logs(res)
|
a9e304525d88712785f95f052ef1791c3f610f5b | a92031dd5712ecb8d959470d6c324a831e6154f7 | /extras/CheckRowIds.R | 53e373fdee1462a571b464c6f0af5c75293bb147 | [
"Apache-2.0"
] | permissive | sebastiaan101/Legend | 581e90db04b6e6dd3d0de36d69bbb8e8bbdb5fe6 | 4e7867fc3fc9ce902f2fb5274252484d7f38060b | refs/heads/master | 2023-02-06T00:57:32.816985 | 2020-12-18T16:10:59 | 2020-12-18T16:10:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,455 | r | CheckRowIds.R | # Copyright 2018 Observational Health Data Sciences and Informatics
#
# This file is part of Legend
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Use this code to check if row IDs have changed from one data fetch to the next
# Should be used for example when redoing data fetch but keeping old shared PS objecs.
# Compare the old and new cohort fetches file-by-file.
folder1 <- file.path(outputFolder, indicationId, "allCohortsOld")
# BUG FIX: the original chained assignment (folder2 <- folder1 <- ...)
# overwrote folder1, so both folders pointed at the NEW fetch and every
# comparison was vacuously equal. folder1 must keep the old fetch.
# (Also removed the dead `file <- "allCohorts.rds"` assignment, which
# was immediately overwritten by the loop variable.)
folder2 <- file.path(outputFolder, indicationId, "allCohorts")
files <- list.files(folder1)
for (file in files) {
  writeLines(file)
  cohort1 <- readRDS(file.path(folder1, file))
  cohort2 <- readRDS(file.path(folder2, file))
  if (!is.null(cohort1$cohortId)) {
    # Normalize row order (and drop row names) so a pure reordering of
    # rows does not register as a difference.
    cohort1 <- cohort1[order(cohort1$rowId, cohort1$cohortId), ]
    row.names(cohort1) <- NULL
    cohort2 <- cohort2[order(cohort2$rowId, cohort2$cohortId), ]
    row.names(cohort2) <- NULL
  }
  # BUG FIX: all.equal() returns TRUE or a character vector describing
  # the differences; negating a character vector is an error, so the
  # result must be tested with isTRUE().
  if (!isTRUE(all.equal(cohort1, cohort2))) {
    warning("Not equal: ", file)
  }
}
}
|
6711e82eda6a031ad95ed7f1cf72505e4f80152c | 224a67b5fac2d6348a37f5f8fffa68030b8a2b6a | /Twitter Mentions/Cleaning.R | 9c856a608cee8b5db8272ff112a1f160833f3544 | [] | no_license | PiermattiaSchoch/Social-Network-Analysis | 015cf87c2dd461789b199a7e645b9090926ce095 | 33765f4357ef3bbea15066966091f6a093e33152 | refs/heads/master | 2020-06-26T19:11:10.581280 | 2019-07-30T21:05:54 | 2019-07-30T21:05:54 | 199,726,756 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,152 | r | Cleaning.R | # * Command Line *
# Create a directory: > cd /Users/University/Tweet
# Split the file: > split -b 800m /Users/University/Desktop/Tweet/tweets2009-07.txt
# tweet.txt
# Time to Create 5 dataframe not cleaned
start_time <- Sys.time()
# ----------------
# * Libraries *
library(data.table)
library(dplyr)
library(reshape2)
library(tidyr)
library(igraph)
# ----------------
# * Cleaning*
setwd("~/Desktop/Tweet")
# Import and look
df = fread("tweet.txt", blank.lines.skip = T, header = F)
head(df,1)
tail(df,3)
# Remove some rows to lighten the df:
df = df[1:(nrow(df) - 4300000),]
# Check the date of the last row (ok it ends here 2009-07-06 01:05:04)
tail(df,3)
# Spread the dataframe
ty = ifelse(df$V1 == 'T',T,F)
mydata_1 = df[ty]
ty_1 = ifelse(df$V1 =='U',T,F)
mydata_2 = df[ty_1]
ty_2 = ifelse(df$V1 =='W',T,F)
mydata_3 = df[ty_2]
# Column bind togheter and look
df_largematrix = cbind(mydata_1$V2,mydata_2$V2,mydata_3$V2)
head(df_largematrix)
# Create Indexes each for one of the 5days to analyze
index1 = grepl("2009-07-01",df_largematrix[,1])
index2 = grepl("2009-07-02",df_largematrix[,1])
index3 = grepl("2009-07-03",df_largematrix[,1])
index4 = grepl("2009-07-04",df_largematrix[,1])
index5 = grepl("2009-07-05",df_largematrix[,1])
# Create 5 Dataframes
df1 = as.data.frame(df_largematrix[index1,])[,2:3]
df2 = as.data.frame(df_largematrix[index2,])[,2:3]
df3 = as.data.frame(df_largematrix[index3,])[,2:3]
df4 = as.data.frame(df_largematrix[index4,])[,2:3]
df5 = as.data.frame(df_largematrix[index5,])[,2:3]
# Set columns names
names(df1) = c("User","Tweet")
names(df2) = c("User","Tweet")
names(df3) = c("User","Tweet")
names(df4) = c("User","Tweet")
names(df5) = c("User","Tweet")
# Take the username in the User column
df1$User = gsub(".*com/","", df1$User)
df2$User = gsub(".*com/","", df2$User)
df3$User = gsub(".*com/","", df3$User)
df4$User = gsub(".*com/","", df4$User)
df5$User = gsub(".*com/","", df5$User)
# Using regmatches and gregexpr this gives you a list with hashtags per tweet,
# hastag is of format @ followed by any number of letters or digits
list1 = regmatches(df1$Tweet,gregexpr("@(\\d|\\w)+",df1$Tweet))
df1$Tag <- sapply(list1, paste0, collapse=" ")
list2 = regmatches(df2$Tweet,gregexpr("@(\\d|\\w)+",df2$Tweet))
df2$Tag <- sapply(list2, paste0, collapse=" ")
list3 = regmatches(df3$Tweet,gregexpr("@(\\d|\\w)+",df3$Tweet))
df3$Tag <- sapply(list3, paste0, collapse=" ")
list4 = regmatches(df4$Tweet,gregexpr("@(\\d|\\w)+",df4$Tweet))
df4$Tag <- sapply(list4, paste0, collapse=" ")
list5 = regmatches(df5$Tweet,gregexpr("@(\\d|\\w)+",df5$Tweet))
df5$Tag <- sapply(list5, paste0, collapse=" ")
# Be large then delete (the warning is ok)
library(tidyr)
df1 = separate(df1, Tag, into = as.character(seq(1:20)), sep = " ")
df2 = separate(df2, Tag, into = as.character(seq(1:20)), sep = " ")
df3 = separate(df3, Tag, into = as.character(seq(1:30)), sep = " ")
df4 = separate(df4, Tag, into = as.character(seq(1:20)), sep = " ")
df5 = separate(df5, Tag, into = as.character(seq(1:20)), sep = " ")
# Remove all columns that have NA's
df1 <- df1[,colSums(is.na(df1)) < nrow(df1)]
df2 <- df2[,colSums(is.na(df2)) < nrow(df2)]
df3 <- df3[,colSums(is.na(df3)) < nrow(df3)]
df4 <- df4[,colSums(is.na(df4)) < nrow(df4)]
df5 <- df5[,colSums(is.na(df5)) < nrow(df5)]
# Remove @
df1 = as.data.frame(lapply(df1, function(y) gsub("@", "", y)))
df2 = as.data.frame(lapply(df2, function(y) gsub("@", "", y)))
df3 = as.data.frame(lapply(df3, function(y) gsub("@", "", y)))
df4 = as.data.frame(lapply(df4, function(y) gsub("@", "", y)))
df5 = as.data.frame(lapply(df5, function(y) gsub("@", "", y)))
# Remove the whole comment
df1$Tweet = NULL
df2$Tweet = NULL
df3$Tweet = NULL
df4$Tweet = NULL
df5$Tweet = NULL
# Check structure
str(df1)
# Keep all rows with at least one tag
df1 = df1[!(df1$X1==""), ]
df2 = df2[!(df2$X1==""), ]
df3 = df3[!(df3$X1==""), ]
df4 = df4[!(df4$X1==""), ]
df5 = df5[!(df5$X1==""), ]
# Convert to character
library(dplyr)
df1 = df1 %>%
mutate_all(as.character)
df2 = df2 %>%
mutate_all(as.character)
df3 = df3 %>%
mutate_all(as.character)
df4 = df4 %>%
mutate_all(as.character)
df5 = df5 %>%
mutate_all(as.character)
# --------------------
# Check with a subset
bb_sub = df1[1:3,]; bb_sub
# From wide to large
long <- melt(bb_sub, id.vars = c("User")); long
long = na.omit(long); long
long = long[order(long$User), ][,c("User","value")]; long
# Calculate weigth
long = as.data.frame( long %>%
group_by(User, value) %>%
mutate(weight = n()) %>%
distinct(.))
names(long) = c("from","to","weight");long
# Graph with 6vertex 3 edges -> it's ok!
bb_graph <- graph_from_data_frame(long, directed = T)
# -------------
# From wide to large
long1 <- melt(df1, id.vars = c("User"))
long2 <- melt(df2, id.vars = c("User"))
long3 <- melt(df3, id.vars = c("User"))
long4 <- melt(df4, id.vars = c("User"))
long5 <- melt(df5, id.vars = c("User"))
# Remove Na's
long1 = na.omit(long1)
long2 = na.omit(long2)
long3 = na.omit(long3)
long4 = na.omit(long4)
long5 = na.omit(long5)
# Sort by name
library(gtools)
long1 = long1[order(long1$User), ][,c("User","value")]
long2 = long2[order(long2$User), ][,c("User","value")]
long3 = long3[order(long3$User), ][,c("User","value")]
long4 = long4[order(long4$User), ][,c("User","value")]
long5 = long5[order(long5$User), ][,c("User","value")]
# Check if order is correct
head(long5$User)
tail(long5$User)
# Calculate weigth
long1 = as.data.frame( long1 %>%
group_by(User, value) %>%
mutate(weight = n()) %>%
distinct(.))
long2 = as.data.frame( long2 %>%
group_by(User, value) %>%
mutate(weight = n()) %>%
distinct(.))
long3 = as.data.frame( long3 %>%
group_by(User, value) %>%
mutate(weight = n()) %>%
distinct(.))
long4 = as.data.frame(long4 %>%
group_by(User, value) %>%
mutate(weight = n()) %>%
distinct(.))
long5 = as.data.frame(long5 %>%
group_by(User, value) %>%
mutate(weight = n()) %>%
distinct(.))
# Set proper name columns
names(long1) = c("from","to","weight")
names(long2) = c("from","to","weight")
names(long3) = c("from","to","weight")
names(long4) = c("from","to","weight")
names(long5) = c("from","to","weight")
# Check max weight as an example
max(long1$weight); max(long2$weight); max(long3$weight); max(long4$weight); max(long5$weight)
# Privacy
# users1 <- sprintf("user%d",seq(1:nrow(long1)))
# Create csv files
csv1 = write.csv2(long1, 'July_1.csv',row.names = FALSE)
csv2 = write.csv2(long2, 'July_2.csv', row.names = FALSE)
csv3 = write.csv2(long3, 'July_3.csv', row.names = FALSE)
csv4 = write.csv2(long4, 'July_4.csv', row.names = FALSE)
csv5 = write.csv2(long5, 'July_5.csv', row.names = FALSE)
# End time
end_time <- Sys.time()
# Differences
start_time - end_time
|
8d10fdb9ff8e9213097384da9ea18f4ec1dc216c | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/BMRV/examples/hbmr_bin_data.Rd.R | a9ab7e18b92a5095e2bf7dd607ba688415c0fa06 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 145 | r | hbmr_bin_data.Rd.R | library(BMRV)
### Name: hbmr_bin_data
### Title: Example data for hbmr_bin
### Aliases: hbmr_bin_data
### ** Examples
data(hbmr_bin_data)
|
dc5d09506d3de5680b7f4271565a3c09d0598fdb | bef84bddcc5506f76c46a505a31582d85cf4ff0a | /R_Scripts/Connection.R | e10c01c8c72607ca20c7dba1110327550918585c | [] | no_license | amittuple/tuple_client | 4b1f3297de06ee67d44cc12922f5c5f70d6583a1 | 1dcf8b02cd5a3cb3b9be5da19a194103d7b0a7af | refs/heads/master | 2021-01-11T18:27:36.524014 | 2017-01-20T10:40:43 | 2017-01-20T10:40:43 | 79,550,201 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 508 | r | Connection.R | ############################################
#
# CONNECTION STRING
#
############################################
# Opens a PostgreSQL connection (`conn`) using credentials read from the
# DATABASE section of a YAML config. Assumes `yml.params` was populated
# earlier (presumably by a yaml loader in the calling script -- confirm)
# and that the PostgreSQL DBI driver package is attached.
print ('Connection.R')
drv <- dbDriver("PostgreSQL")
# Pull each credential out of the parsed YAML config
dbname.yaml = yml.params$DATABASE$NAME
host.yaml = yml.params$DATABASE$HOST
port.yaml = yml.params$DATABASE$PORT
user.yaml = yml.params$DATABASE$USER
password.yaml = yml.params$DATABASE$PASSWORD
# Set up DB connection (callers should dbDisconnect(conn) when finished)
conn <- dbConnect(drv, dbname=dbname.yaml, host=host.yaml, port=port.yaml, user=user.yaml, password=password.yaml)
|
64460f01f40c43182c3a95ed6184fd9318540c47 | 70d989d65229c9cefdef4926fba81f785c60255a | /man/bangladesh_population.Rd | 50af0b1d575a73d4ecd5541458355d7e1f3b85e0 | [] | no_license | adammwilson/DataScienceData | c334276270b824c13b9088ef392067ada7bf19a9 | 8074bfe7c695b46728091568bb08881fb26145a9 | refs/heads/master | 2021-01-19T15:55:04.858821 | 2017-11-01T17:17:40 | 2017-11-01T17:17:40 | 100,978,465 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,379 | rd | bangladesh_population.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bangladesh_population.R
\docType{data}
\name{bangladesh_population}
\alias{bangladesh_population}
\title{Human population density for Bangladesh}
\format{A raster layer}
\source{
\url{http://sedac.ciesin.columbia.edu/data/set/gpw-v4-population-density}
}
\usage{
bangladesh_population
}
\description{
Gridded Population of the World, Version 4 (GPWv4) Population Density consists of estimates of human population density based on counts consistent with national censuses and population registers, for the years 2015. A proportional allocation gridding algorithm, utilizing approximately 12.5 million national and sub-national administrative units, is used to assign population values to 30 arc-second (~1 km) grid cells. The population density grids are created by dividing the population count grids by the land area grids. The pixel values represent persons per square kilometer. The global raster from SEDAC was cropped and masked to the country of Bangladesh.
Citation: Center for International Earth Science Information Network - CIESIN - Columbia University. 2016. Gridded Population of the World, Version 4 (GPWv4): Population Density. Palisades, NY: NASA Socioeconomic Data and Applications Center (SEDAC). http://dx.doi.org/10.7927/H4NP22DQ. Accessed DAY MONTH YEAR.
}
\keyword{datasets}
|
97354e8c53face7c1888d7b27f529d11a2e3c43d | ee816a81ef6fbb360e679bb7b3930010c165610a | /man/add_score.Rd | a1e1b7d033f1f7646cf05bd8d9b7d1c3fefbc4b6 | [] | no_license | stephens999/dscr | 266c215e969bd1bdf27ee51b9d2e30d49baa87bc | 9bcecf5e0848b384a9e58331c890316ea9335df2 | refs/heads/master | 2021-01-19T02:10:48.104730 | 2018-06-29T19:26:07 | 2018-06-29T19:26:07 | 26,604,817 | 13 | 14 | null | 2018-06-29T17:12:24 | 2014-11-13T19:46:43 | R | UTF-8 | R | false | true | 597 | rd | add_score.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dsc.R
\name{add_score}
\alias{add_score}
\title{Add a score function to a dsc.}
\usage{
add_score(dsc, fn, name = "default_score", outputtype = "default_output")
}
\arguments{
\item{dsc}{The dsc to add the score to.}
\item{fn}{A score function}
\item{name}{String giving a name by which the score function is to
be known by.}
\item{outputtype}{The type of output the score function takes, as
generated by a outputParser.}
}
\value{
Nothing, but modifies the dsc environment.
}
\description{
Adds a score to a dsc.
}
|
e7419b9b0287da692c2f0130cb14bd1e34d1a990 | 0853134802bde59234f5b0bd49735b9b39042cfb | /Rsite/source/Rd-man-files/mx.symbol.LeakyReLU.Rd | 926af0951fd079a4ddc9fad973db23edd8c36dac | [] | no_license | mli/new-docs | 2e19847787cc84ced61319d36e9d72ba5e811e8a | 5230b9c951fad5122e8f5219c4187ba18bfaf28f | refs/heads/master | 2020-04-02T03:10:47.474992 | 2019-06-27T00:59:05 | 2019-06-27T00:59:05 | 153,949,703 | 13 | 15 | null | 2019-07-25T21:33:13 | 2018-10-20T21:24:57 | R | UTF-8 | R | false | true | 1,979 | rd | mx.symbol.LeakyReLU.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mxnet_generated.R
\name{mx.symbol.LeakyReLU}
\alias{mx.symbol.LeakyReLU}
\title{LeakyReLU:Applies Leaky rectified linear unit activation element-wise to the input.}
\usage{
mx.symbol.LeakyReLU(...)
}
\arguments{
\item{data}{NDArray-or-Symbol
Input data to activation function.}
\item{gamma}{NDArray-or-Symbol
Slope parameter for PReLU. Only required when act_type is 'prelu'. It should be either a vector of size 1, or the same size as the second dimension of data.}
\item{act.type}{{'elu', 'leaky', 'prelu', 'rrelu', 'selu'},optional, default='leaky'
Activation function to be applied.}
\item{slope}{float, optional, default=0.25
Init slope for the activation. (For leaky and elu only)}
\item{lower.bound}{float, optional, default=0.125
Lower bound of random slope. (For rrelu only)}
\item{upper.bound}{float, optional, default=0.334
Upper bound of random slope. (For rrelu only)}
\item{name}{string, optional
Name of the resulting symbol.}
}
\value{
out The result mx.symbol
}
\description{
Leaky ReLUs attempt to fix the "dying ReLU" problem by allowing a small `slope`
when the input is negative and has a slope of one when input is positive.
}
\details{
The following modified ReLU Activation functions are supported:
- *elu*: Exponential Linear Unit. `y = x > 0 ? x : slope * (exp(x)-1)`
- *selu*: Scaled Exponential Linear Unit. `y = lambda * (x > 0 ? x : alpha * (exp(x) - 1))` where
*lambda = 1.0507009873554804934193349852946* and *alpha = 1.6732632423543772848170429916717*.
- *leaky*: Leaky ReLU. `y = x > 0 ? x : slope * x`
- *prelu*: Parametric ReLU. This is same as *leaky* except that `slope` is learnt during training.
- *rrelu*: Randomized ReLU. same as *leaky* but the `slope` is uniformly and randomly chosen from
*[lower_bound, upper_bound)* for training, while fixed to be
*(lower_bound+upper_bound)/2* for inference.
Defined in src/operator/leaky_relu.cc:L65
}
|
b84aa6c0ced99f60a48e9ae4b6269284bc954d0c | 4ed931a8bcc1a80c448bfe20adc606c569477495 | /code/CSA statistical analysis.R | f9dcac4979e36b0efa72c281bbf7225ea2d04a45 | [] | no_license | jasonsckrabulis/sckrabulis_etal_wind_predicts_si | 858e4dc2faa01d87571ea8e468ec155fef0b6334 | 5c5ab8267f811c3dc46fd4d155bc584529613570 | refs/heads/main | 2023-08-24T22:45:50.083079 | 2022-07-27T19:21:18 | 2022-07-27T19:21:18 | 210,902,001 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 50,974 | r | CSA statistical analysis.R | # CSA lifeguard statisical analysis
# Libraries
library(car) # Anova() function
library(lme4) # glmer() function
library(PropCIs) # exactci() function
library(suncalc) # getSunlightTimes() function
library(chron) # times() function
# Datasets
# Datasets limited by temporal data
# No time of day effect dataset, 1 datapoint per day (use 'CSA data - among day variation.csv')
Lifeguard <- read.csv(file.choose(), header=TRUE)
# Remove days with zero swimmers & datapoints with missing temporal predictor values
NoZero <- subset(Lifeguard, SwimTotal!=0 & WindDir!="" & WaterTempC!="NA" & AvgPrevTemp7!="NA" & PrevSi7!="NA")
# Make SI and Swimmer counts equal when total SI count is > swimmer count
for(i in 1:length(NoZero$SwimTotal)){
if (NoZero$SiTotal[i] > NoZero$SwimTotal[i]){
NoZero$SwimTotal[i] <- NoZero$SiTotal[i]
}
}
# Time of day effect dataset, 2 datapoints per day (use 'CSA data - within day variation.csv')
LifeguardTod <- read.csv(file.choose(), header = TRUE)
# Remove days with zero swimmers & datapoints with missing temporal predictor values.
# NOTE(review): this subset filters on SwimNum while the cap below uses SwimTotal —
# confirm both columns exist and that SwimNum (per-period count) is the intended filter.
NoZeroTime <- subset(LifeguardTod, SwimNum!=0 & WindDir!="" & WaterTempC!="NA" & AvgPrevTemp7!="NA" & PrevSi7!="NA")
# Make SI and Swimmer counts equal when total SI count is > swimmer count
# (vectorized pmax() replaces the original 1:length() loop; safe on zero rows).
NoZeroTime$SwimTotal <- pmax(NoZeroTime$SwimTotal, NoZeroTime$SiTotal)
# Datasets not limited by temporal data
# No time of day effect dataset, 1 datapoint per day (a different subset of the full 'Lifeguard' data frame)
# Remove days with zero swimmers & datapoints with missing relevant values
LifeguardFull <- subset(Lifeguard, SwimTotal!=0 & WindDir!="")
# Make SI and Swimmer counts equal when total SI count is > swimmer count
# (vectorized pmax() replaces the original 1:length() loop; safe on zero rows).
LifeguardFull$SwimTotal <- pmax(LifeguardFull$SwimTotal, LifeguardFull$SiTotal)
# Paired wind directions for multiple comparisons: each subset holds the days on
# which the wind blew from one of two adjacent compass directions.
NoZeroNNE <- subset(LifeguardFull, WindDir %in% c("N", "NE"))
NoZeroNEE <- subset(LifeguardFull, WindDir %in% c("NE", "E"))
NoZeroESE <- subset(LifeguardFull, WindDir %in% c("E", "SE"))
NoZeroSES <- subset(LifeguardFull, WindDir %in% c("SE", "S"))
NoZeroSSW <- subset(LifeguardFull, WindDir %in% c("S", "SW"))
NoZeroSWW <- subset(LifeguardFull, WindDir %in% c("SW", "W"))
NoZeroWNW <- subset(LifeguardFull, WindDir %in% c("W", "NW"))
NoZeroNWN <- subset(LifeguardFull, WindDir %in% c("NW", "N"))
# Single-direction subsets for testing wind speed effects within each direction.
NoZeroN  <- subset(LifeguardFull, WindDir == "N")
NoZeroNE <- subset(LifeguardFull, WindDir == "NE")
NoZeroE  <- subset(LifeguardFull, WindDir == "E")
NoZeroSE <- subset(LifeguardFull, WindDir == "SE")
NoZeroS  <- subset(LifeguardFull, WindDir == "S")
NoZeroSW <- subset(LifeguardFull, WindDir == "SW")
NoZeroW  <- subset(LifeguardFull, WindDir == "W")
NoZeroNW <- subset(LifeguardFull, WindDir == "NW")
# Time of day effect dataset, 2 datapoints per day (a different subset of the full 'LifeguardTod' data frame)
LifeguardTodFull <- subset(LifeguardTod, SwimTotal!=0 & WindDir!="")
# Make SI and Swimmer counts equal when total SI count is > swimmer count
# (vectorized pmax() replaces the original 1:length() loop; safe on zero rows).
LifeguardTodFull$SwimTotal <- pmax(LifeguardTodFull$SwimTotal, LifeguardTodFull$SiTotal)
# Data for confidence interval calculation (only the columns the CI code needs)
conf <- subset(LifeguardFull, select=c("Date", "SiTotal", "WindDir", "WindVel", "SwimTotal"))
#_
# Models
# No time of day effect, 1 datapoint per day
# Forward selection using glmer()
# Response is a two-column (successes, failures) binomial: cbind(SI cases, non-SI swimmers).
# (1|CalDay) is a random intercept for calendar day; each candidate predictor is screened
# one at a time against this null structure and tested with car::Anova().
# Step 1 (Best model: model2 w/ WindDirGroup1)
model1 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WaterTempC+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model1)
model2 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model2)
model3 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup2+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model3)
# PrevSi1/3/5/7: SI counts over the previous 1, 3, 5, 7 days
model4 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~PrevSi1+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model4)
model5 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~PrevSi3+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model5)
model6 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~PrevSi5+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model6)
model7 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~PrevSi7+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model7)
# AvgPrevTemp1/3/5/7: average temperature over the previous 1, 3, 5, 7 days
model8 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~AvgPrevTemp1+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model8)
model9 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~AvgPrevTemp3+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model9)
model10 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~AvgPrevTemp5+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model10)
model11 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~AvgPrevTemp7+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model11)
model12 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindVel+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model12)
model13 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~SwimTotal+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model13)
# NWSprecip1/2: National Weather Service precipitation predictors
model14 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~NWSprecip1+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model14)
model15 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~NWSprecip2+(1|CalDay), family="binomial",data=NoZero, na.action=na.omit)
Anova(model15)
# Step 2: add each remaining candidate to the Step-1 winner (WindDirGroup1).
# Step 2 (Best model: model10 w/ WindDirGroup1+WindVel)
model1 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WaterTempC+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model1)
model2 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+PrevSi1+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model2)
model3 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+PrevSi3+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model3)
model4 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+PrevSi5+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model4)
model5 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+PrevSi7+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model5)
model6 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+AvgPrevTemp1+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model6)
model7 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+AvgPrevTemp3+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model7)
model8 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+AvgPrevTemp5+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model8) # warnings
# Check AvgPrevTemp5 effect with residuals and asin+sqrt transform lm (not significant)
# When glmer() warns (convergence issues), the effect is double-checked two ways:
# (1) regress residuals of the reduced model on the candidate; (2) refit as an lm
# on the arcsine-square-root-transformed proportion.
model8a <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model8a)
# NOTE(review): later steps plot residuals of the REDUCED model (model8a here);
# this line uses residuals(model8) — likely should be residuals(model8a); confirm.
plot(NoZero$AvgPrevTemp5, residuals(model8))
model8b <- lm(residuals(model8a)~NoZero$AvgPrevTemp5)
Anova(model8b)
model8asin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~WindDirGroup1+AvgPrevTemp5, data=NoZero, na.action=na.omit)
Anova(model8asin)
model9 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+AvgPrevTemp7+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model9)
model10 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model10)
model11 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+SwimTotal+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model11)
model12 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+NWSprecip1+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model12)
model13 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+NWSprecip2+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model13)
# Step 3: add each remaining candidate to the Step-2 winner (WindDirGroup1 + WindVel).
# Step 3 (Best model: model 11 w/ WindDirGroup1+WindVel+SwimTotal)
model1 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+WaterTempC+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model1)
model2 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+PrevSi1+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model2)
model3 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+PrevSi3+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model3)
model4 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+PrevSi5+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model4)
model5 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+PrevSi7+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model5)
model6 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+AvgPrevTemp1+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model6)
model7 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+AvgPrevTemp3+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model7) # warnings
# Check AvgPrevTemp3 effect with residuals and asin+sqrt transform lm (not significant)
# Fallback pattern for glmer convergence warnings: residual regression on the
# reduced model, plus an lm on the asin(sqrt(proportion)) transform.
model7a <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+(1|CalDay), family="binomial", data=NoZero)
Anova(model7a)
plot(NoZero$AvgPrevTemp3, residuals(model7a))
model7b <- lm(residuals(model7a)~NoZero$AvgPrevTemp3)
Anova(model7b)
model7asin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~WindDirGroup1+WindVel+AvgPrevTemp3, data=NoZero, na.action=na.omit)
Anova(model7asin)
model8 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+AvgPrevTemp5+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model8) # warnings
# Check AvgPrevTemp5 effect with residuals and asin+sqrt transform lm (not significant)
model8a <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+(1|CalDay), family="binomial", data=NoZero)
Anova(model8a)
plot(NoZero$AvgPrevTemp5, residuals(model8a))
model8b <- lm(residuals(model8a)~NoZero$AvgPrevTemp5)
Anova(model8b)
model8asin <-lm(I(asin(sqrt(SiTotal/SwimTotal)))~WindDirGroup1+WindVel+AvgPrevTemp5, data=NoZero, na.action=na.omit)
Anova(model8asin)
model9 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+AvgPrevTemp7+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model9) # warnings
# Check AvgPrevTemp7 effect with residuals and asin+sqrt transform lm (not significant)
model9a <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+(1|CalDay), family="binomial", data=NoZero)
Anova(model9a)
plot(NoZero$AvgPrevTemp7, residuals(model9a))
model9b <- lm(residuals(model9a)~NoZero$AvgPrevTemp7)
Anova(model9b)
model9asin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~WindDirGroup1+WindVel+AvgPrevTemp7, data=NoZero, na.action=na.omit)
Anova(model9asin)
# Interaction candidate: direction x speed
model10 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1*WindVel+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model10) # warnings
# Check WindDirGroup1 x WindVel interaction effect with asin+sqrt transform lm (significant)
model10asin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~WindDirGroup1*WindVel, data=NoZero, na.action=na.omit)
Anova(model10asin)
model11 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+SwimTotal+(1|CalDay),family="binomial",data=NoZero,na.action=na.omit)
Anova(model11)
model12 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+NWSprecip1+(1|CalDay),family="binomial",data=NoZero,na.action=na.omit)
Anova(model12)
model13 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+NWSprecip2+(1|CalDay),family="binomial",data=NoZero,na.action=na.omit)
Anova(model13)
# Step 4: add each remaining candidate to the Step-3 winner
# (WindDirGroup1 + WindVel + SwimTotal). No candidate improved the model.
# Step 4 (no significant predictors added)
model1 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+SwimTotal+WaterTempC+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model1) # warnings
# Check WaterTempC effect with residuals and asin+sqrt transform lm (not significant)
model1a <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+SwimTotal+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model1a)
plot(NoZero$WaterTempC, residuals(model1a))
model1b <- lm(residuals(model1a)~NoZero$WaterTempC)
Anova(model1b) # FIX: residual regression was fitted but never tested (matches Steps 2-3 pattern)
model1asin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~WindDirGroup1+WindVel+SwimTotal+WaterTempC, data=NoZero, na.action=na.omit)
Anova(model1asin)
model2 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+SwimTotal+PrevSi1+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model2)
model3 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+SwimTotal+PrevSi3+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model3)
model4 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+SwimTotal+PrevSi5+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model4)
model5 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+SwimTotal+PrevSi7+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model5)
model6 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+SwimTotal+AvgPrevTemp1+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model6) # warnings
# Check AvgPrevTemp1 with residuals and asin+sqrt transform lm (not significant)
model6a <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+SwimTotal+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model6a)
plot(NoZero$AvgPrevTemp1, residuals(model6a))
model6b <- lm(residuals(model6a)~NoZero$AvgPrevTemp1)
Anova(model6b) # FIX: added missing test of the residual regression
model6asin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~WindDirGroup1+WindVel+SwimTotal+AvgPrevTemp1, data=NoZero, na.action=na.omit)
Anova(model6asin)
model7 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+SwimTotal+AvgPrevTemp3+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model7) # warnings
# Check AvgPrevTemp3 effect with residuals and asin+sqrt transform lm (not significant)
model7a <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+SwimTotal+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model7a)
plot(NoZero$AvgPrevTemp3, residuals(model7a))
model7b <- lm(residuals(model7a)~NoZero$AvgPrevTemp3)
Anova(model7b) # FIX: added missing test of the residual regression
model7asin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~WindDirGroup1+WindVel+SwimTotal+AvgPrevTemp3, data=NoZero, na.action=na.omit)
Anova(model7asin)
model8 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+SwimTotal+AvgPrevTemp5+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model8) # warnings
# Check AvgPrevTemp5 effect with residuals and asin+sqrt transform lm (not significant)
model8a <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+SwimTotal+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model8a)
plot(NoZero$AvgPrevTemp5, residuals(model8a))
model8b <- lm(residuals(model8a)~NoZero$AvgPrevTemp5)
Anova(model8b) # FIX: added missing test of the residual regression
# FIX: was 'modell8asin' (typo), so the Anova() below silently tested the stale
# Step-2 'model8asin' object instead of this fit.
model8asin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~WindDirGroup1+WindVel+SwimTotal+AvgPrevTemp5, data=NoZero, na.action=na.omit)
Anova(model8asin)
model9 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+SwimTotal+AvgPrevTemp7+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model9) # warnings
# Check AvgPrevTemp7 effect with residuals and asin+sqrt transform lm (not significant)
model9a <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+SwimTotal+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model9a)
plot(NoZero$AvgPrevTemp7, residuals(model9a))
model9b <- lm(residuals(model9a)~NoZero$AvgPrevTemp7)
Anova(model9b) # FIX: added missing test of the residual regression
model9asin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~WindDirGroup1+WindVel+SwimTotal+AvgPrevTemp7, data=NoZero, na.action=na.omit)
Anova(model9asin)
model10 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1*WindVel+SwimTotal+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model10) # warnings
# Check WindDirGroup1*WindVel interaction effect with asin+sqrt transform lm (significant)
model10asin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~WindDirGroup1*WindVel+SwimTotal, data=NoZero, na.action=na.omit)
Anova(model10asin)
model11 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+SwimTotal+NWSprecip1+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model11) # warnings
# Check NWSprecip1 effect with residuals and asin+sqrt transform lm (not significant)
model11a <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+SwimTotal+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model11a)
model11b <- lm(residuals(model11a)~NoZero$NWSprecip1)
Anova(model11b) # FIX: added missing test of the residual regression
model11asin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~WindDirGroup1+WindVel+SwimTotal+NWSprecip1, data=NoZero, na.action=na.omit)
Anova(model11asin)
model12 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+SwimTotal+NWSprecip2+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model12)
#_
# Final model for daily SI risk (1 datapoint per day)
# Refit the selected model plus single-predictor and null models so Nagelkerke R2
# can be computed for each from log-likelihood ratios.
modelFull <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+SwimTotal+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(modelFull)
summary(modelFull)
modelDir <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(modelDir)
summary(modelDir)
modelVel <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindVel+(1|CalDay), family="binomial", data=NoZero,na.action=na.omit)
Anova(modelVel)
summary(modelVel)
modelSwim <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~SwimTotal+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(modelSwim)
summary(modelSwim)
modelNull <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(modelNull)
summary(modelNull)
# R2 from Nagelkerke 1991 using log-likelihood ratios
# totalCount = total binomial trials (swimmers), the n in Nagelkerke's formula
totalCount <- sum(NoZero$SiTotal)+sum(NoZero$SwimTotal-NoZero$SiTotal)
r2Full <- as.numeric((1-exp(-2/totalCount*(logLik(modelFull)-logLik(modelNull))))/(1-exp(2/totalCount*logLik(modelNull))))
r2Dir <- as.numeric((1-exp(-2/totalCount*(logLik(modelDir)-logLik(modelNull))))/(1-exp(2/totalCount*logLik(modelNull))))
r2Vel <- as.numeric((1-exp(-2/totalCount*(logLik(modelVel)-logLik(modelNull))))/(1-exp(2/totalCount*logLik(modelNull))))
r2Swim <- as.numeric((1-exp(-2/totalCount*(logLik(modelSwim)-logLik(modelNull))))/(1-exp(2/totalCount*logLik(modelNull))))
# Full model with WindDirGroup2 instead of WindDirGroup1
model10g2 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup2+WindVel+SwimTotal+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model10g2)
summary(model10g2)
# Final model with all wind directions included in WindDir variable
model10all <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDir+WindVel+SwimTotal+(1|CalDay), family="binomial", data=NoZero, na.action=na.omit)
Anova(model10all) # warnings
# glmer warned, so model10all is refit (and overwritten) as the asin+sqrt lm fallback
model10all <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~WindDir+WindVel+SwimTotal, data=NoZero, na.action=na.omit)
Anova(model10all)
#_
# Final model with all days included (1 datapoint per day)
# Same model set as above but on LifeguardFull (not limited by temporal data);
# each glmer has an asin(sqrt(proportion)) lm counterpart.
modelFull <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+WindVel+SwimTotal+(1|CalDay), family="binomial", data=LifeguardFull, na.action=na.omit)
Anova(modelFull)
summary(modelFull)
modelFullasin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~WindDirGroup1+WindVel+SwimTotal, data=LifeguardFull, na.action=na.omit)
Anova(modelFullasin)
modelDir <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+(1|CalDay), family="binomial", data=LifeguardFull, na.action=na.omit)
Anova(modelDir)
summary(modelDir)
modelDirasin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~WindDirGroup1, data=LifeguardFull, na.action=na.omit)
Anova(modelDirasin)
modelVel <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindVel+(1|CalDay), family="binomial", data=LifeguardFull, na.action=na.omit)
Anova(modelVel)
summary(modelVel)
modelVelasin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~WindVel, data=LifeguardFull, na.action=na.omit)
Anova(modelVelasin)
modelSwim <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~SwimTotal+(1|CalDay), family="binomial", data=LifeguardFull, na.action=na.omit)
Anova(modelSwim)
summary(modelSwim)
modelSwimasin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~SwimTotal, data=LifeguardFull, na.action=na.omit)
Anova(modelSwimasin)
modelNull <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~(1|CalDay), family="binomial", data=LifeguardFull, na.action=na.omit)
Anova(modelNull)
summary(modelNull)
#_
# R2 from Nagelkerke 1991 using log-likelihood ratios
totalCount <- sum(LifeguardFull$SiTotal)+sum(LifeguardFull$SwimTotal-LifeguardFull$SiTotal)
r2Full <- as.numeric((1-exp(-2/totalCount*(logLik(modelFull)-logLik(modelNull))))/(1-exp(2/totalCount*logLik(modelNull))))
r2Dir <- as.numeric((1-exp(-2/totalCount*(logLik(modelDir)-logLik(modelNull))))/(1-exp(2/totalCount*logLik(modelNull))))
r2Vel <- as.numeric((1-exp(-2/totalCount*(logLik(modelVel)-logLik(modelNull))))/(1-exp(2/totalCount*logLik(modelNull))))
r2Swim <- as.numeric((1-exp(-2/totalCount*(logLik(modelSwim)-logLik(modelNull))))/(1-exp(2/totalCount*logLik(modelNull))))
# R2 for asinsqrt models
r2Fullasin <- summary(modelFullasin)$r.squared
r2Dirasin <- summary(modelDirasin)$r.squared
r2Velasin <- summary(modelVelasin)$r.squared
r2Swimasin <- summary(modelSwimasin)$r.squared
# Full model with WindDirGroup2 instead of WindDirGroup1
model10g2 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup2+WindVel+SwimTotal+(1|CalDay), family="binomial", data=LifeguardFull, na.action=na.omit)
Anova(model10g2)
summary(model10g2)
# Final model with all wind directions included in WindDir variable
model10all <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDir+WindVel+SwimTotal+(1|CalDay), family="binomial", data=LifeguardFull, na.action=na.omit)
Anova(model10all) # warnings
# glmer warned, so model10all is refit (and overwritten) as the asin+sqrt lm fallback
model10all <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~WindDir+WindVel+SwimTotal, data=LifeguardFull, na.action=na.omit)
Anova(model10all)
#_
# Multiple comparisons (one datapoint per day)
# Subsets of paired directions to test for effects of direction.
# Each pair gets a binomial glmer and an asin(sqrt(proportion)) lm; both are
# tested with Anova(). FIX: the original repeated Anova() on the glmer after
# fitting each *asin model (copy-paste), so the lm fits were never tested —
# corrected to Anova(XXXasin), matching the NNE block's intended pattern.
NNE <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDir+(1|CalDay), family="binomial", data=NoZeroNNE, na.action=na.omit) # significant
Anova(NNE)
NNEasin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~WindDir, data=NoZeroNNE, na.action=na.omit) # significant
Anova(NNEasin)
NEE <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDir+(1|CalDay), family="binomial", data=NoZeroNEE, na.action=na.omit) # significant
Anova(NEE)
NEEasin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~WindDir, data=NoZeroNEE, na.action=na.omit) # significant
Anova(NEEasin)
ESE <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDir+(1|CalDay), family="binomial", data=NoZeroESE, na.action=na.omit) # not significant
Anova(ESE)
ESEasin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~WindDir, data=NoZeroESE, na.action=na.omit) # not significant
Anova(ESEasin)
SES <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDir+(1|CalDay), family="binomial", data=NoZeroSES, na.action=na.omit) # warnings
Anova(SES)
SESasin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~WindDir, data=NoZeroSES, na.action=na.omit) # not significant
Anova(SESasin)
SSW <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDir+(1|CalDay), family="binomial", data=NoZeroSSW, na.action=na.omit) # warnings
Anova(SSW)
SSWasin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~WindDir, data=NoZeroSSW, na.action=na.omit) # not significant
Anova(SSWasin)
SWW <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDir+(1|CalDay), family="binomial", data=NoZeroSWW, na.action=na.omit) # not significant
Anova(SWW)
SWWasin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~WindDir, data=NoZeroSWW, na.action=na.omit) # not significant
Anova(SWWasin)
WNW <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDir+(1|CalDay), family="binomial", data=NoZeroWNW, na.action=na.omit) # significant
Anova(WNW)
WNWasin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~WindDir, data=NoZeroWNW, na.action=na.omit) # significant
Anova(WNWasin)
NWN <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDir+(1|CalDay), family="binomial", data=NoZeroNWN, na.action=na.omit) # not significant
Anova(NWN)
NWNasin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~WindDir, data=NoZeroNWN, na.action=na.omit) # not significant
Anova(NWNasin)
# P values of each wind direction effect, corrected for multiple comparisons
# with Benjamini-Hochberg. coef[2,4] is the p-value of the direction contrast.
# Binomial models
pvalues_binomial <- c((summary(NNE)$coef[2,4]),(summary(NEE)$coef[2,4]),(summary(ESE)$coef[2,4]),
	(summary(SES)$coef[2,4]),(summary(SSW)$coef[2,4]),(summary(SWW)$coef[2,4]),(summary(WNW)$coef[2,4]),(summary(NWN)$coef[2,4]))
# [1] 1.097701e-04 1.735662e-07 5.596078e-01 9.756343e-01 9.057330e-01 8.782898e-01 1.246812e-02 6.935914e-01
pvalues_binomial_adjust <- p.adjust(pvalues_binomial, method="BH")
# [1] 0.0105472080 0.0000226189 0.8674859200 0.9999842000 0.9999842000 0.9999842000 0.1234272000 0.8674859200
# Asin+sqrt models
pvalues_asinsqrt <- c((summary(NNEasin)$coef[2,4]),(summary(NEEasin)$coef[2,4]),(summary(ESEasin)$coef[2,4]),
	(summary(SESasin)$coef[2,4]),(summary(SSWasin)$coef[2,4]),(summary(SWWasin)$coef[2,4]),(summary(WNWasin)$coef[2,4]),(summary(NWNasin)$coef[2,4]))
# [1] 3.945784e-04 3.125614e-05 1.991537e-01 1.458891e-01 1.991537e-01 6.095318e-01 2.913011e-02 8.359805e-01
pvalues_asinsqrt_adjust <- p.adjust(pvalues_asinsqrt, method="BH")
# [1] 0.0015783134 0.0002500491 0.2655382014 0.2655382014 0.2655382014 0.6966077830 0.0776802977 0.8359805237
#_
# Wind speed effects within each single wind direction (data not limited by
# temporal predictors). For every direction: a binomial glmer with a CalDay
# random intercept, plus an asin(sqrt(proportion)) lm as a check.
N <- glmer(cbind(SiTotal, SwimTotal - SiTotal) ~ WindVel + (1 | CalDay),
	data = NoZeroN, family = "binomial", na.action = na.omit) # warnings
Anova(N)
Nasin <- lm(I(asin(sqrt(SiTotal / SwimTotal))) ~ WindVel,
	data = NoZeroN, na.action = na.omit) # significant
Anova(Nasin)
NE <- glmer(cbind(SiTotal, SwimTotal - SiTotal) ~ WindVel + (1 | CalDay),
	data = NoZeroNE, family = "binomial", na.action = na.omit) # warnings
Anova(NE)
NEasin <- lm(I(asin(sqrt(SiTotal / SwimTotal))) ~ WindVel,
	data = NoZeroNE, na.action = na.omit) # not significant
Anova(NEasin)
E <- glmer(cbind(SiTotal, SwimTotal - SiTotal) ~ WindVel + (1 | CalDay),
	data = NoZeroE, family = "binomial", na.action = na.omit)
Anova(E)
Easin <- lm(I(asin(sqrt(SiTotal / SwimTotal))) ~ WindVel,
	data = NoZeroE, na.action = na.omit) # not significant
Anova(Easin)
SE <- glmer(cbind(SiTotal, SwimTotal - SiTotal) ~ WindVel + (1 | CalDay),
	data = NoZeroSE, family = "binomial", na.action = na.omit)
Anova(SE)
SEasin <- lm(I(asin(sqrt(SiTotal / SwimTotal))) ~ WindVel,
	data = NoZeroSE, na.action = na.omit) # not significant
Anova(SEasin)
S <- glmer(cbind(SiTotal, SwimTotal - SiTotal) ~ WindVel + (1 | CalDay),
	data = NoZeroS, family = "binomial", na.action = na.omit) # converge failure
Anova(S)
Sasin <- lm(I(asin(sqrt(SiTotal / SwimTotal))) ~ WindVel,
	data = NoZeroS, na.action = na.omit)
Anova(Sasin) # residual SS = 0
SW <- glmer(cbind(SiTotal, SwimTotal - SiTotal) ~ WindVel + (1 | CalDay),
	data = NoZeroSW, family = "binomial", na.action = na.omit)
Anova(SW)
SWasin <- lm(I(asin(sqrt(SiTotal / SwimTotal))) ~ WindVel,
	data = NoZeroSW, na.action = na.omit) # not significant
Anova(SWasin)
W <- glmer(cbind(SiTotal, SwimTotal - SiTotal) ~ WindVel + (1 | CalDay),
	data = NoZeroW, family = "binomial", na.action = na.omit)
Anova(W)
Wasin <- lm(I(asin(sqrt(SiTotal / SwimTotal))) ~ WindVel,
	data = NoZeroW, na.action = na.omit) # not significant
Anova(Wasin)
NW <- glmer(cbind(SiTotal, SwimTotal - SiTotal) ~ WindVel + (1 | CalDay),
	data = NoZeroNW, family = "binomial", na.action = na.omit)
Anova(NW)
NWasin <- lm(I(asin(sqrt(SiTotal / SwimTotal))) ~ WindVel,
	data = NoZeroNW, na.action = na.omit) # not significant
Anova(NWasin)
#_
# SI cases vs wind speed: 1 panel, N, NE, E with restricted lines to available data
# lm models of the raw SI proportion vs wind speed, one per direction.
# NOTE(review): lmNW, lmS, lmSE, lmSW and lmW are fitted but never plotted below —
# presumably kept for inspection; confirm before removing.
lmE <- lm(I(SiTotal/SwimTotal)~WindVel, data=NoZeroE)
lmN <- lm(I(SiTotal/SwimTotal)~WindVel, data=NoZeroN)
lmNE <- lm(I(SiTotal/SwimTotal)~WindVel, data=NoZeroNE)
lmNW <- lm(I(SiTotal/SwimTotal)~WindVel, data=NoZeroNW)
lmS <- lm(I(SiTotal/SwimTotal)~WindVel, data=NoZeroS)
lmSE <- lm(I(SiTotal/SwimTotal)~WindVel, data=NoZeroSE)
lmSW <- lm(I(SiTotal/SwimTotal)~WindVel, data=NoZeroSW)
lmW <- lm(I(SiTotal/SwimTotal)~WindVel, data=NoZeroW)
# lm plot: each regression line is drawn with segments() so it spans only the
# observed wind-speed range for that direction (no extrapolation).
# NE: solid circles (pch=16), solid line
plot(NoZeroNE$WindVel, NoZeroNE$SiTotal/NoZeroNE$SwimTotal, pch=16, xlim=c(0,20), ylim=c(-.1,1), ylab="Proportion SI cases", xlab="Wind speed (mph)")
segments(min(NoZeroNE$WindVel),as.numeric(coef(lmNE)[1])+(as.numeric(coef(lmNE)[2])*min(NoZeroNE$WindVel)),max(NoZeroNE$WindVel),as.numeric(coef(lmNE)[1])+(as.numeric(coef(lmNE)[2])*max(NoZeroNE$WindVel)))
# N: open circles (pch=1), dashed line (lty=2)
points(NoZeroN$WindVel, NoZeroN$SiTotal/NoZeroN$SwimTotal,pch=1)
segments(min(NoZeroN$WindVel),as.numeric(coef(lmN)[1])+(as.numeric(coef(lmN)[2])*min(NoZeroN$WindVel)),max(NoZeroN$WindVel),as.numeric(coef(lmN)[1])+(as.numeric(coef(lmN)[2])*max(NoZeroN$WindVel)),lty=2)
# E: 'x' symbols (pch=4 is a cross, not an open square), dotted line (lty=3)
points(NoZeroE$WindVel, NoZeroE$SiTotal/NoZeroE$SwimTotal,pch=4)
segments(min(NoZeroE$WindVel),as.numeric(coef(lmE)[1])+(as.numeric(coef(lmE)[2])*min(NoZeroE$WindVel)),max(NoZeroE$WindVel),as.numeric(coef(lmE)[1])+(as.numeric(coef(lmE)[2])*max(NoZeroE$WindVel)),lty=3)
#_
# Models
# Time of day effect, 2 datapoints per day
# Forward selection using glmer()
# Step 1 (Best model: model13 w/ TimeOfDay)
model1 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WaterTempC+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model1)
model2 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model2)
model3 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup2+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model3)
model4 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~PrevSi1+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model4) # warnings
# Check PrevSi1 effect with residuals and asin+sqrt transform lm (not significant)
model4a <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~(1|CalDay), family="binomial", data=NoZeroTime)
plot(NoZeroTime$PrevSi1, residuals(model4a))
model4b <- lm(residuals(model4a)~NoZeroTime$PrevSi1)
Anova(model4b)
model4asin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~PrevSi1, data=NoZeroTime, na.action=na.omit)
Anova(model4asin)
model5 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~PrevSi3+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model5)
model6 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~PrevSi5+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model6)
model7 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~PrevSi7+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model7)
model8 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~AvgPrevTemp1+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model8)
model9 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~AvgPrevTemp3+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model9)
model10 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~AvgPrevTemp5+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model10)
model11 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~AvgPrevTemp7+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model11)
model12 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindVel+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model12)
model13 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model13)
model14 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~SwimTotal+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model14)
model15 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~NWSprecip1+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model15)
model16 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~NWSprecip2+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model16)
# Step 2 (Best model: model2 w/ TimeOfDay+WindDirGroup1)
model1 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WaterTempC+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model1)
model2 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model2)
model3 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup2+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model3)
model4 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+PrevSi1+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model4)
model5 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+PrevSi3+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model5)
model6 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+PrevSi5+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model6)
model7 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+PrevSi7+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model7)
model8 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+AvgPrevTemp1+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model8)
model9 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+AvgPrevTemp3+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model9)
model10 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+AvgPrevTemp5+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model10)
model11 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+AvgPrevTemp7+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model11)
model12 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindVel+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model12) # warnings
# Check WindVel effect with residuals and asin+sqrt transform lm (significant)
model12a <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+(1|CalDay), family="binomial", data=NoZeroTime)
plot(NoZeroTime$WindVel, residuals(model12a))
model12b <- lm(residuals(model12a)~NoZeroTime$WindVel)
Anova(model12b)
model12asin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~TimeOfDay+WindVel, data=NoZeroTime, na.action=na.omit)
Anova(model12asin)
model13 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+SwimTotal+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model13) # warnings
# Check SwimTotal effect with residuals and asin+sqrt transform lm (not significant)
model13a <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+(1|CalDay), family="binomial", data=NoZeroTime)
plot(NoZeroTime$SwimTotal, residuals(model13a))
model13b <- lm(residuals(model13a)~NoZeroTime$SwimTotal)
model13asin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~TimeOfDay+SwimTotal, data=NoZeroTime, na.action=na.omit)
Anova(model13asin)
model14 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+NWSprecip1+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model14)
model15 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+NWSprecip2+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model15)
# Step 3 (Best model: model10 w/ TimeOfDay+WindDirGroup1+WindVel)
model1 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+WaterTempC+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model1)
model2 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+PrevSi1+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model2)
model3 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+PrevSi3+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model3)
model4 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+PrevSi5+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model4)
model5 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+PrevSi7+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model5)
model6 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+AvgPrevTemp1+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model6)
model7 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+AvgPrevTemp3+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model7)
model8 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+AvgPrevTemp5+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model8) # warnings
# Check AvgPrevTemp5 effect with residuals and asin+sqrt transform lm
# (previous comment said "WindVel"; the term being tested here is AvgPrevTemp5)
model8a <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+(1|CalDay), family="binomial", data=NoZeroTime)
# Bug fix: the next two lines previously referenced model10a, which is only
# defined further below, so the residual check used the wrong reduced model
# (and would error on a fresh run). Use the model8a fit from the line above.
plot(NoZeroTime$AvgPrevTemp5, residuals(model8a))
model8b <- lm(residuals(model8a)~NoZeroTime$AvgPrevTemp5)
Anova(model8b)
model8asin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~TimeOfDay+WindDirGroup1+AvgPrevTemp5, data=NoZeroTime, na.action=na.omit)
Anova(model8asin)
model9 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+AvgPrevTemp7+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model9)
model10 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+WindVel+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model10) # warnings
# Check WindVel effect with residuals and asin+sqrt transform lm (significant)
model10a <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+(1|CalDay), family="binomial",data=NoZeroTime)
plot(NoZeroTime$WindVel, residuals(model10a))
model10b <- lm(residuals(model10a)~NoZeroTime$WindVel)
Anova(model10b)
model10asin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~TimeOfDay+WindDirGroup1+WindVel, data=NoZeroTime, na.action=na.omit)
Anova(model10asin)
model11 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+SwimTotal+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model11) # warnings
# Check SwimTotal effect with residuals and asin+sqrt transform lm (not significant)
model11a <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+(1|CalDay), family="binomial",data=NoZeroTime)
plot(NoZeroTime$SwimTotal, residuals(model11a))
model11b <- lm(residuals(model11a)~NoZeroTime$SwimTotal)
model11asin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~TimeOfDay+WindDirGroup1+SwimTotal, data=NoZeroTime, na.action=na.omit)
Anova(model11asin)
model12 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+NWSprecip1+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model12)
model13 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+NWSprecip2+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model13)
# Step 4 (No additional predictors significant)
model1 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+WindVel+WaterTempC+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model1) # warnings
# Check WaterTempC effect with residuals and asin+sqrt transform lm (not significant)
model1a <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+WindVel+(1|CalDay), family="binomial", data=NoZeroTime)
plot(NoZeroTime$WaterTempC, residuals(model1a))
model1b <- lm(residuals(model1a)~NoZeroTime$WaterTempC)
Anova(model1b)
model1asin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~TimeOfDay+WindDirGroup1+WindVel+WaterTempC, data=NoZeroTime, na.action=na.omit)
Anova(model1asin)
model2 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+WindVel+PrevSi1+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model2) # warnings
# Check PrevSi1 effect with residuals and asin+sqrt transform lm (not significant)
model2a <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+WindVel+(1|CalDay), family="binomial", data=NoZeroTime)
plot(NoZeroTime$PrevSi1, residuals(model2a))
# Bug fix: this regression previously used residuals(model1a); model2a was
# fitted for exactly this check but never used. (model1a happens to have the
# same reduced formula, so results should be near-identical, but the correct
# object is model2a.)
model2b <- lm(residuals(model2a)~NoZeroTime$PrevSi1)
Anova(model2b)
model2asin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~TimeOfDay+WindDirGroup1+WindVel+PrevSi1, data=NoZeroTime, na.action=na.omit)
Anova(model2asin)
model3 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+WindVel+PrevSi3+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model3)
model4 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+WindVel+PrevSi5+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model4)
model5 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+WindVel+PrevSi7+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model5)
model6 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+WindVel+AvgPrevTemp1+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model6)
model7 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+WindVel+AvgPrevTemp3+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model7) # warnings
# Check AvgPrevTemp3 effect with residuals and asin+sqrt transform lm (not significant)
model7a <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+WindVel+(1|CalDay), family="binomial",data=NoZeroTime)
plot(NoZeroTime$AvgPrevTemp3, residuals(model7a))
model7b <- lm(residuals(model7a)~NoZeroTime$AvgPrevTemp3)
Anova(model7b)
model7asin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~TimeOfDay+WindDirGroup1+WindVel+AvgPrevTemp3, data=NoZeroTime, na.action=na.omit)
Anova(model7asin)
model8 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+WindVel+AvgPrevTemp5+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model8)
model9 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+WindVel+AvgPrevTemp7+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model9)
model10 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1*WindVel+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model10) # warnings
# Check WindDirGroup1 x WindVel interaction effect with asin+sqrt transformed lm (significant)
model10asin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~TimeOfDay+WindDirGroup1*WindVel, data=NoZeroTime, na.action=na.omit)
Anova(model10asin)
model11 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+WindVel+SwimTotal+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model11) # warnings
# Check SwimTotal effect with residuals and asin+sqrt transform lm (not significant)
model11a <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+WindVel+(1|CalDay), family="binomial",data=NoZeroTime)
plot(NoZeroTime$SwimTotal, residuals(model11a))
model11b <- lm(residuals(model11a)~NoZeroTime$SwimTotal)
model11asin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~TimeOfDay+WindDirGroup1+WindVel+SwimTotal, data=NoZeroTime, na.action=na.omit)
Anova(model11asin)
model12 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+WindVel+NWSprecip1+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model12)
model13 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+WindVel+NWSprecip2+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model13)
#_
# Final model for time of day SI risk
modelFull <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+WindVel+(1|CalDay),family="binomial",data=NoZeroTime,na.action=na.omit)
Anova(modelFull) # warnings
summary(modelFull)
modelFullasin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~TimeOfDay+WindDirGroup1+WindVel, data=NoZeroTime, na.action=na.omit)
Anova(modelFullasin)
modelTod <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(modelTod)
summary(modelTod)
modelTodasin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~TimeOfDay, data=NoZeroTime, na.action=na.omit)
Anova(modelTodasin)
modelDir <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(modelDir)
summary(modelDir)
modelDirasin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~WindDirGroup1, data=NoZeroTime, na.action=na.omit)
Anova(modelDirasin)
modelVel <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindVel+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(modelVel)
summary(modelVel)
modelVelasin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~WindVel, data=NoZeroTime, na.action=na.omit)
Anova(modelVelasin)
modelNull <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(modelNull)
summary(modelNull)
modelNullasin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~0, data=NoZeroTime, na.action=na.omit)
# R2 from Nagelkerke 1991 using log-likelihood ratios for binomial
totalCount <- sum(NoZeroTime$SiTotal)+sum(NoZeroTime$SwimTotal-NoZeroTime$SiTotal)
r2Full <- as.numeric((1-exp(-2/totalCount*(logLik(modelFull)-logLik(modelNull))))/(1-exp(2/totalCount*logLik(modelNull))))
r2Tod <- as.numeric((1-exp(-2/totalCount*(logLik(modelTod)-logLik(modelNull))))/(1-exp(2/totalCount*logLik(modelNull))))
r2Dir <- as.numeric((1-exp(-2/totalCount*(logLik(modelDir)-logLik(modelNull))))/(1-exp(2/totalCount*logLik(modelNull))))
r2Vel <- as.numeric((1-exp(-2/totalCount*(logLik(modelVel)-logLik(modelNull))))/(1-exp(2/totalCount*logLik(modelNull))))
# R2 for asinsqrt models
r2Fullasin <- summary(modelFullasin)$r.squared
r2Todasin <- summary(modelTodasin)$r.squared
r2Dirasin <- summary(modelDirasin)$r.squared
r2Velasin <- summary(modelVelasin)$r.squared
# Full model with WindDirGroup2 instead of WindDirGroup1
model10g2 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup2+WindVel+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model10g2)
summary(model10g2)
# Final model with all wind directions included in WindDir variable
model10all <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDir+WindVel+(1|CalDay), family="binomial", data=NoZeroTime, na.action=na.omit)
Anova(model10all) # warnings
model10allasin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~TimeOfDay+WindDir+WindVel, data=NoZeroTime, na.action=na.omit)
Anova(model10allasin)
#_________________________________
# Final model with all datapoints
modelFull <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup1+WindVel+(1|CalDay), family="binomial", data=LifeguardTodFull, na.action=na.omit)
Anova(modelFull)
summary(modelFull)
modelFullasin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~TimeOfDay+WindDirGroup1+WindVel, data=LifeguardTodFull, na.action=na.omit)
Anova(modelFullasin)
modelTod <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+(1|CalDay), family="binomial", data=LifeguardTodFull, na.action=na.omit)
Anova(modelTod)
summary(modelTod)
modelTodasin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~TimeOfDay, data=LifeguardTodFull, na.action=na.omit)
Anova(modelTodasin)
modelDir <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindDirGroup1+(1|CalDay), family="binomial", data=LifeguardTodFull, na.action=na.omit)
Anova(modelDir)
summary(modelDir)
modelDirasin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~WindDirGroup1, data=LifeguardTodFull, na.action=na.omit)
Anova(modelDirasin)
modelVel <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~WindVel+(1|CalDay), family="binomial", data=LifeguardTodFull, na.action=na.omit)
Anova(modelVel)
summary(modelVel)
modelVelasin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~WindVel, data=LifeguardTodFull, na.action=na.omit)
Anova(modelVelasin)
modelNull <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~(1|CalDay), family="binomial", data=LifeguardTodFull, na.action=na.omit)
Anova(modelNull)
summary(modelNull)
modelNullasin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~0, data=LifeguardTodFull, na.action=na.omit)
# R2 from Nagelkerke 1991 using log-likelihood ratios for binomial
totalCount <- sum(LifeguardTodFull$SiTotal)+sum(LifeguardTodFull$SwimTotal-LifeguardTodFull$SiTotal)
r2Full <- as.numeric((1-exp(-2/totalCount*(logLik(modelFull)-logLik(modelNull))))/(1-exp(2/totalCount*logLik(modelNull))))
r2Tod <- as.numeric((1-exp(-2/totalCount*(logLik(modelTod)-logLik(modelNull))))/(1-exp(2/totalCount*logLik(modelNull))))
r2Dir <- as.numeric((1-exp(-2/totalCount*(logLik(modelDir)-logLik(modelNull))))/(1-exp(2/totalCount*logLik(modelNull))))
r2Vel <- as.numeric((1-exp(-2/totalCount*(logLik(modelVel)-logLik(modelNull))))/(1-exp(2/totalCount*logLik(modelNull))))
# R2 for asinsqrt models
r2Fullasin <- summary(modelFullasin)$r.squared
r2Todasin <- summary(modelTodasin)$r.squared
r2Dirasin <- summary(modelDirasin)$r.squared
r2Velasin <- summary(modelVelasin)$r.squared
# Full model with WindDirGroup2 instead of WindDirGroup1
model10g2 <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDirGroup2+WindVel+(1|CalDay), family="binomial", data=LifeguardTodFull, na.action=na.omit)
Anova(model10g2)
summary(model10g2)
# Final model with all wind directions included in WindDir variable
model10all <- glmer(cbind(SiTotal,SwimTotal-SiTotal)~TimeOfDay+WindDir+WindVel+(1|CalDay), family="binomial", data=LifeguardTodFull, na.action=na.omit)
Anova(model10all) # warnings
model10allasin <- lm(I(asin(sqrt(SiTotal/SwimTotal)))~TimeOfDay+WindDir+WindVel, data=LifeguardTodFull, na.action=na.omit)
Anova(model10allasin)
#_
# Generate Clopper-Pearson "exact" confidence intervals for binomial proportion
# data: SiTotal "successes" out of SwimTotal trials, one interval per row of conf.
SIcases <- conf$SiTotal
Swimmers <- conf$SwimTotal
n_obs <- length(SIcases)
lower <- numeric(n_obs)
upper <- numeric(n_obs)
ci_width <- numeric(n_obs)  # upper - lower; renamed from `diff` to avoid masking base::diff
# Calculate confidence boundaries and the difference to plot stacked area in MS Excel.
# seq_len() is safe when conf has zero rows (1:length(x) would yield c(1, 0)).
for (i in seq_len(n_obs)) {
  # Call exactci() once per observation instead of twice.
  ci <- exactci(SIcases[i], Swimmers[i], 0.95)$conf.int
  lower[i] <- ci[1]
  upper[i] <- ci[2]
  ci_width[i] <- ci[2] - ci[1]
}
# data.frame() combines a data frame with vectors directly; the previous
# data.frame(cbind(...)) wrapper was redundant. Column name "diff" is kept
# for backward compatibility with downstream consumers.
df <- data.frame(conf, numSI = SIcases, numSwim = Swimmers, lower = lower, upper = upper, diff = ci_width)
# Write data frame to a .csv for outside use
# be sure to specify your file path
write.csv(df, "your_path_here", row.names = FALSE)
|
396c0eb5e7ff683790d02052773995a1d25584bf | 7676c2d5b77b588adde0e787a501dac27ad8efcd | /work/r데이터분석_예제파일/예제/9_08.R | 8897bca59b3e1bfe19af33790f5a8e45fe8df197 | [] | no_license | bass4th/R | a834ce1e455ca2c018364a48ea8ba0fbe8bf95e9 | fb774770140a5792d9916fc3782708e09c12273d | refs/heads/master | 2020-05-05T09:02:52.210737 | 2019-04-06T19:48:37 | 2019-04-06T19:48:37 | 179,888,937 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 70 | r | 9_08.R | plot(db_result, iris2[c(1,4)])
plotcluster(iris2, db_result$cluster)
|
0e1319267d688e165c21f95ad6007ed1f47cdf32 | 0a87ed76aaa52167c3dcc2d63c8ebf3829c3da1f | /code/run_jags_model_multi_ricker_sparse.R | 307c8dd0f77857f55ea13191c0743307808ea66f | [] | no_license | aterui/public-proj_fishery-stability | 86aebbb416006b30b24f06579739b9b2b0cc3cdc | 17a9ff57e6eaa5767eb291f4d0c4f3de20b43a67 | refs/heads/master | 2023-07-24T17:52:40.480771 | 2022-12-30T01:27:38 | 2022-12-30T01:27:38 | 355,669,677 | 0 | 0 | null | 2022-09-18T12:57:46 | 2021-04-07T20:09:10 | TeX | UTF-8 | R | false | false | 6,625 | r | run_jags_model_multi_ricker_sparse.R |
# setup -------------------------------------------------------------------
rm(list = ls())
source(here::here("code/library.R"))
source(here::here("code/set_functions.R"))
source(here::here("code/data_fmt_fishdata.R"))
# common setup ------------------------------------------------------------
## mcmc setup ####
n_ad <- 100
n_iter <- 1.0E+4
n_thin <- max(3, ceiling(n_iter / 250))
n_burn <- ceiling(max(10, n_iter/2))
n_chain <- 4
n_sample <- ceiling(n_iter / n_thin)
# Per-chain initial values: pin the RNG type and give each chain a distinct
# seed (1, 11, 21, ...) so MCMC runs are reproducible. A single lapply()
# replaces the previous replicate() + mutate-in-a-loop pattern, and
# seq_len() is the safe form of 1:n_chain.
inits <- lapply(seq_len(n_chain), function(j) {
  list(.RNG.name = "base::Mersenne-Twister",
       .RNG.seed = (j - 1) * 10 + 1)
})
## model file ####
m <- read.jagsfile("code/model_multi_ricker_sparse.R")
## parameters ####
para <- c("p0",
"log_r",
"sigma_time",
"sigma_obs",
"sigma",
"alpha",
"rho")
# jags --------------------------------------------------------------------
## data screening
unique_site <- df_complete %>%
mutate(p = ifelse(abundance > 0, 1, 0)) %>%
group_by(site_id, taxon) %>%
summarize(freq = sum(p, na.rm = T),
site_id = unique(site_id)) %>%
filter(freq > 4) %>%
group_by(site_id) %>%
summarize(n_taxa = n_distinct(taxon)) %>%
filter(n_taxa > 1) %>%
pull(site_id)
df_est <- foreach(i = seq_len(length(unique_site)),
.combine = bind_rows) %do% {
## subset data ####
df_subset <- df_complete %>%
dplyr::filter(site_id == unique_site[i]) %>%
mutate(p = ifelse(abundance > 0, 1, 0)) %>%
group_by(taxon) %>%
summarize(freq = sum(p, na.rm = T),
site_id = unique(site_id)) %>%
filter(freq > 4) %>%
dplyr::select(taxon, site_id) %>%
left_join(df_complete,
by = c("taxon", "site_id")) %>%
mutate(taxon_id = as.numeric(factor(.$taxon)))
## data for jags ####
d_jags <- list(N = df_subset$abundance,
Year = df_subset$year - min(df_subset$year) + 1,
Species = as.numeric(factor(df_subset$taxon)),
Area = df_subset$area,
Nsample = nrow(df_subset),
Nyr1 = min(df_subset$year, na.rm = T) - 1999 + 1,
Nyr = max(df_subset$year, na.rm = T) - min(df_subset$year, na.rm = T) + 1,
Nsp = n_distinct(df_subset$taxon),
Nf = 2,
W = diag(n_distinct(df_subset$taxon)),
Q = 1)
## run jags ####
post <- run.jags(m$model,
monitor = para,
data = d_jags,
n.chains = n_chain,
inits = inits,
method = "parallel",
burnin = n_burn,
sample = n_sample,
adapt = n_ad,
thin = n_thin,
n.sims = 4,
module = "glm")
mcmc_summary <- MCMCvis::MCMCsummary(post$mcmc)
print(max(mcmc_summary$Rhat, na.rm = T))
while(max(mcmc_summary$Rhat, na.rm = T) >= 1.1) {
post <- extend.jags(post,
burnin = 0,
sample = n_sample,
adapt = n_ad,
thin = n_thin,
n.sims = n_chain,
combine = TRUE)
mcmc_summary <- MCMCvis::MCMCsummary(post$mcmc)
print(max(mcmc_summary$Rhat, na.rm = T))
}
## reformat mcmc_summary ####
n_total_mcmc <- (post$sample / n_sample) * n_iter + n_burn
mcmc_summary <- mcmc_summary %>%
mutate(param = rownames(.),
site = unique_site[i]) %>%
tibble() %>%
rename(median = `50%`,
lower = `2.5%`,
upper = `97.5%`) %>%
mutate(param_name = str_remove(param,
pattern = "\\[.{1,}\\]"),
x = fn_brrm(param),
n_total_mcmc = n_total_mcmc,
n_sample = post$sample,
n_thin = n_thin,
n_burn = n_burn,
n_chain = n_chain) %>%
separate(col = x,
into = c("x1", "x2"),
sep = ",",
fill = "right",
convert = TRUE) %>%
filter(!str_detect(param, "loglik")) %>%
left_join(distinct(df_subset, taxon, taxon_id),
by = c("x1" = "taxon_id")) %>%
left_join(distinct(df_subset, taxon, taxon_id),
by = c("x2" = "taxon_id"))
## trace plot output ####
MCMCvis::MCMCtrace(post$mcmc,
filename = paste0("output/mcmc_trace_multi_ricker_sparse_",
unique_site[i]))
print(paste(i, "/", length(unique_site)))
return(mcmc_summary)
}
# export ------------------------------------------------------------------
saveRDS(df_est,
file = here::here("output/summary_multi_ricker_sparse.rds"))
|
7ad3382a6d2f84fc70e672976ff234d7fd2c9f5e | ade78bf90676194b00b17ec4d24953d6aa0df9a7 | /man/normsegs.Rd | f2379716a91fc5a5140d80936a9daeb6a37c3284 | [] | no_license | cran/CNprep | f9bec3436f1bb03205df293cfca32ccf5d5b6362 | fb1c7cba7a7a8e51731e9a4c4ad8e1c635a7a0b6 | refs/heads/master | 2022-05-31T02:39:59.013791 | 2022-05-24T02:10:02 | 2022-05-24T02:10:02 | 17,678,314 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,294 | rd | normsegs.Rd | \name{normsegs}
\alias{normsegs}
\docType{data}
\title{
A reference set of segments
}
\description{
A table of segment lengths and log copy number ratios for a large set of
human diploid genomes.
}
\usage{data(normsegs)}
\format{
A data matrix with 43497 rows/segments and 2 columns/variables.
\describe{
\item{\code{length}}{a numeric vector of segment genomic length}
\item{\code{segmedian}}{a numeric vector of segment median computed
from log copy number ratio}
}
}
\details{
The table originates in a set of copy number profiles of over a 1000
individuals, obtained using Representational Oligonucleotide Microarray
Analysis (ROMA) technology. To ensure ploidy of 2 segments from X and Y
chromosomes and segemnts shorter than 5Mb were excluded.
}
\source{
Science. 2007 Apr 20;316(5823):445-9. Epub 2007 Mar 15.
Strong association of de novo copy number mutations with autism.
Sebat J, Lakshmi B, Malhotra D, Troge J, Lese-Martin C, Walsh T, Yamrom B,
Yoon S, Krasnitz A, Kendall J, Leotta A, Pai D, Zhang R, Lee YH, Hicks J,
Spence SJ, Lee AT, Puura K, Lehtimaki T, Ledbetter D, Gregersen PK, Bregman J,
Sutcliffe JS, Jobanputra V, Chung W, Warburton D, King MC, Skuse D,
Geschwind DH, Gilliam TC, Ye K, Wigler M.
}
\examples{
data(normsegs)
}
\keyword{datasets}
|
290d803777099fdd2fd3eaab14deb2dfbf632f62 | 554719db63ef962b983644e6db3dde058a409d8b | /R/network.design.r | e2a3c783c567b021c7a7640142c6d74c9bdc9bfb | [] | no_license | amsantac/geospt | af6a6228bb756a9c4e6a36d940169dffc510cd95 | 4c392c600281f7a7d6ed0a3b1d8e8fe8f2b1c543 | refs/heads/master | 2021-01-19T07:58:24.463158 | 2018-03-17T00:42:36 | 2018-03-17T00:42:36 | 38,572,978 | 1 | 3 | null | null | null | null | UTF-8 | R | false | false | 644 | r | network.design.r | assign("network.design",
function(formula, vgm.model, xmin, xmax, ymin, ymax, npoint.x, npoint.y, npoints, boundary=NULL, type, ...){
if (is.null(boundary)) {
grid.p<-expand.grid(x=seq(xmin,xmax,(xmax-xmin)/(npoint.x-1)), y=seq(ymin,ymax,(ymax-ymin)/(npoint.y-1)))
plot(grid.p,pch=19,cex=0.5)
grid.p$z <- grid.p$x
}
else if (is.null(boundary)==FALSE) {
df.pts<-spsample(boundary, n=npoints, type="regular")
plot(boundary)
points(df.pts,pch=19,cex=0.5)
grid.p <- data.frame(df.pts,df.pts@coords[,1])
names(grid.p) <- c("x","y","z")
}
K = krige.cv(formula=formula, ~x+y, grid.p, vgm.model, ...)
ASEPE <- mean((K[,2])^0.5)
ASEPE
}
)
|
dfa4e7c6dce3aef9ea1825305f64637948f70de1 | 944bf3198a447a116bd92749a3a61c555c164059 | /man/hexs_range.Rd | 1fd376d7f664db020a307e71ddd09233a54572dd | [] | no_license | lwawrowski/combat | 5d7d88244a52cdbc15d03e1c714a4c00835143b4 | 970d60c0eb68343f2f0e2539c9f3bcbeb273f94e | refs/heads/master | 2020-06-02T16:13:37.993633 | 2019-06-12T21:36:20 | 2019-06-12T21:36:20 | 191,223,225 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 651 | rd | hexs_range.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hexs_range.R
\name{hexs_range}
\alias{hexs_range}
\title{Hexs in range for unit}
\usage{
hexs_range(hex_id, range_min, range_max, dist_matrix_data)
}
\arguments{
\item{hex_id}{id of hex where there is a unit}
\item{range_min}{minimum range of the unit}
\item{range_max}{maximum range of the unit}
\item{dist_matrix_data}{distance matrix}
}
\value{
Vector with IDs of hexs which are in range of unit
}
\description{
Returns a vector of the hexes that are within range of the unit.
}
\examples{
hexs_range(hex_id = "ID8", range_min = 1, range_max = 5, dist_matrix_data = dist_matrix)
}
|
4ab95ef37c9113feb3185201a46307181fec0775 | b85b26cbd201a1317453bc0814b9fec0fc1ebcea | /sources/3-ingest/3-unstructured/1-text/patterns/r/read_multiple_text_files_op2.r | 95882ad4f89eb682362e06d8419da7e4092e2fa2 | [
"MIT"
] | permissive | UN-AVT/kamino-source | d9d06b50fd2a808693b22b04decf8690b609c884 | 95c6e151e69d55d61a3dc15a3344a75a4abe2cbc | refs/heads/main | 2023-05-21T20:34:08.342314 | 2023-02-28T14:12:01 | 2023-02-28T14:12:01 | 305,565,875 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 680 | r | read_multiple_text_files_op2.r | # Required libraries
library(tidyverse)
library(readtext)
library(readr)
####################################################################################
# Read multiple text files - option#2 - read_file
load_multiple_files_2 <- function(base_url){
  # Read every "sona_*.txt" file found directly under `base_url` and return
  # the texts as a one-column data frame (one row per file; the column name
  # `txt_set_2` is kept for backward compatibility with existing callers).
  txt_files <- list.files(base_url, pattern="^sona_.*\\.txt$", full.names=TRUE, recursive=FALSE)
  # vapply() replaces growing a vector with c() inside a loop (O(n^2)
  # copying) and guarantees a character result even when no file matches;
  # the previous paste(read_file(f)) wrapper was a no-op and is dropped.
  txt_set_2 <- vapply(txt_files, function(f) read_file(f), character(1), USE.NAMES = FALSE)
  as.data.frame(txt_set_2)
}
# Load all Ghana State-of-the-Nation address files into a data frame.
load_multiple_files_2("./archetypes/ghana-state-of-the-nation-addresses/")
#################################################################################### |
900d134dd72fcc329495f6e0c2621033024ef49c | 3f6c2b95c87ff81c8fc0e169bcbc104fa16e641f | /deploy/build-int.R | 97d8056dfac679c8301d31605cd0becafeeecbde | [
"MIT"
] | permissive | houstonhaynes/PortfolioSite | e435f4b3a0dd0bf21a24697b5072f4212655f58d | a610d0d35b672517de5b2a3b31c6728adc27fd97 | refs/heads/main | 2021-12-24T23:26:55.369142 | 2021-11-07T23:04:55 | 2021-11-07T23:04:55 | 192,436,623 | 0 | 0 | MIT | 2019-10-24T05:01:14 | 2019-06-18T00:26:16 | null | UTF-8 | R | false | false | 108 | r | build-int.R | setwd ("c:/repo/PortfolioSite")
# Build the site for the integration environment: override the base URL
# (-b) and include draft posts (-D).
blogdown::hugo_cmd(shQuote(c('-b', 'https://int.h3tech.dev', '-D')))
# Exit R without saving the workspace (script runs non-interactively).
q("no")
75285bbcf58c6c38026f41ca809e252983939a7d | 4256888bc0f695fff22678c8a28ae45b693f6c03 | /R/clt.R | ec9dcaaf0701fcd2bdcdb9d163ac3d30383d5326 | [] | no_license | cran/ResearchMethods | 4f4e112cde9d71d02fe16565d7c9afffbffa0a84 | da9c6cef5bd6021ce8c9683fb37543c4bba21dd6 | refs/heads/master | 2021-01-15T18:46:13.872049 | 2011-06-23T00:00:00 | 2011-06-23T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,755 | r | clt.R | cltDemo <- function(){
  # Shared mutable state for the demo: CDenvir$n tracks the current sample
  # size across button presses; runVar holds the selected distribution
  # (1 = uniform, 2 = exponential, 3 = chi-square, 4 = parabolic).
  CDenvir <- new.env()
  runVar <- tclVar(1)
  CDenvir$n = 1
  # Redraw the demo from scratch (sample size reset to 1) for the currently
  # selected distribution, validating the parameter typed in the entry box.
  refresh <- function(...){
    x = as.numeric(tclvalue(tkget(varEntry)))
    # Non-numeric entry: fall back to 1 and rewrite the entry box.
    if(is.na(x)){
      x = 1
      tkdelete(varEntry,0,"end")
      tkinsert(varEntry,0,'1')
    }
    # Chi-square df must be a positive integer.
    if(tclvalue(runVar)=='3' && (x != round(x) || x < 0)){
      cat("df for the chi-square distribution must be a positive integer\n")
      x = 1
      tkdelete(varEntry,0,"end")
      tkinsert(varEntry,0,'1')
    }
    # Exponential theta must be positive.
    if(tclvalue(runVar)=='2' && x < 0){
      cat("theta for the exponential distribution must be positive\n")
      x = 1
      tkdelete(varEntry,0,"end")
      tkinsert(varEntry,0,'1')
    }
    switch((tclvalue(runVar)),
           '1' = clt.unif(1),
           '2' = clt.exp(1,x),
           '3' = clt.chisq(1,x),
           '4' = clt.parab(1))
    # Reset the sample-size counter so "increase n" starts from 1 again.
    CDenvir$n=1
  }
  # Advance the demo to the next sample size (n + 1) and redraw.
  # NOTE(review): the entry validation below duplicates refresh(); a shared
  # helper would keep the two code paths in sync.
  nextStep <- function(...){
    CDenvir$n=CDenvir$n+1
    x = as.numeric(tclvalue(tkget(varEntry)))
    if(is.na(x)){
      x = 1
      tkdelete(varEntry,0,"end")
      tkinsert(varEntry,0,'1')
    }
    if(tclvalue(runVar)=='3' && (x != round(x) || x < 0) ){
      cat("df for the chi-square distribution must be a positive integer\n")
      x = 1
      tkdelete(varEntry,0,"end")
      tkinsert(varEntry,0,'1')
    }
    if(tclvalue(runVar)=='2' && x < 0){
      cat("theta for the exponential distribution must be positive\n")
      x = 1
      tkdelete(varEntry,0,"end")
      tkinsert(varEntry,0,'1')
    }
    switch((tclvalue(runVar)),
           '1' = clt.unif(CDenvir$n),
           '2' = clt.exp(CDenvir$n,x),
           '3' = clt.chisq(CDenvir$n,x),
           '4' = clt.parab(CDenvir$n))
  }
  # CLT demo for Uniform(0,1): draw N = 10000 samples of size n, standardize
  # the sample means (Var(U(0,1)) = 1/12, hence the sqrt(12*n) scaling), and
  # show a density histogram next to a normal QQ-plot with N(0,1) overlays.
  clt.unif <- function(n=20) {
    N <- 10000
    par(mfrow = c(1,2), pty = "s")
    m <- (rowMeans(matrix(runif(N*n), N, n)) - 0.5)*sqrt(12*n)
    hist(m, breaks = "FD", xlim = c(-4,4),
         main = paste("Uniform(0,1), n = ",n,sep=""),
         prob = TRUE, ylim = c(0,0.5), col = "lemonchiffon")
    box()
    pu <- par("usr")[1:2]
    x <- seq(pu[1], pu[2], len = 500)
    lines(x, dnorm(x), col = "red")  # standard normal reference density
    qqnorm(m, ylim = c(-4,4), xlim = c(-4,4), pch = ".", col = "blue")
    abline(0, 1, col = "red")        # identity line: perfect normality
  }
  # CLT demo for Exponential(theta): mean theta, variance theta^2, so the
  # centred sample mean is scaled by sqrt(theta^2/n).
  clt.exp <- function(n=20,theta=1) {
    N <- 10000
    par(mfrow = c(1,2), pty = "s")
    m <- (rowMeans(matrix(rexp(N*n,1/theta), N, n)) - theta)/sqrt(theta^2/n)
    hist(m, breaks = "FD", xlim = c(-4,4),
         main = paste("exp(",theta,"), n = ",n,sep=""),
         prob = TRUE, ylim = c(0,0.5), col = "lemonchiffon")
    box()
    pu <- par("usr")[1:2]
    x <- seq(pu[1], pu[2], len = 500)
    lines(x, dnorm(x), col = "red")
    qqnorm(m, ylim = c(-4,4), xlim = c(-4,4), pch = ".", col = "blue")
    abline(0, 1, col = "red")
  }
  # CLT demo for Chi-square(nu): mean nu, variance 2*nu, so the centred
  # sample mean is scaled by sqrt(2*nu/n).
  clt.chisq <- function(n=20,nu=1) {
    N <- 10000
    par(mfrow = c(1,2), pty = "s")
    m <- (rowMeans(matrix(rchisq(N*n,nu), N, n)) - nu)/sqrt(2*nu/n)
    hist(m, breaks = "FD", xlim = c(-4,4),
         main = paste("Chi-Square(",nu,"), n = ",n,sep=""),
         prob = TRUE, ylim = c(0,0.5), col = "lemonchiffon")
    box()
    pu <- par("usr")[1:2]
    x <- seq(pu[1], pu[2], len = 500)
    lines(x, dnorm(x), col = "red")
    qqnorm(m, ylim = c(-4,4), xlim = c(-4,4), pch = ".", col = "blue")
    abline(0, 1, col = "red")
  }
  # CLT demo for a "parabolic" density on [-1, 1]: rparab samples via
  # inverse-CDF (signed cube root of a Uniform(-1,1) draw). The distribution
  # has mean 0, so only the sqrt(3/(5*n)) variance scaling is applied.
  clt.parab <- function(n=20) {
    rparab <- function(nn) {
      u <- runif(nn,-1,1)
      sign(u)*abs(u)^(1/3)
    }
    N <- 10000
    par(mfrow = c(1,2), pty = "s")
    m <- (rowMeans(matrix(rparab(N*n), N, n)))/sqrt(3/(5*n))
    hist(m, breaks = "FD", xlim = c(-4,4),
         main = paste("n = ",n,sep=""),
         prob = TRUE, ylim = c(0,0.5), col = "lemonchiffon")
    box()
    pu <- par("usr")[1:2]
    x <- seq(pu[1], pu[2], len = 500)
    lines(x, dnorm(x), col = "red")
    qqnorm(m, ylim = c(-4,4), xlim = c(-4,4), pch = ".", col = "blue")
    abline(0, 1, col = "red")
  }
  # Build the Tk window: radio buttons pick the distribution (each press
  # redraws via refresh), "increase n" advances the sample size, and the
  # entry box supplies theta (exponential) or df (chi-square).
  m <- tktoplevel()
  buttonFrame <- tkframe(m)
  otherFrame <- tkframe(m)
  unifButton <- tkradiobutton(buttonFrame,command=refresh,variable=runVar,value=1,text='uniform distribution')
  expButton <- tkradiobutton(buttonFrame,command=refresh,variable=runVar,value=2,text='exponential distribution')
  chisqButton <- tkradiobutton(buttonFrame,command=refresh,variable=runVar,value=3,text='chi-square distribution')
  parButton <- tkradiobutton(buttonFrame,command=refresh,variable=runVar,value=4,text='parabolic distribution')
  nextButton <- tkbutton(otherFrame,command=nextStep,text='increase n')
  varEntry <- tkentry(otherFrame,bg='white',width=4)
  # Lay out the two frames and their widgets.
  tkgrid(buttonFrame)
  tkgrid(otherFrame)
  tkgrid(unifButton)
  tkgrid(expButton)
  tkgrid(chisqButton)
  tkgrid(parButton)
  tkgrid(nextButton,columnspan=2)
  tkgrid(tklabel(otherFrame,text='theta OR df'),varEntry)
  tkinsert(varEntry,0,'1')
  # Initial draw with n = 1.
  refresh()
|
af6c827444f3411ec104cddb38ee6ca2a55e05c5 | e0993eefe78e0ee8d45b34091732e40cbfa17dfe | /man/prdframe.Rd | f5690763a7705bcabd408115cd994d9e093c7cd4 | [] | no_license | Mutaz94/dMod | 05bbb00726965e135ecc1e8d1df62fbcb39af5bd | 31f1e198bb3bc0bec6f964e04664b0ff4c0abdaf | refs/heads/master | 2021-01-15T00:40:39.450992 | 2020-02-21T15:33:38 | 2020-02-21T15:33:38 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,400 | rd | prdframe.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classes.R
\name{prdframe}
\alias{prdframe}
\title{Prediction frame}
\usage{
prdframe(prediction = NULL, deriv = NULL, sensitivities = NULL,
parameters = NULL)
}
\arguments{
\item{prediction}{matrix of model prediction}
\item{deriv}{matrix of sensitivities wrt outer parameters}
\item{sensitivities}{matrix of sensitivities wrt inner parameters}
\item{parameters}{names of the outer paramters}
}
\value{
Object of class \code{prdframe}, i.e. a matrix with other matrices and vectors as attributes.
}
\description{
A prediction frame is used to store a model prediction in a matrix. The columns
of the matrix are "time" and one column per state. The prediction frame has attributes "deriv",
the matrix of sensitivities with respect to "outer parameters" (see \link{P}), an attribute
"sensitivities", the matrix of sensitivities with respect to the "inner parameters" (the model
parameters, left-hand-side of the parameter transformation) and an attribute "parameters", the
parameter vector of inner parameters to produce the prediction frame.
Prediction frames are usually the constituents of prediction lists (\link{prdlist}). They are
produced by \link{Xs}, \link{Xd} or \link{Xf}. When you define your own prediction functions,
see \code{P2X} in \link{prdfn}, the result should be returned as a prediction frame.
}
|
474a9001cf875db377240f0afac9cca3663f86e2 | 94c1c5c14e4150a74866ef321c042d39c7f3fca3 | /simulations/sim03-file01-smalln-simulation.R | dc899507ab9805998fd8d91e106fb19a8af0df74 | [] | no_license | bvegetabile/entbal-continuous-paper | 00f06331425aad10d8afbe8e0f7186c7161ca167 | 407ac84522e02dd676bb854b7832b16bd6e32bc7 | refs/heads/main | 2023-01-14T15:29:07.039431 | 2020-11-23T18:21:33 | 2020-11-23T18:21:33 | 304,689,559 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,189 | r | sim03-file01-smalln-simulation.R | source('simulation-functions/load_all_funcs_and_libs.R')
# ------------------------------------------------------------------------
# Simulation parameters ------
source('simulations/simulation-parameters-01.R')
# ------------------------------------------------------------------------
# #--- Visualizing one span solution
# --- Single test run: simulate one dataset and inspect the fitted curves ---
set.seed(20200109)
n_obs <- 200
t1 <- system.time(testdat <- simrun(X=33, n_obs = n_obs, MX1 = MX1, MX2 = MX2, MX3 = MX3, lite_version = F))
print(t1)
# Tune the loess span / spline df by split-sample validation on the test run.
system.time(mod1 <- loess_split_sample(testdat$dat$Y, testdat$dat$A, testdat$dat[,3:7],
                                       n_moments = 3, spans = seq(0.05,1,0.05), plot_curves = T))
system.time(mod2 <- bspline_split_sample(testdat$dat$Y, testdat$dat$A, testdat$dat[,3:7],
                                         n_moments = 3, plot_curves = T, dfs = seq(3,10,1)))
# Weighted (entropy-balanced) vs naive (unweighted) dose-response estimates.
outro <- model_estimation(testdat$dat, testdat$eb2$wts, testpts = A_test)
naive <- model_estimation(testdat$dat, testpts = A_test)
# Overlay the fitted curves on the simulated data.
# NOTE(review): `A_test` and `truth` are assumed to come from
# simulation-parameters-01.R sourced above -- confirm.
plot(testdat$dat$A, testdat$dat$Y, pch = 19, col = rgb(0,0,0,0.25))
lines(A_test, truth, type = 'l', lwd = 3, col = 'red')
lines(A_test, testdat$truth, type = 'l', lwd = 3, col = 'red')
lines(A_test, testdat$gbm$ests_loess, lwd = 10)
lines(A_test, outro$ests_loess, lwd = 5, col = 'blue')
lines(A_test, outro$ests_splmod, lwd = 5, col = 'darkgreen')
lines(A_test, outro$ests_lmmod2, lwd = 5, col = 'orange')
lines(A_test, naive$ests_lmmod2, lwd = 5, col = 'lightblue')
# ------------------------------------------------------------------------
# Simulation ------
# Run 1000 replicates on 20 parallel workers; foreach collects the results.
n_sims <- 1000
options(cores = 20)
cl <- makeCluster(20)
registerDoParallel(cl)
system.time(simout <- foreach(i = 1:n_sims,
                              .packages = c('CBPS', 'splines', 'gbm'),
                              # .errorhandling = 'remove',
                              .verbose = T)
            %dopar% simrun(i, n_obs = 200,
                           MX1 = MX1, MX2 = MX2, MX3 = MX3,
                           amin = amin, amax = amax,
                           a_effect = T, lite_version = F))
stopCluster(cl)
# Storing simulation results
saveRDS(simout, file = 'output/sim3-output1-smalln-effect-estimation.RDS')
|
a5cefd69e5755c101a076610fd06ac5d0ff960c0 | b4265cf7c1a9505df2dd9e2f93b8120eb9edfe7a | /man/idata.Rd | 7ac40334c6874619762a0ffda203ef9d17231544 | [] | no_license | nverno/datavis | e23ccb49c29a15b8e9f3ad47287579cf460fb305 | b356368add25e211d2cc237a11e3dea4fdc06e53 | refs/heads/master | 2021-01-10T05:52:08.970830 | 2016-01-30T09:54:10 | 2016-01-30T09:54:10 | 48,509,630 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 484 | rd | idata.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vis.R
\name{idata}
\alias{idata}
\title{Visualize the master files interactively. Creates a shiny app to
choose the master files to look at with datatables.}
\usage{
idata(update = FALSE)
}
\arguments{
\item{update}{Check for updates to master files (default FALSE).}
}
\description{
Visualize the master files interactively. Creates a shiny app to
choose the master files to look at with datatables.
}
|
c604b664ac802e13a1f675eeb558878bbd4ebe41 | fb8b4674b28cd8c28a27657cb5f1b578d6e37d34 | /Code/test_loopbatch_code.R | 7576489e10b391c05714f5e9380bfab292f207d1 | [] | no_license | mayerbry/cluster-presentation | 610ff77846704ddcc00661b65a567d116d7f24cc | ebecef806ef4589ddc4f43e787c6c856f317d476 | refs/heads/master | 2022-11-12T18:46:47.925658 | 2020-07-08T22:22:05 | 2020-07-08T22:22:05 | 278,205,126 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,473 | r | test_loopbatch_code.R | args<-(commandArgs(TRUE));
# Parse command-line arguments of the form "name=value" (e.g. inf_set=1).
# NOTE(review): eval(parse(...)) executes each argument as arbitrary R code;
# acceptable for a trusted batch wrapper, but never expose to untrusted input.
if(length(args)==0){
  print("No arguments supplied.")
}else{
  for(i in 1:length(args)){
    eval(parse(text=args[[i]]))
  }
  print(args)
}
library(doParallel)
library(plyr)
source("model_functions.R")
# Register 6 parallel workers for plyr's .parallel backend.
registerDoParallel(6)
getDoParWorkers()
# Number of replicate simulations per infectivity value.
total_simulations = 10000
#for the batch we are going to try different infectivities
infectivity_list = c(1.1, 1.5)
# `inf_set` is supplied on the command line (see arg parsing above) and picks
# which infectivity this batch job handles, so jobs can run side by side.
if(inf_set == 1) infectivity_list = infectivity_list[1]
if(inf_set == 2) infectivity_list = infectivity_list[2]
out_file_name = paste("batch_results/batch_results_loop", inf_set, ".csv", sep = "") #output name varies by input save in folder
start_time = Sys.time()
# One row per (infectivity, replicate): did the virus die out by t = 100?
results <- plyr::ldply(infectivity_list, function(infectivity_in){
  plyr::ldply(seq_len(total_simulations), function(run){ # ldply is like a loop
    # BUG FIX: the simulation previously hard-coded infectivity = 1.1, so the
    # value being iterated over (infectivity_in) was never actually used.
    # BUG FIX: seed_set was fixed at 5, which made all 10000 replicates
    # identical; seeding by replicate index keeps runs reproducible but distinct.
    simulation <- stochastic_model_latent(max_time = 100, initI = 10,
                                          infectivity = infectivity_in, parms,
                                          seed_set = run)
    last_V <- tail(simulation$V, 1)
    data.frame( # just want to know if virus = 0 in the simulation
      run = run,
      infectivity = infectivity_in,
      infection_extinction = (last_V == 0)
    )
  }, .parallel = TRUE) # parallelize over replicates (6 workers registered above)
}) #<- if you added a .parallel = TRUE here you would need to request 36 cores
write.csv(results, out_file_name, row.names = FALSE)
print(Sys.time() - start_time)
2eae51ac269127014148101ffe6d7d246868483f | f53d589e6715188efb49ae7f5600d0cdf3eb7582 | /evaluate.R | c10f4c20a1c349877e2e636a0bcf7f2d4ac5c9ed | [] | no_license | sctruex/Edifecs | cb86d10711328208878cfaebabef7f72833fc199 | 62cc70b83d407fa9816b44143eda23ac7c1bbacd | refs/heads/master | 2021-05-29T06:01:35.838207 | 2015-09-11T17:54:57 | 2015-09-11T17:54:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,956 | r | evaluate.R | library(Cubist)
library(rpart)
library(randomForest)
library(Metrics)
# Command-line interface:
#   1: path to the beneficiary data CSV
#   2: path to the saved model (RDS)
#   3: number of beneficiaries to evaluate (0 or more; 0 = use all rows)
#   4: model type stored in the RDS (tree/forest/lm/glm/mtrees/...)
#   5: cohort to evaluate
args <- commandArgs(trailingOnly = TRUE)
#where the data is located
file.name <- args[1]
#where the model is located
model.loc <- args[2]
#number of beneficiaries from file.name to be evaluated
sample.size <- as.numeric(args[3])
#type of model in model.loc
model.type <- args[4]
#which cohort are we modeling
cohort <- args[5]
message('predicting cost for ', sample.size, ' beneficiaries from data file ',file.name,' using model from ',model.loc,' of type ',model.type)
#read in all data
df <- read.csv(file.name)
message('read in data')
# Keep only rows with no missing values.
df<-df[complete.cases(df),]
message('complete cases extracted')
# Select the cohort by diagnosis/comorbidity flag columns; the numbers in the
# original comment are the approximate cohort sizes.
#extract appropriate cohort (18,635-36,172-30,813-13,836-28,941-60,722)
df = switch(cohort,
            chf = df[rowSums(df[,c('DX_108','CM_CHF')]) > 0,],
            diabetes = df[rowSums(df[,c('DX_49','DX_50','CM_DM','CM_DMCX')]) > 0,],
            copd = df[rowSums(df[,c('DX_127','CM_CHRNLUNG')]) > 0,],
            asthma = df[df$DX_128 > 0,],
            coronary = df[df$DX_101 > 0,],
            age = df[df$AGE>65,],
            # "all" = beneficiaries with none of the condition flags above
            all = df[rowSums(df[,c('DX_108','CM_CHF','DX_49','DX_50','CM_DM','CM_DMCX','DX_127','CM_CHRNLUNG','DX_128','DX_101')]) == 0,])
message('extracted cohort for: ',cohort)
# The "all" cohort is additionally restricted to beneficiaries aged <= 65.
if(cohort=='all') {
  df = df[df$AGE<=65,]
  message('extracted age under 65')
}
# Debug output for the sampling step below.
message('nrow: ',nrow(df))
message('sample.size: ',sample.size)
message(sample.size >0)
message(nrow(df) > sample.size)
# If a positive sample size was supplied and the data is larger, down-sample
# the beneficiaries to that size.
if((sample.size > 0) && (nrow(df) > sample.size)) {
  df<-df[sample(nrow(df), sample.size),]
}
message('took sample: ',nrow(df))
# Shuffle the rows. BUG FIX: the original used df[1:nrow(df), ], which is the
# identity permutation and did not shuffle anything; sample(n) with no size
# argument returns a random permutation of 1..n.
df <- df[sample(nrow(df)), ]
message('shuffle data')
# Drop the beneficiary identifier so it cannot be used as a predictor.
df$VisitLink<-NULL
message('null visit link')
#read in model
model<- readRDS(model.loc)
message('stored model')
# Score the beneficiaries; each branch returns a numeric vector of predicted
# future costs, one per row of df.
prediction = switch(model.type,
                    tree = predict(model, df, type='vector'),
                    forest = predict(model, df),
                    lm = predict(model, df, type='response'),
                    glm = predict(model,df),
                    mtrees = predict(model,subset(df, select = -c(future_cost))),
                    # NOTE(review): `ratio` is not defined in this script; it is
                    # presumably set elsewhere before these branches run -- confirm.
                    individual = df$previous_cost*ratio,
                    population = rep(mean(df$previous_cost)*ratio, nrow(df)),
                    previous = predict(model, subset(df, select = c(previous_cost, future_cost)), type='response'))
# BUG FIX: prediction is a vector, so nrow() returned NULL and the message
# printed nothing; length() gives the row count.
message('prediction complete: ', length(prediction))
# Accuracy metrics (Metrics::rmse / Metrics::mae) and error quartiles.
# (The pointless `rmse = vector(); mae = vector()` pre-declarations were removed.)
rmse = rmse(df$future_cost, prediction)
message('rmse: ',rmse)
mae = mae(df$future_cost, prediction)
message('mae: ',mae)
differences = abs(df$future_cost - prediction)
# BUG FIX: differences is a vector; nrow() returned NULL. Use length().
message(length(differences))
quantiles = quantile(differences)
#print out quartiles of the absolute prediction error
message('\n')
message('0% ----- ',quantiles[1])
message('25% ----- ',quantiles[2])
message('50% ----- ',quantiles[3])
message('75% ----- ',quantiles[4])
message('100% ----- ',quantiles[5])
message('\n')
message('\n')
message('rmse ----- ',rmse)
message('mae ----- ',mae)
message('\n')
42d8ad541c4668e579c61d8e41f245348c902eb0 | a2102c3c6a4b1edc5ae8ec0fa3cf7b91cca4ff3b | /R/fbGetMarketingStat.R | 1d101a29d0650f285aa3d8b411558aaae654cc76 | [] | no_license | Hatteron/rfacebookstat | 0417ab5767080c0f8812dbf16efe9b25988855df | 768ede6677ba1f9c38108e34e92d475530d610bc | refs/heads/master | 2021-07-15T22:58:09.131039 | 2017-10-19T13:21:54 | 2017-10-19T13:21:54 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,998 | r | fbGetMarketingStat.R | fbGetMarketingStat <-
function(accounts_id = NULL,
         sorting = NULL,
         level = "account",
         breakdowns = NULL,
         fields ="account_id,account_name,campaign_name,impressions,clicks,unique_clicks,reach,spend",
         filtering = NULL,
         date_start = "2000-01-01",
         date_stop = Sys.Date(),
         api_version = "v2.9",
         access_token = NULL){
  # Fetch Facebook Marketing "insights" for each ad account, following the
  # API's pagination, and return all rows combined into one data frame.
  pages <- list()
  for(i in seq_along(accounts_id)){
    # Build the request URL; empty optional parameters leave repeated "&"
    # separators, which the gsub() collapses back to a single "&".
    QueryString <- gsub("&{1,5}","&",
                        paste(paste0("https://graph.facebook.com/",api_version,"/",accounts_id[i],"/insights?"),
                              ifelse(is.null(sorting),"",paste0("sort=",sorting)),
                              paste0("level=",level),
                              ifelse(is.null(breakdowns),"",paste0("breakdowns=",breakdowns)),
                              paste0("fields=",fields),
                              ifelse(is.null(filtering),"",paste0("filtering=",filtering)),
                              paste0("time_range={\"since\":\"",date_start,"\",\"until\":\"",date_stop,"\"}"),
                              "limit=5000",
                              paste0("access_token=",access_token),
                              sep = "&"))
    # Send the request.
    answerobject <- fromJSON(getURL(QueryString))
    # BUG FIX: fail fast on an API error. The original appended the (possibly
    # empty) page to the result before checking for the error.
    if(!is.null(answerobject$error)) {
      stop(answerobject$error)
    }
    pages[[length(pages) + 1]] <- answerobject$data
    # Follow pagination links until the API stops returning "next".
    while(!is.null(answerobject$paging$`next`)){
      answerobject <- fromJSON(getURL(answerobject$paging$`next`))
      pages[[length(pages) + 1]] <- answerobject$data
    }
  }
  # Bind all pages at once; rbind() inside the loop is quadratic in the
  # number of pages. An empty input yields an empty data frame.
  result <- if (length(pages) > 0) do.call(rbind, pages) else data.frame()
  return(result)
}
|
588fbe149e796bf78edd6015e8990d7a40c03e5f | cec3b04437dde49b293477a86fcb89bf23655c30 | /man/fars_read_years.Rd | 8ad20ce0a8b705bba0949d85d86224408df10c8b | [
"MIT"
] | permissive | larkinj/Submission | cadeec733832e2d5c7b5ddc0d97fbc2fcde4cedb | c7009d8311321ba174a045cebe5f2e4638c47dc6 | refs/heads/master | 2023-04-25T14:06:31.576811 | 2021-05-07T19:28:17 | 2021-05-07T19:28:17 | 365,277,373 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 998 | rd | fars_read_years.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{fars_read_years}
\alias{fars_read_years}
\title{Reads multiple Fatality Analysis Reporting System (FARS) files.}
\usage{
fars_read_years(years)
}
\arguments{
\item{years}{A list of integer years that are of interest.}
}
\value{
A Tibble containing the information in a FARS file for each year of interest.
}
\description{
The FARS is a nationwide census prepared by the US National Highway Traffic Safety Administration.
It provides the American public yearly data regarding fatal injuries suffered in motor vehicle traffic crashes.
}
\details{
Years must be represented by numeric values. Years represented as character strings will cause an error.
FARS files should be situated in the working directory.
FARS files can be downloaded from the NHTSA website - \url{https://www.nhtsa.gov/research-data/fatality-analysis-reporting-system-fars}.
}
\examples{
\dontrun{fars_read_years(2013:2015)}
}
|
4c075d9f5b05c3923a5f65b8aaba5c23eeb173d7 | e28571721cc99a1c19acfbdb14c4aa58b4ec3e85 | /man/NewResearch.Rd | 186e906b1e1a4657aa6be85dd72074f93734fecb | [] | no_license | cran/represtools | 4bc002c95b71c565344e5362c74454a1c24cd52e | 0dd66034840d9744318aa193280640887237374b | refs/heads/master | 2020-04-06T06:59:01.960900 | 2019-10-03T16:30:03 | 2019-10-03T16:30:03 | 61,646,515 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 634 | rd | NewResearch.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ResearchDirs.R
\name{NewResearch}
\alias{NewResearch}
\title{Create a new Research}
\usage{
NewResearch(path = ".", RStudio = TRUE, useExistingDirectory = FALSE)
}
\arguments{
\item{path}{A character variable indicating the name of the project to create}
\item{RStudio}{Boolean indicating whether or not to create an RStudio project. Default is TRUE.}
\item{useExistingDirectory}{Overwrite the directory if it already exists. Default is FALSE.}
}
\value{
NewResearch will invisibly return the name of the project.
}
\description{
Create a new Research
}
|
6ea48d1254786170fabbc54dbc4c63e6fa9f513a | 2d34708b03cdf802018f17d0ba150df6772b6897 | /googledataprocv1alpha1.auto/man/projects.regions.clusters.create.Rd | 992897c11f95022245ea95ec6e43bd8f2f850c88 | [
"MIT"
] | permissive | GVersteeg/autoGoogleAPI | 8b3dda19fae2f012e11b3a18a330a4d0da474921 | f4850822230ef2f5552c9a5f42e397d9ae027a18 | refs/heads/master | 2020-09-28T20:20:58.023495 | 2017-03-05T19:50:39 | 2017-03-05T19:50:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,184 | rd | projects.regions.clusters.create.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataproc_functions.R
\name{projects.regions.clusters.create}
\alias{projects.regions.clusters.create}
\title{Request to create a cluster in a project.}
\usage{
projects.regions.clusters.create(Cluster, projectId, region)
}
\arguments{
\item{Cluster}{The \link{Cluster} object to pass to this method}
\item{projectId}{Required The ID of the Google Cloud Platform project that the cluster belongs to}
\item{region}{Required The Dataproc region in which to handle the request}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/cloud-platform
}
Set \code{options(googleAuthR.scopes.selected = c("https://www.googleapis.com/auth/cloud-platform"))}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://cloud.google.com/dataproc/}{Google Documentation}
Other Cluster functions: \code{\link{Cluster.labels}},
\code{\link{Cluster}},
\code{\link{projects.regions.clusters.patch}}
}
|
3aeb25f3c3d51767ec83cdd9e98ea7bf314a0d87 | 1f8d064f98fce1cb7a1e39a6223aec20dbeed5d3 | /man/example_data2.Rd | cfc0b4f1b8de7d14ca20ad0a0c20ca6c8efba490 | [] | no_license | cran/nhm | c2234b2c9c698fa021ea31af9238c34ba11aaab3 | d77273020ba895b337f3d304886e72a7d8aff09e | refs/heads/master | 2020-12-22T01:37:56.389788 | 2019-10-11T08:10:05 | 2019-10-11T08:10:05 | 236,631,983 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 696 | rd | example_data2.Rd | \name{example_data2}
\alias{example_data2}
\docType{data}
\title{
Example of data on a progressive 4 state process with state misclassification
}
\description{
The observed states and associated observation times for 1000 patients simulated from a non-homogeneous Markov model of a 4-state process, with misclassification to adjacent transient states.}
\usage{data("example_data2")}
\format{
A data frame with 3864 rows and 5 variables:
\describe{
\item{state}{Observed state at the time of observation}
\item{time}{Time at which the observation occurred}
\item{id}{Patient identification number}
\item{cov1}{Binary covariate}
\item{cov2}{Continuous covariate}
}
}
\keyword{datasets}
|
3cdc613b8a56b480e8bfa35f60d274bdcbf82d24 | 79d7343ff4727d37c9deba38671e6b650e57be87 | /src/Benchmarks/FFORMA.R | 038880d9c1425ddcda19d7c6318cac0ff322adce | [] | no_license | ptanhhust2001/LSTMMSNet | ba8f00063ac5a92f6c38f9b03ab47937dc9d8305 | 664db971ff48edcbbc9aed478ac7ca84c8b1da12 | refs/heads/master | 2023-06-09T22:08:28.352444 | 2021-06-23T07:25:43 | 2021-06-23T07:25:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,618 | r | FFORMA.R | # For more details about FFORMA method/installation/configurations: https://github.com/robjhyndman/M4metalearning/
set.seed(1234)
library(M4metalearning)
library(tsfeatures)
library(xgboost)
library(rBayesianOptimization)
# Training series: one time series per row, values across the columns.
ts_df <- read.csv("solar_train.txt", header = FALSE, sep = ",")
# Forecast horizon (number of steps ahead) for every series.
horizon <- 24
# Convert each row into an M4metalearning-style entry: id, ts object, horizon.
# Pre-allocate the list and use seq_len() (safe when ts_df is empty); the
# per-row print(i) progress spam was removed.
ts_list <- vector("list", nrow(ts_df))
for (i in seq_len(nrow(ts_df))){
  time_series <- ts(as.numeric(ts_df[i,]))
  ts_list[[i]] <- list(st = paste0("D",i), x = time_series, h = horizon)
}
start_time <- Sys.time()
# Hold out the last `horizon` points of each series for error computation.
meta_M4 <- temp_holdout(ts_list)
print("Started Modelling")
# Forecast with every base method, compute their errors, and extract the
# time-series features used by the metalearner (4 cores each).
meta_M4 <- calc_forecasts(meta_M4, forec_methods(), n.cores=4)
meta_M4 <- calc_errors(meta_M4)
meta_M4 <- THA_features(meta_M4, n.cores=4)
# Checkpoint the prepared metalearning dataset.
saveRDS(meta_M4, "metasolar.rds")
#meta_M4 <- readRDS("metaenergy.rds")
# Bayesian optimisation of the xgboost hyperparameters (results saved to disk).
hyperparameter_search(meta_M4, filename = "solar_hyper.RData", n_iter=10)
load("solar_hyper.RData")
# Row of the Bayesian-optimisation results with the best (lowest) combined OWA.
best_hyper <- bay_results[ which.min(bay_results$combi_OWA), ]
#Train the metalearning model with the best hyperparameters found
train_data <- create_feat_classif_problem(meta_M4)
param <- list(max_depth=best_hyper$max_depth,
              eta=best_hyper$eta,
              nthread = 3,
              silent=1,
              objective=error_softmax_obj,
              num_class=ncol(train_data$errors), #the number of forecast methods used
              # BUG FIX: these two previously read the full bay_results columns
              # (whole vectors) instead of the selected best row; xgboost
              # expects scalar values for subsample and colsample_bytree.
              subsample=best_hyper$subsample,
              colsample_bytree=best_hyper$colsample_bytree)
meta_model <- train_selection_ensemble(train_data$data,
                                       train_data$errors,
                                       param=param)
print("Done model training")
# Apply the trained metalearner to the full series list: base forecasts,
# features, predicted method weights, then the weighted combination.
final_M4 <- ts_list
#just calculate the forecast and features
final_M4 <- calc_forecasts(final_M4, forec_methods())
final_M4 <- THA_features(final_M4)
#get the feature matrix
final_data <- create_feat_classif_problem(final_M4)
#calculate the predictions using our model
preds <- predict_selection_ensemble(meta_model, final_data$data)
#calculate the final mean forecasts
final_M4 <- ensemble_forecast(preds, final_M4)
saveRDS(final_M4, "FinalForecastsSolarYearly.rds")
#the combination predictions are in the field y_hat of each element in the list
#lets check one
end_time <- Sys.time()
# NOTE(review): paste0 concatenates without a space, printing "Total time<value>".
print(paste0("Total time", (end_time - start_time)))
# Assemble the forecast matrix: one row per series, one column per horizon
# step. Dimensions follow the data instead of the previous hard-coded 300 x 24.
forecast_df <- matrix(nrow = length(final_M4), ncol = horizon)
for (idr in seq_along(final_M4)){
  time_series_forecast <- as.numeric(final_M4[[idr]]$y_hat)
  # Clamp negative forecasts to zero (generation cannot be negative).
  time_series_forecast[time_series_forecast <0] <- 0
  forecast_df[idr,] <- time_series_forecast
}
write.table(forecast_df, "forma_solar_yearly_forecasts.txt", row.names = FALSE, col.names = FALSE)
|
71dedef7356e7e95f2a0214105f83c9ced2a1631 | 4c6a31d1acf4897720bad357e61a4ac89f15300d | /ClassificationTrees/ClassificationTrees.R | 592d85b5105991d0d1f6c1948ef94aa89814643f | [] | no_license | chancemartin116/DataScience | 64d44745dbeeaf0e5504c86e953c5d8ed2ddc692 | d8a271f2b2245412d0fe915b489313005c674bcb | refs/heads/master | 2020-03-19T03:33:10.272042 | 2018-06-24T21:02:13 | 2018-06-24T21:02:13 | 135,738,169 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,208 | r | ClassificationTrees.R | library(tree)
library(gmodels)
# store csv file in data frame (semicolon-separated; target column is `y`)
bank <- read.csv("data/bank.csv", sep=";")
# set random number generator to produce same results for each run
set.seed(123)
# draw 3000 of the 4521 row indices for the training split
# (the original comment claimed "900 integers ranging from 1 to 1000",
# which did not match the code)
train_sample <- sample(4521, 3000)
# split the data frames into training and test sets
# -train_sample means all values not in train_sample
bank_train <- bank[train_sample, ]
bank_test <- bank[-train_sample, ]
# "y ~ ." is equivalent to y = x1 + x2 + ..... where the single dot on the right of the tilde
# says to use all the features
# create decision tree using formula and training set
treeModel <- tree("y ~ .", data=bank_train)
# get predicted labels for test set
bank_pred <- predict(treeModel, newdata=bank_test, type="class")
# create a table of predicted outcomes compared to actual results
# row, column and chi-squared proportions are not shown
crossTable <- CrossTable(bank_test$y, bank_pred,
                         prop.chisq = FALSE, prop.c = FALSE, prop.r = FALSE,
                         dnn = c('actual y', 'predicted y'))
# get ratio of correctly labeled samples to total samples
# (cells t[1] and t[4] are the diagonal of the 2x2 contingency table)
print(paste0("prediction accuracy: ", (crossTable$t[1] + crossTable$t[4]) / sum(crossTable$t)))
5a0de171dd8c3f321335aa6f738a568b0f32248b | 3ef4024d236e55beefcb75c3e14258d4073d2806 | /run_analysis.R | 5c48f77b36e769424a68aa7b92348e931992a130 | [] | no_license | NicZ8/Getting-and-Cleaning-Data-Course-Project | 49f750c5e016f92232db278039f98278819c3801 | e1a31266736582b92351b400d48612ec6f529d6a | refs/heads/master | 2021-01-01T04:26:13.566723 | 2016-04-18T03:57:13 | 2016-04-18T03:57:13 | 56,300,655 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,100 | r | run_analysis.R | ## Assignment: Getting and Cleaning Data Course Project
## Run Analysis
## Set working directory and load required R packages
## Note: Check that you working directory contains the required files.
setwd("~/UCI HAR Dataset")  # NOTE(review): hard-coded path; adjust per machine
library(plyr)
## Load features which are the variable names for the X data sets
## Note: There are 561 features.
features <- read.table("features.txt")
var_names <- as.character(features[, 2])
## Load training data set (measurements, activity ids, subject ids)
X_train <- read.table("train/X_train.txt", col.names = var_names)
y_train <- read.table("train/y_train.txt", col.names = "activityid")
subj_train <- read.table("train/subject_train.txt", col.names = "subject")
train_data <- cbind(X_train, y_train, subj_train)
## Load testing data set
X_test <- read.table("test/X_test.txt", col.names = var_names)
y_test <- read.table("test/y_test.txt", col.names = "activityid")
subj_test <- read.table("test/subject_test.txt", col.names = "subject")
test_data <- cbind(X_test, y_test, subj_test)
## 1. Merge the training and the test sets to create one data set.
run_data <- rbind(train_data, test_data)
## 2. Extract only the measurements on the mean and standard deviation
##    for each measurement.
##    Note: Columns 562 and 563 (activity id and subject) are retained
##    in the data frame. These indices assume exactly 561 features.
##    Only true "mean()" and "std()" measurements are selected. Other
##    variables that contain the strings "mean" or "std" are ignored.
var_mean_std <- grep("mean\\(\\)|std\\(\\)", var_names)
selected_run_data <- run_data[, c(562, 563, var_mean_std)]
## 3. Use descriptive activity names to name the activities in the data set
##    i.e. create new column to add given activity labels (in lower case)
##    to activity id's using the merge() function.
al <- read.table("activity_labels.txt",
                 col.names = c("activityid", "activity"))
al$activity <- tolower(as.character(al$activity))
selected_run_data <- merge(al, selected_run_data, by = "activityid")
## 4. Appropriately label the data set with descriptive variable names.
##    Note: Variables were already named when reading in the data sets
##    by using the features from features.txt .
##    In this step, names are tidied up by removing all special
##    characters such as "-" and "()".
names(selected_run_data) <- gsub("[[:punct:]]","", names(selected_run_data))
## 5. From the data set in step 4, create a second, independent tidy data set
##    with the average of each variable for each activity and each subject.
##    Note: The average (mean) is only calculated for all the measured
##    variables (columns 3 to 69 -- hard-coded to match the 66 selected
##    mean()/std() features plus the two id columns).
##    Activity id column is dropped as it is no longer required.
##    Columns are re-ordered to show subject first, then activity.
tidy_run_data <- ddply(selected_run_data, c("subject", "activity"),
                       function(x) colMeans(x[, 3:69]))
tidy_run_data <- tidy_run_data[, c(2,1,3:68)]
write.table(tidy_run_data, "tidy_run_data.txt", row.names = FALSE)
407e34d0b47c0bcbf37551e500d4a9fe94889aeb | a176626eb55b6525d5a41e2079537f2ef51d4dc7 | /Uni/Projects/code/P022.Temprature_France/final/cn005.r2.r | 670b3ebb81a8d9e7ce710adf112f236270af372d | [] | no_license | zeltak/org | 82d696b30c7013e95262ad55f839998d0280b72b | d279a80198a1dbf7758c9dd56339e8a5b5555ff2 | refs/heads/master | 2021-01-21T04:27:34.752197 | 2016-04-16T04:27:57 | 2016-04-16T04:27:57 | 18,008,592 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 18,418 | r | cn005.r2.r | library(ztable)
library(lme4)
library(reshape)
library(foreign)
library(ggplot2)
library(plyr)
library(data.table)
library(reshape2)
library(Hmisc)
library(mgcv)
library(gdata)
library(car)
library(dplyr)
library(ggmap)
library(broom)
library(splines)
library(DataCombine)
source("/media/NAS/Uni/org/files/Uni/Projects/code/$Rsnips/rmspe.r")
#mod1
# Load the stage-1 (mod1) R2 results and render them with ztable.
mod1res <- fread("/media/NAS/Uni/Projects/P022_Temprature_France/4.results/r2/mod1.csv")
# Emit ztable output as HTML; switch to "viewer" for interactive use.
options(ztable.type = "html")
#options(ztable.type="viewer")
z <- ztable(mod1res)
z  # auto-prints the formatted table
#mod3
#-------------------->> RES TABLE
# Yearly results table: one row per year (2000-2011), one column per
# summary statistic.  The column names are declared first and the table
# width is derived from them: the original code hard-coded ncol = 48
# while supplying only 47 names, which left the last column with an NA
# name.
res_cols <- c(
  "m1.raw","m1.raw.space","m1.raw.time","m1.time","m1.time.space","m1.time.time","m1.space","m1.space.space","m1.space.time","m1.noaod","m1.noaod.space","m1.noaod.time"
  ,"m1.R2","m1.rmspe","m1.R2.space","m1.R2.time","m1.rmspe.space" #mod1 Full
  ,"m1cv.R2","m1cv.I","m1cv.Ise","m1cv.slope","m1cv.slopese","m1cv.rmspe","m1cv.R2.space","m1cv.R2.time","m1cv.rmspe.space" #mod1 CV
  ,"m1cvloc.R2","m1cvloc.I","m1cvloc.Ise","m1cvloc.slope","m1cvloc.slopese","m1cvloc.rmspe","m1cvloc.R2.space","m1cvloc.R2.time","m1cvloc.rmspe.space"#loc m1
  ,"m2.R2" #mod2
  ,"m3.t31","m3.t33" #mod3 tests
  ,"m3.R2","m3.rmspe","m3.R2.space","m3.R2.time","m3.rmspe.space" #mod3
  ,"m3.I","m3.Ise","m3.slope","m3.slopese") #Extra
res <- data.frame(matrix(NA, nrow = 12, ncol = length(res_cols)))
colnames(res) <- res_cols
# Row key used by the per-year fills below: one calendar year per row.
res$type <- c("2000","2001","2002","2003","2004","2005","2006","2007","2008","2009","2010","2011")
#m3
# Model-3 validation, one year at a time (2000-2011).  Each year was
# originally a literal copy of the same ~25-line block; the logic is now
# factored into helpers applied to every year.  All file paths produced
# below are byte-identical to the originals.

# Base directory holding the per-year mod1/mod3 csv files.
work_dir <- "/media/NAS/Uni/Projects/P022_Temprature_France/3.work/cn099.R/"

# Read one yearly table ("mod1" or "mod3"), parse the "ddmonYYYY" date
# strings into a Date column `day`, and build the longitude-latitude
# grid id `lstid` used as the merge key.
read_year_table <- function(model, year) {
  dt <- fread(paste0(work_dir, model, ".", year, ".csv"))
  dt[, day := as.Date(strptime(date, "%d%b%Y"))]
  #create aodid
  dt$lstid <- paste(dt$Longitude, dt$Latitude, sep = "-")
  dt
}

# For one year: merge the mod3 predictions (Final_Pred) onto the mod1
# observations, regress observed NTckin on the prediction, save the
# merged table, and store R2, intercept, slope, their SEs and the RMSPE
# in the matching row of `res`.  Returns the updated `res`.
process_year <- function(year, res) {
  mod3 <- read_year_table("mod3", year)
  mod1 <- read_year_table("mod1", year)
  #R2.m3
  setkey(mod3, day, lstid)
  setkey(mod1, day, lstid)
  mod1 <- merge(mod1, mod3[, list(day, lstid, Final_Pred)], all.x = TRUE)
  m3.fit.all <- summary(lm(NTckin ~ Final_Pred, data = mod1))
  saveRDS(mod1, paste0(work_dir, "mod3.pred.", year, ".csv"))
  row <- res$type == as.character(year)
  # Reuse the single fitted summary; the original refitted the same lm
  # five more times (once per extracted statistic) and print()ed each
  # value to the console.
  res[row, "m3.R2"]      <- m3.fit.all$r.squared
  res[row, "m3.I"]       <- m3.fit.all$coefficients[1, 1]
  res[row, "m3.Ise"]     <- m3.fit.all$coefficients[1, 2]
  res[row, "m3.slope"]   <- m3.fit.all$coefficients[2, 1]
  res[row, "m3.slopese"] <- m3.fit.all$coefficients[2, 2]
  #RMSPE -- rmse() is sourced from rmspe.r at the top of this script.
  res[row, "m3.rmspe"]   <- rmse(residuals(m3.fit.all))
  res
}

for (year in 2000:2011) {
  res <- process_year(year, res)
  # The original called gdata::keep(..., sure = TRUE) to drop the bulky
  # per-year tables from the global environment; they are now local to
  # process_year(), so a plain gc() is all that is needed.
  gc()
}

saveRDS(res, paste0(work_dir, "res.csv"))
|
5f40601f6b11f66e8e158370fb89d74894d72995 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/tailloss/examples/fMoment.Rd.R | 31822a65d3f7779904b8a3fb74d23dd45f15566c | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 878 | r | fMoment.Rd.R | library(tailloss)
### Name: fMoment
### Title: Moment Bound.
### Aliases: fMoment
### Keywords: bound moment

### ** Examples

data(UShurricane)

# Work in millions of dollars.
USh.m <- compressELT(ELT(UShurricane), digits = -6)

# Moment bound with the default degenerate (Dirac delta) severity.
EPC.Moment <- fMoment(USh.m, s = 1:40)
EPC.Moment
plot(EPC.Moment, ylim = c(0, 1), type = "l")

# Gamma severity with E[X] = x and Var[X] = 2 * x, capped at 5.
EPC.Moment.Gamma <- fMoment(USh.m, s = 1:40, theta = 2, cap = 5)
EPC.Moment.Gamma
plot(EPC.Moment.Gamma, ylim = c(0, 1), type = "l")

# Overlay both exceedance probability curves for comparison.
plot(EPC.Moment, type = "l", main = "Exceedance Probability Curve",
     ylim = c(0, 1))
lines(EPC.Moment.Gamma, col = 2, lty = 2)
legend("topright", c("Dirac Delta", expression(paste("Gamma(",
   alpha[i] == 1 / theta^2, ", ", beta[i] ==1 / (x[i] * theta^2), ")", " cap =", 5))),
   lwd = 2, lty = 1:2, col = 1:2)
|
69c33b94119fbb4437ddca32cff126dcaca54507 | cbff7053b64ad287993886d20f0eef197c2b08fd | /man/siplab-package.Rd | 506038aa2a81aa8291413b41d3472fa94582ce3f | [] | no_license | cran/siplab | 12cffa3f2c150e41af6f15d269054d3f2f51a524 | 95127a93d7f9a256432859e1a5cf41733cb5f206 | refs/heads/master | 2022-05-20T20:21:39.789499 | 2022-03-07T16:30:02 | 2022-03-07T16:30:02 | 17,699,687 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,452 | rd | siplab-package.Rd | \encoding{UTF-8}
\name{siplab-package}
\alias{siplab-package}
\alias{siplab}
\docType{package}
\title{
Spatial Individual-Plant Simulation
}
\description{
A platform for experimenting with spatially explicit individual-based plant modelling
}
\details{
\tabular{ll}{
Package: \tab siplab\cr
Type: \tab Package\cr
Version: \tab 1.6\cr
License: \tab GPL\cr
}
%~~ An overview of how to use the package, including the most important functions ~~
The main top-level functions are \code{\link{pairwise}()} and \code{\link{assimilation}()}.
\code{\link{pairwise}()} computes the competition indices most commonly used in individual-tree distance-dependent (or spatially explicit) forest growth models. These indices are based on a sum of functions of size and distance between the subject plant and each of its competitors. They represent an aggregate of pairwise interactions, the angular configuration of competitors and any higher-order interactions are ignored. Each index is characterized by a specific interaction function, here called a \link{kernel}, and by a definition of competitors.
\code{\link{assimilation}()} deals with \dQuote{fully spatial} models, computing \dQuote{assimilation indices} that aim at a mechanistic approximation of effective resource capture. One starts with a spatial resource distribution that is typically assumed to be uniform. Plants exert competitive pressure depending on size and distance, described by \link{influence} functions. The resource available at each point is allocated to plants according to their local influence and to a partition rule. Finally, the resource uptake may be weighted by an \link{efficiency} function that depends on size and distance, and is spatially integrated to obtain the plant's assimilation index.
Several examples of influence and efficiency functions are pre-programmed, and others can be easily produced.
The \code{\link{edges}()} function is useful for handling edge effects.
Some sample data sets are included, see links below.
The package is built on top of the \pkg{spatstat} library (\url{http://spatstat.org/}), which needs to be installed first.
}
\author{
Oscar \enc{García}{Garcia}
Maintainer: O. Garcia <garcia@dasometrics.net>
%~~ The author and/or maintainer of the package ~~
}
\references{
%~~ Literature or other references for background information ~~
\enc{García}{Garcia}, O. \dQuote{Siplab, a spatial individual-based plant modelling system}. Computational Ecology and Software 4(4), 215-222. 2014.
(\url{http://www.iaees.org/publications/journals/ces/articles/2014-4(4)/2014-4(4).asp}).
% https://www.researchgate.net/publication/267695426
\enc{García}{Garcia}, O. \dQuote{A generic approach to spatial individual-based modelling and simulation of plant communities}. Mathematical and Computational Forestry and Nat.-Res. Sci. (MCFNS) 6(1), 36-47. 2014.
(\url{http://mcfns.net/index.php/Journal/article/view/6_36}).
\url{https://github.com/ogarciav/siplab}.
\url{http://forestgrowth.unbc.ca/siplab/} (no longer maintained).
}
%~~ Optionally other standard keywords, one per line, from file KEYWORDS in the R documentation directory ~~
%\keyword{ package }
\seealso{
%~~ Optional links to other man pages, e.g. ~~
%~~ \code{\link[<pkg>:<pkg>-package]{<pkg>}} ~~
Example \pkg{siplab} data sets: \code{\link{boreasNP}}, \code{\link{boreasNS}}, \code{\link{boreasSA}}, \code{\link{boreasSP}}.
Some \pkg{spatstat} standard data sets may also be of interest: \code{finpines}, \code{longleaf}, \code{spruces}, \code{waka}.
For tutorials try the vignettes. E. g., in R type \code{help.start()} to open the help browser, and navigate to Packages > siplab > Vignettes.
}
\examples{
%~~ simple examples of the most important functions ~~
# Pretend that the data is given as a simple data frame
data <- as.data.frame(spruces) # from a spatstat data set
head(data) # x-y coordinates in a 56x38 m plot, marks are dbh in meters
data$marks = data$marks * 100 # dbh in cm
# Convert to a point pattern object
datap <- as.ppp(data, c(0, 56, 0, 38)) # plot limits (minx, maxx, miny, maxy)
# or datap <- ppp(data$x, data$y, c(0, 56), c(0, 38), marks = data$marks)
# Hegyi (1974) index (as usual without his original 1-foot distance offset)
hegyi <- pairwise(datap, maxR = 6, kernel = powers_ker, kerpar = list(pi=1,
pj=1, pr=1, smark=1))
head(marks(hegyi))
# ZOI model
zoi <- assimilation(datap, influence=zoi_inf, infpar=c(k=0.2, smark=1),
asym=1)
}
|
13ef20b8a9e33d2b3458a9bd036c69b7dfe6884a | dbd3f41a922de02b2a7a57438ce50cdc21eb6556 | /tests/testthat/test_build_model.R | 5048ebc29464c7bf547900148fe1cefb9db1c33b | [
"MIT"
] | permissive | exploratory-io/exploratory_func | 137083b566e08166c7844a58195ff9bacc419ae8 | 20f2838dc8c7b104867f5e694538a67555008f89 | refs/heads/master | 2023-08-23T07:22:59.967232 | 2023-08-17T22:17:15 | 2023-08-17T22:17:15 | 58,769,613 | 66 | 19 | NOASSERTION | 2023-09-13T00:27:57 | 2016-05-13T20:14:49 | R | UTF-8 | R | false | false | 2,053 | r | test_build_model.R | context("build model test")
test_that("test nnet build_model", {
  # Flight-record fixture: 20 rows with a cancellation flag, carrier name
  # (one NA entry), carrier code and flight distance.
  test_data <- structure(
    list(
      CANCELLED = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0),
      `Carrier Name` = c("Delta Air Lines", "American Eagle", "American Airlines", "Southwest Airlines", "SkyWest Airlines", "Southwest Airlines", "Southwest Airlines", "Delta Air Lines", "Southwest Airlines", "Atlantic Southeast Airlines", "American Airlines", "Southwest Airlines", "US Airways", "US Airways", "Delta Air Lines", "Atlantic Southeast Airlines", NA, "Atlantic Southeast Airlines", "Delta Air Lines", "Delta Air Lines"),
      CARRIER = c("DL", "MQ", "AA", "DL", "MQ", "AA", "DL", "DL", "MQ", "AA", "AA", "WN", "US", "US", "DL", "EV", "9E", "EV", "DL", "DL"),
      DISTANCE = c(1587, 173, 646, 187, 273, 1062, 583, 240, 1123, 851, 852, 862, 361, 507, 1020, 1092, 342, 489, 1184, 545)), row.names = c(NA, -20L),
    class = c("tbl_df", "tbl", "data.frame"), .Names = c("CANCELLED", "Carrier Name", "CARRIER", "DISTANCE"))
  # Rename DISTANCE so the formula term contains a space and must be
  # backtick-quoted -- exercises name handling in the modelling pipeline.
  test_data <- test_data %>% rename(`DIST ANCE`=DISTANCE)
  # Reference fit with nnet directly (its result is not asserted on here).
  model_ret <- nnet::multinom(CARRIER ~ `DIST ANCE`, data = test_data)
  # build_model / model_coef / model_stats / prediction / evaluate_multi
  # are package helpers under test; test_rate = 0.1 holds out 10% of rows.
  model_df <- build_model(test_data,
                          model_func = nnet::multinom,
                          formula = CARRIER ~ `DIST ANCE`,
                          test_rate = 0.1,
                          seed=1)
  coef_ret <- model_coef(model_df)
  stats_ret <- model_stats(model_df)
  prediction_training_ret <- prediction(model_df, data = "training")
  prediction_ret <- prediction(model_df, data = "test")
  evaluate_ret <- evaluate_multi(prediction_ret, CARRIER, predicted_label)
  # Misclassification rate must be a valid proportion in [0, 1].
  expect_gte(evaluate_ret[["misclassification_rate"]], 0)
  expect_lte(evaluate_ret[["misclassification_rate"]], 1)
})
test_that("loess", {
  # build_model() should wrap a stats::loess fit in its `model` list-column.
  test_data <- data.frame(sin = sin(seq(10)),
                          x = seq(10),
                          group = rep(letters[1:2], 5))
  loess_model_df <- build_model(test_data,
                                model_func = stats::loess,
                                formula = sin ~ x)
  expect_equal(class(loess_model_df$model[[1]]), "loess")
})
|
97e0e154560556158e72df3a89d7adf5e814a798 | 96bafe39853bc628ee295f9e31e0ef12e10140e5 | /characterization/2_lifetime/Lifetime.R | c24260ee937398b91f22cc837822633d684a30b6 | [] | no_license | lichenyu/Video_Popularity | f6f43e0bdeec4209d536eb5c5bd0ddf9a234ec33 | 35d08a6c718b6b5d67643391081f78e2fd6fcda1 | refs/heads/master | 2021-01-21T13:48:39.384027 | 2016-05-03T00:26:53 | 2016-05-03T00:26:53 | 48,667,686 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,511 | r | Lifetime.R | #workpath = 'F:/Video_Popularity/'
workpath = '/Users/ouyangshuxin/Documents/Video_Popularity/'
data = read.table(paste(workpath, 'characterization/2_lifetime/lifetime', sep = ''))
vc = data$V2
lifetime = data$V3
active_idx = which(30 <= vc)
lifetime_active = lifetime[active_idx]
lifetime_inactive = lifetime[-active_idx]
pdf(paste(workpath, "characterization/2_lifetime/lifetime.pdf", sep = ''),
width = 5, height = 4)
e = ecdf(lifetime)
e_active = ecdf(lifetime_active)
e_inactive = ecdf(lifetime_inactive)
#d,l,u,r
par(mar=c(5, 4, 1, 2) + 0.1)
plot(e_inactive, do.points = FALSE, verticals = TRUE, col.01line = 0,
xlim = c(0, 30), ylim = c(0, 1), axes = FALSE, xaxs="i", yaxs="i",
main = "", sub = "", xlab = "Lifetime (days)", ylab = "ECDF",
col = "red", lwd = 2#, lty = 6
)
lines(e_active, do.points = FALSE, verticals = TRUE, col.01line = 0,
col = "green", lwd = 2)#, lty = 2)
lines(e, do.points = FALSE, verticals = TRUE, col.01line = 0,
col = "blue", lwd = 2)#, lty = 1)
axis(side = 1, at = seq(0, 30, 1), labels = rep("", 31), tck = 1, lty = 2, col = 'grey')
axis(side = 1, at = seq(0, 30, 5), labels = seq(0, 30, 5))
axis(side = 2, at = seq(0, 1, .2), labels = seq(0, 1, .2), las = 2, tck = 1, lty = 2, col = 'grey')
legend("bottomright", legend = c("All Videos", "Videos with 30 or More Views", "Videos with Less than 30 Views"),
lty = c(1, 1, 1), lwd = rep(2, 3), col = c("blue", "green", "red"),
bg="white", cex = 0.8)
box()
dev.off()
|
5bb925a40f98a04dab1979ba67569261de925bda | 0f24c8bfc4257f25e6397d627d95103b8fba028c | /munge/05-qc.R | 676d1ba2c6e0f8548f518df95145856474017941 | [] | no_license | BrownellLab/UISO_code | 10c9c068d99dd1e982810ba19fb21d42d9b30d73 | f81c34ea81c585e628cca1e23ad08cf9e0ea8454 | refs/heads/master | 2021-01-16T00:03:57.743454 | 2014-12-10T20:15:02 | 2014-12-10T20:15:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 731 | r | 05-qc.R | # # This does not run by default as it is time consuming
# # Uncomment to run
# # Run quality control metrics on raw data
# tmp.data.cel <- data.cel
# pData(tmp.data.cel) <- pData(data.cel)[, c("sample", "type")]
# arrayQualityMetrics(expressionset=tmp.data.cel, outdir="./reports/QC/Raw/", force=T, intgroup="type")
#
# QCReport(data.cel, file="./reports/QC/RawDataQC.pdf")
#
# # Run quality control metrics on RMA processed data
# tmp.eset.original <- eset.original
# pData(tmp.eset.original) <- pData(tmp.eset.original)[, c("sample", "type")]
# arrayQualityMetrics(expressionset=tmp.eset.original, outdir="./reports/QC/RMA/", force=T, intgroup="type")
#
# ProjectTemplate::cache("qc.raw")
# ProjectTemplate::cache("qc.rma")
|
5fb0130a447d2244d8166d674ea660aa2abeec35 | bd0b14be2dbef740a27d42f21c636ecc7892b37f | /mcmc_rppa.R | 79023fbdbf1ba4ecee8f19c4d3a08077e967c5f6 | [] | no_license | carlostpz/rppa_biometrics | 2e9568949fcf321af4891a999a6fef65d0648662 | c8fbe1ca451db6478c4f60b9734ff78ba023ad2e | refs/heads/master | 2020-03-31T22:54:27.322926 | 2019-03-01T19:44:54 | 2019-03-01T19:44:54 | 152,299,472 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 38,195 | r | mcmc_rppa.R |
# Define the main function that runs the MCMC algorithm as described in Appendix A
mcmc_rppa = function(n_iter,
kappa1,
kappa2,
y,
burn_in,
eta=1,
update_delta=TRUE,
delta1_equals_delta2=FALSE){
# Args:
#
# n_iter: (total) number of mcmc iterations
# kappa1: value of kappa1 fixed accross all iterations
# kappa2: value of kappa2 fixed accross all iterations
# y: array with values of protein expression
# burn_in: number of initial iterations to discard for burn-in when calculating
# posterior point estimates
# eta: hyperparameter for Dirichlet priors (it must be a single value rather than a vector)
# update_delta: (boolean) If TRUE, update delta in the MCMC. If FALSE, delta1 and delta2 are fixed at the initial value.
# Use delta_update when running the chain for the sencond time after point estimates are obtained
# according to Dahl (2006).
# delta1_equals_delta2: (boolean) If TRUE, constrains delta_2 to be equal to delta1. In practice, this sets gamma to be
# very close 1 so that we do not have proteins changing cluster from delta1 to delta2.
# fix
set.seed(100)
#
require(Rcpp)
require(RcppArmadillo)
require(MCMCpack)
require(msm)
require(Matrix)
require(mvtnorm)
# for real data, we have:
#CC <- 2
#n <- 55
#T <- 8
#D <- 3
#L <- 4
#J <- 3
# dimensions of y:
CC <- dim(y)[1]
D <- dim(y)[2]
n <- dim(y)[3]
L <- dim(y)[4]
T <- dim(y)[6]
J <- dim(y)[5]
# Imputing outlier of meaninglessly high expression by the mean of remaining repetitions
# Important: do this when using the real data only
#y[2, 1, 15, 4, 1, 1] = mean( y[2, 1, 15, 4, 2:3, 1] )
# number of data points and parameters
# these are to be used to calculate AIC, BIC, DIC
n_data = CC*D*L*n*J*T
# n_param = 11 + CC*D*L*( 2*kappa1 + kappa2 + 2 + 2*n )
n_param = CC*D*L*( 2*kappa1 + kappa2 )
ones_T = rep(1, T)
t_ones_T = t(ones_T)
chain_log_lik = rep(0, n_iter)
##################################
# Choosing prior hyperparameters
##################################
v0 = 10
mu00 = 0
v00 = 1/(1.5^2)
av = 1
bv = 1
a_gamma = 1
b_gamma = 1
eta1 = rep(eta, kappa1)
eta2 = rep(eta, kappa2 - kappa1)
v_Sigma = 10
V_Sigma = diag(T)
#############################
# initializing parameters
#############################
#Sigma
Sigma = array(0, dim = c(T,T,CC) )
for (cc in 1:CC){
Sigma[,,cc] = diag(T)
}
Sigma_inv = Sigma
chain_Sigma = array(0, dim = c(T, T, CC, n_iter) )
chain_Sigma_inv = array(0, dim = c(T, T, CC, n_iter) )
########
# delta
########
delta = array( 0 , dim = c( 2, CC, D, n ) )
mat_clust = array( 0, dim = c( CC, D, n, T ) )
for (cc in 1:CC){
for (d in 1:D){
for (i in 1:n){
for (tt in 1:T){
mat_clust[cc, d, i, tt] = mean ( y[cc,d,i, , ,tt] )
Dist_mat = dist( mat_clust[ cc, d, , ] )
cluster = hclust( d = Dist_mat )
delta[1, cc, d, ] = cutree( cluster, k=kappa1 )
}
}
}
}
delta[2, , , ] = delta[1, , , ]
chain_delta = array( 0, dim = c(2, CC, D, n, n_iter) )
############
# tau_cdl
############
tau = array(1, dim=c(CC, D, L, 2))
tau[ , , , 2] = 3
chain_tau = array( 0, dim = c( CC, D, L, 2, n_iter ) )
############
# mu*_cdl
############
# IMPORTANT: the last kappa2 - kappa1 entries of mus_cdl1 and mus_cdl3 must never be used in the algorithm
mus = array(0, dim=c( CC, D, L, 3, kappa2 ) )
chain_mu_star = array( 0, dim = c( CC, D, L, 3, kappa2, n_iter ) )
###########
# mus_cdl1
###########
# Initialize as the mean of the protein expressions in the initial time ( t=1 )
for (cc in 1:CC){
for (d in 1:D){
for (l in 1:L){
mus[ cc, d, l, 1, 1:kappa1] = mean( y[cc,d, , l, , 1] )
}
}
}
###########
# mus_cdl2
###########
# Initialize as the mean of the protein expressions in the "middle" time ( t=4 )
for (cc in 1:CC){
for (d in 1:D){
for (l in 1:L){
mus[ cc, d, l, 2, 1:kappa2] = mean( y[cc,d, , l, , 4] )
}
}
}
###########
# mus_cdl3
###########
# Initialize as the mean of the protein expressions in the final time ( t=8 )
for (cc in 1:CC){
for (d in 1:D){
for (l in 1:L){
mus[ cc, d, l, 3, 1:kappa1] = mean( y[cc,d, , l, , 8] )
}
}
}
# initializing mu_cdli
chain_mu_cdli = array( 0, dim = c(CC, D, L, n, T, n_iter) )
#########
# gamma
#########
gamma = 0.5
chain_gamma = rep( 0, n_iter)
#######
# pi2
#######
pi2 = rep(1/(kappa2 - kappa1), kappa2 - kappa1)
chain_pi2 = matrix(0, ncol = length(pi2), nrow = n_iter)
#######
# pi1
#######
pi1 = rep(1/kappa1, kappa1)
chain_pi1 = matrix(0, ncol = length(pi1), nrow = n_iter)
######
# v0u
######
v0u = rep(1, 3)
chain_v0u = matrix(0, ncol = length(v0u), nrow = n_iter)
#######
# mu0u
#######
mu0u = c(0, 0, 0)
chain_mu0u = matrix(0, ncol = length(mu0u), nrow = n_iter)
#######################################################################
#
# MCMC
#
#######################################################################
# Profile:
#Rprof("/home/tadeu/ut_austin/RPPA/depRPPA/inference/rppa_prof.txt")
#################
#   subroutines  #
#################
# Pre-caching sum_j y_cdilt
sum_y_over_j = array(0, dim = c(CC, D, n, L, T) )
for ( cc in 1:CC){
for (d in 1:D){
for (i in 1:n){
for (l in 1:L){
for (time in 1:T){
sum_y_over_j[cc,d, i, l, time] = sum( y[cc,d,i,l, ,time] )
}
}
}
}
}
############################################################################
######################### Marginalization part ########################
############################################################################
# function that builds the vector mu_cdli (general version of the function)
# Assemble the length-T mean trajectory mu_cdli for one protein: the first
# tau_cdl1 time points take the level-1 cluster mean mus_cdl1[delta_1cdi],
# the middle segment (tau_cdl1+1):tau_cdl2 takes mus_cdl2[delta_2cdi], and
# the remaining time points take mus_cdl3[delta_1cdi].
# Assumes 1 <= tau_cdl1 < tau_cdl2 < T, which the tau samplers guarantee.
build_mu_cdli_margin = function( mus_cdl1, mus_cdl2, mus_cdl3,
                                 tau_cdl1, tau_cdl2,
                                 delta_1cdi, delta_2cdi, T=8){
  c( rep( mus_cdl1[ delta_1cdi ], tau_cdl1 ),
     rep( mus_cdl2[ delta_2cdi ], tau_cdl2 - tau_cdl1 ),
     rep( mus_cdl3[ delta_1cdi ], T - tau_cdl2 ) )
}
# building the design matrix: X_cdil
# Build the T x (2*kappa1 + kappa2) design matrix X_cdil that maps the
# stacked coefficient vector ( mu*_1 (kappa1 entries), mu*_2 (kappa2 entries),
# mu*_3 (kappa1 entries) ) onto the T time points: each row selects the one
# cluster mean active at that time, given the memberships and change points.
# Assumes 1 <= tau_cdl1 < tau_cdl2 < T.
build_X = function( delta_1cdi, delta_2cdi, tau_cdl1, tau_cdl2, kappa1, kappa2, T=8 ){
  out = matrix(0, nrow = T, ncol = 2 * kappa1 + kappa2)
  # column of the active coefficient for each of the T time points
  segment_cols = c( rep( delta_1cdi,                   tau_cdl1 ),
                    rep( delta_2cdi + kappa1,          tau_cdl2 - tau_cdl1 ),
                    rep( delta_1cdi + kappa1 + kappa2, T - tau_cdl2 ) )
  out[ cbind( seq_len(T), segment_cols ) ] = 1
  return(out)
}
######################
# sampling tau_cdl1
######################
# Gibbs update for the first change point tau_cdl1 (condition cc, drug d,
# dose l), with the cluster-level means mu* integrated out analytically.
# For each candidate change point m in 1:(tau_cdl2 - 1) it accumulates the
# normal-normal conjugate posterior (S_cdl_star, m_cdl_star) over all
# proteins, evaluates a log marginal weight, and samples m from the
# normalized pmf.
# NOTE: relies on n (number of proteins) and J (number of replicates) from
# the enclosing mcmc_rppa() environment.
#
# Args:
#   l:                dose index (used to slice sum_y_over_j_cd / y_cd)
#   mu0u, v0u:        prior means / precisions for the three mean levels
#   delta_1cd, delta_2cd: per-protein cluster memberships (levels 1 and 2)
#   Sigma_inv_c:      T x T inverse covariance for condition cc
#   tau_cdl2:         current second change point (upper bound for tau_cdl1)
#   kappa1, kappa2:   numbers of clusters for the outer / middle segments
#   sum_y_over_j_cd:  data summed over replicates, indexed [i, l, t]
#   y_cd:             raw data, indexed [i, l, j, t]
#   T:                number of time points
# Returns: a single integer in 1:(tau_cdl2 - 1).
sampling_tau_cdl1 = function( l, mu0u, v0u,
                              delta_1cd, delta_2cd, Sigma_inv_c, tau_cdl2,
                              kappa1, kappa2, sum_y_over_j_cd, y_cd, T=8){
  # If tau_cdl2 == 2 the only admissible value is 1 (no sampling needed).
  if (tau_cdl2 == 2){
    return(1)
  }else{
    pmf_tau_cdl1 = rep( 0, tau_cdl2-1 )
    for ( m in 1:( tau_cdl2 - 1 ) ){
      # prior mean and precision matrix
      m_cdl = c( rep(mu0u[1], kappa1), rep(mu0u[2], kappa2), rep(mu0u[3], kappa1) )
      S_cdl_inv = diag( c( rep(v0u[1], kappa1), rep(v0u[2], kappa2), rep(v0u[3], kappa1) ) )
      S_cdl_star = S_cdl_inv
      m_cdl_star = S_cdl_inv %*% m_cdl
      for (i in 1:n){ # loop through proteins "i"
        X_cdil = build_X( delta_1cdi = delta_1cd[i],
                          delta_2cdi = delta_2cd[i],
                          tau_cdl1 = m,
                          tau_cdl2 = tau_cdl2,
                          kappa1 = kappa1,
                          kappa2 = kappa2 )
        # posterior covariance matrix
        S_cdl_star = S_cdl_star + J * t(X_cdil) %*% Sigma_inv_c %*% X_cdil
        # posterior mean vector
        m_cdl_star = m_cdl_star + t(X_cdil) %*% Sigma_inv_c %*% sum_y_over_j_cd[i,l, ]
      } # end of loop through proteins "i"
      # finalizing: convert accumulated precision/score into covariance/mean
      S_cdl_star = solve(S_cdl_star)
      m_cdl_star = S_cdl_star %*% m_cdl_star
      # calculating the "likelihood": quadratic form of residuals around the
      # posterior-mean trajectory, summed over proteins and replicates
      sum_aux = 0
      for (i in 1:n){
        mu_cdli_aux = build_mu_cdli_margin( mus_cdl1 = m_cdl_star[1:kappa1],
                                            mus_cdl2 = m_cdl_star[ (kappa1 + 1) :( kappa1 + kappa2) ],
                                            mus_cdl3 = m_cdl_star[ ( kappa1 + kappa2 + 1): (2*kappa1 + kappa2)],
                                            tau_cdl1 = m,
                                            tau_cdl2 = tau_cdl2,
                                            delta_1cdi = delta_1cd[i],
                                            delta_2cdi = delta_2cd[i] )
        for (j in 1:J){
          expres = y_cd[i, l, j, ] - mu_cdli_aux
          sum_aux = sum_aux + t( expres ) %*% Sigma_inv_c %*% expres
        }
      }
      log_lik_part = as.numeric(sum_aux)
      # log weight for candidate m: log-determinant term plus prior and
      # likelihood quadratic forms (up to an additive constant)
      pmf_tau_cdl1[m]=
        0.5 * log( det( S_cdl_star ) ) - 0.5* (
          v0u[1] * sum( (m_cdl_star[ 1:kappa1] - mu0u[1] )^2 ) +
            v0u[2] * sum( (m_cdl_star[ (kappa1 + 1):(kappa1 + kappa2)] - mu0u[2])^2 ) +
            v0u[3] * sum( (m_cdl_star[ (kappa1 + kappa2 + 1):(2*kappa1 + kappa2)] - mu0u[3])^2 ) +
            log_lik_part
        )
    }
    # normalizing the weights (subtract max first for numerical stability)
    pmf_tau_cdl1 = exp(pmf_tau_cdl1 - max(pmf_tau_cdl1) )
    pmf_tau_cdl1 = pmf_tau_cdl1/sum(pmf_tau_cdl1)
    # sampling tau_cd1
    tau_sample = sample( size = 1, x = 1:(tau_cdl2-1), prob = pmf_tau_cdl1 )
    return( tau_sample )
  }
}
######################
# sampling tau_cdl2
######################
# Gibbs update for the second change point tau_cdl2 (condition cc, drug d,
# dose l), mirroring sampling_tau_cdl1: the cluster means are integrated out
# and a candidate m in (tau_cdl1 + 1):(T - 1) is drawn from the normalized
# marginal weights.
# NOTE: relies on n (number of proteins) and J (number of replicates) from
# the enclosing mcmc_rppa() environment.
# NOTE(review): the guard "tau_cdl1 == 6" hard-codes T - 2 for T = 8; if T
# is ever changed this guard must change with it — confirm intent.
#
# Args: as in sampling_tau_cdl1, except tau_cdl1 (current first change
#       point) replaces tau_cdl2 as the fixed bound.
# Returns: a single integer in (tau_cdl1 + 1):(T - 1).
sampling_tau_cdl2 = function( l, mu0u, v0u, delta_1cd, delta_2cd,
                              Sigma_inv_c, tau_cdl1,
                              kappa1, kappa2, sum_y_over_j_cd, y_cd, T=8){
  # If tau_cdl1 is at its maximum, 7 (= T - 1) is the only admissible value.
  if(tau_cdl1 == 6){
    return(7)
  }else{
    pmf_tau_cdl2 = rep( 0, T - tau_cdl1 - 1 )
    for ( m in ( tau_cdl1 + 1 ):(T-1) ){
      # prior mean and precision matrix
      m_cdl = c( rep(mu0u[1], kappa1), rep(mu0u[2], kappa2), rep(mu0u[3], kappa1) )
      S_cdl_inv = diag( c( rep(v0u[1], kappa1), rep(v0u[2], kappa2), rep(v0u[3], kappa1) ) )
      S_cdl_star = S_cdl_inv
      m_cdl_star = S_cdl_inv %*% m_cdl
      # accumulate the conjugate posterior over proteins
      for (i in 1:n){
        X_cdil = build_X( delta_1cdi = delta_1cd[i],
                          delta_2cdi = delta_2cd[i],
                          tau_cdl1 = tau_cdl1,
                          tau_cdl2 = m,
                          kappa1 = kappa1, kappa2 = kappa2 )
        # posterior covariance matrix
        S_cdl_star = S_cdl_star + J * t(X_cdil) %*% Sigma_inv_c %*% X_cdil
        # posterior mean vector
        m_cdl_star = m_cdl_star + t(X_cdil) %*% Sigma_inv_c %*% sum_y_over_j_cd[i,l, ]
      }
      # finalizing: convert accumulated precision/score into covariance/mean
      S_cdl_star = solve(S_cdl_star)
      m_cdl_star = S_cdl_star %*% m_cdl_star
      # calculating the "likelihood"
      sum_aux = 0
      for (i in 1:n){
        mu_cdli_aux = build_mu_cdli_margin( mus_cdl1 = m_cdl_star[ 1 : kappa1 ],
                                            mus_cdl2 = m_cdl_star[ (kappa1 + 1) : ( kappa1 + kappa2) ],
                                            mus_cdl3 = m_cdl_star[ ( kappa1 + kappa2 + 1) : (2*kappa1 + kappa2) ],
                                            tau_cdl1 = tau_cdl1,
                                            tau_cdl2 = m,
                                            delta_1cdi = delta_1cd[i],
                                            delta_2cdi = delta_2cd[i] )
        for (j in 1:J){
          expres = y_cd[i, l, j, ] - mu_cdli_aux
          sum_aux = sum_aux + t( expres ) %*% Sigma_inv_c %*% expres
        }
      }
      log_lik_part = as.numeric(sum_aux)
      # log weight for candidate m (stored at offset m - tau_cdl1)
      pmf_tau_cdl2[ m - tau_cdl1 ] =
        0.5 * log( det(S_cdl_star) ) - 0.5* (
          v0u[1] * sum( (m_cdl_star[ 1:kappa1] - mu0u[1] )^2 ) +
            v0u[2] * sum( (m_cdl_star[ (kappa1 + 1):(kappa1 + kappa2)] - mu0u[2])^2 ) +
            v0u[3] * sum( (m_cdl_star[ (kappa1 + kappa2 + 1):(2*kappa1 + kappa2)] - mu0u[3])^2 ) +
            log_lik_part
        )
    }
    # normalizing the weights (subtract max first for numerical stability)
    pmf_tau_cdl2 = exp(pmf_tau_cdl2 - max(pmf_tau_cdl2) )
    pmf_tau_cdl2 = pmf_tau_cdl2/sum(pmf_tau_cdl2)
    # sampling tau_cd2
    tau_sample = sample( size = 1, x = (tau_cdl1+1):(T-1), prob = pmf_tau_cdl2 )
    return( tau_sample )
  }
}
##################
# sample_mus_cd1
##################
# Gibbs update for the m-th level-1 cluster mean mu*_cdl1[m] (the mean over
# the first time segment 1:tau_cdl1). Draws from its full conditional: a
# truncated normal whose bounds enforce the ordering
# mu*_1[1] < ... < mu*_1[kappa1] (identifiability of the level-1 clusters).
# NOTE: relies on J, mu0u, v0u from the enclosing mcmc_rppa() environment
# and on rtnorm() from the msm package.
# NOTE(review): when kappa1 == 1 the m == 1 branch uses mus_cdl1[2] — an
# entry documented elsewhere as "never used" — as the upper bound; confirm
# kappa1 > 1 is always intended.
#
# Args:
#   l:           dose index
#   m:           cluster index being updated (1..kappa1)
#   Sigma_c_inv: T x T inverse covariance for the condition
#   mus_cdl1/2/3: current cluster means for the three segments
#   tau_cdl1, tau_cdl2: current change points (1 <= tau1 < tau2 < T)
#   delta_1cd, delta_2cd: per-protein memberships
#   y_cd:        data indexed [i, l, j, t]
#   kappa1:      number of level-1 clusters
# Returns: one draw of mu*_cdl1[m].
sample_mus_cdl1 = function (l, m, Sigma_c_inv,
                            mus_cdl1, mus_cdl2, mus_cdl3,
                            tau_cdl1, tau_cdl2,
                            delta_1cd, delta_2cd,
                            y_cd, kappa1, T=8 ){
  # Indicator vectors for the three time segments defined by the change points.
  u1 = rep(0, T)
  u2 = rep(0, T)
  u3 = rep(0, T)
  u1[ 1 : tau_cdl1 ] = 1
  u2[ (tau_cdl1 + 1) : tau_cdl2] = 1
  u3[ (tau_cdl2 + 1) : T] = 1
  t_u1 = t(u1)
  u = 1  # prior component index: level 1 uses mu0u[1] / v0u[1]
  mus_cdl3_m = mus_cdl3[m]
  # Proteins currently assigned to cluster m at level 1.
  set_P = which( delta_1cd == m )
  hash_P = length( set_P )
  # Data summed over those proteins and replicates, per time point.
  bfy = rep(0, T)
  for (time in 1:T){
    bfy[time] = sum( y_cd[set_P, l, , time] )
  }
  # Conjugate normal full conditional: variance b1, mean a1.
  b1 = 1/( v0u[u] + J * hash_P * t_u1 %*% Sigma_c_inv %*% u1 )
  a1 = b1 * ( t_u1 %*% Sigma_c_inv %*%
                (bfy - J * u2 * sum( mus_cdl2[ delta_2cd[ set_P ] ] ) -
                   J * hash_P * u3 * mus_cdl3_m ) + mu0u[u]*v0u[u] )
  # Truncated draw keeping the level-1 means ordered.
  if( m == 1 ){
    return( rtnorm(1, mean=a1, sd = sqrt(b1), upper=mus_cdl1[ m+1 ] ) )
  }else{
    if (m == kappa1){
      return( rtnorm(1, mean=a1, sd = sqrt(b1), lower=mus_cdl1[ m-1 ] ) )
    }else{
      return( rtnorm(1, mean=a1, sd = sqrt(b1), lower=mus_cdl1[ m-1 ], upper=mus_cdl1[ m+1 ] ) )
    }
  }
}
####################
# sample_mus_cd2
####################
# Gibbs update for the m-th level-2 cluster mean mu*_cdl2[m] (the mean over
# the middle time segment (tau_cdl1 + 1):tau_cdl2). Unlike the level-1
# update there is no ordering constraint, so this is a plain normal draw.
# NOTE: relies on J, mu0u, v0u from the enclosing mcmc_rppa() environment.
#
# Args mirror sample_mus_cdl1, except the membership used is delta_2cd and
# the other two segments' contributions are subtracted via mus_cdl1/mus_cdl3.
# Returns: one draw of mu*_cdl2[m].
sample_mus_cdl2 = function ( l, m, Sigma_c_inv,
                             mus_cdl1, mus_cdl3,
                             tau_cdl1, tau_cdl2,
                             delta_1cd, delta_2cd,
                             y_cd, T=8 ){
  # Indicator vectors for the three time segments.
  u1 = rep(0, T)
  u2 = rep(0, T)
  u3 = rep(0, T)
  u1[1: tau_cdl1] = 1
  u2[ (tau_cdl1 + 1):tau_cdl2] = 1
  u3[ (tau_cdl2 + 1):T] = 1
  u = 2  # prior component index: level 2 uses mu0u[2] / v0u[2]
  t_u2 = t(u2)
  # Proteins currently assigned to cluster m at level 2.
  set_P = which( delta_2cd ==m )
  hash_P = length( set_P )
  # Data summed over those proteins and replicates, per time point.
  bfy = rep(0, T)
  for (time in 1:T){
    bfy[time] = sum( y_cd[ set_P, l, , time ] )
  }
  # Conjugate normal full conditional: variance b2, mean a2.
  b2 = 1/( v0u[u] + J*hash_P*t_u2%*%Sigma_c_inv%*%u2 )
  a2 = b2 * ( t_u2 %*% Sigma_c_inv %*%
                ( bfy -
                    J * u1 * sum( mus_cdl1[ delta_1cd[set_P] ] ) -
                    J * u3 * sum( mus_cdl3[ delta_1cd[set_P] ] ) ) + mu0u[u]*v0u[u] )
  return( rnorm(1, mean=a2, sd = sqrt(b2) ) )
}
#################
# sample_mus_cd3
#################
# Gibbs update for the m-th level-3 cluster mean mu*_cdl3[m] (the mean over
# the final time segment (tau_cdl2 + 1):T). Level 3 shares the level-1
# membership delta_1cd; the draw is a plain normal (no ordering constraint).
# NOTE: relies on J, mu0u, v0u from the enclosing mcmc_rppa() environment.
#
# Args mirror sample_mus_cdl1 (with mus_cdl1/mus_cdl2 supplying the other
# segments' means). Returns one draw of mu*_cdl3[m].
sample_mus_cdl3 = function (l, m, Sigma_c_inv,
                            mus_cdl1, mus_cdl2,
                            tau_cdl1, tau_cdl2,
                            delta_1cd, delta_2cd,
                            y_cd, T=8 ){
  # Indicator vectors for the three time segments.
  u1 = rep(0, T)
  u2 = rep(0, T)
  u3 = rep(0, T)
  u1[ 1 : tau_cdl1 ] = 1
  u2[ ( tau_cdl1 + 1):tau_cdl2 ] = 1
  u3[ ( tau_cdl2 + 1):T ] = 1
  t_u3 = t(u3)
  u = 3  # prior component index: level 3 uses mu0u[3] / v0u[3]
  mus_cdl1_m = mus_cdl1[m]
  # Proteins currently assigned to cluster m at level 1 (shared with level 3).
  set_P = which( delta_1cd == m )
  hash_P = length( set_P )
  # Data summed over those proteins and replicates, per time point.
  bfy = rep(0, T)
  for (time in 1:T){
    bfy[time] = sum( y_cd[set_P, l, , time] )
  }
  # Conjugate normal full conditional: variance b3, mean a3.
  b3 = 1/( v0u[u] + J*hash_P*t_u3%*%Sigma_c_inv%*%u3 )
  a3 = b3 * ( t_u3%*% Sigma_c_inv %*%
                ( bfy -
                    J * u2 * sum( mus_cdl2[ delta_2cd[ set_P ] ] ) -
                    J * hash_P * u1 * mus_cdl1_m ) + mu0u[u]*v0u[u] )
  return( rnorm(1, mean=a3, sd = sqrt(b3) ) )
}
##########################
# sampling delta_1cdi
##########################
# Gibbs update for protein i's level-1 cluster membership delta_1cdi.
# If the current level-2 membership points at a shared (level-1) cluster,
# delta1 is forced equal to delta2; otherwise delta1 is drawn from the
# discrete full conditional proportional to pi1[m] times the likelihood of
# the protein's trajectories across all doses and replicates.
# NOTE: relies on T, L and J from the enclosing mcmc_rppa() environment.
#
# Args:
#   kappa1, kappa2: cluster counts
#   mus_cd1/2/3:  cluster means, indexed [dose l, cluster]
#   tau_cd1, tau_cd2: change points per dose
#   delta_2cdi:   current level-2 membership of protein i
#   Sigma_inv_c:  T x T inverse covariance
#   y_cdi:        protein-i data, indexed [l, j, t]
#   pi1:          level-1 mixture weights
# Returns: a single integer in 1:kappa1.
sample_delta_1cdi = function( kappa1, kappa2,
                              mus_cd1, mus_cd2, mus_cd3,
                              tau_cd1, tau_cd2, delta_2cdi,
                              Sigma_inv_c, y_cdi, pi1){
  # mus_cd1 has to include dose l and time t ( in this order )
  # idem for mus_cd2 and mus_cd3
  # tau_cd1 and tau_cd2 need to include doses l
  if ( delta_2cdi < kappa1 + 1){
    # delta2 is in the shared range 1:kappa1 -> delta1 follows it exactly
    return(delta_2cdi)
  }else{
    ones_T = rep(1, T)  # NOTE(review): never referenced below
    log_N_1cdi = rep(0, kappa1)
    # log-likelihood of assigning protein i to each level-1 cluster m,
    # accumulated over doses l and replicates j
    for ( m in 1:kappa1){
      for (l in 1:L){
        mu_cdli_m = build_mu_cdli_margin( mus_cdl1 = mus_cd1[l, ],
                                          mus_cdl2 = mus_cd2[l, ],
                                          mus_cdl3 = mus_cd3[l, ],
                                          tau_cdl1 = tau_cd1[l],
                                          tau_cdl2 = tau_cd2[l],
                                          delta_1cdi = m,
                                          delta_2cdi = delta_2cdi )
        for ( j in 1:J){
          expres = y_cdi[l,j, ] - mu_cdli_m
          log_N_1cdi[m] = log_N_1cdi[m] - 0.5 * t( expres ) %*% Sigma_inv_c %*% expres
        }
      }
    }
    # combine with prior weights pi1 and normalize on the log scale
    prob_delta_1cdi = exp( log_N_1cdi + log(pi1) - max( log_N_1cdi + log(pi1) ) )
    prob_delta_1cdi = prob_delta_1cdi/ sum(prob_delta_1cdi)
    new_delta1 = sample( size = 1, x = 1:kappa1, prob = prob_delta_1cdi )
    return( new_delta1 )
  }
}
##########################
# sampling delta_2cdi
##########################
# Gibbs update for protein i's level-2 cluster membership delta_2cdi.
# Candidates are the middle-segment-only clusters (kappa1 + 1):kappa2 (with
# prior weight pi2 * (1 - gamma)) plus the protein's own level-1 cluster
# delta_1cdi (with prior weight gamma, i.e. "no change of cluster").
# Entries of prob_delta_2cdi for all other level-1 clusters stay 0, so they
# can never be sampled.
# NOTE: relies on T, L and J from the enclosing mcmc_rppa() environment.
#
# Args: as in sample_delta_1cdi, plus gamma (probability that delta2 equals
#       delta1) and pi2 (weights over the kappa2 - kappa1 extra clusters).
# Returns: a single integer in {delta_1cdi} U {(kappa1 + 1):kappa2}.
sample_delta_2cdi = function( kappa1, kappa2,
                              mus_cd1, mus_cd2, mus_cd3,
                              tau_cd1, tau_cd2, delta_1cdi,
                              Sigma_inv_c, y_cdi, gamma, pi2){
  # mus_cd1 has to include dose l and time t ( in this order )
  # idem for mus_cd2 and mus_cd3
  # tau_cd1 and tau_cd2 need to include doses l
  ones_T = rep(1, T)  # NOTE(review): never referenced below
  log_N_2cdi = rep(0, kappa2 )
  prob_delta_2cdi = rep(0, kappa2)
  # log-likelihood of each admissible candidate m, over doses and replicates
  for ( m in c( (kappa1 + 1) : kappa2, delta_1cdi ) ){
    for (l in 1:L){
      mu_cdli_m = build_mu_cdli_margin( mus_cdl1 = mus_cd1[l, ],
                                        mus_cdl2 = mus_cd2[l, ],
                                        mus_cdl3 = mus_cd3[l, ],
                                        tau_cdl1 = tau_cd1[ l],
                                        tau_cdl2 = tau_cd2[ l],
                                        delta_1cdi = delta_1cdi,
                                        delta_2cdi = m )
      for ( j in 1:J){
        expres = y_cdi[l,j, ] - mu_cdli_m
        log_N_2cdi[m] = log_N_2cdi[m] - 0.5 * t( expres ) %*% Sigma_inv_c %*% expres
      }
    }
  }
  # attach the prior log-weights to each candidate
  index = (kappa1 + 1): kappa2
  prob_delta_2cdi[ index ] = log_N_2cdi[ index ] + log(pi2) + log( 1 - gamma )
  index = delta_1cdi
  prob_delta_2cdi[ index ] = log_N_2cdi[ index ] + log( gamma )
  # normalize over the admissible candidates only (log scale, max-shifted)
  index = c( delta_1cdi, (kappa1 + 1): kappa2 )
  prob_delta_2cdi[ index ] = prob_delta_2cdi[ index ] - max( prob_delta_2cdi[ index ] )
  prob_delta_2cdi[ index ] = exp( prob_delta_2cdi[ index ] )/ sum ( exp( prob_delta_2cdi[ index ] ) )
  new_delta2 = sample( size = 1, x = 1:kappa2, prob = prob_delta_2cdi )
  return( new_delta2 )
}
############################################################################
########################## Updates ###############################
############################################################################
start_time = proc.time()
for (iter in 1:n_iter){
#######################
# Updating v_0u
#######################
#v_01
u = 1
v0u[u] = rgamma( 1,
shape = av + 0.5 * L*CC*D*kappa1,
rate = 0.5 * sum( (mus[ , , , u, ] - mu0u[u])^2 ) + bv )
#v_02
u = 2
v0u[u] = rgamma( 1,
shape = av + 0.5 * L*CC*D*kappa2,
rate = 0.5 * sum( (mus[ , , , u, ] - mu0u[u])^2 ) + bv )
#v_03
u = 3
v0u[u] = rgamma( 1,
shape = av + 0.5 * L*CC*D*kappa1,
rate = 0.5 * sum( (mus[ , , , u, ] - mu0u[u])^2 ) + bv )
#####################
# Updating mu_0u
#####################
# mu_01
u = 1
mu0u[u] = rnorm ( 1,
sd = sqrt( 1/ (v00 + L*CC*D*kappa1 * v0u[u]) ),
mean = ( v0u[u] * sum( mus[ , , , 1, 1:kappa1] ) + mu00*v00 ) / (v00 + L*CC*D*kappa1*v0u[u]) )
# mu_02
u = 2
mu0u[u] = rnorm ( 1,
sd = sqrt( 1/ (v00 + CC*D*kappa2 * v0u[u]) ),
mean = ( v0u[u] * sum( mus[ , , , 2, 1:kappa2] ) + mu00*v00 ) / ( v00 + L*CC*D*kappa2* v0u[u] ) )
# mu_03
u = 3
mu0u[u] = rnorm ( 1,
sd = sqrt( 1/ (v00 + CC*D*kappa1 * v0u[u]) ),
mean = ( v0u[u] * sum( mus[ , , , 3, 1:kappa1] ) + mu00*v00 ) / (v00 + L*CC*D*kappa1 * v0u[u]) )
###################
# Updating tau1
###################
for (cc in 1:CC){
for (d in 1:D){
for (l in 1:L){
tau[cc, d, l, 1] = sampling_tau_cdl1( l = l, mu0u = mu0u,
v0u = v0u,
delta_1cd = delta[1,cc,d, ],
delta_2cd = delta[2,cc,d, ],
Sigma_inv_c = Sigma_inv[,,cc],
tau_cdl2 = tau[cc,d,l,2],
kappa1 = kappa1,
kappa2 = kappa2,
sum_y_over_j_cd = sum_y_over_j[cc,d, , , ],
y_cd = y[cc,d, , , , ])
}
}
}
##################
# Updating tau2
##################
for (cc in 1:CC){
for (d in 1:D){
for (l in 1:L){
tau[cc, d, l, 2] = sampling_tau_cdl2(l = l,
mu0u = mu0u,
v0u = v0u,
delta_1cd = delta[1,cc,d, ],
delta_2cd = delta[2,cc,d, ],
Sigma_inv_c = Sigma_inv[,,cc],
tau_cdl1 = tau[cc,d,l,1],
kappa1 = kappa1,
kappa2 = kappa2,
sum_y_over_j_cd = sum_y_over_j[cc,d, , , ],
y_cd = y[cc,d, , , , ])
}
}
}
########################
# Updating mu*_cdlu
########################
for (cc in 1:CC){
Sigma_c_inv = Sigma_inv[,,cc]
for (d in 1:D){
for (l in 1:L){
tau_cdl1 = tau[cc,d,l,1]
tau_cdl2 = tau[cc,d,l,2]
delta_1cd = delta[1,cc,d, ]
delta_2cd = delta[2,cc,d, ]
y_cd = y[cc,d, , , , ]
for (m in 1:kappa1 ){
mus[cc,d,l,1,m] = sample_mus_cdl1( l=l, m=m, Sigma_c_inv = Sigma_c_inv,
mus_cdl1 = mus[cc,d,l,1, ],
mus_cdl2 = mus[cc,d,l,2, ],
mus_cdl3 = mus[cc,d,l,3, ],
tau_cdl1 = tau_cdl1,
tau_cdl2 = tau_cdl2,
delta_1cd = delta_1cd,
delta_2cd = delta_2cd,
y_cd = y_cd,
kappa1 = kappa1 )
}
for (m in 1:kappa2){
mus[cc,d,l,2,m] = sample_mus_cdl2( l=l, m=m, Sigma_c_inv = Sigma_c_inv,
mus_cdl1 = mus[cc,d,l,1, ],
mus_cdl3 = mus[cc,d,l,3, ],
tau_cdl1 = tau_cdl1,
tau_cdl2 = tau_cdl2,
delta_1cd = delta_1cd,
delta_2cd = delta_2cd,
y_cd = y_cd)
}
for (m in 1:kappa1){
mus[cc,d,l,3,m] = sample_mus_cdl3( l=l, m=m, Sigma_c_inv = Sigma_c_inv,
mus_cdl1 = mus[cc,d,l,1, ],
mus_cdl2 = mus[cc,d,l,2, ],
tau_cdl1 = tau_cdl1,
tau_cdl2 = tau_cdl2,
delta_1cd = delta_1cd,
delta_2cd = delta_2cd,
y_cd = y_cd)
}
} # end for loop in l
} # end for loop in d
} # end for loop in cc
#################################
# building the vectors mu_cdli
#################################
mu_cdli = array(0, dim = c(CC, D, L, n, T) )
for (cc in 1:CC){
for (d in 1:D){
for (l in 1:L){
mus_cdl1 = mus[cc,d,l,1, ]
mus_cdl2 = mus[cc,d,l,2, ]
mus_cdl3 = mus[cc,d,l,3, ]
tau_cdl1 = tau[cc,d,l,1]
tau_cdl2 = tau[cc,d,l,2]
for (i in 1:n){
mu_cdli[cc,d,l,i, ] = build_mu_cdli_margin(mus_cdl1 = mus_cdl1,
mus_cdl2 = mus_cdl2,
mus_cdl3 = mus_cdl3,
tau_cdl1 = tau_cdl1,
tau_cdl2 = tau_cdl2,
delta_1cdi = delta[1,cc,d,i],
delta_2cdi = delta[2,cc,d,i] )
}
}
}
}
######################
# Updating Sigma_c
######################
for( cc in 1:CC ){
scale_IW = V_Sigma
for( d in 1:D){
for (i in 1:n){
for (j in 1:J){
for (l in 1:L){
scale_IW = scale_IW + ( y[cc, d, i, l, j, ] - mu_cdli[cc, d, l, i, ] ) %*%
t( y[cc, d, i, l, j, ] - mu_cdli[cc, d, l, i, ] )
}
}
}
}
Sigma[,,cc] = riwish(v = n*D*L*J + v_Sigma, S=scale_IW)
Sigma_inv[,,cc] = solve( Sigma[,,cc] )
}
######################
# Updating gamma
######################
how_many_equal = sum( delta[ 2, , , ] == delta[ 1, , , ] )
gamma = rbeta( 1, shape1 = a_gamma + how_many_equal, shape2 = b_gamma + n*CC*D - how_many_equal )
########################
# Updating delta_1cdi
########################
# do I update delta ?
if (update_delta){
for (cc in 1:CC){
Sigma_inv_c = Sigma_inv[,,cc]
for (d in 1:D){
mus_cd1 = mus[cc,d, ,1, ]
mus_cd2 = mus[cc,d, ,2, ]
mus_cd3 = mus[cc,d, ,3, ]
tau_cd1 = tau[cc,d, ,1]
tau_cd2 = tau[cc,d, ,2]
for (i in 1:n){
delta[1, cc, d, i] = sample_delta_1cdi( kappa1 = kappa1, kappa2 = kappa2,
mus_cd1 = mus_cd1,
mus_cd2 = mus_cd2,
mus_cd3 = mus_cd3,
tau_cd1 = tau_cd1,
tau_cd2 = tau_cd2,
delta_2cdi = delta[2, cc, d, i],
Sigma_inv_c = Sigma_inv_c,
y_cdi = y[cc,d,i, , , ],
pi1=pi1 )
}
}
}
}
########################
# Updating delta_2cdi
########################
# do I update delta ?
if (update_delta){
# should delta2 be equal to delta1 ?
if(delta1_equals_delta2){
delta[2, , , ] = delta[1, , , ]
}else{
for (cc in 1:CC){
Sigma_inv_c = Sigma_inv[,,cc]
for (d in 1:D){
mus_cd1 = mus[cc,d, ,1, ]
mus_cd2 = mus[cc,d, ,2, ]
mus_cd3 = mus[cc,d, ,3, ]
tau_cd1 = tau[cc,d, ,1]
tau_cd2 = tau[cc,d, ,2]
for (i in 1:n){
delta[2, cc, d, i] = sample_delta_2cdi( kappa1=kappa1, kappa2=kappa2,
mus_cd1 = mus_cd1,
mus_cd2 = mus_cd2,
mus_cd3 = mus_cd3,
tau_cd1 = tau_cd1,
tau_cd2 = tau_cd2,
delta_1cdi = delta[1, cc, d, i],
Sigma_inv_c = Sigma_inv_c,
y_cdi = y[cc,d,i, , , ],
pi2=pi2,
gamma = gamma )
} # end of loop through i
} # end pf loop through d
} # end of loop through c
}
}
###################
# Updating pi1
###################
dir_param = rep(0, kappa1)
for ( a in 1: kappa1){
dir_param[a] = sum( delta[ 1, , , ] == a) + eta1[a]
}
pi1 = as.numeric( rdirichlet( 1, alpha = dir_param ) )
###################
# Updating pi2
###################
dir_param = rep(0, kappa2 - kappa1)
for ( a in 1: (kappa2 - kappa1) ){
dir_param[a] = sum( delta[ 2, , , ] == a + kappa1 ) + eta2[a]
}
pi2 = as.numeric( rdirichlet( 1, alpha = dir_param ) )
#################################
# building the vectors mu_cdli
#################################
mu_cdli = array(0, dim = c(CC, D, L, n, T) )
for (cc in 1:CC){
for (d in 1:D){
for (l in 1:L){
mus_cdl1 = mus[cc,d,l,1, ]
mus_cdl2 = mus[cc,d,l,2, ]
mus_cdl3 = mus[cc,d,l,3, ]
tau_cdl1 = tau[cc,d,l,1]
tau_cdl2 = tau[cc,d,l,2]
for (i in 1:n){
mu_cdli[cc,d,l,i, ] = build_mu_cdli_margin( mus_cdl1 = mus_cdl1,
mus_cdl2 = mus_cdl2,
mus_cdl3 = mus_cdl3,
tau_cdl1 = tau_cdl1,
tau_cdl2 = tau_cdl2,
delta_1cdi = delta[1,cc,d,i],
delta_2cdi = delta[2,cc,d,i] )
}
}
}
}
############################
# Log likelihood
############################
log_lik = 0
for(cc in 1:CC){
for (d in 1:D){
for (l in 1:L){
for(i in 1:n){
for (j in 1:J){
expres = ( y[cc,d,i,l,j, ] - mu_cdli[cc,d,l,i, ] )
log_lik = log_lik - 0.5 * T * log( 2 * pi ) +
0.5 * log( det(Sigma_inv[ , , cc]) ) -
0.5 * t(expres) %*% Sigma_inv[,,cc] %*% expres
}
}
}
}
}
#################################
# Storing the sampled values
#################################
chain_log_lik[iter] = log_lik
chain_mu_cdli[ , , , , , iter ] = mu_cdli
chain_mu_star[ , , , , , iter ] = mus
chain_Sigma[ , , , iter] = Sigma
chain_Sigma_inv[ , , , iter] = Sigma_inv
chain_delta[ , , , , iter ] = delta
chain_tau [ , , , , iter] = tau
chain_gamma[ iter ] = gamma
chain_pi2[ iter, ] = pi2
chain_pi1[ iter, ] = pi1
chain_v0u[ iter, ] = v0u
chain_mu0u[ iter, ] = mu0u
print(iter)
}
# index after convergence
index = (burn_in+1):n_iter
####################
# calculating BIC
####################
BIC = max( chain_log_lik ) - n_param * log( n_data )/2 # the bigger the better
####################
# calculating AIC
####################
AIC = max( chain_log_lik ) - n_param # the bigger the better
####################
# calculating WAIC
####################
# log "predictive density"
outside_sum = 0
for(cc in 1:CC){
for (d in 1:D){
for (l in 1:L){
for (i in 1:n){
for (j in 1:J){
inside_sum = 0
for (iter in index){
inside_sum = inside_sum + dmvnorm( x = y[cc, d, i, l ,j, ],
mean = chain_mu_cdli[cc,d,l,i, , iter],
sigma = chain_Sigma[ , , cc,iter])
}
outside_sum = outside_sum + log( max( inside_sum/length(index), 10^(-20) ) )
}
}
}
}
}
log_predictive = outside_sum
outside_sum = 0
pw1 = -2*mean( chain_log_lik[ index ] ) + 2 * log_predictive
WAIC = log_predictive - pw1 # the bigger the better
###########################
# marginal likelihood
###########################
max_log_lik = max( chain_log_lik[index] )
margin_lik = 1 / ( mean( exp( max_log_lik - chain_log_lik[index] ) ) / exp(max_log_lik) )
###########################
# LPML
###########################
outside_sum = 0
for(cc in 1:CC){
for (d in 1:D){
for (l in 1:L){
for (i in 1:n){
for (j in 1:J){
inside_sum = 0
for (iter in index){
inside_sum = inside_sum + 1/max( dmvnorm( x = y[cc, d, i, l ,j, ],
mean = chain_mu_cdli[cc,d,l,i, , iter],
sigma = chain_Sigma[ , , cc,iter]), 10^(-20) )
}
outside_sum = outside_sum + log( inside_sum/length(index) )
}
}
}
}
}
lpml = -1 * outside_sum
#####################################
# cluster membership point estimate
#####################################
# loading cpp function for cluster membership point estimation (based on Dahl 2006)
sourceCpp("choose_cluster.cpp")
# delta 1
ind1 = matrix(0, nrow = CC, ncol = D)
est_delta1 = array(0, dim=c( CC, D, n ) )
for(cc in 1:CC){
for (d in 1:D){
res = as.numeric( choose_cluster( chain_delta[1, cc, d, , ] ) )
min_res = min(res)
ind1[cc, d] = max( which( res == min_res ) )
est_delta1[cc, d, ] = chain_delta[1,cc,d, ,ind1[cc,d] ]
}
}
# delta 2
ind2 = matrix(0, nrow = CC, ncol = D)
est_delta2 = array(0, dim=c( CC, D, n ) )
for(cc in 1:CC){
for (d in 1:D){
res = as.numeric( choose_cluster( chain_delta[2, cc, d, , ] ) )
min_res = min(res)
ind2[cc, d] = max( which( res == min_res ) )
est_delta2[cc, d, ] = chain_delta[2,cc,d, ,ind1[cc,d] ]
}
}
# time elapsed
end_time = proc.time()
print ( end_time - start_time )
return( list( "mu" = chain_mu_cdli,
"mu_star" = chain_mu_star,
"Sigma" = chain_Sigma,
"delta" = delta,
"tau" = chain_tau,
"gamma" = chain_gamma,
"pi1" = chain_pi1,
"pi2" = chain_pi2,
"v0" = chain_v0u,
"mu0" = chain_mu0u,
"log_lik" = chain_log_lik,
"AIC" = AIC,
"BIC" = BIC,
"WAIC" = WAIC,
"est_delta1" = est_delta1,
"est_delta2" = est_delta2,
"margin_lik" = margin_lik,
"lpml" = lpml,
"kappa1" = kappa1,
"kappa2" = kappa2) )
}
# end of main MCMC function
# To load this function, run
# source("mcmc_rppa.R")
|
52c76ff8e942d8522d69d7b7712cdab39bb2ba0d | 0a101fc5301d4a8124290021b449c9f5c6fdfc2f | /R/ry_hweExact.R | d5b4ab2b225c6454680661e6979650ae25778ff4 | [] | no_license | ryamada22/Ronlyryamada | b1349f9c5173909fb8d0a48c7f023426ca810d8a | 9c4d20f1526ddd9ddaae71eab99896d2908d4bdf | refs/heads/master | 2020-12-18T17:25:20.135157 | 2016-06-18T07:27:53 | 2016-06-18T07:27:53 | 42,698,405 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 873 | r | ry_hweExact.R | #' Hardy-Weinberg Exact Test
#'
#' This function returns the exact Hardy-Weinberg test p-value and the probabilities of all possible heterozygote counts.
#' @param g is a integer vector with length 3.
#' @keywords exact test, Hardy-Weinberg equilibrium
#' @export
#' @examples
#' xx<-ry_hweExact(c(813,182,5))
#' xx$p.value
#' sum(xx$prob) # should be 1
ry_hweExact<-function(g=c(813,182,5)){
  # Exact test of Hardy-Weinberg equilibrium: enumerate every heterozygote
  # count compatible with the observed allele counts, compute each table's
  # exact probability, and sum the probabilities of all tables no more
  # likely than the observed one (two-sided exact p-value).
  #
  # Args:
  #   g: integer vector of length 3 with genotype counts (AA, Aa, aa).
  # Returns a list with:
  #   Aa:      all possible heterozygote counts
  #   prob:    their exact probabilities (sum to 1)
  #   obsprob: probability of the observed table
  #   p.value: exact HWE p-value
  n<-sum(g)        # total number of individuals
  nA<-2*g[1]+g[2]  # number of A alleles
  na<-2*g[3]+g[2]  # number of a alleles
  # every compatible table shares the parity of the observed Aa count
  evod<-g[2]%%2
  maxAa<-min(nA,na)-evod
  Aa<-seq(from=evod,to=maxAa,by=2)  # all possible heterozygote counts
  AA<-(nA-Aa)/2
  aa<-(na-Aa)/2
  obs<-(g[2]-evod)/2+1  # index of the observed table within Aa
  # Exact probability of each table on the log scale via lgamma for
  # numerical stability; note lgamma(2+1) = log(2), so the first group of
  # terms amounts to the usual 2^Aa factor.
  prob<-exp(n*lgamma(2+1)+lgamma(nA+1)+lgamma(na+1)-lgamma(2*n+1)-(AA*lgamma(2+1)+Aa*lgamma(1+1)+aa*lgamma(2+1))+lgamma(n+1)-(lgamma(AA+1)+lgamma(Aa+1)+lgamma(aa+1)))
  # p-value: total probability of tables no more likely than the observed one
  p.value<-sum(prob[prob<=prob[obs]])
  list(Aa=Aa,prob=prob,obsprob=prob[obs],p.value=p.value)
}
|
a29b52ab0b3a36379b30469a1e5029039425c3e7 | 67c7bea0e0205765d4f5c2ef550ee78ba3f7e1ba | /man/DelayedAperm-class.Rd | bb23667f33950a2653d3d02eb51f0bb64e962d86 | [] | no_license | LTLA/DelayedArray | 0ca09e4a1ca643e6a877f9be8eca95191836c348 | 478b91875503db474c6163a0609badaa2ad6ce26 | refs/heads/master | 2021-07-03T23:09:13.540384 | 2021-02-19T16:24:21 | 2021-02-19T16:24:21 | 156,434,324 | 0 | 0 | null | 2018-11-06T19:11:03 | 2018-11-06T19:11:03 | null | UTF-8 | R | false | false | 4,080 | rd | DelayedAperm-class.Rd | \name{DelayedAperm-class}
\alias{class:DelayedAperm}
\alias{DelayedAperm-class}
\alias{DelayedAperm}
\alias{is_noop,DelayedAperm-method}
\alias{summary.DelayedAperm}
\alias{summary,DelayedAperm-method}
\alias{dim,DelayedAperm-method}
\alias{dimnames,DelayedAperm-method}
\alias{extract_array,DelayedAperm-method}
\alias{is_sparse,DelayedAperm-method}
\alias{extract_sparse_array,DelayedAperm-method}
\alias{updateObject,SeedDimPicker-method}
\title{DelayedAperm objects}
\description{
NOTE: This man page is about \link{DelayedArray} internals and is provided
for developers and advanced users only.
The DelayedAperm class provides a formal representation of a
\emph{delayed "extended \code{aperm()}" operation}, that is, of a delayed
\code{\link[base]{aperm}()} that can drop and/or add \emph{ineffective}
dimensions. Note that since only \emph{ineffective} dimensions (i.e.
dimensions with an extent of 1) can be dropped or added, the length of
the output array is guaranteed to be the same as the length of the input
array.
DelayedAperm is a concrete subclass of the \link{DelayedUnaryOp} virtual
class, which itself is a subclass of the \link{DelayedOp} virtual class:
\preformatted{
DelayedOp
^
|
DelayedUnaryOp
^
|
DelayedAperm
}
DelayedAperm objects are used inside a \link{DelayedArray} object to
represent the \emph{delayed "extended \code{aperm()}" operations} carried
by the object. They're never exposed to the end user and are not intended
to be manipulated directly.
}
\usage{
\S4method{is_noop}{DelayedAperm}(x)
\S4method{summary}{DelayedAperm}(object, ...)
## ~ ~ ~ Seed contract ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
\S4method{dim}{DelayedAperm}(x)
\S4method{dimnames}{DelayedAperm}(x)
\S4method{extract_array}{DelayedAperm}(x, index)
## ~ ~ ~ Propagation of sparsity ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~
\S4method{is_sparse}{DelayedAperm}(x)
\S4method{extract_sparse_array}{DelayedAperm}(x, index)
}
\arguments{
\item{x, object}{
A DelayedAperm object.
}
\item{index}{
See \code{?\link{extract_array}} for a description of the \code{index}
argument.
}
\item{...}{
Not used.
}
}
\seealso{
\itemize{
\item \link{DelayedOp} objects.
\item \code{\link{showtree}} to visualize the nodes and access the
leaves in the tree of delayed operations carried by a
\link{DelayedArray} object.
\item \code{\link{extract_array}} and \code{\link{extract_sparse_array}}.
}
}
\examples{
## DelayedAperm extends DelayedUnaryOp which extends DelayedOp:
extends("DelayedAperm")
## ---------------------------------------------------------------------
## BASIC EXAMPLES
## ---------------------------------------------------------------------
a0 <- array(1:20, dim=c(1, 10, 2))
A0 <- DelayedArray(a0)
showtree(A0)
A <- aperm(A0, perm=c(2, 3, 1))
showtree(A)
class(A@seed) # a DelayedAperm object
M1 <- drop(A0)
showtree(M1)
class(M1@seed) # a DelayedAperm object
M2 <- t(M1)
showtree(M2)
class(M2@seed) # a DelayedAperm object
## ---------------------------------------------------------------------
## PROPAGATION OF SPARSITY
## ---------------------------------------------------------------------
## DelayedAperm objects always propagate sparsity.
sa0 <- as(a0, "SparseArraySeed")
SA0 <- DelayedArray(sa0)
showtree(SA0)
is_sparse(SA0) # TRUE
SA <- aperm(SA0, perm=c(2, 3, 1))
showtree(SA)
class(SA@seed) # a DelayedAperm object
is_sparse(SA@seed) # TRUE
## ---------------------------------------------------------------------
## SANITY CHECKS
## ---------------------------------------------------------------------
stopifnot(class(A@seed) == "DelayedAperm")
stopifnot(class(M1@seed) == "DelayedAperm")
stopifnot(class(M2@seed) == "DelayedAperm")
stopifnot(class(SA@seed) == "DelayedAperm")
stopifnot(is_sparse(SA@seed))
}
\keyword{methods}
|
5e41ecfe58f8f175c5663a8721172f10071ef53d | ff9eb712be2af2fa24b28ecc75341b741d5e0b01 | /man/print.gofCensored.Rd | 6c07bec5d3a8619c42ec5a8b089572af3c7c27c5 | [] | no_license | alexkowa/EnvStats | 715c35c196832480ee304af1034ce286e40e46c2 | 166e5445d252aa77e50b2b0316f79dee6d070d14 | refs/heads/master | 2023-06-26T19:27:24.446592 | 2023-06-14T05:48:07 | 2023-06-14T05:48:07 | 140,378,542 | 21 | 6 | null | 2023-05-10T10:27:08 | 2018-07-10T04:49:22 | R | UTF-8 | R | false | false | 1,946 | rd | print.gofCensored.Rd | \name{print.gofCensored}
\alias{print.gofCensored}
\title{
Print Output of Goodness-of-Fit Tests Based on Censored Data
}
\description{
Formats and prints the results of performing a goodness-of-fit test. This method is
automatically called by \code{\link[base]{print}} when given an object of class
\code{"gofCensored"}. Currently, the only function that produces an object of
this class is \code{\link{gofTestCensored}}.
}
\usage{
\method{print}{gofCensored}(x, show.cen.levels = TRUE,
pct.censored.digits = .Options$digits, ...)
}
\arguments{
\item{x}{
an object of class \code{"gofCensored"}. See \code{\link{gofCensored.object}} for
details.
}
\item{show.cen.levels}{
logical scalar indicating whether to print the censoring levels. The default is
\code{show.cen.levels=TRUE}.
}
\item{pct.censored.digits}{
numeric scalar indicating the number of significant digits to print for the
percent of censored observations.
}
\item{\dots}{
arguments that can be supplied to the \code{\link[base]{format}} function.
}
}
\details{
This is the \code{"gofCensored"} method for the generic function
\code{\link[base]{print}}.
Prints name of the test, hypothesized distribution, estimated population parameter(s),
estimation method, data name, sample size, censoring information, value of the test
statistic, parameters associated with the null distribution of the test statistic,
p-value associated with the test statistic, and the alternative hypothesis.
}
\value{
Invisibly returns the input \code{x}.
}
\references{
Chambers, J. M. and Hastie, T. J. (1992). \emph{Statistical Models in S}.
Wadsworth & Brooks/Cole.
}
\author{
Steven P. Millard (\email{EnvStats@ProbStatInfo.com})
}
\seealso{
\link[=FcnsByCatCensoredData]{Censored Data}, \code{\link{gofCensored.object}},
\code{\link[base]{print}}.
}
\keyword{print} |
119804e7536210035f9b4717e0eeaf0ab1f7e7ed | 23823d54414d1ab7d9a8e71be79569efec000973 | /Alpha diversity plots/Lime Alpha Diversity - boxPlots.R | c42331e5a8d919f9f8c13002b7b4555523480089 | [] | no_license | CamEJ/Plotting-Mothur-data-in-R-and-elsewhere | cbe8b5f63472068f76c3af1369e2f337cfdb39b9 | 921f689497235bd585527501ffddf39bce3788aa | refs/heads/master | 2023-07-10T16:11:29.825927 | 2023-06-27T10:45:50 | 2023-06-27T10:45:50 | 87,837,866 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,368 | r | Lime Alpha Diversity - boxPlots.R | ## Box and whisker plot - Lime Alpha Diversity
## Box and whisker plot - Lime Alpha Diversity
##
## Reads a mothur alpha-diversity summary table and draws one boxplot per
## diversity measure (inverse Simpson, observed OTUs, sample coverage),
## grouped by Growth stage and filled by nucleic-acid type (DNA vs cDNA).
## The three plots previously repeated the same ggplot chain verbatim; the
## shared chain now lives in make_alpha_boxplot() below.

d <- read.table("LimeAllAlphaDiv.txt", sep = "\t", row.names = 1, header = TRUE)

## load ggplot2; grid is needed so unit() works for legend.key.size below
library(ggplot2)
library(grid)

## keep samples in order, i.e. DNA then cDNA
## (setFactorOrder is a helper function that must be defined beforehand)
d[["NucType"]] <- setFactorOrder(d[["NucType"]], c("DNA", "cDNA"))

## ggthemr sets a two-colour palette (one colour for DNA, one for cDNA),
## matching the WP3 colour scheme used in the other plots.
library(ggthemr)
dark_cols <- c("black", "lightslateblue", "gray3")
DarkCols1 <- c("#555555", dark_cols)

# remove previous ggthemr effects:
ggthemr_reset()

# Define colours for the figures with define_palette
darkCols <- define_palette(
  swatch = DarkCols1, # colours for plotting points and bars
  gradient = c(lower = DarkCols1[1L], upper = DarkCols1[2L]), # endpoints for continuous colours
  background = "white" # white background
)

# set the theme for the figures:
ggthemr(darkCols)

# Build one alpha-diversity boxplot.
#
# data : data frame holding Growth, NucType and the diversity columns
# yvar : name of the diversity column to plot (string)
# ylab : y-axis label for that measure
#
# Returns the ggplot object; calling it unassigned at top level prints the
# plot interactively, exactly like the original unassigned ggplot chains.
make_alpha_boxplot <- function(data, yvar, ylab) {
  ggplot(data, aes_string("factor(Growth)", yvar, fill = "factor(NucType)")) +
    # colour = black makes the box outlines black
    geom_boxplot(colour = "black") +
    # theme_bw() makes the background white
    theme_bw() +
    # axis label plus a blank legend title
    labs(y = ylab, fill = " ") +
    # text sizes/placement; legend.key.size needs unit() from 'grid'
    theme(axis.text.x = element_text(size = 18, vjust = 0.5, colour = "black"),
          axis.text.y = element_text(size = 16, vjust = 0.5, colour = "black"),
          axis.title.y = element_text(size = 18, vjust = 1, colour = "black"),
          legend.text = element_text(size = 18, vjust = 0.5),
          legend.key.size = unit(2, "cm"),
          axis.title.x = element_blank())
}

#### inverse Simpson diversity ####
make_alpha_boxplot(d, "invsimpson", "Inverse Simpson")
#### number of OTUs observed ####
make_alpha_boxplot(d, "sobs", "Number of OTUs observed")
#### sample coverage ####
make_alpha_boxplot(d, "coverage", "Sample coverage")
|
a85f76dae6e2b803781539231c410040ed75efa8 | b4be3cc45d29c53fabd32b6fd60a484c625af6fe | /startingvalues/model3_startvalues.R | e3e69eebb252d7d7062f115057f0ac2200fd4567 | [] | no_license | ccrismancox/JPR_reputation4resolve | d7238285c87ed42637ed978943f7b423d1989dec | 9a58dfd5b4f02af6069a6dfdb6540e11584ac156 | refs/heads/master | 2023-03-08T03:50:40.123938 | 2021-02-24T18:23:09 | 2021-02-24T18:23:09 | 341,600,031 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,900 | r | model3_startvalues.R | rm(list=ls())
# This is the main model: find model-3 starting values by differential
# evolution (DEoptim), restarting from the last population until DEoptim
# stops before hitting the iteration cap (i.e. steptol convergence).
library(DEoptim)
library(doParallel)
library(doRNG)
library(data.table)
library(matrixStats)
source("../functions/estimationFunctions2018.R")
source("../functions/deoptimWrap.r")
load("../Data/woaData_USEA.rdata")

rwin <- RWIN2 # military defeats are censored
fullData <- cbind.data.frame(ccode, year, caseList, y, RWIN2, X)
fullData <- as.data.table(fullData)
X[, 14] <- 1 - X[, 14] # territory is 1
XR <- XG <- X[, c(1:8, 10:11, 14)]
ZG <- cbind(1, X[, c("polity2", "rgdpna", "Incomp", "lmtnest", "ethfrac", "relfrac", "milper")])

# Objective: likelihood wrapper around lik() with the data fixed in scope.
llik <- function(x0) {
  lik(x0, XR = XR, XG = XG, Z = ZG, y = y, rwin = rwin)
}

# Box constraints on all parameters (2 blocks of XR/XG plus 2 blocks of ZG).
upper <- rep(10, ncol(XR) * 2 + 2 * ncol(ZG))
lower <- -upper

# Run one DEoptim pass, optionally warm-started from a previous population.
# parVar exports the listed globals to the parallel workers.
run_deoptim <- function(initialpop = NULL) {
  ctrl <- list(parallelType = 1,
               itermax = 500000,
               trace = FALSE,
               parVar = c("lik",
                          "XR",
                          "XG",
                          "y",
                          "ZG",
                          "rwin",
                          "F1.given",
                          "F2.given",
                          "f1.given",
                          "f2.given"),
               steptol = 100000)
  if (!is.null(initialpop)) {
    ctrl$initialpop <- initialpop
  }
  DEoptim(llik, lower, upper, control = do.call(DEoptim.control, ctrl))
}

set.seed(1)
outerit <- 1
deTest <- run_deoptim()
outerit <- outerit + 1
# checkpoint progress after every pass
save(list = "deTest", file = "model3start_currentiteration.rdata")

# Additional iterations as needed: while the 500000-iteration cap was hit,
# restart from the last population and checkpoint again.
while (nrow(deTest$member$bestmemit) == 500000) {
  deTest <- run_deoptim(initialpop = deTest$member$pop)
  outerit <- outerit + 1
  save(list = "deTest", file = "model3start_currentiteration.rdata")
}

# save.image("model3_startvalues.rdata") #only if regenerating starting values
|
b352ad704929d42dd8749f0b0a53a93d7e22410b | 017414614b3d26ea10faa775fc3d4e630752ddd1 | /R/class_unexportable.R | b52327ad38eadffd2543bf7b64e39a65546ec4eb | [
"MIT"
] | permissive | krlmlr/targets | 4822815b7ae412af115296e5010de2edc61d1c50 | a8cbf46ce5d2274bd623085be749af3059ce6083 | refs/heads/main | 2023-04-13T16:43:18.213413 | 2021-04-21T20:29:07 | 2021-04-21T20:29:07 | 360,385,953 | 1 | 0 | NOASSERTION | 2021-04-22T03:55:53 | 2021-04-22T03:55:53 | null | UTF-8 | R | false | false | 459 | r | class_unexportable.R | #' @export
store_serialize_value.tar_unexportable <- function(store, target) {
  # Replace the target's value with the serialized form of its object (so it
  # can leave the current process), preserving the value's iteration protocol.
  target$value <- value_init(
    store_serialize_object(target$store, target$value$object),
    iteration = target$settings$iteration
  )
}
#' @export
store_unserialize_value.tar_unexportable <- function(store, target) {
  # Inverse of serialization: rebuild the in-memory object from its serialized
  # form and wrap it back up as the target's value with the same iteration.
  target$value <- value_init(
    store_unserialize_object(target$store, target$value$object),
    iteration = target$settings$iteration
  )
}
|
ad3662f70fb19f5ab4bcc98aae6e2756918ac40c | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/plsRglm/examples/cvtable.Rd.R | c38e4f3db3fe327dc6d1b973a0307b2f18c67838 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,407 | r | cvtable.Rd.R | library(plsRglm)
### Name: cvtable
### Title: Table method for summary of cross validated PLSR and PLSGLR
### models
### Aliases: cvtable cvtable.plsR cvtable.plsRglm
### Keywords: methods print

### ** Examples

# Quick example: PLSR on the Cornell data, 6 components, 6-fold CV,
# NK = 5 repetitions of the cross-validation.
data(Cornell)
XCornell<-Cornell[,1:7]
yCornell<-Cornell[,8]
cv.modpls <- cv.plsR(dataY=yCornell,dataX=XCornell,nt=6,K=6,NK=5)
res.cv.modpls <- cvtable(summary(cv.modpls))
plot(res.cv.modpls) #defaults to type="CVQ2"
rm(list=c("XCornell","yCornell","cv.modpls","res.cv.modpls"))

## No test:
# Same analysis with NK = 25 repetitions (slower; excluded from checks).
data(Cornell)
XCornell<-Cornell[,1:7]
yCornell<-Cornell[,8]
cv.modpls <- cv.plsR(dataY=yCornell,dataX=XCornell,nt=6,K=6,NK=25)
res.cv.modpls <- cvtable(summary(cv.modpls))
plot(res.cv.modpls) #defaults to type="CVQ2"
rm(list=c("XCornell","yCornell","cv.modpls","res.cv.modpls"))

# NK = 100 repetitions.
data(Cornell)
XCornell<-Cornell[,1:7]
yCornell<-Cornell[,8]
cv.modpls <- cv.plsR(dataY=yCornell,dataX=XCornell,nt=6,K=6,NK=100)
res.cv.modpls <- cvtable(summary(cv.modpls))
plot(res.cv.modpls) #defaults to type="CVQ2"
rm(list=c("XCornell","yCornell","cv.modpls","res.cv.modpls"))

# PLS-GLR variant (gaussian link), NK = 100 repetitions.
data(Cornell)
XCornell<-Cornell[,1:7]
yCornell<-Cornell[,8]
cv.modplsglm <- cv.plsRglm(dataY=yCornell,dataX=XCornell,nt=6,K=6,
modele="pls-glm-gaussian",NK=100)
res.cv.modplsglm <- cvtable(summary(cv.modplsglm))
plot(res.cv.modplsglm) #defaults to type="CVQ2Chi2"
rm(list=c("XCornell","yCornell","res.cv.modplsglm"))
## End(No test)
|
aaabd68fcc78d02ec13e568295394c93fc96c203 | 288b4b6998906714ab368e0ee14c70a4059be4ab | /data-raw/dat.collins1985b.r | 3d618de4abaf23ee4422d33f5d81d2c262105dff | [] | no_license | qsh7950/metadat | f6243a382c8c0e3f4c9a0e2cd657edb0ffa3e018 | 5c70fa63d7acfa1f315534fb292950513cb2281e | refs/heads/master | 2021-02-26T06:42:18.937872 | 2019-10-21T21:58:33 | 2019-10-21T21:58:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 160 | r | dat.collins1985b.r | dat.collins1985b <- read.table("data-raw/dat.collins1985b.txt", header=TRUE, stringsAsFactors=FALSE)
save(dat.collins1985b, file="data/dat.collins1985b.rda")
|
40e98fb1b0292cf6ad2e8bdc29b4165c8c7f775e | 04d9ad07b47eb7a693764aa6121e7506b64c1639 | /analysis/5-SupplMaterials.R | 2ebf32c100e901747df82c3dca1956da0c2c8f30 | [] | no_license | marisacasillas/CBL-Roete | f7560109557d265b801bac9d5e49cfc375733241 | cfa201b70fb8276a73532eba1f78786f95532843 | refs/heads/master | 2021-06-29T05:26:11.180202 | 2020-11-09T21:51:50 | 2020-11-09T21:51:50 | 180,978,195 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,718 | r | 5-SupplMaterials.R | ########## Supplementary materials analysis ##########
# ANALYSIS
## Local sample
# Select subset of the data, excluding all skipped utterance, and generate binary reconstruction score:
# Yi,j,k for the i-th utterance of the j-th child at age k: Y = 1 if utterance is correctly reconstructed,
# Y=0 if not.
subset.suppl.local.data <- subset(suppl.local.data,select=c(1,3,4,5,6,10,11,12))
subset.suppl.local.data$Y <- ifelse(subset.suppl.local.data$reconstructed == "True", 1,0)
subset.suppl.local.data <- subset(subset.suppl.local.data, suppl.local.data$skipped == "False")
# descriptive statistics
local_means_by_child_recon_perc <- aggregate(subset.suppl.local.data$Y, by = c(list(child = subset.suppl.local.data$child)),FUN = mean)
local_mean_of_means_by_child_recon_perc <- mean(local_means_by_child_recon_perc$x)*100
local_min <- min(local_means_by_child_recon_perc$x)*100
local_max <- max(local_means_by_child_recon_perc$x)*100
local_means_by_child_recon_score <- aggregate(subset.suppl.local.data$correctedscore, by = c(list(child = subset.suppl.local.data$child)),FUN = mean)
local_mean_of_means_by_child_recon_score <- mean(local_means_by_child_recon_score$x)
local_se_by_child_recon_score <- aggregate(subset.suppl.local.data$correctedscore, by = c(list(child = subset.suppl.local.data$child)),FUN = std.error)
local_mean_of_se_by_child_recon_score <- mean(local_se_by_child_recon_score$x)
# model with the binary uncorrected score as dependent variable, age as independent variable (fixed effect), by-child random intercept and random slopes of age.
model_local_uncorrected_suppl <- glmer(Y ~ age + (age|child), family=binomial(link = 'logit'), data = subset.suppl.local.data)
sink(paste0(plot.path,"local_uncorrected_suppl.txt"))
summary(model_local_uncorrected_suppl)
sink()
summary(model_local_uncorrected_suppl)
ranef(model_local_uncorrected_suppl)
# recenter child age
subset.suppl.local.data$recentered_age <- subset.suppl.local.data$age - 2.5
# model with the corrected score as dependent variable, recentered age as independent variable (fixed effect), by-child random intercept and random slopes of age.
model_local_corrected_suppl <- lmer(correctedscore ~ recentered_age + (recentered_age|child), data = subset.suppl.local.data, control=lmerControl(optimizer = "nloptwrap", optCtrl=list(maxfun=1000000)))
sink(paste0(plot.path,"local_corrected_suppl.txt"))
summary(model_local_corrected_suppl)
sink()
summary(model_local_corrected_suppl)
ranef(model_local_corrected_suppl)
## cumulative sample
# Select subset of the data, excluding all skipped utterance, and generate binary reconstruction score:
# Yi,j,k for the i-th utterance of the j-th child at age k: Y = 1 if utterance is correctly reconstructed,
# Y=0 if not.
subset.suppl.cumu.data <- subset(suppl.cumu.data,select=c(1,3,4,5,6,10,11,12))
subset.suppl.cumu.data$Y <- ifelse(subset.suppl.cumu.data$reconstructed == "True", 1,0)
subset.suppl.cumu.data <- subset(subset.suppl.cumu.data, suppl.cumu.data$skipped == "False")
# descriptive statistics
cumu_means_by_child_recon_perc <- aggregate(subset.suppl.cumu.data$Y, by = c(list(child = subset.suppl.cumu.data$child)),FUN = mean)
cumu_mean_of_means_by_child_recon_perc <- mean(cumu_means_by_child_recon_perc$x)*100
cumu_min <- min(cumu_means_by_child_recon_perc$x)*100
cumu_max <- max(cumu_means_by_child_recon_perc$x)*100
cumu_means_by_child_recon_score <- aggregate(subset.suppl.cumu.data$correctedscore, by = c(list(child = subset.suppl.cumu.data$child)),FUN = mean)
cumu_mean_of_means_by_child_recon_score <- mean(cumu_means_by_child_recon_score$x)
cumu_se_by_child_recon_score <- aggregate(subset.suppl.cumu.data$correctedscore, by = c(list(child = subset.suppl.cumu.data$child)),FUN = std.error)
cumu_mean_of_se_by_child_recon_score <- mean(cumu_se_by_child_recon_score$x)
# model with the binary uncorrected score as dependent variable, age as independent variable (fixed effect), by-child random intercept and random slopes of age.
model_cumu_uncorrected_suppl <- glmer(Y ~ age + (age|child), family=binomial(link = 'logit'), data = subset.suppl.cumu.data)
sink(paste0(plot.path,"cumu_uncorrected_suppl.txt"))
summary(model_cumu_uncorrected_suppl)
sink()
summary(model_cumu_uncorrected_suppl)
ranef(model_cumu_uncorrected_suppl)
# check for severity of non-convergence; not problematic
relgrad <- with(model_cumu_uncorrected_suppl@optinfo$derivs,solve(Hessian,gradient))
max(abs(relgrad))
# recenter child age
subset.suppl.cumu.data$recentered_age <- subset.suppl.cumu.data$age - 2.5
# model with the corrected score as dependent variable, recentered age as independent variable (fixed effect), by-child random intercept and random slopes of age.
model_cumu_corrected_suppl <- lmer(correctedscore ~ recentered_age + (recentered_age|child), data = subset.suppl.cumu.data, control=lmerControl(optimizer = "nloptwrap", optCtrl=list(maxfun=1000000)))
sink(paste0(plot.path,"cumu_corrected_suppl.txt"))
summary(model_cumu_corrected_suppl)
sink()
summary(model_cumu_corrected_suppl)
ranef(model_cumu_corrected_suppl)
# PLOTS
# 1) plot average controlled score over age, per child
## local sample
# collect data for plot
plot1.local.data <- aggregate(subset.suppl.local.data$correctedscore, by = c(list(child = subset.suppl.local.data$child),list(age=subset.suppl.local.data$age)),FUN = sum)
colnames(plot1.local.data)[3] <- "total_score"
plot1.local.data.temp <- aggregate(subset.suppl.local.data$correctedscore, by = c(list(child = subset.suppl.local.data$child),list(age = subset.suppl.local.data$age)), FUN = function(x){NROW(x)})
colnames(plot1.local.data.temp)[3] <- "total_num_utterances"
plot1.local.data <- merge(plot1.local.data, plot1.local.data.temp, by = c("child","age"))
plot1.local.data$averagescore <- plot1.local.data$total_score/plot1.local.data$total_num_utterances
# generate plot
plot.local.suppl.recon_score <- ggplot(plot1.local.data,
aes(x=age, y = averagescore, group = child, linetype = child, colour = child)) +
geom_line(size = 1.5) + basic.theme + theme(axis.text.x = element_text(size=22)) +
coord_cartesian(ylim=(c(-0.5,0.5))) +
xlab("\nAge (years)") +
ylab("Average reconstruction \nscore\n") +
ggtitle("Local sampling") +
geom_hline(yintercept=0) +
basic.theme + theme(axis.text.x = element_text(size=22)) +
theme(plot.title = element_text(size=30, face = "bold.italic", hjust = 0.5, margin=margin(b = 30, unit = "pt")))
# save plot
ggsave(paste0(plot.path, "plotsuppllocalrecon_score.png"), plot = (plot.local.suppl.recon_score+theme_apa()))
## cumulative sample
# collect data for plot
plot1.cumu.data <- aggregate(subset.suppl.cumu.data$correctedscore, by = c(list(child = subset.suppl.cumu.data$child),list(age=subset.suppl.cumu.data$age)),FUN = sum)
colnames(plot1.cumu.data)[3] <- "total_score"
plot1.cumu.data.temp <- aggregate(subset.suppl.cumu.data$correctedscore, by = c(list(child = subset.suppl.cumu.data$child),list(age = subset.suppl.cumu.data$age)), FUN = function(x){NROW(x)})
colnames(plot1.cumu.data.temp)[3] <- "total_num_utterances"
plot1.cumu.data <- merge(plot1.cumu.data, plot1.cumu.data.temp, by = c("child","age"))
plot1.cumu.data$averagescore <- plot1.cumu.data$total_score/plot1.cumu.data$total_num_utterances
# generate plot
plot.cumu.suppl.recon_score <- ggplot(plot1.cumu.data,
aes(x=age, y = averagescore, group = child, linetype = child, colour = child)) +
geom_line(size = 1.5) + basic.theme + theme(axis.text.x = element_text(size=22)) +
coord_cartesian(ylim=(c(-0.5,0.5))) +
xlab("\nAge (years)") +
ylab("Average reconstruction \nscore\n") +
ggtitle("Cumulative sampling") +
geom_hline(yintercept=0) +
basic.theme + theme(axis.text.x = element_text(size=22)) +
theme(plot.title = element_text(size=30, face = "bold.italic", hjust = 0.5, margin=margin(b = 30, unit = "pt")))
# save plot
ggsave(paste0(plot.path, "plotsupplcumurecon_score.png"), plot = (plot.cumu.suppl.recon_score + theme_apa()))
# 2) plot average percentage of correctly reconstructed utterances over age, per child
## local sample
# collect data for plot
plot2.local.data <- aggregate(subset.suppl.local.data$Y, by = c(list(child = subset.suppl.local.data$child),list(age=subset.suppl.local.data$age)),FUN = sum)
colnames(plot2.local.data)[3] <- "total_num_corr_reconstructions"
plot2.local.data.temp <- aggregate(subset.suppl.local.data$Y, by = c(list(child = subset.suppl.local.data$child),list(age = subset.suppl.local.data$age)), FUN = function(x){NROW(x)})
colnames(plot2.local.data.temp)[3] <- "total_num_utterances"
plot2.local.data <- merge(plot2.local.data, plot2.local.data.temp, by = c("child","age"))
plot2.local.data$percentages <- (plot2.local.data$total_num_corr_reconstructions/plot2.local.data$total_num_utterances)*100
# generate plot
plot.suppl.local.reconstruction_perc <- ggplot(plot2.local.data,
aes(x=age, y = percentages, group = child, linetype = child, colour = child)) +
geom_line(size = 1.5) + basic.theme + theme(axis.text.x = element_text(size=22)) +
coord_cartesian(ylim=(c(0,100))) +
xlab("\nAge (years)") +
ylab("Percentage correctly\n reconstructed utterances\n") +
ggtitle("Local sampling") +
basic.theme + theme(axis.text.x = element_text(size=22)) +
theme(plot.title = element_text(size=30, face = "bold.italic", hjust = 0.5, margin=margin(b = 30, unit = "pt")))
# save plot
ggsave(paste0(plot.path, "plotsuppllocalrecon_perc.png"), plot = (plot.suppl.local.reconstruction_perc+theme_apa()))
## cumulative sample
# collect data for plot
plot2.cumu.data <- aggregate(subset.suppl.cumu.data$Y, by = c(list(child = subset.suppl.cumu.data$child),list(age=subset.suppl.cumu.data$age)),FUN = sum)
colnames(plot2.cumu.data)[3] <- "total_num_corr_reconstructions"
plot2.cumu.data.temp <- aggregate(subset.suppl.cumu.data$Y, by = c(list(child = subset.suppl.cumu.data$child),list(age = subset.suppl.cumu.data$age)), FUN = function(x){NROW(x)})
colnames(plot2.cumu.data.temp)[3] <- "total_num_utterances"
plot2.cumu.data <- merge(plot2.cumu.data, plot2.cumu.data.temp, by = c("child","age"))
plot2.cumu.data$percentages <- (plot2.cumu.data$total_num_corr_reconstructions/plot2.cumu.data$total_num_utterances)*100
# generate plot
plot.suppl.cumu.reconstruction_perc <- ggplot(plot2.cumu.data,
aes(x=age, y = percentages, group = child, linetype = child, colour = child)) +
geom_line(size = 1.5) + basic.theme + theme(axis.text.x = element_text(size=22)) +
coord_cartesian(ylim=(c(0,100))) +
xlab("\nAge (years)") +
ylab("Percentage correctly\n reconstructed utterances\n") +
ggtitle("Cumulative sampling") +
basic.theme + theme(axis.text.x = element_text(size=22)) +
theme(plot.title = element_text(size=30, face = "bold.italic", hjust = 0.5, margin=margin(b = 30, unit = "pt")))
# save plot
ggsave(paste0(plot.path, "plotsupplcumurecon_perc.png"), plot = (plot.suppl.cumu.reconstruction_perc+theme_apa()))
## Combine local and cumulative plot

# Merge several ggplots that share a legend into one figure.
#
# ...       : the ggplot objects to arrange
# ncol/nrow : grid layout for the plots (defaults: one row)
# position  : where the single shared legend goes ("bottom" or "right")
#
# The legend is extracted from the FIRST plot, all per-plot legends are
# suppressed, and the plot grid plus the shared legend are drawn together.
# Draws the figure (grid.draw) and returns the combined gtable invisibly.
grid_arrange_shared_legend <- function(..., ncol = length(list(...)), nrow = 1, position = c("bottom", "right")) {
  plots <- list(...)
  position <- match.arg(position)
  # pull the legend ("guide-box" grob) out of the first plot
  g <- ggplotGrob(plots[[1]] +
                    theme(legend.position = position))$grobs
  # vapply instead of sapply: guarantees a character vector
  grob_names <- vapply(g, function(x) x$name, character(1))
  legend <- g[[which(grob_names == "guide-box")]]
  lheight <- sum(legend$height)
  lwidth <- sum(legend$width)
  # suppress the individual legends and lay the plots out in a grid
  gl <- lapply(plots, function(x) x +
                 theme(legend.position = "none"))
  gl <- c(gl, ncol = ncol, nrow = nrow)
  # reserve exactly the legend's height (bottom) or width (right)
  combined <- switch(position,
                     "bottom" = arrangeGrob(do.call(arrangeGrob, gl),
                                            legend, ncol = 1,
                                            heights = unit.c(unit(1, "npc") - lheight, lheight)),
                     "right" = arrangeGrob(do.call(arrangeGrob, gl),
                                           legend, ncol = 2,
                                           widths = unit.c(unit(1, "npc") - lwidth, lwidth)))
  grid.newpage()
  grid.draw(combined)
  # return gtable invisibly
  invisible(combined)
}
# function to remove the x-axis of the right-hand side graph without
# distorted graph sizes; the x-axis is shared with the left-hand side graph.
#
# Arrange several ggplots in an nrow x ncol grid with an optional main
# title (top) and sub title (bottom; used here as a shared x-axis label).
#
# ...       : ggplot objects to place in the grid
# nrow/ncol : grid layout; derived from the number of plots when NULL
# as.table  : if TRUE, reverse the row order (table-style filling)
# main/sub  : optional top / bottom text grobs
# plot      : draw the result immediately?
#
# Returns the assembled frame grob invisibly.
arrange_related_x_axes <- function(..., nrow=NULL, ncol=NULL, as.table=FALSE,
                                   main=NULL, sub=NULL, plot=TRUE) {
  dots <- list(...)
  n <- length(dots)
  # derive a layout from the number of plots when not fully specified
  # (&&/|| rather than &/|: these are scalar, short-circuiting conditions)
  if (is.null(nrow) && is.null(ncol)) { nrow = floor(n/2) ; ncol = ceiling(n/nrow) }
  if (is.null(nrow)) { nrow = ceiling(n/ncol) }
  if (is.null(ncol)) { ncol = ceiling(n/nrow) }
  fg <- frameGrob(layout = grid.layout(nrow, ncol))
  ii.p <- 1
  for (ii.row in seq_len(nrow)) {
    ii.table.row <- ii.row
    if (as.table) { ii.table.row <- nrow - ii.table.row + 1 }
    for (ii.col in seq_len(ncol)) {
      ii.table <- ii.p
      if (ii.p > n) break  # ran out of plots; leave remaining cells empty
      fg <- placeGrob(fg, ggplotGrob(dots[[ii.table]]),
                      row = ii.table.row, col = ii.col)
      ii.p <- ii.p + 1
    }
  }
  if (!is.null(main) || !is.null(sub)) {
    g <- frameGrob() # large frame to place title(s) and content
    g <- packGrob(g, fg)
    if (!is.null(main))
      g <- packGrob(g, textGrob(main), side = "top")
    if (!is.null(sub))
      g <- packGrob(g, textGrob(sub, gp = gpar(fontsize = 30), vjust = -1, hjust = 0), side = "bottom")
  } else {
    g <- fg
  }
  if (plot) grid.draw(g)
  invisible(g)
}
# arrange two graphs (local and cumulative sampled data) into one figure
# 1) plot 1: average reconstruction score x age, per child
# left panel: keep the y label, drop the x title and the legend
plot.local.suppl.recon_score.noxtitle <- plot.local.suppl.recon_score +
xlab("\n") +
ylab("Average reconstruction score\n") +
theme(axis.text.x = element_text(size=18),
axis.text.y = element_text(size=18),
legend.position="none")
# right panel: blank axis titles; legend placed inside the panel
plot.cumu.suppl.recon_score.noxtitle <- plot.cumu.suppl.recon_score +
xlab("\n") +
ylab("\n") +
theme(legend.key.width = unit(2, "cm"),
axis.text.x = element_text(size=18),
axis.text.y = element_text(size=18),
legend.position=c(0.75,0.25))
# write both panels side by side to a PNG; the shared x label is supplied
# via arrange_related_x_axes(sub = ...)
png(paste(plot.path,
"suppl_bothreconstruction.png", sep=""),
width=1500,height=700,units="px",
bg = "transparent")
grid.newpage()
arrange_related_x_axes(plot.local.suppl.recon_score.noxtitle,
plot.cumu.suppl.recon_score.noxtitle,
nrow=1, ncol = 2, as.table=TRUE,
sub="Age (years)")
dev.off()
# 2) plot 2: average reconstruction percentage x age, per child
plot.suppl.local.reconstruction_perc.noxtitle <- plot.suppl.local.reconstruction_perc +
xlab("\n") +
ylab("Percentage correctly\nreconstructed utterances\n") +
theme(axis.text.x = element_text(size=18),
axis.text.y = element_text(size=18),
legend.position="none")
plot.suppl.cumu.reconstruction_perc.noxtitle <- plot.suppl.cumu.reconstruction_perc +
xlab("\n") +
ylab("\n") +
theme(legend.key.width = unit(2, "cm"),
axis.text.x = element_text(size=18),
axis.text.y = element_text(size=18),
legend.position=c(0.75,0.25))
png(paste(plot.path,
"suppl_bothreconperc.png", sep=""),
width=1500,height=700,units="px",
bg = "transparent")
grid.newpage()
arrange_related_x_axes(plot.suppl.local.reconstruction_perc.noxtitle,
plot.suppl.cumu.reconstruction_perc.noxtitle,
nrow=1, ncol = 2, as.table=TRUE,
sub="Age (years)")
dev.off()
03ff08e48254d02303d22edf28d30b6659bf7593 | 8ae75ba8be59e47b20103c536b22e5d6df5ebbe4 | /man/srmsymc_readRecModel.Rd | 1aae450bc4c1646da1a18f6dba9f733a1779078e | [] | no_license | AndyCampbell/msy | 361c9fbb82310611b5fbeaa14a998984f3711dbe | 107a0321e84edab191f668ec14cc392bd1da11e5 | refs/heads/master | 2020-12-03T05:19:51.796250 | 2014-11-13T18:37:57 | 2014-11-13T18:37:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 270 | rd | srmsymc_readRecModel.Rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{srmsymc_readRecModel}
\alias{srmsymc_readRecModel}
\title{Recruitment model}
\usage{
srmsymc_readRecModel(path)
}
\arguments{
\item{path}{Path to the \code{srmsymc.dat} file from which the recruitment model is read.}
}
\description{
Read recruitment model from the srmsymc.dat file
}
|
c7247f85f98fe638f2b02f8c05cd6b0cfae969a2 | 15ba08494f3ce8731aff56873d16e58e062d05a5 | /Lab04.R | d0fd04b981134ac713c6bf6c41208c3107af751c | [] | no_license | zhaoleist/advStats | 98aea043c4ea5aec7a25e464592f4f8d9d9529cb | 92b7fdd7061897d128bf76a84cec1132e2654017 | refs/heads/master | 2021-05-09T19:22:07.546232 | 2018-04-26T03:07:12 | 2018-04-26T03:07:12 | 118,638,166 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,509 | r | Lab04.R | # Lab 4
rm(list=ls())
# Question (1A): plot the Exponential(rate = 5) prior truncated to [0, 1].
# aNum = pexp(1, rate = 5) = 1 - exp(-5) is the truncation constant, so
# expDist integrates to 1 on [0, 1].
probs <- seq(0,1,0.01)
aNum <- 0.9932621
expDist <- dexp(probs, rate=5)/aNum
plot(probs, expDist, ylab="density", main="question 1(A)")
####################################################################################
# define metropolis function
#
# Metropolis sampler for the posterior of a coin's heads-probability pi,
# using an Exponential(rate = 5) prior truncated to [0, 1] (aNum =
# pexp(1, rate = 5) normalises the truncated density) and a
# Binomial(numTrials, pi) likelihood.
#
# piOld     : starting value of the chain
# iteration : number of Metropolis steps
# numHeads  : observed number of heads
# numTrials : observed number of tosses
#
# Returns the full chain of sampled pi values (length `iteration`).
metropolis_priorExp <- function(piOld=0.5, iteration=100000, numHeads, numTrials){
  container <- numeric(iteration)  # preallocate instead of growing per step
  aNum <- 0.9932621  # pexp(1, rate = 5): truncation constant of the prior
  for (i in seq_len(iteration)){
    pOld <- dexp(piOld, rate=5)/aNum * dbinom(numHeads, numTrials, piOld)
    # propose a small Gaussian step, clamped to the valid range [0, 1]
    piNew <- piOld + rnorm(1, 0, sd=0.01)
    if (piNew > 1)
      piNew <- 1
    if (piNew < 0)
      piNew <- 0
    pNew <- dexp(piNew, rate=5)/aNum * dbinom(numHeads, numTrials, piNew)
    ratio <- pNew/pOld
    # Metropolis rule: accept if the posterior ratio beats 1 or a uniform
    # draw (short-circuit: runif() is only drawn when ratio <= 1, matching
    # the original sampler's RNG stream exactly)
    if (ratio > 1 || ratio >= runif(1))
      piOld <- piNew
    container[i] <- piOld
  }
  return(container)
}
#####################################################################################
# define grid function
#
# Grid approximation of the (unnormalised) posterior over the values in
# xVals: truncated Exponential(5) prior on [0, 1] times a
# Binomial(numTrials, x) likelihood. Returns c(densities, total): the
# unnormalised density at each grid point followed by their running sum
# (used by callers to normalise afterwards).
#
# xVals      : grid of candidate pi values
# i, mySum   : starting index / running total (legacy arguments; leave at
#              their defaults)
# numHeads, numTrials : observed data for the binomial likelihood
grid_priorExp <- function(xVals, i=1, mySum=0, numHeads, numTrials){
  # Fix: aNum used to be read from the global environment; define it
  # locally (pexp(1, rate = 5), same value the script sets globally).
  aNum <- 0.9932621
  posteriorDist <- vector()
  for (x in xVals){
    posteriorDist[i] <- dexp(x, rate=5)/aNum * dbinom(numHeads, numTrials, x)
    mySum <- mySum + posteriorDist[i]
    i <- i + 1
  }
  return(c(posteriorDist, mySum))
}
###################################################################################
# Question (1B): posterior for 14 heads / 10 tails. Compare the Metropolis
# histogram (green), the grid approximation (blue), and the conjugate
# Beta(40+heads, 40+tails) posterior under a Beta(40, 40) prior (red).
posteriorDist_metro <- metropolis_priorExp(0.5, 100000,14,24)
myHist <- hist(posteriorDist_metro, breaks=200, plot=FALSE)
# evaluate the grid posterior at the histogram bin midpoints; the last
# element of the returned vector is the normalising sum
myVector <- grid_priorExp(myHist$mids,1,0,14,24)
mySum <- myVector[length(myVector)]
posteriorDist_grid <- myVector[-length(myVector)]
minY <- 0
maxY <- max(myHist$counts/sum(myHist$counts), posteriorDist_grid/mySum, dbeta(myHist$mids, 40+14, 40+10)/sum(dbeta(myHist$mids, 40+14, 40+10)))
plot(myHist$mids, myHist$counts/sum(myHist$counts), xlim=c(0,1), col="green", ylim=c(minY, maxY),
main="with additional observation of 14 heads & 10 tails ")
points(myHist$mids, posteriorDist_grid/mySum, xlim=c(0, 1), col="blue", xlab="", ylab="", ylim=c(minY, maxY))
points(myHist$mids, dbeta(myHist$mids, 40+14, 40+10)/sum(dbeta(myHist$mids, 40+14, 40+10)),
xlim=c(0,1), ylim=c(minY, maxY), col="red", xlab="", ylab="")
legend("topleft", legend=c("metropolis","grid", "prior dbeta(40, 40)"),
col=c("green", "blue", "red"), lty=3, lwd = 3, cex=0.6)
######################################################################################
# Question (1C): same comparison with a much larger sample
# (583 heads / 417 tails); longer chain, zoomed x-axis around the posterior.
posteriorDist_metro_2 <- metropolis_priorExp(0.5, 500000,583,1000)
myHist_2 <- hist(posteriorDist_metro_2, breaks=200, plot=FALSE)
myVector_2 <- grid_priorExp(myHist_2$mids,1,0,583,1000)
mySum_2 <- myVector_2[length(myVector_2)]
posteriorDist_grid_2 <- myVector_2[-length(myVector_2)]
minY_2 <- 0
maxY_2 <- max(myHist_2$counts/sum(myHist_2$counts), posteriorDist_grid_2/mySum_2, dbeta(myHist_2$mids, 40+583, 40+417)/sum(dbeta(myHist_2$mids, 40+583, 40+417)))
plot(myHist_2$mids, myHist_2$counts/sum(myHist_2$counts), xlim=c(0.4,0.8), ylim=c(minY_2, maxY_2), col="green",
main="with additional observation of 583 heads & 417 tails")
points(myHist_2$mids,posteriorDist_grid_2/mySum_2, xlab="", ylab="", col="blue", xlim=c(0.4,0.8), ylim=c(minY_2, maxY_2))
points(myHist_2$mids, dbeta(myHist_2$mids, 40+583, 40+417)/sum(dbeta(myHist_2$mids, 40+583, 40+417)),
xlim=c(0.4,0.8), ylim=c(minY_2, maxY_2), col="red", xlab="", ylab="")
legend("topleft", legend=c("metropolis", "grid", "prior dbeta(40, 40)"),
col=c("green", "blue", "red"), lty=3, lwd = 3, cex=0.6)
6e02f0d9852fb234bb4ddc853bcfb3bf8be57784 | bedafe30af3f3abcce2f75f7c4aa7cd673f6e9fd | /man/pancancer_small.Rd | 66e9d0d2fdc04086f9f352c1326fc4bf6e6456b0 | [] | no_license | cran/pancor | 81fbc3319a329e98eef5b6d4f3fa5e8414851522 | e86e2652e6eb77d54ca8da01c976edbb206a71ed | refs/heads/master | 2020-12-22T01:54:44.010766 | 2019-10-16T11:20:02 | 2019-10-16T11:20:02 | 236,635,964 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 413 | rd | pancancer_small.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{pancancer_small}
\alias{pancancer_small}
\title{TCGA preprocessed data for correlationship analysis}
\format{data.frame object}
\usage{
data(pancancer_small)
}
\description{
Data are downloaded from \href{http://xena.ucsc.edu/}{XENA}
}
\examples{
data(pancancer_small)
}
\keyword{datasets}
|
ca2ec7ece7ede8ee3189c84053966d884fb06c72 | 5ee1cdd71389b557cfc8bde59fb296162344e669 | /_Templates/Data Preprocessing Template/my_data_preprocessing_template.R | c5b6523f003613e88c4b4c7dbd3e353033fef7ee | [] | no_license | ritwik0706/ML-Playground | 5d3e6688fa8b1119e9812c4118e6c4dcb5ce6640 | 0fcca39b58ecb2943118fe87a1a6a37999d55618 | refs/heads/master | 2020-04-11T18:13:30.518923 | 2018-12-29T11:16:00 | 2018-12-29T11:16:00 | 161,990,388 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 423 | r | my_data_preprocessing_template.R |
# Data Preprocessing Template

# Importing Dataset
dataset <- read.csv('Data.csv')

# Splitting The DataSet into Training Set and Test Set
# (80/20 split stratified on the Purchased column; fixed seed so the
# split is reproducible)
library(caTools)
set.seed(123)
split <- sample.split(dataset$Purchased, SplitRatio = 0.8)
train_set <- subset(dataset, split == TRUE)
test_set <- subset(dataset, split == FALSE)

# Feature Scaling (uncomment to standardise the numeric columns)
# train_set[, 2:3] <- scale(train_set[, 2:3])
# test_set[, 2:3] <- scale(test_set[, 2:3])
e51235bde2590f8ad53981a73d40aff407b9b1f5 | 71d1b647f1516d600bfc23434380b43753de6bee | /weightalgos/man/jackson.Rd | 5c6535d14c89a1f5f7d2322f52f02f8a5e7fe871 | [] | no_license | CCMRcodes/WeightAlgorithms | 1f58a5908ab86098b9e99c0a8f9b70bb725f51c4 | 3cea6d4bdb2badc1852463bcc0e1c395fda2cacd | refs/heads/master | 2021-11-15T21:37:13.842959 | 2021-10-07T00:59:20 | 2021-10-07T00:59:20 | 232,852,314 | 1 | 2 | null | 2020-08-10T16:36:02 | 2020-01-09T16:23:02 | R | UTF-8 | R | false | true | 3,278 | rd | jackson.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Jackson2015.R
\name{jackson}
\alias{jackson}
\title{Jackson 2015 Measurment Cleaning Algorithm}
\usage{
jackson(
df,
id,
measures,
tmeasures,
start_point,
t = c(0, 182, 365, 730),
windows = c(1, 90, 90, 90),
outliers = c(75, 700)
)
}
\arguments{
\item{df}{object of class \code{data.frame}, containing \code{id} and
\code{measures}.}
\item{id}{string corresponding to the name of the column of patient
identifiers in \code{df}.}
\item{measures}{string corresponding to the name of the column of
measurements in \code{df}.}
\item{tmeasures}{string corresponding to the name of the column of
measurement collection dates or times in \code{df}. If \code{tmeasures} is
a date object, there may be more than one weight on the same day, if it is
a precise datetime object, there may not be more than one weight on the
same day. Just something to look out for.}
\item{start_point}{string corresponding to the name of the column in
\code{df} holding the time at which subsequent measurement dates will be
assessed, should be the same for each person. Eg., if t = 0 (\code{t[1]})
corresponds to an index visit held by the variable \code{VisitDate}, then
\code{start_point} should be set to \code{VisitDate}.}
\item{t}{numeric vector of time points to collect measurements, e.g.
\code{c(0, 182.5, 365)} for measure collection at t = 0, t = 182.5 (about 6 months
from t = 0), and t = 365 (1 year from t = 0). Default is
\code{c(0, 182, 365, 730)} according to Jackson et al. 2015.}
\item{windows}{numeric vector of measurement collection windows to use around
each time point in \code{t}. E.g. Jackson et al. 2015 use
\code{c(1, 90, 90, 90)} for \code{t = c(0, 182.5, 365, 730)}, implying that
the closest measurement t = 0 will be collected on the day of the
\code{start_point}. Subsequent measurements will be collected 90 days prior
to and 90 days post t0+182.5 days, t0+365 days, and t0+730 days.}
\item{outliers}{numeric vector corresponding to the upper and lower bound for
each time entry. Default is \code{c(75, 700)}.}
}
\value{
returns a data frame reduced by the inclusion criteria defined in
\code{t}, \code{windows} and \code{outliers}, and further reduced to 4
columns: the value of \code{id}; the average of the measures defined in
\code{measures} at each \code{t} and \code{windows}, denoted the
\code{measout}; the number of measures used to calculate \code{measout} at
time \code{t}; \code{measureTime}, the time-point defined by \code{t}.
}
\description{
For grouped time series (e.g., per person), \code{jackson} removes outliers,
then applies windows to each measurement within each block of time/window, then
averages all measurements within that window. Based on work by Jackson et al.
2015.
}
\examples{
library(dplyr)
library(ggplot2)
data(cdw1000)
jackson_df <- jackson(
df = cdw1000,
id = "id",
measures = "Weight",
tmeasures = "WeightDate",
start_point = "VisitDate"
)
# dplyr::glimpse(jackson_df)
samp <- jackson_df \%>\%
distinct(id) \%>\%
sample_n(16) \%>\%
pull()
jackson_df \%>\%
filter(id \%in\% samp) \%>\%
ggplot(aes(x = factor(measureTime), y = measout)) +
geom_point(aes(size = n)) +
facet_wrap(vars(id), ncol = 4)
}
|
d2555fb2fe4044c570af9112b4db94d74d84a486 | 1875ad05eafdaab03dc9fbb7f0bcee207c0b8cbc | /2. EFA_thesis.R | 256f1120635622ac7e94df0da3f7b7fddbec8253 | [] | no_license | sybrendeboever/Master-thesis-statistics-KUL | 91ab784829669a357db91a37353b8e14d11833f3 | caba87d8aba7966721df84ddbb6cd76301dafb90 | refs/heads/main | 2023-07-09T11:47:59.687771 | 2021-08-15T17:45:53 | 2021-08-15T17:45:53 | 378,909,408 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,787 | r | 2. EFA_thesis.R | # ----------------------------------------------------------------------------------------
# ------------------------------ EFA on all variables ---------------------------------- #
# Exploratory factor analysis on the survey items: load the data, restrict
# to first-attempt respondents, and split into a training and a test set.
library(psych)
library(ggplot2)
library(grid)
# Final dataset
data = read.csv("D:/Unief/Master Statistiek/Jaar 2/Thesis/6. Data/all_NA_2019_2020_NoMiss_OmgedraaideScales_CorrecteQ22.csv")
# Unique respondent key: student id + survey year.
data$id = paste(data$Student_ID,data$YEAR,sep="_")
data2 = read.csv("D:/Unief/Master Statistiek/Jaar 2/Thesis/6. Data/Results/CFA/dataq_NA.csv") # dataq_NA contains the first attempt of each student
data2$id = paste(data2$Student_ID,data2$YEAR,sep="_")
# Keep only respondents whose first attempt appears in dataq_NA.
data = data[data$id %in% data2$id,]; rm(data2)
# Q5 value 3 is treated as missing.
data$Q5[data$Q5==3] = NA
# Subset data
set.seed(1234)
sample = sample(c(1:nrow(data)),
                size=round(0.40*nrow(data)), # 40% of the data
                replace=FALSE) # Obtain unique row numbers
training = data[sample,]
subj_train = paste(training$Student_ID,training$YEAR,sep="_")
test = data[-sample,]
subj = paste(test$Student_ID,test$YEAR,sep="_")
# -------------------------------- COMPARE TRAINING AND TEST DATASET ----------------------
## 1. Training
# Recode the seven Q15 check-box columns into one categorical code Q15:
# 1=PL, 2=CI, 3=OE, 5=PL+OE+CI, 6=PL+OE, 7=PL+CI, 8=OE+CI, 0=don't know
# (Q15_4).  Any NA among the seven columns marks the whole answer missing.
# NOTE(review): the if-tests are not mutually exclusive and the Q15_4 test
# runs last, overriding the others -- presumably the check-boxes are
# one-hot in the data; verify.  Rows where nothing matches leave Q15[i]
# untouched.
for (i in 1:nrow(training)){
  row = training[i,c("Q15_1", "Q15_3", "Q15_2","Q15_5", "Q15_6", "Q15_7", "Q15_8")] # remark the order of Qs!
  if (anyNA(row)){
    training$Q15[i] = NA
  } else{
    if (row[1]==1){ # PL
      training$Q15[i] = 1 # Number of question
    }
    if (row[2]==1){ # OE
      training$Q15[i] = 3
    }
    if (row[3]==1){ # CI
      training$Q15[i] = 2
    }
    if (row[4]==1){ # PL+OE+CI
      training$Q15[i] = 5
    }
    if (row[5]==1){ # PL+OE
      training$Q15[i] = 6
    }
    if (row[6]==1){ # PL+CI
      training$Q15[i] = 7
    }
    if (row[7]==1){ # OE+CI
      training$Q15[i] = 8
    }
    if (training[i,c("Q15_4")]==1){
      training$Q15[i] = 0
    }
  }
}
# Migration background
# Collapse the detailed Herkomst categories into a 3-level SOE variable.
training$SOE[training$Herkomst %in% c("1/Migratie-achtergrond (EU1)","2/Migratie-achtergrond (niet-EU1)")] = "1" # Migration background
training$SOE[training$Herkomst == "5/Overige"] = "2" # 'Other'
training$SOE[training$Herkomst %in% c("3/Geen migratie-achtergrond","4/Niet toegewezen")] = "3" # No migration background: ref
# Count frequencies per level
# For each factor of interest, build a per-level table with counts and the
# percentage over non-missing observations.
factors = c("Gender","Fase","Q5","Q15","SOE","Q31")
training2 = data.frame(apply(training,2,factor)) # coerce every column to factor
list=list() # note: this name shadows base::list within the script
for (i in factors){
  n = nlevels(training2[,i])
  groups = data.frame(var=character(n),levels=character(n),summary=character(n),percent=c(n)) # percent holds a placeholder until overwritten below
  groups$var = i
  groups$levels = levels(training2[,i])
  groups$summary = paste("n = ",as.numeric(table(training2[,i])),sep="")
  groups$percent = round(as.numeric(table(training2[,i]))*100/sum(!is.na(training2[,i])),2)
  list[[i]] = groups
}
summary_1=do.call(rbind,list)
#write.csv(summary_1,file="D:/Unief/Master Statistiek/Jaar 2/Thesis/5. Thesis/3. Results/1. Descriptive statistics/summary_training.csv")
## 2. Test
# Recode the seven Q15 check-box columns of the test set into one
# categorical code: 1=PL, 2=CI, 3=OE, 5=PL+OE+CI, 6=PL+OE, 7=PL+CI,
# 8=OE+CI, 0=don't know (Q15_4); NA if any of the seven columns is missing.
# The last matching test wins (Q15_4 overrides everything).
for (i in 1:nrow(test)){
  row = test[i,c("Q15_1", "Q15_3", "Q15_2","Q15_5", "Q15_6", "Q15_7", "Q15_8")] # remark the order of Qs!
  if (anyNA(row)){
    test$Q15[i] = NA
  } else{
    if (row[1]==1){ # PL
      test$Q15[i] = 1 # Number of question
    }
    if (row[2]==1){ # OE
      test$Q15[i] = 3
    }
    if (row[3]==1){ # CI
      test$Q15[i] = 2
    }
    if (row[4]==1){ # PL+OE+CI
      test$Q15[i] = 5
    }
    if (row[5]==1){ # PL+OE
      test$Q15[i] = 6
    }
    if (row[6]==1){ # PL+CI
      test$Q15[i] = 7
    }
    if (row[7]==1){ # OE+CI
      test$Q15[i] = 8
    }
    if (test[i,c("Q15_4")]==1){
      test$Q15[i] = 0
    }
  }
}
# Migration background
# Collapse the detailed Herkomst categories into a 3-level SOE variable.
test$SOE[test$Herkomst %in% c("1/Migratie-achtergrond (EU1)","2/Migratie-achtergrond (niet-EU1)")] = "1" # Migration background
test$SOE[test$Herkomst == "5/Overige"] = "2" # 'Other'
test$SOE[test$Herkomst %in% c("3/Geen migratie-achtergrond","4/Niet toegewezen")] = "3" # No migration background: ref
# Count frequencies per level
# Per-level counts and percentages (over non-missing values) for the test set.
factors = c("Gender","Fase","Q5","Q15","SOE","Q31")
test2 = data.frame(apply(test,2,factor))
list=list()
for (i in factors){
  n = nlevels(test2[,i])
  groups = data.frame(var=character(n),levels=character(n),summary=character(n),percent=c(n))
  groups$var = i
  groups$levels = levels(test2[,i])
  groups$summary = paste("n = ",as.numeric(table(test2[,i])),sep="")
  groups$percent = round(as.numeric(table(test2[,i]))*100/sum(!is.na(test2[,i])),2)
  list[[i]] = groups
}
summary_2=do.call(rbind,list)
#write.csv(summary_2,file="D:/Unief/Master Statistiek/Jaar 2/Thesis/5. Thesis/3. Results/1. Descriptive statistics/summary_test.csv")
# 3. Check differences between the dataset per variable
# Each block builds a two-column contingency table (training vs. test level
# counts) and runs a chi-squared test of homogeneity; a non-significant
# p-value indicates the 40/60 split is balanced for that variable.
## Gender
d = data.frame(training=as.numeric(table(training$Gender)),
               test = as.numeric(table(test$Gender)))
row.names(d)=c("M","V")
chisq.test(d)
## Phase of study
d = data.frame(training=as.numeric(table(training$Fase)),
               test = as.numeric(table(test$Fase)))
row.names(d)=c("1","2","3","4","5")
chisq.test(d)
## Q5
d = data.frame(training=as.numeric(table(training$Q5)),
               test = as.numeric(table(test$Q5)))
row.names(d)=c("yes","no")
chisq.test(d)
## Vocational interest
d = data.frame(training=as.numeric(table(training$Q15)),
               test = as.numeric(table(test$Q15)))
row.names(d)=c("idk","PL","OE","CI","PL+OE+CI","PL+OE","PL+CI","OE+CI")
chisq.test(d)
## Engineering persistence
d = data.frame(training=as.numeric(table(training$Q31)),
               test = as.numeric(table(test$Q31)))
row.names(d)=c("0","1","2")
chisq.test(d)
## Migration status
d = data.frame(training=as.numeric(table(training$SOE)),
               test = as.numeric(table(test$SOE)))
row.names(d)=c("1","2","3")
chisq.test(d) # p=0.3652
# -------------------------------- HORNS PARALLEL ANALYSIS -------------------------------
# Variables to include
include = c("Q30","Q32_1","Q32_2","Q32_3","Q32_4","Q32_5","Q32_6","Q32_7", # CE
            "Q8","Q9","Q16","Q17_6","Q20","Q23","Q25_5", # SA
            "Q14_1","Q14_2","Q14_3","Q14_4","Q24_1","Q24_2","Q24_3","Q24_4","Q24_5", # PRA
            "Q17_2","Q17_3","Q17_4","Q17_5","Q25_2","Q25_3","Q25_4") # PRC
# Polychoric correlations
# note: the name `cor` shadows base::cor from here on
cor = polychoric(training[,include])
round(eigen(cor$rho)$values, 3)
# cor = cor(training[,include],use="pairwise")
# round(eigen(cor)$values, 3)
# Scree plot
# Eigenvalues of the PCA and common-factor solutions, both computed from
# the polychoric correlation matrix.
eigenvalues_pc = nFactors::eigenComputes(x=cor$rho,cor=TRUE,model="components")
eigenvalues_fa = nFactors::eigenComputes(x=cor$rho,cor=TRUE,model="factors")
# nFactors::eigenComputes(x=cor(training[,include],use="pairwise"),cor=TRUE,model="components") # gives the same eigenvalues as the pc in parallel analysis which is based on the pearson correlations
# nFactors::eigenComputes(x=cor(training[,include],use="pairwise"),cor=TRUE,model="factors")
plot1 = data.frame(eigenvalues = c(eigenvalues_pc,eigenvalues_fa),
                   type = c(rep("pc",length(eigenvalues_pc)),
                            rep("fa",length(eigenvalues_fa))),
                   i = rep(c(1:length(eigenvalues_pc)),2))
# Scree plot of both eigenvalue series with the Kaiser criterion line at 1.
g2=ggplot(data = plot1,aes(group=type,col=type)) +
  scale_color_manual(values=c("black","gray45"),guide=F)+
  geom_hline(yintercept = 1, col="gray55")+
  geom_point(aes(x = i, y = eigenvalues),shape=18,size=2) +
  geom_line(aes(x = i, y = eigenvalues),alpha=0.5) +
  xlab(NULL)+ ylab("Eigenvalues")+
  scale_x_continuous(breaks = seq(from = 1, to = 30, by = 1),
                     labels = c(1,"",3,"","",6,"","",9,"","",12,"","",15,"","",18,"","",21,"","",24,"","",27,"","",30))+
  scale_y_continuous(breaks = seq(from = 0, to = 13, by = 1))+
  theme(panel.border = element_rect(color="black",fill=NA),
        panel.background = element_blank())
# Horn's parallel analysis
# Compare observed eigenvalues against eigenvalues of simulated/resampled
# data (1500 iterations) for both the PCA and the factor solution.
pa = psych::fa.parallel(training[,include],
                        n.iter=1500,
                        SMC=TRUE,
                        use="pairwise",
                        fm="uls",
                        fa="both",
                        cor="cor" # Garrido 2013: polychoric, however, Timmerman 2011: Pearson in case polychoric leads to nonconvergence
                        )
# Assemble the parallel-analysis eigenvalue series (actual, simulated and
# resampled; 31 values each, for both fa and pc) into long format.
plot = data.frame(value = c(pa$fa.values,pa$fa.sim,pa$fa.simr,
                            pa$pc.values,pa$pc.sim,pa$pc.simr),
                  type = c(rep("fa",3*31),rep("pc",3*31)),
                  id = c(rep("actual",31),rep("sim",31),rep("simr",31),
                         rep("actual",31),rep("sim",31),rep("simr",31)),
                  n = rep(c(1:31),2))
plot$type_id = paste(plot$type,plot$id,sep="_")
# Parallel-analysis plot: actual eigenvalues as points, simulated/resampled
# series as dashed lines.
# FIX: index the linetype vector through factor(plot$id) -- under R >= 4.0
# data.frame() keeps `id` as character, and character subscripts on an
# unnamed vector yield NA linetypes.  FIX: "principal" was misspelled in
# the y-axis label.
g1 = ggplot(data=plot,aes(x=n,y=value,group=type_id,colour=type))+
  geom_line(lwd=1,lty=c(1,1,2)[factor(plot$id)],alpha=0.45)+
  geom_point(data=plot[plot$id=="actual",],shape=18,size=3)+
  theme_bw()+
  theme(panel.grid=element_blank(),legend.position = 'none')+
  ylab("Eigenvalues of principal components \n and factor analysis") + xlab("Factor/Component number")+
  scale_x_continuous(n.breaks=length(unique(plot$n)))+
  scale_y_continuous(n.breaks=6)+
  scale_colour_manual(values=c("black","gray45"))
# % variance explained by the extracted factors
# PC: 5 components (based on polychoric scree plot and Horn), based on polychoric
eigenvalues_pc = nFactors::eigenComputes(x=cor$rho,cor=TRUE,model="components")
PC_5_poly = sum(eigenvalues_pc[1:5]/sum(eigenvalues_pc))*100 # 47%
# PC: 9 components
PC_9_poly = sum(eigenvalues_pc[1:9]/sum(eigenvalues_pc))*100 # 62%
# FA: 5 factors (common-factor variance is taken over positive eigenvalues only)
FA_5_poly = sum(eigenvalues_fa[1:5]/sum(eigenvalues_fa[eigenvalues_fa>0]))*100 # 73%
# FA: 9 factors
FA_9_poly = sum(eigenvalues_fa[1:9]/sum(eigenvalues_fa[eigenvalues_fa>0]))*100 # 90%
## Create a barplot
# Per-factor/component variance shares used for the stacked bars.
PC5poly = eigenvalues_pc[1:5]/sum(eigenvalues_pc)*100
PC9poly = eigenvalues_pc[1:9]/sum(eigenvalues_pc)*100
FA5poly = eigenvalues_fa[1:5]/sum(eigenvalues_fa[eigenvalues_fa>0])*100
FA9poly = eigenvalues_fa[1:9]/sum(eigenvalues_fa[eigenvalues_fa>0])*100
# plot
# "front" bars carry the per-factor shares; "back" bars carry the
# cumulative totals of the 5- and 9-factor solutions (drawn dashed behind).
eig = data.frame(value=c(PC9poly,0,0,0,0,PC_5_poly,0,0,0,PC_9_poly,
                         FA9poly,0,0,0,0,FA_5_poly,0,0,0,FA_9_poly),
                 type = c(rep("PC",2*9),rep("FA",2*9)),
                 layer = rep(c(rep("front",9),rep("back",9)),2),
                 x = c(rep(seq(1,9),4)))
eig$xaxs = paste(eig$type,eig$x,sep="_")
eig$xaxs = factor(eig$xaxs,levels=levels(factor(eig$xaxs)))
g3=ggplot(data=eig[eig$layer=="back",],aes(x=xaxs,y=value,fill=type))+
  theme_bw()+
  theme(panel.grid=element_blank(),
        legend.position = c(0.9,0.9),
        legend.title = element_blank(),
        panel.grid.major.y = element_line(colour="gray97"),
        panel.grid.minor.y = element_line(colour="gray97"))+
  geom_col(alpha=0.35,lty=2,col=c("gray45"))+
  geom_col(data=eig[eig$layer=="front",])+
  scale_fill_manual(values=c("black","gray45"),guide=FALSE)+
  scale_x_discrete(expand=c(0,0),labels=rep(c(1:9),2),name="Factor/Component number")+
  scale_y_continuous(expand=c(0,0),limits=c(0,100),name="Variance (%)")
# Combine plots
# Arrange the three plots on a 3x5 grid: g1 left, g2 top right, g3 bottom right.
pushViewport(viewport(layout=grid.layout(3,5)))
print(g1,vp=viewport(layout.pos.row=c(1:3),layout.pos.col=c(1:3)))
print(g2,vp=viewport(layout.pos.row=c(1:2),layout.pos.col=c(4:5)))
print(g3,vp=viewport(layout.pos.row=3,layout.pos.col=c(4:5)))
#export::graph2ppt(file="D:/Unief/Master Statistiek/Jaar 2/Thesis/5. Thesis/3. Results/2. EFA/Horn.ppt",append=TRUE)
# -------------------------------- EFA -------------------------------
# Factor analysis
# Fit exploratory factor solutions on the training data: unweighted least
# squares (uls) on the polychoric correlations, oblique Promax rotation.
# 9 factors
include = c("Q30","Q32_1","Q32_2","Q32_3","Q32_4","Q32_5","Q32_6","Q32_7", # CE
            "Q8","Q9","Q16","Q17_6","Q20","Q23","Q25_5", # SA
            "Q14_1","Q14_2","Q14_3","Q14_4","Q24_1","Q24_2","Q24_3","Q24_4","Q24_5", # PRA
            "Q17_2","Q17_3","Q17_4","Q17_5","Q25_2","Q25_3","Q25_4") # PRC
fit = psych::fa(r = training[,include],
                max.iter=100,
                SMC = TRUE,
                residuals = TRUE,
                use="pairwise",
                nfactors = 9,
                cor = "poly",
                fm = "uls",
                rotate = "Promax"); fit
# 5 factors
include = c("Q30","Q32_1","Q32_2","Q32_3","Q32_4","Q32_5","Q32_6","Q32_7", # CE
            "Q8","Q9","Q16","Q17_6","Q20","Q23","Q25_5", # SA
            "Q14_1","Q14_2","Q14_3","Q14_4","Q24_1","Q24_2","Q24_3","Q24_4","Q24_5", # PRA
            "Q17_2","Q17_3","Q17_4","Q17_5","Q25_2","Q25_3","Q25_4") # PRC
fit = psych::fa(r = training[,include],
                max.iter=100,
                SMC = TRUE,
                residuals = TRUE,
                use="pairwise",
                nfactors = 5,
                cor = "poly",
                fm = "uls",
                rotate = "Promax"); fit
|
7b95a466163b7bef453a0de7cf956deb4951b443 | 8bcda50cf844b198807c524093cf5ea1507da5dc | /calculation.r | 511dbcac5bf749ea084039c5dac7e9ec17a4cb0a | [
"CC0-1.0"
] | permissive | wentzien/fuel_price_variations_germany | 8a9c0751ace0ae66c8f20cd4071e2b02d261a208 | 2ca5934733bf164f9725b0339ee4f5371755b994 | refs/heads/master | 2023-01-08T02:19:36.476269 | 2020-09-20T08:11:03 | 2020-09-20T08:11:03 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,276 | r | calculation.r | #
# README FIRST
#
# Uncomment the following line to install required packages
# install.packages(c("zoo","xts","stringr","fs","readr","jsonlite"))
#
# Load libraries
library(zoo)
library(stringr)
library(fs)
library(xts)
library(readr)
library(jsonlite)
#
# General options
#
# File encoding in UTF-8
options("encoding" = "UTF-8")
#
# Download latest stations file and convert it to JSON
#
# BUG FIX: the year/month part of the repository path was hard-coded to
# 2020/06 while the file name already used Sys.Date()-1, so the URL broke
# outside June 2020.  Derive the whole path from yesterday's date, using
# the same %2F-encoded year/month/day scheme as the price downloads below.
urlhead = "https://dev.azure.com/tankerkoenig/362e70d1-bafa-4cf7-a346-1f3613304973/_apis/git/repositories/0d6e7286-91e4-402c-af56-fa75be1f223d/items?path=%2Fstations%2F"
urltail = "-stations.csv&versionDescriptor%5BversionOptions%5D=0&versionDescriptor%5BversionType%5D=0&versionDescriptor%5Bversion%5D=master&resolveLfs=true&api-version=5.0"
date = Sys.Date() - 1
url = paste0(urlhead, format(date,"%Y"), "%2F", format(date,"%m"), "%2F", date, urltail)
url
d = read_csv(url)
# Drop columns that are not needed in the published JSON.
d$openingtimes_json=NULL
d$first_active=NULL
json = toJSON(d, dataframe="rows")
write(json, "data/stations.json")
#
# Download latest price updates and calculate savings per time interval
#
# Calculate Files to download
# End of URL
urltail = "-prices.csv&versionDescriptor%5BversionOptions%5D=0&versionDescriptor%5BversionType%5D=0&versionDescriptor%5Bversion%5D=master&resolveLfs=true&%24format=octetStream&api-version=5.0&download=true"
# Start of URL
urlhead ="https://dev.azure.com/tankerkoenig/362e70d1-bafa-4cf7-a346-1f3613304973/_apis/git/repositories/0d6e7286-91e4-402c-af56-fa75be1f223d/items?path=%2Fprices%2F"
# Load Data from two days ago to get last price update before midnight
# two days ago
date=Sys.Date()-2
year = format(date,"%Y")
month = format(date,"%m")
day = format(date,"%d")
# Calculate pattern of file on Azure GIT repository
file = paste0(year,"%2F", month, "%2F", year, "-", month, "-", day )
# Calculate full URL of file to download
url=paste0(urlhead, file, urltail)
# Load file from Web
d=read.csv(url)
# Load Data for yesterday to get last price updates, see comments above
date=Sys.Date()-1
year = format(date,"%Y")
month = format(date,"%m")
day = format(date,"%d")
file = paste0(year,"%2F", month, "%2F", year, "-", month, "-", day )
url=paste0(urlhead, file, urltail)
d2 = read.csv(url)
# Concatenate data sets
# Note: `date` is intentionally left at Sys.Date()-1 here; it is reused as
# "yesterday" inside the per-station loop below.
data = rbind(d,d2)
# Find all stations providing price updates
stations= unique(data$station_uuid)
#
#
# Loop over stations and create files
#
#
for(s in stations) {
  # Log progress
  cat(paste0("Processing ", s, "...\n"))
  # Attempt to use data for this station; a failure for one station must
  # not abort the whole run.
  try({
    # Look only at data of this station
    station = subset(data, station_uuid==s)
    # Convert date and time to R format
    station$date = as.POSIXlt(station$date, format="%Y-%m-%d %H:%M:%OS")
    # Calculate Savings for yesterday
    start = as.POSIXlt(paste0(date, " 0:00:00"))
    end = as.POSIXlt(paste0(date, " 23:59:59"))
    # Find the last price update two days ago that is valid until first update yesterday
    last = subset(station, date < start)
    last = last$date[nrow(last)]
    # Get only data relevant for yesterday
    station = subset(station, date >= last)
    # Regularize time to 1 Minute intervals
    ts.1min <- seq(last,end, by = paste0("60 s"))
    #
    # regularize Diesel
    #
    x = xts(x=station$diesel , order.by=station$date, name="Diesel")
    # Carry forward updates on a per minute basis to calculate correct averages
    res <- merge(x, xts(, ts.1min))
    res <- na.locf(res, na.rm = TRUE)
    res <- window(x = res,start = start, end= end)
    # Aggregate by hour and calculate average
    ends <- endpoints(res,'minutes',60)
    table = period.apply(res, ends ,mean)-mean(res) # hourly mean minus the day's mean price (rounded further below)
    # Create new data structure for aggregation
    table = data.frame(date=index(table), coredata(table))
    table$hour = format(table$date, "%H")
    table$date = NULL
    names(table) = c("price","hour")
    # Calculate average by hour
    result <- tapply(table$price, table$hour, mean)
    # Prepare data frame contents for file writing from tapply result
    result.frame = data.frame(key=names(result), value=result)
    names(result.frame) = c("hour","price")
    result.frame[,2] = round ( result.frame[,2] ,2) # Show only two Digits
    result.frame = result.frame[order( result.frame$hour), ]
    # Write File and create required directories
    dirname=path_join(str_split(station$station_uuid[1],"-"))
    dirname=path_join(c(path("data"), dirname))
    filename=path("diesel.csv")
    dir_create(dirname, recurse=T)
    filename=path_join(c(dirname,filename))
    # Write as CSV
    write.csv(result.frame, filename, row.names=F)
    # Write as JSON
    filename=path("diesel.json")
    filename=path_join(c(dirname,filename))
    json = toJSON(result.frame, dataframe="rows")
    write(json, filename)
    #
    # E10
    #
    # Comments see Diesel
    x = xts(x=station$e10 , order.by=station$date, name="E10")
    res <- merge(x, xts(, ts.1min))
    res <- na.locf(res, na.rm = TRUE)
    res <- window(x = res,start = start, end= end)
    ends <- endpoints(res,'minutes',60)
    table = period.apply(res, ends ,mean)-mean(res) # hourly mean minus the day's mean price (rounded further below)
    table = data.frame(date=index(table), coredata(table))
    table$hour = format(table$date, "%H")
    table$date = NULL
    names(table) = c("price","hour")
    result <- tapply(table$price, table$hour, mean)
    result.frame = data.frame(key=names(result), value=result)
    names(result.frame) = c("hour","price")
    result.frame[,2] = round ( result.frame[,2] ,2) # Show only two Digits
    # CONSISTENCY FIX: sort by hour exactly like the Diesel and E5 sections
    # (tapply already returns hours in sorted order, so output is unchanged).
    result.frame = result.frame[order( result.frame$hour), ]
    # Write File
    dirname=path_join(str_split(station$station_uuid[1],"-"))
    dirname=path_join(c(path("data"), dirname))
    filename=path("e10.csv")
    dir_create(dirname, recurse=T)
    filename=path_join(c(dirname,filename))
    write.csv(result.frame, filename, row.names=F)
    # Write as JSON
    filename=path("e10.json")
    filename=path_join(c(dirname,filename))
    json = toJSON(result.frame, dataframe="rows")
    write(json, filename)
    #
    # E5
    #
    # Comments see Diesel
    x = xts(x=station$e5 , order.by=station$date, name="E5")
    res <- merge(x, xts(, ts.1min))
    res <- na.locf(res, na.rm = TRUE)
    res <- window(x = res,start = start, end= end)
    ends <- endpoints(res,'minutes',60)
    table = period.apply(res, ends ,mean)-mean(res) # hourly mean minus the day's mean price (rounded further below)
    table = data.frame(date=index(table), coredata(table))
    table$hour = format(table$date, "%H")
    table$date = NULL
    names(table) = c("price","hour")
    result <- tapply(table$price, table$hour, mean)
    result.frame = data.frame(key=names(result), value=result)
    names(result.frame) = c("hour","price")
    result.frame[,2] = round ( result.frame[,2] ,2) # Show only two Digits
    result.frame = result.frame[order( result.frame$hour), ]
    # Write File
    dirname=path_join(str_split(station$station_uuid[1],"-"))
    dirname=path_join(c(path("data"), dirname))
    filename=path("e5.csv")
    dir_create(dirname, recurse=T)
    filename=path_join(c(dirname,filename))
    write.csv(result.frame, filename, row.names=F)
    # Write as JSON
    filename=path("e5.json")
    filename=path_join(c(dirname,filename))
    json = toJSON(result.frame, dataframe="rows")
    write(json, filename)
  }) # END TRY
} # END FOR LOOP
7137a442a8f5371bc565c1fce8dcc5ee9ef3f135 | 7dc7c5b7d6faa41e52788b1dd6f5b7316a845e24 | /DATRAS/man/plotALKraw.Rd | f1056d3fef71bcda65c299b9210f853220ab2489 | [] | no_license | alko989/DATRAS | 7f969cc2295ede8b9d81e7f24aa1e0fe4518d5b3 | 26bd3f3d9c8460d2f82b3e73525c809973e8261d | refs/heads/master | 2021-08-28T03:48:00.471214 | 2020-11-03T15:05:47 | 2020-11-03T15:59:01 | 212,617,500 | 1 | 0 | null | 2019-10-03T15:45:54 | 2019-10-03T15:45:54 | null | UTF-8 | R | false | true | 833 | rd | plotALKraw.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ALKfuns.R
\name{plotALKraw}
\alias{plotALKraw}
\title{Plot a raw ALK using the observed proportions in each age group.}
\usage{
plotALKraw(
x,
minAge,
maxAge,
truncAt = 0,
type = "l",
ylab = "Proportion",
xlab = "Cm",
...
)
}
\arguments{
\item{x}{a DATRASraw object with added spectrum.}
\item{minAge}{pool all ages less than or equal to this age.}
\item{maxAge}{pool all ages greater than or equal to this age.}
\item{truncAt}{truncate proportions below this number for nicer plots}
\item{type}{argument to matplot()}
\item{ylab}{argument to matplot()}
\item{xlab}{argument to matplot()}
\item{...}{extra parameters to matplot()}
}
\value{
nothing
}
\description{
Plot a raw ALK using the observed proportions in each age group.
}
|
e60e5483aa536bbae2ed2e0d4b1bfe797ae968ca | 1e0af8508e46e9ee9e1329f8622ef60f7f1e61e9 | /man/deepFeatures.Rd | f36f23d268600798abde50813342fcffa0832ef8 | [] | no_license | muratmaga/patchMatchR | 9a2e29e3563ea17725c510c6ab0f5650ef7b7bcb | af1daa5c33d8d02eebbfb8e21665f74258008b40 | refs/heads/master | 2020-09-30T02:58:34.982012 | 2019-12-04T11:28:59 | 2019-12-04T11:28:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,011 | rd | deepFeatures.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/patchMatch.R
\name{deepFeatures}
\alias{deepFeatures}
\title{extract deep features from 2D or 3D image}
\usage{
deepFeatures(x, mask, patchSize = 64, featureSubset,
block_name = "block2_conv2", vggmodel)
}
\arguments{
\item{x}{input image}
\item{mask}{defines the object of interest in the fixedImage}
\item{patchSize}{vector or scalar defining patch dimensions}
\item{featureSubset}{a vector that selects a subset of features}
\item{block_name}{name of vgg feature block, either block2_conv2 or integer.
use the former for smaller patch sizes.}
\item{vggmodel}{prebuilt feature model}
}
\value{
feature array, patches and patch coordinates
}
\description{
High-level function for extracting features based on a pretrained network.
}
\examples{
library(ANTsR)
img <- ri( 1 ) \%>\% iMath( "Normalize" )
mask = randomMask( getMask( img ), 20 )
features = deepFeatures( img, mask, patchSize = 32 )
}
\author{
Avants BB
}
|
83e5e2ec086054e871627d90bf713582acb8a2fd | b2f61fde194bfcb362b2266da124138efd27d867 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Basler/terminator/stmt22_80_404/stmt22_80_404.R | e2c416900a9cf31c15d7070b4439c0ac1eada820 | [] | no_license | arey0pushpa/dcnf-autarky | e95fddba85c035e8b229f5fe9ac540b692a4d5c0 | a6c9a52236af11d7f7e165a4b25b32c538da1c98 | refs/heads/master | 2021-06-09T00:56:32.937250 | 2021-02-19T15:15:23 | 2021-02-19T15:15:23 | 136,440,042 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 65 | r | stmt22_80_404.R | 10f881fd47801938cb906fc40f3be45a stmt22_80_404.qdimacs 3763 12737 |
78aa191b61c670a6103a6f4365f447c8ffe22a07 | 752dc4ad14e1410cfcff767cf439fee5330bd24a | /R/order_by.R | 2088d404710fde21148c5f52e5b05f2d11325b03 | [] | no_license | puzuwe/rquery | 6cc1e1d3036782e87d6bf5c96fffc4f2f38431c7 | f9e75b08512bd35b7ff5aa6b8ba7965e454ed9b0 | refs/heads/master | 2020-03-08T16:43:57.135339 | 2018-04-05T18:30:40 | 2018-04-05T18:30:40 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,306 | r | order_by.R |
#' Make an orderby node (not a relational operation).
#'
#' Order a table by a set of columns (not general expressions) and
#' limit number of rows in that order.
#'
#' Note: this is a relational operator in that it takes a table that
#' is a relation (has unique rows) to a table that is still a relation.
#' However, most relational systems do not preserve row order in storage or between
#' operations. So without the limit set this is not a useful operator except
#' as a last step prior ot pulling data to an in-memory \code{data.frame} (
#' which does preserve row order).
#'
#'
#' @param source source to select from.
#' @param cols order by column names.
#' @param ... force later arguments to be bound by name
#' @param rev_cols order by reverse of these column names.
#' @param limit number limit row count.
#' @return select columns node.
#'
#' @examples
#'
#' if (requireNamespace("RSQLite", quietly = TRUE)) {
#' my_db <- DBI::dbConnect(RSQLite::SQLite(), ":memory:")
#' d <- dbi_copy_to(my_db, 'd',
#' data.frame(AUC = 0.6, R2 = 0.2))
#' eqn <- orderby(d, rev_cols = "AUC", limit=4)
#' cat(format(eqn))
#' sql <- to_sql(eqn, my_db)
#' cat(sql)
#' print(DBI::dbGetQuery(my_db, sql))
#' DBI::dbDisconnect(my_db)
#' }
#'
#' @export
#'
orderby <- function(source,
                    cols = NULL,
                    ...,
                    rev_cols = NULL,
                    limit = NULL) {
  # S3 generic: dispatches to orderby.relop or orderby.data.frame based on
  # the class of `source`.
  UseMethod("orderby", source)
}
#' @export
orderby.relop <- function(source,
                          cols = NULL,
                          ...,
                          rev_cols = NULL,
                          limit = NULL) {
  # No additional positional or named arguments are accepted.
  if (length(list(...)) > 0) {
    stop("unexpected arguments")
  }
  # Every requested ordering column must exist on the incoming relop.
  check_have_cols(column_names(source),
                  c(cols, rev_cols),
                  "rquery::orderby orderterms")
  # Assemble the orderby node and decorate it as a relational operator.
  node <- list(source = list(source),
               table_name = NULL,
               parsed = NULL,
               orderby = cols,
               rev_orderby = rev_cols,
               limit = limit)
  relop_decorate("relop_orderby", node)
}
#' @export
orderby.data.frame <- function(source,
                               cols = NULL,
                               ...,
                               rev_cols = NULL,
                               limit = NULL) {
  # data.frame method: wrap the data in a temporary table node, then build
  # the orderby node on top of it.
  if(length(list(...))>0) {
    stop("unexpected arguments")
  }
  tmp_name <- mk_tmp_name_source("rquery_tmp")()
  dnode <- table_source(tmp_name, colnames(source))
  dnode$data <- source
  # BUG FIX: the arguments must be passed under the actual parameter names
  # cols/rev_cols.  The previous names orderby=/rev_orderby= matched no
  # formal argument, fell into `...`, and made orderby.relop() signal
  # "unexpected arguments" on every call.
  enode <- orderby(dnode,
                   cols = cols,
                   rev_cols = rev_cols,
                   limit = limit)
  return(enode)
}
#' @export
format.relop_orderby <- function(x, ...) {
  # Render the pipeline as text: the formatted source followed by this
  # orderby step (and its LIMIT, when set).
  if(length(list(...))>0) {
    stop("unexpected arguments")
  }
  # Collect ordering terms; reversed columns render as desc(col).
  ot <- c(x$orderby)
  if(length(x$rev_orderby)>0) {
    ot <- c(ot, paste0("desc(", x$rev_orderby, ")"))
  }
  # BUG FIX: the original used ifelse(ot>0, ...), a vectorized lexical
  # character comparison.  With more than one ordering term that produced a
  # vector result (format() returned multiple strings), and with no terms
  # it returned character(0).  Test the length of ot instead.
  terms_txt <- if (length(ot) > 0) {
    paste(ot, collapse = ", ")
  } else {
    ""
  }
  # LIMIT is only rendered when both a limit and ordering columns are set.
  limit_txt <- if ((length(x$limit) > 0) && (length(x$orderby) > 0)) {
    paste0(", LIMIT ", format(ceiling(x$limit), scientific = FALSE))
  } else {
    ""
  }
  paste0(trimws(format(x$source[[1]]), which = "right"),
         " %.>%\n ",
         "orderby(., ",
         terms_txt,
         limit_txt,
         ")",
         "\n")
}
# Internal helper: compute the set of columns this orderby node needs from
# its source.  Starts from `using` (or all columns when not narrowed) and
# guarantees the ordering columns are retained.
calc_used_relop_orderby <- function (x, ...,
                                     using = NULL,
                                     contract = FALSE) {
  if(length(using)<=0) {
    using <- column_names(x)
  }
  # BUG FIX: reverse-order columns (rev_orderby) are consumed by the
  # ORDER BY clause just like the forward ones; previously only x$orderby
  # was included, so rev_orderby columns could be narrowed away upstream.
  consuming <- c(x$orderby, x$rev_orderby)
  using <- unique(c(using, consuming))
  missing <- setdiff(using, column_names(x$source[[1]]))
  if(length(missing)>0) {
    stop(paste("rquery::calc_used_relop_orderby unknown columns",
               paste(missing, collapse = ", ")))
  }
  using
}
#' @export
columns_used.relop_orderby <- function (x, ...,
                                        using = NULL,
                                        contract = FALSE) {
  # BUG FIX: this method previously delegated to
  # calc_used_relop_select_rows(), the helper for a different node type;
  # use the orderby-specific helper defined alongside this method (it is
  # also what to_sql.relop_orderby uses).
  cols <- calc_used_relop_orderby(x,
                                  using = using,
                                  contract = contract)
  # Recurse into the source node with the narrowed column set.
  return(columns_used(x$source[[1]],
                      using = cols,
                      contract = contract))
}
#' @export
to_sql.relop_orderby <- function (x,
                                  db,
                                  ...,
                                  limit = NULL,
                                  source_limit = NULL,
                                  indent_level = 0,
                                  tnum = mk_tmp_name_source('tsql'),
                                  append_cr = TRUE,
                                  using = NULL) {
  # Render this node as: SELECT * FROM ( <source SQL> ) t ORDER BY ... LIMIT ...
  if(length(list(...))>0) {
    stop("unexpected arguments")
  }
  # CLEANUP: removed the unused locals cols1/cols (a vapply of
  # quote_identifier over all source columns whose result was never read).
  # Build the ORDER BY term list: quoted ascending columns first, then
  # reversed columns rendered as "col DESC".
  ot <- vapply(x$orderby,
               function(ci) {
                 quote_identifier(db, ci)
               }, character(1))
  if(length(x$rev_orderby)>0) {
    rev_ot <- vapply(x$rev_orderby,
                     function(ci) {
                       paste(quote_identifier(db, ci), "DESC")
                     }, character(1))
    ot <- c(ot, rev_ot)
  }
  # Columns the sub-query must supply (narrowed by `using`, plus order terms).
  subcols <- calc_used_relop_orderby(x, using=using)
  # Only pass the row limit down when the database option allows it.
  qlimit = limit
  if(!getDBOption(db, "use_pass_limit", TRUE)) {
    qlimit = NULL
  }
  subsql_list <- to_sql(x$source[[1]],
                        db = db,
                        limit = qlimit,
                        source_limit = source_limit,
                        indent_level = indent_level + 1,
                        tnum = tnum,
                        append_cr = FALSE,
                        using = subcols)
  subsql <- subsql_list[[length(subsql_list)]]
  tab <- tnum()
  prefix <- paste(rep(' ', indent_level), collapse = '')
  q <- paste0(prefix, "SELECT * FROM (\n",
              subsql, "\n",
              prefix, ") ",
              tab,
              ifelse(length(ot)>0,
                     paste0(" ORDER BY ", paste(ot, collapse = ", ")),
                     ""))
  # The node's own limit always caps any limit passed in from above.
  if(!is.null(x$limit)) {
    limit <- min(limit, x$limit)
  }
  if(!is.null(limit)) {
    q <- paste(q, "LIMIT",
               format(ceiling(limit), scientific = FALSE))
  }
  if(append_cr) {
    q <- paste0(q, "\n")
  }
  # Return intermediate statements (if any) followed by this query.
  c(subsql_list[-length(subsql_list)], q)
}
|
732727ee59c74dc7306403378a7b43ae229a80b2 | 4981852fde1c33ed42ae995d9e47ae80c95e1a56 | /democratic_predictions.R | db61e4b7cc71c099818c1d754cde0375442864d3 | [] | no_license | apapiu/Primaries-Elections | 249d2a0981b344b05a0d1ab07566730026ceb0ba | 7c06b711c65f70a849b98f25bde77d3393e2c44b | refs/heads/master | 2021-01-17T06:48:32.357564 | 2016-07-13T08:09:21 | 2016-07-13T08:09:21 | 53,035,331 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,083 | r | democratic_predictions.R | library(dplyr)
library(tidyr)
library(plotly)

# County demographic facts and 2016 primary results.
counties <- read.csv("county_facts.csv")
primary <- read.csv("primary_results.csv")

# Keep only the Democratic primary results.
primary %>% filter(party == "Democrat") -> primary

# BUG FIX: total votes per county must be computed while the `votes`
# column still exists; the original script dropped `votes` first, so the
# summarize(sum(votes)) call below it could not find the column.
primary %>% group_by(fips) %>% summarize(sum(votes)) -> num_votes
#USE THIS to remove counties that have few votes.

# Drop raw vote counts so spread() collapses to one row per county.
primary$votes <- NULL
primary %>% spread(candidate, fraction_votes) -> dem_votes
dem_votes$`Martin O'Malley` <- NULL
dem_votes$` No Preference` <- NULL
dem_votes$` Uncommitted` <- NULL

# Label each county by the candidate with the larger vote fraction.
dem_votes$winner <- "Hillary"
dem_votes$winner[dem_votes$`Bernie Sanders` > dem_votes$`Hillary Clinton`] <- "Bernie"
dem_votes <- inner_join(dem_votes, counties, by = "fips")
train <- dem_votes[,c(8,11:dim(dem_votes)[2])] #winner plus different demographics.
train$winner <- as.factor(train$winner)
#models:
library(ranger)
library(randomForest)
library(xgboost)
library(gbm)
# Baseline tree-ensemble classifiers for the county winner.
ranger(winner ~., data = train)
model_rf <- randomForest(winner ~., data = train)
plot(model_rf, ylim = c(0,0.5))
varImpPlot(model_rf)
model_rf
#very nice separation:
ggplot(train, aes(x = EDU635213, y = log(1 + RHI225214))) +
  geom_point(aes(color = winner), alpha = 0.8) +
  scale_color_brewer(palette = "Set1") +
  xlim(c(60, 100))
ggplot(train, aes(x = log(1 + AGE775214), y = log(1 + RHI325214))) +
  geom_point(aes(color = winner), alpha = 0.8) +
  scale_color_brewer(palette = "Set1")
#let's do a 3D one:
#XGBoost:
# Feature matrix (demographic columns) and 0/1 label (1 = Clinton wins).
X <- as.matrix(train[,2:52])
y = 1*(train$winner == "Hillary")
# 10-fold cross-validation over 200 boosting rounds.
xgb.cv(data = X, label = y, nfold = 10, nrounds = 200,
       objective = "binary:logistic",
       metrics = c("error", "auc")) -> cv.error
plot(cv.error$test.error.mean)
plot(cv.error$test.auc.mean)
max(cv.error$test.auc.mean)
model_xgb <- xgboost(data = X, label = y,nrounds = 200,
                     objective = "binary:logistic")
xgb.importance(feature_names = colnames(X), model = model_xgb) -> xgb_importance
#Now let's do PCA on the most important 10 features
#Sort off slightly supervised unsupervised learning.
xgb_importance$Feature[1:10] ->important_features
X_imp <- X[,important_features]
X_imp <- scale(X_imp)
pca <- prcomp(X_imp)
proj <- predict(pca, X_imp)
proj_2d <- data.frame(proj[,1:2], winner = train$winner)
biplot(pca)
#this isn't very informative, you get a better view by percentage black and
ggplot(proj_2d, aes (x = PC1, y = PC2)) +
geom_point(aes(color = winner))
#some more EDA
hist(train$EDU635213)
# Discretize education and (sqrt of) % Black for the stacked-bar plots below.
train$hs_bin <- cut(train$EDU635213, breaks = 30)
train$black_bin <- cut(sqrt(train$RHI225214), breaks = 30) #take sqrt here to visualize better
# Proportional bar chart of `winner` within the levels of a (binned) column
# of the global `train` data frame. The column is passed unquoted, e.g.
# pplot(hs_bin); its expression is captured unevaluated for aes_q().
pplot <- function(x) {
  # Capture the caller's column expression without evaluating it.
  col_expr <- substitute(x)
  # quote(winner) is equivalent to the original substitute(winner): both
  # yield the bare symbol `winner` for non-standard evaluation in aes_q().
  ggplot(data = train, aes_q(x = col_expr, fill = quote(winner))) +
    geom_bar(stat = "count", position = "fill", width = 0.8) +
    scale_fill_brewer(palette = "Set1")
}
# Winner share within each high-school-diploma bin (horizontal bars).
pplot(hs_bin) + coord_flip() + geom_bar(position = "fill")+
labs(title = "Winner by Percentage of People With High School Diploma",
xlab = "Percentage with HS Diploma")
# Raw counts per %-Black bin; the added geom_bar() overrides the filled one.
pplot(black_bin) + geom_bar() + coord_flip()
# Hillary's vote share vs. % Black population, base-graphics scatter.
plot(dem_votes$RHI225214, dem_votes$`Hillary Clinton`)
|
05f008879851c2e3689713e04b7d55efb92a703a | c60e0344a8d4d9efc6032fcef28377d7d5a5803e | /Lab02_script.R | f18bc7105b31f055506cad7e60ba7f815e194504 | [] | no_license | osmanrkhan/data_viz_labs | 582d3396ada126a6742f8efacbbfd7e8bf802b73 | 7b5a2da9f453b79ff73725ae1078a9543abc1f19 | refs/heads/master | 2020-09-12T20:02:26.288946 | 2019-11-18T20:21:52 | 2019-11-18T20:21:52 | 222,536,702 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,246 | r | Lab02_script.R | ### Data Visualization (GOVT16-QSS17) Fall 2019
## GGplot Part III (3-5)
##
## Name: Osman Khan
## Date: October 29th, 2019
# Script will combine colored map of the world
# with renewables data, create a gif of change over time
library(ggplot2)
library(tidyverse)
library(readxl)
library(maps)
library(ggmap)
library(mapproj)
library(animation)
library(dplyr)
# Base polygons for all countries; drop Antarctica and its outlying
# territories so the map focuses on inhabited regions.
world_map <- map_data("world")
world_map <- world_map %>%
filter(region != "Antarctica" &
region != "French Southern and Antarctic Lands")
View(world_map)
# Sanity check: plot the empty world map once before joining data.
ggplot(data = world_map, aes(y = lat, x = long, group = group)) +
geom_polygon() +
coord_map(xlim = c(-180, 180)) +
theme_void()
#remove filler from head, tail, first column
# NOTE(review): absolute path is machine-specific -- consider a relative path.
renewable <- read_excel(
  "/Users/osmankhan/Desktop/19F/QSS 017/Lab02/renewable.xlsx")[2:226,]
# One column per year, 1990 through 2015 (replaces 26 hand-typed names).
colnames(renewable) <- c("country", paste0("year_", 1990:2015))
# Country names carry trailing footnote digits in the spreadsheet; strip them.
renewable$country <- gsub('[0-9]+', '', renewable$country)
# Recode country names to match map_data("world") region names. Indexed by
# row position in `renewable`; replaces 18 scalar assignments (one of which,
# row 215 = "USA", was duplicated in the original).
name_fixes <- c(
  "1"   = "Afghanistan",
  "3"   = "Algeria",
  "24"  = "Bolivia",
  "28"  = "Brazil",
  "47"  = "Congo",
  "50"  = "Ivory Coast",
  "68"  = "Ethiopia",
  "97"  = "Iran",
  "163" = "South Korea",
  "167" = "Russia",
  "181" = "South Sudan",
  "192" = "Sudan",
  "197" = "Syria",
  "213" = "UK",
  "215" = "USA",
  "220" = "Venezuela",
  "221" = "Vietnam"
)
renewable$country[as.integer(names(name_fixes))] <- name_fixes
View(renewable$country)
#joins the two datasets together
# full_join keeps map regions with no renewables data (and vice versa) so
# countries without data still draw, with NA fill.
joined_data <- full_join(x = renewable, y = world_map, by = c("country" = "region"))
View(joined_data)
#filters out first years for which there is messy data, gathers all by year
# NOTE(review): gather() is superseded by pivot_longer() in current tidyr.
final_dat <- joined_data %>%
gather(key = "year", value = "renew_percent", year_1990:year_2015) %>%
filter(year != "year_1990" & year != "year_1991" & year != "year_1992")
# Values arrive as text from the spreadsheet; coerce for the fill scale.
final_dat$renew_percent <- as.numeric(final_dat$renew_percent)
# Render one choropleth frame per year and stitch them into an animated GIF
# (map.gif, 0.3s per frame).
saveGIF({
# Loop through all time points
for (i in unique(final_dat$year)) {
# Subset the data for the current year (comment left over from a template).
data <- subset(final_dat, year == i)
# Finish the ggplot command; substr(i, 6, 10) extracts "YYYY" from "year_YYYY".
a <- data %>%
ggplot(aes(y = lat, x = long,group = group, fill = renew_percent)) +
geom_polygon() + coord_map(xlim = c(-180, 180)) +
theme_void() + ggtitle(paste(" Percentage of Total Energy Produced By Renewables By Country - ",
substr(i, start = 6, stop = 10)), "
") +
scale_fill_gradientn(colors = c("#240E8B", "#F04393", "#F9C449")) +
labs(fill = "% Renewable")
# Explicit print() is required inside saveGIF for ggplot objects to render.
print(a)
}
}, movie.name = "map.gif", interval = 0.3)
|
dd04db2240ac751eca4fc9146c14f0067b1aef65 | fc52feeae6371fb7d13b98fbb7799cad8892c24d | /functions/wide.form.R | 3171eb070feb56c340b4524849ee92d87331917d | [] | no_license | jack-w-hill/mangrove-hydro-edaphics | 8083ddf3066ccc5d9d8b038999e5c61c837460ef | 7787a9c6312e19e7254cb0b960d489cb90240403 | refs/heads/master | 2023-05-31T17:24:59.458049 | 2023-05-19T18:15:37 | 2023-05-19T18:15:37 | 254,569,885 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 528 | r | wide.form.R | wide.form <- function(group.by,
spread.by,
values,
fill = 0 ,
prop = FALSE,
prune = TRUE){
# Cross-tabulate: sum `values` into a group.by (rows) x spread.by (cols) table.
sum.mat <- tapply(values,
list(group.by, spread.by),
sum)
# Combinations with no observations come back NA; replace with `fill`.
sum.mat[is.na(sum.mat)] = fill
if(prune){
# Drop rows and columns whose totals are zero.
# NOTE(review): pruning down to a single row/column would drop matrix
# dimensions (no drop = FALSE) -- confirm callers never hit that case.
sum.mat <- sum.mat[rowSums(sum.mat) > 0, colSums(sum.mat) > 0]
}
if(prop){
# Convert each row to proportions of its row total.
sum.mat = prop.table(sum.mat, 1)
}
return(sum.mat)
}
|
1e3b5218acbf6d431cd916c8d0d8ba4da3d4f468 | e97f66fdbac0e27d8ce5107b903079c666eb7247 | /R/BoostedStacking.R | 5e175c4475161a431ff5358cd39458f6715552cf | [] | no_license | philippstats/mlr | b9f0c0042148d3305bce4a017a97cb48d3d2e308 | eb4188d8c7f8157dd93a00a9be6b3b0063dfb973 | refs/heads/master | 2021-01-12T13:54:27.124915 | 2016-06-01T10:01:59 | 2016-06-01T10:01:59 | 54,952,701 | 0 | 0 | null | 2016-03-29T06:49:24 | 2016-03-29T06:49:23 | null | UTF-8 | R | false | false | 8,478 | r | BoostedStacking.R | #' @title Create a BoostedStacking object.
#'
#' @description
#' Builds a stacked learner that iteratively tunes a model multiplexer and
#' appends the winning base learner's out-of-fold predictions as new features.
#'
#' @param model.multiplexer [\code{\link{ModelMultiplexer}}]\cr
#' The muliplexer learner.
#' @param predict.type [\code{character(1)}]\cr
#' Classification: \dQuote{response} (= labels) or \dQuote{prob} (= probabilities and labels by selecting the ones with maximal probability).
#' Regression: \dQuote{response} (= mean response) or \dQuote{se} (= standard errors and mean response).
#' @param resampling [\code{\link{ResampleDesc}} \cr
#' Resampling strategy.
#' @param mm.ps [\code{\link[ParamHelpers]{ParamSet}}]\cr
#'   Collection of parameters and their constraints for optimization.
#' Dependent parameters with a \code{requires} field must use \code{quote} and not
#' \code{expression} to define it.
#' @param control [\code{\link{TuneControlRandom}}]\cr
#' Control object for search method.
#' @param measure [\code{\link{Measure}}]\cr
#' Performance measure.
#' @param niter [\code{integer}]\cr
#' Number of boosting iterations.
#' @param tolerance [\code{numeric}]\cr
#' Tolerance for stopping criterion.
#' @examples
#' lrns = list(
#' #makeLearner("classif.ksvm", kernel = "rbfdot"), # no implm for response and multiclass
#' makeLearner("classif.gbm"),
#' makeLearner("classif.randomForest"))
#' mm = makeModelMultiplexer(lrns)
#' ctrl = makeTuneControlRandom(maxit = 3L)
#' ps = makeModelMultiplexerParamSet(mm,
#' makeIntegerParam("n.trees", lower = 1L, upper = 500L),
#' makeIntegerParam("interaction.depth", lower = 1L, upper = 10L),
#' makeIntegerParam("ntree", lower = 1L, upper = 500L),
#' makeIntegerParam("mtry", lower = 1L, upper = getTaskNFeats(tsk)))
#' lrns = lapply(lrns, setPredictType, bpt)
#' stb = makeBoostedStackingLearner(model.multiplexer = mm,
#' predict.type = spt, resampling = cv5, mm.ps = ps, control = ctrl,
#' measures = mmce, niter = 2L)
#' r = resample(stb, task = tsk, resampling = cv2)
#'
#' @export
#'
#'
#TODO: expose `tolerance` as a tunable hyperparameter
# Constructor for the boosted-stacking wrapper learner. Validates the
# configuration, builds the base Learner object via makeLearnerBaseConstructor,
# and stashes the stacking settings on it for trainLearner/predictLearner.
# NOTE(review): the defaults mm / ps / ctrl reference global objects and will
# error if those are not defined in the calling environment -- confirm intent.
makeBoostedStackingLearner = function(model.multiplexer = mm, mm.ps = ps,
control = ctrl, resampling = cv2, predict.type = "prob",
measures = mmce, niter = 2L, tolerance = 1e-8) {
# do we need an id?
# input checks
# INSERT IT HERE
assertClass(model.multiplexer, "ModelMultiplexer")
assertChoice(predict.type, choices = c("response", "prob"))
assertClass(resampling, "ResampleDesc")
assertClass(mm.ps, "ParamSet")
assertClass(control, "TuneControlRandom") # for now
assertInt(niter, lower = 1L)
assertNumber(tolerance)
#FIXME check if mm and ps fit together
#
# Guard: predict.type = "response" turns base-model predictions into factor
# features, which only works if all multiplexed learners accept factors.
if (model.multiplexer$type == "classif" & model.multiplexer$predict.type == "response" & "factors" %nin% model.multiplexer$properties) {
stop("base models in model multiplexer does not support classifcation with factor features, which are created by using predict.type='response' within base learners")
}
#check: measures and type
#
# niter is exposed as a (non-tunable) hyperparameter of the wrapper.
par.set = makeParamSet(makeIntegerParam("niter", lower = 1, tunable = FALSE))
bsl = makeLearnerBaseConstructor(classes = "BoostedStackingLearner",
id = "boostedStacking",
type = model.multiplexer$type,
package = model.multiplexer$package,
properties = model.multiplexer$properties,
par.set = par.set,
par.vals = list(niter = niter),
predict.type = predict.type)
bsl$fix.factors.prediction = TRUE
# Stacking configuration read back in trainLearner / predictLearner.
bsl$model.multiplexer = model.multiplexer
bsl$mm.ps = mm.ps
bsl$resampling = resampling
bsl$measures = measures
bsl$control = control
bsl$tolerance = tolerance
return(bsl)
}
#' @export
# Training routine: in each of up to `niter` iterations, tune the model
# multiplexer on the current task, stop if the score improvement falls under
# `tolerance`, otherwise train the best learner and append its out-of-fold
# (resampled) predictions to the task as a new feature for the next round.
trainLearner.BoostedStackingLearner = function(.learner, .task, .subset, ...) {
# checks
# Regression supports only predict.type = "response" at both levels.
if (.task$type == "regr") {
bpt = unique(extractSubList(.learner$model.multiplexer$base.learners, "predict.type"))
spt = .learner$predict.type
if (any(c(bpt, spt) == "prob"))
stopf("Base learner predict type are '%s' and final predict type is '%s', but both should be 'response' for regression.", bpt, spt)
}
# body
bms.pt = unique(extractSubList(.learner$model.multiplexer$base.learner, "predict.type"))
new.task = subsetTask(.task, subset = .subset)
niter = .learner$par.vals$niter
base.models = preds = vector("list", length = niter)
# score[1] is the initial best (Inf when minimizing, -Inf when maximizing);
# slots 2..niter+1 hold the tuning score of each boosting iteration.
score = rep(ifelse(.learner$measures$minimize, Inf, -Inf), niter + 1)
names(score) = c("init.score", paste("not.set", 1:niter, sep = "."))
tolerance = .learner$tolerance
for (i in seq_len(niter)) {
#messagef("[Iteration] Number %", i, Sys.time())
# Parameter Tuning
res = tuneParams(learner = .learner$model.multiplexer, task = new.task,
resampling = .learner$resampling, measures = .learner$measures,
par.set = .learner$mm.ps, control = .learner$control, show.info = FALSE)# IDEA: take best from every fold (i.e. anti-correlated/performs best on differnt input spaces/ bagging-like)
# Stopping criterium
score[i+1] = res$y[1]
names(score)[i+1] = paste(res$x$selected.learner, i, sep = ".")
# shift = previous score - current score (> 0 means improvement when
# minimizing). NOTE(review): for maximized measures the `shift > tolerance`
# branch stops only when the score WORSENS by more than tolerance, not when
# improvement stalls -- confirm this asymmetry is intended.
shift = score[i] - score[i+1]
tol.reached = ifelse(.learner$measures$minimize, shift < tolerance, shift > tolerance)
#messagef(">force.stop is %s", tol.reached)
if (tol.reached) {
messagef("[Tolerance Reached] Boosting stopped after %s iterations", i)
# Trim the unused slots for the iterations that never ran.
to.rm = i:niter
score = score[-c(to.rm + 1)]
base.models[to.rm] = NULL
preds[to.rm] = NULL
break()
}
# create learner, model, prediction
best.lrn = makeXBestLearnersFromMMTuneResult(tune.result = res,
model.multiplexer = .learner$model.multiplexer, mm.ps = .learner$mm.ps,
x.best = 1, measure = .learner$measures) # FIXME x.best
messagef("Best Learner: %s", best.lrn$id)
# Fit the winning learner on the full task; resample separately to obtain
# out-of-fold predictions for the stacked feature.
base.models[[i]] = train(best.lrn[[1]], new.task)
preds[[i]] = resample(best.lrn[[1]], new.task, resampling = .learner$resampling,
measures = .learner$measures, show.info = FALSE)
# create new task: append this round's predictions as feature "feat.i".
if (bms.pt == "prob") {
new.feat = getPredictionProbabilities(preds[[i]]$pred)
# FIXME if new.feat is constant, NA then use the second pred
new.task = makeTaskWithNewFeat(task = new.task,
new.feat = new.feat, feat.name = paste0("feat.", i))
} else {
new.feat = getPredictionResponse(preds[[i]]$pred)
# FIXME if new.feat is constant, NA then use the second pred
new.task = makeTaskWithNewFeat(task = new.task,
new.feat = new.feat, feat.name = paste0("feat.", i))
}
}
# FIXME pred.train returns acc to bms.pt...is that correct?
# Returned model: the fitted base models per iteration, their tuning scores,
# the final augmented task, and the last iteration's resampled prediction.
list(base.models = base.models, score = score[-1], final.task = new.task, pred.train = preds[[length(preds)]])
}
#predictLearner.BoostedStackingModel = function(.learner, .model, .newdata, ...) {
#' @export
# Prediction routine: replay the boosting chain on new data -- each stored
# base model predicts on the data augmented with all previous models'
# predictions ("feat.1", "feat.2", ...); the final model's prediction is
# returned in the format requested by the outer learner's predict.type.
predictLearner.BoostedStackingLearner = function(.learner, .model, .newdata, ...) {
new.data = .newdata
# sm.pt: predict.type of the stacked (outer) learner;
# bms.pt: predict.type shared by the multiplexed base learners.
sm.pt = .learner$predict.type
bms.pt = unique(extractSubList(.learner$model.multiplexer$base.learner, "predict.type"))
td = getTaskDescription(.model)
niter = length(.model$learner.model$base.models)
for (i in seq_len(niter)) {
newest.pred = predict(.model$learner.model$base.models[[i]], newdata = new.data)
#FIXME for pred with response (or forbid it!?)
# new.feat = getResponse(newest.pres) # is nicer
# Append iteration i's prediction as feature "feat.i", mirroring training.
if (bms.pt == "prob") {
new.feat = getPredictionProbabilities(newest.pred)
new.data = makeDataWithNewFeat(data = new.data,
new.feat = new.feat,
feat.name = paste0("feat.", i), td)
} else {
new.feat = getPredictionResponse(newest.pred)
new.data = makeDataWithNewFeat(data = new.data,
new.feat = new.feat, feat.name = paste0("feat.", i), td)
}
}
if (sm.pt == "prob") {
if (bms.pt == "prob") {
return(as.matrix(getPredictionProbabilities(newest.pred, cl = td$class.levels)))
} else { # if bms.pt="response" and sm.pt="prob" predict must be repeated
# Re-predict with the last model switched to prob; drop the feature that
# was appended for it in the final loop pass.
last.model = .model$learner.model$base.models[[niter]]
last.model$learner$predict.type = "prob"
newest.pred = predict(last.model, newdata = new.data[, -ncol(new.data)]) #FIXMENOW
return(as.matrix(getPredictionProbabilities(newest.pred, cl = td$class.levels)))
}
} else { #sm.pt = "response" and bms.pt = "prob"/"response"
return(getPredictionResponse(newest.pred)) #
}
#FIXME multiclass - should work now: check it!
}
# S3 method for the setPredictType generic: set the stacked learner's own
# predict.type and return the updated learner.
# Replaces a placeholder stub that only emitted message("blabla") and
# returned NULL, which broke the generic's contract of returning the learner.
setPredictType.BoostedStackingLearner = function(learner, predict.type) {
  # Same choices the constructor accepts for predict.type.
  predict.type = match.arg(predict.type, choices = c("response", "prob"))
  learner$predict.type = predict.type
  learner
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.