blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
043178d37a4c03862b57ff1564d29c1bd9b76cfd
|
c5a0d5c211e22a1c5c240f0bda607fb48a4f60d3
|
/cBioPortal_ui.R
|
64fdf7ebfa6d247a89cc9fb618f2fe29b306656e
|
[] |
no_license
|
kmezhoud/shinySpark
|
b10bbac1da73ea2ac50cbe20d4a51f0434d7a0c6
|
1867a8100a2378dbe61963de26b4e629589f1674
|
refs/heads/master
| 2020-04-24T09:17:33.511354
| 2019-02-21T11:49:06
| 2019-02-21T11:49:06
| 171,858,216
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,036
|
r
|
cBioPortal_ui.R
|
output$cBioPortal <- renderUI({
  # Builds the cBioPortal panel: a sidebar with Spark connection controls and
  # study / case / genetic-profile selectors, plus a tabbed main panel with one
  # DT table per data type (Studies, Clinical, ProfData, Mutation).
  # NOTE(review): every output id used here ("StudiesTable", "ui_Cases",
  # "suvivalPlot", ...) must match a server-side output slot. "suvivalPlot"
  # looks like a typo of "survivalPlot", but renaming it here alone would break
  # the pairing -- confirm against the server code before fixing.
  # tagList(
  sidebarLayout(
    sidebarPanel(
      # Spark session control.
      actionButton("connect", "Connect to spark", style="color: #0447FF"),
      #wellPanel(
      #conditionalPanel("input.tabs_cbioportal == 'Studies'",
      # uiOutput("Welcome"),
      # uiOutput("ui_Studies")),
      #conditionalPanel("input.tabs_cbioportal != 'Studies'",
      # Study selector. NOTE(review): `Studies` is not defined in this file;
      # presumably a global built elsewhere in the app -- verify it is in scope.
      selectizeInput(
        'StudiesID', 'Select a study', choices = Studies, selected = "gbm_tcga_pub" ,multiple = FALSE
      ),
      uiOutput("ui_Cases"),
      # Genetic profiles are irrelevant on the Clinical tab, so hide them there.
      conditionalPanel("input.tabs_cbioportal != 'Clinical'",
        uiOutput("ui_GenProfs")
      ),
      # ),
      # Clinical-attribute picker, shown only on the Clinical tab.
      conditionalPanel("input.tabs_cbioportal == 'Clinical'",
        uiOutput("ui_ClinicalData")
      ),
      #conditionalPanel("input.tabs_cbioportal == 'ProfData'", uiOutput("ui_ProfData")),
      #conditionalPanel("input.tabs_cbioportal == 'Mutation'", uiOutput("ui_MutData"))
      #)
      actionButton("close", "Close window & disconnect", style = "color: #FF0404")
    ),
    mainPanel(
      # conditionalPanel("input.overview_id == true",
      # uiOutput("pipeline"),
      # imageOutput("overview")
      # ),
      # tags$hr(),
      # One tab per data type; the active tab id drives the sidebar
      # conditionalPanel() expressions above.
      tabsetPanel(id = "tabs_cbioportal",
        tabPanel("Studies",
          #downloadLink("dl_Studies_tab", "", class = "fa fa-download alignright"),
          DT::dataTableOutput(outputId = "StudiesTable")
        ),
        tabPanel("Clinical",
          #downloadLink("dl_Clinical_tab", "", class = "fa fa-download alignright"),
          DT::dataTableOutput(outputId="ClinicalDataTable"),
          #if(!is.null(input$SurvPlotID)){ #!is.null(input$clinicalDataButtID) &&
          # Two bootstrap rows of plots, each gated on the SurvPlotID checkbox.
          div(class="row",
            div(class="col-xs-6",
              conditionalPanel("input.SurvPlotID ==true",
                h5("survival Plot from R session"),
                plotOutput("suvivalPlot")
              )
            ),
            # Right half of the first row intentionally left empty.
            div(class="col-xs-6"
            )
          ),
          div(class= "row",
            div(class="col-xs-6",
              conditionalPanel("input.SurvPlotID == true",
                h5("R session plot"),
                plotOutput("clinicalDataPlot")
              )
            ),
            div(class="col-xs-6",
              # Spark-side version of the plot requires both checkboxes.
              conditionalPanel("input.SurvPlotID ==true &&
                input.clinicalDataButtID == true",
                h5("spark transformation and plot"),
                plotOutput("clinicalDataPlot_spark")
              )
            )
          )
        ),
        tabPanel("ProfData",
          #downloadLink("dl_ProfData_tab", "", class = "fa fa-download alignright"),
          DT::dataTableOutput(outputId ="ProfDataTable")
        ),
        tabPanel("Mutation",
          DT::dataTableOutput(outputId ="MutDataTable")
        )
      )
    )
  )
  # )
})
|
e5c1aa4c04804739ec0ca4e7e2c9c33e1e486838
|
65c409808b847a0777cb12876fcebf70e574a26a
|
/scripts/PSpreanalysis.R
|
258d098f00b713fe29b78c51da5f37d88e72d8cf
|
[] |
no_license
|
lucyzhangzhang/PSlibs
|
1d9b897cb22dd058bd8a857fd1c9e5a7df8029f5
|
d0a0ec7d4154061ea0724d42f9590a76441d31f8
|
refs/heads/master
| 2020-06-27T08:47:33.480797
| 2020-04-07T18:27:39
| 2020-04-07T18:27:39
| 199,901,263
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,824
|
r
|
PSpreanalysis.R
|
# PSpreanalysis.R ---------------------------------------------------------
# Pre-analysis QC of 12 RNA-seq libraries:
#   1. raw vs. trimmed read counts and per-sample trimming survival ratios,
#   2. STAR mapping totals,
#   3. MAPQ-score distributions for unmerged vs. merged alignments, with
#      ANOVA / t-test comparisons and an outlier-labelled boxplot.
# Figures are written as .png files into the working directory.
library(ggplot2)
library(reshape2)
library(dplyr)
library(stats)

# NOTE(review): setwd() makes the script non-portable; all relative reads
# below ("counts", "unMerge_count", "Merged_count") assume this directory.
setwd("~/R/Eutrema/PS")

# One row per (sample, lane, read); col 2 = raw reads, col 3 = trimmed reads.
trimCounts <- read.table("counts")
samples <- c("ps10_S1", "ps11_S2", "ps12_S3", "ps37_S4",
             "ps38_S5", "ps40_S6", "ps41_S7", "ps44_S8",
             "ps46_S9", "ps48_S10", "ps49_S11", "ps50_S12")

# Annotate with the trimming survival ratio and the (read, lane, sample)
# layout implied by file order: 4 consecutive rows per sample (2 lanes x 2 reads).
trimCounts <- cbind(trimCounts,
                    trimCounts[, 3] / trimCounts[, 2],
                    rep(c(1, 2), nrow(trimCounts) / 2),
                    rep(c(1, 1, 2, 2), nrow(trimCounts) / 4),
                    rep(samples, each = 4))
colnames(trimCounts) <- c("File", "Raw", "Trimmed", "SurvivedRatio",
                          "Read", "Lane", "Sample")

# Stacked bars of raw vs. trimmed counts per input file.
tCmelt <- melt(trimCounts[, c("File", "Raw", "Trimmed")], id = "File")
Trim <- ggplot(tCmelt, aes(x = File, y = value / 1000000, fill = factor(variable))) +
  geom_bar(stat = "identity") +
  labs(y = "Millions of reads") +
  theme(text = element_text(size = 10, family = "serif"),
        axis.text.x = element_text(size = 10, angle = 75, hjust = 1),
        legend.position = "top",
        legend.title = element_blank())
ggsave("TrimStack.png", Trim, height = 6, width = 8, dpi = 125)

# Per-sample boxplot of the fraction of reads surviving trimming.
trimCounts$Sample <- as.factor(trimCounts$Sample)
trimRatio <- ggplot(trimCounts, aes(x = Sample, y = SurvivedRatio)) +
  geom_boxplot(notch = FALSE) +
  ylim(0.75, 0.9) +
  theme(text = element_text(size = 10, family = "serif"),
        axis.text.x = element_text(size = 10, angle = 75, hjust = 1),
        legend.position = "top",
        legend.title = element_blank())
ggsave("TrimRatio.png", trimRatio, height = 5, width = 6, dpi = 125)

# (superseded draft of a dodged per-sample barplot, kept for reference)
#tSmelt <- melt(trimCounts[,c("Sample", "Raw", "Trimmed")], id = "Sample")
#
# ggplot(tSmelt, aes(x = Sample, y = value/1000000, fill = factor(variable))) +
#     geom_bar(stat = "identity", position = "dodge")+
#     labs(y = "Average of millions of reads") +
#     stat_summary(geom = "errorbar", fun.data = mean_se, position = "dodge") +
#     theme(text = element_text(size=10, family="serif"),
#           axis.text.x = element_text(size=10,angle=75, hjust=1),
#           legend.position = "top",
#           legend.title = element_blank())

# Mean and SD of raw/trimmed counts per sample (cols 2, 3, 7 = Raw, Trimmed, Sample).
trimAve <- aggregate(. ~ Sample, trimCounts[, c(2, 3, 7)], mean)
trimAve
trimSD <- aggregate(. ~ Sample, trimCounts[, c(2, 3, 7)], sd)
trimSD
tAvem <- melt(trimAve, id = "Sample")
tAvem
tSDm <- melt(trimSD, id = "Sample")
tAvem <- cbind(tAvem, tSDm[, 3])
colnames(tAvem) <- c("Sample", "Treat", "Count", "SD")
tAvem <- transform(tAvem, Count = Count / 1000000, SD = SD / 1000000)
# To centre error bars on dodged bars: give the error bar the same dodge
# width as the bar, and roughly half that value as its own width.
tAve <- ggplot(tAvem, aes(x = Sample, y = Count, fill = Treat)) +
  geom_bar(stat = "identity", position = "dodge", width = 0.9) +
  geom_errorbar(aes(ymin = Count - SD, ymax = Count + SD), size = 0.5,
                position = position_dodge(0.9), width = 0.45) +
  ylab("Millions of reads") + xlab("Sample (n = 4)") +
  theme(text = element_text(size = 10, family = "serif"),
        axis.text.x = element_text(size = 10, angle = 75, hjust = 1),
        legend.position = "top",
        legend.title = element_blank())
tAve
ggsave("TrimAve.png", tAve, height = 5, width = 6, dpi = 125)

# STAR mapping totals per sample.
merge_data <- read.table("~/scratch/PS/STAR/tot", header = TRUE)
merge_data <- cbind(samples, merge_data)
mergMelt <- melt(merge_data, id = "samples")
mergMelt
merge_plot <- ggplot(mergMelt, aes(x = samples, y = value / 1000000, fill = variable)) +
  geom_bar(stat = "identity", position = "dodge") +
  labs(x = "Samples", y = "Millions of mapped reads") +
  theme(text = element_text(size = 10, family = "serif"),
        axis.text.x = element_text(size = 10, angle = 75, hjust = 1),
        legend.position = "top",
        legend.title = element_blank())
print(merge_plot)

###### MAPQ ######
library(dplyr) # already attached above; redundant but harmless
unMerge <- read.table("unMerge_count")
Merge <- read.table("Merged_count")
counts <- cbind(rep(samples, each = 4), unMerge, Merge)
colnames(counts) <- c("Sample", "Unmerged", "Score", "Merged")
# Drop the duplicated trailing column.
# FIX(review): the original `counts[, 1:ncol(counts)-1]` relied on `:` binding
# tighter than `-`, yielding indices 0:(ncol-1); index 0 is silently dropped,
# so it worked by accident. seq_len() states the intent directly.
counts <- counts[, seq_len(ncol(counts) - 1)]

UnMerged <- counts[, c(1, 3, 2)] # Sample, Score, Unmerged
# Total unmerged reads per sample, repeated across that sample's 4 MAPQ rows.
unmergeTot <- rep(aggregate(UnMerged$Unmerged, by = list(Category = UnMerged$Sample),
                            FUN = sum)[, 2], each = 4)
UnMerged <- cbind(UnMerged, unmergeTot)
UnMerged <- cbind(UnMerged, UnMerged$Unmerged / UnMerged$unmergeTot)
colnames(UnMerged)[ncol(UnMerged)] <- "Ratio"
UnMerged
# MAPQ 255 marks uniquely mapped reads in STAR output.
uniq <- UnMerged %>% filter(Score == 255)
trim.sum <- tapply(trimCounts$Trimmed, trimCounts$Sample, FUN = sum)
uniq <- cbind(uniq, trim.sum)
# NOTE(review): "TrimRation" is a typo of "TrimRatio", but the column is read
# positionally (uniq[, 7]) below, so the name is kept for compatibility.
uniq$TrimRation <- uniq$unmergeTot / uniq$trim.sum * 100
uniq$unmergeTot <- as.double(uniq$unmergeTot)
formatted <- data.frame(format(uniq[, 4], digits = 3, scientific = TRUE),
                        format(uniq[, 7], digits = 3, scientific = FALSE),
                        stringsAsFactors = FALSE)
rownames(formatted) <- rownames(uniq)
colnames(formatted) <- c("Counts", "mapRatio")
# LaTeX-ready table: " & " separators, "\\" line endings. The original call
# left `row.names = ` empty (a missing argument, i.e. the default TRUE);
# made explicit here.
write.table(formatted, file = "mapRatio.tab", quote = FALSE, row.names = TRUE,
            col.names = FALSE, sep = " & ", eol = " \\\\\n")

M_erged <- counts[, c(1, 3, 4)] # Sample, Score, Merged
mergeTot <- rep(aggregate(M_erged$Merged, by = list(Category = M_erged$Sample),
                          FUN = sum)[, 2], each = 4)
M_erged <- cbind(M_erged, mergeTot)
M_erged <- cbind(M_erged, M_erged$Merged / M_erged$mergeTot)
colnames(M_erged)[ncol(M_erged)] <- "Ratio"
M_erged

# MAPQ distributions (log scale) for unmerged and merged alignments.
UnMerge_plot <- ggplot(UnMerged, aes(x = Sample, y = Unmerged / unmergeTot, fill = factor(Score))) +
  geom_bar(stat = "identity", position = "dodge") +
  scale_y_log10() +
  theme(text = element_text(size = 10, family = "serif"),
        axis.text.x = element_text(size = 10, angle = 75, hjust = 1),
        legend.position = "top",
        legend.title = element_blank())
ggsave("UnMerged.png", UnMerge_plot, height = 4, width = 5, dpi = 125)
Merge_plot <- ggplot(M_erged, aes(x = Sample, y = Merged, fill = factor(Score))) +
  geom_bar(stat = "identity", position = "dodge") +
  scale_y_log10() +
  theme(text = element_text(size = 10, family = "serif"),
        axis.text.x = element_text(size = 10, angle = 75, hjust = 1),
        legend.position = "top",
        legend.title = element_blank())
ggsave("Merged.png", Merge_plot, height = 4, width = 5, dpi = 125)

# Does the unique-mapping ratio differ across samples (ANOVA) or between
# unmerged and merged processing (t-test)?
unMergeUniq <- UnMerged %>% filter(Score == 255)
unMergeAOV <- aov(unMergeUniq$Ratio ~ unMergeUniq$Sample)
MergeUniq <- M_erged %>% filter(Score == 255)
MergeAOV <- aov(MergeUniq$Ratio ~ MergeUniq$Sample)
# FIX(review): the original passed `val.equal = T`, a typo for `var.equal`,
# which t.test() silently absorbed via `...` and therefore ran a Welch test.
# The intended pooled-variance (Student) t-test is now requested explicitly.
res <- t.test(unMergeUniq$Ratio, MergeUniq$Ratio, var.equal = TRUE)
res
allUniq <- cbind(samples, unMergeUniq$Ratio, MergeUniq$Ratio)
colnames(allUniq) <- c("Sample", "Unmerged", "Merged")

# Standard boxplot rule: flag values beyond 1.5 * IQR from the quartiles.
is_outlier <- function(x) {
  x < quantile(x, 0.25) - 1.5 * IQR(x) | x > quantile(x, 0.75) + 1.5 * IQR(x)
}

allUniq <- melt(as.data.frame(allUniq), id = "Sample")
# Label outliers with their sample name; non-outliers get NA so geom_text skips them.
# NOTE(review): `samples[Sample]` only indexes correctly while Sample is a
# factor whose integer codes align with `samples` (pre-R-4.0 stringsAsFactors
# behaviour); with a character column it yields NA -- confirm under the target
# R version.
mergeBox <- allUniq %>%
  group_by(variable) %>%
  mutate(outlier = ifelse(is_outlier(as.numeric(value)), samples[Sample], as.numeric(NA))) %>%
  ggplot(aes(x = as.factor(variable), y = as.numeric(value), fill = as.factor(variable))) +
  geom_boxplot() +
  labs(x = "Unmerged/Merged reads (n = 12)", y = "Ratio of uniquely mapped reads") +
  geom_text(aes(label = outlier), na.rm = TRUE, vjust = -1, size = 3, family = "serif") +
  ylim(0.985, 0.999) +
  theme(text = element_text(size = 10, family = "serif"),
        axis.text.x = element_text(size = 10, hjust = 1),
        legend.position = "top",
        legend.title = element_blank())
ggsave("mergeBox.png", mergeBox, height = 4, width = 3, dpi = 125)
|
0b36639ce86d646709ad5bef8b46b454148a15d0
|
7e45e97f10ba41f5ed251438044dc923d64f6b3d
|
/antic-herit-sim.r
|
86d8764ab0e97821347b0c65999a85ba5dfa85fc
|
[] |
no_license
|
ericminikel/e200k-anticipation
|
a2513cfd4a1e02f139b7bbab1c7f9dbd450ea22e
|
13415953725ab1dd3c527002bfcc2c83f8ac6b0a
|
refs/heads/master
| 2020-12-24T13:43:55.723392
| 2015-02-17T00:07:17
| 2015-02-17T00:07:17
| 24,304,771
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 29,886
|
r
|
antic-herit-sim.r
|
#!/broad/software/free/Linux/redhat_5_x86_64/pkgs/r_3.0.2/bin/Rscript
# Eric Vallabh Minikel
# Demonstrates that "windowing" of year of onset is sufficient to create robust
# and highly significant false positive signals of anticipation and heritability
# This simulation will generate n parent/child pairs and then ascertain them
# according to user-specified criteria. The criteria I changed most often
# can be changed via command-line parameters; others are still hard-coded.
# Once the pairs are ascertained, a variety of tests are performed and
# plots generated to show how the ascertainment method creates bias.
# In "fullmode" this script will print the output of many tests to stdout
# which is useful for getting a sense of how the simulation works.
# If "imgdir" is set, it will save plots to the specified directory,
# otherwise it creates no plots.
# By default it runs in sparse mode where all it does is output the
# figures for anticipation (years), heritability (%) and year of birth
# age of onset correlation (slope units). This mode is useful for
# running large batches with different parameters.
suppressPackageStartupMessages(require(survival))
options(stringsAsFactors=FALSE)
suppressPackageStartupMessages(require(optparse)) # http://cran.r-project.org/web/packages/optparse/optparse.pdf
# Command-line interface. NOTE(review): optparse stores each parsed value
# under the LONG flag name (e.g. "-n"/"--npairs" -> opt$npairs); any later
# reference to opt$n would therefore return NULL -- verify downstream usage.
option_list = list(
  make_option(c("-n", "--npairs"), action="store", default=40000,
    type='integer', help="Number of parent/child pairs"),
  make_option(c("--pyearmin"), action="store", default=1800,
    type='integer', help="Minimum parent birth year"),
  make_option(c("--pyearmax"), action="store", default=1980,
    type='integer', help="Maximum parent birth year"),
  make_option(c("-v", "--verbose"), action="store_true", default=FALSE,
    help="Print verbose output [default %default]"),
  make_option(c("-q", "--quiet"), action="store_false", dest="verbose",
    help="Do not print verbose output (this is the default)"),
  make_option(c("-i", "--imgdir"), action="store", default="",
    type='character', help="Directory to save images to (default is no images)"),
  make_option(c("-f", "--fullmode"), action="store_true", default=FALSE,
    help="Run in full output mode"),
  make_option(c("-a", "--amode"), action="store", default=1,
    type="integer", help="Ascertainment mode [default %default]"),
  make_option(c("-r", "--rate"), action="store", default=.05,
    type="numeric", help="Rate of decline in ascertainment [default %default]"),
  make_option(c("--amin"), action="store", default=1989,
    type="integer", help="Minimum ascertainable year [default %default]"),
  make_option(c("--amax"), action="store", default=2013,
    type="integer", help="Maximum ascertainable year [default %default]"),
  make_option(c("-s","--seed"), action="store", default=2222,
    type="integer", help="Seed for randomization [default %default]"),
  make_option(c("-l","--limit_n"), action="store", default=NA,
    type="integer", help="Limit to this many ascertained pairs"),
  make_option(c("--heritability_is_real"), action="store_true", default=FALSE,
    help="Simulate heritability of age of onset being real")
)
opt = parse_args(OptionParser(option_list=option_list))
# uncomment to run in interactive mode instead of from command line
# opt = list()
# opt[["n"]] = 100000
# opt[["verbose"]] = FALSE
# opt[["pyearmin"]] = 1700
# opt[["pyearmax"]] = 2000
# opt[["imgdir"]] = ""
# opt[["fullmode"]] = TRUE
# opt[["amode"]] = 1
# opt[["rate"]] = .05
# opt[["amin"]] = 1989
# opt[["amax"]] = 2013
# opt[["seed"]] = 1
# opt[["limit_n"]] = NA
# opt[["heritability_is_real"]] = FALSE
# set random seed
seed = opt$seed
set.seed(seed) # 2222
# bother to create images??
imgs = opt$imgdir != ""
imgdir = opt$imgdir
######
# graphical parameters
pcolor='#D18532' #'orange'
ccolor='#091090' #'violet'
######
# load & process U.S. actuarial life tables for life expectancy censoring
# http://www.ssa.gov/oact/STATS/table4c6.html
# life = read.table('~/d/sci/src/e200k-anticipation/ssa.actuarial.table.2009.txt',sep='\t',header=TRUE)
# Expects columns age, msur, fsur (number of 100,000 births surviving to each age).
life = read.table('ssa.actuarial.table.2009.txt',sep='\t',header=TRUE)
life$age = as.integer(life$age)
life$msur = as.numeric(life$msur)
life$fsur = as.numeric(life$fsur)
# calculate percent surviving. this is 1-CDF.
life$mpctsur = life$msur/100000
life$fpctsur = life$fsur/100000
life$allpctsur = (life$msur + life$fsur) / 200000
# calculate probability, from age 0:119 of dying at each possible age. this is the PDF.
life$pdf = life$allpctsur - c(life$allpctsur[-1],0)
# note: here is how to sample one person's age at death from the life expectancy distribution:
# sample(0:119, size=1, prob=life$pdf, replace=TRUE)
#######
# model parameters you can tweak
# FIX(review): the option is declared as "-n/--npairs", so optparse stores the
# parsed value under opt$npairs; opt$n (read by the original) is NULL and would
# make runif(n=nparents, ...) fail. Fall back to opt$n so the commented-out
# interactive mode (which sets opt[["n"]]) keeps working.
nparents = if (!is.null(opt$npairs)) opt$npairs else opt$n # 40000 # 40000
onset_mean = 64 # orig 65
onset_sd = 10 # orig 15
parent_yob_min = opt$pyearmin # 1800 # originally 1870
parent_yob_max = opt$pyearmax # 2000 # originally 1980
fertility_age_mean = 28 # orig 26
fertility_age_sd = 6 # orig 3
ascertainable_year_min = opt$amin # 1989 # try 1880 - results still significant
ascertainable_year_max = opt$amax # 2013
ascertainable_year_min_mode3 = 1969 # never made this adjustable via command line & not used in paper
ascertainable_dec_rate_mode4 = opt$rate # .01
ascertain_mode = opt$amode # 1
# 1 = parent and child must BOTH be in ascertainable range for pair to be asceratined (Simulations 1-3 in paper)
# 2 = only one must be in range; if so, the other can also be ascertained regardless of how early year of onset (Simulation 10 in paper)
# 3 = like mode 1, but the second individual must still be within a more generous range (e.g. 1969-2013) instead of the strict range (1989-2013)
# we didn't end up using mode 3 in the paper
# 4 = like mode 1, but the probability of ascertaining the second individual declines by X% for each year before the min date. (Simulation 4-9 in paper)
if (opt$verbose) {
  # FIX(review): print() has no `file` argument -- the original
  # `print(opt,file=stderr())` silently ignored it and wrote to stdout.
  # Send the options to stderr as clearly intended.
  write(paste(capture.output(print(opt)), collapse = "\n"), stderr())
}
# accepts a list of vectors of identical length and returns one vector with the first non-NA value
# For each position i, the result is taken from the first argument whose i-th
# element is not NA (and stays NA when every argument is NA there).
# Stops with an informative error if the vectors differ in length.
coalesce = function(...) {
  # convert input arguments into a list of vectors
  input_list = list(...)
  # check that all input vectors are of same length
  vectorlength = length(input_list[[1]])
  for (j in seq_along(input_list)) {
    if(length(input_list[[j]]) != vectorlength) {
      stop(paste("Not all vectors are of same length. First vector length: ",vectorlength,". Vector #",j,"'s length: ",length(input_list[[j]]),sep=""))
    }
  }
  # FIX(review): the original filled the result with a scalar double loop
  # (one interpreted iteration per vector per element); a vectorized fold over
  # the argument list computes the same positional coalesce in O(n_vectors)
  # vector operations.
  result = Reduce(function(acc, nxt) ifelse(is.na(acc), nxt, acc), input_list)
  # the original always returned an unnamed vector (built from rep(NA, n))
  unname(result)
}
#######
# simulation
# Draw parent birth years uniformly; child birth years add a normal fertility
# age; onset ages are i.i.d. normal for parent and child (no true heritability
# unless --heritability_is_real). Intercurrent (non-disease) death ages are
# sampled from the actuarial PDF built above.
# NOTE(review): rnorm's m=/s= rely on partial argument matching for mean=/sd=;
# spell them out if this code is ever touched.
parent_yob = round(runif(n=nparents, min=parent_yob_min, max=parent_yob_max))
child_yob = round(rnorm(n=nparents, m=fertility_age_mean, s=fertility_age_sd)) + parent_yob
parent_hypo_onset_age = round(rnorm(n=nparents, m=onset_mean, s=onset_sd)) # parent's hypothetical onset age, if they lived long enough to have onset
parent_hypo_onset_year = parent_yob + parent_hypo_onset_age
child_hypo_onset_age = round(rnorm(n=nparents, m=onset_mean, s=onset_sd))
child_hypo_onset_year = child_yob + child_hypo_onset_age
parent_intercurrent_age = sample(0:119, size=nparents, prob=life$pdf, replace=TRUE)
child_intercurrent_age = sample(0:119, size=nparents, prob=life$pdf, replace=TRUE)
# test results of having actual heritability
# (child onset becomes the average of an independent draw and the parent's onset)
if (opt$heritability_is_real){
  child_hypo_onset_age = rowMeans(cbind(round(rnorm(n=nparents, m=onset_mean, s=onset_sd)),parent_hypo_onset_age))
  child_hypo_onset_year = child_yob + child_hypo_onset_age
}
# determine "case" status
parent_case = parent_hypo_onset_age < parent_intercurrent_age # is this person a "case" i.e. do they have an age of onset prior to death of intercurrent illness?
child_case = child_hypo_onset_age < child_intercurrent_age # is this person a "case" i.e. do they have an age of onset prior to death of intercurrent illness?
# "hypothetical" scenario: include all individuals who had onset in their lifetimes, i.e. did not die of intercurrent disease, even if their death is after 2013
# Non-cases get NA onset so they drop out of later means/tests.
parent_onset_age = parent_hypo_onset_age
parent_onset_age[!parent_case] = NA
parent_onset_year = parent_hypo_onset_year
parent_onset_year[!parent_case] = NA
child_onset_age = child_hypo_onset_age
child_onset_age[!child_case] = NA
child_onset_year = child_hypo_onset_year
child_onset_year[!child_case] = NA
# evaluate how the "competing risks" due to life expectancy change the estimated age of onset.
mean(c(parent_hypo_onset_age,child_hypo_onset_age))
mean(c(parent_onset_age,child_onset_age),na.rm=TRUE)
# do a survival analysis on all individuals, without ascertainment, but accounting for intercurrent death censoring
all_ages = c(pmin(parent_hypo_onset_age, parent_intercurrent_age), pmin(child_hypo_onset_age, child_intercurrent_age))
all_status = c(parent_hypo_onset_age < parent_intercurrent_age, child_hypo_onset_age < child_intercurrent_age) # TRUE = event / died of E200K. FALSE = no event / died intercurrently
survdata = data.frame(ages=all_ages,status=all_status)
mfit = survfit(Surv(ages,status==1) ~ 1, data = survdata)
mfit
if (imgs) {
  # Fig 1A without lines
  png(file.path(imgdir,'fig1a.withoutlines.png'),res=300,pointsize=3,width=500,height=500)
  plot(NA,NA,xlim=c(1800,2070),ylim=c(1,50),main='Lifespan of parent/child pairs',xlab='Year',ylab='Parent/child pair id')
  # One horizontal segment per individual: square = birth, X = hypothetical onset.
  for (i in 1:50) {
    points(c(parent_yob[i],parent_hypo_onset_year[i]),c(i,i),col=pcolor,type='l',lwd=3)
    points(parent_yob[i],i,col=pcolor,pch=15,cex=1.2)
    points(parent_hypo_onset_year[i],i,col=pcolor,pch=4,cex=1.2)
    points(c(child_yob[i],child_hypo_onset_year[i]),c(i,i),col=ccolor,type='l',lwd=2)
    points(child_yob[i],i,col=ccolor,pch=15,cex=1)
    points(child_hypo_onset_year[i],i,col=ccolor,pch=4,cex=1)
  }
  dev.off()
}
if (imgs) {
  # visualization of simulation - first 30 pairs
  # NOTE(review): the loop below actually draws the first 50 pairs, not 30.
  # Fig 1A is this with nparents=40000, ascertain_mode = 1, yob range = 1800-2000 and asc range = 1989-2013
  png(file.path(imgdir,'fig1a.png'),res=300,pointsize=3,width=500,height=500)
  plot(NA,NA,xlim=c(1870,2070),ylim=c(1,50),main='Lifespan of parent/child pairs',xlab='Year',ylab='Parent/child pair id')
  # Red verticals mark the hard-coded ascertainment window used in the figure.
  abline(v=1989,col='red')
  abline(v=2013,col='red')
  for (i in 1:50) {
    points(c(parent_yob[i],parent_hypo_onset_year[i]),c(i,i),col=pcolor,type='l',lwd=1.2)
    points(parent_yob[i],i,col=pcolor,pch=15,cex=1.3)
    points(parent_hypo_onset_year[i],i,col=pcolor,pch=4,cex=1.3)
    points(c(child_yob[i],child_hypo_onset_year[i]),c(i,i),col=ccolor,type='l',lwd=1)
    points(child_yob[i],i,col=ccolor,pch=15,cex=1.3)
    points(child_hypo_onset_year[i],i,col=ccolor,pch=4,cex=1.3)
  }
  mtext(side=3, line=-2, text="A", cex=2, adj=0, outer=TRUE)
  # legend('bottomleft',c('parents','child','birth','onset'),pch=c(16,16,15,4),col=c(pcolor,ccolor,'black','black'),cex=1)
  dev.off()
}
# distribution of year of onset in hypothetical data
if (imgs) {
  png(file.path(imgdir,'hypo.hist.yoo.png'),width=500,height=500)
  hist(c(parent_onset_year, child_onset_year), breaks=100, col='black', xlim=c(parent_yob_min, parent_yob_max+120))
  dev.off()
}
# no correlation betwen yob and ao in hypothetical data
if (imgs) {
  png(file.path(imgdir,'hypo.hist.yob.png'),width=500,height=500)
  plot(c(parent_yob, child_yob), c(parent_onset_age, child_onset_age), ylim=c(0,120))
  dev.off()
}
if (opt$fullmode) {
  # Regression of onset age on birth year; pre-ascertainment this should be flat.
  all_yob = c(parent_yob, child_yob)
  all_onset = c(parent_onset_age, child_onset_age)
  m = lm(all_onset ~ all_yob)
  summary(m)
}
# no trend (no heritability) in hypothetical data
if (imgs) {
  png(file.path(imgdir,'hypo.ao.corr.png'),width=500,height=500)
  plot(parent_onset_age, child_onset_age)
  dev.off()
}
if (opt$fullmode) {
  m = lm(child_onset_age ~ parent_onset_age)
  summary(m)
}
# no difference (no anticipation) in hypothetical data
if (imgs) {
  png(file.path(imgdir,'hypo.ao.antic.barplot.png'),width=500,height=500)
  barplot(rbind(parent_onset_age, child_onset_age), beside=TRUE, col=c(pcolor,ccolor), border=NA)
  dev.off()
}
if (opt$fullmode) {
  cat("t test of all simulated individuals\n")
  t.test(parent_onset_age, child_onset_age, alternative="two.sided", paired=TRUE)
}
# now actually only ascertain the ascertainable cases
# Right-truncate everything after the ascertainment window. Comparisons
# against NA onset years yield NA here; those are coerced to FALSE below.
parent_onset_age[parent_onset_year > ascertainable_year_max] = NA
parent_onset_year[parent_onset_year > ascertainable_year_max] = NA
child_onset_age[child_onset_year > ascertainable_year_max] = NA
child_onset_year[child_onset_year > ascertainable_year_max] = NA
if (opt$fullmode) {
  cat("t test after right truncation of year of onset only\n")
  t.test(parent_onset_age, child_onset_age, alternative="two.sided", paired=TRUE)
  # note that this only introduces a small difference - the magnitude depends on how
  # far back in history your simulated pairs go. when parent_yob is 1700 to 2000,
  # you get .35 years anticipation, when it's 1500 to 2000, you get .17 years anticipation,
  # when it's 1000 to 2000, you get .10 years, and so on.
}
if (ascertain_mode == 1) {
  # mode 1: you can ONLY ascertain people with onset in the given range.
  # therefore parent and child must each be in ascertainable range to be ascertained
  parent_asc = parent_onset_year > ascertainable_year_min & parent_onset_year < ascertainable_year_max # will this person be ascertained in the study
  child_asc = child_onset_year > ascertainable_year_min & child_onset_year < ascertainable_year_max
  both_asc = parent_asc & child_asc # will this parent-child _pair_ be ascertained in the study
} else if (ascertain_mode == 2) {
  # mode 2: you FIRST ascertain people within range, then look to ascertain their family members
  # therefore parent can be ascertained if child is in range and vice versa
  parent_asc = parent_onset_year > ascertainable_year_min & parent_onset_year < ascertainable_year_max # will this person be ascertained in the study
  child_asc = child_onset_year > ascertainable_year_min & child_onset_year < ascertainable_year_max
  # Deliberate chained assignment: all three vectors become the OR of the two.
  both_asc = parent_asc = child_asc = parent_asc | child_asc # if one is ascertained, both can be ascertained
} else if (ascertain_mode == 3) {
  # mode 3: you FIRST ascertain people within range, then look to ascertain their family members within SECOND, MORE GENEROUS range
  # therefore parent can be ascertained if child is in range and vice versa, but cases who died very long ago will not be ascertained
  parent_asc = parent_onset_year > ascertainable_year_min & parent_onset_year < ascertainable_year_max # will this person be ascertained in the study
  child_asc = child_onset_year > ascertainable_year_min & child_onset_year < ascertainable_year_max
  either_asc = parent_asc | child_asc # if one is ascertained, both can be ascertained
  parent_asc = either_asc & parent_onset_year > ascertainable_year_min_mode3
  child_asc = either_asc & child_onset_year > ascertainable_year_min_mode3
  both_asc = parent_asc & child_asc
} else if (ascertain_mode == 4) {
  # mode 4: FIRST ascertain people within range, then ascertain relatives from earlier but with a declining probability of ascertainment
  # depending on how long ago.
  parent_in_range = parent_onset_year > ascertainable_year_min & parent_onset_year < ascertainable_year_max
  parent_in_range[is.na(parent_in_range)] = FALSE
  child_in_range = child_onset_year > ascertainable_year_min & child_onset_year < ascertainable_year_max
  child_in_range[is.na(child_in_range)] = FALSE
  either_asc = parent_in_range | child_in_range
  # Ascertainment probability: 1 inside the window, decaying linearly before it,
  # floored at 0 and capped at 1.
  parent_p = pmin(pmax(1.0*(parent_in_range),
    1.0-ascertainable_dec_rate_mode4*(ascertainable_year_min - parent_onset_year),
    0.0,na.rm=TRUE),1.0)
  parent_asc = either_asc & (runif(n=nparents,min=0,max=1) < parent_p)
  child_p = pmin(pmax(1.0*(child_in_range),
    1.0-ascertainable_dec_rate_mode4*(ascertainable_year_min - child_onset_year),
    0.0,na.rm=TRUE),1.0)
  child_asc = either_asc & (runif(n=nparents,min=0,max=1) < child_p)
  both_asc = parent_asc & child_asc
}
# convert NA to false
parent_asc[is.na(parent_asc)] = FALSE
child_asc[is.na(child_asc)] = FALSE
both_asc[is.na(both_asc)] = FALSE
if (opt$fullmode) {
  cat("t test after ascertainment, without limiting of n\n")
  t.test(parent_onset_age[both_asc], child_onset_age[both_asc], alternative="two.sided", paired=TRUE)
}
### survival analysis of ascertained data
# cannot combine this option with limit_n as currently written.
# therefore this code is run before the limit_n clause
# Build a long-format data frame for a two-group survival comparison.
# age1, age2: event/censoring ages for group 1 and group 2
# stat1, stat2: TRUE = event observed, FALSE = censored (parallel to age1/age2)
# groupnames: labels attached to group-1 and group-2 rows
# Returns a data frame with columns ages, status, group.
getsurvdf = function(age1,age2,stat1,stat2,groupnames=c(0,1)) {
  n1 = length(age1)
  n2 = length(age2)
  data.frame(
    ages = c(age1, age2),
    status = c(stat1, stat2),
    # rep(..., times=) expands to n1 copies of the first label, n2 of the second
    group = rep(groupnames, times = c(n1, n2))
  )
}
if (opt$fullmode) {
  # # choose pairs to include
  # "Alive and well" = survived past the end of the ascertainment window with
  # neither onset nor intercurrent death before it.
  child_is_aw = child_intercurrent_age + child_yob > ascertainable_year_max & child_hypo_onset_age + child_yob > ascertainable_year_max
  parent_is_aw = parent_intercurrent_age + parent_yob > ascertainable_year_max & parent_hypo_onset_age + parent_yob > ascertainable_year_max
  # Observation time = earliest of onset, intercurrent death, or end of window.
  parent_age = pmin(parent_hypo_onset_age, parent_intercurrent_age, ascertainable_year_max - parent_yob)
  child_age = pmin(child_hypo_onset_age, child_intercurrent_age, ascertainable_year_max - child_yob)
  parent_event = parent_hypo_onset_age <= ascertainable_year_max - parent_yob & parent_intercurrent_age > parent_onset_age
  child_event = child_hypo_onset_age <= ascertainable_year_max - child_yob & child_intercurrent_age > child_onset_age
  # now try when only half of alive-and-wells are included
  # p_known = 0 # probability of an alive-and-well individual being known as such.
  # Sweep the probability of knowing an alive-and-well relative from 0 to 1 and
  # record the median onset estimate for parents and children at each level.
  surv_results = data.frame(p_known = seq(0,1,.1), parent_median=rep(0,11), child_median=rep(0,11))
  for (p_known in seq(0,1,.1)) {
    child_known_aw = child_is_aw & sample(c(TRUE,FALSE),prob=c(p_known,1-p_known),size=length(child_is_aw),replace=TRUE)
    parent_known_aw = parent_is_aw & sample(c(TRUE,FALSE),prob=c(p_known,1-p_known),size=length(parent_is_aw),replace=TRUE)
    surv_include_half = (parent_asc & child_asc) | (parent_asc & child_known_aw) | (child_asc & parent_known_aw)
    survdf = getsurvdf(parent_age[surv_include_half],child_age[surv_include_half],parent_event[surv_include_half],child_event[surv_include_half])
    msurv = with(survdf,Surv(ages, status==1))
    mfit = survfit(Surv(ages,status==1) ~ group, data = survdf)
    diffobj = survdiff(Surv(ages,status==1) ~ group, data=survdf)
    p = 1-pchisq(diffobj$chisq,df=1)
    subt = '' # paste('log-rank test p value = ',formatC(p,digits=2),sep='')
    medians = summary(mfit)$table[,"median"] # extract medians from summary
    med_diff = as.integer(medians[2] - medians[1]) # take difference of medians of survival curves
    surv_results[surv_results$p_known==p_known,c("parent_median","child_median")] = medians
  }
  # NOTE(review): this png() call is gated on fullmode, not on imgs; with the
  # default empty --imgdir it targets file.path('', 'fig1e.new.png') -- confirm
  # fullmode is always run with an image directory set.
  png(file.path(imgdir,'fig1e.new.png'),res=300,pointsize=3,width=500,height=500)
  plot(NA,NA,xlim=c(0,1),ylim=c(54,72),
    xlab='Ascertainment rate of alive and well individuals',ylab='Median age of onset',
    main='Survival of parents vs. children',yaxt='n',xaxt='n')
  axis(side=1,at=(0:10)/10,labels=paste((0:10)*10,"%",sep=""),cex.axis=.9)
  axis(side=2,at=seq(54,72,2),labels=seq(54,72,2))
  points(surv_results$p_known,surv_results$parent_median,type='b',col=pcolor,pch=18,lwd=1.2,cex=1.3)
  points(surv_results$p_known,surv_results$child_median, type='b',col=ccolor,pch=18,lwd=1.2,cex=1.3)
  legend('bottomright',c('parents','children'),col=c(pcolor,ccolor),lwd=1.2,pch=18)
  mtext(side=3, line=-2, text="E", cex=2, adj=0, outer=TRUE)
  dev.off()
}
# --- Optional cap on the number of ascertained pairs ---------------------
# simple survival analysis with all available pairs.
# this is equivalent to running the above loop with p_known = 1
# include pairs with one member alive and well in 2013. don't include pairs with an intercurrent death for this
# particular analysis
# surv_include = (parent_asc & child_asc) | (parent_asc & child_is_aw) | (child_asc & parent_is_aw)
# survdf = getsurvdf(parent_age[surv_include],child_age[surv_include],parent_event[surv_include],child_event[surv_include])
# msurv = with(survdf,Surv(ages, status==1))
# mfit = survfit(Surv(ages,status==1) ~ group, data = survdf)
# diffobj = survdiff(Surv(ages,status==1) ~ group, data=survdf)
# p = 1-pchisq(diffobj$chisq,df=1)
# subt = '' # paste('log-rank test p value = ',formatC(p,digits=2),sep='')
# medians = summary(mfit)$table[,"median"] # extract medians from summary
# med_diff = as.integer(medians[2] - medians[1]) # take difference of medians of survival curves
# if desired by user, limit the final n post-ascertainment
# this feature is only fully implemented in batch mode, results unpredictable in interactive mode
if (!is.na(opt$limit_n)) {
  both_asc_indices = which(both_asc) # convert bool to indices
  # NOTE(review): if fewer than opt$limit_n pairs are ascertained, 1:opt$limit_n
  # over-indexes both_asc_indices and introduces NAs -- confirm callers always
  # supply limit_n <= sum(both_asc)
  both_asc_indices_limited = both_asc_indices[1:opt$limit_n] # limit number of indices
  both_asc = 1:nparents %in% both_asc_indices_limited # convert back to boolean vector
  # now handle the parent_asc and child_asc vectors. assume you get lone parents and
  # children up to the last both_asc index
  max_asc_index = max(which(both_asc))
  # NOTE(review): these assignments also clear parent_asc/child_asc AT
  # max_asc_index itself, where both_asc is TRUE -- was
  # (max_asc_index+1):nparents intended? confirm.
  parent_asc[max_asc_index:nparents] = FALSE
  child_asc[max_asc_index:nparents] = FALSE
}
# --- Diagnostic histograms of year of onset, post-ascertainment ----------
# now re-assess anticipation, heritability etc now that we've ascertained
if (imgs) {
  # one histogram over every ascertained individual (parents and children pooled)
  png(file.path(imgdir,'yoa.all.asc.indiv.hist.png'),width=500,height=500)
  # distribution of year of onset in all ascertained individuals
  hist(c(parent_onset_year[parent_asc], child_onset_year[child_asc]), breaks=10, col='black', xlim=c(parent_yob_min, parent_yob_max+120))
  dev.off()
  # same plot restricted to fully ascertained parent/child pairs
  png(file.path(imgdir,'yoa.both.asc.pairs.png'),width=500,height=500)
  # or you can plot only for _pairs_ that are ascertained
  hist(c(parent_onset_year[both_asc], child_onset_year[both_asc]), breaks=10, col='black', xlim=c(parent_yob_min, parent_yob_max+120))
  dev.off()
}
# --- Year-of-birth vs age-of-onset regression in ascertained data --------
# Pools parents and children, then regresses age of onset on year of birth;
# the slope illustrates the ascertainment artifact plotted as fig 1C.
# very strong correlation betwen yob and ao in ascertained data
all_yob_cens = c(parent_yob[parent_asc], child_yob[child_asc])
all_onset_cens = c(parent_onset_age[parent_asc], child_onset_age[child_asc])
m = lm(all_onset_cens ~ all_yob_cens)
if (opt$fullmode) {
  # top-level value of the if expression auto-prints the regression summary
  summary(m)
}
if (imgs) {
  slope = summary(m)$coefficients[2,1] # fitted slope (years of onset per birth year)
  pval = summary(m)$coefficients[2,4]  # p-value for that slope
  subtitle = '' # paste('slope of ',formatC(slope,digits=2,format='f'),' at p = ',formatC(pval,digits=2),sep='')
  png(file.path(imgdir,'fig1c.png'),res=300,pointsize=3,width=500,height=500)
  plot(c(parent_yob[parent_asc], child_yob[child_asc]), c(parent_onset_age[parent_asc], child_onset_age[child_asc]), pch=19, ylim=c(0,120),
       xlab='Year of birth', ylab='Age of onset', main='Artifactual year of birth - age of onset correlation', sub=subtitle)
  abline(m, col='red', lwd=2) # overlay the fitted regression line
  mtext(side=3, line=-2, text="C", cex=2, adj=0, outer=TRUE) # panel label
  dev.off()
}
# --- Apparent heritability: parent-offspring regression (fig 1D) ---------
# show heritability in ascertained data via parent-offspring regression
# there is no correlation here in mode 2.
m = lm(child_onset_age[both_asc] ~ parent_onset_age[both_asc])
if(opt$fullmode) {
  summary(m)
}
if (imgs) {
  png(file.path(imgdir,'fig1d.png'),res=300,pointsize=3,width=500,height=500)
  slope = summary(m)$coefficients[2,1]
  pval = summary(m)$coefficients[2,4]
  subtitle = '' # paste('slope of ',formatC(slope,digits=2,format='f'),' at p = ',formatC(pval,digits=2),sep='')
  plot(parent_onset_age[both_asc], child_onset_age[both_asc], pch=19, xaxt='n', yaxt='n', xlab='', ylab='', main='Age of onset of ascertained pairs', sub=subtitle)
  # custom axes so parent/child labels carry their respective plot colors
  axis(side=1,col=pcolor,col.axis=pcolor)
  mtext(side=1,col=pcolor,text=expression(bold('Parent age of onset')),padj=3)
  axis(side=2,col=ccolor,col.axis=ccolor)
  mtext(side=2,col=ccolor,text=expression(bold('Child age of onset')),padj=-3)
  abline(m,col='red')
  mtext(side=3, line=-2, text="D", cex=2, adj=0, outer=TRUE) # panel label
  dev.off()
}
if (opt$fullmode) {
  # including year of birth in model abolishes heritability
  # NOTE(review): the next model is identical to the one fitted above (no
  # child_yob term), so its trailing comment appears to belong to the second
  # model two lines down -- confirm this first refit is intentional.
  m = lm(child_onset_age[both_asc] ~ parent_onset_age[both_asc])
  summary(m) # child_yob significant, parent_onset_age not significant
  m = lm(child_onset_age[both_asc] ~ parent_onset_age[both_asc] + child_yob[both_asc])
  summary(m) # child_yob significant, parent_onset_age not significant
}
# --- Apparent anticipation: paired t-test on ascertained pairs (fig 1B) --
# large difference (anticipation) in ascertained data
if (opt$fullmode) {
  cat("t test after ascertainment, with n limits\n")
  t.test(parent_onset_age[both_asc], child_onset_age[both_asc], alternative="two.sided", paired=TRUE) # 21 years, p = 1e-9
}
# NOTE(review): the same paired t-test is recomputed several times below
# (estimate, p-value, and again for plotting); harmless but redundant.
antic = t.test(parent_onset_age[both_asc], child_onset_age[both_asc], alternative="two.sided", paired=TRUE)$estimate
pval = t.test(parent_onset_age[both_asc], child_onset_age[both_asc], alternative="two.sided", paired=TRUE)$p.value
subtitle = '' # paste(formatC(antic,digits=1,format='f'),' years difference at p = ',formatC(pval,digits=2),sep='')
# two-row matrix: row 1 = parent onset ages, row 2 = child onset ages
temp1 = rbind(parent_onset_age[both_asc], child_onset_age[both_asc])
# keep only pairs where both members have a non-missing onset age
temp1 = temp1[,!is.na(temp1[1,]) & !is.na(temp1[2,])]
if (imgs) {
  png(file.path(imgdir,'fig1b.png'),res=300,pointsize=3,width=500,height=500)
  tobj = t.test(parent_onset_age[both_asc], child_onset_age[both_asc], alternative="two.sided", paired=TRUE) # 21 years, p = 1e-9
  # side-by-side bars for the first 25 complete pairs
  barplot(temp1[,1:25], beside=TRUE, col=c(pcolor,ccolor), border=NA, xlab='Parent/child pairs', ylab='Age of onset',
          main='Age of onset of ascertained pairs', sub=subtitle)
  legend('bottomleft',c('parents','children'),col=c(pcolor,ccolor),pch=15,bg='white')
  mtext(side=3, line=-2, text="B", cex=2, adj=0, outer=TRUE) # panel label
  dev.off()
}
# --- Robustness of anticipation under cohort stratification --------------
# Re-runs the one-sided paired t-test within each stratum used by
# Pocchiari 2013 (birth cohort, onset year, onset age, parent onset age).
if (opt$fullmode) {
  # test the robustness of observed anticipation to the stratifications
  # applied by Pocchiari 2013
  early_birth_cohort = child_yob < 1939 & both_asc
  late_birth_cohort = child_yob >= 1939 & both_asc
  early_death_year_cohort = child_onset_year < 2000 & both_asc
  late_death_year_cohort = child_onset_year >= 2000 & both_asc
  early_death_age_cohort = child_onset_age < 61 & both_asc
  late_death_age_cohort = child_onset_age >= 61 & both_asc
  parent_early_death_age_cohort = parent_onset_age < 70 & both_asc
  parent_late_death_age_cohort = parent_onset_age >= 70 & both_asc
  # one logical column per stratum; iterate and test each in turn
  cohorts = data.frame(early_birth_cohort,late_birth_cohort,early_death_year_cohort,
                       late_death_year_cohort,early_death_age_cohort,late_death_age_cohort,
                       parent_early_death_age_cohort,parent_late_death_age_cohort)
  for (i in 1:(dim(cohorts)[2])) {
    cohort = cohorts[,i]
    cohortname = names(cohorts)[i]
    print(paste("COHORT: ",cohortname,sep=""))
    if(length(child_onset_age[cohort]) > 1) {
      # one-sided: is child onset earlier than parent onset within this stratum?
      print(t.test(child_onset_age[cohort], parent_onset_age[cohort], paired=TRUE, alternative="less"))
    } else {
      # NOTE(review): asc_child_death_age is not defined anywhere in this
      # chunk; if this branch is ever reached it would error unless the
      # variable is created upstream -- confirm.
      print(paste("Not enough data points",asc_child_death_age))
    }
  }
}
# --- Survival-curve display (fig 1E) -------------------------------------
# NOTE(review): mfit and diffobj here hold the values from the LAST
# iteration of the p_known loop above -- confirm that is the intended fit.
if (opt$fullmode) {
  print(summary(mfit,times=c(40:60)),file=stdout()) # display survival results
  print(diffobj,file=stdout())
}
if (imgs) {
  png(file.path(imgdir,'fig1e.png'),res=300,pointsize=3,width=500,height=500)
  par(lend=1) # butt-style line ends for the legend/curves
  # NOTE(review): 'survivaldata' is not defined in this chunk (the loop above
  # builds 'survdf'); confirm survivaldata is created earlier in the script.
  plot(survfit(Surv(ages,status==1)~group,data=survivaldata),lwd=c(3,1.5),col=c(pcolor,ccolor),xlab='Age',ylab='Percent without onset',
       main='Survival analysis of parents vs. children', sub=subt, yaxt='n')
  axis(side=2, at=seq(0,1,.2), labels=paste(100*seq(0,1,.2),"%",sep="")) # percent labels
  legend('bottomleft',c('parents','children'),col=c(pcolor,ccolor),lwd=c(3,1.5),pch=15,bg='white')
  mtext(side=3, line=-2, text="E", cex=2, adj=0, outer=TRUE) # panel label
  dev.off()
}
# --- Final one-line summary written to stdout ----------------------------
# Collects the headline statistics of the run (anticipation, heritability,
# yob slope, each with its p-value) plus the simulation options, and writes
# them as a single tab-separated row with no header.
# prepare summary stats to print out
ttest = t.test(parent_onset_age[both_asc],child_onset_age[both_asc],paired=TRUE,alternative="two.sided")
antic = ttest$estimate   # mean parent-child onset difference (years)
anticp = ttest$p.value
# if(ttest$p.value < .001) {
# antic = ttest$estimate
# } else {
# antic = 'ns'
# }
# heritability estimate: twice the parent-offspring regression slope
m = lm(child_onset_age[both_asc] ~ parent_onset_age[both_asc])
herit = 2*summary(m)$coefficients[2,1]
heritp = summary(m)$coefficients[2,4]
# if(summary(m)$coefficients[2,4] < .001) {
# herit = 2*summary(m)$coefficients[2,1]
# } else {
# herit = 'ns'
# }
# slope of age of onset on year of birth (the ascertainment artifact)
m = lm(all_onset_cens ~ all_yob_cens)
yobslope = summary(m)$coefficients[2,1]
yobslopep = summary(m)$coefficients[2,4]
# if(summary(m)$coefficients[2,4] < .001) {
# yobslope = summary(m)$coefficients[2,1]
# } else {
# yobslope = 'ns'
# }
# heritability re-estimated with child year of birth as a covariate
m = lm(child_onset_age[both_asc] ~ parent_onset_age[both_asc] + child_yob[both_asc])
herit_wyob = 2*summary(m)$coefficients[2,1]
heritp_wyob = summary(m)$coefficients[2,4]
output = data.frame(opt$npairs, opt$limit_n, opt$pyearmin, opt$pyearmax, opt$amode, opt$rate, opt$amin, opt$amax, antic, anticp, herit, heritp, yobslope, yobslopep, herit_wyob, heritp_wyob)
write.table(output, file=stdout(), sep='\t', row.names=FALSE, col.names=FALSE, quote=FALSE)
# antic
# herit
# yobslope
|
14227d8aba142292f06de05a5fa5eaebc4dc01e4
|
bb6e0f698c434945a622b5b605a025ae92dc7729
|
/report/Rough Contour Limits.R
|
4f0a5ee076d40afc68f6d3d4f28d731d1feecdbb
|
[
"CC0-1.0"
] |
permissive
|
mvparrot/vis-serve
|
933f2f211b817b00177cd7b53b89f324ed882c0e
|
f2c307837d4d89054073864dedc8b352613c7920
|
refs/heads/master
| 2021-01-17T10:19:33.786055
| 2016-09-29T11:09:43
| 2016-09-29T11:09:43
| 56,582,980
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,318
|
r
|
Rough Contour Limits.R
|
# Rough Contour Limits
#
# For every combination of serve number x server x court side, estimate a 2-D
# kernel density (MASS::kde2d) of serve bounce locations over a fixed window
# of the service boxes, collect the density grids into one long data frame
# (contour_lim), and overlay one contour layer per combination on the
# court_service base plot, faceted by serve number and server.
#
# Depends on globals defined elsewhere: plot_perserve (bounce data with
# serve_num/server/side/center.x/center.y), MultipleGames(), court_service.
factor1 <- as.data.frame(levels(plot_perserve$serve_num))
factor2 <- MultipleGames(4, perserve = TRUE, server = TRUE)
factor3 <- c("Ad", "Deuce")
# accumulate per-combination grids in a list, then bind once (avoids the
# O(n^2) rbind-in-loop growth pattern)
grids <- list()
for (i in seq_len(nrow(factor1))) {
  for (j in seq_len(nrow(factor2))) {
    for (s in seq_along(factor3)) {
      f1 = factor1[i, 1]; f2 = factor2[j, 1]; f3 = factor3[s]
      out <- plot_perserve %>%
        filter(serve_num %in% f1, server %in% f2, side %in% f3)
      # fixed evaluation window: x in [-6.4, 0]; y-range depends on court side
      ylims <- if (f3 == "Ad") c(0, 4.115) else c(-4.115, 0)
      out <- kde2d(out$center.x, out$center.y,
                   lims = c(-6.4, 0, ylims))
      # flatten the kde2d grid into (x, y, z) rows tagged with the combination
      grids[[length(grids) + 1]] <- data.frame(expand.grid(center.x = out$x, center.y = out$y),
                                               z = as.vector(out$z)) %>%
        mutate(serve_num = f1, server = f2, side = f3)
    }
  }
}
contour_lim <- bind_rows(grids)
contour_lim <- contour_lim %>%
  distinct(.keep_all = TRUE) %>% # I get duplicates for some reason
  # BUG FIX: was paste(arg1, ...) -- 'arg1' is undefined anywhere in this
  # script; the first component of the combination key is serve_num.
  mutate(unique = paste(serve_num, server, side, sep = "."))
# one contour layer per serve_num.server.side combination
plot_split <- split(contour_lim, contour_lim$unique)
plotall <- court_service
for (p in seq_along(plot_split)) {
  plotall <- plotall +
    geom_contour(aes(x = center.x, y = center.y, z = z), data = plot_split[[p]])
}
plotall + facet_grid(serve_num ~ server)
|
291c76b1eac85a760ede18a14c6ba4e9341b349b
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/ICAOD/R/RcppExports.R
|
a3f40738aa345d06eb4bc4f6db9341dec7ed255c
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,300
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE(review): this file is regenerated by Rcpp::compileAttributes(); any
# change made here (including to the roxygen blocks) must be mirrored in the
# C++ sources under src/ or it will be lost on the next regeneration.
#' @title Fisher Information Matrix for a 2-Parameter Cox Proportional-Hazards Model for Type One Censored Data
#'
#' @description
#' It provides the cpp function for the FIM introduced in Eq. (3.1) of Schmidt and Schwabe (2015) for type one censored data.
#'
#'
#' @param x Vector of design points.
#' @param w Vector of design weight. Its length must be equal to the length of \code{x} and \code{sum(w) = 1}.
#' @param param Vector of values for the model parameters \eqn{c(\beta_0, \beta_1)}.
#' @param tcensor The experiment is terminated at the fixed time point \code{tcensor}.
#' @return Fisher information matrix.
#' @references Schmidt, D., & Schwabe, R. (2015). On optimal designs for censored data. Metrika, 78(3), 237-257.
#' @export
FIM_2par_exp_censor1 <- function(x, w, param, tcensor) {
    .Call('_ICAOD_FIM_2par_exp_censor1', PACKAGE = 'ICAOD', x, w, param, tcensor)
}
#' @title Fisher Information Matrix for a 2-Parameter Cox Proportional-Hazards Model for Random Censored Data
#'
#' @description
#' It provides the cpp function for the FIM introduced in Eq. (3.1) of Schmidt and Schwabe (2015) for random censored data (type two censored data).
#'
#'
#' @param x Vector of design points.
#' @param w Vector of design weight. Its length must be equal to the length of \code{x} and \code{sum(w) = 1}.
#' @param param Vector of values for the model parameters \eqn{c(\beta_0, \beta_1)}.
#' @param tcensor The experiment is terminated at the fixed time point \code{tcensor}.
#' @return Fisher information matrix.
#' @references Schmidt, D., & Schwabe, R. (2015). On optimal designs for censored data. Metrika, 78(3), 237-257.
#' @export
FIM_2par_exp_censor2 <- function(x, w, param, tcensor) {
    .Call('_ICAOD_FIM_2par_exp_censor2', PACKAGE = 'ICAOD', x, w, param, tcensor)
}
#' @title Fisher Information Matrix for a 3-Parameter Cox Proportional-Hazards Model for Type One Censored Data
#'
#' @description
#' It provides the cpp function for the FIM introduced in Page 247 of Schmidt and Schwabe (2015) for type one censored data.
#'
#'
#' @param x Vector of design points.
#' @param w Vector of design weight. Its length must be equal to the length of \code{x} and \code{sum(w) = 1}.
#' @param param Vector of values for the model parameters \eqn{c(\beta_0, \beta_1, \beta_2)}.
#' @param tcensor The experiment is terminated at the fixed time point \code{tcensor}.
#' @return Fisher information matrix.
#' @references Schmidt, D., & Schwabe, R. (2015). On optimal designs for censored data. Metrika, 78(3), 237-257.
#' @export
FIM_3par_exp_censor1 <- function(x, w, param, tcensor) {
    .Call('_ICAOD_FIM_3par_exp_censor1', PACKAGE = 'ICAOD', x, w, param, tcensor)
}
#' @title Fisher Information Matrix for a 3-Parameter Cox Proportional-Hazards Model for Random Censored Data
#'
#' @description
#' It provides the cpp function for the FIM introduced in Page 247 of Schmidt and Schwabe (2015) for random censored data (type two censored data).
#'
#'
#' @param x Vector of design points.
#' @param w Vector of design weight. Its length must be equal to the length of \code{x} and \code{sum(w) = 1}.
#' @param param Vector of values for the model parameters \eqn{(\beta_0, \beta_1, \beta_2)}.
#' @param tcensor The experiment is terminated at the fixed time point \code{tcensor}.
#' @return Fisher information matrix.
#' @references Schmidt, D., & Schwabe, R. (2015). On optimal designs for censored data. Metrika, 78(3), 237-257.
#' @export
FIM_3par_exp_censor2 <- function(x, w, param, tcensor) {
    .Call('_ICAOD_FIM_3par_exp_censor2', PACKAGE = 'ICAOD', x, w, param, tcensor)
}
#' @title Fisher Information Matrix for the 2-Parameter Exponential Model
#'
#' @description
#' It provides the cpp function for FIM for the model \code{~a + exp(-b*x)}.
#'
#' @param x Vector of design points.
#' @param w Vector of design weight. Its length must be equal to the length of \code{x} and \code{sum(w) = 1}.
#' @param param Vector of values for the model parameters \code{c(a, b)}.
#' @return Fisher information matrix.
#' @references Dette, H., & Neugebauer, H. M. (1997). Bayesian D-optimal designs for exponential regression models. Journal of Statistical Planning and Inference, 60(2), 331-349.
#' @details The FIM does not depend on the value of \code{a}.
#' @examples FIM_exp_2par(x = c(1, 2), w = c(.5, .5), param = c(3, 4))
#' @export
FIM_exp_2par <- function(x, w, param) {
    .Call('_ICAOD_FIM_exp_2par', PACKAGE = 'ICAOD', x, w, param)
}
#' @title Fisher Information Matrix for the Alcohol-Kinetics Model
#' @description It provides the cpp function for FIM for the model \code{~(b3 * x1)/(1 + b1 * x1 + b2 * x2)}
#' @param x1 Vector of design points (first dimension).
#' @param x2 Vector of design points (second dimension).
#' @param w Vector of design weight. Its length must be equal to the length of \code{x1} and \code{x2}, and \code{sum(w) = 1}.
#' @param param Vector of values for the model parameters \code{c(b1, b2, b3)}.
#' @return Fisher information matrix.
#' @export
FIM_kinetics_alcohol <- function(x1, x2, w, param) {
    .Call('_ICAOD_FIM_kinetics_alcohol', PACKAGE = 'ICAOD', x1, x2, w, param)
}
#' @title Fisher Information Matrix for the 2-Parameter Logistic (2PL) Model
#' @description It provides the cpp function for FIM for the model \code{~1/(1 + exp(-b *(x - a)))}.
#' In item response theory (IRT),
#' \eqn{a} is the item difficulty parameter, \eqn{b} is the item discrimination parameter and \eqn{x} is the person ability parameter.
#' @param x Vector of design points.
#' @param w Vector of design weight. Its length must be equal to the length of \code{x} and \code{sum(w) = 1}.
#' @param param Vector of values for the model parameters \code{c(a, b)}.
#' @return Fisher information matrix.
#' @export
#' @details
#' It can be shown that minimax and standardized D-optimal designs for the 2PL model is symmetric around point
#' \eqn{a_M = (a^L + a^U)/2}{aM = (aL + aU)/2} where \eqn{a^L}{aL} and \eqn{a^U}{aU} are the
#' lower bound and upper bound for parameter \eqn{a}, respectively. In \code{\link{ICA.control}},
#' arguments \code{sym} and \code{sym_point} can be used to specify \eqn{a_M}{aM} and find accurate symmetric optimal designs.
#' @examples
#' FIM_logistic(x = c(1, 2), w = c(.5, .5), param = c(2, 1))
#' @importFrom Rcpp evalCpp
#' @useDynLib ICAOD
FIM_logistic <- function(x, w, param) {
    .Call('_ICAOD_FIM_logistic', PACKAGE = 'ICAOD', x, w, param)
}
#' @title Fisher Information Matrix for the Logistic Model with Two Predictors
#' @description It provides the cpp function for FIM for the following model:\cr
#'  \code{~exp(b0+ b1 * x1 + b2 * x2 + b3 * x1 * x2)/(1 + exp(b0 + b1 * x1 + b2 * x2 + b3 * x1 * x2))}.
#' @param x1 Vector of design points (for first predictor).
#' @param x2 Vector of design points (for second predictor).
#' @param w Vector of design weight. Its length must be equal to the length of \code{x1} and \code{x2}, and \code{sum(w) = 1}.
#' @param param Vector of values for the model parameters \code{c(b0, b1, b2, b3)}.
#' @return Fisher information matrix.
#' @export
FIM_logistic_2pred <- function(x1, x2, w, param) {
    .Call('_ICAOD_FIM_logistic_2pred', PACKAGE = 'ICAOD', x1, x2, w, param)
}
#' @title Fisher Information Matrix for the 4-Parameter Logistic Model
#'
#' @description It provides the cpp function for the FIM for the model
#'  \code{~theta1/(1+exp(theta2*x+theta3))+theta4}.
#' This model is another re-parameterization of the 4-parameter Hill model.
#'  For more details, see Eq. (1) and (2) in Hyun and Wong (2015).
#' @param x Vector of design points.
#' @param w Vector of design weight. Its length must be equal to the length of \code{x} and \code{sum(w) = 1}.
#' @param param Vector of values for the model parameters \code{c(theta1, theta2, theta3, theta4)}.
#' @return Fisher information matrix.
#' @details The fisher information matrix does not depend on \code{theta4}.\cr
#' @references
#' Hyun, S. W., & Wong, W. K. (2015). Multiple-Objective Optimal Designs for Studying the Dose Response Function and Interesting Dose Levels. The international journal of biostatistics, 11(2), 253-271.
#' @seealso \code{\link{multiple}}
#' @export
#' @examples
#' FIM_logistic_4par(x = c(-6.9, -4.6, -3.9, 6.7 ),
#'                   w = c(0.489, 0.40, 0.061, 0.050),
#'                   param = c(1.563, 1.790, 8.442, 0.137))
FIM_logistic_4par <- function(x, w, param) {
    .Call('_ICAOD_FIM_logistic_4par', PACKAGE = 'ICAOD', x, w, param)
}
#' @title Fisher Information Matrix for the Log-Linear Model
#'
#' @description It provides the cpp function for the FIM for the model \code{~theta0 + theta1* log(x + theta2)}.
#'
#' @param x Vector of design points.
#' @param w Vector of design weight. Its length must be equal to the length of \code{x} and \code{sum(w) = 1}.
#' @param param Vector of values for the model parameters \code{c(theta0, theta1, theta2)}.
#' @return Fisher information matrix.
#' @references Dette, H., Kiss, C., Bevanda, M., & Bretz, F. (2010). Optimal designs for the EMAX, log-linear and exponential models. Biometrika, 97(2), 513-518.
#' @details
#' The FIM of this model does not depend on the parameter \code{theta0}.
#' @export
FIM_loglin <- function(x, w, param) {
    .Call('_ICAOD_FIM_loglin', PACKAGE = 'ICAOD', x, w, param)
}
#' @title Fisher Information Matrix for the Mixed Inhibition Model.
#'
#' @description
#'  It provides the cpp function for FIM for the model \code{~ V*S/(Km * (1 + I/Kic)+ S * (1 + I/Kiu))}
#'
#' @param S Vector of \code{S} component of design points. \code{S} is the substrate concentration.
#' @param I Vector of \code{I} component of design points. \code{I} is the inhibitor concentration.
#' @param w Vector of design weight. Its length must be equal to the length of \code{S} and \code{I}, besides \code{sum(w) = 1}.
#' @param param Vector of values for the model parameters \code{c(V, Km, Kic, Kiu)}.
#' @return Fisher information matrix of design.
#' @references Bogacka, B., Patan, M., Johnson, P. J., Youdim, K., & Atkinson, A. C. (2011). Optimum design of experiments for enzyme inhibition kinetic models. Journal of biopharmaceutical statistics, 21(3), 555-572.
#' @details
#'  The optimal design does not depend on parameter \eqn{V}.
#' @examples
#' FIM_mixed_inhibition(S = c(30, 3.86, 30, 4.60),
#'                      I = c(0, 0, 5.11, 4.16), w = rep(.25, 4),
#'                      param = c(1.5, 5.2, 3.4, 5.6))
#' @export
FIM_mixed_inhibition <- function(S, I, w, param) {
    .Call('_ICAOD_FIM_mixed_inhibition', PACKAGE = 'ICAOD', S, I, w, param)
}
#' @title Fisher Information Matrix for the Power Logistic Model
#' @description It provides the cpp function for FIM for the model \code{~1/(1 + exp(-b *(x - a)))^s}, but when \code{s} is fixed (a two by two matrix).
#' @param x Vector of design points.
#' @param w Vector of design weight. Its length must be equal to the length of \code{x} and \code{sum(w) = 1}.
#' @param param Vector of values for the model parameters \code{c(a, b)}.
#' @param s parameter \code{s}.
#' @return Fisher information matrix.
#' @export
#' @note This matrix is a two by two matrix and not equal to the Fisher information matrix for the power logistic model
#'  when the derivative is taken with respect to all the three parameters.
#'  This matrix is only given to be used in some illustrative examples.
FIM_power_logistic <- function(x, w, param, s) {
    .Call('_ICAOD_FIM_power_logistic', PACKAGE = 'ICAOD', x, w, param, s)
}
#' @title Fisher Information Matrix for the Sigmoid Emax Model
#' @description It provides the cpp function for FIM for the model \code{~b1+(b2-b1)*(x^b4)/(x^b4+b3^b4)},
#' i.e. the mean of the response variable is \code{b1+(b2-b1)*(x^b4)/(x^b4+b3^b4)}.
#' @param x Vector of design points.
#' @param w Vector of design weight. Its length must be equal to the length of \code{x} and \code{sum(w) = 1}.
#' @param param Vector of values for the model parameters \code{c(b1, b2, b3, b4)}.
#' @return Fisher information matrix.
#' @export
FIM_sig_emax <- function(x, w, param) {
    .Call('_ICAOD_FIM_sig_emax', PACKAGE = 'ICAOD', x, w, param)
}
# Internal (not exported) wrapper around the compiled '_ICAOD_det2' routine.
# Judging by its signature it returns a matrix determinant, optionally on the
# log scale when logarithm = TRUE -- confirm against the C++ source.
det2 <- function(mat, logarithm = FALSE) {
    .Call('_ICAOD_det2', PACKAGE = 'ICAOD', mat, logarithm)
}
|
81386e16a68cd11c600930030834a3eee29e4a1b
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/6194_0/rinput.R
|
90e9193f3c3f74a105c1f4f0d5e461f8957c9262
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Read the Newick tree for alignment 6194_0, strip its root, and write the
# unrooted tree back out in Newick format (input for downstream codeml runs).
library(ape)
rooted_tree <- read.tree("6194_0.txt")
write.tree(unroot(rooted_tree), file = "6194_0_unrooted.txt")
|
0aa0644414d72c87ed02202c0819f04bbf7d0635
|
789bd8e74dc9c3bbb73ac1ff15082e0124cca0d0
|
/man/setResource.Rd
|
d3c7bc984f2f471d3870c06bce956f542db34551
|
[] |
no_license
|
sizespectrum/mizer
|
58e8f41a85c035728498ae99692de1f719552fc2
|
d6dbc6bcb2dbbca6c6ecf072caad7a998b54ed8b
|
refs/heads/master
| 2023-06-08T11:42:57.079370
| 2023-06-02T13:45:35
| 2023-06-02T13:45:35
| 5,898,893
| 30
| 31
| null | 2021-09-03T11:53:31
| 2012-09-21T08:51:19
|
R
|
UTF-8
|
R
| false
| true
| 6,571
|
rd
|
setResource.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/setResource.R
\name{setResource}
\alias{setResource}
\alias{resource_rate}
\alias{resource_rate<-}
\alias{resource_capacity}
\alias{resource_capacity<-}
\alias{resource_level}
\alias{resource_level<-}
\alias{resource_dynamics}
\alias{resource_dynamics<-}
\title{Set resource dynamics}
\usage{
setResource(
params,
resource_rate = NULL,
resource_capacity = NULL,
resource_level = NULL,
resource_dynamics = NULL,
balance = NULL,
lambda = resource_params(params)[["lambda"]],
n = resource_params(params)[["n"]],
w_pp_cutoff = resource_params(params)[["w_pp_cutoff"]],
r_pp = deprecated(),
kappa = deprecated(),
...
)
resource_rate(params)
resource_rate(params) <- value
resource_capacity(params)
resource_capacity(params) <- value
resource_level(params)
resource_level(params) <- value
resource_dynamics(params)
resource_dynamics(params) <- value
}
\arguments{
\item{params}{A MizerParams object}
\item{resource_rate}{Optional. Vector of resource intrinsic birth rates or
coefficient in the power-law for the birth rate, see Details. Must be
strictly positive.}
\item{resource_capacity}{Optional. Vector of resource intrinsic carrying
capacities or coefficient in the power-law for the capacity, see Details.
The resource capacity must be larger than the resource abundance.}
\item{resource_level}{Optional. The ratio between the current resource number
density and the resource capacity. Either a number used at all sizes or a
vector specifying a value for each size. Must be strictly between 0 and 1,
except at sizes where the resource is zero, where it can be \code{NaN}. This
determines the resource capacity, so do not specify both this and
\code{resource_capacity}.}
\item{resource_dynamics}{Optional. Name of the function that determines the
resource dynamics by calculating the resource spectrum at the next time
step from the current state.}
\item{balance}{By default, if possible, the resource parameters are
set so that the resource replenishes at the same rate at which it is
consumed. In this case you should only specify either the resource rate
or the resource capacity (or resource level) because the other is then
determined automatically. Set to FALSE if you do not want the balancing.}
\item{lambda}{Used to set power-law exponent for resource capacity if the
\code{resource_capacity} argument is given as a single number.}
\item{n}{Used to set power-law exponent for resource rate if the
\code{resource_rate} argument is given as a single number.}
\item{w_pp_cutoff}{The upper cut off size of the resource spectrum power law
used only if \code{resource_capacity} is given as a single number.}
\item{r_pp}{\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}}. Use \code{resource_rate} argument
instead.}
\item{kappa}{\ifelse{html}{\href{https://lifecycle.r-lib.org/articles/stages.html#deprecated}{\figure{lifecycle-deprecated.svg}{options: alt='[Deprecated]'}}}{\strong{[Deprecated]}}. Use \code{resource_capacity}
argument instead.}
\item{...}{Unused}
\item{value}{The desired new value for the respective parameter.}
}
\value{
\code{setResource}: A MizerParams object with updated resource parameters
}
\description{
Sets the intrinsic resource growth rate and the intrinsic resource carrying
capacity as well as the name of the function used to simulate the resource
dynamics. By default this function changes both the rate and the capacity
together in such a way that the resource replenishes at the same rate at
which it is consumed.
}
\section{Setting resource dynamics}{
You would usually set the resource dynamics only after having finished the
calibration of the steady state. Then setting the resource dynamics with
this function will preserve that steady state, unless you explicitly
choose to set \code{balance = FALSE}. Your choice of the resource dynamics only
affects the dynamics around the steady state. The higher the resource rate
or the lower the resource capacity the less sensitive the model will be to
changes in the competition for resource.
The \code{resource_dynamics} argument allows you to choose the resource dynamics
function. By default, mizer uses a semichemostat model to describe the
resource dynamics in each size class independently. This semichemostat
dynamics is implemented by the function \code{\link[=resource_semichemostat]{resource_semichemostat()}}. You can
change that to use a logistic model implemented by \code{\link[=resource_logistic]{resource_logistic()}} or
you can use \code{\link[=resource_constant]{resource_constant()}} which keeps the resource constant or you
can write your own function.
Both the \code{\link[=resource_semichemostat]{resource_semichemostat()}} and the \code{\link[=resource_logistic]{resource_logistic()}} dynamics
are parametrised in terms of a size-dependent rate \eqn{r_R(w)} and a
size-dependent capacity \eqn{c_R}. The help pages of these functions give
the details.
The \code{resource_rate} argument can be a vector (with the same length as
\code{w_full(params)}) specifying the intrinsic resource growth rate for each size
class. Alternatively it can be a single number, which is then used as the
coefficient in a power law: then the intrinsic growth rate \eqn{r_R(w)} at
size \eqn{w} is set to
\deqn{r_R(w) = r_R w^{n-1}.}
The power-law exponent \eqn{n} is taken from the \code{n} argument.
The \code{resource_capacity} argument can be a vector specifying the intrinsic
resource carrying capacity for each size class. Alternatively it can be a
single number, which is then used as the coefficient in a truncated power
law: then the intrinsic carrying capacity \eqn{c_R(w)} at size \eqn{w} is set to
\deqn{c(w) = \kappa\, w^{-\lambda}}{c(w) = \kappa w^{-\lambda}}
for all \eqn{w} less than \code{w_pp_cutoff} and zero for larger sizes.
The power-law exponent \eqn{\lambda} is taken from the \code{lambda} argument.
The values for \code{lambda}, \code{n} and \code{w_pp_cutoff} are stored in a list in the
\code{resource_params} slot of the MizerParams object so that they can be re-used
automatically in the future. That list can be accessed with
\code{\link[=resource_params]{resource_params()}}. It also holds the coefficient \code{kappa} that describes the
steady-state resource abundance.
}
\examples{
params <- NS_params
resource_dynamics(params)
resource_dynamics(params) <- "resource_constant"
}
\concept{resource parameters}
|
20b44b0bd1bede9da71d2a3c0a8793c18ba8b17b
|
657cb8d31a7edde2ba866b43417fcff13168025c
|
/tests/testthat/test_filters_management.R
|
2e448c8ffff349fef9e78f06547715bbfaae4d00
|
[] |
no_license
|
jdeboer/googleAnalyticsR
|
d026832e1d48787c5c0bc58489a2932cc8ddb5b2
|
8ca31878fff7fb3b0215acad810e137b609f7018
|
refs/heads/master
| 2023-01-20T04:53:50.946223
| 2020-11-24T19:41:38
| 2020-11-24T19:41:38
| 48,536,792
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,492
|
r
|
test_filters_management.R
|
# Integration tests for the filter-management wrappers; they hit the live
# Google Analytics Management API, so every test is skipped on CRAN/Travis.
# setup.R presumably defines the global account/property/view IDs used below
# (accountId, accountId2, webPropId, webPropId2, ga_id, ga_id2) -- confirm.
source("setup.R")
context("Filter management")
# Listing filters linked to a specific view returns a data frame.
test_that("Get Filter View list", {
  skip_on_cran()
  skip_on_travis()
  fits <- ga_filter_view_list(accountId = accountId,
                              webPropertyId = webPropId,
                              viewId = ga_id)
  expect_s3_class(fits, "data.frame")
})
# Listing all filters on an account returns a data frame.
test_that("Get Filter list for account", {
  skip_on_cran()
  skip_on_travis()
  fits <- ga_filter_list(accountId)
  expect_s3_class(fits, "data.frame")
})
# Fetching one filter by its (hard-coded, pre-existing) ID returns a filter
# resource.
test_that("Get Specific Filter", {
  skip_on_cran()
  skip_on_travis()
  fits <- ga_filter(accountId, filterId = "22248057")
  expect_equal(fits$kind, "analytics#filter")
})
# Fetching one profile-filter link by its composite linkId returns a
# profileFilterLink resource.
test_that("Get Filter View", {
  skip_on_cran()
  skip_on_travis()
  fits <- ga_filter_view(accountId,
                         webPropertyId = webPropId,
                         viewId = ga_id,
                         linkId = "106249469:22248057")
  expect_equal(fits$kind, "analytics#profileFilterLink")
})
# Thu Feb  8 12:56:55 2018 ------------------------------
# Full lifecycle test: create a filter AND link it to a view, update the
# link's rank, then delete the filter (removing it from the view as well)
# so the live test account is left clean.
test_that("Add filter to the view", {
  skip_on_cran()
  skip_on_travis()
  # filter definition as the nested list shape the Management API expects
  Filter <- list(
    name = 'googleAnalyticsR test: Exclude Internal Traffic',
    type = 'EXCLUDE',
    excludeDetails = list(
      field = 'GEO_IP_ADDRESS',
      matchType = 'EQUAL',
      expressionValue = '199.04.123.1',
      caseSensitive = 'False'
    )
  )
  # linkFilter = TRUE creates the filter and links it to the view in one call
  response <- ga_filter_add(Filter,
                            accountId = accountId2,
                            webPropertyId = webPropId2,
                            viewId = ga_id2,
                            linkFilter = TRUE)
  expect_equal(response$kind, "analytics#profileFilterLink")
  ## move rank to 1
  viewFilterLink <- list(rank = 1)
  response2 <- ga_filter_update_filter_link(viewFilterLink,
                                            accountId = accountId2,
                                            webPropertyId = webPropId2,
                                            viewId = ga_id2,
                                            linkId = response$id)
  expect_equal(response2$kind, "analytics#profileFilterLink")
  expect_equal(response2$rank, 1)
  # clean up: delete the filter and detach it from the view
  del <- ga_filter_delete(accountId = accountId2,
                          webPropertyId = webPropId2,
                          viewId = ga_id2,
                          filterId =response2$filterRef$id,
                          removeFromView = TRUE)
  expect_true(del)
})
test_that("Add filter to the account, but not link", {
skip_on_cran()
skip_on_travis()
Filter <- list(
name = 'googleAnalyticsR test2: Exclude Internal Traffic',
type = 'EXCLUDE',
excludeDetails = list(
field = 'GEO_IP_ADDRESS',
matchType = 'EQUAL',
expressionValue = '199.04.123.1',
caseSensitive = 'False'
)
)
filterId <- ga_filter_add(Filter,
accountId = accountId2,
linkFilter = FALSE)
expect_type(filterId, "character")
test_name <- "googleAnalyticsR test3: Changed name via PATCH"
filter_to_update <- list(name = test_name)
patched <- ga_filter_update(filter_to_update, accountId2, filterId, method = "PATCH")
expect_equal(patched$kind, "analytics#filter")
expect_equal(patched$name, test_name)
# delete the filter
del <- ga_filter_delete(accountId = accountId2,
filterId = filterId)
expect_true(del)
})
|
8c5dec82444bc633bf6bf0ca4b5db5f3b1da56b0
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Rpdb/examples/translation.Rd.R
|
834d552bb50aa669c4a7c10e5c9c822b4b61c881
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 902
|
r
|
translation.Rd.R
|
# Auto-extracted example script for the Rpdb 'translation' help page.
# Demonstrates Txyz (Cartesian translation) and Tabc (translation in
# fractional/lattice coordinates) on PDB objects; the mask argument
# restricts the translation to a subset of atoms.
library(Rpdb)
### Name: translation
### Title: Translation of Atomic Coordinates
### Aliases: Tabc Tabc.coords Tabc.pdb Txyz Txyz.coords Txyz.pdb
###   translation
### Keywords: manip
### ** Examples
# First lets read a pdb file
x <- read.pdb(system.file("examples/PCBM_ODCB.pdb",package="Rpdb"))
visualize(x, mode = NULL)
# Translate along y; the mask variants move only atoms with resid == 1.
visualize(Txyz(x, y=10), mode = NULL)
visualize(Txyz(x, y=10, mask=x$atoms$resid==1), mode = NULL)
# Same idea in fractional coordinates (one lattice vector along b).
visualize(Tabc(x, b=1 ), mode = NULL)
visualize(Tabc(x, b=1 , mask=x$atoms$resid==1), mode = NULL)
# Lets build a C70/Pentacene dimer with an inter-molecular distance equal to 3.5
C70 <- read.pdb(system.file("examples/C70.pdb",package="Rpdb"))
Pen <- read.pdb(system.file("examples/Pentacene.pdb",package="Rpdb"))
x <- merge(C70,Pen)
visualize(x, mode = NULL)
viewXY()
# Shift only the C70 residue up by 3.5 along z (thickness = 0.5 — see
# ?Txyz for its exact meaning; TODO confirm).
visualize(Txyz(x, x=0, y=0, z=3.5, mask=x$atoms$resname=="C70", thickness=0.5), mode = NULL)
viewXY()
|
9ec0264424e863068289f34197f3dddd3b460d71
|
becf7caa63d95071221c4120c0f238626b46100a
|
/man/FEM.time.Rd
|
01b8e07fcad05e4d8f9d4e66b4dadc7fb01668fe
|
[] |
no_license
|
cran/fdaPDE
|
1acaf7798553c5582294cbb99c3a7207b78ccb55
|
4c275d6bfbcbbdef81e094fc977e60f68d1e24f3
|
refs/heads/master
| 2023-03-09T17:20:47.497214
| 2023-03-01T07:22:39
| 2023-03-01T07:22:39
| 49,715,847
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,832
|
rd
|
FEM.time.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FEMobjects.R
\name{FEM.time}
\alias{FEM.time}
\title{Define a spatio-temporal field by a Finite Element basis expansion}
\usage{
FEM.time(coeff,time_mesh,FEMbasis,FLAG_PARABOLIC=FALSE)
}
\arguments{
\item{coeff}{A vector or a matrix containing the coefficients for the spatio-temporal basis expansion. The number of rows
(or the vector's length) corresponds to the number of basis in \code{FEMbasis} times the number of knots in \code{time_mesh}.}
\item{time_mesh}{A vector containing the b-splines knots for separable smoothing and the nodes for finite differences for parabolic smoothing}
\item{FEMbasis}{A \code{FEMbasis} object defining the Finite Element basis, created by \link{create.FEM.basis}.}
\item{FLAG_PARABOLIC}{Boolean. If \code{TRUE} the coefficients are from parabolic smoothing, if \code{FALSE} the separable one.}
}
\value{
A \code{FEM.time} object. This contains a list with components \code{coeff}, \code{mesh_time}, \code{FEMbasis} and \code{FLAG_PARABOLIC}.
}
\description{
This function defines a FEM.time object.
}
\examples{
library(fdaPDE)
## Upload the horseshoe2D data
data(horseshoe2D)
## Create the 2D mesh
mesh = create.mesh.2D(nodes = rbind(horseshoe2D$boundary_nodes, horseshoe2D$locations),
segments = horseshoe2D$boundary_segments)
## Create the FEM basis
FEMbasis = create.FEM.basis(mesh)
## Compute the coeff vector evaluating the desired function at the mesh nodes
## In this case we consider the fs.test() function introduced by Wood et al. 2008
coeff = rep(fs.test(mesh$nodes[,1], mesh$nodes[,2]),5)
time_mesh = seq(0,1,length.out = 5)
## Create the FEM object
FEMfunction = FEM.time(coeff, time_mesh, FEMbasis, FLAG_PARABOLIC = TRUE)
## Plot it at desired time
plot(FEMfunction,0.7)
}
|
0eedeac7d232479c0d4492df2893dc1d6fa29255
|
6cbb7fdf84de20904947ce318da6adad5586a6b9
|
/plot3.R
|
f1083927382b1e54994dead11b15e00c8c02d8a8
|
[] |
no_license
|
c0nanh/ExData_Plotting1
|
26e57482817c57cb48a46a9866a80293228b87ae
|
b4b6868ecd83a9b6ac79e770efa07d98f05ee54a
|
refs/heads/master
| 2021-01-15T21:20:19.666418
| 2015-03-08T04:51:57
| 2015-03-08T04:51:57
| 31,834,487
| 0
| 0
| null | 2015-03-08T01:32:28
| 2015-03-08T01:32:28
| null |
UTF-8
|
R
| false
| false
| 2,367
|
r
|
plot3.R
|
# plot3.R — Coursera Exploratory Data Analysis, project 1, plot 3.
# Reads two days (2007-02-01 and 2007-02-02) of the UCI household power
# consumption data and plots the three sub-metering series to plot3.png.
ColumnNames <- c("Date", "Time", "Global_active_power", "Global_reactive_power", "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2", "Sub_metering_3")
#set path to where the source data is here:
FilePath <- "~/Dropbox/Public/Coursera/Exploratory Data Analysis/Week 1/Household Power/household_power_consumption.txt"
# Slow way: read the whole file, then subset to the two target dates.
#household_power_consumption <- read.csv(FilePath,
#       colClasses = c('myDate', 'character','numeric','numeric','numeric','numeric','numeric','numeric','numeric'),
#       header = TRUE,
#       sep = ";",
#       na.strings = c("?"))
#household_power_consumption <- household_power_consumption[household_power_consumption$Date == as.Date("2007-02-01") | household_power_consumption$Date == as.Date("2007-02-02"),]
# Fast way: skip straight to the rows for the two target dates.
# NOTE(review): skip = 66636 / nrows = 2880 hard-code the row positions of
# 2007-02-01/02 in this specific source file — breaks if the file changes.
household_power_consumption <- read.csv(FilePath,
colClasses = c('character', 'character','numeric','numeric','numeric','numeric','numeric','numeric','numeric'),
col.names = ColumnNames,
sep = ";",
na.strings = c("?"),
skip = 66636,
nrows = 2880)
# Combine the Date and Time text columns into a single date-time column.
household_power_consumption$DateTime <- strptime(paste(household_power_consumption$Date, household_power_consumption$Time), "%d/%m/%Y %H:%M")
# Open the PNG graphics device (480x480 px, per assignment spec).
png("plot3.png", width=480, height=480, units="px")
# One colour per sub-metering series (same order as the legend below).
plot_colours <- c("black", "red", "blue")
# Base plot: sub metering 1 as a line, axes labelled per assignment.
plot(household_power_consumption$DateTime,
household_power_consumption$Sub_metering_1,
col=plot_colours[1],
type="l",
ylab="Energy sub metering",
xlab = "")
# Overlay sub metering 2.
lines(household_power_consumption$DateTime,
household_power_consumption$Sub_metering_2,
col=plot_colours[2],
type="l")
# Overlay sub metering 3.
lines(household_power_consumption$DateTime,
household_power_consumption$Sub_metering_3,
col=plot_colours[3],
type="l")
# Legend labels come from the data frame's own column names (cols 7:9).
legend("topright", legend=names(household_power_consumption[,7:9]),cex=0.8, col=plot_colours, lwd=2)
dev.off()
|
93c95705d380dba246ffdf5846114e587d0370b4
|
788d33afc66b34f15c7c345a1a220d2f012d9bb7
|
/bin/DSS.R
|
3082713b3c9a78d1cf8d1e06ba1686da1a5d9284
|
[] |
no_license
|
Shicheng-Guo/tissueoforigin
|
d02283cb3ad6af77c44a06a8c49f888854ea7d0b
|
819792046b013277f4b8fbdc3d5d08ca75e69695
|
refs/heads/master
| 2020-09-08T07:21:15.846358
| 2020-01-18T01:50:44
| 2020-01-18T01:50:44
| 221,059,067
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,811
|
r
|
DSS.R
|
setwd("/home/local/MFLDCLIN/guosa/hpc/methylation/PRJNA577192/COV")
library("DSS")
# Read a methylation BED-style file, skipping the single header line.
# Returns a data frame with columns:
#   chr, pos : input columns 1 and 2
#   N        : input column 5 (presumably a methylation fraction — TODO confirm)
#   X        : column 5 * column 4, truncated to integer
read_methBED <- function(filename) {
  bed <- read.table(file = filename, stringsAsFactors = F, skip = 1)
  out <- cbind(bed[, c(1, 2, 5)], as.integer(bed[, 5] * bed[, 4]))
  colnames(out) <- c('chr', 'pos', 'N', 'X')
  return(out)
}
# Read a headerless Bismark coverage file. Columns 4 and 5 are taken as
# methylated and unmethylated read counts (TODO confirm against the input
# files). The result has the chr/pos/N/X layout consumed below by
# makeBSseqData(): N = total reads (col4 + col5), X = methylated reads (col4).
read_bismarkcov <- function(filename) {
  cov <- read.table(file = filename, stringsAsFactors = F, skip = 0)
  data.frame(
    chr = cov[, 1],
    pos = cov[, 2],
    N = cov[, 4] + cov[, 5],
    X = cov[, 4]
  )
}
############################################################
# Parameters
# FDR cutoff used when calling DMLs below.
fdr_cutoff = 0.05
# Input: two groups of bedgraph/coverage files (group 1 vs group 2).
filenames_1 = c(
'LYMP-S2019-0003-1.bedgraph','LYMP-S2019-0004-1.bedgraph'
)
samplenames_1 = c('LYMP-S2019-0003-1','LYMP-S2019-0004-1')
filenames_2 = c(
'LYMP-S2019-0003-2.bedgraph',
'LYMP-S2019-0007-2.bedgraph'
)
samplenames_2 = c('LYMP-S2019-0003-2','LYMP-S2019-0007-2')
############################################################
# Read all input files into a list of chr/pos/N/X data frames
# and cache it to disk.
input = list()
filenames=c(filenames_1,filenames_2)
for(file in filenames)
{
input = append(input,list(read_bismarkcov(file)))
}
save(input,file="input.RData")
# Build the BSseq object and test for differentially methylated loci
# (smoothing over a 200 bp span); cache intermediates and write the
# full per-site DML test results.
# NOTE(review): sample names here are hard-coded and must stay in sync
# with samplenames_1/samplenames_2 above.
BSobj <- makeBSseqData(input,c('LYMP-S2019-0003-1','LYMP-S2019-0004-1','LYMP-S2019-0003-2','LYMP-S2019-0007-2') )
save(BSobj,file="BSobj.RData")
dmlTest <- DMLtest(BSobj, group1=c('LYMP-S2019-0003-1','LYMP-S2019-0004-1'), group2=c('LYMP-S2019-0003-2','LYMP-S2019-0007-2'),smoothing = T, smoothing.span = 200)
save(dmlTest,file="dmlTest.region.RData")
write.table(dmlTest,file="output.LYMP.DMC.tsv",row.names=F,quote=F,sep="\t")
# Call DMLs and write only those below the FDR cutoff.
DMS = callDML(dmlTest)
# NOTE(review): fdr_cutoff is re-assigned here, duplicating the value set
# at the top of the script.
fdr_cutoff=0.05
index = which(DMS$fdr <= fdr_cutoff)
write.table(DMS[index,],file="output.LYMP.DMR.tsv",row.names=F,quote=F,sep="\t")
# Manhattan plot and qqplot
# Manhattan plot and QQ plot for a DMP-style results table.
# myDMP must contain columns 'chr', 'pos' and 'pval'; they are renamed to
# the CHR/MAPINFO/P.Value names expected downstream, chromosome labels are
# converted to numbers (X -> 23, Y -> 24), and manhattan.pdf / qqplot.pdf
# are written to the working directory as a side effect.
ManhattanmyDMP<-function(myDMP){
# Plotting helpers loaded inside the function (side effect on the session).
library("qqman")
library("Haplin")
# Rename the input columns in place to the names qqman expects.
colnames(myDMP)[match("chr",colnames(myDMP))]<-"CHR"
colnames(myDMP)[match("pos",colnames(myDMP))]<-"MAPINFO"
colnames(myDMP)[match("pval",colnames(myDMP))]<-"P.Value"
SNP=rownames(myDMP)
# Strip the "chr" prefix so chromosome labels can be made numeric.
CHR=gsub("chr","",myDMP$CHR)
# NOTE(review): the Y -> 24 substitution only runs when at least one "X"
# label is present — a table with Y but no X would produce NAs below.
if(length(grep("X",CHR))>0){
CHR<-sapply(CHR,function(x) gsub(pattern = "X",replacement = "23",x))
CHR<-sapply(CHR,function(x) gsub(pattern = "Y",replacement = "24",x))
}
CHR<-as.numeric(CHR)
BP=myDMP$MAPINFO
P=myDMP$P.Value
manhattaninput=data.frame(SNP,CHR,BP,P)
# NOTE(review): 'max' and 'genomewideline' are computed but never used;
# the manhattan() call below fixes ylim = c(0,7) and disables both lines.
max<-max(2-log(manhattaninput$P,10))
genomewideline=log(0.05/nrow(na.omit(manhattaninput)),10)
pdf("manhattan.pdf")
manhattan(na.omit(manhattaninput),col = c("blue4", "orange3"),ylim = c(0,7),lwd=2, suggestiveline=F,genomewideline=F)
dev.off()
pdf("qqplot.pdf")
pQQ(P, nlabs =length(P), conf = 0.95)
dev.off()
}
|
7d00eb58ec768a1047c6e475e713968d97000af2
|
8aab57d531fc9ffa67bb39d8504e6adb13ea78ce
|
/Tarea6/Lectura/ScriptTarea6.r
|
4d1d060f6daae200b13c9a4e40d8945948e8af1e
|
[] |
no_license
|
fabrgalindo/ADA2015
|
6e37a56a2995c71ad03c98e8f95ce7fa35b6e7a7
|
558483555ff2c38f8c3bd3cd340453b6bc0b0622
|
refs/heads/master
| 2021-01-10T12:09:53.933180
| 2016-01-22T05:33:27
| 2016-01-22T05:33:27
| 49,386,474
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,231
|
r
|
ScriptTarea6.r
|
# ScriptTarea6.r — download, decompress, and merge NOAA Storm Events
# fatality CSV files listed in FILES_DESC.txt, installing any packages
# listed in Paquetes.txt first. (Original Spanish comments translated.)
#
# Fixes relative to the original:
#  * The directory-check `if` line ended in stray dots ('{.....'), a syntax
#    error that made the script unparseable — removed.
#  * Loops iterated over whole data.frame columns, so toString() collapsed
#    every name into one "a, b, c" string; they now iterate element-wise
#    over the first column.
#  * The inner existence check tested the uncompressed name `x` twice; it
#    now tests the compressed name `xgz` before downloading.
#  * `rm(data)` ran unconditionally after the merge loop and would error
#    when only one file was read; it is now inside the rbind branch.
setwd("C:/repoGit/ADA2015/Lectura")
PACKAGES <- read.table("Paquetes.txt")
# Load the required packages, installing any that are missing.
for (paquete in as.character(PACKAGES[[1]])) {
  if (!require(paquete, character.only = TRUE, quietly = TRUE)) {
    # Install, then load.
    install.packages(paquete)
    library(paquete, character.only = TRUE)
  }
}
# Set the working directory (repeated here as in the original script).
setwd("C:/repoGit/ADA2015/Lectura")
FILES <- read.table("FILES_DESC.txt")
# Validate the existence of the download directory, creating it if needed.
if (!file.exists("C:/repoGit/ADA2015/Lectura")) {
  dir.create(file.path("C:/repoGit/ADA2015/Lectura"), recursive = TRUE)
  if (!dir.exists("C:/repoGit/ADA2015/Lectura")) {
    stop("No existe directorio")
  }
}
# If a data file is not present in the data directory, look for the
# compressed file; download it if absent, then decompress it so the
# uncompressed CSV ends up in the data directory.
for (varFile in as.character(FILES[[1]])) {
  x <- toString(varFile)
  xgz <- paste(toString(varFile), "gz", sep = ".")
  # Skip files already decompressed in the data area.
  if (!file.exists(x)) {
    # Look for the compressed file; download it if it is missing.
    if (!file.exists(xgz)) {
      URL <- "http://www1.ncdc.noaa.gov/pub/data/swdi/stormevents/csvfiles"
      URLCompleta <- paste(URL, xgz, sep = "/")
      download.file(URLCompleta, destfile = xgz, method = "libcurl")
    }
    # gunzip() comes from R.utils (expected to be listed in Paquetes.txt —
    # TODO confirm).
    gunzip(xgz)
  }
}
# Start from a clean slate if Fatalities already exists in the session.
if (exists("Fatalities")) {
  rm(Fatalities)
}
FILES_DESC <- read.table("FILES_DESC.txt")
# Merge all files into the Fatalities data structure.
for (fileDesc in as.character(FILES_DESC[[1]])) {
  xDesc <- toString(fileDesc)
  if (!exists("Fatalities")) {
    Fatalities <- read.csv(xDesc, header = TRUE, sep = ",", na.strings = "")
  } else {
    data <- read.csv(xDesc, header = TRUE, sep = ",", na.strings = "")
    Fatalities <- rbind(Fatalities, data)
    # Remove the temporary variable.
    rm(data)
  }
}
# Display all the data.
print(Fatalities)
|
ba3db172c6da973777c6557cf6fadfb8f7a9e527
|
93530649c133130057b4f2e0124a54d134c3f9d0
|
/emis_model/model/NH3_mods.R
|
3f0a0a48f54a5936da6198b40e6a0825b6b2e9a9
|
[] |
no_license
|
sashahafner/mink-NH3-2020
|
762a2ed65b36cf4dcbc58e3b907c533840c69cb3
|
00594052bdf5c55318e59305d224211a86923e98
|
refs/heads/main
| 2023-02-07T07:02:05.708383
| 2020-12-23T14:25:51
| 2020-12-23T14:25:51
| 323,922,371
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 56,431
|
r
|
NH3_mods.R
|
# Equilibrium and kinetic (CO2) models for NH3 and CO2 emission from slurry and changes in speciation
# Author: S. Hafner
#
# History of revisions
# 2014 OCT 31 nh3_mods.R Combined code from two separate files for equilibrium and kinetic models
# See comments above each section for details on earlier revisions.
# 2014 NOV 02 nh3_mods.R Added pH argument to eqSpec, if specified HAc or KOH will be added to match
# pH based on H balance. Changed while check for ionic strength. Added tot
# check for kinEmisDiffMod.
# 2014 NOV 24 nh3_mods.R Found problem with ul setting in kinSpec (just that it was too low, -6). Set to
# -5 for now, and added error messages when charge balance is off, but this should
# be improved.
# 2014 NOV 25 nh3_mods.R Added error check on both optimize calls for when objective is too high.
# Added acid argument to eqSpec so user can select which acid will be used to
# match entered pH. Choices are "HAc" of "HCl".
# 2014 DEC 01 nh3_mods.R Made acid/base pH adjustment approach more flexible and replaced "acid" argument with
# adjpH. Can handle addition or subtraction of acid or base now
# 2015 JAN 03 nh3_mods.R Changed calculation of relative emission, added imass.NH3 and imass.CO2 calculation
# before emission calculations. Still is a problem for emission for lb = 'cc'.
# 2015 JAN 04 nh3_mods.R Added 'NaOH' option to adjpH argument. Added ph.results argument to eqEmisDiffMod
# for returning pH values
# 2015 JAN 05 nh3_mods.R Added signif rounding to 5 digits for time in output matrix row names. Need to
# revisit.
# 2015 MAR 07 nh3_mods.R Note: made some changes to ch.cells, added Dn, plus other changes to a.globe
# and tot.glob, and related, that decreased speed and were discarded. See
# archived versions.
# 2015 MAR 07 nh3_mods.R Added variable D with depth. Specify Dd for deep D and zD for depth of change.
#
# 2015 MAR 14 nh3_mods.R Added mol = molalities of all species to output of kinEmisDiffMod().
# Had to make m.glob for this.
# 2015 APR 20 nh3_mods.R kinEmisDiffMod only, added h.m.constant logical argument. Set to TRUE to use
# single h.m value
# 2018 MAR 02 NH3_mods.R File name change only
# 2019 APRIL 10 NH3_mods.R Correct global assignment symbols, had added spaces between them.
# Equilibrium-only model
# History of revisions
# Date File Description
# 2010 OCT 08 SpecMod_v1.R First version that really works. Cacluates speciation of NH3 and CO2.
# 2010 OCT 11 SpecMod_v2.R Trying to incorporate temperature response of equilibrium constants,
# kinetics, and emission. Seems to work, but is slow.
# 2010 OCT 12 SpecMod_v3.R Switching to H2CO3 as master C species.
# 2010 OCT 13 SpecMod_v4.R Trying to speed things up. Kinetic component now works pretty well, but is only single layer.
# 2010 OCT 13 SpecMod_v5.R Includes transport. Now uses extended Debye-Huckel equation for activity coefficients
# 2010 OCT 14 SpecMod_v6.R Modifying so CO2 is returned by spec function. Everything seems to work.
# 2010 OCT 18 SpecMod_v7.R Adding a function for running the complete transport model (was manual)
# Corrected a 1000-fold error in derivative calculations
# 2010 OCT 19 SpecMod_v8.R Trying to speed up execution
# 2010 OCT 20 SpecMod_v8.R Corrected an error in dx
# 2010 OCT 27 SpecMod_v12.R Between this and v8, tried several things for improving speed--all failed.
# This version uses a slightly simplified version of spec.ph for opitimization
# 2010 OCT 28 SpecMod_v13.R Uses global variables to track tot and a. Only calculates speciation when needed.
# Slightly faster than v12, but results are sensitive to the change in tot that is
# considered insignificant. If it is too small, ode1D makes many more calls to der.calc,
# and results in slow runs. Saw this by writing out time at each der.calc call to a file.
# 2010 NOV 03 SpecMod_v14.R Adding K+, Na+, Cl-, and acetic acid.
# 2010 NOV 04 SpecMod_v15.R Trying to pull out non-volatile solutes from ODE solver to speed things up
# 2010 NOV 08-09SpecMod_v16.R Tried to write my own solver that uses simple fixed relative change approach.
# As feared, it was very slow. Incorporated a new approach where CO2(aq) in the upper layer
# is set to zero. Speeds things up slightly, and is optional.
# 2010 NOV 09 SpecMod_v17.R Adding speciation-only option, with alternate speciation for simulations without kinetics
# 2010 NOV 09 SpecTransMod_v1.R Separating from original model. This version does not include kinetics
# Note that species order is the same as in original model. This makes for a slightly different
# approach to the stoichiometry matrix.
# 2010 DEC 13 SpecTransMod_v1.R Added line to deal with n.calls.spec when only spec is called up without transport
# 2010 DEC 24 SpecTransMod_v1.R Added effect of ambient CO2
# 2010 DEC 24 SpecTransMod_v2.R Made some changes to output. Adding option for constant concentration at lower surface.
# Required changes in diffusion calculations--need to check!!!
# 2011 JAN 04 SpecTransMod_v3.R Improved der.calc code a bit.
# 2011 APR 05 SpecTransMod_v4.R Making h.m length of 2, for NH3 and CO2, and calculate h.m for CO2 from h.m for NH3.
# 2011 APR 07 SpecTransMod_v4.R Added if statement so summ is in output only if relevant times are requested.
# 2011 APR 14 SpecTransMod_v5.R Changing equations for equilibrium constants and kinetic constants to latest version,
# based on Plummer and Bunsenberg (1982).
# 2011 SEP 26 SpecTransMod_v7.R Adding CO2.emis argument so CO2 emission can be shut off. Note that the changes in version 6
# were abandoned. Added pos to output (had been missing).
# 2011 SEP 27 SpecTransMod_v7.R Replaced dielectric constant calculation and Debye-Huckel A and B with equations from PHREEQC and WATEQ.
# 2013 NOV 26 eqmod.R Changed file name, replaced tabs with spaces
# 2014 MAR 21 eqmod.R Changed names of functions, nested pHSpec and HBalErr in eqSpec, nested derivative functions inside model functions, renamed model functions
# 2014 MAY 30 eqmod.R Added totk to output, with separate CO2 and other TIC species
# Required package
library(deSolve)
# Calculates speciation for a given set of total concentrations, with equilibrium control of CO2 hydration
# Equilibrium speciation of a slurry solution.
# Given total (analytical) concentrations of 7 master species, solves for
# the activities/molalities of all 13 aqueous species (ammonia, carbonate,
# acetate systems plus strong ions) at chemical equilibrium, with CO2
# hydration treated as an equilibrium reaction.
#
# Arguments:
#   tot    Named numeric vector of master species totals (mol/kgw); must have
#          exactly the elements 'H.', 'NH3', 'H2CO3', 'K.', 'Na.', 'Cl.', 'HAc'.
#   temp.c Temperature (deg C).
#   of     Output format: 'a' = activities, 'm' = molalities, 'k' = log10
#          equilibrium constants, 'all' = full list plus p.CO2 / p.NH3.
#   ll, ul Lower/upper bounds on log10(H+ activity) for optimize().
#   pH     Optional; if given, acid/base (chosen by adjpH) is added until the
#          solution matches this pH, based on the proton balance.
#   adjpH  Which acid/base is adjusted: 'HAc', 'HCl', 'H2CO3', 'KOH',
#          'NaOH', or 'NH3'.
#
# Side effect: increments the global counter n.calls.spec (via <<-).
eqSpec <- function(tot = tot, temp.c = 20, of = 'a', ll = -14, ul = 0, pH, adjpH = 'H2CO3') {
  # Check arguments
  if(length(tot) != 7 || any(!names(tot)%in%c('H.', 'NH3', 'H2CO3', 'K.', 'Na.', 'Cl.', 'HAc'))) stop('Check tot argument. Should be a numeric vector with elements ', "'H.', 'NH3', 'H2CO3', 'K.', 'Na.', 'Cl.', 'HAc'")
  # Define other functions
  # pHSpec: for a fixed log10 H+ activity (l.a.h), computes species
  # activities and molalities, iterating on ionic strength (extended
  # Debye-Huckel activity coefficients) until the change in i is small.
  pHSpec <- function(l.a.h, a.dh, b.dh, a.par, l.k, s,tot, z) {
    # Iteratively solve, updating ionic strength each time
    i <- sum(0.5*tot[tot>0]) # Very crude guess
    b <- 999
    k <- 10^l.k
    l.a <- 0*l.k # Just for length and names
    l.a['H.'] <- l.a.h
    a <- 10^l.a
    j <- 0
    di <- 99
    while (di/i>1E-4){ #abs(log10(i/b))>log10(1.001)) {
      j <- j + 1
      b <- i
      # log10 activity coefficients, extended Debye-Huckel (WATEQ form).
      l.g<- -a.dh*z^2*sqrt(i)/(1+b.dh*a.par[1, ]*sqrt(i)) + a.par[2, ]*i
      g <- 10^l.g
      # Strong ions: activity directly from total and activity coefficient.
      l.a['K.'] <-log10(tot['K.']) + l.g['K.']
      l.a['Na.'] <-log10(tot['Na.']) + l.g['Na.']
      l.a['Cl.'] <-log10(tot['Cl.']) + l.g['Cl.']
      # Ammonia system: partition total NH3 between NH3 and NH4+.
      l.a['NH4.'] <-log10(tot['NH3']*k['NH4.']*g['NH3']*a['H.']*g['NH4.']/(g['NH4.'] + k['NH4.']*g['NH3']*a['H.']) )
      l.a['NH3'] <-l.a['NH4.'] - l.a['H.'] - l.k['NH4.']
      # Carbonate system: partition total H2CO3 among H2CO3/HCO3-/CO3-2/CO2(aq).
      l.a['H2CO3'] <- log10(tot['H2CO3']*a['H.']*g['H2CO3']/(k['HCO3.']*g['H2CO3']/g['HCO3.'] + a['H.'] + k['CO3.2']*g['H2CO3']/(g['CO3.2']*a['H.']) + a['H.']*k['CO2']*g['H2CO3']/g['CO2'] ) )
      l.a['HCO3.'] <- l.k['HCO3.'] + l.a['H2CO3'] - l.a['H.']
      l.a['CO3.2'] <- l.k['CO3.2'] + l.a['H2CO3'] - 2*l.a['H.']
      l.a['CO2'] <-l.k['CO2'] + l.a['H2CO3']
      l.a['OH.'] <- 0 -l.a['H.'] + l.k['OH.']
      # Acetate system.
      l.a['Ac.'] <- log10(k['Ac.']*g['HAc']*tot['HAc']*g['Ac.']/(a['H.']*g['Ac.'] + k['Ac.']*g['HAc']))
      l.a['HAc'] <- l.a['Ac.'] + l.a['H.'] - l.k['Ac.']
      # Molalities from activities, then recompute ionic strength.
      l.m <- l.a - l.g
      m <- 10^l.m
      i <- sum(0.5*m*z^2)
      di <- abs(i - b)
    }
    a <- 10^l.a
    # cb = charge balance error; totk = totals with CO2(aq) split out of TIC.
    cb <- sum(z*m)
    totk <- c(tot[1:2], m[3:4], tot[4:7])
    totk[3] <- totk[3] + m['HCO3.'] + m['CO3.2']
    list(m = m, a = a, g = g, i = i, l.m = l.m, l.a = l.a, l.g = l.g, tot = tot, totk = totk, cb = cb, i.its = j)
  }
  # Calculates error in total H for a given pH guess
  HBalErr <- function(l.a.h, a.dh, b.dh, a.par, l.k, s,tot, z) {
    m <- pHSpec(l.a.h = l.a.h, a.dh = a.dh, b.dh = b.dh, a.par = a.par, l.k = l.k, s = s, tot = tot, z = z)$m
    abs(tot['H.'] - sum(s[, 1]*m)) # All calculated totals given by t(s)%*%m. Alternative would be charge balance.
  }
  # Now code for speciation
  # Global call counter (diagnostic side effect).
  if(!exists('n.calls.spec')) n.calls.spec <<- 0
  n.calls.spec <<- n.calls.spec + 1
  # Temperature
  temp.k <- 273.15+temp.c
  # Henry's law constants (temperature-dependent, from fitted equations).
  kh.CO2<- 10^(108.38578 +0.01985076*temp.k - 6919.53/temp.k - 40.45154*log10(temp.k) + 669365/temp.k^2)
  kh.NH3<- 10^(-3.51645 -0.0013637*temp.k + 1701.35/temp.k)
  # Equilibrium constants (log10), temperature-dependent.
  temp.k <- 273.15+temp.c
  l.k <- c('H.' = 0, 'NH3' = 0, 'H2CO3' = 0, 'CO2' = 2.778151, 'K.' = 0, 'Na.' = 0, 'Cl.' = 0, 'HAc' = 0,
  'OH.'= -4.2195 -2915.16/temp.k,
  'NH4.'= 0.0905 + 2729.31/temp.k,
  'HCO3.'= -353.5305 -0.06092*temp.k + 21834.37/temp.k + 126.8339*log10(temp.k) -1684915/temp.k^2,
  'CO3.2'= -461.4176 -0.093448*temp.k + 26986.16/temp.k + 165.7595*log10(temp.k) -2248629/temp.k^2,
  'Ac.' = -4.8288 + 21.42/temp.k)
  # Matrix of stoichiometric coefficients. Not used for much, but is good for keeping track of reactions
  s <- matrix(c(1, 0,0, 0,0, 0,0, 0,-1, 1,-1, -2, -1, 0, 1,0, 0,0, 0,0, 0,0, 1,0, 0,0, 0, 0,1, 1,0, 0,0, 0,0, 0,1, 1,0,
  0, 0,0, 0,1, 0,0, 0,0, 0,0, 0,0, 0, 0,0, 0,0, 1,0, 0,0, 0,0, 0,0, 0, 0,0, 0,0, 0,1, 0,0, 0,0, 0,0,
  0, 0,0, 0,0, 0,0, 1,0, 0,0, 0,1),
  nrow = 13, dimnames = list(c("H.", "NH3", 'H2CO3', 'CO2', 'K.', 'Na.', 'Cl.', 'HAc', "OH.", "NH4.", 'HCO3.', 'CO3.2', 'Ac.'),
  c("H.", 'NH3', 'H2CO3', 'K.', 'Na.', 'Cl.', 'HAc'))
  )
  # Species charge
  z <- c('H.' = 1, 'NH3' = 0, 'H2CO3' = 0, 'CO2' = 0, 'K.' = 1, 'Na.' = 1, 'Cl.' = -1, 'HAc' = 0, 'OH.' = -1, 'NH4.' = 1, 'HCO3.' = -1, 'CO3.2' = -2, 'Ac.' = -1)
  # Parameters for calculation of activity coefficients, using extended Debye-Huckel equation (see PHREEQC manual)
  a.par <- matrix(c(9, 0,0, 0.1, 0,0.1, 0,0.1, 3,0, 4.25, 0,3, 0,0, 0.1, 3.5, 0,2.5, 0,5.4, 0,4.5, 0,4.5, 0), nrow = 2, dimnames = list(c('a', 'b'), c('H.', 'NH3', 'H2CO3', 'CO2', 'K.', 'Na.', 'Cl.', 'HAc', 'OH.', 'NH4.', 'HCO3.', 'CO3.2', 'Ac.')))
  # Calculate a.dh and b.dh parameters for Debye-Huckel equation. Dielectric constant, density rearranged from PHREEQC code
  # Dielectric constant
  de <- 2727.586 + 0.6224107*temp.k - 1075.112*log10(temp.k) - 52000.87/temp.k
  # Water density
  c.d <- 647.26 - temp.k
  d.H2O <- (1 + .1342489*c.d^(1/3) - 3.946263E-3*c.d)/(3.1975 - 0.3151548*c.d^(1/3) - 1.203374E-3*c.d + 7.48908E-13*c.d^4)
  # a.dh and b.dh are A and B in Debye-Huckel equation. Equations are from Tuesdell & Jones (1974)
  a.dh <- 1.82483E6*d.H2O^0.5/(de*temp.k)^(3/2)
  b.dh <- 50.2916*d.H2O^0.5/(de*temp.k)^0.5
  # If pH not specified (assumed to be typical case) it is calculated by
  # minimizing the H balance error; if specified, acid/base (adjpH) is
  # added to reach the specified value, based on proton balance.
  if(missing(pH)) {
    sol <- optimize(f = HBalErr, interval = c(ll, ul), a.dh = a.dh, b.dh = b.dh, a.par = a.par, l.k = l.k, s = s, tot = tot, z = z, tol = 1E-15)
    if(sol$objective>5E-7) {
      print(sol)
      stop('Around line 160, optimize didn\'t converge: complete results above, specified limits: ', ll, ' ', ul)
    }
    l.a.h <- sol$minimum
  } else { # pH is specified, acid or base adjusted to match it
    l.a.h<- -pH
    dhb <- 999
    # hb = residual in the proton balance at the requested pH.
    hb <- tot['H.'] - sum(s[, 1]*pHSpec(l.a.h, a.dh, b.dh, a.par, l.k, s,tot, z)$m)
    while(dhb>1E-10) { # Must be solved iteratively because i will change
      if(adjpH == 'HAc') {
        tot['HAc'] <- tot['HAc'] - hb
      } else if(adjpH == 'HCl') {
        tot['Cl.'] <- tot['Cl.'] - hb
        tot['H.'] <- tot['H.'] - hb
      } else if(adjpH == 'H2CO3') {
        tot['H2CO3'] <- tot['H2CO3'] - hb
      } else if(adjpH == 'KOH') {
        tot['K.'] <- tot['K.'] + hb
        tot['H.'] <- tot['H.'] - hb
      } else if(adjpH == 'NaOH') {
        tot['Na.'] <- tot['Na.'] + hb
        tot['H.'] <- tot['H.'] - hb
      } else if(adjpH == 'NH3') {
        tot['NH3'] <- tot['NH3'] + hb
      } else stop('adjpH argument must be "HAc", "H2CO3", "KOH", or "HCl" but is ', adjpH)
      hb2 <- tot['H.'] - sum(s[, 1]*pHSpec(l.a.h, a.dh, b.dh, a.par, l.k, s,tot, z)$m)
      dhb <- abs(hb2-hb)
      hb <- hb2
    }
  }
  # Final speciation at the solved/requested H+ activity.
  out <- pHSpec(l.a.h, a.dh = a.dh, b.dh = b.dh, a.par = a.par, l.k = l.k, s = s, tot = tot, z = z)
  if(abs(out$cb)>5E-8) warning('Charge balance off in eqSpec. Check results. cb =', out$cb)
  tot.g <- t(s)%*%out$m
  # Equilibrium partial pressures from Henry's law.
  p.CO2 <- out$a['CO2']/kh.CO2
  p.NH3 <- out$a['NH3']/kh.NH3
  names(p.CO2) <- NULL
  if(of == 'a') return(out$a)
  if(of == 'm') return(out$m)
  if(of == 'k') return(l.k)
  if(of == 'all') return(c(out, p.CO2 = p.CO2, p.NH3 = p.NH3))
}
### To test model spec function
#tt <- c("H." = 0.57, 'NH3' = 0.57, 'H2CO3' = 0, 'K.' = 0, 'Na.' = 0, 'Cl.' = 0.57, 'HAc' = 0)
#out <- eqSpec(tot = tt, temp.c = 20, of = 'all', ll = -14, ul = 0)
#-log10(out$a)
#out$g
#tt <- c("H." = -0.101, 'NH3' = 0.1, 'H2CO3' = 0.1, 'K.' = 0.1, 'Na.' = 0.1, 'Cl.' = 0.1, 'HAc' = 0.1)
#out <- eqSpec(tot = tt, temp.c = 20, of = 'all', ll = -14, ul = 0)
#out$m
#out$l.a
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Interface function for emission without diffusive transport
# Emission for single solution--no diffusive transport
# NH3/CO2 emission from a single well-mixed solution layer (no diffusive
# transport). Integrates surface fluxes with deSolve::lsoda, calling
# eqSpec() at each step for equilibrium speciation.
# Returns (for of = 'all') a list with totals, activities, molalities,
# charge balance, pH, cumulative/instantaneous emission, and parameters.
eqEmisMod <- function(
tot, # Vector of master species totals (mol/kgw), e.g., c("H." = 0, 'NH3' = 0.1, 'H2CO3' = 0.1, 'K.' = 0., 'Na.' = 0., 'Cl.' = 0., 'HAc' = 0.). Must be in this order.
times, # Vector of times, in seconds
h.m, # Mass transfer coefficient for NH3, m/s
p.CO2.a, # Ambient CO2 partial pressure (atm)
temp.c, # Temperature in °C
thk, # Layer thickness in m
of = 'all'
) {
  # derCalc: derivative function for lsoda. y = current totals; returns
  # d(tot)/dt plus diagnostics (activities, molalities, pH, fluxes).
  derCalc <- function(t, y,parms) {
    # Calculate rate constants
    R <- 0.000082057 # Universal gas constant (atm m3/mol-K)
    h.m <- parms$h.m
    temp.c <- parms$temp.c
    p.CO2.a <- parms$p.CO2.a
    thk <- parms$thk
    temp.k <- temp.c + 273.15
    # NOTE(review): k1/kr1/k2/kr2 (CO2 hydration kinetics) are computed but
    # not used in this equilibrium version.
    k1 <- 10^(12.066 - 4018.0925/temp.k)
    kr1 <- 10^(14.844 - 4018.0925/temp.k)
    k2 <- 10^(14.354 - 3087.545/temp.k)
    kr2 <- 10^(14.881 - 5524.2258/temp.k)
    kh.CO2<- 10^(108.38578 +0.01985076*temp.k - 6919.53/temp.k - 40.45154*log10(temp.k) + 669365/temp.k^2)
    h.CO2<- R*temp.k*kh.CO2 # Henry's law constant aq:g (m3/kgw)
    kh.NH3<- 10^(-3.51645 -0.0013637*temp.k + 1701.35/temp.k)
    h.NH3<- R*temp.k*kh.NH3
    # Calculate species concentrations
    # Clamp small negative totals from the solver (except H., which can be
    # negative by definition of the proton balance).
    y[y<0 & names(y) != 'H.'] <- 0
    #y[y<0] <- 0
    out <- eqSpec(tot = y, temp.c = temp.c, of = 'all')
    a <- out$a
    m <- out$m
    cb <- out$cb
    # Surface fluxes, mol/m2-s (two-film: driven by gas-phase deficit;
    # ambient NH3 taken as zero).
    j.CO2<- h.m['CO2']*(a['CO2']/h.CO2 - p.CO2.a/(R*temp.k))
    j.NH3<- h.m['NH3']*(a['NH3']/h.NH3 - 0.0/(R*temp.k))
    # Derivatives: flux divided by water mass per m2 (thk m * 1000 kgw/m3).
    dH2CO3.dt <- -j.CO2/(thk*1000)
    dNH3.dt<- -j.NH3/(thk*1000)
    names(j.CO2) <- names(j.NH3) <- NULL
    list(tot = c(dH.dt = 0, dNH3.dt = dNH3.dt, dH2CO3.dt = dH2CO3.dt, dK.dt = 0, dNa.dt = 0, dCl.dt = 0, dHAc.dt = 0), act = a, mol = m, cb = cb,
    pH = -log10(a['H.']), j.NH3 = j.NH3, j.CO2 = j.CO2)
  }
  # Model calculations
  h.m[2] <- h.m[1]*sqrt(17.0305/44.0095);names(h.m) <- c('NH3', 'CO2') # Calculate CO2 h.m based on that for NH3, based on Eq. 6 in Lee et al 2004
  out <- lsoda(y = tot, times = times, func = derCalc, parms = list(h.m = h.m, temp.c = temp.c, p.CO2.a = p.CO2.a, thk = thk))
  # Reshape lsoda's flat output matrix into labelled result matrices.
  tot.out <- matrix(as.vector(out[, colnames(out) %in% c('H.', 'NH3', 'H2CO3', 'K.', 'Na.', 'Cl.', 'HAc')]), nrow = dim(out)[1], ncol = length(tot), dimnames = list(t = out[, 'time'], msp = c('H.', 'NH3', 'H2CO3', 'K.', 'Na.', 'Cl.', 'HAc')))
  act.out <- matrix(as.vector(out[, grep('act', colnames(out))]), nrow = dim(out)[1], ncol = 13, dimnames = list(t = out[, 'time'], sp = c('H.', 'NH3', 'H2CO3', 'CO2', 'K.', 'Na.', 'Cl.', 'HAc', 'OH.', 'NH4.', 'HCO3.', 'CO3.2', 'Ac.')))
  mol.out <- matrix(as.vector(out[, grep('mol', colnames(out))]), nrow = dim(out)[1], ncol = 13, dimnames = list(t = out[, 'time'], sp = c('H.', 'NH3', 'H2CO3', 'CO2', 'K.', 'Na.', 'Cl.', 'HAc', 'OH.', 'NH4.', 'HCO3.', 'CO3.2', 'Ac.')))
  cb.out <- matrix(as.vector(out[, grep('cb', colnames(out))]), nrow = dim(out)[1], ncol = 1, dimnames = list(t = out[, 'time'], cb = 'cb'))
  ph.out <- matrix(as.vector(out[, grep('pH', colnames(out))]), nrow = dim(out)[1], ncol = 1, dimnames = list(t = out[, 'time'], 'pH'))
  # Cumulative emission e.* from the drop in totals (mol/m2).
  emis.out <- data.frame(t = out[, 'time'], j.NH3 = out[, 'j.NH3'], j.CO2 = out[, 'j.CO2'], e.NH3 = 1000*(tot['NH3'] - tot.out[, 'NH3'])*thk,
  e.CO2 = 1000*(tot['H2CO3'] - tot.out[, 'H2CO3'])*thk)
  # NOTE(review): only of == 'all' returns anything; other values yield NULL.
  if(of == 'all') list(times = times, tot = tot.out, act = act.out, mol = mol.out, cb = cb.out, ph = ph.out, emis = emis.out, pars = c(h.m = h.m, p.CO2.a = p.CO2.a, temp.c = temp.c, thk = thk))
}
## To test kin.mod
#eqEmisMod(tot = c("H." = -0.101, 'NH3' = 0.1, 'H2CO3' = 0.1, 'K.' = 0.1, 'Na.' = 0.1, 'Cl.' = 0.1, 'HAc' = 0.1), times = c(0, 1,3600), h.m = 0.001, temp.c = 20, p.CO2.a = 400E-6, thk = 0.01)
# Interface function for complete model with transport
# Interface function for the complete equilibrium model with transport.
#
# Simulates NH3 (and optionally CO2) emission from a layered solution assuming
# full chemical equilibrium in every cell (no CO2 hydration kinetics; all
# inorganic C is lumped in the H2CO3 total), with vertical diffusion of
# master-species totals between cells and mass-transfer-limited volatilization
# from the surface cell. Integrated with deSolve::ode.1D(); speciation of the
# surface cell comes from eqSpec() (defined elsewhere in this file).
#
# Returns (for of = 'all') a list with times, cell positions, total
# concentration profiles, surface activities, pH, an emission data frame,
# execution time, solver/speciation call counts, parameters, and a concise
# summary when times include 0, 3600, and 86400 s.
eqEmisDiffMod <- function(
  c.thk,                   # Vector of cell thicknesses (m) or single value
  CO2.emis = TRUE,         # Include CO2 emission?
  D = 1E-9,                # Diffusion coefficient (m2/s)
  h.m,                     # Mass transfer coefficient for NH3, m/s
  lb = 'nf',               # Lower boundary condition: 'nf' no flow (default), 'cc' constant concentration, 'mx' mixed bottom cell
  n.cells = length(c.thk), # Total number of cells in simulation
  of = 'all',              # Output format
  p.CO2.a,                 # Ambient CO2 partial pressure (atm)
  times,                   # Vector of times (s)
  temp.c,                  # Temperature (C)
  thk = 0,                 # Total thickness (m), only used for lb = 'mx'
  tot,                     # Named vector of the 7 master species totals (mol/kgw), in the order 'H.', 'NH3', 'H2CO3', 'K.', 'Na.', 'Cl.', 'HAc' (equilibrium model has no separate CO2 pool)
  ph.results = FALSE       # Set to TRUE to calculate and return pH for all cells (slow), otherwise just the surface
  ) {
  if(!(lb %in% c('nf', 'cc', 'mx'))) stop('lb error')

  # Calculate derivatives of the emission + transport equations for ode.1D()
  derCalc <- function(t, y, parms) {
    n.calls.der <<- n.calls.der + 1
    R <- 0.000082057 # Universal gas constant (atm m3/mol-K)
    c.thk <- parms$c.thk
    D <- parms$D
    dx <- parms$dx
    h.m <- parms$h.m
    lb <- parms$lb
    n.cells <- parms$n.cells
    p.CO2.a <- parms$p.CO2.a
    temp.c <- parms$temp.c
    temp.k <- temp.c + 273.15
    tot.lb <- parms$tot.lb
    # Clip negative totals from solver overshoot; H. may legitimately be
    # negative (alkalinity convention), so it is excluded. CRUDE.
    y[y < 0 & names(y) != 'H.'] <- 0
    # Put total concentrations in a cells x species matrix for convenience
    tot <- matrix(y, nrow = n.cells, dimnames = list(1:n.cells, c('H.', 'NH3', 'H2CO3', 'K.', 'Na.', 'Cl.', 'HAc')))
    # Henry's law constants (temperature-dependent)
    kh.CO2 <- 10^(108.38578 + 0.01985076*temp.k - 6919.53/temp.k - 40.45154*log10(temp.k) + 669365/temp.k^2)
    h.CO2 <- R*temp.k*kh.CO2 # Henry's law constant aq:g (m3/kgw)
    kh.NH3 <- 10^(-3.51645 - 0.0013637*temp.k + 1701.35/temp.k)
    h.NH3 <- R*temp.k*kh.NH3
    # Diffusion/dispersion. Note single D for all solutes, and that totals
    # diffuse, not individual species. 1000 kgw/m3 makes concentrations volumetric.
    j.diff.out <- D*1000*rbind(c(0, 0, 0, 0, 0, 0, 0), diff(tot))/dx # Flux out, toward cell 1 (mol/m2-s)
    j.diff.lb <- as.vector(D*1000*diff(rbind(tot[n.cells, ], tot.lb))/(0.5*c.thk[n.cells])) # Flux into bottom cell; used below only if lb = 'cc'
    names(j.diff.lb) <- c('H.', 'NH3', 'H2CO3', 'K.', 'Na.', 'Cl.', 'HAc')
    # Calculate species activities, only for the upper-most (surface) cell
    ll <- -13
    ul <- -4
    a <- eqSpec(tot = tot[1, ], temp.c = temp.c, of = 'a', ll = ll, ul = ul)
    # Surface fluxes, mol/m2-s (ambient NH3 assumed zero)
    j.CO2 <- h.m['CO2']*(a['CO2']/h.CO2 - p.CO2.a/(R*temp.k))
    j.NH3 <- h.m['NH3']*(a['NH3']/h.NH3 - 0.0/(R*temp.k))
    # Derivatives for concentration; only NH3 and H2CO3 totals change
    if(lb != 'cc') j.diff.lb <- 0*j.diff.lb
    dH2CO3.dt <- (-j.CO2*c(1, rep(0, n.cells - 1)) + diff(c(j.diff.out[, 'H2CO3'], j.diff.lb['H2CO3'])))/(c.thk*1000)
    dNH3.dt <- (-j.NH3*c(1, rep(0, n.cells - 1)) + diff(c(j.diff.out[, 'NH3'], j.diff.lb['NH3'])))/(c.thk*1000)
    names(j.CO2) <- names(j.NH3) <- NULL
    list(tot = c(dH.dt = rep(0, n.cells), dNH3.dt, dH2CO3.dt, dK.dt = rep(0, n.cells), dNa.dt = rep(0, n.cells), dCl.dt = rep(0, n.cells), dHAc.dt = rep(0, n.cells)),
         act = a, pH = -log10(a['H.']), j.NH3 = j.NH3, j.CO2 = j.CO2,
         p.CO2.s = a['CO2']/h.CO2*R*temp.k, p.NH3.s = a['NH3']/h.NH3*R*temp.k)
  }

  if(lb == 'mx') {
    n.cells <- n.cells + 1 # Add bottom, tracked (mixed) cell
    c.thk <- c(c.thk, thk - sum(c.thk))
  }
  if (any(c.thk <= 0)) stop('c.thk error Auq1995')
  time.start <- Sys.time()
  n.calls.der <<- 0
  n.calls.spec <<- 0
  tot.lb <- tot # Lower boundary constant concentrations (for lb = 'cc')
  # Expand scalar thickness and single-profile totals to all cells
  if(length(c.thk) == 1) c.thk <- rep(c.thk, n.cells)
  if(length(tot) == 7) tot <- rep(tot, each = n.cells)
  # Initial total masses (mol/m2), for calculating relative emission.
  # BUG FIX 1: the original referenced tot['CO2'], which does not exist in the
  # 7-species equilibrium totals (inorganic C is all in H2CO3), making
  # imass.CO2 and e.CO2.rel NA; use H2CO3 only, consistent with emis.CO2 below.
  # BUG FIX 2: computed after c.thk expansion so a scalar c.thk gives the
  # mass of the whole profile, not a single cell.
  imass.NH3 <- sum(1000*tot['NH3']*c.thk)
  imass.CO2 <- sum(1000*tot['H2CO3']*c.thk)
  pos <- cumsum(c.thk) - 0.5*c.thk # Position (m), based on cell centers
  dx <- c(c.thk[n.cells], diff(pos))
  # Calculate CO2 h.m from the NH3 value (Eq. 6 in Lee et al. 2004); zero when CO2 emission is off
  h.m[2] <- ifelse(CO2.emis, h.m[1]*sqrt(17.0305/44.0095), 0)
  names(h.m) <- c('NH3', 'CO2')
  if(lb == 'mx') dx[n.cells] <- 0.5*c.thk[n.cells - 1] # Eliminates resistance within bottom cell
  # Call up ODE solver. Note that only NH3 and H2CO3 actually change over time.
  out <- ode.1D(y = tot, times = times, func = derCalc,
                parms = list(h.m = h.m, n.cells = n.cells, p.CO2.a = p.CO2.a, temp.c = temp.c, c.thk = c.thk, dx = dx, D = D, lb = lb, tot.lb = tot.lb),
                nspec = 7, dimens = n.cells)
  # Unpack solver output into labeled arrays
  tot.out <- array(as.vector(out[, colnames(out) %in% c('H.', 'NH3', 'H2CO3', 'K.', 'Na.', 'Cl.', 'HAc')]), dim = c(dim(out)[1], n.cells, 7), dimnames = list(t = signif(times, 5), pos = pos, msp = c('H.', 'NH3', 'H2CO3', 'K.', 'Na.', 'Cl.', 'HAc')))
  act.out <- matrix(as.vector(out[, grep('act', colnames(out))]), nrow = dim(out)[1], ncol = 13, dimnames = list(t = signif(times, 5), sp = c('H.', 'NH3', 'H2CO3', 'CO2', 'K.', 'Na.', 'Cl.', 'HAc', 'OH.', 'NH4.', 'HCO3.', 'CO3.2', 'Ac.')))
  if(ph.results) {
    # Recalculate pH for every cell and time with eqSpec() (slow)
    ph.out <- matrix(NA, nrow = length(times), ncol = length(pos), dimnames = list(time = signif(times, 5), pos = pos))
    tot.out[, , -1][tot.out[, , -1] < 0] <- 0
    for(tt in 1:length(times)) {
      for(pp in 1:length(pos)) {
        ph.out[tt, pp] <- -as.numeric(eqSpec(tot = tot.out[tt, pp, ], temp.c = temp.c, of = 'all')$l.a['H.'])
      }
    }
  } else ph.out <- as.vector(out[, grep('pH', colnames(out))])
  names(ph.out) <- out[, 'time']
  # Cumulative emission (mol/m2): initial mass minus current profile mass
  emis.NH3 <- sum(1000*tot['NH3']*c.thk) - apply(1000*tot.out[, , 'NH3'], 1, function(x) sum(x*c.thk))
  emis.CO2 <- sum(1000*(tot['H2CO3'])*c.thk) - apply(1000*(tot.out[, , 'H2CO3']), 1, function(x) sum(x*c.thk))
  emis.NH3.rel <- emis.NH3/imass.NH3
  emis.CO2.rel <- emis.CO2/imass.CO2
  p.CO2.s.out <- out[, grep('p.CO2.s', colnames(out))]
  p.NH3.s.out <- out[, grep('p.NH3.s', colnames(out))]
  emis.out <- data.frame(t = signif(out[, 'time'], 5), j.NH3 = out[, 'j.NH3'], j.CO2 = out[, 'j.CO2'], e.NH3 = emis.NH3, e.CO2 = emis.CO2, e.NH3.rel = emis.NH3.rel, e.CO2.rel = emis.CO2.rel, p.CO2.s = p.CO2.s.out, p.NH3.s = p.NH3.s.out)
  pars <- c(D = D, h.m = h.m, kin.mult = NA, lb = lb, n.cells = n.cells, p.CO2.a = p.CO2.a, thk = sum(c.thk), temp.c = temp.c)
  # Concise summary at t = 0, 1 h, and 1 d, if exactly those times were requested
  if (sum(times %in% c('0', '3600', '86400')) == 3) {
    summ.0 <- emis.out[times == 0, -1]
    names(summ.0) <- paste(names(summ.0), '.0', sep = '')
    summ.1h <- emis.out[times == 3600, -1]
    names(summ.1h) <- paste(names(summ.1h), '.1h', sep = '')
    summ.1d <- emis.out[times == 86400, -1]
    names(summ.1d) <- paste(names(summ.1d), '.1d', sep = '')
    ph.summ <- ph.out[times %in% c('0', '3600', '86400')]
    names(ph.summ) <- c('ph.0', 'ph.1h', 'ph.1d')
  } else summ.0 <- summ.1h <- summ.1d <- ph.summ <- NA
  summ <- unlist(c(summ.0, summ.1h, summ.1d, ph.summ))
  exe.time <- Sys.time() - time.start
  if(of == 'all') list(times = times, pos = pos, tot = tot.out, act = act.out, ph = ph.out, emis = emis.out, exe.time = exe.time,
                       n.calls = c(der = n.calls.der, spec = n.calls.spec), pars = pars, summ = summ)
}
# Kinetic functions
# Model for NH3 and CO2 speciation and transport, including kinetically-limited reactions for H2CO3/CO2
# Author: Sasha Hafner
# History of revisions
# Date File Description
# 2010 OCT 08 SpecMod_v1.R First version that really works. Cacluates speciation of NH3 and CO2.
# 2010 OCT 11 SpecMod_v2.R Trying to incorporate temperature response of equilibrium constants,
# kinetics, and emission. Seems to work, but is slow.
# 2010 OCT 12 SpecMod_v3.R Switching to H2CO3 as master C species.
# 2010 OCT 13 SpecMod_v4.R Trying to speed things up. Kinetic component now works pretty well, but is only single layer.
# 2010 OCT 13 SpecMod_v5.R Includes transport. Now uses extended Debye-Huckel equation for activity coefficients
# 2010 OCT 14 SpecMod_v6.R Modifying so CO2 is returned by spec function. Everything seems to work.
# 2010 OCT 18 SpecMod_v7.R Adding a function for running the complete transport model (was manual)
# Corrected a 1000-fold error in derivative calculations
# 2010 OCT 19 SpecMod_v8.R Trying to speed up execution
# 2010 OCT 20 SpecMod_v8.R Corrected an error in dx
# 2010 OCT 27 SpecMod_v12.R Between this and v8, tried several things for improving speed--all failed.
# This version uses a slightly simplified version of spec.ph for opitimization
# 2010 OCT 28 SpecMod_v13.R Uses global variables to track tot and a. Only calculates speciation when needed.
# Slightly faster than v12, but results are sensitive to the change in tot that is
# considered insignificant. If it is too small, ode1D makes many more calls to der.calc,
# and results in slow runs. Saw this by writing out time at each der.calc call to a file.
# 2010 NOV 03 SpecMod_v14.R Adding K+, Na+, Cl-, and acetic acid.
# 2010 NOV 04 SpecMod_v15.R Trying to pull out non-volatile solutes from ODE solver to speed things up
# 2010 NOV 08-09SpecMod_v16.R Tried to write my own solver that uses simple fixed relative change approach.
# As feared, it was very slow. Incorporated a new approach where CO2(aq) in the upper layer
# is set to zero. Speeds things up slightly, and is optional.
# 2010 NOV 10 SpecKinTransMod_v1.R After version 16, I made an equilibrium-only version (SpecTransMod_v1.R).
# 2010 NOV 16 SpecKinTransMod_v1.R Adding more detail to output.
# 2010 NOV 17 SpecKinTransMod_v2.R Adding two-film-type option, with constant composition layer below bottom cell.
# I changed the diffusion rate calculations slightly to do this.
# 2010 NOV 19 SpecKinTransMod_v2.R Added multiplier for kinetic rates
# 2010 DEC 24 SpecKinTransMod_v2.R Added effect of ambient CO2
# 2010 DEC 28 SpecKinTransMod_v2.R Added line to deal with n.calls.spec when only speciation is called up without transport
# 2010 DEC 29 SpecKinTransMod_v3.R Trying to improve code for output a bit, also adding some output for a concise summary
# 2011 JAN 04 SpecKinTransMod_v4.R Improving the der.calc code a bit.
# 2011 JAN 20 SpecKinTransMod_v4.R Corrected small error.
# 2011 JAN 21 SpecKinTransMod_v5.R Added lower boundary option--essentially two-film with finite bulk layer, specified by lb = 'mx'.
# 2011 APR 05 SpecKinTransMod_v6.R Making h.m length of 2, for NH3 and CO2, and calculate h.m for CO2 from h.m for NH3.
# 2011 APR 07 SpecKinTransMod_v7.R Added kin.mult option to kin.mod.
# 2011 APR 08 SpecKinTransMod_v7.R Added if statement so summ is in output only if relevant times are requested.
# 2011 APR 14 SpecKinTransMod_v8.R Changing equations for equilibrium constants and kinetic constants to latest version,
# based on Plummer and Bunsenberg (1982).
# 2011 APR 14 SpecKinTransMod_v9.R Same as 8 (made a change and changed back)
# 2011 SEP 26 SpecKinTransMod_v13.R Added CO2.emis argument, which can be set to FALSE for turning off CO2 emission.
# This change doesn't make much sense here, since an equilibrium model can be used if
# there is no CO2 emission.
# Note that changes made for simulating Ni and jar trials are skipped--see version
# 12 for these.
# 2011 SEP 27 SpecKinTransMod_v13.R Replaced dielectric constant calculation and Debye-Huckel A and B with equations from PHREEQC and WATEQ.
# 2012 JAN 10 SpecKinTransMod_v14.R Simplified the equation for calculating l.a['H2CO3'] to match the one in the equations document (just moved g['H2CO3']
# around). Model simulations in paper were run using version 13, but the two should be identical.
# 2013 NOV 26 kinmod.R Changed file name
# 2014 MAR 21 kinmod.R Changing function names and nesting small functions within others
# 2014 OCT 31 kinmod.R Removed degree sign from comments, was causing a warning in grepl() which was called when source() is called
# Required packages
library(deSolve)
# Calculates speciation for a given set of total concentrations, with kinetic control of CO2 hydration
# Calculates equilibrium speciation for a given set of total concentrations,
# with CO2(aq) carried as a separate master species (its hydration to H2CO3
# is kinetically controlled elsewhere, so the two pools do not equilibrate here).
#
# Arguments:
#   tot    Named vector of the 8 master species totals (mol/kgw), in the order
#          'H.', 'NH3', 'H2CO3', 'CO2', 'K.', 'Na.', 'Cl.', 'HAc'. The H.
#          total follows the alkalinity convention and may be negative.
#   temp.c Temperature (degrees C)
#   of     Output format: 'a' activities, 'm' molalities, 'k' log10
#          equilibrium constants, 'all' complete list plus equilibrium
#          partial pressures p.CO2 and p.NH3 (atm)
#   ll, ul Search limits for log10 a(H+)
#
# pH is found with optimize() by minimizing the error in the total-H balance;
# at each trial pH, ionic strength and extended Debye-Huckel activity
# coefficients are iterated to convergence.
kinSpec <- function(tot, temp.c = 20, of = 'a', ll = -14, ul = 0) {
  # Global counter of speciation calls (reported by the transport models)
  if(!exists('n.calls.spec')) n.calls.spec <<- 0
  n.calls.spec <<- n.calls.spec + 1

  # Calculates speciation for a fixed pH (l.a.h = log10 a(H+)), iterating on
  # ionic strength i until its relative change is below 1E-4
  pHSpec <- function(l.a.h, a.dh, b.dh, a.par, l.k, s, tot, z) {
    i <- sum(0.5*tot[tot > 0]) # Very crude initial guess of ionic strength
    b <- 999
    k <- 10^l.k
    l.a <- 0*l.k # Just for length and names
    l.a['H.'] <- l.a.h
    a <- 10^l.a
    j <- 0
    di <- 99
    while (di/i > 1E-4) {
      j <- j + 1
      b <- i
      # log10 activity coefficients, extended Debye-Huckel equation
      l.g <- -a.dh*z^2*sqrt(i)/(1 + b.dh*a.par[1, ]*sqrt(i)) + a.par[2, ]*i
      g <- 10^l.g
      # Unreactive species: activity = total x activity coefficient
      l.a['CO2'] <- log10(tot['CO2']) + l.g['CO2']
      l.a['K.'] <- log10(tot['K.']) + l.g['K.']
      l.a['Na.'] <- log10(tot['Na.']) + l.g['Na.']
      l.a['Cl.'] <- log10(tot['Cl.']) + l.g['Cl.']
      # Acid-base pairs distributed analytically over their totals
      l.a['NH4.'] <- log10(tot['NH3']*k['NH4.']*g['NH3']*a['H.']*g['NH4.']/(g['NH4.'] + k['NH4.']*g['NH3']*a['H.']))
      l.a['NH3'] <- l.a['NH4.'] - l.a['H.'] - l.k['NH4.']
      l.a['H2CO3'] <- log10(tot['H2CO3']*a['H.']/(k['HCO3.']/g['HCO3.'] + a['H.']/g['H2CO3'] + k['CO3.2']/(g['CO3.2']*a['H.'])))
      l.a['HCO3.'] <- l.k['HCO3.'] + l.a['H2CO3'] - l.a['H.']
      l.a['CO3.2'] <- l.k['CO3.2'] + l.a['H2CO3'] - 2*l.a['H.']
      l.a['OH.'] <- 0 - l.a['H.'] + l.k['OH.']
      l.a['Ac.'] <- log10(k['Ac.']*g['HAc']*tot['HAc']*g['Ac.']/(a['H.']*g['Ac.'] + k['Ac.']*g['HAc']))
      l.a['HAc'] <- l.a['Ac.'] + l.a['H.'] - l.k['Ac.']
      l.m <- l.a - l.g
      m <- 10^l.m
      i <- sum(0.5*m*z^2)
      di <- abs(i - b)
    }
    a <- 10^l.a
    cb <- sum(z*m) # Charge balance, returned for diagnostics
    list(m = m, a = a, g = g, i = i, l.m = l.m, l.a = l.a, l.g = l.g, tot = tot, cb = cb, i.its = j)
  }

  # Calculates the absolute error in total H for a given pH guess; the
  # calculated totals are t(s) %*% m, and column 1 of s is the H. stoichiometry
  HBalErr <- function(l.a.h, a.dh, b.dh, a.par, l.k, s, tot, z) {
    m <- pHSpec(l.a.h = l.a.h, a.dh = a.dh, b.dh = b.dh, a.par = a.par, l.k = l.k, s = s, tot = tot, z = z)$m
    abs(tot['H.'] - sum(s[, 1]*m))
  }

  # Temperature
  temp.k <- 273.15 + temp.c
  # Henry's law constants
  kh.CO2 <- 10^(108.38578 + 0.01985076*temp.k - 6919.53/temp.k - 40.45154*log10(temp.k) + 669365/temp.k^2)
  kh.NH3 <- 10^(-3.51645 - 0.0013637*temp.k + 1701.35/temp.k)
  # log10 equilibrium constants (temperature-dependent); zero for master species
  l.k <- c('H.' = 0, 'NH3' = 0, 'H2CO3' = 0, 'CO2' = 0, 'K.' = 0, 'Na.' = 0, 'Cl.' = 0, 'HAc' = 0,
           'OH.' = -4.2195 - 2915.16/temp.k,
           'NH4.' = 0.0905 + 2729.31/temp.k,
           'HCO3.' = -353.5305 - 0.06092*temp.k + 21834.37/temp.k + 126.8339*log10(temp.k) - 1684915/temp.k^2,
           'CO3.2' = -461.4176 - 0.093448*temp.k + 26986.16/temp.k + 165.7595*log10(temp.k) - 2248629/temp.k^2,
           'Ac.' = -4.8288 + 21.42/temp.k)
  # Matrix of stoichiometric coefficients (13 species x 8 master species).
  # Not used for much, but is good for keeping track of reactions.
  s <- matrix(c(1, 0,0, 0,0, 0,0, 0,-1, 1,-1, -2, -1, 0, 1,0, 0,0, 0,0, 0,0, 1,0, 0,0, 0, 0,1, 0,0, 0,0, 0,0, 0,1, 1,0, 0, 0,0, 1,0, 0,0, 0,0, 0,0, 0,0,
               0, 0,0, 0,1, 0,0, 0,0, 0,0, 0,0, 0, 0,0, 0,0, 1,0, 0,0, 0,0, 0,0, 0, 0,0, 0,0, 0,1, 0,0, 0,0, 0,0, 0, 0,0, 0,0, 0,0, 1,0, 0,0, 0,1),
             nrow = 13, dimnames = list(c('H.', 'NH3', 'H2CO3', 'CO2', 'K.', 'Na.', 'Cl.', 'HAc', 'OH.', 'NH4.', 'HCO3.', 'CO3.2', 'Ac.'),
                                        c('H.', 'NH3', 'H2CO3', 'CO2', 'K.', 'Na.', 'Cl.', 'HAc')))
  # Species charge
  z <- c('H.' = 1, 'NH3' = 0, 'H2CO3' = 0, 'CO2' = 0, 'K.' = 1, 'Na.' = 1, 'Cl.' = -1, 'HAc' = 0, 'OH.' = -1, 'NH4.' = 1, 'HCO3.' = -1, 'CO3.2' = -2, 'Ac.' = -1)
  # Ion-size (row 'a') and b (row 'b') parameters for the extended
  # Debye-Huckel equation (see PHREEQC manual)
  a.par <- matrix(c(9, 0,0, 0.1, 0,0.1, 0,0.1, 3,0, 4.25, 0,3, 0,0, 0.1, 3.5, 0,2.5, 0,5.4, 0,4.5, 0,4.5, 0), nrow = 2, dimnames = list(c('a', 'b'), c('H.', 'NH3', 'H2CO3', 'CO2', 'K.', 'Na.', 'Cl.', 'HAc', 'OH.', 'NH4.', 'HCO3.', 'CO3.2', 'Ac.')))
  # Debye-Huckel A and B parameters. Dielectric constant and water density
  # rearranged from PHREEQC code; A/B equations from Truesdell & Jones (1974).
  de <- 2727.586 + 0.6224107*temp.k - 1075.112*log10(temp.k) - 52000.87/temp.k
  c.d <- 647.26 - temp.k
  d.H2O <- (1 + .1342489*c.d^(1/3) - 3.946263E-3*c.d)/(3.1975 - 0.3151548*c.d^(1/3) - 1.203374E-3*c.d + 7.48908E-13*c.d^4)
  a.dh <- 1.82483E6*d.H2O^0.5/(de*temp.k)^(3/2)
  b.dh <- 50.2916*d.H2O^0.5/(de*temp.k)^0.5
  # Find the pH (log10 a(H+)) that closes the total-H balance
  sol <- optimize(f = HBalErr, interval = c(ll, ul), a.dh = a.dh, b.dh = b.dh, a.par = a.par, l.k = l.k, s = s, tot = tot, z = z, tol = 1E-15)
  if(sol$objective > 5E-7) {
    print(sol)
    stop('Around line 540, optimize didn\'t converge: complete results above, specified limits: ', ll, ' ', ul)
  }
  l.a.h <- sol$minimum
  out <- pHSpec(l.a.h, a.dh = a.dh, b.dh = b.dh, a.par = a.par, l.k = l.k, s = s, tot = tot, z = z)
  # Equilibrium gas partial pressures (atm).
  # BUG FIX: original indexed out$a['co2'] (wrong case; the species name is
  # 'CO2'), so p.CO2 in the of = 'all' output was always NA.
  p.CO2 <- out$a['CO2']/kh.CO2
  p.NH3 <- out$a['NH3']/kh.NH3
  if(of == 'a') return(out$a)
  if(of == 'm') return(out$m)
  if(of == 'k') return(l.k)
  if(of == 'all') return(c(out, p = c(p.CO2, p.NH3)))
}
## To test model spec function
#n.calls.spec <<- 0
#tt <- c("H." = -0.101, 'NH3' = 0.1, 'H2CO3' = 0.1, 'CO2' = 0.0, 'K.' = 0.1, 'Na.' = 0.1, 'Cl.' = 0.1, 'HAc' = 0.1)
#out <- kinSpec(tot = tt, temp.c = 25, of = 'all', ll = -14, ul = 0)
#out$m
#out$l.a
#tt <- c("H." = 0, 'NH3' = 0.1, 'H2CO3' = 0.1, 'CO2' = 0.0, 'K.' = 0, 'Na.' = 0, 'Cl.' = 0, 'HAc' = 0)
#system.time(for(i in 1:100) kinSpec(tot = tt, temp.c = 25, of = 'all'))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Interface function for kinetics without transport
# Interface function for the kinetic model WITHOUT transport: a single
# well-mixed layer with kinetically limited CO2 hydration/dehydration and
# mass-transfer-limited NH3 and CO2 emission from the surface.
#
# Requires kinSpec() (speciation, defined above) and deSolve::lsoda().
# Returns (for of = 'all') a list with times, totals, activities, molalities,
# charge balance, pH, an emission data frame, and the parameters used.
kinEmisMod <- function(
  tot,              # Named vector of the 8 master species totals (mol/kgw), must be in the order 'H.', 'NH3', 'H2CO3', 'CO2', 'K.', 'Na.', 'Cl.', 'HAc'
  times,            # Vector of times, in seconds
  h.m,              # Mass transfer coefficient for NH3, m/s (CO2 value is derived from it below)
  kin.mult = 1,     # Multiplier for kinetic rate constants
  p.CO2.a = 400E-6, # Ambient CO2 partial pressure (atm)
  temp.c = 20,      # Temperature in C
  thk = 0.01,       # Layer thickness in m
  of = 'all'        # Output format
  ) {
  # Check tot argument (warn only; element order beyond names cannot be verified)
  if(length(tot) != 8 || any(!names(tot)%in%c('H.', 'NH3', 'H2CO3', 'CO2', 'K.', 'Na.', 'Cl.', 'HAc'))) warning('Is tot argument correct? Must be in the order', "'H.', 'NH3', 'H2CO3', 'CO2', 'K.', 'Na.', 'Cl.', 'HAc'")
  # Derivative function for lsoda(): kinetics and emission for a single
  # solution--no diffusive transport
  derCalc <- function(t, y, parms) {
    R <- 0.000082057 # Universal gas constant (atm m3/mol-K)
    h.m <- parms$h.m
    kin.mult <- parms$kin.mult
    temp.c <- parms$temp.c
    thk <- parms$thk
    p.CO2.a <- parms$p.CO2.a
    temp.k <- temp.c + 273.15
    # CO2 hydration/dehydration rate constants (temperature-dependent),
    # scaled by kin.mult: k1/kr1 for the H2O pathway, k2/kr2 for the OH- pathway
    k1 <- kin.mult*10^(12.0657 - 4018.09/temp.k)
    kr1 <- kin.mult*10^(14.8438 - 4018.09/temp.k)
    k2 <- kin.mult*10^(-337.2083 + -0.06092*temp.k + 19225.3/temp.k + 126.8339*log10(temp.k) + -1684915/temp.k^2)
    kr2 <- kin.mult*10^(14.8809 + -5524.23/temp.k)
    # Henry's law constants
    kh.CO2 <- 10^(108.38578 + 0.01985076*temp.k - 6919.53/temp.k - 40.45154*log10(temp.k) + 669365/temp.k^2)
    h.CO2 <- R*temp.k*kh.CO2 # Henry's law constant aq:g (m3/kgw)
    kh.NH3 <- 10^(-3.51645 - 0.0013637*temp.k + 1701.35/temp.k)
    h.NH3 <- R*temp.k*kh.NH3
    # Clip negative totals from solver overshoot; H. may legitimately be
    # negative (alkalinity convention)
    y[y<0 & names(y) != 'H.'] <- 0
    #y[y<0] <- 0
    # Calculate species concentrations (full equilibrium except CO2 pool)
    out <- kinSpec(tot = y, temp.c = temp.c, of = 'all')
    a <- out$a
    m <- out$m
    cb <- out$cb
    if(any(abs(cb)>5E-8)) stop('Charge balance error, maybe there is a problem in pHSpec with ul or ll.')
    # Surface fluxes, mol/m2-s (ambient NH3 assumed zero)
    j.CO2 <- h.m['CO2']*(a['CO2']/h.CO2 - p.CO2.a/(R*temp.k))
    j.NH3 <- h.m['NH3']*(a['NH3']/h.NH3 - 0.0/(R*temp.k))
    # Derivatives: emission (per kgw, via thk*1000 kgw/m2) plus net
    # CO2 <-> H2CO3 interconversion
    dCO2.dt <- -j.CO2/(thk*1000) - k1*a['CO2']*1.0 - k2*a['CO2']*a['OH.'] + kr1*a['H2CO3'] + kr2*a['HCO3.']
    dH2CO3.dt <- k1*a['CO2']*1.0 + k2*a['CO2']*a['OH.'] - kr1*a['H2CO3'] - kr2*a['HCO3.']
    dNH3.dt <- -j.NH3/(thk*1000)
    names(j.CO2) <- names(j.NH3) <- NULL
    # Non-volatile, non-reactive totals have zero derivatives
    list(tot = c(dH.dt = 0, dNH3.dt = dNH3.dt, dH2CO3.dt = dH2CO3.dt, dCO2.dt = dCO2.dt, dK.dt = 0, dNa.dt = 0, dCl.dt = 0, dHAc.dt = 0),
         act = a, mol = m, cb = cb, pH = -log10(a['H.']), j.NH3 = j.NH3, j.CO2 = j.CO2, p.CO2.s = a['CO2']/h.CO2*R*temp.k)
  }
  time.start <- Sys.time()
  # Global call counters (reported by callers/diagnostics)
  n.calls.der <<- 0
  n.calls.spec <<- 0
  # Calculate CO2 h.m based on that for NH3, based on Eq. 6 in Lee et al 2004
  h.m[2] <- h.m[1]*sqrt(17.0305/44.0095);names(h.m) <- c('NH3', 'CO2')
  out <- lsoda(y = tot, times = times, func = derCalc, parms = list(h.m = h.m, kin.mult = kin.mult, p.CO2.a = p.CO2.a, temp.c = temp.c, thk = thk))
  # Unpack solver output into labeled matrices
  tot.out <- matrix(as.vector(out[, colnames(out) %in% c('H.', 'NH3', 'H2CO3', 'CO2', 'K.', 'Na.', 'Cl.', 'HAc')]), nrow = dim(out)[1], ncol = 8, dimnames = list(t = out[, 'time'], msp = c('H.', 'NH3', 'H2CO3', 'CO2', 'K.', 'Na.', 'Cl.', 'HAc')))
  act.out <- matrix(as.vector(out[, grep('act', colnames(out))]), nrow = dim(out)[1], ncol = 13, dimnames = list(t = out[, 'time'], sp = c('H.', 'NH3', 'H2CO3', 'CO2', 'K.', 'Na.', 'Cl.', 'HAc', 'OH.', 'NH4.', 'HCO3.', 'CO3.2', 'Ac.')))
  mol.out <- matrix(as.vector(out[, grep('mol', colnames(out))]), nrow = dim(out)[1], ncol = 13, dimnames = list(t = out[, 'time'], sp = c('H.', 'NH3', 'H2CO3', 'CO2', 'K.', 'Na.', 'Cl.', 'HAc', 'OH.', 'NH4.', 'HCO3.', 'CO3.2', 'Ac.')))
  cb.out <- matrix(as.vector(out[, grep('cb', colnames(out))]), nrow = dim(out)[1], ncol = 1, dimnames = list(t = out[, 'time'], cb = 'cb'))
  ph.out <- matrix(as.vector(out[, grep('pH', colnames(out))]), nrow = dim(out)[1], ncol = 1, dimnames = list(t = out[, 'time'], 'pH'))
  # Cumulative emission (mol/m2): initial minus current mass per unit area
  emis.out <- data.frame(t = out[, 'time'], j.NH3 = out[, 'j.NH3'], j.CO2 = out[, 'j.CO2'], e.NH3 = 1000*(tot['NH3'] - tot.out[, 'NH3'])*thk,
                         e.CO2 = 1000*(tot['H2CO3'] + tot['CO2'] - tot.out[, 'H2CO3'] - tot.out[, 'CO2'])*thk)
  # Relative (fractional) emission
  emis.out$e.NH3.rel <- emis.out$e.NH3/(1000*tot['NH3']*thk)
  emis.out$e.CO2.rel <- emis.out$e.CO2/(1000*(tot['H2CO3']+tot['CO2'])*thk)
  if(of == 'all') list(times = times, tot = tot.out, act = act.out, mol = mol.out, cb = cb.out, ph = ph.out, emis = emis.out, pars = c(h.m = h.m, p.CO2.a = p.CO2.a, temp.c = temp.c, thk = thk))
}
### To test kin.mod
#kinEmisMod(tot = c("H." = -0.101, 'NH3' = 0.1, 'H2CO3' = 0.1, 'CO2' = 0, 'K.' = 0.1, 'Na.' = 0.1, 'Cl.' = 0.1, 'HAc' = 0.1), times = c(0, 1,3600), h.m = 0.0, temp.c = 20, p.CO2.a = 400E-6, thk = 0.01)
#kinEmisMod(tot = c("H." = -0.1, 'NH3' = 0.1, 'H2CO3' = 0.1, 'CO2' = 0.0, 'K.' = 0., 'Na.' = 0., 'Cl.' = 0., 'HAc' = 0.1), times = c(0:60), h.m = 0.001, temp.c = 25, thk = 0.01)
# Interface function for complete model with transport
# Interface function for the complete kinetic model with transport:
# a layered solution with kinetically limited CO2 hydration/dehydration in
# every cell, vertical diffusion of the NH3, H2CO3, and CO2 totals between
# cells, and mass-transfer-limited emission from the surface cell.
#
# Requires kinSpec() (speciation) and deSolve::ode.1D(); parallel = TRUE
# additionally requires foreach/parallel/doParallel. NOTE: uses the global
# variables tot.glob, a.glob, and m.glob to cache speciation results and only
# re-speciate cells whose totals changed--not safe to run two simulations
# concurrently in one R session.
kinEmisDiffMod <- function(
  c.thk,                   # Vector of cell thicknesses (m) or single value
  CO2.emis = TRUE,         # Include CO2 emission?
  D = 1E-9,                # Diffusion coefficient (m2/s)
  Dd = NULL,               # Deep diffusion coefficient (m2/s)
  zD = NULL,               # Depth (m) where diffusion coefficient switches from D to Dd
  e1 = 1,                  # Set to 1 to explicitly model CO2 emission from the upper-most layer, 0 to base it on diffusion from below
  h.m,                     # Mass transfer coefficient for NH3 (or both NH3 and CO2 if h.m.constant = TRUE), m/s
  h.m.constant = FALSE,    # Set to TRUE to use single h.m value for CO2 and NH3
  kin.mult = 1,            # Multiplier for kinetic rates
  lb = 'nf',               # Lower boundary condition, no flow by default; 'cc' for constant concentration, 'mx' for mixed bottom cell
  n.cells = length(c.thk), # Total number of cells in simulation
  of = 'all',              # Output format
  p.CO2.a,                 # Ambient CO2 partial pressure (atm)
  times,                   # Vector of times (s)
  temp.c,                  # Temperature (C)
  thk = 0,                 # Total thickness (m), only used for lb = 'mx'
  tot,                     # Named vector of the 8 master species totals (mol/kgw), in the order 'H.', 'NH3', 'H2CO3', 'CO2', 'K.', 'Na.', 'Cl.', 'HAc'
  parallel = FALSE         # TRUE for parallel processing of per-cell speciation
  ) {
  if(!(lb %in% c('nf', 'cc', 'mx'))) stop('lb error')
  # Optional parallel backend; cluster is created once and reused ('clstr')
  if(parallel) {
    library(foreach)
    library(parallel)
    library(doParallel)
    if(!exists('clstr')) {
      clstr <- makeCluster(detectCores()-1)
      registerDoParallel(clstr)
    }
  }
  # Calculate derivatives of kinetic + transport equations for ode.1D().
  # State vector y holds only the kinetically active totals (NH3, H2CO3, CO2)
  # for all cells; the fixed totals are read from tot.glob.
  derCalc <- function(t, y, parms) {
    n.calls.der <<- n.calls.der + 1
    R <- 0.000082057 # Universal gas constant (atm m3/mol-K)
    c.thk <- parms$c.thk
    D <- parms$D
    dx <- parms$dx
    e1 <- parms$e1
    h.m <- parms$h.m
    kin.mult <- parms$kin.mult
    n.cells <- parms$n.cells
    p.CO2.a <- parms$p.CO2.a
    temp.c <- parms$temp.c
    lb <- parms$lb
    tot.lb <- parms$tot.lb
    temp.k <- temp.c + 273.15
    # Put total concentrations in matrix for working with them
    y[y<0] <- 0 # CRUDE
    tot.k <- matrix(y, nrow = n.cells, dimnames = list(1:n.cells, c('NH3', 'H2CO3', 'CO2')))
    # Recombine with the time-invariant totals held in the global cache
    tot <- cbind('H.' = tot.glob[, 'H.'], tot.k, tot.glob[, c('K.', 'Na.', 'Cl.', 'HAc')])
    # CO2 hydration/dehydration rate constants (k1/kr1 H2O pathway,
    # k2/kr2 OH- pathway), scaled by kin.mult, and Henry's law constants
    k1 <- kin.mult*10^(12.0657 - 4018.09/temp.k)
    kr1 <- kin.mult*10^(14.8438 - 4018.09/temp.k)
    k2 <- kin.mult*10^(-337.2083 + -0.06092*temp.k + 19225.3/temp.k + 126.8339*log10(temp.k) + -1684915/temp.k^2)
    kr2 <- kin.mult*10^(14.8809 + -5524.23/temp.k)
    kh.CO2 <- 10^(108.38578 + 0.01985076*temp.k - 6919.53/temp.k - 40.45154*log10(temp.k) + 669365/temp.k^2)
    h.CO2 <- R*temp.k*kh.CO2 # Henry's law constant aq:g (m3/kgw)
    kh.NH3 <- 10^(-3.51645 - 0.0013637*temp.k + 1701.35/temp.k)
    h.NH3 <- R*temp.k*kh.NH3
    # Diffusion/dispersion. Note single D for all solutes, and that totals
    # diffuse, not species. 1000 kgw/m3 makes concentrations volumetric.
    j.diff.out <- D*1000*rbind(c(0, 0, 0), diff(tot.k))/dx # Diffusion out, toward cell 1 (mol/m2-s)
    j.diff.lb <- as.vector(D[1]*1000*diff(rbind(tot.k[n.cells, ], tot.lb))/(0.5*c.thk[n.cells])) # Diffusion into bottom cell; used below only if lb = 'cc'
    names(j.diff.lb) <- c('NH3', 'H2CO3', 'CO2')
    # Find cells whose totals changed since the last call; only these are
    # re-speciated (the expensive step), the rest reuse cached results
    d.tot <- abs(tot.k - tot.glob[, c('NH3', 'H2CO3', 'CO2')])
    ch.cells <- c(1:n.cells)[apply(d.tot, 1, max)>1E-15]
    # Update tot.glob, but only for those cells that changed (to prevent
    # significant cumulative error)
    tot.glob[ch.cells, c('NH3', 'H2CO3', 'CO2')] <<- tot.k[ch.cells, ]
    # Start from cached activities/molalities; changed cells updated below
    a <- a.glob
    m <- m.glob
    ll <- -13
    ul <- -5.0
    n.calls.spec <<- n.calls.spec + length(ch.cells)
    if(parallel) {
      # Parallel branch returns activities only ('a' output format)
      a[ch.cells, ] <- foreach(i = ch.cells, .combine = 'rbind', .export = ls(envir = globalenv())) %dopar% {
        tot.x <- as.numeric(tot[i, ])
        names(tot.x) <- c('H.', 'NH3', 'H2CO3', 'CO2', 'K.', 'Na.', 'Cl.', 'HAc')
        kinSpec(tot = tot.x, temp.c = temp.c, of = 'a', ll = ll, ul = ul)
      }
    } else {
      for (i in ch.cells) {
        tot.x <- as.numeric(tot[i, ])
        names(tot.x) <- c('H.', 'NH3', 'H2CO3', 'CO2', 'K.', 'Na.', 'Cl.', 'HAc')
        pred <- kinSpec(tot = tot.x, temp.c = temp.c, of = 'all', ll = ll, ul = ul)
        a[i, ] <- pred$a
        m[i, ] <- pred$m
      }
    }
    # Write updated results back to the global caches
    a.glob[ch.cells, ] <<- a[ch.cells, ]
    m.glob[ch.cells, ] <<- m[ch.cells, ]
    # Surface fluxes, mol/m2-s (ambient NH3 assumed zero)
    if(e1 == 1) {
      j.CO2 <- h.m['CO2']*(a[1, 'CO2']/h.CO2 - p.CO2.a/(R*temp.k))
    }
    j.NH3 <- h.m['NH3']*(a[1, 'NH3']/h.NH3 - 0.0/(R*temp.k))
    # CO2 hydration, mol/kgw-s; for e1 = 0 the surface CO2 activity is pinned
    # at zero when CO2 emission is on
    if(e1 == 0 & h.m['CO2']>0) a[1, 'CO2'] <- 0
    kin.CO2 <- k1*a[, 'CO2']*1.0 + k2*a[, 'CO2']*a[, 'OH.'] - kr1*a[, 'H2CO3'] - kr2*a[, 'HCO3.']
    # Derivatives for concentration
    if(lb != 'cc') j.diff.lb <- 0*j.diff.lb
    if(e1 == 1) {
      # Explicit surface emission term for CO2 in cell 1
      dCO2.dt <- (-j.CO2*c(1, rep(0, n.cells-1)) + diff(c(j.diff.out[, 'CO2'], j.diff.lb['CO2'])) )/(c.thk*1000) - kin.CO2
    }
    else if(e1 == 0) {
      # Diffusion and kinetics only; the surface CO2 flux is taken as the
      # rate of change of the (pinned) surface cell
      dCO2.dt <- diff(c(j.diff.out[, 'CO2'], j.diff.lb['CO2']))/(c.thk*1000) - kin.CO2
      j.CO2 <- dCO2.dt[1]
      dCO2.dt[1] <- 0
    } else stop('e1 error', e1)
    dH2CO3.dt <- diff(c(j.diff.out[, 'H2CO3'], j.diff.lb['H2CO3']))/(c.thk*1000) + kin.CO2
    dNH3.dt <- (-j.NH3*c(1, rep(0, n.cells-1)) + diff(c(j.diff.out[, 'NH3'], j.diff.lb['NH3'])) )/(c.thk*1000)
    names(j.CO2) <- names(j.NH3) <- names(kin.CO2) <- NULL
    list(tot = c(dNH3.dt, dH2CO3.dt, dCO2.dt), act = a, mol = m, pH = -log10(a[, 'H.']), j.NH3 = j.NH3, j.CO2 = j.CO2, kin.CO2 = kin.CO2, p.CO2.s = a[1, 'CO2']/h.CO2*R*temp.k, p.NH3.s = a[1, 'NH3']/h.NH3*R*temp.k) # ,
    #dCO2.dt = dCO2.dt, dH2CO3.dt = dH2CO3.dt, dNH3.dt = dNH3.dt) # Works but slow things down a bit
  }
  if(lb == 'mx') {
    n.cells <- n.cells + 1 # For bottom, tracked cell
    c.thk <- c(c.thk, thk - sum(c.thk))
  }
  # Diffusion coefficient, if it varies with depth: D above zD, Dd below,
  # log-interpolated within the cell that straddles zD
  if(!is.null(Dd) & !is.null(zD)) {
    Ds <- D
    D <- rep(D, n.cells)
    if(zD<sum(c.thk)) {
      di <- which(cumsum(c.thk)>=zD)[1]
      D[(di+1):n.cells] <- Dd
      ffs <- (sum(c.thk[1:di]) - zD)/c.thk[di]
      # If zD doesn't fall exactly on a break between cells, weight it
      D[di] <- 10^(ffs*log10(Dd) + (1 - ffs)*log10(Ds))
    }
  }
  # NTS: Why was this duplicated????
  #if(lb == 'mx') {
  #  n.cells <- n.cells + 1 # For bottom, tracked cell
  #  c.thk <- c(c.thk, thk - sum(c.thk))
  #}
  # Initial total masses, for calculating relative emission
  imass.NH3 <- sum(1000*tot['NH3']*c.thk)
  imass.CO2 <- sum(1000*(tot['CO2'] + tot['H2CO3'])*c.thk)
  time.start <- Sys.time()
  n.calls.der <<- 0
  n.calls.spec <<- 0
  tot.lb <- tot[2:4] # Lower boundary constant concentrations, only for "kinetic" species NH3, H2CO3, CO2
  # Expand scalar thickness and single-profile totals to all cells
  if(length(c.thk) == 1) c.thk <- rep(c.thk, n.cells)
  if(length(tot) == 8) tot <- rep(tot, each = n.cells)
  pos <- cumsum(c.thk) - 0.5*c.thk # Position (m), based on cell centers
  dx <- c(c.thk[n.cells], diff(pos))
  # CO2 h.m either equals the NH3 value or is scaled by the diffusivity
  # ratio (Eq. 6 in Lee et al 2004); zero when CO2 emission is off
  if(h.m.constant) {
    h.m[2] <- h.m[1]
  } else {
    h.m[2] <- h.m[1]*sqrt(17.0305/44.0095)
  }
  if(!CO2.emis) h.m[2] <- 0
  names(h.m) <- c('NH3', 'CO2')
  if(lb == 'mx') dx[n.cells] <- 0.5*c.thk[n.cells-1] # Eliminates resistance within bottom cell
  # Global variables for tracking changes and avoiding speciation
  # calculations in derCalc() when possible
  tot.glob <<- matrix(tot, nrow = n.cells, dimnames = list(1:n.cells, c('H.', 'NH3', 'H2CO3', 'CO2', 'K.', 'Na.', 'Cl.', 'HAc')))
  a.glob <<- m.glob <<- matrix(rep(999, 13*n.cells), nrow = n.cells)
  colnames(a.glob) <<- colnames(m.glob) <<- c('H.', 'NH3', 'H2CO3', 'CO2', 'K.', 'Na.', 'Cl.', 'HAc', 'OH.', 'NH4.', 'HCO3.', 'CO3.2', 'Ac.')
  # Initial speciation for every cell, to populate the caches
  ll <- -14
  ul <- -0
  for (i in 1:n.cells) {
    tot.x <- as.numeric(tot.glob[i, ])
    names(tot.x) <- c('H.', 'NH3', 'H2CO3', 'CO2', 'K.', 'Na.', 'Cl.', 'HAc')
    pred <- kinSpec(tot = tot.x, temp.c = temp.c, of = 'all', ll = ll, ul = ul)
    a.glob[i, ] <<- pred$a
    m.glob[i, ] <<- pred$m
  }
  # State vector for the solver: only the kinetically active totals
  y <- tot[names(tot) %in% c('NH3', 'H2CO3', 'CO2')]
  if(e1 == 0) y['CO2'][1] <- 0 # Set surface CO2 to zero
  # Call up ODE solver. Note that only NH3 and IC solutes change over time
  out <- ode.1D(y = y, times = times, func = derCalc,
                parms = list(e1 = e1, h.m = h.m, n.cells = n.cells, temp.c = temp.c, c.thk = c.thk, dx = dx, D = D, kin.mult = kin.mult, lb = lb, p.CO2.a = p.CO2.a, tot.lb = tot.lb),
                nspec = 3, dimens = n.cells) #, method = 'lsodes') #, hini = 0.01)
  # Unpack solver output into labeled arrays/matrices
  tot.out <- array(as.vector(out[, colnames(out) %in% c('NH3', 'H2CO3', 'CO2')]), dim = c(dim(out)[1], n.cells, 3), dimnames = list(t = out[, 'time'], pos = pos, msp = c('NH3', 'H2CO3', 'CO2')))
  tot.f.out <- matrix(tot[names(tot) %in% c('H.', 'K.', 'Na.', 'Cl.', 'HAc')], nrow = n.cells, dimnames = list(pos = pos, msp = c('H.', 'K.', 'Na.', 'Cl.', 'HAc')))
  act.out <- array(as.vector(out[, grep('^act', colnames(out))]), dim = c(dim(out)[1], n.cells, 13), dimnames = list(t = out[, 'time'], pos = pos, sp = c('H.', 'NH3', 'H2CO3', 'CO2', 'K.', 'Na.', 'Cl.', 'HAc', 'OH.', 'NH4.', 'HCO3.', 'CO3.2', 'Ac.')))
  mol.out <- array(as.vector(out[, grep('^mol', colnames(out))]), dim = c(dim(out)[1], n.cells, 13), dimnames = list(t = out[, 'time'], pos = pos, sp = c('H.', 'NH3', 'H2CO3', 'CO2', 'K.', 'Na.', 'Cl.', 'HAc', 'OH.', 'NH4.', 'HCO3.', 'CO3.2', 'Ac.')))
  ph.out <- matrix(as.vector(out[, grep('pH', colnames(out))]), nrow = dim(out)[1], ncol = n.cells, dimnames = list(t = out[, 'time'], pos = pos))
  # Cumulative emission (mol/m2): initial mass minus current profile mass
  emis.NH3 <- sum(1000*tot['NH3']*c.thk) - apply(1000*tot.out[, , 'NH3'], 1, function(x) sum(x*c.thk))
  emis.CO2 <- sum(1000*(tot['CO2'] + tot['H2CO3'])*c.thk) - apply(1000*(tot.out[, , 'CO2'] + tot.out[, , 'H2CO3']), 1, function(x) sum(x*c.thk))
  emis.NH3.rel <- emis.NH3/imass.NH3
  emis.CO2.rel <- emis.CO2/imass.CO2
  p.CO2.s.out <- out[, grep('p.CO2.s', colnames(out))]
  p.NH3.s.out <- out[, grep('p.NH3.s', colnames(out))]
  emis.out <- data.frame(t = out[, 'time'], j.NH3 = out[, 'j.NH3'], j.CO2 = out[, 'j.CO2'], e.NH3 = emis.NH3, e.CO2 = emis.CO2, e.NH3.rel = emis.NH3.rel, e.CO2.rel = emis.CO2.rel, p.CO2.s = p.CO2.s.out, p.NH3.s = p.NH3.s.out)
  kin.CO2.out <- matrix(as.vector(out[, grep('kin.CO2', colnames(out))]), nrow = dim(out)[1], ncol = n.cells, dimnames = list(t = out[, 'time'], pos = pos))
  dCO2.out <- matrix(as.vector(out[, grep('dCO2.dt', colnames(out))]), nrow = dim(out)[1], ncol = n.cells, dimnames = list(t = out[, 'time'], pos = pos))
  dH2CO3.out <- matrix(as.vector(out[, grep('dH2CO3.dt', colnames(out))]), nrow = dim(out)[1], ncol = n.cells, dimnames = list(t = out[, 'time'], pos = pos))
  dNH3.out <- matrix(as.vector(out[, grep('dNH3.dt', colnames(out))]), nrow = dim(out)[1], ncol = n.cells, dimnames = list(t = out[, 'time'], pos = pos))
  pars <- c(D = D, h.m = h.m, kin.mult = kin.mult, lb = lb, n.cells = n.cells, p.CO2.a = p.CO2.a, thk = sum(c.thk), temp.c = temp.c)
  # Concise summary at t = 0, 1 h, and 1 d, if those times were requested
  if (sum(times %in% c('0', '3600', '86400')) == 3) {
    summ.0 <- emis.out[times == 0, -1];names(summ.0) <- paste(names(summ.0), '.0', sep = '')
    summ.1h <- emis.out[times == 3600, -1];names(summ.1h) <- paste(names(summ.1h), '.1h', sep = '')
    summ.1d <- emis.out[times == 86400, -1];names(summ.1d) <- paste(names(summ.1d), '.1d', sep = '')
    ph.summ <- ph.out[times%in%c('0', '3600', '86400'), 1];names(ph.summ) <- c('ph.0', 'ph.1h', 'ph.1d')
  } else summ.0 <- summ.1h <- summ.1d <- ph.summ <- NA
  summ <- unlist(c(summ.0, summ.1h, summ.1d, ph.summ))
  exe.time <- Sys.time() - time.start
  if(of == 'all') list(times = times, pos = pos, tot.k = tot.out, tot.f = tot.f.out, act = act.out, mol = mol.out, ph = ph.out, kin.CO2 = kin.CO2.out,
                       emis = emis.out, exe.time = exe.time, n.calls = c(der = n.calls.der, spec = n.calls.spec), pars = pars, summ = summ)
  #dCO2 = dCO2.out, dH2CO3 = dH2CO3.out, dNH3 = dNH3.out,
}
|
925ed724566bcfe4485ac41f10560948140d5717
|
3019ee00ab18b27c206731d99dddae97519b41ce
|
/R/colors.R
|
1fe7236064bd4c4a2270ec49f6e898c4bc01ff44
|
[] |
no_license
|
atkinsjeff/nationalpark
|
da66e7cdb91abc3382d08c3e0bebe45bd039e00f
|
fbbe97e3f10cd7fc2f13575e2cc66af79a7f046f
|
refs/heads/master
| 2020-04-26T11:47:52.279508
| 2019-05-14T20:56:58
| 2019-05-14T20:56:58
| 173,528,870
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,319
|
r
|
colors.R
|
#' Complete list of palettes
#'
#' Use \code{\link{np_palette}} to construct palettes of desired length.
#'
#' Each element is a character vector of hex colour strings sampled from a
#' photograph of the corresponding US National Park. All palettes hold
#' 5 colours except \code{kingscanyon}, which holds 9.
#'
#' @export
np_palettes <- list(
arches = c("#3B1105", "#FC7500", "#ADB5BE", "#5487C8", "#FFD707"),
badlands = c("#3D6F07", "#FBF9FF", "#7D2010", "#DE5A31", "#535261"),
bryce = c("#DCDFE4", "#01261D", "#3C63A6", "#4F2716", "#F2B06A"),
# NOTE(review): superseded conagree definition kept commented out; the
# active conagree entry appears two lines below.
#conagree = c("#204608", "#403F0F", "#F4F3E1", "#11130E", "#A0F51C"),
chesapeake = c("#0D0B07", "#F2884B", "#FF4F46", "#A66D60", "#594540"),
conagree = c("#78A31F", "#13480A", "#69B1FF", "#190B40", "#612407"),
deathvalley = c("#6503A6", "#6BB3F2", "#5C6F13", "#F2CB05", "#6B3C22"),
everglades = c("#78A633", "#DADC57", "#735826", "#17130D", "#323D58"),
flatirons = c("#261E29", "#383354", "#AFAEE7", "#2A301E", "#EEDB96"),
grandtetons = c("#3F1359", "#9C98BD", "#67381F", "#354920", "#F4D4CA"),
lakesuperior = c("#FAC9B1", "#DCABB0", "#6A5551", "#192E31", "#9BBFE6"),
kingscanyon = c( "#B28D92", "#A4C3E1", "#2C3E12", "#413632",
"#7E7D4F", "#2B2B1D", "#A77957", "#BA9B23", "#080503"),
picturedrocks = c("#254C84", "#58A0CD", "#5C845F", "#7E8081", "#EAAC72"),
rockymtn = c("#E8B02C", "#333D2E", "#6A9CF5", "#5F6E2D", "#CE916C"),
shenandoah = c("#253222", "#485551", "#A1A1A6", "#080A08", "#37291C"),
shenandoah2 = c("#2B4622", "#FFFE01", "#1A1A10", "#4F9503", "#727B77"),
smokies2 = c("#4C5961", "#000405", "#F2E702", "#2C4800", "#FBE1CA"),
smokies3 = c("#004E5A", "#BBD0ED", "#F0C02D", "#BF6514", "#0B0F0D"),
smokies = c("#932ABE", "#2788F9", "#FFE9E6", "#F4B80D", "#665543"),
tallgrass = c("#82A8E5", "#3C481D", "#757553", "#341A16", "#EDD8B9"),
yellowstone = c("#0154BE", "#FADB31", "#163003", "#C41F1E", "#4896F2"),
yellowstone2 = c("#244200", "#211C12", "#D5AA7D", "#663726", "#93A2BF"),
# NOTE(review): zion shares its first four colours with yellowstone2 --
# possibly a copy-paste remnant; confirm against the source photographs.
zion = c("#244200", "#211C12", "#D5AA7D", "#663726", "#9B856B")
)
#' A National Parks of the United States palette generator
#'
#' These are a handful of color palettes pulled from photographs of US
#' National Parks.
#'
#' @param name Name of desired palette, i.e. one of the names of
#'   \code{\link{np_palettes}}:
#'   \code{arches}, \code{badlands}, \code{bryce}, \code{chesapeake},
#'   \code{conagree}, \code{deathvalley}, \code{everglades},
#'   \code{flatirons}, \code{grandtetons}, \code{kingscanyon},
#'   \code{lakesuperior}, \code{picturedrocks}, \code{rockymtn},
#'   \code{shenandoah}, \code{shenandoah2}, \code{smokies},
#'   \code{smokies2}, \code{smokies3}, \code{tallgrass},
#'   \code{yellowstone}, \code{yellowstone2}, \code{zion}.
#'   (FIX: the previous list named a nonexistent \code{kingscanyon2} and
#'   omitted \code{chesapeake}, \code{everglades} and \code{yellowstone2}.)
#' @param n Number of colors desired. Most palettes hold 5 colors
#'   (\code{kingscanyon} holds 9). Palettes are picked using
#'   color.adobe.com from photographs provided by J.W. Atkins, E. Agee,
#'   A. R. Woleslagle. If omitted, uses all colours.
#' @param type Either "continuous" or "discrete". Use continuous if you want
#'   to automatically interpolate between colours.
#' @importFrom grDevices colorRampPalette
#' @return A vector of colours of class \code{"palette"}, carrying the
#'   palette name as the \code{"name"} attribute.
#' @export
#' @keywords colors
#' @examples
#' np_palette("badlands")
#' np_palette("deathvalley")
#' np_palette("deathvalley", 3)
#'
#' # If you need more colours than normally found in a palette, you
#' # can use a continuous palette to interpolate between existing
#' # colours
#' pal <- np_palette("badlands", 21, type = "continuous")
#' image(volcano, col = pal)
np_palette <- function(name, n, type = c("discrete", "continuous")) {
  type <- match.arg(type)

  pal <- np_palettes[[name]]
  if (is.null(pal)) {
    # Name the missing palette so the caller can see what was requested.
    stop("Palette '", name, "' not found.", call. = FALSE)
  }

  # Default to the palette's full length when n is not supplied.
  if (missing(n)) {
    n <- length(pal)
  }
  if (type == "discrete" && n > length(pal)) {
    stop("Number of requested colors greater than what palette can offer",
         call. = FALSE)
  }

  out <- switch(type,
    continuous = grDevices::colorRampPalette(pal)(n),
    discrete = pal[1:n]
  )
  structure(out, class = "palette", name = name)
}
#' Print method for palette objects
#'
#' Draws the palette as a horizontal strip of colour cells with the
#' palette name overlaid on a semi-transparent band. Margins are
#' temporarily shrunk and restored on exit.
#'
#' @export
#' @importFrom graphics rect par image text
#' @importFrom grDevices rgb
print.palette <- function(x, ...) {
  k <- length(x)

  # Shrink all four margins for the strip; restore them when we leave.
  opar <- par(mar = c(0.5, 0.5, 0.5, 0.5))
  on.exit(par(opar))

  # One coloured cell per palette entry, no axes or box.
  image(1:k, 1, as.matrix(1:k), col = x,
        ylab = "", xaxt = "n", yaxt = "n", bty = "n")

  # Semi-transparent white band carrying the palette name.
  rect(0, 0.9, k + 1, 1.1, col = rgb(1, 1, 1, 0.8), border = NA)
  text((k + 1) / 2, 1, labels = attr(x, "name"), cex = 1, family = "serif")
}
#' heatmap
#'
#' A heatmap example dataset shipped with the package.
#' (NOTE(review): the object's structure is not visible from this file;
#' consider adding \code{@format} and \code{@source} tags once confirmed.)
"heatmap"
|
20747ab7d3d469fba5ffd6b6d6197e973a5be947
|
0c1eeee21c2a8d5a52c86dd99b473eb865a0f085
|
/Exercise7.R
|
f6a4fe64d12f5f6373b683064244818fc7ecc6a5
|
[] |
no_license
|
afleishm/Biocomp-Fall2018-181012-Exercise7
|
dcd624d922a64f44eb4145bc8f35e022ec524612
|
e19c11c05a98969e87b491de260fcc4ad8541296
|
refs/heads/master
| 2020-04-02T06:38:58.019496
| 2018-10-24T16:06:52
| 2018-10-24T16:06:52
| 154,161,337
| 0
| 0
| null | 2018-10-22T14:46:45
| 2018-10-22T14:46:44
| null |
UTF-8
|
R
| false
| false
| 1,510
|
r
|
Exercise7.R
|
#Setting working directory
# NOTE(review): `rm(list=ls())` and a hard-coded, user-specific setwd()
# are script anti-patterns -- they wipe the caller's workspace and only
# run on the original author's machine. Kept as-is to preserve the
# script's behaviour; prefer relative paths / an RStudio project.
rm(list=ls()) #remove global environment
setwd("/Users/Ashley/Documents/Biocomputing_2018/Biocomp-Fall2018-181012-Exercise7")
# Read the iris data from the local CSV; this masks the built-in `iris`
# dataset from the datasets package for the rest of the script.
iris=read.csv("iris.csv", header = TRUE)
# Echo the whole data frame to the console.
iris
#1
# Return the odd-numbered rows (1, 3, 5, ...) of a data frame.
#
# Improvements over the original:
# * drops the dead top-level assignment `i = 1` (the for loop rebound
#   `i` and nothing else read it),
# * replaces the O(n^2) grow-by-rbind loop with a single logical
#   subset, and
# * handles a zero-row input (the old `1:nrow(df)` loop iterated over
#   c(1, 0) and fabricated an NA row).
#
# @param df A data frame.
# @return A data frame containing the odd-indexed rows of `df`, with
#   row names and column types preserved.
return_odd_rows = function(df) {
  odd <- seq_len(nrow(df)) %% 2 == 1
  df[odd, , drop = FALSE]
}
#2
#Return the number of observations for a given species included in the data set
# Vectorised rewrite: counting matches is a single sum() over the
# logical comparison instead of a row-by-row loop.
#
# @param species Character scalar, the species name to count.
# @param data Data frame with a `Species` column; defaults to the
#   global `iris` object the original implementation hard-coded
#   (backward compatible).
# @return Number of rows whose Species equals `species`; NA entries in
#   `Species` are not counted (the original loop errored on NA).
species_num = function(species, data = iris) {
  sum(data$Species == species, na.rm = TRUE)
}
#Return a dataframe for flowers with Sepal.Width greater than a value specified by the function user
# Vectorised rewrite: one logical subset replaces the grow-by-rbind
# loop (which copied the accumulator on every matching row). A
# zero-match input now returns a zero-row data frame with the original
# columns instead of an empty data.frame().
#
# @param num Numeric threshold (exclusive).
# @param data Data frame with a `Sepal.Width` column; defaults to the
#   global `iris` the original hard-coded (backward compatible).
# @return Rows of `data` whose Sepal.Width is strictly greater than `num`.
sepal_width_flowers = function(num, data = iris) {
  data[data$Sepal.Width > num, , drop = FALSE]
}
#Write the data for a given species to a comma-delimited file with the given species name as the file name. Hint: look at paste() to add the .csv extension to your file in the function.
# Vectorised rewrite of the row-by-row filter; paste0() replaces
# paste(..., sep = ""). The destination is now an explicit argument
# whose default reproduces the original behaviour ("<species>.csv" in
# the working directory).
#
# @param species Character scalar, the species whose rows are written.
# @param file Output path; defaults to paste0(species, ".csv").
# @return Invisibly, the path written to.
write_species_file = function(species, file = paste0(species, ".csv")) {
  species_data <- iris[iris$Species == species, , drop = FALSE]
  write.csv(species_data, file = file)
  invisible(file)
}
|
1d6d0b0452ae54a75552d263da39408034b979ba
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/PBImisc/examples/SejmSenat.Rd.R
|
185f787148186d6a05912ac496de5c354982bb84
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 254
|
r
|
SejmSenat.Rd.R
|
# Auto-extracted example script for the SejmSenat data set from the
# PBImisc package (generated from the .Rd examples section).
library(PBImisc)
### Name: SejmSenat
### Title: SejmSenat
### Aliases: SejmSenat
### Keywords: SejmSenat
### ** Examples

# Load the SejmSenat data shipped with PBImisc.
data(SejmSenat)
library(ca)
# Correspondence-analysis biplot; row 15 is dropped from the input
# matrix (NOTE(review): reason not stated here -- confirm against the
# package documentation).
# can you see some patterns?
plot(ca(SejmSenat[-15,]), mass =c(TRUE,TRUE), arrows =c(FALSE,TRUE))
c81184aaf8cfd98d6fb04dc3fb9620d0fba1ee2c
|
473dfd3f5c89fd2bf2087c524c52e484ecc823b6
|
/tests/testthat/test-halton.coefficients.R
|
0c7c78ad3ffe60b6819e4b31b84ddaf41402f605
|
[] |
no_license
|
cran/SDraw
|
038ec0a0f2d8a094f89d258d43edb15a003303b2
|
0b06c5ecbd424a0d9ba59fe5fd4f4bf30a1ce326
|
refs/heads/master
| 2021-01-17T19:20:33.351896
| 2020-07-03T15:20:09
| 2020-07-03T15:20:09
| 60,879,512
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 587
|
r
|
test-halton.coefficients.R
|
context("Test the halton.coefficients function")

# Behavioural tests for SDraw::halton.coefficients.
test_that("halton.coefficients() operates appropriately", {
  ## J == 0 is an invalid input and must raise an error.
  expect_error(halton.coefficients(1, 0))

  ## Large samp/J inputs produce an array with the expected dimensions.
  expect_equal(dim(halton.coefficients(300:302, c(600, 200, 100))), c(3, 600, 3))

  ## The returned storage mode is double. (FIX: the original captured
  ## the value in an unused `obj <-` assignment inside the expectation;
  ## the dead assignment is dropped.)
  expect_type(halton.coefficients(10, 20), "double")

  ## Spot-check the coefficient values themselves.
  expect_identical(as.numeric(halton.coefficients(3, 3)), c(1, 1, 0))
})
|
5f5b8b8e21ed988da4aeec461328290c8974a161
|
e62a38261550858b6162855aded0b56d69e15eb1
|
/SiVHMP1/R/H11.R
|
9447d09036cd27e4ff881add09d02750b87191c4
|
[] |
no_license
|
RavnikD/SiVHMP1P
|
0e57396410de5f71dbc554dc3c05e8a2a6191d24
|
327678dee0ccb9f154b112c96c0150e1d4b25d85
|
refs/heads/master
| 2020-04-18T06:38:53.756364
| 2019-01-24T10:35:51
| 2019-01-24T10:35:51
| 167,330,429
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,579
|
r
|
H11.R
|
library(xlsx)
dir.create("Podaci")
# Download the H11 workbook from the Croatian National Bank (HNB).
download.file("http://www.hnb.hr/documents/20182/98f5bca9-e530-461d-9c25-eb4e8b5b0efc", "Podaci/h-h11.xlsx", mode = "wb")
#' Table H11: indices of effective exchange rates of the kuna
#
#' @name h11
#' @author David Ravnik \email{dravnik@unipu.hr}
#' @author Tomislava Kolar \email{tkolar@unipu.hr}
#' @references \url{https://www.hnb.hr/statistika}
#' @keywords dataframe
#' @export
# LOAD THE ANNUAL TABLE (first sheet, rows 7-282, no header row)
H11 <- read.xlsx(file = "Podaci/h-h11.xlsx", 1,
startRow = 7, endRow = 282, as.data.frame = TRUE, header = FALSE)
# Drop columns that consist entirely of NA. (NOTE(review): the original
# comment said "rows", but the code subsets columns.)
H11 <- H11[, colSums(is.na(H11)) != nrow(H11)]
# Remove empty columns by position. NOTE(review): each single-bracket
# removal shifts the remaining column indices, so these positions refer
# to the already-shrunk frame -- verify against the spreadsheet layout.
H11 <- H11[-5]
H11 <- H11[-6]
H11 <- H11[-7]
H11 <- H11[-8]
# Assign column names (Godina = year, Mjesec = month; the rest are the
# effective-exchange-rate index series).
colnames(H11) <- c("Godina",
"Mjesec",
"Nominalni.efektivni.tecaj",
"RETD.Indeks.potrosackih.cijena",
"RETD.Indeks.proizvodackih.cijena.industrije",
"REFTKD.Jedinicni.troskovi.rada.u.ukupnome.gospodarstvu",
"REFTKD.Jedinicni.troskovi.rada.u.preradivackoj.industriji")
# Tidy the year column: keep only the 4-digit year.
H11$Godina <- as.character(H11$Godina)
H11$Godina <- substr(H11$Godina, 1, 4)
# Factor -> character for the month column.
H11$Mjesec <- as.character(H11$Mjesec)
# Trim whitespace from the month names.
H11$Mjesec <- trimws(H11$Mjesec)
# Convert the year column to year-month (YYYY -> YYYY-M). The year is
# only present on the first row of each block, so it is carried forward
# via `godina`. NOTE(review): the Croatian month-name strings below
# appear mojibake-encoded (e.g. "sijeÄŤanj"); they must match the month
# names exactly as read from the workbook -- verify the file encoding
# before re-saving this script.
for(i in c(1:nrow(H11))){
if(!is.na(H11$Godina[i])){
godina <- H11$Godina[i]
}
switch (as.character(H11$Mjesec[i]),
"sijeÄŤanj" = H11$Godina[i] <- as.character(paste0(godina,"-",1)),
"veljaÄŤa" = H11$Godina[i] <- as.character(paste0(godina,"-",2)),
"oĹľujak" = H11$Godina[i] <- as.character(paste0(godina,"-",3)),
"travanj" = H11$Godina[i] <- as.character(paste0(godina,"-",4)),
"svibanj" = H11$Godina[i] <- as.character(paste0(godina,"-",5)),
"lipanj" = H11$Godina[i] <- as.character(paste0(godina,"-",6)),
"srpanj" = H11$Godina[i] <- as.character(paste0(godina,"-",7)),
"kolovoz" = H11$Godina[i] <- as.character(paste0(godina,"-",8)),
"rujan" = H11$Godina[i] <- as.character(paste0(godina,"-",9)),
"listopad" = H11$Godina[i] <- as.character(paste0(godina,"-",10)),
"studeni" = H11$Godina[i] <- as.character(paste0(godina,"-",11)),
"prosinac" = H11$Godina[i] <- as.character(paste0(godina,"-",12))
)
}
# Drop the (now redundant) month column and rename the first column.
H11 <- H11[-2]
colnames(H11)[1] <- "Datum"
|
4a052f740228a0e4240ac79a74012a362b740280
|
97e62c4e55fe669e545260689b3787ef22f81332
|
/estimate_risk_CVD.R
|
9abbdad54d18bd9de8bfd82d59f3c104fe3e1e0a
|
[] |
no_license
|
RSaikiRSaiki/CH_2021
|
7bafdad1e1a1dfe887d5cb656638aab5693fa249
|
10065b8d35c18b6a75e0db5d5062a3b75566f526
|
refs/heads/main
| 2023-07-01T15:06:20.290483
| 2021-08-04T15:11:25
| 2021-08-04T15:11:25
| 355,820,175
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,410
|
r
|
estimate_risk_CVD.R
|
## import genomic data ##
# Project-specific loader; defines `cmp_data_org` (NOTE(review): the
# sourced scripts are not visible here, so column semantics below are
# inferred from usage -- confirm).
source("ImportGenomicData_CVD_OS.R")
# Drop the transformation endpoint columns, keep complete cases, and
# expose the row names as an explicit `id` column.
cmp_data_org = cmp_data_org[,setdiff(colnames(cmp_data_org),c("all_time_to_traf","traf_fail"))]
cmp_data_org = na.omit(cmp_data_org)
id = rownames(cmp_data_org)
cmp_data_org = cbind(id,cmp_data_org)
## select disease ##
# The event indicator for all models below is CVD status.
cmp_data_org$fail = cmp_data_org$status_CVD
## functions ##
source("show_di.R")
source("writeForest.R")
source("getCnaLabel.R")
## cohort study, fine-gray ##
# fg() presumably prepares case-cohort weighted data for Fine-Gray
# competing-risk regression (counting-process columns fgstart / fgstop /
# fgstatus and weights fgwt are used throughout) -- confirm upstream.
source("prepareCaseCohortFineGray.R")
pdata = fg(cmp_data_org)
# Restrict to the random subcohort.
pdata = pdata[which(pdata$is_subcohort==1),]
## Cumulative mortality in no-CH cases ##
pdata_none = pdata[which(pdata$is_none==1),]
m_none = coxph(Surv(fgstart, fgstop, fgstatus) ~ 1, weight=fgwt, data=pdata_none)
sf_none = survfit(m_none)
## Effect of SNV ##
## draw Fig.6a ##
# Three strata -- no SNV, small SNV clone, large SNV clone -- each with
# an intercept-only weighted fit for its cumulative incidence curve.
pdata_mut_none = pdata[which(pdata$is_mut==0 & pdata$is_mut_large==0),]
sf_mut_none = survfit(coxph(Surv(fgstart, fgstop, fgstatus) ~ 1, weight=fgwt, data=pdata_mut_none))
pdata_mut_small = pdata[which(pdata$is_mut==1 & pdata$is_mut_large==0),]
sf_mut_small = survfit(coxph(Surv(fgstart, fgstop, fgstatus) ~ 1, weight=fgwt, data=pdata_mut_small))
pdata_mut_large = pdata[which(pdata$is_mut==1 & pdata$is_mut_large==1),]
sf_mut_large = survfit(coxph(Surv(fgstart, fgstop, fgstatus) ~ 1, weight=fgwt, data=pdata_mut_large))
pdf("cum_inc_snv_small_large_fg.pdf",width=4,height=5)
par(mfrow=c(1,1),mai=rep(0.2,4))
show_di(list(sf_mut_large,sf_mut_small,sf_mut_none),"",c("red","pink","gray"))
dev.off()
## result of comparison ##
## SNV(small) vs no CH
## SNV(large) vs. no CH
# Covariate-adjusted models. NOTE(review): `pdata_mut_small` is reused
# here with a different meaning -- now "everyone without a large SNV
# clone" -- shadowing the plotting subset above.
pdata_mut_small = pdata[which(pdata$is_mut_large==0),]
m_mut_small = coxph(Surv(fgstart, fgstop, fgstatus) ~ is_mut+fac_age+is_male+is_OEE_1_0+is_OEE_1_2+all_BMI+all_HT+all_DM+all_HL+all_Smoke+all_Drink, weight=fgwt, data=pdata_mut_small)
summary(m_mut_small)
pdata_mut_large = pdata[which(pdata$is_mut_large==1 | pdata$is_mut==0),]
m_mut_large = coxph(Surv(fgstart, fgstop, fgstatus) ~ is_mut_large+fac_age+is_male+is_OEE_1_0+is_OEE_1_2+all_BMI+all_HT+all_DM+all_HL+all_Smoke+all_Drink, weight=fgwt, data=pdata_mut_large)
summary(m_mut_large)
# Hazard-ratio table: the CH-indicator row from each adjusted model.
m_res = rbind(
c(summary(m_mut_small)$coef["is_mut",],summary(m_mut_small)$conf.int["is_mut",]),
c(summary(m_mut_large)$coef["is_mut_large",],summary(m_mut_large)$conf.int["is_mut_large",]))
rownames(m_res) = c("VAF<5% vs no SNV","VAF>=5% vs no SNV")
m_res
## Effect of CNA ##
## draw Fig.6c ##
# Cumulative incidence curves for no CNA, small CNA clone, and large
# CNA clone (intercept-only weighted fits per stratum).
pdata_cna_none = pdata[which(pdata$is_cna==0 & pdata$is_cna_large==0),]
sf_cna_none = survfit(coxph(Surv(fgstart, fgstop, fgstatus) ~ 1, weight=fgwt, data=pdata_cna_none))
pdata_cna_small = pdata[which(pdata$is_cna==1 & pdata$is_cna_large==0),]
sf_cna_small = survfit(coxph(Surv(fgstart, fgstop, fgstatus) ~ 1, weight=fgwt, data=pdata_cna_small))
pdata_cna_large = pdata[which(pdata$is_cna==1 & pdata$is_cna_large==1),]
sf_cna_large = survfit(coxph(Surv(fgstart, fgstop, fgstatus) ~ 1, weight=fgwt, data=pdata_cna_large))
pdf("cum_inc_cna_small_large_fg.pdf",width=4,height=5)
par(mfrow=c(1,1),mai=rep(0.2,4))
show_di(list(sf_cna_large,sf_cna_small,sf_cna_none),"",c("red","pink","gray"))
dev.off()
## result of comparison ##
## CNA(small) vs no CH
## CNA(large) vs. no CH
# NOTE(review): `pdata_cna_small` / `pdata_cna_large` are reused here
# with different meanings than the plotting subsets above.
pdata_cna_small = pdata[which(pdata$is_cna_large==0),]
m_cna_small = coxph(Surv(fgstart, fgstop, fgstatus) ~ is_cna+fac_age+is_male+is_OEE_1_0+is_OEE_1_2+all_BMI+all_HT+all_DM+all_HL+all_Smoke+all_Drink, weight=fgwt, data=pdata_cna_small)
summary(m_cna_small)
pdata_cna_large = pdata[which(pdata$is_cna_large==1 | pdata$is_cna==0),]
m_cna_large = coxph(Surv(fgstart, fgstop, fgstatus) ~ is_cna_large+fac_age+is_male+is_OEE_1_0+is_OEE_1_2+all_BMI+all_HT+all_DM+all_HL+all_Smoke+all_Drink, weight=fgwt, data=pdata_cna_large)
summary(m_cna_large)
m_res = rbind(
c(summary(m_cna_small)$coef["is_cna",],summary(m_cna_small)$conf.int["is_cna",]),
c(summary(m_cna_large)$coef["is_cna_large",],summary(m_cna_large)$conf.int["is_cna_large",]))
# FIX: these row labels were copy-pasted from the SNV section and
# wrongly read "VAF<5% vs no SNV" / "VAF>=5% vs no SNV"; the two rows
# compare CNA strata. (The exact clone-size metric behind is_cna_large
# is not visible in this file -- labels kept descriptive.)
rownames(m_res) = c("CNA (small) vs no CNA","CNA (large) vs no CNA")
m_res
## Effect of combined SNV & CNA ##
## draw Fig.6c ##
# Four strata: both alterations, large SNV alone, CNA alone, neither.
pdata_both = pdata[which(pdata$is_mut_large==1 & pdata$is_cna==1),]
sf_both = survfit(coxph(Surv(fgstart, fgstop, fgstatus) ~ 1, weight=fgwt, data=pdata_both))
pdata_mut_alone = pdata[which(pdata$is_mut_large==1 & pdata$is_cna==0),]
sf_mut_alone = survfit(coxph(Surv(fgstart, fgstop, fgstatus) ~ 1, weight=fgwt, data=pdata_mut_alone))
pdata_cna_alone = pdata[which(pdata$is_mut_large==0 & pdata$is_cna==1),]
sf_cna_alone = survfit(coxph(Surv(fgstart, fgstop, fgstatus) ~ 1, weight=fgwt, data=pdata_cna_alone))
pdata_none = pdata[which(pdata$is_none==1),]
sf_none = survfit(coxph(Surv(fgstart, fgstop, fgstatus) ~ 1, weight=fgwt, data=pdata_none))
pdf("both_cum_inc_fg_vaf_large.pdf",width=4,height=5)
par(mfrow=c(1,1),mai =rep(0.2,4))
show_di(list(sf_both,sf_mut_alone,sf_cna_alone,sf_none),"Fine-Gray",c("purple","red","skyblue","gray"),ymax_df=0.40)
dev.off()
## result of comparison ##
## Both vs SNV(>5%) alone
## Both vs. CNA alone
## Both vs. Either
pdata_both = pdata[which(pdata$is_mut_large==1 & pdata$is_cna==1),]
pdata_mut_alone = pdata[which(pdata$is_mut_large==1 & pdata$is_cna==0),]
pdata_both_mut_alone = rbind(pdata_both,pdata_mut_alone)
# This model additionally adjusts for the VAF category (fac_vaf).
m_both_mut_alone = coxph(Surv(fgstart, fgstop, fgstatus) ~ is_cna+fac_age+is_male+is_OEE_1_0+is_OEE_1_2+all_BMI+all_HT+all_DM+all_HL+all_Smoke+all_Drink+fac_vaf, weight=fgwt, data=pdata_both_mut_alone)
pdata_both = pdata[which(pdata$is_mut_large==1 & pdata$is_cna==1),]
pdata_cna_alone = pdata[which(pdata$is_mut_large==0 & pdata$is_cna==1),]
pdata_both_cna_alone = rbind(pdata_both,pdata_cna_alone)
m_both_cna_alone = coxph(Surv(fgstart, fgstop, fgstatus) ~ is_mut_large+fac_age+is_male+is_OEE_1_0+is_OEE_1_2+all_BMI+all_HT+all_DM+all_HL+all_Smoke+all_Drink, weight=fgwt, data=pdata_both_cna_alone)
pdata_both = pdata[which(pdata$is_mut_large==1 & pdata$is_cna==1),]
pdata_mut_alone = pdata[which(pdata$is_mut_large==1 & pdata$is_cna==0),]
pdata_cna_alone = pdata[which(pdata$is_mut_large==0 & pdata$is_cna==1),]
pdata_both_either = rbind(pdata_both,pdata_mut_alone,pdata_cna_alone)
# NOTE(review): if `is_both` is not already a column of pdata, these
# partial assignments can misbehave (creating a too-short vector);
# confirm that fg() defines is_both upstream.
pdata_both_either$is_both[which(pdata_both_either$is_mut_large!=1 | pdata_both_either$is_cna!=1)] = 0
pdata_both_either$is_both[which(pdata_both_either$is_mut_large==1 & pdata_both_either$is_cna==1)] = 1
m_both_either = coxph(Surv(fgstart, fgstop, fgstatus) ~ is_both+fac_age+is_male+is_OEE_1_0+is_OEE_1_2+all_BMI+all_HT+all_DM+all_HL+all_Smoke+all_Drink, weight=fgwt, data=pdata_both_either)
m_res = rbind(
c(summary(m_both_mut_alone)$coef["is_cna",],summary(m_both_mut_alone)$conf.int["is_cna",]),
c(summary(m_both_cna_alone)$coef["is_mut_large",],summary(m_both_cna_alone)$conf.int["is_mut_large",]),
c(summary(m_both_either)$coef["is_both",],summary(m_both_either)$conf.int["is_both",])
)
rownames(m_res) = c("both vs mut (VAF>5%)","both vs cna","both vs either")
m_res
### prob. bi-allele vs prob. mono-allele ###
## define bi-allelic cases ##
# A sample is "bi-allelic" when any of the listed genes carries a cis
# alteration pair, as flagged by the all_cis_* indicator columns.
pdata_2 = pdata
pdata_2$is_biallelic = as.numeric(
  (pdata_2$all_cis_jak2==1) |
  (pdata_2$all_cis_tp53==1) |
  (pdata_2$all_cis_dnmt3a==1) |
  (pdata_2$all_cis_tet2 == 1) |
  (pdata_2$all_cis_ezh2==1) |
  (pdata_2$all_cis_cbl==1) |
  (pdata_2$all_cis_runx1==1))
## divide into categories ##
# Categories: bi-allelic; mono-allelic with both alteration types and a
# large SNV clone; large-clone SNV alone. NOTE(review): `is_both` is
# assumed to be a column created upstream (e.g. by fg()) -- confirm.
pdata_bi_all = pdata_2[which(pdata_2$is_biallelic==1),]
pdata_mono = pdata_2[which(pdata_2$is_biallelic==0 & pdata_2$is_both==1 & pdata_2$is_mut_large==1),]
pdata_snv = pdata[which(pdata$is_mut_large==1 & pdata$is_cna==0),]
pdata_snv$is_biallelic = 0
## Draw ED Fig.6d ##
pdata_none = pdata[which(pdata$is_none==1),]
m_none = coxph(Surv(fgstart, fgstop, fgstatus) ~ 1, weight=fgwt, data=pdata_none)
sf_none = survfit(m_none)
m_mono <- coxph(Surv(fgstart, fgstop, fgstatus) ~ 1, weight=fgwt, data=pdata_mono)
sf_mono = survfit(m_mono)
m_bi_all <- coxph(Surv(fgstart, fgstop, fgstatus) ~ 1, weight=fgwt, data=pdata_bi_all)
sf_bi_all = survfit(m_bi_all)
m_snv <- coxph(Surv(fgstart, fgstop, fgstatus) ~ 1, weight=fgwt, data=pdata_snv)
sf_snv = survfit(m_snv)
pdf("bi_mono_fg.pdf",width=7,height=10)
show_di(list(sf_bi_all,sf_mono,sf_snv,sf_none),">=2 alterations",c("purple","violet","red","gray"),ymax_df=0.5)
dev.off()
## Comparison among categories ##
# FIX: the original rbind() calls referenced `pdata_bi`, which is never
# defined in this script (the bi-allelic subset above is named
# `pdata_bi_all`), so this section failed with "object 'pdata_bi' not
# found". Corrected to `pdata_bi_all`.
pdata_bi_mono = rbind(pdata_bi_all,pdata_mono)
m_bi_mono <- coxph(Surv(fgstart, fgstop, fgstatus) ~ is_biallelic + fac_age + is_male + is_OEE_1_0 + is_OEE_1_2, weight=fgwt, data=pdata_bi_mono)
summary(m_bi_mono)
pdata_mono_snv = rbind(pdata_mono,pdata_snv)
m_both_mono_snv <- coxph(Surv(fgstart, fgstop, fgstatus) ~ is_both + fac_age + is_male + is_OEE_1_0 + is_OEE_1_2, weight=fgwt, data=pdata_mono_snv)
summary(m_both_mono_snv)
pdata_bi_snv = rbind(pdata_bi_all,pdata_snv)
m_both_bi_snv <- coxph(Surv(fgstart, fgstop, fgstatus) ~ is_biallelic + fac_age + is_male + is_OEE_1_0 + is_OEE_1_2, weight=fgwt, data=pdata_bi_snv)
summary(m_both_bi_snv)
## both vs either, stratified by n alt ##
## Draw ED Fig.10d-f ##
# Panel 1: subjects with >=2 alterations; panels 2-4 (loop below):
# exactly i alterations for i = 2, 3, and >=4 at i = 4.
pdf("both_multi_inc_mut_large.pdf",width=13,height=5)
par(mfrow=c(1,3),mai=rep(0.2,4))
pdata_mut = pdata[which(pdata$all_n_alt==1 & pdata$is_both==0 & pdata$is_mut_large==1),]
sf_mut = survfit( coxph(Surv(fgstart, fgstop, fgstatus) ~ 1, weight=fgwt, data=pdata_mut))
pdata_both = pdata[which(pdata$all_n_alt>=2 & pdata$is_both==1 & pdata$is_mut_large==1),]
sf_both = survfit( coxph(Surv(fgstart, fgstop, fgstatus) ~ 1, weight=fgwt, data=pdata_both))
pdata_mut_alone_multi = pdata[which(pdata$all_n_alt>=2 & pdata$all_n_cna==0 & pdata$is_mut_large==1),]
sf_mut_alone_multi = survfit(coxph(Surv(fgstart, fgstop, fgstatus) ~ 1, weight=fgwt, data=pdata_mut_alone_multi))
pdata_none = pdata[which(pdata$is_none==1),]
sf_none = survfit( coxph(Surv(fgstart, fgstop, fgstatus) ~ 1, weight=fgwt, data=pdata_none))
show_di(list(sf_both,sf_mut_alone_multi,sf_mut,sf_none),
paste0(">=2 alterations"),c("purple","red","orange","gray"),ymax_df=0.35)
pdata_both_mut_multi = rbind(pdata_both,pdata_mut_alone_multi)
m_both_mut_multi <- coxph(Surv(fgstart, fgstop, fgstatus) ~ is_both+fac_age+is_male+is_OEE_1_0+is_OEE_1_2+all_BMI+all_HT+all_DM+all_HL+all_Smoke+all_Drink,weight=fgwt,data=pdata_both_mut_multi)
pdata_both_mut = rbind(pdata_both,pdata_mut)
m_both_mut <- coxph(Surv(fgstart, fgstop, fgstatus) ~ is_both+fac_age+is_male+is_OEE_1_0+is_OEE_1_2+all_BMI+all_HT+all_DM+all_HL+all_Smoke+all_Drink,weight=fgwt,data=pdata_both_mut)
# NOTE(review): comb_all grows by rbind inside the loop below --
# acceptable for 4 iterations, but worth refactoring if extended.
comb_all = rbind(
c(summary(m_both_mut_multi)$coef[1,],summary(m_both_mut_multi)$conf.int[1,]),
c(summary(m_both_mut)$coef[1,],summary(m_both_mut)$conf.int[1,])
)
# Same analysis per alteration count (i alterations, >=i in the last
# comparison subset); redefines the same working variables each pass.
for(i in 2:4){
pdata_mut = pdata[which(pdata$all_n_alt==1 & pdata$is_both==0 & pdata$is_mut_large==1),]
sf_mut = survfit( coxph(Surv(fgstart, fgstop, fgstatus) ~ 1, weight=fgwt, data=pdata_mut))
pdata_both = pdata[which(pdata$all_n_alt==i & pdata$is_both==1 & pdata$is_mut_large==1),]
sf_both = survfit( coxph(Surv(fgstart, fgstop, fgstatus) ~ 1, weight=fgwt, data=pdata_both))
pdata_mut_alone_multi = pdata[which(pdata$all_n_alt>=i & pdata$all_n_cna==0 & pdata$is_mut_large==1),]
sf_mut_alone_multi = survfit(coxph(Surv(fgstart, fgstop, fgstatus) ~ 1, weight=fgwt, data=pdata_mut_alone_multi))
pdata_none = pdata[which(pdata$is_none==1),]
sf_none = survfit( coxph(Surv(fgstart, fgstop, fgstatus) ~ 1, weight=fgwt, data=pdata_none))
show_di(list(sf_both,sf_mut_alone_multi,sf_mut,sf_none),
paste0(i," alterations"),c("purple","red","orange","gray"),ymax_df=0.35)
pdata_both_mut_multi = rbind(pdata_both,pdata_mut_alone_multi)
m_both_mut_multi <- coxph(Surv(fgstart, fgstop, fgstatus) ~ is_both+fac_age+is_male+is_OEE_1_0+is_OEE_1_2+all_BMI+all_HT+all_DM+all_HL+all_Smoke+all_Drink,weight=fgwt,data=pdata_both_mut_multi)
pdata_both_mut = rbind(pdata_both,pdata_mut)
m_both_mut <- coxph(Surv(fgstart, fgstop, fgstatus) ~ is_both+fac_age+is_male+is_OEE_1_0+is_OEE_1_2+all_BMI+all_HT+all_DM+all_HL+all_Smoke+all_Drink,weight=fgwt,data=pdata_both_mut)
comb_all = rbind(comb_all,
c(summary(m_both_mut_multi)$coef[1,],summary(m_both_mut_multi)$conf.int[1,]),
c(summary(m_both_mut)$coef[1,],summary(m_both_mut)$conf.int[1,])
)
}
dev.off()
## result of comparison ##
## Both vs. SNV(VAF>5%) alone
comb_all
## number of alterations ##
## draw ED Fig.10g ##
# Cumulative incidence by alteration count: exactly 1, exactly 2, >=3,
# and the no-CH reference curve (sf_none from the section above).
sf_list = list()
for(i in 1:3){
if(i<3){
pdata_i = pdata[which(pdata$all_n_alt==i),]
}else if(i==3){
pdata_i = pdata[which(pdata$all_n_alt>=i),]
}
m_i <- coxph(Surv(fgstart, fgstop, fgstatus) ~ 1, weight=fgwt, data=pdata_i)
sf_list[[i]] = survfit(m_i)
}
sf_list[[4]] = sf_none
pdf("cum_inc_by_n_alt.pdf",width=5,height=6.5)
show_di(sf_list,"",c("firebrick1","firebrick3","firebrick4","gray"),ymax_df=0.30)
dev.off()
## Cmparison among subjects with different numbers of alterations ##
# Pairwise adjusted comparisons; each loop prepends its coefficient row
# to res_n_alt. NOTE(review): `cvd` (model-selection flag) and `is_any`
# are not defined anywhere in this file -- presumably set by a sourced
# script; if `cvd` is missing these loops fail. Verify upstream.
res_n_alt = matrix(nrow=0,ncol=9)
for(i in 1:3){
if(i<3){
pdata_i = pdata[which(pdata$all_n_alt==0 | pdata$all_n_alt==i),]
}else if(i==3){
pdata_i = pdata[which(pdata$all_n_alt==0 | pdata$all_n_alt>=i),]
}
if(!cvd) m_i <- coxph(Surv(fgstart, fgstop, fgstatus) ~ is_any+fac_age+is_male+is_OEE_1_0+is_OEE_1_2,weight=fgwt,data=pdata_i)
if(cvd) m_i <- coxph(Surv(fgstart, fgstop, fgstatus) ~ is_any+fac_age+is_male+is_OEE_1_0+is_OEE_1_2+all_BMI+all_HT+all_DM+all_HL+all_Smoke+all_Drink,weight=fgwt,data=pdata_i)
print(summary(m_i))
res_n_alt = rbind(c(summary(m_i)$coef[1,],summary(m_i)$conf.int[1,]),res_n_alt)
}
# i vs i+1 alterations (the count column is recoded to a 0/1 contrast).
for(i in 0:2){
if(i<2){
pdata_i = pdata[which(pdata$all_n_alt==i | pdata$all_n_alt==i+1),]
}else if(i==2){
pdata_i = pdata[which(pdata$all_n_alt==i | pdata$all_n_alt>=i+1),]
}
pdata_i$all_n_alt[which(pdata_i$all_n_alt==i)] = 0
pdata_i$all_n_alt[which(pdata_i$all_n_alt>=i+1)] = 1
if(!cvd) m_i <- coxph(Surv(fgstart, fgstop, fgstatus) ~ all_n_alt+fac_age+is_male+is_OEE_1_0+is_OEE_1_2,weight=fgwt,data=pdata_i)
if(cvd) m_i <- coxph(Surv(fgstart, fgstop, fgstatus) ~ all_n_alt+fac_age+is_male+is_OEE_1_0+is_OEE_1_2+all_BMI+all_HT+all_DM+all_HL+all_Smoke+all_Drink,weight=fgwt,data=pdata_i)
print(summary(m_i))
res_n_alt = rbind(c(summary(m_i)$coef[1,],summary(m_i)$conf.int[1,]),res_n_alt)
}
# i vs i+2 alterations.
for(i in 0:1){
if(i<1){
pdata_i = pdata[which(pdata$all_n_alt==i | pdata$all_n_alt==i+2),]
}else if(i==1){
pdata_i = pdata[which(pdata$all_n_alt==i | pdata$all_n_alt>=i+2),]
}
pdata_i$all_n_alt[which(pdata_i$all_n_alt==i)] = 0
pdata_i$all_n_alt[which(pdata_i$all_n_alt>=i+2)] = 1
if(!cvd) m_i <- coxph(Surv(fgstart, fgstop, fgstatus) ~ all_n_alt+fac_age+is_male+is_OEE_1_0+is_OEE_1_2,weight=fgwt,data=pdata_i)
if(cvd) m_i <- coxph(Surv(fgstart, fgstop, fgstatus) ~ all_n_alt+fac_age+is_male+is_OEE_1_0+is_OEE_1_2+all_BMI+all_HT+all_DM+all_HL+all_Smoke+all_Drink,weight=fgwt,data=pdata_i)
print(summary(m_i))
res_n_alt = rbind(c(summary(m_i)$coef[1,],summary(m_i)$conf.int[1,]),res_n_alt)
}
# 0 vs >=3 alterations (single-iteration loop kept for symmetry; the
# i<0 branch is unreachable).
for(i in 0:0){
if(i<0){
pdata_i = pdata[which(pdata$all_n_alt==i | pdata$all_n_alt==i+3),]
}else if(i==0){
pdata_i = pdata[which(pdata$all_n_alt==i | pdata$all_n_alt>=i+3),]
}
pdata_i$all_n_alt[which(pdata_i$all_n_alt==i)] = 0
pdata_i$all_n_alt[which(pdata_i$all_n_alt>=i+3)] = 1
if(!cvd) m_i <- coxph(Surv(fgstart, fgstop, fgstatus) ~ all_n_alt+fac_age+is_male+is_OEE_1_0+is_OEE_1_2,weight=fgwt,data=pdata_i)
if(cvd) m_i <- coxph(Surv(fgstart, fgstop, fgstatus) ~ all_n_alt+fac_age+is_male+is_OEE_1_0+is_OEE_1_2+all_BMI+all_HT+all_DM+all_HL+all_Smoke+all_Drink,weight=fgwt,data=pdata_i)
print(summary(m_i))
res_n_alt = rbind(c(summary(m_i)$coef[1,],summary(m_i)$conf.int[1,]),res_n_alt)
}
## result of comparison ##
res_n_alt
## analysis end ##
# NOTE(review): q() terminates the R session when this script is run.
q()
|
d8a131075b4d188400eeabf2539862cddb8dbb23
|
c9d0e8b9fe980c243f220e86322568bae2ee5650
|
/man/gs_summary_overview_pair.Rd
|
0671a42bf0e712024d9634bfc298ab36fe643288
|
[
"MIT"
] |
permissive
|
federicomarini/GeneTonic
|
20c0043dcf63a1d4aadc9b0718a263d20fd1dd42
|
a874402b40df9e276f6c5637675ea35472448c02
|
refs/heads/devel
| 2023-08-17T05:52:09.962420
| 2023-08-10T10:49:08
| 2023-08-10T10:49:08
| 193,464,295
| 73
| 9
|
NOASSERTION
| 2023-03-14T14:35:01
| 2019-06-24T08:21:40
|
R
|
UTF-8
|
R
| false
| true
| 3,020
|
rd
|
gs_summary_overview_pair.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gs_summaries.R
\name{gs_summary_overview_pair}
\alias{gs_summary_overview_pair}
\title{Plots a summary of enrichment results}
\usage{
gs_summary_overview_pair(
res_enrich,
res_enrich2,
n_gs = 20,
p_value_column = "gs_pvalue",
color_by = "z_score",
alpha_set2 = 1
)
}
\arguments{
\item{res_enrich}{A \code{data.frame} object, storing the result of the functional
enrichment analysis. See more in the main function, \code{\link[=GeneTonic]{GeneTonic()}}, to check the
formatting requirements (a minimal set of columns should be present).}
\item{res_enrich2}{As \code{res_enrich}, the result of functional enrichment analysis,
in a scenario/contrast different than the first set.}
\item{n_gs}{Integer value, corresponding to the maximal number of gene sets to
be displayed}
\item{p_value_column}{Character string, specifying the column of \code{res_enrich}
where the p-value to be represented is specified. Defaults to \code{gs_pvalue}
(it could have other values, in case more than one p-value - or an adjusted
p-value - have been specified).}
\item{color_by}{Character, specifying the column of \code{res_enrich} to be used
for coloring the plotted gene sets. Defaults sensibly to \code{z_score}.}
\item{alpha_set2}{Numeric value, between 0 and 1, which specified the alpha
transparency used for plotting the points for gene set 2.}
}
\value{
A \code{ggplot} object
}
\description{
Plots a summary of enrichment results - for two sets of results
}
\examples{
library("macrophage")
library("DESeq2")
library("org.Hs.eg.db")
library("AnnotationDbi")
# dds object
data("gse", package = "macrophage")
dds_macrophage <- DESeqDataSet(gse, design = ~ line + condition)
rownames(dds_macrophage) <- substr(rownames(dds_macrophage), 1, 15)
dds_macrophage <- estimateSizeFactors(dds_macrophage)
# annotation object
anno_df <- data.frame(
gene_id = rownames(dds_macrophage),
gene_name = mapIds(org.Hs.eg.db,
keys = rownames(dds_macrophage),
column = "SYMBOL",
keytype = "ENSEMBL"
),
stringsAsFactors = FALSE,
row.names = rownames(dds_macrophage)
)
# res object
data(res_de_macrophage, package = "GeneTonic")
res_de <- res_macrophage_IFNg_vs_naive
# res_enrich object
data(res_enrich_macrophage, package = "GeneTonic")
res_enrich <- shake_topGOtableResult(topgoDE_macrophage_IFNg_vs_naive)
res_enrich <- get_aggrscores(res_enrich, res_de, anno_df)
res_enrich2 <- res_enrich[1:42, ]
set.seed(42)
shuffled_ones <- sample(seq_len(42)) # to generate permuted p-values
res_enrich2$gs_pvalue <- res_enrich2$gs_pvalue[shuffled_ones]
res_enrich2$z_score <- res_enrich2$z_score[shuffled_ones]
res_enrich2$aggr_score <- res_enrich2$aggr_score[shuffled_ones]
# ideally, I would also permute the z scores and aggregated scores
gs_summary_overview_pair(
res_enrich = res_enrich,
res_enrich2 = res_enrich2
)
}
\seealso{
\code{\link[=gs_summary_overview]{gs_summary_overview()}}, \code{\link[=gs_horizon]{gs_horizon()}}
}
|
aa1f63080aebbf0ec09a55f1ebd2d143c78831dd
|
91b964ca761e887da5283f2afbf3179f24c538dd
|
/sources/sentiws/create-data_dictionary_sentiws.R
|
697b15b27ce80b421e38632b122ae2535beabe57
|
[] |
no_license
|
olgasparyan/quanteda.sentiment
|
5565b5b8cf4f904b550fad01b8f45ecb770ce357
|
9808d0c04dbff3ea6b3336e3084db7ddc529fcab
|
refs/heads/master
| 2023-03-22T13:42:59.419608
| 2021-03-08T16:13:58
| 2021-03-08T16:13:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,515
|
r
|
create-data_dictionary_sentiws.R
|
# SentiWS Dictionary
library("quanteda")
library("dplyr")
library("tidyr")
library("stringr")
# Parse one SentiWS source file into a long (word, score) data frame.
#
# Each input row appears to hold a "lemma|POS" token (V1), a sentiment
# weight (V2), and a comma/hyphen-separated list of inflected forms
# (V3). The pipeline splits the inflection list (up to 50 forms),
# strips the "|POS" suffix from the lemma, reshapes lemma + inflections
# into one row per word form, and returns the result sorted by word
# with the weight in `score` -- confirm against the SentiWS file spec.
#
# NOTE(review): relies on magrittr's `.` placeholder inside cbind() and
# on the superseded tidyr::gather(); the final `results <- ...`
# assignment is what the function returns (invisibly) -- an explicit
# final expression would be clearer. Left untouched because the
# reshape is sensitive to exact column ordering.
read_senti_scores <- function(filename) {
results <- read.delim(filename, header = FALSE, encoding="UTF-8") %>%
cbind(str_split_fixed(.$V3, "[,-]",50),stringsAsFactors = FALSE) %>%
mutate(
V1 = str_sub(str_match(V1,".*\\|"),1,-2),
nr = row_number()
) %>%
select(-V3) %>%
mutate(nr = as.character(nr)) %>%
gather(wordstem,word,V1,1:48, -nr,-V2) %>%
select(word,V2) %>% rename(score=V2) %>%
filter(word != "") %>%
arrange(word)
}
# Read the positive and negative SentiWS word lists and tag each with
# its sentiment label; unique() guards against duplicate (word, score)
# rows produced by the reshape.
positive <- read_senti_scores("sources/sentiws/SentiWS_v1.8c_Positive.txt") %>%
mutate(sentiment = "positive") %>%
unique()
negative <- read_senti_scores("sources/sentiws/SentiWS_v1.8c_Negative.txt") %>%
mutate(sentiment = "negative") %>%
unique()
sentis <- bind_rows(positive, negative)
# Build the quanteda dictionary; polarity()/valence() setters come from
# quanteda.sentiment.
data_dictionary_sentiws <- as.dictionary(sentis)
polarity(data_dictionary_sentiws) <-
list(pos = c("positive"), neg = c("negative"))
# NOTE(review): the valence vectors are de-duplicated by word but are
# plain unnamed numerics -- verify that valence() aligns scores to
# dictionary entries by position as intended.
valence(data_dictionary_sentiws) <-
list(positive = positive[!duplicated(positive$word), "score"],
negative = negative[!duplicated(negative$word), "score"])
# Attach citation/licensing metadata to the dictionary object.
meta(data_dictionary_sentiws) <-
list(
title = "SentimentWortschatz (SentiWS)",
description = "A quanteda dictionary object containing SentimentWortschatz (SentiWS), a publicly available German-language resource for sentiment analysis. The current version of SentiWS contains 1,650 positive and 1,818 negative words, which sum up to 15,649 positive and 15,632 negative word forms including their inflections. It not only contains adjectives and adverbs explicitly expressing a sentiment, but also nouns and verbs implicitly containing one. The original dictionary weights within the interval of -1 to 1. Note that the version implemented in quanteda.dictionaries uses a binary classification into positive (weight > 0) and negative (weight < 0) features.",
url = "http://wortschatz.uni-leipzig.de/en/download/",
reference = "Remus, R., Quasthoff U., and Heyer, G. (2010). [SentiWS: a Publicly Available German-language Resource for Sentiment Analysis](http://www.lrec-conf.org/proceedings/lrec2010/pdf/490_Paper.pdf). In _Proceedings of the 7th International Language Ressources and Evaluation (LREC'10)_, 1168--1171.",
license = "CC-BY-NC-SA 3.0"
)
# Save the dictionary into the package's data/ directory.
usethis::use_data(data_dictionary_sentiws, overwrite = TRUE)
|
7d1babae1b7eb030708fd125ae74c3b7003ef6fa
|
51397736808dbafc8b19c2cb8be7fc2641e6c1b2
|
/R/theme_hodgeslab_basic.R
|
bce5e91512007a9b039f67cb50774a99c4be337e
|
[
"MIT"
] |
permissive
|
hodgeslab/hodgeslabR
|
77d70e3376b1ca25ff2f739e75e6b62dca933410
|
05ae272ddd19b74f7508be3a08ba66ef93445499
|
refs/heads/master
| 2021-08-04T12:26:34.961501
| 2021-07-28T21:14:22
| 2021-07-28T21:14:22
| 66,617,723
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,707
|
r
|
theme_hodgeslab_basic.R
|
#' Basic ggplot2 theme for the Hodges lab
#'
#' This function defines the basic theme for plots coming from the Hodges lab.
#'
#' @param base_size Sets the Postscript font size for all labels, in units of pt. Defaults to 7.
#' @param base_family Sets the Postscript font family for all labels. Defaults to empty "".
#' @param line_size Sets the line width for lines used in plots, in units of pt. Defaults to 0.25.
#' @param grid If TRUE, adds light grey major and minor grid lines. Defaults to FALSE.
#' @param rotx Rotation angle in degrees for the x-axis tick labels. Defaults to 0.
#' @param box If TRUE, replaces the two axis lines with a full panel border. Defaults to FALSE.
#' @keywords hodgeslab theme
#' @export
#' @examples
#' ggplot(df,aes(x=x,y=y)) + geom_point() + theme_hodgeslab_basic()
# define hodges themes
theme_hodgeslab_basic <- function(base_size = 7, base_family = "", line_size = 0.25, grid = FALSE, rotx = 0, box = FALSE) {
  # Fix: `linescale` was referenced throughout but never defined anywhere in
  # this function, so calls failed unless a global `linescale` happened to
  # exist. Define it locally as 1 so `line_size * linescale` reduces to the
  # documented `line_size`.
  linescale <- 1
  p <- theme_classic(base_size = base_size, base_family = base_family) %+replace%
    theme(
      axis.text.x = element_text(size = base_size * 1, lineheight = 1, vjust = 1, colour="black", margin=margin(1,0,0,0)),
      axis.text.y = element_text(size = base_size * 1, lineheight = 1, hjust = 1, colour="black", margin=margin(0,1,0,0)),
      axis.title.x = element_text(size = base_size, vjust = 1, colour="black", margin=margin(base_size/2,0,0,0)),
      axis.title.y = element_text(size = base_size, angle = 90, vjust = 0, colour="black", margin=margin(0,base_size/2,0,0)),
      axis.line.x = element_line(size = line_size*linescale, linetype="solid", colour="black"),
      axis.line.y = element_line(size = line_size*linescale, linetype="solid", colour="black"),
      axis.ticks = element_line(size = line_size*linescale, colour = "black"),
      legend.text = element_text(size = base_size * 1),
      legend.key.size = unit(0.8, "line"),
      strip.background = element_blank()
    )
  # Optional light grey grid lines.
  if(isTRUE(grid)) {
    p <- p %+replace% theme(
      panel.grid.major.x = element_line(colour = "grey80", size = 0.5*line_size*linescale),
      panel.grid.major.y = element_line(colour = "grey80", size = 0.5*line_size*linescale),
      panel.grid.minor.x = element_line(colour = "grey95", size = 0.5*line_size*linescale),
      panel.grid.minor.y = element_line(colour = "grey95", size = 0.5*line_size*linescale)
    )
  }
  # Optional rotation of x tick labels (e.g. 45 for long category names).
  if(rotx != 0) {
    p <- p %+replace% theme(
      axis.text.x = element_text(angle=rotx, size = base_size * 1, lineheight = 1, colour="black", hjust = 1, vjust = 1, margin=margin(1,0,0,0))
    )
  }
  # Optional full panel border: hide the axis lines and draw a rectangle.
  if(isTRUE(box)) {
    p <- p %+replace% theme(
      axis.line.x = element_line(size = line_size*linescale, linetype="solid", colour=NA),
      axis.line.y = element_line(size = line_size*linescale, linetype="solid", colour=NA),
      panel.border = element_rect(size = 2*line_size*linescale, colour = "black", fill = NA)
    )
  }
  p
}
|
8a2022ade449b7ee9d0ef6491da78e916a074278
|
8da2e4942212add30e18773d3491ac446561cc65
|
/apps/simple/server.R
|
aa54d3785df795ca45ed55cbe36ad7de18dae0ac
|
[] |
no_license
|
janfait/shiny-training
|
97663e74a59e99484d334679d802f2679c13098a
|
15adcec5bc954425c61bd9dd5f09cc6b1153af36
|
refs/heads/master
| 2021-08-29T23:23:42.289204
| 2017-12-15T08:24:35
| 2017-12-15T08:24:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 440
|
r
|
server.R
|
######################################################################################################
# BEGIN SERVER
######################################################################################################
# Shiny server function wiring two inputs to two outputs.
# NOTE(review): `data` is not defined in this file -- presumably created in
# global.R or sourced earlier in the app; confirm before reuse.
function(session, input, output) {
  # Scatter plot: the user-selected column (select1) on x, column "a" on y.
  output$chart1 <- renderPlot({
    plot(data[,input$select1],data[,"a"],xlab="Index",ylab="Variable")
  })
  # Table preview: the first `slider1` rows of the data.
  output$table1 <- renderTable({
    head(data,input$slider1)
  })
}
|
a6e93d1af7e3cef6bb7132e60cbd95a4f321a570
|
90811ffdd0cbfb9c33511b768c2415a55fd7ff5c
|
/Week2_Assignment/Week2_Assignment.R
|
dd3d1b81c0b954d85348e72644186747b40bfa06
|
[] |
no_license
|
jey1987/DATA607
|
a2b21ea35d7be1a58a61e8ef6493356c871058bc
|
f0cda5f7edd97fb29a750bdd4f705d16cd3e6d80
|
refs/heads/master
| 2020-07-14T09:16:02.776522
| 2020-03-02T03:21:23
| 2020-03-02T03:21:23
| 205,290,782
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 213
|
r
|
Week2_Assignment.R
|
# Week 2 assignment: pull all rows of review_movies from a local MySQL server.
# NOTE(review): installing a package from inside a script re-installs on every
# run; prefer doing this once interactively.
install.packages("RMySQL")
library(RMySQL)

# NOTE(review): hard-coded root credentials -- move to environment variables
# or a local config file before sharing this script.
mydb <- dbConnect(MySQL(), user = 'root', password = 'root', dbname = 'sys', host = 'localhost')

rs <- dbSendQuery(mydb, "select * from review_movies")
data <- fetch(rs, n = -1)  # n = -1 fetches every pending row

# Release the result set and close the connection (previously both leaked).
dbClearResult(rs)
dbDisconnect(mydb)

data
|
d7ee36675265e76ed0ba9c04bec7f9278fe8d39a
|
4216b8ed7df260ac573f0ce7b8ad6675afbd628c
|
/bin/sanitise.R
|
5547cf212061dfeaebf615d6ea60403f79c10424
|
[] |
no_license
|
cgpu/genomechronicler-nf
|
86780e686de7b710bdec84e3ed551908818ed6f5
|
e32d8e9cf51b142100e699576ce2dbab231d6f66
|
refs/heads/master
| 2020-08-15T11:44:59.379367
| 2020-04-16T16:59:28
| 2020-04-16T16:59:28
| 215,335,983
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,181
|
r
|
sanitise.R
|
#' Sanitise LaTeX-formatted url table entries
#'
#' Scans the requested columns for LaTeX \code{\\href\{url\}\{label\}} entries;
#' for each such column it adds a companion \code{<col>_url} column holding the
#' bare url and rewrites the original column as an HTML anchor suitable for
#' \code{DT::datatable}.
#'
#' @param table a data frame whose character columns may contain LaTeX
#'   \code{\\href} entries.
#' @param col_names character vector of column names to scan; \code{FALSE}
#'   (the default) means scan every column of \code{table}.
#' @param pattern substring identifying columns that need sanitising
#'   (default \code{"href"}).
#'
#' @return the input table with matching columns rewritten as HTML links and
#'   an added \code{<col>_url} column for each of them.
#' @export
#'
#' @examples
sanitise <- function(table = NULL,
                     col_names = FALSE,
                     pattern = "href") {
  # Fix: the previous `col_names == FALSE` comparison produced a length > 1
  # condition whenever a character vector was supplied; isFALSE() only
  # triggers on the literal default.
  if (isFALSE(col_names)) {
    col_names <- colnames(table)
  }
  for (col in col_names) {
    # collapse the column to one string so grepl() is a single scalar test
    col_as_string <- paste(table[[col]], collapse = "")
    if (grepl(pattern, col_as_string, fixed = TRUE)) {
      url <- paste0(col, "_url")
      # peel the \href{url}{label} apart: label stays in `col`, url in `url`
      table[[url]] <- gsub("\\href\\{", "", table[[col]])
      table[[col]] <- sub(".*\\{(.*)\\}.*", "\\1", table[[col]])
      table[[url]] <- gsub("\\\\", "", table[[url]])
      table[[url]] <- gsub("\\}.*", "", table[[url]])
      table[[col]] <- gsub("\\href\\{", "", table[[col]])
      # prepare url for DT::datatable
      # NOTE(review): the anchor is emitted without a space before `target`
      # and with target value "blank" (not "_blank"); the clean-up pattern
      # below relies on this exact string, so both would have to change
      # together -- confirm downstream rendering before fixing.
      table[[col]] <- paste0("<a href='",
                             table[[url]],
                             "'",
                             "target='blank",
                             "'>",
                             table[[col]],
                             "</a>")
      # drop anchors built from empty label/url pairs
      table <- as.data.frame(table) %>%
        dplyr::mutate_all(stringr::str_replace_all,"<a href=''target='blank'></a>", "")
    }
  }
  return(table)
}
|
5ad94f690c0d09a3fad2188624adacdb91e58b34
|
21458fe5033fc2f62ffaa0b8bfb04b7a1ad1bf86
|
/man/get_sam.Rd
|
6e702dd33c6871ee282e9cbf108d98169d85b83a
|
[] |
no_license
|
einarhjorleifsson/fishvise
|
c1c3c7fd6d38765ef879e676627acf62a4d6ea4d
|
b1e56db880c037cf9dd75adea47f2d811d5b85bb
|
refs/heads/master
| 2021-01-10T19:21:22.523937
| 2015-01-08T09:38:42
| 2015-01-08T09:38:42
| 14,083,740
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 426
|
rd
|
get_sam.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{get_sam}
\alias{get_sam}
\title{get_sam}
\usage{
get_sam(assessment = "wbcod_2014", user = "user3")
}
\arguments{
\item{assessment}{The directory name of the assessment as specified on
www.stockassessment.org}
\item{user}{User name, guest users can use "user3" (default)}
}
\description{
Gets the input files and SAM output files from www.stockassessment.org
}
|
9e6a153edd80635e6b0da9f2022958cd2c4a1f23
|
7a06fa71e5a4dbf05142a6aad06a8abe09254359
|
/man/interpolate_array.Rd
|
a3af88d51db6ea442dcb1cfd5aae920b3bf5101a
|
[] |
no_license
|
cran/rayimage
|
cd49f499ebe3327ddf1cb20cdd42f6e249c03fe9
|
ea6514f4b31d96348384746ee3c857912da9688f
|
refs/heads/master
| 2023-01-22T21:29:45.404478
| 2023-01-17T03:40:02
| 2023-01-17T03:40:02
| 246,452,527
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 896
|
rd
|
interpolate_array.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/interpolate_array.R
\name{interpolate_array}
\alias{interpolate_array}
\title{Matrix/Array Interpolation}
\usage{
interpolate_array(image, x, y)
}
\arguments{
\item{image}{Image filename, a matrix, or a 3-layer RGB array.}
\item{x}{X indices (or fractional index) to interpolate.}
\item{y}{Y indices (or fractional index) to interpolate.}
}
\value{
Either a vector of values (if image is a matrix) or a list of interpolated values
from each layer.
}
\description{
Given a series of X and Y coordinates and an array/matrix, interpolates the Z coordinate
using bilinear interpolation.
}
\examples{
#if(interactive()){
#Interpolate a matrix
interpolate_array(volcano,c(10,10.1,11),c(30,30.5,33))
#Interpolate a 3-layer array (returns list for each channel)
interpolate_array(dragon,c(10,10.1,11),c(30,30.5,33))
#end}
}
|
a419dbbd5e19de1c47f508aa35e71782cac90e28
|
3967755668dda9b6d0cf493ec4a9f70cb5d08e93
|
/scripts/milista.r
|
337f7f186a413a8bd307b25170ad507bdebb091a
|
[] |
no_license
|
rpizarrog/diplomado-cd-iot-2021
|
141867d23f27682f1f771c88c81d681f58669e3a
|
3f51c29ff5738d2bf7c1469bdd8c79f18192c620
|
refs/heads/main
| 2023-05-06T15:08:40.352958
| 2021-05-29T19:52:25
| 2021-05-29T19:52:25
| 349,596,571
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 512
|
r
|
milista.r
|
# Working with lists
numeros <- c(1,4,5,6)
numeros
nombres <- c("Ruben", "Javer", "Jose")
nombres
# Concatenating two single-element lists keeps both vectors as elements.
milista <- c(list(numeros), list(nombres))
milista
# Convert a list element back into a plain vector
losnumeros <- unlist(milista[[1]])
losnumeros
class(milista)
# Example taken from the web
my_list <- list(l1 = c(1, 3, 5, 7),
                l2 = c(1, 2, 3),
                l3 = c(1, 1, 10, 5, 8, 65, 90))
my_list
# Apply unlist R function: flattens all elements into one named vector
print(unlist(my_list))
|
23bf54934b557236f452b22430aa220d610a56db
|
d1a4f6abfc847fe415cebe62073d5c1a41ba0008
|
/server.R
|
e467d24201158ba7c22a3be567289f65947cdbd3
|
[] |
no_license
|
lquaglio/vessel-dashboard
|
647629744fc1c1bcf3963fb68427722e1407631d
|
6d1c50c91837d0c510af008dec3be6aaf69f309c
|
refs/heads/main
| 2023-08-20T14:29:15.973357
| 2021-10-16T18:24:31
| 2021-10-16T18:24:31
| 417,888,077
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,233
|
r
|
server.R
|
# Shiny server for the vessel dashboard: wires the filter module to an info
# card, a summary sentence and a leaflet map showing the vessel's longest leg.
# NOTE(review): `df` and `shipIcon` are not defined in this file -- presumably
# created in global.R; confirm before reuse.
function(input, output, session) {
  # Filter module: server side of the selector plus a reactive returning the
  # currently selected vessel record (one row with start/end coordinates).
  selectFilterServer("filter", df)
  selectedData <- datasetServer("filter")
  # Card info
  output$info <- renderUI({
    # div(class = "ui card five wide column", style="margin-right:10px; margin-left:15px;height:100%;background-color:#f0f0f0",
    #     div(class = "content",
    #         div(class = "header", icon("info", style="height:30px;width:30px;"), "Informations"),
    #         div(class = "content",
    div(class = "description",
        br(),
        strong(selectedData()$SHIPNAME),
        br(),
        br(),
        strong("Type: "), selectedData()$ship_type,
        br(),
        br(),
        br(),
        # fall back to a readable placeholder when no destination is recorded
        strong("Destination: "), ifelse(is.na(selectedData()$DESTINATION), "Not Available", selectedData()$DESTINATION),
        br(),
        br(),
        br(),
        # timestamps arrive ISO-8601 formatted; strip the T/Z markers for display
        strong("First AIS signal at: "), br(),
        selectedData()$min_datetime %>%
          str_replace_all(c("T" = " ", "Z" = "")),
        br(),
        strong("Last AIS signal at: "), br(),
        selectedData()$max_datetime %>%
          str_replace_all(c("T" = " ", "Z" = "")),
        br(),
        br(),
        br(),
        strong("Total Traveled Distance: "), selectedData()$total_distance, "m",
        br(),
        br(),
        br(),
        br()
    )
    #         )
    #     )
    # )
  })
  # Text output: one-sentence summary of the selected leg.
  output$distance <- renderText({
    glue::glue("{vessel_name} sailed {distance} meters, from {start_dttm} to {end_dttm}",
               vessel_name = selectedData()$SHIPNAME,
               distance = round(selectedData()$distance,0),
               start_dttm = selectedData()$prevDATETIME %>%
                 str_replace_all(c("T" = " at ", "Z" = "")),
               end_dttm = selectedData()$DATETIME %>%
                 str_replace_all(c("T" = " at ", "Z" = ""))
    )
  })
  # Map: start and end markers plus a dashed white line between them.
  output$map <- renderLeaflet({
    leaflet(data = selectedData()) %>%
      addTiles() %>%
      addMarkers(~prevLON, ~prevLAT,
                 popup = ~paste0("Start", "<br>",
                                 "Latitude: ", prevLAT, "<br>",
                                 "Longitude: ", prevLON, "<br>"),
                 label = "Start", icon = shipIcon[1]) %>%
      addMarkers(~LON, ~LAT,
                 popup = ~paste0("End", "<br>",
                                 "Latitude: ", LAT, "<br>",
                                 "Longitude: ", LON, "<br>"),
                 label = "End") %>%
      addPolylines(lat = selectedData()[, c("prevLAT", "LAT")] %>% as.numeric(),
                   lng = selectedData()[, c("prevLON", "LON")] %>% as.numeric(),
                   color = "white", dashArray = "3", weight = 2)
  })
}
|
570d6ab05de389313e90147f1552476c31411cbc
|
0661f6241c38793a78abaadefb8cf3e20cb86eab
|
/Data/scripts/data_plots.R
|
2bf5f91671441c997ed140942a28cd528e8fc42d
|
[
"CC0-1.0"
] |
permissive
|
mjgcos/dissertation
|
2f900f56d3705f17ac734d98905812fba1d41f13
|
586537a2a8ac8a0e9444ea1fe6aa17959e555f6a
|
refs/heads/master
| 2020-05-15T14:58:51.510749
| 2015-09-18T14:09:26
| 2015-09-18T14:09:26
| 37,481,708
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 887
|
r
|
data_plots.R
|
## Plots for Data Section: faceted time series of differenced bond spreads,
## one panel per country, written to a PDF.
library(ggplot2)
library(scales)  # percent_format(); library() errors loudly if missing, unlike require()

# NOTE(review): hard-coded setwd() ties this script to one machine; consider
# project-relative paths instead.
setwd("~/Academic/SGPE/Dissertation/Data/csv")

data <- read.csv("final.csv")
data$date <- as.Date(data$date)

# Columns are referenced explicitly as data$country instead of attach()-ing
# the data frame: attach() masks the workspace and can silently pick up stale
# copies of columns.
countries <- as.character(levels(data$country))
country.colours <- as.character(c("orange", "red", "blue", "green",
                                  "darkgreen", "orange", "purple"))

# All series drawn in black (the coloured palette above is kept for reference).
a <- ggplot(data, aes(date, bsp/100, colour = country)) +
  geom_line() +
  theme_bw() +
  scale_size_manual(values=c(rep.int(1, 19))) +
  scale_y_continuous(labels = percent_format()) +
#  scale_color_manual(values = country.colours) +
  scale_color_manual(values = rep("black", 7)) +
  theme(legend.position = "none") +
  coord_cartesian(ylim = c(-0.025,0.025)) +
  ylab("Differenced Bond Spreads")

# Small multiples: one panel per country, two columns wide.
d_bsp_graph <- a + facet_wrap( ~ country , ncol =2)

pdf(file = "../graphics/d_bsp_graph.pdf")
d_bsp_graph
dev.off()
|
59bf465401d70fe390f3906de2e86c5231e4cf7e
|
3b62ffa02efef29b8bbaa9041d74a1ee72b4807a
|
/R/data-sets.R
|
c7147e7d9f972d699203a889e88c7095829be138
|
[] |
no_license
|
jmsigner/rhr
|
52bdb94af6a02c7b10408a1dce549aff4d100709
|
7b8d1b2dbf984082aa543fe54b1fef31a7853995
|
refs/heads/master
| 2021-01-17T09:42:32.243262
| 2020-06-22T14:24:00
| 2020-06-22T14:24:00
| 24,332,931
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 821
|
r
|
data-sets.R
|
# Roxygen documentation stubs for the package's bundled data sets; each entry
# ends in NULL because there is no function body to document.
##' @name datSH
##' @title Tracking data of one red deer in Germany
##' @description Data set from one red deer from northern Germany.
##' @docType data
##' @usage datSH
##' @format 1 location per row
##' @source Verein fuer Wildtierforschung Goettingen and Dresden
NULL
##' @name trackS
##' @title Animal track without time.
##' @description Simulated animal track.
##' @docType data
##' @usage trackS
##' @format Object of \code{RhrTrackS}
NULL
##' @name trackST
##' @title Animal track with time.
##' @description Simulated animal track.
##' @docType data
##' @usage trackST
##' @format Object of \code{RhrTrackST}
NULL
##' @name trackSTR
##' @title Animal track with regularly spaced time.
##' @description Simulated animal track.
##' @docType data
##' @usage trackSTR
##' @format Object of \code{RhrTrackSTR}
NULL
|
76533d5b039e2766b1c50ea308070ac848219bbc
|
234ac26bcc3db8046f6e3ef35bb03e1c435c38f2
|
/R/pmtree.R
|
93fd767ab8c3cd2cb9f774eb986e3160a0e1f193
|
[] |
no_license
|
cran/model4you
|
36c35b406c8c75c3460f3ee17b4bb5365d026a01
|
9e401b89624d8dde1e535c71f2b45bac33b51d85
|
refs/heads/master
| 2021-05-11T14:26:05.547580
| 2021-01-20T15:10:02
| 2021-01-20T15:10:02
| 117,701,502
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,823
|
r
|
pmtree.R
|
#' Compute model-based tree from model.
#'
#' Input a parametric model and get a model-based tree.
#'
#' @param model a model object. The model can be a parametric model with a binary covariate.
#' @param data data. If NULL (default) the data from the model object are used.
#' @param zformula formula describing which variable should be used for partitioning.
#' Default is to use all variables in data that are not in the model (i.e. \code{~ .}).
#' @param control control parameters, see \code{\link[partykit]{ctree_control}}.
#' @param coeffun function that takes the model object and returns the coefficients.
#' Useful when \code{coef()} does not return all coefficients (e.g. \code{survreg}).
#' @param ... additional parameters passed on to model fit such as weights.
#'
#' @details Sometimes the number of participant in each treatment group needs to
#' be of a certain size. This can be accomplished by setting \code{control$converged}.
#' See example below.
#'
#' @return ctree object
#'
#' @example inst/examples/ex-pmtree.R
#' @example inst/examples/ex-pmtree-methods.R
#'
#' @export
#' @import partykit
#' @importFrom partykit ctree_control nodeids nodeapply
#' @importFrom stats predict
pmtree <- function(model, data = NULL, zformula = ~.,
                   control = ctree_control(), coeffun = coef,
                   ...) {
  ### nmax not possible because data come from model
  stopifnot(all(!is.finite(control$nmax)))
  # assemble the argument list for ctree from the model and partitioning spec
  args <- .prepare_args(model = model, data = data, zformula = zformula,
                        control = control)
  ## call ctree, with the model-score transformation plugged in as ytrafo
  args$ytrafo <- function(data, weights, control, ...)
    .modelfit(data = data, weights = weights, control = control,
              model = model, coeffun = coeffun, ...)
  ret <- do.call("ctree", args)
  ### add modelinfo to terminal nodes if not there yet, but wanted
  which_terminals <- nodeids(ret, terminal = TRUE)
  if(control$saveinfo) {
    tree_ret <- .add_modelinfo(ret, nodeids = which_terminals,
                               data = args$data, model = model,
                               coeffun = coeffun)
  } else {
    tree_ret <- ret
  }
  ## prepare return object: pmtree subclass, base model and partition formula
  class(tree_ret) <- c("pmtree", class(ret))
  tree_ret$info$model <- model
  tree_ret$info$zformula <- if(is.null(zformula)) as.formula("~ .") else
    as.formula(zformula)
  # tree_ret$data <- data
  # total observation count = sum of per-terminal-node counts
  tree_ret$nobs <- sum(unlist(
    nodeapply(tree_ret, ids = which_terminals, function(x) x$info$nobs)
  ))
  return(tree_ret)
}
#' Add model information to a personalised-model-ctree
#'
#' For internal use. Walks the requested (usually terminal) nodes, refits the
#' base model on each node's subset when no info is stored yet, and records
#' score matrix, coefficients, objective value and observation count.
#'
#' @param x constparty object.
#' @param nodeids node ids, usually the terminal ids.
#' @param data data.
#' @param model model.
#' @param coeffun function that takes the model object and returns the coefficients.
#' Useful when coef() does not return all coefficients (e.g. survreg).
#'
#' @return tree with added info. Class still to be added.
.add_modelinfo <- function(x, nodeids, data, model, coeffun) {
  # recursive index paths from the root to each requested node, keyed by id
  idx <- get_paths(nodeapply(x)[[1]], nodeids)
  names(idx) <- nodeids
  tree_ret <- unclass(x)
  # node membership of every observation (which terminal it falls into)
  subset_term <- predict(x, type = "node")
  # if(saveinfo) {
  for (i in nodeids) {
    ichar <- as.character(i)
    idn <- idx[[ichar]]
    # non-root paths need the root index prepended for [[-indexing the tree
    if(length(idn) > 1) idn <- c(1, idn)
    iinfo <- tree_ret[[idn]]$info
    subsi <- subset_term == i
    # refit the model on this node's subset only when no info is stored yet
    if (is.null(iinfo)) {
      di <- data[subsi, ]
      umod <- update(model, data = di, model = TRUE)
      # objective: summed objfun for lm, negative log-likelihood otherwise
      iinfo <- list(estfun = estfun(umod), coefficients = coeffun(umod),
                    objfun = ifelse(class(umod)[[1]] == "lm",
                                    sum(objfun(umod)),
                                    - logLik(umod)),
                    object = umod)
      tree_ret[[idn]]$info <- iinfo
    }
    tree_ret[[idn]]$info$nobs <- sum(subsi)
  }
  # }
  tree_ret
}
|
21aaeb1fe27adb51c5d524caa19b8d62378246f0
|
f7c64b2475df5163a1e3d5b7698b8f73ac9b1099
|
/scripts/correlation.R
|
e4e008b0aad4f73ba80292624ab02c7be83224b1
|
[] |
no_license
|
yannabraham/HipHopViz
|
2ec897f92f0987a8261be10198bdb3311bc13008
|
7f1219704c9f8462b27bba723e9089bfcde1d5a4
|
refs/heads/master
| 2020-06-09T02:43:09.449966
| 2019-06-23T13:58:33
| 2019-06-23T13:58:33
| 193,354,865
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,199
|
r
|
correlation.R
|
# extract correlation data from objects & load them to database
#
# Author: abrahya1
###############################################################################
library(RSQLite)
con <- dbConnect("SQLite", dbname = "../site/data/HipHop.db")
## load correlation data
# Two .Rdata files (HIP / HOP gene z-score correlations); attach() on an
# .Rdata file puts its objects -- here `correlation.frame` -- on the search
# path so they can be referenced directly.
correlation.files <- c('//nibr.novartis.net/CHBS-DFS/LABDATA/Inbox/PHCHBS-I21325/_project_data/HipHopViz/CMB_Exp_HIP_gene_z-score_correlation_upto193_0.15_2.Rdata',
                       '//nibr.novartis.net/CHBS-DFS/LABDATA/Inbox/PHCHBS-I21325/_project_data/HipHopViz/CMB_Exp_HOP_gene_z-score_correlation_upto193_0.15_2.Rdata')
outfiles <- c('CMB_Exp_HIP_gene_z-score_correlation_upto193_0.15_2.txt','CMB_Exp_HOP_gene_z-score_correlation_upto193_0.15_2.txt')
# Dump each correlation frame to a tab-separated file; the SQL table
# definition is written once, from the first file. `ksink` just swallows the
# NULL results of lapply; system.time() reports how long the dump took.
system.time(
  ksink <- lapply(seq(1,length(correlation.files)),function(x) {
    attach(correlation.files[x])
    if(x==1) {
      cat(dbBuildTableDefinition(con,'correlation_temp',correlation.frame),file='correlation.sql')
    }
    write.table(correlation.frame,outfiles[x],row.names=F,col.names=F,sep='\t',quote=F)
    detach(pos=2)
    return(NULL)
  }
  )
)
## clean up hip/hop
# Re-write the depivoted score files under a "-final" suffix.
hiphop.files <- c('E:/BigData/HipHop/full/HIP-scores-depivot.txt', 'E:/BigData/HipHop/full/HOP-scores-depivot.txt')
ksink <- lapply(hiphop.files,function(x) {
  require(stringr)
  tmp <- read.delim(x)
  # head(tmp)
  write.table(tmp, str_replace(x,'depivot','depivot-final'),row.names=F,col.names=F,sep='\t',quote=F)
}
)
## clean up cpd correlation
# Goal: map every compound/concentration/experiment id that appears in the
# compound-correlation files onto the canonical ids in
# compound2experiment.txt, resolving whitespace and rounding mismatches in
# several passes. Inline result comments (e.g. "# 553") record the counts
# observed during the original interactive run.
require(stringr)
compound2experiment <- read.delim('../site/data/compound2experiment.txt',row.names=1,header=F)
names(compound2experiment) <- c('compound_concentration_experiment_type','compound_id','experiment_type','concentration')
# Build the composite keys used for matching below.
compound2experiment$compound_concentration <- apply(compound2experiment[,c('compound_id','concentration')],1,paste,sep='',collapse='_')
compound2experiment$compound_experiment <- apply(compound2experiment[,c('compound_id','experiment_type')],1,paste,sep='',collapse='_')
compound2experiment$compound_experiment <- str_trim(compound2experiment$compound_experiment)
cpd.files <- c('E:/BigData/HipHop/full/HIP_cpd_z-score_correlation_0.1_1.txt','E:/BigData/HipHop/full/HOP_cpd_z-score_correlation_0.1_1.txt')
compound_correlation <- lapply(cpd.files,read.delim)
length(compound_correlation)
compound_correlation <- do.call('rbind',compound_correlation)
nrow(compound_correlation)
# Re-level both cluster-id columns on trimmed, de-duplicated labels.
length(levels(compound_correlation$Cluster_Experiment1))
compound_correlation$Cluster_Experiment1 <- factor(compound_correlation$Cluster_Experiment1,
                                                   levels=unique(str_trim(compound_correlation$Cluster_Experiment1))
)
length(levels(compound_correlation$Cluster_Experiment1))
length(levels(compound_correlation$Cluster_Experiment2))
compound_correlation$Cluster_Experiment2 <- factor(compound_correlation$Cluster_Experiment2,
                                                   levels=unique(str_trim(compound_correlation$Cluster_Experiment2))
)
length(levels(compound_correlation$Cluster_Experiment2))
cluster_experiment <- with(compound_correlation,unique(c(levels(Cluster_Experiment1),levels(Cluster_Experiment2))))
length(cluster_experiment) # 5820 unique ids
head(cluster_experiment)
# Split each id "<compound>_<concentration>_<type>_<experiment>" into parts.
cluster2compound <- lapply(cluster_experiment,function(x) {
  strsplit(str_trim(x),'_')[[1]]
}
)
cluster2compound <- do.call('rbind',cluster2compound)
cluster2compound <- data.frame(cluster2compound)
names(cluster2compound) <- c('compound_id','concentration','type','experiment')
head(cluster2compound)
cluster2compound$cluster_experiment <- cluster_experiment
# Same composite keys as for compound2experiment above.
cluster2compound$experiment_type <- apply(cluster2compound[,c('experiment','type')],1,paste,sep='',collapse='_')
cluster2compound$compound_concentration <- apply(cluster2compound[,c('compound_id','concentration')],1,paste,sep='',collapse='_')
cluster2compound$compound_experiment <- apply(cluster2compound[,c('compound_id','experiment_type')],1,paste,sep='',collapse='_')
cluster2compound$compound_experiment <- str_trim(cluster2compound$compound_experiment)
cluster2compound$compound_concentration_experiment_type <- apply(cluster2compound[,c('compound_id','concentration','experiment','type')],1,paste,sep='',collapse='_')
# Pass 1: exact full-id match; anything without a canonical match becomes NA.
cluster2compound$final_compound_concentration_experiment_type <- cluster2compound$compound_concentration_experiment_type
cluster2compound <- within(cluster2compound,
                           final_compound_concentration_experiment_type[!compound_concentration_experiment_type %in% compound2experiment$compound_concentration_experiment_type] <- NA
)
sum(is.na(cluster2compound$final_compound_concentration_experiment_type)) # 553
sum(!is.na(cluster2compound$final_compound_concentration_experiment_type)) # 5267+553 = 5820 ok
mismatch <- subset(cluster2compound,is.na(final_compound_concentration_experiment_type) & compound_experiment %in% compound2experiment$compound_experiment)
head(mismatch)
nrow(mismatch) # 553 ok
# Pass 2: resolve mismatches whose compound_experiment key is unique.
# how many mismatch have a single compound_experiment match
n_compound_exp <- sapply(mismatch$compound_experiment,function(x) sum(compound2experiment$compound_experiment==x))
table(n_compound_exp) # 525 are single match
length(with(mismatch,final_compound_concentration_experiment_type[match(names(n_compound_exp)[n_compound_exp==1],compound_experiment)]))
length(with(compound2experiment,compound_concentration_experiment_type[match(names(n_compound_exp)[n_compound_exp==1],compound_experiment)]))
length(with(cluster2compound,final_compound_concentration_experiment_type[match(names(n_compound_exp)[n_compound_exp==1],compound_experiment)]))
head(with(compound2experiment,compound_concentration_experiment_type[match(names(n_compound_exp)[n_compound_exp==1],compound_experiment)]))
head(with(cluster2compound,compound_concentration_experiment_type[match(names(n_compound_exp)[n_compound_exp==1],compound_experiment)]))
cluster2compound <- within(cluster2compound,
                           final_compound_concentration_experiment_type[match(names(n_compound_exp)[n_compound_exp==1],compound_experiment)] <-
                             with(compound2experiment,as.character(compound_concentration_experiment_type)[match(names(n_compound_exp)[n_compound_exp==1],compound_experiment)])
)
sum(is.na(cluster2compound$final_compound_concentration_experiment_type)) # 28 left...
subset(cluster2compound, !compound_concentration_experiment_type %in% compound2experiment$compound_concentration_experiment_type, select=c('final_compound_concentration_experiment_type') )
# can we resolve the others using concentration??
# Pass 3: two-way ambiguous keys, disambiguated by concentration (1 decimal).
length(with(mismatch,final_compound_concentration_experiment_type[match(names(n_compound_exp)[n_compound_exp==2],compound_experiment)]))
length(with(compound2experiment,compound_concentration_experiment_type[match(names(n_compound_exp)[n_compound_exp==2],compound_experiment)]))
subset(compound2experiment,compound_experiment=='1330_0054_HIP')
subset(mismatch,compound_experiment=='1330_0054_HIP')
conc.mismatch <- merge(
  subset(mismatch,compound_experiment %in% names(n_compound_exp)[n_compound_exp==2],select=c('compound_experiment','compound_concentration_experiment_type','concentration')),
  subset(compound2experiment,compound_experiment %in% names(n_compound_exp)[n_compound_exp==2],select=c('compound_experiment','compound_concentration_experiment_type','concentration')),
  by='compound_experiment',
  suffixes=c('.cc','.c2e')
)
head(conc.mismatch)
conc.mismatch[,c('concentration.cc','concentration.c2e')]
sum(with(conc.mismatch,round(as.numeric(as.character(concentration.cc)),1)==round(as.numeric(as.character(concentration.c2e)),1))) # 23 ok!
conc.mismatch <- subset(conc.mismatch,round(as.numeric(as.character(concentration.cc)),1)==round(as.numeric(as.character(concentration.c2e)),1))
nrow(conc.mismatch)
length(with(cluster2compound,final_compound_concentration_experiment_type[match(conc.mismatch$compound_concentration_experiment_type.cc,compound_concentration_experiment_type)]))
tmp <- cbind(
  with(cluster2compound,compound_concentration_experiment_type[match(conc.mismatch$compound_concentration_experiment_type.cc,compound_concentration_experiment_type)]),
  conc.mismatch[,c('compound_concentration_experiment_type.cc','compound_concentration_experiment_type.c2e')]
)
all(tmp[,1]==tmp[,2])
cluster2compound <- within(cluster2compound,
                           final_compound_concentration_experiment_type[match(conc.mismatch$compound_concentration_experiment_type.cc,compound_concentration_experiment_type)] <-
                             as.character(conc.mismatch$compound_concentration_experiment_type.c2e)
)
sum(is.na(cluster2compound$final_compound_concentration_experiment_type)) # 5 left...
# 5 left...
# Pass 4: four-way ambiguous keys, matched on exact concentration.
length(with(mismatch,final_compound_concentration_experiment_type[match(names(n_compound_exp)[n_compound_exp==4],compound_experiment)]))
length(with(compound2experiment,compound_concentration_experiment_type[match(names(n_compound_exp)[n_compound_exp==4],compound_experiment)]))
subset(mismatch,compound_experiment %in% names(n_compound_exp)[n_compound_exp==4])
subset(compound2experiment,compound_experiment %in% names(n_compound_exp)[n_compound_exp==4])
is.numeric(subset(compound2experiment,compound_experiment %in% names(n_compound_exp)[n_compound_exp==4],select='concentration',drop=T))
as.numeric(as.character(subset(mismatch,compound_experiment %in% names(n_compound_exp)[n_compound_exp==4],select='concentration',drop=T)))
all(as.numeric(as.character(subset(mismatch,compound_experiment %in% names(n_compound_exp)[n_compound_exp==4],select='concentration',drop=T))) %in%
      subset(compound2experiment,compound_experiment %in% names(n_compound_exp)[n_compound_exp==4],select='concentration',drop=T)
)
small.conc.mismatch <- merge(
  subset(mismatch,compound_experiment %in% names(n_compound_exp)[n_compound_exp==4],select=c('compound_experiment','compound_concentration_experiment_type','concentration')),
  subset(compound2experiment,compound_experiment %in% names(n_compound_exp)[n_compound_exp==4],select=c('compound_experiment','compound_concentration_experiment_type','concentration')),
  by='compound_experiment',
  suffixes=c('.cc','.c2e')
)
small.conc.mismatch$concentration.cc <- as.numeric(as.character(small.conc.mismatch$concentration.cc))
sum(with(small.conc.mismatch,concentration.cc==concentration.c2e)) # 5 ok
small.conc.mismatch <- subset(small.conc.mismatch,concentration.cc==concentration.c2e)
cluster2compound <- within(cluster2compound,
                           final_compound_concentration_experiment_type[match(small.conc.mismatch$compound_concentration_experiment_type.cc,compound_concentration_experiment_type)] <-
                             as.character(small.conc.mismatch$compound_concentration_experiment_type.c2e)
)
sum(is.na(cluster2compound$final_compound_concentration_experiment_type)) # 0!!
# replace levels in compound_correlation
# Swap both cluster-id columns over to the resolved canonical ids and dump.
head(cluster2compound)
head(cbind(levels(compound_correlation$Cluster_Experiment1),cluster2compound$cluster_experiment))
all(levels(compound_correlation$Cluster_Experiment1)==cluster2compound$cluster_experiment) # ok
all(levels(compound_correlation$Cluster_Experiment1)==with(cluster2compound,cluster_experiment[match(levels(compound_correlation$Cluster_Experiment1),cluster_experiment)])) # ok
levels(compound_correlation$Cluster_Experiment1) <- with(cluster2compound,as.character(final_compound_concentration_experiment_type[match(levels(compound_correlation$Cluster_Experiment1),cluster_experiment)]))
head(cbind(levels(compound_correlation$Cluster_Experiment2),cluster2compound$cluster_experiment))
all(levels(compound_correlation$Cluster_Experiment2)==cluster2compound$cluster_experiment) # false -> use matching...
all(levels(compound_correlation$Cluster_Experiment2)==with(cluster2compound,cluster_experiment[match(levels(compound_correlation$Cluster_Experiment2),cluster_experiment)])) # ok
levels(compound_correlation$Cluster_Experiment2) <- with(cluster2compound,final_compound_concentration_experiment_type[match(levels(compound_correlation$Cluster_Experiment2),cluster_experiment)])
all(levels(compound_correlation$Cluster_Experiment1) %in% compound2experiment$compound_concentration_experiment_type)
all(levels(compound_correlation$Cluster_Experiment2) %in% compound2experiment$compound_concentration_experiment_type)
write.table(compound_correlation,'E:/BigData/HipHop/full/HIPHOP_cpd_z-score_correlation_0.1_1.final.txt',row.names=F,col.names=F,sep='\t',quote=F)
|
c19f9ea4b6134464668fb5c0678decfb0a6553ec
|
bff9a69080d7a82b58cbf17193705ded04afc675
|
/plot_single_simulation.r
|
48cf5d358b505ab6efa90cd0d78e31e09198384e
|
[] |
no_license
|
bramkuijper/migration
|
67e638558b928847c5d715558aec154a6baadfba
|
c0e467f6fc5985c0184553dc4cc5ba446e14e40e
|
refs/heads/master
| 2023-03-15T14:25:35.027854
| 2023-03-10T10:04:07
| 2023-03-10T10:04:07
| 185,962,196
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,611
|
r
|
plot_single_simulation.r
|
#!/usr/bin/env Rscript
#--vanilla
library("ggplot2")
library("gridExtra")
# get command line arguments: the only expected argument is the path to a
# simulation output file
args = commandArgs(trailingOnly=TRUE)
# give an error message if you do not provide it with a simulation file name
if (length(args) < 1)
{
    print("provide a simulation file name")
    stop()
}
# find out where the parameter listing starts
# so that we can read in the data part of the file
# without having it messed up by the subsequent parameter listing
#
# Scans the file bottom-up and returns the 1-based index of the last line
# that starts with a digit (i.e. the last data row); returns NA when no such
# line exists. Fixes: removed leftover debug print() calls that echoed every
# scanned line, and replaced seq(length(f), 1, -1) -- which errors on an
# empty file ('wrong sign in by argument') -- with rev(seq_along(f)).
find_out_param_line <- function(filename) {
    # read all the lines of the file
    f <- readLines(filename)

    # walk the lines in reverse so the first hit is the last data row
    for (line_i in rev(seq_along(f)))
    {
        if (grepl("^\\d", f[[line_i]]))
        {
            return(line_i)
        }
    }
    return(NA)
}
# Locate the end of the data block so read.table stops before the parameter
# listing that follows it in the file.
parameter_row <- find_out_param_line(args[1])
if (is.na(parameter_row))
{
    print("cannot find data...")
    stop()
}
# read in data frame of corresponding simulation
the.data <- read.table(args[1], header=T, nrow=parameter_row - 1, sep=";")
# drop the first data row -- presumably an initialisation row; confirm
the.data <- the.data[-c(1),]
# now use ggplot2 to plot stuff
str(the.data)
# p1: winter population size over generations
p1 <- ggplot(data=the.data
        ,aes(x=generation)) +
    geom_line(aes(y = winter_pop, colour="Winter")) +
    theme_classic() +
    xlab("Generation") +
    ylab("Population size")
# p2: mean staging pool size in spring vs autumn
p2 <- ggplot(data=the.data
        ,aes(x=generation)) +
    geom_line(aes(y = mean_spring_staging_size, colour="Spring")) +
    geom_line(aes(y = mean_autumn_staging_size, colour = "Autumn")) +
    theme_classic() +
    xlab("Generation") +
    ylab("Mean staging size")
# p3: mean flock size per season
p3 <- ggplot(data=the.data
        ,aes(x=generation)) +
    geom_line(aes(y = mean_spring_flock_size, colour="Spring")) +
    geom_line(aes(y = mean_autumn_flock_size, colour = "Autumn")) +
    theme_classic() +
    xlab("Generation") +
    #ylim(c(0,10)) +
    ylab("Mean flock size")
# p3b: variance of flock size per season
p3b <- ggplot(data=the.data
        ,aes(x=generation)) +
    geom_line(aes(y = var_spring_flock_size, colour="Spring")) +
    geom_line(aes(y = var_autumn_flock_size, colour = "Autumn")) +
    theme_classic() +
    xlab("Generation") +
    #ylim(c(0,10)) +
    ylab("Var flock size")
# p3c: number of flocks per season
p3c <- ggplot(data=the.data
        ,aes(x=generation)) +
    geom_line(aes(y = n_spring_flocks, colour="Spring")) +
    geom_line(aes(y = n_autumn_flocks, colour = "Autumn")) +
    theme_classic() +
    xlab("Generation") +
    #ylim(c(0,10)) +
    ylab("Number flocks")
# p3d: mean departure latency per season
p3d <- ggplot(data=the.data
        ,aes(x=generation)) +
    geom_line(aes(y = mean_spring_latency, colour="Spring")) +
    geom_line(aes(y = mean_autumn_latency, colour="Autumn")) +
    theme_classic() +
    xlab("Generation") +
    ylab("Mean latency")
# p3e: mean departure timing per season
p3e <- ggplot(data=the.data
        ,aes(x=generation)) +
    geom_line(aes(y = mean_spring_departure, colour="Spring")) +
    geom_line(aes(y = mean_autumn_departure, colour="Autumn")) +
    theme_classic() +
    xlab("Generation") +
    ylab("Mean timing")
# p3f: mean migration cost per season
p3f <- ggplot(data=the.data
        ,aes(x=generation)) +
    geom_line(aes(y = mean_spring_cost, colour="Spring")) +
    geom_line(aes(y = mean_autumn_cost, colour = "Autumn")) +
    theme_classic() +
    xlab("Generation") +
    #ylim(c(0,10)) +
    ylab("Mean cost")
# p4: number of breeders
p4 <- ggplot(data=the.data
        ,aes(x=generation)) +
    geom_line(aes(y = breeder_pop, colour="N breeders")) +
    theme_classic() +
    xlab("Generation") +
    ylab(expression("N"[breeder]))
# p5: number of offspring
p5 <- ggplot(data=the.data
        ,aes(x=generation)) +
    geom_line(aes(y = offspring_pop, colour="N offspring")) +
    theme_classic() +
    xlab("Generation") +
    ylab(expression("N"[offspring]))
# p6: mean resources per season
# NOTE(review): mean_resources_summer is labelled "Winter" and
# mean_resources_winter is labelled "Summer" -- the legend labels look
# swapped; confirm against the simulation's variable naming.
p6 <- ggplot(data=the.data
        ,aes(x=generation)) +
    geom_line(aes(y = mean_resources_summer, colour="Winter")) +
    geom_line(aes(y = mean_resources_winter, colour="Summer")) +
    theme_classic() +
    xlab("Generation") +
    ylab("Mean resources")
p7 <- ggplot(data=the.data
,aes(x=generation)) +
geom_line(aes(y = mean_theta_a_winter, colour="Psignal (resources), theta winter")) +
#geom_line(aes(y = mean_theta_a_summer, colour="Stage (resources), theta summer")) +
geom_line(aes(y = mean_phi_a_winter, colour="Pdisperse (group size), phi winter")) +
#geom_line(aes(y = mean_phi_a_summer, colour="Disperse (group size), phi summer")) +
theme_classic() +
xlab("Generation") +
ylab("Elevation")
p8 <- ggplot(data=the.data
,aes(x=generation)) +
geom_line(aes(y = mean_theta_b_winter, colour="Psignal (resources), theta winter")) +
#geom_line(aes(y = mean_theta_b_summer, colour="Stage (resources), theta summer")) +
geom_line(aes(y = mean_phi_b_winter, colour="Pdisperse (group size), phi winter")) +
#geom_line(aes(y = mean_phi_b_summer, colour="Disperse (group size), phi summer")) +
theme_classic() +
xlab("Generation") +
ylab("Slope")
p9 <- ggplot(data=the.data
,aes(x=generation)) +
geom_line(aes(y = mean_spring_signal_timing, colour="Spring")) +
geom_line(aes(y = mean_autumn_signal_timing, colour="Autumn")) +
theme_classic() +
xlab("Generation") +
ylab("Mean signal phenology")
p10 <- ggplot(data=the.data
,aes(x=generation)) +
geom_line(aes(y = mean_age, colour="Mean age")) +
theme_classic() +
xlab("Generation") +
ylab("Mean age")
big_plot <- arrangeGrob(p1,
p9,
p2,
p3 + scale_y_continuous(trans = "log10"),
p3b,
p3c + scale_y_continuous(trans = "log10"),
p3d,
p3e + scale_y_continuous(trans = "log10"),
p3f,
p4,
p5 + scale_y_continuous(trans = "log10"),
p6 + scale_y_continuous(trans = "log10"),
p10,
p7,
p8,
nrow=15,ncol=1)
the.base.name <- basename(args[1])
output_file_name <- paste(
"graph_"
,the.base.name
,".pdf"
,sep="")
ggsave(output_file_name, big_plot, height = 25)
|
390e15438e227d98c4add38da891f63658d1d59b
|
9f2c2d1c1bfe949ec76e6d323d9327d9d400fc24
|
/Run-DetermRecSim-Autosomal-SexSpecific.R
|
c361ed683411c076a471464fc7b978fcd229acbc
|
[] |
no_license
|
colin-olito/XvAutosomeInversions
|
fba08c4c05e1df84922a39d25f3c33892c87d89c
|
cc69ca1a8212439423f58a02482d1ba61d38279f
|
refs/heads/master
| 2021-03-22T00:05:22.720462
| 2018-07-16T22:08:18
| 2018-07-16T22:08:18
| 103,689,205
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,116
|
r
|
Run-DetermRecSim-Autosomal-SexSpecific.R
|
##############################################################
# Local adaptation and the evolution of autosomal inversions
# with sex-specific selection and migration
#
# R code for simple deterministic simulations of the
# haplotype frequency recursions for the model of
# autosomal inversions with sex-specific selection and
# migration. Simplest case to find equilibrium
# frequencies of the inversions.
#
#
# Author: Colin Olito
#
# NOTES:
#
rm(list=ls())
#####################
## Dependencies
source('R/functions-figures.R')
source('R/functions-RecSim-Autosomal-SexSpecific.R')
######################
## Run Simulations
# Locally adaptive allele completely recessive (hf = hm = 0)
test <- recursionFwdSimLoop(gen = 25000, hf = 0, hm = 0, threshold = 1e-7,
sf.vals = c(0.05, 0.1, 0.2), sm.vals = c(0.05, 0.1, 0.2),
mf.vals = c(0.01, 0.05), mm.vals = c(0.01, 0.05),
r.vals = c(0.0, 0.01, 0.1))
# Additive fitness effects hf = hm = 1/2)
test <- recursionFwdSimLoop(gen = 25000, hf = 0.5, hm = 0.5, threshold = 1e-7,
sf.vals = c(0.05, 0.15, 0.2), sm.vals = c(0.05, 0.1, 0.2),
mf.vals = c(0.01, 0.05), mm.vals = c(0.01, 0.05),
r.vals = c(0.0, 0.01, 0.1))
# Locally adaptive allele completely dominant (hf = hm = 1)
test <- recursionFwdSimLoop(gen = 25000, hf = 1, hm = 1, threshold = 1e-7,
sf.vals = c(0.01, 0.05, 0.1, 0.2), sm.vals = c(0.01, 0.05, 0.1, 0.2),
mf.vals = c(0.01, 0.05), mm.vals = c(0.01, 0.05),
r.vals = c(0.0, 0.01, 0.1))
########################################################
## Some exploratory code to play with results/plotting
head(test)
unique(test$sf)
rs <- unique(test$r)
mfs <- unique(test$mf)
EQ.freq <- (test[,8:12] + test[,13:17])/2
plot(NA, axes=FALSE, type='n', main='',xlim = c(0,max(test$sf)), ylim = c(0,1), ylab='', xlab='', cex.lab=1.2)
usr <- par('usr')
rect(usr[1], usr[3], usr[2], usr[4], col='white', border=NA)
plotGrid(lineCol='grey80')
box()
lines(test$x5[test$r==rs[1] & test$mf==mfs[1]] ~ test$sf[test$r==rs[1] & test$mf==mfs[1]], col=1, lwd=3, lty=1)
lines(test$x5[test$r==rs[1] & test$mf==mfs[2]] ~ test$sf[test$r==rs[1] & test$mf==mfs[2]], col=1, lwd=3, lty=1)
lines(test$x5[test$r==rs[2] & test$mf==mfs[1]] ~ test$sf[test$r==rs[2] & test$mf==mfs[1]], col=1, lwd=3, lty=1)
lines(test$x5[test$r==rs[2] & test$mf==mfs[2]] ~ test$sf[test$r==rs[2] & test$mf==mfs[2]], col=1, lwd=3, lty=1)
lines(test$y5[test$r==rs[1] & test$mf==mfs[1]] ~ test$sf[test$r==rs[1] & test$mf==mfs[1]], col=2, lwd=3, lty=2)
lines(test$y5[test$r==rs[1] & test$mf==mfs[2]] ~ test$sf[test$r==rs[1] & test$mf==mfs[2]], col=2, lwd=3, lty=2)
lines(test$y5[test$r==rs[2] & test$mf==mfs[1]] ~ test$sf[test$r==rs[2] & test$mf==mfs[1]], col=2, lwd=3, lty=2)
lines(test$y5[test$r==rs[2] & test$mf==mfs[2]] ~ test$sf[test$r==rs[2] & test$mf==mfs[2]], col=2, lwd=3, lty=2)
# axes
axis(1, las=1)
axis(2, las=1)
plot(NA, axes=FALSE, type='n', main='',xlim = c(0,max(test$sf)), ylim = c(0,max(test$LDx)), ylab='', xlab='', cex.lab=1.2)
usr <- par('usr')
rect(usr[1], usr[3], usr[2], usr[4], col='white', border=NA)
plotGrid(lineCol='grey80')
box()
lines(test$LDx[test$r==rs[1] & test$mf==mfs[1]] ~ test$sf[test$r==rs[1] & test$mf==mfs[1]], col=1, lwd=3, lty=1)
lines(test$LDx[test$r==rs[1] & test$mf==mfs[2]] ~ test$sf[test$r==rs[1] & test$mf==mfs[2]], col=1, lwd=3, lty=1)
lines(test$LDx[test$r==rs[2] & test$mf==mfs[1]] ~ test$sf[test$r==rs[2] & test$mf==mfs[1]], col=1, lwd=3, lty=1)
lines(test$LDx[test$r==rs[2] & test$mf==mfs[2]] ~ test$sf[test$r==rs[2] & test$mf==mfs[2]], col=1, lwd=3, lty=1)
lines(test$LDy[test$r==rs[1] & test$mf==mfs[1]] ~ test$sf[test$r==rs[1] & test$mf==mfs[1]], col=2, lwd=3, lty=2)
lines(test$LDy[test$r==rs[1] & test$mf==mfs[2]] ~ test$sf[test$r==rs[1] & test$mf==mfs[2]], col=2, lwd=3, lty=2)
lines(test$LDy[test$r==rs[2] & test$mf==mfs[1]] ~ test$sf[test$r==rs[2] & test$mf==mfs[1]], col=2, lwd=3, lty=2)
lines(test$LDy[test$r==rs[2] & test$mf==mfs[2]] ~ test$sf[test$r==rs[2] & test$mf==mfs[2]], col=2, lwd=3, lty=2)
# axes
axis(1, las=1)
axis(2, las=1)
#Some exploratory code to play with the recursionFwdSim function
par.list <- list(
gen = 25000,
mf = 0.05,
mm = 0.01,
sf = 0.2,
sm = 0.1,
hf = 0.5,
hm = 0.5,
r = 0.5
)
xi.init <- c(0,(par.list$mf/par.list$sf),(par.list$mf/par.list$sf),(1 - 2*(par.list$mf/par.list$sf)-0.01),0.01)
yi.init <- c(0,(par.list$mm/par.list$sm),(par.list$mm/par.list$sm),(1 - 2*(par.list$mm/par.list$sm)-0.01),0.01)
xi.init <- c(0.25,0.25,0.25,(0.25-0.01),0.01)
yi.init <- c(0.25,0.25,0.25,(0.25-0.01),0.01)
res <- recursionFwdSim(par.list, xi.init = xi.init, yi.init = yi.init, threshold = 1e-6, silent=FALSE)
str(res)
head(res$Fi.gen)
plot(NA, ylim=c(0,1), xlim=c(0,nrow(res$Fi.gen)), ylab="Frequency", xlab="Generations")
lines(res$xi.gen[,1], col=1, lwd=3)
lines(res$xi.gen[,2], col=8, lwd=3, lty=1)
lines(res$xi.gen[,3], col=4, lwd=3, lty=3)
lines(res$xi.gen[,4], col=3, lwd=3)
lines(res$xi.gen[,5], col=2, lwd=3)
plot(NA, ylim=c(0,1), xlim=c(0,nrow(res$xi.gen)), ylab="Frequency", xlab="Generations")
lines(res$xi.gen[,1], col=1, lwd=3)
lines(res$xi.gen[,2], col=8, lwd=3, lty=1)
lines(res$xi.gen[,3], col=4, lwd=3, lty=3)
lines(res$xi.gen[,4], col=3, lwd=3)
lines(res$xi.gen[,5], col=2, lwd=3)
lines(res$yi.gen[,1], col=1, lwd=3)
lines(res$yi.gen[,2], col=8, lwd=3, lty=1)
lines(res$yi.gen[,3], col=4, lwd=3, lty=3)
lines(res$yi.gen[,4], col=3, lwd=3)
lines(res$yi.gen[,5], col=2, lwd=3)
sum(res$EQ.freq)
res$EQ.freq
# Finding equilibrium haplotype frequencies in the absence of the inversion,
# then using these eq. frequencies as initial conditions when inversion invades
inits <- recursionFwdSim(par.list, xi.init = c(0.25,0.25,0.25,0.25,0), yi.init = c(0.25,0.25,0.25,0.25,0), threshold = 1e-6)
x.inits <- inits[[5]][1:5]
x.inits[x.inits == max(x.inits)] <- x.inits[x.inits == max(x.inits)] - 0.01
x.inits[5] <- 0.01
x.inits
y.inits <- inits[[5]][6:10]
y.inits[y.inits == max(y.inits)] <- y.inits[y.inits == max(y.inits)] - 0.01
y.inits[5] <- 0.01
y.inits
sum(x.inits)
round(sum(y.inits), digits=3)
y.inits <- round(y.inits,digits=3)
res <- recursionFwdSim(par.list, xi.init = x.inits, yi.init = y.inits, threshold = 1e-7)
plot(NA, ylim=c(0,1), xlim=c(0,nrow(res$xi.gen)), ylab="Frequency", xlab="Generations")
lines(res$xi.gen[,1], col=1, lwd=3)
lines(res$xi.gen[,2], col=8, lwd=3, lty=1)
lines(res$xi.gen[,3], col=4, lwd=3, lty=3)
lines(res$xi.gen[,4], col=3, lwd=3)
lines(res$xi.gen[,5], col=2, lwd=3)
lines(res$yi.gen[,1], col=1, lwd=3)
lines(res$yi.gen[,2], col=8, lwd=3, lty=1)
lines(res$yi.gen[,3], col=4, lwd=3, lty=3)
lines(res$yi.gen[,4], col=3, lwd=3)
lines(res$yi.gen[,5], col=2, lwd=3)
sum(res$EQ.freq)
|
60a241a6cb09e56c0e1a6770b4db5c786f32d90d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/soilDB/examples/seriesExtent.Rd.R
|
5d8d6e13b7d8972b2774abad862a50d94256f586
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 400
|
r
|
seriesExtent.Rd.R
|
library(soilDB)
### Name: seriesExtent
### Title: Get/Display Soil Series Extent
### Aliases: seriesExtent seriesExtentAsGmap
### Keywords: manip
### ** Examples
## Not run:
##D # fetch series extent for the 'Amador' soil series
##D s <- seriesExtent('amador')
##D plot(s)
##D
##D # fetch then plot the extent of the 'Amador' soil series
##D seriesExtentAsGmap('amador')
## End(Not run)
|
cd0a74747fa27a19b71d22be2af736e8b5381601
|
ebde5969386084d5e4e50d2426cc1e6e85d4de4b
|
/bin/crest_peak_call
|
879688b84eb57d0fbe36cd9d2952deffac8d2eb5
|
[
"MIT"
] |
permissive
|
r3fang/CRESTseq
|
b4c655d4295400e4bbb3459a418069b16a96d4d2
|
87b6be546516ea3c676ae24271f1d80421ebad23
|
refs/heads/master
| 2021-06-18T22:22:31.396867
| 2017-06-26T20:07:48
| 2017-06-26T20:07:48
| 61,255,451
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,682
|
crest_peak_call
|
#!/usr/bin/env Rscript
# Rscript crest_peak.R INPUT=\'low/crest/crest.bin.high.txt\' PREFIX=\'demo\' GOOD_SGRNA=3 MIN_WIDTH=1000 CUTOFF=0.1
# PART I - check if packages already installed, if not exist
if(!("GenomicRanges" %in% installed.packages()[,"Package"])){stop("R package GenomicRanges not installed")}
suppressMessages(library(GenomicRanges))
# PART II - load in arguments
args <- commandArgs(trailingOnly = TRUE)
if(length(args) < 5){
stop("too few arguments.")
}else{
for(i in 1:length(args)){
invisible(eval(parse(text=args[[i]])))
}
}
if(!exists("INPUT")) stop("argument INPUT missing")
if(!exists("PREFIX")) stop("argument PREFIX missing")
if(!exists("GOOD_SGRNA")) stop("argument GOOD_SGRNA missing")
if(!exists("MIN_WIDTH")) stop("argument MIN_WIDTH missing")
if(!exists("CUTOFF")) stop("argument FDR missing")
if(!file.exists(INPUT)) stop("file INPUT not exists")
if(!(length(GOOD_SGRNA)> 0 & is.numeric(GOOD_SGRNA) & GOOD_SGRNA%%1==0 & GOOD_SGRNA>=0)) stop(GOOD_SGRNA, " is not a non-negative integer")
if(!(length(MIN_WIDTH)> 0 & is.numeric(MIN_WIDTH) & MIN_WIDTH%%1==0 & MIN_WIDTH>0)) stop(MIN_WIDTH, " is not a positive integer")
if(!(length(CUTOFF)> 0 & is.numeric(MIN_WIDTH) & CUTOFF>0)) stop(CUTOFF, " must be a positive number")
# PART III main program
# check the input
if(nrow(bins <- read.table(INPUT, head=TRUE))==0){stop("crest_peak_call: input is empty")}
if(length(which(colnames(bins) == c("group_id", "items_in_group", "lo_value", "p", "FDR", "goodsgrna"))) != ncol(bins)) stop("column name of INPUT is not valid")
bins.tmp <- do.call(rbind, strsplit(as.character(bins$group_id), split=":|-"))
bins.gr <- GRanges(seqnames=bins.tmp[,1], IRanges(as.numeric(as.character(bins.tmp[,2])), as.numeric(as.character(bins.tmp[,3]))), score=-log(bins$FDR), FDR=bins$FDR, goodsgrna=bins$goodsgrna)
write.table(as.data.frame(bins.gr)[,c(1,2,3,6)], file = paste(PREFIX, ".bdg", sep=""), append = FALSE, quote = FALSE, sep = "\t",
eol = "\n", na = "NA", dec = ".", row.names = FALSE,
col.names = FALSE, qmethod = c("escape", "double"),
fileEncoding = "")
peaks.gr <- reduce(bins.gr[which(bins.gr$score >= CUTOFF & bins.gr$goodsgrna >= GOOD_SGRNA)])
peaks.gr[which(width(peaks.gr) < MIN_WIDTH)] <- resize(peaks.gr[which(width(peaks.gr) < MIN_WIDTH)], width=MIN_WIDTH, fix="center")
write.table(as.data.frame(peaks.gr)[,c(1,2,3)], file = paste(PREFIX, ".bed", sep=""), append = FALSE, quote = FALSE, sep = "\t",
eol = "\n", na = "NA", dec = ".", row.names = FALSE,
col.names = FALSE, qmethod = c("escape", "double"),
fileEncoding = "")
|
|
39f918f500af6388a2f8068947cbe19d09fa811d
|
172b75afc28797c0658cbc6ccfce99569e1aeb8e
|
/man/epakernel.Rd
|
bbc2ffa2b15958fe2fd0b5196a739362fb93170f
|
[] |
no_license
|
cran/MEPDF
|
6baa0bfee2c0f3c06fc8ca1b9db86e475c8d2e0c
|
cd5e5cfcd3c6439e05c88bc32ab2275006cdb56c
|
refs/heads/master
| 2018-10-31T07:36:24.045738
| 2018-09-26T13:10:03
| 2018-09-26T13:10:03
| 108,254,982
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 289
|
rd
|
epakernel.Rd
|
\name{epakernel}
\alias{epakernel}
\title{epakernel}
\usage{
epakernel(x,H)
}
\description{
Kernel function based on the normal distribution.
}
\arguments{
\item{x}{Evaluation point.}
\item{H}{Positive-definite, symmetric matrix as bandwidth.}
}
\examples{
epakernel(c(1,1),H = diag(2))
}
|
f90cdda4ad65cf665cdcc51e7ea36d985459754b
|
679c68425cab4a554aaf86290f2e1feadfba413b
|
/R/limes-package.R
|
a88f1a6da8a0ff2a6da674cd1d44772d2896d0f4
|
[] |
no_license
|
pik-piam/limes
|
b271576c645931415925c81d89ffa6b1b6495ec7
|
435f7ddbd8cee78f991707ded25289efff9d5801
|
refs/heads/master
| 2023-08-18T07:16:27.087324
| 2023-08-15T15:12:51
| 2023-08-15T15:12:51
| 206,319,561
| 0
| 5
| null | 2023-07-10T16:15:17
| 2019-09-04T12:57:31
|
R
|
UTF-8
|
R
| false
| false
| 477
|
r
|
limes-package.R
|
#' The LIMES R package
#'
#' Contains the LIMES-specific routines for data and model output manipulation
#'
#' \tabular{ll}{ Package: \tab remind\cr Type: \tab Package\cr Version: \tab
#' 7.6\cr Date: \tab 2017-11-20\cr License: \tab LGPL-3\cr LazyLoad: \tab
#' yes\cr }
#'
#' @name limes-package
#' @aliases limes-package remind
#' @docType package
#' @author Sebastian Osorio and Renato Rodrigues
#'
#' Maintainer: Sebastian Osorio <sebastian.osorio@pik-potsdam.de>
NULL
|
75f0123742df81a55b459a12d1a509eed1c4ca7d
|
ddc68b452b15b423bd680e01ccd552983481f76b
|
/example/genomic_ideogram_customize.R
|
748551d471c230128c48a8965f406b06d390db03
|
[] |
no_license
|
Adamyazori/circlize_examples
|
31197d38a01b34ec0b089cc0bc4b1a258991658d
|
0dad84d83d79741ccabfee3beb8c7cca707c9f71
|
refs/heads/master
| 2023-03-17T16:03:32.534312
| 2020-06-23T20:46:41
| 2020-06-23T20:46:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 450
|
r
|
genomic_ideogram_customize.R
|
library(circlize)
circos.initializeWithIdeogram(plotType = NULL)
circos.trackPlotRegion(ylim = c(0, 1), panel.fun = function(x, y) {
chr = get.cell.meta.data("sector.index")
xlim = get.cell.meta.data("xlim")
ylim = get.cell.meta.data("ylim")
circos.rect(xlim[1], 0, xlim[2], 0.5,
col = rgb(runif(1), runif(1), runif(1)))
circos.text(mean(xlim), 0.9, chr, cex = 0.5, facing = "clockwise", niceFacing = TRUE)
}, bg.border = NA)
circos.clear()
|
9e66cbc52853ec8e5233a2f95a1005c9d183b2e4
|
7e69dd0a831a9eddc70d5e868f5365bf3cde732b
|
/R/utils.R
|
2c562969e51c5500d766c37700a54e6cb374f219
|
[] |
no_license
|
firebitsbr/varmint
|
096ab682bb0369cf4f8fbb490539dca582a98b7f
|
3d29290f58f0e850bf76b03047815f1e1526e37f
|
refs/heads/master
| 2020-04-07T21:27:14.060451
| 2018-04-11T15:57:38
| 2018-04-11T15:57:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 38
|
r
|
utils.R
|
is_not_null <- function(x) !is.null(x)
|
703e8e0a0ecbe7d580cafd60df849f9375372b72
|
7f72ac13d08fa64bfd8ac00f44784fef6060fec3
|
/RGtk2/man/gtkListStoreInsert.Rd
|
15e32ba4713ab5db700ef8194bdbe5366f3799c3
|
[] |
no_license
|
lawremi/RGtk2
|
d2412ccedf2d2bc12888618b42486f7e9cceee43
|
eb315232f75c3bed73bae9584510018293ba6b83
|
refs/heads/master
| 2023-03-05T01:13:14.484107
| 2023-02-25T15:19:06
| 2023-02-25T15:20:41
| 2,554,865
| 14
| 9
| null | 2023-02-06T21:28:56
| 2011-10-11T11:50:22
|
R
|
UTF-8
|
R
| false
| false
| 826
|
rd
|
gtkListStoreInsert.Rd
|
\alias{gtkListStoreInsert}
\name{gtkListStoreInsert}
\title{gtkListStoreInsert}
\description{Creates a new row at \code{position}. \code{iter} will be changed to point to this new
row. If \code{position} is larger than the number of rows on the list, then the
new row will be appended to the list. The row will be empty after this
function is called. To fill in values, you need to call
\code{\link{gtkListStoreSet}} or \code{\link{gtkListStoreSetValue}}.}
\usage{gtkListStoreInsert(object, position)}
\arguments{
\item{\verb{object}}{A \code{\link{GtkListStore}}}
\item{\verb{position}}{position to insert the new row}
}
\value{
A list containing the following elements:
\item{\verb{iter}}{An unset \code{\link{GtkTreeIter}} to set to the new row}
}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
f8f9b9abf9d46f3c5906bc081f4fe67b09482e9c
|
d236278de7ff6f0e80400b43ff9d17b181f92c91
|
/scripts/fst/plotPBS5%.r
|
95cd21ce146177be2e6a888154e46856c85dcfe4
|
[] |
no_license
|
eoziolor/grandis_introgression
|
049917208eb80f717fb6e9828c16b7fdce26e688
|
a0fb5bed63fbbdbb787df3745ccade715760be73
|
refs/heads/master
| 2022-01-28T16:18:41.187524
| 2019-05-09T10:14:40
| 2019-05-09T10:14:40
| 106,438,641
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,037
|
r
|
plotPBS5%.r
|
pbs<-read.table("~/analysis/data/fst/allpbs5kb",header=FALSE,stringsAsFactors = FALSE)
pbsname<-c("Scaf","start","end","BBpbs","VBpbs","PBpbs","SJpbs","BNPpbs","keep")
colnames(pbs)<-pbsname
col<-c()
for (i in 1:5){
col[i]<-quantile(pbs[,i+3],prob=.99,na.rm=TRUE)
}
colb<-c()
for (i in 1:3){
colb[i]<-quantile(pbs[,i+3],prob=.95,na.rm=TRUE)
}
nsnps <-pbs[,"keep"]
subw<-nsnps>0
#chr<-read.table("~/analysis/fst/scripts/chr_colors",stringsAsFactors=FALSE,sep="\t")
library(stringr)
library(dplyr)
library(gtools)
#plot(pbs[subw,4],pch=20,cex=.5,col=factor(pbs[subw,1]))
#plot(pbs[subw,4],pch=20,cex=.5,col=chr[pbs[subw,1],2])
#legend('topright',legend=levels(mixedsort(pbs[,1])),col=1:2,cex=.5,pch=1)
pbsct<-pbs %>% filter(str_detect(Scaf,"chr"))
nsnps <-pbsct[,"keep"]
subwc<-nsnps>0
pbsc<-pbsct[subwc,]
rownames(pbsc)<-seq(1:dim(pbsc[subwc,])[1])
pbsc<-pbsc[,1:8]
###Doing this on merged windows to avoid patchyness of peak coloration
pbs_out_temp<-read.table("~/analysis/data/fst/PBSoutliers_5kb_all_max.bed",stringsAsFactors = FALSE) #loads a pbs vector with windows merged within 50kb of each other and with max and windows count statistics
names<-c("Scaf","start","end","BBmax","BBcount","VBmax","VBcount","PBmax","PBcount","SJmax","SJcount","BNPmax","BNPcount")
colnames(pbs_out_temp)<-names
pbs_out<-pbs_out_temp %>% filter(str_detect(Scaf,"chr"))
#checking for whether those are outliers in different groups
all<-pbs_out[,4]>colb[1] & pbs_out[,6]>colb[2] & pbs_out[,8]>colb[3] & pbs_out[,10]>col[4] & pbs_out[,12]>col[5]
res<-pbs_out[,4]>colb[1] & pbs_out[,6]>colb[2] & pbs_out[,8]>colb[3] & pbs_out[,10]<col[4] & pbs_out[,12]<col[5]
interm<-pbs_out[,4]<colb[1] & pbs_out[,6]<colb[2] & pbs_out[,8]<colb[3] & pbs_out[,10]>col[4] & pbs_out[,12]>col[5]
bbu<-pbs_out[,4]>colb[1] & pbs_out[,6]<colb[2] & pbs_out[,8]<colb[3] & pbs_out[,10]<col[4] & pbs_out[,12]<col[5]
vbu<-pbs_out[,4]<colb[1] & pbs_out[,6]>colb[2] & pbs_out[,8]<colb[3] & pbs_out[,10]<col[4] & pbs_out[,12]<col[5]
pbu<-pbs_out[,4]<colb[1] & pbs_out[,6]<colb[2] & pbs_out[,8]>colb[3] & pbs_out[,10]<col[4] & pbs_out[,12]<col[5]
sju<-pbs_out[,4]<colb[1] & pbs_out[,6]<colb[2] & pbs_out[,8]<colb[3] & pbs_out[,10]>col[4] & pbs_out[,12]<col[5]
bnpu<-pbs_out[,4]<colb[1] & pbs_out[,6]<colb[2] & pbs_out[,8]<colb[3] & pbs_out[,10]<col[4] & pbs_out[,12]>col[5]
#write.table(pbsc[,1:3],"~/analysis/data/fst/PBS_keep_5kb.bed",row.names = FALSE,col.names = FALSE,quote=FALSE)
write.table(pbs_out[all,1:3],"~/analysis/data/fst/pbs_regions_sharedall_5%res.bed",row.names = FALSE,col.names = FALSE,quote = FALSE)
write.table(pbs_out[res,1:3],"~/analysis/data/fst/pbs_regions_sharedres_5%res.bed",row.names = FALSE,col.names = FALSE,quote = FALSE)
write.table(pbs_out[interm,1:3],"~/analysis/data/fst/pbs_regions_sharedinterm_5%res.bed",row.names = FALSE,col.names = FALSE,quote = FALSE)
write.table(pbs_out[bbu,1:3],"~/analysis/data/fst/pbs_regions_sharedbbu_5%res.bed",row.names = FALSE,col.names = FALSE,quote = FALSE)
write.table(pbs_out[vbu,1:3],"~/analysis/data/fst/pbs_regions_sharedvbu_5%res.bed",row.names = FALSE,col.names = FALSE,quote = FALSE)
write.table(pbs_out[pbu,1:3],"~/analysis/data/fst/pbs_regions_sharedpbu_5%res.bed",row.names = FALSE,col.names = FALSE,quote = FALSE)
write.table(pbs_out[sju,1:3],"~/analysis/data/fst/pbs_regions_sharedsju_5%res.bed",row.names = FALSE,col.names = FALSE,quote = FALSE)
write.table(pbs_out[bnpu,1:3],"~/analysis/data/fst/pbs_regions_sharedbnpu_5%res.bed",row.names = FALSE,col.names = FALSE,quote = FALSE)
source("http://bioconductor.org/biocLite.R")
biocLite()
library("rtracklayer")
bed1=import("~/analysis/data/fst/PBS_keep_5kb.bed")
bedall=import("~/analysis/data/fst/pbs_regions_sharedall_5%res.bed")
bed1overlall=bed1[bed1 %over% bedall]
hitsall<-findOverlaps(bedall,bed1)
allhit<-subjectHits(hitsall)
bedres=import("~/analysis/data/fst/pbs_regions_sharedres_5%res.bed")
bed1overlres=bed1[bed1 %over% bedres]
hitsres<-findOverlaps(bedres,bed1)
reshit<-subjectHits(hitsres)
bedinterm=import("~/analysis/data/fst/pbs_regions_sharedinterm_5%res.bed")
bed1overlinterm=bed1[bed1 %over% bedinterm]
hitsinterm<-findOverlaps(bedinterm,bed1)
intermhit<-subjectHits(hitsinterm)
bedbbu=import("~/analysis/data/fst/pbs_regions_sharedbbu_5%res.bed")
bed1overlbbu=bed1[bed1 %over% bedbbu]
hitsbbu<-findOverlaps(bedbbu,bed1)
bbuhit<-subjectHits(hitsbbu)
bedvbu=import("~/analysis/data/fst/pbs_regions_sharedvbu_5%res.bed")
bed1overlvbu=bed1[bed1 %over% bedvbu]
hitsvbu<-findOverlaps(bedvbu,bed1)
vbuhit<-subjectHits(hitsvbu)
bedpbu=import("~/analysis/data/fst/pbs_regions_sharedpbu_5%res.bed")
bed1overlpbu=bed1[bed1 %over% bedpbu]
hitspbu<-findOverlaps(bedpbu,bed1)
pbuhit<-subjectHits(hitspbu)
bedsju=import("~/analysis/data/fst/pbs_regions_sharedsju_5%res.bed")
bed1overlsju=bed1[bed1 %over% bedsju]
hitssju<-findOverlaps(bedsju,bed1)
sjuhit<-subjectHits(hitssju)
bedbnpu=import("~/analysis/data/fst/pbs_regions_sharedbnpu_5%res.bed")
bed1overlbnpu=bed1[bed1 %over% bedbnpu]
hitsbnpu<-findOverlaps(bedbnpu,bed1)
bnpuhit<-subjectHits(hitsbnpu)
pbsc<-cbind(pbsc,0,0,0,0,0,0,0,0)
newn<-c("Scaf","start","end","BB","VB","PB","SJ","BNP","all","res","interm","bbu","vbu","pbu","sju","bnpu")
colnames(pbsc)<-newn
pbsc[allhit,"all"]<-pbsc[allhit,"all"]+1
pbsc[reshit,"res"]<-pbsc[reshit,"res"]+1
pbsc[intermhit,"interm"]<-pbsc[intermhit,"interm"]+1
pbsc[bbuhit,"bbu"]<-pbsc[bbuhit,"bbu"]+1
pbsc[vbuhit,"vbu"]<-pbsc[vbuhit,"vbu"]+1
pbsc[pbuhit,"pbu"]<-pbsc[pbuhit,"pbu"]+1
pbsc[sjuhit,"sju"]<-pbsc[sjuhit,"sju"]+1
pbsc[bnpuhit,"bnpu"]<-pbsc[bnpuhit,"bnpu"]+1
##plotting in 5kb windows
####Plotting common regions
palette(c("grey50","grey70"))
par(mfrow=c(5,1),mar=c(0,3,0,0))
plot(pbsc[,4],pch=20,cex=1.2,
col=ifelse((all),"purple",
ifelse((res),"black",
ifelse((interm),"firebrick2",
ifelse((bbu),"gold2",
ifelse(pbsc[,4]>col[1],"green2",sort(as.factor(pbsc[,1]))))))),
xlab="",xaxt='n',ylab="BB (PBS)",cex.lab=1,cex.axis=2.2,bty="n",ylim=c(-.5,3.8),xaxs="i",yaxs="i")
# legend("topright",legend=c("Shared by all adapted","Resistant only","Intermediate only","Shared (group non-specific)","Local"),
# col=c("purple","black","firebrick2","green2","gold2"),pch=20,cex=1.8,y.intersp=.5,x.intersp=.8,bty='n')
plot(pbsc[,5],pch=20,cex=1.2,
col=ifelse((all),"purple",
ifelse((res),"black",
ifelse((interm),"firebrickas.numeric(rownames(pbsc))==allhit2",
ifelse((vbu),"gold2",
ifelse(pbsc[,5]>col[2],"green2",sort(as.factor(pbsc[,1]))))))),
xlab="",xaxt='n',ylab="VB (PBS)",cex.lab=1,cex.axis=2.2,bty="n",ylim=c(-.5,3.8),xaxs="i",yaxs="i")
plot(pbsc[,6],pch=20,cex=1.2,
col=ifelse((all),"purple",
ifelse((res),"black",
ifelse((interm),"firebrick2",
ifelse((pbu),"gold2",
ifelse(pbsc[,6]>col[3],"green2",sort(as.factor(pbsc[,1]))))))),
xlab="",xaxt='n',ylab="PB (PBS)",cex.lab=1,cex.axis=2.2,bty="n",ylim=c(-.5,3.8),xaxs="i",yaxs="i")
plot(pbsc[,7],pch=20,cex=1.2,
col=ifelse((all),"purple",
ifelse((res),"black",
ifelse((interm),"firebrick2",
ifelse((sju),"gold2",
ifelse(pbsc[,7]>col[4],"green2",sort(as.factor(pbsc[,1]))))))),
xlab="",xaxt='n',ylab="SJ (PBS)",cex.lab=1,cex.axis=2.2,bty="n",ylim=c(-.5,3.8),xaxs="i",yaxs="i")
plot(pbsc[,8],pch=20,cex=1.2,
col=ifelse((all),"purple",
ifelse((res),"black",
ifelse((interm),"firebrick2",
ifelse((bnpu),"gold2",
ifelse(pbsc[,8]>col[5],"green2",sort(as.factor(pbsc[,1]))))))),
xlab="",xaxt='n',ylab="BNP (PBS)",cex.lab=1,cex.axis=2.2,bty="n",ylim=c(-.5,3.8),xaxs="i",yaxs="i")
|
70739ff60fa1646198d8ff566b6972220c7f2df4
|
29b6da9b76714bb4ab21635ccfa79401e959d468
|
/unitTestDemo.R
|
d28f8eab370a894bcf1fcad078d1d686e353e176
|
[] |
no_license
|
LottaRydin/pharmetheus_interview
|
488ba179cf0d12ece069fbf6a469d94244b58b6d
|
2eef28224c487b8a12316de7629931d3c497027d
|
refs/heads/master
| 2022-07-15T18:37:13.844383
| 2020-05-06T12:53:24
| 2020-05-06T12:53:24
| 261,399,121
| 0
| 0
| null | 2020-05-06T12:53:25
| 2020-05-05T08:21:21
|
R
|
UTF-8
|
R
| false
| false
| 1,293
|
r
|
unitTestDemo.R
|
library(testthat)
#The function below, greatesValueOf, was created by the fictive consultant Demo.
#It works well for the type of numeric data Demo is handling.
#However, if Demo were to share this function with colleagues
#who handle numeric data with other characteristics, there is
#a risk for errors.
#Do not edit this code, only make sure you understand what it does.
greatestValueOf <- function(numericValueVector){
greatest <- 0
for (value in numericValueVector){
if (value > greatest){
greatest <- value
}
}
return(greatest)
}
test_that('greatestValueOf returns the greatest value of a numeric vector',{
  #This test verifies that the correct result is returned for the
  #type of data that Demo wrote the function for
  expect_equal(object=greatestValueOf(numericValueVector=c(7,2,33,12)),
               expected=33)
  expect_equal(object=greatestValueOf(numericValueVector=c(1)),
               expected=1)
  # NOTE(review): tests that would fail with the current implementation but
  # pass with a general one: an all-negative vector (e.g. max of c(-3, -1)
  # is -1, not 0) and an empty vector.
  #Think about what tests would at the same time
  #reveal limitations with the function above and indicate when the limitations have been removed,
  #i.e. what tests would fail with the current version of the function and pass with a version for more general types of data
  # Do not write code now, only think about the questions.
})
|
2447e68b482c29c427b4490f6d25324e0ee6996b
|
1b661731d1243a1147ba55a205ad6ff7ca67c267
|
/Thesis/slideImgs/malware-trend-vis.R
|
535c5ca1f808554a30ea0e933cc5623ec697801c
|
[] |
no_license
|
jingxiang-li/kaggle-malware
|
208312b544cdaba47797f269b788a5c413eb7df9
|
e946501200549b94676cf3795f79cdfe2182943b
|
refs/heads/master
| 2020-04-17T04:41:27.828971
| 2016-08-29T19:27:34
| 2016-08-29T19:27:34
| 66,872,244
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,385
|
r
|
malware-trend-vis.R
|
# Script setup for the malware-trend slide figures.
# NOTE(review): `rm(list = ls())` and `setwd()` in a script, and `require()`
# instead of `library()`, are discouraged; kept as-is for this one-off slide
# script.
rm(list = ls())
require(dplyr)
require(pipeR)
require(ggplot2)
setwd('~/Thesis/slideImgs/')
source("ggplot_theme.R")
# new malwares trend
# `x` appears to be measured bar heights from a source chart and `ratio`
# the chart-units-to-count scale (472 units = 140M) — TODO confirm source.
x = c(24, 26, 40, 60, 61, 117, 283, 482, 486, 299)
ratio = 140000000 / 472
year = 2007:2016
df = data.frame(y = x * ratio, year = 2007:2016)
df$year = as.factor(df$year)
# Axis-label formatter: render tick values as "###M" by keeping the first
# three characters of the fixed-notation number and appending "M". The
# first tick is hard-coded to "0M" (it would otherwise render as the padded
# blanks that format() produces for 0).
fancy_scientific <- function(l) {
  labels <- paste(substr(format(l, scientific = FALSE), 1, 3), 'M', sep = "")
  labels[1] <- "0M"
  labels
}
# Figure 1: new malwares registered per year (bar chart written to PDF).
# `%>>%` is the pipeR pipe loaded above.
cairo_pdf("./new-malware.pdf", width = 7, height = 5.25)
df %>>%
  ggplot(aes(year, y)) +
  geom_bar(stat = 'identity', width = .8) +
  ylab('') +
  xlab('') +
  ggtitle('Number of New Malwares Registered in the Last 10 Years') +
  theme_Publication() +
  scale_fill_Publication() +
  scale_y_continuous(labels = fancy_scientific, limits = c(0, 1.5e+08))
dev.off()
# Figure 2: cumulative totals. `area` appears to be measured chart areas,
# rescaled first to heights (/53) and then to counts (542 units = 600M) —
# TODO confirm the digitisation source.
area <- c(439, 832, 1456, 2236, 3018, 4681, 8632, 14995, 21990, 26157)
x <- area / 53
ratio = 600000000 / 542
df = data.frame(y = x * ratio, year = 2007:2016)
df$year = as.factor(df$year)
cairo_pdf("./sum-malware.pdf", width = 7, height = 5.25)
df %>>%
  ggplot(aes(year, y)) +
  geom_bar(stat = 'identity', width = .8) +
  ylab('') +
  xlab('') +
  ggtitle('Total Number of Malwares Registered in the Last 10 Years') +
  theme_Publication() +
  scale_fill_Publication() +
  scale_y_continuous(labels = fancy_scientific, limits=c(0, 6e+08))
dev.off()
|
e7e55c101f04a08c7468a103a160d0b721c60f90
|
9e80cbf66448bc647acc9c7e6176d285fe422efa
|
/one_pvc.R
|
464f0084248deaac0a82e66c1db215136e5ca535
|
[] |
no_license
|
JiaRu2016/Scrape-www.tjcn.org
|
4e4573db46bbb9c9f0fd2f720dcb2e8576c26712
|
975f82fb3b62d3fd60631178332d6f7dae0e68a9
|
refs/heads/master
| 2021-01-20T19:34:46.738738
| 2016-06-24T16:07:10
| 2016-06-24T16:07:10
| 61,523,164
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,564
|
r
|
one_pvc.R
|
#
# Input: the index URL of a single province's statistical-bulletin page.
# Parse the page and extract every prefecture-level city's name, the years
# available, and the link for each year; assemble them into one data.frame.
# Also write the table to a txt file for later proofreading.
# A folder named after the province is created, e.g. "江苏省", containing
# the file "江苏省_index.txt".
require(rvest)
require(stringr)
# Index URL for a single province,
# e.g. the Anhui bulletin index:
# pvc_url <- "http://www.tjcn.org/help/3551.html"
# Scrape one province's bulletin index page: collect every prefecture-level
# city, its available years, and the link for each (city, year) pair, then
# write the table to "<province>/<province>_index.txt".
#
# Args:
#   name    - province name, used only to cross-check the scraped title.
#   pvc_url - URL of the province's bulletin index page.
# Returns 0 on completion (side effects: creates a directory and a file).
SetPvc <- function(name, pvc_url) {
  message("开始请求网站。。。可能要等好久呢(╯‵□′)╯︵┻━┻")
  # The site serves pages in a GB-family Chinese encoding.
  html_code <- read_html(pvc_url, encoding = "GB18030")
  message("响应成功啦啦啦~~~")
  # Page title, used to verify we fetched the intended province.
  title <- html_code %>%
    html_node("h2") %>%
    html_text()
  message("开始爬取:", title)
  # Strip the "统计公报索引" suffix to get the bare province name.
  pvc <- str_replace(title, pattern = "^(.+)统计公报索引$", replacement = "\\1")
  # Cross-check the first two characters of the supplied vs. scraped name.
  if (str_sub(name, 1, 2) == str_sub(pvc, 1, 2)) {
    message("校对省份名称成功")
  } else {
    message("校对省份名称出现问题:", name, " != ", pvc, "!!!!!!!!!!!!!!!")
  }
  # Alternating list: city name, then its space-separated available years.
  cy_list <- html_code %>%
    html_nodes(".sy+ .sy .tt td") %>%
    html_text() %>%
    str_replace_all(pattern = "\\s+\r\n\\s+", replacement = " ")
  cy_list
  # One link per (city, year) pair, in page order.
  cy_link <- html_code %>%
    html_nodes(".sy+ .sy .tt a") %>%
    html_attr("href")
  cy_link
  # Sanity check 1: total year count in the list matches the link count.
  check1 <- sum(str_count(cy_list, pattern = "\\d{4}")) == length(cy_link)
  if (!check1) {
    warning("城市列表里的年份总数 != link 数目", name)
  }
  # Build a long table with one row per (city, year).
  # NOTE(review): growing `df` with rbind() in a loop is O(n^2); fine at
  # these page sizes, but worth lapply + one rbind if reused at scale.
  df <- data.frame()
  for (i in seq(1, length(cy_list), by = 2)) {
    # Element i is the city name; element i + 1 is its year string.
    c_name <- cy_list[i]
    c_years <- cy_list[i+1] %>%
      str_extract_all(pattern = "\\d{4}") %>%
      unlist() %>%
      str_trim()
    c_df <- data.frame(city = c_name, years = c_years)
    df <- rbind(df, c_df)
  }
  # Sanity check 2: table rows match link count before attaching links.
  check2 <- nrow(df) == length(cy_link)
  if (!check2) {
    warning("nrow(df) != link 数目", name)
  }
  df$link <- cy_link
  df
  # Create the province directory and persist the index table.
  dir.create(pvc)
  index_file_name <- paste0(pvc, "_index", ".txt")
  write.table(df, row.names = F, file = file.path(pvc, index_file_name))
  return(0)
}
# SetPvc(name = "西藏", pvc_url = "http://www.tjcn.org/help/3571.html")
#
# #
|
b0bc2ba78f4f44083e35b563b240bc541045da93
|
98900f0cd8cbc35ae0904a6c7df65f18ea04d6e9
|
/man/addFilter.Rd
|
17da07ff498f15641b5980c4b549ad2de164f427
|
[
"MIT"
] |
permissive
|
seacode/rsimGmacs
|
0a55a984cb1b457db72767209cf0c8964b172e14
|
c49d708e51d2d4dddcab2eaded0500ef9ce1774a
|
refs/heads/master
| 2021-01-13T13:58:34.136732
| 2015-07-01T15:18:41
| 2015-07-01T15:18:41
| 29,838,306
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 631
|
rd
|
addFilter.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/addFilter.R
\name{addFilter}
\alias{addFilter}
\title{Add a file type to file extensions}
\usage{
addFilter(rowname = "csv", desc = "csv files (*.csv)", filter = "*.csv",
Filters = NULL)
}
\arguments{
\item{rowname}{- row name to add in filters matrix}
\item{desc}{- brief description of the file types}
\item{filter}{- extension filter ("*.ext")}
\item{Filters}{- matrix of filters to add to}
}
\value{
augmented Filters matrix
}
\description{
Add a filter to the file extension matrix
(if it's not there already) used in file dialogs.
}
|
4acaeaf373b14c7ee5331d9db2babf2eb127f80a
|
f7a9004cb85908367a6b8775497d9f313eb6cd76
|
/M2_ContinuousPrior.R
|
9f81b35d5a763a0be7b32349e32321811330bb87
|
[] |
no_license
|
YaohuiZeng/Bayesian-changepoint
|
1d23fc2c192e0f6aa283f302ac013553f180d102
|
c7ca0e2cc12a786bf2c75e4af767c52879f9455a
|
refs/heads/master
| 2021-01-17T07:18:30.114300
| 2015-06-10T02:43:14
| 2015-06-10T02:43:14
| 36,478,717
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,282
|
r
|
M2_ContinuousPrior.R
|
## Bayesian Course Project: Bayesian modeling for a changepoint problem
## Yaohui Zeng <yaohui-zeng@uiowa.edu>, May 13, 2015
## Taken from the OpenBUGS example:
## Stagnant: a changepoint problem and an illustration of how NOT
## to do MCMC!
## Model 2: continuous prior for the changepoint value, x_k.
## Slice sampling (written by Dr. Brian Smith <brian-j-smith@uiowa.edu>)
# One slice-sampling update starting from x0 (written by Dr. Brian Smith).
#
# Args:
#   x0    - current point (may be a vector).
#   f     - density kernel; evaluated as f(x, ...). If `log` is TRUE, f is
#           taken to be on the log scale.
#   width - width of the randomly positioned bracketing interval.
#   log   - whether f returns a log density.
# Returns the next sampled point, same length as x0.
slice <- function(x0, f, width, log=FALSE, ...) {
  n <- length(x0)
  # Draw the vertical slice level: on the log scale subtract an Exp(1)
  # draw; otherwise sample uniformly under the density at x0.
  if (log) {
    y <- f(x0, ...) - rgamma(1, 1, 1)
  } else {
    y <- runif(1, 0, f(x0, ...))
  }
  # Randomly position an interval of the given width around x0.
  lower <- x0 - runif(n, 0, width)
  upper <- lower + width
  # Shrinkage loop: propose until the proposal lies above the slice level.
  repeat {
    proposal <- runif(n, lower, upper)
    if (f(proposal, ...) > y) {
      return(proposal)
    }
    below <- (proposal < x0)
    lower[below] <- proposal[below]
    upper[!below] <- proposal[!below]
  }
}
## Slice Sampling Density Kernel for xk
# Log-density kernel (up to an additive constant) of the changepoint xk,
# used as the target for slice sampling. Reads the data Y, x, N from the
# global environment; current parameter values come in via `pars`.
# Returns -Inf outside the Unif(a, b) prior support.
gxk <- function(xk, a, b, pars) {
  if(xk < a || xk > b) return(-Inf)
  s2 <- pars$s2
  beta1 <- pars$beta1
  beta2 <- pars$beta2
  alpha <- pars$alpha
  # Number of observations at or below the candidate changepoint.
  k <- sum(x <= xk)
  # Residual sum of squares of the two regression segments joined at xk.
  # NOTE(review): if xk >= max(x) then k == N and (k+1):N indexes past Y,
  # producing NA. With the stagnant data b = 1.1 < max(x) = 1.19 so this
  # cannot occur, but confirm before reusing with other (a, b).
  delta <- sum((Y[1:k] - alpha - beta1 * x[1:k] + beta1 * xk)^2) +
    sum((Y[(k+1):N] - alpha - beta2 * x[(k+1):N] + beta2 * xk)^2)
  # Gaussian log-likelihood kernel: -delta / (2 * sigma^2).
  -1 / (2*s2) * delta
}
## Rejection sampling
# Same log kernel as gxk() but in the two-argument shape expected by
# HI::arms() for rejection sampling; the prior support is enforced
# separately by support(), not here. Reads Y, x, N from the global
# environment.
freject<- function(xk, pars){
  s2 <- pars$s2
  beta1 <- pars$beta1
  beta2 <- pars$beta2
  alpha <- pars$alpha
  k <- sum(x <= xk)
  delta <- sum((Y[1:k] - alpha - beta1 * x[1:k] + beta1 * xk)^2) +
    sum((Y[(k+1):N] - alpha - beta2 * x[(k+1):N] + beta2 * xk)^2)
  -1 / (2*s2) * delta
}
## Rejection sampling support
# Indicator for the uniform prior's support on the changepoint, (-1.3, 1.1).
# `pars` is accepted (and ignored) to match the signature arms() expects.
support <- function(xk, pars){
  xk > -1.3 & xk < 1.1
}
## Gibbs sampler for the two-segment changepoint regression (Model 2:
## continuous Unif(a, b) prior on the changepoint xk).
##
## Args:
##   para.init - list of starting values: alpha, beta1, beta2, s2, xk.
##   seed      - RNG seed.
##   L         - number of iterations; row 1 of the output holds the
##               initial values.
##   method    - how xk is updated: 1 = slice sampling, 2 = rejection
##               sampling via HI::arms(), 3 = Metropolis-Hastings.
##   verbose   - print each iteration's state.
## Returns a data.frame with L rows and columns
## alpha, beta1, beta2, s2, xk, k.
##
## NOTE(review): reads the data Y, x, N and the helpers slice/gxk/freject/
## support (and arms from package HI) from the global environment — they
## must be defined before calling.
gibbs.sampling.m2 <- function(para.init, seed, L = 1000,
                              method = 1, verbose = FALSE) {
  set.seed(seed)
  ## Hyperparameters
  mu.alpha <- 0; s2.alpha <- 1e6
  mu.beta <- 0; s2.beta <- 1e6
  a.s2 <- 0.001; b.s2 <- 0.001
  a <- -1.3; b <- 1.1 # parameters of Unif(a, b) for changepoint xk
  ## Model Parameters and Initial Values
  alpha <- para.init$alpha
  beta1 <- para.init$beta1
  beta2 <- para.init$beta2
  s2 <- para.init$s2
  xk <- para.init$xk
  ## Divide data into two groups based on xk
  k <- sum(x <= xk)
  ## save sampled values
  parms <- matrix(NA, ncol = 6, nrow = L)
  parms[1, ] <- c(alpha, beta1, beta2, s2, xk, k)
  for (i in 2:L) {
    # alpha: full-conditional normal draw; the k == N branch avoids the
    # empty second segment.
    b.a <- 1 / (N / s2 + 1 / s2.alpha)
    if (k < N) {
      delta <- sum(Y) - beta1 * sum(x[1:k] - xk) -
        beta2 * sum(x[(k+1):N] - xk)
    } else {
      delta <- sum(Y) - beta1 * sum(x[1:k] - xk)
    }
    a.a <- b.a * (delta / s2 + mu.alpha / s2.alpha)
    alpha <- rnorm(1, mean = a.a, sd = b.a ^ 0.5)
    # beta1: slope of the first segment.
    b.b1 <- 1 / (sum((x[1:k] - xk) ^ 2) / s2 + 1 / s2.beta)
    a.b1 <- b.b1 * (sum((Y[1:k] - alpha) * (x[1:k] - xk)) / s2 +
                      mu.beta / s2.beta)
    beta1 <- rnorm(1, mean = a.b1, sd = b.b1 ^ 0.5)
    # beta2: slope of the second segment; falls back to the prior when the
    # second segment is empty (k == N).
    if (k < N) {
      b.b2 <- 1 / (sum((x[(k+1):N] - xk) ^ 2) / s2 + 1 / s2.beta)
      a.b2 <- b.b2 * (sum((Y[(k+1):N] - alpha) * (x[(k+1):N] - xk)) / s2
                      + mu.beta / s2.beta)
    } else {
      b.b2 <- 1 / (1 / s2.beta) # = s2.beta
      a.b2 <- b.b2 * (mu.beta / s2.beta) # = mu.beta
    }
    beta2 <- rnorm(1, mean = a.b2, sd = b.b2 ^ 0.5)
    # s2: inverse-gamma full conditional.
    a.3 <- a.s2 + N / 2
    if (k < N) {
      b.3 <- sum((Y[1:k] - alpha - beta1 * (x[1:k] - xk)) ^ 2) / 2 +
        sum((Y[(k+1):N] - alpha - beta2 * (x[(k+1):N] - xk)) ^ 2) / 2 + b.s2
    } else {
      b.3 <- sum((Y[1:k] - alpha - beta1 * (x[1:k] - xk)) ^ 2) / 2 + b.s2
    }
    s2 <- 1 / rgamma(1, a.3, b.3)
    if (method == 1) { ## slice sampling
      pars <- list(
        s2 = s2,
        beta1 = beta1,
        beta2 = beta2,
        alpha = alpha
      )
      xk <- slice(xk, f = gxk, width = 10, log = TRUE, a = a, b = b, pars = pars)
    } else if (method == 2) { ## rejection sampling
      parss <- list(
        alpha = alpha,
        beta1 = beta1,
        beta2 = beta2,
        s2 = s2
      )
      xk <- arms(0, freject, support, pars=parss, 1)
    } else if (method == 3) {
      ## M-H algorithm, uniform proposal; not working!
      ## NOTE(review): the log acceptance ratio below omits the 1/(2*s2)
      ## scaling and has the sign inverted relative to the target kernel
      ## exp(-delta/(2*s2)) — it should be (delta - cand.delta)/(2*s2).
      ## This is consistent with the "not working" note above.
      cand <- runif(1, a, b)
      cand.k <- sum(x <= cand)
      delta <- sum((Y[1:k] - alpha - beta1 * x[1:k] + beta1 * xk)^2) +
        sum((Y[(k+1):N] - alpha - beta2 * x[(k+1):N] + beta2 * xk)^2)
      cand.delta <- sum((Y[1:cand.k] - alpha - beta1 * x[1:cand.k] + beta1 * cand)^2) +
        sum((Y[(cand.k+1):N] - alpha - beta2 * x[(cand.k+1):N] + beta2 * cand)^2)
      ln.r <- cand.delta - delta
      xk <- ifelse(runif(1) < exp(ln.r), cand, xk)
    } else {
      stop('Method must be equal 1, 2 or 3. 1 = slice sampling; 2 = rejection sampling; 3 = metropolis hastings sampling.')
    }
    # Update index k
    k <- sum(x <= xk)
    if(verbose) {
      print(i)
      print(c(alpha, beta1, beta2, s2, xk, k))
    }
    ## store sampled values
    parms[i, ] <- c(alpha, beta1, beta2, s2, xk, k)
  }
  parms <- data.frame(parms)
  names(parms) <- c("alpha", "beta1", "beta2", "s2", "xk", "k")
  parms
}
library(coda)
library(xtable)
library(ars)
library(HI)
# Stagnant band-height data (OpenBUGS "Stagnant" example): 29 observations.
data <- list(Y = c(1.12, 1.12, 0.99, 1.03, 0.92, 0.90, 0.81, 0.83, 0.65, 0.67,
                   0.60, 0.59, 0.51, 0.44, 0.43, 0.43, 0.33, 0.30, 0.25, 0.24,
                   0.13, -0.01, -0.13, -0.14, -0.30, -0.33, -0.46, -0.43, -0.65),
             x = c(-1.39, -1.39, -1.08, -1.08, -0.94, -0.80, -0.63, -0.63,
                   -0.25, -0.25, -0.12, -0.12, 0.01, 0.11, 0.11, 0.11, 0.25,
                   0.25, 0.34, 0.34, 0.44, 0.59, 0.70, 0.70, 0.85, 0.85, 0.99,
                   0.99, 1.19),
             N = 29)
## Data: assigned into the global environment because the sampler and the
## kernels (gxk/freject) read Y, x, N globally.
Y <<- data$Y
x <<- data$x
N <<- data$N
# plot(data$x, data$Y, xlab = 'x', ylab = 'y', main = 'Stagnant band height data')
# chain 1: changepoint started at 0.5
para.ch1 <- list(
  alpha = 0.47,
  beta1 = -0.45,
  beta2 = -1.0,
  s2 = 1 / 5,
  xk = 0.5
)
# chain 2: identical start except xk = -0.5, to probe sensitivity to the
# changepoint starting value
para.ch2 <- list(
  alpha = 0.47,
  beta1 = -0.45,
  beta2 = -1.0,
  s2 = 1 / 5,
  xk = -0.5
)
## initial comparison (exploratory; slice sampling, 10k iterations)
ch1 <- gibbs.sampling.m2(para.init = para.ch1, seed = 12345, L = 10000,
                         method = 1, verbose = T)
ch1.mcmc <- as.mcmc(ch1)
plot(ch1.mcmc)
summary(ch1.mcmc)
ch2 <- gibbs.sampling.m2(para.init = para.ch2, seed = 12345, L = 10000,
                         method = 1, verbose = T)
ch2.mcmc <- as.mcmc(ch2)
plot(ch2.mcmc)
summary(ch2.mcmc)
cor(ch2[-6])
# NOTE(review): ch1.burn.mcmc / ch2.burn.mcmc are only defined further down
# this script — run top-to-bottom, the next three lines error. Move this
# section below the burn-in blocks (or run interactively in order).
stagnant.mcmc <- mcmc.list(ch1.burn.mcmc, ch2.burn.mcmc)
plot(stagnant.mcmc, ask=TRUE)
summary(stagnant.mcmc)
# Metropolis-Hastings variant (method 3; flagged "not working" above).
ch1 <- gibbs.sampling.m2(para.init = para.ch1, seed = 12345, L = 10000,
                         method = 3, verbose = F)
ch2 <- gibbs.sampling.m2(para.init = para.ch2, seed = 12345, L = 10000,
                         method = 3, verbose = F)
# compare to OpenBUGS
png('compare2BUGS_alpha_m2_fixed_rejection sampling.png', width = 800, height = 300)
plot(1:10000, ch1$alpha, type = 'l', col = 'red',
     xlab = 'iteration', ylab = 'alpha', lty = 3,
     ylim = c(0, 1),
     main = 'Comparison of alpha')
lines(1:10000, ch2$alpha, col = 'blue')
legend('bottomright', legend = c('Chain 1', 'Chain 2'), lty = c(3, 1),
       col = c('red', 'blue'))
dev.off()
png('compare2BUGS_xk_m2_fixed_rejection sampling.png', width = 800, height = 300)
plot(1:10000, ch1$xk, type = 'l', col = 'red',
     xlab = 'iteration', ylab = 'xk', lty = 3,
     ylim = c(-0.5, 1),
     main = 'Comparison of xk')
lines(1:10000, ch2$xk, col = 'blue')
legend('topright', legend = c('Chain 1', 'Chain 2'), lty = c(3, 1),
       col = c('red', 'blue'))
dev.off()
## more iterations, doesn't help
ch1 <- gibbs.sampling.m2(para.init = para.ch1, seed = 12345, L = 100000,
                         method = 1, verbose = T)
ch1.mcmc <- as.mcmc(ch1)
plot(ch1.mcmc)
summary(ch1.mcmc)
# Discard the first 10k iterations as burn-in.
ch1.burn <- ch1[-(1:10000), ]
ch1.burn.mcmc <- as.mcmc(ch1.burn)
plot(ch1.burn.mcmc)
summary(ch1.burn.mcmc)
cor(ch1[-6])
# NOTE(review): chain 2 uses L = 1000000 while chain 1 uses 100000 —
# confirm the asymmetry is intentional.
ch2 <- gibbs.sampling.m2(para.init = para.ch2, seed = 12345, L = 1000000,
                         method = 1, verbose = T)
ch2.mcmc <- as.mcmc(ch2)
plot(ch2.mcmc)
summary(ch2.mcmc)
cor(ch2[-6])
ch2.burn <- ch2[-(1:10000), ]
ch2.burn.mcmc <- as.mcmc(ch2.burn)
plot(ch2.burn.mcmc)
summary(ch2.burn.mcmc)
# Pooled diagnostics across both burned-in chains.
stagnant.mcmc <- mcmc.list(ch1.burn.mcmc, ch2.burn.mcmc)
plot(stagnant.mcmc, ask=TRUE)
summary(stagnant.mcmc)
|
a938bb5cad9e8da273b69cc618f02296b82f0f65
|
88dde69ede665f2bc9f606781d3de00562e8b3ed
|
/R/Getters.R
|
e2e43a8d7ab1a853433a24f36299a81580e8b990
|
[] |
no_license
|
vishalbelsare/pim
|
64ebfb621d618e7e85457c9d41c83f7b6251d6d1
|
26545a0cbcc668c0015f6289e634837548d442b2
|
refs/heads/master
| 2023-06-09T12:34:55.536755
| 2020-02-03T17:17:01
| 2020-02-03T17:17:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,580
|
r
|
Getters.R
|
#' Extract information from pim.environment and pim.poset objects
#'
#' These functions serve to extract the information contained
#' in the objects of class \code{\link{pim.environment}} and
#' \code{\link{pim.poset}}.
#'
#' @param x an object of class \code{pim.environment} or \code{pim.poset}
#' @param object an object of class \code{pim} or \code{pim.summary}
#' @param ... arguments passed to and from other methods.
#'
#' @return \code{classes()}: A named vector with the classes of the data
#' contained in the \code{pim.environment}
#'
#' @seealso \code{\link{nobs}}, \code{\link{poset}}, \code{\link{is.complete}},
#' \code{\link{pim.environment-class}}, \code{\link{pim.poset-class}},
#' \code{\link{pim-class}}, \code{\link{pim.summary-class}}
#'
#' @examples
#' data(DysData)
#' DysPimEnv <- new.pim.env(DysData,poset=TRUE)
#' classes(DysPimEnv)
#' names(DysPimEnv)
#' compare(DysPimEnv)
#'
#' themodel <- pim(SPC_D2 ~ Chemo, data = DysData, model = 'difference')
#' model(themodel)
#' thesummary <- summary(themodel)
#' model(thesummary)
#'
#' @aliases names compare model link
#' @include pim.environment-class.R
#' @export
setGeneric('classes', function(x) standardGeneric('classes'))
#' @export
#' @rdname classes
setMethod('classes',
          signature='pim.environment',
          function(x){
            ## The classes slot is a list; flatten it to a named vector.
            unlist(x@classes)
          })
## NOTE:
# names is a primitive function, hence a S4 generic
# is already available in the base package. Creating
# a generic in the package here only results in warnings.
# The generic won't be created, so when trying to export,
# there's nothing to export.
#' @return \code{names()}: For an object of class \code{pim.environment} the names
#' of the variables in the object. For an object of class \code{pim.poset},
#' the name of the poset functions inside the environment
#' @export
#' @rdname classes
setMethod('names',
          signature='pim.environment',
          function(x){
            x@data.names
          })
#' @export
#' @rdname classes
setMethod('names',
          signature='pim.poset',
          function(x){
            ## A pim.poset is an environment; list the objects it contains.
            ls(x)
          })
#' @export
#' @rdname classes
#' @return \code{compare()}: A character value indicating how the comparison
#' is defined in a \code{pim.poset} object, or the poset-slot of
#' a \code{pim.environment} object respectively.
setGeneric('compare',function(x) standardGeneric('compare'))
#' @export
#' @rdname classes
setMethod('compare',
          signature=c('pim.environment'),
          function(x){
            ## Delegates to the poset slot's compare slot.
            x@poset@compare
          })
#' @export
#' @rdname classes
setMethod('compare',
          signature=c('pim.poset'),
          function(x){
            x@compare
          })
#' @return \code{model()}: a character value that displays
#' the type of model (difference, marginal, regular or customized)
#' @rdname classes
#' @export
setGeneric('model', function(object, ...){
  standardGeneric('model')})
#' @rdname classes
setMethod('model',
          'pim',
          function(object){object@model})
#' @rdname classes
setMethod('model',
          'pim.summary',
          function(object){object@model})
#' @return \code{link()}: a character value that displays
#' the type of link (difference, marginal, regular or customized)
#' @rdname classes
#' @export
setGeneric('link', function(object, ...){
  standardGeneric('link')})
#' @rdname classes
setMethod('link',
          'pim',
          function(object){object@link})
#' @rdname classes
setMethod('link',
          'pim.summary',
          function(object){object@link})
|
54d2e1a722e16908345ef2d3e6ab2dc712470470
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/casino/examples/Poker.Rd.R
|
3fa633a6c4250f105fae7cbee712b7a67b281d76
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 365
|
r
|
Poker.Rd.R
|
# Extracted examples from the casino package's Poker Rd page.
library(casino)
### Name: Poker
### Title: Poker R6 Class
### Aliases: Poker
### Keywords: datasets
### ** Examples
# Fixed seed so the dealt hands are reproducible.
set.seed(101315)
setup()
# draw poker: play, keep cards 1, 2 and 5, then draw replacements
x <- Poker$new(who = "Player 1", type = "draw", bet = 10)
x$play()
x$hold(1, 2, 5)
x$draw()
# stud poker (bet 20)
x <- Poker$new(who = "Player 1", type = "stud", bet = 20)
x$play()
# clean-up (presumably removes the state created by setup() — see casino docs)
delete()
|
11ead613775b181871dc241fe49e7ae40298167d
|
e1fe373fd6e8b404ef826069861e12efa88f0eac
|
/R_code/01_monte_carlo.R
|
3891980d1ee47eba752e746d35ef88e1c3124b0c
|
[] |
no_license
|
Nathan-Mather/Heterogeneous-Teacher-Value-Added
|
e7331c07fdb45d3e1174078ac01e05b0937051ba
|
e5be646cfe957f74caf4a23511bc0c1626a41a61
|
refs/heads/master
| 2023-03-31T23:01:37.889489
| 2023-03-21T19:38:05
| 2023-03-21T19:38:05
| 244,950,847
| 0
| 0
| null | 2020-07-01T00:45:02
| 2020-03-04T16:35:05
|
TeX
|
UTF-8
|
R
| false
| false
| 21,789
|
r
|
01_monte_carlo.R
|
# =========================================================================== #
# ===================== Run the Monte Carlo Simulation ====================== #
# =========================================================================== #
# Clear data.
# NOTE(review): clearing the global environment from within a script is
# discouraged (it nukes the caller's workspace); kept for this standalone run
# script.
rm(list = ls(pos = ".GlobalEnv"), pos = ".GlobalEnv")
# No scientific notation.
options(scipen = 999)
# Clean console history.
cat("\f")
# Define a notin function (negation of %in%).
`%notin%` <- Negate(`%in%`)
# Parallel options: whether to run simulations in parallel and on how many
# cores (only used on the non-Windows branch below).
do_parallel <- TRUE
ncores <- 20
# =========================================================================== #
# ========================= File paths and packages ========================= #
# =========================================================================== #
# Load packages.
library(boot)
library(broom)
library(data.table)
library(doParallel)
library(doRNG)
library(ggplot2)
library(Kendall)
library(Matrix)
library(matrixStats)
library(np)
library(quantreg)
library(readxl)
library(tidyr)
# Check users to set directory.
# (NOTE TO MIKE, add something unique to your base working directory to detect
# when it is your computer)
my_wd <- getwd()
if (my_wd %like% "Nmath_000") {
  # Base directory.
  base_path <- "c:/Users/Nmath_000/Documents/Research/"
  # Path for data to save.
  out_data <- "C:/Users/Nmath_000/Documents/Research/Value_added_local/results/"
  # path to save qc
  out_qc <- paste0(out_data, "/qc/")
} else if (my_wd %like% "ricksmi") {
  # Base directory.
  base_path <- "c:/Users/ricksmi/Desktop/vam/"
  # Path for data to save.
  out_data <- "c:/Users/ricksmi/Desktop/vam/data/mc/"
  # NOTE(review): out_qc is not defined on this branch (nor the next one),
  # but it is used later in the run loop — this user would hit an error.
} else {
  # Base directory.
  base_path <- "/home/tanner/Documents/Research/HeterogenousTeacherVA/Git/"
  # Path for data to save.
  out_data <- "/home/tanner/Documents/Research/HeterogenousTeacherVA/Output/"
}
# Load model_xwalk: one row per Monte Carlo specification to run.
model_xwalk <- data.table(read_excel(paste0(base_path,
                                            "Heterogeneous-Teacher-Value-Added/R_code/model_xwalk_SDUSD.xlsx")))
# Create the intermediate-results directory if it doesn't exist already.
# (dir.exists() would be the more precise check for a directory.)
if(!file.exists(paste0(out_data, "/int_results"))){
  dir.create(paste0(out_data, "/int_results"))
}
# Load our functions now that we have a file path.
func_path <- "Heterogeneous-Teacher-Value-Added/R_code/functions/"
source(paste0(base_path, func_path, "mc_functions.R"))
source(paste0(base_path, func_path, "binned_va_function.R"))
source(paste0(base_path, func_path, "qtile_va_function.R"))
source(paste0(base_path, func_path, "np_hack_va_function.R"))
source(paste0(base_path, func_path, "simulate_test_data.R"))
source(paste0(base_path, func_path, "teacher_impact.R"))
source(paste0(base_path, func_path, "weighting_functions.R"))
source(paste0(base_path, func_path, "welfare_statistic.R"))
source(paste0(base_path, func_path, "simulate_sdusd_data.R"))
source(paste0(base_path, func_path, "quality_control_functions.R"))
# Get a filesystem-safe time stamp ("YYYY_MM_DD__HH_MM_SS").
date_time <- gsub("-", "_", Sys.time())
date_time <- gsub(":", "_", date_time)
date_time <- gsub(" ", "__", date_time)
# =========================================================================== #
# =============================== Set options =============================== #
# =========================================================================== #
# Set parallel options.
# NOTE(review): the cluster is created even when do_parallel is FALSE (and
# never stopped with stopCluster()); consider guarding creation and adding
# a cleanup call at the end of the script.
if (my_wd %like% "Nmath_000") {
  myCluster <- makeCluster(4, # number of cores to use
                           type = "PSOCK") # type of cluster
  # (Must be "PSOCK" on Windows)
} else {
  myCluster <- makeCluster(ncores, # number of cores to use
                           type = "FORK") # type of cluster
  # (Must be "PSOCK" on Windows)
}
if (do_parallel) {
  registerDoParallel(myCluster)
  registerDoRNG()
}
#=====================================#
# ==== check for existing results ====
#=====================================#
# Reconcile the requested specifications with any previously completed runs:
# completed runs keep done_flag = 1; runs dropped from the new xwalk have
# their outputs deleted; remaining run ids are assigned from the unused pool.
if (file.exists(paste0(out_data,'xwalk.csv'))) {
  old_xwalk <- fread( paste0(out_data, 'xwalk.csv'))
  setnames(old_xwalk, "done_flag", "done_flag_old")
  # subset to only done runs
  old_xwalk <- old_xwalk[done_flag_old == 1]
  # mark old and new xwalks
  model_xwalk[, flag_new := 1]
  old_xwalk[, flag_old := 1]
  old_xwalk[, impact_function := as.character(impact_function)]
  # merge old and new on every shared spec column (full outer join)
  merged_cols <- setdiff(colnames(model_xwalk), c("done_flag", "flag_new"))
  model_xwalk <- merge(model_xwalk, old_xwalk,merged_cols, all = TRUE)
  # check for completed runs we don't need to duplicate
  model_xwalk[done_flag_old == 1, done_flag := 1]
  model_xwalk[, done_flag_old := NULL, ]
  # check for runs we no longer want and should remove
  to_remove <- model_xwalk[flag_old == 1 & is.na(flag_new), run_id]
  # remove runs that are no longer in xwalk
  for(k in to_remove){
    # remove results
    unlink(paste0(out_data, "/int_results/results_", k, ".csv"))
    # remove qc
    unlink(paste0(out_data, "/qc/run_", k ),
           recursive = TRUE)
  }
  # Now subset to only the ones from the new xwalk
  model_xwalk <- model_xwalk[flag_new == 1]
  # get rid of extra variables I created
  model_xwalk[,c("flag_new", "flag_old") := NULL]
  # get list of run id's still free to assign
  # NOTE(review): 1:nrow(x) misbehaves if the table is empty; seq_len(nrow(x))
  # is the safe form.
  run_id_list <- 1:nrow(model_xwalk)
  run_id_list <- setdiff(run_id_list, model_xwalk$run_id)
  # initialize run id counter
  run_id_counter <- 1
} else {
  run_id_list <- 1:nrow(model_xwalk)
  run_id_counter <- 1
}
# NOTE(review): the next line computes and discards the column names — a
# leftover interactive/debugging line.
colnames(model_xwalk)
# set model xwalk order
setorder(model_xwalk, "teacher_student_xwalk", "impact_type", "impact_function", "weight_type" ,"max_diff")
# =========================================================================== #
# ============================= Run Monte Carlo ============================= #
# =========================================================================== #
# Loop over xwalk to run this.
for(i in 1:nrow(model_xwalk)){
# see if it has been run already
done_flag <- model_xwalk[i, done_flag]
if(done_flag == 1){
next()
}
# get run id. There is a list of unused ID's and a a counter
# for which ID we are on (does not correspond to I because of skipped rows)
run_id_i <- run_id_list[[run_id_counter]]
run_id_counter <- run_id_counter + 1
#====================#
# ==== get parms ====
#====================#
# Set seed.
set.seed(42)
# Set parameters for this Monte Carlo run.
# Run parameters.
qc_flag <- model_xwalk[i, qc_flag] # should we create quality control output? (runs slower)
nsims <- model_xwalk[i, nsims] # how many simulations to do
ts_xwalk_name <- model_xwalk[i, teacher_student_xwalk] # which teacher stuent xwalk file to use
p_npoints <- model_xwalk[i, npoints] # number of grid points over which to calculate welfare added
# Simulated data parameters.
p_test_SEM <- model_xwalk[i, test_SEM] # SEM of test
p_impact_type <- model_xwalk[i, impact_type] # one of 'MLRN', 'MLR', 'MNoR', 'MNo', 'No'
p_impact_function <- model_xwalk[i, impact_function] # which teacher impact function to use, and integer
p_min_diff <- model_xwalk[i, min_diff] # minimum impact difference between best and worst matched students
p_max_diff <- model_xwalk[i, max_diff] # maximum impact difference between best and worst matched students
p_covariates <- model_xwalk[i, covariates] # whether or not to include covariates
p_peer_effects <- model_xwalk[i, peer_effects] # whether or not to include peer effects
p_stud_sorting <- model_xwalk[i, stud_sorting] # whether or not to include student sorting
p_rho <- model_xwalk[i, rho] # correlation between teacher and student ability
p_ta_sd <- model_xwalk[i, ta_sd] # teacher ability standard deviation
p_tc_sd <- model_xwalk[i, tc_sd] # teacher center standard deviation
p_n_cohorts <- model_xwalk[i, n_cohorts] # number of cohorts per teacher
p_pretest_coef <- model_xwalk[i, pretest_coef] #coefficent on student pretest/ability
# Weight and estimation parameters.
p_weight_type <- model_xwalk[i, weight_type] # style of social planner pareto weights
p_method <- model_xwalk[i, method] # method of estimation used
p_lin_alpha <- model_xwalk[i, lin_alpha] # for linear weights
p_pctile <- model_xwalk[i, pctile] # for rawlsian
p_weight_below <- model_xwalk[i, weight_below ] # for rawlsian
p_weight_above <- model_xwalk[i, weight_above] # for rawlsian
p_v_alpha <- model_xwalk[i, v_alpha] # for v weights
p_mrpctile <- model_xwalk[i, mrpctile] # for mr weights
p_mrdist <- model_xwalk[i, mrdist] # for mr weights
p_weighted_average <- model_xwalk[i, weighted_average] # whether or not to calculate a weighted average of standard and NP
p_num_cats <- model_xwalk[i, num_cats] # number of bins for binned estimator
# Add in the run id.
model_xwalk[i, run_id := run_id_i]
# load teacher student xwalk
teacher_student_xwalk <- fread(paste0(base_path, "Heterogeneous-Teacher-Value-Added/R_code/", ts_xwalk_name, ".csv"))
#============================#
# ==== simulate teachers ====
#============================#
# Simulate teacher data #note: loacted in funcitons/simulate_sdusd_data
teacher_ability_xwalk <- simulate_teacher_ability(teacher_student_xwalk = teacher_student_xwalk,
ta_sd = p_ta_sd,
school_cor = 0,
tc_sd = p_tc_sd,
min_diff = p_min_diff,
max_diff = p_max_diff)
#=================#
# ==== run qc ====
#=================#
if(qc_flag == 1){
teacher_xwalk_qc(in_teacher_ability_xwalk = teacher_ability_xwalk,
run_id = run_id_i,
out_path =out_qc)
}
#====================#
# ==== get truth ====
#====================#
# True welfare-weighted (WW) impact of each teacher under this run's
# welfare-weight type.
teacher_info <- welfare_statistic(in_dt = teacher_ability_xwalk,
                                  type = 'true',
                                  npoints = p_npoints,
                                  weight_type = p_weight_type,
                                  in_test_1 = NULL,
                                  lin_alpha = p_lin_alpha,
                                  pctile = p_pctile,
                                  weight_below = p_weight_below,
                                  weight_above = p_weight_above,
                                  v_alpha = p_v_alpha,
                                  mrpctile = p_mrpctile,
                                  mrdist = p_mrdist,
                                  impact_type = p_impact_type,
                                  impact_function = p_impact_function,
                                  qc_flag = qc_flag,
                                  out_qc_path = out_qc,
                                  run_id_qc = run_id_i)
# Benchmark: the same statistic under equal weights (the "standard" VA notion).
standard_info <- welfare_statistic(in_dt = teacher_ability_xwalk,
                                   type = 'true',
                                   npoints = p_npoints,
                                   weight_type = 'equal',
                                   in_test_1 = NULL,
                                   lin_alpha = p_lin_alpha,
                                   pctile = p_pctile,
                                   weight_below = p_weight_below,
                                   weight_above = p_weight_above,
                                   v_alpha = p_v_alpha,
                                   mrpctile = p_mrpctile,
                                   mrdist = p_mrdist,
                                   impact_type = p_impact_type,
                                   impact_function = p_impact_function,
                                   qc_flag = 0)
# Rename so the equal-weight truth can sit beside the WW truth after merging.
setnames(standard_info, old=c('true_welfare'), new=c('true_standard'))
# Attach each teacher's center (one row per teacher) to the truth table.
teacher_info <- merge(teacher_info, unique(teacher_ability_xwalk[, c('teacher_id',
                                                                    'teacher_center')]),
                      'teacher_id')
#==========================#
# ==== Run simulations ====
#==========================#
# Monte Carlo: run `nsims` independent iterations with this run's parameters.
# The %dopar% branch distributes iterations over the cluster registered
# earlier; the %do% branch runs them sequentially with identical arguments.
if (do_parallel) {
  mc_res <- foreach(j = 1:nsims) %dopar% single_iteration_fun( teacher_ability_xwalk = teacher_ability_xwalk,
                                                               n_cohorts = p_n_cohorts,
                                                               pretest_coef = p_pretest_coef,
                                                               weight_type = p_weight_type,
                                                               method = p_method,
                                                               num_cats = p_num_cats,
                                                               lin_alpha = p_lin_alpha,
                                                               pctile = p_pctile,
                                                               weight_below = p_weight_below,
                                                               weight_above = p_weight_above,
                                                               v_alpha = p_v_alpha,
                                                               mrpctile = p_mrpctile,
                                                               mrdist = p_mrdist,
                                                               npoints = p_npoints,
                                                               test_SEM = p_test_SEM,
                                                               impact_type = p_impact_type,
                                                               impact_function = p_impact_function,
                                                               covariates = p_covariates,
                                                               peer_effects = p_peer_effects,
                                                               stud_sorting = p_stud_sorting,
                                                               rho = p_rho,
                                                               weighted_average = p_weighted_average)
} else {
  mc_res <- foreach(j = 1:nsims) %do% single_iteration_fun( teacher_ability_xwalk = teacher_ability_xwalk,
                                                            n_cohorts = p_n_cohorts,
                                                            pretest_coef = p_pretest_coef,
                                                            weight_type = p_weight_type,
                                                            method = p_method,
                                                            num_cats = p_num_cats,
                                                            lin_alpha = p_lin_alpha,
                                                            pctile = p_pctile,
                                                            weight_below = p_weight_below,
                                                            weight_above = p_weight_above,
                                                            v_alpha = p_v_alpha,
                                                            mrpctile = p_mrpctile,
                                                            mrdist = p_mrdist,
                                                            npoints = p_npoints,
                                                            test_SEM = p_test_SEM,
                                                            impact_type = p_impact_type,
                                                            impact_function = p_impact_function,
                                                            covariates = p_covariates,
                                                            peer_effects = p_peer_effects,
                                                            stud_sorting = p_stud_sorting,
                                                            rho = p_rho,
                                                            weighted_average = p_weighted_average)
}
# Stack the per-iteration result tables for this run into one data.table.
mc_res <- rbindlist(mc_res)
# Mean and sd of the standard-welfare estimate per teacher across iterations.
mean_tab <- mc_res[, list(mean_standard = mean(standard_welfare),
                          sd_standard = sd(standard_welfare)), teacher_id]
# Each alternative estimator is summarized only when this run's method string
# requests it (p_method may name several estimators, hence %like%).
if (p_method %like% 'bin') {
  mean_tab <- merge(mean_tab,
                    mc_res[, list(mean_bin = mean(binned_welfare),
                                  sd_bin = sd(binned_welfare)), teacher_id],
                    'teacher_id')
}
if (p_method %like% 'np') {
  mean_tab <- merge(mean_tab,
                    mc_res[, list(mean_np = mean(np_welfare),
                                  sd_np = sd(np_welfare)), teacher_id],
                    'teacher_id')
}
if (p_method %like% 'quantile') {
  mean_tab <- merge(mean_tab,
                    mc_res[, list(mean_quantile = mean(quantile_welfare),
                                  sd_quantile = sd(quantile_welfare)),
                           teacher_id],
                    'teacher_id')
}
# Attach the true WW and equal-weight benchmarks computed above.
mean_tab <- merge(mean_tab, teacher_info, "teacher_id")
mean_tab <- merge(mean_tab, standard_info, "teacher_id")
# Bookkeeping columns identifying the run.
mean_tab[, run_id := run_id_i]
mean_tab[, nsims := nsims]
mean_tab[, date_time := date_time]
# Ensure the output table has every expected column so results from runs that
# used different estimators stack cleanly: any estimator column this run did
# not produce is added as an all-NA column.
print(paste0('Finished with simulation ', i))
for (name in c('teacher_id', 'mean_standard', 'sd_standard', 'mean_bin',
               'sd_bin', 'mean_quantile', 'sd_quantile', 'mean_np',
               'sd_np', 'true_welfare', 'true_standard', 'teacher_center',
               'run_id', 'nsims', 'date_time')) {
  if (name %notin% colnames(mean_tab)) {
    # BUG FIX: the original `mean_tab[, .GlobalEnv$name := numeric()]` relied
    # on the loop variable leaking into the global environment and assigned a
    # length-0 vector, which data.table rejects for a non-empty table.
    # `(name) :=` targets the column named by the loop variable, and NA
    # recycles to every row.
    mean_tab[, (name) := NA]
  }
}
# Fix the column order of the output table.
mean_tab <- mean_tab[, c('teacher_id', 'mean_standard', 'sd_standard',
                         'mean_bin', 'sd_bin','mean_np', 'sd_np',
                         'mean_quantile', 'sd_quantile', 'true_welfare',
                         'true_standard', 'teacher_center', 'run_id', 'nsims',
                         'date_time')]
# Write this run's summary to its own intermediate file, keyed by run id.
write.csv(mean_tab,
          file = paste0(out_data, "int_results/", "results_", run_id_i, ".csv"),
          row.names = FALSE)
# Mark this crosswalk row as completed (allows resuming after a crash).
model_xwalk[i, done_flag :=1]
# Save a copy of the most recent xwalk so there are no mixups.
write.csv(model_xwalk,
          paste0(out_data, '/xwalk.csv'),
          row.names = FALSE)
} # Close Monte Carlo loop.
# Release the parallel workers now that all runs are finished.
if(do_parallel){
  stopCluster(myCluster)
}
#========================#
# ==== stack results ====
#========================#
# Read every per-run intermediate result file and stack them into one table.
file_paths <- list.files(paste0(out_data, "int_results"),
                         full.names = TRUE)
results <- rbindlist(lapply(file_paths, fread))
# Save the stacked results. CONSISTENCY FIX: row.names = FALSE matches every
# other write.csv call in this script; the original omitted it here, which
# adds a spurious row-number column to results.csv.
write.csv(results,
          paste0(out_data,
                 "results.csv"),
          row.names = FALSE)
# =========================================================================== #
# =============================== Save xwalk ================================ #
# =========================================================================== #
# Save the final crosswalk (with run ids and done flags) so there are no mixups.
write.csv(model_xwalk,
          paste0(out_data, '/xwalk.csv'),
          row.names = FALSE)
|
d1289d2ccffe78a13fe317e78db78f67c9d51255
|
00412b811e8c6cc63cb351a8055fa28307a72258
|
/R/BWMR.R
|
64430d52d0971eab2c33d1ad92b3b18d1a4e5ca7
|
[] |
no_license
|
jiazhao97/BWMR
|
6588b1a0dc5d66535f0110e495a06f754f8fae9f
|
c00e8186f12ed25899aaf85b7ac5c98c6d86c24c
|
refs/heads/master
| 2023-06-24T03:24:14.384393
| 2023-06-14T06:57:01
| 2023-06-14T06:57:01
| 158,942,967
| 10
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,134
|
r
|
BWMR.R
|
###### VEM and Inference for BWMR ######
##
### INPUT
## gammahat: SNP-exposure effect;
## Gammahat: SNP-outcome effect;
## sigmaX: standard error of SNP-exposure effect;
## sigmaY: standard error of SNP-outcome effect;
##
### OUTPUT
## beta the estimate of beta;
## se_beta the estimate the standard error of beta;
## P-value P-value
## plot1 Plot of Data with Standard Error Bar;
## plot2 Plot of Evidence Lower Bound (ELBO);
## plot3 Posterior Mean of Weight of Each Observation;
## plot4 Plot of Weighted Data and Its Regression Result.
# packages
library("ggplot2")
# Fixed hyper-parameters of the prior distributions:
sqsigma0 <- (1e+6)^2 # prior variance of beta (diffuse normal prior on the causal effect)
alpha <- 100 # first shape parameter of the Beta(alpha, 1) prior on pi1 (IV-validity probability)
## Compute the evidence lower bound (ELBO) of the BWMR variational
## approximation, accumulated term by term into `l`.
##   N: number of SNPs; gammahat/Gammahat: observed effects;
##   sqsigmaX/sqsigmaY: their squared standard errors;
##   mu_beta/sqsigma_beta, mu_gamma/sqsigma_gamma: variational means/variances;
##   a, b: parameters of q(pi1) = Beta(a, b);
##   pi_w: posterior weight probabilities E[w_i];
##   sqsigma, sqtau: current model variance parameters.
## Returns the scalar ELBO (value of the final assignment to `l`).
ELBO_func <- function(N, gammahat, Gammahat, sqsigmaX, sqsigmaY, mu_beta, sqsigma_beta, mu_gamma, sqsigma_gamma, a, b, pi_w, sqsigma, sqtau) {
  # + E[log p(gammahat, sqsigmaX | gamma)]
  l <- - 0.5*sum(log(sqsigmaX)) - 0.5*sum(((gammahat-mu_gamma)^2+sqsigma_gamma)/sqsigmaX)
  # + E[log p(Gammahat, sqsigmaY| beta, gamma, w, sqtau)]
  l <- l - 0.5*log(2*pi)*sum(pi_w) - 0.5*sum(pi_w*log(sqsigmaY+sqtau))
  l <- l - 0.5*sum(pi_w*((mu_beta^2+sqsigma_beta)*(mu_gamma^2+sqsigma_gamma)-2*mu_beta*mu_gamma*Gammahat+Gammahat^2)/(sqsigmaY+sqtau))
  # + E[log p(beta | sqsigma0)]
  l <- l - 0.5*(mu_beta^2+sqsigma_beta)/sqsigma0
  # + E[log p(gamma | sqsigma)]
  l <- l - 0.5*N*log(sqsigma) - 0.5/sqsigma*sum(mu_gamma^2+sqsigma_gamma)
  # + E[log p(w | pi1)]
  l <- l + (digamma(a)-digamma(a+b))*sum(pi_w) + (digamma(b)-digamma(a+b))*(N-sum(pi_w))
  # + E[log p(pi1)]
  l <- l + (alpha-1)*(digamma(a)-digamma(a+b))
  # - E[log q(beta)] (entropy term, up to constants)
  l <- l + 0.5*log(sqsigma_beta)
  # - E[log q(gamma)]
  l <- l + 0.5*sum(log(sqsigma_gamma))
  # - E[log q(pi1)]
  l <- l - (a-1)*(digamma(a)-digamma(a+b)) - (b-1)*(digamma(b)-digamma(a+b)) + lbeta(a, b)
  # - E[log q(w)]
  # The Bernoulli entropy needs guarding where pi_w is exactly 0 or 1, since
  # those produce 0*log(0). An explicit version would be:
  #   e1 <- pi_w*log(pi_w)
  #   e2 <- (1-pi_w)*log(1-pi_w)
  #   e1[which(pi_w == 0)] <- 0
  #   e2[which(pi_w == 1)] <- 0
  #   l <- l - sum(e1+e2)
  # The one-liner below achieves the same: adding the logical (pi_w==0) /
  # (pi_w==1) bumps the degenerate log argument from 0 to 1, so log(...)=0
  # exactly where the corresponding factor is 0.
  l <- l - sum(pi_w*log(pi_w+(pi_w==0)) + (1-pi_w)*log(1-pi_w+(pi_w==1)))
  # l: the ELBO, returned as the value of the last assignment.
}
## Bayesian Weighted Mendelian Randomization (BWMR).
##
## Fits the BWMR model by variational EM, estimates the causal effect beta,
## and obtains its standard error via linear response variational Bayes
## (LRVB), followed by a chi-square Wald test.
##
## Arguments:
##   gammahat: SNP-exposure effects;  Gammahat: SNP-outcome effects;
##   sigmaX, sigmaY: their standard errors (same length as gammahat).
## Returns a list with beta, se_beta, P_value, per-SNP posterior weights,
## tau, sigma, mu_pi, and four diagnostic ggplot objects; on failure
## (degenerate weights or a singular LRVB system) returns NA for the three
## main fields.
BWMR <- function(gammahat, Gammahat, sigmaX, sigmaY) {
  ## data
  N <- length(gammahat)
  sqsigmaX <- sigmaX^2
  sqsigmaY <- sigmaY^2

  ### Variational EM algorithm ###
  # Initial model parameters.
  beta <- 0
  sqtau <- 1^2
  sqsigma <- 1^2
  # Initial variational parameters.
  mu_gamma <- gammahat
  sqsigma_gamma <- rep(0.1, N)
  pi_w <- rep(0.5, N)
  # ELBO per iteration, used for the convergence check and Plot 2.
  ELBO_set <- numeric(0)
  for (iter in 1:5000) {
    ## Variational E-Step
    # q(beta): Gaussian with mean mu_beta, variance sqsigma_beta.
    sqsigma_beta <- 1/(1/sqsigma0 + sum(pi_w*(mu_gamma^2+sqsigma_gamma)/(sqsigmaY+sqtau)))
    mu_beta <- sum(pi_w*mu_gamma*Gammahat/(sqsigmaY+sqtau))*sqsigma_beta
    # q(gamma): independent Gaussians per SNP.
    sqsigma_gamma <- 1/(1/sqsigmaX + pi_w*(mu_beta^2+sqsigma_beta)/(sqsigmaY+sqtau) + 1/sqsigma)
    mu_gamma <- (gammahat/sqsigmaX + pi_w*Gammahat*mu_beta/(sqsigmaY+sqtau))*sqsigma_gamma
    # q(pi1) = Beta(a, b).
    a <- alpha + sum(pi_w)
    b <- N + 1 - sum(pi_w)
    # q(w): posterior probability that each SNP is a valid instrument.
    q0 <- exp(digamma(b) - digamma(a+b))
    q1 <- exp(- 0.5*log(2*pi) - 0.5*log(sqsigmaY+sqtau) - 0.5*((mu_beta^2+sqsigma_beta)*(mu_gamma^2+sqsigma_gamma)-2*mu_beta*mu_gamma*Gammahat+Gammahat^2)/(sqsigmaY+sqtau) + digamma(a)-digamma(a+b))
    pi_w <- q1/(q0+q1)
    # If every SNP is down-weighted to zero there is no usable instrument.
    if (sum(pi_w) == 0){
      message("Invalid IVs!")
      mu_beta = NA
      se_beta = NA
      P_value = NA
      return(list(beta=NA, se_beta=NA, P_value=NA))
    }
    ## M-Step
    sqsigma <- sum(mu_gamma^2 + sqsigma_gamma)/N
    # Fixed-point style update for sqtau. NOTE(review): the sqrt on the next
    # line is kept from the original implementation; confirm against the BWMR
    # derivation before changing it.
    sqtau <- sum(pi_w*((mu_beta^2+sqsigma_beta)*(mu_gamma^2+sqsigma_gamma)-2*mu_beta*mu_gamma*Gammahat+Gammahat^2)*sqtau^2/((sqsigmaY+sqtau)^2)) / sum(pi_w/(sqsigmaY+sqtau))
    sqtau <- sqrt(sqtau)
    ## Convergence check: relative change of the ELBO below 1e-6.
    ELBO <- ELBO_func(N, gammahat, Gammahat, sqsigmaX, sqsigmaY, mu_beta, sqsigma_beta, mu_gamma, sqsigma_gamma, a, b, pi_w, sqsigma, sqtau)
    ELBO_set <- c(ELBO_set, ELBO)
    if (iter > 1 && (abs((ELBO_set[iter]-ELBO_set[iter-1])/ELBO_set[iter-1]) < 1e-6)) {
      break
    }
  }
  # message("Iteration=", iter, ", beta=", mu_beta, ", tau=", sqrt(sqtau), ", sigma=", sqrt(sqsigma), ".")

  ### visualize the result of VEM algorithm
  # Plot1: Plot of Data with Standard Error Bar
  df1 <- data.frame(
    gammahat = gammahat,
    Gammahat = Gammahat,
    sigmaX = sigmaX,
    sigmaY = sigmaY
  )
  plot1 <- ggplot(data = df1, aes(x = gammahat, y = Gammahat)) +
    geom_pointrange(aes(ymin = Gammahat - sigmaY, ymax = Gammahat + sigmaY), color="gray59", size = 0.3) +
    geom_errorbarh(aes(xmin = gammahat - sigmaX, xmax = gammahat + sigmaX, height = 0), color="gray59") +
    labs(x = "SNP-exposure effect", y = "SNP-outcome effect", title = "Plot1: Plot of data with standard error bar")
  # Plot2: Plot of Evidence Lower Bound (ELBO)
  iteration <- seq(1, (length(ELBO_set)), by = 1)
  df2 <- data.frame(
    iteration = iteration,
    ELBO_iter = ELBO_set
  )
  plot2 <- ggplot(df2, aes(x=iteration, y=ELBO_iter)) + geom_line(size = 0.5, color = "tomato1") + geom_point(size=0.5, color = "tomato1") +
    labs(x = "iteration", y="elbo", title = "Plot2: Plot of evidence lower bound (elbo)")
  # Plot3: Posterior Mean of Weight of Each Observation
  serial_number <- seq(1, N, by = 1)
  df3 <- data.frame(
    weight = pi_w,
    serial_number = serial_number
  )
  plot3 <- ggplot(data = df3, mapping = aes(x = factor(serial_number), y = weight, fill = weight)) + geom_bar(stat = 'identity', position = 'dodge') +
    labs(x = "observation No.", y = "weight", title = "Plot3: Posterior mean of weight of each observation") +
    ylim(0, 1) +
    theme(axis.text.x = element_text(size = 5))
  # scale_x_discrete(breaks = seq(10, N, 20)) +
  # Plot4: Plot of Weighted Data and Its Regression Result
  df4 <- data.frame(
    gammahat = gammahat,
    Gammahat = Gammahat,
    sqsigmaX = sqsigmaX,
    sqsigmaY = sqsigmaY,
    w = pi_w
  )
  plot4 <- ggplot(df4, aes(x=gammahat, y=Gammahat, color=w)) + geom_point(size = 0.3) +
    geom_pointrange(aes(ymin = Gammahat - sigmaY, ymax = Gammahat + sigmaY), size = 0.3) +
    geom_errorbarh(aes(xmin = gammahat - sigmaX, xmax = gammahat + sigmaX, height = 0)) +
    geom_abline(intercept=0, slope=mu_beta, color="#990000", linetype="dashed", size=0.5) +
    labs(x = "SNP-exposure effect", y = "SNP-outcome effect", title = "Plot4: Plot of weighted data and its regression result")

  ### LRVB and Standard Error ###
  ## Matrix V: block-diagonal covariance of the variational moments.
  forV <- matrix(nrow = N, ncol = 4)
  forV[ ,1] <- sqsigma_gamma
  forV[ ,2] <- 2*mu_gamma*sqsigma_gamma
  forV[ ,3] <- forV[ ,2]
  forV[ ,4] <- 2*sqsigma_gamma^2 + 4*mu_gamma^2*sqsigma_gamma
  V <- matrix(rep(0, (3*N+4)*(3*N+4)), nrow = 3*N+4, ncol = 3*N+4)
  for (j in 1:N) {
    V[(3*j):(3*j+1), (3*j):(3*j+1)] <- matrix(forV[j, ], 2, 2)
    V[3*j+2, 3*j+2] <- pi_w[j] - (pi_w[j]^2)
  }
  V[1:2, 1:2] <- matrix(c(sqsigma_beta, 2*mu_beta*sqsigma_beta, 2*mu_beta*sqsigma_beta, 2*sqsigma_beta^2+4*mu_beta^2*sqsigma_beta), 2, 2)
  V[(3*N+3):(3*N+4), (3*N+3):(3*N+4)] <- matrix(c(trigamma(a)-trigamma(a+b), -trigamma(a+b), -trigamma(a+b), trigamma(b)-trigamma(a+b)), 2, 2)
  ## Matrix H: cross-derivatives of the expected log-joint.
  H <- matrix(rep(0, (3*N+4)*(3*N+4)), nrow = 3*N+4, ncol = 3*N+4)
  forH <- matrix(nrow = N, ncol = 6)
  forH[ ,1] <- pi_w*Gammahat/(sqsigmaY+sqtau)
  forH[ ,2] <- mu_gamma*Gammahat/(sqsigmaY+sqtau)
  forH[ ,3] <- -0.5*pi_w/(sqsigmaY+sqtau)
  forH[ ,4] <- -0.5*(mu_gamma^2+sqsigma_gamma)/(sqsigmaY+sqtau)
  forH[ ,5] <- mu_beta*Gammahat/(sqsigmaY+sqtau)
  forH[ ,6] <- -0.5*(mu_beta^2+sqsigma_beta)/(sqsigmaY+sqtau)
  for (j in 1:N) {
    H[1, 3*j] <- forH[j, 1]
    H[1, 3*j+2] <- forH[j, 2]
    H[2, 3*j+1] <- forH[j, 3]
    H[2, 3*j+2] <- forH[j, 4]
    H[(3*N+3):(3*N+4), 3*j+2] <- c(1, -1)
    H[3*j+2, (3*N+3):(3*N+4)] <- c(1, -1)
    H[(3*j):(3*j+1), 3*j+2] <- forH[j, 5:6]
    H[3*j+2, (3*j):(3*j+1)] <- forH[j, 5:6]
  }
  H[ ,1] <- H[1, ]
  H[ ,2] <- H[2, ]
  ## LRVB-corrected covariance estimate and standard error.
  I <- diag(3*N+4)
  Sigma_hat <- try(solve(I-V%*%H)%*%V)
  # BUG FIX: the original test was `class(Sigma_hat) == "try-error"`, which
  # breaks in R >= 4.0 where a matrix has class c("matrix", "array"), making
  # the if() condition length 2 (an error since R 4.2). inherits() is the
  # correct check for a try() failure.
  if (inherits(Sigma_hat, "try-error")){
    message("Invalid IVs!")
    return(list(beta=NA, se_beta=NA, P_value=NA))
  } else{
    se_beta <- sqrt(Sigma_hat[1, 1])
  }
  ## Wald test: beta^2/se^2 ~ chi-square(1) under the null.
  W <- (mu_beta/se_beta)^2
  # P_value <- 1 - pchisq(W, 1)
  P_value <- pchisq(W, 1, lower.tail=F)
  message("Estimate of beta=", mu_beta, ", se of beta=", se_beta, ", P-value=", P_value, ".")
  ## output
  output <- list(beta=mu_beta, se_beta=se_beta, P_value=P_value,
                 weights=pi_w, tau=sqrt(sqtau), sigma=sqrt(sqsigma), mu_pi=a/(a+b),
                 plot1=plot1, plot2=plot2, plot3=plot3, plot4=plot4)
}
|
388561fc228c43f9e25bb919249599faedbd3e24
|
8e6e55fe43bc3ed64f01fec4ed07c027b29f96a6
|
/man/make_report_copy_url.Rd
|
8de7b5dfbeb609121417690cb0c8435400676c82
|
[
"MIT"
] |
permissive
|
carlganz/salesforcer
|
a3ec51c556b79b4734b5c8d844f000c2573fadbc
|
2078627bc988e5d58f90d16bf42c603507ab16db
|
refs/heads/main
| 2023-04-14T23:50:26.698773
| 2021-04-27T15:44:55
| 2021-04-27T15:44:55
| 362,164,928
| 1
| 0
|
NOASSERTION
| 2021-04-27T15:38:47
| 2021-04-27T15:38:46
| null |
UTF-8
|
R
| false
| true
| 381
|
rd
|
make_report_copy_url.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/endpoints-analytics-report.R
\name{make_report_copy_url}
\alias{make_report_copy_url}
\title{Report Copy URL generator}
\usage{
make_report_copy_url(report_id)
}
\description{
Report Copy URL generator
}
\note{
This function is meant to be used internally. Only use when debugging.
}
\keyword{internal}
|
54a010df2f23786dd528d32329aab7d19ac21b9e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/RFishBC/examples/backCalc.Rd.R
|
bb99dd17e1d2c6033bda51df5ecbdcdf2669f3e8
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 178
|
r
|
backCalc.Rd.R
|
library(RFishBC)
### Name: backCalc
### Title: Back-calculate length at previous ages from standard data
### format.
### Aliases: backCalc
### ** Examples
## None yet.
|
fb927219bafb52e455fec86c93e8375f02b4ad2e
|
32670124e173dc6a71c7b120567f0ea5b5afe8f5
|
/plot4.R
|
53ea8890cc28415410792a5d0f08ebecfd57a1ff
|
[] |
no_license
|
evashah/ExData_Plotting1
|
abf5b4ea09446628e68f7655cb7ef830a9b103c5
|
2b9724c0618c575f6d89d1a0bb28d9c7d23892ac
|
refs/heads/master
| 2021-01-12T21:09:35.883936
| 2016-03-21T05:49:21
| 2016-03-21T05:49:21
| 54,358,267
| 0
| 0
| null | 2016-03-21T03:51:06
| 2016-03-21T03:51:06
| null |
UTF-8
|
R
| false
| false
| 1,688
|
r
|
plot4.R
|
# Exploratory Data Analysis
# Coursera
# John Hopkins University
# Week 1 - Assignment for Peer Review - Plot 4
# Read the full household power-consumption data set.
data_file <- "household_power_consumption.txt"
power_data <- read.table(data_file, header = TRUE, sep = ";",
                         stringsAsFactors = FALSE, dec = ".")
# Keep only the two days of interest: 1 Feb 2007 and 2 Feb 2007.
two_days <- power_data[power_data$Date %in% c("1/2/2007", "2/2/2007"), ]
rm(power_data)
summary(two_days) # check the class of each column
# Build the combined date-time axis and coerce the measured series to numeric.
datetime <- strptime(paste(two_days$Date, two_days$Time, sep = " "),
                     "%d/%m/%Y %H:%M:%S")
active_power <- as.numeric(two_days$Global_active_power)
reactive_power <- as.numeric(two_days$Global_reactive_power)
volts <- as.numeric(two_days$Voltage)
sub_meter_1 <- as.numeric(two_days$Sub_metering_1)
sub_meter_2 <- as.numeric(two_days$Sub_metering_2)
# Write a 2x2 panel of plots to plot4.png (filled row by row).
png("plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2))
# Panel 1: global active power over time.
plot(datetime, active_power, type = "l", xlab = "", ylab = "Global Active Power")
# Panel 2: voltage over time.
plot(datetime, volts, type = "l", xlab = "datetime", ylab = "Voltage")
# Panel 3: the three energy sub-metering series.
plot(datetime, sub_meter_1, type = "l", ylab = "Energy Submetering", xlab = "")
lines(datetime, sub_meter_2, type = "l", col = "red")
lines(datetime, two_days$Sub_metering_3, type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1, lwd = 2.5, col = c("black", "red", "blue"), bty = "n")
# Panel 4: global reactive power over time.
plot(datetime, reactive_power, type = "l", xlab = "datetime",
     ylab = "Global_reactive_power")
dev.off()
|
9121429c99ee8e0990480b8e8eef582424260aa8
|
ec5f2581317b76209dc8e26ebe839f39b578f371
|
/Practical_ML_Proj.R
|
62fea85214b1938d7c0466a1acfef94d421aeb4d
|
[] |
no_license
|
mnicho03/Practical-Machine-Learning_Course-Project
|
22c6a626706f67f1b2b619bb89c0f013dbd65986
|
2a507ff22370e34b85ddfef8b3e72cb26de6cad5
|
refs/heads/master
| 2021-04-12T09:56:30.627020
| 2018-03-24T01:03:53
| 2018-03-24T01:03:53
| 126,552,311
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,180
|
r
|
Practical_ML_Proj.R
|
# Set working directory.
# NOTE(review): this absolute path is machine-specific; the script only reads
# data over HTTP below, so confirm whether setwd() is needed at all.
setwd("S:/Documents/R")
# Load R packages.
library(data.table) # for quick file reading
library(caret) # for model building - ideal for classification & regression problems
library(randomForest) # for efficient random forest model building
set.seed(16) # for reproducibility
# Download train/test datasets for evaluation.
training <- fread("https://d396qusza40orc.cloudfront.net/predmachlearn/pml-training.csv", na.strings=c("NA","#DIV/0!",""))
# Test dataset: used for the final evaluation only, ignored for model building.
testing <- fread("https://d396qusza40orc.cloudfront.net/predmachlearn/pml-testing.csv", na.strings=c("NA","#DIV/0!",""))
# Evaluate datasets.
dim(training)
dim(testing)
# As expected, the test dataset contains 20 records for final evaluation.
# Convert to data frames.
training <- as.data.frame(training)
testing <- as.data.frame(testing)
# Remove columns where >= 98% of values are NA or "".
# BUG FIX: the original used apply(training, 2, function(x) sum(is.na(x)) > .98 ...),
# which compares a COUNT of NAs to 0.98 (so any column with a single NA was
# dropped) and coerces the whole frame to a character matrix. vapply over the
# columns computes the actual fraction of missing/blank values per column
# without coercion. (For NA entries, is.na(x) is TRUE, so x == "" never
# contributes an NA to the mean.)
na_or_blank_share <- function(x) mean(is.na(x) | x == "")
relevant_Columns <- vapply(training, na_or_blank_share, numeric(1)) <= 0.98
training <- training[, relevant_Columns]
testing <- testing[,relevant_Columns]
# Convert the target variable to a factor.
training$classe <- as.factor(training$classe)
# Include classe in the testing set as NA (unknown until final evaluation).
testing$classe <- as.factor(NA)
# Prep for cross validation in prediction mode.
# The held-out test set is reserved for the quiz, so carve a validation set
# (30%) out of the training data for honest accuracy estimates.
inValidation <- createDataPartition(y = training$classe, p = .7, list = FALSE)
TrainingSet <- training[inValidation,]
ValidationSet <- training[-inValidation,]
dim(TrainingSet)
dim(ValidationSet)
# Data exploration.
# Show the breakdown of the classe variable (e.g. A = exercise performed as intended).
histogram(TrainingSet$classe, col = "blue", xlab = "Classe ~ Target Variable")
summary(TrainingSet$classe)
# Algorithm 1: Random Forest.
# Build the model (100 trees, 16 candidate variables per split).
modFit_RF <- randomForest(classe ~ ., data = TrainingSet, ntree = 100, mtry = 16)
# modFit_RF_CV <- rfcv(TrainingSet, TrainingSet$classe, cv.fold = 3)
# Predict with the random forest model on the validation set.
prediction_RF <- predict(modFit_RF, ValidationSet)
# Review accuracy against the validation set.
confusionMatrix(prediction_RF, ValidationSet$classe)
# Algorithm 2: Generalized Boosted Model (GBM).
# BUG FIX: a stray `---` line stood here in the original. `---` is not a
# statement in R; the parser absorbs the next expression into a unary-minus
# chain and the script errors at runtime. It has been removed.
# Tune the model: 3-fold cross validation within the training set selects the
# boosting parameters (number of trees, shrinkage, interaction depth).
objControl <- trainControl(method='cv', number=3, returnResamp='none', classProbs = TRUE)
# Train the GBM, centering and scaling predictors first.
modFit_GBM <- train(classe ~.,
                    data = TrainingSet,
                    method='gbm',
                    trControl=objControl,
                    metric = "Accuracy",
                    preProc = c("center", "scale"))
# Predict using the GBM on the validation set.
prediction_GBM <- predict(modFit_GBM, ValidationSet)
# Review accuracy against the validation set.
confusionMatrix(prediction_GBM, ValidationSet$classe)
# Compare models: stack the overall accuracy statistics of both fits.
modelComp <- data.frame(rbind(confusionMatrix(prediction_RF, ValidationSet$classe)$overall, confusionMatrix(prediction_GBM, ValidationSet$classe)$overall))
rownames(modelComp) <- c("RF", "GBM")
print(modelComp) # across the board, the random forest model outperforms the generalized boosted model
# Visualizations.
plot(modFit_GBM) # model accuracy across cross-validation folds as boosting iterations rise
plot(modFit_RF) # decreasing error rate as the number of trees grows
legend("topright", colnames(modFit_RF$err.rate), col = 1:5, cex=.8, fill = 1:5)
# Display the 10 most important predictors for the GBM fit.
varImp_GBM <- varImp(modFit_GBM, useModel = FALSE)
plot(varImp_GBM, top = 10)
# Predictions against the 20 quiz questions, using the stronger RF model.
predict(modFit_RF, testing)
|
d0a99b3939e5cee558b936eea1117a6a2fa4b824
|
508b8a9315e2cf98b417f68b5aa30ad2216e21c4
|
/plot4.R
|
271514d2571181b9e30cc464aefa63958d6f3c7a
|
[] |
no_license
|
5tos/ExData_Plotting1
|
baf94f108d654c450e8c8bdef49f6d97eb3838a9
|
e63739f0d618a9cb608797ee9f5ab7c87ddcc78e
|
refs/heads/master
| 2021-01-21T07:15:09.489056
| 2015-06-07T21:39:55
| 2015-06-07T21:39:55
| 36,961,078
| 0
| 0
| null | 2015-06-06T00:00:24
| 2015-06-06T00:00:23
| null |
UTF-8
|
R
| false
| false
| 1,210
|
r
|
plot4.R
|
plot4 <- function() {
  # Exploratory Data Analysis - Course Project 1.
  # Reads household_power_consumption.txt from the working directory and
  # writes a 2x2 panel of plots to plot4.png.

  # Load only the rows for 1 Feb 2007 and 2 Feb 2007 ("?" marks missing values).
  power <- subset(
    read.csv("household_power_consumption.txt", header = TRUE, sep = ";",
             na.strings = c("?")),
    Date == "1/2/2007" | Date == "2/2/2007"
  )

  # Combined date-time column used as the x axis of every panel.
  power$datetime <- strptime(paste(power$Date, power$Time),
                             format = "%d/%m/%Y %H:%M:%S")

  png(file = "plot4.png", width = 480, height = 480, units = "px")
  # Fill the 2x2 grid column by column.
  par(mfcol = c(2, 2))

  # Column 1, top: global active power (re-draw of Plot 2).
  with(power, plot(datetime, Global_active_power, type = "l",
                   xlab = "", ylab = "Global Active Power"))

  # Column 1, bottom: the three sub-metering series (re-draw of Plot 3).
  with(power, {
    plot(datetime, Sub_metering_1, type = "l", xlab = "",
         ylab = "Energy sub metering")
    lines(datetime, Sub_metering_2, type = "l", col = "red")
    lines(datetime, Sub_metering_3, type = "l", col = "blue")
  })
  legend("topright", lty = 1, col = c("black", "red", "blue"),
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))

  # Column 2: voltage, then global reactive power (default axis labels).
  with(power, plot(datetime, Voltage, type = "l"))
  with(power, plot(datetime, Global_reactive_power, type = "l"))

  # Close the device so the file is flushed to disk.
  dev.off()
}
|
1e6c2d2bfc7628ef75a5eef6a6e0b67468a217ae
|
7eb00eafde0a8cda77c7aba94762e5e7263ee877
|
/man/single_cluster_df.Rd
|
a9a7f26a3aed5a23fe03054c2b7516f4e693fe54
|
[] |
no_license
|
caseywdunn/ms_treeinform
|
997e454d0854e76ae8df1cc74fe6fa9cff055179
|
75290804c14fd811725b8ce5cfae0d60c9731675
|
refs/heads/master
| 2021-05-23T03:19:15.428111
| 2020-07-06T20:46:03
| 2020-07-06T20:46:03
| 81,823,603
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,170
|
rd
|
single_cluster_df.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/single_cluster_df.R
\name{single_cluster_df}
\alias{single_cluster_df}
\title{Creates a single data frame from a list of cluster size distribution data frames
with Method column indicating method (examples: Trinity, Corset, treeinform) and
ID column indicating sample (examples: SRX5, melanogaster).}
\usage{
single_cluster_df(list_cluster_dfs, methods, ids)
}
\arguments{
\item{list_cluster_dfs}{List of cluster size distribution data frames}
\item{methods}{Vector of methods. Must be same length as list_cluster_dfs}
\item{ids}{Vector of IDs. Must be same length as list_cluster_dfs}
}
\value{
Data frame with columns: size, freq, Method, ID
}
\description{
Creates a single data frame from a list of cluster size distribution data frames
with Method column indicating method (examples: Trinity, Corset, treeinform) and
ID column indicating sample (examples: SRX5, melanogaster).
}
\examples{
df1 <- data.frame(size=c(1,2), freq=c(10,20))
df2 <- data.frame(size=c(1,2), freq=c(5,5))
methods <- c("method1", "method2")
ids <- c("id1", "id1")
single_cluster_df(list(df1,df2),methods,ids)
}
|
c79ff2703b8196b7a51607f99abfd98e66ebfa51
|
3e6b792216769241a0171327e33f2df59d481215
|
/LOF-GOF_trainer.R
|
2049671eb2dd4aeedca31a0fddb73be550a6bc09
|
[] |
no_license
|
HanadiBaeissa/Identification-LOF-and-GOF
|
8689e6ec95fe1f7a42f0d52bf74d293561f373aa
|
aa3a9ad311091bcc59a360e6003f10e79ed4d14b
|
refs/heads/master
| 2021-09-10T17:54:52.427460
| 2018-03-30T14:40:18
| 2018-03-30T14:40:18
| 103,120,776
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,142
|
r
|
LOF-GOF_trainer.R
|
# SVM trainer for LOF/GOF classification.
# Usage: Rscript <script> <data.csv> <test_percent> <kernel> <n_repeats>
set.seed(12345) # fixed seed for reproducible train/test splits
library(e1071)
args = commandArgs(trailingOnly=TRUE)
#data <- read.csv("RF_OGvsTS.csv",header=TRUE,row.names = 1) # Read Data
data <- read.csv(args[1],header=TRUE,row.names = 1) # read data; first column supplies row names
print("data loaded sucessfully !")
colnames(data)[1] <- "Class" # first data column is the response class
### Separate data into a feature matrix and the response class.
dataX <- data[,2:length(data)]
dataY <- as.factor(data[,1])
### Handle missing values: impute NAs in numeric columns with the column
### median (mean or mode could be substituted here).
impute.value <- function(x) replace(x, is.na(x), median(x, na.rm = TRUE))
# IDIOM FIX: the original used sapply(), whose return type depends on the
# input (an all-numeric frame collapses to a matrix, and mixed types get
# coerced when rebuilt). lapply() always returns a list, so as.data.frame()
# preserves each column's type.
data.new <- lapply(dataX, function(x){
  if(is.numeric(x)){
    impute.value(x)
  } else {
    x
  }
})
dataX <- as.data.frame(data.new) # rebuild the feature data frame
data <- cbind(dataX,dataY)
colnames(data)[length(data)] <- "Class"
accuracies <-c()
Numbers <- as.numeric(args[4]) # number of repeated train/test evaluations
#### Repeat the split/tune/train/evaluate cycle `Numbers` times ##############
for (i in 1:Numbers) {
  print (i)
  print("##############################")
  # --- Split data into test and train sets ---
  # args[2] gives the test-set percentage: e.g. 25 means 25% of rows go to the
  # test set and the remaining 75% to the training set.
  prct <- as.numeric(args[2])
  prct <- prct / 100 # convert percentage to a fraction
  sample.points <- 1:nrow(data) # row indices of the full data set
  # Draw a fresh random sample of row indices for this repetition.
  ind <- sample(sample.points,floor(length(sample.points) * prct))
  test.set <- data[ind,] # sampled rows form the test set
  train.set <- data[-ind,] # everything else forms the training set
  # --- Parameter tuning ---
  # Grid-search gamma and cost via 10-fold cross validation (tune.svm's
  # default). The grid below spans gamma in 10^-6..10^-1 and cost in
  # 10^-3..10^1; a finer grid is commented out.
  #parameter.tune <- tune.svm(Class~.,data = train.set, gamma= 10^(seq(-8,-1,by=0.5)), cost = 10^seq(-3,1,by=0.2))
  parameter.tune <- tune.svm(Class~.,data = train.set, gamma= 10^(seq(-6,-1)), cost = 10^seq(-3,1))
  # NOTE(review): format() returns character strings here; svm() appears to
  # tolerate them via coercion, but passing the numeric best.parameters
  # directly would be safer -- confirm before relying on exact values.
  cost <- format(parameter.tune$best.parameters$cost,digits=5,nsmall=4)
  gamma <- format(parameter.tune$best.parameters$gamma,digits=5,nsmall=4)
  # Inspect the full tuning result with summary(parameter.tune) if needed.
  paste("Tuned Values :","gamma",gamma,"and cost",cost)
  # --- Fit the SVM with the tuned parameters (kernel from args[3]) ---
  svm.model <- svm(Class~.,data=train.set,kernel=args[3],gamma = gamma, cost = cost)
  summary(svm.model)
  # Save the model of the current repetition (overwritten each iteration).
  save(svm.model,file="svm.model.RData")
  # --- Validation on the held-out test set ---
  validation <- predict(svm.model,test.set[,-length(test.set)])
  # Confusion matrix: predicted vs. true class (assumed binary, 2x2).
  confusion.matrix <- table(pred = validation, true = test.set[,length(test.set)])
  confusion.matrix
  # Performance measures (indices assume a 2x2 table):
  #   sensitivity = TP / (TP + FN)
  #   specificity = TN / (TN + FP)
  #   accuracy    = (TP + TN) / (TP + TN + FP + FN)
  sensitivity <- format(confusion.matrix[4] / (confusion.matrix[4] + confusion.matrix[3]),digits=4)
  specificity <- format(confusion.matrix[1] / (confusion.matrix[1] + confusion.matrix[2]),digits=4)
  accuracy <- format((confusion.matrix[4] + confusion.matrix[1]) / sum(confusion.matrix),digits=4,nsmall=3)
  paste("For data set :",args[1]," the model spliltted in test and train set where train set has ",nrow(train.set),"samples while test set has ",nrow(test.set),
        ".Model was generated using training set and optimal parameter were selected usinng 10 fold cross validation result.",
        "The specificity of model on test set is ",specificity," sensitivity is ",sensitivity," and accuracy is ",accuracy)
  # Accumulate this repetition's accuracy for the final summary.
  accuracies <- c(accuracies,accuracy)
}
accuracies
mean_accuracy <- mean(as.numeric(accuracies))
deviation <- format(sd(as.numeric(accuracies)),digits=5)
paste("Accuracy is ",mean_accuracy,"±",deviation)
|
b59a837a6b5e6352759317d501ee995c19dbcc1c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/dr/vignettes/overview.R
|
4e525074b02e1907bdcf3e43e5b952675c9f5f7d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,586
|
r
|
overview.R
|
### R code from vignette source 'overview.Rnw'
# Extracted example code for the 'dr' (dimension reduction) package vignette.
# The numbered chunks below fit and test sufficient-dimension-reduction
# models (SIR, SAVE, pHd, IRE) on the built-in `ais` athletes data set;
# run sequentially, since later chunks reuse objects (s0, s1, s2, m1, ...).
###################################################
### code chunk number 1: a1
###################################################
library(dr)
opt <- options(width=66)
###################################################
### code chunk number 2: a11
###################################################
# Fit sliced inverse regression (SIR) with 8 slices; s0 is reused below.
summary(s0 <- dr(LBM~log(SSF)+log(Wt)+log(Hg)+log(Ht)+log(WCC)+log(RCC)+
log(Hc)+log(Ferr),data=ais,slice.function=dr.slices.arc,nslices=8,
chi2approx="wood",numdir=4,method="sir"))
###################################################
### code chunk number 3: a2
###################################################
# Marginal coordinate test: can log(RCC) be dropped?
dr.coordinate.test(s0,hypothesis=~.-log(RCC))
###################################################
### code chunk number 4: a2
###################################################
dr.coordinate.test(s0,hypothesis=~.-log(RCC),d=2)
###################################################
### code chunk number 5: a2
###################################################
m0 <- drop1(s0,update=TRUE)
###################################################
### code chunk number 6: overview.Rnw:563-564
###################################################
s1a <- dr.step(s0,scope=~log(Wt),stop=0.20)
###################################################
### code chunk number 7: overview.Rnw:599-600
###################################################
# Refit with separate slicing within each level of Sex.
summary(s1 <- update(s0, group=~Sex))
###################################################
### code chunk number 8: a2
###################################################
s2 <- update(s0,method="save")
summary(s2)
###################################################
### code chunk number 9: save
###################################################
drop1(s1,update=FALSE)
###################################################
### code chunk number 10: overview.Rnw:706-707
###################################################
summary(s3 <- update(s2,group=~Sex))
###################################################
### code chunk number 11: overview.Rnw:745-746
###################################################
summary(s2 <- update(s0,method="phdres"))
###################################################
### code chunk number 12: one
###################################################
# Inverse regression estimation (IRE) fit; m1 is reused in chunks 13-16.
(m1 <- dr(LBM~log(Ht)+log(Wt)+log(SSF)+log(RCC)+log(WCC)+log(Ferr)+
log(Hc)+log(Hg),data=ais,method="ire",nslices=8,numdir=4,
slice.function=dr.slices.arc,itmax=200,steps=1,eps=1.e-6))
###################################################
### code chunk number 13: two
###################################################
dr.basis(m1,numdir=2)
###################################################
### code chunk number 14: three
###################################################
dr.basis(m1,3)
###################################################
### code chunk number 15: mct
###################################################
dr.coordinate.test(m1,~.-log(Hg))
dr.coordinate.test(m1,~.-log(Hg),d=2)
###################################################
### code chunk number 16: drop1
###################################################
drop1(m1,update=FALSE)
###################################################
### code chunk number 17: pire
###################################################
# Partial IRE: condition on the grouping variable Sex.
m2 <- dr(LBM~log(Ht)+log(Wt)+log(SSF)+log(RCC)+log(WCC)+log(Ferr)+
log(Hc)+log(Hg),group=~Sex,data=ais,method="ire",nslices=8,
numdir=4,slice.function=dr.slices.arc,itmax=200,steps=1,
eps=1.e-6)
###################################################
### code chunk number 18: pire1
###################################################
m2
###################################################
### code chunk number 19: overview.Rnw:1102-1104
###################################################
wts <- dr.weights(LBM~Ht+Wt+RCC+WCC,data=ais)
i1 <- dr(LBM~Ht+Wt+RCC+WCC,weights=wts,method="phdres",data=ais)
###################################################
### code chunk number 20: overview.Rnw:1113-1115
###################################################
# Demonstrate the slicing helper on a small hand-made response.
y1 <- c(1,1,1,2,3,4,5,6,7,8,8,8)
dr.slices(y1,3)
###################################################
### code chunk number 21: overview.Rnw:1128-1130
###################################################
y2 <- c(1,2,3,4,1,2,3,4,1,2,3,4)
dr.slices(cbind(y1,y2),5)
###################################################
### code chunk number 22: overview.Rnw:1145-1146
###################################################
dr.permutation.test(s0,npermute=99,numdir=4)
|
3abd7b71bf18de10345b6244229f75189f2dd47d
|
d5cce2c35ee95e28934d4aff345b6f57251df5a4
|
/Rscripts/points pre post covid.R
|
b4fedc20774993fda79ae1b2180153550e49f4fb
|
[] |
no_license
|
Alantjee/Thesis-Home-Advantage
|
80cb140336084fc4c97006ebe616926dc61a2246
|
17dc41fa74fd62badef1c26d9b451acb0e8f16ea
|
refs/heads/main
| 2023-05-07T16:23:18.579866
| 2021-06-04T06:08:42
| 2021-06-04T06:08:42
| 354,041,833
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,761
|
r
|
points pre post covid.R
|
# Compare average points earned by home and away teams before and during
# covid. Expects data frames `covid_data` and `non_covid_data` with numeric
# columns `home_points` and `away_points` to exist in the workspace, and
# ggplot2 to be loaded. Produces `plot_mean_points`.

# Mean points per game for each situation.
points_home_covid <- mean(covid_data$home_points)
points_home_noncovid <- mean(non_covid_data$home_points)
points_away_covid <- mean(covid_data$away_points)
points_away_noncovid <- mean(non_covid_data$away_points)
# Standard deviations.
sd_home_covid <- sd(covid_data$home_points)
sd_away_covid <- sd(covid_data$away_points)
sd_home_noncovid <- sd(non_covid_data$home_points)
sd_away_noncovid <- sd(non_covid_data$away_points)
# Standard errors: divide by sqrt of the number of observations (rows).
# FIX: the original divided by sqrt(length(<data.frame>)), which is the
# number of columns, not the number of games.
n_covid <- nrow(covid_data)
n_noncovid <- nrow(non_covid_data)
se_home_covid <- sd_home_covid/sqrt(n_covid)
se_away_covid <- sd_away_covid/sqrt(n_covid)
se_home_noncovid <- sd_home_noncovid/sqrt(n_noncovid)
se_away_noncovid <- sd_away_noncovid/sqrt(n_noncovid)
situation2 <- c("Average points home covid", "Average points away covid", "Average points home pre covid","Average points away pre covid")
meanpoints2 <- c(points_home_covid ,points_away_covid,points_home_noncovid ,points_away_noncovid)
se <- c(se_home_covid, se_away_covid, se_home_noncovid, se_away_noncovid)
# FIX: build the data frame directly so the numeric columns stay numeric.
# The original went through cbind(), which coerces everything to character.
df_mean_points <- data.frame(situation2, meanpoints2, se)
str(df_mean_points)
# Order the bars: pre-covid first, then covid. FIX: the levels must match
# the strings in `situation2` exactly (the original used differently
# capitalized labels, so factor() silently turned every value into NA).
df_mean_points$situation2 <- factor(df_mean_points$situation2,
  levels = c("Average points home pre covid", "Average points away pre covid",
             "Average points home covid", "Average points away covid"))
levels(df_mean_points$situation2)
df_mean_points
# Bar chart of mean points with +/- 1 standard error bars.
# FIX: the original plotted `df_mean_win` / `situation`, which do not exist
# in this script; the data frame is df_mean_points and the column situation2.
plot_mean_points <- ggplot(df_mean_points, aes(x = situation2, y = meanpoints2,
                             ymin = meanpoints2 - se, ymax = meanpoints2 + se)) +
  geom_bar(aes(color = situation2), stat = "identity", fill = "white") +
  geom_errorbar(aes(color = situation2), width = 0.2) +
  xlab("Home vs Away points") +
  ylab("Average points") +
  ggtitle("Points home and away pre and post covid") +
  theme_minimal()
plot_mean_points
|
d8e1fba45dd9df3af5d6f8af3754fb511a79d7ce
|
3cc888d60aa5e76ccee48be08d1e9849792bf503
|
/man/SL.ksvm.Rd
|
d96fb7a9c49c02cb63ae101e818597ee1be7d1e1
|
[] |
no_license
|
ecpolley/SuperLearner
|
4584cdbe7dccf945689958d20d0ea779a826bbda
|
801aa6039460648d4dfd87c1fad77e5f29391cb7
|
refs/heads/master
| 2023-07-24T21:52:33.665047
| 2023-07-18T13:56:30
| 2023-07-18T13:56:30
| 1,622,048
| 245
| 82
| null | 2019-08-06T14:25:24
| 2011-04-16T05:18:51
|
R
|
UTF-8
|
R
| false
| true
| 4,198
|
rd
|
SL.ksvm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SL.ksvm.R
\encoding{utf-8}
\name{SL.ksvm}
\alias{SL.ksvm}
\title{Wrapper for Kernlab's SVM algorithm}
\usage{
SL.ksvm(Y, X, newX, family, type = NULL, kernel = "rbfdot",
kpar = "automatic", scaled = T, C = 1, nu = 0.2, epsilon = 0.1,
cross = 0, prob.model = family$family == "binomial",
class.weights = NULL, cache = 40, tol = 0.001, shrinking = T, ...)
}
\arguments{
\item{Y}{Outcome variable}
\item{X}{Training dataframe}
\item{newX}{Test dataframe}
\item{family}{Gaussian or binomial}
\item{type}{ksvm can be used for classification, for regression, or for
novelty detection. Depending on whether y is a factor or not, the default
setting for type is C-svc or eps-svr, respectively, but can be overwritten
by setting an explicit value. See ?ksvm for more details.}
\item{kernel}{the kernel function used in training and predicting. This
parameter can be set to any function, of class kernel, which computes the
inner product in feature space between two vector arguments. See ?ksvm for
more details.}
\item{kpar}{the list of hyper-parameters (kernel parameters). This is a list
which contains the parameters to be used with the kernel function. See
?ksvm for more details.}
\item{scaled}{A logical vector indicating the variables to be scaled. If
scaled is of length 1, the value is recycled as many times as needed and
all non-binary variables are scaled. Per default, data are scaled
internally (both x and y variables) to zero mean and unit variance. The
center and scale values are returned and used for later predictions.}
\item{C}{cost of constraints violation (default: 1) this is the 'C'-constant
of the regularization term in the Lagrange formulation.}
\item{nu}{parameter needed for nu-svc, one-svc, and nu-svr. The nu parameter
sets the upper bound on the training error and the lower bound on the
fraction of data points to become Support Vectors (default: 0.2).}
\item{epsilon}{epsilon in the insensitive-loss function used for eps-svr,
nu-svr and eps-bsvm (default: 0.1)}
\item{cross}{if a integer value k>0 is specified, a k-fold cross validation
on the training data is performed to assess the quality of the model: the
accuracy rate for classification and the Mean Squared Error for regression}
\item{prob.model}{if set to TRUE builds a model for calculating class
probabilities or in case of regression, calculates the scaling parameter of
the Laplacian distribution fitted on the residuals. Fitting is done on
output data created by performing a 3-fold cross-validation on the training
data. (default: FALSE)}
\item{class.weights}{a named vector of weights for the different classes,
used for asymmetric class sizes. Not all factor levels have to be supplied
(default weight: 1). All components have to be named.}
\item{cache}{cache memory in MB (default 40)}
\item{tol}{tolerance of termination criterion (default: 0.001)}
\item{shrinking}{option whether to use the shrinking-heuristics (default: TRUE)}
\item{...}{Any additional parameters, not currently passed through.}
}
\value{
List with predictions and the original training data &
hyperparameters.
}
\description{
Wrapper for Kernlab's support vector machine algorithm.
}
\examples{
data(Boston, package = "MASS")
Y = Boston$medv
# Remove outcome from covariate dataframe.
X = Boston[, -14]
set.seed(1)
sl = SuperLearner(Y, X, family = gaussian(),
SL.library = c("SL.mean", "SL.ksvm"))
sl
pred = predict(sl, X)
summary(pred$pred)
}
\references{
Hsu, C. W., Chang, C. C., & Lin, C. J. (2016). A practical guide to support
vector classification. \url{https://www.csie.ntu.edu.tw/~cjlin/papers/guide/guide.pdf}
Scholkopf, B., & Smola, A. J. (2001). Learning with kernels: support vector
machines, regularization, optimization, and beyond. MIT press.
Vapnik, V. N. (1998). Statistical learning theory (Vol. 1). New York: Wiley.
Zeileis, A., Hornik, K., Smola, A., & Karatzoglou, A. (2004). kernlab-an S4
package for kernel methods in R. Journal of statistical software, 11(9),
1-20.
}
\seealso{
\code{\link{predict.SL.ksvm}} \code{\link[kernlab]{ksvm}}
\code{\link[kernlab]{predict.ksvm}}
}
|
ed40fafed5432ab36491acd63179a3ea6f5a541d
|
ade83b195746e2add5234f4d488ec449b6ca6e82
|
/man/fars_summarize_years.Rd
|
27d642d2b2972c59ffda19568bad7645ea78693d
|
[] |
no_license
|
Mridul0001/FARSFunctions
|
ded6100cfafa8aa04b956dde629c3da104c2116c
|
61b3455a6152b330a5d129617da8262af50e45b5
|
refs/heads/master
| 2022-10-23T23:53:05.765012
| 2020-06-11T03:56:38
| 2020-06-11T03:56:38
| 271,444,711
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 761
|
rd
|
fars_summarize_years.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{fars_summarize_years}
\alias{fars_summarize_years}
\title{Count number of motor vehicle accidents by year for selected years}
\usage{
fars_summarize_years(years)
}
\arguments{
\item{years}{Numeric vector of years to summarize.}
}
\value{
Returns a tbl_df of the number of motor vehicle accidents for each selected year.
}
\description{
Counts the number of fatal motor vehicle accidents for each month of the specified years in the FARS data set.
}
\details{
Throws an error if specified years are invalid.
}
\examples{
\dontrun{fars_summarize_years(c(2013,2014,2015))}
}
\seealso{
\url{https://www.nhtsa.gov/Data/Fatality-Analysis-Reporting-System-(FARS)}
}
|
318e5c0e21c59e059fdf44383984f0a0f852fb00
|
41a4e292e9bb4218ea7ccb577a034081ffec7fc7
|
/man/where.Rd
|
e07f5c344d47764d52dc60a38c4ed04f67367538
|
[] |
no_license
|
ahorawzy/usefulr
|
6b5272d755a4fc7209f30d2c008cb145168104ad
|
4c1cf747ec871d549503b130ce2ebbdcbd56b086
|
refs/heads/master
| 2021-07-02T16:52:19.179497
| 2019-04-09T02:01:38
| 2019-04-09T02:01:38
| 143,407,393
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 406
|
rd
|
where.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functional.R
\name{where}
\alias{where}
\title{Logical functional: where}
\usage{
where(f, x)
}
\arguments{
\item{f}{A function returns logical value.}
\item{x}{The list.}
}
\description{
Return a logical vector indicating, for every element of the list, whether \code{f} returns \code{TRUE}.
}
\examples{
df <- data.frame(x = 1:3, y = c("a","b","c"))
where(is.factor,df)
}
|
2d7957fdb681538252fa21ff96c76ff93a47beb7
|
ed6b8a6f4a8a7dcf38d3632451ca01419fd7bebf
|
/R/stringtools.R
|
277cf746153a8eb7fe32412aa034a5367dad081a
|
[] |
no_license
|
skranz/stringtools
|
8ab0ee094e3edf837a5b95cb81ba6f5b084f6505
|
8a37e75f057c7af3a0b115bd1e82ae66c91a0579
|
refs/heads/master
| 2022-05-22T02:36:16.012223
| 2022-05-04T20:28:27
| 2022-05-04T20:28:27
| 12,137,116
| 1
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 45,164
|
r
|
stringtools.R
|
# A package with functions for string and text modifications to complement stringr
#
# pos can be a
# vector: assuming an element of size 1 that specifies a single char at that positions
# n*2 matrix: first column left position, right column right position
# list of vectors or matrices, specifying different pos for different str
# Special charcters that can appear in find patterns
.onLoad = function(...) {
  # Package load hook: replace restore.point() with a no-op so the
  # debugging calls sprinkled through this file do nothing when the
  # package is loaded as a library (restorepoint is only needed in
  # development).
  assign("restore.point", function(...){}, envir=parent.env(environment()))
}
# Package-global settings environment; created only if missing so that
# re-sourcing this file preserves previously set flags.
if (!exists("glob"))
glob = new.env()
# Master switch for the argument-length checks in check.str.par().
glob$DO_CHECK_STR_PAR = TRUE
#' Check that parameters to a str function have compatible lengths
#'
#' Verifies that `str` and every element of the parameter list `para`
#' either have length <= 1 or share one common length, so that vector
#' recycling cannot silently misalign arguments.
#'
#' @param str the main string vector of the calling function
#' @param para a named list of the other parameters to check
#' @return invisibly NULL; called for its side effect of stopping on error
check.str.par = function(str,para) {
  # Checks can be switched off globally.
  # FIX: the original used a bare `return`, which just evaluates to the
  # `return` function object and does NOT exit the function.
  if(!glob$DO_CHECK_STR_PAR)
    return(invisible(NULL))
  # One length per supplied parameter. FIX: the original special-cased a
  # length-1 list as len = length(para) == 1, which disabled the check
  # whenever only a single parameter was passed; sapply handles all sizes.
  len = sapply(para,length)
  len = c(length(str),len)
  if ((length(unique(len[len>1]))>1)) {
    stop("Lengths of parameters are ", paste(c("str",names(para)),"=",len,collapse=" ")," All parameters with length>1 must have the same length!")
  }
  invisible(NULL)
}
# Demo (not run on load): extract the text between the first "(" and the
# first ")" of each element.
examples.str.between = function() {
  str = c("a * (4+3)","b+3)+1","(a*3+(1+2))", ")")
  str.between(str,"(",")")
  str.between("#< type and","#< "," ")
}
#' Extract the substring between the first occurrence of `start` and the
#' first following occurrence of `end`
#'
#' @param str character vector to search
#' @param start pattern marking the left boundary (not included in result)
#' @param end pattern marking the right boundary (not included in result)
#' @param ... passed on to str.right.of / str.left.of
#' @export
str.between = function(str,start,end,...) {
  # Drop everything up to and including `start`, then keep what is
  # left of `end` in the remainder.
  after.start = str.right.of(str, start, ...)
  str.left.of(after.start, end, ...)
}
# Demo (not run on load) for str.left.of() / str.right.of(), including the
# `not.found` fallback behavior.
examples.str.left.of = function() {
  str = c("a = 5","b+3","a+3 = 4 = 3", "=")
  str.left.of(str,"=")
  str.left.of(str,"=", not.found=NA)
  str.right.of(str,"=")
  str.right.of(str,"=", not.found=NA)
}
#' Return the part of each string before the first match of `pattern`
#'
#' @param str character vector to search
#' @param pattern pattern whose first match marks the cut point
#' @param ... passed on to str.locate.first (e.g. fixed, ignore)
#' @param not.found value returned for elements without a match
#'   (defaults to the unchanged string)
#' @export
str.left.of = function(str,pattern,..., not.found=str) {
  match.pos = str.locate.first(str, pattern, ...)
  out = substring(str, 1, match.pos[, 1] - 1)
  # Elements without a match get the fallback value.
  missed = is.na(match.pos[, 1])
  out[missed] = not.found[missed]
  out
}
#' Return the part of each string after the first match of `pattern`
#'
#' @param str character vector to search
#' @param pattern pattern whose first match marks the cut point
#' @param ... passed on to str.locate.first (e.g. fixed, ignore)
#' @param not.found value returned for elements without a match
#'   (defaults to the unchanged string)
#' @export
str.right.of = function(str,pattern,...,not.found=str) {
  match.pos = str.locate.first(str, pattern, ...)
  out = substring(str, match.pos[, 2] + 1)
  # Elements without a match get the fallback value.
  missed = is.na(match.pos[, 2])
  out[missed] = not.found[missed]
  out
}
#' Build strings of repeated spaces
#'
#' Returns, for every element of `times`, a string made of that many
#' copies of `space`; vectorized over `times`.
#' @param times integer vector of repeat counts
#' @param space the character to repeat (default " ")
#' @export
str.space = function(times, space=" ") {
  # Build one string long enough for the largest count, then take
  # prefixes of the requested lengths.
  longest = paste(rep(space, max(times)), collapse = "")
  substring(longest, 1, last = times)
}
# Demo (not run on load) for str.space().
example.str.space = function() {
  str.space(0:4)
}
#' Remove leading and trailing whitespace from each element of `txt`
#' (thin wrapper around stringr::str_trim)
#' @export
str.trim = function(txt) {
  str_trim(txt)
}
#' Quote strings so they are treated as literal text in a regex
#'
#' Wraps each element in \\Q...\\E so that regex metacharacters lose
#' their special meaning.
#'
#' @param str a string vector
#' @param fixed if FALSE the input is returned unchanged
#' @return transformed string vector of the same length as `str`
#' @export
regexp.fixed =function(str,fixed=TRUE) {
  if (fixed) {
    paste0("\\Q", str, "\\E")
  } else {
    str
  }
}
# Demo (not run on load): contrast regexp.fixed() (transforms the strings)
# with stringr::fixed() (flags them instead).
examples.regexp.fixed = function() {
  str = c("A.","*")
  # regexp.fixed transforms strings
  regexp.fixed(str)
  # fixed in stringr flags strings instead
  fixed(str)
}
#' Turn a vector of strings like c("A","B","C") into the regex "A|B|C"
#'
#' @param str a string vector of alternatives
#' @param fixed if TRUE each alternative is quoted with regexp.fixed()
#'   so it matches literally
#' @return a single regular-expression string
#' @export
str.list.to.regexp.or = function(str,fixed=TRUE) {
  # Optionally quote each alternative, then join with the regex "or".
  parts = if (fixed) regexp.fixed(str) else str
  paste(parts, collapse = "|")
}
# Demo (not run on load) for str.list.to.regexp.or().
examples.str.list.to.regexp.or = function(){
  greek=c("alpha","beta","gamma")
  str.list.to.regexp.or(greek)
}
#' Collapse a character vector into one string
#'
#' e.g. c("A","B") becomes "A\nB" with the default separator.
#' @param txt character vector
#' @param collapse separator placed between elements (default newline)
#' @export
merge.lines = function(txt, collapse = "\n") {
  # paste() with `collapse` concatenates all elements into one string.
  paste(txt, collapse = collapse)
}
#' Split text into one element per line
#'
#' The inverse of merge.lines(): a string containing separators becomes a
#' vector with one element per line. Multi-element input is merged first.
#' @param txt character vector
#' @param collapse the separator to split on (default newline)
#' @export
sep.lines = function(txt, collapse = "\n") {
  single = if (length(txt) > 1) merge.lines(txt, collapse) else txt
  stringr::str_split(single, collapse)[[1]]
}
# Demo (not run on load): merge.lines() and sep.lines() round-trip.
examples.merge.lines = test.sep.lines = function() {
  merge = merge.lines(c("A","B"))
  merge
  sep.lines(merge)
}
#' Flag the characters of `str` covered by positions `pos`
#'
#' Returns a logical vector with one element per character of `str`,
#' TRUE where the character lies inside one of the position ranges.
#'
#' @param str a single string
#' @param pos either a numeric vector of single-character positions or an
#'   n x 2 matrix of (start, end) ranges (same convention as
#'   str.split.at.pos)
#' @return logical vector of length nchar(str)
#' @export
str.inpos = function(str,pos) {
  stopifnot(length(str) == 1)
  inpos = rep(FALSE,nchar(str))
  if (length(pos)==0) return(inpos)
  # FIX: accept a plain vector of positions by treating each entry as a
  # one-character range; the original indexed pos[i,1] and errored on
  # vector input even though the file-wide convention allows it.
  if (!is.matrix(pos))
    pos = cbind(pos, pos)
  # seq_len avoids the 1:0 footgun for degenerate inputs.
  for (i in seq_len(NROW(pos))) {
    inpos[pos[i,1]:pos[i,2]] = TRUE
  }
  return(inpos)
}
#' Number of characters of each element of `str` (synonym for nchar)
#' @export
str.len = function(str) {
  nchar(str)
}
#' Strip a fixed number of characters from both ends of each string
#'
#' str.remove.ends(c("ABCDEF","01"),1,3) returns c("BC","")
#' @param str character vector
#' @param left number of characters to drop from the start
#' @param right number of characters to drop from the end
#' @export
str.remove.ends = function(str, left=0,right=0) {
  check.str.par(str,list(left=left,right=right))
  # Keep the middle part: positions left+1 .. nchar-right.
  first.keep = left + 1
  last.keep = nchar(str) - right
  substring(str, first.keep, last.keep)
}
# Demo (not run on load) for str.remove.ends(), including calls that are
# expected to fail the parameter-length check.
examples.str.remove.ends = function(str, left=0,right=0) {
  str.remove.ends(c("ABCDEF","01345"),1,3)
  str.remove.ends(c("ABCDEF"),1:2,1:2)
  str.remove.ends(c("ABCDEF","01345"),1:2,1)
  # The following calls throw errors!
  str.remove.ends(c("ABCDEF","01345"),1:2,1:3)
  str.remove.ends(c("ABCDEF","01345","NOW!"),1:2,1)
}
#' Test which elements of `txt` begin with `pattern`
#'
#' @param txt character vector
#' @param pattern a single prefix string (matched literally)
#' @return logical vector, TRUE where the element starts with `pattern`
#' @export
str.starts.with = function(txt,pattern) {
  # base::startsWith (R >= 3.3) is clearer and faster than comparing a
  # substring of the prefix length, with identical semantics.
  startsWith(txt, pattern)
}
# Demo (not run on load) for str.starts.with().
examples.str.starts.with = function() {
  str = c("Hi how are you", "hi", "now what", "Hi")
  str.starts.with(str,"Hi")
}
#' Test which elements of `txt` end with `pattern`
#'
#' @param txt character vector
#' @param pattern a single suffix string (matched literally)
#' @return logical vector, TRUE where the element ends with `pattern`
#' @export
str.ends.with = function(txt,pattern) {
  # base::endsWith (R >= 3.3) replaces the manual substring comparison.
  endsWith(txt, pattern)
}
# Demo (not run on load) for str.ends.with().
examples.str.ends.with = function() {
  str = c("Hi how are you", "hi", "now what", "Hi")
  str.ends.with(str,"you")
}
#' Returns a string consisting of `times` spaces, vectorized over times
#' NOTE(review): byte-identical duplicate of the str.space() defined
#' earlier in this file; when sourced, this later definition silently
#' replaces the first. Consider deleting one copy.
#' @export
str.space = function(times, space=" ") {
  space.str = paste0(rep(space,max(times)),collapse="")
  substring(space.str,1,last=times)
}
# Demo for str.space(). NOTE(review): duplicate of the identical
# example.str.space() defined earlier in this file.
example.str.space = function() {
  str.space(0:4)
}
#' Returns TRUE for elements of txt that end with pattern
#' NOTE(review): byte-identical duplicate of the str.ends.with() defined
#' earlier in this file; the later definition silently wins when sourced.
#' @export
str.ends.with = function(txt,pattern) {
  substring(txt,nchar(txt)-nchar(pattern)+1,)==pattern
}
# Demo for str.ends.with(). NOTE(review): duplicate of the identical
# examples.str.ends.with() defined earlier in this file.
examples.str.ends.with = function() {
  str = c("Hi how are you", "hi", "now what", "Hi")
  str.ends.with(str,"you")
}
#' Keep the leftmost `len` characters of each string
#' @param str character vector
#' @param len how many characters to keep (default 1)
#' @export
str.left = function(str, len=1) {
  check.str.par(str,list(len=len))
  # Prefix of the requested length (shorter strings are returned whole).
  substring(str, first = 1, last = len)
}
#' Keep the rightmost `len` characters of each string
#' @param str character vector
#' @param len how many characters to keep (default 1)
#' @export
str.right = function(str, len=1) {
  check.str.par(str,list(len=len))
  # Suffix of the requested length (shorter strings are returned whole).
  total = nchar(str)
  substring(str, total - len + 1, total)
}
#' Split strings at the given positions
#'
#' @param str character vector that shall be split
#' @param pos split positions; can be
#'   a vector: each entry names a single character position,
#'   an n*2 matrix: first column left position, second column right position,
#'   a list of vectors/matrices: one pos spec per element of str
#' @param keep.pos default FALSE. If TRUE the tokens at the split positions
#'   are kept in the result, otherwise they are removed
#' @param compl precomputed complement of pos (internal; computed via
#'   pos.complement(), which is defined elsewhere in this package)
#' @param max.char longest string length considered; trailing complement
#'   ranges beyond it are dropped
#' @param pos.mat.like.list if TRUE, row i of the pos matrix applies only
#'   to str[i] (like the list form)
#' @return for a single str, a character vector of fragments; for multiple
#'   str, a matrix (or list in the list/pos.mat.like.list cases)
#' @export
str.split.at.pos = function(str, pos, keep.pos = FALSE, compl=NULL, max.char = max(nchar(str)),pos.mat.like.list=FALSE) {
  restore.point("str.split.at.pos")
  # List form: recurse element-wise, one pos spec per string.
  if (is.list(pos)) {
    stopifnot(length(str)==length(pos))
    fun = function(i)
      str.split.at.pos(str[i],pos[[i]],keep.pos=keep.pos)
    return(lapply(seq_along(str),fun))
  }
  # A plain vector means single-character split tokens.
  if (!is.matrix(pos)) {
    pos = cbind(pos,pos)
  }
  if (NROW(pos)==0)
    return(str)
  # Row i of pos applies only to str[i]: recurse row-wise.
  if (pos.mat.like.list) {
    stopifnot(length(str)==NROW(pos))
    fun = function(i)
      str.split.at.pos(str[i],pos[i,,drop=FALSE],keep.pos=keep.pos)
    return(lapply(seq_along(str),fun))
  }
  # The fragments to extract are the complement of the split ranges.
  if (is.null(compl)) {
    compl = pos.complement(pos,keep.pos=keep.pos)
  }
  # Same pos for several strings: recurse per string, reusing compl.
  if (length(str)>1) {
    fun = function(i)
      str.split.at.pos(str[i],pos,keep.pos=keep.pos,compl=compl,max.char=max.char)
    return(t(sapply(seq_along(str),fun)))
  }
  # Drop a trailing complement range that starts beyond the string.
  if (compl[NROW(compl),1]>max.char)
    compl = compl[-NROW(compl),,drop=FALSE]
  ret = substring(str,compl[,1],compl[,2])
  ret[is.na(ret)]=""
  return(ret)
}
# Demo (not run on load) for str.split.at.pos() with vector, matrix and
# list position specs. NOTE(review): the variable `ignore` used below is
# not defined in this function -- the later calls only work if an
# `ignore` object exists in the calling environment.
examples.str.split.at.pos = function() {
  str = c("1234567890")
  pos = c(3,5,7)
  str.split.at.pos(str,pos,keep.pos = FALSE)
  str.split.at.pos(str,pos,keep.pos = TRUE)
  pos = rbind(c(2,3),c(5,5),c(7,9))
  str.split.at.pos(str,pos,keep.pos = FALSE)
  str.split.at.pos(str,pos,keep.pos = TRUE)
  # Multiple str
  str = c("Hello ernie","abcg","hello erna")
  pos = c(2,5,8)
  str.split.at.pos(str,pos,keep.pos=TRUE)
  pos = list(c(3,5),c(2),c(1,9))
  str.split.at.pos(str,pos,keep.pos=TRUE)
  str = c("Hello ernie","abcdefg","hello erna")
  pos = str.locate.first(str,"e",ignore=ignore)
  pos
  str.split.at.pos(str,pos,keep.pos=TRUE,pos.mat.like.list=FALSE)
  str.split.at.pos(str,pos,keep.pos=TRUE,pos.mat.like.list=TRUE)
}
#' Convert a string into a vector of single characters
#'
#' Multi-element input is first joined with `collapse`.
#' @param str character vector (collapsed to one string if length > 1)
#' @param collapse separator used when joining multiple elements
#' @return character vector with one element per character
#' @export
to.char.vector = function(str,collapse="") {
  single = if (length(str) > 1) paste(str, collapse = collapse) else str
  # FIX: the original used ind = 1:nchar, which for an empty string is
  # c(1, 0) and wrongly returned c("", "") instead of character(0);
  # seq_len handles the zero-length case correctly.
  positions = seq_len(nchar(single))
  substring(single, positions, positions)
}
#' Convert a vector of strings into a matrix of single characters
#'
#' One row per string, one column per character position; strings shorter
#' than the longest are padded with "" cells.
#' @param str character vector
#' @param drop if TRUE and str has length 1, return a plain character
#'   vector instead of a 1-row matrix
#' @export
to.char.matrix = function(str,drop=FALSE) {
  if (length(str)==1 & drop) {
    # Single string with drop: return a plain character vector.
    idx = 1:nchar(str)
    substring(str, idx, idx)
  } else {
    width = max(nchar(str))
    # Column-major order: all rows of column 1, then column 2, ...
    col.idx = rep(1:width, each = NROW(str))
    chars = substring(rep(str, times = width), col.idx, col.idx)
    matrix(chars, nrow = length(str))
  }
}
#' Join a matrix of single characters into one string per row
#'
#' @param mat character matrix (a plain vector is joined into one string)
#' @param collapse separator placed between the characters of a row
#' @export
char.matrix.to.str = function(mat,collapse="") {
  if (is.matrix(mat)) {
    apply(mat, 1, function(row) paste(row, collapse = collapse))
  } else {
    paste(mat, collapse = collapse)
  }
}
#' Join a vector of characters back into string(s)
#'
#' The characters are concatenated directly; if `sep` is given the result
#' is additionally split into one string per `sep`-delimited line.
#' NOTE(review): the `collapse` argument is accepted but never used --
#' characters are always joined with "" (confirm intent with callers;
#' behavior preserved here).
#' @param vec character vector of single characters
#' @param sep optional separator to split the joined string on
#' @param collapse unused (kept for interface compatibility)
#' @export
char.vector.to.str = function(vec,sep=NULL,collapse="") {
  joined = paste(vec, collapse = "")
  if (is.null(sep)) {
    return(joined)
  }
  sep.lines(joined, sep)
}
# Demo (not run on load): round-trips between strings, char matrices and
# char vectors.
examples.to.char.matrix = function() {
  str =c("Now that is a nice matrix","but short!")
  mat = to.char.matrix(str)
  mat
  char.matrix.to.str(mat)
  vec = to.char.vector(str,collapse="\n")
  vec
  char.vector.to.str(vec,collapse="\n")
}
#' Remove the characters flagged by `ignore` from each string
#'
#' @param str character vector
#' @param ignore logical vector or logical matrix saying which character
#'   positions shall be ignored; flagged characters are deleted. A vector
#'   is recycled row-wise across all elements of str; a matrix is used
#'   as-is (one row per string).
#' @return character vector like str with the flagged characters removed
#' @export
str.remove.ignore = function(str,ignore) {
  restore.point("str.remove.ignore")
  # Work on a strings-by-characters matrix so positions can be blanked.
  mat = to.char.matrix(str)
  if (NCOL(mat)==0)
    return(str)
  # Recycle a plain ignore vector into one row per string.
  if (length(str)>1 & !is.matrix(ignore))
    ignore = matrix(ignore,nrow=NROW(str),ncol=NCOL(mat),byrow=TRUE)
  # Ignore spec wider than the longest string: truncate.
  if (NCOL(ignore)>NCOL(mat))
    ignore = ignore[,1:NCOL(mat)]
  # Ignore spec too narrow: pad with FALSE (and warn).
  if (NCOL(ignore)<NCOL(mat)) {
    warning("str.remove.ignore: ignore has fewer columns than number of chars of longest element in str. Fill up with ignore=FALSE")
    old.ignore = ignore
    ignore = matrix(FALSE,NROW(ignore),NCOL(mat))
    ignore[,1:NCOL(old.ignore)] = old.ignore
  }
  # Blank the flagged cells, then join each row back into a string.
  mat[ignore] = ""
  char.matrix.to.str(mat)
}
# Demo (not run on load) for str.remove.ignore().
examples.str.remove.ignore = function() {
  str =c("1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZ","1234567890")
  ignore = rep(FALSE,max(nchar(str)))
  ignore[c(4:5,8:20)] = TRUE
  str
  str.remove.ignore(str,ignore)
}
#' Does each element of `str` contain `pattern`?
#'
#' Works similarly to grepl(), but additionally supports this package's
#' `ignore` mechanism for skipping character positions.
#' @param str character vector to search
#' @param pattern the pattern to look for
#' @param fixed treat `pattern` as a literal string (default TRUE)
#' @param perl use perl-style regular expressions
#' @param ignore optional logical vector/matrix of positions to skip
#' @return logical vector, TRUE where a match exists
#' @export
has.substr = function(str, pattern, fixed=TRUE, perl=FALSE, ignore =NULL) {
  loc = str.locate.first(str, pattern, fixed = fixed, perl = perl, ignore = ignore)
  # A match exists exactly when a start position was found.
  !is.na(loc[, 1])
}
#' Synonym for has.substr()
#' @export
str.detect = has.substr
# Demo (not run on load) for has.substr().
examples.has.substr = function() {
  str = c("abcdefgh","12347382709")
  pattern = c("a")
  # FIX: the original called str.has.substr(), which does not exist in
  # this file; the function is named has.substr().
  has.substr(str,pattern)
}
#' Find substring positions or matches
#'
#' A general wrapper around str.locate.first, str.locate.all,
#' str.extract.first and str.extract.all style behavior (implemented via
#' stringr's str_locate/str_extract families).
#'
#' @param str vector of strings that will be searched
#' @param pattern a search pattern
#' @param fixed if FALSE perform regular expression search
#' @param first shall only the first match be returned
#' @param all shall all matches be returned (default: the opposite of first)
#' @param simplify try to simplify a list return for length-1 input
#' @param matches if FALSE positions are returned, otherwise the extracted
#'   substrings
#' @param ... passed on to the underlying stringr functions
#' @export
str.find = function(str, pattern, fixed=TRUE, first=FALSE,all=!first, simplify = TRUE,matches=FALSE,...) {
  restore.point("str.find")
  if (length(pattern)==0) {
    # FIX: the original did warning(str.find("called without pattern"))
    # -- a broken recursive call that itself errors -- followed by a
    # bare stop() and an unreachable return(str).
    stop("str.find called without a pattern")
  }
  # Return the found matches instead of the position
  if (matches) {
    if (fixed)
      pattern = stringr::fixed(pattern)
    if (all) {
      ret = str_extract_all(str,pattern,...)
    } else {
      ret = str_extract(str,pattern,...)
    }
    if (simplify) {
      # For a single input string unwrap the list; no match becomes
      # character(0) instead of NA.
      if (length(str)==1) {
        if (first) {
          if (is.na(ret[[1]]))
            return(character(0))
          return(ret[[1]])
        }
        if (NROW(ret[[1]])==0)
          return(character(0))
        return(ret[[1]])
      }
      if (first) {
        return(ret)
      }
    }
    return(ret)
  }
  # Return position of found strings
  if (fixed)
    pattern = stringr::fixed(pattern)
  if (all) {
    ret = str_locate_all(str,pattern,...)
  } else {
    ret = str_locate(str,pattern,...)
  }
  if (simplify) {
    # For a single input string unwrap the list; no match becomes an
    # empty 0 x 2 matrix instead of NA.
    if (length(str)==1) {
      if (first) {
        if (is.na(ret[[1]]))
          return(matrix(NA,nrow=0,ncol=2))
        return(ret[[1]])
      }
      if (NROW(ret[[1]])==0)
        return(matrix(NA,nrow=0,ncol=2))
      return(ret[[1]])
    }
    if (first) {
      return(ret)
    }
  }
  return(ret)
}
#' Find the start and end position of the first match of `pattern`
#'
#' @param str character vector to search
#' @param pattern the pattern (literal if fixed=TRUE, regex otherwise);
#'   may be a vector, paired with str by recycling
#' @param fixed treat pattern as a literal string (default TRUE)
#' @param perl use perl-style regular expressions (regex mode only)
#' @param ignore logical vector/matrix marking character positions of str
#'   that shall be skipped during the search
#' @param ignore.pos alternative position-matrix form of `ignore`
#' @param only.pos restrict the search to these positions
#'   (ignore/ignore.pos/only.pos are combined by get.ignore(), defined
#'   elsewhere in this package)
#' @return an n x 2 matrix: column 1 = start, column 2 = end position of
#'   the first match; NA rows where no match was found
#' @export
str.locate.first = function(str, pattern, fixed=TRUE, perl=FALSE, ignore =NULL, ignore.pos=NULL,only.pos=NULL) {
  restore.point("str.locate.first")
  # No pattern: return an empty position matrix.
  if (is.null(pattern))
    return(cbind(start=numeric(0),end=numeric(0)))
  #print(ignore.pos)
  # Combine the three ignore specifications into one logical structure.
  ignore = get.ignore(ignore,ignore.pos,only.pos,str=str)
  # Fast path: nothing to ignore.
  if (is.null(ignore)) {
    if (fixed) {
      ret = str_locate(str,stringr::fixed(pattern))
      return(ret)
    }
    # Regex mode: use gregexpr so zero-length matches can be skipped.
    if (length(pattern)==1) {
      reg.list = gregexpr(pattern,str,perl=perl)
    } else {
      # Vectorized pattern: recycle str against pattern pairwise.
      stopifnot(length(str)==length(pattern) | length(str)==1 | length(pattern)==1)
      str = rep(str,length.out=length(pattern))
      fun.gregexpr = function(i)
        gregexpr(pattern[i],str[i],perl=perl)[[1]]
      reg.list = lapply(seq_along(pattern),fun.gregexpr)
    }
    # Extract the first match with positive length from each result.
    fun = function(reg) {
      len=attr(reg,"match.length")
      ind = which(len>0)
      if (length(ind)==0)
        return(c(NA,NA))
      return(reg[ind[1]]+c(0,len[ind[1]]-1))
    }
    ret.mat = t(sapply(reg.list,fun))
    return(ret.mat)
  }
  # Ignore path: search in a copy of str with the ignored characters
  # removed, then map the found positions back to the original string.
  # adapt.ignore.and.get.ci() is defined elsewhere in this package;
  # ci holds the cumulative count of ignored characters per position.
  ret = adapt.ignore.and.get.ci(str,ignore)
  ignore = ret$ignore
  ci = ret$ci
  # Search the stripped strings.
  str.ig = str.remove.ignore(str,ignore)
  restore.point("str.locate.first.ignore")
  ret = str.locate.first(str.ig,pattern,fixed=fixed,perl=perl)
  # Shift positions by the number of ignored chars before each match.
  if (is.matrix(ci)) {
    fun = function(i) {
      ci.shifted = ci[i,!ignore[i,]]
      as.numeric(ret[i,1] + ci.shifted[ret[i,1]])
    }
    left = sapply(1:NROW(ci),fun)
    ret = ret+left-ret[,1]
  } else {
    ret = ret + ci[!ignore][ret[,1]]
  }
  return(ret)
}
# Interactive usage examples for str.locate.first (scratch code, not run
# automatically; compares behavior against base regexpr/gregexpr and stringr)
examples.str.locate.first = function() {
  str.locate.first("Hello",NULL)
  str.locate.first("Hello","l")
  str.locate.first(c("Hello","What","lol"),"l")
  str.locate.first("Hello",c("l","e"))
  str.locate.first(c("Hello","What","lol"),c("l","g","o"))
  str = "Hello ernie!"
  ignore = rep(FALSE,max(nchar(str)))
  ignore[c(2:4)] = TRUE
  pos = str.locate.first(str,"e",ignore=ignore)
  pos
  str.split.at.pos(str,pos[,1],keep.pos=TRUE)
  ignore.pos = cbind(2,4)
  pos = str.locate.first(str,"e",ignore.pos=ignore.pos)
  pos
  str.split.at.pos(str,pos[,1],keep.pos=TRUE)
  str.detect(str,c("A","[a-z]*"),fixed=FALSE)
  str = c("Hello ernie","abcdefg","hello erna")
  pos = str.locate.first(str,"e",ignore=ignore)
  pos
  str.split.at.pos(str,pos,keep.pos=TRUE,pos.mat.like.list=TRUE)
  # Compare regular expression matching
  str = c("012ab0121","adch3b23","0123")
  regexpr("[ab]*",str)
  gregexpr("[ab]*",str)
  gregexpr("[ab]*",str,perl=TRUE)
  str_locate(str,c("b"))
  str_locate(str,"[ab]*")
  str_locate_all(str,"[ab]*")
  str.locate.first(str,"[ab]*",fixed=FALSE)
  str.detect(str,"[ab]*",fixed=FALSE)
}
#' Locate a pattern at the start of strings
#'
#' Returns an n*2 position matrix (start, end), where n is the longer of
#' length(str) and length(pattern). Rows are NA when the string does not
#' begin with the pattern; matching rows hold (1, nchar(pattern)).
#' @export
str.locate.at.start = function(str, pattern, fixed=TRUE) {
  restore.point("str.locate.at.start")
  if (!fixed)
    stop("Not yet implemented...")
  n = max(length(str), length(pattern))
  pat.len = nchar(pattern)
  res = matrix(NA, n, 2)
  # Compare each string's prefix of the pattern's length with the pattern
  hit = which(substring(str, 1, pat.len) == pattern)
  if (length(hit)==0)
    return(res)
  pat.len = rep(pat.len, length.out = n)
  res[hit, 1] = 1
  res[hit, 2] = pat.len[hit]
  return(res)
}
# Interactive usage examples for str.locate.at.start (not run automatically)
examples.str.locate.at.start = function() {
  str.locate.at.start(c("0123456012","1230","012012","01bsf"),"012")
  str.locate.at.start("0123456",c("012","0","1"))
  str.locate.at.end(c("0123456012","1230","012","01bsf"),"012")
}
#' Locate a pattern at the end of strings
#'
#' Returns an n*2 position matrix (start, end), where n is the longer of
#' length(str) and length(pattern). Rows are NA when the string does not
#' end with the pattern.
#' @export
str.locate.at.end = function(str, pattern, fixed=TRUE) {
  restore.point("str.locate.at.end")
  if (!fixed)
    stop("Not yet implemented...")
  n = max(length(str), length(pattern))
  pat.len = nchar(pattern)
  str.len = nchar(str)
  res = matrix(NA, n, 2)
  # Compare each string's suffix of the pattern's length with the pattern
  hit = which(substring(str, str.len - pat.len + 1, str.len) == pattern)
  if (length(hit)==0)
    return(res)
  pat.len = rep(pat.len, length.out = n)
  res[hit, 1] = (str.len - pat.len + 1)[hit]
  res[hit, 2] = str.len[hit]
  return(res)
}
# Interactive usage example for str.locate.at.end (not run automatically)
examples.str.locate.at.end = function() {
  str.locate.at.end(c("0123456012","1230","012","01bsf"),"012")
}
#' Check whether each element of str equals the pattern exactly
#' (the whole string must match, not just a substring)
#' @export
str.matches.pattern = function(str,pattern,fixed=TRUE) {
  if (!fixed)
    stop("Not yet implemented...")
  str == pattern
}
# Helper: pad / recycle the ignore specification to the maximal string width
# and compute the cumulated count of ignored characters (ci) per position.
# Returns list(ignore=<logical matrix>, ci=<numeric matrix>), both with one
# row per string and one column per character position.
adapt.ignore.and.get.ci = function(str,ignore) {
  restore.point("adapt.ignore.and.get.ci")
  maxchar = max(nchar(str))
  if (!is.matrix(ignore) & length(ignore)<maxchar) {
    # Pad a too-short logical vector with FALSE up to maxchar
    ignore.old = ignore
    ignore = rep(FALSE,maxchar)
    ignore[1:length(ignore.old)] = ignore.old
  } else if (is.matrix(ignore) & NCOL(ignore)<maxchar) {
    # Pad a too-narrow logical matrix with FALSE columns.
    # BUG FIX: the row count was previously taken from NROW(ci), but ci is
    # only computed further below; use the old matrix's row count instead.
    ignore.old = ignore
    ignore = matrix(FALSE,NROW(ignore.old),maxchar)
    ignore[,1:NCOL(ignore.old)] = ignore.old
  }
  ci = cumsum.ignore(ignore)
  # Broadcast vector results to one row per string
  if (!is.matrix(ignore)) {
    ignore = matrix(ignore,NROW(str),NROW(ignore),byrow=TRUE)
  }
  if (!is.matrix(ci)) {
    ci = matrix(ci,NROW(ignore),NCOL(ignore),byrow=TRUE)
  }
  return(list(ignore=ignore,ci=ci))
}
#' Finds start and end positions of all substrings that match pattern
#'
#' @param str character vector to search in
#' @param pattern search pattern(s); either str or pattern may be scalar
#' @param fixed if TRUE treat pattern as a literal string, else as a regexp
#' @param perl use perl-style regular expressions (only relevant if fixed=FALSE)
#' @param ignore a logical vector or logical matrix indicating which locations
#'   of str shall be ignored in the search
#' @param ignore.pos alternative ignore specification as a start/end matrix
#' @param only.pos restrict the search to these start/end positions
#' @return a list of n*2 matrices (one per string). The first column is the
#'   start position, second column the end position of each match
#' @export
str.locate.all = function(str, pattern, fixed=TRUE, perl=FALSE, ignore =NULL, ignore.pos=NULL,only.pos=NULL) {
  restore.point("str.locate.all")
  # Consolidate the three ignore specifications (NULL = nothing ignored)
  ignore = get.ignore(ignore,ignore.pos,only.pos,str=str)
  # Simple case: search the full strings
  if (is.null(ignore)) {
    if (fixed) {
      ret = str_locate_all(str,stringr::fixed(pattern))
      return(ret)
    }
    if (length(pattern)==1) {
      reg.list = gregexpr(pattern,str,perl=perl)
    } else {
      # Vectorized over pattern: pair str[i] with pattern[i]
      stopifnot(length(str)==length(pattern) | length(str)==1 | length(pattern)==1)
      str = rep(str,length.out=length(pattern))
      fun.gregexpr = function(i)
        gregexpr(pattern[i],str[i],perl=perl)[[1]]
      reg.list = lapply(seq_along(pattern),fun.gregexpr)
    }
    # Convert a gregexpr result into a start/end matrix,
    # dropping zero-length matches
    fun = function(reg) {
      len=attr(reg,"match.length")
      ind = which(len>0)
      if (length(ind)==0)
        return(matrix(NA,0,2))
      left = reg[ind]
      right = left + len[ind]-1
      mat = matrix(c(left,right),NROW(ind),2)
      return(mat)
    }
    ret.mat = lapply(reg.list,fun)
    return(ret.mat)
  }
  # Ignore case: strip ignored characters, search the shortened strings and
  # translate all match positions back to the original strings.
  # First recycle str so it lines up with pattern / the ignore matrix.
  if (length(str)==1 & length(pattern)>0)
    str = rep(str,length(pattern))
  if (length(str)==1 & is.matrix(ignore))
    str = rep(str,NROW(ignore))
  ret = adapt.ignore.and.get.ci(str,ignore)
  ignore = ret$ignore
  # ci holds the cumulated number of ignored characters up to each position
  ci = ret$ci
  str.ig = str.remove.ignore(str,ignore)
  # NOTE(review): the pattern is passed to str_locate_all without a
  # stringr::fixed() wrapper here, so in this branch it is always treated
  # as a regular expression even when fixed=TRUE — confirm intent.
  pos.list = str_locate_all(str.ig,pattern)
  add.ci.to.pos = function(i) {
    # Positions found in the shortened string str.ig
    pos.mat = pos.list[[i]]
    if (length(pos.mat) == 0)
      return(pos.mat)
    # ci.shifted tells us how much to add
    # to pos.mat positions when translating
    # str.ig positions to str positions
    ci.shifted = ci[i, !ignore[i, ]]
    # One row for each pos.mat
    ci.shifted = matrix(ci.shifted, NROW(pos.mat), length(ci.shifted), byrow = TRUE)
    left.pos.shift = ci.shifted[cbind(seq_len(NROW(pos.mat)), pos.mat[, 1]) ]
    # Shifting both columns by the left shift is sufficient,
    # as pos.mat[,2] always has the same shift as pos.mat[,1]
    pos.mat + left.pos.shift
  }
  lapply(seq_along(pos.list),add.ci.to.pos)
}
# Interactive usage examples for str.locate.all (not run automatically)
examples.str.locate.all = function() {
  str.locate.all("0120121","1")
  str.locate.all(c("0120121","abce","011"),"1")
  str = c("0120121","abce","011bb1")
  ignore = rep(FALSE,max(nchar(str)))
  ignore[c(2:4)] = TRUE
  str.locate.all(str,"1",ignore=ignore)
  ignore.pos = rbind(c(2,4))
  str.locate.all(str,"1",ignore.pos=ignore.pos)
  str.locate.all(str,c("1","b","a"),ignore=ignore)
  str = c("0120121")
  str.locate.all(str,c("1","b","2"),ignore=ignore)
  # Compare regular expression matching
  str = c("012ab0121","adch3b23","0123")
  gregexpr("[ab]*",str)
  str_locate_all(str,"[ab]*")
  str.locate.first(str,"[ab]*",fixed=FALSE)
  str.locate.all(str,"[ab]*",fixed=FALSE)
  str.locate.all(str,c("[ab]*","3+","0*"),fixed=FALSE)
  str.locate.first(str,c("[ab]*","2","0*"),fixed=FALSE)
  str.locate.all(str,"ab",fixed=FALSE)
  # NOTE(review): ret is never defined in this function, so the final
  # statement errors if this scratch function is actually run.
  return(ret)
}
#' Extract the substrings of str given by a position matrix
#'
#' pos is a start/end matrix or a list of such matrices as returned by
#' str.locate.all; in the list case one character vector is returned per
#' string.
#' @export
str.at.pos = function(str,pos) {
  # List of position matrices: recurse element-wise
  if (is.list(pos)) {
    restore.point("str.at.pos.list")
    stopifnot(length(pos)==length(str))
    return(lapply(seq_along(str), function(i) str.at.pos(str[i], pos[[i]])))
  }
  restore.point("str.at.pos.no.list")
  # No positions: empty character vector
  if (length(pos)==0)
    return(character(0))
  substring(str, pos[,1], pos[,2])
}
# Interactive usage example for str.at.pos (not run automatically)
examples.str.at.pos = function() {
  str = c("012ab0121","abce","0had112bb1")
  pos = str.locate.all(str,"[a-z]*",fixed=FALSE)
  pos
  str.at.pos(str,pos)
  # NOTE(review): ret is never defined here; this final statement errors if
  # the scratch function is actually run.
  return(ret)
}
#' Returns a list that contains for each element of str (or pattern) a
#' character vector of all substrings matching the pattern; strings without
#' any match yield an empty vector
#' @export
str.extract.all = function(str, pattern, fixed=FALSE, perl=FALSE, ignore =NULL) {
  found = str.locate.all(str=str, pattern=pattern, fixed=fixed, perl=perl, ignore=ignore)
  str.at.pos(str, found)
}
#' Returns for each element of str (or pattern) the first substring that
#' matches the pattern, or NA if no match could be found
#' @export
str.extract.first = function(str, pattern, fixed=FALSE, perl=FALSE, ignore =NULL) {
  found = str.locate.first(str=str, pattern=pattern, fixed=fixed, perl=perl, ignore=ignore)
  str.at.pos(str, found)
}
# Interactive usage examples for str.extract.first / str.extract.all,
# compared against base regexpr/regmatches (not run automatically)
examples.extract.all = function() {
  str = "12ab12ab"
  regexec("(([0-9]+)([a-z]+))*",str)
  regexec("1",str)
  regexpr("([0-9]+)([a-z]+)",str)
  x <- c("A and B", "A, B and C", "A, B, C and D", "foobar")
  pattern <- "[[:space:]]*(,|and)[[:space:]]"
  ## Match data from regexpr()
  m <- regexpr(pattern, x)
  m
  regmatches(x, m)
  regmatches(x, m, invert = TRUE)
  ## Match data from gregexpr()
  m <- gregexpr(pattern, x)
  regmatches(x, m)
  regmatches(x, m, invert = TRUE)
  str.extract.first(c("0120121","abce","011"),"1")
  str.extract.all(c("0120121","abce","011"),"1")
  # Compare regular expression matching
  str = c("012ab0121","adch3b23","0123")
  str_extract_all(str,"[ab]*")
  str.extract.all(str,"[ab]*")
  str_extract(str,"[ab]*")
  str.extract.first(str,"[ab]*")
  # NOTE(review): ret is never defined here; this final statement errors if
  # the scratch function is actually run.
  return(ret)
}
#' Returns the number of matches of pattern in each element of str
#' (further arguments are passed on to str.locate.all)
str.number.matches = function(str, pattern,...) {
  pos.list = str.locate.all(str, pattern, ...)
  sapply(pos.list, NROW)
}
#' An alternative interface to str.split
#'
#' Splits str at every (or only the first) occurrence of split and, by
#' default, flattens the result into a single character vector.
#' @export
str.tokenize = function(str,split=" ",only.one.split=FALSE,simplify=TRUE,...) {
  tokens = str.split(str, split, first=only.one.split, ...)
  if (simplify && is.list(tokens))
    return(unlist(tokens))
  tokens
}
#' Splits string vectors
#'
#' @param str a vector of strings
#' @param pattern vector of patterns where splits take place (recycled
#'   against str)
#' @param first if TRUE only split at the first match
#' @param keep.match if TRUE the matched split patterns are kept as separate
#'   elements of the result
#' @param ... passed on to str.locate.first / str.locate.all (e.g. fixed,
#'   ignore.pos)
#' @return A list with same length as str. Each list element i contains the
#'   split substrings from str[i]
#' @export
str.split = function(str,pattern, first=FALSE, keep.match = FALSE,...) {
  restore.point("str.split")
  check.str.par(str,list(pattern=pattern))
  stopifnot(length(str)==length(pattern) | length(str)==1 | length(pattern)==1)
  if (length(str)==1)
    str = rep(str,length.out=length(pattern))
  if (first) {
    # Split only at the first occurrence of pattern
    pos = str.locate.first(str=str,pattern=pattern,...)
    return(str.split.at.pos(str,pos,keep.pos=keep.match))
  } else {
    # Split at every occurrence of pattern
    pos = str.locate.all(str=str,pattern=pattern,...)
    restore.point("jhhshf")
    return(str.split.at.pos(str,pos,keep.pos=keep.match))
  }
}
# Interactive usage examples for str.split (not run automatically)
examples.str.split = function() {
  str = "Hi\n\nyou!"
  str.split(str,"\n", keep.match=!TRUE)
  str <- c("aes_afe_f", "qwe.rty", "yui0op[3", "b")
  #split x on the letter e
  str
  str.split(str, "e", keep.match=TRUE)
  str.split(str, "e", first=TRUE, keep.match=TRUE)
  str = c("aes_afe_fe")
  ignore.pos = cbind(1,3)
  str.split(str, "e", keep.match=TRUE, ignore.pos=ignore.pos)
  str.split(str, "e", first=TRUE,keep.match=TRUE, ignore.pos=ignore.pos)
  str = "abscde3823nsd34"
  str.split(str, "[a-z]*", fixed=FALSE, keep.match=TRUE)
  str.split(str, c("[a-z]*","d"), fixed=FALSE, keep.match=TRUE)
  str = c("abscde3823nsd34","8748274")
  str.split(str, c("[a-z]*","d"), fixed=FALSE, keep.match=TRUE)
}
#' Replace the substrings of str at the positions specified by pos
#'
#' @param str a character vector, or a single string
#' @param pos a matrix of substring positions (start, end), or a list of such
#'   matrices if str is a vector; a plain vector is treated as
#'   single-character positions
#' @param new a vector of new strings for each substring position, or a list
#'   of such vectors if length(str)>1
#' @param pos.mat.like.list if TRUE, row i of pos is the (single) position for
#'   str[i], instead of applying all rows to every string
#' @return string (vector) of length(str) in which the substrings have been
#'   replaced
#' @export
str.replace.at.pos = function(str,pos,new,pos.mat.like.list=FALSE) {
  restore.point("str.replace.at.pos")
  # Case: list of position matrices, one per string
  if (is.list(pos)) {
    stopifnot(length(str)==length(pos) & is.list(new) & length(new) == length(pos))
    fun = function(i)
      str.replace.at.pos(str[i],pos[[i]],new[[i]])
    return(lapply(seq_along(str),fun))
  }
  # A plain vector of positions denotes single-character spans
  if (!is.matrix(pos)) {
    pos = cbind(pos,pos)
  }
  # Case: row i of pos belongs to str[i]
  if (pos.mat.like.list) {
    stopifnot(length(str)==NROW(pos))
    fun = function(i)
      str.replace.at.pos(str[i],pos[i,,drop=FALSE],new[i])
    return(lapply(seq_along(str),fun))
  }
  # Case: the same position matrix is applied to every string
  if (length(str)>1) {
    # NOTE(review): this requires length(str)==NROW(pos) although the whole
    # pos matrix is applied to each string below — confirm intent.
    stopifnot(length(str)==NROW(pos))
    fun = function(i)
      str.replace.at.pos(str[i],pos,new)
    return(sapply(seq_along(str),fun))
  }
  # Base case: a single string
  if (NROW(new)==0) return(str)
  if (NROW(pos)>1) {
    # Sort replacement spans from left to right
    ord = order(pos[,1])
    pos = pos[ord,]
    new = new[ord]
  } else {
    # Shortcut: the single span covers the whole string
    if (pos[1,1]==1 & pos[1,2]==nchar(str))
      return(new)
  }
  # Interleave the kept substrings with the replacements, ordered by their
  # start positions, and paste everything back together
  pos.keep = pos.complement(pos,is.sorted=TRUE,end=nchar(str))
  str.keep = str.at.pos(str,pos.keep)
  all.pos = rbind(pos.keep,pos)
  ord = order(all.pos[,1])
  all.str = c(str.keep,new)[ord]
  return(paste(all.str,collapse=""))
}
# Interactive usage examples for str.replace.at.pos (not run automatically)
examples.str.replace.at.pos = function() {
  str = "1234567890"
  pos = rbind(c(7,7),c(4,5))
  new = c("XXX","...")
  str.replace.at.pos(str,pos,new)
  str = c("1234567890","ahgdasdajsdgadhsabd")
  str.replace.at.pos(str,pos,new)
}
# Interactive usage example for has.substr (defined elsewhere in the package)
examples.has.substr = function() {
  str = c("12347382709")
  pattern = c("a","4","56","34","766","b")
  has.substr(str,pattern)
}
#' Replaces in str every occurence of pattern by replacement
#'
#' @param str the string(s) in which replacements take place
#' @param pattern the substring to be replaced; may contain ignore.pattern as
#'   a placeholder matching one ignored region
#' @param replacement the new substrings
#' @param fixed treat pattern as a literal string (TRUE) or a regexp (FALSE)
#' @param perl use perl-style regular expressions
#' @param ignore,ignore.pos,only.pos specify character regions of str that
#'   shall not be searched (see ignore.and.complement.pos)
#' @param ignore.pattern placeholder inside pattern that stands for one
#'   ignored region (default "_IGNORE_")
#' @param ... passed on to gsub / sub
#' @return a string (vector)
#' @export
str.replace = function(str,pattern,replacement,fixed=TRUE,perl=FALSE,ignore=NULL, ignore.pos=NULL, only.pos=NULL,ignore.pattern="_IGNORE_",...) {
  # Vectorized case: recycle str, pattern and replacement to a common length
  # and handle each triple separately
  len = max(length(str),length(pattern),length(replacement))
  if (len > 1) {
    ret = sapply(1:len, function (i,...) {
      str.replace(str[min(length(str),i)],
                  pattern[min(length(pattern),i)],
                  replacement[min(length(replacement),i)],
                  fixed, perl,ignore,ignore.pos,only.pos,...)
    },...)
    return(ret)
  }
  restore.point("str.replace.single")
  # pos splits str into alternating ignored / searchable regions
  pos = ignore.and.complement.pos(ignore,ignore.pos,only.pos)
  is.ignore = attr(pos,"is.ignore")
  if (sum(is.ignore)>0) {
    if (has.substr(pattern,ignore.pattern)) {
      # The pattern itself contains the _IGNORE_ placeholder: try to match
      # pattern with the placeholder standing for each ignored region in turn
      ig.pos=pos[is.ignore,,drop=FALSE]
      repl.pos= matrix(NA,NROW(ig.pos),2)
      new.str = vector("character", NROW(ig.pos))
      # NOTE(review): the next assignment is leftover debug code; i is
      # immediately overwritten by the for loop.
      i = 2
      for (i in 1:NROW(ig.pos)) {
        # Replace ignored area i with placeholder ignore.pattern
        str.pl = str.replace.at.pos(str, ig.pos[i,,drop=FALSE], ignore.pattern)
        # Search for pattern in replaced string: get position and string
        rpos = str.locate.first(str.pl,pattern,fixed,perl, ignore.pos = ig.pos[-i,,drop=FALSE])
        ostr = str.at.pos(str.pl,rpos)
        # Correct the end position for the length difference between the
        # placeholder and the actual ignored region
        rpos[,2] = rpos[,2]-nchar(ignore.pattern)+diff(ig.pos[i,])+1
        # Perform the replacement, then restore the ignored region's text
        nstr = sub(pattern, replacement,ostr,fixed=fixed,perl=perl)
        nstr = sub(ignore.pattern,substring(str,ig.pos[i,1],ig.pos[i,2]),nstr,fixed=TRUE)
        repl.pos[i,] = rpos
        new.str[i] = nstr
      }
      # Drop failed and duplicate matches, then splice in the replacements
      rem = duplicated(repl.pos) | is.na(repl.pos[,1])
      repl.pos = repl.pos[!rem,,drop=FALSE]
      new.str = new.str[!rem]
      mod.str = str.replace.at.pos(str, repl.pos,new.str)
      return(mod.str)
    } else {
      # Can simply search over the separate not ignored substrings
      sub = str.at.pos(str,pos)
      not.ignore = !attr(pos,"is.ignore")
      ret = gsub(pattern, replacement,sub[not.ignore],fixed=fixed,perl=perl,...)
      sub[not.ignore] = ret
      return(paste0(sub,collapse=""))
    }
  } else {
    # Nothing is ignored: a plain gsub suffices
    return(gsub(pattern, replacement,str,fixed=fixed,...))
  }
}
# Interactive usage examples for str.replace, including the _IGNORE_
# placeholder mechanism (not run automatically)
examples.str.replace = function() {
  str = c("12345678901234567890")
  pattern = c("34","12")
  replacement = c("AB","Holla die Waldfee")
  pos = cbind(1,10)
  str.replace(str,pattern,replacement, ignore.pos=pos)
  str.replace(str,pattern,replacement, only.pos=pos)
  str.replace(str,pattern,replacement)
  str = "int{5*2}*{2*3}"
  pattern = "int{_IGNORE_}"
  replacement = "integer{_IGNORE_}"
  pos = cbind(c(5,11),c(7,13))
  str.replace(str,pattern,replacement, ignore.pos=pos)
}
#' Performs sequentially all replacements of pattern[i] by replacement[i]
#' on the same strings str
#'
#' Replacements are applied one after the other, so later patterns also
#' match text introduced by earlier replacements. A slow implementation.
#'
#' @param str character vector to transform
#' @param pattern vector of patterns, applied in order
#' @param replacement vector of replacements, parallel to pattern
#' @param ... passed on to str.replace
#' @export
str.replace.list = function(str,pattern,replacement,...) {
  restore.point("str.replace.list")
  # BUG FIX: use seq_len instead of 1:NROW(pattern) so that an empty
  # pattern vector performs no replacement instead of indexing with NA.
  for (i in seq_len(NROW(pattern))) {
    str = str.replace(str,pattern[i],replacement[i],...)
  }
  return(str)
}
# Interactive usage example for str.replace.list (not run automatically)
examples.str.replace.list = function() {
  str.replace.list("na dies ist doch",c("a","e"),c("A","E"))
}
# Debugging helper: tabulate a blocks object (levels plus outer and inner
# positions) together with the outer substrings of str.
show.blocks = function(blocks, str) {
  outer = blocks$outer
  inner = blocks$inner
  data.frame(
    lev = blocks$levels,
    out.l = outer[,1], out.r = outer[,2],
    in.l = inner[,1], in.r = inner[,2],
    str = substring(str, outer[,1], outer[,2])
  )
}
# Debugging helper: tabulate a position matrix with the corresponding
# substrings of str. Returns pos unchanged when it has no rows.
show.pos = function(pos,str) {
  if (NROW(pos)==0)
    return(pos)
  data.frame(left = pos[,1], right = pos[,2], str = substring(str, pos[,1], pos[,2]))
}
# Helper function for str.replace.by.blocks
# An "island" is a region corresponding to the interior of one block.
# An island has i) mountains: sub regions with level above the island's level
#              ii) plains : the pos.complement to mountains within the island
# Tries to replace all matches of the pattern inside the island, where the
# pattern's plain parts must match the island's plains and each _SUB_
# placeholder corresponds to one mountain.
# Returns list(replaced=<logical>, new=<new island string>, old=<old string>).
replace.island = function(island.row, str,blocks, pattern.plains, level,pattern.number.mountains,replacement,sub.txt,fixed=TRUE) {
  restore.point("replace.island")
  left = blocks$inner[island.row,1]
  right = blocks$inner[island.row,2]
  island.str = substring(str,left,right)
  # Mountains: blocks one nesting level deeper lying inside this island
  mountains= blocks$inner[
    which(blocks$levels == level+1
          & blocks$inner[,1]>=left
          & blocks$inner[,2]<=right),,drop=FALSE]
  plains = pos.complement(mountains, start=left, end=right)
  # The following bare expressions are debug output; their results are
  # discarded when the function runs non-interactively
  show.blocks(blocks,str)
  island.row
  show.pos(cbind(left,right),str)
  show.pos(mountains,str)
  show.pos(plains,str)
  plains.str = str.at.pos(str,plains)
  # The island has not enough plains to match the pattern
  if (length(plains.str)<length(pattern.plains))
    return(list(replaced=FALSE,new=island.str,old=island.str))
  # Pattern has no mountains, i.e. we simply ignore the mountains in the replacement
  if (length(pattern.plains)==1) {
    ignore.pos = cbind(mountains-left+1)
    new.island.str = str.replace(island.str, pattern.plains,ignore.pos = ignore.pos,fixed=fixed)
    return(list(replaced= new.island.str!=island.str,new.island.str,island.str))
  }
  # We have an island with mountains. We search for matching chains of plains
  # Search through the different pattern plains
  # Starting plain: must match at the end of a plain
  i = 1
  first.pos = str.locate.at.end(plains.str,pattern.plains[i],fixed=fixed)
  matches = !is.na(first.pos[,1])
  if (sum(matches)==0)
    return(list(replaced=FALSE,new=island.str,old=island.str))
  # Center plains: must match a whole plain completely
  if (length(pattern.plains)>2) {
    for (i in 2:(length(pattern.plains)-1)) {
      new.matches = str.matches.pattern(plains.str[-(1:(i-1))], pattern.plains[i],fixed=fixed)
      matches = matches & c(new.matches,rep(FALSE,i-1))
    }
  }
  # The last plain must match at the start of a plain
  i = length(pattern.plains)
  last.pos = str.locate.at.start(plains.str,pattern.plains[i],fixed=fixed)
  matches = matches & c(!is.na(last.pos[,1])[-(1:(i-1))], rep(FALSE,i-1))
  if (sum(matches)==0)
    return(list(replaced=FALSE,new=island.str,old=island.str))
  # We have found matches to be replaced
  start.with.mountain = plains[1,1]>mountains[1,1]
  mountains.str = str.at.pos(str,mountains)
  nm = pattern.number.mountains
  np =length(pattern.plains)
  # The following loop construction rules out overlapping replacements
  counter = 0
  new.str = NULL
  replace.pos = matrix(NA,0,2)
  match.ind = 1
  while (match.ind <= length(matches)-np+1) {
    # Advance to the next match at or after match.ind
    match.ind = which(matches & match.ind <= 1:length(matches) )[1]
    if (is.na(match.ind))
      break
    # Substitute the matched mountains into the _<sub.txt>k_ placeholders
    # of the replacement string
    new.str = c(new.str,
      str.replace.list(replacement,
        pattern=paste0("_",sub.txt,1:nm,"_"),
        replacement=mountains.str[(match.ind:(match.ind+nm))+start.with.mountain])
    )
    # Position of the full match relative to the island string
    replace.left = plains[match.ind,1] + first.pos[match.ind,1]-left
    replace.right = plains[match.ind+np-1,1] + last.pos[match.ind+np-1,2]-left
    replace.pos = rbind(replace.pos,c(replace.left,replace.right))
    # The last plain may be overlapping
    # This is a bit dirty.... need to think about some better code...
    match.ind = match.ind + max(np-1,1)
  }
  # Debug output (result discarded)
  show.pos(replace.pos, island.str)
  new.island.str = str.replace.at.pos(island.str,replace.pos, new.str)
  return(list(replaced=TRUE,new=new.island.str,old=island.str))
}
#' Helper: shift a position matrix after a substring of length len.old
#' starting at left has been replaced by one of length len.new.
#' All arguments except pos may be vectors describing several replacements,
#' which are applied in order.
adapt.pos.after.replace = function(pos,left,len.old,len.new) {
  # Several replacements: apply each one in turn
  if (length(left)>1) {
    for (k in seq_along(left)) {
      pos = adapt.pos.after.replace(pos, left[k], len.old[k], len.new[k])
    }
    return(pos)
  }
  restore.point("adapt.pos.after.replace")
  shift = len.new - len.old
  # Shift every start / end coordinate that lies behind the replacement
  move.start = pos[,1] > left
  pos[move.start,1] = pos[move.start,1] + shift
  move.end = pos[,2] > left
  pos[move.end,2] = pos[move.end,2] + shift
  return(pos)
}
#' Helper: shift both the inner and outer position matrices of a blocks
#' object after a replacement (arguments forwarded to adapt.pos.after.replace)
adapt.blocks.after.replace = function(block,...) {
  block$inner = adapt.pos.after.replace(block$inner,...)
  block$outer = adapt.pos.after.replace(block$outer,...)
  block
}
#' Replaces in str every occurence of pattern by replacement, respecting
#' nested blocks (e.g. matched braces)
#'
#' @param str the string in which replacements take place
#' @param pattern the pattern to be replaced; may contain the placeholder
#'   _SUB_ (see sub.txt) which matches one whole nested block
#' @param replacement the replacement; placeholders _SUB1_, _SUB2_, ... refer
#'   to the blocks matched by the corresponding _SUB_ in pattern
#' @param blocks a block object retrieved from str.blocks.pos; alternatively,
#'   you can provide block.start and block.end
#' @param sub.txt stem of the placeholder name (default "SUB")
#' @param block.start string with which the blocks start, e.g. "("
#' @param block.end string with which the blocks end, e.g. ")"
#' @param block.ignore character positions ignored when locating blocks
#' @param use.levels restrict replacement to these block nesting levels
#' @param fixed treat the pattern parts as literal strings
#' @param only.replace.smaller.than if not NULL only replaces matches whose
#'   number of characters is less or equal to only.replace.smaller.than
#' @param only.replace.larger.than if not NULL only replaces matches whose
#'   number of characters is bigger or equal to only.replace.larger.than
#' @return a string
#' @export
str.replace.by.blocks = function(str,pattern,replacement,blocks=NULL,sub.txt="SUB",block.start, block.end,block.ignore=NULL,use.levels=NULL,fixed=TRUE, only.replace.smaller.than=NULL, only.replace.larger.than=NULL) {
  restore.point("str.replace.by.blocks")
  # Vectorized case: handle each string separately
  if (length(str)>1) {
    stopifnot(is.null(blocks))
    # BUG FIX: the recursive call previously passed block.ignore=bock.ignore
    # (a typo), which errored with "object 'bock.ignore' not found" whenever
    # str had more than one element.
    new.str = sapply(str,str.replace.by.blocks,pattern=pattern,replacement=replacement,blocks=blocks,sub.txt=sub.txt,block.start=block.start, block.end=block.end,block.ignore=block.ignore,use.levels=use.levels,fixed=fixed, only.replace.smaller.than=only.replace.smaller.than, only.replace.larger.than=only.replace.larger.than)
    return(new.str)
  }
  # Locate the nested blocks unless they were supplied by the caller
  if (is.null(blocks))
    blocks = str.blocks.pos(str, start=block.start, end=block.end, ignore=block.ignore, fixed=fixed)
  # Guarantee a level-0 block spanning the whole string
  if (length(blocks$levels)==0) {
    blocks = blocks.add.level.0(blocks,str)
  } else if ( blocks$levels[1]!=0) {
    blocks = blocks.add.level.0(blocks,str)
  }
  levels = blocks$levels
  if (is.null(use.levels))
    use.levels = unique(levels)
  sub.pattern = paste0("_",sub.txt,"_")
  # Split pattern into the plain parts before / between / after the
  # _SUB_ placeholders
  pattern.plains = str.at.pos(pattern,
    pos.complement(str_locate_all(pattern,sub.pattern)[[1]], str=pattern))
  pattern.number.mountains = str.number.matches(pattern,sub.pattern,fixed=TRUE)
  # Replace from the innermost level outwards
  for (level in rev(use.levels)) {
    island.rows = which(levels==level)
    ret =lapply(island.rows,replace.island,str=str,blocks=blocks, pattern.plains=pattern.plains, level=level,pattern.number.mountains=pattern.number.mountains,replacement=replacement,fixed=fixed,sub.txt=sub.txt)
    df = data.frame(data.table::rbindlist(ret),island.rows)
    df = df[df[,"replaced"],]
    # Optional size filters on the replaced substrings
    if (!is.null(only.replace.larger.than))
      df = df[nchar(df$old)>=only.replace.larger.than,]
    if (!is.null(only.replace.smaller.than))
      df = df[nchar(df$old)<=only.replace.smaller.than,]
    str = str.replace.at.pos(str,blocks$inner[df$island.rows,,drop=FALSE],df$new)
    # Positions shift after the replacement; adapt the block object
    blocks = adapt.blocks.after.replace(blocks,left=blocks$inner[df$island.rows,],len.old=nchar(df$old),len.new=nchar(df$new))
  }
  return(str)
}
# Interactive usage examples for str.replace.by.blocks: rewriting LaTeX
# fractions into plain division (not run automatically)
examples.str.replace.by.blocks = function() {
  # Replace latex fractions
  str = "5+\\frac{x^2+x^2}{1+\\frac{2}{x*5}}*2"
  str.replace.by.blocks(str,"\\frac{_SUB_}{_SUB_}","(_SUB1_)/(_SUB2_)",
    block.start = "{", block.end = "}")
  str.replace.by.blocks(str,"\\frac{_SUB_}{_SUB_}","(_SUB1_)/(_SUB2_)",
    block.start = "{", block.end = "}",
    only.replace.larger.than=20)
  str.replace.by.blocks(str,"\\frac{_SUB_}{_SUB_}","(_SUB1_)/(_SUB2_)",
    block.start = "{", block.end = "}",
    only.replace.smaller.than=20)
  str ="-\\frac{\\sigma_{m}-\\beta\\sigma_{b}}{\\beta-1}=\\frac{\\sigma_{m}-\\beta\\sigma_{b}}{1-\\beta}"
  str ="\\frac{1}{2}=\\frac{3}{4}"
  str.replace.by.blocks(str,"\\frac{_SUB_}{_SUB_}","(_SUB1_)/(_SUB2_)",
    block.start = "{", block.end = "}")
}
#' Prepend a level-0 block spanning the whole string to a blocks object
blocks.add.level.0 = function(blocks,str,end=nchar(str)) {
  blocks$inner  = rbind(c(1,end), blocks$inner)
  blocks$outer  = rbind(c(1,end), blocks$outer)
  blocks$levels = c(0, blocks$levels)
  blocks
}
#' Returns position matrices describing blocks like brackets ( ) or quoted
#' parts "text"
#'
#' Nested blocks are allowed. The result describes the outer positions
#' (including the delimiters), the inner positions (content only) and the
#' nesting level of each block.
#'
#' @param str a single string
#' @param start string with which a block starts, e.g. "(" or '"'
#' @param end string with which a block ends; may equal start (quote style)
#' @param ignore,ignore.start,ignore.end logical masks of character positions
#'   that shall be ignored when locating the delimiters
#' @param fixed,fixed.start,fixed.end treat the delimiters as literal strings
#'   (TRUE) or regular expressions (FALSE)
#' @param verbose print diagnostics when starts and ends do not match up
#' @return list(outer=<n*2 matrix>, inner=<n*2 matrix>, levels=<numeric>)
#' @export
str.blocks.pos= function(str, start, end,
  ignore = NULL, ignore.start = ignore, ignore.end = ignore,
  fixed = TRUE,fixed.start = fixed, fixed.end = fixed, verbose=TRUE) {
  restore.point("str.blocks.pos")
  if (length(str) > 1)
    stop("Not yet implemented for vectors of strings")
  # Blocks with distinct delimiters like (),{},[], begin end, ...
  if (start != end) {
    start.pos = str.locate.all(str, start, ignore=ignore.start,fixed=fixed.start)[[1]]
    # BUG FIX: the end delimiter was previously located with fixed=fixed.start;
    # use fixed.end so start and end can have different fixed settings.
    end.pos = str.locate.all(str, end, ignore=ignore.end,fixed=fixed.end)[[1]]
    # Validity check
    if (NROW(start.pos) != NROW(end.pos)) {
      if (verbose) {
        cat(paste0("Error when finding ",start,end, "block in"))
        cat(paste0("\n",str))
      }
      stop("Number of block starts and ends differs!")
    }
    n = NROW(start.pos)
    if (n==0)
      return(list(inner=start.pos, outer=start.pos, levels=c()))
    pos.levels = rep(NA,n)
    # Compute nesting levels: +1 at every block start, -1 at every block end,
    # in order of appearance in str
    all = c(start.pos[,2],end.pos[,1])
    ord = order(all)
    ind = c(1:n,1:n)[ord]
    open = c(rep(1,n),rep(-1,n))[ord]
    levels = cumsum(open)
    pos.levels[ind[open==1]] = levels[open==1]
    # Match every end delimiter to its nearest unmatched preceding start.
    # Highly inefficient (quadratic), should write C code here.
    end.ord = rep(NA,n)
    used.start = rep(FALSE,n)
    for (i in 1:n) {
      ind = which(start.pos[,2]<end.pos[i,1] & !used.start)
      ind = ind[length(ind)]
      used.start[ind]=TRUE
      end.ord[i]=ind
    }
    end.pos[end.ord,] = end.pos
    return(list(outer=cbind(start.pos[,1],end.pos[,2]),
                inner=cbind(start.pos[,2]+1,end.pos[,1]-1),
                levels=pos.levels))
  # Blocks with identical delimiters like "" or ''
  } else {
    pos = str.locate.all(str, start, ignore=ignore.start, fixed=fixed)[[1]]
    n = NROW(pos)
    if (n>0) {
      if ((n %% 2) != 0)
        stop(paste("Number of block starts and ends differs! Need even number of not ignored", start))
      # Delimiters pair up sequentially: 1st with 2nd, 3rd with 4th, ...
      start.pos = pos[seq(1,n,by=2),,drop=FALSE]
      end.pos = pos[seq(2,n,by=2),,drop=FALSE]
      return(list(inner=cbind(start.pos[,2]+1,end.pos[,1]-1),
                  outer=cbind(start.pos[,1],end.pos[,2]),
                  levels=rep(1,n/2)))
    } else {
      return(list(inner=pos, outer=pos, levels=c()))
    }
  }
}
# Interactive usage example for str.blocks.pos (not run automatically)
examples.str.blocks.pos = function() {
  str = '1+(5*(2+3)+(2+(4-1)))'
  #      123456789012345678901
  str.blocks.pos(str,"(",")")
}
|
c037ca5f60b05b4e719112e4b61aeecac0f0e128
|
399d81b985f21f7e6824df37c1669165ddecf3bf
|
/cachematrix.R
|
82a4e379b8c343f767846238c1744354fb2e6121
|
[] |
no_license
|
boolean10/ProgrammingAssignment2
|
47620a4ce207759d58635c4b939babc3cf1ae2c5
|
271ed52ccb5ab64e11c546913334a726897a078f
|
refs/heads/master
| 2021-01-20T23:46:00.263665
| 2014-08-24T21:58:31
| 2014-08-24T21:58:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 800
|
r
|
cachematrix.R
|
## S. Jackson
## R Programming Assignment 2
##
## Creates a "cache matrix" object: a list of closures wrapping a square
## matrix x together with a cached copy of its inverse. The cached inverse
## is invalidated (reset to NULL) whenever a new matrix is stored via set().
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(new_matrix) {
    # Replace the stored matrix and drop the stale cached inverse
    x <<- new_matrix
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setInverse <- function(im) {
    cached_inverse <<- im
  }
  getInverse <- function() {
    cached_inverse
  }
  # Public interface: four accessor closures
  list(
    set = set,
    get = get,
    setInverse = setInverse,
    getInverse = getInverse
  )
}
## Returns the inverse of the special "matrix" created by makeCacheMatrix:
## serves the cached inverse (with a message) when available, otherwise
## computes it with solve(), stores it in the cache and returns it.
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setInverse(inverse)
  inverse
}
|
36ebec4152fd70124b91dd8fffb4714069b253b3
|
9020f49b910697b007774eef4a4ccbf90a82e4c6
|
/R/ladder-fuel-metrics_density_tls_als_uav_zeb.R
|
ffe605e8c68156c9ab0a66ee0635d7fd3ad350de
|
[] |
no_license
|
seanreilly66/tls_data_processing
|
307b1f28c07eef520a863c3c3e059ee7d7ed78a1
|
3a5799ceb1af52e0378e7f3122f64d06e09568a8
|
refs/heads/main
| 2023-02-27T23:34:47.264549
| 2021-02-09T02:35:11
| 2021-02-09T02:35:11
| 310,700,481
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,698
|
r
|
ladder-fuel-metrics_density_tls_als_uav_zeb.R
|
# ==============================================================================
#
# TLS, UAV, ALS, ZEB ladder fuel calculation using density
#
# ==============================================================================
#
# Author: Sean Reilly, sean.reilly66@gmail.com
#
# Created: 6 Dec 2020
# Last commit: 6 Dec 2020
#
# Status: Development
#
# ==============================================================================
#
# Description:
#
# Computes TLS ladder fuel metrics up to 8m using density
#
# "tls_ladder_fuel_1to2" "tls_ladder_fuel_7to8"
# "tls_ladder_fuel_1to5" "tls_ladder_fuel_1to3"
# 'tls_ladder_fuel_1to4
#
# ==============================================================================
#
# User inputs:
#
# tls_las_folder = Folder location for TLS .las files
# tls_las_files = list.files function to get tls files to be processed. Can be modified
# with a pattern to restrict search
# resolutions = Vector of resolutions (in meters) to use for chm and canopy cover
# out_file = output .csv file name
#
# ==============================================================================
#
# Package dependencies:
#
# sp, raster, lidR, tidyverse, glue
#
# ==============================================================================
#
# Known problems:
#
# ==============================================================================
library(lidR)
library(tidyverse)
library(glue)
# ================================= User inputs ================================
# Folder locations and file lists for each sensor type (TLS, ALS, UAV, ZEB).
# File name patterns ('tls', 'als', 'c6', 'zeb') select the point clouds
# belonging to that sensor.
#tls_las_folder <- 'data/las'
tls_las_folder <- 'D:/c1 - Pepperwood/c1_DEMnorm_las_plot'
tls_las_files <- list.files(tls_las_folder, pattern = 'tls', full.names = TRUE)
als_las_folder <- 'D:/c1 - Pepperwood/c1_ALS_normplot'
als_las_files <- list.files(als_las_folder, pattern = 'als', full.names = TRUE)
uav_las_folder <- 'D:/c6 - Saddle Mtn/c6_uas'
uav_las_files <- list.files(uav_las_folder, pattern = 'c6', full.names = TRUE)
zeb_las_folder <- 'D:/c1 - Pepperwood/c1_zeb_cut2plot'
zeb_las_files <- list.files(zeb_las_folder, pattern = 'zeb', full.names = TRUE)
# Plot metadata exported from Airtable (not used in the TLS section below)
banner_data <- read_csv('D:/Analyses/Ladder fuels-airtable.csv')
# Output .csv file for the computed metrics
# out_file <- 'data/voxel_ladder-fuels.csv'
out_file <- 'D:/Analyses/output/c6_ladder-fuels_metrics_uas_210129_2.csv'
# ============ Compute density based ladder fuels metrics for TLS data ===========
# Ladder fuel metric for a band (a, b] = (# returns with a < Z <= b) /
# (# returns with Z <= b), computed per plot from the TLS point cloud.
# The template tibble pins the output columns/types even when no files match.
tls_template <- tibble(
  campaign = numeric(),
  plot = numeric(),
  tls_ladder_fuel_1to2 = numeric(),
  tls_ladder_fuel_1to3 = numeric(),
  tls_ladder_fuel_1to4 = numeric(),
  tls_ladder_fuel_7to8 = numeric()
)
tls_rows <- lapply(tls_las_files, function(tls_file) {
  # Campaign and plot IDs are embedded in the file name (e.g. "c1_..._p12")
  campaign <- as.numeric(str_extract(tls_file, '(?<=c)[:digit:]'))
  plot <- as.numeric(str_extract(tls_file, '(?<=p)[:digit:]+'))
  message('processing TLS campaign ', campaign, ' plot ', plot)
  # select = '' loads coordinates only; Z holds normalized heights
  z <- readLAS(tls_file, select = '')$Z
  tibble(
    campaign = campaign,
    plot = plot,
    tls_ladder_fuel_1to2 = sum(z > 1 & z <= 2) / sum(z <= 2),
    tls_ladder_fuel_1to3 = sum(z > 1 & z <= 3) / sum(z <= 3),
    tls_ladder_fuel_1to4 = sum(z > 1 & z <= 4) / sum(z <= 4),
    tls_ladder_fuel_7to8 = sum(z > 7 & z <= 8) / sum(z <= 8)
  )
})
tls_combine <- bind_rows(tls_template, tls_rows)
# ============ Compute density based ladder fuels metrics for ALS data ===========
# Ladder fuel metric for a band (a, b] = (# returns with a < Z <= b) /
# (# returns with Z <= b), per plot, from height-normalized ALS point clouds.
als_combine <- tibble(
  campaign = numeric(),
  plot = numeric(),
  als_ladder_fuel_1to2 = numeric(),
  als_ladder_fuel_1to3 = numeric(),
  als_ladder_fuel_1to4 = numeric(),
  als_ladder_fuel_1to5 = numeric(),
  als_ladder_fuel_7to8 = numeric()
)
for (als_file in als_las_files) {
  # Campaign and plot IDs are embedded in the file name (e.g. "c1_..._p12")
  campaign <- str_extract(als_file, '(?<=c)[:digit:]') %>%
    as.numeric()
  plot <- str_extract(als_file, '(?<=p)[:digit:]+') %>%
    as.numeric()
  message('processing als campaign ', campaign, ' plot ', plot)
  # select = '' loads coordinates only; Z holds normalized heights
  als_las <- als_file %>%
    readLAS(select = '')
  als_metric <- as_tibble(als_las$Z) %>%
    rename(Z='value') %>%
    summarize(
      als_ladder_fuel_1to2 = sum(Z > 1 & Z <= 2) / sum(Z <= 2),
      als_ladder_fuel_1to3 = sum(Z > 1 & Z <= 3) / sum(Z <= 3),
      als_ladder_fuel_1to4 = sum(Z > 1 & Z <= 4) / sum(Z <= 4),
      # Fix: 1to5 is declared in the schema above but was never computed,
      # which left an all-NA column in the output; compute it like the rest.
      als_ladder_fuel_1to5 = sum(Z > 1 & Z <= 5) / sum(Z <= 5),
      als_ladder_fuel_7to8 = sum(Z > 7 & Z <= 8) / sum(Z <= 8)
    ) %>%
    add_column(campaign, plot, .before = 1)
  als_combine <- als_combine %>%
    add_row(als_metric)
}
# ============ Compute density based ladder fuels metrics for UAV data ===========
# Same band-density ladder fuel metric as the other sensors, per plot.
uav_combine <- tibble(
  campaign = numeric(),
  plot = numeric(),
  uav_ladder_fuel_1to2 = numeric(),
  uav_ladder_fuel_1to3 = numeric(),
  uav_ladder_fuel_1to4 = numeric(),
  uav_ladder_fuel_1to5 = numeric(),
  uav_ladder_fuel_7to8 = numeric()
)
for (uav_file in uav_las_files) {
  # Campaign and plot IDs are embedded in the file name (e.g. "c6_..._p12")
  campaign <- str_extract(uav_file, '(?<=c)[:digit:]') %>%
    as.numeric()
  plot <- str_extract(uav_file, '(?<=p)[:digit:]+') %>%
    as.numeric()
  message('processing uav campaign ', campaign, ' plot ', plot)
  # select = '' loads coordinates only; Z holds normalized heights
  uav_las <- uav_file %>%
    readLAS(select = '')
  uav_metric <- as_tibble(uav_las$Z) %>%
    rename(Z='value') %>%
    summarize(
      # Fix: use the tibble column Z (identical data) instead of reaching
      # back to uav_las$Z, matching the other sensor blocks.
      uav_ladder_fuel_1to2 = sum(Z > 1 & Z <= 2) / sum(Z <= 2),
      uav_ladder_fuel_1to3 = sum(Z > 1 & Z <= 3) / sum(Z <= 3),
      uav_ladder_fuel_1to4 = sum(Z > 1 & Z <= 4) / sum(Z <= 4),
      # Fix: 1to5 is declared in the schema above but was never computed,
      # which left an all-NA column in the output; compute it like the rest.
      uav_ladder_fuel_1to5 = sum(Z > 1 & Z <= 5) / sum(Z <= 5),
      uav_ladder_fuel_7to8 = sum(Z > 7 & Z <= 8) / sum(Z <= 8)
    ) %>%
    add_column(campaign, plot, .before = 1)
  uav_combine <- uav_combine %>%
    add_row(uav_metric)
}
# ============ Compute density based ladder fuels metrics for ZEB data ===========
# Same band-density ladder fuel metric as the other sensors, per plot.
zeb_combine <- tibble(
  campaign = numeric(),
  plot = numeric(),
  zeb_ladder_fuel_1to2 = numeric(),
  zeb_ladder_fuel_1to3 = numeric(),
  zeb_ladder_fuel_1to4 = numeric(),
  zeb_ladder_fuel_1to5 = numeric(),
  zeb_ladder_fuel_7to8 = numeric()
)
for (zeb_file in zeb_las_files) {
  # Campaign and plot IDs are embedded in the file name (e.g. "c1_..._p12")
  campaign <- str_extract(zeb_file, '(?<=c)[:digit:]') %>%
    as.numeric()
  plot <- str_extract(zeb_file, '(?<=p)[:digit:]+') %>%
    as.numeric()
  message('processing zeb campaign ', campaign, ' plot ', plot)
  # select = '' loads coordinates only; Z holds normalized heights
  zeb_las <- zeb_file %>%
    readLAS(select = '')
  zeb_metric <- as_tibble(zeb_las$Z) %>%
    rename(Z='value') %>%
    summarize(
      zeb_ladder_fuel_1to2 = sum(Z > 1 & Z <= 2) / sum(Z <= 2),
      zeb_ladder_fuel_1to3 = sum(Z > 1 & Z <= 3) / sum(Z <= 3),
      zeb_ladder_fuel_1to4 = sum(Z > 1 & Z <= 4) / sum(Z <= 4),
      # Fix: 1to5 is declared in the schema above but was never computed,
      # which left an all-NA column in the output; compute it like the rest.
      zeb_ladder_fuel_1to5 = sum(Z > 1 & Z <= 5) / sum(Z <= 5),
      zeb_ladder_fuel_7to8 = sum(Z > 7 & Z <= 8) / sum(Z <= 8)
    ) %>%
    add_column(campaign, plot, .before = 1)
  zeb_combine <- zeb_combine %>%
    add_row(zeb_metric)
}
# ================================= banner data ================================
# Field ("banner") observations: strip the 'p' prefix from plot labels, then
# average the traditional ladder fuel scores within each plot so they join
# one-to-one with the sensor-derived metrics.
banner_data$Plot <- sub('p', '', banner_data$Plot, fixed = TRUE)
banner_summary <- banner_data %>%
  group_by(Plot) %>%
  summarize(across(
    c(trad_ladder_fuel_1to2, trad_ladder_fuel_1to3, trad_ladder_fuel_1to4),
    ~ mean(.x, na.rm = TRUE)
  )) %>%
  rename(plot = Plot) %>%
  mutate(plot = as.numeric(plot))
# ==============================================================================
# Join all sensor metrics on campaign + plot; banner field data only carries
# plot IDs, so it joins on plot alone.
combined_metrics <- tls_combine %>%
  full_join(als_combine, by = c('campaign','plot')) %>%
  full_join(zeb_combine, by = c('campaign','plot')) %>%
  full_join(uav_combine, by = c('campaign','plot')) %>%
  full_join(banner_summary, by = 'plot')
write.csv(combined_metrics, out_file)
# ==============================================================================
# Fix: a stray second write.csv(uav_combine, out_file) used to follow here and
# silently overwrote the combined metrics just written to the same path,
# discarding the joined result. Removed; export uav_combine to its own file if
# a UAV-only table is needed.
|
8a19be168f076417b6c14b9078f9b85cc8c88b4b
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/PoweR/R/statcompute.R
|
a443db941e975c81bf3c99980a558fdcb2ca0e7d
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,884
|
r
|
statcompute.R
|
# Compute a goodness-of-fit test statistic (and, when available, its p-value
# and per-level decisions) for a data vector, dispatching to the compiled
# routine 'stat<index>' registered in the PoweR shared library.
#
# Args:
#   stat.index: integer index of the test statistic, in 1..(number of
#               registered 'stat*' routines).
#   data:       numeric vector of observations (length >= 2).
#   levels:     significance levels at which decisions are made.
#   critvalL:   optional left-tail critical values, one per level.
#   critvalR:   optional right-tail critical values, one per level.
#   alter:      alternative-hypothesis code, one of 0:4.
#   stat.pars:  optional numeric parameters for the statistic; NULL/NA means
#               the C-side defaults are used.
#
# Returns a list: statistic, pvalue (NA if the routine does not compute one),
# decision (one per level), alter, and the stat.pars actually used.
statcompute <- function(stat.index,data,levels=c(0.05,0.1),critvalL=NULL,critvalR=NULL,alter=0,stat.pars=NULL) {
# dontCheck() only exists from R 3.1.0; on older R make it a no-op so the
# .C() call below still evaluates.
if(getRversion() < "3.1.0") dontCheck <- identity
# Count registered .C routines whose name contains "stat" to validate index.
tmp <- names(getDLLRegisteredRoutines("PoweR")[[".C"]])
ind.stats <- grep("stat",tmp)
nb.stats <- length(ind.stats)
if (!(stat.index %in% 1:nb.stats)) stop("This test statistic has not been included in the package!")
# NOTE(review): is.na(stat.pars) is a scalar test combined with || -- this
# assumes a non-NULL stat.pars of length > 1 never starts with NA; confirm.
if (is.null(stat.pars) || is.na(stat.pars)) {
stat.pars <- rep(0,getnbparstats(stat.index)) # C++ technical requirement.
nbparstat <- 0 # The default values will be used by the C++ function.
} else {
nbparstat <- length(stat.pars)
}
n <- length(data)
if (n<2) stop("'data' should be a vector of length at least 2")
if (!(alter %in% 0:4)) stop("'alter' should be 0, 1, 2, 3 or 4")
nblevels <- length(levels)
# Critical values, when supplied, must pair one-to-one with 'levels'.
if (!is.null(critvalL)) {
if (length(critvalL) != nblevels) stop("'critvalL' length and 'levels' length should not differ!")
cL <- critvalL
} else {
cL <- 0
}
if (!is.null(critvalR)) {
if (length(critvalR) != nblevels) stop("'critvalR' length and 'levels' length should not differ!")
cR <- critvalR
} else {
cR <- 0
}
# usecrit = 1 makes the C routine base decisions on the supplied critical
# values; 0 means no critical values were given.
usecrit <- 1
if (is.null(critvalL) && is.null(critvalR)) usecrit <- 0
Cstat.name <- paste("stat",stat.index,sep="")
# Argument order/types must match the C routine's signature exactly;
# named slots (statistic, pvalue, decision, ...) are read back from the result.
out <- .C(dontCheck(Cstat.name),as.double(data),as.integer(n),as.double(levels),as.integer(nblevels),rep(" ",50),0L,statistic=0.0,pvalcomp=1L,pvalue=0.0,cL=as.double(cL),cR=as.double(cR),as.integer(usecrit),alter=as.integer(alter),decision=as.integer(rep(0,nblevels)),stat.pars=as.double(stat.pars),nbparstat=as.integer(nbparstat),PACKAGE="PoweR")
# pvalcomp == 0 signals that the routine did not compute a p-value.
if (out$pvalcomp == 0L) out$pvalue <- NA
# NOTE(review): when nbparstat is 0, 1:out$nbparstat is c(1, 0) and returns
# the first default parameter rather than none -- confirm this is intended.
return(list(statistic=out$statistic,pvalue=out$pvalue,decision=out$decision,alter=out$alter,stat.pars=out$stat.pars[1:out$nbparstat]))
}
|
8a7f846f5170587bb33607d52d37ebfc81cff5cc
|
e6549edacf38351730ca91ead2456d50ba20f1cd
|
/man/TOthreshda2.rd
|
c336eec857e923e9084df7f1dac606a92874308c
|
[] |
no_license
|
cran/wavethresh
|
96f92574f59f62f77b9b5fe5c318e27011de585c
|
433dac8d2b5f3bf806530a29b5fe022fd2fe9087
|
refs/heads/master
| 2022-11-29T22:37:39.292801
| 2022-11-16T14:20:02
| 2022-11-16T14:20:02
| 17,700,852
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,847
|
rd
|
TOthreshda2.rd
|
\name{TOthreshda2}
\alias{TOthreshda2}
\title{Data analytic wavelet thresholding routine}
\usage{
TOthreshda2(ywd, alpha = 0.05, verbose = FALSE, return.threshold = FALSE)
}
\arguments{
\item{ywd}{The \code{\link{wd.object}} that you wish to threshold.}
\item{alpha}{The smoothing parameter which is a p-value }
\item{verbose}{Whether messages get printed}
\item{return.threshold}{If TRUE then the threshold value gets returned
rather than the actual thresholded object}
}
\description{
This function might be better called using the regular
\code{\link{threshold}} function using the \code{op2} policy.
Corresponds to the wavelet thresholding routine developed by
Ogden and Parzen (1994) Data dependent wavelet thresholding in nonparametric
regression with change-point applications. \emph{Tech Rep 176},
University of South Carolina, Department of Statistics.
}
\details{
The TOthreshda2 method operates in a similar fashion to
\code{\link{TOthreshda1}} except that it takes the cumulative sum
of squared coefficients, creating a sample "Brownian bridge" process,
and then using the standard Kolmogorov-Smirnov statistic in testing.
In this situation, the level of the hypothesis tests, alpha, has default
value 0.05. Note that the choice of alpha controls the smoothness of
the resulting wavelet estimator -- in general, a relatively large alpha
makes it easier to include coefficients, resulting in a more wiggly
estimate; a smaller alpha will make it more difficult to include
coefficients, yielding smoother estimates.
}
\value{
Returns the threshold value if \code{return.threshold==TRUE} otherwise
returns the shrunk set of wavelet coefficients.
}
\seealso{\code{\link{threshold}},\code{\link{TOthreshda1}}, \code{\link{wd}}}
\author{Todd Ogden}
\keyword{smooth}
|
46decdef5cccb7c5374282b8291ea88d2cd72a8c
|
3a3e3e050d6deb8544ff2838ab4b698a492d2eb7
|
/R/correlated_regions_enrichedheatmap.R
|
1b27cd3a924d7e6be291ff4c188695440fae6aff
|
[] |
no_license
|
jokergoo/epik
|
f9eb86c38eab46913a2787296fe5e023caf70f2b
|
16ae793be02554ddda89401a888327dce87c5a4a
|
refs/heads/master
| 2021-01-12T05:25:21.598897
| 2019-09-27T08:04:03
| 2019-09-27T08:04:03
| 77,924,435
| 0
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 35,188
|
r
|
correlated_regions_enrichedheatmap.R
|
# Normalize methylation and histone-mark signals to a set of target regions
# (e.g. TSS or CGI windows), producing matrices suitable for EnrichedHeatmap.
#
# Args:
#   cr: correlated-regions object; sample IDs and subgroup labels are read
#       from metadata(cr)$cr_param.
#   target: GRanges of target regions; row order of all output matrices.
#   marks: histone mark names resolvable via chipseq_hooks$sample_id().
#   expr: expression matrix (genes x samples) used for signal~expression
#         correlation when enough ChIP samples exist.
#   include_correlation_matrix: skip the (expensive) correlation matrices
#         when FALSE.
#   extend, target_ratio: passed through to the normalization helpers.
#
# Returns a list of matrices: meth_mat_corr, meth_mat_mean, meth_mat_diff,
# and per-mark lists hist_mat_corr_list / hist_mat_mean_list /
# hist_mat_diff_list (NULL corr entries when samples < 5 or corr disabled).
#
# "diff" matrices: IQR across samples when there is <= 1 subgroup, subgroup1
# minus subgroup2 for exactly 2 subgroups, max - min of subgroup means for
# more than 2 subgroups.
normalize_epigenomic_signals = function(cr, target, marks = NULL, expr = NULL, include_correlation_matrix = TRUE,
	extend = 5000, target_ratio = 0.1) {
	cr_param = metadata(cr)$cr_param
	sample_id = cr_param$sample_id
	subgroup = cr_param$subgroup
	subgroup_level = unique(subgroup)
	n_subgroup = length(subgroup_level)
	target_name = deparse(substitute(target))
	############ enriched to methylations ##################
	message(qq("normalizing methylation to @{target_name}"))
	if(include_correlation_matrix) {
		meth_mat_corr = normalizeToMatrix(cr, target, mapping_column = "gene_id", value_column = "corr",
			extend = extend, mean_mode = "absolute", target_ratio = target_ratio, background = 0)
	} else {
		meth_mat_corr = NULL
	}
	# Rows where normalization failed are filled with 0.5 (neutral methylation)
	meth_mat_mean = enrich_with_methylation(target, sample_id, target_ratio = target_ratio, extend = extend)
	meth_mat_mean[attr(meth_mat_mean, "failed_rows"), ] = 0.5
	if(n_subgroup <= 1) {
		# No subgroups: variability across samples (row IQR) serves as "diff"
		meth_mat_diff = enrich_with_methylation(target, sample_id, target_ratio = target_ratio, extend = extend, mode = rowIQRs)
		meth_mat_diff[attr(meth_mat_diff, "failed_rows"), ] = 0
	} else if(n_subgroup == 2) {
		# Two subgroups: signed mean difference, subgroup1 - subgroup2
		meth_mat_mean_1 = enrich_with_methylation(target, sample_id[subgroup == subgroup_level[1]], target_ratio = target_ratio, extend = extend)
		failed_rows = attr(meth_mat_mean_1, "failed_rows")
		message(qq("There are @{length(failed_rows)} failed rows when normalizing methylation to the targets."))
		meth_mat_mean_1[failed_rows, ] = 0.5
		meth_mat_mean_2 = enrich_with_methylation(target, sample_id[subgroup == subgroup_level[2]], target_ratio = target_ratio, extend = extend)
		failed_rows = attr(meth_mat_mean_2, "failed_rows")
		message(qq("There are @{length(failed_rows)} failed rows when normalizing methylation to the targets."))
		meth_mat_mean_2[failed_rows, ] = 0.5
		meth_mat_diff = meth_mat_mean_1 - meth_mat_mean_2
	} else {
		# More than two subgroups: range (max - min) of subgroup means
		meth_mat_mean_list = lapply(subgroup_level, function(le) {
			meth_mat_mean = enrich_with_methylation(target, sample_id[subgroup == le], target_ratio = target_ratio, extend = extend)
			failed_rows = attr(meth_mat_mean, "failed_rows")
			message(qq("There are @{length(failed_rows)} failed rows when normalizing methylation to the targets."))
			meth_mat_mean[failed_rows, ] = 0.5
			meth_mat_mean
		})
		meth_array_mean = array(dim = c(dim(meth_mat_mean), n_subgroup))
		for(i in seq_along(meth_mat_mean_list)) {
			meth_array_mean[, , i] = meth_mat_mean_list[[i]]
		}
		meth_mat_diff = apply(meth_array_mean, c(1, 2), max) - apply(meth_array_mean, c(1, 2), min)
		# copyAttr restores normalizeToMatrix attributes lost by apply()
		meth_mat_diff = copyAttr(meth_mat_mean, meth_mat_diff)
	}
	################# enrich to histone modifications #################
	hist_mat_corr_list = list()
	hist_mat_mean_list = list()
	hist_mat_diff_list = list()
	for(k in seq_along(marks)) {
		message(qq("normalizing @{marks[k]} signals to @{target_name}"))
		# Only samples that have ChIP data for this mark are used
		hm_sample_id = intersect(sample_id, chipseq_hooks$sample_id(marks[k]))
		hm_subgroup = subgroup[sample_id %in% hm_sample_id]
		hm_subgroup_level = unique(hm_subgroup)
		n_hm_subgroup = length(hm_subgroup_level)
		# applied to each sample, each mark
		lt = enrich_with_histone_mark(target, sample_id = hm_sample_id, mark = marks[k], return_arr = TRUE,
			target_ratio = target_ratio, extend = extend)
		hist_mat_mean_list[[k]] = lt[[2]]
		arr = lt[[1]]
		# only calculate the correlation when there are enough samples
		if(length(hm_sample_id) >= 5 && include_correlation_matrix) {
			# detect regions that histone MARKS correlate to expression
			expr2 = expr[target$gene_id, intersect(colnames(expr), hm_sample_id)]
			hist_mat_corr = matrix(nrow = nrow(expr2), ncol = ncol(meth_mat_mean))
			counter = set_counter(nrow(hist_mat_corr), fmt = "  calculate correlation for %s rows.")
			# Spearman correlation per gene x window across samples;
			# warnings (e.g. constant input) are suppressed and NAs zeroed below
			for(i in seq_len(nrow(hist_mat_corr))) {
				counter()
				for(j in seq_len(ncol(hist_mat_corr))) {
					suppressWarnings(x <- cor(arr[i, j, ], expr2[i, ], method = "spearman"))
					hist_mat_corr[i, j] = x
				}
			}
			hist_mat_corr[is.na(hist_mat_corr)] = 0
			hist_mat_corr = copyAttr(meth_mat_mean, hist_mat_corr)
			hist_mat_corr_list[[k]] = hist_mat_corr
		} else {
			# [k] = list(NULL) keeps a NULL placeholder without dropping slot k
			hist_mat_corr_list[k] = list(NULL)
		}
		# "diff" matrix: same scheme as for methylation above
		if(n_hm_subgroup <= 1) {
			hist_mat_diff_list[[k]] = apply(arr, c(1, 2), IQR, na.rm = TRUE)
			hist_mat_diff_list[[k]] = copyAttr(meth_mat_mean, hist_mat_diff_list[[k]])
		} else if(n_hm_subgroup == 2) {
			hm_sample_id_subgroup1 = hm_sample_id[hm_subgroup == hm_subgroup_level[1]]
			hm_sample_id_subgroup2 = hm_sample_id[hm_subgroup == hm_subgroup_level[2]]
			h1 = apply(arr[, , hm_sample_id_subgroup1], c(1, 2), mean, na.rm = TRUE)
			h2 = apply(arr[, , hm_sample_id_subgroup2], c(1, 2), mean, na.rm = TRUE)
			hist_mat_diff_list[[k]] = h1 - h2
			hist_mat_diff_list[[k]] = copyAttr(meth_mat_mean, hist_mat_diff_list[[k]])
		} else {
			h_list = lapply(hm_subgroup_level, function(le) {
				apply(arr[, , hm_sample_id[hm_subgroup == le]], c(1, 2), mean, na.rm = TRUE)
			})
			hm_array_mean = array(dim = c(dim(meth_mat_mean), n_hm_subgroup))
			for(i in seq_along(h_list)) {
				hm_array_mean[, , i] = h_list[[i]]
			}
			hist_mat_diff_list[[k]] = apply(hm_array_mean, c(1, 2), max) - apply(hm_array_mean, c(1, 2), min)
			hist_mat_diff_list[[k]] = copyAttr(meth_mat_mean, hist_mat_diff_list[[k]])
		}
	}
	names(hist_mat_corr_list) = marks
	names(hist_mat_mean_list) = marks
	names(hist_mat_diff_list) = marks
	return(list(meth_mat_corr = meth_mat_corr,
		meth_mat_mean = meth_mat_mean,
		meth_mat_diff = meth_mat_diff,
		hist_mat_corr_list = hist_mat_corr_list,
		hist_mat_mean_list = hist_mat_mean_list,
		hist_mat_diff_list = hist_mat_diff_list))
}
# Memoise the expensive normalization so repeated calls with identical
# arguments reuse cached results. Fix: the original condition tested
# `is.memoised(...)`, which is FALSE for a plain function, so the function
# was never actually memoised (and would double-wrap if it somehow were).
# The guard must be negated to wrap exactly once.
if(!is.memoised(normalize_epigenomic_signals)) {
	normalize_epigenomic_signals = memoise(normalize_epigenomic_signals)
}
# Order heatmap rows within each group of a partition, then concatenate.
#
# - mat: numeric matrix whose rows are clustered per group.
# - l_list: list of logical vectors (one per group) selecting rows of `mat`.
#
# Each group is clustered with dist_by_closeness2 + hclust, the dendrogram is
# reordered by row means, and the resulting within-group order is mapped back
# to row indices of `mat`. Groups of size 0/1 need no clustering.
merge_row_order = function(mat, l_list) {
	per_group = lapply(l_list, function(keep) {
		n_kept = sum(keep)
		if(n_kept == 0) {
			return(integer(0))
		}
		if(n_kept == 1) {
			return(which(keep))
		}
		sub_mat = mat[keep, ]
		dend = as.dendrogram(hclust(dist_by_closeness2(sub_mat)))
		dend = reorder(dend, rowMeans(sub_mat))
		which(keep)[order.dendrogram(dend)]
	})
	do.call("c", per_group)
}
# Draw a boxplot panel above an existing row annotation of a drawn heatmap
# list, summarizing `width` (e.g. gene/CGI widths) within each row-order
# group of the heatmap.
#
# - ht_list: a drawn ComplexHeatmap list (row_order() must work on it)
# - width: numeric vector indexed by heatmap row
# - anno_name: name of the annotation to decorate (slice 1)
# - anno_title: title drawn above the panel (defaults to anno_name)
add_boxplot_as_column_annotation = function(ht_list, width, anno_name, anno_title = anno_name) {
	gl = width
	row_order_list = row_order(ht_list)
	# One boxplot per row group; bx rows are the 5 boxplot statistics
	# (1 = lower whisker, 2/4 = hinges, 3 = median, 5 = upper whisker)
	lt = lapply(row_order_list, function(ind) gl[ind])
	bx = boxplot(lt, plot = FALSE)$stats
	n = length(row_order_list)
	# Box centers in npc units; each box occupies half of its slot
	x_ind = (seq_len(n) - 0.5)/n
	w = 1/n*0.5
	decorate_annotation(anno_name, slice = 1, {
		# y scale = data range padded by 10% on each side
		rg = range(bx)
		rg[1] = rg[1] - (rg[2] - rg[1])*0.1
		rg[2] = rg[2] + (rg[2] - rg[1])*0.1
		pushViewport(viewport(y = unit(1, "npc") + unit(1, "mm"), just = "bottom", height = unit(2, "cm"), yscale = rg))
		grid.rect(gp = gpar(col = "black"))
		# whisker caps and vertical whisker line
		grid.segments(x_ind - w/2, bx[5, ], x_ind + w/2, bx[5, ], default.units = "native", gp = gpar(lty = 1:2))
		grid.segments(x_ind - w/2, bx[1, ], x_ind + w/2, bx[1, ], default.units = "native", gp = gpar(lty = 1:2))
		grid.segments(x_ind, bx[1, ], x_ind, bx[5, ], default.units = "native", gp = gpar(lty = 1:2))
		# interquartile box and median line
		grid.rect(x_ind, colMeans(bx[c(4, 2), ]), width = w, height = bx[4, ] - bx[2, ], default.units = "native", gp = gpar(fill = "white", lty = 1:2))
		grid.segments(x_ind - w/2, bx[3, ], x_ind + w/2, bx[3, ], default.units = "native", gp = gpar(lty = 1:2))
		grid.yaxis(main = FALSE, gp = gpar(fontsize = 8))
		grid.text(anno_title, y = unit(1, "npc") + unit(2.5, "mm"), gp = gpar(fontsize = 14), just = "bottom")
		upViewport()
	})
}
# == title
# Visualizing enrichment for epigenomic signals at TSS-CGIs
#
# == param
# -cr correlated regions
# -txdb transcriptome annotation which was used in `correlated_regions`
# -expr expression matrix which was used in `correlated_regions`
# -cgi CpG island, a `GenomicRanges::GRanges` object
# -fdr_cutoff cutoff for fdr, used to filter significant CRs
# -meth_diff_cutoff cutoff for methylation difference. If there are no subgroup information or only one subgroup,
# ``meth_IQR`` column is used for filtering. If there are more than one subgroups, ``meth_diameter``
# column is used for filtering.
# -marks names of histone marks, should be supported in `chipseq_hooks`
# -type visualize negative correlated regions or positive correlated regions
# -extend base pairs extended to upstream and downstream
# -expr_ha a `ComplexHeatmap::HeatmapAnnotation` class object. It is used for the expression heatmap
#
# == details
# There are several heatmaps visualize various signals enriched at TSS-CGIs. In the plot, in the extended
# CGI region, one CGI only overlaps to one gene TSS and one gene TSS should only overlap to one extended CGI.
# Since one CGI only corresponds to one gene, in the heatmap, each row corresponds to one single gene.
#
# There are following heatmaps:
#
# - heatmap for gene expression
# - If ``cr`` is returned from `cr_enriched_heatmap`, there is a one column heatmap which
# shows the k-means cluters genes belong to
# - heatmap for significant correlated regions
# - a point plot showing CGI length
# - heatmap for correlation between methylation and gene expression
# - heatmap for mean methylation
# - heatmap for methylation difference
# - heatmap for correlation, mean signal and signal difference for histone marks
#
# If there are more than 12 heatmaps, they will be put into two pages.
#
# Heatmaps are split into two sub-clusters by k-means clustering on the mean methylation matrix.
# If there are two subgroups in all samples, each subcluster are split by high expression/low expression
# in subgroup 1. In each high expression/low expression, rows are split by the k-means clusters calculated
# in `cr_enriched_heatmap`. Finally, rows are clustered by considering closeness of signals in the extended
# CGI regions.
#
# == value
# no value is returned
#
# == author
# Zuguang Gu <z.gu@dkfz.de>
cr_enriched_heatmap_at_tss_cgi = function(cr, txdb, expr, cgi,
	fdr_cutoff = 0.05, meth_diff_cutoff = 0.1, marks = NULL, type = "neg", extend = 5000,
	expr_ha) {
	# These NULL assignments quiet R CMD check "no visible binding" notes; the
	# variables are actually assigned by the eval()'ed SNIPPET_* chunks, which
	# run in this function's environment.
	# Fix: sig_cr was used below (filtering tss) but never pre-declared here,
	# unlike the sibling cr_enriched_heatmap_at_tss(); declared for consistency.
	sig_cr = NULL
	n_subgroup = NULL
	subgroup = NULL
	subgroup_level = NULL
	expr_col_od = NULL
	km_col = NULL
	mat_mix = NULL
	eval(SNIPPET_ATTACH_CR_PARAM)
	message(qq("filter sigCR by fdr_cutoff = @{fdr_cutoff}, meth_diff_cutoff = @{meth_diff_cutoff}"))
	eval(SNIPPET_FILTER_SIG_CR)
	# Keep only TSS of genes that carry at least one significant CR
	gene = genes(txdb)
	message("extracting gene tss")
	tss = promoters(gene, upstream = 1, downstream = 0)
	tss = tss[names(tss) %in% sig_cr$gene_id]
	if(length(extend) == 1) extend = rep(extend, 2)
	cgi_extend = cgi
	start(cgi_extend) = start(cgi) - extend[1]
	end(cgi_extend) = end(cgi) + extend[2]
	# there should only be one tss in +-5kb of CGI and one tss should
	# only overlaps to one extended CGI
	mtch = as.matrix(findOverlaps(cgi_extend, tss))
	t1 = table(mtch[, 1])
	t2 = table(mtch[, 2])
	s1 = as.numeric(names(t1[t1 == 1]))
	s2 = as.numeric(names(t2[t2 == 1]))
	l = mtch[, 1] %in% s1 & mtch[, 2] %in% s2
	mtch = mtch[l, ]
	# One CGI <-> one gene: name CGIs by gene and inherit the gene's strand
	cgi2 = cgi[mtch[, 1]]
	cgi2$gene_id = names(tss[mtch[, 2]])
	names(cgi2) = cgi2$gene_id
	strand(cgi2) = strand(tss[mtch[, 2]])
	message(qq("@{length(cgi2)} left filtered by one-to-one mapping between tss and cgi"))
	# Fraction of the normalized window that the CGI body occupies
	target_ratio = mean(width(cgi2))/(sum(extend) + mean(width(cgi2)))
	target = cgi2
	target_name = "cgi"
	message("normalize to sigCR")
	eval(SNIPPET_NORMALIZE_SIG_CR)
	if(is.not.null(km)) {
		km = km[names(target)]
	}
	# CGI width annotation, winsorized at the 99th percentile
	cgi2 = cgi2[names(target)]
	cgi_width = width(cgi2)
	cgi_width[cgi_width > quantile(cgi_width, 0.99)] = quantile(cgi_width, 0.99)
	width_anno_name = "cgi_width"
	width_anno = cgi_width
	message("normalize to epi signals")
	eval(SNIPPET_NORMALIZE_EPI_SIGNALS)
	########### prepare the order of rows and columns
	message("determining row and column orders")
	eval(SNIPPET_ROW_ORDER_AND_COLUMN_ORDER)
	cor_col_fun = colorRamp2(c(-1, 0, 1), c("darkgreen", "white", "red"))
	meth_col_fun = colorRamp2(c(0, 0.5, 1), c("blue", "white", "red"))
	cr_col = c("-1" = "darkgreen", "0" = "white", "1" = "red")
	# Two fixed (non-epigenomic) heatmaps per page: expression + sigCR
	fixed_heatmap = 2
	eval(SNIPPET_HEATMAP_PAGE)
	# Default annotation when not supplied: color columns by subgroup
	if(missing(expr_ha)) {
		if(n_subgroup >= 2) {
			expr_ha = HeatmapAnnotation(subgroup = subgroup, col = list(subgroup = structure(rand_color(n_subgroup), names = subgroup_level)),
				show_annotation_name = TRUE, annotation_name_side = "left", annotation_name_gp = gpar(fontsize = 10))
		}
	}
	######### construct heatmap list ############
	## if there are too many heatmaps, they will be put in two pages.
	epi_color = c(brewer.pal(8, "Set2"), brewer.pal(12, "Set3"))
	epi_mark_list = list()
	epi_title_list = list()
	n_heatmap = 0
	n_row_group = 2
	# Winsorize expression per gene at the 5th/95th percentiles, then z-score
	expr = t(apply(expr, 1, function(x) {
		qu = quantile(x, c(0.05, 0.95), na.rm = TRUE)
		x[x < qu[1]] = qu[1]
		x[x > qu[2]] = qu[2]
		x
	}))
	expr = t(scale(t(expr)))
	ht_list = Heatmap(expr, name = "expr", show_row_names = FALSE,
		show_column_names = FALSE, width = unit(5, "cm"), show_column_dend = FALSE, cluster_columns = FALSE, column_order = expr_col_od,
		top_annotation = expr_ha, column_title = "Expression", show_row_dend = FALSE,
		use_raster = TRUE, raster_quality = 2)
	n_heatmap = n_heatmap + 1
	if(is.not.null(km)) {
		ht_list = ht_list + Heatmap(km[names(target)], name = "km_groups", col = km_col, show_row_names = FALSE,
			width = unit(0.5, "cm"))
	}
	ht_list = ht_list + EnrichedHeatmap(mat_mix, name = qq("@{type}CR"), col = cr_col,
		top_annotation = HeatmapAnnotation(lines1 = anno_enriched(gp = gpar(neg_col = "darkgreen", pos_col = "red", lty = 1:n_row_group))),
		top_annotation_height = unit(2, "cm"), column_title = qq("sig@{type}CR"),
		use_raster = TRUE, raster_quality = 2, combined_name_fun = NULL)
	n_heatmap = n_heatmap + 1
	ht_list = ht_list + rowAnnotation(foo_width = row_anno_points(width_anno, axis = TRUE, gp = gpar(col = "#00000040")),
		width = unit(1, "cm"))
	message("append epi heatmaps")
	eval(SNIPPET_APPEND_EPI_HEATMAP)
	message("draw heatmaps")
	eval(SNIPPET_DRAW_HEATMAP)
	return(invisible(NULL))
}
# == title
# Visualizing enrichment for epigenomic signals at TSS
#
# == param
# -cr correlated regions
# -txdb transcriptome annotation which was used in `correlated_regions`
# -expr expression matrix which was used in `correlated_regions`
# -cgi CpG island, a `GenomicRanges::GRanges` object
# -fdr_cutoff cutoff for fdr
# -meth_diff_cutoff cutoff for methylation difference. If there are no subgroup information or only one subgroup,
# ``meth_IQR`` column is used for filtering. If there are more than one subgroups, ``meth_diameter``
# column is used for filtering.
# -marks names of histone marks, should be supported in `chipseq_hooks`
# -type visualize negative correlated regions or positive correlated regions
# -extend base pairs extended to upstream and downstream
# -expr_ha a `ComplexHeatmap::HeatmapAnnotation` class object.It is used for the expression heatmap
#
# == details
# There are several heatmaps visualize various signals enriched at gene TSS.
#
# There are following heatmaps:
#
# - heatmap for gene expression
# - a point plot showing gene length
# - If ``cr`` is returned from `cr_enriched_heatmap`, there is a one column heatmap which
# shows the k-means cluters genes belong to
# - heatmap for CGI enrichment at TSS
# - heatmap for significant correlated regions
# - heatmap for correlation between methylation and gene expression
# - heatmap for mean methylation
# - heatmap for methylation difference
# - heatmap for correlation, mean signal and signal difference for histone marks
#
# If there are more than 12 heatmaps, they will be put into two pages.
#
# Heatmaps are split into two sub-clusters by k-means clustering on the mean methylation matrix.
# If there are two subgroups in all samples, each subcluster are split by high expression/low expression
# in subgroup 1. In each high expression/low expression, rows are split by the k-means clusters calculated
# in `cr_enriched_heatmap`. Finally, rows are clustered by considering closeness of signals in the extended
# TSS regions.
#
# == value
# no value is returned
#
# == author
# Zuguang Gu <z.gu@dkfz.de>
cr_enriched_heatmap_at_tss = function(cr, txdb, expr, cgi, fdr_cutoff = 0.05,
	meth_diff_cutoff = 0.1, marks = NULL, type = "neg", extend = c(5000, 10000),
	expr_ha) {
	# These NULL assignments quiet R CMD check "no visible binding" notes; the
	# variables are actually assigned by the eval()'ed SNIPPET_* chunks, which
	# run in this function's environment.
	sig_cr = NULL
	n_subgroup = NULL
	subgroup = NULL
	subgroup_level = NULL
	expr_col_od = NULL
	km_col = NULL
	mat_mix = NULL
	eval(SNIPPET_ATTACH_CR_PARAM)
	message(qq("filter sigCR by fdr_cutoff = @{fdr_cutoff}, meth_diff_cutoff = @{meth_diff_cutoff}"))
	eval(SNIPPET_FILTER_SIG_CR)
	# Keep only TSS of genes that carry at least one significant CR
	gene = genes(txdb)
	message("extracting gene tss")
	tss = promoters(gene, upstream = 1, downstream = 0)
	tss = tss[names(tss) %in% sig_cr$gene_id]
	target = tss
	axis_name = c("-5KB", "TSS", "5KB")
	target_ratio = 0.1
	target_name = "tss"
	message("normalize to sigCR")
	eval(SNIPPET_NORMALIZE_SIG_CR)
	if(is.not.null(km)) {
		km = km[names(target)]
	}
	# Gene width annotation, winsorized at the 95th percentile
	gl = width(gene[tss$gene_id])
	gl[gl > quantile(gl, 0.95)] = quantile(gl, 0.95)
	width_anno_name = "gene_width"
	width_anno = gl
	# normalize to CGI
	mat_cgi = normalizeToMatrix(cgi, target, extend = extend, mean_mode = "absolute")
	message("normalize to epi signals")
	eval(SNIPPET_NORMALIZE_EPI_SIGNALS)
	########### prepare the order of rows and columns
	message("determing row and colum orders")
	eval(SNIPPET_ROW_ORDER_AND_COLUMN_ORDER)
	cor_col_fun = colorRamp2(c(-1, 0, 1), c("darkgreen", "white", "red"))
	meth_col_fun = colorRamp2(c(0, 0.5, 1), c("blue", "white", "red"))
	cr_col = c("-1" = "darkgreen", "0" = "white", "1" = "red")
	# Three fixed (non-epigenomic) heatmaps per page: expression + CGI + sigCR
	fixed_heatmap = 3
	eval(SNIPPET_HEATMAP_PAGE)
	# Default annotation when not supplied: color columns by subgroup
	if(missing(expr_ha)) {
		if(n_subgroup >= 2) {
			expr_ha = HeatmapAnnotation(subgroup = subgroup, col = list(subgroup = structure(rand_color(n_subgroup), names = subgroup_level)),
				show_annotation_name = TRUE, annotation_name_side = "left", annotation_name_gp = gpar(fontsize = 10))
		}
	}
	######### construct heatmap list ############
	## if there are too many heatmaps, they will be put in two pages.
	epi_color = c(brewer.pal(8, "Set2"), brewer.pal(12, "Set3"))
	epi_mark_list = list()
	epi_title_list = list()
	n_heatmap = 0
	n_row_group = 2
	# Winsorize expression per gene at the 5th/95th percentiles, then z-score
	expr = t(apply(expr, 1, function(x) {
		qu = quantile(x, c(0.05, 0.95), na.rm = TRUE)
		x[x < qu[1]] = qu[1]
		x[x > qu[2]] = qu[2]
		x
	}))
	expr = t(scale(t(expr)))
	ht_list = Heatmap(expr, name = "expr", show_row_names = FALSE,
		show_column_names = FALSE, width = unit(5, "cm"), show_column_dend = FALSE, cluster_columns = FALSE, column_order = expr_col_od,
		top_annotation = expr_ha, column_title = "Expression", show_row_dend = FALSE,
		use_raster = TRUE, raster_quality = 2)
	n_heatmap = n_heatmap + 1
	ht_list = ht_list + rowAnnotation(foo_width = row_anno_points(width_anno, axis = TRUE, gp = gpar(col = "#00000040")),
		width = unit(1, "cm"))
	if(is.not.null(km)) {
		ht_list = ht_list + Heatmap(km[names(target)], name = "km_groups", col = km_col, show_row_names = FALSE,
			width = unit(1, "cm"))
	}
	ht_list = ht_list + EnrichedHeatmap(mat_cgi, col = c("white", "darkorange"), name = "CGI",
		top_annotation = HeatmapAnnotation(lines1 = anno_enriched(gp = gpar(col = "darkorange", lty = 1:n_row_group))),
		top_annotation_height = unit(2, "cm"), column_title = "CGI", axis_name = axis_name,
		use_raster = TRUE, raster_quality = 2)
	ht_list = ht_list + EnrichedHeatmap(mat_mix, name = qq("@{type}CR"), col = cr_col,
		top_annotation = HeatmapAnnotation(lines1 = anno_enriched(gp = gpar(neg_col = "darkgreen", pos_col = "red", lty = 1:n_row_group))),
		top_annotation_height = unit(2, "cm"), column_title = qq("sig@{type}CR"),
		use_raster = TRUE, raster_quality = 2, combined_name_fun = NULL, axis_name = axis_name)
	n_heatmap = n_heatmap + 1
	message("append epi heatmaps")
	eval(SNIPPET_APPEND_EPI_HEATMAP)
	message("draw heatmaps")
	eval(SNIPPET_DRAW_HEATMAP)
	return(invisible(NULL))
}
# == title
# Visualizing enrichment for epigenomic signals at gene body
#
# == param
# -cr correlated regions
# -txdb transcriptome annotation which was used in `correlated_regions`
# -expr expression matrix which was used in `correlated_regions`
# -cgi CpG island, a `GenomicRanges::GRanges` object
# -K which k-means cluster which is generated by `cr_enriched_heatmap`
# -marks names of histone marks, should be supported in `chipseq_hooks`
# -extend base pairs extended to upstream and downstream
# -expr_ha a `ComplexHeatmap::HeatmapAnnotation` class object.It is used for the expression heatmap
#
# == details
# There are several heatmaps visualize various signals enriched at gene body
#
# There are following heatmaps:
#
# - heatmap for gene expression
# - a point plot showing gene length
# - If ``cr`` is returned from `cr_enriched_heatmap`, there is a one column heatmap which
# shows the k-means cluters genes belong to
# - heatmap for CGI enrichment at TSS
# - heatmap for correlation between methylation and gene expression
# - heatmap for mean methylation
# - heatmap for methylation difference
# - heatmap for correlation, mean signal and signal difference for histone marks
#
# If there are more than 12 heatmaps, they will be put into two pages.
#
# Heatmaps are split into three sub-clusters by k-means clustering on the mean methylation matrix.
# If there are two subgroups in all samples, each subcluster are split by high expression/low expression
# in subgroup 1. In each high expression/low expression, rows are split by the k-means clusters calculated
# in `cr_enriched_heatmap`. Finally, rows are clustered by mean methylation matrix.
#
# == value
# no value is returned
#
# == author
# Zuguang Gu <z.gu@dkfz.de>
cr_enriched_heatmap_at_gene = function(cr, txdb, expr, cgi, K = 1, marks = NULL,
    extend = 5000, expr_ha) {

    # Placeholders only: the real values (sample ids, subgroup labels, the
    # mean methylation matrix, k-means cluster vector `km`, colors, ...) are
    # attached from attributes stored in `cr` by SNIPPET_ATTACH_CR_PARAM.
    sample_id = NULL
    n_subgroup = NULL
    subgroup = NULL
    subgroup_level = NULL
    meth_mat_mean = NULL
    km_col = NULL
    eval(SNIPPET_ATTACH_CR_PARAM)

    if(is.null(km)) {
        # BUG FIX: the original error message contained an empty function
        # name ("returned by ``"); name the producing function explicitly.
        stop("`cr` should be returned by `cr_enriched_heatmap()`.")
    }

    # restrict to genes belonging to the requested k-means cluster K
    gi = names(km[km == K])
    cr = cr[cr$gene_id %in% gi]
    gene = genes(txdb)
    gene = gene[names(gene) %in% cr$gene_id]

    target = gene
    target_name = "gene"
    target_ratio = 0.6
    # GENERALIZED: axis labels follow `extend` instead of being hard-coded
    # to "5KB" (identical output for the default extend = 5000).
    axis_name = c(paste0("-", extend/1000, "KB"), "TSS", "TES", paste0(extend/1000, "KB"))
    expr = expr[names(gene), , drop = FALSE]
    km = km[names(gene)]

    # normalize CGI positions over gene bodies (+/- `extend` bp)
    mat_cgi = normalizeToMatrix(cgi, gene, extend = extend, target_ratio = target_ratio, mean_mode = "absolute")

    message("normalize to epi signals")
    eval(SNIPPET_NORMALIZE_EPI_SIGNALS)

    # gene-length annotation, winsorized at the 95th percentile
    gl = width(gene[gene$gene_id])
    gl[gl > quantile(gl, 0.95)] = quantile(gl, 0.95)
    width_anno_name = "gene_width"
    width_anno = gl

    expr = expr[names(gene), sample_id, drop = FALSE]
    if(n_subgroup == 2) {
        # BUG FIX: both rowMeans() terms previously used subgroup_level[1],
        # so `expr_mean` was identically zero and every gene was labelled
        # "low". The intended quantity is the mean expression difference
        # between the two subgroups.
        expr_mean = rowMeans(expr[, subgroup == subgroup_level[1], drop = FALSE]) -
            rowMeans(expr[, subgroup == subgroup_level[2], drop = FALSE])
        expr_split = ifelse(expr_mean > 0, "high", "low")
        expr_split = factor(expr_split, levels = c("high", "low"))
    } else {
        expr_split = NULL
    }

    # split rows into three clusters by methylation around the TSS
    n_upstream_index = length(attr(meth_mat_mean, "upstream_index"))
    meth_split = kmeans(meth_mat_mean[, seq(round(n_upstream_index*0.8), round(n_upstream_index*1.2))], centers = 3)$cluster
    # NOTE(review): this averaging window (0.8n .. 7/5 n) differs from the
    # k-means window above (0.8n .. 1.2n) -- confirm 7/5 is intentional.
    x = tapply(rowMeans(meth_mat_mean[, seq(round(n_upstream_index*0.8), round(n_upstream_index*7/5))]), meth_split, mean)
    # relabel clusters so that "cluster1..3" follow mean methylation
    # NOTE(review): uses order(x) as a name lookup; rank(x) may be the
    # intended permutation -- confirm cluster naming direction.
    od = structure(order(x), names = names(x))
    meth_split = paste0("cluster", od[as.character(meth_split)])

    if(n_subgroup == 2) {
        combined_split = paste(meth_split, expr_split, sep = "|")
        row_order = merge_row_order(meth_mat_mean, list(
            combined_split == "cluster1|high",
            combined_split == "cluster1|low",
            combined_split == "cluster2|high",
            combined_split == "cluster2|low",
            combined_split == "cluster3|high",
            combined_split == "cluster3|low"
        ))
    } else {
        combined_split = meth_split
        row_order = merge_row_order(meth_mat_mean, list(
            combined_split == "cluster1",
            combined_split == "cluster2",
            combined_split == "cluster3"
        ))
    }

    # column order of the expression heatmap: within each subgroup, cluster
    # samples and reorder the dendrogram by column means
    expr_col_od = do.call("c", lapply(subgroup_level, function(le) {
        dend1 = as.dendrogram(hclust(dist(t(expr[, subgroup == le, drop = FALSE]))))
        hc1 = as.hclust(reorder(dend1, colMeans(expr[, subgroup == le, drop = FALSE])))
        col_od1 = hc1$order
        which(subgroup == le)[col_od1]
    }))

    cor_col_fun = colorRamp2(c(-1, 0, 1), c("darkgreen", "white", "red"))
    meth_col_fun = colorRamp2(c(0, 0.5, 1), c("blue", "white", "red"))
    cr_col = c("-1" = "darkgreen", "0" = "white", "1" = "red")

    fixed_heatmap = 2
    eval(SNIPPET_HEATMAP_PAGE)

    # default expression-heatmap annotation: one color per subgroup
    if(missing(expr_ha)) {
        if(n_subgroup >= 2) {
            expr_ha = HeatmapAnnotation(subgroup = subgroup, col = list(subgroup = structure(rand_color(n_subgroup), names = subgroup_level)),
                show_annotation_name = TRUE, annotation_name_side = "left", annotation_name_gp = gpar(fontsize = 10))
        }
    }

    ######### construct heatmap list ############
    ## if there are too many heatmaps, they will be put in two pages.
    epi_color = c(brewer.pal(8, "Set2"), brewer.pal(12, "Set3"))
    epi_mark_list = list()
    epi_title_list = list()
    n_heatmap = 0
    n_row_group = 3

    # winsorize each gene's expression at the 5th/95th percentile, then
    # z-score per gene (row)
    expr = t(apply(expr, 1, function(x) {
        qu = quantile(x, c(0.05, 0.95), na.rm = TRUE)
        x[x < qu[1]] = qu[1]
        x[x > qu[2]] = qu[2]
        x
    }))
    expr = t(scale(t(expr)))

    ht_list = Heatmap(expr, name = "expr", show_row_names = FALSE,
        show_column_names = FALSE, width = unit(5, "cm"), show_column_dend = FALSE, cluster_columns = FALSE, column_order = expr_col_od,
        top_annotation = expr_ha, column_title = "Expression", show_row_dend = FALSE,
        use_raster = TRUE, raster_quality = 2)
    n_heatmap = n_heatmap + 1

    ht_list = ht_list + rowAnnotation(foo_width = row_anno_points(width_anno, axis = TRUE, gp = gpar(col = "#00000040")),
        width = unit(1, "cm"))
    ht_list = ht_list + Heatmap(km[names(target)], name = "km_groups", col = km_col, show_row_names = FALSE,
        width = unit(1, "cm"))
    ht_list = ht_list + EnrichedHeatmap(mat_cgi, col = c("white", "darkorange"), name = "CGI",
        top_annotation = HeatmapAnnotation(lines1 = anno_enriched(gp = gpar(col = "darkorange", lty = 1:n_row_group))),
        top_annotation_height = unit(2, "cm"), column_title = "CGI", axis_name = axis_name,
        use_raster = TRUE, raster_quality = 2)
    n_heatmap = n_heatmap + 1

    message("append epi heatmaps")
    eval(SNIPPET_APPEND_EPI_HEATMAP)
    message("draw heatmaps")
    eval(SNIPPET_DRAW_HEATMAP)
    return(invisible(NULL))
}
# == title
# Visualizing enrichment for epigenomic signals at TSS-CGIs
#
# == param
# -cr correalted regions
# -txdb transcriptome annotation which was used in `correlated_regions`
# -expr expression matrix which was used in `correlated_regions`
# -gf genomic features, a `GenomicRanges::GRanges` object
# -fdr_cutoff cutoff for fdr
# -meth_diff_cutoff cutoff for methylation difference. If there are no subgroup information or only one subgroup,
# ``meth_IQR`` column is used for filtering. If there are more than one subgroups, ``meth_diameter``
# column is used for filtering.
# -marks names of histone marks, should be supported in `chipseq_hooks`
# -type visualize negative correlated regions or positive correlated regions
# -extend base pairs extended to upstream and downstream
# -min_reduce base pairs for merging neighbouring regions
# -min_width minimal width of regions
# -nearest_by "tss" or "gene", how to connect genomic features to genes
# -expr_ha a `ComplexHeatmap::HeatmapAnnotation` class object.It is used for the expression heatmap
#
# == details
# There are several heatmaps visualize various signals enriched at genomic features. After annotate to genes,
# in the extended regions, each region can only have one gene.
#
# There are following heatmaps:
#
# - heatmap for gene expression
# - If ``cr`` is returned form `cr_enriched_heatmap`, there is a one column heatmap which
# shows the k-means cluters genes belong to
# - heatmap for significant correlated regions
# - a point plot showing region length
# - heatmap for correlation between methylation and gene expression
# - heatmap for mean methylation
# - heatmap for methylation difference
# - heatmap for correlation, mean signal and signal difference for histone marks
#
# If there are more than 12 heatmaps, they will be put into two pages.
#
# Heatmaps are split into two sub-clusters by k-means clustering on the mean methylation matrix.
# If there are two subgroups in all samples, each subcluster are split by high expression/low expression
# in subgroup 1. In each high expression/low expression, rows are split by the k-means clusters calculated
# in `cr_enriched_heatmap`. Finally, rows are clustered by considering closeness of signals in the extended
# gf regions.
#
# == value
# no value is returned
#
# == author
# Zuguang Gu <z.gu@dkfz.de>
cr_enriched_heatmap_at_genomic_features = function(cr, txdb, expr, gf,
	fdr_cutoff = 0.05, meth_diff_cutoff = 0.1, marks = NULL, type = "neg", extend = 5000,
	min_reduce = 1, min_width = 1000, nearest_by = "tss", expr_ha) {
	# Placeholders only: the real values are attached from attributes stored
	# in `cr` by SNIPPET_ATTACH_CR_PARAM (including `km`, used further down).
	gm_extend = NULL
	n_subgroup = NULL
	subgroup = NULL
	subgroup_level = NULL
	expr_col_od = NULL
	km_col = NULL
	mat_mix = NULL
	eval(SNIPPET_ATTACH_CR_PARAM)
	message(qq("filter sigCR by fdr_cutoff = @{fdr_cutoff}, meth_diff_cutoff = @{meth_diff_cutoff}"))
	# keeps only significant CRs of the requested `type` (neg/pos)
	eval(SNIPPET_FILTER_SIG_CR)
	# keep the unfiltered features for the "gf" enrichment heatmap later
	gf_origin = gf
	# overlap gf to gene extended regions
	message("extracting gene tss")
	gm = genes(txdb)
	gm = gm[gm$gene_id %in% unique(cr$gene_id)]
	tss = promoters(gm, upstream = 1, downstream = 0)
	gl = width(gm)
	# `g`: gene bodies extended by gm_extend bp on both sides (strand-blind),
	# clipped to start >= 1
	g = gm
	strand(g) = "*"
	start(g) = start(g) - gm_extend[1]
	end(g) = end(g) + ifelse(length(gm_extend) == 2, gm_extend[2], gm_extend[1])
	start(g) = ifelse(start(g) > 1, start(g), 1)
	mtch = as.matrix(findOverlaps(g, gf))
	gf = gf[unique(mtch[, 2])]
	message(qq("@{length(gf)} are in extended gene regios."))
	# merge neighbouring features closer than min_reduce bp
	if(min_reduce >= 0) {
		gf = reduce(gf, min = min_reduce)
		message(qq("@{length(gf)} remain after merging by min_reduce <= @{min_reduce}"))
	}
	gf = gf[width(gf) >= min_width, ]
	message(qq("@{length(gf)} regions remain after removing regions with width <= @{min_width}"))
	# find associated gene (by tss for by gene)
	if(nearest_by == "tss") {
		message("look for nearest tss")
		# select = "all" returns every tie; among ties, keep the hit to the
		# longest gene (one gene per feature)
		d = distanceToNearest(gf, tss, select = "all")
		subjectHits = subjectHits(d)
		ind = tapply(seq_len(length(d)), queryHits(d), function(ind) {
			ind[which.max(gl[subjectHits[ind]])][1]
		})
		d = d[as.vector(ind)]
		gf2 = gf[queryHits(d)]
		gf2$distanceToNearest = mcols(d)$distance
		gf2$nearestGene = gm$gene_id[subjectHits(d)]
		gf2$nearestGeneStrand = strand(tss[subjectHits(d)])
		# negative distance == feature lies upstream of the TSS (relative to
		# gene strand)
		l = gf2$nearestGeneStrand == "+" & start(gf2) < start(tss[subjectHits(d)]) |
			gf2$nearestGeneStrand == "-" & end(gf2) > end(tss[subjectHits(d)])
		l = as.vector(l)
		gf2$distanceToNearest[l] = -gf2$distanceToNearest[l]
	} else {
		message("look for nearest gene body")
		# same tie-breaking strategy as the "tss" branch, but distances are
		# measured to whole gene bodies
		d = distanceToNearest(gf, gm, select = "all")
		subjectHits = subjectHits(d)
		ind = tapply(seq_len(length(d)), queryHits(d), function(ind) {
			ind[which.max(gl[subjectHits[ind]])][1]
		})
		d = d[as.vector(ind)]
		gf2 = gf[queryHits(d)]
		gf2$distanceToNearest = mcols(d)$distance
		gf2$nearestGene = gm$gene_id[subjectHits(d)]
		gf2$nearestGeneStrand = strand(gm[subjectHits(d)])
		# if the gf is overlapped to gene body, how much of it is overlapped by the gene
		gg = pintersect(gf2, gm[gf2$nearestGene], resolve.empty = "max")
		gf2$overlapGenePercent = width(gg)/width(gf2)
		l = gf2$nearestGeneStrand == "+" & start(gf2) < start(gm[subjectHits(d)]) |
			gf2$nearestGeneStrand == "-" & end(gf2) > end(gm[subjectHits(d)])
		l = as.vector(l)
		gf2$distanceToNearest[l] = -gf2$distanceToNearest[l]
	}
	message(qq("@{length(gf2)} regions remain after overlapping to genes"))
	# features inherit strand and name (gene_id) from their associated gene
	strand(gf2) = strand(gm[gf2$nearestGene])
	names(gf2) = gf2$nearestGene
	gf2$gene_id = gf2$nearestGene
	# fraction of the normalized matrix devoted to the feature body itself
	target_ratio = mean(width(gf2))/(sum(extend) + mean(width(gf2)))
	target = gf2
	target_name = "gf"
	message("normalize to sigCR")
	eval(SNIPPET_NORMALIZE_SIG_CR)
	message("normalize to gf")
	mat_gf = normalizeToMatrix(gf_origin, target, extend = extend, target_ratio = target_ratio)
	if(length(target) < 10) {
		message("too few regions left, just quit.")
		return(invisible(NULL))
	}
	# NOTE(review): is.not.null() is not base R -- presumably a helper
	# defined elsewhere in this package; confirm it is exported/available.
	if(is.not.null(km)) {
		km = km[names(target)]
	}
	# feature-width annotation, winsorized at the 99th percentile
	gf_width = width(gf2)
	gf_width[gf_width > quantile(gf_width, 0.99)] = quantile(gf_width, 0.99)
	width_anno_name = "gf_width"
	width_anno = gf_width
	message("normalize to epi signals")
	eval(SNIPPET_NORMALIZE_EPI_SIGNALS)
	########### prepare the order of rows and columns
	message("determing row and colum orders")
	eval(SNIPPET_ROW_ORDER_AND_COLUMN_ORDER)
	cor_col_fun = colorRamp2(c(-1, 0, 1), c("darkgreen", "white", "red"))
	meth_col_fun = colorRamp2(c(0, 0.5, 1), c("blue", "white", "red"))
	cr_col = c("-1" = "darkgreen", "0" = "white", "1" = "red")
	gf_col = colorRamp2(c(0, 1), c("white", "blue"))
	fixed_heatmap = 3
	eval(SNIPPET_HEATMAP_PAGE)
	# default expression-heatmap annotation: one color per subgroup
	if(missing(expr_ha)) {
		if(n_subgroup >= 2) {
			expr_ha = HeatmapAnnotation(subgroup = subgroup, col = list(subgroup = structure(rand_color(n_subgroup), names = subgroup_level)),
				show_annotation_name = TRUE, annotation_name_side = "left", annotation_name_gp = gpar(fontsize = 10))
		}
	}
	######### construct heatmap list ############
	## if there are too many heatmaps, they will be put in two pages.
	epi_color = c(brewer.pal(8, "Set2"), brewer.pal(12, "Set3"))
	epi_mark_list = list()
	epi_title_list = list()
	n_heatmap = 0
	n_row_group = 2
	ht_list = Heatmap(expr, name = "expr", show_row_names = FALSE,
		show_column_names = FALSE, width = unit(5, "cm"), show_column_dend = FALSE, cluster_columns = FALSE, column_order = expr_col_od,
		top_annotation = expr_ha, column_title = "Expression", show_row_dend = FALSE,
		use_raster = TRUE, raster_quality = 2)
	n_heatmap = n_heatmap + 1
	# optional k-means cluster annotation (only if `cr` carries `km`)
	if(is.not.null(km)) {
		ht_list = ht_list + Heatmap(km[names(target)], name = "km_groups", col = km_col, show_row_names = FALSE,
			width = unit(0.5, "cm"))
	}
	ht_list = ht_list + EnrichedHeatmap(mat_gf, name = "gf", col = gf_col,
		top_annotation = HeatmapAnnotation(lines1 = anno_enriched(gp = gpar(col = "blue", lty = 1:n_row_group))),
		top_annotation_height = unit(2, "cm"), column_title = qq("gf"),
		use_raster = TRUE, raster_quality = 2, combined_name_fun = NULL)
	n_heatmap = n_heatmap + 1
	ht_list = ht_list + EnrichedHeatmap(mat_mix, name = qq("@{type}CR"), col = cr_col,
		top_annotation = HeatmapAnnotation(lines1 = anno_enriched(gp = gpar(neg_col = "darkgreen", pos_col = "red", lty = 1:n_row_group))),
		top_annotation_height = unit(2, "cm"), column_title = qq("sig@{type}CR"),
		use_raster = TRUE, raster_quality = 2, combined_name_fun = NULL)
	n_heatmap = n_heatmap + 1
	ht_list = ht_list + rowAnnotation(foo_width = row_anno_points(width_anno, axis = TRUE, gp = gpar(col = "#00000040")),
		width = unit(1, "cm"))
	message("append epi heatmaps")
	eval(SNIPPET_APPEND_EPI_HEATMAP)
	message("draw heatmaps")
	eval(SNIPPET_DRAW_HEATMAP)
	return(invisible(NULL))
}
|
855af72801e112e928b3900840e971d3f2f54d7e
|
3e93dfa5e4efe24cd24fafa1363e7e68881d3e1a
|
/1920_ctmm.R
|
3041ffc07a0a3dc28b5dcfbbb230ce7379a6f365
|
[] |
no_license
|
kelseyefisher/monarchoccurrence
|
368e031c220593a8a840757fcadedacb6564624a
|
dfa03368f7f980e70f1e97c2a1e08b1c851e3f92
|
refs/heads/main
| 2023-05-08T19:34:22.746697
| 2021-06-01T19:59:29
| 2021-06-01T19:59:29
| 331,306,480
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,150
|
r
|
1920_ctmm.R
|
# Fit continuous-time movement models (ctmm) to monarch relocation data and
# export a 95% occurrence raster (UTM zone 15, GeoTIFF) per individual.

#home laptop
#setwd("C:/Users/kelse/Box Sync/Publications/19&20 Habitat Utilization/Data Analysis")
#work
setwd("C:/Users/kefisher/Box/Publications/19&20 Habitat Utilization/Data Analysis")

# Combine the 2019 and 2020 relocation files and attach release metadata.
ctmm19 <- read.csv("19_itworked_forctmm.csv", header=TRUE)
ctmm20 <- read.csv("20_itworked_forctmm.csv", header=TRUE)
release <- read.csv("1920_release.csv", header=TRUE)
ctmm <- rbind(ctmm19, ctmm20)
ctmm <- merge(ctmm, release, by="monarchrunid")
# as.telemetry() groups rows into individuals by this column.
ctmm$individual.local.identifier <- ctmm$ctmmlabel
head(ctmm)

# Build POSIXct timestamps (GMT) from the separate date and time columns.
dates <- strptime(as.character(ctmm$date), format="%Y-%m-%d", tz="GMT")
#extract GMT times
times <- as.character(ctmm$time)
#create date-times and cast as POSIXct
datetimes <- as.POSIXct(paste(dates, times), tz="GMT")
#add appropriate datetimes into Monarch file
ctmm$timestamp <- datetimes
head(ctmm)

library(ctmm)
library(raster)

## One telemetry object per individual, named by individual.local.identifier.
monarchs <- as.telemetry(ctmm)

# Fit and export an occurrence raster for every individual. This replaces the
# original copy-pasted per-individual code and fixes two bugs in it:
#  - the second individual ("ae") was fed the raw data frame `ctmm` instead of
#    the telemetry list `monarchs`;
#  - the projected raster was assigned to `OCCU.OCCU.ac2` but then plotted and
#    written as the undefined `OCCU.ac2`.
for (id in names(monarchs)) {
  tel <- monarchs[[id]]
  # Empirical variogram (kept for interactive inspection, as in the original).
  vg <- variogram(tel)
  # Guess a starting movement model, including location-error estimates.
  GUESS <- ctmm.guess(tel, interactive = FALSE)
  GUESS$error <- TRUE
  # Select the best-fitting model, then build the occurrence distribution.
  FIT <- ctmm.select(tel, GUESS)
  OCCU <- occurrence(tel, FIT)
  plot(OCCU)
  # 95% occurrence PMF, reprojected to UTM zone 15 and written as GeoTIFF
  # (one file per individual, e.g. "ac_occurrence.tif").
  occu_pmf <- raster(OCCU, level.UD = 0.95, level = 0.95, DF = "PMF")
  occu_utm <- projectRaster(occu_pmf, crs = CRS('+proj=utm +zone=15'))
  plot(occu_utm)
  writeRaster(occu_utm, paste0(id, '_occurrence.tif'), options = c('TFW=YES'))
}
a770fe613d043781c334c0ad195773f0f5be75dd
|
d4ffff9664af4a1b00d4cf80797f73579b5f00b0
|
/2chan/pfo_scripts/HCT116/full_compile_plots.R
|
290025a3742109a5829aa1bbf131c9d56c85ef4f
|
[] |
no_license
|
ezemiron/Chain
|
6ef15e35c299ba4c3995c54047769944e3b51a0f
|
743bcedd302e2476e1b93611fd7b40afe98c0649
|
refs/heads/master
| 2021-03-27T20:41:34.269451
| 2019-05-01T13:30:45
| 2019-05-01T13:30:45
| 115,116,869
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,834
|
r
|
full_compile_plots.R
|
library(ggplot2)

# Compiled per-cell segmentation results: one row per (cell, chromatin
# compaction class); columns include cell line, treatment, IF marker,
# class voxel counts (classnum/classnumn) and foci statistics (dist1*).
compile <- read.csv("/home/eze/Documents/papers/chrom_marks/results/distribution/full-compiled_June6.csv")
compile$dataset <- paste0(compile$cell, compile$condition)
print(compile$dataset)
# Drop the mClover-positive control marker and fix the plotting order of the
# remaining IF marks.
# BUG FIX: the original used `compile[-grep(...), ]`, which drops ALL rows
# when there is no match (negative indexing with integer(0)); `!grepl(...)`
# is a safe no-op in that case and identical otherwise.
compile <- compile[!grepl("mClover_pos", compile$marker), ]
compile$marker_f <- factor(compile$marker, levels=c("Smc3", "RNAPIIs2p", "H3K4me3", "H3K27me3", "H3K9me2"))

# Shared plotting constants: one color/label per dataset.
#COLORS=c("olivedrab1","darkolivegreen3", "cadetblue1", "cadetblue4")
#COLORS=c("palegreen1", "yellowgreen", "cadetblue1", "cadetblue4")
COLORS=c("olivedrab","olivedrab3","darkslategray","darkslategray4")
LABELS=c("Parental Aux (-)", "Parental Aux (+)", "Scc1mAID Aux (-)", "Scc1mAID Aux (+)")
BREAKS=c("HCT116ParentalAux0h", "HCT116ParentalAux2h", "HCT116Scc1mAIDAux0h", "HCT116Scc1mAIDAux2h")
TITLESIZE=18
TEXTSIZE=14

####################
## CELLS/DATASET ###
####################
datasets <- unique(compile$dataset)
# Number of cells per dataset: class == 1 occurs exactly once per cell.
# (The original preallocated `count` and filled it in a loop, then
# immediately overwrote it with this equivalent table() call; the dead loop
# is removed.)
count <- table(compile[which(compile$class==1), "dataset"])
## NUCLEAR VOLUME ##
####################
## NUCLEAR VOLUME ##
####################
# Integer id per imaged cell (unique combination of cell line, treatment,
# marker and cell number).
compile$cellID <- as.numeric(factor(paste0(compile$cell, compile$condition, compile$marker, compile$cellno)))
# Total nuclear voxels per cell = sum of voxel counts over its 7 classes,
# broadcast back onto every row belonging to that cell.
volume <- rep(0, max(compile$cellID))
# NOTE(review): assumes unique(compile$cellID) enumerates 1..max in order;
# the loop below indexes by position, so confirm the names line up.
names(volume) <- unique(compile$cellID)
compile$volume <- rep(0, nrow(compile))
for(i in 1:max(compile$cellID) ){
	volume[i] <- sum(compile[which(compile$cellID==i), "classnum"])
	compile[which(compile$cellID==i), "volume"] <- volume[i]
}
# Sanity checks (printed when run interactively, not asserted): totals agree.
sum(volume)==sum(compile$classnum)
sum(volume)==sum(compile[seq(from=1, to=nrow(compile), by=7), "volume"])
# One row per cell (class 1 carries the per-cell values).
per_cell <- compile[which(compile$class==1), ]
# Per-dataset mean nuclear volume with 95% CI (normal approximation,
# n = cells per dataset from `count`).
per_condit <- data.frame(dataset=datasets, vol_avg=rep(0, length(datasets)),
                         vol_sd=rep(0, length(datasets)), vol_upperCI=rep(0, length(datasets)),
                         vol_lowerCI=rep(0, length(datasets)), n=rep(0, length(datasets)) )
rownames(per_condit) <- per_condit$dataset
for(i in per_condit$dataset) {
	per_condit[i, "vol_avg"] <- mean(per_cell[which(per_cell$dataset == per_condit[i, "dataset"]), "volume"])
	per_condit[i, "vol_sd"] <- sd(per_cell[which(per_cell$dataset == per_condit[i, "dataset"]), "volume"])
	per_condit[i, "vol_upperCI"] <- per_condit[i, "vol_avg"] + 1.96 * per_condit[i, "vol_sd"] / sqrt(count[i])
	per_condit[i, "vol_lowerCI"] <- per_condit[i, "vol_avg"] - 1.96 * per_condit[i, "vol_sd"] / sqrt(count[i])
}
# Bar plot of mean nuclear volume per dataset with CI error bars.
p <- ggplot(per_condit, aes(x= dataset, y=vol_avg, fill=dataset)) +
  #xlab("cell type and treatment") +
  xlab("")+
  ylab("nuclear mask volume (voxels)") +
  theme(panel.background=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank()) +
  scale_fill_manual(values=COLORS, breaks=BREAKS, labels=LABELS)+
  geom_bar(position=position_dodge(0.9), stat="identity")+
  geom_errorbar(position=position_dodge(0.9), width=0.7,
                aes(ymin=vol_lowerCI, ymax=vol_upperCI))
pdf("/home/eze/Documents/papers/chrom_marks/results/plots/nuclearvolume_june6.pdf")
# NOTE(review): relies on auto-printing of `p`; use print(p) if this script
# is run via source().
p
dev.off()
####################
## N. FOCI #########
####################
test<- unlist(by(compile$classnum, compile$cellID, sum))
test==volume
foci <- unlist(by(compile$dist1t, compile$cellID, sum))
compile$total_foci <- rep(foci, each=7)
tester = 49
sum(compile[which(compile$cellID==tester), "dist1t"]) == foci[tester]
all(compile[which(compile$cellID==tester), "total_foci"]== foci[tester])
pdf("/home/eze/Documents/papers/chrom_marks/results/plots/foci_nonnormalized_june9.pdf")
ggplot(compile[which(compile$class=="1"),]) + ylab("total foci/image") +
geom_point(aes(x=marker_f, y=total_foci, color=dataset)) +
theme(panel.background=element_blank(), legend.key=element_blank(), axis.ticks.x=element_blank()) +
scale_color_manual(values=COLORS, breaks=BREAKS, labels=LABELS)
dev.off()
compile$foci_norm <- compile$total_foci/compile$volume
per_cell <- compile[which(compile$class==1),]
foci_comp <- data.frame(mean= c(by(per_cell$foci_norm, paste0(per_cell$dataset,"*", per_cell$marker), mean)),
std= c(by(per_cell$foci_norm, paste0(per_cell$dataset,"*", per_cell$marker), sd)),
n=c(by(per_cell$foci_norm, paste0(per_cell$dataset,"*", per_cell$marker), length)))
foci_comp$upperCI <- foci_comp$mean + 1.96*foci_comp$std/sqrt(foci_comp$n)
foci_comp$lowerCI <- foci_comp$mean - 1.96*foci_comp$std/sqrt(foci_comp$n)
foci_comp$marker <- unlist(strsplit(rownames(foci_comp), split="\\*"))[seq(from=2, to=nrow(foci_comp)*2,by=2)]
foci_comp$dataset <- unlist(strsplit(rownames(foci_comp), split="\\*"))[seq(from=1, to=nrow(foci_comp)*2, by=2)]
foci_comp$marker_f <- factor(foci_comp$marker, levels=c("Smc3", "RNAPIIs2p", "H3K4me3", "H3K27me3", "H3K9me2"))
p <- ggplot(foci_comp, aes(x=marker_f, y=mean, fill=dataset)) +
geom_bar(stat="identity", position=position_dodge(0.9) ) +
geom_errorbar(aes(ymin=lowerCI, ymax=upperCI), position=position_dodge(0.9), width=0.7) +
labs(x= "IF mark",
y="Foci number/Nuclear voxel",
fill="Cell type and treatment") +
scale_fill_manual(values=COLORS, breaks=BREAKS, labels=LABELS)+
theme(panel.background=element_blank())
pdf("/home/eze/Documents/papers/chrom_marks/results/plots/foci_june6.pdf")
p
dev.off()
p <- ggplot(foci_comp[grep("Aux2h", foci_comp$dataset),], aes(x=marker_f, y=mean, fill=dataset)) +
geom_bar(stat="identity", position=position_dodge(0.9) ) +
geom_errorbar(aes(ymin=lowerCI, ymax=upperCI), position=position_dodge(0.9), width=0.7) +
labs(x= "IF mark",
y="Foci number/Nuclear voxel",
fill="Cell type and treatment") +
scale_fill_manual(values=COLORS[c(2,4)], breaks=BREAKS, labels=LABELS)+
theme(panel.background=element_blank())
pdf("/home/eze/Documents/papers/chrom_marks/results/plots/foci_AUXpos_june6.pdf")
p
dev.off()
marker <- "Smc3"
marker <- "H3K4me3"
marker <- "H3K27me3"
marker <- "RNAPIIs2p"
marker <- "H3K9me2"
savename <- paste0("/home/eze/Documents/papers/chrom_marks/results/plots/foci_auxpos_", marker,".pdf")
foci_plot <- compile[which(compile$marker == marker),]
foci_plot <- foci_plot[grep("Aux2h", foci_plot$dataset), ]
pdf(savename)
ggplot(foci_plot[which(foci_plot$class=="1"),]) + ylab("total foci / image") + xlab("nuclear volume") +
ggtitle(marker) +
geom_point(aes(x=volume, y=total_foci, color=dataset), size=2.5) +
theme(panel.background=element_blank(), legend.key=element_blank(), axis.ticks.x=element_blank(),
axis.title=element_text(size=TITLESIZE), axis.text=element_text(size=TEXTSIZE),
legend.text=element_text(size=TEXTSIZE), legend.title=element_text(size=TITLESIZE),
legend.position="none") +
scale_color_manual(values=COLORS[c(2,4)], breaks=BREAKS, labels=LABELS)
dev.off()
####################
## CHROM COMPACT ###
####################
compact_mean <- c(unlist(by(compile$classnumn, paste0(compile$dataset, "_", compile$class), mean)))
compact_sd <- c(unlist(by(compile$classnumn, paste0(compile$dataset, "_", compile$class), sd)))
if( all(names(compact_mean) == names(compact_sd))) {
compaction <- data.frame(data=names(compact_mean), mean=compact_mean, sd=compact_sd)
compaction$class <- sapply(strsplit(as.character(compaction$data), "_"), "[[", 2)
compaction$dataset <- sapply(strsplit(as.character(compaction$data), "_"), "[[", 1)
compaction$n <- rep(0, nrow(compaction))
for(i in names(count)){
compaction[which(compaction$dataset==i), "n"] <- count[i]
}
compaction$upperCI <- compaction$mean + 1.96 * compaction$sd / sqrt(compaction$n)
compaction$lowerCI <- compaction$mean - 1.96 * compaction$sd / sqrt(compaction$n)
}
p <- ggplot(compaction, aes(x=class, y=mean, fill=dataset)) +
labs(x= "chromatin compaction class",
y= "nuclear proportion",
fill="Cell type and treatment") +
theme(panel.background=element_blank()) +
scale_fill_manual(values=COLORS, labels=LABELS, breaks=BREAKS)+
geom_bar(position=position_dodge(0.9), stat="identity")+
geom_errorbar(position=position_dodge(0.9), width=0.7,
aes(ymin=lowerCI, ymax=upperCI))
pdf("/home/eze/Documents/papers/chrom_marks/results/plots/compaction_plot_june6.pdf")
p
dev.off()
####################
## EFFECT OF AUX ###
####################
comp <- data.frame(class=unique(compaction$class),
Parental=rep(0,length(unique(compaction$class))), Scc1=rep(0, length(unique(compaction$class))))
rownames(comp) <- comp$class
for ( i in comp$class ){
keep <- compaction[which(keep$class==i),]
comp[i, "Parental"] <- 1- (keep[grep("ParentalAux0h", keep$data), "mean"] / keep[grep("ParentalAux2h", keep$data), "mean"])
comp[i, "Scc1"] <- 1- (keep[grep("Scc1mAIDAux0h", keep$data), "mean"] / keep[grep("Scc1mAIDAux2h", keep$data), "mean"])
}
library(reshape); COMP <- melt(comp)
pdf("/home/eze/Documents/papers/chrom_marks/results/plots/chromatincompaction_auxineffect.pdf")
ggplot(COMP, aes(x=class, y=value, fill=variable)) +
geom_bar(stat="identity", position=position_dodge(0.9)) +
labs( fill="cell type",
x ="chromatin compaction class",
y = "proportion in class Aux(+)/Aux(-)") +
theme(panel.background=element_blank(), legend.key=element_blank(),
axis.text=element_text(size=TEXTSIZE), axis.title=element_text(size=TITLESIZE),
legend.text=element_text(size=TEXTSIZE), legend.title=element_text(size=TITLESIZE) )
dev.off()
####################
### SINGLE BATCH ###
####################
batch <- compile[which(compile$marker=="H3K9me2"), ]
compact_mean <- c(unlist(by(batch$classnumn, paste0(batch$dataset, "_", batch$class), mean)))
compact_sd <- c(unlist(by(batch$classnumn, paste0(batch$dataset, "_", batch$class), sd)))
if( all(names(compact_mean) == names(compact_sd))) {
compaction <- data.frame(data=names(compact_mean), mean=compact_mean, sd=compact_sd)
compaction$class <- sapply(strsplit(as.character(compaction$data), "_"), "[[", 2)
compaction$dataset <- sapply(strsplit(as.character(compaction$data), "_"), "[[", 1)
compaction$n <- rep(0, nrow(compaction))
count_batch <- (table(batch[which(batch$class==1), "dataset"]))
for(i in names(count_batch)){
compaction[which(compaction$dataset==i), "n"] <- count_batch[i]
}
compaction$upperCI <- compaction$mean + 1.96 * compaction$sd / sqrt(compaction$n)
compaction$lowerCI <- compaction$mean - 1.96 * compaction$sd / sqrt(compaction$n)
}
p <- ggplot(compaction, aes(x=class, y=mean, fill=dataset)) +
labs(x= "chromatin compaction class",
y= "nuclear proportion",
fill="Cell type and treatment") +
ggtitle("chromatin compaction for slides processed in batch IF")+
theme(panel.background=element_blank()) +
scale_fill_manual(values=COLORS, labels=LABELS, breaks=BREAKS)+
geom_bar(position=position_dodge(0.9), stat="identity")+
geom_errorbar(position=position_dodge(0.9), width=0.7,
aes(ymin=lowerCI, ymax=upperCI))
pdf("/home/eze/Documents/papers/chrom_marks/results/plots/compaction_batch_plot_june6.pdf")
p
dev.off()
####################
## AUX vs NO-AUX ###
####################
compactAux_mean <- c(unlist(by(compile$classnumn, paste0(compile$condition, "_", compile$class), mean)))
compactAux_sd <- c(unlist(by(compile$classnumn, paste0(compile$condition, "_", compile$class), sd)))
if( all(names(compactAux_mean) == names(compactAux_sd))) {
compactionA <- data.frame(data=names(compactAux_mean), mean=compactAux_mean, sd=compactAux_sd)
compactionA$class <- sapply(strsplit(as.character(compactionA$data), "_"), "[[", 2)
compactionA$dataset <- sapply(strsplit(as.character(compactionA$data), "_"), "[[", 1)
}
p <- ggplot(compactionA, aes(x=class, y=mean, fill=dataset)) +
labs(x= "chromatin compaction class",
y= "nuclear proportion",
fill="Cell treatment") +
theme(panel.background=element_blank(), legend.position=c(0.8,0.85),
axis) +
ggtitle("Effect of Auxin-treatment, both cell types") +
scale_fill_manual(values=c("red1", "red4"), labels=c("Aux (+)", "Aux (-)"), breaks=c("Aux2h", "Aux0h"))+
geom_bar(position=position_dodge(0.9), stat="identity")+
geom_errorbar(position=position_dodge(0.9), width=0.7,
aes(ymin=mean-sd, ymax=mean+sd))
pdf("/home/eze/Documents/papers/chrom_marks/results/plots/compaction_AuxvsNoAux2.pdf")
p
dev.off()
####################
### Aux(+), P v S ##
####################
p <- ggplot(compaction[grep("Aux2h", compaction$data),], aes(x=class, y=mean, fill=dataset)) +
labs(x= "chromatin compaction class",
y= "nuclear proportion",
fill="Cell type and treatment") +
theme(panel.background=element_blank(), legend.position=c(0.8,0.85)) +
scale_fill_manual(values=COLORS[c(2,4)], labels=LABELS, breaks=BREAKS)+
geom_bar(position=position_dodge(0.9), stat="identity")+
geom_errorbar(position=position_dodge(0.9), width=0.7,
aes(ymin=lowerCI, ymax=upperCI))
pdf("/home/eze/Documents/papers/chrom_marks/results/plots/compaction_Auxpos.pdf")
p
dev.off()
####################
### AUX+ comp'rsn ##
####################
compile2 <-compile[which(compile$dataset %in% c("HCT116Scc1mAIDAux2h", "HCT116ParentalAux2h")),]
compactSvP_mean <- c(unlist(by(compile2$classnumn, paste0(compile2$cell, "_", compile2$class), mean)))
compactSvP_sd <- c(unlist(by(compile2$classnumn, paste0(compile2$cell, "_", compile2$class), sd)))
if( all(names(compactSvP_mean) == names(compactSvP_sd))) {
compactionS <- data.frame(data=names(compactSvP_mean), mean=compactSvP_mean, sd=compactSvP_sd)
compactionS$class <- sapply(strsplit(as.character(compactionS$data), "_"), "[[", 2)
compactionS$dataset <- sapply(strsplit(as.character(compactionS$data), "_"), "[[", 1)
}
p <- ggplot(compactionS, aes(x=class, y=mean, fill=dataset)) +
labs(x= "chromatin compaction class",
y= "nuclear proportion",
fill="cell type, auxin treated") +
theme(panel.background=element_blank()) +
ggtitle("Comparison of Auxin-treated cells") +
scale_fill_manual(values=COLORS[c(2,4)])+
geom_bar(position=position_dodge(0.9), stat="identity")+
geom_errorbar(position=position_dodge(0.9), width=1,
aes(ymin=mean-sd, ymax=mean+sd))
pdf("/home/eze/Documents/papers/chrom_marks/results/plots/compaction_AuxTxt.pdf")
p
dev.off()
####################
## IF DISTRIBUT ####
####################
focidist_mean <- c(unlist(by(compile$dist1n, paste0(compile$dataset, "_", compile$class, "_", compile$marker), mean)))
focidist_sd <- c(unlist(by(compile$dist1n, paste0(compile$dataset, "_", compile$class, "_", compile$marker), sd)))
focidist_AvLogNorm <- c(unlist(by(compile$lognorm1, paste0(compile$dataset, "_", compile$class, "_", compile$marker), mean)))
if( all(names(focidist_mean) == names(focidist_sd))) {
focidist <- data.frame(data=names(focidist_mean), AvLogNorm=focidist_AvLogNorm, mean=focidist_mean, sd=focidist_sd)
focidist$class <- sapply(strsplit(as.character(focidist$data), "_"), "[[", 2)
focidist$dataset <- sapply(strsplit(as.character(focidist$data), "_"), "[[", 1)
focidist$marker <- sapply(strsplit(as.character(focidist$data), "_"), "[[", 3)
focidist$n <- rep(0, nrow(focidist))
for(i in names(count)){
focidist[which(focidist$dataset==i), "n"] <- count[i]
}
focidist$upperCI <- focidist$mean + 1.96 * focidist$sd / sqrt(focidist$n)
focidist$lowerCI <- focidist$mean - 1.96 * focidist$sd / sqrt(focidist$n)
}
focidist$uprLogErr <- sqrt((focidist$AvLogNorm-log2(focidist$upperCI))^2)
focidist$lowLogErr <- sqrt((focidist$AvLogNorm-log2(focidist$lowerCI))^2)
for(marker in unique(focidist$marker)) {
savename <- paste0("/home/eze/Documents/papers/chrom_marks/results/plots/focidist_", marker, ".pdf")
pdf(savename)
ggplot(focidist[which(focidist$marker == marker),],
aes(x=class, y=AvLogNorm, fill=dataset)) +
labs(x= "chromatin compaction class",
y= "foci distribution",
title = marker,
fill="Cell type and treatment") +
theme(panel.background=element_blank()) +
scale_fill_manual(values=COLORS, breaks=BREAKS, labels=LABELS)+
geom_bar(position=position_dodge(0.9), stat="identity")+
geom_errorbar(position=position_dodge(0.9), width=0.7,
aes(ymin=AvLogNorm - lowLogErr, ymax=AvLogNorm + uprLogErr))
dev.off()
savename2 <- paste0("/home/eze/Documents/papers/chrom_marks/results/plots/focidist_Auxpos_", marker, ".pdf")
pdf(savename2)
auxpos <- focidist[grep("Aux2h", focidist$dataset),]
ggplot(auxpos[which(auxpos$marker == marker),],
aes(x=class, y=mean, fill=dataset)) +
labs(x= "chromatin compaction class",
y= "foci distribution",
title = marker,
fill="Cell type and treatment") +
theme(panel.background=element_blank(),legend.position=c(0.75, 0.9),
legend.title=element_text(size=TITLESIZE), legend.text=element_text(size=TEXTSIZE),
axis.title=element_text(size=TITLESIZE), axis.text=element_text(size=TEXTSIZE)) +
scale_fill_manual(values=COLORS[c(2,4)], breaks=BREAKS, labels=LABELS)+
geom_bar(position=position_dodge(0.9), stat="identity")+
geom_errorbar(position=position_dodge(0.9), width=0.7,
aes(ymin=lowerCI, ymax=upperCI))
dev.off()
}
#### STATS: 'effect size'??
#### change in each cell line w.r.t aux: (parental 0aux - parental 2aux) vs (scc1maid 0aux - scc1maid 2aux)
#### what has a bigger effect size: auxin (in population of Scc1maid cell line) or cell type (in population of auxin-treated cells)
|
a2f2b039f098533ced116615d230614f80c487c3
|
96dc2a1cca9a616b619511588f0589e08e79b541
|
/R/methods.R
|
ce7dfb9a3f8b6b40b8a213573fdbf83b23d3c4e6
|
[] |
no_license
|
amnahsiddiqa/ropls
|
03959d01ffb2c9e9d4be876f933dc0127e619458
|
3b1912056b23ca4ac3640d1b14ab91c92e7e2b6e
|
refs/heads/master
| 2021-09-19T18:30:38.990273
| 2018-06-25T18:44:51
| 2018-06-25T18:44:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 57,709
|
r
|
methods.R
|
#' @rdname opls
#' @export
setMethod("opls", signature(x = "ExpressionSet"),
    function(x, y = NULL, ...) {
        ## opls() expects samples as rows, so transpose the expression matrix.
        dataMatrix <- t(exprs(x))
        if (is.null(y)) {
            ## Unsupervised case: PCA on the transposed expression data.
            model <- opls(dataMatrix, ...)
        } else if (!is.character(y)) {
            stop("'y' must be a character when the 'opls' method is applied to an 'ExpressionSet' instance")
        } else {
            ## 'y' names a sample-metadata column to use as the response.
            sampleMetaDF <- phenoData(x)@data
            if (!(y %in% colnames(sampleMetaDF)))
                stop("'y' must be the name of a column of the sampleMetadata slot of the 'ExpressionSet' instance")
            model <- opls(dataMatrix, sampleMetaDF[, y], ...)
        }
        model
    })
#' @rdname opls
#' @export
setMethod("opls", signature(x = "data.frame"),
    function(x, ...) {
        ## Refuse data frames with any non-numeric column before coercion,
        ## since as.matrix() would otherwise silently produce a character matrix.
        columnClassVc <- sapply(x, data.class)
        if (any(columnClassVc != "numeric"))
            stop("'x' data frame must contain columns of 'numeric' vectors only", call. = FALSE)
        model <- opls(as.matrix(x), ...)
        model
    })
#' @rdname opls
#' @export
setMethod("opls", signature(x = "matrix"),
    function(x,
             y = NULL,
             predI = NA,
             orthoI = 0,
             algoC = c("default", "nipals", "svd")[1],
             crossvalI = 7,
             log10L = FALSE,
             permI = 20,
             scaleC = c("none", "center", "pareto", "standard")[4],
             subset = NULL,
             printL = TRUE,
             plotL = TRUE,
             .sinkC = NULL,
             ...) {

        ## Workhorse method: builds a PCA, PLS(-DA) or OPLS(-DA) model from a
        ## numeric matrix of samples (rows) x variables (columns) and an
        ## optional response 'y' (vector, factor, or matrix). The actual
        ## decomposition and cross-validation are delegated to .coreF.

        if(!is.null(.sinkC)) ## Diversion of messages is required for the integration into Galaxy
            sink(.sinkC, append = TRUE)

        ## Checking arguments
        ##-------------------

        ## x -> xMN: coerce/validate the predictor block into a numeric matrix
        if(is.data.frame(x)) {
            if(!all(sapply(x, data.class) == "numeric")) {
                stop("'x' data frame must contain columns of 'numeric' vectors only", call. = FALSE)
            } else
                x <- as.matrix(x)
        } else if(is.matrix(x)) {
            if(mode(x) != "numeric")
                stop("'x' matrix must be of 'numeric' mode", call. = FALSE)
        } else
            stop("'x' must be either a data.frame or a matrix", call. = FALSE)

        ## Columns made entirely of NAs cannot be scaled or modeled
        if(any(apply(x, 2, function(colVn) all(is.na(colVn)))))
            stop("'x' contains columns with 'NA' only", call. = FALSE)

        xMN <- x

        ## y -> yMCN: normalize the response into a single- or multi-column
        ## matrix; factor levels are remembered (yLevelVc) so that the class
        ## ordering is preserved downstream
        yLevelVc <- NULL

        if(!is.null(y)) {

            if(is.vector(y)) {
                if(!(mode(y) %in% c("character", "numeric")))
                    stop("'y' vector must be of 'character' or 'numeric' type", call. = FALSE)
                if(length(y) != nrow(xMN))
                    stop("'y' vector length must be equal to the number of rows of 'x'", call. = FALSE)
                yMCN <- matrix(y, ncol = 1)
            } else if(is.factor(y)) {
                if(length(y) != nrow(xMN))
                    stop("'y' factor length must be equal to the number of rows of 'x'", call. = FALSE)
                yLevelVc <- levels(y)
                yMCN <- matrix(as.character(y), ncol = 1)
            } else if(is.matrix(y)) {
                if(!(mode(y) %in% c("character", "numeric")))
                    stop("'y' matrix must be of 'character' or 'numeric' type", call. = FALSE)
                if(nrow(y) != nrow(xMN))
                    stop("'x' and 'y' must have the same number of rows", call. = FALSE)
                if(ncol(y) > 1 && mode(y) != "numeric")
                    stop("Multiple response 'y' matrices must be of numeric type", call. = FALSE)
                yMCN <- y
            } else
                stop("'y' must be either a vector, a factor, or a matrix", call. = FALSE)

        } else
            yMCN <- NULL

        ## NA in Y only possible for multi-response regression (i.e., Y is a numeric matrix)
        if(!is.null(yMCN) &&
           ncol(yMCN) == 1 &&
           any(is.na(drop(yMCN))))
            stop("In case of single response modeling, 'y' must not contain missing values", call. = FALSE)

        if(!is.logical(log10L))
            stop("'log10L' must be a logical", call. = FALSE)

        ## permI must be a non-negative whole number (tolerance for doubles)
        if(permI < 0 || (permI - floor(permI)) > 1e-10)
            stop("'permI' must be an integer", call. = FALSE)

        ## Permutation testing only applies to single-response models
        if(permI > 0 && (is.null(yMCN) || ncol(yMCN) > 1)) {
            ## warning("Permutation testing available for single response (O)PLS(-DA) models only", call. = FALSE)
            permI <- 0
        }

        if(permI > 0 && !is.null(subset)) {
            permI <- 0
            warning("'permI' set to 0 because train/test partition is selected", call. = FALSE)
        }

        if(!(algoC %in% c('default', 'nipals', 'svd')))
            stop("'algoC' must be either 'default', 'nipals', or 'svd'", call. = FALSE)

        ## 'svd' is only usable without a response and without NAs;
        ## otherwise fall back to 'nipals'
        if(algoC == "default")
            algoC <- ifelse(is.null(yMCN) && !any(is.na(c(xMN))), "svd", "nipals")

        if(!is.null(yMCN) && algoC != "nipals")
            stop("'nipals' algorithm must be used for (O)PLS(-DA)", call. = FALSE)

        ## Orthogonal components (OPLS) require a response
        if((is.na(orthoI) || orthoI > 0) && is.null(yMCN))
            stop("'y' response cannot be NULL for OPLS(-DA) modeling", call. = FALSE)

        if(!is.null(yMCN)) {
            if(is.na(orthoI) || orthoI > 0) {
                if(ncol(yMCN) > 1) {
                    stop("OPLS regression only available for a single 'y' response", call. = FALSE)
                } else if(mode(yMCN) == "character" && length(unique(drop(yMCN))) > 2)
                    stop("OPLS-DA only available for binary classification (use PLS-DA for multiple classes)", call. = FALSE)
            }
        }

        ## OPLS models use exactly one predictive component by construction
        if(is.na(orthoI) || orthoI > 0)
            if(is.na(predI) || predI > 1) {
                predI <- 1
                warning("OPLS: number of predictive components ('predI' argument) set to 1", call. = FALSE)
            }

        if(!is.na(predI) && !is.na(orthoI) && ((predI + orthoI) > min(dim(xMN))))
            stop("The sum of 'predI' (", predI, ") and 'orthoI' (", orthoI, ") exceeds the minimum dimension of the 'x' data matrix (", min(dim(xMN)), ")" , call. = FALSE)

        if(!(length(scaleC) == 1 && scaleC %in% c('none', 'center', 'pareto', 'standard')))
            stop("'scaleC' must be either 'none', 'center', 'pareto', or 'standard'", call. = FALSE)

        if(!is.null(subset) && (is.null(yMCN) || ncol(yMCN) > 1))
            stop("train/test partition with 'subset' only available for (O)PLS(-DA) models of a single 'y' response", call. = FALSE)

        if(!is.null(subset) &&
           !(mode(subset) == 'character' && subset == 'odd') &&
           !all(subset %in% 1:nrow(xMN)))
            stop("'subset' must be either set to 'odd' or an integer vector of 'x' row numbers", call. = FALSE)

        if(crossvalI > nrow(xMN))
            stop("'crossvalI' must be less than the row number of 'x'", call. = FALSE)

        ## Constants
        ##----------

        ## Threshold below which a variance is treated as zero
        epsN <- .Machine[["double.eps"]] ## [1] 2.22e-16

        ## Character to numeric convertion function (for classification)
        ##--------------------------------------------------------------

        ## .char2numF converts class labels to the numeric coding used
        ## internally (0/1 column for binary, one dummy column per class
        ## otherwise) and back (c2nL = FALSE)
        if(!is.null(yMCN) && mode(yMCN) == "character") {

            if(!is.null(yLevelVc)) {
                claVc <- yLevelVc
            } else
                claVc <- sort(unique(drop(yMCN)))

            if(length(claVc) == 2) {
                ## binary response kept as a single vector for OPLS-DA computations
                .char2numF <- function(inpMCN,
                                       c2nL = TRUE) {
                    if(c2nL) {
                        outMCN <- inpMCN == claVc[2]
                        mode(outMCN) <- "numeric"
                    } else {
                        ## back-conversion: > 0.5 maps to the second class
                        outMCN <- matrix(claVc[as.numeric(inpMCN > 0.5) + 1],
                                         ncol = 1,
                                         dimnames = dimnames(inpMCN))
                    }
                    return(outMCN)
                }
            } else
                .char2numF <- function(inpMCN,
                                       c2nL = TRUE) {
                    if(c2nL) {
                        ## one dummy column per class
                        outMCN <- t(sapply(drop(inpMCN),
                                           function(claC) as.numeric(claVc == claC)))
                        colnames(outMCN) <- claVc
                    } else {
                        ## back-conversion: class with the highest score wins
                        outMCN <- t(t(apply(inpMCN, 1,
                                            function(rowVn) claVc[which(rowVn == max(rowVn))[1]])))
                        colnames(outMCN) <- "y1"
                    }
                    return(outMCN)
                }
        } else
            .char2numF <- NULL

        ##------------------------------------
        ## Computations
        ##------------------------------------

        ## rownames and colnames: ensure both matrices are fully named so that
        ## scores/loadings stay traceable to samples and variables
        if(is.null(rownames(xMN)))
            if(!is.null(yMCN) && !is.null(rownames(yMCN))) {
                rownames(xMN) <- rownames(yMCN)
            } else
                rownames(xMN) <- paste0("s", 1:nrow(xMN))
        if(is.null(colnames(xMN)))
            colnames(xMN) <- paste0("x", 1:ncol(xMN))

        if(!is.null(yMCN)) {
            if(is.null(rownames(yMCN)))
                rownames(yMCN) <- rownames(xMN)
            if(is.null(colnames(yMCN)))
                colnames(yMCN) <- paste0("y", 1:ncol(yMCN))
        }

        ## Log10 transformation
        ##---------------------

        if(log10L)
            xMN <- .log10F(xMN)

        ## Test indices
        ##-------------

        ## 'subset' selects the training samples; "odd" takes every second
        ## sample (per class for discriminant analysis, to keep classes
        ## represented in both partitions)
        obsIniVi <- 1:nrow(xMN)  ## NOTE(review): not used later in this method

        if(!is.null(subset)) {
            subsetL <- TRUE
            if(length(subset) == 1 && subset == "odd") {
                if(mode(yMCN) == "numeric")
                    subsetVi <- seq(1, nrow(xMN), by = 2)
                else {
                    subsetVi <- integer()
                    for(claC in unique(drop(yMCN)))
                        subsetVi <- c(subsetVi,
                                      which(drop(yMCN) == claC)[seq(1, sum(drop(yMCN) == claC), by = 2)])
                    subsetVi <- sort(subsetVi)
                }
            } else
                subsetVi <- subset

            if(crossvalI > length(subsetVi))
                stop("'crossvalI' must be less than the number of samples in the subset", call. = FALSE)

        } else {
            subsetL <- FALSE
            subsetVi <- numeric()
        }

        ## Filtering out zero variance variables
        ##--------------------------------------

        ## Variance is computed on the training rows (when a subset is used)
        xVarIndLs <- list()
        xVarIndLs[[1]] <- 1:nrow(xMN)
        if(subsetL) {
            xVarIndLs[[1]] <- subsetVi
        } ## else if(!is.null(yMCN) && ncol(yMCN) == 1 && nrow(xMN) >= 2 * crossvalI)
        ##     for(cvkN in 1:crossvalI)
        ##         xVarIndLs <- c(xVarIndLs, list(setdiff(1:nrow(xMN), cvaOutLs[[cvkN]])))

        xVarVarLs <- lapply(xVarIndLs,
                            function(xVarVi) {
                                apply(xMN[xVarVi, , drop = FALSE],
                                      2,
                                      function(colVn) var(colVn, na.rm = TRUE))
                            })

        xZeroVarVi <- integer()
        for(k in 1:length(xVarVarLs))
            xZeroVarVi <- union(xZeroVarVi, which(xVarVarLs[[k]] < epsN))

        if(length(xZeroVarVi) > 0) {
            names(xZeroVarVi) <- colnames(xMN)[xZeroVarVi]
            xMN <- xMN[, -xZeroVarVi, drop = FALSE]
            warning("The variance of the ",
                    length(xZeroVarVi),
                    " following variables is less than ",
                    signif(epsN, 2),
                    " in the full or partial (cross-validation) dataset: these variables will be removed:\n",
                    paste(names(xZeroVarVi), collapse = ", "),
                    call. = FALSE)
        }

        ## Core
        ##-----

        ## .coreF performs the actual decomposition and cross-validation and
        ## returns an 'opls' object whose slots are filled in below
        opl <- .coreF(xMN = xMN,
                      yMCN = yMCN,
                      orthoI = orthoI,
                      predI = predI,
                      scaleC = scaleC,
                      algoC = algoC,
                      crossvalI = crossvalI,
                      subsetL = subsetL,
                      subsetVi = subsetVi,
                      .char2numF = .char2numF)

        opl@suppLs[["y"]] <- y

        ## Derive the model type string ("PCA", "PLS", "PLS-DA", with an "O"
        ## prefix when orthogonal components were fitted)
        if(is.null(opl@suppLs[["yMCN"]])) {
            opl@typeC <- "PCA"
        } else {
            if(ncol(opl@suppLs[["yMCN"]]) > 1 || mode(opl@suppLs[["yMCN"]]) == "numeric")
                opl@typeC <- "PLS"
            else
                opl@typeC <- "PLS-DA"
        }
        if(opl@summaryDF[, "ort"] > 0)
            opl@typeC <- paste("O", opl@typeC, sep = "")

        opl@xZeroVarVi <- xZeroVarVi
        ## opl@suppLs[["yLevelVc"]] <- yLevelVc

        ## Permutation testing (Szymanska et al, 2012)

        if(permI > 0) {

            modSumVc <- colnames(opl@summaryDF)

            ## Row 1 holds the actual (unpermuted) model; rows 2..permI+1 hold
            ## models refitted after random permutation of the response
            permMN <- matrix(0,
                             nrow = 1 + permI,
                             ncol = length(modSumVc),
                             dimnames = list(NULL, modSumVc))

            perSimVn <- numeric(1 + permI)
            perSimVn[1] <- 1

            permMN[1, ] <- as.matrix(opl@summaryDF)

            for(k in 1:permI) {

                yVcn <- drop(opl@suppLs[["yMCN"]])

                if(!subsetL) {
                    yPerVcn <- sample(yVcn)
                } else {
                    ## Only the training labels are permuted; test labels stay fixed
                    yPerVcn <- numeric(nrow(xMN))
                    refVi <- opl@subsetVi
                    tesVi <- setdiff(1:nrow(xMN), refVi)
                    yPerVcn[refVi] <- sample(yVcn[refVi])
                    yPerVcn[tesVi] <- yVcn[tesVi]
                }

                yPerMCN <- matrix(yPerVcn, ncol = 1)

                perOpl <- .coreF(xMN = xMN,
                                 yMCN = yPerMCN,
                                 orthoI = opl@summaryDF[, "ort"],
                                 predI = opl@summaryDF[, "pre"],
                                 scaleC = scaleC,
                                 algoC = algoC,
                                 crossvalI = crossvalI,
                                 subsetL = subsetL,
                                 subsetVi = opl@subsetVi,
                                 .char2numF = .char2numF)

                permMN[1 + k, ] <- as.matrix(perOpl@summaryDF)

                ## Similarity between the permuted and original responses
                perSimVn[1 + k] <- .similarityF(opl@suppLs[["yMCN"]], yPerMCN,
                                                .char2numF = .char2numF,
                                                charL = mode(opl@suppLs[["yMCN"]]) == "character")

            }

            permMN <- cbind(permMN, sim = perSimVn)

            ## Empirical p-values: fraction of permuted models performing at
            ## least as well as the actual model (with a +1 pseudo-count)
            perPvaVn <- c(pR2Y = (1 + length(which(permMN[-1, "R2Y(cum)"] >= permMN[1, "R2Y(cum)"]))) / (nrow(permMN) - 1),
                          pQ2 = (1 + length(which(permMN[-1, "Q2(cum)"] >= permMN[1, "Q2(cum)"]))) / (nrow(permMN) - 1))
            opl@summaryDF[, "pR2Y"] <- perPvaVn["pR2Y"]
            opl@summaryDF[, "pQ2"] <- perPvaVn["pQ2"]

            opl@suppLs[["permMN"]] <- permMN

        }

        ##------------------------------------
        ## Numerical results
        ##------------------------------------

        ## Dataset description used by the show/print methods
        opl@descriptionMC <- rbind(samples = ifelse(!subsetL,
                                                    nrow(xMN),
                                                    length(subsetVi)),
                                   X_variables = ncol(xMN),
                                   near_zero_excluded_X_variables = length(opl@xZeroVarVi))

        totN <- length(c(xMN))
        nasN <- sum(is.na(c(xMN)))

        if(!is.null(opl@suppLs[["yMCN"]])) {
            opl@descriptionMC <- rbind(opl@descriptionMC,
                                       Y_variables = ncol(opl@suppLs[["yMCN"]]))
            totN <- totN + length(c(opl@suppLs[["yMCN"]]))
            nasN <- nasN + sum(is.na(c(opl@suppLs[["yMCN"]])))
        }

        opl@descriptionMC <- rbind(opl@descriptionMC,
                                   missing_values = paste0(nasN, " (", round(nasN / totN * 100), "%)"))

        ## Raw summary
        ##------------

        ## Keep a small sample of raw variables (spaced along increasing
        ## variance) for the print method's data overview
        opl@suppLs[["topLoadI"]] <- 3

        if(ncol(xMN) > opl@suppLs[["topLoadI"]]) {
            xVarVn <- apply(xMN, 2, var)
            names(xVarVn) <- 1:length(xVarVn)
            xVarVn <- sort(xVarVn)
            xVarSorVin <- as.numeric(names(xVarVn[seq(1, length(xVarVn), length = opl@suppLs[["topLoadI"]])]))
            opl@suppLs[["xSubIncVarMN"]] <- xMN[, xVarSorVin, drop = FALSE]
        } else
            opl@suppLs[["xSubIncVarMN"]] <- xMN

        ## Pairwise correlations, reduced to the strongest entries; only
        ## computed when the number of variables is small (<= 100)
        if(ncol(xMN) <= 100) {

            xCorMN <- cor(xMN, use = "pairwise.complete.obs")
            xCorMN[lower.tri(xCorMN, diag = TRUE)] <- 0

            if(ncol(xMN) > opl@suppLs[["topLoadI"]]) {

                xCorNexDF <- which(abs(xCorMN) >= sort(abs(xCorMN), decreasing = TRUE)[opl@suppLs[["topLoadI"]] + 1],
                                   arr.ind = TRUE)

                ## Diagonal display matrix of the top correlations, with the
                ## involved variable pairs as row/column names
                xCorDisMN <- matrix(0,
                                    nrow = nrow(xCorNexDF),
                                    ncol = nrow(xCorNexDF),
                                    dimnames = list(colnames(xMN)[xCorNexDF[, "row"]],
                                                    colnames(xMN)[xCorNexDF[, "col"]]))

                for(k in 1:nrow(xCorDisMN))
                    xCorDisMN[k, k] <- xCorMN[xCorNexDF[k, "row"], xCorNexDF[k, "col"]]

            } else
                xCorDisMN <- xCorMN

            opl@suppLs[["xCorMN"]] <- xCorDisMN

            rm(xCorDisMN)

        }

        ## Printing
        ##---------

        if(printL) {
            show(opl)
            warnings()
        }

        ## Plotting
        ##---------

        if(plotL)
            plot(opl, typeVc = "summary")

        ## Closing connection
        ##-------------------

        if(!is.null(.sinkC)) ## Used in the Galaxy module
            sink()

        ## Returning
        ##----------

        return(invisible(opl))

    })
#' Show method for 'opls' objects
#'
#' Prints a short overview of the model: type, data dimensions, scaling,
#' missing values, excluded variables, and the model summary statistics.
#'
#' @aliases show.opls show,opls-method
#' @param object An S4 object of class \code{opls}, created by the \code{opls}
#' function.
#' @return Invisible.
#' @author Philippe Rinaudo and Etienne Thevenot (CEA)
#' @examples
#'
#' data(sacurine)
#' attach(sacurine)
#' sacurine.plsda <- opls(dataMatrix, sampleMetadata[, "gender"])
#'
#' show(sacurine.plsda)
#'
#' detach(sacurine)
#'
#' @rdname show
#' @export
setMethod("show", "opls",
    function(object) {

        descMC <- object@descriptionMC
        scalC <- object@suppLs[["scaleC"]]
        yRespMCN <- object@suppLs[["yMCN"]]

        ## Model type (e.g. "PCA", "PLS-DA", "OPLS-DA")
        cat(object@typeC, "\n", sep = "")

        ## Data dimensions; (O)PLS(-DA) models also report the response count
        respC <- ""
        if (grepl("PLS", object@typeC))
            respC <- paste0(" and ", ncol(yRespMCN), " response",
                            if (ncol(yRespMCN) > 1) "s" else "")
        cat(descMC["samples", ], " samples x ", descMC["X_variables", ],
            " variables", respC, "\n", sep = "")

        ## Scaling description; character responses are standard-scaled
        ## internally even when the predictors are not
        scaleRespC <- ""
        if (object@typeC != "PCA") {
            stdPrefC <- ""
            if (mode(yRespMCN) == "character" && scalC != "standard")
                stdPrefC <- "standard scaling of "
            scaleRespC <- paste0(" and ", stdPrefC, "response(s)")
        }
        cat(scalC, " scaling of predictors", scaleRespC, "\n", sep = "")

        ## Only mention NAs / excluded variables when their count is non-zero
        if (substr(descMC["missing_values", ], 1, 1) != "0")
            cat(descMC["missing_values", ], " NAs\n", sep = "")
        if (substr(descMC["near_zero_excluded_X_variables", ], 1, 1) != "0")
            cat(descMC["near_zero_excluded_X_variables", ],
                " excluded variables (near zero variance)\n", sep = "")

        ## Display the summary with 3 significant digits, then restore the
        ## user's 'digits' option
        userDigN <- options()[["digits"]]
        options(digits = 3)
        print(object@summaryDF)
        options(digits = userDigN)

    }) ## show
#' Print method for 'opls' objects
#'
#' Displays information about the dataset and the model.
#'
#' @aliases print.opls print,opls-method
#' @param x An S4 object of class \code{opls}, created by the \code{opls}
#' function.
#' @param ... Currently not used.
#' @return Invisible.
#'
#' @author Etienne Thevenot, \email{etienne.thevenot@@cea.fr}
#' @examples
#'
#' data(sacurine)
#' attach(sacurine)
#' sacurine.plsda <- opls(dataMatrix, sampleMetadata[, "gender"])
#'
#' print(sacurine.plsda)
#'
#' detach(sacurine)
#'
#' @rdname print
#' @export
setMethod("print", "opls",
    function(x, ...) {

        ## Part 1: dataset description (dimensions, NAs, scaling, raw summary)
        cat("\n1) Data set:\n", sep = "")

        cat(x@descriptionMC["samples", ],
            " samples x ",
            x@descriptionMC["X_variables", ],
            " variables",
            ifelse(grepl("PLS", x@typeC),
                   paste0(" and ", ncol(x@suppLs[["yMCN"]]), " response", ifelse(ncol(x@suppLs[["yMCN"]]) > 1, "s", "")),
                   ""),
            "\n", sep = "")

        cat(x@descriptionMC["missing_values", ], " NAs\n", sep = "")
        cat(x@descriptionMC["near_zero_excluded_X_variables", ], " excluded variables (near zero variance)\n", sep = "")
        cat(x@suppLs[["scaleC"]], " x", ifelse(x@typeC != "PCA", " and y", ""), " scaling\n", sep = "")

        ## A few raw variables sampled along increasing variance (see the
        ## 'xSubIncVarMN' slot filled in by the opls matrix method)
        cat("Summary of the ", x@suppLs[["topLoadI"]], " increasing variance spaced raw variables:\n", sep = "")
        print(summary(x@suppLs[["xSubIncVarMN"]]))

        if(!is.null(x@suppLs[["xCorMN"]])) {
            cat("Correlations between the X-variables:\n")
            print(signif(x@suppLs[["xCorMN"]], 2))
            cat("\n", sep = "")
        }

        ## Part 2: model description
        cat("\n2) Model: ", x@typeC, "\n", sep = "")
        cat("Correlations between variables and first 2 components:\n", sep = "")

        ## Choose the two components to report: first predictive + first
        ## orthogonal for OPLS, first two predictive otherwise; fall back to
        ## the single available component when the model has only one
        if(x@summaryDF[, "pre"] + x@summaryDF[, "ort"] < 2) {
            warning("A single component model has been selected by cross-validation", call. = FALSE)
            tCompMN <- x@scoreMN
            pCompMN <- x@loadingMN
        } else {
            if(x@summaryDF[, "ort"] > 0) {
                tCompMN <- cbind(x@scoreMN[, 1], x@orthoScoreMN[, 1])
                pCompMN <- cbind(x@loadingMN[, 1], x@orthoLoadingMN[, 1])
                colnames(pCompMN) <- colnames(tCompMN) <- c("h1", "o1")
            } else {
                tCompMN <- x@scoreMN[, 1:2]
                pCompMN <- x@loadingMN[, 1:2]
            }
        }

        ## Correlations between each variable and the selected score vectors
        cxtCompMN <- cor(x@suppLs[["xModelMN"]], tCompMN,
                         use = "pairwise.complete.obs")

        ## When there are many variables, keep only the 'topLoadI' most
        ## negative and most positive loadings of each component
        if(x@suppLs[["topLoadI"]] * 4 < ncol(x@suppLs[["xModelMN"]])) {

            pexVi <- integer(x@suppLs[["topLoadI"]] * ncol(pCompMN) * 2) ## 'ex'treme values

            for(k in 1:ncol(pCompMN)) {
                pkVn <- pCompMN[, k]
                pexVi[1:(2 * x@suppLs[["topLoadI"]]) + 2 * x@suppLs[["topLoadI"]] * (k - 1)] <- c(order(pkVn)[1:x@suppLs[["topLoadI"]]],
                                                                                                  rev(order(pkVn, decreasing = TRUE)[1:x@suppLs[["topLoadI"]]]))
            }
        } else
            pexVi <- 1:ncol(x@suppLs[["xModelMN"]])

        ## Loadings side-by-side with their score correlations
        pxtCompMN <- cbind(pCompMN,
                           cxtCompMN)

        if(ncol(pCompMN) == 1) {
            colnames(pxtCompMN)[2] <- paste0("cor_", colnames(pxtCompMN)[2])
        } else
            colnames(pxtCompMN)[3:4] <- paste0("cor_", colnames(pxtCompMN)[3:4])

        topLoadMN <- pxtCompMN

        topLoadMN <- topLoadMN[pexVi, , drop = FALSE]

        ## Blank out the columns of the component a row was NOT selected for,
        ## so each row only shows figures for its own component
        if(x@suppLs[["topLoadI"]] * 4 < ncol(x@suppLs[["xModelMN"]]) &&
           ncol(pCompMN) > 1) {

            topLoadMN[(2 * x@suppLs[["topLoadI"]] + 1):(4 * x@suppLs[["topLoadI"]]), c(1, 3)] <- NA
            topLoadMN[1:(2 * x@suppLs[["topLoadI"]]), c(2, 4)] <- NA

        }
        print(signif(topLoadMN, 2))
        message("")

        ## Display the model summary with 3 significant digits, restoring the
        ## user's 'digits' option afterwards
        optDigN <- options()[["digits"]]
        options(digits = 3)
        print(x@modelDF)
        options(digits = optDigN)

    }) ## print
#' Plot Method for (O)PLS(-DA)
#'
#' This function plots values based upon a model trained by \code{opls}.
#'
#' @aliases plot.opls plot,opls-method
#' @param x An S4 object of class \code{opls}, created by the \code{opls}
#' function.
#' @param y Currently not used.
#' @param typeVc Character vector: the following plots are available:
#' 'correlation': Variable correlations with the components, 'outlier':
#' Observation diagnostics (score and orthogonal distances), 'overview': Model
#' overview showing R2Ycum and Q2cum (or 'Variance explained' for PCA),
#' 'permutation': Scatterplot of R2Y and Q2Y actual and simulated models after
#' random permutation of response values; 'predict-train' and 'predict-test':
#' Predicted vs Actual Y for reference and test sets (only if Y has a single
#' column), 'summary' [default]: 4-plot summary showing permutation, overview,
#' outlier, and x-score together, 'x-variance': Spread of raw variables
#' corresp. with min, median, and max variances, 'x-loading': X-loadings (the 6
#' of variables most contributing to loadings are colored in red to facilitate
#' interpretation), 'x-score': X-Scores, 'xy-score': XY-Scores, 'xy-weight':
#' XY-Weights
#' @param parAsColFcVn Optional factor character or numeric vector to be
#' converted into colors for the score plot; default is NA [ie colors will be
#' converted from 'y' in case of (O)PLS(-DA) or will be 'black' for PCA]
#' @param parCexN Numeric: amount by which plotting text should be magnified
#' relative to the default
#' @param parCompVi Integer vector of length 2: indices of the two components
#' to be displayed on the score plot (first two components by default)
#' @param parDevNewL Should the graphics be displayed in a new window
#' [default]; If FALSE, parLayL must be set to FALSE also
#' @param parEllipsesL Should the Mahalanobis ellipses be drawn? If 'NA'
#' [default], ellipses are drawn when either a character parAsColVcn is
#' provided (PCA case), or when 'y' is a character factor ((O)PLS-DA cases).
#' @param parLabVc Optional character vector for the labels of observations on
#' the plot; default is NA [ie row names of 'x', if available, or indices of
#' 'x', otherwise, will be used]
#' @param parTitleL Should the titles of the plots be printed on the graphics
#' (default = TRUE); It may be convenient to set this argument to FALSE when
#' the user wishes to add specific titles a posteriori
#' @param file.pdfC Figure filename (e.g. in case of batch mode) ending with
#' '.pdf'; for multiple graphics, set parLayL to TRUE; default is NULL (no
#' saving; displaying instead)
#' @param .sinkC Character: Name of the file for R output diversion [default =
#' NULL: no diversion]; Diversion of messages is required for the integration
#' into Galaxy
#' @param ... Currently not used.
#' @author Etienne Thevenot, \email{etienne.thevenot@@cea.fr}
#' @examples
#'
#' data(sacurine)
#' attach(sacurine)
#'
#' for(typeC in c("correlation", "outlier", "overview",
#' "permutation", "predict-train","predict-test",
#' "summary", "x-loading", "x-score", "x-variance",
#' "xy-score", "xy-weight")) {
#'
#' print(typeC)
#'
#' if(grepl("predict", typeC))
#' subset <- "odd"
#' else
#' subset <- NULL
#'
#' opLs <- opls(dataMatrix, sampleMetadata[, "gender"],
#' predI = ifelse(typeC != "xy-weight", 1, 2),
#' orthoI = ifelse(typeC != "xy-weight", 1, 0),
#' permI = ifelse(typeC == "permutation", 10, 0),
#' subset = subset,
#' printL = FALSE, plotL = FALSE)
#'
#' plot(opLs, typeVc = typeC)
#'
#' }
#'
#' detach(sacurine)
#'
#' @rdname plot
#' @export
setMethod("plot", signature(x = "opls"),
function(x,
y,
typeVc = c("correlation",
"outlier",
"overview",
"permutation",
"predict-train",
"predict-test",
"summary",
"x-loading",
"x-score",
"x-variance",
"xy-score",
"xy-weight")[7],
parAsColFcVn = NA,
parCexN = 0.8,
parCompVi = c(1, 2),
parDevNewL = TRUE,
parEllipsesL = NA,
parLabVc = NA,
parTitleL = TRUE,
file.pdfC = NULL,
.sinkC = NULL,
...) {
if(!is.null(.sinkC)) ## Diversion of messages is required for the integration into Galaxy
sink(.sinkC, append = TRUE)
if("summary" %in% typeVc) {
if(!is.null(x@suppLs[["permMN"]]))
typeVc <- c("permutation",
"overview",
"outlier",
"x-score")
else
typeVc <- c("overview",
"outlier",
"x-score",
"x-loading")
}
## Checking arguments
##-------------------
if(!all(typeVc %in% c('correlation', 'outlier', 'overview', 'permutation', 'predict-train', 'predict-test', 'x-loading', 'x-score', 'x-variance', 'xy-score', 'xy-weight')))
stop("'typeVc' elements must be either 'correlation', 'outlier', 'overview', 'permutation', 'predict-train', 'predict-test', 'x-loading', 'x-score', 'x-variance', 'xy-score', 'xy-weight'", call. = FALSE)
if('predict-test' %in% typeVc && length(x@subsetVi) == 0)
stop("For the 'predict-test' graphic to be generated, 'subset' must not be kept to NULL", call. = FALSE)
if(!any(is.na(parLabVc))) {
if(length(x@subsetVi) > 0 && length(parLabVc) != nrow(x@suppLs[["yMCN"]])) {
stop("When 'subset' is not NULL, 'parLabVc' vector length must be equal to the number of train + test samples (here: ", nrow(x@suppLs[["yMCN"]]), ").", call. = FALSE)
} else if(length(parLabVc) != nrow(x@scoreMN))
stop("'parLabVc' vector length must be equal to the number of 'x' rows")
if(mode(parLabVc) != "character")
stop("'parLabVc' must be of 'character' type")
}
if(!any(is.na(parAsColFcVn))) {
if(length(x@subsetVi) > 0 && length(parAsColFcVn) != nrow(x@suppLs[["yMCN"]])) {
stop("When 'subset' is not NULL, 'parAsColFcVn' vector length must be equal to the number of train + test samples (here: ", nrow(x@suppLs[["yMCN"]]), ").", call. = FALSE)
} else if(length(parAsColFcVn) != nrow(x@scoreMN))
stop("'parAsColFcVn' vector length must be equal to the number of 'x' rows")
if(!(mode(parAsColFcVn) %in% c("character", "numeric")))
stop("'parAsColFcVn' must be of 'character' or 'numeric' type")
if(is.character(parAsColFcVn)) {
parAsColFcVn <- factor(parAsColFcVn)
warning("Character 'parAsColFcVn' set to a factor", call. = FALSE)
}
}
if(is.null(x@suppLs[["permMN"]]) && 'permutation' %in% typeVc)
stop("'permI' must be > 0 for 'permutation' graphic to be plotted", call. = FALSE)
if(x@summaryDF[, "ort"] > 0)
if(parCompVi[1] != 1) {
parCompVi[1] <- 1
warning("OPLS: first component to display ('parCompVi' first value) set to 1", call. = FALSE)
}
if("xy-weight" %in% typeVc &&
substr(x@typeC, 1, 3) != "PLS")
## (is.null(yMCN) || is.na(x@summaryDF[, "ort"]) || x@summaryDF[, "ort"] > 0))
stop("'xy-weight graphic can be displayed only for PLS(-DA) models", call. = FALSE)
if(any(grepl('predict', typeVc)))
if(is.null(x@suppLs[["yMCN"]]) ||
ncol(x@suppLs[["yMCN"]]) > 1 ||
(mode(x@suppLs[["yMCN"]]) == "character" && length(unique(drop(x@suppLs[["yMCN"]]))) > 2))
## if(any(grepl('predict', typeVc)) && is.matrix(x@fitted"]]) && ncol(x@fitted"]]) > 1)
## if(any(grepl('predict', typeVc)) && (is.null(yMCN) || ncol(yMCN) != 1))
stop("'predict' graphics available for single response regression or binary classification only", call. = FALSE)
if(is.na(parEllipsesL)) {
if((all(is.na(parAsColFcVn)) && grepl("-DA$", x@typeC)) ||
(!all(is.na(parAsColFcVn)) && is.factor(parAsColFcVn))) {
parEllipsesL <- TRUE
} else
parEllipsesL <- FALSE
## if((x@typeC == "PCA" && !all(is.na(parAsColFcVn)) && is.factor(parAsColFcVn)) || ## PCA case
## grepl("-DA$", x@typeC)) { ## (O)PLS-DA cases
## parEllipsesL <- TRUE
## } else
## parEllipsesL <- FALSE
} else if(parEllipsesL && !grepl("-DA$", x@typeC) && (all(is.na(parAsColFcVn)) || !is.factor(parAsColFcVn)))
stop("Ellipses can be plotted for PCA (or PLS regression) only if the 'parAsColFcVn' is a factor",
call. = FALSE)
if(x@summaryDF[, "pre"] + x@summaryDF[, "ort"] < 2) {
if(!all(typeVc %in% c("permutation", "overview"))) {
warning("Single component model: only 'overview' and 'permutation' (in case of single response (O)PLS(-DA)) plots available", call. = FALSE)
typeVc <- "overview"
if(!is.null(x@suppLs[["permMN"]]))
typeVc <- c("permutation", typeVc)
}
tCompMN <- x@scoreMN
pCompMN <- x@loadingMN
} else {
if(x@summaryDF[, "ort"] > 0) {
if(parCompVi[2] > x@summaryDF[, "ort"] + 1)
stop("Selected orthogonal component for plotting (ordinate) exceeds the total number of orthogonal components of the model", call. = FALSE)
tCompMN <- cbind(x@scoreMN[, 1], x@orthoScoreMN[, parCompVi[2] - 1])
pCompMN <- cbind(x@loadingMN[, 1], x@orthoLoadingMN[, parCompVi[2] - 1])
colnames(pCompMN) <- colnames(tCompMN) <- c("h1", paste("o", parCompVi[2] - 1, sep = ""))
} else {
if(max(parCompVi) > x@summaryDF[, "pre"])
stop("Selected component for plotting as ordinate exceeds the total number of predictive components of the model", call. = FALSE)
tCompMN <- x@scoreMN[, parCompVi, drop = FALSE]
pCompMN <- x@loadingMN[, parCompVi, drop = FALSE]
}
}
## if(ncol(tCompMN) > 1) {
## mahInvCovMN <- solve(cov(tCompMN))
## pcaResMN <- cbind(sdsVn = apply(tCompMN,
## 1,
## function(x) sqrt(t(as.matrix(x)) %*% mahInvCovMN %*% as.matrix(x))),
## odsVn = apply(x@suppLs[["xModelMN"]] - tcrossprod(tCompMN, pCompMN),
## 1,
## function(x) sqrt(drop(crossprod(x[complete.cases(x)])))))
## } else
## pcaResMN <- NULL
cxtCompMN <- cor(x@suppLs[["xModelMN"]], tCompMN,
use = "pairwise.complete.obs")
if(!is.null(x@suppLs[["yModelMN"]]))
cytCompMN <- cor(x@suppLs[["yModelMN"]], tCompMN, use = "pairwise.complete.obs")
if(x@suppLs[["topLoadI"]] * 4 < ncol(x@suppLs[["xModelMN"]])) {
pexVi <- integer(x@suppLs[["topLoadI"]] * ncol(pCompMN) * 2) ## 'ex'treme values
for(k in 1:ncol(pCompMN)) {
pkVn <- pCompMN[, k]
pexVi[1:(2 * x@suppLs[["topLoadI"]]) + 2 * x@suppLs[["topLoadI"]] * (k - 1)] <- c(order(pkVn)[1:x@suppLs[["topLoadI"]]],
rev(order(pkVn, decreasing = TRUE)[1:x@suppLs[["topLoadI"]]]))
}
} else
pexVi <- 1:ncol(x@suppLs[["xModelMN"]])
pxtCompMN <- cbind(pCompMN,
cxtCompMN)
if(ncol(pCompMN) == 1) {
colnames(pxtCompMN)[2] <- paste0("cor_", colnames(pxtCompMN)[2])
} else
colnames(pxtCompMN)[3:4] <- paste0("cor_", colnames(pxtCompMN)[3:4])
topLoadMN <- pxtCompMN
topLoadMN <- topLoadMN[pexVi, , drop = FALSE]
if(x@suppLs[["topLoadI"]] * 4 < ncol(x@suppLs[["xModelMN"]]) &&
ncol(pCompMN) > 1) {
topLoadMN[(2 * x@suppLs[["topLoadI"]] + 1):(4 * x@suppLs[["topLoadI"]]), c(1, 3)] <- NA
topLoadMN[1:(2 * x@suppLs[["topLoadI"]]), c(2, 4)] <- NA
}
## Observation and variable names and colors
##------------------------------------------
## obsLabVc
if(!any(is.na(parLabVc))) {
obsLabVc <- parLabVc
} else if(!is.null(x@suppLs[["yMCN"]]) && ncol(x@suppLs[["yMCN"]]) == 1) { ## (O)PLS of single response
obsLabVc <- rownames(x@suppLs[["yMCN"]])
} else { ## PCA
if(!is.null(rownames(tCompMN))) {
obsLabVc <- rownames(tCompMN)
} else
obsLabVc <- as.character(1:nrow(tCompMN))
}
if(length(x@subsetVi) > 0) {
## (O)PLS(-DA) models of a single 'y' response
tesLabVc <- obsLabVc[-x@subsetVi]
obsLabVc <- obsLabVc[x@subsetVi]
} else
tesLabVc <- ""
## obsColVc
if(!any(is.na(parAsColFcVn))) {
obsColVc <- .plotColorF(as.vector(parAsColFcVn))[["colVc"]]
obsLegVc <- as.vector(parAsColFcVn)
} else if(!is.null(x@suppLs[["yMCN"]]) && ncol(x@suppLs[["yMCN"]]) == 1) { ## (O)PLS of single response
obsColVc <- .plotColorF(c(x@suppLs[["yMCN"]]))[["colVc"]]
obsLegVc <- c(x@suppLs[["yMCN"]])
} else { ## PCA
obsColVc <- rep("black", nrow(tCompMN))
obsLegVc <- NULL
}
if(length(x@subsetVi) > 0) {
## (O)PLS(-DA) models of a single 'y' response
tesColVc <- obsColVc[-x@subsetVi]
obsColVc <- obsColVc[x@subsetVi]
if(!is.null(obsLegVc)) {
tesLegVc <- obsLegVc[-x@subsetVi]
obsLegVc <- obsLegVc[x@subsetVi]
}
}
## Layout
##-------
if(!parDevNewL && length(typeVc) != 1)
stop("'typeVc' must be of length 1 when 'parDevNewL' is set to FALSE", call. = FALSE)
if(parDevNewL) {
layRowN <- ceiling(sqrt(length(typeVc)))
if(is.null(file.pdfC))
dev.new()
else
pdf(file.pdfC)
layout(matrix(1:layRowN^2, byrow = TRUE, nrow = layRowN))
}
layL <- !parDevNewL || length(typeVc) > 1
## Par
##----
if(layL) {
marVn <- c(4.6, 4.1, 2.6, 1.6)
} else
marVn <- c(5.1, 4.1, 4.1, 2.1)
par(font=2, font.axis=2, font.lab=2, lwd=2,
mar=marVn,
pch=18)
## Graph
##------
for(ploC in typeVc)
.plotF(ploC,
opl = x,
obsColVc = obsColVc,
obsLabVc = obsLabVc,
obsLegVc = obsLegVc,
layL = layL,
parCexN = parCexN,
parEllipsesL = parEllipsesL,
parTitleL = parTitleL,
parCompVi = parCompVi,
typeVc = typeVc,
tCompMN = tCompMN,
pCompMN = pCompMN,
cxtCompMN = cxtCompMN,
cytCompMN = cytCompMN,
## pcaResMN = pcaResMN,
topLoadMN = topLoadMN,
pexVi = pexVi,
tesColVc = tesColVc,
tesLabVc = tesLabVc,
tesLegVc = tesLegVc)
if(layL)
par(font=1, font.axis=1, font.lab=1, lwd=1,
mar=c(5.1, 4.1, 4.1, 2.1),
pch=1)
if(!is.null(file.pdfC))
dev.off()
## Closing connection
##-------------------
if(!is.null(.sinkC)) ## Used in the Galaxy module
sink()
}) ## plot
#' Fitted method for 'opls' objects
#'
#' Returns predictions of the (O)PLS(-DA) model on the training dataset
#'
#' @aliases fitted.opls fitted,opls-method
#' @param object An S4 object of class \code{opls}, created by the \code{opls}
#' function.
#' @param ... Currently not used.
#' @return Predictions (either a vector, factor, or matrix depending on the y
#' response used for training the model)
#' @author Etienne Thevenot, \email{etienne.thevenot@@cea.fr}
#' @examples
#'
#' data(sacurine)
#' attach(sacurine)
#' sacurine.plsda <- opls(dataMatrix, sampleMetadata[, "gender"])
#'
#' fitted(sacurine.plsda)
#'
#' detach(sacurine)
#'
#' @rdname fitted
#' @export
setMethod("fitted", "opls",
          function(object, ...) {
              preMN <- object@suppLs[["yPreMN"]]
              ## No stored predictions (e.g. PCA model): nothing to return
              if (is.null(preMN))
                  return(NULL)
              yTrain <- object@suppLs[["y"]]
              if (mode(object@suppLs[["yMCN"]]) == "character") {
                  ## Qualitative response: map the numeric scores back to the
                  ## original character labels before formatting
                  labMN <- object@suppLs[[".char2numF"]](preMN,
                                                         c2nL = FALSE)
                  if (is.vector(yTrain)) {
                      fit <- c(labMN)
                      names(fit) <- rownames(labMN)
                  } else if (is.factor(yTrain)) {
                      fit <- c(labMN)
                      names(fit) <- rownames(labMN)
                      fit <- factor(fit, levels = levels(yTrain))
                  } else if (is.matrix(yTrain)) {
                      fit <- labMN
                  } else
                      stop() ## unreachable: y is always vector, factor, or matrix
              } else {
                  ## Quantitative response: return the predictions as is,
                  ## shaped like the training response
                  if (is.vector(yTrain)) {
                      fit <- c(preMN)
                      names(fit) <- rownames(preMN)
                  } else if (is.matrix(yTrain)) {
                      fit <- preMN
                  } else
                      stop() ## unreachable: y is always vector or matrix here
              }
              fit
          }) ## fitted
#' @rdname tested
#' @export
setMethod("tested", "opls",
          function(object) {
              tesMN <- object@suppLs[["yTesMN"]]
              ## Test-set predictions only exist when a 'subset' was used
              if (is.null(tesMN))
                  stop("Test results only available for (O)PLS(-DA) models", call. = FALSE)
              yTrain <- object@suppLs[["y"]]
              if (mode(object@suppLs[["yMCN"]]) == "character") {
                  ## Qualitative response: convert numeric scores back to labels
                  labMN <- object@suppLs[[".char2numF"]](tesMN,
                                                         c2nL = FALSE)
                  if (is.vector(yTrain)) {
                      test <- c(labMN)
                      names(test) <- rownames(labMN)
                  } else if (is.factor(yTrain)) {
                      test <- c(labMN)
                      names(test) <- rownames(labMN)
                      test <- factor(test, levels = levels(yTrain))
                  } else if (is.matrix(yTrain)) {
                      test <- labMN
                  } else
                      stop() ## unreachable: y is always vector, factor, or matrix
              } else {
                  ## Quantitative response: keep numeric predictions,
                  ## shaped like the training response
                  if (is.vector(yTrain)) {
                      test <- c(tesMN)
                      names(test) <- rownames(tesMN)
                  } else if (is.matrix(yTrain)) {
                      test <- tesMN
                  } else
                      stop() ## unreachable: y is always vector or matrix here
              }
              test
          })
#' Coefficients method for (O)PLS models
#'
#' Coefficients of the (O)PLS(-DA) regression model
#'
#' @aliases coef.opls coef,opls-method
#' @param object An S4 object of class \code{opls}, created by \code{opls}
#' function.
#' @param ... Currently not used.
#' @return Numeric matrix of coefficients (number of rows equals the number of
#' variables, and the number of columns equals the number of responses)
#' @author Etienne Thevenot, \email{etienne.thevenot@@cea.fr}
#' @examples
#'
#' data(sacurine)
#' attach(sacurine)
#'
#' sacurine.plsda <- opls(dataMatrix,
#' sampleMetadata[, "gender"])
#'
#' head(coef(sacurine.plsda))
#'
#' detach(sacurine)
#'
#' @rdname coef
#' @export
setMethod("coef", "opls",
          function(object, ...) {
              ## Regression coefficients: one row per variable, one column per response
              object@coefficientMN
          }) ## coef
#' Residuals method for (O)PLS models
#'
#' Returns the residuals from the (O)PLS(-DA) regression models
#'
#' @aliases residuals.opls residuals,opls-method
#' @param object An S4 object of class \code{opls}, created by \code{opls}
#' function.
#' @param ... Currently not used.
#' @return Numeric matrix or vector (same dimensions as the modeled y
#' response); if y is a character vector or a factor (in case of
#' classification), the residuals equal 0 (predicted class identical to the
#' true class) or 1 (prediction error)
#' @author Etienne Thevenot, \email{etienne.thevenot@@cea.fr}
#' @examples
#'
#' data(sacurine)
#' attach(sacurine)
#'
#' sacurine.pls <- opls(dataMatrix,
#' sampleMetadata[, "age"])
#'
#' head(residuals(sacurine.pls))
#'
#' detach(sacurine)
#'
#' @rdname residuals
#' @export
setMethod("residuals", "opls",
          function(object, ...) {
              ## Residuals only make sense for supervised (O)PLS(-DA) models
              if (!grepl("PLS", object@typeC))
                  stop("'residuals' defined for (O)PLS(-DA) regression models only", call. = FALSE)
              fit <- fitted(object)
              ## Restrict to training observations when a subset was used
              if (length(object@subsetVi) == 0) {
                  trainVi <- 1:length(fit)
              } else {
                  trainVi <- object@subsetVi
              }
              if (mode(object@suppLs[["yMCN"]]) == "numeric") {
                  ## Quantitative response: residual = observed - fitted
                  yVcn <- object@suppLs[["y"]]
                  if (is.matrix(yVcn)) {
                      yVcn <- yVcn[trainVi, , drop = FALSE]
                  } else {
                      yVcn <- yVcn[trainVi]
                  }
                  yVcn - fit
              } else {
                  ## Classification: 0 = correct prediction, 1 = misclassified
                  as.numeric(as.character(c(object@suppLs[["y"]])[trainVi]) != as.character(c(fit)))
              }
          }) ## residuals
#' Predict method for (O)PLS models
#'
#' Returns predictions of the (O)PLS(-DA) model on a new dataset
#'
#' @aliases predict.opls predict,opls-method
#' @param object An S4 object of class \code{opls}, created by \code{opls}
#' function.
#' @param newdata Either a data frame or a matrix, containing numeric columns
#' only, with the same number of columns (variables) as the 'x' used for model
#' training with 'opls'.
#' @param ... Currently not used.
#' @return Predictions (either a vector, factor, or matrix depending on the y
#' response used for training the model)
#' @author Etienne Thevenot, \email{etienne.thevenot@@cea.fr}
#' @examples
#'
#' data(sacurine)
#' attach(sacurine)
#'
#' predictorMN <- dataMatrix
#' responseFc <- sampleMetadata[, "gender"]
#'
#' sacurine.plsda <- opls(predictorMN,
#' responseFc,
#' subset = "odd")
#'
#' trainVi <- getSubsetVi(sacurine.plsda)
#'
#' table(responseFc[trainVi], fitted(sacurine.plsda))
#'
#' table(responseFc[-trainVi],
#' predict(sacurine.plsda, predictorMN[-trainVi, ]))
#'
#' detach(sacurine)
#'
#' @rdname predict
#' @export
setMethod("predict", "opls",
          function(object, newdata, ...) {
              ## Predictions are defined for supervised models only
              if(object@typeC == "PCA")
                  stop("Predictions currently available for (O)PLS(-DA) models only (not PCA)",
                       call. = FALSE)
              if(missing(newdata)) {
                  ## Without new data, return the fitted values on the training set
                  return(fitted(object))
              } else {
                  ## Coerce/validate 'newdata' into a numeric matrix
                  if(is.data.frame(newdata)) {
                      if(!all(sapply(newdata, data.class) == "numeric")) {
                          stop("'newdata' data frame must contain numeric columns only", call. = FALSE)
                      } else
                          newdata <- as.matrix(newdata)
                  } else if(is.matrix(newdata)) {
                      if(mode(newdata) != "numeric")
                          stop("'newdata' matrix must be of 'numeric' mode", call. = FALSE)
                  } else
                      stop("'newdata' must be either a data.frame or a matrix", call. = FALSE)
                  ## Reconcile the column count with the training data: when the
                  ## difference exactly matches the near-zero-variance variables
                  ## discarded during training, drop them from 'newdata' too
                  if(ncol(newdata) != as.numeric(object@descriptionMC["X_variables", ])) {
                      if(length(object@xZeroVarVi) == 0) {
                          stop("'newdata' number of variables is ",
                               ncol(newdata),
                               " whereas the number of variables used for model training was ",
                               as.numeric(object@descriptionMC["X_variables", ]),
                               ".",
                               call. = FALSE)
                      } else if(ncol(newdata) - as.numeric(object@descriptionMC["X_variables", ]) ==
                                as.numeric(object@descriptionMC["near_zero_excluded_X_variables", ])) {
                          warning(as.numeric(object@descriptionMC["near_zero_excluded_X_variables", ]),
                                  " near zero variance variables excluded during the model training will be removed from 'newdata'.",
                                  call. = FALSE)
                          newdata <- newdata[, -object@xZeroVarVi, drop = FALSE]
                      } else {
                          stop("'newdata' number of variables (",
                               ncol(newdata),
                               ") does not correspond to the number of initial variables (",
                               as.numeric(object@descriptionMC["X_variables", ]),
                               ") minus the number of near zero variance variables excluded during the training (",
                               as.numeric(object@descriptionMC["near_zero_excluded_X_variables", ]),
                               ").",
                               call. = FALSE)
                      }
                  }
                  ## Apply the training center and scale to the new observations
                  xteMN <- scale(newdata, object@xMeanVn, object@xSdVn)
                  ## OPLS: remove the orthogonal components before prediction;
                  ## 'naxL' flags missing values in x, in which case scores are
                  ## computed row-wise on the complete cases only
                  if(object@summaryDF[, "ort"] > 0) {
                      for(noN in 1:object@summaryDF[, "ort"]) {
                          if(object@suppLs[["naxL"]]) {
                              xtoMN <- matrix(0, nrow = nrow(xteMN), ncol = 1)
                              for(i in 1:nrow(xtoMN)) {
                                  comVl <- complete.cases(xteMN[i, ])
                                  xtoMN[i, ] <- crossprod(xteMN[i, comVl], object@orthoWeightMN[comVl, noN]) / drop(crossprod(object@orthoWeightMN[comVl, noN]))
                              }
                          } else
                              xtoMN <- xteMN %*% object@orthoWeightMN[, noN]
                          xteMN <- xteMN - tcrossprod(xtoMN, object@orthoLoadingMN[, noN])
                      }
                  }
                  ## Predicted responses on the scaled scale (row-wise on
                  ## complete cases when x contains missing values)
                  if(object@suppLs[["naxL"]]) {
                      yTesScaMN <- matrix(0, nrow = nrow(xteMN), ncol = ncol(object@coefficientMN),
                                          dimnames = list(rownames(xteMN), colnames(object@coefficientMN)))
                      for(j in 1:ncol(yTesScaMN))
                          for(i in 1:nrow(yTesScaMN)) {
                              comVl <- complete.cases(xteMN[i, ])
                              yTesScaMN[i, j] <- crossprod(xteMN[i, comVl], object@coefficientMN[comVl, j])
                          }
                  } else
                      yTesScaMN <- xteMN %*% object@coefficientMN
                  ## if(object@suppLs[["nayL"]])
                  ##     yTesScaMN <- yTesScaMN[!is.na(yMCN[testVi, ]), , drop = FALSE]
                  ## Back-transform the predictions to the original y scale
                  ## (undo scaling, then undo centering), dropping the
                  ## 'scaled:*' attributes that scale() leaves behind
                  yTesMN <- scale(scale(yTesScaMN,
                                        FALSE,
                                        1 / object@ySdVn),
                                  -object@yMeanVn,
                                  FALSE)
                  attr(yTesMN, "scaled:center") <- NULL
                  attr(yTesMN, "scaled:scale") <- NULL
                  ## Format the predictions with the same type (factor, vector,
                  ## or matrix) as the training response
                  if(is.factor(fitted(object))) {
                      yTestMCN <- object@suppLs[[".char2numF"]](yTesMN,
                                                                c2nL = FALSE)
                      predMCNFcVcn <- as.character(yTestMCN)
                      names(predMCNFcVcn) <- rownames(newdata)
                      predMCNFcVcn <- factor(predMCNFcVcn, levels = levels(object@suppLs[["y"]]))
                  } else if(is.vector(fitted(object))) {
                      if(is.character(fitted(object))) {
                          yTestMCN <- object@suppLs[[".char2numF"]](yTesMN,
                                                                    c2nL = FALSE)
                          predMCNFcVcn <- as.character(yTestMCN)
                          names(predMCNFcVcn) <- rownames(newdata)
                      } else {
                          predMCNFcVcn <- as.numeric(yTesMN)
                          names(predMCNFcVcn) <- rownames(newdata)
                      }
                  } else if(is.matrix(fitted(object))) {
                      if(mode(fitted(object)) == "character") {
                          predMCNFcVcn <- object@suppLs[[".char2numF"]](yTesMN,
                                                                        c2nL = FALSE)
                      } else
                          predMCNFcVcn <- yTesMN
                      rownames(predMCNFcVcn) <- rownames(newdata)
                  }
                  return(predMCNFcVcn)
              }
          }) ## predict
#' @rdname getSummaryDF
#' @export
setMethod("getSummaryDF", "opls",
          function(object) {
              ## Model summary table (R2X, R2Y, Q2, permutation p-values, ...)
              object@summaryDF
          })
#' @rdname getPcaVarVn
#' @export
setMethod("getPcaVarVn", "opls",
          function(object) {
              ## Variances captured by the PCA components
              object@pcaVarVn
          })
#' @rdname getScoreMN
#' @export
setMethod("getScoreMN", "opls",
          function(object, orthoL = FALSE) {
              ## orthoL = TRUE selects the orthogonal score matrix (OPLS models)
              if (orthoL) object@orthoScoreMN else object@scoreMN
          })
#' @rdname getLoadingMN
#' @export
setMethod("getLoadingMN", "opls",
          function(object, orthoL = FALSE) {
              ## orthoL = TRUE selects the orthogonal loading matrix (OPLS models)
              if (orthoL) object@orthoLoadingMN else object@loadingMN
          })
#' @rdname getWeightMN
#' @export
setMethod("getWeightMN", "opls",
          function(object, orthoL = FALSE) {
              ## orthoL = TRUE selects the orthogonal weight matrix (OPLS models)
              if (orthoL) object@orthoWeightMN else object@weightMN
          })
#' @rdname getVipVn
#' @export
setMethod("getVipVn", "opls",
          function(object, orthoL = FALSE) {
              ## orthoL = TRUE selects the VIP of the orthogonal components (OPLS models)
              if (orthoL) object@orthoVipVn else object@vipVn
          })
#' @rdname getSubsetVi
#' @export
setMethod("getSubsetVi", "opls",
          function(object) {
              ## Indices of the observations used for model training
              object@subsetVi
          })
#' @rdname checkW4M
setMethod("checkW4M", "ExpressionSet",
          function(eset, ...) {
              ## Validate that sample and variable names are consistent across
              ## the three W4M tables (data matrix, sample and variable metadata)
              validL <- .checkW4mFormatF(t(exprs(eset)), pData(eset), fData(eset))
              if (!validL)
                  stop("Problem with the sample or variable names in the tables to be imported from (exported to) W4M", call. = FALSE)
              TRUE
          })
#' @rdname toW4M
setMethod("toW4M", "ExpressionSet",
          function(eset, filePrefixC = paste0(getwd(), "/out_"), verboseL = TRUE, ...){
              if (checkW4M(eset)) {
                  ## Write one table in W4M format: row names promoted to a
                  ## first column named 'labelC', tab-separated, no quoting
                  .exportTabF <- function(tab, labelC, fileC) {
                      outDF <- cbind.data.frame(rownames(tab),
                                                as.data.frame(tab))
                      colnames(outDF)[1] <- labelC
                      write.table(outDF,
                                  file = fileC,
                                  quote = FALSE,
                                  row.names = FALSE,
                                  sep = "\t")
                  }
                  filDatC <- paste0(filePrefixC, "dataMatrix.tsv")
                  filSamC <- paste0(filePrefixC, "sampleMetadata.tsv")
                  filVarC <- paste0(filePrefixC, "variableMetadata.tsv")
                  .exportTabF(exprs(eset), "dataMatrix", filDatC)
                  .exportTabF(pData(eset), "sampleMetadata", filSamC)
                  .exportTabF(fData(eset), "variableMetadata", filVarC)
                  if (verboseL) {
                      cat("The following 3 files:\n")
                      print(basename(filDatC))
                      print(basename(filSamC))
                      print(basename(filVarC))
                      cat("have been written in the following directory:\n")
                      print(dirname(filDatC))
                  }
              }
          })
|
ce44f8821a36415483cad1285fd344f8354bb77a
|
1803c09436d9627552df7a58079399715501272d
|
/MINTPlugin.R
|
cdd56cc8cb25d20d35ff37c6959a94ab316f472c
|
[
"MIT"
] |
permissive
|
movingpictures83/MINT
|
466e39a277fe622b8400841ebf307cb5e0cd832a
|
ac990e8a1eff820018a39c56f351e3cba9d31b11
|
refs/heads/master
| 2022-11-16T12:12:10.819876
| 2020-07-05T22:23:16
| 2020-07-05T22:23:16
| 274,972,878
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,384
|
r
|
MINTPlugin.R
|
## ----global_options, include=FALSE---------------------------------------
library(knitr)
knitr::opts_chunk$set(dpi = 100, echo= TRUE, warning=FALSE, message=FALSE, fig.align = 'center',
fig.show=TRUE, fig.keep = 'all', out.width = '50%')
## ----message = FALSE-----------------------------------------------------
library(mixOmics)
## ------------------------------------------------------------------------
#data(stemcells)
## Plugin input phase: reads the parameter table, then loads the data matrix
## (X), outcome (Y) and study membership (study) csv files it points to.
## All results are assigned into the global environment via <<-, as the
## surrounding plugin framework appears to expect -- confirm against the host.
input <- function(inputfile) {
  parameters <<- read.table(inputfile, as.is = TRUE)
  rownames(parameters) <<- parameters[, 1]
  dataset <<- parameters["data", 2]
  outcome <<- parameters["outcome", 2]
  studies <<- parameters["study", 2]
  ## Load a csv whose first column holds the row identifiers; returns the
  ## table without that column (drops to a vector when one column remains)
  readIndexedF <- function(fileC) {
    tbl <- read.csv(fileC, header = TRUE)
    rownames(tbl) <- tbl[, 1]
    tbl[, -1]
  }
  X <<- readIndexedF(dataset)
  Y <<- readIndexedF(outcome)
  study <<- as.factor(readIndexedF(studies))
  print(study)
}
## Plugin hook: intentionally a no-op; the analysis happens in output().
run <- function() {}
#the combined data set X
#X = stemcells$gene
#dim(X)
# the outcome vector Y:
#Y = stemcells$celltype
#length(Y)
#summary(Y)
# the vector indicating each independent study
#study = stemcells$study
# number of samples per study:
#summary(study)
# experimental design
#table(Y,study)
## Plugin output phase: runs the full mixOmics MINT (s)PLS-DA workflow on the
## globals X (data), Y (outcome) and study (batch factor) set by input().
## Writes cross-validation error rates, tuning results, selected variables and
## a confusion matrix as csv files prefixed by 'outputfile', and draws a
## series of diagnostic plots to the active graphics device.
output <- function(outputfile) {
## Fit a 5-component MINT PLS-DA and assess it by M-fold cross-validation
mint.plsda.res.perf = mint.plsda(X = X, Y = Y, study = study, ncomp = 5)
set.seed(2543) # for reproducible result in this example
perf.mint.plsda.cell <- perf(mint.plsda.res.perf, validation = "Mfold", folds = 5,
                  progressBar = FALSE, auc = TRUE)
## Error rate vs number of components
plot(perf.mint.plsda.cell, col = color.mixo(5:7))
## Export the global error rates (balanced, overall, and per prediction distance)
write.csv(perf.mint.plsda.cell$global.error$BER, paste(outputfile, "error", "BER", "csv", sep="."))
write.csv(perf.mint.plsda.cell$global.error$overall, paste(outputfile, "error", "overall", "csv", sep="."))
write.csv(perf.mint.plsda.cell$global.error$error.rate.class$max.dist, paste(outputfile, "error", "max", "csv", sep="."))
write.csv(perf.mint.plsda.cell$global.error$error.rate.class$centroids.dist, paste(outputfile, "error", "centroids", "csv", sep="."))
write.csv(perf.mint.plsda.cell$global.error$error.rate.class$mahalanobis.dist, paste(outputfile, "error", "mahalanobis", "csv", sep="."))
## Number of components suggested by perf() before keepX tuning
write.csv(perf.mint.plsda.cell$choice.ncomp, paste(outputfile, "optimalcomponents", "pretuned", "csv", sep="."))
## Refit with 2 components and plot the sample scores
mint.plsda.res = mint.plsda(X = X, Y = Y, study = study, ncomp = 2)
#mint.plsda.res # lists the different functions
plotIndiv(mint.plsda.res, legend = TRUE, title = 'MINT PLS-DA',
          subtitle = 'stem cell study', ellipse = T)
## Tune the number of variables to keep per component (keepX) for MINT sPLS-DA
tune.mint = tune(X = X, Y = Y, study = study, ncomp = 2, test.keepX = seq(1, 100, 1),
method = 'mint.splsda', dist = "max.dist", progressBar = FALSE)
# tune.mint # lists the different types of outputs
# mean error rate per component and per tested keepX value
# tune.mint$error.rate
## Export the tuned number of components and the selected keepX values
#tune.mint$choice.ncomp #tune.mint$choice.ncomp # tell us again than ncomp=1 is sufficient
write.csv(tune.mint$choice.ncomp, paste(outputfile, "optimalcomponents", "posttuned", "csv", sep="."))
# optimal keepX
write.csv(tune.mint$choice.keepX, paste(outputfile, "optimalcomponents", "keep", "csv", sep="."))
plot(tune.mint, col = color.jet(2))
## Final sparse model with the tuned keepX
mint.splsda.res = mint.splsda(X = X, Y = Y, study = study, ncomp = 2,
                              keepX = tune.mint$choice.keepX)
#mint.splsda.res # lists useful functions that can be used with a MINT object
## Export the variables selected on component 1 (with their loading values)
#selectVar(mint.splsda.res, comp = 1)
write.csv(selectVar(mint.splsda.res, comp = 1)$value, paste(outputfile, "importantvalues", "csv", sep="."))
## Sample plots: global view, then one panel per study
plotIndiv(mint.splsda.res, study = 'global', legend = TRUE, title = 'MINT sPLS-DA',
          subtitle = 'Global', ellipse=T)
plotIndiv(mint.splsda.res, study = 'all.partial', title = 'MINT sPLS-DA',
          subtitle = paste("Study",1:4))
## Arrow and correlation-circle plots of samples/variables
plotArrow(mint.splsda.res)
plotVar(mint.splsda.res, cex = 4)
## Clustered image map of the selected variables on component 1
cim(mint.splsda.res, comp = 1, margins=c(10,5),
    row.sideColors = color.mixo(as.numeric(Y)), row.names = FALSE,
    title = "MINT sPLS-DA, component 1")
## Relevance network between variables and outcome on component 1.
## NOTE(review): the trailing comma after 'interactive = FALSE,' (the following
## arguments are commented out) leaves an empty argument before ')'; confirm
## this does not trigger an empty-argument error at runtime.
network(mint.splsda.res, color.node = c(color.mixo(1), color.mixo(2)), comp = 1,
        shape.node = c("rectangle", "circle"),
        color.edge = color.jet(50),
        lty.edge = "solid", lwd.edge = 2,
        show.edge.labels = FALSE, interactive = FALSE,
        #,save = 'jpeg', #uncomment the following if you experience margin issues with RStudio
        #name.save = network
        )
## Per-study variable contributions on component 1
plotLoadings(mint.splsda.res, contrib="max", method = 'mean', comp=1,
             study="all.partial", legend=FALSE, title="Contribution on comp 1",
             subtitle = paste("Study",1:4))
## Performance of the final sparse model
set.seed(123) # for reproducibility of the results
perf.mint = perf(mint.splsda.res, progressBar = FALSE, dist = 'max.dist')
#perf.mint$global.error
write.csv(perf.mint$global.error$BER, paste(outputfile, "error", "finalmodel", "BER", "csv", sep="."))
write.csv(perf.mint$global.error$overall, paste(outputfile, "error", "finalmodel", "overall", "csv", sep="."))
write.csv(perf.mint$global.error$error.rate.class$max.dist, paste(outputfile, "error", "finalmodel", "max", "csv", sep="."))
## Error rate of the final model vs components
plot(perf.mint, col = color.mixo(5))
## External prediction: hold out study 3 and predict its samples
# we predict on study 3
ind.test = which(study == "3")
test.predict <- predict(mint.splsda.res, newdata = X[ind.test, ], dist = "max.dist",
                        study.test = factor(study[ind.test]))
Prediction <- test.predict$class$max.dist[, 2]
# the confusion table compares the real subtypes with the predicted subtypes
write.csv(get.confusion_matrix(truth = Y[ind.test],
                               predicted = Prediction), paste(outputfile, "confusionmatrix", "csv", sep="."))
## ROC curves: overall, then restricted to study 2
auc.mint.splsda = auroc(mint.splsda.res, roc.comp = 2)
auc.mint.splsda = auroc(mint.splsda.res, roc.comp = 2, roc.study = '2')
}
|
5abbb11999b872228356e54ff5e57026fe4507d1
|
17fdd34b68df267b8262d532adddba733879b0b8
|
/man/gr.genome.Rd
|
773af55338be21be185f6d05719ed16f57b5c16a
|
[] |
no_license
|
kevinmhadi/khtools
|
f0b57e0be0014084f2f194465ab4a924fe502268
|
85d64808f8decd71f30510ccd18f38986031be74
|
refs/heads/master
| 2023-07-19T21:50:22.341824
| 2023-07-19T01:46:03
| 2023-07-19T01:46:03
| 235,495,453
| 0
| 3
| null | null | null | null |
UTF-8
|
R
| false
| true
| 530
|
rd
|
gr.genome.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package.R
\name{gr.genome}
\alias{gr.genome}
\title{create GRanges of full genome coordinates}
\usage{
gr.genome(si, onlystandard = TRUE, genome = NULL)
}
\value{
GRanges
}
\description{
Builds a GRanges object covering the full genome, using the
*.chrom.sizes file referenced by the environment variable
"DEFAULT_GENOME" or "DEFAULT_BSGENOME".
You may need to set one of these first, e.g.
Sys.setenv(DEFAULT_BSGENOME = "path_to_ref.chrom.sizes") or
Sys.setenv(DEFAULT_GENOME = "path_to_ref.chrom.sizes").
}
\author{
Kevin Hadi
}
|
200fdb955b982eaca160bc389b703cc77de6cf5d
|
6af9ff655188ff73b08403baa47462cc754399f3
|
/man/makeFits_initial.Rd
|
4f5c3138ff20516e0e3c9626d73eb1072547aea3
|
[] |
no_license
|
cran/SCEM
|
94571f83e77b97efd67d6d0f791544a0187d4568
|
17bf30bb05c2687636c6bd5b9bba56e8ac4fa446
|
refs/heads/master
| 2023-07-27T20:53:48.324107
| 2021-09-02T06:20:12
| 2021-09-02T06:20:12
| 393,422,843
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,742
|
rd
|
makeFits_initial.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/makeFits_initial.R
\name{makeFits_initial}
\alias{makeFits_initial}
\title{Prepare results for cosine model fit with given initialization for two parameters.}
\usage{
makeFits_initial(paths, amplitude, intercept)
}
\arguments{
\item{paths}{A list of data frames, where each frame contains the data for one individual. Every
data frame should have two columns with names 'distance' and 'oxygen'.}
\item{amplitude}{Initial value for the amplitude parameter.}
\item{intercept}{Initial value for the intercept parameter.}
}
\value{
A data frame containing the following components:
\item{amplitude}{estimated amplitude}
\item{intercept}{estimated intercept}
\item{x0}{delay of the data}
\item{X}{period of the data}
\item{birth}{birth seasonality estimate}
\item{predictedMin}{predicted minimum for the oxygen isotope variable}
\item{predictedMax}{predicted maximum for the oxygen isotope variable}
\item{observedMin}{observed minimum for the oxygen isotope variable}
\item{observedMax}{observed maximum for the oxygen isotope variable}
\item{MSE}{mean squared error corresponding to the model fit for every individual}
\item{Pearson}{Pearson's R^2 corresponding to the model fit for every individual}
}
\description{
Performs the nonlinear least squares (NLS) regression method for the cosine
model, with the given initial values for amplitude and intercept. It fits the NLS method
as required, and then computes different quantities for the birth seasonality estimates
corresponding to different individuals.
}
\examples{
armenia_split = split(armenia,f = armenia$ID)
amp = seq(1,10,by=0.5)
int = seq(-25,0,by=0.5)
makeFits_initial(armenia_split,amp[1],int[1])
}
|
bc37b6769910e2a341945393c9276dcf4eb4c270
|
6874d2514172b9e809dccf1e4879e0edfaabb050
|
/man/safe_is_online.Rd
|
86bf3ac11bcf9ba1a9d2339555bbd65e7fda9ecf
|
[] |
no_license
|
cran/sen2r
|
27e59e874a36d30b02f319b1e77fcd66f54d3f2e
|
3720d77a025fc9f8d9e04825910e830f35ffa61b
|
refs/heads/master
| 2023-06-29T11:44:40.672296
| 2023-06-16T06:10:02
| 2023-06-16T06:10:02
| 216,648,730
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,143
|
rd
|
safe_is_online.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/safe_is_online.R
\name{safe_is_online}
\alias{safe_is_online}
\title{Check if SAFE is available for download}
\usage{
safe_is_online(s2_prodlist = NULL, apihub = NA, verbose = TRUE)
}
\arguments{
\item{s2_prodlist}{Named character: list of the products to be checked,
in the format \code{safelist} (see \linkS4class{safelist}).
Alternatively, it can be the path of a JSON file exported by \link{s2_order}.}
\item{apihub}{Path of the "apihub.txt" file containing credentials
of SciHub account.
If NA (default), the default location inside the package will be used.}
\item{verbose}{Logical: if TRUE, provide processing messages summarising
how many of the SAFE archives in \code{s2_prodlist} are available online.}
}
\value{
A logical vector of the same length and names of the SAFE products
passed with \code{s2_prodlist},
in which each element is TRUE if the corresponding SAFE archive is
available for download, FALSE if it is not or NA in case of errors with
the SAFE url.
}
\description{
The function checks if the required SAFE archives are
available for download, or if they have to be ordered from the Long Term
Archive.
}
\note{
License: GPL 3.0
}
\examples{
\donttest{
if (is_scihub_configured()) {
# Generate the lists of products
pos <- sf::st_sfc(sf::st_point(c(-57.8815,-51.6954)), crs = 4326)
time_window <- as.Date(c("2018-02-21", "2018-03-20"))
list_safe <- s2_list(spatial_extent = pos, time_interval = time_window)
# (at the time the documentation was written, this list was containing 5
# archives already available online and 2 stored in the Long Term Archive)
# Check for availability
safe_is_online(list_safe)
}
}
}
\references{
L. Ranghetti, M. Boschetti, F. Nutini, L. Busetto (2020).
"sen2r": An R toolbox for automatically downloading and preprocessing
Sentinel-2 satellite data. \emph{Computers & Geosciences}, 139, 104473.
\doi{10.1016/j.cageo.2020.104473}, URL: \url{https://sen2r.ranghetti.info/}.
}
\author{
Luigi Ranghetti, phD (2019)
Lorenzo Busetto, phD (2020)
}
|
233a3f7312b273093e6ec6732c8383516fc8d591
|
825ba952e08d095b4f23d819d2c1d0c9d7373656
|
/functions.R
|
0722dca5f40d95c0c27b5ed5dba8070e2126bd70
|
[] |
no_license
|
EdinZecevic/dataanalysis
|
32d680390af21837b3bc5c322542241aaf3274e9
|
075c68b621595acb33087abe439eea75343e427a
|
refs/heads/master
| 2020-08-23T02:17:29.296158
| 2020-01-25T00:36:28
| 2020-01-25T00:36:28
| 216,522,220
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 492
|
r
|
functions.R
|
## Lexical-scoping demo: g() is defined in the global environment, so the free
## variable y inside g() is resolved there -- NOT in f()'s frame where y <- 1
## is assigned. NOTE(review): f(5) therefore fails with "object 'y' not found"
## unless a global y exists; presumably intentional as a teaching example --
## confirm.
f<- function(x){
  y<-1
  g(x)
}
g<-function(x){
  (x+y)/2
}
f(5)
mean(1:5) # result is 3: the arithmetic mean of 1..5
mean # printing a function by name shows how it is implemented
mean<- function(x) 2*x +5 # shadow the built-in mean() with a custom definition
mean(1:5) # now calls the shadowed version, returning 2*x + 5 elementwise
rm(mean)#remove the shadowing definition so the built-in mean() is visible again
summary(1:10)
summary # summary is a generic function
## Compact numeric summary of a vector: c(minimum, maximum, mean, standard deviation).
d <- function(x) {
  c(min(x), max(x), mean(x), sd(x))
}
d(1:10)
## Normal probability density, hand-rolled: exp(-(x - m)^2 / (2 s^2)) / (s sqrt(2 pi)).
## Fixed: the original wrote the exponent as -(x - m^2)/(2*s^2), squaring only m
## and not the deviation (x - m), so it disagreed with dnorm().
pr <- function(x, m, s) (1 / (s * sqrt(2 * pi))) * exp(-(x - m)^2 / (2 * s^2))
pr(3, 2, 1)
## Cross-check against the built-in density (the original called dnorm(x, ...)
## with an undefined x, which errored)
dnorm(3, mean = 2, sd = 1, log = FALSE)
dnorm(3,2,1) # should equal pr(3, 2, 1)
|
34d319cf18cd3b7ea9c543fbf1cd05d0a4e5f0ca
|
c1c6db61047082a5c20767bf8c0d709184250482
|
/003invar.R
|
cde4dfd4f4f76ab163673dd167e178bc919987f5
|
[] |
no_license
|
heelgueta/mag-socialid
|
8d68a232ac843069883a963037ba5a970bedf67c
|
157349ab9e9067f66949bd5cdbfefa4bab127bd7
|
refs/heads/master
| 2022-12-16T19:38:04.646608
| 2020-09-03T15:22:47
| 2020-09-03T15:22:47
| 267,233,372
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,861
|
r
|
003invar.R
|
###multigroup cfas: measurement-invariance testing with lavaan
###choose dataset? NOTE(review): run exactly ONE of the four assignments below;
###executed top-to-bottom, df ends up bound to the last one (dfc7).
df <- dfm5
df <- dfm7
df <- dfc5
df <- dfc7
#group cfa sex: invariance ladder -- configural, metric (equal loadings),
#scalar (+ equal intercepts), strict (+ equal residuals)
fitconfig <- lavaan::cfa(mod2o, data=df,estimator="MLR", group="sex")
fitmetric <- lavaan::cfa(mod2o, data=df,estimator="MLR", group="sex", group.equal = c("loadings"))
fitscalar <- lavaan::cfa(mod2o, data=df,estimator="MLR", group="sex", group.equal = c("loadings","intercepts"))
fitresidu <- lavaan::cfa(mod2o, data=df,estimator="MLR", group="sex", group.equal = c("loadings","intercepts","residuals"))
#group cfa age: same ladder grouped by ag2. NOTE(review): these overwrite the
#sex-group fits above; run only the section you need before extracting fit measures.
fitconfig <- lavaan::cfa(mod2o, data=df,estimator="MLR", group="ag2")
fitmetric <- lavaan::cfa(mod2o, data=df,estimator="MLR", group="ag2", group.equal = c("loadings"))
fitscalar <- lavaan::cfa(mod2o, data=df,estimator="MLR", group="ag2", group.equal = c("loadings","intercepts"))
fitresidu <- lavaan::cfa(mod2o, data=df,estimator="MLR", group="ag2", group.equal = c("loadings","intercepts","residuals"))
# Robust (scaled) fit indices, rounded to 3 decimals, for each invariance level
paste(round(lavaan::fitMeasures(fitconfig, c("chisq.scaled","df","cfi.scaled","tli.scaled","rmsea.scaled","rmsea.ci.lower.scaled","rmsea.ci.upper.scaled","srmr","aic","bic")),3))
paste(round(lavaan::fitMeasures(fitmetric, c("chisq.scaled","df","cfi.scaled","tli.scaled","rmsea.scaled","rmsea.ci.lower.scaled","rmsea.ci.upper.scaled","srmr","aic","bic")),3))
paste(round(lavaan::fitMeasures(fitscalar, c("chisq.scaled","df","cfi.scaled","tli.scaled","rmsea.scaled","rmsea.ci.lower.scaled","rmsea.ci.upper.scaled","srmr","aic","bic")),3))
paste(round(lavaan::fitMeasures(fitresidu, c("chisq.scaled","df","cfi.scaled","tli.scaled","rmsea.scaled","rmsea.ci.lower.scaled","rmsea.ci.upper.scaled","srmr","aic","bic")),3))
# Nested model comparison across the four invariance levels
lavaan::anova(fitconfig,fitmetric,fitscalar,fitresidu)
# Standardized parameter estimates for each fitted model
lavaan::standardizedSolution(fitconfig)
lavaan::standardizedSolution(fitmetric)
lavaan::standardizedSolution(fitscalar)
lavaan::standardizedSolution(fitresidu)
|
97bc0a6724ea60ac8a82b62417bfdf62e6da0f3f
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/onemap/R/compare.R
|
98a89d9736d413beb0fc1b788ae60c1db3b94a02
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,823
|
r
|
compare.R
|
#######################################################################
# #
# Package: onemap #
# #
# File: compare.R #
# Contains: compare, print.compare #
# #
# Written by Gabriel R A Margarido & Marcelo Mollinari #
# copyright (c) 2009, Gabriel R A Margarido & Marcelo Mollinari #
# #
# First version: 02/27/2009 #
# Last update: 09/25/2009 #
# License: GNU General Public License version 2 (June, 1991) or later #
# #
#######################################################################
## This function evaluates all n!/2 possible orders for n markers
## Exhaustive order search: evaluates all n!/2 possible orders of the n
## markers in 'input.seq'. For every order and every non-ambiguous
## linkage-phase configuration the multipoint map is estimated via the HMM
## in est.map.c(), and the 'n.best' orders with the highest likelihood are
## kept.
##
## Arguments:
##   input.seq - an object of class 'sequence'
##   n.best    - number of best orders to retain (default 50)
##   tol       - convergence tolerance passed to est.map.c()
##   verbose   - if TRUE, print each order as it is evaluated; otherwise a
##               progress bar is shown
## Returns an object of class 'compare'.
compare <- function(input.seq,n.best=50,tol=10E-4,verbose=FALSE) {
  ## checking for correct objects
  ## inherits() instead of comparing class(): comparing class() breaks when
  ## an object carries more than one class attribute.
  if(!inherits(input.seq,"sequence")) stop(sQuote(deparse(substitute(input.seq)))," is not an object of class 'sequence'")
  if(length(input.seq$seq.num) > 5) cat("WARNING: this operation may take a VERY long time\n")
  flush.console()
  if(length(input.seq$seq.num) > 10) {
    cat("\nIt is not wise to try to use 'compare' with more than 10 markers \n")
    ANSWER <- readline("Are you sure you want to proceed? [y or n]\n")
    while(substr(ANSWER, 1, 1) != "n" & substr(ANSWER, 1, 1) != "y")
      ANSWER <- readline("\nPlease answer: 'y' or 'n' \n")
    if (substr(ANSWER, 1, 1) == "n") stop("Execution stopped!")
  }
  if(length(input.seq$seq.num) == 2) return(map(input.seq, tol=tol)) ## nothing to be done for 2 markers
  else {
    ## allocating variables; slot (n.best+1) is a scratch row that each new
    ## result is written into before re-sorting by likelihood
    rf.init <- vector("list",length(input.seq$seq.num)-1)
    phase.init <- vector("list",length(input.seq$seq.num)-1)
    best.ord <- matrix(NA,(n.best+1),length(input.seq$seq.num))
    best.ord.rf <- matrix(NA,(n.best+1),length(input.seq$seq.num)-1)
    best.ord.phase <- matrix(NA,(n.best+1),length(input.seq$seq.num)-1)
    best.ord.like <- best.ord.LOD <- rep(-Inf,(n.best+1))
    ## 'phases' gathers information from two-point analyses
    list.init <- phases(input.seq)
    ## 'perm.pars' generates all n!/2 orders
    all.ord <- perm.pars(input.seq$seq.num)
    cat("\nComparing",nrow(all.ord),"orders: \n\n")
    if (verbose){
      for(i in 1:nrow(all.ord)){
        ## print output for each order
        cat("Order", i, ":", all.ord[i,], "\n")
        flush.console()
        ## get initial values for the HMM from the two-point results;
        ## acum() indexes the triangular storage of pairwise estimates
        all.match <- match(all.ord[i,],input.seq$seq.num)
        for(j in 1:(length(input.seq$seq.num)-1)){
          if(all.match[j] > all.match[j+1]){
            rf.init[[j]] <- list.init$rf.init[[acum(all.match[j]-2)+all.match[j+1]]]
            phase.init[[j]] <- list.init$phase.init[[acum(all.match[j]-2)+all.match[j+1]]]
          }
          else {
            rf.init[[j]] <- list.init$rf.init[[acum(all.match[j+1]-2)+all.match[j]]]
            phase.init[[j]] <- list.init$phase.init[[acum(all.match[j+1]-2)+all.match[j]]]
          }
        }
        Ph.Init <- comb.ger(phase.init)
        Rf.Init <- comb.ger(rf.init)
        if(nrow(Ph.Init)>1){
          ##Removing ambigous phases
          rm.ab<-rem.amb.ph(M=Ph.Init, w=input.seq, seq.num=all.ord[i,])
          Ph.Init <- Ph.Init[rm.ab,]
          Rf.Init <- Rf.Init[rm.ab,]
          ## subsetting to a single row above drops the matrix to a vector;
          ## restore the matrix shape. (The old check class(x)=="integer"
          ## fails on R >= 4.0, where a matrix has class c("matrix","array")
          ## and if() receives a length-2 condition.)
          if(!is.matrix(Ph.Init)){
            Ph.Init<-matrix(Ph.Init,nrow=1)
            Rf.Init<-matrix(Rf.Init,nrow=1)
          }
        }
        for(j in 1:nrow(Ph.Init)){
          ## estimate parameters
          final.map <- est.map.c(geno=get(input.seq$data.name, pos=1)$geno[,all.ord[i,]],
                                 type=get(input.seq$data.name, pos=1)$segr.type.num[all.ord[i,]],
                                 phase=Ph.Init[j,],
                                 rec=Rf.Init[j,],
                                 verbose=FALSE,
                                 tol=tol)
          best.ord[(n.best+1),] <- all.ord[i,]
          best.ord.rf[(n.best+1),] <- final.map$rf
          best.ord.phase[(n.best+1),] <- Ph.Init[j,]
          best.ord.like[(n.best+1)] <- final.map$loglike
          ## arrange orders according to the likelihood
          like.order <- order(best.ord.like, decreasing=TRUE)
          best.ord <- best.ord[like.order,]
          best.ord.rf <- best.ord.rf[like.order,]
          best.ord.phase <- best.ord.phase[like.order,]
          best.ord.like <- sort(best.ord.like, decreasing=TRUE)
        }
      }
    }
    else{
      ## same loop as above, but with a progress bar instead of per-order
      ## printed output
      count <- 0
      pb <- txtProgressBar(style=3)
      setTxtProgressBar(pb, 0)
      cat(" ")
      for(i in 1:nrow(all.ord)){
        ## get initial values for the HMM
        all.match <- match(all.ord[i,],input.seq$seq.num)
        for(j in 1:(length(input.seq$seq.num)-1)){
          if(all.match[j] > all.match[j+1]){
            rf.init[[j]] <- list.init$rf.init[[acum(all.match[j]-2)+all.match[j+1]]]
            phase.init[[j]] <- list.init$phase.init[[acum(all.match[j]-2)+all.match[j+1]]]
          }
          else {
            rf.init[[j]] <- list.init$rf.init[[acum(all.match[j+1]-2)+all.match[j]]]
            phase.init[[j]] <- list.init$phase.init[[acum(all.match[j+1]-2)+all.match[j]]]
          }
        }
        Ph.Init <- comb.ger(phase.init)
        Rf.Init <- comb.ger(rf.init)
        if(nrow(Ph.Init)>1){
          ##Removing ambigous phases
          rm.ab<-rem.amb.ph(M=Ph.Init, w=input.seq, seq.num=all.ord[i,])
          Ph.Init <- Ph.Init[rm.ab,]
          Rf.Init <- Rf.Init[rm.ab,]
          ## restore matrix shape after single-row subsetting (see note in
          ## the verbose branch)
          if(!is.matrix(Ph.Init)){
            Ph.Init<-matrix(Ph.Init,nrow=1)
            Rf.Init<-matrix(Rf.Init,nrow=1)
          }
        }
        for(j in 1:nrow(Ph.Init)){
          ## estimate parameters
          final.map <- est.map.c(geno=get(input.seq$data.name, pos=1)$geno[,all.ord[i,]],
                                 type=get(input.seq$data.name, pos=1)$segr.type.num[all.ord[i,]],
                                 phase=Ph.Init[j,],
                                 rec=Rf.Init[j,],
                                 verbose=FALSE,
                                 tol=tol)
          best.ord[(n.best+1),] <- all.ord[i,]
          best.ord.rf[(n.best+1),] <- final.map$rf
          best.ord.phase[(n.best+1),] <- Ph.Init[j,]
          best.ord.like[(n.best+1)] <- final.map$loglike
          ## arrange orders according to the likelihood
          like.order <- order(best.ord.like, decreasing=TRUE)
          best.ord <- best.ord[like.order,]
          best.ord.rf <- best.ord.rf[like.order,]
          best.ord.phase <- best.ord.phase[like.order,]
          best.ord.like <- sort(best.ord.like, decreasing=TRUE)
        }
        count<-count+1
        setTxtProgressBar(pb, count/nrow(all.ord))
      }
      close(pb)
    }
    cat("\n")
    ## LOD scores relative to the best (highest-likelihood) order
    best.ord.LOD <- round((best.ord.like-max(best.ord.like))/log(10),4)
    structure(list(best.ord = best.ord, best.ord.rf = best.ord.rf,
                   best.ord.phase = best.ord.phase, best.ord.like = best.ord.like,
                   best.ord.LOD = best.ord.LOD, data.name=input.seq$data.name, twopt=input.seq$twopt), class = "compare")
  }
}
## print method for object class 'compare'
## print method for object class 'compare': lists the unique best orders,
## and -- for outcrossing populations -- the linkage-phase configurations
## nested within each order, together with LOD scores.
print.compare <-
  function(x,...) {
    ## FLAG == 0 for outcrossing data (phases are printed), 1 otherwise.
    ## inherits() instead of class() != "...": the class() comparison yields
    ## a length > 1 condition when the object has multiple classes.
    FLAG<-0
    if(!inherits(get(x$data.name, pos=1), "outcross")) FLAG<-1
    phases.char <- c("CC","CR","RC","RR")
    ## number of stored orders with a finite LOD (the scratch row is dropped)
    n.ord <- max(which(head(x$best.ord.LOD,-1) != -Inf))
    unique.orders <- unique(x$best.ord[1:n.ord,])
    n.ord.nest <- nrow(unique.orders)
    phases.nested <- vector("list",n.ord.nest)
    LOD <- vector("list",n.ord.nest)
    ## group the phase configurations and LODs belonging to each unique order
    for (i in 1:n.ord.nest) {
      same.order <- which(apply(x$best.ord[1:n.ord,],1,function(x) all(x==unique.orders[i,])))
      ifelse(length(same.order)==1,phases.nested[[i]] <- t(as.matrix(x$best.ord.phase[same.order,])),phases.nested[[i]] <- x$best.ord.phase[same.order,])
      LOD[[i]] <- x$best.ord.LOD[same.order]
    }
    ## column widths for aligned output
    skip <- c(nchar(n.ord.nest),max(nchar(unique.orders[1,])+2))
    cat("\nNumber of orders:",n.ord,"\n")
    if(FLAG==0){ ## outcrossing
      leng.print <- nchar(paste("order ",format(n.ord.nest,width=skip[1]),": ",paste(format(unique.orders[1,],width=skip[2]),collapse="")," ",format(11.11,digits=2,format="f",width=6)," ",format(11.11,digits=2,format="f",width=6),"\n",sep=""))
      cat(paste("Best ",n.ord.nest," unique orders",paste(rep(" ",leng.print-37),collapse=""),"LOD Nested LOD","\n",sep=""))
      cat(paste(rep("-",leng.print),collapse=""),"\n")
    }
    else if(FLAG==1){ ## other
      leng.print <- nchar(paste("order ",format(n.ord.nest,width=skip[1]),": ",paste(format(unique.orders[1,],width=skip[2]),collapse="")," ",format(11.11,digits=2,format="f",width=6),"\n",sep=""))
      cat(paste("Best ",n.ord.nest," unique orders",paste(rep(" ",leng.print-25),collapse=""),"LOD","\n",sep=""))
      cat(paste(rep("-",leng.print),collapse=""),"\n")
    }
    else stop ("Should not get here!")
    if(FLAG==0){ ## outcrossing: print each order followed by its nested
      ## phase configurations, absolute LOD and LOD relative to the best
      ## phase within the order
      for (i in 1:n.ord.nest) {
        cat(paste("order ",format(i,width=skip[1]),": ",paste(format(unique.orders[i,],width=skip[2]),collapse=""),"\n",sep=""))
        for (j in 1:dim(phases.nested[[i]])[1]) {
          cat(paste("\t",paste(rep(" ",1+skip[1]+skip[2]),collapse=""),paste(format(phases.char[phases.nested[[i]][j,]],width=skip[2]),collapse="")," ",formatC(round(LOD[[i]][j],2),digits=2,format="f",width=6)," ",formatC(round(LOD[[i]][j]-LOD[[i]][1],2),digits=2,format="f",width=6),"\n",sep=""))
        }
        cat(paste(rep("-",leng.print),collapse=""))
        cat("\n")
      }
    }
    else if(FLAG==1){ ## other: one line per order with its LOD
      for (i in 1:n.ord.nest) {
        cat(paste("order ",format(i,width=skip[1]),": ",paste(format(unique.orders[i,],width=skip[2]),collapse=""), " ",formatC(round(LOD[[i]][1],2),digits=2,format="f",width=6), "\n",sep=""))
      }
      cat(paste(rep("-",leng.print),collapse=""))
      cat("\n")
    }
    else stop ("Should not get here!")
  }
## end of file
|
0cfdcace69d05b76fd83b350f744609cf4f0a530
|
ec3dc56b806ff77e94e31dd94860cdbe621f1547
|
/cachematrix.R
|
bf4fa513226a8705cd4c485ca4aa3ce6ff1e8ace
|
[] |
no_license
|
kchbaw/ProgrammingAssignment2
|
4e43f8243ff2a13664c26c9f05da8a7096ec21f2
|
634268742c9df8f3e88db22fe855d4d7e46785ff
|
refs/heads/master
| 2020-09-02T11:23:29.480591
| 2019-11-03T18:58:39
| 2019-11-03T18:58:39
| 219,210,651
| 0
| 0
| null | 2019-11-02T20:34:48
| 2019-11-02T20:34:47
| null |
UTF-8
|
R
| false
| false
| 3,280
|
r
|
cachematrix.R
|
## OVerall I used two functions very similar the cachemean function provided as an example. The first function, makeCacheMatrix, takes a matrix as an input and returns a list. This list has cached data saved to it.
## The 2nd funtion, cachesolve, takes input list in the format of the output of makeCacheMatrix. If the matrix has been previously tested the the cached inverse matric will be returned. If this is a new matrix then the cachesolve will determine the inverse.
## makeCacheMatrix function follows. This function takes an input matrix and returns a list.
#1. x is the input matrix, the default is an empty matix.
#2. The first step is define the function set that sets the initial value of x and m. The <<- operator means this information is called object in an environment that is different from an enviroment different from the current function.
#3. The 2nd step defines the get function as makine equal to the input matrix
#4. The next steps assign the inverse of the matrix to m
#5. Lastly, the output list is created. set data (x & m), get (input matrix), setsolve (the inverse), and getsolve (new cached m data)
#6. By naming the objects in the list, $ can be used to select parts.
## Create a caching wrapper around a matrix.
## Returns a list of four accessors: set()/get() for the matrix itself and
## setsolve()/getsolve() for its cached inverse. The cache lives in the
## closure's enclosing environment (updated via <<-) and is invalidated
## (reset to NULL) whenever a new matrix is stored with set().
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setsolve <- function(solve) {
    cached_inverse <<- solve
  }
  getsolve <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setsolve = setsolve,
       getsolve = getsolve)
}
## Compute (or fetch) the inverse of the matrix wrapped by makeCacheMatrix().
## If an inverse has already been cached on `x`, it is returned directly
## (after a "getting cached data" message); otherwise it is computed with
## solve(), stored in the cache, and returned. Extra arguments in ... are
## forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getsolve()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setsolve(inverse)
  inverse
}
## Manual checks for makeCacheMatrix()/cacheSolve() using four matrices.
## For each one: wrap it, compute the cached inverse, and multiply the
## original by the inverse (the product should be the identity matrix).
## A nested call is also exercised for each matrix.
mat_a <- matrix(c(3, 3.2, 3.5, 3.6), 2, 2)
mat_a
wrap_a <- makeCacheMatrix(mat_a)
inv_a <- cacheSolve(wrap_a)
inv_a
mat_a %*% inv_a                       # should print the 2x2 identity
cacheSolve(makeCacheMatrix(mat_a))    # nested call, fresh cache
mat_b <- matrix(c(4, 2, 7, 6), 2, 2)
mat_b
wrap_b <- makeCacheMatrix(mat_b)
inv_b <- cacheSolve(wrap_b)
inv_b
mat_b %*% inv_b
cacheSolve(makeCacheMatrix(mat_b))
mat_c <- matrix(c(6, 2, 8, 4), 2, 2)
mat_c
wrap_c <- makeCacheMatrix(mat_c)
inv_c <- cacheSolve(wrap_c)
inv_c
mat_c %*% inv_c
cacheSolve(makeCacheMatrix(mat_c))
mat_d <- matrix(c(1, 0, 5, 2, 1, 6, 3, 4, 0), 3, 3)  # a 3x3 case
mat_d
wrap_d <- makeCacheMatrix(mat_d)
inv_d <- cacheSolve(wrap_d)
inv_d
mat_d %*% inv_d
cacheSolve(makeCacheMatrix(mat_d))
|
e727fb2b317a477a62961675e6b2b89103aab52e
|
669cb22798e081bcdd50308251b3dc14715a369d
|
/man/music-package.Rd
|
72171f7a471c35f5e58a3f30ae47c919441fd395
|
[] |
no_license
|
cran/music
|
1205fdddd3eea8e9c17bb6a33e359862bc269132
|
168954210763189e90a05b3ad35da07e3b7415c9
|
refs/heads/master
| 2022-07-21T21:17:15.856809
| 2022-07-10T17:30:02
| 2022-07-10T17:30:02
| 171,648,062
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 742
|
rd
|
music-package.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/zzz.R
\docType{package}
\name{music-package}
\alias{music-package}
\title{\pkg{music}: Learn and use music theory}
\description{
The music package allows you to build, play, and visualize scales, chords, and chord progressions.
For playback, \pkg{music} builds waveforms as matrices and passes them to the \pkg{audio} package which
interfaces with the system's audio driver.
The default notation and frequencies used throughout the package are based on twelve-tone equal temperament tuning
(12ET). Custom tuning can be defined by specifying frequency ratios and a root note. See \link{note2freq}.
A4 defaults to 440Hz, and can be changed with the 'A4' argument.
}
|
3d8558233d16a51e6093c45c1b5996c8e0aa2fea
|
6028f2b2cc7afb1cc2c5f22ee37ce96cc6f5af70
|
/R_code_spatial2.r
|
9670c16f1980444adad3bae8603f97562d05d098
|
[] |
no_license
|
serenasconci/ecologiadelpaesaggio
|
ec9070d604c2d855f1d5cb492f359fd3a46d5277
|
4bb451474355918be2f3a2a0c0bb3ae5b3fab10f
|
refs/heads/master
| 2021-04-01T15:39:10.942090
| 2020-06-21T14:18:16
| 2020-06-21T14:18:16
| 248,197,276
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,084
|
r
|
R_code_spatial2.r
|
### R spatial analysis exercises (package 'sp').
### Comments translated from Italian to English.
library(sp)
# data to use: the built-in 'meuse' soil-composition data set
data(meuse)
head(meuse)
coordinates(meuse)=~x+y
# declare which columns hold the spatial coordinates of the data set
spplot(meuse,"zinc")
# spplot of the zinc measurements
# EXERCISE: spplot of the copper data
head(meuse)
spplot(meuse,"copper")
bubble(meuse,"zinc")
# bubble() is an alternative way to plot the data:
# it draws a bubble chart of spatial data
# EXERCISE: bubble plot of copper, colored red
bubble(meuse,"copper",col="red")
# example with data obtained from theses:
# foraminifera (Sofia), carbon capture (Marco)
foram <- c(10, 20, 35, 55, 67, 80)
carbon <- c(5, 15, 30, 70, 85, 99)
plot(foram,carbon,col="green",cex=2,pch=19)
# the arguments above control color, point size (cex) and symbol (pch)
# external data: covid-19
setwd("C:/lab") # Windows path
# setwd() points R at the working folder used for file I/O;
# here the 'lab' folder is used
# NOTE(review): hard-coded setwd() makes the script machine-specific
covid <- read.table("covid_agg.csv",head=TRUE)
# read.table(file, head=TRUE) reads a file in tabular format
# and creates a data frame; the table is assigned the name 'covid'
head(covid)
|
be1470880511af5d195993fbd695a27cb80aa87b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/CUMP/examples/combGWAS.Rd.R
|
acff7499d5aa2d88b2cba0bc0d0ab3a77b49f823
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 945
|
r
|
combGWAS.Rd.R
|
library(CUMP)
### Name: combGWAS
### Title: Combining Univariate Association Test Results of Multiple
### Phenotypes for Detecting Pleiotropy
### Aliases: combGWAS
### ** Examples
##The following are two fake examples. Do NOT run.
##Please refer to example.pdf for details.
##no change of beta signs before combining
##combGWAS(project="mv",traitlist=c("phen1","phne2"),
## traitfile=c("Phen1GWAS.csv", "Phen2GWAS.csv"), comb_method=c("z","chisq"),
## betasign=c(1,1), snpid="SNPID", beta="beta", SE="SE",
## coded_all="coded_all"", AF_coded_all=" AF_coded_all ", pvalue="pval")
##change of beta signs before combining: the beta sign for the 2nd phenotype reversed
##combGWAS(project="mv",traitlist=c("phen1","phne2"),
## traitfile=c("Phen1GWAS.csv", "Phen2GWAS.csv"), comb_method=c("z","chisq"),
## betasign=c(1,-1), snpid="SNPID", beta="beta", SE="SE",
## coded_all="coded_all ", AF_coded_all=" AF_coded_all ", pvalue="pval")
|
8338a3e5fecd7c61c165d5a642d26915e9f5ba5c
|
c6a9861ea469c1100a38f4ce58d0037512269db0
|
/dlm.R
|
4d4769944024f04bf17418aa1a56a52da95403aa
|
[] |
no_license
|
derekgray/Depth-project
|
1f472306e312f7bcfed2440a9d7b86992fbcd55b
|
5361367c5c623707805ee94714661f1d258eb7c4
|
refs/heads/master
| 2020-05-16T23:23:37.324862
| 2013-02-08T00:07:05
| 2013-02-08T00:07:05
| 7,879,317
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,468
|
r
|
dlm.R
|
## ---------------------------------------------------------------------------
## Interactive model-comparison script: fits a series of dynamic linear
## models (package 'dlm') to a depth time series and compares their
## one-step-ahead forecast performance with AICdlm() (defined later in
## this file).
## NOTE(review): the objects `response`, `temppred`, `stratpred`, `allphyt`
## and the helper `NAfillts()` must already exist in the workspace -- they
## are created elsewhere in the project. `dlmFilterDF()` is also defined
## further down, so source the whole file before running a section.
## ---------------------------------------------------------------------------
#1 The random walk with noise
randomwalk <- function(x) dlmModPoly(1, dV = x[1], dW = x[2])
StructTS(response, "level") #level is W, epsilon is V
randomwalk.fit <- dlmMLE(response, parm = c(1, sd(response)),build = randomwalk)
mod <- randomwalk(randomwalk.fit$par)
unlist(randomwalk(randomwalk.fit$par)[c("V", "W")])
bob<-dlmFilter(response,mod)
par(mfrow=c(2,2))
plot(dropFirst(bob$m), main="filtered state") #filtered values of state vector
plot(dropFirst(bob$f),main="one step ahead forecast") #one step ahead forecast
plot(dropFirst(bob$a), main="predicted") #predicted values given data up to and including previous time unit
plot(bob$y, main="raw data") #data input
AICdlm(filteredobject="bob",MLEfitobject="randomwalk.fit",responsedata="response",burn_in=10)
#2 random walk with seasonal component (period 12)
library(dlm)
randomwalk.seasonal <- function(x) dlmModPoly(order=1, dV = exp(x[1]), dW = exp(x[2])) +dlmModSeas(frequency=12, dV=sd(response))
randomwalk.seasonal.fit<-dlmMLE(response,par=rep(0,2), build=randomwalk.seasonal, lower=c(1e-6,0))
mod.randomwalk.seasonal <- randomwalk.seasonal(randomwalk.seasonal.fit$par)
unlist(randomwalk.seasonal(randomwalk.seasonal.fit$par)[c("V", "W")])
randomwalk.seasonal.filtered<-dlmFilter(response,mod.randomwalk.seasonal)
plot(randomwalk.seasonal.filtered$f) #plot one step ahead forecasts
plot(randomwalk.seasonal.filtered$m[,1:3]) #filtered values of state vectors
lines(response,col="red")
AICdlm(filteredobject="randomwalk.seasonal.filtered",MLEfitobject="randomwalk.seasonal.fit",responsedata="response",burn_in=10)
#for comparison, try the StructTS function
bob2<-StructTS(response,type="trend") #local trend model, V=epsilon, W=level
plot(bob2$fitted)
bob3<-StructTS(response,type="BSM") #local trend plus seasonal component
plot(bob3$fitted)
#3 local trend model with seasonal component
library(dlm)
localtrend.seasonal <- function(x) dlmModPoly(order=2, dV = exp(x[1]), dW = c(rep(exp(x[2]),2))) +dlmModSeas(frequency=12, dV=sd(response))
localtrend.seasonal.fit<-dlmMLE(response,par=rep(0,2), build=localtrend.seasonal, lower=c(1e-6,0))
mod <- localtrend.seasonal(localtrend.seasonal.fit$par)
unlist(localtrend.seasonal(localtrend.seasonal.fit$par)[c("V", "W")])
localtrend.seasonal.filtered<-dlmFilter(response,mod)
plot(localtrend.seasonal.filtered$f) #plot one step ahead forecasts
plot(localtrend.seasonal.filtered$m[,1:3]) #filtered values of state vectors
lines(response,col="red")
AICdlm(filteredobject="localtrend.seasonal.filtered",MLEfitobject="localtrend.seasonal.fit",responsedata="response",burn_in=10)
#for comparison, try the StructTS function
bob2<-StructTS(response,type="trend") #local trend model, V=epsilon, W=level
plot(bob2$fitted)
bob3<-StructTS(response,type="BSM") #local trend plus seasonal component
plot(bob3$fitted)
#4 Just temperature (regression on temppred, no intercept)
datas<-ts.intersect(response,temppred)
response<-datas[,1]
temppred<-datas[,2]
temp.function <- function(x) dlmModReg(temppred,dW=sd(temppred),dV=exp(x[2]),addInt=FALSE)
temp.fit <- dlmMLE(response,par=c(sd(temppred), 1), build=temp.function)
mod.temp <- temp.function(temp.fit$par)
bob.temp<-dlmFilter(response,mod.temp)
unlist(temp.function(temp.fit$par)[c("V", "W")])
par(mfrow=c(2,2))
# NOTE(review): the four plots below use `bob` (from section 1), not
# `bob.temp` -- probably a copy-paste slip; confirm before relying on them.
plot(bob$m, main="filtered state") #filtered values of state vector
plot(bob$f,main="one step ahead forecast") #one step ahead forecast
plot(bob$a, main="predicted") #predicted values given data up to and including previous time unit
plot(bob$y, main="raw data") #data input
AICdlm(filteredobject="bob.temp",MLEfitobject="temp.fit",responsedata="response",burn_in=10)
outS <- dlmSmooth(response, mod)
plot(dropFirst(outS$s))
#5 Temperature plus seasonal component
datas<-ts.intersect(response,temppred)
response<-datas[,1]
temppred<-datas[,2]
library(vegan)
tempstand<-decostand(temppred, method="standardize")
temp.seasonal.function <- function(x) dlmModReg(tempstand,dW=sd(tempstand),dV=exp(x[2]),addInt=FALSE) +dlmModSeas(frequency=12,dV=sd(response))
temp.seasonal.fit <- dlmMLE(response,par=c(sd(tempstand), 1), build=temp.seasonal.function)
mod.temp.seasonal <- temp.seasonal.function(temp.seasonal.fit$par)
bob.temp.seasonal<-dlmFilter(response,mod.temp.seasonal)
unlist(temp.seasonal.function(temp.seasonal.fit$par)[c("V", "W")])
par(mfrow=c(2,2))
# NOTE(review): plots reference `bob`, not `bob.temp.seasonal` -- see above.
plot(bob$m[,1:3], main="filtered state") #filtered values of state vector
plot(bob$f,main="one step ahead forecast") #one step ahead forecast
plot(bob$a[,1:3], main="predicted") #predicted values given data up to and including previous time unit
plot(bob$y, main="raw data") #data input
AICdlm(filteredobject="bob.temp.seasonal",MLEfitobject="temp.seasonal.fit",responsedata="response",burn_in=10)
#6 stratification plus seasonal component
datas<-ts.intersect(response,stratpred)
response<-datas[,1]
stratpred<-datas[,2]
library(vegan)
stratstand<-decostand(stratpred, method="standardize")
temp.seasonal.function <- function(x) dlmModReg(stratstand,dW=sd(stratstand),dV=exp(x[2]),addInt=FALSE) +dlmModSeas(frequency=12,dV=sd(response))
temp.seasonal.fit <- dlmMLE(response,par=c(sd(stratstand), 1), build=temp.seasonal.function)
mod.temp.seasonal <- temp.seasonal.function(temp.seasonal.fit$par)
bob.temp.seasonal<-dlmFilter(response,mod.temp.seasonal)
unlist(temp.seasonal.function(temp.seasonal.fit$par)[c("V", "W")])
par(mfrow=c(2,2))
plot(bob$m[,1:3], main="filtered state") #filtered values of state vector
plot(bob$f,main="one step ahead forecast") #one step ahead forecast
plot(bob$a[,1:3], main="predicted") #predicted values given data up to and including previous time unit
plot(bob$y, main="raw data") #data input
AICdlm(filteredobject="bob.temp.seasonal",MLEfitobject="temp.seasonal.fit",responsedata="response",burn_in=10)
#2 random walk plus avg temperature in top 250m----------------------------------
datas<-ts.intersect(response,temppred)
temppred<-datas[,2]
response<-datas[,1]
#combine models
random.temp.function <- function(x) dlmModPoly(1, dV = x[1], dW = x[2])+ dlmModReg(temppred,dW=sd(temppred),addInt=FALSE)
random.temp.fit <- dlmMLE(response,par=c(1,sd(response),sd(temppred)), build=random.temp.function)
mod <- random.temp.function(random.temp.fit$par)
#try the model with discount factors between 0.9 and 0.99
res<-list()
for (DFA in c(seq(from=0.9,to=1,by=0.01))){
bob<-dlmFilterDF(response,mod,DF=DFA)
res[[as.character(DFA)]]<-AICdlm(filteredobject="bob",MLEfitobject="random.temp.fit",responsedata="response",burn_in=10)
}
bob<-dlmFilterDF(response,mod, DF=0.98) #0.98 best discount
par(mfrow=c(2,2))
plot(bob$m, main="filtered state") #filtered values of state vector
plot(bob$f,main="one step ahead forecast") #one step ahead forecast
plot(bob$a, main="predicted") #predicted values given data up to and including previous time unit
plot(bob$y, main="raw data") #data input
# NOTE(review): "bob1" below looks like a typo for "bob" -- AICdlm will fail
# unless an object named bob1 exists.
AICdlm(filteredobject="bob1",MLEfitobject="random.temp.fit",responsedata="response",burn_in=10)
#3 random walk plus avg temperature in top 250m plus day length----------------------------------
random.temp.function <- function(x) dlmModPoly(order=1, dV = x[1], dW = x[2])+ dlmModReg(temppred,dW=sd(temppred), addInt=FALSE)
random.temp.fit <- dlmMLE(response,par=c(1,sd(response),sd(temppred)), build=random.temp.function)
mod <- random.temp.function(random.temp.fit$par)
bob<-dlmFilter(response,mod)
par(mfrow=c(2,2))
plot(bob$m, main="filtered state") #filtered values of state vector
plot(bob$f,main="one step ahead forecast") #one step ahead forecast
plot(bob$a, main="predicted") #predicted values given data up to and including previous time unit
plot(bob$y, main="raw data") #data input
AICdlm(filteredobject="bob",MLEfitobject="random.temp.fit",responsedata="response",burn_in=10)
#4a random walk plus seasonality plus statification intensity----------------------------------
datas<-ts.intersect(response,stratpred)
response<-datas[,1]; stratpred<-datas[,2]
random.temp.function <- function(x) dlmModPoly(order=1, dV = x[1], dW = x[2])+ dlmModSeas(frequency=12)
random.temp.fit <- dlmMLE(response,par=c(1,sd(response)), build=random.temp.function)
mod <- random.temp.function(random.temp.fit$par)
bob<-dlmFilter(response,mod)
par(mfrow=c(2,2))
plot(bob$m, main="filtered state") #filtered values of state vector
plot(bob$f,main="one step ahead forecast") #one step ahead forecast
plot(bob$a, main="predicted") #predicted values given data up to and including previous time unit
plot(bob$y, main="raw data") #data input
AICdlm(filteredobject="bob",MLEfitobject="random.temp.fit",responsedata="response",burn_in=10)
#4c random walk plus seasonality plus phytoplankton DWA----------------------------------
datas<-ts.intersect(response,NAfillts(allphyt))
response<-datas[,1]; phytpred<-datas[,2]
random.temp.function <- function(x) dlmModReg(log10(phytpred+1),dW=1, addInt=FALSE)
random.temp.fit <- dlmMLE(response,par=0, build=random.temp.function)
mod <- random.temp.function(random.temp.fit$par)
bob<-dlmFilter(response,mod)
par(mfrow=c(2,2))
plot(bob$m, main="filtered state") #filtered values of state vector
plot(bob$f,main="one step ahead forecast") #one step ahead forecast
plot(bob$a, main="predicted") #predicted values given data up to and including previous time unit
plot(bob$y, main="raw data") #data input
AICdlm(filteredobject="bob",MLEfitobject="random.temp.fit",responsedata="response",burn_in=10)
#4b random walk plus seasonality plus statification intensity----------------------------------
datas<-ts.intersect(response,stratpred)
response<-datas[,1]; stratpred<-datas[,2]
random.temp.function <- function(x) dlmModPoly(order=1, dV = x[1], dW = x[2])+ dlmModSeas(frequency=12) + dlmModReg(stratpred,dW=sd(stratpred), addInt=FALSE)
random.temp.fit <- dlmMLE(response,par=c(1,sd(response),sd(stratpred)), build=random.temp.function)
mod <- random.temp.function(random.temp.fit$par)
bob<-dlmFilter(response,mod)
par(mfrow=c(2,2))
plot(bob$m, main="filtered state") #filtered values of state vector
plot(bob$f,main="one step ahead forecast") #one step ahead forecast
plot(bob$a, main="predicted") #predicted values given data up to and including previous time unit
plot(bob$y, main="raw data") #data input
AICdlm(filteredobject="bob",MLEfitobject="random.temp.fit",responsedata="response",burn_in=10)
## Compute forecast-accuracy and information-criterion statistics for a
## fitted dynamic linear model.
##
## The first three arguments are the *names* (character strings) of objects
## resolved with get() in the enclosing/global environment:
##   filteredobject - a dlmFiltered-style object (needs $y and $f)
##   responsedata   - the response series the model was fitted to
##   MLEfitobject   - the dlmMLE fit (needs $value and $par)
##   burn_in        - number of initial one-step-ahead forecasts to discard
##                    before computing the statistics (default 10)
##
## Returns a one-row data.frame (row named after MLEfitobject) with:
##   MSE    mean squared error of the one-step-ahead forecasts
##   MAD    mean absolute deviation
##   MAPE   mean absolute percentage error (0 = perfect fit)
##   U      Theil's U (<1 better than naive guessing, >1 worse)
##   logLik log-likelihood (negated dlmMLE objective)
##   k      number of estimated parameters
##   AIC    -2 * (logLik - k)
AICdlm <- function(filteredobject, responsedata, MLEfitobject, burn_in = 10) {
  ## resolve each named object once instead of repeated get() calls
  filt <- get(filteredobject)
  resp <- get(responsedata)
  fit <- get(MLEfitobject)
  label <- as.character(MLEfitobject)
  ## one-step-ahead forecast residuals with the burn-in period dropped
  linearTrend_resid <- tail(filt$y - filt$f, -burn_in)
  MSE <- c(); MAD <- c(); MAPE <- c(); U <- c(); logLik <- c(); k <- c()
  MSE[label] <- mean(linearTrend_resid^2) #mean squared error
  MAD[label] <- mean(abs(linearTrend_resid)) #mean absolute deviation (measure of dispersion)
  MAPE[label] <- mean(abs(linearTrend_resid) / tail(resp, -burn_in)) #mean absolute percentage error
  U[label] <- sqrt(mean(linearTrend_resid^2) / mean(tail(diff(resp)^2, -(burn_in - 1)))) #Theil's U
  logLik[label] <- -fit$value
  k[label] <- length(fit$par)
  AIC <- -2 * (logLik - k) # vectorized, for all models at once
  data.frame(MSE, MAD, MAPE, U, logLik, k, AIC)
}
#Function to use the dlmFilter with varying discount factors.--------------------
# Square-root (SVD-based) Kalman filter, adapted from dlm::dlmFilter, in
# which the system variance W is not fixed but set each step from the
# one-step state variance P via a discount factor: W_t = P_t * (1-DF)/DF.
# Arguments:
#   y        - response series (vector/ts/matrix)
#   mod      - a dlm model object (FF, GG, V, C0, m0 components are used)
#   simplify - if TRUE, return a plain list without the y component
#   DF       - discount factor in (0, 1]; smaller values let the state
#              adapt faster
# Returns a dlmFiltered-style object with filtered states (m), one-step
# state predictions (a), forecasts (f), and the SVD factors of the
# filtering/prediction/system covariance matrices (U.*/D.* pairs).
# NOTE(review): numeric code is kept byte-identical to the original.
dlmFilterDF <- function (y, mod, simplify = FALSE, DF)
{
## storage.mode(y) <- "double"
mod1 <- mod
yAttr <- attributes(y)
ytsp <- tsp(y)
y <- as.matrix(y)
timeNames <- dimnames(y)[[1]]
stateNames <- names(mod$m0)
# preallocate filtered states (m), predictions (a) and forecasts (f)
m <- rbind(mod$m0, matrix(0, nr = nrow(y), nc = length(mod$m0)))
a <- matrix(0, nr = nrow(y), nc = length(mod$m0))
f <- matrix(0, nr = nrow(y), nc = ncol(y))
# U.*/D.* hold SVD factors of the covariance matrices at every step
U.C <- vector(1 + nrow(y), mode = "list")
D.C <- matrix(0, 1 + nrow(y), length(mod$m0))
U.R <- vector(nrow(y), mode = "list")
D.R <- matrix(0, nrow(y), length(mod$m0))
U.W <- vector(nrow(y), mode = "list")
D.W <- matrix(0, nrow(y), length(mod$m0))
Wliste <- vector(nrow(y), mode = "list")
P <- vector(nrow(y), mode = "list")
# square root (and pseudo-inverse square root) of the observation
# variance V via SVD
tmp <- La.svd(mod$V, nu = 0)
Uv <- t(tmp$vt)
Dv <- sqrt(tmp$d)
Dv.inv <- 1/Dv
Dv.inv[abs(Dv.inv) == Inf] <- 0
sqrtVinv <- Dv.inv * t(Uv)
sqrtV <- Dv * Uv
# SVD of the prior state covariance C0
tmp <- La.svd(mod$C0, nu = 0)
U.C[[1]] <- t(tmp$vt)
D.C[1, ] <- sqrt(tmp$d)
for (i in seq(length = nrow(y))) {
tF.Vinv <- t(mod$FF) %*% crossprod(sqrtVinv)
# prediction step: a_t = G m_{t-1}, P_t = G C_{t-1} G'
a[i, ] <- mod$GG %*% m[i, ]
P[[i]] <- mod$GG %*% crossprod(D.C[i,] * t(U.C[[i]])) %*% t(mod$GG)
# discounting: the system variance is a fraction of P_t
Wliste[[i]] <- P[[i]]* ((1-DF)/DF)
svdW <- La.svd( Wliste[[i]] , nu = 0)
sqrtW <- sqrt(svdW$d) * svdW$vt
U.W[[i]] <- t(svdW$vt)
D.W[i, ] <- sqrt(svdW$d)
# R_t = P_t + W_t, assembled in square-root form
tmp <- La.svd(rbind(D.C[i, ] * t(mod$GG %*% U.C[[i]]),
sqrtW), nu = 0)
U.R[[i]] <- t(tmp$vt)
D.R[i, ] <- tmp$d
# one-step-ahead forecast
f[i, ] <- mod$FF %*% a[i, ]
D.Rinv <- 1/D.R[i, ]
D.Rinv[abs(D.Rinv) == Inf] <- 0
# update step: new filtering covariance C_t (square-root form) and
# filtered state m_t from the forecast error y_t - f_t
tmp <- La.svd(rbind(sqrtVinv %*% mod$FF %*% U.R[[i]],
diag(x = D.Rinv, nrow = length(D.Rinv))), nu = 0)
U.C[[i + 1]] <- U.R[[i]] %*% t(tmp$vt)
foo <- 1/tmp$d
foo[abs(foo) == Inf] <- 0
D.C[i + 1, ] <- foo
m[i + 1, ] <- a[i, ] + crossprod(D.C[i + 1, ] * t(U.C[[i
+ 1]])) %*% tF.Vinv %*% as.matrix(y[i, ] - f[i,])
}
m <- drop(m)
a <- drop(a)
f <- drop(f)
attributes(f) <- yAttr
ans <- list(m = m, U.C = U.C, D.C = D.C, a = a, U.R = U.R,
D.R = D.R, f = f, U.W=U.W, D.W=D.W)
ans$m <- drop(ans$m)
ans$a <- drop(ans$a)
ans$f <- drop(ans$f)
attributes(ans$f) <- yAttr
# restore ts attributes; m gets one extra leading time point (the prior)
if (!is.null(ytsp)) {
tsp(ans$a) <- ytsp
tsp(ans$m) <- c(ytsp[1] - 1/ytsp[3], ytsp[2:3])
class(ans$a) <- class(ans$m) <- if (length(mod$m0) >
1)
c("mts", "ts")
else "ts"
}
if (!(is.null(timeNames) && is.null(stateNames))) {
dimnames(ans$a) <- list(timeNames, stateNames)
dimnames(ans$m) <- list(if (is.null(timeNames)) NULL else c("",
timeNames), stateNames)
}
if (simplify)
return(c(mod = list(mod1), ans))
else {
attributes(y) <- yAttr
ans <- c(y = list(y), mod = list(mod1), ans)
class(ans) <- "dlmFiltered"
return(ans)
}
}
## ---------------------------------------------------------------------------
## Miscellaneous dlm / sspir experiments.
## Fixes in this revision: a bare URL (previously a parse error that made
## the whole file unsourceable) is now a comment; a stray '?' fused onto the
## `stdata` assignment is removed; T/F and `=` assignment replaced with
## TRUE/FALSE and `<-`.
## ---------------------------------------------------------------------------
# Simulated time-varying regression example
bob <- c()
set.seed(1234)
r <- rnorm(100)
X <- r
u <- -1*X + 0.5*rnorm(100)
MyModel <- function(x) dlmModReg(X, FALSE, dV = x[1]^2)
fit <- dlmMLE(u, parm = c(0.3), build = MyModel)
mod <- MyModel(fit$par)
bob <- dlmFilter(u, mod)
plot(bob$a)
points(bob$f, col="red")
StructTS(response, "level")
#level is W, epsilon is V
myMod <- dlmModReg(ts.intersect(response,temppred), addInt=FALSE, dV = 0.3228)
bob <- dlmFilter(response, myMod)
#p is number of parameters
p <- 1
Akaike <- (-2)*(-dlmLL(response, myMod))+2*p
# NOTE(review): `k` is not defined at this point -- this line errors when
# run top-to-bottom; presumably an AIC sketch left over from exploration.
2*k-(2*log(-dlmLL(response, myMod)))
buildFun <- function(x) {dlmModReg(response, dV = 0.3228)}
# Nile intervention example: a pulse regressor at 1899 via time-varying X
mod <- dlmModPoly(1, dV = 16300, C0 = 1e8)
X(mod) <- matrix(0, nrow = length(Nile))
X(mod)[time(Nile) == 1899] <- 60580
JW(mod) <- 1
library(dlm)
# BJsales example (the original line had a stray '?' fused onto it)
stdata <- BJsales.lead
BJmod <- dlmModReg(X = cbind(BJsales.lead, log(BJsales.lead)))
X(BJmod)
JFF(BJmod)
FF(BJmod)
# NOTE(review): `data` here is the base function unless a series named
# `data` exists in the workspace -- confirm the intended object.
bob <- dlmFilter(data, BJmod)
plot(data)
lines(bob$a)
# Reference: http://definetti.uark.edu/~gpetris/UseR-2011/SSMwR-useR2011handout.pdf
FF(BJmod)
# Nile random walk with noise, variances estimated on the log scale
nileBuild <- function(par) {
dlmModPoly(1, dV = exp(par[1]), dW = exp(par[2]))
}
nileMLE <- dlmMLE(Nile, rep(0,2), nileBuild); nileMLE$conv
nileMod <- nileBuild(nileMLE$par)
V(nileMod)
W(nileMod)
nileFilt <- dlmFilter(Nile, nileMod)
nileSmooth <- dlmSmooth(nileFilt)
plot(cbind(Nile, nileFilt$m[-1], nileSmooth$s[-1]), plot.type='s',
col=c("black","red","blue"), ylab="Level", main="Nile river", lwd=c(1,2,2))
# sspir: structural model for UK gas consumption
library(sspir)
phistart <- StructTS(UKgas)$coef
c(3.7e-4,0,1.7e-5,7.1e-4)
StructTS(UKgas)
gasmodel <- ssm(log10(UKgas) ~ -1 + tvar(polytime(time,degree=1)) + tvar(sumseason(time,period=4)), phi=NULL)
fit <- getFit(gasmodel)
plot(fit$m[,1:3])
a <- ssm(response~ tvar(polytime(index(response),degree=1)))
fit <- getFit(a)
plot(fit$m[,2])
lines(response, col="red")
# van drivers example (Poisson state-space model)
library(dlmodeler)
data(vandrivers)
vandrivers$y <- ts(vandrivers$y,start=1969,frequency=12)
vd.time <- time(vandrivers$y)
vd <- ssm( y ~ tvar(1) + seatbelt + sumseason(vd.time,12),
family=poisson(link="log"),
data=vandrivers,
phi = c(1,0.0004),
C0=diag(13)*100,
fit=FALSE
)
phi(vd)["(Intercept)"] <- exp(- 2*3.703307 )
C0(vd) <- diag(13)*1000
vd.res <- kfs(vd)
plot( vd.res$m[,1:3] )
# time-varying regression of the depth response on temperature
dwa.time <- time(response)
dwa.mod <- ssm( response ~ tvar(1) + tvar(temppred), family=gaussian, phi=NULL, fit=TRUE)
dwa.mod.f <- kfilter(dwa.mod)
plot(dwa.mod$ss$y)
lines(dwa.mod.f$m[,2], col="red")
phi(vd)["(Intercept)"] <- exp(- 2*3.703307 )
C0(vd) <- diag(13)*1000
vd.res <- kfs(vd)
plot( vd.res$m[,1:3] )
data(kurit)
m1 <- SS(kurit)
phi(m1) <- c(100,5)
m0(m1) <- matrix(130)
C0(m1) <- matrix(400)
m1.f <- kfilter(m1)
plot(m1$y)
lines(m1.f$m,lty=2)
# generate some data
N <- 365*5
t <- c(1:N,rep(NA,365))
a <- rnorm(N+365,0,.5)
y <- pi + cos(2*pi*t/365.25) + .25*sin(2*pi*t/365.25*3) +
exp(1)*a + rnorm(N+365,0,.5)
# build a model for this data
library(dlmodeler)
datas<-ts.intersect(response,stratpred)
resp<-datas[,1]
temp<-datas[,2]
m1 <- dlmodeler.build.polynomial(0,sigmaH=NA,sigmaQ=NA,name="level")
m4 <- dlmodeler.build.regression(temp,sigmaH=NA,name="reg")
m5 <- dlmodeler.add(m1,m4,name="mymodel")
fit<-dlmodeler.fit(resp,model=m1, method="MLE")
AIC(fit)
f <- dlmodeler.filter(resp, fit$model, raw.result=TRUE)
# extract all the components
lines(as.vector(f$f))
m.state.mean <- dlmodeler.extract(f,m4,type="state", value="mean")
m.state.cov <- dlmodeler.extract(f,m4,type="state", value="covariance")
m.obs.mean <- dlmodeler.extract(f,m4,type="observation",value="mean")
m.obs.cov <- dlmodeler.extract(f,m4,type="observation",value="covariance")
m.obs.int <- dlmodeler.extract(f,m4,type="observation",value="interval",prob=.99)
plot(as.vector(resp))
lines(m.obs.int$level$upper[1,],col="red")
lines(m.obs.int$level$lower[1,],col="red")
lines(m.obs.int$level$mean[1,],col=2)
#create test dataset
bob<-c()
set.seed(1234)
r <- rnorm(100)
u <- -5*X + 0.5*rnorm(100) #known regression parameters
#Fit regression model with predictor (r)
MyModel <- function(x) dlmModReg(r, FALSE, dV = x[1]^2,dW=x[2]^2) #r is explantory variable
fit <- dlmMLE(u, parm = c(0.3,0.3), build = MyModel) #u is response variable
mod <- MyModel(fit$par)
unlist(MyModel(fit$par)[c("V", "W")])
bob<-dlmFilter(u,mod)
plot(bob$a) #plot regression coefficients through time
plot(bob$f)
AICdlm(filteredobject="bob",MLEfitobject="fit",responsedata="u",burn_in=10)
lines(u,col="red")
#random walk model
Mymodel2 <- function(x) dlmModPoly(order=1, dV = exp(x[1]), dW = exp(x[2]))
result<-list()
for (i in 1:10){
fit2<-dlmMLE(u,par=rep(i-1,2), build=Mymodel2, lower=c(1e-6,0))
result[[i]] <-unlist(Mymodel2(fit2$par)[c("V", "W")])
}
result
mod <- Mymodel2(fit2$par)
unlist(Mymodel2(fit2$par)[c("V", "W")])
bob<-dlmFilter(u,mod)
plot(bob$f) #plot regression coefficients through time
lines(u,col="red")
AICdlm(filteredobject="bob",MLEfitobject="fit2",responsedata="u",burn_in=10)
StructTS(u,type="level") #first order polynomial, V=epsilon, W=level
|
e656e10f948d1680d9d12a55830c5fb180d4f843
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/circular/R/mle.wrappednormal.R
|
eb8c08abc4dee952fd7dfe30b38f2389af1167b4
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,647
|
r
|
mle.wrappednormal.R
|
#############################################################
# #
# mle.wrappednormal function #
# Author: Claudio Agostinelli #
# Email: claudio@unive.it #
# Date: August, 10, 2006 #
# Copyright (C) 2006 Claudio Agostinelli #
# #
# Version 0.2-2 #
#############################################################
# Maximum-likelihood fit of a wrapped normal distribution to circular data.
#
# x:    circular (or coercible) observations; NAs are dropped.
# mu:   known mean direction, or NULL to estimate it.
# rho:  known concentration (rho = exp(-sd^2/2)), or NULL.
# sd:   known wrapped-normal standard deviation, or NULL to estimate it
#       (sd takes precedence over rho when both are given).
# K:    number of wrapping terms in the series approximation; chosen
#       automatically from the data range and sd when NULL.
# tol / max.iter: convergence tolerance and iteration cap for the inner
#       estimation loop (MlewrappednormalRad).
# min.sd / min.k: lower bounds protecting the estimation numerically.
# control.circular: attributes (units, zero, rotation, ...) for the
#       returned mu; unspecified entries are inherited from the input.
#
# Returns an object of class "mle.wrappednormal": a list with the call,
# estimates mu/rho/sd, flags est.mu/est.rho saying which were estimated,
# and a convergence flag (FALSE when max.iter was exhausted).
mle.wrappednormal <- function(x, mu=NULL, rho=NULL, sd=NULL, K=NULL, tol=1e-5, min.sd=1e-3, min.k=10, max.iter=100, verbose=FALSE, control.circular=list()) {
# Handling missing values
   x <- na.omit(x)
   if (length(x)==0) {
      warning("No observations (at least after removing missing values)")
      return(NULL)
   }
   # Decide which circular attributes the answer should carry: prefer the
   # data's, then mu's, then a plain radians/counterclockwise default.
   if (is.circular(x)) {
      datacircularp <- circularp(x)
   } else if (is.circular(mu)) {
      datacircularp <- circularp(mu)
   } else {
      datacircularp <- list(type="angles", units="radians", template="none", modulo="asis", zero=0, rotation="counter")
   }
   # Fill any attribute the caller did not override in control.circular.
   dc <- control.circular
   if (is.null(dc$type))
      dc$type <- datacircularp$type
   if (is.null(dc$units))
      dc$units <- datacircularp$units
   if (is.null(dc$template))
      dc$template <- datacircularp$template
   if (is.null(dc$modulo))
      dc$modulo <- datacircularp$modulo
   if (is.null(dc$zero))
      dc$zero <- datacircularp$zero
   if (is.null(dc$rotation))
      dc$rotation <- datacircularp$rotation
   # Work internally in plain radians on [0, 2pi), counterclockwise, zero at 0;
   # strip the circular class so arithmetic below is ordinary numeric.
   x <- conversion.circular(x, units="radians", zero=0, rotation="counter", modulo="2pi")
   attr(x, "class") <- attr(x, "circularp") <- NULL
   if (!is.null(mu)) {
      mu <- conversion.circular(mu, units="radians", zero=0, rotation="counter", modulo="2pi")
      attr(mu, "class") <- attr(mu, "circularp") <- NULL
   }
   # res is c(mu, rho, sd, est.mu, est.rho, iterations) -- see
   # MlewrappednormalRad for the estimation itself.
   res <- MlewrappednormalRad(x, mu, rho, sd, min.sd, K, min.k, tol, max.iter, verbose)
   # Convert the estimated mean direction back to the requested attributes.
   mu <- conversion.circular(circular(res[1]), dc$units, dc$type, dc$template, dc$modulo, dc$zero, dc$rotation)
   result <- list()
   result$call <- match.call()
   result$mu <- mu
   result$rho <- res[2]
   result$sd <- res[3]
   result$est.mu <- res[4]
   result$est.rho <- res[5]
   result$convergence <- TRUE
   # res[6] is the iteration count; exceeding max.iter means no convergence.
   if (res[6] > max.iter) {
      result$convergence <- FALSE
   }
   class(result) <- "mle.wrappednormal"
   return(result)
}
# Workhorse for mle.wrappednormal: iterative ML estimation of the wrapped
# normal parameters, with x already in plain radians.
#
# Starting values: mu from the mean resultant direction, sd from the
# resultant length (floored at min.sd). Each iteration calls the Fortran
# routine "mlewrpno" (circular package) to get per-observation weights
# (w), wrapping-index terms (wk) and second-moment terms (wm), then
# updates mu and/or sd from them. Stops when both parameters move less
# than tol, or after max.iter iterations.
#
# Returns c(mu, rho, sd, est.mu, est.rho, iter), with rho = exp(-sd^2/2).
MlewrappednormalRad <- function(x, mu=NULL, rho=NULL, sd=NULL, min.sd, K=NULL, min.k=10, tol, max.iter, verbose) {
   n <- length(x)
   sinr <- sum(sin(x))
   cosr <- sum(cos(x))
   est.mu <- FALSE
   # mu not supplied: start from the direction of the resultant vector.
   if (is.null(mu)) {
      mu <- atan2(sinr, cosr)
      est.mu <- TRUE
   }
   est.rho <- FALSE
   if (is.null(sd)) {
      if (is.null(rho)) {
         # Method-of-moments start: sd from the mean resultant length.
         sd <- sqrt(-2*log(sqrt(sinr^2 + cosr^2)/n))
         # Guard against NaN (resultant length > 1 numerically) or a
         # degenerate tiny sd.
         if (is.na(sd) || sd < min.sd)
            sd <- min.sd
         est.rho <- TRUE
      } else {
         # rho given: convert to the equivalent normal sd (fixed, not estimated).
         sd <- sqrt(-2*log(rho))
      }
   }
   xdiff <- 1+tol
   iter <- 0
   # Choose how many wrapping terms to keep: enough to cover the data
   # range plus ~6 sd, but at least min.k.
   if (is.null(K)) {
      range <- max(mu, x) - min(mu, x)
      K <- (range+6*sd)%/%(2*pi)+1
      K <- max(min.k, K)
   }
   while (xdiff > tol & iter <= max.iter) {
      iter <- iter + 1
      mu.old <- mu
      sd.old <- sd
      # Fortran kernel computes the weight vectors for the current (mu, sd).
      z <- .Fortran("mlewrpno",
      as.double(x),
      as.double(mu),
      as.double(sd),
      as.integer(n),
      as.integer(K),
      as.integer(est.mu),
      as.integer(est.rho),
      w=double(n),
      wk=double(n),
      wm=double(n),
      PACKAGE="circular"
      )
      w <- z$w
      wk <- z$wk
      wm <- z$wm
      if (est.mu) {
         # Update mu: sample mean plus a 2*pi correction from the expected
         # wrapping indices (only terms with nonzero wk contribute).
         mu <- sum(x)/n
         if (any(wk!=0)) {
            mu <- mu + 2*pi*mean.default(wk[wk!=0]/w[wk!=0])
         }
      }
      if (est.rho) {
         # Update sd from the weighted second moments; fall back to the
         # floor when all wm are zero.
         if (any(wm!=0)) {
            sd <- sqrt(sum(wm[wm!=0]/w[wm!=0])/n)
         } else {
            sd <- min.sd
         }
      }
      if (verbose) {
         cat("mu: ", mu, "\n")
         cat("rho: ", exp(-sd^2/2), "\n")
         cat("sd: ", sd, "\n")
      }
      # Converged when both parameters are (numerically) stationary.
      xdiff <- max(abs(mu - mu.old), abs(sd - sd.old))
   }
   rho <- exp(-sd^2/2)
   result <- c(mu, rho, sd, est.mu, est.rho, iter)
   return(result)
}
#############################################################
# #
# print.mle.wrappednormal function #
# Author: Claudio Agostinelli #
# E-mail: claudio@unive.it #
# Date: November, 19, 2003 #
# Version: 0.1-2 #
# #
# Copyright (C) 2003 Claudio Agostinelli #
# #
#############################################################
# S3 print method for "mle.wrappednormal" fits: shows the call, the three
# parameter estimates, and notes for fixed (non-estimated) parameters and
# non-convergence. Returns the object invisibly so it pipes/chains cleanly.
print.mle.wrappednormal <- function(x, digits = max(3, getOption("digits") - 3), ...) {
   cat("\nCall:\n", deparse(x$call), "\n\n", sep = "")
   # Local helper: one labelled estimate followed by a blank line.
   show_estimate <- function(label, value) {
      cat(label)
      cat(format(value, digits = digits), "\n")
      cat("\n")
   }
   show_estimate("mu: ", x$mu)
   show_estimate("rho: ", x$rho)
   show_estimate("sd: ", x$sd)
   # Flag parameters that were supplied by the user rather than estimated.
   if (!x$est.mu) cat("mu is known\n")
   if (!x$est.rho) {
      cat("rho and sd are known\n")
   }
   if (!x$convergence) cat("\nThe convergence is not achieved after the prescribed number of iterations \n")
   invisible(x)
}
|
b8a17b9e6131c5541bcc95c77001e34e07f485c3
|
5cfc866af872deb8c1aa1c21610717fff6fad03e
|
/R/plot.R
|
ef9488aec3296f9dbc15dbdbc32351b0738dd9af
|
[] |
no_license
|
lichen-lab/powmic
|
0ad2c6153a4b5b1804b7215aa91b0c9b99e4b9d9
|
69dac1470f1a6a22f8b4cadfbf637cb48015e505
|
refs/heads/master
| 2023-04-13T02:12:29.715643
| 2023-04-06T15:49:42
| 2023-04-06T15:49:42
| 216,932,529
| 5
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,617
|
r
|
plot.R
|
# Plot one stratified assessment metric (power, FDR, type-1 error, TP, FP
# or taxon count) against the strata, one line per sample size.
#
# assess.out:  stratified assessment object (must have been produced with
#              assess.type = 'stratify'; summaryAssess() checks this).
# figure.type: which metric to draw (match.arg'd; default 'power').
# stratify.by: whether strata are prevalence- or abundance-based; only
#              affects the x-axis label.
# is.errorbar: draw +/- sd error bars (previously accepted but ignored;
#              the default TRUE reproduces the old always-on behavior).
# is.legend:   show the sample-size legend.
# Returns the ggplot object.
plotStrata<-function(assess.out,
            figure.type=c('power','fdr','type1error','tp','fp','taxon'),
            stratify.by=c('prevalence','abundance'),
            is.errorbar=TRUE,is.legend=TRUE) {
  nsim=dim(assess.out$TP)[2]
  sum.out=summaryAssess(assess.out,assess.type='stratify')
  if(length(sum.out)!=2) stop('assess.type should be set as stratify in summaryAssess!')
  alpha.type=assess.out$alpha.type
  stratify.type=assess.out$stratify.type
  alpha.level=assess.out$alpha.level
  figure.type=match.arg(figure.type)
  # BUG FIX: stratify.by must also be resolved; with the default length-2
  # vector the switch() below raised "EXPR must be a length 1 vector".
  stratify.by=match.arg(stratify.by)
  xlabel=switch(stratify.by,
                prevalence = "Strata (prevalence)",
                abundance = "Strata (abundance)"
  )
  ylabel=switch(figure.type,
                power="Power",
                fdr="FDR",
                type1error="Type1 Error",
                tp="TP",
                fp="FP",
                taxon="Taxon")
  # Map the figure type to the metric slice stored in the summary arrays;
  # this replaces six copy-pasted if/else branches.
  metric=switch(figure.type,
                power="TPR",
                fdr="FDR",
                type1error="FPR",
                tp="TP",
                fp="FP",
                taxon="Taxon")
  # Mean over simulations, with the (summed) sd scaled by the number of
  # simulations, reshaped to long format for ggplot.
  dat=melt(sum.out$m[,,metric])
  dat.s=melt(sum.out$s[,,metric] /nsim)
  dat$sd=dat.s$value
  # The original code attached alpha.level for every metric except power;
  # preserved for compatibility (it is not used by the plot itself).
  if(figure.type!='power') dat$alpha.level=alpha.level
  p=ggplot(dat, aes(x=stratify.name, y=value, group=samplesize, color=samplesize)) +
    geom_line() +
    geom_point()
  if(is.errorbar)
    p=p+geom_errorbar(aes(ymin=value-sd, ymax=value+sd), width=.2,position=position_dodge(0.05))
  p=p+labs(title="", x=xlabel, y = ylabel)+theme_bw()
  p=p+theme(axis.text.x=element_text(size=12,angle = 30, hjust = 1,face='bold'),
            axis.text.y=element_text(size=12,angle = 0, hjust = 1,face='bold'),
            axis.title=element_text(size=12,face="bold"),
            strip.text = element_text(size = 14,face="bold"),
            legend.text=element_text(size=12,face='bold'),
            legend.title=element_text(size=14,face='bold'))
  if(!is.legend) p=p+theme(legend.position='none')
  p
}
# Draw the four headline panels -- power, FDR, TP, FP -- on a 2x2 grid.
# figure.type, stratify.by and is.errorbar are accepted for interface
# compatibility; as in the original, the panel set is fixed and only
# stratify.by / is.legend are forwarded to plotStrata().
plotStrataAll <- function(assess.out,
                          figure.type=c('power','fdr','type1error','tp','fp','taxon'),
                          stratify.by=c('prevalence','abundance'),
                          is.errorbar=TRUE,is.legend=TRUE){
  panel_types <- c("power", "fdr", "tp", "fp")
  panels <- lapply(panel_types, function(ft) {
    plotStrata(assess.out, ft, stratify.by = stratify.by, is.legend = is.legend)
  })
  do.call(grid.arrange, c(panels, list(nrow = 2)))
}
|
090404fce02e67c172f7bc5e55dd0c41b19c893f
|
95419f986006e519af08ac10ff397f8eccfd33f7
|
/a2.R
|
2083da040c0828ade761c176d0e1efde19555f99
|
[] |
no_license
|
MasonYue/R-studio
|
123914883ba9e49d21b5e1e035835fa9d545b721
|
6dd4d5b40f4fccc101ceded9a6d3f84aafc47799
|
refs/heads/master
| 2020-04-06T06:05:07.141986
| 2016-12-17T21:43:06
| 2016-12-17T21:43:06
| 73,636,886
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,972
|
r
|
a2.R
|
#### A1 ####
### Marriage Licenses ###
require(plyr)
wed <- read.csv("marriage.csv", head=T)
str(wed)
wed <- within(wed, {
Year <- factor(gsub("-.*", "", TIME_PERIOD))
Month <- factor(month.abb[as.numeric(gsub(".*-", "", TIME_PERIOD))], levels = month.abb)
})
wed.agg <- ddply(wed, .(Year, Month), summarize, MarriageLic = sum(MARRIAGE_LICENSES))
# 1D table
wed.tab1 <- with(wed, tapply(MARRIAGE_LICENSES, CIVIC_CENTRE, sum)); wed.tab1
# Barplot
wed.bar <- arrange(ddply(wed, "CIVIC_CENTRE", summarize, MarriageLic = sum(MARRIAGE_LICENSES)), -MarriageLic)
with(wed.bar, barplot(MarriageLic, names.arg=CIVIC_CENTRE, main="Marriage license count by Region"))
# 2D table
wed.tab2 <- with(wed, tapply(MARRIAGE_LICENSES, list(Year, Month), sum)); wed.tab2
# Stacked bar
wed.bar2 <- ddply(wed, c("CIVIC_CENTRE", "Month"), summarize, MarriageLic = sum(MARRIAGE_LICENSES))
require(ggplot2) # I like these plots better than base R graphics. Also easier to code
ggplot(wed.bar2, aes(x= Month, y= MarriageLic, fill= CIVIC_CENTRE)) + geom_bar(stat="identity") + labs(y= "Marriage Licenses")
# barplot(xtabs(MarriageLic ~ CIVIC_CENTRE + Month, data=wed.bar))
# barplot(with(wed, tapply(MARRIAGE_LICENSES, list(CIVIC_CENTRE, Month), sum)), col=1:4)
# legend("topright", legend= levels(wed$CIVIC_CENTRE), fill= 1:4, title= "Civic Centres")
### Business Licenses ###
biz <- read.csv("businessLicences.csv", head=T, stringsAsFactors = F)
str(biz)
require(lubridate) # Nice package for dealing with dates, although you don't need it
biz <- within(biz, {
# Date <- as.Date(Issued, format = ifelse(nchar(Issued) > 8, "%d/%m/%Y", "%d/%m/%y"))
Date <- as.Date(Issued, format = "%d/%m/%y")
Year <- factor(year(Date))
Month <- factor(month.abb[month(Date)], levels = month.abb)
})
biz <- subset(biz, Year %in% unique(wed$Year), select=c(Month, Year)) # Doing it here to save RAM, but could be done in join instead
biz <- droplevels(biz) # Gets rid of unused factor levels
biz.agg <- ddply(biz, .(Year, Month), summarize, BusinessLic = length(Month))
### Regression with combined data ###
licenses <- join(wed.agg, biz.agg, by = c("Year", "Month"))
rm(wed, wed.agg, biz, biz.agg, wed.bar, wed.bar2, wed.tab1, wed.tab2)
with(licenses, plot(BusinessLic, MarriageLic))
fit <- lm(MarriageLic ~ BusinessLic, data=licenses)
summary(fit)
confint(fit, level = 0.88)
newData <- data.frame(BusinessLic = 550)
predict(fit, newData, interval="prediction")
#### A2 ####
### Building Permits ###
require(plyr)
require(lubridate)
require(ggplot2)
build.raw <- read.csv("activepermits.csv", head= T)
str(build.raw)
# QA1
build <- within(build.raw, {
Date.App <- as.Date(APPLICATION_DATE, format = "%Y/%m/%d")
Date.Iss <- as.Date(ISSUED_DATE, format = "%Y/%m/%d")
Year <- factor(year(Date.App))
Month <- factor(month.abb[month(Date.App)], levels = month.abb)
Days.Applied.to.Issued <- as.numeric(Date.Iss - Date.App)
CURRENT_USE <- toupper(CURRENT_USE)
PROPOSED_USE <- toupper(PROPOSED_USE)
CURRENT_USE_cleaned <- CURRENT_USE
})
build <- subset(build, Days.Applied.to.Issued >= 0, # & Days.Applied.to.Issued < 3*365,
select=c(Year, Month, Date.App, Days.Applied.to.Issued, STATUS, POSTAL,
CURRENT_USE, CURRENT_USE_cleaned, PROPOSED_USE, DWELLING_UNITS_CREATED, DWELLING_UNITS_LOST))
str(build)
summary(build)
# QA2
sum(as.character(build$CURRENT_USE) != as.character(build$PROPOSED_USE))
tab <- with(build, table(CURRENT_USE))
tab <- tab[order(tab)] # Put in ascending order
tab[tab >= 30] # At least 30 records
build <- within(build, {
CURRENT_USE_cleaned[grepl("(APT)|(ARTMENT)|(CONDO)", CURRENT_USE)] <- "Apartment"
CURRENT_USE_cleaned[grepl("(UNIV)|(COLLEGE)|(SCHOOL)", CURRENT_USE)] <- "Educational"
CURRENT_USE_cleaned[grepl("(RESTAURANT)|(REST)", CURRENT_USE)] <- "Restaurant"
CURRENT_USE_cleaned[grepl("PARKING", CURRENT_USE)] <- "Parking Lot"
CURRENT_USE_cleaned[grepl("(RETAIL)|(RET)", CURRENT_USE)] <- "Retail"
CURRENT_USE_cleaned[grepl("(OFFICE)|(OFF)", CURRENT_USE)] <- "Office"
CURRENT_USE_cleaned[grepl("USE", CURRENT_USE)] <- "Mixed"
CURRENT_USE_cleaned[grepl("LAB", CURRENT_USE)] <- "Laboratory"
CURRENT_USE_cleaned[grepl("(IND)|(INDUSTRIAL)|(PLANT)|(MANUF)|(LUMBER)", CURRENT_USE)] <- "Industrial"
CURRENT_USE_cleaned[grepl("(RES)|(RESIDENTIAL)", CURRENT_USE)] <- "Residential"
CURRENT_USE_cleaned[grepl("(SUBWAY)|(TRANSIT)|(UNION)",CURRENT_USE)] <- "Transit Station"
CURRENT_USE_cleaned[grepl("(CHURCH)|(WORSHIP)", CURRENT_USE)] <- "Place of Worship"
CURRENT_USE_cleaned[grepl("(VACANT)|(VACNT)", CURRENT_USE)] <- "Vacant"
CURRENT_USE_cleaned[grepl("(N/A)|(NOT KNOWN)", CURRENT_USE)] <- ""
SFD <- grepl("(SFD)|(SINGLE)|(SF RES)", CURRENT_USE)
Det <- grepl("(DETACH)|(DET)|(DETCAHED)|(DETATCHED)", CURRENT_USE)
Sem <- grepl("SEMI", CURRENT_USE)
Row <- grepl("(ROW)|(TOWN)", CURRENT_USE)
CURRENT_USE_cleaned <- ifelse(Sem, "SFD Semi-Detached", ifelse(Det, "SFD Detached", ifelse(Row, "SFD Rowhouse", ifelse(SFD, "SFD", CURRENT_USE_cleaned))))
CURRENT_USE_cleaned[grepl("(UNIT)|(TRIPLEX)|(DUPLEX)", CURRENT_USE)] <- "Multi-Unit"
CURRENT_USE_cleaned[grepl("(GROUP)|(NURSING)|(AGED)|(LONG TERM)", CURRENT_USE)] <- "Nursing Home"
CURRENT_USE_cleaned[grepl("(ARENA)|(PARK)|(BOWLING)|(FITNESS)|(THEATRE)|(STADIUM)|(RECREAT)|(CLUB)", CURRENT_USE)] <- "Nursing Home"
})
## --- Testing --- ##
# tab2 <- with(build, table(CURRENT_USE_cleaned))
# tab2 <- tab2[order(tab2)] # Put in ascending order
# tab2[tab2 >= 30] # At least 30 records
dfQA2a <- arrange(ddply(build, "CURRENT_USE_cleaned", nrow), -V1)
names(dfQA2a) <- c("Usage", "Count")
subset(dfQA2a, Count >= 30)
# QA2b
hist(build$Days.Applied.to.Issued, main="Toronto Building Permit Delays", xlab="Days")
# QA2c
build$log_days <- log(build$Days.Applied.to.Issued + 1)
hist(build$log_days, main="Toronto Building Permit Delays", xlab="log(Days)")
build$res <- factor(ifelse(build$DWELLING_UNITS_CREATED > 0, "Increase", ifelse(build$DWELLING_UNITS_LOST > 0, "Decrease", "No Change")))
summary(build$res)
# QA2d
ggplot(subset(build, !is.na(res)), aes(x= log_days, fill= res)) + geom_histogram()
# QA2e
ggplot(subset(build, !is.na(res)), aes(y= log_days, x= res)) + geom_boxplot()
# QA2f
dfQA2f <- arrange(ddply(build, "STATUS", summarize, AvgDelay = round(mean(Days.Applied.to.Issued), 0)), -AvgDelay)
dfQA2f[1:10,]
## QA3
## Word Cloud ##
# install.packages('wordcloud')
# install.packages('tagcloud')
library(wordcloud)
# library(tm)
# library(tagcloud)
tags <- ddply(subset(build, POSTAL == "M5G"), "CURRENT_USE_cleaned", nrow)
wordcloud(tags$CURRENT_USE, tags$V1, max.words=20, random.order=FALSE, rot.per=.30)
# QA4
build.sub <- subset(build, Date.App >= '2011-01-01' & Date.App <= '2015-08-31', select=c(Year, Month))
build.agg <- ddply(build.sub, .(Year, Month), summarize, BuildingPerms = length(Month))
toronto <- join(licenses, build.agg, by = c("Year", "Month"))
# a
with(toronto, plot(BuildingPerms, BusinessLic))
fit <- lm(BusinessLic ~ BuildingPerms, data= toronto)
abline(coef(fit), col="red")
# b - Residual plots
par(mfrow=c(1,2))
plot(fit, 1); plot(fit, 2)
### QB1 - A1 Submission times ###
times <- read.csv("a2data.csv", head= T)
times <- within(times, {
time <- ifelse(MTroom %in% c("SS 2117", "SS 1069"), 17, 10) - a1hours - a1minutes/60 + 24*(15 - a1Date)
a1 <- as.numeric(a1)
a1_sq <- a1^2
log_time <- log(time + 1)
})
# times[!is.na(times$time),]
str(times)
times <- subset(times, !is.na(time) & time >= 0, select=c(MTroom, a1, time, a1_sq, log_time, date.ambiguity))
par(mfrow=c(1,2))
with(times, hist(time, breaks = 30))
with(times, hist(a1, breaks = 30))
with(times, hist(log_time, breaks = 30))
with(times, hist(a1_sq, breaks = 30))
# SLR model
fit <- lm(a1 ~ time, data= times)
plot(fit, 1); plot(fit, 2)
par(mfrow=c(1,1))
with(times, plot(time, a1))
abline(coef(fit), col="red")
par(mfrow=c(1,2))
plot(fit, 1); plot(fit, 2)
# Transformed model
fitT <- lm(a1_sq ~ log_time, data= times)
with(times, plot(log_time, a1_sq))
abline(coef(fitT), col="red")
par(mfrow=c(1,2))
plot(fitT, 1); plot(fitT, 2)
sqrt(predict(fitT, data.frame(log_time = log(12 + 1)), interval= "confidence"))
times <- arrange(times, MTroom, time)
write.csv(subset(times, select=c(MTroom, a1, time, date.ambiguity)), "A1times.csv", row.names= F)
## Use this or not ??? ##
#### TTC ####
require(XLConnect)
require(reshape2)
require(plyr)
wb <- loadWorkbook("PM_TTC.xls")
# Riders
ttc <- readWorksheet(wb, sheet = "Data", startRow = 27, endRow = 34, rownames = T)
names(ttc) <- c("Year", month.abb)
ttc$Year <- factor(gsub("[^0-9]*", "", ttc$Year))
ttc.1 <- melt(ttc, id.vars = "Year", value.name = "TTCriders", variable.name = "Month")
# Riders target
ttc <- readWorksheet(wb, sheet = "Data", startRow = 36, endRow = 42, rownames = T, head = F)
names(ttc) <- c("Year", month.abb)
ttc$Year <- factor(gsub("[^0-9]*", "", ttc$Year))
ttc.2 <- melt(ttc, id.vars = "Year", value.name = "TTCtarget", variable.name = "Month")
# Money
ttc <- readWorksheet(wb, sheet = "Data", startRow = 64, endRow = 71, rownames = T)
names(ttc) <- c("Year", month.abb)
ttc$Year <- factor(gsub("[^0-9]*", "", ttc$Year))
ttc.3 <- melt(ttc, id.vars = "Year", value.name = "Dollars_Millions", variable.name = "Month")
# Money budget
ttc <- readWorksheet(wb, sheet = "Data", startRow = 73, endRow = 79, rownames = T, head = F)
names(ttc) <- c("Year", month.abb)
ttc$Year <- factor(gsub("[^0-9]*", "", ttc$Year))
ttc.4 <- melt(ttc, id.vars = "Year", value.name = "Budget_Millions", variable.name = "Month")
# Join together
ttc.agg <- join(ttc.1, ttc.2, by = c("Year", "Month"))
ttc.agg <- join(ttc.agg, ttc.3, by = c("Year", "Month"))
ttc.agg <- join(ttc.agg, ttc.4, by = c("Year", "Month"))
toronto <- join(toronto, ttc.agg, by = c("Year", "Month"))
rm(ttc.1, ttc.2, ttc.3, ttc.4, ttc.agg)
with(toronto, plot(TTCriders, Dollars_Millions))
with(toronto, plot(TTCtarget, TTCriders))
with(toronto, plot(BuildingPerms, Dollars_Millions))
fit <- lm(Dollars_Millions ~ TTCriders, data=toronto)
summary(fit)
fit <- lm(Dollars_Millions ~ TTCriders, data=toronto)
summary(fit)
# ITEM NAME DESCRIPTION FORMAT
# PERMIT_NUM Last two digits of calendar year, plus IBMS-generated sequence 99 999999
# REVISION_NUM Two digit number identifying revisions to permit application made after permit issuance 99
# PERMIT_TYPE Text field describing the type of permit. Eg Small Residential Projects; Building Addtions/Alterations
# STRUCTURE_TYPE Identifies that type of structure the application relates to. Eg SFD- Detached; Apartment Building
# WORK Overall description of the type of work covered by application.
# STREET_NUM Address – street number 99
# STREET_NAME Address – street name Upper Case
# STREET_TYPE Address – street type Eg AVE, ST, DR, RD
# STREET_DIRECTION Address – street direction Eg E, W, N, S
# POSTAL First 3 digits of postal code
# GEO_ID City-defined, unique identifier for Property Address
# APPLICATION_DATE Date that the application was received and entered into IBMS YYYY/MM/DD
# ISSUED_DATE Date that the permit was issued. YYYY/MM/DD
# COMPLETED_DATE Date work is complete and permit is cleared N/A for this dataset, included for compatibility purpose
# STATUS Current status of application / permit Eg Permit Issued; Inspection; Closed
# DESCRIPTION Description of work proposed in application
# CURRENT USE Use of the property at the time of application submission
# PROPOSED_USE Use of the property after completion of work covered by permit
# DWELLING_UNITS_CREATED Number of residential dwelling units created by completion of permit work.
# DWELLING_UNITS_LOST Number of residential dwelling units lost by completion of permit work.
# ASSEMBLY Assembly Occupancy area (in sq metres) covered by permit work. (eg Restaurant, Library, Theatre)
# INSTITUTIONAL Institutional Occupancy area (in sq metres) covered by permit work (eg Hospital, Nursing Home)
# RESIDENTIAL Residential Occupancy area (in sq metres) covered by permit work
# BUSINESS_AND_PERSONAL_SERVICES Business and Personal Services Occupancy area (in sq metres) covered by permit work (Office, Bank, Medical Clinic)
# MERCANTILE Mercantile Occupancy area (in sq metres) covered by permit work (eg Department Store, Supermarket)
# INDUSTRIAL Industrial Occupancy area (in sq metres) covered by permit work (eg Warehouse, Gas Station)
# INTERIOR ALTERATIONS Floor area (in sq metres) covered by permit work
# DEMOLITION Floor area (in sq metres) covered by permit work
|
18fdac3013e034721b28f5b3065919f5e8d57a8d
|
3a5e193354fff9dd67846fba9d63a80d1a7886a6
|
/Assig3/rankall.R
|
5358919981e8efe7d4dadb4f8cc37f2bfb26e873
|
[
"MIT"
] |
permissive
|
phaser/datasciencerepo
|
4c2824311cbc666b56d93684fc56ad4232abbe9c
|
ce71bc15b53b114f03d2205d69248e98a3badd99
|
refs/heads/master
| 2020-06-06T06:01:26.733494
| 2016-08-09T14:42:47
| 2016-08-09T14:42:47
| 31,729,999
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,812
|
r
|
rankall.R
|
source("rankhospital.R")
# Rank hospitals in every state by 30-day mortality for a given outcome.
#
# disease: outcome name, e.g. "heart attack"; words are capitalized (via
#          capwords from rankhospital.R) and matched against the
#          "Hospital.30.Day.Death..." column names.
# num:     "best", "worst", or a 1-based rank within each state.
# Returns a data.frame with one row per state: the hospital at that rank
# (NA when the state has fewer than `num` ranked hospitals) and the state.
rankall <- function(disease, num = "best")
{
	outcome <- read.csv("outcome-of-care-measures.csv", colClasses="character")
	## check if disease valid: build the mortality-column pattern and
	## locate the matching column index.
	ds <- noquote(strsplit(disease, " "))
	ds <- unlist(noquote(lapply(ds, capwords)))
	ds <- paste0(ds, collapse=".")
	nms <- names(outcome)
	## Avoid shadowing base::exp / base::match (the original bound both).
	pattern <- paste0("^Hospital.30.Day.Death.*", ds, ".*")
	matched <- grep(pattern, nms)
	if (length(matched) == 0)
	{
		stop("invalid outcome")
	}
	idx <- matched[1]
	## Validate num once, up front, instead of on every state iteration.
	if (is.character(num))
	{
		if (!(num == "best" || num == "worst"))
		{
			stop("num: invalid argument")
		}
	} else if (!is.numeric(num))
	{
		stop("num: invalid argument")
	}
	states <- unique(outcome[,7])
	states <- as.character(states)
	states <- states[order(states)]
	hospitals <- vector(length=length(states))
	for (i in seq_along(states))
	{
		## Subset to one state, coerce mortality to numeric, drop NAs,
		## and pre-sort by hospital name so ties break alphabetically
		## (order() is stable, so a later sort by value keeps this).
		ioutcome <- outcome[outcome[,7] == states[i],]
		ioutcome[,idx] <- as.numeric(ioutcome[,idx])
		ioutcome <- ioutcome[!is.na(ioutcome[,idx]),]
		ioutcome <- ioutcome[order(ioutcome[,2]),]
		if (is.character(num))
		{
			extreme <- if (num == "best")
			{
				min(ioutcome[,idx])
			} else
			{
				max(ioutcome[,idx])
			}
			ioutcome <- ioutcome[ioutcome[,idx] == extreme,]
			hospitals[i] <- ioutcome[1,2]
		} else
		{
			ioutcome <- ioutcome[order(ioutcome[,idx]),]
			## BUG FIX: the original returned NA for the WHOLE call when
			## num exceeded the total row count; a state with fewer than
			## num hospitals should simply get NA for that state.
			hospitals[i] <- if (num <= nrow(ioutcome)) ioutcome[num,2] else NA
		}
	}
	data.frame(hospital=hospitals, state=states)
}
|
2a81f51c31b17bceb282a54bce5ab08be62cab98
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/SpatioTemporal/examples/print.predictSTmodel.Rd.R
|
993b9c4163f8515386a22d5ba93a951ca386037b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 232
|
r
|
print.predictSTmodel.Rd.R
|
library(SpatioTemporal)
### Name: print.predictSTmodel
### Title: Print details for 'predictSTmodel' object
### Aliases: print.predictSTmodel
### ** Examples
##load data
data(pred.mesa.model)
print(pred.mesa.model)
|
071899ca63ab383cd3eed6b682733fd947a0636a
|
2863c81e7fe418f274962367f2564eb17d620f0b
|
/Week2_SimpleExponentialSmoothing_msVolumeData_2017-07-02 FULL CODE.R
|
75c0a473836b98e2c1b1814f74226935500706fb
|
[] |
no_license
|
brilliantFire/Time-Series-and-Forecasting-with-R
|
ca202fd3af852ae0abb2b1d5788a36b2486038a8
|
8a3d61561050b3f25f9e6cb5c375289c0222f6c4
|
refs/heads/master
| 2021-01-21T11:53:58.586286
| 2017-09-15T18:48:27
| 2017-09-15T18:48:27
| 95,321,122
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,857
|
r
|
Week2_SimpleExponentialSmoothing_msVolumeData_2017-07-02 FULL CODE.R
|
### ~*~*~*~*~*~* Rebecca Vislay Wade ~*~*~*~*~*~* ###
### ~*~*~*~*~*~* PREDICT 413: Time Series & Forecasting ~*~*~*~*~*~* ###
### ~*~*~*~*~*~* Summer 2017 | Section 57 | NU MSPA ~*~*~*~*~*~* ###
### ~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~*~* ###
### ~*~*~*~*~*~*~*~*~*~*~*~ July 2, 2017 ~*~*~*~*~*~*~*~*~*~*~*~*~* ###
### This program demonstrates some simple moving average and exponential
### smoothing methods with SMA() from the TTR package and ses() from the
### forecast package, respectively. Included is a custom function called
### sesExperiment that performs SES for three different alpha values and
### plots the fits and residuals overlaid with the original data.
# Load some libraries.
library(TTR)
library(forecast)
# Read in from .csv
daily = read.csv("~/NU_MSPA/R_work/PREDICT_413/data/dailyVolumeCleaned.csv", header = TRUE)
# Convert to time series (ts) object, break into 1, 3, and 6 month chunks.
tsDaily = ts(daily$mss)
tsDaily6mos = ts(tsDaily[130:253]) # Jul-Dec 2016
tsDaily3mos = ts(tsDaily[173:253]) # Oct-Dec 2016
tsDailySep2016 = ts(tsDaily[173:193]) # Sep 2016
# First, a simple moving average with SMA() from the TTR library.
smaSmooth = SMA(tsDaily6mos, n = 5)
ts.plot(tsDaily6mos, smaSmooth, gpars = list(col = c("black", "red")))
# Function to try different alphas and data subsets with ses().
# Arguments: data = your time series as a ts object
# alphas = vector (numeric) of 3 different alphas to try
# colors = vector (strings) of colors for the plot
# Output: Two panel plot of fits and residuals.
# Original time series is always plotted in black.
# Fit simple exponential smoothing (forecast::ses) with three different
# alpha values and draw a two-panel figure: fits on top, residuals below,
# each overlaid on the original series (always plotted in black).
#
# data:   a ts object (the series to smooth).
# alphas: numeric vector of exactly three smoothing parameters -- only the
#         first three are used; the legend lists them in this order.
# colors: vector of three colors, paired positionally with alphas.
# Side effects only (draws on the current graphics device); no return value.
# NOTE(review): par(mfrow/mar) is changed without on.exit() restoration, so
# the caller's graphics state is left modified -- apparently intentional
# for this exploratory script.
sesExperiment = function(data, alphas, colors){
  ses01 = ses(data, alpha = alphas[1], initial = "simple")
  ses02 = ses(data, alpha = alphas[2], initial = "simple")
  ses03 = ses(data, alpha = alphas[3], initial = "simple")
  # Plot setup: 2 rows x 1 column; wide right margin leaves room for the
  # legend placed outside the plot region (inset < 0 with xpd = TRUE).
  par(mfrow=c(2,1), mar=c(5.2, 4.2, 4.2, 8.2))
  # Plot fits: axes suppressed here and redrawn below with larger labels.
  ts.plot(data, ses01$fitted, ses02$fitted, ses03$fitted,
          gpars = list(col = c("black", colors), lwd = rep(3.0, 4), xaxt = "n",
                       yaxt = "n"),
          xlab = "")
  axis(1, cex.axis = 1.2)
  axis(2, cex.axis = 1.2)
  title(xlab = "Days", ylab = "Manuscripts Received",
        main = "SES Fits",
        cex.lab = 1.3, cex.main = 1.5)
  grid(nx = NULL, ny = NULL, col = "gray45", lty = "dotted",
       lwd = 1.0)
  legend("bottomright", inset=c(-0.2,0), xpd = TRUE, legend = alphas, col = colors, lty = 1,
         lwd = 3.0, title = "Alpha", cex = 1.0)
  # Plot residuals: same layout and styling as the fits panel.
  ts.plot(data, ses01$residuals, ses02$residuals, ses03$residuals,
          gpars = list(col = c("black", colors), lwd = rep(3.0, 4), xaxt = "n",
                       yaxt = "n"),
          xlab = "")
  axis(1, cex.axis = 1.2)
  axis(2, cex.axis = 1.2)
  title(xlab = "Days", ylab = "Manuscripts Received",
        main = "SES Residuals",
        cex.lab = 1.3, cex.main = 1.5)
  grid(nx = NULL, ny = NULL, col = "gray45", lty = "dotted",
       lwd = 1.0)
  legend("bottomright", inset = c(-0.2,0), xpd = TRUE, legend = alphas, col = colors, lty = 1,
         lwd = 3.0, title = "Alpha", cex = 1.0)
}
# Experiment #1: 6 months of data (Jul-Dec 2016), alpha = 0.1, 0.3, 0.5
sesExperiment(tsDaily6mos, alphas = c(0.5, 0.3, 0.1),
colors = c("green4", "deepskyblue", "firebrick"))
# Experiment #2: 3 months of data (Oct-Dec 2016), alpha = 0.1, 0.3, 0.5
sesExperiment(tsDaily3mos, alphas = c(0.5, 0.3, 0.1),
colors = c("green4", "deepskyblue", "firebrick"))
# Experiment #3: 3 months of data (Oct-Dec 2016), alpha = 0.2, 0.3, 0.4
sesExperiment(tsDaily3mos, alphas = c(0.4, 0.3, 0.2),
colors = c("green4", "deepskyblue", "firebrick"))
|
4a1ab711847d0d140da40638694b571157b6b460
|
0f1b9a4093205dcaf5d9432529ab2f2858bed6c3
|
/bugLocalization/A_SOBER/SOBER_Main.R
|
6e59a19f3cee0b6e6ee3a23e66aaad6e654847e6
|
[] |
no_license
|
zhuofubai/NUMFL
|
9cea285bfca34939d779a20837f90bbd2cd4d415
|
13fb7167b34ce1427390f9f4907cf3c55bf3aad4
|
refs/heads/master
| 2016-09-06T10:37:46.994952
| 2014-11-01T03:38:32
| 2014-11-01T03:38:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 995
|
r
|
SOBER_Main.R
|
# Driver script: run the SOBER fault-localization score over four Apache
# Commons Math test-data directories and append the per-version fractional
# scores to a file named "sober".
# NOTE(review): rm(list = ls()) wipes the caller's workspace and the paths
# below are hardcoded Windows-local; fine for a personal script, but both
# would need to go before any reuse.
rm(list = ls())
source('C:/Users/zhuofu/RProject/bugLocalization/A_SOBER/SOBER.R')
# One data directory per subject program / test suite.
dir1<-"C:/Users/zhuofu/workspace/apacheCommonMath2.1/TestEigenDecomposition/Data";###oldData2
dir2<-"C:/Users/zhuofu/workspace/apacheCommonMath3.1.1/TestDSCompiler/Data";
dir3<-"C:/Users/zhuofu/workspace/apacheCommonMath3.1.1/TestBlockRealMatrix/Data";
dir4<-"C:/Users/zhuofu/workspace2/apacheCommonMath3.1.1/TestRotationAndVector3D/Data";
# Number of versions (numbered data subfolders) in each directory.
n1=10;
n2=11;
n3=10
n4=10;
Dirs=c(dir1,dir2,dir3,dir4)
vers=c(n1,n2,n3,n4)
for(index in 1:length(Dirs)){
  dir<-Dirs[index]
  n<-vers[index]
  print(dir)
  print(n)
  sober_score<- rep(0, n)
  percent_sober<- rep(0, n)
  for(datafolder in 1:n){
    # SOBER() (sourced above) returns at least $sober and $n; the score is
    # normalized by result$n before being written out.
    result<-SOBER(dir,datafolder);
    sober_score[datafolder]<-result$sober;
    percent_sober[datafolder]<-sober_score[datafolder]/result$n
  }
  # One appended row per directory, n space-separated values.
  write(percent_sober, file="sober", append=TRUE, ncolumns=n, sep=" ")
}
# percent_simple
# print(mean(percent_simple))
# percent_complex
# print(mean(percent_complex))
|
2f3aab61b41b486cafb713a35db405948481b3ca
|
45d92ce0e14442795a47d307e58b79028d39082e
|
/plot1.R
|
3fd80af5f1d9c57cadc81dfe1c6207a9478e134d
|
[] |
no_license
|
dmontiel242/ExPDA_CP2
|
3887bb351298787cbe66f54794735a5d4cd570d5
|
6b686e10f84c79fc2c6a42c2a126ebfa182e0b9b
|
refs/heads/master
| 2021-01-18T13:42:29.310547
| 2015-07-26T00:39:55
| 2015-07-26T00:39:55
| 39,701,957
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 529
|
r
|
plot1.R
|
## plot1.R: total US PM2.5 emissions per year, with a fitted trend line,
## saved to plot1.png. The readRDS calls may take a few seconds.
library(dplyr)
library(reshape2)
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# NOTE: despite what an earlier comment claimed, there is NO Baltimore/fips
# filter here -- all NEI rows are melted, so the totals are nationwide.
meltNEI <- melt(NEI,id = c('fips','SCC','Pollutant','type','year'),measure.vars='Emissions')
# Sum Emissions by year (dcast over year ~ variable).
tmp <-dcast(data = meltNEI,year~variable,sum)
png('plot1.png')
with(tmp,plot(year,Emissions))
title('Total Emissions in United States vs. Year')
# Overlay a least-squares trend line on the yearly totals.
with(tmp,abline(lm(Emissions ~ year)))
dev.off()
|
ac28519cabbf1d24f3a55b70abbc66da216d738c
|
c5b75254602af556444e1eca4e25e17f9199f307
|
/global.r
|
ec1e7f488ae5e62df6fd5ea49da1f046c7c89d21
|
[] |
no_license
|
mounicasampara/Student-Modelling
|
f8b2f7aadc9edddaa98c585705e4c916a1de538f
|
d6d86a66be6e291acfc23e7b286acb9ccd09751f
|
refs/heads/master
| 2020-06-21T14:33:57.033361
| 2019-07-18T20:08:18
| 2019-07-18T20:08:18
| 197,481,726
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 21,394
|
r
|
global.r
|
library(shiny)
library(RSQLite)
library(DT)
library(DBI)
library(dplyr)
library(dbplyr)
sqlitePath <- "d:/mydb1.db"
print(sqlitePath)
db <- dbConnect(SQLite(), sqlitePath)
print(db)
getData_mocktest2 <- function(id){
query <- sprintf("select question from mocktest2_test1 where ID= %s",id)
#print(query)
question <- dbGetQuery(db,query)
#print(question)
query1 <- sprintf("select option1 from mocktest2_test1 where ID= %s",id)
query2 <- sprintf("select option2 from mocktest2_test1 where ID= %s",id)
query3 <- sprintf("select option3 from mocktest2_test1 where ID= %s",id)
query4 <- sprintf("select option4 from mocktest2_test1 where ID= %s",id)
query5 <- sprintf("select Answer from mocktest2_test1 where ID = %s",id)
option1<-dbGetQuery(db,query1)
option2<-dbGetQuery(db,query2)
option3<-dbGetQuery(db,query3)
option4<-dbGetQuery(db,query4)
answer<-dbGetQuery(db,query5)
x<-c(question,option1,option2,option3,option4,answer)
return(x)
}
getData_mocktest1 <- function(id){
query <- sprintf("select question from demotest_1 where ID= %s",id)
#print(query)
question <- dbGetQuery(db,query)
#print(question)
query1 <- sprintf("select option1 from demotest_1 where ID= %s",id)
query2 <- sprintf("select option2 from demotest_1 where ID= %s",id)
query3 <- sprintf("select option3 from demotest_1 where ID= %s",id)
query4 <- sprintf("select option4 from demotest_1 where ID= %s",id)
query5 <- sprintf("select Answer from demotest_1 where ID = %s",id)
option1<-dbGetQuery(db,query1)
option2<-dbGetQuery(db,query2)
option3<-dbGetQuery(db,query3)
option4<-dbGetQuery(db,query4)
answer<-dbGetQuery(db,query5)
x<-c(question,option1,option2,option3,option4,answer)
return(x)
}
getData_mocktest3 <- function(id){
query <- sprintf("select question from mocktest3_test1 where ID= %s",id)
#print(query)
question <- dbGetQuery(db,query)
#print(question)
query1 <- sprintf("select option1 from mocktest3_test1 where ID= %s",id)
query2 <- sprintf("select option2 from mocktest3_test1 where ID= %s",id)
query3 <- sprintf("select option3 from mocktest3_test1 where ID= %s",id)
query4 <- sprintf("select option4 from mocktest3_test1 where ID= %s",id)
query5 <- sprintf("select Answer from mocktest3_test1 where ID = %s",id)
option1<-dbGetQuery(db,query1)
option2<-dbGetQuery(db,query2)
option3<-dbGetQuery(db,query3)
option4<-dbGetQuery(db,query4)
answer<-dbGetQuery(db,query5)
x<-c(question,option1,option2,option3,option4,answer)
return(x)
}
check_login_details <- function(data){
query <- sprintf("select count(mailId) from registration where mailid like '%s' and password like '%s';",data[1],data[2])
print(query)
res <- dbGetQuery(db,query)
print(res)
print(data[2])
print(length(res))
if(res==0)
{
print("new user or incorrect login")
query1 <- sprintf("select count(mailId) from registration where mailId like '%s';",data[1])
res2<- dbGetQuery(db,query1)
print(res2)
if(res2==0)#new user
{
return(2)
}
else#wrong pswd
return(0)
}
if(res==1)#correct login credentials
{
return(1)
}
}
#----------------------------------
#----------------------------------
insert_reg_data <- function(data){
query <- sprintf("insert into registration values ('%s','%s','%s','%s','%s','%s')",
data[1],data[2],data[3],data[4],data[5],data[6])
print(query)
dbSendQuery(db,query)
}
#----------------------------------
#----------------------------------
insert_intial_std_model_data <- function(mail){
query <- sprintf("insert into std_model values ('%s',%s,%s,%s,%s,%s,%s);",
mail,0,0,0,0,0,0)
print(query)
dbSendQuery(db,query)
}
#----------------------------------
#----------------------------------
getData <- function(id){
query <- sprintf("select question from demotest_questions where ID= %s",id)
#print(query)
question <- dbGetQuery(db,query)
#print(question)
query1 <- sprintf("select option1 from demotest_questions where ID= %s",id)
query2 <- sprintf("select option2 from demotest_questions where ID= %s",id)
query3 <- sprintf("select option3 from demotest_questions where ID= %s",id)
query4 <- sprintf("select option4 from demotest_questions where ID= %s",id)
query5 <- sprintf("select Answer from demotest_questions where ID = %s",id)
option1<-dbGetQuery(db,query1)
option2<-dbGetQuery(db,query2)
option3<-dbGetQuery(db,query3)
option4<-dbGetQuery(db,query4)
answer<-dbGetQuery(db,query5)
x<-c(question,option1,option2,option3,option4,answer)
return(x)
}
m_getData <- function(id,data_base){
table_name <- paste0("demotest",data_base)
print(table_name)
query <- sprintf("select question from %s where ID= %s",table_name,id)
#print(query)
question <- dbGetQuery(db,query)
#print(question)
query1 <- sprintf("select option1 from %s where ID= %s",table_name,id)
query2 <- sprintf("select option2 from %s where ID= %s",table_name,id)
query3 <- sprintf("select option3 from %s where ID= %s",table_name,id)
query4 <- sprintf("select option4 from %s where ID= %s",table_name,id)
query5 <- sprintf("select Answer from %s where ID = %s",table_name,id)
option1<-dbGetQuery(db,query1)
option2<-dbGetQuery(db,query2)
option3<-dbGetQuery(db,query3)
option4<-dbGetQuery(db,query4)
answer<-dbGetQuery(db,query5)
x<-c(question,option1,option2,option3,option4,answer)
return(x)
}
m2_getData <- function(id,data_base){
table_name <- paste0("mocktest2_test",data_base)
print(table_name)
query <- sprintf("select question from %s where ID= %s",table_name,id)
#print(query)
question <- dbGetQuery(db,query)
#print(question)
query1 <- sprintf("select option1 from %s where ID= %s",table_name,id)
query2 <- sprintf("select option2 from %s where ID= %s",table_name,id)
query3 <- sprintf("select option3 from %s where ID= %s",table_name,id)
query4 <- sprintf("select option4 from %s where ID= %s",table_name,id)
query5 <- sprintf("select Answer from %s where ID = %s",table_name,id)
option1<-dbGetQuery(db,query1)
option2<-dbGetQuery(db,query2)
option3<-dbGetQuery(db,query3)
option4<-dbGetQuery(db,query4)
answer<-dbGetQuery(db,query5)
x<-c(question,option1,option2,option3,option4,answer)
return(x)
}
m3_getData <- function(id,data_base){
table_name <- paste0("mocktest3_test",data_base)
print(table_name)
query <- sprintf("select question from %s where ID= %s",table_name,id)
#print(query)
question <- dbGetQuery(db,query)
#print(question)
query1 <- sprintf("select option1 from %s where ID= %s",table_name,id)
query2 <- sprintf("select option2 from %s where ID= %s",table_name,id)
query3 <- sprintf("select option3 from %s where ID= %s",table_name,id)
query4 <- sprintf("select option4 from %s where ID= %s",table_name,id)
query5 <- sprintf("select Answer from %s where ID = %s",table_name,id)
option1<-dbGetQuery(db,query1)
option2<-dbGetQuery(db,query2)
option3<-dbGetQuery(db,query3)
option4<-dbGetQuery(db,query4)
answer<-dbGetQuery(db,query5)
x<-c(question,option1,option2,option3,option4,answer)
return(x)
}
resp_saving <- function(id,ans,res)
{
print("inside response saving function")
query <- sprintf("update demotest_questions set selectedans = %s where ID= %s",ans,id)
dbSendQuery(db,query)
query1 <- sprintf("update demotest_questions set result = %s where ID= %s",res,id)
dbSendQuery(db,query1)
}
m_resp_saving <- function(id,ans,res,data_base){
print("inside mocktest response saving function")
table_name <- paste0("demotest",data_base)
query <- sprintf("update %s set selectedans = %s where ID =%s",table_name,ans,id)
dbSendQuery(db,query)
query1 <- sprintf("update %s set result = %s where ID =%s",table_name,res,id)
dbSendQuery(db,query1)
}
m2_resp_saving <- function(id,ans,res,data_base){
print("inside mocktest response saving function")
table_name <- paste0("mocktest2_test",data_base)
query <- sprintf("update %s set selectedans = %s where ID =%s",table_name,ans,id)
dbSendQuery(db,query)
query1 <- sprintf("update %s set result = %s where ID =%s",table_name,res,id)
dbSendQuery(db,query1)
}
m3_resp_saving <- function(id,ans,res,data_base){
print("inside mocktest response saving function")
table_name <- paste0("mocktest3_test",data_base)
query <- sprintf("update %s set selectedans = %s where ID =%s",table_name,ans,id)
dbSendQuery(db,query)
query1 <- sprintf("update %s set result = %s where ID =%s",table_name,res,id)
dbSendQuery(db,query1)
}
calc_prob <- function(q,d){
d1 <- c(0,0,0,0,0)
levels_frame <- c(0,0,0,0,0)
query <- sprintf("select mailid from prsnt_student where name like 'shravya';")
res <- dbGetQuery(db,query)
print(res)
query <- sprintf("select * from studentmodel_final where mail_id = '%s';",res)
d1<- dbGetQuery(db,query)
print(d1)
for(i in 1:5){
print("NOW VALUE OF I IS +++++++++++++++++++++++++++")
print(i)
print(d1[i+1])
print(q)
print("kaeufkuea")
print(d[i])
if(d1[i+1] == 0 && q == 1){
d1[i+1] = d[i]
}
else if(d1[i+1] == 0 && q == 0){
print("first else if")
next
}
else if(d[i]==0 && q==1){
print("Second else if")
next
}
else{
data1 <- c(0,0,0,0,0,0)
data2 <- c(0,0,0,0,0,0)
data1 <- get_dataframe(d1[i+1],1)
data2 <- get_dataframe(d[i],q)
print(data1)
print(data2)
den <- calc_den (data1,data2)
data_frame_res <- calc_numr (data1,data2,den)
print("your probabilty of answering a question related to the topic 'probability' is")
print(data_frame_res)
d1[i+1] = data_frame_res[2]
levels_frame[i] = data_frame_res[1]
}
}
print(d1)
print(levels)
#now insert data into database
query <- sprintf("update studentmodel_final set ratio = %s ,probability = %s, geometry = %s, pNc =%s, aptitude = %s,ratio_level=%s,probability_level = %s,geometry_level = %s,pNc_level=%s,aptitude_level = %s where mail_id like '%s'",
d1[2],d1[3],d1[4],d1[5],d1[6],levels_frame[1],levels_frame[2],levels_frame[3],levels_frame[4],levels_frame[5],res)
print(query)
dbSendQuery(db,query)
}
dempster_shafer<-function(id)
{
query <- sprintf("select result from demotest_questions where id=%s",id)
print(query)
result <- dbGetQuery(db,query)
query1 <- sprintf("select ratio from demotest_questions where id=%s",id)
query2 <- sprintf("select probability from demotest_questions where id=%s",id)
query3 <- sprintf("select geometry from demotest_questions where id=%s",id)
query4 <- sprintf("select pNc from demotest_questions where id=%s",id)
query5 <- sprintf("select aptitude from demotest_questions where id=%s",id)
ratio <- dbGetQuery(db,query1)
probability <- dbGetQuery(db,query2)
geometry <- dbGetQuery(db,query3)
pNc <- dbGetQuery(db,query4)
aptitude <- dbGetQuery(db,query5)
d <- c(ratio,probability,geometry,pNc,aptitude)
print("hello there")
print(d)
calc_prob(result,d)
}
m_dempster_shafer<-function(id,data_base)
{
table_name <- paste0("demotest_",data_base)
query <- sprintf("select result from %s where id=%s",table_name,id)
print(query)
result <- dbGetQuery(db,query)
query1 <- sprintf("select ratio from %s where id=%s",table_name,id)
query2 <- sprintf("select probability from %s where id=%s",table_name,id)
query3 <- sprintf("select geometry from %s where id=%s",table_name,id)
query4 <- sprintf("select pNc from %s where id=%s",table_name,id)
query5 <- sprintf("select aptitude from %s where id=%s",table_name,id)
ratio <- dbGetQuery(db,query1)
probability <- dbGetQuery(db,query2)
geometry <- dbGetQuery(db,query3)
pNc <- dbGetQuery(db,query4)
aptitude <- dbGetQuery(db,query5)
d <- c(ratio,probability,geometry,pNc,aptitude)
print("hello there")
print(d)
calc_prob(result,d)
}
m2_dempster_shafer<-function(id,data_base)
{
table_name <- paste0("mocktest2_test",data_base)
query <- sprintf("select result from %s where id=%s",table_name,id)
print(query)
result <- dbGetQuery(db,query)
query1 <- sprintf("select ratio from %s where id=%s",table_name,id)
query2 <- sprintf("select probability from %s where id=%s",table_name,id)
query3 <- sprintf("select geometry from %s where id=%s",table_name,id)
query4 <- sprintf("select pNc from %s where id=%s",table_name,id)
query5 <- sprintf("select aptitude from %s where id=%s",table_name,id)
ratio <- dbGetQuery(db,query1)
probability <- dbGetQuery(db,query2)
geometry <- dbGetQuery(db,query3)
pNc <- dbGetQuery(db,query4)
aptitude <- dbGetQuery(db,query5)
d <- c(ratio,probability,geometry,pNc,aptitude)
print("hello there")
print(d)
calc_prob(result,d)
}
m3_dempster_shafer<-function(id,data_base)
{
table_name <- paste0("mocktest3_test",data_base)
query <- sprintf("select result from %s where id=%s",table_name,id)
print(query)
result <- dbGetQuery(db,query)
query1 <- sprintf("select ratio from %s where id=%s",table_name,id)
query2 <- sprintf("select probability from %s where id=%s",table_name,id)
query3 <- sprintf("select geometry from %s where id=%s",table_name,id)
query4 <- sprintf("select pNc from %s where id=%s",table_name,id)
query5 <- sprintf("select aptitude from %s where id=%s",table_name,id)
ratio <- dbGetQuery(db,query1)
probability <- dbGetQuery(db,query2)
geometry <- dbGetQuery(db,query3)
pNc <- dbGetQuery(db,query4)
aptitude <- dbGetQuery(db,query5)
d <- c(ratio,probability,geometry,pNc,aptitude)
print("hello there")
print(d)
calc_prob(result,d)
}
get_dataframe<- function(v,n){
v <- as.numeric(v)
data <- c(0,0,0,0,0,0)
print("inside insert_prob")
if(n==0){
print("question attempted wrong")
#v in low
data[1] <- v
rem <- (1-v)*10
if(v == 0.8 ){
first_part <- 0.2
second_part <- 0
data[4] <- first_part
data[6] <- second_part
}
else if(rem %% 2 ==0){
#if even
first_part <- (rem/2)+1
second_part <- (rem/2)-1
data[4] <- first_part/10
data[6] <- second_part/10
}
else{
if(v==0.9){
first_part <- 1
second_part <- 0
}
else{
first_part <- (rem/2)-0.5
second_part <- rem - first_part
}
data[6] <- first_part/10
data[4] <- second_part/10
}
}
else{
#v in master
print("question attempted correctly")
data[3] <- v
rem <- (1-v)*10
print(rem)
if(v == 0.8 ){
first_part <- 0.2
second_part <- 0
data[6] <- first_part
data[5] <- second_part
}
else if(rem %% 2 ==0){
#if even
first_part <- (rem/2)-1
second_part <- (rem/2)+1
data[5] <- first_part/10
data[6] <- second_part/10
}
else{
if(v==0.9){
first_part <- 1
second_part <- 0
}
else{
first_part <- (rem/2)-0.5
second_part <- rem - first_part
}
data[5] <- first_part/10
data[6] <- second_part/10
}
}
return (data)
}
calc_den <- function(data1,data2){
print("inside calc_den")
den1 <- (data1[1]*data2[2])+(data1[1]*data2[3])+(data1[1]*data2[6])
den2 <- (data1[2]*data2[1])+(data1[2]*data2[3])+(data1[2]*data2[5])
den3 <- (data1[3]*data2[1])+(data1[3]*data2[2])+(data1[3]*data2[4])
den4 <- (data1[4]*data2[3])
den5 <- (data1[5]*data2[2])
den6 <- (data1[6]*data2[1])
print(den1)
print(den2)
print(den3)
print(den4)
print(den5)
print(den6)
den <- (den1+den2+den3+den4+den5+den6)
print(den)
den7 <- 1.00-den
print(den7)
return(den7)
}
calc_numr <- function(data1,data2,den){
print("inside calc_numr")
numr1 <- (data1[1]*data2[1])+(data1[1]*data2[4])+(data1[1]*data2[5])+(data1[1]*data2[6])+(data1[4]*data2[1])+(data1[4]*data2[5])+(data1[5]*data2[1])
numr2 <- (data1[2]*data2[2])+(data1[2]*data2[4])+(data1[2]*data2[6])+(data1[4]*data2[2])+(data1[4]*data2[6])+(data1[6]*data2[2])+(data1[6]*data2[4])
numr3 <- (data1[3]*data2[3])+(data1[3]*data2[5])+(data1[3]*data2[6])+(data1[5]*data2[3])+(data1[5]*data2[6])+(data1[6]*data2[3])+(data1[6]*data2[5])
numr4 <- (data1[4]*data2[4])
numr5 <- (data1[5]*data2[5])
numr6 <- (data1[6]*data2[6])
print(numr1)
print(numr2)
print(numr3)
print(numr4)
print(numr5)
print(numr6)
print("you now and see m1 xor m2 values")
m1_xor_m2 <- c((numr1/den),(numr2/den),(numr3/den),(numr4/den),(numr5/den),(numr6/den))
print(m1_xor_m2)
m_index=1
for(k in 2:5){
if(m1_xor_m2[k]>m1_xor_m2[m_index]){
m_index = k
}
}
print(m_index)
print(m1_xor_m2[m_index])
m <- max(m1_xor_m2)
print("well max is")
print(m)
print(m1_xor_m2[m_index])
val <- m *100
print(val)
p <- floor(val %% 10)
print(p)
if(p < 5 ){
res <- (floor(m*10))/10
}
else{
res <- (ceiling(m*10))/10
}
print(res)
data_frame_result <- c(m_index,res)
return(data_frame_result)
}
get_levels <- function(){
result<-dbGetQuery(db,"select * from studentmodel_final where mail_id='shravyayadavk@gmail.com'")
print(result)
query <- sprintf("select mailid from prsnt_student where name like 'shravya';")
res <- dbGetQuery(db,query)
print(res)
query1 <- sprintf("select ratio_level from studentmodel_final where mail_id = '%s'",res)
print(query1)
r <- dbGetQuery(db,query1)
print(r)
query2 <- sprintf("select probability_level from studentmodel_final where mail_id =' %s'",res)
print(query2)
p <- dbGetQuery(db,query2)
print(p)
query3 <- sprintf("select geometry_level from studentmodel_final where mail_id =' %s'",res)
print(query3)
g <- dbGetQuery(db,query3)
print(g)
query4 <- sprintf("select pNc_level from studentmodel_final where mail_id ='%s'",res)
print(query4)
pNc <- dbGetQuery(db,query4)
print(pNc)
query5 <- sprintf("select aptitude_level from studentmodel_final where mail_id = '%s'",res)
print(query5)
a <- dbGetQuery(db,query5)
print(a)
h <- c(as.numeric(r),as.numeric(p),as.numeric(g),as.numeric(pNc),as.numeric(a))
print(h)
return(h)
}
get_plot_data <- function(){
result<-dbGetQuery(db,"select * from studentmodel_final where mail_id='shravyayadavk@gmail.com'")
print(result)
query <- sprintf("select mailid from prsnt_student where name like 'shravya';")
res <- dbGetQuery(db,query)
print(res)
query1 <- sprintf("select ratio from studentmodel_final where mail_id = '%s'",res)
print(query1)
r <- dbGetQuery(db,query1)
print(r)
query2 <- sprintf("select probability from studentmodel_final where mail_id =' %s'",res)
print(query2)
p <- dbGetQuery(db,query2)
print(p)
query3 <- sprintf("select geometry from studentmodel_final where mail_id =' %s'",res)
print(query3)
g <- dbGetQuery(db,query3)
print(g)
query4 <- sprintf("select pNc from studentmodel_final where mail_id ='%s'",res)
print(query4)
pNc <- dbGetQuery(db,query4)
print(pNc)
query5 <- sprintf("select aptitude from studentmodel_final where mail_id = '%s'",res)
print(query5)
a <- dbGetQuery(db,query5)
print(a)
h <- c(as.numeric(r),as.numeric(p),as.numeric(g),as.numeric(pNc),as.numeric(a))
print(h)
return(h)
}
get_data_frame <- function(probs,x){
print(probs)
df <- c(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)
for(j in 1:5){
print(j)
level <- probs[j]
print(level)
i = level
new_x <- x() + level
x(new_x)
print("NOW STARTED ;)")
print(x())
while(i<=6){
print("value of i is:")
print(i)
if(i%%2==0){
#if i is even
df[x()] <- 1
new_x <- x() + 1
x(new_x)
print("now value of x is")
print(x())
print(df)
i=i+1
}
else{
#if i is odd
df[x()] <-1
print("df contents are")
print(df)
new_x <- x() + 1
x(new_x)
print("now value of X is")
print(x())
df[x()] <-1
print("df contents are")
print(df)
new_x <- x() + 1
x(new_x)
print("now value of X is")
print(x())
i=i+2
}
}
new_x <- x() - 1
x(new_x)
print("now value of X is")
print(x())
}
print("well now final df is")
print(df)
return(df)
}
get_next_level <- function(data){
d <- c(0,0,0,0,0,0)
#to construct a vector
for (i in data){
d[i] = d[i] + 1
}
print("d vector contents are:")
print(d)
#get max
max_index = 1
for(i in 2:6){
if(d[i] > d[max_index]){
max_index = i
}
}
print("max value is:")
print(max_index)
print(max_index)
}
|
7e1153b94d9e5eae709bfd4bee15c75e115e6dfa
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/ExplainPrediction/R/explain.R
|
056bc25b2d80b82b1eec37a72df8798a35481e1c
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 29,648
|
r
|
explain.R
|
# The code for generation of individual instance explanations and model explanations.
# Methods: EXPLAIN, IME
#
# Author: rmarko
###############################################################################
#explain<- function(object, instances, ...) UseMethod("explanation", object)
# return explanation for given classifier, instance and explanation mode using method EXPLAIN
explain <- function(model, testData, explainInfo, naMode=c("avg", "na"), explainType=c("WE","infGain","predDiff"),
classValue=1, nLaplace=nrow(testData))
{
if (! inherits(model, "CoreModel"))
stop("Model shall be of class CoreModel.")
isRegression <- model$noClasses == 0
noClasses <- model$noClasses
noInst <- nrow(testData)
if (! isRegression) {
class.lev <- model$class.lev;
noClasses <- model$noClasses
if (is.numeric(classValue)) {
selClass <- factor(class.lev[classValue],levels=class.lev)
classIdx <- classValue
}
else if (is.factor(classValue)) {
selClass <- classValue
classIdx <- as.integer(classValue)
0}
else if (is.character(classValue)) {
selClass <- factor(classValue,levels=class.lev)
if (is.na(selClass))
stop("Invalid classValue parameter supplied, shall be compatible with the model: ", classValue)
classIdx <- as.integer(selClass)
}
else
stop("Wrong type of classValue parameter supplied: ", classValue)
}
naMode <- match.arg(naMode)
explainType <- match.arg(explainType)
if (isRegression)
explainType <- "predDiff"
newFormula <- reformulate(all.vars(model$formula)[-1])
dat <- model.frame(newFormula, data=testData, na.action=na.pass);
discDat <- applyDiscretization(dat, explainInfo$discretization)
noAttr <- ncol(dat)
# get prediction and predicted class
pred <- getPredictions(model, dat, nLaplace, noClasses, classIdx)
# prepare data structures for output
expl <- matrix(data=0, nrow=noInst, ncol=noAttr)
pCXnA <- matrix(data=0, nrow=noInst, ncol=noAttr)
colnames(expl) <- colnames(pCXnA) <- names(dat)
#avIdxs <- matrix(data=0, nrow=noInst, ncol=noAttr)
#names(avIdxs) <- names(dat)
for (a in 1:noAttr) {
attrName <- names(dat)[a]
origValue <- dat[,a]
if (naMode=="na") {
if (is.factor(dat[1,a]))
dat[,a] <- factor(NA, levels=origValue[1])
else
dat[,a] <- NA
pCXnA[,a] <- getPredictions(model, dat, nLaplace, noClasses, classIdx)
}
else if (naMode=="avg") {
for (v in 1:length(explainInfo$"discPoints"[[attrName]])){
dat[,a] <- explainInfo$"discPoints"[[attrName]][v]
nApredAV <- getPredictions(model, dat, nLaplace, noClasses, classIdx)
pCXnA[,a] <- pCXnA[,a] + explainInfo$"pAV"[[attrName]][v] * nApredAV
}
}
if (!isRegression)
pCXnA[,a] <- correctLaplace(pCXnA[,a], nLaplace, noClasses)
expl[,a] <- explainWithMethod(pred, pCXnA[,a], explainType)
dat[,a] <- origValue
}
list(expl=expl, pCXA=pred, pCxnA=pCXnA)
}
getPredictions<-function(model, instances, nLaplace=1, noClasses=2,classIdx=1) {
pred <- predict(model, instances)
if (model$noClasses == 0) {
pCX <- pred
}
else {
pCX <- pred$probabilities[,classIdx]
pCX <- correctLaplace(pCX, nLaplace, noClasses)
}
return(pCX)
}
explainWithMethod<-function(pCXA, pCXnA, explainType=c("WE","infGain","predDiff")){
match.arg(explainType)
noInst = length(pCXA)
expl = vector(length=noInst)
if (explainType=="WE") {
for (i in 1:noInst) {
## we should not get exactly 0, but if we do...
if (pCXA[i]==0.0 || pCXA[i]==1.0 || pCXnA[i]==0.0 || pCXnA[i]==1.0) {
if (pCXA[i]==pCXnA[i])
expl[i] = 0.0
else {
if (pCXA[i]>pCXnA[i]){
expl[i] = 1.0
}
else {
expl[i] = -1.0
}
}
}
else
expl[i]=log2(pCXA[i]*(1-pCXnA[i])/((1.0-pCXA[i])*pCXnA[i]))
}
}
else if (explainType=="infGain") {
## return information gain for given probabilities
for (i in 1:noInst) {
## we should not get exactly 0, but if we do...
if (pCXA[i]==0.0 || pCXnA[i]==0.0) {
if (pCXA[i]==pCXnA[i])
expl[i] = 0.0
else {
if (pCXA[i]>pCXnA[i]){
expl[i] = 1.0
}
else {
expl[i] = -1.0
}
}
}
else
expl[i]=-log2(pCXnA[i]/pCXA[i])
}
} else if (explainType == "predDiff")
expl <- pCXA - pCXnA
return(expl)
}
correctLaplace <- function(pC, n, nC) {
## return Laplace correction of given probabilities
if (n > 0)
pC = (pC*n + 1.0)/(n+nC)
else
return(pC)
}
prepareForExplanations <- function(model, trainData, method=c("EXPLAIN", "IME"), estimator=NULL,
genType=c("rf","rbf","indAttr"), noAvgBins=20) {
method <- match.arg(method)
explainInfo <- list(discretization=list(), avNames=list())
if (method == "IME") {
genType <- match.arg(genType)
explainInfo$generator <- list()
explainInfo$discretization <- discretize(model$formula, trainData, method = "equalFrequency", equalDiscBins=noAvgBins)
if (genType=="indAttr")
explainInfo$generator <- indAttrGen(model$formula, trainData, cdfEstimation = "ecdf")
else if (genType=="rbf")
explainInfo$generator <- rbfDataGen(model$formula, trainData)
else if (genType=="rf")
explainInfo$generator <- treeEnsemble(model$formula, trainData, minNodeWeight=10, noSelectedAttr=max(2,ceiling(sqrt(ncol(trainData)-1))))
discData <- applyDiscretization(trainData, explainInfo$discretization)
className <- all.vars(model$formula)[1]
attrNames <- all.vars(model$formula)[-1]
# prepare discretization points and names
for (a in 1:length(attrNames)) {
aname <- attrNames[a]
explainInfo$avNames[[a]] <- levels(discData[1,aname])
}
}
else if (method=="EXPLAIN") {
if (is.null(estimator))
if (model$noClasses == 0) # regression
estimator <- "RReliefFexpRank"
else
estimator <- "ReliefFexpRank"
explainInfo$pAV <- list()
explainInfo$discPoints <- list()
explainInfo$discretization <- discretize(model$formula, trainData, method="greedy", estimator=estimator)
midPoints <- intervalMidPoint(trainData, explainInfo$discretization, midPointMethod="equalFrequency")
discData <- applyDiscretization(trainData, explainInfo$discretization)
className <- all.vars(model$formula)[1]
attrNames <- all.vars(model$formula)[-1]
# prepare discretization points and names
for (a in 1:length(attrNames)) {
aname <- attrNames[a]
if (is.factor(trainData[1,aname])) {
ordered <- is.ordered(trainData[1,aname])
explainInfo$discPoints[[a]] <- factor(levels(trainData[1,aname]), levels=levels(trainData[1,aname]), ordered=ordered)
}
else {
explainInfo$discPoints[[a]] <- midPoints[[aname]]
}
explainInfo$avNames[[a]] <- levels(discData[1,aname])
}
names(explainInfo$discPoints) <-names(explainInfo$avNames) <- attrNames
# prepare pAV, probabilities of attribute values or discretization intervals
for (a in 1:length(attrNames)) {
aname <- attrNames[a]
noVal <- table(discData[,aname], useNA="no")
explainInfo$pAV[[a]] <- correctLaplace(noVal / sum(noVal), nrow(discData), length(noVal))
names(explainInfo$pAV[[a]]) <- levels(discData[1,aname])
}
names(explainInfo$pAV) <- attrNames
}
explainInfo
}
# average the explanations over attribute values and discretization intervals
explanationAverages <- function(model, trainData, method=c("EXPLAIN", "IME"), explainInfo, naMode=c("avg", "na"),
explainType=c("WE","infGain","predDiff"),classValue=1, nLaplace=nrow(trainData),
pError=0.05, err=0.05, batchSize=40, maxIter=1000)
{
noInst <- nrow(trainData)
newFormula <- reformulate(all.vars(model$formula)[-1])
dat <- model.frame(newFormula, data=trainData, na.action=na.pass);
noAttr <- ncol(dat)
method <- match.arg(method)
discDat <- applyDiscretization(dat, explainInfo$discretization)
## initialization
avExplain <- avExplainPos <- avExplainNoPos <- avExplainNeg <- avExplainNoNeg <- avExplainNo <- list()
aExplain <- vector(mode="numeric",length=noAttr)
aExplainPos <- vector(mode="numeric",length=noAttr)
aExplainNeg <- vector(mode="numeric",length=noAttr)
aExplainNoPos <- vector(mode="numeric",length=noAttr)
aExplainNoNeg <- vector(mode="numeric",length=noAttr)
aExplainPosSum <- vector(mode="numeric",length=noAttr)
aExplainNegSum <- vector(mode="numeric",length=noAttr)
if (method == "EXPLAIN")
expl = explain(model, dat, explainInfo, naMode, explainType, classValue,nLaplace)$expl
else if (method == "IME")
expl <- ime(model, dat, classValue=classValue, imeInfo=explainInfo,
pError=pError, err=err, batchSize=batchSize, maxIter=maxIter)$expl
## computing for each value
for (a in 1:noAttr) {
noExpl <- length(explainInfo$avNames[[a]])
avExplain[[a]] <- vector(mode="numeric",length=noExpl)
avExplainPos[[a]] <- vector(mode="numeric",length=noExpl)
avExplainNeg[[a]] <- vector(mode="numeric",length=noExpl)
avExplainNoPos[[a]] <- vector(mode="numeric",length=noExpl)
avExplainNoNeg[[a]] <- vector(mode="numeric",length=noExpl)
avExplainNo[[a]] <- vector(mode="numeric",length=noExpl)
posExpl <- expl[,a] >= 0
agg <-aggregate(data.frame(expl=expl[,a], count=rep(1, times=noInst)), by=list(ddBy=discDat[[a]],signBy=posExpl),sum )
avExplainPos[[a]][agg[agg[,"signBy"]==TRUE,"ddBy"]] <- agg[agg[,"signBy"]==TRUE,"expl"]
avExplainNoPos[[a]][agg[agg[,"signBy"]==TRUE,"ddBy"]] <- agg[agg[,"signBy"]==TRUE,"count"]
avExplainNeg[[a]][agg[agg[,"signBy"]==FALSE,"ddBy"]] <- agg[agg[,"signBy"]==FALSE,"expl"]
avExplainNoNeg[[a]][agg[agg[,"signBy"]==FALSE,"ddBy"]] <- agg[agg[,"signBy"]==FALSE,"count"]
avExplainNo[[a]] <- avExplainNoPos[[a]] + avExplainNoNeg[[a]]
avExplain[[a]] <- avExplainPos[[a]] + avExplainNeg[[a]]
aExplainNoPos[a] <- sum(avExplainNoPos[[a]])
aExplainPosSum[a] <- sum(avExplainPos[[a]])
aExplainNoNeg[a] <- sum(avExplainNoNeg[[a]])
aExplainNegSum[a] <- sum(avExplainNeg[[a]])
# averages
avExplainPos[[a]] = avExplainPos[[a]] / avExplainNoPos[[a]]
avExplainPos[[a]][is.nan(avExplainPos[[a]])] <- 0
avExplainNeg[[a]] = avExplainNeg[[a]] / avExplainNoNeg[[a]]
avExplainNeg[[a]][is.nan(avExplainNeg[[a]])] <- 0
avExplain[[a]] = (avExplainPos[[a]] + avExplainNeg[[a]]) / avExplainNo[[a]]
avExplain[[a]][is.nan(avExplain[[a]])] <- 0
}
# attribute explanation averages
aExplain = (aExplainPosSum + aExplainNegSum) / (aExplainNoPos + aExplainNoNeg)
aExplain[is.nan(aExplain)] <- 0
aExplainPos = aExplainPosSum / aExplainNoPos
aExplainPos[is.nan(aExplainPos)] <- 0
aExplainNeg = aExplainNegSum / aExplainNoNeg
aExplainNeg[is.nan(aExplainNeg)] <- 0
return(list(attrAvg=aExplain, attrPosAvg=aExplainPos, attrNegAvg=aExplainNeg, avAvg=avExplain, avPosAvg=avExplainPos, avNegAvg=avExplainNeg))
}
# generate explanations and vizualizes it
explainVis<-function(model, trainData, testData, visLevel=c("both","model","instance"), method=c("EXPLAIN", "IME"),
problemName="", dirName=getwd(),
fileType=c("none","pdf","eps","emf","jpg","png","bmp","tif","tiff"), naMode=c("avg", "na"),
explainType=c("WE","infGain","predDiff"), classValue=1, nLaplace=nrow(trainData), estimator=NULL,
pError=0.05, err=0.05, batchSize=40, maxIter=1000, genType=c("rf", "rbf", "indAttr"),
noAvgBins=20, displayAttributes=NULL, modelVisCompact=FALSE,
displayThreshold=0.0, normalizeTo=0, displayColor=c("color","grey"), noDecimalsInValueName=2,
modelTitle="Model explanation for", instanceTitle="Explaining prediction for", recall=NULL)
{
if (! inherits(model, "CoreModel"))
stop("Model shall be of class CoreModel.")
isRegression <- model$noClasses == 0
noInst <- nrow(testData)
if (! isRegression) {
class.lev <- model$class.lev;
noClasses <- model$noClasses
if (is.numeric(classValue)) {
selClass <- factor(class.lev[classValue],levels=class.lev)
classIdx <- classValue
}
else if (is.factor(classValue)) {
selClass <- classValue
classIdx <- as.integer(classValue)
}
else if (is.character(classValue)) {
selClass <- factor(classValue,levels=class.lev)
if (is.na(selClass))
stop("Invalid classValue parameter supplied, shall be compatible with the model: ", classValue)
classIdx <- as.integer(selClass)
}
else
stop("Wrong type of classValue parameter supplied: ", classValue)
}
visLevel <- match.arg(visLevel)
method <- match.arg(method)
naMode <- match.arg(naMode)
explainType <- match.arg(explainType)
fileType <- match.arg(fileType)
if (fileType=="none")
fileType <- ""
displayColor <- match.arg(displayColor)
genType <- match.arg(genType)
className <- all.vars(model$formula)[1]
modelName <- model$model
if (isRegression) {
modelTitleName <- sprintf("%s %s, %s\nmodel: %s", modelTitle, problemName, className, modelName)
classValueName <- ""
explainType <- "predDiff"
}
else {
classValueName <- as.character(selClass)
modelTitleName <- sprintf("%s %s, %s = %s\nmodel: %s", modelTitle, problemName, className, classValueName, modelName)
}
## prepare explanations and averages
if (is.null(recall)) {
explainInfo <- prepareForExplanations(model, trainData, method=method, estimator=estimator, genType=genType, noAvgBins=noAvgBins)
explAvg <- explanationAverages(model, trainData, method=method, explainInfo=explainInfo,
naMode=naMode, explainType=explainType, classValue=classValue, nLaplace=nLaplace,
pError=pError, err=err, batchSize=batchSize, maxIter=maxIter)
}
else {
explainInfo <- recall$explainInfo
explAvg <- recall$explAvg
}
testDataDisc <- applyDiscretization(testData, explainInfo$discretization)
if (method=="EXPLAIN") {
if (is.null(recall))
expl <- explain(model, testData, explainInfo=explainInfo, naMode=naMode, explainType=explainType, classValue=classValue, nLaplace=nLaplace)
else
expl <- recall$expl
methodDesc <- paste("method ", method,", type ", explainType, sep="")
}
else if (method=="IME") {
if (is.null(recall))
expl <- ime(model, testData, classValue=classValue, imeInfo=explainInfo, pError=pError, err=err, batchSize=batchSize, maxIter=maxIter)
else
expl <- recall$expl
methodDesc <- paste("method ", method, sep="")
}
noAttr <- ncol(expl$expl)
attrNames <- colnames(expl$expl)
# model explanation plot
if (visLevel %in% c("both","model","compactModel")) {
if (is.null(displayAttributes)) {
displayAttributes <- attrNames
matched <- 1:length(attrNames)
}
else { # check if provided names are correct
matched <- displayAttributes %in% attrNames
if (!all(matched))
stop("Invalid attribute name(s) in parameter displayAttributes: ", paste(displayAttributes[ !matched ], collapse=", "))
}
preparePlot(fileName=paste(dirName,"/", problemName,"_model.", fileType, sep=""))
modelAVexplain(modelTitleName, displayAttributes, explainInfo$avNames[matched],
explainDesc=methodDesc, explAvg$attrPosAvg[matched], explAvg$attrNegAvg[matched],
explAvg$avPosAvg[matched], explAvg$avNegAvg[matched],
modelVisCompact=modelVisCompact, displayThreshold=displayThreshold, displayColor=displayColor)
#modelAVexplain(modelTitleName, attrNames, explainInfo$avNames,
# explainDesc=methodDesc, explAvg$attrPosAvg, explAvg$attrNegAvg,
# explAvg$avPosAvg, explAvg$avNegAvg, displayAttributes=displayAttributes, modelVisCompact=modelVisCompact, displayThreshold=displayThreshold, displayColor=displayColor)
if (fileType != "") # plotting to file
dev.off()
}
# instance explanation plot
if (visLevel %in% c("both","instance")) {
preparePlot(fileName=paste(dirName,"/", problemName,"_inst.", fileType, sep=""))
avPosAvg <- avNegAvg <- expl$expl
for (a in 1:noAttr) {
avPosAvg[,a] <- explAvg$"avPosAvg"[[a]][as.integer(testDataDisc[,a])]
avNegAvg[,a] <- explAvg$"avNegAvg"[[a]][as.integer(testDataDisc[,a])]
}
if (className %in% names(testData))
trueClass <- as.character(testData[,className])
else
trueClass<-vector(mode="character",length=noInst)
pCXnA <- NULL
for (i in 1:noInst) {
if (method=="EXPLAIN") {
pCXnA <- expl$"pCXnA"[i,]
}
explainVisPN(instanceTitle,problemName, row.names(testData)[i], modelName, className, classValueName, attrNames,
as.character(format(testData[i,],digits=noDecimalsInValueName)),
pCXA=expl$"pCXA"[i], pCXnA=pCXnA, explainDesc=methodDesc,
expl$"expl"[i,], avPosAvg[i,], avNegAvg[i,],
threshold=displayThreshold, displayColor=displayColor, normalizeTo=normalizeTo,
trueClass=trueClass[i], printPCXnA=FALSE)
}
if (fileType != "")
dev.off()
}
invisible(list(expl=expl, explAvg=explAvg, explainInfo=explainInfo))
}
# Draw the model-level explanation plot: for every attribute (and, unless
# modelVisCompact, every attribute value whose contribution exceeds
# displayThreshold) a pair of horizontal bars shows the average positive and
# negative explanation values.
#
# titleName         main plot title
# attrNames         character vector of attribute names
# attrValues        list with one vector of value names per attribute
# explainDesc       x-axis label describing the explanation method
# attrExplainPos    per-attribute average positive explanations
# attrExplainNeg    per-attribute average negative explanations
# avExplainPos      list of per-value average positive explanations
# avExplainNeg      list of per-value average negative explanations
# modelVisCompact   if TRUE draw only attribute-level bars, no value rows
# displayThreshold  value rows with all |contributions| below this are hidden
# displayColor      "color" or "grey" palette
#
# Called for its plotting side effect on the current graphics device.
modelAVexplain<-function(titleName, attrNames, attrValues, explainDesc,
		attrExplainPos, attrExplainNeg, avExplainPos, avExplainNeg,
		modelVisCompact=FALSE, displayThreshold=0.0, displayColor=c("color","grey"))
{
  displayColor <- match.arg(displayColor, c("color","grey"))
  if (displayColor=="color"){
    colAttrPos <- "blue"
    colAttrNeg <- "red"
    colAVpos <- "orange"
    colAVneg <- "lightblue"
  }
  else if (displayColor=="grey") {
    colAttrPos <- "gray50"
    colAttrNeg <- "grey50"
    colAVpos <- "gray90"
    colAVneg <- "gray90"
  }
  boxHeight <- 1.0
  noAttr <- length(attrNames)
  aname <- as.character(attrNames)
  maxChars <- max(nchar(aname))      # longest label; controls axis font size
  allValues <- noAttr                # total number of plotted rows
  avname <- list()
  xLim <- max(abs(attrExplainPos), abs(attrExplainNeg))
  yLabel <- c()
  usedValues <- list()
  if (modelVisCompact){
    yLabel <- attrNames
  }
  else {
    # collect, per attribute, the values whose contribution exceeds the threshold
    for (a in seq_len(noAttr)) {
      usedValues[[a]] <- (abs(avExplainPos[[a]]) > displayThreshold) | (abs(avExplainNeg[[a]]) > displayThreshold)
      allValues <- allValues + sum(usedValues[[a]])
      avname[[a]] <- as.character(attrValues[[a]][usedValues[[a]]])
      yLabel <- c(yLabel, attrNames[[a]], avname[[a]])
      xLim <- max(xLim, abs(avExplainPos[[a]][usedValues[[a]]]), abs(avExplainNeg[[a]][usedValues[[a]]]))
      maxChars <- max(maxChars, nchar(avname[[a]]))
    }
  }
  # empty base plot spanning all rows; explanations on the x axis
  par(xpd=NA, mgp=c(3,0.7,0), mar=c(4.5,7,4,2))
  plot(c(0, 0), c(1, allValues), type = "l", xlim = c(-xLim, xLim),
       ylim = c(1, allValues+0.5), xlab = explainDesc, ylab = "", axes = F)
  atLabel <- atLabelComp(xLim)
  axis(1, at=atLabel, labels=atLabel)
  ## left y axis: horizontal labels, shrunk when long or numerous
  las <- 1
  cex.axis <- 1
  if (maxChars > 15) {
    cex.axis <- 0.9
    if (maxChars > 20)
      cex.axis <- 0.6
  }
  if (allValues > 20)
    cex.axis <- max(0.4, -0.6/80 * allValues + 1.15) # linearly: 1 at 20 rows, 0.4 at 100
  axis(2, at=0.4*boxHeight+seq_len(allValues), labels=yLabel, las=las, cex.axis=cex.axis)
  title(main = titleName)
  # caption placed where the y axis label would be
  if (modelVisCompact)
    text(-xLim*1.07, (allValues+1), labels=c("attributes"), adj = c(1, 0))
  else
    text(-xLim*1.07, (allValues+1), labels=c("attributes/values"), adj = c(1, 0))
  y <- 1
  for(iA in seq_len(noAttr)) {
    if (!modelVisCompact)
      segments(-xLim, y-0.1, xLim, y-0.1, lty="dashed")  # separator between attributes
    # attribute-level bars: positive to the right, negative to the left
    rect(0, y, attrExplainPos[[iA]], y+0.80*boxHeight, col=colAttrPos)
    rect(attrExplainNeg[[iA]], y, 0, y+0.80*boxHeight, col=colAttrNeg)
    y <- y+1
    if (!modelVisCompact){
      # value-level bars, only for the values selected above
      for (v in seq_along(attrValues[[iA]])) {
        if (usedValues[[iA]][v]){
          rect(0, y, avExplainPos[[iA]][[v]], y+0.80*boxHeight, col=colAVpos)
          rect(avExplainNeg[[iA]][[v]], y, 0, y+0.80*boxHeight, col=colAVneg)
          y <- y+1
        }
      }
    }
  }
  segments(-xLim, y-0.1, xLim, y-0.1, lty="dashed")
}
# Draw the instance-level explanation plot: one row per attribute whose
# contribution exceeds `threshold`, showing the instance's contribution bar
# and, above it, the average positive/negative contributions of that
# attribute value.
#
# instanceTitle/problemName/instanceName/modelName  strings used in the title
# className/classValueName  explained class; classValueName=="" means regression
# attrNames    attribute names; attrValues  the instance's (formatted) values
# pCXA         model prediction for the instance
# pCXnA        per-attribute predictions without the attribute (may be NULL)
# explainDesc  x-axis label describing the explanation method
# expl         per-attribute explanation values for this instance
# avgAVexplainPos/avgAVexplainNeg  average value-level contributions (may be NULL)
# threshold    attributes with |expl| below this are not drawn
# displayColor "color" or "grey"; normalizeTo  if >0, rescale bars to this total
# trueClass    optional true class, shown in the subtitle
# printPCXnA   if TRUE annotate each bar with the prediction without the attribute
#
# Called for its plotting side effect on the current graphics device.
explainVisPN<-function(instanceTitle, problemName, instanceName, modelName, className, classValueName, attrNames, attrValues,
		pCXA, pCXnA, explainDesc, expl,
		avgAVexplainPos, avgAVexplainNeg, threshold=0, displayColor=c("color","grey"),
		normalizeTo=0, trueClass="", printPCXnA=FALSE)
{
  displayColor <- match.arg(displayColor)
  if (displayColor=="color"){
    colExplainPos <- "blue"
    colExplainNeg <- "red"
    colAvgPos <- "lightblue"
    colAvgNeg <- "orange"
  }
  else if (displayColor=="grey") {
    colExplainPos <- "gray50"
    colExplainNeg <- "grey50"
    colAvgPos <- "gray90"
    colAvgNeg <- "gray90"
  }
  usedAttr <- which(abs(expl) >= threshold)   # attributes visible in the plot
  noUsed <- length(usedAttr)
  absSumUsed <- sum(abs(expl[usedAttr]))
  if (noUsed == 0) {
    # nothing exceeds the threshold: show an informative placeholder instead
    plot.new()
    text(0.5,0.5,"All explanations are below threshold", vfont=c("serif","bold"))
    text(0.5,0.4,sprintf("model=%s, threshold=%g",modelName, threshold), vfont=c("serif","bold"))
    text(0.5,0.3,"check also naMode used", vfont=c("serif","bold"))
  }
  else {
    usedAttrNames <- 1:noUsed
    usedAttrValues <- 1:noUsed
    maxCharsV <- 0
    uA <- 1:noUsed
    for (iA in 1:noUsed) {
      usedAttrNames[iA] <- attrNames[usedAttr[iA]]
      uA[iA] <- attrValues[usedAttr[iA]]
      ## shorten overly precise numeric values; keep factors/strings as text
      if (is.numeric(uA[[iA]]) && uA[[iA]]!=floor(uA[[iA]]))
        usedAttrValues[[iA]] <- sprintf("%.3f",uA[[iA]])
      else if (is.factor(uA[[iA]]))
        usedAttrValues[[iA]] <- as.character(uA[[iA]])
      else
        usedAttrValues[[iA]] <- uA[[iA]]
      for (v in 1:length(usedAttrValues[[iA]]))
        maxCharsV <- max(maxCharsV, nchar(usedAttrValues[[iA]][[v]]))
    }
    if (is.null(avgAVexplainPos))
      avgAVexplainPos <- rep(0, length(expl))
    if (is.null(avgAVexplainNeg))
      avgAVexplainNeg <- rep(0, length(expl))
    # optionally rescale all displayed bars so their absolute sum equals normalizeTo
    if (normalizeTo > 0 && absSumUsed > 0){
      for(iA in 1:noUsed) {
        expl[usedAttr[[iA]] ] <- expl[usedAttr[[iA]] ] / absSumUsed * normalizeTo
        avgAVexplainPos[[usedAttr[[iA]] ]] <- avgAVexplainPos[[usedAttr[[iA]] ]] / absSumUsed * normalizeTo
        avgAVexplainNeg[[usedAttr[[iA]] ]] <- avgAVexplainNeg[[usedAttr[[iA]] ]] / absSumUsed * normalizeTo
      }
    }
    boxHeight <- 1.0
    chExp <- 1.0 ## character expansion for bar annotations
    xLim <- max(abs(expl), abs(avgAVexplainPos), abs(avgAVexplainNeg))
    par(xpd=NA, mgp=c(3,0.7,0), mar=c(5.5,7,5,7))
    plot(c(0, 0), c(1, noUsed), type = "l", xlim = c(-xLim, xLim), ylim = c(1.0, noUsed+0.5), xlab = explainDesc, ylab="", axes = F)
    text(xLim*1.09,(noUsed+0.4), labels=c("attribute value"), adj = c(0, 0))
    text(-xLim*1.09,(noUsed+0.4), labels=c("attribute"), adj = c(1, 0))
    atLabel <- atLabelComp(xLim)
    axis(1, at=atLabel, labels=atLabel)
    ## left y axis: attribute names, horizontal, shrunk when long
    anam <- as.character(usedAttrNames)
    lasA <- 1 ## horizontal
    cex.axisA <- 1
    maxCharsA <- max(nchar(anam))
    if (maxCharsA > 15) {
      cex.axisA <- 0.9
      if (maxCharsA > 20)
        cex.axisA <- 0.6
    }
    axis(2, at=boxHeight/8.0+c(1:noUsed), labels=usedAttrNames, las=lasA, cex.axis=cex.axisA)
    ## right y axis: the instance's attribute values
    ## (thresholds fixed to mirror the left axis; the original tested >25
    ## first, which made the 0.9 branch unreachable)
    lasV <- 1 ## horizontal
    cex.axisV <- 1
    if (maxCharsV > 15) {
      cex.axisV <- 0.9
      if (maxCharsV > 20)
        cex.axisV <- 0.6
    }
    axis(4, at=boxHeight/8.0+c(1:noUsed), labels=usedAttrValues, las=lasV, cex.axis=cex.axisV)
    # title with the model's prediction (and optionally the true class) below
    if (classValueName=="") { ## regression
      titleName <- sprintf("%s %s, %s\ninstance: %s, model: %s", instanceTitle, problemName, className, instanceName, modelName)
      subtitleName <- sprintf("%s = %.2f", className, pCXA)
    }
    else {
      titleName <- sprintf("%s %s, %s = %s\ninstance: %s, model: %s", instanceTitle, problemName, className, classValueName, instanceName, modelName)
      subtitleName <- sprintf("p(%s=%s) = %.2f", className, classValueName, pCXA)
    }
    if (trueClass!="") {
      if (classValueName=="") # regression
        tcStr <- sprintf("true %s=%.2f", className, as.numeric(trueClass))
      else tcStr <- sprintf("true %s=%s", className, trueClass)
      subtitleName <- paste(subtitleName, tcStr, sep="; ")
    }
    title(main = titleName, sub=subtitleName)
    for(iA in 1:noUsed) {
      y <- iA
      if (expl[usedAttr[[iA]]] >= 0.0) {
        # positive contribution: bar to the right
        rect(0, y, expl[usedAttr[[iA]]], y+boxHeight/4, col=colExplainPos)
        if (printPCXnA && !is.null(pCXnA)) {
          if (classValueName=="")
            labelTxt <- sprintf("f(x/A)=%.3f", pCXnA[usedAttr[iA]])
          else
            labelTxt <- sprintf("p(%s|x/A)=%.2f", classValueName, pCXnA[usedAttr[iA]])
          text(-xLim/100, y+boxHeight/8, labels = labelTxt, adj=c(1,0.5), cex=chExp, vfont=c("sans serif","plain"))
        }
      }
      else {
        # negative contribution: bar to the left
        rect(expl[usedAttr[[iA]]], y, 0, y+boxHeight/4, col=colExplainNeg)
        if (printPCXnA && !is.null(pCXnA)) {
          if (classValueName=="")
            labelTxt <- sprintf("f(x/A)=%.3f", pCXnA[usedAttr[iA]])
          else
            labelTxt <- sprintf("p(%s|x/A)=%.2f", classValueName, pCXnA[usedAttr[iA]])
          text(xLim/100, y+boxHeight/8, labels = labelTxt, adj=c(0,0.5), cex=chExp, vfont=c("sans serif","plain"))
        }
      }
      ## averages for the attribute's value, drawn as thinner bars above
      rect(0, y+2*boxHeight/8, avgAVexplainPos[usedAttr[iA]], y+3*boxHeight/8, col=colAvgPos)
      rect(avgAVexplainNeg[usedAttr[iA]], y+2*boxHeight/8, 0, y+3*boxHeight/8, col=colAvgNeg)
    }
  }
}
# Compute "nice" symmetric x-axis tick positions covering [-xLim, xLim].
# Candidate bounds 10/5/2 (scaled by a power of ten) are tried and the
# tightest one still covering xLim is chosen, together with a matching step.
atLabelComp<-function(xLim) {
  bounds <- c(10, 5, 2, 1)
  steps <- c(2, 1, 0.5)
  # rescale candidates by powers of ten until xLim lies in [bounds[4], bounds[1]]
  while (xLim > bounds[1]) {
    bounds <- bounds * 10
    steps <- steps * 10
  }
  while (xLim < bounds[4]) {
    bounds <- bounds / 10
    steps <- steps / 10
  }
  # pick the tightest bound that still covers xLim
  idx <- 3
  while (xLim > bounds[idx])
    idx <- idx - 1
  seq(from = -bounds[idx], to = bounds[idx], by = steps[idx])
}
# Return explanations of individual predictions computed with the IME
# sampling method (a sampling approximation of Shapley-value contributions).
#
# For each test instance and attribute the contribution is estimated as the
# average difference between predictions on sampled instances that do and do
# not contain the instance's value of that attribute.  Sampling proceeds in
# batches of `batchSize` until the confidence interval (error probability
# `pError`, tolerated error `err`) is narrow enough or `maxIter` is reached.
#
# model      a CoreModel classification or regression model
# testData   data frame with the instances to explain
# classValue class (index, factor, or name) whose probability is explained;
#            not used for regression models
# imeInfo    precomputed info; must contain $generator for random instances
# pError     acceptable probability that the estimate misses by more than err
# err        tolerated estimation error
# batchSize  number of samples drawn per iteration
# maxIter    upper bound on samples per (instance, attribute) pair
#
# Returns list(expl, pCXA, stddev, noIter): contributions, model predictions,
# standard deviations of the sampled differences, and iterations used.
ime <- function(model, testData, classValue=1, imeInfo, pError=0.05, err=0.05, batchSize=40, maxIter=1000) {
  if (! inherits(model, "CoreModel"))
    stop("Model shall be of class CoreModel.")
  isRegression <- model$noClasses == 0
  noInst <- nrow(testData)
  if (! isRegression) {
    class.lev <- model$class.lev
    # resolve classValue (index, factor, or class name) to a class index
    if (is.numeric(classValue)) {
      selClass <- factor(class.lev[classValue], levels=class.lev)
      classIdx <- classValue
    }
    else if (is.factor(classValue)) {
      selClass <- classValue
      classIdx <- as.integer(classValue)
    }
    else if (is.character(classValue)) {
      selClass <- factor(classValue, levels=class.lev)
      if (is.na(selClass))
        stop("Invalid classValue parameter supplied, shall be compatible with the model: ", classValue)
      classIdx <- as.integer(selClass)
    }
    else
      stop("Wrong type of classValue parameter supplied: ", classValue)
  }
  # NOTE(review): for regression models classIdx is never assigned; the calls
  # below rely on R's lazy argument evaluation (getPredictions presumably does
  # not evaluate it when noClasses == 0) -- confirm against getPredictions.
  newFormula <- reformulate(all.vars(model$formula)[-1])
  dat <- model.frame(newFormula, data=testData, na.action=na.pass)
  noAttr <- ncol(dat)
  # output structures: contributions, their standard deviations, iteration counts
  expl <- matrix(data=0, nrow=noInst, ncol=noAttr)
  stddev <- matrix(data=0, nrow=noInst, ncol=noAttr)
  iter <- matrix(data=0, nrow=noInst, ncol=noAttr)
  colnames(expl) <- colnames(stddev) <- names(dat)
  perm <- matrix(FALSE, nrow=batchSize, ncol=noAttr)
  batchMxSize <- batchSize * noAttr
  zSq <- abs(qnorm(pError/2))^2   # squared z-value of the (1-pError) confidence interval
  errSq <- err^2
  for (i in seq_len(nrow(dat))){
    inst <- dat[rep(i,batchSize),]
    for (a in seq_len(noAttr)) {
      noIter <- 0
      moreIterations <- TRUE
      while (moreIterations) {
        # perm[r, j] == TRUE means attribute j comes before a in the random
        # permutation, i.e. it keeps the explained instance's value
        perm[] <- sample(c(FALSE,TRUE), size=batchMxSize, replace=TRUE)
        inst1 <- newdata(imeInfo$generator, size=batchSize)[,colnames(expl)] # random instances
        perm[,a] <- FALSE            # the a-th attribute is handled explicitly below
        inst1[perm] <- inst[perm]    # preceding attributes take the instance's values
        inst2 <- inst1               # inst2: a-th and succeeding attributes stay random
        inst1[,a] <- dat[i,a]        # inst1: a-th attribute takes the instance's value
        f1 <- getPredictions(model, inst1, nLaplace=0, noClasses=0, classIdx)
        f2 <- getPredictions(model, inst2, nLaplace=0, noClasses=0, classIdx)
        diff <- f1-f2
        expl[i,a] <- expl[i,a] + sum(diff)               # running sum of differences
        noIter <- noIter + batchSize
        stddev[i,a] <- stddev[i,a] + sum(diff * diff)    # running sum of squares
        v2 <- stddev[i,a]/noIter - (expl[i,a]/noIter)^2  # sample variance of diff
        neededIter <- zSq * v2 / errSq                   # samples needed for desired precision
        if (neededIter <= noIter || noIter >= maxIter)
          moreIterations <- FALSE
      }
      # convert running sums to mean and standard deviation
      meanDiff <- expl[i,a] / noIter
      # BUG FIX: the original divided the already-averaged expl[i,a] by noIter
      # again inside the variance term, which deflated the reported deviation;
      # the variance is E[d^2] - (E[d])^2, matching v2 above
      stddev[i,a] <- sqrt(stddev[i,a]/noIter - meanDiff^2)
      expl[i,a] <- meanDiff
      iter[i,a] <- noIter
    }
  }
  pCXA <- getPredictions(model, dat, nLaplace=0, noClasses=0, classIdx)
  list(expl=expl, pCXA = pCXA, stddev=stddev, noIter=iter)
}
|
95e47eba1d5bcf8cfb00a92d5fbfd087d52f7232
|
94014ad2e9085e73eb3b95e97120ecb30859f088
|
/man/bin_scdata.Rd
|
83e0bcc3250ca6e3a9954db0591bc1018b0fc6c2
|
[] |
no_license
|
aarzalluz/scfilters
|
e491e42580c78c187dbc007629656b5768617b58
|
6c200fb49309003f147879787d6cc423931e61ff
|
refs/heads/master
| 2021-08-23T21:05:15.837834
| 2016-08-07T17:07:39
| 2016-08-07T17:07:39
| 60,845,930
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,973
|
rd
|
bin_scdata.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/binning.R
\name{bin_scdata}
\alias{bin_scdata}
\title{Bin genes by mean expression.}
\usage{
bin_scdata(dataset, method = c("window_number", "window_size"), parameter)
}
\arguments{
\item{dataset}{A list, containing the top window generated by \code{extract_top_genes}
as the first element, and the rest of undivided genes as the second.}
\item{method}{A string, indicating the method to be used to bin the genes by mean
expression.}
\item{parameter}{An integer. Indicates the numeric parameter to use in the previously
chosen method. Values are not restricted, but should be coherent with the method of choice.}
}
\value{
A data frame containing the binned genes.
}
\description{
Divides the genes that were not included in the top window in windows of the same size,
by their mean expression.
}
\details{
There are two binning methods available:
\itemize{
\item \code{"window_number"}: Divides the genes into the number of windows specified in
\code{parameter}, regardless of their size.
\item \code{"window_size"}: Divides the genes into windows of the size specified in
\code{parameter}, regardless of the number of windows generated.
}
This function uses the \code{ntile} function, in the \code{dplyr} package to assign a bin
number to each gene based on the value contained in the \code{mean} column, corresponding
to its mean expression. These are then added as the column \code{bin} using the \code{mutate}
function, also in the \code{dplyr} package.
\strong{Important note:} This function is designed to take the list output by the
\code{extract_top_window} function as an argument, operating only on the second element
of it.
Once the genes in it have been binned, both elements of the list are bound
together in a data frame and returned. The output is similar, but a new column \code{bin}
is added, which indicates the window number assigned to each gene.
}
|
56855c68e745700611bb16c2f9211323703c5c74
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/cba/examples/sdists.Rd.R
|
4a53db58e03ae6909f24f109d025ed783f2e0e0f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,265
|
r
|
sdists.Rd.R
|
library(cba)
### Name: sdists
### Title: Sequence Distance Computation
### Aliases: sdists
### Keywords: cluster

### ** Examples

### numeric data: edit distances between two numeric sequences
sdists(list(c(2,2,3),c(2,4,3))) # 2
# a zero substitution weight makes the single mismatch free
sdists(list(c(2,2,3),c(2,4,3)),weight=c(1,1,0,1)) # 1

### character data
# score matrix rewarding only exact matches (diagonal), i.e. scores
# correspond to the longest common subsequence
w <- matrix(-1,nrow=8,ncol=8) # weight/score matrix for
diag(w) <- 0 # longest common subsequence
colnames(w) <- c("",letters[1:7])
# three random strings over the alphabet a..g
x <- sapply(rbinom(3,64,0.5),function(n,x)
paste(sample(x,n,rep=TRUE),collapse=""),
colnames(w)[-1])
x
sdists(x,method="aw",weight=w)
sdists(x,x,method="aw",weight=w) # check: cross form gives the same matrix

## pairwise distances (x[i] vs rev(x)[i]) instead of the full cross product
sdists(x,rev(x),method="aw",weight=w,pairwise = TRUE)
# increasing diagonal scores turn this into an alignment score
diag(w) <- seq(0,7)
sdists(x,method="aw", weight=w) # global alignment
sdists(x,method="awl",weight=w) # local alignment

## empty strings
sdists("", "FOO")
sdists("", list(c("F","O","O")))
sdists("", list("")) # space symbol
sdists("", "abc", method="aw", weight=w)
sdists("", list(""), method="aw", weight=w)

### asymmetric weights: random negative off-diagonal scores
w[] <- matrix(-sample(0:5,64,TRUE),ncol=8)
diag(w) <- seq(0,7)
sdists(x,x,method="aw", weight=w)
sdists(x,x,method="awl",weight=w)

### missing values
sdists(list(c(2,2,3),c(2,NA,3)),exclude=NULL) # 2 (include anything)
sdists(list(c(2,2,3),c(2,NA,3)),exclude=NA) # NA
|
a2d56a7d7886bee4565b8ba40ce666df16f90c16
|
4adf5f9ca9f2565c9ccf0354777d9cfd6f53e7b7
|
/Regtrees.R
|
d34de4b1d09b776138d89ad86426a92c0f85963e
|
[] |
no_license
|
arch456/Big-City-Health-Data
|
04c3c3e3be162b9ac294dbe6de0ba46f8e91ddf2
|
46a18a9a90867b2d54a4c7bc1a9231e6253b2e8a
|
refs/heads/master
| 2022-04-23T15:31:53.258196
| 2020-04-25T23:46:52
| 2020-04-25T23:46:52
| 258,897,296
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 25,622
|
r
|
Regtrees.R
|
# Regression-tree analysis of binge-drinking indicators in the
# Big Cities Health Data Inventory: basic tree, pruning, bagging,
# random forest, and boosting, compared by test MSE.

# Load the dataset and inspect its structure
health.data <- read.csv("Big_Cities_Health_Data_Inventory.csv")
summary (health.data)

# ii) Data Pre-processing
library(dplyr)
# Extract the data for High School Students
drink.st <- health.data %>%
  filter(Indicator == "Percent of High School Students Who Binge Drank")
# Extract the data for Adults
drink.ad <- health.data %>%
  filter(Indicator == "Percent of Adults Who Binge Drank")
# Keep only the analysis columns (3-7)
drink.st <- drink.st[c(3:7)]
drink.ad <- drink.ad[c(3:7)]

#library(MASS)
library(tree)
set.seed(1)
# 50/50 train/test split of the student data
train <- sample(seq_len(nrow(drink.st)), nrow(drink.st)/2)
tree.drink.st <- tree(Value~., drink.st, subset=train)
summary(tree.drink.st)
plot(tree.drink.st)
text(tree.drink.st, pretty=0, cex = 0.6)
tree.drink.st

# Cross-validation to choose the tree size
cv.drink.st <- cv.tree(tree.drink.st)
plot(cv.drink.st$size, cv.drink.st$dev, type='b')

# Pruning
prune.drink.st <- prune.tree(tree.drink.st, best=5)
plot(prune.drink.st)
text(prune.drink.st, pretty=0)

# Making predictions on the held-out half
yhat <- predict(tree.drink.st, newdata=drink.st[-train,])
drink.st.test <- drink.st[-train, "Value"]
plot(yhat, drink.st.test)
abline(0,1)
# Test MSE of the single tree
mean((yhat-drink.st.test)^2)

# Bagging: random forest trying all 4 predictors at every split
library(randomForest)
set.seed(1)
bag.drink.st <- randomForest(Value~., data=drink.st, subset=train, mtry=4, ntree = 500, importance=TRUE)
bag.drink.st
yhat.bag <- predict(bag.drink.st, newdata=drink.st[-train,])
plot(yhat.bag, drink.st.test)
abline(0,1)
mean((yhat.bag-drink.st.test)^2)

# Random Forest
set.seed(1)
rf.drink.st <- randomForest(Value~., data=drink.st, subset=train, mtry=4, importance=TRUE)
yhat.rf <- predict(rf.drink.st, newdata=drink.st[-train,])
mean((yhat.rf-drink.st.test)^2)
# Same as Bagging because we use the same number of variables (mtry=4)
importance(rf.drink.st)
varImpPlot(rf.drink.st)
# This shows that Race..Ethnicity and Place are the most important variables

# Boosting
library(gbm)
set.seed(1)
boost.drink.st <- gbm(Value~., data=drink.st[train,], distribution="gaussian", n.trees=5000, interaction.depth=4)
summary(boost.drink.st)
boost.drink.st
plot(boost.drink.st)
# Place and Race..Ethnicity are the most important variables as seen above.
# Partial dependence plots show the marginal effect of these variables.
par(mfrow=c(1,2))
plot(boost.drink.st, x = drink.st$Race..Ethnicity, ylim = (0:15), type = "b")
plot(boost.drink.st, i="Place")
plot(boost.drink.st, i="Race..Ethnicity")
text(boost.drink.st, pretty = 0)
yhat.boost <- predict(boost.drink.st, newdata=drink.st[-train,], n.trees=5000)
mean((yhat.boost-drink.st.test)^2)
# Boosting again with a larger shrinkage parameter
boost.drink.st <- gbm(Value~., data=drink.st[train,], distribution="gaussian", n.trees=5000, interaction.depth=4, shrinkage=0.2, verbose=F)
yhat.boost <- predict(boost.drink.st, newdata=drink.st[-train,], n.trees=5000)
mean((yhat.boost-drink.st.test)^2)
# Best mean is obtained with Random Forest
#install.packages("ggplot2")
#library(ggplot2)
#ggplot(boost.drink.st, aes(Place, y)) +
#  geom_point() +
#  coord_flip()
---
title: "Tree_Analysis"
author: "Archana Chittoor"
output: word_document
---
The second type of analysis we perform on our data is Decision Tree analysis.
We have chosen the Regression tree as our method of data analysis because our response Value is numerical (quantitative).
The Regression tree has advantages over other Regression models. It is easier to interpret and has a good graphical representation. Our intention is to investigate the relationship between the response Value and the predictors Gender, Ethnicity, Year and Place by using Decision Trees. We will also implement Bagging, Boosting and Random Forests, selecting the best method that produces the minimum Mean Squared Error (MSE). In addition, we will compare and analyse the importance of each of our predictor variables in relationship to the Value indicator.
```{r}
# Load the dataset and run summary()
health.data = read.csv("Big_Cities_Health_Data_Inventory.csv")
summary (health.data)
```
### Extracting and Refining High School Students' data
We first extract the Smoking and Drinking data for high school students by applying the filters with Indicator as "Percent of High School Students Who Binge Drank" and "Percent of High School Students Who Currently Smoke" respectively. This gives us two datasets drink.st and smoke.st. Once the data is extracted, we will retain only the variables of interest and eliminate the others, for further analysis. Additionally, we also rename the column Race..Ethnicity to Ethnicity because it is not recommended to have special symbols in column names. Also, we would need to remove rows with cumulative data, such as "All" in Ethnicity and "U.S. Total" in Place as these do not help us in the interpretation of results.
```{r}
require(dplyr)
# Drinking data for High School Students
drink.st <- health.data %>%
filter(Indicator == "Percent of High School Students Who Binge Drank")
# Smoking data for Students
smoke.st <- health.data %>%
filter(Indicator == "Percent of High School Students Who Currently Smoke")
# Keep only the analysis columns (3-7) and copy Race..Ethnicity into a
# dot-free Ethnicity column, then drop column 3
# (presumably the original Race..Ethnicity -- verify column order)
drink.st <- drink.st[c(3:7)]
drink.st$Ethnicity <- drink.st$Race..Ethnicity
drink.st <- drink.st[-c(3)]
smoke.st <- smoke.st[c(3:7)]
smoke.st$Ethnicity <- smoke.st$Race..Ethnicity
smoke.st <- smoke.st[-c(3)]
```
### Extracting and Refining Adults' data
```{r}
# Drinking data for Adults
drink.ad <- health.data %>%
filter(Indicator == "Percent of Adults Who Binge Drank")
# Smoking data for Adults
smoke.ad <- health.data %>%
filter(Indicator == "Percent of Adults Who Currently Smoke")
# Keep only the analysis columns (3-7) and copy Race..Ethnicity into a
# dot-free Ethnicity column, then drop column 3
# (presumably the original Race..Ethnicity -- verify column order)
drink.ad <- drink.ad[c(3:7)]
drink.ad$Ethnicity <- drink.ad$Race..Ethnicity
drink.ad <- drink.ad[-c(3)]
smoke.ad <- smoke.ad[c(3:7)]
smoke.ad$Ethnicity <- smoke.ad$Race..Ethnicity
smoke.ad <- smoke.ad[-c(3)]
# Drop rows with missing Value (found only in smoke.ad)
# NOTE(review): this compares against the string "NA", not is.na();
# it only works if Value was read as character -- verify
smoke.ad <- smoke.ad %>%
filter(Value != "NA")
```
We will be performing our entire analysis on these four datasets which have been generated based on our Questions of Interest.
### a) Response (variable of interest) with the type and units:
For all four data sets that we work on, **Value** is the response or variable of interest. It is a Numerical (or quantitative) variable. It has no units but rather a numerical indicator about a particular health condition that we are interested in.
### b) Explanatory/grouping variable(s) with the type and units:
The explanatory variables for the four datasets (drink.st, smoke.st, drink.ad and smoke.ad) are:
* Year (type: Date)
* Gender (Factor with 3 levels - Male, Female, Both)
* Ethnicity (Factor with 9 levels such as Native American, Asian/PI and so on )
* Place (Factor with 29 levels)
We will not use the other columns like Indicator. Category, Indicator, BCBH.Requested.Methodology, Source , Methods and Notes in the data as they do not add any value and we obtain no new relationships or dependencies when these are taken into account.
### c) How Regression Trees apply to our analysis:
We use Regression trees to investigate the relationships in our data as per the below questions of interest:
* i) What are the major factors causing smoking and drinking problems among High School students in the most urban cities of the United States? How much are these conditions influenced by the place, ethnicity, and gender of the students?
* ii) Similarly, how much effect do predictors like place, gender and ethnicity have on smoking and drinking problems among adults in US’s biggest cities?
To address the above questions, we need to find relationships/ dependencies between our response **Value** and the predictors given by **Place, Gender, Year and Ethnicity**. As the response is numerical, it makes sense to use Regression trees to generate trees that examine our data. We will implement Bagging, Random Forest and Boosting to reduce the Mean Squared Error. Furthermore, we can determine which of the variables are the most important, and list them down according to their significance.
**Limitations of using Regression Trees on our data:**
We are limited by the number of variables which is four. Due to this, Random Forest would be same as Bagging because we need to use all four variables in both, and anything less than that will not give us relevant outputs.
However, Regression trees prove useful for analyzing the data when the response is numerical as mentioned above. They provide an easy interpretation and a good graphical representation as well.
Describe (shortly) the methods you choose to apply and how they are related to your problem
statement. Discuss possible limitations related to the data or selected methods.
d) State hypotheses in words and using appropriate notation (if appropriate).
e) List appropriate data conditions (if applicable). Test any (if possible).
## Analysis
### Basic Regression Trees
We first create basic Regression trees for each of our four datasets **drink.st, smoke.st, drink.ad and smoke.ad**.
#### i) Drinking data for Students
```{r}
require(tree)
set.seed(1)
## Create the Training dataset
train = sample(1:nrow(drink.st), nrow(drink.st)/2)
tree.drink.st=tree(Value~.,drink.st,subset=train)
summary(tree.drink.st)
```
```{r}
plot(tree.drink.st)
text(tree.drink.st,pretty=0, cex = 0.6)
tree.drink.st
```
We will prune the tree now.
##### Pruning:
```{r}
cv.drink.st=cv.tree(tree.drink.st)
plot(cv.drink.st$size,cv.drink.st$dev,type='b')
```
```{r}
prune.drink.st=prune.tree(tree.drink.st,best=5)
plot(prune.drink.st)
text(prune.drink.st,pretty=0, cex = 0.6)
prune.drink.st
```
**Interpretation:**
The tree after pruning to 5 terminal nodes seems to be easier to interpret and has a better graphical representation. The best predictor seems to be Place because it is used for the initial split, where the Place is Baltimore, Los Angeles, San Diego on one side and all other cities (25 cities) are on the other side. Ethnicity is the next best predictor used and the tree is split depending on it being Asian/PI, Black, Hispanic, Multiracial on one side and all other ethnicities on the other side. The tree() function has used only Place and Ethnicity for building the Regression tree. In addition, we can see that the predictions have higher values on the left sub-tree as compared to the other side, which is as expected.
##### Making Predictions on test data:
```{r}
yhat=predict(tree.drink.st,newdata=drink.st[-train,])
drink.st.test = drink.st[-train,"Value"]
plot(yhat,drink.st.test)
abline(0,1)
```
##### Mean Squared Error
```{r}
mean((yhat-drink.st.test)^2)
```
The error rate is quite high and we need to implement Bagging, Random Forest or Boosting to reduce the error and see if we can obtain a better fit.
##### Bagging
```{r}
require(randomForest)
set.seed(1)
bag.drink.st=randomForest(Value~.,data=drink.st,subset=train,mtry=4,ntree = 500, importance=TRUE)
bag.drink.st
yhat.bag = predict(bag.drink.st,newdata=drink.st[-train,])
plot(yhat.bag, drink.st.test)
abline(0,1)
mean((yhat.bag-drink.st.test)^2)
```
Bagging reduces the error rate significantly and it is computed as 14.59.
##### Random Forest:
```{r}
set.seed(1)
rf.drink.st=randomForest(Value~.,data=drink.st,subset=train,mtry=4,importance=TRUE )
yhat.rf = predict(rf.drink.st,newdata=drink.st[-train,])
mean((yhat.rf-drink.st.test)^2)
```
Random Forest gives the same MSE as Bagging because both are equivalent in this case ( due to same value of mtry).
##### Boosting
```{r}
require(gbm)
set.seed(1)
boost.drink.st=gbm(Value~.,data=drink.st[train,],distribution="gaussian",n.trees=5000,interaction.depth=4)
summary(boost.drink.st)
```
Place and Ethnicity are the most important variables as seen above. We can also produce partial dependence plots for these two variables. The plots below show marginal effect of selected variables on the response.
```{r}
par(mfrow=c(1,2))
plot(boost.drink.st,i="Place", type = "l")
plot(boost.drink.st,i="Ethnicity", type = "l")
yhat.boost=predict(boost.drink.st,newdata=drink.st[-train,],n.trees=5000)
mean((yhat.boost-drink.st.test)^2)
boost.drink.st=gbm(Value~.,data=drink.st[train,],distribution="gaussian",n.trees=5000,interaction.depth=4,shrinkage=0.2,verbose=F)
yhat.boost=predict(boost.drink.st,newdata=drink.st[-train,],n.trees=5000)
mean((yhat.boost-drink.st.test)^2)
```
The MSE when we perform Boosting is more than that of Bagging, 23.07 when we use default Shrinkage Parameter and 40.94 when the Shrinkage Parameter is increased to 0.2 .
Therefore, we choose Regression Tree with Bagging as the best model as it generates the least MSE.
##### Importance of Variables:
```{r}
importance(bag.drink.st)
varImpPlot(bag.drink.st)
```
**Conclusion:**
As seen above the most important predictor is **Place** and the next best predictor is **Ethnicity**.
On average, when we examine the plots generated after Boosting, we find that the cities **Miami, Florida and San Antonio, TX** have the highest problem of Binge Drinking among High School students. Cities such as **Los Angeles, CA and San Diego County, CA** have the least indicator values leading to the inference that these cities seem to have least binge drinking problems among students.
When it comes to ethnicities, we find that White community has the highest drinking rate among students and Black, Asian/PI have the lowest rates.
#### ii) Smoking data for Students
```{r}
set.seed(1)
## Create the Training dataset
train = sample(1:nrow(smoke.st), nrow(smoke.st)/2)
tree.smoke.st=tree(Value~.,smoke.st,subset=train)
summary(tree.smoke.st)
```
```{r}
plot(tree.smoke.st)
text(tree.smoke.st,pretty=0, cex = 0.6)
tree.smoke.st
```
We will perform Pruning on the tree now.
##### Pruning:
```{r}
cv.smoke.st=cv.tree(tree.smoke.st)
plot(cv.smoke.st$size,cv.smoke.st$dev,type='b')
```
```{r}
prune.smoke.st=prune.tree(tree.smoke.st,best=7)
plot(prune.smoke.st)
text(prune.smoke.st,pretty=0, cex = 0.6)
prune.smoke.st
```
**Interpretation:**
The tree after pruning to 7 terminal nodes seems to be easier to interpret and has a better graphical representation. The best predictor seems to be Ethnicity in this case because it is used for the initial split, where the Ethnicity is Asian/PI or Black on one side and all other ethnicities are on the other side. Place seems to be the next best predictor used and the tree is split with Miami, New York, San Francisco on the higher side and all others on the other lower side. The Year is also used to split the data on the left sub-tree.
##### Making Predictions on test data:
```{r}
yhat=predict(tree.smoke.st,newdata=smoke.st[-train,])
smoke.st.test = smoke.st[-train,"Value"]
plot(yhat,smoke.st.test)
abline(0,1)
```
##### Mean Squared Error
```{r}
mean((yhat-smoke.st.test)^2)
```
The error rate is not bad and we can implement Bagging, Random Forest or Boosting to reduce the error and see if we can obtain a better fit.
##### Bagging
```{r}
require(randomForest)
set.seed(1)
# Bagging: randomForest with mtry = 4 considers every candidate split variable,
# so this is equivalent to bagged regression trees (500 trees).
bag.smoke.st=randomForest(Value~.,data=smoke.st,subset=train,mtry=4,ntree = 500, importance=TRUE)
bag.smoke.st
# Predict on the held-out half and plot predicted vs observed (y = x line for reference).
yhat.bag = predict(bag.smoke.st,newdata=smoke.st[-train,])
plot(yhat.bag, smoke.st.test)
abline(0,1)
# Test-set mean squared error.
mean((yhat.bag-smoke.st.test)^2)
```
Bagging reduces the error rate significantly and it is computed as 12.81.
##### Random Forest:
```{r}
set.seed(1)
rf.smoke.st=randomForest(Value~.,data=smoke.st,subset=train,mtry=4,importance=TRUE )
yhat.rf = predict(rf.smoke.st,newdata=smoke.st[-train,])
mean((yhat.rf-smoke.st.test)^2)
```
Random Forest gives the same MSE as Bagging because both are equivalent in this case ( due to same value of mtry).
##### Boosting
```{r}
require(gbm)
set.seed(1)
boost.smoke.st=gbm(Value~.,data=smoke.st[train,],distribution="gaussian",n.trees=5000,interaction.depth=4)
summary(boost.smoke.st)
```
Place and Ethnicity are the most important variables as seen above. We can also produce partial dependence plots for these two variables. The plots below show marginal effect of selected variables on the response.
```{r}
par(mfrow=c(1,2))
plot(boost.smoke.st,i="Place", type = "l")
plot(boost.smoke.st,i="Ethnicity", type = "l")
yhat.boost=predict(boost.smoke.st,newdata=smoke.st[-train,],n.trees=5000)
mean((yhat.boost-smoke.st.test)^2)
boost.smoke.st=gbm(Value~.,data=smoke.st[train,],distribution="gaussian",n.trees=5000,interaction.depth=4,shrinkage=0.2,verbose=F)
yhat.boost=predict(boost.smoke.st,newdata=smoke.st[-train,],n.trees=5000)
mean((yhat.boost-smoke.st.test)^2)
```
The MSE when we perform Boosting is more than that of Bagging, 16.09 when we use default Shrinkage Parameter and 34.99 when the Shrinkage Parameter is increased to 0.2 .
Therefore, we choose Regression Tree with Bagging as the best model as it generates the least MSE.
##### Importance of Variables:
```{r}
importance(bag.smoke.st)
varImpPlot(bag.smoke.st)
```
As seen above the most important predictor is **Place** and the next best predictor is **Ethnicity**.
Also, as seen in the plots generated after Boosting, we find that **Miami and Seattle** have higher smoking rates among high school students whereas the rates are lowest in **Detroit**. Similarly, when it comes to ethnicities, smoking rates are highest in Multiracial section of society and lowest in Black, Asian/PI communities.
#### iii) Drinking data for Adults
```{r}
require(tree)
set.seed(1)
## Create the Training dataset
train = sample(1:nrow(drink.ad), nrow(drink.ad)/2)
tree.drink.ad=tree(Value~.,drink.ad,subset=train)
summary(tree.drink.ad)
```
```{r}
plot(tree.drink.ad)
text(tree.drink.ad,pretty=0, cex = 0.6)
tree.drink.ad
```
We will prune the tree now.
##### Pruning:
```{r}
cv.drink.ad=cv.tree(tree.drink.ad)
plot(cv.drink.ad$size,cv.drink.ad$dev,type='b')
```
```{r}
prune.drink.ad=prune.tree(tree.drink.ad,best=8)
plot(prune.drink.ad)
text(prune.drink.ad,pretty=0, cex = 0.6)
prune.drink.ad
```
**Interpretation:**
The tree after pruning to 8 terminal nodes seems to be easier to interpret and has a better graphical representation. The best predictor seems to be Place followed by ethnicity where Asian/PI, Black are on the lower side. Year also seems to be important as the year 2013 seems to have higher rates of drinking issues among adults.
##### Making Predictions on test data:
```{r}
yhat=predict(tree.drink.ad,newdata=drink.ad[-train,])
drink.ad.test = drink.ad[-train,"Value"]
plot(yhat,drink.ad.test)
abline(0,1)
```
##### Mean Squared Error
```{r}
mean((yhat-drink.ad.test)^2)
```
The error rate is quite high and we need to implement Bagging, Random Forest or Boosting to reduce the error and see if we can obtain a better fit.
##### Bagging
```{r}
require(randomForest)
set.seed(1)
bag.drink.ad=randomForest(Value~.,data=drink.ad,subset=train,mtry=4,ntree = 500, importance=TRUE)
bag.drink.ad
yhat.bag = predict(bag.drink.ad,newdata=drink.ad[-train,])
plot(yhat.bag, drink.ad.test)
abline(0,1)
mean((yhat.bag-drink.ad.test)^2)
```
Bagging reduces the error rate significantly and it is computed as 67.55 which is still high.
##### Random Forest:
```{r}
set.seed(1)
rf.drink.ad=randomForest(Value~.,data=drink.ad,subset=train,mtry=4,importance=TRUE )
yhat.rf = predict(rf.drink.ad,newdata=drink.ad[-train,])
mean((yhat.rf-drink.ad.test)^2)
```
Random Forest gives the same MSE as Bagging because both are equivalent in this case ( due to same value of mtry).
##### Boosting
```{r}
require(gbm)
set.seed(1)
boost.drink.ad=gbm(Value~.,data=drink.ad[train,],distribution="gaussian",n.trees=5000,interaction.depth=4)
summary(boost.drink.ad)
```
Again, we see that **Place and Ethnicity** are the most important variables as seen above. We can also produce partial dependence plots for these two variables. The plots below show marginal effect of selected variables on the response.
```{r}
par(mfrow=c(1,2))
plot(boost.drink.ad,i="Place")
plot(boost.drink.ad,i="Ethnicity")
yhat.boost=predict(boost.drink.ad,newdata=drink.ad[-train,],n.trees=5000)
mean((yhat.boost-drink.ad.test)^2)
boost.drink.ad=gbm(Value~.,data=drink.ad[train,],distribution="gaussian",n.trees=5000,interaction.depth=4,shrinkage=0.2,verbose=F)
yhat.boost=predict(boost.drink.ad,newdata=drink.ad[-train,],n.trees=5000)
mean((yhat.boost-drink.ad.test)^2)
```
There is no improvement in MSE when we perform Boosting. It is more than that of Bagging, 79.02 when we use default Shrinkage Parameter and 83.89 when the Shrinkage Parameter is increased to 0.2 .
Therefore, we choose Regression Tree with Bagging as the best model as it generates the least MSE.
##### Importance of Variables:
```{r}
importance(bag.drink.ad)
varImpPlot(bag.drink.ad)
```
Again, it is observed that the most important predictor is **Place** and the next best predictor is **Ethnicity**.
#### iv) Smoking data for Adults
```{r}
require(tree)
set.seed(1)
## Create the Training dataset
train = sample(1:nrow(smoke.ad), nrow(smoke.ad)/2)
tree.smoke.ad=tree(Value~.,smoke.ad,subset=train)
summary(tree.smoke.ad)
```
```{r}
plot(tree.smoke.ad)
text(tree.smoke.ad,pretty=0, cex = 0.6)
tree.smoke.ad
```
We will perform Pruning on the tree now.
##### Pruning:
```{r}
cv.smoke.ad=cv.tree(tree.smoke.ad)
plot(cv.smoke.ad$size,cv.smoke.ad$dev,type='b')
```
```{r}
prune.smoke.ad=prune.tree(tree.smoke.ad,best=7)
plot(prune.smoke.ad)
text(prune.smoke.ad,pretty=0, cex = 0.6)
prune.smoke.ad
```
**Interpretation:**
The tree after pruning to 7 terminal nodes seems to be easier to interpret and has a better graphical representation. The best predictor seems to be Place because it is used for the initial split, where the Place is Baltimore, Los Angeles, San Diego on one side and all other cities (25 cities) are on the other side. Ethnicity is the next best predictor used and the tree is split depending on it being Asian/PI, Black, Hispanic, Multiracial on one side and all other ethnicities on the other side. The tree() function has used only Place and Ethnicity for building the Regression tree. In addition, we can see that the predictions have higher values on the left sub-tree as compared to the other side, which is as expected.
##### Making Predictions on test data:
```{r}
yhat=predict(tree.smoke.ad,newdata=smoke.ad[-train,])
smoke.ad.test = smoke.ad[-train,"Value"]
plot(yhat,smoke.ad.test)
abline(0,1)
```
##### Mean Squared Error
```{r}
mean((yhat-smoke.ad.test)^2)
```
The error rate is quite high and we need to implement Bagging, Random Forest or Boosting to reduce the error and see if we can obtain a better fit.
##### Bagging
```{r}
require(randomForest)
set.seed(1)
bag.smoke.ad=randomForest(Value~.,data=smoke.ad,subset=train,mtry=4,ntree = 500, importance=TRUE)
bag.smoke.ad
yhat.bag = predict(bag.smoke.ad,newdata=smoke.ad[-train,])
plot(yhat.bag, smoke.ad.test)
abline(0,1)
mean((yhat.bag-smoke.ad.test)^2)
```
Bagging reduces the error rate significantly and it is computed as 14.59.
##### Random Forest:
```{r}
set.seed(1)
rf.smoke.ad=randomForest(Value~.,data=smoke.ad,subset=train,mtry=4,importance=TRUE )
yhat.rf = predict(rf.smoke.ad,newdata=smoke.ad[-train,])
mean((yhat.rf-smoke.ad.test)^2)
```
Random Forest gives the same MSE as Bagging because both are equivalent in this case ( due to same value of mtry).
##### Boosting
```{r}
require(gbm)
set.seed(1)
boost.smoke.ad=gbm(Value~.,data=smoke.ad[train,],distribution="gaussian",n.trees=5000,interaction.depth=4)
summary(boost.smoke.ad)
```
Place and Ethnicity are the most important variables as seen above. We can also produce partial dependence plots for these two variables. The plots below show marginal effect of selected variables on the response.
```{r}
par(mfrow=c(1,2))
plot(boost.smoke.ad,i="Place", type = "l")
plot(boost.smoke.ad,i="Ethnicity", type = "l")
yhat.boost=predict(boost.smoke.ad,newdata=smoke.ad[-train,],n.trees=5000)
mean((yhat.boost-smoke.ad.test)^2)
boost.smoke.ad=gbm(Value~.,data=smoke.ad[train,],distribution="gaussian",n.trees=5000,interaction.depth=4,shrinkage=0.2,verbose=F)
yhat.boost=predict(boost.smoke.ad,newdata=smoke.ad[-train,],n.trees=5000)
mean((yhat.boost-smoke.ad.test)^2)
```
The MSE when we perform Boosting is more than that of Bagging, 23.07 when we use default Shrinkage Parameter and 40.94 when the Shrinkage Parameter is increased to 0.2 .
Therefore, we choose Regression Tree with Bagging as the best model as it generates the least MSE.
##### Importance of Variables:
```{r}
importance(bag.smoke.ad)
varImpPlot(bag.smoke.ad)
```
As seen above the most important predictor is Place and the next best predictor is Ethnicity.
|
243cdd01c58a0dd603cbda96f49147fa42b98b55
|
253ef857dab883fe064bf5513095e6b266515298
|
/utils/vocanoplot.and.pearson.correlation.R
|
2abac61521bafef03f2b8a5ec5e15e340d9e72ed
|
[] |
no_license
|
sunhuaiyu/snakeflow
|
3343ecfd386dabed62f040455f13058019806150
|
e6836b5c98740613ad1d851ee6199fde0b89f22e
|
refs/heads/master
| 2023-08-23T23:53:52.894782
| 2021-10-28T17:48:13
| 2021-10-28T17:48:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,953
|
r
|
vocanoplot.and.pearson.correlation.R
|
# Compare SOX1-EGFP High/Low DEGs against wild-type day-8 patterning samples.
library(DESeq2)
library(pheatmap)
library(RColorBrewer)
library(ape)
# NOTE(review): setwd() makes the script depend on the launch directory --
# consider project-relative paths instead.
setwd("./projects/SOX1_low_high/")
load("./temp/deseq2.dds.RData")
# rename the loaded objects so the second load() below does not clobber them
s1EGFPgroup = group
s1EGFPdds = dds
s1EGFPdf = df
s1EGFPntd = ntd
# load wt samples deseq2 results (overwrites group/dds/df/ntd with the wt versions)
load("~/projects/neural_patterning/temp/deseq2.dds.RData")
## extract High-vs-Low DEGs from the SOX1-EGFP dataset
res <- results(s1EGFPdds, contrast=c("condition",'High' ,'Low'))
#res <- results(s1EGFPdds)
resOrdered <- res[order(res$padj),]
resOrdered = as.data.frame(resOrdered)
# sample sheets: wt phenotype info and SOX1-EGFP sample info
pheno = read.csv("~/projects/neural_patterning/wgcna/sampleinfo.wt.csv")
pheno2 = read.table("./sample_info_single.txt", sep = " ", comment.char = '#')
# keep non-ES wt samples, then restrict to day 8
patt_pheno = pheno[pheno$treat != 'ES',]
patt_pheno_d8 = patt_pheno[patt_pheno$time == 'D8',]
# DEG index: |log2FC| > 1 and padj < 0.05
indx = which(abs(res$log2FoldChange)> 1 & res$padj < 0.05)
# Extract the log-normalized counts (ntd) for the High-vs-Low DEGs.
s1EGFP_degs_ntd = s1EGFPntd[indx,]
s1EGFP_degs_names = rownames(s1EGFP_degs_ntd)
## extract the wt day-8 ntd values for the same genes
wt_ntd = ntd[s1EGFP_degs_names, c(as.character(patt_pheno_d8$alias))]
# combine the two data frames: 18 wt day-8 columns + 6 SOX1-EGFP columns
wt_s1EGFP_ntd = cbind(wt_ntd, s1EGFP_degs_ntd)
# harmonize column names: CTRL -> WT, Untreated -> CT0.0
coln = colnames(wt_s1EGFP_ntd)
coln = gsub("CTRL", "WT", coln)
coln = gsub("Untreated", "CT0.0", coln)
colnames(wt_s1EGFP_ntd) = coln
# One group label per column of wt_s1EGFP_ntd (24 labels).
# BUGFIX: the original vector ended with a trailing comma ("SOX1_high",)),
# which is an empty argument and makes c() error at runtime.
group_names = c("IWP2","IWP2","CT0.0","CT0.0","CT0.4","CT0.4",
                "CT0.8","CT0.8","CT1.0","CT1.0","CT2.0","CT2.0",
                "CT3.0","CT3.0","CT4.0","CT4.0","CT4.0RA","CT4.0RA",
                "SOX1_low","SOX1_low","SOX1_low",
                "SOX1_high","SOX1_high","SOX1_high")
### average the two wt replicates per condition (adjacent column pairs 1..18)
group_wt_ntd=data.frame(row.names = rownames(wt_s1EGFP_ntd))
for (i in seq(1,18,2)){
# debug: show which pair of replicate columns is being averaged
print(coln[c(i,i+1)])
temp = rowMeans(wt_s1EGFP_ntd[,coln[c(i,i+1)]])
names(temp) = group_names[i]
group_wt_ntd = cbind(group_wt_ntd,temp)
}
names(group_wt_ntd) = group_names[seq(1,18,2)]
# average the three SOX1-EGFP replicates per condition (cols 1:3 -> Low, 4:6 -> High)
group_s1_ntd=data.frame(row.names = rownames(wt_s1EGFP_ntd))
temp1 = rowMeans(s1EGFP_degs_ntd[,1:3])
temp2 = rowMeans(s1EGFP_degs_ntd[,4:6])
group_s1_ntd = cbind(group_s1_ntd,temp1,temp2)
names(group_s1_ntd) = c("Low","High")
# replicate-averaged matrix: wt conditions followed by Low/High
group_wt_s1_ntd = cbind(group_wt_ntd, group_s1_ntd)
### plotting correlation
# save the combined per-replicate ntd matrix for the DEGs
write.csv(wt_s1EGFP_ntd, "S1EGFP.HighLow.DEGS.ntd.csv",quote = F)
# sample-sample correlation matrices: per replicate and replicate-averaged
corrs_pearson = cor(wt_s1EGFP_ntd)
corrs_spearman = cor(wt_s1EGFP_ntd, method='spearman')
corrs_pearson_mean = cor(group_wt_s1_ntd)
corrs_spearman_mean = cor(group_wt_s1_ntd, method='spearman')
# quick on-screen heatmaps
pheatmap(corrs_pearson, main="Pearson")
pheatmap(corrs_spearman, main="Spearman")
pheatmap(corrs_pearson_mean, main="Pearson")
pheatmap(corrs_spearman_mean, main="Spearman")
# red-blue diverging palette, then write each heatmap to PDF
cmap = colorRampPalette(rev(brewer.pal(n = 7, name ="RdBu")))(100)
pheatmap(corrs_pearson, color = cmap, main="Pearson", filename = "S1EGFP.HighLow.and.WT.Day8.pearson.pdf" )
pheatmap(corrs_spearman, color = cmap,main="Spearman", filename="S1EGFP.HighLow.and.WT.Day8.spearman.pdf")
pheatmap(corrs_pearson_mean, color = cmap, main="Pearson", filename = "S1EGFP.HighLow.and.WT.Day8.pearson.average.pdf" )
pheatmap(corrs_spearman_mean, color = cmap,main="Spearman", filename="S1EGFP.HighLow.and.WT.Day8.spearman.average.pdf")
# hierarchical clustering of samples
s21.dist=dist(t(wt_s1EGFP_ntd),method="euclidean")
out.hclust=hclust(s21.dist,method="complete") # cluster samples by euclidean distance
plot(out.hclust)
# cluster again on 1 - Spearman correlation of the row-scaled matrix
y = wt_s1EGFP_ntd
y = y[rowSums(y)>0,]   # drop all-zero genes before scaling
mydatascale=t(scale(t(y)))
hc=hclust(as.dist(1-cor(mydatascale, method="spearman")), method="complete")
plot(hc)
# convert to dendrogram for alternative layouts
hcd = as.dendrogram(hc)
plot(hcd, horiz = TRUE, type ="triangle", xlab='Height') # type="rectangle"
# Unrooted tree layout (ape)
plot(as.phylo(hc), type = "unrooted", cex = 0.6,
     no.margin = TRUE)
plot(as.phylo(hc), type = "fan")
# Radial layout
plot(as.phylo(hc), type = "radial")
## scatter plot of degs
indx = which(abs(res$log2FoldChange)> 1 & res$padj < 0.05)
load("./temp/txi.salmon.RData")
# TPM abundances of the DEGs
degs_tpm = txi.salmon$abundance[indx,]
## volcano plot
# 1. Load plotting package:
library(ggplot2)
# 2. Prepare data: results as a data frame, dropping rows with any NA
res2 = as.data.frame(res)
res3 = res2[complete.cases(res2),]
## annotate gene names
library('EnsDb.Hsapiens.v86')
# strip the version suffix from the Ensembl gene ids
gene_id <- gsub('\\.[0-9a-zA-Z]+', '', rownames(res3))
# map Ensembl gene_id to gene_name for plot labels
edb <- EnsDb.Hsapiens.v86
maps_names <- mapIds(edb, keys = gene_id, column="GENENAME",
                     keytype = "GENEID", multiVals = "first")
res3$gene_name <- maps_names
# classify each gene as Up/Down/Not significant
# NOTE(review): the 0.0507 padj cutoff differs from the 0.05 used for `indx`
# above -- confirm whether it is intentional or a typo for 0.05.
res3$change = as.factor(ifelse(abs(res3$log2FoldChange)>1 & res3$padj < 0.0507,
ifelse(res3$log2FoldChange > 0,'Up','Down'),'Not'))
res3 = res3[complete.cases(res3),]
# plot title reporting up/down counts
this_tile = paste0("The number of up genes are", nrow(res3[res3$change == 'Up',]),
"\nThe number of down genes are", nrow(res3[res3$change == 'Down',]))
## genes to label on the volcano plot
shown=strsplit("SOX1 OTX1 OTX2 LMX1A LMX1B EN1 WNT3A WNT1 HOXA2 HOXA1 GBX2 PHOX2B EPHA3"," ")[[1]]
#shown=strsplit("SOX1 OTX1 OTX2 LMX1A LMX1B EN1 WNT3A WNT1 HOXA2 HOXA1 PHOX2B EPHA3"," ")[[1]]
res3$shown_name = as.factor(ifelse(res3$gene_name %in% shown, 'yes','no'))
res4 = res3[order(res3$padj),]
# 3. Set up axes: x = log2 fold change, y = -log10 adjusted p-value, colored by change
library(ggrepel)
r04=ggplot(data = res3,aes(x=log2FoldChange,y=-1*log10(padj), color=change))
# 4. Render the volcano plot, labeling only the selected genes
# NOTE(review): theme_set() sets the theme globally; adding its return value into
# the plot chain is unusual -- theme_bw(base_size=16) alone is likely intended.
r04 = r04+geom_point(alpha=0.5, size=1.75,stroke = 0)+
theme_set(theme_set(theme_bw(base_size=16)))+
geom_text_repel(data=subset(res3, shown_name == 'yes'),
aes(label=gene_name),
segment.size = 0.2,
segment.color = "grey50")+
xlab(expression(log[2]('Fold Change')))+
ylab(expression(-log[10]('adjusted p-value')))+
scale_color_manual(values=c('blue','black','red'))+
ggtitle(this_tile) +
theme(plot.title = element_text(size=16,hjust=0.5, face="bold"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank())
#geom_hline(yintercept=1.3)+geom_vline(xintercept=c(-1,1))
print(r04)
### move label to left and right
r06=ggplot(data = res3,aes(x=log2FoldChange,y=-1*log10(padj), color=change))
# 4. Render the volcano plot, pushing down-gene labels left (x < -5)
#    and up-gene labels right (x > 5) of the point cloud
r06 = r06+geom_point(alpha=0.5, size=1.75,stroke = 0)+
theme_set(theme_set(theme_bw(base_size=16)))+
geom_text_repel(data=subset(res3, shown_name == 'yes'& log2FoldChange < 0),
aes(label=gene_name),
xlim = c(NA, -5),
segment.size = 0.2,
segment.color = "grey50")+
geom_text_repel(data=subset(res3, shown_name == 'yes'& log2FoldChange > 0),
aes(label=gene_name),
xlim = c(5, NA),
segment.size = 0.2,
segment.color = "grey50")+
xlab(expression(log[2]('Fold Change')))+
ylab(expression(-log[10]('adjusted p-value')))+
scale_color_manual(values=c('blue','black','red'))+
ggtitle(this_tile) +
theme(plot.title = element_text(size=16,hjust=0.5, face="bold"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank())
#geom_hline(yintercept=1.3)+geom_vline(xintercept=c(-1,1))
print(r06)
# 6. Axis ranges and titles: xlim()/ylim() set ranges, labs(title=, x=, y=) adds labels,
#    e.g. r03xy = addcolor + xlim(-4,4) + ylim(0,30) +
#         labs(title="Volcanoplot", x=expression(log2(log2FoldChange)), y=expression(-log10(pvalue)))
# 8. Add threshold lines (horizontal at -log10(0.05) ~= 1.3, vertical at log2FC = +/-1)
# BUGFIX: the original used an undefined plot object `volcano`, which would resolve
# to the built-in datasets::volcano matrix and fail; use the last plot `r06` instead.
addline=r06+geom_hline(yintercept=1.3)+geom_vline(xintercept=c(-1,1))
addline
# 9. Save the figure (filename, plot, width, height):
ggsave("volcano8.png",r06,width=8,height=8)
## res3$sign: gene name for strongly significant genes, empty string otherwise
res3$sign <- ifelse(res3$padj < 0.05 & abs(res3$log2FoldChange) > 2,res3$gene_name,'')
# NOTE(review): leftover debug loop? It prints OVERLAPPING column pairs (i, i+1)
# for i = 1..17, unlike the seq(1,18,2) stride used when averaging replicates
# above -- confirm intent. Also print("\n") prints the literal string '\n';
# cat("\n") would be needed for a blank line.
for (i in 1:17){
print(coln[c(i,i+1)])
print("\n")
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.