blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ef228d7040e78a15e1f38fde1ac519e0dc8d72a1
|
c36c96cf50cab02edfbab770c89bdddceed89542
|
/ui.R
|
177979a3f8c6c5a9c1bfdb00b2754e33523a9d01
|
[] |
no_license
|
n3iii/DDP
|
99e8f8bac66177b301ae8d69059cae7222bc8fae
|
edeea6e149ddf9131589e408b33027036192e1b4
|
refs/heads/master
| 2020-04-05T22:54:03.678344
| 2015-08-19T10:46:53
| 2015-08-19T10:46:53
| 41,006,453
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,033
|
r
|
ui.R
|
# Shiny UI for the "Stock Moving Slope" app.
# Sidebar: ticker picker plus sliders controlling the data window, the moving
# average length, and the standard-deviation cutoff, plus report downloads.
# Main panel: a Welcome tab documenting the app and four output tabs whose
# render functions live in the matching server.R.
# FIX: corrected two user-facing typos ("Ticker the the ..." and
# "Action allow you ...") in the Welcome tab bullet list.
shinyUI(fluidPage(
  titlePanel("Stock Moving Slope"),
  sidebarLayout(
    sidebarPanel(
      # Ticker universe is fixed to five large-cap stocks.
      selectInput(inputId = "ticker",
                  label = "Stock Ticker:",
                  choices = c('AAPL', 'IBM', 'JNJ', 'WMT', 'YUM'),
                  selected = 'IBM'),
      # Row offset into the data file; row 1 is the most recent day.
      sliderInput("sRow",
                  "Row in File to Start:",
                  min = 1,
                  max = 500,
                  value = 1),
      # Number of days (rows) to include, counting back from the start row.
      sliderInput("dRange",
                  "Date Range:",
                  min = 30,
                  max = 500,
                  value = 50),
      # Window length for the moving average of the slope.
      sliderInput("mRange",
                  "Moving Average Range:",
                  min = 5,
                  max = 50,
                  value = 20),
      # Multiplier applied to the standard deviation for the yellow/pink cutoff.
      sliderInput("sdCoef",
                  "Standard Deviation Multiplier:",
                  min = 0.5,
                  max = 3,
                  value = 1),
      # "sub" takes the short (pink) side away from the gain table.
      radioButtons("gain", "Gain Table Action:",
                   c("Add High/Low" = "add",
                     "Subtract High/Low" = "sub")),
      downloadButton('downloadPDF', 'Download PDF'),
      downloadButton('downloadDoc', 'Download Word Doc')
    ),
    mainPanel(
      tabsetPanel(
        tabPanel('Welcome',
          h3('Stock Moving Slope'),
          p('The purpose of this app is to use skills learned in this
            specialization to look at the stock market. In particular,
            we want to calculate a moving average of the slope of
            the closing price for a selected stock. This may help show
            if a stock is trending or staying within a range.'),
          p('The sidebar contains the following selectors.'),
          tags$ul(
            tags$li("Ticker, the abbreviation for the stock"),
            tags$li("Start Row, most recent day to look at"),
            tags$li("Range of Rows (days) preceding the start row."),
            tags$li("Moving Average Range for the slope of the close line."),
            tags$li("Standard Deviation Multiplier for the yellow/pink cutoff."),
            tags$li("Gain Table Action allows you to go short on the gain table.")),
          p('The Closing Price Plot tab shows a plot of the closing price
            for the selected stock over the range of days selected, starting from
            the start day selected.'),
          p('The Slope Plot tab is the moving average of the slope, starting
            with the most recent day and going back for the Moving Average Range.
            It then takes the next most recent day, doing the same thing.
            This continues for the selected range of days, so that the plot
            will show the incremental moving changes of the slope for the day
            range selected.'),
          p('The Slope/Close Plot tab overlays the closing price line with information
            from the Slope Plot. Any day the slope plot is one standard
            deviation or more above zero, that day is colored in yellow. Any
            day the slope plot is one standard deviation or more below zero
            is colored in pink. The Standard Deviation Multiplier allows you to
            vary the yellow/pink cutoff from 0.5 to 3 standard deviations.'),
          p('The Gain Table tab is a chart of what would happen if you used the
            Slope/Close plot to buy (take a long position) on the first day a
            stock went yellow and sell the stock on the first non-yellow day. It
            does the same for the pink, but the radio button lets you subtract
            the pink from the yellow (taking a short position on the pinks).')
        ),
        tabPanel("Closing Price Plot",
          textOutput('close_text'),
          'Notice that the days run from higher to lower values.
          That is because the number represents the row in which
          that data is contained.',
          plotOutput("close_plot", height = "300px")
        ),
        tabPanel("Slope Plot",
          p('The slope plot moves about zero as it goes from a positive slope
            to negative and back again. The yellow line is the standard deviation
            times its multiplier above zero. The pink line shows it below zero.'),
          plotOutput("slope_plot", height = "300px")
        ),
        tabPanel("Slope/Close Plot",
          p('Any day on which the moving average of the slope is equal to or
            greater than one standard deviation, that day has a yellow line.
            Any day on which a negative slope is equal to or
            greater than one standard deviation, that day has a pink line.'),
          plotOutput("slope_close_plot", height = "300px")
        ),
        tabPanel("Gain Table",
          p('Although the purpose for exploring the moving average of the slope
            is to determine if the stock is trending or in a range,
            it is interesting to see what would happen if you used the slope ranges
            from the previous chart as buy and sell signals. For a given day range
            you can vary the length of the moving average, the standard deviation
            cutoff and adding or subtracting the pink buys and sells.'),
          tableOutput("gain_table")
        )
      )
    )
  )
))
|
8604f7b7aac97374f2defdf4ed691482e74c7be6
|
f45ed0bf62703a21f49cb497e73583eb324c0f77
|
/lib/gbs2bed_ames282.R
|
aa1ee8d82e9b4e66dc170d8f5d60e08ac164a00b
|
[] |
no_license
|
yangjl/Misc
|
1a4271a89751b4f25033c4df4dd304ac78811471
|
2b95f0149c6cd4e90c3d830347a8365887ca9477
|
refs/heads/master
| 2021-01-16T22:03:36.383306
| 2017-05-23T22:42:23
| 2017-05-23T22:42:23
| 29,626,233
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,654
|
r
|
gbs2bed_ames282.R
|
### Jinliang Yang
### April 23th, 2015
###
# Convert an AllZea GBS HapMap (.hmp.txt) table to BED5+ format:
#   1. read the HapMap table,
#   2. reorder columns to chr/start/end/snpid/alleles + genotype columns
#      (the rename below defines this mapping; column 4 is used twice so
#      that start == end before the 0-based shift),
#   3. shift start to 0-based (BED convention),
#   4. keep only biallelic SNPs (allele strings like "A/C", nchar == 3),
#   5. recode IUPAC ambiguity codes (heterozygous calls) to "N",
#   6. write the result as a tab-separated file.
#
# Args:
#   gbsfile: path to the input HapMap (.hmp.txt) file.
#   outfile: path of the BED5+ output file.
# Side effects: writes `outfile`; loads data.table; emits progress messages.
gbs2bed_ames <- function(gbsfile="/group/jrigrp4/AllZeaGBSv2.7impV5/ZeaGBSv27_Ames282.hmp.txt",
                         outfile="/group/jrigrp4/AllZeaGBSv2.7impV5/ZeaGBSv27_Ames.bed5"){
    ### read in GBS file
    # BUG FIX: fread() comes from data.table, but the library() call was
    # commented out, so the function failed unless the caller had already
    # loaded data.table. Restore the load the code clearly depends on.
    library("data.table")
    ames <- fread(gbsfile, header=TRUE, sep="\t")
    ames <- as.data.frame(ames)

    ### change to BED5+ format
    gbs <- ames
    gbs <- gbs[, c(3, 4, 4, 1, 2, 5, 12:ncol(gbs))]
    names(gbs)[1:6] <- c("chr", "start", "end", "snpid", "alleles", "nchar")
    gbs$start <- gbs$start - 1  # BED uses 0-based, half-open intervals
    message("Changed to BED5+ format and start filtering ...")

    ### filter out SNPs that contain multiple alleles
    # Biallelic allele strings look like "A/C" (exactly 3 characters).
    gbs$nchar <- nchar(as.character(gbs$alleles))
    subg <- subset(gbs, nchar == 3)
    subg <- subg[, -6]  # drop the helper nchar column
    message(sprintf("Remaining [ %s ] sites with two variations!", nrow(subg)))
    message("Start to IUPAC=>N transforming, recoding and writing ...")

    ### change IUPAC ambiguity codes (heterozygous calls) to N
    # M = A/C, R = A/G, W = A/T, S = C/G, Y = C/T, K = G/T
    for (code in c("M", "R", "W", "S", "Y", "K")) {
        subg[subg == code] <- "N"
    }
    write.table(subg, outfile, sep="\t", row.names=FALSE, col.names=TRUE,
                quote=FALSE)
    message("DONE!")
}
|
23b5f77b568559c83eb2e511b1a81eff27cb1449
|
6a28ba69be875841ddc9e71ca6af5956110efcb2
|
/Linear_Algebra_by_Jim_Hefferon/CH5/EX2.10/Ex5_2_10.R
|
63487d6c6731de8ac9d4a866f15d7c87211d3b6d
|
[] |
permissive
|
FOSSEE/R_TBC_Uploads
|
1ea929010b46babb1842b3efe0ed34be0deea3c0
|
8ab94daf80307aee399c246682cb79ccf6e9c282
|
refs/heads/master
| 2023-04-15T04:36:13.331525
| 2023-03-15T18:39:42
| 2023-03-15T18:39:42
| 212,745,783
| 0
| 3
|
MIT
| 2019-10-04T06:57:33
| 2019-10-04T05:57:19
| null |
UTF-8
|
R
| false
| false
| 539
|
r
|
Ex5_2_10.R
|
#Example 2.10, chapter 5, section III.2, page 414
#package used matlib v0.9.1
#Github repository of matlib: https://github.com/friendly/matlib
#installation and loading library
#install.packages("matlib")
library("matlib")
# N is a nilpotent 4x4 matrix (ones just below the diagonal);
# P is an invertible change-of-basis matrix.
N <- matrix(c(0,1,0,0,0,0,1,0,0,0,0,1,0,0,0,0), ncol = 4)
P <- matrix(c(1,0,1,0,0,2,1,0,1,1,1,0,0,0,0,1), ncol = 4)
# A = P N P^-1 is similar to N, hence also nilpotent.
A <- P %*% N %*% Inverse(P)
A
#The new matrix, A, is nilpotent; its fourth power is the zero matrix.
# BUG FIX: `N^4` in R is ELEMENTWISE exponentiation, not matrix power
# (for this 0/1 matrix, N^4 == N). The matrix fourth power is needed,
# i.e. N %*% N %*% N %*% N, which is the zero matrix.
N4 <- N %*% N %*% N %*% N
x <- P %*% N4 %*% Inverse(P)
#since (PNP^-1)^4 = P * N^4 * P^-1
y <- det(x)
all.equal(y, 0)
|
1f73c408e8948b1990746c713bb14fae45b3911a
|
5072176fd6b49aefdef14049a3d1ba313da95ee3
|
/man/reducolor.Rd
|
4a1025422b54ff71fa47609c2b0850143a8e6696
|
[
"MIT"
] |
permissive
|
UBC-MDS/rimager
|
c806457feefd0b46488e83e924ebc84970ebd986
|
d2323be373f0e065a37e73ff502ab6251989ec6a
|
refs/heads/master
| 2021-01-16T12:43:37.370616
| 2020-03-26T21:49:34
| 2020-03-26T21:49:34
| 243,405,240
| 0
| 4
|
NOASSERTION
| 2020-03-26T21:49:35
| 2020-02-27T01:40:59
|
R
|
UTF-8
|
R
| false
| true
| 878
|
rd
|
reducolor.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/reducolor.R
\name{reducolor}
\alias{reducolor}
\title{Reduce image color to either 2 or 8 colors for cartoonized effect}
\usage{
reducolor(input_path, style, output_path = NULL)
}
\arguments{
\item{input_path}{character; the path of the input image file}
\item{style}{either a character vector of two colors chosen from
c("white", "black", "red", "green", "blue", "yellow", "pink", "aqua"),
or the string "eight" for an eight-color effect}
\item{output_path}{character; if not NULL, the modified image will be saved
to the provided file path}
}
\value{
modified image array
}
\description{
Reduce image color to either 2 or 8 colors for cartoonized effect
}
\examples{
input_path <- system.file("tmp_image", "mandrill.jpg", package = "rimager")
new <- reducolor(input_path, c("black", "white"), "new.jpg")
OpenImageR::imageShow(new)
}
|
52d155406f363d9e6f7f49d6fad80ba49f49166c
|
50e3cbaea158c93651cd0377f6d2e6faa8f5273b
|
/man/fa_read.Rd
|
ebe5a8df0a8ab573a8618a6facf4a36b2e11dadf
|
[] |
no_license
|
cran/seqmagick
|
b24261d186e15d7c3443770f8e89ace3fefd4def
|
e27d1022d7e56a033e7e22888de66345689afec0
|
refs/heads/master
| 2023-07-11T11:55:31.539756
| 2023-06-27T04:10:02
| 2023-06-27T04:10:02
| 236,890,609
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 474
|
rd
|
fa_read.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read.R
\name{fa_read}
\alias{fa_read}
\title{fa_read}
\usage{
fa_read(file, type = "auto")
}
\arguments{
\item{file}{fasta file}
\item{type}{one of 'DNA', 'RNA', 'AA', 'unknown' or 'auto'}
}
\value{
BStringSet object
}
\description{
read fasta file
}
\examples{
fa_file <- system.file("extdata/HA.fas", package="seqmagick")
fa_read(fa_file)
}
\author{
Guangchuang Yu
}
|
bfbda0d967e9cf672433840be713142b2924bc2f
|
67de204b7f0550def8eea7d6ca605f43aed653fc
|
/app/lib/analysis/plots/comment.R
|
267515b55e8f80e0e15405a3672f5d628ab92405
|
[] |
no_license
|
andymeneely/sira-nlp
|
b1b1bb8a783adac6a69001565d49d8357a4dd8c5
|
b027a5d7407043b6541e2aa02704a7239f109485
|
refs/heads/master
| 2021-01-11T05:29:16.209735
| 2017-12-09T17:13:19
| 2017-12-09T17:13:19
| 69,055,241
| 1
| 1
| null | 2017-06-19T18:42:12
| 2016-09-23T19:36:51
|
Python
|
UTF-8
|
R
| false
| false
| 14,169
|
r
|
comment.R
|
# Initialize Boilerplate ----
# Shared plotting helpers (GetTheme, FILLCOLORS, COMMENT.* lookup tables)
# and the GetComment*() query functions are defined in the sourced files
# below — TODO confirm exact provenance against boilerplate.R / data/comment.R.
source("boilerplate.R")
source("data/comment.R")
InitGlobals()
## Yngve ====
### Query Data
# Per-comment Yngve score; presumably one comment_id column plus metric
# columns (the join/select below relies on comment_id existing).
dataset <- GetCommentYngve()
### Plot
metric <- "Comment Yngve (Log Scale)"
title <- "Distribution of Comment Yngve"
# Attach the comment type, drop the id, and melt to long form so each
# metric column becomes a (type, variable, value) row for faceting.
plot.dataset <- dataset %>%
  inner_join(., COMMENT.TYPE, by = "comment_id") %>%
  select(-comment_id) %>%
  melt(., id.vars = c("type"))
# Render
# Boxplots by comment type on a log10 y-axis, one facet per metric column.
png("diagrams/comment.yngve.png", width = 500, height = 400)
ggplot(plot.dataset, aes(x = type, y = value, fill = type)) +
  geom_boxplot() +
  scale_x_discrete(labels = COMMENT.TYPE.LABELS) +
  scale_y_log10() +
  scale_fill_manual(values = FILLCOLORS) +
  facet_wrap(~ variable, nrow = 1, scales = "free",
             labeller = as_labeller(COMMENT.METRIC.LABELS)) +
  labs(title = title, x = "Comment Type", y = metric) +
  GetTheme() +
  theme(legend.position = "none")
dev.off()
## Frazier ====
### Query Data
# Per-comment Frazier score, keyed by comment_id.
dataset <- GetCommentFrazier()
### Plot
metric <- "Comment Frazier"
title <- "Distribution of Comment Frazier"
# Same reshape as the Yngve section: join type, drop id, melt to long form.
plot.dataset <- dataset %>%
  inner_join(., COMMENT.TYPE, by = "comment_id") %>%
  select(-comment_id) %>%
  melt(., id.vars = c("type"))
# Render (linear y-axis, unlike the log-scaled Yngve plot)
png("diagrams/comment.frazier.png", width = 500, height = 400)
ggplot(plot.dataset, aes(x = type, y = value, fill = type)) +
  geom_boxplot() +
  scale_x_discrete(labels = COMMENT.TYPE.LABELS) +
  scale_fill_manual(values = FILLCOLORS) +
  facet_wrap(~ variable, nrow = 1, scales = "free",
             labeller = as_labeller(COMMENT.METRIC.LABELS)) +
  labs(title = title, x = "Comment Type", y = metric) +
  GetTheme() +
  theme(legend.position = "none")
dev.off()
## Propositional Density ====
### Query Data
# Per-comment propositional density (p-density), keyed by comment_id.
dataset <- GetCommentPdensity()
### Plot
metric <- "Comment p-density"
title <- "Distribution of Comment p-density"
plot.dataset <- dataset %>%
  inner_join(., COMMENT.TYPE, by = "comment_id") %>%
  select(-comment_id) %>%
  melt(., id.vars = c("type"))
# Render
png("diagrams/comment.pdensity.png", width = 500, height = 400)
ggplot(plot.dataset, aes(x = type, y = value, fill = type)) +
  geom_boxplot() +
  scale_x_discrete(labels = COMMENT.TYPE.LABELS) +
  scale_fill_manual(values = FILLCOLORS) +
  facet_wrap(~ variable, nrow = 1, scales = "free",
             labeller = as_labeller(COMMENT.METRIC.LABELS)) +
  labs(title = title, x = "Comment Type", y = metric) +
  GetTheme() +
  theme(legend.position = "none")
dev.off()
## Content Density ====
### Query Data
# Per-comment content density (c-density), keyed by comment_id.
dataset <- GetCommentCdensity()
### Plot
metric <- "Comment c-density (Sqrt Scale)"
title <- "Distribution of Comment c-density"
# Join the comment type, drop the id, melt to long form for faceting.
plot.dataset <- dataset %>%
  inner_join(., COMMENT.TYPE, by = "comment_id") %>%
  select(-comment_id) %>%
  melt(., id.vars = c("type"))
# Render: boxplots per comment type on a square-root y-axis.
png("diagrams/comment.cdensity.png", width = 500, height = 400)
ggplot(plot.dataset, aes(x = type, y = value, fill = type)) +
  geom_boxplot() +
  scale_x_discrete(labels = COMMENT.TYPE.LABELS) +
  scale_y_sqrt() +
  scale_fill_manual(values = FILLCOLORS) +
  facet_wrap(~ variable, nrow = 1, scales = "free",
             labeller = as_labeller(COMMENT.METRIC.LABELS)) +
  labs(title = title, x = "Comment Type", y = metric) +
  GetTheme() +
  theme(legend.position = "none")
dev.off()
## Sentiment ====
### Query Data
# Per-comment sentiment, keyed by comment_id; plotted as a percentage.
dataset <- GetCommentSentiment()
### Plot
metric <- "Comment Sentiment"
title <- "Distribution of Comment Sentiment"
plot.dataset <- dataset %>%
  inner_join(., COMMENT.TYPE, by = "comment_id") %>%
  select(-comment_id) %>%
  melt(., id.vars = c("type"))
# Render
# BUG FIX: png() was called with empty `width = , height = ` arguments,
# silently falling back to the 480x480 device default; use the explicit
# 500x400 size that every other boxplot section in this file uses.
png("diagrams/comment.sentiment.png", width = 500, height = 400)
ggplot(plot.dataset, aes(x = type, y = value, fill = type)) +
  geom_boxplot() +
  scale_x_discrete(labels = COMMENT.TYPE.LABELS) +
  scale_y_continuous(labels = scales::percent) +
  scale_fill_manual(values = FILLCOLORS) +
  facet_wrap(~ variable, nrow = 1, scales = "free",
             labeller = as_labeller(COMMENT.METRIC.LABELS)) +
  labs(title = title, x = "Comment Type", y = metric) +
  GetTheme() +
  theme(legend.position = "none")
dev.off()
## Uncertainty ====
### Query Data
# Per-comment uncertainty flags: boolean has_doxastic / has_epistemic /
# has_conditional / has_investigative / has_uncertainty columns keyed by
# comment_id (the filters below rely on exactly these columns).
dataset <- GetCommentUncertainty()
### Plot
metric <- "% Comments"
title <- "Distribution of Comment Uncertainty"
interim.dataset <- dataset %>%
  inner_join(., COMMENT.TYPE, by = "comment_id") %>%
  select(-comment_id)
# alpha.dataset accumulates, per comment type, the count of comments for
# which each flag is TRUE; built one flag at a time and joined on "type".
# NOTE(review): inner_join silently drops a comment type entirely if it
# has zero TRUE rows for ANY flag — confirm that cannot happen upstream.
alpha.dataset <- interim.dataset %>%
  filter(has_doxastic == T) %>%
  group_by(type, has_doxastic) %>%
  summarize(num_doxastic = n()) %>%
  select(type, num_doxastic)
alpha.dataset <- interim.dataset %>%
  filter(has_epistemic == T) %>%
  group_by(type, has_epistemic) %>%
  summarize(num_epistemic = n()) %>%
  select(type, num_epistemic) %>%
  inner_join(., alpha.dataset, by = "type")
alpha.dataset <- interim.dataset %>%
  filter(has_conditional == T) %>%
  group_by(type, has_conditional) %>%
  summarize(num_conditional = n()) %>%
  select(type, num_conditional) %>%
  inner_join(., alpha.dataset, by = "type")
alpha.dataset <- interim.dataset %>%
  filter(has_investigative == T) %>%
  group_by(type, has_investigative) %>%
  summarize(num_investigative = n()) %>%
  select(type, num_investigative) %>%
  inner_join(., alpha.dataset, by = "type")
alpha.dataset <- interim.dataset %>%
  filter(has_uncertainty == T) %>%
  group_by(type, has_uncertainty) %>%
  summarize(num_uncertain = n()) %>%
  select(type, num_uncertain) %>%
  inner_join(., alpha.dataset, by = "type")
# beta.dataset: total comments per type (the denominator).
beta.dataset <- interim.dataset %>%
  group_by(type) %>%
  summarise(num_comments = n())
# Convert each flag count to a proportion of all comments of that type,
# then melt so each flag becomes one dodged bar per comment type.
plot.dataset <- inner_join(alpha.dataset, beta.dataset, by = "type") %>%
  mutate(has_doxastic = num_doxastic / num_comments) %>%
  mutate(has_epistemic = num_epistemic / num_comments) %>%
  mutate(has_conditional = num_conditional / num_comments) %>%
  mutate(has_investigative = num_investigative / num_comments) %>%
  mutate(has_uncertainty = num_uncertain / num_comments) %>%
  select(type, has_doxastic, has_epistemic, has_conditional, has_investigative,
         has_uncertainty) %>%
  melt(., id.vars = c("type"))
# Render
# Dodged bar chart of the five uncertainty proportions per comment type,
# with in-bar percentage labels.
png("diagrams/comment.uncertainty.png", width = 800, height = 600)
ggplot(plot.dataset, aes(x = type, y = value, fill = variable)) +
  geom_bar(stat = "identity", position = "dodge") +
  geom_text(aes(label = scales::percent(value)), vjust = "inward",
            position = position_dodge(width=0.9)) +
  scale_x_discrete(labels = COMMENT.TYPE.LABELS) +
  scale_y_continuous(labels = scales::percent) +
  scale_fill_manual(name = "Uncertainty", values = FILLCOLORS,
                    labels = COMMENT.METRIC.LABELS) +
  labs(title = title, x = "Comment Type", y = metric) +
  GetTheme()
dev.off()
## Shared boxplot renderer ----
# The Politeness, Formality, Informativeness, and Implicature sections were
# four byte-identical copies of the same pipeline; factor them into one
# documented helper.
#
# Renders a faceted boxplot of a per-comment metric, split by comment type.
#
# Args:
#   dataset: data frame with a comment_id column plus metric columns.
#   metric:  y-axis label.
#   title:   plot title.
#   outfile: path of the PNG file to write.
#
# Side effect: writes a 500x400 PNG to `outfile`.
# BUG FIX: the original four png() calls all had empty `width = , height = `
# arguments (silently falling back to the 480x480 device default); the
# helper uses the explicit 500x400 size of the other boxplot sections.
RenderCommentBoxplot <- function(dataset, metric, title, outfile) {
  # Attach comment type, drop the id, melt to long form for faceting.
  plot.dataset <- dataset %>%
    inner_join(., COMMENT.TYPE, by = "comment_id") %>%
    select(-comment_id) %>%
    melt(., id.vars = c("type"))
  png(outfile, width = 500, height = 400)
  p <- ggplot(plot.dataset, aes(x = type, y = value, fill = type)) +
    geom_boxplot() +
    scale_x_discrete(labels = COMMENT.TYPE.LABELS) +
    scale_fill_manual(values = FILLCOLORS) +
    facet_wrap(~ variable, nrow = 1, scales = "free",
               labeller = as_labeller(COMMENT.METRIC.LABELS)) +
    labs(title = title, x = "Comment Type", y = metric) +
    GetTheme() +
    theme(legend.position = "none")
  print(p)  # explicit print: ggplot objects do not autoprint inside a function
  dev.off()
}
## Politeness ====
RenderCommentBoxplot(GetCommentPoliteness(),
                     "Comment Politeness",
                     "Distribution of Comment Politeness",
                     "diagrams/comment.politeness.png")
## Formality ====
RenderCommentBoxplot(GetCommentFormality(),
                     "Comment Formality",
                     "Distribution of Comment Formality",
                     "diagrams/comment.formality.png")
## Informativeness ====
RenderCommentBoxplot(GetCommentInformativeness(),
                     "Comment Informativeness",
                     "Distribution of Comment Informativeness",
                     "diagrams/comment.informativeness.png")
## Implicature ====
RenderCommentBoxplot(GetCommentImplicature(),
                     "Comment Implicature",
                     "Distribution of Comment Implicature",
                     "diagrams/comment.implicature.png")
## Project Experience ====
### Query Data
# Per-comment project experience; includes an author column that must be
# dropped along with comment_id before melting.
dataset <- GetProjectExperience()
### Plot
metric <- "Project Experience (Sqrt Scale)"
title <- "Distribution of Project Experience"
plot.dataset <- dataset %>%
  inner_join(., COMMENT.TYPE, by = "comment_id") %>%
  select(-comment_id, -author) %>%
  melt(., id.vars = c("type"))
# Render
# BUG FIX: png() was called with empty `width = , height = ` arguments
# (480x480 device default); use the explicit 500x400 size of the other
# boxplot sections. Same fix in the two sections below.
png("diagrams/comment.projectexperience.png", width = 500, height = 400)
ggplot(plot.dataset, aes(x = type, y = value, fill = type)) +
  geom_boxplot() +
  scale_x_discrete(labels = COMMENT.TYPE.LABELS) +
  scale_y_sqrt() +
  scale_fill_manual(values = FILLCOLORS) +
  facet_wrap(~ variable, nrow = 1, scales = "free",
             labeller = as_labeller(COMMENT.METRIC.LABELS)) +
  labs(title = title, x = "Comment Type", y = metric) +
  GetTheme() +
  theme(legend.position = "none")
dev.off()
## Module Experience ====
### Query Data
dataset <- GetModuleExperience()
### Plot
metric <- "Module Experience"
title <- "Distribution of Module Experience"
plot.dataset <- dataset %>%
  inner_join(., COMMENT.TYPE, by = "comment_id") %>%
  select(-comment_id) %>%
  melt(., id.vars = c("type"))
# Render (png size fix, see above)
png("diagrams/comment.moduleexperience.png", width = 500, height = 400)
ggplot(plot.dataset, aes(x = type, y = value, fill = type)) +
  geom_boxplot() +
  scale_x_discrete(labels = COMMENT.TYPE.LABELS) +
  scale_fill_manual(values = FILLCOLORS) +
  facet_wrap(~ variable, nrow = 1, scales = "free",
             labeller = as_labeller(COMMENT.METRIC.LABELS)) +
  labs(title = title, x = "Comment Type", y = metric) +
  GetTheme() +
  theme(legend.position = "none")
dev.off()
## File Experience ====
### Query Data
dataset <- GetFileExperience()
### Plot
metric <- "File Experience"
title <- "Distribution of File Experience"
plot.dataset <- dataset %>%
  inner_join(., COMMENT.TYPE, by = "comment_id") %>%
  select(-comment_id) %>%
  melt(., id.vars = c("type"))
# Render (png size fix, see above)
png("diagrams/comment.fileexperience.png", width = 500, height = 400)
ggplot(plot.dataset, aes(x = type, y = value, fill = type)) +
  geom_boxplot() +
  scale_x_discrete(labels = COMMENT.TYPE.LABELS) +
  scale_fill_manual(values = FILLCOLORS) +
  facet_wrap(~ variable, nrow = 1, scales = "free",
             labeller = as_labeller(COMMENT.METRIC.LABELS)) +
  labs(title = title, x = "Comment Type", y = metric) +
  GetTheme() +
  theme(legend.position = "none")
dev.off()
## Bug Familiarity ====
### Query Data
# Per-comment boolean is_bugfamiliar flag keyed by comment_id.
dataset <- GetBugFamiliarity()
### Plot
metric <- "Bug Familiarity"
title <- "Distribution of Bug Familiarity"
interim.dataset <- dataset %>%
  inner_join(., COMMENT.TYPE, by = "comment_id") %>%
  select(-comment_id)
# Count comments per (type, familiarity) cell ...
alpha.dataset <- interim.dataset %>%
  group_by(type, is_bugfamiliar) %>%
  summarize(alpha_num_comments = n())
# ... and total comments per type (the denominator).
beta.dataset <- interim.dataset %>%
  group_by(type) %>%
  summarize(beta_num_comments = n())
# Proportion of familiar/unfamiliar comments within each type.
plot.dataset <- inner_join(alpha.dataset, beta.dataset, by = "type") %>%
  mutate(pct_comments = alpha_num_comments / beta_num_comments) %>%
  select(type, is_bugfamiliar, pct_comments)
# Render
# BUG FIX: png() had empty `width = , height = ` arguments (480x480
# device default); use the 800x600 size of the file's other dodged bar
# chart (Uncertainty) so the in-bar percentage labels stay readable.
png("diagrams/comment.bugfamiliarity.png", width = 800, height = 600)
ggplot(plot.dataset, aes(x = type, y = pct_comments, fill = is_bugfamiliar)) +
  geom_bar(stat = "identity", position = "dodge") +
  geom_text(aes(label = scales::percent(pct_comments)), vjust = "inward",
            position = position_dodge(width=0.9)) +
  scale_x_discrete(labels = COMMENT.TYPE.LABELS) +
  scale_y_continuous(labels = scales::percent) +
  scale_fill_manual(name = metric, values = FILLCOLORS,
                    labels = COMMENT.METRIC.LABELS) +
  labs(title = title, x = "Comment Type", y = "% Comments") +
  GetTheme()
dev.off()
## Number of Sentences ====
### Query Data
# Per-comment sentence counts, keyed by comment_id.
dataset <- GetCommentLength()
### Plot
metric <- "# Sentences (Log Scale)"
title <- "Distribution of Number of Sentences"
# Build the long-form plotting frame step by step: attach the comment
# type, drop the id column, then melt each metric column into
# (type, variable, value) rows.
typed <- inner_join(dataset, COMMENT.TYPE, by = "comment_id")
typed <- select(typed, -comment_id)
plot.dataset <- melt(typed, id.vars = c("type"))
# Render: boxplots by comment type on a log10 y-axis, one facet per metric.
png("diagrams/comment.length.png", width = 500, height = 400)
ggplot(plot.dataset, aes(x = type, y = value, fill = type)) +
  geom_boxplot() +
  scale_x_discrete(labels = COMMENT.TYPE.LABELS) +
  scale_y_log10() +
  scale_fill_manual(values = FILLCOLORS) +
  facet_wrap(~ variable, nrow = 1, scales = "free",
             labeller = as_labeller(COMMENT.METRIC.LABELS)) +
  labs(title = title, x = "Comment Type", y = metric) +
  GetTheme() +
  theme(legend.position = "none")
dev.off()
|
ce8981d949f5927d05723e7109e74181e7361f90
|
503900569f8fe6ff34202e12f6dad9a42bd908d7
|
/transpose/app.R
|
312da1ca37c1ffcfbc5096ef24ed6bb72de99a1c
|
[] |
no_license
|
nickriches/transpose
|
0663a174bb10e03676832b347b8b644886a82905
|
9a730931f33543c072a601e6b3c22ff6b1fdd319
|
refs/heads/master
| 2022-09-22T06:12:56.866311
| 2020-06-04T17:20:11
| 2020-06-04T17:20:11
| 268,739,067
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 37,650
|
r
|
app.R
|
library(shiny)
library(knitr) # To prepare Rmarkdown instructions
library(tidyverse) # For data manipulation
library(readtext) # Read in .doc and .docx files
library(udpipe) # Part-of-speech-tagger
library(tools) # To get file extension
library(DT) # To create a datatable
library(colourpicker)
library(googleLanguageR)
library(tokenizers)
library(stringdist)
# library(shinyalert)
# library(shinyjs)
# library(V8)
# Language lookup: langs.csv must provide a lang_long column holding the
# human-readable language names offered in the UI.
langs <- read_csv("langs.csv")
lang_list <- langs$lang_long
# Default highlight colour per part-of-speech word class, read row by row
# from colours.csv (column 2 holds the colour value).
# NOTE(review): read_csv() returns a tibble, so `colours[2,2]` yields a
# 1x1 tibble rather than a bare character value — confirm downstream
# consumers accept that (use colours[[2]][2] if a plain string is needed).
colours <- read_csv("colours.csv")
VERB_colour <- colours[2,2]
COPULA_colour <- colours[3,2]
AUXILIARY_colour <- colours[4,2]
PARTICLE_colour <- colours[5,2]
ADVB_colour <- colours[6,2]
NOUN_colour <- colours[7,2]
DET_colour <- colours[8,2]
ADJ_colour <- colours[9,2]
PRON_colour <- colours[10,2]
PREP_colour <- colours[11,2]
SUB_colour <- colours[12,2]
COORD_colour <- colours[13,2]
PUNCT_colour <- colours[14,2]
INTERJECTION_colour <- colours[15,2]
shinyApp(
ui <- fluidPage( # Open fluid page ----
# Instructions page ----
navbarPage("Translation App",
tabPanel("Instructions",
uiOutput('Rmarkdown_instructions')
),
# Let's get started navbar ----
navbarMenu("Let's get started!",
#(1) Enter text tab panel ----
tabPanel("(1) Enter text",
radioButtons("radio", label = h3("How do you wish to enter your data?"),
choices = list("Upload file (.doc, .docx, or .txt)" = 1, "Enter text in textbox" = 2),
width = '100%', selected = 1),
conditionalPanel(condition = "input.radio == 1",
fileInput("text_file", "Select file",
multiple = FALSE,
accept = c("text/plain",
"application/vnd.openxmlformats-officedocument.wordprocessingml.document",
"application/msword")
)
),
conditionalPanel(condition = "input.radio == 2",
textAreaInput("text_file_TA", "Enter text here...",
placeholder = "Enter text here...",
width = "100%", height = "100%", resize = "both")
# verbatimTextOutput("value")
)
), # End of tabPanel
#(2) Check language tab panel ----
tabPanel("(2) Check language",
htmlOutput("text_example"),
uiOutput(label = "from... to...",
"selectize")
# conditionalPanel(condition = "length(input$selectize) == 0",
# h2("Bingo")
# # verbatimTextOutput("value")
# )
) # End of tabPanel
), # End of navBarMenu "Let's get started!"
# Let's explore nav bar ----
tabPanel("Let's explore!",
tags$head(
tags$style(HTML({"
.mytooltip {
position: relative;
display: inline-block;
}
.mytooltip .tooltiptext {
visibility: hidden;
width: 120px;
background-color: #4d0026;
color: #fff;
text-align: center;
border: 6px solid #ff80ff;
padding: 5px 0;
/* Position the tooltip */
position: absolute;
z-index: 1;
bottom: 100%;
left: 50%;
margin-left: -60px;
}
.mytooltip:hover .tooltiptext {
visibility: visible;
}
"}))
),
h3("Table will take a few seconds to appear/refresh..."),
DT::dataTableOutput("table_coloured")
), # End of tabPanel "Let's Explore!"
# Colours tab ----
tabPanel("Colours",
selectInput(inputId = "colour_scheme",
label = h3("Select colour scheme"),
choices = list("All colours" = 2,
"Verb-related words only" = 3,
"Noun-related words only" = 4,
"Linking words (conjunctions and Prepositions)" = 5),
selected = 2),
h3("Widgets contain hexadecimal colour codes.
Colours may be conveniently copied and pasted by copying and pasting these codes."),
br(),
h3("Word classes in the Verb Complex (sometimes called Verb Phrase)"),
htmlOutput("colour_picker_verb"),
htmlOutput("colour_picker_copula"),
htmlOutput("colour_picker_auxiliary"),
htmlOutput("colour_picker_particle"),
htmlOutput("colour_picker_advb"),
br(),
h3("Word classes in the Noun Phrase"),
htmlOutput("colour_picker_noun"),
htmlOutput("colour_picker_det"),
htmlOutput("colour_picker_adj"),
htmlOutput("colour_picker_pron"),
br(),
h3("Linking words"),
htmlOutput("colour_picker_prep"),
htmlOutput("colour_picker_sub"),
htmlOutput("colour_picker_coord"),
br(),
h3("Other"),
htmlOutput("colour_picker_punct"),
htmlOutput("colour_picker_interjection")
), # End of tabPanel "Colors"
# Punctuation tab ====
tabPanel("Punctuation",
h4("Punctuation characters can cause a lot of problems for the app, because Google Translate
treats different characters differently in different languages. For example, when translating
from English to Spanish it will replace a comma with a semi-colon. This then effects the way
the app segments sentences (e.g. how it decides when the sentence begins and ends). This will
result either in an error, or a weird output that is difficult to interpret."),
h4("To prevent this,
the app replaces problematic punctuation characters. Typically, one needs to replace \"exotic\"
characters with \"boring\" ones, e.g. semi-colons with commas. This page allows you to see how
characters are replaced, and allows you to specify your own rules if you wish. Just type
the original characters in the left hand box, and the replacing characters in the right hand
box. You can also edit boxes which already contain characters."),
# From https://stackoverflow.com/questions/20637248/shiny-4-small-textinput-boxes-side-by-side
# fluidRow(
# box(width = 12, title = "A Box in a Fluid Row I want to Split",
splitLayout(
textInput("replaced1", value = ":", label = "Original character"),
textInput("replacer1", value = ",", label = "Replacement character")
),
splitLayout(
textInput("replaced2", value = ";", label=NULL),
textInput("replacer2", value = ",", label=NULL)
),
splitLayout(
textInput("replaced3", value = ":", label=NULL),
textInput("replacer3", value = ",", label=NULL)
),
splitLayout(
textInput("replaced4", value = "(", label=NULL),
textInput("replacer4", value = ",", label=NULL)
),
splitLayout(
textInput("replaced5", value = ")", label=NULL),
textInput("replacer5", value = ",", label=NULL)
),
splitLayout(
textInput("replaced6", value = "", label=NULL),
textInput("replacer6", value = "", label=NULL)
),
splitLayout(
textInput("replaced7", value = "", label=NULL),
textInput("replacer7", value = "", label=NULL)
),
splitLayout(
textInput("replaced8", value = "", label=NULL),
textInput("replacer8", value = "", label=NULL)
),
splitLayout(
textInput("replaced9", value = "", label=NULL),
textInput("replacer9", value = "", label=NULL)
),
splitLayout(
textInput("replaced10", value = "", label=NULL),
textInput("replacer10", value = "", label=NULL)
),
# )
# )
# https://github.com/jienagu/DT-Editor
#
# titlePanel("Replace punctuation characters"),
# h3("Translation may go wrong if punctuation characters are not adequately dealt with."),
# h3("In general, unusual punctuation characters such as dashes need to be replaced with commas"),
# h3("The table below shows you which characters in the `source` are replaced with"),
# shinyjs::useShinyjs(),
# shinyjs::extendShinyjs(text = "shinyjs.refresh = function() { location.reload(); }"),
# actionButton("refresh", "Reset",style="color: #fff; background-color: #337ab7; border-color: #2e6da4"),
#
# helpText("Note: Remember to save any updates!"),
# br(),
# ### tags$head() is to customize the download button
# tags$head(tags$style(".butt{background-color:#230682;} .butt{color: #e6ebef;}")),
# # downloadButton("Trich_csv", "Download in CSV", class="butt"),
# useShinyalert(), # Set up shinyalert
# uiOutput("MainBody_punct_table"),
# actionButton(inputId = "Updated_punct_table",label = "Save")
#
) # End of tabPanel "Punctuation"
) # End of navBarPage
), # End of fluidPage
# server statement----
server <- function(input, output, session){
text <- reactive({
if(is.null(input$text_file) & input$text_file_TA=="") return(NULL)
if(is.null(input$text_file)==FALSE){
text <- readtext(input$text_file$datapath)$text
}
if(input$text_file_TA!=""){
text <- input$text_file_TA
}
#
return(text)
})
# lang_iso (obtaining language) ----
lang_iso <- reactive({
if(is.null(input$text_file) & input$text_file_TA=="") return(NULL)
if(is.null(input$text_file)==FALSE){
text <- readtext(input$text_file$datapath)$text
}
if(input$text_file_TA!=""){
text <- input$text_file_TA
}
gl_auth("translation-app-256015-5b586d7ca141.json")
# gl_auth("/Users/nickriches/Google Drive/AHRC artificial intelligence/translation_shiny_web_app_prototype/translation app/translation-app-256015-5b586d7ca141.json")
lang_iso <- googleLanguageR::gl_translate_detect(text)$language
return(lang_iso)
})
# lang_eng ----
lang_eng <- reactive({
lang_eng <- langs$lang_long[which(langs$iso_code == lang_iso())]
return(lang_eng)
})
# table (showing transcript)----
  # Build the master sentence table: the source text plus one translation per
  # selected target language, POS-tagged with udpipe where a model exists,
  # and each token wrapped in HTML that colours it by word class and attaches
  # a tooltip. Depends on text(), input$selectize (first entry = source
  # language, rest = targets), the punctuation-replacement inputs, and the
  # user-chosen word-class colours.
  table <- reactive({
    # Main functions for generating colours and labels
    # highlights text in a particular colours
    highlight <- function(text, colour){
      result <- paste0("<span style=\"background-color:", colour, ";\">",
                       " ", text, " ",
                       "</span>")
      return(result)
    }
    # Wrap `text` in the tooltip markup styled by the app's .mytooltip CSS.
    add_tool_tip <- function(text, label){
      result <- paste0("<div class=\"mytooltip\">",
                       text,
                       "<span class=\"tooltiptext\">",
                       label,
                       "</span>",
                       "</div>")
      return(result)
    }
    # browser(); one <- 1; one <- 1; one <-1 ; one <- 1
    # Fall back to the default palette while the colour-picker widgets have
    # not rendered yet (their inputs are NULL on the first run).
    if(is.null(input$VERB_colour)) {VERB_colour <- "#FFAB94"} else {VERB_colour <- input$VERB_colour}
    if(is.null(input$COPULA_colour)) {COPULA_colour <- "#FFAB94"} else {COPULA_colour <- input$COPULA_colour}
    if(is.null(input$AUXILIARY_colour)) {AUXILIARY_colour <- "#FAD4CB"} else {AUXILIARY_colour <- input$AUXILIARY_colour}
    if(is.null(input$PARTICLE_colour)) {PARTICLE_colour <- "#FAD4CB"} else {PARTICLE_colour <- input$PARTICLE_colour}
    if(is.null(input$ADVB_colour)) {ADVB_colour <- "#FAD4CB"} else {ADVB_colour <- input$ADVB_colour}
    if(is.null(input$NOUN_colour)) {NOUN_colour <- "#B6B6F5"} else {NOUN_colour <- input$NOUN_colour}
    if(is.null(input$DET_colour)) {DET_colour <- "#ADFFFF"} else {DET_colour <- input$DET_colour}
    if(is.null(input$ADJ_colour)) {ADJ_colour <- "#ADFFFF"} else {ADJ_colour <- input$ADJ_colour}
    if(is.null(input$PRON_colour)) {PRON_colour <- "#99FF69"} else {PRON_colour <- input$PRON_colour}
    if(is.null(input$PREP_colour)) {PREP_colour <- "#FFFF52"} else {PREP_colour <- input$PREP_colour}
    if(is.null(input$SUB_colour)) {SUB_colour <- "#FCAD46"} else {SUB_colour <- input$SUB_colour}
    if(is.null(input$COORD_colour)) {COORD_colour <- "#FFCD7D"} else {COORD_colour <- input$COORD_colour}
    if(is.null(input$PUNCT_colour)) {PUNCT_colour <- "#eeeedd"} else {PUNCT_colour <- input$PUNCT_colour}
    if(is.null(input$INTERJECTION_colour)) {INTERJECTION_colour <- "#C29A72"} else {INTERJECTION_colour <- input$INTERJECTION_colour}
    # Map a universal POS tag (udpipe `upos`) to coloured + tooltipped HTML.
    # NA or unrecognised tags pass the string through unchanged; verbs and
    # copulas are additionally bolded.
    highlight_wc <- function(string, wc){
      if(is.na(wc)){return(string)}
      # red (original colours - user may change)
      else if(wc == "VERB"){result <- add_tool_tip(highlight(paste0("<b>",string,"</b>"), VERB_colour), "VERB")}
      else if(wc == "COPULA"){result <- add_tool_tip(highlight(paste0("<b>", string, "</b>"), COPULA_colour), "COPULA")}
      # orange
      else if(wc == "SCONJ"){result <- add_tool_tip(highlight(string, SUB_colour), "SCONJ.")}
      # light orange
      else if(wc == "CCONJ"){result <- add_tool_tip(highlight(string, COORD_colour), "CCONJ.")}
      # green
      else if(wc == "PRON"){result <- add_tool_tip(highlight(string, PRON_colour), "PRON.")}
      # pink
      else if(wc == "AUX"){result <- add_tool_tip(highlight(string, AUXILIARY_colour), "AUX.")}
      else if(wc == "ADV"){result <- add_tool_tip(highlight(string, ADVB_colour), "ADV.")}
      else if(wc == "PART"){result <- add_tool_tip(highlight(string, PARTICLE_colour), "PARTICLE")}
      # dark blue
      else if(wc == "NOUN"){result <- add_tool_tip(highlight(string, NOUN_colour), "NOUN")}
      else if(wc == "PROPN"){result <- add_tool_tip(highlight(string, NOUN_colour), "PROPN")}
      # cyan
      else if(wc == "DET"){result <- add_tool_tip(highlight(string, DET_colour), "DET.")}
      else if(wc == "DET.poss"){result <- add_tool_tip(highlight(string, DET_colour), "DET.poss")}
      else if(wc == "ADJ"){result <- add_tool_tip(highlight(string, ADJ_colour), "ADJ.")}
      else if(wc == "NUM"){result <- add_tool_tip(highlight(string, DET_colour), "NUM.")}
      # brown
      else if(wc == "INTJ"){result <- add_tool_tip(highlight(string, INTERJECTION_colour), "INTJ")}
      # yellow
      else if(wc == "ADP"){result <- add_tool_tip(highlight(string, PREP_colour), "PREP.")}
      # grey
      else if(wc == "PUNCT"){result <- add_tool_tip(highlight(string, PUNCT_colour), "PUNCT.")}
      else if(wc == "X"){result <- add_tool_tip(highlight(string, "#b8b894"), "X")}
      else if(wc == "SYM"){result <- add_tool_tip(highlight(string, "#b8b894"), "SYM")}
      else{result <- string}
      return(result)
    }
    from_text <- text()
    # Escape regex metacharacters so user-supplied punctuation characters can
    # be used as patterns in str_replace_all below.
    adbs <- function(x){ # Add Double Backslash where necessary
      return(case_when(
        x == ")" ~ "\\)",
        x == "(" ~ "\\(",
        x == "]" ~ "\\]",
        x == "[" ~ "\\[",
        x == "}" ~ "\\}",
        x == "{" ~ "\\{",
        x == "" ~ " ",
        TRUE ~ x
      ))
    }
    # Apply the ten user-configurable punctuation replacements to the source.
    from_text <- str_replace_all(from_text, adbs(input$replaced1), adbs(input$replacer1))
    from_text <- str_replace_all(from_text, adbs(input$replaced2), adbs(input$replacer2))
    from_text <- str_replace_all(from_text, adbs(input$replaced3), adbs(input$replacer3))
    from_text <- str_replace_all(from_text, adbs(input$replaced4), adbs(input$replacer4))
    from_text <- str_replace_all(from_text, adbs(input$replaced5), adbs(input$replacer5))
    from_text <- str_replace_all(from_text, adbs(input$replaced6), adbs(input$replacer6))
    from_text <- str_replace_all(from_text, adbs(input$replaced7), adbs(input$replacer7))
    from_text <- str_replace_all(from_text, adbs(input$replaced8), adbs(input$replacer8))
    from_text <- str_replace_all(from_text, adbs(input$replaced9), adbs(input$replacer9))
    from_text <- str_replace_all(from_text, adbs(input$replaced10), adbs(input$replacer10))
    # To be inserted back into final version
    # browser(); one <- 1; one <- 1; one <- 1; one <- 1; one <- 1
    # First selectize entry is the source language; the rest are targets.
    num_targets <- length(input$selectize) - 1
    from_iso <- langs$iso_code[which(langs$lang_long == input$selectize[1])]
    from_lang_long <- langs$lang_long[which(langs$lang_long == input$selectize[1])]
    # To be removed from final version
    # from_text <- "Qué quieres hacer esta noche? Yo quiero ir al cine. Quieres venir conmigo?"
    # to_text <- "What do you want to do tonight? I want to go to the movies. Do you want to come with me?"
    # from_lang_long <- "Spanish; Castilian"
    # to_lang_long <- "English"
    #xxxxxxxxxxxxxxxxxxxxxxxxx
    # Load models
    from_udpipe_model_name <- langs$udpipe_name[which(langs$lang_long == from_lang_long)]
    # Routine for if model is found.
    if(is.na(from_udpipe_model_name) == FALSE){
      from_model <- udpipe_download_model(from_udpipe_model_name, model_dir = tempdir())
      from_model <- udpipe_load_model(from_model$file_model)
      from_parsed <- as.data.frame(udpipe_annotate(from_model, from_text))
      # browser(); one <- 1; one <- 1; one <- 1; one <- 1
      from_parsed$coloured <- mapply(highlight_wc, from_parsed$token, from_parsed$upos)
      from_parsed$hasclass <- paste0("has", tolower(from_parsed$upos))
      # Collapse tokens back into one row per sentence.
      from_parsed %>%
        group_by(sentence_id) %>%
        summarise(sentence_coloured = paste(coloured, collapse = " "),
                  sentence_not_coloured = paste(token, collapse = " "),
                  hasclass = paste(hasclass, collapse = " ")) ->
        from_table
    }
    # Routine for if model is not found (basically creates dataframe with no colouring)
    if(is.na(from_udpipe_model_name) == TRUE){
      from_table <- as.data.frame(unlist(tokenize_sentences(from_text)))
      names(from_table)[1] <- "sentence_coloured"
      from_table$sentence_not_coloured <- from_table$sentence_coloured
      from_table$hasclass = ""
      from_table$sentence_id <- as.numeric(row.names(from_table))
      # Reorder columns to match the model-found branch: id, coloured,
      # not-coloured, hasclass.
      from_table <- subset(from_table, select=c(4,1,2,3))
    }
    all_table <- from_table
    # One pass per target language: translate, clean punctuation, tag, colour.
    for(loop in 1:num_targets){
      to_iso <- langs$iso_code[which(langs$lang_long == input$selectize[loop + 1])]
      to_lang_long <- langs$lang_long[which(langs$lang_long == input$selectize[loop + 1])]
      # NOTE(review): translation uses the raw text(), not the
      # punctuation-cleaned from_text — confirm this is intended.
      to_text <- gl_translate(text(), target = to_iso, source = from_iso)$translatedText
      to_text <- str_replace_all(to_text, adbs(input$replaced1), adbs(input$replacer1))
      to_text <- str_replace_all(to_text, adbs(input$replaced2), adbs(input$replacer2))
      to_text <- str_replace_all(to_text, adbs(input$replaced3), adbs(input$replacer3))
      to_text <- str_replace_all(to_text, adbs(input$replaced4), adbs(input$replacer4))
      to_text <- str_replace_all(to_text, adbs(input$replaced5), adbs(input$replacer5))
      to_text <- str_replace_all(to_text, adbs(input$replaced6), adbs(input$replacer6))
      to_text <- str_replace_all(to_text, adbs(input$replaced7), adbs(input$replacer7))
      to_text <- str_replace_all(to_text, adbs(input$replaced8), adbs(input$replacer8))
      to_text <- str_replace_all(to_text, adbs(input$replaced9), adbs(input$replacer9))
      to_text <- str_replace_all(to_text, adbs(input$replaced10), adbs(input$replacer10))
      to_udpipe_model_name <- langs$udpipe_name[which(langs$lang_long == to_lang_long)]
      # Routine for if model is found.
      if(is.na(to_udpipe_model_name) == FALSE){
        to_model <- udpipe_download_model(to_udpipe_model_name, model_dir = tempdir())
        to_model <- udpipe_load_model(to_model$file_model)
        to_parsed <- as.data.frame(udpipe_annotate(to_model, to_text))
        to_parsed$coloured <- mapply(highlight_wc, to_parsed$token, to_parsed$upos)
        to_parsed$hasclass <- paste0("has", tolower(to_parsed$upos))
        to_parsed %>%
          group_by(sentence_id) %>%
          summarise(sentence_coloured = paste(coloured, collapse = " "),
                    sentence_not_coloured = paste(token, collapse = " "),
                    hasclass = paste(hasclass, collapse = " ")) ->
          to_table
      }
      # Routine for if model is not found (basically creates dataframe with no colouring)
      if(is.na(to_udpipe_model_name) == TRUE){
        to_table <- as.data.frame(unlist(tokenize_sentences(to_text)))
        names(to_table)[1] <- "sentence_coloured"
        to_table$sentence_not_coloured <- to_table$sentence_coloured
        to_table$hasclass = ""
        to_table$sentence_id <- as.numeric(row.names(to_table))
        to_table <- subset(to_table, select=c(4,1,2,3))
      }
      all_table <- rbind(all_table, to_table)
    }
    # Interleave source and target rows so each sentence_id groups together.
    all_table %>% arrange(sentence_id) -> all_table
    # Create a variable that swaps from and to for each sentence_id to aid Universal Filter
    # (source rows carry the joined target sentences, target rows carry the
    # source sentence — so a text search matches the whole group).
    all_table$swapped <- ""
    ref <- 1
    for(i in 1:nrow(all_table)){
      # `ref` marks the source row of the current sentence group
      # (groups are num_targets + 1 rows long).
      if(i %% (num_targets + 1) == 1){ref <- i}
      if(i == ref){
        start <- i + 1
        stop <- i + num_targets
        all_table$swapped[i] <- paste(all_table$sentence_not_coloured[start:stop], collapse = " ")
      }
      if(i > ref){
        all_table$swapped[i] <- all_table$sentence_not_coloured[ref]
      }
    }
    # Add symbol in from of all "from" lines
    all_table$is_source <- rep(x = c(1, rep(0, num_targets)), times = nrow(all_table) / (num_targets + 1))
    # Prefix source rows with a visible red "===" marker; target rows get a
    # white (invisible) one so the columns stay aligned.
    add_plus <- function(sentence, is_source){
      if(is_source == 1) return(paste("<font color=\"red\">===</font>", sentence))
      if(is_source == 0) return(paste("<font color=\"white\">===</font>", sentence))
    }
    all_table$sentence_coloured <- mapply(add_plus, all_table$sentence_coloured, all_table$is_source)
    return(all_table)
  })
# colours ----
verb_col <- reactive({
colour <- colours[1, as.numeric(input$colour_scheme)]
return(colour)
})
copula_col <- reactive({
colour <- colours[2, as.numeric(input$colour_scheme)]
return(colour)
})
auxiliary_col <- reactive({
colour <- colours[3, as.numeric(input$colour_scheme)]
return(colour)
})
particle_col <- reactive({
colour <- colours[4, as.numeric(input$colour_scheme)]
return(colour)
})
advb_col <- reactive({
colour <- colours[5, as.numeric(input$colour_scheme)]
return(colour)
})
noun_col <- reactive({
colour <- colours[6, as.numeric(input$colour_scheme)]
return(colour)
})
det_col <- reactive({
colour <- colours[7, as.numeric(input$colour_scheme)]
return(colour)
})
adj_col <- reactive({
colour <- colours[8, as.numeric(input$colour_scheme)]
return(colour)
})
pron_col <- reactive({
colour <- colours[9, as.numeric(input$colour_scheme)]
return(colour)
})
prep_col <- reactive({
colour <- colours[10, as.numeric(input$colour_scheme)]
return(colour)
})
sub_col <- reactive({
colour <- colours[11, as.numeric(input$colour_scheme)]
return(colour)
})
coord_col <- reactive({
colour <- colours[12, as.numeric(input$colour_scheme)]
return(colour)
})
punct_col <- reactive({
colour <- colours[13, as.numeric(input$colour_scheme)]
return(colour)
})
interjection_col <- reactive({
colour <- colours[14, as.numeric(input$colour_scheme)]
return(colour)
})
output$colour_picker_verb <- renderUI({
colourpicker::colourInput(
inputId = "VERB_colour",
label = "Main Verb (label = VERB)",
value = verb_col()
)
})
output$colour_picker_copula <- renderUI({
colourpicker::colourInput(
inputId = "COPULA_colour",
label = "Copula (label = COPULA)",
value = copula_col()
)
})
output$colour_picker_auxiliary <- renderUI({
colourpicker::colourInput(
inputId = "AUXILIARY_colour",
label = "Auxiliary verb (label = AUXILIARY)",
value = auxiliary_col()
)
})
output$colour_picker_particle <- renderUI({
colourpicker::colourInput(
inputId = "PARTICLE_colour",
label = "Verb particle (label = PARTICLE)",
value = particle_col()
)
})
output$colour_picker_advb <- renderUI({
colourpicker::colourInput(
inputId = "ADVB_colour",
label = "Adverb (label = ADVB)",
value = advb_col()
)
})
output$colour_picker_noun <- renderUI({
colourpicker::colourInput(
inputId = "NOUN_colour",
label = "Noun (label = NOUN)",
value = noun_col()
)
})
output$colour_picker_det <- renderUI({
colourpicker::colourInput(
inputId = "DET_colour",
label = "Determiner (label = DET)",
value = det_col()
)
})
output$colour_picker_adj <- renderUI({
colourpicker::colourInput(
inputId = "ADJ_colour",
label = "Adjective (label = ADJ)",
value = adj_col()
)
})
output$colour_picker_pron <- renderUI({
colourpicker::colourInput(
inputId = "PRON_colour",
label = "Pronoun (label = PRON)",
value = pron_col()
)
})
output$colour_picker_prep <- renderUI({
colourpicker::colourInput(
inputId = "PREP_colour",
label = "Preposition (label = PREP)",
value = prep_col()
)
})
output$colour_picker_sub <- renderUI({
colourpicker::colourInput(
inputId = "SUB_colour",
label = "Subordinator (label = SUB)",
value = sub_col()
)
})
output$colour_picker_coord <- renderUI({
colourpicker::colourInput(
inputId = "COORD_colour",
label = "Coordinator (label = COORD)",
value = coord_col()
)
})
output$colour_picker_punct <- renderUI({
colourpicker::colourInput(
inputId = "PUNCT_colour",
label = "Punctuation (label = PUNCT)",
value = punct_col()
)
})
output$colour_picker_interjection <- renderUI({
colourpicker::colourInput(
inputId = "INTERJECTION_colour",
label = "Interjection (label = INTERJECTION)",
value = interjection_col()
)
})
# ***RENDERING STATEMENTS*** ----
# Rmarkdown_instructions ----
#colour_set statement ====
# output$VERB_colour = renderUI({ # NB it looks as if there needs to be a new renderstatement for each dropdown
#
# colourpicker::colourInput(
# inputId = "VERB_colour",
# label = "Main Verb (label = VERB)",
# # value = colours[1, input$scheme]
# value = "#FFAB94"
# # showColour = "background"
# )
#
# # colourpicker::colourInput(
# # inputId = "COPULA_colour",
# # label = "Copula (label = COP.)",
# # value = "#FFAB94"
# # # showColour = "background"
# # )
#
# #
# # colourpicker::colourInput(
# # inputId = "AUXILIARY_colour",
# # label = "Auxiliary Verb (label = AUX.)",
# # value = "#FAD4CB"
# # # showColour = "background"
# # ),
# #
# # colourpicker::colourInput(
# # inputId = "PARTICLE_colour",
# # label = "Particle e.g. \"to\" in \"to go\" (label = PART.)",
# # value = "#FAD4CB"
# # # showColour = "background"
# # ),
# #
# # colourpicker::colourInput(
# # inputId = "ADV_colour",
# # label = "Adverb (label = ADV.)",
# # value = "#FAD4CB"
# # # showColour = "background"
# # ),
# #
# # hr(),
# #
# # h3("Noun Phrase"),
# #
# # colourpicker::colourInput(
# # inputId = "NOUN_colour",
# # label = "Noun (label = NOUN)",
# # value = "#B6B6F5"
# # # showColour = "background"
# # ),
# #
# #
# # colourpicker::colourInput(
# # inputId = "DET_colour",
# # label = "Determiner (label = DET., or DET.poss if possessive)",
# # value = "#ADFFFF"
# # # showColour = "background"
# # ),
# #
# # colourpicker::colourInput(
# # inputId = "ADJ_colour",
# # label = "Adjective (label = ADJ.)",
# # value = "#ADFFFF"
# # # showColour = "background"
# # ),
# #
# # colourpicker::colourInput(
# # inputId = "PRON_colour",
# # label = "Pronoun (label = PRON.)",
# # value = "#99FF69"
# # # showColour = "background"
# # ),
# #
# # hr(),
# #
# # h3("Prepositions"),
# #
# # colourpicker::colourInput(
# # inputId = "PREP_colour",
# # label = "Prepositions (label = PREP.)",
# # value = "#FFFF52"
# # # showColour = "background"
# # ),
# #
# # hr(),
# #
# # h3("Linking Words"),
# #
# # colourpicker::colourInput(
# # inputId = "SUB_colour",
# # label = "Subordinating Conjunction (label = SCONJ.)",
# # value = "#FCAD46"
# # # showColour = "background"
# # ),
# #
# # colourpicker::colourInput(
# # inputId = "COORD_colour",
# # label = "Coordinating Conjunction (label = CCONJ.)",
# # value = "#FFCD7D"
# # # showColour = "background"
# # ),
# #
# # hr(),
# #
# # h3("Others"),
# #
# # colourpicker::colourInput(
# # inputId = "PUNCT_colour",
# # label = "Punctuation Character (label = PUNCT.)",
# # value = "#eeeedd"
# # # showColour = "background"
# # ),
# #
# # colourpicker::colourInput(
# # inputId = "INTERJECTION_colour",
# # label = "Interjection (label = INTJ.)",
# # value = "#C29A72"
# # # showColour = "background"
# # )
#
# # ) # end of HTML statement ====
#
# })
#
output$Rmarkdown_instructions <- renderUI({
# HTML(rmarkdown::render('Rmarkdown_instructions.Rmd'))
HTML(markdown::markdownToHTML(knit('Rmarkdown_instructions_reduced.Rmd', quiet = TRUE)))
# includeHTML("Rmarkdown_instructions.html")
})
# (2) Check language tab panel ----
output$text_example <- renderUI({
text <- substr(text(), 1, 1000)
HTML(paste0("<p><h1>Text</h1><h3>(up to 1000th character)</h3>", text,"</p>"))
})
# Language Selectize ----
output$selectize = renderUI({
selectizeInput(inputId = "selectize", # NB refer to input$selectize
label = "from... to...",
choice = lang_list,
selected = lang_eng(),
multiple = TRUE)
})
# table_coloured ----
  # Render the sentence table with DT: regex-enabled per-column filters,
  # no paging, the coloured sentence column kept on one line, and the helper
  # columns (plain text, CSS classes, swapped text, source flag) greyed out.
  output$table_coloured = DT::renderDataTable({
    datatable(table(),
              filter = c("top"),
              rownames = FALSE,
              # escape = FALSE is required: cells contain generated HTML.
              escape = FALSE,
              options = list(paging = FALSE, autoWidth = TRUE, searching = TRUE,
                             search = list(regex = TRUE, scrollX = TRUE)
              )
    ) %>% formatStyle(columns = c(1), width='100px') %>%
      formatStyle("sentence_coloured","white-space"="nowrap") %>%
      formatStyle("sentence_not_coloured","white-space"="nowrap", color = "lightgray") %>%
      formatStyle("hasclass","white-space"="nowrap", color = "lightgray") %>%
      formatStyle("swapped","white-space"="nowrap", color = "lightgray") %>%
      formatStyle("is_source","white-space"="nowrap", color = "lightgray")
  })
} # end of server statement
)
|
f414dbcb34289e7b0869d23770e391cc834ac2e7
|
dfa09fcc25994c4c7f33b3fa9a91ba6ce7096547
|
/man/resultC.Rd
|
a95982080688fb24476bb4826651ea9d64a04523
|
[] |
no_license
|
chensyustc/SC19027
|
a64a8b2137951ae46a814f0389ee06ac849965d8
|
1e291cd7c96cab5d020e471d62a1b3a74a70efe8
|
refs/heads/master
| 2020-12-03T13:16:13.909137
| 2020-01-02T07:37:25
| 2020-01-02T07:37:25
| 230,364,676
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 444
|
rd
|
resultC.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{resultC}
\alias{resultC}
\title{Bias of estimated sigma and average model size using Rcpp}
\usage{
resultC(hsigma, hbeta)
}
\arguments{
\item{hsigma}{the estimated sigma}
\item{hbeta}{the estimated coefficients}
}
\value{
bias of estimated sigma and average model size
}
\description{
Bias of estimated sigma and average model size using Rcpp
}
|
3bc8519b6142df0b89cc63b2d8caa33b6ef000cd
|
a3f7826863b6b81bc99ccf9c414f8bcf09a335e7
|
/R/myKable.R
|
6748ad9b15b42aab7f343d7a590bcede1bcd63d4
|
[] |
no_license
|
cran/rmdHelpers
|
24c9516a15a8d6de20bb92df4df1ceba27786ce1
|
b091a8e1ec70f651305074b03ccb38dd0008c599
|
refs/heads/master
| 2021-01-18T18:09:53.043265
| 2016-07-11T23:09:59
| 2016-07-11T23:09:59
| 55,989,977
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 915
|
r
|
myKable.R
|
myKable <- function(x,
                    row.names = NA,
                    boldRowNames = TRUE,
                    boldColNames = TRUE,
                    ...) {
  # Bold the row and/or column names of `x` with markdown emphasis before
  # rendering it via knitr::kable().
  #
  # Args:
  #   x: matrix or data frame to render.
  #   row.names: forwarded to kable(); NA lets kable decide whether to
  #     print row names.
  #   boldRowNames, boldColNames: wrap the respective names in "**...**".
  #   ...: further arguments forwarded to kable().
  #
  # Note: assumes a markdown output format; explicit handling for other
  # formats is still TODO (as in the original).
  if (boldRowNames) {
    if (is.na(row.names)) {
      # Default behaviour: only bold row names kable would actually print,
      # i.e. names that exist and are not just the row numbers.
      # seq_len() (not 1:nrow) keeps the comparison correct for 0-row input,
      # where 1:nrow(x) would yield c(1, 0).
      if (is.null(row.names(x))) {
        # No row names: kable won't print them.
      } else if (identical(row.names(x), as.character(seq_len(nrow(x))))) {
        # Automatic numeric row names: kable won't print them.
      } else {
        row.names(x) <- paste0("**", row.names(x), "**")
      }
    } else if (row.names) {
      # row.names = TRUE: caller wants them printed, so bold them.
      row.names(x) <- paste0("**", row.names(x), "**")
    }
    # row.names = FALSE: nothing to bold.
  }
  if (boldColNames) {
    colnames(x) <- paste0("**", colnames(x), "**")
  }
  # Send to kable
  kable(x, row.names = row.names, ...)
}
|
d46a27572a32f7a1e637046deba924c489d26690
|
4a78e4ae68e138abfea88515a101adef410e6bdc
|
/asc_complaints.R
|
ae9cfed2ef3390145df4c5dbeabe655a709086a7
|
[] |
no_license
|
airsafe/analyses
|
8a708d12803d1f93ef54b9907f1be2f86d31b9a8
|
54a6dc14c360312b7f316683797436f84af14727
|
refs/heads/master
| 2021-01-10T14:57:04.641933
| 2020-01-03T21:25:21
| 2020-01-03T21:25:21
| 46,495,199
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 20,987
|
r
|
asc_complaints.R
|
# Exploration of complaint file
# ADMINISTRATIVE NOTES
# Note: To describe database at any point, use str(*name*)
# Note: To clear R workspace, use rm(list = ls())
# Note: Searching R help files - RSiteSearch("character string")
# Note: To clear console, use CTRL + L
# PURPOSE
# The goal of this exercise was to take an edited version of the contents of
# the AirSafe.com complaint database and do some basic exploratory analysis of
# the data. The information was downloaded in early January 2016 and
# consists of all of the complaints submitted from late May 2012 to early
# January 2016. The pre-processing step included removing duplicate entries
# and consolidating information apparently submitted in one or more submissions
# that were actually referring to the same complaint.
# The first step is to install new packages that will be needed for the analysis.
# Install/load dependencies and read the pre-processed complaint export.
options(repos = c(CRAN = "http://cran.rstudio.com"))
# Install e1071 only if it is missing (the original tested `... == FALSE`,
# which relies on operator precedence; the negation is explicit here).
if (!"e1071" %in% rownames(installed.packages())) {
  install.packages("e1071")
}
library(e1071)
# Note that raw data was pre-processed to exclude non-English content.
# Keep strings as character: every column is re-coerced to character later
# anyway, and this makes the behaviour identical across R versions.
complaints.raw <- read.csv("asc_complaints.csv", stringsAsFactors = FALSE)
complaints <- complaints.raw
colnames(complaints)
# There are 14 variables (column names) for the raw data:
# 1. Timestamp - Date and time of submission
# 2. Name
# 3. Address
# 4. City
# 5. State.Province
# 6. Country
# 7. Email
# 8. Phone
# 9. Airline
# 10. Flight.Number
# 11. Location.Flight.Leg
# 12. Date - Date of occurrcene
# 13. Complaint.Categories
# 14. Please.include.additional.details.below
#
# Will rename several columns
# Rename the unwieldy export column names (old name -> new name).
rename_map <- c(
  "Please.include.additional.details.below" = "Notes",
  "Complaint.Categories"                    = "Categories",
  "State.Province"                          = "State",
  "Flight.Number"                           = "Flight",
  "Location.Flight.Leg"                     = "Location"
)
for (old_name in names(rename_map)) {
  colnames(complaints)[colnames(complaints) == old_name] <- rename_map[[old_name]]
}
# All except Timestamp and Date should be of type character. Will start by
# making them all character
# Note that the '[]' keeps it as data frame and does not make it a list
# Coerce every column to character, then derive typed time fields below.
# The '[]' keeps complaints a data frame rather than turning it into a list.
complaints[] = lapply(complaints, as.character)
# Change Timestamp to as.POSIXlt which has elements in a list
# (this allows $year/$mon/$mday/$hour access below).
complaints$Time = as.POSIXlt(complaints$Timestamp, format="%m/%d/%Y %H:%M:%S")
complaints$Year = complaints$Time$year + 1900 # Years indexed from 1900
complaints$Month = complaints$Time$mon + 1 # Months indexed from zero
# Convert months from character to numeric
complaints$Month = as.numeric(as.character(complaints$Month))
# Convert numeric month (1-12) to its three-letter abbreviation
complaints$Month = month.abb[complaints$Month]
# Data overview: date range of the complaint records
paste("There were a total of ",format(nrow(complaints), big.mark = ","),
      " unique complaints in the database. The earliest record was from ",
      as.Date(min(range(complaints$Time))),
      " and the latest from ", as.Date(max(range(complaints$Time))), ".",
      sep = "")
# Make months ordered factors so plots follow calendar order
complaints$Month = factor(complaints$Month,levels=c("Jan", "Feb","Mar", "Apr","May",
                                                    "Jun","Jul","Aug","Sep","Oct", "Nov","Dec"), ordered=TRUE)
# Extract the day of the week from the Time variable
# complaints$Day = complaints$Time$mday
complaints$Day = weekdays(complaints$Time, abbreviate = TRUE)
# Make days into ordered factors so plots follow calendar order
complaints$Day = factor(complaints$Day,levels=c("Sun","Mon","Tue",
                                                "Wed","Thu","Fri","Sat"), ordered=TRUE)
# Hour of day (0-23) when the complaint was submitted
complaints$Hour = complaints$Time$hour
# Helper: draw a complaint-count bar plot with the shared styling.
# Extra arguments (e.g. cex.names) are forwarded to barplot().
# Replaces four near-identical barplot() calls; also makes the y-axis
# label capitalization consistent.
plot_complaint_counts <- function(counts, main, xlab, ...) {
  barplot(counts,
          main = main,
          xlab = xlab,
          ylab = "Number of complaints",
          las = 1,
          col = "dodgerblue",
          ...)
}
plot_complaint_counts(table(complaints$Year),
                      main = "Number of complaints by year",
                      xlab = "Year")
plot_complaint_counts(table(complaints$Day),
                      main = "Complaints by day of the week",
                      xlab = "Day of the week")
plot_complaint_counts(table(complaints$Month),
                      main = "Complaints by month of the year",
                      xlab = "Month of the year",
                      cex.names = 0.9)
plot_complaint_counts(table(complaints$Hour),
                      main = "Complaints by hour when submitted",
                      xlab = "Time of day of submission",
                      cex.names = 0.6)
# Word count for the note attached to each complaint: split on runs of
# non-word characters (\W in regular expressions) and count the tokens.
# vapply keeps the result a length-nrow(complaints) integer vector.
complaints$Note_length = vapply(strsplit(complaints$Notes,"\\W+"),length,integer(1))
# Summary statistics
paste("Of the",format(length(complaints$Note_length), big.mark = ","), "complaints, only",
      sum(complaints$Note_length==0),"did not leave some kind of explanatory a note.", sep=" ")
# Cumulative distribution of number of words used in the notes section
plot.ecdf(complaints$Note_length,
          main = "Cumulative distribution of number of words used in Notes section",
          xlab="Number of words",
          ylab="Cumulative probabilities")
# Note length buckets. Bounds are inclusive so the buckets match the
# printed labels ("1 to 250", "251 to 1,000", "over 1,000"); the original
# used strict inequalities on both sides, so notes of exactly 250 or
# exactly 1,000 words were counted in no bucket.
print("Note length varied widely, with most being between 250 and 1000 words, roughly equivalent to one to five typewritten pages.")
paste("Of the", format(nrow(complaints), big.mark = ","), "complaints," , sep = " ")
paste(" -", format(sum(complaints$Note_length==0), big.mark = ","), "left no notes,",sep = " ")
paste(" -", format(sum(complaints$Note_length >= 1 & complaints$Note_length <= 250), big.mark = ","), "left notes 1 to 250 words long,",sep = " ")
paste(" -", format(sum(complaints$Note_length >= 251 & complaints$Note_length <= 1000), big.mark = ","), "left notes 251 to 1,000 words long, and",sep = " ")
paste(" -", format(sum(complaints$Note_length > 1000), big.mark = ","), "left notes over 1,000 words long.",sep = " ")
# There are 17 categories in the complaint form, and one can select
# more than one:
# 1. Delays or other Flight Problems
# 2. Checked or carry on baggage
# 3. Reservations/Boarding/Ground Services
# 4. Cancellations
# 5. Fares/Refunds/Online Booking
# 6. In flight services/Meals
# 7. Safety
# 8. Security/Airport Secreening
# 9. Overbooking
# 10. Customer Service
# 11. Frequent Flyer Programs
# 12. Discrimination
# 13. Disability
# 14. Travel with children
# 15. Travel with pets
# 16. Passenger behavior
# 17. Other
# Category names used in complaint form.
# IMPORTANT: these strings are matched verbatim (grepl) against the
# Categories field submitted by the form, so they must reproduce the form's
# exact wording — including the misspelling "Secreening". Do not "fix" it
# here unless the form itself changes.
category_names = c("Delays or other Flight Problems",
                   "Checked or carry on baggage",
                   "Reservations/Boarding/Ground Services",
                   "Cancellations",
                   "Fares/Refunds/Online Booking",
                   "In flight services/Meals",
                   "Safety",
                   "Security/Airport Secreening",
                   "Overbooking",
                   "Customer Service",
                   "Frequent Flyer Programs",
                   "Discrimination",
                   "Disability",
                   "Travel with children",
                   "Travel with pets",
                   "Passenger behavior",
                   "Other")
# Make category names R friendly column names (valid data frame columns)
category.vars = make.names(category_names)
# Insert one 0/1 indicator column per category, flagging whether that
# category string appears in the complaint's Categories field.
# Assigning by name avoids the original's cbind()-in-a-loop growth and the
# separate names()<- bookkeeping; seq_along replaces 1:length.
for (i in seq_along(category_names)) {
  complaints[[category.vars[i]]] <- as.numeric(grepl(category_names[i], complaints$Categories))
}
# Number of categories checked in each complaint: row sums over the
# indicator columns (rowSums replaces apply(..., 1, sum)).
complaints$cat_checked <- rowSums(complaints[, colnames(complaints) %in% category.vars])
# Per-category usage counts: how many complaints mention each category at
# all (Used), and in how many it was the only category checked (Used_once).
# vapply with unname() replaces the original's two NULL-initialised vectors
# grown inside loops.
cat_used_counts <- unname(vapply(category_names, function(cat) {
  sum(grepl(cat, complaints$Categories))
}, numeric(1)))
cat_used_once_counts <- unname(vapply(category_names, function(cat) {
  sum(grepl(cat, complaints$Categories[which(complaints$cat_checked == 1)]))
}, numeric(1)))
# Build the summary table directly as a data frame (the original built a
# character matrix and then re-coerced the columns back to numeric).
cat_used <- data.frame(Category = category_names,
                       Used = cat_used_counts,
                       Used_once = cat_used_once_counts,
                       stringsAsFactors = FALSE)
rownames(cat_used) <- NULL
# Keep only categories that were used at least once, then compute how often
# each was the sole category checked.
cat_used <- cat_used[cat_used$Used > 0, ]
cat_used$Ratio <- round((cat_used$Used_once / cat_used$Used), 3)
paste(nrow(cat_used), "of", length(category_names),"categories used as the only category checked at least one time.",sep=" ")
# Category used sorted, with ratio of how often that category
# was the only one checked
cat.order = order(cat_used$Used, decreasing = TRUE)
cat_used_sorted = cat_used[cat.order,]
rownames(cat_used_sorted) = NULL
print(cat_used_sorted[,1:4])
# Correlation of checkboxes where only one category checked
# solo_check = complaints[complaints$cat_checked==1,]
#
# cor(complaints[,which(colnames(solo_check) %in% category.vars)])
# Choose a dependent variable based on categories
# and run predictions based on words used
# Now let's deal with the comments
# Install new packages
options(repos = c(CRAN = "http://cran.rstudio.com"))
if("tm" %in% rownames(installed.packages()) == FALSE)
{install.packages("tm")}
library(tm)
# SnowballC is a word stemming algorithm for collapsing
# words to a common root to aid comparison of vocabulary.
if("SnowballC" %in% rownames(installed.packages()) == FALSE)
{install.packages("SnowballC")}
library(SnowballC)
# Install Twitter reading package for fun
# if("twitteR" %in% rownames(installed.packages()) == FALSE)
# {install.packages("twitteR")}
# library(twitteR)
# setup_twitter_oauth("kCYGJInM6evrkwADSySrkTroL", "JnOkxROeVHiTsiNKfrPFc7LHwEReoDwdQk5buUdmPS98xCayTY")
#
#
# tweets = userTimeline("airsafe", n=100)
# Create corpus
# Build a corpus, and specify the source to be character vectors
corpus = Corpus(VectorSource(complaints$Notes))
# Look at corpus
corpus
# corpus[[55]]
# Convert all words to lower case
# Creates both lower case and meta data
corpus_trans = tm_map(corpus,content_transformer(tolower))
# Creates lower case content without meta data
corpus = tm_map(corpus, tolower)
# corpus[[1]]
# IMPORTANT NOTE: If you are using the latest version of the tm package,
# you will need to run the following line before continuing
# (it converts corpus to a Plain Text Document).
# This is a recent change having to do with the tolower function that
# occurred after this video was recorded.
corpus = tm_map(corpus, PlainTextDocument) # Convert to plain text document
corpus = tm_map(corpus, removePunctuation) # Remove punctuation
corpus = tm_map(corpus, removeNumbers) # Remove numbers
# Remove stopwords and popular air travel words,
# which will leave a corpus of words more likely to be related to
# the subject matter of the complaint
corpus = tm_map(corpus, removeWords, c('flight',
'air',
'airport',
'airline',
'airlines',
stopwords("english")))
corpus <- tm_map(corpus, stripWhitespace) # Strip whitespace
# corpus[[1]]
# Stem document (removes variations, kees only the root of words)
# corpus = tm_map(corpus, stemDocument)
# corpus[[1]]
# Create matrix
# Will now create a matrix of all the words used in the Notes section
# where the previous steps filtered out many common words
frequencies = DocumentTermMatrix(corpus)
# Will now add column to complaints data frame that will show
# How many filtered words are in each complaint
word_counts.row = rowSums(as.matrix(frequencies)) # Number of times each word appears?
# word_counts.col = colSums(as.matrix(frequencies)) # Number of words with each document
# Add this to the complaints data frame
complaints$Note_length_dtm = as.numeric(as.character(word_counts.row))
# Now will look only at the most common or popular words
# Will include only those words that occur in at least 3% of the complaints
frequencies.common = removeSparseTerms(frequencies, 0.97)
# now we have a data frame of which popular words occur
# in each document, meaning they occur in at least 3% of all the complaints
most.pop = as.data.frame(as.matrix(frequencies.common))
# Before we looked at words for each note, now will count both
# popular words for each note, plus number of times each word occurs
word_counts.row_pop = rowSums(as.matrix(frequencies.common)) # Number of times each word appears?
word_counts.col_pop = colSums(as.matrix(frequencies.common)) # Number of words with each document
# Add the popular words to the complaints data frame
complaints$Note_popular_words = as.numeric(as.character(word_counts.row_pop))
# Now create, then sort, a new data frame of the most popular words
most.popular.words = as.data.frame(word_counts.col_pop)
names(most.popular.words) = "Wordcount"
# Row names are the words, will make that a new column, and
# get rid of the rownames
most.popular.words$Word = rownames(most.popular.words)
rownames(most.popular.words) = NULL
pop.order = order(most.popular.words$Wordcount, decreasing = TRUE)
most.popular.words.ordered = most.popular.words[pop.order,]
rownames(most.popular.words.ordered) = NULL
# This takes care of some odd cases where the document-term transformation
# results in more words than the plain text. This can happen for some
# non-English text content such as Arabic.
# NOTE(review): the second operand of `&` is a bare numeric vector that R
# coerces to logical (non-zero == TRUE). Given the comment above, a
# comparison such as `Note_length_dtm <= Note_length` may have been
# intended -- confirm. `pos_note` is not referenced again in the visible
# portion of this script.
pos_note = which(complaints$Note_length>0 & complaints$Note_length_dtm )
# The note ratio is the ratio of number of words after the
# document term process divided by the original number of words.
note_ratio = complaints$Note_length_dtm/complaints$Note_length
note_ratio_pop = complaints$Note_popular_words/complaints$Note_length
# The first histogram gives the distribution of the ratio of
# filtered words to unfiltered words for all the notes
hist(note_ratio,
xlim=c(0,1), ylim=c(0,1000),
main="Ratio of filtered words to all words in Notes",
xlab = "Ratio", col = rgb(0.8,0.1,0.1,0.5))
print("Summary of ratio of filtered words over total words in a Note")
summary(note_ratio)
print("In this second histogram, the ratio is for a Note's words that are both filtered and used in 3% of complaints over all words.")
# The second histogram gives the distribution of the ratio of
# filtered and popular (used in at least 3% of complaints)
# words to unfiltered words for all the notes
hist(note_ratio_pop,
xlim=c(0,1), ylim=c(0,1000),
main="Ratio of filtered and popular words to all words in Notes",
xlab = "Ratio", col=rgb(0.1,0.1,0.8,0.5))
print("Summary of ratio of filtered and popular words over total words in a Note")
summary(note_ratio_pop)
# Combining the two distributions in an overlapping way
hist(note_ratio, col= rgb(0.8,0.1,0.1,0.5),
xlim=c(0,1), ylim=c(0,1000),
main="Overlapping Histograms of ratios of filtered words",
xlab="Ratio")
hist(note_ratio_pop, col=rgb(0.1,0.1,0.8,0.5), add=T)
box()
if("wordcloud" %in% rownames(installed.packages()) == FALSE)
{install.packages("wordcloud")}
library(wordcloud)
# look at top 20 words and word cloud of top 100
print("Top 20 words used")
print(most.popular.words.ordered[1:20,2:1],row.names = FALSE)
print("Word cloud of top 100 most used filtered words")
wordcloud(corpus, scale=c(2.5,0.25),
max.words=100,
random.order=FALSE)
# Note: no analysis from this point forward. What appears below
# is the outline of steps needed to create more consistency in how
# airline names were used in the complaints.
# Function for removing multiple spaces.
# The perl lookbehind "(?<=[\s])\s*" matches any whitespace run that
# immediately follows another whitespace character, so replacing it with ""
# collapses each run to its first character. The alternative "^\s+$" blanks
# out strings that consist entirely of whitespace.
multispace <- function(x){
  x = gsub("(?<=[\\s])\\s*|^\\s+$", "", x, perl=TRUE)
  return(x)
}
# DATA CLEANING: Removing unnecessary non-printing characters
# Before evaluating laser encounters by city, airport, and state, steps must be taken to ensure uniformity
# of definitions. One way to do that is to eliminate unecessary leading and trailing space characters.
# In this case, a function was created that could be applied to multiple location-related variables.
# FUNCTION FOR REMOVING LEADING AND TRAILING SPACES AND NON-PRINTING CHARACTERS
# Function 'stripper' definition
# The first step is to ensure the vector 'x' is character type by using 'as.character()' function.
# The next step is to remove the leading space characters, including leading tab,
# newline, vertical tab, form feed, carriage return, and space:
#
# - x = sub("^[[:space:]]+", "", x)
#
# A less general alternative is to use sub("^\\s+", "", x)
#
# Trailing spaces can be removed in a similar fashion:
# - str = sub("[[:space:]]+$", "", str)
#
# A less general alternative is to use sub("\\s+$", "", x)
# Notes:
# - The "$" character is the end of string character, "^"is beginning of string character
# - Note that without the "+", only the first instance would be removed
# - [:space:] is all space characters (tab, newline, vertical tab, form feed, carriage return, and space)
stripper <- function(x){
  # This function removes leading and trailing spaces from a vector.
  # Equivalent to the str_trim() function in the stringr package.
  x = as.character(x)
  x = sub("[[:space:]]+$", "", x) # Remove trailing space characters
  x = sub("^[[:space:]]+", "", x) # Remove leading space characters
  return(x)
}
# Remove leading and trailing space characters from selected variables,
# as well as multiple spaces
# for (i in 1:ncol(complaints)) {
# complaints[,i] = multispace(complaints[,i])
# complaints[,i] = stripper(complaints[,i])
# }
complaints[] = lapply(complaints,multispace)
complaints[] = lapply(complaints,stripper)
# Capitalize the first letter of every space-separated word in a single
# string; the remainder of each word is lower-cased first.
simpleCap <- function(x) {
  words <- strsplit(tolower(x), " ")[[1]]
  capitalised <- paste0(toupper(substring(words, 1, 1)), substring(words, 2))
  paste(capitalised, collapse = " ")
}
# Run the simpleCap function and create a new variable
# called "Carrier"
# complaints$Carrier = sapply(complaints$Airline,simpleCap)
# Review of raw data showed a variety of spelling options for
# Airlines. The following will collapse the varieties into
# something more tractable by using 'grep' function to match
# key character strings (all exact matches)
# "Spirit Air" Spirit_Airlines
# "Singapor" to "Singapore_Airlines"
# "u.s. airway", complaints$Carrier, ignore.case=TRUE
# "United ex"), complaints$Carrier, ignore.case = TRUE United_Express
# "United "), complaints$Carrier, ignore.case = TRUE to United
# "Us " complaints$Carrier, ignore.case = FALSE)
# "Air India" to Air_India
# "American Airline" to American
# "British Air" British_Airways
# "Virgin Austra" to Virgin_Australia
# "Virgin Atlantic" to Virgin_Atlantic
# "Virgin Amer" to Virgin_America
# "Virgin Airlines" to Virgin
# "Southwest Air" to Southwest_Airlines
# "Saudi" to Saudia
# Qat to Qatar_Air
# "West Je" to WestJet
# "Usa" to US_Airways
# "ppine to Philippine_Airlines"
# "Us Air Ways" to US_Airways
# "Usair" to US_Airways
# "Usairways" to US_Airways
# "Us Airways" to American
# "Aer Lingus/" | "Aerlingus" to Aer_Lingus
# "Aero Mexico" to Aeromexico
# "Argentin" to Aerolineas_Argentinas
# "Air Canad" to Air_Canada
# "Air Franc" to Air_France
# "ish Air" to British Airways
# "Cathay" Cathay_Pacific
# "China Air" to China_Airlines
# "China Eastern" to China_Eastern
# "China Southern" to China_Southern
# "Copa Air" to Copa
# "Delta" to Delta
# "Air Franc" to Air_France
# "Alaska" to Alaska
# "Alitali" to Alitalia
# "Alle" to Allegiant
# "Egypt" to EgyptAir
# "El Al" to El_Al
# "West Je" to "WestJet"
# "Westjet" to "WestJeat"
# Malaysia to Malaysia_Airlines
# hansa to Lufthansa
# tsst=grep("Delta", complaints$Carrier, ignore.case = TRUE)) find airline ndx
|
928caca8ee4f39afc46f887288f3c3903df50ad4
|
42d8105ddeb0ab7592b0d634107de240776294f6
|
/Class4/elections.R
|
53e5a8aeb8513c84f4454756cb877ac3bd8e5a7a
|
[] |
no_license
|
x0wllaar/MASNA-R-Programming-2020
|
af003e70f5bd05134ad132d4b12834272949bf21
|
74452be5dd8edcfc11e0a20b1b9ca22e677e0209
|
refs/heads/master
| 2023-01-02T19:30:26.411466
| 2020-10-19T23:12:37
| 2020-10-19T23:12:37
| 292,828,300
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,083
|
r
|
elections.R
|
library(data.table)
library(purrr)
library(stargazer)
library(vioplot)
library(corrplot)
library(MASS)
library(car)
library(nortest)
#Working with data!
##We have a file with 2012 presidential election results in Russia
elec_file <- "47130-8314.csv"
##Load this file into R (data.table)
##The file is UTF-8 encoded and contains cyrillic, expect problems on Windows
##Fread accepts "encoding" paramenter
all_data <- fread(elec_file, encoding = "UTF-8")
##Select columns "kom1", "kom2", "kom3", "1", "9", "10", "19", "20", "21", "22", "23" from the data
##Rename them to "region", "tik", "uik", "total", "invalid", "valid", "Zh", "Zu", "Mi", "Pr", "Pu"
dt_1 <- all_data[,c("kom1", "kom2", "kom3", "1", "9", "10", "19", "20", "21", "22", "23")]
colnames(dt_1) <- c("region", "tik", "uik", "total", "invalid", "valid", "Zh", "Zu", "Mi", "Pr", "Pu")
##Add a variable called turnout (valid + invalid) (total number of voters)
##Add a variable called turnout_p (turnout / total * 100) (voter turnout percentage)
# NOTE: each pair below performs the same assignment twice, first with
# base-R `$<-` syntax and then with data.table `:=` syntax; the second
# call of each pair is redundant at runtime and is presumably kept to
# demonstrate both forms (this is a teaching script).
dt_1$turnout <- dt_1$valid + dt_1$invalid
dt_1[,turnout := valid + invalid]
dt_1$turnout_p <- (dt_1$turnout/dt_1$total) * 100
dt_1[,turnout_p := (turnout / total) * 100]
##Remove Baikonur and voters outside Russia from the data
##"Территория за пределами РФ"
##"Город Байконур (Республика Казахстан)"
dt_1_c <- dt_1[
!grepl("Территория за пределами РФ", region, fixed = TRUE)
][
!grepl("Город Байконур (Республика Казахстан)", region, fixed = TRUE)
]
##Remove rows with missing data
dt_1_c_nona <- na.omit(dt_1_c)
##Display descriptives of the data
summary(dt_1_c_nona)
##Aggregate columns turnout, total, invalid, valid, Zh, Zu, Mi, Pr, Pu by region
##(by summing them)
dt_2 <- dt_1_c_nona[, .(
turnout = sum(turnout),
total = sum(total),
invalid = sum(invalid),
valid = sum(valid),
Zh = sum(Zh),
Zu = sum(Zu),
Mi = sum(Mi),
Pr = sum(Pr),
Pu = sum(Pu)
), by = region]
##Recompute turnout percentage for each region
dt_2[,turnout_p := (turnout / total) * 100]
##Create a factor variable with the region type
##“область”, “республика”, “край”, “округ”, “город”
##"oblast", "respublika", "krai", "okrug", "gorod"
##HINT: Use grepl and data.table subsetting
dt_2[grepl("область", tolower(region), fixed = TRUE), RegType := 1]
dt_2[grepl("республика", tolower(region), fixed = TRUE), RegType := 2]
dt_2[grepl("край", tolower(region), fixed = TRUE), RegType := 3]
dt_2[grepl("округ", tolower(region), fixed = TRUE), RegType := 4]
dt_2[grepl("город", tolower(region), fixed = TRUE), RegType := 5]
#We use tolower here so that the case of the words does not matter
dt_2[,RegType := factor(RegType)]
levels(dt_2$RegType) <- c("oblast", "respublika", "krai", "okrug", "gorod")
#Convert into factor, then assign readable names for levels
##Display a (fancy) barplot with the number of regions of different types
#We use ylim here to force the height of the y axis, so the percentage for the
#highest bar does not get cut off
reg_percent_table <- table(dt_2$RegType) * 100 / sum(table(dt_2$RegType))
reg_percent_table %>%
barplot(main = "Russia regions by type",
col = "indianred",
ylab = "% Regions",
xlab = "Type",
ylim = c(0,60)) %>%
text(x = ., y = reg_percent_table + 1, labels = paste(round(reg_percent_table, 2), "%"))
##Display a pie chart with the same information
#I use https://colorbrewer2.org/ for palettes
color_palette_reg_types <- c('#e41a1c','#377eb8','#4daf4a','#984ea3','#ff7f00')
#Make a vector of labels with percentages (\n is the newline symbol)
reg_types_labels <- paste(names(reg_percent_table), "\n",
                          round(reg_percent_table, 2), "%", sep="")
# Bug fix: the title previously read "Observations by month" (copy-pasted
# from another plot); this chart shows the share of regions by type, so it
# now matches the barplot title above.
pie(reg_percent_table,
    main = "Russia regions by type",
    col=color_palette_reg_types,
    labels = reg_types_labels)
##Display a (fancy) histogram of turnout percentage
hist(dt_2$turnout_p,
col = "darkorchid1",
main = "% Turnout",
xlab = "% Turnout",
ylab = "Probability",
breaks=15,
freq = F)
##Compute vote percentage for each of the candidates (number_of_voted / valid)
dt_2[, Zh_p := Zh / valid * 100]
dt_2[, Zu_p := Zu / valid * 100]
dt_2[, Mi_p := Mi / valid * 100]
dt_2[, Pr_p := Pr / valid * 100]
dt_2[, Pu_p := Pu / valid * 100]
##Use stargazer to make a summary table that we can use in Word (for the
##seminar, we will use the text format)
dt_2 %>% stargazer(type = "text")
#Or we can select the columns we need
dt_2[, c("Zh_p","Zu_p","Mi_p","Pr_p","Pu_p", "turnout_p")] %>%
stargazer(covariate.labels = c("% Zhirinovsky",
"% Zuganov",
"% Mironov",
"% Prokhorov",
"% Putin",
"% Turnout"),
title="Summary statistics for 2012 presidential elections",
type = "html", out="election_table.html")
##Display a boxplot of percentage vote for Zuganov
boxplot(dt_2$Zu_p, col = "cadetblue")
##Display a violin plot of percentage vote for Mironov
vioplot(dt_2$Mi_p, col = "cadetblue")
##Display a density plot of valid percentage
#We need to compute %valid
dt_2[,valid_p := valid / turnout * 100]
plot(density(dt_2$valid_p),
main = "% Valid votes distribution",
xlab = "% Valid votes",
lwd = 3,
col = "olivedrab")
##Display a scatterplot where X = percentage vote for Putin and Y = percentage
##turnout
reg_colors <- dt_2$RegType %>%
as.character %>%
replace(., . == "oblast", "#e41a1c") %>%
replace(., . == "respublika", "#377eb8") %>%
replace(., . == "krai", "#4daf4a") %>%
replace(., . == "okrug", "#984ea3") %>%
replace(., . == "gorod", "#ff7f00")
plot(x = dt_2$Pu_p, y = dt_2$turnout_p,
main = "% Turnout vs % Putin",
xlab = "% Putin",
ylab = "% Turnout",
pch = 19,
col = reg_colors
)
##Display a correlation matrix and a fancy correlation table for votes for
##different candidates and percentage turnout
cor.mat <- cor(dt_2[, c("Zh_p","Zu_p","Mi_p","Pr_p","Pu_p", "turnout_p")])
#Rename columns and rows to something readable
rownames(cor.mat) <- c("% Zhirinovsky",
"% Zuganov",
"% Mironov",
"% Prokhorov",
"% Putin",
"% Turnout")
colnames(cor.mat) <- c("% Zhirinovsky",
"% Zuganov",
"% Mironov",
"% Prokhorov",
"% Putin",
"% Turnout")
print(cor.mat)
corrplot(cor.mat,
method = "color",
type = "lower",
addCoef.col = "black")
##Use stargazer to make a correlation table that we can use in Word (for the
##seminar, we will use the text format)
cor.mat %>% stargazer(type = "html", out = "election_correlations.html")
##Display 5 regions with the most and the least votes for Putin
#Most votes
dt_2[order(-Pu_p)][1:5] %>% View()
#Least votes
dt_2[order(Pu_p)][1:5] %>% View()
##Display a heatmap (or maybe a 3d plot) of percentage for Putin vs Percentage
##turnout
el_2d_den <- MASS::kde2d(dt_2$Pu_p, dt_2$turnout_p)
filled.contour(el_2d_den, color.palette = heat.colors)
contour(el_2d_den, add = TRUE)
##Test the valid percentage and votes for Putin for normality
##density plots
mean_pu <- dt_2$Pu_p %>% mean()
sd_pu <- dt_2$Pu_p %>% sd()
plot(density(dt_2$Pu_p),
main = "% Putin distribution",
xlab = "% Putin",
lwd = 3,
col = "olivedrab")
curve(dnorm(x, mean = mean_pu, sd = sd_pu),
col = "red",
lwd = 3,
add = TRUE,
)
##QQ plot
qqPlot(dt_2$Pu_p)
##Anderson-Darling test
ad.test(dt_2$Pu_p)
|
b9a44b33960328f84e179fff47cd207127b90bee
|
e1eba8f8812ff239d21dd5b1f348ecf62e48ddc9
|
/R/utils.R
|
1f5e90cd50c03ab9cb6862f64b21ecc661267c4d
|
[] |
no_license
|
lorenzwalthert/namespaces
|
e5c60259f5e2f86c032f6da16af76a22b9cd93af
|
1d7c95f54bf1202068789b4706a0dcc66d126ef3
|
refs/heads/master
| 2020-03-09T09:51:50.753911
| 2019-05-06T09:59:17
| 2019-05-06T09:59:17
| 128,722,777
| 7
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,041
|
r
|
utils.R
|
#' Decode base64
#'
#' Decodes a base64-encoded string (a common format returned by the
#' GitHub API) and splits the decoded text into one element per line.
#' @keywords internal
decode <- function(encoded) {
  decoded_text <- rawToChar(base64enc::base64decode(encoded))
  strsplit(decoded_text, "\n")[[1]]
}
#' Turn key value pairs into a string
#'
#' Builds a URL-query-style string of the form "key1=value1&key2=value2"
#' from named arguments, then drops the result when it is empty.
#' @param ... named arguments where the name is the key and the
#'   value is the value.
#' @return A character vector containing the "&"-joined "key=value"
#'   pairs, with empty strings removed.
#' @keywords internal
key_value_pair_to_chr <- function(...) {
  values <- list(...)
  keys <- names(values)
  paste(keys, unname(values), sep = "=", collapse = "&") %>%
    remove_emtpy_chr()
}
# Drop empty strings from a character vector.
# (The typo in the name is kept for backward compatibility with callers.)
remove_emtpy_chr <- function(chr) {
  chr[chr != ""]
}
# Strip a '#' comment (and everything after it) from each element.
remove_comments <- function(lines) {
  gsub(pattern = "#.*$", replacement = "", x = lines)
}
#' Optionally flatten an object into an unnamed vector
#'
#' @param x object to flatten.
#' @param deframe Whether or not to flatten (unlist and drop names).
#' @keywords internal
may_unlist <- function(x, deframe) {
  if (!deframe) {
    return(x)
  }
  unname(unlist(x))
}
# Return the first element of a vector.
first <- function(x) {
  nth(x, 1L)
}

# Return the final element of a vector.
last <- function(x) {
  nth(x, length(x))
}

# Return the n-th element of a vector.
nth <- function(x, n) {
  x[n]
}
|
2ba4d806c5fc5b7758b344d5de72d08a74ce3f3b
|
ace90651f890d21104b1f17d55bb5e377402aa55
|
/R/ba_describe-methods.R
|
7961e953edf76342cafbc446e31db76956f04bbe
|
[] |
no_license
|
c5sire/brapix
|
da7959e804c85cb64e952dbe351df82fdd555974
|
58dd8d05553f30c861b6acca8e18ccb13660a219
|
refs/heads/master
| 2021-05-02T03:00:56.757500
| 2018-02-09T12:46:33
| 2018-02-09T12:46:33
| 120,891,028
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 649
|
r
|
ba_describe-methods.R
|
#' ba_describe.ba_locations
#'
#' describe method for a ba_locations object: prints the total number of
#' locations and how many (and what share) of them lack latitude/longitude
#'
#' @param x a brapi_locations object
#' @param ... other print parameters
#' @author Reinhard Simon
#' @example inst/examples/ex-describe.R
#' @family brapiutils
#' @export
ba_describe.ba_locations <- function(x, ...) {
  # Print in console
  missing_geo <- x[is.na(x$latitude), ] # rows without a latitude value
  cpl <- nrow(x) # total number of locations
  mis <- nrow(missing_geo) # locations missing lat/lon
  pct <- mis / cpl * 100 # share of incomplete records
  cat(paste0("n locations = ", cpl, "\n"))
  cat(paste0("n locations with missing lat/lon = ", mis, " (", pct, "%) \n\n"))
  return(invisible())
}
|
d7ca8814731c9a2e42268784d770430557d9d1d3
|
f2f213e423ddee153d8c67f725f5be3ed7093c00
|
/Statistical Functions/simpleRegression.R
|
441d9653209eb8adc006a75127c89c7395a8571d
|
[] |
no_license
|
dawu29/RStudio-exersices
|
ab79ddea635b703071f00a35d7f964ce7ca6c669
|
e2d1c61e2da0cb3b1633555c7b89f42c0a5b5ee4
|
refs/heads/main
| 2023-02-08T20:04:07.082051
| 2020-12-30T02:50:29
| 2020-12-30T02:50:29
| 315,168,044
| 0
| 0
| null | 2020-11-23T05:17:51
| 2020-11-23T01:16:15
|
R
|
UTF-8
|
R
| false
| false
| 822
|
r
|
simpleRegression.R
|
#-------------------------------------------------------------------
# SIMPLE REGRESSION
#-------------------------------------------------------------------
# Fits y = beta0 + beta1*x by ordinary least squares "by hand", then
# reproduces the same fit with R's built-in lm() for comparison.
x<-c(6,6.3,6.5,6.8,7,7.1,7.5,7.5,7.6)
y<-c(39,58,49,53,80,86,115,124,104)
plot(x,y,main="Simple Linear Regression")
# Corrected sums of squares / cross-products
Sxy = sum((x-mean(x))*(y-mean(y))) # mean(x) is \bar{x}
Sxx = sum((x-mean(x))^2)
# Least-squares slope and intercept
beta1hat = Sxy/Sxx
beta0hat = mean(y) - beta1hat*mean(x)
beta0hat
beta1hat
# Fitted values and the ANOVA decomposition SST = SSR + SSE
yhat = beta0hat+beta1hat*x
SSE = sum((y-yhat)^2)
n = length(y)
SST = sum((y-mean(y))^2)
SSR = SST - SSE
# Coefficient of determination
R2 = SSR/SST
R2
# Standard error of the slope: sqrt(MSE/Sxx) with MSE = SSE/(n-2)
stdError = sqrt((SSE/(n-2))/Sxx)
stdError
# t statistic for H0: beta1 = 0
tstat = beta1hat/stdError
tstat
# Two-sided p-value (this form assumes tstat > 0, which holds here)
2*pt(tstat, df=n-2, lower.tail = FALSE)
# use the R package
lm.out<-lm(y~x)
lm.out
summary(lm.out)
lines(x,fitted(lm.out))
anova(lm.out)
|
18ddfc176602040b6bdd7c758ff163429f39a546
|
a29dba249bbd87c29d731a5b794771fda5cf5117
|
/R/評估/IG.r
|
d2780a5304c6e6ed6179ea92528e6ad7401888f5
|
[] |
no_license
|
DaYi-TW/Data-science
|
30b4f009c074c7fe9a14e9d963dde37c127802c5
|
ce8f5dfcf463a25b5a868fe64014d4311c633a18
|
refs/heads/main
| 2023-06-29T01:32:59.259537
| 2021-07-21T03:36:34
| 2021-07-21T03:36:34
| 370,018,467
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 573
|
r
|
IG.r
|
# Input:  class_label: the class (target) attribute
#         fea: the feature attribute(s) to evaluate
# Output: eval_value: a data frame with each feature's information-gain (IG) value
# NOTE(review): InfoGainAttributeEval is presumably RWeka's evaluator --
# confirm that RWeka is loaded before this function is called.
IG=function(class_label,fea){
fea=as.data.frame(fea)
# One row per feature: column 1 = feature name, column 2 = its IG value
eval_value=as.data.frame(matrix(,ncol(fea),2))
colnames(eval_value)=c("feature","IG")
eval_value[,1]=colnames(fea)
# Evaluate each feature column against the class label, one at a time
eval_value[,2]=sapply(1:ncol(fea),FUN=function(i,fea,class_label){
# Pair the i-th feature with the class label; both must be factors
eval=cbind(fea[i],class_label)
eval=as.data.frame(lapply(eval,as.factor))
colnames(eval)[ncol(eval)]='class'
return(InfoGainAttributeEval(class ~ . , data = eval))
},fea=fea,class_label=class_label)
return(eval_value)
}
|
5fbfb4f5ba43d71878713b4d744c096c48f66ac0
|
16b3d48264d6c78a6258f261543036d9a6284ae0
|
/Survival Analysis/Survival Analysis.R
|
617358abae4943054254a3a1b4f8c9f43e368f23
|
[] |
no_license
|
staciewow/Statistics-in-R
|
273a97fb613bed1386960148e4afd30d78804989
|
bbadba4c4b19ea73c762eb9cad339ee8d2936c9e
|
refs/heads/master
| 2020-03-06T20:43:26.241540
| 2018-03-28T00:45:47
| 2018-03-28T00:45:47
| 127,060,475
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,333
|
r
|
Survival Analysis.R
|
# About Survival Analysis
library(OIsurv) # Includes the "survival" and "KMsurv" packages used for analysis and data sets
#other packages in the market, this isn't the only one for survival analysis
# What is survival analysis? - A set of methods for analyzing data where the outcome variable is the time until the occurrence of an even of interest, e.g. death.
# Why not linear regression? - Survival times are always positive and regression cannot handle the censoring of observations, e.g. in a given study if some of the patients survive past when the data is collected, these patient observations represent a right censor. Another cause of censoring is from patients dropping out of the study. Unlike regression models, survival models correctly incorporate information from both censored and uncensored observations.
# In survival analysis we can estimate two functions dependent on time:
# [1] The survival function - gives, for every time, the probability of surviving (not experiencing the event)
# [2] The hazard function - gives the potential that the event will occur, per time unit, given that an individual has survived up to the specific time.
# Functions in the survival packages apply methods to Surv objects, which are created by the Surv() function.
# Censoring
library(OIsurv)
# Here's a dataset that looks at survival times for individuals with a certain type of tumor
data(tongue)
attach(tongue)
# Let's start by looking at group 1 only:
g1 <- Surv(time[type==1],delta[type==1])
#type: only look at type 1 tongue cancer
g1
# shows us an ordered list of survival times, plus signs represent observations that are right censored.
detach(tongue)
# Here's an example of left-truncated right-censored observations:
data(psych)
p1 <- with(psych, Surv(age,age+time, death)) # note I have to use the with function here because I did not attach psych
#age + time = the age when the death is measured, either dead or still alive
p1
# Interpretation for first observation: Patient entered study at 51 years of age and survived until 52 years old.
# Estimating the Survival Function with Kaplan-Meier and Pointwise Confidence Intervals
library(OIsurv)
data(tongue)
g1 <- with(tongue, Surv(time[type==1],delta[type==1]))
# The Kaplan-Meier estimate is a nonparametric MLE of the survival function, S(t)
# Fitting a survival function like you would a regression...
# Here we use the simplest model where we look at the survival object against an intercept.
fit <- survfit(g1~1, data = tongue, conf.int = .95, conf.type = "log") # for 95% confidence interval with interval type being a log function (could be linear with "plain" or could be log(-log(t))) with "log-log"
fit
summary(fit) # survival = Kaplan Meier estimate at each time
plot(fit, main = "Kaplan-Meier estimate with 95% point-wise confidence", xlab = "Time (weeks)", ylab = "Survival Function", xlim = c(0, 200))
#survival probability is plotted, which also is the 4th column in the summary(fit)
# shows us the survival probability for each week. The confidence intervals are valid only pointwise; the confidence range does not capture with 95% confidence over the entire range of time values, but only the confidence range for a particular time value.
# we can also split a Kaplan Meier estimate across a specific variable, e.g.:
## Survival object from (time, event indicator) pairs for the tongue data.
g2 <- with(tongue, Surv(time, delta))
#another survival subject: not only look at type1, but look at type 1 and 2.
## Kaplan-Meier fit stratified by tumor type, log-transformed 95% CIs.
fit2 <- survfit(g2~type, data = tongue, conf.int = .95, conf.type = "log")
summary(fit2)
plot(fit2, main = "Kaplan-Meier estimates", xlab = "Time (weeks)", ylab = "Survival Function", xlim = c(0, 200), lty=c(1,2))
legend('topright', c("Type1","Type2"), lty=c(1,2))
#the question is: are these 2 basically the same or significantly different?
# Comparing Two Survival Curves
# Let's do a test to see if the two survival curves above are statistically different.
## Log-rank test for equality of the two survival curves.
survdiff(Surv(time, delta)~type, data = tongue)
# We reject the null that both the survival functions are the same at the 90% confidence level; however, we fail to reject the null at the 95% level.
#p= 0.0949 , with 95% ci, they are basically the same.
#might be different result with 90% ci.
# Simultaneous Confidence Intervals
library(OIsurv)
data(tongue)
## Restrict to type-1 tumors for the confidence-band example.
g3 <- with(tongue, Surv(time[type==1],delta[type==1]))
# If we'd like to create confidence bands that capture the true survival function with a 95% accuracy we will need to use simultaneous confidence intervals. This can be done with the confBands() function.
ci <- confBands(g3, confLevel = .95, confType = "log-log", type = "hall")
#confband!!!
fit3 <- survfit(g3~1, data = tongue, conf.int = .95, conf.type = "log-log")
plot(fit3, main = "Kaplan-Meier estimate with 95% point-wise confidence", xlab = "Time (weeks)", ylab = "Survival Function", xlim = c(0, 200))
lines(ci, lty = 3, col = "red")
legend('topright', c("Survival Estimate","Pointwise Interval", "Simultaneous Interval"), lty=c(1,2,3), col = c("black", "Black", "red"))
#when it close to end, they started to expand, because there were less people left.
# Cumulative Hazard Function
library(OIsurv)
data(tongue)
g4 <- with(tongue, Surv(time[type==1],delta[type==1]))
fit4 <- summary(survfit(g4~1, data = tongue, conf.int = .95, conf.type = "log-log"))
# The cumulative hazard function (H(t)) and the survival function S(t) are related in the following way for continuous data:
# S(t) = exp[-H(t)]
# Lets use our survival function to calculate estimates for the hazard function (potential particular event will occur):
## H(t) = -log(S(t)); the last value is repeated so the step plot extends
## to the right-hand edge of the x-axis (x goes to 200 below).
H <- -log(fit4$surv)
H <- c(H, tail(H,1))
plot(c(fit4$time, 200), H, main = "Cumulative Hazard Functions", xlab = "Time (weeks)", ylab = "Hazard Functions", lty = 1, type = "s", ylim = range(H))
# By realizing H(t) = f(t)/S(t) H(t) can be interpreted as, "the density of events at t, divided by the probability of surviving to that duration without experiencing the event". Essentially it's a ratio that measures how likely the event will occur in a standardized form.
# Another approximation of the cumulative hazard function is sum[(the number of individuals at risk)/(the number of events that took place after time, t)]:
## Nelson-Aalen-style estimator: cumulative sum of events / number at risk.
H.2 <- cumsum(fit4$n.event / fit4$n.risk)
H.2 <- c(H.2, tail(H.2,1))
points(c(fit4$time, 200), H.2, lty = 2, type = "s")
legend("topleft", c("H","H.2"), lty = c(1,2))
|
c95b296a63ae042edcad428b6808811b41a47ef0
|
3f705d76c0a99c5a41b6722f347b56f981b4df8c
|
/scripts/Q2.3.r
|
cf645aea89228c696471d7d0860be358a3da4544
|
[] |
no_license
|
cypowers/multivariance
|
f46cb66f45222e707aa1ff4174d07775a97c853e
|
74ca254dde840cd9fcbaf44bd4068148b54e5477
|
refs/heads/master
| 2020-03-17T05:39:50.523326
| 2018-05-14T13:41:40
| 2018-05-14T13:41:40
| 133,324,623
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 436
|
r
|
Q2.3.r
|
# Q2.3: principal component analysis of the example data set.
data <- read.table("data/2.3 data.txt", header = TRUE)
data
# Drop the first column (an identifier) and keep the five measurement variables.
data2 <- data[2:6]
data2
summary(data2)
# FIX: covariance was previously computed on the full data frame (including
# the identifier column); compute it on the measurement variables like every
# other statistic in this script.
S <- cov(data2) # Covariance
R <- cor(data2) # Correlation
R
# Eigen-decomposition of the correlation matrix gives the PCA solution.
uniq_root <- eigen(R)
uniq_root
# Proportion of total variance explained by each component.
uniq_root$values/sum(uniq_root$values)
# princomp() with cor=TRUE reproduces the correlation-based PCA.
p_data <- princomp(data2, cor=TRUE)
summary(p_data)
screeplot(p_data, type="lines", pch=19, main="Scree Plot")
p_data$loadings
biplot(p_data, cex=0.7, col=c("red", "blue"), main="Biplot")
names(uniq_root)
|
fa0e72d92d2385bfdd197f7dc1cc5035271ca384
|
d167ca17d4649c6122c49696dca4a4187cbdbe9b
|
/loss.small.evals.R
|
1d2cb3d391ed9d30e67bd9eebe0352b3853d34f3
|
[] |
no_license
|
tdhock/changepoint-data-structure
|
ec5e1ba5857862862626f14c33b6288bdf084e65
|
e352ced2c313ea1f08e6a92c00422943c21c363d
|
refs/heads/master
| 2021-06-11T14:31:51.893602
| 2021-04-21T22:16:48
| 2021-04-21T22:16:48
| 169,180,248
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 634
|
r
|
loss.small.evals.R
|
## Count modelSelectionFwd evaluations for each (profile.id, chromosome)
## segmentation problem in the loss.small data set, and save the summary.
source("packages.R")
loss.small <- readRDS("loss.small.rds")
nb.evals <- loss.small[, {
  ## Keep only rows where the loss strictly decreases with model size;
  ## the model-selection algorithm requires a decreasing loss sequence.
  is.dec <- c(TRUE, diff(loss) < 0)
  dt <- data.table(loss, changes)[is.dec]
  ## Call the C routine in penaltyLearning directly so we can read back
  ## the per-model evaluation counts in result$evals.
  result <- .C(
    "modelSelectionFwd_interface",
    loss=as.double(dt$loss),
    complexity=as.double(dt$changes),
    N=as.integer(nrow(dt)),
    models=integer(nrow(dt)),
    breaks=double(nrow(dt)),
    evals=integer(nrow(dt)),
    PACKAGE="penaltyLearning")
  ## Summaries per problem: models in/out and evaluation counts.
  with(result, list(
    models.in=nrow(dt),
    models.out=N+1,
    max.evals=max(evals),
    total.evals=sum(evals)
  ))
}, by=list(profile.id, chromosome)]
saveRDS(nb.evals, "loss.small.evals.rds")
|
436a3b39643391b74e830e4de36e3347b7a58579
|
e81f55d813e5cbd4ec78a62aed26cf9c26bda877
|
/scripts/at2masterdownloads.R
|
f19cc386594680c1490cf50244872fc7306cedeb
|
[] |
no_license
|
ewiik/lac
|
47065852aef8a057c1426645ac268dcb1018e08a
|
dbe5601f76ff7f609b8a32022fc519e35d72d30b
|
refs/heads/master
| 2021-01-10T11:50:50.447189
| 2016-03-07T01:31:11
| 2016-03-07T01:31:11
| 45,217,985
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,224
|
r
|
at2masterdownloads.R
|
## read in all supporting data for AT2 and get it organised
## using master file for pigs and C/N stuff....
## FIXME: no actual diatom counts in Dropbox????
## read in files
master <- read.csv("data/private/AT2_MasterSpreadsheet_15-12-15.csv") # rundepth is topdepth
cladorel <- read.csv("data/private/AT2-Cladocera-counts.csv") # this is file last modified 8th Feb 2015
cladoraw <- read.csv("data/private/AT2-Cladocera-counts-raw.csv") # this is file last modified 8th Feb 2015
cladoraw[is.na(cladoraw)] <- 0 # replace NA with 0, since these are true 0s
chiroraw <- read.csv("data/private/AT2-chiro-counts-raw.csv") # this is the file last modified
## FIXME: check with Maarten that this one (sheet "cleaned") is actually raw data
chiroraw[is.na(chiroraw)] <- 0 # replace NA with 0, since these are true 0s
plant <- read.csv("data/private/at2macroallplantcorr.csv")
## create pigments: columns between the Phaeo and phytin.a markers.
pigseq <- grep("Phaeo|ytin.a", names(master))
pigs <- cbind(master$Running.Depth, master[,pigseq[1]:pigseq[2]])
names(pigs)[1] <- "rundepthtop"
## create chemistry; X. denotes %.
geoseq <- grep("BioS|C.N", names(master))
geos <- cbind(master$Running.Depth, master$LOI, master[,geoseq[1]:geoseq[2]])
names(geos)[1:2] <- c("rundepthtop", "LOI")
## correct rundepth for clados; use rundepth for macros since same sample material used
## --> know that last sample same since clados also terminate at 200something with larger gap
## in the last two samples
## FIX: the row-count variable was named `max`, shadowing base::max();
## renamed to n_plant (and `take` to n_clado) for clarity and safety.
n_clado <- nrow(cladorel)
n_plant <- nrow(plant)
taken <- plant[(n_plant - n_clado + 1):n_plant, 1]
cladorel$Depth <- taken
names(cladorel)[names(cladorel) == "Depth"] <- "rundepthtop"
cladoraw <- cbind(taken, cladoraw)
names(cladoraw)[1] <- "rundepthtop"
chiroraw <- cbind(taken, chiroraw)
names(chiroraw)[1] <- "rundepthtop"
## create initial Stratiplots for initial discussion
pdf("data/private/allplots.pdf", width = 15, onefile = TRUE)
Stratiplot(geos[,-1], geos[,1], type = "h", varTypes = "absolute", col = "black")
Stratiplot(pigs[,-1], pigs[,1], type = "h", varTypes = "absolute", col = "black")
Stratiplot(cladoraw[,-1], cladoraw[,1], type = "h", varTypes = "absolute", col = "black")
Stratiplot(chiroraw[,-1], chiroraw[,1], type = "h", varTypes = "absolute", col = "black")
dev.off()
|
f4bc7b0d892f8d51bc156b200d5b4d1d8ec2d59b
|
b761234cdc3b07e81dbc05da5ec1f726650ee7bd
|
/R/read_officer.R
|
3e17415522f8083d9275350bab76a6c5792c3df1
|
[
"MIT"
] |
permissive
|
elipousson/officerExtras
|
1d76ee389f2d649cf397199d00fb6894fd42eaa0
|
f491277b69e659bb65f65f258878516b2c997e78
|
refs/heads/main
| 2023-08-27T01:32:07.879195
| 2023-08-26T16:51:15
| 2023-08-26T16:51:15
| 606,570,447
| 8
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,100
|
r
|
read_officer.R
|
#' Read a docx, pptx, potx, or xlsx file or use an existing object from officer
#' if provided
#'
#' [read_officer()] is a variant of [officer::read_docx()],
#' [officer::read_pptx()], and [officer::read_xlsx()] that allows users to read
#' different Microsoft Office file types with a single function.
#' [read_docx_ext()], [read_pptx_ext()], and [read_xlsx_ext()] are wrappers for
#' [read_officer()] that require the matching input file type. All versions
#' allow both a filename and path (the officer functions only use a path). If a
#' rdocx, rpptx, or rxlsx class object is provided to x, the object is checked
#' based on the fileext parameter and then returned as is.
#'
#' @param filename,path File name and path. Default: `NULL`. Must include a
#' "docx", "pptx", or "xlsx" file path. "dotx" and "potx" files are also
#' supported.
#' @param x A rdocx, rpptx, or rxlsx class object If x is provided, filename and
#' path are ignored. Default: `NULL`
#' @param docx,pptx,xlsx A rdocx, rpptx, or rxlsx class object passed to the x
#' parameter of [read_officer()] by the variant functions. Defaults to `NULL`.
#' @param allow_null If `TRUE`, function supports the default behavior of
#' [officer::read_docx()], [officer::read_pptx()], or [officer::read_xlsx()]
#' and returns an empty document if x, filename, and path are all `NULL`. If
#' `FALSE`, one of the three parameters must be supplied.
#' @param quiet If `FALSE`, warn if docx is provided when filename and/or path
#' are also provided. Default: `TRUE`.
#' @inheritParams check_office_fileext
#' @return A rdocx, rpptx, or rxlsx object.
#' @seealso
#' [officer::read_docx()]
#' @rdname read_officer
#' @export
#' @importFrom cli cli_alert_warning cli_alert_success symbol
#' @importFrom rlang current_call
#' @importFrom officer read_docx
read_officer <- function(filename = NULL,
                         path = NULL,
                         fileext = c("docx", "pptx", "xlsx"),
                         x = NULL,
                         arg = caller_arg(x),
                         allow_null = TRUE,
                         quiet = TRUE,
                         call = parent.frame(),
                         ...) {
  cli_quiet(quiet)
  has_input_file <- !is_null(c(filename, path))
  if (is.null(x)) {
    if (has_input_file || !allow_null) {
      # Resolve filename/path into one validated path and derive the
      # actual extension from it.
      path <- set_office_path(filename, path, fileext = fileext, call = call)
      filename <- basename(path)
      fileext <- str_extract_fileext(path)
    } else {
      # No input supplied at all: create an empty document of the
      # requested (first/matched) type.
      fileext <- match.arg(fileext)
      if ("docx" %in% fileext) {
        # Use the package styles template so styles are available in the
        # empty document.
        path <- system.file(
          "template", "styles_template.docx",
          package = "officerExtras"
        )
      }
      new_obj <- switch(fileext,
        "docx" = "empty document",
        "pptx" = "pptx document with 0 slides",
        "xlsx" = "xlsx document with 1 sheet"
      )
      cli::cli_alert_success("Creating a new {new_obj}")
    }
    # Template extensions (dotx/potx) open with the matching reader.
    x <- rlang::try_fetch(
      switch(fileext,
        "docx" = officer::read_docx(path),
        "dotx" = officer::read_docx(path),
        "pptx" = officer::read_pptx(path),
        "potx" = officer::read_pptx(path),
        "xlsx" = officer::read_xlsx(path)
      ),
      error = function(cnd) {
        cli::cli_abort("{.val {fileext}} file can't be read.", parent = cnd)
      },
      warning = function(cnd) {
        cli::cli_warn(message = cnd)
      }
    )
  } else {
    if (has_input_file) {
      cli::cli_alert_warning(
        "{.arg filename} and {.arg path} are ignored if {.arg {arg}} is provided."
      )
    }
    check_officer(x, what = paste0("r", fileext), call = call, ...)
  }
  if (!is.null(filename)) {
    # FIX: interpolate the actual file name into the message (the
    # placeholder had been replaced by a literal string).
    cli::cli_alert_success(
      "Reading {.filename {filename}}{cli::symbol$ellipsis}"
    )
  }
  # FIX: when `x` is supplied, `fileext` may still be the full choices
  # vector, so `fileext != "xlsx"` could be length 3 (an error in R >= 4.2).
  # Test the class of the object instead; xlsx workbooks have no
  # doc_properties to display.
  if (!inherits(x, "rxlsx")) {
    cli_doc_properties(x, filename)
  }
  invisible(x)
}
#' @name read_docx_ext
#' @rdname read_officer
#' @export
read_docx_ext <- function(filename = NULL,
                          path = NULL,
                          docx = NULL,
                          allow_null = FALSE,
                          quiet = TRUE) {
  ## Convenience wrapper: read_officer() constrained to Word documents.
  read_officer(
    filename = filename, path = path, fileext = "docx",
    x = docx, allow_null = allow_null, quiet = quiet
  )
}
#' @name read_pptx_ext
#' @rdname read_officer
#' @export
read_pptx_ext <- function(filename = NULL,
                          path = NULL,
                          pptx = NULL,
                          allow_null = FALSE,
                          quiet = TRUE) {
  ## Convenience wrapper: read_officer() constrained to PowerPoint files.
  read_officer(
    filename = filename, path = path, fileext = "pptx",
    x = pptx, allow_null = allow_null, quiet = quiet
  )
}
#' @name read_xlsx_ext
#' @rdname read_officer
#' @export
read_xlsx_ext <- function(filename = NULL,
                          path = NULL,
                          xlsx = NULL,
                          allow_null = FALSE,
                          quiet = TRUE) {
  ## Convenience wrapper: read_officer() constrained to Excel workbooks.
  read_officer(
    filename = filename, path = path, fileext = "xlsx",
    x = xlsx, allow_null = allow_null, quiet = quiet
  )
}
#' List document properties for a rdocx or rpptx object
#'
#' @keywords internal
#' @noRd
#' @importFrom cli cli_rule symbol cli_dl
cli_doc_properties <- function(x, filename = NULL) {
  # List the document properties of a rdocx/rpptx object via cli; returns
  # NULL invisibly-ish when no properties could be read.
  props <- officer_properties(x)
  if (is_null(props)) {
    return(props)
  }
  msg <- "{cli::symbol$info} document properties:"
  if (!is.null(filename)) {
    # FIX: interpolate the file name (the placeholder had been replaced
    # by a literal string).
    msg <- "{cli::symbol$info} {.filename {filename}} properties:"
  }
  cli::cli_rule(msg)
  # Drop empty properties so the definition list shows only set fields.
  cli::cli_dl(
    items = discard(props, function(x) {
      x == ""
    })
  )
}
#' Get doc properties for a rdocx or rpptx object as a list
#'
#' [officer_properties()] is a variant on [officer::doc_properties()] that will
#' warn instead of error if document properties can't be found
#'
#' @param x A rdocx or rpptx object.
#' @param values A named list with new properties to replace existing document
#' properties before they are returned as a named list.
#' @param keep.null Passed to [utils::modifyList()]. If `TRUE`, retain
#' properties in returned list even if they have `NULL` values.
#' @returns A named list of existing document properties or (if values is
#' supplied) modified document properties.
#' @inheritParams check_officer
#' @export
#' @importFrom officer doc_properties
#' @importFrom rlang set_names
#' @importFrom utils modifyList
#' @importFrom cli cli_warn
officer_properties <- function(x,
                               values = list(),
                               keep.null = FALSE,
                               call = caller_env()) {
  check_officer(x, what = c("rdocx", "rpptx"), call = call)
  ## Reading properties can fail on malformed documents; degrade the
  ## error to a warning and return NULL in that case.
  existing <- rlang::try_fetch(
    officer::doc_properties(x),
    error = function(cnd) {
      cli::cli_warn(
        "Document properties can't be found for {.filename {x}}",
        parent = cnd
      )
      NULL
    }
  )
  if (is_null(existing)) {
    return(existing)
  }
  ## Convert the tag/value data frame into a named list, then overlay any
  ## caller-supplied replacement values.
  prop_list <- rlang::set_names(
    as.list(existing[["value"]]),
    existing[["tag"]]
  )
  utils::modifyList(prop_list, values, keep.null)
}
#' Set filepath for docx file
#'
#' @keywords internal
#' @noRd
#' @importFrom cli cli_vec
set_office_path <- function(filename = NULL,
                            path = NULL,
                            fileext = c("docx", "pptx", "xlsx"),
                            call = parent.frame()) {
  # Combine filename/path into a single path and validate its extension.
  check_string(filename, allow_null = TRUE, call = call)
  check_string(path, allow_null = TRUE, call = call)
  if (is.null(path)) {
    if (is.null(filename)) {
      args <- c("filename", "path")
      cli::cli_abort("{.arg {args}} can't both be {.code NULL}")
    }
    path <- filename
  } else if (!is.null(filename)) {
    path <- file.path(path, filename)
  }
  fileext <- match.arg(fileext, several.ok = TRUE)
  # Template formats are accepted alongside their document counterparts:
  # potx with pptx, dotx with docx.
  # FIX: after match.arg(), fileext can only contain "docx"/"pptx"/"xlsx",
  # so the previous test `"dotx" %in% fileext` was always FALSE and dotx
  # paths were never accepted here; gate dotx acceptance on "docx".
  if ((("pptx" %in% fileext) && is_fileext_path(path, "potx")) ||
    (("docx" %in% fileext) && is_fileext_path(path, "dotx"))) {
    return(path)
  }
  check_office_fileext(
    path,
    arg = cli_vec_last(
      c("filename", "path")
    ),
    fileext = fileext,
    call = call
  )
  path
}
|
ffcf0d0879bd57cce3654b169bce71fe171265e4
|
abdf3380f36b8fd63a6390aa54e73730417570bc
|
/tests/testthat.R
|
2b5f9fb96764396bd133bcb4a710c14481a8cb98
|
[] |
no_license
|
dpique/oncomix
|
a2f25d1cffc3415de07799f4c6b831d9242d0bba
|
ec0a61f8249bf9b36f633206d479b01289410031
|
refs/heads/master
| 2021-05-23T06:08:24.259863
| 2017-12-17T17:06:08
| 2017-12-17T17:06:08
| 94,810,609
| 2
| 1
| null | 2017-08-15T17:50:33
| 2017-06-19T18:57:16
|
HTML
|
UTF-8
|
R
| false
| false
| 62
|
r
|
testthat.R
|
## Standard testthat entry point: runs every test in tests/testthat/ for
## the oncomix package.
library(testthat)
library(oncomix)
test_check("oncomix")
|
0fa1c4236de2079b954c3c977d9f2f9663ddf387
|
a518c2ca0ac4edb94ccbf144e7cd58f13b512bc6
|
/man/nzmaths.Rd
|
3be048c50f2f89f15366f154ee313ac6321b1f1f
|
[] |
no_license
|
tslumley/svylme
|
e6f5dd0fab582c4cfd35b5ecd5e6f272e029cdf9
|
2a1305ec0f1c1b0959146569c28d899431fcc939
|
refs/heads/master
| 2023-08-10T09:42:56.243181
| 2023-07-21T00:37:47
| 2023-07-21T00:37:47
| 127,377,020
| 26
| 6
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,592
|
rd
|
nzmaths.Rd
|
\name{nzmaths}
\alias{nzmaths}
\docType{data}
\title{
Maths Performance Data from the PISA 2012 survey in New Zealand
}
\description{
Data on maths performance, gender, some problem-solving variables and some school resource variables.
}
\usage{data("nzmaths")}
\format{
A data frame with 4291 observations on the following 26 variables.
\describe{
\item{\code{SCHOOLID}}{School ID}
\item{\code{CNT}}{Country id: a factor with levels \code{New Zealand}}
\item{\code{STRATUM}}{a factor with levels \code{NZL0101} \code{NZL0102} \code{NZL0202} \code{NZL0203}}
\item{\code{OECD}}{Is the country in the OECD?}
\item{\code{STIDSTD}}{Student ID}
\item{\code{ST04Q01}}{Gender: a factor with levels \code{Female} \code{Male}}
\item{\code{ST14Q02}}{Mother has university qualifications \code{No} \code{Yes}}
\item{\code{ST18Q02}}{Father has university qualifications \code{No} \code{Yes}}
\item{\code{MATHEFF}}{Mathematics Self-Efficacy: numeric vector}
    \item{\code{OPENPS}}{Openness to Problem Solving: numeric vector}
\item{\code{PV1MATH},\code{PV2MATH},\code{PV3MATH},\code{PV4MATH},\code{PV5MATH} }{'Plausible values' (multiple imputations) for maths performance}
\item{\code{W_FSTUWT}}{Design weight for student}
\item{\code{SC35Q02}}{Proportion of maths teachers with professional development in maths in past year}
\item{\code{PCGIRLS}}{Proportion of girls at the school}
\item{\code{PROPMA5A}}{Proportion of maths teachers with ISCED 5A (math major)}
\item{\code{ABGMATH}}{Does the school group maths students: a factor with levels \code{No ability grouping between any classes} \code{One of these forms of ability grouping between classes for s} \code{One of these forms of ability grouping for all classes}}
\item{\code{SMRATIO}}{Number of students per maths teacher}
\item{\code{W_FSCHWT}}{Design weight for school}
\item{\code{condwt}}{Design weight for student given school}
}
}
\source{
A subset extracted from the \code{PISA2012lite} R package, \url{https://github.com/pbiecek/PISA2012lite}
}
\references{
OECD (2013) PISA 2012 Assessment and Analytical Framework: Mathematics, Reading, Science, Problem Solving and Financial Literacy. OECD Publishing.
}
\examples{
data(nzmaths)
oo<-options(survey.lonely.psu="average") ## only one PSU in one of the strata
des<-svydesign(id=~SCHOOLID+STIDSTD, strata=~STRATUM, nest=TRUE,
weights=~W_FSCHWT+condwt, data=nzmaths)
m1<-svy2lme(PV1MATH~ (1+ ST04Q01 |SCHOOLID)+ST04Q01*(PCGIRLS+SMRATIO)+MATHEFF+OPENPS, design=des)
options(oo)
}
\keyword{datasets}
|
8012180937aa933f154baee2f66a56ab8a4ef7f8
|
bfbdfd00872efbec5ac8f449dcb058792baec3a0
|
/R/dic.R
|
b239487d75d37d70482ae77c6f608bbc7403b42e
|
[] |
no_license
|
jags/rjags
|
ad35dda50e96b11ac79af985b1e0a77b89fa28c8
|
e1c94aa8e2e73e4345c3e35abbdd32f72a34045f
|
refs/heads/master
| 2020-04-13T19:37:32.208375
| 2018-10-19T17:02:30
| 2018-10-19T17:02:30
| 163,408,294
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,073
|
r
|
dic.R
|
# R package rjags file R/dic.R
# Copyright (C) 2009-2013 Martyn Plummer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License version
# 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
#
"dic.samples" <-
    function(model, n.iter, thin=1, type="pD", ...)
{
    ## Monitor the deviance and a penalty ("pD" or "popt") from a JAGS model
    ## and return them as a "dic" object.  At least two parallel chains are
    ## required because the penalty monitors use between-chain contrasts.
    if (nchain(model) == 1) {
        stop("2 or more parallel chains required")
    }
    if (!inherits(model, "jags"))
        stop("Invalid JAGS model")
    if (!is.numeric(n.iter) || length(n.iter) != 1 || n.iter <= 0)
        stop("n.iter must be a positive integer")
    ## The deviance/penalty monitors are provided by the JAGS "dic" module.
    load.module("dic", quiet=TRUE)
    limits <- vector("list",2)
    pdtype <- match.arg(type, c("pD","popt"))
    ## Set mean-type monitors for both the deviance and the penalty.
    status <- .Call("set_monitors", model$ptr(), c("deviance",pdtype),
                    limits, limits, as.integer(thin), "mean", PACKAGE="rjags")
    if (!any(status)) {
        stop("Failed to set monitors")
    }
    update(model, n.iter = as.integer(n.iter), ...)
    dev <- .Call("get_monitored_values_flat", model$ptr(), "mean",
                 PACKAGE="rjags")
    for (i in seq(along=dev)) {
        class(dev[[i]]) <- "mcarray"
    }
    ## Clear only those monitors that this call actually set.
    if (status[1]) {
        .Call("clear_monitor", model$ptr(), "deviance", NULL, NULL, "mean",
              PACKAGE="rjags")
    }
    if (status[2]) {
        .Call("clear_monitor", model$ptr(), pdtype, NULL, NULL, "mean",
              PACKAGE="rjags")
    }
    ans <- list("deviance" = dev$deviance, "penalty" = dev[[type]],
                "type" = type)
    class(ans) <- "dic"
    return(ans)
}
"print.dic" <- function(x, digits= max(3, getOption("digits") - 3), ...)
{
    ## Print method for "dic" objects: reports the total mean deviance, the
    ## total penalty (pD or popt), and their sum (penalized deviance).
    deviance <- sum(x$deviance)
    cat("Mean deviance: ", format(deviance, digits=digits), "\n")
    ## x[[2]] holds the per-node penalty; total it over nodes.
    ## FIX: the previous code wrapped this scalar in mean(), a no-op.
    psum <- sum(x[[2]])
    cat(names(x)[[2]], format(psum, digits=digits), "\n")
    cat("Penalized deviance:", format(deviance + psum, digits=digits), "\n")
    invisible(x)
}
## Infix subtraction for "dic" objects: `dic1 - dic2` delegates to diffdic()
## to compute the per-node difference in penalized deviance.
"-.dic" <- function(e1, e2)
{
    diffdic(e1, e2)
}
"diffdic" <- function(dic1, dic2)
{
    ## Per-node difference in penalized deviance between two dic objects.
    ## Both objects must use the same penalty type and monitor the same
    ## set of node names (possibly in a different order).
    if (!identical(dic1$type, dic2$type)) {
        stop("incompatible dic object: different penalty types")
    }
    names1 <- names(dic1$deviance)
    names2 <- names(dic2$deviance)
    if (!identical(names1, names2)) {
        ### Fall back to matching node names in lexicographic order
        if (!identical(sort(names1), sort(names2))) {
            stop("incompatible dic objects: variable names differ")
        }
        ### Permute dic2's values into the element order of dic1
        perm1 <- order(names1)
        perm2 <- order(names2)
        dic2$deviance[perm1] <- dic2$deviance[perm2]
        dic2$penalty[perm1] <- dic2$penalty[perm2]
    }
    total1 <- vapply(dic1$deviance, mean, numeric(1)) +
        vapply(dic1$penalty, mean, numeric(1))
    total2 <- vapply(dic2$deviance, mean, numeric(1)) +
        vapply(dic2$penalty, mean, numeric(1))
    delta <- total1 - total2
    class(delta) <- "diffdic"
    delta
}
"print.diffdic" <- function(x, ...)
{
    ## Report the total difference in penalized deviance together with a
    ## Monte Carlo standard-error estimate over nodes.
    total <- sum(x)
    mc_se <- sqrt(length(x)) * sd(x)
    cat("Difference: ", total, "\n", sep="")
    cat("Sample standard error: ", mc_se, "\n", sep="")
    invisible(x)
}
"waic.samples" <-
    function(model, n.iter, node=NULL, trace=FALSE, thin=1, ...)
{
    ## Monitor pointwise mean densities and log-density variances for the
    ## observed stochastic nodes (or a user-supplied subset) of a JAGS model,
    ## then summarise them into per-chain WAIC estimates via waic.table().
    ## Requires JAGS > 4.3 for the density monitor types used below.
    if (!inherits(model, "jags"))
        stop("Invalid JAGS model")
    if (!is.numeric(n.iter) || length(n.iter) != 1 || n.iter <= 0)
        stop("n.iter must be a positive integer")
    if(! jags.version() > 4.3 ) {
        stop('This function cannot be used with the version of JAGS on your system: consider updating')
    }
    ## node=NULL means "all observed stochastic nodes", encoded as the
    ## special monitor name "deviance"; mixing it with other names is not
    ## supported.
    if(is.null(node)){
        node <- "deviance"
    }else{
        if(is.character(node) && any(node == "deviance")
           && !all(node == "deviance")){
            stop("node name 'deviance' cannot be used: pass node=NULL for all observed stochastic nodes")
        }
    }
    if(!is.character(node) || length(node)==0)
        stop("node must either be NULL or a character string of length >=1")
    if(!is.logical(trace) || length(trace)!=1)
        stop("trace must logical of length 1")
    pn <- parse.varnames(node)
    load.module("dic", quiet=TRUE)
    ## Set the two required monitors (and the optional trace monitor).
    status <- .Call("set_monitors", model$ptr(), pn$names, pn$lower, pn$upper,
                    as.integer(thin), "density_mean", PACKAGE="rjags")
    if (!any(status)) {
        stop("Failed to set a necessary monitor")
    }
    status <- .Call("set_monitors", model$ptr(), pn$names, pn$lower, pn$upper,
                    as.integer(thin), "logdensity_variance", PACKAGE="rjags")
    if (!any(status)) {
        stop("Failed to set a necessary monitor")
    }
    if(trace){
        status <- .Call("set_monitors", model$ptr(), pn$names, pn$lower, pn$upper,
                        as.integer(thin), "logdensity_trace", PACKAGE="rjags")
        if (!any(status)) {
            stop("Failed to set the optional trace monitor")
        }
    }
    update(model, n.iter = as.integer(n.iter), ...)
    ## Retrieve, annotate and clear each monitor in turn.  Each retrieved
    ## value becomes an mcarray with either element names (for deviance-type
    ## or partial-array monitors) or a varname attribute (whole arrays).
    density_mean <- .Call("get_monitored_values", model$ptr(), "density_mean", PACKAGE="rjags")
    for(i in seq(along=density_mean)){
        tname <- names(density_mean)[i]
        curdim <- dim(density_mean[[i]])
        class(density_mean[[i]]) <- "mcarray"
        # Ensure dim and dimnames are correctly set:
        if(is.null(curdim)){
            curdim <- length(density_mean[[i]])
            dim(density_mean[[i]]) <- curdim
        }
        # If this is a deviance-type monitor then set the stochastic node names:
        if(tname=='deviance'){
            attr(density_mean[[i]], "elementnames") <- observed.stochastic.nodes(model, curdim[1])
            # If a partial node array then extract the precise element names:
        }else if(!tname %in% node.names(model)){
            attr(density_mean[[i]], "elementnames") <- expand.varname(tname, dim(density_mean[[i]])[1])
            # Otherwise just set the varname as the whole array:
        }else{
            attr(density_mean[[i]], "varname") <- tname
        }
        .Call("clear_monitor", model$ptr(), pn$names[i], pn$lower[[i]], pn$upper[[i]], "density_mean", PACKAGE="rjags")
    }
    logdensity_variance <- .Call("get_monitored_values", model$ptr(), "logdensity_variance", PACKAGE="rjags")
    for(i in seq(along=pn$names)){
        tname <- names(logdensity_variance)[i]
        curdim <- dim(logdensity_variance[[i]])
        class(logdensity_variance[[i]]) <- "mcarray"
        # Ensure dim and dimnames are correctly set:
        if(is.null(curdim)){
            curdim <- c(variable=length(logdensity_variance[[i]]))
            dim(logdensity_variance[[i]]) <- curdim
        }
        # If this is a deviance-type monitor then set the stochastic node names:
        if(tname=='deviance'){
            attr(logdensity_variance[[i]], "elementnames") <- observed.stochastic.nodes(model, curdim[1])
            # If a partial node array then extract the precise element names:
        }else if(!tname %in% node.names(model)){
            attr(logdensity_variance[[i]], "elementnames") <- expand.varname(tname, dim(logdensity_variance[[i]])[1])
            # Otherwise just set the varname as the whole array:
        }else{
            attr(logdensity_variance[[i]], "varname") <- tname
        }
        .Call("clear_monitor", model$ptr(), pn$names[i], pn$lower[[i]], pn$upper[[i]], "logdensity_variance", PACKAGE="rjags")
    }
    raw <- list(density_mean, logdensity_variance)
    names(raw) <- c('density_mean', 'logdensity_variance')
    if(trace){
        logdensity_trace <- .Call("get_monitored_values", model$ptr(), "logdensity_trace", PACKAGE="rjags")
        for(i in seq(along=pn$names)){
            tname <- names(logdensity_trace)[i]
            curdim <- dim(logdensity_trace[[i]])
            class(logdensity_trace[[i]]) <- "mcarray"
            # Ensure dim and dimnames are correctly set:
            if(is.null(curdim)){
                curdim <- c(variable=length(logdensity_trace[[i]]))
                dim(logdensity_trace[[i]]) <- curdim
            }
            # If this is a deviance-type monitor then set the stochastic node names:
            if(tname=='deviance'){
                attr(logdensity_trace[[i]], "elementnames") <- observed.stochastic.nodes(model, curdim[1])
                # If a partial node array then extract the precise element names:
            }else if(!tname %in% node.names(model)){
                attr(logdensity_trace[[i]], "elementnames") <- expand.varname(tname, dim(logdensity_trace[[i]])[1])
                # Otherwise just set the varname as the whole array:
            }else{
                attr(logdensity_trace[[i]], "varname") <- tname
            }
            .Call("clear_monitor", model$ptr(), pn$names[i], pn$lower[[i]], pn$upper[[i]], "logdensity_trace", PACKAGE="rjags")
        }
        raw <- c(raw, list(logdensity_trace = logdensity_trace))
    }
    # Calculation is always done using running mean/variance:
    waictable <- waic.table(density_mean, logdensity_variance)
    ans <- list(waictable=waictable, mcarray=raw)
    class(ans) <- 'JAGSwaic'
    return(ans)
}
waic.table <- function(density_mean, logdensity_variance){
    # Build a per-chain WAIC summary from monitored mean densities and
    # log-density variances.  Returns a 3 x nchain matrix with rows
    # elpd_waic, p_waic and waic (columns named chain1, chain2, ...).
    if(missing(density_mean) || missing(logdensity_variance)){
        stop('Missing arguments to density_mean and logdensity_variance are not allowed')
    }
    # Collapse variable lists to single matrix (one row per chain, one
    # column per monitored node):
    dm_matrix <- do.call('cbind', lapply(density_mean, function(x){
        if('iteration' %in% names(dim(x))){
            stop('iteration numbers detected in the density_mean')
        }
        cdim <- dim(x)
        # Insert a singleton iteration dimension so as.mcmc.list() accepts it.
        dim(x) <- c(cdim[-length(cdim)], iteration=1, cdim[length(cdim)])
        return(do.call('rbind', as.mcmc.list(x)))
    }))
    ldv_matrix <- do.call('cbind', lapply(logdensity_variance, function(x){
        if('iteration' %in% names(dim(x))){
            stop('iteration numbers detected in the logdensity_variance')
        }
        cdim <- dim(x)
        dim(x) <- c(cdim[-length(cdim)], iteration=1, cdim[length(cdim)])
        return(do.call('rbind', as.mcmc.list(x)))
    }))
    stopifnot(all(dim(dm_matrix)==dim(ldv_matrix)))
    # One summary per chain: elpd = log pointwise density minus the
    # variance penalty; p_waic = total penalty; waic = -2 * elpd.
    # (Removed unused locals `N` and `waic`; use seq_len() for safety.)
    result <- lapply(seq_len(nrow(dm_matrix)), function(chain){
        lpd <- log(dm_matrix[chain,])
        elpd <- lpd - ldv_matrix[chain,]
        c(elpd_waic=sum(elpd), p_waic=sum(ldv_matrix[chain,]), waic=-2*sum(elpd))
    })
    result <- do.call('cbind', result)
    dimnames(result)[[2]] <- paste0('chain', seq_len(ncol(result)))
    return(result)
}
print.JAGSwaic <- function(x, ...){
    ## Show only the WAIC summary table, not the stored raw mcarray samples.
    waic_tab <- x[["waictable"]]
    print.default(waic_tab, ...)
}
|
b8c9a0e06222fc5bcaa8d3f6c8b81fccfe262a8e
|
bc714def3a27f812bf00c5b89c3e64687594ff23
|
/R/l3.R
|
0803faded7f5af614d910b5b2bf126d3503e04b7
|
[] |
no_license
|
devillemereuil/RAFM
|
af4846c78e00a9d0fd3be19eeb905e6ed7c4abdd
|
ca2fb6d3ddc47a1f3dfdd6a02027502b5cdbb30e
|
refs/heads/master
| 2021-04-03T07:28:41.408826
| 2018-03-13T09:31:59
| 2018-03-13T09:31:59
| 125,025,235
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 123
|
r
|
l3.R
|
l3 <-
function(logalpha_, prioralpha_){
    ## Log-prior density of log(alpha): normal with mean prioralpha_[1]
    ## and variance prioralpha_[2] (dnorm takes the standard deviation).
    dnorm(logalpha_, mean = prioralpha_[1], sd = sqrt(prioralpha_[2]), log = TRUE)
}
|
4f5db7f516097f9a8ad56f809af3d6ac9cdd596b
|
9a1277a635b73c72472ae40442994d6c301ca1b4
|
/R/separate_img.R
|
fb4d4606d972fdc133e9a6590e9a2f4418b0b512
|
[] |
no_license
|
muschellij2/neurobase
|
eaf8632de4659cd857bb5a864bf3a60f83333a89
|
375101bab5a546bd8c8a092c21190b48b36f9a13
|
refs/heads/master
| 2022-10-25T16:00:24.322516
| 2022-10-23T16:07:05
| 2022-10-23T16:07:05
| 68,750,968
| 5
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,535
|
r
|
separate_img.R
|
.separate_img = function(img,
                         levels = NULL,
                         drop_zero = TRUE){
  ## Which label values to split on: caller-supplied levels, or every
  ## distinct value present in the image.
  lab_values <- if (is.null(levels)) unique(c(img)) else unique(levels)
  if (drop_zero) {
    ## Zero conventionally marks background voxels.
    lab_values <- setdiff(lab_values, 0)
  }
  if (length(lab_values) == 0) {
    stop("No non-zero values in the levels this image!")
  }
  lab_values <- sort(lab_values)
  ## One binary mask per label, named by the label value.
  masks <- lapply(lab_values, function(v) {
    img == v
  })
  names(masks) <- lab_values
  masks
}
#' @name separate_img-methods
#' @docType methods
#' @aliases separate_img
#' @title Separate Labeled Image into Multiple Binary Images
#' @description Takes in an image, gets the unique values, then
#' creates a list of binary images for each one of those values.
#' @note Exact equalling is using \code{==}
#' @return A \code{nifti} object (or list of them) or class of
#' object passed in if not specified
#' @param img character path of image or
#' an object of class \code{nifti}, or list of images
#' @param levels if \code{levels} is given, then the separation is only
#' done for those levels and not unique values of the image.
#' @param drop_zero Should zeroes be dropped from the labels? Zero
#' usually denotes background or non-interesting voxels
#' @export
#' @examples
#' set.seed(5)
#' dims = rep(10, 3)
#' arr = array(rpois(prod(dims), lambda = 2), dim = dims)
#' nim = oro.nifti::nifti(arr)
#' simg = separate_img(nim)
#' simg_arr = separate_img(arr)
#' slist = lapply(simg, function(x) array(x, dim(x)))
#' testthat::expect_equal(slist, simg_arr)
#'
#' rnifti = RNifti::asNifti(nim)
#' timg = tempimg(nim)
#' limg = list(factor(timg), factor(timg))
#' func = separate_img
#' func(arr)
#' func(nim)
#' func(rnifti)
#' func(timg)
#' func(limg)
## S4 generic: dispatches on the class of `img`; methods are defined below
## for nifti, array, ANY (niftiImage), factor, character, and list inputs.
setGeneric("separate_img", function(img,
                                    levels = NULL,
                                    drop_zero = TRUE) standardGeneric("separate_img"))
#' @rdname separate_img-methods
#' @aliases separate_img,nifti-method
#' @export
setMethod("separate_img", "nifti", function(img, levels = NULL,
                                            drop_zero = TRUE) {
  ## nifti images subset like arrays, so delegate to the shared workhorse.
  .separate_img(img = img, levels = levels, drop_zero = drop_zero)
})
#' @rdname separate_img-methods
#' @aliases separate_img,array-method
#' @export
setMethod("separate_img", "array", function(img, levels = NULL,
                                            drop_zero = TRUE) {
  ## Plain arrays go straight to the shared workhorse.
  .separate_img(img = img, levels = levels, drop_zero = drop_zero)
})
#' @rdname separate_img-methods
#' @aliases separate_img,ANY-method
#' @export
#' @importFrom RNifti updateNifti
setMethod("separate_img", "ANY", function(img, levels = NULL,
                                      drop_zero = TRUE) {
  # workaround because can't get class
  # (RNifti's niftiImage is not a formal S4 class, so it is caught here.)
  if (inherits(img, "niftiImage")) {
    res = .separate_img(img = img,
                        levels = levels,
                        drop_zero = drop_zero)
    # Re-attach the nifti header from the input image to each binary mask.
    res = lapply(res, function(x) {
      RNifti::updateNifti(x, template = img)
    })
    return(res)
  } else {
    stop("Not implemented for this type!")
  }
  # FIX: removed an unreachable trailing `return(img)` — both branches
  # above either return or stop.
})
#' @rdname separate_img-methods
#' @aliases separate_img,factor-method
#'
#' @export
setMethod("separate_img", "factor", function(img,
                                             levels = NULL,
                                             drop_zero = TRUE) {
  ## Factors are handled through their character representation, which
  ## then dispatches to the character (file path) method.
  separate_img(as.character(img), levels = levels, drop_zero = drop_zero)
})
#' @rdname separate_img-methods
#' @aliases separate_img,character-method
#'
#' @export
setMethod("separate_img", "character", function(img,
                                                levels = NULL,
                                                drop_zero = TRUE) {
  ## Treat strings as image file paths: load, then dispatch again on the
  ## resulting nifti object.
  separate_img(check_nifti(img), levels = levels, drop_zero = drop_zero)
})
#' @rdname separate_img-methods
#' @aliases separate_img,list-method
#' @export
setMethod("separate_img", "list", function(img, levels = NULL,
                                           drop_zero = TRUE) {
  ### add vector capability
  ## Apply element-wise; each element dispatches on its own class.
  lapply(img, separate_img, levels = levels, drop_zero = drop_zero)
})
|
355f3fdfa613593835badcd4c9ad79ae3d03775c
|
771502151a4e152ecb69c075703ff35756a0b35b
|
/PlotFit3dPeople/server.R
|
e4b84299182e89f265479dfa1fd7a5150ebb1273
|
[] |
no_license
|
hinto033/radar_chart
|
89337ff1170df75947d7c9d6fe7b59d04a49497f
|
8d642ab0513df00bec1b5e49b7a1a00a1809f43b
|
refs/heads/master
| 2021-01-17T01:54:21.222806
| 2017-03-07T20:53:15
| 2017-03-07T20:53:15
| 39,858,084
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,023
|
r
|
server.R
|
# server.R
###Need to:
##Convert to LMI and FMI
#finish the final calculations
#Produce the radar charts.
library(fmsb)
# Radar chart axis limits: each column holds c(max, min) in Z-score units.
maxmin <- data.frame(
  Z_TR=c(2, -2),
  Z_LA=c(2, -2),
  Z_LL=c(2, -2),
  Z_RL=c(2, -2),
  Z_RA=c(2, -2))
chartDim <- c(1,1)
#setwd('X:\\bhinton\\radar_chart\\Plot-From-DXA')
# Reference Z-score tables by ethnicity.
# FIX: sep="\t" was previously passed to sprintf() (where it was silently
# ignored) instead of to read.table(); pass it to read.table() directly.
blackData <- read.table(file="data/Black.ZScoreValues.txt", sep="\t")
hispData <- read.table(file="data/Hisp.ZScoreValues.txt", sep="\t")
whiteData <- read.table(file="data/White.ZScoreValues.txt", sep="\t")
fullData <- rbind(blackData, hispData, whiteData)
#Import the Fit 3D Group
fit3dBase <- read.table(file="data/DXA.Fit3d.Export.txt", sep="\t", header = TRUE)
# Derived composite DXA measures: averaged limb masses plus body-mass,
# fat-mass and lean-mass indices (kg/m^2; masses arrive in grams).
dfit3dBase <- data.frame(transform(fit3dBase,
                                   ageYr= age,
                                   Gender= SEX,
                                   Race= ethnicity,
                                   avgArmFat = (LARM_FAT + RARM_FAT) / 2,
                                   avgLegFat = (L_LEG_FAT + R_LEG_FAT) / 2,
                                   avgArmLI = (LARM_LEAN + RARM_LEAN) / 2,
                                   avgLegLI = (L_LEG_LEAN + R_LEG_LEAN) / 2,
                                   BMI = (WBTOT_MASS/1000) / ((height_cm/100)^2),
                                   FMI = (WBTOT_FAT/1000) / ((height_cm/100)^2),
                                   LMI = (WBTOT_LEAN/1000) / ((height_cm/100)^2)
                                   ))
# Whole years of age for reference-table lookups.
dfit3dBase$ageYr= floor(dfit3dBase$age)
# Regional fat- and lean-mass indices (kg/m^2) for each body segment.
dfit3dBase <- transform(dfit3dBase, avgArmFmi = (avgArmFat/1000) / ((height_cm/100)^2),
                        avgLegFmi = (avgLegFat/1000) / ((height_cm/100)^2),
                        trunkFmi = (TRUNK_FAT/1000) / ((height_cm/100)^2),
                        leftArmFmi = (LARM_FAT/1000) / ((height_cm/100)^2),
                        leftLegFmi = (L_LEG_FAT/1000) / ((height_cm/100)^2),
                        rightLegFmi = (R_LEG_FAT/1000) / ((height_cm/100)^2),
                        rightArmFmi = (RARM_FAT/1000) / ((height_cm/100)^2),
                        avgArmLmi = (avgArmLI/1000) / ((height_cm/100)^2),
                        avgLegLmi = (avgLegLI/1000) / ((height_cm/100)^2),
                        trunkLmi = (TRUNK_LEAN/1000) / ((height_cm/100)^2),
                        leftArmLmi = (LARM_LEAN/1000) / ((height_cm/100)^2),
                        leftLegLmi = (L_LEG_LEAN/1000) / ((height_cm/100)^2),
                        rightLegLmi = (R_LEG_LEAN/1000) / ((height_cm/100)^2),
                        rightArmLmi = (RARM_LEAN/1000) / ((height_cm/100)^2)
                        )
genderFix <- function(x) {
  # Map a single DXA sex code to a display label.
  #   'M' -> "Male", 'F' -> "Female".
  # Bug fix: the original fell through with `y` never assigned for any
  # other code, producing the opaque error "object 'y' not found".
  # Fail loudly with a message that names the offending value instead.
  if (x == 'M') return("Male")
  if (x == 'F') return("Female")
  stop("genderFix: unknown sex code: ", x, call. = FALSE)
}
# Recode every raw SEX code into its full label for display and joins.
dfit3dBase$Gender <- sapply(dfit3dBase$SEX,genderFix)
RaceFix <- function(x) {
  # Map a raw ethnicity string to the NHANES-style race label used by
  # the LMS reference tables; anything unrecognised becomes "Other".
  if (x == 'black') {
    "Non-Hispanic Black"
  } else if (x == 'white') {
    "Non-Hispanic White"
  } else if (x == 'hispanic') {
    "Hispanic"
  } else {
    "Other"
  }
}
# Recode ethnicity into the labels used by the LMS reference tables.
dfit3dBase$Race <- sapply(dfit3dBase$ethnicity,RaceFix)
#if (selectNumber == 1) {
# chartDim <- c(1,1)
#} else if (selectNumber == 2) {
# chartDim <- c(1,2)
#}else if (selectNumber == 4) {
# chartDim <- c(2,2)
#}else if (selectNumber == 9) {
# chartDim <- c(3,3)
#}
#Calculate Z Scores for all these people.
#Find way to just target the age in that row.
#Take just the eligible people (Hisp, White, Black)
# Only these three groups have LMS reference tables; "Other" is dropped.
fit3dEligible <- subset(dfit3dBase, dfit3dBase$Race=="Hispanic"
                        | dfit3dBase$Race=="Non-Hispanic Black"
                        | dfit3dBase$Race == "Non-Hispanic White")
####Works to here####
#####
# #
# #
#Part 2: Importing LMS Z scores (And maybe calculating values?)
# #
# #
#Explanation:This section imports the L,M,S values from the LMS chartmaker modeling
#and calculates what the left leg/right leg and left arm/right arm individual z scores
#would be based on the Average leg and average arm L,M,S values. It then stores these
#values in new columns and gives an opportunity to export this new dataset in a new .txt
#table separated by race
# Formula to convert from value (y) to z score (z)
# z = ((y / M)^L - 1) / (L*S)
#Inputs:
#Specifies which columns to keep from LMS tables
# Each raw LMS file has a 10-line preamble (hence skip=10); after cbind
# the per-group tables hold, in order: Arm FMI, Arm LMI, Leg FMI, Leg LMI,
# Trunk FMI, Trunk LMI — four columns (Age, L, M, S) each.  Downstream
# code indexes these tables BY POSITION, so the cbind order must not change.
keep <- c("Age","L", "M", "S")
bfArmFmiLms <-
  read.table("data/BlackFmiLmi_Female_AvgArmFMI_020202t.txt", header=T, skip=10, sep="\t")
bfArmLmiLms <-
  read.table("data/BlackFmiLmi_Female_AvgArmLMI_010401t.txt", header=T, skip=10, sep="\t")
bfLegFmiLms <-
  read.table("data/BlackFmiLmi_Female_AvgLegFMI_020302t.txt", header=T, skip=10, sep="\t")
bfLegLmiLms <-
  read.table("data/BlackFmiLmi_Female_AvgLegLMI_010401t.txt", header=T, skip=10, sep="\t")
bfTrunkFmiLms <-
  read.table("data/BlackFmiLmi_Female_TrunkFMI_020402t.txt", header=T, skip=10, sep="\t")
bfTrunkLmiLms <-
  read.table("data/BlackFmiLmi_Female_TrunkLMI_010401t.txt", header=T, skip=10, sep="\t")
#Keeps only the relevant columns for the black females
bfLms <- cbind(bfArmFmiLms[keep], bfArmLmiLms[keep],
               bfLegFmiLms[keep], bfLegLmiLms[keep],
               bfTrunkFmiLms[keep], bfTrunkLmiLms[keep])
bmArmFmiLms <-
  read.table("data/BlackFmiLmi_Male_AvgArmFMI_020202t.txt", header=T, skip=10, sep="\t")
bmArmLmiLms <-
  read.table("data/BlackFmiLmi_Male_AvgArmLMI_020601t.txt", header=T, skip=10, sep="\t")
bmLegFmiLms <-
  read.table("data/BlackFmiLmi_Male_AvgLegFMI_020202t.txt", header=T, skip=10, sep="\t")
bmLegLmiLms <-
  read.table("data/BlackFmiLmi_Male_AvgLegLMI_010501t.txt", header=T, skip=10, sep="\t")
bmTrunkFmiLms <-
  read.table("data/BlackFmiLmi_Male_TrunkFMI_020401tt.txt", header=T, skip=10, sep="\t")
#This blew up at 8 yr old and didn't display a number so I put a junk variable in.
bmTrunkLmiLms <-
  read.table("data/BlackFmiLmi_Male_TrunkLMI_010601t.txt", header=T, skip=10, sep="\t")
#Keeps only the relevant columns for the black males
bmLms <- cbind(bmArmFmiLms[keep], bmArmLmiLms[keep],
               bmLegFmiLms[keep], bmLegLmiLms[keep],
               bmTrunkFmiLms[keep], bmTrunkLmiLms[keep])
hfArmFmiLms <-
  read.table("data/HispFmiLmi_Female_AvgArmFMI_020302t.txt", header=T, skip=10, sep="\t")
hfArmLmiLms <-
  read.table("data/HispFmiLmi_Female_AvgArmLMI_020401t.txt", header=T, skip=10, sep="\t")
hfLegFmiLms <-
  read.table("data/HispFmiLmi_Female_AvgLegFMI_020301t.txt", header=T, skip=10, sep="\t")
hfLegLmiLms <-
  read.table("data/HispFmiLmi_Female_AveLegLMI_020401t.txt", header=T, skip=10, sep="\t")
hfTrunkFmiLms <-
  read.table("data/HispFmiLmi_Female_TrunkFMI_020402t.txt", header=T, skip=10, sep="\t")
hfTrunkLmiLms <-
  read.table("data/HispFmiLmi_Female_TrunkLMI_020401t.txt", header=T, skip=10, sep="\t")
#Hispanic Females
hfLms <- cbind(hfArmFmiLms[keep], hfArmLmiLms[keep],
               hfLegFmiLms[keep], hfLegLmiLms[keep],
               hfTrunkFmiLms[keep], hfTrunkLmiLms[keep])
hmArmFmiLms <-
  read.table("data/HispFmiLmi_Male_AvgArmFMI_010403t.txt", header=T, skip=10, sep="\t")
hmArmLmiLms <-
  read.table("data/HispFmiLmi_Male_AvgArmLMI_010702t.txt", header=T, skip=10, sep="\t")
hmLegFmiLms <-
  read.table("data/HispFmiLmi_Male__AvgLegFMI_010102t.txt", header=T, skip=10, sep="\t")
hmLegLmiLms <-
  read.table("data/HispFmiLmi_Male_AvgLegLMI_010602t.txt", header=T, skip=10, sep="\t")
hmTrunkFmiLms <-
  read.table("data/HispFmiLmi_Male_TrunkFMI_020502t.txt", header=T, skip=10, sep="\t")
hmTrunkLmiLms <-
  read.table("data/HispFmiLmi_Male_TrunkLMI_010702t.txt", header=T, skip=10, sep="\t")
#Hispanic Males
hmLms <- cbind(hmArmFmiLms[keep], hmArmLmiLms[keep],
               hmLegFmiLms[keep], hmLegLmiLms[keep],
               hmTrunkFmiLms[keep], hmTrunkLmiLms[keep])
wfArmFmiLms <-
  read.table("data/WhiteFmiLmi_Female_AvgArmFMI_020202t.txt", header=T, skip=10, sep="\t")
wfArmLmiLms <-
  read.table("data/WhiteFmiLmi_Female_AvgArmLMI_010401t.txt", header=T, skip=10, sep="\t")
wfLegFmiLms <-
  read.table("data/WhiteFmiLmi_Female_AvgLegFMI_020301t.txt", header=T, skip=10, sep="\t")
wfLegLmiLms <-
  read.table("data/WhiteFmiLmi_Female_AvgLegLMI_010601t.txt", header=T, skip=10, sep="\t")
wfTrunkFmiLms <-
  read.table("data/WhiteFmiLmi_Female_TrunkFMI_020402t.txt", header=T, skip=10, sep="\t")
wfTrunkLmiLms <-
  read.table("data/WhiteFmiLmi_Female_TrunkLMI_010401t.txt", header=T, skip=10, sep="\t")
#White Females
wfLms <- cbind(wfArmFmiLms[keep], wfArmLmiLms[keep],
               wfLegFmiLms[keep], wfLegLmiLms[keep],
               wfTrunkFmiLms[keep], wfTrunkLmiLms[keep])
wmArmFmiLms <-
  read.table("data/WhiteFmiLmi_Male_AvgArmFMI_020402t.txt", header=T, skip=10, sep="\t")
wmArmLmiLms <-
  read.table("data/WhiteFmiLmi_Male_AvgArmLMI_010801t.txt", header=T, skip=10, sep="\t")
wmLegFmiLms <-
  read.table("data/WhiteFmiLmi_Male_AvgLegFMI_010202t.txt", header=T, skip=10, sep="\t")
wmLegLmiLms <-
  read.table("data/WhiteFmiLmi_Male_AvgLagLMI_020702t.txt", header=T, skip=10, sep="\t")
wmTrunkFmiLms <-
  read.table("data/WhiteFmiLmi_Male_TrunkFMI_020502t.txt", header=T, skip=10, sep="\t")
wmTrunkLmiLms <-
  read.table("data/WhiteFmiLmi_Male_TrunkLMI_020702t.txt", header=T, skip=10, sep="\t")
#White Males
wmLms <- cbind(wmArmFmiLms[keep], wmArmLmiLms[keep],
               wmLegFmiLms[keep], wmLegLmiLms[keep],
               wmTrunkFmiLms[keep], wmTrunkLmiLms[keep])
# Compute per-region FMI/LMI z-scores for every eligible subject using
# the race/sex-specific LMS tables built above, accumulating one row per
# subject into FullZSet.
# NOTE(review): FullZSet is grown with rbind inside the loop — O(n^2).
rows = nrow(fit3dEligible)
FullZSet = NULL
for (j in 1:rows){
  race = fit3dEligible$Race[j]
  gender = fit3dEligible$Gender[j]
  age = fit3dEligible$ageYr[j]
  zScore <- fit3dEligible[j ,]
  # Resolve the matching LMS table name, e.g. "bf" + "Lms" -> bfLms.
  if (race == 'Non-Hispanic Black'){
    racePrefix = 'b'
  }else if (race == 'Non-Hispanic White'){
    racePrefix = 'w'
  }else if (race == 'Hispanic'){
    racePrefix = 'h'
  }
  if (gender == 'Male'){
    genderPrefix = 'm'
  }else if (gender == 'Female'){
    genderPrefix = 'f'
  }
  frames <- c(sprintf("%s%sLms", racePrefix, genderPrefix))
  df <- get(frames)
  lmsChart <- assign(as.character(frames), df, envir= .GlobalEnv)
  # Row lookup by age: assumes the LMS tables start at age 8
  # (age 8 -> row 1) — TODO confirm against the chartmaker output files.
  agerow = age - 7
  lmsAge <- lmsChart[agerow ,]
  #Converts all to data matrix (better for calculations)
  # Column positions follow the fixed cbind order from Part 2:
  # (Age,L,M,S) x {ArmFMI, ArmLMI, LegFMI, LegLMI, TrunkFMI, TrunkLMI}.
  lArmFmi = data.matrix(lmsAge[2])
  mArmFmi = data.matrix(lmsAge[3])
  sArmFmi = data.matrix(lmsAge[4])
  lArmLmi = data.matrix(lmsAge[6])
  mArmLmi = data.matrix(lmsAge[7])
  sArmLmi = data.matrix(lmsAge[8])
  lLegFmi = data.matrix(lmsAge[10])
  mLegFmi = data.matrix(lmsAge[11])
  sLegFmi = data.matrix(lmsAge[12])
  lLegLmi = data.matrix(lmsAge[14])
  mLegLmi = data.matrix(lmsAge[15])
  sLegLmi = data.matrix(lmsAge[16])
  lTrunkFmi = data.matrix(lmsAge[18])
  mTrunkFmi = data.matrix(lmsAge[19])
  sTrunkFmi = data.matrix(lmsAge[20])
  lTrunkLmi = data.matrix(lmsAge[22])
  mTrunkLmi = data.matrix(lmsAge[23])
  sTrunkLmi = data.matrix(lmsAge[24])
  #Select just a row
  # LMS z-score: z = ((y/M)^L - 1) / (L*S), applied per region; the arm
  # and leg regions reuse the averaged-limb L,M,S for both sides.
  zScore1 <- transform(zScore,
                       zLArmFmi= (((leftArmFmi/mArmFmi)^lArmFmi)-1)/(lArmFmi*sArmFmi),
                       zRArmFmi= (((rightArmFmi/mArmFmi)^lArmFmi)-1)/(lArmFmi*sArmFmi),
                       zLArmLmi= (((leftArmLmi/mArmLmi)^lArmLmi)-1)/(lArmLmi*sArmLmi),
                       zRArmLmi= (((rightArmLmi/mArmLmi)^lArmLmi)-1)/(lArmLmi*sArmLmi),
                       zLLegFmi= (((leftLegFmi/mLegFmi)^lLegFmi)-1)/(lLegFmi*sLegFmi),
                       zRLegFmi= (((rightLegFmi/mLegFmi)^lLegFmi)-1)/(lLegFmi*sLegFmi),
                       zLLegLmi= (((leftLegLmi/mLegLmi)^lLegLmi)-1)/(lLegLmi*sLegLmi),
                       zRLegLmi= (((rightLegLmi/mLegLmi)^lLegLmi)-1)/(lLegLmi*sLegLmi),
                       zTrunkFmi= (((trunkFmi/mTrunkFmi)^lTrunkFmi)-1)/(lTrunkFmi*sTrunkFmi),
                       zTrunkLmi= (((trunkLmi/mTrunkLmi)^lTrunkLmi)-1)/(lTrunkLmi*sTrunkLmi))
  # Position-based rename — assumes the new z columns land at 49:58;
  # fragile if the input gains/loses columns — TODO confirm.
  colnames(zScore1)[c(49:58)] <-
    c('zLArmFmi', 'zRArmFmi', 'zLArmLmi', 'zRArmLmi', 'zLLegFmi', 'zRLegFmi',
      'zLLegLmi', 'zRLegLmi', 'zTrunkFmi', 'zTrunkLmi')
  #Calculates avg z score (useful in finding populations based on avg Z = +2, 0, -2, etc)
  zScore2 <- data.frame(transform(zScore1,
                                  zAvgFmi= (zTrunkFmi+zLArmFmi+zRArmFmi+zLLegFmi+zRLegFmi) / 5,
                                  zAvgLmi= (zTrunkLmi+zLArmLmi+zRArmLmi+zLLegLmi+zRLegLmi) / 5,
                                  ZSDFMI = sd(c(zLArmFmi, zRArmFmi,zLLegFmi,zRLegFmi, zTrunkFmi)),
                                  ZSDLMI = sd(c(zLArmLmi, zRArmLmi,zLLegLmi,zRLegLmi, zTrunkLmi)))
  )
  #Keeps only th
  keep <- c('BMI','FMI','LMI', "height_cm","scan_package_id",
            "ageYr",'Gender','Race','zLArmFmi', 'zRArmFmi',
            'zLArmLmi', 'zRArmLmi', 'zLLegFmi', 'zRLegFmi',
            'zLLegLmi', 'zRLegLmi', 'zTrunkFmi', 'zTrunkLmi','zAvgFmi', 'zAvgLmi', 'ZSDFMI',
            'ZSDLMI')
  zScore3 <- zScore2[keep]
  #These are the n-1 versions of the SDs
  FullZSet = rbind(FullZSet, zScore3)
  #Changes column names to create the radar charts later on
}#End of For statment
colnames(FullZSet) <- c('BMI','FMI','LMI', "height_cm","scan_package_id",
                        "ageYr",'Gender','Race','Z_FMI_LA', 'Z_FMI_RA',
                        'Z_LMI_LA', 'Z_LMI_RA', 'Z_FMI_LL', 'Z_FMI_RL',
                        'Z_LMI_LL', 'Z_LMI_RL', 'Z_FMI_TR', 'Z_LMI_TR','zAvgFmi',
                        'zAvgLmi', 'ZSDFMI','ZSDLMI')
# Shiny server: renders one subject's FMI/LMI radar chart plus three
# text summaries, all driven by the input$Person row index into FullZSet.
shinyServer(
  function(input, output) {
    output$map <- renderPlot({
      # Pull the selected subject's row of z-scores.
      zData2 <- FullZSet[input$Person , ]
      dzData <- data.frame(zData2)
      #print(dzData)
      #print(2)
      #Converts that set to dataframe
      #finds dimensions of that table and takes selectNumber random rows from that data set
      #dimension <- dim(dzData)
      #nRow <- floor(runif(1, 1,dimension[1])) #Normally floor(runif(selectNumber, 1,dimension[1]))
      #selects out only those random rows and their FMI/LMI data
      fmiData <- dzData[1,c("Z_FMI_TR","Z_FMI_LA", "Z_FMI_LL", "Z_FMI_RL", "Z_FMI_RA")]
      lmiData <- dzData[1,c("Z_LMI_TR","Z_LMI_LA", "Z_LMI_LL", "Z_LMI_RL", "Z_LMI_RA")]
      #renames the columns because column names in fmiData/lmiData must match maxmin
      colnames(fmiData) <- c("Z_TR", "Z_LA", "Z_LL", "Z_RL", "Z_RA")
      colnames(lmiData) <- c("Z_TR", "Z_LA", "Z_LL", "Z_RL", "Z_RA")
      # fmsb::radarchart expects rows 1-2 to be axis max/min, then data.
      ind1Data <- rbind(maxmin,fmiData[1,],lmiData[1,]) #normally in a loop and i instead of 1
      op <- par(mar=c(1, 2, 2, 1),mfrow=chartDim)
      radarchart(ind1Data, axistype=3, seg=4, cex.main=1, plty=1, plwd=2,
                 pcol = c("goldenrod3", "firebrick4"),
                 vlabels=c("TR", "RA", "RL", "LL", "LA"), caxislabels=c("-2","-1","0","1","2"),
                 title=sprintf("%s %s Individual FMI/LMI Chart", input$race, input$gender))
      legend('topright', c("FMI", "FFMI") , lwd=2,
             col=c("goldenrod3", "firebrick4"), bty='n', cex=1.2)
    })
    output$text1 <- renderText({
      #total <- total1[ which(total1$Gender==input$gender
      # & total1$Race==input$race) , ]
      zData2 <- FullZSet[input$Person , ]
      paste("Age/Gender/Race: ", zData2$ageYr, zData2$Gender, zData2$Race)
    })
    output$text2 <- renderText({
      #total <- total1[ which(total1$Gender==input$gender
      # & total1$Race==input$race) , ]
      zData2 <- FullZSet[input$Person , ]
      paste("BMI/FMI/LMI: ", zData2$BMI, zData2$FMI, zData2$LMI)
    })
    output$text3 <- renderText({
      #total <- total1[ which(total1$Gender==input$gender
      # & total1$Race==input$race) , ]
      zData2 <- FullZSet[input$Person , ]
      paste("Package ID Number: ", zData2$scan_package_id)
    })
  }
)
# race <- switch(input$race,
# "Percent White" = counties$white,
# "Percent Black" = counties$black,
# "Percent Hispanic" = counties$hispanic,
# "Percent Asian" = counties$asian)
# gender <- switch(input$gender,
# "Percent White" = "darkgreen",
# "Percent Black" = "black",
# "Percent Hispanic" = "darkorange",
# "Percent Asian" = "darkviolet")
# age <- switch(input$age,
# "Percent White" = "% White",
# "Percent Black" = "% Black",
# "Percent Hispanic" = "% Hispanic",
# "Percent Asian" = "% Asian")
#percent_map(var = data,
# color = color,
# legend.title = legend,
# max = input$range[2],
# min = input$range[1])
|
07ee1bb6f5f0ec9596ca7bdea2531a3dd9ae565e
|
76beb7e70f9381a5bded37834ba8783e16cc8b9a
|
/ipmbook-code/c2/Diagnose Monocarp Growth Kernel.R
|
9d7f90a2f14aafc41095844bbfaddb60b3c69fd0
|
[] |
no_license
|
aekendig/population-modeling-techniques
|
6521b1d5e5d50f5f3c156821ca5d4942be5a1fc9
|
713a5529dcbe7534817f2df139fbadbd659c4a0c
|
refs/heads/master
| 2022-12-29T20:54:51.146095
| 2020-10-07T12:18:23
| 2020-10-07T12:18:23
| 302,026,874
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,900
|
r
|
Diagnose Monocarp Growth Kernel.R
|
### This script assumes that you have just source'd the Monocarp model
### using MonocarpSimulateIBM.R
# or, load an .Rdata file with saved simulation results
load("MonocarpSimData.Rdata")
require(car)
require(mgcv)
source("../utilities/Standard Graphical Pars.R")
## Construct a data set of plausible size
# Take ~300 evenly spaced rows from the simulated IBM output and keep
# complete (size, size1) growth pairs, sorted by initial size.
pick.data <- seq(1, nrow(sim.data), length = 300)
test.data <- sim.data[round(pick.data), ]
test.data <- na.omit(subset(test.data, select = c(size, size1)))
e <- order(test.data$size)
test.data <- test.data[e, ]
## refit models to the reduced data set
mod.grow <- lm(size1 ~ size, data = test.data)
cat(length(mod.grow$fitted))
set_graph_pars("panel4")
# Plot residuals versus fitted for growth model
# Panel (a): smoothed residual trend should be flat if the linear
# growth kernel is adequate.
zhat <- fitted(mod.grow)
resid <- residuals(mod.grow)
plot(zhat, resid, xlab = "Fitted values", ylab = "Residuals")
gam.resid <- gam(resid ~ s(zhat), method = "REML")
rhat <- predict(gam.resid, type = "response")
points(zhat, rhat, type = "l")
add_panel_label("a")
# Normal qq-plot for growth model
sresid <- rstandard(mod.grow)
qqPlot(sresid, main = "", xlab = "Normal quantiles", ylab = "Standardized residual quantiles",
       col.lines = "black", lwd = 1)
add_panel_label("b")
# Absolute residuals versus fitted
# Panel (c): checks for non-constant residual variance.
plot(zhat, sqrt(abs(sresid)), xlab = "Fitted values", ylab = "sqrt(|Std Residuals|)")
gam.sresid <- gam(sqrt(abs(sresid)) ~ s(zhat), method = "REML")
rhat <- predict(gam.sresid, type = "response")
points(zhat, rhat, type = "l")
add_panel_label("c")
# compare to a gam fit
# Panel (d): if the gam tracks the straight line, linearity is supported.
gam.grow <- gam(size1 ~ s(size), data = test.data, method = "REML")
AIC(gam.grow, mod.grow)
gam.grow.fitted <- predict(gam.grow, type = "response")
matplot(test.data$size, cbind(fitted(mod.grow), gam.grow.fitted), type = "l",
        lty = c(1, 2), lwd = 2, xlab = "Size t", ylab = "Fitted size t+1")
add_panel_label("d")
# dev.copy2eps(file = "../../figures/c2/DiagnoseMonocarp1.eps")
|
961e530374604709c4e79e905215d163e2ff08a2
|
cb9adc2ebaecde6169e6261cc52cb78029b2061b
|
/exhaustion.r
|
c7c5367b022f7f7b7469bc85f4ecb8318bd7592b
|
[] |
no_license
|
zxzx310310/DSL_paper
|
97e5cef1c50bd1158b77898259e2ff6f6b34a58d
|
4d38df01f915cb4e256dde38ebec5c731f225ea5
|
refs/heads/master
| 2021-05-02T14:12:42.331994
| 2019-08-22T13:49:01
| 2019-08-22T13:49:01
| 120,715,450
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,448
|
r
|
exhaustion.r
|
#---- Timing (start) ----
startTime <- Sys.time()
#---- Data initialisation (local files) ----
sourceData <- read.csv(file = "assets/商品資料庫_s.csv") # read raw product database
preferenceTable <- read.csv(file = "assets/preferenceTable_s.csv") # read product preference table
sourceData <- sourceData[c(-1, -13)] # drop unneeded columns (id, trailing column)
names(sourceData)[11] <- "重量" # rename column 11 to "weight"
goodData <- sourceData # keep a working copy of the raw data
goodData <- cbind(goodData, "Selected" = 0, "Preference" = 1) # add selection flag and default preference
#---- Environment parameters ----
maxVolume <- 13000 # maximum box volume
maxWeight <- 16000 # maximum weight (g)
userItemValues <- 10 # number of items the user wants
maxPrice <- 550 # user's budget
#----Function----
# Merge preference values with categories:
# Copy the user's per-category preference score onto every matching
# product row, so each product carries its own preference value.
preference_match <- function(good_data, preference_table) {
  # good_data:        product table with 種類 (category) and Preference columns
  # preference_table: one row per category with columns category, preference
  # Returns good_data with Preference overwritten per category.
  # Fixes vs. original: the in-body comments documented unrelated GA
  # parameters (copy-paste), and `1:dim(x)[1]` iterates c(1, 0) on an
  # empty table — seq_len(nrow(...)) is safe for zero rows.
  for (i in seq_len(nrow(preference_table))) {
    matched <- good_data$種類 == preference_table$category[i]
    good_data$Preference[matched] <- as.numeric(preference_table$preference[i])
  }
  good_data
}
# Compute each chromosome's total weight.
total_weight <- function(gene_list) {
  # gene_list: list of chromosomes; element [[1]] of each is the selected
  # product data frame with a 重量 (weight) column.
  # Attaches the summed weight as "totalWeight" on every chromosome.
  lapply(gene_list, function(gene) {
    gene[["totalWeight"]] <- sum(gene[[1]]$"重量")
    gene
  })
}
# Preference fitness (denominator spans preference values 1..max).
fitness_preference <- function(gene_list, require_goods, non_require_values, preference_table) {
  # gene_list:          list of chromosomes (element [[1]] holds the products)
  # require_goods:      vector of required product categories
  # non_require_values: count of non-required items per chromosome
  # preference_table:   per-category preference scores (column `preference`)
  # For each chromosome, multiplies 1 + (pref_k^2 - 1) / sum(1..max^2)
  # over the first (|required| + non_require_values) items, and records
  # the plain preference sum as "totalPreference".
  max_pref <- max(preference_table$preference)
  denom <- sum((1:max_pref)^2)          # invariant across chromosomes
  n_items <- length(require_goods) + non_require_values
  for (idx in seq_along(gene_list)) {
    prefs <- gene_list[[idx]][[1]]$Preference
    fit <- 1
    for (k in 1:n_items) {
      fit <- fit * (1 + as.numeric(prefs[k]^2 - 1) / denom)
    }
    gene_list[[idx]][["fitPreference"]] <- fit
    gene_list[[idx]][["totalPreference"]] <- sum(prefs)
  }
  gene_list
}
# Volume fitness (with penalty tiers).
fitness_volume <- function(gene_list, bin_volume) {
  # gene_list:  list of chromosomes; [[1]] holds the product data frame
  #             with a 體積 (volume) column.
  # bin_volume: capacity of the box.
  # Fitness = |bin_volume - total| / bin_volume plus a penalty tier:
  #   +1 exact fit, +2 within [60%, 100%] of capacity but not exact,
  #   +3 otherwise.  Lower is better.
  for (i in seq_along(gene_list)) {
    sum_volume <- sum(gene_list[[i]][[1]]$"體積")
    gap <- bin_volume - sum_volume        # capacity minus used volume
    fit <- abs(gap) / bin_volume
    if (sum_volume >= (bin_volume * 0.6) && sum_volume <= bin_volume) {
      fit <- fit + if (gap == 0) 1 else 2
    } else {
      fit <- fit + 3
    }
    gene_list[[i]][["fitVolume"]] <- fit
    gene_list[[i]][["totalVolume"]] <- sum_volume
    # Bug fix: the fill rate was computed against the global `maxVolume`
    # instead of the bin_volume argument, silently coupling this function
    # to script state.  They coincide in the current script, so behavior
    # is unchanged for existing callers.
    gene_list[[i]][["volumeRate"]] <- sum_volume / bin_volume
  }
  gene_list
}
# Price fitness (with penalty tiers).
fitness_price <- function(gene_list, limit_price) {
  # gene_list:   list of chromosomes; [[1]] holds the product data frame
  #              with a 單價 (unit price) column.
  # limit_price: the user's budget.
  # Fitness = |budget - spend| / budget plus a penalty tier:
  #   +1 exactly on budget, +2 under budget, +3 over budget.
  for (g in seq_along(gene_list)) {
    spent <- sum(gene_list[[g]][[1]]$"單價")
    gap <- limit_price - spent
    penalty <- if (gap == 0) 1 else if (gap > 0) 2 else 3
    gene_list[[g]][["fitPrice"]] <- abs(gap) / limit_price + penalty
    gene_list[[g]][["totalPrice"]] <- spent
  }
  gene_list
}
# Overall fitness.
fitness_total <- function(gene_list) {
  # Overall fitness = fitVolume * fitPrice * fitPreference per chromosome.
  # Bug fix: the original first computed
  #   sum_fit <- unlist(lapply(..., fitVolume * fitPrice))
  # and immediately overwrote it inside the loop — dead code that also
  # disagreed with the real formula (it omitted fitPreference).  Removed.
  for (i in seq_along(gene_list)) {
    gene_list[[i]][["totalFit"]] <-
      gene_list[[i]]$fitVolume * gene_list[[i]]$fitPrice * gene_list[[i]]$fitPreference
  }
  gene_list
}
filter_weight <- function(gene_list, limit_weight, limit_volume) {
  # Keep chromosomes whose totalWeight is within limit_weight and whose
  # totalVolume lies in [60% of limit_volume, limit_volume], then sort
  # ascending by totalFit (lower penalised fitness is better).
  # Fixes vs. original: (a) the list was grown with append() inside a
  # loop — replaced by Filter(); (b) the comment claimed a decreasing
  # sort while the code uses decreasing = FALSE (ascending).
  condition_pop <- Filter(function(g) {
    g$totalWeight <= limit_weight &&
      g$totalVolume <= limit_volume &&
      g$totalVolume >= (limit_volume * 0.6)
  }, gene_list)
  condition_pop[order(sapply(condition_pop, function(x) x$totalFit), decreasing = FALSE)]
}
#---- Execution ----
# Order categories by name length then alphabetically; the first six are
# treated as required categories, the rest as optional.
# NOTE(review): assumes at least six category levels exist — confirm.
level <- levels(goodData$種類)
level <- level[order(nchar(level), level)]
requiredList <- level[1:6]
nonRequiredList <- level[-1:-length(requiredList)]
goodData <- preference_match(good_data = goodData, preference_table = preferenceTable)
# Split the product table into one data frame per category.
goodKind <- list()
for (i in 1:length(level)) {
  goodKind[[i]] <- goodData[goodData$'種類' == level[i], ]
}
# Exhaustively enumerate every combination of one product from each of the
# first ten categories; each combination becomes a 10-row data frame
# wrapped in a one-element list.
# NOTE(review): this is a 10-deep Cartesian product with rbind growth in
# the innermost body — O(prod(nrow)) and very slow/memory hungry.
# expand.grid over row indices would build the same combinations far
# faster, but would change enumeration order — left as-is.
combination <- list()
result <- data.frame(產品代號 = factor(), 品名 = factor(), 單價 = integer(), 體積 = numeric(), 廠牌 = factor(), 長 = numeric(), 寬 = numeric(), 高 = numeric(), 種類 = factor(), 葷素 = factor(), 重量 = integer(), Selected = numeric(), Preference = numeric())
index <- 1
for (a1 in 1:nrow(goodKind[[1]])) {
  for (b1 in 1:nrow(goodKind[[2]])) {
    for (c1 in 1:nrow(goodKind[[3]])) {
      for (d1 in 1:nrow(goodKind[[4]])) {
        for (e1 in 1:nrow(goodKind[[5]])) {
          for (f1 in 1:nrow(goodKind[[6]])) {
            for (g1 in 1:nrow(goodKind[[7]])) {
              for (g2 in 1:nrow(goodKind[[8]])) {
                for (h1 in 1:nrow(goodKind[[9]])) {
                  for (i1 in 1:nrow(goodKind[[10]])) {
                    result <- rbind(result, goodKind[[1]][a1,])
                    result <- rbind(result, goodKind[[2]][b1,])
                    result <- rbind(result, goodKind[[3]][c1,])
                    result <- rbind(result, goodKind[[4]][d1,])
                    result <- rbind(result, goodKind[[5]][e1,])
                    result <- rbind(result, goodKind[[6]][f1,])
                    result <- rbind(result, goodKind[[7]][g1,])
                    result <- rbind(result, goodKind[[8]][g2,])
                    result <- rbind(result, goodKind[[9]][h1,])
                    result <- rbind(result, goodKind[[10]][i1,])
                    combination[[index]] <- list(result)
                    index = index +1
                    # Reset the accumulator for the next combination.
                    result <- data.frame(產品代號 = factor(), 品名 = factor(), 單價 = integer(), 體積 = numeric(), 廠牌 = factor(), 長 = numeric(), 寬 = numeric(), 高 = numeric(), 種類 = factor(), 葷素 = factor(), 重量 = integer(), Selected = numeric(), Preference = numeric())
                  }
                }
              }
            }
          }
        }
      }
    }
  }
}
#---- Timing (end) ----
# Score every enumerated combination, filter to feasible ones, and
# report the elapsed run time.
combination <- total_weight(gene_list = combination)
# Bug fix: the original passed `nonRequiredValues`, a variable that is
# never defined anywhere in this script (only `nonRequiredList` exists),
# so the run aborted here.  The intended value is the number of
# non-required categories included in each combination.
combination <- fitness_preference(gene_list = combination,
                                  require_goods = requiredList,
                                  non_require_values = length(nonRequiredList),
                                  preference_table = preferenceTable)
combination <- fitness_volume(gene_list = combination, bin_volume = maxVolume)
combination <- fitness_price(gene_list = combination, limit_price = maxPrice)
combination <- fitness_total(gene_list = combination)
combination <- filter_weight(gene_list = combination, limit_weight = maxWeight, limit_volume = maxVolume)
endTime <- Sys.time()
resultTime <- endTime - startTime
print(resultTime)
|
4bc8a653728b91b8c150420839c96cb0ae73f646
|
15b5a30b17ce3b1dea0ed27ac6b436047c27150e
|
/shiny/ui.R
|
7ce6901dd36ac1b2fb079aed35ac47b16fc738bc
|
[] |
no_license
|
wwkong/UW-Course-Evals-Shiny
|
e1bbf5c501e19f595112b1057918dc2375e1b2d4
|
367e51cadbe6c6d70d37cb0917d6139d698ec48f
|
refs/heads/master
| 2016-09-11T03:01:18.913369
| 2015-04-19T02:29:52
| 2015-04-19T02:29:56
| 33,841,207
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,732
|
r
|
ui.R
|
# UI for the UW course-evaluation explorer: a sidebar whose controls are
# shown conditionally depending on the active tab (1 = Input, 2 = Plot),
# and a two-tab main panel (data table / plot).
shinyUI(fluidPage(
  # Header:
  titlePanel("Shiny - UW Course Evaluations",
             title="Analysis of UW Course Evaluations"),
  # Sub-header
  fluidRow(column(12,p("Coded by William Kong. All rights reserved."))),
  # Input in sidepanel:
  sidebarPanel(
    #------------------------------ Input Data ------------------------------
    # All htmlOutput controls below are rendered server-side, so their
    # choices can depend on the loaded data.
    # Variable selection:
    conditionalPanel(
      condition="input.conditionedPanels==1",
      htmlOutput("varselect")),
    # Filter Variable:
    conditionalPanel(
      condition="input.conditionedPanels==1",
      htmlOutput("filterselect")),
    # Filter Value:
    conditionalPanel(
      condition="input.conditionedPanels==1",
      htmlOutput("filtervalue")),
    # Subset String:
    conditionalPanel(
      condition="input.conditionedPanels==1",
      htmlOutput("subsetStr")),
    # Submit Subset
    conditionalPanel(
      condition="input.conditionedPanels==1",
      actionButton("subsetButton","Reload Data")),
    #------------------------------ Plot Data ------------------------------
    # Question selection:
    conditionalPanel(
      condition="input.conditionedPanels==2",
      htmlOutput("question")),
    # Group selection:
    conditionalPanel(
      condition="input.conditionedPanels==2",
      htmlOutput("group")),
    # Sorting Value:
    conditionalPanel(
      condition="input.conditionedPanels==2",
      htmlOutput("sortvalue"))
  ),
  # Main Panel
  mainPanel(
    tabsetPanel(
      tabPanel("Input",
               dataTableOutput("table"),
               value=1),
      tabPanel("Plot",
               # clickId exposes click coordinates as input$scatterPosn.
               plotOutput("plot", clickId = 'scatterPosn'),
               value=2),
      id="conditionedPanels"
    )
  )
))
|
0e79d782a012343072e5ecca1bd03bdc31791cf1
|
aa26052173994c5ce2363f11340f771d83d380a4
|
/man/showcues.Rd
|
2be689959580e3e0f06f14a34e28a8a98a8baa67
|
[] |
no_license
|
ronypik/FFTrees
|
ff92103e0c7d3105d9da96580da66d311e5a71ff
|
21421d9e7a48db3508bc721cd5b2ed9e60b0b19b
|
refs/heads/master
| 2021-01-11T16:31:20.673528
| 2017-01-26T08:04:48
| 2017-01-26T08:04:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 806
|
rd
|
showcues.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/showcues_function.R
\name{showcues}
\alias{showcues}
\title{Visualizes cue accuracies from an FFTrees object in a ROC space}
\usage{
showcues(x = NULL, data = "train", main = NULL, top = 5,
palette = c("#0C5BB07F", "#EE00117F", "#15983D7F", "#EC579A7F", "#FA6B097F",
"#149BED7F", "#A1C7207F", "#FEC10B7F", "#16A08C7F", "#9A703E7F"))
}
\arguments{
\item{x}{An FFTrees object}
\item{data}{A string indicating whether or not to show training ("train") or testing ("test") cue accuracies}
\item{main}{Main plot description}
\item{top}{An integer indicating how many of the top cues to highlight}
\item{palette}{An optional vector of colors}
}
\description{
Visualizes cue accuracies from an FFTrees object in a ROC space
}
|
5072bde09203fa5b59b9fdf973ba737646d61167
|
ad24e05bb17df332554fe592d8f4070ad709db3a
|
/RStudio - text lessons/Run-shiny-apps.R
|
8354878d6ebeba3ff514567c84e5d80b66008118
|
[] |
no_license
|
jyuill/proj-r-shiny
|
19d28ba43d6091dff319968eeec8f2846b0c58e0
|
74bf261352ac53c1969b8a41c035701499e1ed3b
|
refs/heads/master
| 2023-01-11T23:45:25.619212
| 2023-01-09T05:53:34
| 2023-01-09T05:53:34
| 79,695,710
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 766
|
r
|
Run-shiny-apps.R
|
## R file to run shiny apps - using examples from R Studio text lessons
library(shiny)
## need to highlight desired code and use 'ctrl+enter' to run
## use path from project working directory
# NOTE(review): all paths below are relative — assumes the working
# directory is the project root.
## Lesson 1: Basic Histogram
runApp("RStudio - text lessons/Lesson1-histogram")
## Lesson 2: HTML
runApp("RStudio - text lessons/Lesson2-html")
## Lesson 3: Control Widgets
runApp("RStudio - text lessons/Lesson3-control-widgets")
## Lesson 4: Reactive Output
# display.mode = "showcase" shows the app source alongside the app.
runApp("RStudio - text lessons/Lesson4-reactive-output",
       display.mode = "showcase")
## Lesson 5: census app
runApp("RStudio - text lessons/Lesson5-census-app",
       display.mode = "showcase")
## Lesson 6: stock vis
runApp("RStudio - text lessons/Lesson6-stock-vis",
       display.mode = "showcase")
|
eadbdb68fad3eb9a3d93068d79e39f4e642ba01c
|
5350321bf95b9b836140cdadf0ad1108c140ee76
|
/R/convert_date.R
|
42acbcf74f594943c12a21029d9fe5596963816c
|
[
"MIT"
] |
permissive
|
barrenWuffet/convPkg
|
b439c4c954fa73be30b1ee5e1617b76cfe1ecf0b
|
483a6267da7a52bf02833bd18771173ee584cada
|
refs/heads/master
| 2021-07-04T12:51:16.565108
| 2021-06-04T23:54:26
| 2021-06-04T23:54:26
| 24,740,982
| 7
| 3
|
NOASSERTION
| 2019-04-01T14:52:17
| 2014-10-02T23:42:45
|
R
|
UTF-8
|
R
| false
| false
| 1,595
|
r
|
convert_date.R
|
#' Converts all columns of class POSIXct or POSIXt in a data.frame to Date class.
#'
#' @param xx A data.frame containing columns of class POSIXct or POSIXt
#'
#' @return data.frame with any columns of class POSIXct or POSIXt converted to Dates
#' @export
#'
#' @examples
#' z <- seq(1472562988, 1472563988, 100)
#' df1 <- data.frame(col1 = as.POSIXct(z, origin = "1960-01-01"))
#' df2 <- convert_date(df1)
#' cnn(df1)
#' cnn(df2)
#'
#' @author \itemize{
#' \item Andrei Rukavina - \url{https://github.com/arukavina}
#' \item Thijn van der Heijden - \email{avanderheijden@@alixpartners.com}
#' \item Zach Armentrout - \email{zarmentrout@@alixpartners.com}
#' \item Qianbo Wang - \email{qwang@@alixpartners.com}
#' \item James Wang - \email{swang@@alixpartners.com}
#' }
#'
#'
#'
convert_date <- function(xx){
# dateind <- names(which(sapply(sapply(xx, class),function(x) any(x %in% c("POSIXct", "POSIXt" )))))
# cat('found ',length(dateind), ' dates : \n' )
# lapply(dateind,function(x) cat(x,' --- \n'))
#
# xx[,dateind] <- data.frame(lapply(dateind,function(x) as.Date(xx[,x])))
#
# return(xx)
# dateind <- names(which(sapply(sapply(xx, class),function(x) any(x %in% c("POSIXct", "POSIXt" )))))
dateind_a <- names(xx[sapply(xx,function(x)is(x,"POSIXct"))])
dateind_b <- names(xx[sapply(xx,function(x)is(x,"POSIXt"))])
dateind <- unique(c(dateind_a, dateind_b))
cat('found ',length(dateind), ' dates : \n' )
lapply(dateind,function(x) cat(x,' --- \n'))
xx[,dateind] <- data.frame(lapply(dateind,function(x) as.Date(xx[,x])))
return(xx)
}
|
78ab49c4613c6cea7b0493628e50392dc99b606b
|
7d9627e3973c43a820b4a0819d69563f4f4eadb4
|
/PCA/pca.r
|
43d2e4fd63ff91998110a9a83bedcebeb4be20fd
|
[] |
no_license
|
Kinsman-Road/rcode
|
48dbd102de59108f3c457fce866775fb49bfc691
|
b07e066fbc6819aec57703af029fa30fe20838d3
|
refs/heads/master
| 2021-08-07T21:44:36.902168
| 2021-01-19T22:58:15
| 2021-01-19T22:58:15
| 241,498,378
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,296
|
r
|
pca.r
|
#Resources
#https://www.datacamp.com/community/tutorials/pca-analysis-r
#http://www.sthda.com/english/articles/31-principal-component-methods-in-r-practical-guide/118-principal-component-analysis-in-r-prcomp-vs-princomp/
#https://www.climate.gov/maps-data/dataset/past-weather-zip-code-data-table
# Pre-/post-construction PCA: reads two sheets of the same workbook, runs
# prcomp() on the first 7 (numeric) columns of each, then derives variable
# coordinates, cos2 and contribution tables, and builds the standard
# factoextra plots (scree, individuals, variables, biplot).
#::::: Import :::::
library(readxl)
pre <- read_excel("PCA/pca.xlsx", sheet = "pca.pre")
post <- read_excel("PCA/pca.xlsx", sheet = "pca.post")
#::::: Preparing datasets as data frames :::::
pre <- data.frame(pre)
post <- data.frame(post)
pre.n <- pre[1:7] #create dataframes with only numerical columns from pre
post.n <- post[1:7] #create dataframes with only numerical columns from post
#:::::PCA:::::
library(factoextra)
library(FactoMineR)
pca.pre <- prcomp(pre.n, scale = TRUE) #singular value pca method - not spectral decomposition
pca.post <- prcomp(post.n, scale = TRUE) #singular value pca method - not spectral decomposition
pre.eig <- get_eigenvalue(pca.pre)
post.eig <- get_eigenvalue(pca.post)
#::::: PCA Coordinates :::::
#These are what is driving the direction of the plots below
# Variable coordinates = loadings (rotation matrix rows) scaled by the
# per-component standard deviations.
#Pre-Construction PCA Coordinates
pre.vcf <- function(pre.load, comp.sdev){pre.load*comp.sdev}
pre.load <- pca.pre$rotation
pre.sdev <- pca.pre$sdev
pre.vcoord <- t(apply(pre.load, 1, pre.vcf, pre.sdev ))
pre.vc <- head(pre.vcoord[,1:7]) #1:7 = number of dimensions/eigenvectors kept (comment previously said 1:8)
#Post-Construction PCA Coordinates
post.vcf <- function(post.load, comp.sdev){post.load*comp.sdev}
post.load <- pca.post$rotation
post.sdev <- pca.post$sdev
post.vcoord <- t(apply(post.load, 1, post.vcf, post.sdev))
post.vc <- head(post.vcoord[,1:7]) #1:7 = number of dimensions/eigenvectors kept (comment previously said 1:8)
pre.vc #table of pre pca coords
post.vc #table of post pca coords
#:::::PCA cos2:::::
# cos2 = squared variable coordinates.
pre.cos2 <- pre.vcoord^2
post.cos2 <- post.vcoord^2
pre.cos2 #table of contribution to each dimension
post.cos2 #table of contribution to each dimension
#:::::PCA Contributions to Each Given Component:::::
# Contribution (%) of each variable to a component: cos2 * 100 / column sum.
pre.cc2 <- apply(pre.cos2, 2, sum)
contrib <- function(pre.cos2, pre.cc2){pre.cos2*100/pre.cc2}
pre.varc <- t(apply(pre.cos2, 1, contrib, pre.cc2))
pre.vcontrib <- head(pre.varc[,1:7]) #1:7 number of dimensions/eigenvectors to choose
post.cc2 <- apply(post.cos2, 2, sum)
# NOTE(review): `contrib` is redefined here; pre.varc above was already
# computed with the first definition, so results are unaffected, but the
# name reuse is easy to trip over when editing.
contrib <- function(post.cos2, post.cc2){post.cos2*100/post.cc2}
post.varc <- t(apply(post.cos2, 1, contrib, post.cc2))
post.vcontrib <- head(post.varc[,1:7]) #1:7 number of dimensions/eigenvectors to choose
pre.vcontrib
post.vcontrib
#:::::Creating a scree plot:::::
pre.scree <- fviz_eig(pca.pre)
post.scree <- fviz_eig(pca.post)
pre.scree
post.scree
#:::::Creating contribution plot for individual observations:::::
pre.ind <- fviz_pca_ind(pca.pre,
                        col.ind = "cos2", #maybe "contribution?"
                        gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07"), #default R colors
                        repel = TRUE,
                        label = "none",
                        title = "Pre-Construction Individual Plots")
post.ind <- fviz_pca_ind(pca.post,
                         col.ind = "cos2",
                         gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07"), #default R colors
                         repel = TRUE,
                         label = "none",
                         title = "Post-Construction Individual Plots")
pre.ind
post.ind
#:::::Creating contribution plot for variable contributions:::::
pre.var <- fviz_pca_var(pca.pre,
                        col.var = "cos2", #maybe "contribution?"
                        gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07"), #default R colors
                        repel = TRUE,
                        title = "Pre-Construction Variable Contribution")
post.var <- fviz_pca_var(pca.post,
                         col.var = "cos2", #maybe "contribution?"
                         gradient.cols = c("#00AFBB", "#E7B800", "#FC4E07"), #default R colors
                         repel = TRUE,
                         title = "Post-Construction Variable Contribution")
pre.var
post.var
#:::::Creating a biplot(combination of ind + var plots):::::
pre.bp <- fviz_pca_biplot(pca.pre,
                          col.ind = "#fa995c",
                          col.var = "#2f2091",
                          label = "var",
                          repel = TRUE,
                          title = "Pre-Construction Biplot")
post.bp <- fviz_pca_biplot(pca.post,
                           col.ind = "#fa995c",
                           col.var = "#2f2091",
                           label = "var",
                           repel = TRUE,
                           title = "Post-Construction Biplot")
pre.bp
post.bp
#:::::Creating an individual PCA plot with ellipses for categories:::::
#(1) First define categories as factors
# NOTE(review): the hard-coded row counts (470 pre, 655 post) are assumed to
# equal nrow(pre)/nrow(post) -- confirm against the input workbook.
#--(1a) Pre categories
pre.g.species <- as.factor(pre$species[1:470])
pre.g.solar <- as.factor(pre$solar[1:470])
pre.g.cat <- as.factor(pre$category[1:470])
pre.g.cam <- as.factor(pre$camera[1:470])
pre.g.traffic <- as.factor(pre$traffic[1:470])
pre.g.dnc <- as.factor(pre$dnc[1:470])
#--(1b) Post categories
post.g.species <- as.factor(post$species[1:655])
post.g.solar <- as.factor(post$solar[1:655])
post.g.cat <- as.factor(post$category[1:655])
post.g.cam <- as.factor(post$camera[1:655])
post.g.traffic <- as.factor(post$traffic[1:655])
post.g.dnc <- as.factor(post$dnc[1:655])
#(2) Produce ellipses PCA graphs for every factor
# Shared wrapper: all twelve plots use identical fviz_pca_ind() options and
# differ only in the PCA object, the grouping factor and the title.
# Fixed inconsistencies from the original: one plot passed palette = c("")
# (an empty-string colour) instead of the default, one used
# legend.title = "groups" (lowercase), and one title had inconsistent
# capitalization ("Daylight preference").
ellipse_plot <- function(pca, groups, title) {
  fviz_pca_ind(pca,
               col.ind = groups,
               palette = NULL,          # default palette (c( ) in the original is also NULL)
               addEllipses = TRUE,
               ellipse.type = "confidence",
               legend.title = "Groups",
               repel = TRUE,
               label = "none",
               title = title)
}
#--(2a) Pre-Construction Ellipses PCA categories
pre.species <- ellipse_plot(pca.pre, pre.g.species, "Pre-Construction: Species Groupings")
pre.solar <- ellipse_plot(pca.pre, pre.g.solar, "Pre-Construction: Daylight Preference")
pre.cat <- ellipse_plot(pca.pre, pre.g.cat, "Pre-Construction: Mammalian Groupings")
pre.cam <- ellipse_plot(pca.pre, pre.g.cam, "Pre-Construction: Camera Preference")
pre.traffic <- ellipse_plot(pca.pre, pre.g.traffic, "Pre-Construction: SUMMER Traffic Preference")
pre.dnc <- ellipse_plot(pca.pre, pre.g.dnc, "Pre-Construction: D/N/C Category")
#--(2b) Post-Construction Ellipses PCA categories
post.species <- ellipse_plot(pca.post, post.g.species, "Post-Construction: Species Groupings")
post.solar <- ellipse_plot(pca.post, post.g.solar, "Post-Construction: Daylight Preference")
post.cat <- ellipse_plot(pca.post, post.g.cat, "Post-Construction: Mammalian Groupings")
post.cam <- ellipse_plot(pca.post, post.g.cam, "Post-Construction: Camera Preference")
post.traffic <- ellipse_plot(pca.post, post.g.traffic, "Post-Construction: SUMMER Traffic Preference")
post.dnc <- ellipse_plot(pca.post, post.g.dnc, "Post-Construction: D/N/C Category")
#Generate Plots
# Evaluating each ggplot object renders it on the active graphics device.
# NOTE(review): bare symbols auto-print at the interactive top level; when
# this file is source()d the default is echo = FALSE and these lines do not
# render -- use print(pre.scree) etc. if sourcing.
# Pre-construction figures:
pre.scree
pre.ind
pre.var
pre.bp
pre.species
pre.solar
pre.cat
pre.cam
pre.traffic
pre.dnc
# Post-construction figures:
post.scree
post.ind
post.var
post.bp
post.species
post.solar
post.cat
post.cam
post.traffic
post.dnc
|
6157e3b6988305d7d7d6130882c8e87143a438c1
|
15e6816528dfd35bb10c2c87897812e9c416fd3a
|
/man/readBlast.Rd
|
a12ad1c49ddb5f1b347f07564becaa0372e95928
|
[] |
no_license
|
jackgisby/packFinder
|
0e038fd8529ac43e47c9adbfdcfb017b6122c178
|
068bad218f049e389608dba10c348b581daa9449
|
refs/heads/master
| 2022-08-14T23:50:21.026807
| 2022-07-18T10:19:53
| 2022-07-18T10:19:53
| 201,337,387
| 6
| 1
| null | 2019-10-28T12:15:12
| 2019-08-08T21:04:47
|
R
|
UTF-8
|
R
| false
| true
| 3,103
|
rd
|
readBlast.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/readBlast.R
\name{readBlast}
\alias{readBlast}
\title{Convert NCBI BLAST+ Files to Dataframe}
\usage{
readBlast(
file,
minE = 1,
length = 0,
identity = 0,
removeExactMatches = FALSE,
scope = NULL,
packMatches = NULL
)
}
\arguments{
\item{file}{The file path of the blast file.}
\item{minE}{Blast results with e values greater than
the specified cutoff will be ignored.}
\item{length}{Blast results alignment lengths lower below
this value will be ignored}
\item{identity}{Blast results with target sequence identities below
this value will be ignored.}
\item{removeExactMatches}{If true, matches with 100% sequence identity will
be ignored to prevent self-hits.}
\item{scope}{If specified, blast results below the specified value
will be ignored. Note that the dataframe of transposon
matches must also be supplied to calculate scope. Scope is
the proportion of the transposon's internal sequence
occupied by the BLAST hit.}
\item{packMatches}{A dataframe containing genomic ranges and names referring
to sequences to be extracted. Can be obtained from
\code{\link{packSearch}} or generated from a
\code{\link[GenomicRanges:GRanges-class]{GRanges}} object,
after conversion to a dataframe. Must contain the
following features:
\itemize{
\item start - the predicted element's start base
sequence position.
\item end - the predicted element's end
base sequence position.
\item seqnames - character string
referring to the sequence name in \code{Genome} to
which \code{start} and \code{end} refer to.
}}
}
\value{
A dataframe containing the converted .blast6out file.
The file contains the following features:
\itemize{
\item Query sequence ID
\item Target sequence ID
\item Percent sequence identity
\item Alignment length
\item Number of mismatches
\item Number of gaps
\item Base position of alignment start
in query sequence
\item Base position of alignment end in query sequence
\item Base position of alignment start in target sequence
\item Base position of alignment end in target sequence
\item E-value
\item Bit score
}
}
\description{
Reads .blast6out files (NCBI Blast Format) generated by
the VSEARCH clustering and alignment algorithms.
}
\details{
blast6out file is tab-separated text file compatible with
NCBI BLAST m8 and NCBI BLAST+ outfmt 6 formats. One
cluster/alignment can be found for each line.
}
\examples{
readBlast(system.file(
"extdata",
"packMatches.blast6out",
package = "packFinder"
))
}
\references{
For further information, see the NCBI BLAST+ application
documentation and help pages
(https://www.ncbi.nlm.nih.gov/pubmed/20003500?dopt=Citation).
VSEARCH may be downloaded from
\url{https://github.com/torognes/vsearch}; see
\url{https://www.ncbi.nlm.nih.gov/pubmed/27781170}
for further information.
}
\seealso{
\code{\link{blastAnalysis}}, \code{\link{blastAnnotate}},
\code{\link{packAlign}},
\code{\link{readUc}}, \code{\link{packClust}}
}
\author{
Jack Gisby
}
|
f139fd6afd2d00c8d64f39d1c4de690b0caf2791
|
dae88885e447582fa3f6f0c31ba0a7a5e4b96a32
|
/R/qqplots.R
|
2a36c6cc2761020bc3ef15d2609b4861ec6a08ce
|
[] |
no_license
|
jergosh/cluster
|
078cc62b3af11a36c93a5e64482f63a2eec77f16
|
0cd07bedf8386d4b32c021c5613ce74cac23557d
|
refs/heads/master
| 2021-05-01T17:28:16.393079
| 2016-12-07T11:28:04
| 2016-12-07T11:28:04
| 44,182,290
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,366
|
r
|
qqplots.R
|
# QQ plot of observed vs. expected -log10(p).
#
# Args:
#   pvector: numeric vector of p-values.
#   main:    optional plot title.
#   ...:     further arguments passed to plot().
# Side effect: draws on the active graphics device; y = x reference line in red.
ggd.qqplot <- function(pvector, main = NULL, ...) {
  o <- -log10(sort(pvector, decreasing = FALSE))
  # seq_along() is safe for empty input; 1:length(o) would yield c(1, 0)
  e <- -log10(seq_along(o) / length(o))
  plot(e, o, pch = 19, cex = 1, main = main, ...,
       xlim = c(0, max(e)), ylim = c(0, max(o)),
       ann = FALSE)
  mtext(expression(Expected~~-log[10](italic(p))), side = 1, line = 2.5, cex = 0.7)
  mtext(expression(Observed~~-log[10](italic(p))), side = 2, line = 1.75, cex = 0.7)
  # identity line: points above it are enriched relative to the uniform expectation
  lines(e, e, col = "red")
}
# QQ plot of -log10(p) with one point series per level of `factor`.
#
# Args:
#   pvector: numeric vector of p-values.
#   factor:  factor of the same length as pvector; one series per level.
#   cols:    vector of colours, indexed in the order of levels(factor).
#   all:     if TRUE, additionally draw the pooled p-values in black.
#   main:    optional plot title.
#   ...:     further arguments passed to plot().
# Side effect: draws on the active graphics device; black y = x reference line.
ggd.qqplot.mult <- function(pvector, factor, cols, all = FALSE, main = NULL, ...) {
  es <- list()
  os <- list()
  max_e <- 0.0
  max_o <- 0.0
  # First pass: compute observed/expected quantiles per level and track axis limits.
  for (l in levels(factor)) {
    o <- -log10(sort(pvector[factor == l], decreasing = FALSE))
    # seq_along() keeps e and o the same length even for an empty level;
    # 1:length(o) would produce c(1, 0) and a length mismatch in points()
    e <- -log10(seq_along(o) / length(o))
    os[[l]] <- o
    es[[l]] <- e
    max_e <- max(c(max_e, e))
    max_o <- max(c(max_o, o))
  }
  plot(NA, main = main, ...,
       xlim = c(0, max_e), ylim = c(0, max_o),
       ann = FALSE)
  mtext(expression(Expected~~-log[10](italic(p))), side = 1, line = 2.5)
  mtext(expression(Observed~~-log[10](italic(p))), side = 2, line = 2.0)
  if (all) {
    o <- -log10(sort(pvector, decreasing = FALSE))
    e <- -log10(seq_along(o) / length(o))
    points(e, o, pch = 1, cex = 1, col = "black")
  }
  # Second pass: draw each level in its colour (cols indexed by level position).
  for (l in levels(factor)) {
    points(es[[l]], os[[l]], pch = 1, cex = 1, col = cols[which(levels(factor) == l)])
  }
  lines(c(0, max_e), c(0, max_e), col = "black")
}
|
1e3a84507d0b2accca914cbdfa7e35b653fb0a4a
|
fefd0ae2c6ce3ef6230091b1fa437631a8c72e1f
|
/W2/w2part3rassignment.R
|
6570080f34203920dd0422f3e604b0d530b21410
|
[] |
no_license
|
praveenkandasamy/johnhopkinscourse2
|
c54512e9b8072946b818b398cce38929269b994f
|
f745bbee2fb768e53db79ef55b36fff477fc2670
|
refs/heads/master
| 2020-06-16T12:42:32.944465
| 2019-07-06T20:34:00
| 2019-07-06T20:34:00
| 195,578,731
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 548
|
r
|
w2part3rassignment.R
|
# Compute sulfate/nitrate correlations across monitor CSV files.
#
# Args:
#   directory: path containing the monitor CSV files (each with `sulfate`
#              and `nitrate` columns).
#   threshold: minimum number of completely observed rows a file must have
#              (strictly greater than) for its correlation to be included.
# Returns: numeric vector of correlations, possibly of length 0.
#
# Fixes the original: `threshold` was overwritten inside the loop, only the
# last file's data was ever used, and complete cases were never subset.
corr <- function(directory, threshold = 0) {
  filelist <- list.files(path = directory, pattern = "*.csv", full.names = TRUE)
  correlations <- numeric(0)
  for (f in filelist) {
    data <- read.csv(f)
    complete <- data[complete.cases(data), ]
    # only monitors with more complete observations than the threshold contribute
    if (nrow(complete) > threshold) {
      correlations <- c(correlations, cor(complete$sulfate, complete$nitrate))
    }
  }
  correlations
}
|
a86a7afe5a015104a87886d770866d8e54bc12e3
|
e3c0607809caa6e35ffb2af5ac890678936a7704
|
/namelist.general.post.r
|
29be745c7246a0c73fc819e0957d924027742664
|
[] |
no_license
|
chrisdane/echam
|
30988375fb51b99caca355693b5c3817e32ad178
|
2f3b4df106b6549744d9ea5547acfc6fe90ec772
|
refs/heads/master
| 2023-07-11T10:39:11.091753
| 2023-06-26T07:20:23
| 2023-06-26T07:20:23
| 207,476,216
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,093
|
r
|
namelist.general.post.r
|
# r
# input for post_echam.r
# Sets up the environment (workspace clear, helper sourcing) and defines the
# general post-processing options (cdo/nco/nice/ionice knobs).
message("###################### namelist.general.post.r start ##########################")
graphics.off()
options(show.error.locations=TRUE)
options(warn=2) # stop on warnings
#options(warn=0) # back to default
# clear work space; `repopath` must already be defined by the caller
if (TRUE) {
    message("\nclear work space ...")
    ws <- ls()
    # setdiff() also works when "repopath" is absent from ws; the original
    # ws[-which(ws == "repopath")] evaluates to character(0) in that case,
    # so rm() would silently remove nothing
    ws <- setdiff(ws, "repopath")
    rm(list=ws)
}
# load helper functions of this repo
script_helper_functions <- paste0(repopath, "/helper_functions.r")
message("\nload `repopath`/helper_functions.r = ", script_helper_functions, " ...")
source(script_helper_functions) # get_host()
# get host options
host <- get_host()
host$repopath <- repopath
# load functions from submodule
message("\nload functions from submodule dir ", host$repopath, "/functions\" ...")
# needed myfunctions.r functions:
# ht(), is.leap(), identical_list(), make_posixlt_origin(), ncdump_get_filetype()
for (i in c("myfunctions.r")) source(paste0(host$homepath, "/functions/", i))
# general options
verbose <- 1 # 0,1
post_force <- FALSE # redo calculation although output file already exists
clean <- TRUE # remove temporary files
# cdo options
cdo_silent <- "" # "-s" for silent or ""
cdo_select_no_history <- "" # "--no_history" or ""
cdo_convert_grb2nc <- TRUE # should post processing result be converted to nc (will be set to TRUE if new dates are wanted)?
cdo_OpenMP_threads <- paste0("-P ", max(1, trunc(0.75*as.integer(system("nproc", intern=TRUE))))) # "-P n" or "" (will be ignored on commands that do not support OMP)
cdo_set_rel_time <- TRUE # conversion from absolute to relative time
cdo_run_from_script <- TRUE # create temporary file and run long cdo command from there
# maximum number of args cdo
# stan0/1: getconf ARG_MAX 2621440
# paleosrv1: getconf ARG_MAX 2097152
cdo_nchar_max_arglist <- 2350000 # reduce this number if you get segmentation fault on the cdo selection command (many files)
# nco options
# maximum number of args nco
# $(getconf PAGE_SIZE)*32 = 4096*32 = 131072
nco_nchar_max_arglist <- 131071
# nice options
# -n, --adjustment=N
#     add integer N to the niceness (default 10)
#     Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process)
#     levante: only values >= 0 are allowed
nice_options <- "" # default: do not use nice
#nice_options <- "-n 19"
#nice_options <- "-n 10"
nice_options <- "-n 0" # NOTE: last assignment wins
# ionice options
# -c, --class class
#     Specify the name or number of the scheduling class to use; 0 for none, 1 for realtime, 2 for best-effort, 3 for idle.
# -n, --classdata level
#     Specify the scheduling class data. This only has an effect if the class accepts an argument. For realtime and best-effort, 0-7 are valid data (priority levels), and 0 represents the highest priority level.
ionice_options <- "" # default: do not use ionice
#ionice_options <- "-c2 -n3"
ionice_options <- "-c2 -n0" # NOTE: last assignment wins
# model specific general options
mpiom1_remap <- TRUE
# known dimnames; add further
# so far only time needed
known_dimnames <- list(time=c("time", "Time", "TIME", "time_mon", "T", "t"))
# cdo commands for some variables
# Each entry maps a derived variable name to one or more shell command
# templates; `<...>` placeholders are substituted by the post-processing
# driver before execution.
# Fixed copy-paste bugs (cross-checked against each entry's own ncatted
# target): "chl" used -setname,npp; "silicate" used -setname,siliate;
# "POCphydiadet" ncatted targeted "poc".
cdo_known_cmds <- list(
    "psl"=list(cmd=c("<cdo> merge <aps> <geosp> <t>",
                     "<cdo> sealevelpressure")),
    "hvel"=list(cmd=c("<cdo> expr,'hvel=sqrt(uo*uo + vo*vo)' <uvo>")),
    # TOA imbalance
    # https://github.com/ncar-hackathons/gallery/blob/master/cmip6dpdt_pendergrass/get_cmip6_ECS-alt.ipynb
    # cmor:
    # N = rsdt - rsut - rlut
    # rsdt = toa_incoming_shortwave_flux = TOA Incident Shortwave Radiation
    # rsut = toa_outgoing_shortwave_flux = TOA Outgoing Shortwave Radiation
    # rlut = toa_outgoing_longwave_flux = TOA Outgoing Longwave Radiation
    # rtmt = net_downward_radiative_flux_at_top_of_atmosphere_model = Net Downward Radiative Flux at Top of Model
    # echam:
    # N = trad0 + srad0 (= `cdo add trad0 srad0`)
    # trad0 = top thermal radiation (OLR)
    # srad0 = net top solar radiation
    # srad0d = top incoming SW radiation = rsdt
    "toa_imbalance"=list(cmd="<cdo> -setname,toa_imbalance -enssum <rsdt> -mulc,-1.0 <rsut> -mulc,-1.0 <rlut>"),
    "quv_direction"=list(cmd=c("<cdo> -setname,quv_direction -divc,3.141593 -mulc,180 -atan2 <qv> <qu>",
                               "<nco_ncatted> -O -a long_name,quv_direction,o,c,\"direction of water vapor transport\"",
                               "<nco_ncatted> -O -a units,quv_direction,o,c,\"degree\"")),
    "wisoaprt_d_post"=list(cmd=c("<cdo> -setname,wisoaprt_d -setcode,10 -mulc,1000. -subc,1. -div -div <wisoaprt> <aprt> <wiso_smow_files>",
                                 "<nco_ncatted> -O -a long_name,wisoaprt_d,o,c,\"delta of total precipitation\"",
                                 "<nco_ncatted> -O -a units,wisoaprt_d,o,c,\"o/oo\"")),
    "wisoaprl_d_post"=list(cmd="<cdo> -setname,wisoaprl_d -setcode,13 -mulc,1000. -subc,1. -div -div <wisoaprl> <aprl> <wiso_smow_files>"),
    "wisoaprc_d_post"=list(cmd="<cdo> -setname,wisoaprc_d -setcode,14 -mulc,1000. -subc,1. -div -div <wisoaprc> <aprc> <wiso_smow_files>"),
    "wisoaprs_d_post"=list(cmd="<cdo> -setname,wisoaprs_d -setcode,15 -mulc,1000. -subc,1. -div -div <wisoaprs> <aprs> <wiso_smow_files>"),
    "wisoevap_d_post"=list(cmd=c("<cdo> -setname,wisoevap_d -setcode,19 -mulc,1000. -subc,1. -div -div <wisoevap> <evap> <wiso_smow_files>",
                                 "<nco_ncatted> -O -a long_name,wisoevap_d,o,c,\"delta of evaporation\"",
                                 "<nco_ncatted> -O -a units,wisoevap_d,o,c,\"o/oo\"")),
    "wisope_d_post"=list(cmd=c("<cdo> -setname,wisope_d -setcode,20 -mulc,1000. -subc,1. -div -div <wisope> <pe> <wiso_smow_files>",
                               "<nco_ncatted> -O -a long_name,wisope_d,o,c,\"delta of precip minus evap\"",
                               "<nco_ncatted> -O -a units,wisope_d,o,c,\"o/oo\"")),
    "wisows_d_post"=list(cmd="<cdo> -setname,wisows_d -setcode,11 -mulc,1000. -subc,1. -div -div <wisows> <ws> <wiso_smow_files>"),
    "wisosn_d_post"=list(cmd="<cdo> -setname,wisosn_d -setcode,12 -mulc,1000. -subc,1. -div -div <wisosn> <sn> <wiso_smow_files>"),
    # NOTE(review): "wisoasnglac_d" (extra 'a') breaks the key-minus-"_post"
    # naming pattern of the neighbouring entries; confirm whether downstream
    # code expects "wisosnglac_d" before renaming.
    "wisosnglac_d_post"=list(cmd="<cdo> -setname,wisoasnglac_d -setcode,33 -mulc,1000. -subc,1. -div -div <wisosnglac> <snglac> <wiso_smow_files>"),
    "wisorunoff_d_post"=list(cmd="<cdo> -setname,wisorunoff_d -setcode,17 -mulc,1000. -subc,1. -div -div <wisorunoff> <runoff> <wiso_smow_files>"),
    "aprt_times_temp2"=list(cmd=c("<cdo> -setname,aprt_times_temp2 -mul <aprt> <temp2>",
                                  "<nco_ncatted> -O -a code,aprt_times_temp2,d,,", # delete old `code` attribute
                                  "<nco_ncatted> -O -a table,aprt_times_temp2,d,,", # delete old `table` attribute
                                  "<nco_ncatted> -O -a long_name,aprt_times_temp2,o,c,\"aprt times temp2\"",
                                  "<nco_ncatted> -O -a units,aprt_times_temp2,o,c,\"mm/month degC\"")),
    "aprt_times_tsurf"=list(cmd=c("<cdo> -setname,aprt_times_tsurf -mul <aprt> <tsurf>",
                                  "<nco_ncatted> -O -a code,aprt_times_tsurf,d,,",
                                  "<nco_ncatted> -O -a table,aprt_times_tsurf,d,,",
                                  "<nco_ncatted> -O -a long_name,aprt_times_tsurf,o,c,\"aprt times tsurf\"",
                                  "<nco_ncatted> -O -a units,aprt_times_tsurf,o,c,\"mm/month degC\"")),
    "temp2aprt"=list(cmd=c("<cdo> -setname,temp2aprt -div <aprt_times_temp2> <aprt>",
                           "<nco_ncatted> -O -a code,temp2aprt,d,,",
                           "<nco_ncatted> -O -a table,temp2aprt,d,,",
                           "<nco_ncatted> -O -a long_name,temp2aprt,o,c,\"temp2 weighted by aprt\"",
                           "<nco_ncatted> -O -a units,temp2aprt,o,c,\"degC\"")),
    "tsurfaprt"=list(cmd=c("<cdo> -setname,tsurfaprt -div <aprt_times_tsurf> <aprt>",
                           "<nco_ncatted> -O -a code,tsurfaprt,d,,",
                           "<nco_ncatted> -O -a table,tsurfaprt,d,,",
                           "<nco_ncatted> -O -a long_name,tsurfaprt,o,c,\"tsurf weighted by aprt\"",
                           "<nco_ncatted> -O -a units,tsurfaprt,o,c,\"degC\"")),
    "fgco2"=list(cmd=c("<cdo> -setname,fgco2 -mulc,-0.272912 <co2_flx_ocean>",
                       # co2_flx_ocean:7
                       # into atm --> into ocean; kgCO2 --> kgC
                       "<nco_ncatted> -O -a code,fgco2,d,,",
                       "<nco_ncatted> -O -a table,fgco2,d,,",
                       "<nco_ncatted> -O -a long_name,fgco2,o,c,\"Surface Downward Flux of Total CO2 [kgC m-2 s-1]\"")),
    "nbp"=list(cmd=c("<cdo> -setname,nbp -mulc,-0.272912 -enssum <co2_flx_land> <co2_flx_lcc> <co2_flx_harvest>",
                     # co2_flx_land:6 + co2_flx_lcc:24 + co2_flx_harvest:25
                     # into atm --> into land; kgCO2 --> kgC; nbp = netAtmosLandCO2Flux
                     "<nco_ncatted> -O -a code,nbp,d,,",
                     "<nco_ncatted> -O -a table,nbp,d,,",
                     "<nco_ncatted> -O -a long_name,nbp,o,c,\"Carbon Mass Flux out of Atmosphere Due to Net Biospheric Production on Land [kgC m-2 s-1]\"")),
    "netAtmosLandCO2Flux"=list(cmd=c("<cdo> -setname,netAtmosLandCO2Flux -mulc,-0.272912 -enssum <co2_flx_land> <co2_flx_lcc> <co2_flx_harvest>",
                                     # into atm --> into land; kgCO2 --> kgC; netAtmosLandCO2Flux = nbp
                                     "<nco_ncatted> -O -a code,netAtmosLandCO2Flux,d,,",
                                     "<nco_ncatted> -O -a table,netAtmosLandCO2Flux,d,,",
                                     paste0("<nco_ncatted> -O -a long_name,netAtmosLandCO2Flux,o,c,\"Net flux of CO2 between atmosphere and ",
                                            "land (positive into land) as a result of all processes [kgC m-2 s-1]\""))),
    "co2_flx_total"=list(cmd=c("<cdo> -setname,co2_flx_total -add <fgco2> <nbp>",
                               paste0("<nco_ncatted> -O -a long_name,co2_flx_total,o,c,\"Total CO2 flux of ocean and land; ",
                                      "fgco2+nbp (positive into ocean/land) [kgC m-2 s-1]\""))),
    "fLuc"=list(cmd=c("<cdo> -setname,fLuc -mulc,0.272912 <co2_flx_lcc>", # kgCO2 --> kgC
                      "<nco_ncatted> -O -a code,fLuc,d,,",
                      "<nco_ncatted> -O -a table,fLuc,d,,",
                      "<nco_ncatted> -O -a long_name,fLuc,o,c,\"Net Carbon Mass Flux into Atmosphere due to Land Use Change [kgC m-2 s-1]\"")),
    "litter"=list(cmd=c(paste0("<cdo> -setname,litter -enssum ",
                               "-vertsum <boxYC_acid_ag1> -vertsum <boxYC_acid_ag2> ", # 1: leaf, 2: wood
                               "-vertsum <boxYC_water_ag1> -vertsum <boxYC_water_ag2> ",
                               "-vertsum <boxYC_ethanol_ag1> -vertsum <boxYC_ethanol_ag2> ",
                               "-vertsum <boxYC_nonsoluble_ag1> -vertsum <boxYC_nonsoluble_ag2>"),
                        "<nco_ncatted> -O -a code,litter,d,,",
                        "<nco_ncatted> -O -a table,litter,d,,",
                        "<nco_ncatted> -O -a long_name,litter,o,c,\"Litter carbon (yasso)\"")),
    "soilFast"=list(cmd=c(paste0("<cdo> -setname,soilFast -enssum ",
                                 "-vertsum <boxYC_acid_bg1> -vertsum <boxYC_acid_bg2> ", # 1: leaf, 2: wood
                                 "-vertsum <boxYC_water_bg1> -vertsum <boxYC_water_bg2> ",
                                 "-vertsum <boxYC_ethanol_bg1> -vertsum <boxYC_ethanol_bg2> ",
                                 "-vertsum <boxYC_nonsoluble_bg1> -vertsum <boxYC_nonsoluble_bg2>"),
                          "<nco_ncatted> -O -a code,soilFast,d,,",
                          "<nco_ncatted> -O -a table,soilFast,d,,",
                          "<nco_ncatted> -O -a long_name,soilFast,o,c,\"Fast soil carbon (yasso)\"")),
    "cSoilSlow"=list(cmd=c(paste0("<cdo> -setname,cSoilSlow -mulc,0.0120107 -add ", # molC --> kgC
                                  "-vertsum <boxYC_humus_1> -vertsum <boxYC_humus_2>"), # 1: leaf, 2: wood
                           # `cdo -add -vertsum <file> -vertsum <file>` is faster than
                           # `cdo -vertsum -add <file> <file>` (tested with 740MB files)
                           "<nco_ncatted> -O -a code,cSoilSlow,d,,",
                           "<nco_ncatted> -O -a table,cSoilSlow,d,,",
                           "<nco_ncatted> -O -a units,cSoilSlow,o,c,\"kgC m-2\"",
                           "<nco_ncatted> -O -a long_name,cSoilSlow,o,c,\"Carbon Mass in Slow Soil Pool\"")),
    "divuvttot"=list(cmd=c(paste0("<cdo> -setname,divuvttot -add ",
                                  "-selvar,divuvt <divuvt> -selvar,divuvteddy <divuvteddy>"),
                           "<nco_ncatted> -O -a long_name,divuvttot,o,c,\"mean + eddy div_h(u_h T)\"")),
    # fixed: -setname must create the variable the ncatted line targets (was -setname,npp)
    "chl"=list(cmd=c(paste0("<cdo> -setname,chl -add ",
                            "-selvar,bgc15 <bgc15> -selvar,bgc06 <bgc06>"),
                     paste0("<nco_ncatted> -O -a long_name,chl,o,c,",
                            "\"Mass Concentration of Total Phytoplankton Expressed as Chlorophyll in Sea Water; Chl_diatoms + Chl_phytoplankton\""))),
    "npp_nanophy_dia"=list(cmd=c(paste0("<cdo> -setname,npp_nanophy_dia -add ",
                                        "-selvar,diags3d01 <diags3d01> -selvar,diags3d02 <diags3d02>"),
                                 paste0("<nco_ncatted> -O -a long_name,npp_nanophy_dia,o,c,",
                                        "\"net primary production by nanophytoplankton + net primary production by diatoms\""))),
    "pCO2a"=list(cmd=c("<cdo> -setname,pCO2a -sub <pCO2s> <dpCO2s>", # recom in µatm; oce - (oce - air) = oce - oce + air = air
                       "<nco_ncatted> -O -a long_name,pCO2a,o,c,\"Partial pressure of atmospheric CO2\"")),
    "apco2"=list(cmd=c("<cdo> -setname,apco2 -sub <spco2> <dpco2>", # cmip6 in Pa; oce - (oce - air) = oce - oce + air = air
                       "<nco_ncatted> -O -a long_name,apco2,o,c,\"Partial pressure of atmospheric CO2\"")),
    # fixed: ncatted targeted "poc", but -setname creates "POCphydiadet"
    "POCphydiadet"=list(cmd=c("<cdo> -setname,POCphydiadet -enssum <bgc05> <bgc14> <bgc08>", # phyc + diac + detc
                              "<nco_ncatted> -O -a long_name,POCphydiadet,o,c,\"Carbon from small pyhtoplankton + diatoms + detritus\"")),
    "calcite"=list(cmd=c("<cdo> -setname,calcite -enssum <bgc20> <bgc21>", # phycal + detcal
                         "<nco_ncatted> -O -a long_name,calcite,o,c,\"Calcite from small pyhtoplankton + detritus\"")),
    "sedimentC"=list(cmd=c("<cdo> -setname,sedimentC -enssum <benC> <benCalc>",
                           "<nco_ncatted> -O -a long_name,sedimentC,o,c,\"Benthic carbon and calcium carbonate\"")),
    # fixed: -setname,siliate typo (ncatted targets "silicate")
    "silicate"=list(cmd=c("<cdo> -setname,silicate -enssum <bgc16> <bgc17> <bgc18> <benSi>", # (diatom + detritus + dissolved acid + benthic) silicate
                          "<nco_ncatted> -O -a long_name,silicate,o,c,\"Diatoms + detritus + dissolved acid + benthic Silicate\""))
) # cdo_known_cmds
message("###################### namelist.general.post.r finish ##########################")
|
8993fb647cec87ad4fc93385eeeb6e19c05df508
|
a442f04a26b881d93318911a2d14f5b91189fdef
|
/R/hf_diabetes_meds.R
|
4d7fa5a946f41333dc1cae261853702ce06d1ae2
|
[] |
no_license
|
unmtransinfo/cerner-tools
|
fb45b7d347e17ea444a794ad276e958966390864
|
93ada80c97a28f405007d7178631cec674a21f76
|
refs/heads/master
| 2023-06-23T00:36:59.047465
| 2023-06-09T17:13:20
| 2023-06-09T17:13:20
| 157,255,596
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,124
|
r
|
hf_diabetes_meds.R
|
library(vioplot)
# Load the heart-failure + diabetes dataset; keep only rows where the lab
# was drawn on or after the medication date.
hf <- read.delim("data/hf_diabetes+labs+meds.csv", stringsAsFactors=FALSE)
print(sprintf("total input data rows: %d", nrow(hf)))
hf$lab_date <- as.Date(hf$lab_date, "%Y-%m-%d")
hf$med_date <- as.Date(hf$med_date, "%Y-%m-%d")
hf <- hf[hf$lab_date >= hf$med_date,]
#hf <- hf[hf$numeric_result>3,]
print(sprintf("total working data rows: %d", nrow(hf)))
n_data <- nrow(hf)
hf$days_m2l <- as.integer(hf$days_m2l)
# Diagnosis-code frequency summary, joined against the code descriptions.
diabetes_codes <- read.delim("data/hf_diabetes_codes.csv", colClasses="character")
diabetes_codes <- diabetes_codes[order(diabetes_codes$diagnosis_code),]
ndc <- length(levels(as.factor(hf$diagnosis_code)))
print(sprintf("diabetes codes: %d ; diabetes diagnoses in dataset: %s", nrow(diabetes_codes), ndc))
n_total <- 0
for (code in levels(as.factor(hf$diagnosis_code)))
{
  n <- nrow(hf[hf$diagnosis_code==code,])
  desc <- diabetes_codes$diagnosis_description[diabetes_codes$diagnosis_code==code]
  print(sprintf("%5s [N = %7d, %4.1f%%] %s", code, n, 100*n/n_data, desc))
  n_total <- n_total + n
}
print(sprintf("DEBUG: n_total = %d",n_total))
print(sprintf("mean days (med->lab): %4.1f", mean(as.integer(hf$lab_date-hf$med_date), na.rm=TRUE)))
# Map lab_procedure_id -> mnemonic, then summarize lab frequencies.
lab_codes <- read.delim("data/hf_labs_hgb-a1c_codes.csv", colClasses="character")
hf$lab_mn <- rep(NA, nrow(hf))
for (id in levels(as.factor(hf$lab_procedure_id)))
{
  lab_mn <- lab_codes[lab_codes$lab_procedure_id==id,]$lab_procedure_mnemonic
  print(sprintf("DEBUG: %s: %s",id,lab_mn))
  hf$lab_mn[hf$lab_procedure_id==id] <- lab_mn
}
n_total <- 0
for (lab_mn in levels(as.factor(hf$lab_mn)))
{
  n <- nrow(hf[hf$lab_mn==lab_mn,])
  if (n>0)
    print(sprintf("[N = %6d, %4.1f%%] %s", n, 100*n/n_data, lab_mn))
  n_total <- n_total + n
}
print(sprintf("DEBUG: n_total = %d",n_total))
# Map medication_id -> generic name / route, classify insulins, and compare
# Hgb A1C between "engineered" and "natural" insulin groups.
meds <- read.delim("data/hf_meds_insulins.csv", colClasses="character")
hf$generic_name <- rep(NA, nrow(hf))
hf$route <- rep(NA, nrow(hf))
for (id in levels(as.factor(hf$medication_id)))
{
  med_gname <- meds$generic_name[meds$medication_id==id]
  route <- meds$route_description[meds$medication_id==id]
  hf$generic_name[hf$medication_id==id] <- med_gname
  hf$route[hf$medication_id==id] <- route
}
hf$route <- as.factor(hf$route)
print(table(hf$route))
## Group insulin into engineered vs natural:
# aspart/lispro/glargine analogues count as "engineered"; everything else "natural".
hf$med_class <- as.character(rep(NA, nrow(hf)))
hf$med_class[grepl("aspart", hf$generic_name, ignore.case=TRUE)] <- "engineered"
hf$med_class[grepl("lispro", hf$generic_name, ignore.case=TRUE)] <- "engineered"
hf$med_class[grepl("glargine", hf$generic_name, ignore.case=TRUE)] <- "engineered"
hf$med_class[is.na(hf$med_class)] <- "natural"
#print(table(hf$med_class, hf$generic_name))
tbl <- table(hf$med_class, hf$generic_name)
n_total <- 0
for (rn in rownames(tbl))
{
  for (cn in colnames(tbl))
  {
    n <- tbl[rn,cn]
    if (n>0)
      print(sprintf("[N = %6d, %4.1f%%] %s: %s", n, 100*n/n_data, rn, cn))
    n_total <- n_total + n
  }
}
print(sprintf("DEBUG: n_total = %d",n_total))
hf$med_class <- as.factor(hf$med_class)
print(table(hf$med_class))
print(table(hf$med_class, hf$route))
# Hgb A1C comparison between insulin classes.
hf$numeric_result <- as.numeric(hf$numeric_result)
hgbval_all <- hf$numeric_result
hgbval_eng <- hgbval_all[hf$med_class=="engineered"]
hgbval_nat <- hgbval_all[hf$med_class=="natural"]
print(sprintf("mean Hgb A1C: %.2f ; variance: %.2f", mean(hgbval_all, na.rm=TRUE), var(hgbval_all, na.rm=TRUE)))
print(sprintf("mean Hgb A1C (engineered insulin): %.2f ; variance: %.2f", mean(hgbval_eng, na.rm=TRUE), var(hgbval_eng, na.rm=TRUE)))
print(sprintf("mean Hgb A1C (natural insulin): %.2f ; variance: %.2f", mean(hgbval_nat, na.rm=TRUE), var(hgbval_nat, na.rm=TRUE)))
# Integer-binned histogram of values per class.
for (v in 0:25)
{
  print(sprintf("HgbA1C = %2d-%2d: e=%5d n=%5d", v, v+1, length(which(as.integer(hgbval_eng)==v)), length(which(as.integer(hgbval_nat)==v))))
}
tt <- t.test(hgbval_eng[!is.na(hgbval_eng)], hgbval_nat[!is.na(hgbval_nat)], var.equal=FALSE)
print(sprintf("Welch's 2-sample T-test p-value = %g", tt$p.value))
#boxplot box includes 2nd and 3rd quantile. Thus 50% of data in box.
#range=1.5 means 97% of data within whiskers.
boxplot(hgbval_eng[!is.na(hgbval_eng)],
        hgbval_nat[!is.na(hgbval_nat)],
        ylim=c(0,25),
        names=c("engineered","natural"),
        col="tomato",
        range=1.5,
        varwidth=TRUE,
        boxwex=0.5)
title(main="Hgb A1C vs. Insulin class")
abline(h=mean(hgbval_eng, na.rm=TRUE), col="gray", lwd=2)
abline(h=mean(hgbval_nat, na.rm=TRUE), col="gray", lwd=2)
text(1, mean(hgbval_eng, na.rm=TRUE), sprintf("mean = %.2f", mean(hgbval_eng, na.rm=TRUE)), pos=3, cex=0.8)
text(2, mean(hgbval_nat, na.rm=TRUE), sprintf("mean = %.2f", mean(hgbval_nat, na.rm=TRUE)), pos=1, cex=0.8)
###
# Same comparison as a violin plot.
vioplot(hgbval_eng[!is.na(hgbval_eng)],
        hgbval_nat[!is.na(hgbval_nat)],
        ylim=c(0,25),
        names=c("engineered","natural"),
        col="tomato",
        range=1.5,
        wex=0.5
        )
title(main="Hgb A1C vs. Insulin class")
text(1, mean(hgbval_eng, na.rm=TRUE), sprintf("mean = %.2f\nvar = %.2f", mean(hgbval_eng, na.rm=TRUE), var(hgbval_eng, na.rm=TRUE)), pos=4, cex=0.8)
text(2, mean(hgbval_nat, na.rm=TRUE), sprintf("mean = %.2f\nvar = %.2f", mean(hgbval_nat, na.rm=TRUE), var(hgbval_nat, na.rm=TRUE)), pos=4, cex=0.8)
|
88ad95b5d1ed89f14a2914143e992b797ce6ac08
|
2dcb9d91668917be46c25549b6e42ecde77fcd33
|
/man/xml_parse.Rd
|
77f8e41d741c8365c4a63244586950b619788959
|
[] |
no_license
|
arturochian/xml2
|
03cdd3a5135ad74ad1519daef6271fdb84d68071
|
9754f9fc69f5d77f0a4098012a304c340cbeec12
|
refs/heads/master
| 2020-12-29T03:19:31.547580
| 2015-02-12T20:56:17
| 2015-02-12T20:56:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 254
|
rd
|
xml_parse.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/hello.R
\name{xml_parse}
\alias{xml_parse}
\title{Parse XML string}
\usage{
xml_parse(x)
}
\description{
Parse XML string
}
\examples{
xml_parse("<foo> 123 </foo>")
}
|
46f1227be03c38780ee45ff394e812a78c327c75
|
1dda9df405a23ab8dea17648051cec68f8ec3196
|
/shiny/source/GUI/asm_GUI_LB.R
|
67f214e82fe2dd5cb9c6e3cd31e1d8094b053e72
|
[
"NIST-PD"
] |
permissive
|
asm3-nist/DART-MS-DST
|
57dd0b2b8c39120f769396d1dfda07ea4d36b96c
|
966a5b4ba5d1cd8498431d951986e515eb40980d
|
refs/heads/master
| 2023-05-03T11:22:44.520330
| 2021-05-19T15:14:05
| 2021-05-19T15:14:05
| 297,452,624
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 100
|
r
|
asm_GUI_LB.R
|
# Placeholder tab for the offline Library Builder.
# NOTE(review): `DisclaimerMessage` and `EmailMessage` are UI elements
# defined elsewhere in the app (not visible in this file) -- confirm they
# exist before this file is sourced.
asm_GUI_LB <- tabPanel(
"Library Builder (offline)",
DisclaimerMessage,
EmailMessage
)
|
ee0da9c16f5413b93bcb794d9b686f18ee9bb55e
|
f09df42ce7959b701bc73e0f0f09778070751d37
|
/ROC-AUC.R
|
dbf7bdf7b8a70e72666e8c4a6cc6f8a6d50cead0
|
[] |
no_license
|
saldh/R
|
f4e30c22a16e6a0eadadd267892deb121f345d0f
|
1ac1490b9a8bc1c5dace04eb1528a5556152b8f3
|
refs/heads/master
| 2020-03-26T07:25:37.960188
| 2019-04-12T08:32:36
| 2019-04-12T08:32:36
| 144,653,834
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,595
|
r
|
ROC-AUC.R
|
library(pROC) # install with install.packages("pROC")
library(randomForest) # install with install.packages("randomForest")
## Generate weight and obesity datasets.
set.seed(420) # this will make my results match yours
num.samples <- 100
## generate 100 values from a normal distribution with
## mean 172 and standard deviation 29, then sort them
weight <- sort(rnorm(n=num.samples, mean=172, sd=29))
## Now we will decide if a sample is obese or not.
## NOTE: This method for classifying a sample as obese or not
## was made up just for this example.
## rank(weight) returns 1 for the lightest, 2 for the second lightest, ...
## ... and it returns 100 for the heaviest.
## So what we do is generate a random number between 0 and 1. Then we see if
## that number is less than rank/100. So, for the lightest sample, rank = 1.
## This sample will be classified "obese" if we get a random number less than
## 1/100. For the second lightest sample, rank = 2, we get another random
## number between 0 and 1 and classify this sample "obese" if that random
## number is < 2/100. We repeat that process for all 100 samples
obese <- ifelse(test=(runif(n=num.samples) < (rank(weight)/num.samples)),
yes=1, no=0)
obese ## print out the contents of "obese" to show us which samples were
## classified "obese" with 1, and which samples were classified
## "not obese" with 0.
## plot the data
plot(x=weight, y=obese)
## fit a logistic regression to the data...
glm.fit=glm(obese ~ weight, family=binomial)
lines(weight, glm.fit$fitted.values)
## draw ROC and AUC using pROC
## NOTE: By default, the graphs come out looking terrible
## The problem is that ROC graphs should be square, since the x and y axes
## both go from 0 to 1. However, the window in which I draw them isn't square
## so extra whitespace is added to pad the sides.
roc(obese, glm.fit$fitted.values, plot=TRUE)
## Now let's configure R so that it prints the graph as a square.
##
par(pty = "s") ## pty sets the aspect ratio of the plot region. Two options:
## "s" - creates a square plotting region
## "m" - (the default) creates a maximal plotting region
roc(obese, glm.fit$fitted.values, plot=TRUE)
## NOTE: By default, roc() uses specificity on the x-axis and the values range
## from 1 to 0. This makes the graph look like what we would expect, but the
## x-axis itself might induce a headache. To use 1-specificity (i.e. the
## False Positive Rate) on the x-axis, set "legacy.axes" to TRUE.
roc(obese, glm.fit$fitted.values, plot=TRUE, legacy.axes=TRUE)
## If you want to rename the x and y axes...
roc(obese, glm.fit$fitted.values, plot=TRUE, legacy.axes=TRUE, percent=TRUE, xlab="False Positive Percentage", ylab="True Postive Percentage")
## We can also change the color of the ROC line, and make it wider...
roc(obese, glm.fit$fitted.values, plot=TRUE, legacy.axes=TRUE, percent=TRUE, xlab="False Positive Percentage", ylab="True Postive Percentage", col="#377eb8", lwd=4)
## If we want to find out the optimal threshold we can store the
## data used to make the ROC graph in a variable...
roc.info <- roc(obese, glm.fit$fitted.values, legacy.axes=TRUE)
str(roc.info)
## and then extract just the information that we want from that variable.
## Bug fix: `roc.df` was used below but never created. Build it from the
## sensitivities/specificities stored in `roc.info`, converted to
## percentages to match the tpp 60-80 filter.
roc.df <- data.frame(
  tpp = roc.info$sensitivities * 100,        # true positive percentage
  fpp = (1 - roc.info$specificities) * 100,  # false positive percentage
  thresholds = roc.info$thresholds)
head(roc.df)
## e.g. look at the thresholds where the TPP is between 60% and 80%:
roc.df[roc.df$tpp > 60 & roc.df$tpp < 80,]
## We can calculate the area under the curve...
roc(obese, glm.fit$fitted.values, plot=TRUE, legacy.axes=TRUE, percent=TRUE, xlab="False Positive Percentage", ylab="True Postive Percentage", col="#377eb8", lwd=4, print.auc=TRUE)
## ...and the partial area under the curve.
roc(obese, glm.fit$fitted.values, plot=TRUE, legacy.axes=TRUE, percent=TRUE, xlab="False Positive Percentage", ylab="True Postive Percentage", col="#377eb8", lwd=4, print.auc=TRUE, print.auc.x=45, partial.auc=c(100, 90), auc.polygon = TRUE, auc.polygon.col = "#377eb822")
## Now let's fit the data with a random forest...
rf.model <- randomForest(factor(obese) ~ weight)
## ROC for random forest
roc(obese, rf.model$votes[,1], plot=TRUE, legacy.axes=TRUE, percent=TRUE, xlab="False Positive Percentage", ylab="True Postive Percentage", col="#4daf4a", lwd=4, print.auc=TRUE)
## Now layer logistic regression and random forest ROC graphs..
roc(obese, glm.fit$fitted.values, plot=TRUE, legacy.axes=TRUE, percent=TRUE, xlab="False Positive Percentage", ylab="True Postive Percentage", col="#377eb8", lwd=4, print.auc=TRUE)
plot.roc(obese, rf.model$votes[,1], percent=TRUE, col="#4daf4a", lwd=4, print.auc=TRUE, add=TRUE, print.auc.y=40)
legend("bottomright", legend=c("Logisitic Regression", "Random Forest"), col=c("#377eb8", "#4daf4a"), lwd=4)
## Now that we're done with our ROC fun, let's reset the par() variables.
## (The other way would be to close the device with dev.off().)
par(pty = "m")
#2
## One-vs-rest ROC curves for a 3-class random forest.
library(randomForest)
library(pROC)
# generate some random data: each class gets features drawn from a normal
# distribution with a class-specific mean.
set.seed(1111)
train <- data.frame(condition = sample(c("mock", "lethal", "resist"), replace = T, size = 1000))
train$feat01 <- sapply(train$condition, (function(i){ if (i == "mock") { rnorm(n = 1, mean = 0)} else if (i == "lethal") { rnorm(n = 1, mean = 1.5)} else { rnorm(n = 1, mean = -1.5)} }))
train$feat02 <- sapply(train$condition, (function(i){ if (i == "mock") { rnorm(n = 1, mean = 0)} else if (i == "lethal") { rnorm(n = 1, mean = 1.5)} else { rnorm(n = 1, mean = -1.5)} }))
train$feat03 <- sapply(train$condition, (function(i){ if (i == "mock") { rnorm(n = 1, mean = 0)} else if (i == "lethal") { rnorm(n = 1, mean = 1.5)} else { rnorm(n = 1, mean = -1.5)} }))
head(train)
test <- data.frame(condition = sample(c("mock", "lethal", "resist"), replace = T, size = 1000))
test$feat01 <- sapply(test$condition, (function(i){ if (i == "mock") { rnorm(n = 1, mean = 0)} else if (i == "lethal") { rnorm(n = 1, mean = 1.5)} else { rnorm(n = 1, mean = -1.5)} }))
test$feat02 <- sapply(test$condition, (function(i){ if (i == "mock") { rnorm(n = 1, mean = 0)} else if (i == "lethal") { rnorm(n = 1, mean = 1.5)} else { rnorm(n = 1, mean = -1.5)} }))
test$feat03 <- sapply(test$condition, (function(i){ if (i == "mock") { rnorm(n = 1, mean = 0)} else if (i == "lethal") { rnorm(n = 1, mean = 1.5)} else { rnorm(n = 1, mean = -1.5)} }))
head(test)
model <- randomForest(formula = condition ~ ., data = train, ntree = 10, maxnodes= 100, norm.votes = F)
# predict test set, get probs instead of response
predictions <- as.data.frame(predict(model, test, type = "prob"))
predictions$predict <- names(predictions)[1:3][apply(predictions[,1:3], 1, which.max)]
predictions$observed <- test$condition
head(predictions)
# 1 ROC curve, mock vs non mock
roc.mock <- roc(ifelse(predictions$observed=="mock", "mock", "non-mock"), as.numeric(predictions$mock))
plot(roc.mock, col = "gray60")
# others
# Bug fix: each one-vs-rest curve must be scored with its *own* class
# probability column; the original reused predictions$mock for all three.
roc.lethal <- roc(ifelse(predictions$observed=="lethal", "lethal", "non-lethal"), as.numeric(predictions$lethal))
roc.resist <- roc(ifelse(predictions$observed=="resist", "resist", "non-resist"), as.numeric(predictions$resist))
lines(roc.lethal, col = "blue")
lines(roc.resist, col = "red")
# 3
## Compare three markers from pROC's built-in aSAH data set.
data(aSAH)
rocobj1 <- roc(aSAH$outcome, aSAH$s100b)
rocobj2 <- roc(aSAH$outcome, aSAH$wfns)
rocobj3 <- roc(aSAH$outcome, aSAH$ndka)
auc(rocobj1)
auc(rocobj2)
auc(rocobj3)
# draw the ROC curve
plot(rocobj1)
# extra cosmetic parameters (AUC label, AUC polygon, custom grid-line
# colours, best threshold annotation, etc.)
plot(rocobj1, print.auc=TRUE, auc.polygon=TRUE, grid=c(0.1, 0.2), grid.col=c("green", "red"), max.auc.polygon=TRUE,
auc.polygon.col="skyblue", print.thres=TRUE)
# Bug fix: `rocobj` did not exist -- the s100b curve object is `rocobj1`.
g3 <- ggroc(list(s100b=rocobj1, wfns=rocobj2, ndka= rocobj3))
g3  # print the ggplot object so the comparison plot is actually drawn
|
6dc542378068fb99193c8ca836b8e05d07b40599
|
9c79f8d1e89ee5adf7b93115ccc741d3303404f1
|
/Scripts_Curso_R/Tarea_3_The_Office.R
|
7b889e40c537debf026b7d8c13e90fd9bea8b9c9
|
[] |
no_license
|
derek-corcoran-barrios/derek-corcoran-barrios.github.io
|
e1631feef111cfc9bc693df1853e02818435071a
|
ccb8f21c053fd41559082eb58ccb7f64cc7fcf86
|
refs/heads/master
| 2023-07-17T13:11:43.739914
| 2023-07-03T07:24:21
| 2023-07-03T07:24:21
| 107,616,762
| 33
| 33
| null | 2020-06-18T19:25:50
| 2017-10-20T01:23:44
|
HTML
|
UTF-8
|
R
| false
| false
| 1,477
|
r
|
Tarea_3_The_Office.R
|
library(tidyverse)
# Word-count exercise for "The Office" transcripts. The three CSVs live in a
# public GitHub repository; `Episodes` and `stop_words` are loaded exactly as
# in the exercise even though only `words` is used below.
Episodes <- read_csv("https://raw.githubusercontent.com/derek-corcoran-barrios/The_office/master/The_Office_Episodes_per_Character.csv")
words <- read_csv("https://raw.githubusercontent.com/derek-corcoran-barrios/The_office/master/The_office_Words.csv")
stop_words <- read_csv("https://raw.githubusercontent.com/derek-corcoran-barrios/The_office/master/stop_words.csv")
# The ten characters with the most lines in every season...
top_speakers <- words %>%
  count(speaker, season) %>%
  group_by(season) %>%
  slice_max(order_by = n, n = 10) %>%
  ungroup() %>%
  arrange(season, desc(n))
# ...reduced to the distinct set of character names.
top_speakers <- top_speakers %>%
  distinct(speaker)
# Number of episodes in each season.
episodes_per_season <- words %>%
  distinct(season, episode) %>%
  count(season, name = "Eps")
# Lines per episode for the top speakers (zero-filled for speaker/season
# combinations that never occur), plus the season-to-season change `delta`.
words_per_episode <- top_speakers %>%
  left_join(words) %>%
  count(speaker, season) %>%
  pivot_wider(names_from = speaker, values_from = n, values_fill = 0) %>%
  pivot_longer(cols = Andy:Toby, names_to = "speaker", values_to = "words") %>%
  arrange(season) %>%
  left_join(episodes_per_season) %>%
  group_by(speaker) %>%
  mutate(words = words/Eps, Lag = lag(words), delta = words - Lag) %>%
  dplyr::filter(!is.na(delta))
# Interactive line chart of the per-character change across seasons.
gg <- ggplot(words_per_episode, aes(x = season, y = delta)) +
  geom_path(aes(color = speaker)) +
  theme(legend.position = "bottom")
plotly::ggplotly(gg)
|
233e9140d80f00ef29190404d3c1b54c03be8c21
|
c10c3e569ee4581269295f40d977ef1783202793
|
/R/imp_import.R
|
b1fa8f7e2929b542a2c10a69a49ee769bf61675d
|
[] |
no_license
|
zoltankovacs/EThu
|
6de4a14885d3e980c0bd9d75347fef54892dbffd
|
d72f65d2bed9003fceafa6c70be9c3fbcca2129d
|
refs/heads/master
| 2021-08-23T09:05:35.997209
| 2017-11-18T10:29:21
| 2017-11-18T10:29:21
| 111,195,054
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,028
|
r
|
imp_import.R
|
impD <- function(NrFile = 1) { # import the raw txt file(s), NrFile: which file to import
  # List the candidate files in <working dir>/rawdata.
  # Bug fix: `pattern` is a regular expression, not a glob -- the old
  # "*.txt" effectively matched any name containing "txt" (e.g. "a_txt.csv").
  # "\\.txt$" anchors a literal ".txt" extension at the end of the name.
  files <- list.files(file.path(getwd(), "rawdata"), full.names = TRUE, pattern = "\\.txt$")
  filesShort <- list.files(file.path(getwd(), "rawdata"), full.names = FALSE, pattern = "\\.txt$")
  # Report which file was picked, then read it with read.table defaults
  # (whitespace-separated, header=FALSE), as before.
  print(paste0("The file: '", filesShort[NrFile], "' was imported"))
  return(read.table(files[NrFile]))
} #Eof
getFolderName <- function() {
  # Name of the working directory's final path component.
  # basename() replaces the old manual strsplit()-on-"/" round trip and is
  # both clearer and robust to platform-specific separators.
  return(basename(getwd()))
} #Eof
getNames <- function(rawData, nameChange = FALSE) {
  # Parse row names of the form "<sample>_<position>" into:
  #   smplName - everything before the last underscore (may contain "_")
  #   smplPos  - the numeric chunk after the last underscore
  #   repeats  - running counter within each consecutive run of equal names
  parts <- strsplit(rownames(rawData), "_")
  smplName <- unlist(lapply(parts, function(x) paste(x[-length(x)], collapse = "_")))
  smplPos <- as.numeric(unlist(lapply(parts, function(x) x[length(x)])))
  # Bug fix: sequence() generates 1..k for each run length and also copes
  # with unequal run lengths, where the previous as.numeric(apply(...))
  # construction errored on the resulting ragged list.
  repeats <- as.numeric(sequence(rle(smplName)$lengths))
  print("The following samples are in the set:")
  smplName <- as.factor(smplName)
  smplNameLev <- levels(smplName)
  print(smplNameLev)
  if (nameChange) {
    # Interactive renaming: prompt once per level; "-" keeps the old name.
    for (i in seq_along(smplNameLev)) {
      cat(paste0("\nPlease provide the new name for ", smplNameLev[i], " (use '-' to not change): "))
      a <- readLines(n = 1)
      if (!a == "-") {
        levels(smplName)[i] <- a
      }
    } #Efor
  } #Eif
  return(cbind(data.frame(smplName), data.frame(smplPos, repeats)))
} #Eof
#' @title add variables to ET data
#' @description Combine the sample annotation parsed from the row names (via
#'   \code{getNames}) with the raw sensor matrix into a single data frame.
#' @details XXX Here the details of how the folder should be named, with
#' separators etc.
#' @param rawData ET raw data imported from the txt file; its row names must
#'   follow the \code{"<sample>_<position>"} convention used by
#'   \code{getNames}.
#' @param day optional argument useful if the experiment was performed in
#'   different days; pass \code{NA} to omit the \code{day} column entirely
#' @param nameChange optional argument if the name of the groups has to be
#'   renamed (interactive; forwarded to \code{getNames})
#' @return a data frame with the annotation columns from \code{getNames},
#'   an optional \code{day} column, and the raw data attached as a
#'   data-frame-valued \code{sensors} column
#' @export
mDataStr <- function(rawData, day = 1, nameChange = FALSE){ # make data structure
# Annotation columns (smplName, smplPos, repeats) from the row names.
data <- data.frame(getNames(rawData, nameChange = nameChange))
# NA is the sentinel for "no day column"; any other value is recycled
# across all rows.
if (!is.na(day)) {
data <- cbind(data, day)
}
# Attach the whole raw matrix as a single column; this keeps the sensor
# block addressable as data$sensors.
data$sensors <- rawData
return(data)
} #Eof
chngNames <- function(charVect, toWhat){
  # Coerce `charVect` to a factor and relabel its (alphabetically sorted)
  # levels with the entries of `toWhat`, returning the relabelled factor.
  relabelled <- as.factor(charVect)
  levels(relabelled) <- toWhat
  relabelled
} #Eof
# Build a numeric representation of every annotation column (everything
# except "sensors") so the annotations can be used for colour-coding.
# Columns with more than 9 distinct values are mapped to heat colours;
# the rest are mapped to their factor codes.
# NOTE(review): makeHeatColors() is defined elsewhere in the package (not
# visible in this file) -- confirm its return type fits a data.frame column.
addNumRep <- function(data){
sInd <- which(colnames(data) == "sensors")
# Work on a copy of the annotation columns only.
numRep <- data[,-sInd]
for(i in 1:ncol(numRep)){
if (length(unique(numRep[,i])) > 9) {
numRep[,i] <- makeHeatColors(numRep[,i], startCol = "red", endCol = "blue")
} else {
numRep[,i] <- as.numeric(as.factor(numRep[,i]))
} #Eif
} #Efor
# Re-assemble: annotation columns, then numRep, with sensors moved last.
dataa <- data[, -sInd]
dataa$numRep <- data.frame(numRep)
dataa$sensors <- data[, sInd]
return(dataa)
} #Eof
# addVars <- function(rawData, day = 1) {
# colnames(rawData) <- paste("X", colnames(rawData), sep = "_")
# header <- getNames(rawData)
# return(cbind(header, day, rawData))
# } #Eof
importStructureData <- function(NrFile = 1, day = 1, nameChange = FALSE) {
  # Convenience wrapper: read raw file NrFile, attach the sample annotation,
  # then add the numeric representation used for colour-coding.
  rawData <- impD(NrFile)
  # Bug fix: `day` was accepted but never forwarded, so every import was
  # silently labelled with mDataStr's default day = 1. Pass it through.
  dataStr <- mDataStr(rawData, day = day, nameChange = nameChange)
  dataStrNr <- addNumRep(dataStr)
  return(dataStrNr)
} #Eof
|
cfc30b756c0ee0d8df0f5b31c036e7812556656e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/pterrace/examples/muscle_fiber_dat.Rd.R
|
9d54d4d28ec861cdbed8225418de5d9ecdf0bfd0
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 831
|
r
|
muscle_fiber_dat.Rd.R
|
# Extracted example from the pterrace package documentation for the
# muscle_fiber_dat data set (persistence terraces on a 2-D point cloud).
library(pterrace)
### Name: muscle_fiber_dat
### Title: Point cloud sampled from the muscle tissue cross-sectional image
### Aliases: muscle_fiber_dat
### Keywords: datasets
### ** Examples
# load muscle fiber data
data(muscle_fiber_dat)
# input variables: plotting window limits, grid resolution `by`, and the
# smoothing-parameter sequence `spseq`.
# NOTE(review): `by <- 6` shadows base::by() as a variable name in the
# workspace (the function itself is still callable).
Xlim <- c(-50,350)
Ylim <- c(-50,250)
lim <- cbind(Xlim, Ylim)
by <- 6
spseq <- seq(2,40,length.out = 9)
# compute persistence terrace
muscle_fiber_pt=computept(muscle_fiber_dat,sp=spseq,lim=lim,by=by)
## Not run:
##D # compute persistence terrace with parallel option
##D spseq <- seq(2,40,length.out = 30)
##D two_circle_density_pt <- computept(muscle_fiber_dat,sp=spseq,lim=lim,by=by,par=TRUE)
## End(Not run)
# draw terrace area plot (dimension 1 = loops)
terracearea(muscle_fiber_pt,dimension=1,maxheight=20)
# draw persistence terrace
plotpt(muscle_fiber_pt,cmax=12,dimension=1)
|
81f44e718c051eed97c0d8a50b50584e3bc8baa6
|
50221ba3c8d502486f21e11946aca054a96e04f9
|
/run.py
|
297b5183b15d98cb6f808b25b5836687d8dbbf00
|
[] |
no_license
|
HilarioCuervo/first_commit
|
84307b2888504a06676e70c047c52c06ba94c950
|
393b49aaf07a74a6497c1c8e69aabe65a4a7bd11
|
refs/heads/master
| 2023-04-29T15:32:33.073800
| 2021-04-28T13:46:33
| 2021-04-28T13:46:33
| 349,608,329
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 30
|
py
|
run.py
|
# Tiny arithmetic demo: add two constants and print their sum (5).
a, b = 2, 3
c = a + b
print(c)
|
c2db22cd303f4b8e9ff75475aeb3fbdedf08c92b
|
6b40427744ca122897f25eda12504d4239870437
|
/run_analysis.R
|
7947798235b037187e30d1262317def78db8054e
|
[] |
no_license
|
henzi23/datacleanproject
|
614286315c9e11198e8fb5489ef88abb33b5e527
|
ae0562e106bd41b893771909dc507fdecd506501
|
refs/heads/master
| 2021-01-01T15:44:35.379542
| 2014-10-26T14:02:07
| 2014-10-26T14:02:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,511
|
r
|
run_analysis.R
|
## This is the R script to create a tidy dataset from wearble computing dataset found at
## https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
## The script assumes this dataset has been extracted into your working directory
## The lines below read the data into R
features<-read.table("UCI HAR Dataset/features.txt",header=FALSE)
xtrain<-read.table("UCI HAR Dataset/train/X_train.txt")
xtest<-read.table("UCI HAR Dataset/test/X_test.txt")
ytrain<-read.table("UCI HAR Dataset/train/y_train.txt")
subtrain<-read.table("UCI HAR Dataset/train/subject_train.txt")
subtest<-read.table("UCI HAR Dataset/test/subject_test.txt")
ytest<-read.table("UCI HAR Dataset/test/y_test.txt")
## These lines merges the data into one dataset called rawdata
## (subject + activity columns first, then the 561 feature columns;
## intermediate objects are rm()'d immediately to limit memory use)
test<-cbind(subtest,ytest)
rm("subtest","ytest")
train<-cbind(subtrain,ytrain)
rm("subtrain","ytrain")
dat2<-rbind(train,test)
rm("train","test")
dat<-rbind(xtrain,xtest)
rm("xtrain","xtest")
## Feature names from features.txt become the column names of the 561
## measurement columns.
colnames(dat)<-features[,2]
rm("features")
colnames(dat2)<-c("Subject","Activity")
rawdata<-cbind(dat2,dat)
rm("dat2","dat")
## These lines extracts out only the measurement of Means and Standard Deviations
## and saves it as data frame selectdata
## NOTE(review): the indices below are hard-coded positions of the mean()/std()
## features in the UCI HAR features.txt ordering (plus 2 for the Subject and
## Activity columns) -- they silently break if the feature file changes.
meanstd<-c(1:6,41:46,81:86,121:126,161:166,201:202,214:215,227:228,253:254,266:271,345:350,424:429,503:504,516:517,529:530,542:543,562:563)
selectdata<-rawdata[,meanstd]
rm("meanstd")
## These lines rename the Activity column in selectdata to readable names.
## (Codes 1-6 follow the dataset's activity_labels.txt mapping.)
selectdata$Activity<-as.character(selectdata$Activity)
selectdata$Activity[selectdata$Activity=="1"] <- "WALKING"
selectdata$Activity[selectdata$Activity=="2"] <- "WALKING_UPSTAIRS"
selectdata$Activity[selectdata$Activity=="3"] <- "WALKING_DOWNSTAIRS"
selectdata$Activity[selectdata$Activity=="4"] <- "SITTING"
selectdata$Activity[selectdata$Activity=="5"] <- "STANDING"
selectdata$Activity[selectdata$Activity=="6"] <- "LAYING"
## These lines change the activity and subject columns of selectdata into factors
selectdata$Activity<-as.factor(selectdata$Activity)
selectdata$Subject<-as.factor(selectdata$Subject)
## These lines melt the dataset and dcasts it back into a tidy dataset called tidydata with
## the average of each variable by each activity and each subject.
library(reshape2)
datamelt<-melt(selectdata,id=c("Subject","Activity"))
tidydata<-dcast(datamelt,Subject + Activity~variable,mean)
rm("datamelt")
## This line writes the tidy set to a file called tidydata.txt
write.table(tidydata,"tidydata.txt",row.name=FALSE)
|
da3085dd6475c93d95049ac363cb6dd2478935e9
|
e9e5a348573f0099d8a6c03ab90ca93d7e6df9ca
|
/bDiscrim.R
|
93cbc4c04359beaf1f5bafa3d38583007a0c9603
|
[] |
no_license
|
nxskok/stad29-notes
|
a39f73502e18f92b12024a910a3e4f83b3929c15
|
a8a887e621b84fdadb974bf50c384ba65d2a8383
|
refs/heads/master
| 2021-06-08T11:21:53.709889
| 2021-04-26T23:02:15
| 2021-04-26T23:02:15
| 161,848,845
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,808
|
r
|
bDiscrim.R
|
### R code from vignette source '/home/ken/teaching/d29/notes/bDiscrim.Rnw'
## NOTE(review): this file is Sweave-extracted lecture code and is meant to
## be run chunk by chunk, not necessarily sourced top-to-bottom. It reads
## data files (manova1.txt etc.) from the working directory.
###################################################
### code chunk number 1: berzani
###################################################
## Read the fertilizer data and plot yield vs weight, coded by group.
hilo=read.table("manova1.txt",header=T)
attach(hilo)
fno=as.integer(fertilizer)
plot(yield,weight,pch=fno,col=fno)
###################################################
### code chunk number 2: bDiscrim.Rnw:50-52
###################################################
## Linear discriminant analysis: predict fertilizer from yield and weight.
library(MASS)
hilo.lda=lda(fertilizer~yield+weight)
###################################################
### code chunk number 3: bDiscrim.Rnw:67-68
###################################################
hilo.lda
names(hilo.lda)
hilo.lda$svd
###################################################
### code chunk number 4: workington
###################################################
plot(hilo.lda)
###################################################
### code chunk number 5: bDiscrim.Rnw:113-116
###################################################
## Classify the training data and cross-tabulate against the truth.
hilo.pred=predict(hilo.lda)
hilo.pred$class
cbind(hilo,predicted=hilo.pred$class)
table(fertilizer,predicted=hilo.pred$class)
###################################################
### code chunk number 6: bDiscrim.Rnw:127-129
###################################################
## Posterior probabilities alongside discriminant scores.
pp=round(hilo.pred$posterior,4)
cbind(hilo,hilo.pred$x,pp)
###################################################
### code chunk number 7: bDiscrim.Rnw:142-146
###################################################
## Score a grid of (yield, weight) values for the decision-boundary plot.
yy=seq(29,38,0.5)
ww=seq(10,14,0.5)
hilo.new=expand.grid(yield=yy,weight=ww)
hilo.pred=predict(hilo.lda,hilo.new)
###################################################
### code chunk number 8: santini
###################################################
## Overlay LD1 contours on the scatterplot; the 0 contour is the boundary.
plot(yield,weight,col=fno,pch=fno)
z=matrix(hilo.pred$x,length(yy),
length(ww),byrow=F)
contour(yy,ww,z,add=T)
###################################################
### code chunk number 9: bDiscrim.Rnw:173-174
###################################################
detach(hilo)
###################################################
### code chunk number 10: bDiscrim.Rnw:181-184
###################################################
## Peanuts example: LDA on variety/location combinations.
peanuts=read.table("peanuts.txt",header=T)
head(peanuts)
attach(peanuts)
###################################################
### code chunk number 11: combos
###################################################
## One factor combining variety and location, plus a 3-D look at the data.
combo=paste(variety,location,sep="-")
combo=factor(combo)
combo
library(rgl)
plot3d(y,smk,w,col=as.numeric(combo),size = 100)
###################################################
### code chunk number 12: bDiscrim.Rnw:206-209
###################################################
library(MASS)
peanuts.lda=lda(combo~y+smk+w)
peanuts.lda$scaling
peanuts.lda$svd
###################################################
### code chunk number 13: bDiscrim.Rnw:224-225
###################################################
peanuts.lda$means
###################################################
### code chunk number 14: mancini
###################################################
plot(peanuts.lda)
names(peanuts.lda)
###################################################
### code chunk number 15: vierchowod
###################################################
## First two discriminants only.
plot(peanuts.lda,dimen=2)
###################################################
### code chunk number 16: bDiscrim.Rnw:269-271
###################################################
## Integer group codes used as plotting colours below.
mycol=as.integer(combo)
mycol
###################################################
### code chunk number 17: delpiero
###################################################
plot(peanuts.lda,dimen=2,col=mycol)
###################################################
### code chunk number 18: bDiscrim.Rnw:301-303
###################################################
## Classify the training data; confusion table of actual vs predicted.
peanuts.pred=predict(peanuts.lda)
names(peanuts.pred)
library(rgl)
plot3d(peanuts.pred$x,col=as.numeric(combo),size=10)
table(combo,pred.combo=peanuts.pred$class)
###################################################
### code chunk number 19: bDiscrim.Rnw:314-316
###################################################
pp=round(peanuts.pred$posterior,2)
data.frame(combo,pred=peanuts.pred$class,pp)
###################################################
### code chunk number 20: bDiscrim.Rnw:328-331
###################################################
peanuts.lda$scaling
mm=cbind(y,smk,w,peanuts.pred$x)
head(mm)
###################################################
### code chunk number 21: bDiscrim.Rnw:360-363
###################################################
## Leave-one-out cross-validated classifications (CV=T).
peanuts.cv=lda(combo~y+smk+w,CV=T)
pc=peanuts.cv$class
table(combo,pc)
###################################################
### code chunk number 22: graziani
###################################################
plot(peanuts.lda,dimen=2,col=mycol)
###################################################
### code chunk number 23: bDiscrim.Rnw:381-383
###################################################
## Cross-validated posterior probabilities.
pp=round(peanuts.cv$posterior,3)
data.frame(combo,pc,pp)
###################################################
### code chunk number 24: bDiscrim.Rnw:429-434
###################################################
## Job-activity example: predict job group from leisure-activity scores.
active=read.table("profile.txt",header=T)
attach(active)
active.lda=lda(job~reading+dance+tv+ski)
active.lda$svd
active.lda$scaling
###################################################
### code chunk number 25: totti
###################################################
plot(active.lda)
###################################################
### code chunk number 26: bDiscrim.Rnw:468-471
###################################################
## In-sample classification and confusion table.
active.pred=predict(active.lda)
pj=active.pred$class
table(job,pj)
###################################################
### code chunk number 27: bDiscrim.Rnw:481-484
###################################################
## Posterior probabilities for a few selected rows.
pp=round(active.pred$posterior,3)
dd=data.frame(job,pj,pp)
dd[c(5,6,9,15),]
###################################################
### code chunk number 28: bDiscrim.Rnw:496-499
###################################################
## Leave-one-out cross-validated version of the same classification.
active.cv=lda(job~reading+dance+tv+ski,CV=T)
pj=active.cv$class
table(job,pj)
###################################################
### code chunk number 29: bDiscrim.Rnw:510-513
###################################################
pp=round(active.cv$posterior,3)
rows=c(5,6,7,9,15)
data.frame(job,pj,pp)[rows,]
###################################################
### code chunk number 30: nesta
###################################################
plot(active.lda,abbrev=3,cex=1.5)
###################################################
### code chunk number 31: bDiscrim.Rnw:557-561
###################################################
## Remote-sensing crops example.
crops=read.table("remote-sensing.txt",header=T)
str(crops)
head(crops)
## NOTE(review): the next four lines (bare `x1`, rm(), attach/detach churn)
## look like interactive leftovers; as written, sourcing this section
## top-to-bottom would error at `x1` / rm("x2") and the lda() call below
## relies on crops' columns being reachable -- run chunk by chunk.
x1
rm(x1)
rm(x2)
attach(crops)
detach(crops)
library(MASS)
crops.lda=lda(crop~x1+x2+x3+x4)
crops.lda$svd
###################################################
### code chunk number 32: bDiscrim.Rnw:574-576
###################################################
crops.lda$means
round(crops.lda$scaling,3)
###################################################
### code chunk number 33: bDiscrim.Rnw:586-587
###################################################
round(crops.lda$scaling,3)
###################################################
### code chunk number 34: bDiscrim.Rnw:599-600
###################################################
## Narrow output width for the printed slides.
options(width=55)
###################################################
### code chunk number 35: bDiscrim.Rnw:605-607
###################################################
crop.i=as.integer(crop)
crop.i
###################################################
### code chunk number 36: piacentini
###################################################
plot(crops.lda,dimen=2,abbrev=2,col=crop.i,cex=1.5)
###################################################
### code chunk number 37: bDiscrim.Rnw:635-639
###################################################
## Drop the Clover group and refit.
## NOTE(review): crops2 is created twice here -- once with dplyr::filter
## and once with base subsetting; the base version overwrites the first.
# or dplyr
library(dplyr)
crops %>% filter(crop!="Clover") -> crops2
str(crops2)
crops2=crops[crop!="Clover",]
detach(crops)
attach(crops2)
crops2.lda=lda(crop~x1+x2+x3+x4)
###################################################
### code chunk number 38: bDiscrim.Rnw:653-656
###################################################
crops2.lda$means
crops2.lda$svd
crops2.lda$scaling
###################################################
### code chunk number 39: nedved
###################################################
plot(crops2.lda,dimen=2,col=as.numeric(crop),abbrev=2,cex=1)
###################################################
### code chunk number 40: bDiscrim.Rnw:674-677
###################################################
## Confusion table plus a hand-computed misclassification rate: off-diagonal
## cells (row index != column index) are the errors.
crops2.pred=predict(crops2.lda)
pc=crops2.pred$class
tab=table(Crop=crop,Pred=pc)
tab
row(tab)
col(tab)
is.diag=(row(tab)==col(tab))
is.diag
tab
tab[is.diag]
tab[!is.diag]
miscl=sum(tab[!is.diag])/sum(tab)
miscl
library(rgl)
crops2.pred$x
plot3d(crops2.pred$x,col=as.numeric(crop))
text3d(crops2.pred$x,text=abbreviate(crop,3),col=as.numeric(crop))
# is it really only LD1 that helps?
plot(crops2.lda,dimen=1) # might need the par(mar) thing for this
###################################################
### code chunk number 41: bDiscrim.Rnw:686-687
###################################################
options(width=60)
###################################################
### code chunk number 42: bDiscrim.Rnw:692-695
###################################################
post=round(crops2.pred$posterior,3)
rows=c(2,4,5,9,10,11,16,17,24,25)
data.frame(crop,pc,post)[rows,]
###################################################
### code chunk number 43: bDiscrim.Rnw:708-710
###################################################
## MANOVA confirming the groups differ on the four spectral variables.
crops2.manova=manova(cbind(x1,x2,x3,x4)~crop)
summary(crops2.manova)
bd2ce9c05d13508354bf2a1dbc2cdf07b9c8b9b2
|
208fe844817df6e34f869afb60cd69d2cc1e2ba8
|
/main.R
|
c123a1f5717aac54b19b18577e17e90b92c10b6d
|
[] |
no_license
|
jyjek/pasha_pdf
|
fefa9b94fc0797e1f5f9d413ff77ff2c65a63c3f
|
58e5613b3d0f882d2ca9a15726b60ade82cab6c2
|
refs/heads/master
| 2020-06-17T09:19:54.815502
| 2019-07-08T20:11:46
| 2019-07-08T20:11:46
| 195,878,323
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,652
|
r
|
main.R
|
library(tidyverse)
library(tabulizer)
library(textclean)
library(stringr)
# Sample invoice PDFs used while developing the parser below.
f <- "data/documentView_retrieveStatementPdf07.pdf"
f1 <- "data/documentView_retrieveStatementPdf07 (2).pdf"
# Parse one Hawaiian Telcom invoice PDF into a one-row data frame of
# billing fields (account, invoice number/date, totals, service period).
# NOTE(review): pdf_text() belongs to the pdftools package, which is not
# among the library() calls above (tabulizer is) -- confirm it is loaded.
# Only `.[[1]]`, i.e. the first element of pdf_text()'s result (presumably
# page 1), is parsed.
hawaii_telecom <- function(f){
local_df <- pdf_text(f) %>% # read the pdf
.[[1]] %>% str_split(., "\n", simplify = TRUE) %>% # split into lines
data.frame() %>%
t() %>%
as.data.frame() %>%
magrittr::set_colnames("data") %>%
filter(grepl("Payments|Service Period|Account Number|TOTAL AMOUNT DUE|TOTAL NEW CHARGES|Payment Due|Invoice Number",data)) # keep only the lines we need
global_acc <- local_df %>% # find the global account
filter(grepl("Account",data)) %>% # by keyword
mutate_all(as.character) %>%
mutate(data = replace_white(data)) %>% # collapse redundant whitespace
.[1,1] %>%
str_extract(., '(?<=Account Number:\\s)\\w+') # grab the word following "Account Number:"
inv_number <- local_df %>%
filter(grepl("Invoice Number",data)) %>%
mutate_all(as.character) %>%
mutate(data = replace_white(data)) %>%
.[1,1] %>%
str_extract(., '(?<=Invoice Number:\\s)\\w+')
inv_date <- local_df %>%
filter(grepl("Invoice Date",data)) %>%
mutate_all(as.character) %>%
mutate(data = replace_white(data)) %>%
.[1,1] %>%
str_split(.," ") %>% unlist() %>% .[length(.)] # take the last token (the date in this layout); a regex would be more robust -- TODO
total <- local_df %>%
filter(grepl("TOTAL AMOUNT DUE",data)) %>%
mutate_all(as.character) %>%
mutate(data = replace_white(data)) %>%
.[1,1] %>% parse_number() # parse the amount as a number
new <- local_df %>%
filter(grepl("TOTAL NEW CHARGES",data)) %>%
mutate_all(as.character) %>%
mutate(data = replace_white(data)) %>%
.[1,1] %>% parse_number()
period <- local_df %>%
filter(grepl("Service Period",data)) %>%
mutate_all(as.character) %>%
mutate(data = replace_white(data)) %>%
.[1,1] %>%
str_split(.," ") %>% unlist()
pay_due <- local_df %>%
filter(grepl("Payment Due",data)) %>%
mutate_all(as.character) %>%
mutate(data = replace_white(data)) %>%
.[1,1] %>%
gsub(" Payment Due: ",'',.) %>% as.Date(., "%B %d, %Y") # convert the date text to a Date
# Service period: tokens 3rd-from-last and last are the start/end dates.
start <- period %>% .[length(.)-2] %>% lubridate::mdy(.) # convert to Date class
end <- period %>% .[length(.)] %>% lubridate::mdy(.)
res <-data.frame( # final one-row result frame
"Contract_Number" = "HawaiianTelcom_PublicStorage",
"Provider" = "Hawaiiantel_us_fix_man",
"Global_Account" = global_acc,
"Invoice_Number" = inv_number,
"Invoice_Date" = inv_date,
"Total" = new,
"Currency" = "USD",
"Total_Amount_Due" = total,
"Due_Date" = pay_due,
"Date_From" = start,
"Date_To" = end)
return(res)
}
path<-c(paste0(getwd(),"/data/")) # folder containing the invoice PDFs
# Iterate over every file in data/, parse each invoice and row-bind the results.
# NOTE(review): tibble::data_frame() is deprecated in favour of tibble().
data <- data_frame(filename = list.files(path)) %>% # one row per file
# slice(8:9) %>%
mutate(file_contents = map(paste0("data/",filename),
~hawaii_telecom(.))) %>%
unnest() %>%
select(-filename)
|
b47154109872d33fb71ecf9d7d921edcebe57f31
|
5ea19ffbb17c4f943de4b9e3047f7a7fa8bfa605
|
/R_Code_and_Analysis/distance_decay/old/distance_decay.R
|
b6f6aed2e24c4cdba4da3ce35c90c647a50c3a4e
|
[] |
no_license
|
mawhal/Calvert_O-Connor_eelgrass
|
c0dfbc02a8ea8c217512e1389be709649dfdde85
|
fad8a7be27ce79a99ebb5744043318984c5cb42d
|
refs/heads/master
| 2023-02-13T17:54:02.839659
| 2020-12-19T00:41:46
| 2020-12-19T00:41:46
| 183,318,061
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 76,826
|
r
|
distance_decay.R
|
################Preliminary Analyses to explore the impact of distance between sites on grazer dissimilarity
##Started by Coreen April 2020
##This script makes distance matrices between sites and plots Bray-Curtis grazer dissimilarity between each pair of sites against dstance between each pair of sites for 2014-207
##For some reason i did each year totally separately. No idea why
## updated by Whalen on 15 May 2020. Keeping analysis separated by date for now
## updated by Bia on 07 July 2020 to add ASV level microbes, update for corrected microbial tables and add title
library(vegan)
library(tidyverse)
# library(distances)
library(fields)
library(cowplot)
# Geographic distance between site pairs ---------------------------------------
# Build a long table of pairwise great-circle distances between all sites,
# keyed by "Site1-Site2", for joining to the community dissimilarity tables.
Hakaispatial <- read.csv("metadata/00_Hakai_UBC_metadata_MASTER - geolocation_site.csv")
Hakaispatial1 <- Hakaispatial %>%
  select(site_name, lat, long)
## Change site names that don't match the master grazer data
Hakaispatial1$site_name <- recode(Hakaispatial1$site_name,
  "inner_choked" = "choked_inner", "sandspit" = "choked_sandspit")
coords <- Hakaispatial %>% select( long, lat )
## Make the pairwise distance matrix.
## NOTE(review): fields::rdist.earth() defaults to miles = TRUE, so these
## values are miles unless changed; the plot axes below say "km" -- confirm
## the intended unit.
Hakai.distance <- rdist.earth( coords[,c('long','lat')] )
Hakai.geog <- as.data.frame(Hakai.distance)
## Label rows/columns with site names. (Fixed: previously referenced
## `Hakaispatial1$site`, which only resolved through data.frame `$`
## partial matching of "site" to the "site_name" column.)
## Also removed an unused `spdf <- SpatialPointsDataFrame(...)` call: the
## result was never used and SpatialPointsDataFrame comes from the sp
## package, which is not loaded by this script.
colnames(Hakai.geog) <- Hakaispatial1$site_name
Hakai.geog$Sites1 <- Hakaispatial1$site_name
## Data frame of distances between all site pairs, keyed "Site1-Site2"
Hakai.geographic.distance <- Hakai.geog %>%
  gather(Sites2, Geog_Distance, - Sites1) %>%
  unite("Site.Pair", Sites1, Sites2, sep = "-", remove = TRUE)
#### MACROEUKARYOTES
# Taxonomic level used in the Bray-Curtis file names below.
level <- "finest"
# Folder holding the per-year Bray-Curtis matrices and sample metadata.
path <- "R_Code_and_Analysis/betadiversity/Bray-Curtis/"
##### 2016 grazers ------------------------------------------------------------
# Build the 2016 macroeukaryote distance-decay panel (BC16): Bray-Curtis
# dissimilarity between sample pairs vs geographic distance between sites.
dist16 <- read_csv( paste0(path,"2016_macroeuk_braycurtis_",level,".csv") )
# just take the upper portion of the distance matrix so we don't repeat the numbers
dist16 <- as.data.frame(as.matrix(dist16))
dist16[lower.tri(dist16,diag = T)] <- NA
meta16 <- read_csv( paste0(path,"2016_macroeuk_metadata.csv") )
# Site name = sample id with its trailing replicate token dropped.
meta16$site <- unlist( lapply( strsplit( meta16$sample, split = "_"), function(z) paste(z[1:(length(z)-1)],collapse = "_") ) )
# The script used to calculate Bray-Curtis shortened the column names to make the distance matrix more compact; recreate those labels here.
meta16$samp.short <- vegan::make.cepnames(meta16$sample)
### Now make the distance matrix long (one row per ordered sample pair)
dist16$Sites1 <- colnames(dist16)
dist16.collapse <- dist16 %>%
gather(Sites2, Community_Distance, -Sites1)
# add sites from metadata (joined once per pair member; .x/.y suffixes)
dist16.sites <- left_join( dist16.collapse, select(meta16, site, sample, samp.short), by=c("Sites1" = "samp.short") )
dist16.sites <- left_join( dist16.sites, select(meta16, site, sample, samp.short), by=c("Sites2" = "samp.short") )
dist16.sites <- dist16.sites %>% select( Sites1=site.x, Sites2=site.y, Community_Distance )
# Key each row by "Site1-Site2" for the join with geographic distances.
dist16.distance <- dist16.sites %>%
# separate(Sites1, c("Site_1", "Sample_1"), sep = "-", remove = TRUE)%>%
# separate(Sites2, c("Site_2", "Sample_2"), sep = "-", remove = TRUE)%>%
unite("Site.Pair", Sites1, Sites2, sep = "-", remove = FALSE)
### Unite into one data frame
Hakai.2016.distance <- left_join(dist16.distance,Hakai.geographic.distance, by = "Site.Pair") %>%
filter( !is.na(Community_Distance) )
### plots (site-coloured diagnostic versions kept below for reference)
# Graph1 <- Hakai.2016.distance %>%
# drop_na(Geog_Distance) %>%
# drop_na(Community_Distance)%>%
# ggplot(aes(x = Geog_Distance, y = Community_Distance))+
# theme_classic()+
# geom_point(aes(colour = Sites1),alpha=0.25)+
# xlab("Geographic distance (km)")+
# ylab("B-C dissimilarity 2016 Macroeukaryotes")+
# geom_smooth(method = lm)
# Graph2 <- Hakai.2016.distance %>%
# drop_na(Geog_Distance) %>%
# drop_na(Community_Distance)%>%
# ggplot(aes(x = Geog_Distance, y = Community_Distance))+
# theme_classic()+
# geom_point(aes(colour = Sites2),alpha=0.25)+
# xlab("Geographic distance (km)")+
# ylab("B-C dissimilarity 2016 Macroeukaryotes")+
# geom_smooth(method = lm)
# plot_grid(Graph1, Graph2, nrow = 2)
# Final panel: points plus a linear fit of dissimilarity on distance.
BC16 <- Hakai.2016.distance %>%
drop_na(Geog_Distance) %>%
drop_na(Community_Distance)%>%
ggplot(aes(x = Geog_Distance, y = Community_Distance))+
theme_classic()+
geom_point(alpha=0.25)+
xlab("Geographic distance (km)")+
ylab("B-C dissimilarity\n2016 macroeukaryotes")+
ylim( c(0,1) ) + xlim( c(0,41) ) +
geom_smooth(method = lm)
##### 2017 grazers ------------------------------------------------------------
# Build the 2017 macroeukaryote distance-decay panel (BC17).
# Load the Bray-Curtis matrix and keep only the upper triangle so each
# unordered sample pair is counted once.
dist17 <- read_csv(paste0(path, "2017_macroeuk_braycurtis_", level, ".csv"))
dist17 <- as.data.frame(as.matrix(dist17))
dist17[lower.tri(dist17, diag = TRUE)] <- NA
# Sample metadata; derive the site name by dropping the trailing replicate
# token from the sample id.
meta17 <- read_csv(paste0(path, "2017_macroeuk_metadata.csv"))
meta17$site <- unlist(lapply(strsplit(meta17$sample, split = "_"),
                             function(z) paste(z[1:(length(z) - 1)], collapse = "_")))
# Recreate the shortened sample labels used as matrix column names.
meta17$samp.short <- vegan::make.cepnames(meta17$sample)
# Reshape the matrix to long form: one row per ordered sample pair.
dist17$Sites1 <- colnames(dist17)
dist17.collapse <- gather(dist17, Sites2, Community_Distance, - Sites1)
# Attach site names for both members of each pair, then relabel columns.
meta17.key <- select(meta17, site, sample, samp.short)
dist17.sites <- dist17.collapse %>%
  left_join(meta17.key, by = c("Sites1" = "samp.short")) %>%
  left_join(meta17.key, by = c("Sites2" = "samp.short")) %>%
  select(Sites1 = site.x, Sites2 = site.y, Community_Distance)
# Key each row by "Site1-Site2" to match the geographic distance table.
dist17.distance <- unite(dist17.sites, "Site.Pair", Sites1, Sites2,
                         sep = "-", remove = FALSE)
# Join community dissimilarity onto geographic distance by site pair.
Hakai.2017.distance <- dist17.distance %>%
  left_join(Hakai.geographic.distance, by = "Site.Pair") %>%
  filter(!is.na(Community_Distance))
# Distance-decay scatter with a linear fit.
BC17 <- Hakai.2017.distance %>%
  drop_na(Geog_Distance) %>%
  drop_na(Community_Distance) %>%
  ggplot(aes(x = Geog_Distance, y = Community_Distance)) +
  theme_classic() +
  geom_point(alpha = 0.25) +
  xlab("Geographic distance (km)") +
  ylab("B-C dissimilarity\n2017 macroeukaryotes") +
  ylim(c(0, 1)) + xlim(c(0, 41)) +
  geom_smooth(method = lm)
##### 2015 grazers ------------------------------------------------------------
# Build the 2015 macroeukaryote distance-decay panel (BC15); mirrors the
# 2016 section above.
dist15 <- read_csv( paste0(path,"2015_macroeuk_braycurtis_",level,".csv") )
# just take the upper portion of the distance matrix so we don't repeat the numbers
dist15 <- as.data.frame(as.matrix(dist15))
dist15[lower.tri(dist15,diag = T)] <- NA
meta15 <- read_csv( paste0(path,"2015_macroeuk_metadata.csv") )
# Site name = sample id with its trailing replicate token dropped.
meta15$site <- unlist( lapply( strsplit( meta15$sample, split = "_"), function(z) paste(z[1:(length(z)-1)],collapse = "_") ) )
# The script used to calculate Bray-Curtis shortened the column names to make the distance matrix more compact; recreate those labels here.
meta15$samp.short <- vegan::make.cepnames(meta15$sample)
### Now make the distance matrix long (one row per ordered sample pair)
dist15$Sites1 <- colnames(dist15)
dist15.collapse <- dist15 %>%
gather(Sites2, Community_Distance, - Sites1)
# add sites from metadata (joined once per pair member; .x/.y suffixes)
dist15.sites <- left_join( dist15.collapse, select(meta15, site, sample, samp.short), by=c("Sites1" = "samp.short") )
dist15.sites <- left_join( dist15.sites, select(meta15, site, sample, samp.short), by=c("Sites2" = "samp.short") )
dist15.sites <- dist15.sites %>% select( Sites1=site.x, Sites2=site.y, Community_Distance )
# Key each row by "Site1-Site2" for the join with geographic distances.
dist15.distance <- dist15.sites %>%
# separate(Sites1, c("Site_1", "Sample_1"), sep = "-", remove = TRUE)%>%
# separate(Sites2, c("Site_2", "Sample_2"), sep = "-", remove = TRUE)%>%
unite("Site.Pair", Sites1, Sites2, sep = "-", remove = FALSE)
### Unite into one data frame
Hakai.2015.distance <- left_join(dist15.distance,Hakai.geographic.distance, by = "Site.Pair") %>%
filter( !is.na(Community_Distance) )
### plots (site-coloured diagnostic versions kept below for reference)
# Graph1 <- Hakai.2015.distance %>%
# drop_na(Geog_Distance) %>%
# drop_na(Community_Distance)%>%
# ggplot(aes(x = Geog_Distance, y = Community_Distance))+
# theme_classic()+
# geom_point(aes(colour = Sites1),alpha=0.25)+
# xlab("Geographic distance (km)")+
# ylab("B-C dissimilarity 2015 Macroeukaryotes")+
# geom_smooth(method = lm)
# Graph2 <- Hakai.2015.distance %>%
# drop_na(Geog_Distance) %>%
# drop_na(Community_Distance)%>%
# ggplot(aes(x = Geog_Distance, y = Community_Distance))+
# theme_classic()+
# geom_point(aes(colour = Sites2),alpha=0.25)+
# xlab("Geographic distance (km)")+
# ylab("B-C dissimilarity 2015 Macroeukaryotes")+
# geom_smooth(method = lm)
# plot_grid(Graph1, Graph2, nrow = 2)
# Final panel: points plus a linear fit of dissimilarity on distance.
BC15 <- Hakai.2015.distance %>%
drop_na(Geog_Distance) %>%
drop_na(Community_Distance)%>%
ggplot(aes(x = Geog_Distance, y = Community_Distance))+
theme_classic()+
geom_point(alpha=0.25)+
xlab("Geographic distance (km)")+
ylab("B-C dissimilarity\n2015 macroeukaryotes")+
ylim( c(0,1) ) + xlim( c(0,41) ) +
geom_smooth(method = lm)
##### 2014 grazers ------------------------------------------------------------
# Build the 2014 macroeukaryote distance-decay panel (BC14), then assemble
# and save the four yearly macroeukaryote panels.
# Load the Bray-Curtis matrix; keep only the upper triangle so each
# unordered sample pair contributes once.
dist14 <- read_csv( paste0(path,"2014_macroeuk_braycurtis_",level,".csv") )
dist14 <- as.data.frame(as.matrix(dist14))
dist14[lower.tri(dist14, diag = TRUE)] <- NA
# Metadata; site name = sample id minus its trailing replicate token.
meta14 <- read_csv( paste0(path,"2014_macroeuk_metadata.csv") )
meta14$site <- unlist( lapply( strsplit( meta14$sample, split = "_"), function(z) paste(z[1:(length(z)-1)],collapse = "_") ) )
# Recreate the shortened sample labels used as matrix column names.
meta14$samp.short <- vegan::make.cepnames(meta14$sample)
# Reshape to long: one row per ordered pair of samples.
dist14$Sites1 <- colnames(dist14)
dist14.collapse <- dist14 %>%
  gather(Sites2, Community_Distance, - Sites1)
# Attach site names for both members of each pair (.x/.y suffixes).
dist14.sites <- left_join( dist14.collapse, select(meta14, site, sample, samp.short), by=c("Sites1" = "samp.short") )
dist14.sites <- left_join( dist14.sites, select(meta14, site, sample, samp.short), by=c("Sites2" = "samp.short") )
dist14.sites <- dist14.sites %>% select( Sites1=site.x, Sites2=site.y, Community_Distance )
# Key each row by "Site1-Site2" to match the geographic distance table.
dist14.distance <- dist14.sites %>%
  unite("Site.Pair", Sites1, Sites2, sep = "-", remove = FALSE)
### Join community dissimilarity onto geographic distance
Hakai.2014.distance <- left_join(dist14.distance,Hakai.geographic.distance, by = "Site.Pair") %>%
  filter( !is.na(Community_Distance) )
# Distance-decay panel for 2014 macroeukaryotes.
BC14 <- Hakai.2014.distance %>%
  drop_na(Geog_Distance) %>%
  drop_na(Community_Distance)%>%
  ggplot(aes(x = Geog_Distance, y = Community_Distance))+
  theme_classic()+
  geom_point(alpha=0.25)+
  xlab("Geographic distance (km)")+
  ylab("B-C dissimilarity\n2014 macroeukaryotes")+
  ylim( c(0,1) ) + xlim( c(0,41) ) +
  geom_smooth(method = lm)
# Assemble and save the four macroeukaryote panels.
windows(12,3)  # NOTE(review): Windows-only graphics device; errors on macOS/Linux
macro <- cowplot::plot_grid( BC14, BC15, BC16, BC17, ncol=4)
# Pass the composed grid explicitly: the original relied on
# ggplot2::last_plot(), which is fragile after cowplot::plot_grid().
ggsave( paste0("R_Code_and_Analysis/distance_decay/BCdecay_macroeuk_",level,".png"), plot = macro, width = 12, height = 3 )
#### MICROBES
#### Prokaryotes - 16S
## ASV LEVEL
# pick a year
year <- 2015
# load 16S microbial Bray-Curtis distance matrix, ASV level
bc_16S15_ASV <- read_csv(paste0("R_Code_and_Analysis/mantel/ASV_16S_",year,"_braycurtis.csv") )
# bc_16S15_ASV <- bc_16S15_ASV %>%
# dplyr::rename("sample" = "X1")
bc_16S15_meta <- read_csv(paste0("R_Code_and_Analysis/mantel/ASV_16S_",year,"_metadata.csv") )
### Now make the distance matrices long
# NOTE(review): unlike the macroeukaryote sections, the lower triangle and
# diagonal are NOT masked here, so self-pairs (distance 0) and duplicated
# pairs enter the plot -- confirm this is intended.
bc_16S15_ASV$Sites1 <- colnames(bc_16S15_ASV)
dist16S15.collapse <- bc_16S15_ASV %>%
gather(Sites2, Community_Distance, -Sites1)
# add sites from metadata ("labels" matches the matrix column names)
dist16S15.sites <- left_join( dist16S15.collapse, select(bc_16S15_meta, site, site_quadrat_id, labels), by=c("Sites1" = "labels") )
dist16S15.sites <- left_join( dist16S15.sites, select(bc_16S15_meta, site, site_quadrat_id, labels), by=c("Sites2" = "labels") )
dist16S15.sites <- dist16S15.sites %>% select( Sites1=site.x, Sites2=site.y, Community_Distance )
# Key each row by "Site1-Site2" for the join with geographic distances.
dist16S15.distance <- dist16S15.sites %>%
# separate(Sites1, c("Site_1", "Sample_1"), sep = "-", remove = TRUE)%>%
# separate(Sites2, c("Site_2", "Sample_2"), sep = "-", remove = TRUE)%>%
unite("Site.Pair", Sites1, Sites2, sep = "-", remove = FALSE)
### Unite into one data frame
Hakai.2015.distance.16S <- left_join(dist16S15.distance,Hakai.geographic.distance, by = "Site.Pair") %>%
filter( !is.na(Community_Distance) )
# ### plots
# Graph1 <- Hakai.2015.distance.16S %>%
# drop_na(Geog_Distance) %>%
# drop_na(Community_Distance)%>%
# ggplot(aes(x = Geog_Distance, y = Community_Distance))+
# theme_classic()+
# geom_point(aes(colour = Sites1),alpha=0.25)+
# xlab("Geographic distance (km)")+
# ylab("B-C dissimilarity 2015 prokaryotes")+
# geom_smooth(method = lm)
# Graph2 <- Hakai.2015.distance.16S %>%
# drop_na(Geog_Distance) %>%
# drop_na(Community_Distance)%>%
# ggplot(aes(x = Geog_Distance, y = Community_Distance))+
# theme_classic()+
# geom_point(aes(colour = Sites2),alpha=0.25)+
# xlab("Geographic distance (km)")+
# ylab("B-C dissimilarity 2015 prokaryotes")+
# geom_smooth(method = lm)
# plot_grid(Graph1, Graph2, nrow = 2)
# Distance-decay panel for 2015 prokaryotes (ASV level).
BC15 <- Hakai.2015.distance.16S %>%
drop_na(Geog_Distance) %>%
drop_na(Community_Distance)%>%
ggplot(aes(x = Geog_Distance, y = Community_Distance))+
theme_classic()+
geom_point(alpha=0.25)+
xlab("Geographic distance (km)")+
ylab("B-C dissimilarity\n2015 prokaryotes")+
ylim( c(0,1) ) + xlim( c(0,41) ) +
geom_smooth(method = lm)
# pick a year
year <- 2016
# Load the 2016 prokaryote (16S, ASV-level) Bray-Curtis matrix and metadata.
bc_16S16_ASV <- read_csv(paste0("R_Code_and_Analysis/mantel/ASV_16S_", year, "_braycurtis.csv"))
bc_16S16_meta <- read_csv(paste0("R_Code_and_Analysis/mantel/ASV_16S_", year, "_metadata.csv"))
# NOTE(review): unlike the macroeukaryote sections, the full matrix is kept
# (no upper-triangle masking), so self-pairs and duplicated pairs enter the
# plot -- confirm this is intended.
# Long format: one row per ordered pair of sample labels.
bc_16S16_ASV$Sites1 <- colnames(bc_16S16_ASV)
dist16S16.collapse <- gather(bc_16S16_ASV, Sites2, Community_Distance, -Sites1)
# Map both sample labels of each pair to their site names.
site.key16 <- select(bc_16S16_meta, site, site_quadrat_id, labels)
dist16S16.sites <- dist16S16.collapse %>%
  left_join(site.key16, by = c("Sites1" = "labels")) %>%
  left_join(site.key16, by = c("Sites2" = "labels")) %>%
  select(Sites1 = site.x, Sites2 = site.y, Community_Distance)
# Key each row by "Site1-Site2" to match the geographic distance table.
dist16S16.distance <- unite(dist16S16.sites, "Site.Pair", Sites1, Sites2,
                            sep = "-", remove = FALSE)
# Pair community dissimilarity with geographic distance.
Hakai.2016.distance.16S <- dist16S16.distance %>%
  left_join(Hakai.geographic.distance, by = "Site.Pair") %>%
  filter(!is.na(Community_Distance))
# Distance-decay panel for 2016 prokaryotes (ASV level).
BC16 <- Hakai.2016.distance.16S %>%
  drop_na(Geog_Distance) %>%
  drop_na(Community_Distance) %>%
  ggplot(aes(x = Geog_Distance, y = Community_Distance)) +
  theme_classic() +
  geom_point(alpha = 0.25) +
  xlab("Geographic distance (km)") +
  ylab("B-C dissimilarity\n2016 prokaryotes") +
  ylim(c(0, 1)) + xlim(c(0, 41)) +
  geom_smooth(method = lm)
# pick a year
year <- 2017
# load 16S microbial Bray-Curtis distance matrix, ASV level
bc_16S17_ASV <- read_csv(paste0("R_Code_and_Analysis/mantel/ASV_16S_",year,"_braycurtis.csv") )
# bc_16S17_ASV <- bc_16S17_ASV %>%
# dplyr::rename("sample" = "X1")
bc_16S17_meta <- read_csv(paste0("R_Code_and_Analysis/mantel/ASV_16S_",year,"_metadata.csv") )
### Now make the distance matrices long (no upper-triangle masking here)
bc_16S17_ASV$Sites1 <- colnames(bc_16S17_ASV)
dist16S17.collapse <- bc_16S17_ASV %>%
gather(Sites2, Community_Distance, -Sites1)
# add sites from metadata ("labels" matches the matrix column names)
dist16S17.sites <- left_join( dist16S17.collapse, select(bc_16S17_meta, site, site_quadrat_id, labels), by=c("Sites1" = "labels") )
dist16S17.sites <- left_join( dist16S17.sites, select(bc_16S17_meta, site, site_quadrat_id, labels), by=c("Sites2" = "labels") )
dist16S17.sites <- dist16S17.sites %>% select( Sites1=site.x, Sites2=site.y, Community_Distance )
# Key each row by "Site1-Site2" for the join with geographic distances.
dist16S17.distance <- dist16S17.sites %>%
# separate(Sites1, c("Site_1", "Sample_1"), sep = "-", remove = TRUE)%>%
# separate(Sites2, c("Site_2", "Sample_2"), sep = "-", remove = TRUE)%>%
unite("Site.Pair", Sites1, Sites2, sep = "-", remove = FALSE)
### Unite into one data frame
Hakai.2017.distance.16S <- left_join(dist16S17.distance,Hakai.geographic.distance, by = "Site.Pair") %>%
filter( !is.na(Community_Distance) )
# ### plots
# Graph1 <- Hakai.2017.distance.16S %>%
# drop_na(Geog_Distance) %>%
# drop_na(Community_Distance)%>%
# ggplot(aes(x = Geog_Distance, y = Community_Distance))+
# theme_classic()+
# geom_point(aes(colour = Sites1),alpha=0.25)+
# xlab("Geographic distance (km)")+
# ylab("B-C dissimilarity 2017 prokaryotes")+
# geom_smooth(method = lm)
# Graph2 <- Hakai.2017.distance.16S %>%
# drop_na(Geog_Distance) %>%
# drop_na(Community_Distance)%>%
# ggplot(aes(x = Geog_Distance, y = Community_Distance))+
# theme_classic()+
# geom_point(aes(colour = Sites2),alpha=0.25)+
# xlab("Geographic distance (km)")+
# ylab("B-C dissimilarity 2017 prokaryotes")+
# geom_smooth(method = lm)
# plot_grid(Graph1, Graph2, nrow = 2)
# Distance-decay panel for 2017 prokaryotes (ASV level).
BC17 <- Hakai.2017.distance.16S %>%
drop_na(Geog_Distance) %>%
drop_na(Community_Distance)%>%
ggplot(aes(x = Geog_Distance, y = Community_Distance))+
theme_classic()+
geom_point(alpha=0.25)+
xlab("Geographic distance (km)")+
ylab("B-C dissimilarity\n2017 prokaryotes")+
ylim( c(0,1) ) + xlim( c(0,41) ) +
geom_smooth(method = lm)
# pick a year
year <- 2018
# Load the 2018 prokaryote (16S, ASV-level) Bray-Curtis matrix and metadata.
bc_16S18_ASV <- read_csv(paste0("R_Code_and_Analysis/mantel/ASV_16S_", year, "_braycurtis.csv"))
bc_16S18_meta <- read_csv(paste0("R_Code_and_Analysis/mantel/ASV_16S_", year, "_metadata.csv"))
# Long format: one row per ordered pair of sample labels.
bc_16S18_ASV$Sites1 <- colnames(bc_16S18_ASV)
dist16S18.collapse <- bc_16S18_ASV %>%
  gather(Sites2, Community_Distance, -Sites1)
# Attach site names for both members of each pair (.x/.y suffixes).
dist16S18.sites <- left_join( dist16S18.collapse, select(bc_16S18_meta, site, site_quadrat_id, labels), by=c("Sites1" = "labels") )
dist16S18.sites <- left_join( dist16S18.sites, select(bc_16S18_meta, site, site_quadrat_id, labels), by=c("Sites2" = "labels") )
dist16S18.sites <- dist16S18.sites %>% select( Sites1=site.x, Sites2=site.y, Community_Distance )
# Key each row by "Site1-Site2" to match the geographic distance table.
dist16S18.distance <- dist16S18.sites %>%
  unite("Site.Pair", Sites1, Sites2, sep = "-", remove = FALSE)
### Join onto geographic distances between site pairs
Hakai.2018.distance.16S <- left_join(dist16S18.distance, Hakai.geographic.distance, by = "Site.Pair") %>%
  filter( !is.na(Community_Distance) )
# Distance-decay panel for 2018 prokaryotes (ASV level).
BC18 <- Hakai.2018.distance.16S %>%
  drop_na(Geog_Distance) %>%
  drop_na(Community_Distance)%>%
  ggplot(aes(x = Geog_Distance, y = Community_Distance))+
  theme_classic()+
  geom_point(alpha=0.25)+
  xlab("Geographic distance (km)")+
  ylab("B-C dissimilarity\n2018 prokaryotes")+
  ylim( c(0,1) ) + xlim( c(0,41) ) +
  geom_smooth(method = lm)
# Assemble the four yearly prokaryote panels under a shared title and save.
windows(12,3)  # NOTE(review): Windows-only graphics device; errors on macOS/Linux
title <- ggdraw() + draw_label("ASV level",fontface = 'bold', size = 14, x = 0.5, hjust = 0) # add margin on the left of the drawing canvas, so title is aligned with left edge of first plot
ASV_16S_raw <- cowplot::plot_grid( BC15, BC16, BC17, BC18, ncol=4)
# BUG FIX: the original referenced an undefined object `plots`; the panel
# grid created above is named ASV_16S_raw.
ASV_16S <- plot_grid(title, ASV_16S_raw, ncol = 1, rel_heights = c(0.05, 1)) # rel_heights values control vertical title margins
# Pass the plot explicitly instead of relying on ggplot2::last_plot().
ggsave(paste0("R_Code_and_Analysis/distance_decay/BCdecay_prokaryote_ASV.png"), plot = ASV_16S, width = 12, height = 5 )
## GENUS LEVEL
# Same pipeline as the ASV sections above, but on genus-aggregated tables.
# NOTE(review): the dist16S15.* intermediates below overwrite the ASV-level
# objects of the same names.
# pick a year
year <- 2015
#load 16S microbial distance matrix GENUS
bc_16S15_genus <- read_csv(paste0("R_Code_and_Analysis/mantel/genus_16S_",year,"_braycurtis.csv") )
# bc_16S15_genus <- bc_16S15_genus %>%
# dplyr::rename("sample" = "X1")
bc_16S15_meta <- read_csv(paste0("R_Code_and_Analysis/mantel/genus_16S_",year,"_metadata.csv") )
### Now make the distance matrices long
bc_16S15_genus$Sites1 <- colnames(bc_16S15_genus)
dist16S15.collapse <- bc_16S15_genus %>%
gather(Sites2, Community_Distance, -Sites1)
# add sites from metadata ("labels" matches the matrix column names)
dist16S15.sites <- left_join( dist16S15.collapse, select(bc_16S15_meta, site, site_quadrat_id, labels), by=c("Sites1" = "labels") )
dist16S15.sites <- left_join( dist16S15.sites, select(bc_16S15_meta, site, site_quadrat_id, labels), by=c("Sites2" = "labels") )
dist16S15.sites <- dist16S15.sites %>% select( Sites1=site.x, Sites2=site.y, Community_Distance )
# Key each row by "Site1-Site2" for the join with geographic distances.
dist16S15.distance <- dist16S15.sites %>%
# separate(Sites1, c("Site_1", "Sample_1"), sep = "-", remove = TRUE)%>%
# separate(Sites2, c("Site_2", "Sample_2"), sep = "-", remove = TRUE)%>%
unite("Site.Pair", Sites1, Sites2, sep = "-", remove = FALSE)
### Unite into one data frame
Hakai.2015.distance.16S <- left_join(dist16S15.distance,Hakai.geographic.distance, by = "Site.Pair") %>%
filter( !is.na(Community_Distance) )
# ### plots
# Graph1 <- Hakai.2015.distance.16S %>%
# drop_na(Geog_Distance) %>%
# drop_na(Community_Distance)%>%
# ggplot(aes(x = Geog_Distance, y = Community_Distance))+
# theme_classic()+
# geom_point(aes(colour = Sites1),alpha=0.25)+
# xlab("Geographic distance (km)")+
# ylab("B-C dissimilarity 2015 prokaryotes")+
# geom_smooth(method = lm)
# Graph2 <- Hakai.2015.distance.16S %>%
# drop_na(Geog_Distance) %>%
# drop_na(Community_Distance)%>%
# ggplot(aes(x = Geog_Distance, y = Community_Distance))+
# theme_classic()+
# geom_point(aes(colour = Sites2),alpha=0.25)+
# xlab("Geographic distance (km)")+
# ylab("B-C dissimilarity 2015 prokaryotes")+
# geom_smooth(method = lm)
# plot_grid(Graph1, Graph2, nrow = 2)
# Distance-decay panel for 2015 prokaryotes (genus level).
BC15 <- Hakai.2015.distance.16S %>%
drop_na(Geog_Distance) %>%
drop_na(Community_Distance)%>%
ggplot(aes(x = Geog_Distance, y = Community_Distance))+
theme_classic()+
geom_point(alpha=0.25)+
xlab("Geographic distance (km)")+
ylab("B-C dissimilarity\n2015 prokaryotes")+
ylim( c(0,1) ) + xlim( c(0,41) ) +
geom_smooth(method = lm)
# pick a year
year <- 2016
#load 16S microbial distance matrix GENUS
bc_16S16_genus <- read_csv(paste0("R_Code_and_Analysis/mantel/genus_16S_",year,"_braycurtis.csv") )
# bc_16S16_genus <- bc_16S16_genus %>%
# dplyr::rename("sample" = "X1")
bc_16S16_meta <- read_csv(paste0("R_Code_and_Analysis/mantel/genus_16S_",year,"_metadata.csv") )
### Now make the distance matrices long
bc_16S16_genus$Sites1 <- colnames(bc_16S16_genus)
dist16S16.collapse <- bc_16S16_genus %>%
gather(Sites2, Community_Distance, -Sites1)
# add sites from metadata ("labels" matches the matrix column names)
dist16S16.sites <- left_join( dist16S16.collapse, select(bc_16S16_meta, site, site_quadrat_id, labels), by=c("Sites1" = "labels") )
dist16S16.sites <- left_join( dist16S16.sites, select(bc_16S16_meta, site, site_quadrat_id, labels), by=c("Sites2" = "labels") )
dist16S16.sites <- dist16S16.sites %>% select( Sites1=site.x, Sites2=site.y, Community_Distance )
# Key each row by "Site1-Site2" for the join with geographic distances.
dist16S16.distance <- dist16S16.sites %>%
# separate(Sites1, c("Site_1", "Sample_1"), sep = "-", remove = TRUE)%>%
# separate(Sites2, c("Site_2", "Sample_2"), sep = "-", remove = TRUE)%>%
unite("Site.Pair", Sites1, Sites2, sep = "-", remove = FALSE)
### Unite into one data frame
Hakai.2016.distance.16S <- left_join(dist16S16.distance,Hakai.geographic.distance, by = "Site.Pair") %>%
filter( !is.na(Community_Distance) )
# ### plots
# Graph1 <- Hakai.2016.distance.16S %>%
# drop_na(Geog_Distance) %>%
# drop_na(Community_Distance)%>%
# ggplot(aes(x = Geog_Distance, y = Community_Distance))+
# theme_classic()+
# geom_point(aes(colour = Sites1),alpha=0.25)+
# xlab("Geographic distance (km)")+
# ylab("B-C dissimilarity 2016 prokaryotes")+
# geom_smooth(method = lm)
# Graph2 <- Hakai.2016.distance.16S %>%
# drop_na(Geog_Distance) %>%
# drop_na(Community_Distance)%>%
# ggplot(aes(x = Geog_Distance, y = Community_Distance))+
# theme_classic()+
# geom_point(aes(colour = Sites2),alpha=0.25)+
# xlab("Geographic distance (km)")+
# ylab("B-C dissimilarity 2016 prokaryotes")+
# geom_smooth(method = lm)
# plot_grid(Graph1, Graph2, nrow = 2)
# Distance-decay panel for 2016 prokaryotes (genus level).
BC16 <- Hakai.2016.distance.16S %>%
drop_na(Geog_Distance) %>%
drop_na(Community_Distance)%>%
ggplot(aes(x = Geog_Distance, y = Community_Distance))+
theme_classic()+
geom_point(alpha=0.25)+
xlab("Geographic distance (km)")+
ylab("B-C dissimilarity\n2016 prokaryotes")+
ylim( c(0,1) ) + xlim( c(0,41) ) +
geom_smooth(method = lm)
# pick a year
year <- 2017
#load 16S microbial distance matrix GENUS
bc_16S17_genus <- read_csv(paste0("R_Code_and_Analysis/mantel/genus_16S_",year,"_braycurtis.csv") )
# bc_16S17_genus <- bc_16S17_genus %>%
# dplyr::rename("sample" = "X1")
bc_16S17_meta <- read_csv(paste0("R_Code_and_Analysis/mantel/genus_16S_",year,"_metadata.csv") )
### Now make the distance matrices long
bc_16S17_genus$Sites1 <- colnames(bc_16S17_genus)
dist16S17.collapse <- bc_16S17_genus %>%
gather(Sites2, Community_Distance, -Sites1)
# add sites from metadata ("labels" matches the matrix column names)
dist16S17.sites <- left_join( dist16S17.collapse, select(bc_16S17_meta, site, site_quadrat_id, labels), by=c("Sites1" = "labels") )
dist16S17.sites <- left_join( dist16S17.sites, select(bc_16S17_meta, site, site_quadrat_id, labels), by=c("Sites2" = "labels") )
dist16S17.sites <- dist16S17.sites %>% select( Sites1=site.x, Sites2=site.y, Community_Distance )
# Key each row by "Site1-Site2" for the join with geographic distances.
dist16S17.distance <- dist16S17.sites %>%
# separate(Sites1, c("Site_1", "Sample_1"), sep = "-", remove = TRUE)%>%
# separate(Sites2, c("Site_2", "Sample_2"), sep = "-", remove = TRUE)%>%
unite("Site.Pair", Sites1, Sites2, sep = "-", remove = FALSE)
### Unite into one data frame
Hakai.2017.distance.16S <- left_join(dist16S17.distance,Hakai.geographic.distance, by = "Site.Pair") %>%
filter( !is.na(Community_Distance) )
# ### plots
# Graph1 <- Hakai.2017.distance.16S %>%
# drop_na(Geog_Distance) %>%
# drop_na(Community_Distance)%>%
# ggplot(aes(x = Geog_Distance, y = Community_Distance))+
# theme_classic()+
# geom_point(aes(colour = Sites1),alpha=0.25)+
# xlab("Geographic distance (km)")+
# ylab("B-C dissimilarity 2017 prokaryotes")+
# geom_smooth(method = lm)
# Graph2 <- Hakai.2017.distance.16S %>%
# drop_na(Geog_Distance) %>%
# drop_na(Community_Distance)%>%
# ggplot(aes(x = Geog_Distance, y = Community_Distance))+
# theme_classic()+
# geom_point(aes(colour = Sites2),alpha=0.25)+
# xlab("Geographic distance (km)")+
# ylab("B-C dissimilarity 2017 prokaryotes")+
# geom_smooth(method = lm)
# plot_grid(Graph1, Graph2, nrow = 2)
# Distance-decay panel for 2017 prokaryotes (genus level).
BC17 <- Hakai.2017.distance.16S %>%
drop_na(Geog_Distance) %>%
drop_na(Community_Distance)%>%
ggplot(aes(x = Geog_Distance, y = Community_Distance))+
theme_classic()+
geom_point(alpha=0.25)+
xlab("Geographic distance (km)")+
ylab("B-C dissimilarity\n2017 prokaryotes")+
ylim( c(0,1) ) + xlim( c(0,41) ) +
geom_smooth(method = lm)
# pick a year
year <- 2018
#load 16S microbial distance matrix GENUS
bc_16S18_genus <- read_csv(paste0("R_Code_and_Analysis/mantel/genus_16S_",year,"_braycurtis.csv") )
# bc_16S18_genus <- bc_16S18_genus %>%
# dplyr::rename("sample" = "X1")
bc_16S18_meta <- read_csv(paste0("R_Code_and_Analysis/mantel/genus_16S_",year,"_metadata.csv") )
### Now make the distance matrices long
bc_16S18_genus$Sites1 <- colnames(bc_16S18_genus)
dist16S18.collapse <- bc_16S18_genus %>%
gather(Sites2, Community_Distance, -Sites1)
# add sites from metadata ("labels" matches the matrix column names)
dist16S18.sites <- left_join( dist16S18.collapse, select(bc_16S18_meta, site, site_quadrat_id, labels), by=c("Sites1" = "labels") )
dist16S18.sites <- left_join( dist16S18.sites, select(bc_16S18_meta, site, site_quadrat_id, labels), by=c("Sites2" = "labels") )
dist16S18.sites <- dist16S18.sites %>% select( Sites1=site.x, Sites2=site.y, Community_Distance )
# Key each row by "Site1-Site2" for the join with geographic distances.
dist16S18.distance <- dist16S18.sites %>%
# separate(Sites1, c("Site_1", "Sample_1"), sep = "-", remove = TRUE)%>%
# separate(Sites2, c("Site_2", "Sample_2"), sep = "-", remove = TRUE)%>%
unite("Site.Pair", Sites1, Sites2, sep = "-", remove = FALSE)
### Unite into one data frame (section continues beyond this file chunk)
Hakai.2018.distance.16S <- left_join(dist16S18.distance,Hakai.geographic.distance, by = "Site.Pair") %>%
filter( !is.na(Community_Distance) )
# ### plots
# Graph1 <- Hakai.2018.distance.16S %>%
# drop_na(Geog_Distance) %>%
# drop_na(Community_Distance)%>%
# ggplot(aes(x = Geog_Distance, y = Community_Distance))+
# theme_classic()+
# geom_point(aes(colour = Sites1),alpha=0.25)+
# xlab("Geographic distance (km)")+
# ylab("B-C dissimilarity 2018 prokaryotes")+
# geom_smooth(method = lm)
# Graph2 <- Hakai.2018.distance.16S %>%
# drop_na(Geog_Distance) %>%
# drop_na(Community_Distance)%>%
# ggplot(aes(x = Geog_Distance, y = Community_Distance))+
# theme_classic()+
# geom_point(aes(colour = Sites2),alpha=0.25)+
# xlab("Geographic distance (km)")+
# ylab("B-C dissimilarity 2018 prokaryotes")+
# geom_smooth(method = lm)
# plot_grid(Graph1, Graph2, nrow = 2)
BC18 <- Hakai.2018.distance.16S %>%
drop_na(Geog_Distance) %>%
drop_na(Community_Distance)%>%
ggplot(aes(x = Geog_Distance, y = Community_Distance))+
theme_classic()+
geom_point(alpha=0.25)+
xlab("Geographic distance (km)")+
ylab("B-C dissimilarity\n2018 prokaryotes")+
ylim( c(0,1) ) + xlim( c(0,41) ) +
geom_smooth(method = lm)
# Compose the four yearly genus-level prokaryote panels into one row with a
# shared title, then save.  The composed plot is passed to ggsave()
# explicitly: relying on ggplot2::last_plot() after cowplot::plot_grid() is
# fragile and can save the wrong (last individual) panel.
title <- ggdraw() + draw_label("genus level", fontface = 'bold', size = 14, x = 0.5, hjust = 0)
genus_16S_raw <- cowplot::plot_grid(BC15, BC16, BC17, BC18, ncol = 4)
# rel_heights reserves a thin strip at the top for the title.
genus_16S <- plot_grid(title, genus_16S_raw, ncol = 1, rel_heights = c(0.05, 1))
ggsave("R_Code_and_Analysis/distance_decay/BCdecay_prokaryote_genus.png",
       plot = genus_16S, width = 12, height = 5)
### FAMILY LEVEL (16S, prokaryotes)
# ---- 2015: family-level 16S community distance vs. geographic distance ----
year <- 2015
# Load the Bray-Curtis distance matrix and its sample metadata.
bc_16S15_family <- read_csv(paste0("R_Code_and_Analysis/mantel/family_16S_", year, "_braycurtis.csv"))
bc_16S15_meta <- read_csv(paste0("R_Code_and_Analysis/mantel/family_16S_", year, "_metadata.csv"))
# Reshape the square matrix to long format (one row per sample pair).
bc_16S15_family$Sites1 <- colnames(bc_16S15_family)
dist16S15.collapse <- gather(bc_16S15_family, Sites2, Community_Distance, -Sites1)
# Translate sample labels to site names via the metadata.
dist16S15.sites <- dist16S15.collapse %>%
  left_join(select(bc_16S15_meta, site, site_quadrat_id, labels), by = c("Sites1" = "labels")) %>%
  left_join(select(bc_16S15_meta, site, site_quadrat_id, labels), by = c("Sites2" = "labels")) %>%
  select(Sites1 = site.x, Sites2 = site.y, Community_Distance)
# Build a "siteA-siteB" key matching Hakai.geographic.distance$Site.Pair.
dist16S15.distance <- unite(dist16S15.sites, "Site.Pair", Sites1, Sites2, sep = "-", remove = FALSE)
# Attach geographic distances and drop pairs with no community distance.
Hakai.2015.distance.16S <- dist16S15.distance %>%
  left_join(Hakai.geographic.distance, by = "Site.Pair") %>%
  filter(!is.na(Community_Distance))
# Distance-decay scatterplot with a linear fit for 2015 prokaryotes.
BC15 <- Hakai.2015.distance.16S %>%
  drop_na(Geog_Distance, Community_Distance) %>%
  ggplot(aes(x = Geog_Distance, y = Community_Distance)) +
  geom_point(alpha = 0.25) +
  geom_smooth(method = lm) +
  theme_classic() +
  labs(x = "Geographic distance (km)", y = "B-C dissimilarity\n2015 prokaryotes") +
  ylim(0, 1) + xlim(0, 41)
# ---- 2016: family-level 16S community distance vs. geographic distance ----
year <- 2016
# Load the Bray-Curtis distance matrix and its sample metadata.
bc_16S16_family <- read_csv(paste0("R_Code_and_Analysis/mantel/family_16S_", year, "_braycurtis.csv"))
bc_16S16_meta <- read_csv(paste0("R_Code_and_Analysis/mantel/family_16S_", year, "_metadata.csv"))
# Reshape the square matrix to long format (one row per sample pair).
bc_16S16_family$Sites1 <- colnames(bc_16S16_family)
dist16S16.collapse <- gather(bc_16S16_family, Sites2, Community_Distance, -Sites1)
# Translate sample labels to site names via the metadata.
dist16S16.sites <- dist16S16.collapse %>%
  left_join(select(bc_16S16_meta, site, site_quadrat_id, labels), by = c("Sites1" = "labels")) %>%
  left_join(select(bc_16S16_meta, site, site_quadrat_id, labels), by = c("Sites2" = "labels")) %>%
  select(Sites1 = site.x, Sites2 = site.y, Community_Distance)
# Build a "siteA-siteB" key matching Hakai.geographic.distance$Site.Pair.
dist16S16.distance <- unite(dist16S16.sites, "Site.Pair", Sites1, Sites2, sep = "-", remove = FALSE)
# Attach geographic distances and drop pairs with no community distance.
Hakai.2016.distance.16S <- dist16S16.distance %>%
  left_join(Hakai.geographic.distance, by = "Site.Pair") %>%
  filter(!is.na(Community_Distance))
# Distance-decay scatterplot with a linear fit for 2016 prokaryotes.
BC16 <- Hakai.2016.distance.16S %>%
  drop_na(Geog_Distance, Community_Distance) %>%
  ggplot(aes(x = Geog_Distance, y = Community_Distance)) +
  geom_point(alpha = 0.25) +
  geom_smooth(method = lm) +
  theme_classic() +
  labs(x = "Geographic distance (km)", y = "B-C dissimilarity\n2016 prokaryotes") +
  ylim(0, 1) + xlim(0, 41)
# ---- 2017: family-level 16S community distance vs. geographic distance ----
year <- 2017
# Load the Bray-Curtis distance matrix and its sample metadata.
bc_16S17_family <- read_csv(paste0("R_Code_and_Analysis/mantel/family_16S_", year, "_braycurtis.csv"))
bc_16S17_meta <- read_csv(paste0("R_Code_and_Analysis/mantel/family_16S_", year, "_metadata.csv"))
# Reshape the square matrix to long format (one row per sample pair).
bc_16S17_family$Sites1 <- colnames(bc_16S17_family)
dist16S17.collapse <- gather(bc_16S17_family, Sites2, Community_Distance, -Sites1)
# Translate sample labels to site names via the metadata.
dist16S17.sites <- dist16S17.collapse %>%
  left_join(select(bc_16S17_meta, site, site_quadrat_id, labels), by = c("Sites1" = "labels")) %>%
  left_join(select(bc_16S17_meta, site, site_quadrat_id, labels), by = c("Sites2" = "labels")) %>%
  select(Sites1 = site.x, Sites2 = site.y, Community_Distance)
# Build a "siteA-siteB" key matching Hakai.geographic.distance$Site.Pair.
dist16S17.distance <- unite(dist16S17.sites, "Site.Pair", Sites1, Sites2, sep = "-", remove = FALSE)
# Attach geographic distances and drop pairs with no community distance.
Hakai.2017.distance.16S <- dist16S17.distance %>%
  left_join(Hakai.geographic.distance, by = "Site.Pair") %>%
  filter(!is.na(Community_Distance))
# Distance-decay scatterplot with a linear fit for 2017 prokaryotes.
BC17 <- Hakai.2017.distance.16S %>%
  drop_na(Geog_Distance, Community_Distance) %>%
  ggplot(aes(x = Geog_Distance, y = Community_Distance)) +
  geom_point(alpha = 0.25) +
  geom_smooth(method = lm) +
  theme_classic() +
  labs(x = "Geographic distance (km)", y = "B-C dissimilarity\n2017 prokaryotes") +
  ylim(0, 1) + xlim(0, 41)
# ---- 2018: family-level 16S community distance vs. geographic distance ----
year <- 2018
# Load the Bray-Curtis distance matrix and its sample metadata.
bc_16S18_family <- read_csv(paste0("R_Code_and_Analysis/mantel/family_16S_", year, "_braycurtis.csv"))
bc_16S18_meta <- read_csv(paste0("R_Code_and_Analysis/mantel/family_16S_", year, "_metadata.csv"))
# Reshape the square matrix to long format (one row per sample pair).
bc_16S18_family$Sites1 <- colnames(bc_16S18_family)
dist16S18.collapse <- gather(bc_16S18_family, Sites2, Community_Distance, -Sites1)
# Translate sample labels to site names via the metadata.
dist16S18.sites <- dist16S18.collapse %>%
  left_join(select(bc_16S18_meta, site, site_quadrat_id, labels), by = c("Sites1" = "labels")) %>%
  left_join(select(bc_16S18_meta, site, site_quadrat_id, labels), by = c("Sites2" = "labels")) %>%
  select(Sites1 = site.x, Sites2 = site.y, Community_Distance)
# Build a "siteA-siteB" key matching Hakai.geographic.distance$Site.Pair.
dist16S18.distance <- unite(dist16S18.sites, "Site.Pair", Sites1, Sites2, sep = "-", remove = FALSE)
# Attach geographic distances and drop pairs with no community distance.
Hakai.2018.distance.16S <- dist16S18.distance %>%
  left_join(Hakai.geographic.distance, by = "Site.Pair") %>%
  filter(!is.na(Community_Distance))
# Distance-decay scatterplot with a linear fit for 2018 prokaryotes.
BC18 <- Hakai.2018.distance.16S %>%
  drop_na(Geog_Distance, Community_Distance) %>%
  ggplot(aes(x = Geog_Distance, y = Community_Distance)) +
  geom_point(alpha = 0.25) +
  geom_smooth(method = lm) +
  theme_classic() +
  labs(x = "Geographic distance (km)", y = "B-C dissimilarity\n2018 prokaryotes") +
  ylim(0, 1) + xlim(0, 41)
# Compose the four yearly family-level prokaryote panels into one row with a
# shared title, then save.  The composed plot is passed to ggsave()
# explicitly: relying on ggplot2::last_plot() after cowplot::plot_grid() is
# fragile and can save the wrong (last individual) panel.
title <- ggdraw() + draw_label("family level", fontface = 'bold', size = 14, x = 0.5, hjust = 0)
family_16S_raw <- cowplot::plot_grid(BC15, BC16, BC17, BC18, ncol = 4)
# rel_heights reserves a thin strip at the top for the title.
family_16S <- plot_grid(title, family_16S_raw, ncol = 1, rel_heights = c(0.05, 1))
ggsave("R_Code_and_Analysis/distance_decay/BCdecay_prokaryote_family.png",
       plot = family_16S, width = 12, height = 5)
### microeukaryotes - 18S
## ASV LEVEL
# ---- 2015: ASV-level 18S community distance vs. geographic distance ----
year <- 2015
# Load the Bray-Curtis distance matrix and its sample metadata.
bc_18S15_ASV <- read_csv(paste0("R_Code_and_Analysis/mantel/ASV_18S_", year, "_braycurtis.csv"))
bc_18S15_meta <- read_csv(paste0("R_Code_and_Analysis/mantel/ASV_18S_", year, "_metadata.csv"))
# Reshape the square matrix to long format (one row per sample pair).
bc_18S15_ASV$Sites1 <- colnames(bc_18S15_ASV)
dist18S15.collapse <- gather(bc_18S15_ASV, Sites2, Community_Distance, -Sites1)
# Translate sample labels to site names via the metadata.
dist18S15.sites <- dist18S15.collapse %>%
  left_join(select(bc_18S15_meta, site, site_quadrat_id, labels), by = c("Sites1" = "labels")) %>%
  left_join(select(bc_18S15_meta, site, site_quadrat_id, labels), by = c("Sites2" = "labels")) %>%
  select(Sites1 = site.x, Sites2 = site.y, Community_Distance)
# Build a "siteA-siteB" key matching Hakai.geographic.distance$Site.Pair.
dist18S15.distance <- unite(dist18S15.sites, "Site.Pair", Sites1, Sites2, sep = "-", remove = FALSE)
# Attach geographic distances and drop pairs with no community distance.
Hakai.2015.distance.18S <- dist18S15.distance %>%
  left_join(Hakai.geographic.distance, by = "Site.Pair") %>%
  filter(!is.na(Community_Distance))
# Distance-decay scatterplot with a linear fit for 2015 microeukaryotes.
BC15 <- Hakai.2015.distance.18S %>%
  drop_na(Geog_Distance, Community_Distance) %>%
  ggplot(aes(x = Geog_Distance, y = Community_Distance)) +
  geom_point(alpha = 0.25) +
  geom_smooth(method = lm) +
  theme_classic() +
  labs(x = "Geographic distance (km)", y = "B-C dissimilarity\n2015 microeukaryotes") +
  ylim(0, 1) + xlim(0, 41)
# ---- 2016: ASV-level 18S community distance vs. geographic distance ----
year <- 2016
# Load the Bray-Curtis distance matrix and its sample metadata.
bc_18S16_ASV <- read_csv(paste0("R_Code_and_Analysis/mantel/ASV_18S_", year, "_braycurtis.csv"))
bc_18S16_meta <- read_csv(paste0("R_Code_and_Analysis/mantel/ASV_18S_", year, "_metadata.csv"))
# Reshape the square matrix to long format (one row per sample pair).
bc_18S16_ASV$Sites1 <- colnames(bc_18S16_ASV)
dist18S16.collapse <- gather(bc_18S16_ASV, Sites2, Community_Distance, -Sites1)
# Translate sample labels to site names via the metadata.
dist18S16.sites <- dist18S16.collapse %>%
  left_join(select(bc_18S16_meta, site, site_quadrat_id, labels), by = c("Sites1" = "labels")) %>%
  left_join(select(bc_18S16_meta, site, site_quadrat_id, labels), by = c("Sites2" = "labels")) %>%
  select(Sites1 = site.x, Sites2 = site.y, Community_Distance)
# Build a "siteA-siteB" key matching Hakai.geographic.distance$Site.Pair.
dist18S16.distance <- unite(dist18S16.sites, "Site.Pair", Sites1, Sites2, sep = "-", remove = FALSE)
# Attach geographic distances and drop pairs with no community distance.
Hakai.2016.distance.18S <- dist18S16.distance %>%
  left_join(Hakai.geographic.distance, by = "Site.Pair") %>%
  filter(!is.na(Community_Distance))
# Distance-decay scatterplot with a linear fit for 2016 microeukaryotes.
BC16 <- Hakai.2016.distance.18S %>%
  drop_na(Geog_Distance, Community_Distance) %>%
  ggplot(aes(x = Geog_Distance, y = Community_Distance)) +
  geom_point(alpha = 0.25) +
  geom_smooth(method = lm) +
  theme_classic() +
  labs(x = "Geographic distance (km)", y = "B-C dissimilarity\n2016 microeukaryotes") +
  ylim(0, 1) + xlim(0, 41)
# ---- 2017: ASV-level 18S community distance vs. geographic distance ----
year <- 2017
# Load the Bray-Curtis distance matrix and its sample metadata.
bc_18S17_ASV <- read_csv(paste0("R_Code_and_Analysis/mantel/ASV_18S_", year, "_braycurtis.csv"))
bc_18S17_meta <- read_csv(paste0("R_Code_and_Analysis/mantel/ASV_18S_", year, "_metadata.csv"))
# Reshape the square matrix to long format (one row per sample pair).
bc_18S17_ASV$Sites1 <- colnames(bc_18S17_ASV)
dist18S17.collapse <- gather(bc_18S17_ASV, Sites2, Community_Distance, -Sites1)
# Translate sample labels to site names via the metadata.
dist18S17.sites <- dist18S17.collapse %>%
  left_join(select(bc_18S17_meta, site, site_quadrat_id, labels), by = c("Sites1" = "labels")) %>%
  left_join(select(bc_18S17_meta, site, site_quadrat_id, labels), by = c("Sites2" = "labels")) %>%
  select(Sites1 = site.x, Sites2 = site.y, Community_Distance)
# Build a "siteA-siteB" key matching Hakai.geographic.distance$Site.Pair.
dist18S17.distance <- unite(dist18S17.sites, "Site.Pair", Sites1, Sites2, sep = "-", remove = FALSE)
# Attach geographic distances and drop pairs with no community distance.
Hakai.2017.distance.18S <- dist18S17.distance %>%
  left_join(Hakai.geographic.distance, by = "Site.Pair") %>%
  filter(!is.na(Community_Distance))
# Distance-decay scatterplot with a linear fit for 2017 microeukaryotes.
BC17 <- Hakai.2017.distance.18S %>%
  drop_na(Geog_Distance, Community_Distance) %>%
  ggplot(aes(x = Geog_Distance, y = Community_Distance)) +
  geom_point(alpha = 0.25) +
  geom_smooth(method = lm) +
  theme_classic() +
  labs(x = "Geographic distance (km)", y = "B-C dissimilarity\n2017 microeukaryotes") +
  ylim(0, 1) + xlim(0, 41)
# ---- 2018: ASV-level 18S community distance vs. geographic distance ----
year <- 2018
# Load the Bray-Curtis distance matrix and its sample metadata.
bc_18S18_ASV <- read_csv(paste0("R_Code_and_Analysis/mantel/ASV_18S_", year, "_braycurtis.csv"))
bc_18S18_meta <- read_csv(paste0("R_Code_and_Analysis/mantel/ASV_18S_", year, "_metadata.csv"))
# Reshape the square matrix to long format (one row per sample pair).
bc_18S18_ASV$Sites1 <- colnames(bc_18S18_ASV)
dist18S18.collapse <- gather(bc_18S18_ASV, Sites2, Community_Distance, -Sites1)
# Translate sample labels to site names via the metadata.
dist18S18.sites <- dist18S18.collapse %>%
  left_join(select(bc_18S18_meta, site, site_quadrat_id, labels), by = c("Sites1" = "labels")) %>%
  left_join(select(bc_18S18_meta, site, site_quadrat_id, labels), by = c("Sites2" = "labels")) %>%
  select(Sites1 = site.x, Sites2 = site.y, Community_Distance)
# Build a "siteA-siteB" key matching Hakai.geographic.distance$Site.Pair.
dist18S18.distance <- unite(dist18S18.sites, "Site.Pair", Sites1, Sites2, sep = "-", remove = FALSE)
# Attach geographic distances and drop pairs with no community distance.
Hakai.2018.distance.18S <- dist18S18.distance %>%
  left_join(Hakai.geographic.distance, by = "Site.Pair") %>%
  filter(!is.na(Community_Distance))
# Distance-decay scatterplot with a linear fit for 2018 microeukaryotes.
BC18 <- Hakai.2018.distance.18S %>%
  drop_na(Geog_Distance, Community_Distance) %>%
  ggplot(aes(x = Geog_Distance, y = Community_Distance)) +
  geom_point(alpha = 0.25) +
  geom_smooth(method = lm) +
  theme_classic() +
  labs(x = "Geographic distance (km)", y = "B-C dissimilarity\n2018 microeukaryotes") +
  ylim(0, 1) + xlim(0, 41)
# NOTE(review): windows() only works on Windows; dev.new(width = 12,
# height = 3) would be the cross-platform equivalent.
windows(12, 3)
# Compose the four yearly ASV-level microeukaryote panels into one row with
# a shared title, then save.
title <- ggdraw() + draw_label("ASV level", fontface = 'bold', size = 14, x = 0.5, hjust = 0)
ASV_18S_raw <- cowplot::plot_grid(BC15, BC16, BC17, BC18, ncol = 4)
# Bug fix: this previously combined an undefined object `plots`; the title
# strip must be stacked over ASV_18S_raw.
ASV_18S <- plot_grid(title, ASV_18S_raw, ncol = 1, rel_heights = c(0.05, 1))
# Pass the composed plot explicitly rather than relying on last_plot().
ggsave("R_Code_and_Analysis/distance_decay/BCdecay_microeuk_ASV.png",
       plot = ASV_18S, width = 12, height = 5)
## GENUS LEVEL (18S, microeukaryotes)
# ---- 2015: genus-level 18S community distance vs. geographic distance ----
year <- 2015
# Load the Bray-Curtis distance matrix and its sample metadata.
bc_18S15_genus <- read_csv(paste0("R_Code_and_Analysis/mantel/genus_18S_", year, "_braycurtis.csv"))
bc_18S15_meta <- read_csv(paste0("R_Code_and_Analysis/mantel/genus_18S_", year, "_metadata.csv"))
# Reshape the square matrix to long format (one row per sample pair).
bc_18S15_genus$Sites1 <- colnames(bc_18S15_genus)
dist18S15.collapse <- gather(bc_18S15_genus, Sites2, Community_Distance, -Sites1)
# Translate sample labels to site names via the metadata.
dist18S15.sites <- dist18S15.collapse %>%
  left_join(select(bc_18S15_meta, site, site_quadrat_id, labels), by = c("Sites1" = "labels")) %>%
  left_join(select(bc_18S15_meta, site, site_quadrat_id, labels), by = c("Sites2" = "labels")) %>%
  select(Sites1 = site.x, Sites2 = site.y, Community_Distance)
# Build a "siteA-siteB" key matching Hakai.geographic.distance$Site.Pair.
dist18S15.distance <- unite(dist18S15.sites, "Site.Pair", Sites1, Sites2, sep = "-", remove = FALSE)
# Attach geographic distances and drop pairs with no community distance.
Hakai.2015.distance.18S <- dist18S15.distance %>%
  left_join(Hakai.geographic.distance, by = "Site.Pair") %>%
  filter(!is.na(Community_Distance))
# Distance-decay scatterplot with a linear fit for 2015 microeukaryotes.
BC15 <- Hakai.2015.distance.18S %>%
  drop_na(Geog_Distance, Community_Distance) %>%
  ggplot(aes(x = Geog_Distance, y = Community_Distance)) +
  geom_point(alpha = 0.25) +
  geom_smooth(method = lm) +
  theme_classic() +
  labs(x = "Geographic distance (km)", y = "B-C dissimilarity\n2015 microeukaryotes") +
  ylim(0, 1) + xlim(0, 41)
# ---- 2016: genus-level 18S community distance vs. geographic distance ----
year <- 2016
# Load the Bray-Curtis distance matrix and its sample metadata.
bc_18S16_genus <- read_csv(paste0("R_Code_and_Analysis/mantel/genus_18S_", year, "_braycurtis.csv"))
bc_18S16_meta <- read_csv(paste0("R_Code_and_Analysis/mantel/genus_18S_", year, "_metadata.csv"))
# Reshape the square matrix to long format (one row per sample pair).
bc_18S16_genus$Sites1 <- colnames(bc_18S16_genus)
dist18S16.collapse <- gather(bc_18S16_genus, Sites2, Community_Distance, -Sites1)
# Translate sample labels to site names via the metadata.
dist18S16.sites <- dist18S16.collapse %>%
  left_join(select(bc_18S16_meta, site, site_quadrat_id, labels), by = c("Sites1" = "labels")) %>%
  left_join(select(bc_18S16_meta, site, site_quadrat_id, labels), by = c("Sites2" = "labels")) %>%
  select(Sites1 = site.x, Sites2 = site.y, Community_Distance)
# Build a "siteA-siteB" key matching Hakai.geographic.distance$Site.Pair.
dist18S16.distance <- unite(dist18S16.sites, "Site.Pair", Sites1, Sites2, sep = "-", remove = FALSE)
# Attach geographic distances and drop pairs with no community distance.
Hakai.2016.distance.18S <- dist18S16.distance %>%
  left_join(Hakai.geographic.distance, by = "Site.Pair") %>%
  filter(!is.na(Community_Distance))
# Distance-decay scatterplot with a linear fit for 2016 microeukaryotes.
BC16 <- Hakai.2016.distance.18S %>%
  drop_na(Geog_Distance, Community_Distance) %>%
  ggplot(aes(x = Geog_Distance, y = Community_Distance)) +
  geom_point(alpha = 0.25) +
  geom_smooth(method = lm) +
  theme_classic() +
  labs(x = "Geographic distance (km)", y = "B-C dissimilarity\n2016 microeukaryotes") +
  ylim(0, 1) + xlim(0, 41)
# ---- 2017: genus-level 18S community distance vs. geographic distance ----
year <- 2017
# Load the Bray-Curtis distance matrix and its sample metadata.
bc_18S17_genus <- read_csv(paste0("R_Code_and_Analysis/mantel/genus_18S_", year, "_braycurtis.csv"))
bc_18S17_meta <- read_csv(paste0("R_Code_and_Analysis/mantel/genus_18S_", year, "_metadata.csv"))
# Reshape the square matrix to long format (one row per sample pair).
bc_18S17_genus$Sites1 <- colnames(bc_18S17_genus)
dist18S17.collapse <- gather(bc_18S17_genus, Sites2, Community_Distance, -Sites1)
# Translate sample labels to site names via the metadata.
dist18S17.sites <- dist18S17.collapse %>%
  left_join(select(bc_18S17_meta, site, site_quadrat_id, labels), by = c("Sites1" = "labels")) %>%
  left_join(select(bc_18S17_meta, site, site_quadrat_id, labels), by = c("Sites2" = "labels")) %>%
  select(Sites1 = site.x, Sites2 = site.y, Community_Distance)
# Build a "siteA-siteB" key matching Hakai.geographic.distance$Site.Pair.
dist18S17.distance <- unite(dist18S17.sites, "Site.Pair", Sites1, Sites2, sep = "-", remove = FALSE)
# Attach geographic distances and drop pairs with no community distance.
Hakai.2017.distance.18S <- dist18S17.distance %>%
  left_join(Hakai.geographic.distance, by = "Site.Pair") %>%
  filter(!is.na(Community_Distance))
# Distance-decay scatterplot with a linear fit for 2017 microeukaryotes.
BC17 <- Hakai.2017.distance.18S %>%
  drop_na(Geog_Distance, Community_Distance) %>%
  ggplot(aes(x = Geog_Distance, y = Community_Distance)) +
  geom_point(alpha = 0.25) +
  geom_smooth(method = lm) +
  theme_classic() +
  labs(x = "Geographic distance (km)", y = "B-C dissimilarity\n2017 microeukaryotes") +
  ylim(0, 1) + xlim(0, 41)
# ---- 2018: genus-level 18S community distance vs. geographic distance ----
year <- 2018
# Load the Bray-Curtis distance matrix and its sample metadata.
bc_18S18_genus <- read_csv(paste0("R_Code_and_Analysis/mantel/genus_18S_", year, "_braycurtis.csv"))
bc_18S18_meta <- read_csv(paste0("R_Code_and_Analysis/mantel/genus_18S_", year, "_metadata.csv"))
# Reshape the square matrix to long format (one row per sample pair).
bc_18S18_genus$Sites1 <- colnames(bc_18S18_genus)
dist18S18.collapse <- gather(bc_18S18_genus, Sites2, Community_Distance, -Sites1)
# Translate sample labels to site names via the metadata.
dist18S18.sites <- dist18S18.collapse %>%
  left_join(select(bc_18S18_meta, site, site_quadrat_id, labels), by = c("Sites1" = "labels")) %>%
  left_join(select(bc_18S18_meta, site, site_quadrat_id, labels), by = c("Sites2" = "labels")) %>%
  select(Sites1 = site.x, Sites2 = site.y, Community_Distance)
# Build a "siteA-siteB" key matching Hakai.geographic.distance$Site.Pair.
dist18S18.distance <- unite(dist18S18.sites, "Site.Pair", Sites1, Sites2, sep = "-", remove = FALSE)
# Attach geographic distances and drop pairs with no community distance.
Hakai.2018.distance.18S <- dist18S18.distance %>%
  left_join(Hakai.geographic.distance, by = "Site.Pair") %>%
  filter(!is.na(Community_Distance))
# Distance-decay scatterplot with a linear fit for 2018 microeukaryotes.
BC18 <- Hakai.2018.distance.18S %>%
  drop_na(Geog_Distance, Community_Distance) %>%
  ggplot(aes(x = Geog_Distance, y = Community_Distance)) +
  geom_point(alpha = 0.25) +
  geom_smooth(method = lm) +
  theme_classic() +
  labs(x = "Geographic distance (km)", y = "B-C dissimilarity\n2018 microeukaryotes") +
  ylim(0, 1) + xlim(0, 41)
# NOTE(review): windows() only works on Windows; dev.new(width = 12,
# height = 3) would be the cross-platform equivalent.
windows(12, 3)
# Compose the four yearly genus-level microeukaryote panels into one row
# with a shared title, then save.  The composed plot is passed to ggsave()
# explicitly: relying on ggplot2::last_plot() after cowplot::plot_grid() is
# fragile and can save the wrong (last individual) panel.
title <- ggdraw() + draw_label("genus level", fontface = 'bold', size = 14, x = 0.5, hjust = 0)
genus_18S_raw <- cowplot::plot_grid(BC15, BC16, BC17, BC18, ncol = 4)
genus_18S <- plot_grid(title, genus_18S_raw, ncol = 1, rel_heights = c(0.05, 1))
ggsave("R_Code_and_Analysis/distance_decay/BCdecay_microeuk_genus.png",
       plot = genus_18S, width = 12, height = 5)
### FAMILY LEVEL
# pick a year
year <- 2015
#load 18S microbial distance matrix family
bc_18S15_family <- read_csv(paste0("R_Code_and_Analysis/mantel/family_18S_",year,"_braycurtis.csv") )
# bc_18S15_family <- bc_18S15_family %>%
# dplyr::rename("sample" = "X1")
bc_18S15_meta <- read_csv(paste0("R_Code_and_Analysis/mantel/family_18S_",year,"_metadata.csv") )
###Now make the distance matrices long
# Keep sample labels in a column, then melt the square distance matrix into
# long pairwise form.
bc_18S15_family$Sites1 <- colnames(bc_18S15_family)
dist18S15.collapse <- bc_18S15_family %>%
gather(Sites2, Community_Distance, -Sites1)
# add sites from metadata
dist18S15.sites <- left_join( dist18S15.collapse, select(bc_18S15_meta, site, site_quadrat_id, labels), by=c("Sites1" = "labels") )
dist18S15.sites <- left_join( dist18S15.sites, select(bc_18S15_meta, site, site_quadrat_id, labels), by=c("Sites2" = "labels") )
# Keep only the site name of each pair member plus the community distance.
dist18S15.sites <- dist18S15.sites %>% select( Sites1=site.x, Sites2=site.y, Community_Distance )
dist18S15.distance <- dist18S15.sites %>%
# separate(Sites1, c("Site_1", "Sample_1"), sep = "-", remove = TRUE)%>%
# separate(Sites2, c("Site_2", "Sample_2"), sep = "-", remove = TRUE)%>%
# "Site1-Site2" key used to join against the geographic distances.
unite("Site.Pair", Sites1, Sites2, sep = "-", remove = FALSE)
### Unite into one data frame
Hakai.2015.distance.18S <- left_join(dist18S15.distance,Hakai.geographic.distance, by = "Site.Pair") %>%
filter( !is.na(Community_Distance) )
# ### plots
# Graph1 <- Hakai.2015.distance.18S %>%
# drop_na(Geog_Distance) %>%
# drop_na(Community_Distance)%>%
# ggplot(aes(x = Geog_Distance, y = Community_Distance))+
# theme_classic()+
# geom_point(aes(colour = Sites1),alpha=0.25)+
# xlab("Geographic distance (km)")+
# ylab("B-C dissimilarity 2015 microeukaryotes")+
# geom_smooth(method = lm)
# Graph2 <- Hakai.2015.distance.18S %>%
# drop_na(Geog_Distance) %>%
# drop_na(Community_Distance)%>%
# ggplot(aes(x = Geog_Distance, y = Community_Distance))+
# theme_classic()+
# geom_point(aes(colour = Sites2),alpha=0.25)+
# xlab("Geographic distance (km)")+
# ylab("B-C dissimilarity 2015 microeukaryotes")+
# geom_smooth(method = lm)
# plot_grid(Graph1, Graph2, nrow = 2)
# Distance-decay scatter for 2015 (family level).
BC15 <- Hakai.2015.distance.18S %>%
drop_na(Geog_Distance) %>%
drop_na(Community_Distance)%>%
ggplot(aes(x = Geog_Distance, y = Community_Distance))+
theme_classic()+
geom_point(alpha=0.25)+
xlab("Geographic distance (km)")+
ylab("B-C dissimilarity\n2015 microeukaryotes")+
ylim( c(0,1) ) + xlim( c(0,41) ) +
geom_smooth(method = lm)
# pick a year
year <- 2016
#load 18S microbial distance matrix family
bc_18S16_family <- read_csv(paste0("R_Code_and_Analysis/mantel/family_18S_",year,"_braycurtis.csv") )
# bc_18S16_family <- bc_18S16_family %>%
# dplyr::rename("sample" = "X1")
bc_18S16_meta <- read_csv(paste0("R_Code_and_Analysis/mantel/family_18S_",year,"_metadata.csv") )
###Now make the distance matrices long
# Same melt-and-annotate pipeline as 2015, applied to the 2016 matrix.
bc_18S16_family$Sites1 <- colnames(bc_18S16_family)
dist18S16.collapse <- bc_18S16_family %>%
gather(Sites2, Community_Distance, -Sites1)
# add sites from metadata
dist18S16.sites <- left_join( dist18S16.collapse, select(bc_18S16_meta, site, site_quadrat_id, labels), by=c("Sites1" = "labels") )
dist18S16.sites <- left_join( dist18S16.sites, select(bc_18S16_meta, site, site_quadrat_id, labels), by=c("Sites2" = "labels") )
dist18S16.sites <- dist18S16.sites %>% select( Sites1=site.x, Sites2=site.y, Community_Distance )
dist18S16.distance <- dist18S16.sites %>%
# separate(Sites1, c("Site_1", "Sample_1"), sep = "-", remove = TRUE)%>%
# separate(Sites2, c("Site_2", "Sample_2"), sep = "-", remove = TRUE)%>%
# "Site1-Site2" key used to join against the geographic distances.
unite("Site.Pair", Sites1, Sites2, sep = "-", remove = FALSE)
### Unite into one data frame
Hakai.2016.distance.18S <- left_join(dist18S16.distance,Hakai.geographic.distance, by = "Site.Pair") %>%
filter( !is.na(Community_Distance) )
# ### plots
# Graph1 <- Hakai.2016.distance.18S %>%
# drop_na(Geog_Distance) %>%
# drop_na(Community_Distance)%>%
# ggplot(aes(x = Geog_Distance, y = Community_Distance))+
# theme_classic()+
# geom_point(aes(colour = Sites1),alpha=0.25)+
# xlab("Geographic distance (km)")+
# ylab("B-C dissimilarity 2016 microeukaryotes")+
# geom_smooth(method = lm)
# Graph2 <- Hakai.2016.distance.18S %>%
# drop_na(Geog_Distance) %>%
# drop_na(Community_Distance)%>%
# ggplot(aes(x = Geog_Distance, y = Community_Distance))+
# theme_classic()+
# geom_point(aes(colour = Sites2),alpha=0.25)+
# xlab("Geographic distance (km)")+
# ylab("B-C dissimilarity 2016 microeukaryotes")+
# geom_smooth(method = lm)
# plot_grid(Graph1, Graph2, nrow = 2)
# Distance-decay scatter for 2016 (family level).
BC16 <- Hakai.2016.distance.18S %>%
drop_na(Geog_Distance) %>%
drop_na(Community_Distance)%>%
ggplot(aes(x = Geog_Distance, y = Community_Distance))+
theme_classic()+
geom_point(alpha=0.25)+
xlab("Geographic distance (km)")+
ylab("B-C dissimilarity\n2016 microeukaryotes")+
ylim( c(0,1) ) + xlim( c(0,41) ) +
geom_smooth(method = lm)
# pick a year
year <- 2017
#load 18S microbial distance matrix family
bc_18S17_family <- read_csv(paste0("R_Code_and_Analysis/mantel/family_18S_",year,"_braycurtis.csv") )
# bc_18S17_family <- bc_18S17_family %>%
# dplyr::rename("sample" = "X1")
bc_18S17_meta <- read_csv(paste0("R_Code_and_Analysis/mantel/family_18S_",year,"_metadata.csv") )
###Now make the distance matrices long
# Same melt-and-annotate pipeline as 2015, applied to the 2017 matrix.
bc_18S17_family$Sites1 <- colnames(bc_18S17_family)
dist18S17.collapse <- bc_18S17_family %>%
gather(Sites2, Community_Distance, -Sites1)
# add sites from metadata
dist18S17.sites <- left_join( dist18S17.collapse, select(bc_18S17_meta, site, site_quadrat_id, labels), by=c("Sites1" = "labels") )
dist18S17.sites <- left_join( dist18S17.sites, select(bc_18S17_meta, site, site_quadrat_id, labels), by=c("Sites2" = "labels") )
dist18S17.sites <- dist18S17.sites %>% select( Sites1=site.x, Sites2=site.y, Community_Distance )
dist18S17.distance <- dist18S17.sites %>%
# separate(Sites1, c("Site_1", "Sample_1"), sep = "-", remove = TRUE)%>%
# separate(Sites2, c("Site_2", "Sample_2"), sep = "-", remove = TRUE)%>%
# "Site1-Site2" key used to join against the geographic distances.
unite("Site.Pair", Sites1, Sites2, sep = "-", remove = FALSE)
### Unite into one data frame
Hakai.2017.distance.18S <- left_join(dist18S17.distance,Hakai.geographic.distance, by = "Site.Pair") %>%
filter( !is.na(Community_Distance) )
# ### plots
# Graph1 <- Hakai.2017.distance.18S %>%
# drop_na(Geog_Distance) %>%
# drop_na(Community_Distance)%>%
# ggplot(aes(x = Geog_Distance, y = Community_Distance))+
# theme_classic()+
# geom_point(aes(colour = Sites1),alpha=0.25)+
# xlab("Geographic distance (km)")+
# ylab("B-C dissimilarity 2017 microeukaryotes")+
# geom_smooth(method = lm)
# Graph2 <- Hakai.2017.distance.18S %>%
# drop_na(Geog_Distance) %>%
# drop_na(Community_Distance)%>%
# ggplot(aes(x = Geog_Distance, y = Community_Distance))+
# theme_classic()+
# geom_point(aes(colour = Sites2),alpha=0.25)+
# xlab("Geographic distance (km)")+
# ylab("B-C dissimilarity 2017 microeukaryotes")+
# geom_smooth(method = lm)
# plot_grid(Graph1, Graph2, nrow = 2)
# Distance-decay scatter for 2017 (family level).
BC17 <- Hakai.2017.distance.18S %>%
drop_na(Geog_Distance) %>%
drop_na(Community_Distance)%>%
ggplot(aes(x = Geog_Distance, y = Community_Distance))+
theme_classic()+
geom_point(alpha=0.25)+
xlab("Geographic distance (km)")+
ylab("B-C dissimilarity\n2017 microeukaryotes")+
ylim( c(0,1) ) + xlim( c(0,41) ) +
geom_smooth(method = lm)
# pick a year
year <- 2018
#load 18S microbial distance matrix family
bc_18S18_family <- read_csv(paste0("R_Code_and_Analysis/mantel/family_18S_",year,"_braycurtis.csv") )
# bc_18S18_family <- bc_18S18_family %>%
# dplyr::rename("sample" = "X1")
bc_18S18_meta <- read_csv(paste0("R_Code_and_Analysis/mantel/family_18S_",year,"_metadata.csv") )
###Now make the distance matrices long
# Same melt-and-annotate pipeline as 2015, applied to the 2018 matrix.
# NOTE(review): this rebinds dist18S18.* and BC18, previously used for the
# genus-level figure above; the genus panel must be saved before this runs.
bc_18S18_family$Sites1 <- colnames(bc_18S18_family)
dist18S18.collapse <- bc_18S18_family %>%
gather(Sites2, Community_Distance, -Sites1)
# add sites from metadata
dist18S18.sites <- left_join( dist18S18.collapse, select(bc_18S18_meta, site, site_quadrat_id, labels), by=c("Sites1" = "labels") )
dist18S18.sites <- left_join( dist18S18.sites, select(bc_18S18_meta, site, site_quadrat_id, labels), by=c("Sites2" = "labels") )
dist18S18.sites <- dist18S18.sites %>% select( Sites1=site.x, Sites2=site.y, Community_Distance )
dist18S18.distance <- dist18S18.sites %>%
# separate(Sites1, c("Site_1", "Sample_1"), sep = "-", remove = TRUE)%>%
# separate(Sites2, c("Site_2", "Sample_2"), sep = "-", remove = TRUE)%>%
unite("Site.Pair", Sites1, Sites2, sep = "-", remove = FALSE)
### Unite into one data frame
Hakai.2018.distance.18S <- left_join(dist18S18.distance,Hakai.geographic.distance, by = "Site.Pair") %>%
filter( !is.na(Community_Distance) )
# ### plots
# Graph1 <- Hakai.2018.distance.18S %>%
# drop_na(Geog_Distance) %>%
# drop_na(Community_Distance)%>%
# ggplot(aes(x = Geog_Distance, y = Community_Distance))+
# theme_classic()+
# geom_point(aes(colour = Sites1),alpha=0.25)+
# xlab("Geographic distance (km)")+
# ylab("B-C dissimilarity 2018 microeukaryotes")+
# geom_smooth(method = lm)
# Graph2 <- Hakai.2018.distance.18S %>%
# drop_na(Geog_Distance) %>%
# drop_na(Community_Distance)%>%
# ggplot(aes(x = Geog_Distance, y = Community_Distance))+
# theme_classic()+
# geom_point(aes(colour = Sites2),alpha=0.25)+
# xlab("Geographic distance (km)")+
# ylab("B-C dissimilarity 2018 microeukaryotes")+
# geom_smooth(method = lm)
# plot_grid(Graph1, Graph2, nrow = 2)
# Distance-decay scatter for 2018 (family level).
BC18 <- Hakai.2018.distance.18S %>%
drop_na(Geog_Distance) %>%
drop_na(Community_Distance)%>%
ggplot(aes(x = Geog_Distance, y = Community_Distance))+
theme_classic()+
geom_point(alpha=0.25)+
xlab("Geographic distance (km)")+
ylab("B-C dissimilarity\n2018 microeukaryotes")+
ylim( c(0,1) ) + xlim( c(0,41) ) +
geom_smooth(method = lm)
## Assemble and save the family-level microeukaryote distance-decay panel
## (BC15..BC18 now hold the family-level per-year plots).
if (.Platform$OS.type == "windows") windows(12, 3)  # interactive preview device; no-op on other OSes
title <- ggdraw() + draw_label("family level", fontface = 'bold', size = 14, x = 0.5, hjust = 0)
family_18S_raw <- cowplot::plot_grid( BC15, BC16, BC17, BC18, ncol=4)
family_18S <- plot_grid(title, family_18S_raw, ncol = 1, rel_heights = c(0.05, 1))  # rel_heights controls the title band height
# BUG FIX: without `plot =`, ggsave() writes last_plot() (the single BC18
# panel), not the assembled 4-panel grid.
ggsave(paste0("R_Code_and_Analysis/distance_decay/BCdecay_microeuk_family.png"),
       plot = family_18S, width = 12, height = 5)
######################################
### saving all at the finest level ###
######################################
title_ASV <- ggdraw() + draw_label("Finest taxonomic level (ASV for microbes)", fontface = 'bold', size = 18, x = 0.35, hjust = 0)
finest_ASV <- cowplot::plot_grid(ASV_16S_raw, ASV_18S_raw, macro, ncol = 1)
finest_ASV_title <- plot_grid(title_ASV, finest_ASV, ncol = 1, rel_heights = c(0.05, 1))
finest_ASV_title
# BUG FIX: pass the combined figure explicitly; ggsave() would otherwise save
# last_plot(), i.e. the most recent single ggplot, not this cowplot grid.
ggsave(paste0("R_Code_and_Analysis/distance_decay/BCdecay_all_finest_ASV.png"),
       plot = finest_ASV_title, width = 12, height = 10)
title_genus <- ggdraw() + draw_label("Finest taxonomic level (genus for microbes)", fontface = 'bold', size = 18, x = 0.35, hjust = 0)
# BUG FIX: this figure previously reused the ASV-level panels (copy-paste),
# making it pixel-identical to the ASV figure despite its "genus" title.
# NOTE(review): assumes genus_16S_raw is built earlier in this script,
# mirroring genus_18S_raw -- confirm.
finest_genus <- cowplot::plot_grid(genus_16S_raw, genus_18S_raw, macro, ncol = 1)
finest_genus_title <- plot_grid(title_genus, finest_genus, ncol = 1, rel_heights = c(0.05, 1))
finest_genus_title
# BUG FIX: save this figure explicitly rather than last_plot().
ggsave(paste0("R_Code_and_Analysis/distance_decay/BCdecay_all_finest_genus.png"),
       plot = finest_genus_title, width = 12, height = 10)
########################################
### saving all at the coarsest level ###
########################################
title_family <- ggdraw() + draw_label("Family level", fontface = 'bold', size = 18, x = 0.5, hjust = 0)
family <- cowplot::plot_grid(family_16S_raw, family_18S_raw, macro, ncol = 1)
family_title <- plot_grid(title_family, family, ncol = 1, rel_heights = c(0.05, 1))
family_title
# BUG FIX: save the combined figure explicitly instead of relying on
# last_plot(), which points at the most recent single ggplot.
ggsave(paste0("R_Code_and_Analysis/distance_decay/BCdecay_all_family.png"),
       plot = family_title, width = 12, height = 10)
|
1434b2f21f3562bc6343cbfc9b3fc17cbfa4cd4d
|
073892c868e40d709be048603cee7c5ed549dd6d
|
/code/paper/figures/1/main.r
|
ea73181b298733cbdb48f7af7e72aee3c68c2316
|
[] |
no_license
|
Ran485/TFbenchmark
|
9d7d0a3372841080f53ec1beeca9a65a6f1c510a
|
1c7b9f11c5ba2aa7afdeda768e3c99e2bde18607
|
refs/heads/master
| 2021-10-28T10:12:49.078369
| 2019-04-23T10:58:32
| 2019-04-23T10:58:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,980
|
r
|
main.r
|
# NOTE(review): clearing the workspace and hard-coding setwd() make this
# script machine-specific; every user must edit `home`.
rm(list = ls())
home = '/Volumes/GoogleDrive/My Drive/projects/TFbenchmark/'
setwd(home)
source('code/lib/utils.r')
# Load network
# TF-target interactions; columns 5:8 are per-source logical evidence flags,
# renamed below to human-readable dataset names.
network = read.csv(file = 'data/TF_target_sources/omnipath_scores/database_20180915.csv', stringsAsFactors = F)
names(network)[5:8] = c('curated', 'ChIPseq', 'TF binding motif', 'inferred GTEx')
# Number of unique TF-target pairs (printed when run interactively).
nrow(unique(network[, 1:2]))
# retrieve TF
# For each evidence type, the set of TFs with at least one supported target.
x = lapply(names(network)[5:8], function(evidence){
unique(network[ network[,evidence] , ]$TF)
})
names(x) = gsub('is_evidence_', '', names(network)[5:8])
sapply(x, length)
# Long (TF, dataset) table -> wide 0/1 incidence matrix for the upset plot.
df = melt(x)
names(df) = c('TF', 'dataset')
df$value = 1
m = dcast(df, TF~dataset, fill = 0)
names(m)[1] = 'Identifier'
# Figure 1b: overlap of TF coverage between the four evidence types.
png('paper/figures/Figure1/Figure1b.png', res = 300, width = 1600, height = 1200)
upset(m, sets = colnames(m)[-1], main.bar.color = 'gray20', sets.bar.color = my_color_palette$EMBL[4],
empty.intersections = F, set_size.angles = 90, number.angles = 25,
order.by = "freq", point.size = 2.5, line.size = 0.5, mb.ratio = c(.6, .4), text.scale = c(1.5, 1.2, 1.5, 1.2, 1.5, 1),
mainbar.y.label = 'shared TFs', sets.x.label = 'total TFs') # 9000x3100
dev.off()
## TF sets grouped by supporting evidence, drawn from the TF x evidence
## incidence matrix `m` built above (column 1 = Identifier, rest are 0/1).
n_evidence <- rowSums(m[, -1])
ids_where <- function(keep) as.character(m[which(keep), "Identifier"])
TFlist <- list()
# TFs supported by exactly one evidence type.
TFlist$only_inferred_GTEx <- ids_where(n_evidence == 1 & m[, "inferred GTEx"] == 1)
TFlist$only_chip          <- ids_where(n_evidence == 1 & m[, "ChIPseq"] == 1)
TFlist$only_curated       <- ids_where(n_evidence == 1 & m[, "curated"] == 1)
TFlist$only_tbfs          <- ids_where(n_evidence == 1 & m[, "TF binding motif"] == 1)
# TFs supported by a given evidence type (not exclusive).
TFlist$inferred <- ids_where(m[, "inferred GTEx"] == 1)
TFlist$curated  <- ids_where(m[, "curated"] == 1)
TFlist$ChIPseq  <- ids_where(m[, "ChIPseq"] == 1)
TFlist$TFBS     <- ids_where(m[, "TF binding motif"] == 1)
# TFs supported by multiple evidence types.
TFlist$at_least_3_evidences <- ids_where(n_evidence > 2)
TFlist$at_least_2_evidences <- ids_where(n_evidence > 1)
TFlist$at_least_4_evidences <- ids_where(n_evidence > 3)
# retrieve TFTG
# Same incidence analysis as above, but at the TF-target ("TFTG") pair level.
network$TFtarget = paste(network$TF, network$target)
x = lapply(names(network)[5:8], function(evidence){
unique(network[ network[,evidence] , ]$TFtarget)
})
names(x) = gsub('is_evidence_', '', names(network)[5:8])
sapply(x, length)
df = melt(x)
names(df) = c('TFtarget', 'dataset')
df$value = 1
# NOTE: this overwrites the TF-level incidence matrix `m` built above;
# TFlist must be extracted before this point.
m = dcast(df, TFtarget~dataset, fill = 0)
names(m)[1] = 'Identifier'
# Figure 1d: overlap of TF-target coverage between the four evidence types.
png('paper/figures/Figure1/Figure1d.png', res = 300, width = 2000, height = 1200)
upset(m, sets = colnames(m)[-c(1)], main.bar.color = 'gray20', sets.bar.color = my_color_palette$EMBL[2], scale.intersections = 'log2',
empty.intersections = F, set_size.angles = 90, number.angles = 21,
order.by = "freq", point.size = 2.5, line.size = 0.5, mb.ratio = c(.6, .4), text.scale = c(1.5, 1.2, 1.5, 1.2, 1.5, 1),
mainbar.y.label = 'shared TF-TG', sets.x.label = 'total TF-TG') # 9000x3100
dev.off()
# enrichment
# Load TF-property genesets (TFrole_genesets) and KEGG pathway genesets
# (KEGG_PATH) used for the enrichment panels below.
load(file = 'data/TF_info/TFrole_genesets.rdata')
load(file = 'data/annotations/KEGGpathways_SLAPE_MSigDB.rdata')
# TFs without an annotated regulatory effect / tissue specificity become
# explicit complement categories.
TFrole_genesets$regulatory_effect$unknown = setdiff(unlist(TFrole_genesets$TF_class), unlist(TFrole_genesets$regulatory_effect))
TFrole_genesets$tissue_of_expression$intermediate = setdiff(unlist(TFrole_genesets$TF_class), unlist(TFrole_genesets$tissue_of_expression))
# Provides analyse_genesets(), plot_enrichment(), plot_enrichment_grid().
source('code/paper/figures/1/lib_enrichment.r')
TFrole_features = unique(sapply(strsplit(names(TFrole_genesets), '\\.'), head, 1))
# Figure 1C
# Enrichment of the high-confidence TF set (>2 evidence types) in each
# TF-role geneset.
re3 = lapply(TFrole_genesets, analyse_genesets, genes = TFlist$at_least_3_evidences)
re3
plot_enrichment(re3,feature = 'regulatory_effect') + ggtitle('TFs covered by > 2 evidences')
plot_enrichment(re3) + ggtitle('TFs covered by > 2 evidences')
ggsave(filename = 'paper/figures/Figure1/Figure1c.png', dpi=300, width = 7, height = 3.5)
# Figure S1: enrichment of each TF evidence set against tissue-of-expression
# genesets (A), KEGG pathways (B) and all TF-role geneset families (C).
# Rename the "no_tissue-specific" category before testing (panel A).
TFrole_genesets$tissue_of_expression$expressed_in_most_tissues <- TFrole_genesets$tissue_of_expression$`no_tissue-specific`
TFrole_genesets$tissue_of_expression$`no_tissue-specific` <- NULL
# The TF sets compared in every panel, in display order.
evidence_sets <- c("at_least_3_evidences", "at_least_2_evidences", "at_least_4_evidences",
                   "only_inferred_GTEx", "ChIPseq", "curated", "TFBS")
re <- list()
# A: tissue-of-expression genesets.
re$tissues <- lapply(TFlist[evidence_sets], function(genes)
  analyse_genesets(geneset = TFrole_genesets$tissue_of_expression, genes = genes))
# B: KEGG pathway genesets.
re$kegg <- lapply(TFlist[evidence_sets], function(genes)
  analyse_genesets(geneset = KEGG_PATH$HGNC_SYMBOL, genes = genes))
# C: every TF-role geneset family, per evidence set.
re$domains <- lapply(TFlist[evidence_sets], function(genes)
  lapply(TFrole_genesets, analyse_genesets, genes = genes))
plot_enrichment_grid(re$tissues)
ggsave(filename = 'paper/figures/supplementary/FigureS1a.png', width = 15, height = 2.3)
plot_enrichment_grid(re$kegg)
ggsave(filename = 'paper/figures/supplementary/FigureS1b.png', width = 15, height = 10)
plot_enrichment_grid(lapply(re$domains, function(x) x$TF_class))
ggsave(filename = 'paper/figures/supplementary/FigureS1c.png', width = 15, height = 4)
|
3cc1b2a616fbc75c95827afd0e16074006f7f34a
|
bc42c76a961ef56d4d08a714c0eaabb4366a36a1
|
/R/NHFaux.R
|
ac4f10546dd66d319ec9f895496e5fb9fdd527a7
|
[] |
no_license
|
cran/IndTestPP
|
593ab1dc0ddb6addd008e80aed948d88058a240c
|
a628d5be9c314513541656d6e2ea28dd9bc91cee
|
refs/heads/master
| 2021-06-28T21:12:36.085070
| 2020-08-28T18:00:03
| 2020-08-28T18:00:03
| 64,703,962
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 248
|
r
|
NHFaux.R
|
# Average the prodN2() product term over all occurrence points that lie at
# least `r` away from both edges of the observation window [0, T].
# NOTE: the parameter named `T` shadows the TRUE shorthand inside this
# function; it is part of the exported interface and cannot be renamed.
NHFaux <- function(r, L, lambdaD, posD, typeD, T) {
  # Occurrence points far enough from the window edges.
  interior <- L[L >= r & L <= (T - r)]
  # Scaled intensity term: 1 - min(lambdaD)/lambdaD, elementwise.
  scaled <- 1 - min(lambdaD) / lambdaD
  # prodN2() (defined elsewhere in the package) evaluates the product term
  # at each interior point.
  terms <- sapply(interior, FUN = prodN2, r = r, L1D = scaled, posD = posD, typeD = typeD)
  sum(terms) / length(interior)
}
|
d1c15af287610caaf9c0d1c82ef69cff1bdd4e02
|
cc0254622f705d4049af62b843dcab0a3e393de1
|
/man/plotICC.Rd
|
fccedc3ff3642aa0cd92cb5ff99937c1dc0d9c1b
|
[] |
no_license
|
cran/eRm
|
88c4ff62cc445f4e8ad90a4fdffc00de4246716e
|
b54bd5930675dcfab50a10ec401b4eefa2990c91
|
refs/heads/master
| 2021-07-20T03:19:44.904031
| 2021-02-15T10:03:06
| 2021-02-15T10:03:06
| 17,695,687
| 4
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,241
|
rd
|
plotICC.Rd
|
\encoding{UTF-8}
\name{plotICC}
\alias{plotICC}
\alias{plotICC.Rm}
\alias{plotjointICC}
\alias{plotjointICC.dRm}
\title{ICC Plots}
\description{Plot functions for visualizing the item characteristic curves}
\usage{
\method{plotICC}{Rm}(object, item.subset = "all", empICC = NULL, empCI = NULL,
mplot = NULL, xlim = c(-4, 4), ylim = c(0, 1),
xlab = "Latent Dimension", ylab = "Probability to Solve", main=NULL,
col = NULL, lty = 1, legpos = "left", ask = TRUE, ...)
\method{plotjointICC}{dRm}(object, item.subset = "all", legend = TRUE,
xlim = c(-4, 4), ylim = c(0, 1), xlab = "Latent Dimension",
ylab = "Probability to Solve", lty = 1, legpos = "topleft",
main="ICC plot",col=NULL,...)
}
\arguments{
\item{object}{object of class \code{Rm} or \code{dRm}}
\item{item.subset}{Subset of items to be plotted. Either a numeric vector indicating
the column in \code{X} or a character vector indicating the column name.
If \code{"all"} (default), all items are plotted.}
\item{empICC}{Plotting the empirical ICCs for objects of class \code{dRm}.
If \code{empICC=NULL}
(the default) the empirical ICC is not drawn. Otherwise, \code{empICC} must be
specified as a list where the first element must be one of
\code{"raw"}, \code{"loess"}, \code{"tukey"}, \code{"kernel"}. The other optional elements are
\code{smooth} (numeric), \code{type} (line type for empirical ICCs,
useful values are \code{"p"} (default), \code{"l"}, and \code{"b"},
see graphics parameter \code{type} in \code{\link{plot.default}}),
\code{pch}, \code{col}, and \code{lty}, plotting `character', colour and linetype
(see \code{\link{par}}). See details and examples below.
}
\item{empCI}{Plotting confidence intervals for the the empirical ICCs.
If \code{empCI=NULL} (the default) no confidence intervals are drawn.
Otherwise, by specifying \code{empCI} as a list gives `exact' confidence
intervals for each point of the empirical ICC.
The optional elements of this list are \code{gamma}, the confidence level,
\code{col}, colour, and \code{lty}, line type. If \code{empCI} is specified
as an empty list,
the default values \code{empCI=list(gamma=0.95,col="red",lty="dotted")}
will be used.
}
\item{mplot}{if \code{NULL} the default setting is in effect. For models of class \code{dRm} this
is \code{mplot = TRUE}, i.e.,
the ICCs for up to 4 items are plotted in one figure. For \code{Rm}
models the default is \code{FALSE} (each item in one figure) but may be set to \code{TRUE}.
}
\item{xlab}{Label of the x-axis.}
\item{ylab}{Label of the y-axis.}
\item{xlim}{Range of person parameters.}
\item{ylim}{Range for probability to solve.}
\item{legend}{If \code{TRUE}, legend is provided, otherwise the ICCs are labeled.}
\item{col}{If not specified or \code{NULL}, line colors are determined automatically.
Otherwise, a scalar or vector with appropriate color specifications may be supplied
(see \code{\link{par}}).}
\item{lty}{Line type.}
\item{main}{Title of the plot.}
\item{legpos}{Position of the legend with possible values \code{"bottomright"},
\code{"bottom"}, \code{"bottomleft"}, \code{"left"}, \code{"topleft"}, \code{"top"},
\code{"topright"}, \code{"right"} and \code{"center"}.
If \code{FALSE} no legend is displayed.}
\item{ask}{If \code{TRUE} (the default) and the \code{R} session is interactive the user is asked for input,
before a new figure is drawn. \code{FALSE} is only useful if automated figure export is
in effect, e.g., when using \code{\link{Sweave}}.}
\item{\ldots}{Additional plot parameters.}
}
\details{Empirical ICCs for objects of class \code{dRm} can be plotted using the option \code{empICC}, a
list where the first element specifies the type of calculation of the empirical values.
If \code{empICC=list("raw", other specifications)}
relative frequencies of the positive responses are calculated for each rawscore group and plotted
at the position of the corresponding person parameter. The other options use the default versions
of various smoothers: \code{"tukey"} (see \code{\link{smooth}}), \code{"loess"} (see \code{\link{loess}}),
and \code{"kernel"} (see \code{\link{ksmooth}}). For \code{"loess"} and \code{"kernel"} a further
element, \code{smooth},
may be specified to control the span (default is 0.75) or the bandwith (default is 0.5),
respectively. For example, the specification could be \code{empirical = list("loess", smooth=0.9)}
or \code{empirical = list("kernel",smooth=2)}.
Higher values result in smoother estimates of the empirical ICCs.
The optional confidence intervals are obtained by a procedure first given in
Clopper and Pearson (1934) based on the beta distribution (see \code{\link{binom.test}}).
}
\note{For most of the plot options see \code{\link{plot}} and \code{\link{par}}.}
%\value{}
%\references{}
\author{Patrick Mair, Reinhold Hatzinger}
%\note{}
\seealso{\code{\link{plotGOF}}}
\examples{
\dontrun{
# Rating scale model, ICC plot for all items
rsm.res <- RSM(rsmdat)
thresholds(rsm.res)
plotICC(rsm.res)
# now items 1 to 4 in one figure without legends
plotICC(rsm.res, item.subset = 1:4, mplot = TRUE, legpos = FALSE)
# Rasch model for items 1 to 8 from raschdat1
# empirical ICCs displaying relative frequencies (default settings)
rm8.res <- RM(raschdat1[,1:8])
plotICC(rm8.res, empICC=list("raw"))
# the same but using different plotting styles
plotICC(rm8.res, empICC=list("raw",type="b",col="blue",lty="dotted"))
# kernel-smoothed empirical ICCs using bandwidth = 3
plotICC(rm8.res, empICC = list("kernel",smooth=3))
# raw empirical ICCs with confidence intervals
# displaying only items 2,3,7,8
plotICC(rm8.res, item.subset=c(2,3,7,8), empICC=list("raw"), empCI=list())
# Joint ICC plot for items 2, 6, 8, and 15 for a Rasch model
res <- RM(raschdat1)
plotjointICC(res, item.subset = c(2,6,8,15), legpos = "left")
}
}
\keyword{models}
|
ab2d8bf1b17ee5021885c98fe2ad980a8c177298
|
c65dac3d7161db24db2c963b2448c20339c421be
|
/example.r
|
5a5359f18fa5bcd16207a6d79cc89936331fbe47
|
[] |
no_license
|
strug-lab/RVS
|
aa19bb5db48d11b144c6768b89716700022fe538
|
3265ff03e413ffc73d8bbfa8057813ea1e01640c
|
refs/heads/master
| 2016-09-06T03:14:59.984165
| 2014-10-28T04:27:17
| 2014-10-28T04:27:17
| 19,017,632
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 797
|
r
|
example.r
|
#
# Read a VCF file in chunks and compute genotype likelihoods via get_L_vcf().
#
# Path to the input VCF -- edit for your system.
a <- 'C:/chr11_113low_56high/1g115low_1g56exomehigh_filtered.hg19.chr11.vcf'
#
# Read vcf helper functions (defines get_L_vcf)
#
source('likelihood_vcf.r')
filen <- a
filecon <- file(filen, open = 'r')
#
# Skip header of vcf file.
# n = may be changed until reach header that contains list of samples
tt2 <- readLines(filecon, n = 128)
#
# S contains list of 169 samples
# One should change accordingly
#
S <- unlist(strsplit(tt2[128], '\t'))[10:178]
# Genotype likelihoods
A0M <- NULL
A1M <- NULL
A2M <- NULL
# Coordinates
Cord <- NULL
l <- 0
repeat {
  l <- l + 1
  # Read the next chunk of up to 10000 VCF records from the open connection.
  chunk <- try(read.table(filecon, nrows = 10000, sep = '\t'), silent = TRUE)
  # BUG FIX: test the class with inherits() instead of `class(x) == ...`
  # (objects can carry multiple classes); also avoid the name `F`, which
  # shadows the FALSE shorthand.
  if (inherits(chunk, 'try-error')) break
  if (length(chunk) == 0) break
  # Contains all information.
  # NOTE(review): AA is overwritten every iteration, so only the last chunk's
  # result survives the loop -- confirm whether results should accumulate.
  AA <- get_L_vcf(chunk)
}
# BUG FIX: release the file connection (it was previously left open).
close(filecon)
|
a29945d8157550f7d64ae1505547a5222cae6ca9
|
93427de297e8ef8232ea2874b4f9fec5e0ecbdab
|
/R/haplo.bin.R
|
2a78e72d84a3fb210eac13a7999835dfbe8387e5
|
[] |
no_license
|
cran/SimHap
|
03f5402bdd68f3ca6b6f139db631b217c9d6cf2b
|
dd834d94c954662ee49c3c50799166557de1c72d
|
refs/heads/master
| 2020-05-18T07:48:55.312886
| 2012-04-14T00:00:00
| 2012-04-14T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 16,103
|
r
|
haplo.bin.R
|
`haplo.bin` <-
function(formula1, formula2, pheno, haplo, sim, effect="add", sub=NULL, adjust=FALSE) {
library(stats)
call <- match.call()
hapFreqs <- haplo$hapObject$final.freq
haplo <- haplo$hapData
if(!identical(as.character(unique(pheno$ID)), as.character(unique(haplo$ID)))) stop("Phenotype data and Haplotype data are not in the same order.")
formula1_nofactors <- formula1
formula1_terms <- attr(terms(formula1_nofactors), "term.labels")
if(any(regexpr(":", formula1_terms)!=-1)){
formula1_terms <- formula1_terms[-which(regexpr(":", formula1_terms)!=-1)]
}
if(any(regexpr("factor", formula1_terms)==1)) {
formula1_terms[which(regexpr("factor", formula1_terms)==1)] <- substr(formula1_terms[which(regexpr("factor", formula1_terms)==1)],8,nchar(formula1_terms[which(regexpr("factor", formula1_terms)==1)])-1)
}
#else formula1_terms <- attr(terms(formula1_nofactors), "term.labels")
freq.estnums <- freqTest(terms=formula1_terms, freqs=hapFreqs, n=length(unique(haplo[,1])), effect=effect)
# first column retains number of non-zero weights for individual
# second column holds the current iteration index for the next weight change
num_weights <- matrix(0, nrow=nrow(pheno), ncol=2)
num_indivs <- nrow(pheno)
# these are the two distributed occurrence matrices representing both corresponding haplotypes
# the dimensions are #iterations by #individuals
hap1s_result <- matrix(0, nrow=sim, ncol=num_indivs)
hap2s_result <- matrix(0, nrow=sim, ncol=num_indivs)
# *****************************
print("* Finding highest individual frequency ...")
# this section calculates the highest frequency of an indiv
lastID <- haplo[1,1]
count <- 1
biggest <- 1
for(i in 2:nrow(haplo)) {
tmpID <- haplo[i,1]
if(lastID==tmpID) {
# only increment count if the weight is not too small in the context
# of the number of iterations
#if((sim*(as.numeric(haplo[i,ncol(haplo)]))) > 1)
count <- count+1
}
else {
if(count>biggest)
biggest <- count
count <- 1
}
lastID <- tmpID
}
# at this point 'biggest' is the highest frequency of an individual
# which translates to the highest number of different haplotype combinations
print(" Done")
# *****************************
indiv_weights <- matrix(0, nrow=num_indivs, ncol=biggest)
indiv_hap1s <- matrix(0, nrow=num_indivs, ncol=biggest)
indiv_hap2s <- matrix(0, nrow=num_indivs, ncol=biggest)
lastID <- haplo[1,1]
indiv_hap1s[1,1] <- haplo[1,2]
indiv_weights[1,1] <- haplo[1,4]
indiv_hap2s[1,1] <- haplo[1,3]
count <- 1
indiv <- 1
# ****************************
print("* Populating individual haplotypes and posterior probabilities ...")
# This section makes a count of the number of occurences of each individual
for(i in 2:nrow(haplo)) {
tmpID <- haplo[i,1]
this_weight <- as.numeric(haplo[i,ncol(haplo)])
# one attempt at rounding too small weight*sim up to '1'
if((sim*this_weight) < 1)
this_weight <- 1 / sim
# the next element is the same ID as the last
if(lastID==tmpID) {
# only increment count if the weight is not too small in the context
# of the number of iterations
if((sim*this_weight) >= 1)
count <- count+1
}
# this element's ID differs from the last: store and restart count
else {
num_weights[indiv,1] <- count
indiv <- indiv + 1
count <- 1
}
if((sim*this_weight) >= 1) {
indiv_weights[indiv,count] <- this_weight
indiv_hap1s[indiv,count] <- haplo[i,2]
indiv_hap2s[indiv,count] <- haplo[i,3]
}
lastID <- tmpID
}
# take care of the last element
num_weights[indiv,1] <- count
print(" Done")
# ****************************
# ****************************
print("* Distributing individual occurrences across the simulations by posterior probability ...")
# main loop across all iterations
fit2.glm <- eval(substitute(glm(formula2, data=pheno, family=binomial, subset=subset), list(subset=sub)))
#log likelihood for smaller, nested model (without haplotypes)
lnLsmall <- logLik(fit2.glm)
for(i in 1:sim) {
# determine weight for each individual and populate hapXs vectors
for(j in 1:num_indivs) {
# save processing time ... ;)
current_numw <- num_weights[j,1]
# invalid pheno case, weights for an indiv do not add up to one
if(current_numw == 0) {
print("Error. Weights do not sum to 1, indiv ID:")
print(j)
print(i)
print("NOTE: Intermediate result returned")
return(weights.result)
}
weight <- as.numeric(indiv_weights[j,current_numw])
hap1s_result[i,j] <- indiv_hap1s[j,current_numw]
hap2s_result[i,j] <- indiv_hap2s[j,current_numw]
if(i>=(num_weights[j,2] + (sim*weight))) {
num_weights[j,1] <- current_numw - 1
num_weights[j,2] <- num_weights[j,2] + (sim*weight)
}
}
# report on progress
if(i==round(sim*0.01))
print(" 1%")
if(i==round(sim*0.05))
print(" 5%")
if(i==round(sim*0.25))
print(" 25%")
if(i==round(sim*0.5))
print(" 50%")
if(i==round(sim*0.75))
print(" 75%")
if(i==round(sim*0.90))
print(" 90%")
}
print(" Done")
# ****************************
# ****************************
print("* Generating a random pattern of individuals for each simulation ...")
# This section generates the random choice of individual
# create a vector with a linear progression 0 -> sim
choice <- 1:sim
sim_choice <- matrix(0, nrow=num_indivs, ncol=sim)
# generate a random choice for each individual
for(i in 1:num_indivs) {
sim_choice[i,] <- sample(choice, sim, replace=FALSE)
}
print(" Done")
# ****************************
# ****************************
print("* Constructing dataframes and performing generalised linear model for each simulation ...")
# This section constructs the dataframe for each iteration in preparation for glm
haplo_table <- table(c(haplo[,2],haplo[,3]))
num_haplos <- dim(haplo_table)
names_haplos <- names(haplo_table)
# prepare the reusable dataframe container ...
dataframe_extra <- matrix(0, nrow=num_indivs, ncol=num_haplos)
dataframe_extra <- as.data.frame(dataframe_extra)
for(i in 1:ncol(dataframe_extra)){
colnames(dataframe_extra)[i] <- paste(names_haplos[i])}
# perform loop through all iterations, constructing the dataframe and applying glm to each one
#------------------------------------
coef.dat <- NULL
p.dat <- NULL
stderror.dat <- NULL
out <- NULL
output <- NULL
pvals <- NULL
stderrors <- NULL
anovp.dat <- NULL
anovdf.dat <- NULL
anovresdf.dat <- NULL
anovfullp.dat <- NULL
anovfulldf.dat <- NULL
anovfullresdf.dat <- NULL
anov.out <- NULL
anov.out1 <- NULL
anov.out2 <- NULL
anovfull.out <- NULL
aic.dat <- NULL
lr.dat <- NULL
lrt.dat <- NULL
lnLbig.dat <- NULL
vcov.list <- list(NULL)
beta.list <- list(NULL)
# the dynamic point at which the algorithm reports progress
five_percent <- round(sim*0.05)
report <- five_percent
# main loop
for(i in 1:sim) {
dataframe_extra[,] <- 0
# Dominant model
if(effect=="dom") {
for(j in 1:num_indivs) {
simul <- sim_choice[j,i]
hap1_str <- hap1s_result[simul, j]
hap2_str <- hap2s_result[simul, j]
colnumber <- match(hap1_str, names_haplos)
dataframe_extra[j,colnumber] <- 1
if(hap1_str != hap2_str) {
colnumber <- match(hap2_str, names_haplos)
dataframe_extra[j,colnumber] <- 1
}
}
}
# Recessive model
if(effect=="rec") {
for(j in 1:num_indivs) {
simul <- sim_choice[j,i]
hap1_str <- hap1s_result[simul, j]
if(hap1_str == hap2s_result[simul, j]) {
colnumber <- match(hap1_str, names_haplos)
dataframe_extra[j,colnumber] <- 1
}
}
}
# Additive model
if(effect=="add") {
for(j in 1:num_indivs) {
simul <- sim_choice[j,i]
colnumber <- match(hap1s_result[simul, j], names_haplos)
dataframe_extra[j,colnumber] <- 1
colnumber <- match(hap2s_result[simul, j], names_haplos)
dataframe_extra[j,colnumber] <- dataframe_extra[j,colnumber] + 1
}
}
# concatenate the extra haplotype columns
dataframe <- as.data.frame(cbind(pheno, dataframe_extra))
#change dataframe (if necessary) to include only indivs with complete data for all terms in formula1
dataframe <- dataframe[complete.cases(dataframe[formula1_terms]),]
# perform the glm with the current dataframe
# glm
fit1.glm <- eval(substitute(glm(formula1, data=dataframe, family=binomial, subset=subset), list(subset=sub)))
fit.glm <- as.data.frame(summary(fit1.glm)$coefficients)
anov <- as.data.frame(anova(fit2.glm, fit1.glm, test="Chisq"))
anovfull <- as.data.frame(anova(fit1.glm, test="Chisq"))
#extract log-likelihood of model with haplotypes
lnLbig <- logLik(fit1.glm)
lnLbig.dat <- rbind(lnLbig.dat, lnLbig)
lr <- -2*(lnLsmall[1]-lnLbig[1])
lr.dat <- rbind(lr.dat, lr)
lr.df <- attr(lnLbig, "df")-attr(lnLsmall,"df")
lrt <- pchisq(lr,df=lr.df)
lrt.dat <- rbind(lrt.dat, lrt)
# extract variance-covariance matrix
vcov.list[[i]] <- vcov(fit1.glm)
beta.list[[i]] <- fit.glm$Estimate
aic <- AIC(fit1.glm)
aic.dat <- rbind(aic.dat, aic)
# add this row to anovfull.dat
anovfullp.row <- anovfull$"Pr(>Chi)"
anovfullp.dat <- rbind(anovfullp.dat, anovfullp.row)
anovfulldf.row <- anovfull$Df
anovfulldf.dat <- rbind(anovfulldf.dat, anovfulldf.row)
anovfullresdf.row <- anovfull$"Resid. Df"
anovfullresdf.dat <- rbind(anovfullresdf.dat, anovfullresdf.row)
# add this row to anovp.dat
anovp.row <- anov$"Pr(>Chi)"[2]
anovp.dat <- rbind(anovp.dat, anovp.row)
anovdf.row <- anov$Df
anovdf.dat <- rbind(anovdf.dat, anovdf.row)
anovresdf.row <- anov$"Resid. Df"
anovresdf.dat <- rbind(anovresdf.dat, anovresdf.row)
# add this row to coef.dat
coef.row <- fit.glm$Estimate
coef.dat <- rbind(coef.dat, coef.row)
stderror.row <- fit.glm$"Std. Error"
stderror.dat <- rbind(stderror.dat, stderror.row)
# extract some elements from the glm summary method and add row to p.dat
pvals <- t(fit.glm[ncol(fit.glm)])
p.dat <- rbind(p.dat, pvals)
# report on progress
if(i==report) {
percentage <- report * 100 / sim
print(paste(percentage, "%"))
report <- report + five_percent
}
}
# nullify the row names
row.names(anovdf.dat) <- NULL
row.names(anovp.dat) <- NULL
row.names(anovresdf.dat) <- NULL
row.names(anovfulldf.dat) <- NULL
row.names(anovfullp.dat) <- NULL
row.names(anovfullresdf.dat) <- NULL
row.names(p.dat) <- NULL
row.names(stderror.dat) <- NULL
row.names(coef.dat) <- NULL
row.names(aic.dat) <- NULL
row.names(lr.dat) <- NULL
row.names(lrt.dat) <- NULL
row.names(lnLbig.dat) <- NULL
anovdf.dat <- as.data.frame(anovdf.dat)
anovp.dat <- as.data.frame(anovp.dat)
anovresdf.dat <- as.data.frame(anovresdf.dat)
anovfulldf.dat <- as.data.frame(anovfulldf.dat)
anovfullp.dat <- as.data.frame(anovfullp.dat)
anovfullresdf.dat <- as.data.frame(anovfullresdf.dat)
aic.dat <- as.data.frame(aic.dat)
lr.dat <- as.data.frame(lr.dat)
lrt.dat <- as.data.frame(lrt.dat)
lnLbig.dat <- as.data.frame(lnLbig.dat)
p.dat <- as.data.frame(p.dat)
coef.dat <- as.data.frame(coef.dat)
stderror.dat <- as.data.frame(stderror.dat)
names(aic.dat) <- c("AIC")
allResults <- list(Coef=coef.dat, Std.Error=stderror.dat, P.Value=p.dat)
names(allResults$Coef) <- row.names(fit.glm)
names(allResults$Std.Error) <- row.names(fit.glm)
names(allResults$P.Value) <- row.names(fit.glm)
# allResults <- list(OR=OR.dat, OR.lower.95CI=ORlower.dat, OR.upper.95CI=ORupper.dat, P.Value=p.dat)
# names(allResults$OR) <- row.names(fit.glm)
# names(allResults$OR.lower.95CI) <- row.names(fit.glm)
# names(allResults$OR.upper.95CI) <- row.names(fit.glm)
# names(allResults$P.Value) <- row.names(fit.glm)
# sum.of.squares <- NULL
# for(i in 1:ncol(stderror.dat)){
# sum.of.squares <- cbind(sum.of.squares,sum(stderror.dat[,i]^2))
# }
# sum.of.squares <- as.data.frame(sum.of.squares)
# names(sum.of.squares) <- names(stderror.dat)
# se1 <- sqrt(sum.of.squares/nrow(stderror.dat))
# se2 <- sd(coef.dat)
# se.adj <- sqrt(se1^2 + se2^2)
# Combine inferences across the imputed datasets
out.mi <- UVI(coef.dat, stderror.dat^2,n=num_indivs, ADJ=adjust)
ind.haploeffect <- which(!is.element(names(fit1.glm$coefficients), names(fit2.glm$coefficients)))
p.full <- length(fit1.glm$coefficients)
L.contrast <- NULL
for(j in 1:length(ind.haploeffect)){
L.contrast <- rbind(L.contrast, c(rep(0, ind.haploeffect[j]-1),1,rep(0, p.full-ind.haploeffect[j])) )
}
out.mi.haps <- MVI(beta.list, vcov.list, L=L.contrast)
out.mi.haps <- out.mi.haps
out.coef <- as.numeric(formatC(out.mi$coefficients))
out.pval <- as.numeric(formatC(out.mi$p.value))
out.se <- as.numeric(formatC(out.mi$se))
#if(!is.null(predicted.dat)) predicted.vals <- formatC(mean(predicted.dat))
summary.coefs <- data.frame(cbind(out.coef, out.se, out.pval), row.names=row.names(fit.glm))
names(summary.coefs) <- c("Coefficient", "Std.error", "P.Value")
WALD.out <- cbind(round(out.mi.haps[4]), round(out.mi.haps[5],2), round(out.mi.haps[1], digits=4), round(out.mi.haps[3], digits=4))
WALD.out <- as.data.frame(WALD.out)
names(WALD.out) <- c("Num DF","Den DF","F.Stat", "P.Value")
row.names(WALD.out) <- ""
anovfull.out <- cbind(colMeans(anovfullresdf.dat), colMeans(anovfulldf.dat), formatC(colMeans(anovfullp.dat)))
row.names(anovfull.out) <- row.names(anovfull)
anovfull.out[1,2] <- ""
anovfull.out[1,3] <- ""
anovfull.out <- as.data.frame(anovfull.out)
names(anovfull.out) <- c("Residual DF", "DF", "P-Value")
likelihood.out <- paste("'log Lik'", round(colMeans(lnLbig.dat), digits=3), paste("(df=", attr(lnLbig, "df"), ")", sep=""))
anov.out1 <- cbind(colMeans(anovresdf.dat[1]), "", "")
row.names(anov.out1) <- c("1")
anov.out2 <- cbind(colMeans(anovresdf.dat[2]), colMeans(anovdf.dat[2]), signif(colMeans(anovp.dat), digits=3))
row.names(anov.out2) <- c("2")
print(" Done")
# ****************************
# Arrange the output data
for(i in 1:ncol(coef.dat)){
out$coef.CI[i] <- paste("(",formatC(quantile(coef.dat[,i], probs=c(0.025), na.rm=TRUE)),",",formatC(quantile(coef.dat[,i], probs=c(0.975), na.rm=TRUE)),")", sep="")
out$pval.CI[i] <- paste("(",formatC(quantile(p.dat[,i], probs=c(0.025), na.rm=TRUE)),",",formatC(quantile(p.dat[,i], probs=c(0.975), na.rm=TRUE)),")", sep="")
out$se.CI[i] <- paste("(",formatC(quantile(stderror.dat[,i], probs=c(0.025), na.rm=TRUE)),",",formatC(quantile(stderror.dat[,i], probs=c(0.975), na.rm=TRUE)),")", sep="")
}
out <- data.frame(cbind(out.coef, out$coef.CI, out.se, out$se.CI, out.pval, out$pval.CI))
names(out) <- c("Coef", "Coef.quantiles", "Std.Error", "Std.Error.quantiles", "P.Val", "P.Val.quantiles")
row.names(out) <- row.names(fit.glm)
anov.out <- rbind(anov.out1, anov.out2)
anov.out <- as.data.frame(anov.out)
names(anov.out) <- c("Residual DF", "DF", "P.Value")
if(effect=="add") Effect <- ("ADDITIVE")
if(effect=="dom") Effect <- ("DOMINANT")
if(effect=="rec") Effect <- ("RECESSIVE")
"%w/o%" <- function(x,y) x[!x %in% y]
invars <- names(fit1.glm$coef)
check <- invars %w/o% row.names(out)
if(length(check) != 0) cat(c(check, "removed due to singularities"), "\n")
out.list <- list(formula1=formula1, formula2=formula2, results=out,empiricalResults=allResults, summary.coefs=summary.coefs,ANOD=anovfull.out,logLik=likelihood.out, WALD=WALD.out, aic=colMeans(aic.dat), aicEmpirical=aic.dat, effect=Effect)
class(out.list) <- "hapBin"
return(out.list)
}
|
9293fd7b4fb5637f12631625775cd56bdef1ede8
|
f45dd2f2c39445c70f89874025b5fc9eb0e42929
|
/demo/SimSeq.R
|
9a8b9cca36cf8f5b49670b0b4db877ebce82346d
|
[] |
no_license
|
sbenidt/SimSeq
|
84858e529303e96491648d015e8449b1c978db45
|
2ae1518ab759da3a7554f867f31d95d3a9f90460
|
refs/heads/master
| 2021-01-20T12:04:45.988720
| 2015-03-07T06:23:14
| 2015-03-07T06:23:14
| 12,185,093
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,704
|
r
|
SimSeq.R
|
# Demo script for the SimSeq package: simulate RNA-seq count matrices by
# subsampling the KIRC kidney data set that ships with the package.
data(kidney)
counts <- kidney$counts # Matrix of read counts from KIRC dataset
replic <- kidney$replic # Replic vector indicating paired columns
treatment <- kidney$treatment # Treatment vector indicating Non-Tumor or Tumor columns
# Per-sample normalization factors: the 75th percentile of each column of counts.
nf <- apply(counts, 2, quantile, 0.75)
require(fdrtool)
### Example 1: Simulate Matrix with 1000 DE genes and 4000 EE genes
# Paired sorting; k.ind samples per treatment group in the simulated matrix.
data.sim <- SimData(counts = counts, replic = replic, treatment = treatment,
  sort.method = "paired", k.ind = 5, n.genes = 5000, n.diff = 1000,
  norm.factors = nf)
### Example 2: Calculate weights vector beforehand to save run time in
### repeated simulations
# SortData reorders the columns (paired sorting) and returns the matched
# counts/replic/treatment/norm.factors; reuse its outputs below.
sort.list <- SortData(counts = counts, treatment = treatment, replic = replic,
  sort.method = "paired", norm.factors = nf)
counts <- sort.list$counts
replic <- sort.list$replic
treatment <- sort.list$treatment
nf <- sort.list$norm.factors
# Per-gene Wilcoxon p-values computed on the already-sorted data.
probs <- CalcPvalWilcox(counts, treatment, sort.method = "paired",
  sorted = TRUE, norm.factors = nf, exact = FALSE)
# Sampling weights = 1 - local FDR, so genes likely to be DE are favored
# when sampling the "differentially expressed" set.
weights <- 1 - fdrtool(probs, statistic = "pvalue", plot = FALSE, verbose = FALSE)$lfdr
data.sim <- SimData(counts = counts, replic = replic, treatment = treatment,
  sort.method = "paired", k.ind = 5, n.genes = 5000, n.diff = 1000,
  weights = weights, norm.factors = nf)
### Example 3: Specify which genes you want to use in the simulation
# Randomly sample genes or feed in the exact genes you wish to use
genes.diff <- sample(1:nrow(counts), size = 1000, prob = weights)
# EE genes are drawn from the indices NOT already selected as DE.
# Fixed a precedence bug: the original `1:nrow(counts)[-genes.diff]` applied
# `[-genes.diff]` to the scalar nrow(counts) before building the sequence;
# the candidate pool must be `(1:nrow(counts))[-genes.diff]` (compare the
# correct `(1:N)[-genes.de]` form used in Example 5 below).
genes <- c(sample((1:nrow(counts))[-genes.diff], 4000), genes.diff)
data.sim <- SimData(counts = counts, replic = replic, treatment = treatment,
  sort.method = "paired", k.ind = 5, genes.select = genes,
  genes.diff = genes.diff, weights = weights, norm.factors = nf)
### Example 4: Simulate matrix with DE genes having log base 2 fold change greater than 1
# add one to counts matrix to avoid infinities when taking logs
# Each column is scaled by 1/nf via the diagonal matrix multiply before the
# log2 transform; row means give per-gene average log2 expression per group.
tumor.mean <- rowMeans(log2((counts[, treatment == "Tumor"] + 1) %*% diag(1/nf[treatment == "Tumor"])))
nontumor.mean <- rowMeans(log2((counts[, treatment == "Non-Tumor"] + 1) %*% diag(1/nf[treatment == "Non-Tumor"])))
lfc <- tumor.mean - nontumor.mean
# Zero the sampling weight of genes with |log2 fold change| < 1 so they can
# never be chosen as differentially expressed.
weights.zero <- abs(lfc) < 1
weights[weights.zero] <- 0
data.sim <- SimData(counts = counts, replic = replic, treatment = treatment,
  sort.method = "paired", k.ind = 5, n.genes = 5000, n.diff = 1000,
  weights = weights, norm.factors = nf)
### Example 5: Simulate three treatment groups:
### 3 Different types of Differential Expression Allowed
### First Group Diff, Second and Third group Equal
### Second Group Diff, First and Third group Equal
### Third Group Diff, First and Second group Equal
k <- 5 # Sample Size in Each treatment group
### Sample DE genes beforehand
N <- nrow(counts)
genes.de <- sample(1:N, size = 1000, prob = weights) # Sample all DE genes
DE1 <- genes.de[1:333] # Sample DE genes with first trt diff
DE2 <- genes.de[334:666] # Sample DE genes with sec trt diff
DE3 <- genes.de[667:1000] # Sample DE genes with third trt diff
EE <- sample( (1:N)[-genes.de], size = 4000) #Sample EE genes
genes.tot <- c(EE, genes.de)
# NOTE(review): genes.de1 mixes DE2 with the EE set while genes.de2 is
# DE2 union DE3 -- confirm against the SimSeq vignette that this is the
# intended DE assignment for the first simulation.
genes.de1 <- union(DE2, EE) #Assign DE genes for first sim
genes.de2 <- union(DE2, DE3) #Assign DE genes for second sim
data.sim1 <- SimData(counts = counts, replic = replic, treatment = treatment,
  sort.method = "paired", k.ind = k, genes.select = genes.tot,
  genes.diff = genes.de1, weights = weights, norm.factors = nf)
#remove pairs of columns used in first simulation
# data.sim1$col holds the source-column indices; each pair is (col, col + 1).
cols.rm <- c(data.sim1$col[1:(2*k)], data.sim1$col[1:(2*k)] + 1)
counts.new <- counts[, -cols.rm]
nf.new <- nf[-cols.rm]
replic.new <- replic[-cols.rm]
treatment.new <- treatment[-cols.rm]
### Set switch.trt = TRUE for second sim
data.sim2 <- SimData(counts = counts.new, replic = replic.new, treatment = treatment.new,
  sort.method = "paired", k.ind = k, genes.select = genes.tot,
  genes.diff = genes.de2, weights = weights, norm.factors = nf.new,
  switch.trt = TRUE)
### Remove first k.ind entries from first sim and combine two count matrices
counts.sim <- cbind(data.sim1$counts[, -(1:k)], data.sim2$counts)
### treatment group levels for simulated matrix
# Label each of the 5000 simulated genes by which DE pattern (or EE) it
# was drawn from, using the gene subset recorded by the first simulation.
trt.grp <- rep(NA, 5000)
trt.grp[is.element(data.sim1$genes.subset, DE1)] <- "DE_First_Trt"
trt.grp[is.element(data.sim1$genes.subset, DE2)] <- "DE_Second_Trt"
trt.grp[is.element(data.sim1$genes.subset, DE3)] <- "DE_Third_Trt"
trt.grp[is.element(data.sim1$genes.subset, EE)] <- "EE"
|
d62e4c10388fe938321bba5ba287f8afa4f327fb
|
c7b4ef7427031fd72755c1aedbcb41a2a8b4abd7
|
/K-means US_Arrests.R
|
a2c35a4d55a45b6c4350821ac82768dd37f451bd
|
[] |
no_license
|
edkambeu/K-Means-Clustering
|
0b2d8edf19853c89722131cadb49cbc8b0a7f1e2
|
19eca4e5cb6e41c994fc01ff1771d5a4d664cb68
|
refs/heads/master
| 2023-08-19T03:19:05.178421
| 2021-10-02T20:40:43
| 2021-10-02T20:40:43
| 412,883,407
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,775
|
r
|
K-means US_Arrests.R
|
## K-means clustering of the USArrests data set: explore the data, scale it,
## choose k with an elbow plot, cluster, and visualize the result.

# Importing data
data("USArrests")
str(USArrests)

# Looking at the data
head(USArrests)
tail(USArrests)
str(USArrests)

# Is there any missing value in the dataset?
any(is.na(USArrests))

# Any errors in the data set
summary(USArrests)

# Scaling the data (k-means is distance-based, so columns must be comparable)
USArrests_scaled <- scale(USArrests)
head(USArrests_scaled)

# K-means clustering, k = 2
set.seed(2)
USArrests_scaled_kmeans <- kmeans(USArrests_scaled, centers = 2, nstart = 25)

# Examining the return values of the kmeans algorithm
USArrests_scaled_kmeans$tot.withinss

# Total within-cluster sum of squares for a particular value of k
wss <- function(k) {
  fit <- kmeans(USArrests_scaled, centers = k, nstart = 25)
  fit$tot.withinss
}

# Calculating total within sum of squares for up to 10 clusters
k_wss <- 1:10
wss_10 <- sapply(k_wss, wss)
wss_10

# Preparing data for an elbow plot
elbow_plot_data <- as.data.frame(cbind(k_wss, wss_10))
class(elbow_plot_data)

# Plotting an elbow plot using ggplot2.
# Fixed two bugs from the original: `breaks = sequence` passed the base R
# *function* `sequence` instead of the candidate k values, and the missing
# `+` before labs() meant the axis/title labels were never added to the plot.
library(ggplot2)
ggplot(data = elbow_plot_data, aes(x = k_wss, y = wss_10)) +
  geom_point() +
  geom_line() +
  scale_x_continuous(breaks = k_wss) +
  labs(title = "Elbow plot", x = "No of clusters", y = "Total within sum of squares")

# k-means clustering using the optimal number of clusters k = 2
set.seed(3)
USArrests_scaled_kmeans2 <- kmeans(USArrests_scaled, centers = 2, nstart = 25)

# Accessing the clusters
USArrests_scaled_kmeans2$cluster

# Adding clusters to the original data
US_Arrests_with_clusters <- cbind(USArrests, clusters = USArrests_scaled_kmeans2$cluster)
head(US_Arrests_with_clusters)

# Visualizing the clusters using a cluster plot
library(factoextra)
fviz_cluster(USArrests_scaled_kmeans2, data = USArrests)
|
ac1d3a69dd7148b49cb5d33f572219470a2dc1c7
|
6464efbccd76256c3fb97fa4e50efb5d480b7c8c
|
/paws/man/iotanalytics_describe_logging_options.Rd
|
f72b998dc94643dad451cf5fc0a8946cdc4f65e0
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
johnnytommy/paws
|
019b410ad8d4218199eb7349eb1844864bd45119
|
a371a5f2207b534cf60735e693c809bd33ce3ccf
|
refs/heads/master
| 2020-09-14T23:09:23.848860
| 2020-04-06T21:49:17
| 2020-04-06T21:49:17
| 223,286,996
| 1
| 0
|
NOASSERTION
| 2019-11-22T00:29:10
| 2019-11-21T23:56:19
| null |
UTF-8
|
R
| false
| true
| 509
|
rd
|
iotanalytics_describe_logging_options.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iotanalytics_operations.R
\name{iotanalytics_describe_logging_options}
\alias{iotanalytics_describe_logging_options}
\title{Retrieves the current settings of the AWS IoT Analytics logging options}
\usage{
iotanalytics_describe_logging_options()
}
\description{
Retrieves the current settings of the AWS IoT Analytics logging options.
}
\section{Request syntax}{
\preformatted{svc$describe_logging_options()
}
}
\keyword{internal}
|
0a9cdbeb7f104f7bc355ce8071c80847b8c7a232
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/9071_0/rinput.R
|
641b9eb72b598b16558f1fc5a19bed7005f86fd6
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Read a phylogenetic tree in Newick format, remove its root, and write the
# unrooted tree back out for downstream codeml processing.
library(ape)
testtree <- read.tree("9071_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="9071_0_unrooted.txt")
|
6c82e858e807a8431745eaa1756815ab02c0b3d5
|
4ebfa1f80041836d40c9b23bc0c44cd9a40a48e5
|
/Rcode.R
|
00724ecd2cf6b8adc81e00fe3e030bc113e0cffb
|
[] |
no_license
|
ar3781/MayInstitute-Example
|
f78a982e28c2633aebf2ee9dc0552b1187b58a41
|
365718a14994df54b9de7734090bfb8299786867
|
refs/heads/master
| 2020-05-18T14:32:40.306209
| 2019-05-01T20:19:53
| 2019-05-01T20:19:53
| 184,474,680
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 60
|
r
|
Rcode.R
|
# Scatter plot of iris sepal length against sepal width.
# Fixed from the original: a stray ")" after the x argument made the line a
# syntax error, and the column names were misspelled ("Sepal-Length",
# "Septal.Width") -- the iris columns are Sepal.Length and Sepal.Width.
data <- iris
plot(x = iris$Sepal.Length, y = iris$Sepal.Width)
|
7d936cdcbe9d8de4411576452d860d4635db3513
|
abad318b342c41d0f73f9d5491c2f05fce216430
|
/cachematrix.R
|
073ef5d077f25f861599e17e29fb0b8b451ac381
|
[] |
no_license
|
JoieGiArdT/ProgrammingAssignment2
|
cd649ce11ffbb8037e59eddfdaa4a33ad1cbc9d8
|
61f67f45357249d8da341326e2d58af96d2b07c6
|
refs/heads/master
| 2022-11-26T18:33:14.139151
| 2020-08-03T22:26:34
| 2020-08-03T22:26:34
| 284,796,627
| 0
| 0
| null | 2020-08-03T20:08:31
| 2020-08-03T20:08:30
| null |
UTF-8
|
R
| false
| false
| 2,015
|
r
|
cachematrix.R
|
## Construct a cache-aware wrapper around a matrix.
##
## Returns a list of four accessor closures that share this call's
## environment:
##   set(y)          - replace the stored matrix and drop any cached inverse
##   get()           - return the stored matrix
##   setinv(inverse) - store a computed inverse in the cache
##   getinv()        - return the cached inverse, or NULL if none is stored
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      # A new matrix invalidates whatever inverse was cached before.
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() {
      x
    },
    setinv = function(inverse) {
      cached_inverse <<- inverse
    },
    getinv = function() {
      cached_inverse
    }
  )
}
## The function is made up of four more functions, the
## first function "set" receives as an argument the matrix
## that we want to calculate the inverse of, then proceeds
## to assign the matrix to the variable x that was originally
## created as 'x = matrix ()', as I take a value within the
## function, if we apply the concept of lexical scope, the
## function get () that shows the variable x, will always
## look for this within the environment where the function is
## defined, otherwise if it had not been this way we could Call
## the get () function and it would have searched our global
## environment. Then it has two more functions, setinv () saves
## the inverse supplied by the user, and finally getinv () that
# shows the inverse previously entered.
## Return the inverse of the special "cache matrix" object `x` created by
## makeCacheMatrix. On a cache hit the stored inverse is returned directly
## (after emitting a message); on a miss the inverse is computed with
## solve(), written back into the cache, and returned. Any extra arguments
## are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinv()
  if (is.null(cached)) {
    # Cache miss: compute, memoize, and return the fresh inverse.
    result <- solve(x$get(), ...)
    x$setinv(result)
    result
  } else {
    message("getting cached data")
    cached
  }
}
## This function is related to makeCacheMatrix, because it calls its
## functions within it, with the aim of verifying if the inverse of
## the cache we want to calculate is found in our cache, if it
## verifies that if it exists it sends the user to review the cache
## and print that value in reverse; on the other hand, if there is
## no such value, it would be in charge of calculating the inverse of
## the matrix previously supplied and print said calculation.
|
57d6ff5a376b27e723e2e9535400c5c647fdd450
|
00b21e537d2150cd44d1783b660de09208f75978
|
/R/viewHashes.R
|
9b53e6ce281540ba617f5aa0328426374c6edd09
|
[] |
no_license
|
wdwatkins/gdpAnalytics
|
6f16db6fa1d55cb30c9b45cbc39f1aa49887ff3e
|
6c2a29aa65d7de60c5b84620314f9161c7306d8a
|
refs/heads/master
| 2021-01-23T06:25:19.768023
| 2019-06-07T23:40:31
| 2019-06-07T23:40:31
| 86,365,636
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 654
|
r
|
viewHashes.R
|
# Join GDP job metadata with parsed XML request information, keep only
# successful jobs, and interactively page through the jobs one md5 hash at a
# time so duplicate request payloads can be inspected.
library(dplyr)
library(data.table)
library(lubridate)
# Read everything as character so IDs and hashes are not coerced to numbers.
jobsDF <- fread('data/uniqueDF_4_21.csv', stringsAsFactors = FALSE,
                colClasses = "character")
xmlDF <- fread('data/GDP_XML_4_21.csv')
# requestLink is the key shared between the job table and the XML table.
joinedDF_noAgent <- left_join(jobsDF, xmlDF, by = "requestLink")
successJobs <- filter(joinedDF_noAgent, status == "SUCCEEDED") %>%
  mutate(creationDate = date(creationTime))
# Count successful jobs per (data_uri, md5) pair, most frequent first.
allGrp <- group_by(successJobs, data_uri, md5) %>% summarize(n=n()) %>%
  arrange(desc(n))
# Interactive walk-through: one View() window per hash; press a key to advance.
for(hash in allGrp$md5) {
  filtDF <- filter(successJobs, md5 == hash)
  print(paste(nrow(filtDF), "rows"))
  View(filtDF)
  invisible(readline("Press a key for next"))
}
|
f4db505b4744f4548d8ebb7f7fbb6837c14b3c8d
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/polspline/examples/predict.polymars.Rd.R
|
1b3c4a25dedd84f60d9a45a7c4b6f246fa6bb53f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 369
|
r
|
predict.polymars.Rd.R
|
# Auto-extracted example for polspline::predict.polymars.
library(polspline)
### Name: predict.polymars
### Title: Polymars: multivariate adaptive polynomial spline regression
### Aliases: predict.polymars
### Keywords: smooth nonlinear
### ** Examples
# Fit a polymars classification model of US state region on the state.x77
# statistics, then cross-tabulate in-sample predictions vs. the true regions.
data(state)
state.pm <- polymars(state.region, state.x77, knots = 15, classify = TRUE, gcv = 1)
table(predict(state.pm, x = state.x77, classify = TRUE), state.region)
|
98652a6942cd03280ee04950ccae00e1df5827ad
|
b926f0ac08bfe1b7c0feb654849cbdc70330d462
|
/man/functiontable.Rd
|
f2810de2c820b232918db4360541bef635fead45
|
[
"CC0-1.0"
] |
permissive
|
hpiwowar/knitcitations
|
2157e0c94c376dc5a539996c1b472310d0ae0a9d
|
97456fe4fa138eac68dc4e242500bf9fe8c4012c
|
refs/heads/master
| 2021-01-17T22:50:40.657655
| 2013-02-11T19:51:22
| 2013-02-11T19:51:22
| 8,145,133
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 919
|
rd
|
functiontable.Rd
|
\name{functiontable}
\alias{functiontable}
\title{a table of functions in a package}
\usage{
functiontable(pkg, ...)
}
\arguments{
\item{pkg}{a string specifying the name of a package,}
\item{...}{additional arguments to xtable}
}
\value{
the output of xtable (as html, or specify type="latex")
}
\description{
This function takes a package name and generates a
two-column table with the names of each function in the
package and the short description from the help
documentation.
}
\details{
useful for Sweave/knit manuals specifying a table of
functions Note that xtable format can also be set with
\code{options(xtable.type="latex")} \code{or
options(xtable.type="html")}. This function modified from
DWin's solution on StackOverflow.com,
http://stackoverflow.com/questions/7326808/getting-the-list-of-functions-in-an-r-package-to-be-used-in-latex
}
\examples{
functiontable("xtable")
}
|
9f5ebce92924da7844c3745e72ff0b955f39f69a
|
bdd86fde8ecc268a08ab787ae295c0175164f556
|
/man/plot_ci.Rd
|
8728dbb62a7870661ebdd5818faa905b0265e756
|
[] |
no_license
|
mauriziopaul/litterDiallel
|
448c94e7fb42ba823fda54c3ef7a698959e97625
|
dba0c8383f6baf0dc20a2136243db208f2af33fc
|
refs/heads/master
| 2022-06-19T05:45:25.506450
| 2022-05-30T22:16:54
| 2022-05-30T22:16:54
| 124,441,857
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,021
|
rd
|
plot_ci.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/litterDiallel.R
\name{plot_ci}
\alias{plot_ci}
\title{plot_ci}
\usage{
plot_ci(
midvals,
narrow.intervals,
wide.intervals,
names = 1:length(midvals),
add = FALSE,
main = "",
main.line = 2,
xlab = "Estimate",
xlab.line = 2.5,
xlim = NULL,
ylab = "",
yaxis = TRUE,
ylim = c(0, length(midvals)),
name.line = 4,
pch.midvals = 19,
col = "black",
col.midvals = col,
cex.labels = 1,
type = "p",
name.margin = 6.1,
title.margin = 4.1,
title.line = 3.5,
bottom.margin = 5.1,
bottom.line = 4.5,
right.margin = 2.1,
right.line = 1.5,
mar = sides(left = name.margin, bottom = bottom.margin, top = title.margin, right =
right.margin),
mar.update = sides(),
before.data = function() { },
plt.left = NULL,
plt.right = NULL,
plt.bottom = NULL,
plt.title = NULL,
...
)
}
\arguments{
\item{midvals}{midvals}
\item{narrow.intervals}{narrow.intervals}
\item{wide.intervals}{wide.intervals}
\item{names}{names}
\item{add}{add}
\item{main}{main}
\item{main.line}{main.line}
\item{xlab}{xlab}
\item{xlab.line}{xlab.line}
\item{xlim}{xlim}
\item{ylab}{ylab}
\item{yaxis}{yaxis}
\item{ylim}{ylim}
\item{name.line}{name.line}
\item{pch.midvals}{pch.midvals}
\item{col}{col}
\item{col.midvals}{col.midvals}
\item{cex.labels}{cex.labels}
\item{type}{type}
\item{name.margin}{name.margin}
\item{title.margin}{title.margin}
\item{title.line}{title.line}
\item{bottom.margin}{bottom.margin}
\item{bottom.line}{bottom.line}
\item{right.margin}{right.margin}
\item{right.line}{right.line}
\item{mar}{mar}
\item{mar.update}{mar.update}
\item{before.data}{before.data}
\item{plt.left}{plt.left}
\item{plt.right}{plt.right}
\item{plt.bottom}{plt.bottom}
\item{plt.title}{plt.title}
\item{...}{additional arguments}
}
\value{
returns plotted caterpillar plot with confidence intervals
}
\description{
draws a caterpillar plot of point estimates with narrow and wide confidence intervals
}
\examples{
## not run
}
|
42d2e2efa68ac2994b5026925116a9a28733ea29
|
9cce1788a21acd01c9deab2bb25f3733a356736c
|
/man/related_artists.Rd
|
fa79407fda8502a94d0d2c6bac823909bd8c4fbb
|
[
"MIT"
] |
permissive
|
raffrica/spotifyremoji
|
30fd90fa270943627ec3f270b770923ba8e917cc
|
629df278794d586df550a32c93780c0c9d9ac76d
|
refs/heads/master
| 2020-03-09T14:11:43.800853
| 2018-04-14T19:21:30
| 2018-04-14T19:21:30
| 128,828,857
| 0
| 0
| null | 2018-04-09T20:18:41
| 2018-04-09T20:18:41
| null |
UTF-8
|
R
| false
| true
| 559
|
rd
|
related_artists.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/related_artists.R
\name{related_artists}
\alias{related_artists}
\title{Prints dataframe of artist's related artists.}
\usage{
related_artists(user_auth_token, artistName)
}
\arguments{
\item{user_auth_token:}{String containing the user's authentication token. See README for details}
\item{artistName:}{String specifying an artist's name}
}
\value{
dataframe object
}
\description{
Prints dataframe of artist's related artists.
}
\examples{
related_artists(auth, "Haftbefehl")
}
|
39b5b36a186e0525b9f507c774a7b70dd3398d93
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/forecast/examples/thetaf.Rd.R
|
3670fe5824eeefe874b7b6449c04ffa4edaaec9e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 173
|
r
|
thetaf.Rd.R
|
# Auto-extracted example for forecast::thetaf.
library(forecast)
### Name: thetaf
### Title: Theta method forecast
### Aliases: thetaf
### Keywords: ts
### ** Examples
# Forecast the built-in annual Nile flow series with the Theta method
# and plot the resulting forecast object.
nile.fcast <- thetaf(Nile)
plot(nile.fcast)
|
2f0e806576349c37ee71b8cd6443c038b6bdb198
|
36628243c050cc012243cce16d55e6d24c95b1cf
|
/man/client_slack.Rd
|
c139217cfa3f61fe2795549cdcfc66b7cf1dc516
|
[
"MIT"
] |
permissive
|
TymekDev/sendeR
|
e5bf9ca406dd130b8003f54c00050de16fedae7a
|
32142f3ee24ad0c1b674102848e41c461a5107d0
|
refs/heads/master
| 2022-11-07T07:07:13.054088
| 2020-06-26T16:48:17
| 2020-06-26T16:48:17
| 213,371,734
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,120
|
rd
|
client_slack.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/client_slack.R
\name{client_slack}
\alias{client_slack}
\title{Slack client}
\usage{
client_slack(slack_webhook, ...)
}
\arguments{
\item{slack_webhook}{a webhook obtained from the Slack API settings.}
\item{...}{named arguments with additional fields which will be passed to
\code{\link{set_fields}} during client creation.}
}
\description{
Client extending the \code{\link{client_sendeR}} for the Slack
service. In addition to any fields in the \code{\link{client_sendeR}} this one
contains \code{slack_webhook} which is needed to send a message via the Slack
Webhook API. For additional information on how to create a webhook see details.
}
\details{
To create your own webhook head to
\url{https://api.slack.com/messaging/webhooks}.
\strong{Note}: Webhooks are permanently connected to one channel.
}
\examples{
client <- client_slack("my_webhook")
# Variant with default parameters set
client2 <- client_slack("my_webhook", message = "Default message template")
}
\seealso{
\code{\link{is.client_slack}}, \code{\link{send_message}}
}
|
8b995b68aa21d0940f863fed139785010da5c6bf
|
1cf864651a3cad23eb3c7f25aecda77b9d51c7e5
|
/man/createstartvalues.Rd
|
a3252d36b3fb6f16c68bdb78bee3d88a1b0ce995
|
[] |
no_license
|
gobbios/EloRating
|
98eec32ae178db6bca95d55691c5d66b525bce9a
|
ebb4957676b3ff5638e5eb9ca34464a480138902
|
refs/heads/master
| 2023-06-08T00:58:34.065438
| 2023-06-02T10:12:35
| 2023-06-02T10:12:35
| 79,722,236
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,406
|
rd
|
createstartvalues.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/createstartvalues.R
\name{createstartvalues}
\alias{createstartvalues}
\title{calculate start values from prior knowledge}
\usage{
createstartvalues(
ranks = NULL,
rankclasses = NULL,
shape = 0.3,
startvalue = 1000,
k = 100
)
}
\arguments{
\item{ranks}{named vector, contains the ordinal ranks of all individuals for which such prior knowledge exists, names of the vector refer to the individual codes as they occur in the interaction sequence supplied to \code{\link{elo.seq}}}
\item{rankclasses}{list with four items, each representing a rank class in descending order, if a given rank class is empty supply it as \code{NULL}, see details and examples}
\item{shape}{numeric, between 0 and 1, by default \code{shape=0.3}. This value determines the 'steepness' of the initial values. Steepest is at \code{shape=0} and shallowest is at \code{shape=1}. See examples.}
\item{startvalue}{numeric, the rating value with which an individual starts into the rating process. By default \code{startvalue=1000}}
\item{k}{numeric, the \emph{k} factor that determines the maximum change in ratings. By default \code{k=100}}
}
\value{
list with three items:\cr
\item{res}{a named numeric vector with the startvalues to be supplied to \code{\link{elo.seq}}}
\item{k}{\emph{k} factor used}
\item{startvalue}{start value used}
}
\description{
calculate start values from prior knowledge
}
\details{
only one of \code{ranks} or \code{rankclasses} can be supplied.
if you wish to supply rank classes you need to supply four categories and it is assumed that the first list item is the highest class. If you have less than four rank classes, you still need to supply a list with four items and set those that you wish to ignore to \code{NULL}, see examples.
}
\examples{
# assuming a group with 7 individuals
# with four rank classes
myrankclasses <- list(alpha = "a", high=c("b", "c"), mid=c("d", "e"), low=c("f", "g"))
createstartvalues(rankclasses = myrankclasses)
# with two rank classes
myrankclasses2 <- list(class1 = NULL, high=c("a", "b", "c"), class3=NULL, low=c("d", "e", "f", "g"))
createstartvalues(rankclasses = myrankclasses2)
# with ordinal ranks
myranks <- 1:7; names(myranks) <- letters[1:7]
createstartvalues(ranks = myranks)
}
\references{
\insertRef{newton-fisher2017a}{EloRating}
}
\author{
Christof Neumann
}
|
b590f6c782e4f574391ded0a610008dcc473eb98
|
bdeb6048c3fbaf04e916f1f6a0f341ac4d47f088
|
/LAPDcalls2.R
|
b9f2860702b04b2023034da42df793d8f0cf4a17
|
[] |
no_license
|
RexWoon/blog-files
|
0657162c05e2592aa1761e06418be49d9e6afe6e
|
55c032fe134c34b56de44601755b81de5a66a8b7
|
refs/heads/master
| 2021-01-10T16:13:52.413049
| 2017-05-25T03:04:06
| 2017-05-25T03:04:06
| 50,158,861
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,475
|
r
|
LAPDcalls2.R
|
##### LAPD Calls Part Deux##################
# Time-series analysis of daily LAPD dispatch-call counts: load, count per
# day, Box-Cox transform, difference, and fit a candidate ARIMA model.
library(ggplot2)
library(dplyr)
library(scales)
############## Time series of number of calls per day#####################
lapd.data <- read.csv("LAPD_Calls_for_Service_YTD_2015.csv")
Day <- unique(lapd.data$Dispatch.Date)
# Count calls per dispatch day. vapply() replaces the original grow-in-a-loop
# pattern (daytotal[i] <- nrow(filter(...)) inside for (i in 1:length(Day)))
# and produces the same integer counts in the same order; sum(..., na.rm=TRUE)
# matches filter()'s behavior of dropping NA comparisons.
daytotal <- vapply(
  Day,
  function(d) sum(lapd.data$Dispatch.Date == d, na.rm = TRUE),
  integer(1)
)
x <- seq(as.Date("2015/1/1"), by = "day", length.out = length(Day))
df <- data.frame(x, daytotal)
ggplot(df,aes(x,daytotal))+geom_line()+theme_grey()+scale_x_date(breaks=date_breaks("months"),labels = date_format("%b"))+labs(title="Number of calls to dispatch per day",y="Number of calls",x="Date")
# Same plot, trimming the first/last few days (indices 7:304 are hard-coded
# to this data set's length — TODO confirm if the data changes).
ggplot(df,aes(x[7:304],daytotal[7:304]))+geom_line()+theme_grey()+scale_x_date(breaks=date_breaks("months"),labels = date_format("%b"))+labs(title="Number of calls to dispatch per day",y="Number of calls",x="Date")
##########ACF and PACF###########
acf(daytotal, lag.max = 30)
pacf(daytotal)
############## Box Cox######################
library(MASS)
# Renamed from `t` (which shadows base::t) and use seq_along instead of
# 1:length(); TRUE spelled out instead of T.
time_idx <- seq_along(daytotal)
bc <- boxcox(daytotal ~ time_idx, lambda = seq(-5, 0, .2), plotit = TRUE)
lam <- bc$x[which.max(bc$y)]   # lambda maximizing the profile likelihood
lam
############# Transformed series ###############
trans_day <- daytotal^lam
qplot(x,trans_day,geom = "line")+theme_minimal()+scale_y_continuous(name = "transformed number of calls (in thousands)")+xlab("Day")+scale_x_date(breaks=date_breaks("months"),labels = date_format("%b"))
################### Differencing ##############
diff_trans_day <- diff(trans_day)
qplot(x[2:304],diff_trans_day,geom = "line")+theme_minimal()+scale_y_continuous(name = "differenced transformed number of calls (in thousands)")+xlab("Day")+scale_x_date(breaks=date_breaks("months"),labels = date_format("%b"))
############ Model Selection ##################
acf(diff_trans_day, lag.max = 30)
pacf(diff_trans_day)
library(TSA)
eacf(diff_trans_day) #arima(1,1,2)?
############## first option of model##############
fittedmodel <- arima(trans_day, order = c(1,1,2))
fittedmodel
hist(fittedmodel$residuals, probability = TRUE) # approx normal, with outlier
shapiro.test(fittedmodel$residuals)
#Shapiro-Wilk normality test
#
#data:  fittedmodel$residuals
#W = 0.98484, p-value = 0.3511
# p-value is biggish, we cannot reject for most common significance levels
qqnorm(fittedmodel$residuals)
# check for white noise of residuals by examining the acf and pacf
#par(mfrow=c(2,1))
acf(fittedmodel$residuals)
pacf(fittedmodel$residuals)
|
0f9ee0d5affe279a9f8efcb748b87f353a3f7459
|
745d585395acad1376d84f8ca1284c13f2db70f0
|
/R/calcCumulatedDiscount.R
|
dbe6d6110f57fe332c1ce71ad25dbc3705805191
|
[] |
no_license
|
pik-piam/quitte
|
50e2ddace0b0e2cbfabf8539a0e08efe6bb68a0b
|
4f5330695bd3d0e05d70160c1af64f0e436f89ea
|
refs/heads/master
| 2023-08-20T04:15:16.472271
| 2023-08-09T08:14:32
| 2023-08-09T08:14:32
| 206,053,101
| 0
| 8
| null | 2023-08-09T08:14:34
| 2019-09-03T10:39:07
|
R
|
UTF-8
|
R
| false
| false
| 5,366
|
r
|
calcCumulatedDiscount.R
|
#' Calculates the cumulated discounted time series
#'
#' Discount and cumulated a times series - gives the time series of the net
#' present value (NPV). Baseyear for the NPV is the first period.
#'
#'
#' @param data a quitte object containing consumption values - consumption has
#' to be named "Consumption"
#' @param nameVar name of the variable to be cumulated (and discounted)
#' @param nameDisrate Name of the variable containing the discount rate
#' @param discount The discount rate: either a numeric value, or 'BAU' to
#' choose the discount rate supplied in nameDisrate
#' @param fixYear From the discounted time series, substract the value in year
#' fixYear, if fixYear is not 'none'
#' @return cumulated discounted values for each scenario, model, region (quitte
#' object)
#' @author Anselm Schultes
#' @examples
#'
#' \dontrun{
#' erg <- calcCumulatedDiscount(data, disRate=0.03)
#' }
#'
#' @importFrom reshape2 dcast
#'
#' @export
calcCumulatedDiscount = function(data,
nameVar='Consumption',
nameDisrate='Interest Rate t/(t-1)|Real',
discount=0.05,
fixYear='none'){
# this functions implements the functionality found here (only for options CumMode=1, BaseMode=1, DisMode=1):
# http://localhost:8836/projects/remind-matlab/repository/entry/Core/Scripts/cumulate_time_2D.m
# takes a quitte object, returns the present value time series.
# the baseyear is the first year in the time series
# option fixYear: From the discounted time series, substract the value in fixYear - defaults to none. In that case the value in the baseyear is zero anyways by construction.
#Just do this for the specified variable, preserve all other columns.
# Keep only the variable to aggregate plus (optionally) the discount-rate row.
data = data[data$variable %in% c(nameVar,nameDisrate),]
data$year = as.integer(as.character(data$period))
# Cumulation over a single period is meaningless; fail early.
if(length(levels(factor(data$year))) == 1){
stop('This time series only contains one point - aggregation will not work!')
}
# Drop the unit column so it does not interfere with the wide cast below.
data=data[,!(names(data) == 'unit')]
#convert to wide format
data = dcast(data,... ~ variable)
#rename variable
names(data)[names(data) == nameVar] = 'varToAggregate'
if(nameDisrate %in% names(data) ){
names(data)[names(data) == nameDisrate] = 'disRate'
}
# A numeric `discount` installs a constant rate; otherwise the rate column
# taken from the data (nameDisrate) is used as-is.
if(is.numeric(discount)){
data$disRate = discount
} else{
warning('Endogenous interest discount is not validated yet.')
}
#group for all other columns:
col_grp = names(data)[!(names(data) %in% c('varToAggregate','disRate','period','year'))]
#calculate discount factor from discount rate:
erg = data %>%
group_by(!!!syms(col_grp)) %>%
mutate(
discFactor = cumprod((1 + !!sym('disRate'))^(-(!!sym('year') - lag(!!sym('year'),default=first(!!sym('year')),order_by=!!sym('year'))))),
w = (!!sym('year') - first(!!sym('year'))) , # just for diagnostics
discFactor2 = (1 + !!sym('disRate'))^(-(!!sym('year') - first(!!sym('year')))) ## just for diagnostics this equals discFactor for time-indep disRate
)
#AJS question: how can I keep a column in the dataframe without grouping it?
#this calculated annually compounded weight factors according to Elmar's method
# weight1/weight2 distribute each multi-year period's value across its years
# at half-year offsets, linearly weighted toward either period boundary.
erg = erg %>%
group_by(!!!syms(col_grp)) %>%
mutate(
weight1 = mapply(
function(dt,dr) {
sum( (1+dr)^(-seq(0.5, as.double(dt-0.5)) )
* (1 - seq(0.5, as.double(dt-0.5))/dt)
)
}, # Why no use (1:dt) instead??
(!!sym('year') - lag(!!sym('year'), default = first(!!sym('year')), order_by = !!sym('year'))), # first element in year here doesnt matter anyways, will be thrown out later on..
!!sym('disRate')
),
weight2 = mapply(
function(dt,dr) {
sum( (1+dr)^(-(seq(0.5, as.double(dt-0.5)) - dt))
* (seq(0.5, as.double(dt-0.5))/dt)
)
},
(!!sym('year') - lag(!!sym('year'), default = first(!!sym('year')), order_by = !!sym('year'))),
!!sym('disRate')
),
weightSum = !!sym('weight2') + !!sym('weight1') # just for diagnostics
)
yrs = as.integer(as.character(levels(factor(erg$year))))
#yrs = yrs[yrs != min(yrs)] ## all time steps but the first one.
#calculate the whole discounted time series: FIXME how to do this more elegantly?
# For each year p: sum discounted, weighted contributions of all periods up to
# p; the [-1] drops the first period, whose lagged term is undefined.
erg_allT = do.call(rbind,lapply(yrs,function(p){
tmp <- erg %>%
filter(!!sym('year') <= p) %>%
group_by(!!!syms(col_grp)) %>%
summarise(
discountedAggregate = sum(
( !!sym('varToAggregate') * !!sym('discFactor') * !!sym('weight2')
+ ( lag(!!sym('varToAggregate'), order_by = !!sym('year'))
* lag(!!sym('discFactor'), order_by = !!sym('year')) * !!sym('weight1')
)
)[-1] )
) %>% ungroup()
tmp$period = p
tmp
}))
names(erg_allT)[names(erg_allT)=='discountedAggregate'] = 'value'
erg_allT$unit = NA
erg_allT$variable = paste0(nameVar,'|aggregated')
#shift resulting time series by the value in the year fixYear
# NOTE(review): !!sym('fixYear') has no matching column, so it resolves to the
# function argument through the data-mask environment fallback — confirm.
if(fixYear != 'none'){
# if(! 'POSIXct' %in% class(fixYear)) fixYear = ISOYear(fixYear)
erg_allT = erg_allT %>%
group_by(!!!syms(col_grp)) %>%
mutate(value = !!sym('value') - !!sym('value')[!!sym('period') == !!sym('fixYear')])
}
return(as.quitte(as.data.frame(erg_allT)))
}
|
863076ad06f555062485816dde070e0aa5679aa6
|
ca2de03ce862c0bf549de4fea51817600793084e
|
/SW2 Midterm/Seatwork 2 Midterm/SW Midterm Angelo Ricohermozo.R
|
c329c6f0cfcdf7bac384caafefd00fb8aa97dcae
|
[] |
no_license
|
Ranzelle06/Midterm_Repo
|
1fff8373182abc336c7dbb915f86a127c7721e72
|
99f93152106ecde4d7ed80815f51134970144731
|
refs/heads/master
| 2020-03-22T05:08:25.033802
| 2018-09-18T18:39:49
| 2018-09-18T18:39:49
| 139,544,789
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,283
|
r
|
SW Midterm Angelo Ricohermozo.R
|
# Load the midterm data set; code below expects Ozone/Wind/Temp columns plus
# Month and Day in columns 5 and 6 — TODO confirm against the CSV.
data <- read.csv("Seatwork 2 Midterm/midetrmseatwork_data.csv")
# Per-column means of a data frame; NAs are dropped by default (removeNA).
# Returns an unnamed numeric vector with one entry per column.
MeanFunction <- function(data, removeNA = TRUE){
  vapply(
    seq_len(ncol(data)),
    function(col_idx) mean(data[[col_idx]], na.rm = removeNA),
    numeric(1)
  )
}
MeanFunction(data)  # column means of the full data set
# Earlier draft of subset_data(): the original was not valid R (the "funtion"
# typo and `data$Ozone = 25` / `data$Temp = 70` used as formal arguments) and
# its call referenced an undefined variable `x`. Rewritten as valid R with the
# same apparent intent: return the Wind values strictly between two bounds.
# Superseded by the subset_data() defined under "#1" below.
subset_data <- function(data, ozone = 25, temp = 70){
  subset_param <- (data$Wind > ozone) & (data$Wind < temp)
  data$Wind[subset_param]
}
# subset_data(data)  # example call; `data` is the data frame loaded above
#1
# Part 1: reload the midterm data set used by subset_data() below.
data <- read.csv("Seatwork 2 Midterm/midetrmseatwork_data.csv")
# Mean Wind over the rows where Ozone exceeds `min` and Temp exceeds `max`;
# rows failing the filter (or with NA condition/wind) are excluded via na.rm.
subset_data <- function(data, min, max){
  keep <- data$Ozone > min & data$Temp > max
  mean(data$Wind[which(keep)], na.rm = TRUE)
}
subset_data(data, 25, 70)  # mean Wind where Ozone > 25 and Temp > 70
#2
# Mean of column 4 over the rows whose columns 5 and 6 equal Month and Day.
# Vectorized form of the original row-by-row loop (same result, same NA
# handling via ifelse + na.rm).
MeanFunction <- function(data, Month, Day ){
  matches <- data[[5]] == Month & data[[6]] == Day
  vals <- ifelse(matches, data[[4]], NA)
  mean(vals, na.rm = TRUE)
}
MeanFunction(data, 9, 8)  # mean of column 4 for rows with Month 9, Day 8
# Smallest value in column 1 among the rows whose column 5 equals Month.
# Vectorized equivalent of the original row loop; returns Inf (with a
# warning) when no row matches, as min(..., na.rm = TRUE) does.
MinFunction <- function(data, Month){
  in_month <- data[[5]] == Month
  candidates <- ifelse(in_month, data[[1]], NA)
  min(candidates, na.rm = TRUE)
}
# Monthly minima of column 1 for months 5 through 10.
MinFunction(data, 5)
MinFunction(data, 6)
MinFunction(data, 7)
MinFunction(data, 8)
MinFunction(data, 9)
MinFunction(data, 10)
|
d4fffea3888a83e2c99a74dfd4bfed40ce31f567
|
cbe529bda1ca9624c7d89e9beea75c6202787d64
|
/R/team_functions.R
|
350da534f486bf1b354f15392a3937380f25ad48
|
[
"MIT"
] |
permissive
|
JamesDalrymple/cmhmisc
|
bc5b29a182d5816f204b008e7cce77b8f5fb5312
|
6590092cb43fe9778799fec2ae33adea5f711c85
|
refs/heads/master
| 2021-10-15T23:35:54.128023
| 2019-02-06T21:25:16
| 2019-02-06T21:25:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,960
|
r
|
team_functions.R
|
#' @title WCCMH team functions
#' @description
#' cmh_recode recodes Washtenaw CMH team names to a standardized
#' team format.
#' recode_team_prog recodes Washtenaw CMH team/program names to a standardized
#' program format.
#' cmh_teams_f factors (ordered is an option) teams.
#' cmh_priority_dt assigns a priority to all of the main teams.
#'
#' @param x A character vector of team names. Will be coerced to character if
#' class(x) is factor.
#' @param missing_key What will happen if a recode_ function is supplied a value not
#' found in recode key. Default is 'non-CMH'. If missing_key is assigned to NULL,
#' an error will occur if any values are in x and not in recode_key.
#' @param levels The levels that will be assigned. Unspecified inputs result
#' in NA.
#' @param level_order The order of the levels. Defaults to NULL.
#'
#' @return recode_x functions: A vector of recoded team/program names.
#' cmh_teams_f A factored vector.
#' cmh_priority_dt A data.table object.
#'
#' @note Needs testing; consider adding an automatic "missing" assignment
#' with a warning message.
#'
#' @examples
#' cmh_recode("WSH - ACT")
#' cmh_recode(c("WSH - ACT", "DD Adult"))
#' require(cmhmisc)
#' require(magrittr)
#' test_vector <- c("ACT", "WSH - Children's Services - Home Based Ellsworth",
#' "WSH - Children's Services Ellsworth", "WSH - DD Adult Annex",
#' "WSH - DD Adult Ellsworth", "WSH - MI - Adult Annex", "WSH - MI - Adult Towner",
#' "Washtenaw County Community Mental Health")
#' cmh_recode(test_vector)
#' @importFrom TBmisc as.chr
#' @importFrom data.table data.table :=
#'
#' @name team_functions
NULL
#' @rdname team_functions
team_names <- list(
DD = c("DD"),
ACT = c("ACT"),
MI = c("MI", "ATO"),
"Child HB" = c("Home Based", "^Child HB$"),
Child = c("^Child$", "Children's Services"),
Access = c("CSTS", "Access", "Engagement",
"Washtenaw County Community Mental Health"),
UM = c("UM", "Utilization Management"),
"non-CMH" = c("non-CMH", "Court", "ICSS", "Crisis Residential"),
PORT = c("PATH", "PORT"),
OBRA = c("OBRA")
)
#' @rdname team_functions
#' @export
cmh_recode <- function(x) {
for (i in seq_along(team_names)) {
x[grepl(x = x,
pattern = paste0(team_names[[i]], collapse = "|") )] <- names(team_names[i])
}
return(x)
}
# cmh_recode <- function(x, missing_key = "non-CMH") {
# if (class(x) == "factor") x <- as.chr(x)
# if (any(is.na(x))) x[is.na(x)] <- missing_key
# recode_key <- cmh_team_key
# unknown <- setdiff(x, unlist(recode_key, use.names = FALSE))
# if (length(unknown) > 0) {
# recode_key$unknown <- unknown
# }
# recode_string(x = x, recode_key = recode_key)
# }
#' @rdname team_functions
# Maps standardized team names (the output of cmh_recode()) onto program
# names; consumed by recode_team_prog() below.
cmh_program_key <- list(
  DD = c("DD"),
  MI = c("ACT", "MI"),
  `Y&F` = c("Child", "Child HB"),
  PORT = c("PATH"),
  Access = c("Access"),
  OBRA = c("OBRA"),
  UM = c("UM"),
  `non-CMH` = c("non-CMH")
)
#' @rdname team_functions
#' @export
recode_team_prog <- function(x, missing_key = "non-CMH") {
  # First collapse raw labels to standardized team names, then map those
  # teams onto programs via cmh_program_key.
  x <- cmh_recode(x)
  # FIX: is.factor() replaces `class(x) == "factor"`, which yielded a
  # length-2 condition (an error in R >= 4.2) for ordered factors.
  if (is.factor(x)) x <- as.chr(x)
  if (any(is.na(x))) x[is.na(x)] <- missing_key
  # Values not covered by the key are collected under "unknown" so
  # recode_string() (defined elsewhere in the package) can still map them.
  unknown <- setdiff(x, unlist(cmh_program_key, use.names = FALSE))
  if (length(unknown) > 0) {
    cmh_program_key$unknown <- unknown
  }
  recode_string(x, recode_key = cmh_program_key)
}
#' @rdname team_functions
#' @export
# Turn a vector of standardized team names into a factor restricted to the
# main CMH teams; anything outside `levels` is excluded (becomes NA).
cmh_teams_f <- function(x,
levels = c("ACT", "DD", "MI", "Child HB", "Child"),
level_order = NULL) {
# NOTE(review): the value passed as `level_order` is never used directly.
# When it is supplied (non-NULL), orderedness is taken from is.ordered(x)
# instead — which is FALSE for character input — so the factor is effectively
# always unordered unless x is already an ordered factor. Confirm intent.
if (missing(level_order) || is.null(level_order)) {
level_order <- FALSE
} else {
level_order <- is.ordered(x)
}
result <- factor(
x, levels, labels = levels,
exclude = setdiff(x = x, levels), ordered = level_order
)
return(result)
}
#' @rdname team_functions
#' @export
# Lookup table assigning a priority rank (1-10, in listed order) to each
# standardized team name produced by cmh_recode().
cmh_priority_dt <-
  data.table(team = c("OBRA", "DD", "ACT", "MI", "Child HB", "Child",
                      "PORT", "UM", "Access", "non-CMH"),
             priority = 1:10)
|
20a43f92bc3dcc77b845d7f43ed15ea40ca982b6
|
a4e7ce9ece9ab83b6ca5ef06b22f7b8b2c043362
|
/RDeco/demo/testClustering.R
|
3b466872f54124cac6fecdb3f5a072f8f1e7a319
|
[] |
no_license
|
giuliomorina/DECO
|
fb89fc2ffa94e70aefa85bc2f699ebdf3ce40e90
|
05a5565cf0bf8900248efd05d462c6cfa3e99b13
|
refs/heads/master
| 2021-06-10T19:33:14.319654
| 2016-12-01T11:13:26
| 2016-12-01T11:13:26
| 74,596,605
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 280
|
r
|
testClustering.R
|
# Demo: distribute sqrt() over a named list across four worker hosts using a
# PSOCK cluster from the parallel package.
library(parallel)
# NOTE(review): these hostnames are site-specific; creating the cluster only
# works where they are reachable (e.g. via ssh).
clust <- makePSOCKcluster(c("greywagtail",
"greyheron",
"greypartridge",
"greyplover"))
x <- list(X=5,Y=4,Z=8,T=9)
# Load-balanced apply: each element of x goes to the next free worker.
lambda <- clusterApplyLB(clust, x, sqrt)
# Always shut the cluster down to release the remote worker processes.
stopCluster(clust)
|
752e32a02b41f00d2ebb8219457d17691268700b
|
f75ca2ee0877514a8728dfca44a30bc2fe2da74d
|
/R/group_rates.R
|
7fc58d10bf979f5014db8a3844f14ded41a4daed
|
[] |
no_license
|
rafalab/smallcount
|
f5858cc5ec51f89037b1f7d867a78554840a63d0
|
98f500684c8df958fa6eef91310c4583d9a2f6ca
|
refs/heads/main
| 2023-06-16T21:09:39.832958
| 2021-07-13T02:33:40
| 2021-07-13T02:33:40
| 365,328,599
| 9
| 2
| null | 2021-05-26T15:00:53
| 2021-05-07T19:03:02
|
R
|
UTF-8
|
R
| false
| false
| 789
|
r
|
group_rates.R
|
#' Rowwise rates for groups
#'
#' @param y A dgCMatrix sparse matrix (from the Matrix package).
#' @param g A factor defining the group for each column.
#'
#' @export
#'
# Per-group row rates: for each group of columns defined by g, sum each row's
# entries over the group's columns and divide by the group's grand total.
# Returns a dense nrow(y) x nlevels(g) matrix with group names as colnames.
group_rates <- function(y, g){
  if(!is(y, "dgCMatrix")) stop("y must be class dgCMatrix")
  if(!is.factor(g)){
    warning("Coercing g into a factor")
    g <- as.factor(g)
  }
  # Guard against silently mismatched inputs.
  if (length(g) != ncol(y)) stop("length(g) must equal ncol(y)")
  js <- as.numeric(g)
  # BUG FIX: the original used length(n) with `n` undefined, so the function
  # errored unconditionally; the number of groups is nlevels(g).
  n_groups <- nlevels(g)
  rowsums <- matrix(0, nrow(y), n_groups)
  colsums <- vector("numeric", n_groups)
  for(j in seq_len(ncol(y))){
    # Skip empty columns: (y@p[j]+1):y@p[j+1] would count backwards there.
    if (y@p[j + 1] == y@p[j]) next
    ind <- (y@p[j]+1):y@p[j+1]          # positions of column j's nonzeros
    real_ind <- y@i[ind] + 1            # 0-based row slots -> 1-based rows
    k <- js[j]
    x <- y@x[ind]
    rowsums[real_ind, k] <- rowsums[real_ind, k] + x
    colsums[k] <- colsums[k] + sum(x)
  }
  rowsums <- sweep(rowsums, 2, colsums, FUN = "/")
  colnames(rowsums) <- levels(g)
  rownames(rowsums) <- rownames(y)
  return(rowsums)
}
|
850ad14564199fb30b313e5fa112a13140f61bda
|
d8f643de8f7d1bc3af1478e8f934e4c41ddbc6f1
|
/man/try_catch_error_as_na.Rd
|
a3097d1803754851350ed6e58c94043a24bb0758
|
[] |
no_license
|
meerapatelmd/police
|
d0aff7be9a95a3928c6884675f3cef0b587f11b9
|
7f4f440a0e21de0af10a027c38573af51b059601
|
refs/heads/master
| 2023-01-13T12:59:48.668697
| 2020-11-29T21:45:57
| 2020-11-29T21:45:57
| 258,654,643
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 407
|
rd
|
try_catch_error_as_na.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/try_catch_error_as_na.R
\name{try_catch_error_as_na}
\alias{try_catch_error_as_na}
\title{Skip error messages, record NA, and continue looping over the expression}
\usage{
try_catch_error_as_na(expr)
}
\arguments{
\item{expr}{expression}
}
\description{
Skip error messages, records NA, and continues to loop on the expression
}
|
6a3da295d9bddc0a40e97c010f58f1051d4459f8
|
bf67c57a29eeb452a32bd77f820a274f7fe11bee
|
/tests/testthat/test_integration_builtin_templates.r
|
4b9320f9144339ea55edeb3e7735ce1122e4c387
|
[] |
no_license
|
Display-Lab/pictoralist
|
231ac2c3ad82b5b362c61aadf3dd1519b20a8ad7
|
7c4dacab17390bad1e49c4e9cf9a366e8a0fbee9
|
refs/heads/master
| 2021-06-19T14:28:34.803546
| 2020-03-23T18:53:05
| 2020-03-23T18:53:05
| 159,402,840
| 1
| 0
| null | 2020-03-23T17:20:58
| 2018-11-27T21:39:36
|
R
|
UTF-8
|
R
| false
| false
| 10,576
|
r
|
test_integration_builtin_templates.r
|
# Integration tests for the built-in report templates. All fixtures come from
# the spekex package; read_data()/load_templates() are package helpers, so
# these tests require the full package environment to run.
context("Integration test of baked in templates")
test_that("Baked in templates with single time points work with mtx data",{
mtx_data <- read_data(spekex::get_data_path("mtx"))
mtx_spek <- spekex::read_spek(spekex::get_spek_path("mtx"))
templates <- load_templates()
mtx_templates <- c(templates$ComparisonBarGraphHOR,
templates$ComparisonBarGraphVERT,
templates$EnhancedLeaderboard,
templates$Leaderboard,
templates$IUDGraph,
templates$TopPerformerGraph)
# Each template is run for one recipient; every result must be a ggplot.
results <- lapply(mtx_templates, FUN=function(t, recip, data, spek){t$run(recip, data, spek)},
recip = "E87746", data=mtx_data, spek=mtx_spek)
is_ggplot <- sapply(results, function(x){"ggplot" %in% class(x)})
expect_true(all(is_ggplot))
})
test_that("Baked in templates with single time points work with va data",{
va_data <- read_data(spekex::get_data_path("va"))
va_spek <- spekex::read_spek(spekex::get_spek_path("va"))
templates <- load_templates()
va_templates <- c(templates$SingleLineGraph)
results <- lapply(va_templates, FUN=function(t, recip, data, spek){t$run(recip, data, spek)},
recip = "6559AA", data=va_data, spek=va_spek)
is_ggplot <- sapply(results, function(x){"ggplot" %in% class(x)})
expect_true(all(is_ggplot))
})
test_that("Data provided is used in Top Performer Template", {
mtx_data <- read_data(spekex::get_data_path("mtx"))
mtx_spek <- spekex::read_spek(spekex::get_spek_path("mtx"))
templates <- load_templates()
denom_colname <- 'total_scripts'
numer_colname <- 'high_dose_scripts'
recip_data <- filter(mtx_data, mtx_data$practice == "E87746")
recip_data_zero <- filter(mtx_data, mtx_data$practice == "A81001")
data_denom <- sum(recip_data[denom_colname])
# NOTE(review): data_numer and recip_data_zero are computed but never
# asserted against below.
data_numer <- sum(recip_data[numer_colname])
tpg_env <- templates$TopPerformerGraph
result <- tpg_env$run("E87746", mtx_data, mtx_spek)
result_zero <- tpg_env$run("A81001", mtx_data, mtx_spek)
template_denom <- result$data$value[1]
template_recip <- result$data$id[1]
template_recip_zero <- result_zero$data$id[1]
expect_true(template_denom == data_denom)
expect_true(template_recip == "E87746")
expect_true(template_recip_zero == "A81001")
})
test_that("Data provided is used in IUD Graph Template", {
mtx_data <- read_data(spekex::get_data_path("mtx"))
mtx_spek <- spekex::read_spek(spekex::get_spek_path("mtx"))
templates <- load_templates()
denom_colname <- 'total_scripts'
numer_colname <- 'high_dose_scripts'
recip_data <- filter(mtx_data, mtx_data$practice == "E84076")
data_denom <- sum(recip_data[denom_colname])
data_numer <- sum(recip_data[numer_colname])
iud_env <- templates$IUDGraph
result <- iud_env$run("E84076", mtx_data, mtx_spek)
# The template's plotted numerator/denominator must match the raw sums.
template_recip <- result$data$id[1]
template_numer <- result$data$numer[1]
template_denom <- result$data$denom[1]
expect_true(template_recip == "E84076")
expect_true(template_numer == data_numer)
expect_true(template_denom == data_denom)
})
# Comparison bar graphs and leaderboards: each test rebuilds the "top
# performers" table the template is expected to display and compares it to
# the template's plot data.
test_that("Data provided is used in ComparisonBarGraphHOR", {
mtx_data <- read_data(spekex::get_data_path("mtx"))
mtx_spek <- spekex::read_spek(spekex::get_spek_path("mtx"))
templates <- load_templates()
denom_colname <- 'total_quantity'
numer_colname <- 'total_scripts'
recipient <- "E84076"
compHOR_env <- templates$ComparisonBarGraphHOR
result <- compHOR_env$run(recipient, mtx_data, mtx_spek)
top_performers <- mtx_data %>%
group_by(practice) %>%
summarise(total_scripts = sum(total_scripts), total_quantity = sum(total_quantity)) %>%
mutate(percentage = round(total_scripts/total_quantity, digits=2)) %>%
arrange(desc(total_scripts/total_quantity)) %>%
select(practice, percentage) %>%
head(14)
# If recipient not in top 14, remove last elem and add recipient
# NOTE(review): rbind() with a character vector coerces the percentage
# column to character; the == comparison below then relies on coercion.
if(!(recipient %in% top_performers$practice)) {
recip_data <- filter(mtx_data, mtx_data$practice == recipient)
data_denom <- sum(recip_data[denom_colname])
data_numer <- sum(recip_data[numer_colname])
top_performers <- top_performers %>% head(13) %>%
rbind(c(recipient, round(data_numer/data_denom, digits = 2)))
}
are_equal <- all(result$data$lengths == top_performers$percentage)
expect_true(are_equal)
})
test_that("Data provided is used in ComparisonBarGraphVERT", {
mtx_data <- read_data(spekex::get_data_path("mtx"))
mtx_spek <- spekex::read_spek(spekex::get_spek_path("mtx"))
templates <- load_templates()
denom_colname <- 'total_quantity'
numer_colname <- 'total_scripts'
recipient <- "E84076"
compVERT_env <- templates$ComparisonBarGraphVERT
result <- compVERT_env$run(recipient, mtx_data, mtx_spek)
top_performers <- mtx_data %>%
group_by(practice) %>%
summarise(total_scripts = sum(total_scripts), total_quantity = sum(total_quantity)) %>%
mutate(percentage = round(total_scripts/total_quantity, digits=2)) %>%
arrange(desc(total_scripts/total_quantity)) %>%
select(practice, percentage) %>%
head(14)
# If recipient not in top 14, remove last elem and add recipient
if(!(recipient %in% top_performers$practice)) {
recip_data <- filter(mtx_data, mtx_data$practice == recipient)
data_denom <- sum(recip_data[denom_colname])
data_numer <- sum(recip_data[numer_colname])
top_performers <- top_performers %>% head(13) %>%
rbind(c(recipient, round(data_numer/data_denom, digits = 2)))
}
are_equal <- all(result$data$lengths == top_performers$percentage)
expect_true(are_equal)
})
test_that("Data provided is used in EnhancedLeaderboard", {
mtx_data <- read_data(spekex::get_data_path("mtx"))
mtx_spek <- spekex::read_spek(spekex::get_spek_path("mtx"))
templates <- load_templates()
# NOTE(review): denom_colname/numer_colname are unused in this test.
denom_colname <- 'total_quantity'
numer_colname <- 'total_scripts'
recipient <- "E84076"
enh_env <- templates$EnhancedLeaderboard
result <- enh_env$run(recipient, mtx_data, mtx_spek)
top_performers <- mtx_data %>%
group_by(practice) %>%
summarise(total_scripts = sum(total_scripts), total_quantity = sum(total_quantity)) %>%
mutate(percentage = floor(100*total_scripts/total_quantity)) %>%
arrange(desc(total_scripts/total_quantity)) %>%
select(practice, percentage, total_scripts, total_quantity) %>%
head(7)
numer_all_equal <- all(result$data$numer == top_performers$total_scripts)
denom_all_equal <- all(result$data$denom == top_performers$total_quantity)
expect_true(numer_all_equal)
expect_true(denom_all_equal)
})
test_that("Data provided is used in Leaderboard", {
mtx_data <- read_data(spekex::get_data_path("mtx"))
mtx_spek <- spekex::read_spek(spekex::get_spek_path("mtx"))
templates <- load_templates()
# NOTE(review): denom_colname/numer_colname are unused in this test.
denom_colname <- 'total_quantity'
numer_colname <- 'total_scripts'
recipient <- "E84076"
lead_env <- templates$Leaderboard
result <- lead_env$run(recipient, mtx_data, mtx_spek)
top_performers <- mtx_data %>%
group_by(practice) %>%
summarise(total_scripts = sum(total_scripts), total_quantity = sum(total_quantity)) %>%
mutate(percentage = floor(100*total_scripts/total_quantity)) %>%
arrange(desc(total_scripts/total_quantity)) %>%
select(practice, percentage, total_scripts, total_quantity) %>%
head(7)
numer_all_equal <- all(result$data$numer == top_performers$total_scripts)
denom_all_equal <- all(result$data$denom == top_performers$total_quantity)
expect_true(numer_all_equal)
expect_true(denom_all_equal)
})
# Time-series templates on the VA fixture: tests check that the dates (and,
# for the comparison graph, the facility ids) in the template's plot data
# match the raw data.
test_that("Data provided is used in baked in SingleLineGraph", {
va_data <- read_data(spekex::get_data_path("va"))
va_spek <- spekex::read_spek(spekex::get_spek_path("va"))
templates <- load_templates()
# NOTE(review): numer_colname/denom_colname are unused in this test.
numer_colname <- 'documented'
denom_colname <- 'total'
recipient <- "6559AA"
lead_env <- templates$SingleLineGraph
result <- lead_env$run(recipient, va_data, va_spek)
performer <- va_data %>%
filter(sta6a == recipient) %>%
select(sta6a, report_month, documented, total)
dates <- performer$report_month
template_dates <- result$data$dates
all_equal <- all(dates == template_dates)
expect_true(all_equal)
})
test_that("Data provided is used in baked in ComparisonLineGraph", {
va_data <- read_data(spekex::get_data_path("va"))
va_spek <- spekex::read_spek(spekex::get_spek_path("va"))
templates <- load_templates()
numer_colname <- 'documented'
denom_colname <- 'total'
recipient <- "6559AA"
cmp_lne_env <- templates$ComparisonLineGraph
result <- cmp_lne_env$run(recipient, va_data, va_spek)
# Expected facility ids, hard-coded to the va fixture's contents.
ids <- c("4429AA", "5569AA", "5689AB", "6559AA")
ids_used_in_template <- as.character(unique(result$data$id))
ids_used_in_test <- c(ids[1], ids[2], ids[3], ids[4])
all_equal <- all(ids_used_in_template == ids_used_in_test)
expect_true(all_equal)
})
test_that("Data provided is used in baked in PairedBarGraph", {
va_data <- read_data(spekex::get_data_path("va"))
va_spek <- spekex::read_spek(spekex::get_spek_path("va"))
templates <- load_templates()
numer_colname <- 'documented'
denom_colname <- 'total'
recipient <- "6559AA"
paired_env <- templates$PairedBarGraph
result <- paired_env$run(recipient, va_data, va_spek)
# Hard-coded report months from the va fixture.
test_dates <- c("2018-02-01", "2018-03-01", "2018-04-01", "2018-05-01")
all_equal <- all(test_dates == unique(result$data$date))
expect_true(all_equal)
})
test_that("Data provided is used in baked in PairedBarGraphHOR", {
# Entire test is skipped; the body below documents the intended check.
skip("No data available for testing")
va_data <- read_data(spekex::get_data_path("va"))
va_spek <- spekex::read_spek(spekex::get_spek_path("va"))
templates <- load_templates()
numer_colname <- 'documented'
denom_colname <- 'total'
recipient <- "6559AA"
paired_HOR_env <- templates$PairedBarGraphHOR
result <- paired_HOR_env$run(recipient, va_data, va_spek)
test_dates <- c("2018-02-01", "2018-03-01", "2018-04-01", "2018-05-01")
all_equal <- all(test_dates == unique(result$data$date))
expect_true(all_equal)
})
test_that("Data provided is used in baked in SingleBarGraph", {
va_data <- read_data(spekex::get_data_path("va"))
va_spek <- spekex::read_spek(spekex::get_spek_path("va"))
templates <- load_templates()
numer_colname <- 'documented'
denom_colname <- 'total'
recipient <- "6559AA"
bar_env <- templates$SingleBarTemplate
result <- bar_env$run(recipient, va_data, va_spek)
test_dates <- c("2018-02-01", "2018-03-01", "2018-04-01", "2018-05-01")
all_equal <- all(test_dates == unique(result$data$dates))
expect_true(all_equal)
})
|
30e75fde891cd59fd8b0e6f43fe1070d639efa96
|
b6bd266b6b10290665231f1cc9bc892b51cf6716
|
/man/sample_2006.Rd
|
d7e54e1beb9d5c3efd267f15cd9619d8dc8e4df6
|
[
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
tereom/estcomp
|
9a95e9a0be674d1f029801d3818a8aee8cf3f718
|
817f7e20ab82bffd064db4ccd68f5303a72844e5
|
refs/heads/master
| 2020-06-30T15:26:14.627799
| 2019-11-05T16:17:34
| 2019-11-05T16:17:34
| 200,871,105
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,250
|
rd
|
sample_2006.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sample_2006.R
\docType{data}
\name{sample_2006}
\alias{sample_2006}
\title{Sample of 2006 presidential elections.}
\format{election_2006: A data frame with 7200 rows and 10 columns:
\describe{
\item{state_code, state_name, state_abbr}{Character variables indicating
the state corresponding to the polling station. State code's follow INEGI's
standard}
\item{polling_id}{Numeric identifier of the polling station}
\item{edo_id}{State id}
\item{pri_pvem}{Number of votes favoring the parties PRI and/or PVEM}
\item{pan}{Number of votes favoring PAN}
\item{panal}{Number of votes favoring Partido Nueva Alianza}
\item{prd_pt_conv}{Number of votes favoring the parties PRD, PT,
Convergencia}
\item{psd}{Number of votes favoring the parties PSD}
\item{otros}{Number of votes that do not favor any of the parties (null,
non-registered candidates)}
\item{total}{Total number of votes registered}
\item{stratum}{stratum corresponding to the polling station}
}}
\source{
\url{https://cartografia.ife.org.mx}
}
\usage{
sample_2006
}
\description{
A dataset containing a stratified random sample of the 2006 presidential
elections.
}
\keyword{datasets}
|
b697e412781c9fef4c8dc03ce1a579b9e5ebc66b
|
cfacbfb653f0662be0c70d2c6659c3d1d3305b71
|
/Data-Mining/Lab/XGBoost/XGBoost-Tutorial.R
|
597bb8cc0dac79891294b8520c10a6e6561e1cbc
|
[] |
no_license
|
ihaawesome/Graduate
|
37327af1acd4b2f2bf56648485e5a8378a2bbddd
|
a0ee4b8863b2cd03855685d17cab802e2b5898d3
|
refs/heads/master
| 2020-05-03T07:46:48.563738
| 2019-09-17T05:49:38
| 2019-09-17T05:49:38
| 178,507,439
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,981
|
r
|
XGBoost-Tutorial.R
|
# Working directory for tutorial inputs/outputs (machine-specific path).
setwd('C:/Users/HK/Desktop/GitHub/Graduate/DataMining/XGBoost')
##### XGBoost Tutorial #####
# how to use XGBoost to build a model and make predictions
# gradient boosting framework: linear & tree learning
# Input Type: matrix, dgCMatrix, xgb.DMatrix (recommended)
# 1.2 Installation
library(xgboost)
# 1.3 Learning
# 1.3.2 Dataset loading
# use Mushroom data (binary labels; see objective = 'binary:logistic' below)
data(agaricus.train, package = 'xgboost')
data(agaricus.test, package = 'xgboost')
train <- agaricus.train
test <- agaricus.test
str(train) # data (X ; dgCMatrix class), label (y)
dim(train$data) # 80%
dim(test$data) # 20%
# 1.3.3 Basic training
# max_depth = depth of the trees
# nthread = the number of cpu threads to use
# nrounds
# = each round enhances the model by further reducing the difference
# between ground truth and prediction
# dgCMatrix class
bstSparse <- xgboost(
data = train$data, label = train$label,
max_depth = 2, eta = 1, nthread = 2, nrounds = 2,
objective = 'binary:logistic'
)
# xgb.DMatrix class (XGBoost's preferred container for data + label)
dtrain <- xgb.DMatrix(data = train$data, label = train$label)
bst <- xgboost(
data = dtrain, max_depth = 2, eta = 1, nthread = 2, nrounds = 2,
objective = 'binary:logistic', verbose = 2 # 0 (silence)
)
# 1.5 Perform the prediction
pred <- predict(bst, newdata = test$data)
print(length(pred))
print(head(pred)) # predicted probabilities
# 1.6 Transform the regression in a binary classification
# The only thing that XGBoost does is a regression.
# set the rule that an observation is classified as 1 when pred > 0.5.
prediction <- as.numeric(pred > 0.5)
print(head(prediction)) # predicted label
# 1.7 Measuring model performance
err <- mean(prediction != test$label) # misclassification rate
print(paste('test-error =', err))
# 1.8 Advanced features
dtrain <- xgb.DMatrix(data = train$data, label = train$label)
dtest <- xgb.DMatrix(data = test$data, label = test$label)
# 1.8.2 Measure learning progress with 'xgb.train'
# follow the progress of the learning after each round to evaluate overfitting.
# cross-validation
watchlist <- list(train = dtrain, test = dtest)
bst <- xgb.train(
data = dtrain, max_depth = 2, eta = 1, nthread = 2, nrounds = 2,
watchlist = watchlist, objective = 'binary:logistic'
)
# have some evaluation metrics (both error and logloss are reported per round)
bst <- xgb.train(
data = dtrain, max_depth = 2, eta = 1, nthread = 2, nrounds = 2,
watchlist = watchlist, eval_metric = 'error', eval_metric = 'logloss',
objective = 'binary:logistic'
)
# 1.8.3 Linear boosting
# All the learnings we have performed were based on boosting trees.
# Second algorithm: linear boosting
bst <- xgb.train(
data = dtrain, booster = 'gblinear',
max_depth = 2, eta = 1, nthread = 2, nrounds = 2,
watchlist = watchlist, eval_metric = 'error', eval_metric = 'logloss',
objective = 'binary:logistic'
)
# to catch a linear link, linear boosting is the best.
# to catch a non-linear link, decision trees can be much better.
# 1.8.4 Manipulating xgb.DMatrix
xgb.DMatrix.save(dtrain, 'dtrain.buffer') # save
dtrain2 <- xgb.DMatrix('dtrain.buffer') # load
bst <- xgb.train(
data = dtrain2, max_depth = 2, eta = 1, nthread = 2, nrounds = 2,
watchlist = watchlist, objective = "binary:logistic"
)
# 1.8.4.2 Information Extraction
label <- getinfo(dtest, 'label')
pred <- predict(bst, dtest)
err <- as.numeric(sum(as.integer(pred > 0.5) != label)) / length(label)
print(paste('test-error =', err))
# 1.8.5 View feature importance/influence
importance_matrix <- xgb.importance(model = bst)
print(importance_matrix)
xgb.plot.importance(importance_matrix)
# 1.8.5.1 View the trees
xgb.dump(bst, with_stats = TRUE)
xgb.plot.tree(model = bst)
# 1.8.5.2 Save and load models
xgb.save(bst, 'xgboost.model') # save to a local MODEL file
bst2 <- xgb.load('xgboost.model')
pred2 <- predict(bst2, test$data)
print(sum(abs(pred2 - pred))) # same model, same data => should print 0
rawVec <- xgb.save.raw(bst) # save model to R's raw vector
bst3 <- xgb.load(rawVec)
pred3 <- predict(bst3, test$data)
print(sum(abs(pred3 - pred)))
|
a94efc63fa9e89c8f8fcb744989e5bff54f16b82
|
f72a6bc75fd994afd900dd72d0d03e6ecd875191
|
/credit card.R
|
d3930d8df70f496a8bfb59a664cfbe51f2c0130d
|
[] |
no_license
|
belenamita/namita
|
503134d2ee7900c35d287eee54bf5e9277bb76c7
|
7a73383f6a0df687c20215a33bd129e7228faf45
|
refs/heads/master
| 2021-05-26T01:06:26.010517
| 2020-09-03T06:55:07
| 2020-09-03T06:55:07
| 253,994,130
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 540
|
r
|
credit card.R
|
#Logistic Regression
#Credit Card Problem
# Fits a binomial GLM predicting `card` from applicant attributes and
# produces response-scale (probability) predictions.
Crcard <- read.csv("//Users//smitshah//Desktop//Assignments//Logistic Regression//creditcard.csv")
# NOTE(review): attach() is discouraged (it masks names on the search path);
# the model below already references columns via data = Crcard.
attach(Crcard)
str(Crcard)
# NOTE(review): Crcard.omit is computed and printed but never used afterwards.
Crcard.omit=na.omit(Crcard)
Crcard.omit
#Model Building
Model1 <- glm(factor(card)~reports+age+income+share+expenditure+factor(owner)+factor(selfemp)+dependents+months+majorcards+active,family = "binomial",data = Crcard)
summary(Model1)
# Exponentiated coefficients: odds ratios for the logistic model.
exp(coef(Model1))
table(Crcard$card)
# Fitted probabilities on the training data (type = "response").
prob1 <- predict(Model1,type = "response",Crcard)
prob1
|
69c38c4cde6135da7341b0acf093b8314f553d0c
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/oppr/man/plot_phylo_persistence.Rd
|
491a34d65ae599e2dca811e39b6f781e29a28e17
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 5,357
|
rd
|
plot_phylo_persistence.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_phylo_persistence.R
\name{plot_phylo_persistence}
\alias{plot_phylo_persistence}
\title{Plot a phylogram to visualize a project prioritization}
\usage{
plot_phylo_persistence(
x,
solution,
n = 1,
symbol_hjust = 0.007,
return_data = FALSE
)
}
\arguments{
\item{x}{project prioritization \code{\link{problem}}.}
\item{solution}{\code{\link[base]{data.frame}} or
\code{\link[tibble]{tibble}} table containing the solutions. Here,
rows correspond to different solutions and columns correspond to
different actions. Each column in the argument to \code{solution} should
be named according to a different action in \code{x}.
Cell values indicate if an action is funded in a given solution or not,
and should be either zero or one. Arguments to \code{solution} can
contain additional columns, and they will be ignored.}
\item{n}{\code{integer} solution number to visualize.
Since each row in the argument to \code{solutions} corresponds to a
different solution, this argument should correspond to a row in
the argument to \code{solutions}. Defaults to 1.}
\item{symbol_hjust}{\code{numeric} horizontal adjustment parameter to
manually align the asterisks and dashes in the plot. Defaults to
\code{0.007}. Increasing this parameter will shift the symbols further
right. Please note that this parameter may require some tweaking
to produce visually appealing publication quality plots.}
\item{return_data}{\code{logical} should the underlying data used to create
the plot be returned instead of the plot? Defaults to \code{FALSE}.}
}
\value{
A \code{\link[ggtree]{ggtree}} object, or a
\code{\link[tidytree]{treedata}} object if \code{return_data} is
\code{TRUE}.
}
\description{
Create a plot showing a phylogenetic tree (i.e. a "phylogram") to visualize
the probability that phylogenetic branches are expected to persist
into the future under a solution to a project prioritization problem.
}
\details{
This function requires the \pkg{ggtree} (Yu \emph{et al.} 2017).
Since this package is distributed exclusively
through \href{https://bioconductor.org}{Bioconductor}, and is not
available on the
\href{https://cran.r-project.org/}{Comprehensive R Archive Network},
please execute the following command to install it:
\code{source("https://bioconductor.org/biocLite.R");biocLite("ggtree")}.
If the installation process fails, please consult the package's \href{https://bioconductor.org/packages/release/bioc/html/ggtree.html}{online documentation}.
In this plot, each phylogenetic branch is colored according to probability
that it is expected to persist into the future (see Faith 2008).
Features that directly benefit from at least a single
completely funded project with a non-zero cost are depicted with an
asterisk symbol. Additionally, features that indirectly benefit from funded
projects---because they are associated with partially funded projects that
have non-zero costs and share actions with at least one completely funded
project---are depicted with an open circle symbol.
}
\examples{
# set seed for reproducibility
set.seed(500)
# load the ggplot2 R package to customize plots
library(ggplot2)
data(sim_projects, sim_features, sim_actions)
# build problem
p <- problem(sim_projects, sim_actions, sim_features,
"name", "success", "name", "cost", "name") \%>\%
add_max_phylo_div_objective(budget = 400, sim_tree) \%>\%
add_binary_decisions() \%>\%
add_heuristic_solver(number_solutions = 10)
\donttest{
# solve problem
s <- solve(p)
# plot the first solution
plot(p, s)
# plot the second solution
plot(p, s, n = 2)
# since this function returns a ggplot2 plot object, we can customize the
# appearance of the plot using standard ggplot2 commands!
# for example, we can add a title
plot(p, s) + ggtitle("solution")
# we could also also set the minimum and maximum values in the color ramp to
# correspond to those in the data, rather than being capped at 0 and 1
plot(p, s) +
scale_color_gradientn(name = "Probability of\npersistence",
colors = viridisLite::inferno(150, begin = 0,
end = 0.9,
direction = -1)) +
ggtitle("solution")
# we could also change the color ramp
plot(p, s) +
scale_color_gradient(name = "Probability of\npersistence",
low = "red", high = "black") +
ggtitle("solution")
# we could even hide the legend if desired
plot(p, s) +
scale_color_gradient(name = "Probability of\npersistence",
low = "red", high = "black") +
theme(legend.position = "hide") +
ggtitle("solution")
# we can also obtain the raw plotting data using return_data=TRUE
plot_data <- plot(p, s, return_data = TRUE)
print(plot_data)
}
}
\references{
Faith DP (2008) Threatened species and the potential loss of
phylogenetic diversity: conservation scenarios based on estimated extinction
probabilities and phylogenetic risk analysis. \emph{Conservation Biology},
\strong{22}: 1461--1470.
Yu G, Smith DK, Zhu H, Guan Y, & Lam TTY (2017) ggtree: an
R package for visualization and annotation of phylogenetic trees with their
covariates and other associated data. \emph{Methods in Ecology and
Evolution}, \strong{8}: 28--36.
}
|
8d6cfd6a5516325996190afed84749a52778cd60
|
13f0b3f37544339d5821b2a416a9b31a53f674b1
|
/man/find_group_match.Rd
|
c96c164be3713835931244611784e1f1fb953c8b
|
[
"MIT"
] |
permissive
|
hejtmy/eyer
|
1f8a90fd7a8af0a4c4c73790633589dc624edda2
|
0b49566c76ab659184d62e1cdd658b45b0d33247
|
refs/heads/master
| 2020-04-24T11:17:25.414641
| 2019-09-17T22:44:52
| 2019-09-17T22:44:52
| 171,920,561
| 0
| 0
|
MIT
| 2019-09-10T21:54:40
| 2019-02-21T18:08:07
|
R
|
UTF-8
|
R
| false
| true
| 577
|
rd
|
find_group_match.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eyer-synchronisation.R
\name{find_group_match}
\alias{find_group_match}
\title{Tries to find a sequence of N elements in eye_durations that corresponds to the synchro durations;
returns the index of the first matching eye event}
\usage{
find_group_match(eye_durations, set_synchro_durations, allowed_difference)
}
\arguments{
\item{allowed_difference}{}
}
\description{
Tries to find a sequence of N elements in eye_durations that corresponds to the synchro durations and
returns the index of the first matching eye event.
}
|
0cac176d3bf4776545fd83766b78cd0f5dbc343a
|
c03b75d4c6cd199a6a252799b4382b061e7c53e6
|
/figure/plot3.R
|
23bd4628e61905567c86e4d45008332f7c35172c
|
[] |
no_license
|
ravinderpratap/ExData_Plotting1
|
bac39c6b24359bfea19f6625a71154eb3d5b0be0
|
6977e8f5d0d981259562538e0c150eb4928cc26e
|
refs/heads/master
| 2020-05-07T22:31:27.850894
| 2019-04-14T08:03:02
| 2019-04-14T08:03:02
| 180,948,734
| 0
| 0
| null | 2019-04-12T06:54:58
| 2019-04-12T06:54:57
| null |
UTF-8
|
R
| false
| false
| 1,467
|
r
|
plot3.R
|
# Loading required Packages
library(dplyr)
#Set Working Directory for reading dataset
setwd("C:/Users/r.pratap.singh/Desktop/JohnHopkins/exdata_data_household_power_consumption")
#Read the file ("?" marks missing values in this dataset)
power_data <- read.table("household_power_consumption.txt", header=T, sep = ";", na.strings = "?")
head(power_data)
str(power_data)
# Converting to date format as using Y not y to identify the ccyy format of year
power_data$Date <- as.Date(power_data$Date, "%d/%m/%Y")
# Selecting only Feb 01 2007 and Feb 02 2007
power_data_sel <- subset(power_data, Date >= "2007-02-01" & Date <= "2007-02-02")
# Combine date and time into a single POSIXlt timestamp column for the x-axis.
combined_dt <- paste(power_data_sel$Date, power_data_sel$Time)
combined_dt <- strptime(combined_dt, "%Y-%m-%d %H:%M:%S")
power_data_sel <- cbind(combined_dt,power_data_sel)
#Set Working Directory for plotting
setwd("C:/Users/r.pratap.singh/Desktop/JohnHopkins/ExData_Plotting1/figure")
#Generating Line plot: three sub-metering series overlaid in one panel
plot(power_data_sel$combined_dt, power_data_sel$Sub_metering_1,type = "l",
xlab=" " , ylab = "Energy Sub Metering")
lines(power_data_sel$combined_dt, power_data_sel$Sub_metering_2,col="red")
lines(power_data_sel$combined_dt, power_data_sel$Sub_metering_3,col="blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
col=c("black", "red", "blue"), lty = 1)
#copying the generated screen plot to a png device and then switching the device off.
dev.copy(png,"plot3.png", width= 480, height = 480)
dev.off()
|
6cdca795c39a5ee779efde5972e8724d4a60bced
|
5c7e7dce5d0b75b2299f0710393ecf29e768e342
|
/man/recalc_snowextent_scene.Rd
|
33e404b9d6c8b17ca90a0e2d8ab16ad915f3ff27
|
[] |
no_license
|
SebEagle/snowcoveR
|
995c860ec05fe456b6c8914c48f37af532f50316
|
39d21758976bb697068c84e64aad84b86fddc05d
|
refs/heads/master
| 2020-03-09T08:22:35.168913
| 2018-05-25T21:29:21
| 2018-05-25T21:29:21
| 128,687,597
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,287
|
rd
|
recalc_snowextent_scene.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/recalc_snowextent_scene.R
\name{recalc_snowextent_scene}
\alias{recalc_snowextent_scene}
\title{recalc_snowextent_scene}
\usage{
recalc_snowextent_scene(store_directory, dem_subdir, height_zone_size,
threshold1_snow, threshold2_cloud)
}
\arguments{
\item{store_directory}{Directory where all satellite scenes are stored in subfolders}
\item{dem_subdir}{subdirectory, where DEM files are stored}
\item{height_zone_size}{defines the size (height difference) of the altitudinal zones for which the snowcover percentages are calculated;
must be the same size as used in function 'calc_scene_snowstats'}
\item{threshold1_snow}{set threshold for height-zone snow probability, where pixels will be excluded from the class snow}
\item{threshold2_cloud}{set threshold for height-zone snow probability, above which cloud pixels will be added to the class snow}
}
\description{
This function named 'recalc_snowextent_scene' recalculates snow extent to exclude low probable snow pixels and
include high probable snow under cloudcover. For that the previously calculated dem height zone probabilities are used
(function 'calc_scene_snowstats')
}
\author{
Sebastian Buchelt (M.Sc. EAGLE - University of Wuerzburg)
}
|
8ab93fd7b242e9fcb0f49b4bda1f80fcd81b4207
|
af243341d1c806d2c67e9a7101f92ab508d4f05e
|
/analysis/Fig_S_trankplots.R
|
3ba8e691b0806c055eff7160961763ac5fed076a
|
[] |
no_license
|
michaelchimento/acquisition_production_abm
|
5fb103e1528785d703899695ae15fe247ecd8763
|
a3d74aafc7a16b93373651a203eb11a9432dd389
|
refs/heads/master
| 2023-05-25T14:45:29.223353
| 2023-05-18T05:24:25
| 2023-05-18T05:24:25
| 285,799,633
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,063
|
r
|
Fig_S_trankplots.R
|
library(tidyverse)
library(rethinking)
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
library(grid)
library(gridGraphics)
library(gridExtra)
loadRData <- function(fileName) {
  # Load an .RData file into this function's environment and return the
  # restored object (the name present besides the `fileName` argument).
  load(fileName)
  loaded_name <- setdiff(ls(), "fileName")
  get(loaded_name)
}
load(file="../model_outputs/Rda_files/df_EWA_posterior_homogeneous_inference.Rda")
df=df_vanilla %>% group_by(sim) %>% summarize(rho=true_rho[1],sigma=true_sigma[1]) %>% mutate(value=paste0("rho:",rho,"; sigma:",sigma)) %>% arrange(rho,sigma)
j=1
for (i in df$sim){
fit=loadRData(paste0("fits/fit",i,"_homogeneous.RDA"))
par(oma=c(1,.75,.75,.75))
trankplot(fit, n_cols=2)
grid.echo()
assign(paste0("p",j),grid.grab())
dev.off()
j=j+1
}
ggarrange(p1,p2,p3,p4,p5,p6,p7,p8,p9, nrow=9, labels=df$value)
ggsave("../output/trankplots_homogeneous.png", width=10, height=18, scale=2, units="cm")
load(file="../model_outputs/Rda_files/df_EWA_posterior_social_inference.Rda")
df=df_vanilla %>% group_by(sim) %>% summarize(rho=true_rho[1],sigma=true_sigma[1]) %>% mutate(value=paste0("rho:",rho,"; sigma:",sigma)) %>% arrange(rho,sigma)
j=1
for (i in df$sim){
fit=loadRData(paste0("fits/fit",i,"_social.RDA"))
par(oma=c(1,.75,.75,.75))
trankplot(fit, n_cols=2)
grid.echo()
assign(paste0("p",j),grid.grab())
dev.off()
j=j+1
}
ggarrange(p1,p2,p3,p4,p5,p6,p7,p8,p9, nrow=9, labels=df$value)
ggsave("../output/trankplots_social.png", width=10, height=18, scale=2, units="cm")
load(file="../model_outputs/Rda_files/df_EWA_posterior_asocial_inference.Rda")
df=df_vanilla %>% group_by(sim) %>% summarize(rho=true_rho[1],sigma=true_sigma[1]) %>% mutate(value=paste0("rho:",rho,"; sigma:",sigma)) %>% arrange(rho,sigma)
j=1
for (i in df$sim){
fit=loadRData(paste0("fits/fit",i,"_asocial.RDA"))
par(oma=c(1,.75,.75,.75))
trankplot(fit, n_cols=2)
grid.echo()
assign(paste0("p",j),grid.grab())
dev.off()
j=j+1
}
ggarrange(p1,p2,p3,p4,p5,p6,p7,p8,p9, nrow=9, labels=df$value)
ggsave("../output/trankplots_asocial.png", width=10, height=18, scale=2, units="cm")
|
36e01c37a2635837609fed2e28b19ab158199537
|
d99e3989183cddfac8a2011e91929ca104192b29
|
/plot1.R
|
ee6638bd0c0fe203223381db8fc964ad485f085a
|
[] |
no_license
|
Diegoscn/ExData_Plotting1
|
86e7d290678b68e3529a9d35c9ba95eb2c9c97df
|
11efb17516c97517c53b9d7b84cc2c8b37d910e7
|
refs/heads/master
| 2021-01-24T05:15:41.027509
| 2014-06-08T23:43:46
| 2014-06-08T23:43:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 561
|
r
|
plot1.R
|
### Read data and plot a histogram
# Histogram of Global Active Power for 2007-02-01..2007-02-02, saved to plot1.png.
data <- read.table("household_power_consumption.txt", sep=";",header=TRUE)
# Combine Date and Time into a POSIXlt timestamp used only for date filtering.
data$DateTime <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")
data <- subset(data, as.Date(DateTime) >= as.Date("2007-02-01") &
as.Date(DateTime) <= as.Date("2007-02-02"))
# NOTE(review): read.table is called without na.strings = "?"; "?" entries
# (this dataset's missing-value marker) therefore become NA here via
# as.numeric, with a coercion warning -- confirm that is acceptable.
data$Global_active_power = as.numeric(as.character(data$Global_active_power))
png("plot1.png", width = 480, height = 480)
hist(data$Global_active_power, main = "Global Active Power",
xlab = "Global Active Power (kW)", col = "Red")
dev.off()
|
d346e12704e486d399c6b9bcae2ce5ef53478592
|
595aa005d1a9d84b03c54b6049453b1e1495b424
|
/man/run_multiple_iscam.Rd
|
e403e53e5d426975d30a46af6e38e26c8300303f
|
[] |
no_license
|
pbs-assess/gfiscamutils
|
6e67c316c0a91d0e639dff8a46eeb4c22d5dd194
|
815275ca470bd086f28ccab752cba91c4c5dbfb7
|
refs/heads/master
| 2023-09-03T22:54:08.246801
| 2023-03-10T09:12:24
| 2023-03-10T09:14:10
| 198,681,740
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 441
|
rd
|
run_multiple_iscam.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run-iscam.R
\name{run_multiple_iscam}
\alias{run_multiple_iscam}
\title{Run multiple iscam models in parallel}
\usage{
run_multiple_iscam(model_dirs, ...)
}
\arguments{
\item{model_dirs}{A vector of model directories}
\item{...}{Arguments passed to \code{\link[=run_iscam]{run_iscam()}}}
}
\value{
Nothing
}
\description{
Run multiple iscam models in parallel
}
|
6e0ad8036ab05b0949d32fd509726f01a25d112d
|
4d6cb9288727a510475fc1e9ebcf247653486580
|
/2021/day03.R
|
aaac0523aa38ed2642ad5480ae22c1b3a49f948d
|
[] |
no_license
|
rrrlw/advent-of-code
|
b9ac82442d7c6164ca49c4fb3107fa323810680a
|
66c26a723717bfd7d95e2cb4e690735ec0f66005
|
refs/heads/main
| 2021-12-28T02:36:51.593985
| 2021-12-15T19:40:24
| 2021-12-15T19:40:24
| 226,371,569
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,208
|
r
|
day03.R
|
library(magrittr)
library(dplyr)
#####UTILITY#####
binvec_to_b10 <- function(bin_vec) {
  # Collapse a vector of 0/1 digits into a single string and parse it as a
  # base-2 number, returning the base-10 integer value.
  digit_string <- paste(as.character(bin_vec), collapse = "")
  strtoi(digit_string, base = 2L)
}
# Keep only rows whose bit in column `pos` equals the MOST common bit in that
# column. Ties resolve to 1: the median of 0/1 values is 0.5 on a tie, and
# as.integer(0.5 + 0.5) == 1.
filter_step_o2 <- function(df, pos) {
df %>%
filter(.[[pos]] == as.integer(median(.[[pos]] + 0.5)))
}
# Keep only rows whose bit in column `pos` equals the LEAST common bit in that
# column. Ties resolve to 0: as.integer(1 - 0.5) truncates to 0.
filter_step_co2 <- function(df, pos) {
df %>%
filter(.[[pos]] == as.integer(1 - median(.[[pos]])))
}
# Repeatedly filter rows by the most-common-bit rule, column by column, until
# at most one row remains; return that row's decimal value (O2 generator rating).
get_o2_rating <- function(df) {
for (pos in seq_len(ncol(df))) {
df <- filter_step_o2(df, pos)
if (nrow(df) <= 1) break
}
# The surviving one-row data frame is coerced to a digit vector by binvec_to_b10.
binvec_to_b10(df)
}
# Repeatedly filter rows by the least-common-bit rule, column by column, until
# at most one row remains; return that row's decimal value (CO2 scrubber rating).
get_co2_rating <- function(df) {
for (pos in seq_len(ncol(df))) {
df <- filter_step_co2(df, pos)
if (nrow(df) <= 1) break
}
binvec_to_b10(df)
}
#####INPUT#####
# Read the puzzle input: one fixed-width binary string per line.
fin <- file("day03.in", open = "r")
vals <- readLines(fin)
single_len <- nchar(vals[1])
# Split every line into single characters and pack the digits into an
# (n-lines) x single_len integer matrix of 0/1 values, one row per line.
vals <- vals %>%
strsplit("", fixed = TRUE) %>%
unlist %>%
as.integer %>%
matrix(ncol = single_len, byrow = TRUE)
close(fin)
#####PART 1#####
# Column-wise median of 0/1 digits gives the most common bit per column
# (gamma); epsilon is its bitwise complement.
# NOTE(review): an exactly tied column would yield a 0.5 here -- the puzzle
# input presumably never ties; confirm if reusing this code.
gamma_vec <- apply(X = vals, MARGIN = 2, FUN = median)
eps_vec <- 1 - gamma_vec
print(binvec_to_b10(gamma_vec) * binvec_to_b10(eps_vec))
#####PART 2#####
vals_df <- as.data.frame(vals)
print(get_o2_rating(vals_df) * get_co2_rating(vals_df))
|
38a44241df92a89870d4b31b153f57cd6b725423
|
4434c2a0f03d1cf8ca0ee8abc3cda21ce82cbc64
|
/dircheck.r
|
b5b2834ac650e44e544018508a0cfe53ec1385be
|
[] |
no_license
|
churchlabUT/ldrc
|
7c3b3498d438642a617494a30abe86d1394c5b09
|
4f67751fa709aab8515c189b183ccd71fdbbff12
|
refs/heads/master
| 2020-12-31T07:55:02.382876
| 2016-02-05T21:00:06
| 2016-02-05T21:00:06
| 49,591,076
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,249
|
r
|
dircheck.r
|
#This script is useful if you need an easy way to see what's in everyone's directory.
#To run this script type 'Rscript dircheck.r ______ __' into the terminal,
#The first blank after the name has five options: runs, check, fs, all, allfiles, subfiles. Each are explained below
#The second blank is the subject index you get from 'Rscript dircheck.r check'. The number to the left of the subject you want to look at
#will be the number you stick in the second blank. Only works for the 'subfiles' option. Ex. 'Rscript dircheck.r subfiles 20'
#Whichever option you choose: allfiles, subfiles, all, fs - the output will be inside a .txt file of the same name. Ex. all.txt
#sets up the filenames for the subjects in main folder-both austin and houston
dir = '/corral-repl/utexas/ldrc/'
filenames = c(Sys.glob(sprintf('%sldrc_*', dir)), Sys.glob(sprintf('%sH_*', dir)))
#Houston data
#dir ='/corral-repl/utexas/ldrc/PHILIPS/'
#filenames = Sys.glob(sprintf('%sH_*', dir))
#takes in arguments
args = commandArgs(trailingOnly = TRUE)
#sets the argument classes
type = as.character(args[1])
sub = as.numeric(args[2])
#check = Gives you an index of the subject you're trying to look at. Put after subfiles.
if (type == 'check'){
print(filenames)
}
#creates files for the loop to write into
all = file('all.txt', 'w')
fs = file('fs.txt', 'w')
subfiles = file('subfiles.txt', 'w')
allfiles = file('allfiles.txt', 'w')
runs = file('runs.txt', 'w')
#First blank
#fs = shows subjects that have Freesurfer files in the wrong place
#all = shows all the folders every subject has in their main folder
#allfiles - shows all the files in the subdirectories for all the subjects. Lots of info, takes a
#while
#subfiles = shows all the subfiles within the sub directories of the subject you want to look at. Look for subject's index number in the
#check option.
#run = lists all the runs for each participant
for (i in 1:length(filenames)){
if (type == 'fs'){
dirs = list.files(path = filenames[i], all.files = F)
if ('bem' %in% dirs){
sink(fs, append = T)
print(filenames[i])
print(dirs)
sink()
}
} else if (type == 'all'){
dirs = list.files(path = filenames[i], all.files = F)
sink(all, append = T)
print(filenames[i])
print(dirs)
sink()
} else if (type == 'allfiles'){
dirs = list.files(path = filenames[i], all.files = F, full.names = T, include.dirs = T)
sink(allfiles)
for (f in 1: length(dirs)){
print(dirs[f])
print(sprintf(' %s',list.files(dirs[f], recursive = F)))
}
sink()
} else if (type == 'runs'){
dirs = list.files(path = filenames[i], all.files = F, full.names = T, include.dirs = T)
sink(runs, append = T)
print(list.dirs(dirs[grep('BOLD', dirs)]))
sink()
}
}
if (type == 'subfiles'){
dirs = list.files(path = filenames[sub], all.files = F, full.names = T, include.dirs = T)
sink(subfiles)
for (f in 1: length(dirs)){
print(dirs[f])
print(sprintf(' %s',list.files(dirs[f])))
}
sink()
}
close(all)
close(fs)
close(subfiles)
close(allfiles)
close(runs)
|
8ee691db0194085537a728bb01c0c4ebaafc2c20
|
adc72eff51513f076338e0f591277bdec5dc5295
|
/TwitterProject/forest.R
|
ff248238f4c66f21a5fcec670245e171f74fd6df
|
[] |
no_license
|
BryceRobinette/MATH4400Project
|
fa1448f081b556048b23d83a0d7eab9506cdb94b
|
259de04c113b755271381fe9e7c3d88b92813641
|
refs/heads/main
| 2023-01-11T22:17:49.051676
| 2020-11-03T17:48:58
| 2020-11-03T17:48:58
| 303,511,271
| 0
| 0
| null | 2020-11-02T22:02:24
| 2020-10-12T20:56:50
|
R
|
UTF-8
|
R
| false
| false
| 1,188
|
r
|
forest.R
|
library(RMariaDB)
library(tm)
library(syuzhet)
library(wordcloud)
library(randomForest)
library(plyr)
#Run random forest algorithm.
# Trains a random forest on labeled tweets from the `candidates` table,
# classifies tweets from the `tweets` table, and returns the candidate name
# predicted most often. Relies on a DB connection `con` from the enclosing
# session and on helpers sourced from helpers.R (clean, prep_Model_Data,
# generate_RandomForest, equalify_rows).
Random_Forest = function(){
source("helpers.R")
query <- "SELECT DISTINCT tweet, person FROM candidates;"
df = dbGetQuery(con, query)
# NOTE(review): clean() is applied twice in a row here (and again below) --
# presumably a deliberate second cleaning pass; confirm before removing.
df$tweet = clean(df$tweet)
df$tweet = clean(df$tweet)
training_data = prep_Model_Data(df, 0.97)
# 70/30 train/holdout split by sampled row indices.
train.index <- sample(c(1:dim(training_data)[1]), floor( 0.7 * dim(training_data)[1] ), replace = FALSE)
#Generally 500 - 1000 trees is perfect
trees = 1000
forest = generate_RandomForest(training_data, train.index, trees)
query <- "SELECT DISTINCT tweet, person FROM tweets;"
df = dbGetQuery(con, query)
df$tweet = clean(df$tweet)
df$tweet = clean(df$tweet)
input_data = prep_Model_Data(df, 1)
input_data = equalify_rows(input_data, training_data)
pred <- predict(forest, type = "response", newdata = input_data)
#factor 0 for trump, 1 for biden
# Majority vote over the per-tweet predicted factor levels.
highest = count(pred)
winner = highest[which.max(highest$freq),]
if (winner[1] == 0){
return('Trump')
}
if (winner[1] == 1){
return('Biden')
}
# NOTE(review): falls through with an implicit NULL if the winning level is
# neither 0 nor 1.
}
|
8b2bf532039f63ddcc7913f7cdac77e8aecd0475
|
a05e541c30580b2091f05bba7bcc373d23333290
|
/data_aggregation.r
|
1ff9a7ec18fe5282e51ba0f98a47f9c3b7454283
|
[] |
no_license
|
krmaas/software_carpentry_2014_12_5
|
8370a733eae8bd96702a606e69894956978702f9
|
7aab499ad7cf4af49e66ab8306bf8fa93ab05cd2
|
refs/heads/master
| 2016-09-03T07:30:46.856236
| 2014-05-13T23:22:38
| 2014-05-13T23:22:38
| 19,756,220
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,232
|
r
|
data_aggregation.r
|
### apply, built in works with rows/columns of matrices and arrays
### aggregate, built in for groups by factors in single vector
### read 2011 wickham, jorunal of statistical software http://www.jstatsoft.org/v40/i01/paper
### plyr::ddply, groups by factor in data.frame
### plyr::l*ply
### dplyr optimized for large datasets
gDat <- read.delim("gapminderDataFiveYear.txt")
str(gDat)
## take dataframe
## linear regression of lifeExp on year
## return intercept and slope
lm(lifeExp~year, gDat)
library(ggplot2)
ggplot(gDat, aes(x=year, y=lifeExp))+geom_point()+
geom_smooth(method="lm")
fit <- lm(lifeExp~I(year - 1952), gDat)
str(fit) ###scary
coef(fit)
## write function:
## input: a data.frame
## output: the intercept and slope
lm.intercept.slope <- function(df) {
  # Regress lifeExp on years since 1952; the returned named coefficient
  # vector holds the intercept (expected lifeExp in 1952) and the
  # per-year slope.
  fit <- lm(lifeExp ~ I(year - 1952), data = df)
  coef(fit)
}
lm.intercept.slope(gDat)
#create a subset of gDat with one country
#store as object
#apply lm.intercept.slope to it
x <- subset(gDat, country=="Rwanda")
lm.intercept.slope(x)
ggplot(x, aes(x=year, y=lifeExp))+geom_point()+
geom_smooth(method="lm")
### use ddply to run through all the countries
library(plyr)
country.lE.lm <- ddply(gDat, ~country, lm.intercept.slope)
country.lE.lm
|
083dce6ee76a40394deae2347369f9dd8b580d61
|
4f13d728eaa1d82f6cfca9f943e5ddda2c654c2d
|
/Sample.R
|
ef3c6c60ead2becf4e0874e305057d3faefe38a8
|
[] |
no_license
|
GITAshRose/Edx_course
|
08575ff6cfd3c33d4706211d6070a33fee3e9b16
|
c8b747cd0be7c332ef7b3d58bd6ea08cc0c8ce14
|
refs/heads/master
| 2023-03-03T22:50:52.801639
| 2021-02-17T12:57:23
| 2021-02-17T12:57:23
| 339,721,205
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 26
|
r
|
Sample.R
|
# Load the built-in mtcars dataset and preview its first rows.
data(mtcars)
head(mtcars)
|
201d62e9f6c710a19f0591d3e50409a51c10386c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/mltools/examples/exponential_weight.Rd.R
|
a3f7705e7ff29534de05345d98a38fc7cecbed1e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 233
|
r
|
exponential_weight.Rd.R
|
library(mltools)
### Name: exponential_weight
### Title: Exponential Weight
### Aliases: exponential_weight
### ** Examples
exponential_weight(1:3, slope=.1)
exponential_weight(1:3, slope=1)
exponential_weight(1:3, slope=10)
|
8b6576f36d717a6db1f6cba9b91c4325f2bdab9d
|
fc680f24d60a8bf68e144e367e454d0183379ed8
|
/R_codesnippets_usefull.R
|
b0d6103c53520543579f3cf520025c4f48f8a3e0
|
[] |
no_license
|
lv601/Phosphoenrichment
|
696da90d08362def85df517dc46dfc25432542e8
|
098300f9861ac6338f6e6086579f649bde3bec32
|
refs/heads/master
| 2022-10-18T17:32:06.096310
| 2020-06-12T15:44:09
| 2020-06-12T15:44:09
| 271,834,008
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,609
|
r
|
R_codesnippets_usefull.R
|
##Code Snippets Usefull
readFun <- function( filename ) {
# read in the data
data <- read.csv( filename,
header = FALSE,
col.names = c( "Name", "Gender", "Count" ) )
# add a "Year" column by removing both "yob" and ".txt" from file name
data$Year <- gsub( "yob|.txt", "", filename )
return( data )
}
# execute that function across all files, outputting a data frame
doMC::registerDoMC( cores = 4 )
babynames <- plyr::ldply( .data = list.files(pattern="*.txt"),
.fun = readFun,
.parallel = TRUE )
#read in required packages
data_path = "C:/Users/Martin/Documents/Projekt_PTM_merge/results_PTM_melanoma/results"
require(data.table)
setDTthreads(threads = 0)
setwd(data_path)
#create a list of the files from your target directory
file_list <- list.files(path=data_path)
#initiate a blank data frame, each iteration of the loop will append the data from the given file to this variable
dataset <- data.frame()
#had to specify columns to get rid of the total column
for (i in 1:length(file_list)){
temp_data <- fread(file_list[i], stringsAsFactors = F) #read in files using the fread function from the data.table package
#temp_data2 = temp_data[,2:16]
dataset <- rbindlist(list(dataset, temp_data), use.names = T, fill=TRUE) #for each iteration, bind the new data to the building dataset
}
files = lapply(files, basename)
for (i in files) {
fl <- paste0(i)
fl
}
ddat <- as.list(rep("", 20))
for(i in 1:20) {
ddat[[i]] <- data.frame(ivec = 1:i)
#other processing..
}
for(i in files) {
assign(paste("d",i,sep="_"),data)
}
###Grouping and mean
airquality <- data.frame(City = c("CityA", "CityA","CityA",
"CityB","CityB","CityB",
"CityC", "CityC"),
year = c("1990", "1990", "2010", "1990",
"2000", "2010", "2000", "2010"),
month = c("June", "July", "August",
"June", "July", "August",
"June", "August"),
PM10 = c(runif(3), rnorm(5)),
PM25 = c(runif(3), rnorm(5)),
Ozone = c(runif(3), rnorm(5)),
CO2 = c(runif(3), rnorm(5)))
airquality
library(dplyr)
airquality %>%
group_by(City, year) %>%
summarise_at(vars("PM25", "Ozone", "CO2"), mean)
(0.4513109+0.0416877)/2
|
5849057d8a565735d6d231e346d208c9da2374d4
|
6e32987e92e9074939fea0d76f103b6a29df7f1f
|
/googleaiplatformv1.auto/man/GoogleCloudAiplatformV1BatchPredictionJobOutputInfo.Rd
|
1d93152a8b3d4cd1d923a0e8d3881c0a8d66c281
|
[] |
no_license
|
justinjm/autoGoogleAPI
|
a8158acd9d5fa33eeafd9150079f66e7ae5f0668
|
6a26a543271916329606e5dbd42d11d8a1602aca
|
refs/heads/master
| 2023-09-03T02:00:51.433755
| 2023-08-09T21:29:35
| 2023-08-09T21:29:35
| 183,957,898
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 709
|
rd
|
GoogleCloudAiplatformV1BatchPredictionJobOutputInfo.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aiplatform_objects.R
\name{GoogleCloudAiplatformV1BatchPredictionJobOutputInfo}
\alias{GoogleCloudAiplatformV1BatchPredictionJobOutputInfo}
\title{GoogleCloudAiplatformV1BatchPredictionJobOutputInfo Object}
\usage{
GoogleCloudAiplatformV1BatchPredictionJobOutputInfo()
}
\value{
GoogleCloudAiplatformV1BatchPredictionJobOutputInfo object
}
\description{
GoogleCloudAiplatformV1BatchPredictionJobOutputInfo Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Further describes this job's output. Supplements output_config.
}
\concept{GoogleCloudAiplatformV1BatchPredictionJobOutputInfo functions}
|
cb3b98e4d46cd3dad2a819fe522d7e8090433a73
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/mtk/examples/getDistributionParameters-methods.Rd.R
|
1fba4536367a68787c24e1bbd4e1b7e677a5158d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 739
|
r
|
getDistributionParameters-methods.Rd.R
|
library(mtk)
### Name: getDistributionParameters-methods
### Title: The 'getDistributionParameters' method
### Aliases: getDistributionParameters-methods getDistributionParameters
### ** Examples
# Define three factors
x1 <- make.mtkFactor(name="x1", distribName="unif",
distribPara=list(min=-pi, max=pi))
x2 <- make.mtkFactor(name="x2", distribName="unif",
distribPara=list(min=-pi, max=pi))
x3 <- make.mtkFactor(name="x3", distribName="unif",
distribPara=list(min=-pi, max=pi))
# Build an object of the "mtkExpFactors" class
ishi.factors <- mtkExpFactors(list(x1,x2,x3))
# Return the parameters of the distributions managed by all the factors as a nested list
names <- getDistributionParameters(ishi.factors)
|
9861c8c2ff4e11d21878fa30f0f17425d9d656b6
|
de9df77e3b35f0b9cd77693a815b14e903cb9dce
|
/Emission_Lines/Extinction/Calzetti_Base_Fluxes.r
|
2cc85c45cb15e3870eea9e4aa692b82aa793288c
|
[] |
no_license
|
Gargoloso/Skyfall
|
f16d37449291dd37bf4dffd1e344953ec9accf1d
|
a80b7293ae04875a386629b1094ed61e350666c9
|
refs/heads/master
| 2020-03-18T03:11:20.881201
| 2018-07-19T03:18:51
| 2018-07-19T03:18:51
| 134,220,033
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,164
|
r
|
Calzetti_Base_Fluxes.r
|
#############################################################################
#############################################################################
## This calculates extinction corrected fluxes using the Calzetti et al. ##
## (200) extinction law. ##
## June 10, 2018 by A. Robleto-Orús ##
#############################################################################
#############################################################################
##Clean the workspace
rm(list=ls(all=TRUE))
##Libraries
require("stringr")
########################################################################
## DATA INPUT ##
########################################################################
setwd("~/Rings/ringed_work/") #Directrory with our data
data <- read.table("lis.dat", header=TRUE)
attach(data)
galaxy <- as.character(name)
####################################################################
####################################################################
## ITERATION FOR ALL GALAXIES ##
####################################################################
####################################################################
##Loop for all galaxies
for(i in 1:length(galaxy)){
print('****************************')
print("NEW GALAXY: ")
print(galaxy[i])
print('****************************')
####################################################################
####################################################################
## EXTRACTION OF EMISSION-LINE DATA ##
####################################################################
####################################################################
##Load data
print('Extracting H-alpha and [N II] 6583 data.')
path_ha <- str_c(galaxy[i],"/lis_ha.res") #creates path to Ha and [N II] data file for each galaxy
data0 <- read.table(path_ha, header=TRUE)
print('Extracting H-beta and [O III] 5007 data.')
path_hb <- str_c(galaxy[i],"/lis_hb.res") #creates path to H-beta and [OIII] data file for each galaxy
data3 <- read.table(path_hb, header=TRUE)
##Merge data
DATA <- merge(data0,data3,by.x = 1, by.y =1)
attach(DATA)
##Extracting coordinates from ID
print('Extracting spaxels IDs.')
ID <- id[which(fluxha!=500 & !is.na(fluxha) & !is.na(fluxnii1) & !is.na(fluxnii2) & !is.na(fluxoiii1) & !is.na(fluxoiii2) & snhb >= 3 & snha >= 3)]
##Extracting line surface specific fluxes [1e-16 erg cm^-2 s^-1 A^-1 arcsec^-2]
print('Extracting emission lines.')
#H-alpha 6563
print('Extracting H-alpha.')
fa <- fluxha[which(fluxha!=500 & !is.na(fluxha) & !is.na(fluxnii1) & !is.na(fluxnii2) & !is.na(fluxoiii1) & !is.na(fluxoiii2) & snhb >= 3 & snha >= 3)] #Obtaining the H-alpha surface specific flux.
#[N II] 6548
print('Extracting [N II] 6548')
fn1 <- fluxnii1[which(fluxha!=500 & !is.na(fluxha) & !is.na(fluxnii1) & !is.na(fluxnii2) & !is.na(fluxoiii1) & !is.na(fluxoiii2) & snhb >= 3 & snha >= 3)] #Obtaining the [N II] 6548 surface specific flux.
#[N II] 6583
print('Extracting [N II] 6583.')
fn <- fluxnii2[which(fluxha!=500 & !is.na(fluxha) & !is.na(fluxnii1) & !is.na(fluxnii2) & !is.na(fluxoiii1) & !is.na(fluxoiii2) & snhb >= 3 & snha >= 3)] #Obtaining the[N II] surface specific flux.
#H-beta 4861
print('Extracting H-beta.')
fb <- fluxhb[which(fluxha!=500 & !is.na(fluxha) & !is.na(fluxnii1) & !is.na(fluxnii2) & !is.na(fluxoiii1) & !is.na(fluxoiii2) & snhb >= 3 & snha >= 3)] #Obtaining the[H-beta] surface specific flux.
#[O III] 4959
print('Extracting [O III] 4959')
fo1 <- fluxoiii1[which(fluxha!=500 & !is.na(fluxha) & !is.na(fluxnii1) & !is.na(fluxnii2) & !is.na(fluxoiii1) & !is.na(fluxoiii2) & snhb >= 3 & snha >= 3)] #Obtaining the [O III] 4959 surface specific flux.
#[O III] 5007
print('Extracting [O III] 5007.')
fo <- fluxoiii2[which(fluxha!=500 & !is.na(fluxha) & !is.na(fluxnii1) & !is.na(fluxnii2) & !is.na(fluxoiii1) & !is.na(fluxoiii2) & snhb >= 3 & snha >= 3)] #Obtaining the [O III] 5007 surface specific flux.
##Convert from surface specific flux. to standard specific intensity [erg cm^-2 s^-1 A^-1 sr^-1]
#print('Converting surface specific flux units to specific intensities.')
#fa <-(1e-16)*fa /2.3504e-11
#fn <-(1e-16)*fn /2.3504e-11
#fb <-(1e-16)*fb /2.3504e-11
#fo <-(1e-16)*fo /2.3504e-11
##Multiply all values by 1e-16 (CALIFA reference level) Comment this section if using the previous conversion to specific intensity.
fa <-(1e-16)*fa
fn1 <-(1e-16)*fn1
fn <-(1e-16)*fn
fb <-(1e-16)*fb
fo1 <-(1e-16)*fo1
fo <-(1e-16)*fo
#####################################################
## DEREDDENING ##
#####################################################
print('Dereddening.')
Rv <- 4.05 #Value for star-forming or high redshift galaxies, in Calzetti et al=. (2000)
l <- c(6563,6548,6583,4861,4959,5007,3729) #Lines we are going to deredden, in Angstrom.
l <- l*1e-4 #Convert Angstrom to micrometres.
#Calzetti et al. (2000) extinction law.
k_l1 <- (2.659*(-2.156+(1.509/l)-(0.198/l^2)+(0.011/l^3)))+Rv #For 0.12 to 0.63 micrometres
k_l2 <- (2.659*(-1.857+(1.040/l)))+Rv #For 0.63 to 2.20 micrometres.
Rab <- (fa/fb)/2.86 #Ratio of attenuated Halpha/Hbeta over non-attenuated one.
EBV <- log10(Rab)/(0.4*1.163) #Colour excess E(B-V) in magnitudes.
Fa <- fa*10^(0.4*EBV*k_l2[1]) #H-alpha dereddening
Fb <- fb*10^(0.4*EBV*k_l1[4]) #H-beta dereddening
Fn1 <- fn1*10^(0.4*EBV*k_l2[2]) #[N II] 6548 dereddening
Fn <- fn*10^(0.4*EBV*k_l2[3]) #[N II] 6583 dereddening
Fo1 <- fo1*10^(0.4*EBV*k_l1[5]) #[O III] 4959 dereddening
Fo <- fo*10^(0.4*EBV*k_l1[6]) #[O III] 6583 dereddening
AHa <- k_l2[1]*EBV #H-alpha extinction (in magnitudes)
##############################################################################
##Save data to files
print('Saving fluxes to data file.')
resume <- data.frame(ID,AHa,EBV,Fa,Fb,Fn1,Fn,Fo1,Fo)
tabla <- str_c(galaxy[i],"/Calzetti_Base_Fluxes.dat")
write.table(resume, tabla, sep="\t",quote=FALSE)
}
|
4b44397fc60066d39c04aebe0dff62171944b4b5
|
e25af04a06ef87eb9fc0c3c8a580b8ca4e663c9b
|
/man/Sobolev.Rd
|
f01e87e9f9f49c091de52907f4dd4f4eade1d9a0
|
[] |
no_license
|
cran/sphunif
|
c049569cf09115bb9d4a47333b85c5b7522e7fd8
|
4dafb9d08e3ac8843e8e961defcf11abe2efa534
|
refs/heads/master
| 2023-07-16T01:12:47.852866
| 2021-09-02T06:40:02
| 2021-09-02T06:40:02
| 402,474,585
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 7,668
|
rd
|
Sobolev.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Sobolev.R
\name{Sobolev}
\alias{Sobolev}
\alias{d_p_k}
\alias{weights_dfs_Sobolev}
\alias{d_Sobolev}
\alias{p_Sobolev}
\alias{q_Sobolev}
\title{Asymptotic distributions of Sobolev statistics of spherical uniformity}
\usage{
d_p_k(p, k, log = FALSE)
weights_dfs_Sobolev(p, K_max = 1000, thre = 0.001, type, Rothman_t = 1/3,
Pycke_q = 0.5, Riesz_s = 1, log = FALSE, verbose = TRUE,
Gauss = TRUE, N = 320, tol = 1e-06, force_positive = TRUE,
x_tail = NULL)
d_Sobolev(x, p, type, method = c("I", "SW", "HBE")[1], K_max = 1000,
thre = 0.001, Rothman_t = 1/3, Pycke_q = 0.5, Riesz_s = 1,
ncps = 0, verbose = TRUE, N = 320, x_tail = NULL, ...)
p_Sobolev(x, p, type, method = c("I", "SW", "HBE", "MC")[1], K_max = 1000,
thre = 0.001, Rothman_t = 1/3, Pycke_q = 0.5, Riesz_s = 1,
ncps = 0, verbose = TRUE, N = 320, x_tail = NULL, ...)
q_Sobolev(u, p, type, method = c("I", "SW", "HBE", "MC")[1], K_max = 1000,
thre = 0.001, Rothman_t = 1/3, Pycke_q = 0.5, Riesz_s = 1,
ncps = 0, verbose = TRUE, N = 320, x_tail = NULL, ...)
}
\arguments{
\item{p}{integer giving the dimension of the ambient space \eqn{R^p} that
contains \eqn{S^{p-1}}.}
\item{k}{sequence of integer indexes.}
\item{log}{compute the logarithm of \eqn{d_{p,k}}? Defaults to
\code{FALSE}.}
\item{K_max}{integer giving the truncation of the series that compute the
asymptotic p-value of a Sobolev test. Defaults to \code{1e3}.}
\item{thre}{error threshold for the tail probability given by the
the first terms of the truncated series of a Sobolev test. Defaults to
\code{1e-3}.}
\item{type}{Sobolev statistic. For \eqn{p = 2}, either \code{"Watson"},
\code{"Rothman"}, \code{"Pycke_q"}, or \code{"Hermans_Rasson"}.
For \eqn{p \ge 2}, \code{"Ajne"}, \code{"Gine_Gn"}, \code{"Gine_Fn"},
\code{"Bakshaev"}, \code{"Riesz"}, \code{"PCvM"}, \code{"PAD"}, or
\code{"PRt"}.}
\item{Rothman_t}{\eqn{t} parameter for the Rothman test, a real in
\eqn{(0, 1)}. Defaults to \code{1 / 3}.}
\item{Pycke_q}{\eqn{q} parameter for the Pycke "\eqn{q}-test", a real in
\eqn{(0, 1)}. Defaults to \code{1 / 2}.}
\item{Riesz_s}{\eqn{s} parameter for the \eqn{s}-Riesz test, a real in
\eqn{(0, 2)}. Defaults to \code{1}.}
\item{verbose}{output information about the truncation? Defaults to
\code{TRUE}.}
\item{Gauss}{use a Gauss--Legendre quadrature rule of \code{N} nodes
in the computation of the Gegenbauer coefficients? Otherwise, call
\code{\link{integrate}}. Defaults to \code{TRUE}.}
\item{N}{number of points used in the \link[=Gauss_Legen_nodes]{
Gauss--Legendre quadrature} for computing the Gegenbauer coefficients.
Defaults to \code{320}.}
\item{tol}{tolerance passed to \code{\link{integrate}}'s \code{rel.tol} and
\code{abs.tol} if \code{Gauss = FALSE}. Defaults to \code{1e-6}.}
\item{force_positive}{set negative}
\item{x_tail}{scalar evaluation point for determining the upper tail
probability. If \code{NULL}, set to the \code{0.90} quantile of the whole
series, computed by the \code{"HBE"} approximation.}
\item{x}{vector of quantiles.}
\item{method}{method for approximating the density, distribution, or
quantile function. Must be \code{"I"} (Imhof), \code{"SW"}
(Satterthwaite--Welch), \code{"HBE"} (Hall--Buckley--Eagleson), or
\code{"MC"} (Monte Carlo; only for distribution or quantile functions).
Defaults to \code{"I"}.}
\item{ncps}{non-centrality parameters. Either \code{0} (default) or a
vector with the same length as \code{weights}.}
\item{...}{further parameters passed to \code{*_\link{wschisq}}.}
\item{u}{vector of probabilities.}
}
\value{
\itemize{
\item \code{d_p_k}: a vector of size \code{length(k)} with the
evaluation of \eqn{d_{p,k}}.
\item \code{weights_dfs_Sobolev}: a list with entries \code{weights} and
\code{dfs}, automatically truncated according to \code{K_max} and
\code{thre} (see details).
\item \code{d_Sobolev}: density function evaluated at \code{x}, a vector.
\item \code{p_Sobolev}: distribution function evaluated at \code{x},
a vector.
\item \code{q_Sobolev}: quantile function evaluated at \code{u}, a vector.
}
}
\description{
Approximated density, distribution, and quantile functions for
the asymptotic null distributions of Sobolev statistics of uniformity
on \eqn{S^{p-1}:=\{{\bf x}\in R^p:||{\bf x}||=1\}}{S^{p-1}:=
\{x\in R^p:||x||=1\}}. These asymptotic distributions are infinite
weighted sums of (central) chi squared random variables:
\deqn{\sum_{k = 1}^\infty v_k^2 \chi^2_{d_{p, k}},}
where
\deqn{d_{p, k} := {{p + k - 3}\choose{p - 2}} + {{p + k - 2}\choose{p - 2}}}
is the dimension of the space of eigenfunctions of the Laplacian on
\eqn{S^{p-1}}, \eqn{p\ge 2}, associated to the \eqn{k}-th
eigenvalue, \eqn{k\ge 1}.
}
\details{
The truncation of \eqn{\sum_{k = 1}^\infty v_k^2 \chi^2_{d_{p, k}}} is
done to the first \code{K_max} terms and then up to the index such that
the first terms explain the tail probability at the \code{x_tail} with
an absolute error smaller than \code{thre} (see details in
\code{\link{cutoff_wschisq}}). This automatic truncation takes place when
calling \code{*_Sobolev}. Setting \code{thre = 0} truncates to \code{K_max}
terms exactly. If the series only contains odd or even non-zero terms, then
only \code{K_max / 2} addends are \emph{effectively} taken into account
in the first truncation.
}
\examples{
# Circular-specific statistics
curve(p_Sobolev(x = x, p = 2, type = "Watson", method = "HBE"),
n = 2e2, ylab = "Distribution", main = "Watson")
curve(p_Sobolev(x = x, p = 2, type = "Rothman", method = "HBE"),
n = 2e2, ylab = "Distribution", main = "Rothman")
curve(p_Sobolev(x = x, p = 2, type = "Pycke_q", method = "HBE"), to = 10,
n = 2e2, ylab = "Distribution", main = "Pycke_q")
curve(p_Sobolev(x = x, p = 2, type = "Hermans_Rasson", method = "HBE"),
to = 10, n = 2e2, ylab = "Distribution", main = "Hermans_Rasson")
# Statistics for arbitrary dimensions
test_statistic <- function(type, to = 1, pmax = 5, M = 1e3, ...) {
col <- viridisLite::viridis(pmax - 1)
curve(p_Sobolev(x = x, p = 2, type = type, method = "MC", M = M,
...), to = to, n = 2e2, col = col[pmax - 1],
ylab = "Distribution", main = type, ylim = c(0, 1))
for (p in 3:pmax) {
curve(p_Sobolev(x = x, p = p, type = type, method = "MC", M = M,
...), add = TRUE, n = 2e2, col = col[pmax - p + 1])
}
legend("bottomright", legend = paste("p =", 2:pmax), col = rev(col),
lwd = 2)
}
# Ajne
test_statistic(type = "Ajne")
\donttest{
# Gine_Gn
test_statistic(type = "Gine_Gn", to = 1.5)
# Gine_Fn
test_statistic(type = "Gine_Fn", to = 2)
# Bakshaev
test_statistic(type = "Bakshaev", to = 3)
# Riesz
test_statistic(type = "Riesz", Riesz_s = 0.5, to = 3)
# PCvM
test_statistic(type = "PCvM", to = 0.6)
# PAD
test_statistic(type = "PAD", to = 3)
# PRt
test_statistic(type = "PRt", Rothman_t = 0.5)
# Quantiles
p <- c(2, 3, 4, 11)
t(sapply(p, function(p) q_Sobolev(u = c(0.10, 0.05, 0.01), p = p,
type = "PCvM")))
t(sapply(p, function(p) q_Sobolev(u = c(0.10, 0.05, 0.01), p = p,
type = "PAD")))
t(sapply(p, function(p) q_Sobolev(u = c(0.10, 0.05, 0.01), p = p,
type = "PRt")))
# Series truncation for thre = 1e-5
sapply(p, function(p) length(weights_dfs_Sobolev(p = p, type = "PCvM")$dfs))
sapply(p, function(p) length(weights_dfs_Sobolev(p = p, type = "PRt")$dfs))
sapply(p, function(p) length(weights_dfs_Sobolev(p = p, type = "PAD")$dfs))
}
}
\author{
Eduardo García-Portugués and Paula Navarro-Esteban.
}
|
765b2bcfea8995478a9cd5facafbdd249da87d12
|
d81b9067f72bcc60dca62e3552768015cfa4eab6
|
/complete_code/05 - SVM.R
|
340beff5f3fb919b3cbb61da18b14182ece136cd
|
[] |
no_license
|
mrverde/msc_dissertation_santander
|
f626aa414b34ce95366cd11f74e61e6cd2b8b943
|
6a3045363be0bca9281424fb77a4f83d7bdbf415
|
refs/heads/master
| 2021-08-19T14:50:09.965744
| 2017-11-26T18:50:23
| 2017-11-26T18:50:23
| 112,000,564
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,119
|
r
|
05 - SVM.R
|
######################### 02 - SVM #########################
#Cargamos las librerías
library(doMC)
library(caret)
library(Boruta)
library(ggplot2)
library(ggthemes)
library(reshape2)
library(gridExtra)
library(DMwR)
library(caret)
#Establecemos los núcleos usados a 8
registerDoMC(cores=7)
#Establezco el directorio de trabajo de la seccion
setwd("/home/usuario/Documentos/R/TFM/02.1-Logit/")
########## FUNCIÓN RECURSIVA REGRESIÓN LOGÍSTICA ##########
recursive_SVM <- function(input.df, target_var, eval.df, target_eval, metod, iter=50, val_cut=0.00001, ROC_val_evol=c(), ROC_vars_evol=c(), contador=0){
#Funcion recursiva que va probando todas las variables de un dataframe y va seleccionando las variables que mas aumentan la curva ROC
#input.df es el DF con nuestros datos de entrenamiento
#target_var es el nombre de la columna objetivo a clasificar en df.input
#eval.df es el nombre del df con los datos de test
#target_eval es la columna a clasificar de los datos de test
#val_cut es la sensibilidad de la formula
#iter es el numero de iteraciones
#NO TOCAR EL RESTO DE VARIABLES
contador <- contador + 1
print(paste("Iteracion ", as.character(contador)))
grid <- expand.grid(C=c(1), sigma= c(.001))
ctrl <- trainControl(method = "none", search="grid", allowParallel = TRUE, summaryFunction=twoClassSummary, classProbs=TRUE)
#Bloque de prueba de todas las variables de la bd
ROC_val <- c()
ROC_var <- c()
input.iter.df <- input.df[ , !names(input.df) %in% c(ROC_vars_evol, "TARGET")]
for (i in 1:ncol(input.iter.df)){
if (i%%50 == 0){print(paste("Subiteracion ", as.character(i)))}
if (contador == 1){
mod <- train(as.formula(paste(paste(paste(as.character(substitute(target_var)), " ~ "), paste(ROC_vars_evol, "+ ", collapse=" + ")), colnames(input.iter.df[i]),sep = "")), data=input.df, method=metod, metric="ROC", tuneGrid = grid, trControl=ctrl)
}else if(contador != 1){
mod <- train(as.formula(paste(paste("TARGET ~", paste(ROC_vars_evol, collapse=" + ")), paste(" + ", colnames(input.iter.df[i])),sep = "")), data=input.df, method=metod, metric="ROC", tuneGrid = grid, trControl=ctrl)
}
pred <- predict(mod, validation.s)
pred <- as.character(pred)
pred[pred=="X0"] <- 0
pred[pred=="X1"] <- 1
pred <- as.numeric(pred)
ROC_val[i] <- InformationValue::AUROC(target_eval,pred)
ROC_var[i] <- colnames(input.iter.df[i])
}
#Bloque de extracción de la mejor variable
ROC <- data.frame(var=ROC_var, ROC=ROC_val)
ROC_vars_evol <- append(ROC_vars_evol, (as.character(ROC[which.max(ROC$ROC), 1])))
ROC_val_evol <- append(ROC_val_evol, (as.numeric(ROC[which.max(ROC$ROC), 2])))
print(as.character(ROC[which.max(ROC$ROC), 1]))
print((as.numeric(ROC[which.max(ROC$ROC), 2])))
#Bloque de salida
if(contador > 1){
if(ROC_val_evol[length(ROC_val_evol)] <= ROC_val_evol[length(ROC_val_evol)-1]){
print("Fin - Añadir una variable baja el valor ROC")
df <- data.frame(var=ROC_vars_evol, ROC=ROC_val_evol)
df$pos_var=1:nrow(df)
beepr::beep(3)
return(df)
}else if(contador == iter){
print("Fin - Se ha llegado al numero de iteraciones")
df <- data.frame(var=ROC_vars_evol, ROC=ROC_val_evol)
df$pos_var=1:nrow(df)
beepr::beep(3)
return(df)
}else if(ROC_val_evol[length(ROC_val_evol)] - (ROC_val_evol[length(ROC_val_evol)-1]) <= val_cut){
print("Fin - Se ha llegado al nivel de sensibilidad")
df <- data.frame(var=ROC_vars_evol, ROC=ROC_val_evol)
df$pos_var=1:nrow(df)
beepr::beep(3)
return(df)
}
}
recursive_SVM(input.df, target_var, eval.df, target_eval, metod, iter, val_cut, ROC_val_evol, ROC_vars_evol, contador)
}
#Para ejecutarlo
train_down.s$TARGET <- make.names(TARGET_down)
output_down.s_SVM <- recursive_SVM(data.frame(train_down.s), TARGET, validation.s, TARGET_validation, "svmLinear", 50)
#train_up.s$TARGET <- make.names(TARGET_up)
#output_up.s_SVM <- recursive_SVM(data.frame(train_up.s), TARGET, validation.s, TARGET_validation, "svmLinear", 50)
train_smote.s$TARGET <- make.names(TARGET_smote)
output_smote.s_SVM <- recursive_SVM(data.frame(train_smote.s), TARGET, validation.s, TARGET_validation, "svmLinear", 50)
train_down.s$TARGET <- make.names(TARGET_down)
output_down.s_SVM_poly <- recursive_SVM(data.frame(train_down.s), TARGET, validation.s, TARGET_validation, "svmPoly", 50)
#train_up.s$TARGET <- make.names(TARGET_up)
#output_up.s_SVM <- recursive_SVM(data.frame(train_up.s), TARGET, validation.s, TARGET_validation, "svmLinear", 50)
train_smote.s$TARGET <- make.names(TARGET_smote)
output_smote.s_SVM_poly <- recursive_SVM(data.frame(train_smote.s), TARGET, validation.s, TARGET_validation, "svmPoly", 50)
train_down.s$TARGET <- make.names(TARGET_down)
output_down.s_SVM_rad <- recursive_SVM(data.frame(train_down.s), TARGET, validation.s, TARGET_validation, "svmRadial", 50)
#train_up.s$TARGET <- make.names(TARGET_up)
#output_up.s_SVM <- recursive_SVM(data.frame(train_up.s), TARGET, validation.s, TARGET_validation, "svmLinear", 50)
train_smote.s$TARGET <- make.names(TARGET_smote)
output_smote.s_SVM_rad <- recursive_SVM(data.frame(train_smote.s), TARGET, validation.s, TARGET_validation, "svmRadial", 50)
mi_vars <- c("num_var30", "imp_op_var39_efect_ult1", "num_var8", "num_op_var40_comer_ult3", "saldo_var8", "num_op_var40_efect_ult3", "ind_var25_cte", "var15")
ctrl <- trainControl(method = "none", search="grid", allowParallel = TRUE, summaryFunction=twoClassSummary, classProbs=TRUE)
bind_rose$TARGET <- make.names(TARGET_rose)
contador <- 0
for (i in c(0.25, 0.5, 0.75, 1, 1.25, 1.5)){
for (j in c(.01, .015, 0.2, 0.3, 0.4)){
contador <- contador + 1
grid <- expand.grid(C=c(i), sigma= c(j))
print(paste("Iteración", as.character(contador)))
print(paste("C =", as.character(i), "sigma =", as.character(j)))
mod <- train(as.formula(paste(paste("TARGET ~", paste(boruta_signif, collapse=" + ")),sep = "")), data=bind_rose, method="svmRadial", metric="ROC", tuneGrid = grid, trControl=ctrl)
pred <- predict(mod, bind_validation)
pred <- as.character(pred)
pred[pred=="X0"] <- 0
pred[pred=="X1"] <- 1
pred <- as.numeric(pred)
ROC_val[contador] <- InformationValue::AUROC(TARGET_validation,pred)
print(paste("AUC -> ", as.character(ROC_val[contador])))
}
}
ROC_var <- colnames(input.iter.df[i])
#GUARDAR DF
df <- data.frame(var=ROC_vars_evol, roc_value=ROC_val_evol)
save(output, file="ROC_evol_logit_227vars_sucesivo_31vars_79ROC.Rda")
#EXPORTAR CSV A KAGGLE
logitmod <- glm(as.formula(paste("TARGET ~", paste(output_smote.s_logit$var[1:(length(output_smote.s_logit$var)-6)], collapse=" + "))), family = binomial, data = train_smote.s)
pred_final <- predict(logitmod,newdata=test_final.s, type="response")
y_pred_num <- ifelse(pred_final > 0.5, 1, 0)
table(y_pred_num)
df_out <- data.frame(ID=test_finalbackup$ID, TARGET=y_pred_num)
write.csv(df_out, file = "ROC_evol_logit_227vars_sucesivo_24vars_83ROC_smote_log.csv",row.names=FALSE)
|
a2213ac6dd036a505b69fbf4c9d086a1cf8c97bd
|
ef5d2a392a111815e932a4ec758bab5cb3e073cf
|
/R/include_tweet.R
|
79fd66f787336a38108cd43f15a1ac1990972cd6
|
[
"MIT"
] |
permissive
|
gadenbuie/tweetrmd
|
19b6d74c295e289c14e9950946c29d2eaec4c280
|
c683b537a4a5234ee750fff234d21e4e9c201ba8
|
refs/heads/main
| 2023-02-07T05:50:25.881377
| 2023-02-03T02:27:22
| 2023-02-03T02:27:22
| 230,986,374
| 104
| 16
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,805
|
r
|
include_tweet.R
|
#' Include A Tweet in All R Markdown Formats
#'
#' Similar to [knitr::include_graphics()], but for tweets. In HTML documents,
#' the tweet is embedded using [tweet_embed()] and for all other documents types
#' a screen shot of the tweet is rendered and used [tweet_screenshot()]. If you
#' would rather that just the text of the tweet be included in non-HTML outputs,
#' use [tweet_embed()].
#'
#' @return An `htmltools::tagList()` to include a tweet in an HTML document, or
#' a screen shot of the tweet for use in non-HTML outputs.
#'
#' @examples
#'
#' include_tweet("https://twitter.com/dsquintana/status/1275705042385940480")
#'
#' @inheritParams tweet_embed
#' @inheritDotParams tweet_embed
#' @family Tweet-embedding functions
#' @export
include_tweet <- function(tweet_url, plain = FALSE, ...) {
if (!in_knitr() || knitr::is_html_output()) {
return(tweet_embed(tweet_url, plain = plain, ...))
}
if (isTRUE(plain) || !requires_webshot2(stop = FALSE)) {
knitr::asis_output(tweet_markdown(tweet_url, ...))
} else {
tweet_screenshot(tweet_url, ...)
}
}
#' @describeIn include_tweet Return a tweet as plain markdown.
#' @export
tweet_markdown <- function(tweet_url, ...) {
assert_string(tweet_url)
bq <- tweet_blockquote(tweet_url, ...)
html_to_markdown(bq)
}
html_to_markdown <- function(html, ...) {
rmarkdown::pandoc_available(error = TRUE)
tmpfile <- tempfile(fileext = ".html")
tmpout <- tempfile(fileext = ".md")
on.exit(unlink(c(tmpfile, tmpout)))
writeLines(format(html), tmpfile)
rmarkdown::pandoc_convert(tmpfile, from = "html", output = tmpout)
x <- paste(readLines(tmpout), collapse = "\n")
# strip twitter ?ref_src from urls
gsub("(twitter[.]com.+?)([?][^)]+)", "\\1", x)
}
in_knitr <- function() {
!is.null(knitr::current_input())
}
|
84c98e6e752b7c637e0b93f4a946fbd067831f69
|
4640be0f41a18abd7453670d944e094a36e4181d
|
/R/to_phylo.R
|
e15875a44a8f38ab88662b49eb67588561636d9a
|
[] |
no_license
|
gitter-badger/datelife
|
534059d493b186030f0c2507ce8b35027d120dec
|
94d93bb4e6cecd0884afe99571bf96b291454899
|
refs/heads/master
| 2020-06-16T22:49:58.916722
| 2019-06-20T16:25:11
| 2019-06-20T16:25:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 23,871
|
r
|
to_phylo.R
|
#' Convert patristic matrix to a phylo object. Used inside: summarize_datelife_result, CongruiyTree.
#' @param patristic_matrix A patristic matrix
#' @param clustering_method A character vector indicating the method to construct the tree. Options are
#' \describe{
#' \item{nj}{Neighbor-Joining method applied with ape::nj function.}
#' \item{upgma}{Unweighted Pair Group Method with Arithmetic Mean method applied with phangorn::upgma function.}
#' \item{bionj}{An improved version of the Neighbor-Joining method applied with ape::bionj function.}
#' \item{triangle}{Riangles method applied with ape::triangMtd function.}
#' \item{mvr}{Minimum Variance Reduction method applied with ape::mvr function.}
#' }
# We might add the option to insert a function as clustering_method.
# Before, we hard coded it to try Neighbor-Joining first; if it errors, it will try UPGMA.
# Now, it uses nj for phylo_all summary, and we are using our own algorithm to get a tree from a summary matrix
#' @param fix_negative_brlen Boolean indicating whether to fix negative branch lengths in resulting tree or not. Default to TRUE.
#' @param variance_matrix A variance matrix from a datelifeResult list of patristic matrices. Usually an output from datelife_result_variance_matrix function. Only used if clustering_method is "mvr".
#' @inheritParams tree_fix_brlen
#' @return A rooted phylo object
#' @export
patristic_matrix_to_phylo <- function(patristic_matrix, clustering_method = "nj", fix_negative_brlen = TRUE, fixing_method = 0, ultrametric = TRUE, variance_matrix = NULL) {
# patristic_matrix <- threebirds_result[[5]]
if(!inherits(patristic_matrix, "matrix") & !inherits(patristic_matrix, "data.frame")){
message("patristic_matrix argument is not a matrix")
return(NA)
}
# has to be matrix not data frame:
if(inherits(patristic_matrix, "data.frame")){
patristic_matrix <- as.matrix(patristic_matrix)
}
clustering_method <- match.arg(arg = tolower(clustering_method), choices = c("nj", "upgma", "bionj", "triangle", "mvr"), several.ok = FALSE)
if(anyNA(patristic_matrix)) {
patristic_matrix <- patristic_matrix[rowSums(is.na(patristic_matrix)) != ncol(patristic_matrix),colSums(is.na(patristic_matrix)) != nrow(patristic_matrix)]
} # Get rid of rows and columns with all NA or NaNs, leaves the ones with some NA or NaNs
if(dim(patristic_matrix)[1] == 2) {
phy <- ape::rtree(n = 2, rooted = TRUE, tip.label = rownames(patristic_matrix), br = 0.5 * patristic_matrix[1,2])
phy$clustering_method <- "ape::rtree"
phy$citation <- names(patristic_matrix)
return(phy)
}
phycluster <- cluster_patristicmatrix(patristic_matrix, variance_matrix)
phy <- choose_cluster(phycluster, clustering_method)
if(!inherits(phy, "phylo")){
message("Clustering patristic matrix to phylo failed.")
phy$citation <- names(patristic_matrix)
return(phy)
}
phy$negative_brlen <- NA
mess1 <- "Converting from patristic distance matrix to a tree resulted in some negative branch lengths;"
mess2 <- "the largest by magnitude is"
# when original tree IS ultrametric and has negative edges:
if(is.null(phy$edge.length.original) & any(phy$edge.length < 0)){
warning(paste(mess1, mess2, min(phy$edge.length)))
}
# when original tree is NOT ultrametric and has negative edges:
if(!is.null(phy$edge.length.original) & any(phy$edge.length.original < 0)){
warning(paste(mess1, mess2, min(phy$edge.length.original)))
# and when tree was forced ultrametric and there still are neg edges:
if(any(phy$edge.length < 0)){
warning(paste("After phytools::forcing.ultrametric there still are negative branch lengths;", mess2, min(phy$edge.length)))
}
}
if(any(phy$edge.length < 0)){
phy$negative_brlen <- list(edge_number = which(phy$edge.length < 0))
# phy$edge.length[which(phy$edge.length < 0)] <- 0 #sometimes NJ returns tiny negative branch lengths. https://github.com/phylotastic/datelife/issues/11
if(fix_negative_brlen){
phy$negative_brlen <- list(edge_number = which(phy$edge.length < 0))
phy <- tree_fix_brlen(tree = phy, fixing_criterion = "negative", fixing_method = fixing_method)
fixing_method_called <- as.list(environment())$fixing_method
phy$negative_brlen <- c(phy$negative_brlen, list(fixing_method = fixing_method_called))
warning(paste0("Negative branch lengths were fixed with tree_fix_brlen, fixing_method = ", fixing_method_called))
}
}
# for cases when there are no neg branch lengths to fix (or we don't want them fixed)
# and we still want the final tree to be ultrametric:
if(ultrametric){
if(is.null(phy$edge.length.original)){
phy <- force_ultrametric(phy)
}
}
phy$tip.label <- gsub(" ", "_", phy$tip.label)
phy$citation <- names(patristic_matrix)
class(phy) <- c(class(phy), "datelifeTree")
return(phy)
}
#' Force a non ultrametric phylo object to be ultrametric
#' @inheritParams phylo_check
#' @return A phylo object; the input unchanged if it is already ultrametric,
#'   otherwise a version made ultrametric with phytools::force.ultrametric
#'   (method = "extend"), keeping the original branch lengths in
#'   phy$edge.length.original. Returns NA if phy is not a phylo object.
#' @export
force_ultrametric <- function(phy){
  # enhance: check whether an edge.length.original element is already there,
  # e.g. count how many names(phy) match "edge.length.original" and append an
  # integer suffix to avoid clobbering it.
  if (!inherits(phy, "phylo")) {
    message("phy argument is not a phylo object.")
    return(NA)
  }
  if (ape::is.ultrametric(phy)) {
    # Nothing to do; hand the tree back untouched.
    return(phy)
  }
  # Preserve the original branch lengths before they are overwritten.
  phy$edge.length.original <- phy$edge.length
  phy <- phytools::force.ultrametric(tree = phy, method = "extend")
  # Record which method was used to make the tree ultrametric.
  phy$force.ultrametric <- "extend"
  phy
}
#' Cluster a patristic matrix into a tree with various methods.
#'
#' @inheritParams patristic_matrix_to_phylo
#' @return A named list of 9 trees (with NAs where a method was unsuccessful or not attempted) from clustering with NJ, UPGMA, BIONJ, triangle method and MVR, plus the variants of each that tolerate missing distances.
#' @details Methods include the following and their variants to handle missing values:
#' \describe{
#' \item{nj}{Neighbor-Joining method applied with ape::nj function.}
#' \item{upgma}{Unweighted Pair Group Method with Arithmetic Mean method applied with phangorn::upgma function.}
#' \item{bionj}{An improved version of the Neighbor-Joining method applied with ape::bionj function.}
#' \item{triangle}{Triangle method applied with ape::triangMtd function.}
#' \item{mvr}{Minimum Variance Reduction method applied with ape::mvr function.}
#' }
#' @export
cluster_patristicmatrix <- function(patristic_matrix, variance_matrix = NULL){
if(!inherits(patristic_matrix, "matrix") & !inherits(patristic_matrix, "data.frame")){
message("patristic_matrix argument is not a matrix")
return(NA)
}
# downstream clustering functions need a matrix, not a data frame:
if(inherits(patristic_matrix, "data.frame")){
patristic_matrix <- as.matrix(patristic_matrix)
}
# fewer than two taxa: nothing can be clustered
if(dim(patristic_matrix)[1] < 2) {
return(NA)
}
if(dim(patristic_matrix)[1] == 2) {
message("patristic_matrix has two taxa only, you don't need to cluster.")
return(NA)
} else {
# One slot per clustering attempt; slots stay NULL/NA when a method fails.
# The "s"-suffixed variants (njs, bionjs, triangMtds, mvrs) tolerate NAs in the matrix.
phyclust <- vector(mode = "list", length = 9)
names(phyclust) <- c("nj", "njs", "upgma", "upgma_daisy", "bionj", "bionjs", "triangMtd", "triangMtds", "mvrs")
phyclust$nj <- tryCatch(ape::nj(patristic_matrix), error = function (e) NA)
if(inherits(phyclust$nj, "phylo")){
phyclust$nj <- tryCatch(phytools::midpoint.root(phyclust$nj),
error = function(e) NA)
}
# case when we have missing data (NA) on patristic_matrix and regular nj does not work; e.g. clade thraupidae SDM.results have missing data, and upgma chokes
# njs appears to be the only option for missing data with NJ
# but see Criscuolo and Gascuel. 2008. Fast NJ-like algorithms to deal with incomplete distance matrices. BMC Bioinformatics 9:166
phyclust$njs <- tryCatch(ape::njs(patristic_matrix), error = function(e) NA)
if(inherits(phyclust$njs, "phylo")){
phyclust$njs <- tryCatch(phytools::midpoint.root(phyclust$njs),
error = function(e) NA)
}
# root each successful tree on the midpoint (only for trees with ape::Ntip(phy) > 2);
# using phytools::midpoint.root instead of phangorn::midpoint because it's less error prone.
# sometimes, nj and njs do not work if patristic matrices come from sdm; it was probably the midpoint function from phangorn. Using phytools one now.
# use regular upgma (or the daisy + hclust variant below) when nj or midpoint.root fail
phyclust$upgma <- tryCatch(phangorn::upgma(patristic_matrix), error = function (e) NA)
# daisy-based UPGMA variant for matrices with missing data (NA), where regular upgma does not work; e.g. clade thraupidae SDM.results have missing data, and upgma chokes
phyclust$upgma_daisy <- tryCatch({
# using daisy to calculate dissimilarity matrix instead of as.dist (which is used in phangorn::upgma) when there are NAs in the matrix; agnes does not work with NAs either.
patristic_matrix <- patristic_matrix*0.5 # halving because it's giving ages that are too old, so it must be taking total tip-to-tip distance
DD <- cluster::daisy(x = patristic_matrix, metric = "euclidean")
hc <- stats::hclust(DD, method = "average") # original clustering method from phangorn::upgma. Using agnes() instead hclust() to cluster gives the same result.
phy <- ape::as.phylo(hc)
phy <- phylobase::reorder(phy, "postorder")
phy
}, error = function(e) NA)
# }
phyclust$bionj <- tryCatch(ape::bionj(patristic_matrix), error = function (e) NA)
# bionjs is the missing-data-tolerant variant of bionj
# but see Criscuolo and Gascuel. 2008. Fast NJ-like algorithms to deal with incomplete distance matrices. BMC Bioinformatics 9:166
phyclust$bionjs <- tryCatch(ape::bionjs(patristic_matrix), error = function(e) NA)
if(inherits(phyclust$bionjs, "phylo")){
phyclust$bionjs <- tryCatch(phytools::midpoint.root(phyclust$bionjs),
error = function(e) NA)
}
# } else {
if(inherits(phyclust$bionj, "phylo")){
phyclust$bionj <- tryCatch(phytools::midpoint.root(phyclust$bionj),
error = function(e) NA)
}
phyclust$triangMtd <- tryCatch(ape::triangMtd(patristic_matrix), error = function (e) NA)
# triangMtds is the missing-data-tolerant variant of triangMtd
phyclust$triangMtds <- tryCatch(ape::triangMtds(patristic_matrix), error = function(e) NA)
if(inherits(phyclust$triangMtds, "phylo")){
phyclust$triangMtds <- tryCatch(phytools::midpoint.root(phyclust$triangMtds),
error = function(e) NA)
}
# } else {
if(inherits(phyclust$triangMtd, "phylo")){
phyclust$triangMtd <- tryCatch(phytools::midpoint.root(phyclust$triangMtd),
error = function(e) NA)
}
# MVR is only attempted when a variance matrix is supplied:
if(inherits(variance_matrix, "matrix")){
# not possible to use the version for complete matrices; how to fill a variance matrix with missing values? Filling it with 0s runs but the output trees are network like...
phyclust$mvrs <- tryCatch(ape::mvrs(patristic_matrix, variance_matrix), error = function (e) NA)
if(inherits(phyclust$mvrs, "phylo")){
# discard MVR trees with undefined branch lengths
if(any(is.na(phyclust$mvrs$edge.length))){
phyclust$mvrs <- NA
}
}
}
return(phyclust)
}
}
#' Choose an ultrametric phylo object from cluster_patristicmatrix obtained with a particular clustering method, or the next best tree.
#' If there are not any ultrametric trees, it does not force them.
#'
#' @inheritParams patristic_matrix_to_phylo
#' @param phycluster An output from cluster_patristicmatrix
#' @return A phylo object (with a $clustering_method element naming the method
#'   that produced it) or NA when no usable tree is found.
#' @export
choose_cluster <- function(phycluster, clustering_method = "nj"){
  if (!mode(phycluster) %in% "list") {
    message("phycluster argument is not a list; check that out.")
    return(NA)
  }
  if (length(phycluster) == 0) {
    message("phycluster argument is length 0")
    return(NA)
  }
  if (inherits(phycluster, "phylo")) {
    # cluster_patristicmatrix can hand back a two-tip tree directly.
    return(phycluster)
  }
  # It is a list of results from cluster_patristicmatrix; drop the failed or
  # unattempted methods (anything that is not a phylo object).
  fail <- sapply(phycluster, function(x) !inherits(x, "phylo"))
  if (all(fail)) {
    message("The patristic matrix could not be transformed into a tree with any of the default methods (NJ, UPGMA)")
    return(NA)
  }
  phycluster <- phycluster[!fail]
  if (length(phycluster) == 1) {
    # Only one method succeeded; no choice to make.
    phy <- phycluster[[1]]
    phy$clustering_method <- names(phycluster)
    return(phy)
  }
  # Strict ultrametricity check, plus a relaxed one (tolerance = 2):
  ultram <- sapply(phycluster, ape::is.ultrametric)
  ultram2 <- sapply(phycluster, ape::is.ultrametric, 2)
  if (length(ultram) == 0 && length(ultram2) == 0) {
    message(paste("The patristic matrix could not be transformed into an ultrametric tree with any of the default methods:", toupper(names(phycluster))))
  }
  # choice marks the trees produced by the requested clustering_method.
  choice <- grepl(clustering_method, names(phycluster))
  # Preference order: (1) chosen method & strictly ultrametric, (2) any other
  # method & strictly ultrametric, (3) chosen method & nearly ultrametric,
  # (4) any other method & nearly ultrametric. Take the first tree that
  # satisfies the highest-priority criterion.
  # BUGFIX: the last fallback previously tested length(ff) != 1, which entered
  # the branch on an EMPTY ff (indexing with NA) and skipped it when exactly
  # one candidate existed (falling off the end and returning NULL).
  candidates <- list(choice & ultram, !choice & ultram, choice & ultram2, !choice & ultram2)
  for (keep in candidates) {
    ff <- which(keep)
    if (length(ff) != 0) {
      ff <- ff[1]
      phy <- phycluster[[ff]]
      phy$clustering_method <- names(phycluster)[ff]
      return(phy)
    }
  }
  # No tree satisfied even the relaxed ultrametricity criterion.
  return(NA)
}
#' Go from a summary matrix to an ultrametric phylo object.
#' @param summ_matrix A summary patristic distance matrix from sdm or median. See details.
#' @inheritParams datelife_query_check
#' @param total_distance Boolean. If TRUE it will divide the matrix by half, if FALSE it will take it as is.
#' @param use A character vector indicating what type of age to use for summary. One of the following
#' \describe{
#' \item{mean}{It will use the mean of the node age distributions.}
#' \item{min}{It will use the minimum age from the node age distributions.}
#' \item{max}{Choose this if you want to be conservative; it will use the maximum age from the node age distributions.}
#' }
#' @param target_tree A phylo object. Use this in case you want a particular backbone for the output tree.
#' @inheritDotParams get_otol_synthetic_tree
#' @return An ultrametric phylo object.
#' @details It can take a regular patristic distance matrix, but there are simpler methods for that implemented in patristic_matrix_to_phylo.
#' @export
summary_matrix_to_phylo <- function(summ_matrix, datelife_query = NULL, total_distance = TRUE, use = "mean", target_tree = NULL, ...){
# enhance: add other methods, not only bladj.
# for debugging here
# summ_matrix <- subset2_sdm_matrix
# summ_matrix <- median_matrix
# NOTE(review): "median" is accepted by match.arg below but no node_ages branch
# handles it, so use = "median" would leave node_ages undefined — TODO confirm.
use <- match.arg(use, c("mean", "median", "min", "max"))
if(!inherits(summ_matrix, "matrix") & !inherits(summ_matrix, "data.frame")){
message("summ_matrix argument is not a matrix")
return(NA)
}
if(!is.null(datelife_query)){
input_ott_match <- suppressMessages(check_ott_input(input = datelife_query, ...))
# match input_ott_match and unique(c(colnames(summ_matrix), rownames(summ_matrix)))
# change the names in target tree to the names from summ_matrix (which are the ones that come from the original query)
}
# summ_matrix <- data.frame(summ_matrix)
# everything up to patristic_matrix_to_phylo is ok if it is a data frame too,
# but coerce to matrix for consistency (data.frame() mangles column names with dots):
if(inherits(summ_matrix, "data.frame")){
summ_matrix <- as.matrix(summ_matrix)
colnames(summ_matrix) <- gsub("\\.", " ", colnames(summ_matrix))
}
if(total_distance){
summ_matrix <- summ_matrix * 0.5 # because it's total distance tip to tip
}
# get a backbone tree:
# chronogram <- geiger::PATHd8.phylo(phy_target, calibrations)
# try(chronogram <- geiger::PATHd8.phylo(phy_target, calibrations), silent = TRUE)
if(!inherits(target_tree, "phylo")){
target_tree <- suppressMessages(get_otol_synthetic_tree(input = colnames(summ_matrix), ...))
if(!inherits(target_tree, "phylo")){
# fall back to clustering the summary matrix itself; we should find a better way to do this, but it should be ok for now:
target_tree <- suppressWarnings(suppressMessages(patristic_matrix_to_phylo(summ_matrix, ultrametric = TRUE)))
# target_tree <- consensus(phyloall, p = 0.5) # can't use consensus here: not all trees have the same number of tips
}
target_tree <- ape::collapse.singles(target_tree)
# ape::is.ultrametric(target_tree)
# ape::is.binary(target_tree)
# plot(target_tree, cex = 0.5)
}
if(!inherits(target_tree, "phylo")){
message("target_tree is missing or not a phylo object and a backbone tree could not be constructed; returning NA")
message("Hint: Was summ_matrix constructed from an object with no good groves? Try running get_best_grove first.")
# enhance: add a more formal test of best grove
return(NA)
}
# bladj only needs the topology; drop any branch lengths from the backbone:
target_tree$edge.length <- NULL
target_tree$edge.length.original <- NULL
target_tree$tip.label <- gsub(" ", "_", target_tree$tip.label)
# test that taxonA and taxonB are all in target tree tip labels
rownames(summ_matrix) <- gsub(" ", "_", rownames(summ_matrix))
colnames(summ_matrix) <- gsub(" ", "_", colnames(summ_matrix))
# find taxa missing in target tree and remove them from summ_matrix
missing <- is.na(match(colnames(summ_matrix), target_tree$tip.label))
whichmiss <- colnames(summ_matrix)[missing]
if(any(missing)){
message("Some taxa in summ_matrix are not in target_tree (", paste0(whichmiss, collapse = ", "), ")")
missingrow <- is.na(match(rownames(summ_matrix), target_tree$tip.label))
summ_matrix <- summ_matrix[!missingrow, !missing]
}
# to be get_all_calibrations.data.frame:
calibrations <- summarize_summary_matrix(summ_matrix)
# ATTENTION
# start of use_all_calibrations_bladj, that contains match_all_calibrations
# use_all_calibrations_bladj(phy = target_tree, calibrations = calibrations, type = use)
# start of match_all_calibrations:
# get the coincident node numbers (MRCA of each taxonA/taxonB pair):
# ape::is.binary(target_tree)
target_tree_nodes <- sapply(seq(nrow(calibrations)), function(i)
phytools::findMRCA(tree = target_tree,
tips = as.character(calibrations[i,c("taxonA", "taxonB")]),
type = "node"))
# shift from absolute node numbers to internal-node indices:
target_tree_nodes <- target_tree_nodes - ape::Ntip(target_tree)
all_nodes <- sort(unique(target_tree_nodes))
# get the node age distribution (all "Age" values mapping to each node):
all_ages <- lapply(all_nodes, function(i) calibrations[target_tree_nodes == i, "Age"])
# any(sapply(all_ages, is.null)) # if FALSE, all nodes have at least one calibration.
calibrations2 <- data.frame(MRCA = paste0("n", all_nodes), MinAge = sapply(all_ages, min), MaxAge= sapply(all_ages, max))
# calibrations2$MRCA is a factor so it has to be made as.character to work with bladj
if(all(all_nodes < ape::Ntip(target_tree))){
all_nodes_numbers <- all_nodes + ape::Ntip(target_tree)
node_index <- "consecutive"
} else {
all_nodes_numbers <- all_nodes
node_index <- "node_number"
}
target_tree$node.label <- NULL # make sure it's null, so we can rename all nodes of interest to match our labels
target_tree <- tree_add_nodelabels(tree = target_tree, node_index = node_index) # all nodes need to be named so make_bladj_tree runs properly
# end of match_all_calibrations
# pick the node age to feed bladj according to the "use" argument:
if("mean" %in% use){
node_ages <- sapply(seq(nrow(calibrations2)), function(i) sum(calibrations2[i,c("MinAge", "MaxAge")])/2)
}
if("min" %in% use){
node_ages <- calibrations2[,c("MinAge")]
}
if("max" %in% use){
node_ages <- calibrations2[,c("MaxAge")]
}
new_phy <- make_bladj_tree(tree = target_tree, nodenames = as.character(calibrations2$MRCA),
nodeages = node_ages)
new_phy$dating_method <- "bladj"
new_phy$calibration_distribution <- stats::setNames(all_ages, all_nodes_numbers)
# new_phy$calibration_MIN <- calibrations2$MinAge
# new_phy$calibration_MAX <- calibrations2$MaxAge
# new_phy$calibration_MRCA <- all_nodes_numbers
# end use_all_calibrations_bladj
new_phy$clustering_method <- NULL
new_phy$ott_ids <- NULL
# carry over ott_ids from the backbone, reordered to match the new tip order:
if(!is.null(target_tree$ott_ids)){
tt <- match(new_phy$tip.label, target_tree$tip.label)
# match(c("a", "b", "c", "d"), c("c", "d", "a", "a", "a", "b"))
new_phy$ott_ids <- target_tree$ott_ids[tt]
}
return(new_phy)
}
#' Get min, mean and max summary chronograms from a summary matrix of a datelifeResult object.
#' @inheritParams summary_matrix_to_phylo
#' @inheritDotParams summary_matrix_to_phylo
#' @export
#' @details
#' With the function summary_matrix_to_phylo users can choose the minimum, mean or maximum ages from the summary matrix as calibration points to get a single summary chronogram.
#' With this function users get all three summary chronograms in a multiphylo object.
# modified from get_all_summaries function from datelife_examples
summary_matrix_to_phylo_all <- function(summ_matrix, target_tree = NULL, ...){
  # Build one summary chronogram per age-summary type, in a fixed order.
  age_types <- c("mean", "min", "max")
  chronograms <- lapply(age_types, function(age_type) {
    summary_matrix_to_phylo(summ_matrix = summ_matrix, use = age_type,
                            target_tree = target_tree, ...)
  })
  # c() on phylo objects combines them into a multiPhylo object.
  res <- c(chronograms[[1]], chronograms[[2]], chronograms[[3]])
  names(res) <- paste0(age_types, "_tree")
  res
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.