blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a80ea44d0a7de279d0e0699a6a326cc2f0d8611e
|
f860a2ddbebe96ad25f2347823d1ad31a5ae949e
|
/R/inclass/class_5.R
|
5e577869d59c232fad11fb8e2075e7d932cf20b6
|
[
"MIT"
] |
permissive
|
mespe/STS198
|
edd0e966a329b8701048e2c8371a57b0a261f2fa
|
4dd8184e67689ff9d0af3dab1813973e46f59df3
|
refs/heads/master
| 2021-01-22T18:51:06.426391
| 2017-09-15T23:10:34
| 2017-09-15T23:10:34
| 85,125,705
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,917
|
r
|
class_5.R
|
# Plotting with ggplot2
# Apr 17, 2017
# STS 198
# Load the package we need with the library command
# Remember, the package has to be installed first,
# and will need to be reloaded each new R session
library(ggplot2)
# Load the health-costs data (creates the 'health' data frame)
load("../data/health_costs.rda")
# The basic structure of ggplot is:
ggplot(health, aes(x = Average.Total.Payments))
# This builds the plot and tells ggplot which variables
# go on the x and y axes, but does not fill in the "middle"
# To fill in the middle, we need to give it a geom (geometry)
# The most basic geom is the histogram
ggplot(health, aes(x = Average.Total.Payments)) +
  geom_histogram()
# If we want to compare groups, we can do this inside the function
ggplot(health, aes(x = Average.Total.Payments,
                   fill = Provider.City == "SACRAMENTO")) +
  geom_histogram()
# This is not quite right - there are so many fewer Sacramento values
# that you cannot see them on the plot
# A density plot solves this issue
ggplot(health, aes(x = Average.Total.Payments,
                   fill = Provider.City == "SACRAMENTO")) +
  geom_density(alpha = 0.5)
# Looking at only the Sacramento data
sac = subset(health, Provider.City == "SACRAMENTO")
# Even though the density plot worked well with the
# question: how does Sacramento compare to the rest of the US?
# It does not work well here
ggplot(sac, aes(x = Average.Total.Payments,
                fill = Provider.Name)) +
  geom_density(alpha = 0.4)
# There are too many groups on top of each other
# We can try just colored lines without the fill
ggplot(sac, aes(x = Average.Total.Payments,
                color = Provider.Name)) +
  geom_density(alpha = 0.5)
# Not much better
# What we really want is each category grouped together
# and then the categories side-by-side
# Boxplots are good for this
ggplot(sac, aes(x = Provider.Name,
                y = Average.Total.Payments,
                color = Provider.Name)) +
  geom_boxplot()
# A violin plot shows the full distribution shape instead of quartiles
ggplot(sac, aes(x = Provider.Name,
                y = Average.Total.Payments,
                color = Provider.Name)) +
  geom_violin()
# Boxplots work well for a categorical and a numeric value
# but don't work with 2 numeric values
# scatterplots are better to compare two numeric variables
# Just like before, we can color the point by group
ggplot(sac, aes(x = Average.Covered.Charges,
                y = Average.Total.Payments,
                color = Provider.Name)) +
  geom_point()
# To help us see the relationships between the groups
# we can add a smoother
ggplot(sac, aes(x = Average.Covered.Charges,
                y = Average.Total.Payments,
                color = Provider.Name)) +
  geom_point() +
  geom_smooth()
# The last plot is still really busy
# there are too many points at the low values
# one way to deal with this is to separate
# the different facilities into separate plots
ggplot(sac, aes(x = Average.Covered.Charges,
                y = Average.Total.Payments,
                color = Provider.Name)) +
  geom_point(aes(size = Total.Discharges)) +
  geom_smooth() +
  facet_wrap(~Provider.Name)
################################################################################
# Let's look at the heart-failure diagnosis groups in the Sacramento data
sort(unique(sac$DRG.Definition))
# Below we use a new function, the %in% operator
# %in% returns TRUE in each position where the element of the vector
# on the left side appears anywhere in the vector on the right side
var = c("291 - HEART FAILURE & SHOCK W MCC",
        "292 - HEART FAILURE & SHOCK W CC",
        "293 - HEART FAILURE & SHOCK W/O CC/MCC")
# %in% is an infix operator, so it doesn't look like a regular function call
sac_heart = subset(sac, DRG.Definition %in% var)
# same as (DRG.Def == var[1]) | (DRG.Def == var[2]) | ...
ggplot(sac_heart, aes(x = Provider.Name,
                      y = Average.Total.Payments,
                      color = Provider.Name)) +
  geom_boxplot()
|
a937860b9fe954c13611820db6e7bd3719e1a6ac
|
525ae71105cf4e428dcdf7f991b5a930e7e50c77
|
/doc/figures/kernel-expt-extra.R
|
3835c30100bc6fc7bc40cef74d5e5ded7336724a
|
[] |
no_license
|
rmcclosk/thesis
|
3e6d04d767fb31da2dc01ae9911d57dfcb6d414a
|
1dfaece42f34ad63741e04199fbebd6ed79c88cf
|
refs/heads/master
| 2021-01-21T18:46:17.045602
| 2016-07-27T17:43:26
| 2016-07-27T17:43:26
| 44,255,896
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,839
|
r
|
kernel-expt-extra.R
|
#!/usr/bin/env Rscript
# Generate small illustrative figures: toy contact networks, infections on
# those networks, coalescent trees on the infected tips, and cartoon
# scatterplots (PCA, kernel SVR, SVR, regression panels, going by the
# output file names). All output is written as small transparent PNGs.
library(igraph)
library(ape)
nnode <- 7   # vertices per toy network
ninf <- 4    # number of "infected" vertices (drawn in inf.col)
set.seed(0)  # fixed seed so the generated figures are reproducible
pow <- c(0.5, 1, 1.5)            # preferential-attachment power, one per figure
col <- c("red", "blue", "green") # one colour theme per figure
inf.col <- "black"               # colour marking infected vertices/edges
for (i in 1:3) {
  # Toy contact network: preferential-attachment graph with power pow[i]
  g <- sample_pa(nnode, m=2, power=pow[i], directed=FALSE)
  png(sprintf("tinynet%d.png", i), bg="transparent", width=100, height=100)
  par(mar=c(0, 0, 0, 0) + 0.1)
  plot(g, vertex.size=10, vertex.label=NA, vertex.color=col[i],
       vertex.frame.color=col[i], edge.color=col[i], edge.width=5)
  dev.off()
  # Same network with the first ninf vertices coloured as infected
  png(sprintf("tinyepi%d.png", i), bg="transparent", width=100, height=100)
  par(mar=c(0, 0, 0, 0) + 0.1)
  ecol <- rep(col[i], ecount(g))
  vcol <- rep(c(inf.col, col[i]), c(ninf, vcount(g)-ninf))
  # Recolour ninf-1 of the edges among the infected vertices
  # (presumably the transmission edges); g[[...]] with edges=TRUE extracts
  # the edge ids of the infected-infected subgraph -- TODO confirm
  ecol[sample(unique(unlist(g[[1:ninf,1:ninf,edges=TRUE]])), ninf-1)] <- inf.col
  plot(g, vertex.size=10, vertex.label=NA, vertex.color=vcol,
       vertex.frame.color=vcol, edge.color=ecol, edge.width=5)
  dev.off()
  # Random coalescent tree on the ninf infected tips
  png(sprintf("tinytree%d.png", i), bg="transparent", width=100, height=100)
  par(mar=c(0, 0, 0, 0) + 0.1)
  t <- rcoal(ninf)
  plot(t, direction="down", edge.width=5, edge.color=col[i], show.tip.label=FALSE)
  dev.off()
}
# Three horizontally separated clusters of npt points for the PCA cartoon
npt <- 5
x <- c(rnorm(npt, sd=0.2), rnorm(npt, mean=2, sd=0.2), rnorm(npt, mean=4, sd=0.2))
y <- rnorm(npt*3)
png("tinypca.png", bg="transparent", width=500, height=100)
par(mar=c(0, 0, 0, 0) + 0.1)
plot(x, y, pch=16, cex=3, col=rep(col, each=npt))
box(lwd=4)
dev.off()
# Kernel-regression cartoon: spline curve plus a wide translucent band
x <- c(rnorm(npt, sd=0.2), rnorm(npt, mean=2, sd=0.2), rnorm(npt, mean=4, sd=0.2))
y <- c(rnorm(npt, mean=1, sd=0.2), rnorm(npt, mean=0, sd=0.2), rnorm(npt, mean=1, sd=0.2))
png("tinyksvr.png", bg="transparent", width=100, height=100, type="cairo")
par(mar=c(0, 0, 0, 0) + 0.1)
plot(splinefun(x=c(0, 2, 4), y=c(1, 0, 1)), lwd=4, xlim=c(-0.5, 4.5))
plot(splinefun(x=c(0, 2, 4), y=c(1, 0, 1)), lwd=32, add=TRUE, xlim=c(-0.5, 4.5),
     col=rgb(0, 0, 0, alpha=0.5))
points(x, y, pch=16, cex=2, col=rep(col, each=npt))
box(lwd=4)
dev.off()
# Linear-regression cartoon with a translucent margin band
x <- c(rnorm(npt, sd=0.2), rnorm(npt, mean=2, sd=0.2), rnorm(npt, mean=4, sd=0.2))
y <- c(rnorm(npt, mean=0, sd=0.2), rnorm(npt, mean=0.33, sd=0.2), rnorm(npt, mean=1, sd=0.2))
png("tinysvr.png", bg="transparent", width=100, height=100, type="cairo")
par(mar=c(0, 0, 0, 0) + 0.1)
plot(x, y, pch=16, cex=2, col=rep(col, each=npt), type="n")
abline(a=0, b=0.2, lwd=4)
abline(a=0, b=0.2, lwd=32, col=rgb(0, 0, 0, alpha=0.5))
points(x, y, pch=16, cex=2, col=rep(col, each=npt))
box(lwd=4)
dev.off()
# Plain regression cartoon: points plus a fitted-style line
x <- c(rnorm(npt, sd=0.2), rnorm(npt, mean=2, sd=0.2), rnorm(npt, mean=4, sd=0.2))
y <- c(rnorm(npt, mean=0, sd=0.2), rnorm(npt, mean=0.5, sd=0.2), rnorm(npt, mean=1, sd=0.2))
png("tinyreg.png", bg="transparent", width=100, height=100)
par(mar=c(0, 0, 0, 0) + 0.1)
plot(x, y, pch=16, cex=2, col=rep(col, each=npt))
abline(a=0, b=0.25, lwd=4)
box(lwd=4)
dev.off()
|
cee547a2f39e69daf3b6449167ec917d2b5527b9
|
9d34bd30b0396a15cd4ba9bd853dd44c5ff24939
|
/Chapter03/7_keras_mnist_vae_outlier.R
|
4d14aff61ff49d563cb9bc62dc3fc4522ed0a479
|
[
"MIT"
] |
permissive
|
leetschau/R-Deep-Learning-Projects
|
1db7edcf4adf50ab399a40e8e8864f62698584a8
|
162902e51c873ad1b6cbb95d32ba4e49684a53e0
|
refs/heads/master
| 2020-03-26T23:25:46.872402
| 2018-08-27T02:12:05
| 2018-08-27T02:12:05
| 145,541,565
| 0
| 0
|
MIT
| 2018-08-21T09:34:59
| 2018-08-21T09:34:58
| null |
UTF-8
|
R
| false
| false
| 3,266
|
r
|
7_keras_mnist_vae_outlier.R
|
library(keras)
# Use Python-style 0-based tensor extraction: the option controls R's
# 1-based indexing on tensors, and FALSE turns it off. The column slices
# inside sample_z() below depend on this setting.
options(tensorflow.one_based_extract = FALSE)
# Handle to the Keras backend, for tensor ops in the sampling layer/loss
K <- keras::backend()
# Load MNIST and split into images (x) and labels (y)
mnist <- dataset_mnist()
X_train <- mnist$train$x
y_train <- mnist$train$y
X_test <- mnist$test$x
y_test <- mnist$test$y
## Exclude "0" from the training set. "0" will be the outlier
outlier_idxs <- which(y_train!=0, arr.ind = T)
X_train <- X_train[outlier_idxs,,]
# Binary evaluation labels: digit 0 = "outlier", everything else = "normal"
y_test <- sapply(y_test, function(x){ ifelse(x==0,"outlier","normal")})
# reshape: flatten each 28x28 image to a 784-length vector
dim(X_train) <- c(nrow(X_train), 784)
dim(X_test) <- c(nrow(X_test), 784)
# rescale pixel values to [0, 1]
X_train <- X_train / 255
X_test <- X_test / 255
# Architecture sizes: 784 -> 256 -> 2-dimensional latent space
original_dim <- 784
latent_dim <- 2
intermediate_dim <- 256
# Model definition --------------------------------------------------------
# Encoder: input -> hidden layer -> latent mean and log-variance
X <- layer_input(shape = c(original_dim))
hidden_state <- layer_dense(X, intermediate_dim, activation = "relu")
z_mean <- layer_dense(hidden_state, latent_dim)
z_log_sigma <- layer_dense(hidden_state, latent_dim)
# Reparameterization trick: z = mean + exp(log_sigma / 2) * epsilon.
# NOTE(review): these slices use the 0-based extraction enabled above, so
# params[,0:1] should be the mean columns and params[,2:3] the log-sigma
# columns of the concatenated tensor -- confirm against layer_concatenate.
sample_z<- function(params){
  z_mean <- params[,0:1]
  z_log_sigma <- params[,2:3]
  epsilon <- K$random_normal(
    shape = c(K$shape(z_mean)[[1]]),
    mean=0.,
    stddev=1
  )
  z_mean + K$exp(z_log_sigma/2)*epsilon
}
# Latent sample layer built from the concatenated (mean, log-sigma) tensor
z <- layer_concatenate(list(z_mean, z_log_sigma)) %>%
  layer_lambda(sample_z)
# we instantiate these layers separately so as to reuse them later
decoder_hidden_state <- layer_dense(units = intermediate_dim, activation = "relu")
decoder_mean <- layer_dense(units = original_dim, activation = "sigmoid")
hidden_state_decoded <- decoder_hidden_state(z)
X_decoded_mean <- decoder_mean(hidden_state_decoded)
# end-to-end autoencoder
variational_autoencoder <- keras_model(X, X_decoded_mean)
# encoder, from inputs to latent space
encoder <- keras_model(X, z_mean)
# generator, from latent space to reconstructed inputs
decoder_input <- layer_input(shape = latent_dim)
decoded_hidden_state_2 <- decoder_hidden_state(decoder_input)
decoded_X_mean_2 <- decoder_mean(decoded_hidden_state_2)
generator <- keras_model(decoder_input, decoded_X_mean_2)
# VAE loss: reconstruction cross-entropy plus the KL divergence of the
# latent distribution from a standard normal
loss_function <- function(X, decoded_X_mean){
  cross_entropy_loss <- loss_binary_crossentropy(X, decoded_X_mean)
  kl_loss <- -0.5*K$mean(1 + z_log_sigma - K$square(z_mean) - K$exp(z_log_sigma), axis = -1L)
  cross_entropy_loss + kl_loss
}
variational_autoencoder %>% compile(optimizer = "rmsprop", loss = loss_function)
history <- variational_autoencoder %>% fit(
  X_train, X_train,
  shuffle = TRUE,
  epochs = 10,
  batch_size = 256,
  validation_data = list(X_test, X_test)
)
plot(history)
# Reconstruct on the test set
preds <- variational_autoencoder %>% predict(X_test)
# Outlier score: squared reconstruction error per image
error <- rowSums((preds-X_test)**2)
eval <- data.frame(error=error, class=as.factor(y_test))
library(dplyr)
library(ggplot2)
# Reconstruction error by class -- outliers should score higher
eval %>%
  ggplot(aes(x=class,fill=class,y=error))+geom_boxplot()
# Classify as outlier when the reconstruction error exceeds the threshold
threshold <- 5
y_preds <- sapply(error, function(x){ifelse(x>threshold,"outlier","normal")})
# Confusion matrix
table(y_preds,y_test)
# ROC curve and AUC using the raw reconstruction error as the score
library(ROCR)
pred <- prediction(error, y_test)
perf <- performance(pred, measure = "tpr", x.measure = "fpr")
auc <- unlist(performance(pred, measure = "auc")@y.values)
auc
plot(perf, col=rainbow(10))
|
d82c82e261eda6ddd678d4edfd9477d7f02b4818
|
7f3b374a7b55239d572950f7e4e255e85bfeb98e
|
/tests/testthat/test-goslim.R
|
3ec54ad35f7414997d71687c4cd8bb1630f4fa61
|
[
"Apache-2.0"
] |
permissive
|
lianos/msigdb.data
|
be31b676676eeb694afb6dc58b3c7f38d73295c1
|
68db424ae9a617e4856487733df3d43dad4aed86
|
refs/heads/master
| 2020-07-23T16:49:24.592068
| 2020-06-15T15:47:57
| 2020-06-15T15:47:57
| 207,635,979
| 0
| 0
|
NOASSERTION
| 2020-02-13T00:12:58
| 2019-09-10T18:39:18
|
R
|
UTF-8
|
R
| false
| false
| 642
|
r
|
test-goslim.R
|
context("GO slim")

test_that("go_slim = 'generic' returns slim ontology", {
  # Retrieve the C5 collection with and without GO-slim filtering
  full_c5 <- msigdb_retrieve("C5", go_slim = FALSE)
  slim_c5 <- msigdb_retrieve("C5", go_slim = TRUE)

  # Collapse to one row per gene-set name, keeping the first id and
  # subcategory plus the member count, ordered for a stable comparison.
  collapse_sets <- function(sets) {
    sets %>%
      group_by(name) %>%
      summarize(gs_id = gs_id[1], subcategory = subcategory[1], n = n()) %>%
      ungroup() %>%
      arrange(subcategory, name)
  }
  full_sum <- collapse_sets(full_c5)
  slim_sum <- collapse_sets(slim_c5)

  # The slim ontology has strictly fewer gene sets ...
  expect_lt(nrow(slim_sum), nrow(full_sum))
  # ... but still covers the same set of subcategories.
  expect_setequal(slim_sum[["subcategory"]], full_sum[["subcategory"]])
})
|
22ec42a50d8f783a15334b085d52530870081160
|
7ef89cac1d731432e4b3e741920a8e13aa793b13
|
/man/saveXLworkbook.Rd
|
f6c617e854a2ea59140fdcf2220a38d25d85f5a6
|
[] |
no_license
|
cran/xlsimple
|
70d8a6285f07082dbbe8c49e0dbac70627278a4c
|
3f8a8f3454ef8a1c169de070f45b28594e8680bd
|
refs/heads/master
| 2021-01-18T03:14:35.446704
| 2020-07-31T23:50:03
| 2020-07-31T23:50:03
| 85,837,170
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,300
|
rd
|
saveXLworkbook.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/saveXLworkbook.R
\name{saveXLworkbook}
\alias{saveXLworkbook}
\title{Save 'Excel' Workbook to disk}
\usage{
saveXLworkbook(wbList, fname = "xl.Out.xlsx", timeStamp = FALSE, clean = TRUE)
}
\arguments{
\item{wbList}{list with workbook and default cell styles (i.e., output from getXLsettings)}
\item{fname}{'Excel' file name}
\item{timeStamp}{Logical field to include date/time stamp in file name (FALSE [default]).}
\item{clean}{Logical field indicating whether to remove original sheets in workbook}
}
\value{
n/a
}
\description{
Save 'Excel' Workbook to disk
}
\examples{
XL.wb <- getXLsettings()
XL.wb <- addXLsheetStd(XL.wb, mtcars)
XL.wb <- addXLsheetStd(XL.wb, mtcars, "mtcars1")
XL.wb <- addXLsheetStd(XL.wb, mtcars, "mtcars2", "Standard mtcars data frame")
XL.wb$pName <- "ProjName" # optional, blank if not included
XL.wb$pDesc <- "ProjDesc" # optional, blank if not included
saveXLworkbook(XL.wb, file.path(tempdir(), 'myXLfile.xlsx'), timeStamp=FALSE, clean=FALSE)
saveXLworkbook(XL.wb, file.path(tempdir(), 'myXLfile.xlsx'), timeStamp=TRUE, clean=FALSE)
saveXLworkbook(XL.wb, file.path(tempdir(), 'myXLfile.xlsx'), timeStamp=TRUE, clean=TRUE)
saveXLworkbook(XL.wb, file.path(tempdir(), 'myXLfile.xlsx'))
}
|
aeb9c9cea212b8a7ddd98ae299a886f012caf287
|
32d68b278e9ef10d76e78acd1cef62407826f9dc
|
/cfl/server.R
|
f0c1d9a99df868bf88716f69682053bab92af120
|
[
"MIT"
] |
permissive
|
ycalvinner/shiny-server
|
78b2c978f72a0f0a2aeeca8793469758de7ee233
|
ebbc091535197dada1b14afa87e2cd12f832f797
|
refs/heads/master
| 2021-02-07T17:33:11.686108
| 2020-02-29T23:57:52
| 2020-02-29T23:57:52
| 244,057,426
| 0
| 0
|
MIT
| 2020-02-29T23:34:38
| 2020-02-29T23:34:37
| null |
UTF-8
|
R
| false
| false
| 7,762
|
r
|
server.R
|
library(shiny)
library(shinyjs)
library(dplyr)
library(ggvis)
library(reshape2)

# When TRUE, jump straight to a hard-coded game page on startup (dev aid)
TEST_GAMEPAGE <- FALSE

# Hard-coded YouTube highlight clips, one of which is played at random
# whenever playback crosses a touchdown event
demo_clips <- list(
  list(videoId = "snwanVaPMys", startSeconds = 126, endSeconds = 135),
  list(videoId = "snwanVaPMys", startSeconds = 145, endSeconds = 153),
  list(videoId = "snwanVaPMys", startSeconds = 196, endSeconds = 210),
  list(videoId = "snwanVaPMys", startSeconds = 260, endSeconds = 296),
  list(videoId = "snwanVaPMys", startSeconds = 359, endSeconds = 368),
  list(videoId = "mefLj3eB7Gc", startSeconds = 8, endSeconds = 30),
  list(videoId = "mefLj3eB7Gc", startSeconds = 63, endSeconds = 75),
  list(videoId = "mefLj3eB7Gc", startSeconds = 122, endSeconds = 138),
  list(videoId = "mefLj3eB7Gc", startSeconds = 145, endSeconds = 157),
  list(videoId = "mefLj3eB7Gc", startSeconds = 183, endSeconds = 195)
)

# Shiny server: drives the welcome page (game list), the game replay page
# (timeline slider, event markers, score), and the touchdown video overlay.
function(input, output, session) {
  values <- reactiveValues(
    playing = FALSE,      # is the replay timer advancing?
    playdata = NULL,      # play-by-play data for the selected game
    gamedata = NULL,      # metadata row for the selected game
    touchdowns = c()      # timeline seconds at which touchdowns occur
  )

  # Hide the video overlay and resume playback shortly after
  closeVideo <- function() {
    shinyjs::hide("myoverlay", TRUE, "fade", 0.25)
    shinyjs::hide("youtube_area")
    shinyjs::delay(250, {values$playing <- TRUE})
    shinyjs::runjs('$("#youtubeplayer").attr("src", "");')
  }
  shinyjs::onclick("youtube_close", closeVideo())
  shinyjs::onclick("myoverlay", closeVideo())

  # NOTE(review): empty handler -- input$videodone is observed but nothing
  # is done with it; confirm whether this is intentionally a stub.
  observe({
    if (!is.null(input$videodone) && input$videodone > 0) {
    }
  })

  # Keep the play/pause button label in sync with the playing state
  observe({
    if (values$playing) {
      shinyjs::html("play", paste0(icon("pause"), " Pause"))
    } else {
      shinyjs::html("play", paste0(icon("play"), " Play"))
    }
  })

  # On welcome page, user clicks on a game row
  observeEvent(input$gamerowclick, {
    shinyjs::addClass(selector = "body", class = "game_page")
    values$gamedata <- input$gamerowclick
    gameid <- values$gamedata[['sked_id']]
    playdata <- load_playdata(gameid)
    new_game(playdata)
  })

  observeEvent(values$gamedata, {
    js$newgame(values$gamedata)
  })

  # Initialize the game page for a newly selected game
  new_game <- function(playdata) {
    shinyjs::reset("game_page")
    values$playing <- FALSE
    values$playdata <- playdata
    # store time points of touchdowns
    touchdown_idx <- which(playdata$eventType == "Score" &
                             playdata$eventScore == 30)
    values$touchdowns <- playdata[touchdown_idx, ]$seconds
    # Render one positioned marker div per home-team event on the timeline
    output$home_events <- renderUI({
      lapply(
        seq(nrow(playdata)),
        function(x) {
          row <- playdata[x, ]
          if (!row$eventHome || is.na(row$eventType)) {
            return(NULL)
          }
          div(
            class = paste0("gameevent event-home event-", row$eventType),
            style = paste0("height: ", row$eventScore*2, "px;",
                           "left: ", row$seconds / MAX_TIME * 100, "%;"),
            `data-time` = row$seconds,
            `data-tooltip` = row$details
          )
        }
      )
    })
    # Same for away-team events
    output$away_events <- renderUI({
      lapply(
        seq(nrow(playdata)),
        function(x) {
          row <- playdata[x, ]
          if (row$eventHome || is.na(row$eventType)) {
            return(NULL)
          }
          div(
            class = paste0("gameevent event-away event-", row$eventType),
            style = paste0("height: ", row$eventScore*2, "px;",
                           "left: ", row$seconds / MAX_TIME * 100, "%;"),
            `data-time` = row$seconds,
            `data-tooltip` = row$details
          )
        }
      )
    })
    shinyjs::hide("welcome_page")
    shinyjs::show("game_page")
    # Draw the field line for the first play
    pos <- playdata$pos[1]
    end_pos <- playdata$end_pos[1]
    ishome <- playdata$eventHome[1]
    js$setline(pos, end_pos, ishome)
  }

  shinyjs::html("output_quarter", "Q1")
  shinyjs::html("output_time", "15:00")
  output$output_quarter <- renderText({
    paste0("Q", get_quarter(input$time))
  })
  output$output_time <- renderText({
    get_time(input$time)
  })

  # When the timeline position changes, update field line and scores
  observeEvent(input$time, {
    playdata <- values$playdata
    # FIX: was playdata$second, which only worked via the data frame's
    # `$` partial matching; the column is named 'seconds' everywhere else
    index <- findInterval(input$time, playdata$seconds, all.inside = TRUE)
    pos <- playdata$pos[index]
    end_pos <- playdata$end_pos[index]
    ishome <- playdata$eventHome[index]
    js$setline(pos, end_pos, ishome)
    shinyjs::html(id = "homescore", html = playdata$home_score_after[index])
    shinyjs::html(id = "awayscore", html = playdata$away_score_after[index])
  })

  observeEvent(input$play, {
    values$playing <- !values$playing
  })

  # Clicking an event marker jumps the slider to that event's time
  observeEvent(input$gameeventclick, {
    updateSliderInput(session, "time", value = input$gameeventclick)
  })

  # Apply the event-type filter as CSS classes on the timeline wrapper
  observe({
    lapply(EVENT_TYPES, function(x)
      shinyjs::removeClass("time-wrapper", paste0("show-", x)))
    # NOTE(review): removeClass() with no arguments looks like a leftover;
    # confirm it can be deleted (kept to preserve behavior).
    shinyjs::removeClass()
    lapply(input$eventTypeFilter, function(x)
      shinyjs::addClass("time-wrapper", paste0("show-", x)))
  })

  # Playback timer: every 100 ms advance the slider by 4 * speed seconds
  observe({
    invalidateLater(100, session)
    if (!values$playing) {
      return()
    }
    isolate({
      prevval <- input$time
      val <- prevval + 4 * input$speed
    })
    # figure out if a touchdown just happened
    for(touchdown in values$touchdowns) {
      if (touchdown >= prevval && touchdown < val) {
        # Pause and show a random highlight clip in the overlay
        values$playing <- FALSE
        shinyjs::show("myoverlay", TRUE, "fade", 0.25)
        shinyjs::delay(250, shinyjs::show("youtube_area"))
        clip_idx <- sample(length(demo_clips), 1)
        clip_info <- demo_clips[[clip_idx]]
        js$playyoutube(clip_info)
      }
    }
    if (val >= MAX_TIME) {
      updateSliderInput(session, "time", value = MAX_TIME)
      values$playing <- FALSE
      return()
    }
    updateSliderInput(session, "time", value = val)
  })

  observe({
    onclick("back_to_welcome", {
      values$playing <- FALSE
      shinyjs::show("welcome_page")
      shinyjs::hide("game_page")
      shinyjs::removeClass(selector = "body", class = "game_page")
    })
  })

  # Fake "reaction" curves (heart rate / fan tweets), deterministically
  # seeded from the game id so each game always gets the same curves
  frame <- reactive({
    if(is.null(values$gamedata['sked_id'])){
      return(data.frame(value=double(0),
                        variable=character(0),
                        x = double(0),
                        id = integer(0)))
    }
    set.seed(values$gamedata['sked_id'] %>% unlist)
    # Random peak locations, then points clustered around each peak
    bluPeak = runif(n = floor(runif(n=1,min=2,max=5)),min=1,max=3600)
    redPeak = runif(n = floor(runif(n=1,min=1,max=4)),min=1,max=3600)
    blu = c(sample(1:3600,300,replace = T),
            unlist(sapply(bluPeak,function(x){
              rnorm(100,mean = x, sd = 150)
            })))
    red = c(sample(1:3600,300,replace = T),
            unlist(sapply(redPeak,function(x){
              rnorm(100,mean = x, sd = 150)
            })))
    # Reduce each sample to its density curve heights
    blu = blu %>% density %>% .$y
    red = red %>% density %>% .$y
    frame = list(Heart_Rate = blu, Fan_Tweets = red) %>% melt
    frame$x = 1:length(red)
    frame$id = 1:nrow(frame)
    names(frame) = c('value','variable','x','id')
    frame$value = scale01(frame$value) * 100
    return(frame)
  })

  # Render the reaction curves with ggvis, tooltip shows series and value
  frame %>% ggvis(~x ,~value, stroke= ~variable,key := ~id) %>%
    add_tooltip(function(x){
      return(paste0('<p>',
                    gsub('_',' ',frame()$variable[x$id]),
                    '</p><p>',
                    format(frame()$value[x$id],digits=2),
                    '</p>'))
    }) %>%
    layer_points(size := 4) %>% hide_legend(scales = 'stroke') %>%
    hide_axis("x") %>% hide_axis("y") %>%
    set_options(height = 70, width = 800,resizable=FALSE,padding = padding(0,0,0,0)) %>%
    bind_shiny('reactionPlot')

  ############## TEST GAME PAGE
  if (TEST_GAMEPAGE) {
    allgames <- read.csv("data/cfl_games_trim.csv", stringsAsFactors = FALSE) %>%
      arrange(desc(game_date))
    gameid <- 12843
    values$gamedata <- allgames[allgames$sked_id == gameid, ]
    playdata <- load_playdata(gameid)
    new_game(playdata)
  }
}
|
e3d8e44495f39c504dfc66043c61ec9a978974ff
|
cbf36503e046f03f25e54411352ba25ab872fba9
|
/R/tvvarGAM.R
|
11beb797ed300f1df7209f01d33a3870f05801e5
|
[] |
no_license
|
LauraBringmann/tvvarGAM
|
f3d618ae93d9971dea1747bb7a1ed55f8775f3f3
|
099f68a69c842f88d3be75a72db9a884d0686756
|
refs/heads/master
| 2023-09-01T03:56:34.587239
| 2023-08-27T16:54:05
| 2023-08-27T16:54:05
| 99,564,430
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,397
|
r
|
tvvarGAM.R
|
#' @import stats graphics utils mgcv mvtnorm
#' @title Fit a time-varying VAR model via GAMs
#'
#' @description \code{tvvarGAM} estimates a (time-varying) vector
#' autoregressive model by fitting one GAM per variable (delegated to
#' \code{tvvarDATA}) and collecting the smooth estimates together with
#' approximate confidence bands.
#'
#' @param data An \eqn{(nt\times nv)}{(nt x nv)} data matrix, or an object of
#' class 'tvvarSIM'.
#' @param nb Number of basis functions for the smoothers; passed on to
#' \code{tvvarDATA} (default = 10).
#' @param consec Optional vector marking consecutive measurements. If
#' \code{NULL}, it is derived from \code{beepvar}/\code{dayvar}, or defaults
#' to \code{1:nrow(data)} when neither is given (default = \code{NULL}).
#' @param scale Passed on to \code{tvvarDATA}; presumably whether to
#' standardize the data before fitting -- confirm there
#' (default = \code{FALSE}).
#' @param beepvar Beep (within-day measurement) index; used together with
#' \code{dayvar} to derive \code{consec} (default = \code{NULL}).
#' @param dayvar Day index; used together with \code{beepvar} to derive
#' \code{consec} (default = \code{NULL}).
#' @param tvvarOpt Model option passed on to \code{tvvarDATA}
#' (default = \code{"TVVAR"}).
#' @param thresholding If \code{TRUE}, point estimates whose confidence band
#' crosses zero are set to zero (default = \code{FALSE}).
#' @param pbar Progress-bar flag passed on to \code{tvvarDATA}
#' (default = \code{TRUE}).
#'
#' @return The function returns a list (an object of class \code{tvvarGAM}) with 3
#' elements:
#' \item{call}{The input arguments (including the data as supplied).}
#' \item{Results_GAM}{A list with arrays \code{Estimate}, \code{CI_low}, and
#'   \code{CI_high}, each of dimension \code{(nv+1) x nv x nt}.}
#' \item{model}{The list of fitted GAM models, one per variable.}
#'
#' @section Details: The bands are computed as fit plus/minus one standard
#' error of the smooths, shifted by each model's fitted intercept.
#'
#' @references
#' \insertRef{RobertsLaughlin1996}{tvvarGAM}
#'
#' \insertRef{Robertsetal2000}{tvvarGAM}
#'
#' @author Laura Bringmann, \email{l.f.bringmann@rug.nl}
#'
#' @examples
#' # Example 1 - xxx
#'
#' # Example 2 - xxx
#' @export
tvvarGAM <- function(data = NULL, # An (nt x nv) data matrix *or* an object of class 'tvvarSIM'
                     nb = 10,
                     consec = NULL,
                     scale = FALSE,
                     beepvar = NULL,
                     dayvar = NULL,
                     tvvarOpt = "TVVAR",
                     thresholding = FALSE,
                     pbar = TRUE)
{
  #---------- Input check ---------
  # NOTE(review): ifelse() is used for scalar control flow with assignment
  # side effects; it works because only the needed branch is evaluated, but
  # plain if/else would be clearer. Also class(data) == "tvvarSIM"
  # misbehaves for objects carrying multiple classes; inherits() is safer.
  ifelse (is.null(data),
          stop("Parameter 'data' is empty! \n Either supply a data matrix or a simulated data object of class 'tvvarSIM'."),
          ifelse(class(data) == "tvvarSIM",
                 {
                   # Simulated input: keep the full object, fit on its $y matrix
                   SIMdata <- data
                   data <- data$y
                   simulated <- TRUE
                 },
                 ifelse(is.numeric(data),
                        simulated <- FALSE,
                        stop("Parameter 'data' is empty! \n Either supply a data matrix or a simulated data object of class 'tvvarSIM'.")
                 )
          )
  )
  # ----- Compute consec argument -----
  # consec can be given directly, derived from beepvar + dayvar, or
  # defaulted to 1:nrow(data); giving consec AND beep/day vars is an error.
  ifelse (is.null(consec),
          ifelse (is.null(beepvar) || is.null(dayvar),
                  ifelse (is.null(beepvar) && is.null(dayvar),
                          consec <- 1:nrow(data),
                          stop("Parameter 'consec' was not provided; only 'dayvar' or 'beepvar' was provided.\n In such cases, provide BOTH 'dayvar' and 'beepvar'.")),
                  consec <- beepday2consec(beepvar = beepvar, dayvar = dayvar)),
          if (!is.null(beepvar) || !is.null(dayvar))
            stop("Please specify the consecutiveness of measurements either via consec, OR via dayvar and beepvar.")
  )
  # --------- Compute Aux Variables ---------
  nt <- nrow(data)   # number of time points
  nv <- ncol(data)   # number of variables
  tt <- 1:nt
  # Define colnames, if not provided with data:
  if (is.null(colnames(data))) colnames(data) <- paste0("X", 1:nv)
  coln <- colnames(data)
  # The lagged colnames:
  colnL <- paste0(coln, "L")
  # Record the call (simulated input keeps the full 'tvvarSIM' object)
  call <- list(data = if (simulated) SIMdata else data,
               nb = nb,
               consec = consec,
               simulated = simulated,
               beepvar = beepvar,
               dayvar = dayvar,
               scale = scale,
               tvvarOpt = tvvarOpt,
               thresholding = thresholding)
  # --------- Estimating GAM ---------
  mod_all <- tvvarDATA(data = data,
                       tvvarOpt = tvvarOpt,
                       nb = nb,
                       pbar = pbar,
                       scale = scale,
                       consec = consec)$model
  # --------- Retrieving results ---------
  # Results_GAM[p, v, t, k]: predictor p (1 = intercept), response v,
  # time t, and k in {1 = upper band, 2 = estimate, 3 = lower band}
  Results_GAM <- array(NA, c(nv+1, nv, nt, 3))
  # Evaluate each model's smooths on the time grid via plot() side output
  estimates <- lapply(1:nv, function(x) plot(mod_all[[x]], select = "None", n = nt))
  estimates.fit <- lapply(estimates, function(x) sapply(1:(nv+1), function(y) x[[y]]$fit))
  estimates.se <- lapply(estimates, function(x) sapply(1:(nv+1), function(y) x[[y]]$se))
  # Intercept shift applies to the first column only; zeros elsewhere
  estimates.int <- lapply(1:nv, function(x) cbind(rep(coef(mod_all[[x]])[1], nt), matrix(0, nt, nv)))
  for (ii in 1:nv)
  {
    Results_GAM[, ii, , 1] <- t(estimates.int[[ii]] + estimates.fit[[ii]] + estimates.se[[ii]])
    Results_GAM[, ii, , 2] <- t(estimates.int[[ii]] + estimates.fit[[ii]])
    Results_GAM[, ii, , 3] <- t(estimates.int[[ii]] + estimates.fit[[ii]] - estimates.se[[ii]])
  }
  # NOTE(review): 'ii' below is the leftover loop index (== nv), so
  # thresholding is applied to the LAST response variable only. Confirm
  # whether this block was meant to run inside a loop over all ii.
  if (thresholding)
  {
    # Zero out estimates whose band (upper * lower) straddles zero
    tmp.sgn <- sign(Results_GAM[, ii, , 1] * Results_GAM[, ii, , 3]) > 0
    Results_GAM[, ii, , 2] <- Results_GAM[, ii, , 2] * (tmp.sgn * 1)
  }
  Results <- list('Estimate' = Results_GAM[, , , 2],
                  'CI_low' = Results_GAM[, , , 3],
                  'CI_high' = Results_GAM[, , , 1])
  outlist <- list(call = call,
                  Results_GAM = Results,
                  model = mod_all)
  class(outlist) <- "tvvarGAM"
  return(outlist)
}
# Define plot() method for class "tvvarGAM":
#' Plot method for 'tvvarGAM' objects
#'
#' Draws, for each response variable, the estimated time-varying intercept
#' and the lagged-effect smooths (one panel per predictor) using
#' \code{plot.gam}. When the object was fitted to simulated data
#' ('tvvarSIM'), the true curves stored in the simulation object are
#' overlaid in red.
#'
#' @param x An object of class 'tvvarGAM'.
#' @param ... Unused; kept for S3 method compatibility.
#' @export
plot.tvvarGAM <- function(x, # object of class 'tvvarGAM'
                          ...)
{
  # Use inherits() instead of class() == "..." (robust to multi-class
  # objects) and plain if/else instead of ifelse() for scalar control flow.
  if (inherits(x$call$data, "tvvarSIM")) {
    data <- x$call$data$y
  } else {
    data <- x$call$data
  }
  coln <- colnames(data)
  colnL <- paste0(coln, "L") # the lagged colnames
  nv <- ncol(data)
  nt <- nrow(data)
  tt <- 1:nt
  # Panel grid: one row per response, one column per predictor (+ intercept).
  # Restore the caller's graphics settings when done.
  old_par <- par(mfrow = c(nv, nv+1),
                 oma = c(2, 2, .25, .25),
                 mar = c(2, 2, 1, 1),
                 mgp = c(2, 1, 0),
                 xpd = NA)
  on.exit(par(old_par), add = TRUE)
  for (i in 1:nv) {
    mod <- x$model[[i]]
    for (j in 1:(nv+1)) {
      # Panel j == 1 is the intercept smooth, shifted by the fitted
      # constant; panels j > 1 are the lagged-predictor smooths.
      plot.gam(mod,
               seWithMean = TRUE,
               select = j,
               rug = FALSE,
               ylim = if (j == 1) NULL else c(-1, 1),
               shift = if (j == 1) coef(mod)[1] else 0,
               xlab = "Time",
               ylab = if (j == 1) paste0("Intercept of ", coln[i]) else paste0(coln[i], " ~ ", colnL[j-1]),
               bty = "n"
      )
      if (x$call$simulated)
      {
        # Overlay the true simulated intercept / coefficient curves
        if (j == 1) lines(tt, x$call$data$aint[, i], col = "red") else lines(tt, x$call$data$rho[, (i-1)*nv + (j-1)], col = "red")
      }
    }
  }
}
# Define summary() method for class "tvvarGAM":
#' Summary method for 'tvvarGAM' objects
#'
#' Returns the third element of the result list, i.e. the fitted GAM
#' models, one per response variable.
#' @export
summary.tvvarGAM <- function(object, ...) {
  # The fitted-model list is stored as element 3 of the output list
  # (extract positionally so unnamed lists behave the same).
  fitted_models <- object[[3]]
  fitted_models
}
|
1eb87be44231070680e293bdb5bdf13ad9593826
|
534184299c941fce365c0f8b9bdc9689f1e7ebc5
|
/tests/testthat/test_fclean.R
|
304707bd9b0344dad2960ae3822bbff1321b63f2
|
[] |
no_license
|
schignel/foofactors
|
a41d9a47d80a0d0a7040d8ceb72d0001af1d81d6
|
14873d2f64bccb447a81fbe7cae0f04162eef5eb
|
refs/heads/master
| 2020-04-06T07:24:43.776916
| 2018-11-16T02:11:23
| 2018-11-16T02:11:23
| 157,271,487
| 0
| 0
| null | 2018-11-12T20:20:53
| 2018-11-12T20:20:53
| null |
UTF-8
|
R
| false
| false
| 1,209
|
r
|
test_fclean.R
|
context("Cleaning factors")

# Verify that fclean() normalizes common data-entry inconsistencies
# (stray whitespace, inconsistent capitalization) to title-case levels.
test_that("fclean cleans factor (or character)", {
  expected <- factor(c("High Production", "Med Production", "Med Production", "Low Production"))
  messy_variants <- list(
    leftgap  = factor(c(" HIGH production", " MED production ", " MED production ", " low Production")),
    rightgap = factor(c("high production ", "MED pRODUction ", "med productION ", "LOw prOduCtion ")),
    mixgap   = factor(c(" high production ", "MED pRODUction ", "med productION ", "LOw prOduCtion ")),
    allcaps  = factor(c("HIGH PRODUCTION", "MED PRODUCTION", "MED PRODUCTION", "LOW PRODUCTION")),
    spaces   = factor(c("HIGH PRODUCTION", "MED PRODUCTION" , "MED PRODUCTION","LOW PRODUCTION"))
  )
  # Every messy variant must clean to the same canonical factor
  for (messy in messy_variants) {
    expect_identical(fclean(messy), expected)
  }
})
|
cba1da52d6c76eeaaebe6a69335953fe97fe17a7
|
9b71957ad9a560abb9caaadaf002a6d3454b7f52
|
/UniProj/prac.R
|
5ed58d0571f41f09c8102b85925afc0023f29964
|
[] |
no_license
|
akersh1996/RWork
|
ba463b083b182368e120c225282a824a292bb17f
|
14760f365734707bcf477f3b333fefbb228a57e2
|
refs/heads/master
| 2021-05-04T13:53:05.259411
| 2018-03-16T20:56:24
| 2018-03-16T20:56:24
| 120,324,216
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,551
|
r
|
prac.R
|
cat("Some routines for the Math3733 practical\n")
cat("Version 1.4\n")
# updated Nov 2002
# Simulate nsim independent Brownian-motion paths, each built from ntime
# Gaussian increments of variance 1/ntime. Returns an nsim x (ntime + 1)
# matrix: row i is one path, and column 1 is the starting value 0.
bmsim <- function(nsim, ntime) {
  # Scaled white-noise increments, one row per path
  increments <- matrix(rnorm(nsim * ntime), nrow = nsim, ncol = ntime) / sqrt(ntime)
  # Each path is the running sum of its increments, prefixed with 0
  t(apply(increments, 1, function(steps) cumsum(c(0, steps))))
}
# Approximate, along each simulated path in w (one path per row),
#   integral( sigma * fun1(W) dW )  +  integral( nu * fun2(W) dt ).
# lh = TRUE evaluates the integrands at the left-hand endpoint of each
# increment (Ito-style); lh = FALSE uses the right-hand endpoint.
# 'alpha' is made visible to the integrand functions (used by fun.exp).
# Returns a vector with one integral value per path.
stoch.int=function(fun1=fun.null,fun2=fun.null,w=bm,lh=T,
  sigma=1, nu=1, alpha=1) {
  # Re-parent the integrand functions so they see this call's arguments
  # (in particular 'alpha'); needed because e.g. fun.exp references alpha
  environment(fun1)=environment()
  environment(fun2)=environment()
  nsim=nrow(w)
  ntime=ncol(w)-1
  # jj = 0 -> evaluate at w[, j] (left endpoint); jj = 1 -> w[, j + 1]
  jj=0
  if(lh==F) jj=1
  si=rep(0,nsim)
  # Accumulate the Riemann-type sums over all increments (dt = 1/ntime)
  for(j in 1:ntime) {
    si=si+sigma*fun1(w[,j+jj])*(w[,j+1]-w[,j]) + nu*fun2(w[,j+jj])/ntime
  }
  si
}
# Euler approximation of the SDE
#   dY = sigma * fun1(Y) dW + nu * fun2(Y) dt,  Y(0) = init,
# driven by the Brownian paths in w (one path per row, dt = 1/ntime).
# 'alpha' is made visible to the coefficient functions (used by fun.exp).
# Returns a matrix of solution paths with the same shape as w.
sde=function(fun1=fun.null,fun2=fun.null,w=bm,init=1,
  sigma=1, nu=1, alpha=1) {
  # Re-parent the coefficient functions so they see this call's 'alpha'
  environment(fun1)=environment()
  environment(fun2)=environment()
  ntime=ncol(w)-1
  # Start from a copy of w so y has the right shape, then overwrite
  y=w
  y[,1]=init
  for(j in 1:ntime) {
    y[,j+1]=y[,j] + sigma*fun1(y[,j])*(w[,j+1]-w[,j])+nu*fun2(y[,j])/ntime
  }
  y
}
# Sum of squared increments (realized quadratic variation) for each path
# in w (one path per row). Returns a one-column matrix, one total per row.
dw2 <- function(w = bm) {
  increments <- w[, -1] - w[, -ncol(w)]
  squared <- increments^2
  # Row sums via matrix product, keeping the one-column matrix result
  squared %*% rep(1, ncol(squared))
}
# Elementary integrand/coefficient functions for stoch.int() and sde()
fun.null=function(x) {0}   # constant zero
fun.one=function(x) {1}    # constant one
fun.id=function(x) {x}     # identity
fun.sq=function(x) {x*x}   # square
# exp(alpha * x); 'alpha' is found via the environment() re-parenting
# performed inside stoch.int()/sde()
fun.exp=function(x) {exp(alpha * x)}
# Chi-squared Q-Q plot: sorted sample values against chi-squared
# quantiles with df degrees of freedom.
qqchisq=function(x,df,main=NULL) {
  y=sort(x)
  n=length(x)
  # theoretical quantiles at plotting positions i / (n + 1)
  ords=qchisq((1:n)/(n+1),df)
  plot(ords,y,main=main)
}
# Demonstration: simulate Brownian paths, then compare left- vs right-hand
# evaluation of the stochastic integral of W dW.
# FIX: the simulation result was previously discarded, yet the calls below
# rely on a global 'bm' (the default w = bm in stoch.int/sde) -- assign it.
bm = bmsim(1000,1000)
si1 = stoch.int(fun1=fun.id,fun2=fun.null, sigma=2)
si2 = stoch.int(fun1=fun.id, fun2=fun.null,lh=F, sigma=2)
# Difference between the two evaluation schemes, inspected for a few paths
dif = si2-si1
dif[5]
dif[50]
dif[70]
dif[200]
dif[600]
dif[201]
# (a stray bare 'length' expression, which only printed the function
# definition, was removed here)
si1[5]
# Euler solution of dY = -Y dW + Y dt starting from Y(0) = 1
sde1<-sde(fun1=fun.id, fun2=fun.id, w=bm, init=1, sigma=-1, nu=1)
sde1[8]
|
0400c58ea4fda13f0748ca990c7f6dd927684383
|
fa905c9b9d21e30e49dac046ee3b6577b78d883c
|
/web_illumina_custom.R
|
38f3b6b7b77702cb18422eb510bfb31ce15ba57a
|
[] |
no_license
|
timplab/timp_illumina
|
3ac1d79c8a844ea8666e6bd51e974e9f7077a0f6
|
fc0b06a02629a3b28b5b03be3edcd14e5b8203ee
|
refs/heads/master
| 2021-07-26T23:51:55.612355
| 2012-09-07T13:59:15
| 2012-09-07T13:59:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,257
|
r
|
web_illumina_custom.R
|
## Make plots specific to the webpage: MDS and dot plots.
## NOTE(review): scripts ideally avoid setwd(); kept because downstream
## relative paths (load/pdf) depend on it.
setwd("~/Data/Illumina_Bead/Analysis/")
load("predata.rda")  # provides `data` (fqbeta matrix + fsamp/probes annotation) -- TODO confirm
library(RColorBrewer)
library(plotrix)
# Hector's ranked CpG list; keep the top 25 probes for ordination.
hector=read.csv("~/Data/Illumina_Bead/Analysis/ordered_cpgs.csv", stringsAsFactors=FALSE)
top25=hector$X[1:25]
##PCA on samples (transpose so rows = samples)
p=prcomp(t(data$fqbeta[top25,]))
##Classical MDS in 2 dimensions
z=cmdscale( dist( t( data$fqbeta[top25,]) ), k=2)
# Sample groups by progression stage: 0 = normal, >2 = carcinoma.
norms=data$fsamp$Progression==0
carcy=data$fsamp$Progression>2
# BUG FIX: "Progressoin" was misspelled, so `ady` silently became a
# zero-length logical; adenomas are progression stages 1-2.
ady=data$fsamp$Progression>0&data$fsamp$Progression<3
##MDS colors: one color per tissue class
colly=c("green", "orange", "blue", "red","brown")
type_col=factor(data$fsamp$Class)
types=levels(type_col)
levels(type_col)=colly
##Per-tissue "normal range" ellipses: center = median of the normal samples'
##MDS coordinates, radii = 3 * MAD.
nprof=list(xcoor=rep(0,5), ycoor=rep(0,5), xrad=rep(0,5), yrad=rep(0,5), col=rep(as.character("black"),5))
for (i in 1:5) {
type_norms=data$fsamp$Class==types[i]&data$fsamp$Progression==0
ztn=z[type_norms,]
nprof$xcoor[i]=median(ztn[,1])
nprof$ycoor[i]=median(ztn[,2])
nprof$xrad[i]=mad(ztn[,1])*3
nprof$yrad[i]=mad(ztn[,2])*3
nprof$col[i]=colly[i]
}
##Shared axis limits so the normal and cancer panels are directly comparable.
pca_range_x=range(p$x[,1])
pca_range_y=range(p$x[,2])
mds_range_x=range(z[,1])
mds_range_y=range(z[,2])
##MDS of normal samples with the per-tissue normal-range ellipses.
pdf("Movie/norms_mds.pdf")
plot(z[norms,1], z[norms,2], bg=as.character(type_col[norms]), pch=21, xlim=mds_range_x, ylim=mds_range_y)
draw.ellipse(x=nprof$xcoor, y=nprof$ycoor, a=nprof$xrad, b=nprof$yrad, lty=2, lwd=2,border=nprof$col)
legend("topright", c("Breast", "Colon", "Lung", "Wilms", "Thyroid"), col=as.character(levels(type_col)), pch=16)
dev.off()
##MDS of carcinoma samples against the same normal-range ellipses.
pdf("Movie/carc_mds.pdf")
##BUG FIX: points are the carcinoma samples, so they must be colored with
##type_col[carcy] (was type_col[norms], a copy-paste from the panel above).
plot(z[carcy,1], z[carcy,2], bg=as.character(type_col[carcy]), pch=21, xlim=mds_range_x, ylim=mds_range_y)
draw.ellipse(x=nprof$xcoor, y=nprof$ycoor, a=nprof$xrad, b=nprof$yrad, lty=2, lwd=2,border=nprof$col)
legend("topright", c("Breast", "Colon", "Lung", "Wilms", "Thyroid"), col=as.character(levels(type_col)), pch=16)
dev.off()
## Dot plot: y position encodes tissue type (even = normal, odd = carcinoma
## within each type), with jitter to separate overlapping points.
tissue.y=as.numeric(type_col)*2-as.numeric(carcy)
tissue.y=jitter(tissue.y)
pdf("Movie/dotplot_mds.pdf")
plot(z[(norms|carcy),1], tissue.y[(norms|carcy)], bg=as.character(type_col[(norms|carcy)]), pch=21, xlim=mds_range_x, ylim=c(0,11))
##draw.ellipse(x=nprof$xcoor, y=nprof$ycoor, a=nprof$xrad, b=nprof$yrad, lty=2, lwd=2,border=nprof$col)
legend("topright", c("Breast", "Colon", "Lung", "Wilms", "Thyroid"), col=as.character(levels(type_col)), pch=16)
dev.off()
##Breast-only normal panel (Class==2 is breast per the legend order) for Rafa.
pdf("Movie/just_breast_norm.pdf")
plot(z[(norms&(data$fsamp$Class==2)),1],z[(norms&(data$fsamp$Class==2)),2], pch=21, xlim=c(-.75, .25), ylim=c(-.6, .1))
draw.ellipse(x=nprof$xcoor[1], y=nprof$ycoor[1], a=nprof$xrad[1], b=nprof$yrad[1], lty=2, lwd=2,border=nprof$col[1])
dev.off()
##Find norms and cancers (recomputed: the dot-plot section above reused
##these names)
norms=data$fsamp$Progression==0
carcy=data$fsamp$Progression>2
##CpG island status per probe: island is 0, shore is 1, far is 2
##(shore = within 2kb of an island).
islstatus=(data$probes$UCSC_Dist_to_Island>0)+(data$probes$UCSC_Dist_to_Island>2000)
cgi.colors <- brewer.pal(8,"Dark2")[c(4:5,8)]
##Per-probe variability (MAD across samples) in normals vs cancers.
normvar=apply(data$fqbeta[,norms],1,mad)
cancvar=apply(data$fqbeta[,carcy],1,mad)
##All tissues pooled: cancer vs normal variability scatter.
pdf("Movie/allvar1.pdf")
rangy=max(max(normvar), max(cancvar))
plot(normvar,cancvar, xlab="Normal", ylab="Cancer",
xlim=c(0, rangy), ylim=c(0,rangy),
bg=cgi.colors[islstatus+1], pch=21)
##Significance lines: dashed line is the 99% F-quantile variance-ratio
##threshold, solid line is equality.
cc <- qf(.99, sum(carcy)-1, sum(norms)-1)
abline(0,sqrt(cc), lty=2)
abline(0,1)
legend("bottomright", c("Island", "Shore", "Far"), pch=21, pt.bg=cgi.colors)
dev.off()
##Same scatter, one page per tissue class (2,3,4,6,7).
pdf("Movie/tisvar1.pdf")
for (i in c(2, 3, 4, 6, 7)) {
norms=(data$fsamp$Progression==0)&(data$fsamp$Class==i)
carcy=(data$fsamp$Progression>2)&(data$fsamp$Class==i)
cgi.colors <- brewer.pal(8,"Dark2")[c(4:5,8)]
normvar=apply(data$fqbeta[,norms],1,mad)
cancvar=apply(data$fqbeta[,carcy],1,mad)
rangy=max(max(normvar), max(cancvar))
plot(normvar,cancvar, xlab="Normal", ylab="Cancer",
xlim=c(0, rangy), ylim=c(0,rangy),
bg=cgi.colors[islstatus+1], pch=21,
main=i)
##Significance lines (as above, per tissue).
cc <- qf(.99, sum(carcy)-1, sum(norms)-1)
abline(0,sqrt(cc), lty=2)
abline(0,1)
legend("bottomright", c("Island", "Shore", "Far"),
pch=21, pt.bg=cgi.colors)
}
dev.off()
|
bfee71731e18ffa1a6e6c4d0801e39a6aa67996e
|
e5ee4cb735ceda3e28d0c7f759bfc63857ca30f6
|
/scripts/secondchart.R
|
14f0dc4e20e682ddbeed8eda2911072116ed7545
|
[
"MIT"
] |
permissive
|
yiuchungcheung/INFO201-Fast-Food-Restaurants-in-America
|
c78d1d34293f84d7fa35438617c5cd8ed6047bde
|
9aed85e8ffed48d87f84c942ecdac318e9a9909b
|
refs/heads/master
| 2022-04-22T04:19:23.224437
| 2020-03-11T15:28:18
| 2020-03-11T15:28:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 855
|
r
|
secondchart.R
|
# Package setup for the fast-food-restaurant charts (lintr/styler are loaded
# here although they are development tools; left as the author wrote it).
library(dplyr)
library(lintr)
library(styler)
library(tidyr)
library(leaflet)
library(ggplot2)
library(plotly)
library(stringr)
# Restaurant-level dataset; one row per restaurant, including a `province`
# (state abbreviation) column used by second_new_chart().
data <- read.csv(file = "data/Datafiniti_Fast_Food_Restaurants_May19.csv", stringsAsFactors = FALSE)
# Build a plotly bar chart of the `number` states with the most fast-food
# restaurants.
# df:         restaurant data with one row per restaurant and a `province`
#             column (state abbreviation).
# number:     how many top states to show (coerced to numeric).
# colour_bar: single color used for all bars.
# Returns the plotly chart object.
second_new_chart <- function(df, number, colour_bar) {
  number <- as.numeric(number)
  chart_title <- paste0("Number of Top ", number, " Fast Food Restaurants by State")
  # Count restaurants per state and keep the `number` largest counts.
  top_states <- df %>%
    group_by(province) %>%
    summarise(total_num_restaurants = n()) %>%
    top_n(number)
  # Render the counts as a labelled bar chart.
  top_states %>%
    plot_ly(
      type = "bar",
      x = ~province,
      y = ~total_num_restaurants,
      color = I(colour_bar)
    ) %>%
    layout(
      title = chart_title,
      xaxis = list(title = "State (Abr.)"),
      yaxis = list(title = "Total Number of Restaurants")
    )
}
#second_new_chart(data, 5, "pink")
|
c74cf90bd679c96d5098ad18a2a4f88258e784b6
|
331e7816d55b9d3de50253d1b096e8707859a11c
|
/R/calmarFunctions.R
|
c5afafa81f43108dc60a65722028c4870f3f3477
|
[] |
no_license
|
haroine/icarus
|
e515732a69d82614bb248807f882559188d291a7
|
bd51ecf29bc7f07111219534dbd401f78c1daa84
|
refs/heads/master
| 2023-06-09T19:41:26.432469
| 2023-05-27T15:42:26
| 2023-05-27T15:42:26
| 38,872,499
| 10
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 25,159
|
r
|
calmarFunctions.R
|
# copyright (C) 2014-2023 A.Rebecq
# Functions designed so that calibration can be made in a familiar
# setting for Calmar and Calmar2 users
# Number of distinct values (modalities) taken by a column.
nModalities <- function(col)
{
  distinct_vals <- unique(col)
  length(distinct_vals)
}
# Build the calibration design matrix: categorical columns of `entryMatrix`
# are expanded into dummy (one column per modality) variables via
# colToDummies() (defined elsewhere in this package), quantitative columns
# are kept as-is, and an optional all-ones "population" column is appended.
# entryMatrix:    data frame / matrix of calibration variables.
# popVector:      append a column of 1s estimating the population total?
# isQuantitative: logical per column; NULL means all categorical.
calibrationMatrix <- function(entryMatrix, popVector=TRUE, isQuantitative=NULL)
{
if(is.null(isQuantitative)) {
isQuantitative <- rep(FALSE, ncol(entryMatrix))
}
entryMatrix = data.matrix(entryMatrix)
# Initialization of return matrix
nRows = nrow(entryMatrix)
nCols = 0
N = ncol(entryMatrix)
# Particular case if entryMatrix has only one row
# (data.matrix of a single column can drop the dim attribute).
if(is.null(N)) {
N=1
nRows = length(entryMatrix)
}
# nCols is computed but only the cbind loop below actually sizes the result.
for(i in 1:N)
{
nCols = nCols + nModalities(entryMatrix[,i])
}
namesMatrix = names(entryMatrix)
calibrationMatrix = matrix(0, nRows, 0, byrow=T)
# Grow the design matrix one variable at a time: dummies for categorical,
# raw column for quantitative.
for(i in 1:N)
{
if(!isQuantitative[i]) {
calibrationMatrix = cbind(calibrationMatrix, colToDummies(entryMatrix[,i], namesMatrix[i]))
} else {
calibrationMatrix = cbind(calibrationMatrix, entryMatrix[,i])
}
}
# Add "population" vector
if(popVector) {
calibrationMatrix = cbind(calibrationMatrix, rep(1,nrow(calibrationMatrix)))
}
return(calibrationMatrix)
}
# 0/1 indicator version of the calibration matrix: every non-zero entry
# (dummy code or quantitative value) is collapsed to 1.
dummyModalitiesMatrix <- function(entryMatrix)
{
  indicator <- calibrationMatrix(entryMatrix)
  indicator[indicator != 0] <- 1
  indicator
}
## private function that computes weighted estimates
## @keywords internal
## private function that computes weighted estimates
## Horvitz-Thompson style totals: the weighted column sums of the 0/1
## modality matrix, i.e. the estimated population count of each modality.
## Returns a 1-row matrix (result of the vector-matrix product).
## @keywords internal
HTtotals <- function(dummyModalitiesMatrix, weights)
{
return(weights%*%dummyModalitiesMatrix)
}
## ensures compatibility with first version of icarus
## (when it was still called gaston 0.0.1)
## @keywords internal
## ensures compatibility with first version of icarus
## (when it was still called gaston 0.0.1)
## Builds the calibration design matrix from the Icarus-style margin matrix:
## column 1 of marginMatrix selects the calibration variables in `data`,
## column 2 encodes the number of modalities (0 => quantitative).
## @keywords internal
createCalibrationMatrix <- function(marginMatrix, data, popVector=TRUE)
{
# Select calibration variables in the table
# (and indicates whether they are quantitative / categorical)
selectVector = marginMatrix[,1]
isQuantitative = as.numeric(marginMatrix[,2])
# Collapse the modality counts to a categorical flag, then invert it:
# 0 modalities => quantitative (TRUE), >0 => categorical (FALSE).
isQuantitative[isQuantitative != 0] <- 1
isQuantitative <- 1 - as.numeric(isQuantitative) # is considered as boolean by R
Xs = data[,selectVector]
# Build the dummy-expanded calibration matrix.
matrixCal = calibrationMatrix(Xs, popVector, isQuantitative)
return(matrixCal)
}
## Main private function for the creation of the margin matrix
## @param calmarMatrix matrix of margins without the names column
## @keywords internal
## Main private function for the creation of the margin vector.
## Flattens the Calmar-style margin matrix (one row per variable: first cell
## is the modality count or 0 for quantitative, following cells the margins)
## into a single vector aligned with the columns of calibrationMatrix.
## @param calmarMatrix matrix of margins without the names column
## @param calibrationMatrix dummy-expanded design matrix (used only to know
##        whether a final population column needs a popTotal entry)
## @param popTotal population total; multiplies percentage margins when pct
## @param pct TRUE when categorical margins are given as shares/percentages
## @keywords internal
formatMargins <- function(calmarMatrix, calibrationMatrix, popTotal=NULL, pct=FALSE)
{
# Create empty vector of margins
cMatrixCopy = calmarMatrix
# A single margin row may arrive as a plain vector; promote to 1-row matrix.
if(is.vector(cMatrixCopy)) {
cMatrixCopy = t(as.matrix(calmarMatrix))
calmarMatrix = t(as.matrix(calmarMatrix))
}
# Each variable contributes max(1, nModalities) entries to the output.
typeMargins = cMatrixCopy[,1]
typeMargins[typeMargins==0] = 1
cMargins = rep(0,sum(typeMargins))
# Fill cMargins
i=1
curRow = 1
while(curRow <= nrow(calmarMatrix))
{
if(calmarMatrix[curRow,1] == 0)
{
# Quantitative variable: single total in column 2.
cMargins[i]=calmarMatrix[curRow,2]
i=i+1
}
else
{
n = calmarMatrix[curRow,1]
## If categorial margins are not entered as percentages,
## do not multiply by popTotal (except if it is popVector !)
if( all(calmarMatrix[curRow,2:(n+1)] < 1) && (is.null(popTotal) || !pct) ) {
warning(paste("All margins in variable ",curRow,"are less than 1 : should they be considered as percentages ?"))
}
if(pct) {
if(is.null(popTotal)) {
stop("popTotal has to be set when pct is TRUE")
} else {
## If sum is strictly equal to 100, divide by 100
## (allows for a behavior closer to Calmar2)
if( sum(calmarMatrix[curRow,2:(n+1)]) == 100 ) {
calmarMatrix[curRow,2:(n+1)] <- calmarMatrix[curRow,2:(n+1)] / 100
}
popTotalNum <- popTotal
}
} else {
popTotalNum <- 1
}
# Copy (and possibly scale) each modality margin.
for(j in 2:(n+1))
{
cMargins[i] = calmarMatrix[curRow,j]*popTotalNum
i = i+1
}
}
curRow = curRow+1
}
# If there is still one column, it is the population one, so we add popTotal to cMargins
# ... unless specified otherwise
if(i <= ncol(calibrationMatrix) && !is.null(popTotal))
cMargins[i] = popTotal
return(cMargins)
}
#' Stats for initial weights, calibrated weights, and margins.
#' @description
#' Gives stats about the calibration process: differences between
#' totals after/before calibration and margins. Totals for categorical
#' variables are displayed in percentages.
#' (same as first panels output in Calmar/Calmar 2)
#' Output is a list, which might not be convenient for exports (e.g. for integration
#' into a scientific report). In such cases,
#' use function \code{\link{marginStats}}, which outputs a dataframe.
#' @param data dataframe containing the survey data
#' @param marginMatrix matrix of margins
#' @param popTotal total of population, useful if margins are entered in relative value
#' @param pct Set this to true if margins for categorical variables are written in percentages
#' @param colWeights name of weights column in the dataframe
#' @param colCalibratedWeights name of calibrated weights column in the dataframe (if applicable)
#' @param calibThreshold If difference between calibration estimate and margin differ more than
#' this parameter, calibration is considered to have failed
#' @return List containing stats on weights and margins
#' @seealso \code{\link{marginStats}}
#' @export
calibrationMarginStats <- function(data, marginMatrix, popTotal=NULL, pct=FALSE, colWeights, colCalibratedWeights=NULL, calibThreshold=1.0) {
# When no calibrated-weights column is given, report the initial weights
# twice (labelled "Current" instead of "After Calibration").
displayCalibratedWeights <- TRUE
if(is.null(colCalibratedWeights)) {
displayCalibratedWeights <- FALSE
colCalibratedWeights <- colWeights
}
if(displayCalibratedWeights) {
textAfter <- "After Calibration"
} else {
textAfter <- "Current"
}
enteredAsPct <- FALSE
popTotalMarginDisplay <- popTotal
# Without an explicit population total, estimate it from the weights and
# display NA in the "Margin" column of the totals row.
if(is.null(popTotal)) {
enteredAsPct <- FALSE
if(displayCalibratedWeights) {
popTotal <- sum(data[colCalibratedWeights])
} else {
popTotal <- sum(data[colWeights])
}
popTotalMarginDisplay <- NA
}
if(pct) {
enteredAsPct <- TRUE
}
toWarn = FALSE
displayWarningMessage = FALSE
# Sum of the weights (overall total)
totalWeights = sum(data.matrix(data[colWeights]))
totalCalibrated = sum(data[colCalibratedWeights])
vecTotal = c(totalWeights, totalCalibrated, popTotalMarginDisplay)
names(vecTotal) = c("Before calibration",textAfter, "Margin")
vecTotal = round(vecTotal,2)
marginStatsList = list(vecTotal)
# BUG FIX: check for a NULL marginMatrix BEFORE subsetting it. The original
# read marginMatrix[,1] first, which errors on NULL and made this early
# return unreachable.
if(is.null(marginMatrix)) {
names(marginStatsList) = c("Total")
return(marginStatsList)
}
marginNames = marginMatrix[,1]
# Other margins
for(i in 1:nrow(marginMatrix)) {
toWarn = FALSE
vecTotal = NULL
if(as.numeric(marginMatrix[i,2]) == 0) { # If variable is numeric
# Weighted totals of the quantitative variable.
sumWeights = data.matrix(data[marginNames[i]])[,1] %*% data.matrix(data[colWeights])[,1]
sumCalibrated = data.matrix(data[marginNames[i]])[,1] %*% data.matrix(data[colCalibratedWeights])[,1]
margin = as.numeric(marginMatrix[i,3])
vecTotal = c(sumWeights, sumCalibrated, margin)
vecTotal = as.numeric(vecTotal)
vecTotal = round(vecTotal,2)
# Check if calibration is exact
# NOTE(review): `j` is not defined in this branch, so this stop() would
# itself error if reached here; left as-is to avoid changing messages.
if(is.na(sumCalibrated)) stop(paste("Modality is present in margin tables but not in sample : ",i,";",j))
if(abs(sumCalibrated - margin) >= calibThreshold) {
toWarn = TRUE
displayWarningMessage = TRUE
#vecTotal = c(vecTotal,"*") # Old convention (same as in Calmar)
vecTotal = c(vecTotal,round(abs((sumCalibrated - margin)/margin),4))
}
if(toWarn == FALSE) {
names(vecTotal) = c("Before calibration",textAfter,"Margin")
} else {
names(vecTotal) = c("Before calibration",textAfter,"Margin", "Warning")
}
} else { # If variable has modalities
modalities = data.matrix(unique(data[marginNames[i]])[,1])
modalities = sort(modalities)
# TODO : Assert length(modalities) == marginMatrix[i,2]
for(j in 1:marginMatrix[i,2]) {
toWarn = FALSE
# Weighted counts of this modality, before and after calibration.
sumWeights = sum(data.matrix(data[data[marginNames[i]] == modalities[j],][colWeights]))
sumCalibrated = sum(data.matrix(data[data[marginNames[i]] == modalities[j],][colCalibratedWeights]))
if(!enteredAsPct) {
## By convention, margins for categorical variables are displayed in percentages
margin = as.numeric(marginMatrix[i,2+j])
tempStatVec = c(sumWeights/totalWeights*100, sumCalibrated/totalCalibrated*100, margin/popTotal*100)
} else {
margin = as.numeric(marginMatrix[i,2+j])
tempStatVec = c(sumWeights/totalWeights*100, sumCalibrated/totalCalibrated*100, margin*100)
}
tempStatVec = round(tempStatVec,2)
# Check if calibration is exact
if(is.na(sumCalibrated)) stop(paste("Modality is present in margin tables but not in sample : ",i,";",j))
if(abs(sumCalibrated - margin) >= calibThreshold) {
# toWarn = TRUE
displayWarningMessage = TRUE
# tempStatVec = c(tempStatVec, "*")
}
vecTotal = rbind(vecTotal, tempStatVec, deparse.level = 0)
}
rownames(vecTotal) = modalities
# "Little stars" if not perfectly calibrated
if(toWarn == FALSE) {
colnames(vecTotal) = c("Before calibration",textAfter,"Margin")
} else {
colnames(vecTotal) = c("Before calibration",textAfter,"Margin", "Warning")
}
}
marginStatsList[[i+1]] = vecTotal
}
# Name of marginStatsList
names(marginStatsList) = c("Total", marginNames)
if(displayWarningMessage && displayCalibratedWeights)
writeLines("Careful, calibration may not be exact")
return(marginStatsList)
}
#' Stats for initial weights, calibrated weights, and margins.
#' @description
#' Just like \code{\link{calibrationMarginStats}}, gives stats about the calibration process:
#' differences between totals after/before calibration and margins. Totals for categorical
#' variables are displayed in percentages. The last column, named "difference", shows
#' the difference (in percentage points) between initial estimates and margins (if colCalibratedWeights is NULL)
#' or between calibrated estimates and margins (if colCalibratedWeights is not NULL).
#' Output is a dataframe, which might be more convenient to export than a list
#' (e.g. for integration into reports).
#' @param data dataframe containing the survey data
#' @param marginMatrix matrix of margins
#' @param pct Set this to true if margins for categorical variables are written in percentages
#' @param popTotal total of population, useful if margins are entered in relative value
#' @param colWeights name of weights column in the dataframe
#' @param colCalibratedWeights name of calibrated weights column in the dataframe (if applicable)
#' @param calibThreshold If difference between calibration estimate and margin differ more than
#' this parameter, calibration is considered to have failed
#' @return Dataframe containing stats on weights and margins
#' @seealso \code{\link{calibrationMarginStats}}
#' @export
marginStats <- function(data, marginMatrix, pct=FALSE, popTotal=NULL, colWeights
, colCalibratedWeights=NULL, calibThreshold=1.0) {
# Gather the per-margin stats as a list, then flatten to a data frame.
listMarginStats <- calibrationMarginStats(data, marginMatrix, popTotal, pct, colWeights
, colCalibratedWeights, calibThreshold)
marginStatsDF <- marginStatsDF_gen(listMarginStats)
## Compute column difference (drop the raw "Warning" column first).
marginStatsDF <- marginStatsDF[,-c(4)]
if( is.null(colCalibratedWeights) ) {
marginStatsDF <- marginStatsDF[,-c(2)] # Do not display calibrated weights column
# Difference (in %) between initial estimates and margins.
marginStatsDF[,3] <- round(abs(marginStatsDF[,2] - marginStatsDF[,1])/marginStatsDF[,2]*100,2)
## Correct coefficients for categorical variables (shown in percentage
## points rather than relative percentages).
marginStatsDF <- correctCoefsCategorical(marginStatsDF, marginMatrix)
names(marginStatsDF) <- c("Before calibration","Margin", "Difference (pct)")
} else {
# Difference (in %) between calibrated estimates and margins.
marginStatsDF[,4] <- round(abs(marginStatsDF[,3] - marginStatsDF[,2])/marginStatsDF[,3]*100,2)
## Correct coefficients for categorical variables
marginStatsDF <- correctCoefsCategorical(marginStatsDF, marginMatrix, ncol1=2, ncol2=3, ncol3=4)
colnames(marginStatsDF) <- c("Before calibration","After calibration","Margin","Difference (pct)")
}
return(marginStatsDF)
}
# Private function, created to deal with a new warning
# appearing in `rbind.data.frame` that is properly handled in the rest of the code
# Private function, created to deal with a new warning
# appearing in `rbind.data.frame` that is properly handled in the rest of the code
# Flattens the list returned by calibrationMarginStats() into a data frame,
# suppressing only the known column-recycling warning ("number of columns of
# result is not a multiple of vector length (arg N)"); any other warning or
# error is surfaced and NA is returned.
marginStatsDF_gen <- function(listMarginStats) {
return_df <- tryCatch(
{
do.call(rbind.data.frame, listMarginStats)
},
error=function(cond) {
message(cond)
return(NA)
},
warning=function(cond) {
warn_message <- cond$message
# Propagate the warning unless ALL three fragments of the known
# recycling message are present.
if( !(grepl("number of columns of result", warn_message, fixed = T)) ||
!(grepl("is not a multiple of vector length", warn_message, fixed = T)) ||
!(grepl("of arg", warn_message, fixed = T)) ) {
message(cond)
return(NA)
} else {
suppressWarnings(do.call(rbind.data.frame, listMarginStats))
}
})
return_df
}
## Private function, used in marginMatrix to account for
## categorical variables, whose stats are displayed in percentages
## Private function, used in marginStats to account for
## categorical variables, whose stats are displayed in percentages:
## for each categorical modality row, the "difference" column (ncol3) is
## recomputed as an absolute difference in percentage points between the
## estimate (ncol1) and margin (ncol2) columns.
correctCoefsCategorical <- function(marginStatsDF_init, marginMatrix, ncol1=1, ncol2=2, ncol3=3) {
marginStatsDF <- marginStatsDF_init
# Running count of extra rows contributed by earlier modalities.
nModalCateg <- 0
for(i in 1:nrow(marginMatrix)) {
nModal <- as.numeric(marginMatrix[i,2])
if(nModal > 0) {
for(j in 1:(nModal)) {
## Offset of 1 because of popTotal in first line of marginStatsDF
marginStatsDF[i+nModalCateg+1,ncol3] <- round(abs(marginStatsDF[i+nModalCateg+1,ncol2] - marginStatsDF[i+nModalCateg+1,ncol1]),2)
if(j < nModal) nModalCateg <- nModalCateg + 1
}
}
}
return(marginStatsDF)
}
## Check validity of marginMatrix (deprecated)
## Check validity of marginMatrix (deprecated).
## Only the NULL case is accepted; everything else is reported invalid.
checkMarginMatrix <- function(marginMatrix) {
  .Deprecated("checkNumberMargins")
  if (is.null(marginMatrix)) {
    return(TRUE) # Case NULL is OK
  }
  FALSE
}
## Displays number of NAs among margins
## @keywords internal
## Displays number of NAs among margins: for each calibration variable named
## in column 1 of marginMatrix, counts the rows of `data` where it is NA.
## Returns a two-column character matrix ("Margin", "Missing values").
## @keywords internal
missingValuesMargins <- function(data, marginMatrix) {
nVar = nrow(marginMatrix)
marginNames = marginMatrix[,1]
returnMatrix = cbind(marginNames, rep(0,nVar))
for(i in 1:nVar) {
returnMatrix[i,2] = nrow(data[is.na(data[marginNames[i]]),])
}
colnames(returnMatrix) = c("Margin","Missing values")
return(returnMatrix)
}
## Checks if number of modalities in data matches expected ones according
## to marginMatrix
## @keywords internal
## Checks if number of modalities in data matches expected ones according
## to marginMatrix. Returns FALSE (after printing the first mismatch) or
## TRUE when every categorical variable matches.
## @keywords internal
checkNumberMargins <- function(data, marginMatrix) {
returnBool = TRUE
marginNames = marginMatrix[,1]
for(i in 1:length(marginNames)) {
nModalities = length(table(data.matrix(data[marginNames[i]])))
expectedModalities = as.numeric(marginMatrix[i,2])
if(nModalities != expectedModalities && expectedModalities > 0) { ## "0" indicates calibration is made on quantitative total
writeLines(paste("Error on column ",marginNames[i]," : ",nModalities," modalities in data and ",expectedModalities," expected in margins"))
return(FALSE)
}
}
return(TRUE)
}
#' Regroup calibration modalities
#' @description
#' Beware: this function modifies the calibrationMatrix and marginMatrix objects entered as parameters!
#' Regroups modalities entered in "vecModalities" into single
#' "newModality" in "calibrationMatrix" and adapts "marginMatrix" to the new concept.
#' Typical usage is right before a calibration (and after comptutation of marginMatrix), when
#' you realise calibration output is better when several modalities are reduced to one.
#' (typically very rare modalities, on which calibration constraints are very restrictive).
#' Uses pseudo-"call by reference" via eval.parent because 2 objects are modified :
#' calibrationMatrix and marginMatrix
#' @param calibrationMatrix calibration matrix
#' @param marginMatrix matrix containing the margins to the Icarus format
#' @param calibrationVariable name of the calibration variable for which the regrouping has to be done
#' @param vecModalities Initial modalities of the variable
#' @param newModality Regrouped modalities of the variable
#'
#' @examples
#' \dontrun{
#' ## Suppose we have a calibration matrix and a margin matrix containing information
#' ## for two categorical variables "X1" (10 modalities) and "X2" (5 modalities)
#'
#' matrixCal <- data.frame(matrix(
#' c(floor(10*runif(100))+1,floor((5)*runif(100))+1,
#' floor(10*runif(100))+1,rep(10,100)),
#' ncol=4))
#' marginMatrix <- matrix(c("X1",10,rep(1/10,10),
#' "X2",5,rep(1/5,5),rep(0,5)), nrow=2, byrow=TRUE)
#'
#' # table(matrixCal$X1)
#' # 1 2 3 4 5 6 7 8 9 10
#' # 9 8 8 8 11 15 13 6 10 12
#' # marginMatrix
#' # [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] [,9] [,10] [,11] [,12]
#' # [1,] "X1" "10" "0.1" "0.1" "0.1" "0.1" "0.1" "0.1" "0.1" "0.1" "0.1" "0.1"
#' # [2,] "X2" "5" "0.2" "0.2" "0.2" "0.2" "0.2" "0" "0" "0" "0" "0"
#'
#' regroupCalibrationModalities(matrixCal, marginMatrix, "X1", c(3,4,8), "0")
#'
#' # table(matrixCal$X1)
#' # 0 1 2 5 6 7 9 10
#' # 22 9 8 11 15 13 10 12
#' # marginMatrix
#' # [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8] [,9] [,10]
#' # [1,] "X1" "8" "0.3" "0.1" "0.1" "0.1" "0.1" "0.1" "0.1" "0.1"
#' # [2,] "X2" "5" "0.2" "0.2" "0.2" "0.2" "0.2" "0" "0" "0"
#' }
#' @export
# Regroups vecModalities into newModality for one calibration variable and
# updates the margin matrix accordingly. Modifies BOTH `calibrationMatrix`
# and `marginMatrix` in the caller's frame via eval.parent(substitute(...))
# (pseudo call-by-reference). regroupUnContiguuousModalities() is defined
# elsewhere in this package.
regroupCalibrationModalities <- function(calibrationMatrix, marginMatrix, calibrationVariable, vecModalities, newModality) {
# First, check if number of modalities match in calibrationMatrix and marginMatrix,
# otherwise stop
if(!checkNumberMargins(calibrationMatrix, marginMatrix))
stop("Number of modalities must match between calibrationMatrix and marginMatrix to regroup calibration modalities.")
newCalibrationMatrix <- calibrationMatrix
newMarginMatrix <- marginMatrix
## Modification in calibrationMatrix: recode the grouped modalities.
newCalibrationMatrix[calibrationVariable] <- regroupUnContiguuousModalities(data.matrix(newCalibrationMatrix[calibrationVariable]), vecModalities, newModality)
## Modification in marginMatrix
calVarModalities <- unique(data.matrix(calibrationMatrix[calibrationVariable]))
if(newModality %in% calVarModalities) {
stop("New modality cannot be a modality that already exists in calibration matrix")
}
# Margins are stored in sorted-modality order; find the positions of the
# regrouped modalities within the margin line (offset by the 2 header cells).
orderedCalVarModalities <- calVarModalities[order(calVarModalities)]
indicesVecModalities <- which(orderedCalVarModalities %in% vecModalities)
indicesVecModalities <- indicesVecModalities+2 ## First two columns are name and nModalities
modifiedLine <- marginMatrix[marginMatrix[,1] == calibrationVariable,]
sumRegrouped <- sum(as.numeric(modifiedLine[indicesVecModalities]))
modifiedLine <- modifiedLine[-indicesVecModalities]
# Insert new margin (sum) to the right place
modifiedLine <- modifiedLine[modifiedLine != 0]
newCalVarModalities <- unique(data.matrix(newCalibrationMatrix[calibrationVariable]))
orderedNewCalVarModalities <- newCalVarModalities[order(newCalVarModalities)]
insertPosition <- which(orderedNewCalVarModalities==newModality)
modifiedLine <- c(modifiedLine[1:(2+insertPosition-1)],sumRegrouped,
modifiedLine[(2+insertPosition):length(modifiedLine)])
# Update the modality count: k modalities collapsed into 1.
newNModalities <- as.numeric(modifiedLine[2]) - length(vecModalities) + 1
modifiedLine[2] <- newNModalities
# Add 0s to end line
modifiedLine <- modifiedLine[1:(as.numeric(modifiedLine[2])+2)]
modifiedLine <- c(modifiedLine, rep("0.0000",ncol(marginMatrix) - length(modifiedLine)))
# Careful, sum of weights must be equal to 1 even after modalities have been regrouped;
# if not, absorb the rounding gap into the largest margin.
sumMarginLine <- sum(as.numeric(modifiedLine[3:length(modifiedLine)]))
if( sumMarginLine != 1 ) {
maxMarginValue <- max(as.numeric(modifiedLine[3:(as.numeric(modifiedLine[2])+2)]))
maxIndex <- which(as.numeric(modifiedLine[3:length(modifiedLine)]) == maxMarginValue)
modifiedLine[maxIndex+2] <- maxMarginValue + 1 - sumMarginLine
}
# Replace in marginMatrix
newMarginMatrix[marginMatrix[,1] == calibrationVariable,] <- modifiedLine
# Check if last column of margin matrix is all 0s. If it is, drop last column
# (means larger line has been reduced). Continue to do so until last column is not only 0s.
while( sum(as.numeric(newMarginMatrix[,ncol(newMarginMatrix)])) == 0 ) {
newMarginMatrix <- newMarginMatrix[, -ncol(newMarginMatrix)]
}
# Write the modified objects back into the caller's environment.
eval.parent(substitute(calibrationMatrix <- newCalibrationMatrix))
eval.parent(substitute(marginMatrix <- newMarginMatrix))
}
#' Adds a margin to marginMatrix
#'
#' @param marginMatrix The matrix of margins to add the new margin to
#' @param varName Name of variable in calibration matrix corresponding
#' to the new margin
#' @param vecTotals values of margins (Calmar style) for the variable.
#' Note : if length(vecTotals) > 1, then sum(thresholdAdjustToOne) has to be 1.
#' @param adjustToOne if TRUE and sum(vecTotals) is nearly 1, modify values of vecTotals
#' so that sum is 1.
#' @param thresholdAdjustToOne adjust sum(vecTotals) to 1 if difference
#' is under thresholdAdjustToOne
#'
#' @export
# Append a new margin row for `varName` to the Icarus-style margin matrix.
# A single total encodes a quantitative margin (0 modalities); a vector of
# totals encodes a categorical margin and must sum to 1 (small gaps can be
# absorbed into the largest share when adjustToOne is TRUE).
# Returns the extended margin matrix; zero-padding keeps it rectangular.
addMargin <- function(marginMatrix, varName, vecTotals, adjustToOne=TRUE, thresholdAdjustToOne = 0.01) {
  # Refuse duplicate margins.
  if (varName %in% marginMatrix[, 1]) {
    stop(paste(varName, "is already in margin matrix."))
  }
  # A single total means a quantitative margin, coded with 0 modalities.
  nTotals <- length(vecTotals)
  if (nTotals == 1) {
    nModality <- 0
  } else if (nTotals > 1) {
    nModality <- nTotals
  } else {
    stop("vecTotals must be non NULL vector")
  }
  # Categorical margins must sum to 1; nudge the largest share when the
  # discrepancy is below the tolerance, otherwise fail.
  if (nModality > 1 && sum(vecTotals) != 1) {
    gapOk <- abs(sum(vecTotals) - 1) < thresholdAdjustToOne
    if (!(adjustToOne && gapOk)) {
      stop("sum(vecTotals) must be equal to 1.")
    }
    biggest <- which.max(as.numeric(vecTotals))
    vecTotals[biggest] <- max(as.numeric(vecTotals)) + 1 - sum(vecTotals)
  }
  newRow <- c(varName, nModality, vecTotals)
  out <- marginMatrix
  # Zero-pad whichever side (new row or existing matrix) is too narrow.
  if (length(newRow) < ncol(out)) {
    newRow <- c(newRow, rep(0, ncol(out) - length(newRow)))
  }
  if (length(newRow) > ncol(out)) {
    out <- cbind(out, matrix(0, nrow = nrow(out), ncol = length(newRow) - ncol(out)))
  }
  rbind(out, newRow, deparse.level = 0)
}
## Modifies margin
## Replace the margin for `varName`: drop its existing row (the LAST matching
## one, as in the original scan loop), then re-add it through addMargin().
## NOTE: errors if `varName` is absent (negative indexing with NULL), which
## mirrors the original behavior.
modifyMargin <- function(marginMatrix, varName, vecTotals, adjustToOne=TRUE, thresholdAdjustToOne = 0.01) {
  matches <- which(marginMatrix[, 1] == varName)
  # Keep only the last match, or NULL when absent.
  dropIdx <- if (length(matches) > 0) matches[length(matches)] else NULL
  remaining <- marginMatrix[-dropIdx, ]
  # A single remaining row degrades to a vector; promote it back to a matrix.
  if (is.null(ncol(remaining))) {
    remaining <- t(as.matrix(remaining))
  }
  addMargin(remaining, varName, vecTotals, adjustToOne, thresholdAdjustToOne)
}
## Private function that creates margins to the right format
## @keywords internal
## Private function that creates margins to the right format.
## Returns list(formattedMargins, matrixCal): the flattened margin vector and
## the matching calibration design matrix. With no marginMatrix, calibration
## is on the population total only (design matrix = column of 1s).
## @keywords internal
createFormattedMargins <- function(data, marginMatrix, popTotal=NULL, pct=FALSE) {
if(is.null(marginMatrix)) {
if(is.null(popTotal)){
stop("No margin or population total specified for dataMen.")
}
writeLines("Calibration only made on population totals for dataMen")
matrixCal = rep(1,nrow(data))
formattedMargins = c(popTotal)
} else {
# Creation of the elements (strip the variable-name column).
calmarMatrix = marginMatrix[,2:ncol(marginMatrix)]
# Transform calmarMatrix to numeric matrix to avoid problems in formatMargins
if(!is.vector(calmarMatrix)) {
calmarMatrix = matrix(as.numeric(calmarMatrix), nrow=nrow(calmarMatrix), ncol=ncol(calmarMatrix), byrow=F)
} else {
calmarMatrix = as.numeric(calmarMatrix)
}
# Only append a population column when a population total is supplied.
popVector <- TRUE
if(is.null(popTotal)) {
popVector <- FALSE
}
matrixCal = createCalibrationMatrix(marginMatrix,data, popVector)
formattedMargins = formatMargins(calmarMatrix, matrixCal, popTotal, pct)
}
return(list(formattedMargins, matrixCal))
}
|
627937f64d7767a6432205be04926092de542f7a
|
1da1269745b6ce6806ffd7a15668fc27470cd921
|
/R/ghg_q_subpart_level_information.R
|
c903eb6d55d5f343ebf7ed849be72b2ff2a2fb52
|
[] |
no_license
|
markwh/envirofacts
|
d0c3bb7495060fd00b825c1e72602479f8a92b72
|
815ba95808a37f552d9a7041be532817e4766b90
|
refs/heads/master
| 2021-01-10T07:14:32.874354
| 2019-03-27T02:28:15
| 2019-03-27T02:28:15
| 50,798,175
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 638
|
r
|
ghg_q_subpart_level_information.R
|
#' Retrieve q subpart level information data from ghg database
#'
#' @param FACILITY_ID e.g. '1000029'. See Details.
#' @param REPORTING_YEAR e.g. '2010'. See Details.
#' @param GHG_NAME e.g. 'Biogenic Carbon dioxide'. See Details.
#' @param GHG_QUANTITY e.g. '0'. See Details.
#' @export
ghg_q_subpart_level_information <- function(FACILITY_ID = NULL, REPORTING_YEAR = NULL,
  GHG_NAME = NULL, GHG_QUANTITY = NULL) {
  # Collect the (possibly NULL) filters and forward them to the shared
  # Envirofacts query helper.
  query_args <- list(
    FACILITY_ID = FACILITY_ID,
    REPORTING_YEAR = REPORTING_YEAR,
    GHG_NAME = GHG_NAME,
    GHG_QUANTITY = GHG_QUANTITY
  )
  envir_get("q_subpart_level_information", query_args)
}
|
b6b5f0973303271aa6b88927da40f74adcfea458
|
ceb3918a00d69ea84b6a0057cf84da1ccb736c7c
|
/man/shrink_env.Rd
|
b9918578f10990638ed8fe9fc876536492cbc539
|
[] |
no_license
|
zsmith27/CHESSIE
|
3006d6f7b4b49f1bf846837d597fd31c5d87996b
|
785192be00e1b4713fa00238b93996f8d365f9f2
|
refs/heads/master
| 2020-05-25T22:14:07.599940
| 2018-08-20T16:09:42
| 2018-08-20T16:09:42
| 84,974,122
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 409
|
rd
|
shrink_env.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/site_classification.R
\name{shrink_env}
\alias{shrink_env}
\title{Just Important Environmental Parameters}
\usage{
shrink_env(prep.data)
}
\arguments{
\item{prep.data}{= the output of the prep_data function.}
}
\value{
A data frame of environmental parameters of interest.
}
\description{
Just Important Environmental Parameters
}
|
6e6ba2469164d1caf0e8f8f8112f9cdf2972a1b5
|
55dc28128b9e6f4c9aed24d48b82f0aec1b6b8d0
|
/run_analysis.R
|
ed8423c9351d3f9d9d31ef7d3deab9ffc9f570d3
|
[] |
no_license
|
gwpjp/JH3---Project
|
8e628a1feffdf55278d179e7e9ac3a406813602f
|
12f698e337faf65d6cfc7e38b267abfb015fd980
|
refs/heads/master
| 2021-03-12T20:25:55.680463
| 2015-04-24T13:35:09
| 2015-04-24T13:35:09
| 34,496,298
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,327
|
r
|
run_analysis.R
|
#Assuming that the working directory is the UCI HAR Dataset, the following
#steps read the data into R.
Xtrain <- read.table("./train/X_train.txt") #reads the 561-variable training set
subtrain <- read.table("./train/subject_train.txt") #reads the subject training set
Ytrain<- read.table("./train/Y_train.txt") #reads the activity training set
Xtest <- read.table("./test/X_test.txt") #reads the 561-variable test set
subtest <- read.table("./test/subject_test.txt") #reads the subject test set
Ytest <- read.table("./test/Y_test.txt") #reads the activity test set
library("dplyr")
#Merges all the training data into 1 data frame (subject, activity, measurements)
train <- cbind(subtrain, Ytrain, Xtrain)
#Merges all the test data into 1 data frame in the same column order
test <- cbind(subtest, Ytest, Xtest)
#Combines the two data sets
data <- rbind(train, test) #data has 10,299 rows and 563 columns
#Names the columns of the data set using features.txt
varnames <- read.table("features.txt") #reads the variable names
varnames <- as.character(varnames[,2])
varnames <- c("Subject", "Activity", varnames)
names(data) <- varnames
#Extracts only the measurements on std deviation and mean. This will only
#include variables with names that specifically contain "mean()" or
#"std()" and not variables that simply contain "mean" (the escaped "\\("
#in the pattern enforces the literal parenthesis).
remain <- c(grep("mean\\(",varnames),grep("std\\(", varnames))
remain <- c(1,2,remain[order(remain)])
subdata <- data[,remain] #This data set now has 68 columns
write.table(colnames(subdata),"extractednames.txt") #These are the names of the extracted variables
#Turns activity numbers into descriptive names. Safe to run sequentially:
#the replacement labels contain no digits, so later gsub calls cannot
#re-match already-replaced values.
subdata[,2] <- gsub("1","Walking",subdata[,2])
subdata[,2] <- gsub("2","Walking Upstairs",subdata[,2])
subdata[,2] <- gsub("3","Walking Downstairs",subdata[,2])
subdata[,2] <- gsub("4","Sitting",subdata[,2])
subdata[,2] <- gsub("5","Standing",subdata[,2])
subdata[,2] <- gsub("6","Laying",subdata[,2])
# Creates a tidy data output of the average of all the variables for each
# subject for each of his/her activities (wide format: one row per
# subject/activity pair).
library("reshape2")
dataMelt <- melt(subdata, id=c("Subject","Activity"),
measure.vars = names(subdata[,3:68]))
tidydata <- dcast(dataMelt, Subject + Activity ~ variable, mean)
#Outputs the data to a .txt file
write.table(tidydata,"tidydata.txt")
|
2bbefb8365b995510c1e08bf9a60ca1878ebd2a6
|
a7b6719bd2d2337538dda259cbfae4d815366dee
|
/pollutantmean.r
|
acfdc67fd99478d2a6bdbd037edcdc2dffc20909
|
[] |
no_license
|
iamShashi/data-science-Coursera-using-R
|
5bfe203a7d3798326869c13c680050292f1fcfaf
|
2ba27bccfb43decbe6745c4c252d994e5ffa8f55
|
refs/heads/master
| 2021-01-20T19:39:19.350854
| 2016-07-01T17:01:16
| 2016-07-01T17:01:16
| 62,407,567
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 396
|
r
|
pollutantmean.r
|
# Mean of a pollutant across monitor files.
#
# Reads "<directory>/<id>.csv" for each zero-padded monitor id and returns
# the mean of the requested pollutant column, ignoring NAs.
#
# Fixes vs. the original: the accumulator was seeded with `integer(1)`,
# i.e. a spurious 0 that biased every result; it also grew the vector with
# repeated c() calls (O(n^2)).
#
# @param directory path to the folder holding the monitor CSV files
# @param pollutant name of the column to average (e.g. "sulfate")
# @param id        integer vector of monitor ids (default 1:332)
# @return a single numeric: the mean over all requested monitors
pollutantmean <- function(directory, pollutant, id = 1:332) {
  values <- lapply(id, function(i) {
    fname <- paste0(formatC(i, width = 3, format = "d", flag = "0"), ".csv")
    as.numeric(read.csv(file.path(directory, fname))[[pollutant]])
  })
  mean(unlist(values), na.rm = TRUE)
}
|
5a8a8c14451a2f69547a56b8fc6fe62cd69cb275
|
6d93b620bba520d574f5d4c7842d76af49b8c3b2
|
/Deprecated/21_01_Format_Reviews_Data(deprecated3).R
|
ff18b5ea64549d4aab3a76aabbc7dc0f11c31e8f
|
[] |
no_license
|
agrajg/Paper2_Code
|
48301641107ddef25f892d7056798505781c7e5c
|
11f431580c8769182435ca9a2d5d1e3c1134fa5b
|
refs/heads/master
| 2020-05-01T01:42:42.418713
| 2019-06-14T16:22:16
| 2019-06-14T16:22:16
| 177,200,913
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,025
|
r
|
21_01_Format_Reviews_Data(deprecated3).R
|
# ---------------------------------------------------------------------------
# DEPRECATED preprocessing script: builds a TF-IDF document-feature matrix
# from listing review text and cumulates it per property over time.
# NOTE(review): depends on project globals (project.path, check.packages)
# defined in 00_00_Preamble.R -- not runnable in isolation.
# ---------------------------------------------------------------------------
cat('Begin Running file : ', "Loading_the_data_file.R", " ...", '\n')
cat('--------------------------------------------------------', '\n')
file.time <- Sys.time()
cat('Clearing up workspace.', '\n')
cat('----------------------', '\n')
{
  # WARNING: rm(list = ls()) wipes the caller's global environment; tolerated
  # here only because this file is always run standalone from a fresh session.
  rm(list = ls())
  source(file = "00_00_Preamble.R")
  packages<-c("ggplot2", "foreach", "doParallel", "tidyr", "dplyr", "caret", "stringr", "gmm", "magrittr", "quanteda", "tidytext", "tm", "SnowballC")
  # remove.packages(packages, lib = .libPaths())
  check.packages(packages)
}
cat('Load the reviews data.', '\n')
cat('----------------------', '\n')
{
  # rev_date is parsed from Stata-style strings such as "01jan2015" ("%d%b%Y").
  load(file = paste(project.path,"Output/TEMP/","11_00_Reviews_data_plain.RData", sep=""))
  review.plain.data <- review.plain.data %>% mutate(rev_date = date)
  review.plain.data$rev_date <- as.Date(review.plain.data$rev_date, "%d%b%Y")
  review.plain.data <- review.plain.data %>% arrange(propertyid, rev_date)
}
# --- First pass: tm-based corpus cleanup and TF-IDF --------------------------
# Standard tm pipeline: lowercase, drop numbers/punctuation/stopwords,
# collapse whitespace.
# NOTE(review): wordcloud() and brewer.pal() are used below, but the
# wordcloud / RColorBrewer packages are not in the `packages` list above --
# presumably loaded by the preamble; confirm.
review_corpus = Corpus(VectorSource(review.plain.data$comments))
review_corpus = tm_map(review_corpus, content_transformer(tolower))
review_corpus = tm_map(review_corpus, removeNumbers)
review_corpus = tm_map(review_corpus, removePunctuation)
review_corpus = tm_map(review_corpus, removeWords, c("the", "and", stopwords("english")))
review_corpus = tm_map(review_corpus, stripWhitespace)
inspect(review_corpus[1])
review_dtm <- DocumentTermMatrix(review_corpus)
review_dtm
inspect(review_dtm[500:505, 500:505])
review_dtm = removeSparseTerms(review_dtm, 0.99)
review_dtm
inspect(review_dtm[1,1:20])
findFreqTerms(review_dtm, 1000)
freq = data.frame(sort(colSums(as.matrix(review_dtm)), decreasing=TRUE))
wordcloud(rownames(freq), freq[,1], max.words=50, colors=brewer.pal(1, "Dark2"))
review_dtm_tfidf <- DocumentTermMatrix(review_corpus, control = list(weighting = weightTfIdf))
review_dtm_tfidf = removeSparseTerms(review_dtm_tfidf, 0.99)
review_dtm_tfidf
# The first document
inspect(review_dtm_tfidf[1,1:20])
freq = data.frame(sort(colSums(as.matrix(review_dtm_tfidf)), decreasing=TRUE))
wordcloud(rownames(freq), freq[,1], max.words=100, colors=brewer.pal(1, "Dark2"))
review.plain.data$comments = NULL
review.plain.data = cbind(review.plain.data, as.matrix(review_dtm_tfidf))
# Rough memory footprint in GB.
object.size(review.plain.data)/1000000000
# =====================================================================================================================
# --- Second pass: quanteda-based tokenization and TF-IDF ---------------------
# stopwords.list <- c(stopwords('en'),stopwords('fr'), stopwords('es'), stopwords('pt'), stopwords('de'), stopwords(language = "zh", source = "misc"), stopwords('ru'))
# stopwords.list <- c(stopwords('en'))
language.list <- setdiff(getStemLanguages(),c("porter", "turkish"))
stopwords.list <- unlist(c(lapply(X = language.list, FUN = stopwords)), recursive = TRUE, use.names = TRUE)
quanteda_options(threads = detectCores() - 1)
quanteda_options(language_stemmer = 'English')
# =====================================================================================================================
# Loading the text data
# NOTE(review): this reloads the same .RData loaded above, discarding the
# tm-based columns just cbind-ed onto review.plain.data -- apparently the two
# passes are alternatives kept side by side in this deprecated file.
# ---------------------------------------------------------------------------------------------------------------------
start.loadReview <- Sys.time()
load(file = paste(project.path,"Output/TEMP/","11_00_Reviews_data_plain.RData", sep=""))
review.plain.data <- review.plain.data %>% mutate(rev_date = date)
review.plain.data$rev_date <- as.Date(review.plain.data$rev_date, "%d%b%Y")
review.plain.data <- review.plain.data %>% arrange(propertyid, rev_date)
end.loadReview <- Sys.time()
cat('Time take to load reviews data is : ')
print(end.loadReview - start.loadReview)
gc()
# Tokenizing and preprocessing
# ---------------------------------------------------------------------------------------------------------------------
# *********************************************************************************************************************
# Ngrams also
review.tokens <- tokens(review.plain.data$comments,
                       what = "word",
                       remove_numbers = TRUE,
                       remove_punct = TRUE,
                       remove_symbols = TRUE,
                       remove_hyphens = TRUE,
                       remove_twitter = TRUE,
                       remove_url = TRUE,
                       remove_separators = TRUE
                       ) # ngrams = 1:2, skip = 0:1, concatenator = "_"
# *********************************************************************************************************************
# Converting each token to lowe case
review.tokens <- tokens_tolower(review.tokens)
# Removing stopwords used in multiple languages, not just english
review.tokens <- tokens_select(review.tokens, pattern = stopwords.list, selection = "remove")
# Stemming the tokens to make run running the same word
review.tokens <- tokens_wordstem(review.tokens, language = getStemLanguages())
# Converting to a document feature matrix
# ---------------------------------------------------------------------------------------------------------------------
review.tokens.dfm <- dfm(review.tokens, tolower = FALSE)
# head(docnames(review.tokens.dfm), 20)
# ndoc(review.tokens.dfm)
# nfeat(review.tokens.dfm)
# Begin the process of converting to a usable data.
# ---------------------------------------------------------------------------------------------------------------------
# The DFM contains a lot of features which is difficult to convert to a data frame.
# Most of these features are sparse. We try to remove the most sparse ones from the DFM.
# We realize that with the current capacity, we can only covert about 1800 features to a DF.
# Anyway most of them are about useless since they are so sprse.
# Use functionality of trim and ngrams to test between various specifications of this text data.
# *********************************************************************************************************************
# Trim the DFM
text.df <- dfm_trim(review.tokens.dfm, min_termfreq = 2500, termfreq_type = "count", verbose = TRUE)
# min_termfreq = NULL, max_termfreq = NULL,
# termfreq_type = c("count", "prop", "rank", "quantile"),
# min_docfreq = NULL, max_docfreq = NULL, docfreq_type = c("count",
# "prop", "rank", "quantile")
print(text.df)
# *********************************************************************************************************************
# Compute TF-IDF matrix
text.df <- dfm_tfidf(text.df, scheme_tf = "count", scheme_df = "inverse", base = 10, force = FALSE)
# Convert to a data frame
text.df <- convert(text.df, to = "data.frame")
gc()
# Combine with original to keep the rental and date information.
text.df <- bind_cols(review.plain.data, text.df, .id = NULL)
# *** UNCOMMENT IN THE FINAL CODE RUN ***
# Remove unnecessary objects
rm(list = c("end.loadReview",
            "language.list",
            "review.plain.data",
            "review.tokens",
            "review.tokens.dfm",
            "start.loadReview",
            "stopwords.list" ))
gc()
# Collect reviews that are posted on same day
text.df <- text.df %>%
  select(-reviewer_id, -id,-reviewer_name,-comments,-document) %>%
  group_by(propertyid , rev_date, date) %>%
  summarize_all(list(textvar = sum), na.rm = TRUE) %>% as.tbl()
# Filling missing values with zeros.
text.df[is.na(text.df)] <-0
print(text.df)
# Cumulating the data and save.
# Running cumulative sums per property give "all review text seen so far"
# at each review date.
text.df <- text.df %>% arrange(propertyid, rev_date) %>% group_by(propertyid) %>% mutate_at(vars(-propertyid,-rev_date,-date),cumsum)
text.df <- text.df %>% select(-rev_date) %>% rename(rev_date=date)
object.size(text.df) %>% print()
save.time.begin <- Sys.time()
save(text.df, file = paste(project.path, "Output/TEMP/", "22_01_text_df.RData", sep = ""))
save.time.end <- Sys.time()
cat('Time taken to save text df object : ', '\n')
print(save.time.end-save.time.begin)
gc()
print(text.df)
# =====================================================================================================================
# # ***DON'T COMMENT IF RUNNING FRESH***
# # ***THIS MAY BE JUNK NOW***
# # Creating a rental time panel
# # ---------------------------------------------------------------------------------------------------------------------
# ptm.PanelPrep <- proc.time()
#
# # Use rev_date to match review data with the panel.
#
# # *********************************************************************************************************************
# # How many reviews were accumulated before booking.
# demand.data$date = as.Date(demand.data$date,format="%d%b%Y")
# rental.time.panel <- demand.data %>% # mutate(rev_date = either booking date or date of stay)
# select(propertyid, date, rev_date) %>%
# as.tbl()
# # *********************************************************************************************************************
#
# print(rental.time.panel)
# save(rental.time.panel, file = paste(project.path, "Output/TEMP/", "21_01_rental_time_panel.RData", sep = ""))
# rm(rental.time.panel)
# gc()
# cat('Time taken to prepare the panel : ')
# print(proc.time() - ptm.PanelPrep)
# # =====================================================================================================================
# # =====================================================================================================================
# text.df <- text.df %>%
# full_join(rental.time.panel, by = c("propertyid", "rev_date"))
#
# text.df[is.na(text.df)] <-0
# # DF <- tidy(review.tokens.dfm)
# # DF2 <- DF %>% cast_dtm(document, term, count)
# # convert(review.tokens.dfm, to = c("matrix"), docvars = NULL)
# # review.tokens.df <- as.data.frame(review.tokens.dfm)
# # colnames(DF2)
#
# review.tokens.dfm
#
# dfm.trim <- dfm_trim(review.tokens.dfm, max_docfreq = 1974)
# print(dfm.trim)
# textplot_wordcloud(dfm.trim, color = rev(RColorBrewer::brewer.pal(10, "RdBu")))
#
#
# dfm.trim <- dfm_trim(review.tokens.dfm, min_docfreq = 1975)
# print(dfm.trim)
# textplot_wordcloud(dfm.trim, color = rev(RColorBrewer::brewer.pal(10, "RdBu")))
#
#
# dfm.trim <- dfm_trim(review.tokens.dfm, max_termfreq = 1974)
# print(dfm.trim)
# textplot_wordcloud(dfm.trim, color = rev(RColorBrewer::brewer.pal(10, "RdBu")))
#
#
# dfm.trim <- dfm_trim(review.tokens.dfm, min_termfreq = 2000)
# print(dfm.trim)
# textplot_wordcloud(dfm.trim, color = rev(RColorBrewer::brewer.pal(10, "RdBu")))
#
#
#
# DF4 <- convert(dfm.trim, to = "data.frame")
#
#
#
#
#
#
# # review.panel <- rental.time.panel %>% left_join(review.data, by = c("propertyid", "date"))
# # rm(rental.time.panel)
# # rm(review.data)
#
#
# # # Take a sample (this part need to be commented out)
# # # ---------------------------------------------------------------------------------------------------------------------
# # # demand.data.2 <- demand.data
# # set.seed(48374)
# # review.plain.data <- sample_frac(review.plain.data, size = 0.02)
# # # =====================================================================================================================
|
8ba4af1f57c6e2db262db9e1e224487d80508b54
|
289b57556cc13cdb4780d6f6cce1d1f0fbb315a6
|
/R Programming/polutant.R
|
de176fbd9b06aa27725eeecb6a1eaedbfff34333
|
[] |
no_license
|
Kibrom1/datasciencecoursera
|
83c592811e47738c8be12374a260ab0f6cf85d8b
|
3aa3e9f69b75b98f0643bc136fb75f2100750adf
|
refs/heads/master
| 2021-06-13T07:18:36.872907
| 2017-04-17T00:47:54
| 2017-04-17T01:12:42
| 81,401,744
| 0
| 0
| null | 2017-02-11T23:02:34
| 2017-02-09T02:53:46
| null |
UTF-8
|
R
| false
| false
| 1,128
|
r
|
polutant.R
|
# Mean of a pollutant across the monitor files in `directory`.
#
# Files are taken in list.files() order and indexed by position, matching
# the original behaviour. Fixes vs. the original:
#  * no setwd() -- the original changed the caller's working directory and
#    "restored" it with a fragile setwd('..');
#  * actually computes and returns the mean; the original returned the raw
#    stacked data frame and ignored `polutant` entirely;
#  * drops the invalid encoding = "r" argument.
#
# @param directory folder containing the monitor CSV files
# @param polutant  name of the column to average (original spelling kept)
# @param id        positional indices of the files to read (default 1:332)
# @return a single numeric: mean of the column over all selected files
polutantmean <- function(directory, polutant, id = 1:332) {
  allfiles <- list.files(directory, full.names = TRUE)
  frames <- lapply(id, function(f) read.csv(allfiles[f]))
  result_data <- do.call(rbind, frames)
  mean(result_data[[polutant]], na.rm = TRUE)
}
# Count complete (NA-free) rows per monitor file.
#
# Fixes vs. the original: no setwd() side effect; rows are collected in a
# list and bound once instead of growing with rbind(); the result is a
# proper data.frame (the original rbind-ed onto a list, producing a matrix
# of lists) and is returned visibly (assignment as the last expression made
# the original return invisibly).
#
# @param directory folder containing the monitor CSV files
# @param id        positional indices of the files to read (default 1:332)
# @return data.frame with columns `id` and `nobs` (complete-case count)
complete <- function(directory, id = 1:332) {
  allfiles <- list.files(directory, full.names = TRUE)
  rows <- lapply(id, function(i) {
    my_data <- read.csv(allfiles[i])
    data.frame(id = i, nobs = sum(complete.cases(my_data)))
  })
  do.call(rbind, rows)
}
# Correlation between sulfate and nitrate for monitors with enough data.
#
# For every file in `directory`, if the number of complete cases exceeds
# `thershold` (original spelling kept for interface compatibility), the
# Pearson correlation of sulfate vs nitrate over the complete cases is
# included in the result.
#
# Fixes vs. the original, which could never run: it recursed into itself
# (`corr(sulfate, nitrate)` instead of `cor`), called complete.cases() on
# readline() output, used append() with a single argument, treated the
# threshold as a file count (`for (i in 1:thershold)`), and setwd()-ed
# around the caller's session.
#
# @param directory folder containing the monitor CSV files
# @param thershold minimum number of complete cases required (default 0)
# @return numeric vector of correlations (length 0 if no file qualifies)
corr <- function(directory, thershold = 0) {
  allfiles <- list.files(directory, full.names = TRUE)
  correlations <- numeric(0)
  for (f in allfiles) {
    my_data <- read.csv(f)
    ok <- complete.cases(my_data)
    if (sum(ok) > thershold) {
      correlations <- c(correlations,
                        cor(my_data$sulfate[ok], my_data$nitrate[ok]))
    }
  }
  correlations
}
|
ccb76d61aa17f9b672354ac1baa03d39665cecf0
|
5957f68dcf465daf2046d577ab25b42d3bb5427a
|
/R/sinlogr.R
|
efac90d21ab714e2d6e794316e6a0ff13b10184d
|
[] |
no_license
|
nunesmatt/binhf
|
64467133ada0f50335dad4696d0e16da4f9f19a1
|
e8bb971a703b07cd5f5962d6644fe3db9a7b728a
|
refs/heads/master
| 2018-09-21T08:21:52.284925
| 2018-07-19T08:16:55
| 2018-07-19T08:16:55
| 126,087,430
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 119
|
r
|
sinlogr.R
|
# Evaluate sinlog() on t, mirror the result end-to-end, and keep every
# other value (the odd positions) of the mirrored sequence.
`sinlogr` <-
function (t)
{
    base_vals <- sinlog(t)
    mirrored <- c(base_vals, rev(base_vals))
    keep <- seq_along(mirrored) %% 2 == 1
    mirrored[keep]
}
|
823d439df664a1824582b290edf159a1d62db361
|
75625daae0fc6b9f40995244b98f4dd3197335b9
|
/Trees/Trees_Assignment_Voting.R
|
42709b1cb3b3895b53ef54997c216d7bd5265188
|
[] |
no_license
|
sharathlives/MIT-edx-AnalyticsEdge-StatsiticalModeling
|
e92c44d31c81bce1e17b167b7967d4e208b300fa
|
01b1654feaf2dcd4ab96e3eac79a52a2f951fa53
|
refs/heads/master
| 2021-01-10T04:21:14.580867
| 2015-05-30T05:37:34
| 2015-05-30T05:37:34
| 36,544,928
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,362
|
r
|
Trees_Assignment_Voting.R
|
# Analysis of the Gerber get-out-the-vote experiment (Analytics Edge
# assignment). Requires gerber.csv in the working directory.
library(rpart)       # rpart() -- was used but never loaded in the original
library(rpart.plot)  # prp()   -- was used but never loaded in the original

# Load the dataset
gerber <- read.csv("gerber.csv")

# Exploration: overall voting rate and rate by treatment group
table(gerber$voting); 1 - mean(gerber$voting)  # proportion of voters; baseline accuracy
tapply(gerber$voting, gerber$hawthorne, mean)
tapply(gerber$voting, gerber$civicduty, mean)
tapply(gerber$voting, gerber$neighbors, mean)
tapply(gerber$voting, gerber$self, mean)
tapply(gerber$voting, gerber$control, mean)

# Logistic regression on the four treatment indicators
LogModel = glm(voting ~ civicduty + hawthorne + self + neighbors, data=gerber, family="binomial")
summary(LogModel)

# Accuracy at two cutoffs
predictLog = predict(LogModel, type="response")
table(gerber$voting, predictLog > 0.3) #accuracy = 0.542
table(gerber$voting, predictLog > 0.5) #accuracy = 0.6
1 - mean(gerber$voting) # model accuracy is below the baseline, so this is a weak predictive model

# Build a CART model (a regression tree -- method = "class" not specified)
CARTmodel = rpart(voting ~ civicduty + hawthorne + self + neighbors, data=gerber)
# Fully-grown tree (cp = 0 disables complexity pruning)
CARTmodel2 = rpart(voting ~ civicduty + hawthorne + self + neighbors, data=gerber, cp=0.0)
# This shows that 0.31 of the civic duty people voted (< 0.5 - no)
# Tree with the sex variable included: males are more likely to vote
CARTmodel3 = rpart(voting ~ civicduty + hawthorne + self + neighbors + sex, data=gerber, cp=0.0)

# Trees to understand how the control/sex interaction is handled
CARTcontrol = rpart(voting ~ control, data=gerber, cp=0.0)
CARTsex = rpart(voting ~ control + sex, data=gerber, cp=0.0)
prp(CARTcontrol, digits=6)
control <- subset(gerber, control == 1)
# FIX: the original referenced a nonexistent `gender` column and then
# overwrote base::tapply with `tapply <- tapply(...)`; use `sex` (the column
# used everywhere else) and a non-clashing result name.
control_vote_by_sex <- tapply(control$voting, control$sex, mean) # gives the same result as the regression tree
# The split says that if control = 1, predict 0.296638, and if control = 0, predict 0.34.
# The absolute difference between these is 0.043362.
# Men and women are affected about the same if they are in the control group.

# Logistic regression to understand how interaction terms are handled
LogModelSex = glm(voting ~ control + sex, data=gerber, family="binomial")
# Negative sex coefficient: women are less likely to vote
Possibilities = data.frame(sex=c(0,0,1,1),control=c(0,1,0,1))
predict(LogModelSex, newdata=Possibilities, type="response")
# The four values correspond to (Man, Not Control), (Man, Control),
# (Woman, Not Control), (Woman, Control); very little difference between
# the tree and the logistic regression.
LogModel2 = glm(voting ~ sex + control + sex:control, data=gerber, family="binomial")
# Negative interaction term: a woman in the control group is less likely to vote.
predict(LogModel2, newdata=Possibilities, type="response")
# Now predicts 0.2904558 for the (Woman, Control) case, so there is a very
# small (practically zero) difference between CART and logistic regression.
|
91638cad4fccf1ed64970f4001ef8bea8a264fc4
|
7d7866a55e03f5b0d669028d45ad5606f2d3d561
|
/man/hist.Rd
|
f10b107dedd9157d8e7724a9ad563a26891349eb
|
[
"MIT"
] |
permissive
|
ndhutso/geneSummary
|
beb6d65d7d9e7bd9b40a130cf01a72f82cdd76ad
|
687ddc8c8f90d41ec563bf92b9e59d5d92e917fc
|
refs/heads/master
| 2020-07-05T21:34:55.116152
| 2019-08-16T19:20:05
| 2019-08-16T19:20:05
| 202,784,950
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 342
|
rd
|
hist.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hist.R
\name{hist}
\alias{hist}
\title{Histogram of Data}
\usage{
hist(D1a,D2a)
}
\description{
Compares the data in a histogram
}
\examples{
data <- getGEO("GSE43452")
D2a <- extExp(data)[[2]]
D1a <- extGene(data)[[2]]
hist(D1a,D2a)
}
\author{
Nicholas Hutson
}
|
1029a1d8bfd434d0d2f3b1d8d1793360659220d8
|
be7ab4febfd096b722ab7843f89b473efb3c13bc
|
/R_code/learning.R
|
4af78a2a204a7fe3c340ff433b13b59d6cdb09a3
|
[] |
no_license
|
bgossage/bys602
|
011ddb3f0585b2aa7228c6e921af5bff977908d3
|
46e50189a8460db4e8c1bf1411213a1f83705216
|
refs/heads/master
| 2021-01-11T18:55:07.396279
| 2017-05-10T21:45:35
| 2017-05-10T21:45:35
| 79,656,310
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 352
|
r
|
learning.R
|
#
# Learning R
#
# NOTE: the original started with rm(list = ls()), which wipes the caller's
# entire global environment -- a side effect scripts should never have -- so
# it has been removed. Assignments now use <- consistently.
library(dplyr)
library(ggplot2)

# A simple quadratic curve.
x <- seq(from = -10, to = 10, by = 0.1)
y <- x * x
z <- y * y
qplot(x, y, geom = "line")

# Distribution of the sum of two dice over 10,000 simulated tosses.
die <- 1:6

# Toss two fair dice and return the sum of their faces.
generate <- function() {
  s <- sample(die, size = 2, replace = TRUE)
  sum(s)
}

Toss <- replicate(10000, generate())
qplot(Toss, binwidth = 1)
|
f373dd9f260f0a6e2b8872dad6ca3b60aaf8bf85
|
64fa69a9d2ec2eff2e11086821655631884d7a54
|
/online_results_viewer/server.R
|
57762f7c8cc06efd89ed3fb6beb4ff2366ca4ad0
|
[] |
no_license
|
mkiang/opioid_geographic
|
0e4466ac94ba3fbb7b3707382b4e07a0ec5df9e8
|
b464ede6bccfa42caea7cb591dc23e7a17b39e6f
|
refs/heads/master
| 2020-04-16T21:25:36.169096
| 2019-03-23T16:36:25
| 2019-03-23T16:36:25
| 165,925,172
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,318
|
r
|
server.R
|
## Imports ----
library(shiny)
library(geofacet)
library(tidyverse)
library(DT)
library(statebins)
library(viridis)
library(here)
## Load additional code ----
source(here("shiny_helpers", "mk_nytimes.R"))
source(here("shiny_helpers", "gen_aapc_trends_plot.R"))
source(here("shiny_helpers", "gen_aapc_summary_table.R"))
source(here("shiny_helpers", "gen_hotspots_legend.R"))
source(here("shiny_helpers", "gen_hotspots_map.R"))
source(here("shiny_helpers", "gen_hotspots_statebins.R"))
source(here("shiny_helpers", "gen_hotspots_text.R"))
source(here("shiny_helpers", "gen_color_legend.R"))
source(here("shiny_helpers", "gen_state_apc_results.R"))
source(here("shiny_helpers", "state_table_generators.R"))
source(here("shiny_helpers", "gen_apc_rate_table.R"))
source(here("shiny_helpers", "gen_state_lel_plot.R"))
source(here("shiny_helpers", "gen_lel_map.R"))
source(here("shiny_helpers", "gen_lel_statebins.R"))
source(here("shiny_helpers", "gen_lel_table.R"))
## Make a state name:abbrev dictionary ----
# Named list mapping full state names (plus DC) to their abbreviations,
# sorted alphabetically by abbreviation.
st_name_abbrev <- setNames(c(state.abb, "DC"),
                           c(state.name, "District of Columbia"))
st_name_abbrev <- as.list(sort(st_name_abbrev))

## Load data ----
# Joinpoint regression results (public extract) backing the APC plots/tables.
jp_results <-
  readRDS(here("shiny_data", "joinpoint_results_public.RDS"))

# Life-expectancy-lost estimates: keep only the "total" race stratum and
# attach an ordered, human-readable factor for the opioid/injury categories.
opioid_levels <- c("opioids", "natural", "heroin",
                   "synth", "car_accident", "firearms")
opioid_labels <- c("All opioids", "Natural", "Heroin",
                   "Synthetic", "Car Accidents", "Firearms")

lel_df <-
  readRDS(here("shiny_data", "ex_diff_all_ages_all_areas.RDS")) %>%
  filter(race == "total") %>%
  select(-race) %>%
  mutate(opioid_cat = factor(opioid_type,
                             levels = opioid_levels,
                             labels = opioid_labels,
                             ordered = TRUE))
## Helpers
## Helpers
# Render the hotspots figure either as statebins (when the statebins
# checkbox selection is non-empty) or as a choropleth map. Both renderers
# take identical arguments, so we select the function first and call once.
hotspots_picker <- function(jp_results,
                            h_p_or_q,
                            h_sigpvalue,
                            mort_mid_bin,
                            apc_mid_bin,
                            h_year,
                            statebins) {
  renderer <- if (length(statebins) > 0) {
    gen_hotspots_statebins
  } else {
    gen_hotspots_map
  }
  renderer(
    jp_results = jp_results,
    h_p_or_q = h_p_or_q,
    h_sigpvalue = h_sigpvalue,
    mort_mid_bin = mort_mid_bin,
    apc_mid_bin = apc_mid_bin,
    h_year = h_year
  )
}
# Render the life-expectancy-lost figure either as statebins (when the
# statebins checkbox selection is non-empty) or as a choropleth map.
lel_picker <- function(lel_df,
                       l_age,
                       l_year,
                       l_comparison,
                       l_statebins) {
  renderer <- if (length(l_statebins) > 0) {
    gen_lel_statebins
  } else {
    gen_lel_map
  }
  renderer(
    lel_df = lel_df,
    l_age = l_age,
    l_year = l_year,
    l_comparison = l_comparison
  )
}
## Start server code ----
# Shiny server: each output$<id> below backs the matching element in the UI.
# The gen_* helpers are sourced from shiny_helpers/ at startup; jp_results
# and lel_df are loaded once at app start and shared across sessions.
shinyServer(function(input, output) {
  ## Main joinpoint state map
  output$state_map <- renderPlot({
    gen_aapc_trends_plot(
      jp_results = jp_results,
      opioid_types = input$outcome,
      show_raw = input$show_raw,
      raw_ci = input$raw_ci,
      model_fit = input$model_fit,
      linetype_sig = input$linetype_sig,
      joinpoint = input$joinpoint,
      joinpoint_sig = input$joinpoint_sig,
      ymax = input$ymax,
      legends_on = input$legends_on,
      sig_aapc_only = input$sig_aapc_only,
      sigpvalue = as.numeric(input$sigpvalue),
      p_or_q = input$p_or_q,
      disable_clip = input$disable_clip
    )
  })
  ## AAPC summary table
  output$aapc_table <- DT::renderDataTable({
    gen_aapc_summary_table(jp_results = jp_results,
                           opioid_types = input$outcome)
  })
  ## Hotspot legend (fixed 250px width keeps it aligned with the map)
  output$hotspots_legend <- renderPlot(expr = {
    gen_hotspots_legend(
      mort_mid_bin = input$mort_mid_bin,
      apc_mid_bin = input$apc_mid_bin
    )
  },
  width = 250)
  ## Hotspots map
  # hotspots_picker() chooses between statebins and choropleth rendering.
  output$hotspots_map <-
    renderPlot({
      hotspots_picker(
        jp_results = jp_results,
        h_p_or_q = input$h_p_or_q,
        h_sigpvalue = input$h_sigpvalue,
        mort_mid_bin = input$mort_mid_bin,
        apc_mid_bin = input$apc_mid_bin,
        h_year = input$h_year,
        statebins = input$h_statebins[[1]]
      )
    })
  ## More Context ("percentiles")
  output$percentile_hotspots <- renderUI({
    gen_hotspots_text(
      jp_results = jp_results,
      h_p_or_q = input$h_p_or_q,
      h_sigpvalue = input$h_sigpvalue,
      mort_mid_bin = input$mort_mid_bin,
      apc_mid_bin = input$apc_mid_bin,
      h_year = input$h_year
    )
  })
  ## APC and rate table for the selected year
  output$apc_rate_table <- DT::renderDataTable({
    gen_apc_rate_table(jp_results = jp_results,
                       h_year = input$h_year)
  })
  ## State-specific APC plot
  output$state_specific <- renderPlot({
    gen_state_apc_results(
      jp_results = jp_results,
      s_outcome = input$s_outcome,
      s_state = input$s_state,
      s_p_or_q = input$s_p_or_q,
      s_sigpvalue = input$s_sigpvalue,
      s_show_raw = input$s_show_raw,
      s_raw_ci = input$s_raw_ci,
      s_model_fit = input$s_model_fit,
      s_linetype_sig = input$s_linetype_sig,
      s_joinpoint = input$s_joinpoint,
      s_joinpoint_sig = input$s_joinpoint_sig,
      s_legends_on = input$s_legends_on
    )
  })
  ## APC state-specific model fit
  output$state_table_fit <- DT::renderDataTable({
    gen_state_table_fit(
      jp_results = jp_results,
      s_state = input$s_state,
      s_outcome = input$s_outcome
    )
  })
  ## State-specific LEL plot
  output$state_specific_lel <- renderPlot({
    gen_state_lel_plot(lel_df,
                       s_outcome = input$s_outcome,
                       s_state = input$s_state)
  })
  ## APC state-specific model estimates
  output$state_table_estimates <- DT::renderDataTable({
    gen_state_table_model_estimates(
      jp_results = jp_results,
      s_state = input$s_state,
      s_outcome = input$s_outcome
    )
  })
  ## APC state-specific model predictions
  output$state_table_predictions <- DT::renderDataTable({
    gen_state_table_predictions(
      jp_results = jp_results,
      s_state = input$s_state,
      s_outcome = input$s_outcome
    )
  })
  ## LEL map
  # lel_picker() chooses between statebins and choropleth rendering.
  output$lel_map <- renderPlot({
    lel_picker(
      lel_df = lel_df,
      l_age = input$l_age,
      l_year = input$l_year,
      l_comparison = input$l_comparison,
      l_statebins = input$l_statebins
    )
  })
  ## LEL table
  output$lel_table <- DT::renderDataTable({
    gen_lel_table(
      lel_df = lel_df,
      l_age = input$l_age,
      l_year = input$l_year
    )
  })
})
|
efe887a78c7d54180b7d1aa22fb36a3cbf6ae15e
|
907aaa2ef40dd8beeb9d533fa519fac0afaf8e37
|
/R/plotXY.r
|
614400111f0192777e379a6192031b38e84f3f8b
|
[] |
no_license
|
AndreasFischer1985/qqBaseX
|
eaee341155d66d4ff92ca00d6b4d419c3bf1f28a
|
98bec0ce041666d09d2c89a4ddc6b84a2349fa53
|
refs/heads/master
| 2022-09-14T18:58:05.493380
| 2022-08-26T11:52:38
| 2022-08-26T11:52:38
| 189,703,556
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,450
|
r
|
plotXY.r
|
#' Function plotXY
#'
#' Plots bivariate correlation based on two numeric vectors.
#' @param x Numeric vector.
#' @param y Numeric vector of the same length as x.
#' @param complexity Numeric value specifying the amount of nonlinearity modelled. Defaults to 0 (i.e., a linear model).
#' @param rep.nnet Numeric value specifying the number of nnet-objects to choose the best model from.
#' @param attrModel Logical value specifying whether to add the model as an attribute to the object returned.
#' @param na.rm Logical value indicating whether missing values should be skipped. Defaults to T.
#' @param color1 Color of points in the scattergram. Defaults to rgb(0,0,0,.7).
#' @param color2 Color of the regression line. Defaults to rgb(0,0,1).
#' @param color3 Color of the prediction interval. Defaults to rgb(0,0,1,.2).
#' @param xlab Character label for the x-axis. Defaults to "x".
#' @param ylab Character label for the y-axis. Defaults to "y".
#' @param axes Logical value specifying whether to draw axes. Defaults to T.
#' @param add Logical value; if T, draws onto an existing plot instead of starting a new one. Defaults to F.
#' @param main Title passed to the initial plot call. Defaults to NA.
#' @param sub Subtitle; if NULL, a default describing the shaded interval is used.
#' @param pch Plotting symbol for the points. Defaults to 16.
#' @param lwd Line width of the regression line. Defaults to 2.
#' @param cex Numeric character expansion factor. Defaults to 0.7.
#' @param cex.sub Character expansion for the subtitle. Defaults to 0.7.
#' @param generalize Logical value; if T, uses quantqual::af.nnet instead of quantqual::nnets for the nonlinear fit. Defaults to F.
#' @param main1 Optional title drawn on line 1 (see adj.main1/col.main1/cex.main1/font.main1). Defaults to NULL.
#' @param main2 Optional title drawn on line 2. Defaults to NULL.
#' @param main3 Optional title drawn on line 3. Defaults to NULL.
#' @param mar Numeric vector of margins; if numeric, par("mar") is set and restored afterwards. Defaults to NA.
#' @param adj.main1 Adjustment (0..1) of main1. Defaults to 0.
#' @param adj.main2 Adjustment (0..1) of main2. Defaults to 0.
#' @param adj.main3 Adjustment (0..1) of main3. Defaults to 0.
#' @param col.main1 Color of main1. Defaults to "black".
#' @param col.main2 Color of main2. Defaults to "black".
#' @param col.main3 Color of main3. Defaults to "black".
#' @param cex.main1 Character expansion of main1. Defaults to 1.2.
#' @param cex.main2 Character expansion of main2. Defaults to 1.2.
#' @param cex.main3 Character expansion of main3. Defaults to 1.2.
#' @param font.main1 Font of main1. Defaults to 1.
#' @param font.main2 Font of main2. Defaults to 2.
#' @param font.main3 Font of main3. Defaults to 4.
#' @param ... additional parameters passed to the plot function.
#' @details Plots scattergram and bivariate correlation based on two numeric vectors.
#' Returns (invisibly) a data frame with columns predictor, prediction,
#' lower.bound and upper.bound; when attrModel = T the fitted model is
#' attached as attribute "model".
#' @keywords plotting
#' @export
#' @examples
#' plotXY()
plotXY <- function (x = NULL, y = NULL, complexity = 0, rep.nnet = 10,
    attrModel = T, na.rm = T, color1 = rgb(0, 0, 0, 0.7), color2 = rgb(0,
        0, 1), color3 = rgb(0, 0, 1, 0.2), xlab = "x", ylab = "y",
    axes = T, add = F, main = NA, sub = NA, pch = 16, lwd = 2,
    cex = 0.7, cex.sub = 0.7, generalize = F, main1 = NULL, main2 = NULL,
    main3 = NULL, mar = NA, adj.main1 = 0, adj.main2 = 0, adj.main3 = 0,
    col.main1 = "black", col.main2 = "black", col.main3 = "black",
    cex.main1 = 1.2, cex.main2 = 1.2, cex.main3 = 1.2, font.main1 = 1,
    font.main2 = 2, font.main3 = 4, ...)
{
    if (is.null(sub))
        sub = ifelse(complexity == 0, "Shaded area represents 95%-confidence interval.",
            "Shaded area represents 95%-prediction interval.")
    # With no data given, demonstrate on two random normal samples.
    if (is.null(x) & is.null(y)) {
        x = rnorm(100)
        y = rnorm(100)
    }
    # Remember current margins so they can be restored before returning.
    mar0 = NULL
    if (is.numeric(mar)) {
        mar0 = par("mar")
        par(mar = mar)
    }
    # data0 keeps the raw values; data is a z-standardized copy used for
    # fitting (results are un-scaled back to the original units below).
    data = data.frame(x, y)
    if (na.rm == T)
        data = data[complete.cases(data), ]
    data0 = data
    data = data.frame(scale(data))
    colnames(data) = colnames(data0)
    nnet = NULL
    lm = NULL
    # Nonlinear fitting requires the quantqual package; fall back to the
    # linear model when it is not installed.
    if (complexity > 0)
        if (length(grep("^quantqual$", (installed.packages()[,
            "Package"]))) == 0) {
            complexity = 0
            warning("complexity set to 0 because quantqual-package is not installed.\nYou may install it via devtools::install_github(\"AndreasFischer1985/quantqual\")")
        }
    if (complexity > 0) {
        # Nonlinear branch: neural-net fit with a 95%-prediction interval.
        if (!generalize)
            nnet = quantqual::nnets(data, "y", size = complexity,
                linout = T, rep.nnet = rep.nnet)[[1]]
        else nnet = quantqual::af.nnet(data, "y", size = complexity,
            decay = NULL, linout = T, rep.nnet = rep.nnet)
        xTrain = data[colnames(data) != "y"]
        yTrain = data["y"]
        p = quantqual::predintNNET(nnet, xTrain, yTrain, main = main,
            sub = sub, color1 = color1, color2 = color2, color3 = color3,
            xlab = xlab, ylab = ylab, axes = axes, plot = F)
        p = p[order(data[, 1]), ]
        len = dim(p)[1]
        in1 = sort(data[, 1])
        ou1 = p[, 1]
        inner = p[, 2]
        outer = p[, 3]
    }
    else {
        # Linear branch: OLS fit; the band is built from the extreme
        # combinations of the coefficient confidence limits.
        l1 = lm(data[, 2] ~ data[, 1])
        lm = l1
        co1 = confint(l1)
        len = 100
        in1 = seq(min(data[, 1]), max(data[, 1]), length.out = len)
        ou1 = in1 * coef(l1)[2] + coef(l1)[1]
        ou2 = data.frame(in1 * co1[2, 1] + co1[1, 1], in1 * co1[2,
            2] + co1[1, 2], in1 * co1[2, 1] + co1[1, 2], in1 *
            co1[2, 2] + co1[1, 1])
        inner = apply(ou2, 1, min)
        outer = apply(ou2, 1, max)
    }
    # Map the standardized predictions back to the original units.
    unscale = function(x, m, s) x * s + m
    in1 = unscale(in1, mean(data0[, 1], na.rm = T), sd(data0[,
        1], na.rm = T))
    ou1 = unscale(ou1, mean(data0[, 2], na.rm = T), sd(data0[,
        2], na.rm = T))
    inner = unscale(inner, mean(data0[, 2], na.rm = T), sd(data0[,
        2], na.rm = T))
    outer = unscale(outer, mean(data0[, 2], na.rm = T), sd(data0[,
        2], na.rm = T))
    # Draw: empty canvas first (unless adding), then band, points, line.
    if (add == F)
        plot(data0[, 1], data0[, 2], xlab = xlab, ylab = ylab,
            main = main, type = "n", axes = axes, ...)
    if (add == F)
        if (!is.null(sub))
            title(sub = sub, cex.sub = cex.sub)
    polygon(c(in1, in1[length(in1):1]), c(inner, outer[length(outer):1]),
        col = color3[1], border = NA)
    # color1 may be per-point; collapse to a single color on length mismatch.
    if (length(data0[, 1]) != length(color1))
        color1 = color1[1]
    points(data0[, 1], data0[, 2], pch = pch, col = color1)
    lines(in1, ou1, , col = color2[1], lwd = lwd)
    dat = data.frame(predictor = in1, prediction = ou1, lower.bound = inner,
        upper.bound = outer)
    if (attrModel)
        if (!is.null(nnet))
            attr(dat, "model") = nnet
        else attr(dat, "model") = lm
    # Optional extra titles on lines 1-3.
    if (!is.null(main1))
        title(main1, line = 1, adj = adj.main1, cex.main = cex.main1,
            col = col.main1, font.main = font.main1)
    if (!is.null(main2))
        title(main2, line = 2, adj = adj.main2, cex.main = cex.main2,
            col = col.main2, font.main = font.main2)
    if (!is.null(main3))
        title(main3, line = 3, adj = adj.main3, cex.main = cex.main3,
            col = col.main3, font.main = font.main3)
    if (is.numeric(mar))
        par(mar = mar0)
    return(invisible(dat))
}
|
f0e7a038f40204ff3d57b327671486e74636b124
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/sampling/examples/inclusionprobabilities.Rd.R
|
c6a860d915077830041df7be8b8df536c42075e7
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 799
|
r
|
inclusionprobabilities.Rd.R
|
library(sampling)
### Name: inclusionprobabilities
### Title: Inclusion probabilities
### Aliases: inclusionprobabilities
### Keywords: survey

### ** Examples

## Example 1 ----
## Inclusion probabilities for a vector of positive size measures
size_measure <- 1:20
# probabilities for a fixed sample size of n = 12
pik <- inclusionprobabilities(size_measure, 12)
pik

## Example 2 ----
## Probabilities proportional to the number of inhabitants of each
## municipality in the Belgian database
data(belgianmunicipalities)
pik <- inclusionprobabilities(belgianmunicipalities$Tot04, 200)
# first-order inclusion probability for each municipality
data.frame(pik = pik, name = belgianmunicipalities$Commune)
# the inclusion probabilities sum to the sample size
sum(pik)
|
92cb72deec31a66a7fff42583b31f2a0e923f252
|
018a481574caece2e71b95ebc4c53b26090faf26
|
/12_4_fitting_mlm_in_R.R
|
3dc3fa28a478b5b73219b04f07fe97a654066529
|
[] |
no_license
|
davidpupovac/multilevel_modelling_gelman_hill
|
408f887f3180cad8d5a54194170d2b6f1e0b4a88
|
7bc38d746246b93a42aff2baf65a21b47c3a8f68
|
refs/heads/master
| 2021-01-12T11:36:45.255770
| 2016-05-21T16:09:59
| 2016-05-21T16:09:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,537
|
r
|
12_4_fitting_mlm_in_R.R
|
# Chapter 12.2
# Repeated measurements, time-series cross sections, and other
# non-nested structures
# Page 253

# dependencies (arm attaches lme4, providing lmer/display/se.* helpers)
library ("arm")
library("dplyr")

# get radon data
# Data are at http://www.stat.columbia.edu/~gelman/arm/examples/radon
srrs2 <- read.table ("~/llibres/llibres_tecnics/regression_multilevel_gelman_hill_2006/ARM_Data/radon/srrs2.dat",
                     header = TRUE,
                     sep = ",")

# quick inspection of the raw survey data
as_data_frame(srrs2)
summary(srrs2)
glimpse(srrs2)

# create extra variables for the model
mn <- srrs2$state == "MN" # Minnesota indicator
radon <- srrs2$activity[mn]
# floor radon at 0.1 before logging to avoid log(0) = -Inf
log.radon <- log(ifelse(radon == 0, 0.1, radon))
floor <- srrs2$floor[mn] # 0 for basement, 1 for first floor
n <- length(radon)
y <- log.radon
x <- floor

# create county sequential code (one entry per house, values 1..J)
county.name <- as.vector(srrs2$county[mn])
uniq <- unique(county.name)
J <- length(uniq)
# Bug fix: the original initialised `county` with rep(NA, J) (one slot per
# county, not per house) and relied on R silently extending the vector
# inside an assignment loop.  match() builds the same house-level index
# directly and at the right length.
county <- match(county.name, uniq)

## Varying-intercept model w/ no predictors
M0 <- lmer(y ~ 1 + (1 | county))
display(M0)

## Including x as a predictor
M1 <- lmer(y ~ x + (1 | county))
display(M1)

# estimated regression coefficients (per-county intercepts, common slope)
coef(M1)

# fixed and random effects
fixef(M1)
re <- ranef(M1)
head(re$county$`(Intercept)`)
re$county$`(Intercept)`[1:10]

# uncertainties in the estimated coefficients
se.fixef(M1)
se_re <- se.ranef(M1)
se_re$county[1:10]
str(se_re$county)

# 95% CI for the slope (point estimate +/- 2 standard errors)
fixef(M1)["x"] + c(-2, 2) * se.fixef(M1)["x"]
# or
fixef(M1)[2] + c(-2, 2) * se.fixef(M1)[2]

# 95% CI for the intercept in county 26
coef(M1)$county[26, 1] + c(-2, 2) * se.ranef(M1)$county[26]

# 95% CI for the error in the intercept in county 26
as.matrix(ranef(M1)$county)[26] + c(-2, 2) * se.ranef(M1)$county[26]

## Complete pooling regression
lm.pooled <- lm(y ~ x)
display(lm.pooled)

## No pooling regression (county as factor, no intercept)
lm.unpooled <- lm(y ~ x + factor(county) -1)
display(lm.unpooled)

# to plot Figure 12.4
a.hat.M1 <- coef(M1)$county[, 1] # 1st column is the intercept
b.hat.M1 <- coef(M1)$county[, 2] # 2nd element is the slope
x.jitter <- x + runif(n, -0.05, 0.05) # jitter the 0/1 floor values for plotting
display8 <- c(36, 1, 35, 21, 14, 71, 61, 70) # counties to be displayed
y.range <- range(y[!is.na(match(county,display8))])
par(mfrow = c(2, 4))
for (j in display8){
  plot (x.jitter[county==j], y[county==j], xlim=c(-.05,1.05), ylim=y.range,
        xlab="floor", ylab="log radon level", main=uniq[j],cex.lab=1.2,
        cex.axis=1.1, pch=20, mgp=c(2,.7,0), xaxt="n", yaxt="n", cex.main=1.1)
  axis (1, c(0,1), mgp=c(2,.7,0), cex.axis=1)
  axis (2, c(-1,1,3), mgp=c(2,.7,0), cex.axis=1)
  # complete-pooling (dashed blue), no-pooling (red), multilevel (black) fits
  curve (coef(lm.pooled)[1] + coef(lm.pooled)[2]*x, lty=2, col="blue", add=TRUE)
  curve (coef(lm.unpooled)[j+1] + coef(lm.unpooled)[1]*x, col="red", add=TRUE)
  curve (a.hat.M1[j] + b.hat.M1[j]*x, lwd=1, col="black", add=TRUE)
}

## Multilevel model ests vs. sample size (plot on the right on figure 12.3)
a.se.M1 <- se.coef(M1)$county
# NOTE(review): `sample.size.jittered` is never defined in this script.
# In the ARM book code it is the per-county sample size plus jitter --
# confirm it is created elsewhere before running this section.
par (mar=c(5,5,4,2)+.1)
plot (sample.size.jittered, t(a.hat.M1), cex.lab=1.2, cex.axis=1.1,
      xlab="sample size in county j", ylab=expression (paste
      ("est. intercept, ", alpha[j], " (multilevel model)")),
      pch=20, log="x", ylim=c(.15,3.5), yaxt="n", xaxt="n")
axis (1, c(1,3,10,30,100), cex.axis=1.1)
axis (2, seq(0,3), cex.axis=1.1)
# +/- 1 standard-error bars for each county intercept
for (j in seq_len(J)){
  lines (rep(sample.size.jittered[j],2),
         as.vector(a.hat.M1[j]) + c(-1,1)*a.se.M1[j], lwd=.5, col="gray10")
}
abline (coef(lm.pooled)[1], 0, lwd=.5)
|
cd9e7c87e7d4d6558c01795cb82ed2a7d3ebc7e8
|
4fa1216d26cf678ecd9468d53bac585c29496cef
|
/SIS_Workshops.R
|
08b9ba855668bae4c63477f2dc070acf344fa3fd
|
[] |
no_license
|
christrudeaumtl/SIS-Seminar-series-2019
|
0b820e1c39ce1e584118ba916c23b7efac806ed0
|
5311a8d1e32cf187c25a0a1a085be1afd586737c
|
refs/heads/master
| 2020-08-23T11:47:45.308107
| 2019-10-28T18:30:05
| 2019-10-28T18:30:05
| 216,609,116
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,244
|
r
|
SIS_Workshops.R
|
# Basic concepts ----
1 + 2
x <- 1 + 2
y <- c(1, 2, 3, 4)

# Set working directory and load libraries ----
# (workshop script: assumes it is launched from the project root)
setwd("Research communication/Workshops/R SIS Seminar 2019/")

library(tidyverse)
# NOTE: plyr is attached after dplyr (part of the tidyverse), so plyr's
# mutate()/summarise() mask dplyr's.  dplyr functions that must win are
# called with an explicit dplyr:: prefix below; do not reorder these loads.
library(plyr)
library(janitor)
library(reshape2)

# Importing data ----
raw.data <- read_csv("library stats.csv", locale=locale(encoding = "UTF-8"))

# Quick inspection of the imported data.
# Bug fix: the original line `names(), summary(), View(), head()` was not
# valid R (comma-separated argument-less calls) and stopped the script
# from parsing; these are the intended calls.
names(raw.data)
summary(raw.data)
head(raw.data)
# View(raw.data)  # interactive sessions only

# Selecting libraries ----
hist(raw.data$hours, breaks = 25)
plot(raw.data$staff.total)
plot(raw.data$surface)

# Which libraries have no recorded surface area?
raw.data %>%
  filter(is.na(surface)) %>%
  dplyr::count(library)

# Building a clean data set ----
clean.data <- raw.data %>%
  select(-music, -audiobooks, -movies, -videogames) %>%
  filter(!is.na(surface)) %>%
  mutate(
    ave.circulation=books/loans,
    ave.borrows=books/members
  )

# Summarise the data at the borough level ----
clean.data %>%
  ddply(c("borough", "year"), summarise,
        loans=sum(loans),
        staff.total=sum(staff.total),
        books=sum(books)) %>%
  dplyr::arrange(desc(loans))

# Access climate data ----
# Download Statistics Canada greenhouse-gas table 38-10-0097 to a temp file
emissions <- tempfile()
download.file("https://www150.statcan.gc.ca/n1/tbl/csv/38100097-eng.zip", emissions)
raw.env.data <- unz(emissions, "38100097.csv") %>% read_csv()

# Clean climate data ----
clean.env.data <- raw.env.data %>%
  clean_names() %>%
  dplyr::rename(
    year=ref_date,
    region=geo
  ) %>%
  select(year, region, sector, value) %>%
  mutate(
    # group provinces/territories into regional blocks
    block=case_when(
      region %in% c("Canada") ~ "National",
      region %in% c("New Brunswick", "Newfoundland and Labrador", "Nova Scotia", "Prince Edward Island") ~ "Atlantic",
      region %in% c("Quebec", "Ontario") ~ "Central",
      region %in% c("Alberta", "Saskatchewan", "Manitoba") ~ "Prairies",
      region %in% c("British Columbia") ~ "Pacific",
      region %in% c("Northwest Territories", "Nunavut", "Yukon") ~ "North"
    )
  )

# Total 2016 emissions per region, largest first
ghg.summary <- clean.env.data %>%
  filter(sector=="Total, industries and households") %>%
  filter(year==2016) %>%
  dplyr::arrange(desc(value))

# Average 2017 emissions per regional block (national total excluded)
clean.env.data %>%
  filter(sector=="Total, industries and households") %>%
  filter(block!="National") %>%
  ddply(c("year", "block"), summarise,
        ave.emissions=mean(value)) %>%
  filter(year==2017)

# Access population data ----
raw.pop.data <- read_csv("population.csv",
                         col_names = c("year", "code", "level", "region", "gnr", "gnr_lf", "quality", "geo_code", "dim", "mid", "notes", "total", "male", "female"),
                         skip=1
)
head(raw.pop.data)
table(raw.pop.data$dim)

province.population <- raw.pop.data %>%
  filter(dim=="Population, 2016") %>%
  select(region, total)

# Join data sets ----
# anti_join() shows rows with no match in the other table -- a quick check
# that region names line up before joining
anti_join(ghg.summary, province.population, by="region")

# add a fake region to demonstrate what unmatched rows look like
test <- ghg.summary %>%
  add_row(year=2017, region="Elbonia", sector="Total, industries and households", value=19109, block="Outside")
anti_join(test, province.population, by="region")
anti_join(province.population, test, by="region")

# merge emissions with population and compute per-capita emissions
ghg.summary <- full_join(ghg.summary, province.population, by="region") %>%
  mutate(percapita.emissions=value/total)
ghg.summary %>%
  dplyr::arrange(desc(percapita.emissions))
|
2f4366f6179fc563c3d6b5249cdf10a6c11fbe65
|
5b8a20546eb0912651ea4c5102e0f2d569ef9554
|
/man/get_teams.Rd
|
428bcb30675c246137d3a2b66b2df9e8ab6affe9
|
[] |
no_license
|
zmalosh/NhlDataHelpR
|
b83d73db227bc171c8030f4c6bc65904cee9f0bc
|
15ddd93c93589534d55b718444d277a091b5f4a8
|
refs/heads/master
| 2020-09-01T05:01:52.815000
| 2019-11-01T02:17:56
| 2019-11-01T02:17:56
| 218,886,194
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 339
|
rd
|
get_teams.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_teams.R
\name{get_teams}
\alias{get_teams}
\title{Get all NHL teams}
\usage{
get_teams(includeRoster = TRUE)
}
\arguments{
\item{includeRoster}{flag to enable rosters for each team}
}
\value{
all teams from the NHL API
}
\description{
Retrieve all teams from the NHL API, optionally including each team's roster.
}
\examples{
get_teams()
}
|
5897b0b460f785ecdde8595678a09a4cb47f2829
|
d795d03a39b08dbeb85a1a3c38909ecf8f6bd986
|
/General_LDFA.R
|
f86484411021b1599f985945c373d7ebcb44024f
|
[] |
no_license
|
PhDMattyB/LDFA_GeometricData
|
2eaaa61d10ce4a7b3e872c5f2030a31495d36deb
|
d607635aaf899c898ae4dadceec2b715c3c034a4
|
refs/heads/master
| 2020-05-18T07:49:51.182694
| 2019-04-30T14:32:01
| 2019-04-30T14:32:01
| 184,277,120
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,817
|
r
|
General_LDFA.R
|
##############################
## Linear discriminant function analysis body shape data
##
## Matt Brachmann (PhDMattyB)
##
## 2019-03-13
##
##############################

setwd('~/PhD/Morphometrics/Working Directory')

library(tidyverse)
library(wesanderson)
library(patchwork)
library(janitor)
library(devtools)
library(skimr)
# NOTE: lda() comes from MASS, which was never loaded in the original
# script.  It is called as MASS::lda() below (instead of library(MASS))
# so that MASS::select() does not mask dplyr::select().

theme_set(theme_bw())

## Read in your morphometric data
## Keep only the polymorphic populations
Morpho_data <- read_csv('AllLakes_AllometryExcluded_PWS_Combined.csv') %>%
  filter(LaMorph %in% c('G.SB', 'G.PI', 'S.LGB', 'S.PI',
                        'S.PL', 'T.LGB', 'T.PL', 'T.SB',
                        'V.BR', 'V.SIL'))

## Duplicate the benthic morphs that pair with two different pelagic
## morphs so each benthic-pelagic pair can be labelled as its own vector
Morpho_SLGB <- Morpho_data %>%
  slice(78:103) %>%
  mutate(LaMorph2 = as.factor(case_when(
    LaMorph == 'S.LGB' ~ 'S.LGB2'))) %>%
  select(-LaMorph) %>%
  rename(LaMorph = LaMorph2) %>%
  select(id:BP2, LaMorph, Sex:BPLD1,
         contains('PW'), UNIX:CS)

Morpho_TPL <- Morpho_data %>%
  slice(297:354) %>%
  mutate(LaMorph2 = as.factor(case_when(
    LaMorph == 'T.PL' ~ 'T.PL2'))) %>%
  select(-LaMorph) %>%
  rename(LaMorph = LaMorph2) %>%
  select(id:BP2, LaMorph, Sex:BPLD1,
         contains('PW'), UNIX:CS)

Morpho_cleaned <- bind_rows(Morpho_data, Morpho_SLGB, Morpho_TPL) %>%
  group_by(LaMorph)

## Label each benthic-pelagic morph pair with a Vector code
Morpho_cleaned <- mutate(.data = Morpho_cleaned,
                         Vector = as.factor(case_when(
                           LaMorph == "G.SB" ~ "GSBPI",
                           LaMorph == 'G.PI' ~ 'GSBPI',
                           LaMorph == 'S.LGB' ~ 'SLGBPI',
                           LaMorph == 'S.PI'~ 'SLGBPI',
                           LaMorph == 'S.LGB2' ~ 'SLGBPL',
                           LaMorph == 'S.PL' ~ 'SLGBPL',
                           LaMorph == 'T.LGB' ~ 'TLGBPL',
                           LaMorph == 'T.PL' ~ 'TLGBPL',
                           LaMorph == 'T.SB' ~ 'TSBPL',
                           LaMorph == 'T.PL2' ~ 'TSBPL',
                           LaMorph == 'V.BR' ~ 'VSILBR',
                           LaMorph == 'V.SIL' ~ 'VSILBR')))

## Filter for the vector (morph pair) you want to analyse body shape for
Morpho_grouped <- Morpho_cleaned %>%
  arrange(BP) %>%
  group_by(Vector) %>%
  filter(Vector == 'VSILBR')

## LDFA on all partial warp and uniform component measurements.
## Build the 40-term predictor list programmatically instead of typing
## every PW#X/PW#Y term by hand.
shape_vars <- c(paste0('PW', rep(1:19, each = 2), c('X', 'Y')), 'UNIX', 'UNIY')
ldfa_formula <- reformulate(shape_vars, response = 'BP')

## Fit without cross-validation first: this returns a proper "lda" object
## so predict() works.  (Bug fix: the original fit a single model with
## CV = T, which returns a plain list and makes predict() fail.)
ldfa <- MASS::lda(ldfa_formula, data = Morpho_grouped)
lda_predict <- predict(ldfa)
# posterior probability of the assigned class for each fish
apply(lda_predict$posterior, MARGIN = 1, FUN = max)

# re-substitution classification accuracy
confusion <- table(Morpho_grouped$BP, lda_predict$class)
sum(confusion[row(confusion) == col(confusion)])/sum(confusion)

## Leave-one-out cross-validation (CV = TRUE returns a list with $class)
ldfa_cv <- MASS::lda(ldfa_formula, data = Morpho_grouped, CV = TRUE)
confusion_cv <- table(Morpho_grouped$BP, ldfa_cv$class)
## cross-validated classification accuracy
sum(confusion_cv[row(confusion_cv) == col(confusion_cv)])/sum(confusion_cv)
|
9a29dd06d14400ab450e74f780073ddcab2a33d4
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/themetagenomics/tests/testthat/test-topic-effects.R
|
fdbcbd5eb4d5902d4904966ff1f59edc10b1c489
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,474
|
r
|
test-topic-effects.R
|
context('estimate topic effects')

# Regression test for est(): topic-effect estimation should produce
# consistent output for categorical, continuous, and spline covariates,
# and equivalent formulas (terms in a different order) should give
# estimates that agree closely.
test_that('est.topics returns correct results for different formulae',{

# long-running integration test; only run locally
skip_on_cran()
skip_on_travis()

# ---- OTU data set: categorical covariate (DIAGNOSIS) ----
DAT <- readRDS(system.file('testdata','otudata.rds',package='themetagenomics'))

set.seed(23)
x <- prepare_data(otu_table=DAT$OTU,
                  rows_are_taxa=FALSE,
                  tax_table=DAT$TAX,
                  metadata=DAT$META,
                  formula=~DIAGNOSIS,
                  refs='Not IBD',
                  cn_normalize=TRUE,
                  drop=TRUE)

y <- find_topics(x,K=5,init_type='Spectral',tol=1e-03)
z1 <- est(y)

# interval column names follow the requested uncertainty level
# (default -> 10%/90%; ui_level = .95 -> 2.5%/97.5%)
expect_identical(colnames(est(y)[[1]][[1]][[1]]),c('estimate','10%','90%'))
expect_identical(colnames(est(y,ui_level=.95)[[1]][[1]][[1]]),c('estimate','2.5%','97.5%'))
# est() carries over the model frame used when the topics were fit
expect_identical(y$modelframe,z1$modelframe)

# metadata with fewer rows than the fitted samples is an error
expect_error(est(y,metadata=DAT$META[1:7,],formula=~PCDAI))

# ---- OTU data set: continuous covariate (PCDAI, contains NAs) ----
x <- prepare_data(otu_table=DAT$OTU,
                  rows_are_taxa=FALSE,
                  tax_table=DAT$TAX,
                  metadata=DAT$META,
                  formula=~PCDAI,
                  refs='Not IBD',
                  cn_normalize=TRUE,
                  drop=TRUE)

y <- find_topics(x,K=5,init_type='Spectral',tol=1e-03)
expect_identical(y$modelframe,est(y)$modelframe)
expect_error(est(y,metadata=DAT$META[1:7,],formula=~DIAGNOSIS))
# supplying full metadata for a model fit on the PCDAI-complete subset
# warns but still runs
expect_warning(est(y,metadata=DAT$META,formula=~DIAGNOSIS))
z2 <- est(y,metadata=DAT$META,formula=~DIAGNOSIS,refs='Not IBD')
# model frames agree on the samples with non-missing PCDAI
expect_identical(z1$modelframe[rownames(DAT$META)[!is.na(DAT$META$PCDAI)],],
                 z2$modelframe[rownames(DAT$META)[!is.na(DAT$META$PCDAI)],])

# ---- Sequence data set: two factor/continuous covariates ----
DAT <- readRDS(system.file('testdata','seqdata.rds',package='themetagenomics'))

set.seed(423)
x <- prepare_data(otu_table=DAT$ABUND,
                  rows_are_taxa=FALSE,
                  tax_table=DAT$TAX,
                  metadata=DAT$META,
                  formula=~Site + Day,
                  refs='UBERON:saliva',
                  cn_normalize=FALSE,
                  drop=TRUE)

y <- find_topics(x,K=5,init_type='Spectral')
z1 <- est(y)

expect_identical(colnames(est(y)[[1]][[1]][[1]]),c('estimate','10%','90%'))
expect_identical(colnames(est(y,ui_level=.95)[[1]][[1]][[1]]),c('estimate','2.5%','97.5%'))
expect_identical(y$modelframe,z1$modelframe)

# same model with the formula terms swapped
x <- prepare_data(otu_table=DAT$ABUND,
                  rows_are_taxa=FALSE,
                  tax_table=DAT$TAX,
                  metadata=DAT$META,
                  formula=~Day + Site,
                  refs='UBERON:saliva',
                  cn_normalize=FALSE,
                  drop=TRUE)

y <- find_topics(x,K=5,init_type='Spectral',tol=1e-03)
z2 <- est(y)

# estimates from ~Site + Day and ~Day + Site should agree to within 0.1
expect_true(mean(abs(z1$topic_effects$`SiteUBERON:feces`$est[,1]-z2$topic_effects$`SiteUBERON:feces`$est[,1])) < .1)
expect_true(mean(abs(z1$topic_effects$Day$est[,1]-z2$topic_effects$Day$est[,1])) < .1)

# ---- Sequence data set: spline term s(Day), order swapped ----
set.seed(23)
x <- prepare_data(otu_table=DAT$ABUND,
                  rows_are_taxa=FALSE,
                  tax_table=DAT$TAX,
                  metadata=DAT$META,
                  formula=~Site + s(Day),
                  refs='UBERON:saliva',
                  cn_normalize=FALSE,
                  drop=TRUE)

y <- find_topics(x,K=5,init_type='Spectral',tol=1e-03)
z1 <- est(y)

expect_identical(colnames(est(y)[[1]][[1]][[1]]),c('estimate','10%','90%'))
expect_identical(colnames(est(y,ui_level=.95)[[1]][[1]][[1]]),c('estimate','2.5%','97.5%'))
expect_identical(y$modelframe,z1$modelframe)

x <- prepare_data(otu_table=DAT$ABUND,
                  rows_are_taxa=FALSE,
                  tax_table=DAT$TAX,
                  metadata=DAT$META,
                  formula=~s(Day) + Site,
                  refs='UBERON:saliva',
                  cn_normalize=FALSE,
                  drop=TRUE)

y <- find_topics(x,K=5,init_type='Spectral',tol=1e-03)
z2 <- est(y)

expect_true(mean(abs(z1$topic_effects$`SiteUBERON:feces`$est[,1]-z2$topic_effects$`SiteUBERON:feces`$est[,1])) < .1)
expect_true(mean(abs(z1$topic_effects$Day$est[,1]-z2$topic_effects$Day$est[,1])) < .1)

# ---- Sequence data set: multi-level factor (Multi) plus spline ----
set.seed(23)
x <- prepare_data(otu_table=DAT$ABUND,
                  rows_are_taxa=FALSE,
                  tax_table=DAT$TAX,
                  metadata=DAT$META,
                  formula=~Multi + s(Day),
                  refs='1',
                  cn_normalize=FALSE,
                  drop=TRUE)

y <- find_topics(x,K=5,init_type='Spectral',tol=1e-03)
z1 <- est(y)

expect_identical(colnames(est(y)[[1]][[1]][[1]]),c('estimate','10%','90%'))
expect_identical(colnames(est(y,ui_level=.95)[[1]][[1]][[1]]),c('estimate','2.5%','97.5%'))
expect_identical(y$modelframe,z1$modelframe)

x <- prepare_data(otu_table=DAT$ABUND,
                  rows_are_taxa=FALSE,
                  tax_table=DAT$TAX,
                  metadata=DAT$META,
                  formula=~s(Day) + Multi,
                  refs='1',
                  cn_normalize=FALSE,
                  drop=TRUE)

y <- find_topics(x,K=5,init_type='Spectral',tol=1e-03)
z2 <- est(y)

# both non-reference Multi levels and the spline term should agree
expect_true(mean(abs(z1$topic_effects$Multi2$est[,1]-z2$topic_effects$Multi2$est[,1])) < .1)
expect_true(mean(abs(z1$topic_effects$Multi3$est[,1]-z2$topic_effects$Multi3$est[,1])) < .1)
expect_true(mean(abs(z1$topic_effects$Day$est[,1]-z2$topic_effects$Day$est[,1])) < .1)
})
|
41521eddb72f3a8a5d20b47f0a2f735e045791be
|
89904aad1e489f936a03f44a8f96a709a27ac0d7
|
/MFDFA_toolbox/Error_Bars.R
|
81d4eaec1e52f3dfb8da8afcd4402b6370f104d0
|
[] |
no_license
|
tehrandavis/usefulR
|
d77629ac186a25c69fc93b3030ef14c62e60117e
|
31fb94127b7fc6d407b280d856c56c242d094bb1
|
refs/heads/master
| 2021-01-23T05:44:34.918507
| 2019-06-21T14:51:59
| 2019-06-21T14:51:59
| 92,980,858
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 589
|
r
|
Error_Bars.R
|
# Lattice prepanel function: widens the panel's y-limits so that the
# confidence bounds (ly, uy) of the points shown in this panel fit.
prepanel.ci <- function(x, y, ly, uy, subscripts, ...)
{
    lower  <- as.numeric(ly[subscripts])
    upper  <- as.numeric(uy[subscripts])
    values <- as.numeric(y)
    # range over points and both bounds; finite = TRUE ignores NA/Inf
    list(ylim = range(c(values, upper, lower), finite = TRUE))
}
# Lattice panel function: draws vertical error bars from ly to uy at each
# x position, then overlays the (x, y) points on top.
panel.ci <- function(x, y, ly, uy, subscripts, pch = 16, ...)
{
    xv    <- as.numeric(x)
    yv    <- as.numeric(y)
    lower <- as.numeric(ly[subscripts])
    upper <- as.numeric(uy[subscripts])
    # double-headed arrows (code = 7) with flat 90-degree heads = error bars
    panel.arrows(xv, lower, xv, upper, col = c('blue'),
                 length = .25, unit = "native",
                 angle = 90, code = 7)
    panel.xyplot(xv, yv, pch = pch, ...)
}
|
c040df1d67e9084c015c5fd9c75c6d98059cbfdc
|
6d5a7d0a5f55520fceb0a2868bc6b7fb7903075a
|
/man/lca-tree-method.Rd
|
1183af478780ac3dc5615ad85830e9330352611e
|
[] |
no_license
|
meta-QSAR/simple-tree
|
7dbb617aff4e637d1fcce202890f322b99364494
|
28ff7bf591d3330498a3c8a85d8ae5a3d27b37d5
|
refs/heads/master
| 2016-09-14T07:08:33.743705
| 2015-08-04T09:31:24
| 2015-08-04T09:31:24
| 58,349,277
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 532
|
rd
|
lca-tree-method.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/class-tree.R
\docType{methods}
\name{lca,tree-method}
\alias{lca,tree-method}
\title{Find the lowest common ancestor.}
\usage{
\S4method{lca}{tree}(object, node1.id, node2.id)
}
\arguments{
\item{object}{A tree object.}
\item{node1.id}{The ID of the 1st node.}
\item{node2.id}{The ID of the 2nd node.}
}
\value{
\code{lca} returns a character value.
}
\description{
\code{lca} returns the ID of the lowest common ancestor between two nodes.
}
|
1bff49da52d187160dbad0d95f4b5e72c1774d18
|
3eb2711c70ad29c4625dec5f572253511afcab71
|
/man/cwm.Rd
|
ec81db6608717c4e6d4f0d1ca9fc91baee17f022
|
[] |
no_license
|
cran/vegdata
|
5b3609cb51cc56f3b5b1b7a2e765f4039fc1ab8f
|
a5681ac219367a2b883225b2df720c7e51bd59d9
|
refs/heads/master
| 2022-12-21T03:25:11.693116
| 2022-12-17T14:50:02
| 2022-12-17T14:50:02
| 17,700,792
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,408
|
rd
|
cwm.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cwm.r
\name{cwm}
\alias{cwm}
\title{Indicate site conditions with community weighted mean values of traits or with mode of gradient classes (sum of species amplitudes).}
\usage{
cwm(veg, refl, trait.db = 'ecodbase.dbf', ivname, keyname = 'LETTERCODE',
method, weight, db, ...)
}
\arguments{
\item{veg}{Vegetation matrix with plots in rows and species in columns}
\item{refl}{Name of Turboveg taxonomic reference list}
\item{trait.db}{data frame with species trait values}
\item{ivname}{Name of the trait in trait.db to be used}
\item{keyname}{Name of the column in trait dataframe to join with colnames of veg table}
\item{method}{mean (weighted value of single traits, or mode (maximum) of trait classes)}
\item{weight}{additional weight, e.g. niche breadth of species}
\item{db}{name of Turboveg database}
\item{\dots}{additional arguments}
}
\value{
Vector with the ecological classification of sites. Either mean trait values or mode of gradient classes.
}
\description{
Calculates community weighted mean trait values, like mean Ellenberg indicator values.
Alternatively (method = 'mode') environmental conditions can be calculated according to the concept of sums of amplitudes of species along ecological gradients.
}
\details{
Trait values of 0 will be handled as NA values because Turboveg dBase can not handle NA values properly.
}
\examples{
\dontrun{
db <- 'elbaue'
veg <- tv.veg(db, cover.transform='sqrt', check.critical = FALSE)
site <- tv.site(db, verbose = FALSE)
#' Exclude plots with very high water level fluctuation
veg <- veg[site$SDGL < 60,]
veg <- veg[,colSums(veg) > 0]
site <- site[site$SDGL < 60,]
#' Load species trait value database
traits <- tv.traits(db)
#' Mean indicator values of Ellenberg F values
mEIV_F <- isc(veg, trait.db = traits, ivname = 'OEK_F', method = 'mean')
plot(site$MGL, mEIV_F, xlab = 'Mean groundwater level')
#' Mode (most frequent level) of Ellenberg F values
library(reshape)
traitmat <- cast(traits, LETTERCODE ~ OEK_F)
traitmat <- traitmat[,-14]
ilevel <- isc(veg, trait.db = traitmat, ivname = as.character(1:11), method = 'mode')
boxplot(site$MGL ~ ordered(ilevel, levels = levels(ilevel)[c(2,4,3,5,6:10,1)]))
}
}
\author{
Florian Jansen \email{florian.jansen@uni-rostock.de}
}
|
97a941416ba94df2a0919a930fd46c5d9839af7e
|
87e3533a33d8c698d3d8eb65b90152ec7258858e
|
/cachematrix.R
|
7f9638fba427047b1ec0d840e99912db35962870
|
[] |
no_license
|
gpmerwe/ProgrammingAssignment2
|
678e1d6317cd06dec6a34b579536d78493b1a3f8
|
709220f0973f6c11bab54892cd0bc4b924d32867
|
refs/heads/master
| 2021-01-22T05:16:09.696697
| 2017-02-11T10:15:32
| 2017-02-11T10:15:32
| 81,642,322
| 0
| 0
| null | 2017-02-11T09:07:33
| 2017-02-11T09:07:33
| null |
UTF-8
|
R
| false
| false
| 1,370
|
r
|
cachematrix.R
|
## The first function creates a list that contains
## the matrix itself and its inverse
## The second function looks up the list to retrieve
## the inverse of the matrix
## The makeCacheMatrix function consists of 4 functions,
## setmatrix sets the contained matrix to that of your matrix
## getmatrix just retrieves the contained matrix
## setinverse calculates the inverse of the contained matrix
## getinverse retrieves the stored inverse
makeCacheMatrix <- function(x = matrix()) {
  # Build a cache object for a matrix: a list of four closures sharing
  # the matrix `x` and its (lazily computed) inverse.
  cached_inv <- NULL  # NULL until an inverse is stored via setinverse()
  setmatrix <- function(new_matrix) {
    x <<- new_matrix
    cached_inv <<- NULL  # a new matrix invalidates the cached inverse
  }
  getmatrix  <- function() x
  setinverse <- function(inverse) cached_inv <<- inverse
  getinverse <- function() cached_inv
  list(setmatrix  = setmatrix,
       getmatrix  = getmatrix,
       setinverse = setinverse,
       getinverse = getinverse)
}
## cacheSolve retrieves the inverse of you matrix from the
## list created by the makeCacheMatrix function
## If the inverse is stored, then it just retrieves the inverse
## else it calculates the inverse, stores and returns the result
cacheSolve <- function(x, ...) {
  ## Return the inverse of the matrix stored in 'x' (a cache object made
  ## by makeCacheMatrix), computing and caching it on the first call and
  ## reusing the cached value on subsequent calls.
  ## Extra arguments in ... are passed on to solve().
  m <- x$getinverse()
  if(!is.null(m)) {
    message("Getting cached data")
    return(m)
  }
  data <- x$getmatrix()
  m <- solve(data, ...)
  # Bug fix: the original called x$setmatrix(m), which overwrote the
  # stored matrix with its inverse AND reset the cache to NULL, so the
  # cached inverse was never reused.  setinverse() stores it correctly.
  x$setinverse(m)
  m
}
|
9dce1dc94cf57582c37b5a18a2c084a88c222105
|
f2afad9842782cd02c4bf04d6ecac12aa6af51b9
|
/complete.r
|
7addc0d9b88dd782481c66f703cdea6408179f6e
|
[] |
no_license
|
shashank-krr/datasciencecoursera
|
cf1f7ed57d3b3380c4211e745fb916cd94bfb10b
|
2149e1ca2944c2ce060a380c4a97fade0b287fdf
|
refs/heads/master
| 2021-01-20T02:41:23.228359
| 2017-08-30T01:45:53
| 2017-08-30T01:45:53
| 101,330,166
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 291
|
r
|
complete.r
|
complete <- function(directory, id = 1:332) {
  ## Count the complete (NA-free) rows in each monitor's CSV file.
  ##
  ## directory: path to the folder containing the monitor CSV files.
  ## id:        integer vector of monitor ids; each id indexes into the
  ##            alphabetically sorted file list (files are named
  ##            001.csv ... 332.csv, so position matches id).
  ##
  ## Returns a data.frame with one row per id and columns 'id' and 'nobs'
  ## (the number of complete cases in that monitor's file).
  # anchor the pattern so only true .csv files match (".csv" alone treats
  # the dot as a regex wildcard and matches names like "xcsv")
  files <- list.files(directory, pattern = "\\.csv$", full.names = TRUE)
  # vapply preallocates the result instead of growing a vector with c()
  count_complete <- function(i) {
    as.numeric(sum(complete.cases(read.csv(files[i]))))
  }
  nobs <- vapply(id, count_complete, numeric(1))
  data.frame(id, nobs)
}
|
7b09b024621a93525229ae800f18ac5803ec9c35
|
ef572bd2b0515892d1f59a073b8bf99f81d6a734
|
/man/createAnalytics.Rd
|
6a9d33df2d1e3a8dd8017e3c85b19640002d167e
|
[
"CC0-1.0"
] |
permissive
|
pepfar-datim/datapackr
|
5bc604caa1ae001b6c04e1d934c0c613c59df1e6
|
9275632673e45948db6846513a53c1436cfc0e47
|
refs/heads/master
| 2023-08-30T23:26:48.454382
| 2023-08-11T13:01:57
| 2023-08-11T13:01:57
| 170,350,211
| 9
| 7
|
CC0-1.0
| 2023-09-11T21:53:24
| 2019-02-12T16:19:47
|
R
|
UTF-8
|
R
| false
| true
| 563
|
rd
|
createAnalytics.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/createAnalytics.R
\name{createAnalytics}
\alias{createAnalytics}
\title{createAnalytics(d)}
\usage{
createAnalytics(d, d2_session = dynGet("d2_default_session", inherits = TRUE))
}
\arguments{
\item{d}{Datapackr object}
\item{d2_session}{R6 datimutils object which handles authentication with DATIM}
}
\value{
Modified d object with d$data$analytics
}
\description{
Wrapper function for creation of d$data$analytics object
which is suitable for export to external analytics systems.
}
|
2566600091873d28e3a2a77359562d08248ddf31
|
5bca27d00ed15100b3524386a90e488bfb3e1e80
|
/ConsumerExpenditures.R
|
c26779fde1afffecd151480e6d8de47058796de4
|
[] |
no_license
|
whelanh/WorldValueSurveyRCode
|
07d5359089e4d70c82b6b1ba0b0deb3fe4016b84
|
032268e7c41d7a9b398ca0050dae584ce7f4510e
|
refs/heads/master
| 2021-07-25T18:02:59.671991
| 2020-07-03T10:40:21
| 2020-07-03T10:40:21
| 74,976,183
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,361
|
r
|
ConsumerExpenditures.R
|
# analyze survey data for free (http://asdfree.com) with the r language
# consumer expenditure survey
# replication of the output of various macros stored in the "CE macros.sas" example program
# using 2011 public use microdata
# # # # # # # # # # # # # # # # #
# # block of code to run this # #
# # # # # # # # # # # # # # # # #
# library(downloader)
# setwd( "C:/My Directory/CES/" )
# source_url( "https://raw.githubusercontent.com/ajdamico/asdfree/master/Consumer%20Expenditure%20Survey/2011%20fmly%20intrvw%20-%20analysis%20examples.R" , prompt = FALSE , echo = TRUE )
# # # # # # # # # # # # # # #
# # end of auto-run block # #
# # # # # # # # # # # # # # #
# this r script will review the example analyses of both imputed and non-imputed variables
# described in the "CE macros program documentation.doc" document
# in the folder "Programs 2011\SAS\" inside the bls documentation file
# http://www.bls.gov/cex/pumd/documentation/documentation11.zip
# contact me directly for free help or for paid consulting work
# anthony joseph damico
# ajdamico@gmail.com
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
###################################################################################################################
# prior to running this replication script, all ces 2011 public use microdata files must be loaded as R data #
# files (.rda) on the local machine. running the "2010-2011 ces - download.R" script will create these files. #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# https://github.com/ajdamico/asdfree/blob/master/Consumer%20Expenditure%20Survey/download%20all%20microdata.R #
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# that script will save a number of .rda files in C:/My Directory/CES/2011/ (or the working directory was chosen) #
###################################################################################################################
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# set your working directory.
# the CES 2011 R data files (.rda) should have been
# stored in a year-specific directory within this folder.
# so if the file "fmli111x.rda" exists in the directory "C:/My Directory/CES/2011/intrvw/"
# then the working directory should be set to "C:/My Directory/CES/"
# use forward slashes instead of back slashes
# uncomment this line by removing the `#` at the front..
setwd( "~/Downloads/SCF/" )
# ..in order to set your current working directory
# turn off scientific notation in most output
options( scipen = 20 )
library(mitools) # allows analysis of multiply-imputed survey data
library(stringr) # load stringr package (manipulates character strings easily)
library(plyr) # contains the rbind.fill() function, which stacks two data frames even if they don't contain the same columns. the rbind() function does not do this
library(survey) # load survey package (analyzes complex design surveys)
library(downloader) # downloads and then runs the source() function on scripts from github
# load two svyttest functions (one to conduct a df-adjusted t-test and one to conduct a multiply-imputed t-test)
source_url( "https://raw.githubusercontent.com/ajdamico/asdfree/master/Consumer%20Expenditure%20Survey/ces.svyttest.R" , prompt = FALSE )
# now that these two functions have been loaded into r, you can view their source code by uncommenting the two lines below
# svyttest.df
# svyttest.mi
# set this number to the year you would like to analyze..
year <- 2015
# r will now take the year you've selected and re-assign the current working directory
# to the year-specific folder based on what you'd set above
# so if you'd set C:/My Directory/CES/ above, it's now been changed to C:/My Directory/CES/2011/
setwd( paste( getwd() , year , sep = "/" ) )
# pull the last two digits of the year variable into a separate string
yr <- substr( year , 3 , 4 )
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# read in the five quarters of family data files (fmli)
# load all five R data files (.rda)
load( paste0( "./intrvw/fmli" , yr , "1x.rda" ) )
load( paste0( "./intrvw/fmli" , yr , "2.rda" ) )
load( paste0( "./intrvw/fmli" , yr , "3.rda" ) )
load( paste0( "./intrvw/fmli" , yr , "4.rda" ) )
# the fifth quarter lives in the *following* year's first-quarter file
load( paste0( "./intrvw/fmli" , as.numeric( yr ) + 1 , "1.rda" ) )
# save the first quarter's data frame into a new data frame called 'fmly'
# (the quarter-one object carries an 'x' suffix, e.g. fmli151x)
fmly <- get( paste0( "fmli" , yr , "1x" ) )
# and create a new column called 'qtr' with all ones
fmly$qtr <- 1
# loop through the second, third, and fourth fmli data frames
for ( i in 2:4 ){
	# copy each quarter into a new data frame called 'x'
	x <- get( paste0( "fmli" , yr , i ) )
	# add a quarter variable (2, 3, then 4)
	x$qtr <- i
	# stack 'x' below what's already in the fmly data table
	# ..this stacks quarters 2, 3, and 4 below quarter 1
	# NOTE(review): rbind.fill() is from the plyr package (it fills columns
	# absent from one quarter with NA) -- confirm library(plyr) is loaded
	# earlier in the full script.
	fmly <- rbind.fill( fmly , x )
}
# repeat the steps above on the fifth quarter (which uses the following year's first quarter of data)
x <- get( paste0( "fmli" , as.numeric( yr ) + 1 , "1" ) )
x$qtr <- 5
# final stacking of the fifth quarter
fmly <- rbind.fill( fmly , x )
# now the 'fmly' data table contains everything needed for analyses
# delete the temporary data frame from memory
rm( x )
# also delete the data frames loaded by the five load() function calls above
rm(
	list =
		c(
			paste0( "fmli" , yr , "1x" ) ,
			paste0( "fmli" , yr , 2:4 ) ,
			paste0( "fmli" , as.numeric( yr ) + 1 , "1" )
		)
)
# clear up RAM
gc()
# create a character vector containing 45 variable names (wtrep01, wtrep02, ... wtrep44 and finlwt21)
# NOTE(review): str_pad() comes from the stringr package -- presumably loaded
# earlier in the full script; confirm library(stringr) appears above.
wtrep <- c( paste0( "wtrep" , str_pad( 1:44 , 2 , pad = "0" ) ) , "finlwt21" )
# immediately loop through each weight column (stored in the wtrep vector)
# and overwrite all missing values (NA) with zeroes
for ( i in wtrep ) fmly[ is.na( fmly[ , i ] ) , i ] <- 0
# create a new variable in the fmly data table called 'totalexp'
# that contains the sum of the total expenditure from the current and previous quarters
fmly$totalexp <- rowSums( fmly[ , c( "totexppq" , "totexpcq" ) ] , na.rm = TRUE )
# immediately convert missing values (NA) to zeroes
# NOTE(review): with na.rm = TRUE above, rowSums() cannot return NA here,
# so this line is a harmless defensive no-op.
fmly[ is.na( fmly$totalexp ) , "totalexp" ] <- 0
# annualize the total expenditure by multiplying the total expenditure by four,
# creating a new variable 'annexp' in the fmly data table
fmly <- transform( fmly , annexp = totalexp * 4 )
# the "CE macros.sas" file creates estimates that match the mse = TRUE option set here.
# in order to match the sas software provided by the bureau of labor statistics, keep this set to TRUE
# if this option is set to TRUE
# R will exactly match SUDAAN results and Stata with the MSE option results
options( survey.replicates.mse = TRUE )
# otherwise if it is commented out or set to FALSE
# R will exactly match Stata without the MSE option results
# Stata svyset command notes can be found here: http://www.stata.com/help.cgi?svyset
# add a column called 'one' to the fmly data table containing 1s throughout
# (used later as the analysis variable for unweighted record counts)
fmly$one <- 1
# create the survey design as a balanced repeated replication survey object,
# with 44 replicate weights
# repweights is a regular expression matching the wtrep01-wtrep44 columns
fmly.design <-
	svrepdesign(
		repweights = "wtrep[0-9]+" ,
		weights = ~finlwt21 ,
		data = fmly
	)
# after its creation, explore these attributes by typing the object into the console..
# print a basic description of the replicate design
fmly.design
# print the available attributes of this object
attributes( fmly.design )
# access one of the attributes.. hey how about the degrees of freedom?
fmly.design$degf
#####################
# analysis examples #
#####################

# count the total (unweighted) number of records in fmly #
unwtd.count(
	~one ,
	fmly.design
)

# unweighted record counts, broken out by the urban/rural variable #
svyby(
	~one ,
	~bls_urbn ,
	fmly.design ,
	unwtd.count
)

# calculate the mean of a linear variable #
# average annual household expenditure - nationwide
# FIX: the original call also passed a stray formula ( ~age_ref%/%10 ).
# because `design` was supplied by name, that formula was silently matched to
# svymean's next formal argument -- svymean( x , design , na.rm , ... ) --
# which is not a valid value for na.rm. for a breakdown by age decade, use
# svyby( ~annexp , ~I( age_ref %/% 10 ) , fmly.design , svymean ) instead.
svymean(
	~annexp ,
	design = fmly.design
)

# average annual household expenditure, by urban/rural
svyby(
	~annexp ,
	~bls_urbn ,
	design = fmly.design ,
	svymean
)
# calculate the distribution of a categorical variable #
# sex_ref arrives as a numeric code, but the svyby() call below requires a
# factor (categorical) variable rather than a numeric (linear) one --
# convert it on the design object before estimating.
fmly.design <- update( fmly.design , sex_ref = factor( sex_ref ) )

# nationwide share of households headed by males vs. females
svymean( ~sex_ref , design = fmly.design )

# the same distribution, broken out by urban/rural
svyby( ~sex_ref , ~bls_urbn , design = fmly.design , svymean )
# calculate the median and other percentiles #
################### HUGH WHELAN CODE ##################################################
# reldist supplies gini() for computing Gini coefficients
library(reldist)
# minimum, 25th, 50th, 75th, maximum
# annual expenditure in the united states
# seq(0,1,0.01) requests all 101 percentiles (0%, 1%, ..., 100%)
# NOTE(review): `exp` masks the base function exp() for the rest of the
# session -- a different name would be safer.
exp<-svyquantile(
	~annexp ,
	design = fmly.design ,
	c( seq(0,1,0.01) )
)
# before-tax income percentiles
incbtx <- svyquantile(
	~fincbtxm ,
	design = fmly.design ,
	c( seq(0,1,0.01) )
)
# after-tax income percentiles
incatx <- svyquantile(
	~finatxem ,
	design = fmly.design ,
	c( seq(0,1,0.01) )
)
# Gini coefficients computed from the 101 percentile values above
# NOTE(review): gini() treats its input as a plain unweighted sample, so
# feeding it the percentile grid gives an approximation of the
# survey-weighted Gini -- confirm this is the intended estimator.
gini(exp)
gini(incbtx)
gini(incatx)
########################### END HUGH WHELAN CODE #######################################
# by urban/rural
# quantiles of annual expenditure (min, quartiles, max) with confidence intervals
# NOTE(review): prefer the spelled-out TRUE over T (T is reassignable)
svyby(
	~annexp ,
	~bls_urbn ,
	design = fmly.design ,
	svyquantile ,
	c( 0 , .25 , .5 , .75 , 1 ) ,
	ci = T
)
######################
# subsetting example #
######################

# restrict the fmly.design object to
# households headed by females only
# ( sex_ref code 2 in the fmly data )
fmly.female <-
	subset(
		fmly.design ,
		sex_ref %in% 2
	)
# now any of the above commands can be re-run
# using fmly.female object
# instead of the fmly.design object
# in order to analyze households headed by females only

# calculate the mean of a linear variable #
# average household expenditure - nationwide,
# restricted to households headed by females
svymean(
	~annexp ,
	design = fmly.female
)
# remove this subset design to clear up memory
rm( fmly.female )
# clear up RAM
gc()
######################################
# CE macros.sas replication examples #
######################################

# replicate the first macro shown in the "CE macros program documentation.doc" document
# the example macro (seen on page 7) looks like this, without the comments (#)

# %MEAN_VARIANCE(DSN = FMLY,
	# FORMAT = BLS_URBN $URBN.,
	# USE_WEIGHTS = YES,
	# BYVARS = BLS_URBN,
	# ANALVARS = ANNEXP FINCBTXM,
	# IMPUTED_VARS = FINCBTX1-FINCBTX5,
	# CL = 99,
	# DF = RUBIN87,
	# TITLE1 = COMPUTING MEANS AND VARIANCES,
	# TITLE2 = VARIABLES FROM THE FAMILY FILE,
	# TITLE3 = ,
	# XOUTPUT =
# );

# instead of exporting all of these results into a large text output (like sas does)
# the following steps will produce each of the components, one at a time

# count the total (unweighted) number of records in fmly #
# broken out by the urban/rural variable, as specified in the sas macro call above
svyby( ~one , ~bls_urbn , fmly.design , unwtd.count )

# calculate means and standard errors, and save the results into a new object
# but also print the results to the screen.
# r hint: when assigning ( <- ) an object to another object, you can print the object to the screen
# at the same time as assigning ( <- ) it by encasing it in parentheses

# note that the following commands use svyby() outside of a svymean call
# as opposed to svymean() alone, because the results need to be broken out by
# the bls_urbn variable, as specified in the sas macro call above

# print and save the mean and standard error
#######################################################################################
# Code added by Hugh Whelan for Essay on "Retirement Crisis in America?"
# Data for Tables 3 and 4
# add derived columns to the design object:
#   ageDecile  - reference person's age binned into decades (integer division)
#   incDecile  - income class code treated as a factor
#   annexpPC / fincbtxmPC - per-capita expenditure and before-tax income
# NOTE(review): the design is passed as the *last* argument; it still matches
# update()'s first formal ( object ) because it is the only unnamed argument,
# but placing it first would be clearer.
fmly.design <-
	update(
		ageDecile = factor( age_ref%/%10 ) ,
		incDecile = factor(inclass),
		annexpPC = annexp/fam_size,
		fincbtxmPC = fincbtxm/fam_size,
		fmly.design
	)
# Stats for 75+ year old age group
# NOTE(review): the trailing comma inside subset() creates an empty third
# argument; it appears to be tolerated, but should be removed.
fmly.haveIncome <-
	subset(
		fmly.design ,
		age_ref>=75,
	)
# Household data for Table 3
# ratio of annual expenditure to before-tax income, by income class (auto-printed)
svyby(~annexp, by = ~incDecile, denominator = ~fincbtxm , fmly.haveIncome, svyratio)
# a = mean annual expenditure, b = mean before-tax income,
# e = mean family size, each by income class
# NOTE(review): `c` and `e` mask base functions c() and... are reused/overwritten
# for every table below -- descriptive names would be safer.
a<-svyby( ~annexp , ~incDecile , fmly.haveIncome , svymean , na.rm = TRUE )
b<-svyby( ~fincbtxm , ~incDecile , fmly.haveIncome , svymean , na.rm = TRUE )
e <- svyby( ~fam_size , ~incDecile , fmly.haveIncome , svymean , na.rm = TRUE )
# percent of households headed by inclass
c<-svymean(
	~incDecile ,
	design = fmly.haveIncome
)
# merge expenditure and income means, then attach the income-class shares
d<-merge(a,b,by="incDecile")
d$new <- coef(c)
# Per Capita Data for Table 4
# same estimates as above, computed on the per-capita columns
svyby(~annexpPC, by = ~incDecile, denominator = ~fincbtxmPC , fmly.haveIncome, svyratio)
a<-svyby( ~annexpPC , ~incDecile , fmly.haveIncome , svymean , na.rm = TRUE )
b<-svyby( ~fincbtxmPC , ~incDecile , fmly.haveIncome , svymean , na.rm = TRUE )
e <- svyby( ~fam_size , ~incDecile , fmly.haveIncome , svymean , na.rm = TRUE )
# percent of households headed by inclass
c<-svymean(
	~incDecile ,
	design = fmly.haveIncome
)
d<-merge(a,b,by="incDecile")
d$new <- coef(c)
# 65 to 74 year old Age Group Data
# re-subset the design to the 65-74 age band and repeat both tables
fmly.haveIncome <-
	subset(
		fmly.design ,
		age_ref>=65 & age_ref <75,
	)
# Household data for Table 3
svyby(~annexp, by = ~incDecile, denominator = ~fincbtxm , fmly.haveIncome, svyratio)
a<-svyby( ~annexp , ~incDecile , fmly.haveIncome , svymean , na.rm = TRUE )
b<-svyby( ~fincbtxm , ~incDecile , fmly.haveIncome , svymean , na.rm = TRUE )
# percent of households headed by inclass
c<-svymean(
	~incDecile ,
	design = fmly.haveIncome
)
d<-merge(a,b,by="incDecile")
d$new <- coef(c)
# Per Capita Data for Table 4
svyby(~annexpPC, by = ~incDecile, denominator = ~fincbtxmPC , fmly.haveIncome, svyratio)
a<-svyby( ~annexpPC , ~incDecile , fmly.haveIncome , svymean , na.rm = TRUE )
b<-svyby( ~fincbtxmPC , ~incDecile , fmly.haveIncome , svymean , na.rm = TRUE )
e <- svyby( ~fam_size , ~incDecile , fmly.haveIncome , svymean , na.rm = TRUE )
# percent of households headed by inclass
c<-svymean(
	~incDecile ,
	design = fmly.haveIncome
)
d<-merge(a,b,by="incDecile")
d$new <- coef(c)
################################ END Hugh Whelan Code #################################
|
3344985bca4707134d6345b90de1b33a1d3e8571
|
3f4d651c3d7431db4da76e7b89031911dc6eb913
|
/man/dfe_acad_year.Rd
|
95d4cc1ec02ac4613069a1457e8f4c46a29aa60f
|
[] |
no_license
|
TomFranklin/dferap
|
394ada7b7b28da2a761b8016d97c2a0e8fd30c6d
|
a43e34e4d7f1ee1808028c32f4f369eef716bfc1
|
refs/heads/master
| 2020-04-03T10:40:30.294751
| 2018-10-29T16:05:58
| 2018-10-29T16:05:58
| 155,199,335
| 0
| 0
| null | 2018-10-29T16:05:59
| 2018-10-29T11:19:09
|
R
|
UTF-8
|
R
| false
| true
| 762
|
rd
|
dfe_acad_year.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dfe_acad_year.R
\name{dfe_acad_year}
\alias{dfe_acad_year}
\title{Change the style of academic year}
\usage{
dfe_acad_year(year)
}
\arguments{
\item{year}{is the academic year we'll input, e.g. 201213 which will be converted into 2012/13}
}
\value{
Returns a character string
}
\description{
The \code{dfe_acad_year} function converts academic year numbers e.g. 201213 into strings with a forward slash "2012/13"
}
\details{
The input is expected as a six-digit academic year of the form YYYYyy, i.e.
the starting year followed by the final two digits of the ending year. The
function inserts a forward slash before the last two digits, so 201213
becomes "2012/13".
}
\examples{
library(dferap)
dfe_acad_year(201213)
"2012/13"
}
|
f7d6137cfa6b40206c4ca515a2b1bb438e8f728e
|
b39b3bceeb9a56925d61c0e4f928a3455e9a7336
|
/plot2.R
|
e8032b7741a0c75b65c5ec84c417beda065bf93f
|
[] |
no_license
|
sulaksh555/exploratory-data-analysis-project-1
|
3a081ca00f6bf2d2ed3fbc5e66cb7ce44c12f911
|
ffe41370930c8c3035c4eadfacd52d4f36461716
|
refs/heads/main
| 2023-05-12T08:25:25.216559
| 2021-05-30T19:11:39
| 2021-05-30T19:11:39
| 372,284,677
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 624
|
r
|
plot2.R
|
# Read in the dataset.
# FIX: declare na.strings = "?" -- the source file marks missing measurements
# with "?". Without it the Global_active_power column is read as character
# (and, before R 4.0, as a factor, where as.numeric() silently returns level
# codes instead of the measured values). With it, the column parses as
# numeric and the as.numeric() below is a safe no-op.
fileName<-"household_power_consumption.txt"
df<-read.table(file=fileName, header=TRUE, sep=";", na.strings="?")

# Subset the dataset to the two dates of interest (1-2 Feb 2007)
dfSubset<-df[df$Date %in% c("1/2/2007", "2/2/2007"), ]

# y variable: global active power in kilowatts
# x variable: POSIX timestamps built from the Date and Time columns
activePower<-as.numeric(dfSubset$Global_active_power)
dateTime<-strptime(x=paste(dfSubset$Date, dfSubset$Time, sep=" "),
                   format="%d/%m/%Y %H:%M:%S")

# Write the line plot to a PNG file using the png graphics device
png(filename="plot2.png")
plot(x=dateTime, y=activePower, type="l", xlab="",
     ylab="Global Active Power (kilowatts)")
dev.off()
|
f0219461acbbd53c025d862b6bd3dc78fac8d4c1
|
928e156654ecfbac540efded61829ac360de49e0
|
/R/overfitRR.R
|
8127bb10710a6cb65353fdb59e3d6040cdaf9f75
|
[] |
no_license
|
cran/RRphylo
|
a8e77bc72a1abad5e1e193faa7d6cc1c241bbab6
|
12611ed837148f9958f47d168fe9936e0d8c5114
|
refs/heads/master
| 2023-06-09T07:21:52.536620
| 2023-06-04T11:10:02
| 2023-06-04T11:10:02
| 128,387,796
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 45,925
|
r
|
overfitRR.R
|
#' @title Testing RRphylo methods overfit
#' @description Testing the robustness of \code{\link{search.trend}}
#' (\cite{Castiglione et al. 2019a}), \code{\link{search.shift}}
#' (\cite{Castiglione et al. 2018}), \code{\link{search.conv}}
#' (\cite{Castiglione et al. 2019b}), and \code{\link{PGLS_fossil}} results to
#' sampling effects and phylogenetic uncertainty.
#' @usage
#' overfitRR(RR,y,phylo.list=NULL,s=0.25,swap.args=NULL,trend.args=NULL,shift.args=NULL,
#' conv.args=NULL, pgls.args=NULL,aces=NULL,x1=NULL,aces.x1=NULL,cov=NULL,
#' rootV=NULL,nsim=100,clus=0.5)
#' @param RR an object produced by \code{\link{RRphylo}}.
#' @param y a named vector of phenotypes.
#' @param phylo.list a list (or multiPhylo) of alternative phylogenies to be
#' tested.
#' @param s the percentage of tips to be cut off. It is set at 25\% by default.
#' If \code{phylo.list} is provided, this argument is ignored.
#' @param swap.args a list of arguments to be passed to the function
#' \code{\link{swapONE}}, including \code{list(si=NULL,si2=NULL,}
#' \code{node=NULL)}. If \code{swap.arg} is unspecified, the function
#' automatically sets both \code{si} and \code{si2} to 0.1. If
#' \code{phylo.list} is provided, swapping is not performed.
#' @param trend.args a list of arguments specific to the function
#' \code{search.trend}, including \code{list(node=NULL,x1.residuals=FALSE)}.
#' If a trend for the whole tree is to be tested, type \code{trend.args =
#' list()}. No trend is tested if left unspecified.
#' @param shift.args a list of arguments specific to the function
#' \code{search.shift}, including \code{list(node=NULL,} \code{state=NULL)}.
#' Arguments \code{node} and \code{state} can be specified at the same time.
#' @param conv.args a list of arguments specific to the function
#' \code{search.conv}, including \code{list(node=NULL,} \code{state=NULL,
#' declust=FALSE)}. Arguments \code{node} and \code{state} can be specified at
#' the same time.
#' @param pgls.args a list of arguments specific to the function
#' \code{PGLS_fossil}, including \code{list(modform,} \code{data,
#' tree=FALSE,RR=TRUE,...)}. If \code{tree=TRUE}, \code{PGLS_fossil} is
#' performed by using the RRphylo output tree as \code{tree} argument. If
#' \code{RR=TRUE}, \code{PGLS_fossil} is performed by using the RRphylo output
#' as \code{RR} argument. Arguments \code{tree} and \code{RR} can be
#' \code{TRUE} at the same time. \code{...} are further argument passed to
#' \code{PGLS_fossil}.
#' @param aces if used to produce the \code{RR} object, the vector of those
#' ancestral character values at nodes known in advance must be specified.
#' Names correspond to the nodes in the tree.
#' @param x1 the additional predictor to be specified if the RR object has been
#' created using an additional predictor (i.e. multiple version of
#' \code{RRphylo}). \code{'x1'} vector must be as long as the number of nodes
#' plus the number of tips of the tree, which can be obtained by running
#' \code{RRphylo} on the predictor as well, and taking the vector of ancestral
#' states and tip values to form the \code{x1}.
#' @param aces.x1 a named vector of ancestral character values at nodes for
#' \code{x1}. It must be indicated if the RR object has been created using
#' both \code{aces} and \code{x1}. Names correspond to the nodes in the tree.
#' @param cov if used to produce the \code{RR} object, the covariate must be
#' specified. As in \code{RRphylo}, the covariate vector must be as long as
#' the number of nodes plus the number of tips of the tree, which can be
#' obtained by running \code{RRphylo} on the covariate as well, and taking the
#' vector of ancestral states and tip values to form the covariate.
#' @param rootV if used to produce the \code{RR} object, the phenotypic value at
#' the tree root must be specified.
#' @param nsim number of simulations to be performed. It is set at 100 by
#' default.
#' @param clus the proportion of clusters to be used in parallel computing. To
#' run the single-threaded version of \code{overfitRR} set \code{clus} = 0.
#' @return The function returns a 'RRphyloList' object containing:
#' @return \strong{$mean.sampling} the mean proportion of species actually
#' removed from the tree over the iterations.
#' @return \strong{$tree.list} a 'multiPhylo' list including the trees generated
#' within \code{overfitRR}
#' @return \strong{$RR.list} a 'RRphyloList' including the results of each
#' \code{RRphylo} performed within \code{overfitRR}
#' @return \strong{$rootCI} the 95\% confidence interval around the root value.
#' @return \strong{$ace.regressions} a 'RRphyloList' including the results of
#' linear regression between ancestral state estimates before and after the
#' subsampling.
#' @return \strong{$conv.results} a list including results for
#' \code{search.conv} performed under \code{clade} and \code{state}
#' conditions. If a node pair is specified within \code{conv.args}, the
#' \code{$clade} object contains the percentage of simulations producing
#' significant p-values for convergence between the clades, and the proportion
#' of tested trees (i.e. where the clades identity was preserved; always 1 if
#' no \code{phylo.list} is supplied). If a state vector is supplied within
#' \code{conv.args}, the object \code{$state} contains the percentage of
#' simulations producing significant p-values for convergence within (single
#' state) or between states (multiple states).
#' @return \strong{$shift.results} a list including results for
#' \code{search.shift} performed under \code{clade} and \code{sparse}
#' conditions. If one or more nodes are specified within \code{shift.args},
#' the \code{$clade} object contains for each node the percentage of
#' simulations producing significant p-value separated by shift sign, and the
#' same figures by considering all the specified nodes as evolving under a
#' single rate (all.clades). For each node the proportion of tested trees
#' (i.e. where the clade identity was preserved; always 1 if no
#' \code{phylo.list} is supplied) is also indicated. If a state vector is
#' supplied within \code{shift.args}, the object \code{$sparse} contains the
#' percentage of simulations producing significant p-value separated by shift
#' sign ($p.states).
#' @return \strong{$trend.results} a list including the percentage of
#' simulations showing significant p-values for phenotypes versus age and
#' absolute rates versus age regressions for the entire tree separated by
#' slope sign ($tree). If one or more nodes are specified within
#' \code{trend.args}, the list also includes the same results at nodes ($node)
#' and the results for comparison between nodes ($comparison). For each node the proportion
#' of tested trees (i.e. where the clade identity was preserved; always 1 if
#' no \code{phylo.list} is supplied) is also indicated.
#' @return \strong{$pgls.results} two 'RRphyloList' objects including results of
#' \code{PGLS_fossil} performed by using the phylogeny as it is (\code{$tree})
#' or rescaled according to the \code{RRphylo} rates (\code{$RR}).
#' @author Silvia Castiglione, Carmela Serio, Pasquale Raia
#' @details Methods using a large number of parameters risk being overfit. This
#' usually translates in poor fitting with data and trees other than the those
#' originally used. With \code{RRphylo} methods this risk is usually very low.
#' However, the user can assess how robust the results got by applying
#' \code{search.shift}, \code{search.trend}, \code{search.conv} or
#' \code{PGLS_fossil} are by running \code{overfitRR}. With the latter, the
#' original tree and data are subsampled by specifying a \code{s} parameter,
#' that is the proportion of tips to be removed from the tree. In some cases,
#' though, removing as many tips as imposed by \code{s} would delete too many
#' tips right in clades and/or states under testing. In these cases, the
#' function maintains no less than 5 species at least in each clade/state
#' under testing (or all species if there is less), reducing the sampling
#' parameter \code{s} if necessary. Internally, \code{overfitRR} further
#' shuffles the tree by using the function \code{\link{swapONE}}. Thereby,
#' both the potential for overfit and phylogenetic uncertainty are accounted
#' for straight away.
#'
#' Otherwise, a list of alternative phylogenies can be supplied to
#' \code{overfitRR}. In this case subsampling and swapping arguments are
#' ignored, and robustness testing is performed on the alternative topologies
#' as they are. If a clade has to be tested either in \code{search.shift},
#' \code{search.trend}, or \code{search.conv}, the function scans each
#' alternative topology searching for the corresponding clade. If the species
#' within such clade on the alternative topology differ more than 10\% from the
#' species within the clade in the original tree, the identity of the clade is
#' considered disrupted and the test is not performed.
#' @export
#' @seealso \href{../doc/overfitRR.html}{\code{overfitRR} vignette} ;
#' \href{../doc/search.trend.html}{\code{search.trend} vignette} ;
#' \href{../doc/search.shift.html}{\code{search.shift} vignette} ;
#' \href{../doc/search.conv.html}{\code{search.conv} vignette} ;
#' @importFrom utils setTxtProgressBar txtProgressBar
#' @references Castiglione, S., Tesone, G., Piccolo, M., Melchionna, M.,
#' Mondanaro, A., Serio, C., Di Febbraro, M., & Raia, P. (2018). A new method
#' for testing evolutionary rate variation and shifts in phenotypic evolution.
#' \emph{Methods in Ecology and Evolution}, 9:
#' 974-983.doi:10.1111/2041-210X.12954
#' @references Castiglione, S., Serio, C., Mondanaro, A., Di Febbraro, M.,
#' Profico, A., Girardi, G., & Raia, P. (2019a) Simultaneous detection of
#' macroevolutionary patterns in phenotypic means and rate of change with and
#' within phylogenetic trees including extinct species. \emph{PLoS ONE}, 14:
#' e0210101. https://doi.org/10.1371/journal.pone.0210101
#' @references Castiglione, S., Serio, C., Tamagnini, D., Melchionna, M.,
#' Mondanaro, A., Di Febbraro, M., Profico, A., Piras, P.,Barattolo, F., &
#' Raia, P. (2019b). A new, fast method to search for morphological
#' convergence with shape data. \emph{PLoS ONE}, 14, e0226949.
#' https://doi.org/10.1371/journal.pone.0226949
#' @examples
#' \dontrun{
#' data("DataOrnithodirans")
#' DataOrnithodirans$treedino->treedino
#' DataOrnithodirans$massdino->massdino
#' DataOrnithodirans$statedino->statedino
#' cc<- 2/parallel::detectCores()
#'
#' # Extract Pterosaurs tree and data
#' library(ape)
#' extract.clade(treedino,746)->treeptero
#' massdino[match(treeptero$tip.label,names(massdino))]->massptero
#' massptero[match(treeptero$tip.label,names(massptero))]->massptero
#'
#'
#' RRphylo(tree=treedino,y=massdino,clus=cc)->dinoRates
#' RRphylo(tree=treeptero,y=log(massptero),clus=cc)->RRptero
#'
#' # Case 1 search.shift under both "clade" and "sparse" condition
#' search.shift(RR=dinoRates, status.type= "clade")->SSnode
#' search.shift(RR=dinoRates, status.type= "sparse", state=statedino)->SSstate
#'
#' overfitRR(RR=dinoRates,y=massdino,swap.args =list(si=0.2,si2=0.2),
#' shift.args = list(node=rownames(SSnode$single.clades),state=statedino),
#' nsim=10,clus=cc)->orr.ss
#'
#' # Case 2 search.trend on the entire tree
#' search.trend(RR=RRptero, y=log(massptero),nsim=100,clus=cc,cov=NULL,node=NULL)->STtree
#'
#' overfitRR(RR=RRptero,y=log(massptero),swap.args =list(si=0.2,si2=0.2),
#' trend.args = list(),nsim=10,clus=cc)->orr.st1
#'
#' # Case 3 search.trend at specified nodescov=NULL,
#' search.trend(RR=RRptero, y=log(massptero),node=143,clus=cc)->STnode
#'
#' overfitRR(RR=RRptero,y=log(massptero),
#' trend.args = list(node=143),nsim=10,clus=cc)->orr.st2
#'
#' # Case 4 overfitRR on multiple RRphylo
#' data("DataCetaceans")
#' DataCetaceans$treecet->treecet
#' DataCetaceans$masscet->masscet
#' DataCetaceans$brainmasscet->brainmasscet
#' DataCetaceans$aceMyst->aceMyst
#'
#' ape::drop.tip(treecet,treecet$tip.label[-match(names(brainmasscet),
#' treecet$tip.label)])->treecet.multi
#' masscet[match(treecet.multi$tip.label,names(masscet))]->masscet.multi
#'
#' RRphylo(tree=treecet.multi,y=masscet.multi,clus=cc)->RRmass.multi
#' RRmass.multi$aces[,1]->acemass.multi
#' c(acemass.multi,masscet.multi)->x1.mass
#'
#' RRphylo(tree=treecet.multi,y=brainmasscet,x1=x1.mass,clus=cc)->RRmulti
#' search.trend(RR=RRmulti, y=brainmasscet,x1=x1.mass,clus=cc)->STcet
#' overfitRR(RR=RRmulti,y=brainmasscet,trend.args = list(),
#' x1=x1.mass,nsim=10,clus=cc)->orr.st3
#'
#' search.trend(RR=RRmulti, y=brainmasscet,x1=x1.mass,x1.residuals=TRUE,
#' clus=cc)->STcet.resi
#' overfitRR(RR=RRmulti,y=brainmasscet,trend.args = list(x1.residuals=TRUE),
#' x1=x1.mass,nsim=10,clus=cc)->orr.st4
#'
#' # Case 5 searching convergence between clades and within a single state
#' data("DataFelids")
#' DataFelids$PCscoresfel->PCscoresfel
#' DataFelids$treefel->treefel
#' DataFelids$statefel->statefel
#'
#' RRphylo(tree=treefel,y=PCscoresfel,clus=cc)->RRfel
#' search.conv(RR=RRfel, y=PCscoresfel, min.dim=5, min.dist="node9",clus=cc)->SC.clade
#' as.numeric(c(rownames(SC.clade[[1]])[1],as.numeric(as.character(SC.clade[[1]][1,1]))))->conv.nodes
#'
#' overfitRR(RR=RRfel, y=PCscoresfel,conv.args =
#' list(node=conv.nodes,state=statefel,declust=TRUE),nsim=10,clus=cc)->orr.sc
#'
#' # Case 6 overfitRR on PGLS_fossil
#' library(phytools)
#' rtree(100)->tree
#' fastBM(tree)->resp
#' fastBM(tree,nsim=3)->resp.multi
#' fastBM(tree)->pred1
#' fastBM(tree)->pred2
#'
#' PGLS_fossil(modform=y1~x1+x2,data=list(y1=resp,x2=pred1,x1=pred2),tree=tree)->pgls_noRR
#'
#' RRphylo(tree,resp,clus=cc)->RR
#' PGLS_fossil(modform=y1~x1+x2,data=list(y1=resp,x2=pred1,x1=pred2),tree=tree,RR=RR)->pgls_RR
#'
#' overfitRR(RR=RR,y=resp,
#' pgls.args=list(modform=y1~x1+x2,data=list(y1=resp,x2=pred1,x1=pred2),
#' tree=TRUE,RR=TRUE),nsim=10,clus=cc)->orr.pgls1
#'
#' PGLS_fossil(modform=y1~x1+x2,data=list(y1=resp.multi,x2=pred1,x1=pred2),tree=tree)->pgls2_noRR
#'
#' RRphylo(tree,resp.multi,clus=cc)->RR
#' PGLS_fossil(modform=y1~x1+x2,data=list(y1=resp.multi,x2=pred1,x1=pred2),tree=tree,RR=RR)->pgls2_RR
#'
#' overfitRR(RR=RR,y=resp.multi,
#' pgls.args=list(modform=y1~x1+x2,data=list(y1=resp.multi,x2=pred1,x1=pred2),
#' tree=TRUE,RR=TRUE),nsim=10,clus=cc)->orr.pgls2
#'
#'
#' }
overfitRR<-function(RR,y,
phylo.list=NULL,
s=0.25,
swap.args=NULL,
trend.args=NULL,
shift.args=NULL,
conv.args=NULL,
pgls.args=NULL,
aces=NULL,x1=NULL,aces.x1=NULL,cov=NULL,rootV=NULL,nsim=100,
clus=0.5)
{
# require(phytools)
# require(ddpcr)
# require(rlist)
if (!requireNamespace("ddpcr", quietly = TRUE)) {
stop("Package \"ddpcr\" needed for this function to work. Please install it.",
call. = FALSE)
}
'%ni%' <- Negate('%in%')
RR$tree->tree
y <- treedataMatch(tree, y)[[1]]
RR$aces->y.ace
tree$node.label<-rownames(y.ace)
if(!is.null(phylo.list)){
if(!is.null(swap.args)) warning("Swapping is not performed if a list of alternative phylogenies is provided",immediate.=TRUE)
s<-0
si<-0
si2<-0
swap.node<-NULL
nsim<-length(phylo.list)
}else{
if(!is.null(swap.args)){
if(any(is.null(names(swap.args)))) stop("All swap.args must be named")
if(is.null(swap.args$si)) si<-0.1 else si<-swap.args$si
if(is.null(swap.args$si2)) si2<-0.1 else si2<-swap.args$si2
if(is.null(swap.args$node)) swap.node<-NULL else swap.node<-swap.args$node
}else{
si<-0.1
si2<-0.1
swap.node<-NULL
}
}
if(!is.null(trend.args)){
if(length(trend.args)>0&any(is.null(names(trend.args)))) stop("All trend.args must be named")
trend<-TRUE
if(!is.null(trend.args$node)) trend.node<-trend.args$node else trend.node<-NULL
if(!is.null(trend.args$x1.residuals)) trend.x1.residuals<-trend.args$x1.residuals else trend.x1.residuals<-FALSE
} else {
trend<-FALSE
trend.node<-NULL
trend.x1.residuals<-FALSE
}
if(!is.null(shift.args)){
if(any(is.null(names(shift.args)))) stop("All shift.args must be named")
if(!is.null(shift.args$node)) shift.node<-shift.args$node else shift.node<-NULL
if(!is.null(shift.args$state)) {
shift.state<-shift.args$state
shift.state<-treedataMatch(tree,shift.state)[[1]][,1]
}else shift.state<-NULL
}else{
shift.node<-NULL
shift.state<-NULL
}
if(!is.null(conv.args)){
if(any(is.null(names(conv.args)))) stop("All conv.args must be named")
if(!is.null(conv.args$node)) conv.node<-conv.args$node else conv.node<-NULL
if(!is.null(conv.args$state)){
conv.state<-conv.args$state
conv.state<-treedataMatch(tree,conv.state)[[1]][,1]
}else conv.state<-NULL
if(!is.null(conv.args$declust)) conv.declust<-conv.args$declust else conv.declust<-FALSE
}else{
conv.node<-NULL
conv.state<-NULL
conv.declust<-NULL
}
if(!is.null(pgls.args)){
if(any(is.null(names(pgls.args)))) stop("All pgls.args must be named")
modform<-pgls.args$modform
pgls.data<-pgls.args$data
if(pgls.args$tree) pgls.tree<-pgls.args$tree else pgls.tree<-NULL
if(pgls.args$RR) pgls.RR<-pgls.args$RR else pgls.RR<-NULL
pgls.args<-pgls.args[which(!names(pgls.args)%in%c("modform","data","tree","RR"))]
}else{
modform<-NULL
pgls.data<-NULL
pgls.tree<-NULL
pgls.RR<-NULL
}
pb = txtProgressBar(min = 0, max = nsim, initial = 0)
rootlist<-list()
RR.list<-tree.list<-list()
acefit<-STcut<-SScut<-SScutS<-SCcut<-SCcutS<-PGLScut<-PGLScutRR<-list()
trend.node.match<-shift.node.match<-conv.node.match<-list()
real.s<-array()
for(k in 1:nsim){
setTxtProgressBar(pb,k)
if(s>0){
unlist(lapply(trend.node,function(x) {
length(tips(tree,x))->lenx
if(lenx<=5) tips(tree,x) else sample(tips(tree,x),5)
}))->out.st
unlist(lapply(shift.node,function(x) {
length(tips(tree,x))->lenx
if(lenx<=5) tips(tree,x) else sample(tips(tree,x),5)
}))->out.ss
unlist(lapply(conv.node,function(x) {
length(tips(tree,x))->lenx
if(lenx<=5) tips(tree,x) else sample(tips(tree,x),5)
}))->out.sc
if(!is.null(shift.state)){
table(shift.state)->tab.ss
unlist(lapply(1:length(tab.ss),function(x) {
if(tab.ss[x]<=5) names(which(shift.state==names(tab.ss)[x])) else
sample(names(which(shift.state==names(tab.ss)[x])),5)
}))->out.st.ss
} else out.st.ss<-NULL
if(!is.null(conv.state)){
table(conv.state)->tab.cs
unlist(lapply(1:length(tab.cs),function(x) {
if(tab.cs[x]<=5) names(which(conv.state==names(tab.cs)[x])) else
sample(names(which(conv.state==names(tab.cs)[x])),5)
}))->out.st.sc
}else out.st.sc<-NULL
unique(c(out.st,out.ss,out.sc,out.st.ss,out.st.sc))->outs
if(length(outs>0)) tree$tip.label[-match(outs,tree$tip.label)]->samtips else tree$tip.label->samtips
sx<-s
repeat({
if(length(samtips)>Ntip(tree)*sx) break else s*.9->sx
})
sample(samtips,round(Ntip(tree)*sx,0))->offs
}
if(!is.null(phylo.list)) phylo.list[[k]]->tree.swap else
suppressWarnings(swapONE(tree,si=si,si2=si2,node=swap.node,plot.swap=FALSE)[[1]])->tree.swap
y[match(tree.swap$tip.label,rownames(y)),,drop=FALSE]->y
if(s>0){
tree.swap$edge[tree.swap$edge[,1]==(Ntip(tree.swap)+1),2]->rootdesc
if(length(which(rootdesc<(Ntip(tree.swap)+1)))>0) tree.swap$tip.label[rootdesc[which(rootdesc<Ntip(tree.swap)+1)]]->saver else saver="xx"
if(saver%in%offs) offs[-which(offs==saver)]->offs
y[-which(rownames(y)%in%offs),,drop=FALSE]->ycut
drop.tip(tree.swap,which(rownames(y)%ni%rownames(ycut)))->treecut
y.ace[which(rownames(y.ace)%in%treecut$node.label),,drop=FALSE]->y.acecut
}else{
y->ycut
tree.swap->treecut
y.ace->y.acecut
}
treecut->tree.list[[k]]
1-(Ntip(treecut)/Ntip(tree))->real.s[k]
if(!is.null(cov)){
treedataMatch(treecut,cov)$y->covcut
c(RRphylo(treecut,covcut,clus=clus)$aces[,1],covcut)->covcut
# cov[match(c(rownames(y.acecut),rownames(ycut)),names(cov))]->covcut
# names(covcut)[1:Nnode(treecut)]<-seq((Ntip(treecut)+1),(Ntip(treecut)+Nnode(treecut)))
}else covcut<-NULL
if(!is.null(x1)) {
as.matrix(x1)->x1
treedataMatch(treecut,x1)$y->x1cut
rbind(RRphylo(treecut,x1cut,clus=clus)$aces,x1cut)->x1cut
# x1[match(c(rownames(y.acecut),rownames(ycut)),rownames(x1)),,drop=FALSE]->x1cut
# rownames(x1cut)[1:Nnode(treecut)]<-seq((Ntip(treecut)+1),(Ntip(treecut)+Nnode(treecut)))
}else x1cut<-NULL
if(!is.null(aces)){
if(is.vector(aces)) as.matrix(aces)->aces
aces->acescut
drop<-c()
for(i in 1:nrow(aces)) {
if(length(which(tips(tree,rownames(aces)[i])%in%treecut$tip.label))>1){
getMRCA(treecut,tips(tree,rownames(aces)[i])[which(tips(tree,rownames(aces)[i])%in%treecut$tip.label)])->newN
if(!is.null(phylo.list)){
length(tips(treecut,newN))/length(tips(tree,rownames(aces)[i]))->sh.tips
if(sh.tips<=1.1&sh.tips>=0.9) newN->rownames(acescut)[i] else c(drop,i)->drop
}else newN->rownames(acescut)[i]
# getMRCA(treecut,tips(tree,rownames(aces)[i])[which(tips(tree,rownames(aces)[i])%in%treecut$tip.label)])->rownames(acescut)[i]
}else c(drop,i)->drop
}
if(length(drop>0)) acescut[-drop,]->acescut
if(is.null(nrow(acescut))) acescut<-NULL
}else acescut<-NULL
if(!is.null(aces.x1)){
if(is.vector(aces.x1)) as.matrix(aces.x1)->aces.x1
aces.x1->aces.x1cut
drop<-c()
for(i in 1:nrow(aces.x1)) {
if(length(which(tips(tree,rownames(aces.x1)[i])%in%treecut$tip.label))>1){
getMRCA(treecut,tips(tree,rownames(aces.x1)[i])[which(tips(tree,rownames(aces.x1)[i])%in%treecut$tip.label)])->newN1
if(!is.null(phylo.list)){
length(tips(treecut,newN1))/length(tips(tree,rownames(aces.x1)[i]))->sh.tips
if(sh.tips<=1.1&sh.tips>=0.9) newN1->rownames(aces.x1cut)[i] else c(drop,i)->drop
}else newN1->rownames(aces.x1cut)[i]
# getMRCA(treecut,tips(tree,rownames(aces.x1)[i])[which(tips(tree,rownames(aces.x1)[i])%in%treecut$tip.label)])->rownames(aces.x1cut)[i]
}else c(drop,i)->drop
}
if(length(drop>0)) aces.x1cut[-drop,,drop=FALSE]->aces.x1cut
if(is.null(nrow(aces.x1cut))) aces.x1cut<-NULL
}else aces.x1cut<-NULL
if(!is.null(trend.node)){
trend.node.cut<-array()
for(i in 1:length(trend.node)) {
getMRCA(treecut,tips(tree,trend.node[i])[which(tips(tree,trend.node[i])%in%treecut$tip.label)])->trN
if(!is.null(phylo.list)){
length(tips(treecut,trN))/length(tips(tree,trend.node[i]))->sh.tips
if(sh.tips<=1.1&sh.tips>=0.9) trN->trend.node.cut[i] else NA->trend.node.cut[i]
} else trN->trend.node.cut[i]
# getMRCA(treecut,tips(tree,trend.node[i])[which(tips(tree,trend.node[i])%in%treecut$tip.label)])->trend.node.cut[i]
}
data.frame(trend.node,trend.node.cut)->trend.node.match[[k]]
trend.node.cut[which(!is.na(trend.node.cut))]->trend.node.cut
if(length(trend.node.cut)==0) trend.node.cut<-NULL
}else trend.node.cut<-NULL
if(!is.null(shift.node)){
shift.node.cut<-array()
for(i in 1:length(shift.node)){
getMRCA(treecut,tips(tree,shift.node[i])[which(tips(tree,shift.node[i])%in%treecut$tip.label)])->shN
if(!is.null(phylo.list)){
length(tips(treecut,shN))/length(tips(tree,shift.node[i]))->sh.tips
if(sh.tips<=1.1&sh.tips>=0.9) shN->shift.node.cut[i] else NA->shift.node.cut[i]
} else shN->shift.node.cut[i]
}
# getMRCA(treecut,tips(tree,shift.node[i])[which(tips(tree,shift.node[i])%in%treecut$tip.label)])->shift.node.cut[i]
data.frame(shift.node,shift.node.cut)->shift.node.match[[k]]
shift.node.cut[which(!is.na(shift.node.cut))]->shift.node.cut
if(length(shift.node.cut)==0) shift.node.cut<-NULL
}
if(!is.null(shift.state)) {
shift.state[match(c(tree.swap$node.label,tree.swap$tip.label), names(shift.state))]->shift.state
shift.state[match(rownames(ycut),names(shift.state))]->shift.state.cut
}
if(!is.null(conv.node)){
conv.node.cut<-array()
for(i in 1:length(conv.node)){
getMRCA(treecut,tips(tree,conv.node[i])[which(tips(tree,conv.node[i])%in%treecut$tip.label)])->scN
if(!is.null(phylo.list)){
length(tips(treecut,scN))/length(tips(tree,conv.node[i]))->sh.tips
if(sh.tips<=1.1&sh.tips>=0.9) scN->conv.node.cut[i] else NA->conv.node.cut[i]
} else scN->conv.node.cut[i]
if(any(is.na(conv.node.cut))) conv.node.cut<-NULL
}
# getMRCA(treecut,tips(tree,conv.node[i])[which(tips(tree,conv.node[i])%in%treecut$tip.label)])->conv.node.cut[i]
if(!is.null(conv.node.cut)) data.frame(conv.node,conv.node.cut)->conv.node.match[[k]] else NULL->conv.node.match[[k]]
}
if(!is.null(conv.state)) {
conv.state[match(c(tree.swap$node.label,tree.swap$tip.label), names(conv.state))]->conv.state
conv.state[match(rownames(ycut),names(conv.state))]->conv.state.cut
}
if(!is.null(pgls.tree)|!is.null(pgls.RR)) {
ddpcr::quiet(lapply(pgls.data,function(x){
if(is.null(nrow(x))) treedataMatch(treecut, x)[[1]][,1] else treedataMatch(treecut, x)[[1]]
})->pgls.datacut)
}
if(!is.null(rootV)) rootV->rootVcut else rootVcut<-NULL
RRphylo(treecut,ycut,aces=acescut,x1=x1cut,aces.x1=aces.x1cut,cov=covcut,rootV = rootVcut,clus=clus)->RRcut->RR.list[[k]]
if(trend|!is.null(trend.node)) ddpcr::quiet(search.trend(RRcut,ycut,x1=x1cut,x1.residuals = trend.x1.residuals,node=trend.node.cut,cov=covcut,clus=clus)->stcut->STcut[[k]],all=TRUE)
if(!is.null(shift.node)&&!is.null(shift.node.cut)) ddpcr::quiet(search.shift(RRcut,status.type="clade",node=shift.node.cut)->sscut->SScut[[k]],all=TRUE)
if(!is.null(shift.state)) ddpcr::quiet(search.shift(RRcut,status.type="sparse",state=shift.state.cut)->sscut->SScutS[[k]],all=TRUE)
if(!is.null(conv.node)&&any(!is.na(conv.node.cut))) ddpcr::quiet(search.conv(RR=RRcut,y=ycut,nodes=na.omit(conv.node.cut),aceV=acescut,clus=clus)->sccut->SCcut[[k]],all=TRUE)
if(!is.null(conv.state)) ddpcr::quiet(search.conv(tree=treecut,y=ycut,state=conv.state.cut,aceV=acescut,declust=conv.declust,clus=clus)->sccut->SCcutS[[k]],all=TRUE)
# if(!is.null(pgls.tree)) ddpcr::quiet(PGLS_fossil(modform,data=pgls.datacut,tree=treecut)->PGLScut[[k]],all=TRUE)
# if(!is.null(pgls.RR)) ddpcr::quiet(PGLS_fossil(modform,data=pgls.datacut,tree=RRcut$tree,RR=RRcut)->PGLScutRR[[k]],all=TRUE)
if(!is.null(pgls.tree)) ddpcr::quiet(do.call(PGLS_fossil,c(list(modform=modform,data=pgls.datacut,tree=treecut),pgls.args))->PGLScut[[k]],all=TRUE)
if(!is.null(pgls.RR)) ddpcr::quiet(do.call(PGLS_fossil,c(list(modform=modform,data=pgls.datacut,RR=RRcut),pgls.args))->PGLScutRR[[k]],all=TRUE)
RRcut$aces[1,]->rootlist[[k]]
summary(lm(y.acecut~RRcut$aces))->acefit[[k]]
do.call(rbind,lapply(seq(1:ncol(y.acecut)),function(x) summary(lm(y.acecut[,x]~RRcut$aces[,x]))$coef[c(1,2,7,8)]))->acefit[[k]]
if(!is.null(colnames(y))) rownames(acefit[[k]])<-colnames(y) else rownames(acefit[[k]])<-sapply(1:ncol(y),function(x) paste("y",x,sep=""))
colnames(acefit[[k]])<-c("intercept","slope","p.intercept","p.slope")
}
if(length(unlist(rootlist))>length(rootlist)){
do.call(rbind,rootlist)->rootlist
apply(rootlist,2,function(x) quantile(x,c(0.025,0.975)))->CIroot
data.frame(root=t(y.ace)[,1],"CI 2.5"=t(CIroot)[,1],"CI 97.5"=t(CIroot)[,2])->root.conf.int
if(!is.null(colnames(y))) rownames(root.conf.int)<-colnames(y) else rownames(root.conf.int)<-sapply(1:ncol(y),function(x) paste("y",x,sep=""))
}else{
unlist(rootlist)->rootlist
quantile(rootlist,c(0.025,0.975))->CIroot
data.frame(root=y.ace[1,,drop=FALSE],"CI 2.5"=CIroot[1],"CI 97.5"=CIroot[2])->root.conf.int
}
if(!is.null(shift.node)){
mapply(a=shift.node.match,b=SScut,function(a,b){
a[match(rownames(b$single.clade),a[,2]),1]->rownames(b$single.clade)
b$single.clade
},SIMPLIFY = FALSE)->singles
t(sapply(shift.node,function(k){
t(sapply(singles,function(j) {
if(any(as.numeric(rownames(j))==k)) j[which(as.numeric(rownames(j))==k),] else c(NA,NA)
}))->pran
pran[which(!is.na(pran[,1])),]->pran
cbind(length(which(pran[,2]>=0.975))/nrow(pran),length(which(pran[,2]<=0.025))/nrow(pran),nrow(pran)/nsim)
}))->shift.res.clade
rownames(shift.res.clade)<-shift.node
colnames(shift.res.clade)<-c("p.shift+","p.shift-","tested.trees")
lapply(SScut,function(j) j$all.clades)->allcla
if(!all(is.null(allcla))){
do.call(rbind, allcla)->allcla
rbind(cbind(length(which(allcla$p.value>=0.975))/nrow(allcla),
length(which(allcla$p.value<=0.025))/nrow(allcla),nrow(allcla)/nsim),
shift.res.clade)->shift.res.clade
rownames(shift.res.clade)[1]<-"all.clades"
}
}else shift.res.clade<-NULL
if(!is.null(shift.state)){
p.shift<-matrix(ncol=2,nrow=nrow(SScutS[[1]][[1]]))
for(i in 1:nrow(SScutS[[1]][[1]])){
unlist(lapply(lapply(SScutS,"[[",1),function(x) x[i,2]))->pr
c(length(which(pr>=0.975))/nsim,length(which(pr<=0.025))/nsim)->p.shift[i,]
}
rownames(p.shift)<-rownames(SScutS[[1]][[1]])
colnames(p.shift)<-c("p.shift+","p.shift-")
p.shift->shift.res.state
}else shift.res.state<-NULL
list(shift.res.clade,shift.res.state)->shift.res
names(shift.res)<-c("clade","sparse")
if(trend|!is.null(trend.node)){
#### Whole tree ####
if(ncol(y)==1) iter<-1 else iter<-ncol(y)+1
phen.trend<-rate.trend<-list()
for(j in 1:iter){
as.data.frame(do.call(rbind,lapply(lapply(STcut,"[[",2),function(x) x[j,]))[,c(1,3)])->pr#->phen.ran[[j]]
as.data.frame(do.call(rbind,lapply(lapply(STcut,"[[",3),function(x) x[j,]))[,c(1,3)])->rr#->rat.ran[[j]]
c(sum(pr$slope>0&pr$p.random>=0.975)/nsim,
sum(pr$slope>0&pr$p.random<=0.025)/nsim,
sum(pr$slope<0&pr$p.random>=0.975)/nsim,
sum(pr$slope<0&pr$p.random<=0.025)/nsim)->phen.trend[[j]]
c(sum(rr$slope>0&rr$p.random>=0.975)/nsim,
sum(rr$slope>0&rr$p.random<=0.025)/nsim,
sum(rr$slope<0&rr$p.random>=0.975)/nsim,
sum(rr$slope<0&rr$p.random<=0.025)/nsim)->rate.trend[[j]]
names(phen.trend[[j]])<-names(rate.trend[[j]])<-c("slope+p.up","slope+p.down","slope-p.up","slope-p.down")
}
do.call(rbind,phen.trend)->phen.trend
do.call(rbind,rate.trend)->rate.trend
if(!is.null(colnames(y))){
if(ncol(y)==1) colnam<-colnames(y) else colnam<-c(colnames(y),"multiple")
}else{
if(ncol(y)==1) colnam<-"y" else
colnam<-c(sapply(1:ncol(y),function(x) paste("y",x,sep="")),"multiple")
}
rownames(phen.trend)<-rownames(rate.trend)<-colnam
list(phen.trend,rate.trend)->p.trend
names(p.trend)<-c("phenotype","rates")
p.trend->whole.tree.res
if(!is.null(trend.node)){
mapply(a=trend.node.match,b=lapply(STcut,"[[",4),function(a,b){
a[match(names(b),a[,2]),1]->names(b)
b
},SIMPLIFY = FALSE)->phen.node
mapply(a=trend.node.match,b=lapply(STcut,"[[",5),function(a,b){
a[match(names(b),a[,2]),1]->names(b)
b
},SIMPLIFY = FALSE)->rat.node
p.phen.node<-list()
p.rate.node<-list()
for(k in 1:length(trend.node)){
lapply(phen.node,function(j) {
if(any(as.numeric(names(j))==trend.node[k])) j[[which(as.numeric(names(j))==trend.node[k])]] else NA
})->pran
pran[which(!sapply(pran,function(w) all(is.na(w))))]->phen.pran
lapply(rat.node,function(j) {
if(any(as.numeric(names(j))==trend.node[k])) j[[which(as.numeric(names(j))==trend.node[k])]] else NA
})->pran
pran[which(!sapply(pran,function(w) all(is.na(w))))]->rat.pran
p.phen.node.y<-matrix(ncol=7,nrow=iter)
p.rate.node.y<-matrix(ncol=5,nrow=iter)
for(w in 1:iter){
as.data.frame(do.call(rbind,lapply(phen.pran,function(x) x[w,])))->pnod
as.data.frame(do.call(rbind,lapply(rat.pran,function(x) x[w,])))->rnod
c(sum(pnod$slope>0&pnod$p.slope>=0.975)/nrow(pnod),
sum(pnod$slope>0&pnod$p.slope<=0.025)/nrow(pnod),
sum(pnod$slope<0&pnod$p.slope>=0.975)/nrow(pnod),
sum(pnod$slope<0&pnod$p.slope<=0.025)/nrow(pnod),
sum(pnod$emm.difference>0&pnod$p.emm<=0.05)/nrow(pnod),
sum(pnod$emm.difference<0&pnod$p.emm<=0.05)/nrow(pnod),
nrow(pnod)/nsim)->p.phen.node.y[w,]
c(sum(rnod$emm.difference>0&rnod$p.emm<=0.05)/nrow(rnod),
sum(rnod$emm.difference<0&rnod$p.emm<=0.05)/nrow(rnod),
sum((rnod$slope.node-rnod$slope.others)>0&rnod$p.slope<=0.05)/nrow(rnod),
sum((rnod$slope.node-rnod$slope.others)<0&rnod$p.slope<=0.05)/nrow(rnod),
nrow(rnod)/nsim)->p.rate.node.y[w,]
}
colnames(p.phen.node.y)<-c("slope+p.up","slope+p.down","slope-p.up","slope-p.down","p.emm+","p.emm-","tested.trees")
colnames(p.rate.node.y)<-c("p.emm+","p.emm-","p.slope+","p.slope-","tested.trees")
if(!is.null(colnames(y))){
if(ncol(y)==1) colnam<-colnames(y) else colnam<-c(colnames(y),"multiple")
}else{
if(ncol(y)==1) colnam<-"y" else
colnam<-c(sapply(1:ncol(y),function(x) paste("y",x,sep="")),"multiple")
}
rownames(p.phen.node.y)<-rownames(p.rate.node.y)<-colnam
p.phen.node.y->p.phen.node[[k]]
p.rate.node.y->p.rate.node[[k]]
}
names(p.phen.node)<-names(p.rate.node)<-trend.node
list(p.phen.node,p.rate.node)->p.trend.node
names(p.trend.node)<-c("phenotype","rates")
node.res<-p.trend.node
if(length(trend.node)>1){ #### Node comparison ####
apply(combn(trend.node,2),2,function(j) c(paste(j[1],j[2],sep="-"),paste(j[2],j[1],sep="-")))->tn.pairs
lapply(STcut,function(j) j$group.comparison)->comptot
mapply(x=lapply(comptot,"[[",1)[!sapply(comptot,is.null)],
xx=trend.node.match[!sapply(comptot,is.null)],function(x,xx){
if(ncol(y)>1){
t(apply(x[[1]],1,function(fx) xx[match(gsub("g","",fx[1:2]),xx[,2]),1]))->x[[1]][,1:2]
lapply(2:length(x), function(xw) x[[xw]][,1:2]<<-x[[1]][,1:2])
apply(x[[1]][,1:2],1,function(fx) paste(fx,collapse="-"))->realn
unlist(apply(tn.pairs,2,function(jj) which(realn%in%jj)))->roword
lapply(x,function(fx) fx[roword,])->x
realn[roword]->realn
apply(tn.pairs,2,function(jj) sum(match(realn,jj,nomatch = 0)))->revcols
revcols[which(revcols>0)]->revcols
lapply(x,function(kk){
if(any(revcols==2)){
data.frame(kk[which(revcols==2),c(2,1,4,3)],1-kk[which(revcols==2),5],
-1*kk[which(revcols==2),6],kk[which(revcols==2),7])->kk[which(revcols==2),]
}
data.frame(kk,pair=apply(kk[,1:2],1,function(jk) paste(jk,collapse = "-")))
})
}else{
t(apply(x,1,function(fx) xx[match(gsub("g","",fx[1:2]),xx[,2]),1]))->x[,1:2]
apply(x[,1:2],1,function(fx) paste(fx,collapse="-"))->realn
unlist(apply(tn.pairs,2,function(jj) which(realn%in%jj)))->roword
x[roword,]->x
realn[roword]->realn
apply(tn.pairs,2,function(jj) sum(match(realn,jj,nomatch = 0)))->revcols
revcols[which(revcols>0)]->revcols
if(any(revcols==2)){
data.frame(x[which(revcols==2),c(2,1)],-1*x[which(revcols==2),3],
x[which(revcols==2),c(4,6,5,7)])->x[which(revcols==2),]
}
data.frame(x,pair=apply(x[,1:2],1,function(jk) paste(jk,collapse = "-")))
}
},SIMPLIFY = FALSE)->pcomptot
mapply(x=lapply(comptot,"[[",2)[!sapply(comptot,is.null)],
xx=trend.node.match[!sapply(comptot,is.null)],function(x,xx){
if(ncol(y)>1){
t(apply(x[[1]],1,function(fx) xx[match(gsub("g","",fx[1:2]),xx[,2]),1]))->x[[1]][,1:2]
lapply(2:length(x), function(xw) x[[xw]][,1:2]<<-x[[1]][,1:2])
apply(x[[1]][,1:2],1,function(fx) paste(fx,collapse="-"))->realn
unlist(apply(tn.pairs,2,function(jj) which(realn%in%jj)))->roword
lapply(x,function(fx) fx[roword,])->x
realn[roword]->realn
apply(tn.pairs,2,function(jj) sum(match(realn,jj,nomatch = 0)))->revcols
revcols[which(revcols>0)]->revcols
lapply(x,function(kk){
if(any(revcols==2)){
data.frame(kk[which(revcols==2),c(2,1)],-1*kk[which(revcols==2),3],kk[which(revcols==2),c(4,6,5,7)])->kk[which(revcols==2),]
}
data.frame(kk,pair=apply(kk[,1:2],1,function(jk) paste(jk,collapse = "-")))
})
}else{
t(apply(x,1,function(fx) xx[match(gsub("g","",fx[1:2]),xx[,2]),1]))->x[,1:2]
apply(x[,1:2],1,function(fx) paste(fx,collapse="-"))->realn
unlist(apply(tn.pairs,2,function(jj) which(realn%in%jj)))->roword
x[roword,]->x
realn[roword]->realn
apply(tn.pairs,2,function(jj) sum(match(realn,jj,nomatch = 0)))->revcols
revcols[which(revcols>0)]->revcols
if(any(revcols==2)){
data.frame(x[which(revcols==2),c(2,1)],-1*x[which(revcols==2),3],
x[which(revcols==2),c(4,6,5,7)])->x[which(revcols==2),]
}
data.frame(x,pair=apply(x[,1:2],1,function(jk) paste(jk,collapse = "-")))
}
},SIMPLIFY = FALSE)->rcomptot
comp.phen.y<-comp.rat.y<-list()
for(w in 1:iter){
nod.nam<-list()
p.comp.phen<-p.comp.rat<-matrix(ncol=5,nrow=ncol(tn.pairs))
for(k in 1:ncol(tn.pairs)){
if(ncol(y)>1)
do.call(rbind,lapply(lapply(pcomptot,"[[",w),function(x) x[which(x$pair==tn.pairs[1,k]),]))->pcomp else
do.call(rbind,lapply(pcomptot,function(x) x[which(x$pair==tn.pairs[1,k]),]))->pcomp
if(w==1) pcomp[nrow(pcomp),1:2]->nod.nam[[k]]
as.data.frame(pcomp[,3:7])->pcomp#->phen.comp[[k]]
if(ncol(y)>1)
do.call(rbind,lapply(lapply(rcomptot,"[[",w),function(x) x[which(x$pair==tn.pairs[1,k]),]))[,3:7,drop=FALSE]->rcomp else
do.call(rbind,lapply(rcomptot,function(x) x[which(x$pair==tn.pairs[1,k]),]))[,3:7,drop=FALSE]->rcomp
c(sum((pcomp$slope.group_1-pcomp$slope.group_2)>0&pcomp$p.slope>=0.95)/nrow(pcomp),
sum((pcomp$slope.group_1-pcomp$slope.group_2)<0&pcomp$p.slope<=0.05)/nrow(pcomp),
sum(pcomp$emm.difference>0&pcomp$p.emm<=0.05)/nrow(pcomp),
sum(pcomp$emm.difference<0&pcomp$p.emm<=0.05)/nrow(pcomp),
nrow(pcomp)/nsim)->p.comp.phen[k,]
c(sum(rcomp$emm.difference>0&rcomp$p.emm<=0.05)/nrow(rcomp),
sum(rcomp$emm.difference<0&rcomp$p.emm<=0.05)/nrow(rcomp),
sum((rcomp$slope.group_1-rcomp$slope.group_2)>0&rcomp$p.slope<=0.05)/nrow(rcomp),
sum((rcomp$slope.group_1-rcomp$slope.group_2)<0&rcomp$p.slope<=0.05)/nrow(rcomp),
nrow(rcomp)/nsim)->p.comp.rat[k,]
}
colnames(p.comp.phen)<-c("p.slope+","p.slope-","p.emm+","p.emm-","tested.trees")
colnames(p.comp.rat)<-c("p.emm+","p.emm-","p.slope+","p.slope-","tested.trees")
if(w==1) do.call(rbind, nod.nam)->nam.pair
rownames(p.comp.phen)<-rownames(p.comp.rat)<-apply(nam.pair,1, function(x) paste(x[1], x[2], sep="-"))
p.comp.phen->comp.phen.y[[w]]
p.comp.rat->comp.rat.y[[w]]
}
p.comp.phenN<-p.comp.ratN<-list()
for(q in 1:ncol(tn.pairs)){
do.call(rbind,lapply(comp.phen.y,function(x) x[q,]))->p.comp.phenN[[q]]
do.call(rbind,lapply(comp.rat.y,function(x) x[q,]))->p.comp.ratN[[q]]
if(!is.null(colnames(y))){
if(ncol(y)==1) colnam<-colnames(y) else colnam<-c(colnames(y),"multiple")
rownames(p.comp.phenN[[q]])<-rownames(p.comp.ratN[[q]])<-colnam
}else{
if(ncol(y)==1) colnam<-"y" else
colnam<-c(sapply(1:ncol(y),function(x) paste("y",x,sep="")),"multiple")
}
rownames(p.comp.phenN[[q]])<-rownames(p.comp.ratN[[q]])<-colnam
}
names(p.comp.phenN)<-names(p.comp.ratN)<-rownames(comp.phen.y[[1]])
list(p.comp.phenN,p.comp.ratN)->p.comp
names(p.comp)<-c("phenotype","rates")
}else{
p.comp<-NULL
}
if(length(trend.node)>1) node.res<-list(node=node.res,comparison=p.comp) else node.res<-list(node=node.res)
trend.res<-do.call(c,list(tree=list(whole.tree.res),node.res))
}else trend.res<-whole.tree.res
}else trend.res<-NULL
if(!is.null(conv.node)){
lapply(SCcut,"[[",1)->scres
scres[which(!sapply(SCcut,is.null))]->scres
matrix(c(length(which(lapply(scres,function(x) x[1,8])<=0.05))/length(scres),
length(which(lapply(scres,function(x) x[1,9])<=0.05))/length(scres),
length(scres)/nsim),ncol=3)->p.convC
colnames(p.convC)<-c("p.ang.bydist","p.ang.conv","tested.trees")
rownames(p.convC)<-paste(conv.node,collapse="-")
}else p.convC<-NULL
if(!is.null(conv.state)){
lapply(SCcutS,"[[",1)->scresS
p.convS<-matrix(ncol=2,nrow=nrow(scresS[[1]]))
if("nostate"%in%conv.state&length(unique(conv.state)[-which(unique(conv.state)=="nostate")])){
c(length(which(sapply(scresS,function(x) x[1,3])<=0.05))/nsim,
length(which(sapply(scresS,function(x) x[1,4])<=0.05))/nsim)->p.convS[1,]
rownames(p.convS)<-rownames(scresS[[1]])
colnames(p.convS)<-colnames(scresS[[1]])[3:4]
}else{
for(i in 1:nrow(scresS[[1]])){
c(length(which(sapply(scresS,function(x) x[i,5])<=0.05))/nsim,
length(which(sapply(scresS,function(x) x[i,6])<=0.05))/nsim)->p.convS[i,]
}
rownames(p.convS)<-apply(scresS[[1]][,1:2],1,function(x) paste(x[1],x[2],sep="-"))
colnames(p.convS)<-colnames(scresS[[1]])[5:6]
}
}else p.convS<-NULL
list(p.convC,p.convS)->conv.res
names(conv.res)<-c("clade","state")
if(is.null(pgls.tree)) PGLScut<-NULL else class(PGLScut)<-"RRphyloList"
if(is.null(pgls.RR)) PGLScutRR<-NULL else class(PGLScutRR)<-"RRphyloList"
list(PGLScut,PGLScutRR)->pgls.res
names(pgls.res)<-c("tree","RR")
# res<-list(mean(real.s),root.conf.int,acefit,conv.res,shift.res,trend.res,pgls.res)
# names(res)<-c("mean.sampling","rootCI","ace.regressions","conv.results","shift.results","trend.results","pgls.results")
class(RR.list)<-"RRphyloList"
class(acefit)<-"RRphyloList"
class(tree.list)<-"multiPhylo"
res<-structure(list(mean.sampling = mean(real.s),
tree.list=tree.list,
RR.list=RR.list,
rootCI=root.conf.int,
ace.regressions=acefit,
conv.results=conv.res,
shift.results=shift.res,
trend.results=trend.res,
pgls.results=pgls.res),
class = "RRphyloList")
res
}
#' @export
print.RRphyloList<-function(x,...){
  # Build the summary line first, then print it once. The dispatch logic is
  # unchanged: an overfitRR result is recognised by the "mean.sampling"
  # element name, a list of RRphylo outputs by the "lambda" element of its
  # first item, and a list of PGLS_fossil fits by the class of its first item.
  if("mean.sampling"%in%attributes(x)[[1]]){
    msg<-paste(length(x[[2]]),"overfitRR simulations")
  }else if("lambda"%in%attributes(x[[1]])[[1]]){
    msg<-paste("List of",length(x),"RRphylo outputs")
  }else if(class(x[[1]])[1]%in%c("gls","procD.lm")){
    msg<-paste("List of",length(x),"PGLS_fossil outputs")
  }else{
    msg<-paste("List of",length(x),"outputs")
  }
  cat(msg)
}
|
4e2a4708facfe43a2e72c76b46d9133119a6d749
|
571ab7a18fc1a77db70166d558e12c0a2e1f75f4
|
/Walmart_Trees.R
|
da0f980d8d58a789d654fb5a7464c6ab3800b738
|
[] |
no_license
|
5l1v3r1/R-walmartKaggle
|
dc1713c7aeeb77f0626e07211bf0ba3667f9c955
|
784eb4841b82708fb99eb4f571a75104eeae8099
|
refs/heads/master
| 2021-04-03T21:18:18.775213
| 2014-04-06T16:01:59
| 2014-04-06T16:01:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,837
|
r
|
Walmart_Trees.R
|
# Walmart weekly-sales prediction with regression trees.
# Pipeline: engineer calendar features and average-sales bins for departments
# and weeks -> grow a deliberately large regression tree -> prune it via
# cross-validation -> score the Kaggle test set and write a submission file.
library(lubridate)
library(plyr)
library(tree)
library(sqldf)

# ---- Import training data ----
import <- read.csv("/Users/kylehundman/Desktop/Walmart/train.csv", header = TRUE)

# Derive calendar features from the Date column (lubridate).
import$Date <- as.Date(import$Date, "%m/%d/%y")
import$month <- month(import$Date)
import$week <- week(import$Date)
import$year <- year(import$Date)

# ---- Group departments by average sales (35 bins) ----
deptAvgs <- sqldf('SELECT avg(Weekly_Sales) as avg, Dept FROM import GROUP BY Dept')
deptAvgs <- sqldf('SELECT * FROM deptAvgs ORDER BY avg')
hist(deptAvgs$avg, breaks = 35)
deptAvgs$deptBin <- cut(deptAvgs$avg, breaks = 35, labels = FALSE)
import <- join(import, deptAvgs, by = "Dept")  # plyr left join (vlookup-style)

# ---- Group weeks by average sales (35 bins) ----
weekAvgs <- sqldf('SELECT avg(Weekly_Sales) as avg, week FROM import GROUP BY week')
weekAvgs <- sqldf('SELECT * FROM weekAvgs ORDER BY avg')
hist(weekAvgs$avg, breaks = 35)
weekAvgs$weekBin <- cut(weekAvgs$avg, breaks = 35, labels = FALSE)
import <- join(import, weekAvgs, by = "week")

# ---- Assemble the modelling frame (drop unusable columns) ----
train <- with(import, data.frame(sales = Weekly_Sales, month = month, year = year,
                                 holiday = IsHoliday,
                                 storeType = StoreType, storeSize = StoreSize, week = week,
                                 deptAvgSalesBin = deptBin, temp = Temperature,
                                 fuelPrice = Fuel_Price,
                                 cpi = CPI, unemployment = Unemployment,
                                 weekAvgSalesBin = weekBin))

# Missing values: tree() handles NAs internally, but keep a complete-case copy
# to compute in-sample fit.
sum(is.na(train))  # 128 in the original data
train_complete <- train[complete.cases(train), ]

# ---- Fit a deliberately large tree, then prune ----
# mindev = 0.0001 (default 0.01, which only gives a 10-node tree).
control <- tree.control(nobs = nrow(train), mincut = 1, minsize = 2, mindev = 0.0001)
train.tr <- tree(sales ~ ., train, control = control)
train.tr
summary(train.tr)
plot(train.tr, type = "u"); text(train.tr, digits = 2)  # type = "p" gives proportional branch lengths

# Deviance vs. complexity parameter for the pruning sequence.
train.tr1 <- prune.tree(train.tr)
plot(train.tr1)
# Cross-validated deviance vs. complexity parameter.
plot(cv.tree(train.tr, , prune.tree))
# Final tree at the chosen size (could also use k = 0.4 instead of best).
train.tr1 <- prune.tree(train.tr, best = 10)
train.tr1
plot(train.tr1, type = "u"); text(train.tr1, digits = 3)

# In-sample pseudo-R^2 on complete cases.
# NOTE(review): this assumes predict() returns exactly one fitted value per
# complete-case row — verify the lengths match before trusting r.
yhat <- predict(train.tr1)
plot(yhat, train_complete$sales)
r <- 1 - (var(train_complete$sales - yhat) / var(train_complete$sales))
r

# ---- Predict the test set ----
test <- read.csv("/Users/kylehundman/Desktop/Walmart/test.csv", header = TRUE)

# Same calendar features as the training data.
test$Date <- as.Date(test$Date, "%m/%d/%y")
test$month <- month(test$Date)
test$week <- week(test$Date)
test$year <- year(test$Date)

# Attach the dept/week bins learned from the training data.
test <- join(test, deptAvgs, by = "Dept")
test <- join(test, weekAvgs, by = "week")

# Drop unusable columns.
test <- with(test, data.frame(month = month, year = year, holiday = IsHoliday,
                              storeType = StoreType, storeSize = StoreSize, week = week,
                              deptAvgSalesBin = deptBin, temp = Temperature,
                              fuelPrice = Fuel_Price,
                              cpi = CPI, unemployment = Unemployment,
                              weekAvgSalesBin = weekBin))

# CPI/unemployment are missing only for May-July 2013; impute with the mean of
# the surrounding months (April and August 2013).
sum(is.na(test$cpi))           # 38,162 - only May, June, July missing
sum(is.na(test$unemployment))  # 38,162 - only May, June, July missing
sqldf('SELECT min(month) FROM test WHERE year == 2012')  # earliest month for prediction - November 2012

# BUG FIX: the original WHERE clause `year == 2013 and month == 4 OR month == 8`
# parses as `(year == 2013 AND month == 4) OR month == 8`, so it averaged
# August of *every* year; the OR is now parenthesised. The imputation is also
# computed *before* month is coerced to character (SQLite never matches a TEXT
# column against an INTEGER literal), and `[1, 1]` extracts a scalar instead of
# assigning a 1x1 data.frame into the column.
imputeCPI <- sqldf('SELECT avg(cpi) FROM test WHERE year == 2013 AND (month == 4 OR month == 8)')[1, 1]
imputeUnemployment <- sqldf('SELECT avg(unemployment) FROM test WHERE year == 2013 AND (month == 4 OR month == 8)')[1, 1]
test$cpi[is.na(test$cpi)] <- imputeCPI
test$unemployment[is.na(test$unemployment)] <- imputeUnemployment
sum(is.na(test))

# NOTE(review): the training frame used numeric month/deptBin; converting the
# test columns to character (as the original script did) may not line up with
# the fitted splits — verify before relying on the predictions.
test$month <- as.character(test$month)
test$deptAvgSalesBin <- as.character(test$deptAvgSalesBin)

# ---- Create submission ----
# NOTE(review): this scores with the *unpruned* tree (train.tr), not the
# pruned train.tr1 — confirm that is intentional.
yhatTest <- predict(train.tr, test)
sum(is.na(yhatTest))
write.table(yhatTest, col.names = "Weekly_Sales",
            file = "/Users/kylehundman/Desktop/Walmart/Submission_triplehugeTree.csv")
|
be97d8b216c065234aa3e062542edaa184f0f52a
|
893ad18460bfa6a8cef47084e047473152c8c8cc
|
/man/calc_fc.Rd
|
d036e1592c03c6af2db60fc7e765095198a4d676
|
[] |
no_license
|
cran/BayesianPower
|
8d3f5d7a078b0779fa09480e4e3f251f0fdcd1e9
|
566367a630767911aa227d4715a701449c41ebe7
|
refs/heads/master
| 2020-12-21T20:59:30.367732
| 2020-06-22T07:40:16
| 2020-06-22T07:40:16
| 236,558,700
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 828
|
rd
|
calc_fc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{calc_fc}
\alias{calc_fc}
\title{Compute the complexity or fit for two hypotheses.}
\usage{
calc_fc(hyp, hyp2, means, sds, nsamp = 1000)
}
\arguments{
\item{hyp}{A constraint matrix defining H1.}
\item{hyp2}{A constraint matrix defining H2 OR a character \code{'u'}
or \code{'c'} specifying an unconstrained or complement hypothesis}
\item{means}{A vector of posterior or prior means}
\item{sds}{A vector of posterior or prior standard deviations}
\item{nsamp}{A number. The number of prior or posterior samples to determine the
fit and complexity}
}
\value{
A vector.
The proportion of posterior samples in agreement with H1 and with H2
}
\description{
Compute the complexity or fit for two hypotheses.
}
|
e27b1d954fe1a44fcce788c4cb53c228b48de86b
|
46e27718fdfed55fa0973c6b693af237ad742bca
|
/R/reads_per_gene_distribution.R
|
e94c9118b945197bf81d739232791bd5a9f539c0
|
[] |
no_license
|
urhonenh/PN0079_DVL3_RNA-seq
|
15df32469f9df8146889c4e01b74891af37aff7c
|
bd388285976d4bb5967ba70e14c783e003b2f73a
|
refs/heads/main
| 2023-08-14T14:55:20.746119
| 2021-10-15T11:32:57
| 2021-10-15T11:32:57
| 364,518,653
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,693
|
r
|
reads_per_gene_distribution.R
|
#!/usr/bin/env Rscript
# Plot the distribution of reads per gene in the DVL3 RNA-seq dataset:
#   (1) histogram of log2 total read counts per gene across the whole dataset;
#   (2) histogram of raw totals for low-count genes (<= 30 reads), with a
#       zoomed panel on the 0-3000 gene-count range.
# Written by: Henna Urhonen
# E-mail: henna.urhonen@tuni.fi

suppressPackageStartupMessages({
  library(DESeq2)
  library(dplyr)
  library(ggplot2)
  library(ggforce)  # facet_zoom() for the low-count histogram (was loaded mid-script)
})

# Input/output locations.
dedir <- "/bmt-data/genomics/projects/dvl3_mouse_rna-seq/04_DE_analysis/"
outdir <- "/bmt-data/genomics/projects/dvl3_mouse_rna-seq/06_visualization/"
gitrepo <- "/home/hu425279/PN0079_DVL3_rna-seq/"  # currently unused; kept for reference

###############

dds <- readRDS(paste0(dedir, "dds_object.rds"))
raw_counts <- counts(dds)

# Total reads per gene, log2-transformed with pseudocount 1.
raw_rowsums <- as.data.frame(rowSums(raw_counts))
log2_rowsums <- log2(raw_rowsums + 1)
print(min(log2_rowsums))
print(max(log2_rowsums))

# Histogram of log2-transformed totals for the whole dataset.
p <- ggplot(log2_rowsums, aes(x = log2_rowsums[, 1])) +
  geom_histogram(binwidth = 1, colour = "black", fill = "white") +
  ggtitle("Total log2 read counts per gene in the DVL3 dataset") +
  xlab("log2 read count") + ylab("Number of genes") + theme_bw()

pdf(paste0(outdir, "log2_reads_per_gene.pdf"))
p
dev.off()

# Histogram of raw counts restricted to genes with at most 30 total reads.
low_count_rowsums <- as.data.frame(raw_rowsums[which(raw_rowsums[, 1] <= 30), ])
yRange <- c(0, 3000)  # y-axis window for the zoomed panel (was defined but unused)
p2 <- ggplot(low_count_rowsums, aes(x = low_count_rowsums[, 1])) +
  geom_histogram(binwidth = 1, colour = "black", fill = "white") +
  ggtitle("Low count genes with max. 30 reads in total") +
  xlab("Raw read count") + ylab("Number of genes") + theme_bw() +
  facet_zoom(ylim = yRange)  # zoom to this range in an additional figure

pdf(paste0(outdir, "raw_reads_per_low_count_gene.pdf"))
p2
dev.off()
|
a65b88ac3e8b4393b22964de2b6055bf8e722174
|
cbc215b281e6a300538206ea28bfc47c0a4923b8
|
/URBACT_Network_Report/STEP1_BasicTextMining_URBACTreport.R
|
05ab918db2e5f7d8478d91933915605b622f1d2e
|
[] |
no_license
|
pgourdongeo/Chap5_TextMining
|
11d7dcb8231e8643b028d3c21fee4d6de7b1aca6
|
4745ab8eab81faaaf02f791591d084c2f0ea7d8a
|
refs/heads/master
| 2023-09-02T06:54:04.478372
| 2021-11-02T10:18:17
| 2021-11-02T10:18:29
| 295,954,411
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 29,070
|
r
|
STEP1_BasicTextMining_URBACTreport.R
|
############################### URBACT Networks reports ##################################
#
#
# DESCRIPTION: Text mining of URBACT project network reports.
#   Builds a corpus from the per-network .txt reports, parses it with spaCy
#   (via spacyr), removes stop words, and produces basic corpus statistics:
#   document sizes, lexicon, word cloud and part-of-speech frequencies.
#
############################################################################## PG November 2020

# NOTE(review): setwd() in a script assumes this exact directory layout.
setwd("~/Chap5_TextMining/URBACT_Network_Report/")

# Packages
library(tidyverse)
library(tidytext)
library(skimr)
# library(GGally)
library(stargazer)
library(patchwork)
library(wordcloud)
library(udpipe)
library(ggrepel)
library(topicmodels)
library(R.temis)

### Data
URBACT_project <- read.csv2("~/Chap5_TextMining/Data/URBACT/UrbactNetworks_complete_V2.csv",
                            stringsAsFactors = FALSE)

# Geometry data (currently unused)
## rec
# rec <- st_read("~/Chap5_TextMining/Data/Geometry/rec_3035.geojson")
#
# # EU
# sfEU <- st_read("~/Chap5_TextMining/Data/Geometry/fondEuropeLarge.geojson", stringsAsFactors = FALSE, crs = 3035)
#

####################### Create a corpus from the report text files
dir <- "~/Chap5_TextMining/Data/URBACT/TextFilesUrbactNetwork/TexturbactNet/"
# BUG FIX: list.files(pattern =) expects a regular expression, not a glob;
# '*.txt' only worked by accident. "\\.txt$" matches the same files correctly.
all_txts <- list.files(dir, pattern = "\\.txt$", full.names = TRUE)

# One row per report: full text plus a document id derived from the file name.
# tibble() replaces the deprecated data_frame(); the str_remove() pattern now
# anchors and escapes the extension ("." was matching any character).
df <- map_df(all_txts, ~ tibble(txt = read_file(.x)) %>% mutate(doc_id = basename(.x)))
df <- df %>% mutate(Code_Network = str_remove(doc_id, "\\.txt$"))

# saveRDS(df, "DataProd/textUrbactNetwork.rds")
write.csv2(df, "DataProd/textUrbactNetwork.csv", fileEncoding = "UTF-8", row.names = FALSE)

############# ==== TEXT MINING ===========###############

############### spacyr and tidy approach
# install.packages("spacyr")
library(spacyr)
# spacy_install()
# spacy_download_langmodel(model = "en_core_web_lg" )

# Choose the spaCy model
spacy_initialize(model = "en_core_web_lg")
# spacyr::spacy_finalize()

# Test with one sentence
textReport <- "Berlin provides a case study of two practical interventions which can arrest and ultimately reverse decline"
parsedtxt <- spacy_parse(textReport)
stargazer(parsedtxt, summary = FALSE, rownames = FALSE)

# Process all documents and obtain one token-level data.table
textReport <- df$txt
names(textReport) <- df$Code_Network
parsedtxt <- spacy_parse(textReport)

# Remove stop words
CleanParsedText <- unnest_tokens(parsedtxt, word, token) %>%
  dplyr::anti_join(stop_words)

################ ====~ BASIC TEXT MINING ==== ##########

# Size of the corpus: number of tokens per document
DocSize <- parsedtxt %>%
  group_by(doc_id) %>%
  summarise(NWords = as.numeric(n())) %>%
  arrange(NWords) %>%
  as.data.frame()

ggplot(DocSize) +
  geom_bar(aes(x = reorder(doc_id, -NWords), y = NWords), stat = "identity") +
  theme(plot.subtitle = element_text(vjust = 1), plot.caption = element_text(vjust = 1)) +
  labs(title = "Nombre de mots par document du corpus des rapports de projet URBACT", x = "Document ID") +
  theme(axis.title = element_text(size = 13),
        axis.text = element_text(size = 12),
        plot.title = element_text(size = 16),
        axis.text.x = element_text(angle = 90, vjust = 0.5))

summary(DocSize$NWords)
# stargazer needs a data.frame, not a tibble
stargazer(DocSize, median = TRUE, digit.separator = " ")
head(DocSize)

### Lexicon (stop words removed)
Lexique <- CleanParsedText %>%
  count(word, sort = TRUE)

### Top words
Top100 <- Lexique %>% top_n(100, n)
wordcloud(words = Top100$word, freq = Top100$n, min.freq = 1,
          max.words = 100, random.order = FALSE, rot.per = 0.35)

## Share of each part-of-speech tag (punctuation excluded)
stats <- txt_freq(parsedtxt$pos[!parsedtxt$pos == "PUNCT"])
ggplot(stats) +
  geom_bar(aes(x = reorder(key, -freq_pct), y = freq_pct), stat = "identity") +
  coord_flip() + theme_bw()

## Same, after stop-word removal
stats <- txt_freq(CleanParsedText$pos)
ggplot(stats) +
  geom_bar(aes(x = reorder(key, -freq_pct), y = freq_pct), stat = "identity") +
  coord_flip() + theme_bw()

## Most frequent noun lemmas
stats <- subset(CleanParsedText, pos %in% c("NOUN"))
stats <- txt_freq(stats$lemma)
g1 <- ggplot(stats[1:30, ]) +
geom_bar(aes(x = reorder(key, freq), y = freq), stat = "identity") +
coord_flip() + theme_bw() + labs(title = "Noms",x = NULL, y = "Occurrences (log10)") + scale_y_log10(n.breaks = 10)
# scale_y_log10()
## adjectives
stats <- subset(CleanParsedText, pos %in% c("ADJ"))
stats <- txt_freq(stats$lemma)
g2 <- ggplot(stats[1:30, ]) +
geom_bar(aes(x = reorder(key, freq), y = freq), stat = "identity") +
coord_flip() + theme_bw() + labs(x = "Adjectifs", y = "Occurrences") + scale_y_continuous(n.breaks = 10)
## Verb
stats <- subset(CleanParsedText, pos %in% c("VERB"))
stats <- txt_freq(stats$lemma)
g3 <- ggplot(stats[1:30, ]) +
geom_bar(aes(x = reorder(key, freq), y = freq), stat = "identity") +
coord_flip() + theme_bw() + labs(title = "Verbes",x = NULL, y = "Occurrences", caption ="Sources : URBACT site web / P.Gourdon 2020" ) +
scale_y_continuous(n.breaks = 10)
# cooccurrence
cooctermsCorpus <- cooccurrence(CleanParsedText$lemma, skipgram = 3)
cooctermsCorpus %>% filter(term1 == "create" & cooc > 5)
## Plot
gridVerbNom <- g1/g3
gridVerbNom
ggsave("OUT/URBACTreport_Top30_Verb_Noun.pdf", width = 8.3, height = 8.3 , units = "in" )
################ Key words
## Keyword extraction 1: RAKE (Rapid Automatic Keyword Extraction) on
## NOUN/ADJ sequences, grouped by document, n-grams of length 3.
stats <- keywords_rake(x = parsedtxt, term = "lemma", group = "doc_id",
                       relevant = parsedtxt$pos %in% c("NOUN", "ADJ"), ngram_max = 3,n_min = 3)
stats$key <- factor(stats$keyword, levels = rev(stats$keyword))
statsTop <- stats %>% filter(ngram >1)%>% top_n(30, freq)
summary(stats$freq)
summary(stats$rake)
# Keep multi-word keywords that are both frequent and well scored
statsTop <- stats %>% filter(ngram >1)%>% filter(freq>30 & rake > 2)
ggplot(statsTop) +
  geom_bar(aes(x = reorder(key, freq), y = freq), stat = "identity") +
  # scale_y_log10(n.breaks = 8)+
  scale_y_continuous(n.breaks = 8)+
  coord_flip() + theme_bw() + labs(x = "Mots-clés identifiés par RAKE", y = "Occurrences dans le corpus",
                                   caption = "Note : les mots-clés représentés sont issus du triple filtrage suivant : n-gram > 1 & occurences > 30 & Score 'Rake' > 2\nSources : URBACT site web / P.Gourdon 2020")
ggsave("OUT/KW_URBACTreport_RAKE_ngram2_freq30_Rake2.pdf", width = 8.3, height = 5.8 , units = "in" )
# Scatter view: keyword frequency vs RAKE quality score
statsTop <- stats %>% filter(ngram >1) %>% filter(freq > 30 & rake > 1.5)
ggplot(statsTop) +
  geom_point(aes(x=freq,y = rake), shape = 21, fill = "orange", color = "grey80", alpha= 0.7) +
  # scale_size(range = c(2:10), breaks = c(min(TopTfIdfFreq$MeanTfIdf): max(TopTfIdfFreq$MeanTfIdf)))+
  scale_x_log10() +
  geom_text_repel(aes(label = keyword,x=freq,y = rake ), size = 2.5, segment.alpha= 0.5,segment.size = 0.3)+
  labs(x = "Nombre d'occurrences dans le corpus (log10)", y = "Qualité du mot-clé (RAKE)")
## Keyword extraction 2: Pointwise Mutual Information collocations
# Exclude proper nouns, numbers and unknown tokens before scoring
stats <- keywords_collocation(x = CleanParsedText %>% filter(!pos == "PROPN" & !pos == "NUM" & !pos == "X"),
                              term = "lemma", group = "sentence_id", ngram_max = 4, n_min = 3)
stats$key <- factor(stats$keyword, levels = rev(stats$keyword))
statsTop <- stats %>% top_n(30, freq)
statsTop <- stats %>% filter(freq > 30 & pmi > 7)
ggplot(statsTop) +
  geom_bar(aes(x = reorder(key, freq), y = freq), stat = "identity") +
  coord_flip() + theme_bw() + labs(x = "Mots-Clés identifiés avec la PMI Collocation", y = "Nombre d'occurrences dans le corpus",
                                   caption = "Note : les mots-clés représentés sont issus du double filtrage suivant : occurences > 30 & Score 'PMI' > 7\nSources : URBACT site web / P.Gourdon 2020")
ggsave("OUT/KW_URBACTreport_PMI_Top30.pdf", width = 8.3, height = 5.8 , units = "in" )
## Tf-idf: lemma counts per document (content words only, no named entities)
Ntermdoc <- CleanParsedText %>%
  # filter(!pos == "VERB" ) %>%
  filter(!pos == "PROPN" & !pos == "NUM" & !pos == "X") %>%
  filter(entity == "")%>%
  count(lemma, doc_id)
TfIdfDoc <- Ntermdoc %>% bind_tf_idf(lemma, doc_id, n)
TopTfIdf <- TfIdfDoc %>% group_by(doc_id)%>% top_n(10, tf_idf)
# Per-lemma aggregates across documents
TfIdMean <- TfIdfDoc %>% group_by(lemma) %>% summarise(MeanTfIdf = mean(tf_idf), Freq = sum(n), MaxTfIdf = max(tf_idf), idf = max(idf))
## Most frequent terms with low idf (shared by most documents)
Top1 <- TfIdMean %>% filter(idf<1) %>% arrange(desc(Freq))%>% ungroup() %>% top_n(50, Freq)
## Most frequent terms with high idf (specific to subsets of documents)
Top2 <- TfIdMean %>% filter(idf>1) %>% arrange(desc(Freq))%>% ungroup() %>% top_n(50, Freq)
# Plots: panel A = common vocabulary, panel B = document-specific vocabulary
ga <-ggplot(Top1) +
  geom_point(aes(x=Freq,y = idf, size = MeanTfIdf), shape = 21, fill = "orange", color = "grey80", alpha= 0.7) +
  # scale_size(range = c(2:10), breaks = c(min(TopTfIdfFreq$MeanTfIdf): max(TopTfIdfFreq$MeanTfIdf)))+
  scale_y_log10()+
  scale_x_log10() +
  scale_size(range = c(1, 6), limits = c(min(Top1$MeanTfIdf), max(Top2$MeanTfIdf))) +
  geom_text_repel(aes(label = lemma,x=Freq,y = idf ), size = 2.5, segment.alpha= 0.5,segment.size = 0.3)+
  labs(x = "Nombre d'occurrences dans le corpus (log10)", y = "IDF (log10)", size = "Moyenne du Tf-Idf",
       title = "A", subtitle = "Mots les plus fréquents communs à la majorité des documents")+
  theme(legend.position="none")
options(scipen= 999)
gb <- ggplot(Top2) +
  geom_point(aes(x=Freq,y = idf, size = MeanTfIdf), shape = 21, fill = "orange", color = "grey80", alpha= 0.7) +
  # scale_size(range = c(2:10), breaks = c(min(TopTfIdfFreq$MeanTfIdf): max(TopTfIdfFreq$MeanTfIdf)))+
  scale_y_log10()+
  scale_x_log10() +
  scale_size(range = c(1, 6),
             limits = c(min(Top1$MeanTfIdf), max(Top2$MeanTfIdf)),
             breaks = round(c(min(Top1$MeanTfIdf),0.0001, 0.0005,0.001, 0.005, 0.01, 0.020, max(Top2$MeanTfIdf)), digits = 5)) +
  geom_text_repel(aes(label = lemma,x=Freq,y = idf ), size = 2.5, segment.alpha= 0.5, segment.size = 0.3)+
  labs(x = "Nombre d'occurrences dans le corpus (log10)", y = "IDF (log10)", size = "Moyenne du Tf-Idf",
       caption = "Source : URBACT site web / P. Gourdon 2020",title = "B", subtitle = "Mots les plus fréquents spécifiques à des sous-ensembles de documents")+
  theme(legend.position="bottom", legend.direction = "horizontal")+
  guides(size=guide_legend(nrow=1, label.position = "bottom"))
grid2 <- ga/gb
grid2
ggsave("OUT/URBACTreport_topterms_Idf_N.pdf", width = 8.3, height = 8.3 , units = "in" )
### Variables: tf-idf broken down by categorical attributes of the networks
#### Join project metadata and country-level political typologies onto the tokens
CleanParsedText <- CleanParsedText %>%
  left_join(select(URBACT_project, LeadPartnerCountry,Region,Phase, Type,
                   Start, doc_id = Code_Network))
Politics <- read.csv2("~/Chap5_TextMining/Data/CountryPolitical/CountryInfo_PoliticalTypo.csv",
                      encoding = "UTF-8", stringsAsFactors = FALSE)
# Harmonise country code with URBACT data (GB -> UK)
Politics <- Politics %>% mutate(iso_a2 = recode(iso_a2, "GB" = "UK"))
colnames(Politics)
CleanParsedText <- CleanParsedText %>% left_join(select(Politics, LeadPartnerCountry = iso_a2,
                                                        LocGovType_HorizontalPwrRelation,
                                                        LocGovType_VerticalPwrRelation,
                                                        LocGovType_PoliticalLeadership,
                                                        LocGovType_MunicipalAdmin))
skim(CleanParsedText$LocGovType_MunicipalAdmin)
# Restrict the project table to networks that actually have a report text
URBACT_project_text <- URBACT_project %>% filter(Code_Network %in% df$Code_Network)
### Category 1: lead-partner country.
# `cat` holds the grouping column name as a string; !!as.name(cat) turns it
# into a symbol so the same pipeline can be reused for any category.
cat <- "LeadPartnerCountry"
Cat_words <- CleanParsedText %>%
  filter(!pos == "PROPN" & !pos == "NUM" & !pos == "X") %>%
  filter(entity == "") %>%
  count(!!as.name(cat), lemma, sort = TRUE)
plot_cat <- Cat_words %>%
  bind_tf_idf(lemma,!!as.name(cat), n) %>%
  mutate(lemma = fct_reorder(lemma, tf_idf))
n_cat <- URBACT_project_text %>% group_by(!!as.name(cat)) %>% count()
# Specific filter for country: keep only countries leading > 3 networks
# (do not run this filter for the Region category)
countryfilter <- n_cat %>% filter(n>3) %>% select(LeadPartnerCountry) %>% deframe()
plot_cat <- plot_cat %>% filter(LeadPartnerCountry %in% countryfilter)
n_cat <- n_cat %>% filter(LeadPartnerCountry %in% countryfilter)
# Top-10 tf-idf lemmas per country, faceted, with document counts annotated
plot_cat %>%
  group_by(!!as.name(cat)) %>%
  top_n(10, tf_idf) %>%
  ungroup() %>%
  ggplot(aes(reorder_within(lemma, tf_idf,!!as.name(cat) ), tf_idf, fill = !!as.name(cat))) +
  geom_col(show.legend = FALSE) +
  labs(x = NULL, y = "tf-idf", caption = "Source : URBACT site web / P. Gourdon 2020") +
  scale_x_reordered()+
  facet_wrap(as.formula(paste("~", cat)), ncol = 2, scales = "free") +
  coord_flip()+
  geom_label(aes(label = n, x= reorder_within(lemma, tf_idf,!!as.name(cat) ), y = tf_idf), position = position_stack(0.5), color = "black", size = 2)+
  geom_text( data = n_cat,
             mapping = aes(x = -Inf, y = Inf, label = paste("Nb docs = ", n, sep = "")),
             vjust = -1, hjust = 1.2, size = 2.5)+
  theme(legend.position = "none")
ggsave("OUT/URBACTreport_Country_Idf_N.pdf", width = 8.3, height = 8.3 , units = "in" )
## Category 2: political typology (municipal administration type)
cat <- "LocGovType_MunicipalAdmin"
Cat_words <- CleanParsedText %>%
  filter(!pos == "PROPN" & !pos == "NUM" & !pos == "X") %>%
  filter(entity == "") %>%
  count(!!as.name(cat), lemma, sort = TRUE)
plot_cat <- Cat_words %>%
  bind_tf_idf(lemma,!!as.name(cat), n) %>%
  mutate(lemma = fct_reorder(lemma, tf_idf))
n_cat <- URBACT_project_text %>% left_join(select(Politics, LeadPartnerCountry = iso_a2,
                                                  LocGovType_HorizontalPwrRelation,
                                                  LocGovType_VerticalPwrRelation,
                                                  LocGovType_PoliticalLeadership,
                                                  LocGovType_MunicipalAdmin)) %>%
  group_by(!!as.name(cat)) %>% count()
plot_cat %>%
  group_by(!!as.name(cat)) %>%
  top_n(10, tf_idf) %>%
  ungroup() %>%
  ggplot(aes(reorder_within(lemma, tf_idf, !!as.name(cat)), tf_idf, fill = !!as.name(cat)), show.legend = FALSE) +
  geom_col() +
  labs(x = NULL, y = "tf-idf") +
  scale_x_reordered() +
  facet_wrap(as.formula(paste("~", cat)), ncol = 2, scales = "free") +
  coord_flip()+
  geom_label(aes(label = n, x= reorder_within(lemma, -tf_idf, !!as.name(cat)), y = tf_idf), position = position_stack(0.5), color = "black", size = 2)+
  geom_text( data = n_cat,
             mapping = aes(x = -Inf, y = Inf, label = paste("Nb docs = ", n, sep = "")),
             vjust = -1, hjust = 1.2, size = 2.5)+
  theme(legend.position = "none")
## Category 3: programming phase
cat <- "Phase"
Cat_words <- CleanParsedText %>%
  filter(!pos == "PROPN" & !pos == "NUM" & !pos == "X") %>%
  filter(entity == "") %>%
  count(!!as.name(cat), lemma, sort = TRUE)
plot_cat <- Cat_words %>%
  bind_tf_idf(lemma,!!as.name(cat), n) %>%
  mutate(lemma = fct_reorder(lemma, tf_idf))
n_cat <- URBACT_project_text %>% group_by(!!as.name(cat)) %>% count()
plot_cat %>%
  group_by(!!as.name(cat)) %>%
  top_n(30, tf_idf) %>%
  ungroup() %>%
  mutate(lemma = reorder(lemma, tf_idf)) %>%
  ggplot(aes(lemma, tf_idf, fill = !!as.name(cat))) +
  geom_col(show.legend = FALSE) +
  labs(x = NULL, y = "tf-idf") +
  facet_wrap(as.formula(paste("~", cat)), ncol = 2, scales = "free") +
  coord_flip()+
  geom_label(aes(label = n, x= lemma, y = tf_idf), position = position_stack(0.5), color = "black", size = 2)+
  geom_text( data = n_cat,
             mapping = aes(x = -Inf, y = Inf, label = paste("Nb docs = ", n, sep = "")),
             vjust = -1, hjust = 1.2, size = 2.5)+
  theme(legend.position = "none")
ggsave("OUT/URBACTReport_tfIdf_phase.pdf", width = 8.3, height = 8.3 , units = "in" )
## Category 4: starting period
# library(questionr)
# irec(CleanParsedText)
## Recode CleanParsedText$Start (year) into CleanParsedText$Start_period (2-year bins)
CleanParsedText$Start_period <- as.character(CleanParsedText$Start)
CleanParsedText$Start_period <- fct_recode(CleanParsedText$Start_period,
                                           "2007-2008" = "2008",
                                           "2009-2010" = "2009",
                                           "2009-2010" = "2010",
                                           "2007-2008" = "2007",
                                           "2015-2016" = "2015",
                                           "2015-2016" = "2016")
cat <- "Start_period"
Cat_words <- CleanParsedText %>% filter(!is.na(!!as.name(cat))) %>%
  filter(!pos == "PROPN" & !pos == "NUM" & !pos == "X") %>%
  filter(entity == "") %>%
  count(!!as.name(cat), lemma, sort = TRUE)
plot_cat <- Cat_words %>%
  bind_tf_idf(lemma,!!as.name(cat), n) %>%
  mutate(lemma = fct_reorder(lemma, tf_idf))
plot_cat %>%
  group_by(!!as.name(cat)) %>%
  top_n(15, tf_idf) %>%
  ungroup() %>%
  mutate(lemma = reorder(lemma, tf_idf)) %>%
  ggplot(aes(lemma, tf_idf, fill = !!as.name(cat))) +
  geom_col(show.legend = FALSE) +
  labs(x = NULL, y = "tf-idf") +
  facet_wrap(as.formula(paste("~", cat)), ncol = 2, scales = "free") +
  coord_flip()
# Keywords by category
cat <- "LocGovType_MunicipalAdmin"
# NOTE(review): `AttributesClean` and the key `CodePractices` are not defined
# anywhere in this script — this join looks copy-pasted from the Good-Practices
# script and will fail here; verify against the sibling script.
cat_KW <- parsedtxt %>% mutate(doc_id = as.numeric(doc_id)) %>%
  left_join(select(AttributesClean, CountryCode,Region,ClassePop,ClasseStart,SizeClassURBACT,doc_id = CodePractices))%>%
  left_join(select(Politics, CountryCode = iso_a2,
                   LocGovType_HorizontalPwrRelation,
                   LocGovType_VerticalPwrRelation,
                   LocGovType_PoliticalLeadership,
                   LocGovType_MunicipalAdmin))
# RAKE keywords computed separately inside each category level
cat_KW <- cat_KW %>%
  group_by(!!as.name(cat))%>%
  do(keywords_rake(x = ., term = "lemma", group = "doc_id",
                   relevant = .$pos %in% c("NOUN", "ADJ"), ngram_max = 4,n_min = 3) )
statsTop <- cat_KW %>% group_by(!!as.name(cat)) %>% filter(freq>5 & rake > 1.5) %>% top_n(8, freq)
statsTop <- cat_KW %>% group_by(!!as.name(cat)) %>% top_n(5, rake)
ggplot(statsTop) +
  geom_bar(aes(x = reorder_within(keyword, freq, !!as.name(cat)), y = freq), stat = "identity") + scale_x_reordered()+
  coord_flip() + theme_bw() + labs(x = "Keywords identified by RAKE", y = "Fréquence")+facet_wrap(as.formula(paste("~", cat)),scales = "free")
## Same exercise with Pointwise Mutual Information collocations
# (stopwords, proper nouns, numbers and entities already filtered out)
cat_KW <- CleanParsedText %>% filter(!pos == "PROPN" & !pos == "NUM" & !pos == "X") %>%
  filter(entity == "") %>%
  group_by(!!as.name(cat))%>%
  do(keywords_collocation(x = ., term = "lemma", group = "sentence_id", ngram_max = 4, n_min = 3) )
statsTop <- cat_KW %>% group_by(!!as.name(cat)) %>% top_n(5, freq)
statsTop <- cat_KW %>% group_by(!!as.name(cat)) %>% filter(freq>5 & pmi> 6) %>% top_n(5, freq)
ggplot(statsTop) +
  geom_bar(aes(x = reorder_within(keyword, freq, !!as.name(cat)), y = freq), stat = "identity") +
  coord_flip() + theme_bw() + labs(x = "Keywords identified by PMI Collocation", y = "Fréquence") +scale_x_reordered()+
  facet_wrap(as.formula(paste("~", cat)),scales = "free")
###### Document-term matrix (udpipe)
## Note: keep doc_id and any external category vector in the SAME order,
## because specific_terms(dtm2, SizeVec) does not perform a merge.
data <- CleanParsedText %>% filter(!pos == "PROPN" & !pos == "X" & !pos == "NUM") %>%
  filter(entity == "") %>%
  arrange(doc_id)
x4 <- document_term_frequencies(data[,c("doc_id","lemma")])
x5 <- document_term_frequencies_statistics(x4)
### DTM
dtm <- document_term_matrix(x4)
saveRDS(dtm,"DataProd/dtm_test.rds")
# Some words in context: concordances via R.temis
corpus <- import_corpus("DataProd/textUrbactNetwork.csv", textcolumn = 1, format = "csv", language = "en" )
dtmTemis <- build_dtm(corpus)
concordances(corpus, dtmTemis, c("sustainable", "regeneration") )
concordances(corpus, dtmTemis, "roma")
concordances(corpus, dtmTemis, "sustainable regeneration")
# Which raw report texts contain a given expression
df %>% filter(str_detect(df$txt, "sustainable regeneration"))
df %>% filter(str_detect(df$txt, "urban fringe"))
df %>% filter(str_detect(df$txt, "women"))
# NOTE(review): `URBACT_MembershipF` and `URBACT_Project_Report` are not
# created in this script; these interactive look-ups rely on objects from
# another script being in the workspace — confirm before running.
URBACT_MembershipF %>%
  filter(asciiName %in% c("Eidhoven", "Manchester", "Limoges", "Bilbao", "York", "Gdansk")) %>%
  filter(City.Statut == "Lead Partner")
URBACT_project_text %>% filter(Code_Network %in% c("7", "18", "32", "36", "65", "73"))
URBACT_MembershipF %>%
  filter(asciiName %in% c("Leoben", "Basingstoke", "Cesena", "Barnsley", "Gela")) %>%
  filter(City.Statut == "Lead Partner")
URBACT_Project_Report %>% filter(Code_Network %in% c("23", "24", "39", "50", "70", "77"))
############ Topic modeling
#### LDA (Gibbs sampling, fixed seed for reproducibility)
## Filter the DTM: drop the 15% of terms with the lowest tf-idf
# dtm <- dtm_remove_tfidf(dtm, top = 1500)
dtmf <- dtm_remove_tfidf(dtm, prob = 0.15)
# #frq filter
# lexiquecat <- data %>%
#   group_by(lemma) %>%
#   mutate(word_total = n()) %>%
#   ungroup() %>%
#   filter(word_total > 20)
#
# #tf idf filter
#
# Ntermdoc <- data %>%
#   count(lemma, sentence_id)
#
# TfIdfDoc <- Ntermdoc %>% bind_tf_idf(lemma, sentence_id, n)
#
# lexiquecat <- TfIdfDoc %>% top_frac(0.8, tf_idf)
# # convert into a document-term matrix
# # with document names such as sci.crypt_14147
# cat_dtm <- lexiquecat %>%
#   count(sentence_id, lemma) %>%
#   cast_dtm(sentence_id, lemma, n)
# Choosing K: the ldatuning scan below (commented out) suggested k = 26 or 14;
# k = 22 was retained after inspection.
# library(ldatuning)
# saveRDS(dtm, "DataProd/dtm_test.rds")
#
#
# ktopics <- FindTopicsNumber(dtm,
#                             topics = seq(from = 2, to = 50, by = 1),
#                             metrics = c("Griffiths2004", "CaoJuan2009", "Arun2010", "Deveaud2014"),
#                             method = "Gibbs",
#                             control = list(seed = 2016),
#                             mc.cores = 2L,
#                             verbose = TRUE)
# k 26 or 14
cat_lda <- LDA(dtmf, k = 22, control = list(seed = 2016), method = "Gibbs")
# Top-10 terms per topic (beta = per-topic word probability)
cat_lda %>%
  tidy() %>%
  group_by(topic) %>%
  top_n(10, beta) %>%
  ungroup() %>%
  mutate(term = reorder_within(term, beta, topic)) %>%
  ggplot(aes(term, beta, fill = factor(topic))) +
  geom_col(show.legend = FALSE) +
  facet_wrap(~ topic, scales = "free_y") +
  coord_flip() +
  scale_x_reordered()+labs(x = "termes")
ggsave("OUT/URBACTreports_LDA_DTM015_K22.pdf", width = 11.7, height = 8.3, units = "in")
# Same topic plot but with interpretation (title)
## Naming topics: manual labels assigned after reading the top terms
TableCorrespondanceTopic <- data.frame(topic = 1:22, Interpretation =
                                         c("1. Territorial Cooperation","2. Retail & City Centre", "3. University",
                                           "4. Port & Hub","5. Heritage",
                                           "6. Mobility","7. Abandoned Spaces", "8. School & Youth Employment",
                                           "9. Methodology", "10. Innovation","11. Agro-food",
                                           "12. Funding","13. Tourism","14. Creative City",
                                           "15. Regeneration & Temporary use", "16. Housing & Neighbourhood", "17. City Branding",
                                           "18. Health & Care", "19. Migrants & Minorities", "20. Transport & Urban Sprawl",
                                           "21. Resilience & Transition" ,"22. Target groups"))
cat_lda_tidy <- cat_lda %>%
  tidy() %>% left_join(TableCorrespondanceTopic)
# Wrap long facet titles so they fit above the panels: insert "\n" breaks
# so that each line of `string` is at most `nwrap` characters wide
# (default 20). Vectorize() lets it be applied to a whole column of
# labels inside mutate().
#
# Args:
#   string: character scalar (vectorised over after Vectorize()).
#   nwrap:  maximum line width in characters.
# Returns: the input with embedded "\n" line breaks.
swr <- function(string, nwrap = 20) {
  paste(strwrap(string, width = nwrap), collapse = "\n")
}
swr <- Vectorize(swr)
# Apply the title-wrapping helper to the topic labels
cat_lda_tidy <- cat_lda_tidy %>% mutate(Interpretation = swr(Interpretation))
dput(unique(cat_lda_tidy$Interpretation))
# Freeze the factor levels in their current (topic-number) order
cat_lda_tidy$Interpretation <- factor(cat_lda_tidy$Interpretation, levels=dput(unique(cat_lda_tidy$Interpretation)))
# Colour vector: one distinct colour per topic, named by label
library(randomcoloR)
TopicColor <- distinctColorPalette(k = length(unique(cat_lda_tidy$Interpretation)))
names(TopicColor) <- unique(cat_lda_tidy$Interpretation)
# Plot with interpreted facet titles (top-8 terms per topic)
cat_lda_tidy %>%
  group_by(Interpretation) %>%
  top_n(8, beta) %>%
  ungroup() %>%
  mutate(term = reorder_within(term, beta, Interpretation)) %>%
  ggplot(aes(term, beta, fill = Interpretation)) +
  geom_col(show.legend = FALSE) +
  scale_fill_manual(values = TopicColor)+
  facet_wrap(~ Interpretation, scales = "free_y") +
  coord_flip() +
  scale_x_reordered()+labs(x = "termes", caption = "Source : URBACT site web / P. Gourdon 2020" )
ggsave("OUT/URBACTreports_LDA_DTM015_K22.pdf", width = 11.7, height = 8.3, units = "in")
#### Topics per document (gamma = per-document topic proportion)
Practice_lda_gamma <- tidy(cat_lda, matrix = "gamma")
Practice_lda_gamma
# Heatmap of topic shares per document
ggplot(Practice_lda_gamma,aes(x= as.factor(topic), y=document))+
  geom_tile(aes(fill = gamma*100),
            colour = "grey") + scale_fill_gradient(low = "white",
                                                   high = "steelblue")
summary(Practice_lda_gamma$gamma)
# Wide table: one row per document, one column per topic (gamma in %)
Lda_GammaDoc <- Practice_lda_gamma %>% mutate(PctGamma = gamma*100)%>%
  # left_join(TableCorrespondanceTopic, by = c("topic"="CodeTopic")) %>%
  # select(-gamma, -topic)%>%
  select(-gamma)%>%
  spread(key = topic, value = PctGamma)
# Binary (disjunctive) version: topic present if its share exceeds 5%
LdaDisjonct <- Lda_GammaDoc %>% mutate_at(.vars = vars(2:ncol(Lda_GammaDoc)), ~ifelse(.>5,1,0))
# Attach country-level political typologies to the project table
URBACT_project_text <- URBACT_project_text %>% left_join(Politics, by = c("LeadPartnerCountry" = "iso_a2"))
TableCorrespondanceTopic$topic <- as.character(TableCorrespondanceTopic$topic)
names(TopicColor) <- TableCorrespondanceTopic$Interpretation
URBACT_project_text <- URBACT_project_text %>% mutate(Code_Network = as.numeric(Code_Network))
## Explore gamma (mean and median) by categorical variable.
# For one categorical variable `cat` (column name as a string), compute the
# mean and median topic share (gamma, in %) per category level, rank the
# topics within each level, and return bar charts of the top-5 topics.
#
# Args:
#   Lda_GammaDoc:       wide doc x topic table of gamma percentages
#                       (first column = document id).
#   cat:                name of the grouping column in DfInfoDoc (string).
#   DfInfoDoc:          per-network attribute table (must contain
#                       Code_Network and the `cat` column).
#   InterpretationTopic: topic-number -> label correspondence table.
#   TopicColor:         named colour vector keyed by topic label.
# Returns: a list with two ggplot objects, $GammaMeanPlot and $GammaMedPlot.
#
# NOTE(review): require(directlabels) is loaded but never used below;
# require() inside a function is also discouraged (use pkg:: or library()
# at script level) — TODO confirm and remove.
gammatopbycat <- function(Lda_GammaDoc, cat, DfInfoDoc,InterpretationTopic, TopicColor){
  require(tidyverse)
  require(directlabels)
  plots <- list()
  # Number of documents per category level (annotated on each facet)
  Ndoc <- DfInfoDoc %>% group_by(!!as.name(cat))%>% summarise(N= n())
  # Mean and median gamma for each level of the categorical variable
  CatGamma <- Lda_GammaDoc %>% mutate(document = as.numeric(document)) %>%
    left_join(select(DfInfoDoc,document = Code_Network, !!as.name(cat))) %>%
    select(-document) %>% group_by(!!as.name(cat)) %>% summarise_all(list(mean = ~mean(.), med = ~median(.)))
  # Long format for plotting; rank topics within each (level, statistic) pair
  CatGammaRank <- CatGamma %>%
    pivot_longer(-!!as.name(cat), names_to = "Topics", values_to = "value") %>%
    separate(Topics, sep= "_", remove = TRUE, into= c("topic", "variable")) %>%
    group_by(!!as.name(cat),variable) %>%
    mutate(rank = rank(desc(value), ties.method = 'min', na.last = "keep") )
  # Add the human-readable topic labels
  CatGammaRank <- CatGammaRank %>% left_join(InterpretationTopic)
  CatGammaRank <- CatGammaRank %>% mutate(Interpretation = as.character(Interpretation))
  # Top-5 topics per level (rank < 6), separately for mean and median
  TopMean <- CatGammaRank %>% filter(variable == "mean" & rank < 6) %>% filter(!is.na(!!as.name(cat)))
  TopMed <- CatGammaRank %>% filter(variable == "med" & rank < 6) %>% filter(!is.na(!!as.name(cat)))
  # Colour vector restricted to the topics actually plotted
  FilterTopicColor <- TopicColor[names(TopicColor) %in% unique(TopMean$Interpretation)]
  # Plot: top topics by MEAN gamma, one facet per category level
  GammaMeanPlot <- ggplot(TopMean, aes(reorder_within(Interpretation, value, !!as.name(cat)), value, fill = Interpretation)) +
    geom_col(show.legend = FALSE) +
    scale_fill_manual(values = FilterTopicColor) +
    geom_text(data = Ndoc,
              mapping = aes(x = -Inf, y = Inf, label = paste("Nb docs = ", N, sep = "")),
              inherit.aes = FALSE, size = 2.5, vjust = -1, hjust = 1.2)+
    facet_wrap(as.formula(paste("~", cat)), scales = "free_y", ncol = 2) +
    coord_flip() +
    scale_x_reordered() +
    labs(x = "Thèmes", y = "Moyenne Gamma (%)", caption = "Source : URBACT site web / P. Gourdon 2020" )
  # Colour vector for the median plot
  FilterTopicColor <- TopicColor[names(TopicColor) %in% unique(TopMed$Interpretation)]
  # Plot: top topics by MEDIAN gamma
  GammaMedPlot <- ggplot(TopMed, aes(reorder_within(Interpretation, value, !!as.name(cat)), value, fill = Interpretation)) +
    geom_col(show.legend = FALSE) +
    scale_fill_manual(values = FilterTopicColor) +
    geom_text(data = Ndoc,
              mapping = aes(x = -Inf, y = Inf, label = paste("Nb docs = ", N, sep = "")),
              inherit.aes = FALSE, size = 2.5, vjust = -1, hjust = 1.2)+
    facet_wrap(as.formula(paste("~", cat)), scales = "free_y", ncol = 2) +
    coord_flip() +
    scale_x_reordered() +
    labs(x = "Thèmes", y = "Médiane Gamma (%)", caption = "Source : URBACT site web / P. Gourdon 2020" )
  ## Store and return both plots
  plots[["GammaMeanPlot"]] <- GammaMeanPlot
  plots[["GammaMedPlot"]] <- GammaMedPlot
  return(plots)
}
# Results exploration: one call per categorical variable of interest
RegionPlot <- gammatopbycat(Lda_GammaDoc, cat = "Region", DfInfoDoc = URBACT_project_text, InterpretationTopic = TableCorrespondanceTopic, TopicColor )
# SizeClassPlot <- gammatopbycat(Lda_GammaDoc, cat = "SizeClassURBACT", DfInfoDoc = URBACT_project_text, InterpretationTopic = TableCorrespondanceTopic, TopicColor )
#
# SizeClassPlot2 <- gammatopbycat(Lda_GammaDoc, cat = "ClassePop", DfInfoDoc = URBACT_project_text, InterpretationTopic = TableCorrespondanceTopic, TopicColor )
PoliticsPlot1 <- gammatopbycat(Lda_GammaDoc, cat = "LocGovType_MunicipalAdmin", DfInfoDoc = URBACT_project_text, InterpretationTopic = TableCorrespondanceTopic, TopicColor )
PoliticsPlot2 <- gammatopbycat(Lda_GammaDoc, cat = "LocGovType_HorizontalPwrRelation", DfInfoDoc = URBACT_project_text, InterpretationTopic = TableCorrespondanceTopic, TopicColor )
PoliticsPlot3 <- gammatopbycat(Lda_GammaDoc, cat = "LocGovType_VerticalPwrRelation", DfInfoDoc = URBACT_project_text, InterpretationTopic = TableCorrespondanceTopic, TopicColor )
PoliticsPlot4 <- gammatopbycat(Lda_GammaDoc, cat = "LocGovType_PoliticalLeadership", DfInfoDoc = URBACT_project_text, InterpretationTopic = TableCorrespondanceTopic, TopicColor )
# TODO: politics plot 4 without ceremonial mayors (IE)
|
884a0bc5fa02a20a73d291025d0ce3a2181af8cf
|
f41f8070765c20819f2c5faa5421cb52353459c2
|
/oficina_Parte_3.R
|
1cfcdc5d4d68d008a1f50785e1bc59cf809333a8
|
[] |
no_license
|
Kratochwill/Oficina-Estatistica-basica-com-R-RStudio-
|
2013cc9e09461255661c63daf1213a54bf97fd9f
|
539636943e8dc68e48e635be0778418d1464886f
|
refs/heads/master
| 2021-01-19T03:04:46.606842
| 2017-03-23T20:00:55
| 2017-03-23T20:00:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,766
|
r
|
oficina_Parte_3.R
|
# ---
# title: "Introdução a Estatística Básica e R(RStudio) - 3a. Parte"
# author: "Walter Humberto Subiza Pina"
# date: "11 de janeiro de 2017"
# ---
## Plots with the generic plot() function
# Goal: show how to use plot() from the base package for basic graphics
# such as variable plots, histograms and boxplots.
#
# External data files: "dados4.csv" (csv2 format, no header) and "bloco1.csv"
#
# When working with graphics it is good practice to save the default
# graphical parameters so they can be restored later:
pro <- par(no.readonly=TRUE)
#
# Try the following R command in the console (pane 3): demo(graphics)
# Are plots useful?? Consider the following example:
#
# We have a file with 300 observations whose expected mean is 2.5, a standard
# deviation near 1 and an expected normal distribution. What can we analyse?
# Import dados4.csv, draw a histogram, compute mean and standard deviation.
dados4 <- read.csv2("dados4.csv", header=T)
# Structure of the data and the first 6 records (rows)...
str(dados4)
head(dados4)
# Some statistics: mean, standard deviation...
mean(dados4$Valor)
sd(dados4$Valor)
# Histogram to eyeball normality
hist(dados4$Valor, breaks=10)
# Analyse the results — any skew? Compute the median:
median(dados4$Valor)
# Sequential plot: does any value stand out??
plot(dados4$Valor)
# Zoom in on the suspicious values
plot(dados4$Valor, xlim=c(146,160), ylim=c(0.9,1.2))
# Load file selecao.csv (format 2)
### Bar charts
# A bar chart is not very informative for numeric vectors.
bloco1 <- read.csv2("bloco1.csv")
# NOTE(review): attach() is discouraged in scripts — it masks variables and
# makes scoping hard to follow; prefer bloco1$H / with(bloco1, ...).
attach(bloco1)
barplot(H, horiz = F)
# For categorical data it offers more information.
conto <- table(Sub.Bloco)
barplot(conto,
        ylim= c(0,100),
        main="est1",
        xlab="Estacoes por Sub Blocos")
# horizontal: horiz="True"
# Data can be stacked using a two-way table — here, classified
# by Bloco and Sub.Bloco:
conto2 <- table( Sub.Bloco,Bloco)
color <- c("darkslategray1", "darkslategray3", "darkslategray4", "red")
barplot(conto2,
        ylim = c(0,160),
        col= color,
        main="Estações por Bloco e Sub-Bloco")
legend("topright", rownames(conto2), fill= color)
# You may wonder how those particular colours were chosen; here is the tip...
# > ### TIP FOR PICKING COLOURS!!!
# >
# > Install the "colourpicker" package, load it and run: colourPicker(numCols = 3),
# > where numCols = 3 is the number of colours to pick. A window opens to
# > choose among many colour options; on closing, the console shows something like
# > "navajowhite" "navajowhite3" "navajowhite4" — copy it into a variable and
# > use it in the script.
# >
# Let's plot!!
# First a simple plot with a few controls. We simulate data: 300 sequential
# indices and uniform random values between -5 and +5... see ?runif
set.seed(12345) # a seed makes (pseudo-)random draws reproducible
x<-1:300
y<-runif(x,-5,5)
# Simple plot with no specific control
plot(x,y)
# Name the axes with the xlab and ylab parameters
plot(x, y,
     xlab="Sequencial",
     ylab="Valores")
# Colour and symbol are easy to change with "col" and "pch";
# see ?colors, ?pch, ?palette
plot(x, y,
     xlab="Sequencial",
     ylab="Valores",
     col="red",
     pch = 25)
# Symbol size can be changed with "cex"; try values such as 2 and 0.5
plot(x, y,
     xlab="Sequencial",
     ylab="Valores",
     col="red",
     pch = 25,
     cex=0.5)
# Axis limits can be set with "xlim" and "ylim"
plot(x, y,
     xlab="Sequencial",
     ylab="Valores",
     col="red",
     pch = 25,
     cex=0.5,
     ylim=c(-10,10),
     xlim=c(0,400))
# A title can also be added (and a subtitle too!!)
plot(x, y,
     xlab="Sequencial",
     ylab="Valores",
     col="red",
     pch = 25,
     cex=0.5,
     ylim=c(-10,10),
     xlim=c(0,400),
     main = "O meu Titulo principal")
# Now let's take some control of the axes...
par(pro) # restore the default value of every graphical parameter
# Size of the axis titles and of the tick labels
par(cex.lab=1.5,cex.axis=1.3)
plot(x, y,
     xlab="Sequencial",
     ylab="Valores")
# A very useful function for finer axis control is "axis".
# Among other things it lets us change the tick positions, line type,
# size, tick spacing, colour, etc.
#
# Let's see:
#
# Using the axis function
par(pro) # back to default values..
plot(x,y,
     xaxt="n") # xaxt="n" suppresses drawing of the x axis
axis(side= 1,
     at = c(seq(0, 300, 25))) # draw x-axis ticks from 0 to 300 every 25
# We can place text labels at chosen positions
par(pro)
plot(x,y,
     xaxt="n",
     yaxt="n")
axis(side= 1,
     at = c(seq(0, 300, 25))) # draw x-axis ticks from 0 to 300 every 25
axis(side = 2,
     at = c(-4,0,4),
     labels = c("Pequeno","Medio","Grande"))
# The function also controls the line and tick thickness
par(pro)
plot(x,y,
     xaxt = "n",
     yaxt = "n")
axis(side = 1,
     at = c(seq(0, 300, 25))) # draw x-axis ticks from 0 to 300 every 25
axis(side = 2,
     at = c(-4,0,4),
     labels = c("Pequeno","Medio","Grande"))
axis(side = 3,
     at = c(5,25,75),
     lwd = 4,
     lwd.ticks = 2,
     col.ticks = "red")
axis(side=3,
     at = c(150,225,275),
     lwd = 4,
     lwd.ticks = 2,
     col.ticks = "blue")
abline(v=150)
abline(v=275)
# Or, if you wish, remove the whole plot border and show only the axes.
# Note that R numbers the axes 1 to 4 counter-clockwise starting from the bottom.
plot(x,y,
     bty="n",
     xaxt="n",
     yaxt="n")
axis(side = 3,
     at = seq(0,300,50),
     lwd = 3)
axis(side = 4,
     at = seq(-5,5,2.5),
     lwd = 3)
#
#
# Axis tick marks and labels
#
# Finer control is available through par or axis:
#
# - positive tcl values draw the ticks inside the plot region
#
# - negative tcl values draw the ticks outside the plot region
#
# mgp takes three values:
#
# - the first controls the gap between the plot and the axis title
#
# - the second the gap between the plot and the tick labels, and
#
# - the third the gap between the plot and the axis line,
#
# as in the following example with mgp=c(1.5,0,0):
par(pro)
par(tcl = 0.4,           # tcl controls tick size and direction
    mgp = c(1.5,0.2,0))  #
plot(x,y) # try 0.5 and 1
# Second mgp term
par(tcl = -0.4,
    mgp = c(0.5,2,0))
plot(x,y) # try 0.5
# Third mgp term
par(tcl = 0.4,
    mgp = c(0,0.5,2.7))
plot(x,y) # try 0.5
# Another example with axis(): titles are suppressed in plot() and added manually.
par(pro)
plot(x,y,
xaxt = "n",
yaxt = "n",
xlab = "",
ylab = "",
main = " Meu Titulo")
axis(side = 1,
at = seq(0,300,50),
tcl = 0.4,
lwd.ticks= 3,
mgp = c(0,0.5,0))
# we use mtext because the axis title was disabled in plot()
mtext(side = 1,
text = "Eixo X: sequencial",
line = 1.5)
axis(side = 2,
at = seq(-5,5,2.5),
tcl = 0.3,
lwd.ticks= 3 ,
col.ticks= "orange",
mgp = c(0,1,1))
mtext(side=2,
text="Eixo Y: 300 numeros aleatorios",
line=2.2)
# Now the case of a plotting area holding several charts with similar axes:
# shared outer labels are written once in the outer margin.
par(pro)
par(oma = c(3,3,3,0), # outer margin shared by the whole window
mar = c(3,3,2,2), # margin of each individual plot
mfrow= c(2,2)) # number and layout of the plots (2 rows x 2 columns)
plot(1,1, ylab="", xlab="", type="n")
plot(2,1, ylab="", xlab="", type="n")
plot(1,2, ylab="", xlab="", type="n")
plot(2,2, ylab="", xlab="", type="n")
mtext(text = "Rótulo comum ao eixo x",
side = 1,
line = 0,
outer= TRUE)
mtext(text ="Rótulo comum ao eixo y",
side = 2,
line = 0,
outer= TRUE)
mtext(text ="Vários gráficos numa janela só",
side = 3,
line = 0,
outer= TRUE,
cex = 1.5)
# Another chart type: the histogram.
par(pro)
set.seed(123)
# generate two normal samples with different parameters, see ?rnorm
x <- rnorm(500, 8, 5)
y <- rnorm(500, 0, 2)
plot(x, xlab="Amostra X")
plot(y, xlab="Amostra Y")
hist(x, xlab="Amostra X")
hist(x,
breaks = 20,
ylim = c(0,100),
xlab = "Amostra X",
main = " Histograma com 20 intervalos")
hist(y,
breaks = 20,
ylim = c(0,100),
xlab = "Amostra Y",
main = " Histograma com 20 intervalos")
# Combining the two samples produces a right-skewed distribution.
z <- cbind(x,y)
hist(z,
breaks = 20,
main = "Histograma de distribuição assimétrica à direita")
abline(v = mean(z), # draw the sample mean
col = "blue",
lwd = 2)
abline(v = median(z), # draw the sample median
col = "red",
lwd = 2)
# Left-skewed distribution
x2 <- rnorm(500,-8, 5)
y2 <- rnorm(500, 0, 2)
z2 <- cbind(x2,y2)
hist(z2,
breaks = 20,
main = "Histograma de distribuição assimétrica à esquerda")
abline(v = mean(z2),
col = "blue",
lwd = 2)
abline(v = median(z2),
col = "red",
lwd = 2)
### Boxplot
# boxplot() draws the five-number summary, see ?boxplot
boxplot(z2)
boxplot(z2,
horizontal = T,
names = c("Valor1","Valor2"),
range = 1.5,
notch = T)
# Now let's see how to group by factors, using the "selecao" file.
#
# boxplot(formula, data = ...), formula: values ~ groups
selecao <- read.csv2("selecao.csv", header=FALSE)
boxplot(selecao$V2 ~ selecao$V1, data=selecao)
# Rotate the group names so they all fit (las = 2)...
boxplot(selecao$V2 ~ selecao$V1, data=selecao, las=2)
# And add automatic colors...
boxplot(selecao$V2 ~ selecao$V1, data=selecao, las=2, col=rainbow(27))
# Color ramps that can be used (among others): heat.colors(), terrain.colors(), topo.colors() and cm.colors().
#
# The RColorBrewer package creates palettes; gray levels come from gray().
#
# Example: gray(0:10/15) produces gray levels.
#
# Finally, some axis titles, placed conveniently so they don't get in the way
# (see the options of plot and par).
#
# And to save the chart, nothing simpler: open a pdf device first...
pdf("meu_boxplot.pdf")
par(pro)
par(tcl=0.4,
mgp=c(3.5,0.5,0),
cex.lab=1.5,
mai=c(1,1,0.5,0.5))# first value pushes the axis title out, second pushes the labels
boxplot(selecao$V2 ~ selecao$V1,
data=selecao,
xlab ="Campanhas",
ylab ="Metros",
las=2,
col=rainbow(27))
title(main = "Meu Boxplot final",
cex.main = 2,
font.main = 3)
dev.off()
# Running this script produces no console output, but it writes the requested
# chart into the working directory.
#
# Other devices are also available: win.metafile(), png(), jpeg(), bmp(), tiff(), xfig() and postscript()
# END OF GRAPHICS 1
|
e8cf273737f4ca5c73dc812efb3e0c50d0a68c04
|
bfe4fa7d35d25dbd4749c7db18284630743f943b
|
/for channels/make_permutation_test_data.R
|
e1f05b4f7be24a3cf1401cdf43722d89142ce242
|
[] |
no_license
|
innertron/REU_Kam
|
0ccfe2d4e178b241cdf836d9c066188dbbd65e82
|
bf4028b193f13cc202f66cd28963290722b312ac
|
refs/heads/master
| 2021-01-17T20:16:07.187792
| 2016-08-02T21:29:38
| 2016-08-02T21:29:38
| 61,396,055
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,800
|
r
|
make_permutation_test_data.R
|
# Convergent cross mapping (CCM) permutation test for every pair of channels.
# For each ordered pair (i, j) the script records the observed cross-map skill
# (rho) and the skill obtained on surrogate (shuffled) series, for later
# significance testing.
# Import the rEDM library
library(rEDM)
library(scales)
# you need to import the neural_data.txt file by hand:
#   neural_data <- dget(...) or press import above
# select the data and the time span of the experiment
time_span <- 1:2000
nd <- neural_data[time_span,]
# select the splined data instead, if desired
# nd <- splined_data
# BUG FIX: lib/pred are ROW ranges; length() on a data frame returns the number
# of COLUMNS (~32 here), which restricted simplex() to the first ~32 rows
# instead of the full 2000-point time span. Use nrow().
lib <- c(1, nrow(nd))
pred <- c(1, nrow(nd))
# shuffle_method = "random_shuffle"
shuffle_method <- "ebisuzaki"
permutation_test_data <- data.frame(to=integer(), from=integer(), rho=double(), random_shuffle=double())
# NOTE(review): growing permutation_test_data via rbind() inside nested loops is
# O(n^2); acceptable at this scale, but preallocate if the channel count grows.
for (i in 2:31)
{
Ch1 <- nd[,i]
# make surrogate data for significance testing
surr_ch1 <- make_surrogate_data(Ch1, method=shuffle_method)
# run the simplex algorithm to get the best embedding dimension
simplex_output <- simplex(Ch1, lib, pred, E=1:6)
bestE_i <- which.max(simplex_output$rho)
i2 <- i+1
for(j in i2:32)
{
# get the convergent cross map calculations
Ch1_xmap_Ch2 <- ccm(nd, E = bestE_i, lib_column = i, first_column_time = FALSE,
target_column = j, lib_sizes = 80, random_libs = TRUE, num_samples=20)
Ch2 <- nd[,j]
surr_ch2 <- make_surrogate_data(Ch2, method=shuffle_method)
# run the simplex algorithm to get the best embedding dimension for channel j
simplex_output <- simplex(Ch2, lib, pred, E=1:6)
bestE_j <- which.max(simplex_output$rho)
# get the ccm models
Ch2_xmap_Ch1 <- ccm(nd, E = bestE_j, lib_column = j, first_column_time = FALSE,
target_column = i, lib_sizes = 80, random_libs=TRUE, num_samples=20)
# take the means of the ccm's and get the standard deviation
ch1_map_2_mean <- data.frame(ccm_means(Ch1_xmap_Ch2), sd.rho = with(Ch1_xmap_Ch2,
tapply(rho, lib_size, sd)))
ch2_map_1_mean <- data.frame(ccm_means(Ch2_xmap_Ch1), sd.rho = with(Ch2_xmap_Ch1,
tapply(rho, lib_size, sd)))
# record ch1 xmap ch2
permutation_test_data <- rbind(permutation_test_data, data.frame(to=i, from=j, libs=ch1_map_2_mean$lib_size, rho=ch1_map_2_mean$rho, random_shuffle=NA))
# record ch2 xmap ch1
permutation_test_data <- rbind(permutation_test_data, data.frame(to=j, from=i, libs=ch2_map_1_mean$lib_size, rho=ch2_map_1_mean$rho, random_shuffle=NA))
# do the same for each surrogate series
for (sur_ind in 1:length(surr_ch1[1,]))
{
sur_dat <- data.frame(Time=nd[,1], sur_1=surr_ch1[,sur_ind], sur_2=surr_ch2[,sur_ind])
# get the convergent cross map calculations
Ch1_xmap_Ch2 <- ccm(sur_dat, E = bestE_i, lib_column = 1, first_column_time = TRUE,
target_column = 2, lib_sizes = 80, random_libs=TRUE, num_samples=20)
# get the ccm models
Ch2_xmap_Ch1 <- ccm(sur_dat, E = bestE_j, lib_column = 2, first_column_time = TRUE,
target_column = 1, lib_sizes = 80, random_libs=TRUE, num_samples=20)
# take the means of the ccm's and get the standard deviation
ch1_map_2_mean <- data.frame(ccm_means(Ch1_xmap_Ch2), sd.rho = with(Ch1_xmap_Ch2,
tapply(rho, lib_size, sd)))
ch2_map_1_mean <- data.frame(ccm_means(Ch2_xmap_Ch1), sd.rho = with(Ch2_xmap_Ch1,
tapply(rho, lib_size, sd)))
# record ch1 xmap ch2 (surrogate)
permutation_test_data <- rbind(permutation_test_data, data.frame(to=i, from=j,
libs=ch1_map_2_mean$lib_size, random_shuffle=ch1_map_2_mean$rho, rho=NA))
# record ch2 xmap ch1 (surrogate)
permutation_test_data <- rbind(permutation_test_data, data.frame(to=j, from=i,
libs=ch2_map_1_mean$lib_size, random_shuffle=ch2_map_1_mean$rho, rho=NA))
}
print(paste("plotted ",i, "and",j))
}
}
# dput(permutation_test_data, "permutation_test_data_spline_first_second.RData")
|
17955ef8141b5b8ae55908a89be3e5b0a28840c5
|
0049e7ad328d4de263fcba5e6049b35d3a3c1452
|
/ICSAP 01.R
|
df0ef75e333f8fa4faa1557a6190b6eb51309627
|
[] |
no_license
|
azous-nomar85/ICSAP
|
3ddba6ef5681ae1603d717bc050d27cdd8f25091
|
bf81738170ef582c2dc4672493e4d73e1f8d864d
|
refs/heads/master
| 2023-05-02T01:00:18.417581
| 2021-05-26T12:26:58
| 2021-05-26T12:26:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,382
|
r
|
ICSAP 01.R
|
# download dataset: ICSAP (primary-care-sensitive hospitalizations) from the
# Atlas of Human Development workbook, one sheet per aggregation level.
library("readxl")
AtlasBr <- read_excel("./Banco de dados/Atlas Desenvolvimento Humano/Registro Administrativo Total 2012 2017.xlsx",
sheet = "BRASIL", col_names = TRUE)
AtlasRegiao <- read_excel("./Banco de dados/Atlas Desenvolvimento Humano/Registro Administrativo Total 2012 2017.xlsx",
sheet = "REGIÃO", col_names = TRUE)
AtlasRM <- read_excel("./Banco de dados/Atlas Desenvolvimento Humano/Registro Administrativo Total 2012 2017.xlsx",
sheet = "REGIÃO_METROPOLITANA", col_names = TRUE)
AtlasUF <- read_excel("./Banco de dados/Atlas Desenvolvimento Humano/Registro Administrativo Total 2012 2017.xlsx",
sheet = "UNIDADE_DA_FEDERAÇÃO", col_names = TRUE)
AtlasMun <- read_excel("./Banco de dados/Atlas Desenvolvimento Humano/Registro Administrativo Total 2012 2017.xlsx",
sheet = "MUNICÍPIO", col_names = TRUE)
# graph Brazil - barplot of ICSAP (%) per year
library(ggplot2)
windows()  # NOTE(review): Windows-only device; use x11()/quartz() on other platforms
g <- ggplot(data = AtlasBr, aes(x = as.factor(ANO), y = PINTERSAP))
g +
geom_bar(stat = "identity", width = .7, fill = "steelblue") +
geom_text(aes(label = round(PINTERSAP, 1)), vjust=-.3, size=4.75) +
labs(title = "Internações por Condições Sensíveis à Atenção Primária (%) - Brasil",
x = "Ano",
y = "% Internações por Condições Sensíveis à Atenção Primária") +
theme(plot.title = element_text(size = 14),
axis.title.y = element_text(size = 13.5),
axis.title.x = element_text(size = 13.5),
axis.text = element_text(size = 13.5, colour = "black"),
axis.text.y=element_blank())
# graph Brazil - line chart of the same series
g <- ggplot(data = AtlasBr, aes(x = as.numeric(ANO), y = PINTERSAP, group = 1))
g +
geom_line(color = "blue", size = 1, linetype = "dashed") +
geom_point(shape = 23, size = 3, fill = "blue", color = "black") +
ylim(0, 50) +
labs(title = "Internações por Condições Sensíveis à Atenção Primária (%) - Brasil",
x = "Ano",
y = "% Internações por Condições Sensíveis à Atenção Primária") +
theme(plot.title = element_text(size = 13.49),
axis.title.y = element_text(size = 13),
axis.title.x = element_text(size = 13),
axis.text = element_text(size = 11, colour = "black"))
# graph Region - one line per macro-region
g <- ggplot(data = AtlasRegiao, aes(x = as.numeric(ANO), y = PINTERSAP, group = NOME))
g +
geom_point(aes(color = NOME, fill = NOME), shape = 23, size = 3) +
geom_line(aes(color = NOME), linetype = "dashed", size = 1) +
ylim(17.5, 30) +
labs(title = "Internações por Condições Sensíveis à Atenção Primária (%) - Brasil",
x = "Ano",
y = "% Internações por Condições Sensíveis à Atenção Primária") +
theme(plot.title = element_text(size = 13.49),
axis.title.y = element_text(size = 13),
axis.title.x = element_text(size = 13),
axis.text = element_text(size = 13, colour = "black"))
# graph state (UF) - horizontal barplot, sorted by ICSAP, year 2017 only
library(dplyr)
g <- ggplot(data = filter(AtlasUF, ANO==2017), aes(x = reorder(NOME, PINTERSAP), y = PINTERSAP))
g +
geom_bar(stat = "identity", width = .7, fill = "steelblue") +
coord_flip() +
labs(title = "Internações por Condições Sensíveis à Atenção Primária (%) - 2017",
x = "Unidades da Federação (UF)",
y = "% Internações por Condições Sensíveis à Atenção Primária") +
theme(axis.text = element_text(colour = "black"),
axis.text.x = element_blank(),
plot.title = element_text(size = 12)) +
geom_text(aes(label = round(PINTERSAP, 1)), hjust=1.5, vjust = 0.4, colour = "white", size = 4)
# graph metropolitan region (RM) - horizontal barplot, year 2017 only
library(dplyr)
g <- ggplot(data = filter(AtlasRM, ANO==2017), aes(x = reorder(NOME, PINTERSAP), y = PINTERSAP))
g +
geom_bar(stat = "identity", width = .7, fill = "steelblue") +
coord_flip() +
labs(title = "ICSAP (%) por Região Metropolitana - 2017",
x = "",
y = "% ICSAP") +
theme(axis.text = element_text(colour = "black", size = 10),
axis.text.x = element_blank(),
plot.title = element_text(size = 12)) +
geom_text(aes(label = round(PINTERSAP, 1)), hjust=1.5, vjust = 0.4, colour = "white", size = 4)
# summary of ICSAP by city, per year
tapply(AtlasMun$PINTERSAP, AtlasMun$ANO, summary)
# graph city - density, one curve per year
library(dplyr)
g <- ggplot(data = AtlasMun, aes(x = PINTERSAP, color=factor(ANO)))
g +
geom_density(size = 1) +
labs(title = "Distribuição de Frequência das ICSAP (%) nos municípios",
x = "ICSAP (%)",
y = "Densidade",
colour = "Ano")
# conclusion: right-skewed
# Shapiro-Wilk test (shapiro.test accepts at most 5000 observations, hence the sample)
shapiro.test(sample(AtlasMun$PINTERSAP[AtlasMun$ANO==2017], size=5000,
replace=FALSE, prob=NULL))
# conclusion: we cannot assume normality
# Q-Q plot
library(car)
qqPlot(AtlasMun$PINTERSAP[AtlasMun$ANO==2017])
library(ggpubr)
ggqqplot(AtlasMun$PINTERSAP[AtlasMun$ANO==2017])
# conclusion: distribution is not normal
# graph city - density with a log transformation (shift by 10 to keep values positive)
library(dplyr) # load package
library(ggplot2) # load package
windows()
g <- ggplot(data = AtlasMun, aes(x = log(PINTERSAP+10), color=factor(ANO)))
g +
geom_density(size = 1) +
labs(title = "Distribuição de Frequência das ICSAP (%) nos municípios",
x = "ICSAP (%)",
y = "Densidade",
colour = "Ano")
|
2fed69c24b20c6b0c8d6b61d51ca78b792c7023a
|
c677dc67f698061557a7fa2622f72478fcd28cbc
|
/VIS1 (2).R
|
2b7e907f0a6a41639b45725ec3a26840b28ee2a5
|
[] |
no_license
|
pSwitakowski/RDataVisualization
|
4cae903d4335dcb00cc18e99f07e96356af2b68a
|
149d04b69a65c577d4641b4aa526eada89d90d8e
|
refs/heads/main
| 2023-06-07T09:35:36.226667
| 2021-07-08T21:33:48
| 2021-07-08T21:33:48
| 384,252,857
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,823
|
r
|
VIS1 (2).R
|
library(BCA)
library(car)
library(corrplot)
data(Eggs)
# 1. Próbka danych
head(Eggs)
# 2. Macierz korelacji
data_Eggs <- data.frame(as.numeric(Eggs$Month), as.numeric(Eggs$First.Week), as.numeric(Eggs$Easter), Eggs$Cases, Eggs$Egg.Pr, Eggs$Beef.Pr, Eggs$Pork.Pr, Eggs$Chicken.Pr, Eggs$Cereal.Pr)
colnames(data_Eggs) <- c("Month", "First Week", "Easter", "Cases", "Egg.Pr", "Beef.Pr", "Pork.Pr", "Chicken.Pr", "Cereal.Pr")
b<-cor(data_Eggs)
corrplot(b)
corrplot(b, method="number")
# 3. Wykres słupkowy ilości sprzedanych opakowań po jajkach, a numer tygodnia w roku
eggs_df<-data.frame(Eggs$Week,Eggs$Cases)
bar<-barplot(names.arg=eggs_df$Eggs.Week,
height=eggs_df$Eggs.Cases, ylim = c(0,200000),
cex.names=0.7,
col=ifelse(Eggs$Easter=="Pre Easter","lightblue",
ifelse(Eggs$Easter=="Easter","blue",
ifelse(Eggs$Easter=="Post Easter","red", "grey"))),
xlab = "Numer tygodnia",
ylab = "Ilość sprzedanych opakowań po jajkach",
main = "Ilość sprzedanych jajek, a numer tygodnia")
# 4. Wykres cen poszczególnych produktów w czasie
prices <- cbind(Eggs$Egg.Pr, Eggs$Beef.Pr, Eggs$Pork.Pr, Eggs$Chicken.Pr, Eggs$Cereal.Pr)
names(prices) <- c("Eggs", "Beef", "Pork", "Chicken", "Cereal")
matplot(Eggs$Week, prices, pch=23, bg=c("green", "yellow", "gray", "purple", "blue"),
type="o", col="black", ylim=c(40, 200), xlab='Numer tygodnia', ylab='Cena produktu')
legend('topleft', legend=c("Eggs", "Beef", "Pork", "Chicken", "Cereal"),
fill = c("green", "yellow", "gray", "purple", "blue"))
# 5. Wykres pudełkowy ilości sprzedanych jajek w zależności od miesiąca
boxplot(data_Eggs$Cases~data_Eggs$Month,
col="lightblue", xlab="Miesiąc",
ylab= "Sprzedaż",
main="Sprzedaż jajek w zależności od miesiąca.")
# 6. Ilość sprzedawanych jajek w poszczególnych okresach Wielkanocy (Przed, w trakcie i po Wielkanocy)
eggsPreEasterTime <- subset(Eggs, Easter=="Pre Easter")
numberOfCasesPreEaster <- sum(eggsPreEasterTime$Cases)
eggsEasterTime <- subset(Eggs, Easter=="Easter")
numberOfCasesEaster <- sum(eggsEasterTime$Cases)
eggsPostEasterTime <- subset(Eggs, Easter=="Post Easter")
numberOfCasesPostEaster <- sum(eggsPostEasterTime$Cases)
eggsCases = c(numberOfCasesPreEaster, numberOfCasesEaster, numberOfCasesPostEaster)
x <- barplot(eggsCases, main='Sprzedaż jajek w poszczególnych okresach Wielkanocy', col=c('lightblue', 'darkblue', 'lightgreen'),
names=c("Pre Easter", "Easter", "Post Easter"), ylim=c(0, 600000), ylab='Sprzedaż jajek', xlab='Okres Wielkanocy')
text(x,y=eggsCases+20000,labels=as.character(eggsCases))
# Boxplot
Boxplot(Cases~Easter, data=Eggs)
|
a2c2b5b4fb1d2312330e47c00be0127388ddd42a
|
a911cc443ac6df89fc920d1ad9d5b1248f3d327d
|
/man/BeersFlashProfile.Rd
|
896dc584e06fc387e54a8871bed7c844661067f9
|
[] |
no_license
|
cran/DistatisR
|
79190e558faed5e02ac7d65addae94c6c0e5d9f2
|
6ce918849afcc7ab960656ae4ed7d1fdac0d826c
|
refs/heads/master
| 2022-12-09T18:42:30.376327
| 2022-12-05T07:32:43
| 2022-12-05T07:32:43
| 17,678,844
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,180
|
rd
|
BeersFlashProfile.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BeersFlashProfile.R
\docType{data}
\name{BeersFlashProfile}
\alias{BeersFlashProfile}
\title{An example of an excel file
storing the Flash Profile of 6 (fictitious) assessors
evaluating 7 (imaginary) beers.
This excel file can be read by
\code{read.df.excel}.}
\source{
Abdi, H, & Valentin, D. (2007).
\url{https://personal.utdallas.edu/~herve/}
}
\description{
\code{BeersFlashProfile}:
An example of an excel file
storing the Flash Profile of 6 (fictitious) assessors
evaluating 7 (imaginary) beers.
This excel file can be read by
\code{read.df.excel}.
}
\details{
In this example of Flash Profiling
6 (fictitious) assessors evaluated 7 (imaginary) beers.
First, Each assessor chose a set of descriptors
suited to describe these beers and then ranked
(or rated in variations of the technique)
the beers for each dimension. Note that the descriptors
as well as the number of descriptors vary with the judges.
Note:
The names of the variables starts with the Judges ID (J1- to J6-).
Note: the data are stored in the
Excel Sheet called \code{Rankings} of the excel
file \code{BeersFlashProfile.xlsx}.
}
\section{FileName}{
BeersFlashProfile.xlsx
}
\examples{
# get the path and file name
path2file <- system.file("extdata",
"BeersFlashProfile.xlsx", package = 'DistatisR')
# read the data in excel file with read.df.excel
beerDataFlash <- read.df.excel(path = path2file,
sheet = 'Rankings')$df.data
# the Flash Profiling data are now in the data.frame beerDataFlash
}
\references{
Abdi, H., & Valentin, D. (2007).
Some new and easy ways to describe, compare,
and evaluate products and assessors.
In D., Valentin, D.Z. Nguyen, L. Pelletier (Eds)
\emph{New trends in sensory evaluation
of food and non-food products}.
Ho Chi Minh (Vietnam):
Vietnam National University & Ho Chi Minh City Publishing House.
pp. 5-18.
}
\seealso{
BeersProjectiveMapping BeersProjectiveMapping_xlsx
}
\author{
Hervé Abdi
}
\keyword{DistatisR}
\keyword{datasets}
|
ae8c886f3116d6c0af2b628e57baf6c2182bdbeb
|
f6f2017240539aba3698896ebaecec7782de28e7
|
/psm&iptw.R
|
44bc34f284d71ea80b49a5719a75f5bd94f3f43f
|
[] |
no_license
|
alaskaguo/R
|
48c43cef83c3b6ccbaeab66751f6572d3f6c1318
|
368825e0c7ee2f207e75a87323612aedb9b1ef11
|
refs/heads/master
| 2021-10-10T00:59:03.233836
| 2021-09-29T14:46:50
| 2021-09-29T14:46:50
| 213,532,773
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,724
|
r
|
psm&iptw.R
|
# Propensity-score matching & IPTW analysis of procedure data.
# NOTE(review): rm(list=ls()) in a script is an anti-pattern; kept as-is.
rm(list=ls())
library(readxl)
library(readr)
library(stringr)
library(ggplot2)
library(survival)
library(survminer)
library(dplyr)
library(Hmisc)
library(tableone)
library(Matching)
library(survey)
library(MatchIt)
path = 'f:\\myresearch\\房颤数据挖掘'
setwd(path)
data = read.csv('data_filled.csv',row.names = 1)
## Build a table counting the number of unique values per column
unique_count = as.list(apply(data,2,function(x){length(unique(x))}))
unique_status <- data.frame()
for (i in 1:length(unique_count)){
unique_status <- rbind(unique_status,data.frame(names(unique_count[i]),unique_count[i][[1]] ))
}
colnames(unique_status) <- c('items','unique_status')
## Drop columns that contain only a single value (no information)
data = data[,!colnames(data)%in%unique_status[unique_status$unique_status ==1,1]]
## Variables with fewer than 5 distinct values become character (categorical)
to_char = unique_status[unique_status$unique_status<5,][,'items']
data[,colnames(data)%in%to_char] <- apply(data[,colnames(data)%in%to_char],2,function(x)as.character(x))
data[,!colnames(data)%in%to_char] <- apply(data[,!colnames(data)%in%to_char],2,function(x)as.numeric(x))
str(data)
# Treatment indicator: TRUE when Procedure_1 == '1'
data$Procedure_1 <- ifelse(data$Procedure_1 == '1',T,F)
table(data$Procedure_1)
## Shapiro-Wilk normality test for every continuous variable
contiouns_vars = select_if(data,is.numeric)
plist = as.data.frame(apply(contiouns_vars, 2, function(x)shapiro.test(x)$p.value))
## Rename the p-value column
colnames(plist) = "pvalue"
## Variables with p < 0.05 are treated as non-normally distributed (median/IQR in Table 1)
nonnormal_vars = rownames(subset(plist,pvalue < 0.05))
strata = 'Procedure_1'
## Covariate names: every column except the stratifier and Surgery_2
vars = colnames(data)[!colnames(data)%in%c(strata,'Surgery_2')]
## Create the Table 1 object, stratified by treatment group
tab_Unmatched <- CreateTableOne(vars = vars, strata = strata, data = data,
addOverall = TRUE)
## Print Table 1 with standardized mean differences (SMD)
tabUnmatched = as.data.frame(print(tab_Unmatched,
explain = F,
showAllLevels = F,
nonnormal = nonnormal_vars,
printToggle = F,
dropEqual = T,
varLabels = T,
smd = T))
write.csv(tabUnmatched,"匹配前基线表.csv")
## Fit the propensity-score model on all covariates whose names end in "_psm"
var_psm = colnames(data)[str_detect(colnames(data),'_psm')]
match_formula = as.formula(str_c(c('as.numeric(',strata,')~',str_c(var_psm,collapse = "+")))) ## build the formula string
match_formula
psModel <- glm(match_formula,
family = binomial(link = "logit"),
data = data)
# compute the propensity score (ps)
data$psvalue <- predict(psModel,type="response")
data$Procedure_1
# compute the inverse-probability-of-treatment weights (IPTW):
# 1/ps for the treated, 1/(1-ps) for the controls
data$iptw <- ifelse(data$Procedure_1 == T,1/data$psvalue,1/(1-data$psvalue))
# 1 - build the IPTW-weighted survey design
dataIPTW = svydesign(ids=~1,data=data,weights= ~iptw)
# 2 - rebuild Table 1 on the weighted sample
tab_IPTW = svyCreateTableOne(vars=vars, strata=strata,data=dataIPTW,test=T)
# standardized-difference results
print(tab_IPTW,showAllLevels=TRUE,smd=TRUE)
tableIPTW = as.data.frame(print(tab_IPTW,
explain = F,
showAllLevels = F,
nonnormal = nonnormal_vars,
printToggle = F,
dropEqual = T,
varLabels = T,
smd = T))
write.csv(tableIPTW,"IPTW基线表.csv")
## Propensity-score matching (1:1 nearest neighbour)
match.it = matchit(match_formula, data = data, method="nearest" ,ratio=1) ## build the matching object
df_match <- match.data(match.it)
## Table 1 on the matched sample
table_mathced <- CreateTableOne(vars = vars, strata = "Procedure_1", data = df_match, test = TRUE,
addOverall = TRUE)
tablemathced = as.data.frame(print(table_mathced,
explain = F,
showAllLevels = F,
nonnormal = nonnormal_vars,
printToggle = F,
dropEqual = T,
varLabels = T,
smd = T))
write.csv(tablemathced,"匹配后基线表.csv")
table_1 = cbind(tabUnmatched,tablemathced,tableIPTW)
write.csv(table_1,'table_1.csv')
# check whether any confounder still has SMD > 10% (argument is a CreateTableOne object)
addmargins(table(ExtractSmd(tab_IPTW) > 0.1))
library(ggplot2)
# extract the SMD values for the Love plot (one row per covariate)
dataPlot <- data.frame(variable=rownames(ExtractSmd(tab_Unmatched)),
Unmatched=as.numeric(ExtractSmd(tab_Unmatched)),
IPTW=as.numeric(ExtractSmd(tab_IPTW)),
matched = as.numeric(ExtractSmd(table_mathced))
)
# reshape wide -> long so each method/variable pair is one row
# NOTE(review): melt() belongs to reshape2 (or data.table), neither of which is
# attached above — confirm it is loaded elsewhere or add library(reshape2).
dataPlotMelt<-melt(data= dataPlot,
id.vars=c("variable"),
variable.name= "Method",
value.name= "SMD")
# order the variables by their unmatched SMD
varNames <-as.character(dataPlot$variable)[order(dataPlot$Unmatched)]
#
dataPlotMelt$variable<- factor(dataPlotMelt$variable,
levels = varNames)
# draw the Love plot: one point per method, 0.1 reference line
ggplot(data = dataPlotMelt,
mapping = aes(x = variable, y = SMD,
group = Method,
color = Method,
shape = Method )) +
#geom_line() +
geom_point(size=4) +
geom_hline(yintercept = 0.1,
color = "red",
lty=2,
size = 0.1) +
coord_flip() +
theme_bw(base_size = 18)
## Kaplan-Meier curves: unadjusted, PS-matched, and IPTW-weighted.
time <- 'Time_to_relapse'
status <- 'as.numeric(recurrence)'
strata <- 'Procedure_1'
## Build Surv(time, status) ~ strata from the strings above; only the data set
## changes between the three fits below.
km_formula <- as.formula(str_c(c('Surv(',time,',',status,') ~', strata),collapse = ''))
km_formula
# 1) Unadjusted curve on the full cohort.
# Note: use surv_fit (survminer wrapper), not survfit, so the formula
# environment is resolved correctly for ggsurvplot.
fit_1 <- surv_fit(km_formula, data = data)
ggsurvplot(fit_1,
           pval = TRUE, conf.int = F,
           risk.table = TRUE, # Add risk table
           risk.table.col = "strata", # Change risk table color by groups
           linetype = "strata", # Change line type by groups
           surv.median.line = "hv", # Specify median survival
           ggtheme = theme_bw(), # Change ggplot2 theme
           palette = c("#E7B800", "#2E9FDF"),
           data=data,
           fun = "event"
)
# 2) Curve on the propensity-score-matched sample.
fit_2 <- surv_fit(km_formula, data = df_match)
ggsurvplot(fit_2,
           pval = TRUE, conf.int = F,
           risk.table = TRUE, # Add risk table
           risk.table.col = "strata", # Change risk table color by groups
           linetype = "strata", # Change line type by groups
           surv.median.line = "hv", # Specify median survival
           ggtheme = theme_bw(), # Change ggplot2 theme
           palette = c("#E7B800", "#2E9FDF"),
           data=df_match,
           fun = "event"
)
# 3) IPTW-weighted curve on the full cohort.
fit_3<- surv_fit(km_formula,
                 weights=data$iptw,# weighted survival object
                 data = data) # fitted on the FULL data set
# BUG FIX: this plot previously passed data = df_match (copy-paste from the
# matched plot) although fit_3 was fitted on `data` with IPTW weights; the
# data argument must match the data the model was fitted on.
ggsurvplot(fit_3,
           pval = TRUE, conf.int = F,
           risk.table = TRUE, # Add risk table
           risk.table.col = "strata", # Change risk table color by groups
           linetype = "strata", # Change line type by groups
           surv.median.line = "hv", # Specify median survival
           ggtheme = theme_bw(), # Change ggplot2 theme
           palette = c("#E7B800", "#2E9FDF"),
           data=data,
           fun = "event"
)
|
65b787b171d027e634a1bc13712fd0f190022036
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/9126_0/rinput.R
|
14ee056d37c0ca1a81343316f3af0d2f0d17326b
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Read a Newick phylogenetic tree, unroot it, and write the unrooted tree out.
library(ape)
testtree <- read.tree("9126_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="9126_0_unrooted.txt")
|
ee8cc966b35fd41a1ff8954f16d80e2234a32933
|
0d7702f9e21eace56dc305889f81871a5434c3e9
|
/hdc/aggrPlots_nucl.R
|
5b9fbccb086b51976d7c13d5b10f9ba16a57730f
|
[] |
no_license
|
sowmyaiyer/new_repo
|
71f37b48fd502960488462a94b789ec21fd9482e
|
6fe37a6abbb5e339793710eb04c5f9e017ceb072
|
refs/heads/master
| 2021-01-10T03:35:13.789393
| 2016-03-10T18:40:40
| 2016-03-10T18:40:40
| 53,608,144
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,504
|
r
|
aggrPlots_nucl.R
|
# Aggregate (mean) signal around motifs for five time points, plotted to PDF.
# Usage:
#   Rscript aggrPlots_nucl.R <f_0h> <f_30min> <f_2h> <f_4h> <f_24h> \
#                            <outfile.pdf> <title> <motif_length>
# Each input file is a tab-separated matrix (rows = motifs, cols = positions).
f_0h <- commandArgs(TRUE)[1]
f_30min <- commandArgs(TRUE)[2]
f_2h <- commandArgs(TRUE)[3]
f_4h <- commandArgs(TRUE)[4]
f_24h <- commandArgs(TRUE)[5]
outfile <- commandArgs(TRUE)[6]
title <- commandArgs(TRUE)[7]
# BUG FIX: motif_length previously read argument 7 (the title, yielding NA);
# it is the 8th command-line argument.
motif_length <- as.numeric(commandArgs(TRUE)[8])
df_0h <- as.data.frame(read.table(f_0h, row.names=1, sep="\t"))
cat("read\n")
df_30min <- as.data.frame(read.table(f_30min, row.names=1, sep="\t"))
cat("read\n")
df_2h <- as.data.frame(read.table(f_2h, row.names=1, sep="\t"))
cat("read\n")
df_4h <- as.data.frame(read.table(f_4h, row.names=1, sep="\t"))
cat("read\n")
# BUG FIX: the 24h matrix was never read although cmeans_24h / df_24h are used
# below (the original script crashed at the 24h line and legend).
df_24h <- as.data.frame(read.table(f_24h, row.names=1, sep="\t"))
cat("read\n")
# column means = average signal at each position relative to the motif
cmeans_0h <- colMeans(df_0h)
cmeans_30min <- colMeans(df_30min)
cmeans_2h <- colMeans(df_2h)
cmeans_4h <- colMeans(df_4h)
cmeans_24h <- colMeans(df_24h)
pdf(outfile)
# x axis: positions centered on the motif (200 bp flank + motif width)
left <- -(200+motif_length-1)/2
right <- (200+motif_length-1)/2
cat(length(seq(left, right,1)),"\n")
# BUG FIX: ylim now includes the 24h series so its curve is never clipped.
plot(seq(left, right,1), cmeans_0h, type="l", col="magenta", ylim=c(0,max(c(cmeans_0h,cmeans_30min,cmeans_2h,cmeans_4h,cmeans_24h))), xlab="distance from motif", ylab="mean signal", bty="n", main=title, cex.main=0.65)
lines(seq(left, right,1),cmeans_30min, col="red")
lines(seq(left, right,1),cmeans_2h, col="orange")
lines(seq(left, right,1),cmeans_4h, col="yellow")
lines(seq(left, right,1),cmeans_24h, col="brown")
legend("topright",c(paste("0h(n =",nrow(df_0h),")"), paste("30min(n =",nrow(df_30min),")"), paste("2h(n =",nrow(df_2h),")"),paste("4h(n =",nrow(df_4h),")"), paste("24h(n =",nrow(df_24h),")")), col=c("magenta","red","orange","yellow","brown"), lty="solid", bty="n",cex=0.6)
dev.off()
|
5b1b552cc1dfafed7cfcc08ee9035de3d5dcda00
|
90930d9c512ffc52217fc75cc9c8b9443dc20665
|
/LogReg_hw3_stocks.R
|
d9fa81f87977f7266a35ce95abe448e0f36a09bc
|
[] |
no_license
|
awasthi-swapnil/RcodeProjects
|
ad15a998274686ea6c06022737bf4cad3339ab34
|
afc98a6ebbb8c6f273c3655b93b793e0662de8b1
|
refs/heads/master
| 2020-04-26T19:30:33.570311
| 2017-06-01T09:06:29
| 2017-06-01T09:06:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,165
|
r
|
LogReg_hw3_stocks.R
|
# Logistic regression on S&P 500 data: predict next-day price direction.
# read source file
data <- read.csv(file='SP500.csv')
head(data, 10)
colnames(data)
# drop the first 6 rows and first 6 columns (junk in the source file)
data <- data[7:nrow(data),7:length(data)]
# predictors: all columns of the trimmed frame except the first
stocksdb <- data[,2:length(data)]
# BUG FIX: this backup was previously taken BEFORE stocksdb existed, which
# errors in a fresh R session; take the copy after stocksdb is created.
stocksdb1 <- stocksdb
# Outcome: 1 if the next day's adjusted close is higher than today's, else 0.
# Vectorized form of the original row loop; the last row has no "next day"
# and therefore stays NA, exactly as the original loop produced.
stocksdb$PriceDirection <- ifelse(c(data$Adj.Close[-1], NA) > data$Adj.Close, 1, 0)
# creating test and training sets as per instructions: random 60-70% split
rnum <- (runif(1, .60, .70))
rnum
part <-sample(1:nrow(stocksdb), rnum * nrow(stocksdb))
trng.d <- stocksdb[part,]
test.d <- stocksdb[-part,]
# logistic regression model on all predictors
lr.stocks <- glm(PriceDirection ~ ., family=binomial(link="logit"), data=trng.d)
summary(lr.stocks)
# predicted probabilities on the held-out set
pred <- predict(lr.stocks,newdata=test.d, type="response")
head(pred)
# converting predictions > 50% to 1 and the rest to 0
pred_10 <- ifelse(pred > 0.5,1,0)
head(pred_10)
# confusion matrix
table(test.d$PriceDirection ,pred_10)
# BUG FIX: the error rate was previously computed from t1 one line BEFORE t1
# was defined. Compute it directly from the predictions; na.rm drops the row
# whose outcome is undefined (no next day).
misClasificError <- mean(test.d$PriceDirection != pred_10, na.rm = TRUE)
t1<-as.data.frame(test.d$PriceDirection != pred_10)
misClasificError1 <- ifelse(t1[,] == 'TRUE',1,0)
print(paste('Accuracy', 1-misClasificError))
t1
|
9208241db534d0665394766395c12803bcf6f095
|
4b4fd76faaa7235684bb724848078dd91c7716cf
|
/Code/BodyCountPlots.R
|
27fb6a4476913cc0aab29716e741164f0c8e96c7
|
[] |
no_license
|
AchalSuresh/R-coding
|
3d190ad99516d34c82e63171785fbf4ac30a2b81
|
50a6793f81c94dfc6b2363e75c91f6a354522c4d
|
refs/heads/master
| 2021-03-23T23:15:59.586706
| 2020-04-01T21:44:29
| 2020-04-01T21:44:29
| 247,491,309
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,878
|
r
|
BodyCountPlots.R
|
# BU510.650 Data Analytics  Week 1
# Plotting data using R
# Example: Film death counts
# Set your working directory. In my case, it is "C:/R-Work".
# NOTE(review): setwd() mutates the working directory for the whole session;
# every relative file read below depends on it. Prefer full paths in scripts.
setwd("./R-coding/Data")
# Read the data from the file "filmdeathcounts.csv" into a data frame called "BodyCountData".
BodyCountData = read.csv("filmdeathcounts.csv")
# Let's change the names of the columns in the data frame to shorter and more descriptive names. We use "colnames" function and assign a vector of new names. The function c() indicates the list of names is a vector.
colnames(BodyCountData) <-c("Film","Year","Bodies","MPAA","Genre","Director","Minutes","IMDB")
# Let's add another column, titled "FilmCount", and fill it with "1" for each film.
BodyCountData["FilmCount"] <- 1
# Let's add one more column, "BodyPerMin", which will show the number of bodies per minute for each film.
# (After the rename above, column 3 = Bodies and column 7 = Minutes.)
BodyCountData["BodyPerMin"] <- BodyCountData[,3] / BodyCountData[,7]
# Next, we create a table that will show the total number of bodies for each year. We use the "tapply" function. In the following line, tapply will check what values appear in the "Year" column, and for each year, it will "sum" together the numbers in the "Bodies" column of the films made in that year.
t1=tapply(BodyCountData$Bodies,BodyCountData$Year,FUN="sum")
# We can now create a barplot that shows the total number of bodies in each year.
barplot(t1,xlab="year",ylab="Total # Bodies")
# Load the library "lattice", which gives us further capabilities for plotting.
library(lattice)
# "barchart" comes with the library "lattice", for example.
barchart(t1)
# Create a new table t2, which will have our data in decreasing order of the third column, which is the number of bodies in each movie
t2 <- BodyCountData[order(BodyCountData[,3],decreasing=TRUE),]
# Pick only the first 10 rows of t2 - these are the top-10 movies in terms of body count
t2 <- t2[1:10,]
# Pick only the first columns of t2 (Film name and body count)
t2<-t2[c(1,3)]
# Create a barchart that shows the number of bodies for each movie in t2
barchart(Film ~ Bodies, data=t2)
# Use the "table" function to find out how many films fell into each MPAA category
t3 <- table(BodyCountData$MPAA)
t3
# Create an xy-plot of all movies, plotting MPAA rating on the x-axis and the body count on the y-axis
xyplot(Bodies~MPAA,data=BodyCountData,col="black")
# We could also create a boxplot to capture the same information
boxplot(Bodies~MPAA,data=BodyCountData)
# "bwplot" is the same as boxplot - it comes with the "lattice" library, which we loaded earlier
bwplot(Bodies~MPAA,data=BodyCountData)
# Create an xy-plot of all movies, plotting IMDB ratings on the x-axis and the body count on the y-axis
xyplot(Bodies~IMDB,data=BodyCountData)
# Create an xy-plot of all movies, plotting IMDB ratings on the x-axis and the "bodies per minute" on the y-axis
xyplot(BodyPerMin~IMDB,data=BodyCountData)
|
7c8e4d0850bc5b0efe2a36bb528bdb34b99b00a2
|
07424c50c8d808296c11c8d01bf9bffc1f1d63a1
|
/src/old_code/organism_modeling.R
|
6afb4517e9fa55afef5a3fd00add84f9ccad8f76
|
[] |
no_license
|
colebrookson/microplastics-daphnia-dynamics
|
13519c29181fce0f2f1285e0cf9f547a65d146f7
|
a2c93ca1f3397cc39de4bf255cd42b24d22e575b
|
refs/heads/master
| 2023-08-21T12:36:15.861808
| 2021-09-15T22:02:52
| 2021-09-15T22:02:52
| 265,033,816
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 549
|
r
|
organism_modeling.R
|
##########
##########
# This code contains the analysis for the fitting of the the individual component as a
# part of the larger analysis of the effect of microplastics on daphnia
##########
##########
# AUTHOR: Cole B. Brookson
# DATE OF CREATION: 2020-08-25
##########
##########
# set-up =======================================================================
library(here)
library(tidyverse)
library(rstan)
library(gdata)
library(bayesplot)
library(brms)
library(beepr)
library(parallel)
detectCores()
source(here('./code/data_cleaning.R'))
|
bf26703aa44cc0ea027f05d02f4b297a6de343cb
|
7b73406c16269f514e7690aaf73e9bb35dbaf125
|
/R/sharpen.R
|
a0bb73f71a423728b5d15b1aca9a4188a009e5f0
|
[] |
no_license
|
macressler/dull
|
0c1dcab9f3f9a1651a0d17c46722daff1b7f13fd
|
b7b074ed59ae900861ef01bc73153a0fe328ee50
|
refs/heads/master
| 2021-01-17T21:57:09.619694
| 2015-09-10T03:57:08
| 2015-09-10T03:57:08
| 42,360,637
| 1
| 0
| null | 2015-09-12T14:52:40
| 2015-09-12T14:52:40
| null |
UTF-8
|
R
| false
| false
| 1,774
|
r
|
sharpen.R
|
#' Test a dull application
#'
#' A set of functions to expand upon the \code{expect_*} functions of the
#' \code{testthat} package.
#'
#' @param .app A dull application
#' @param method A character vector of the HTTP method with which to query
#' \code{url}
#' @param uri A character vector of the resource path to query
#' @param status An integer, the expected status
#' @param \dots Additional parameters to pass to \code{\link[httr]{VERB}}
#'
#' @details
#' These functions are currently very limited and would be considered
#' experimental if not for the \code{testthat} package. There is plenty more to come in the
#' future.
#'
#' \code{uri} must begin with a \dQuote{/} and is used to construct the complete
#' URL queried. The host name and port used are extracted from the \code{.app}
#' object.
#'
#' @name sharpen
NULL
#' @importFrom httpuv startDaemonizedServer stopDaemonizedServer
#' @importFrom httr VERB
#' @importFrom testthat expect_equal
#' @export
#' @rdname sharpen
expect_response <- function(.app, method, uri, status, ...) {
  # Build the target URL from the app's configured host and port.
  request_url <- paste0(.app$host, ':', .app$port, uri)
  # Serve the app in the background for the duration of this expectation;
  # the server is torn down again even if the request or expectation fails.
  server <- httpuv::startDaemonizedServer(host = .app$host, port = .app$port, app = .app)
  on.exit(httpuv::stopDaemonizedServer(server))
  response <- httr::VERB(method, url = request_url, ...)
  # bquote()/eval() inlines the observed status code so testthat's failure
  # message reports the actual value rather than an opaque expression.
  eval(bquote(testthat::expect_equal(.(httr::status_code(response)), status)))
}
#' @export
#' @rdname sharpen
expect_get <- function(.app, uri, status, ...) {
  # Convenience wrapper: GET-specific form of expect_response().
  expect_response(.app, method = 'GET', uri = uri, status = status, ...)
}

#' @export
#' @rdname sharpen
expect_post <- function(.app, uri, status, ...) {
  # Convenience wrapper: POST-specific form of expect_response().
  expect_response(.app, method = 'POST', uri = uri, status = status, ...)
}

#' @export
#' @rdname sharpen
expect_put <- function(.app, uri, status, ...) {
  # Convenience wrapper: PUT-specific form of expect_response().
  expect_response(.app, method = 'PUT', uri = uri, status = status, ...)
}
|
32fd0c27414707ab9884dfa08ebd8e6f0601986b
|
aec457c82864482b8a9b849520855d499ed923ee
|
/man/LAR.property.Rd
|
2600c69ac2f4952289fe61f5dc0aec280347d9cb
|
[] |
no_license
|
smisaleh/STROMA4
|
2a9d6921b197be69425802a01f02bd0f0970ad1c
|
9a07eab67b4d7ca9cea70f58d77205854d2c0a00
|
refs/heads/master
| 2020-12-24T11:52:24.849482
| 2017-03-24T03:29:53
| 2017-03-24T03:29:53
| 73,111,711
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 698
|
rd
|
LAR.property.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LAR.property.R
\docType{data}
\name{LAR.property}
\alias{LAR.property}
\title{Genelist for LAR.property}
\format{An object of class \code{data.frame} with 1205 rows and 2 columns.
\itemize{
\item{Gene.Symbol: }{ID of transcript (HGNC where available)}
\item{Direction: }{Direction of expression (up/down)}
}}
\source{
Identified in related publication
}
\usage{
data('LAR.property')
}
\value{
An object of class \code{data.frame} with 1205 rows and 2 columns.
}
\description{
This genelist is used to assign the related property using the assign.property function
}
\examples{
data('LAR.property')
}
\keyword{List}
|
36f14cf92ec9884e3c3a18f39fb5a16ac4bd7dc0
|
29de97c348ea6ca6b0e3ad69fba1cb7319802c8f
|
/week_05/weekend_homework/shiny_weekend_homework/app.R
|
a369f7cdc8318bee0b3e5f54eb7c0b9d90953050
|
[] |
no_license
|
Sid-Rodrigues/codeclan_homework_sid
|
ddaa76ecbbde0fcbfa697983f4c56b556faaf2ad
|
fa63de8743667ba59f3003ae8ec096e602537592
|
refs/heads/master
| 2022-12-25T21:24:55.705175
| 2020-10-10T12:03:22
| 2020-10-10T12:03:22
| 275,930,815
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,187
|
r
|
app.R
|
library(shiny)
library(dplyr)
library(ggplot2)
library(shinythemes)
library(CodeClanData)
library(datasets)
# Define the UI: three tabs — "Search" (cascading filters + game table),
# "Sales" (yearly totals and per-year rating share), and "Popularity"
# (top-5 games/publishers for a chosen rating and year).
ui <- fluidPage(

    # Application title
    titlePanel("Game Sales Info"),

    tabsetPanel(
        tabPanel(
            "Search",
            fluidRow(
                # Rating is the root filter; genre/platform/year start empty
                # and are populated server-side via updateSelectInput().
                column(3,
                       radioButtons('rating',
                                    'Select Rating',
                                    choices = c("E", "E10+", "T", "M"),
                                    inline = TRUE)
                ),
                column(3,
                       selectInput("genre",
                                   "Select Genre",
                                   choices = NULL
                       )
                ),
                column(3,
                       selectInput('platform',
                                   'Select Platform',
                                   choices = NULL
                       )
                ),
                column(3,
                       selectInput('year',
                                   'Select Year',
                                   choices = NULL)
                ),
            ),
            fluidRow(
                # NOTE(review): this checkbox group is never read by the
                # server (output$game_table ignores input$checked) — confirm
                # whether column selection was meant to be wired up.
                column(6,
                       checkboxGroupInput("checked",
                                          "Select Fields",
                                          choices = c("Publisher", "Developer", "Sales", "Critic Score", "User Score"),
                                          inline = TRUE),
                )
            ),
            fluidRow(
                column(12,
                       tableOutput("game_table"))
            )
        ),
        tabPanel(
            "Sales",
            fluidRow(
                column(width = 6, offset = 7,
                       selectInput('year_sales',
                                   'Select Year',
                                   choices = sort(unique(game_sales$year_of_release)))
                )
            ),
            fluidRow(
                column(6,
                       plotOutput("total_sales")),
                column(6,
                       plotOutput("year_sales"))
            )
        ),
        tabPanel(
            "Popularity",
            fluidRow(
                column(6,
                       radioButtons('rating_popularity',
                                    'Select Rating',
                                    choices = c("E", "E10+", "T", "M"),
                                    inline = TRUE)
                ),
                column(6,
                       selectInput('year_popularity',
                                   'Select Year',
                                   choices = NULL)
                )
            ),
            fluidRow(
                column(12,
                       plotOutput("top_five_games")
                )
            ),
            fluidRow(
                column(12,
                       plotOutput("top_five_publishers")
                )
            )
        )
    )
)
# Server logic: keeps the cascading filter inputs in sync with each other
# and renders the table and plots for the three tabs.
server <- function(input, output, session) {

    #updating genre selectInput based on rating
    observe({
        filtered_genre <- game_sales %>%
            filter(rating == input$rating) %>%
            select(genre)
        updateSelectInput(session, "genre", "Select Genre", choices = unique(filtered_genre))
    })

    #updating platform selectInput based on rating and genre
    observe({
        filtered_platform <- game_sales %>%
            filter(rating == input$rating) %>%
            filter(genre == input$genre) %>%
            select(platform)
        updateSelectInput(session, "platform", "Select Platform", choices = unique(filtered_platform))
    })

    #updating year selectInput based on rating, genre and platform
    observe({
        filtered_year <- game_sales %>%
            filter(rating == input$rating) %>%
            filter(genre == input$genre) %>%
            filter(platform == input$platform) %>%
            select(year_of_release) %>%
            arrange(year_of_release)
        updateSelectInput(session, "year", "Select Year", choices = unique(filtered_year))
    })

    # Reactive for the Popularity tab: the five highest-selling rows for the
    # chosen rating and year (group_by(rating) leaves one group, so slice()
    # takes the overall top 5 after sorting by sales).
    top_five_filtered <- reactive({
        game_sales %>%
            select(name, platform, year_of_release, sales, rating, publisher) %>%
            filter(rating == input$rating_popularity) %>%
            filter(year_of_release == input$year_popularity) %>%
            group_by(rating) %>%
            arrange(desc(sales)) %>%
            slice(seq_len(5))
    })

    # Search tab: first 10 rows matching all four cascading filters.
    output$game_table <- renderTable({
        game_sales %>%
            filter(rating == input$rating) %>%
            filter(genre == input$genre) %>%
            filter(platform == input$platform) %>%
            filter(year_of_release == input$year) %>%
            slice(1:10)
    })

    # Sales tab, left panel: stacked bars of global sales per year by rating.
    output$total_sales <- renderPlot({
        game_sales %>%
            select(year_of_release, sales, rating) %>%
            group_by(rating) %>%
            arrange(desc(sales)) %>%
            ggplot() +
            aes(x = year_of_release, y = sales, fill = rating) +
            geom_bar(width = 0.5, stat = "identity") +
            #theme_minimal() +
            theme(panel.grid.minor = element_blank()) +
            scale_x_continuous(breaks = c(1988 , 1996, 2000, 2005, 2010, 2015)) +
            scale_y_continuous(breaks = c(1,5, 10, 20, 30, 40, 50, 100, 125, 150, 175, 200, 225)) +
            labs(
                x = "\nYear of release",
                y = "Sales in millions",
                title = "Total Global Sales by Year"
            )
    })

    # Sales tab, right panel: pie chart (polar stacked bar) of the selected
    # year's sales share per rating, labelled with percentages.
    output$year_sales <- renderPlot({
        game_sales %>%
            filter(year_of_release == input$year_sales) %>%
            group_by(rating) %>%
            summarise(total_sales = sum(sales)) %>%
            mutate(percent = total_sales/sum(total_sales)*100) %>%
            ggplot() +
            aes(x = "", y = total_sales, fill = rating) +
            geom_bar(stat = "identity") +
            coord_polar("y") +
            theme_void() +
            labs(
                x = "\nYear of release",
                y = "Sales in millions",
                title = "Yearly Global Sales by Rating"
            ) +
            geom_text(aes(x = 1.6, label = paste0(round(percent, 1), "%")),
                      size = 4, position = position_stack(vjust = 0.5))
    })

    #Popularity Tab
    #updating year selectInput based on the rating chosen on this tab
    observe({
        filtered_year_popularity <- game_sales %>%
            filter(rating == input$rating_popularity) %>%
            select(year_of_release) %>%
            arrange(year_of_release)
        updateSelectInput(session, "year_popularity", "Select Year", choices = unique(filtered_year_popularity))
    })

    # Top-5 games for the selected rating/year, grouped bars by platform.
    output$top_five_games <- renderPlot({
        ggplot(top_five_filtered()) +
            aes(x = name, y = sales, fill = platform) +
            geom_bar(width = 0.5, stat = "identity", position = "dodge") +
            #theme_minimal() +
            theme(panel.grid.minor = element_blank()) +
            labs(
                x = "\nGames",
                y = "Sales in millions",
                title = "Top 5 Games by Year "
            )
    })

    # Same top-5 rows as above, but plotted per publisher.
    output$top_five_publishers <- renderPlot({
        ggplot(top_five_filtered()) +
            aes(x = publisher, y = sales, fill = platform) +
            geom_bar(width = 0.5, stat = "identity", position = "dodge") +
            #theme_minimal() +
            theme(panel.grid.minor = element_blank()) +
            labs(
                x = "\nPublishers",
                y = "Sales in millions",
                title = "Top 5 Publishers by Year"
            )
    })
}
# Run the application, binding the UI and server defined above.
shinyApp(ui = ui, server = server)
|
39da7a6dc6958870319dda7b28be5eb91f0b7bff
|
6872e8b5d8ff34d9c4447feddbe45f0306a8b1b4
|
/Caso1/mediacontaminante.R
|
89887b8775328ddd5615d9d300b69fd011ed17cb
|
[] |
no_license
|
SebDelEs/Programacion_Actuarial_III_OT16
|
4197403ced669b00f8141e6b492a52da9a6d6330
|
b9f553d455c7bb8717ff767350d6a207cb2fc018
|
refs/heads/master
| 2020-12-06T04:59:37.774455
| 2016-12-14T14:08:17
| 2016-12-14T14:08:17
| 65,914,613
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,732
|
r
|
mediacontaminante.R
|
# mediacontaminante: pooled mean of one pollutant across monitor CSV files.
#
# Args:
#   directorio:   folder holding the monitor files "001.csv" ... "332.csv".
#   contaminante: either "sulfate" (column 2 of each file) or
#                 "nitrate" (column 3).
#   id:           integer vector of monitor ids to read.
#
# Returns the mean of all non-missing observations pooled over the selected
# monitors, or NA when `contaminante` is not recognised.
#
# Fixes over the previous version:
#   * no longer calls setwd() (a global side effect) inside the loop — files
#     are addressed with file.path() instead, and the dead getwd() is gone;
#   * monitor ids are zero-padded with sprintf("%03d") instead of nested ifs;
#   * a file whose pollutant column is all NA no longer poisons the sulfate
#     total (mean() of such a column is NaN and NaN * 0 stayed NaN);
#   * an unknown pollutant now really returns NA — the old "NA" marker was
#     overwritten by a final 0/0 division, so NaN was returned instead.
mediacontaminante <- function(directorio = "~/Actuaria/Tercer Semestre/Programacion III/specdata",
                              contaminante = "sulfate",
                              id = 1:332) {
  # Map the pollutant name onto its column index; switch() yields NULL
  # when the name matches neither alternative.
  columna <- switch(contaminante, sulfate = 2, nitrate = 3)
  if (is.null(columna)) {
    return(NA)
  }
  # Pool every observation over the requested monitors. The overall mean of
  # the pooled values equals the count-weighted mean of per-file means that
  # the previous implementation accumulated file by file.
  valores <- unlist(lapply(id, function(i) {
    archivo <- file.path(directorio, sprintf("%03d.csv", i))
    read.csv(archivo)[[columna]]
  }))
  mean(valores, na.rm = TRUE)
}
|
8338d3a61cad90e0de5b6b0bb1e94f24d419c7ab
|
2d9fd1240c58b315583d7b43797b6f81061b35f6
|
/Gasolina/main.R
|
a3f5e982b17c95abd1ae3d0cbe876f4c83dcb782
|
[] |
no_license
|
Gabovillayzan/R_workspace_01
|
f6200bcd1bcf5307b213bee700a5b7a0d4b60b9e
|
fb28f2ebfc3df6c432d847c910aabf101e9cec57
|
refs/heads/master
| 2020-07-31T20:57:25.634141
| 2019-09-25T04:46:00
| 2019-09-25T04:46:00
| 210,751,396
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,563
|
r
|
main.R
|
# URL used to build the fuel-station price ranking (example query)
# http://m.facilito.gob.pe/facilito-mobile/pages/public/buscarCombustible?idsCombustible=74&idsCombustible=78&idsCombustible=73&idsCombustible=03&masCercano=0&usarMiUbicacion=0&area=5&miLatitud=-12.089684&miLongitud=-77.033528&distrito=150136&provincia=150100&departamento=150000&distritoText=SAN%20MIGUEL&provinciaText=LIMA&departamentoText=LIMA&codigoUnidad=&startIndex=0&recordsPerPage=50&back=0
#Loading the rvest package
library('rvest')
#Specifying the url for desired website to be scraped
url = 'http://m.facilito.gob.pe/facilito-mobile/pages/public/buscarCombustible?idsCombustible=74&idsCombustible=78&idsCombustible=73&idsCombustible=03&idsCombustible=72&idsCombustible=04&masCercano=0&usarMiUbicacion=0&area=10&miLatitud=-12.089684&miLongitud=-77.033528&distrito=150136&provincia=150100&departamento=150000&distritoText=SAN%20MIGUEL&provinciaText=LIMA&departamentoText=LIMA&codigoUnidad=&startIndex=0&recordsPerPage=200&back=0'
# Download once to a local file so parsing below works offline/repeatably.
download.file(url, destfile = "gasolina.html", quiet=TRUE)
#Reading the HTML code from the website
webpage <- read_html("gasolina.html")
####################################################################
# Prices: elements with CSS class .texto_3 hold the "S/.x.xx" price text.
prices_data_html <- html_nodes(webpage,'.texto_3')
#Converting the prices data to text
prices_data <- html_text(prices_data_html)
#Let's have a look at the rankings
head(prices_data)
#Data-Preprocessing: strip the "S/." currency prefix and parse as numeric
prices_data<- gsub("S/.","",prices_data)
prices_data<-as.numeric(prices_data)
#Let's have another look at the rankings
head(prices_data)
####################################################################
# Company names: .texto_1 elements contain company name + address together;
# the text up to the first "\r" is the company name.
company_data_html <- html_nodes(webpage,'.texto_1')
#Converting the companies data to text
company_data <- html_text(company_data_html)
#Let's have a look at the companies
head(company_data)
#Data-Preprocessing: Converting companies
company_data<- sub("\r\n","",company_data)
company_data<- substr(company_data,1,regexpr('\r', company_data))
company_data<- gsub(" ","",company_data)
company_data<- gsub("\r","",company_data)
#Let's have another look at the companies
head(company_data)
####################################################################
# Fuel type: .texto_2 elements, normalised to a factor.
tipo_data_html <- html_nodes(webpage,'.texto_2')
#Converting the tipo gas data to text
tipo_data <- html_text(tipo_data_html)
#Let's have a look at the tipo
head(tipo_data)
#Data-Preprocessing: Converting tipo
tipo_data<- gsub("\r\n","",tipo_data)
tipo_data<- gsub(" ","",tipo_data)
tipo_data<- as.factor(tipo_data)
#Let's have another look at the tipo
show(tipo_data)
length(tipo_data)
####################################################################
# Addresses: same .texto_1 elements, this time keeping the text between the
# first "\r" and the closing ")" of the "(Distancia: x Km)" suffix.
direccion_data_html <- html_nodes(webpage,'.texto_1')
#Converting the direcciones data to text
direccion_data <- html_text(direccion_data_html)
#Let's have a look at the direcciones
head(direccion_data)
#Data-Preprocessing: Converting direcciones
direccion_data<- sub("\r\n","",direccion_data)
direccion_data<- substr(direccion_data,regexpr('\r', direccion_data),regexpr(')', direccion_data))
direccion_data<- sub("\r\n","",direccion_data)
direccion_data<- gsub(" ","",direccion_data)
#Let's have another look at the direcciones
head(direccion_data)
#let's have the distance in KM also:
posicion1<- regexpr("\\(", direccion_data)
posicion2<- regexpr("\\)", direccion_data)
distancia_data<- substr(direccion_data, posicion1, posicion2)
distancia_data<- gsub("\\(Distancia: ","",distancia_data)
distancia_data<- gsub(" Km\\)","",distancia_data)
distancia_data<- as.numeric(distancia_data)
show(distancia_data)
############# Combine the scraped fields into one data frame
gasolina_df<-data.frame(Precio = prices_data,
                        Tipo = tipo_data,
                        Empresa = company_data,
                        Direccion = direccion_data,
                        Distancia = distancia_data)
str(gasolina_df)
############## Analysis ##################
# Apply filters
gasolina_df2 <- as.data.frame(gasolina_df)
# NOTE(review): calling the S3 method transform.data.frame() directly with an
# empty second argument looks like unfinished work — confirm intent.
transform.data.frame(gasolina_df2, ) ############## TODO: still need to apply the Repsol discount
library('ggplot2')
qplot(data = gasolina_df,Precio,fill = Tipo,bins = 25)
# NOTE(review): the next call has no geom layer (draws an empty panel), and
# the one after maps Empresa positionally inside geom_point(aes(...)),
# overriding x — both look like exploratory leftovers.
ggplot(gasolina_df,aes(x=Precio,y=Tipo))
ggplot(gasolina_df,aes(x=Precio,y=Tipo))+
  geom_point(aes(Empresa))
ggplot(gasolina_df, aes(x=Empresa, y=Precio,color=Tipo)) +
  geom_point()
# NOTE(review): dplyr is never loaded in this script, so filter() resolves to
# stats::filter() and this line will fail — add library(dplyr) or dplyr::filter.
gasolina_df <- filter(gasolina_df,Empresa=="REPSOL COMERCIAL S.A.C.")
library(gapminder)
library(plotly)
# NOTE(review): gapminder is piped in as the data while gasolina_df is passed
# where the mapping belongs — ggplot() will reject this; the pipe target and
# data argument need to be reconciled.
p <- gapminder %>%
  ggplot(gasolina_df,aes(x=Empresa,y=Tipo,size=Precio)) +
  geom_point() +
  scale_x_log10() +
  theme_bw()
ggplotly(p)
|
cc5cb799ea421141b8c3d190b7253ed7a7165d47
|
f044402735a52fa040c5cbc76737c7950406f8b2
|
/BrCa_Age_Associated_TMA/Packages/biostatUtil/R/ms_plot.R
|
03b4984a84c1a5410f05cf8a662192bde052ac9e
|
[] |
no_license
|
BCCRCMO/BrCa_AgeAssociations
|
5cf34f3b2370c0d5381c34f8e0d2463354c4af5d
|
48a11c828a38a871f751c996b76b77bc33d5a3c3
|
refs/heads/master
| 2023-03-17T14:49:56.817589
| 2020-03-19T02:18:21
| 2020-03-19T02:18:21
| 247,175,174
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,638
|
r
|
ms_plot.R
|
#' Plots for MS analyses
#'
#' `ms_boxplot` shows boxplots of different MS expression data values.
#' `ms_mean_var` shows mean-sd plots for the VSN data values to analyze the
#' mean-variance relationship.
#'
#' @param x data object returned by `ms_process`
#' @param width width of plot
#' @param height height of plot
#' @param path file path to save figure. Device is pdf.
#' @return Both functions return a pdf saved to the file location specified by
#' `path`. `ms_boxplot` shows three boxplots of expression values:
#' raw data values, log2 and vsn transformed values. `ms_mean_var` shows
#' the vsn transformed values and mean-sd plots for each treatment group.
#' @name ms_plot
#' @family Mass Spectrometry functions
#' @author Derek Chiu
#' @export
ms_boxplot <- function(x, width = 8, height = 10, path = NULL) {
  # One boxplot per data representation, each labelled with its transform.
  titles <- c("Raw data values",
              "log2(Raw data values)",
              "vsn(Raw data values)")
  long_data <- lapply(x[c("raw", "l2", "vsn")], function(mat) {
    tidyr::gather(as.data.frame(mat), key = "Sample", value = "Expression")
  })
  boxplots <- Map(ms_gg_boxplot, long_data, titles)
  # Arrange one plot per page; when a path is given, write them out as a
  # multi-page document (device inferred from the file extension).
  pages <- gridExtra::marrangeGrob(boxplots, nrow = 1, ncol = 1, top = NULL)
  if (!is.null(path)) {
    ggsave(filename = path, plot = pages, width = width, height = height)
  }
  boxplots
}
#' Internal helper: styled boxplot of Expression by Sample for one data source
#' @noRd
ms_gg_boxplot <- function(x, title) {
  # aes_() with quote() defers resolution of the Sample/Expression columns
  # until plot time, matching how the callers build their long-format frames.
  base_plot <- ggplot(x, aes_(x = quote(Sample), y = quote(Expression)))
  styled <- base_plot +
    stat_boxplot(geom = "errorbar", width = 0.4) +
    geom_boxplot() +
    theme_linedraw() +
    theme(plot.title = element_text(face = "bold"),
          panel.grid = element_blank(),
          axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
  styled + ggtitle(title)
}
#' @inheritParams ms_plot
#' @param g vector of treatment groups
#' @param title vector of titles for each `g`
#' @name ms_plot
#' @export
ms_mean_var <- function(x, g, title = NULL, width = 8, height = 10,
                        path = NULL) {
  # Fall back to the group labels themselves when no titles are supplied.
  if (is.null(title)) title <- g
  dat.plot <- tidyr::gather(as.data.frame(x[["vsn"]]),
                            key = "Sample", value = "Expression")
  # Page 1: boxplot of the vsn-transformed values across samples.
  bp <- ms_gg_boxplot(dat.plot, "vsn(Raw data values)")
  # One mean-sd plot per treatment group. Columns are matched to a group by
  # grep()-ing the group label against the vsn matrix's column names —
  # NOTE(review): this assumes group labels are unambiguous substrings of
  # the column names; a label that matches another group's columns would
  # silently mix samples. Confirm against the naming scheme used upstream.
  msdp <- Map(function(g, t)
    vsn::meanSdPlot(x$vsn[, grep(g, colnames(x$vsn))], plot = FALSE)$gg +
      ggtitle(paste("vsn", t)), g = g, t = title)
  all.plots <- append(list(bp), unname(msdp))
  # One plot per page; write a multi-page file when a path is supplied.
  plot <- gridExtra::marrangeGrob(all.plots, nrow = 1, ncol = 1, top = NULL)
  if (!is.null(path))
    ggsave(filename = path, plot = plot, width = width, height = height)
  return(all.plots)
}
|
f4982ea066618c95359ec59ec48af8bac95ffec2
|
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
|
/interp/man/voronoi.Rd
|
0f78da3f5135174eed3a832f26688d061d8a88d5
|
[] |
no_license
|
akhikolla/InformationHouse
|
4e45b11df18dee47519e917fcf0a869a77661fce
|
c0daab1e3f2827fd08aa5c31127fadae3f001948
|
refs/heads/master
| 2023-02-12T19:00:20.752555
| 2020-12-31T20:59:23
| 2020-12-31T20:59:23
| 325,589,503
| 9
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,474
|
rd
|
voronoi.Rd
|
\name{voronoi}
\title{Voronoi object}
\alias{voronoi}
\arguments{
\item{x,y}{x and y coordinates of nodes of the voronoi mosaic. Each node is a
circumcircle center of some triangle from the Delaunay triangulation.}
\item{node}{logical vector, indicating real nodes of the voronoi
mosaic. These nodes are the centers of circumcircles of triangles with
positive area of the delaunay triangulation.
If \code{node[i]}=\code{FALSE}, (\code{c[i]},\code{x[i]}) belongs to a
triangle with area 0.}
\item{n1,n2,n3}{indices of neighbour nodes. Negative indices indicate
dummy points as neighbours.}
\item{tri}{triangulation object, see \code{\link{triSht}}.}
\item{area}{area of triangle \eqn{i}.
}
\item{ratio}{aspect ratio (inscribed radius/circumradius) of triangle
\eqn{i}.}
\item{radius}{circumradius of triangle \eqn{i}.}
\item{dummy.x,dummy.y}{x and y coordinates of dummy points. They are
used for plotting of unbounded tiles.}
}
\description{
A \code{voronoi} object is created with \code{\link{voronoi.mosaic}}
}
\note{
This version of \code{voronoi} object is generated from the
\code{\link{tri.mesh}} function from package \code{interp}. That's the only
difference to \code{voronoi} objects generated with package
\code{tripack}.
}
\author{
Albrecht Gebhardt <albrecht.gebhardt@aau.at>,
Roger Bivand <roger.bivand@nhh.no>
}
\seealso{
\code{\link{voronoi.mosaic}},\code{\link{plot.voronoi}}
}
\keyword{spatial}
|
868693b21f093f36af9c98fc688d06fa09a79b9d
|
539e3b8d04ba576c89432acc1f9fbec59d83007a
|
/man/get.predpubTable.Rd
|
cce8c9cffac6eb0234a16b40dfe2e132c1141efc
|
[] |
no_license
|
rossmounce/predatory
|
a57c72d439d6f72a8e5301f426ed171199aa8a51
|
afe6071de371cfbeae7e581b1f5c7f1290e9c7d4
|
refs/heads/master
| 2020-03-28T03:44:28.878387
| 2018-09-06T12:16:12
| 2018-09-06T12:16:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,125
|
rd
|
get.predpubTable.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get_predpubTable.R
\name{get.predpubTable}
\alias{get.predpubTable}
\title{Returns a dataframe with information about predatory publishers and journals from Bealls webpage}
\usage{
get.predpubTable()
}
\value{
A dataframe with publisher, name of journal and issn of predatory publications
}
\description{
This function will return a dataframe with information regarding predatory journals from Beall's site (https://scholarlyoa.com/).
The information of predatory publishers has been collected manually, while information on stand-alone journals is captured by a web scraping algorithm.
BE AWARE that the dataset for publishers is being built over time and it is not yet complete. Last update: 2016-12-09.
}
\section{Warning}{
While the database for standalone journals is up to date, the information about predatory publishers is not yet complete since it requires
an extensive manual work. The missing data will be regularly included in the upcoming versions of the package.
}
\examples{
my.predpub <- get.predpubTable()
head(my.predpub)
}
|
9396251e18db222fdf406b2d95f6390e9ddb80bd
|
192968e131d29780397e4abab7943bdd6260beee
|
/R/filter_on_min_peptides.R
|
90fec1f0ea5eb199e9aaa40e078731c7ce95632c
|
[] |
no_license
|
abelew/SWATH2stats
|
78b837e6fc7ca1477abe495316a774448b10bdf5
|
21b1928b5f8ff9906776c126a3a24399d5dcb153
|
refs/heads/master
| 2020-05-17T02:07:10.389699
| 2019-04-25T15:56:07
| 2019-04-25T15:56:07
| 128,787,209
| 0
| 0
| null | 2018-04-09T14:45:29
| 2018-04-09T14:45:28
| null |
UTF-8
|
R
| false
| false
| 2,387
|
r
|
filter_on_min_peptides.R
|
#' Filter openSWATH output for proteins that are identified by a minimum of n independent peptides.
#'
#' This function removes entries mapping to proteins that are identified by less
#' than n_peptides.
#' Removing single-hit proteins from an analysis can significantly increase the
#' sensitivity under strict protein fdr criteria, as evaluated by
#' e.g. assess_fdr_overall.
#'
#' @param data Data table that is produced by the openSWATH/iPortal workflow.
#' @param n_peptides Minimal number of peptide IDs associated with a
#'   protein ID in order to be kept in the dataset.
#' @param rm.decoy Option to remove the decoys during filtering.
#' @return Returns the filtered data frame with only peptides that map to
#'   proteins with >= n_peptides peptides.
#' @author Moritz Heusel
#' @examples
#'  data("OpenSWATH_data", package="SWATH2stats")
#'  data("Study_design", package="SWATH2stats")
#'  data <- sample_annotation(OpenSWATH_data, Study_design)
#'  data.filtered <- filter_mscore_freqobs(data, 0.01,0.8)
#'  data.max <- filter_on_max_peptides(data.filtered, 5)
#'  data.min.max <- filter_on_min_peptides(data.max, 3)
#' @export
filter_on_min_peptides <- function(data, n_peptides, rm.decoy = TRUE) {
    # Collapse ambiguous protein-group labels so peptide counting is
    # consistent across rows, then optionally drop decoy proteins.
    data <- unifyProteinGroupLabels(data)
    if (isTRUE(rm.decoy)) {
        data <- removeDecoyProteins(data)
    }
    # Count distinct peptides per protein on the unique
    # (protein, peptide) pairs.
    data.prot.pep <- unique(data[, c("ProteinName", "FullPeptideName")])
    data.prot.pep.n <- tapply(data.prot.pep$FullPeptideName, data.prot.pep$ProteinName,
        length)
    # Keep only proteins with at least n_peptides distinct peptides.
    prot.pep.names <- names(data.prot.pep.n[data.prot.pep.n >= n_peptides & !is.na(data.prot.pep.n)])
    data.filtered <- data[data$ProteinName %in% prot.pep.names, ]
    # Report before/after counts and the fraction of peptides removed.
    message("Before filtering: ", "\n", " Number of proteins: ", length(unique(data$ProteinName)),
        "\n", " Number of peptides: ", length(unique(data$FullPeptideName)), "\n\n",
        "Percentage of peptides removed: ", round((length(unique(data$FullPeptideName)) -
            length(unique(data.filtered$FullPeptideName)))/length(unique(data$FullPeptideName)) *
            100, digits = 2), "%", "\n\n", "After filtering: ", "\n", " Number of proteins: ",
        length(unique(data.filtered$ProteinName)), "\n", " Number of peptides: ",
        length(unique(data.filtered$FullPeptideName)), "\n")
    return(data.filtered)
}
|
be6b19cd436cd634fd8c1bd80c00eb167f5a92eb
|
202d8a19f446ec51ba0c467f5ebd8c649a685f8f
|
/run_tests.R
|
e01202769485e8a188bb2f5997372de178a2d81c
|
[] |
no_license
|
jfozard/ProgrammingAssignment2
|
27d6ffe4059131a1c41699d6c1f140b97400c353
|
ebce131a8a4245a2c633366ae80ea38bab89a6ae
|
refs/heads/master
| 2021-01-18T10:44:48.292617
| 2015-02-22T20:39:17
| 2015-02-22T20:39:17
| 31,176,812
| 0
| 0
| null | 2015-02-22T19:41:08
| 2015-02-22T19:41:08
| null |
UTF-8
|
R
| false
| false
| 84
|
r
|
run_tests.R
|
library("testthat")
source("cachematrix.R")
test_dir("tests", reporter="summary")
|
00045eecbb2c756eaa408811edbd6f246424db18
|
cb968fbbb9521395d9d869a82908ca90a532df6a
|
/scripts/07_evaluate.R
|
e31c767774a4c1d5f9cd970a34f7dc50c02c4cb1
|
[] |
no_license
|
michellelawing/massasaugaSDM
|
1d60760623c5cf78eb302dcc9484d1f0aed153a1
|
339e34708d668b837573382543ee9807205d8a09
|
refs/heads/main
| 2023-02-03T16:23:04.603622
| 2020-12-18T04:08:47
| 2020-12-18T04:08:47
| 322,429,328
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,960
|
r
|
07_evaluate.R
|
############################
###--- Model Evaluation ----
############################
# Runs the model evaluation and extracts multiple evaluation statistics.
# Depends on objects built earlier in the pipeline: input_data, background,
# model_list, climate_list, pick_filter/pick_fold/pick_back and model_tracker.
#evaluate models with testing presence and background points
colnames(model_tracker) <- c("Climate", "Filters", "Folds", "Background")
model_tracker <- data.frame(model_tracker)
levels(model_tracker$Background) <- c("BE", "BE10k", "CB", "CB10k", "CL", "CL10k", "OE", "OE10k")
model_tracker$Background <- factor(model_tracker$Background, levels = c("CL", "CL10k", "CB", "CB10k", "OE", "OE10k","BE", "BE10k"))
model_eval <- list()
counter <- 1
# The nested loop order (climate > filters > folds > backs) must match the
# order in which model_list was populated — `counter` pairs each evaluation
# with its fitted model positionally.
for (climate in 1:3){
  for (filters in 1:4){
    for (folds in 1:5) {
      for (backs in 1:8){
        # Held-out (Training == "No") presences and background points for
        # this filter/fold/background combination.
        input_temp <- input_data[input_data$Filter == pick_filter[[filters]] & input_data$Fold == pick_fold[[folds]] & input_data$Training == "No",1:2]
        back_temp <- background[background$Set == pick_back[[backs]] & background$Training == "No", 1:2]
        model_eval[[counter]] <- evaluate(input_temp, back_temp, model_list[[counter]], climate_list[[climate]])
        counter <- counter + 1
      }
    }
  }
}
#save out the model evaluation
save(model_eval, file = "results/spring2020/modelruns/model_eval.RData")
save(model_tracker, file = "results/spring2020/modelruns/model_tracker.RData")
#load model list and model tracker output from a previous run
# load("results/spring2020/modelruns_20200212_largecircles/model_eval.RData")
# load("results/spring2020/modelruns_20200212_largecircles/model_tracker.RData")
#extract multiple evaluation statistics for use
auc <- unlist(lapply(model_eval, function(x) x@auc))
#max_aucdiff training - testing
aucdiff <- array(NA, dim = c(length(model_tracker[,1])))
for(i in 1:length(model_tracker[,1])){
  aucdiff[i] <- model_list[[i]]@results[5] - model_eval[[i]]@auc
}
#get thresholds
# NOTE(review): both threshold indices are derived from model_eval[[1]] only
# but applied to every model below — confirm this is intentional rather than
# a per-model threshold computed inside the lapply calls.
ses <- which.min(abs(model_eval[[1]]@TPR - model_eval[[1]]@TNR))
maxsss <- which.max(model_eval[[1]]@TPR + model_eval[[1]]@TNR)
#minimum presence threshold and 10% presence threshold
#kappa at thresholds
kappa_ses <- unlist(lapply(model_eval, function(x) x@kappa[ses]))
kappa_maxsss <- unlist(lapply(model_eval, function(x) x@kappa[maxsss]))
#minimum presence threshold and 10% presence threshold
#True Skill Statistic TSS at thresholds, TPR is sensititivy and TNR is specificity
tss_ses <- unlist(lapply(model_eval, function(x) x@TPR[ses] + x@TNR[ses] - 1))
tss_maxsss <- unlist(lapply(model_eval, function(x) x@TPR[maxsss] + x@TNR[maxsss] - 1))
#minimum presence threshold and 10% presence threshold
#Omission Rate, OR
or_ses <- unlist(lapply(model_eval, function(x) x@OR[ses]))
or_maxsss <- unlist(lapply(model_eval, function(x) x@OR[maxsss]))
#minimum presence threshold and 10% presence threshold
# Attach all statistics to the tracker so each row describes one model run.
model_tracker <- cbind(model_tracker, "AUC" = auc, "AUCdiff" = aucdiff,
                       "Kappa_SeS" = kappa_ses, "Kappa_maxSSS" = kappa_maxsss,
                       "TSS_SeS" = tss_ses, "TSS_maxSSS" = tss_maxsss,
                       "OR_SeS" = or_ses, "OR_maxSSS" = or_maxsss)
save(model_tracker, file = "results/spring2020/modelruns/model_tracker_eval.RData" )
#Plot evaluation statistics
# load("results/spring2020/modelruns_20200212_largecircles/model_tracker_eval.RData")

# All seven metric panels below share one recipe: boxplots of the metric by
# background set, jittered points colored by fold and shaped by climate,
# faceted by filter. plot_metric() factors that recipe out so each metric is
# drawn with a single call instead of a copy-pasted ggplot block.
#
# plot_metric: build the standard evaluation plot for one metric column.
#   metric - name (string) of a numeric column in `model_tracker`,
#            e.g. "AUC", "OR_SeS", "Kappa_maxSSS".
# Returns the ggplot object; at top level it auto-prints exactly like the
# original inline ggplot() calls did.
plot_metric <- function(metric) {
  ggplot(model_tracker, aes(x = Background, y = .data[[metric]])) +
    geom_boxplot() +
    geom_jitter(aes(color = Folds, shape = Climate)) +
    facet_wrap(~Filters, nrow = 2) +
    theme(panel.spacing = unit(0, "lines"),
          panel.border = element_rect(color = "black", fill = NA, size = 1),
          strip.background = element_rect(color = "black", size = 1),
          axis.text = element_text(color = "black"))
}

#plot AUC by filter, fold, background and climate
plot_metric("AUC")
#ggsave("figures/evaluation metrics/largecircles/AUC_filter_fold_20200211.tiff", width = 7, height = 5, dpi = 600)

#plot OR_SeS by filter, fold, background and climate
plot_metric("OR_SeS")
#ggsave("figures/evaluation metrics/largecircles/ORses_filter_fold_20200211.tiff", width = 7, height = 5, dpi = 600)

#plot OR_maxSSS by filter, fold, background and climate
plot_metric("OR_maxSSS")
#ggsave("figures/evaluation metrics/largecircles/ORmax_filter_fold_20200211.tiff", width = 7, height = 5, dpi = 600)

#plot TSS SeS by filter, fold, background and climate
plot_metric("TSS_SeS")
#ggsave("figures/evaluation metrics/largecircles/TSSses_filter_fold_20200211.tiff", width = 7, height = 5, dpi = 600)

#plot TSS_maxSSS by filter, fold, background and climate
plot_metric("TSS_maxSSS")
#ggsave("figures/evaluation metrics/largecircles/TSSmax_filter_fold_20200211.tiff", width = 7, height = 5, dpi = 600)

#plot Kappa_SeS by filter, fold, background and climate
plot_metric("Kappa_SeS")
#ggsave("figures/evaluation metrics/largecircles/KAPPAses_filter_fold_20200211.tiff", width = 7, height = 5, dpi = 600)

#plot Kappa_maxSSS by filter, fold, background and climate
plot_metric("Kappa_maxSSS")
#ggsave("figures/evaluation metrics/largecircles/KAPPAmax_filter_fold_20200211.tiff", width = 7, height = 5, dpi = 600)
|
4c3c26e641291ba8a1ec490ba4b86301278a9cb5
|
60884ab1db4935c61b405bffc7524d6d47ba8cc1
|
/runner.R
|
3f916b36caff8e2e8dbcaefe4166cbbb67af0fed
|
[
"MIT"
] |
permissive
|
chintanp/wsdot_evse_update_states
|
82ac74bbe24226487ff3cdac6908537d72b98d5e
|
e959ace59d69225b8235799a4979c11b9627365a
|
refs/heads/master
| 2021-08-31T08:33:08.616016
| 2021-04-27T19:52:25
| 2021-04-27T19:52:25
| 211,201,432
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,045
|
r
|
runner.R
|
#' Update destination-charger states and generate trips for one analysis run.
#'
#' Sets up per-analysis JSON logging, connects to the main Postgres database
#' using MAIN_* environment variables, loads the simulation parameters for
#' `a_id` into the config, seeds the RNG, and runs the tripgen pipeline.
#'
#' Fixes vs. the original: the connection arguments are defined once (so the
#' connectivity probe and the real connection cannot drift apart), the
#' connection is always released via on.exit() (it was previously leaked),
#' and the ten copy-pasted parameter lookups are folded into a helper.
#'
#' @param a_id analysis id whose parameters drive this run (default 1).
#' @return The value of tripgen::trip_gen(); called for its side effects.
update_states_and_gen_trips <- function(a_id = 1) {
  source("./R/setup_logging.R")

  ## Set up the logging destination: one JSON log file per analysis id.
  lg <-
    lgr::get_logger("test")$set_propagate(FALSE)$set_appenders(lgr::AppenderJson$new(layout = LayoutLogstash$new(), file = here::here(
      paste0("logs/runner_", as.character(a_id), ".log")
    )))

  config <- config::get()

  # Database settings -------------------------------------------------------
  # Connection arguments come from the environment; build them once and reuse
  # them for both the connectivity probe and the real connection.
  db_args <- list(
    RPostgres::Postgres(),
    host = Sys.getenv("MAIN_HOST"),
    dbname = Sys.getenv("MAIN_DB"),
    user = Sys.getenv("MAIN_USER"),
    password = Sys.getenv("MAIN_PWD"),
    port = Sys.getenv("MAIN_PORT")
  )

  if (!do.call(DBI::dbCanConnect, db_args)) {
    lg$log(level = "fatal",
           msg = "Cannot connect to database",
           "ip" = ipify::get_ip())
    # Exit if DB cannot connect
    stop("Cannot connect to database")
  }

  main_con <- do.call(DBI::dbConnect, db_args)
  # Always release the connection, even if trip generation fails below.
  on.exit(DBI::dbDisconnect(main_con), add = TRUE)

  # Get params for the analysis id
  analysis_params <-
    DBI::dbGetQuery(main_con, glue::glue("select sp.param_name, ap.param_value from analysis_params ap
JOIN sim_params sp on ap.param_id = sp.param_id
where ap.analysis_id = {a_id};"))

  # Look up one simulation parameter by name and coerce it to numeric.
  get_param <- function(name) {
    as.numeric(analysis_params$param_value[analysis_params$param_name == name])
  }

  config[['GLOBAL_SEED']] <- get_param('global_seed')
  config[['CRITICAL_DISTANCE']] <- get_param('critical_distance_miles')
  config[['SOC_LOWER_LIMIT']] <- get_param('soc_lower_limit_pc')
  config[['SOC_UPPER_LIMIT']] <- get_param('soc_upper_limit_pc')
  config[['AVG_TRIP_SPEED']] <- get_param('avg_trip_speed_mph')
  config[['AVG_RENTAL_CAR_COST']] <- get_param('avg_rental_car_cost_usd')
  config[['AVG_FUEL_ECONOMY_RENTAL']] <- get_param('avg_fuel_economy_rental_mpg')
  config[['AVG_FUEL_ECONOMY_OWN']] <- get_param('avg_fuel_economy_own_mpg')
  config[['AVG_RESTROOM_SPACING']] <- get_param('avg_restroom_spacing_miles')
  config[['LOOKUP_DISTANCE']] <- get_param('lookup_distance_miles')

  # Deterministic runs: seed from the per-analysis global seed.
  set.seed(config[['GLOBAL_SEED']])

  tripgen::update_dc(a_id)
  print("destination charger updated")
  tripgen::trip_gen(num_days = 1,
                    config = config,
                    a_id = a_id)
}
# Driver: read the analysis id for this run from the local "analysis_id" file
# (first value of the first column) and kick off the pipeline.
a_id <- read.table("analysis_id", header = F)[1, 1]
update_states_and_gen_trips(a_id)
|
4c2ed5b0d9be0c1a82de17d4d2754aa39f5dbdc7
|
59549521738b64f99664901f5e57b4f3cd1e973d
|
/man/fidlr.Rd
|
c47e91c84ed28dfaafc4feec02adc13edaa19f38
|
[] |
no_license
|
wx2000/fidlr
|
360fbb93dde6f97f97a8686fe9bd8a00c7b3df41
|
eb5e265f37edcf4d6e2f6a91e99625bdb66020ed
|
refs/heads/master
| 2020-03-23T09:22:01.692988
| 2016-04-21T15:42:28
| 2016-04-21T15:42:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 702
|
rd
|
fidlr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fidlr.R
\name{fidlr}
\alias{fidlr}
\title{fidlr: FInancial Data LoadeR}
\usage{
fidlr()
}
\value{
Return either a csv file per instrument or a data frame. The csv file is stored in the working directory and the data frame in the globalenv.
}
\description{
fidlr is an RStudio addin designed to simplify the financial data downloading process.
This initial version is a wrapper around the getSymbols function in the quantmod package and only Yahoo, Google, FRED and Oanda are supported.
More data providers and functionalities might be added over time.
}
\author{
Arnaud Amsellem
}
\seealso{
\code{quantmod}
\code{shiny}
}
|
a4eaa4b9c02d489ac29daf107fccebad30f54d78
|
7e10acd6c868fc92dc6fcee074a8b00c8016162e
|
/tests/testthat/test_fmmm.R
|
99add5a52ff3e7fe54f1b467ad43a240fd2e7f5a
|
[] |
no_license
|
kenmansfield/foofactors
|
abd1ad4ebb4d6d04598e93ea7130ccde0e3f7d42
|
a64ab10cb3082403494faebcf037eec5c04e21f7
|
refs/heads/master
| 2021-01-10T08:18:21.784817
| 2015-11-21T08:14:47
| 2015-11-21T08:14:47
| 46,602,902
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 220
|
r
|
test_fmmm.R
|
context("arithmetic")

# fmmm() should return max(x) - min(x) for numeric vectors and refuse
# non-numeric input.
test_that("fmmm finds the max minus min", {
  non_numeric <- c("a", "b", "c")
  one_to_five <- c(1, 2, 3, 4, 5)
  with_outlier <- c(1000, 5, 6, 7)

  # Character input is rejected with an error.
  expect_error(fmmm(non_numeric))
  # 5 - 1 = 4
  expect_identical(fmmm(one_to_five), 4)
  # 1000 - 5 = 995
  expect_identical(fmmm(with_outlier), 995)
})
|
652ef9769021de86d9e0f7f0173fbf548d98aa80
|
f1c0fb20fcc3482ee1498a4b5c128e33424f2b7d
|
/main.R
|
17ccbb3fffe13d96b668c8a93f3b08b2eb791272
|
[] |
no_license
|
mvaniterson/KaggleDsb2018
|
6ea77b529d3bfc8645b4887677513e2f475d7cf8
|
c18e760781c5ccaf80e9187d5cf5d900288fde18
|
refs/heads/master
| 2021-04-06T05:58:32.443221
| 2018-03-13T15:52:41
| 2018-03-13T15:52:41
| 124,549,499
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,562
|
r
|
main.R
|
## ----requirements--------------------------------------------------------
library(keras)
library(tidyverse)
library(EBImage)

# Source every helper script under R/ (unet, preprocess_*, list2tensor,
# mean_precision, ...) into the global environment.
src_files <- list.files("R", pattern="*.R", full.names = TRUE)
tmp <- sapply(src_files, source, .GlobalEnv)

# Show EBImage output inline as rasters instead of in the browser.
options(EBImage.display = "raster")

# Kaggle Data Science Bowl 2018 data layout and model hyperparameters.
TRAIN_PATH = '../input/stage1_train/'
TEST_PATH = '../input/stage1_test/'
HEIGHT = 256
WIDTH = 256
CHANNELS = 3
SHAPE = c(WIDTH, HEIGHT, CHANNELS)   # model input shape (width, height, channels)
BATCH_SIZE = 16
EPOCHS = 50
## ----prepare data------------------------------------------------------------
# One row per training image: its run-length-encoded masks (collected into a
# list), the image path, the mask directory and files, and the original
# image width/height (needed to rescale masks later).
train_data <- read_csv("../input/stage1_train_labels.csv") %>%
  group_by(ImageId) %>%
  summarize(EncodedPixels = list(EncodedPixels)) %>%
  mutate(ImageFile = file.path(TRAIN_PATH, ImageId, "images", paste0(ImageId, ".png")),
         MaskPath = file.path(TRAIN_PATH, ImageId, "masks"),
         MaskFiles = map(MaskPath, list.files, pattern="*.png", full.names = TRUE),
         ImageShape = map(ImageFile, .f = function(file) dim(readImage(file))[1:2]))
train_data %>%
  glimpse()

## ----Display some images----------------------------------------------------
# Sanity check: preprocess three random images (X) and their combined masks
# (Y) to the model input shape and display each mask next to its image.
input_batch <- sample_n(train_data, 3) %>%
  mutate(Y = map2(EncodedPixels, ImageShape, preprocess_masks, new_shape = SHAPE),
         X = map(ImageFile, preprocess_image, shape = SHAPE)) %>%
  select(X,Y)
input_batch
display(combine(input_batch$Y[[1]], input_batch$X[[1]]), all = TRUE)
display(combine(input_batch$Y[[2]], input_batch$X[[2]]), all = TRUE)
display(combine(input_batch$Y[[3]], input_batch$X[[3]]), all = TRUE)

## ----Define model----------------------------------------------------------
# U-net depth/width experiments; the 4-level, 16-filter variant is the one
# actually used.
##model <- unet(shape = SHAPE, nlevels = 2, nfilters = 16, dropouts = c(0.1, 0.1, 0.2))
##model <- unet(shape = SHAPE, nlevels = 3, nfilters = 16, dropouts = c(0.1, 0.1, 0.2, 0.3))
model <- unet(shape = SHAPE, nlevels = 4, nfilters = 16, dropouts = c(0.1, 0.1, 0.2, 0.2, 0.3))

# NOTE(review): the model is compiled twice in a row; the second compile
# (dice loss) overrides the first (Jaccard loss), so the Jaccard compile has
# no effect on the training below. Keep whichever loss is intended and drop
# the other.
model <- model %>%
  compile(
    optimizer = 'adam',
    loss = jaccard_coef_loss,
    metrics = c(jaccard_coef)
  )
model <- model %>%
  compile(
    optimizer = 'adam',
    loss = dice_coef_loss,
    metrics = c(dice_coef)
  )
summary(model)
## ----fit-----------------------------------------------------------------
# Preprocess the full (shuffled) training set into 4-D tensors:
# X = images, Y = masks, both (samples, width, height, channels).
input <- sample_n(train_data, nrow(train_data)) %>%
  mutate(Y = map2(EncodedPixels, ImageShape, preprocess_masks, new_shape = SHAPE),
         X = map(ImageFile, preprocess_image, shape = SHAPE)) %>%
  select(X,Y)
X <- list2tensor(input$X, 4)
Y <- list2tensor(input$Y, 4)
dim(X)

# Keep only the best model seen so far, and stop after 5 epochs without
# improvement on the validation split.
checkpoint <- callback_model_checkpoint(
  filepath = "model.hdf5",
  save_best_only = TRUE,
  period = 1,
  verbose = 1
)
early_stopping <- callback_early_stopping(patience = 5)
history <- model %>%
  fit(X, Y,
      batch_size = BATCH_SIZE,
      epochs = EPOCHS,
      validation_split = 0.2,
      callbacks = list(checkpoint, early_stopping))

## ----inspect model--------------------------------------------------------
plot(history)
save_model_hdf5(model, filepath="unet_model_2_16_256.hdf5")
##model <- load_model_hdf5("unet_model.hdf5", custom_objects=c(dice_coef_loss=dice_coef_loss, dice_coef=dice_coef))

## ----evaluate model-------------------------------------------------------
# Compare predicted masks against ground truth on a few training samples.
Y_hat <- predict(model, x = X)
display(combine(Y[1,,,], Y_hat[1,,,]), all = TRUE)
display(combine(Y[100,,,], Y_hat[100,,,]), all = TRUE)
display(combine(Y[320,,,], Y_hat[320,,,]), all = TRUE)

##convert to binary and label
# Threshold predictions at 0.5, then label connected components so each
# nucleus instance gets its own id.
Z <- map(array_branch(Y, 1), bwlabel)
Z_hat <- map(array_branch(Y_hat, 1), .f = function(z) bwlabel(z > .5))
display(colorLabels(combine(Z[[10]], Z_hat[[10]])), all = TRUE)
display(colorLabels(combine(Z[[500]], Z_hat[[500]])), all = TRUE)
display(colorLabels(combine(Z[[632]], Z_hat[[632]])), all = TRUE)

##Estimate mean precision
# NOTE(review): evaluated on the training data (X/Y above), so this is an
# optimistic estimate, not a validation score.
mp <- map2_dbl(Z, Z_hat, mean_precision)
round(mean(mp), 2)
boxplot(mp)

## ----predict test data----------------------------------------------------
test_data <- tibble(ImageId = dir(TEST_PATH)) %>%
  mutate(ImageFile = file.path(TEST_PATH, ImageId, "images", paste0(ImageId, ".png")),
         ImageShape = map(ImageFile, .f = function(file) dim(readImage(file))[1:2]),
         X = map(ImageFile, preprocess_image, shape = SHAPE))
test_data %>%
  glimpse()
X <- list2tensor(test_data$X, 4)
Y_hat <- predict(model, x = X)

##compare predicted masks with original imagess
display(combine(X[1,,,], Y_hat[1,,,]), all = TRUE)
display(combine(X[5,,,], Y_hat[5,,,]), all = TRUE)
display(combine(X[32,,,], Y_hat[32,,,]), all = TRUE)

## ----submission----------------------------------------------------------
## construct labelled masks and preform run length encoding and decoding for checking
submission <- test_data %>%
  add_column(Masks = map(array_branch(Y_hat, 1), .f = function(z) bwlabel(z > .5)[,,1])) %>%
  mutate(EncodedPixels = map2(Masks, ImageShape, postprocess_image))

# Round-trip check: decode three random encoded predictions back into masks
# and display them next to the source images.
rsamples <- sample_n(submission, 3) %>%
  mutate(Y = map2(EncodedPixels, ImageShape, preprocess_masks, new_shape = SHAPE),
         X = map(ImageFile, preprocess_image, shape = SHAPE)) %>%
  select(X,Y)
X <- list2tensor(rsamples$X, 4)
Y <- list2tensor(rsamples$Y, 4)
display(combine(Y[1,,,], X[1,,,]), all = TRUE)
display(combine(Y[2,,,], X[2,,,]), all = TRUE)
display(combine(Y[3,,,], X[3,,,]), all = TRUE)

# Flatten to one row per (image, mask) pair as required by the competition.
submission <- submission %>%
  unnest(EncodedPixels) %>%
  mutate(EncodedPixels = as.character(EncodedPixels)) %>%
  select(ImageId, EncodedPixels)
submission
write_csv(submission, "submission.csv")
|
6d206db7d8f7458bd43d7c8bb5bd55c603dfcbbf
|
e6d211afff7bd6fe7065150a233ca3f3564f13d6
|
/R/normalizer.R
|
49bf3f7a51aa63b3d748cae6373129aa15afb6f7
|
[] |
no_license
|
arcuellar88/gender-indicator
|
cc5b2c2f9c643f87f9f527670aa465e064d3d8e9
|
bbed90dd4fa549bfe636129ff6a824bd8b2f769b
|
refs/heads/master
| 2020-07-03T17:54:27.483828
| 2016-12-30T18:06:58
| 2016-12-30T18:06:58
| 66,368,484
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 409
|
r
|
normalizer.R
|
#' Function to compute the normalization of the value of the indicators by year in DashDB.
#'
#' Reads the SQL statements stored in "normalization.sql" under the DASHDB
#' directory and executes them against the given connection, so the
#' normalization itself is performed inside DashDB.
#'
#' @param con connection to dashdb (con <- idaConnect("BLUDB", "", ""))
#' @return the results are computed directly in DashDB
#' @examples
#' computeScoresDashDBFromFile(con)
computeScoresDashDBFromFile <- function(con)
{
  # Load the SQL commands from file...
  sqlcommands<-readSQLCommands(paste0(DASHDB,"normalization.sql"))
  # ...and run them one by one on the DashDB connection.
  runSQL(sqlcmdlist=sqlcommands,con=con)
}
|
07b4a61f1bced8a169a446496dfa365d29b6c6aa
|
6c3b2cdea485ec954dd93ee5c87c0a014d80a812
|
/src/R/server.R
|
2999987120dd5c42d47616438e46155aaf360eab
|
[] |
no_license
|
anthonysena/dqcdm-temporal
|
b0bf29bee7a5a3b3b4e402656fb1c95c3d6079b3
|
32d32db703457b784917e7edcfd8f6ad14cc0453
|
refs/heads/master
| 2021-01-21T05:36:12.224659
| 2015-11-07T20:23:09
| 2015-11-07T20:23:09
| 45,744,185
| 0
| 0
| null | 2015-11-07T16:26:57
| 2015-11-07T16:26:57
| null |
UTF-8
|
R
| false
| false
| 476
|
r
|
server.R
|
library("shiny")

# Read in the data.
source("sharedFuns.R")
source("read.R")
source("compare.R")

# Shiny server: renders the main comparison plot from the user's database,
# year, and condition selections.
# Fixes vs. the original: removed a leftover `browser()` breakpoint (which
# would suspend the app whenever the plot re-rendered in an interactive
# session) and a `print("hello")` debug statement.
shinyServer(
  function(input, output) {
    output$mainplot <- renderPlot({
      sn <- input$DB
      year <- as.numeric(input$Year)
      cid <- input$Cond
      # Compare the selected year/condition against the control, flag
      # outliers, and plot the result.
      res <- compareYearCondition(dat, sn, year, cid)
      res <- computeFlags(res$dat_year, res$dat_control_mean)
      g <- plotComparison(res)
      return(g)
    })
  }
)
|
dc68672905b0bb56e40b6be20e7fd54a92c1ab3e
|
213b22f2e6e6072b186db8f3fa934acbef1f740d
|
/R/USR/!FULL USR MODEL.R
|
09add984ed3f96ae5e11042f5cce76c56a5a34cc
|
[] |
no_license
|
cekmorse/Calcs
|
eb03a0c791a161467de496fd8ce5b3023aae8e1b
|
0fc2d4aa755d0d31aa65e477d0b5ddd2ee838782
|
refs/heads/master
| 2021-06-25T20:10:23.622472
| 2015-07-28T17:00:32
| 2015-07-28T17:00:32
| 35,171,312
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,368
|
r
|
!FULL USR MODEL.R
|
# Run the full USR model: source each numbered stage script in order,
# printing a progress message after each one completes.
usr_stage <- function(script, done_msg) {
  # source() with default local = FALSE evaluates in the global environment,
  # exactly as the original top-level source() calls did.
  source(paste0("~/Projects/Thesis/Thesis-Calcs/R/USR/", script))
  print(done_msg)
}

usr_stage("01 Variable Realizations.R", "done 01")
usr_stage("02 Survey Analysis and Plots.r", "done 02")
usr_stage("03 Conc Models.R", "done 03")
usr_stage("04 Conc Lab Error.R", "done 04")
usr_stage("05 River Geometry.R", "done 05")
usr_stage("06 Conc Realizations-Data Setup.R", "done 06")
usr_stage("07 ETref to Evap Realizations.R", "done 07")
usr_stage("08 Conc Realizations.R", "done 08")
usr_stage("09 Transport and Storage.R", "done 09")
usr_stage("10 Water and Mass Models.R", "done 10")
usr_stage("11 Water and Mass Models-A and S.R", "done 11")
usr_stage("12 Water and Mass Models-Atmos.R", "done 12")
usr_stage("13 Water and Mass Models-Storage.R", "done 13")
usr_stage("14 Contrib Mass.R", "done 14")
usr_stage("15 Contrib Water.R", "done 15 - Done all")
|
12f576509b244e3eb8a3742ef2456bf97f18f343
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/ThreeWay/R/T2func.R
|
5a4f2e60f242a2eda21d5470deef91dbbf020b42
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,265
|
r
|
T2func.R
|
# Tucker2/Tucker3-type analysis of an n x m x p three-way array, supplied as
# the n x (m*p) matrix X (frontal slices side by side). Fits columnwise
# orthonormal component matrices A (n x r1), B (m x r2), C (p x r3) and a
# core H (r1 x (r2*r3)) by alternating least squares.
#
# Arguments:
#   X          - data matrix, n x (m*p)
#   n, m, p    - sizes of the three modes
#   r1, r2, r3 - numbers of components per mode
#   start      - 0 = rational start via eigendecompositions,
#                1 = random orthonormalized start,
#                2 = start from the user-supplied A, B, C, H
#   conv       - relative convergence tolerance on the loss function
#   model      - 1/2/3 fixes C/B/A (respectively) to the identity, giving the
#                Tucker2 variants; other values update all three matrices
#   A, B, C, H - starting values (used when start == 2, and for a fixed mode)
#
# Returns a list with A, B, C, H, the loss f, fit percentage fp, iteration
# count, CPU time, and the "intrinsic eigenvalue" matrices La, Lb, Lc.
#
# NOTE(review): relies on helpers permnew() (cyclic mode permutation of an
# unfolded array) and orth() defined elsewhere in the package.
T2func <-
function(X,n,m,p,r1,r2,r3,start,conv,model,A,B,C,H){
  X=as.matrix(X)
  # A fixed (identity) component matrix implements the Tucker2 variants.
  if (model==1){
    C=diag(r3)
  }
  if (model==2){
    B=diag(r2)
  }
  if (model==3){
    A=diag(r1)
  }
  cputime=system.time({
    # initialize A, B and C
    ss=sum(X^2)      # total sum of squares of the data
    dys=0            # NOTE(review): never used below -- presumably leftover
    if (start==0){
      cat("Rational ORTHONORMALIZED start",fill=TRUE)
      # rational starts via eigendecompositions
      if (model!=3){
        EIG=eigen(X%*%t(X))
        A=EIG$vectors[,1:r1]
      }
      Z=permnew(X,n,m,p)		# yields m x p x n array
      if (model!=2){
        EIG=eigen(Z%*%t(Z))
        B=EIG$vectors[,1:r2]
      }
      Z=permnew(Z,m,p,n)		# yields p x n x m array
      if (model!=1){
        EIG=eigen(Z%*%t(Z))
        C=EIG$vectors[,1:r3]
      }
    }
    if (start==1){
      cat("Random ORTHONORMALIZED starts",fill=TRUE)
      # When a mode has fewer levels than components, orthonormalize a square
      # random matrix and truncate its rows.
      if (model!=3){
        if (n>=r1){
          A=orth(matrix(runif(n*r1,0,1),n,r1)-.5)
        } else{
          A=orth(matrix(runif(r1*r1,0,1),r1,r1)-.5)
          A=A[1:n,]
        }
      }
      if (model!=2){
        if (m>=r2){
          B=orth(matrix(runif(m*r2,0,1),m,r2)-.5)
        } else{
          B=orth(matrix(runif(r2*r2,0,1),r2,r2)-.5)
          B=B[1:m,]
        }
      }
      if (model!=1){
        if (p>=r3){
          C=orth(matrix(runif(p*r3,0,1),p,r3)-.5)
        } else{
          C=orth(matrix(runif(r3*r3,0,1),r3,r3)-.5)
          C=C[1:p,]
        }
      }
    }
    # Update Core (optimal H given A, B, C; skipped when the user supplies H)
    if (start!=2){
      Z=permnew(t(A)%*%X,r1,m,p)
      Z=permnew(t(B)%*%Z,r2,p,r1)
      H=permnew(t(C)%*%Z,r3,r1,r2)
    }
    # Evaluate f
    if (start==2){
      # Reconstruct Xhat from the user-supplied components and core.
      Z=B%*%permnew(A%*%H,n,r2,r3)
      Z=C%*%permnew(Z,m,r3,n)
      Z=permnew(Z,p,n,m)     # Z = Xhat, nxmxp
      f=sum((X-Z)^2)         # use full formula, taking into account possibility of nonoptimal core in start
    } else{
      # With an optimal core, loss reduces to ss - ||H||^2.
      f=ss-sum(H^2)
    }
    cat(paste("Tucker2 function value at start is ",f),fill=TRUE)
    iter=0
    fold=f+2*conv*f   # seed the previous-loss value so the loop runs at least once
    # Alternating least squares: update each free component matrix in turn,
    # then the core, until the relative loss decrease drops below `conv`.
    while (fold-f>f*conv){
      iter=iter+1
      fold=f
      if (model!=3){
        # update A (Z=X*C'x B' - GS Z*Z'*A)
        Z=permnew(X,n,m,p)
        Z=permnew(t(B)%*%Z,r2,p,n)
        Z=permnew(t(C)%*%Z,r3,n,r2)   # yields n x r2 x r3 array
        A=qr.Q(qr(Z%*%(t(Z)%*%A)),complete=FALSE)
      }
      if (model!=2){
        # update B
        Z=permnew(X,n,m,p)
        Z=permnew(Z,m,p,n)
        Z=permnew(t(C)%*%Z,r3,n,m)
        Z=permnew(t(A)%*%Z,r1,m,r3)   # yields m x r3 x r1 array
        B=qr.Q(qr(Z%*%(t(Z)%*%B)),complete=FALSE)
      }
      if (model!=1){
        # update C
        Z=permnew(t(A)%*%X,r1,m,p)
        Z=permnew(t(B)%*%Z,r2,p,r1)   # yields p x r1 x r2 array
        C=qr.Q(qr(Z%*%(t(Z)%*%C)),complete=FALSE)
      }
      # Update Core
      Z=permnew(t(A)%*%X,r1,m,p)
      Z=permnew(t(B)%*%Z,r2,p,r1)
      H=permnew(t(C)%*%Z,r3,r1,r2)
      # Evaluate f
      f=ss-sum(H^2)
      if ((iter%%10)==0){
        cat(paste("Tucker2 function value after iteration ",iter," is ",f),fill=TRUE)
      }
    }
  })
  ss=sum(X^2)
  fp=100*(ss-f)/ss   # fit percentage: explained share of the total SS
  # compute "intrinsic eigenvalues"
  # eigenvalues for A-mode:
  La=H%*%t(H)
  Y=permnew(H,r1,r2,r3)
  Lb=Y%*%t(Y)
  Y=permnew(Y,r2,r3,r1)
  Lc=Y%*%t(Y)
  cat(paste("Tucker2 function value is",f,"after",iter,"iterations", sep=" "),fill=TRUE)
  cat(paste("Fit percentage is",fp,"%",sep=" "),fill=TRUE)
  cat(paste("Procedure used",(round(cputime[1],2)),"seconds", sep=" "),fill=TRUE)
  # Bundle all results into the return list.
  out=list()
  out$A=A
  out$B=B
  out$C=C
  out$H=H
  out$f=f
  out$fp=fp
  out$iter=iter
  out$cputime=cputime[1]
  out$La=La
  out$Lb=Lb
  out$Lc=Lc
  return(out)
}
|
9b8420c4976b7cd76f73cbfbd951d11f163eb4b3
|
e7b6b9754ebffd6db69611f2ca49edb1f2752399
|
/man/user_creation_manager.Rd
|
018e662e6c0f642e9220ad01fd81aadee3feaf61
|
[] |
no_license
|
8280567/shinyauth
|
59e56e3d95e3a15170997b3d3afa15d4623a6287
|
5dc2bd03331c98c2f9d081e1f326b544cb3cf11e
|
refs/heads/master
| 2021-09-05T10:58:10.608826
| 2018-01-26T17:27:04
| 2018-01-26T17:27:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 361
|
rd
|
user_creation_manager.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_user.R
\name{user_creation_manager}
\alias{user_creation_manager}
\title{This function allows an admin to create a new user of the app}
\usage{
user_creation_manager(input, output, session, auth)
}
\description{
This function allows an admin to create a new user of the app
}
|
463304868ceb7a58c6982c5331dfa49cfd9bb09a
|
5e94b522bd93166db71ffe6b214edb3daa706908
|
/Materiell fra tidl semestre/h18/Gruppe 2/scripts/4seminar.R
|
9c49b4020ecda564f739523a711ffc6ce8714bd1
|
[] |
no_license
|
liserodland/stv4020aR
|
c3411a6a61f0e1dcdab1f6d7577ae707b86100d0
|
78cd1fade56c05c55cc86dd9adcb72f77a9884d3
|
refs/heads/master
| 2023-08-05T04:26:54.567357
| 2021-09-10T10:42:10
| 2021-09-10T10:42:10
| 288,154,313
| 0
| 7
| null | 2020-08-17T10:47:59
| 2020-08-17T10:47:58
| null |
UTF-8
|
R
| false
| false
| 12,793
|
r
|
4seminar.R
|
#' ## Disposisjon
#' 1. Laste inn data (.rda / .RData)
#' 2. Omkoding #advanced
#' 4. Multinomisk logistisk regresjon
#' 5. Rangert logistisk regresjon
#'
#' ## Laste inn data
#' Denne gangen skal vi laste inn et datasett lagret i en R-fil. Dette er litt anerledes enn funksjonene vi har gjort tidligere (altså read.\*()-familien). Det finnes to typer filer som R-data kan lagres i: *.rda* og *.RData*. Disse er, såvidt jeg vet, helt identiske i format; de har bare forskjellig filtypenavn. Vi skal bruke European Social Survey, men bare med enhetene fra Norge (kommer tilbake med flere land når vi skal ta flernivåanalyse).
#'
## ----lasterda------------------------------------------------------------
rm(list = ls())
load("./data/ess_norge.rda")
head(ess_nor, 3)
#'
#' Her oppretter R objektet for oss (se "ess_nor" i environment). Analysen vi skal gjøre er å se om tillit til politikere påvirker hvilket parti man stemmer på. Avhengig variabel blir derfor variabelen **party_vote_short**. Kan dere gjette på hvilken type regresjon vi skal gjøre?
#'
#' ## Variabler
#'
#' Tabellen under viser en kort variabelbeskrivelse. Jeg har lagt inn labels i selve datasettet og det skal vi jobbe med i første del denne gangen.
#'
#' | Variabel | Målenivå | Beskrivelse |
#' |---------------------|-----------------|---------------------------------------------------------|
#' | idno | Forholdstall | ID indikator for hver enhet |
#' | party_vote | Nominal | Parti stemt på forrige Stortingsvalg |
#' | party_vote_short | Nominal | Forkortet navn på parti stemt på forrige Stortingsvalg |
#' | gender | Dikotom | Kjønn |
#' | year_born | Forholdstall | År født |
#' | income_feel | Ordinal | Hvordan føler du din økonomiske situasjon er for tiden? |
#' | income_decile | Ordinal | Husholningens inntekt i desiler |
#' | trust_parl | Ordinal | Hvor mye stoler du på det nasjonale parlamentet? |
#' | trust_legalsys | Ordinal | Hvor mye stoler du på rettsvesenet? |
#' | trust_police | Ordinal | Hvor mye stoler du på politiet? |
#' | trust_politicians | Ordinal | Hvor mye stoler du på politikere? |
#' | trust_polparties | Ordinal | Hvor mye stoler du på politiske partier? |
#' | trust_eurparl | Ordinal | Hvor mye stoler du på det europeiske parlamentet? |
#' | trust_unitednations | Ordinal | Hvor mye stoler du på FN? |
#'
#'
#' ## Avhengig variabel
#' La oss først kikke litt på vår avhengige variabel. Dette kan man gjøre med både tabeller og figurer, men jeg liker best figurer.
#'
## ----deskAV--------------------------------------------------------------
table(ess_nor$party_vote_short, useNA = "always")
library(ggplot2)
ggplot(ess_nor, aes(x = party_vote_short, fill = party_vote_short)) +
geom_bar()
party_noNA <- ess_nor[which(is.na(ess_nor$party_vote_short) == FALSE), ]
ggplot(party_noNA, aes(x = party_vote_short, fill = party_vote_short)) +
geom_bar() +
scale_fill_manual(values = c("darkred", "darkblue", "blue", "yellow4", "seashell4",
"forestgreen", "red1", "darkgreen", "red2", "green")) +
labs(x = "Partistemmer", y = "Frekvens", fill = "Parti") +
scale_y_continuous(breaks = seq(0, 300, 50)) +
theme_minimal() +
theme(panel.grid.minor = element_blank(),
panel.grid.major.x = element_blank(),
legend.position = "none")
#'
#' Her har jeg lagt inn et plot som viser antall enheter i data som stemte på de forskjellige partiene og et plot der jeg fikser litt på det estetiske. **ggplot** har en milliard måter å endre på plots på, så her er det bare å leke seg til perfeksjon.
#'
#' For selve innholdet i plottet kan vi merke oss at vi har noen partier som er veldig små. Dette gjelder spesielt partiene *Rødt*, *MDG*, og *Kystpartiet*.
#'
#' ## Uavhengig variabel
#' Variabelen vi skal fokusere på er **trust_politicians**. Derfor er det lurt å også kikke litt på denne. Fra tabellen ser vi at den går fra 0 til 10, men hva betyr egentlig det?
#'
## ----deskUV--------------------------------------------------------------
table(ess_nor$trust_politicians, useNA = "always")
attributes(ess_nor$trust_politicians)
attr(ess_nor$trust_politicians, "labels")
#'
#' Da vet vi at 0 er ingen tillit og 10 er fulstendig tillit til politikere på denne variabelen. Kan den brukes som en forholdstallsvariabel? Eller burde vi holde oss til ordinalnivå?
#'
#' ## Bivariat multinomisk logistisk regresjon
#' La oss kjøre en bivariat regresjon, med partistemme som avhengig og tillit til politikere som uavhengig, for å se om det er noen problemer med data. Hvis vi ikke gjør noe med vår avhengig variabel, hvilket parti vil ende opp som referansekategori og er dette en rimelig referansekategori?
#'
#' Multinomisk logistisk regresjon kan kjøres med noen forskjellige pakker i R (det ligger faktisk ikke inne i base-pakken). Vi kjøre med pakken **nnet** her, og funksjonen `multinom()`.
#'
## ----bivarMultinom, tidy=FALSE-------------------------------------------
# install.packages("nnet")
library(nnet)
party_reg <- multinom(party_vote_short ~ trust_politicians,
data = ess_nor,
na.action = "na.exclude", Hess = TRUE)
summary(party_reg)
# Odds ratios for the multinomial model fitted above
exp(coef(party_reg))
# A quick calculation: the Labour Party (A) vs. the Progress Party (FRP)
exp(0.7870249) / (1 + exp(0.7870249))
exp(0.7870249 + (-0.34755333 * 10)) / (1 + exp(0.7870249 + (-0.34755333 * 10)))
# A simple way to check whether the effect is significant at the 5% level
confint(party_reg)
# We can also look at how the model "thinks" in terms of probabilities
test_set <- data.frame(trust_politicians = 0:10)
predict(party_reg, newdata = test_set)
predict(party_reg, newdata = test_set, type = "probs")
#'
#' It is fairly obvious that our model is not very good. Some problems:
#' 1. Small parties are problematic (small N)
#' 2. We are missing some control variables (perhaps, e.g., young people systematically vote left, while age at the same time affects how much one trusts politicians).
#' 3. Perhaps the dependent variable is ordinal?
#'
#' ## Eliminating small parties
#'
#' First we create a new data set in which we remove units from the small parties. Note that SV, KRF, SP and V are also very close to the pain threshold here; we may run into problems with these as well.
#'
## ----subsetLargParties---------------------------------------------------
larger_parties <- ess_nor[which(ess_nor$party_vote_short != "RØDT" &
                          ess_nor$party_vote_short != "KYST" &
                          ess_nor$party_vote_short != "MDG" &
                          is.na(ess_nor$party_vote_short) == FALSE), ]
table(larger_parties$party_vote_short, useNA = "always")
#'
#' ## Fixing control variables
#'
#' We control for four things: income, how satisfied the respondent is with their economic situation, gender and age. The idea is the same for all of them: they are background variables (for trust in politicians), and they may plausibly affect both our dependent and our independent variable (*backdoor path*).
#'
#'
#' ### Two income variables
#'
#' When we subset, the labels on the variables are removed for some reason... so we first have to copy them over to the new data set with the package **labelled** and the function **copy_labels**. Then we can check which values we do not want to keep from the different variables.
#'
## ----incomeCode----------------------------------------------------------
table(larger_parties$income_feel)
attributes(larger_parties$income_feel)
attributes(ess_nor$income_feel)
######
library(labelled)
larger_parties$income_feel <- copy_labels(ess_nor$income_feel,
                                          larger_parties$income_feel)
attr(larger_parties$income_feel, "labels")
######
# Values above 4 are missing-value codes; recode them to NA
larger_parties$income_feel2 <- ifelse(larger_parties$income_feel > 4, NA, larger_parties$income_feel)
table(larger_parties$income_feel2, larger_parties$income_feel, useNA = "always")
#####
larger_parties$income_decile <- copy_labels(ess_nor$income_decile,
                                            larger_parties$income_decile)
attr(larger_parties$income_decile, "labels")
#####
attributes(ess_nor$income_decile)
# Values above 10 are missing-value codes; recode them to NA
larger_parties$income_decile2 <- ifelse(larger_parties$income_decile > 10, NA, larger_parties$income_decile)
table(larger_parties$income_decile2, larger_parties$income_decile, useNA = "always")
#'
#' ### Gender and age
#' Gender seems to be coded in a sensible way, so we do not need to do anything here. Age can be computed by subtracting the year of birth from the year the survey was carried out (2014). Then we center the variable at the median.
#'
## ----genderageCode-------------------------------------------------------
table(larger_parties$gender) # roughly equal counts, so the reference category is not important
larger_parties$age <- 2014 - larger_parties$year_born
summary(larger_parties$age)
larger_parties$age <- larger_parties$age - median(larger_parties$age) # no NAs here
summary(larger_parties$age)
#'
#' ## Multinomial with controls
#' Now we just plug the variables into a regression.
#'
## ----multivariatMultinom-------------------------------------------------
party_reg2 <- multinom(party_vote_short ~ trust_politicians + income_decile2 +
                         income_feel2 + age + gender,
                       data = larger_parties, Hess = TRUE, na.action = "na.exclude")
summary(party_reg2)
confint(party_reg2)
#'
#' This is extremely good practice for interpreting on your own!
#'
## ----predikerteSannsynligheter-------------------------------------------
# Prediction grid: vary trust 0-10, hold the controls at median/typical values
test_set2 <- data.frame(trust_politicians = 0:10,
                        income_decile2 = median(larger_parties$income_decile2, na.rm = TRUE),
                        income_feel2 = median(larger_parties$income_feel2, na.rm = TRUE),
                        age = 0,
                        gender = "female")
predict(party_reg2, newdata = test_set2)
# Bind predicted class probabilities onto the grid for plotting
plot_data <- cbind(test_set2, predict(party_reg2, newdata = test_set2, type = "probs"))
library(ggplot2)
ggplot(plot_data, aes(x = trust_politicians, y = A, group = 1)) + geom_point() + geom_line()
ggplot(plot_data, aes(x = trust_politicians)) +
  geom_point(aes(y = A)) +
  geom_point(aes(y = FRP)) +
  geom_line(aes(y = A, group = 1)) +
  geom_line(aes(y = FRP, group = 1))
#'
#' ## Ordinal logistic (for the especially interested)
#'
#' Below I show code for ordinal (ranked) logistic regression. As Solveig mentioned in the lecture, this is something that may come up as a possible model in a potential term paper or master's thesis.
#'
## ----rangLog-------------------------------------------------------------
# Order the party factor roughly from left to right on the political spectrum
# so the proportional-odds model has a meaningful ordering.
larger_parties$party_vote_short <- factor(larger_parties$party_vote_short,
                levels = c("SV", "A", "SP", "KRF", "V", "H", "FRP"))
library(MASS)
# BUG FIX: the formula previously referred to 'age_sen', which is never
# created anywhere in this script -- the median-centered age variable is
# stored in 'age' (see the genderageCode chunk), and 'age' is also the
# variable used in the multinomial model 'party_reg2'. With 'age_sen' the
# polr() call would fail with "object 'age_sen' not found".
rang_party <- polr(party_vote_short ~ trust_politicians + income_decile2 +
                     income_feel2 + age + gender,
                   data = larger_parties, Hess = TRUE, na.action = "na.exclude")
summary(rang_party)
#'
#' ## Stargazer table
## ----stargazer,results='asis',tidy=FALSE---------------------------------
library(stargazer)
# LaTeX regression tables for the three models (multinomial without and with
# controls, plus the ordinal model). The covariate labels are deliberately
# kept in Norwegian -- they are printed verbatim in the tables.
stargazer(party_reg, font.size = "footnotesize",
          star.cutoffs = c(.05, .01, .001),
          column.sep.width = ".01cm",
          no.space = FALSE,
          covariate.labels = c("Politikertillit (0-10)"))
stargazer(party_reg2, font.size = "footnotesize",
          star.cutoffs = c(.05, .01, .001),
          column.sep.width = ".01cm",
          no.space = FALSE,
          covariate.labels = c("Politikertillit (0-10)", "Inntekt (desil)",
                               "Øk. tilfredshet", "Alder", "Kjønn (mann)"))
stargazer(rang_party, font.size = "footnotesize",
          star.cutoffs = c(.05, .01, .001),
          column.sep.width = ".01cm",
          no.space = FALSE,
          covariate.labels = c("Politikertillit (0-10)", "Inntekt (desil)",
                               "Øk. tilfredshet", "Alder", "Kjønn (mann)"))
#'
## ----ikketenkpådenne, eval=FALSE, echo=FALSE-----------------------------
# knitr housekeeping (kept commented out): extracts the R code from the
# seminar .Rmd source into a plain script.
## knitr::purl("./docs/seminar4.Rmd", output = "./scripts/4seminar.R", documentation = 2)
##
|
a5ba56c27bbd085fe5d3fdcddc704716eb89f778
|
bbf1ae079309eca11270422d3f0d259d1515d430
|
/numerical-tours/r/nt_solutions/segmentation_3_snakes_levelset/exo6.R
|
aad14f9b38112594ea8d187cf0fcc72115301719
|
[
"BSD-2-Clause"
] |
permissive
|
ZichaoDi/Di_MATLABTool
|
5e6a67b613c4bcf4d904ddc47c2744b4bcea4885
|
c071291c63685c236f507b2cb893c0316ab6415c
|
refs/heads/master
| 2021-08-11T07:28:34.286526
| 2021-08-04T18:26:46
| 2021-08-04T18:26:46
| 149,222,333
| 9
| 5
| null | null | null | null |
UTF-8
|
R
| false
| false
| 404
|
r
|
exo6.R
|
# Level-set curve evolution (geodesic active contour / "snake").
# Iteratively evolves the level-set function phi under a weighted
# mean-curvature flow; intermediate zero level sets are drawn in a 2x2 grid.
# NOTE(review): grad(), div(), plot_levelset() and mod() come from the
# numerical-tours toolbox, and phi0, W, niter, tau, eps, n, f0 are defined
# earlier in the tour -- confirm they are in scope before running this chunk.
par(mfrow=c(2,2))
phi <- phi0
k <- 1
# gradient of the (edge-stopping) weight map W, used for the advection term
gW <- grad(W, order=2)
for (i in 1:niter){
  # spatial gradient of the current level-set function
  gD <- grad(phi, order=2)
  # regularized gradient norm, bounded below by eps to avoid division by zero
  d <- pmax(eps*array(1, c(n,n)), sqrt(apply(gD**2, c(1,2), sum)))
  # normalized gradient field g = grad(phi)/|grad(phi)|
  g <- gD/array(rep(d,2), c(dim(d),2))
  # evolution speed: weighted curvature term plus advection along grad(W)
  G <- W*d*div(g[,,1], g[,,2], order=2) + apply(gW*gD, c(1,2), sum)
  # explicit time step of size tau
  phi <- phi + tau*G
  # display the contour four times over the course of the iterations
  if (mod(i, as.integer(niter/4))==0){
    k <- k+1
    plot_levelset(phi, f0, lw=2)
  }
}
|
18c45f759ade3c9b714c058ddc8c23d185027960
|
25ec9519eeb158a777ed9865dfb57aab0809c60d
|
/VERSIONS/LatticeKrig.OLD/R/LatticeKrig.R
|
bffbccd8d854e4763ee961c92167a5799791badc
|
[] |
no_license
|
NCAR/LatticeKrig
|
cccdcaba2d16c96b722de6a2e499e09f5c36ccf2
|
5caccca61f52b53d215d9375dedb8553e6ee75b7
|
refs/heads/master
| 2021-09-14T10:49:13.136451
| 2021-08-23T21:58:31
| 2021-08-23T21:58:31
| 61,819,138
| 7
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,496
|
r
|
LatticeKrig.R
|
# LatticeKrig is a package for analysis of spatial data written for
# the R software environment .
# Copyright (C) 2012
# University Corporation for Atmospheric Research (UCAR)
# Contact: Douglas Nychka, nychka@ucar.edu,
# National Center for Atmospheric Research, PO Box 3000, Boulder, CO 80307-3000
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#' A crisp wrapper around LKrig where many default values are exercised.
#'
#' @param x Matrix of spatial locations.
#' @param y Vector of observations.
#' @param Z Optional matrix of covariates passed through to LKrig.
#' @param nu Smoothness parameter forwarded to LKrig.setup.
#' @param nlevel Number of multiresolution levels forwarded to LKrig.setup.
#' @param a.wght Lattice weight parameter forwarded to LKrig.setup.
#' @param NC Number of basis grid points; chosen automatically if NULL.
#' @param ... Further arguments passed to LKrig.setup.
LatticeKrig<- function(x, y, Z=NULL, nu=1, nlevel=4, a.wght=4.01, NC=NULL, ...){
# a crisp wrapper where many default values are exercised.
  if( is.null(NC)){
     N<- length( y)
     Nbasis<- 4^(nlevel)/ 3
     NCtest<- 2*sqrt( N/(Nbasis))
# NCtest chosen so that NCtest^2 * ( 1 + 4 + 16 + 64) ~~ number of basis functions
# will be about 4*N.
     NC<- max(5, NCtest )
   }
# BUG FIX: forward the user-supplied nu, nlevel and a.wght to LKrig.setup.
# Previously the hard-coded values nu=1, nlevel=4, a.wght=4.01 were passed,
# so these three function arguments were silently ignored.
  LKinfo<- LKrig.setup( x=x, NC=NC, nu=nu, nlevel=nlevel, a.wght=a.wght,...)
# find lambda by maximum likelihood, then fit with the MLE
  obj<- LKrigFindLambda( x=x,y=y, Z=Z, LKinfo=LKinfo)
  LKrig( x,y,Z=Z, LKinfo=LKinfo, lambda=obj$lambda.MLE)
}
|
43e31ba096494c7d0ef25fb3ac90f53ae8704b2b
|
05235c8fd39b73786a573605e15f241b2d2a812a
|
/FiPSPi.R
|
e9686e5768d9959464fec465de06b9d2d2577e12
|
[
"BSD-3-Clause"
] |
permissive
|
mpc-bioinformatics/FiPSPi
|
88fa24bf9e7bdb4f1689ef3d78b148c8e0d846c1
|
9679c965b08cf1776d5e2a8b055acf693deec5ff
|
refs/heads/main
| 2023-05-07T15:57:43.948010
| 2021-06-01T11:51:53
| 2021-06-01T11:51:53
| 349,067,195
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 35,177
|
r
|
FiPSPi.R
|
##########################################################################
# COPYRIGHT & SUPPORT #
##########################################################################
#
# FiPSPi: Random Forest-based muscle fiber peptide selection pipeline.
#
# This code has been developed by Dr. Michael Turewicz at the Ruhr University
# Bochum, Germany. It is licensed under the BSD 3-Clause License provided in
# the file 'LICENSE.txt'.
#
# For support please write an e-mail to 'michael.turewicz[at]rub.de'.
#
# This is the original version of the code written for the publication
# by Eggers et al. 'Deep proteomic characterization of skeletal muscle fiber
# types by laser microdissection and mass spectrometry (...)'. The latest
# version of FiPSPi can be found here:
# https://github.com/mpc-bioinformatics/FiPSPi
#
##########################################################################
# USER SETTINGS #
##########################################################################
### set path to current working directory
cwd <- "C:/UNI/Publikationen/2021.06.XX_submitted_Britta_Eggers_Fasertypen/FiPSPi"
### set path to data file (relative path to the working directory specified above)
data.path <- paste0(cwd, "/data/Eggers_et_al_data.txt")
### set path to metadata file (relative path to the working directory specified above)
metadata.path <- paste0(cwd, "/data/Eggers_et_al_metadata.txt")
### set output.path where all created output and graphics are saved
output.path <- paste0(cwd, "/FiPSPi_results")
### -------> Note: the above files and folders must already exist and be accessible!
##########################################################################
#                    FURTHER SETTINGS & DEPENDENCIES                     #
##########################################################################
# avoid scientific notation in printed/written numbers
options(scipen=100000)
options(stringsAsFactors=FALSE)
# fixed seed so the random train/test splits and forests are reproducible
set.seed(1234)
library(randomForest)
library(ROCR)
#library(caret)
library(rpart)
library(rpart.plot)
# NOTE(review): heatmap.2() and colorpanel() (used below) belong to the
# 'gplots' package, which is not loaded here -- confirm it is attached
# (e.g. via a dependency of one of the packages above) before running.
##########################################################################
# FUNCTION DEFINITIONS #
##########################################################################
#++++++++++++++++++++++ blueyellow ++++++++++++++++++++++++++++++++++++
# Build a blue-to-yellow color palette with n colors, used for the heatmaps.
# NOTE(review): colorpanel() comes from the 'gplots' package, which is not
# loaded in the dependency section above -- verify it is available.
blueyellow<-function(n){
    colorpanel(n, "blue", "yellow")
}
#++++++++++++++++++++++ blueyellow ++++++++++++++++++++++++++++++++++++
#++++++++++++++++++++++ plotFeaturesHeatmap.2 ++++++++++++++++++++++++++++++++++
# Draw a clustered heatmap (gplots::heatmap.2) for a set of selected features
# across the four fiber-type groups.
#
# Arguments:
#   features    - character vector of row names (feature IDs) to plot (NAs dropped)
#   x           - numeric expression matrix (features x samples)
#   n1..n4      - number of samples per group, in the column order of x;
#                 used to color the column side bar (gray25/gray75/red/blue)
#   output.path - if non-NULL, the heatmap is written to a PNG in this folder;
#                 otherwise it is drawn on the current device
#   file.name   - PNG file name (only used when output.path is non-NULL)
#   description - if TRUE, use elist$genes$Description as row labels.
#                 NOTE(review): 'elist' is a global that is not defined in this
#                 file -- this branch will fail unless elist exists; confirm.
plotFeaturesHeatmap.2 <- function(features=NULL, x=NULL, n1=NULL, n2=NULL, n3=NULL, n4=NULL,
  output.path=NULL, file.name=FALSE, description=FALSE){
    if(is.null(features) || is.null(x) || is.null(n1) || is.null(n2)) {
        stop("ERROR: Not all mandatory arguments have been defined!")
    }
    # drop NA entries and subset the matrix to the requested features
    features <- c(na.exclude(features))
    datamatrix <- x
    brc <- rownames(x)
    rows <- brc %in% features
    datamatrix <- datamatrix[rows,]
    if(description){
        rownames(datamatrix) <- elist$genes$Description[rows]
    }else{
        rownames(datamatrix) <- brc[rows]
    }
    #title <- ""
    # scale row-label size with the number of plotted features
    if(nrow(datamatrix) > 100) {
        cexRowSize <- 0.1
    } else if(nrow(datamatrix) > 40) {
        cexRowSize <- 0.4
    } else if(nrow(datamatrix) > 10) {
        cexRowSize <- 0.8
    }else {
        cexRowSize <- 1.5
    }
    cexColSize <- 0.8
    #Col labels with default distance from heatmap (NA),
    #but aligned to middle of heatmap-cells (0.5); default is c(NA,0)
    adjCol <- c(NA,0.5)
    #defining ColSideColors
    ColSideColors <- c(rep("gray25",n1), rep("gray75",n2), rep("red",n3), rep("blue",n4))
    #defining the correlation-based distance function
    # NOTE(review): my.dist is defined but never used -- the same expression is
    # passed inline as 'distfun' to heatmap.2 below.
    my.dist <- function(x) as.dist((1-cor(t(x)))/2)
    #Defining the dimensions and layout of the heatmap and its color key
    lmat <- rbind(c(5,4),c(0,1),c(3,2))
    lwid <- c(1.5,4)
    lhei <- c(1.5,0.2,4)
    if(!is.null(output.path)){
        # file output branch: render to PNG, then overlay the group legend
        png(
            paste0(output.path,"/", file.name),
            width = 2000,
            height = 2000,
            pointsize = 10,
            res = 300
        )
        heatmap.2(datamatrix,
          distfun=function(x) as.dist((1-cor(t(x)))/2),
          Rowv = TRUE,    #row and col clustering
          Colv = TRUE,
          ColSideColors=ColSideColors,
          col=blueyellow(300),    #color scheme
          scale="row",     #scale by column
          margins=c(15, 15),     #margins around heatmap
          key=TRUE,     #legend is present
          key.title="Color key",    #title for legend
          key.xlab="Expression values",    #label for legend axis
          key.ylab="Density",    #label for legend axis
          keysize=1.1,    #size of legend strip
          key.par=list(cex.lab=1.4),
          symkey=FALSE,    #colors not symmetrical around 0
          density.info="density",    #no density information
          trace="none",
          cexRow=cexRowSize,    #row labels' size
          cexCol=cexColSize,     #column labels' size
          labRow=substr(row.names(datamatrix),1,35),    #row labels
          labCol=colnames(datamatrix),   #column labels: limit to 30 chars
          adjCol=adjCol,
          lmat=lmat,
          lhei=lhei,
          lwid=lwid
        )
        #normalized device coordinates (NDC): c(xmin, xmax, ymin, ymax)
        par(fig=c(0,0.975,0,0.94), new=TRUE)
        # group legend derived from the column-name prefixes (suffix _<digits> stripped)
        legend("topright",      # legend location
          legend = c(unique(gsub("\\_\\d+", "", colnames(datamatrix)))),
          col = c("gray25", "gray75", "red", "blue"),       # color key
          pch = c(15,15),
          cex=1.75,
          bty="n",
          horiz=TRUE
        )
        dev.off()
    } else {
        # interactive branch: same heatmap drawn on the current device
        heatmap.2(datamatrix,
          distfun=function(x) as.dist((1-cor(t(x)))/2),
          Rowv = TRUE,    #row and col clustering
          Colv = TRUE,
          ColSideColors=ColSideColors,
          col=blueyellow(300),    #color scheme
          scale="row",     #scale by column
          margins = c(15,15),     #margins around heatmap
          key=TRUE,     #legend is present
          key.title="Color key",    #title for legend
          key.xlab="Expression values",    #label for legend axis
          key.ylab="Density",    #label for legend axis
          keysize=1.1,    #size of legend strip
          key.par=list(cex.lab=1.4),  #size of legend strip
          symkey=FALSE,    #colors not symmetrical around 0
          density.info="density",    #no density information
          trace="none",
          cexRow=cexRowSize,    #column labels' size
          cexCol=cexColSize,     #column labels' size
          labRow=substr(row.names(datamatrix),1,35),    #row labels
          labCol=colnames(datamatrix),   #column labels: limit to 30 chars
          adjCol=adjCol,
          lmat=lmat,
          lhei=lhei,
          lwid=lwid
        )
        #normalized device coordinates (NDC): c(xmin, xmax, ymin, ymax)
        par(fig=c(0,0.975,0,0.94), new=TRUE)
        legend("topright",      # legend location
          legend = c(unique(gsub("\\_\\d+", "", colnames(datamatrix)))),
          col = c("gray25", "gray75", "red", "blue"),       # color key
          pch = c(15,15),
          cex=1.75,
          bty="n",
          horiz=TRUE
        )
    }
}
#++++++++++++++++++++++ plotFeaturesHeatmap.2 ++++++++++++++++++++++++++++++++++
#+++++++++++++++++++++++++++ rf.rfe +++++++++++++++++++++++++++++++++++++++
# Random Forest-based recursive feature elimination (RF-RFE) for the
# four-class fiber-type problem.
#
# Starting from all features, a balanced random forest (sampsize = class
# minimum for every class) is fitted repeatedly; after each fit the least
# important feature (by MDA or MDG) is removed. The feature set with the best
# OOB performance (total accuracy or mean specificity, depending on
# 'panel.selection.criterion') is returned.
#
# Arguments:
#   iteration   - integer tag used in the output file names (mandatory)
#   datamatrix  - numeric matrix, features x samples; columns ordered by group
#   output.path - folder for per-iteration metrics and importance tables;
#                 if NULL, no files are written
#   label1..4   - class labels, in the column order of datamatrix
#   n1..n4      - number of samples per class (mandatory)
#   panel.selection.criterion - "accuracy" or "specificity"
#   importance.measure        - "MDA" (mean decrease accuracy) or "MDG" (Gini)
#   ntree, mtry - forwarded to randomForest (defaults: 500, sqrt(p))
#   verbose     - print progress and debug information
#
# Returns: character vector of selected feature names (row names).
#
# FIXES vs. the original version:
#   * 'importance.results' was only assigned inside if(!is.null(output.path))
#     but used unconditionally below, so output.path=NULL crashed with
#     "object 'importance.results' not found"; it is now always computed and
#     only the file writes are guarded.
#   * stray debug print() calls are now gated behind 'verbose'.
rf.rfe <- function(iteration=NULL, datamatrix=NULL, output.path=NULL,
  label1="A", label2="B", label3="C", label4="D", n1=NULL, n2=NULL, n3=NULL,
  n4=NULL, panel.selection.criterion="accuracy", importance.measure="MDA",
  ntree=500, mtry=NULL, verbose=FALSE){

    if(is.null(iteration) || is.null(datamatrix) || is.null(n1) ||
      is.null(n2) || is.null(n3) || is.null(n4)) {
        stop("ERROR: Not all mandatory arguments have been defined!")
    }

    # balanced per-class sample size for every tree
    n.min <- min(n1,n2,n3,n4)
    # class label for each column of datamatrix (columns must be grouped!)
    classes <- factor(c(rep(label1, n1), rep(label2, n2), rep(label3, n3), rep(label4, n4)))
    if(verbose){
        print(n.min)
        print(classes)
        print(length(classes))
    }

    # best feature sets found so far, per selection criterion
    acc.best.set.accuracy <- 0
    acc.best.set <- c()
    sens.best.set.sensitivity <- 0
    sens.best.set <- c()
    spec.best.set.specificity <- 0
    spec.best.set <- c()

    # importance accumulated over all elimination rounds
    rf.importance.total <- matrix(0, nrow=nrow(datamatrix), ncol=2)
    colnames(rf.importance.total) <- c("MDA", "MDG")
    rownames(rf.importance.total) <- rownames(datamatrix)

    # header for the per-round metrics file
    if(!is.null(output.path)){
        cat(x="set\ttotalAccuracy\ttotalSensi\ttotalSpeci\tAccuracy1\tSensitivity1\tSpecificity1\tAccuracy2\tSensitivity2\tSpecificity2\tAccuracy3\tSensitivity3\tSpecificity3\tAccuracy4\tSensitivity4\tSpecificity4\tCM11\tCM12\tCM13\tCM14\tCM21\tCM22\tCM23\tCM24\tCM31\tCM32\tCM33\tCM34\tCM41\tCM42\tCN43\tCM44\n",
          file=paste(output.path, "/rfe_", iteration, ".txt", sep=""),
          append=FALSE)
    }
    if(is.null(ntree)){
        ntree <- 500
    }
    iterations <- 0
    # eliminate one feature per round until none are left
    while({p <- nrow(datamatrix)} > 0){
        iterations <- iterations + 1
        if(is.null(mtry)){
            mtry.tmp <- sqrt(p)
        }else{
            mtry.tmp <- mtry
        }
        #~~~~~~~~~~~~~~~CLASSIFIER BEGIN~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        dat <- t(datamatrix)
        model_randomForest <- randomForest(x=dat, y=classes, importance=TRUE, keep.forest=FALSE,
          ntree=ntree, mtry=mtry.tmp, sampsize=c(n.min, n.min, n.min, n.min))
        importance <- importance(model_randomForest)
        # columns 3/4 of the importance matrix: MeanDecreaseAccuracy / Gini
        rf.importance <- data.frame(rownames(importance), importance[,3], importance[,4])
        names(rf.importance) <- c('Var', 'MDA', 'MDG')
        confusion <- model_randomForest$confusion
        if(verbose){
            print(confusion)
        }
        #~~~~~~~~~~~~~~~CLASSIFIER END~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

        # one-vs-rest statistics from the OOB confusion matrix
        ### class 1:
        TP <- confusion[1,1]
        FN <- confusion[1,2]+confusion[1,3]+confusion[1,4]
        FP <- confusion[2,1]+confusion[3,1]+confusion[4,1]
        TN <- confusion[2,2]+confusion[2,3]+confusion[2,4]+confusion[3,2]+confusion[3,3]+confusion[3,4]+confusion[4,2]+confusion[4,3]+confusion[4,4]
        ACCURACY1 <- {TP+TN}/{TP+FP+TN+FN}
        SENSITIVITY1 <- TP/{TP+FN} #=TPR
        SPECIFICITY1 <- TN/{TN+FP} #=(1-FPR)
        ### class 2:
        TP <- confusion[2,2]
        FN <- confusion[2,1]+confusion[2,3]+confusion[2,4]
        FP <- confusion[1,2]+confusion[3,2]+confusion[4,2]
        TN <- confusion[1,1]+confusion[1,3]+confusion[1,4]+confusion[3,1]+confusion[3,3]+confusion[3,4]+confusion[4,1]+confusion[4,3]+confusion[4,4]
        ACCURACY2 <- {TP+TN}/{TP+FP+TN+FN}
        SENSITIVITY2 <- TP/{TP+FN} #=TPR
        SPECIFICITY2 <- TN/{TN+FP} #=(1-FPR)
        ### class 3:
        TP <- confusion[3,3]
        FN <- confusion[3,1]+confusion[3,2]+confusion[3,4]
        FP <- confusion[1,3]+confusion[2,3]+confusion[4,3]
        TN <- confusion[1,1]+confusion[1,2]+confusion[1,4]+confusion[2,1]+confusion[2,2]+confusion[2,4]+confusion[4,1]+confusion[4,2]+confusion[4,4]
        ACCURACY3 <- {TP+TN}/{TP+FP+TN+FN}
        SENSITIVITY3 <- TP/{TP+FN} #=TPR
        SPECIFICITY3 <- TN/{TN+FP} #=(1-FPR)
        ### class 4:
        TP <- confusion[4,4]
        FN <- confusion[4,1]+confusion[4,2]+confusion[4,3]
        FP <- confusion[1,4]+confusion[2,4]+confusion[3,4]
        TN <- confusion[1,1]+confusion[1,2]+confusion[1,3]+confusion[2,1]+confusion[2,2]+confusion[2,3]+confusion[3,1]+confusion[3,2]+confusion[3,3]
        ACCURACY4 <- {TP+TN}/{TP+FP+TN+FN}
        SENSITIVITY4 <- TP/{TP+FN} #=TPR
        SPECIFICITY4 <- TN/{TN+FP} #=(1-FPR)

        ACCURACY_total <- sum(diag(confusion))/ sum(confusion) # total Accuracy
        SENSITIVITY_total <- (SENSITIVITY1 + SENSITIVITY2 + SENSITIVITY3 + SENSITIVITY4) / 4
        SPECIFICITY_total <- (SPECIFICITY1 + SPECIFICITY2 + SPECIFICITY3 + SPECIFICITY4) / 4
        # confusion-matrix entries as a vector (for the metrics file)
        CM <- as.vector(t(confusion[,-ncol(confusion)]))

        if(!is.null(output.path)){
            cat(x=paste(nrow(datamatrix), ACCURACY_total, SENSITIVITY_total, SPECIFICITY_total,
              ACCURACY1, SENSITIVITY1, SPECIFICITY1,
              ACCURACY2, SENSITIVITY2, SPECIFICITY2,
              ACCURACY3, SENSITIVITY3, SPECIFICITY3,
              ACCURACY4, SENSITIVITY4, SPECIFICITY4,
              CM[1], CM[2],CM[3],CM[4],CM[5],CM[6],CM[7],CM[8],CM[9],CM[10],CM[11],CM[12],CM[13],CM[14],CM[15],CM[16], sep="\t"),
              file=paste(output.path, "/rfe_", iteration, ".txt", sep=""), append=TRUE)
            cat(x="\n", file=paste(output.path, "/rfe_", iteration, ".txt",
              sep=""), append=TRUE)
        }
        if(verbose){
            message(paste0("rf.rfe - features: ", p, ", ACCURACY: ", ACCURACY_total), "\n")
        }

        # remember the (smallest, due to '<=') feature set with the best metric
        if(acc.best.set.accuracy <= ACCURACY_total){
            acc.best.set.accuracy <- ACCURACY_total
            acc.best.set <- rownames(datamatrix)
        }
        #if(sens.best.set.sensitivity <= SENSITIVITY){
        #    sens.best.set.sensitivity <- SENSITIVITY
        #    sens.best.set <- rownames(datamatrix)
        #}
        if(spec.best.set.specificity <= SPECIFICITY_total){
            spec.best.set.specificity <- SPECIFICITY_total
            spec.best.set <- rownames(datamatrix)
        }

        # accumulate importance over rounds
        for(j in 1:nrow(rf.importance)){
            rf.importance.total[rf.importance[j,"Var"],"MDA"] <- rf.importance.total[rf.importance[j,"Var"],"MDA"] + rf.importance[j,"MDA"]
            rf.importance.total[rf.importance[j,"Var"],"MDG"] <- rf.importance.total[rf.importance[j,"Var"],"MDG"] + rf.importance[j,"MDG"]
        }

        # sort by the chosen importance measure and drop the weakest feature
        if(importance.measure == "MDA"){
            rf.importance <- rf.importance[order(-rf.importance$MDA),]
        }else if(importance.measure == "MDG"){
            rf.importance <- rf.importance[order(-rf.importance$MDG),]
        }
        if(nrow(rf.importance) != 1){
            next.set <- rf.importance$Var[1:(nrow(importance)-1)]
        }else{
            break
        }
        # keep the original row order of datamatrix
        next.set <-
          rownames(datamatrix)[rownames(datamatrix) %in% next.set]
        datamatrix <- datamatrix[next.set,,drop=FALSE]
    }

    # FIX: always build the accumulated-importance table (it is used below
    # even when no output files are written)
    importance.results <- cbind(rownames(rf.importance.total), rf.importance.total)
    if(!is.null(output.path)){
        write.table(x=importance.results, file=paste0(output.path, "/rfe_importance_", iteration, ".txt"), append=TRUE, sep="\t", eol="\n", dec=".", row.names=FALSE, col.names=TRUE)
    }

    if(panel.selection.criterion == "accuracy"){
        if(verbose){
            message(paste("feature selection - optimal number of features: ",
              length(acc.best.set), sep=""), "\n")
            message(paste("feature selection - best accuracy: ",
              acc.best.set.accuracy, sep=""), "\n")
        }
        if(!is.null(output.path)){
            write.table(x=importance.results[acc.best.set,], file=paste0(output.path, "/rfe_importance_panel.txt"), append=TRUE, sep="\t", eol="\n", dec=".", row.names=FALSE, col.names=TRUE)
        }
        return(acc.best.set)
    }else if(panel.selection.criterion == "specificity"){
        if(verbose){
            message(paste("feature selection - optimal number of features: ",
              length(spec.best.set), sep=""), "\n")
            message(paste("feature selection - best specificity: ",
              spec.best.set.specificity, sep=""), "\n")
        }
        if(!is.null(output.path)){
            write.table(x=importance.results[spec.best.set,], file=paste0(output.path, "/rfe_importance_panel.txt"), append=TRUE, sep="\t", eol="\n", dec=".", row.names=FALSE, col.names=TRUE)
        }
        return(spec.best.set)
    }
}
#+++++++++++++++++++++++++++ rf.rfe +++++++++++++++++++++++++++++++++++++++
#+++++++++++++++++++++++++++ classify.rf.evaluation +++++++++++++++++++++++++++++++++++++++
# Train a random forest on 'trainset' and evaluate it on 'testset'.
#
# Arguments:
#   trainset, testset          - numeric matrices, features x samples (mandatory)
#   classes_train, classes_test - factors with the class of each sample.
#       NOTE(review): the 4x4 indexing of the confusion table below assumes
#       both factors carry all four class levels in the same order -- confirm
#       at the call site, otherwise the TP/FP/... indexing misaligns.
#   iteration, label1..4, output.path, ... - accepted for interface
#       compatibility but not used inside this function.
#
# Returns: named numeric vector with total and per-class (one-vs-rest)
# accuracy, sensitivity and specificity.
classify.rf.evaluation <- function(iteration=NULL, trainset=NULL, testset=NULL, classes_train=NULL,
  classes_test=NULL, label1="A", label2="B", label3 = "C", label4 = "D", output.path=NULL, ...){
    if(is.null(trainset) || is.null(testset)) {
        stop("ERROR: Not all mandatory arguments have been defined!")
    }
    # randomForest expects samples in rows
    train.dat <- t(as.matrix(trainset))
    test.dat <- t(as.matrix(testset))
    model.rf <- randomForest(x=train.dat, y=classes_train, importance=TRUE, keep.forest=TRUE)
    pred.rf <- predict(object=model.rf, newdata=test.dat, type="response", norm.votes=TRUE)
    # rows = observed classes, columns = predicted classes
    confusion <- table(observed = classes_test, predicted = pred.rf)
    # one-vs-rest statistics per class
    ### class 1:
    TP <- confusion[1,1]
    FN <- confusion[1,2]+confusion[1,3]+confusion[1,4]
    FP <- confusion[2,1]+confusion[3,1]+confusion[4,1]
    TN <- confusion[2,2]+confusion[2,3]+confusion[2,4]+confusion[3,2]+confusion[3,3]+confusion[3,4]+confusion[4,2]+confusion[4,3]+confusion[4,4]
    ACCURACY1 <- {TP+TN}/{TP+FP+TN+FN}
    SENSITIVITY1 <- TP/{TP+FN} #=TPR
    SPECIFICITY1 <- TN/{TN+FP} #=(1-FPR)
    ### class 2:
    TP <- confusion[2,2]
    FN <- confusion[2,1]+confusion[2,3]+confusion[2,4]
    FP <- confusion[1,2]+confusion[3,2]+confusion[4,2]
    TN <- confusion[1,1]+confusion[1,3]+confusion[1,4]+confusion[3,1]+confusion[3,3]+confusion[3,4]+confusion[4,1]+confusion[4,3]+confusion[4,4]
    ACCURACY2 <- {TP+TN}/{TP+FP+TN+FN}
    SENSITIVITY2 <- TP/{TP+FN} #=TPR
    SPECIFICITY2 <- TN/{TN+FP} #=(1-FPR)
    ### class 3:
    TP <- confusion[3,3]
    FN <- confusion[3,1]+confusion[3,2]+confusion[3,4]
    FP <- confusion[1,3]+confusion[2,3]+confusion[4,3]
    TN <- confusion[1,1]+confusion[1,2]+confusion[1,4]+confusion[2,1]+confusion[2,2]+confusion[2,4]+confusion[4,1]+confusion[4,2]+confusion[4,4]
    ACCURACY3 <- {TP+TN}/{TP+FP+TN+FN}
    SENSITIVITY3 <- TP/{TP+FN} #=TPR
    SPECIFICITY3 <- TN/{TN+FP} #=(1-FPR)
    ### class 4:
    TP <- confusion[4,4]
    FN <- confusion[4,1]+confusion[4,2]+confusion[4,3]
    FP <- confusion[1,4]+confusion[2,4]+confusion[3,4]
    TN <- confusion[1,1]+confusion[1,2]+confusion[1,3]+confusion[2,1]+confusion[2,2]+confusion[2,3]+confusion[3,1]+confusion[3,2]+confusion[3,3]
    ACCURACY4 <- {TP+TN}/{TP+FP+TN+FN}
    SENSITIVITY4 <- TP/{TP+FN} #=TPR
    SPECIFICITY4 <- TN/{TN+FP} #=(1-FPR)
    ACCURACY_total <- sum(diag(confusion))/ sum(confusion) # total Accuracy
    SENSITIVITY_total <- (SENSITIVITY1 + SENSITIVITY2 + SENSITIVITY3 + SENSITIVITY4) / 4
    SPECIFICITY_total <- (SPECIFICITY1 + SPECIFICITY2 + SPECIFICITY3 + SPECIFICITY4) / 4
    results <- c(acc_total = ACCURACY_total, sens_total = SENSITIVITY_total, spec_total = SPECIFICITY_total,
      acc1 = ACCURACY1, sens1 = SENSITIVITY1, spec1 = SPECIFICITY1,
      acc2 = ACCURACY2, sens2 = SENSITIVITY2, spec2 = SPECIFICITY2,
      acc3 = ACCURACY3, sens3 = SENSITIVITY3, spec3 = SPECIFICITY3,
      acc4 = ACCURACY4, sens4 = SENSITIVITY4, spec4 = SPECIFICITY4)
    return(results)
}
#+++++++++++++++++++++++++++ classify.rf.evaluation +++++++++++++++++++++++++++++++++++++++
#+++++++++++++++++++++++++++ combinePerf +++++++++++++++++++++++++++++++++++++++
# Pool the prediction scores and true classes of several evaluation runs
# (elements of 'perf.list' with fields $pred and $classes), compute one
# combined ROC curve with ROCR, and write it to a PNG in 'cwd'. The AUC is
# shown in the plot title. 'label1' and 'label2' are accepted for interface
# compatibility but are not used.
combinePerf <- function(perf.list, label1, label2, filename, cwd){
    if(is.null(perf.list)) {
        stop("ERROR: Not all mandatory arguments have been defined!")
    }
    # concatenate scores and class labels across all runs
    pooled.scores <- c()
    pooled.classes <- c()
    for(run.idx in seq_along(perf.list)){
        pooled.scores <- c(pooled.scores, perf.list[[run.idx]]$pred)
        pooled.classes <- c(pooled.classes, as.character(perf.list[[run.idx]]$classes))
    }
    # ROC curve (TPR vs. FPR) and area under the curve over the pooled data
    pred.obj <- prediction(pooled.scores, pooled.classes)
    roc.curve <- performance(pred.obj, measure="tpr", x.measure="fpr")
    auc.value <- performance(pred.obj,"auc")@y.values[[1]]
    png(paste(cwd,"/roc_perflist_", filename, ".png",sep=""), width=2000, height=2000, pointsize=17, res=300)
    plot(roc.curve, col="red", lwd=5,main=paste("AUC: ", round(auc.value,4), sep=""))
    abline(a=0,b=1,lwd=2,lty=2,col="gray")   # chance diagonal
    dev.off()
}
#+++++++++++++++++++++++++++ combinePerf +++++++++++++++++++++++++++++++++++++++
##########################################################################
#                                  MAIN                                  #
##########################################################################
#-----------------------------> Begin: Data Import & Pre-Processing
dat <- read.table(data.path, sep="\t", header=TRUE, na.strings = "NA")
sampledat <- read.table(metadata.path, sep="\t", header=TRUE, na.strings = "NA")
rownames(sampledat) <- sampledat$ID
# columns 1-8 are annotation, columns 9-64 are the 56 samples
colnames(dat)[1:8] <- c("IsProteinGroupSpecific", "IsProteotypic", "StrippedSequence", "ProteinAccessions", "ProteinDescriptions", "ProteinGroups", "ProteinNames", "Qvalue")
colnames(dat)[9:64] <- rownames(sampledat)
label1 <- "Type I"
label2 <- "Type IIa"
label3 <- "Type IIb"
label4 <- "Type IIx"
# sample IDs and group sizes per fiber type (matched by ID prefix)
group1 <- grep("Type I_", rownames(sampledat), value=TRUE)
group2 <- grep("Type IIa_", rownames(sampledat), value=TRUE)
group3 <- grep("Type IIb_", rownames(sampledat), value=TRUE)
group4 <- grep("Type IIx_", rownames(sampledat), value=TRUE)
n1 <- length(group1)
n2 <- length(group2)
n3 <- length(group3)
n4 <- length(group4)
descriptive.columns <- dat[,1:8]
descriptive.columns2 <- dat[,"StrippedSequence",drop=FALSE]
rownames(descriptive.columns2) <- dat[,"StrippedSequence"]
# sanitize accession strings: drop the "sp|" prefix, turn separators into "_"
descriptive.columns[,"ProteinAccessions"] <- gsub("sp\\|", "", descriptive.columns[,"ProteinAccessions"])
descriptive.columns[,"ProteinAccessions"] <- gsub("\\|", "\\_", descriptive.columns[,"ProteinAccessions"])
descriptive.columns[,"ProteinAccessions"] <- gsub("\\;", "\\_", descriptive.columns[,"ProteinAccessions"])
# build unique row IDs "<accession>_peptideK" for the peptides of each protein
accession.list <- unique(descriptive.columns[,"ProteinAccessions"])
# NOTE(review): row.IDs is sized to the number of accessions, not peptides;
# R silently grows it on assignment below, so the result is still complete.
row.IDs <- vector(mode="character", length=length(accession.list))
for(i in 1:length(accession.list)){
    idx <- grep(accession.list[i], descriptive.columns[,"ProteinAccessions"], fixed=TRUE)
    n.peps <- length(idx)
    row.IDs[idx] <- paste0(accession.list[i], "_peptide", 1:n.peps)
}
rownames(dat) <- row.IDs
rownames(descriptive.columns) <- row.IDs
descriptive.columns <- cbind(row.IDs, descriptive.columns)
descriptive.columns2 <- cbind(row.IDs, descriptive.columns2)
write.table(x=descriptive.columns, file=paste0(output.path, "/descriptive.columns.txt"), sep="\t", col.names=TRUE, row.names=FALSE)
# keep only the numeric sample columns; drop peptides with any missing value
dat <- data.matrix(dat[,9:64])
print(paste0("original feature number: ", nrow(dat)))
dat <- na.omit(dat)
print(paste0("feature number after removing features containing NAs: ", nrow(dat)))
write.table(x=cbind(rownames(dat),dat), file=paste0(output.path, "/original_data.txt"), sep="\t", col.names=TRUE, row.names=FALSE)
# reorder columns so the four groups are contiguous (required by rf.rfe)
dat <- dat[,c(group1,group2,group3,group4)]
#-----------------------------> End: Data Import & Pre-Processing
#-----------------------------> Begin: Diff. Analysis
# class label per sample column (ID suffix "_<digits>" stripped)
classes <- gsub("\\_\\d+", "", colnames(dat))
# back-transform from log2 scale for fold-change (mean ratio) computation
rawdat <- 2^dat
p.values <- vector(mode="numeric", length=nrow(dat))
max.mean.ratios <- vector(mode="numeric", length=nrow(dat))
for(i in 1:nrow(dat)){
    # one-way ANOVA across the four fiber types, per peptide
    p.values[i] <- summary(aov(dat[i,] ~ classes))[[1]][["Pr(>F)"]][1]
    # one-vs-rest mean ratio per group, folded so it is always >= 1
    mean.ratio1 <- max( mean(rawdat[i,group1])/mean(c(rawdat[i,group2], rawdat[i,group3], rawdat[i,group4])), mean(c(rawdat[i,group2], rawdat[i,group3], rawdat[i,group4]))/mean(rawdat[i,group1]) )
    mean.ratio2 <- max( mean(rawdat[i,group2])/mean(c(rawdat[i,group1], rawdat[i,group3], rawdat[i,group4])), mean(c(rawdat[i,group1], rawdat[i,group3], rawdat[i,group4]))/mean(rawdat[i,group2]) )
    mean.ratio3 <- max( mean(rawdat[i,group3])/mean(c(rawdat[i,group1], rawdat[i,group2], rawdat[i,group4])), mean(c(rawdat[i,group1], rawdat[i,group2], rawdat[i,group4]))/mean(rawdat[i,group3]) )
    mean.ratio4 <- max( mean(rawdat[i,group4])/mean(c(rawdat[i,group1], rawdat[i,group2], rawdat[i,group3])), mean(c(rawdat[i,group1], rawdat[i,group2], rawdat[i,group3]))/mean(rawdat[i,group4]) )
    max.mean.ratios[i] <- max(mean.ratio1, mean.ratio2, mean.ratio3, mean.ratio4)
}
# Benjamini-Hochberg FDR correction across all peptides
p.values.adj <- p.adjust(p.values, method="fdr")
names(p.values.adj) <- rownames(dat)
diff.analysis.output <- cbind(names(p.values.adj), p.values, p.values.adj, max.mean.ratios)
colnames(diff.analysis.output) <- c("Feature", "P-value (Anova)", "Adj. p-value", "Max. mean ratio")
write.table(file=paste0(output.path, "/diff_analysis_output.txt"), x=diff.analysis.output,
  row.names=FALSE, col.names=TRUE, sep="\t")
#-----------------------------> End: Diff. Analysis
#-----------------------------> Begin: Peptide Selection via Random Forest-based Recursive Feature Elimination
criterion <- "accuracy" #alternative criterion: specificity
imp.measure <- "MDA"
features <- rf.rfe(iteration=1, datamatrix=dat, output.path=output.path, label1="Type I",
  label2="Type IIa", label3="Type IIb", label4="Type IIx", n1=n1, n2=n2, n3=n3, n4=n4,
  panel.selection.criterion=criterion,
  importance.measure=imp.measure, verbose=TRUE)
# export the selected panel with its peptide sequences
selection.results <- cbind(features, descriptive.columns[features,"StrippedSequence"])
colnames(selection.results) <- c("Feature", "Sequence")
write.table(file=paste0(output.path, "/selected_features.txt"), x=selection.results,
  sep="\t", row.names=FALSE, col.names=TRUE)
# plot accuracy over the elimination path (read back from the rfe metrics file)
rfe.iterations <- read.table(paste0(output.path, "/rfe_1.txt"), sep="\t", header=TRUE, na.strings = "NA")
p <- nrow(dat)
png(paste(output.path,"/rf-rfe_featNumber_vs_accuracy.png",sep=""), width=4000, height=4000, pointsize=30, res=300)
plot(p-rfe.iterations$set, rfe.iterations[,"totalAccuracy"], type="l", xaxt="n", xlab="Feature number",
  ylab="Classification accuracy", main="RF-RFE: Feature number vs. accuracy \n (total accuracy)",
  pch=19, lwd=12, col=adjustcolor("red", alpha=0.3), cex=1)
points(p-rfe.iterations$set, rfe.iterations[,"totalAccuracy"], pch=19, col=adjustcolor("red", alpha=1.0), cex=0.5)
axis(side=1, label=p:1, at=1:p)
dev.off()
#-----------------------------> End: Peptide Selection via Random Forest-based Recursive Feature Elimination
#-----------------------------> Begin: Validation with Training Set
### 1000 times train/test split for model validation
testruns <- 1000
all.idx <- colnames(dat)
# one row of performance metrics per random train/test split
perf.final <- matrix(nrow = testruns, ncol = 15)
colnames(perf.final) = c("acc_total", "sens_total", "spec_total",
                         "acc1", "sens1", "spec1",
                         "acc2", "sens2", "spec2",
                         "acc3", "sens3", "spec3",
                         "acc4", "sens4", "spec4")
for(i in 1:testruns){
    cat("final classification: ", i, "\r")
    # stratified random test set: a few samples per fiber-type group
    test.idx1 <- sample(group1, 5) # n1=15
    test.idx2 <- sample(group2, 3) # n2=13
    test.idx3 <- sample(group3, 5)
    test.idx4 <- sample(group4, 3)
    test.idx <- c(test.idx1, test.idx2, test.idx3, test.idx4)
    train.idx <- setdiff(all.idx, test.idx)
    # BUG FIX: removed 'nrounds = best.nrounds' and 'max_depth = best.max_depth'
    # from this call -- neither variable is defined anywhere in this script
    # (apparent leftovers from a gradient-boosting variant). They were only
    # tolerated because they landed unevaluated in '...' via lazy evaluation.
    classi <- classify.rf.evaluation(iteration=i, trainset=dat[features,train.idx,drop=FALSE],
      testset=dat[features,test.idx,drop=FALSE],
      classes_train=as.factor(gsub("\\_\\d+", "", train.idx)),
      classes_test=as.factor(gsub("\\_\\d+", "", test.idx)),
      label1="Type I", label2="Type IIa", label3="Type IIb", label4="Type IIx", output.path=NULL)
    perf.final[i,] <- classi
}
write.table(perf.final, file = paste0(output.path, "/validation.txt"), row.names = FALSE, sep = "\t")
# heatmap of the selected panel (only meaningful for more than two features)
if(length(features) > 2){
    plotFeaturesHeatmap.2(features=features, x=rawdat, n1=n1, n2=n2, n3=n3, n4=n4, output.path=output.path, file.name="heatmap.png", description=FALSE)
}
#-----------------------------> End: Validation with Training Set
#-----------------------------> Begin: PCA using all peptides without NAs
pca.dat <- dat
group.vec <- c(
  "Type I",
  "Type IIa",
  "Type IIb",
  "Type IIx"
)
# semi-transparent colors, one per fiber-type group
col.vec <- c(
  adjustcolor("navy", alpha=0.3),
  adjustcolor("red", alpha=0.3),
  adjustcolor("darkorchid", alpha=0.3),
  adjustcolor("darkgreen", alpha=0.3)
)
# PCA on samples (observations = columns of dat, hence the transpose)
pcdat <- prcomp(t(pca.dat), center=TRUE, scale=TRUE)
scores <- pcdat$x
# score plot for the first two principal components (points only)
for (i in 1:2){
    for (j in i:2){
        if (i<j){
            # symmetric axis limits with a 10% margin
            XLIM <- c(-max(abs(scores[,i])), max(abs(scores[,i])))
            XLIM <- XLIM+(XLIM*0.1)
            YLIM <- c(-max(abs(scores[,j])), max(abs(scores[,j])))
            YLIM <- YLIM+(YLIM*0.1)
            png(paste(output.path, "/01_pca_", i, "_", j, "_filteredFeatures.png", sep=""), width=3600, height=3600, pointsize=15, res=600)
            plot(scores[group1,i], scores[group1,j], xlab=paste("PC", i, sep=""), ylab=paste("PC", j, sep=""), xlim=XLIM, ylim=YLIM, pch=20, col=col.vec[1], main="PCA", cex=2)
            points(scores[group2,i], scores[group2,j], pch=20, col=col.vec[2], cex=2)
            points(scores[group3,i], scores[group3,j], pch=20, col=col.vec[3], cex=2)
            points(scores[group4,i], scores[group4,j], pch=20, col=col.vec[4], cex=2)
            legend("topleft", legend=group.vec[1:4], col=col.vec[1:4], pch=20, cex=0.75, bg="transparent")
            dev.off()
        }
    }
}
for (i in 1:2){
for (j in i:2){
if (i<j){
XLIM <- c(-max(abs(scores[,i])), max(abs(scores[,i])))
XLIM <- XLIM+(XLIM*0.1)
YLIM <- c(-max(abs(scores[,j])), max(abs(scores[,j])))
YLIM <- YLIM+(YLIM*0.1)
png(paste(output.path, "/02_pca_", i, "_", j, "_filteredFeatures.png", sep=""), width=3600, height=3600, pointsize=15, res=600)
plot(scores[group1,i], scores[group1,j], xlab=paste("PC", i, sep=""), ylab=paste("PC", j, sep=""), xlim=XLIM, ylim=YLIM, pch=20, col=col.vec[1], main="PCA", cex=2)
points(scores[group2,i], scores[group2,j], pch=20, col=col.vec[2], cex=2)
points(scores[group3,i], scores[group3,j], pch=20, col=col.vec[3], cex=2)
points(scores[group4,i], scores[group4,j], pch=20, col=col.vec[4], cex=2)
text(scores[group1,i], scores[group1,j], labels=group1, col="navy", cex=0.4)
text(scores[group2,i], scores[group2,j], labels=group2, col="red", cex=0.4)
text(scores[group3,i], scores[group3,j], labels=group3, col="darkorchid", cex=0.4)
text(scores[group4,i], scores[group4,j], labels=group4, col="darkgreen", cex=0.4)
legend("topleft", legend=group.vec[1:4], col=col.vec[1:4], pch=20, cex=0.75, bg="transparent")
dev.off()
}
}
}
#-----------------------------> End: PCA using all peptides without NAs
#-----------------------------> Begin: PCA using selected peptides
# Same PCA/plotting procedure as above, but restricted to the peptides chosen
# by the recursive feature elimination (`features`). Output files are tagged
# "_selectedFeatures" instead of "_filteredFeatures".
pca.dat <- dat[features,]
group.vec <- c(
"Type I",
"Type IIa",
"Type IIb",
"Type IIx"
)
col.vec <- c(
adjustcolor("navy", alpha=0.3),
adjustcolor("red", alpha=0.3),
adjustcolor("darkorchid", alpha=0.3),
adjustcolor("darkgreen", alpha=0.3)
)
pcdat <- prcomp(t(pca.dat), center=TRUE, scale=TRUE)
scores <- pcdat$x
# Unlabelled PC1-vs-PC2 scatter plot of the samples.
for (i in 1:2){
for (j in i:2){
if (i<j){
XLIM <- c(-max(abs(scores[,i])), max(abs(scores[,i])))
XLIM <- XLIM+(XLIM*0.1)
YLIM <- c(-max(abs(scores[,j])), max(abs(scores[,j])))
YLIM <- YLIM+(YLIM*0.1)
png(paste(output.path, "/01_pca_", i, "_", j, "_selectedFeatures.png", sep=""), width=3600, height=3600, pointsize=15, res=600)
plot(scores[group1,i], scores[group1,j], xlab=paste("PC", i, sep=""), ylab=paste("PC", j, sep=""), xlim=XLIM, ylim=YLIM, pch=20, col=col.vec[1], main="PCA", cex=2)
points(scores[group2,i], scores[group2,j], pch=20, col=col.vec[2], cex=2)
points(scores[group3,i], scores[group3,j], pch=20, col=col.vec[3], cex=2)
points(scores[group4,i], scores[group4,j], pch=20, col=col.vec[4], cex=2)
legend("topleft", legend=group.vec[1:4], col=col.vec[1:4], pch=20, cex=0.75, bg="transparent")
dev.off()
}
}
}
# Same plots with sample names overlaid.
for (i in 1:2){
for (j in i:2){
if (i<j){
XLIM <- c(-max(abs(scores[,i])), max(abs(scores[,i])))
XLIM <- XLIM+(XLIM*0.1)
YLIM <- c(-max(abs(scores[,j])), max(abs(scores[,j])))
YLIM <- YLIM+(YLIM*0.1)
png(paste(output.path, "/02_pca_", i, "_", j, "_selectedFeatures.png", sep=""), width=3600, height=3600, pointsize=15, res=600)
plot(scores[group1,i], scores[group1,j], xlab=paste("PC", i, sep=""), ylab=paste("PC", j, sep=""), xlim=XLIM, ylim=YLIM, pch=20, col=col.vec[1], main="PCA", cex=2)
points(scores[group2,i], scores[group2,j], pch=20, col=col.vec[2], cex=2)
points(scores[group3,i], scores[group3,j], pch=20, col=col.vec[3], cex=2)
points(scores[group4,i], scores[group4,j], pch=20, col=col.vec[4], cex=2)
text(scores[group1,i], scores[group1,j], labels=group1, col="navy", cex=0.4)
text(scores[group2,i], scores[group2,j], labels=group2, col="red", cex=0.4)
text(scores[group3,i], scores[group3,j], labels=group3, col="darkorchid", cex=0.4)
text(scores[group4,i], scores[group4,j], labels=group4, col="darkgreen", cex=0.4)
legend("topleft", legend=group.vec[1:4], col=col.vec[1:4], pch=20, cex=0.75, bg="transparent")
dev.off()
}
}
}
#-----------------------------> End: PCA using selected peptides
#-----------------------------> Begin: Boxplots
# One boxplot per selected peptide, comparing its abundance across the four
# fiber-type groups; the title shows the peptide sequence (looked up in
# `descriptive.columns2`) and its adjusted p-value.
for(i in 1:length(features)){
idx <- match(features[i], rownames(dat))
# Column 1 of descriptive.columns2 holds the feature ID, column 2 its
# human-readable (sequence) description.
seq <- descriptive.columns2[descriptive.columns2[,1] == features[i],2]
current.p.value <- formatC(p.values.adj[idx], format = "e", digits = 2)
png(paste0(output.path,"/boxplot_", i, "_", features[i], ".png"), width=2000, height=2000, pointsize=15, res=300)
boxplot(dat[features[i],group1], dat[features[i],group2], dat[features[i],group3], dat[features[i],group4], main=paste0(seq, "\np-value: ", current.p.value), names=c(label1, label2, label3, label4), col=col.vec[1:4], cex.axis=1.25, cex.main=1.5)
dev.off()
}
#-----------------------------> End: Boxplots
#-----------------------------> Begin: Decision Tree
# Fit a classification tree on the selected peptides (samples as rows) and
# plot it, using the same four group colors as the PCA plots for the nodes.
col.list <- list(
adjustcolor("navy", alpha=0.3),
adjustcolor("red", alpha=0.3),
adjustcolor("darkorchid", alpha=0.3),
adjustcolor("darkgreen", alpha=0.3)
)
rpartdat <- data.frame(classes, t(dat[features,]), stringsAsFactors=TRUE)
# Replace the raw feature IDs (columns 2..) with their readable descriptions
# so the tree's split labels are interpretable.
for(i in 1:length(features)){
colnames(rpartdat)[i+1] <- descriptive.columns2[descriptive.columns2[,1] == features[i],2]
}
rpart.model <- rpart(classes~., data=rpartdat, method="class")
png(paste0(output.path,"/rpart-plot.png"), width=2000, height=2000, pointsize=15, res=300)
# type=2 / extra=101: split labels below nodes, show class counts and
# percentages (see rpart.plot documentation).
rpart.plot(rpart.model, roundint=FALSE,type=2,extra=101,box.palette=col.list)
dev.off()
#-----------------------------> End: Decision Tree
|
0844c1c527945deeb24fdee28a6d3c1201662709
|
e6c363415d45418d3a9d2e4fa965fa6b063f7562
|
/R/gl.recalc.metrics.r
|
7745056e27be3a208270dd50a07146c252fd152b
|
[] |
no_license
|
hakancengiz1/dartR
|
4a5c53d028d179c07568def79aa36c899adb225d
|
51e2fb65bd8cf7b4cdc42d6f621280b557ae39d6
|
refs/heads/master
| 2020-04-29T04:01:12.910064
| 2019-02-05T23:15:34
| 2019-02-05T23:15:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,136
|
r
|
gl.recalc.metrics.r
|
#' Recalculate locus metrics when individuals or populations are deleted from a genlight \{adegenet\} object
#'
#' When individuals are deleted from a genlight object generated by DArT, the locus metrics no longer
#' apply. For example, the Call Rate may be different considering the subset of individuals, compared
#' with the full set. This script recalculates the affected locus metrics handled here -- AvgPIC,
#' CallRate and MAF -- by delegating to utils.recalc.avgpic, utils.recalc.callrate and
#' utils.recalc.maf. Metrics that depend on the raw sequence data (e.g. RepAvg, TrimmedSequence)
#' remain unaltered, as they cannot be reconstructed from the genotypes alone.
#'
#' If x@other$loc.metrics is missing, an empty metrics data frame is created first (with a console
#' note that sequence-dependent metrics cannot be created, which limits some downstream functions).
#'
#' The script returns a genlight object with the recalculated locus metadata.
#'
#' @param x -- name of the genlight object containing SNP genotypes [required]
#' @param v -- verbosity: 0, silent or fatal errors; 1, begin and end; 2, progress log ; 3, progress and results summary; 5, full report [default 2]
#' @return A genlight object with the recalculated locus metadata
#' @export
#' @author Arthur Georges (bugs? Post to \url{https://groups.google.com/d/forum/dartr})
#' @examples
#' gl <- gl.recalc.metrics(testset.gl, v=2)
#' @seealso \code{\link{gl.filter.monomorphs}}
gl.recalc.metrics <- function(x, v=2){
# Ensure the slot holding the per-locus metrics exists before the helpers
# try to update it.
if (is.null(x@other$loc.metrics)) {
cat("No loc.metrics found in gl@other, therefore it will be created to hold the loci metrics. Be aware that some metrics such as TrimmedSequence and RepAvg cannot be created and therefore not all functions within the package can be used (e.g. gl2fasta, gl.filter.RepAvg)\n")
x@other$loc.metrics <- data.frame(nr=1:nLoc(x))
}
if (v > 0) {
cat("Starting gl.recalc.metrics: Recalculating locus metrics\n")
}
# Recalculate statistics (each helper updates x@other$loc.metrics in turn).
x <- utils.recalc.avgpic(x,v=v)
x <- utils.recalc.callrate(x,v=v)
x <- utils.recalc.maf(x,v=v)
if (v > 1) {
cat("Note: Locus metrics recalculated\n")
}
if (v > 0) {
cat("Completed gl.recalc.metrics\n\n")
}
return (x)
}
|
5d0d7938189d0fbfd7bd172f18455060c18cc667
|
86b2f27330f1bcfdefc6d05ad7edc8eb047b92d7
|
/demo/factors.R
|
b05adc33d3c74560331e78696f5288b5a80a4fab
|
[] |
no_license
|
nuitrcs/r_intro_june2018
|
7f08de281e984a45fb26ab62816d8e7ae72309f5
|
ecb2921c3d84ec4a39e001296b1047fd4820bcdb
|
refs/heads/master
| 2020-03-19T21:33:25.956004
| 2019-04-22T15:25:44
| 2019-04-22T15:25:44
| 136,942,705
| 8
| 4
| null | 2018-09-09T03:57:31
| 2018-06-11T15:06:04
|
HTML
|
UTF-8
|
R
| false
| false
| 1,832
|
r
|
factors.R
|
# Teaching demo: character vectors vs factors, the pitfalls of assigning
# levels, and ordered factors. Intended to be run line by line in the console.
# difference between character vector and factor
mycolors<-c("red", "blue", "green", "red", "red", "blue")
mycolors
str(mycolors)
mycolors<-factor(mycolors)
mycolors
str(mycolors)
# get levels
levels(mycolors)
# what happens if we set levels? Assigning levels RELABELS the existing
# integer codes in place -- it does not reorder the data.
mycolors
levels(mycolors) <- c("red", "blue", "green")
mycolors
## Uh-oh! we recoded our data!!! AHHHHHHH!!!!!
# fix what we did above
mycolors<-c("red", "blue", "green", "red", "red", "blue")
mycolors<-factor(mycolors)
## ok to assign existing factor values
mycolors
mycolors[4] <- "green"
mycolors
# assigning a new value is problematic (not an existing level):
# R warns "invalid factor level" and inserts NA instead of "pink".
mycolors
mycolors[1]<-"pink"
mycolors
# how to do this properly
levels(mycolors)<-c(levels(mycolors), "pink") # add pink to levels
mycolors[1]<-"pink" # then set value
mycolors
# alternative way to write this starting originally
mycolors<-c("red", "blue", "green", "red", "red", "blue")
mycolors<-factor(mycolors, levels=c(unique(mycolors), "pink"))
mycolors
mycolors[1]<-"pink"
mycolors
# factoring numeric values
answers <- c(1,4,3,3,2,4,1,4)
answers<- factor(answers)
answers
levels(answers) <- c("never", "sometimes", "usually", "always") # recodes
answers
# with label instead -- the safe way: map levels to labels at creation time
answers <- c(1,4,3,3,2,4,1,4)
answers<- factor(answers, levels=1:4, labels=c("never", "sometimes", "usually", "always"))
answers
# ordered factors: comparisons like `<` only work with ordered=TRUE
answers <- factor(c("satisfied", "very satisfied", "very unsatisfied", "unsatisfied", "satisfied"))
answers
table(answers)
answers <- factor(c("satisfied", "very satisfied", "very unsatisfied", "unsatisfied", "satisfied"),
levels=c("very unsatisfied", "unsatisfied", "satisfied", "very satisfied"),
ordered=TRUE)
answers
table(answers)
answers < "satisfied"
# factors are stored as integers (just an artefact of the system)
typeof(mycolors[1])
|
00103ac22dda94212351ec4007526cf871ed8507
|
b0ee5b34efd96c700b13079b1979ded8a4a0a678
|
/cachematrix.R
|
c7e9f7f51b86d6fcc73a688308974038fd3a3be5
|
[] |
no_license
|
Alohany/ProgrammingAssignment2
|
b76deb3b0d537a87bd84da8bdb8731677df84861
|
2fd50a8f77057e080e145b61c8b2a81a7617a7f4
|
refs/heads/master
| 2021-01-17T17:21:02.686319
| 2014-04-23T00:11:35
| 2014-04-23T00:11:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,432
|
r
|
cachematrix.R
|
## These functions allow the user to calculate the inverse of a matrix using the R solve()
## function and then cache the inverse for subsequent retrieval.
## The functions assume that the matrix is invertible and has not changed over time. The
## functions reduce the processing overhead of repeatedly calculating the inverse of the same
## function.
## This function constructs a "special" vector containing four functions that:
## 1 - set the value of the matrix
## 2 - get the value of the matrix
## 3 - set the inverse of the matrix
## 4 - get the inverse of the matrix
## Build a caching wrapper around a matrix: a list of four closures that
## share the matrix `x` and its (lazily computed) inverse via their
## enclosing environment.
##   set(y)          -- replace the matrix and invalidate the cached inverse
##   get()           -- return the stored matrix
##   setinverse(i)   -- store a computed inverse
##   getinverse()    -- return the cached inverse, or NULL if none yet
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  list(
    set = function(y) {
      # Replacing the matrix must drop any stale cached inverse.
      x <<- y
      inv <<- NULL
    },
    get = function() x,
    setinverse = function(inverse) inv <<- inverse,
    getinverse = function() inv
  )
}
## Return the inverse of the matrix held by `x`, using the cache when one is
## available.
##
## `x` should be the caching object created by makeCacheMatrix(); on the
## first call the inverse is computed with solve() and stored via
## x$setinverse(), and subsequent calls return the cached value (announced
## with "getting cached data"). A plain matrix is also accepted for backward
## compatibility, but then a fresh cache is created per call, so nothing
## persists between calls.
##
## Bug fix: the original always wrapped its argument in a brand-new
## makeCacheMatrix(), so getinverse() was always NULL and the cache was
## never hit across calls.
cacheSolve <- function(x, ...) {
  if (is.matrix(x)) {
    # Legacy call style: wrap the raw matrix (no cross-call caching possible).
    x <- makeCacheMatrix(x)
  }
  m <- x$getinverse()
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  m <- solve(data, ...)
  x$setinverse(m)
  m
}
|
d225ed835dfd4dae556d46fb299a04de1b7539a7
|
fe1263d41e239ebb824c689389832068aa976876
|
/inst/example/test_3.R
|
0bbb94e055807efbb07f5a48cd09df5f5c4dcded
|
[
"MIT"
] |
permissive
|
kcf-jackson/typeChecker
|
caee580bdf8030d4b5791bfad0a18159272ee61c
|
39bdf23d4e60811db980168f038294836883d627
|
refs/heads/master
| 2023-03-09T17:34:07.877799
| 2021-02-15T15:38:38
| 2021-02-15T15:38:38
| 300,884,891
| 6
| 1
|
NOASSERTION
| 2021-02-15T15:38:38
| 2020-10-03T13:18:46
|
R
|
UTF-8
|
R
| false
| false
| 387
|
r
|
test_3.R
|
# Example for the typeChecker package. NOTE(review): `?` here is the
# package's annotation operator (not help lookup) -- it appears to tag a
# default argument or return value with an expected type for static checking;
# confirm against the typeChecker documentation.
dog <- function(name = ? character) {
list(name = name) ? dog
}
introduce <- function(x = ? dog) {
sprintf("Woof! My name is %s!", x$name)
}
x <- dog("Napawleon")
introduce(x) # correct usage
introduce("Pawgustus") # type error
# Expected checker output for the erroneous call:
# In the expression:
# introduce("Pawgustus")
# The following type error is found:
# Type mismatch. Inferred: character; Annotated: dog
|
842775d311c954affafbd11cff0b97c5f852f24f
|
0ccdd0abbf3d39f1c5e971e26a60f53719ba6fb4
|
/ml/scripts/proj_nat_gas_forecast/1_R_Scripts/describe.R
|
77f07736816e2360218e0d64c71379d3101b1898
|
[] |
no_license
|
pickle-donut/RScripts
|
9a87bd616ea3cd89a94c98e8438c3bc80432392b
|
2a60daf6cfbeaa194f696daf699b387544f8f163
|
refs/heads/master
| 2022-11-09T00:01:57.999428
| 2020-06-15T23:27:00
| 2020-06-15T23:27:00
| 270,508,904
| 0
| 0
| null | 2020-06-15T23:27:01
| 2020-06-08T03:06:02
|
R
|
UTF-8
|
R
| false
| false
| 1,473
|
r
|
describe.R
|
# Exploratory data analysis of natural-gas price data: summaries, outlier
# boxplots, a time plot, pairwise relationships, histograms with
# skew/kurtosis, and normality checks (Q-Q plots + Shapiro-Wilk).
#libraries
library(psych)
# working dir -- NOTE(review): absolute, machine-specific path; adjust before
# running elsewhere.
setwd("/Volumes/FILES/Homeworks/R&Py/Project")
#reading file
gas_data = read.csv("ProjectData.csv", header = T)
str(gas_data)
# converting Reporting Date to date object
# NOTE(review): strptime() returns POSIXlt, which is awkward inside data
# frames; as.Date() would be the safer choice -- confirm before changing.
gas_data$Report_Date = strptime(as.character(gas_data$Report_Date), "%Y-%m-%d")
str(gas_data)
#summary
summary(gas_data)
describe(gas_data)
# identifying outliers via boxplots
boxplot(gas_data)
boxplot(gas_data$Crude_Price)
boxplot(gas_data$Gas_Price, main="Natural Gas Price")
boxplot(gas_data$Gold_Price, main="Gold Price")
# time series plot (Year extracted as a character column)
year = strftime(gas_data$Report_Date, "%Y")
gas_data$Year = year
plot(gas_data$Year, gas_data$Gas_Price)
# identifying linear relationships between price columns 2:4
pairs(gas_data[,c(2,3,4)], panel = panel.smooth)
# histograms with skewness and kurtosis (psych::skew / psych::kurtosi)
hist(gas_data$Crude_Price, main="Crude Oil Price")
skew(gas_data$Crude_Price)
kurtosi(gas_data$Crude_Price)
hist(gas_data$Gas_Price, main="Natural Gas Price")
skew(gas_data$Gas_Price)
kurtosi(gas_data$Gas_Price)
hist(gas_data$Gold_Price, main="Gold Price")
skew(gas_data$Gold_Price)
kurtosi(gas_data$Gold_Price)
# Assessing normality per variable: Q-Q plot with reference line, then
# Shapiro-Wilk test.
#Crude_Price
qqnorm(gas_data$Crude_Price, main="Crude Oil Price")
qqline(gas_data$Crude_Price, lty=2)
shapiro.test(gas_data$Crude_Price)
#Gas_Price
qqnorm(gas_data$Gas_Price, main="Gas Price")
qqline(gas_data$Gas_Price, lty=2)
shapiro.test(gas_data$Gas_Price)
#Gold_Price
qqnorm(gas_data$Gold_Price, main="Gold Price")
qqline(gas_data$Gold_Price, lty=2)
shapiro.test(gas_data$Gold_Price)
|
68f50e8aca9118e759de3fb137b2cdd7912655cd
|
6433380761f92c0c61c04691d8a9479876a6caab
|
/plot3.R
|
5b801f8c2c9f4ef011af9ea34ae3b317d8355a05
|
[] |
no_license
|
lachrymethod/ExData_Plotting1
|
de562f8204264d792703fa4d4adf280a4eea9da6
|
b8e19d48261f40ebb65fa691bcfefd0638741fab
|
refs/heads/master
| 2020-12-27T06:43:05.248656
| 2015-01-11T01:45:58
| 2015-01-11T01:45:58
| 29,060,669
| 0
| 0
| null | 2015-01-10T15:10:33
| 2015-01-10T15:10:32
| null |
UTF-8
|
R
| false
| false
| 1,398
|
r
|
plot3.R
|
## Exploratory Data Analysis - Course Project 1 Plot 3 Script
## Produces plot3.png: the three energy sub-metering series for 2007-02-01
## and 2007-02-02 from the UCI household power consumption data set.
## Read the data into R (semicolon-separated; expects the file in the
## working directory)
hpc <- read.table("household_power_consumption.txt", sep = ";",
header = TRUE, stringsAsFactors = FALSE)
## Pull out only the 2 days that we need for the graphs (dates are d/m/Y
## strings at this point)
hpc2 <- hpc[hpc$Date == "1/2/2007" | hpc$Date == "2/2/2007",]
## Combine the Date & Time vectors into one and convert them to
## POSIXct class so that is can be manipulated
hpc2$DateTime <- as.POSIXct(paste(hpc2$Date, hpc2$Time),
format = "%d/%m/%Y %H:%M:%S")
## Set the dev environment (PNG file device)
png(file = "plot3.png")
## Create base shell for a plot (type = "n" draws axes only)
par(mar = c(3,5,2,4))
plot(hpc2$DateTime, hpc2$Sub_metering_1, type = "n", xlab = "",
ylab = "Energy sub metering")
## Add the points: one line per sub-metering channel
points(hpc2$DateTime, hpc2$Sub_metering_1, type = "l")
points(hpc2$DateTime, hpc2$Sub_metering_2, col = "red", type = "l")
points(hpc2$DateTime, hpc2$Sub_metering_3, col = "blue", type = "l")
## Create the Legend
legend("topright", pch = "-", col = c("black", "red", "blue"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## turn off dev environment (flushes the PNG to disk)
dev.off()
|
5b507b548405c81f7a1f7103b4d0ff048f3d111a
|
0b40083a0cd55e52b70a5f86d98d6fecc395c5e6
|
/R/CATcanada_logistic.R
|
e831375553b21148d58377c557c0acc403034559
|
[] |
no_license
|
AridanNadav/CAT_VMF_Analysis_scripts
|
ba41b44a56cd9e901a8fd56b892c31aaa6075bd8
|
be4cc732751dd546f442fa9bc3e1659e9a167fa1
|
refs/heads/master
| 2021-05-01T06:09:28.663968
| 2018-02-11T15:54:59
| 2018-02-11T15:54:59
| 121,136,560
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,715
|
r
|
CATcanada_logistic.R
|
## Mixed-effects regression analyses of CAT choice data: compares reaction
## times and choice outcomes between controls
## (sample__1isControl_2isPatients == 1) and patients (== 2), with a random
## intercept per subject.
rm(list = ls())  # NOTE(review): clears the workspace; script is meant for interactive use
library(lme4)
library(lmerTest)
library(ggplot2)
# Absolute path of the input CSV; 999 codes missing values. Adjust per machine.
DataPath <- "/Users/papanadipapanadi/Google Drive/Nadav/Tel_Aviv_University/Tom_LAB/Experiments/CAT_frontal_patients/results/old/CATcanadaLog04-Jan-2018.csv"
CurrentData <- read.table(DataPath, header = TRUE, sep = ',', na.strings = 999)  # load data
CurrentData <- na.omit(CurrentData)
CurrentData$sample__1isControl_2isPatients <- as.factor(CurrentData$sample__1isControl_2isPatients)
CurrentData$sub <- as.factor(CurrentData$sub)
CurrentData$PairType <- as.factor(CurrentData$PairType)
# RT ~ pair type within the control group only (linear mixed model).
gm1 <- lmer(rt ~ PairType + (1|sub),
            data = (CurrentData[CurrentData$sample__1isControl_2isPatients == 1, ]),
            na.action = na.omit)
summary(gm1)
# Does RT predict group membership? (logistic mixed model on the full sample)
gm1 <- glmer(sample__1isControl_2isPatients ~ rt + (1|sub),
             data = (CurrentData), na.action = na.omit, family = binomial)
summary(gm1)
# Does group membership predict choice outcome?
gm1 <- glmer(outcome ~ sample__1isControl_2isPatients + (1|sub),
             data = (CurrentData), na.action = na.omit, family = binomial)
summary(gm1)
# Does RT predict outcome within the patient group?
gm1 <- glmer(outcome ~ rt + (1|sub),
             data = (CurrentData[CurrentData$sample__1isControl_2isPatients == 2, ]),
             na.action = na.omit, family = binomial)
summary(gm1)
# Odds ratios with profile confidence intervals for the fixed effects of the
# last model.
cc <- confint(gm1, parm = "beta_")  ## slow (~ 11 seconds)
ctab <- cbind(est = fixef(gm1), cc)
rtab <- exp(ctab)
print(rtab, digits = 3)
# Bug fix: the script previously ended with the confint() help-page signature
# pasted verbatim -- confint(gm1, parm, level = 0.95, method = c(...), ...) --
# which referenced an undefined variable `parm` and errored. Replaced with a
# working Wald-based call as a fast cross-check of the profile intervals.
confint(gm1, parm = "beta_", level = 0.95, method = "Wald")
|
6fe15a7d55e3c24802dd5f277e858ce431a835bf
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/meaRtools/examples/aggregate_features.Rd.R
|
277577aba447c705e110690b677b1123a24b0882
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 454
|
r
|
aggregate_features.Rd.R
|
# Auto-extracted runnable example from the meaRtools help page for
# aggregate_features(): aggregates spike, network-spike and burst features
# from a list of recording objects.
library(meaRtools)
### Name: aggregate_features
### Title: Aggregate Feature Data
### Aliases: aggregate_features
### Keywords: MEA Aggregate
### ** Examples
# `S` (a recording) and `parameters` ship with the package as example data.
data("S")
data("parameters")
s<-list()
s[[1]]<-S
# suppressWarnings() keeps the example output clean; the calls differ only in
# the feature type requested.
spike_features = suppressWarnings( aggregate_features(s, "spike", parameters))
ns_features = suppressWarnings( aggregate_features(s, "ns", parameters) )
burst_features = suppressWarnings( aggregate_features(s, "burst", parameters) )
|
fb5b645a85209b0f1c1366202ca353b43422640d
|
5e428f64d6b8f3900f041d77355b00e1a07e2fd7
|
/R/hess.R
|
e7bc408588ff1adc0c44e8d16d15fa67ab19f5d0
|
[] |
no_license
|
cran/coloredICA
|
691a14ceff06b69a11c9cf82ef74f7d9acb78626
|
78e27e25296f6ac2f62de819772eac9d7534f42b
|
refs/heads/master
| 2016-09-05T20:28:52.037339
| 2014-03-04T00:00:00
| 2014-03-04T00:00:00
| 31,257,572
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 300
|
r
|
hess.R
|
# Hessian (3x3, symmetric) of a kernel-smoothed local log-likelihood with
# respect to the three parameters in `x`, evaluated at frequency `omega`.
#
# Args (as used here):
#   x        -- numeric(3) parameter vector: x[1] a log-level term, x[2:3]
#               slope terms multiplying the (freq - omega) offsets.
#   omega    -- evaluation frequency.
#   l_period -- log-periodogram values (length n) -- presumably; confirm
#               against the callers in this package.
#   n        -- number of frequencies.
#   freq     -- matrix of frequencies (transposed/offset below).
#   h        -- kernel bandwidth passed to kern() (defined elsewhere).
#
# The single expression computes kernel-weighted column sums of the six
# distinct second-derivative terms (constant, two linear, cross term, two
# quadratic) and assembles them into the symmetric 3x3 matrix via the
# index pattern (1 2 3 / 2 5 4 / 3 4 6). Left unmodified: the expression is
# too dense to restyle safely without the surrounding package context.
hess <-
function(x,omega,l_period,n,freq,h){
he=colSums(kern(omega,h,freq)$v*as.vector(exp(l_period-x[1]-t((t(freq)-omega))%*%x[2:3]))*cbind(rep(1,n),t((t(freq)-omega)),t((t(freq)-omega))[,1]*t((t(freq)-omega))[,2],t((t(freq)-omega))^2))
matrix(c(he[1:3],he[c(2,5,4)],he[c(3,4,6)]),3,3)
}
|
e171672550036c1009e7e0bf5a912962d43c97a5
|
c9607bb9381ff0b8b206e300e46e7bf92444d517
|
/R/module_table.R
|
c3bdf5df546cc8884048e83f7dd11887e3ba96ed
|
[] |
no_license
|
ComputationalProteomics/FusariumResponseInOatMethods
|
01b99957b431456ab3f1a1069d76957d2bde6c1f
|
b9ffface554d091b504b0357c97e7a83b055924d
|
refs/heads/master
| 2020-11-25T18:46:42.291144
| 2020-02-06T13:43:44
| 2020-02-06T13:43:44
| 228,570,939
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,477
|
r
|
module_table.R
|
# Shiny module UI for the proposal/feature table tab.
#
# Builds a tabPanel with a left-hand wellPanel of filter controls (dataset,
# timepoint, contrast type, optional FDR/fold and annotation filters, column
# pickers, align/download buttons) and a right-hand area with a short "About"
# text plus the DT table output. All input/output IDs are namespaced with
# NS(id) so the module can be instantiated multiple times.
#
# Arguments mirror the choice lists shown in the controls; `datasets` only
# contributes its names() to the dataset selector here.
table_panel_ui <- function(
id, timepoints, annot_types, expr_presence, datasets, contrast_types, default_display_cols, optional_display_cols,
default_stat_fields, optional_stat_fields) {
ns <- NS(id)
tabPanel(
id,
fluidPage(
# Inline CSS keeps the filter sidebar narrow.
tags$head(
tags$style(type="text/css", "select { max-width: 240px; }"),
tags$style(type="text/css", ".span4 { max-width: 290px; }"),
tags$style(type="text/css", ".well { max-width: 280px; }")
),
div(
style = "display:flex; align-items:flex-start",
wellPanel(
style = "float:left;",
selectInput(ns("dataset"), "Dataset", choices=names(datasets), selected=names(datasets)[1]),
selectInput(ns("timepoint"), "Timepoint", choices=timepoints, selected="4d"),
selectInput(ns("contrast_type"), "Contrast type", choices=contrast_types, selected="Infection"),
checkboxInput(ns("do_fdr_filter"), "Do FDR and fold filtering", value=FALSE),
# FDR/fold-change controls only shown while the checkbox above is ticked.
conditionalPanel(
sprintf("input['%s'] == 1", ns("do_fdr_filter")),
selectInput(ns("reg_type"), "Regulation", choices=c("all", "same", "contra"), selected="all"),
sliderInput(ns("fdr_cutoff_argamak"), "FDR cutoff cond. 1", value=0.1, step=0.01, min=0, max=1),
sliderInput(ns("fdr_cutoff_belinda"), "FDR cutoff cond. 2", value=0.1, step=0.01, min=0, max=1)
),
checkboxInput(ns("trunc_long"), "Truncate long strings", value=TRUE),
checkboxInput(ns("do_annot_filter"), "Do annotation filtering", value=FALSE),
# Annotation/expression-presence filters, likewise conditional.
conditionalPanel(
sprintf("input['%s'] == 1", ns("do_annot_filter")),
selectInput(ns("annot_type"), "Annotation presence", choices=annot_types, selected=annot_types[1]),
selectInput(ns("arg_expr_pres"), "Cond. 1 expression presence", choices=expr_presence, selected="ALL"),
selectInput(ns("bel_expr_pres"), "Cond. 2 expression presence", choices=expr_presence, selected="ALL")
),
selectInput(ns("table_add_shown_fields"), "Additional shown fields", choices=optional_display_cols, selected=default_display_cols, multiple=TRUE),
selectInput(ns("table_add_stat_fields"), "Additional stat fields", choices=optional_stat_fields, selected=default_stat_fields, multiple=TRUE),
actionButton(ns("button_show_align"), "Do Align"),
downloadButton(ns("download_current"), "Download selection"),
textOutput(ns("table_enrichment_status"))
),
fluidPage(
style = "flex-grow:1; resize:horizontal; overflow-x: scroll; overflow-y: hidden;",
fluidRow(
fluidPage(
h4("About"),
div("Filtering can be performed on FDR (infected-control), presence in transcriptome assemblies for peptides and protein expression"),
div("Do alignment by clicking row and press 'Do align'"),
div("Enrichment is performed for current filtering selection, with all (non-filtered) IDs as the universe")
),
DT::dataTableOutput(ns("table"))
)
)
)
)
)
}
# Shiny module server fragment that renders the filtered feature table.
# Delegates to show_table() with the user's currently selected annotation
# and statistics columns; `table_vars` is the reactive bundle returned by
# table_panel().
table_vis <- function(input, output, session, table_vars) {
  output$table <- DT::renderDataTable({
    # Re-read the reactive inputs each time the table is redrawn.
    annot_fields <- input$table_add_shown_fields
    stat_fields <- input$table_add_stat_fields
    show_table(
      table_vars$cached_filtered_table(),
      table_vars$stat_bases(),
      get_all_cols = FALSE,
      annot_cols = annot_fields,
      stat_cols = stat_fields
    )
  })
}
# Shiny module server for the proposal table tab.
#
# Wires the controls from table_panel_ui() to the data: builds the reactively
# filtered table, switches to the Alignment tab on demand, serves the current
# selection as a TSV download, and returns a bundle of reactives
# (`table_vars`) consumed by sibling modules such as table_vis().
#
# Arguments:
#   input, output, session -- standard Shiny module server arguments.
#   datasets    -- named list of dataset objects (rowData/assay accessors).
#   open_tab    -- callback that activates a tab by name.
#   sample_name -- colData column naming the sample (assay) columns.
# Returns: list of reactives -- stat_bases, cached_full_table,
#   cached_filtered_table, cached_sdf, dataset, timepoint, target_id,
#   contrast_type.
table_panel <- function(input, output, session, datasets, open_tab, sample_name="sample_name") {
  # Jump to the alignment tab when the user presses "Do Align".
  observeEvent(input$button_show_align, {
    open_tab("Alignment")
  })
  table_vars <- list()
  # Prefixes of the two contrast columns for the selected timepoint, e.g.
  # c("Inf_4d", "Ctl_4d") for Infection or c("Arg_4d", "Bel_4d") for Variety.
  table_vars$stat_bases <- reactive({
    if (input$contrast_type == "Infection")
      stat_base <- paste(c("Inf", "Ctl"), input$timepoint, sep="_")
    else if (input$contrast_type == "Variety")
      stat_base <- paste(c("Arg", "Bel"), input$timepoint, sep="_")
    else
      stop("Unknown contrast type: ", input$contrast_type)
    stat_base
  })
  # The feature table after applying all UI filters (see get_filter_table()).
  filtered_table <- reactive({
    get_filter_table(
      datasets[[input$dataset]],
      table_vars$stat_bases(),
      fold_type=input$reg_type,
      fdr_cutoff_arg=input$fdr_cutoff_argamak,
      fdr_cutoff_bel=input$fdr_cutoff_belinda,
      annotation_presence=input$annot_type,
      argamak_expr=input$arg_expr_pres,
      belinda_expr=input$bel_expr_pres,
      do_fdr_filter=input$do_fdr_filter,
      do_string_truncate=input$trunc_long,
      include_sdf=TRUE,
      contrast_type=input$contrast_type
    )
  })
  # Snapshot of the current filter settings as a two-column data frame
  # (Parameter, Value).
  get_settings <- function() {
    settings <- list()
    settings[["dataset"]] <- input$dataset
    settings[["do_fdr_filter"]] <- input$do_fdr_filter
    settings[["fdr_cutoff_argamak"]] <- input$fdr_cutoff_argamak
    # Bug fix: this previously copied input$fdr_cutoff_argamak, so the
    # condition-2 cutoff reported here was always wrong.
    settings[["fdr_cutoff_belinda"]] <- input$fdr_cutoff_belinda
    settings[["annot_type"]] <- input$annot_type
    settings[["stat_bases_1"]] <- table_vars$stat_bases()[1]
    settings[["stat_bases_2"]] <- table_vars$stat_bases()[2]
    settings_df <- do.call("rbind", settings) %>% data.frame()
    settings_df <- cbind(rownames(settings_df), settings_df)
    colnames(settings_df) <- c("Parameter", "Value")
    settings_df
  }
  current_settings <- reactive({
    get_settings()
  })
  # Serve the currently filtered table as a TSV file.
  output$download_current <- downloadHandler(
    filename = function() {
      sprintf("%s.tsv", "current_data")
    },
    content = function(fname) {
      readr::write_tsv(filtered_table(), fname)
    }
  )
  # Unfiltered table: row annotations plus assay (sample) values.
  table_vars$cached_full_table <- reactive({
    cbind(SummarizedExperiment::rowData(datasets[[input$dataset]]) %>%
      data.frame(), assay(datasets[[input$dataset]]) %>% data.frame())
  })
  table_vars$cached_filtered_table <- filtered_table
  # Only the sample (assay) columns of the filtered table.
  table_vars$cached_sdf <- reactive({
    filtered_table() %>% dplyr::select(SummarizedExperiment::colData(datasets[[input$dataset]])[[sample_name]])
  })
  table_vars$dataset <- reactive({
    datasets[[input$dataset]]
  })
  table_vars$timepoint <- reactive({
    input$timepoint
  })
  # ProteinID of the first row currently selected in the rendered table.
  table_vars$target_id <- reactive({
    target_row <- input$table_rows_selected[1]
    target_id <- filtered_table()[target_row, ]$ProteinID
    target_id
  })
  table_vars$contrast_type <- reactive({
    input$contrast_type
  })
  return(table_vars)
}
# Here - insert the three filtering aspects
get_filter_table <- function(dataset, stat_bases, fold_type="all", fdr_cutoff_arg=0.1, fdr_cutoff_bel=0.1,
annotation_presence="all", argamak_expr="ALL", belinda_expr="ALL", do_fdr_filter=TRUE,
do_string_truncate=FALSE, include_sdf=FALSE, contrast_type="Infection") {
no_round_fields <- c(
"EValue"
)
format_col <- function(col) {
if (typeof(col) == "double" && !(col %in% no_round_fields)) {
round(col, 5)
}
else if (typeof(col) == "character") {
substr(col, 1, 20)
}
else {
col
}
}
fold_filter <- function(fold1, fold2, fold_type) {
if (fold_type == "all") {
TRUE
}
else if (fold_type == "same") {
sign(fold1) == sign(fold2)
}
else if (fold_type == "contra") {
sign(fold1) != sign(fold2)
}
else {
stop("Unknown fold type: ", fold_type)
}
}
filtered_df <- SummarizedExperiment::rowData(dataset) %>% data.frame()
if (include_sdf) {
filtered_df <- cbind(filtered_df, assay(dataset) %>% data.frame())
}
if (do_string_truncate) {
filtered_df <- filtered_df %>%
lapply(format_col) %>%
data.frame()
}
if (do_fdr_filter) {
filtered_df <- filtered_df %>%
dplyr::filter(fold_filter(
UQ(as.name(sprintf("%s.logFC", stat_bases[1]))),
UQ(as.name(sprintf("%s.logFC", stat_bases[2]))), fold_type)) %>%
dplyr::filter(
UQ(as.name(sprintf("%s.adj.P.Val", stat_bases[1]))) < fdr_cutoff_arg &
UQ(as.name(sprintf("%s.adj.P.Val", stat_bases[2]))) < fdr_cutoff_bel)
}
if (annotation_presence != "all") {
filtered_df <- filtered_df %>% dplyr::filter(.data$annot_type == annotation_presence)
}
if (argamak_expr != "ALL") {
arg_filter_col <- sprintf("%s.presence", stat_bases[1])
filtered_df <- filtered_df %>% dplyr::filter(UQ(as.name(arg_filter_col)) == argamak_expr)
}
if (belinda_expr != "ALL") {
bel_filter_col <- sprintf("%s.presence", stat_bases[2])
filtered_df <- filtered_df %>% dplyr::filter(UQ(as.name(bel_filter_col)) == belinda_expr)
}
filtered_df
}
# Render `filtered_df` as a DT datatable.
#
# Shows either every column (get_all_cols = TRUE) or the requested annotation
# columns plus the chosen statistics columns for both contrasts
# ("<stat_base>.<stat_col>"). logFC columns are colored red (positive) /
# blue (negative); the whole table uses a compact 80% font.
show_table <- function(filtered_df, stat_bases, stat_cols=NULL, annot_cols=NULL, get_all_cols=FALSE, default_length=25) {
  if (get_all_cols) {
    target_fields <- colnames(filtered_df)
  } else {
    target_fields <- c(
      annot_cols,
      paste(stat_bases[1], stat_cols, sep="."),
      paste(stat_bases[2], stat_cols, sep=".")
    )
  }
  shown_df <- filtered_df %>% dplyr::select(target_fields)
  fold_change_cols <- c(
    sprintf("%s.logFC", stat_bases[1]),
    sprintf("%s.logFC", stat_bases[2])
  )
  shown_df %>%
    DT::datatable(
      selection='single', class="compact cell-border",
      options=list(pageLength=default_length)
    ) %>%
    DT::formatStyle(
      fold_change_cols,
      color = htmlwidgets::JS("value > 0 ? 'red': 'blue'")
    ) %>%
    DT::formatStyle(
      colnames(shown_df),
      fontSize = '80%'
    )
}
|
6dbea64fe22e7394aa0a731cf28d4517c6c033ab
|
eddc6237750e3e1b8ca1605472f09d002d104368
|
/server.R
|
a1db3f37280bbca6fbc31bf85d79eddcdd32ebd9
|
[] |
no_license
|
RcKeller/INFO201-Final-Project
|
3aadbe95135299be610af48818c725a6ff2f138a
|
b63b3164fb8770e305a5d2cc60b969d682c49fe0
|
refs/heads/master
| 2021-04-27T00:27:10.683153
| 2018-03-08T17:57:38
| 2018-03-08T17:57:38
| 123,817,160
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,537
|
r
|
server.R
|
# Builds the server for our Project's shinyapp
the.server <- function(input, output) {
#### DEPENDENCIES
### Utilities
# install.packages('Hmisc')
### Web Packages
# install.packages('shiny')
# install.packages("shinythemes")
# install.packages('httr')
# install.packages('jsonlite')
### Data Manipulation:
# install.packages('dplyr')
# install.packages('tidyr')
### Text Mining:
# install.packages('tidytext')
# install.packages('tm')
# install.packages('SnowballC')
# install.packages('syuzhet')
### Visualization:
# install.packages('ggplot2')
# Loads the libraries that our shinyapp is dependent on.
library(shiny)
library(shinythemes)
library(Hmisc)
library(httr)
library(jsonlite)
library(dplyr)
library(tidyr)
library(tidytext)
library(tm)
library(syuzhet)
library(SnowballC)
library(ggplot2)
## IMPORTS
source('./src/GetProposals.R')
source('./src/AnalyzeProposals.R')
## DATA COLLECTION & ANALYSIS
proposals <- GetProposals()
analysis <- AnalyzeProposals(proposals)
filtered <- reactive({
if (is.null(input$year) | is.null(input$category)) {
if (is.null(input$year) & !is.null(input$category)) {
return(
analysis %>% filter(
Valence <= input$range
& Category %in% input$category
)
)
} else if (is.null(input$category) & !is.null(input$year)) {
analysis %>% filter(
Valence <= input$range
& Year %in% input$year
)
} else {
return(
analysis %>% filter(
Valence <= input$range
)
)
}
} else {
return(
analysis %>% filter(
Valence <= input$range,
Year %in% input$year,
Category %in% input$category
)
)
}
})
output$valence.slider <- renderUI({
minVal <- group_by(analysis, Year) %>% summarize(mins = min(Valence)) %>% select(mins)
sliderInput(
'range',
label = h3('Valence Filter'),
min = ceiling(max(minVal))[1],
max = max(floor(analysis$Valence)),
step = 0.5,
value = round(max(analysis$Valence)/2), #TODOround((min(analysis$Valence)+max(analysis$Valence))/2),
width = '150%',
round = FALSE
)
})
output$categories <- renderUI({
checkboxGroupInput(
'category',
label = h4('Categories of Request to Include in Analysis'),
choices = unique(analysis$Category),
selected = 'Portable'
)
})
# Placeholder for tab-specific UI: every switch branch currently yields
# NULL, so nothing is rendered regardless of the active tab. Kept so the
# branches can be filled in later.
output$dynamic.ui <- renderUI({
  switch(
    input$tab,
    'graph' = NULL,
    'table' = NULL,
    'sum.ui' = NULL
  )
})
# Interactive table of the filtered proposals: request metadata plus a
# subset of the sentiment scores.
output$table <- renderDataTable({
  display.cols <- filtered() %>%
    select(Title, Organization, Asked, Received, Anticipation, Trust, Joy, Valence)
  display.cols
})
# Bar chart of the mean NRC emotion scores across the currently filtered
# proposals (one bar per emotion).
output$graph <- renderPlot({
  emotions <- select(filtered(), Anger, Anticipation, Disgust, Fear,
                     Sadness, Surprise, Trust)
  # colMeans() replaces the previous hand-rolled c(mean(...), mean(...), ...)
  # vector -- same values (NA handling unchanged: na.rm defaults to FALSE).
  emotions.means <- colMeans(emotions)
  barplot(emotions.means, main = "Average Emotion for Given Categories",
          xlab = "Emotions",
          names.arg = colnames(emotions),
          col = c("red", "orange", "yellow", "green",
                  "blue", "purple", "black"))
})
# Scatterplot relating each filtered proposal's valence to the dollars it
# received.
output$scatter <- renderPlot({
  scatter.data <- filtered()
  ggplot(data = scatter.data) +
    geom_point(mapping = aes(x = Valence, y = Received), color = "red") +
    labs(title = "Valence versus Money Received",
         x = "Valence",
         y = "Money Received in Dollars")
})
# Reactive returning a named list of the three summary tables shown on the
# summary tab: statistics of amounts asked/received grouped by Category,
# by Valence bucket, and by Endorsement-count bucket.
sum.data <- reactive({
  # Central tendency / variability of Received, grouped by request Category.
  category.sums <- group_by(filtered(), Category) %>%
    summarize(Avg.amount.asked = mean(Asked), Avg.amount.received = mean(Received),
              Max.received = max(Received), Median.recieved = median(Received), Standard.dev = sd(Received))
  # Same statistics with Valence binned into fixed-width buckets via cut().
  valence.sums <- mutate(filtered(), Valence.Group = cut(filtered()$Valence,
                                                         breaks = c(0, 5, 10, 15, 20, 25, 30, 35, 40),
                                                         labels = c("0 - 5", "5 - 10", "10 - 15", "15 - 20",
                                                                    "20 - 25", "25 - 30", "30 - 35",
                                                                    "35 - 40")))
  valence.sums <- group_by(valence.sums, Valence.Group) %>%
    summarize(Avg.amount.asked = mean(Asked), Avg.amount.received = mean(Received),
              Max.received = max(Received), Median.recieved = median(Received), Standard.dev = sd(Received))
  # Same statistics bucketed by endorsement count. The unlist() suggests
  # Endorsements is a list column -- TODO confirm against GetProposals().
  # The whole step is skipped when no rows survive the filters.
  endorsement.sums <- filtered()
  if (nrow(filtered()) >= 1) {
    endorsement.sums <- mutate(endorsement.sums, Endorsements = unlist(filtered()$Endorsements,
                                                                       use.names = FALSE))
    endorsement.sums <- mutate(endorsement.sums, Endorsement.group = cut(endorsement.sums$Endorsements,
                                                                         breaks = c(-1, 5, 10, 20,
                                                                                    30, 45, 80, 100, 200),
                                                                         labels = c("-1 - 5", "5 - 10",
                                                                                    "10 - 20", "20 - 30",
                                                                                    "30 - 45", "45 - 80",
                                                                                    "80 - 100", "100 - 200")))
    endorsement.sums <- group_by(endorsement.sums, Endorsement.group) %>%
      summarize(Avg.amount.asked = mean(Asked), Avg.amount.received = mean(Received),
                Max.received = max(Received), Median.recieved = median(Received), Standard.dev = sd(Received))
  }
  return(list(cat = category.sums, val = valence.sums, endo = endorsement.sums))
})
# Summary table: statistics grouped by request category (see sum.data()).
output$cat <- renderTable({
  sum.data()$cat
})
# Summary table: statistics grouped by valence bucket.
output$val <- renderTable({
  sum.data()$val
})
# Summary table: statistics grouped by endorsement-count bucket.
output$endo <- renderTable({
  sum.data()$endo
})
}
|
1742d3e743af68922d16aed5615fe29ee9478986
|
47cfba59b500089782891b7d9befc1bec4943ba2
|
/entropy/codingWalks.R
|
2280727d2e9835c959ebfeb38b66b96a9f1198ce
|
[] |
no_license
|
charleywu/ruggedness
|
d770a3dee2d2d6c01b60f355946050b643d1de03
|
11b7dba2119603fdfa075fa8ac0983df379cd413
|
refs/heads/master
| 2021-01-10T13:36:31.596341
| 2016-04-20T12:45:26
| 2016-04-20T12:45:26
| 50,932,824
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,001
|
r
|
codingWalks.R
|
# Coding random walks: classify two-step fitness-change sequences.
# NOTE(review): rm(list = ls()) and relative setwd() calls make this script
# depend on being launched from a specific directory; consider removing.
rm(list=ls())
ptm<-proc.time()  # start timer; total runtime printed at the end
setwd("walks")
setwd("10000") #walksize of walk to load
load("walks.Rdata")  # provides `walkList`, used in the main loop below
# epsilon star values from fastEpsilon.R (1 step epsilon star), one per environment
epsilon_star <- c(0.016756621, 0.003327951, 0.005683390, 0.032014352, 0.003956948, 0.034378054, 0.002517841, 0.009171281, 0.013481127, 0.013129699, 0.001362400, 0.001296913, 0.012938648, 0.001057927)
#from 10 step random walks
#epsilon_star <- c(0.14215015, 0.81185975, 0.55446881, 0.50626783, 0.03850807, 0.25362396,0.35231084, 0.28750825, 0.43181304, 0.16201408, 0.79754580, 0.67709166, 0.26071785, 0.43970627)
# Unary negation helper. The author avoided writing `x < -y` directly out of
# concern that `x<-y` parses as assignment; with a space, `x < -y` is fine.
neg <- function(x) -x
# Code a single step of a random walk (*-*) as -1 (downhill), 0 (neutral,
# i.e. |delta| within the epsilon band), or 1 (uphill).
#
# delta_fitness: fitness change over one step.
# epsilon:       non-negative neutrality threshold.
#
# Simplified from the original three-branch version: "neither < -epsilon
# nor > epsilon" is exactly |delta| <= epsilon, so the explicit middle
# branch, the neg() helper, and the unreachable NA fallback are unneeded.
line_code <- function(delta_fitness, epsilon){
  if (delta_fitness < -epsilon) {
    -1
  } else if (delta_fitness > epsilon) {
    1
  } else {
    0
  }
}
# Code a pair of consecutive step codes (*-*-*) as one label such as "0 1"
# or "-1 -1".
#
# The original enumerated all nine (line1, line2) combinations by hand;
# paste() yields exactly the same strings for inputs in {-1, 0, 1}, and
# "NA NA" for (NA, NA) just like the original default. It also generalizes:
# other inputs now produce their literal labels instead of falling through.
sequence_code <- function(line1, line2){
  paste(line1, line2)
}
# For each environment: classify every consecutive pair of steps in every
# replicate walk at nine epsilon values (0 .. epsilon*), tallying how often
# each of the nine two-step sequences occurs.
walkCodes <- list()
#loop through environments
for (i in 1:length(walkList)){
  walks <- walkList[[i]]  # matrix indexed walks[step, replication]
  estar <- epsilon_star[i]
  #loop through epsilon values
  distributionList <- list() #distribution of sequences over all epsilon values (one count matrix per epsilon)
  epsilonVec <- c(0, estar/128, estar/64, estar/32, estar/16, estar/8, estar/4, estar/2, estar)
  for (e in 1:9){
    epsilon<- epsilonVec[e]
    #loop through replications
    codeMatrix <- matrix(0,nrow=ncol(walks), ncol=9) #9 different sequences x num replications
    # First 6 columns are rugged sequences, 7-8 are smooth, and 9 is neutral.
    colnames(codeMatrix) <- c("0 1", "0 -1", "1 0", "1 -1", "-1 0", "-1 1", "1 1", "-1 -1", "0 0") #first 6 are rugged, 7-8 are smooth, and 9 is neutral
    for (rep in 1:ncol(walks)){
      # Slide a three-step window along the walk.
      for(step in 1:(nrow(walks)-2)){
        step1 <- walks[step,rep]
        step2 <- walks[step+1,rep]
        step3 <- walks[step+2,rep]
        # Code the two fitness deltas as -1/0/1.
        line1 <- line_code(step2-step1, epsilon)
        line2 <- line_code(step3-step2, epsilon)
        # Combine into a sequence label ("0 1", "-1 -1", ...).
        seq_code <- sequence_code(line1,line2)
        # Tally this sequence for the current replication (indexing the
        # row by the named column).
        codeMatrix[rep,][seq_code] <- codeMatrix[rep,][seq_code] + 1
      }
    }
    #aggregate over epsilon values
    distributionList[[e]] <- codeMatrix
  }
  walkCodes[[i]] <- distributionList
  print(i)  # progress indicator
}
save(walkCodes, file="walkCodes.Rdata")
print(proc.time() - ptm)  # total runtime
setwd("..")
setwd("..")
|
363948ea3cc77699db1d568147547ea026b7fb61
|
214b50f6b78062b18e3232c24fc2664c529f890c
|
/R/size_collab.R
|
0f40f8bf1a3df16fb3e33e0b4c718c7f7ec20d61
|
[
"CC-BY-4.0"
] |
permissive
|
xchen101/HEPSurvey
|
579c93076cbd0b54cd8047d8f02183b4e31a8425
|
709a0e4a260268d59b55689026b9acaa554ac302
|
refs/heads/master
| 2021-11-08T06:21:09.730457
| 2021-09-29T23:22:11
| 2021-09-29T23:22:11
| 125,383,529
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 795
|
r
|
size_collab.R
|
# Bar chart of HEP-survey responses to question D4_1 (collaboration size).
# NOTE(review): the absolute setwd() ties this script to one machine.
setwd("/Users/xiaolichen/R projects/HEP survey/HEPSurvey/Data/Base/")
data = read.csv("usable_QC_FA.csv")
library (plotly)
# Size buckets, largest first; this order is preserved on the x axis below.
x = c("> 500 members", "101 - 500 members", "51 - 100 members", "20 - 50 members", "< 20 members")
# Respondent count per bucket, in the same order as x.
y = c(
  count_1 <- length (which (data$D4_1 == "> 500 members")),
  count_2 <- length (which (data$D4_1 == "101 - 500 members")),
  count_3 <- length (which (data$D4_1 == "51 - 100 members")),
  count_4 <- length (which (data$D4_1 == "20 - 50 members")),
  count_5 <- length (which (data$D4_1 == "< 20 members")))
size <- data.frame(x, y)
# Freeze the factor levels so plotly keeps the bucket order rather than
# sorting labels alphabetically.
size$x <- factor(size$x, levels = size[["x"]])
plot_ly(
  x = ~x,
  y = ~y,
  name = "size",
  type = "bar"
) %>%
  layout(title = "Size of collaboration",
         xaxis = list(title = " "),
         yaxis = list(title = " "))
|
87d54776a0e4c31e1d81f92bdc7d6fbab96b4e67
|
17171f285eb9908e7f3011032abefcd29f68d943
|
/data-raw/flattax.R
|
e171c928fac484f11fa7342c5cbe77d48e3a6bcc
|
[] |
no_license
|
zachcp/flattax
|
31b378daf04695a4d506d618186876f9716869b3
|
b2d6c9cbdc17da2220cf43271a63740af4c8dcc0
|
refs/heads/master
| 2022-12-27T05:32:17.623022
| 2020-10-13T19:27:25
| 2020-10-13T19:27:25
| 276,474,080
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,623
|
r
|
flattax.R
|
## code to prepare `flattax` dataset
# Download NCBI's new_taxdump archive, extract rankedlineage.dmp, parse it
# into a data.table with one column per taxonomic rank, and store it as the
# package's `flattax` dataset.
library(data.table)
tdir <- tempdir()
taxfile <- paste0(tdir, "/new_taxdump.zip")
rankedtaxfile <- paste0(tdir, "/rankedlineage.dmp")
download.file(
  "ftp://ftp.ncbi.nlm.nih.gov/pub/taxonomy/new_taxdump/new_taxdump.zip",
  taxfile
)
# Bug fix: `exdir` must be named -- the second positional argument of
# utils::unzip() is `files` (which members to extract), so the archive was
# previously not being extracted into `tdir` and the file.exists() guard
# below would trip.
unzip(taxfile, exdir = tdir)
if (!file.exists(rankedtaxfile)) {
  stop("Error uncompressing Taxfile from NCBI.")
}
# sep = "" reads each record as a single string; fields are split below on
# the "\t|\t" delimiter used by the .dmp format.
flattax <- data.table::fread(
  sep = "",
  header = FALSE,
  rankedtaxfile
)
# Split the raw line into named rank columns, drop the raw column, then
# trim the trailing "\t|" left on the superkingdom field.
flattax[,
  c("tax_id", "tax_name", "species", "genus", "family", "order", "class", "phylum", "kingdom", "superkingdom")
  := tstrsplit(V1, "\t|\t", fixed = TRUE)][
  , V1 := NULL][
  , superkingdom := tstrsplit(superkingdom, "\t", keep = 1)]
# Species Plus Strains: 1941033
usethis::use_data(flattax, overwrite = TRUE)
## Previous Build ----------------------
#library(dplyr)
#library(tidyr)
#library(taxizedb)
#' #' get_species_ids
#' #'
#' #' use taxizedb SQLite dataset to retrieve species below a high_level_taxid
#' #'
#' #' @param high_level_taxid. Required. Integer.
#' #' @importFrom taxizedb downstream
#' #' @importFrom taxizedb src_ncbi
#' #' @export
#' #' @example
#' #' \dontrun{
#' #' species_ids <- get_species_ids()
#' #' }
#' get_species_ids <- function() {
#' db <- src_ncbi()
#' nodes <- tbl(db, "nodes")
#'
#' nodes %>%
#' filter(rank %in% c("species", "strain")) %>%
#' select(tax_id) %>%
#' as.data.frame() %>%
#' .$tax_id
#'
#'
#' #downstream(high_level_taxid, db='ncbi', downto='species')[[1]]$childtaxa_id
#' }
#'
#' #' create_flat_tax_table
#' #'
#' #' @param taxid. Required. The NCBI taxonom ID used to generate the tax table.
#' #' @param taxlevels. Optional. Default \code{c('superkingdom', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species')}
#' #' @importFrom taxizedb classification
#' #' @importFrom dplyr filter %>% slice_head group_by ungroup
#' #' @importFrom tidyr spread
#' #' @export
#' #' @examples
#' #' \dontrun{
#' #' bacteria <- get_species_ids(2)
#' #' tax_table <- Reduce(
#' #' rbind,
#' #' parallel::mclapply(bacteria, create_flat_table, mc.cores = 6))
#' #' }
#' create_flat_tax_table <- function(
#' taxid,
#' taxlevels= c('superkingdom', 'kingdom','phylum', 'class',
#' 'order', 'family', 'genus', 'species')) {
#'
#' df1 <- taxizedb::classification(taxid)[[1]]
#' df1 <- df1 %>% filter(rank %in% taxlevels)
#' df1 <- df1 %>% select(name, rank)
#'
#' # add dummy data so you are not missing any ranks
#' df2 <- rbind(df1, expand.grid(name="Unknown", rank=taxlevels)) %>%
#' group_by(rank) %>%
#' slice_head(1) %>%
#' ungroup()
#'
#' # cast to wide format which can be used in rbind or output to a file
#' cbind( tidyr::spread(df2, rank, name), taxid=taxid)
#' }
#'
#'
#' #' write_taxonomy_flatfile
#' #'
#' #' write the taxonomy flatfile to disk
#' #' @example
#' #' \dontrun {
#' #' write_taxonomy_flatfile(2, "~/Downloads/temp_tax1.txt")
#' #' }
#' write_taxonomy_flatfile <- function(taxid, outfile, ncpus=1, ...) {
#' tmp <- tempfile()
#' print(tmp)
#' on.exit(unlink(tmp))
#'
#' print('Retrieving IDs')
#' ids <- get_species_ids()
#' print('Creating All Taxids')
#'
#' parallel::mclapply(
#' ids,
#' function(id) {
#' #print(id)
#' df <- create_flat_tax_table(id)
#' write.table(df,
#' file = tmp,
#' quote = FALSE,
#' sep="\t",
#' row.names = FALSE,
#' col.names = FALSE,
#' append = TRUE)
#' },
#' mc.cores=ncpus)
#'
#' file.copy(tmp, outfile)
#' }
#' #' create_flattax_db
#' #'
#' #'
#' #'
#' create_flattax_db <- function() {
#' headers <- c('class', 'family', 'genus', 'kingdom', 'order', 'phylum',
#' 'species', 'superkingdom','taxid')
#' write_taxonomy_flatfile(taxid = 1, outfile = "tax.tax", npcus = 6)
#'
#' dt1 <-data.table::fread("out.tax", col.names = headers)
#' con <- RSQLite::dbConnect(RSQLite::SQLite(), dbname=flat_tax_file)
#'
#' RSQLite::dbWriteTable(
#' conn = db,
#' name = 'taxonomy',
#' value = dt1,
#' append = FALSE
#' )
#'
#' RSQLite::dbDisconnect(db)
#'
#' }
#'
# download the ncbi_taxonomy_database
# taxizedb::db_download_ncbi()
#
# write_taxonomy_flatfile(
# taxid = 1,
# outfile = "data-raw/flattax.txt",
# npcus = 1)
#
#
# flattax <- data.table::fread(
# "data-raw/flattax.txt",
# col.names=c('class', 'family', 'genus', 'kingdom', 'order',
# 'phylum','species', 'superkingdom', 'taxid'))
|
adbbb39161064c823711ca1aec348d136aaead83
|
4de0f73abd076eebf027331dd2c392013cd78fb6
|
/Train/mergeData.R
|
6b076e47f12077e94007a0c8b21e4ad82e6ef056
|
[] |
no_license
|
steve3003/MLSP-psychomy
|
e9bd853936312b533c57ca5c62c2e5aaa3698a68
|
453fe9f4709f4a50a0ae4a374f1f240e39a3df84
|
refs/heads/master
| 2016-09-06T15:36:40.201517
| 2014-07-31T09:05:27
| 2014-07-31T09:05:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 316
|
r
|
mergeData.R
|
# Merge the FNC and SBM feature tables with the training labels, keyed on
# subject Id, and write the combined training set as CSV and as ARFF (Weka).
library(RWeka)
fnc = read.csv("train_FNC.csv")
sbm = read.csv("train_SBM.csv")
labels = read.csv("train_labels.csv")
data = merge(fnc, sbm, by="Id")
data = merge(data, labels, by="Id")
data$Id = NULL  # Id is only a join key, not a feature
data$Class = as.factor(data$Class)  # factor -> nominal class attribute in ARFF
write.csv(data, "train.csv", row.names=FALSE)
write.arff(data, "train.arff")
|
59070b4817e8bea6598e56dc71af47f9aa32a62a
|
3c258c7fe3244f4a41dea7d264098ac614eef19a
|
/R/sig-extremes.R
|
0d70cefba42be96cce775aaec598b2fcabda84df
|
[
"LicenseRef-scancode-warranty-disclaimer",
"CC0-1.0",
"LicenseRef-scancode-public-domain-disclaimer"
] |
permissive
|
USGS-R/repgen
|
379be8577f3effbe7067e2f3dc5b5481ca69999e
|
219615189fb054e3b421b6ffba4fdd9777494cfc
|
refs/heads/main
| 2023-04-19T05:51:15.008674
| 2021-04-06T20:29:38
| 2021-04-06T20:29:38
| 31,678,130
| 10
| 25
|
CC0-1.0
| 2023-04-07T23:10:19
| 2015-03-04T20:24:02
|
R
|
UTF-8
|
R
| false
| false
| 733
|
r
|
sig-extremes.R
|
#' Extremes report.
#'
#' S4 generic plus a list method that renders the "extremes" report via
#' \code{startRender()}.
#'
#' @param data Local data (as list), or URL.
#' @param ... Everything else; collected into a list and passed to
#'   \code{startRender()} as the author information.
#' @rdname extremes
#' @importFrom rmarkdown render
#' @examples
#' library(jsonlite)
#' library(dplyr)
#' data <-
#' fromJSON(
#' system.file(
#' 'extdata', 'extremes', 'extremes-example-site-train.json', package = 'repgen'
#' )
#' )
#' extremes(data, 'Author Name')
#' @rdname extremes
#' @export
setGeneric(name="extremes",def=function(data, ...){standardGeneric("extremes")})
#'@aliases extremes
#'@rdname extremes
setMethod("extremes", signature = c("list"),
          definition = function(data, ...) {
            # Everything after `data` becomes the author information.
            author <- list(...)
            return(startRender(data, author, 'extremes'))
          }
)
|
703491d66f3206efdf8ff797f34c59dde98da9fd
|
dc7c1016493af2179bd6834614be0902a0133754
|
/trunc.R
|
d5e4ef6edf53499460ce708a4c69320f3289a76c
|
[] |
no_license
|
ashishjsharda/R
|
5f9dc17fe33e22be9a6031f2688229e436ffc35c
|
fc6f76740a78d85c50eaf6519cec5c0206b2910c
|
refs/heads/master
| 2023-08-08T13:57:05.868593
| 2023-07-30T13:51:56
| 2023-07-30T13:51:56
| 208,248,049
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 34
|
r
|
trunc.R
|
# Truncate a decimal toward zero with trunc() and print the result ([1] 0).
value <- 0.8
truncated <- trunc(value)
print(truncated)
|
2e514ec5412fdee5a0399c323fdeadff09381ce9
|
003b28924c215579620bf47fbdd64184582409c4
|
/tests/testthat.R
|
ea5fd2bdd1ca8edcd83d3210225d22d36b41be26
|
[
"MIT"
] |
permissive
|
prestevez/toypackage
|
87d2472ef9afc77336c653d5392eda10c8c9bc3d
|
1f2e3df865c2d491b92b988b7b2d02c2db6d5eb1
|
refs/heads/master
| 2023-02-22T22:32:24.499335
| 2021-01-26T18:55:14
| 2021-01-26T18:55:14
| 333,181,925
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 64
|
r
|
testthat.R
|
# Standard testthat bootstrap: runs every test under tests/testthat/ for
# the toypackage package (invoked by R CMD check).
library(testthat)
library(toypackage)
test_check("toypackage")
|
4840a73250a7784d3e50c5d73d4ab14b311cb8b6
|
1e76886c729c7e0ae15cf18102fe0f614f9297e0
|
/R/conformal_infer_cv.R
|
75546642f358f53dfb5a48211c33d63089a08934
|
[
"MIT"
] |
permissive
|
tidymodels/probably
|
2abe267ef49a3595d29dd7fdbdf7c836b3103c8d
|
c46326651109fb2ebd1b3762b3cb086cfb96ac88
|
refs/heads/main
| 2023-07-10T13:09:55.973010
| 2023-06-27T17:11:22
| 2023-06-27T17:11:22
| 148,365,953
| 87
| 12
|
NOASSERTION
| 2023-06-27T17:11:24
| 2018-09-11T19:02:58
|
R
|
UTF-8
|
R
| false
| false
| 9,017
|
r
|
conformal_infer_cv.R
|
#' Prediction intervals via conformal inference CV+
#'
#' Nonparametric prediction intervals can be computed for fitted regression
#' workflow objects using the CV+ conformal inference method described by
#' Barber _at al_ (2018).
#'
#' @param object An object from a tidymodels resampling or tuning function such
#' as [tune::fit_resamples()], [tune::tune_grid()], or similar. The object
#' should have been produced in a way that the `.extracts` column contains the
#' fitted workflow for each resample (see the Details below).
#' @param parameters An tibble of tuning parameter values that can be
#' used to filter the predicted values before processing. This tibble should
#' select a single set of hyper-parameter values from the tuning results. This is
#' only required when a tuning object is passed to `object`.
#' @param ... Not currently used.
#' @return An object of class `"int_conformal_cv"` containing the information
#' to create intervals. The `predict()` method is used to produce the intervals.
#' @details
#' This function implements the CV+ method found in Section 3 of Barber _at al_
#' (2018). It uses the resampled model fits and their associated holdout
#' residuals to make prediction intervals for regression models.
#'
#' This function prepares the objects for the computations. The [predict()]
#' method computes the intervals for new data.
#'
#' This method was developed for V-fold cross-validation (no repeats). Interval
#' coverage is unknown for any other resampling methods. The function will not
#' stop the computations for other types of resamples, but we have no way of
#' knowing whether the results are appropriate.
#'
#' @seealso [predict.int_conformal_cv()]
#' @references
#' Rina Foygel Barber, Emmanuel J. Candès, Aaditya Ramdas, Ryan J. Tibshirani
#' "Predictive inference with the jackknife+," _The Annals of Statistics_,
#' 49(1), 486-507, 2021
#' @examplesIf !probably:::is_cran_check()
#' library(workflows)
#' library(dplyr)
#' library(parsnip)
#' library(rsample)
#' library(tune)
#' library(modeldata)
#'
#' set.seed(2)
#' sim_train <- sim_regression(200)
#' sim_new <- sim_regression( 5) %>% select(-outcome)
#'
#' sim_rs <- vfold_cv(sim_train)
#'
#' # We'll use a neural network model
#' mlp_spec <-
#' mlp(hidden_units = 5, penalty = 0.01) %>%
#' set_mode("regression")
#'
#' # Use a control function that saves the predictions as well as the models.
#' # Consider using the butcher package in the extracts function to have smaller
#' # object sizes
#'
#' ctrl <- control_resamples(save_pred = TRUE, extract = I)
#'
#' set.seed(3)
#' nnet_res <-
#' mlp_spec %>%
#' fit_resamples(outcome ~ ., resamples = sim_rs, control = ctrl)
#'
#' nnet_int_obj <- int_conformal_cv(nnet_res)
#' nnet_int_obj
#'
#' predict(nnet_int_obj, sim_new)
#' @export
int_conformal_cv <- function(object, ...) {
  # S3 generic; dispatches to the resample_results / tune_results methods below.
  UseMethod("int_conformal_cv")
}
#' @export
#' @rdname int_conformal_cv
int_conformal_cv.default <- function(object, ...) {
  # Fallback for unsupported object types: fail loudly rather than guess.
  rlang::abort("No known 'int_conformal_cv' methods for this type of object.")
}
#' @export
#' @rdname int_conformal_cv
int_conformal_cv.resample_results <- function(object, ...) {
  # Validate the resampling scheme and required saved columns up front.
  check_resampling(object)
  check_extras(object)
  # One fitted workflow per resample, plus each row's out-of-sample
  # absolute residual |.pred - observed|.
  model_list <- .get_fitted_workflows(object)
  y_name <- tune::.get_tune_outcome_names(object)
  resids <-
    tune::collect_predictions(object, summarize = TRUE) %>%
    dplyr::mutate(.abs_resid = abs(.pred - !!rlang::sym(y_name)))
  new_infer_cv(model_list, resids$.abs_resid)
}
#' @export
#' @rdname int_conformal_cv
int_conformal_cv.tune_results <- function(object, parameters, ...) {
  check_resampling(object)
  # `parameters` must select exactly one candidate model from the grid.
  check_parameters(object, parameters)
  check_extras(object)
  # Fitted workflows and holdout absolute residuals for that one submodel.
  model_list <- .get_fitted_workflows(object, parameters)
  y_name <- tune::.get_tune_outcome_names(object)
  resids <-
    tune::collect_predictions(object, parameters = parameters, summarize = TRUE) %>%
    dplyr::mutate(.abs_resid = abs(.pred - !!rlang::sym(y_name)))
  new_infer_cv(model_list, resids$.abs_resid)
}
#' @export
#' @rdname predict.int_conformal_full
predict.int_conformal_cv <- function(object, new_data, level = 0.95, ...) {
  # Point prediction: average each row's predictions over all resample models.
  mean_pred <-
    purrr::map_dfr(
      object$models,
      ~ predict(.x, new_data) %>% parsnip::add_rowindex()
    ) %>%
    dplyr::group_by(.row) %>%
    dplyr::summarize(estimate = mean(.pred, na.rm = TRUE), .groups = "drop") %>%
    purrr::pluck("estimate")
  # CV+ bounds: quantiles of (mean prediction -/+ holdout absolute residuals).
  lower <-
    purrr::map_dbl(
      as.list(seq_along(mean_pred)),
      ~ .get_lower_cv_bound(mean_pred[.x], object$abs_resid, level = level)
    )
  upper <-
    purrr::map_dbl(
      as.list(seq_along(mean_pred)),
      ~ .get_upper_cv_bound(mean_pred[.x], object$abs_resid, level = level)
    )
  dplyr::tibble(.pred_lower = lower, .pred = mean_pred, .pred_upper = upper)
}
#' @export
print.int_conformal_cv <- function(x, ...) {
  # Brief human-readable summary; returns x invisibly per print() convention.
  cat("Conformal inference via CV+\n")
  cat("preprocessor:", .get_pre_type(x$models[[1]]), "\n")
  cat("model:", .get_fit_type(x$models[[1]]), "\n")
  cat("number of models:", format(length(x$models), big.mark = ","), "\n")
  cat("training set size:", format(length(x$abs_resid), big.mark = ","), "\n\n")
  cat("Use `predict(object, new_data, level)` to compute prediction intervals\n")
  invisible(x)
}
# ------------------------------------------------------------------------------
# helpers

# Low-level constructor: validates the fitted workflows and holdout absolute
# residuals, drops NA residuals and non-workflow elements, and returns the
# classed result object consumed by predict()/print().
new_infer_cv <- function(models, resid) {
  if (!is.numeric(resid)) {
    rlang::abort("Absolute residuals should be numeric")
  }
  na_resid <- is.na(resid)
  if (all(na_resid)) {
    rlang::abort("All of the absolute residuals are missing.")
  }
  if (!is.list(models)) {
    rlang::abort("The model list should be... a list")
  }
  # Keep only trained workflows; abort when there are none at all.
  is_wflow <- purrr::map_lgl(models, workflows::is_trained_workflow)
  if (all(!is_wflow)) {
    rlang::abort(".extracts does not contain fitted workflows")
  }
  if (any(!is_wflow)) {
    models <- models[is_wflow]
  }
  res <- list(
    models = models,
    abs_resid = resid[!na_resid]
  )
  class(res) <- c("conformal_reg_cv", "int_conformal_cv")
  res
}
# Lower CV+ bound: the (1 - level) quantile of (prediction - residuals),
# returned as a bare numeric (names stripped).
.get_lower_cv_bound <- function(pred, resid, level = 0.95) {
  lower_tail <- stats::quantile(pred - resid, probs = 1 - level)
  as.vector(lower_tail)
}
# Upper CV+ bound: the `level` quantile of (prediction + residuals),
# returned as a bare numeric (names stripped).
.get_upper_cv_bound <- function(pred, resid, level = 0.95) {
  upper_tail <- stats::quantile(pred + resid, probs = level)
  as.vector(upper_tail)
}
# First class of the workflow's preprocessor, with any butcher-added
# "butchered_*" class filtered out. Used by the print() method.
.get_pre_type <- function(x) {
  cls <- x %>% workflows::extract_preprocessor() %>% class()
  cls <- cls[!grepl("butchered", cls)]
  cls[1]
}
# Human-readable "model_spec (engine = ...)" label for the workflow's
# fitted parsnip model. Used by the print() method.
.get_fit_type <- function(x) {
  fitted <- x %>% workflows::extract_fit_parsnip()
  res <- paste0(class(fitted$spec)[1], " (engine = ", fitted$spec$engine, ")")
  res
}
# Pull the fitted workflows out of the results' .extracts column. When `prm`
# is supplied (tuning parameter values identifying one submodel), only the
# matching extracts are returned.
.get_fitted_workflows <- function(x, prm = NULL) {
  if (is.null(prm)) {
    # One extract per resample; take the first (only) entry of each.
    res <- purrr::map(x$.extracts, ~ .x$.extracts[[1]])
  } else {
    by_vars <- names(prm)
    res <-
      x %>%
      dplyr::select(.extracts) %>%
      tidyr::unnest(.extracts) %>%
      dplyr::inner_join(prm, by = by_vars) %>%
      purrr::pluck(".extracts")
  }
  res
}
# ------------------------------------------------------------------------------
# checks

# Warn when the object was not produced by plain (single-repeat) V-fold
# cross-validation: the CV+ coverage properties are only established for
# that scheme. Warns rather than aborts, so other schemes still run.
check_resampling <- function(x) {
  rs <- attr(x, "rset_info")
  if (rs$att$class != "vfold_cv") {
    msg <- paste0(
      "The data were resampled using ", rs$label,
      ". This method was developed for V-fold cross-validation. Interval ",
      "coverage is unknown for your resampling method."
    )
    rlang::warn(msg)
  } else {
    if (rs$att$repeats > 1) {
      msg <- paste0(
        rs$att$repeats, " repeats were used. This method was developed for ",
        "basic V-fold cross-validation. Interval coverage is unknown for multiple ",
        "repeats."
      )
      rlang::warn(msg)
    }
  }
  invisible(NULL)
}
# Abort unless `param` selects exactly one submodel from the tuning results
# (joins the candidate grid against the supplied parameter values).
check_parameters <- function(x, param, call = rlang::caller_env()) {
  prms <- tune::.get_tune_parameter_names(x)
  mtr <- tune::collect_metrics(x) %>%
    dplyr::distinct(.config, !!!rlang::syms(prms))
  remain <- dplyr::inner_join(mtr, param, by = names(param))
  if (nrow(remain) > 1) {
    msg <-
      paste0(
        "The `parameters` argument selected ", nrow(remain), " submodels. Only ",
        "1 should be selected."
      )
    rlang::abort(msg, call = call)
  }
  invisible(NULL)
}
# Abort unless the results object carries the columns this method needs:
# `.extracts` (fitted workflows) and `.predictions` (holdout predictions).
check_extras <- function(x, call = rlang::caller_env()) {
  if (!any(names(x) == ".extracts")) {
    msg <-
      paste0(
        "The output must contain a column called '.extracts' that contains the ",
        "fitted workflow objects. See the documentation on the 'extract' ",
        "argument of the control function (e.g., `control_grid()` or ",
        "`control_resamples()`, etc.)."
      )
    # Consistency fix: attribute the error to the caller's frame (was omitted).
    rlang::abort(msg, call = call)
  }
  if (!any(names(x) == ".predictions")) {
    msg <-
      paste0(
        "The output must contain a column called '.predictions' that contains the ",
        "holdout predictions. See the documentation on the 'save_pred' ",
        "argument of the control function (e.g., `control_grid()` or ",
        "`control_resamples()`, etc.)."
      )
    # Bug fix: the argument was misspelled `cal =`, so `call` was silently
    # swallowed by `...` instead of setting the error's call metadata.
    rlang::abort(msg, call = call)
  }
  invisible(NULL)
}
|
4bc734930d8f121f43dca0eee503a9a0a74cb327
|
f9eeb9f033cc0381ab0fe97cc1caa828880ebc8c
|
/Exercise7.R
|
c4360e3c5007f1c572be58dbcd0078094158c37c
|
[] |
no_license
|
msuarez9/IBC_Exercise_07
|
2882fa37aee946f37ea60133b4bbc32a73b5c6df
|
2b8846f7c5ff266e4a5b885e19260ccb32c4ef6e
|
refs/heads/master
| 2020-09-09T16:51:04.948043
| 2019-11-15T04:09:22
| 2019-11-15T04:09:22
| 221,501,697
| 0
| 0
| null | 2019-11-13T16:18:07
| 2019-11-13T16:18:07
| null |
UTF-8
|
R
| false
| false
| 885
|
r
|
Exercise7.R
|
# Mariana Suarez and Jake Fry
# Biocomputing Assignment 7
# Read in the iris.csv file (expected in the working directory).
iris <- read.csv(file="iris.csv")
# 1
# Return the odd-numbered rows (1, 3, 5, ...) of a data frame.
#
# Fixes two defects in the original: it only printed rows one at a time and
# returned nothing despite its stated purpose, and its `1:nrow(df)` loop
# misbehaved on a 0-row data frame (1:0 iterates over 1 and 0). The subset
# is still printed, so interactive output is preserved.
oddRows <- function(df)
{
  row.idx <- seq_len(nrow(df))
  odd <- df[row.idx[row.idx %% 2 == 1], , drop = FALSE]
  print(odd)
  invisible(odd)
}
# 2
# Print "<species> : <count>", where count is the number of rows in `df`
# whose Species column equals the requested species name.
speciesNum <- function(species,df)
{
  species.rows <- df[df$Species == species, ]
  cat(species, ":", nrow(species.rows))
}
# Rows of `df` whose Sepal.Width strictly exceeds the threshold `n`
# (rows with NA width are dropped, matching the which()-based behaviour).
sepalWidth <- function(n,df)
{
  wide.enough <- which(df$Sepal.Width > n)
  df[wide.enough, ]
}
# Write the rows for one species to "<species>.csv" in the working directory.
#
# Args:
#   species: Species name; used both to filter rows and to name the file.
#   df: Data frame with a `Species` column.
speciesFile <- function(species,df)
{
  # Fix: paste() inserts a separating space ("setosa .csv"); paste0()
  # builds the intended file name "setosa.csv".
  write.csv(df[df$Species==species,], paste0(species, ".csv"))
}
|
a3301c63644db3c1463aa63ef0f453e878741ccd
|
1428e35da991101da043708fba1e34bad5098b59
|
/databaza_mrtvych_v_3_1/ui.R
|
9d6ce10317de50931c86e67fd40bc62de8d9acbf
|
[] |
no_license
|
pietro22/R_CODES
|
b3d82bb40ae9b0fd1b87872c756aa0e31ee0449b
|
4c55f9fa804f75a82c155c188da269f2d68d9176
|
refs/heads/master
| 2021-04-09T16:17:22.809490
| 2018-03-21T09:04:06
| 2018-03-21T09:04:06
| 125,852,717
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 37,010
|
r
|
ui.R
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#
###################################################
### version 3
### - fixed the computation of standardized mortality at the
###   region (kraj) and district (okres) level
###################################################
library(shiny)
library(shinydashboard)
library(shinyWidgets)

# ---- Shared choice vectors ------------------------------------------------
# The "Filtre" and "PGLE" tabs previously repeated these literals verbatim;
# defining each vector once keeps the two tabs from drifting apart.

# Years covered by the mortality data.
year_choices <- c("1996", "1997", "1998", "1999", "2000", "2001", "2002",
                  "2003", "2004", "2005", "2006", "2007", "2008", "2009",
                  "2010", "2011", "2012", "2013", "2014", "2015")

# Slovak regions (kraje); display name mapped to its numeric code.
region_choices <- c("Bratislavský" = "1",
                    "Trnavský" = "2",
                    "Trenčiansky" = "3",
                    "Nitriansky" = "4",
                    "Žilinský" = "5",
                    "Banskobystrický" = "6",
                    "Prešovský" = "7",
                    "Košický" = "8",
                    "Iný" = 9)

# Slovak districts (okresy); display name mapped to its 3-digit code,
# where the leading digit is the region code.
district_choices <- c("Bratislava I" = "101",
                      "Bratislava II" = "102",
                      "Bratislava III" = "103",
                      "Bratislava IV" = "104",
                      "Bratislava V" = "105",
                      "Malacky" = "106",
                      "Pezinok" = "107",
                      "Senec" = "108",
                      "Dunajská Streda" = "201",
                      "Galanta" = "202",
                      "Hlohovec" = "203",
                      "Piešťany" = "204",
                      "Senica" = "205",
                      "Skalica" = "206",
                      "Trnava" = "207",
                      "Bánovce nad Bebravou" = "301",
                      "Ilava" = "302",
                      "Myjava" = "303",
                      "Nové Mesto nad Váhom" = "304",
                      "Partizánske" = "305",
                      "Považská Bystrica" = "306",
                      "Prievidza" = "307",
                      "Púchov" = "308",
                      "Trenčín" = "309",
                      "Komárno" = "401",
                      "Levice" = "402",
                      "Nitra" = "403",
                      "Nové Zámky" = "404",
                      "Šaľa" = "405",
                      "Topoľčany" = "406",
                      "Zlaté Moravce" = "407",
                      "Bytča" = "501",
                      "Čadca" = "502",
                      "Dolný Kubín" = "503",
                      "Kysucké Nové Mesto" = "504",
                      "Liptovský Mikuláš" = "505",
                      "Martin" = "506",
                      "Námestovo" = "507",
                      "Ružomberok" = "508",
                      "Turčianske Teplice" = "509",
                      "Tvrdošín" = "510",
                      "Žilina" = "511",
                      "Banská Bystrica" = "601",
                      "Banská Štiavnica" = "602",
                      "Brezno" = "603",
                      "Detva" = "604",
                      "Krupina" = "605",
                      "Lučenec" = "606",
                      "Poltár" = "607",
                      "Revúca" = "608",
                      "Rimavská Sobota" = "609",
                      "Veľký Krtíš" = "610",
                      "Zvolen" = "611",
                      "Žarnovica" = "612",
                      "Žiar nad Hronom" = "613",
                      "Bardejov" = "701",
                      "Humenné" = "702",
                      "Kežmarok" = "703",
                      "Levoča" = "704",
                      "Medzilaborce" = "705",
                      "Poprad" = "706",
                      "Prešov" = "707",
                      "Sabinov" = "708",
                      "Snina" = "709",
                      "Stará Ľubovňa" = "710",
                      "Stropkov" = "711",
                      "Svidník" = "712",
                      "Vranov nad Topľou" = "713",
                      "Gelnica" = "801",
                      "Košice I" = "802",
                      "Košice II" = "803",
                      "Košice III" = "804",
                      "Košice IV" = "805",
                      "Košice - okolie" = "806",
                      "Michalovce" = "807",
                      "Rožňava" = "808",
                      "Sobrance" = "809",
                      "Spišská Nová Ves" = "810",
                      "Trebišov" = "811",
                      "Iné" = "999")

# ICD-10 external-cause ("mechanical death") diagnosis codes (V/W/X/Y chapters).
mechanical_death_choices <- c(
  "V01", "V02", "V03", "V04", "V05", "V06", "V09", "V10", "V11", "V12", "V13", "V14", "V15", "V16", "V17",
  "V18", "V19", "V20", "V21", "V22", "V23", "V24", "V25", "V26", "V27", "V28", "V29", "V30", "V31", "V32", "V33", "V34",
  "V37", "V38", "V39", "V40", "V41", "V42", "V43", "V44", "V45", "V46", "V47", "V48", "V49", "V50","V52", "V53", "V54", "V55",
  "V57", "V58", "V59", "V60", "V63", "V64", "V65", "V67", "V68", "V69", "V70", "V73", "V74", "V75", "V76", "V77", "V78",
  "V79", "V80", "V81", "V82", "V83", "V84", "V85", "V86", "V87", "V88", "V89", "V90", "V92", "V95", "V96", "V97", "V98",
  "V99", "W00", "W01", "W02", "W03", "W04", "W05", "W06", "W07", "W08", "W09", "W10", "W11", "W12", "W13", "W14", "W15",
  "W16", "W17", "W18", "W19", "W20", "W21", "W22", "W23", "W24", "W25", "W26", "W27", "W29", "W30", "W31", "W32", "W33",
  "W34", "W36", "W37", "W38", "W39", "W40", "W41", "W43", "W44", "W45", "W49", "W50", "W51", "W53", "W54", "W55", "W56",
  "W57", "W58", "W64", "W65", "W66", "W67", "W68", "W69", "W70", "W72", "W73", "W74", "W75", "W76", "W77", "W78", "W79",
  "W80", "W81", "W83", "W84", "W85", "W86", "W87", "W89", "W91", "W92", "W93", "W94", "W99", "X00", "X01", "X02", "X03",
  "X04", "X05", "X06", "X07", "X08", "X09", "X10", "X11", "X12", "X13", "X14", "X15", "X16", "X17", "X18", "X19", "X20",
  "X21", "X22", "X23", "X25", "X27", "X28", "X29", "X30", "X31", "X33", "X34", "X36", "X37", "X38", "X39", "X40", "X41",
  "X42", "X43", "X44", "X45", "X46", "X47", "X48", "X49", "X50", "X53", "X54", "X57", "X58", "X59", "X60", "X61", "X62",
  "X63", "X64", "X65", "X66", "X67", "X68", "X69", "X70", "X71", "X72", "X73", "X74", "X75", "X76", "X77", "X78", "X79",
  "X80", "X81", "X82", "X83", "X84", "X85", "X89", "X90", "X91", "X92", "X93", "X94", "X95", "X96", "X97", "X99", "Y00",
  "Y01", "Y02", "Y03", "Y04", "Y05", "Y06", "Y07", "Y08", "Y09", "Y10", "Y11", "Y12", "Y13", "Y14", "Y15", "Y16", "Y17",
  "Y18", "Y19", "Y20", "Y21", "Y22", "Y23", "Y24", "Y25", "Y26", "Y27", "Y28", "Y29", "Y30", "Y31", "Y32", "Y33", "Y34",
  "Y35", "Y40", "Y42", "Y43", "Y44", "Y45", "Y46", "Y47", "Y49", "Y51", "Y52", "Y55", "Y59", "Y60", "Y65", "Y69", "Y70",
  "Y71", "Y72", "Y73", "Y74", "Y78", "Y79", "Y80", "Y81", "Y82", "Y83", "Y84", "Y85", "Y86", "Y87", "Y88", "Y89")

# ICD-10 initial-cause-of-death diagnosis codes (A-T chapters).
# Used by both the "Filtre" and "PGLE" pickers.
initial_death_choices <- c(
  "A01", "A02", "A03", "A04", "A05", "A08", "A09", "A15", "A16", "A17", "A18", "A19", "A26", "A27", "A31",
  "A32", "A35", "A37", "A39", "A40", "A41", "A42", "A46", "A48", "A49", "A50", "A52", "A53", "A69", "A77", "A81", "A83",
  "A84", "A85", "A86", "A87", "A88", "A89", "B00", "B01", "B02", "B15", "B16", "B17", "B18", "B19", "B20", "B21", "B22",
  "B23", "B24", "B25", "B33", "B34", "B44", "B49", "B50", "B54", "B58", "B67", "B77", "B90", "B94", "B96", "B97", "B99",
  "C00", "C01", "C02", "C03", "C04", "C05", "C06", "C07", "C08", "C09", "C10", "C11", "C12", "C13", "C14", "C15", "C16",
  "C17", "C18", "C19", "C20", "C21", "C22", "C23", "C24", "C25", "C26", "C30", "C31", "C32", "C33", "C34", "C37", "C38",
  "C39", "C40", "C41", "C43", "C44", "C45", "C46", "C47", "C48", "C49", "C50", "C51", "C52", "C53", "C54", "C55", "C56",
  "C57", "C58", "C60", "C61", "C62", "C63", "C64", "C65", "C66", "C67", "C68", "C69", "C70", "C71", "C72", "C73", "C74",
  "C75", "C76", "C80", "C81", "C82", "C83", "C84", "C85", "C86", "C88", "C90", "C91", "C92", "C93", "C94", "C95", "C96",
  "C97", "D09", "D10", "D11", "D12", "D13", "D14", "D15", "D16", "D17", "D18", "D20", "D25", "D26", "D27", "D30", "D32",
  "D33", "D34", "D35", "D36", "D37", "D38", "D39", "D40", "D41", "D42", "D43", "D44", "D45", "D46", "D47", "D48", "D50",
  "D51", "D53", "D55", "D56", "D59", "D60", "D61", "D62", "D64", "D65", "D66", "D67", "D68", "D69", "D70", "D72", "D73",
  "D74", "D75", "D76", "D80", "D81", "D82", "D83", "D84", "D86", "D89", "E03", "E04", "E05", "E06", "E07", "E10", "E11",
  "E12", "E13", "E14", "E15", "E16", "E20", "E21", "E23", "E24", "E25", "E27", "E31", "E32", "E34", "E40", "E41", "E43",
  "E44", "E46", "E50", "E63", "E64", "E65", "E66", "E67", "E68", "E70", "E71", "E72", "E74", "E75", "E76", "E78", "E79",
  "E83", "E84", "E85", "E86", "E87", "E88", "E89", "F01", "F03", "F07", "F09", "F10", "F11", "F12", "F18", "F19", "F20",
  "F23", "F25", "F29", "F31", "F32", "F50", "F99", "G00", "G03", "G04", "G06", "G08", "G09", "G10", "G11", "G12", "G20",
  "G21", "G23", "G24", "G25", "G30", "G31", "G35", "G36", "G37", "G40", "G41", "G52", "G54", "G58", "G61", "G62", "G64",
  "G70", "G71", "G72", "G80", "G81", "G82", "G83", "G90", "G91", "G92", "G93", "G95", "G96", "G97", "G98", "H66", "I00",
  "I01", "I02", "I05", "I06", "I07", "I08", "I09", "I10", "I11", "I12", "I13", "I15", "I20", "I21", "I22", "I23", "I24",
  "I25", "I26", "I27", "I28", "I30", "I31", "I32", "I33", "I34", "I35", "I36", "I37", "I38", "I39", "I40", "I42", "I44",
  "I45", "I46", "I47", "I48", "I49", "I50", "I51", "I60", "I61", "I62", "I63", "I64", "I65", "I66", "I67", "I69", "I70",
  "I71", "I72", "I73", "I74", "I77", "I78", "I80", "I81", "I82", "I83", "I84", "I85", "I86", "I87", "I88", "I89", "I95",
  "I97", "I99", "J01", "J03", "J04", "J05", "J06", "J09", "J10", "J11", "J12", "J13", "J14", "J15", "J16", "J18", "J20",
  "J21", "J22", "J30", "J32", "J35", "J36", "J37", "J38", "J39", "J40", "J41", "J42", "J43", "J44", "J45", "J46", "J47",
  "J60", "J61", "J62", "J63", "J64", "J65", "J66", "J67", "J68", "J69", "J70", "J80", "J81", "J82", "J84", "J85", "J86",
  "J90", "J92", "J93", "J94", "J95", "J96", "J98", "K12", "K14",
  "K20", "K21", "K22", "K25", "K26", "K27", "K28", "K29", "K30",
  "K31", "K35", "K36", "K37", "K38", "K40", "K41", "K42", "K43",
  "K44", "K45", "K46", "K50", "K51", "K52", "K55", "K56", "K57",
  "K58", "K60", "K61", "K62", "K63", "K65", "K66", "K70", "K71",
  "K72", "K73", "K74", "K75", "K76", "K80", "K81", "K82", "K83",
  "K85", "K86", "K90", "K91", "K92", "L02", "L03", "L05", "L08",
  "L10", "L51", "L53", "L89", "L97", "M00", "M05", "M06", "M08",
  "M10", "M13", "M15", "M16", "M17", "M18", "M19", "M30", "M31",
  "M32", "M33", "M34", "M35", "M40", "M41", "M43", "M45", "M46",
  "M47", "M48", "M50", "M60", "M62", "M80", "M81", "M83", "M84",
  "M85", "M86", "M87", "M88", "M89", "M96", "M99", "N00", "N01",
  "N02", "N03", "N04", "N05", "N06", "N07", "N10", "N11", "N12",
  "N13", "N14", "N15", "N17", "N18", "N19", "N20", "N21", "N23",
  "N25", "N26", "N27", "N28", "N30", "N31", "N32", "N34", "N35",
  "N36", "N39", "N40", "N41", "N42", "N45", "N49", "N50", "N60",
  "N63", "N70", "N71", "N73", "N76", "N80", "N81", "N82", "N83",
  "N99", "O00", "O02", "O03", "O08", "O10", "O15", "O16", "O20",
  "O22", "O26", "O44", "O45", "O46", "O71", "O72", "O74", "O75",
  "O81", "O85", "O87", "O88", "O89", "O90", "O97", "O99", "P00",
  "P01", "P02", "P03", "P04", "P05", "P07", "P08", "P10", "P11",
  "P12", "P15", "P20", "P21", "P22", "P23", "P24", "P25", "P26",
  "P27", "P28", "P29", "P35", "P36", "P37", "P38", "P39", "P50",
  "P51", "P52", "P53", "P54", "P55", "P56", "P57", "P58", "P59",
  "P60", "P61", "P70", "P71", "P72", "P74", "P76", "P77", "P78",
  "P80", "P83", "P90", "P91", "P94", "P95", "P96", "Q00", "Q01",
  "Q02", "Q03", "Q04", "Q05", "Q06", "Q07", "Q10", "Q18", "Q20",
  "Q21", "Q22", "Q23", "Q24", "Q25", "Q26", "Q27", "Q28", "Q30",
  "Q31", "Q32", "Q33", "Q34", "Q35", "Q37", "Q39", "Q40", "Q41",
  "Q42", "Q43", "Q44", "Q45", "Q52", "Q55", "Q56", "Q60", "Q61",
  "Q62", "Q63", "Q64", "Q66", "Q67", "Q68", "Q71", "Q74", "Q75",
  "Q76", "Q77", "Q78", "Q79", "Q80", "Q81", "Q82", "Q85", "Q86",
  "Q87", "Q89", "Q90", "Q91", "Q92", "Q93", "Q95", "Q96", "Q97",
  "Q98", "Q99", "R00", "R02", "R04", "R06", "R07", "R09", "R26",
  "R32", "R34", "R40", "R50", "R54", "R55", "R56", "R57", "R68",
  "R69", "R95", "R96", "R98", "R99", "S00", "S01", "S02", "S03",
  "S04", "S05", "S06", "S07", "S08", "S09", "S10", "S11", "S12",
  "S13", "S14", "S15", "S16", "S17", "S18", "S19", "S20", "S21",
  "S22", "S23", "S24", "S25", "S26", "S27", "S28", "S29", "S30",
  "S31", "S32", "S33", "S34", "S35", "S36", "S37", "S38", "S39",
  "S41", "S42", "S43", "S44", "S45", "S46", "S48", "S49", "S50",
  "S51", "S52", "S55", "S57", "S58", "S59", "S60", "S61", "S62",
  "S63", "S65", "S69", "S70", "S71", "S72", "S73", "S75", "S76",
  "S77", "S78", "S79", "S80", "S81", "S82", "S83", "S85", "S86",
  "S87", "S88", "S89", "S90", "S91", "S92", "S93", "S95", "S97",
  "S98", "S99", "T00", "T01", "T02", "T03", "T04", "T05", "T06",
  "T07", "T08", "T09", "T10", "T11", "T12", "T13", "T14", "T17",
  "T18", "T20", "T21", "T22", "T23", "T24", "T25", "T27", "T28",
  "T29", "T30", "T31", "T32", "T33", "T34", "T35", "T36", "T37",
  "T38", "T39", "T40", "T41", "T42", "T43", "T44", "T45", "T46",
  "T47", "T48", "T49", "T50", "T51", "T52", "T53", "T54", "T55",
  "T56", "T57", "T58", "T59", "T60", "T61", "T62", "T63", "T64",
  "T65", "T66", "T67", "T68", "T69", "T70", "T71", "T73", "T74",
  "T75", "T78", "T79", "T80", "T81", "T82", "T83", "T84", "T85",
  "T86", "T87", "T88", "T90", "T91", "T92", "T93", "T94", "T95",
  "T96", "T97", "T98")

# ICD-10 chapter numbers offered for "Initial diagnosis chapter"
# (chapter 7 is intentionally absent, as in the original choices).
chapter_choices <- c("1", "2", "3", "4", "5", "6", "8", "9", "10", "11",
                     "12", "13", "14", "15", "16", "17", "18", "19")

# Five-year age bands (labels) mapped to their group codes.
age_choices <- c("0" = "1", "1 - 4" = "2", "5 - 9" = "3", "10 - 14" = "4",
                 "15 - 19" = "5", "20 - 24" = "6", "25 - 29" = "7",
                 "30 - 34" = "8", "35 - 39" = "9", "40 - 44" = "10",
                 "45 - 49" = "11", "50 - 54" = "12", "55 - 59" = "13",
                 "60 - 64" = "14", "65 - 69" = "15", "70 - 74" = "16",
                 "75 - 79" = "17", "80 - 84" = "18", "85 - 89" = "19",
                 "90 - 94" = "20", "95+" = "21")

# Sex labels mapped to their codes.
sex_choices <- c("muži" = "1", "ženy" = "2")

# pickerInput with the options shared by every dropdown in this app:
# select-all/deselect-all action buttons and multiple selection enabled.
multi_picker <- function(id, label, choices) {
  pickerInput(
    inputId = id,
    label = label,
    choices = choices,
    options = list(`actions-box` = TRUE),
    multiple = TRUE
  )
}

# Define UI: a three-tab navbar (filters + mortality tables, plots, PGLE).
shinyUI(fluidPage(

  # Application title
  titlePanel("Epidemiologická analýza v 3.0"),

  navbarPage(
    title = '',

    # Tab 1: filters plus standardized-mortality tables.
    tabPanel('Filtre',
             sidebarLayout(
               sidebarPanel(
                 multi_picker("year", "Rok:", year_choices),
                 multi_picker("region", "Kraj:", region_choices),
                 multi_picker("district", "Okres:", district_choices),
                 multi_picker("mechanical_death", "Mechanical death diagnosis:",
                              mechanical_death_choices),
                 multi_picker("initial_death", "Initial death diagnosis:",
                              initial_death_choices),
                 multi_picker("initial_diagnosis_chapter",
                              "Initial diagnosis chapter:", chapter_choices),
                 multi_picker("age_category", "Veková skupina:", age_choices),
                 multi_picker("sex", "Pohlavie:", sex_choices),
                 actionButton("set", "Nastav"),
                 downloadLink('download_umrtnost', 'Download'),
                 width = 2
               ),
               mainPanel(
                 p("Štandardizovaná úmrtnosť podľa vekových kategórií"),
                 div(tableOutput("pocet_mrtvych"),style = "font-size:70%"),
                 p("Celková štandardizovaná úmrtnosť"),
                 div(tableOutput("celkova_umrtnost"),style = "font-size:70%")
               ))
    ),

    # Tab 2: plot controls and the all-districts mortality table.
    tabPanel("Grafy",
             sidebarLayout(
               sidebarPanel(
                 actionButton("draw", "Vykresli graf"),
                 numericInput('limit0', 'Minimum', 1000),
                 numericInput('limit1', 'Maximum', 3000),
                 numericInput('myStep', 'Krok', 200),
                 downloadButton('save_plot', "Uložiť graf"),
                 actionButton('groups', 'Okresy'),
                 actionButton('all_diagnosis', 'Okresy_automaticky'),
                 downloadButton('downloadData', 'Download vsetky okresy'),
                 downloadButton('download_umrtnost_vsetky_okresy', 'Download'),
                 width = 2
               ),
               mainPanel(
                 plotOutput('my_plot'),
                 div(tableOutput("umrtnost_vsetky_okresy"),style = "font-size:70%")
               )
             )),

    # Tab 3: PGLE analysis with its own (prefixed) copies of the filters.
    tabPanel('PGLE',
             sidebarLayout(
               sidebarPanel(
                 multi_picker("pgle_year", "Rok:", year_choices),
                 multi_picker("pgle_region", "Kraj:", region_choices),
                 multi_picker("pgle_district", "Okres:", district_choices),
                 multi_picker("pgle_initial_death", "Initial death diagnosis:",
                              initial_death_choices),
                 multi_picker("pgle_initial_diagnosis_chapter",
                              "Initial diagnosis chapter:", chapter_choices),
                 multi_picker("pgle_sex", "Pohlavie:", sex_choices),
                 actionButton('pgle', 'PGLE'),
                 width = 2
               ),
               mainPanel(
                 div(tableOutput("pgle"),style = "font-size:70%")
               )
             ))
  )
))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.