blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6ba7e4e6d62e0830211240037f774a02f401808b | d0e1f6bd7b270363e9e68e4b6f684c384ba8aa51 | /2. Scripts/Overzicht_Soorten.R | ad38266988b68b223713243fbc5057a5472ca6cf | [] | no_license | WimKo/Avifauna | a7b67903a6586ed6c385279cd242a08ec7a7e1ef | 50dbddfb43825275ace6c060f028923d58df447c | refs/heads/master | 2021-01-10T14:40:20.609520 | 2017-01-29T19:11:59 | 2017-01-29T19:11:59 | 45,529,468 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,192 | r | Overzicht_Soorten.R | require(data.table)
load(paste(getwd(), "/1. Data/Cleaned Data/Waarnemingen.RData", sep=""))
load(paste(getwd(), "/1. Data/Cleaned Data/Avifauna.RData", sep=""))
Avifauna$Datum=as.Date(Avifauna$Datum, "%d/%m/%Y")
Waarnemingen=as.data.frame(Waarnemingen)
Waarnemingen=data.table(Waarnemingen)
Avifauna=data.table(Avifauna)
Status=unique(Waarnemingen[,list(Naam=Naam, Soortstatus=Soortstatus)])
Waarnemingen=Waarnemingen[, list(Datum=Datum, Naam=Naam, Aantal=Aantal)]
Avifauna=Avifauna[, list(Datum=Datum, Naam=Soort, Aantal=Aantal)]
Waarnemingen=rbind(Waarnemingen, Avifauna)
Waarnemingen[, Jaar:=year(Datum)]
Waarnemingen=Waarnemingen[!grep("Hybride", Waarnemingen$Naam),]
Waarnemingen=Waarnemingen[!grep("onbekend", Waarnemingen$Naam),]
Waarnemingen=Waarnemingen[!grep(" X ", Waarnemingen$Naam),]
Waarnemingen[, Status:=Status[match(Waarnemingen$Naam, Status$Naam), Soortstatus]]
Waarnemingen=unique(Waarnemingen)
Table=dcast(data=Waarnemingen[Jaar>=2002 & Status=="Native",], Naam~Jaar, fun.aggregate=length, value.var="Datum")
Table=data.frame(Table)
Table[, 17]=rowSums(Table[, -1])
Table=Table[order(Table$V17),]
rownames(Table)=1:nrow(Table)
write.csv2(Table, "Table.csv")
|
b18f8ab8721ff357fc299a9353cf4fb80eb7f5c7 | adda2343e80a2df8c95f432e3b928dcd7828b5d1 | /code.R | 4b531b2c0f976251eab25683e951fcb449a4eb23 | [] | no_license | underthecurve/city-county | 542a1e0172cbe468cae02d697cf853ef9f7489af | d52dc28c3b26b3072ab34dede1a795452c8af5d9 | refs/heads/master | 2020-03-25T05:00:10.609572 | 2018-08-11T19:33:29 | 2018-08-11T19:33:29 | 143,424,264 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,479 | r | code.R | library('dplyr')
library('ggplot2')
library('reshape2')
library('ggalt')
library('rgdal')
library('maptools')
gpclibPermit()
# a helfpul reference guide: https://www.census.gov/geo/reference/geoguide.html
## all usa states
usa.states <- readOGR("cb_2017_us_state_500k/cb_2017_us_state_500k.shp")
usa.states2 <- usa.states %>%
fortify(region = "NAME") %>%
as_tibble() %>%
left_join(usa.states@data, by = c("id" = "NAME"))
## all usa counties
usa.counties <- readOGR("cb_2017_us_county_500k/cb_2017_us_county_500k.shp")
# https://www.census.gov/geo/maps-data/data/cbf/cbf_counties.html
usa.counties2 <- usa.counties %>%
fortify(region = "NAME") %>%
as_tibble() %>%
left_join(usa.counties@data, by = c("id" = "NAME"))
## baltimore
ggplot(data = usa.counties2 %>% filter(id == "Baltimore"), mapping = aes(x = long, y = lat, group = group)) +
coord_fixed(1.3) +
geom_polygon(color = "white", fill = "#e3e3e3")
## baltimore city
places.md <- readOGR("cb_2017_24_place_500k/cb_2017_24_place_500k.shp")
# https://www.census.gov/geo/maps-data/data/cbf/cbf_place.html
places.md2 <- places.md %>%
fortify(region = "NAME") %>%
as_tibble() %>%
left_join(places.md@data, by = c("id" = "NAME"))
ggplot(data = places.md2 %>% filter(id == "Baltimore"), mapping = aes(x = long, y = lat, group = group)) +
coord_fixed(1.3) +
geom_polygon(color = "white", fill = "#e3e3e3")
ggplot() +
coord_fixed(1.3) +
geom_polygon(data = usa.states2 %>% filter(STATEFP == '24'),
mapping = aes(x = long, y = lat, group = group),
color = "black", fill = "#e3e3e3") +
geom_polygon(data = usa.counties2 %>% filter(id == "Baltimore"),
mapping = aes(x = long, y = lat, group = group),
color = "black", fill = "#8c2a35") +
geom_polygon(data = places.md2 %>% filter(id == "Baltimore"),
mapping = aes(x = long, y = lat, group = group),
color = "black", fill = "#e1ac3b") +
theme(axis.line = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.ticks = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
panel.background = element_blank(),
plot.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank())
ggsave('plot.png', width = 10, height = 6)
# without state
ggplot() +
coord_fixed(1.3) +
# geom_polygon(data = usa.states2 %>% filter(STATEFP == '24'),
# mapping = aes(x = long, y = lat, group = group),
# color = "black", fill = "#e3e3e3") +
geom_polygon(data = usa.counties2 %>% filter(id == "Baltimore"),
mapping = aes(x = long, y = lat, group = group),
color = "black", size = .2) +
geom_polygon(data = places.md2 %>% filter(id == "Baltimore"),
mapping = aes(x = long, y = lat, group = group),
color = "black", fill = "#9E7C0C", size = .2) +
theme(axis.line = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.ticks = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
panel.background = element_blank(),
plot.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank())
ggsave('baltimore.png', width = 6, height = 6)
# Los Angeles
ggplot(data = usa.counties2 %>% filter(id == "Los Angeles"), mapping = aes(x = long, y = lat, group = group)) +
coord_fixed(1.3) +
geom_polygon(color = "white", fill = "#e3e3e3")
# Los Angeles city
places.ca <- readOGR("cb_2017_06_place_500k/cb_2017_06_place_500k.shp")
# https://www.census.gov/geo/maps-data/data/cbf/cbf_place.html
places.ca2 <- places.ca %>%
fortify(region = "NAME") %>%
as_tibble() %>%
left_join(places.ca@data, by = c("id" = "NAME"))
ggplot() +
coord_fixed(1.3) +
# geom_polygon(data = usa.states2 %>% filter(STATEFP == '06'),
# mapping = aes(x = long, y = lat, group = group),
# color = "black", fill = "#e3e3e3") +
geom_polygon(data = usa.counties2 %>% filter(id == "Los Angeles"),
mapping = aes(x = long, y = lat, group = group),
color = "black", size = .2) +
geom_polygon(data = places.ca2 %>% filter(id == "Los Angeles"),
mapping = aes(x = long, y = lat, group = group, fill = hole),
color = "black", size = .2) + scale_fill_manual(values = c('#999999', '#d3effd')) +
theme(axis.line = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.ticks = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
panel.background = element_blank(),
plot.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
legend.position = 'none')
ggsave('la.png', width = 6, height = 6)
# New York City
places.ny <- readOGR("cb_2017_36_place_500k/cb_2017_36_place_500k.shp")
# https://www.census.gov/geo/maps-data/data/cbf/cbf_place.html
places.ny <- places.ny %>%
fortify(region = "NAME") %>%
as_tibble() %>%
left_join(places.ca@data, by = c("id" = "NAME"))
# this is WRONG! where the f is the east river?!?!
ggplot() +
coord_fixed(1.3) +
geom_polygon(data = places.ny %>% filter(id == "New York"),
mapping = aes(x = long, y = lat, group = group, fill = hole),
color = "black", size = .2)
places.ny <- readOGR("nybb_18b/nybb.shp")
# https://www1.nyc.gov/site/planning/data-maps/open-data/districts-download-metadata.page
places.ny <- places.ny %>%
fortify(region = "BoroName") %>%
as_tibble() %>%
left_join(places.ny@data, by = c("id" = "BoroName"))
ggplot() +
coord_fixed(1.3) +
geom_polygon(data = places.ny,
mapping = aes(x = long, y = lat, group = group, color = id),
size = .4) +
theme(axis.line = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.ticks = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
panel.background = element_blank(),
plot.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
legend.position = 'none')
ggsave('nyc.png', width = 6, height = 6)
# dc ... water areas are a mess ugh
places.dc <- readOGR("tl_2017_us_state/tl_2017_us_state.shp")
# https://www.census.gov/cgi-bin/geo/shapefiles/index.php
places.dc <- places.dc %>%
fortify(region = "NAME") %>%
as_tibble() %>%
left_join(places.dc@data, by = c("id" = "NAME"))
area.water.dc <- readOGR("tl_2017_11001_areawater/tl_2017_11001_areawater.shp")
area.water.dc <- area.water.dc %>%
fortify(region = "FULLNAME") %>%
as_tibble() %>%
left_join(area.water.dc@data, by = c("id" = "FULLNAME"))
ggplot() +
coord_fixed(1.3) +
geom_polygon(data = places.dc %>% filter(id == 'District of Columbia'),
mapping = aes(x = long, y = lat, group = group),
color = "black", size = .2) +
geom_polygon(data = area.water.dc,
mapping = aes(x = long, y = lat, group = group, fill = piece)) +
scale_fill_manual(values = c('#d3effd', 'white', 'white', 'white'))+
geom_polygon(data = area.water.dc %>% filter(id == 'Potomac Riv' & piece != 1),
mapping = aes(x = long, y = lat, group = group)) +
geom_polygon(data = area.water.dc %>% filter(id == 'Kingman Lk' & piece != 1),
mapping = aes(x = long, y = lat, group = group), fill = '#d3effd') +
geom_polygon(data = area.water.dc %>% filter(id == 'Georgetown Reservoir' & piece != 1),
mapping = aes(x = long, y = lat, group = group), fill = '#d3effd') +
theme(axis.line = element_blank(),
axis.text.x = element_blank(),
axis.text.y = element_blank(),
axis.ticks = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank(),
panel.background = element_blank(),
plot.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
legend.position = 'none')
ggsave('dc.png', width = 6, height = 6)
|
6efc0b941d201518e708168b4604a2470d80fd86 | d7c2bf3c38957ac432c53eb4aa3811f5eb37d877 | /02_lists_matrices.R | 59406dc8e90d22fc5042518ed4b2df503cc4ff46 | [] | no_license | milonaityte13/intro_to_R | 6a23996b64a8f2e46379e8a771bf3d32bc23573c | 9783bc32544d825dfc5cf867324acfb3265d4a21 | refs/heads/master | 2021-05-05T20:07:41.446015 | 2018-01-24T16:06:40 | 2018-01-24T16:06:40 | 117,838,415 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 765 | r | 02_lists_matrices.R | #Lists and Matrices
#Lists
list_example <- list(1, "A", TRUE, "hello", 12.53, -12.53)
list_example
www.github.com
second_list <- list(title = "Numbers", numbers = 1:10, data = TRUE)
second_list
#What "type" is a list?
typeof(second_list)
typeof(second_list$title)
print(second_list$numbers[3])
#Matrices
#A zero filled matrix - 6 columns, 3 rows
matrix_example <- matrix(0, ncol=6, nrow=3)
matrix_example
class(matrix_example)
typeof(matrix_example)
str(matrix_example)
dim(matrix_example)
nrow(matrix_example)
ncol(matrix_example)
#Challenge4
length(matrix_example)
#Challege5
challenge5_matrix <- matrix(1:50, ncol=5, nrow=10)
challenge5_matrix2 <-matrix(1:50, nrow = 10, ncol=5, byrow=TRUE)
#Challenge7
matrix(c(4, 1, 9, 5, 10, 7),ncol=2, byrow=TRUE)
|
6a8fba409dc35cabc6464cf540a8d50bdac420c6 | c09e219ce81fa8dc4e3ebe50dbdcf9d8787d5ef0 | /sastuit/app.R | cec7e18364aeeb90dbb96b22e7766a98d36b0969 | [] | no_license | anibalhc/sastuit | 06b35ba3167bb49aa0e527c79cf10e5a766c1666 | 74e483a0f9b4167730e913b38f0a3fa23364b965 | refs/heads/master | 2023-01-23T18:09:12.046996 | 2020-12-08T06:06:38 | 2020-12-08T06:06:38 | 282,161,995 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 42,826 | r | app.R |
# libraries
source("www/files/libraries.R")
source("www/files/indicator/helpers.R")
# Define UI for application
body <- dashboardBody(fluidRow(
column(
width = 12,
box(
title = "Tweets",
width = NULL,
status = "primary",
hr(),
textOutput("nowRegistrosTuits"),
hr(),
div(style = 'height:540px;overflow-x: scroll', DT::dataTableOutput("mytable"))
)
),
column(
3,
align = "right",
offset = 9,
downloadButton('downloadData', uiOutput('btn_download', inline = TRUE))
)
,
tags$style(
type = 'text/css',
"#downloadData { width:100%; margin-top: 10px; background-color: #E05F3D;border: none; }"
)
))
tablaLimpiezaDatos <- dashboardBody(fluidRow(
column(
width = 12,
box(
title = uiOutput("title_tlimpieza") ,
width = NULL,
status = "primary",
hr(),
textOutput("nowRegistros2"),
hr(),
div(style = 'height:540px;overflow-x: scroll', DT::dataTableOutput("tablaLimpiezaDatos"))
)
),
column(
3,
align = "right",
offset = 9,
downloadButton(
'downloadDataLimpiezaDatos',
uiOutput('btn_download_clean', inline = TRUE)
)
)
,
tags$style(
type = 'text/css',
"#downloadDataLimpiezaDatos { width:100%; margin-top: 10px; background-color: #E05F3D;border: none; }"
)
))
tablaCombinaFiles <- dashboardBody(fluidRow(
column(
width = 12,
box(
title = uiOutput("title_tCombinaFiles"),
width = NULL,
status = "primary",
hr(),
textOutput("nowRegistros"),
hr(),
div(style = 'height:540px;overflow-x: scroll', DT::dataTableOutput("preViewCombinaDatos"))
)
),
column(
3,
align = "right",
offset = 9,
downloadButton(
'downloadDataCombinaDatos',
uiOutput('btn_download_rbind', inline = TRUE)
)
)
,
tags$style(
type = 'text/css',
"#downloadDataCombinaDatos { width:100%; margin-top: 10px; background-color: #E05F3D;border: none; }"
)
))
modelo_IA <- dashboardBody(div(class = "result", h3(uiOutput("label_result"))),
panel(fluidRow(
column(
width = 12,
align = "center",
box(
title = uiOutput("label_result_sa"),
width = NULL,
status = "primary",
tableOutput('t_modelo_IA') ,
#div(style = 'overflow-x: scroll', tableOutput('t_modelo_IA'))
# Horizontal line ----
tags$hr()
),
#
column(
3,
align = "right",
offset = 9,
downloadButton(
'downloadDataClassification',
uiOutput('btn_download_Classification', inline = TRUE)
)
)
,
tags$style(
type = 'text/css',
"#downloadDataClassification { width:100%; background-color: #E05F3D;border: none; }"
)
),
column(
width = 6,
align = "center",
box(
title = uiOutput("label_result_frehash"),
width = NULL,
status = "primary",
div(style = 'height:540px; width:350px; overflow-x: scroll', DT::dataTableOutput("t_hashtag"))
),
# Horizontal line ----
tags$hr()
),
column(
width = 6,
align = "center",
box(
title = uiOutput("label_result_country") ,
width = NULL,
status = "primary",
#div(style = 'overflow-x: scroll', DT::dataTableOutput("t_location"))
div(style = 'height:540px;overflow-x: scroll', DT::dataTableOutput("t_location"))
),
# Horizontal line ----
tags$hr()
)
,
# Horizontal line ----
tags$hr()
)))
#b64 <- base64enc::dataURI(file="nube.jpeg", mime="image/png")
ui <- fluidPage(
#call style
tags$head(
tags$link(rel = "stylesheet", type = "text/css", href = "css/style.css"),
tags$link(rel = "shortcut icon", href = "images/favicon.ico")
),
# useShinyjs - enable/disabled input
useShinyjs(),
theme = shinytheme("flatly"),
navbarPage(
title = "SASTuit",
id = "navbar",
collapsible = TRUE,
tabPanel(
title = uiOutput("title_panel_inicio"),
tags$hr(),
tags$br(),
panel(fluidRow(
align = "center",
p(uiOutput("textSASTuit")),
HTML(
'<center><img src="images/sastuit_emoji.jpg" alt="sastuit"></center>'
),
)),
tags$br(),
tags$hr()
),
tabPanel(
title = uiOutput("title_panel_dtweets"),
panel(
# Application title
titlePanel(title = uiOutput("titulo_panel_main")),
sidebarPanel(
actionButton("apiKeys", "Twitter API keys"),
tags$style(
type = 'text/css',
"#apiKeys { width:100%; background-color: #73A931;border: none; }"
),
#457079
h4(textInput(
"txtHashTag", label = h4(uiOutput("labelHashTag")), value = ""
)),
# Horizontal line ----
tags$hr(),
h4(
prettyCheckbox(
inputId = "retweet",
label = "Retweet",
thick = TRUE,
fill = TRUE,
bigger = TRUE,
icon = icon("check")
)
),
# Horizontal line ----
tags$hr(),
h4(
prettyCheckbox(
inputId = "ratelimit",
label = "Rate limit",
thick = TRUE,
fill = TRUE,
bigger = TRUE,
icon = icon("check")
)
),
tags$hr(),
h4(numericInput(
"numLimite", label = h4(uiOutput("labelnumLimite")), value = 10
)),
h4(
selectInput(
"selectIdioma",
label = h4(uiOutput("labelselectIdioma")),
choices = list(
"-" = '-',
"ANYWHERE" = 'FALSE',
"ES" = 'es',
"EN" = 'en'
),
selected = 1
)
),
h4(numericInput(
"max_id", label = h4(uiOutput("labelmax_id")), value = ""
)),
tags$hr(),
#dateInput("dateTuit", label = h3("Fecha"), value = "2019-01-01"),
withBusyIndicatorUI(actionButton(
"sent", label = uiOutput("labelsent") , class = "button"
)),
tags$style(
type = 'text/css',
"#sent { width:100%; background-color: #73A931;border: none; }"
),
tags$hr(),
),
mainPanel(body)
),
HTML(
'<center><img src="images/sastuit.jpg" alt="sastuit"></center>'
)
),
tabPanel(
title = uiOutput("title_panel_bind_files"),
panel(
titlePanel(title = uiOutput("title_panel_main_file")),
sidebarPanel(
h4(
fileInput(
"csvs",
label = h4(uiOutput("labelUploadFiles")),
multiple = TRUE,
accept = c(".csv")
),
tags$style(
"
.btn-file {
background-color:#2D9AB6;
border: none;
pointer-events: none;
}
.progress-bar {
background-color: #2D9AB6;
border: none;
}
"
)
),
h4(
selectInput(
"selectIdiomaCombina",
label = h4(uiOutput("labelselectIdiomaCombina")),
choices = list("-" = '-', "ES" = 'es', "EN" = 'en'),
selected = 1
)
),
withBusyIndicatorUI(actionButton(
"combinaFiles", uiOutput("btn_label_rbind"), class = "button"
)),
tags$style(
type = 'text/css',
"#combinaFiles { width:100%; background-color: #73A931;border: none; }"
),
mainPanel()
),
mainPanel(tablaCombinaFiles)
),
HTML(
'<center><img src="images/sastuit.jpg" alt="sastuit"></center>'
)
),
tabPanel(
title = uiOutput("title_panel_cleaningData"),
panel(
titlePanel(title = uiOutput("titulo_panel_main_clean")),
sidebarPanel(
box(width = 12, align = "center",
h2(uiOutput(
"title_filters", inline = TRUE
)),
h2(uiOutput("titulo_select_tweets")),
tags$hr()),
h4(
prettyCheckbox(
"twImgPerfil",
label = uiOutput("title_img_profile"),
thick = TRUE,
fill = TRUE,
bigger = TRUE,
icon = icon("check")
)
),
# Horizontal line ----
tags$hr(),
h4(
prettyCheckbox(
inputId = "twImgPortada",
label = uiOutput("title_profilebanners"),
thick = TRUE,
fill = TRUE,
bigger = TRUE,
icon = icon("check")
)
),
# Horizontal line ----
tags$hr(),
h4(
prettyCheckbox(
inputId = "twUbicacionVacia",
label = uiOutput("title_location"),
thick = TRUE,
fill = TRUE,
bigger = TRUE,
icon = icon("check")
)
),
# Horizontal line ----
tags$hr(),
h4(
prettyCheckbox(
inputId = "twhashtags_f",
label = uiOutput("title_hashtagF"),
thick = TRUE,
fill = TRUE,
bigger = TRUE,
icon = icon("check")
)
),
# Horizontal line ----
tags$hr(),
h4(sliderInput(
"longtext",
label = h4(uiOutput("title_longtext")),
min = 0,
max = 280,
value = 0
)),
h4(
fileInput(
"csvs_stopwords",
label = h4(uiOutput("title_stopwords")),
multiple = FALSE,
accept = c(".csv")
)
),
actionButton("btn_limpieza", uiOutput("title_btn_clean"), class = "button"),
tags$style(
type = 'text/css',
"#btn_limpieza { width:100%; background-color: #73A931;border: none; }"
)
),
mainPanel(tablaLimpiezaDatos)
),
HTML(
'<center><img src="images/sastuit.jpg" alt="sastuit"></center>'
)
),
tabPanel(
title = uiOutput("title_panel_prediction"),
panel(
titlePanel(title = uiOutput("title_panel_predict_")),
sidebarPanel(
div(class = "title_panel_model", h2(uiOutput(
"title_panel_model"
))),
withBusyIndicatorUI(actionButton(
'btn_modelo_IA', uiOutput("btn_title_model"), class = "button"
)),
tags$style(
type = 'text/css',
"#btn_modelo_IA { width:100%; background-color: #73A931;border: none; }"
),
tags$hr()
),
mainPanel(modelo_IA)
),
HTML(
'<center><img src="images/sastuit.jpg" alt="sastuit"></center>'
)
),
navbarMenu(
title = uiOutput("title_panel_language", inline = TRUE),
tabPanel(
"EN",
tags$hr(),
tags$br(),
panel(fluidRow(
align = "center",
p(uiOutput("textSASTuit2")),
HTML(
'<center><img src="images/sastuit_emoji.jpg" alt="sastuit"></center>'
),
)),
tags$br(),
tags$hr(),
),
tabPanel(
"ES",
tags$hr(),
tags$br(),
panel(fluidRow(
align = "center",
p(uiOutput("textSASTuit3")),
HTML(
'<center><img src="images/sastuit_emoji.jpg" alt="sastuit"></center>'
),
)),
tags$br(),
tags$hr(),
)
),
tabPanel(
title = uiOutput("title_panel_about"),
panel(
titlePanel(title =uiOutput("title_detailes")),
tags$hr(),
sidebarPanel(
width = 12,
div(class = "title_panel_model", h2(uiOutput(
"sd"
))),
helpText("- R version 3.6.1"),
helpText("- Copyright (C) 2017 The R Foundation for Statistical Computing."),
helpText(uiOutput("description_developer")),
helpText(uiOutput("description_methodology_based")),
helpText( a("Click Here", href="https://url2.cl/MJaBh" ,target="_blank")),
),
mainPanel()
),
HTML(
'<center><img src="images/sastuit.jpg" alt="sastuit"></center>'
),
)
)
)
server <- function(input, output, session) {
# call file functions
source("www/files/funciones.R")
# max size file upload csv: 1gb
(shiny.maxRequestSize = 10000 * 1024 ^ 2)
options(shiny.maxRequestSize = 10000 * 1024 ^ 2)
# default variables
var_lang_sent <<- FALSE
var_lang_CombinaFiles <<- FALSE
var_bandera_clean_text <<- FALSE
condicion_token <<- FALSE
#-----------------------------------------------------------------------------
# change language
# select language EN default
#updateTabsetPanel(session, "navbar",selected = "EN")
source("www/files/en.R")
observeEvent(input$navbar, {
if (input$navbar == "EN") {
# call files in EN
source("www/files/en.R")
} else if (input$navbar == "ES") {
# call files in ES
source("www/files/es.R")
}
# ---------------------------------------------------------------
# name menu Home
# title_panel_inicio
output$title_panel_inicio = renderText({
title_panel_inicio_
})
# ---------------------------------------------------------------
# ---------------------------------------------------------------
# name menu Download tweets
# title SASTuit
output$titulo_panel_main = renderText({
HTML(paste0("<b>", titulo_panel_main_, "</b>"))
#titulo_panel_main_
})
# label_name_app
output$label_name_app = renderText({
label_name_app_
})
# label_consumerKey
output$label_consumerKey = renderText({
label_consumerKey
})
# label_consumerSecret
output$label_consumerSecret = renderText({
label_consumerSecret
})
# label_accessToken
output$label_accessToken = renderText({
label_accessToken
})
# label_accessSecret
output$label_accessSecret = renderText({
label_accessSecret
})
# labelHashTag
output$labelHashTag = renderText({
labelHashTag_
})
output$textSASTuit = renderText({
#HTML(paste0("<b>",prueba,"</b>"))
textSASTuit_
})
output$textSASTuit2 = renderText({
#HTML(paste0("<b>",prueba,"</b>"))
textSASTuit_
})
output$textSASTuit3 = renderText({
#HTML(paste0("<b>",prueba,"</b>"))
textSASTuit_
})
# labelnumLimite
output$labelnumLimite = renderText({
labelnumLimite_
})
# labelselectIdioma
output$labelselectIdioma = renderText({
title_general_language_
})
# labelmax_id
output$labelmax_id = renderText({
labelmax_id
})
output$labelsent = renderText({
labelsent_
})
# title_panel_dtweets table
output$title_panel_dtweets = renderText({
title_panel_dtweets_
})
# title number row
output$nowRegistrosTuits <- renderText({
if (exists('num_nrowDf_search')) {
paste(nowrows_, num_nrowDf_search)
} else{
paste(nowrows_, "0")
}
})
# btn_download
output$btn_download = renderText({
btn_download_
})
# ---------------------------------------------------------------
# ---------------------------------------------------------------
# name menu Bind files
# title SASTuit
output$title_panel_main_file = renderText({
HTML(paste0("<b>", titulo_panel_main_, "</b>"))
#titulo_panel_main_
})
# title_panel_bind_files
output$title_panel_bind_files = renderText({
title_panel_bind_files_
})
# labelUploadFiles
output$labelUploadFiles = renderText({
labelUploadFiles_
})
# labelselectIdiomaCombina
output$labelselectIdiomaCombina = renderText({
title_general_language_
})
# btn_label_rbind
output$btn_label_rbind = renderText({
btn_label_rbind_
})
# title_tCombinaFiles table
output$title_tCombinaFiles = renderText({
title_tlimpieza_
})
# btn_download_rbind
output$btn_download_rbind = renderText({
btn_download_
})
# title number row
output$nowRegistros <- renderText({
if (exists('nrow_Panelcombina')) {
paste(nowrows_, nrow_Panelcombina)
} else{
paste(nowrows_, "0")
}
})
# ---------------------------------------------------------------
# ---------------------------------------------------------------
# name menu Text data cleaning
# title SASTuit
output$titulo_panel_main_clean = renderText({
HTML(paste0("<b>", titulo_panel_main_, "</b>"))
#titulo_panel_main_
})
output$description_developer = renderText({
description_developer
})
#description_methodology_based
output$description_methodology_based = renderText({
description_methodology_based
})
# titulo_select_tweets
output$titulo_select_tweets = renderText({
titulo_select_tweets
})
# title_panel_cleaningData
output$title_panel_cleaningData = renderText({
title_panel_cleaningData_
})
# title_filters
output$title_filters = renderText({
HTML(paste0("<b>", title_filters_, "</b>"))
#title_filters_
})
# title_img_profile
output$title_img_profile = renderText({
#HTML(paste0("<b>",title_img_profile_,"</b>"))
title_img_profile_
})
# title_profilebanners
output$title_profilebanners <- renderText({
title_profilebanners_
})
# title_location
output$title_location <- renderText({
title_location_
})
# title_hashtagF
output$title_hashtagF <- renderText({
title_hashtagF_
})
# title_longtext
output$title_longtext <- renderText({
title_longtext_
})
# title_stopwords
output$title_stopwords <- renderText({
title_stopwords_
})
# title_btn_clean
output$title_btn_clean <- renderText({
title_btn_clean_
})
# title_tlimpieza
output$title_tlimpieza = renderText({
title_tlimpieza_
})
# title number row
output$nowRegistros2 <- renderText({
if (exists('df_search_Clean')) {
paste(nowrows_, nrow(df_search_Clean))
} else{
paste(nowrows_, "0")
}
})
# btn_download_clean
output$btn_download_clean = renderText({
btn_download_
})
# btn_download_Classification
output$btn_download_Classification = renderText({
btn_download_
})
# ---------------------------------------------------------------
# ---------------------------------------------------------------
# name menu Prediction
# title_panel_predict_
output$title_panel_predict_ = renderText({
HTML(paste0("<b>", titulo_panel_main_, "</b>"))
})
# title_panel_prediction - menu
output$title_panel_prediction = renderText({
title_panel_prediction_
})
# title_panel_model
output$title_panel_model = renderText({
title_panel_model_
})
# label_result
output$label_result = renderText({
label_result_
})
# label_result_sa
output$label_result_sa = renderText({
label_result_sa
})
# label_result_frehash
output$label_result_frehash = renderText({
label_result_frehash
})
# label_result_country
output$label_result_country = renderText({
label_result_country
})
# btn_title_model
output$btn_title_model = renderText({
btn_title_model
})
# ---------------------------------------------------------------
# name menu Language
output$title_panel_language = renderText({
title_panel_language_
})
output$title_panel_about = renderText({
title_panel_about
})
output$title_detailes = renderText({
title_detailes
})
# ---------------------------------------------------------------
})
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# default buttons disabled
toggleState("downloadData")
toggleState("downloadDataCombinaDatos")
toggleState("downloadDataLimpiezaDatos")
toggleState("downloadDataClassification")
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# base files show
df_search <-
read.table(
"www/files/files_csv/df_search_base.csv",
header = TRUE,
sep = ",",
check.names = TRUE,
encoding = "Windows-1252"
)
result_tweets <-
read.table(
"www/files/files_csv/result_tweets_base.csv",
header = TRUE,
sep = ",",
check.names = F
)
df_hashtag <-
read.table(
"www/files/files_csv/hashtagVacio.csv",
header = TRUE,
sep = ",",
encoding = "Windows-1252"
)
row.names(df_hashtag) <- ""
location_base <-
read.csv(
"www/files/files_csv/location_base.csv",
header = TRUE,
sep = ",",
encoding = "Windows-1252"
)
stopwords_spanish <-
read.table(
"www/files/files_csv/stopwords_spanish.csv",
header = F,
sep = ",",
check.names = TRUE,
encoding = "Windows-1252"
)
#tSparse_Modelo <- read.csv("www/files/files_csv/es/tSparse_Modelo.csv",header = TRUE, sep = ",",encoding = "Windows-1252")
# print base files
output$mytable = DT::renderDataTable({
df_search
})
output$preViewCombinaDatos = DT::renderDataTable({
df_search
})
output$tablaLimpiezaDatos = DT::renderDataTable({
df_search
})
output$t_modelo_IA <- renderTable({
result_tweets
})
#-----------------------------------------------------------------------------
observeEvent(input$btn_modelo_IA, {
# When the button is clicked, wrap the code in a call to `withBusyIndicatorServer()`
if (var_bandera_clean_text) {
#verify lang in spanish to classify
if (tweetsIdioma != "es") {
sendSweetAlert(
session = session,
title = "¡Error!",
text = message_model2,
type = "error"
)
} else{
withBusyIndicatorServer("btn_modelo_IA", {
# preprocessing column text
source("www/files/model.R")
enable("downloadDataClassification")
# descargar dataframe en formato csv
output$downloadDataClassification <- downloadHandler(
filename = function() {
paste("classification-", Sys.Date(), ".csv", sep = "")
},
content = function(file) {
save_as_csv(
demo,
file,
prepend_ids = TRUE,
na = "",
fileEncoding = "Windows-1252"
)
}
)
# print dataframe
output$t_modelo_IA <- renderTable({
table_results
})
if (var_apply_top_location) {
output$t_location = DT::renderDataTable({
datatable(tabla_location,
options = list(searching = FALSE, paging = FALSE))
})
} else{
output$t_location = DT::renderDataTable({
datatable(NULL, options = list(dom = ''))
})
}
if (contains_hashtags) {
output$t_hashtag = DT::renderDataTable({
datatable(tabla_hashtag,
options = list(searching = FALSE, paging = FALSE))
})
} else{
output$t_hashtag = DT::renderDataTable({
datatable(NULL, options = list(dom = ''))
})
}
})
}
} else{
sendSweetAlert(
session = session,
title = "¡Error!",
text = message_model,
type = "error"
)
}
})
  # "Combine files" button: concatenates the uploaded CSV files into one
  # data.table (global df_search_API), optionally filters it to the selected
  # language, and wires up the preview table plus the CSV download handler.
  observeEvent(input$combinaFiles, {
    withBusyIndicatorServer("combinaFiles", {
      req(input$csvs)  # silently abort if no files have been uploaded yet
    })
    language <- input$selectIdiomaCombina
    if (language == "-") {
      # No language selected: show an error alert and stop.
      sendSweetAlert(
        session = session,
        title = "¡Error!",
        text = message_language,
        type = "error"
      )
    } else{
      withBusyIndicatorServer("combinaFiles", {
        # Globals consumed later by the cleaning step (btn_limpieza).
        tweetsIdioma_CombinaFiles <<- language
        var_lang_CombinaFiles <<- TRUE
        var_lang_sent <<- FALSE
        # Row-bind all uploaded CSVs; fill = TRUE tolerates differing columns.
        df_search_API <<-
          rbindlist(lapply(input$csvs$datapath, fread),
                    use.names = TRUE,
                    fill = TRUE)
      })
      # NOTE(review): the row count is taken BEFORE the language filter below,
      # so the displayed count may exceed the rows actually shown — confirm intended.
      nrow_Panelcombina <<- dim(df_search_API)[1]
      # Position of the "lang" column (NA when the column is absent).
      number_column_lang_pre <- match("lang", names(df_search_API))
      if (!is.na(number_column_lang_pre)) {
        # Keep only tweets written in the selected language.
        df_search_API <<-
          df_search_API %>% filter(lang == tweetsIdioma_CombinaFiles)
      }
      # Render the combined data for preview.
      output$preViewCombinaDatos = DT::renderDataTable({
        df_search_API
      })
      # Enable the download button.
      enable("downloadDataCombinaDatos")
      # Download handler: export the combined data as CSV.
      output$downloadDataCombinaDatos <- downloadHandler(
        filename = function() {
          paste("data-bind", ".csv", sep = "")
        },
        content = function(file) {
          save_as_csv(
            df_search_API,
            file,
            prepend_ids = FALSE,
            na = "",
            fileEncoding = "Windows-1252"
          )
        }
      )
      # Display the number of combined rows.
      output$nowRegistros <- renderText({
        paste(nowrows_, nrow_Panelcombina)
      })
    }
  })
observeEvent(input$ratelimit, {
ratelimit = input$ratelimit
if (ratelimit) {
toggleState("numLimite")
ratelimit <<- TRUE
} else{
enable("numLimite")
ratelimit <<- FALSE
}
})
  # "Clean data" button: validates that a tweet dataset exists, resolves the
  # working language, assembles the stopword list (uploaded file or built-in
  # defaults), then runs the cleaning script, which is expected to produce
  # df_search_Clean and the flag mensaje_columText as side effects.
  observeEvent(input$btn_limpieza, {
    # Check that df_search_API exists (data must be searched or combined first).
    bandera <- exists('df_search_API')
    if (bandera) {
      if (dim(df_search_API)[1] >= 1) {
        # Persist the cleaning options as globals for limpiezaDatos.R.
        tweetsImgPerfil <<- input$twImgPerfil
        tweetsImgPortada <<- input$twImgPortada
        tweetsUbicacionVacia <<- input$twUbicacionVacia
        tweetshashtags_f <<- input$twhashtags_f
        tweetsLongitud <<- input$longtext
        #language <- input$selectIdiomaLimpieza
        # Determine the language from whichever workflow ran last
        # (file combination vs. API search).
        boolean_idioma <- FALSE
        # NOTE(review): the first two branches are identical (both take the
        # CombinaFiles language) — they could be merged into one condition.
        if (var_lang_CombinaFiles == TRUE && var_lang_sent == TRUE) {
          tweetsIdioma <<- tweetsIdioma_CombinaFiles
          boolean_idioma <- TRUE
        } else if (var_lang_CombinaFiles == TRUE &&
                   var_lang_sent == FALSE) {
          tweetsIdioma <<- tweetsIdioma_CombinaFiles
          boolean_idioma <- TRUE
        } else if (var_lang_sent == TRUE &&
                   var_lang_CombinaFiles == FALSE) {
          # `language` is the global set by the search observer (input$sent).
          tweetsIdioma <<- language
          boolean_idioma <- TRUE
        } else{
          boolean_idioma <- FALSE
        }
        if (boolean_idioma) {
          #extras_stopwords <<- ""
          # If a custom stopwords CSV was uploaded, combine its first column
          # with the language's default stopword list.
          if (!is.null(input$csvs_stopwords))
          {
            variable_stopwords <<- TRUE
            inFile_stopwords <- input$csvs_stopwords
            upload_file_stopwords <-
              read.csv(inFile_stopwords$datapath,
                       sep = ",",
                       header = F)
            extras_stopwords <-
              as.character(upload_file_stopwords$V1)
            extras_stopwords <<-
              c(extras_stopwords, stopwords(tweetsIdioma))
          } else{
            # No upload: fall back to bundled stopword lists per language.
            if (tweetsIdioma == "es") {
              extras_stopwords <- as.character(stopwords_spanish$V1)
              #extras_stopwords <<- c(extras_stopwords, stopwords('es'))
              extras_stopwords <<- c(extras_stopwords)
            } else if (tweetsIdioma == "en") {
              #extras_stopwords<<- stopwords(tweetsIdioma)
              extras_stopwords <<- stopwords('en')
            }
          }
          # Run the cleaning script; it sets df_search_Clean and
          # mensaje_columText (TRUE when the text column is missing).
          source("www/files/limpiezaDatos.R")
          if (mensaje_columText) {
            sendSweetAlert(
              session = session,
              title = "¡Error!",
              text = alert_Textdataset,
              type = "error"
            )
          } else{
            # Show the cleaned data and its row count.
            output$tablaLimpiezaDatos = DT::renderDataTable({
              df_search_Clean
            })
            output$nowRegistros2 <- renderText({
              paste(nowrows_, nrow(df_search_Clean))
            })
            # Enable the download button.
            enable("downloadDataLimpiezaDatos")
            # Download handler: export the cleaned data as CSV.
            output$downloadDataLimpiezaDatos <- downloadHandler(
              filename = function() {
                paste("clean", ".csv", sep = "")
              },
              content = function(file) {
                save_as_csv(
                  df_search_Clean,
                  file,
                  prepend_ids = TRUE,
                  na = "",
                  fileEncoding = "Windows-1252"
                )
              }
            )
          }
        }
      } else{
        # Dataset exists but is empty.
        sendSweetAlert(
          session = session,
          title = "¡Error!",
          text = alert_low_data,
          type = "error"
        )
      }
    } else{
      # No dataset loaded at all.
      sendSweetAlert(
        session = session,
        title = "¡Error!",
        text = alert_no_existdt ,
        type = "error"
      )
    }
  })
dataModal <- function(failed = FALSE) {
modalDialog(
title = title_APITwitter_,
#HTML('<img src="http://www.google.nl/images/branding/googlelogo/2x/googlelogo_color_272x92dp.png">'),
textInput(
"name_app",
label = h4(uiOutput("label_name_app")),
width = '100%'
),
h4(textInput(
"api_key", label = h4(uiOutput("label_consumerKey")), width = '100%'
)),
h4(textInput(
"api_secret",
label = h4(uiOutput("label_consumerSecret")),
width = '100%'
)),
h4(textInput(
"acc_token",
label = h4(uiOutput("label_accessToken")),
width = '100%'
)),
h4(textInput(
"acc_secret",
label = h4(uiOutput("label_accessSecret")),
width = '100%'
)),
footer = tagList(fluidRow(
#modalButton(label_cancel),
actionButton("cancel", label_cancel,
style = "width:20%;background-color: #FF2700;border: none; "),
actionButton("envio", "OK",
style = "width:20%; background-color: #73A931;border: none; ")
)),
# child: images
tags$div(
class = "landing-block background-content",
HTML(
'<center><img src="images/sastuit.jpg" alt="sastuit"></center>'
)
)
)
}
  # Open the API-credentials modal when the "API keys" button is pressed.
  observeEvent(input$apiKeys, {
    showModal(dataModal())
  })
  # Close the credentials modal without validating or saving anything.
  observeEvent(input$cancel, {
    removeModal()
  })
observeEvent(input$envio, {
name_app_ <<- trim(input$name_app)
api_key <<- trim(input$api_key)
api_secret <<- trim(input$api_secret)
acc_token <<- trim(input$acc_token)
acc_secret <<- trim(input$acc_secret)
if (!isTruthy(name_app_)) {
sendSweetAlert(
session = session,
title = "¡Error!",
text = alert_name_app,
type = "error"
)
} else if (!isTruthy(trim(input$api_key))) {
sendSweetAlert(
session = session,
title = "¡Error!",
text = alert_consumerKey,
type = "error"
)
} else if (!isTruthy(trim(input$api_secret))) {
sendSweetAlert(
session = session,
title = "¡Error!",
text = alert_consumerSecret,
type = "error"
)
} else if (!isTruthy(trim(input$acc_token))) {
sendSweetAlert(
session = session,
title = "¡Error!",
text = alert_accessToken,
type = "error"
)
} else if (!isTruthy(trim(input$acc_secret))) {
sendSweetAlert(
session = session,
title = "¡Error!",
text = alert_accessSecret,
type = "error"
)
}
else{
condicion_token <<- TRUE
# showModal(popupModal(failed = TRUE))
removeModal()
}
})
observeEvent(input$sent, {
# credeciales API Twitter
if (condicion_token) {
token <- create_token(
app = name_app_,
consumer_key = api_key,
consumer_secret = api_secret,
access_token = acc_token,
access_secret = acc_secret,
set_renv = FALSE
)
var_rate_limit <- rate_limit(token, "search_tweets")[1, 2]
if (is.null(var_rate_limit)) {
showModal(dataModal())
sendSweetAlert(
session = session,
title = "¡Error!",
text = label_error_token_correct,
type = "error"
)
} else if (var_rate_limit == 0) {
condicion_token <<- FALSE
sendSweetAlert(
session = session,
title = "¡Error!",
text = label_error_ratelimit,
type = "error"
)
} else if (var_rate_limit != 0) {
observeEvent(input$name_app, {
updateTextInput(session, "name_app", value = name_app_)
})
observeEvent(input$api_key, {
updateTextInput(session, "api_key", value = api_key)
})
observeEvent(input$api_key, {
updateTextInput(session, "api_secret", value = api_secret)
})
observeEvent(input$acc_token, {
updateTextInput(session, "acc_token", value = acc_token)
})
observeEvent(input$acc_secret, {
updateTextInput(session, "acc_secret", value = acc_secret)
})
condicion = TRUE
# Se obtienen valores de los inputs
txtHashTag = trim(input$txtHashTag)
include_rts = input$retweet
numLimite = trim(input$numLimite)
language <<- input$selectIdioma
max_id = trim(input$max_id)
if (!isTruthy(txtHashTag)) {
sendSweetAlert(
session = session,
title = "¡Error!",
text = alert_HashTag,
type = "error"
)
condicion = FALSE
}
else if (language == "-") {
sendSweetAlert(
session = session,
title = "¡Error!",
text = message_language,
type = "error"
)
condicion = FALSE
}
if (numLimite <= 0 || !isTruthy(numLimite)) {
condicion = FALSE
sendSweetAlert(
session = session,
title = "¡Error!",
text = alert_numTweet,
type = "error"
)
}
if (include_rts) {
include_rts = TRUE
}
else{
include_rts = FALSE
}
if (!isTruthy(max_id)) {
max_id = NULL
}
if (condicion) {
withBusyIndicatorServer("sent", {
#df_rate_limit <<- rate_limit()
if (language != FALSE) {
# query - busqueda tweets
df_search <- search_tweets(
txtHashTag,
max_id = max_id,
n = numLimite,
lang = language,
retryonratelimit = ratelimit,
include_rts = include_rts,
token = token
)
} else{
# query - busqueda tweets
df_search <- search_tweets(
txtHashTag,
max_id = max_id,
n = numLimite,
retryonratelimit = ratelimit,
include_rts = include_rts,
token = token
)
}
})
tweetsIdioma_sent <<- language
var_lang_sent <<- TRUE
var_lang_CombinaFiles <<- FALSE
if (dim(df_search)[1] == 0) {
#errorCondition(df_search)
sendSweetAlert(
session = session,
title = "¡Error!",
text = alert_limitT,
type = "error"
)
}
# copia del dataframe generado
df_search_API <<- data.frame(df_search)
# dataframe generado
df_search <- data.table::data.table(df_search)
num_nrowDf_search <<- dim(df_search)[1]
#numerofilas_df_search <<- dim(df_search)[1]
# print dataframe
output$mytable = DT::renderDataTable({
df_search
})
output$nowRegistrosTuits <- renderText({
if (exists('num_nrowDf_search')) {
paste(nowrows_, num_nrowDf_search)
} else{
paste(nowrows_, "0")
}
})
# enable button Descargar
enable("downloadData")
# download dataframe en formato csv
output$downloadData <- downloadHandler(
filename = function() {
paste("data-search-", Sys.Date(), ".csv", sep = "")
},
content = function(file) {
save_as_csv(
df_search,
file,
prepend_ids = TRUE,
na = "",
fileEncoding = "Windows-1252"
)
}
)
}
}
} else{
showModal(dataModal())
sendSweetAlert(
session = session,
title = "¡Error!",
text = label_error_token,
type = "error"
)
}
})
}
# Run the application (ui and server are defined earlier in this file).
shinyApp(ui = ui, server = server)
|
2e7571225491e81713a9a7c96be3a2cc36cb3bdc | 8b441f592a6deb9b0a515cbd92bb4663ad79ffe4 | /convergence/src/main/R/02_compute_historical_campaign_variables.R | c81e0442ffd5e41628022be3e0b36e8cc72ac4e1 | [] | no_license | carnaum2/use-cases | 0d391a6a10bb70b60a4025152a278b0e4c595d01 | 24920e3828234da691ab643b6dd9a0aa0a5c0df5 | refs/heads/master | 2022-12-07T03:41:34.299274 | 2020-09-07T10:20:32 | 2020-09-07T10:20:32 | 293,249,567 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,456 | r | 02_compute_historical_campaign_variables.R | source("configuration.R")
init.month <- "201605" # FIXME: hard-coded first month of the campaign series
# FIXME: Do not overwrite the latest month's file; use a different file name
# Accumulate historical campaign-contact counters month over month.
#
# For the first month of the series (init.month) the historical counters are
# initialised from that month's own contact/response counts. For every later
# month, the previous month's dt.Campaigns is loaded, its historical counters
# are incremented by that month's counts, and the result is joined (by
# CIF_NIF) onto the current month's table; customers absent from the previous
# month start their history at zero.
#
# curr.month: month to process, formatted "YYYYMM".
# Side effects: loads and re-saves dt.Campaigns.<month>.RData under
# <data.folder>/Campaign/<month>/ (the current month's file is overwritten).
main_02_compute_historical_campaign_variables <- function(curr.month) {
  cat ("\n[MAIN] 02_compute_historical_campaign_variables", curr.month, "\n")
  # Remove the listed columns from a data.table when they are present.
  drop_cols_if_present <- function(dt, cols) {
    present <- intersect(cols, colnames(dt))
    if (length(present) > 0) dt[, (present) := NULL]
    invisible(dt)
  }
  hist_cols <- c("HIST_number_of_contacts", "i.HIST_number_of_contacts",
                 "HIST_number_of_responses", "i.HIST_number_of_responses")
  if (curr.month == init.month) {
    ofolder <- paste0(data.folder, "/Campaign/", curr.month)
    ifile <- paste0(ofolder, "/dt.Campaigns.", curr.month, ".RData")
    cat("[LOAD] ", ifile, "\n")
    load(ifile)  # loads dt.Campaigns (original ls() guard was always TRUE here)
    drop_cols_if_present(dt.Campaigns, hist_cols)
    # First month: historical counters start at this month's own counts.
    dt.Campaigns[, HIST_number_of_contacts := number_of_contacts]
    dt.Campaigns[, HIST_number_of_responses := number_of_responses]
    ofile <- ifile
    cat("[SAVE] ", ofile, "-", nrow(dt.Campaigns), "\n")
    save(dt.Campaigns, file = ofile) # FIXME: Overwrites dt.Campaigns.<month>.RData
  }
  else {
    # Previous month in "YYYYMM": first day of curr.month minus one day.
    # (Replaces the original POSIXct "midnight minus 2 seconds" hack.)
    # FIXME: Does not handle mid-month extractions named YYYYMMDD.
    prev.month <- format(as.Date(paste0(curr.month, "01"), "%Y%m%d") - 1, "%Y%m")
    ifolder <- paste0(data.folder, "/Campaign/", prev.month)
    ifile <- paste0(ifolder, "/dt.Campaigns.", prev.month, ".RData")
    cat("[LOAD] ", ifile, "\n")
    load(ifile)  # loads the previous month's dt.Campaigns
    if (prev.month == init.month) {
      # The init month's historical counters already equal its own counts.
      dt.Campaigns.prev <- dt.Campaigns[, .(CIF_NIF, HIST_number_of_contacts,
                                            HIST_number_of_responses)]
    } else {
      dt.Campaigns.prev <- dt.Campaigns[, .(CIF_NIF, HIST_number_of_contacts,
                                            HIST_number_of_responses,
                                            number_of_contacts,
                                            number_of_responses)]
      # Roll the previous month's own counts into its running totals.
      dt.Campaigns.prev[, HIST_number_of_contacts := HIST_number_of_contacts + number_of_contacts]
      dt.Campaigns.prev[, HIST_number_of_responses := HIST_number_of_responses + number_of_responses]
      dt.Campaigns.prev[, c("number_of_contacts", "number_of_responses") := NULL]
    }
    setkey(dt.Campaigns.prev, CIF_NIF)
    # Load the current month and strip any stale historical columns before joining.
    ifolder <- paste0(data.folder, "/Campaign/", curr.month)
    ifile <- paste0(ifolder, "/dt.Campaigns.", curr.month, ".RData")
    cat("[LOAD] ", ifile, "\n")
    load(ifile)
    setkey(dt.Campaigns, CIF_NIF)
    drop_cols_if_present(dt.Campaigns, hist_cols)
    cat("[INFO] Joining....\n")
    # Keyed right join: every current-month row, with history where available.
    dt.Campaigns <- dt.Campaigns.prev[dt.Campaigns]
    # Customers absent from the previous month start their history at zero.
    dt.Campaigns[is.na(HIST_number_of_contacts), HIST_number_of_contacts := 0]
    dt.Campaigns[is.na(HIST_number_of_responses), HIST_number_of_responses := 0]
    ofile <- ifile
    cat("[SAVE] ", ofile, "-", nrow(dt.Campaigns), "\n")
    save(dt.Campaigns, file = ofile) # FIXME: Overwrites dt.Campaigns.<month>.RData
  }
}
#----------------------------------------------------------------------------------------------------------
# Command-line entry point; skipped when the file is source()d with `sourced` defined.
if (!exists("sourced")) {
  opt_parser <- OptionParser(option_list = list(
    make_option(c("-m", "--month"), type = "character", default = NULL,
                help = "input month (YYYYMM)", metavar = "character")
  ))
  opt <- parse_args(opt_parser)
  # A missing --month is fatal: print the usage text, then abort.
  if (is.null(opt$month)) {
    print_help(opt_parser)
    stop("At least one parameter must be supplied (input month: YYYYMM)", call. = FALSE)
  }
  main_02_compute_historical_campaign_variables(opt$month)
}
|
9e3ff852441d4703e16106735cb88972dee21d28 | 9a5cd516300be561dc627ebb3fc07ead2707b502 | /tests/testthat/test-exceed_threshold.R | 17d347674069ec3c79e56d29e79194073aaf3a84 | [] | no_license | cran/incadata | 11535f59e08977e5cb0dca961ea16f5e723e4b2e | a80e811b5d22ae44d39231b5ed1994653dc01d27 | refs/heads/master | 2021-05-23T02:43:52.314286 | 2020-04-09T07:20:02 | 2020-04-09T07:20:02 | 82,063,922 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 926 | r | test-exceed_threshold.R | context("exceed_threshold")
# Fixture: nine parseable dates plus one unparseable value ("foo"),
# i.e. 90 % of the vector can be coerced to Date.
x <- c(rep("2012-01-01", 9), "foo")
test_that("exced_threshold", {
  # Below the default threshold the original vector is returned with a message.
  expect_message(exceed_threshold(x, as.Date(x)))
  expect_message(expect_equal(exceed_threshold(x, as.Date(x)), x))
  # force = TRUE coerces anyway and warns about the value set to NA.
  expect_warning(exceed_threshold(x, as.Date(x), force = TRUE),
                 "the input vector coerced to Date with foo set to NA!")
  # With threshold = 1 (100 % required) the vector is returned unchanged.
  expect_equal(exceed_threshold(x, as.Date(x), threshold = 1), x)
  # var_name appears in the warning text.
  expect_warning(
    exceed_threshold(x, as.Date(x), var_name = "bar", force = TRUE), "bar")
  # Fully coercible inputs emit a coercion message naming the target type.
  expect_message(
    exceed_threshold(1:10, as.numeric(1:10)),
    "the input vector coerced to numeric")
  expect_message(
    exceed_threshold(1:10, 1:10), "the input vector coerced to integer")
  expect_equal(exceed_threshold(1:10, 1:10), 1:10)
  # Require user input
  skip_if_not(interactive())
  # ask = TRUE prompts interactively before coercing.
  expect_message(exceed_threshold(x, as.Date(x), ask = TRUE))
})
|
3ccbf18b09e9695360c34123e4a712abd5960f49 | 0272d8f561df3fd8ac1c2df6f62c1d9901168245 | /PP_kammer_hartheim/P_roll_lateral_Versuch.R | 1233be7fa01fcd17034e2d836d54a236e44f8fcc | [] | no_license | laurin-f/WWM | bb54f33b7292f9eb34b4058bf4df78fd5db7b5e2 | 4b06e6eaf80c4cb02dd8ada937d9aa1ce4cfcb33 | refs/heads/master | 2023-08-07T15:51:50.428553 | 2023-07-19T13:56:01 | 2023-07-19T13:56:01 | 248,497,735 | 0 | 0 | null | 2021-01-26T14:29:35 | 2020-03-19T12:29:31 | R | UTF-8 | R | false | false | 22,910 | r | P_roll_lateral_Versuch.R | hauptpfad <- "C:/Users/ThinkPad/Documents/FVA/P01677_WindWaldMethan/"
# Project folder layout, derived from hauptpfad (root path, defined above).
metapfad<- paste0(hauptpfad,"Daten/Metadaten/")
metapfad_harth<- paste0(metapfad,"Hartheim/")
metapfad_comsol<- paste0(metapfad,"COMSOL/")
datapfad<- paste0(hauptpfad,"Daten/Urdaten/Dynament/")
plotpfad_PPchamber <- paste0(hauptpfad,"Dokumentation/Berichte/plots/PP_Kammer/")
samplerpfad <- paste0(hauptpfad,"Daten/aufbereiteteDaten/sampler_data/")
klimapfad<- paste0(hauptpfad,"Daten/Urdaten/Klimadaten_Hartheim/")
klimapfad_CR1000<- paste0(hauptpfad,"Daten/Urdaten/Klimadaten_Hartheim/Hartheim CR1000/")
soilpfad<-paste0(hauptpfad,"Daten/Urdaten/Boden_Hartheim/")
kammer_datapfad <- paste0(hauptpfad,"Daten/aufbereiteteDaten/Kammermessungen/")
datapfad_PP_Kammer <- paste0(hauptpfad,"Daten/aufbereiteteDaten/PP_Kammer/")
chamber_arduino_pfad <- paste0(hauptpfad,"/Daten/Urdaten/Kammermessungen_Arduino/")
metapfad_PP <- paste0(metapfad,"PP_Kammer/")
# Force a fresh load of the project package (picks up local changes).
detach("package:pkg.WWM", unload = TRUE)
library(pkg.WWM)
# check.packages() (from pkg.WWM) installs/loads the listed packages.
packages<-c("lubridate","stringr","ggplot2","units","dplyr","readODS")
check.packages(packages)
theme_set(theme_classic())
# Global switch: when FALSE the plotting section inside the loop is skipped.
plot <- F
##################
# Metadata: experiment schedule (start/end per chamber run) and injections.
pp_chamber <- read_ods(paste0(metapfad_PP,"PP_Kammer_Messungen_hartheim.ods"))
pp_chamber$Start <- dmy_hm(pp_chamber$Start)
pp_chamber$Ende <- dmy_hm(pp_chamber$Ende)
injections <- read_ods(paste0(metapfad_PP,"injektionen_hartheim.ods"))
injections$Start <- dmy_hm(injections$Start)
injections$Ende <- dmy_hm(injections$Ende)
# Default experiment id; overwritten by the loop below.
Versuch <- 20
# Main analysis loop: for each experiment (Versuch) compute pressure steps,
# drift-corrected chamber pressures, CO2 depth-profile offsets relative to a
# no-pumping baseline, optional plots, and save data_long per experiment.
for(Versuch in c(10,20,21,22)){
  # Measurement window: from (shortly before) the experiment start until
  # half a day after its end; experiment 21 ends at a fixed date.
  if(Versuch == 20){
    datelim <- c(pp_chamber$Start[Versuch],pp_chamber$Ende[Versuch]+3600*24*0.5)
  }else{
    datelim <- c(pp_chamber$Start[Versuch]-3600 * 6,pp_chamber$Ende[Versuch]+3600*24*0.5)
  }
  if(Versuch == 21){
    datelim[2] <- ymd_h("23.01.16 08")
  }
  ###################
  # --- PPC (pressure) sensor data, 1-minute table ---
  data_PPC <- read_PP(datelim = datelim,table="PP_1min",corfac = "P_corfac_date")
  # datelim <- ymd_hms("2022-12-07 00:00:00 UTC", "2022-12-13 12:00:00 UTC")
  # data_PPC <- read_PP(datelim = datelim,corfac = "P_corfac_date")
  #
  # load(paste0(klimapfad_CR1000,"klima_data_PP_kammer.RData"))
  # ggplot(sub_daterange(data_PPC,ymd_hm("2022-12-07 00:00","2022-12-09 15:00")))+geom_line(aes(date,PPC,col=factor(id)))
  #
  # ggplot(sub_daterange(data_PPC,ymd_hm("2022-12-09 09:00","2022-12-09 09:02")))+geom_line(aes(date,P,col=factor(id)))
  # ggplot(sub_daterange(data_PPC,ymd_hm("2022-12-09 11:00","2022-12-09 11:02")))+geom_line(aes(date,P,col=factor(id)))
  #data_PPC <- subset(data_PPC,id %in% c(1:6))
  # Median sampling interval (s) of sensor 1, used to size rolling windows.
  dt <- round(median(diff_time(data_PPC$date[data_PPC$id == 1]),na.rm=T),2)
  data_PPC <- data_PPC %>%
    group_by(id) %>%
    mutate(dt = diff_time(date,"secs"),
           # 10-minute rolling means; !!dt unquotes the precomputed scalar.
           PPC5 = RcppRoll::roll_mean(P_diff,10*60/!!dt,fill=NA),
           P_roll = RcppRoll::roll_mean(P,20,fill=NA))
  # Sensor 5 is the reference; subtract it from all other sensors.
  P_ref <- subset(data_PPC,id==5)
  P_ref$P_ref <- P_ref$P_roll
  data_PPC <- subset(data_PPC,id!=5)
  data_PPC <- merge(data_PPC,P_ref[,c("date","P_ref")])
  data_PPC$P_roll <- data_PPC$P_roll - data_PPC$P_ref
  # Blank out rolling values after data gaps longer than one hour.
  data_PPC[which(data_PPC$dt > 3600),c("PPC","PPC5","P_roll")] <- NA
  # Pressure-jump threshold used to detect mode changes (experiment-specific).
  step_thr <- ifelse(Versuch == 20,0.27,0.4)
  data_PPC <- data_PPC[order(data_PPC$date),]
  # Detect pressure steps on 10-minute means of sensors 1-4.
  PPC_steps <- data_PPC %>%
    filter(id %in% 1:4) %>%
    mutate(date = round_date(date,"10 min")) %>%
    group_by(id,date) %>%
    summarise(across(everything(),mean)) %>%
    mutate(P_diff = abs(c(NA,diff(P))),
           step = ifelse(P_diff > step_thr,1,0),
    )
  # Keep step times at least 3 h apart (debounce multiple sensors firing).
  step_date <- sort(unique(PPC_steps$date[PPC_steps$step == 1]))
  step_date <- step_date[c(T,c(as.numeric(diff(step_date))) > 60*3)]
  step_date <- step_date[!is.na(step_date)]
  # For experiments 21/22 reconstruct the schedule from the known timing:
  # 3 runs of 4 eight-hour steps, separated by 10-hour breaks.
  n_versuche <- 3
  n_steps <- 4
  step_h <- 8 * 3600
  break_h <- 10 * 3600
  hours_vec <- cumsum(c(rep(c(rep(step_h,n_steps),break_h),n_versuche-1),rep(step_h,n_steps)))
  if(Versuch == 21){
    step_date <- c(step_date[1],step_date[1]+hours_vec+3600)
  }
  if(Versuch == 22){
    step_date <- c(step_date[1],step_date[1]+hours_vec)
  }
  #step_date - step_date_2
  # Diagnostic plot of detected steps vs. the threshold.
  ggplot(PPC_steps)+
    geom_vline(xintercept = step_date,col="grey",linetype=2)+
    geom_hline(yintercept = step_thr,col="grey",linetype=2)+
    geom_line(aes(date,P,col=factor(id)))+
    # geom_point(aes(date,P,col=factor(id)))+
    geom_line(aes(date,P_diff))#+
  # xlim(ymd_h("22.12.09 03","22.12.09 23"))
  # facet_wrap(~id)
  PPC_steps_wide <- tidyr::pivot_wider(PPC_steps,id_cols = date,names_from = id,values_from = P_roll,names_prefix = "P_roll_")
  # One row per step interval with mean pressures and Start/End times.
  step_df <- PPC_steps_wide %>%
    mutate(step = ifelse(date %in% !!step_date,1,0),
           step_id=cumsum(step)) %>%
    group_by(step_id) %>%
    summarise(across(matches("P_roll"),mean,na.rm=T),
              Start = min(date),
              End = max(date))
  # Pump-mode labels per step interval; "0,0" marks the no-pumping baseline.
  if(Versuch %in% 20:21){
    modes <- c(rep(c("0,0","1,1","1,-1","-1,1","-1,-1"),3),"0,0")
  }
  if(Versuch %in% 22){
    modes <- c(rep(c("0,0","1,1","1,-1","-1,-1","-1,1"),3),"0,0")
  }
  if(Versuch %in% 24){
    modes <- c(rep(c("0,0","100","70","1","-1","-70","-100"),2),"0,0")
  }
  if(Versuch %in% 25){
    modes <- c(rep(c("0,0","70,70","70,1","1,70","-40,1","1,-40","-40,-40"),2),"0,0")
  }
  if(Versuch %in% 26){
    modes <- c("0,0",c(t(cbind(c("-100","-70","-1","30","70"),"0"))),"100","0,0")
  }
  if(Versuch %in% 27){
    modes <- c("0,0",c(t(cbind(c("-100","100","-70","70","-1"),"0"))),"30","0,0")
  }
  if(Versuch == 10){
    modes <- c("0,0","40","20","-20","-40","0,0")
  }
  step_df$mode <- modes
  step_df$step <- factor(step_df$mode,
                         levels = unique(step_df$mode),
                         labels = seq_along(unique(step_df$mode))-1)
  # Assign each pressure record its mode/step, skipping the first 3 h of each
  # step (equilibration) and the last 30 min.
  data_PPC$step_id <- NA
  data_PPC$mode <- NA
  for(i in 1:nrow(step_df)){
    id <- which(daterange_id(data_PPC,c(step_df$Start[i]+3600*3,step_df$End[i]-1800)))
    data_PPC$mode[id] <- step_df$mode[i]
    data_PPC$step_id[id] <- step_df$step_id[i]
  }
  # Sensor drift: linear trend fitted per sensor on baseline ("0,0") periods.
  cal_period <- data_PPC %>%
    filter(mode == "0,0") %>%
    filter(!is.na(P_roll)) %>%
    group_by(id) %>%
    mutate(zeit = as.numeric(difftime(date,min(date))),
           sensor_drift = predict(glm(P_roll ~ zeit)),
           P_roll_cal = P_roll - sensor_drift)
  data_PPC <- merge(data_PPC,cal_period[,c("date","sensor_drift","id")],all = T)
  data_PPC <- data_PPC %>%
    group_by(id) %>%
    mutate(sensor_drift = imputeTS::na_interpolation(sensor_drift),
           P_roll_cal = P_roll - sensor_drift,
           P_tot = RcppRoll::roll_sumr(P_roll_cal,24*60))
  # Diagnostic plots: raw vs. drift-corrected pressures.
  ggplot(subset(data_PPC,id != 6))+
    geom_line(aes(date,P_roll,col=id))+
    geom_line(aes(date,sensor_drift,col=id))
  ggplot(subset(data_PPC,id != 6))+
    geom_line(aes(date,P_roll_cal,col=id))
  # ggplot(subset(data_PPC,id != 6))+
  #   geom_line(aes(date,P_roll_cal,col=id))+
  #   geom_line(aes(date,P_tot,col=id))
  #
  ############
  # --- CO2 depth profiles from samplers 1 and 2 (5-point rolling means) ---
  data_probe1u2 <- read_sampler("sampler1u2",datelim = datelim, format = "long")
  data_probe1u2 <- data_probe1u2 %>%
    group_by(tiefe) %>%
    mutate(CO2_smp1_roll = RcppRoll::roll_mean(CO2_smp1,5,fill=NA),
           CO2_smp2_roll = RcppRoll::roll_mean(CO2_smp2,5,fill=NA)
    )
  ###########
  # --- atmospheric CO2 from the chamber sensor (between chamber closures) ---
  chamber_data <- read_arduino_chamber(datelim=datelim)
  # Minutes 6-28 of each half hour are treated as open-chamber (ambient) air.
  chamber_data$atm <- ifelse(minute(chamber_data$date) %% 30 > 5 & minute(chamber_data$date) %% 30 < 29,1,0)
  chamber_data$date
  CO2_atm <- chamber_data %>%
    filter(atm == 1) %>%
    select(date,CO2) %>%
    mutate(date = round_date(date,"10 mins")) %>%
    group_by(date) %>%
    summarise(CO2_atm = mean(CO2,na.rm=T)) %>%
    mutate(CO2_atm = imputeTS::na_interpolation(CO2_atm))
  #############
  # --- chamber CO2/CH4 fluxes (only available for experiments 25 and 27) ---
  if(Versuch %in% c(25,27)){
    load(paste0(datapfad_PP_Kammer,"flux_ls.RData"))
    flux <- sub_daterange(flux,datelim)
    flux$CO2_flux_raw <- flux$CO2_GGA_mumol_per_s_m2
    flux$CO2_flux <- RcppRoll::roll_mean(flux$CO2_GGA_mumol_per_s_m2,5,fill=NA)
    flux$CH4_flux_raw <- flux$CH4_mumol_per_s_m2 * 10^3
    flux$CH4_flux <- RcppRoll::roll_mean(flux$CH4_mumol_per_s_m2 * 10^3,5,fill=NA)
  }
  #########
  # --- soil water content (script defines swc_wide) ---
  source("./PP_kammer_hartheim/SWC_hartheim.R")
  swc_sub <- sub_daterange(swc_wide,datelim)
  ########################
  # --- merge pressure, CO2-profile, SWC and ambient-CO2 data ---
  # data_PPC_wide <- tidyr::pivot_wider(data_PPC[,c("date","id","P_roll_cal","mode","step_id")],names_from = id,values_from =P_roll_cal,names_prefix = "P_")
  #
  data_PPC_wide <- tidyr::pivot_wider(data_PPC[,c("date","id","P_roll_cal","PPC5","mode","step_id")],names_from = id,values_from =c(P_roll_cal,PPC5))
  names(data_PPC_wide) <- str_replace_all(names(data_PPC_wide),c("P_roll_cal"="P","PPC5"="PPC"))
  data_probes_wide <- tidyr::pivot_wider(data_probe1u2,id_cols=c(date,T_C),names_from = tiefenstufe,values_from = matches("CO2_smp"))
  range(data_probes_wide$date)
  data <- merge(data_PPC_wide,data_probes_wide)
  data <- merge(data,swc_sub,all.x = T)
  data <- merge(data,CO2_atm)
  data <- data %>% mutate(zeit = as.numeric(difftime(date,min(date))))
  # For these experiments the two shallowest sampler-1 depths are unreliable.
  if(Versuch %in% c(20,22,24,25,26)){
    data[,paste0("CO2_smp1_roll_",1:2)] <- NA
  }
  # Long format: one row per date x sampler (probe) x depth level (tiefe).
  data_long <- tidyr::pivot_longer(data,matches("CO2_smp\\d_roll_\\d"),names_pattern = "CO2_smp(\\d)_roll_(\\d)",values_to = "CO2",names_to = c("probe","tiefe"))
  data_long$smp_depth <- paste(data_long$probe,data_long$tiefe,sep="_")
  data_long$CO2_preds <- NA
  # Baseline model per sampler/depth, fitted on "0,0" periods only, then
  # predicted over the full calibration date range (GAM; quadratic for V10).
  for(i in unique(data_long$smp_depth)){
    cal_df <- subset(data_long, smp_depth == i & mode == "0,0" & !is.na(CO2))
    cal_datelim <- range(cal_df$date)
    if(any(!is.na(cal_df$CO2))){
      if(Versuch == 10){
        fm <- glm(CO2 ~poly(zeit,2),data = cal_df)
      }else{
        fm <- mgcv::gam(CO2 ~ s(zeit),data = cal_df)
      }
      tiefenID <- which(data_long$smp_depth == i & daterange_id(data_long,cal_datelim))
      data_long$CO2_preds[tiefenID] <- predict(fm,newdata = data_long[tiefenID,])
    }
  }
  # Offset = measured minus baseline; lateral pressure = sensor 2 - sensor 3.
  data_long$CO2_offset <- data_long$CO2 - data_long$CO2_preds
  data_long$P_horiz <- data_long$P_2 - data_long$P_3
  names(data_long)
  # Mean pressures per step interval (for shading/annotation).
  step_df_cal <- data_long %>%
    filter(!is.na(step_id)) %>%
    group_by(step_id,mode) %>%
    summarise(across(matches("^P_"),mean,na.rm=T)) %>%
    ungroup() %>%
    mutate(Start = step_df$Start,
           End = step_df$End)
  # Running index of records within each step/depth/probe combination.
  data_long <- data_long %>%
    group_by(step_id,tiefe,probe) %>%
    mutate(dummy = 1,
           mode_zeit = cumsum(dummy))
  # ggplot(data_long)+
  #   geom_line(aes(date,P_1,col=factor(step_id),group=smp_depth))
  # ggplot(data_long)+
  #   geom_line(aes(date,mode_zeit,col=factor(step_id)))+
  #   geom_hline(yintercept = 9)
  # CO2_cal: measured CO2 during pumping only; CO2_shift: relative offset (%).
  data_long$CO2_cal <- ifelse(data_long$mode == "0,0",NA,data_long$CO2)
  data_long$CO2_shift <- data_long$CO2_offset / data_long$CO2_preds * 100
  # Plot labels: map probe 2 -> profile 1 / subspace 2, probe 1 -> profile 2 / subspace 3.
  data_long$profile <- factor(data_long$probe,levels = 2:1,labels = 1:2)
  data_long$subspace <- factor(data_long$probe,levels = 2:1,labels = 2:3)
  # Depth in cm (3.5 cm per depth level).
  data_long$depth <- factor(as.numeric(data_long$tiefe) * 3.5)
  # Shading variable: lateral pressure for V20-22, sensor 3 otherwise.
  if(Versuch %in% 20:22){
    step_df_cal$P_alpha <- step_df_cal$P_horiz
  }else{
    step_df_cal$P_alpha <- step_df_cal$P_3
  }
  # ggplot(data_long)+
  #   geom_vline(xintercept = step_date,col="grey",linetype=2)+
  #   geom_line(aes(date,CO2_cal,col=tiefe))+
  #   geom_line(aes(date,CO2,col=tiefe),linetype = 3)+
  #   geom_line(aes(date,CO2_preds,col=tiefe),linetype=2)+
  #   guides(alpha=F)+
  #   facet_wrap(~probe,ncol=1)+
  #   ggsave(paste0(plotpfad_PPchamber,"CO2_offset_fm_",Versuch,".png"),width = 7,height = 6)
  # step_df
  # if(Versuch == 20){
  #   step_df$step <- factor(step_df$step,labels = c("0","high P","P-"))
  # }
  # --- plotting section (only when the global `plot` switch is TRUE) ---
  if(plot){
    cols <- RColorBrewer::brewer.pal(4,"PuOr")
    PPC_col <- "brown"
    names(data_long)
    # CO2 time series per profile with step shading, baseline fit dashed.
    CO2_offset_plot <-
      ggplot(data_long)+
      geom_rect(data=subset(step_df,step != 0),aes(xmin = Start, xmax=End,ymin=-Inf,ymax = Inf,alpha=step))+
      scale_alpha_discrete(range = c(0.1,0.4),guide = guide_legend(order = 1))+
      geom_line(data = subset(data_long, tiefe %in% 1:4),aes(date,CO2_cal,col=factor(tiefe,labels = c(1:4))))+
      scale_color_manual("subspace",values = c(cols),guide = guide_legend(order = 3))+
      ggnewscale::new_scale_color()+
      # geom_line(data = subset(data_long, tiefe %in% 1),aes(date,CO2_cal,col=factor(tiefe,labels = "")))+
      # scale_color_manual("ambient PPC",values = PPC_col,guide = guide_legend(order = 4))+
      # ggnewscale::new_scale_color()+
      geom_line(data = subset(data_long, tiefe %in% 1),aes(date,CO2_cal,col=factor(tiefe,labels = "")))+
      scale_color_manual(expression(P[lateral]),values = 1,guide = guide_legend(order = 5))+
      ggnewscale::new_scale_color()+
      #guides(alpha = F)+
      geom_vline(xintercept = step_date,col="grey",linetype=2)+
      geom_line(aes(date,CO2_cal,col=depth))+
      geom_point(data = subset(data_long,mode == "0,0"),aes(date,CO2,col=depth),pch=20)+
      geom_line(aes(date,CO2,col=depth),alpha = 0.5)+
      geom_line(aes(date,CO2_preds,col=depth),linetype=2)+
      scale_color_discrete(guide = guide_legend(order = 2))+
      labs(y = expression(CO[2]~"(ppm)"),col = "depth (cm)")+
      facet_wrap(~paste0("profile ",profile," (subspace ",subspace,")"),ncol=1)
    # Relative CO2 shift (%) per profile.
    CO2_plot <-
      ggplot(data_long)+
      #geom_rect(data=step_df_cal,aes(xmin = Start, xmax=End,ymin=-Inf,ymax = Inf,alpha=abs(P_alpha)))+
      geom_rect(data=subset(step_df,step != 0),aes(xmin = Start, xmax=End,ymin=-Inf,ymax = Inf,alpha=step))+
      scale_alpha_discrete(range = c(0.1,0.4))+
      #scale_alpha(range = c(0,0.4))+
      geom_hline(yintercept = 0,col="grey",linetype=2)+
      geom_line(aes(date,CO2_shift,col=tiefe))+
      geom_vline(xintercept = step_date,col="grey",linetype=2)+
      facet_wrap(~paste0("profile ",profile," (subspace ",subspace,")"),ncol=1)+
      guides(col= F)+
      labs(y = expression(CO[2]*"-shift"~("%")), x ="",col="depth")
    #labs(y = expression(CO[2~offset]~(ppm)), x ="")
    #P_tot_plt <-
    ggplot(data_long)+
      geom_rect(data=subset(step_df,step != 0),aes(xmin = Start, xmax=End,ymin=-Inf,ymax = Inf,alpha=step))+
      scale_alpha_discrete(range = c(0.1,0.4))+
      guides(alpha=F)+
      geom_vline(xintercept = step_date,col="grey",linetype=2)+
      geom_line(aes(date,-(cumsum(P_1)/4 + P_1 *50)+3200,col="1"))+
      #geom_line(aes(date,-(cumsum(P_1)+P_1*500),col="2"))+
      geom_line(data = subset(data_long,tiefe==7& probe == 1),aes(date,CO2,col="3"))+
      scale_color_manual(values = c(cols,1))+
      labs(col="subspace",y = expression(P[roll]~"(Pa)"))
    #ggpubr::ggarrange(CO2_offset_plot,P_tot_plt,common.legend = T,legend="right",ncol=1,align="v")
    # Mean pressures of the four subspaces plus the lateral difference.
    P_plt <- ggplot(data_long)+
      geom_rect(data=subset(step_df,step != 0),aes(xmin = Start, xmax=End,ymin=-Inf,ymax = Inf,alpha=step))+
      scale_alpha_discrete(range = c(0.1,0.4))+
      guides(alpha=F)+
      geom_line(aes(date,P_horiz,col="lateral"))+
      geom_hline(yintercept = 0,col="grey",linetype=2)+
      geom_vline(xintercept = step_date,col="grey",linetype=2)+
      geom_line(aes(date,P_1,col="1"))+
      geom_line(aes(date,P_2,col="2"))+
      geom_line(aes(date,P_3,col="3"))+
      geom_line(aes(date,P_4,col="4"))+
      scale_color_manual(values = c(cols,1))+
      labs(col="subspace",y = expression(P[mean]~"(Pa)"))
    names(data_long)
    # Pressure-pumping coefficient per subspace.
    PPC_plt <-
      ggplot(data_long)+
      geom_rect(data=subset(step_df,step != 0),aes(xmin = Start, xmax=End,ymin=-Inf,ymax = Inf,alpha=step))+
      scale_alpha_discrete(range = c(0,0.4))+
      guides(alpha=F)+
      geom_line(aes(date,PPC_1,col="1"))+
      geom_line(aes(date,PPC_2,col="2"))+
      geom_vline(xintercept = step_date,col="grey",linetype=2)+
      geom_line(aes(date,PPC_3,col="3"))+
      geom_line(aes(date,PPC_4,col="4"))+
      #geom_line(aes(date,PPC_6,col="ambient PPC"))+
      scale_color_manual(values = c(cols,PPC_col))+
      theme(legend.text.align = 0.5)+
      labs(col="subspace",y = "PPC (Pa/s)", x ="")
    # Flux panels only exist when flux data was loaded above (V25/V27).
    if(exists("flux")){
      FCO2_plt <- ggplot(flux)+
        geom_rect(data=subset(step_df,step != 0),aes(xmin = Start, xmax=End,ymin=-Inf,ymax = Inf,alpha=step))+
        scale_alpha_discrete(range = c(0.1,0.4))+
        guides(alpha=F)+
        geom_vline(xintercept = step_date,col="grey",linetype=2)+
        geom_line(aes(date,CO2_flux_raw),alpha=.5)+
        geom_line(aes(date,CO2_flux))+
        xlim(range(data_long$date))+
        labs(y = expression(italic(F[CO2])~"("*mu * mol ~ m^{-2} ~ s^{-1}*")"))
      FCH4_plt <- ggplot(flux)+
        geom_rect(data=subset(step_df,step != 0),aes(xmin = Start, xmax=End,ymin=-Inf,ymax = Inf,alpha=step))+
        scale_alpha_discrete(range = c(0.1,0.4))+
        guides(alpha=F)+
        geom_vline(xintercept = step_date,col="grey",linetype=2)+
        geom_line(aes(date,CH4_flux_raw),alpha=.5)+
        geom_line(aes(date,CH4_flux))+
        xlim(range(data_long$date))+
        labs(y=expression(italic(F[CH4])~"("*n * mol ~ m^{-2} ~ s^{-1}*")"))
      T_plt <- ggplot(flux)+
        geom_rect(data=subset(step_df,step != 0),aes(xmin = Start, xmax=End,ymin=-Inf,ymax = Inf,alpha=step))+
        scale_alpha_discrete(range = c(0.1,0.4))+
        guides(alpha=F)+
        geom_vline(xintercept = step_date,col="grey",linetype=2)+
        geom_line(aes(date,T_C))+
        xlim(range(data_long$date))
      # Combined figure with flux panels.
      ggpubr::ggarrange(CO2_offset_plot+
                          labs(title = paste("Versuch",paste0(Versuch,":"),pp_chamber$Modus[Versuch]))+
                          theme(axis.title.x = element_blank(),axis.text.x = element_blank()),
                        #CO2_plot+theme(axis.title.x = element_blank(),
                        #               axis.text.x = element_blank()),
                        FCO2_plt+theme(axis.title.x = element_blank(),
                                       axis.text.x = element_blank()),
                        FCH4_plt+theme(axis.title.x = element_blank(),
                                       axis.text.x = element_blank()),
                        P_plt+theme(axis.title.x = element_blank(),
                                    axis.text.x = element_blank()),
                        T_plt,ncol=1,align = "v",heights = c(3,1,1,1,1),common.legend = T,legend = "right")+
        ggsave(paste0(plotpfad_PPchamber,"CO2_offset_flux_",Versuch,".png"),width = 7,height = 10)
    }
    # Standard combined figure: CO2, CO2-shift, PPC and pressure panels.
    ggpubr::ggarrange(CO2_offset_plot+
                        labs(title = paste("Versuch",paste0(Versuch,":"),pp_chamber$Modus[Versuch]))+
                        theme(axis.title.x = element_blank(),axis.text.x = element_blank()),
                      CO2_plot+theme(axis.title.x = element_blank(),
                                     axis.text.x = element_blank()),
                      PPC_plt+theme(axis.title.x = element_blank(),
                                    axis.text.x = element_blank()),
                      P_plt,ncol=1,align = "v",heights = c(3,2,.8,1),common.legend = T,legend = "right")+
      ggsave(paste0(plotpfad_PPchamber,"CO2_offset_",Versuch,".png"),width = 7,height = 7)
    # Publication figure 4 (only for experiment 20, with English date labels).
    if(Versuch == 20){
      Sys.setlocale("LC_ALL", "English")
      ggpubr::ggarrange(CO2_offset_plot+
                          scale_alpha_discrete("mode",range = c(0.1,0.4),labels =
                                                 expression("1: high P",
                                                            "2: P"[lateral],
                                                            "3: P"[lateral],
                                                            "4: low P"),guide = guide_legend(order = 1))+
                          theme(legend.text.align = 0,axis.title.x = element_blank(),axis.text.x = element_blank()),
                        CO2_plot+theme(axis.title.x = element_blank(),
                                       axis.text.x = element_blank()),
                        #PPC_plt+theme(axis.title.x = element_blank(),
                        #              axis.text.x = element_blank()),
                        P_plt,ncol=1,align = "v",heights = c(3,2,1),common.legend = T,legend = "right")+
        ggsave(paste0(plotpfad_PPchamber,"Figure_4.png"),width = 7,height = 7)
      # Restore the system default locale.
      Sys.setlocale("LC_ALL", "")
    }
    ggpubr::ggarrange(CO2_plot+labs(title = paste("Versuch",paste0(Versuch,":"),pp_chamber$Modus[Versuch])),P_plt,ncol=1,align = "v",heights = c(2,1))+
      ggsave(paste0(plotpfad_PPchamber,"CO2_offset_PPC_",Versuch,".png"),width = 7,height = 6)
  }
  # Persist the per-experiment long-format result.
  data_long$Versuch <- Versuch
  save(data_long,file = paste0(datapfad_PP_Kammer,"CO2_offset_",Versuch,".RData"))
}
ggpubr::ggarrange(CO2_offset_plot+
theme(axis.title.x = element_blank(),axis.text.x = element_blank()),
P_plt,ncol=1,align = "v",heights = c(3,1),common.legend = T,legend = "right")+
ggsave(paste0(plotpfad_PPchamber,"CO2_offset_flux_Werkst1.png"),width = 7,height = 6)
ggpubr::ggarrange(CO2_plot+theme(axis.title.x = element_blank(),
axis.text.x = element_blank()),
P_plt,ncol=1,align = "v",heights = c(3,1),common.legend = T,legend = "right")+
ggsave(paste0(plotpfad_PPchamber,"CO2_offset_flux_Werkst2.png"),width = 7,height = 6)
########################################
########################################
########################################
# ggplot(subset(data_long,mode_zeit > 9))+
# geom_point(aes(P_3,CO2_offset,col=factor(tiefe)))+
# geom_smooth(aes(P_3,CO2_offset,col=factor(tiefe)),method = "glm")+
# facet_grid(~probe)
# ggplot(subset(data_long,mode_zeit > 9))+
# geom_point(aes(P_horiz,CO2_offset,col=factor(tiefe)))+
# geom_smooth(aes(P_horiz,CO2_offset,col=factor(tiefe)),method = "glm")+
# facet_grid(~probe)
#
#
# ggplot(data)+
# geom_point(aes(P_3,CO2_smp2_6,col=P_2))
# ggplot(data)+
# geom_point(aes(P_3,CO2_smp1_6,col=P_2))
# probe1_plot <-
# ggplot(subset(data_probe1u2,tiefenstufe %in% c(3:7)))+
# geom_vline(xintercept = step_date,col="grey",linetype=2)+
# geom_rect(data=step_df,aes(xmin = Start, xmax=End,ymin=-Inf,ymax = Inf,fill=mode),alpha=0.4)+
# geom_line(aes(date, CO2_smp1_roll,col=factor(abs(tiefe))))+
# labs(col="tiefe (cm)",y = "CO2 profile 1 (ppm)",x="")+
# scale_color_manual(values = scales::hue_pal()(8),limits = factor(0:7 * 3.5))+
# theme(axis.title.y = element_text(colour = "blue"))+
# scale_fill_manual(values = c(0,scales::hue_pal()(4)),limits = unique(modes))
#
# probe2_plot <- ggplot(subset(data_probe1u2,tiefenstufe %in% c(1:7)))+
# geom_rect(data=step_df,aes(xmin = Start, xmax=End,ymin=-Inf,ymax = Inf,fill=mode),alpha=0.4)+
# geom_vline(xintercept = step_date,col="grey",linetype=2)+
# geom_line(aes(date, CO2_smp2_roll,col=factor(abs(tiefe))))+
# scale_color_manual(values = scales::hue_pal()(8),limits = factor(0:7 * 3.5))+
# geom_line(data = data,aes(date, CO2_atm,col=factor(0)))+
#
# labs(y = "CO2 profile 2 (ppm)",x="")+
# scale_fill_manual(values = c(0,scales::hue_pal()(4)),limits = unique(modes))
#
# P_roll_plot <- ggplot(subset(data_PPC,id%in%1:4))+
# geom_vline(xintercept = step_date,col="grey",linetype=2)+
# geom_hline(yintercept = 0,col="grey",linetype=2)+
# geom_rect(data=step_df,aes(xmin = Start, xmax=End,ymin=-Inf,ymax = Inf,fill=mode),alpha=0.4)+
# geom_line(aes(date, P_roll_cal,col=factor(id),group=id),alpha=0.8)+
# labs(y = expression(P[rollmean]~"(Pa)"))+
# scale_color_manual(values = c("black","black","blue","blue"))+
# scale_fill_manual(values = c(0,scales::hue_pal()(4)),limits = unique(modes))
#
# ggpubr::ggarrange(probe1_plot,probe2_plot,P_roll_plot,ncol=1,align = "v",common.legend = T,legend = "right")+
# ggsave(paste0(plotpfad_PPchamber,"P_roll_lateral_Versuch.png"),width = 7,height = 6)
|
4373590a858d3b5e143498dd930d81f193d3b9e2 | ea82c804eba4fed67e37093a5dd47dd82b0b1459 | /figure_scripts/Supp._Table_1._trait_N_table.r | 23423d337ac7efd6d3ab29d957459fb8a4e1dd8a | [] | no_license | colinaverill/Averill_et_al_2019_myco.traits | f8159f50d7ddcc3aa7a44981b5999b163cd2872e | ac72a187b2d84ef3de2e63b3656243de3c19934e | refs/heads/master | 2020-07-30T08:28:49.711202 | 2019-09-22T14:52:20 | 2019-09-22T14:52:20 | 210,155,388 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,280 | r | Supp._Table_1._trait_N_table.r | #counting number of observations by trait.
#Using MCMCglmm to fit PGLS models.
#zero model selection performed, just straight up sending model structures based on apriori hypotheses.
rm(list=ls())
source('paths.r')
source('functions/pgls_glmm_no_selection.r')
source('functions/tic_toc.r')
library(data.table)
library(phytools)
library(caper)
#set output path.----
output.path <- trait_N_table.path
#specify traits, count observations.----
traits <- c('Ngreen','Nsenes','Nroots','Pgreen','Psenes','Proots')
preds <- c('tpl.Species','MYCO_ASSO','nfix','pgf','deciduous','mat.c','map.c','biome_bore','biome_trop')
#load data.----
d <- readRDS(inter_specific_analysis_data.path)
phy <- read.tree(phylogeny_raw.path) #'colin_2018-12--2.tre'
d$biome_trop <- ifelse(d$biome3 == 'b_tropical',1, 0)
d$biome_bore <- ifelse(d$biome3 == 'c_boreal',1, 0)
#Some data prep.----
phy$tip.label <- paste0(toupper(substr(phy$tip.label, 1, 1)), substr(phy$tip.label, 2, nchar(phy$tip.label)))
phy$tip.label <- gsub('_',' ',phy$tip.label)
phy$node.label <- NULL
d <- d[d$Species %in% phy$tip.label,]
d$MYCO_ASSO <- droplevels(d$MYCO_ASSO)
#Must have at least 1 trait observation.
drop <- d[is.na(d$Ngreen) & is.na(d$Nsenes) & is.na(d$Nroots) & is.na(d$Pgreen) & is.na(d$Psenes) & is.na(d$Proots) & is.na(d$log.LL) & is.na(d$root_lifespan),]
d <- d[!(d$tpl.Species %in% drop$tpl.Species),]
keep <- d[,preds]
keep <- keep[complete.cases(keep),]
d <- d[d$tpl.Species %in% keep$tpl.Species,]
d <- d[,c(traits,preds)]
#count observations.----
sum <- list()
spp <- list()
for(i in 1:length(traits)){
dat <- d[,colnames(d) %in% c(traits[i],preds)]
dat <- dat[complete.cases(dat),]
#we did not analyze tropical or boreal root lifespan observations.
if(traits[i] == 'root_lifespan'){
dat <- dat[dat$biome_bore == 0,]
dat <- dat[dat$biome_trop == 0,]
}
N <- nrow(dat)
AM <- nrow(dat[dat$MYCO_ASSO == 'AM',])
EM <- nrow(dat[dat$MYCO_ASSO == 'ECM',])
nfix <- nrow(dat[dat$nfix == 1,])
angio <- nrow(dat[dat$pgf == 'angio',])
gymno <- nrow(dat[dat$pgf == 'gymno',])
everg <- nrow(dat[dat$deciduous == 0,])
decid <- nrow(dat[dat$deciduous == 1,])
bore <- nrow(dat[dat$biome_bore == 1,])
trop <- nrow(dat[dat$biome_trop == 1,])
temp <- N - (bore + trop)
return <- c(N,AM,EM,nfix,angio,gymno,everg,decid,bore,temp,trop)
sum[[i]] <- return
spp[[i]] <- dat$tpl.Species
}
sum <- do.call(rbind, sum)
spp <- unlist(spp)
traits <- c('green foliar N','senescent foliar N','root N',
'green foliar P','senescent foliar P','root P')
sum <- data.frame(cbind(traits, sum))
colnames(sum) <- c('Trait','N','AM','EM','N-fixer','angiosperm','gymnosperm','evergreen','deciduous','boreal','temperate','tropical')
for(i in 2:ncol(sum)){
sum[,i] <- as.numeric(as.character(sum[,i]))
}
#save output as .csv----
write.csv(sum, output.path)
#How many total unique species?
n.spp <- length(unique(d$tpl.Species))
#how many trait observations?
#myc type, nfix, angio/gymno for every species, as well as the traits.
dat <- d[,c('Ngreen','Nsenes','Nroots','Pgreen','Psenes','Proots')]
#multiply by 4 for myco, deciduous, Nfix and angio-gymno.
n.trait <- length(unique(spp)) * 4 + sum(!is.na(dat))
cat('You observed',n.trait,'traits across',n.spp,'unique species.\n')
|
58da5c2d315bf198ccc5386cc7d62c56541fe954 | 2219f7988e07df1d78f09e2bd3ce2feb6c87fc51 | /tests/testthat/test-rrd.R | 210f5331d80255f2551a5c8826d94cafcfd0cb54 | [] | no_license | rtdists/rtdists | d1374c0e57fdbe05c0bdd3ce7d2b71d53a4f84e8 | 99a226f750c22de61e8e4899d2776104c316f03d | refs/heads/master | 2022-01-19T10:51:30.547103 | 2022-01-04T09:00:15 | 2022-01-04T09:00:15 | 23,277,437 | 41 | 14 | null | 2023-08-12T02:39:35 | 2014-08-24T09:20:45 | R | UTF-8 | R | false | false | 1,949 | r | test-rrd.R |
context("diffusion parameter input (via rdiffusion)")
test_that("check individual parameters:", {
expect_that(rdiffusion(10, a=1, z=0.5, v=2, t0=0.5, d=0, sz = 0, sv = 0, st0 = 0), is_a("data.frame"))
expect_that(suppressWarnings(rdiffusion(10, a=1, z=0.5, v=2, t0=0.5, d=0, sz = 0, sv = 0, st0 = NULL)), throws_error("Not enough parameters"))
expect_that(rdiffusion(10, a=1, z=0.5, v=2, t0=0.5, d=0, sz = 0, sv = Inf, st0 = 0), throws_error())
expect_that(suppressWarnings(rdiffusion(10, a=1, z=NA, v=2, t0=0.5, d=0, sz = 0, sv = 0, st0 = 0)), throws_error())
})
# test_that("check parameters:", {
# p1 <- c(1, 0.5, 2, 0.5, 0, 0, 0, 0)
# expect_that(rdiffusion(10, parameters = p1), is_a("data.frame"))
# expect_that(rdiffusion(10, parameters = p1[1:7]), throws_error())
# names(p1) <- c("a", "z", "v","t0","d", "sz","sv","st0")
# expect_that(rdiffusion(10, parameters = p1), is_a("data.frame"))
# names(p1) <- c(c("a","v","t0","z"), sample(c("sz","sv","st0", "d")))
# expect_that(rdiffusion(10, parameters = p1), is_a("data.frame"))
# names(p1)[3] <- "xx"
# expect_that(rdiffusion(10, parameters = p1), throws_error())
# names(p1) <- NULL
# p1[1] <- NA
# expect_that(rdiffusion(10, parameters = p1), throws_error())
# p1[1] <- Inf
# expect_that(rdiffusion(10, parameters = p1), throws_error())
# })
context("rdiffusion: random number generation for the diffusion model")
test_that("rdiffusion works", {
rrds1 <- rdiffusion(10, a=1, z=0.5, v=2, t0=0.5, d=0, sz = 0, sv = 0, st0 = 0)
rrds2 <- rdiffusion(10, a=1, z=0.5, v=2, t0=0.5, d=0, sz = 0, sv = 0, st0 = 0)
expect_that(rrds1, is_a("data.frame"))
expect_false(isTRUE(all.equal(rrds1, rrds2)))
set.seed(1)
rrds1 <- rdiffusion(10, a=1, z=0.5, v=2, t0=0.5, d=0, sz = 0, sv = 0, st0 = 0)
set.seed(1)
rrds2 <- rdiffusion(10, a=1, z=0.5, v=2, t0=0.5, d=0, sz = 0, sv = 0, st0 = 0)
expect_that(rrds1, equals(rrds2))
set.seed(NULL)
})
|
8f37057c3550965f7a67475c31527d0c14ec0f4f | 962f5656d8c91a1d5220a4fb80a972f723b67c1d | /Discussion Code/Christopher Discussion 1 - Homework 1.R | c0a62a6d525e99b77697b2b03ac2dd7489a68ce8 | [
"Apache-2.0"
] | permissive | christopheraden/Nonparametric-Statistics | 292b616a59ccdb1fc6a90e945c75e2cb68165566 | 15fb5ec1cbae0e00649237a944602932f6ebaedf | refs/heads/master | 2020-04-29T03:26:08.749755 | 2015-06-11T02:26:02 | 2015-06-11T02:26:02 | 34,493,459 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,588 | r | Christopher Discussion 1 - Homework 1.R | #Intro to R:
x = 10 #Assigning variables
x #Printing the value of the variable
x = c(1, 2, 11, 24) #A vector of values
x #Value changed. "x" is no longer 10.
x > 5 #Tests if EACH value is greater than 5.
as.numeric(x>5) #If true, makes value 1, else, false.
sum(x>5) #Number of values where x > 5.
mean(x) #Takes the average of all x values.
sd(x) #Standard deviation.
sqrt(x) #Square root each entry.
sum(x) #Sum of all elements.
n = length(x) #Tells us how many elements there are.
sort(x) #Sort the values of x, smallest to largest.
order(x) #Gives the rankings of the elements. 1 is to minimum of the vector.
M = matrix(x, nrow=2, ncol=2) #Makes the values into a matrix. Fills in column by column.
M #Print the matrix to the screen
as.data.frame(M) #Converts matrices into data frames.
#Some stats
alpha = .05 #Set type I error rate.
qnorm(1-alpha/2) #Gives the quantile of the normal distribution.
binom.test(2, 12, p=.5) #An exact binomial test--how odd is 2 successes out of 12 if H0: p=0.5?
?binom.test
###
#Some homework help
###
#Make a dataset. This is NOT the same as the homework.
rainfall = c(24.3, 20.6, 17.6, 23.0, 27.2, 28.5, 32.8,
28.2, 25.9, 19.5, 27.2, 31.1, 28.7, 24.8,
24.3, 27.1, 30.6, 26.8, 18.9, 36.3, 28.0)
n = length(rainfall) #How many observations are there?
#Asymptotic Confidence for Median, from the book, alpha = .05.
a = (.5*n) - (sqrt(.25*n) * qnorm(.975))
b = (1 + .5*n) + (sqrt(.25*n) * qnorm(.975))
#This is the ORDER of the confidence interval. To get the CI find
#the corresponding value from the sample. a and b are not always integers, so you need to round them.
#This code may NOT work. You need to figure out which way to round a and b.
#To round up, use ceiling(x). To round down, use floor(x), where x is the value.
#For example, ceiling(7.49) = 8, and floor(9.999) = 9.
sort(rainfall)[c(a,b)] #Gives the value of the sample corresponding to the order statistic a and b.
#Exact
find_a_and_b = function(a, b, n) {
#Function takes integers a and b, representing the lower and upper upper statistic bounds
#as well as the number of trials, n, and outputs the probability of a binomial lying between
#those two values if probability of success was 1/2.
k = a : (b-1)
sum(choose(n,k) * .5^n)
}
#Remember to use the ROUNDED versions of a and b. Code will not work if a and b are not whole numbers!!
find_a_and_b(a, b, n) #Should confirm the results pretty well
#What if we assumed normality and naively built a CI for the MEAN?
lower = mean(rainfall) - sd(rainfall) * qnorm(.975) / sqrt(n)
upper = mean(rainfall) + sd(rainfall) * qnorm(.975) / sqrt(n)
c(lower, upper) #Is this close? Depends on the amount of skew and the sample size.
#5
#rep(value, times) repeats a value a given number of times. Ex: rep(2, 4) will output c(2,2,2,2).
fake_data = c(rep(71.1, 39), 100) #39 values of 71.1, 1 of 100.
n = length(fake_data) #Number of observations
binom_data = as.numeric(fake_data > 71) #Which obs are more than 71?
x = sum(binom_data) #Number of obs more than 71
binom.test(x, n, p = .5, alternative = "greater") #Test that proportion of obs less than 71 is p = .50
#CLT test
Ztest = function(X) {
#Function returns the z-test value, given a vector of data.
#This z-test assumes the null hypothesis is zero mean.
sqrt(length(X)) * mean(X) / sd(X)
}
Ztest(fake_data)
#Let's change the 100 to a 90. What's the z-score here?
#Does this jive with intuition?
fake_data2 = c(rep(71.3, 39), 90)
Ztest(fake_data2)
|
8a3982925cdc965afcfe663ac552321988c22e00 | 8b0d730a24b664f19eff71b2871fdeb3a70391f3 | /predictor/Prep_Analysis.R | e8ac5da95572316dc02e4ea59b09b65cb5b52298 | [] | no_license | REMAR-Project/Predictor-Tool | 07076e00094dfb93b8311d0b31784b8815880b29 | c4d1ebdcbf2fdf22721703c3475cdc631f3275f6 | refs/heads/main | 2023-02-06T16:44:32.274792 | 2020-12-18T14:32:15 | 2020-12-18T14:32:15 | 322,615,883 | 1 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 29,111 | r | Prep_Analysis.R | prep_analysis = function(fileNumber=0, tides=NULL, svg=FALSE, n=1){
if (n==2){
cat(yellow("\nInfo: Prep Analysis has not run (n=2)\n"))
return(1)
}
library(ggplot2)
cat(yellow("\nStarting Prep_Analysis.R\n"))
state = tides$station$ID.state
# Convert UTC time to local time
tides$tide$tide.time.utc = tides$tide$tide.time.utc + tides$station$clock.offset*60*60
# Used to separate high and low tides
tide.range = diff(tides$tide$tide.level)
# Separate high and low tides (tide level)
tide.level.high = tides$tide$tide.level[which(tide.range < 0)]
tide.level.low = tides$tide$tide.level[which(tide.range > 0)]
# Separate high and low tides (time points)
tide.local.time.high = tides$tide$tide.time.utc[which(tide.range < 0)]
tide.local.time.low = tides$tide$tide.time.utc[which(tide.range > 0)]
# Do interpolation of tides
func.interpolate.high = approxfun(x = tide.local.time.high, y = tide.level.high, method="linear", ties = mean)
func.interpolate.low = approxfun(x = tide.local.time.low, y = tide.level.low, method="linear", ties = mean)
# Better to plot
tide.range.interpolated = func.interpolate.high(tides$tide$tide.time.utc) - func.interpolate.low(tides$tide$tide.time.utc)
# Plot tidal amplitude over time (interpolated)
# plot(tides$tide$tide.time.utc, tide.range.interpolated, type="l", col="grey30", ylab="Tidal amplitude", main=paste(tides$station$ID.numb, "-", tides$station$ID.name))
# Get start and end year from data
year.start = as.numeric(format(tides$tide$tide.time.utc[1],"%Y"))
if (as.numeric(format(tides$tide$tide.time.utc[1],"%m")) != 1){year.start = year.start + 1}
year.end = as.numeric(format(tides$tide$tide.time.utc[length(tides$tide$tide.time.utc)],"%Y"))
if (year.end > 2024){year.end = 2024}
# Only keep moons for months present in the data
new.moons <- moon.new.dates[which(format(as.Date.POSIXct(moon.new.dates),"%Y") >= year.start & format(as.Date.POSIXct(moon.new.dates),"%Y") <= year.end)]
full.moons <- moon.full.dates[which(format(as.Date.POSIXct(moon.full.dates),"%Y") >= year.start & format(as.Date.POSIXct(moon.full.dates),"%Y") <= year.end)]
  # Interleave new and full moons into one chronologically ordered vector;
  # 'phase' records which type comes first so labels alternate correctly.
  # Find out which date is the earliest
  # e.g if new moon = 14/01/06 and full moon = 29/01/06 then temp1 is new moon
  # this ensures dates are sorted ascendingly, also helps getting the current phase into the data frame
  if (full.moons[1] < new.moons[1]){
    temp1 <- full.moons
    temp2 <- new.moons
    phase <- c("FM", "NM")
  } else {
    temp1 <- new.moons
    temp2 <- full.moons
    phase <- c("NM", "FM")
  }
  moons <- vector()
  phases <- vector()
  # Alternate temp1[i], temp2[i] so moons is sorted by date.
  for (i in 1:length(temp1)){
    moons <- c(moons, temp1[i])
    phases <- c(phases, phase[1])
    moons <- c(moons, temp2[i])
    phases <- c(phases, phase[2])
  }
  # If temp2 is one element shorter than temp1, the last interleaved slot is
  # NA; drop that trailing moon/phase pair.
  if (NA %in% moons){
    if (match(NA, moons) == length(moons))
    {
      moons <- moons[!is.na(moons)]
      phases <- head(phases, -1)
    }
  }
  # data frame that will be exported
  prep.analysis <- data.frame(as.Date.POSIXct(moons), phases)
mta1 <- numeric()
stdev1 <- numeric()
sterror1 <- numeric()
maxta1 <- numeric()
if (state == "PA" | state == "PA1" | state == "PA2"){
minus = 1
} else {
minus = 0
}
plus = 0
for (i in 1:length(temp1))
{
index.high <- which(as.Date(tide.local.time.high) >= (as.Date.POSIXct(temp1[i])-minus) & as.Date(tide.local.time.high) <= (as.Date.POSIXct(temp1[i])+plus))
index.low <- which(as.Date(tide.local.time.low) >= (as.Date.POSIXct(temp1[i])-minus) & as.Date(tide.local.time.low) <= (as.Date.POSIXct(temp1[i])+plus))
amplitudes <- numeric()
if (length(index.high) != 0 & length(index.low) != 0){
for (j in 1:min(c(length(index.high), length(index.low))))
{
amplitudes <- c(amplitudes, tide.level.high[index.high[j]] - tide.level.low[index.low[j]])
}
mta1 <- c(mta1, mean(amplitudes))
stdev1 <- c(stdev1, sd(amplitudes))
sterror1 <- c(sterror1, sd(amplitudes)/sqrt(length(amplitudes)))
maxta1 <- c(maxta1, max(amplitudes))
}
else{
mta1 <- c(mta1, NA)
stdev1 <- c(stdev1, NA)
sterror1 <- c(sterror1, NA)
maxta1 <- c(maxta1, NA)
}
}
# Get all mtas, std dev, std error and max tidal amplitude for temp2
mta2 <- numeric()
stdev2 <- numeric()
sterror2 <- numeric()
maxta2 <- numeric()
for (i in 1:length(temp2))
{
index.high <- which(as.Date(tide.local.time.high) >= (as.Date.POSIXct(temp2[i])-minus) & as.Date(tide.local.time.high) <= (as.Date.POSIXct(temp2[i])+plus))
index.low <- which(as.Date(tide.local.time.low) >= (as.Date.POSIXct(temp2[i])-minus) & as.Date(tide.local.time.low) <= (as.Date.POSIXct(temp2[i])+plus))
amplitudes <- numeric()
if (length(index.high) != 0 & length(index.low) != 0){
for (j in 1:min(c(length(index.high), length(index.low))))
{
amplitudes <- c(amplitudes, tide.level.high[index.high[j]] - tide.level.low[index.low[j]])
}
mta2 <- c(mta2, mean(amplitudes))
stdev2 <- c(stdev2, sd(amplitudes))
sterror2 <- c(sterror2, sd(amplitudes)/sqrt(length(amplitudes)))
maxta2 <- c(maxta2, max(amplitudes))
} else {
mta2 <- c(mta2, NA)
stdev2 <- c(stdev2, NA)
sterror2 <- c(sterror2, NA)
maxta2 <- c(maxta2, NA)
}
}
  # Make one big array of both temp1 and temp2 mtas, std dev, std error and max ta
  # (interleaved in the same chronological order as the 'moons' vector).
  mta <- numeric()
  stdev <- numeric()
  sterror <- numeric()
  maxta <- numeric()
  for (i in 1:length(temp1)){
    mta <- c(mta, mta1[i], mta2[i])
    stdev <- c(stdev, stdev1[i], stdev2[i])
    sterror <- c(sterror, sterror1[i], sterror2[i])
    maxta <- c(maxta, maxta1[i], maxta2[i])
  }
  # Older NA-trimming approach, kept commented out for reference.
  # if (NA %in% mta & NA %in% stdev & NA %in% sterror & NA %in% maxta){
  #   if (match(NA, mta) == length(mta))
  #   {
  #     mta <- mta[!is.na(mta)]
  #     stdev <- stdev[!is.na(stdev)]
  #     sterror <- sterror[!is.na(sterror)]
  #     maxta <- maxta[!is.na(maxta)]
  #   }
  # }
  # Drop a single trailing NA row (final moon with no matching tide data) so
  # the statistics vectors line up with prep.analysis' rows.
  if (is.na(mta[length(mta)])){
    mta <- mta[1:(length(mta)-1)]
    stdev <- stdev[1:(length(stdev)-1)]
    sterror <- sterror[1:(length(sterror)-1)]
    maxta <- maxta[1:(length(maxta)-1)]
  }
  prep.analysis <- cbind(prep.analysis, round(mta,2), round(stdev, 2), round(sterror, 2), round(maxta,2))
  names(prep.analysis) <- c("Date", "Phase", "MTA", "Std Dev", "Std Error", "Max TA")
  # plot
  # Base-graphics figure: smoothed FM/NM amplitude curves, month shading,
  # andada-season rectangles, and the curve intersection points.
  if(isTRUE(svg)) {
    # Write the figure as PNG into ./export next to this script.
    setwd(paste0(dirname(rstudioapi::getSourceEditorContext()$path)))
    dir.create("export", showWarnings = FALSE)
    setwd(paste0(dirname(rstudioapi::getSourceEditorContext()$path), "/export"))
    name = paste0(fileNumber, ". ", substr(tides$station$file, 1, nchar(tides$station$file)-4), " (Prep Analysis).png")
    png(filename=name, width = 1150, height = 780)
  }
  par(mar=c(3,3,5,2)) #reset margins
  # Smooth each phase's MTA series; full moon is drawn orange, new moon black.
  if (prep.analysis$Phase[1] == "FM"){
    temp1.type = "FM"
    temp1 = smooth.spline(spline(as.Date.POSIXct(full.moons), mta1, ties=mean))
    temp2 = smooth.spline(spline(as.Date.POSIXct(new.moons), mta2, ties=mean))
    plot(temp1, type="l", lwd=2, col="darkorange2", ylab="Mean Tidal Amplitude (m)", ylim=c(0,6.5), xaxt='n', yaxt='n')
    lines(temp2, lwd=2, col="black")
  } else {
    temp1.type = "NM"
    temp1 = smooth.spline(spline(as.Date.POSIXct(new.moons), mta1, ties=mean))
    temp2 = smooth.spline(spline(as.Date.POSIXct(full.moons), mta2, ties=mean))
    plot(temp1, type="l", lwd=2, col="black", ylab="Mean Tidal Amplitude (m)", ylim=c(0,6.5), xaxt='n', yaxt='n')
    lines(temp2, lwd=2, col="darkorange2")
  }
  # make temp1 and temp2 have the same length
  # (truncate the longer spline so pointwise comparisons below are valid)
  if (length(temp1$x) > length(temp2$x)){
    temp1$x = temp1$x[seq(1, length(temp2$x))]
    temp1$y = temp1$y[seq(1, length(temp2$y))]
  } else {
    temp2$x = temp2$x[seq(1, length(temp1$x))]
    temp2$y = temp2$y[seq(1, length(temp1$y))]
  }
  # Moon markers along the top of the plot (y = 6.5).
  points(as.Date.POSIXct(full.moons), rep(6.5, length(full.moons)), col="darkorange2", pch=19, cex=1.2)
  points(as.Date.POSIXct(new.moons), rep(6.5, length(new.moons)), col="black", pch=19, cex=1.2)
  # Andada block
  # Dashed rectangles outline the season: Nov-Jan for southern states,
  # Jan-Mar otherwise.
  state = tides$station$ID.state
  if (state == "RJ" | state == "SP" | state == "PR" | state == "SC" | state == "RS"){
    for (i in (year.start - 1):(year.end + 1)){
      rect(as.Date.POSIXct(ISOdate((i-1),11,1)), 0, as.Date.POSIXct(ISOdate(i,1,31)), 6, col=rgb(1,1,1,0), lty=2, lwd=2)
    }
  }else {
    for (i in (year.start - 1):(year.end + 1)){
      rect(as.Date.POSIXct(ISOdate(i,1,1)), 0, as.Date.POSIXct(ISOdate(i,3,31)), 6, col=rgb(1,1,1,0), lty=2, lwd=2)
    }
  }
  # Month grid, alternating shading, and axes (months on top, years on bottom).
  months <- vector()
  months.labels <- vector()
  years <- numeric()
  for(i in (year.start):(year.end+1)){
    for (j in 1:12){
      months <- c(months, ISOdate(i, j, 1))
    }
    months.labels <- c(months.labels, c('Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'))
    years <- c(years, ISOdate(i,1,1))
  }
  for(i in seq(1, length(months), by=2)){
    rect(as.Date.POSIXct(months[i]), -1, as.Date.POSIXct(months[i+1]), 7, col=rgb(0.01,0.01,0.01,0.1), lty=0)
  }
  abline(v = as.Date.POSIXct(months), col = "grey75", lty = 3)
  axis(3, at=as.Date.POSIXct(months)+15, labels=months.labels, cex.axis=0.7, col="white")
  axis(3, at=as.Date.POSIXct(months), labels=rep(c(""), times=length(months)) , cex.axis=0.7)
  axis(2, seq(0,6), seq(0,6), las=1)
  axis(1, as.Date.POSIXct(years), format(as.Date.POSIXct(years),"%Y"))
  title(paste("Estação: ", tides$station$ID.numb, "\nNome: ", toTitleCase(tolower(tides$station$ID.name)), "\n"), adj=0, cex.main=0.8, line=2)
  title(paste("Estado:", state, "\nLat/Long:", paste0(round(tides$station$latitude, 2), ", ", round(tides$station$longitude, 2)), "\n"), adj=1, cex.main=0.8, line=2)
  # Intersection points #
  # Slide a 31-point window (step 8) along the two curves and record where
  # they cross; tryCatch swallows windows where curve_intersect fails
  # (e.g. no crossing or window past the end of the series).
  poi.x <- numeric()
  poi.y <- numeric()
  for (i in seq(1, length(temp1$x), by=8)){
    result = tryCatch({
      t1 <- spline(temp1$x[seq(i,i+30)], temp1$y[seq(i,i+30)])
      t2 <- spline(temp2$x[seq(i,i+30)], temp2$y[seq(i,i+30)])
      poi.x <- c(poi.x, curve_intersect(t1, t2)$x)
      poi.y <- c(poi.y, curve_intersect(t1, t2)$y)
    }, error = function(e) {})
  }
  #points(poi.x, poi.y, pch=19, col="red")
  # NOTE(review): poi.dates is assigned with <<- (global environment) --
  # presumably consumed by other scripts; confirm before changing.
  # Overlapping windows can find the same crossing twice; deduplicate by date.
  poi.dates <<- as.Date.POSIXct(as.POSIXct.Date(poi.x))
  poi.x <- poi.x[!duplicated(poi.dates)]
  poi.y <- poi.y[!duplicated(poi.dates)]
  poi.dates <<- poi.dates[!duplicated(poi.dates)]
  points(poi.x, poi.y, pch=19, col="red")
  if(isTRUE(svg)) {
    dev.off()
    #fileName = paste0(fileNumber, ". ", substr(tides$station$file, 1, nchar(tides$station$file)-4), " (Prep Analysis).csv")
    #write.table(prep.analysis,paste0(dirname(rstudioapi::getSourceEditorContext()$path), "/export/", fileName), sep=",", row.names = FALSE, col.names=TRUE)
  }
  ## Advice with poi ##
  # For each curve-crossing point (poi), classify it with transitionPoints()
  # into Rules 2/3A/3B/3C/4 depending on when in the season the transition
  # falls, and fill the per-moon 'advice' (which phase to fish next) and
  # 'rule' vectors for the affected months.
  # NOTE(review): only the Rule 4 'if' has an 'else'; the final Rule 1 branch
  # therefore runs for every poi that is NOT Rule 4 (guarded by advice == ""
  # so earlier rules are not overwritten). Presumably intentional -- confirm.
  advice <- rep("", length(prep.analysis$Date))
  rule <- rep("", length(prep.analysis$Date))
  # Go through each point of intersection (poi)
  for (i in 1:length(poi.dates)){
    # Extract year of current poi
    poi.year = as.numeric(format(poi.dates[i], "%Y"))
    # Go through each moon in prep.analysis file
    for (j in 1:(length(prep.analysis$Date)-1)){
      # Extract year and month from each date in prep.analysis file
      moon.year = as.numeric(format(prep.analysis$Date[j], "%Y"))
      moon.month = as.numeric(format(prep.analysis$Date[j], "%m"))
      # RULE 2: Transition in last week of November or first three weeks December
      if (transitionPoints(poi.dates[i]) == "Rule 2"){
        # if rule 2, advice has to be applied to the following Jan, Feb, Mar and Apr
        year.apply = poi.year + 1
        # Ensure the advice is only applied to Ja, Feb, March and Apr
        if (moon.year == year.apply & (moon.month == 1 | moon.month == 2 | moon.month == 3 | moon.month == 4)){
          # Calculate advice based on mta: recommend whichever of the current
          # or next moon has the larger mean tidal amplitude.
          if (prep.analysis$MTA[j] > prep.analysis$MTA[j+1]){
            advice[j] = prep.analysis$Phase[j]
          } else {
            advice[j] = prep.analysis$Phase[j+1]
          }
          rule[j] = "Rule 2"
        }
      } # end of rule 2
      # Rule 3A: transition in last week of December or fisrt 3 weeks of January (1/1/1)
      if (transitionPoints(poi.dates[i]) == "Rule 3A"){
        year.apply = poi.year
        # Ensure the advice is only applied to Ja, Feb, March and Apr
        if (moon.year == year.apply & (moon.month == 1 | moon.month == 2 | moon.month == 3 | moon.month == 4)){
          # Calculate advice based on mta
          if (prep.analysis$MTA[j] > prep.analysis$MTA[j+1]){
            advice[j] = prep.analysis$Phase[j]
          } else {
            advice[j] = prep.analysis$Phase[j+1]
          }
          rule[j] = "Rule 3A"
        }
      } # end of rule 3A
      # Rule 3B: transition in last week January and first 3 weeks of February (2/1/1)
      if (transitionPoints(poi.dates[i]) == "Rule 3B"){
        year.apply = poi.year
        # Apply the advice "both moons" to Jan
        if (moon.year == year.apply & moon.month == 1){
          advice[j] = "Both"
          rule[j] = "Rule 3B"
        }
        # Apply mta advice to Feb, Mar and Apr
        else if (moon.year == year.apply & (moon.month == 2 | moon.month == 3 | moon.month == 4)){
          # Calculate advice based on mta
          if (prep.analysis$MTA[j] > prep.analysis$MTA[j+1]){
            advice[j] = prep.analysis$Phase[j]
          } else {
            advice[j] = prep.analysis$Phase[j+1]
          }
          rule[j] = "Rule 3B"
        }
      } # end of rule 3B
      # Rule 3C: Transition in last week of February or first 3 weeks of March (1/2/2)
      if (transitionPoints(poi.dates[i]) == "Rule 3C"){
        year.apply = poi.year
        # Apply the advice "both moons" to Feb and Mar
        if (moon.year == year.apply & (moon.month == 2 | moon.month == 3)){
          advice[j] = "Both"
          rule[j] = "Rule 3C"
        }
        # Apply mta advice to Jan and Apr
        else if (moon.year == year.apply & (moon.month == 1 | moon.month == 4)){
          # Calculate advice based on mta
          if (prep.analysis$MTA[j] > prep.analysis$MTA[j+1]){
            advice[j] = prep.analysis$Phase[j]
          } else {
            advice[j] = prep.analysis$Phase[j+1]
          }
          rule[j] = "Rule 3C"
        }
      } # end of rule 3C
      # Rule 4: Transition in last week March or anytime in April (1/1/2)
      if (transitionPoints(poi.dates[i]) == "Rule 4"){
        year.apply = poi.year
        # Apply the advice "both moons" to Mar
        if (moon.year == year.apply & moon.month == 3){
          advice[j] = "Both"
          rule[j] = "Rule 4"
        }
        # Apply mta advice to Jan, Feb and Apr
        else if (moon.year == year.apply & (moon.month == 1 | moon.month == 2 | moon.month == 4)){
          # Calculate advice based on mta
          if (prep.analysis$MTA[j] > prep.analysis$MTA[j+1]){
            advice[j] = prep.analysis$Phase[j]
          } else {
            advice[j] = prep.analysis$Phase[j+1]
          }
          rule[j] = "Rule 4"
        }
      } # end of rule 4
      # Apply rule 1 everywhere else
      # (this 'else' pairs with the Rule 4 'if' directly above)
      else {
        year.apply = poi.year
        # Ensure the advice is only applied to Ja, Feb, March and Apr
        if (moon.year == year.apply){
          # Check that there is no advice
          if (advice[j] == ""){
            # Calculate advice based on mta
            if (prep.analysis$MTA[j] > prep.analysis$MTA[j+1]){
              advice[j] = prep.analysis$Phase[j]
            } else {
              advice[j] = prep.analysis$Phase[j+1]
            }
            rule[j] = "Rule 1"
          }
        }
      } # End rule 1
    } # End for loop prep.analysis
  } # End for loop poi
  prep.analysis <- cbind(prep.analysis, advice)
  names(prep.analysis) <- c("Date", "Phase", "MTA", "Std Dev", "Std Error", "Max TA", "Advice Next Phase (Algo)")
  ## Get andada advice
  # A moon row gets "Yes" when the advised phase matches its own phase (or the
  # advice is "Both"), i.e. an andada is expected around that moon.
  andada <- rep("", length(prep.analysis$Date))
  for (i in 1:length(prep.analysis$Date)){
    if (prep.analysis$`Advice Next Phase (Algo)`[i] == "Both"){
      andada[i] = "Yes"
    } else if (prep.analysis$`Advice Next Phase (Algo)`[i] == prep.analysis$Phase[i]){
      andada[i] = "Yes"
    } else {
      andada[i] = "No"
    }
  }
  prep.analysis <- cbind(prep.analysis, andada)
  names(prep.analysis) <- c("Date", "Phase", "MTA", "Std Dev", "Std Error", "Max TA", "Advice Next Phase (Algo)", "Andada Next Phase (Algo)")
  ##################################################
  ### Checking next month instead of next phase ####
  ##################################################
  # NOTE(review): both branches call south.pred(); the north/south distinction
  # is currently a no-op -- confirm whether a north-specific predictor was
  # intended for AP/MA/PI/PA states.
  if(state=="AP" | state=="MA" | state=="PI" | state=="PA" | state=="PA1" | state=="PA2"){
    prep.analysis <- cbind(prep.analysis, south.pred(prep.analysis, poi.dates))
  } else {
    prep.analysis <- cbind(prep.analysis, south.pred(prep.analysis, poi.dates))
  }
  names(prep.analysis) <- c("Date", "Phase", "MTA", "Std Dev", "Std Error", "Max TA", "Advice Next Phase (Algo)","Andada Next Phase (Algo)", "Advice Next Month (Algo)")
  ## Get andada advice
  # Same Yes/No derivation as above, but from the next-month advice column.
  andada <- rep("", length(prep.analysis$Date))
  for (i in 1:length(prep.analysis$Date)){
    if (prep.analysis$`Advice Next Month (Algo)`[i] == "Both"){
      andada[i] = "Yes"
    } else if (prep.analysis$`Advice Next Month (Algo)`[i] == prep.analysis$Phase[i]){
      andada[i] = "Yes"
    } else {
      andada[i] = "No"
    }
  }
  prep.analysis <- cbind(prep.analysis, andada, rule)
  names(prep.analysis) <- c("Date", "Phase", "MTA", "Std Dev", "Std Error", "Max TA", "Advice Next Phase (Algo)","Andada Next Phase (Algo)", "Advice Next Month (Algo)", "Andada Next Month (Algo)", "Rule")
  ### GGPLOT TEST
  # Year-faceted ggplot version of the figure: x is day-of-year (0-364) so all
  # years align; month rectangles alternate white/grey, January is outlined.
  x1 <- numeric()
  x2 <- numeric()
  y1 <- numeric()
  y2 <- numeric()
  color <- vector()
  border <- vector()
  t1 <- data.frame(x=temp1$x, y=temp1$y)
  yyy <- vector()
  mmm <- vector()
  ddd <- vector()
  former.year = ""
  former.month = ""
  # One pass over the spline points: record year/month/day-of-year per point
  # and emit at most one background rectangle per (year, month).
  for(i in 1:length(t1$x)){
    yyy <- c(yyy, format(as.Date.POSIXct(as.POSIXct.Date(t1$x[i])), "%Y"))
    mmm <- c(mmm, format(as.Date.POSIXct(as.POSIXct.Date(t1$x[i])), "%b"))
    ddd <- c(ddd, as.POSIXlt.Date(t1$x[i])$yday)
    if (format(as.Date.POSIXct(as.POSIXct.Date(t1$x[i])), "%Y") == former.year & format(as.Date.POSIXct(as.POSIXct.Date(t1$x[i])), "%m") == "01"){
      # January (seen again within the same year): black-bordered box 0-90 days.
      x1 = c(x1, 0)
      x2 = c(x2, 90)
      y1 = c(y1, 0)
      y2 = c(y2, 6)
      color <- c(color, NA)
      border <- c(border, "#000000")
    } else if (format(as.Date.POSIXct(as.POSIXct.Date(t1$x[i])), "%Y") == former.year & format(as.Date.POSIXct(as.POSIXct.Date(t1$x[i])), "%m") == former.month){
      # Same month as the previous point: degenerate (invisible) rectangle.
      x1 = c(x1, 0)
      x2 = c(x2, 0)
      y1 = c(y1, 0)
      y2 = c(y2, 0)
      color <- c(color, NA)
      border <- c(border, NA)
    } else {
      # First point of a new month: full-height band, 30 days wide,
      # alternating white/grey by month parity.
      x1 = c(x1, (as.numeric(format(as.Date.POSIXct(as.POSIXct.Date(t1$x[i])), "%m"))-1)*30)
      x2 = c(x2, (as.numeric(format(as.Date.POSIXct(as.POSIXct.Date(t1$x[i])), "%m")) * 30))
      y1 = c(y1, -Inf)
      y2 = c(y2, Inf)
      if((as.numeric(format(as.Date.POSIXct(as.POSIXct.Date(t1$x[i])), "%m")) %% 2) == 0){
        color <- c(color, "#FFFFFF")
      } else {
        color <- c(color, "#CECECE")
      }
      border <- c(border, NA)
    }
    former.year = format(as.Date.POSIXct(as.POSIXct.Date(t1$x[i])), "%Y")
    former.month = format(as.Date.POSIXct(as.POSIXct.Date(t1$x[i])), "%m")
  }
  t1 <- cbind(t1, year=yyy, month=mmm, day=ddd, x1=x1, x2=x2, y1=y1, y2=y2, color=color, border=border)
  # Same year/month/day-of-year columns for the second curve.
  t2 <- data.frame(x=temp2$x, y=temp2$y)
  yyy <- vector()
  for(i in 1:length(t2$x)){yyy <- c(yyy, format(as.Date.POSIXct(as.POSIXct.Date(t2$x[i])), "%Y"))}
  t2 <- cbind(t2, year=yyy)
  mmm <- vector()
  for(i in 1:length(t2$x)){mmm <- c(mmm, format(as.Date.POSIXct(as.POSIXct.Date(t2$x[i])), "%b"))}
  t2 <- cbind(t2, month=mmm)
  ddd <- vector()
  for(i in 1:length(t2$x)){ddd <- c(ddd, as.POSIXlt.Date(t2$x[i])$yday)}
  t2 <- cbind(t2, day=ddd)
  # Map each intersection point onto a t1 row in the same year+month so it
  # lands in the right facet; NA[seq(1, n)] yields an all-NA vector of length n.
  poix<- NA[seq(1, length(t1$x))]
  poiy <- NA[seq(1, length(t1$x))]
  for(i in 1:length(poi.x)){
    py = format(as.Date.POSIXct(as.POSIXct.Date(poi.x[i])), "%Y")
    pm = format(as.Date.POSIXct(as.POSIXct.Date(poi.x[i])), "%m")
    for (j in 1:length(t1$x)){
      dy = format(as.Date.POSIXct(as.POSIXct.Date(t1$x[j])), "%Y")
      dm = format(as.Date.POSIXct(as.POSIXct.Date(t1$x[j])), "%m")
      if (py == dy & pm == dm){
        poix[j] = as.POSIXlt.Date(poi.x[i])$yday
        poiy[j] = poi.y[i]
      }
    }
  }
  t1 <- cbind(t1, poix=poix, poiy=poiy)
  # temp1 may hold either phase depending on which moon came first; pick line
  # colors so full moon is always orange and new moon always black.
  if (temp1.type == "FM"){
    line1.color = "darkorange1"
    line2.color = "black"
  } else {
    line1.color = "black"
    line2.color = "darkorange1"
  }
  title = paste("Estação: ", tides$station$ID.numb, " ", "-", " ", "Nome: ", toTitleCase(tolower(tides$station$ID.name)), " ", "-", " ", "Estado:", state, " ", "-", " ", "Lat/Long:", paste0(round(tides$station$latitude, 2), round(tides$station$longitude, 2)))
  if(isTRUE(svg)) {
    setwd(paste0(dirname(rstudioapi::getSourceEditorContext()$path)))
    dir.create("export", showWarnings = FALSE)
    setwd(paste0(dirname(rstudioapi::getSourceEditorContext()$path), "/export"))
    name = paste0(fileNumber, ". ", substr(tides$station$file, 1, nchar(tides$station$file)-4), " (multiples).png")
    png(filename=name, width = 1150, height = 780)
  }
  # Build (but do not yet print) the faceted figure; presumably printed and
  # the device closed further down in this function -- outside this view.
  plot <- ggplot(t1, aes(day, y), col=c("Full", "New")) +
    geom_rect(data=t1, mapping=aes(xmin=x1, xmax=x2, ymin=y1, ymax=y2), fill=color, color=border, alpha=0.3, linetype = "dashed") +
    geom_line(aes(day,y), size=1.2, color=line1.color) +
    geom_line(aes(t2$day, t2$y), size=1.2, color=line2.color) +
    geom_point(aes(poix, poiy), size=2.5, color="red") +
    facet_wrap(~year) +
    theme_bw() +
    xlab("") + ylab("MTA") +
    theme(axis.title.x=element_blank(), axis.text.x=element_blank(), axis.ticks.x=element_blank()) +
    scale_x_continuous(limits=c(0,364), minor_breaks = seq(0 , 364, 30), breaks = seq(0, 364, 30)) +
    scale_y_continuous(limits = c(0, 6), breaks = seq(0, 6, by = 1)) +
    ggtitle(title) +
    theme(plot.title = element_text(size=10, face="bold", hjust=0.5), axis.title=element_text(size=9))
options(warn=-1)
print(plot)
temp.date <- vector()
temp.phase <- vector()
temp.mta <- vector()
temp.std <- vector()
temp.ste <- vector()
temp.maxta <- vector()
temp.advice1 <- vector()
temp.advice2 <- vector()
temp.andada1 <- vector()
temp.andada2 <- vector()
temp.rule <- vector()
## Remove months outside november-april range
for (i in 1:length(prep.analysis$Phase)){
tempM = format(prep.analysis$Date[i], "%m")
if (tempM == "11" | tempM == "12" |tempM == "01" |tempM == "02" |tempM == "03" |tempM == "04"){
temp.date <- c(temp.date, prep.analysis$Date[i])
temp.phase <- c(temp.phase, prep.analysis$Phase[i])
temp.mta <- c(temp.mta, prep.analysis$MTA[i])
temp.std <- c(temp.std, prep.analysis$`Std Dev`[i])
temp.ste <- c(temp.ste, prep.analysis$`Std Error`[i])
temp.maxta <- c(temp.maxta, prep.analysis$`Max TA`[i])
temp.advice1 <- c(temp.advice1, prep.analysis$`Advice Next Phase (Algo)`[i])
temp.advice2 <- c(temp.advice2, prep.analysis$`Advice Next Month (Algo)`[i])
temp.andada1 <- c(temp.andada1, prep.analysis$`Andada Next Phase (Algo)`[i])
temp.andada2 <- c(temp.andada2, prep.analysis$`Andada Next Month (Algo)`[i])
temp.rule <- c(temp.rule, prep.analysis$Rule[i])
}
}
prep.analysis.short <- data.frame(as.Date(as.POSIXct.Date(temp.date)), temp.phase, temp.mta, temp.std, temp.ste, temp.maxta, temp.advice1, temp.andada1, temp.advice2, temp.andada2, temp.rule)
names(prep.analysis.short) <- c("Date", "Phase", "MTA", "Std Dev", "Std Error", "Max TA", "Advice Next Phase (Algo)","Andada Next Phase (Algo)", "Advice Next Month (Algo)", "Andada Next Month (Algo)", "Rule")
stations = c('20520', '10525', '30540', '30825', '40240', '40263', '60135', '60245', '10566')
if (tides$station$ID.numb %in% stations){
prep.analysis.short <- accuracyCheck(prep.analysis.short$`Andada Next Phase (Algo)`, prep.analysis.short, state, 1, "Andadas.csv")
prep.analysis.short <- accuracyCheck(prep.analysis.short$`Andada Next Phase (Algo)`, prep.analysis.short, state, 1, "Andadas3.csv")
prep.analysis.short <- accuracyCheck(prep.analysis.short$`Andada Next Month (Algo)`, prep.analysis.short, state, 1, "Andadas.csv")
prep.analysis.short <- accuracyCheck(prep.analysis.short$`Andada Next Month (Algo)`, prep.analysis.short, state, 1, "Andadas3.csv")
prep.analysis.short <- accuracyCheck(prep.analysis.short$`Andada Next Month (Algo)`, prep.analysis.short, state, 2, "Andadas.csv")
prep.analysis.short <- accuracyCheck(prep.analysis.short$`Andada Next Month (Algo)`, prep.analysis.short, state, 2, "Andadas3.csv")
accuracyREMAR(state, 1)
accuracyREMAR(state, 2)
names(prep.analysis.short) <- c("Date", "Phase", "MTA", "Std Dev", "Std Error", "Max TA", "Advice 1","Andada 1", "Advice 2", "Andada 2", "Rule", "Accuracy 1 (obs)", "Accuracy 1 (obs2)", "Accuracy 2 (obs)", "Accuracy 2 (obs2)","Observation", "Prediction")
}
prep.analysis.short <<- prep.analysis.short
prep.analysis <<- prep.analysis
if(isTRUE(svg)) {
dev.off()
#fileName = paste0(fileNumber, ". ", substr(tides$station$file, 1, nchar(tides$station$file)-4), " (Prep Analysis - Short).csv")
fileName = paste0("maxime-", state, ".csv")
write.table(prep.analysis.short,paste0(dirname(rstudioapi::getSourceEditorContext()$path), "/export/", fileName), sep=",", row.names = FALSE, col.names=TRUE)
}
options(warn=0)
}
transitionPoints <- function(poi) {
  # Assign a date to one of the seasonal transition rules (northern
  # calendar). The classification depends only on the month and the
  # day-of-month of `poi` (a single Date).
  # Returns one of "Rule 1" .. "Rule 4".
  month_str <- format(poi, "%m")
  day_num <- as.numeric(format(poi, "%d"))
  late_nov <- month_str == "11" && day_num >= 24
  closed_season <- month_str %in% c("12", "01", "02", "03", "04")

  # Rule 1: outside the window running from the last week of November
  # through April -- no transition expected.
  if (!closed_season && !late_nov) {
    return("Rule 1")
  }
  # Rule 2: last week of November or first three weeks of December
  if (late_nov || (month_str == "12" && day_num < 24)) {
    return("Rule 2")
  }
  # Rule 3A: last week of December or first three weeks of January
  if ((month_str == "12" && day_num >= 24) || (month_str == "01" && day_num < 24)) {
    return("Rule 3A")
  }
  # Rule 3B: last week of January or first three weeks of February
  if ((month_str == "01" && day_num >= 24) || (month_str == "02" && day_num < 22)) {
    return("Rule 3B")
  }
  # Rule 3C: last week of February or first three weeks of March
  if ((month_str == "02" && day_num >= 22) || (month_str == "03" && day_num < 25)) {
    return("Rule 3C")
  }
  # Rule 4: last week of March or anytime in April
  if ((month_str == "03" && day_num >= 24) || month_str == "04") {
    return("Rule 4")
  }
}
transitionPointsSouth = function(poi){
  # Southern-hemisphere analogue of transitionPoints(): the seasonal
  # window is shifted back two months (late September - February).
  #
  # poi: a single Date; the rule is decided from its month and
  #      day-of-month only.
  # Returns one of "Rule 1" .. "Rule 4".
  tempD = format(poi, "%d")
  tempM = format(poi, "%m")
  # NOTE: format(poi, "%m") is zero-padded ("09"), so September must be
  # compared against "09". The original comparisons against "9" never
  # matched, which misclassified late-September dates as Rule 1.
  # RULE 1: No transition between last week of September and February
  if(tempM != "10" & tempM != "11" & tempM != "12" & tempM != "01" & tempM != "02" & !(tempM=="09" & as.numeric(tempD) >= 24)){
    return("Rule 1")
  }
  # RULE 2: Transition in last week of September or first three weeks of October
  else if ((tempM == "10" & (as.numeric(tempD) < 24)) | (tempM == "09" & (as.numeric(tempD) >= 24))){
    return("Rule 2")
  }
  # Rule 3A: transition in last week of October or first 3 weeks of November
  else if ((tempM == "11" & as.numeric(tempD) < 24) | (tempM == "10" & as.numeric(tempD) >= 24)) {
    return("Rule 3A")
  }
  # Rule 3B: transition in last week of November or first 3 weeks of December
  else if ((tempM == "11" & as.numeric(tempD) >= 24) | (tempM == "12" & as.numeric(tempD) < 22)) {
    return("Rule 3B")
  }
  # Rule 3C: transition in last week of December or first 3 weeks of January
  else if ((tempM == "12" & as.numeric(tempD) >= 22) | (tempM == "01" & as.numeric(tempD) < 25)) {
    return("Rule 3C")
  }
  # Rule 4: transition in last week of January or anytime in February
  else if ((tempM == "01" & as.numeric(tempD) >= 24) | (tempM == "02")) {
    return("Rule 4")
  }
}
|
ab2a45e3592e3ea1633a7ca6fdf3d476be0f0073 | e31dfbbd67f70384ee6f44c760c9b1b2c7c184ff | /hw/hw5/dieroller/test/testthat/test-check-die.R | a7a24119cec8d56a74c5e305e4a216503ca46274 | [] | no_license | rlaehddo/stat133-spring2018 | 7ce49bfe4b91d953aef9ded03bd985940dca1bb6 | 31a845b3eafb7d8495e345a9875a11c9f96e1e10 | refs/heads/master | 2023-08-27T07:37:39.940101 | 2021-10-23T10:44:20 | 2021-10-23T10:44:20 | 410,713,592 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,031 | r | test-check-die.R | context("Check die arguments")
# Unit tests for the die-argument validators check_sides() / check_prob().
# check_sides() must accept any vector of exactly 6 side labels (numeric or
# character) and error otherwise; check_prob() must accept a length-6
# probability vector summing to 1 with all entries in [0, 1].

# Valid side vectors: 6 numeric faces or 6 character labels.
test_that("check_sides with ok vectors", {
  expect_true(check_sides(c(1, 2, 3, 4, 5, 6)))
  expect_true(check_sides(letters[1:6]))
})
# Anything that is not length 6 must be rejected.
test_that("check_sides fails with invalid lengths", {
  expect_error(check_sides(c('one', 'two', 'three')))
  expect_error(check_sides(c('one')))
  expect_error(check_sides(1:5))
  expect_error(check_sides(1))
})
# Valid probability vectors: length 6, entries in [0, 1], summing to 1
# (degenerate one-hot distributions included).
test_that("check_prob works with ok vectors", {
  expect_true(check_prob(rep(1/6, 6)))
  expect_true(check_prob(c(0.1, 0.1, 0.2, 0.2, 0.35, 0.05)))
  expect_true(check_prob(c(1, 0, 0, 0, 0, 0)))
  expect_true(check_prob(c(0.1, 0, 0, 0, 0, 0.9)))
})
# Wrong length must be rejected.
test_that("check_prob fails with invalid lengths", {
  expect_error(check_prob(1:5))
  expect_error(check_prob(1))
})
# Wrong values: sums != 1 and NA entries must be rejected.
test_that("check_prob fails with invalid numbers", {
  expect_error(check_prob(rep(1/5, 6)))
  expect_error(check_prob(c(0.1, 0.1, 0.2, 0.2, 0.35, 0.1)))
  expect_error(check_prob(c(rep(0.5, 6))))
  expect_error(check_prob(c(0.1, 0.1, 0.2, 0.2, 0.35, NA)))
})
|
e2af8d39730caceff1f8c513a926cca7032d4aa8 | 4541733f4089ba0ca8727a999062cdab178b70ed | /man/GE_annot.Rd | 6cd64dc2e6a9f3e1bda88b4be4ac80706f91b218 | [] | no_license | llrs/gliomaData | 5f2257077b054f78a251605dbae9e12dfeb8f398 | d86901558a42c2fc5fa76aef62f438d509e1675e | refs/heads/master | 2021-09-26T05:24:28.209641 | 2021-09-24T08:17:40 | 2021-09-24T08:17:40 | 139,417,841 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 443 | rd | GE_annot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GE_annot.R
\docType{data}
\name{GE_annot}
\alias{GE_annot}
\title{Gene expression annotations}
\format{
one instance, 1 row per probe
}
\source{
IGR, Villejuif, France
}
\usage{
GE_annot
}
\description{
This data set contains annotations on the probes used for
gene expression
}
\examples{
#read data
data(GE_annot)
head(GE_annot)
}
\author{
V Frouin, 2012-07-31
}
|
84cb7e2640a9474946928ce9d183d2280f3b6987 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/BIOMASS/examples/latlong2UTM.Rd.R | a7bd4494e78f6ad53b6199b060e68083d05b9c7f | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 303 | r | latlong2UTM.Rd.R | library(BIOMASS)
### Name: latlong2UTM
### Title: Translate the long lat coordinate in UTM coordinate
### Aliases: latlong2UTM
### ** Examples
long <- c(-52.68, -51.12, -53.11)
lat <- c(4.08, 3.98, 4.12)
coord <- cbind(long, lat)
## Not run:
##D UTMcoord <- latlong2UTM(coord)
## End(Not run)
|
cf6713e769a8098ca8163ffb31d9a4e390568875 | c731f04dff3ba52af528311ad3451d9f4f1fdc10 | /R_cmd_line/dada2_dereplicate_seqs.R | cfa0d721d31314fa202b84d26df2e0ec6dee8164 | [] | no_license | leffj/dada2helper | 592dcf81a801115bba2c324a526c9b69037014ea | 9dbca778a1d8aeb13a8e59ea5826e2fda6761b59 | refs/heads/master | 2020-12-25T05:47:02.295576 | 2016-08-10T06:53:09 | 2016-08-10T06:53:09 | 61,908,282 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,919 | r | dada2_dereplicate_seqs.R | #!/usr/bin/env Rscript
library(optparse)
## Quality filter sequences in fastq files
description = "Dereplicate sequences using dada2"
option_list = list(
make_option(c("-i", "--input_dir"), type = "character", default = NULL,
help = "Directory with filtered sequences in fastq format",
metavar = "input_directory"),
make_option(c("-o", "--output_dir"), type = "character", default = "./derep",
help = paste0("Directory where the dereplicated files will be ",
"saved (default = ./derep)."),
metavar = "output_directory"),
make_option(c("-v", "--verbose"), default = FALSE, action = 'store_true',
help = "Print out extra info?")
)
opt_parser = OptionParser(option_list = option_list, description = description)
opt = parse_args(opt_parser)
if (is.null(opt$input_dir)) {
print_help(opt_parser)
stop("No arguments supplied.\n", call. = FALSE)
}
get_sample_names_and_fps2 = function(indir) {
  # Collect paired, filtered forward/reverse fastq files from `indir` and
  # derive sample names from the filename prefix before the
  # "__filtR1"/"__filtR2" marker.
  #
  # indir: directory containing files named <sample>__filtR1*.fastq.gz /
  #        <sample>__filtR2*.fastq.gz (or .fq.gz).
  # Returns list(sample.names, fnFs, fnRs): sample IDs plus full paths to
  # the forward and reverse read files, in matching order.
  # Stops if the pairs do not line up or sample names are not unique.

  # Normalise the path to end with "/" (scalar if/else; ifelse() is meant
  # for vectors).
  path = if (substr(indir, nchar(indir), nchar(indir)) == '/') indir else paste0(indir, '/')
  fns = list.files(path)
  # Keep only gzipped fastq files. The dots are escaped so that only a
  # literal ".fq.gz"/".fastq.gz" suffix matches (the original unescaped
  # pattern let "." match any character).
  fastqs = fns[grepl("\\.fq\\.gz$|\\.fastq\\.gz$", fns)]
  fastqs = sort(fastqs) # Sort ensures forward/reverse reads are in same order
  fnFs = fastqs[grepl("__filtR1", fastqs)] # Just the forward read files
  fnRs = fastqs[grepl("__filtR2", fastqs)] # Just the reverse read files
  # Sample names are the filename prefix before the __filtR marker; the
  # forward and reverse lists must yield identical prefixes in identical
  # order. vapply() keeps the result a character vector even when empty.
  prefFs = vapply(strsplit(fnFs, "__filtR1"), function(x) x[1], character(1))
  prefRs = vapply(strsplit(fnRs, "__filtR2"), function(x) x[1], character(1))
  if (!identical(prefFs, prefRs)) {
    # (The original message contained a stray '", "' from a mis-quoted
    # line continuation; fixed here.)
    stop("Forward and reverse reads not sorted in same order. Try simpler sample names.")
  }
  sample.names = prefFs
  # ensure all sample names are unique
  if (length(unique(sample.names)) != length(sample.names)) {
    stop("Make sure all sample IDs are unique.")
  }
  # Fully specify the path for the fnFs and fnRs
  fnFs = paste0(path, fnFs)
  fnRs = paste0(path, fnRs)
  list(sample.names = sample.names, fnFs = fnFs, fnRs = fnRs)
}
# Main flow: locate the paired, quality-filtered fastq files named on the
# command line, dereplicate both read directions with dada2, and save the
# derep-class objects for the downstream denoising steps.
names_fps = get_sample_names_and_fps2(opt$input_dir)
message("Dereplicating forward reads.")
derepFs = dada2::derepFastq(names_fps$fnFs, verbose = opt$verbose)
message("Dereplicating reverse reads.")
derepRs = dada2::derepFastq(names_fps$fnRs, verbose = opt$verbose)
# Name the derep-class objects by the sample names
names(derepFs) = names_fps$sample.names
names(derepRs) = names_fps$sample.names
# output as r data objects
# Normalise the output directory so it ends with "/" before pasting the
# output file names onto it.
outdir = ifelse(
  substr(opt$output_dir, nchar(opt$output_dir), nchar(opt$output_dir)) == '/',
  opt$output_dir,
  paste0(opt$output_dir, '/')
)
if (!dir.exists(outdir)) dir.create(outdir)
saveRDS(derepFs, file = paste0(outdir, "derepFs.RDS"))
saveRDS(derepRs, file = paste0(outdir, "derepRs.RDS"))
|
dafc5241a8f2b481441eb2cc9982478ae8e5d134 | 069aa0fd5037224d8d960108fa4c2d8ac620fae5 | /src/basisDecomposition.r | 06f23ef13f1463ce2b8be128cf504bcce2212d8c | [
"MIT"
] | permissive | waternk/Hypoxia_Lake_Erie | d7cf7775a850ef5ad5bca6ef394c15ed08aeb5d4 | cae9a846a187e44b508b9e6e6a8fc5db2fb303e6 | refs/heads/master | 2020-06-14T23:57:14.684718 | 2019-03-22T10:46:18 | 2019-03-22T10:46:18 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,944 | r | basisDecomposition.r | # This is the script to represent data with basis functions
# Available basis: B-Spline and temporal basis function through SVD
source("./src/database.R")
source("./src/plot.R")
source("./src/helper.R")
#source("./src/interpolation.R")
require(sp)
require(gstat)
require(dplyr)
require(reshape2)
require(geoR)
coeff2Value <- function(coef, basis) {
  # Reconstruct fitted values from basis-function coefficients:
  # returns the matrix basis %*% t(coef), i.e. column j of the result is
  # the basis expansion weighted by the j-th row of `coef`.
  tcrossprod(basis, coef)
}
# Plot an empirical variogram cloud (cutoff 120) with gstat::variogram.
# df:    data frame with coordinate columns x, y and the variable(s)
#        referenced by `formu`
# formu: trend formula given as a string, e.g. "value~1" for a constant mean
# Side effects: promotes df to a SpatialPointsDataFrame (local copy only)
# and prints the lattice plot so it renders inside functions/loops too.
plot_variogram <- function(df, formu = "value~1"){
  coordinates(df) = ~x+y
  print(plot(variogram(as.formula(formu),data =df,cutoff = 120, cloud=TRUE)))
}
NMF_scale <- function(NMFRes, center = "basis"){
  # Rescale an NMF factorisation W %*% H without changing the product:
  # every scaling applied to a column of W (the basis) is compensated in
  # the matching row of H (the coefficients), and vice versa.
  #
  # NMFRes: list with elements `basis` (W) and `coef` (H)
  # center: "basis"  - scale each basis column so its maximum is 12
  #         "basis2" - scale each basis column so its mean is 6
  #         "coef"   - scale each coefficient row so its maximum is 1
  #         (any other value leaves the matrices untouched)
  # Returns list(basis = W, coef = H) with the rescaled matrices.
  W <- NMFRes$basis
  H <- NMFRes$coef
  for (j in seq_len(ncol(W))) {
    if (center == "basis") {
      print("conducting NMF and scale basis max to 12")
      a <- 12 / max(W[, j])
      W[, j] <- W[, j] * a
      H[j, ] <- H[j, ] / a
    } else if (center == "basis2") {
      print("conducting NMF and scale basis mean to 6")
      a <- 6 / mean(W[, j])
      W[, j] <- W[, j] * a
      H[j, ] <- H[j, ] / a
    } else if (center == "coef") {
      print("conducting NMF and scale max coefficients to 1")
      a <- 1 / max(H[j, ])
      H[j, ] <- H[j, ] * a
      W[, j] <- W[, j] / a
    }
  }
  list(basis = W, coef = H)
}
NMF_basis <- function(DOdata, r, ...){
  # Fit a rank-r non-negative matrix factorisation DOdata ~ W %*% H using
  # the NMF package with 60 random restarts.
  #
  # DOdata: numeric matrix/data frame; values below 0.01 are floored to
  #         0.01 because NMF requires strictly positive input.
  # r:      factorisation rank (number of basis vectors).
  # ...:    optional `method` selecting the NMF algorithm; when absent the
  #         package default is used.
  # Returns list(basis = W, coef = H).
  method <- list(...)$method
  require(NMF)
  DOdata <- as.matrix(DOdata)
  DOdata <- ifelse(DOdata<0.01, 0.01, DOdata)
  # `||` short-circuits on the NULL check. The original `|` evaluated
  # `NULL == "brunet"` (logical(0)), so `if()` failed with
  # "argument is of length zero" whenever `method` was not supplied.
  if(is.null(method) || method == "brunet"){
    print("using default NMF fitting method")
    nmfRes <- nmf(DOdata, r, nrun = 60)
  }else{
    print(paste0("using ", method, " fitting method"))
    nmfRes <- nmf(DOdata, r, nrun = 60, method = method, beta = 0.1)
  }
  W <- nmfRes@fit@W
  H <- nmfRes@fit@H
  return(list(basis = W, coef = H))
}
SVD_basis <- function(DOdata, r){
  # Build r smooth temporal basis functions from the left singular vectors
  # of the column-scaled data matrix, then least-squares fit the original
  # data onto those basis functions plus a constant bias term.
  #
  # DOdata: T x n matrix/data frame (rows = time points, cols = series)
  # r:      number of singular vectors to keep
  # Returns list(fit, coef, basis, varExpl): fit = basis %*% coef; varExpl
  # is the fraction of (scaled) variance captured by the first r singular
  # values.
  data_mat <- as.matrix(DOdata)
  print("Doing Scaling!!")
  # Columns are centred/scaled before the SVD (as in the original paper).
  svd_res <- svd(scale(data_mat))
  smooth_basis <- svd_res$u[, 1:r]
  n_time <- nrow(smooth_basis)
  # Smooth each singular vector over time with a spline (df = 20% of T).
  for (j in 1:r) {
    spline_fit <- smooth.spline(x = 1:n_time, y = smooth_basis[, j], df = n_time * 0.2)
    smooth_basis[, j] <- predict(spline_fit, 1:n_time)$y
  }
  smooth_basis <- cbind(smooth_basis, 1) # constant bias column
  # One coefficient column per data series (no extra intercept: the bias
  # column already plays that role).
  coef_mat <- lsfit(x = smooth_basis, y = data_mat, intercept = FALSE)$coefficients
  fitted_vals <- smooth_basis %*% coef_mat
  var_explained <- sum(svd_res$d[1:r]^2) / sum(svd_res$d^2)
  print("variance explained")
  print(var_explained)
  list(fit = fitted_vals, coef = coef_mat, basis = smooth_basis, varExpl = var_explained)
}
# Represent each column of DOdata with a B-spline expansion (fda package).
# DOdata: T x n matrix/data frame of time series (rows = time 1..T)
# knots:  knot sequence; the basis spans range(knots)
# norder: spline order (4 = cubic splines)
# Returns list(fit, coef, basis): fitted values at t = 1..T, the
# least-squares coefficients, and the fda basis object.
B_spline <- function(DOdata,knots,norder = 4){
  require(fda)
  T <- nrow(DOdata)
  DOdata <- as.matrix(DOdata)
  # Number of B-spline basis functions implied by the knots and the order.
  nBasis <- length(knots)-2+norder
  bbasis <- create.bspline.basis(range(knots),nBasis,norder,knots)
  basismat <- eval.basis(1:T, bbasis)
  # Least squares via the normal equations: (B'B) coef = B' y.
  coef <- solve(crossprod(basismat), crossprod(basismat,DOdata))
  DOfd <- fd(coef, bbasis)
  DO_fit <- predict(DOfd,1:T)
  return(list(fit = DO_fit, coef = coef, basis = bbasis)) # return fit value, coefficients and basis object
}
7b74c7b2b15191d5faccf9538227709081ba729f | 29585dff702209dd446c0ab52ceea046c58e384e | /pid/R/paretoPlot.R | 4894589f25eb47bd9070713c8f9f0490423053ff | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,352 | r | paretoPlot.R | # (c) Kevin Dunn, 2014-2015.
paretoPlot <- function(lsmodel, xlab="Effect name", ylab="Magnitude of effect",
                       main="Pareto plot", legendtitle="Sign of coefficient",
                       negative=c("Negative", "grey"),
                       positive=c("Positive", "black")){
  # Draw a Pareto plot of the (absolute) effect magnitudes of a fitted
  # linear model, as horizontal bars sorted smallest-to-largest, with bar
  # fill indicating the sign of the underlying coefficient.
  #
  # lsmodel:     a fitted model with coef(), e.g. from lm()
  # xlab/ylab:   axis labels (note the axes are flipped in the plot)
  # main:        plot title
  # legendtitle: title of the sign legend
  # negative/positive: c(label, fill colour) for each coefficient sign
  # Returns the ggplot object (also drawn as a side effect) so the caller
  # can modify it further.
  #
  # This code draws a Pareto plot; it requires the "ggplot2" library
  # install.packages("ggplot2", dependencies = TRUE)
  # require(ggplot2)
  # Extract all the coefficients, except for the intercept
  coeff.full <- coef(lsmodel)[2:length(coef(lsmodel))]
  coeff.full <- na.omit(coeff.full)
  # Return the absolute values of the coefficients
  coeff.abs <- unname(abs(coeff.full))
  # Use "shell" sort, to avoid ties being reordered.
  coeff <- sort(coeff.abs, index.return=TRUE, method="shell")
  # Recover the sign of each coefficient in the sorted order and map it to
  # the user-supplied labels.
  grouping <- unname( (coeff.full > 0)[coeff$ix] )
  grouping[grouping == FALSE] <- negative[1]
  grouping[grouping == TRUE] <- positive[1]
  # An ordered factor with levels in sorted order fixes the bar order on
  # the flipped axis.
  temp <- names(coeff.full)[coeff$ix]
  fnames <- factor(temp, levels=temp, ordered=TRUE)
  dat <- data.frame(
    label=fnames,
    value=coeff$x,
    group=grouping
  )
  # Make this work to get the script uploaded into CRAN (silences the
  # "no visible binding for global variable" NOTE from R CMD check):
  # https://stackoverflow.com/questions/9439256/how-can-i-handle-r-cmd-check-no-visible-binding-for-global-variable-notes-when
  label <- value <- group <- NULL # Setting the variables to NULL first
  p <- ggplot(dat, aes(x=label, y=value, fill=group)) +
    geom_bar(stat="identity") +
    coord_flip() + theme_bw() +
    scale_fill_manual(values=c(negative[2], positive[2]),
                      labels=c(negative[1], positive[1]),
                      name = legendtitle) +
    xlab(xlab) +
    ylab(ylab) +
    ggtitle(main)
  plot(p) # Execute the plot (i.e. draw it!)
  return(p) # Return the plot, so user can continue to modify it
}
# Dead demo code (guarded by if (FALSE)): two worked examples of calling
# paretoPlot() on 2^3 and 2^2 factorial-design models.
if (FALSE){
  # Example 1: full 2^3 factorial in factors D, N, P.
  flip <- c(-1,1)
  design <- expand.grid(D=flip, N=flip, P=flip)
  D <- design$D
  N <- design$N
  P <- design$P
  y <- c(64, 68, 72, 70, 78, 80, 82, 80)
  fit <- lm(y ~ D * N * P)
  paretoPlot(fit)
  # Example 2: 2^2 factorial with an interaction term.
  T <- c(-1, +1, -1, +1) # centered and scaled temperature
  S <- c(-1, -1, +1, +1) # centered and scaled speed variable
  y <- c(69, 60, 24, 53) # conversion, is our response variable, y
  doe.model <- lm(y ~ T + S + T * S) # create a model with main effects, and interaction
  paretoPlot(doe.model)
} |
2428eaef37fcab4db30f5b3bff4cc5a2cc099f20 | 1d9a00050221646d4569d9a5196ba604aaab644d | /plot4.R | df63b9a33a80f9c0706f41a78ecf5a89918b1b75 | [] | no_license | gayathriganesan/ExData_Plotting1 | b870f07156e8c79794282585f2135c5f0d5d32ea | e63bc91573b8484ad7b8eacfb0ac7b6b1c9b6c7c | refs/heads/master | 2021-04-15T18:59:10.502569 | 2015-03-06T15:25:10 | 2015-03-06T15:25:10 | 31,579,451 | 0 | 0 | null | 2015-03-03T04:33:22 | 2015-03-03T04:33:22 | null | UTF-8 | R | false | false | 1,755 | r | plot4.R | ## Exploratory Data Analysis -Course Project 1 - Plot4
# the source text file is at data/CourseProj1/household_power_consumption.txt under the working directory
filename<-file("data/CourseProj1/household_power_consumption.txt")
library(sqldf)
# only loads the data from the text file for Feb 1, 2007 & Feb 2, 2007;
# prelimnary look at data indicates these dates will be in the text file as 1/2/2007 & 2/2/2007
df <- sqldf("Select * from filename where Date in ('1/2/2007','2/2/2007')", file.format=list(header=TRUE, sep=";"))
close(filename) # close connection
# df is the required dataframe of the 2 days with 2880 obs and 9 variables
library(lubridate)
#join the date & Time variables and conver to a date using lubridate package
df<- transform(df,completedate=dmy_hms(paste(df$Date,df$Time)))
# Lay out a 2x2 grid of panels with tightened margins.
par(mfrow=c(2,2))
par(mar=c(5,4,2,2))
# Panel 1 (top-left): global active power over time.
plot(df$completedate,df$Global_active_power,ylab="Global Active Power",xlab="",pch=1,cex=0.05)
lines(df$completedate,df$Global_active_power,type="l")
# Panel 2 (top-right): voltage over time.
plot(df$completedate,df$Voltage,ylab="Voltage",xlab="datetime",pch=1,cex=0.05)
lines(df$completedate,df$Voltage,type="l")
# Panel 3 (bottom-left): the three sub-metering series on shared axes.
plot(df$completedate,df$Sub_metering_1,ylab="Energy sub metering",xlab="",type="n")
lines(df$completedate,df$Sub_metering_1,col="black",lwd=1)
lines(df$completedate,df$Sub_metering_2,col="orangered",lwd=1)
lines(df$completedate,df$Sub_metering_3,col="blue",lwd=1)
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=1,col=c("black","orangered","blue"),cex=.75,bty="n")
# Panel 4 (bottom-right): global reactive power over time.
plot(df$completedate,df$Global_reactive_power,ylab="Global_reactive_power",xlab="datetime",pch=1,cex=0.05)
lines(df$completedate,df$Global_reactive_power,type="l")
# Copy the on-screen plot to a 480x480 PNG file.
dev.copy(png,filename="ToGit/ExData_Plotting1/plot4.png",width=480,height=480)
dev.off(); |
718411d57f550a1354dd7511c85511bc6550fd40 | 025efda207669f89f482340b816eff4668a32d98 | /plot4.R | 74b2d16cb770735e968d250bcc8656598459c5b8 | [] | no_license | jsqwe/ExData_Plotting1 | 4d8d190a65a920e4b56b57794f375f248f1c9d70 | 7e4e13f38328daf0cd9c0e9e2b2eed77d373c5b1 | refs/heads/master | 2021-01-17T23:50:58.875589 | 2014-06-06T03:51:34 | 2014-06-06T03:51:34 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,335 | r | plot4.R | ## Read data
# Read the raw power-consumption file: ";"-separated, "?" marks missing
# values, first 2 columns (Date/Time) as character, remaining 7 numeric.
data <- read.csv2("household_power_consumption.txt", na.strings = "?",
                  colClasses = c(rep("character",2), rep("numeric", 7)),
                  dec = ".")
## Filter data to only 1/2/2007 and 2/22007
fd <- data[data$Date == "1/2/2007" | data$Date == "2/2/2007",]
## Change Date and Time columns to their appropriate classes
# Combine Date + Time into a single POSIXlt timestamp column.
date_time <- paste(fd$Date, fd$Time, sep = " ")
fd$Time <- strptime(date_time,format = "%d/%m/%Y %H:%M:%S")
fd$Date <- as.Date(fd$Date,format = "%d/%m/%Y")
names(fd)[2] <- "DateTime"
##Open PNG device, create plot4.png in working dir
png(file = "plot4.png", res = 55)
## 4 x 4 grid for plotting
par(mfrow = c(2, 2))
# Four panels: active power, voltage, the three sub-metering series with a
# legend, and reactive power -- all against the combined timestamp.
with(fd, {
  plot(DateTime, Global_active_power, ylab = "Global Active Power", type = "l")
  plot(DateTime, Voltage, type = "l")
  plot(x=DateTime, y = Sub_metering_1,
       type = "l", col = "black", ylab = "Energy sub metering")
  lines(x=DateTime, y = Sub_metering_2, type = "l", col = "red")
  lines(x=DateTime, y = Sub_metering_3, type = "l", col = "blue")
  legend("topright", lty = 1 , bty = "n", col = c("black", "red", "blue"),
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
  plot(DateTime, Global_reactive_power, type = "l")
})
## Close PNG file device
dev.off() |
5ec59b68ae55b76c308f3f1ef959b8e2f487937f | ea51c94187b198c7ecc1f54d59e6c31c147343c2 | /merge_species.R | 368ad5c505b2747182625d7611fdc809fd64a651 | [] | no_license | ashiklom/preprocess-try | 80f653a07da966b8575c0fca9c9b8bdb62696204 | abba92233156d72b894e427adbb2e596bccd4dbe | refs/heads/master | 2021-01-22T06:23:05.873132 | 2018-01-04T21:26:01 | 2018-01-04T21:26:01 | 92,546,913 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,919 | r | merge_species.R | source('common.R')
# Pull the species table into memory and load the pre-processed
# ThePlantList lookup produced upstream.
species <- collect(species)
tpl_proc <- readRDS('pfts_species/theplantlist.rds')
#count(species)
#semi_join(species, tpl_proc) %>% count()
#anti_join(species, tpl_proc) %>% count()
message('Fixing species name encoding...')
# Transliterate accented/latin characters to plain ASCII so names match
# ThePlantList spellings.
species_fixenc <- species %>%
  mutate(AccSpeciesName = stringi::stri_trans_general(AccSpeciesName, 'latin-ascii'))
message('Done!')
message('Merging species with ThePlantList data...')
# Join on species name, normalise a few family synonyms/spellings, then
# hand-patch families that ThePlantList could not resolve; anything still
# unmatched gets Family = NA.
species_merge <- species_fixenc %>%
  left_join(tpl_proc) %>%
  mutate(Family = recode(Family,
                         `Isoëtaceae` = 'Isoetaceae',
                         `Athyriaceae` = 'Aspleniaceae',
                         `Compositae` = 'Asteraceae',
                         `Leguminosae` = 'Fabaceae'
                         )) %>%
  mutate(Family = case_when(!is.na(.$Family) ~ .$Family,
                            is.na(.$Family) & .$AccSpeciesName == 'Poaceae sp' ~ 'Poaceae',
                            is.na(.$Family) & .$AccSpeciesName == 'Fabaceae sp' ~ 'Fabaceae',
                            is.na(.$Family) & .$AccSpeciesName == 'Carex sp' ~ 'Cyperaceae',
                            is.na(.$Family) & .$AccSpeciesName == 'Populus sp' ~ 'Salicaceae',
                            is.na(.$Family) & .$AccSpeciesName == 'Salix sp' ~ 'Salicaceae',
                            is.na(.$Family) & .$AccSpeciesName == 'Protium sp' ~ 'Burseraceae',
                            is.na(.$Family) & .$AccSpeciesName == 'Hieracium pilosella' ~ 'Asteraceae',
                            is.na(.$Family) & .$AccSpeciesName == 'Hammada scoparia' ~ 'Amaranthaceae',
                            is.na(.$Family) & .$AccSpeciesName == 'Maxillaria uncata' ~ 'Orchidaceae',
                            is.na(.$Family) & .$AccSpeciesName == 'Dicranopteris dichotoma' ~ 'Gleicheniaceae',
                            is.na(.$Family) & .$AccSpeciesName == 'Triticum sp' ~ 'Poaceae',
                            is.na(.$Family) & .$AccSpeciesName == 'Amphicarpa bracteata' ~ 'Fabaceae',
                            is.na(.$Family) & .$AccSpeciesName == 'Coussarea racemosa' ~ 'Rubiaceae',
                            is.na(.$Family) & .$AccSpeciesName == 'Citrofortunella mitis' ~ 'Rutaceae',
                            is.na(.$Family) & .$AccSpeciesName == 'Eucalyptus sp' ~ 'Myrtaceae',
                            is.na(.$Family) & .$AccSpeciesName == 'Thymus polytrichus' ~ 'Lamiaceae',
                            is.na(.$Family) & .$AccSpeciesName == 'Achnatherum splendens' ~ 'Poaceae',
                            is.na(.$Family) & .$AccSpeciesName == 'Jessenia bataua' ~ 'Arecaceae',
                            is.na(.$Family) & .$AccSpeciesName == 'Digitalis micrantha' ~ 'Plantaginaceae',
                            TRUE ~ NA_character_))
message('Done!')
# Persist the merged table for downstream PFT assignment.
saveRDS(species_merge, 'pfts_species/tps_species.rds')
|
90ff6a02c9cc592a862ca71784c542f72b0cefa8 | 2426cc47e952f6da7b9411937d8fd16a710132b0 | /inst/code_examples/kapittel_10-logistisk-regresjon.R | e948e09797252fd935adc1850f7d70273343304f | [
"MIT"
] | permissive | ihrke/rnorsk | 8c510c61436e3b71d013eba4ec2870ac04d17711 | c4c8a2c0c9b98c1f046561a8cc54ad2d421ebc60 | refs/heads/master | 2021-07-11T01:47:56.640575 | 2020-07-03T12:31:46 | 2020-07-03T12:31:46 | 159,663,510 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 6,285 | r | kapittel_10-logistisk-regresjon.R |
## kapittel_10-logistisk-regresjon.R
#
# Denne filen inneholder alle kildekoder fra kapittel 10 i
# Mehmetoglu & Mittner (2020). Innføring i R for statistiske dataanalyser. Universitetsforlaget.
##
## setup
library(tidyverse)
library(rnorsk)
theme_set(theme_rnorsk())
## -- Eksempel 1
#
prob <- seq(0,1,by=0.001)
logodds <- log(prob/(1-prob))
## -- Eksempel 2
#
library(rnorsk)
data(titanic)
table(titanic$Survived)
## -- Eksempel 3
#
mod <- glm(Survived ~ Age, family=binomial(link="logit"),
data=titanic)
## -- Eksempel 4
#
summary(mod)
## -- Eksempel 5
#
confint(mod)
## -- Eksempel 6
#
koeffisienter <- coef(mod)
koef.intersept <- koeffisienter["(Intercept)"]
plogis(koef.intersept)
## -- Eksempel 7
#
koef.alder <- koeffisienter["Age"]
plogis(koef.alder)
## -- Eksempel 8
#
p.60år <- plogis(koef.intersept + 60*koef.alder)
p.70år <- plogis(koef.intersept + 70*koef.alder)
p.70år - p.60år
## -- Eksempel 9
#
predict(mod, newdata=data.frame(Age=c(60,70)), type="response")
## -- Eksempel 10
#
p.2030år <- predict(mod, newdata=data.frame(Age=c(20,30)),
type="response")
diff(p.2030år)
## -- Eksempel 11
#
library(visreg)
visreg(mod)
visreg(mod, scale="response")
## -- Eksempel 12
#
library(lmtest)
lrtest(mod)
## -- Eksempel 13
#
library(DescTools)
PseudoR2(mod, which="all")
## -- Eksempel 14
#
mod2 <- glm(Survived ~ Sex+Age,
family=binomial(link='logit'),
data=titanic)
summary(mod2)
## -- Eksempel 15
#
lrtest(mod, mod2)
## -- Eksempel 16
#
rbind(
alder = PseudoR2(mod, which = "Nagelkerke"),
alder.og.kjønn = PseudoR2(mod2, which = "Nagelkerke"))
## -- Eksempel 17
#
koeff <- coef(mod2)
p.kvinner <- plogis( koeff["(Intercept)"] )
p.menn <- plogis( koeff["(Intercept)"]+koeff["Sexmale"] )
cat(p.kvinner,p.menn)
## -- Eksempel 18
#
visreg(mod2, scale ="response")
## -- Eksempel 19
#
mod3 <- glm(Survived ~ Sex+Age+Sex:Age,
family=binomial(link='logit'),
data=titanic)
## -- Eksempel 20
#
lrtest(mod,mod2,mod3)
rbind(
alder = PseudoR2(mod, which = "Nagelkerke"),
alder.og.kjønn = PseudoR2(mod2, which = "Nagelkerke"),
alder.og.kjønn.ia = PseudoR2(mod3, which = "Nagelkerke"))
## -- Eksempel 21
#
summary(mod3)$coefficients
## -- Eksempel 22
#
visreg(mod3, xvar = "Age", by="Sex",scale="response")
## -- Eksempel 23
#
table(titanic$Pclass)
## -- Eksempel 24
#
class(titanic$Pclass)
## -- Eksempel 25
#
mod4 <- glm(Survived ~ Sex*Age + Pclass,
family=binomial(link='logit'),
data=titanic)
## -- Eksempel 26
#
lrtest(mod3,mod4)
rbind(
alder.og.kjønn.ia = PseudoR2(mod3, which = "Nagelkerke"),
alder.og.kjønn.ia.pclass = PseudoR2(mod4, which = "Nagelkerke"))
## -- Eksempel 27
#
summary(mod4)$coefficients
## -- Eksempel 28
#
d <- expand.grid(Age=seq(0,80),
Sex=c("female","male"),
Pclass=c(1,2,3))
## -- Eksempel 29
#
library(modelr)
dpred <- d %>% add_predictions(model=mod4, type="response")
## -- Eksempel 30
#
ggplot(dpred, aes(x=Age,y=pred,color=Sex))+
geom_line()+
facet_wrap( ~ Pclass)
## -- Eksempel 31
#
mod5 <- glm(Survived ~ Sex*Age * Pclass,
family=binomial(link='logit'),
data=titanic)
## -- Eksempel 32
#
lrtest(mod4, mod5)
rbind(
alder.kjønn.pclass = PseudoR2(mod4, which = "Nagelkerke"),
alder.kjønn.pclass.ia = PseudoR2(mod5, which = "Nagelkerke"))
## -- Eksempel 33
#
dpred2 <- d %>%
add_predictions(var = "mod4", model=mod4, type="response") %>%
add_predictions(var = "mod5", model=mod5, type="response")
## -- Eksempel 34
#
dpred2 %>%
gather(model, pred, mod4, mod5) %>%
ggplot(aes(x=Age,y=pred,color=model))+
geom_line()+facet_grid(Sex ~ Pclass)
## -- Example 35
# Keep only rows with complete data for the modelling variables.
titanic.complete <- na.omit(titanic, cols=c("Survived", "Age",
"Sex", "Pclass"))
## -- Example 36
# Sizes for a 75/25 train/test split.
ntotal <- nrow(titanic.complete)
ntrain <- floor(0.75*ntotal)
ntest <- ntotal-ntrain
## -- Example 37
# Randomly sample row indices for the training set.
# (NOTE(review): F is shorthand for FALSE; spelling it out is safer.)
train.ix <- sample.int(n=ntotal, size = ntrain, replace = F)
## -- Example 38
# Partition the data: sampled rows train, the remainder test.
titanic.train <- titanic.complete[ train.ix,]
titanic.test <- titanic.complete[-train.ix,]
## -- Example 39
# Refit the full interaction model on the training data only.
mod.train <- glm(Survived ~ Sex*Age * Pclass,
family=binomial(link='logit'),
data=titanic.train)
## -- Example 40
# Predicted survival probabilities on the held-out test set
# ("pred.sannsynlighet" is Norwegian for "predicted probability").
titanic.test.pred <- add_predictions(data = titanic.test,
model= mod.train,
var = "pred.sannsynlighet",
type = "response")
## -- Example 41
# Classify with a 0.5 probability cutoff (T ~ 0 is the catch-all branch).
titanic.test.pred <- titanic.test.pred %>%
mutate(pred.Survived =
case_when(pred.sannsynlighet>0.5 ~ 1,
T ~ 0))
## -- Example 42
# Raw confusion table: observed vs. predicted survival.
with(titanic.test.pred, table(Survived, pred.Survived))
## -- Example 43
# Full confusion-matrix summary (accuracy, sensitivity, ...); factor labels
# are Norwegian: "doede" = died, "overlevde" = survived.
library(caret)
predikert <- factor(titanic.test.pred$pred.Survived,
labels=c("doede", "overlevde"))
overlevde <- factor(titanic.test.pred$Survived,
labels=c("doede", "overlevde"))
confusionMatrix(predikert, overlevde)
## -- Example 44
# ROC curve for the predicted probabilities on the test set.
library(plotROC)
ggplot(titanic.test.pred, aes(d=Survived, m=pred.sannsynlighet))+
geom_roc()
## -- Example 45
# Area under the ROC curve, extracted from the plot object.
p <- ggplot(titanic.test.pred, aes(d=Survived,
m=pred.sannsynlighet))+
geom_roc()
calc_auc(p)[["AUC"]]
## -- Example 46
# 10-fold cross-validation of the full interaction model: fit on each
# training fold, predict on the held-out fold, and compute the proportion of
# correct classifications per fold ("andel.korrekt" = proportion correct).
library(broom)
library(modelr)
library(purrr)
titanic.complete %>% crossv_kfold(k=10) %>%
mutate(mod=map(train, ~ glm(Survived ~ Sex*Age * Pclass,
family=binomial(link='logit'),
data=.))) %>%
mutate(predicted = map2(mod, test,
~ augment(.x, newdata = .y,
type.predict="response"))) %>%
mutate(andel.korrekt=map(predicted, function(df) {
df %>% mutate(pred=.fitted>0.5,
correct=(Survived==pred)) %>%
pull(correct) %>% sum(na.rm=T)/(dim(df)[1])
})) %>% unnest(andel.korrekt) %>% pull(andel.korrekt) ->
andel.korrekt
andel.korrekt
## -- Example 47
# Distribution of per-fold accuracy across the 10 folds.
summary(andel.korrekt)
|
36090d58a52e14e9a22e3765b54f8e1292c44a77 | f330087d8a5a0c7b67eeff0e7eebf66275fae98a | /make_ratings.R | 82de921cd0b86f9902a65544606b9efb9fd4621a | [] | no_license | suziebrown/ratings | 5eddd826851b2bbbfcc4ea7f83a3a9a0cc1e8911 | 2717dd0bfa7625a458be0232fe3526b5a7fb3958 | refs/heads/master | 2021-08-22T10:55:47.304500 | 2017-11-30T01:49:22 | 2017-11-30T01:49:25 | 111,672,231 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,027 | r | make_ratings.R | #' Simulate ratings data
#'
#' Each rater's scores are the underlying strengths perturbed by
#' independent Gaussian noise, so the result is an n.alts x n.raters matrix
#' with one column per rater.
#'
#' @param n.alts number of alternatives to be rated
#' @param n.raters number of people assigning ratings
#' @param strengths a vector of length n.alts containing the underlying strengths; if NULL strengths are chosen at random
#' @param sd.strengths standard deviation for randomly assigning Normally-distributed strengths
#' @param sd.noise standard deviation of additive Normal noise
#'
#' @return a list of two items; first the matrix where each column is the vector of ratings assigned by one rater, then the vector of random underlying strengths
#'
#' @export make.ratings
#'
make.ratings <- function(n.alts, n.raters, strengths=NULL, sd.strengths=1, sd.noise=0.1) {
  if (is.null(strengths)) {
    # No strengths supplied: draw them from N(0, sd.strengths).
    strengths <- rnorm(n.alts, 0, sd.strengths)
  } else if (length(strengths) != n.alts) {
    stop("length of strengths vector is not equal to number of alternatives")
  }
  # One column per rater: strengths plus independent N(0, sd.noise) noise.
  scores <- replicate(n.raters, strengths + rnorm(n.alts, 0, sd.noise))
  list(ratings = scores, strengths = strengths)
}
a9bd310b023c1ae622ab24f4a06f67bd1c00f774 | c287bcd8eb4cc723787454b15442f0a4e4f0db0d | /R/benchmark.getDTeval.R | 171a8dcd9191db8ceadd4c19f708c6cbadbc831e | [] | no_license | Zoe0409/getDTeval | f0fbb7e4b4e45be7e39ea905ded58964eba26e53 | d26d32f56d7362bd169c8c92a49879148fb44915 | refs/heads/master | 2020-11-30T20:36:22.495031 | 2020-07-24T01:46:30 | 2020-07-24T01:46:30 | 230,473,930 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,425 | r | benchmark.getDTeval.R | #' benchmark.getDTeval
#'
#' @description Performs a benchmarking experiment for data.table coding statements that use get() or eval() for programmatic designs. The a) original statement is compared to b)passing the original statement through getDTeval and also to c) an optimized coding statement. The results can demonstrate the overall improvement of using the coding translations offered by getDTeval().
#'
#' @param the.statement a character value expressing a data.table calculation, such as \code{dt[Age < 50, .(mean(Income)), by = "Region"]}. When the.statement includes variables that are called by reference to a naming constant using get() or eval(), then these statements are translated into the names of the variables for substantial improvements in speed.
#' @param times The number of iterations to run the benchmarking experiment.
#' @param seed an integer value specifying the seed of the pseudorandom number generator.
#' @param envir The environment in which the calculation takes place, with the global environment .GlobalEnv set as the default.
#' @param ... Not used at this time.
#'
#' @return A data.table with one row per category ("original statement",
#'   "optimized statement", "getDTeval") giving the min/quartile/mean/max
#'   running times in seconds, ordered by mean time.
#'
#' @source getDTeval::getDTeval
#' @import data.table
#' @import microbenchmark
#'
#' @export
benchmark.getDTeval <-
  function(the.statement,
           times = 30,
           seed = 47,
           envir = .GlobalEnv,
           ...) {
    # Dummy local bindings so R CMD check does not flag the data.table NSE
    # column names used below as undefined global variables.
    "." <- NULL
    "category" <- NULL
    "seconds" <- NULL
    "time" <- NULL
    set.seed(seed = seed)
    if (!is.character(the.statement) &
        !is.expression(x = the.statement)) {
      # NOTE(review): returns an error string rather than calling stop();
      # kept as-is since callers may rely on this behavior.
      return("Error: the.statement must be a character or expression.")
    }
    # Translate get()/eval() references in the statement into direct
    # variable names (the "optimized" version of the statement).
    translated.statement <-
      getDTeval(the.statement = the.statement,
                return.as = "code",
                envir = envir)
    # Time the optimized statement; on error fall back to a single NA row so
    # the category still appears in the summary.
    times.translated <-
      tryCatch(data.table::as.data.table(microbenchmark::microbenchmark(
        eval(parse(text = translated.statement)), times = times)),
        error = function(cond)data.table::data.table(expr = NA, time = NA))
    times.translated[, category := "optimized statement"]
    # Time the original statement as written.
    times.dt <-
      tryCatch(data.table::as.data.table(microbenchmark::microbenchmark(
        eval(parse(text = the.statement)), times = times)),
        error = function(cond)data.table::data.table(expr = NA, time = NA))
    times.dt[, category := "original statement"]
    # Time running the original statement through getDTeval() itself, i.e.
    # translation plus evaluation on every iteration.
    times.getDTeval <-
      tryCatch(data.table::as.data.table(microbenchmark::microbenchmark(
        getDTeval(
          the.statement = the.statement,
          return.as = "result",
          envir = envir
        ),
        times = times
      )), error = function(cond)data.table::data.table(expr = NA, time = NA))
    times.getDTeval[, category := "getDTeval"]
    # Stack the three timing tables into one.
    res <-
      rbindlist(l = list(times.translated, times.dt, times.getDTeval),
                fill = TRUE)
    # microbenchmark records times in nanoseconds; convert to seconds.
    res[, seconds := time / (10 ^ 9)]
    # Six-number summary (Min/1st Qu./Median/Mean/3rd Qu./Max) per category.
    the.tab <-
      res[, .(metric = names(summary(seconds)), seconds = summary(seconds)), keyby = "category"]
    # Reshape to one row per category with a column per summary metric.
    the.summary <-
      data.table::dcast.data.table(data = the.tab,
                                   formula = category ~ metric,
                                   value.var = "seconds")
    # Fix the column order for presentation.
    the.summary = the.summary[,.SD,.SDcols = c(
      "category",
      "Min.",
      "1st Qu.",
      "Median",
      "Mean",
      "3rd Qu.",
      "Max."
    )]
    # Fastest category first; failed (NA) categories last.
    setorderv(x = the.summary, cols = "Mean", na.last = TRUE)
    return(the.summary)
  }
|
8f6174f19cdad2203254c51811d322c7c43f21ae | 911115a9acec8000c9ddba12c8ecfca55f684ec5 | /run_analysis.R | cf1809f56ec7178d37b631425d7a24e3852ee214 | [] | no_license | manjicar/GettingCleaningData | 2584df730eaafe62836bef2e01ff4775a99cdbb3 | 32256ff78954e3dc2d79376698f9180f6ee2414b | refs/heads/master | 2020-07-03T16:40:03.413581 | 2016-11-21T03:03:53 | 2016-11-21T03:03:53 | 74,246,909 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,037 | r | run_analysis.R | # Read the data sets
# Build a tidy data set from the UCI HAR (smartphone activity) files:
# merge the train/test splits, label activities, keep mean/std features,
# and write the combined table to final.txt.
# Assumes the working directory contains the unzipped data files.
xTrain <- read.table("X_train.txt")
yTrain <- read.table("y_train.txt")
subjectTrain <- read.table("subject_train.txt")
xTest <- read.table("X_test.txt")
yTest <- read.table("y_test.txt")
subjectTest <- read.table("subject_test.txt")
features <- read.table("features.txt")
activityLabels <- read.table("activity_labels.txt")
# Stack train on top of test for each table (row order: train, then test)
xAll <- rbind(xTrain, xTest)
yAll <- rbind(yTrain, yTest)
subjectAll <- rbind(subjectTrain, subjectTest)
# Apply descriptive column names
colnames(xAll) <- features$V2
colnames(yAll) <- "Activity"
colnames(subjectAll) <- "Subject"
# Replace numeric activity codes with their labels (e.g. WALKING)
yAll[, 1] <- activityLabels[yAll[, 1], 2]
# Keep only measurements on the mean and standard deviation
mean_std <- grep(".*mean.*|.*std.*", features[,2])
xAllMeanStd <- xAll[,mean_std]
# Assemble the final data set: subject, activity, then the selected features
Final <- cbind(subjectAll, yAll, xAllMeanStd)
# Write the tidy data set. Fix: spell out row.names rather than relying on
# partial argument matching of the misspelled "row.name".
write.table(Final, "final.txt", row.names = FALSE)
|
80abf94b60d711ad85e39218df0ed8d4eab0ac07 | fd34554dc7416c67491e519019ec57331a835c91 | /binomial/man/bin_variable.Rd | 0fdcc6afe496f3c26c0776f7d36cb12b764d8cd7 | [] | no_license | stat133-sp19/hw-stat133-alanvaldez02 | 252d2f7bec7b90be7cf3342fc967c84e87a3a740 | ba41f6aa7d70fa8b2f87ba5bcf2753eb2be3de13 | refs/heads/master | 2020-04-28T20:39:50.541157 | 2019-05-04T00:22:28 | 2019-05-04T00:22:28 | 175,552,306 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 314 | rd | bin_variable.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bin_variable.R
\name{bin_variable}
\alias{bin_variable}
\title{bin_variable}
\usage{
bin_variable(trials, prob)
}
\arguments{
\item{trials}{number of trials}
\item{prob}{probability}
}
\value{
A binomial random variable object defined by the given number of trials and
probability of success.
}
\description{
Creates a binomial random variable with \code{trials} trials and success
probability \code{prob}.
}
|
5c970163014e857d35bf7425a8fa7769d5ebec99 | 3841eb5fbed3bba2490b9c8b0bf4193f55b16bbc | /man/Valid.correlation.Rd | adc89b714b2a72162afad4c75ea219a6f470f7d6 | [] | no_license | cran/PoisNor | aa906efef068fe5a37a63bb3c89ec23230245864 | fd45a990c7fdafa9b16a98f718f7a29038b58585 | refs/heads/master | 2021-06-20T01:57:02.974526 | 2021-03-21T21:50:02 | 2021-03-21T21:50:02 | 17,928,742 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 876 | rd | Valid.correlation.Rd | \name{Valid.correlation}
\alias{Valid.correlation}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Computes the lower and upper correlation bounds in the form of two matrices
}
\description{
The function computes the lower and upper bounds for the target correlations based on the marginal rates.
}
\usage{
Valid.correlation(no.pois, no.norm, lamvec)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{no.pois}{
Number of Poisson variables.
}
\item{no.norm}{
Number of normal variables.
}
\item{lamvec}{
A vector of marginal rates for Poisson variables.
}
}
\details{
The function returns a list of two matrices. The \code{min} contains the lower bounds and the \code{max} contains the upper bounds of the feasible correlations.
}
\examples{
lamvec= c(0.05,0.07,0.09)
Valid.correlation(no.pois=3, no.norm=3,lamvec)
}
|
b3103a38322037cf78fbd4442ca8171a9c9adf94 | 738da2690597f09ea30eab5cacb639618df3b152 | /man/correlateReads.Rd | f8d1b323c8977ebb38eb8417f937ee21c576d425 | [] | no_license | LTLA/csaw | 2ba04325357ac6027a5f246a66a2bcb47e5c2822 | cac071b818458ae19b091c20b4bea9f477ec80a9 | refs/heads/master | 2023-03-23T01:07:46.995530 | 2023-03-14T05:46:54 | 2023-03-14T05:46:54 | 102,946,148 | 5 | 4 | null | 2020-11-02T18:30:29 | 2017-09-09T10:10:50 | R | UTF-8 | R | false | false | 4,316 | rd | correlateReads.Rd | \name{correlateReads}
\alias{correlateReads}
\title{Compute correlation coefficients between reads}
\description{Computes the auto- or cross-correlation coefficients between read positions across a set of delay intervals.}
\usage{
correlateReads(bam.files, max.dist=1000, cross=TRUE, param=readParam(),
BPPARAM=SerialParam())
}
\arguments{
\item{bam.files}{A character vector containing paths to sorted and indexed BAM files.
Alternatively, a list of \linkS4class{BamFile} objects.}
\item{max.dist}{An integer scalar specifying the maximum delay distance over which correlation coefficients will be calculated.}
\item{cross}{A logical scalar specifying whether cross-correlations should be computed.}
\item{param}{A \linkS4class{readParam} object containing read extraction parameters.}
\item{BPPARAM}{A \linkS4class{BiocParallelParam} specifying how parallelization is to be performed across files.}
}
\value{
A numeric vector of length \code{max.dist+1} containing the correlation coefficients for each delay interval from 0 to \code{max.dist}.
}
\details{
If \code{cross=TRUE}, reads are separated into those mapping on the forward and reverse strands.
Positions on the forward strand are shifted forward by a delay interval.
The chromosome-wide correlation coefficient between the shifted forward positions and the original reverse positions are computed.
This is repeated for all delay intervals less than \code{max.dist}.
A weighted mean for the cross-correlation is taken across all chromosomes, with weighting based on the number of reads.
Cross-correlation plots can be used to check the quality of immunoprecipitation for ChIP-Seq experiments involving transcription factors or punctate histone marks.
Strong immunoprecipitation should result in a peak at a delay corresponding to the fragment length.
A spike may also be observed at the delay corresponding to the read length.
This is probably an artefact of the mapping process where unique mapping occurs to the same sequence on each strand.
By default, marked duplicate reads are removed from each BAM file prior to calculation of coefficients.
This is strongly recommended, even if the rest of the analysis will be performed with duplicates retained.
Otherwise, the read length spike will dominate the plot, such that the fragment length peak will no longer be easily visible.
If \code{cross=FALSE}, auto-correlation coefficients are computed without use of strand information.
This is designed to guide estimation of the average width of enrichment for diffuse histone marks.
For example, the width can be defined as the delay distance at which the autocorrelations become negligible.
However, this tends to be ineffective in practice as diffuse marks tend to have very weak correlations to begin with.
If multiple BAM files are specified in \code{bam.files}, the reads from all libraries are pooled prior to calculation of the correlation coefficients.
This is convenient for determining the average correlation profile across an entire dataset.
Separate calculations for each file will require multiple calls to \code{correlateReads}.
Paired-end data is also supported, whereby correlations are computed using only those reads in proper pairs.
This may be less meaningful as the presence of proper pairs will inevitably result in a strong peak at the fragment length.
Instead, IP efficiency can be diagnosed by treating paired-end data as single-end, e.g., with \code{pe="first"} in \code{\link{readParam}}.
}
\examples{
n <- 20
bamFile <- system.file("exdata", "rep1.bam", package="csaw")
par(mfrow=c(2,2))
x <- correlateReads(bamFile, max.dist=n)
plot(0:n, x, xlab="delay (bp)", ylab="ccf")
x <- correlateReads(bamFile, max.dist=n, param=readParam(dedup=TRUE))
plot(0:n, x, xlab="delay (bp)", ylab="ccf")
x <- correlateReads(bamFile, max.dist=n, cross=FALSE)
plot(0:n, x, xlab="delay (bp)", ylab="acf")
# Also works on paired-end data.
bamFile <- system.file("exdata", "pet.bam", package="csaw")
x <- correlateReads(bamFile, param=readParam(pe="both"))
head(x)
}
\seealso{
\code{\link{ccf}}
}
\references{
Kharchenko PV, Tolstorukov MY and Park, PJ (2008). Design and analysis of
ChIP-seq experiments for DNA-binding proteins. \emph{Nat. Biotechnol.} 26,
1351-1359.
}
\author{Aaron Lun}
\keyword{diagnostics}
|
5e8c8f455e663d42d0d248003f2da7ba8bddb4b5 | bacdafc215720beab30760b35cb74c447ada8a92 | /Ecological-Modeling/Rpplane-SimpleCoralMucus.R | a2095934447080cef29c27dcff00d505802d32ed | [] | no_license | jlmorano/Reference-R-scripts | 38f5aaf44ad854d29c667e0279c3b84654bf1a28 | dd67863c506fa8adbcd848df9c81491775768bce | refs/heads/master | 2023-07-03T04:06:27.045859 | 2021-08-09T18:56:32 | 2021-08-09T18:56:32 | 90,040,998 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 522 | r | Rpplane-SimpleCoralMucus.R | rm(list=ls(all=TRUE))
# Load the Rpplane phase-plane analysis package.
# NOTE(review): require() only warns (returns FALSE) if the package is
# missing; library() would fail fast.
require(Rpplane);
# Function to compute state variable derivatives.
# Note that the state variables are sent in as two separate arguments,
# not as a vector, and that time t is not an argument.
# Returns c(dB/dt, dP/dt) for state (B, P), with parms = c(r.B, r.P, lambda).
coralmucus <- function(B, P, parms) {
  growth.B <- parms[1]
  growth.P <- parms[2]
  suppression <- parms[3]
  # Space left unoccupied by either species.
  free.space <- 1 - B - P
  rate.B <- growth.B * B * free.space - B
  # P's colonization of free space is damped exponentially by B.
  rate.P <- growth.P * P * free.space * exp(-suppression * B) - P
  c(rate.B, rate.P)
}
# Model parameters: growth rates r.B and r.P, suppression strength lambda.
r.B=2; r.P=4; lambda=2;
parms=c(r.B,r.P,lambda)
# Launch the phase-plane tool over B in [-0.02, 0.8] and P in [-0.02, 0.9].
Rpplane(coralmucus,c(-0.02,0.8),c(-0.02,0.9),parms=parms);
|
795588c30becaf41a1a7277feba2c99ffe3219c8 | 155f0b41ac738e2c9671299da0e094e69fca5939 | /man/cal_proxy_toggle.Rd | 09a2b6753ba3d32c62a40f68ce3625b95361693b | [
"MIT"
] | permissive | jeanantoinedasilva/toastui | dd8fa04519c5822966abffffb1b9b23ea0e25a3a | 7dfa548d11c2df13d3c7317047862079f8e5d07d | refs/heads/master | 2023-09-02T08:47:47.501078 | 2021-11-03T15:45:55 | 2021-11-03T15:45:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,567 | rd | cal_proxy_toggle.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calendar-proxy.R
\name{cal_proxy_toggle}
\alias{cal_proxy_toggle}
\title{Toggle schedules visibility with Proxy}
\usage{
cal_proxy_toggle(proxy, calendarId, toHide = TRUE)
}
\arguments{
\item{proxy}{A \code{\link[=calendar_proxy]{calendar_proxy()}} \code{htmlwidget} object.}
\item{calendarId}{One or several calendar IDs to toggle.}
\item{toHide}{Logical, show or hide schedules with provided calendar IDs.}
}
\value{
A \code{calendar_proxy} object.
}
\description{
This function allow to show or hide schedules based on their calendar's ID.
}
\examples{
library(shiny)
library(toastui)
ui <- fluidPage(
fluidRow(
column(
width = 2,
tags$h4("Checkbox logic :"),
checkboxGroupInput(
inputId = "calendarId",
label = "Calendars to show:",
choices = list(
"Perso" = "1",
"Work" = "2",
"Courses" = "3"
),
selected = 1:3
),
tags$h4("Button logic :"),
actionButton("cal_1", "Perso", class= "btn-block"),
actionButton("cal_2", "Work", class= "btn-block"),
actionButton("cal_3", "Courses", class= "btn-block")
),
column(
width = 10,
tags$h2("Show / Hide schedules by calendarId"),
calendarOutput(outputId = "cal"),
uiOutput("ui")
)
)
)
server <- function(input, output, session) {
output$cal <- renderCalendar({
calendar(view = "month", taskView = TRUE, useDetailPopup = FALSE) \%>\%
cal_props(cal_demo_props()) \%>\%
cal_schedules(cal_demo_data())
})
# With checkbox
observeEvent(input$calendarId, {
cal_proxy_toggle("cal", input$calendarId, toHide = FALSE)
cal_proxy_toggle("cal", setdiff(1:3, input$calendarId), toHide = TRUE)
}, ignoreInit = TRUE, ignoreNULL = FALSE)
# With buttons
observeEvent(input$cal_1, {
cal_proxy_toggle("cal", "1", toHide = input$cal_1 \%\% 2 == 1)
}, ignoreInit = TRUE)
observeEvent(input$cal_2, {
cal_proxy_toggle("cal", "2", toHide = input$cal_2 \%\% 2 == 1)
}, ignoreInit = TRUE)
observeEvent(input$cal_3, {
cal_proxy_toggle("cal", "3", toHide = input$cal_3 \%\% 2 == 1)
}, ignoreInit = TRUE)
}
if (interactive())
shinyApp(ui, server)
}
\seealso{
Other calendar proxy methods:
\code{\link{cal_proxy_clear}()},
\code{\link{cal_proxy_options}()},
\code{\link{cal_proxy_view}()},
\code{\link{calendar-proxy-navigate}},
\code{\link{calendar-proxy-schedule}},
\code{\link{calendar_proxy}()}
}
\concept{calendar proxy methods}
|
774f18d826bff0a499d918f5986675327378996e | 2a6f2e9519ec64b8a19593fb8e9d4f40f4d2920d | /R Scripts/CpG Context LOLA Enrichment Analysis.R | 85db8fcc3b7aefed7725fc5753e9d048785da3ea | [
"MIT"
] | permissive | cemordaunt/AutismCordBloodMethylation | ff839065a1c1824aa8b02dea37de6f0306e60c7d | 5b1a4101a7b1d8bc3cfcb1df693d842fa887d12f | refs/heads/master | 2022-01-31T21:26:43.924461 | 2022-01-12T23:24:15 | 2022-01-12T23:24:15 | 149,683,492 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 49,143 | r | CpG Context LOLA Enrichment Analysis.R | # CpG Context LOLA Enrichment Analysis ---------------------------------------------
# Autism Cord Blood Methylation
# Charles Mordaunt
# 7/8/19
# Packages ####
.libPaths("/share/lasallelab/Charles/R")
sapply(c("tidyverse", "LOLA", "simpleCache", "GenomicRanges", "qvalue", "annotatr", "scales", "reshape2"), require, character.only = TRUE)
# Functions ####
# Cluster
# Load a DMR or background region table from a tab-delimited .txt file or a
# comma-separated file.
#
# Args:
#   file:   path to the region table; files ending in ".txt" are read as
#           tab-delimited, everything else as comma-separated
#   chroms: chromosomes to keep (also sets the factor level order of chr)
#   sort:   if TRUE, order rows by chromosome then start position
#
# Returns: a data.frame with a factor "chr" column (a GRanges-style
#   "seqnames" column is renamed to "chr"), restricted to chroms.
loadRegions <- function(file, chroms = c(paste("chr", 1:22, sep = ""), "chrX", "chrY", "chrM"), sort = TRUE){
  # Fix: match the ".txt" extension instead of any "txt" substring, so a
  # name such as "txt_summary.csv" is not misread as tab-delimited.
  if(grepl("\\.txt$", file)){
    regions <- read.delim(file, sep = "\t", header = TRUE, stringsAsFactors = FALSE)
  }
  else{
    regions <- read.csv(file, header = TRUE, stringsAsFactors = FALSE)
  }
  # GRanges exports name the chromosome column "seqnames"
  if("seqnames" %in% colnames(regions)){
    colnames(regions)[colnames(regions) == "seqnames"] <- "chr"
  }
  regions <- subset(regions, chr %in% chroms)
  regions$chr <- factor(regions$chr, levels = chroms)
  if(sort){
    regions <- regions[order(regions$chr, regions$start),]
  }
  return(regions)
}
# Convert a DMR data.frame (chr/start/end, and percentDifference when a
# direction is requested) into a GRanges object.
#
# Args:
#   DMRs:      data.frame with chr, start, end columns; percentDifference is
#              required for direction = "hyper"/"hypo"
#   direction: "all" (default), "hyper" (percentDifference > 0), or
#              "hypo" (percentDifference < 0)
#
# Fixes: the original compared the full default vector with == inside if()
# when direction was omitted (an error in current R); match.arg() makes
# "all" the default and validates the argument. Also returns the GRanges
# explicitly instead of ending on an assignment (which returns invisibly).
makeGRange <- function(DMRs, direction = c("all", "hyper", "hypo")){
  direction <- match.arg(direction)
  if(direction == "hyper"){DMRs <- subset(DMRs, percentDifference > 0)}
  if(direction == "hypo"){DMRs <- subset(DMRs, percentDifference < 0)}
  GRanges(seqnames = DMRs$chr, ranges = IRanges(start = DMRs$start, end = DMRs$end))
}
# Laptop
# NOTE(review): on the laptop the helpers above are instead sourced from this
# script; it may define a different loadRegions (the DMRid = TRUE calls below
# depend on it) -- confirm against that file.
source("R Scripts/DMR Analysis Functions.R")
# Get CpG Context Annotation ####
# Build hg38 CpG island/shore/shelf/open-sea annotations (annotatr), keep
# standard chromosomes, and flatten to a data.frame.
CpGs <- build_annotations(genome = "hg38", annotations = "hg38_cpgs") %>%
  GenomeInfoDb::keepStandardChromosomes(pruning.mode = "coarse") %>% as.data.frame()
CpGs <- CpGs[,c("seqnames", "start", "end", "type")]
# Rename annotatr's type codes to the labels used throughout this analysis
CpGs$type <- str_replace_all(CpGs$type, pattern = c("hg38_cpg_islands" = "CpG_Island", "hg38_cpg_shores" = "CpG_Shore",
                                                    "hg38_cpg_shelves" = "CpG_Shelf", "hg38_cpg_inter" = "CpG_Open_Sea"))
# Write a combined BED track, then split it into one file per context type
# (column 4) to build the LOLA region collection
write.table(CpGs, "UCSC Tracks/CpG_Context.bed", sep = "\t", quote = FALSE, row.names = FALSE, col.names = TRUE)
splitFileIntoCollection("UCSC Tracks/CpG_Context.bed", splitCol = 4, collectionFolder = "UCSC Tracks")
# LOLA collection index: file name -> human-readable description
index <- data.frame(filename = c("CpG_Island.bed", "CpG_Shore.bed", "CpG_Shelf.bed", "CpG_Open_Sea.bed"),
                    description = c("CpG Island", "CpG Shore", "CpG Shelf", "CpG Open Sea"))
write.table(index, "Tables/index.txt", sep = "\t", quote = FALSE, row.names = FALSE, col.names = TRUE)
rm(index)
# Get All DMRs LOLA Enrichments ----------------------------------------------------
# Data ####
# Load the CpG-context region collection built above into a LOLA database
regionDB <- loadRegionDB(dbLocation = "/share/lasallelab/programs/LOLA/hg38", useCache = TRUE, limit = NULL,
                         collections = c("CpG_context"))
# Chromosome sets: female analyses exclude chrY
maleChroms <- c(paste("chr", 1:22, sep = ""), "chrX", "chrY", "chrM")
femaleChroms <- c(paste("chr", 1:22, sep = ""), "chrX", "chrM")
# Run one LOLA enrichment analysis: load DMRs and background, split the DMRs
# into all/hyper/hypo sets, test against the CpG-context region database, and
# archive the combined enrichment table under a comparison-specific name.
# Side effects only: changes the working directory and writes under LOLA/.
# (Refactors the four previously duplicated sections into one helper.)
runCpGContextLOLA <- function(baseDir, dmrFile, bgFile, chroms, outFile) {
        # Relative paths below (dmrFile, bgFile, "LOLA/") resolve against baseDir
        setwd(baseDir)
        DMRs <- loadRegions(file = dmrFile, chroms = chroms, sort = TRUE)
        DMRlist <- list("AllDMRs" = makeGRange(DMRs = DMRs, direction = "all"),
                        "HyperDMRs" = makeGRange(DMRs = DMRs, direction = "hyper"),
                        "HypoDMRs" = makeGRange(DMRs = DMRs, direction = "hypo"))
        Background <- loadRegions(file = bgFile, chroms = chroms, sort = TRUE) %>%
                makeGRange(direction = "all")
        Results <- runLOLA(userSets = DMRlist, userUniverse = Background, regionDB = regionDB, cores = 2,
                           redefineUserSets = TRUE)
        writeCombinedEnrichment(combinedResults = Results, outFolder = "LOLA", includeSplits = FALSE)
        # Archive the generic LOLA output under a descriptive file name
        file.copy(from = "LOLA/allEnrichments.tsv", to = outFile, overwrite = TRUE)
}
# Discovery Males DMRs ####
runCpGContextLOLA(baseDir = "/share/lasallelab/Charles/CM_WGBS_ASD_CordBlood/Bismark_Reports/Discovery/",
                  dmrFile = "Dx_Males/DMRs_Dx_Discovery50_males.csv",
                  bgFile = "Dx_Males/bsseq_background_Discovery50_males.csv",
                  chroms = maleChroms,
                  outFile = "LOLA/LOLA_CpG_Context_Dx_Discovery50_males_DMRs.tsv")
# Discovery Females DMRs ####
runCpGContextLOLA(baseDir = "/share/lasallelab/Charles/CM_WGBS_ASD_CordBlood/Bismark_Reports/Discovery/",
                  dmrFile = "Dx_Females/DMRs_Dx_Discovery50_females.csv",
                  bgFile = "Dx_Females/bsseq_background_Discovery50_females.csv",
                  chroms = femaleChroms,
                  outFile = "LOLA/LOLA_CpG_Context_Dx_Discovery50_females_DMRs.tsv")
# Replication Males DMRs ####
runCpGContextLOLA(baseDir = "/share/lasallelab/Charles/CM_WGBS_ASD_CordBlood/Bismark_Reports/Replication/",
                  dmrFile = "Dx_Males/DMRs_Dx_Replication50_males.csv",
                  bgFile = "Dx_Males/bsseq_background_Replication50_males.csv",
                  chroms = maleChroms,
                  outFile = "LOLA/LOLA_CpG_Context_Dx_Replication50_males_DMRs.tsv")
# Replication Females DMRs ####
runCpGContextLOLA(baseDir = "/share/lasallelab/Charles/CM_WGBS_ASD_CordBlood/Bismark_Reports/Replication/",
                  dmrFile = "Dx_Females_100/DMRs_Dx_Replication100_females.csv",
                  bgFile = "Dx_Females_100/bsseq_background_Replication100_females.csv",
                  chroms = femaleChroms,
                  outFile = "LOLA/LOLA_CpG_Context_Dx_Replication100_females_DMRs.tsv")
# Analyze All DMRs LOLA Enrichments -----------------------------------------
# Load Data and Combine ####
# Read the four archived enrichment tables (one per comparison)
lola <- list(read.delim("Tables/LOLA_CpG_Context_Dx_Discovery50_males_DMRs.tsv", sep = "\t", header = TRUE,
                        stringsAsFactors = FALSE),
             read.delim("Tables/LOLA_CpG_Context_Dx_Discovery50_females_DMRs.tsv", sep = "\t", header = TRUE,
                        stringsAsFactors = FALSE),
             read.delim("Tables/LOLA_CpG_Context_Dx_Replication50_males_DMRs.tsv", sep = "\t", header = TRUE,
                        stringsAsFactors = FALSE),
             read.delim("Tables/LOLA_CpG_Context_Dx_Replication100_females_DMRs.tsv", sep = "\t", header = TRUE,
                        stringsAsFactors = FALSE))
names(lola) <- c("Discovery_Males", "Discovery_Females", "Replication_Males", "Replication_Females")
# Keep only the directional DMR sets and FDR-correct p-values within each
# comparison (pValueLog is -log10(p))
lola <- lapply(lola, function(x){
        x <- subset(x, userSet %in% c("HyperDMRs", "HypoDMRs"))
        x$qValue <- p.adjust(10^(-x$pValueLog), method = "fdr")
        return(x)
})
# Align columns across the four tables, then stack them into one data.frame
lola <- lapply(lola, function(x) x[,colnames(lola[["Discovery_Males"]])])
lola <- rbind(lola[["Discovery_Males"]], lola[["Discovery_Females"]], lola[["Replication_Males"]], lola[["Replication_Females"]])
# Label rows by comparison; relies on exactly 8 rows per table
# (2 userSets x 4 CpG contexts)
lola$DMRs <- c(rep(c("Discovery_Males", "Discovery_Females", "Replication_Males", "Replication_Females"), each = 8)) %>%
        factor(levels = c("Discovery_Males", "Discovery_Females", "Replication_Males", "Replication_Females"))
# Sanity check: every test has at least 5 overlapping DMRs
table(lola$support < 5) # All FALSE
# Percent of DMRs overlapping each region set
lola$pct_DMRs <- lola$support * 100 / (lola$support + lola$c)
# Replace infinite -log10 p-values with the largest finite value observed
lola$pValueLog[is.infinite(lola$pValueLog)] <- NA
lola$pValueLog[is.na(lola$pValueLog)] <- max(lola$pValueLog, na.rm = TRUE)
lola$pValue <- 10^(-lola$pValueLog)
lola$qValueLog <- -log10(lola$qValue)
lola$qValueLog[is.infinite(lola$qValueLog)] <- NA
lola$qValueLog[is.na(lola$qValueLog)] <- max(lola$qValueLog, na.rm = TRUE)
lola <- lola[,c("DMRs", "userSet", "description", "pValue", "qValue", "pValueLog", "qValueLog", "oddsRatio", "support", "pct_DMRs",
                "b", "c", "d", "size")]
lola$userSet <- factor(lola$userSet, levels = c("HyperDMRs", "HypoDMRs"))
# Shorten "CpG Island" etc. to "Island" etc. for plotting
lola$description <- gsub("CpG ", replacement = "", x = lola$description, fixed = TRUE) %>%
        factor(levels = rev(c("Island", "Shore", "Shelf", "Open Sea")))
lola <- lola[order(lola$DMRs, lola$userSet, lola$description),]
# NOTE: the Sex/Regions labels below depend on the exact row order produced
# by the sort above (8 rows per comparison)
lola$Sex <- c(rep(c("Males", "Females"), each = 8), rep(c("Males", "Females"), each = 8)) %>%
        factor(levels = c("Males", "Females"))
lola$Regions <- c(rep(rep(c("Discovery Hyper", "Discovery Hypo"), each = 4), 2),
                  rep(rep(c("Replication Hyper", "Replication Hypo"), each = 4), 2)) %>%
        factor(levels = c("Discovery Hyper", "Replication Hyper", "Discovery Hypo", "Replication Hypo"))
# Flag q < 0.05 enrichments for the heatmap asterisks
lola$Significant <- (lola$qValue < 0.05) %>% factor(levels = c("TRUE", "FALSE"))
# Confirm no infinite odds ratios remain in any context
table(is.infinite(lola$oddsRatio), lola$description)
#       Open Sea Shelf Shore Island
# FALSE        8     8     8      8
write.csv(lola, "Tables/LOLA CpG Context Enrichment Results.csv", row.names = FALSE)
# Plot Odds Ratio Heatmap ####
# Heatmap of enrichment odds ratios by CpG context (rows) and DMR set
# (columns), faceted by sex; white asterisks mark q < 0.05 enrichments
# (non-significant cells get alpha = 0, hiding the asterisk).
gg <- ggplot(data = lola)
gg +
        geom_tile(aes(x = Regions, y = description, fill = oddsRatio)) +
        geom_text(aes(x = Regions, y = description, alpha = Significant), label = "*", color = "white", size = 14, nudge_y = -0.25) +
        facet_grid(cols = vars(Sex)) +
        scale_fill_gradientn("Odds Ratio", colors = c("black", "#FF0000"), values = c(0, 1),
                             na.value = "#FF0000", limits = c(0, max(lola$oddsRatio)), breaks = pretty_breaks(n = 3)) +
        scale_alpha_manual(breaks = c("TRUE", "FALSE"), values = c(1, 0), guide = FALSE) +
        theme_bw(base_size = 24) +
        theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
              panel.border = element_rect(color = "black", size = 1.25),
              panel.background = element_rect(fill = "black"), axis.ticks.x = element_line(size = 1.25),
              axis.ticks.y = element_line(size = 1.25), legend.key = element_blank(), legend.position = c(1.14, 0.83),
              legend.background = element_blank(), legend.title = element_text(size = 15), legend.text = element_text(size = 14),
              plot.margin = unit(c(0, 6, 0.5, 0.5), "lines"), axis.text.y = element_text(size = 14, color = "black"),
              axis.text.x = element_text(size = 14, color = "black", angle = 45, hjust = 1),
              axis.title = element_blank(), plot.title = element_text(size = 18, hjust = 0.5, vjust = 0),
              strip.background = element_blank(), strip.text = element_text(size = 15), legend.key.size = unit(1, "lines")) +
        scale_x_discrete(expand = c(0, 0)) +
        scale_y_discrete(expand = c(0, 0))
ggsave("Figures/LOLA CpG Context Enrichment Odds Ratio Heatmap.png", dpi = 600, width = 7, height = 4, units = "in")
rm(gg)
# All DMRs CpG Context Distribution -----------------------------------------------
# Load Regions ####
maleChroms <- c(paste("chr", 1:22, sep = ""), "chrX", "chrY", "chrM")
femaleChroms <- c(paste("chr", 1:22, sep = ""), "chrX", "chrM")
# NOTE(review): the DMRid = TRUE argument requires the loadRegions defined in
# "R Scripts/DMR Analysis Functions.R" (sourced above); the cluster version
# defined at the top of this file has no DMRid parameter -- confirm.
DiscDMRs <- list(Males = loadRegions("DMRs/Discovery/Diagnosis Males 50/DMRs_Dx_Discovery50_males.csv",
                                     chroms = maleChroms, sort = TRUE, DMRid = TRUE),
                 Females = loadRegions("DMRs/Discovery/Diagnosis Females 50/DMRs_Dx_Discovery50_females.csv",
                                       chroms = femaleChroms, sort = TRUE, DMRid = TRUE))
DiscBackground <- list(Males = loadRegions("DMRs/Discovery/Diagnosis Males 50/bsseq_background_Discovery50_males.csv",
                                           chroms = maleChroms, sort = TRUE),
                       Females = loadRegions("DMRs/Discovery/Diagnosis Females 50/bsseq_background_Discovery50_females.csv",
                                             chroms = femaleChroms, sort = TRUE))
RepDMRs <- list(Males = loadRegions("DMRs/Replication/Diagnosis Males 50/DMRs_Dx_Replication50_males.csv",
                                    chroms = maleChroms, sort = TRUE, DMRid = TRUE),
                Females = loadRegions("DMRs/Replication/Diagnosis Females 100/DMRs_Dx_Replication100_females.csv",
                                      chroms = femaleChroms, sort = TRUE, DMRid = TRUE))
RepBackground <- list(Males = loadRegions("DMRs/Replication/Diagnosis Males 50/bsseq_background_Replication50_males.csv",
                                          chroms = maleChroms, sort = TRUE),
                      Females = loadRegions("DMRs/Replication/Diagnosis Females 100/bsseq_background_Replication100_females.csv",
                                            chroms = femaleChroms, sort = TRUE))
# Get CpG Context ###
# Rebuild the hg38 CpG annotations as GRanges, split by context type
CpGs <- build_annotations(genome = "hg38", annotations = "hg38_cpgs") %>%
        GenomeInfoDb::keepStandardChromosomes(pruning.mode = "coarse")
CpGs <- list(Island = CpGs[CpGs$type == "hg38_cpg_islands"], Shore = CpGs[CpGs$type == "hg38_cpg_shores"],
             Shelf = CpGs[CpGs$type == "hg38_cpg_shelves"], OpenSea = CpGs[CpGs$type == "hg38_cpg_inter"])
# Total bases per context genome-wide, and the percentage breakdown
genome <- sapply(CpGs, function(x) sum(width(x)))
genome * 100 / sum(genome)
#    Island      Shore      Shelf    OpenSea
# 0.7069396  3.2250091  2.7543133 93.3137380
# Colors used for CpG contexts in downstream plots
contextColors <- c("Island" = "forestgreen", "Shore" = "goldenrod2", "Shelf" = "dodgerblue", "Open Sea" = "blue3")
# All DMRs CpG Context Distribution and Plot ####
# GRanges for every region set: background, hyper, and hypo DMRs, for each
# comparison (Discovery/Replication x Males/Females)
GR_Regions <- list(Disc_Males_Background = makeGRange(DiscBackground$Males, direction = "all"),
                   Disc_Males_Hyper = makeGRange(DiscDMRs$Males, direction = "hyper"),
                   Disc_Males_Hypo = makeGRange(DiscDMRs$Males, direction = "hypo"),
                   Disc_Females_Background = makeGRange(DiscBackground$Females, direction = "all"),
                   Disc_Females_Hyper = makeGRange(DiscDMRs$Females, direction = "hyper"),
                   Disc_Females_Hypo = makeGRange(DiscDMRs$Females, direction = "hypo"),
                   Rep_Males_Background = makeGRange(RepBackground$Males, direction = "all"),
                   Rep_Males_Hyper = makeGRange(RepDMRs$Males, direction = "hyper"),
                   Rep_Males_Hypo = makeGRange(RepDMRs$Males, direction = "hypo"),
                   Rep_Females_Background = makeGRange(RepBackground$Females, direction = "all"),
                   Rep_Females_Hyper = makeGRange(RepDMRs$Females, direction = "hyper"),
                   Rep_Females_Hypo = makeGRange(RepDMRs$Females, direction = "hypo"))
# Genome-wide context widths, duplicated so each sex panel gets a reference bar
Males_Genome <- genome
Females_Genome <- genome
# Base-pair overlap of each region set with each CpG context, converted to
# percentages per column; suppressWarnings() silences GRanges seqlevel
# mismatch warnings from intersect()
distribution <- sapply(GR_Regions, function(x){
        sapply(CpGs, function(y) suppressWarnings(intersect(x, y)) %>% width() %>% sum())}) %>%
        cbind(Males_Genome, Females_Genome, .) %>% apply(2, function (x) x * 100 / sum(x)) %>% as.data.frame()
# Row labels: one row per context, in reversed plotting order
distribution$Context <- c("Island", "Shore", "Shelf", "Open Sea") %>%
        factor(levels = rev(c("Island", "Shore", "Shelf", "Open Sea")))
# Long format for ggplot: one row per context x region set
distribution <- melt(distribution, id.vars = "Context")
colnames(distribution) <- c("Context", "Regions", "Proportion")
# NOTE: Sex assignment depends on column order above (2 genome columns of 4
# rows each, then 12 rows per sex for Discovery, then for Replication)
distribution$Sex <- c(rep(c("Males", "Females"), each = 4), rep(c("Males", "Females"), each = 12),
                      rep(c("Males", "Females"), each = 12)) %>% factor(levels = c("Males", "Females"))
# Convert the coded set names (e.g. "Disc_Males_Hyper") into display labels
distribution$Regions <- as.character(distribution$Regions) %>%
        str_replace_all(pattern = c("Males_" = "", "Females_" = "", "Disc_" = "Discovery ", "Rep_" = "Replication ")) %>%
        factor(levels = c("Genome", "Discovery Background", "Replication Background", "Discovery Hyper", "Replication Hyper",
                          "Discovery Hypo", "Replication Hypo"))
gg <- ggplot(distribution, aes(x = Regions, y = Proportion, fill = Context, color = Context))
gg +
geom_bar(stat = "identity") +
facet_grid(cols = vars(Sex)) +
theme_bw(base_size = 24) +
theme(panel.grid.major = element_blank(), panel.border = element_rect(color = "black", size = 1.25),
legend.key = element_blank(), panel.grid.minor = element_blank(), legend.position = c(1.15, 0.85),
legend.background = element_blank(), legend.key.size = unit(0.8, "cm"), axis.title.y = element_text(size = 22),
axis.ticks = element_line(size = 1.25, color = "black"), plot.title = element_text(size = 24, vjust = 0),
legend.text = element_text(size = 16, margin = unit(c(0, 0, 0, 0.5), "lines")),
strip.background = element_blank(), legend.direction = "vertical", panel.spacing.y = unit(0, "lines"),
plot.margin = unit(c(0.5, 9, 1, 1), "lines"), axis.title.x = element_blank(),
axis.text.x = element_text(size = 17, color = "black", angle = 45, hjust = 1),
axis.text.y = element_text(size = 17, color = "black"), legend.title = element_blank()) +
ylab("Width in CpG Context (%)") +
scale_fill_manual(values = contextColors) +
scale_color_manual(values = contextColors) +
coord_cartesian(ylim = c(0, 100)) +
scale_y_continuous(expand = c(0.004, 0))
ggsave("Figures/All DMR CpG Context Stacked Barplot.png", dpi = 600, width = 9, height = 7, units = "in")
# Get Autosome DMRs LOLA Enrichments ----------------------------------------------------
# Data ####
regionDB <- loadRegionDB(dbLocation = "/share/lasallelab/programs/LOLA/hg38", useCache = TRUE, limit = NULL,
                         collections = c("CpG_context"))
chroms <- paste0("chr", 1:22)

# Run one LOLA CpG-context enrichment: load the DMRs and background from
# baseDir, test the all/hyper/hypo DMR sets against the background, write the
# combined enrichment table, and archive it under destFile. This replaces four
# near-identical copy-pasted sections.
runLolaCpGEnrichment <- function(baseDir, dmrFile, backgroundFile, destFile, chroms, regionDB) {
  setwd(baseDir)  # kept to preserve the original per-dataset working-directory behavior
  DMRs <- loadRegions(file = dmrFile, chroms = chroms, sort = TRUE)
  DMRlist <- list("AllDMRs" = makeGRange(DMRs = DMRs, direction = "all"),
                  "HyperDMRs" = makeGRange(DMRs = DMRs, direction = "hyper"),
                  "HypoDMRs" = makeGRange(DMRs = DMRs, direction = "hypo"))
  Background <- loadRegions(file = backgroundFile, chroms = chroms, sort = TRUE) %>%
    makeGRange(direction = "all")
  Results <- runLOLA(userSets = DMRlist, userUniverse = Background, regionDB = regionDB,
                     cores = 2, redefineUserSets = TRUE)
  writeCombinedEnrichment(combinedResults = Results, outFolder = "LOLA", includeSplits = FALSE)
  file.copy(from = "LOLA/allEnrichments.tsv", to = destFile, overwrite = TRUE)
  invisible(NULL)
}

# Discovery Males DMRs ####
runLolaCpGEnrichment("/share/lasallelab/Charles/CM_WGBS_ASD_CordBlood/Bismark_Reports/Discovery/",
                     dmrFile = "Dx_Males/DMRs_Dx_Discovery50_males.csv",
                     backgroundFile = "Dx_Males/bsseq_background_Discovery50_males.csv",
                     destFile = "LOLA/LOLA_CpG_Context_Dx_Discovery50_males_auto_DMRs.tsv",
                     chroms = chroms, regionDB = regionDB)
# Discovery Females DMRs ####
runLolaCpGEnrichment("/share/lasallelab/Charles/CM_WGBS_ASD_CordBlood/Bismark_Reports/Discovery/",
                     dmrFile = "Dx_Females/DMRs_Dx_Discovery50_females.csv",
                     backgroundFile = "Dx_Females/bsseq_background_Discovery50_females.csv",
                     destFile = "LOLA/LOLA_CpG_Context_Dx_Discovery50_females_auto_DMRs.tsv",
                     chroms = chroms, regionDB = regionDB)
# Replication Males DMRs ####
runLolaCpGEnrichment("/share/lasallelab/Charles/CM_WGBS_ASD_CordBlood/Bismark_Reports/Replication/",
                     dmrFile = "Dx_Males/DMRs_Dx_Replication50_males.csv",
                     backgroundFile = "Dx_Males/bsseq_background_Replication50_males.csv",
                     destFile = "LOLA/LOLA_CpG_Context_Dx_Replication50_males_auto_DMRs.tsv",
                     chroms = chroms, regionDB = regionDB)
# Replication Females DMRs ####
runLolaCpGEnrichment("/share/lasallelab/Charles/CM_WGBS_ASD_CordBlood/Bismark_Reports/Replication/",
                     dmrFile = "Dx_Females_100/DMRs_Dx_Replication100_females.csv",
                     backgroundFile = "Dx_Females_100/bsseq_background_Replication100_females.csv",
                     destFile = "LOLA/LOLA_CpG_Context_Dx_Replication100_females_auto_DMRs.tsv",
                     chroms = chroms, regionDB = regionDB)
# Analyze Autosome DMRs LOLA Enrichments -----------------------------------------
# Load Data and Combine ####
# Read the four archived LOLA enrichment tables (one per study x sex).
lola <- list(read.delim("Tables/LOLA_CpG_Context_Dx_Discovery50_males_auto_DMRs.tsv", sep = "\t", header = TRUE,
                        stringsAsFactors = FALSE),
             read.delim("Tables/LOLA_CpG_Context_Dx_Discovery50_females_auto_DMRs.tsv", sep = "\t", header = TRUE,
                        stringsAsFactors = FALSE),
             read.delim("Tables/LOLA_CpG_Context_Dx_Replication50_males_auto_DMRs.tsv", sep = "\t", header = TRUE,
                        stringsAsFactors = FALSE),
             read.delim("Tables/LOLA_CpG_Context_Dx_Replication100_females_auto_DMRs.tsv", sep = "\t", header = TRUE,
                        stringsAsFactors = FALSE))
names(lola) <- c("Discovery_Males", "Discovery_Females", "Replication_Males", "Replication_Females")
# Keep only the directional DMR sets and FDR-adjust within each table
# (pValueLog is -log10(p), so 10^(-pValueLog) recovers the p-value).
lola <- lapply(lola, function(x){
  x <- subset(x, userSet %in% c("HyperDMRs", "HypoDMRs"))
  x$qValue <- p.adjust(10^(-x$pValueLog), method = "fdr")
  return(x)
})
# Harmonize column order across tables, then stack into one data frame.
lola <- lapply(lola, function(x) x[,colnames(lola[["Discovery_Males"]])])
lola <- rbind(lola[["Discovery_Males"]], lola[["Discovery_Females"]], lola[["Replication_Males"]], lola[["Replication_Females"]])
# Dataset labels: 8 rows per table (2 userSets x 4 CpG contexts).
lola$DMRs <- c(rep(c("Discovery_Males", "Discovery_Females", "Replication_Males", "Replication_Females"), each = 8)) %>%
  factor(levels = c("Discovery_Males", "Discovery_Females", "Replication_Males", "Replication_Females"))
table(lola$support < 5) # All FALSE
# Percent of DMRs overlapping the context (support = overlapping DMRs,
# c = non-overlapping DMRs in LOLA's contingency table).
lola$pct_DMRs <- lola$support * 100 / (lola$support + lola$c)
# Cap infinite -log10(p) values (p == 0) at the largest finite value observed;
# note this also fills any pre-existing NA pValueLog entries with that maximum.
lola$pValueLog[is.infinite(lola$pValueLog)] <- NA
lola$pValueLog[is.na(lola$pValueLog)] <- max(lola$pValueLog, na.rm = TRUE)
lola$pValue <- 10^(-lola$pValueLog)
# Same capping for -log10(q).
lola$qValueLog <- -log10(lola$qValue)
lola$qValueLog[is.infinite(lola$qValueLog)] <- NA
lola$qValueLog[is.na(lola$qValueLog)] <- max(lola$qValueLog, na.rm = TRUE)
lola <- lola[,c("DMRs", "userSet", "description", "pValue", "qValue", "pValueLog", "qValueLog", "oddsRatio", "support", "pct_DMRs",
                "b", "c", "d", "size")]
lola$userSet <- factor(lola$userSet, levels = c("HyperDMRs", "HypoDMRs"))
# Strip the "CpG " prefix so descriptions match the plotting levels.
lola$description <- gsub("CpG ", replacement = "", x = lola$description, fixed = TRUE) %>%
  factor(levels = rev(c("Island", "Shore", "Shelf", "Open Sea")))
lola <- lola[order(lola$DMRs, lola$userSet, lola$description),]
# Sex and Regions labels are positional within the sorted table (8 rows per
# dataset, 4 contexts per userSet). NOTE(review): depends on the sort above.
lola$Sex <- c(rep(c("Males", "Females"), each = 8), rep(c("Males", "Females"), each = 8)) %>%
  factor(levels = c("Males", "Females"))
lola$Regions <- c(rep(rep(c("Discovery Hyper", "Discovery Hypo"), each = 4), 2),
                  rep(rep(c("Replication Hyper", "Replication Hypo"), each = 4), 2)) %>%
  factor(levels = c("Discovery Hyper", "Replication Hyper", "Discovery Hypo", "Replication Hypo"))
lola$Significant <- (lola$qValue < 0.05) %>% factor(levels = c("TRUE", "FALSE"))
# No infinite odds ratios here, so no capping needed (unlike the chrX analysis).
table(is.infinite(lola$oddsRatio), lola$description)
#       Open Sea Shelf Shore Island
# FALSE        8     8     8      8
write.csv(lola, "Tables/LOLA Autosome DMRs CpG Context Enrichment Results.csv", row.names = FALSE)
# Plot Odds Ratio Heatmap ####
# Odds-ratio heatmap by CpG context (rows) and DMR set (columns), faceted by
# sex; a white asterisk marks enrichments significant at q < 0.05.
gg <- ggplot(data = lola)
gg +
  geom_tile(aes(x = Regions, y = description, fill = oddsRatio)) +
  geom_text(aes(x = Regions, y = description, alpha = Significant), label = "*", color = "white", size = 14, nudge_y = -0.25) +
  facet_grid(cols = vars(Sex)) +
  scale_fill_gradientn("Odds Ratio", colors = c("black", "#FF0000"), values = c(0, 1),
                       na.value = "#FF0000", limits = c(0, max(lola$oddsRatio)), breaks = pretty_breaks(n = 3)) +
  # alpha only toggles the asterisk on/off, so its legend is suppressed.
  # guide = "none" replaces the deprecated guide = FALSE (ggplot2 >= 3.3.4).
  scale_alpha_manual(breaks = c("TRUE", "FALSE"), values = c(1, 0), guide = "none") +
  theme_bw(base_size = 24) +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.border = element_rect(color = "black", size = 1.25),
        panel.background = element_rect(fill = "black"), axis.ticks.x = element_line(size = 1.25),
        axis.ticks.y = element_line(size = 1.25), legend.key = element_blank(), legend.position = c(1.14, 0.83),
        legend.background = element_blank(), legend.title = element_text(size = 15), legend.text = element_text(size = 14),
        plot.margin = unit(c(0, 6, 0.5, 0.5), "lines"), axis.text.y = element_text(size = 14, color = "black"),
        axis.text.x = element_text(size = 14, color = "black", angle = 45, hjust = 1),
        axis.title = element_blank(), plot.title = element_text(size = 18, hjust = 0.5, vjust = 0),
        strip.background = element_blank(), strip.text = element_text(size = 15), legend.key.size = unit(1, "lines")) +
  scale_x_discrete(expand = c(0, 0)) +
  scale_y_discrete(expand = c(0, 0))
ggsave("Figures/LOLA Autosome DMRs CpG Context Enrichment Odds Ratio Heatmap.png", dpi = 600, width = 7, height = 4, units = "in")
rm(gg)
# Autosome DMRs CpG Context Distribution -----------------------------------------------
# Load Regions ####
# DMRs and bsseq backgrounds restricted to the autosomes, per study and sex.
chroms <- paste0("chr", 1:22)
DiscDMRs <- list(Males = loadRegions("DMRs/Discovery/Diagnosis Males 50/DMRs_Dx_Discovery50_males.csv",
                                     chroms = chroms, sort = TRUE, DMRid = TRUE),
                 Females = loadRegions("DMRs/Discovery/Diagnosis Females 50/DMRs_Dx_Discovery50_females.csv",
                                       chroms = chroms, sort = TRUE, DMRid = TRUE))
DiscBackground <- list(Males = loadRegions("DMRs/Discovery/Diagnosis Males 50/bsseq_background_Discovery50_males.csv",
                                           chroms = chroms, sort = TRUE),
                       Females = loadRegions("DMRs/Discovery/Diagnosis Females 50/bsseq_background_Discovery50_females.csv",
                                             chroms = chroms, sort = TRUE))
RepDMRs <- list(Males = loadRegions("DMRs/Replication/Diagnosis Males 50/DMRs_Dx_Replication50_males.csv",
                                    chroms = chroms, sort = TRUE, DMRid = TRUE),
                Females = loadRegions("DMRs/Replication/Diagnosis Females 100/DMRs_Dx_Replication100_females.csv",
                                      chroms = chroms, sort = TRUE, DMRid = TRUE))
RepBackground <- list(Males = loadRegions("DMRs/Replication/Diagnosis Males 50/bsseq_background_Replication50_males.csv",
                                          chroms = chroms, sort = TRUE),
                      Females = loadRegions("DMRs/Replication/Diagnosis Females 100/bsseq_background_Replication100_females.csv",
                                            chroms = chroms, sort = TRUE))
# Get CpG Context ###
# hg38 CpG annotations on standard chromosomes, then subset to autosomes.
CpGs <- build_annotations(genome = "hg38", annotations = "hg38_cpgs") %>%
  GenomeInfoDb::keepStandardChromosomes(pruning.mode = "coarse")
CpGs <- list(Island = CpGs[CpGs$type == "hg38_cpg_islands"], Shore = CpGs[CpGs$type == "hg38_cpg_shores"],
             Shelf = CpGs[CpGs$type == "hg38_cpg_shelves"], OpenSea = CpGs[CpGs$type == "hg38_cpg_inter"])
CpGs <- lapply(CpGs, function(x) subset(x, seqnames %in% chroms))
# Total width per context on the autosomes. Sum over as.numeric() to guard
# against integer overflow (the open-sea total is far above
# .Machine$integer.max); vapply() pins the return type.
autosomes <- vapply(CpGs, function(x) sum(as.numeric(width(x))), numeric(1))
autosomes * 100 / sum(autosomes)
#    Island     Shore     Shelf    OpenSea
# 0.7289139 3.3326966 2.8456386 93.0927509
contextColors <- c("Island" = "forestgreen", "Shore" = "goldenrod2", "Shelf" = "dodgerblue", "Open Sea" = "blue3")
# Autosome DMRs CpG Context Distribution and Plot ####
# GRanges for the autosomal backgrounds and directional DMRs per study and sex.
GR_Regions <- list(Disc_Males_Background = makeGRange(DiscBackground$Males, direction = "all"),
                   Disc_Males_Hyper = makeGRange(DiscDMRs$Males, direction = "hyper"),
                   Disc_Males_Hypo = makeGRange(DiscDMRs$Males, direction = "hypo"),
                   Disc_Females_Background = makeGRange(DiscBackground$Females, direction = "all"),
                   Disc_Females_Hyper = makeGRange(DiscDMRs$Females, direction = "hyper"),
                   Disc_Females_Hypo = makeGRange(DiscDMRs$Females, direction = "hypo"),
                   Rep_Males_Background = makeGRange(RepBackground$Males, direction = "all"),
                   Rep_Males_Hyper = makeGRange(RepDMRs$Males, direction = "hyper"),
                   Rep_Males_Hypo = makeGRange(RepDMRs$Males, direction = "hypo"),
                   Rep_Females_Background = makeGRange(RepBackground$Females, direction = "all"),
                   Rep_Females_Hyper = makeGRange(RepDMRs$Females, direction = "hyper"),
                   Rep_Females_Hypo = makeGRange(RepDMRs$Females, direction = "hypo"))
# Duplicate the autosome-wide context widths so each sex facet gets an
# "Autosomes" reference bar.
Males_Autosomes <- autosomes
Females_Autosomes <- autosomes
# Width overlapping each CpG context per region set, as column percentages.
distribution <- sapply(GR_Regions, function(x){
  sapply(CpGs, function(y) suppressWarnings(intersect(x, y)) %>% width() %>% sum())}) %>%
  cbind(Males_Autosomes, Females_Autosomes, .) %>% apply(2, function (x) x * 100 / sum(x)) %>% as.data.frame()
distribution$Context <- c("Island", "Shore", "Shelf", "Open Sea") %>%
  factor(levels = rev(c("Island", "Shore", "Shelf", "Open Sea")))
# Long format: one row per context x region set.
distribution <- melt(distribution, id.vars = "Context")
colnames(distribution) <- c("Context", "Regions", "Proportion")
# Positional sex labels: 2 reference columns (4 rows each), then 12 rows per
# sex for discovery and replication. NOTE(review): tied to GR_Regions order.
distribution$Sex <- c(rep(c("Males", "Females"), each = 4), rep(c("Males", "Females"), each = 12),
                      rep(c("Males", "Females"), each = 12)) %>% factor(levels = c("Males", "Females"))
distribution$Regions <- as.character(distribution$Regions) %>%
  str_replace_all(pattern = c("Males_" = "", "Females_" = "", "Disc_" = "Discovery ", "Rep_" = "Replication ")) %>%
  factor(levels = c("Autosomes", "Discovery Background", "Replication Background", "Discovery Hyper", "Replication Hyper",
                    "Discovery Hypo", "Replication Hypo"))
# Stacked barplot of context proportions, faceted by sex, legend in the margin.
gg <- ggplot(distribution, aes(x = Regions, y = Proportion, fill = Context, color = Context))
gg +
  geom_bar(stat = "identity") +
  facet_grid(cols = vars(Sex)) +
  theme_bw(base_size = 24) +
  theme(panel.grid.major = element_blank(), panel.border = element_rect(color = "black", size = 1.25),
        legend.key = element_blank(), panel.grid.minor = element_blank(), legend.position = c(1.15, 0.85),
        legend.background = element_blank(), legend.key.size = unit(0.8, "cm"), axis.title.y = element_text(size = 22),
        axis.ticks = element_line(size = 1.25, color = "black"), plot.title = element_text(size = 24, vjust = 0),
        legend.text = element_text(size = 16, margin = unit(c(0, 0, 0, 0.5), "lines")),
        strip.background = element_blank(), legend.direction = "vertical", panel.spacing.y = unit(0, "lines"),
        plot.margin = unit(c(0.5, 9, 1, 1), "lines"), axis.title.x = element_blank(),
        axis.text.x = element_text(size = 17, color = "black", angle = 45, hjust = 1),
        axis.text.y = element_text(size = 17, color = "black"), legend.title = element_blank()) +
  ylab("Width in CpG Context (%)") +
  scale_fill_manual(values = contextColors) +
  scale_color_manual(values = contextColors) +
  coord_cartesian(ylim = c(0, 100)) +
  scale_y_continuous(expand = c(0.004, 0))
ggsave("Figures/Autosome DMRs CpG Context Stacked Barplot.png", dpi = 600, width = 9, height = 7, units = "in")
# Get ChrX LOLA Enrichments ----------------------------------------------------
# Data ####
regionDB <- loadRegionDB(dbLocation = "/share/lasallelab/programs/LOLA/hg38", useCache = TRUE, limit = NULL,
                         collections = c("CpG_context"))

# Run one LOLA CpG-context enrichment: load the DMRs and background from
# baseDir, test the all/hyper/hypo DMR sets against the background, write the
# combined enrichment table, and archive it under destFile. This replaces four
# near-identical copy-pasted sections.
runLolaCpGEnrichment <- function(baseDir, dmrFile, backgroundFile, destFile, chroms, regionDB) {
  setwd(baseDir)  # kept to preserve the original per-dataset working-directory behavior
  DMRs <- loadRegions(file = dmrFile, chroms = chroms, sort = TRUE)
  DMRlist <- list("AllDMRs" = makeGRange(DMRs = DMRs, direction = "all"),
                  "HyperDMRs" = makeGRange(DMRs = DMRs, direction = "hyper"),
                  "HypoDMRs" = makeGRange(DMRs = DMRs, direction = "hypo"))
  Background <- loadRegions(file = backgroundFile, chroms = chroms, sort = TRUE) %>%
    makeGRange(direction = "all")
  Results <- runLOLA(userSets = DMRlist, userUniverse = Background, regionDB = regionDB,
                     cores = 2, redefineUserSets = TRUE)
  writeCombinedEnrichment(combinedResults = Results, outFolder = "LOLA", includeSplits = FALSE)
  file.copy(from = "LOLA/allEnrichments.tsv", to = destFile, overwrite = TRUE)
  invisible(NULL)
}

# Discovery Males DMRs ####
runLolaCpGEnrichment("/share/lasallelab/Charles/CM_WGBS_ASD_CordBlood/Bismark_Reports/Discovery/",
                     dmrFile = "Dx_Males/DMRs_Dx_Discovery50_males.csv",
                     backgroundFile = "Dx_Males/bsseq_background_Discovery50_males.csv",
                     destFile = "LOLA/LOLA_CpG_Context_Dx_Discovery50_males_chrX_DMRs.tsv",
                     chroms = "chrX", regionDB = regionDB)
# Discovery Females DMRs ####
runLolaCpGEnrichment("/share/lasallelab/Charles/CM_WGBS_ASD_CordBlood/Bismark_Reports/Discovery/",
                     dmrFile = "Dx_Females/DMRs_Dx_Discovery50_females.csv",
                     backgroundFile = "Dx_Females/bsseq_background_Discovery50_females.csv",
                     destFile = "LOLA/LOLA_CpG_Context_Dx_Discovery50_females_chrX_DMRs.tsv",
                     chroms = "chrX", regionDB = regionDB)
# Replication Males DMRs ####
runLolaCpGEnrichment("/share/lasallelab/Charles/CM_WGBS_ASD_CordBlood/Bismark_Reports/Replication/",
                     dmrFile = "Dx_Males/DMRs_Dx_Replication50_males.csv",
                     backgroundFile = "Dx_Males/bsseq_background_Replication50_males.csv",
                     destFile = "LOLA/LOLA_CpG_Context_Dx_Replication50_males_chrX_DMRs.tsv",
                     chroms = "chrX", regionDB = regionDB)
# Replication Females DMRs ####
runLolaCpGEnrichment("/share/lasallelab/Charles/CM_WGBS_ASD_CordBlood/Bismark_Reports/Replication/",
                     dmrFile = "Dx_Females_100/DMRs_Dx_Replication100_females.csv",
                     backgroundFile = "Dx_Females_100/bsseq_background_Replication100_females.csv",
                     destFile = "LOLA/LOLA_CpG_Context_Dx_Replication100_females_chrX_DMRs.tsv",
                     chroms = "chrX", regionDB = regionDB)
# Analyze ChrX LOLA Enrichments -----------------------------------------
# Load Data and Combine ####
# Read the four archived chrX LOLA enrichment tables (one per study x sex).
lola <- list(read.delim("Tables/LOLA_CpG_Context_Dx_Discovery50_males_chrX_DMRs.tsv", sep = "\t", header = TRUE,
                        stringsAsFactors = FALSE),
             read.delim("Tables/LOLA_CpG_Context_Dx_Discovery50_females_chrX_DMRs.tsv", sep = "\t", header = TRUE,
                        stringsAsFactors = FALSE),
             read.delim("Tables/LOLA_CpG_Context_Dx_Replication50_males_chrX_DMRs.tsv", sep = "\t", header = TRUE,
                        stringsAsFactors = FALSE),
             read.delim("Tables/LOLA_CpG_Context_Dx_Replication100_females_chrX_DMRs.tsv", sep = "\t", header = TRUE,
                        stringsAsFactors = FALSE))
names(lola) <- c("Discovery_Males", "Discovery_Females", "Replication_Males", "Replication_Females")
# Keep only the directional DMR sets and FDR-adjust within each table
# (pValueLog is -log10(p)).
lola <- lapply(lola, function(x){
  x <- subset(x, userSet %in% c("HyperDMRs", "HypoDMRs"))
  x$qValue <- p.adjust(10^(-x$pValueLog), method = "fdr")
  return(x)
})
# Harmonize column order across tables, then stack into one data frame.
lola <- lapply(lola, function(x) x[,colnames(lola[["Discovery_Males"]])])
lola <- rbind(lola[["Discovery_Males"]], lola[["Discovery_Females"]], lola[["Replication_Males"]], lola[["Replication_Females"]])
# Dataset labels: 8 rows per table (2 userSets x 4 CpG contexts).
lola$DMRs <- c(rep(c("Discovery_Males", "Discovery_Females", "Replication_Males", "Replication_Females"), each = 8)) %>%
  factor(levels = c("Discovery_Males", "Discovery_Females", "Replication_Males", "Replication_Females"))
# Six enrichments have fewer than 5 overlapping DMRs -- interpret cautiously.
table(lola$support < 5) # TRUE 6
lola$pct_DMRs <- lola$support * 100 / (lola$support + lola$c)
# Cap infinite -log10(p) values (p == 0) at the largest finite value observed;
# note this also fills any pre-existing NA pValueLog entries with that maximum.
lola$pValueLog[is.infinite(lola$pValueLog)] <- NA
lola$pValueLog[is.na(lola$pValueLog)] <- max(lola$pValueLog, na.rm = TRUE)
lola$pValue <- 10^(-lola$pValueLog)
# Same capping for -log10(q).
lola$qValueLog <- -log10(lola$qValue)
lola$qValueLog[is.infinite(lola$qValueLog)] <- NA
lola$qValueLog[is.na(lola$qValueLog)] <- max(lola$qValueLog, na.rm = TRUE)
lola <- lola[,c("DMRs", "userSet", "description", "pValue", "qValue", "pValueLog", "qValueLog", "oddsRatio", "support", "pct_DMRs",
                "b", "c", "d", "size")]
lola$userSet <- factor(lola$userSet, levels = c("HyperDMRs", "HypoDMRs"))
lola$description <- gsub("CpG ", replacement = "", x = lola$description, fixed = TRUE) %>%
  factor(levels = rev(c("Island", "Shore", "Shelf", "Open Sea")))
lola <- lola[order(lola$DMRs, lola$userSet, lola$description),]
# Sex and Regions labels are positional within the sorted table (8 rows per
# dataset, 4 contexts per userSet). NOTE(review): depends on the sort above.
lola$Sex <- c(rep(c("Males", "Females"), each = 8), rep(c("Males", "Females"), each = 8)) %>%
  factor(levels = c("Males", "Females"))
lola$Regions <- c(rep(rep(c("Discovery Hyper", "Discovery Hypo"), each = 4), 2),
                  rep(rep(c("Replication Hyper", "Replication Hypo"), each = 4), 2)) %>%
  factor(levels = c("Discovery Hyper", "Replication Hyper", "Discovery Hypo", "Replication Hypo"))
lola$Significant <- (lola$qValue < 0.05) %>% factor(levels = c("TRUE", "FALSE"))
# Four Open Sea odds ratios are infinite (a zero cell in the contingency
# table); replace them with the largest finite Open Sea odds ratio so the
# heatmap's fill scale stays finite.
table(is.infinite(lola$oddsRatio), lola$description)
#       Open Sea Shelf Shore Island
# FALSE        4     8     8      8
# TRUE         4     0     0      0
lola$oddsRatio[is.infinite(lola$oddsRatio)] <- NA
lola$oddsRatio[is.na(lola$oddsRatio)] <- max(lola$oddsRatio[lola$description == "Open Sea"], na.rm = TRUE)
write.csv(lola, "Tables/LOLA CpG Context chrX Enrichment Results.csv", row.names = FALSE)
# Plot Odds Ratio Heatmap ####
# Odds-ratio heatmap for chrX enrichments by CpG context (rows) and DMR set
# (columns), faceted by sex; a white asterisk marks enrichments with q < 0.05.
gg <- ggplot(data = lola)
gg +
  geom_tile(aes(x = Regions, y = description, fill = oddsRatio)) +
  geom_text(aes(x = Regions, y = description, alpha = Significant), label = "*", color = "white", size = 14, nudge_y = -0.25) +
  facet_grid(cols = vars(Sex)) +
  scale_fill_gradientn("Odds Ratio", colors = c("black", "#FF0000"), values = c(0, 1),
                       na.value = "#FF0000", limits = c(0, max(lola$oddsRatio)), breaks = pretty_breaks(n = 3)) +
  # alpha only toggles the asterisk on/off, so its legend is suppressed.
  # guide = "none" replaces the deprecated guide = FALSE (ggplot2 >= 3.3.4).
  scale_alpha_manual(breaks = c("TRUE", "FALSE"), values = c(1, 0), guide = "none") +
  theme_bw(base_size = 24) +
  theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        panel.border = element_rect(color = "black", size = 1.25),
        panel.background = element_rect(fill = "black"), axis.ticks.x = element_line(size = 1.25),
        axis.ticks.y = element_line(size = 1.25), legend.key = element_blank(), legend.position = c(1.14, 0.83),
        legend.background = element_blank(), legend.title = element_text(size = 15), legend.text = element_text(size = 14),
        plot.margin = unit(c(0, 6, 0.5, 0.5), "lines"), axis.text.y = element_text(size = 14, color = "black"),
        axis.text.x = element_text(size = 14, color = "black", angle = 45, hjust = 1),
        axis.title = element_blank(), plot.title = element_text(size = 18, hjust = 0.5, vjust = 0),
        strip.background = element_blank(), strip.text = element_text(size = 15), legend.key.size = unit(1, "lines")) +
  scale_x_discrete(expand = c(0, 0)) +
  scale_y_discrete(expand = c(0, 0))
ggsave("Figures/LOLA CpG Context chrX Enrichment Odds Ratio Heatmap.png", dpi = 600, width = 7, height = 4, units = "in")
rm(gg)
# ChrX CpG Context Distribution -----------------------------------------------
# Load Regions ####
# DMRs and bsseq backgrounds restricted to chrX, per study and sex.
DiscDMRs <- list(Males = loadRegions("DMRs/Discovery/Diagnosis Males 50/DMRs_Dx_Discovery50_males.csv",
                                     chroms = "chrX", sort = TRUE, DMRid = TRUE),
                 Females = loadRegions("DMRs/Discovery/Diagnosis Females 50/DMRs_Dx_Discovery50_females.csv",
                                       chroms = "chrX", sort = TRUE, DMRid = TRUE))
DiscBackground <- list(Males = loadRegions("DMRs/Discovery/Diagnosis Males 50/bsseq_background_Discovery50_males.csv",
                                           chroms = "chrX", sort = TRUE),
                       Females = loadRegions("DMRs/Discovery/Diagnosis Females 50/bsseq_background_Discovery50_females.csv",
                                             chroms = "chrX", sort = TRUE))
RepDMRs <- list(Males = loadRegions("DMRs/Replication/Diagnosis Males 50/DMRs_Dx_Replication50_males.csv",
                                    chroms = "chrX", sort = TRUE, DMRid = TRUE),
                Females = loadRegions("DMRs/Replication/Diagnosis Females 100/DMRs_Dx_Replication100_females.csv",
                                      chroms = "chrX", sort = TRUE, DMRid = TRUE))
RepBackground <- list(Males = loadRegions("DMRs/Replication/Diagnosis Males 50/bsseq_background_Replication50_males.csv",
                                          chroms = "chrX", sort = TRUE),
                      Females = loadRegions("DMRs/Replication/Diagnosis Females 100/bsseq_background_Replication100_females.csv",
                                            chroms = "chrX", sort = TRUE))
# Get CpG Context ###
# hg38 CpG annotations on standard chromosomes, then subset to chrX only.
CpGs <- build_annotations(genome = "hg38", annotations = "hg38_cpgs") %>%
  GenomeInfoDb::keepStandardChromosomes(pruning.mode = "coarse")
CpGs <- list(Island = CpGs[CpGs$type == "hg38_cpg_islands"], Shore = CpGs[CpGs$type == "hg38_cpg_shores"],
             Shelf = CpGs[CpGs$type == "hg38_cpg_shelves"], OpenSea = CpGs[CpGs$type == "hg38_cpg_inter"])
CpGs <- lapply(CpGs, function(x) x[seqnames(x) == "chrX"])
# Total width per context on chrX and the resulting percentages.
chrX <- sapply(CpGs, function(x) sum(width(x)))
chrX * 100 / sum(chrX)
#    Island     Shore     Shelf    OpenSea
# 0.4816162 2.0430916 1.7931661 95.6821260
contextColors <- c("Island" = "forestgreen", "Shore" = "goldenrod2", "Shelf" = "dodgerblue", "Open Sea" = "blue3")
# ChrX DMRs CpG Context Distribution and Plot ####
# GRanges for the chrX backgrounds and directional DMRs per study and sex.
GR_Regions <- list(Disc_Males_Background = makeGRange(DiscBackground$Males, direction = "all"),
                   Disc_Males_Hyper = makeGRange(DiscDMRs$Males, direction = "hyper"),
                   Disc_Males_Hypo = makeGRange(DiscDMRs$Males, direction = "hypo"),
                   Disc_Females_Background = makeGRange(DiscBackground$Females, direction = "all"),
                   Disc_Females_Hyper = makeGRange(DiscDMRs$Females, direction = "hyper"),
                   Disc_Females_Hypo = makeGRange(DiscDMRs$Females, direction = "hypo"),
                   Rep_Males_Background = makeGRange(RepBackground$Males, direction = "all"),
                   Rep_Males_Hyper = makeGRange(RepDMRs$Males, direction = "hyper"),
                   Rep_Males_Hypo = makeGRange(RepDMRs$Males, direction = "hypo"),
                   Rep_Females_Background = makeGRange(RepBackground$Females, direction = "all"),
                   Rep_Females_Hyper = makeGRange(RepDMRs$Females, direction = "hyper"),
                   Rep_Females_Hypo = makeGRange(RepDMRs$Females, direction = "hypo"))
# Duplicate the chrX context widths so each sex facet gets a "chrX" reference bar.
Males_chrX <- chrX
Females_chrX <- chrX
# Width overlapping each CpG context per region set, as column percentages.
distribution <- sapply(GR_Regions, function(x){
  sapply(CpGs, function(y) suppressWarnings(intersect(x, y)) %>% width() %>% sum())}) %>%
  cbind(Males_chrX, Females_chrX, .) %>% apply(2, function (x) x * 100 / sum(x)) %>% as.data.frame()
distribution$Context <- c("Island", "Shore", "Shelf", "Open Sea") %>%
  factor(levels = rev(c("Island", "Shore", "Shelf", "Open Sea")))
# Long format: one row per context x region set.
distribution <- melt(distribution, id.vars = "Context")
colnames(distribution) <- c("Context", "Regions", "Proportion")
# Positional sex labels: 2 reference columns (4 rows each), then 12 rows per
# sex for discovery and replication. NOTE(review): tied to GR_Regions order.
distribution$Sex <- c(rep(c("Males", "Females"), each = 4), rep(c("Males", "Females"), each = 12),
                      rep(c("Males", "Females"), each = 12)) %>% factor(levels = c("Males", "Females"))
distribution$Regions <- as.character(distribution$Regions) %>%
  str_replace_all(pattern = c("Males_" = "", "Females_" = "", "Disc_" = "Discovery ", "Rep_" = "Replication ")) %>%
  factor(levels = c("chrX", "Discovery Background", "Replication Background", "Discovery Hyper", "Replication Hyper",
                    "Discovery Hypo", "Replication Hypo"))
# Stacked barplot of context proportions, faceted by sex, legend in the margin.
gg <- ggplot(distribution, aes(x = Regions, y = Proportion, fill = Context, color = Context))
gg +
  geom_bar(stat = "identity") +
  facet_grid(cols = vars(Sex)) +
  theme_bw(base_size = 24) +
  theme(panel.grid.major = element_blank(), panel.border = element_rect(color = "black", size = 1.25),
        legend.key = element_blank(), panel.grid.minor = element_blank(), legend.position = c(1.15, 0.85),
        legend.background = element_blank(), legend.key.size = unit(0.8, "cm"), axis.title.y = element_text(size = 22),
        axis.ticks = element_line(size = 1.25, color = "black"), plot.title = element_text(size = 24, vjust = 0),
        legend.text = element_text(size = 16, margin = unit(c(0, 0, 0, 0.5), "lines")),
        strip.background = element_blank(), legend.direction = "vertical", panel.spacing.y = unit(0, "lines"),
        plot.margin = unit(c(0.5, 9, 1, 1), "lines"), axis.title.x = element_blank(),
        axis.text.x = element_text(size = 17, color = "black", angle = 45, hjust = 1),
        axis.text.y = element_text(size = 17, color = "black"), legend.title = element_blank()) +
  ylab("Width in CpG Context (%)") +
  scale_fill_manual(values = contextColors) +
  scale_color_manual(values = contextColors) +
  coord_cartesian(ylim = c(0, 100)) +
  scale_y_continuous(expand = c(0.004, 0))
ggsave("Figures/ChrX DMR CpG Context Stacked Barplot.png", dpi = 600, width = 9, height = 7, units = "in")
|
a89d33e4cbcd8ed70e441bbe671db73f3ac0db6a | 7f6ebeae15924e7aa0ce463859c8122e438c62aa | /chap04/APS.R | 1b3a6983102cdf1f04bed4c414ad03189d2e598d | [] | no_license | leetschau/app-of-rlang-in-stats | fd9d67f69e4c9f1373beb66a9dcebc7be3e1e51d | fb3c01873167f87944c7d35cb42cd5ada52dd483 | refs/heads/master | 2021-09-06T06:47:24.250126 | 2018-02-03T11:58:31 | 2018-02-03T11:58:31 | 120,087,020 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 125 | r | APS.R | ## (1)
pbinom(q = 6, size = 30, prob = 0.4)        # P(X <= 6), X ~ Binomial(n = 30, p = 0.4)
## (2)
1 - ppois(q = 6, lambda = 2.4)              # P(X > 6), X ~ Poisson(lambda = 2.4)
## (3)
phyper(q = 3, m = 10, n = 52 - 10, k = 34)  # P(X <= 3), 34 draws from 52 with 10 successes
71bdae1f1e9ff69cfef866a9465c35f31e3fd9a1 | eb46831a43d55a348e29989d827401f2eb49e30a | /Functions/mandateData_exposure.R | e8dd46019cc441edccb957d52c6aa12e0288e533 | [] | no_license | bplloyd/R-risk-mgmt | 32a0315bdd95790acb4c98cc87c52993a1cd8f9a | ad0e0f3596e5588fb4c483486e271fb98ad942cc | refs/heads/master | 2020-12-13T09:19:32.536834 | 2016-08-03T22:08:14 | 2016-08-03T22:08:14 | 49,309,816 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 534 | r | mandateData_exposure.R | mandateData_exposure = function(id, deltaAdj = 0)
{
  # Fetch the rolling exposure series for one mandate via the
  # usp_Exposure_Rolling stored procedure and return it as an xts object
  # indexed by the DateReported column.
  #
  # id:       mandate identifier, passed as the @id parameter.
  # deltaAdj: exposure mode flag, passed as @exposureMode (0 by default).
  #
  # Packages are attached inside the function to preserve the original
  # behavior of loading them on first call.
  library(data.table)
  library(xts)
  library(PerformanceAnalytics)
  parID = paste0("@id = ", id)
  parDA = paste0("@exposureMode = ", deltaAdj)
  res = executeSP(procname = "usp_Exposure_Rolling", paramstring = paste(parID, parDA, sep = ", "))
  # Convert through as.character() so the date index works whether
  # DateReported comes back as a factor or as a character column (the default
  # for data frames since R 4.0); the original as.Date.factor() call errors
  # on character input.
  res = xts(res[, 2:ncol(res)], order.by = as.Date(as.character(res$DateReported)))
  return(res)
}
23b3016ad921b833d6a3d11ce45b84fac661d57a | 8abffdffea71532e93618d622e8efd8bf831332b | /veterans-0-import.R | afb85e6db79c6907b883b2026eabbe863f3ff6bc | [] | no_license | andrie/veterans | 34577d46b5dc014aa39918f538092495bcd464ce | 265ef2abbb144e017e4c45cede75c5fc9df015b9 | refs/heads/master | 2021-01-23T13:48:45.184065 | 2013-10-28T10:35:57 | 2013-10-28T10:35:57 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 128 | r | veterans-0-import.R | datafile <- "data/cup98LRN.txt"
rdsfile <- "data/cup98LRN.rds"
dat <- read.csv(datafile)
dim(dat)
saveRDS(dat, file=rdsfile)
|
4316b00c423b9f5ee051aaa25498bae0fa04ac40 | 2efd6577c2fa013630ba2c319ae6f7a898e5226c | /man/plot.CoreModel.Rd | 7bd350d9491ec2f80289d15b35950c05f7fb93eb | [] | no_license | rmarko/CORElearn | 983ea2d727798cbe6980b78584475190e968c6ab | fc696cb021a6117b0a4cd289e7dd8ce893e04e7b | refs/heads/master | 2022-11-19T03:04:22.661883 | 2022-11-06T11:38:21 | 2022-11-06T11:38:21 | 76,198,397 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,822 | rd | plot.CoreModel.Rd | \name{plot.CoreModel}
\alias{plot.CoreModel}
\title{ Visualization of CoreModel models }
\description{
The method \code{plot} visualizes the models returned by CoreModel()
function or summaries obtained by applying these models to data.
Different plots can be produced depending on the type of the model.
}
\usage{
\method{plot}{CoreModel}(x, trainSet, rfGraphType=c("attrEval", "outliers", "scaling",
"prototypes", "attrEvalCluster"), clustering=NULL, ...)
}
\arguments{
\item{x}{The model structure as returned by \code{\link{CoreModel}}.}
\item{trainSet}{ The data frame containing training data which produced the model \code{x}. }
\item{rfGraphType}{ The type of the graph to produce for random forest models. See details.}
\item{clustering}{The clustering of the training instances used in some model types. See details.}
\item{\dots }{ Other options controlling graphical output passed to additional graphical functions.}
}
\details{
The output of function \code{\link{CoreModel}} is visualized. Depending on the model type, different visualizations
are produced. Currently, classification tree, regression tree, and random forests are supported
(models "tree", "regTree", "rf", and "rfNear").
For classification and regression trees (models "tree" and "regTree") the visualization produces a graph
representing structure
of classification and regression tree, respectively. This process exploits graphical capabilities of
\code{\link{rpart.plot}} package. Internal structures of
\code{CoreModel} are converted to \code{\link{rpart.object}} and then visualized by calling
\code{\link{rpart.plot}} using default parameters. Any additional parameters are passed on to this function. For further
control use the \code{\link{getRpartModel}} function and call the function \code{\link{rpart.plot}}
or \code{\link{plot.rpart}} with different parameters.
Note that \code{rpart.plot} can only display a single value in a leaf, which is not appropriate for model trees using e.g.,
linear regression in the leaves. For these cases function \code{\link{display}} is a better alternative.
For random forest models (models "rf" and "rfNear") different types of visualizations can be produced depending on the
\code{graphType} parameter:
\itemize{
\item \code{"attrEval"} the attributes are evaluated with random forest model and the importance scores are then
visualized. For details see \code{\link{rfAttrEval}}.
\item \code{"attrEvalClustering"} similarly to the \code{"attrEval"} the attributes are evaluated with random forest
model and the importance scores are then visualized, but the importance scores are generated
for each cluster separately. The parameter \code{clustering} provides clustering information on
the \code{trainSet}. If \code{clustering} parameter is set to NULL, the class values are used as
clustering information and visualization of attribute importance for each class separately is
generated.
For details see \code{\link{rfAttrEvalClustering}}.
\item \code{"outliers"} the random forest proximity measure of training instances in \code{trainSet}
is visualized and outliers for each class separately can be detected.
For details see \code{\link{rfProximity}} and \code{\link{rfOutliers}}.
\item \code{"prototypes"} typical instances are found based on predicted class probabilities
and their values are visualized (see \code{\link{classPrototypes}}).
\item \code{"scaling"} returns a scaling plot of training instances in a two dimensional space using
random forest based proximity as the distance (see \code{\link{rfProximity}}
and a scaling function \code{\link{cmdscale}}).
}
}
\value{
The method returns no value.
}
\examples{
# decision tree
dataset <- iris
md <- CoreModel(Species ~ ., dataset, model="tree")
plot(md, dataset) # additional parameters are passed directly to rpart.plot
# Additional visualizations can be obtained by explicit conversion to rpart.object
#rpm <- getRpartModel(md,dataset)
# and than setting graphical parameters in plot.rpart and text.rpart
#require(rpart)
# E.g., set angle to tan(0.5)=45 (degrees) and length of branches at least 5,
# try to make a dendrogram more compact
#plot(rpm, branch=0.5, minbranch=5, compress=TRUE)
#(pretty=0) full names of attributes, numbers to 3 decimals,
#text(rpm, pretty=0, digits=3)
destroyModels(md) # clean up
# regression tree
dataset <- CO2
mdr <- CoreModel(uptake ~ ., dataset, model="regTree")
plot(mdr, dataset)
destroyModels(mdr) # clean up
#random forests
dataset <- iris
mdRF <- CoreModel(Species ~ ., dataset, model="rf", rfNoTrees=30, maxThreads=1)
plot(mdRF, dataset, rfGraphType="attrEval")
plot(mdRF, dataset, rfGraphType="outliers")
plot(mdRF, dataset, rfGraphType="scaling")
plot(mdRF, dataset, rfGraphType="prototypes")
plot(mdRF, dataset, rfGraphType="attrEvalCluster", clustering=NULL)
destroyModels(mdRF) # clean up
}
\author{ John Adeyanju Alao (initial implementation) and Marko Robnik-Sikonja (integration, improvements)}
\seealso{
\code{\link{CoreModel}},
\code{\link{rfProximity}},
\code{\link{pam}},
\code{\link{rfClustering}},
\code{\link{rfAttrEvalClustering}},
\code{\link{rfOutliers}},
\code{\link{classPrototypes}},
\code{\link{cmdscale}}
}
\references{
Leo Breiman: Random Forests. \emph{Machine Learning Journal}, 45:5-32, 2001
}
\keyword{cluster}
\keyword{robust}
\keyword{tree}
|
4ff091cd81413c5b81bde3d8fcc7414207ddd9ef | 35de14603463a45028bd2aca76fa336c41186577 | /man/NOTT_2019.bigwig_metadata.Rd | 0fbade29c1b168f2b0980476cda47c471fa62e06 | [
"MIT"
] | permissive | UKDRI/echolocatoR | e3cf1d65cc7113d02b2403960d6793b9249892de | 0ccf40d2f126f755074e731f82386e4e01d6f6bb | refs/heads/master | 2023-07-14T21:55:27.825635 | 2021-08-28T17:02:33 | 2021-08-28T17:02:33 | 416,442,683 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,492 | rd | NOTT_2019.bigwig_metadata.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{NOTT_2019.bigwig_metadata}
\alias{NOTT_2019.bigwig_metadata}
\title{Metadata and links to data}
\format{
An object of class \code{data.table} (inherits from \code{data.frame}) with 18 rows and 14 columns.
}
\source{
\url{https://science.sciencemag.org/content/366/6469/1134}
}
\usage{
NOTT_2019.bigwig_metadata
}
\description{
Metadata for cell type-specific epigenomic bigWig files hosted on UCSC Genome Browser.
bigWig files contain the genomic ranges from each epigenomic assay,
as well as a Score column which describes the peaks of the aggregate reads.
}
\examples{
\dontrun{
NOTT_2019.bigwig_metadata <- data.table::data.table(readxl::read_excel("~/Desktop/Fine_Mapping/echolocatoR/annotations/Nott_2019/Nott_2019.snEpigenomics.xlsx"))
usethis::use_data(NOTT_2019.bigwig_metadata, overwrite = T)
}
}
\seealso{
Other NOTT_2019:
\code{\link{NOTT_2019.epigenomic_histograms}()},
\code{\link{NOTT_2019.get_epigenomic_peaks}()},
\code{\link{NOTT_2019.get_interactions}()},
\code{\link{NOTT_2019.get_interactome}()},
\code{\link{NOTT_2019.get_promoter_celltypes}()},
\code{\link{NOTT_2019.get_promoter_interactome_data}()},
\code{\link{NOTT_2019.get_regulatory_regions}()},
\code{\link{NOTT_2019.interactome}},
\code{\link{NOTT_2019.plac_seq_plot}()},
\code{\link{NOTT_2019.superenhancer_interactome}},
\code{\link{NOTT_2019.superenhancers}()}
}
\concept{NOTT_2019}
\keyword{datasets}
|
c59e23644f14652b41d861785665c770a7e157b4 | 5d24927b8ea91179106f2f4055334c7ebdeb6ecb | /server.R | c18fac0ab804dacf319c8f29a64288d709de33c2 | [] | no_license | victorabelmurcia/UDE-APP | 4b6bc2176d237d3852673cd6cf67f4296ba02e48 | 30bb0f21f709ccc9a5bc2899e1742b1571b5fdf4 | refs/heads/master | 2021-01-14T09:36:41.144350 | 2016-01-08T16:59:42 | 2016-01-08T16:59:42 | 49,276,521 | 0 | 0 | null | 2016-01-08T14:28:34 | 2016-01-08T14:28:34 | null | UTF-8 | R | false | false | 530 | r | server.R | # server.R --- defines the logic of the application
# Logic
shinyServer(function(input, output) {
# Load user data
datasetInput <- reactive({
inputData <- input$data
if (is.null(inputData)) return(NULL)
data <- read.csv(inputData$datapath, header=input$header, sep=input$sep, quote=input$quote)
return(data)
})
# TEST
output$test <- renderPrint({
data <- datasetInput()
head(data)
})
}
) |
af3d0ec658631035efa15bde20c0202b05542277 | 994861903a0f4ccd5e180ba351cd803a66274313 | /shinyapps/app1/server.R | d33dfc92f9f45e0f9ed5a7e300b46fc79a5c7e98 | [] | no_license | arminakvn/philips-data-shiny | b40f4c9c94fe619f6cf0653205fe4628158b981c | d59e9e467c42edeacdaf68e07fc78a748bd913ac | refs/heads/master | 2020-02-26T17:08:00.437388 | 2016-10-23T19:34:46 | 2016-10-23T19:34:46 | 71,647,372 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,445 | r | server.R | library(shiny)
# install.packages("ggthemes")
# install.packages("lubridate")
# install.packages("gstat")
# install.packages("sp")
# install.packages("maptools")
# install.packages("ggmap")
library(ggthemes)
library(lubridate)
library(gstat)
library(sp)
library(maptools)
library(ggmap)
# =========== init / plot ============
theme_set(theme_solarized(light=F)+ theme(panel.background=element_rect(size=0), strip.background=element_rect(fill="transparent", size = 0), strip.text=element_text(colour="#586e75")))
scale_colour_discrete <- function(...) scale_color_solarized()
scale_fill_discrete <- function(...) scale_color_solarized()
start = ymd_hms("2016-08-24 0:00:00", tz = "America/Los_Angeles")
end = ymd_hms("2016-08-24 23:59:00", tz = "America/Los_Angeles")
setint <- function(sub, start, end) {sub <- sub[sub$time >=start & sub$time <=end,]; return(sub)}
prep <- function(sub,start,end) {
sub <- setint(sub,start,end)
sub$lat <- cut(sub$Latitude, lats)
sub$lng <- cut(sub$Longitude, lons)
return(sub)
}
lons <- c(-118.289444,
-118.2891023,
-118.2885641,
-118.2881616,
-118.2876,
-118.2871086,
-118.2869588,
-118.2864767)
lats <- c(34.0927221,
34.09242256,
34.09192646,
34.09141631,
34.09103253,
34.090733,
34.09046154,
34.09006372,
34.08961442,
34.08903407,
34.08871114,
34.09288122)
ro <- c(
'(34.0907,34.091]' = 'Santa Monica',
'(34.091,34.0914]' = '',
'(34.0887,34.089]' = 'Lockwood',
'(34.0896,34.0901]' = 'Willow Brooks',
'(34.0919,34.0924]' = '',
'(34.0924,34.0927]' = '',
'(34.0914,34.0919]' = '',
'(34.0927,34.0929]' = '',
'(34.0905,34.0907]' = '',
'(34.0901,34.0905]' = '')
co <- c(
'(-118.2882,-118.2876]' = 'N Westmoreland',
'(-118.2871,-118.287]' = '',
'(-118.2894,-118.2891]' = 'N Madison',
'(-118.2876,-118.2871]' = '',
'(-118.287,-118.2865]' = 'N Virgil',
'(-118.2891,-118.2886]' = '',
'(-118.2886,-118.2882]' = '')
# /home/philips-data-shiny/shinyapps/app1/
load("/home/philips-data-shiny/shinyapps/app1/dat0825.rdata")
dat <- dat[!(substr(dat$DeviceId, 1, 3) %in% c("+n+","79c","fWP", "Uoo", "bNZ", "guS", "Kac", "Ixi", "J7R", "UL4", "VZf", "GKU")),]
start = ymd_hms("2016-08-24 6:00:00", tz = "America/Los_Angeles")
end = ymd_hms("2016-08-24 7:00:00", tz = "America/Los_Angeles")
sub <- prep(dat[dat$component %in% c("Lmindba", "Leqdba", "Lmaxdba"),], start, end); scl <- scale_color_manual(values=c("#bd0026", "#ffffb2", "#fd8d3c"))
# Define server logic required to draw a histogram
function(input, output) {
# Expression that generates a histogram. The expression is
# wrapped in a call to renderPlot to indicate that:
#
# 1) It is "reactive" and therefore should be automatically
# re-executed when inputs change
# 2) Its output type is a plot
#dat <- dat[substr(dat$DeviceId, 1, 3)=="r8+" & dat$db>0,]
sliderValues <- reactive({
data.frame(
Name = c("Time Range"),
Value = as.character(c(input$slider_datetime)),
stringsAsFactors=FALSE
)
})
selectValues <- reactive({
if(input$types == "Lmindba...") {
data.frame(
Name = c("Type Value"),
Value = c("Lmindba", "Leqdba", "Lmaxdba")
# stringsAsFactors=FALSE
)
} else {
data.frame(
Name = c("Type Value"),
Value = c("Base", "Voice", "High"),
stringsAsFactors=FALSE
)
}
# print(input$types)
})
output$distPlot <- renderPlot({
#mainPanel(
print(sliderValues()[1,"Value"])
print(selectValues()[,"Value"])
# # with(sub, reorder(sub$DeviceId,sub$time))
sub <- prep(dat[dat$component %in% selectValues()[,"Value"],],ymd_hms(sliderValues()[1, "Value"],tz = "America/Los_Angeles"),ymd_hms(sliderValues()[2, "Value"],tz = "America/Los_Angeles")); scl <- scale_color_manual(values=c("#bd0026", "#ffffb2", "#fd8d3c"))
q <- ggplot(data= sub, aes(x=time,y=db, color=component)) + theme(axis.text.x = element_text(angle = 45, hjust = 1))+ facet_grid(lat~lng, as.table = F, labeller=labeller(lat=ro,lng=co)) + scl + geom_point(size=0.5,alpha=0.3)
#hist(sub$Latitude)
print(q)
# )
},height=700)
} |
4f2a8c41a3ec7d73bf14c46ea72262b4bb98a226 | 6874d2514172b9e809dccf1e4879e0edfaabb050 | /R/check_sen2r_deps.R | 1d9acf40cc477edf230c4b455d537598960f7393 | [] | no_license | cran/sen2r | 27e59e874a36d30b02f319b1e77fcd66f54d3f2e | 3720d77a025fc9f8d9e04825910e830f35ffa61b | refs/heads/master | 2023-06-29T11:44:40.672296 | 2023-06-16T06:10:02 | 2023-06-16T06:10:02 | 216,648,730 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 45,483 | r | check_sen2r_deps.R | #' @title Check package dependencies
#' @description The function allows to graphically check that all the
#' optional runtime dependencies are installed.
#' @return NULL (the function is called for its side effects)
#' @details This package needs some external dependencies in order to run
#' specific actions:
#' - Sen2Cor for atmospheric correction;
#' - GDAL for cloud mask smoothing and buffering;
#' - aria2 to download SAFE images with an alternative downloader.
#'
#' This function opens a GUI which allows checking that these dependencies
#' are installed. This check is highly suggested before using the library for
#' the first time, in order to avoid errors.
#' @author Luigi Ranghetti, phD (2019)
#' @references L. Ranghetti, M. Boschetti, F. Nutini, L. Busetto (2020).
#' "sen2r": An R toolbox for automatically downloading and preprocessing
#' Sentinel-2 satellite data. _Computers & Geosciences_, 139, 104473.
#' \doi{10.1016/j.cageo.2020.104473}, URL: \url{https://sen2r.ranghetti.info/}.
#' @note License: GPL 3.0
#' @importFrom utils capture.output
#' @importFrom httr RETRY write_disk progress
#' @importFrom jsonlite fromJSON toJSON
#' @export
#' @examples
#' if (interactive()) {
#' check_sen2r_deps()
#' }
check_sen2r_deps <- function() {
  # JavaScript snippet (registered below via shinyjs::extendShinyjs) which lets
  # the app close its own browser window when the user quits the GUI.
  jscode <- "shinyjs.closeWindow = function() { window.close(); }"
  # Check shiny* / leaflet* suggested dependencies to be installed
  check_gui_deps()
# Define internal functions as aliases of shiny* - leaflet* ones,
# so to avoid using "shiny::" every time
a <- shiny::a
actionButton <- shiny::actionButton
addResourcePath <- shiny::addResourcePath
br <- shiny::br
code <- shiny::code
column <- shiny::column
conditionalPanel <- shiny::conditionalPanel
disable <- shinyjs::disable
disabled <- shinyjs::disabled
div <- shiny::div
em <- shiny::em
enable <- shinyjs::enable
extendShinyjs <- shinyjs::extendShinyjs
fluidPage <- shiny::fluidPage
fluidRow <- shiny::fluidRow
getVolumes <- shinyFiles::getVolumes
h3 <- shiny::h3
helpText <- shiny::helpText
hide <- shinyjs::hide
hr <- shiny::hr
HTML <- shiny::HTML
html <- shinyjs::html
htmlOutput <- shiny::htmlOutput
icon <- shiny::icon
modalButton <- shiny::modalButton
modalDialog <- shiny::modalDialog
observe <- shiny::observe
observeEvent <- shiny::observeEvent
outputOptions <- shiny::outputOptions
p <- shiny::p
parseDirPath <- shinyFiles::parseDirPath
parseFilePaths <- shinyFiles::parseFilePaths
radioButtons <- shiny::radioButtons
reactive <- shiny::reactive
reactiveFileReader <- shiny::reactiveFileReader
reactivePoll <- shiny::reactivePoll
reactiveValues <- shiny::reactiveValues
removeModal <- shiny::removeModal
renderText <- shiny::renderText
renderUI <- shiny::renderUI
runApp <- shiny::runApp
shinyApp <- shiny::shinyApp
shinyDirButton <- shinyFiles::shinyDirButton
shinyDirChoose <- shinyFiles::shinyDirChoose
shinyFileChoose <- shinyFiles::shinyFileChoose
shinyFilesButton <- shinyFiles::shinyFilesButton
showModal <- shiny::showModal
span <- shiny::span
stopApp <- shiny::stopApp
strong <- shiny::strong
textInput <- shiny::textInput
textOutput <- shiny::textOutput
uiOutput <- shiny::uiOutput
updateTextInput <- shiny::updateTextInput
useShinyjs <- shinyjs::useShinyjs
verbatimTextOutput <- shiny::verbatimTextOutput
# get server volumes
volumes <- c(
"Home" = normalize_path("~"),
"sen2r" = system.file(package = "sen2r"),
getVolumes()()
)
  # Static UI definition: one section per external dependency, each with a
  # title, a help text, a "Check" button and an icon reporting the last result.
  settings.ui <- fluidPage(
    # header
    shinyjs::useShinyjs(),
    extendShinyjs(text = jscode, functions = c("closeWindow")),
    fluidRow(column(
      title="Dependencies",
      width=12,
      # -- Google Cloud SDK section --
      h3("Google Cloud SDK"),
      helpText(em(
        "Google Cloud SDK is used to search and download Sentinel-2 SAFE",
        "archives from Google Cloud.",
        "It must be installed and configured externally following the",
        a("official instructions.",
          href='https://cloud.google.com/sdk/docs/install',
          target='_blank'),
        "Done that, use this utility to associate it to sen2r."
      )),
      span(style="display:inline-block;vertical-align:center;padding-top:5px;",
           actionButton("where_check_gcloud", "Check Google Cloud SDK", width=200),
           "\u2000"),
      span(style="display:inline-block;vertical-align:center;",
           htmlOutput("check_gcloud_icon")),
      # -- Sen2Cor section --
      h3("Sen2Cor"),
      helpText(em(
        "Sen2Cor is used to perform atmospheric correction of Sentinel-2",
        "Level-1C products: it is required by the package,",
        "unless you choose not to correct products locally",
        "(using only Level-1C \u2013 TOA products",
        "or downloading directly Level-2A products)."
      )),
      span(style="display:inline-block;vertical-align:center;padding-top:5px;",
           actionButton("check_sen2cor", "Check Sen2Cor", width=200),
           "\u2000"),
      span(style="display:inline-block;vertical-align:center;",
           htmlOutput("check_sen2cor_icon")),
      # -- GDAL section (Windows users get OSGeo4W-specific instructions) --
      h3("GDAL"),
      helpText(em(
        "An external GDAL runtime environment is", strong("no more needed"),
        "to run sen2r",
        "(it is only required in order to smooth / buffer a cloud mask,",
        "and", strong("optionally"), "used to compute spectral indices,",
        "RGB images and thumbnails).",
        if (Sys.info()["sysname"] == "Windows") {span(
          "On Windows",
          strong("it is strictly required to install GDAL using OSGeo4W"),
          "in order to avoid errors.",
          "To satisfy this requirement, click on \"Check GDAL\" and,",
          "whenever the search of a valid installation will finish, download",
          "the OSGeo4W installer and install it in the default directory",
          "(or, in any case, maintain the directory name \"OSGeo4W64\")."
        )}
      )),
      helpText(em(
        span(style = "color:red;font-weight:bold;", "Note:"),
        "Configuring a runtime GDAL environment could fail for different reasons",
        "(e.g. problems related to GDAL environment like missing libraries,",
        "or custom environment variables which could conflict).",
        "sen2r maintainers are not responsible in case of GDAL-related issues."
      )),
      span(style="display:inline-block;vertical-align:center;padding-top:5px;",
           actionButton("where_check_gdal", "Check GDAL", width=200),
           "\u2000"),
      span(style="display:inline-block;vertical-align:center;",
           htmlOutput("check_gdal_icon")),
      # -- aria2 section (optional alternative downloader) --
      h3("aria2"),
      helpText(em(
        "aria2 is an alternative unrequired", strong("(optional)"),
        "downloader which can be used to download SAFE archives.",
        "Since the number of concurrent downloads from ESA SciHub is limited to 2,",
        "the use of aria2 is generally not faster than the default downloader;",
        "for this reason, its use is recommended only in case of problems with",
        "the default downloader."
      )),
      span(style="display:inline-block;vertical-align:center;padding-top:5px;",
           actionButton("check_aria2", "Check aria2", width=200),
           "\u2000"),
      span(style="display:inline-block;vertical-align:center;",
           htmlOutput("check_aria2_icon")),
      hr(style="margin-top: 0.75em; margin-bottom: 0.75em;"),
      uiOutput("footer_buttons")
    )) # end of fluidRow Dependencies
  ) # end of fluidPage
settings.server <- function(input, output, session) {
    # link to www directory and objects
    addResourcePath("www", system.file("www", package="sen2r"))
    # Reactive container holding one validity flag per dependency check
    rv <- reactiveValues()
    # output OS name (to be used in conditionalPanel)
    output$os_name <- renderText(Sys.info()["sysname"])
    outputOptions(output, "os_name", suspendWhenHidden = FALSE)
    # list of dependencies
    dependencies <- c(
      "gdalbuildvrt", "gdal_translate", "gdalwarp", "gdal_calc", "gdaldem", "gdalinfo", "ogrinfo",
      "python", "sen2cor"
    )
    # Expose the paths of the external binaries as a reactive.
    # NOTE(review): the checkFunc passed to reactivePoll always returns NULL,
    # so the poll may never invalidate after the first read — confirm whether
    # this is intended (the check handlers also set rv$* flags directly).
    binpaths <- reactivePoll(1000, session, function() {}, load_binpaths)
    ##-- Perform checks of dependencies --##
    #-- Check Google Cloud SDK --#
    # Set the validity flag from the saved gsutil path, and render the
    # green tick / (hidden) red cross icon shown next to the check button.
    observe({
      rv$check_gcloud_isvalid <- if (!is.null(binpaths()$gsutil)) {
        file.exists(binpaths()$gsutil)
      } else {FALSE}
    })
    output$check_gcloud_isvalid <- renderText(rv$check_gcloud_isvalid)
    output$check_gcloud_icon <- renderUI({
      if (is.na(rv$check_gcloud_isvalid)) {
        ""
      } else if (rv$check_gcloud_isvalid) {
        span(style="color:darkgreen;", "\u2714")
      } else {
        span(style="color:red;", "") #"\u2718")
      }
    })
    outputOptions(output, "check_gcloud_isvalid", suspendWhenHidden = FALSE)
    # Modal dialog shown while/after checking Google Cloud SDK; its content
    # is the reactive "check_gcloud_message" output defined below.
    check_gcloud_modal <- reactive({
      modalDialog(
        title = "Google Cloud SDK check",
        size = "s",
        uiOutput("check_gcloud_message"),
        verbatimTextOutput("check_gcloud_outmessages"),
        easyClose = FALSE,
        footer = NULL
      )
    })
    # Use a renderUI output for the check message
    # (a static message would not react to check_gcloud_isvalid changes):
    # NA -> spinner, FALSE -> failure + install instructions, TRUE -> success.
    output$check_gcloud_message <- renderUI({
      if (is.na(rv$check_gcloud_isvalid)) {
        div(
          align="center",
          p(style="text-align:center;font-size:500%;color:darkgrey;",
            icon("cog", class = "fa-spin"))
        )
      } else if (!rv$check_gcloud_isvalid) {
        div(
          p(style="text-align:center;font-size:500%;color:red;",
            icon("times-circle")),
          p("Google Cloud SDK needs to be installed or searched in a",
            "different directory. To install it, follow the",
            a("official instructions.",
              href='https://cloud.google.com/sdk/docs/install',
              target='_blank')),
          hr(style="margin-top: 0.75em; margin-bottom: 0.75em;"),
          div(style="text-align:right;",
              modalButton("\u2000Close", icon = icon("check")))
        )
      } else if (rv$check_gcloud_isvalid) {
        div(
          p(style="text-align:center;font-size:500%;color:darkgreen;",
            icon("check-circle")),
          p("Google Cloud SDK is correctly installed and configured."),
          hr(style="margin-top: 0.75em; margin-bottom: 0.75em;"),
          div(style="text-align:right;",
              modalButton("\u2000Close", icon = icon("check")))
        )
      }
    })
# ask where searching gcloud before checking
observeEvent(input$where_check_gcloud, {
showModal(modalDialog(
title = "Check Google Cloud SDK",
size = "s",
radioButtons(
"path_gsutil_isauto", NULL,
choices = c("Search Google Cloud SDK in a default path" = TRUE,
"Specify where Google Cloud SDK should be searched" = FALSE),
selected = TRUE, width = "100%"
),
conditionalPanel(
condition = "input.path_gsutil_isauto == 'FALSE'",
div(
p("Specify the path:"),
div(div(style="display:inline-block;vertical-align:top;width:50pt;",
shinyDirButton(
"path_gdalman_sel", "Select",
"Specify directory in which Google Cloud SDK should be searched"
)),
div(style="display:inline-block;vertical-align:top;width:180px;",
textInput("path_gsutilman_textin", NULL, ""))),
div(style="height:20px;vertical-aling:top;",
htmlOutput("path_gsutilman_errormess"))
)
),
easyClose = FALSE,
footer = div(
disabled(actionButton("check_gcloud_button", strong("\u2000Check"), icon=icon("check"))),
modalButton("\u2000Cancel", icon = icon("ban"))
)
))
})
# check the gdal path
observeEvent(c(input$path_gsutilman_textin, input$path_gsutil_isauto), {
path_gsutilman_errormess <- path_check(
input$path_gsutilman_textin,
mustbe_empty = FALSE,
mustbe_writable = FALSE
)
output$path_gsutilman_errormess <- path_gsutilman_errormess
if (any(
input$path_gsutil_isauto == TRUE,
TRUE %in% attr(path_gsutilman_errormess, "isvalid")
)) {
enable("check_gcloud_button")
} else {
disable("check_gcloud_button")
}
})
shinyDirChoose(input, "path_gdalman_sel", roots = volumes)
observeEvent(input$path_gdalman_sel, {
path_gdalman_string <- parseDirPath(volumes, input$path_gdalman_sel)
updateTextInput(session, "path_gsutilman_textin", value = path_gdalman_string)
})
    # Run the actual Google Cloud SDK check when the "Check" button is pressed:
    # open the modal with a spinner, run check_gcloud() capturing its messages
    # into the modal, then store the result in rv$check_gcloud_isvalid.
    observeEvent(input$check_gcloud_button, {
      # reset check value (NA should show the spinner)
      rv$check_gcloud_isvalid <- NA # FIXME not working
      # open modaldialog
      showModal(check_gcloud_modal())
      shinyjs::html(
        "check_gcloud_message",
        as.character(div(
          align="center",
          p(style="text-align:center;font-size:500%;color:darkgrey;",
            icon("spinner", class = "fa-pulse"))
        ))
      )
      # Run the check, redirecting messages emitted by check_gcloud()
      # to the "check_gcloud_outmessages" output in the modal.
      withCallingHandlers({
        shinyjs::html("check_gcloud_outmessages", "")
        rv$check_gcloud_isvalid <- if (input$path_gsutilman_textin == "") {
          check_gcloud(abort = FALSE, force = TRUE)
        } else {
          check_gcloud(
            gsutil_dir = input$path_gsutilman_textin,
            abort = FALSE, force = TRUE
          )
        }
      },
      message = function(m) {
        shinyjs::html(id = "check_gcloud_outmessages", html = m$message, add = TRUE)
      })
      shinyjs::hide("check_gcloud_outmessages")
    })
    #-- Check GDAL --#
    # Set the validity flag from the saved gdalinfo path, and render the
    # green tick / (hidden) red cross icon shown next to the check button.
    observe({
      rv$check_gdal_isvalid <- if (!is.null(binpaths()$gdalinfo)) {
        file.exists(binpaths()$gdalinfo)
      } else {FALSE}
    })
    output$check_gdal_isvalid <- renderText(rv$check_gdal_isvalid)
    output$check_gdal_icon <- renderUI({
      if (is.na(rv$check_gdal_isvalid)) {
        ""
      } else if (rv$check_gdal_isvalid) {
        span(style="color:darkgreen;", "\u2714")
      } else {
        span(style="color:red;", "") #"\u2718")
      }
    })
    outputOptions(output, "check_gdal_isvalid", suspendWhenHidden = FALSE)
    # Modal dialog shown while/after checking GDAL; its content is the
    # reactive "check_gdal_message" output defined below.
    check_gdal_modal <- reactive({
      modalDialog(
        title = "GDAL check",
        size = "s",
        uiOutput("check_gdal_message"),
        verbatimTextOutput("check_gdal_outmessages"),
        easyClose = FALSE,
        footer = NULL
      )
    })
    # Use a renderUI output for the GDAL check message
    # (a static message would not react to check_gdal_isvalid changes):
    # NA -> spinner; FALSE -> failure with OS-specific install instructions
    # (OSGeo4W download button on Windows, package hint elsewhere);
    # TRUE -> success.
    output$check_gdal_message <- renderUI({
      if (is.na(rv$check_gdal_isvalid)) {
        div(
          align="center",
          p(style="text-align:center;font-size:500%;color:darkgrey;",
            icon("cog", class = "fa-spin"))
        )
      } else if (!rv$check_gdal_isvalid) {
        if (Sys.info()["sysname"] == "Windows") {
          div(
            p(style="text-align:center;font-size:500%;color:red;",
              icon("times-circle")),
            p(HTML(
              "GDAL needs to be installed or searched in a different directory.",
              "To install it:<ol>",
              "<li>download the OSGeo4W installer using the button below;</li>",
              "<li>when the file will be automatically opened,",
              "give the administrator rules when required;</li>",
              "<li>choose the \"Advanced install\";</li>",
              "<li>continue clicking \"Next\";</li>",
              "<li>when the window for choosing the packages to install will appear,",
              "check the package \"gdal-python\" and install it.</li></ol>"
            )),
            hr(style="margin-top: 0.75em; margin-bottom: 0.75em;"),
            div(style="text-align:right;",
                actionButton("install_gdal_button", strong("\u2000Download"), icon=icon("download")),
                modalButton("\u2000Cancel", icon = icon("ban")))
          )
        } else {
          div(
            p(style="text-align:center;font-size:500%;color:red;",
              icon("times-circle")),
            p("GDAL needs to be installed or searched in a different directory.",
              "To install it, install the package \"python-gdal\",",
              "then repeat this check."),
            hr(style="margin-top: 0.75em; margin-bottom: 0.75em;"),
            div(style="text-align:right;",
                modalButton("\u2000Close", icon = icon("check")))
          )
        }
      } else if (rv$check_gdal_isvalid) {
        div(
          p(style="text-align:center;font-size:500%;color:darkgreen;",
            icon("check-circle")),
          p("GDAL is correctly installed."),
          hr(style="margin-top: 0.75em; margin-bottom: 0.75em;"),
          div(style="text-align:right;",
              modalButton("\u2000Close", icon = icon("check")))
        )
      }
    })
    # Modal dialog shown while downloading/installing OSGeo4W (Windows only);
    # its content is updated imperatively via shinyjs::html() by the
    # install_gdal_button handler.
    install_gdal_modal <- reactive({
      modalDialog(
        title = "Install GDAL",
        size = "s",
        uiOutput("install_gdal_message"),
        easyClose = FALSE,
        footer = NULL
      )
    })
# ask where searching GDAL before checking
observeEvent(input$where_check_gdal, {
showModal(modalDialog(
title = "Check GDAL",
size = "s",
radioButtons(
"path_gdal_isauto", NULL,
choices = c("Search GDAL in a default path" = TRUE,
"Specify where GDAL should be searched" = FALSE),
selected = TRUE, width = "100%"
),
conditionalPanel(
condition = "input.path_gdal_isauto == 'FALSE'",
div(
p("Specify the path:"),
div(div(style="display:inline-block;vertical-align:top;width:50pt;",
shinyDirButton("path_gdalman_sel", "Select", "Specify directory in which GDAL should be searched")),
div(style="display:inline-block;vertical-align:top;width:180px;",
textInput("path_gdalman_textin", NULL, ""))),
div(style="height:20px;vertical-aling:top;",
htmlOutput("path_gdalman_errormess"))
)
),
easyClose = FALSE,
footer = div(
disabled(actionButton("check_gdal_button", strong("\u2000Check"), icon=icon("check"))),
modalButton("\u2000Cancel", icon = icon("ban"))
)
))
})
# check the gdal path
# observeEvent(input$path_gdalman_textin, {
# path_gdalman_errormess <- path_check(
# input$path_gdalman_textin,
# mustbe_empty = FALSE,
# mustbe_writable = FALSE
# )
# output$path_gdalman_errormess <- path_gdalman_errormess
# })
observeEvent(c(input$path_gdalman_textin, input$path_gdal_isauto), {
path_gdalman_errormess <- path_check(
input$path_gdalman_textin,
mustbe_empty = FALSE,
mustbe_writable = FALSE
)
output$path_gdalman_errormess <- path_gdalman_errormess
if (any(
input$path_gdal_isauto == TRUE,
TRUE %in% attr(path_gdalman_errormess, "isvalid")
)) {
enable("check_gdal_button")
} else {
disable("check_gdal_button")
}
})
shinyDirChoose(input, "path_gdalman_sel", roots = volumes)
observeEvent(input$path_gdalman_sel, {
path_gdalman_string <- parseDirPath(volumes, input$path_gdalman_sel)
updateTextInput(session, "path_gdalman_textin", value = path_gdalman_string)
})
    # Run the actual GDAL check when the "Check" button is pressed:
    # open the modal with a spinner, run check_gdal() capturing its messages
    # into the modal, then store the result in rv$check_gdal_isvalid.
    observeEvent(input$check_gdal_button, {
      # reset check value (NA should show the spinner)
      rv$check_gdal_isvalid <- NA # FIXME not working
      # open modaldialog
      showModal(check_gdal_modal())
      shinyjs::html(
        "check_gdal_message",
        as.character(div(
          align="center",
          p(style="text-align:center;font-size:500%;color:darkgrey;",
            icon("spinner", class = "fa-pulse"))
        ))
      )
      # Run the check, redirecting messages emitted by check_gdal()
      # to the "check_gdal_outmessages" output in the modal.
      withCallingHandlers({
        shinyjs::html("check_gdal_outmessages", "")
        rv$check_gdal_isvalid <- check_gdal(
          gdal_path = if (input$path_gdalman_textin == "") {
            NULL
          } else {
            input$path_gdalman_textin
          },
          abort = FALSE, force = TRUE
        )
      },
      message = function(m) {
        shinyjs::html(id = "check_gdal_outmessages", html = m$message, add = TRUE)
      })
      shinyjs::hide("check_gdal_outmessages")
    })
    # Download the OSGeo4W installer (Windows only) and launch it, updating
    # the "Install GDAL" modal with progress / outcome messages.
    observeEvent(input$install_gdal_button, {
      showModal(install_gdal_modal())
      # create the text to show in the modaldialog
      shinyjs::html(
        "install_gdal_message",
        as.character(div(
          br(),
          p(style="color:darkgrey;text-align:center;font-size:500%;","\u23F3"),
          p("Wait while the OSGeo4W installer is being downloaded...")
        )),
        add=FALSE
      )
      # Download osgeo4w (32- or 64-bit installer depending on the machine)
      osgeo4w_url <- paste0(
        "http://download.osgeo.org/osgeo4w/osgeo4w-setup-x86",
        if (Sys.info()["machine"]=="x86-64") {"_64"},".exe"
      )
      osgeo4w_path <- tempfile(pattern="dir",fileext = ".exe")
      # Redirect the httr progress bar to a temporary file when not running
      # in an interactive terminal (e.g. inside the Shiny session).
      out_bar <- if (all(inherits(stdout(), "terminal"), interactive())) {
        NULL
      } else {
        file(out_bar_path <- tempfile(), open = "a")
      }
      # Download with up to 5 retries, writing the installer to osgeo4w_path
      RETRY(
        verb = "GET",
        url = osgeo4w_url,
        times = 5, pause_cap = 8,
        progress(con = if (length(out_bar) > 0) {out_bar} else {stdout()}),
        write_disk(osgeo4w_path, overwrite = TRUE)
      )
      # Clean up the temporary progress-bar sink, if one was opened
      if (length(out_bar) > 0) {
        close(out_bar)
        invisible(file.remove(out_bar_path))
      }
      if (file.exists(osgeo4w_path)) {
        shinyjs::html(
          "install_gdal_message",
          as.character(div(
            p("OSGeo4W was correctly downloaded."),
            p("The installer window was opened;",
              "please install the package \"gdal-python\" following the instructions.")
          )),
          add = TRUE
        )
        # Launch the installer and wait for it to terminate (blocking call)
        shell(osgeo4w_path)
        shinyjs::html(
          "install_gdal_message",
          as.character(div(
            p("The installation was terminated;",
              "please close the interface,",strong("restart R"),
              "and repeat the check to be sure all was correctly installed."),
            hr(style="margin-top: 0.75em; margin-bottom: 0.75em;"),
            div(style="text-align:right;",
                modalButton("\u2000Close", icon = icon("check")))
          )),
          add = TRUE
        )
      } else {
        # Download failed: report and let the user retry
        shinyjs::html(
          "install_gdal_message",
          as.character(div(
            p("Something went wrong during the download; please retry."),
            hr(style="margin-top: 0.75em; margin-bottom: 0.75em;"),
            div(style="text-align:right;",
                modalButton("\u2000Close", icon = icon("check")))
          )),
          add = TRUE
        )
      }
    })
#-- Check aria2 --#
# build the icon of the check
observe({
input$check_aria2
rv$check_aria2_isvalid <- if (!is.null(binpaths()$aria2c)) {
file.exists(binpaths()$aria2c)
} else {FALSE}
})
output$check_aria2_isvalid <- renderText(rv$check_aria2_isvalid)
output$check_aria2_icon <- renderUI({
if (is.na(rv$check_aria2_isvalid)) {
""
} else if (rv$check_aria2_isvalid) {
span(style="color:darkgreen;", "\u2714")
} else {
span(style="color:red;", "") #"\u2718")
}
})
outputOptions(output, "check_aria2_isvalid", suspendWhenHidden = FALSE)
output$check_aria2_message <- renderUI({
if (is.na(rv$check_aria2_isvalid)) {
""
} else if (!rv$check_aria2_isvalid) {
div(
align = if (Sys.info()["sysname"] == "Windows") {"left"} else {"center"},
p(style="color:red;text-align:center;font-size:500%;",
icon("times-circle")),
if (Sys.info()["sysname"] == "Windows") {
div(
p("aria2 needs to be linked to sen2r, or downloaded if missing."),
radioButtons(
"aria2_link_or_install", NULL,
c("Download a new aria2 binary" = "install",
"Set the path of an existing binary" = "link"),
selected = "install"
),
conditionalPanel(
condition = "input.aria2_link_or_install == 'install'",
div(
p("Please provide the path of a directory ",
"in which installing it:"),
div(div(style="display:inline-block;vertical-align:top;width:50pt;",
shinyDirButton("path_newaria2_sel", "Select", "Specify directory in which installing aria2")),
div(style="display:inline-block;vertical-align:top;width:180px;",
textInput("path_newaria2_textin", NULL, ""))),
div(style="height:20px;vertical-aling:top;",
htmlOutput("path_newaria2_errormess")),
hr(style="margin-top: 0.75em; margin-bottom: 0.75em;"),
div(style="text-align:right;",
disabled(actionButton("install_aria2_button", strong("\u2000Download"), icon=icon("download"))),
modalButton("\u2000Cancel", icon = icon("ban")))
)
),
conditionalPanel(
condition = "input.aria2_link_or_install == 'link'",
div(
p("Please provide the path of an existing aria2 binary:"),
div(div(style="display:inline-block;vertical-align:top;width:50pt;",
shinyFilesButton("path_exiaria2_sel", "Select", "Specify the aria2 path", multiple = FALSE)),
div(style="display:inline-block;vertical-align:top;width:180px;",
textInput("path_exiaria2_textin", NULL, ""))),
div(style="height:20px;vertical-aling:top;",
htmlOutput("path_exiaria2_errormess")),
hr(style="margin-top: 0.75em; margin-bottom: 0.75em;"),
div(style="text-align:right;",
disabled(actionButton("link_aria2_button", strong("\u2000Ok"), icon=icon("check"))),
modalButton("\u2000Cancel", icon = icon("ban")))
)
)
)
} else {
div(
p("aria2 needs to be installed",
"To do it, install the package \"aria2\",",
"then repeat this check."),
hr(style="margin-top: 0.75em; margin-bottom: 0.75em;"),
div(style="text-align:right;",
modalButton("\u2000Close", icon = icon("check")))
)
}
)
} else if (rv$check_aria2_isvalid) {
div(
align = "center",
p(style="text-align:center;font-size:500%;color:darkgreen;",
icon("check-circle")),
p("aria2 is correctly installed."),
hr(style="margin-top: 0.75em; margin-bottom: 0.75em;"),
div(style="text-align:right;",
modalButton("\u2000Close", icon = icon("check")))
)
}
})
# check the aria2 path
observeEvent(input$path_newaria2_textin, {
path_newaria2_errormess <- path_check(
input$path_newaria2_textin,
mustbe_writable = TRUE,
mustbe_empty = FALSE
)
output$path_newaria2_errormess <- path_newaria2_errormess
if (TRUE %in% attr(path_newaria2_errormess, "isvalid")) {
enable("install_aria2_button")
} else {
disable("install_aria2_button")
}
})
shinyDirChoose(input, "path_newaria2_sel", roots = volumes)
observeEvent(input$path_newaria2_sel, {
path_newaria2_string <- parseDirPath(volumes, input$path_newaria2_sel)
updateTextInput(session, "path_newaria2_textin", value = path_newaria2_string)
})
observeEvent(input$path_exiaria2_textin, {
if (any(length(input$path_exiaria2_textin)==0, input$path_exiaria2_textin[1]=="")) {
output$path_exiaria2_errormess <- renderText("")
disable("link_aria2_button")
} else if (!file.exists(input$path_exiaria2_textin)) {
output$path_exiaria2_errormess <- renderUI(span(
style="color:red",
"\u2718 (the file does not exist)"
))
disable("link_aria2_button")
} else if (!grepl("^aria2c?\\.exe$", basename(input$path_exiaria2_textin))) {
output$path_exiaria2_errormess <- renderUI(span(
style="color:red",
"\u2718 (this is not aria2c.exe)"
))
disable("link_aria2_button")
} else {
output$path_exiaria2_errormess <- renderUI(span(
style="color:darkgreen",
"\u2714"
))
enable("link_aria2_button")
}
})
shinyFileChoose(input, "path_exiaria2_sel", roots = volumes)
observeEvent(input$path_exiaria2_sel, {
path_exiaria2_string <- parseFilePaths(volumes, input$path_exiaria2_sel)$datapath
updateTextInput(session, "path_exiaria2_textin", value = path_exiaria2_string)
})
# build the modalDialog
check_aria2_modal <- modalDialog(
title = "aria2 check",
size = "s",
uiOutput("check_aria2_message"),
easyClose = FALSE,
footer = NULL
)
# open the modaldialog when button is pressed
observeEvent(input$check_aria2, {
# update the check
rv$check_aria2_isvalid <- if (
!is.null(suppressWarnings(load_binpaths("aria2c")$aria2c))
) {
file.exists(load_binpaths()$aria2c)
} else {
FALSE
}
# open modaldialog
showModal(check_aria2_modal)
})
# install aria2
observeEvent(input$install_aria2_button, {
shinyjs::html(
"check_aria2_message",
as.character(div(
align="center",
p(style="text-align:center;font-size:500%;color:darkgrey;",
icon("cog", class = "fa-spin")),
p("Wait while aria2 is being installed...")
))
)
Sys.sleep(0.5)
check_aria2_outerr <- tryCatch(
install_aria2(input$path_newaria2_textin),
error = function(e) {print(e)}
)
# remove the text
if (is(check_aria2_outerr, "error")) {
shinyjs::html(
"check_aria2_message",
as.character(div(
align="center",
p(style="text-align:center;font-size:500%;color:red;",
icon("times-circle")),
p("Some errors occurred:"),
p(code(check_aria2_outerr)),
hr(style="margin-top: 0.75em; margin-bottom: 0.75em;"),
div(style="text-align:right;",
modalButton("\u2000Close", icon = icon("check")))
))
)
rv$check_aria2_isvalid <- FALSE
} else {
shinyjs::html(
"check_aria2_message",
as.character(div(
align="center",
p(style="text-align:center;font-size:500%;color:darkgreen;",
icon("check-circle")),
p("aria2 was correctly installed."),
hr(style="margin-top: 0.75em; margin-bottom: 0.75em;"),
div(style="text-align:right;",
modalButton("\u2000Close", icon = icon("check")))
))
)
rv$check_aria2_isvalid <- TRUE
}
})
  # link an existing aria2
  # When the user confirms a manually selected aria2c binary, persist its
  # normalized path into the binpaths JSON file and mark the check as passed.
  observeEvent(input$link_aria2_button, {
    binpaths_content <- load_binpaths()
    # NOTE(review): assumes input$path_exiaria2_textin was already validated
    # by the observer that enables/disables link_aria2_button — confirm.
    binpaths_content$aria2c <- normalize_path(input$path_exiaria2_textin)
    # Write the updated path list back to the same file binpaths() reads from.
    writeLines(jsonlite::toJSON(binpaths_content, pretty=TRUE), attr(binpaths(), "path"))
    rv$check_aria2_isvalid <- TRUE
    # Close the modal; the check icon updates reactively from rv.
    removeModal()
  })
#-- Check sen2cor --#
# build the icon of the check
observe({
input$check_sen2cor # redo when the button is pressed
rv$check_sen2cor_isvalid <- if (!is.null(binpaths()$sen2cor)) {
file.exists(binpaths()$sen2cor)
} else {FALSE}
})
output$check_sen2cor_isvalid <- renderText(rv$check_sen2cor_isvalid)
output$check_sen2cor_icon <- renderUI({
if (is.na(rv$check_sen2cor_isvalid)) {
""
} else if (rv$check_sen2cor_isvalid) {
span(style="color:darkgreen;", "\u2714")
} else {
span(style="color:red;", "\u2718")
}
})
outputOptions(output, "check_sen2cor_isvalid", suspendWhenHidden = FALSE)
output$check_sen2cor_message <- renderUI({
if (is.na(rv$check_sen2cor_isvalid)) {
""
} else if (rv$check_sen2cor_isvalid) {
div(
align = "center",
p(style="color:darkgreen;text-align:center;font-size:500%;",
icon("check-circle")),
p("Sen2Cor is correctly installed."),
hr(style="margin-top: 0.75em; margin-bottom: 0.75em;"),
div(style="text-align:right;",
modalButton("\u2000Close", icon = icon("check")))
)
} else {
div(
align = "left",
p(style="color:red;text-align:center;font-size:500%;",
icon("times-circle")),
div(
style = "margin-bottom: 1em;",
p("Sen2Cor needs to be linked to sen2r, or downloaded if missing.")
),
radioButtons(
"sen2cor_link_or_install", NULL,
c("Install a new Sen2Cor environment" = "install",
"Set the path of an existing Sen2Cor" = "link"),
selected = "install"
),
conditionalPanel(
condition = "input.sen2cor_link_or_install == 'install'",
div(
p("Please provide the path of an empty directory ",
"in which installing it:"),
div(div(style="display:inline-block;vertical-align:top;width:50pt;",
shinyDirButton("path_newsen2cor_sel", "Select", "Specify directory in which installing Sen2Cor")),
div(style="display:inline-block;vertical-align:top;width:480px;",
textInput("path_newsen2cor_textin", NULL, normalize_path(
file.path(dirname(attr(binpaths(), "path")), "sen2cor"), mustWork = FALSE
)))),
div(style="height:20px;vertical-aling:top;",
htmlOutput("path_newsen2cor_errormess")),
radioButtons(
"sen2cor_version", "Sen2Cor version to be installed:",
c("Stable version (2.5.5)" = "2.5.5",
"Newer, lighter version (2.8.0)" = "2.8.0"),
selected = "2.5.5"
),
shiny::tags$small(em(p(
"Sen2Cor 2.8.0 is faster and makes use of less RAM, but",
"it only works for SAFE products version >= 14.2 and",
"some problems were encountered running it on Windows;",
"it is recommended to use Sen2Cor 2.5.5."
)))
)
),
conditionalPanel(
condition = "input.sen2cor_link_or_install == 'link'",
div(
p("Please provide the path of the directory ",
"in which Sen2Cor is currently installed:"),
div(div(style="display:inline-block;vertical-align:top;width:50pt;",
shinyDirButton("path_exisen2cor_sel", "Select", "Specify directory in which Sen2Cor is installed")),
div(style="display:inline-block;vertical-align:top;width:480px;",
textInput("path_exisen2cor_textin", NULL, normalize_path(
file.path(dirname(attr(binpaths(), "path")), "sen2cor"), mustWork = FALSE
)))),
div(style="height:20px;vertical-aling:top;",
htmlOutput("path_exisen2cor_errormess"))
)
),
# radioButtons(
# "sen2cor_use_dem", "Use DEM for topographic correction?",
# c("Yes (as done in ESA Hub Level-2 products)" = TRUE,
# "No (default Sen2Cor setting)" = FALSE,
# "Use existing choice (Sen2Cor reinstallation / link)" = NA),
# selected = TRUE, width = "100%"
# ),
hr(style="margin-top: 0.75em; margin-bottom: 0.75em;"),
conditionalPanel(
condition = "input.sen2cor_link_or_install == 'install'",
div(style="text-align:right;",
disabled(actionButton("install_sen2cor_button", strong("\u2000Download"), icon=icon("download"))),
modalButton("\u2000Cancel", icon = icon("ban")))
),
conditionalPanel(
condition = "input.sen2cor_link_or_install == 'link'",
div(style="text-align:right;",
disabled(actionButton("link_sen2cor_button", strong("\u2000Ok"), icon=icon("check"))),
modalButton("\u2000Cancel", icon = icon("ban")))
)
)
}
})
# check the Sen2Cor paths
observeEvent(input$path_newsen2cor_textin, {
path_newsen2cor_errormess <- path_check(
input$path_newsen2cor_textin,
mustbe_writable = TRUE,
mustbe_empty = TRUE
)
output$path_newsen2cor_errormess <- path_newsen2cor_errormess
if (TRUE %in% attr(path_newsen2cor_errormess, "isvalid")) {
enable("install_sen2cor_button")
} else {
disable("install_sen2cor_button")
}
})
shinyDirChoose(input, "path_newsen2cor_sel", roots = volumes)
observeEvent(input$path_newsen2cor_sel, {
path_newsen2cor_string <- parseDirPath(volumes, input$path_newsen2cor_sel)
updateTextInput(session, "path_newsen2cor_textin", value = path_newsen2cor_string)
})
observeEvent(input$path_exisen2cor_textin, {
path_exisen2cor_errormess <- path_check(
input$path_exisen2cor_textin,
mustbe_writable = FALSE,
mustbe_empty = FALSE
)
if (TRUE %in% attr(path_exisen2cor_errormess, "isvalid")) {
if (.sen2cor_exists(input$path_exisen2cor_textin)) {
output$path_exisen2cor_errormess <- path_exisen2cor_errormess
enable("link_sen2cor_button")
} else {
output$path_exisen2cor_errormess <- renderUI(span(
style="color:red",
"\u2718 (Sen2Cor was not found here)"
))
disable("link_sen2cor_button")
}
} else {
output$path_exisen2cor_errormess <- path_exisen2cor_errormess
disable("link_sen2cor_button")
}
})
shinyDirChoose(input, "path_exisen2cor_sel", roots = volumes)
observeEvent(input$path_exisen2cor_sel, {
path_exisen2cor_string <- parseDirPath(volumes, input$path_exisen2cor_sel)
updateTextInput(session, "path_exisen2cor_textin", value = path_exisen2cor_string)
})
# build the modalDialog
check_sen2cor_modal <- modalDialog(
title = "Sen2Cor check",
size = "m",
uiOutput("check_sen2cor_message"),
easyClose = FALSE,
footer = NULL
)
# open the modaldialog when button is pressed
observeEvent(input$check_sen2cor, {
# open the dialog
showModal(check_sen2cor_modal)
})
# install sen2cor
observeEvent(input$install_sen2cor_button, {
# create the text to show in the modaldialog
shinyjs::html(
"check_sen2cor_message",
as.character(div(
align="center",
p(style="text-align:center;font-size:500%;color:darkgrey;",
icon("cog", class = "fa-spin")),
p("Wait while Sen2Cor is being installed...")
))
)
check_sen2cor_outmess <- capture.output(
check_sen2cor_outerr <- tryCatch(
.install_sen2cor(
input$path_newsen2cor_textin,
version = input$sen2cor_version,
interactive = FALSE
),
error = function(e) {print(e)}
),
type = "message"
)
# remove the text
if (is(check_sen2cor_outerr, "error")) {
rv$check_sen2cor_isvalid <- FALSE
shinyjs::html(
"check_sen2cor_message",
as.character(div(
p(code(check_sen2cor_outmess)),
p(code(check_sen2cor_outerr)),
hr(style="margin-top: 0.75em; margin-bottom: 0.75em;"),
div(style="text-align:right;",
modalButton("\u2000Close", icon = icon("check")))
))
)
} else {
rv$check_sen2cor_isvalid <- TRUE
shinyjs::html(
"check_sen2cor_message",
as.character(div(
align="center",
p(style="text-align:center;font-size:500%;color:darkgreen;",
icon("check-circle")),
p("Sen2Cor was correctly installed."),
hr(style="margin-top: 0.75em; margin-bottom: 0.75em;"),
div(style="text-align:right;",
modalButton("\u2000Close", icon = icon("check")))
))
)
}
})
  # link an existing sen2cor
  # Register a pre-existing Sen2Cor installation (path validated by the
  # observer that enables link_sen2cor_button) and flag the check as passed.
  observeEvent(input$link_sen2cor_button, {
    link_sen2cor(input$path_exisen2cor_textin)
    rv$check_sen2cor_isvalid <- TRUE
    # Close the modal; the check icon updates reactively from rv.
    removeModal()
  })
# ##-- Footer buttons --##
# observe({
# rv$check_all_isvalid <- all(c(
# rv$check_gdal_isvalid, rv$check_aria2_isvalid,
# rv$check_sen2cor_isvalid
# ))
# })
output$footer_buttons <- renderUI({
div(
style = "vertical-align:center;text-align:right;",
# if (rv$check_all_isvalid) {
# span(
# style = "display:inline-block;",
# "All the dependencies are satisfied, you can safely use the library.\u2000"
# )
# # actionButton("close_gui", "\u2000Close", icon = icon("check"), class = "darkbutton")
# },
actionButton(
"close_gui", "\u2000Close",
# icon = icon(ifelse(rv$check_all_isvalid, "check", "exclamation-triangle")),
icon = icon("check"),
class = "darkbutton"
)
)
})
# Close the connection when button is pressed
observeEvent(input$close_gui, {
# if (!rv$check_all_isvalid) {
# confirmSweetAlert(
# session = session, inputId = "confirm_close", type = "warning",
# title = "Closing the GUI?",
# text = paste0(
# "Are you sure do you want to quit? ",
# "Running the package with unsatisfied ",
# "dependencies can lead to errors."
# ),
# danger_mode = TRUE, btn_labels = c("Cancel", "Close window")
# )
# } else {
shinyjs::js$closeWindow()
stopApp()
# }
})
observeEvent(input$confirm_close, {
if (input$confirm_close) {
shinyjs::js$closeWindow()
stopApp()
}
})
# Close the connection when window is closed
session$onSessionEnded(function() {
stopApp()
})
}
settings.shiny <- shinyApp(
ui = settings.ui, server = settings.server,
options = list(width="400px", height="400px")
)
# run
if (interactive()) {
options(device.ask.default = FALSE)
return(runApp(settings.shiny))
} else {
stop("The function must be run from an interactive R session.")
}
}
|
4bbcc66aa6cc3fef75dc9a5420e0e2fb1ee95ce3 | 0fb33ca8eef07fcb5d3687f4cf2793ef187f79f4 | /R/sx-scoreFACIT_AI.R | c0937aa8aa85c118b12d35d6964aadcf69f08607 | [
"MIT"
] | permissive | raybaser/FACTscorer | e3c10b9a065cb5b6290b211519b72ed9171a1fc2 | 070a1cf479ee8c1f19bf6a295c2ed0d544ff6406 | refs/heads/master | 2022-03-16T20:20:29.198088 | 2022-03-12T09:42:36 | 2022-03-12T09:42:36 | 61,918,573 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,188 | r | sx-scoreFACIT_AI.R | #' @title Score the FACIT-AI
#'
#' @description
#' Scores the Functional Assessment of Chronic Illness Therapy-Ascites Index
#' (FACIT-AI) from item responses.
#'
#'
#' @template paramsFACTG
#'
#'
#' @templateVar MEASURE FACIT-AI
#' @templateVar NAMESUB FACIT_AI
#' @templateVar SCORENAME FACIT Ascites Index
#' @templateVar SCOREFUN scoreFACIT_AI
#' @template details1score
#'
#' @templateVar ITEMS1 'C6', 'GF5', 'BMT5', 'B1', 'GP2', 'O2', 'ACT11', 'O1',
#' @templateVar ITEMS2 'GP1', 'ACT10', 'BL2', 'CX6', 'AI1'
#' @template example1score_items2
#'
#'
#' @references FACIT-AI Scoring Guidelines, available at
#' \url{http://www.facit.org}
#'
#'
#' @export
scoreFACIT_AI <- function(df, id = NULL, updateItems = FALSE,
                          keepNvalid = FALSE){
  # Score the FACIT Ascites Index by delegating to the generic scoring
  # engine. The instrument consists only of an "Additional Concerns" item
  # set (hence AConly = TRUE), so a single scale score is produced.

  # The 13 FACIT-AI items, in the order passed to the scoring engine.
  items <- c("C6", "GF5", "BMT5", "B1", "GP2", "O2", "ACT11", "O1",
             "GP1", "ACT10", "BL2", "CX6", "AI1")
  # Items that are reverse-scored.
  reversed <- c("B1", "GP2", "O2", "ACT11", "O1", "GP1", "ACT10", "BL2",
                "CX6", "AI1")

  scoreFACT_any(
    df = df,
    id = id,
    namesAC = items,
    namesRev = reversed,
    nameSub = "FACIT_AI",
    AConly = TRUE,
    updateItems = updateItems,
    keepNvalid = keepNvalid
  )
}
|
adc0f5eec9036e397304f7594dd538b7c6299366 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/MGDrivE/man/oneDay_pupation_stochastic_Patch.Rd | 4b3a2dbcaa1f3028cd148e95d6aef73c9114bb8b | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | true | 736 | rd | oneDay_pupation_stochastic_Patch.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Patch-Simulation.R
\name{oneDay_pupation_stochastic_Patch}
\alias{oneDay_pupation_stochastic_Patch}
\title{Stochastic Pupation}
\usage{
oneDay_pupation_stochastic_Patch()
}
\description{
Pupae first undergo one extra day of survival, calculated as a binomial over
\deqn{\overline{P_{[t-1]}} * (1-\mu_{ad})}.
This is an artifact of the conversion from continuous to discrete time (as mentioned
in the original Hancock paper this model is derived from). \cr
Then, pupation is sampled from a binomial, where \eqn{(1-\overline{\phi})} is
the genotype-specific probability of becoming male, and \eqn{\overline{\phi}}
is the genotype-specific probability of becoming female.
}
|
38e3afb8295231268cc9ce87f41675fb42fae701 | 6b943275756eee4b359cdd92ad1f940313b98fad | /R/functions_other_output.R | cb60f9dda072f6945409f97b10a6ecaa5564884b | [] | no_license | antonkalen/basketball-reselection | 48d7605892c2536f3a54b4c7b3b446d5beea7bd8 | 54dc448f77871e5eaa6fe71c7805ac6c12e2143a | refs/heads/master | 2022-12-31T02:53:33.617243 | 2020-07-09T15:01:51 | 2020-07-09T15:01:51 | 234,495,074 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,440 | r | functions_other_output.R |
# Birth quarter text results ----------------------------------------------
create_quarter_results <- function(draws, .width) {
# Filter and prepare data
draws <- prepare_quarter_draws(draws = draws)
# Calculate probabilities for top and bottom ranks
probabilities <- caclulate_quarter_reselection(draws = draws, .width = .width)
# Calculate relative risk
relative_risk <- calculate_quarter_rr(draws = draws, .width = .width)
# Join probabilities and relative risk
dplyr::left_join(probabilities, relative_risk, by = c("gender"))
}
prepare_quarter_draws <- function(draws) {
draws_filtered <- dplyr::filter(
.data = draws,
player_age == 20,
birth_quarter %in% c(min(birth_quarter), max(birth_quarter))
)
draws_filtered <- dplyr::mutate(
.data = draws_filtered,
birth_quarter = dplyr::case_when(
birth_quarter == min(birth_quarter) ~ "q1",
birth_quarter == max(birth_quarter) ~ "q4",
)
)
draws_filtered
}
caclulate_quarter_reselection <- function(draws, .width) {
draws_grouped <- dplyr::group_by(.data = draws, gender, birth_quarter)
reselection_probability <- tidybayes::mean_hdci(
.data = draws_grouped,
cum_prob,
.width = .width
)
reselection_probability <- dplyr::select(
.data = reselection_probability,
gender,
birth_quarter,
value = cum_prob,
.lower,
.upper
)
reselection_wide <- tidyr::pivot_wider(
data = reselection_probability,
names_from = birth_quarter,
values_from = c(value, .lower, .upper)
)
reselection_wide <- dplyr::select(
.data = reselection_wide,
gender,
value_q1,
.lower_q1,
.upper_q1,
value_q4,
.lower_q4,
.upper_q4
)
reselection_wide <- dplyr::ungroup(reselection_wide)
reselection_wide
}
calculate_quarter_rr <- function(draws = draws, .width = .width) {
draw_comps <- tidybayes::compare_levels(
data = draws,
cum_prob,
by = birth_quarter,
fun = `/`,
draw_indices = c("gender", "debut", ".draw")
)
comps_grouped <- dplyr::group_by(.data = draw_comps, gender)
relative_risk <- tidybayes::mean_hdci(
.data = comps_grouped,
cum_prob,
.width = .width
)
relative_risk <- dplyr::select(
.data = relative_risk,
gender,
rr = cum_prob,
.lower_rr = .lower,
.upper_rr = .upper
)
relative_risk <- dplyr::ungroup(relative_risk)
relative_risk
}
# Supplementary table 1 ---------------------------------------------------
create_supplementary_table_1 <- function(coefs, loos, .width, var_names) {
loos <- prepare_loos(loos = loos)
coefs <- prepare_coefs(coefs = coefs, .width = .width, var_names = var_names)
table <- dplyr::bind_rows(coefs, loos)
table <- dplyr::select(
.data = table,
Parameters = name,
`Est Model 1` = value_model_1,
`LL Model 1` = .lower_model_1,
`UL Model 1` = .upper_model_1,
`Est Model 2` = value_model_2,
`LL Model 2` = .lower_model_2,
`UL Model 2` = .upper_model_2,
`Est Model 3` = value_model_3,
`LL Model 3` = .lower_model_3,
`UL Model 3` = .upper_model_3,
`Est Model 4` = value_model_4,
`LL Model 4` = .lower_model_4,
`UL Model 4` = .upper_model_4
)
table
}
# Function to format loo values for the supplementary material
prepare_loos <- function(loos) {
loos <- as.data.frame(loos)
loos <- tibble::rownames_to_column(loos)
loos <- dplyr::select(
.data = loos,
name = rowname,
value = elpd_diff,
.lower = se_diff
)
loos <- tidyr::pivot_longer(
data = loos,
cols = c(value, .lower),
names_to = "names")
loos <- dplyr::mutate(
.data = loos,
value = ifelse(names == "value", value * -2, value * 2),
names = paste(names, name, sep = "_"),
name = "Relative LOO-IC (SE)"
)
loos <- tidyr::pivot_wider(
data = loos,
names_from = names,
values_from = value
)
loos
}
prepare_coefs <- function(coefs, .width, var_names) {
coefs_grouped <- dplyr::group_by(.data = coefs, model, name)
coef_summarised <- tidybayes::mean_hdci(
.data = coefs_grouped,
value,
.width = .width
)
coef_summarised <- dplyr::select(
.data = coef_summarised,
model,
name,
value,
.lower,
.upper
)
coef_summarised <- tidyr::pivot_wider(
data = coef_summarised,
names_from = model,
names_prefix = "model_",
values_from = c(value, .lower, .upper)
)
coef_summarised <- dplyr::mutate(
.data = coef_summarised,
name = dplyr::recode(name, !!!var_names),
)
coef_summarised <- dplyr::arrange(
.data = coef_summarised,
factor(name, levels = var_names)
)
coef_summarised
}
# Supplementary figure 1 --------------------------------------------------
create_supplementary_figure_1 <- function(draws, .width, theme) {
plot_data <- prep_data_create_supplementary_figure_1(
draws = draws,
.width = .width
)
plot <- ggplot2::ggplot(
data = plot_data,
mapping = ggplot2::aes(
x = player_age,
y = cum_prob,
linetype = birth_quarter
)
) +
ggplot2::geom_ribbon(
ggplot2::aes(ymin = .lower, ymax = .upper),
show.legend = FALSE,
alpha = .2
) +
ggplot2::geom_line(
size = .3
) +
ggplot2::facet_grid(
cols = ggplot2::vars(debut),
rows = ggplot2::vars(gender),
labeller = ggplot2::labeller(debut = debut_labels),
switch = "y"
) +
ggplot2::guides(
linetype = ggplot2::guide_legend(
override.aes = list(fill = NA)
)
) + theme
plot
}
# Function to prepare data for plotting supplementary figure 1
prep_data_create_supplementary_figure_1 <- function(draws, .width) {
# Calculate mean and ci for re-selection create a first season with prob = 1
first_year <- create_first_year_probs(
draws = draws,
gender,
debut,
birth_quarter
)
summarised_data <- prepare_plot_data(
draws = draws,
gender,
birth_quarter,
debut,
player_age,
.width = .width
)
# Join data and keep only first and last quarter
plot_data <- dplyr::bind_rows(summarised_data, first_year)
plot_data <- dplyr::filter(
.data = plot_data,
birth_quarter %in% c(-1.5, 1.5)
)
plot_data <- dplyr::mutate(
.data = plot_data,
birth_quarter = dplyr::case_when(
birth_quarter == -1.5 ~ "Quarter 1",
birth_quarter == 1.5 ~ "Quarter 4",
)
)
}
# Supplementary figure 2 --------------------------------------------------
create_supplementary_figure_2 <- function(draws, .width, theme) {
plot_data <- prep_data_supplementary_figure_2(
draws = draws,
.width = .width
)
plot <- ggplot2::ggplot(
data = plot_data,
mapping = ggplot2::aes(
x = player_age,
y = cum_prob,
linetype = scaled_log2_points
)
) +
ggplot2::geom_ribbon(
ggplot2::aes(ymin = .lower, ymax = .upper),
alpha = .2
) +
ggplot2::geom_line(
size = .3
) +
ggplot2::facet_grid(
cols = ggplot2::vars(debut),
rows = ggplot2::vars(gender),
labeller = ggplot2::labeller(debut = debut_labels),
switch = "y"
) +
ggplot2::guides(
linetype = ggplot2::guide_legend(
override.aes = list(fill = NA)
)
) + theme
plot
}
# Function to prepare data for plotting supplementary figure 2
prep_data_supplementary_figure_2 <- function(draws, .width) {
# Calculate mean and ci for re-selection create a first season with prob = 1
first_year <- create_first_year_probs(
draws = draws,
gender,
debut,
scaled_log2_points
)
summarised_data <- prepare_plot_data(
draws = draws,
gender,
scaled_log2_points,
debut,
player_age,
.width = .width
)
# Join data and keep only first and last quarter
plot_data <- dplyr::bind_rows(summarised_data, first_year)
plot_data <- dplyr::group_by(.data = plot_data, gender)
plot_data <- dplyr::filter(
.data = plot_data,
scaled_log2_points %in% c(min(scaled_log2_points), max(scaled_log2_points))
)
plot_data <- dplyr::mutate(
.data = plot_data,
scaled_log2_points = dplyr::case_when(
scaled_log2_points == max(scaled_log2_points) ~ "Top ranked",
scaled_log2_points == min(scaled_log2_points) ~ "Bottom ranked"
),
scaled_log2_points = factor(
scaled_log2_points,
levels = c("Top ranked", "Bottom ranked")
)
)
plot_data <- dplyr::ungroup(plot_data)
plot_data
}
|
0170c1e25c3eaf5ae7f1957665b6de5188158270 | e3e93deab30e5a660bd78570f821061b66d99c2d | /exercise-2/exercise.R | 22492c994e9aec22a980198437f16aaffed4dfa8 | [
"MIT"
] | permissive | zubinchopra/m7-functions | c7c829dd1d0c49144c9c67b24daffd9565e924c2 | 26a9a18dbe07081caab79fc1bcb67785d4062588 | refs/heads/master | 2021-01-19T01:19:01.635077 | 2017-04-06T01:16:23 | 2017-04-06T01:16:23 | 87,237,997 | 0 | 0 | null | 2017-04-04T21:43:39 | 2017-04-04T21:43:38 | null | UTF-8 | R | false | false | 1,265 | r | exercise.R | # Exercise 2: writing and executing functions (II)
# Write a function `CompareLength` that takes in 2 vectors, and returns the sentence:
# "The difference in lengths is N"
CompareLength <- function(v1, v2){
difference <- abs(length(v1) - length(v2))
difference.string <- paste("The difference in lengths is", difference)
return(difference.string)
}
# Pass two vectors of different length to your `CompareLength` function
CompareLength(c(1, 2, 3), c("abc", "def"))
# Write a function `DescribeDifference` that will return one of the following statements:
# "Your first vector is longer by N elements"
# "Your second vector is longer by N elements"
DescribeDifference <- function(v1, v2){
difference <- length(v1) - length(v2)
if(difference < 0)
difference.string <- paste("Your second vector is longer by", abs(difference))
else if(difference > 0)
difference.string <- paste("Your first vector is longer by", difference)
else
difference.string <- paste("Both vectors are equal in length")
return(difference.string)
}
# Pass two vectors to your `DescribeDifference` function
DescribeDifference(c(1), c("abc", "def"))
### Bonus ###
# Rewrite your `DescribeDifference` function to tell you the name of the vector which is longer
|
495ec21677cf4f3c3fa1274671554cde995f9825 | be6e0a5b11fe08b1ff89468821cf121bfbe34877 | /Quiz4-4.R | 7c6be72746dc8129a0581a5d9d4999709d08c996 | [] | no_license | jguerra000/Coursera-GCLD | b5e374106071feb6048572638bd882ee6c7ef388 | ff2154d0ae2de62465f0992b1c2259c22edc7cb5 | refs/heads/master | 2016-09-06T01:21:22.230891 | 2014-08-25T09:56:13 | 2014-08-25T09:56:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,339 | r | Quiz4-4.R | # Question 4
# Load the Gross Domestic Product data for the 190 ranked countries in this data set:
#
# https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv
#
# Load the educational data from this data set:
#
# https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv
#
# Match the data based on the country shortcode. Of the countries for which the end of the fiscal year is available, how many end in June?
#
# Original data sources:
# http://data.worldbank.org/data-catalog/GDP-ranking-table
# http://data.worldbank.org/data-catalog/ed-stats
#
# download.file("http://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv ",
# destfile = "./data/grossDomesticProductData.csv")
# download.file("http://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv ",
# destfile = "./data/educacionalData.csv")
# Read the GDP ranking table: the real records start after 5 header lines and
# only the first 231 rows contain data. (The original call contained a stray
# empty argument after `skip=5`, which silently passed a missing positional
# argument; it has been removed — the default was used either way.)
gpd_data <- read.table(
  "./data/grossDomesticProductData.csv",
  skip = 5, sep = ",", quote = "", na.strings = "NA", fill = TRUE, nrows = 231
)
# Read the educational (EdStats) country metadata.
edu_data <- read.csv("./data/educacionalData.csv")
# Match the two tables on the country shortcode (column V1 holds the code in
# the header-less GDP table); keep only countries present in both.
merged_data <- merge(edu_data, gpd_data, by.x = "CountryCode", by.y = "V1", all = FALSE)
# Count countries whose special notes state a fiscal year ending in June.
length(grep("^Fiscal year end: June", merged_data$Special.Notes))
# Answer: 13
# [1] 11 18 31 58 74 102 109 157 164 179 189 206 224
|
553b36b336f8cdbb2fb052b410a35d313a0667b6 | c7da4f8328e9af42899256479e47db23dbf83677 | /render_report.R | 07d4b2e49d37a2d347c6adb865388d89fb58dbd2 | [] | no_license | UW-GAC/tagging_data_analysis | 6faec7d0dcc328c06a0d85fd8d668937f4b66a4a | 690dc7cf0f90b689252488a69b587f9fe26f2cce | refs/heads/master | 2020-09-23T09:01:05.110619 | 2019-12-24T01:40:14 | 2019-12-24T01:40:14 | 225,459,878 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 70 | r | render_report.R | #!/usr/bin/env Rscript
rmarkdown::render('analyze_tagging_data.Rmd')
|
3b4cf1f2bfccf6169c5b477fde4f9a3d10730c61 | 35b9530d0bd20b69e2508954183d6e52db45826f | /R/qrVersionInfo.R | 7764b77327d153c2597e4741a5906d30ddf788ef | [] | no_license | victorteh/qrcode | fd67c312d0465ddf70fc6ef35d70c443dd5eb496 | 7d3305d3fa134afced8f48d13a5b4c4540ddb2e9 | refs/heads/master | 2021-01-01T05:38:18.649874 | 2015-08-24T00:12:36 | 2015-08-24T00:12:36 | 41,254,512 | 3 | 3 | null | 2018-06-19T12:33:44 | 2015-08-23T15:12:48 | R | UTF-8 | R | false | false | 1,177 | r | qrVersionInfo.R | #' Function to identify the version of the QRcode based on input string
#'
#' @param dataString dataString is the input string
#' @param ECLevel Error Correction Level. In QRcode standard, the are 4 levels \"L\",\"M\",\"Q\" and \"H\" which represent 7\%, 15\%, 20\% and 30\% data recovery capability.
#'
#' @return 1 row dataframe that include all required info to generate QRcode.
#' @importFrom utils data head
#' @export
qrVersionInfo <- function(dataString, ECLevel = "L") {
  # Validate the error-correction level. The original behavior (warn and
  # continue rather than stop) is preserved for backward compatibility;
  # with an invalid level the lookup below yields an empty data frame.
  if (!ECLevel %in% c("L", "M", "Q", "H")) {
    warning('Wrong ECLevel. Allowed value are \"L\",\"M\",\"Q\" and \"H\"')
  }
  # Load the QRcode capacity/spec table shipped with the package.
  # The dummy assignment keeps R CMD check quiet about the visible binding;
  # data() then overwrites it in this function's environment.
  qrCodeSpec <- ""
  data(qrCodeSpec, envir = environment())
  # Choose the encoding mode: Alphanumeric when no lowercase letters or
  # unsupported symbols are present, otherwise Byte mode. For each mode,
  # pick the smallest QR version whose capacity fits the input length.
  if (length(grep("[a-z!?><;@#&()]", dataString)) == 0) {
    mode <- "0010" # Alphanumeric
    qrInfo <- head(qrCodeSpec[(qrCodeSpec$ECL == ECLevel &
      qrCodeSpec$Alphanumeric >= nchar(dataString)), c(1:2, 4, 6:11)], 1)
  } else {
    mode <- "0100" # Byte
    qrInfo <- head(qrCodeSpec[(qrCodeSpec$ECL == ECLevel &
      qrCodeSpec$Byte >= nchar(dataString)), c(1:2, 5:11)], 1)
  }
  qrInfo$mode <- mode
  qrInfo
}
|
7c6d5ea8c5d4f68d800b9af44cce218dc4311472 | bbb13d6e632191b6844053d897fb4a76d2063710 | /Bootstrapping-Consolidated.R | 7fc87b359172d92eb63a153d5ab8d6ff59dfd1c3 | [] | no_license | sabreenaabedin/SYS4021 | 94e8fa8aec0e214a791a935dabc9f4ec68171151 | 43d6821f99fe3835d9c4c14c4924d5763c46151b | refs/heads/master | 2020-03-23T13:23:50.235075 | 2018-07-19T18:34:20 | 2018-07-19T18:34:20 | 141,615,785 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,652 | r | Bootstrapping-Consolidated.R | #***************************************************************
# Transplant Center
# Observational Analysis
#***************************************************************
#***************************
# Load the transplant data
#***************************
# NOTE(review): the hard-coded setwd() ties this script to one machine;
# consider relative paths or here::here() — left unchanged for now.
setwd("/Users/sabreenaabedin/Desktop/class/SYS4021")
source("TSbootfunctions.R")
library(boot)
source("SPM_Panel.R")
source("Transplant.plots.R")
# Yearly transplant/donor counts per center and for Region 11 overall
r11xplant <- read.table("R11xplant.csv", sep = ",", header = T)
r11donor<-read.table("R11donor.csv", sep = ",", header = T)
uva <- read.table("UVAxplant.csv", sep = ",", header = T)
duke <- read.table("Dukexplant.csv", sep = ",", header = T)
mcv <- read.table("MCVxplant.csv", sep = ",", header = T)
unc <- read.table("UNCxplant.csv", sep = ",", header = T)
# How many years of transplant data are there?
nrow(r11xplant)
# 30 - 1 header = 29 years
# How many organs? What type of organs? 6
#*********************
# Scatter plot matrix
#*********************
# Create a scatter plot matrix for liver transplants using UVA, Duke, MCV, UNC, & Region 11 donors
liver<-data.frame(uva$Liver,duke$Liver,mcv$Liver,unc$Liver, r11donor$Liver)
uva.pairs(as.matrix(liver))
# Create a scatter plot matrix for pancreas transplants
pancreas<-data.frame(uva$Pancreas,duke$Pancreas,mcv$Pancreas,unc$Pancreas, r11donor$Pancreas)
uva.pairs(as.matrix(pancreas))
#*****************
# donortype.plot
#*****************
# donortype.plot/region.plot/center.plot are helpers sourced above
# from Transplant.plots.R
# remove the 30th observation (2017) since the data were not complete for that year
# DD means deceased donor; LD means living donor
donortype.plot(cbind(r11xplant$Lung_DD[-30], r11xplant$Lung_LD[-30], r11donor$Lung_DD[-30],r11donor$Lung_LD[-30]), title = "Lung")
donortype.plot(cbind(r11xplant$Heart_DD[-30], r11xplant$Heart_LD[-30], r11donor$Heart_DD[-30],r11donor$Heart_LD[-30]), title = "Heart")
donortype.plot(cbind(r11xplant$Liver_DD[-30], r11xplant$Liver_LD[-30], r11donor$Liver_DD[-30],r11donor$Liver_LD[-30]), title = "Liver")
#****************
# region.plot
#****************
region.plot(cbind(r11xplant$Heart[-30], r11donor$Heart[-30], uva$Heart[-30], unc$Heart[-30], mcv$Heart[-30], duke$Heart[-30]), title = "Heart")
region.plot(cbind(r11xplant$Liver[-30], r11donor$Liver[-30], uva$Liver[-30], unc$Liver[-30], mcv$Liver[-30], duke$Liver[-30]), title = "Liver")
region.plot(cbind(r11xplant$Kidney[-30], r11donor$Kidney[-30], uva$Kidney[-30], unc$Kidney[-30], mcv$Kidney[-30], duke$Kidney[-30]), title = "Kidney")
region.plot(cbind(r11xplant$Pancreas[-30], r11donor$Pancreas[-30], uva$Pancreas[-30], unc$Pancreas[-30], mcv$Pancreas[-30], duke$Pancreas[-30]), title = "Pancreas")
region.plot(cbind(r11xplant$Lung[-30], r11donor$Lung[-30], uva$Lung[-30], unc$Lung[-30], mcv$Lung[-30], duke$Lung[-30]), title = "Lung")
#***************
# center.plot
#***************
center.plot(cbind( uva$Pancreas[-30], unc$Pancreas[-30], mcv$Pancreas[-30], duke$Pancreas[-30]), title = "Pancreas")
center.plot(cbind( uva$Heart[-30], unc$Heart[-30], mcv$Heart[-30], duke$Heart[-30]), title = "Heart")
center.plot(cbind( uva$Kidney[-30], unc$Kidney[-30], mcv$Kidney[-30], duke$Kidney[-30]), title = "Kidney")
center.plot(cbind( uva$Liver[-30], unc$Liver[-30], mcv$Liver[-30], duke$Liver[-30]), title = "Liver")
center.plot(cbind( uva$All_Organs[-30], unc$All_Organs[-30], mcv$All_Organs[-30], duke$All_Organs[-30]), title = "All Organs")
#***************************************************************
# Transplant Center
# Bootstrapping 1
#***************************************************************
#************************************
# Part 1 - Bootstrap the differences
#************************************
# UVA-MCV
uva.kidney<-uva$Kidney
mcv.kidney<-mcv$Kidney
# Compute the difference between uva kidney transplants and mcv kidney transplants from 1988 to 2016
kid.diff<-ts(uva.kidney-mcv.kidney,1988,2016)
ts.plot(kid.diff,ylab='UVa-MCV',main = "Difference in Number of Transplants, UVA-MCV")
# Perform a paired t-test - shows significantly different
t.test(uva.kidney, mcv.kidney,paired=T)
# boot() resamples based on your chosen statistic
# here the statistic is the mean of the resampled differences
bs.mean<-function(x,i) { return(mean(x[i])) }
# Bootstrap mean differences - syntax: boot(data= , statistic= , R= ), R = # replications
bs.kid.diff<-boot(kid.diff,bs.mean,R=2000)
bs.kid.diff
# original = regular t-test results
# Bias = difference between the mean of the 2000 stored bootstrap samples and the original estimate
# std. error = standard deviation of the 2000 bootstrap samples and is an estimate of the standard error.
plot(bs.kid.diff,index=1)
# confidence intervals using bca and percentile
boot.ci(bs.kid.diff,0.95,type=c('bca','perc'))
#**********************************************
# Part 2- Bootstrap Regression and Time Series
#**********************************************
# uva$Liver=b0+b1*r11donor$Liver+e..
# (the incomplete 2017 row, observation 30, is dropped throughout)
uva.liver.lm<-lm(uva$Liver[-30]~r11donor$Liver[-30])
summary(uva.liver.lm)
# Diagnostics
par(mfrow=c(2,2))
plot(uva.liver.lm)
par(mfrow=c(1,1))
# residuals vs variance has non-constant variance, not centered around 0
# q-q plot is not normal
# has high cook's distance
# BOOTSTRAPPING BY RESIDUALS
# Get the fitted values from the regression model
uva.lfit <- fitted(uva.liver.lm)
# Get the residuals from the regression model
uva.le <- residuals(uva.liver.lm)
# Get the regression model - parameter estimates for model
uva.mod <- model.matrix(uva.liver.lm)
# Bootstrapping LM (RTSB is defined in TSbootfunctions.R, sourced earlier)
uva.liver.boot <- RTSB(uva$Liver[-30], r11donor$Liver[-30], uva.lfit, uva.le, uva.mod,5000)
# outcome variable, input variable, fitted, residuals, parameters, # replications
uva.liver.boot$t
sqrt(abs(var(uva.liver.boot$t)))
# 95% CI of r11donor
boot.ci(uva.liver.boot, .95, index=2)
# Distribution of b1
par(mfrow = c(1,2))
hist(uva.liver.boot$t[,2], main = "Region 11 Donors",xlab ="Coefficient Values", col = "steelblue", breaks = 50)
qqnorm(uva.liver.boot $t[,2])
qqline(uva.liver.boot $t[,2])
par(mfrow = c(1,1))
# looks much better
#***************************************************************
# Bootstrap Regression and Time Series 3
#***************************************************************
# Build a linear model, uva.kid.lm that predicts uva kidney transplants by region 11 kidney donors from 1988-2016
uva.kid.lm <- lm(uva$Kidney[-30]~r11donor$Kidney[-30])
summary(uva.kid.lm) # significant
# Diagnostics
par(mfrow=c(2,2))
plot(uva.kid.lm)
par(mfrow=c(1,1)) # not great
# BOOTSTRAPPING LINEAR MODEL
# fitted
uva.kfit <- fitted(uva.kid.lm)
# residuals
uva.ke <- residuals(uva.kid.lm)
# regression parameters
uva.mod <- model.matrix(uva.kid.lm)
# bootstrap coefficients for distribution (RTSB from TSbootfunctions.R)
uva.kid.boot <- RTSB(uva$Kidney[-30], r11donor$Kidney[-30], uva.kfit, uva.ke, uva.mod,2000)
uva.kid.boot
summary(uva.kid.lm)
# Get the 99% CI for uva.kid.boot
boot.ci(uva.kid.boot, .99)
# Plot the results for the coefficient for region 11 donors
plot(uva.kid.boot, index = 2)
# A set of configurable plots
par(mfrow = c(1,2))
hist(uva.kid.boot$t[,2], main = "Region 11 Donors",xlab ="Coefficient Values", col = "steelblue", breaks = 50)
qqnorm(uva.kid.boot$t[,2])
qqline(uva.kid.boot$t[,2])
par(mfrow = c(1,1))
# BOOTSTRAPPING TIME SERIES
# Evaluating residual correlation from the model uva.kid.lm
# Hint: use the acf() and pacf()
par(mfrow = c(1,2))
acf(uva.kid.lm$residuals)
pacf(uva.kid.lm$residuals)
par(mfrow = c(1,1))
# Fit an ar model to the residuals using the yule-walker method
diff.ar.kid <- ar(uva.kid.lm$residuals, method = "yule-walker")
# How many autoregressive terms are needed?
diff.ar.kid
# If we use diff.ar.kid: add the lag-1 residual as a regressor
uva.kid.lm2<- lm(uva$Kidney[2:29]~r11donor$Kidney[2:29] + uva.kid.lm$residuals[1:28])
summary(uva.kid.lm2)
# The problem here is we have only a few observations (28)
par(mfrow=c(2,2))
plot(uva.kid.lm2)
par(mfrow=c(1,1))
######## Repeat for liver transplants
# Build a linear model to predict uva liver transplants in terms of region 11 donors
# Model significance?
# Generate the diagnostic plots. Do you see any problems?
# Estimate the liver model with bootstrapping (by residuals). Is b1 significant?
# Bootstrapping LM
# What is the 95% CI of r11donor?
# Plot the distribution of B1
# Time series models for residuals of liver model
# Generate the ACF and PACF plots of the residuals from uva.liver.lm. What's your conclusion?
# Fit an ar model to the residuals, what order do you select?
# Bootstrap your time series model. Are the coefficients significant?
# Plot the results for the coefficient for region 11 donors and time series components
# What are the confidence intervals for each of the parameters?
|
d2187dc641320af15ffbd39bcfa70507388fdbfc | ce3bc493274116150497e73aa7539fef1c07442a | /R/model_functions.R | 9b8adc18f368dd3da64deabab9d0dedb2aebe3bb | [] | no_license | laresbernardo/lares | 6c67ff84a60efd53be98d05784a697357bd66626 | 8883d6ef3c3f41d092599ffbdd4c9c352a9becef | refs/heads/main | 2023-08-10T06:26:45.114342 | 2023-07-27T23:47:30 | 2023-07-27T23:48:57 | 141,465,288 | 235 | 61 | null | 2023-07-27T15:58:31 | 2018-07-18T17:04:39 | R | UTF-8 | R | false | false | 33,055 | r | model_functions.R | ####################################################################
#' Automated H2O's AutoML
#'
#' This function lets the user create a robust and fast model, using
#' H2O's AutoML function. The result is a list with the best model,
#' its parameters, datasets, performance metrics, variables
#' importance, and plots. Read more about the \code{h2o_automl()} pipeline
#' \href{https://laresbernardo.github.io/lares/articles/h2o_automl.html}{here}.
#'
#' @section List of algorithms:
#' \href{https://docs.h2o.ai/h2o/latest-stable/h2o-docs/automl.html}{-> Read more here}
#' \describe{
#' \item{DRF}{Distributed Random Forest, including Random Forest (RF)
#' and Extremely-Randomized Trees (XRT)}
#' \item{GLM}{Generalized Linear Model}
#' \item{XGBoost}{eXtreme Gradient Boosting}
#' \item{GBM}{Gradient Boosting Machine}
#' \item{DeepLearning}{Fully-connected multi-layer artificial neural network}
#' \item{StackedEnsemble}{Stacked Ensemble}
#' }
#'
#' @section Methods:
#' \describe{
#' \item{print}{Use \code{print} method to print models stats and summary}
#' \item{plot}{Use \code{plot} method to plot results using \code{mplot_full()}}
#' }
#'
#' @family Machine Learning
#' @inheritParams h2o::h2o.automl
#' @param df Dataframe. Dataframe containing all your data, including
#' the independent variable labeled as \code{'tag'}. If you want to define
#' which variable should be used instead, use the \code{y} parameter.
#' @param y Variable or Character. Name of the independent variable.
#' @param ignore Character vector. Force columns for the model to ignore
#' @param train_test Character. If needed, \code{df}'s column name with 'test'
#' and 'train' values to split
#' @param split Numeric. Value between 0 and 1 to split as train/test
#' datasets. Value is for training set. Set value to 1 to train with all
#' available data and test with same data (cross-validation will still be
#' used when training). If \code{train_test} is set, value will be overwritten
#' with its real split rate.
#' @param weight Column with observation weights. Giving some observation a
#' weight of zero is equivalent to excluding it from the dataset; giving an
#' observation a relative weight of 2 is equivalent to repeating that
#' row twice. Negative weights are not allowed.
#' @param target Value. Which is your target positive value? If
#' set to \code{'auto'}, the target with largest \code{mean(score)} will be
#' selected. Change the value to overwrite. Only used when binary
#' categorical model.
#' @param balance Boolean. Auto-balance train dataset with under-sampling?
#' @param impute Boolean. Fill \code{NA} values with MICE?
#' @param no_outliers Boolean/Numeric. Remove \code{y}'s outliers from the dataset?
#' Will remove those values that are farther than n standard deviations from
#' the independent variable's mean (Z-score). Set to \code{TRUE} for default (3)
#' or numeric to set a different multiplier.
#' @param unique_train Boolean. Keep only unique row observations for training data?
#' @param center,scale Boolean. Using the base function scale, do you wish
#' to center and/or scale all numerical values?
#' @param thresh Integer. Threshold for selecting binary or regression
#' models: this number is the threshold of unique values we should
#' have in \code{'tag'} (more than: regression; less than: classification)
#' @param seed Integer. Set a seed for reproducibility. AutoML can only
#' guarantee reproducibility if max_models is used because max_time is
#' resource limited.
#' @param max_models,max_time Numeric. Max number of models and seconds
#' you wish for the function to iterate. Note that max_models guarantees
#' reproducibility and max_time not (because it depends entirely on your
#' machine's computational characteristics)
#' @param start_clean Boolean. Erase everything in the current h2o
#' instance before we start to train models? You may want to keep other models
#' or not. To group results into a custom common AutoML project, you may
#' use \code{project_name} argument.
#' @param exclude_algos,include_algos Vector of character strings. Algorithms
#' to skip or include during the model-building phase. Set NULL to ignore.
#' When both are defined, only \code{include_algos} will be valid.
#' @param plots Boolean. Create plots objects?
#' @param alarm Boolean. Ping (sound) when done. Requires \code{beepr}.
#' @param quiet Boolean. Quiet all messages, warnings, recommendations?
#' @param print Boolean. Print summary when process ends?
#' @param save Boolean. Do you wish to save/export results into your
#' working directory?
#' @param subdir Character. In which directory do you wish to save
#' the results? Working directory as default.
#' @param project Character. Your project's name
#' @param ... Additional parameters on \code{h2o::h2o.automl}
#' @return List. Trained model, predicted scores and datasets used, performance
#' metrics, parameters, importance data.frame, seed, and plots when \code{plots=TRUE}.
#' @examples
#' \dontrun{
#' # CRAN
#' data(dft) # Titanic dataset
#' dft <- subset(dft, select = -c(Ticket, PassengerId, Cabin))
#'
#' # Classification: Binomial - 2 Classes
#' r <- h2o_automl(dft, y = Survived, max_models = 1, impute = FALSE, target = "TRUE", alarm = FALSE)
#'
#' # Let's see all the stuff we have inside:
#' lapply(r, names)
#'
#' # Classification: Multi-Categorical - 3 Classes
#' r <- h2o_automl(dft, Pclass, ignore = c("Fare", "Cabin"), max_time = 30, plots = FALSE)
#'
#' # Regression: Continuous Values
#' r <- h2o_automl(dft, y = "Fare", ignore = c("Pclass"), exclude_algos = NULL, quiet = TRUE)
#' print(r)
#'
#' # WITH PRE-DEFINED TRAIN/TEST DATAFRAMES
#' splits <- msplit(dft, size = 0.8)
#' splits$train$split <- "train"
#' splits$test$split <- "test"
#' df <- rbind(splits$train, splits$test)
#' r <- h2o_automl(df, "Survived", max_models = 1, train_test = "split")
#' }
#' @export
h2o_automl <- function(df, y = "tag",
                       ignore = NULL,
                       train_test = NA,
                       split = 0.7,
                       weight = NULL,
                       target = "auto",
                       balance = FALSE,
                       impute = FALSE,
                       no_outliers = TRUE,
                       unique_train = TRUE,
                       center = FALSE,
                       scale = FALSE,
                       thresh = 10,
                       seed = 0,
                       nfolds = 5,
                       max_models = 3,
                       max_time = 10 * 60,
                       start_clean = FALSE,
                       exclude_algos = c("StackedEnsemble", "DeepLearning"),
                       include_algos = NULL,
                       plots = TRUE,
                       alarm = TRUE,
                       quiet = FALSE,
                       print = TRUE,
                       save = FALSE,
                       subdir = NA,
                       project = "AutoML Results",
                       verbosity = NULL,
                       ...) {
  # Time the whole pipeline; the elapsed time is reported on exit
  tic(id = "h2o_automl")
  on.exit(toc(id = "h2o_automl", msg = "Process duration:", quiet = quiet))
  if (!quiet) message(paste(Sys.time(), "| Started process..."))
  # Start (or attach to) a local H2O cluster. Note: the logical `quiet`
  # argument does not shadow the quiet() function here — R's call lookup
  # skips non-function bindings when resolving quiet(...).
  quiet(h2o.init(nthreads = -1, port = 54321))
  df <- as.data.frame(df)
  # NSE: accept the dependent variable quoted ("Survived") or unquoted (Survived)
  y <- gsub('"', "", as_label(enquo(y)))
  # PROCESS THE DATA (split, balance, impute, outliers, scaling, etc.)
  processed <- model_preprocess(
    df,
    y = y,
    train_test = train_test,
    split = split,
    weight = weight,
    target = target,
    balance = balance,
    impute = impute,
    no_outliers = no_outliers,
    unique_train = unique_train,
    center = center,
    scale = scale,
    thresh = thresh,
    seed = seed,
    quiet = quiet
  )
  # PROCESSED DATA: TRAIN AND TEST
  df <- processed$data
  train <- df[processed$train_index, ]
  test <- df[-processed$train_index, ]
  # split = 1 leaves no test rows: evaluate on the training data itself
  if (nrow(test) == 0) test <- train
  # MODEL TYPE (based on inputs + thresh value)
  model_type <- processed$model_type
  # ALGORITHMS: include_algos, when given, takes precedence over exclude_algos
  if (length(exclude_algos) > 0 && length(include_algos) == 0 && !quiet) {
    message(paste("- ALGORITHMS: excluded", vector2text(exclude_algos)))
  }
  if (length(include_algos) > 0 && !quiet) {
    message(paste("- ALGORITHMS: included", vector2text(include_algos)))
    exclude_algos <- NULL
  }
  # START FRESH? Optionally wipe every model in the running H2O instance
  if (!quiet && !isTRUE(start_clean)) {
    message(sprintf(
      paste(
        "- CACHE: Previous models %s being erased.",
        "You may use 'start_clean' [clear] or 'project_name' [join]"
      ),
      ifelse(start_clean, "are", "are not")
    ))
  }
  if (start_clean) quiet(h2o.removeAll())
  # INFORMATIVE MSG ON FLOW's UI (skipped inside containers with HOSTNAME set)
  flow <- "http://localhost:54321/flow/index.html"
  if (!quiet && Sys.getenv("HOSTNAME") == "") {
    message("- UI: You may check results using H2O Flow's interactive platform: ", flow)
  }
  # RUN AUTOML
  if (!quiet) {
    message(sprintf(">>> Iterating until %s models or %s seconds...", max_models, max_time))
  }
  training <- .quiet_h2o(as.h2o(train), quiet = TRUE)
  aml <- .quiet_h2o(h2o.automl(
    x = colnames(df)[!colnames(df) %in% c("tag", ignore)],
    y = "tag",
    training_frame = training,
    weights_column = weight,
    max_runtime_secs = max_time,
    max_models = max_models,
    exclude_algos = exclude_algos,
    include_algos = include_algos,
    nfolds = nfolds,
    # project_name = project,
    seed = seed,
    verbosity = verbosity,
    ...
  ), quiet = quiet)
  if (nrow(aml@leaderboard) == 0) {
    warning("NO MODELS TRAINED. Please set max_models to at least 1 and increase max_time")
  } else {
    if (!is.nan(aml@leaderboard[1, 2])) {
      if (!quiet) {
        message(paste("- EUREKA: Succesfully generated", nrow(aml@leaderboard), "models"))
        if (print) print(head(aml@leaderboard, 3))
      }
    }
  }
  # GET RESULTS AND PERFORMANCE (predictions, metrics, importances, plots)
  results <- h2o_results(
    aml, test, train, y,
    which = 1,
    model_type = model_type,
    target = target,
    split = split,
    plots = plots,
    project = project,
    ignore = ignore,
    seed = seed,
    quiet = quiet
  )
  # Optionally persist results and model binaries to disk
  if (save) {
    export_results(results, subdir = subdir, thresh = thresh)
    if (!quiet) message("- EXPORT: Results and model files exported succesfully!")
  }
  if (!quiet && print) print(results)
  # Audible notification (requires the optional beepr package)
  if (alarm && !quiet) {
    try_require("beepr", stop = FALSE)
    try(beep())
  }
  attr(results, "type") <- "h2o_automl"
  return(results)
}
#' @rdname h2o_automl
#' @aliases h2o_automl
#' @param x h2o_automl object
#' @export
plot.h2o_automl <- function(x, ...) {
  # Guard: this S3 method only supports h2o_automl objects
  if (!inherits(x, "h2o_automl")) {
    stop("Object must be class h2o_automl")
  }
  # If plots were pre-computed during training, return the stored dashboard
  if ("plots" %in% names(x)) {
    return(x$plots$dashboard)
  }
  # Otherwise build the full dashboard on the fly from the test-set scores
  per_class_scores <- select(x$scores_test, -.data$tag, -.data$score)
  invisible(mplot_full(
    tag = x$scores_test$tag,
    score = x$scores_test$score,
    multis = per_class_scores
  ))
}
#' @rdname h2o_automl
#' @aliases h2o_automl
#' @param importance Boolean. Print important variables?
#' @export
print.h2o_automl <- function(x, importance = TRUE, ...) {
  if (!inherits(x, "h2o_automl")) {
    stop("Object must be class h2o_automl")
  }
  # aux collects the optional text sections (metrics, importances)
  aux <- list()
  # Position of the selected model within the leaderboard
  selected <- which(as.vector(x$leaderboard$model_id) == x$model_name)
  n_models <- nrow(x$leaderboard)
  data_points <- nrow(x$datasets$global)
  split <- round(100 * x$split)
  # For classifiers, append the number of classes seen in the training rows
  if (x$type == "Classification") {
    cats <- filter(x$datasets$global, grepl("train", .data$train_test)) %>%
      .[, x$y] %>%
      unique() %>%
      nrow()
    x$type <- sprintf("%s (%s classes)", x$type, cats)
  }
  # One "name = value" line per test metric
  aux[["met"]] <- glued(
    "Test metrics:
    {v2t({met}, sep = '\n', quotes = FALSE)}",
    met = paste(
      " ",
      names(x$metrics$metrics), "=",
      signif(x$metrics$metrics, 5)
    )
  )
  # Top-5 variable importances (absent for stacked ensembles)
  if ("importance" %in% names(x) && importance == TRUE) {
    if (nrow(x$importance) > 0) {
      aux[["imp"]] <- glued(
        "Most important variables:
        {v2t({imp}, sep = '\n', quotes = FALSE)}",
        imp = paste(
          " ",
          x$importance %>% head(5) %>%
            mutate(label = sprintf(
              "%s (%s)",
              .data$variable,
              formatNum(100 * .data$importance, 1, pos = "%")
            )) %>%
            pull(.data$label)
        )
      )
    }
  }
  # Final summary block; missing aux entries render as empty strings
  print(glued("
  Model ({selected}/{n_models}): {x$model_name}
  Independent Variable: {x$y}
  Type: {x$type}
  Algorithm: {toupper(x$algorithm)}
  Split: {split}% training data (of {data_points} observations)
  Seed: {x$seed}
  {aux$met}
  {aux$imp}"))
}
####################################################################
#' Automated H2O's AutoML Results
#'
#' This is an auxiliary function to calculate predictions and results
#' when using the \code{h2o_automl()} function.
#'
#' @inheritParams h2o_automl
#' @param h2o_object H2O Leaderboard (H2OFrame/H2OAutoML) or Model (h2o)
#' @param test,train Dataframe. Must have the same columns
#' @param which Integer. Which model to select from leaderboard
#' @param model_type Character. Select "Classification" or "Regression"
#' @param ignore Character vector. Columns to ignore
#' @param leaderboard H2O's Leaderboard. Passed when using
#' \code{h2o_selectmodel} as it contains plain model and no leader board.
#' @return List. Trained model, predicted scores and datasets used, performance
#' metrics, parameters, importance data.frame, seed, and plots when \code{plots=TRUE}.
#' @export
h2o_results <- function(h2o_object, test, train, y = "tag", which = 1,
                        model_type, target = "auto", split = 0.7,
                        ignore = NULL, quiet = FALSE,
                        project = "ML Project", seed = 0,
                        leaderboard = list(),
                        plots = TRUE,
                        ...) {
  # MODEL TYPE
  types <- c("Classification", "Regression")
  check_opts(model_type, types)
  # thresh is forwarded to model_metrics(): 10000 forces the classification
  # path, 0 the regression path (it is a unique-values threshold downstream)
  thresh <- ifelse(model_type == types[1], 10000, 0)
  # When using h2o_select: inputs already carry a train_test column, so
  # rebuild the tag column, drop post-split columns, and recompute the split
  if ("train_test" %in% colnames(test)) {
    colnames(test)[colnames(test) == y] <- "tag"
    colnames(train)[colnames(train) == y] <- "tag"
    test <- test[, 1:(which(colnames(test) == "train_test") - 1)]
    train <- train[, 1:(which(colnames(train) == "train_test") - 1)]
    split <- round(nrow(train) / (nrow(train) + nrow(test)), 2)
  }
  # GLOBAL DATAFRAME FROM TEST AND TRAIN (test rows first, then train rows;
  # .get_scores() relies on this ordering)
  if (!all(colnames(train) %in% colnames(test))) {
    stop("All columns from train data must be present on test data as well!")
  }
  if (split == 1) {
    global <- train %>% mutate(train_test = "train_test")
  } else {
    global <- data.frame(test) %>%
      bind_rows(train) %>%
      mutate(train_test = c(rep("test", nrow(test)), rep("train", nrow(train))))
  }
  colnames(global)[colnames(global) == "tag"] <- y
  if (model_type == "Classification") {
    cats <- unique(global[, colnames(global) == y])
  } else {
    cats <- "None"
  }
  # SELECT MODEL FROM h2o_automl()
  if (any(c("H2OFrame", "H2OAutoML") %in% class(h2o_object))) {
    # Note: Best model from leaderboard is which = 1
    m <- h2o.getModel(as.vector(h2o_object@leaderboard$model_id[which]))
    if (!quiet) message(paste("SELECTED MODEL:", as.vector(m@model_id)))
  } else {
    m <- h2o_object
  }
  # VARIABLES IMPORTANCES
  # https://docs.h2o.ai/h2o/latest-stable/h2o-docs/variable-importance.html
  # Stacked ensembles expose no per-variable importances
  if (sum(grepl("Stacked", as.vector(m@model_id))) > 0) {
    stacked <- TRUE
    if (!quiet) message("- NOTE: No importance features for Stacked Ensemble Models")
  } else {
    stacked <- FALSE
  }
  if (!stacked) {
    # Normalize h2o.varimp() column names across algorithms
    # (GLM: names/coefficients; tree models: percentage)
    imp <- data.frame(h2o.varimp(m)) %>%
      {
        if ("names" %in% colnames(.)) {
          rename(., "variable" = "names", "importance" = "coefficients")
        } else {
          .
        }
      } %>%
      {
        if ("percentage" %in% colnames(.)) {
          rename(., "importance" = "percentage")
        } else {
          .
        }
      }
    # Variables contributing less than 1/(4 * n_vars) of the importance
    noimp <- if (nrow(imp) > 0) {
      dplyr::filter(imp, .data$importance < 1 / (nrow(imp) * 4)) %>%
        arrange(desc(.data$importance))
    } else {
      imp
    }
    if (nrow(noimp) > 0) {
      topn <- noimp %>%
        ungroup() %>%
        slice(1:8)
      # NOTE: this overwrites the `which` argument; harmless because model
      # selection already happened above, but worth keeping in mind
      which <- vector2text(topn$variable, quotes = FALSE)
      if (nrow(noimp) > 8) {
        which <- paste(which, "and", nrow(noimp) - 8, "other...")
      }
      if (!quiet) {
        message(paste("- NOTE: The following variables were the least important:", which))
      }
    }
  }
  # GET PREDICTIONS on the full (test + train) dataset
  if (!quiet) message(paste0(">>> Running predictions for ", y, "..."))
  predictions <- .quiet_h2o(h2o_predict_model(global, m), quiet = TRUE)
  global <- cbind(global, predictions)
  # Change dots for space (restore category labels that contained spaces)
  if (sum(grepl(" ", cats)) > 0) {
    colnames(global) <- str_replace_all(colnames(global), "\\.", " ")
  }
  # For performance metrics: keep only the test-partition scores
  scores_test <- .get_scores(
    predictions, test,
    model_type = model_type,
    target = target,
    cats = cats
  )
  multis <- scores_test$multis
  scores <- scores_test$scores
  # # Used for train metrics
  # scores_train <- .get_scores(
  #   predictions, train,
  #   model_type = model_type,
  #   target = target, cats = cats)
  # scores_tr <- scores_train$scores
  # multis_tr <- scores_train$multis
  # scores_train <- data.frame(tag = as.vector(train$tag), scores_tr)
  # GET ALL RESULTS INTO A LIST
  results <- list()
  results[["model"]] <- m
  results[["y"]] <- y
  results[["scores_test"]] <- data.frame(tag = as.vector(test$tag), scores)
  results[["metrics"]] <- model_metrics(
    tag = results$scores_test$tag,
    score = results$scores_test$score,
    multis = multis,
    thresh = thresh,
    target = target,
    model_name = as.vector(m@model_id),
    plots = plots
  )
  # Cross-validation metric summary (absent when CV was disabled)
  cvresults <- m@model$cross_validation_metrics_summary
  if (!is.null(cvresults)) {
    results$metrics[["cv_metrics"]] <- as_tibble(
      data.frame(
        metric = rownames(cvresults),
        mutate_all(cvresults, list(~ as.numeric(as.character(.))))
      )
    )
  }
  # Extra CV tables: max-metrics for binary, hit ratios for multi-class
  if (model_type == "Classification") {
    if (length(cats) == 2) {
      results$metrics[["max_metrics"]] <- data.frame(
        m@model$cross_validation_metrics@metrics$max_criteria_and_metric_scores
      )
    }
    if (length(cats) > 2) {
      results$metrics[["hit_ratio"]] <- data.frame(
        m@model$cross_validation_metrics@metrics$hit_ratio_table
      )
    }
  }
  # results[["parameters"]] <- m@parameters[
  #   sapply(m@parameters, function(x) length(x) == 1)] %>%
  #   bind_rows() %>% tidyr::gather(key = "parameter")
  results[["parameters"]] <- m@parameters
  if (!stacked) results[["importance"]] <- imp
  # "test" matches both the "test" and the "train_test" (split = 1) labels
  results[["datasets"]] <- list(
    global = as_tibble(global),
    test = filter(global, grepl("test", .data$train_test))
  )
  # results[["metrics_train"]] <- model_metrics(
  #   tag = scores_train$tag,
  #   score = scores_train$score,
  #   multis = multis_tr,
  #   thresh = thresh,
  #   target = target,
  #   model_name = as.vector(m@model_id),
  #   type = "train")
  results[["scoring_history"]] <- as_tibble(m@model$scoring_history)
  results[["categoricals"]] <- list_cats(filter(global, grepl("train", .data$train_test)))
  results[["type"]] <- model_type
  results[["split"]] <- split
  if (model_type == "Classification") {
    results[["threshold"]] <- thresh
  }
  results[["model_name"]] <- as.vector(m@model_id)
  results[["algorithm"]] <- m@algorithm
  if (any(c("H2OFrame", "H2OAutoML") %in% class(h2o_object))) {
    results[["leaderboard"]] <- h2o_object@leaderboard
  }
  # An explicitly passed leaderboard (from h2o_selectmodel) wins
  if (length(leaderboard) > 0) {
    results[["leaderboard"]] <- leaderboard
  }
  results[["project"]] <- project
  results[["y"]] <- y
  results[["ignored"]] <- ignore
  results[["seed"]] <- seed
  results[["h2o"]] <- h2o.getVersion()
  if (plots) {
    if (!quiet) message(">>> Generating plots...")
    plots <- list()
    plots[["dashboard"]] <- mplot_full(
      tag = results$scores_test$tag,
      score = results$scores_test$score,
      multis = multis,
      thresh = thresh,
      subtitle = results$project,
      model_name = results$model_name,
      plot = FALSE
    )
    plots[["metrics"]] <- results$metrics$plots
    results$metrics$plots <- NULL
    if (length(multis) > 1) {
      plots[["top_cats"]] <- mplot_topcats(
        tag = results$scores_test$tag,
        score = results$scores_test$score,
        multis = multis,
        model_name = results$model_name
      )
    }
    if (!stacked) {
      plots[["importance"]] <- mplot_importance(
        var = results$importance$variable,
        imp = results$importance$importance,
        model_name = results$model_name,
        subtitle = results$project
      )
    }
    # NOTE(review): results$metrics$plots was set to NULL above, so this
    # append is a no-op — possibly meant plots$metrics instead; confirm
    plots <- append(plots, rev(as.list(results$metrics$plots)))
    results$plots <- plots
  }
  attr(results, "type") <- "h2o_automl"
  class(results) <- c("h2o_automl", class(results))
  return(results)
}
# Internal: slice model predictions for the test or train partition and
# shape them for metric computation.
# - predictions: data.frame of h2o predictions for rbind(test, train)
#   (test rows first — the ordering built by h2o_results)
# - traintest: the test or train data.frame. NOTE: the caller's variable
#   NAME (literally `test` or `train`) selects the slice — fragile, but
#   kept for compatibility with existing callers.
# - model_type: "Classification" or "Regression"
# - target: positive class for binary models ("auto" = keep as-is)
# - cats: vector of observed categories ("None" for regression)
# Returns list(scores = <scores>, multis = <per-class data.frame or NA>)
.get_scores <- function(predictions,
                        traintest,
                        model_type,
                        target = "auto",
                        cats) {
  type <- deparse(substitute(traintest))
  # Train or Test data
  nrows <- nrow(traintest)
  if (type == "test") {
    scores <- predictions[1:nrows, ]
  }
  if (type == "train") {
    # Fix: the train partition is the LAST `nrows` rows of `predictions`.
    # The original `(nrows + 1):nrows` built a reversed 2-element sequence
    # and selected the wrong rows.
    scores <- predictions[(nrow(predictions) - nrows + 1):nrow(predictions), ]
  }
  # Selected target value (binary classification only)
  if (target != "auto" && length(cats) == 2) {
    scores <- target_set(
      tag = as.vector(traintest$tag),
      score = scores[, 2],
      target = target,
      quiet = TRUE
    )$df
  }
  # Multis object and standard predictions output
  multis <- NA
  if (model_type == "Classification") {
    if (length(cats) == 2) {
      # Binary: keep the first numeric (probability) column as the score
      scores <- select_if(scores, is.numeric) %>% .[, 1]
    } else {
      # Multi-class: first column is the predicted label, rest are per-class
      colnames(scores)[1] <- "score"
      multis <- select(scores, -.data$score)
    }
  } else {
    scores <- data.frame(score = as.vector(scores))
  }
  ret <- list(scores = scores, multis = multis)
  return(ret)
}
####################################################################
#' Select Model from h2o_automl's Leaderboard
#'
#' Select wich model from the h2o_automl function to use
#'
#' @family Machine Learning
#' @family Tools
#' @inheritParams h2o_automl
#' @param results \code{h2o_automl()} object.
#' @param which_model Integer. Which model from the leaderboard you wish to use?
#' @return H2O processed model
#' @export
h2o_selectmodel <- function(results, which_model = 1, quiet = FALSE, ...) {
  check_attr(results, attr = "type", check = "h2o_automl")
  # Select model (best one by default) from the leaderboard
  ntop <- nrow(results$leaderboard)
  if (which_model > ntop) {
    stop("Select a valid model ID. Range: 1 to ", ntop)
  }
  model_id <- as.vector(results$leaderboard$model_id[which_model])
  if (!quiet) message("Model selected: ", model_id)
  m <- h2o.getModel(model_id)
  d <- results$datasets
  # Recompute every output (scores, metrics, plots) for the chosen model
  output <- h2o_results(
    m,
    test = d$test,
    # Fixed: the train argument must contain the rows flagged "train"
    # (it previously filtered on "test", feeding test rows in as train data)
    train = filter(d$global, grepl("train", .data$train_test)),
    y = results$y,
    which = which_model,
    model_type = results$type,
    project = results$project,
    leaderboard = results$leaderboard,
    seed = results$seed,
    quiet = TRUE,
    ...
  )
  if (!quiet) print(output)
  return(output)
}
####################################################################
#' Export h2o_automl's Results
#'
#' Export RDS, TXT, POJO, MOJO and all results from \code{h2o_automl()}.
#'
#' @family Machine Learning
#' @family Tools
#' @param results \code{h2o_automl} or \code{h2o} model
#' @param thresh Integer. Threshold for selecting binary or regression
#' models: this number is the threshold of unique values we should
#' have in 'tag' (more than: regression; less than: classification)
#' @param which Character vector. Select which file format to export:
#' Possible values: txt, csv, rds, binary, mojo, plots. You might also
#' use dev (txt, csv, rds) or production (binary, mojo) or simply don't use
#' parameter to export everything
#' @param note Character. Add a note to the txt file. Useful when lots of
#' models are trained and saved to remember which one is which one
#' @param subdir Character. In which directory do you wish to save
#' the results?
#' @param save Boolean. Do you wish to save/export results?
#' @param seed Numeric. For reproducible results and random splits.
#' @return No return value, called for side effects.
#' @export
export_results <- function(results,
                           thresh = 10,
                           which = c(
                             "txt", "csv", "rds",
                             "binary", "mojo", "plots",
                             "dev", "production"
                           ),
                           note = NA,
                           subdir = NA,
                           save = TRUE,
                           seed = 0) {
  if (save) {
    quiet(h2o.init(nthreads = -1, port = 54321))
    # `results` may be a full h2o_automl list or a raw H2O model; wrap the
    # latter in a list so the code below can use results$model uniformly.
    # `pass` flags whether the h2o_automl-only fields are available.
    pass <- !is.null(attr(results, "type"))
    if (!pass) results <- list(model = results)
    stopifnot(grepl("H2O", class(results$model)))
    name <- ifelse(pass, results$model_name, results$model@model_id)
    subdir <- paste0(ifelse(is.na(subdir), "", subdir), "/", name)
    if (substr(subdir, 1, 1) == "/") subdir <- substr(subdir, 2, nchar(subdir))
    # Directory to save all our results (created if missing)
    dir <- file.path(subdir)
    message(paste("Export directory:", dir))
    if (!dir.exists(dir)) {
      message("Creating directory: ", subdir)
      dir.create(dir, recursive = TRUE)
    }
    # Aliases: "dev" and "production" expand into concrete export formats
    if ("dev" %in% which) which <- unique(c(which, "txt", "csv", "rds"))
    if ("production" %in% which) which <- unique(c(which, "binary", "mojo"))
    # Fixed operator precedence: previously `a || b && pass` parsed as
    # `a || (b && pass)`, so requesting "txt" on a raw H2O model (pass FALSE)
    # entered this branch and read h2o_automl-only fields.
    if (("txt" %in% which || !is.na(note)[1]) && pass) {
      # NOTE(review): seeding on exit does not make sample_n() below
      # reproducible within this call -- confirm intent
      on.exit(set.seed(seed))
      results_txt <- list(
        "Project" = results$project,
        "Note" = note,
        "Model Type" = results$type,
        "Algorithm" = results$algorithm,
        "Model name" = name,
        "Train/Test" = table(results$datasets$global$train_test),
        "Metrics Glossary" = results$metrics$dictionary,
        "Test Metrics" = results$metrics$metrics,
        "Test Metrics by labels" = if (length(results$metrics$metrics_tags) > 1) {
          results$metrics$metrics_tags
        } else {
          "NA"
        },
        "Test's Confusion Matrix" = if (length(results$metrics$confusion_matrix) > 1) {
          results$metrics$confusion_matrix
        } else {
          NULL
        },
        "Predicted Variable" = results$y,
        "Ignored Variables" = results$ignored,
        "Variables Importance" = results$importance,
        "H2O Global Results" = results$model,
        "Leaderboard" = results$leaderboard,
        "Data examples" = data.frame(sample_n(results$datasets$global, 10)),
        "Seed" = results$seed,
        "H20 Version" = results$h2o
      )
      if (is.na(note)[1]) results_txt$Note <- NULL
      capture.output(results_txt, file = paste0(dir, "/", name, ".txt"))
      cats <- lapply(results$categoricals, data.frame)
      # Fixed: use the exact element name `ignored` (was `results$ignore`,
      # which only worked through `$` partial matching)
      aux <- cats[names(cats)[!names(cats) %in% results$ignored]]
      capture.output(aux, file = paste0(dir, "/", name, "_cats.txt"))
      message(">>> Summary text files saved...")
    }
    # Export CSV with predictions and datasets
    if ("csv" %in% which && pass) {
      write.csv(results$datasets$global,
        paste0(dir, "/", name, ".csv"),
        row.names = FALSE
      )
      message(">>> CSV file exported...")
    }
    # Export the whole results list as RDS
    if ("rds" %in% which) {
      saveRDS(results, file = paste0(dir, "/", name, ".rds"))
      message(">>> RDS file exported...")
    }
    # Export Model as MOJO (zip + genmodel jar) for production scoring
    if ("mojo" %in% which) {
      h2o.download_mojo(results$model, path = dir, get_genmodel_jar = TRUE)
      message(">>> MOJO (zip + jar files) exported...")
    }
    # if (pojo) h2o.download_pojo(results$model, path = dir)
    # Export H2O binary model (loadable with h2o.loadModel)
    if ("binary" %in% which) {
      h2o.saveModel(results$model, path = dir, force = TRUE)
      message(">>> Binary file saved...")
    }
    if ("plots" %in% which && "plots" %in% names(results) && pass) {
      message(">>> Saving plots...")
      # Metrics plots
      aux <- results$plots$metrics
      for (i in seq_along(aux)) {
        export_plot(aux[[i]],
          name = names(aux)[i],
          width = 8, height = 6, res = 300,
          subdir = paste0(subdir, "/Plots"),
          quiet = TRUE
        )
      }
      # Other plots (everything except the metrics sub-list)
      aux <- results$plots
      aux$metrics <- NULL
      for (i in seq_along(aux)) {
        export_plot(aux[[i]],
          name = names(aux)[i],
          width = 8, height = 6, res = 300,
          subdir = paste0(subdir, "/Plots"),
          quiet = TRUE
        )
      }
      message(">>> Plots saved...")
    }
    # Fixed typo in user-facing message ("Succesfully")
    message(paste("Successfully exported files:", vector2text(which)))
  }
}
####################################################################
#' Split a dataframe for training and testing sets
#'
#' This function splits automatically a dataframe into train and
#' test datasets. You can define a seed to get the same results
#' every time, but has a default value. You can prevent it from
#' printing the split counter result.
#'
#' @family Machine Learning
#' @family Tools
#' @param df Dataframe
#' @param size Numeric. Split rate value, between 0 and 1. If set to
#' 1, the train and test set will be the same.
#' @param seed Integer. Seed for random split
#' @param print Boolean. Print summary results?
#' @return List with both datasets, summary, and split rate.
#' @examples
#' data(dft) # Titanic dataset
#' splits <- msplit(dft, size = 0.7, seed = 123)
#' names(splits)
#' @export
msplit <- function(df, size = 0.7, seed = 0, print = TRUE) {
  # Validate the split ratio: must be in (0, 1]
  if (size <= 0 || size > 1) stop("Set size parameter to a value >0 and <=1")
  # Fixed: seed the RNG *before* sampling. The previous on.exit(set.seed(seed))
  # only seeded the RNG after returning, so the documented "same results every
  # time" reproducibility never applied to this call's sample().
  set.seed(seed)
  df <- data.frame(df)
  if (size == 1) {
    # Degenerate case: train and test are both the full data set
    train <- test <- df
    ind <- seq_len(nrow(df))
  } else {
    ind <- sample(seq_len(nrow(df)), size = floor(size * nrow(df)))
    train <- df[ind, ]
    test <- df[-ind, ]
  }
  # Named summary: first element of dim() is the row count of each split
  summary <- rbind(train_size = dim(train), test_size = dim(test))[, 1]
  if (print) print(summary)
  list(
    train = train,
    test = test,
    summary = summary,
    split_size = size,
    train_index = ind
  )
}
####################################################################
#' Set Target Value in Target Variable
#'
#' This function detects or forces the target value when predicting
#' a categorical binary model. This is an auxiliary function.
#'
#' @param tag Vector. Real known label
#' @param score Vector. Predicted value or model's result
#' @param target Value. Which is your target positive value? If
#' set to 'auto', the target with largest mean(score) will be
#' selected. Change the value to overwrite. Only used when binary
#' categorical model.
#' @param quiet Boolean. Do not show message for auto target?
#' @return List. Contains original data.frame \code{df} and
#' \code{which} with the target variable.
#' @export
target_set <- function(tag, score, target = "auto", quiet = FALSE) {
  df <- data.frame(tag = tag, score = score)
  # Validate inputs: tag must be categorical, score must be numeric
  if (!is.numeric(score) || is.numeric(tag)) {
    stop("Your tag must be categorical. Your score must be numerical.")
  }
  # Mean score per label; the label with the highest mean is the "auto" target
  label_means <- df %>%
    group_by(.data$tag) %>%
    summarise(mean = mean(.data$score))
  top_label <- label_means$tag[label_means$mean == max(label_means$mean)]
  # Tie-break: prefer the requested target when it is among the tied labels,
  # otherwise keep the first tied label
  if (length(top_label) > 1) {
    top_label <- if (any(top_label %in% target)) target else top_label[1]
  }
  if (target == "auto") {
    target <- top_label
  }
  if (!target %in% unique(df$tag)) {
    stop(paste(
      "Your target value", target, "is not valid.",
      "Possible other values:", vector2text(unique(df$tag))
    ))
  }
  if (!quiet) message(paste("Target value:", target))
  # If the forced target is the lower-scoring label, invert the scores
  if (top_label != target) df$score <- df$score * (-1) + 1
  list(df = df, which = target)
}
####################################################################
#' Iterate Seeds on AutoML
#'
#' This functions lets the user iterate and search for best seed. Note that if
#' the results change a lot, you are having a high variance in your data.
#'
#' @family Machine Learning
#' @inheritParams h2o_automl
#' @param tries Integer. Number of iterations
#' @param ... Additional arguments passed to \code{h2o_automl}
#' @return data.frame with performance results by seed tried on every row.
#' @export
iter_seeds <- function(df, y, tries = 10, ...) {
  seeds <- data.frame()
  for (i in seq_len(tries)) {
    # Train one AutoML run per seed and append its performance row
    model <- h2o_automl(df, y, seed = i, quiet = TRUE, ...)
    seeds <- rbind(seeds, cbind(seed = i, model$metrics$metrics))
    # Fixed: arrange(seeds, desc(2)) sorted by the literal constant 2 (a
    # no-op). Sort by the first performance metric (2nd column), best first.
    seeds <- seeds[order(seeds[[2]], decreasing = TRUE), ]
    statusbar(i, tries, seeds[1, 1])
  }
  seeds
}
.quiet_h2o <- function(..., quiet = TRUE) {
  # Evaluate an h2o expression, optionally hiding h2o's progress bars.
  # Fixed: previously the bar was disabled *on exit* (after evaluation) and
  # re-enabled immediately, so evaluation was never actually quiet and the
  # bar was left disabled for subsequent calls. Disable now, restore on exit
  # (on.exit also covers the case where eval() errors).
  if (quiet) {
    h2o.no_progress()
    on.exit(h2o.show_progress(), add = TRUE)
  }
  eval(...)
}
|
e37e2f9984211eba7bd37ca8474b6966cd7d1563 | 4e0a47f3995d3c7834270a06479cee615468aa5a | /lab programs/1(a).R | 3161f740e3793759c31c155cb547eb7ba27e93b0 | [] | no_license | madhuri-n/DSR | 0f9d53ffa75cba3a184c3813ed2dde102aa3c263 | 5e3fb63b2356ca8742959e25b3a57f07e13bee41 | refs/heads/master | 2020-09-13T04:00:26.508233 | 2019-11-19T19:33:05 | 2019-11-19T19:33:05 | 222,649,502 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 176 | r | 1(a).R | path="C:/Users/Sangu/Desktop/DATA_SET"
# Move into the directory that holds the data set.
# NOTE(review): `path` is assigned on the preceding (garbled) line as a
# machine-specific absolute Windows path -- not portable.
setwd(path)
# Load the iris data set from CSV
datav=read.csv("iris.csv")
# Scatter plot of sepal length vs. sepal width
plot(datav$sepal.length,datav$sepal.width)
# sepal length and width are the two variables plotted here
|
9ab8d05bdff11dc3baf58dd941c5493e563ed190 | c6d4151982154b9ffead267e10e00bc8b847a0bd | /datafest_analyse.R | 00ca424b6c062bb5590261ec4df4ccdf72455c40 | [] | no_license | pauschae/RSquaredDatafest | 1a5e8849bab1e86f71c63fbd50c9bda48425a4ce | 8409b3e04efa2e76f2ab1a2fcf1cfc916113867c | refs/heads/master | 2021-01-10T05:31:00.077261 | 2019-06-16T19:59:33 | 2019-06-16T19:59:33 | 55,712,993 | 0 | 1 | null | 2019-06-16T19:59:34 | 2016-04-07T17:08:26 | R | UTF-8 | R | false | false | 6,967 | r | datafest_analyse.R | library(rjags)
library(dplyr)
library(lme4)
library(MCMCpack)
library(superdiag)
#source("https://bioconductor.org/biocLite.R")
#biocLite("Rgraphviz")
#biocLite("graph")
#library(memisc)
#library(Hmisc)
#library(stringr)
#library(rvest)
## ---- load data ----
#library(data.table)
# adwords <- fread("./data and codebook/approved_adwords_v3.csv")
# data_purchase <- fread("./data and codebook/approved_data_purchase-v5.csv")
# ga_data <- fread("./data and codebook/approved_ga_data_v2.csv")
#
# datafest_db <- src_sqlite("datafest.db", create = TRUE)
# copy_to(datafest_db, adwords, temporary = FALSE)
# copy_to(datafest_db, data_purchase, temporary = FALSE)
# copy_to(datafest_db, ga_data, temporary = FALSE)
## ---- connect to database ----
# Open the SQLite database built above and list its tables
datafest_db <- src_sqlite("datafest.db")
src_tbls(datafest_db)
## ---- select data ----
# Distinct venue states, pulled into a plain character vector
unique_venue_states <- tbl(datafest_db, sql("SELECT DISTINCT venue_state FROM data_purchase")) %>%
  collect() %>%
  unlist(., use.names = FALSE)
# Exclude the Canadian provinces so only US states remain
exclude_states <- c("SASKATCHEWAN",
                    "ALBERTA",
                    "ONTARIO",
                    "QUEBEC",
                    "BRITISH COLUMBIA",
                    "MANITOBA",
                    "PRINCE EDWARD ISLAND")
keep_states <- unique_venue_states[unique_venue_states %in% exclude_states == FALSE]
## ---- event data subset ----
# get event_data: valid-ticket concert purchases only.
# NOTE(review): "Y " carries a trailing space, matching the flag as stored.
event_data <- tbl(datafest_db, sql("SELECT * FROM data_purchase")) %>%
  filter(., la_valid_tkt_event_flg == "Y ", major_cat_name == "CONCERTS") %>%
  collect()
# Keep only events held in the retained (US) states
event_data_subset <- event_data[event_data$venue_state %in% keep_states, ]
unique(event_data_subset$venue_postal_cd_sgmt_1)
## ---- zip codes ----
# Sorted unique venue ZIP codes, saved for later matching
zip_codes <- unique(event_data_subset$venue_postal_cd_sgmt_1) %>% sort
save(zip_codes, file = "zip_codes.RData")
# demographic variables
# almost no data
## ---- merge concerts and artists ----
# load data (one-off renaming of artist encodings; kept for reference)
# load("dfartists.RData")
# dfartists$obs <- c(1:nrow(dfartists))
# #write.csv(dfartists, file = "dfartists.csv")
# artists_rename <- read.csv("dfartists_encoding.csv", sep = ";")
#
# dfartists_neu <- merge(dfartists, artists_rename, by = c("obs"))
# dfartists_neu <- dfartists_neu[, -4]
# names(dfartists_neu)[3] <- "artists"
#
# save(dfartists_neu, file = "dfartists_neu.RData")
#
# rm(artists_rename)
# rm(dfartists)
#
# unique(event_data_subset$primary_act_name)
# Quick exploration: number of minor categories and gender distribution
unique(event_data_subset$minor_cat_name) %>% length
table(event_data_subset$gndr_cd, useNA = "always")
# Keep only the columns needed for the concerts analysis, then persist
concerts <- event_data_subset %>% select(., event_name,
                                         primary_act_name,
                                         secondary_act_name,
                                         venue_city,
                                         venue_state,
                                         venue_postal_cd_sgmt_1,
                                         minor_cat_name,
                                         tickets_purchased_qty)
save(concerts, file = "concerts.RData")
## ---- data analysis ----
# NOTE(review): datAggSTCD appears to be pre-aggregated sales/vote data --
# confirm its construction elsewhere; `daten` is the working copy
load("DataForAnalysis.RData")
daten <- datAggSTCD
# Convert Democratic vote share from percent to a 0-1 proportion
daten$D.VotePrct <- daten$D.VotePrct/100
# Indicator: at least 25% of sales in the country genre
daten$country_more_25 <- 0
daten$country_more_25[daten$Country >= 0.25] <- 1
# Indicator: at least 25% of sales in the "african-american" genre group
daten$africanamerican_more_25 <- 0
daten$africanamerican_more_25[daten$AfricanAmerican >= 0.25] <- 1
## ---- exploratory models ----
# lm(D.VotePrct ~ Metal + White + Black + Asian + Hispanic, data = daten) %>% summary
#
# lm(D.VotePrct ~ Latin + White + Black + Asian + Hispanic, data = daten) %>% summary
#
# lm(D.VotePrct ~ Folk + White + Black + Asian + Hispanic, data = daten) %>% summary
#
# lm(D.VotePrct ~ Country + White + Black + Asian + Hispanic, data = daten) %>% summary
#
# lm(D.VotePrct ~ Pop + White + Black + Asian + Hispanic, data = daten) %>% summary
#
# lm(D.VotePrct ~ Classical + White + Black + Asian + Hispanic, data = daten) %>% summary
#
# lm(D.VotePrct ~ BluesJazz + White + Black + Asian + Hispanic, data = daten) %>% summary
#
# lm(D.VotePrct ~ AfricanAmerican + White + Black + Asian + Hispanic, data = daten) %>% summary
#
# lm(D.VotePrct ~ Rock + White + Black + Asian + Hispanic, data = daten) %>% summary
#
# lm(D.VotePrct ~ Comedy + White + Black + Asian + Hispanic, data = daten) %>% summary
#
# lm(D.VotePrct ~ ELectronic + White + Black + Asian + Hispanic, data = daten) %>% summary
#
# lm(D.VotePrct ~ Comedy + White + Black + Asian + Hispanic, data = daten) %>% summary
## ---- final models ----
# ols
# genre: country
lm(D.VotePrct ~
Country, data = daten) %>% summary
lm(D.VotePrct ~
Country +
White, data = daten) %>% summary
lm(D.VotePrct ~
Country +
White +
Black +
Asian +
Hispanic, data = daten) %>% summary
# tobit models
censReg(D.VotePrct ~
Country +
White +
Black +
Asian +
Hispanic, data = daten, left = 0, right = 1) %>% summary
# add interaction
lm(D.VotePrct ~
Country * country_more_25 +
White +
Black +
Asian +
Hispanic, data = daten) %>% summary
# genre: "african-american"
lm(D.VotePrct ~
AfricanAmerican +
White +
Black +
Asian +
Hispanic, data = daten) %>% summary
censReg(D.VotePrct ~
AfricanAmerican +
White +
Black +
Asian +
Hispanic, data = daten, left = 0, right = 1) %>% summary
lm(D.VotePrct ~
AfricanAmerican * africanamerican_more_25 +
White +
Black +
Asian +
Hispanic, data = daten) %>% summary
# check correlations between music genres
names(daten)
cor(daten[, c(3:13)]*daten$totalSales) %>% round(., 2)
cor(daten[, c(3:13)]) %>% round(., 2)
# Bayesian models
mcmc_1 <- MCMCregress(D.VotePrct ~
Country +
White +
Black +
Asian +
Hispanic, data = daten, burnin = 10000, mcmc = 10000)
mcmc_2 <- MCMCregress(D.VotePrct ~
AfricanAmerican +
White +
Black +
Asian +
Hispanic, data = daten, burnin = 10000, mcmc = 10000)
# diagnostics
superdiag(mcmc_1, burnin = 5000)
superdiag(mcmc_2, burnin = 5000)
# results
summary(mcmc_1)$statistics %>% round(., 3)
summary(mcmc_2)$statistics %>% round(., 3)
# ---- plot some quantities of interest ----
# marginal effect
betasim_1 <- mcmc_1[, 1:6]
X_1 <- cbind(1, seq(0, 1, length.out = 100),
median(daten$White, na.rm = TRUE),
median(daten$Black, na.rm = TRUE),
median(daten$Asian, na.rm = TRUE),
median(daten$Hispanic, na.rm = TRUE))
mu <- X_1 %*% t(betasim_1)
plot(density(mu[1,] - mu[100, ]))
x <- seq(0, 1, length.out = 100)
q <- t(apply(mu, 1, quantile, c(0.05, 0.5, 0.95)))
plot(x, q[, 2], type = "l",
xlim = c(0, 1),
ylim = c(0, 1))
lines(x, q[, 1], lty = 2)
lines(x, q[, 3], lty = 2)
|
4cf9abf4545069a7dbb3f7638b21e987363a7f9a | 5a69a5598becead1d00e3cc34e5b04269d92392d | /odmsRater/hindcastGames.R | 9345dc0692651fe391b466e16885dfe345201a63 | [] | no_license | thezane/soccer-predictions | 22fdb9cf01b13b726bade2455db4b1dd80521665 | bab14c90491260443387b24c55ceabafd508d41e | refs/heads/master | 2020-12-02T17:40:13.130011 | 2018-07-06T03:43:33 | 2018-07-06T03:43:33 | 58,686,755 | 22 | 5 | null | null | null | null | UTF-8 | R | false | false | 829 | r | hindcastGames.R | hindcastGames <- function(rOptions) {
    # Body of hindcastGames(rOptions): run the ratings pipeline and write
    # hindcast game predictions. (The function header sits on the preceding,
    # garbled line.)
    library(MASS)
    library(hash)
    library(parallel)
    library(skellam)
    regexRFiles <- ".*\\.R"
    inputPath <- "../input/"
    outputPath <- "../output/"
    # Source every .R helper file found under the current directory
    srcFiles <- list.files(".", regexRFiles,
                           full.names=TRUE, recursive=TRUE)
    sapply(srcFiles, source)
    # Read the team tree, game tree, and T from the input directory
    readsData <- readData(rOptions, inputPath)
    tTree <- readsData[["tTree"]]
    gTree <- readsData[["gTree"]]
    # NOTE(review): `T` shadows base R's TRUE alias inside this function
    T <- readsData[["T"]]
    gi <- new.EventIterator(gTree)
    rOutput <- new.RatingsOutput(tTree, gTree, gi)
    if (rOptions$isOptimized) {
        # Ratings already optimized: compute outputs directly
        rOutput <- computeRNN(rOptions, rOutput)
        rData <- list(rOptions=rOptions, rOutput=rOutput)
    }
    else {
        # Otherwise run the optimizer (writes intermediates to outputPath)
        rData <- list(rOptions=rOptions, rOutput=rOutput)
        rData <- optimizeRNN(tTree, gTree, gi, rData, outputPath)
    }
    # Write the hindcast games and return the ratings data
    writeGames(rData, T, outputPath)
    rData
}
|
022dfb901f5a055b52ef9156bbe48c9b82707e61 | c174e265381f3924de8aadf3da0c498abf74f257 | /tests/testthat/test_SETRED.R | 437293e0bd376fd2614e2c1b24cc4b2ec879eb87 | [] | no_license | mabelc/SSC | 4cde0c396784e17a5412de2c94b27e264f5a975f | 4565f07e0f197e823bcea8442ed9ea82b0c94712 | refs/heads/master | 2022-04-05T12:12:36.306805 | 2019-12-16T20:06:18 | 2019-12-16T20:06:18 | 119,087,635 | 10 | 5 | null | 2019-12-16T20:06:19 | 2018-01-26T18:24:37 | R | UTF-8 | R | false | false | 3,909 | r | test_SETRED.R | context("Testing SETRED")
source("wine.R")
require(caret)
test_that(
desc = "setred works",
code = {
m <- setred(x = wine$xtrain, y = wine$ytrain, learner = knn3)
expect_is(m, "setred")
p <- predict(m, wine$xitest)
expect_is(p, "factor")
expect_equal(length(p), length(wine$ytrain))
m <- setred(x = wine$dtrain, y = wine$ytrain, x.inst = FALSE, learner = knn3)
expect_is(m, "setred")
p <- predict(m, wine$ditest[, m$instances.index])
expect_is(p, "factor")
expect_equal(length(p), length(wine$ytrain))
}
)
test_that(
desc = "prediction not fail when x is a vector",
code = {
m <- setred(x = wine$xtrain, y = wine$ytrain, learner = knn3)
p <- predict(m, wine$xitest[1,])
expect_is(p, "factor")
expect_equal(length(p), 1)
}
)
test_that(
desc = "the model structure is correct",
code = {
m <- setred(x = wine$xtrain, y = wine$ytrain, learner = knn3)
expect_equal(
names(m),
c("model", "instances.index", "classes", "pred", "pred.pars")
)
}
)
test_that(
desc = "x can be a data.frame",
code = {
expect_is(
setred(x = as.data.frame(wine$xtrain), y = wine$ytrain, learner = knn3),
"setred"
)
}
)
test_that(
desc = "y can be a vector",
code = {
expect_is(
setred(x = wine$xtrain, y = as.vector(wine$ytrain), learner = knn3),
"setred"
)
}
)
test_that(
desc = "x.inst can be coerced to logical",
code = {
expect_is(
setred(x = wine$xtrain, y = wine$ytrain, x.inst = TRUE, learner = knn3),
"setred"
)
expect_is(
setred(x = wine$xtrain, y = wine$ytrain, x.inst = 1, learner = knn3),
"setred"
)
expect_error(
setred(x = wine$xtrain, y = wine$ytrain, x.inst = "a", learner = knn3)
)
}
)
test_that(
desc = "relation between x and y is correct",
code = {
expect_error(
setred(x = wine$xtrain, y = wine$ytrain[-1], learner = knn3)
)
expect_error(
setred(x = wine$xtrain[-1,], y = wine$ytrain, learner = knn3)
)
}
)
test_that(
desc = "y has some labeled instances",
code = {
expect_error(
setred(x = wine$xtrain, y = rep(NA, length(wine$ytrain)), learner = knn3)
)
}
)
test_that(
desc = "y has some unlabeled instances",
code = {
expect_error(
setred(x = wine$xtrain, y = rep(1, length(wine$ytrain)), learner = knn3)
)
}
)
test_that(
desc = "x is a square matrix when x.inst is FALSE",
code = {
expect_error(
setred(x = wine$dtrain[-1,], y = wine$ytrain, x.inst = FALSE, learner = knn3)
)
expect_is(
setred(x = wine$dtrain, y = wine$ytrain, x.inst = FALSE, learner = knn3),
"setred"
)
}
)
test_that(
desc = "max.iter is a value greather than 0",
code = {
expect_error(
setred(x = wine$xtrain, y = wine$ytrain, learner = knn3, max.iter = -1)
)
expect_error(
setred(x = wine$xtrain, y = wine$ytrain, learner = knn3, max.iter = 0)
)
expect_is(
setred(x = wine$xtrain, y = wine$ytrain, learner = knn3, max.iter = 80),
"setred"
)
}
)
test_that(
desc = "perc.full is a value between 0 and 1",
code = {
expect_error(
setred(x = wine$xtrain, y = wine$ytrain, learner = knn3, perc.full = -0.5)
)
expect_error(
setred(x = wine$xtrain, y = wine$ytrain, learner = knn3, perc.full = 1.5)
)
expect_is(
setred(x = wine$xtrain, y = wine$ytrain, learner = knn3, perc.full = 0.8),
"setred"
)
}
)
test_that(
desc = "theta is a value between 0 and 1",
code = {
expect_error(
setred(x = wine$xtrain, y = wine$ytrain, learner = knn3, theta = -0.5)
)
expect_error(
setred(x = wine$xtrain, y = wine$ytrain, learner = knn3, theta = 1.5)
)
expect_is(
setred(x = wine$xtrain, y = wine$ytrain, learner = knn3, theta = 0.8),
"setred"
)
}
) |
bb1c79ba4b6c01e40b6f216d9c581bf2e79ee207 | 3d17a1ee10cfded91925d9129aac73cc1ccf0610 | /run_analysis.R | f73cdc594b92a619a54fae2efb1c4cb64b82956a | [] | no_license | sanderlings/CleanData_Assignment | 89982a7454929132b02e76858fc0206ea30b9a96 | 90ba6bc7d08cf7fa272f42da3d252030eef5034f | refs/heads/master | 2021-01-10T19:44:49.626161 | 2014-09-17T19:27:12 | 2014-09-17T19:27:12 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,760 | r | run_analysis.R | setwd("C:/Users/Joanna/Documents/Z-Study/Coursera/DS3-Clean/CleanData_Assignment/")
# Getting & Cleaning Data course script: merge the UCI HAR train/test sets,
# keep mean()/std() measurements, label activities, and write a tidy summary
# (mean of each variable per subject and activity).
library(reshape2)
library(plyr)
# read the test measurement data
testdata <- read.table("./UCI HAR Dataset/test/X_test.txt")
# read the measurement names (features.txt); V2 holds the name text
colname <- read.table("./UCI HAR Dataset/features.txt")
# NOTE(review): colname2 is never used below
colname2 <- subset(colname, select=c(V2))
# label the test data with the measurement names
names(testdata) <- colname$V2
# read the test activity codes
testact <- read.table("./UCI HAR Dataset/test/y_test.txt")
# label the column as "Activity"
names(testact) <- c("Activity")
# read the test subject ID
testsub <- read.table("./UCI HAR Dataset/test/subject_test.txt")
# label the column as "SubjectID"
names(testsub) <- c("SubjectID")
# combine the test set - subject ID, activity, measurement
testcmb <- cbind(testsub, testact, testdata)
# read the train measurement data
traindata <- read.table("./UCI HAR Dataset/train/X_train.txt")
# label the train data with the measurement names
names(traindata) <- colname$V2
# read the train activity codes
trainact <- read.table("./UCI HAR Dataset/train/y_train.txt")
# label the column as "Activity"
names(trainact) <- c("Activity")
# read the train subject ID
trainsub <- read.table("./UCI HAR Dataset/train/subject_train.txt")
# label the column as "SubjectID"
names(trainsub) <- c("SubjectID")
# combine the train set - subject ID, activity, measurement
traincmb <- cbind(trainsub, trainact, traindata)
# merge the test data with train data using rbind
mdata = rbind(testcmb, traincmb)
# grep only column names with mean and standard deviation;
# "mean\\(" deliberately excludes meanFreq() variables
mstd <- grep("mean\\(|std\\(", names(mdata), value=TRUE)
# extract only the columns with names mean or std, also subjectID and activity
xdata <- subset(mdata, select=c("SubjectID", "Activity", mstd))
# replace each numeric activity code with a descriptive, sortable name
xdata$Activity <- replace(xdata$Activity, xdata$Activity==1, "1-WALKING")
xdata$Activity <- replace(xdata$Activity, xdata$Activity==2, "2-WALKING_UPSTAIRS")
xdata$Activity <- replace(xdata$Activity, xdata$Activity==3, "3-WALKING_DOWNSTAIRS")
xdata$Activity <- replace(xdata$Activity, xdata$Activity==4, "4-SITTING")
xdata$Activity <- replace(xdata$Activity, xdata$Activity==5, "5-STANDING")
xdata$Activity <- replace(xdata$Activity, xdata$Activity==6, "6-LAYING")
# mean of the 66 measurement columns, grouped by subjectID then activity
tdata <- aggregate(xdata[3:68], by=list(xdata$SubjectID, xdata$Activity), FUN=mean)
# replace the aggregate-generated group names with appropriate names
colnames(tdata)[1] <- "SubjectID"
colnames(tdata)[2] <- "Activity"
# order by subjectID, then activity
tdata <- arrange(tdata, tdata$SubjectID, tdata$Activity)
# save file as txt
# NOTE(review): `row.name` relies on partial matching of `row.names`
write.table(tdata, file="./tidyData.txt", row.name=FALSE)
fdaad2fb2bc4ea1640d02ae62f4b8352e80c25d2 | 0a906cf8b1b7da2aea87de958e3662870df49727 | /grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610130913-test.R | 5016c7209990898c2aa0de62aff142ed17cf928f | [] | no_license | akhikolla/updated-only-Issues | a85c887f0e1aae8a8dc358717d55b21678d04660 | 7d74489dfc7ddfec3955ae7891f15e920cad2e0c | refs/heads/master | 2023-04-13T08:22:15.699449 | 2021-04-21T16:25:35 | 2021-04-21T16:25:35 | 360,232,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 359 | r | 1610130913-test.R | testlist <- list(a = 0L, b = 0L, x = c(-117901064L, -117901064L, -117901064L, -117901064L, -117901064L, -117901064L, -117901064L, -117901064L, -117901064L, -117901064L, -117901064L, -117901064L, -121046792L, -117901064L, -117901312L, -171L, -1L, -16777216L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
# Replay a libFuzzer/valgrind-discovered input (testlist, built on the
# preceding line) against the internal grattan:::anyOutside routine
result <- do.call(grattan:::anyOutside,testlist)
# Show the structure of the returned value for inspection
str(result)
20b3131bc88883b0939e7a23d7ffa47062b6778f | 09cfd76986efe97a4395657c74fd9611f070316c | /cachematrix.R | 47d929c3a84697bf2035fd8f0a5ecc9ac0ce8d14 | [] | no_license | mdnaveed91/ProgrammingAssignment2 | 11c55e953469e5b476a6be6750a3f07410b07aa7 | a10085200b3d7948d0bd8e1226a7f0426c7b6ea0 | refs/heads/master | 2021-01-17T05:30:41.557452 | 2015-02-21T20:47:43 | 2015-02-21T20:47:43 | 31,137,494 | 0 | 0 | null | 2015-02-21T19:20:51 | 2015-02-21T19:20:51 | null | UTF-8 | R | false | false | 1,158 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## These functions cache the inverse of a matrix: inversion can be costly,
## so the result is computed once and served from the cache afterwards.
## makeCacheMatrix: build a special "matrix" object able to cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
  # A "cache matrix": stores a matrix `x` together with a lazily cached
  # inverse. Returns a list of accessor closures; both the matrix and the
  # cache live in this function's environment.
  cached_inverse <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates any previously cached inverse
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setmatrix <- function(solve) {
    cached_inverse <<- solve
  }
  getmatrix <- function() {
    cached_inverse
  }
  list(
    set = set,
    get = get,
    setmatrix = setmatrix,
    getmatrix = getmatrix
  )
}
## cacheSolve: return the inverse of the special "matrix" created by
## makeCacheMatrix, computing it only when no cached value exists.
cacheSolve <- function(x=matrix(), ...) {
  # Return the inverse of the special "matrix" `x` (a makeCacheMatrix
  # object). Extra arguments `...` are forwarded to solve().
  inv <- x$getmatrix()
  if (!is.null(inv)) {
    message("getting cached data")
    return(inv) # cached inverse of 'x'
  }
  # Cache miss: fetch the data, invert it, and store the result.
  # Locals renamed: `matrix` shadowed base::matrix and `m` was uninformative.
  mat <- x$get()
  inv <- solve(mat, ...)
  x$setmatrix(inv)
  inv
}
# Manual demo: exercises makeCacheMatrix/cacheSolve and shows that the
# second cacheSolve call is served from the cache
test <- function(){
t <- matrix(c(1,3,1,2),nrow=2) # Creating a new matrix
message("Input Matrix:"); print(t)
message("Calling makeCacheMatrix")
t2 <- makeCacheMatrix(t) # Creating special matrix object
#print(t2)
message("Calling cacheSolve (1st time)")
print(cacheSolve(t2)) # Generating inverse
message("Calling cacheSolve (2nd time)")
cacheSolve(t2) # Generating inverse (returned from cache)
}
|
1e1b59da68294e1330cb12f200355f97672712b8 | 57b64b96a84935739474b18dd783183b74765960 | /man/build.paths.Rd | d7ce7df291cedb42f75fef62c657cdae69506340 | [] | no_license | erikbjohn/pkg.data.paths | 800439ab30b5ec1292eb242756e9a4117197415e | f34054d119e5f92d657339d42964d72e9d8e624b | refs/heads/master | 2021-01-01T15:34:50.656006 | 2017-08-15T15:17:38 | 2017-08-15T15:17:38 | 97,653,138 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 446 | rd | build.paths.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pkg.data.paths.R
\name{build.paths}
\alias{build.paths}
\title{build.paths}
\usage{
build.paths(path.root, str.pkg.name)
}
\arguments{
\item{path.root}{root local dropbox directory}
\item{str.pkg.name}{package name of files}
}
\description{
Build dropbox mapping for all data in pkg.data directory
}
\keyword{check}
\keyword{dropbox}
\keyword{package}
\keyword{path}
|
051420a60242e1abfae71f542167a5d8af514af7 | e2c7181ed4e32ad6375160811fc1e13a6c5c1752 | /R/class_statistics.R | 9a6bac5a7a69150581c568441b55a9f305b4a343 | [] | no_license | mauricioromero86/teamlucc | c6bbef6beff5bb19ba068db500e6c6e483086507 | b41fdd3135dd58c45a0a76c8c568768104267eaa | refs/heads/master | 2020-12-24T11:33:13.644455 | 2015-09-10T14:14:44 | 2015-09-10T14:14:44 | 40,678,059 | 0 | 2 | null | 2015-08-13T19:33:49 | 2015-08-13T19:33:48 | R | UTF-8 | R | false | false | 1,495 | r | class_statistics.R | #' Exports statistics on pixels within each of a set of land cover classes
#'
#' @export
#' @import raster
#' @importFrom dplyr group_by summarize
#' @importFrom reshape2 melt
#' @param x A \code{RasterLayer} from which class statistics will be
#' calculated.
#' @param y A \code{SpatialPolygonsDataFrame} with cover class
#' polygons
#' @param class_col the name of the column containing the response variable
#' (for example the land cover type of each pixel)
#' @return A data.frame of class statistics.
#' @examples
#' class_statistics(L5TSR_1986, L5TSR_1986_2001_training, "class_1986")
class_statistics <- function(x, y, class_col) {
    # Fail fast when the raster and the polygon layer carry different CRS
    # strings.
    if (projection(x) != projection(y)) {
        stop('Coordinate systems do not match')
    }
    if (class(y) == "SpatialPolygonsDataFrame") {
        # Extract the pixel values of x that fall inside each class polygon.
        pixels <- get_pixels(x, y, class_col)
    } else if (class(y) %in% c("RasterLayer", "RasterBrick",
                               "RasterStack")) {
        stop('class_statistics cannot yet handle Raster* objects')
    }
    # NOTE(review): if y is neither a SpatialPolygonsDataFrame nor a
    # Raster*, 'pixels' is never assigned and the next line fails with an
    # unhelpful error — consider an explicit else stop().
    # Reshape to long format: one row per (class, band, value); the column
    # named 'y' holds the class label.
    pixels <- melt(data.frame(pixels@x, y=pixels@y), idvar='y')
    # Set y and variable to NULL to pass R CMD CHECK without notes
    value=variable=NULL
    # Per class (y) and band (variable): mean, sd, min, max, pixel count.
    class_stats <- summarize(group_by(pixels, y, variable), mean=mean(value),
                             sd=sd(value), min=min(value), max=max(value),
                             n_pixels=length(value))
    class_stats <- class_stats[order(class_stats$variable, class_stats$y), ]
    return(class_stats)
}
|
c51e47fef24e4659933927073c260a1e71a20f5a | 8357f04a0a50e10697a650ad319b37738dcc2cf8 | /man/github-package.Rd | be9260385a8b8851bf05d1b23149e39fb2ec6c7b | [
"MIT"
] | permissive | prateek05/rgithub | 2a7114a8262cd6abefc8d31f03a907b75f127ba2 | 153bde1466252952e21c1fdb2ff0b452b9c8de99 | refs/heads/master | 2021-01-17T04:24:00.180261 | 2014-07-14T17:24:43 | 2014-07-14T17:24:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 861 | rd | github-package.Rd | \name{github-package}
\alias{github-package}
\alias{github}
\docType{package}
\title{
Use the Github API from R
}
\description{
This package wraps the Github web service API so you can make R calls
against the Github API (to get information about repositories, or even
to create new content)
}
\details{
\tabular{ll}{
Package: \tab github\cr
Type: \tab Package\cr
Version: \tab 0.9.5\cr
Date: \tab 2013-02-28\cr
License: \tab MIT\cr
}
The functions mirror Github REST endpoints; see the examples section
(e.g. \code{get.user.repositories}) for basic usage.
}
\author{
Carlos Scheidegger
Maintainer: Carlos Scheidegger <carlos.scheidegger@gmail.com>
}
\references{
~~ Literature or other references for background information ~~
}
\keyword{ package }
\seealso{ RJSON }
\examples{
repos <- get.user.repositories("cscheid")
}
|
4a769cd87362f6e6c94d9f2dda0cc1a792cfcce1 | 0d0d0a8baa83af3ad38ea2e419544db094d4b9fd | /man/fm_checkdesign.Rd | e63aa8b2102be67f86ede681d4dc0ca286632b38 | [] | no_license | cran/fishmethods | 81162cf5bf35201c7ce85dd6e9815c4bca6b7646 | ac49e77d9f2b5ee892eb5eae1807e802cddd4ac8 | refs/heads/master | 2023-05-17T13:53:37.128033 | 2023-04-27T07:33:01 | 2023-04-27T07:33:01 | 17,696,062 | 5 | 3 | null | null | null | null | UTF-8 | R | false | false | 3,532 | rd | fm_checkdesign.Rd | \name{fm_checkdesign}
\alias{fm_checkdesign}
\title{Check parameter structure of Hightower et al. (2001) models}
\description{
Check design of parameter structure before use in function \code{fm_telemetry}.
}
\usage{
fm_checkdesign(occasions = NULL, design = NULL, type = "F" )
}
\arguments{
\item{occasions}{total number of occasions that will be modeled in data}
\item{design}{vector of characters specifying the occasion parameter structure (see details).}
\item{type}{character type of parameter to which design will be applied: F = fishing mortality, M = natural mortality, and P = probability of detection. Default = F.}
}
\details{The program allows the configuration of different parameter structure for the estimation of fishing and natural mortalities, and detection probabilities. These structures are specified in \code{design}. Consider the following examples:
\emph{Example 1}
Tags are relocated over seven occasions. One model structure might be constant fishing mortality estimates over occasions 1-3 and 4-6. To specify this model structure:
\code{design} is c(\dQuote{1},\dQuote{4}).
Note: The structures of \code{design} must always contain the first occasion for fishing mortality and natural mortality, whereas the structure for the probability of detection must not contain the first occasion.
\emph{Example 2}
Tags are relocated over six occasions. One model structure might be separate fishing mortality estimates for occasion 1-3 and the same parameter estimates for occasions 4-6. The \code{design} is c(\dQuote{1:3*4:6}).
Note: The structures of \code{Fdesign} and \code{Mdesign} must always start with the first occasion, whereas the structure for \code{Pdesign} must always start with the second occasion.
Use the multiplication sign to specify occasions whose estimates of F, M or P will be taken from values of other occasions.
\emph{Example 3}
Specification of model 3 listed in Table 1 of Hightower et al. (2001) is shown. Each occasion represented a quarter of the year. The quarter design for F specifies that quarterly estimates are the same in both years. \code{design} is c(\dQuote{1*14},\dQuote{4*17},\dQuote{7*20},\dQuote{11*24}).
\emph{Example 4}
In Hightower et al. (2001), the quarter and year design specifies that estimates are made for each quarter but are different for each year. \code{design} is
c(\dQuote{1}, \dQuote{4}, \dQuote{7}, \dQuote{11},
\dQuote{14}, \dQuote{17}, \dQuote{20}, \dQuote{24}).
If the number of occasions to be assigned parameters from other occasions are less than the number of original parameters (e.g., c(\dQuote{11:13*24:25}), then only the beginning sequence of original parameters equal to the number of occasions are used. For instance, in c(\dQuote{11:13*24:25}), only parameters 11 and 12 would be assigned to occasions 24 and 25.
If the number of occasions to be assigned parameters from other occasions are greater than the number of original parameters (e.g., c(\dQuote{11:12*24:26})), then the last original parameter is re-cycled. In the example c(\dQuote{11:12*24:26}), the parameter for occasion 12 is assigned to occasions 25 \emph{and} 26.
}
\value{dataframe containing the parameter order by occasion.
}
\author{Gary A. Nelson, Massachusetts Division of Marine Fisheries \email{gary.nelson@mass.gov}}
\seealso{\code{\link{fm_telemetry}}}
\examples{
fm_checkdesign(occasions=27, design=c("1*14","4*17","7*20","11*24"),type="F")
}
\keyword{misc}
|
8b0c7c4dd7bc3cf262a7be251b1f666c20c8f182 | 0380705740a297a32860ba83cbb8869464c40123 | /var.R | 1882e82a2f674d684f15c858af4c40b2d4909433 | [] | no_license | cwalenciak/WindApp | 527630082c1148ad8fbac1d2fb4baec4b8a09ea3 | f50fd3b668cc2bd3128d71c9998ade5e7702b20a | refs/heads/master | 2022-11-13T19:02:31.535634 | 2020-07-11T12:42:29 | 2020-07-11T12:42:29 | 264,351,771 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 557 | r | var.R |
# Shiny UI for the VAR (vector autoregression) tab: a data-selection
# sidebar plus a tab box whose panels are supplied by module UI functions.
var_page <- tabPanel(
  "VAR",
  sidebarLayout(
    # Left column: data selection module.
    sidebarPanel(
      width = 3,
      var_data_select_ui("var_data_select")
    ),

    # Main column: custom stylesheet plus the VAR workflow tabs
    # (review, lag selection, forecast, ASE).
    mainPanel(
      width = 9,
      includeCSS("www/custom.css"),
      tabBox(
        width= 12,
        var_review_tab_ui("var_review_tab"),
        var_lag_select_tab_ui("var_lag_select_tab"),
        var_forecast_tab_ui("var_forecast_tab"),
        var_ase_tab_ui("var_ase_tab")
      )
    )
  )
)
|
09383e14f0ec661928e6a8af7b3d96f826956051 | 18a55611ad5aa3145c7b49cee1ff84ebf8fd95a8 | /man/limit_cq.Rd | 8cf8e0ce7fd5c1e9418bffa1b5c02625d20ca5e8 | [] | no_license | michbur/dpcR | eb55969b0702ce7da9f2fe09b779c3145960118b | 741846b6a0f5febf682b03367b1b18c87cdb4c85 | refs/heads/master | 2022-11-21T10:29:31.475295 | 2022-11-07T20:26:50 | 2022-11-07T20:26:50 | 12,669,549 | 7 | 2 | null | 2014-04-25T07:51:33 | 2013-09-07T18:32:06 | R | UTF-8 | R | false | true | 3,670 | rd | limit_cq.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/limit_cq.R
\name{limit_cq}
\alias{limit_cq}
\title{Limit Cy0 values}
\usage{
limit_cq(
data,
cyc = 1,
fluo = NULL,
Cq_range = c(1, max(data[cyc])),
model = l5,
SDM = TRUE,
pb = FALSE
)
}
\arguments{
\item{data}{a dataframe containing the qPCR data.}
\item{cyc}{the column containing the cycle data. Defaults to first column.}
\item{fluo}{the column(s) (runs) to be analyzed. If \code{NULL}, all runs will be
considered (equivalent of \code{(1L:ncol(data))[-cyc]}).}
\item{Cq_range}{is a user defined range of cycles to be used for the
determination of the Cq values.}
\item{model}{is the model to be used for the analysis for all runs. Defaults
to 'l5' (see \code{\link[qpcR]{pcrfit}}).}
\item{SDM}{if \code{TRUE}, Cq is approximated by the second derivative
method. If \code{FALSE}, Cy0 method is used instead.}
\item{pb}{if \code{TRUE}, progress bar is shown.}
}
\value{
A data frame with two columns and number of rows equal to the number
of runs analyzed. The column \code{Cy0} contains calculated Cy0 values. The
column \code{in.range} contains adequate logical constant if given Cy0 value
is in user-defined \code{Cq_range}.
}
\description{
Calculates the Cq values of a qPCR experiment
within a defined range of cycles. The function can be used to extract Cq
values of a chamber based qPCR for conversion into a dPCR experiment. All Cq
values are obtained by Second Derivative Maximum or by Cy0 method (Guescini
et al. (2008)).
}
\details{
The \code{Cq_range} for this function can be defined by the user. The default
is to take all amplification curves into consideration. However, under
certain circumstances it is recommended to define a range. For example if
amplifications are positive in early cycle numbers (less than 10).
Approximated second derivative is influenced both by how often interpolation
takes place in each data interval and by the smoothing method used. The user
is encouraged to seek optimal parameters for his data himself. See
\code{\link[chipPCR]{inder}} for details.
The calculation of the Cy0 value (equivalent of Cq) is based on a
five-parameter function. From experience this functions leads to good
fitting and avoids overfitting of critical data sets. Regardless, the user
is recommended to test for the optimal fitting function himself (see
\code{\link[qpcR]{mselect}} for details).
}
\examples{
library(qpcR)
test <- cbind(reps[1L:45, ], reps2[1L:45, 2L:ncol(reps2)], reps3[
1L:45,
2L:ncol(reps3)
])
# results.dPCR contains a column with the Cy0 values and a column with
# converted values.
Cq.range <- c(20, 30)
ranged <- limit_cq(
data = test, cyc = 1, fluo = NULL,
Cq_range = Cq.range, model = l5
)
# Same as above, but without Cq.range
no_range <- limit_cq(data = test, cyc = 1, fluo = NULL, model = l5)
# Same as above, but only three columns
no_range234 <- limit_cq(data = test, cyc = 1, fluo = c(2:4), model = l5)
}
\references{
Guescini M, Sisti D, Rocchi MB, Stocchi L & Stocchi V (2008)
\emph{A new real-time PCR method to overcome significant quantitative
inaccuracy due to slight amplification inhibition}. BMC Bioinformatics, 9:
326.
Ruijter JM, Pfaffl MW, Zhao S, et al. (2013) \emph{Evaluation of qPCR curve
analysis methods for reliable biomarker discovery: bias, resolution,
precision, and implications}. Methods, San Diego Calif 59:32--46.
}
\seealso{
SDM method: \code{\link[chipPCR]{inder}},
\code{\link[chipPCR]{summary.der}}.
Cy0 method: \code{\link[qpcR]{mselect}}, \code{\link[qpcR]{efficiency}}.
}
\author{
Michal Burdukiewicz, Stefan Roediger.
}
\keyword{Cy0}
\keyword{dPCR}
\keyword{qPCR}
|
d997ce8392a1135a9a2bdd8399c870a5832162fb | fbe4928c7d1c78ee436b5aec4b1d6a3c89da5385 | /run_analysis.R | 9c3443c37ff4461768f827d613d89ec9bc7e0029 | [] | no_license | biswassubrata/Coursera-Course-3-Project-Work | d22a6f32f1ca499888aefe6aa3672c330310e970 | 595a8a9df9f013cc7666cdb7a38045b102ead4b6 | refs/heads/master | 2021-05-11T22:41:48.393438 | 2018-01-15T06:25:59 | 2018-01-15T06:25:59 | 117,497,119 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,783 | r | run_analysis.R | #
#
# Download and unzip the UCI HAR data set, if it is not available already.
#
if (!file.exists("datafile.zip")) {
    download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip", "datafile.zip")
}
if (file.exists("UCI HAR Dataset")) {
    # unlink() silently skips directories unless recursive = TRUE, so the
    # old extraction was never actually removed before; delete it for real.
    unlink("UCI HAR Dataset", recursive = TRUE)
}
unzip("datafile.zip")
#
# Identify the required features: only the mean() and std() measurements.
#
features <- read.csv("UCI HAR Dataset/features.txt", sep = "", header = FALSE, stringsAsFactors = FALSE)
requiredFeatures <- grep("(mean|std)\\(\\)", tolower(features[, 2]))
activities <- read.csv("UCI HAR Dataset/activity_labels.txt", header = FALSE, sep = "", stringsAsFactors = FALSE)
#
# Read the test and training sets, keep only the required feature columns,
# and prepend the subject and activity identifier columns.
#
test <- read.csv("UCI HAR Dataset/test/X_test.txt", header = FALSE, sep = "")[requiredFeatures]
testSubjects <- read.csv("UCI HAR Dataset/test/subject_test.txt", header = FALSE, sep = "")
testActivities <- read.csv("UCI HAR Dataset/test/Y_test.txt", header = FALSE, sep = "")
test <- cbind(testSubjects, testActivities, test)
train <- read.csv("UCI HAR Dataset/train/X_train.txt", header = FALSE, sep = "")[requiredFeatures]
trainSubjects <- read.csv("UCI HAR Dataset/train/subject_train.txt", header = FALSE, sep = "")
trainActivities <- read.csv("UCI HAR Dataset/train/Y_train.txt", header = FALSE, sep = "")
train <- cbind(trainSubjects, trainActivities, train)
mergedData <- rbind(test, train)
#
# Assign meaningful column names and translate activity codes to labels.
#
colnames(mergedData) <- c("Subjects", "Activities", features[requiredFeatures, 2])
mergedData$Activities <- activities[mergedData$Activities, 2]
#
# Create the tidy data set: mean of every feature per subject and activity.
# Columns 3:68 are the selected feature columns — assumes the grep above
# matched 66 features (true for this data set).
#
tidyData <- aggregate(mergedData[3:68],
                      list(Subjects = mergedData$Subjects, Activities = mergedData$Activities),
                      mean)
write.table(tidyData, "tidyData.txt", row.names = FALSE, quote = FALSE)
|
dfa2ed47bfcb908d49b332a24ffda3df5a60704a | ce6317a8de3fe10f2b3eea8e4a5b68d2841c6e24 | /man/PhylogeneticH2.Rd | cd5ec49ba14ba5da55bb6fd339a1f833f1813cd1 | [] | no_license | gtonkinhill/POUMM | 8fc55e702a28b6ff9e383481613c4cd64aa1d5d4 | 5199370f088b33e6f8bb3c216f29feb485576ee7 | refs/heads/master | 2020-04-05T05:09:03.474985 | 2017-07-02T13:23:35 | 2017-07-02T13:23:35 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 3,369 | rd | PhylogeneticH2.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paramsPOUMM.R
\name{PhylogeneticH2}
\alias{H2e}
\alias{PhylogeneticH2}
\alias{alpha}
\alias{sigmaOU}
\alias{sigmae}
\title{Phylogenetic Heritability}
\usage{
alpha(H2, sigma, sigmae, t = Inf)
sigmaOU(H2, alpha, sigmae, t = Inf)
sigmae(H2, alpha, sigma, t = Inf)
H2e(z, sigmae, tree = NULL, tFrom = 0, tTo = Inf)
}
\arguments{
\item{H2}{Phylogenetic heritability at time t.}
\item{sigmae}{Numeric, environmental phenotypic deviation at the tips.}
\item{t}{Numeric value denoting evolutionary time (i.e. distance from the
root of a phylogenetic tree).}
\item{alpha, sigma}{Numeric values or n-vectors, parameters of the OU process;
alpha and sigma must be non-negative. A zero alpha is interpreted as the
Brownian motion process in the limit alpha -> 0.}
\item{z}{Numerical vector of observed phenotypes.}
\item{tree}{A phylo object.}
\item{tFrom, tTo}{Numerical minimal and maximal root-tip distance to limit the
calculation.}
}
\value{
All functions return numerical values or NA, in case of invalid
parameters
}
\description{
The phylogenetic heritability, \eqn{H^2}, is defined as the
ratio of the genetic variance over the total phenotypic variance expected
at a given evolutionary time t (measured from the root of the tree). Thus,
the phylogenetic heritability connects the parameters alpha, sigma and
sigmae of the POUMM model through a set of equations. The functions
described here provide an R-implementation of these equations.
}
\details{
The function alpha invokes the gsl function lambert_W0.
The function sigmae uses the formula H2 = varOU(t, alpha, sigma) /
(varOU(t, alpha, sigma) + sigmae^2)
}
\section{Functions}{
\itemize{
\item \code{alpha}: Calculate alpha given time t, H2, sigma and sigmae
\item \code{sigmaOU}: Calculate sigma given time t, H2 at time t, alpha
and sigmae
\item \code{sigmae}: Calculate sigmae given alpha, sigma, and H2 at
time t
\item \code{H2e}: "Empirical" phylogenetic heritability estimated
from the empirical variance of the observed phenotypes and sigmae
}}
\note{
This function is called sigmaOU and not simply sigma to avoid a conflict
with a function sigma in the base R-package.
}
\examples{
# At POUMM stationary state (equilibrium, t=Inf)
H2 <- POUMM::H2(alpha = 0.75, sigma = 1, sigmae = 1, t = Inf) # 0.4
alpha <- POUMM::alpha(H2 = H2, sigma = 1, sigmae = 1, t = Inf) # 0.75
sigma <- POUMM::sigmaOU(H2 = H2, alpha = 0.75, sigmae = 1, t = Inf) # 1
sigmae <- POUMM::sigmae(H2 = H2, alpha = 0.75, sigma = 1, t = Inf) # 1
# At finite time t = 0.2
H2 <- POUMM::H2(alpha = 0.75, sigma = 1, sigmae = 1, t = 0.2) # 0.1473309
alpha <- POUMM::alpha(H2 = H2, sigma = 1, sigmae = 1, t = 0.2) # 0.75
sigma <- POUMM::sigmaOU(H2 = H2, alpha = 0.75, sigmae = 1, t = 0.2) # 1
sigmae <- POUMM::sigmae(H2 = H2, alpha = 0.75, sigma = 1, t = 0.2) # 1
# Comparing with the empirical H2e from a simulation
N <- 20
tree <- TreeSim::sim.bd.taxa(N, 1, lambda = 2, mu = 1, complete =
FALSE)[[1]]
tMean <- mean(nodeTimes(tree, tipsOnly = TRUE))
z <- rVNodesGivenTreePOUMM(tree, 8, 2, 2, 1)
\dontrun{
phytools::plotBranchbyTrait(tree, z, mode = "nodes", show.tip.label =
FALSE, show.node.label = FALSE)
ape::nodelabels(round(z[-(1:N)], 2))
ape::tiplabels(round(z[(1:N)], 2))
}
}
\seealso{
OU
}
|
c014aaefb699054c1d551656847e1318dfb5d7d6 | 4f871a504b17ac92fd8cef3764f182a4416f6f72 | /man/getGObiomaRt.Rd | 0339c6dc074b0bedcf013ea029b6cc2be728cd44 | [
"MIT"
] | permissive | martaint/InterCellar | 9227bd44f120fffecdc05adc0e75cb52315e3186 | d9483f7df8eb5168fc8660a73483572a12ca35cb | refs/heads/main | 2023-04-07T06:15:39.023654 | 2022-04-06T08:46:25 | 2022-04-06T08:46:25 | 326,775,859 | 7 | 3 | null | null | null | null | UTF-8 | R | false | true | 465 | rd | getGObiomaRt.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fct_annotation.R
\name{getGObiomaRt}
\alias{getGObiomaRt}
\title{Connection to Ensembl via biomaRt to get GO terms}
\usage{
getGObiomaRt(input_select_ensembl, input.data)
}
\arguments{
\item{input_select_ensembl}{chosen version of Ensembl}
\item{input.data}{filtered input data}
}
\value{
dataframe with GO annotation
}
\description{
Connection to Ensembl via biomaRt to get GO terms
}
|
06b134b21ca7fa211b1a7e4f55551ceda59b9948 | 598d36d24e3ae40e6422d8b5216f6018c1bc3ceb | /man/polynomial2.Rd | 9ff576cbea919a08e1e13827e4b5d35c7ed9eb2a | [] | no_license | femeunier/LianaHydro | b55244322fdac9fe5b98d607abe0615912ea83dc | 32d4050bff949dc96e7a6c58dfa689fbadfd40b4 | refs/heads/master | 2020-09-01T20:44:33.932223 | 2020-07-02T16:04:49 | 2020-07-02T16:04:49 | 219,051,562 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 335 | rd | polynomial2.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/polynomial2.R
\name{polynomial2}
\alias{polynomial2}
\title{polynomial2}
\usage{
polynomial2(psi, a = 2, b = -2)
}
\arguments{
\item{psi}{water potential}
\item{a}{a}
\item{b}{b (P50)}
}
\description{
Returns polynomial PLC
}
\author{
Félicien Meunier
}
|
f4937bda50f6adfc8ebec03a6d2d1b3f35b231a8 | 1aef9a9bae3c784239a35aac9f3d2d0422964193 | /alhe.R | 187a10af4e6c2cca67136ee3beacfa759431fe0e | [] | no_license | pawello2222/ALHE_TabuSearch | 989223a3cc864acb0df0ea7c8c44635f0eb28218 | 4fe789e5b60ae44566cade6d7653a29a196e38f1 | refs/heads/master | 2021-01-15T23:22:39.825629 | 2016-06-05T14:41:33 | 2016-06-05T14:41:33 | 61,218,871 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,046 | r | alhe.R | library(foreach)
source("TabuSearch.R")
source("MealsLogic.R")
source("ConfigVariables.R")
generateRandomPoint <- function()
{
  # Draw a random starting diet: mealsPerDay meals, each holding
  # dishesPerMeal dishes sampled uniformly (with replacement) from the
  # global `dishes` table. Relies on globals set up by
  # loadConfigVariablesAsGlobals() / loadDishesAsGlobals().
  randomPoint <- vector(mode = "list", length = mealsPerDay)
  # Despite the name, this is the number of available dishes.
  mealsSize <- nrow(dishes)
  for (mealNo in 1:mealsPerDay)
  {
    randomMeal <- vector(mode = "list", length = dishesPerMeal)
    for (dishNo in 1:dishesPerMeal)
    {
      # One dish row drawn uniformly at random.
      randomDishIndex <- sample(1:mealsSize, 1)
      randomDish <- dishes[randomDishIndex, ]
      randomMeal[[dishNo]] <- randomDish
    }
    randomPoint[[mealNo]] <- randomMeal
  }
  return(randomPoint)
}
stopConditionFunc <- function(point) {
  # Stop the search once the point's fitness exceeds the configured
  # threshold (global `stopCondition`).
  evaluateFunc(point) > stopCondition
}
neighborHoodFunc <- function(point)
{
  # Build the full 1-swap neighborhood of a diet: every point obtained by
  # replacing exactly one dish slot of one meal with a different dish from
  # the global `dishes` table.
  dishesSize <- nrow(dishes)
  # One neighbor per (meal, dish slot, alternative dish) combination.
  neighborHoodSize <- dishesPerMeal*mealsPerDay*(dishesSize-1)
  neighborHoodInsertElementIndex <- 1
  neighborHood <- vector(mode = "list", length = neighborHoodSize)
  for (mealNo in 1:mealsPerDay)
  {
    meal = point[[mealNo]]
    for (dishNo in 1:dishesPerMeal)
    {
      dish = meal[[dishNo]]
      for (selectedDishIndex in 1:dishesSize)
      {
        selectedDish <- dishes[selectedDishIndex, ]
        # Skip the dish currently occupying this slot (compared via the
        # first column, i.e. the dish identifier).
        if (!identical(selectedDish[1], dish[1]))
        {
          neighbor <- point
          neighbor[[mealNo]][[dishNo]] <- selectedDish
          neighborHood[[neighborHoodInsertElementIndex]] <- neighbor
          neighborHoodInsertElementIndex <- neighborHoodInsertElementIndex + 1
        }
      }
    }
  }
  # NOTE(review): the preallocated length assumes dish identifiers in
  # `dishes` are unique; duplicates would leave trailing NULL entries in
  # the returned list — confirm against the dishes data.
  return(neighborHood)
}
monotonyRatio <- function(point)
{
  # Count how many dish occurrences in the diet are repeats of an earlier
  # dish, where a dish is identified by the first field of its record.
  #
  # point: list of meals, each meal a list of dishes; dish[[1]] is the id.
  # Returns the number of duplicated occurrences (0 when all are unique).
  #
  # The original manual counting loop is replaced by duplicated()/sum();
  # this also fixes the 1:length() crash the old loop hit on an empty diet.
  dishIds <- unlist(lapply(point, function(meal) {
    lapply(meal, function(dish) dish[[1]])
  }), recursive = FALSE)
  # duplicated() flags every occurrence after the first one.
  sum(duplicated(dishIds))
}
objectiveFunc <- function(point)
{
  # Weighted macronutrient score of the diet, penalized by dish monotony.
  # Relies on globals: *Priority weights, optimal* targets, sumDaily*()
  # helpers (from MealsLogic.R) and dishesPerMeal/mealsPerDay.
  # Sum the priorities (weights)
  prioritiesSum <- carbohydratesPriority + proteinsPriority + fatsPriority
  # Compute the ratio of achieved to optimal intake
  xCarbohydrates <- sumDailyCarbohydrates(point) / optimalCarbohydrates
  # If greater than 1, fold it back (e.g. 1.2 becomes 0.8)
  if (xCarbohydrates > 1) xCarbohydrates <- 1 - (xCarbohydrates - 1)
  # If smaller, shrink the score further (squared), because the diet
  # should not fall short of the requirement
  else xCarbohydrates <- xCarbohydrates * xCarbohydrates
  # Repeat for the remaining macronutrients
  xProteins <- sumDailyProteins(point) / optimalProteins
  if (xProteins > 1) xProteins <- 1 - (xProteins - 1)
  else xProteins <- xProteins * xProteins
  xFats <- sumDailyFats(point) / optimalFats
  if (xFats > 1) xFats <- 1 - (xFats - 1)
  else xFats <- xFats * xFats
  # Monotony penalty: share of repeated dishes, scaled by its priority.
  dishesCount <- dishesPerMeal * mealsPerDay
  xMonotonyRatio <- monotonyRatio(point) / dishesCount
  xMonotonyRatio <- xMonotonyRatio * monotonyPriority
  # Compute the weighted mean of the three macronutrient scores
  xSum = ((xCarbohydrates*carbohydratesPriority)+(xProteins * proteinsPriority)+(xFats * fatsPriority))/prioritiesSum
  return(xSum-xMonotonyRatio)
}
heuristicFunc <- function(point) {
  # Score each macronutrient as the ratio of achieved daily intake to the
  # optimal target; ratios above 1 are folded back symmetrically, so 1.2
  # scores the same as 0.8. Returns the plain average of the three scores.
  fold <- function(ratio) {
    if (ratio > 1) 2 - ratio else ratio
  }

  carbScore <- fold(sumDailyCarbohydrates(point) / optimalCarbohydrates)
  proteinScore <- fold(sumDailyProteins(point) / optimalProteins)
  fatScore <- fold(sumDailyFats(point) / optimalFats)

  (carbScore + proteinScore + fatScore) / 3
}
evaluateFunc <- function(point) {
  # Overall fitness: weighted mean of the objective and heuristic scores,
  # with weights taken from the global priority settings.
  totalWeight <- objectiveFuncPriority + heuristicFuncPriority
  weightedSum <- objectiveFunc(point) * objectiveFuncPriority +
    heuristicFunc(point) * heuristicFuncPriority
  weightedSum / totalWeight
}
loadConfigVariablesAsGlobals <- function(configVariables)
{
  # Publish every configuration entry as a global variable named after its
  # list element. The original foreach/%do%/.combine=rbind construction was
  # pure overhead for a side-effecting loop and needlessly required the
  # foreach package here; a plain base-R loop does the same assignments.
  #
  # configVariables: named list (or named vector) of settings.
  # Called for its side effect on .GlobalEnv; returns NULL invisibly.
  for (key in names(configVariables)) {
    assign(key, configVariables[[key]], envir = .GlobalEnv)
  }
  invisible(NULL)
}
loadDishesAsGlobals <- function()
{
  # Load the dish table via importDishes() (defined elsewhere) and publish
  # it as the global `dishes`, which the search functions read.
  # NOTE(review): assumes the global allDishesCount has already been set
  # (e.g. by loadConfigVariablesAsGlobals) — confirm call order.
  dishes = importDishes(allDishesCount)
  assign("dishes", dishes, envir = .GlobalEnv)
}
executeWithConfig <- function(configVariablesName)
{
  # Run one complete tabu-search experiment under the given configuration.
  # configVariablesName: the configuration list itself (despite the name),
  # first published as globals, after which the dish table is loaded.
  loadConfigVariablesAsGlobals(configVariablesName)
  loadDishesAsGlobals()
  # NOTE(review): randomPoint and neighborHood below are computed but never
  # used — tabuSearch() is started from a fresh generateRandomPoint() call.
  randomPoint <- generateRandomPoint()
  neighborHood <- neighborHoodFunc(randomPoint)
  result = tabuSearch(generateRandomPoint(), stopConditionFunc, neighborHoodFunc, evaluateFunc)
  # result[[1]] is the best diet found; result[[2]] the recorded
  # observations from the search.
  diet <- result[[1]]
  observations <- result[[2]]
  # Return the diet, the observations, its daily macro totals, and its
  # monotony score.
  return(list(diet,observations,sumDailyProteins(diet),sumDailyCarbohydrates(diet),sumDailyFats(diet), monotonyRatio(diet)))
}
|
19fd5d572295365c526d73029f6bb903b4b2e23e | 3f355008d1e128b783097dba39eced1a5ff366ee | /man/drop_levels.Rd | 3594ef5ed6574f418a81db772edaf164485b8854 | [] | no_license | ghowoo/Wu | 94c19372874b14dae24edbebd5081e0035b8c1be | 87a786c117492f7ddddcc69112b370d76f7b8620 | refs/heads/master | 2022-06-03T05:48:14.576333 | 2022-05-25T22:22:26 | 2022-05-25T22:22:26 | 179,358,646 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 307 | rd | drop_levels.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/drop_levels.R
\name{drop_levels}
\alias{drop_levels}
\title{The Drop Levels Functions}
\usage{
drop_levels(df)
}
\description{
This function allows you to drop all unused levels in a data.frame.
}
\keyword{drop}
\keyword{levels}
|
459a0776f547e4116ca3c5736083f3f3a5c3a8be | 14a05292970e84d53b8456dd8cd0a2b89b053fc0 | /Misc_SA_Plots/plot_age_length_fit.r | 622d4554d6f36a813e45c8b794915745f5536325 | [] | no_license | tetemouton/GGP | 479d0c86727fd14e6fd55c57bdcb23647c880208 | d0f8061b85cf7d9fbdb66980ded03474be75570d | refs/heads/master | 2021-01-10T03:00:05.154877 | 2015-12-15T05:04:05 | 2015-12-15T05:04:05 | 46,684,946 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,436 | r | plot_age_length_fit.r | plot.age.length.fit <- function(alfile="path", frqfile=read.frq("path"), inifile=read.ini("path"), plotfile=read.rep("path"),
parfile=read.par("path"), fixlog=FALSE, fix_pars=NA, sdmult=1, ylims=c(30,130), xlbl="Age (quarters)")
{
require(reshape2)
require(scales)
require(grid)
theme_set(theme_bw())
a <- readLines(alfile)
nsamp <- as.numeric(a[2])
pos <- grep("# Year Month Fishery Species", a)
if(length(pos) != nsamp) stop("ERROR: no. samples does not match matrix observations in age_length file")
lfint <- frqfile$dl$lfint # no. of length intervals
lffirst <- frqfile$dl$lffirst # no. of length intervals
lfwidth <- frqfile$dl$lfwidth # no. of length intervals
nage <- inifile$nages
# - length intervals
lenint <- seq(from=lffirst, to= (lffirst + lfint -1), by=1)
# - age intervals
ageint <- c(1:inifile$nages)
alsamps <- list()
snames <- list(Length=as.character(c(1:nsamp)),Fishdets=c("Year","Month","Fishery",
"Species","nobs"))
samp_tbl <- matrix(0,nrow=nsamp,ncol=5,dimnames=snames)
for(k in 1:nsamp){
samp_tbl[k,c(1:4)] <- as.numeric(unlist(strsplit(a[(pos[k]+1)],split="[[:blank:]]+")))
anames <- list(Length=as.character(lenint),Age=as.character(ageint))
al_tbl <- matrix(0,nrow=length(lenint),ncol=length(ageint),dimnames=anames)
for(i in 1:lfint){
al_tbl[i,] <- as.numeric(unlist(strsplit(a[(pos[k]+1+i)],split="[[:blank:]]+")))
}
samp_tbl[k,5] <- sum(al_tbl)
alsamps[[k]] <- al_tbl
}
anames <- list(Length=as.character(lenint),Age=as.character(ageint))
tot_al_tbl <- matrix(0,nrow=length(lenint),ncol=length(ageint),dimnames=anames)
for(k in 1:nsamp){
tot_al_tbl[,] <- tot_al_tbl[,] + alsamps[[k]][,]
}
# sum(tot_al_tbl)
# Diagnostic plot of fit to age-length data
# Plot age-length data
if(fixlog){
Lmin <- fix_pars[1]
Lmax <- fix_pars[2]
K <- fix_pars[3]
} else {
Lmin <- parfile$Lmin
Lmax <- parfile$Lmax
K <- parfile$K
}
# -- plot --
obsdat <- melt(tot_al_tbl)
predat <- data.frame(age = ageint,
muL = plotfile$mean.LatAge,
sdL = plotfile$sd.LatAge,
LL = plotfile$mean.LatAge - sdmult*plotfile$sd.LatAge,
UL = plotfile$mean.LatAge + sdmult*plotfile$sd.LatAge)
print(paste("Upper and lower bounds displayed are", sdmult, "times the SD of length of age"))
pldat <- obsdat[obsdat$value > 0,]
xlims <- range(predat$age)
pl <- ggplot(data=pldat, aes(x=Age, y=Length)) + geom_point(aes(size=value), colour=alpha("#6699CC", 0.7)) +
geom_line(data=predat, aes(x=age, y=muL), colour=alpha("black",0.6), size=0.8) +
geom_line(data=predat, aes(x=age, y=LL), colour=alpha(grey(0.5),0.6), size=0.6) +
geom_line(data=predat, aes(x=age, y=UL), colour=alpha(grey(0.5),0.6), size=0.6) +
xlab(xlbl) + ylab("Length (cm)") + xlim(xlims) + ylim(ylims) +
theme(panel.grid.major=element_blank(),
panel.grid.minor=element_blank(),
legend.key=element_blank(),
legend.title=element_blank(),
legend.text = element_text(size=14),
legend.position=c(0.9, 0.2),
legend.key.size = unit(0.7, "cm"))
print(pl)
}
|
852ff996a25bcbd7674f80200fb02452e950c8ec | a317fe63879cf1e9a2cbc4d54af2bdf3e3d3ac3e | /course_project_2/plot4.R | 2ec43fedbce14191162606cb0a552c944d63401b | [] | no_license | siavash9000/ExData_Plotting1 | a9323f2df5ee3ebc9c5a674490e4913c6c418a0e | 688dd2d0c6284557c25f55559a8f07b7ac1502a1 | refs/heads/master | 2021-01-18T06:05:16.550743 | 2014-06-17T19:49:04 | 2014-06-17T19:49:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 688 | r | plot4.R | library(ggplot2)
# Plot total PM2.5 emissions from coal combustion-related sources in the
# US by year, written to plot4.png.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")

NEI$type <- factor(NEI$type)

# SCC codes whose short name mentions both combustion ("Comb") and coal
# ("Coal"): merging the two subsets keeps their intersection.
coal_types <- merge(SCC[grep("Comb",SCC$Short.Name),],SCC[grep("Coal",SCC$Short.Name),])
coal_observations <- merge(NEI,coal_types,by="SCC")

# Total emissions per year, rescaled from tons to kilotons.
sumByYears <- aggregate(Emissions~year, data=coal_observations, FUN=sum)
sumByYears$Emissions <- sumByYears$Emissions /1000

png(file = "plot4.png",height=700,width=900)
# print() is required: when this script is run via source(), a bare ggplot
# expression is not auto-printed and the PNG device would stay empty.
print(
  ggplot(data=sumByYears,aes(x=year, y=Emissions)) +
    geom_line() + ggtitle("Emissions From Coal Combustion-Related Sources In US") + theme() +
    scale_x_continuous(breaks=sort(unique(sumByYears$year)))+
    ylab("Emissions in kilotons")
)
dev.off()
9662f86cecdb3deb330ed175fce04a86747c98aa | eb008c70bccc5b8868d588d3ac5c94e2b285878d | /R/bland-altman_with_proportional_bias.R | 6cf0cb5171ea8bb055d0c7b1b17e5edcba93e989 | [] | no_license | JamesHMcKay/bland_altman_plots | 01865c6b506c79617e61d4f7aeb41a5e47e91626 | d6fe0b8e12c01d087f4fac5fa16adcf7266d1343 | refs/heads/master | 2023-04-09T05:21:04.054349 | 2021-04-19T11:27:10 | 2021-04-19T11:27:10 | 338,759,666 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 823 | r | bland-altman_with_proportional_bias.R | plot_reliability <- function(measured_value_1, measured_value_2, label_1, label_2, range, filename = NULL) {
real_data = 0.5 * (measured_value_1 + measured_value_2)
difference = measured_value_1 - measured_value_2
#plot_differences(measured_value_1, measured_value_2)
#plot_differences_wrt_data(real_data, difference)
# create a data frame to easily pass the data into functions below
data <- data.frame(real_data, difference, stringsAsFactors = FALSE)
#plot_simple_ci(data)
plot_ci(linear_fit(data), data, label_1, label_2)
ggsave(paste0("plot_linear_", filename, ".pdf"), height = 5, width = 10)
plot <- plot_ci(quadratic_fit(data), data, label_1, label_2, range)
if (!is.null(filename)) {
ggsave(paste0("plot_quadratic_", filename, ".pdf"), height = 5, width = 10)
}
return(plot)
}
|
68e550770d034a4b594fd5330f304d55a227197d | 178282a8db55ece8e011a265983c286ffedfa2f8 | /cleaning/6-fuentes-producccion-energia-es/ext/script.R | 78c8d7ac92c1f2915cf18a58da7f3d312cc80637 | [] | no_license | IlanReinstein/Portfolio | 8dc113c7ed527dfe59b30eaa6360398425068ffd | 6053c5645e7227f14afefb202dc2c2f60d6029f5 | refs/heads/master | 2020-07-07T08:43:32.230551 | 2017-08-24T16:47:04 | 2017-08-24T16:47:04 | 74,037,915 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,303 | r | script.R | library(reshape2)
library(plyr)
library(datapackager)

# Region label used to filter the World Bank country metadata down to Latin
# America & the Caribbean (developing countries only).  The 2nd factor level
# of each metadata file is renamed to this accent-free label before filtering,
# exactly as the original per-indicator sections did.
latam <- 'America Latina y el Caribe (paises en desarrollo solamente)'

# Read one World Bank indicator (values csv + country metadata csv) and
# return a long-format data frame with columns country, code, indicator,
# year, value, restricted to Latin American countries.
# NOTE: like the original script, this relies on read.csv() producing factor
# columns (stringsAsFactors = TRUE, i.e. pre-R-4.0 defaults) for the
# levels()<- replacement to work.
read_indicator <- function(values_file, meta_file) {
  values <- read.csv(values_file)
  meta <- read.csv(meta_file)[, 1:4]
  names(values) <- c('country', 'code', 'indicator', 'indicatorCode',
                     as.character(1960:2014))
  names(meta) <- c('country', 'code', 'region', 'incomeGroup')
  levels(meta$region)[2] <- latam
  d <- merge(values, meta, by = c('country', 'code'))
  d <- melt(d, id = c('country', 'code', 'region', 'indicator',
                      'indicatorCode', 'incomeGroup'))
  d <- d[which(d$region == latam), ]
  d <- d[, -c(3, 5, 6)]  # drop region, indicatorCode, incomeGroup
  d <- na.omit(d)
  names(d)[4] <- 'year'  # melt()'s 'variable' column holds the year
  arrange(d, year)
}

# One tidy data frame per indicator (same object names as before).
# BUG FIX: the original GDP section reused the *fosiles* metadata
# ('g1 <- f1[,1:4]', a copy-paste slip); every indicator is now paired with
# its own metadata file.
A   <- read_indicator('production/eg.egy.prod.kt.oe_Indicator_es_csv_v2.csv',
                      'production/MetaData_Country_eg.egy.prod.kt.oe_Indicator_es_csv_v2.csv')
B   <- read_indicator('uso/eg.use.comm.kt.oe_Indicator_es_csv_v2.csv',
                      'uso/MetaData_Country_eg.use.comm.kt.oe_Indicator_es_csv_v2.csv')
C   <- read_indicator('nuclear/eg.use.comm.cl.zs_Indicator_es_csv_v2.csv',
                      'nuclear/MetaData_Country_eg.use.comm.cl.zs_Indicator_es_csv_v2.csv')
D   <- read_indicator('importaciones/eg.imp.cons.zs_Indicator_es_csv_v2.csv',
                      'importaciones/MetaData_Country_eg.imp.cons.zs_Indicator_es_csv_v2.csv')
E   <- read_indicator('renewable/eg.use.crnw.zs_Indicator_es_csv_v2.csv',
                      'renewable/MetaData_Country_eg.use.crnw.zs_Indicator_es_csv_v2.csv')
fos <- read_indicator('fosiles/eg.use.comm.fo.zs_Indicator_es_csv_v2.csv',
                      'fosiles/MetaData_Country_eg.use.comm.fo.zs_Indicator_es_csv_v2.csv')
pib <- read_indicator('PIB/ny.gdp.mktp.cd_Indicator_es_csv_v2.csv',
                      'PIB/MetaData_Country_ny.gdp.mktp.cd_Indicator_es_csv_v2.csv')
P   <- read_indicator('population/sp.pop.totl_Indicator_es_csv_v2.csv',
                      'population/MetaData_Country_sp.pop.totl_Indicator_es_csv_v2.csv')

# Master data frame: stack all indicators, then spread into one column per
# indicator.  The hard-coded names below assume the column order produced by
# dcast() (value columns ordered by indicator name) -- unchanged from the
# original script.
df <- rbind(A, B, C, D, E, fos, pib, P)
df <- arrange(df, country)
df <- dcast(df, ... ~ indicator)
colnames(df) <- c('country', 'code', 'year', 'prod_ktOil', 'use_ktOil',
                  'nuclearPerc', 'importPerc', 'renewPerc', 'fosilPerc',
                  'GDP', 'population')
df <- na.omit(df)
write.csv(df, 'datosEnergia2.csv', row.names = FALSE)
dp <- newDatapkg(df)
writeDatapackage(dp, path = 'data/')
|
0b9573f5cc9d01fea7cfe47ffda8cf1c49b22510 | 794b3436024b60a5e2a15923010e7406243a1321 | /Lectures/lec7/snails.R | ed1321c8437feecbfe2a3c4dd1da6b2fbad5a399 | [] | no_license | stel-nik/ADSM | 710a7d88952c1dc1d4391d3adc1c4d37fdbebe60 | b5280a7b74264b26a94fe22cb7b10052a19227ce | refs/heads/master | 2021-09-14T12:05:25.239445 | 2018-05-09T06:47:59 | 2018-05-09T06:47:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,195 | r | snails.R |
##################################################
# Snails
##################################################
# Exploratory binomial GLM analysis of snail mortality (death out of n) as a
# function of humidity, temperature, exposure and species.
# NOTE(review): hard-coded working directory; snails.txt must live there.
setwd("~/Kurser/02424/2016/slides/week08")
snails <- read.table("snails.txt",header=TRUE)
head(snails)
snails$p <- snails$death/snails$n
library(scatterplot3d)
# Colour points by species, plotting symbol by exposure level.
col <- numeric(dim(snails)[1])
pch <- numeric(dim(snails)[1])
col[snails$species=="A"] <- 1
col[snails$species=="B"] <- 2
pch[snails$exposure==1] <- 1
pch[snails$exposure==2] <- 2
pch[snails$exposure==3] <- 3
pch[snails$exposure==4] <- 4
scatterplot3d(snails[ ,c("humidity","temp","p")],color=col,pch=pch)
# ('color' is not a plot() argument; the corrected call with col= follows.)
plot(p~humidity,color=col,pch=pch,data=snails)
plot(p~humidity,col=col,pch=pch,data=snails)
plot(p~temp,col=col,pch=pch,data=snails)
plot(p~exposure,col=col,pch=pch,data=snails)
# Binomial response matrix: (successes, failures) = (deaths, survivors).
snails$resp <- cbind(snails$death,snails$n-snails$death)
# Backward model reduction starting from the full 4-way factorial model.
fit0 <- glm(resp~factor(humidity)*factor(temp)*factor(exposure)*species,data=snails,family=binomial)
fit1 <- update(fit0,.~.-factor(humidity):factor(temp):factor(exposure):species)
fit2 <- update(fit1,.~.-factor(humidity):factor(temp):factor(exposure)-
factor(humidity):factor(temp):species-
factor(humidity):factor(exposure):species-
factor(temp):factor(exposure):species)
fit3 <- update(fit2,.~.-factor(humidity):factor(exposure)- factor(humidity):factor(temp) -
factor(humidity):species)
fit4 <- update(fit3,.~.-factor(temp):species-factor(exposure):species )
fit5 <- update(fit4,.~.-factor(temp):factor(exposure))
# Main-effect models: temperature as factor (fit6) vs. linear term (fit7).
fit6 <- glm(formula = resp ~ humidity + factor(temp) + factor(exposure) +
species, family = binomial, data = snails)
fit7 <- glm(formula = resp ~ humidity + temp + factor(exposure) +
species, family = binomial, data = snails)
anova(fit7,test="Chisq")
summary(fit1)
summary(fit2)
summary(fit3)
summary(fit4)
summary(fit5)
summary(fit7)
plot(fit7)
# Likelihood-ratio comparisons of the reduced models against the full model.
anova(fit1,fit0,test="Chisq")
anova(fit4,fit0,test="Chisq")
anova(fit5,fit0,test="Chisq")
anova(fit6,fit0,test="Chisq")
anova(fit7,fit6,test="Chisq")
anova(fit2,test="Chisq")
drop1(fit2,test="Chisq")
drop1(fit3,test="Chisq")
drop1(fit4,test="Chisq")
drop1(fit5,test="Chisq")
drop1(fit6,test="Chisq")
summary(fit0)
glm(resp~factor(humidity)+factor(temp)+factor(exposure)+species, family=binomial,data=snails)
glm(resp~factor(humidity),family=binomial,data=snails)
head(snails)
?scatterplot3d
# Alternative exposure handling: drop level 1 (fit8) or quadratic trend (fit9).
fit8 <- glm(formula = resp ~ humidity + temp + factor(exposure) +
species, family = binomial, data = snails[snails$exposure!=1, ])
fit9 <- glm(formula = resp ~ humidity + temp + I(exposure-1) + I((exposure-1)^2) +
species, family = binomial, data = snails)
summary(fit8)
summary(fit9)
anova(fit9,fit7,test="Chisq")
summary(fit7)
# Hand evaluation of the quadratic exposure contribution at exposure 2, 3, 4.
4.10485*1-0.70029*1^2
4.10485*2-0.70029*2^2
4.10485*3-0.70029*3^2
fit10 <- glm(formula = resp ~ humidity+ temp + species+ I(exposure-1) + I((exposure-1)^2),
family = binomial, data = snails)
summary(fit10)
drop1(fit10,test="Chisq")
anova(fit10,fit9,test="Chisq")
# Wireframe of fit10 predictions over a humidity x temperature grid
# (species B, exposure 4); first on the linear-predictor scale ...
hum <- seq(40,90,by=1)
temp <- seq(5,30,by=1)
pred1 <- predict(fit10, newdata = data.frame(humidity=hum[1], temp=temp,
species = "B", exposure = 4))
s3d <- scatterplot3d(cbind(hum[1],temp,pred1),type="l",color=gray(0.5),
xlim=c(40,90),ylim=c(5,30),zlim=c(-3.5,4.5))
for(i in 2:length(hum)){
pred1 <- predict(fit10, newdata = data.frame(humidity=hum[i],
temp=temp, species = "B", exposure = 4))
s3d$points3d(cbind(hum[i],temp,pred1),
pch=19,type="l",col=gray(0.5))
}
for(i in 1:length(temp)){
pred1 <- predict(fit10, newdata = data.frame(humidity=hum,
temp=temp[i], species = "B", exposure = 4))
s3d$points3d(cbind(hum,temp[i],pred1),
pch=19,type="l",col=gray(0.5))
}
# ... then on the response (probability) scale, with observed rates overlaid.
pred1 <- predict(fit10, newdata = data.frame(humidity=hum[1], temp=temp,
species = "B", exposure = 4),
type="response")
s3d <- scatterplot3d(cbind(hum[1],temp,pred1),type="l",color=gray(0.5),
xlim=c(40,90),ylim=c(5,30),zlim=c(0,1))
I <- snails$species=="B" & snails$exposure==4
s3d$points3d(snails$humidity[I],snails$temp[I],snails$p[I],
pch=19,type="p",col=2)
for(i in 2:length(hum)){
pred1 <- predict(fit10, newdata = data.frame(humidity=hum[i],
temp=temp, species = "B", exposure = 4),
type="response")
s3d$points3d(cbind(hum[i],temp,pred1),
pch=19,type="l",col=gray(0.5))
}
for(i in 1:length(temp)){
pred1 <- predict(fit10, newdata = data.frame(humidity=hum,
temp=temp[i], species = "B", exposure = 4),
type="response")
s3d$points3d(cbind(hum,temp[i],pred1),
pch=19,type="l",col=gray(0.5))
}
plot(fit10)
fit11 <- glm(formula = resp ~ exposure + I(exposure*exposure) +
humidity+ temp + species,
family = binomial(link=logit), data = snails)
summary(fit11)
|
f04512182cc7d29e86c543f6bb9392b70036695e | dd1102ed8f681e5dfb675075b870ee948f017ccc | /plotting_parameters.R | 43d732d5da1177791ae38e3ea48bd2070bc6c43b | [] | no_license | garridoo/ljsphere | a726ec88922bd967bcee1c44ff13f73de8e146dc | 647a1bc7d6a8ae15f50a4f751c94baae89727771 | refs/heads/master | 2021-06-12T19:59:49.903325 | 2021-05-20T11:25:13 | 2021-05-20T11:25:13 | 254,386,539 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 944 | r | plotting_parameters.R |
# Shared plotting constants for the analysis scripts in this repository.
# Figure sizes for the PCoA plots.
pcoa_width <- 8
pcoa_height <- 6
pcoa_size <- 2
pcoa_alpha <- 0.7

# Shannon diversity plots.
shannon_width <- 4
shannon_height <- pcoa_height
shannon_alpha <- 0.7

# Boxplot styling.
boxplot_size <- 1
boxplot_width <- 0.75
boxplot_jitter_size <- 1

# Recovery barplots and cumulative-sum lines.
width_rec_barplot <- 5
height_rec_barplot <- 3
size_rec_barplot <- 0.35
size_cumsum <- 0.75

# Color constants.
at_color <- "#f8766c"
lj_color <- "#00bfc4"
lj_mutant_color <- "#8aaeb6"
soil_color <- "#654321"
al_color <- "#9F0011"
# FIX: lc_color was assigned twice ("#8aaeb6" then "#2a2aff"); only the
# second value was ever effective, so the dead first assignment is removed.
lc_color <- "#2a2aff"

# Plotting symbols per sample compartment.
root_shape <- 19
rhizosphere_shape <- 3
soil_shape <- 18

# ggplot2 theme
main_theme <- theme(panel.background=element_blank(),
                    panel.grid=element_blank(),
                    axis.line=element_line(color="black", size=1),
                    axis.ticks=element_line(color="black", size=1),
                    legend.background=element_blank(),
                    legend.key=element_blank(),
                    text=element_text(size=18, color="black"),
                    legend.position="none")
|
cc2849338c886674293d7454a33bdb85fcb5e94c | b4266044694a4b07a888f0b72c78381d7983b885 | /plot3.R | 46b2baa216b9483ae2b138dc84d5d7b3781961a5 | [] | no_license | RobinSmithCA/ExData_Plotting1 | d38b2ca45db8d0950b17bb04ca59738e2127aa71 | 825251c9766953ce394804541db6d254db1cebf4 | refs/heads/master | 2021-01-21T05:43:47.756310 | 2014-05-15T07:12:48 | 2014-05-15T07:12:48 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 707 | r | plot3.R | source( "assignmentLibrary.R" )
# Draw the three sub-metering series into plot3.png.
# initializeData(), openTarget() and (presumably) rawData come from
# assignmentLibrary.R, source()d at the top of this file -- confirm that
# initializeData() creates rawData in the calling environment.
targetFile <- "plot3.png"
# start by initializing the data and directories (from assignmentLibrary.R)
initializeData()
# Open the PNG file for writing
openTarget( targetFile )
# output a comparative plot of the sub metering values
plot( rawData$TimeStamp , rawData$Sub_metering_1, type = "l", col = "black", xlab = "", ylab = "Energy sub metering" )
points( rawData$TimeStamp , rawData$Sub_metering_2, type = "l", col = "red" )
points( rawData$TimeStamp , rawData$Sub_metering_3, type = "l", col = "blue" )
legend( "topright", lwd = 2.0, col = c( "black", "red", "blue" ), legend = c( "Sub_metering_1", "Sub_metering_2", "Sub_metering_3" ) )
# close the device
dev.off()
69dc5044cbb81c1b71470d2414cb98b911f11498 | 7dab5664e3b5d55b8975209651b2ba06a67b1b65 | /drivendata/nepal-earthquake/model.R | 23e97e4c2de2407d20307ff27a7cc004edfdbd4d | [] | no_license | rodrigorsdc/ML-competitions | 51b5759e431ffd232664f123d387cbbd0af68b94 | b4caaa5125257fff65344b2691c820b544470d6d | refs/heads/master | 2020-09-02T13:32:28.462184 | 2020-02-17T04:27:54 | 2020-02-17T04:27:54 | 219,232,660 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,920 | r | model.R | library(keras)
library(corrplot)
library(dummies)
library(nnet)
TEST_MOD = FALSE
f1_micro <- function(y_true, y_pred) {
  # Micro-averaged F1 score for a multi-class prediction.
  #
  # y_true: vector of true class labels.
  # y_pred: vector of predicted class labels (same length as y_true).
  # Returns a single numeric value in [0, 1].
  #
  # The confusion matrix is built on the union of labels seen in either
  # vector, so a class that never occurs in y_pred (or y_true) still gets a
  # zero row/column.  The original hard-coded 3x3 indexing (confusion[3, 3],
  # etc.) raised a subscript error whenever a class was missing; for inputs
  # containing all three classes the result is identical to before.
  lev <- sort(unique(c(as.character(y_true), as.character(y_pred))))
  confusion <- table(factor(as.character(y_true), levels = lev),
                     factor(as.character(y_pred), levels = lev))
  # Pooled (micro) counts: TP = diagonal; every off-diagonal cell is a FP
  # for its column and a FN for its row, so FP_total = FN_total = total - TP.
  TP <- sum(diag(confusion))
  total <- sum(confusion)
  Pmicro <- TP / total  # TP / (TP + FP)
  Rmicro <- TP / total  # TP / (TP + FN)
  (2 * Pmicro * Rmicro) / (Pmicro + Rmicro)
}
# ---- Data loading -----------------------------------------------------------
quakeX = read.csv('train_values.csv')
quakeY = read.csv('train_labels.csv')
quake = merge(quakeX, quakeY, by="building_id")
quakeTest = read.csv('test_values.csv')
test_building_id = quakeTest$building_id
# Balanced downsample: 25000 buildings per damage grade.
quake1 = quake[quake$damage_grade == 1,]
quake2 = quake[quake$damage_grade == 2,]
quake3 = quake[quake$damage_grade == 3,]
quake1 = quake1[sample(nrow(quake1), 25000, replace=FALSE),]
quake2 = quake2[sample(nrow(quake2), 25000, replace=FALSE),]
quake3 = quake3[sample(nrow(quake3), 25000, replace=FALSE),]
## quake <- rbind(quake1, quake2, quake3)
# Pairwise-class data sets (1 vs 3, 1 vs 2, 2 vs 3), shuffled row-wise.
quake13 <- rbind(quake1, quake3)
quake12 <- rbind(quake1, quake2)
quake23 <- rbind(quake2, quake3)
quake13 <- quake13[sample(nrow(quake13)),]
quake12 <- quake12[sample(nrow(quake12)),]
quake23 <- quake23[sample(nrow(quake23)),]
# NOTE(review): 'train' is never defined in this file, so the next two lines
# error if the script is run top to bottom -- presumably dead leftovers.
trainX <- train[, -ncol(train)]
trainY <- train[, c(1, ncol(train))]
# Re-code the categorical columns to 0-based factors.
# NOTE(review): this happens AFTER quake12/13/23 were built above, so those
# splits keep the original encodings -- confirm this is intended.
quake$position = as.numeric(quake$position) - 1
quake$land_surface_condition = as.numeric(quake$land_surface_condition) - 1
quake$foundation_type = as.numeric(quake$foundation_type) - 1
quake$roof_type = as.numeric(quake$roof_type) - 1
quake$ground_floor_type = as.numeric(quake$ground_floor_type) - 1
quake$other_floor_type = as.numeric(quake$other_floor_type) - 1
quake$plan_configuration = as.numeric(quake$plan_configuration) - 1
quake$legal_ownership_status = as.numeric(quake$legal_ownership_status) - 1
quake$position = as.factor(quake$position)
quake$land_surface_condition = as.factor(quake$land_surface_condition)
quake$foundation_type = as.factor(quake$foundation_type)
quake$roof_type = as.factor(quake$roof_type)
quake$ground_floor_type = as.factor(quake$ground_floor_type)
quake$other_floor_type = as.factor(quake$other_floor_type)
quake$plan_configuration = as.factor(quake$plan_configuration)
quake$legal_ownership_status = as.factor(quake$legal_ownership_status)
quake$damage_grade = as.numeric(quake$damage_grade) - 1
quake$damage_grade = as.factor(quake$damage_grade)
# 80/20 train/test split per pairwise problem (all data when TEST_MOD).
alpha = ifelse(TEST_MOD == TRUE, 1.0, 0.8)
d = sort(sample(nrow(quake12), nrow(quake12)*alpha))
train12 = quake12[d,]
test12 = quake12[-d,]
alpha = ifelse(TEST_MOD == TRUE, 1.0, 0.8)
d = sort(sample(nrow(quake13), nrow(quake13)*alpha))
train13 = quake13[d,]
test13 = quake13[-d,]
alpha = ifelse(TEST_MOD == TRUE, 1.0, 0.8)
# NOTE(review): indices for the 2-vs-3 split are drawn from nrow(quake13),
# not nrow(quake23) -- looks like a copy-paste slip; verify.
d = sort(sample(nrow(quake13), nrow(quake13)*alpha))
train23 = quake23[d,]
test23 = quake23[-d,]
# NOTE(review): cor() needs numeric input; confirm this exploratory line
# still runs after the factor conversions above.
M <- cor(quake, quake$damage_grade)
M
corrplot(M)
# Same 0-based factor re-coding for the submission test set.
quakeTest$position = as.numeric(quakeTest$position) - 1
quakeTest$land_surface_condition =
as.numeric(quakeTest$land_surface_condition) - 1
quakeTest$foundation_type = as.numeric(quakeTest$foundation_type) - 1
quakeTest$roof_type = as.numeric(quakeTest$roof_type) - 1
quakeTest$ground_floor_type = as.numeric(quakeTest$ground_floor_type) - 1
quakeTest$other_floor_type = as.numeric(quakeTest$other_floor_type) - 1
quakeTest$plan_configuration = as.numeric(quakeTest$plan_configuration) - 1
quakeTest$legal_ownership_status =
as.numeric(quakeTest$legal_ownership_status) - 1
quakeTest$position = as.factor(quakeTest$position)
quakeTest$land_surface_condition =
as.factor(quakeTest$land_surface_condition)
quakeTest$foundation_type = as.factor(quakeTest$foundation_type)
quakeTest$roof_type = as.factor(quakeTest$roof_type)
quakeTest$ground_floor_type = as.factor(quakeTest$ground_floor_type)
quakeTest$other_floor_type = as.factor(quakeTest$other_floor_type)
quakeTest$plan_configuration = as.factor(quakeTest$plan_configuration)
quakeTest$legal_ownership_status =
as.factor(quakeTest$legal_ownership_status)
# One binary multinomial logit per class pair, with a shared predictor set.
model12 <- multinom(damage_grade ~ foundation_type +
roof_type + ground_floor_type +
other_floor_type +
has_superstructure_mud_mortar_stone +
has_superstructure_cement_mortar_brick, data=train12)
model13 <- multinom(damage_grade ~ foundation_type +
roof_type + ground_floor_type +
other_floor_type +
has_superstructure_mud_mortar_stone +
has_superstructure_cement_mortar_brick, data=train13)
model23 <- multinom(damage_grade ~ foundation_type +
roof_type + ground_floor_type +
other_floor_type +
has_superstructure_mud_mortar_stone +
has_superstructure_cement_mortar_brick, data=train23)
if (TEST_MOD == TRUE) {
# Combine the two pairwise models into a 3-class prediction and write the
# competition submission file.
pp12 <- predict(model12, quakeTest, "probs")
pp23 <- predict(model23, quakeTest, "probs")
pred <- ifelse(pp12 < 0.3, 1, ifelse(pp23 > 0.55, 3, 2))
predicted <- data.frame(building_id=test_building_id,
damage_grade=as.numeric(pred))
write.csv(predicted, "predicted.csv", row.names=FALSE)
table(pred)
} else {
# Hold-out evaluation of the combined pairwise models.
test <- rbind(test12, test23)
test <- test[sample(nrow(test)),]
pp12 <- predict(model12, test, "probs")
pp23 <- predict(model23, test, "probs")
pp23 <- ifelse(pp23 > 0.34, 3, 2)
mean(pp23 == test23$damage_grade)
table(test23$damage_grade)
tail(test$damage_grade)
# NOTE(review): pp23 now holds labels 2/3, so 'pp23 > 0.55' is always TRUE
# and pred can never be 2 in this branch -- verify the threshold logic.
pred <- ifelse(pp12 < 0.5, 1, ifelse(pp23 > 0.55, 3, 2))
table(pred)
mean(pred == test$damage_grade)
f1_micro(test$damage_grade, pred)
}
|
597dc0052207702ef4fe54e162f82a61f8024714 | af0c4e189164afa4a3a68e0d5abd916089cfd121 | /R/crim4.R | fd24a87c07b8ee51b2b1916a953799119ea05613 | [] | no_license | kkleinke/countimp | 72669d42ff799c318ed6c75bb388e9643afca7ab | db6baeb5cd3e59640791eb4d00175cfa12d2918a | refs/heads/master | 2022-03-09T14:11:02.159270 | 2022-02-14T16:26:39 | 2022-02-14T16:26:39 | 10,684,949 | 3 | 2 | null | null | null | null | UTF-8 | R | false | false | 932 | r | crim4.R | #' The CRIMOC data set in wide format
#'
#' Dataset in wide format, see Reinecke and Weins (2013) for details.
#'
#' \describe{
#' \item{\code{id}}{participant identifier}
#' \item{\code{FEMALE}}{Gender indicator}
#' \item{\code{RE}}{school type dummy (intermediate branch)}
#' \item{\code{GY}}{school type dummy (top level branch)}
#' \item{\code{HY}}{school type dummy (lowest level)}
#' \item{\code{ACRIM}}{Delinquency score -- time 1}
#' \item{\code{BCRIM}}{Delinquency score -- time 2}
#' \item{\code{CCRIM}}{Delinquency score -- time 3}
#' \item{\code{DCRIM}}{Delinquency score -- time 4}
#'}
#' @references Reinecke, J., & Weins, C. (2013). The development of delinquency during adolescence: a comparison of missing data techniques. \emph{Quality & Quantity, 47(6)}, 3319--3334.
#' @docType data
#' @keywords datasets
#' @name crim4w
#' @usage data(crim4w)
#' @format A data frame with 2064 rows and 9 variables
NULL |
2bbe98b71f85db5c540ca7befbc12e1e99203bf8 | aab045571a5d48845e8167ae4eec63549ff31945 | /file4_Clustering.R | cc91c82c38aff1df0cb7322fb7033f9e99cd3374 | [] | no_license | ManikantaDakoju/analytics | 38ccc74b783c0709a9bf7e58f68310773342e1c0 | a6aba423122bcfec6da8228ff41ca6547b1be23e | refs/heads/master | 2021-01-07T06:28:18.970714 | 2020-03-08T10:30:49 | 2020-03-08T10:30:49 | 241,606,705 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,544 | r | file4_Clustering.R | #association rule-----
library(arules)
library(arulesViz)
#clustering-----
# FIX: 'ibrary(tidyverse)' was a typo that aborted the script on source().
library(tidyverse) # data manipulation
library(cluster) # clustering algorithms
library(factoextra) # clustering visualization
library(dendextend) # for comparing two dendrograms
library(fpc)
library(NbClust) # finding the optimal number of clusters
library(amap)
# Simulated marks for 30 students: one-variable k-means demo.
set.seed(1234)
subject1 = trunc(rnorm(30, mean=60, sd=15))
range(subject1)
subject1
marks = data.frame(subject1)
head(marks)
marks
sort(marks$subject1)
k2 = kmeans(marks, centers=2)
k2
k2$size
k2$iter
cbind(marks, k2$cluster) #which data row in to which cluster
length(marks[k2$cluster==1,])
marks[k2$cluster==2,]
marks[k2$cluster==1,]
k2$centers
# Same clustering but with user-chosen starting centers.
k2a = kmeans(marks, centers=c(50,70))
k2a
k2a$centers
#optimal number of clusters in data
#Reduce total within ss (elbow method on iris without the Species column)
iris
dim(iris)
head(iris)
table(iris$Species)
data = iris[-5]
head(data)
km1 = kmeans(data, centers=1)
km1$withinss
km1$tot.withinss
km2 = kmeans(data, centers=2)
km2$tot.withinss
km2$withinss
km3 = kmeans(data, centers=3)
km3$tot.withinss
km4 = kmeans(data, centers=4)
km4$tot.withinss
# FIX: 'km3$withinss' was inconsistent with the other three terms of this
# elbow comparison; use tot.withinss throughout.
km1$tot.withinss; km2$tot.withinss; km3$tot.withinss; km4$tot.withinss
library(cluster)
# FIX: iris columns are capitalized (Sepal.Length / Sepal.Width); the
# original 'data$sepal.Length' selected NULL and made plot() fail.
plot(data$Sepal.Length, data$Sepal.Width, col=(1:3))
head(iris)
#plot1
library(cluster)
library(fpc)
data(iris)
data = iris[, -5]
km1 = kmeans(data, centers=3)
plotcluster(data, km1$cluster)
#plot2
#More Complex
# FIX: 'lables' was a typo; clusplot()'s argument is 'labels'.
clusplot(data, km1$cluster, color=TRUE, shade=TRUE, labels=2, lines=0 )
?clusplot
#plot3
with(iris, pairs(data, col=c(1:3)[km1$cluster]))
|
9b2bceb339425724d69109ccc7090b8f9b50b8b4 | bf3d44d21e0a367b7b7e0ed72c18dd2dee748de7 | /R/mergeENDDAT.r | 7c2523ea4d66cc5499a8b8d0b673dfffd1fb1815 | [
"CC0-1.0",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-public-domain-disclaimer"
] | permissive | ldecicco-USGS/rEnddat | 4dac635ba49659a908fc283fe0a43dad86436eb0 | a5622085436fba137790be3093afecd8247b0b69 | refs/heads/master | 2021-01-21T01:16:06.455089 | 2016-02-17T22:01:26 | 2016-02-17T22:01:26 | 51,953,576 | 0 | 0 | null | 2016-02-17T20:37:12 | 2016-02-17T20:37:12 | null | UTF-8 | R | false | false | 417 | r | mergeENDDAT.r | #' Merge EnDDaT dataframes
#'
#' Merge EnDDaT dataframes
#'
#' @param DF1 dataframe
#' @param DF2 dataframe
#' @return mergedDF dataframe
#' @export
#' @examples
#' DF1 <- data.frame(time=c(1,2,3), a=c(1,4,8), b=c(2,6,9))
#' DF2 <- data.frame(time=c(1,2,4), a=c(1,4,8), c=c(2,6,9))
#' mergedDF <- mergeENDDAT(DF1,DF2)
mergeENDDAT <- function(DF1, DF2) {
  # Full (outer) join of two EnDDaT data frames on all of their shared
  # columns; rows present in only one input are kept, with NA elsewhere.
  merge(DF1, DF2, all = TRUE)
}
ebfe23a2e951030f11a3a0f4f25921f0d4febc6e | 50ef4a7b4048d0460460b0e3acdc0a5402d9ac8c | /mtx_to_rds.R | e49ddbcfbd90158ac57f3a47dfc0732331c78afd | [] | no_license | valavro/Bulk2SC | 24d77639ae23bb314f9efe4913651210339bbace | e1d0a36b6eb6fe23cf3f768ccfacdb5d7d43473d | refs/heads/main | 2023-05-24T13:03:55.663445 | 2021-06-13T22:50:34 | 2021-06-13T22:50:34 | 372,216,759 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,036 | r | mtx_to_rds.R | library(Matrix)
# Command-line usage: Rscript <script> <dataset>
# Reads a Matrix-Market sparse expression matrix plus barcode and gene name
# TSVs, attaches them as dimnames, and saves the result as <dataset>.rds.
# NOTE(review): no check that a dataset argument was actually supplied.
args <- commandArgs(trailingOnly=TRUE)
dataset = args[1]
### Load expression matrix, cell IDs (barcode.names) and gene names (feature.names)
path = paste0("Datasets/", dataset, "/Deconvolution/raw_matrix")
print("Reading expression matrix:")
exp_mat <- readMM(paste0(path, "/exp_matrix.mtx"))
barcode.names = read.delim(paste0(path, "/barcodes.tsv"),
header = FALSE,
stringsAsFactors = FALSE)
feature.names = read.delim(paste0(path, "/gene_names.tsv"),
header = FALSE,
stringsAsFactors = FALSE)
### Set column and row names
# Rows correspond to genes and columns to cells (see the print()s below).
colnames(exp_mat) <- barcode.names$V1
rownames(exp_mat) <- feature.names$V1
print(paste("Number of genes:", dim(exp_mat)[1]))
print(paste("Number of cells:", dim(exp_mat)[2]))
### Save sparse matrix in proper format
saveRDS(exp_mat, paste0("Datasets/", dataset, "/Deconvolution/", dataset, ".rds"))
print(paste("Saved as", paste0(dataset, ".rds")))
2782314a103ba53266993246cce1e351d3551f0c | 880c65fd982c6931e143258c49fedd2f98d9f756 | /dye_transfer_dilger.R | 7e95a66bc23d85f49bf3fc10947e1df7da660b31 | [] | no_license | MaxMenssen/Dilger_et_al_2020 | 1b0de054c8824041d41c44a8d6d14e6727498969 | e399529714bcca9650212fe51c3f38257fd953cd | refs/heads/master | 2022-04-27T00:43:05.686868 | 2020-04-21T11:50:21 | 2020-04-21T11:50:21 | 256,220,247 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,180 | r | dye_transfer_dilger.R | #------------------------------------------------------------------------------
#--------------------------- Dye Transfer -------------------------------------
#------------------------------------------------------------------------------
# Loading required packages
# NOTE(review): plyr is attached after the tidyverse, which masks several
# dplyr verbs; rename is called as dplyr::rename but mutate()/summarise()
# below are unqualified -- confirm the intended versions are picked up.
library(hnp)
library(tidyverse)
library(lubridate)
library(plyr)
library(emmeans)
# Data import (read.csv2: semicolon-separated, decimal comma)
dat <- read.csv2("https://raw.githubusercontent.com/MaxMenssen/Dilger_et_al_2020/master/dye_transfer_dilger.csv")
# Renaming variables, take Day as date
dat <- dat %>%
dplyr::rename(Day=Messtag,
Treatment=Behandlung,
Run=Repetition) %>%
mutate(Day=ymd(Day),
Treatment=factor(Treatment, levels=c("Control",
"NIM-1",
"7d_NIM-2",
"30d_NIM-2",
"30d_NIM-2_1d_MAT")),
Run=factor(Run))
head(dat)
str(dat)
levels(dat$Treatment)
#------------------------------------------------------------------------------
# Overview about the data
ggplot(dat, aes(x=Day, y=coupling_rate_injected))+
theme_bw()+
geom_point(aes(color=Run))+
scale_x_date(date_breaks = "15 days", date_labels = "%d %b")+
facet_grid(~Treatment)+
theme(axis.text.x=element_text(angle=90, hjust = 1))
# ATTENTION: Due to the experimental design the treatment effect might be somehow confonded
# with a time effect.
#------------------------------------------------------------------------------
# Minimum coupling rate that is greater than 0
# (min_cr is a 1x1 data frame; min_cr[,1] extracts the scalar below)
min_cr <- dat %>%
filter(coupling_rate_injected > 0) %>%
summarise(min(coupling_rate_injected)) %>%
unname()
# coupling rate + min(coupling rate): shift so that log() is defined at 0
dat$cr_p_min <- dat$coupling_rate_injected + min_cr[,1]
# Fit the model
fit <- lm(log(cr_p_min)~Treatment, dat)
# Mean comparisons (already back transformed from log scale)
comp <- emmeans(fit, specs="Treatment", contr="trt.vs.ctrl", type="response")
# Extracting the means, backtransformation to original scale
# (the pseudo-count added above is subtracted again here)
ls_means <- comp$emmeans %>%
data.frame() %>%
group_by(Treatment) %>%
transmute(coupling_rate_injected=response-min_cr[,1],
lower=lower.CL-min_cr[,1],
upper=upper.CL-min_cr[,1])
# Mark significant differences (hard-coded, in factor-level order)
ls_means$sig_star <- c("", "", "*", "*", "*")
# Save the least square means as csv file
write.csv2(ls_means, "dye_transfer_means_dilger.csv", row.names=FALSE)
# Save the contrasts
write.csv2(data.frame(comp$contrasts), "dye_transfer_contrasts_dilger.csv",
row.names=FALSE)
#------------------------------------------------------------------------------
# Grafic 2b: jittered raw rates plus back-transformed means/CIs on a log y axis
ggplot(dat, aes(x=Treatment, y=coupling_rate_injected))+
theme_bw()+
geom_point(aes(color=Treatment),
alpha=0.6,
position=position_jitter(height=0, width=0.1))+
geom_linerange(data=ls_means, aes(x=Treatment,
ymin=lower,
ymax=upper))+
geom_point(data=ls_means,
aes(x=Treatment, y=coupling_rate_injected),
shape="-", size=6)+
geom_text(data=ls_means, aes(y=upper+2.6,
x=Treatment,
label=sig_star), size=6)+
scale_y_continuous(trans="log",
breaks=c(0.001, 0.01, 0.1, 1, 10, 25),
limits=c(0.0005, 26))+
scale_x_discrete(breaks=c("Control",
"NIM-1",
"7d_NIM-2",
"30d_NIM-2",
"30d_NIM-2_1d_MAT"))+
scale_color_manual(breaks=c("Control",
"NIM-1",
"7d_NIM-2",
"30d_NIM-2",
"30d_NIM-2_1d_MAT"),
labels=c("Control",
"NIM-1",
"7d NIM-2",
"30d NIM-2",
"30d NIM-2 1d MAT"),
values=c("#FF6666",
"#CCCC33",
"#33CC33",
"#0099CC",
"#FF99FF"))+
theme(axis.text.x=element_blank(),
legend.title.align=0.5,
legend.position="",
legend.box.margin=margin(-10, -10, -10, -10),
legend.title = element_blank(),
legend.text = element_text(face="bold"),
strip.text.x = element_text(face="bold.italic"),
axis.ticks.x=element_blank(),
axis.text.y = element_text(color="black", face="bold"),
axis.title.y = element_text(color="black", size=15))+
ylab("LY dye coupling rate")+
xlab("")
ggsave("dye_transfer_fig_2.png", width=9, height=10, units="cm", dpi=600)
|
b14b2273c8311cbd023d63c27c5dfd33a34dab82 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/DCEM/examples/sd_uv.Rd.R | 8f31743c29e83a235a9e872cb1e57198d6c69fef | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 182 | r | sd_uv.Rd.R | library(DCEM)
# Extracted help-page example for sd_uv(); requires the DCEM package loaded
# at the top of this file (sd_uv itself is defined in DCEM, not here).
### Name: sd_uv
### Title: sd_uv: Part of DCEM package.
### Aliases: sd_uv
### ** Examples
# Standard deviation of a random sample.
sd_uv(rnorm(100,20,10), 2)
|
13b5f91db02a47c1e366a54ffa79f83ac925a7e6 | f1532f6d61badcbbb1cfeb6f616fb2faac69ce9b | /shiny/NGS_cost/server.R | 82e295ee955e908764bc3ff3961e56d1b83edc2c | [
"MIT"
] | permissive | leylabmpi/leylab_pipelines | 16d70890df23324721160a2408831d6788c71e95 | 1c94b2ba55d877c489143a105c72c963e1e9ce51 | refs/heads/master | 2021-01-11T23:55:45.645801 | 2017-04-29T15:45:53 | 2017-04-29T15:45:53 | 78,647,037 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,601 | r | server.R | # Shiny server
library(shiny)
library(dplyr)
library(propagate)
library(rhandsontable)
library(readxl)
#-- functions --#
prop_total_GB = function(N_lanes, GB_per_run,
                         Lanes_per_run, alpha=0.05){
  # Uncertainty propagation for total sequencing output:
  #   Total_GB = GB_per_run / Lanes_per_run * N_lanes
  # Arguments follow the propagate() input convention (per-variable mean/sd
  # columns); alpha sets the confidence level for the propagation.
  # Returns a 1-column data frame named 'Total_GB' holding the 1st and 3rd
  # entries of the propagated summary (mean and sd per the propagate package).
  input = cbind(N_lanes, Lanes_per_run, GB_per_run)
  total_expr = expression(GB_per_run / Lanes_per_run * N_lanes)
  prop_res = propagate(total_expr, input, type='stat', alpha=alpha)
  out = as.data.frame(prop_res$prop[c(1, 3)])
  colnames(out) = 'Total_GB'
  out
}
# Propagate uncertainty for: Cost_per_GB = Total_cost / Total_GB.
# Both inputs are mean/SD pairs (rows 1 and 2 of the working table).
# Returns a one-column data.frame 'Cost_per_GB' (propagated mean and sd).
prop_cost_per_GB = function(Total_cost, Total_GB, alpha=0.05){
  vals <- cbind(Total_cost, Total_GB)
  form <- expression(Total_cost / Total_GB)
  prop_res <- propagate(form, vals, type='stat', alpha=alpha)
  out <- as.data.frame(prop_res$prop[c(1, 3)])
  colnames(out) <- 'Cost_per_GB'
  out
}
# Propagate uncertainty for: GB_per_sample = Total_GB / N_samples.
# Total_GB: mean/SD pair from a previous propagation step; N_samples:
# numeric (its SD row is 0). Returns a one-column data.frame
# 'GB_per_sample' holding elements 1 and 3 of the propagate() summary
# (presumably mean and sd -- confirm against the propagate docs).
prop_GB_per_sample = function(Total_GB, N_samples, alpha=0.05){
  #.$Total_GB_mean, .$Total_GB_sd, .$N_samples
  # n_samples = numeric
  # total_GB = prop_object
  # CALC: GB_per_sample = total_GB / n_samples
  df = cbind(Total_GB, N_samples)
  ex = expression(Total_GB / N_samples)
  ret = propagate(ex, df, type='stat', alpha=alpha)
  ret = ret$prop[c(1, 3)] %>% as.data.frame
  colnames(ret) = c('GB_per_sample')
  return(ret)
}
# Propagate uncertainty for the coverage of a target genome:
#   Target_coverage = GB_per_sample * (Target_rel_abund / 100) / (Target_genome_size / 1000)
# Relative abundance is given in percent; genome size presumably in Mbp
# (divided by 1000) -- TODO confirm units.
# NOTE(review): unlike the other prop_* helpers this call omits
# type='stat', so propagate() uses its default mode -- confirm intended.
prop_target_coverage = function(GB_per_sample, Target_rel_abund,
                                Target_genome_size, alpha=0.05){
  # GB_per_sample = prop_object
  # Target_rel_abund = numeric (mean,sd)
  # Target_genome_size = numeric (mean,sd)
  # CALC: Target_coverage = GB_per_sample * (Target_rel_abund / 100) / (Target_genome_size / 1000)
  df = cbind(GB_per_sample, Target_rel_abund, Target_genome_size)
  ex = expression(GB_per_sample * (Target_rel_abund / 100) / (Target_genome_size / 1000))
  ret = propagate(ex, df, alpha=alpha)
  ret = ret$prop[c(1, 3)] %>% as.data.frame
  colnames(ret) = c('Target_coverage')
  return(ret)
}
make_sum_table = function(input, df_seq, df_lib){
# filtering df
if(input$sequencer == 'HiSeq_3000'){
sequencer_reagents = input$HiSeq_sequencer_reagents
} else
if(input$sequencer == 'MiSeq'){
sequencer_reagents = input$MiSeq_sequencer_reagents
} else{
stop('Sequencer not recognized')
}
df_seq = df_seq %>%
filter(Sequencer == input$sequencer,
Seq_reagents == sequencer_reagents)
df_lib = df_lib %>%
filter(Lib_prep_kit == input$library_prep_kit)
df_seq = cbind(df_seq, df_lib)
# calculating
df_seq = df_seq %>%
mutate(# number of sequencing lanes
N_samples = input$n_samples,
N_multiplex = input$n_multiplex,
N_lanes = ceiling(N_samples / N_multiplex),
N_samples_per_lane = N_samples / N_lanes,
N_lanes = N_lanes * input$n_runs_per_sample,
N_seq_reagent_kits = N_lanes,
N_lib_prep_kits = ceiling(N_samples) / Lib_prep_kit_multiplex,
# costs
Total_cost = N_lib_prep_kits * Lib_prep_kit_cost +
N_seq_reagent_kits * Lane_cost,
Cost_per_lane = Total_cost / N_lanes,
Cost_per_sample = Total_cost / N_samples
)
df_seq = rbind(df_seq, rep(0, length(df_seq)))
df_seq[2, 'GB_per_run'] = df_seq[1,'GB_per_run_sd']
df_seq = df_seq %>%
dplyr::select(-GB_per_run_sd)
# error propagation
## Total GB (all lanes)
df_tmp = df_seq %>%
do(prop_total_GB(.$N_lanes, .$GB_per_run, .$Lanes_per_run))
df_seq = cbind(df_seq, df_tmp)
## Cost per GB
df_tmp = df_seq %>%
do(prop_cost_per_GB(.$Total_cost, .$Total_GB))
df_seq = cbind(df_seq, df_tmp)
## GB per sample
df_tmp = df_seq %>%
do(prop_GB_per_sample(.$Total_GB, .$N_samples))
df_seq = cbind(df_seq, df_tmp)
## Coverage of target genome
if(input$target_genome_bool == TRUE){
df_tmp = data.frame(Target_rel_abund = c(input$target_rel_abund,
input$target_rel_abund_sd),
Target_genome_size = c(input$target_genome_size,
input$target_genome_size_sd))
df_seq = cbind(df_seq, df_tmp)
df_tmp = df_seq %>%
do(prop_target_coverage(.$GB_per_sample, .$Target_rel_abund, .$Target_genome_size))
} else {
df_tmp = data.frame(Target_coverage = c(NA, NA))
}
df_seq = cbind(df_seq, df_tmp)
# formatting output
df_seq = df_seq %>%
dplyr::select(GB_per_run, Lanes_per_run,
Lane_cost, Lib_prep_kit_cost,
N_lanes, N_seq_reagent_kits, N_lib_prep_kits,
N_samples_per_lane,
Total_cost, Cost_per_lane,
Cost_per_sample, Total_GB, Cost_per_GB,
GB_per_sample, Target_coverage)
df = df_seq %>% t %>% as.data.frame
colnames(df) = c('Mean', 'SD')
df$Variable = gsub('_', ' ', colnames(df_seq))
df$Variable = gsub('^N ', '# of ', df$Variable)
df = df %>%
dplyr::select(Variable, Mean, SD) %>%
mutate('Mean + SD' = Mean + SD,
'Mean - SD' = Mean - SD) %>%
dplyr::select(-SD)
return(df)
}
#-- server --#
# Server entry point: load the cost reference tables once per session and
# render the summary table produced by make_sum_table().
shinyServer(function(input, output, session) {
  values = reactiveValues()  # NOTE(review): currently unused
  # reference data shipped with the app
  df_seq = read_excel('data/seq_costs.xlsx', sheet='sequencer')
  df_lib = read_excel('data/seq_costs.xlsx', sheet='lib_prep')
  # summary table
  output$summaryTable = renderTable(make_sum_table(input, df_seq, df_lib),
                                    digits=1)
})
|
27c1e3f2e4304420c61c2ab9f5c547d8215ef1e2 | 9b2f64f9265a279f1e3784d7f741a59089a51ce9 | /modules/tools_tab/tools/pco2_tool.R | 18a39330e3da0383a9163f9d6ade9976f254e0c1 | [
"MIT"
] | permissive | jfontestad/SBER-METALP-data-portal | c7d29eb2f7660094c78dbebae0ab0e6bc69b798e | fe2da14658261b3844c331e09a7489788ca5718d | refs/heads/master | 2023-02-27T23:05:10.356685 | 2021-02-02T17:10:42 | 2021-02-02T17:10:42 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,427 | r | pco2_tool.R | ## This module contains the UI and server code for the pCO2 tool
## Create module UI function ######################################################
pCO2ToolUI <- function(id, ...) {
  # Create the UI for the pCO2Tool module
  # Parameters:
  #  - id: String, the module id
  #  - ...: additional arguments (unused here; kept for call-site compatibility)
  #
  # Returns a div containing the layout

  # Create namespace
  ns <- NS(id)

  # Create layout
  div(
    class = 'pco2-tool tools-layout',
    # left column: editable raw measurements and lab constants
    div(
      class = 'raw-data',
      toolTableUI(ns('rawData')),
      toolTableUI(ns('labCst'))
    ),
    # right column: calculation controls plus read-only derived tables
    div(
      class = 'calculation',
      div(
        class = 'calculation-header',
        h4('Calculated columns:'),
        # toggles: use fixed lab temperature / pressure constants instead of
        # the per-row database values during the calculation
        checkboxInput(ns('useTCst'), 'Use lab temp constant', value = FALSE),
        checkboxInput(ns('usePCst'), 'Use lab pressure constant', value = FALSE),
        actionButton(ns('calculate'), 'Calculate', class = 'custom-style custom-style--primary')
      ),
      div(
        class = 'calculated',
        toolTableUI(ns('pco2Ch4')),
        toolTableUI(ns('avgSd'))
      )
    )
  )
}
## Create module server function ##################################################
pCO2Tool <- function(input, output, session, pool, site, datetime, ...) {
  # Create the logic for the pCO2Tool module
  # Parameters:
  #  - input, output, session: Default needed parameters to create a module
  #  - pool: The pool connection to the database
  #  - site, datetime: reactives giving the selected station and the selected
  #    date/time (list with $date and $time)
  #  - ...: forwarded to the toolTable sub-modules
  #
  # Returns a list of reactives: df (row to save), errors, observers,
  # noCheckCols and checkCols (column-validation metadata)

  ## Track observer ##############################################################

  # Reactive values that contain the observers output
  observersOutput <- reactiveValues()

  ## Get Row ####################################################################

  # The database row matching the selected station and datetime
  row <- reactive({
    req(datetime(), site())
    site <- site()
    datetime <- datetime()
    selectedDate <- datetime$date
    selectedTime <- datetime$time

    # Get columns: id/date columns, all pCO2 parameters, field data used in
    # the calculations, and bookkeeping timestamps
    columns <- c(
      'id', 'station', 'DATE_reading', 'TIME_reading', 'Convert_to_GMT', 'TIME_reading_GMT',
      # Get pCO2 parameters
      getRows(
        pool,
        'grab_param_categories',
        category == 'pCO2',
        columns = c('order', 'param_name')
      ) %>% pull('param_name'),
      # Get field data used in calculation
      'WTW_Temp_degC_1', 'Field_BP', 'Field_BP_altitude',
      'created_at', 'updated_at'
    )

    # Get data
    getRows(
      pool,
      'data',
      station == site,
      DATE_reading == selectedDate,
      TIME_reading_GMT == selectedTime,
      columns = columns
    )
  })

  ## Render raw data ####################################################################

  # Row filtering: raw lab measurements (excluding lab constants and the
  # derived dry-CH4 columns)
  rawData <- reactive({
    row() %>% select(starts_with('lab_co2_'), -c(lab_co2_lab_temp, lab_co2_lab_press, starts_with('lab_co2_ch4_dry')))
  })

  # Call table module and retrieve updates
  rawDataUpdated <- callModule(toolTable, 'rawData', rawData, replicates = TRUE, ...)

  ## Render lab constant table ####################################################################

  # Row filtering
  labCst <- reactive({
    row() %>% select(lab_co2_lab_temp, lab_co2_lab_press)
  })

  # Call table module and retrieve updates
  labCstUpdated <- callModule(toolTable, 'labCst', labCst, ...)

  ## Render pCO2 and CH4 calculation #####################################################

  # Calculated values: freshly computed values after a click on Calculate,
  # otherwise the stored database values
  pco2Ch4 <- reactive({
    if (useCalculated()) {
      calculations$pco2Ch4
    } else {
      row() %>% select(matches('^CO2_HS|^pCO2_HS|^CH4|^lab_co2_ch4_dry_'), -matches('_avg$|_sd$'))
    }
  })

  # Call table module and retrieve updates
  pco2Ch4Updated <- callModule(toolTable, 'pco2Ch4', pco2Ch4, readOnly = TRUE, replicates = TRUE)

  ## Render avg and sd calculation ##################################################

  # Calculated values
  avgSd <- reactive({
    if (useCalculated()) {
      calculations$avgSd
    } else {
      row() %>% select(matches('_avg$|_sd$'))
    }
  })

  # Call table module and retrieve updates
  avgSdUpdated <- callModule(toolTable, 'avgSd', avgSd, readOnly = TRUE, replicates = TRUE)

  ## Calculation logic ############################################################

  # Use default or calculated values
  useCalculated <- reactiveVal(FALSE)

  # Reset on site or date update
  observersOutput$resetUseCalculated <- observe({
    site();datetime()
    useCalculated(FALSE)
  })

  # Store calculation
  calculations <- reactiveValues()

  # Calculate upon button click
  observersOutput$calculationLogic <- observeEvent(input$calculate, ignoreInit = TRUE, {
    # Calculate pCO2 and CH4
    # Set to NULL in case of new calculation
    calculations$pco2Ch4 <- NULL

    # Get Field data used in calculation
    fieldData <- row() %>% select(WTW_Temp_degC_1, Field_BP, Field_BP_altitude)

    # Get use constant input value ('cst' = lab constant, 'db' = stored value)
    if (input$useTCst) {
      labTemp <- 'cst'
    } else {
      labTemp <- 'db'
    }

    if (input$usePCst) {
      labPa <- 'cst'
    } else {
      labPa <- 'db'
    }

    # For each replicate
    for (rep in c('A', 'B')) {
      # Get replicate values
      # (cleanup: removed a stray trailing comma inside select())
      co2_raw <- rawDataUpdated() %>% select(starts_with('lab_co2_co2ppm') & ends_with(rep))
      ch4_h20 <- rawDataUpdated() %>% select(matches('^lab_co2_h2o|^lab_co2_ch4') & ends_with(rep))

      # Create column names
      colNames <- paste0(
        c('lab_co2_ch4_dry_',
          'CH4_calc_umol_L_',
          'CO2_HS_Um_',
          'pCO2_HS_uatm_',
          'pCO2_HS_P1_uatm_',
          'pCO2_HS_P2_uatm_'),
        rep
      )

      # Calculate values that are needed for subsequent calculation
      lab_co2_ch4_dry <- calcCH4dry(ch4_h20)
      CO2_HS_Um <- calcCO2(
        bind_cols(
          co2_raw,
          labCstUpdated()
        ),
        pool,
        labTemp,
        labPa
      )

      # Parameters for pCO2 calculation (renamed from the typo'd
      # 'pC02parameters', zero vs letter O)
      pCO2params <- fieldData %>%
        mutate(
          !!colNames[3] := CO2_HS_Um
        )

      # Calculate new columns; setNames() applies the replicate-suffixed names
      newCols <- setNames(
        data.frame(
          lab_co2_ch4_dry = lab_co2_ch4_dry,
          # BUG FIX: this argument used '<-' instead of '=', which performed
          # an accidental assignment and passed the value as an UNNAMED
          # argument (only rescued by the surrounding setNames()).
          CH4_calc_umol_L = calcCH4(
            bind_cols(
              labCstUpdated(),
              fieldData
            ) %>% mutate(
              !!colNames[1] := lab_co2_ch4_dry
            ),
            pool,
            labTemp,
            labPa
          ),
          CO2_HS_Um = CO2_HS_Um,
          pCO2_HS_uatm = calcpCO2(pCO2params, pool),
          pCO2_HS_P1_uatm = calcpCO2P1(pCO2params, pool),
          pCO2_HS_P2_uatm = calcpCO2P2(pCO2params, pool)
        ),
        colNames
      )

      # If calculations$pco2Ch4 is NULL, create it else update it
      if (is.null(calculations$pco2Ch4)) {
        calculations$pco2Ch4 <- newCols
      } else {
        calculations$pco2Ch4 <- bind_cols(
          calculations$pco2Ch4,
          newCols
        )
      }
    }

    # Calculate CO2 and CH4 avg and sd
    # Set to NULL in case of second calculation
    calculations$avgSd <- NULL

    for (param in c('CO2_HS_Um', 'pCO2_HS_uatm', 'pCO2_HS_P1_uatm', 'pCO2_HS_P2_uatm', 'd13C_CO2', 'CH4')) {
      # Select data (d13C comes straight from the raw isotope measurements)
      if (param == 'd13C_CO2') {
        df <- rawDataUpdated() %>% select(starts_with('lab_co2_ico2'))
      } else {
        df <- calculations$pco2Ch4 %>% select(starts_with(param), -matches('_avg$|_sd$'))
      }

      # Calculate mean and sd
      newMean <- calcMean(df)
      newSd <- calcSd(df)

      if (param == 'CH4') {
        meanCol <- 'CH4_umol_L_avg'
        sdCol <- 'CH4_umol_L_sd'
      } else {
        meanCol <- paste0(param, '_avg')
        sdCol <- paste0(param, '_sd')
      }

      # Set new mean and sd
      # If KEEP OLD, take it from the row()
      newCols <- setNames(
        data.frame(
          meanCol = ifelse(
            newMean != 'KEEP OLD',
            newMean,
            pull(row(), meanCol)
          ),
          sdCol = ifelse(
            newSd != 'KEEP OLD',
            newSd,
            pull(row(), sdCol)
          )
        ),
        c(meanCol, sdCol)
      )

      # If calculations is NULL, create it else update it
      if (is.null(calculations$avgSd)) {
        calculations$avgSd <- newCols
      } else {
        calculations$avgSd <- bind_cols(
          calculations$avgSd,
          newCols
        )
      }
    }

    # Use calculation
    useCalculated(TRUE)
  })

  ## Return row ####################################################################

  # Return a reactive expression
  return(
    list(
      # Returns the row to update
      df = reactive({
        # Re-run when site or date updates
        site();datetime()
        # Return the row
        bind_cols(
          row() %>% select(
            id, station, starts_with('DATE'), starts_with('TIME'), ends_with('GMT'),
            ends_with('_at')
          ),
          rawDataUpdated(),
          labCstUpdated(),
          pco2Ch4Updated(),
          avgSdUpdated()
        )
      }),
      # Returns errors and warnings
      errors = reactive(
        list(
          errors = c(),
          warnings = c()
        )
      ),
      # Return observers to destroy them from the outer module
      observers = observersOutput,
      # Return a character vector containing the name of the columns not to check
      noCheckCols = reactive(row() %>% select(matches('dry|h2o|_sd$|_temp$|_press$|^CO2|^(pCO2|CH4).*(A|B)$|^pCO2_HS_(P1_)?uatm$', ignore.case = FALSE)) %>% colnames()),
      # Return a list containing key-value pairs of columns to check with the regex to get the columns to check against
      checkCols = reactive({
        cols2check <- list()
        # Add all standard comparisons
        cols <- row() %>% select(matches('^(d13C|pCO2_HS_P2|CH4).*_avg$', ignore.case = FALSE)) %>% colnames()
        cols2check <- c(
          cols2check,
          `names<-`(as.list(cols), cols)
        )
        # Add complex comparisons
        cols <- row() %>% select(matches('lab_co2_(ch4|ich4|co2ppm|ico2)_(A|B)$', ignore.case = FALSE)) %>% colnames()
        cols2check <- c(
          cols2check,
          `names<-`(as.list(sub('_[AB]$', '_(A|B)', cols)), cols)
        )
        # Return list
        cols2check
      })
    )
  )
}
|
30bcbebe97456bf3f1d7e55e495a0658be10a5a8 | 00042f5bad50f8e0ff84b25bb04c5b26c0e44864 | /R Programming/week_2.R | 82abcf9884857fc3891e4bde9fa546c5f945b3ee | [] | no_license | guddu75/datasciencecoursera | 48fdef5677e86e495290de21223416d4dcabd84c | 50ed752378cf451a8756afe4978ec569d2fa9988 | refs/heads/master | 2023-01-19T02:41:51.963031 | 2020-11-20T20:05:56 | 2020-11-20T20:05:56 | 313,021,095 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,285 | r | week_2.R | ######## CONTROL STRUCTURE OF R ###############
######### IF-ELSE##############
# Demo: if/else as a statement (assigns inside each branch).
# NOTE(review): `x` must already exist in the workspace for these examples.
if(x>3){
  y<-10
} else{
  y<-0
}
# Demo: if/else used as an expression whose value is assigned -- same result.
y <- if(x>3){
  10
}else{
  0
}
###both are same
### FoR LOOPS
for(i in 1:10){
  print(i)
}
x<- c('a','b','c','d')
for(i in 1:4){
  print(x[i])
}
# seq_along() is safer than 1:length(x) when x might be empty
for(i in seq_along(x)){ # seq_along takes a vector and return a integer sequence of length of vector
  print(x[i])
}
# Iterate over the elements directly instead of over indices.
for(letter in x){
  print(letter)
}
# Single-statement loop bodies do not require braces.
for(i in 1:4) print(x[i])
#### WHILE LOOPS
count<-0
while (count<10) {
  print(count)
  count<- count+1
}
# Random walk: z moves up or down by 1 on a fair coin flip and the loop
# stops once z leaves the interval [3, 10].
z<-5
while(z>=3 && z<=10){
  print(z)
  coin<-rbinom(1,1,0.5)
  if(coin==1){
    z<-z+1
  }else{
    z<-z-1
  }
}
##### REPEAT NEXT BREAK
# repeat initiates a infinite loop
# NOTE(review): computeEstimate() is not defined anywhere in this file;
# this chunk is illustrative only and will error if run as-is.
x0<-1
tol<-1e-8
repeat{
  x1<- computeEstimate()
  if(abs(x1-x0)<tol){
    break;
  }else{
    x0<-x1
  }
}
##next is like continue
for(i in 1:100){
  if(i<=20){
    next
  }
  print(i)
}
################## FUNCTION #####################
## Return the elementwise sum of x and y.
add2 <- function(x, y) {
  x + y
}
## Return the elements of x that are strictly greater than 10.
above10 <- function(x) {
  x[x > 10]
}
## Compute the mean of every column of a data frame (or matrix-like object
## indexable as x[, i]).
##
## Args:
##   x: object with columns (data.frame or matrix).
##   removeNA: passed to mean() as na.rm; if TRUE (default) NA values are
##     dropped before averaging.
##
## Returns: a numeric vector with one mean per column of x.
column_mean <- function(x, removeNA = TRUE) {
  # seq_len() handles the zero-column case safely (the old 1:nc loop
  # iterated over c(1, 0) and errored on empty input); vapply replaces the
  # manual preallocate-and-fill loop.
  vapply(
    seq_len(ncol(x)),
    function(i) mean(x[, i], na.rm = removeNA),
    numeric(1)
  )
}
|
e70f1dd77ec2e5539d298feac2f56831a46773c4 | 71375d6e5c8b3ad5f49d2d689a7588ef686c10be | /cachematrix.R | de9a4c372bc20294a6a6dab4b1fb1466710db7a6 | [] | no_license | shadimari/ProgrammingAssignment2 | a375e5e997794213cf21cb9cf4f24509ec757b56 | 155f2de576284be24502cd799a15ea3e0e264f3a | refs/heads/master | 2021-01-18T10:36:52.583138 | 2015-02-22T14:49:38 | 2015-02-22T14:49:38 | 31,165,923 | 0 | 0 | null | 2015-02-22T14:43:56 | 2015-02-22T14:43:55 | null | UTF-8 | R | false | false | 1,328 | r | cachematrix.R | ## Below are two functions that are used to create a special object that stores a matrix and
## cache's its inverse.
## This function creates a special "matrix" which is really a list containing a function to:
## set the value of the matrix (set_matrix)
## get the value of the matrix (get_matrix)
## set the value of the inverse (set_inverse)
## get the value of the inverse (get_inverse)
## Construct a matrix wrapper that can cache its inverse. Returns a list of
## four accessor functions closing over `x` and `inverse`:
##   set_matrix(y)      -- store a new matrix and invalidate the cache
##   get_matrix()       -- retrieve the stored matrix
##   set_inverse(solve) -- store a computed inverse in the cache
##   get_inverse()      -- retrieve the cached inverse (NULL if unset)
makeCacheMatrix <- function(x = matrix()) {
  inverse <- NULL
  set_matrix <- function(y) {
    # replacing the matrix invalidates any previously cached inverse
    x <<- y
    inverse <<- NULL
  }
  get_matrix <- function() {
    x
  }
  set_inverse <- function(solve) {
    inverse <<- solve
  }
  get_inverse <- function() {
    inverse
  }
  list(
    set_matrix = set_matrix,
    get_matrix = get_matrix,
    set_inverse = set_inverse,
    get_inverse = get_inverse
  )
}
## This function calculates the inverse of the special "matrix" created above.
## it first checks to see if the inverse has already been calculated
## If yes, then it gets the inverse from the cache.
## If no, it calculates the inverse and sets the value of the inverse in the cache
cacheSolve <- function(x, ...) {
  # Return the inverse of the special "matrix" x (built by makeCacheMatrix),
  # computing and caching it on first use; '...' is forwarded to solve().
  cached <- x$get_inverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inv <- solve(x$get_matrix(), ...)
  x$set_inverse(inv)
  inv
}
84a3f81e8a5b361b89e5749216b97feb12315a95 | b42b4404acbde3775c1de3eed457a7bd6e150b8b | /man/isValidDateTime.Rd | 61537bccdb047f7d130a25359defae79b907e0e4 | [
"MIT"
] | permissive | fabarca/reutiles | 3f886308ff87d754fd49af4dda4dcbf92db63fc8 | bdcfd0d79cbd2b1dceec3efb0edd4c4d38e93ae1 | refs/heads/master | 2020-05-21T17:57:15.518053 | 2018-04-10T12:12:07 | 2018-04-10T12:12:07 | 62,224,272 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 434 | rd | isValidDateTime.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/isValidFunctions.R
\name{isValidDateTime}
\alias{isValidDateTime}
\title{Check Datetime String Format}
\usage{
isValidDateTime(datetime, format = "\%Y-\%m-\%d \%H:\%M:\%S")
}
\arguments{
\item{format}{By default equals to "\%Y-\%m-\%d \%H:\%M:\%S"}
}
\description{
Return a boolean depending on whether the datetime string is in the right format or not.
}
|
5fa74b6a2b20ca670abc07cfa7b206d36d927fab | c73fe63e316fc0733f37d9052f37198b3b0c5bda | /simulations_imitation.R | e73c816c424aedf26cdba72912c2a987eab58f00 | [] | no_license | alescia/CS2-Simulations | a6fc28e3712d06fdc86536127936135485bab3aa | 042325dd4f85aa32b132c3a91e6f27a08408dec6 | refs/heads/master | 2021-05-04T10:47:50.037488 | 2018-03-01T13:19:53 | 2018-03-01T13:19:53 | 47,568,951 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,024 | r | simulations_imitation.R | # DISTRIBUTION FUNCTION; FOR DRAW OF RANDOM PRICES
library(distr)
library(ggplot2)
pricesA<-seq(from=0, to=5,by=0.1)
pricesBC<-c(pricesA,pricesA+5) # in second place, the CS prices --> identified as CS by adding 5
# to draw starting points
distA <-DiscreteDistribution(supp = pricesA, prob = rep(1/length(pricesA),length(pricesA)))
distBC <-DiscreteDistribution(supp = pricesBC, prob = rep(1/length(pricesBC),length(pricesBC)))
#
rdistA <- r(distA) # function to create random variates from p
rdistBC <- r(distBC)
pA<-c(rdistA(1))
pB<-c(rdistBC(1))
pC<-c(rdistBC(1))
# PROFIT FUNCTION
# Profit function of the three-firm pricing game.
# pA, pB, pC are scalar "encoded" prices: for firms B and C a value > 5
# flags the CS (standard) format, with the true price = value - 5.
# Relies on globals: v (base value), e (loyalty bonus), mu (share of savvy
# consumers), lambda (price penalty on non-standard offers).
# Returns a length-3 vector of profits for firms (A, B, C); profits are
# zeroed for prices outside (0, 5).
profit_ftn<-function(pA,pB,pC){
  # NOTE(review): stA uses <= 5 while stB/stC use > 5 -- the format flag
  # appears to be defined differently for firm A; confirm intended.
  stA<-(pA<=5)
  stB<-(pB>5)
  stC<-(pC>5)
  # decode prices and collect the format flags into one row
  p<-cbind(pA,pB-5*stB,pC-5*stC,stA,stB,stC)
  p<-matrix(p,ncol = 6)
  # NOTE(review): the single-bracket indexing below assumes a single row
  # (scalar inputs); st = format flags, p = decoded prices
  st<-p[4:6]
  p<-p[1:3]
  v_naiveA<-c(v+e,v,v)-p # value for naive consumer of type A
  v_naiveA
  v_naiveB<-c(v,v+e,v)-p
  v_naiveC<-c(v,v,v+e)-p
  v_n_A<-as.numeric(v_naiveA==max(v_naiveA)) # which firm does naive of type A buy from (a,b,c)
  v_n_B<-as.numeric(v_naiveB==max(v_naiveB))
  v_n_C<-as.numeric(v_naiveC==max(v_naiveC))
  sales_n_A<-v_n_A/sum(v_n_A) # to take account of ties
  sales_n_B<-v_n_B/sum(v_n_B)
  sales_n_C<-v_n_C/sum(v_n_C)
  # savvy consumers: the loyalty bonus applies only to non-standard offers,
  # and non-standard prices carry the lambda penalty
  v_savvyA<-c(v,v,v)+c(e,0,0)*(1-st)-p*(1+(1-st)*lambda)
  v_savvyB<-c(v,v,v)+c(0,e,0)*(1-st)-p*(1+(1-st)*lambda)
  v_savvyC<-c(v,v,v)+c(0,0,e)*(1-st)-p*(1+(1-st)*lambda)
  v_s_A<-as.numeric(v_savvyA==max(v_savvyA))
  v_s_B<-as.numeric(v_savvyB==max(v_savvyB))
  v_s_C<-as.numeric(v_savvyC==max(v_savvyC))
  sales_s_A<-v_s_A/sum(v_s_A)
  sales_s_B<-v_s_B/sum(v_s_B)
  sales_s_C<-v_s_C/sum(v_s_C)
  # if exactly one flag is set all consumers behave naively; otherwise a
  # share mu of consumers is savvy
  profits<-c(sum(st)==1)*(sales_n_A+sales_n_B+sales_n_C)*p+(sum(st)!=1)*((1-mu)*(sales_n_A+sales_n_B+sales_n_C)*p+mu*(sales_s_A+sales_s_B+sales_s_C)*p)
  profits<-profits*(p<5)
  # last expression is an assignment, so the result is returned invisibly;
  # callers that capture the value (profit <- profit_ftn(...)) are unaffected
  profits<-profits*(p>0)
}
## Vector-argument adapter for profit_ftn(), so routines that supply a single
## parameter vector (e.g. to compute the maximum) can evaluate profits.
fbb <- function(x) {
  profit_ftn(x[[1]], x[[2]], x[[3]])
}
# Best Imitation Functions
## Best-imitation rule: adopt the price of whichever firm earned the highest
## profit, re-encoded into each firm's own format (prices > 5 flag the CS
## format; true price = value - 5). Depends on profit_ftn() and, through it,
## on globals v, e, mu, lambda.
##
## Args: pA, pB, pC -- current encoded prices of firms A, B and C.
## Returns: numeric vector c(pA, pB, pC) of imitated, re-encoded prices.
Best<-function(pA,pB,pC){
  p <- c(pA, pB, pC)
  st <- (p > 5)                    # current CS-format flags
  profit <- profit_ftn(pA, pB, pC)
  # Cleanup: removed two leftover debugging statements (bare `profit` and
  # bare `which.max(profit)`) and compute which.max() once instead of five
  # times.
  best <- which.max(profit)        # index of the most profitable firm
  p_best <- p[best]
  st_best <- (p_best > 5)
  pA <- (best == 1) * (p_best) + (best > 1) * (p_best - 5 * st_best) # to translate CS price into NCS price
  pB <- (best == 1) * (p_best + 5 * st[2]) + (best > 1) * (p_best)
  pC <- (best == 1) * (p_best + 5 * st[3]) + (best > 1) * (p_best)
  # explicit (visible) return instead of a trailing invisible assignment
  c(pA, pB, pC)
}
#test
mu=0.1
lambda=0.3
pA=4.9
pB=5.1
pC=5
print(Best(pA,pB,pC))
# RUNNING SIMULATION OF BEST RESPONSE DYNAMICS FOR GRAPHICAL REPRESENTATION
# INITIALIZATION
v<-5
e<-1
#start
pA<-3
pB<-5
pC<-6
P<-c(0,pA,pB,pC)
mu=0.3
lambda=0.2
jig<-0.015 #size of price experimentation
jig_st<-0.002 #probability of random format change
T<-5000
i<-1
while(i<=T){
pA<-Best(pA,pB,pC)[1]
pB<-Best(pA,pB,pC)[2]
pC<-Best(pA,pB,pC)[3]
stB<-(pB>5)
stC<-(pC>5)
pA<-pA+jig*runif(1,-1,1)
pA<-(pA>5)*5+(pA<=5)*pA
pA<-(pA<0)*0+(pA>=0)*pA
rand<-runif(1,0,1)
pB<-pB-5*stB+jig*runif(1,-1,1)
pB<-(pB>5)*5+(pB<=5)*pB
pB<-(pB<0)*0+(pB>=0)*pB
stB<-(rand>jig_st)*stB+(rand<=jig_st)*(1-stB)
pB<-pB+5*stB
rand2<-runif(1,0,1)
pC<-pC-5*stC+jig*runif(1,-1,1)
pC<-(pC>5)*5+(pC<=5)*pC
pC<-(pC<0)*0+(pC>=0)*pC
stC<-(rand2>jig_st)*stC+(rand2<=jig_st)*(1-stC)
pC<-pC+5*stC
P<-rbind(P,c(i,pA,pB,pC))
i<-i+1
}
P2<-matrix(P,ncol = 4)
P2<-P2[2:(T+1),] # get rid of initial values
nr_formatA<-(P2[,3]>5)+(P2[,4]>5)+1 # number of firms with format A
nr_formatA <- as.factor(nr_formatA)
#names(nr_formatA)<-c("firms with format A")
levels(nr_formatA) <- c("1", "2","3")
P3<-cbind(P2,nr_formatA)
Pf<-data.frame(P3)
names(Pf) <- c("time","prices","priceb","pricec","firmswithformatA")
Pf$firmswithformatA <- as.factor(Pf$firmswithformatA)
levels(Pf$firmswithformatA) <- c("1", "2","3")
# graph in paper
ggplot(Pf, aes(x=time, y=prices, group=1, colour=firmswithformatA)) +
geom_point(size=2) +
geom_line(color='steelblue',size=1, alpha=0.3)+
theme_minimal()+
theme(axis.text.x = element_text(angle = 90, hjust = 1))+
xlab("period")
ggsave("Figure_mu_0.3.pdf", width = 12, height = 6, units = "in", dpi = 400)
# best imitation graph in the triopoly (given price by A and price by B, what is the best choice by C)
mu<-0.5
lambda<-0.5
pA<-1
pC<-5 #we do not care about this number since we compute what is the best to imitate of A and B for firm C
pB<-seq(from=0, to=10,by=0.01)
p<-rbind(pA,pB,pC)
p<-matrix(p,nrow=3)
p<-t(p)
imit_best<-function(x) Best(x[1],x[2],x[3]) #to compute as function of vector
tC<-apply(X=p,MARGIN=1,FUN=imit_best)
st<-(tC>5) #idneitfy standard offers
st
tC
p
st
as.numeric(tC[2,])
as.numeric(st[2,])
p2<-cbind(p,st[2,],as.numeric(tC[2,]-5*as.numeric(st[2,])))
p2<-matrix(p2,ncol = 5)
P2<-cbind(as.numeric(seq(1,1001,1)),p2)
P3<-matrix(P2,ncol = 6)
P<-data.frame(P3)
P$X1 <- factor(P$X1, levels = P$X1)
library(ggplot2)
pB<-P[,3]
pC<-P[,6]
format<-factor(P[,5])
ggplot(P, aes(x=pB, y=pC, group=1, color=format)) + geom_point(size=4) + geom_point(color='steelblue',size=1, alpha=0.3) |
77c040a90751ac7c91ebda96ce14a0c2f7e98c6c | 8eb72c67300a9ac0af4eff99820dba7fb2197441 | /scripts/tims_phangorn_tree_functions.R | 39e1d44987a915e316e94c41aa8a31d3bad69a78 | [] | no_license | CoolEvilgenius/phyc | e97c00477889ffe27f6293107e093cd610ece0f4 | 5e44389b3fbff2261c3240bb6de448c7563ed3ee | refs/heads/master | 2021-05-17T21:45:15.810412 | 2020-03-29T11:00:41 | 2020-03-29T11:00:41 | 250,965,565 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,354 | r | tims_phangorn_tree_functions.R | edges_of_trees<-function(tree,strains,plotname = "Strains")
{
outlst <- list()
e_labs <- rep("black", length(tree$edge[,1]))
n_labs <- NULL
for (st in strains){
tlab <- which(tree$tip.label == st)
te <- which(tree$edge[,2] == tlab)
t_internal <-tree$edge[te,1]
e_labs[[te]] <- "red"
n_labs <- c(tlab,n_labs)
outlst[[st]] <- c(tlab,t_internal)
}
plot(tree,edge.color = e_labs, show.tip.label = FALSE, main = plotname)
nodelabels(node = n_labs, pch = 21, cex = 1, bg = "blue", col = "blue")
tiplabels(strains,n_labs, cex = 0.3, col = "black", adj = -1, frame = "none")
return(outlst)
}
# For each strain entry of inlst (c(tip_index, parent_node_index), as
# produced by edges_of_trees), compare the state matrices of the tip and its
# parent node and collect "site:state" strings for every site where they
# differ. phyDat_file is indexed per node and each node's matrix is compared
# row-wise across 4 state columns (rowSums == 4 means all four cells match),
# so 4 states (DNA) are assumed -- TODO confirm.
# Returns a character vector of "site:state" entries, accumulated in reverse
# order of inlst; also prints a per-strain progress line.
snps_on_edges<-function(phyDat_file,inlst)
{
  #This assumes the input will be from a parsimony method rather than ML
  res = NULL
  for (nm in names(inlst))
  {
    rowvec <- NULL
    temppr <- NULL
    leaf <- inlst[[nm]][1]
    in_node <- inlst[[nm]][2]
    # sites where tip and parent do not agree in all 4 state columns
    rowvec <- which(rowSums(phyDat_file[[leaf]] == phyDat_file[[in_node]]) != 4)
    # encode each differing site as "site:state" (state column(s) set to 1
    # at the tip; an ambiguous call can yield several entries per site)
    tmp <- (unlist(lapply(rowvec, function(x) paste(x, which(phyDat_file[[leaf]][x,] == 1), sep = ":"))))
    res <- c(tmp,res)
    # progress log: strain, tip, parent node, number of differing sites
    temppr <- c(nm,inlst[[nm]],length(rowvec))
    print(temppr)
  }
  return(res)
}
## Keep only entries that carry an explicit ":<state>" call and strip that
## suffix, leaving the site positions. Order of surviving entries is kept.
clean_snp_list <- function(snp_file) {
  with_call <- grep(":.{1}", snp_file, value = TRUE)  # drops entries lacking a call
  gsub(":.{1}", "", with_call)
}
## Cross-tabulate two SNP vectors into a single data frame with one row per
## mutation and its count in each vector (full outer join; counts missing on
## one side become 0). As in the original workflow, the first row of the
## merged (merge()-sorted) table is dropped before returning.
make_snp_table <- function(snpVec1, snpVec2) {
  counts1 <- data.frame(table(snpVec1), stringsAsFactors = FALSE, row.names = NULL)
  colnames(counts1) <- c("Mutation", "Vec1")
  counts2 <- data.frame(table(snpVec2), stringsAsFactors = FALSE, row.names = NULL)
  colnames(counts2) <- c("Mutation", "Vec2")
  combined <- merge(counts1, counts2, by = "Mutation", all = TRUE)
  combined[is.na(combined)] <- 0
  combined[-1, ]
}
# For each mutation row of snp_table (columns Mutation, Vec1, Vec2, as built
# by make_snp_table), run a one-sided Fisher exact test of the per-mutation
# counts against the two vector totals and append the p-value in a new
# `Fisher` column. Side effects: prints the two totals to the console.
fisher_snp_table<-function(snp_table)
{
  case1 <- sum(snp_table$Vec1)
  case2 <- sum(snp_table$Vec2)
  print("Total number of SNPS in Vec1 and Vec2")
  print(case1)
  print(case2)
  #snp_table$Fisher <- apply(snp_table,1, function(x) fisher.test(matrix(c(x[2],case1,x[3],case2),2,2), alternative = "less"))
  #
  # NOTE(review): 1:nrow(snp_table) misbehaves on an empty table (1:0
  # iterates over c(1, 0)); seq_len(nrow(snp_table)) would be safer.
  for (i in (1:nrow(snp_table))){
    # 2x2 table filled column-wise: c(count1, total1, count2, total2)
    result <-fisher.test(matrix(c(snp_table[i,2],case1,snp_table[i,3],case2),2,2), alternative = "less")
    snp_table$Fisher[i]<- result$p.value
  }
  # tempdf <- cbind(snp_table,fish_scores)
  return(snp_table)
}
a6ed6ac58f6ca1e28dd92c3a28a8591767683fd6 | 464f120f045b4da176822e12814dbc1c62612f4a | /analysis/src/ESM_utils.R | 459e061084c8df46b07185409fc7704b3fffa9a2 | [
"MIT"
] | permissive | soyowoo/ExploringSocialMetacognition | 1c62f2caa94a306e2d6de517b9037035da670f1b | e802991f8b9f5a912b6f3a0671b21485610025f6 | refs/heads/master | 2023-07-31T09:22:46.023655 | 2021-05-11T11:40:32 | 2021-05-11T11:40:32 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,274 | r | ESM_utils.R | # Exploring Social Metacognition support functions and variables ----------
# Libraries ---------------------------------------------------------------
# Load the dependencies for the main scripts
# Parsing JSON files
if(!require(jsonlite)) {
install.packages("jsonlite")
library(jsonlite)
}
# Calculating Bayes factors
if(!require(BayesFactor)) {
install.packages('BayesFactor')
library(BayesFactor)
}
# Plots
if(!require(tidyverse)) {
install.packages('tidyverse')
library(tidyverse)
}
# Forest plots
if(!require(ggridges)) {
install.packages('ggridges')
library(ggridges)
}
# # Long-to-wide conversions
# if(!require(reshape2)) {
# install.packages('reshape2')
# library(reshape2)
# }
# Linear modelling
if(!require(lme4)) {
install.packages('lme4')
library(lme4)
}
# CohensD calculations
if(!require(lsr)) {
install.packages('lsr')
library(lsr)
}
# repeated measures ANOVA
if(!require(ez)) {
install.packages('ez')
library(ez)
}
# RMarkdown to HTML conversion
if(!require(knitr)) {
install.packages('knitr')
library(knitr)
}
# Confidence intervals?
if(!require(Hmisc)) {
install.packages('Hmisc')
library(Hmisc)
}
# Brier scores
if(!require(scoring)) {
install.packages('scoring')
library(scoring)
}
# Reference and mapping functions -----------------------------------------
#' Return a property of an Advisor from their ID for a given participant.
#' @param property name of the property to return. Should be a column in advisors
#' @param advisorId vector of advisor ids (may contain NA; NA ids yield NA)
#' @param participantId will be joined to advisorId as a data.frame, so must be of the same length
#' @param advisors data frame of advisors (columns pid and id are used for matching)
#' @return vector of the requested property, one entry per (advisorId, participantId) pair
.lookupAdvisorProperty <- function(property, advisorId, participantId, advisors) {
  df <- data.frame(advisorId, participantId, type = NA)
  if(any(!is.na(df$advisorId))) {
    # only look up rows with a real advisor id; NA rows keep type = NA
    tmp <- df[!is.na(df$advisorId), ]
    tmp$type <- sapply(1:nrow(tmp),
                       function(i) advisors[advisors$pid == tmp$participantId[i]
                                            & advisors$id == tmp$advisorId[i], property])
    # a lookup with no matching advisor yields a zero-length result;
    # replace those with NA so unlist() below keeps one entry per row
    for(i in 1:nrow(tmp))
      if(length(unlist(tmp$type[i])) == 0)
        tmp$type[i] <- list(NA)
    df[!is.na(df$advisorId), ] <- tmp
  }
  return(unlist(df$type))
}
#' Look up each advisor's adviceType for the given participants. Updated
#' version of getAdviceType; thin wrapper around .lookupAdvisorProperty().
#' @param advisorId vector of advisor ids
#' @param participantId will be joined to advisorId as a data.frame, so must be of the same length
#' @param advisors data frame of advisors
#' @return adviceType of specified advisor
findAdviceType <- function(advisorId, participantId, advisors) {
  .lookupAdvisorProperty('adviceType', advisorId, participantId, advisors)
}
#' Look up each advisor's groupId for the given participants. Updated
#' version of getAdviceType; thin wrapper around .lookupAdvisorProperty().
#' @param advisorId vector of advisor ids
#' @param participantId will be joined to advisorId as a data.frame, so must be of the same length
#' @param advisors data frame of advisors
#' @return groupId of specified advisor
findAdvisorGroup <- function(advisorId, participantId, advisors) {
  .lookupAdvisorProperty('groupId', advisorId, participantId, advisors)
}
#' Return a vector of length \code{nrow(trials)} containing the advice from
#' an advisor with advice profile \code{type} on each trial
#' @param trials data frame of trials to search
#' @param type advice type to search for
#' @param requireSeen whether the advice must have been visible to the
#'   participant. If TRUE, unseen advice is replaced with NA
getAdviceByType <- function(trials, type, requireSeen = T) {
  out <- NULL
  for(i in 1:nrow(trials)) {
    tr <- trials[i, ]
    # check the single-advisor columns first, then the two dual-advisor slots
    # NOTE(review): if tr$adviceType is NA this first condition evaluates to
    # NA and if() errors; presumably adviceType is never NA here -- confirm.
    if(tr$adviceType == type && !is.na(tr$adviceSide))
      out <- c(out, tr$adviceSide)
    else {
      if(tr$advisor0type == type && !is.na(tr$advisor0adviceSide))
        out <- c(out, tr$advisor0adviceSide)
      else {
        if(tr$advisor1type == type && !is.na(tr$advisor1adviceSide))
          out <- c(out, tr$advisor1adviceSide)
        else
          out <- c(out, NA)
      }
    }
  }
  # mask out advice the participant never actually saw
  if(requireSeen)
    out[!getAdviceSeen(trials, type)] <- NA
  return(out)
}
#' @param trials data frame containing trials
#' @param type advisor's advice type
#' @return boolean vector of length \code{nrow(trials)} with TRUE where the advice was
#'   seen on a trial
#' Relies on the global \code{trialTypes} lookup list (force/choice/change/dual).
getAdviceSeen <- function(trials, type) {
  out <- NULL
  for(i in 1:nrow(trials)) {
    tr <- trials[i, ]
    # single-advisor trial types: the advice was seen iff that trial's
    # advisor has the requested advice type
    if(tr$type %in% c(trialTypes$force, trialTypes$choice, trialTypes$change))
      out <- c(out, tr$adviceType == type)
    else {
      # dual trials show both advisors' advice
      if(tr$type %in% c(trialTypes$dual))
        out <- c(out, type %in% c(tr$advisor0type, tr$advisor1type))
      else
        out <- c(out, F)
    }
  }
  return(out)
}
#' @param adviceTypeVector vector of advisor's adviceTypes
#' @param allowNeutral whether to allow neutral advisors as a type
#' @return list of pairs of adviceTypes which complement one another:
#'   the distinct, non-NA types are sorted and paired off in order
#'   ((1st, 2nd), (3rd, 4th), ...); a leftover odd type is dropped.
getAdviceTypePairs <- function(adviceTypeVector, allowNeutral = F) {
  types <- unique(adviceTypeVector)
  if (!allowNeutral) {
    types <- types[types != adviceTypes$neutral]
  }
  types <- sort(types[!is.na(types)])
  nPairs <- floor(length(types) / 2)
  if (nPairs < 1) {
    return(list())
  }
  lapply(seq_len(nPairs), function(k) types[c(2 * k - 1, 2 * k)])
}
# Return the advice type of advisor `aid` for participant with row number=pid,
# or NA when that participant has no advisor with that id.
#
# Args:
#   aid: advisor id (matched against advisor.data.frame$id)
#   pid: participant identifier (matched against advisor.data.frame$participantId)
#   advisor.data.frame: data frame with columns participantId, id, adviceType
getAdviceTypeById <- function(aid, pid, advisor.data.frame) {
  type <- advisor.data.frame[which(advisor.data.frame$participantId==pid),]
  type <- type[which(type$id==aid),]
  # BUG FIX: length() on a data.frame counts COLUMNS, so the old guard
  # (length(type) > 0) was TRUE even with zero matching rows, leaking a
  # zero-length vector that breaks callers doing `out[i] <- ...`.
  if (nrow(type) > 0)
    return(type$adviceType)
  return(NA)
}
#' Return a vector of advice types for trial list t
#'
#' @param t trial data frame with columns advisorId and participantId
#' @param participant.data.frame unused; retained for backward compatibility
#' @param advisor.data.frame advisor data frame used to resolve advice types
#' @param forceRecalculate if FALSE and t already contains an adviceType
#'   column, that column is returned as-is
#' @return vector of advice types; NA where a trial had no advisor
getAdviceType <- function (t, participant.data.frame, advisor.data.frame, forceRecalculate = FALSE) {
  # shortcut if we already calculated this
  if ('adviceType' %in% colnames(t) && !forceRecalculate)
    return(t$adviceType)
  out <- vector(length = nrow(t))
  # seq_len() is safe for a zero-row input, unlike seq(length(out))
  # which would yield c(1, 0)
  for (i in seq_len(nrow(t))) {
    if (t$advisorId[i] == 0) {
      out[i] <- NA  # advisorId 0 encodes "no advisor on this trial"
    } else {
      out[i] <- getAdviceTypeById(t$advisorId[i], t$participantId[i],
                                  advisor.data.frame)
    }
  }
  return(out)
}
#' Find the confidence shift in a given trial
#' @param t trial list
#' @param rawShift whether to report the confidence shift without adjusting
#'   for the asymmetric scale (i.e. without capping at the scale maximum)
#' @param forceRecalculate if true, recalculate even if t already contains a
#'   confidenceShift column
#' @return a vector of confidence shifts for trial list t (NA where no final
#'   confidence was recorded)
getConfidenceShift <- function (t, rawShift = FALSE, forceRecalculate = FALSE) {
  scaleMaximum <- 50  # confidence sliders run 0-50
  # shortcut if we already calculated this
  if ('confidenceShift' %in% colnames(t) && !forceRecalculate)
    return(t$confidenceShift)
  # Preallocate as numeric NA; rows with advice fill in below
  out <- rep(NA_real_, nrow(t))
  for (i in seq_len(nrow(t))) {
    if (!is.na(t$finalConfidence[i])) {
      maxShift <- scaleMaximum - t$initialConfidence[i]
      if (t$initialAnswer[i] == t$finalAnswer[i]) {
        # Same answer: shift is the simple difference
        out[i] <- t$finalConfidence[i] - t$initialConfidence[i]
      } else {
        # Switched answers: confidence passed through 0 on the original
        # slider, so the total (negative) shift is the sum of the two
        out[i] <- -1 * (t$finalConfidence[i] + t$initialConfidence[i])
      }
      # Cap the magnitude at the remaining scale room unless raw shifts
      # were requested
      if (!rawShift && abs(out[i]) > maxShift)
        out[i] <- maxShift * sign(out[i])
    }
  }
  return(out)
}
#' Return a vector of influences of advisors. Influence is +confidenceShift
#' where the advisor agrees, and -confidenceShift where the advisor disagrees.
#' @param advisorAgreements logical vector of whether the advisor agreed
#' @param confidenceShift vector of confidence shifts; must be the same
#'   length as \code{advisorAgreements}
#' @return vector of influences of the advisors. NA where agreement is NA
findInfluence <- function(advisorAgreements, confidenceShift) {
  # ifelse() covers all three cases (TRUE, FALSE, NA) and always returns a
  # vector of the full input length. The previous out <- NA approach
  # silently returned a *shorter* vector whenever the trailing elements of
  # advisorAgreements were NA, because those positions were never assigned.
  return(ifelse(advisorAgreements, confidenceShift, -1 * confidenceShift))
}
# Return a vector of influence for trial list t
#' @param t trial list
#' @param rawShift whether to report the influence without adjusting for the
#'   asymmetric scale
#' @param forceRecalculate if true, recalculate even if t already contains an
#'   influence column
#' @return a vector of influence for trial list t (NA where no advisor)
getInfluence <- function (t, rawShift = FALSE, forceRecalculate = FALSE) {
  # shortcut if we already calculated this
  if ('influence' %in% colnames(t) && !forceRecalculate)
    return(t$influence)
  # Compute all shifts in a single call rather than invoking
  # getConfidenceShift() once per row; the row-wise result is identical
  # since the shift for row i depends only on row i
  shifts <- getConfidenceShift(t, rawShift, forceRecalculate)
  # Influence is +shift on agreement, -shift on disagreement, NA otherwise
  out <- ifelse(t$advisorAgrees, shifts, -1 * shifts)
  # which() drops NA advisorIds, avoiding NA subscripts in the assignment
  out[which(t$advisorId == 0)] <- NA  # no advisor on these trials
  return(out)
}
#' Get the name of the advice type
#' @param adviceType the advice type code to fetch the name for (may be a
#'   vector, in which case each element is resolved)
#' @param long whether to return the long name
#' @return string of the advice type; 'None' (long) or NA (short) for
#'   unrecognized or missing codes
getAdviceTypeName <- function(adviceType, long = FALSE) {
  # Vector input: resolve each element individually
  if (length(adviceType) > 1) {
    out <- NULL
    for (aT in adviceType)
      out <- c(out, getAdviceTypeName(aT, long = long))
    return(out)
  }
  # Lookup tables keyed by the element names of the adviceTypes global,
  # replacing the previous repetitive if-chain
  shortNames <- c(neutral = 'Ntl', AiC = 'AiC', AiU = 'AiU',
                  HighAcc = 'HighAcc', LowAcc = 'LowAcc',
                  HighAgr = 'HighAgr', LowAgr = 'LowAgr',
                  avaAcc = 'avaAcc', avaAgr = 'avaAgr')
  longNames <- c(neutral = 'neutral', AiC = 'Agree-in-confidence',
                 AiU = 'Agree-in-uncertainty', HighAcc = 'High accuracy',
                 LowAcc = 'Low accuracy', HighAgr = 'High agreement',
                 LowAgr = 'Low agreement', avaAcc = 'AVA Accurate',
                 avaAgr = 'AVA Agreement')
  # which() yields integer(0) for NA or unknown codes, so those fall
  # through to the default (the old == chain errored on NA input)
  matched <- which(unlist(adviceTypes) == adviceType)
  if (length(matched) == 1) {
    key <- names(adviceTypes)[matched]
    return(if (long) longNames[[key]] else shortNames[[key]])
  }
  return(ifelse(long, 'None', NA))
}
#' Get the name of a trial type
#' @param type numeric code of the trial type
#' @return the name(s) in the trialTypes global whose code matches \code{type}
getTrialTypeName <- function(type) {
  codes <- unlist(trialTypes)
  names(codes)[codes == type]
}
# Type2 ROC ---------------------------------------------------------------
#' @param correctness vector of correctness of judgements
#' @param confidence vector of the confidence of judgements
#' @param bins number of bins to use, or NA if data should be judged by each
#'   confidence value individually
#' @return type 2 receiver operator characteristic curve points (data frame
#'   with confidence level x and mean correctness y, ordered by x)
type2ROC <- function(correctness, confidence, bins = NA) {
  # Optionally discretize confidence into equal-width bins; the breakpoints
  # assume the 0-50 confidence scale used elsewhere in this file
  if (!is.na(bins))
    confidence <- cut(confidence, seq(0, 50, length.out = bins))
  points <- data.frame(x = unique(confidence), y = NA)
  # Mean correctness at each observed confidence level
  for (i in seq_len(nrow(points))) {
    points$y[i] <- mean(correctness[confidence == points$x[i]])
  }
  # Order rows by confidence level (factor level order when binned)
  points <- points[order(points$x), ]
  return(points)
}
# Global variables --------------------------------------------------------
# advice types: neutral, agree-in-confidence, and agree-in-uncertainty
# Numeric codes identifying each advisor profile; consumed by the lookup
# helpers above (getAdviceTypeName, getAdviceTypePairs, getAdviceByType)
adviceTypes <- list(neutral=0,
                    AiC=3, AiU=4,
                    HighAcc=5, LowAcc=6,
                    HighAgr=7, LowAgr=8,
                    avaAcc=9, avaAgr=10)
# Numeric codes for trial types (see getTrialTypeName / getAdviceSeen)
trialTypes <- list(catch=0, force=1, choice=2, dual=3, change=4)
# Ordinal confidence bands
confidenceCategories <- list(low=0, medium=1, high=2)
# Advisor questionnaire dimensions
questionnaireDimensions <- list(accurate=1,
                                like=2,
                                trust=3,
                                influence=4)
# The Advisor portraits have properties which might affect ratings, so we should investigate these:
# NOTE(review): blackProp presumably encodes a rated proportion and age a
# mean estimated age for each portrait -- confirm against the study materials
portraitDetails <- data.frame(
  portraitId = 1:5,
  category = factor(c('w', 'b', 'w', 'b', 'w')),
  blackProp = c(0, .99, 0, .99, .01),
  age = c(28.7, 24.9, 23.3, 24.6, 23.7)
)
d6cfcd8a7d8ae665d02044c6717612c838d64a13 | 3178860d26781702ca94412669f49c45c2a26275 | /R/varSelect.R | 396aeb8bb48190cdbd534b4b3f9e67c0f6839902 | [] | no_license | cran/modnets | 08f1fea424212bcf851972de068c62434fb196f3 | 81983f2b895c53602ccb44c989d6692594439568 | refs/heads/master | 2023-08-10T18:19:43.907567 | 2021-10-01T07:20:02 | 2021-10-01T07:20:02 | 412,522,956 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 34,371 | r | varSelect.R | #' Variable selection for moderated networks
#'
#' Perform variable selection via the LASSO, best subsets selection, forward
#' selection, backward selection, or sequential replacement on unmoderated
#' networks. Or, perform variable selection via the hierarchical LASSO for
#' moderated networks. Can be used for both GGMs and SUR networks.
#'
#' The primary value of the output is to be used as input when fitting the
#' selected model with the \code{\link{fitNetwork}} function. Specifically, the
#' output of \code{\link{varSelect}} can be assigned to the \code{type} argument
#' of \code{\link{fitNetwork}} in order to fit the constrained models that were
#' selected across nodes.
#'
#' For moderated networks, the only variable selection approach available is
#' through the \code{glinternet} package, which implements the hierarchical
#' LASSO. The criterion for model selection dictates which function from the
#' package is used, where information criteria use the
#' \code{\link[glinternet:glinternet]{glinternet::glinternet}} function to
#' compute models, and cross-validation calls the
#' \code{\link[glinternet:glinternet.cv]{glinternet::glinternet.cv}} function.
#'
#' @param data \code{n x k} dataframe or matrix.
#' @param m Character vector or numeric vector indicating the moderator(s), if
#' any. Can also specify \code{"all"} to make every variable serve as a
#' moderator, or \code{0} to indicate that there are no moderators. If the
#' length of \code{m} is \code{k - 1} or longer, then it will not be possible
#' to have the moderators as exogenous variables. Thus, \code{exogenous} will
#' automatically become \code{FALSE}.
#' @param criterion The criterion for the variable selection procedure. Options
#' include: \code{"cv", "aic", "bic", "ebic", "cp", "rss", "adjr2", "rsq",
#' "r2"}. \code{"CV"} refers to cross-validation, the information criteria are
#' \code{"AIC", "BIC", "EBIC"}, and \code{"Cp"}, which refers to Mallow's Cp.
#' \code{"RSS"} is the residual sum of squares, \code{"adjR2"} is adjusted
#' R-squared, and \code{"Rsq"} or \code{"R2"} is R-squared. Capitalization is
#' ignored. For methods based on the LASSO, only \code{"CV", "AIC", "BIC",
#' "EBIC"} are available. For methods based on subset selection, only
#' \code{"Cp", "BIC", "RSS", "adjR2", "R2"} are available.
#' @param method Character string to indicate which method to use for variable
#' selection. Options include \code{"lasso"} and \code{"glmnet"}, both of
#' which use the LASSO via the \code{glmnet} package (either with
#' \code{\link[glmnet:glmnet]{glmnet::glmnet}} or
#' \code{\link[glmnet:cv.glmnet]{glmnet::cv.glmnet}}, depending upon the
#' criterion). \code{"subset", "backward", "forward", "seqrep"}, all call
#' different types of subset selection using the
#' \code{\link[leaps:regsubsets]{leaps::regsubsets}} function. Finally
#' \code{"glinternet"} is used for applying the hierarchical lasso, and is the
#' only method available for moderated network estimation (either with
#' \code{\link[glinternet:glinternet]{glinternet::glinternet}} or
#' \code{\link[glinternet:glinternet.cv]{glinternet::glinternet.cv}},
#' depending upon the criterion). If one or more moderators are specified,
#' then \code{method} will automatically default to \code{"glinternet"}.
#' @param lags Numeric or logical. Can only be 0, 1 or \code{TRUE} or
#' \code{FALSE}. \code{NULL} is interpreted as \code{FALSE}. Indicates whether
#' to fit a time-lagged network or a GGM.
#' @param exogenous Logical. Indicates whether moderator variables should be
#' treated as exogenous or not. If they are exogenous, they will not be
#' modeled as outcomes/nodes in the network. If the number of moderators
#' reaches \code{k - 1} or \code{k}, then \code{exogenous} will automatically
#' be \code{FALSE}.
#' @param type Determines whether to use gaussian models \code{"g"} or binomial
#' models \code{"c"}. Can also just use \code{"gaussian"} or
#' \code{"binomial"}. Moreover, a vector of length \code{k} can be provided
#' such that a value is given to every variable. Ultimately this is not
#' necessary, though, as such values are automatically detected.
#' @param center Logical. Determines whether to mean-center the variables.
#' @param scale Logical. Determines whether to standardize the variables.
#' @param gamma Numeric value of the hyperparameter for the \code{"EBIC"}
#' criterion. Only relevant if \code{criterion = "EBIC"}. Recommended to use a
#' value between 0 and .5, where larger values impose a larger penalty on the
#' criterion.
#' @param nfolds Only relevant if \code{criterion = "CV"}. Determines the number
#' of folds to use in cross-validation.
#' @param varSeed Numeric value providing a seed to be set at the beginning of
#' the selection procedure. Recommended for reproducible results.
#' @param useSE Logical. Only relevant if \code{method = "glinternet"} and
#' \code{criterion = "CV"}. Indicates whether to use the standard error of the
#' estimates across folds, if \code{TRUE}, or to use the standard deviation,
#' if \code{FALSE}.
#' @param nlam if \code{method = "glinternet"}, determines the number of lambda
#' values to evaluate in the selection path.
#' @param covs Numeric or character string indicating a variable to be used as a
#' covariate. Currently not working properly.
#' @param verbose Logical. Determines whether to provide output to the console
#' about the status of the procedure.
#' @param beepno Character string or numeric value to indicate which variable
#' (if any) encodes the survey number within a single day. Must be used in
#' conjunction with \code{dayno} argument.
#' @param dayno Character string or numeric value to indicate which variable (if
#'   any) encodes the day on which each survey was taken. Must be used in
#'   conjunction with \code{beepno} argument.
#'
#' @return List of all models, with the selected variables for each along with
#' model coefficients and the variable selection models themselves. Primarily
#' for use as input to the \code{type} argument of the
#' \code{\link{fitNetwork}} function.
#' @export
#'
#' @seealso \code{\link{resample}, \link{fitNetwork}, \link{bootNet},
#' \link{mlGVAR}, \link[glinternet:glinternet]{glinternet::glinternet},
#' \link[glinternet:glinternet.cv]{glinternet::glinternet.cv},
#' \link[glmnet:glmnet]{glmnet::glmnet},
#' \link[glmnet:cv.glmnet]{glmnet::cv.glmnet},
#' \link[leaps:regsubsets]{leaps::regsubsets}}
#'
#' @examples
#' \donttest{
#' vars1 <- varSelect(ggmDat, criterion = 'BIC', method = 'subset')
#' fit1 <- fitNetwork(ggmDat, type = vars1)
#'
#' vars2 <- varSelect(ggmDat, criterion = 'CV', method = 'glmnet')
#' fit2 <- fitNetwork(ggmDat, type = vars2, which.lam = 'min')
#'
#' # Add a moderator
#' vars3 <- varSelect(ggmDat, m = 'M', criterion = 'EBIC', gamma = .5)
#' fit3 <- fitNetwork(ggmDat, moderators = 'M', type = vars3)
#' }
varSelect <- function(data, m = NULL, criterion = "AIC", method = "glmnet",
                      lags = NULL, exogenous = TRUE, type = "g", center = TRUE,
                      scale = FALSE, gamma = .5, nfolds = 10, varSeed = NULL,
                      useSE = TRUE, nlam = NULL, covs = NULL, verbose = TRUE,
                      beepno = NULL, dayno = NULL){
  # Overall flow:
  #   1) Decode the moderator specification m (0, 'all', names, indices, list)
  #   2) Reshape the input into a list with outcomes Y and predictors X
  #      (lagged first, when a temporal network was requested via lags)
  #   3) Dispatch to one of three selection back-ends:
  #      glinternet/hiernet (moderated), glmnet (LASSO), or leaps (subsets)
  dat <- data
  ALL <- FALSE
  dmnames <- NULL # VARSELECT START
  # m == 0 means "no moderators"; m == 'all' makes every variable a moderator
  mall <- which(sapply(c(0, 'all'), identical, as.character(m)))
  if(any(mall)){
    m <- switch(mall, NULL, 1:ncol(dat))
    if(mall == 2){ALL <- TRUE}
  } else if(isTRUE(is.character(m))){
    # Convert moderator names to column indices
    m <- which(colnames(data) %in% m)
  }
  if(is.null(lags)){
    # GGM case: with k - 1 or more moderators they cannot stay exogenous
    if(length(m) >= ncol(dat) - 1){exogenous <- FALSE}
    if(!exogenous){ALL <- TRUE}
  } else if(any(!sapply(c(beepno, dayno), is.null))){
    # Temporal case: both beep and day indicators are required so that
    # non-consecutive observations can be removed before lagging
    stopifnot(!is.null(beepno) & !is.null(dayno))
    dat <- getConsec(data = dat, beepno = beepno, dayno = dayno)
  }
  if(!is(dat, 'list')){
    # Reshape raw data into list(Y = outcomes, X = predictors)
    dat <- structure(data.frame(dat), samp_ind = attr(data, "samp_ind"))
    if(is.null(m) | ALL | !is.null(lags)){
      if(!is.null(lags)){
        vs <- colnames(dat)
        if(!is.null(m) & !ALL){mname <- vs[m]}
        if(!is.null(covs)){
          # Drop covariate columns, then re-locate the moderator indices
          # within the reduced data frame
          if(is.character(covs)){covs <- which(colnames(dat) %in% covs)}
          dat <- dat[, -covs]
          if(!is.null(m) & !ALL){m <- which(colnames(dat) %in% mname)}
        }
        # dmnames records the moderated design-matrix column names so that
        # fitHierLASSO can match coefficients back to terms
        dmnames <- colnames(lagMat(data = dat, m = m)$X)
        dat <- lagMat(data = dat, type = type, center = center, scale = scale)
        dat$full <- cbind.data.frame(dat$X, dat$Y) # FULLFIX
        if(ALL | (!is.null(m) & all(m == 0))){exogenous <- FALSE; m <- 1:ncol(dat$Y)}
        #if(ALL | is.null(m)){exogenous <- FALSE; m <- 1:ncol(dat$Y)}
        if(!is.null(m)){
          if(exogenous & length(m) < ncol(dat$Y)){
            # Exogenous moderators are removed from the outcome set
            dat$Y <- dat$Y[, -m]
            dat$full <- dat$full[, -m]
            if(!is(dat$Y, 'matrix')){ # NEW
              #if(class(dat$Y) != "matrix"){
              dat$Y <- as.matrix(dat$Y, ncol = 1)
              colnames(dat$Y) <- colnames(dat$full)[1]
            }
          } else if(length(m) == ncol(dat$Y)){
            # Every node moderates: equivalent to m = 'all'
            mname <- gsub("[.]y$", "", colnames(dat$Y))
            exogenous <- FALSE
            ALL <- TRUE
          }
        }
      } else {
        mname <- colnames(dat)[m]
        dat <- list(Y = dat, X = dat)
      }
    } else if(length(m) >= ncol(dat) - 1){
      mname <- colnames(dat)[m]
      exogenous <- FALSE; ALL <- TRUE
    } else if(class(m) == "list"){
      # Moderator supplied as an external variable (list): append it to X
      dat <- list(Y = dat, X = data.frame(dat, m))
      m <- ncol(dat$Y)
    } else if(class(m) %in% c("numeric", "integer")){
      # Numeric moderator index: move the moderator column to the end of X
      dn <- colnames(dat)
      dat <- list(Y = dat[, -m], X = data.frame(dat[, -m], dat[, m]))
      colnames(dat$X) <- c(dn[-m], dn[m])
      m <- ncol(dat$Y)
    }
  }
  if(!is.null(m)){
    if(class(m) == "list"){
      stopifnot(!ALL)
      mname <- names(m)
      m <- ncol(dat$Y)
      if(m < (ncol(dat$X) - 1)){
        # Columns in X beyond the outcomes + moderator are covariates
        covariates <- TRUE
        dn <- colnames(dat$X)
        m <- which(dn == mname)
        dat$X <- data.frame(dat$X[, -m], dat$X[, m])
        colnames(dat$X) <- c(dn[-m], dn[m])
        m <- ncol(dat$X) - 1
      }
    }
    # Stash the moderator indices; the nodewise loop below mutates m
    if(all(m != 0)){m0 <- m}
    # Moderated models can only be estimated with the hierarchical LASSO
    if(!method %in% c('hiernet', 'glinternet')){method <- 'glinternet'}
  }
  if(!is.null(varSeed)){set.seed(varSeed)}
  p <- ncol(dat$Y); data <- dat$X; Y <- dat$Y
  method <- match.arg(tolower(method), c(
    "hiernet", "glinternet", "subset", "backward",
    "forward", "seqrep", "glmnet", "lasso"))
  criterion <- toupper(match.arg(tolower(criterion), c(
    "cv", "aic", "bic", "ebic", "cp", "rss", "adjr2", "rsq", "r2")))
  ### VARSELECT
  # --- Back-end 1: hierarchical LASSO (moderated networks) ---------------
  if(method %in% c("hiernet", "glinternet")){
    if(is.null(nlam)){nlam <- ifelse(method == "hiernet", 20, 50)}
    hierMods <- list(); t <- c()
    # One nodewise regression per outcome variable
    for(i in 1:p){
      # If the outcome also appears among the predictors (GGM case),
      # remove it from its own predictor set
      if(all(colnames(dat$Y) %in% colnames(dat$X))){
        data <- cbind(y = Y[, i], dat$X[, -i])
      } else {
        data <- cbind(y = Y[, i], dat$X)
      }
      if(verbose == TRUE){cat("Fitting model ", i, "/", p, "...", sep = "")}
      if(verbose == "pbar"){if(i == 1){pb <- txtProgressBar(min = 0, max = p, style = 2, width = 43)}}
      if(ALL & !is.null(m)){
        # When every node moderates, the current outcome cannot moderate
        # itself: re-index m relative to the current predictor set
        if(all(m != 0)){
          m <- switch(2 - (i %in% m), NULL, which(colnames(data)[-1] %in% mname))
        }
      }
      tx <- Sys.time()
      hierMods[[i]] <- fitHierLASSO(data = data, yvar = i, type = type, m = m,
                                    nlam = nlam, nfolds = nfolds, gamma = gamma,
                                    method = method, useSE = useSE, lags = lags,
                                    criterion = criterion, dmnames = dmnames,
                                    verbose = ifelse(is.logical(verbose), verbose, FALSE))
      t[i] <- tx <- Sys.time() - tx
      names(t)[i] <- attr(tx, "units")
      # Restore the full moderator set for the next node
      if(ALL & exists("m0", inherits = FALSE)){m <- m0}
      if(verbose == TRUE){
        cat(" Complete! ", "(", round(t[i], 2), " ", attr(tx, "units"), ")\n", sep = "")}
      if(verbose == "pbar"){setTxtProgressBar(pb, i); if(i == p){close(pb)}}
    }
    if(verbose == TRUE){
      # Report total elapsed time, converting mixed time units (secs/mins/
      # hours, as reported per node by Sys.time()) to a common scale
      if(length(unique(names(t))) == 1){
        cat("####### Total time:", round(sum(t), 2), names(t)[1], "\n\n")
      } else {
        tt <- t
        if(length(unique(names(tt))) == 2){
          if(all(sort(unique(names(tt))) == c("mins", "secs"))){
            tt[names(tt) == "mins"] <- 60 * tt[names(tt) == "mins"]
            cat("####### Total time:", round(sum(tt)/60, 2), "mins\n\n")
          } else if(all(sort(unique(names(tt))) == c("hours", "mins"))){
            tt[names(tt) == "hours"] <- 60 * tt[names(tt) == "hours"]
            cat("####### Total time:", round(sum(tt)/60, 2), "hours\n\n")
          }
        } else if(all(sort(unique(names(tt))) == c("hours", "mins", "secs"))){
          tt[names(tt) == "hours"] <- 360 * tt[names(tt) == "hours"]
          tt[names(tt) == "mins"] <- 60 * tt[names(tt) == "mins"]
          cat("####### Total time:", round(sum(tt)/360, 2), "hours\n\n")
        }
      }
    }
    # Attach metadata used downstream by fitNetwork()
    names(hierMods) <- colnames(Y)
    attributes(hierMods)$method <- method
    attributes(hierMods)$criterion <- criterion
    if(criterion == "EBIC"){attributes(hierMods)$gamma <- gamma}
    attributes(hierMods)$time <- t
    if(exists("covariates", inherits = FALSE)){attributes(hierMods)$covariates <- TRUE}
    if(!is.null(covs)){attributes(hierMods)$covs <- covs}
    if(!is.null(lags)){
      attributes(hierMods)$moderators <- mname
      attributes(hierMods)$exogenous <- exogenous
    }
    return(hierMods)
  }
  ### LASSO SELECTION
  # --- Back-end 2: ordinary LASSO via glmnet (no moderators) -------------
  if(method %in% c("lasso", "glmnet")){
    lassoMods <- list(); t <- c()
    if(is.null(nlam)){nlam <- 100}
    for(i in 1:p){
      if(all(colnames(dat$Y) %in% colnames(dat$X))){
        data <- cbind(y = Y[, i], dat$X[, -i])
      } else {
        data <- cbind(y = Y[, i], dat$X)
      }
      if(verbose != FALSE){if(i == 1){pb <- txtProgressBar(min = 0, max = p, style = 2, width = 43)}}
      tx <- Sys.time()
      lassoMods[[i]] <- lassoSelect(data = data, yvar = i, criterion = criterion,
                                    type = type, gamma = gamma, nfolds = nfolds,
                                    nlam = nlam)
      t[i] <- tx <- Sys.time() - tx
      names(t)[i] <- attr(tx, "units")
      if(verbose != FALSE){setTxtProgressBar(pb, i); if(i == p){close(pb)}}
    }
    names(lassoMods) <- colnames(Y)
    attributes(lassoMods)[c("method", "criterion", "time")] <- list("glmnet", criterion, t)
    if(criterion %in% c("EBIC", "CV")){attr(lassoMods, "gamma") <- gamma}
    if(exists("covariates", inherits = FALSE)){attributes(lassoMods)$covariates <- TRUE}
    if(!is.null(covs)){attributes(lassoMods)$covs <- covs}
    return(lassoMods)
  }
  # --- Back-end 3: best-subsets / stepwise selection via leaps -----------
  if(method %in% c("subset", "backward", "forward", "seqrep")){
    # Map unsupported criteria onto the nearest leaps-compatible one
    if(criterion == "CV"){criterion <- "Cp"}
    if(tolower(criterion) %in% c("aic", "ebic")){criterion <- "bic"}
    ind <- match.arg(tolower(criterion), c("cp", "bic", "adjr2", "rsq", "r2", "rss"))
    if(method == "subset"){method <- "exhaustive"}
    if(ind == "r2"){ind <- "rsq"}
    # Cp/BIC/RSS are minimized; adjR2/R2 are maximized.
    # NOTE(review): ifelse() with function-valued branches looks fragile --
    # verify this evaluates as intended rather than if (...) which.min else
    # which.max
    best <- ifelse(ind %in% c("cp", "bic", "rss"), which.min, which.max)
    regMods <- bestMods <- list()
    for(i in 1:p){
      if(all(colnames(dat$Y) %in% colnames(dat$X))){data <- dat$X[,-i]}
      regMods[[i]] <- summary(leaps::regsubsets(data, Y[,i], nvmax = ncol(data), method = method))
      # bestMods[[i]]: 0/1 indicator of which predictors the best model keeps
      bestMods[[i]] <- ifelse(regMods[[i]]$which, 1, 0)[best(regMods[[i]][[ind]]), -1]
    }
    bestMods <- lapply(bestMods, function(z) names(z)[z == 1])
    bestMods <- lapply(1:p, function(z){
      bestOut <- list(mod0 = bestMods[[z]], fitobj = regMods[[z]])
      attr(bestOut, "family") <- "g"
      return(bestOut)
    })
    names(bestMods) <- colnames(Y)
    attributes(bestMods)$method <- "regsubsets"
    attributes(bestMods)$criterion <- ind
    return(bestMods)
  }
}
##### lassoSelect: performs variable selection using the LASSO (glmnet)
# data: first column is the outcome y; remaining columns are predictors.
# yvar: index of the outcome, used to pick its element of a vector `type`.
# criterion: "CV" fits cv.glmnet; the information criteria ("EBIC"/"BIC"/
#   "AIC") fit a full glmnet path and pick the lambda minimizing that
#   criterion. gamma is the EBIC hyperparameter; alpha = 1 gives the LASSO.
# Returns a list with the selected predictor names (mod0, and mod1se under
#   CV), sparse coefficients, and the fitted glmnet object(s).
lassoSelect <- function(data, yvar, type = "g", criterion = "EBIC",
                        gamma = .5, nfolds = 10, nlam = 100, alpha = 1){
  # Drop any pre-existing interaction columns; this back-end is main-effects only
  if(any(grepl(":", colnames(data)))){data <- data[,!grepl(":", colnames(data))]}
  criterion <- match.arg(criterion, c("CV", "EBIC", "BIC", "AIC"))
  y <- as.numeric(data[, 1])
  x <- data <- as.matrix(data[, -1])
  if(length(type) > 1){type <- type[yvar]}
  fam <- ifelse(type %in% c("g", "gaussian"), "gaussian", "binomial")
  if(criterion == "CV"){
    fit <- glmnet::cv.glmnet(x = x, y = y, family = fam, type.measure = "deviance",
                             nfolds = nfolds, nlambda = nlam, alpha = alpha)
  } else {
    fit <- glmnet::glmnet(x = x, y = y, family = fam, alpha = alpha, nlambda = nlam)
  }
  # getIndices: walk the fitted lambda path, compute the model log-likelihood
  # and active-set size at each lambda, and either (a) pick the lambda that
  # minimizes the requested information criterion, or (b) under CV, extract
  # the lambda.min and lambda.1se solutions
  getIndices <- function(fit, y, x, fam = "gaussian", criterion = "EBIC",
                         gamma = .5, lam = "null", keepFit = FALSE){
    n <- length(y); p <- ncol(x)
    fam <- match.arg(fam, c("gaussian", "binomial", "multinomial"))
    lam <- match.arg(lam, c("null", "lambda.min", "lambda.1se"))
    if(criterion != "CV"){n_lambdas <- length(fit$lambda)}
    if(fam == "gaussian"){
      if(criterion != "CV"){
        # Null (intercept-only) model log-likelihood, used to recover the
        # saturated log-likelihood from glmnet's deviance below
        beta0 <- matrix(coef(fit, s = 1)[1], ncol = 1)
        yhat <- rep(1, n) * as.vector(beta0)
        # Number of nonzero coefficients at each lambda on the path
        n_neighbors <- sapply(1:n_lambdas, function(z){
          colSums(as.matrix(coef(fit, s = fit$lambda[z])[-1,]) != 0)
        })
        LL_model <- sum(dnorm(y, mean = yhat, sd = sqrt(sum((y - yhat)^2)/n), log = TRUE))
      } else {
        # CV: evaluate both the lambda.min and lambda.1se solutions, and
        # record each one's EBIC for reference
        mods <- lapply(c("lambda.min", "lambda.1se"), function(z){
          betas <- matrix(coef(fit, s = z), ncol = 1)
          yhat <- cbind(1, x) %*% as.vector(betas)
          n_neighbors <- colSums(matrix(coef(fit, s = z)[-1, ], ncol = 1) != 0)
          LL_model <- sum(dnorm(y, mean = yhat, sd = sqrt(sum((y - yhat)^2)/n), log = TRUE))
          # EBIC = -2 LL + df * log(n) + 2 * gamma * df * log(p)
          ic_lambda <- -2 * LL_model + n_neighbors * log(n) + 2 * gamma * n_neighbors * log(p)
          return(list(betas = matrix(betas[-1, ], ncol = 1), EBIC = ic_lambda))
        })
      }
    } else if(fam %in% c("multinomial", "binomial")){
      # Categorical outcomes: build the multinomial/binomial log-likelihood
      # by hand from the dummy-coded responses and linear predictors
      lam <- ifelse(criterion == "CV", list(c("lambda.min", "lambda.1se")), list(1))[[1]]
      mods <- lapply(lam, function(lam0){
        cats <- unique(y)
        n_cats <- length(cats)
        m_respdum <- matrix(NA, n, n_cats)
        m_coefs <- matrix(NA, n, n_cats)
        m_LL_parts <- matrix(NA, nrow = n, ncol = n_cats + 1)
        X <- cbind(rep(1, n), x)
        for(catIter in 1:n_cats){
          m_respdum[, catIter] <- (y == cats[catIter]) * 1
          if(fam == "multinomial"){
            m_coefs[, catIter] <- X %*% matrix(coef(fit, s = lam0)[[catIter]], ncol = 1)
          } else {
            # Binomial: split the single coefficient vector symmetrically
            # across the two categories
            m_coefs[, catIter] <- X %*% ((matrix(coef(fit, s = lam0), ncol = 1) * ifelse(catIter == 1, -1, 1))/2)
          }
          m_LL_parts[, catIter] <- m_respdum[, catIter] * m_coefs[, catIter]
        }
        # Log-partition term of the multinomial log-likelihood
        m_LL_parts[, (n_cats + 1)] <- -log(rowSums(exp(m_coefs)))
        LL_model <- sum(rowSums(m_LL_parts))
        if(lam0 == 1){
          # IC path: count the active set at every lambda
          n_lambdas <- length(fit$lambda)
          n_neighbors <- c()
          for(NN in 1:n_lambdas){
            coefs_bin <- vector("list", length = n_cats)
            for(ca in 1:n_cats){
              if(fam == "multinomial"){
                coefs_bin[[ca]] <- as.matrix(coef(fit, s = fit$lambda[NN])[[ca]][-1, ]) != 0
              } else {
                coefs_bin[[ca]] <- as.matrix(coef(fit, s = fit$lambda[NN])) != 0
              }
            }
            # A predictor is active if it is nonzero for any category
            n_neighbors[NN] <- colSums(Reduce("+", coefs_bin) != 0)
            if(fam == "binomial"){n_neighbors[NN] <- n_neighbors[NN] - 1}
          }
          return(list(LL_model = LL_model, n_neighbors = n_neighbors))
        } else {
          # CV solution at a single lambda: extract coefficients and EBIC
          coefs_bin <- vector("list", length = n_cats)
          if(fam == "multinomial"){
            for(ca in 1:n_cats){coefs_bin[[ca]] <- as.matrix(coef(fit, s = lam0)[[ca]][-1, ]) != 0}
          } else {
            for(ca in 1:n_cats){coefs_bin[[ca]] <- as.matrix(coef(fit, s = lam0)[-1, ]) != 0}
          }
          n_neighbors <- colSums(Reduce("+", coefs_bin) != 0)
          ic_lambda <- -2 * LL_model + n_neighbors * log(n) + 2 * gamma * n_neighbors * log(p)
          betas <- matrix(coef(fit, s = lam0), ncol = 1)
          return(list(betas = matrix(betas[-1, ], ncol = 1), EBIC = ic_lambda))
        }
      })
      if(criterion != "CV"){
        LL_model <- mods[[1]]$LL_model
        n_neighbors <- mods[[1]]$n_neighbors
      }
    }
    if(criterion != "CV"){
      # Recover per-lambda log-likelihoods from glmnet's deviance ratios,
      # then score each lambda with the requested information criterion
      LL_sat <- 1/2 * fit$nulldev + LL_model
      deviance <- (1 - fit$dev.ratio) * fit$nulldev
      LL_lambda_models <- -1/2 * deviance + LL_sat
      # Penalty: 2 (AIC) or log(n) (BIC/EBIC), plus the EBIC term when requested
      ic_lambda <- -2 * LL_lambda_models + n_neighbors * ifelse(
        criterion == "AIC", 2, log(n)) + ifelse(
          criterion == "EBIC", list(2 * gamma * n_neighbors * log(p)), list(0))[[1]]
      allCoefs <- lapply(seq_len(n_lambdas), function(z) coef(fit)[, z])
      betas <- allCoefs[[which.min(ic_lambda)]][-1]
      coefs <- Matrix::Matrix(betas, sparse = TRUE)
      rownames(coefs) <- names(betas)
      fitobj <- list(fit = fit, fit0 = NA, crit = ic_lambda)
      # Optionally refit at the winning lambda only
      if(keepFit){fitobj$fit0 <- glmnet::glmnet(x, y, fam, lambda = fit$lambda[which.min(ic_lambda)])}
      names(fitobj)[3] <- criterion
      output <- list(mod0 = names(betas)[betas != 0], coefs = coefs,
                     fitobj = fitobj, allCoefs = allCoefs)
      # An empty selection is encoded as 1 (intercept-only) downstream
      if(length(output$mod0) == 0){output$mod0 <- 1}
    } else {
      # CV: report both the lambda.min (mod0) and lambda.1se (mod1se) models
      coefs <- Matrix::Matrix(do.call(cbind, lapply(mods, '[[', "betas")), sparse = TRUE)
      rownames(coefs) <- colnames(x)
      colnames(coefs) <- paste0("mod", c("0", "1se"))
      fitobj <- list(fitCV = fit, fit0 = NA, fit1se = NA)
      if(keepFit){
        fitobj$fit0 <- glmnet::glmnet(x, y, fam, lambda = fit$lambda.min)
        fitobj$fit1se <- glmnet::glmnet(x, y, fam, lambda = fit$lambda.1se)
      }
      attr(fitobj$fit0, "EBIC") <- mods[[1]]$EBIC
      attr(fitobj$fit1se, "EBIC") <- mods[[2]]$EBIC
      output <- list(mod0 = colnames(x)[coefs[, 1] != 0],
                     mod1se = colnames(x)[coefs[, 2] != 0],
                     coefs = coefs, fitobj = fitobj)
      if(length(output$mod0) == 0){output$mod0 <- 1}
      if(length(output$mod1se) == 0){output$mod1se <- 1}
    }
    attr(output, "family") <- fam
    return(output)
  }
  out <- getIndices(fit, y, x, fam, criterion, gamma)
  return(out)
}
##### fitHierLASSO: performs variable selection using the hierarchical LASSO
fitHierLASSO <- function(data, yvar, type = "g", m = NULL, criterion = "CV",
method = "glinternet", gamma = .5, nfolds = 10,
nlam = 50, lags = NULL, useSE = TRUE, diag = FALSE,
outMsgs = FALSE, dmnames = NULL, verbose = TRUE){
if(any(grepl(":", colnames(data)))){data <- data[,!grepl(":", colnames(data))]}
method <- match.arg(tolower(method), c("hiernet", "glinternet"))
criterion <- match.arg(criterion, c("CV", "EBIC", "BIC", "AIC"))
y <- as.numeric(data[, 1])
x <- data <- as.matrix(data[, -1])
if(method == "hiernet"){
out1 <- capture.output({fitPath <- hierNet::hierNet.path(x, y, nlam = nlam, strong = TRUE, diagonal = diag)})
out2 <- capture.output({fitCV <- hierNet::hierNet.cv(fitPath, x, y, nfolds = nfolds)})
out3 <- capture.output({fit0 <- hierNet::hierNet(x, y, lam = fitCV$lamhat, strong = TRUE, diagonal = diag)})
out4 <- capture.output({fit1se <- hierNet::hierNet(x, y, lam = fitCV$lamhat.1se, strong = TRUE, diagonal = diag)})
mod0 <- c(fit0$bp - fit0$bn, fit0$th[lower.tri(fit0$th)])
mod1se <- c(fit1se$bp - fit1se$bn, fit1se$th[lower.tri(fit1se$th)])
coefs <- Matrix::Matrix(cbind(mod0, mod1se), sparse = TRUE)
} else if(method == "glinternet"){
if(length(type) > 1){type <- type[yvar]}
fam <- ifelse(type %in% c("g", "gaussian"), "gaussian", "binomial")
type <- rep(1, ncol(x))
if(criterion == "CV"){
fitCV <- tryCatch({glinternet::glinternet.cv(x, y, type, nFolds = nfolds, nLambda = nlam,
interactionCandidates = m, family = fam)},
error = function(e){
failed <- TRUE
take <- 1
if(verbose){cat("\n")}
while(failed == TRUE){
if(take <= 5){
if(verbose){cat(" Failed.. trying again, take =", take, "\n")}
fitCV <- try(glinternet::glinternet.cv(x, y, type, nFolds = nfolds, nLambda = nlam,
interactionCandidates = m, family = fam), silent = TRUE)
if(class(fitCV) == "try-error"){
failed <- TRUE
take <- take + 1
} else {
failed <- FALSE
}
} else if(take <= 10){
if(verbose){cat(" Failed.. trying nlam = 20, take =", take, "\n")}
fitCV <- try(glinternet::glinternet.cv(x, y, type, nFolds = nfolds, nLambda = 20,
interactionCandidates = m, family = fam), silent = TRUE)
if(class(fitCV) == "try-error"){
failed <- TRUE
take <- take + 1
} else {
failed <- FALSE
}
} else {
if(verbose){cat(" Failed.. trying nlam = 20 & nFolds = 3, take =", take, "\n")}
fitCV <- try(glinternet::glinternet.cv(x, y, type, nFolds = 3, nLambda = 20,
interactionCandidates = m, family = fam), silent = TRUE)
if(class(fitCV) == "try-error"){
failed <- TRUE
take <- take + 1
if(take == 20){break}
} else {
failed <- FALSE
}
}
}
fitCV
})
if(useSE == TRUE){
lamlist <- fitCV$lambda
errm <- fitCV$cvErr
errse <- fitCV$cvErrStd <- fitCV$cvErrStd/sqrt(nfolds)
o <- which.min(errm)
lamhat <- lamlist[o]
oo <- errm <= errm[o] + errse[o]
fitCV$lambdaHat1Std <- lamlist[oo & lamlist >= lamhat][1]
}
which.lam0 <- which(fitCV$lambda == fitCV$lambdaHat)
while(is.null(fitCV$glinternetFit$activeSet[[which.lam0]])){
if(verbose){cat("\n Mod0 empty.. choosing new lambda\n")}
which.lam0 <- which.lam0 + 1
}
fitCV$lambdaHat <- fitCV$lambda[which.lam0]
which.lam1se <- which(fitCV$lambda == fitCV$lambdaHat1Std)
while(is.null(fitCV$glinternetFit$activeSet[[which.lam1se]])){
if(verbose){cat("\n Mod1SE empty.. choosing new lambda\n")}
which.lam1se <- which.lam1se + 1
}
fitCV$lambdaHat1Std <- fitCV$lambda[which.lam1se]
fit0 <- glinternet::glinternet(x, y, type, lambda = fitCV$lambdaHat,
interactionCandidates = m, family = fam)
fit1se <- glinternet::glinternet(x, y, type, lambda = fitCV$lambdaHat1Std,
interactionCandidates = m, family = fam)
attributes(fit1se)$useSE <- attributes(fitCV)$useSE <- useSE
mod0 <- coef(fit0)[[2]]
mod1se <- coef(fit1se)[[2]]
} else {
fit <- glinternet::glinternet(x, y, type, interactionCandidates = m,
family = fam, nLambda = nlam)
coefs <- coef(fit)[-1]
mains <- 1:ncol(x)
ints <- t(combn(mains, 2))
ints2 <- as.numeric(apply(ints, 1, paste, collapse = ""))
if(is.null(lags) & !is.null(m)){
vs <- colnames(x)
vs1 <- vs[m]
if(length(vs1) > 1){vs1 <- paste0("(", paste(vs1, collapse = " + "), ")")}
vs2 <- as.formula(paste0("~ . * ", vs1))
dmnames <- colnames(model.matrix(vs2, data.frame(x)))[-1]
}
allCoefs <- lapply(coefs, function(z){
zmain1 <- z$mainEffects$cont
zmain2 <- z$mainEffectsCoef$cont
if(length(zmain1) != 0){
if(any(!mains %in% zmain1)){
zmiss1 <- mains[!mains %in% zmain1]
zcoefs1 <- c(zmain2, rep(0, length(zmiss1)))[order(c(zmain1, zmiss1))]
} else {
zcoefs1 <- zmain2[order(zmain1)]
}
} else {
zcoefs1 <- rep(0, length(mains))
}
zint1 <- z$interactions$contcont
zint2 <- z$interactionsCoef$contcont
if(length(zint1) != 0){
zints1 <- as.numeric(apply(zint1, 1, paste, collapse = ""))
if(nrow(ints) != nrow(zint1)){
zcoefs2 <- rep(0, nrow(ints))
zcoefs2[which(ints2 %in% zints1)] <- zint2
} else {
zcoefs2 <- zint2[match(zints1, ints2)]
}
} else {
zcoefs2 <- rep(0, nrow(ints))
}
betas <- unlist(c(zcoefs1, zcoefs2))
names(betas) <- c(colnames(x), apply(combn(colnames(x), 2), 2, paste, collapse = ":"))
if(!is.null(m)){
betas <- betas[which(names(betas) %in% dmnames)]
#if(is.null(lags)){
# x2 <- c(colnames(x), paste0(colnames(x)[-m], ":", colnames(x)[m]))
# betas <- betas[which(names(betas) %in% x2)]
#} else {
# betas <- betas[which(names(betas) %in% dmnames)]
#}
}
return(betas)
})
n_neighbors <- sapply(allCoefs, function(z) sum(z != 0))
LL_models <- sapply(1:length(allCoefs), function(z){
s2 <- sum((y - fit$fitted[, z + 1])^2)/length(y)
sum(dnorm(y, mean = fit$fitted[, z + 1], sd = sqrt(s2), log = TRUE))
})
p <- length(mains) + nrow(ints)
if(!is.null(m)){
if(all(m == 0)){
p <- length(mains)
} else {
p <- length(c(colnames(x), paste0(colnames(x)[-m], ":", colnames(x)[m])))
}
}
ic_lambda <- -2 * LL_models + n_neighbors * ifelse(
criterion == "AIC", 2, log(nrow(x))) + ifelse(
criterion == "EBIC", list(2 * gamma * n_neighbors * log(p)), list(0))[[1]]
betas <- allCoefs[[which.min(ic_lambda)]]
lambda_min <- fit$lambda[which.min(ic_lambda) + 1]
coefs <- Matrix::Matrix(betas, sparse = TRUE)
rownames(coefs) <- names(betas)
fitobj <- list(fit = fit, fit0 = NA, crit = ic_lambda)
if(ifelse(!is.null(m), ifelse(all(m == 0), FALSE, TRUE), TRUE) & method == "hiernet"){
fitobj$fit0 <- glinternet::glinternet(x, y, type, interactionCandidates = m,
lambda = lambda_min, family = fam)
}
names(fitobj)[3] <- criterion
output <- list(mod0 = names(betas)[betas != 0], coefs = coefs,
fitobj = fitobj, allCoefs = allCoefs)
attr(output, "family") <- ifelse(fam == "gaussian", "g", "c")
return(output)
}
mains <- 1:ncol(x)
ints <- t(combn(mains, 2))
ints2 <- as.numeric(apply(ints, 1, paste, collapse = ""))
if(length(mod0$mainEffects$cont) != 0){
if(any(!mains %in% mod0$mainEffects$cont)){
mod0miss1 <- mains[!mains %in% mod0$mainEffects$cont]
mod0coefs1 <- c(mod0$mainEffectsCoef$cont,
rep(0, length(mod0miss1)))[order(c(mod0$mainEffects$cont, mod0miss1))]
} else {
mod0coefs1 <- mod0$mainEffectsCoef$cont[order(mod0$mainEffects$cont)]
}
} else {
mod0coefs1 <- rep(0, length(mains))
}
if(length(mod1se$mainEffects$cont) != 0){
if(any(!mains %in% mod1se$mainEffects$cont)){
mod1semiss1 <- mains[!mains %in% mod1se$mainEffects$cont]
mod1secoefs1 <- c(mod1se$mainEffectsCoef$cont,
rep(0, length(mod1semiss1)))[order(c(mod1se$mainEffects$cont, mod1semiss1))]
} else {
mod1secoefs1 <- mod1se$mainEffectsCoef$cont[order(mod1se$mainEffects$cont)]
}
} else {
mod1secoefs1 <- rep(0, length(mains))
}
if(length(mod0$interactions$contcont) != 0){
mod0ints1 <- as.numeric(apply(mod0$interactions$contcont, 1, paste, collapse = ""))
if(nrow(ints) != nrow(mod0$interactions$contcont)){
mod0coefs2 <- rep(0, nrow(ints))
mod0coefs2[which(ints2 %in% mod0ints1)] <- mod0$interactionsCoef$contcont
} else {
mod0coefs2 <- mod0$interactionsCoef$contcont[match(mod0ints1, ints2)]
}
} else {
mod0coefs2 <- rep(0, nrow(ints))
}
if(length(mod1se$interactions$contcont) != 0){
mod1seints1 <- as.numeric(apply(mod1se$interactions$contcont, 1, paste, collapse = ""))
if(nrow(ints) != nrow(mod1se$interactions$contcont)){
mod1secoefs2 <- rep(0, nrow(ints))
mod1secoefs2[which(ints2 %in% mod1seints1)] <- mod1se$interactionsCoef$contcont
} else {
mod1secoefs2 <- mod1se$interactionsCoef$contcont[match(mod1seints1, ints2)]
}
} else {
mod1secoefs2 <- rep(0, nrow(ints))
}
mod0 <- unlist(c(mod0coefs1, mod0coefs2))
mod1se <- unlist(c(mod1secoefs1, mod1secoefs2))
coefs <- Matrix::Matrix(cbind(mod0, mod1se), sparse = TRUE)
}
allNames <- c(colnames(data), apply(combn(colnames(data), 2), 2, paste, collapse = ":"))
rownames(coefs) <- allNames
if(outMsgs == TRUE & method == "hiernet"){
output <- list(mod0 = allNames[mod0 != 0], mod1se = allNames[mod1se != 0], coefs = coefs,
fitobj = list(fitCV = fitCV, fit0 = fit0, fit1se = fit1se),
outMsgs = list(outPath = out1, outCV = out2, out0 = out3, out1se = out4))
} else {
output <- list(mod0 = allNames[mod0 != 0], mod1se = allNames[mod1se != 0], coefs = coefs,
fitobj = list(fitCV = fitCV, fit0 = fit0, fit1se = fit1se))
}
attr(output, "family") <- ifelse(method == "glinternet", ifelse(fam == "gaussian", "g", "c"), "g")
return(output)
}
|
9729268cbe0af06d0ee5e626ffb7d47ab30cd20e | 2ff52d00245ba3fa0668380ec6519052458ff3a0 | /scripts/COMP.modeling2.R | 9650bce62810f263670720f744873d207a67ac3e | [] | no_license | dnemens/black-oak | 1f5fdf20a0c9959ed3f941daf94ce0e5ba4dbbee | 723e6416c375e8560a048622dbb3e1a633e03eb0 | refs/heads/master | 2021-05-09T20:23:26.204832 | 2019-12-19T01:21:40 | 2019-12-19T01:21:40 | 118,685,094 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 962 | r | COMP.modeling2.R | library(tidyverse)
# Mean importance values (wide format) and RdNBR burn-severity measurements.
# NOTE(review): absolute Windows paths make this script machine-specific.
wide <- read.csv(file = "C:/Users/debne/Dropbox/CBO/black-oak/data sheets/mean.import.over.csv")
rdnbr <- read.csv(file = "C:/Users/debne/Dropbox/CBO/black-oak/data sheets/rdnbr.csv")
#combines dataframes for rdnbr values (column-bind; assumes both files have
#rows in the same plot order -- TODO confirm)
import.wide <- data.frame(wide, rdnbr)
#selects only post-chips importance values (columns 14-22, by position)
import.wide <- data.frame(import.wide[14:22])
#cleans up data frame: split the plot id into Storrie/Chips severity classes
#and the plot number, then drop the plot number
import.wide <- import.wide %>%
  separate(plot, c("Storrie", "Chips", "Plot"), remove = T) %>%
  select(-Plot) %>%
  mutate(ABCO= "ABCO.2", CADE = "CADE.2", PILA = "PILA.2", PIPO="PIPO.2", PSME="PSME.2", QUKE="QUKE.2")
# NOTE(review): the mutate() above replaces every value of each species
# column with a constant string (e.g. all ABCO values become "ABCO.2").
# It looks like rename(ABCO.2 = ABCO, ...) or similar was intended --
# verify, because the glm() calls below would otherwise model a constant
# (character) response.
# Per-species response vectors:
abco <- import.wide$ABCO
cade <- import.wide$CADE
pila <- import.wide$PILA
pipo <- import.wide$PIPO
psme <- import.wide$PSME
quke <- import.wide$QUKE
# Predictors: continuous RdNBR severity (StoR/ChiR) and the categorical
# severity classes parsed from the plot id (Stocat/Chipcat).
StoR <- import.wide$storrie_rdnbr
ChiR <- import.wide$chips_rdnbr
Stocat <- import.wide$Storrie
Chipcat <- import.wide$Chips
# White fir (ABCO) importance modelled on categorical and continuous severity.
mod.A <- glm(abco~Stocat*Chipcat)
mod.A.R <- glm(abco~StoR*ChiR)
|
85fb36765c6fc97086c7c62940595459e021f90c | e55a80a875a694cf18027a92ea8714d5070335db | /TCGA_COAD_4_TOP1&CES2_compare.R | 39d9ff66d42880c142b2859cc77cb74fe96e8cef | [] | no_license | leeym950/CMS4_Irinotecan | 79f48d975f19ad0fb4f510f6cbb38748a41c7f01 | 85825493687aa8af7e28db740361694ee0ec51c1 | refs/heads/master | 2020-05-23T12:14:44.191788 | 2019-05-29T14:26:54 | 2019-05-29T14:26:54 | 186,753,602 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 963 | r | TCGA_COAD_4_TOP1&CES2_compare.R | ##
## CMS4 vs others : TOP1, CES2
##
## Compares TOP1 and CES2 expression between the CMS4 subtype and the other
## subtypes, with and without filtering samples on the NTP classification FDR.
## Expects `expression.data` (genes x samples) and `ntp.result` (containing
## "prediction" and "FDR" columns) to already exist in the workspace.
##
library(ggplot2)
library(ggpubr)

# One row per sample: expression values plus subtype prediction.
# NOTE: `subset` shadows base::subset for the rest of this script.
subset <- cbind(t(expression.data), ntp.result)
CMS4 <- subset[ ,"prediction"] == "CMS4"
subset <- cbind(subset, CMS4)

# Expression by predicted subtype.
ggboxplot(data=subset, "prediction", "TOP1")
ggboxplot(data=subset, "prediction", "CES2")

# CMS4 vs all others, with significance annotation (Wilcoxon by default).
p <- ggboxplot(data=subset, "CMS4", "TOP1")
p+ stat_compare_means() + stat_mean()
p <- ggboxplot(data=subset, "CMS4", "CES2")
p+ stat_compare_means()

# Set FDR threshold:
FDR.filter <- 0.2
# BUG FIX: the threshold variable above was defined but a hard-coded 0.2 was
# used in the filter; the filter now honours FDR.filter.
FDR.filtered.subset <- subset[subset[ ,"FDR"]<=FDR.filter, ]
CMS4.filtered <- FDR.filtered.subset[ ,"prediction"] == "CMS4"
FDR.filtered.subset <- cbind(FDR.filtered.subset, CMS4.filtered)

# Same comparisons on the FDR-filtered samples.
ggboxplot(data=FDR.filtered.subset, "prediction", "TOP1")
ggboxplot(data=FDR.filtered.subset, "prediction", "CES2")
p <- ggboxplot(data=FDR.filtered.subset, "CMS4.filtered", "TOP1")
p+ stat_compare_means()
p <- ggboxplot(data=FDR.filtered.subset, "CMS4.filtered", "CES2")
p+ stat_compare_means()
|
495f3ca2107869c9978648b77321ac5c4d8f6e16 | b111b2cab6d52c3a5480820af36a8e363305f3cb | /demo/SNA - Correios.R | 96069c410bad04a9cb6578736617ca2bc29aac70 | [] | no_license | rommelnc/hudar | 240cb22226802de0241d2d95095d09bd3b60bba0 | 257b6360f2e8d5d7d642c8a9a5cb8b73f48ba4f2 | refs/heads/master | 2020-05-09T14:58:06.410576 | 2015-08-19T02:58:21 | 2015-08-19T02:58:21 | 40,375,394 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,366 | r | SNA - Correios.R | ## Correios
# Load the Twitter helper functions.
source('funcoesTwitter.R', encoding='UTF-8')
# Candidate file selections: each assignment overwrites the previous one, so
# only the last list.files() pattern below actually takes effect.
arquivos = list.files('../data', pattern='politics.*2014-1(0-3|1-)[[:digit:]]*.json', full.names=TRUE)
arquivos = list.files('../data', pattern='politics.*2014-11-08-0[[:digit:]]*.json', full.names=TRUE)
arquivos = list.files('../data', pattern='politics.*2014-11-09-0[[:digit:]]*.json', full.names=TRUE)
arquivos = list.files('../data', pattern='politics.*2014-((11-(1|2|3))|12-)[[:digit:]]*.json', full.names=TRUE)
# Keep only files 17 and 18 -- position-based, fragile if the directory changes.
arquivos = arquivos[c(17,18)]
arquivos
salvarParseTweets(arquivos, TRUE)
# Reload helpers (Twitter + social-network-analysis functions).
source('funcoesTwitter.R', encoding='UTF-8')
source('funcoesSNA.R', encoding='UTF-8')
arquivos = list.files('../data', pattern='politics.*2014-1(1|2)-[[:digit:]]*.Rda', full.names=TRUE)
arquivos
## Analyse only the last X days (here: the last 14 files).
arquivos = arquivos[(length(arquivos)-13):length(arquivos)]
# Tweets about the Brazilian postal service ("correios" / "ECT").
palavras = c('correios','ECT')
tweets = recuperarTweetsEmArquivos(arquivos, palavras)
# NOTE(review): this path saves into the PARENT directory as
# "datapolitics-tweets-..."; a '/' after '../data' was probably intended
# (the same applies to the "-resultado" load below) -- confirm before fixing.
arquivo = '../datapolitics-tweets-2014-11-01-to-2014-12-12-correios.Rda'
save(tweets, file=arquivo)
analiseTweetsPorTema(arquivo, palavras)
# Load the per-theme analysis result (expected to define `resultado`) and
# re-load the saved tweets.
load('../datapolitics-tweets-2014-11-01-to-2014-12-12-correios-resultado.Rda')
load(arquivo)
analiseGeralDosTemas(resultado, palavras, tweets, arquivo)
analiseDosTemas(resultado, arquivo)
# Start a local web server so the generated HTML output can be browsed.
# NOTE(review): setwd() changes the working directory for the whole session.
require(servr)
setwd('../html/')
servr::httd()
|
8f510b677b7dff0e6cba1ecd43594a8cc2cca59e | 86a93c2e665fc296cd7d2c6e9242e80cf50259e3 | /R/plotwindserie.R | 4c2dead50f6374f40ba346b31088b73a2033ec3f | [] | no_license | mbonoli/WindResource | eb40ce3418b1e8768d1f2acfa4c3c98bba5f600d | 21a8c1648d17378e2f2753b48d584d10bfec9cb5 | refs/heads/master | 2020-05-20T13:15:44.225895 | 2018-09-19T04:24:58 | 2018-09-19T04:24:58 | 13,712,635 | 0 | 0 | null | 2014-03-20T04:42:13 | 2013-10-20T02:52:06 | R | UTF-8 | R | false | false | 4,446 | r | plotwindserie.R | #' @title Conversion of dataframe in class \code{windata}
#'
#' @description
#' Plots one month of wind measurements from a \code{windata} object as a
#' googleVis annotated time line (average/min/max wind speed, temperature
#' and pressure series).
#'
#' @details
#' Series names in \code{var} and \code{axis} are matched case-insensitively.
#' BUG FIX: the capitalised defaults ("Ave", "Min", ...) never matched the
#' lowercase checks in the body, so calling the function with default
#' arguments selected no series at all; names are now normalised with
#' \code{tolower()}.  The first "min" and "ave" records are overwritten with
#' the overall maximum so all speed series share one vertical scale (a
#' googleVis workaround).
#'
#' @param wdata object of class 'windata' holding the measurements.
#' @param year numeric year to plot; must occur in \code{wdata$time$year}.
#' @param month numeric month to plot; must occur in \code{wdata$time$month}.
#' @param ane name (or index) of the anemometer within \code{wdata$ane}.
#' @param var character vector of series to draw; any of "Ave", "Min",
#'   "Max", "Temp", "Pres" (case-insensitive).  "Dir" is accepted but has
#'   no drawing branch yet.
#' @param axis character vector (length 1 or 2) naming the series that
#'   define the plot axes; matched against \code{var}.
#' @param shiny logical; if TRUE the googleVis object is returned (for use
#'   inside a shiny app), otherwise it is plotted directly.
#' @return A googleVis chart object (plotted as a side effect when
#'   \code{shiny = FALSE}).
#'
#' @author Valeria Gogni, Mariano Bonoli, Ruben Bufanio, Diego Edwards
#'
#' @importFrom googleVis gvisAnnotatedTimeLine
#'
#' @export
plotwindserie <- function(wdata, year, month, ane,
    var = c("Ave", "Min", "Max", "Temp", "Pres", "Dir"),
    axis = c("Ave", "Min", "Max", "Temp", "Pres", "Dir"),
    shiny = F) {
    # BUG FIX: match series names case-insensitively (defaults are
    # capitalised, the literals below are lowercase).
    var <- tolower(var)
    axis <- tolower(axis)
    if (sum(wdata$time$year == year & wdata$time$month == month) == 0)
        stop("No existe el anio y mes seleccionado")
    # Rows belonging to the requested year/month (hoisted out of the
    # per-series blocks below).
    sel <- wdata$time$year == year & wdata$time$month == month
    # Seed data frame with one all-NA row so rbind() keeps consistent column
    # types; the seed row is dropped later by the !is.na(dt) filter.
    data <- data.frame(dt = as.POSIXct(NA), val = NA, type = NA)
    # NOTE(review): 'colorlines' is assembled below but never passed to
    # gvisAnnotatedTimeLine -- apparently an unfinished feature.
    colorlines <- "["
    if ("ave" %in% var) {
        data <- rbind(data, data.frame(dt = wdata$time$dt,
                                       val = wdata[["ane"]][[ane]]$ave,
                                       type = "ave")[sel, ])
        colorlines <- paste(colorlines, ifelse(nchar(colorlines) == 1, "", ","),
                            " 'blue'", sep = "")
    }
    if ("min" %in% var) {
        data <- rbind(data, data.frame(dt = wdata$time$dt,
                                       val = wdata[["ane"]][[ane]]$min,
                                       type = "min")[sel, ])
        colorlines <- paste(colorlines, ifelse(nchar(colorlines) == 1, "", ","),
                            " 'lightgray'", sep = "")
    }
    if ("max" %in% var) {
        data <- rbind(data, data.frame(dt = wdata$time$dt,
                                       val = wdata[["ane"]][[ane]]$max,
                                       type = "max")[sel, ])
        colorlines <- paste(colorlines, ifelse(nchar(colorlines) == 1, "", ","),
                            " 'lightgray'", sep = "")
    }
    if ("temp" %in% var) {
        data <- rbind(data, data.frame(dt = wdata$time$dt,
                                       val = wdata$par$temp$value,
                                       type = "temp")[sel, ])
        colorlines <- paste(colorlines, ifelse(nchar(colorlines) == 1, "", ","),
                            " 'green'", sep = "")
    }
    if ("pres" %in% var) {
        data <- rbind(data, data.frame(dt = wdata$time$dt,
                                       val = wdata$par$pres$value,
                                       type = "pres")[sel, ])
        colorlines <- paste(colorlines, ifelse(nchar(colorlines) == 1, "", ","),
                            " 'lightgreen'", sep = "")
    }
    colorlines <- paste(colorlines, "]")
    # Drop records without a timestamp (this removes the NA seed row).
    data <- data[!is.na(data$dt), ]
    # Force a common scale across the speed series: overwrite the first
    # 'min' and 'ave' records with the overall maximum.  ('vmax' replaces
    # the original local name 'max', which shadowed base::max.)
    vmax <- max(data[, "val"], na.rm = TRUE)
    data[data$type == "min", ][1, 2] <- vmax
    data[data$type == "ave", ][1, 2] <- vmax
    # 0-based positions of the axis variables inside 'var'.
    # NOTE(review): like 'colorlines', 'scalecol' is computed but never used.
    if (length(axis) == 1) {
        scalecol <- paste("[", which(var == axis[1]) - 1, "]", sep = "")
    } else {
        scalecol <- paste("[", which(var == axis[1]) - 1, ",",
                          which(var == axis[2]) - 1, "]", sep = "")
    }
    if (shiny == T) {
        gvisAnnotatedTimeLine(data, datevar = "dt", numvar = "val", idvar = "type",
            options = list(width = "100%"))
    } else {
        # NOTE(review): "higth" is not a valid option name (googleVis will
        # ignore it); kept as-is to avoid changing the rendered output.
        dataplot <- gvisAnnotatedTimeLine(data, datevar = "dt", numvar = "val",
            idvar = "type",
            options = list(width = "100", higth = "100"))
        plot(dataplot)
    }
}
|
e724e3569f8afead788c564d7a20d2f4f39c8e3d | 16d00df419d17a6e222e53342344fe89e67b096d | /code/singleDecisions.R | 7115eb5b15e5d2862f17cc356068ba271a0edfe0 | [] | no_license | mattia-cai/SICOMA_2020 | f35b3d9a2677bc583db7b7646489dda8e374842b | de31feb7da697ab6c40e84345ae4a224542bd927 | refs/heads/master | 2023-05-04T19:55:06.828982 | 2021-05-26T13:54:45 | 2021-05-26T13:54:45 | 292,814,843 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,216 | r | singleDecisions.R | rm( list = ls( ) )
# Compute the price impacts using the leontief price model
# In this case I am only interested in two cases: trucks in 2016 and finance in 2013
# author: mattia cai
##############################################
### Warning: XLConnect conflicts with xlsx ###
##############################################
##################
### Input data ###
##################
# Dependencies
require( "data.table" )
library( "stats")
#library( "XLConnect")
# Helper functions: Leontief price model and list-writing utilities.
source( "code/funlib/leontiefPriceModel.R" )
source( "code/funlib/write_list.R" )
# dataset -- (single Nace 2 assignment); loads `dta.cases` (a data.table)
load( file = paste0( "outputData/compCases.RData"), verbose = T )
str( dta.cases )
# EU28 supply and use system (2014); presumably provides `Ad` and `Vt`
# used further down -- TODO confirm the object names in the RData file.
load( "intmData/IO_EU28_2014.RData" )
#################################
### The two cases of interest ###
#################################
# The bare data.table expressions print the matching rows for inspection;
# the `tag := 1` assignments modify dta.cases IN PLACE (by reference).
# Finance case
dta.cases[ year == 2013 & nace2_2d_a64 == "K64" ]
dta.cases[ year == 2013 & case_id == "AT.39914", tag := 1 ]
# Trucks
dta.cases[ year == 2016 & nace2_2d_a64 == "C29" ]
dta.cases[ year == 2016 & case_id == "AT.39824", tag := 1 ]
# Only those two cases
dta.cases[ tag == 1 ]
# Price-impact decomposition for one competition case.
#
# A     : input-output coefficient matrix (sectors x sectors, named dims)
# dt    : data.table describing the case's affected sector(s); must contain
#         columns nace2_2d_a64, go2_a64, delta_p and the column named by w.var
# w.var : name of the column holding the affected market size
#
# Returns a data.table with one row per sector (values in percent): the
# affected-market weight (w), the direct ("within") effect and the
# spillover effect from the Leontief price model.
singleCaseSpillovers <- function( A, dt, w.var = "mkt_t_a64_log" ) {
  sectors <- rownames( A )
  # Zero vectors over all sectors; filled in only for affected sectors.
  wgt <- setNames( rep( 0, nrow( A ) ), sectors )
  dpr <- wgt
  affected <- dt[ , nace2_2d_a64 ]
  wgt[ affected ] <- dt[ , get( w.var ) / go2_a64 ]
  dpr[ affected ] <- dt[ , delta_p ]
  # Row sums of the spillover matrix give the per-sector spillover effect.
  spillover <- rowSums( IOpriceSpilloverMatrix( A = A, w = wgt, rho = 1 - dpr ) )
  # Scale everything to percentages.
  res <- 100 * cbind( w = wgt, within = dpr * wgt, spillover = spillover )
  out <- as.data.table( cbind( nace2 = rownames( res ), as.data.frame( res ) ) )
  out[ , nace2 := as.character( nace2 ) ]
  out
}
# Apply listwise to the cases: one data.table per tagged case_id.
dta.list <- split( dta.cases[ tag == 1 ], f = dta.cases[ tag == 1, case_id ] )
dta.list <- lapply( dta.list, function( dt ) singleCaseSpillovers( A = Ad, dt = dt ) )
# Get gross output for aggregation.  `Vt` comes from the loaded IO system;
# its column sums are taken as sectoral gross output -- TODO confirm.
# Note: `go2 :=` adds the column to each data.table in place.
dta.list <- lapply( dta.list, function( dt ) dt[ , go2 := colSums( Vt )[ dt[ , nace2 ] ] ] )
# Average: gross-output-weighted mean of the within and spillover effects
# for each case (sapply collapses the per-case results into a matrix).
dta.list <- sapply( dta.list, function( dt ) dt[ , .( within = weighted.mean( x = within, w = go2 ), spillover = weighted.mean( x = spillover, w = go2 ) )] )
# Print the per-case summary.
dta.list
349e704862d77112c584981597ba965e073ad362 | 4aaad46c2e1f999d14b08edffa1105e541277804 | /Plot1.R | ee8d39361a64502f9603fd47d28913070b371a66 | [] | no_license | Pietersgithub/ExData_Plotting1 | 1241e1d3cf3feb1efcea7461f3e19d766cbe3ce4 | 2dada1c8da09d89594e60a6a3f35dca4825d9a01 | refs/heads/master | 2020-12-11T05:46:32.888467 | 2015-06-07T16:50:47 | 2015-06-07T16:50:47 | 36,806,653 | 0 | 0 | null | 2015-06-03T13:49:03 | 2015-06-03T13:49:02 | null | UTF-8 | R | false | false | 686 | r | Plot1.R | ##Plot1
## Read the full household power consumption data ("?" marks missing values).
power_data <- read.table("household_power_consumption.txt", header = TRUE, sep = ';',
                         na.strings = "?", dec = ".",
                         colClasses = c("character", "character", "numeric", "numeric",
                                        "numeric", "numeric", "numeric", "numeric",
                                        "numeric"))
## Keep only the measurements from 1 and 2 February 2007.
is_target_day <- power_data$Date == "1/2/2007" | power_data$Date == "2/2/2007"
two_day_data <- power_data[is_target_day, ]
## The full table is no longer needed; free the memory.
rm(power_data)
## Draw the histogram of global active power on the screen device ...
hist(two_day_data$Global_active_power, col = "red",
     xlab = "Global Active Power (kilowatts)", main = "Global Active Power")
## ... then copy it to a 480x480 PNG file.
dev.copy(png, file = "plot1.png", height = 480, width = 480)
dev.off()
|
b525bea41e66d266b42be9d16b238a7e7815fbc5 | 18a6a2dd2e9054e102dc97d3a9aacad4350aa647 | /cachematrix.R | ff7ac0f004b696d92b5b3aa7166dfba173b14f09 | [] | no_license | ChandralekhaGhosh/ProgrammingAssignment2 | c53df512880b08cc73656ec6d4b3cd6260b91356 | 08c3f8341a6028fe7d21f94c82ec4c60b723769d | refs/heads/master | 2022-12-24T12:53:53.917029 | 2020-10-06T15:17:37 | 2020-10-06T15:17:37 | 301,461,475 | 0 | 0 | null | 2020-10-05T15:51:14 | 2020-10-05T15:51:13 | null | UTF-8 | R | false | false | 1,703 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## functions do
#In this assignment we're writing a function to cache the inverse of a square matrix.
#Caching is an efficient way to avoid re-eval long intensive computations.
## makeCacheMatrix: build a special "matrix" object that can cache its inverse.
## Returns a list of four closures (set/get/setInverse/getInverse) that all
## share the environment holding the matrix `x` and its cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    ## Storing a new matrix invalidates any previously cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() x
  setInverse <- function(inverse) cached_inverse <<- inverse
  getInverse <- function() cached_inverse
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## cacheSolve: return the inverse of the special "matrix" produced by
## makeCacheMatrix, computing it at most once.  On a cache hit a message is
## emitted and the stored inverse is returned; on a miss the inverse is
## computed with solve() (extra arguments are forwarded) and cached.
cacheSolve <- function(x, ...) {
  cached <- x$getInverse()
  if (is.null(cached)) {
    ## Cache miss: invert the underlying matrix and remember the result.
    cached <- solve(x$get(), ...)
    x$setInverse(cached)
  } else {
    message("getting from cache")
  }
  cached
}
################ CHECKING: quick manual smoke test ############################
demo_cm <- makeCacheMatrix(matrix(1:4, nrow = 2, ncol = 2))
demo_cm$get()        # prints the stored matrix
demo_cm$getInverse() # NULL: nothing cached yet
cacheSolve(demo_cm)  # computes, caches and prints the inverse
|
29560f6d59b61af69e40b709e1dd22a4d018efe7 | 6151a16653d8462debb34d414f83ef8cd57d5fbc | /fhitings_Anu.R | 9449ead5aaac09fe6c759829e75e98885acd5f5e | [] | no_license | anusurendra/ITS | fed61b14aac00218405959af9995f000c1e4c3d4 | c445830a3f00864a3b726d6df9053c82a4eb393b | refs/heads/master | 2016-09-06T13:08:29.712252 | 2015-06-29T19:00:44 | 2015-06-29T19:00:44 | 37,540,588 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 18,154 | r | fhitings_Anu.R | #!/usr/bin/Rscript
#check to see if packages are installed and install if needed
# BUG FIX: the original code installed a missing package with
# install.packages() but never loaded it afterwards, so a first run on a
# fresh machine still failed; each package is now attached after install.
for (pkg in c("reshape", "plyr", "gtools")) {
  if (!require(pkg, character.only = TRUE)) {
    install.packages(pkg)
    library(pkg, character.only = TRUE)
  }
}
# Penalise scientific notation heavily so numbers print in fixed notation
# (scipen biases the fixed/scientific choice; it does not set digits).
options(scipen=2000)
# Read the command-line arguments passed after the script name.
args <- commandArgs(trailingOnly = TRUE)
# Initialize functions
# Parse arguments (we expect the form --arg=value)
parseArgs <- function(x) {
  # Drop the leading "--" from each argument, then split on "=" so every
  # "--name=value" becomes a c(name, value) character pair (list element).
  stripped <- sub("^--", "", x)
  strsplit(stripped, "=")
}
# Build the command-line usage text.  When `error.string` is non-NULL it is
# prepended (newline-separated) so the error appears above the usage help.
printHelpStatement <- function(error.string) {
  statement <- "USAGE\nfhitings [-h] [-filedir filepath] [-taxafile filename] [-evalue_cutoff float_value] [-perc_identity_cutoff float_value] [-blastfiles filenames]\n
DESCRIPTION\n-filedir <String>\npath to Blast results directory\nDefault = '.'\n
-taxafile <String>\nPath to the taxanomy assignment CSV file\nDefault = './Hogan_db_mod.csv'\n
-evalue_cutoff <Float>\nEvalue cutoff for blast results\nDefault = '0.001'\n
-perc_identity_cutoff <Float>\npercentage cutoff for taxanomic assignment resolution\nDefault = '0.08'\n
-ucfiles <String>\nlist of mapped reads to OTUs files to process separated by ',' \nDefault = ''\n
-blastfiles <String>\nlist of blast output files to process separated by ',' \nDefault = ''\n"
  if (is.null(error.string)) {
    statement
  } else {
    paste(error.string, statement, sep = "\n")
  }
}
#Get variables
# Parsed arguments as a two-column data frame: V1 = flag name, V2 = value.
argsDF <- as.data.frame(do.call("rbind", parseArgs(args)))
argsL <- as.list(as.character(argsDF$V2))
# -h: print the usage text and exit (status 1).
if(length(which(argsDF[,1]=="h"))==1){
  writeLines(printHelpStatement(NULL))
  quit(save = "no", status = 1, runLast = FALSE)
}
#Initialize variables (each falls back to a default when its flag is absent)
FILEDIR <- ifelse(length(which(argsDF[,1]=="filedir"))==1,as.vector(argsDF[which(argsDF[,1]=="filedir"),2]),getwd())
TAXAFILE <- ifelse(length(which(argsDF[,1]=="taxafile"))==1,as.vector(argsDF[which(argsDF[,1]=="taxafile"),2]),paste(getwd(),"Hogan_db_mod.csv",sep="/"))
EVALUE_CUTOFF <- ifelse(length(which(argsDF[,1]=="evalue_cutoff"))==1,as.double(as.vector(argsDF[which(argsDF[,1]=="evalue_cutoff"),2])),0.001)
HIT_ABUNDANCE_CUTOFF <- ifelse(length(which(argsDF[,1]=="hit_abundance_cutoff"))==1,as.double(as.vector(argsDF[which(argsDF[,1]=="hit_abundance_cutoff"),2])),0.8)
# Comma-separated file lists are split into character vectors.
# NOTE(review): ifelse() with a list or NULL branch has surprising
# semantics (it returns only the first element of the chosen branch);
# this appears to work here only when the flag is present -- a plain
# if/else would be safer.  Verify before relying on the NULL fallback.
if(grepl(",",argsDF[which(argsDF[,1]=="blastfiles"),2])==TRUE){
  BLAST_FILES <- ifelse(length(which(argsDF[,1]=="blastfiles"))==1,strsplit(as.vector(argsDF[which(argsDF[,1]=="blastfiles"),2]),","),NULL)
}else{
  BLAST_FILES <- ifelse(length(which(argsDF[,1]=="blastfiles"))==1,as.vector(argsDF[which(argsDF[,1]=="blastfiles"),2]),NULL)
}
print(BLAST_FILES)
if(grepl(",",argsDF[which(argsDF[,1]=="ucfiles"),2])==TRUE){
  UC_FILES <- ifelse(length(which(argsDF[,1]=="ucfiles"))==1,strsplit(as.vector(argsDF[which(argsDF[,1]=="ucfiles"),2]),","),NULL)
}else{
  UC_FILES <- ifelse(length(which(argsDF[,1]=="ucfiles"))==1,as.vector(argsDF[which(argsDF[,1]=="ucfiles"),2]),NULL)
}
print(UC_FILES)
# Taxonomy lookup table (comma-separated, quoting and comments disabled).
taxanomic.csv.m<-as.data.frame(read.delim(file=TAXAFILE,
  header=T,as.is=T,row.names=NULL,fill=F,sep=",",
  quote="",comment.char="",blank.lines.skip=F,strip.white=T),stringsAsFactors=F)
# Flatten the per-flag lists into plain character vectors of file names.
bf.vec <- unlist(BLAST_FILES);
ucf.vec <- unlist(UC_FILES);
#print(BLAST_FILES)
# Main driver: for every paired (BLAST tabular output, usearch .uc) file,
# assign a taxonomic lineage to each OTU, then write per-rank count tables,
# an OTU-by-sample abundance table, and a QIIME-style taxonomy file into
# FILEDIR/output.  Depends on plyr (ddply/summarize) and gtools (mixedsort),
# plus externally defined FILEDIR, EVALUE_CUTOFF, HIT_ABUNDANCE_CUTOFF and
# printHelpStatement().
for(i in c(1:length(bf.vec))){
  bf <- bf.vec[i]
  ucf <- ucf.vec[i]
  print(file.exists(paste(FILEDIR, bf, sep="/")))
  print(paste("Processing ",bf,sep=""))
  # Abort the whole run (exit status 1) if either file of the pair is missing.
  if(!file.exists(paste(FILEDIR, bf, sep="/")) || !file.exists(paste(FILEDIR, ucf, sep="/"))){
    writeLines(printHelpStatement("Ensure that the blast/uc file exists."))
    quit(save = "no", status = 1, runLast = FALSE)
  }
  print(paste("Reading Blast Output",sep=""))
  # BLAST tabular output: 12 tab-separated columns, no header
  # (column names are assigned just below).
  blast.results.m<-as.data.frame(read.delim(file=paste(FILEDIR, bf, sep="/"),
    header=F,as.is=T,row.names=NULL,fill=F,sep="\t",
    quote="",comment.char="",blank.lines.skip=F),stringsAsFactors=F)
  # Per-file accumulators, reset for every (bf, ucf) pair.
  species.count.m <- NULL
  genus.count.m <- NULL
  family.count.m <- NULL
  order.count.m <- NULL
  subclass.count.m <- NULL
  class.count.m <- NULL
  subphylum.count.m <- NULL
  phylum.count.m <- NULL
  kingdom.count.m <- NULL
  taxanomic.assignment.m <- NULL
  colnames(blast.results.m) <- c("qseqid","sseqid","pident","length","mismatch","gapopen","qstart","qend","sstart","send","evalue","bitscore")
  print(paste("Applying evalue filter",sep=""))
  # Keep only hits at or below the e-value threshold.
  pass.evalue.filter <- which(blast.results.m[,"evalue"]<=EVALUE_CUTOFF)
  blast.results.m.evalue.filter <- blast.results.m[pass.evalue.filter,]
  # One list element per query (OTU) id, holding all of its surviving hits.
  blast.results.m.evalue.filter.list <- split(blast.results.m.evalue.filter, blast.results.m.evalue.filter[,1])
  print(paste("Assigning taxonomy",sep=""))
  for (otu in mixedsort(names(blast.results.m.evalue.filter.list)))
  {
    print(otu)
    blast.results.m.tmp<-NULL
    accession.genus.species.m.tmp<-NULL
    sseqid.count<-NULL
    blast.results.m.tmp <- blast.results.m.evalue.filter.list[[otu]]
    #accession.genus.species.m.tmp <- matrix(unlist(strsplit(as.vector(blast.results.m.tmp[,2]),"_")),ncol=3,byrow=TRUE)
    # Subject ids are encoded "accession_Genus_species"; pull the three
    # underscore-separated parts out one column at a time, then rebuild
    # the combined "Genus_species" key.
    accession.genus.species.m.tmp <- as.data.frame(apply(as.matrix(blast.results.m.tmp),1, function(x) unlist(strsplit(x[2],"_"))[1]))
    accession.genus.species.m.tmp <- cbind(accession.genus.species.m.tmp,as.data.frame(apply(as.matrix(blast.results.m.tmp),1, function(x) unlist(strsplit(x[2],"_"))[2])))
    accession.genus.species.m.tmp <- cbind(accession.genus.species.m.tmp,as.data.frame(apply(as.matrix(blast.results.m.tmp),1, function(x) unlist(strsplit(x[2],"_"))[3])))
    accession.genus.species.m.tmp <- cbind(accession.genus.species.m.tmp,paste(accession.genus.species.m.tmp[,2],accession.genus.species.m.tmp[,3],sep="_"))
    colnames(accession.genus.species.m.tmp) <- c("accession","genus","species","genusspecies")
    # Count hits per Genus_species, sort descending, and convert counts
    # to the fraction of this OTU's hits supporting each assignment.
    sseqid.count <- ddply(accession.genus.species.m.tmp,.(genusspecies),summarize,cfreq=length(genusspecies))
    print(head(sseqid.count))
    sseqid.count <- sseqid.count[order(sseqid.count$cfreq, decreasing = T),]
    sseqid.count[,2] <- sseqid.count[,2]/dim(blast.results.m.tmp)[1]
    # Case 1: the dominant assignment is "Unknown_Unknown" (either
    # unanimous or above the abundance cutoff) -> record a fully
    # "Unknown" lineage for this OTU.
    if((sseqid.count[1,2]==1 && sseqid.count[1,1]=="Unknown_Unknown") || (sseqid.count[1,2]>=HIT_ABUNDANCE_CUTOFF && sseqid.count[1,1]=="Unknown_Unknown")){
      if(is.null(taxanomic.assignment.m)){
        tmp.m<-NULL
        tmp.m<-c("Unknown","Unknown","Unknown","Unknown","Unknown","Unknown","Unknown","Unknown")
        dim(tmp.m)<-c(1,8)
        taxanomic.assignment.m <- cbind(otu,"Unknown",tmp.m)
        colnames(taxanomic.assignment.m)<-c("otu","Species","Genus","Family","Order","Subclass","Class","Subphylum","Phylum", "Kingdom")
      }else{
        tmp.m<-NULL
        tmp.m<-c("Unknown","Unknown","Unknown","Unknown","Unknown","Unknown","Unknown","Unknown")
        dim(tmp.m)<-c(1,8)
        tmp.m <- cbind(otu,"Unknown",tmp.m)
        colnames(tmp.m)<-c("otu","Species","Genus","Family","Order","Subclass","Class","Subphylum","Phylum", "Kingdom")
        taxanomic.assignment.m <- rbind(taxanomic.assignment.m,tmp.m)
      }
    }else if(sseqid.count[1,2]==1){
      # Case 2: unanimous hit -> split "Genus_species"; gsub("_.*","")
      # yields the genus half, which is matched against column 1 of the
      # taxonomy table to fetch the full lineage; gsub(".*_","") yields
      # the species half.
      if(is.null(taxanomic.assignment.m)){
        taxanomic.assignment.m <- cbind(otu,gsub(".*_","",sseqid.count[1,1]),taxanomic.csv.m[which(taxanomic.csv.m[,1]==gsub("_.*","",sseqid.count[1,1])),])
        colnames(taxanomic.assignment.m)<-c("otu","Species","Genus","Family","Order","Subclass","Class","Subphylum","Phylum", "Kingdom")
      }else{
        tmp.m<-NULL
        tmp.m <- cbind(otu,gsub(".*_","",sseqid.count[1,1]),taxanomic.csv.m[which(taxanomic.csv.m[,1]==gsub("_.*","",sseqid.count[1,1])),])
        colnames(tmp.m)<-c("otu","Species","Genus","Family","Order","Subclass","Class","Subphylum","Phylum", "Kingdom")
        taxanomic.assignment.m <- rbind(taxanomic.assignment.m,tmp.m)
      }
    }else if(sseqid.count[1,2]>=HIT_ABUNDANCE_CUTOFF){
      # Case 3: majority assignment at or above the cutoff -> same lineage
      # lookup as case 2.
      if(is.null(taxanomic.assignment.m)){
        taxanomic.assignment.m <- cbind(otu,gsub(".*_","",sseqid.count[1,1]),taxanomic.csv.m[which(taxanomic.csv.m[,1]==gsub("_.*","",sseqid.count[1,1])),])
        colnames(taxanomic.assignment.m)<-c("otu","Species","Genus","Family","Order","Subclass","Class","Subphylum","Phylum", "Kingdom")
      }else{
        tmp.m<-NULL
        tmp.m <- cbind(otu,gsub(".*_","",sseqid.count[1,1]),taxanomic.csv.m[which(taxanomic.csv.m[,1]==gsub("_.*","",sseqid.count[1,1])),])
        colnames(tmp.m)<-c("otu","Species","Genus","Family","Order","Subclass","Class","Subphylum","Phylum", "Kingdom")
        taxanomic.assignment.m <- rbind(taxanomic.assignment.m,tmp.m)
      }
    }else{
      # Case 4: no dominant assignment -> gather the lineages of every
      # candidate genus; ranks on which all candidates agree keep their
      # value, ranks that disagree become "Ambiguous".
      all.taxa.lineage<-NULL
      all.taxa.lineage <- taxanomic.csv.m[which(taxanomic.csv.m[,1] %in% unique(gsub("_.*","",sseqid.count[,1]))),]
      if(is.null(taxanomic.assignment.m)){
        tmp.m<-NULL
        tmp.m<-apply(all.taxa.lineage,2, function(x){ifelse(length(as.vector(unique(x)))==1,unique(x),"Ambiguous")})
        dim(tmp.m)<-c(1,8)
        taxanomic.assignment.m <- cbind(otu,"Ambiguous",tmp.m)
        colnames(taxanomic.assignment.m)<-c("otu","Species","Genus","Family","Order","Subclass","Class","Subphylum","Phylum", "Kingdom")
      }else{
        tmp.m<-NULL
        tmp.m<-apply(all.taxa.lineage,2, function(x){ifelse(length(as.vector(unique(x)))==1,unique(x),"Ambiguous")})
        dim(tmp.m)<-c(1,8)
        tmp2.m<-NULL
        tmp2.m<-cbind(otu,"Ambiguous",tmp.m)
        colnames(tmp2.m)<-c("otu","Species","Genus","Family","Order","Subclass","Class","Subphylum","Phylum", "Kingdom")
        taxanomic.assignment.m <- rbind(taxanomic.assignment.m,tmp2.m)
      }
    }
  }
  # Normalise the "Incertae sedis" placeholder to "Unknown" in every rank.
  taxanomic.assignment.m <- apply(taxanomic.assignment.m,2, function(x){gsub("Incertae sedis","Unknown",x)})
  taxanomic.assignment.m <- as.data.frame(as.matrix(taxanomic.assignment.m))
  print(paste("Creating OTU table",sep=""))
  # usearch .uc file: 10 tab-separated columns (named below).
  ucf.results.m<-as.data.frame(read.delim(file=paste(FILEDIR, ucf, sep="/"),
    header=F,as.is=T,row.names=NULL,fill=F,sep="\t",
    quote="",comment.char="",blank.lines.skip=F,stringsAsFactors=F),stringsAsFactors=F)
  colnames(ucf.results.m) <- c("record_type","cluster_number","sequence_length","percent_identity","strand","blank_1","blank_2","compressed_alignment","query_id","subject_id")
  # Keep only hit records ("H"): reads that were assigned to a cluster/OTU.
  ucf.results.subset.m <- ucf.results.m[which(ucf.results.m[,"record_type"]=="H"),c("record_type","cluster_number","sequence_length","percent_identity","strand","compressed_alignment","query_id","subject_id")]
  # Parse sample id and per-read count out of the query label, and OTU id
  # plus total size out of the subject label (";size=N;" annotations).
  ucf.results.subset.m<-cbind(ucf.results.subset.m,gsub(".*=","",gsub(":.*","",ucf.results.subset.m[,"query_id"])))
  ucf.results.subset.m<-cbind(ucf.results.subset.m,gsub(";","",gsub(".*;size=","",ucf.results.subset.m[,"query_id"])))
  ucf.results.subset.m<-cbind(ucf.results.subset.m,gsub(";.*","",ucf.results.subset.m[,"subject_id"]))
  ucf.results.subset.m<-cbind(ucf.results.subset.m,gsub(";","",gsub(".*;size=","",ucf.results.subset.m[,"subject_id"])))
  colnames(ucf.results.subset.m)[9:12]<-c("sample_id","sample_raw_read_count","OTU_ID","raw_read_count")
  # Reads without a ";size=" annotation still carry the raw "barcodelabel="
  # text here: blank them, coerce to numeric, then default them to count 1.
  ucf.results.subset.m[as.vector(grep("barcodelabel=.*",ucf.results.subset.m[,"sample_raw_read_count"])),"sample_raw_read_count"] <- NA
  ucf.results.subset.m$"sample_raw_read_count"<-as.numeric(ucf.results.subset.m$"sample_raw_read_count")
  ucf.results.subset.m[as.vector(is.na(ucf.results.subset.m[,"sample_raw_read_count"])),"sample_raw_read_count"] <- 1
  print(paste("Getting raw counts per sample for each OTU",sep=""))
  ucf.results.subset.otu.sample.counts <- ddply(as.data.frame(ucf.results.subset.m),c("sample_id","OTU_ID"),summarise,sum=sum(as.numeric(sample_raw_read_count)))
  ucf.results.subset.otu.total.counts <- ddply(ucf.results.subset.m,.(OTU_ID),summarize,total_count=unique(raw_read_count))
  # Zero-initialised OTU x sample abundance matrix, rows/columns in
  # natural-sort order.
  ucf.results.subset.otu.m <- as.data.frame(matrix(data=0,nrow=length(as.vector(unique(ucf.results.subset.m[,"OTU_ID"]))),
    ncol=length(as.vector(unique(ucf.results.subset.m[,"sample_id"])))),
    row.names=mixedsort(as.vector(unique(ucf.results.subset.m[,"OTU_ID"]))))
  colnames(ucf.results.subset.otu.m) <- mixedsort(as.vector(unique(ucf.results.subset.m[,"sample_id"])))
  # Fill the matrix one sample at a time from the aggregated counts.
  ucf.results.subset.otu.sample.counts.list <- split(ucf.results.subset.otu.sample.counts[,2:3], ucf.results.subset.otu.sample.counts[,1])
  for(sampleid in mixedsort(names(ucf.results.subset.otu.sample.counts.list))){
    otuid.tmp <- ucf.results.subset.otu.sample.counts.list[[sampleid]]
    ucf.results.subset.otu.m[as.vector(otuid.tmp[,"OTU_ID"]),sampleid]<-otuid.tmp[,"sum"]
  }
  print(paste("Assigning taxanomy for each OTU",sep=""))
  # Build QIIME-style lineage strings "k__...; p__...; ...; s__...;" from
  # the assignment table (columns 10..2 are Kingdom..Species).
  rownames(taxanomic.assignment.m) <- gsub(";.*","",taxanomic.assignment.m[,1])
  ucf.results.subset.otu.taxa.m <- cbind(gsub(";.*","",taxanomic.assignment.m[,1]),paste("k__",taxanomic.assignment.m[,10],"; ","p__",taxanomic.assignment.m[,9],"; ",
    "c__",taxanomic.assignment.m[,7],"; ","o__",taxanomic.assignment.m[,5],"; ",
    "f__",taxanomic.assignment.m[,4],"; ","g__",taxanomic.assignment.m[,3],"; ",
    "s__",taxanomic.assignment.m[,2],"; ",sep=""))
  # OTUs present in the abundance table but missing from the assignment
  # table get an "Unassigned" lineage at every rank.
  unassigned.tmp <- cbind(rownames(ucf.results.subset.otu.m)[-(which(rownames(ucf.results.subset.otu.m) %in% rownames(taxanomic.assignment.m)))],paste("k__Unassigned; ","p__Unassigned; ",
    "c__Unassigned; ","o__Unassigned; ",
    "f__Unassigned; ","g__Unassigned; ",
    "s__Unassigned; ",sep=""))
  unassigned.taxa.m <- as.data.frame(matrix(data="Unassigned",nrow=dim(unassigned.tmp)[1],ncol=dim(taxanomic.assignment.m)[2],dimnames=list(unassigned.tmp[,1],colnames(taxanomic.assignment.m))))
  unassigned.taxa.m[,1] <- unassigned.tmp[,1]
  taxanomic.assignment.m <- rbind(taxanomic.assignment.m,unassigned.taxa.m)
  # NOTE(review): these assign the character string "NULL" as a name, not
  # the NULL object; the intent was presumably names(...) <- NULL to strip
  # names carried over from cbind/rbind — confirm and fix upstream.
  names(taxanomic.assignment.m$otu) <- "NULL"
  names(taxanomic.assignment.m$Species) <- "NULL"
  names(taxanomic.assignment.m$Genus) <- "NULL"
  names(taxanomic.assignment.m$Family) <- "NULL"
  names(taxanomic.assignment.m$Order) <- "NULL"
  names(taxanomic.assignment.m$Subclass) <- "NULL"
  names(taxanomic.assignment.m$Class) <- "NULL"
  names(taxanomic.assignment.m$Subphylum) <- "NULL"
  names(taxanomic.assignment.m$Phylum) <- "NULL"
  names(taxanomic.assignment.m$Kingdom) <- "NULL"
  print(paste("Getting taxonomic counts",sep=""))
  # Number of OTUs per distinct value at each taxonomic rank.
  species.count.m <- ddply(taxanomic.assignment.m,.(Species),"nrow")
  genus.count.m <- ddply(data.frame(taxanomic.assignment.m),.(Genus),"nrow")
  family.count.m <- ddply(data.frame(taxanomic.assignment.m),.(Family),"nrow")
  order.count.m <- ddply(data.frame(taxanomic.assignment.m),.(Order),"nrow")
  subclass.count.m <- ddply(data.frame(taxanomic.assignment.m),.(Subclass),"nrow")
  class.count.m <- ddply(data.frame(taxanomic.assignment.m),.(Class),"nrow")
  subphylum.count.m <- ddply(data.frame(taxanomic.assignment.m),.(Subphylum),"nrow")
  phylum.count.m <- ddply(data.frame(taxanomic.assignment.m),.(Phylum),"nrow")
  kingdom.count.m <- ddply(data.frame(taxanomic.assignment.m),.(Kingdom),"nrow")
  print(paste("Creating taxonomic files",sep=""))
  # Output files are named after the blast file with its extension swapped.
  dir.create(file.path(FILEDIR, "output"), showWarnings = FALSE)
  write.table(taxanomic.assignment.m,file=paste(FILEDIR, "output", gsub("\\..*",".results",bf,perl = T),sep="/"),row.names=F,quote=F, sep = "\t")
  write.table(species.count.m,file=paste(FILEDIR, "output", gsub("\\..*","_species.txt",bf,perl = T),sep="/"),row.names=F,quote=F, sep = "\t")
  write.table(genus.count.m,file=paste(FILEDIR, "output", gsub("\\..*","_genus.txt",bf,perl = T),sep="/"),row.names=F,quote=F, sep = "\t")
  write.table(family.count.m,file=paste(FILEDIR, "output", gsub("\\..*","_family.txt",bf,perl = T),sep="/"),row.names=F,quote=F, sep = "\t")
  write.table(order.count.m,file=paste(FILEDIR, "output", gsub("\\..*","_order.txt",bf,perl = T),sep="/"),row.names=F,quote=F, sep = "\t")
  write.table(subclass.count.m,file=paste(FILEDIR, "output", gsub("\\..*","_subclass.txt",bf,perl = T),sep="/"),row.names=F,quote=F, sep = "\t")
  write.table(class.count.m,file=paste(FILEDIR, "output", gsub("\\..*","_class.txt",bf,perl = T),sep="/"),row.names=F,quote=F, sep = "\t")
  write.table(subphylum.count.m,file=paste(FILEDIR, "output", gsub("\\..*","_subphylum.txt",bf,perl = T),sep="/"),row.names=F,quote=F, sep = "\t")
  write.table(phylum.count.m,file=paste(FILEDIR, "output", gsub("\\..*","_phylum.txt",bf,perl = T),sep="/"),row.names=F,quote=F, sep = "\t")
  write.table(kingdom.count.m,file=paste(FILEDIR, "output", sub("\\..*","_kingdom.txt",bf,perl = T),sep="/"),row.names=F,quote=F, sep = "\t")
  # Append the "Unassigned" rows to the per-OTU taxonomy strings and write
  # the two files consumed by biom conversion.
  ucf.results.subset.otu.taxa.m <- rbind(ucf.results.subset.otu.taxa.m,unassigned.tmp)
  print(paste("Creating OTU taxanomy file",sep=""))
  write.table(ucf.results.subset.otu.taxa.m,file=paste(FILEDIR, "output", sub("\\..*","_biom_taxa.txt",bf,perl = T),sep="/"),row.names=F,
    col.names=c("#OTUID","taxonomy"),quote=F, sep = "\t")
  print(paste("Creating BIOM text file",sep=""))
  write.table(cbind(rownames(ucf.results.subset.otu.m),ucf.results.subset.otu.m),file=paste(FILEDIR, "output", sub("\\..*","_biom.txt",bf,perl = T),sep="/"),
    row.names=F,col.names=c("#OTU ID",colnames(ucf.results.subset.otu.m)),quote=F, sep = "\t")
} |
9830ffa0360bd42ff17b86bd9788b192f7b31426 | 4d85af4fb11e841eb5c5cd9517fd4e8b2dc9b377 | /tpch/tpch-2.R | ebdc5f68d231514ce4a70b972e168bd56220b606 | [
"Apache-2.0"
] | permissive | jonathanmclaus/grokit-queries | ea3916839f81b79751dd435d5f4548e1cf2059d7 | 7c4de5365c289923871a325df9ffe2e3b16e7f86 | refs/heads/master | 2021-01-25T06:05:46.276977 | 2014-09-02T19:15:08 | 2014-09-02T19:15:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 939 | r | tpch-2.R | library(gtBase)
# TPC-H Query 2 (minimum-cost supplier) expressed in the grokit/gtBase DSL.
# Load the five base relations (table names appear to be 1 TB-scale
# variants; column symbols like p_type/s_acctbal are resolved by the DSL).
part <- Read(part1t)
supplier <- Read(supplier1t)
partsupp <- Read(partsupp1t)
nation <- Read(nation1t)
region <- Read(region1t)
# Parts of size 15 whose type ends in "BRASS" (regex match on p_type),
# and the EUROPE region row.
selpart <- part[match(p_type$ToString(), ".*BRASS") && p_size == 15]
selregion <- region[r_name == "EUROPE"]
# Join chain: nation -> EUROPE region, suppliers -> those nations,
# partsupp -> selected parts, then partsupp -> qualifying suppliers.
j1 <- Join(nation, n_regionkey, selregion, r_regionkey)
j2 <- Join(supplier, s_nationkey, j1, nation@n_nationkey)
j3 <- Join(partsupp, ps_partkey, selpart, p_partkey)
j4 <- Join(j3, ps_suppkey, j2, s_suppkey)
# Per part, keep the supplier tuple(s) achieving the minimum ps_supplycost
# (ExtremeTuples carries the listed supplier/nation columns through).
groupby <- GroupBy(j4,
  group = p_partkey,
  ExtremeTuples(inputs = c(s_acctbal, s_name, n_name, p_mfgr, s_address, s_phone, s_comment),
    outputs = c(s_acctbal, s_name, n_name, p_mfgr, s_address, s_phone, s_comment),
    min(ps_supplycost))
)
# TPC-H Q2 ordering: account balance descending, then nation, supplier,
# part key ascending; top 100 rows.
orderby <- OrderBy(groupby, dsc(s_acctbal), asc(n_name), asc(s_name), asc(p_partkey), limit = 100)
View(orderby)
|
0bdb883d99daa8e14d98e8ce9f0f451eae00520f | 482bfd01497367b8ece31ee8ff42e020bf609b70 | /plot4.R | 808bde92ef8c4915ace199c3bf7b634129f1d93a | [] | no_license | dds63b/Coursera_Exploratory_Data_Analysis | f00d0c2066a38a06bdd559245fc67c7b58481209 | 5358fd715a1f05d767ccd13f7fe67ddb525bdfb2 | refs/heads/master | 2020-03-30T18:53:43.755330 | 2015-02-22T11:54:29 | 2015-02-22T11:54:29 | 31,161,433 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,272 | r | plot4.R | # Preparing environment
## Set working directory
# NOTE(review): relative setwd() makes the script depend on where it is
# launched from — consider removing and running from the project root.
setwd('./Coursera/Exploratory_Data_Analysis')
## Download file if it doesn't already exist
data <- 'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip'
local.data <- 'ExDA_project2.zip'
if (! file.exists(local.data)) {
  download.file(data,
    destfile = local.data, method = 'curl')
}
## Unzip local data
if (file.exists(local.data)) {
  unzip(local.data)
}
## Remove downloaded archive
# NOTE(review): deleting the archive defeats the file.exists() cache check
# above — every run re-downloads the zip. Confirm whether that is intended.
file.remove(local.data)
## Reading data
# NEI: PM2.5 emissions records; SCC: source classification code lookup.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Question 4
## Across the United States, how have emissions from coal combustion-related sources changed
## from 1999–2008?
## Subset coal combustion
# Coal-combustion sources = "comb" in SCC.Level.One AND "coal" in
# SCC.Level.Four (case-insensitive); filter NEI to those SCC codes.
combustion <- grepl("comb", SCC$SCC.Level.One, ignore.case=TRUE)
coal <- grepl("coal", SCC$SCC.Level.Four, ignore.case=TRUE)
coalCombustion <- (combustion & coal)
combustionSCC <- SCC[coalCombustion,]$SCC
combustionNEI <- NEI[NEI$SCC %in% combustionSCC,]
## Plot
# Bar chart of total coal-combustion emissions per year; printing CCPlot
# renders it on the active device, then dev.copy/dev.off writes plot4.png.
library(ggplot2)
CCPlot <- ggplot(combustionNEI,aes(factor(year),Emissions)) +
  geom_bar(stat="identity") +
  labs(x="year", y="Emissions") +
  labs(title="US Coal Combustion")
CCPlot
dev.copy(png,"plot4.png")
dev.off()
eb77f1a9414ae2d71c74fdfbc11272586f0d2783 | 904e56e7fe5aa46080bfa23673cc453252b96cad | /R/mdm_ica_functions.R | 7f11f6d52329e619b8567a7aba5f2da4dab21baf | [] | no_license | zejin/EDMeasure | 77151fd0aba036ab108ff6353677b49ba944732c | 9f2373b238bec3764656e01a9b59306221835863 | refs/heads/master | 2021-04-26T16:42:23.629233 | 2018-02-25T22:26:56 | 2018-02-25T22:26:56 | 121,260,463 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,285 | r | mdm_ica_functions.R | asym_obj <- function(S) {
S <- as.matrix(S)
return(mdm(X = S, type = 'asym_dcov')$stat)
}
# Objective wrapper: symmetric distance-covariance mutual-dependence
# statistic of the columns of S (coerced to a matrix), as returned by
# mdm() (defined elsewhere in this package).
sym_obj <- function(S) {
  mdm(X = as.matrix(S), type = 'sym_dcov')$stat
}
# Objective wrapper: simplified complete-measure mutual-dependence
# statistic of the columns of S (coerced to a matrix), as returned by
# mdm() (defined elsewhere in this package).
comp_obj <- function(S) {
  mdm(X = as.matrix(S), type = 'comp_simp')$stat
}
# Objective wrapper: dHSIC statistic of the columns of S (coerced to a
# matrix), delegating to dhsic() with matrix input.
dhsic_obj <- function(S) {
  dhsic(X = as.matrix(S), matrix.input = TRUE)$dHSIC
}
# Inverse principal square-root factor of a symmetric positive-definite
# matrix A: from the eigendecomposition A = V D V', return
# M = D^(-1/2) V', so that M %*% A %*% t(M) is the identity.
#
# Fix: diag(x) on a length-1 vector is interpreted as a matrix dimension
# (diag(0.5) builds a 0 x 0 matrix), so the original failed for 1 x 1
# input; nrow= pins the intended diagonal size.
sqrt_inv <- function(A) {
  evd <- eigen(A, symmetric = TRUE)
  diag(1 / sqrt(evd$values), nrow = length(evd$values)) %*% t(evd$vectors)
}
# Latin-hypercube sample of n angle vectors for dimension d.
# Each vector has d*(d-1)/2 rotation angles: the first d-1 are drawn from
# [0, 2*pi], the remaining ones from [0, pi].  For every angle, the n
# draws occupy the n equal-width bins of its interval, visited in a
# random order (one value per bin — the Latin-hypercube property).
# Returns list(l = list of n angle vectors, m = n x d(d-1)/2 matrix
# whose rows are those vectors).
latin_hc_samp <- function(d, n) {
  n.angles <- d * (d - 1) / 2
  angle.mat <- matrix(0, n.angles, n)
  # Bin edges for the two intervals.
  full.edges <- seq(0, 2 * pi, length.out = n + 1)
  half.edges <- seq(0, pi, length.out = n + 1)
  # First d - 1 angles: one draw per bin of [0, 2*pi], shuffled.
  if (d >= 2) {
    for (k in 1:(d - 1)) {
      angle.mat[k, ] <- runif(n, min = full.edges[1:n], max = full.edges[2:(n + 1)])[sample(1:n)]
    }
  }
  # Remaining angles: one draw per bin of [0, pi], shuffled.
  if (n.angles >= d) {
    for (k in d:n.angles) {
      angle.mat[k, ] <- runif(n, min = half.edges[1:n], max = half.edges[2:(n + 1)])[sample(1:n)]
    }
  }
  # Also expose the samples as a plain list of column vectors.
  angle.list <- lapply(seq_len(n), function(k) angle.mat[, k])
  list(l = angle.list, m = t(angle.mat))
}
# Build the d x d Givens rotation acting in the (i, j) coordinate plane
# (i < j) for angle theta; all other coordinates are left untouched.
# For d = 2 and (i, j) = (1, 2) this is:
#   G = (cos(theta)  -sin(theta))
#       (sin(theta)   cos(theta))
givens_rot_mat <- function(theta, d, index) {
  i <- index[1]
  j <- index[2]
  ct <- cos(theta)
  st <- sin(theta)
  # Start from the identity and overwrite the four plane entries.
  G <- diag(d)
  G[i, i] <- ct
  G[j, j] <- ct
  G[i, j] <- -st
  G[j, i] <- st
  G
}
# Map a vector of d*(d-1)/2 rotation angles to the d x d orthogonal matrix
#   W = Q_{d-1,d} %*% ... %*% Q_{2,d} %*% ... %*% Q_{2,3} %*% Q_{1,d} %*% ... %*% Q_{1,2},
# where Q_{i,j} = givens_rot_mat(theta[k], d, c(i, j)) and angles are
# consumed in order of the (i, j) pairs with i < j, i-major.
# (The original header comment was a copy-paste of givens_rot_mat's.)
#
# Fix: the original always ran `for (i in 1:(d - 1))`, which for d == 1
# (zero-length theta) evaluates 1:0 = c(1, 0) and indexes out of range;
# the d >= 2 guard makes a zero-length theta return the 1 x 1 identity.
theta_to_W <- function(theta) {
  # Recover d from length(theta) = d * (d - 1) / 2.
  d <- (sqrt(8 * length(theta) + 1) + 1) / 2
  if (d != floor(d)) {
    stop("theta must have length d * (d - 1) / 2.")
  }
  W <- diag(d)
  index <- 1
  if (d >= 2) {
    for (i in 1:(d - 1)) {
      for (j in (i + 1):d) {
        Q_ij <- givens_rot_mat(theta[index], d, c(i, j))
        W <- Q_ij %*% W
        index <- index + 1
      }
    }
  }
  W
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.